summary refs log tree commit diff
diff options
context:
space:
mode:
authorMichaël Zasso <targos@protonmail.com>2019-08-01 08:38:30 +0200
committerMichaël Zasso <targos@protonmail.com>2019-08-01 12:53:56 +0200
commit2dcc3665abf57c3607cebffdeeca062f5894885d (patch)
tree4f560748132edcfb4c22d6f967a7e80d23d7ea2c
parent1ee47d550c6de132f06110aa13eceb7551d643b3 (diff)
downloadandroid-node-v8-2dcc3665abf57c3607cebffdeeca062f5894885d.tar.gz
android-node-v8-2dcc3665abf57c3607cebffdeeca062f5894885d.tar.bz2
android-node-v8-2dcc3665abf57c3607cebffdeeca062f5894885d.zip
deps: update V8 to 7.6.303.28
PR-URL: https://github.com/nodejs/node/pull/28016
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Refael Ackermann (רפאל פלחי) <refack@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
-rw-r--r--deps/v8/.git-blame-ignore-revs3
-rw-r--r--deps/v8/.gitignore1
-rw-r--r--deps/v8/.gn5
-rw-r--r--deps/v8/.vpython8
-rw-r--r--deps/v8/AUTHORS9
-rw-r--r--deps/v8/BUILD.gn1283
-rw-r--r--deps/v8/COMMON_OWNERS38
-rw-r--r--deps/v8/ChangeLog1520
-rw-r--r--deps/v8/DEPS32
-rw-r--r--deps/v8/ENG_REVIEW_OWNERS9
-rw-r--r--deps/v8/INFRA_OWNERS3
-rw-r--r--deps/v8/MIPS_OWNERS (renamed from deps/v8/src/builtins/mips/OWNERS)0
-rw-r--r--deps/v8/OWNERS67
-rw-r--r--deps/v8/PPC_OWNERS (renamed from deps/v8/src/builtins/ppc/OWNERS)0
-rw-r--r--deps/v8/PRESUBMIT.py2
-rw-r--r--deps/v8/S390_OWNERS (renamed from deps/v8/src/builtins/s390/OWNERS)0
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h4
-rw-r--r--deps/v8/benchmarks/micro/slice-perf.js83
-rw-r--r--deps/v8/build_overrides/OWNERS1
-rw-r--r--deps/v8/custom_deps/OWNERS3
-rw-r--r--deps/v8/docs/OWNERS2
-rw-r--r--deps/v8/gni/OWNERS1
-rw-r--r--deps/v8/gni/proto_library.gni14
-rw-r--r--deps/v8/gni/snapshot_toolchain.gni (renamed from deps/v8/snapshot_toolchain.gni)0
-rw-r--r--deps/v8/gni/v8.gni4
-rw-r--r--deps/v8/include/libplatform/v8-tracing.h18
-rw-r--r--deps/v8/include/v8-inspector.h7
-rw-r--r--deps/v8/include/v8-internal.h43
-rw-r--r--deps/v8/include/v8-platform.h1
-rw-r--r--deps/v8/include/v8-profiler.h77
-rw-r--r--deps/v8/include/v8-util.h18
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h274
-rw-r--r--deps/v8/infra/OWNERS5
-rw-r--r--deps/v8/infra/mb/gn_isolate_map.pyl4
-rw-r--r--deps/v8/infra/mb/mb_config.pyl20
-rw-r--r--deps/v8/infra/testing/OWNERS4
-rw-r--r--deps/v8/infra/testing/builders.pyl310
-rw-r--r--deps/v8/samples/OWNERS2
-rw-r--r--deps/v8/src/DEPS1
-rw-r--r--deps/v8/src/api/api-arguments-inl.h (renamed from deps/v8/src/api-arguments-inl.h)17
-rw-r--r--deps/v8/src/api/api-arguments.cc (renamed from deps/v8/src/api-arguments.cc)12
-rw-r--r--deps/v8/src/api/api-arguments.h (renamed from deps/v8/src/api-arguments.h)20
-rw-r--r--deps/v8/src/api/api-inl.h (renamed from deps/v8/src/api-inl.h)18
-rw-r--r--deps/v8/src/api/api-natives.cc (renamed from deps/v8/src/api-natives.cc)124
-rw-r--r--deps/v8/src/api/api-natives.h (renamed from deps/v8/src/api-natives.h)14
-rw-r--r--deps/v8/src/api/api.cc (renamed from deps/v8/src/api.cc)1588
-rw-r--r--deps/v8/src/api/api.h (renamed from deps/v8/src/api.h)42
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc120
-rw-r--r--deps/v8/src/asmjs/asm-js.cc62
-rw-r--r--deps/v8/src/asmjs/asm-js.h2
-rw-r--r--deps/v8/src/asmjs/asm-parser.cc4
-rw-r--r--deps/v8/src/asmjs/asm-parser.h2
-rw-r--r--deps/v8/src/asmjs/asm-scanner.cc8
-rw-r--r--deps/v8/src/asmjs/asm-scanner.h4
-rw-r--r--deps/v8/src/asmjs/asm-types.cc4
-rw-r--r--deps/v8/src/asmjs/asm-types.h4
-rw-r--r--deps/v8/src/assembler-arch.h30
-rw-r--r--deps/v8/src/assembler-inl.h30
-rw-r--r--deps/v8/src/ast/ast-function-literal-id-reindexer.cc72
-rw-r--r--deps/v8/src/ast/ast-function-literal-id-reindexer.h16
-rw-r--r--deps/v8/src/ast/ast-source-ranges.h25
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc55
-rw-r--r--deps/v8/src/ast/ast-value-factory.h14
-rw-r--r--deps/v8/src/ast/ast.cc27
-rw-r--r--deps/v8/src/ast/ast.h38
-rw-r--r--deps/v8/src/ast/modules.cc4
-rw-r--r--deps/v8/src/ast/modules.h12
-rw-r--r--deps/v8/src/ast/prettyprinter.cc47
-rw-r--r--deps/v8/src/ast/prettyprinter.h4
-rw-r--r--deps/v8/src/ast/scopes.cc81
-rw-r--r--deps/v8/src/ast/scopes.h21
-rw-r--r--deps/v8/src/ast/variables.cc4
-rw-r--r--deps/v8/src/ast/variables.h4
-rw-r--r--deps/v8/src/base/OWNERS1
-rw-r--r--deps/v8/src/base/compiler-specific.h6
-rw-r--r--deps/v8/src/base/flags.h2
-rw-r--r--deps/v8/src/base/format-macros.h97
-rw-r--r--deps/v8/src/base/logging.cc20
-rw-r--r--deps/v8/src/base/logging.h40
-rw-r--r--deps/v8/src/base/macros.h1
-rw-r--r--deps/v8/src/base/platform/platform-fuchsia.cc7
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc18
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc5
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc5
-rw-r--r--deps/v8/src/base/platform/platform.h1
-rw-r--r--deps/v8/src/base/small-vector.h6
-rw-r--r--deps/v8/src/base/type-traits.h (renamed from deps/v8/src/type-traits.h)6
-rw-r--r--deps/v8/src/builtins/accessors.cc (renamed from deps/v8/src/accessors.cc)97
-rw-r--r--deps/v8/src/builtins/accessors.h (renamed from deps/v8/src/accessors.h)18
-rw-r--r--deps/v8/src/builtins/arguments.tq1
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc107
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc100
-rw-r--r--deps/v8/src/builtins/array-join.tq26
-rw-r--r--deps/v8/src/builtins/array-reverse.tq2
-rw-r--r--deps/v8/src/builtins/array-slice.tq8
-rw-r--r--deps/v8/src/builtins/base.tq703
-rw-r--r--deps/v8/src/builtins/boolean.tq42
-rw-r--r--deps/v8/src/builtins/builtins-api.cc58
-rw-r--r--deps/v8/src/builtins/builtins-arguments-gen.cc29
-rw-r--r--deps/v8/src/builtins/builtins-arguments-gen.h14
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.cc105
-rw-r--r--deps/v8/src/builtins/builtins-array-gen.h13
-rw-r--r--deps/v8/src/builtins/builtins-array.cc126
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc20
-rw-r--r--deps/v8/src/builtins/builtins-async-function-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-async-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-async-generator-gen.cc10
-rw-r--r--deps/v8/src/builtins/builtins-async-iterator-gen.cc13
-rw-r--r--deps/v8/src/builtins/builtins-bigint-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-bigint.cc10
-rw-r--r--deps/v8/src/builtins/builtins-boolean-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-boolean.cc39
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.cc18
-rw-r--r--deps/v8/src/builtins/builtins-call-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-call.cc6
-rw-r--r--deps/v8/src/builtins/builtins-callsite.cc4
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.cc33
-rw-r--r--deps/v8/src/builtins/builtins-collections-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-collections.cc4
-rw-r--r--deps/v8/src/builtins/builtins-console-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-console.cc16
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.cc17
-rw-r--r--deps/v8/src/builtins/builtins-constructor-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-constructor.h4
-rw-r--r--deps/v8/src/builtins/builtins-conversion-gen.cc16
-rw-r--r--deps/v8/src/builtins/builtins-data-view-gen.h4
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc10
-rw-r--r--deps/v8/src/builtins/builtins-date-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-date.cc80
-rw-r--r--deps/v8/src/builtins/builtins-debug-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-definitions.h131
-rw-r--r--deps/v8/src/builtins/builtins-descriptors.h12
-rw-r--r--deps/v8/src/builtins/builtins-error.cc12
-rw-r--r--deps/v8/src/builtins/builtins-extras-utils.cc10
-rw-r--r--deps/v8/src/builtins/builtins-function-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-function.cc22
-rw-r--r--deps/v8/src/builtins/builtins-generator-gen.cc8
-rw-r--r--deps/v8/src/builtins/builtins-global-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-global.cc10
-rw-r--r--deps/v8/src/builtins/builtins-handler-gen.cc49
-rw-r--r--deps/v8/src/builtins/builtins-internal-gen.cc153
-rw-r--r--deps/v8/src/builtins/builtins-internal.cc6
-rw-r--r--deps/v8/src/builtins/builtins-interpreter-gen.cc4
-rw-r--r--deps/v8/src/builtins/builtins-intl-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-intl.cc27
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-iterator-gen.h11
-rw-r--r--deps/v8/src/builtins/builtins-json.cc14
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-lazy-gen.h4
-rw-r--r--deps/v8/src/builtins/builtins-math-gen.cc255
-rw-r--r--deps/v8/src/builtins/builtins-math-gen.h5
-rw-r--r--deps/v8/src/builtins/builtins-math.cc4
-rw-r--r--deps/v8/src/builtins/builtins-microtask-queue-gen.cc6
-rw-r--r--deps/v8/src/builtins/builtins-number-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-number.cc8
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.cc74
-rw-r--r--deps/v8/src/builtins/builtins-object-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-object.cc14
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.cc117
-rw-r--r--deps/v8/src/builtins/builtins-promise-gen.h30
-rw-r--r--deps/v8/src/builtins/builtins-promise.cc4
-rw-r--r--deps/v8/src/builtins/builtins-promise.h2
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.cc289
-rw-r--r--deps/v8/src/builtins/builtins-proxy-gen.h26
-rw-r--r--deps/v8/src/builtins/builtins-reflect-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-reflect.cc10
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.cc323
-rw-r--r--deps/v8/src/builtins/builtins-regexp-gen.h14
-rw-r--r--deps/v8/src/builtins/builtins-regexp.cc6
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc163
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc19
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.cc429
-rw-r--r--deps/v8/src/builtins/builtins-string-gen.h32
-rw-r--r--deps/v8/src/builtins/builtins-string.cc30
-rw-r--r--deps/v8/src/builtins/builtins-symbol-gen.cc2
-rw-r--r--deps/v8/src/builtins/builtins-symbol.cc8
-rw-r--r--deps/v8/src/builtins/builtins-trace.cc8
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.cc180
-rw-r--r--deps/v8/src/builtins/builtins-typed-array-gen.h15
-rw-r--r--deps/v8/src/builtins/builtins-typed-array.cc22
-rw-r--r--deps/v8/src/builtins/builtins-utils-gen.h2
-rw-r--r--deps/v8/src/builtins/builtins-utils-inl.h2
-rw-r--r--deps/v8/src/builtins/builtins-utils.h54
-rw-r--r--deps/v8/src/builtins/builtins-wasm-gen.cc12
-rw-r--r--deps/v8/src/builtins/builtins-weak-refs.cc16
-rw-r--r--deps/v8/src/builtins/builtins.cc148
-rw-r--r--deps/v8/src/builtins/builtins.h38
-rw-r--r--deps/v8/src/builtins/collections.tq5
-rw-r--r--deps/v8/src/builtins/constants-table-builder.cc14
-rw-r--r--deps/v8/src/builtins/constants-table-builder.h8
-rw-r--r--deps/v8/src/builtins/data-view.tq1
-rw-r--r--deps/v8/src/builtins/frames.tq1
-rw-r--r--deps/v8/src/builtins/growable-fixed-array-gen.h2
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc81
-rw-r--r--deps/v8/src/builtins/internal-coverage.tq64
-rw-r--r--deps/v8/src/builtins/math.tq238
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc60
-rw-r--r--deps/v8/src/builtins/mips64/OWNERS1
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc60
-rw-r--r--deps/v8/src/builtins/object-fromentries.tq4
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc92
-rw-r--r--deps/v8/src/builtins/proxy-get-property.tq63
-rw-r--r--deps/v8/src/builtins/proxy-has-property.tq55
-rw-r--r--deps/v8/src/builtins/proxy-revoke.tq3
-rw-r--r--deps/v8/src/builtins/proxy-set-property.tq84
-rw-r--r--deps/v8/src/builtins/proxy.tq23
-rw-r--r--deps/v8/src/builtins/regexp-replace.tq120
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc91
-rw-r--r--deps/v8/src/builtins/setup-builtins-internal.cc118
-rw-r--r--deps/v8/src/builtins/string-iterator.tq46
-rw-r--r--deps/v8/src/builtins/string-slice.tq35
-rw-r--r--deps/v8/src/builtins/string-substring.tq50
-rw-r--r--deps/v8/src/builtins/string.tq136
-rw-r--r--deps/v8/src/builtins/typed-array-createtypedarray.tq51
-rw-r--r--deps/v8/src/builtins/typed-array-every.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-filter.tq5
-rw-r--r--deps/v8/src/builtins/typed-array-find.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-findindex.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-foreach.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-reduce.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-reduceright.tq4
-rw-r--r--deps/v8/src/builtins/typed-array-some.tq4
-rw-r--r--deps/v8/src/builtins/typed-array.tq122
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc84
-rw-r--r--deps/v8/src/codegen/OWNERS13
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm-inl.h (renamed from deps/v8/src/arm/assembler-arm-inl.h)108
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.cc (renamed from deps/v8/src/arm/assembler-arm.cc)852
-rw-r--r--deps/v8/src/codegen/arm/assembler-arm.h (renamed from deps/v8/src/arm/assembler-arm.h)424
-rw-r--r--deps/v8/src/codegen/arm/constants-arm.cc (renamed from deps/v8/src/arm/constants-arm.cc)36
-rw-r--r--deps/v8/src/codegen/arm/constants-arm.h (renamed from deps/v8/src/arm/constants-arm.h)233
-rw-r--r--deps/v8/src/codegen/arm/cpu-arm.cc (renamed from deps/v8/src/arm/cpu-arm.cc)37
-rw-r--r--deps/v8/src/codegen/arm/interface-descriptors-arm.cc (renamed from deps/v8/src/arm/interface-descriptors-arm.cc)5
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.cc (renamed from deps/v8/src/arm/macro-assembler-arm.cc)285
-rw-r--r--deps/v8/src/codegen/arm/macro-assembler-arm.h (renamed from deps/v8/src/arm/macro-assembler-arm.h)105
-rw-r--r--deps/v8/src/codegen/arm/register-arm.h (renamed from deps/v8/src/arm/register-arm.h)20
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64-inl.h (renamed from deps/v8/src/arm64/assembler-arm64-inl.h)232
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.cc (renamed from deps/v8/src/arm64/assembler-arm64.cc)606
-rw-r--r--deps/v8/src/codegen/arm64/assembler-arm64.h (renamed from deps/v8/src/arm64/assembler-arm64.h)312
-rw-r--r--deps/v8/src/codegen/arm64/constants-arm64.h (renamed from deps/v8/src/arm64/constants-arm64.h)807
-rw-r--r--deps/v8/src/codegen/arm64/cpu-arm64.cc116
-rw-r--r--deps/v8/src/codegen/arm64/decoder-arm64-inl.h (renamed from deps/v8/src/arm64/decoder-arm64-inl.h)117
-rw-r--r--deps/v8/src/codegen/arm64/decoder-arm64.cc (renamed from deps/v8/src/arm64/decoder-arm64.cc)14
-rw-r--r--deps/v8/src/codegen/arm64/decoder-arm64.h (renamed from deps/v8/src/arm64/decoder-arm64.h)28
-rw-r--r--deps/v8/src/codegen/arm64/instructions-arm64-constants.cc (renamed from deps/v8/src/arm64/instructions-arm64-constants.cc)0
-rw-r--r--deps/v8/src/codegen/arm64/instructions-arm64.cc (renamed from deps/v8/src/arm64/instructions-arm64.cc)36
-rw-r--r--deps/v8/src/codegen/arm64/instructions-arm64.h (renamed from deps/v8/src/arm64/instructions-arm64.h)90
-rw-r--r--deps/v8/src/codegen/arm64/instrument-arm64.cc (renamed from deps/v8/src/arm64/instrument-arm64.cc)156
-rw-r--r--deps/v8/src/codegen/arm64/instrument-arm64.h (renamed from deps/v8/src/arm64/instrument-arm64.h)33
-rw-r--r--deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc (renamed from deps/v8/src/arm64/interface-descriptors-arm64.cc)5
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h (renamed from deps/v8/src/arm64/macro-assembler-arm64-inl.h)266
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.cc (renamed from deps/v8/src/arm64/macro-assembler-arm64.cc)541
-rw-r--r--deps/v8/src/codegen/arm64/macro-assembler-arm64.h (renamed from deps/v8/src/arm64/macro-assembler-arm64.h)303
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.cc (renamed from deps/v8/src/arm64/register-arm64.cc)2
-rw-r--r--deps/v8/src/codegen/arm64/register-arm64.h (renamed from deps/v8/src/arm64/register-arm64.h)25
-rw-r--r--deps/v8/src/codegen/arm64/utils-arm64.cc (renamed from deps/v8/src/arm64/utils-arm64.cc)8
-rw-r--r--deps/v8/src/codegen/arm64/utils-arm64.h (renamed from deps/v8/src/arm64/utils-arm64.h)17
-rw-r--r--deps/v8/src/codegen/assembler-arch.h30
-rw-r--r--deps/v8/src/codegen/assembler-inl.h30
-rw-r--r--deps/v8/src/codegen/assembler.cc (renamed from deps/v8/src/assembler.cc)74
-rw-r--r--deps/v8/src/codegen/assembler.h (renamed from deps/v8/src/assembler.h)69
-rw-r--r--deps/v8/src/codegen/bailout-reason.cc (renamed from deps/v8/src/bailout-reason.cc)2
-rw-r--r--deps/v8/src/codegen/bailout-reason.h (renamed from deps/v8/src/bailout-reason.h)7
-rw-r--r--deps/v8/src/codegen/callable.h (renamed from deps/v8/src/callable.h)10
-rw-r--r--deps/v8/src/codegen/code-comments.cc (renamed from deps/v8/src/code-comments.cc)4
-rw-r--r--deps/v8/src/codegen/code-comments.h (renamed from deps/v8/src/code-comments.h)6
-rw-r--r--deps/v8/src/codegen/code-desc.cc (renamed from deps/v8/src/code-desc.cc)4
-rw-r--r--deps/v8/src/codegen/code-desc.h (renamed from deps/v8/src/code-desc.h)8
-rw-r--r--deps/v8/src/codegen/code-factory.cc (renamed from deps/v8/src/code-factory.cc)36
-rw-r--r--deps/v8/src/codegen/code-factory.h (renamed from deps/v8/src/code-factory.h)16
-rw-r--r--deps/v8/src/codegen/code-reference.cc (renamed from deps/v8/src/code-reference.cc)18
-rw-r--r--deps/v8/src/codegen/code-reference.h (renamed from deps/v8/src/code-reference.h)8
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.cc (renamed from deps/v8/src/code-stub-assembler.cc)1184
-rw-r--r--deps/v8/src/codegen/code-stub-assembler.h (renamed from deps/v8/src/code-stub-assembler.h)239
-rw-r--r--deps/v8/src/codegen/compilation-cache.cc (renamed from deps/v8/src/compilation-cache.cc)50
-rw-r--r--deps/v8/src/codegen/compilation-cache.h (renamed from deps/v8/src/compilation-cache.h)31
-rw-r--r--deps/v8/src/codegen/compiler.cc (renamed from deps/v8/src/compiler.cc)113
-rw-r--r--deps/v8/src/codegen/compiler.h (renamed from deps/v8/src/compiler.h)20
-rw-r--r--deps/v8/src/codegen/constant-pool.cc (renamed from deps/v8/src/constant-pool.cc)4
-rw-r--r--deps/v8/src/codegen/constant-pool.h (renamed from deps/v8/src/constant-pool.h)14
-rw-r--r--deps/v8/src/codegen/constants-arch.h28
-rw-r--r--deps/v8/src/codegen/cpu-features.h (renamed from deps/v8/src/cpu-features.h)9
-rw-r--r--deps/v8/src/codegen/external-reference-table.cc (renamed from deps/v8/src/external-reference-table.cc)8
-rw-r--r--deps/v8/src/codegen/external-reference-table.h (renamed from deps/v8/src/external-reference-table.h)12
-rw-r--r--deps/v8/src/codegen/external-reference.cc (renamed from deps/v8/src/external-reference.cc)42
-rw-r--r--deps/v8/src/codegen/external-reference.h (renamed from deps/v8/src/external-reference.h)14
-rw-r--r--deps/v8/src/codegen/flush-instruction-cache.cc (renamed from deps/v8/src/flush-instruction-cache.cc)6
-rw-r--r--deps/v8/src/codegen/flush-instruction-cache.h (renamed from deps/v8/src/flush-instruction-cache.h)6
-rw-r--r--deps/v8/src/codegen/handler-table.cc (renamed from deps/v8/src/handler-table.cc)40
-rw-r--r--deps/v8/src/codegen/handler-table.h (renamed from deps/v8/src/handler-table.h)18
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32-inl.h (renamed from deps/v8/src/ia32/assembler-ia32-inl.h)56
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.cc (renamed from deps/v8/src/ia32/assembler-ia32.cc)179
-rw-r--r--deps/v8/src/codegen/ia32/assembler-ia32.h (renamed from deps/v8/src/ia32/assembler-ia32.h)109
-rw-r--r--deps/v8/src/codegen/ia32/constants-ia32.h (renamed from deps/v8/src/ia32/constants-ia32.h)8
-rw-r--r--deps/v8/src/codegen/ia32/cpu-ia32.cc (renamed from deps/v8/src/ia32/cpu-ia32.cc)2
-rw-r--r--deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc (renamed from deps/v8/src/ia32/interface-descriptors-ia32.cc)4
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.cc (renamed from deps/v8/src/ia32/macro-assembler-ia32.cc)95
-rw-r--r--deps/v8/src/codegen/ia32/macro-assembler-ia32.h (renamed from deps/v8/src/ia32/macro-assembler-ia32.h)56
-rw-r--r--deps/v8/src/codegen/ia32/register-ia32.h (renamed from deps/v8/src/ia32/register-ia32.h)16
-rw-r--r--deps/v8/src/codegen/ia32/sse-instr.h (renamed from deps/v8/src/ia32/sse-instr.h)6
-rw-r--r--deps/v8/src/codegen/interface-descriptors.cc (renamed from deps/v8/src/interface-descriptors.cc)5
-rw-r--r--deps/v8/src/codegen/interface-descriptors.h (renamed from deps/v8/src/interface-descriptors.h)23
-rw-r--r--deps/v8/src/codegen/label.h (renamed from deps/v8/src/label.h)6
-rw-r--r--deps/v8/src/codegen/machine-type.cc (renamed from deps/v8/src/machine-type.cc)5
-rw-r--r--deps/v8/src/codegen/machine-type.h (renamed from deps/v8/src/machine-type.h)88
-rw-r--r--deps/v8/src/codegen/macro-assembler-inl.h15
-rw-r--r--deps/v8/src/codegen/macro-assembler.h (renamed from deps/v8/src/macro-assembler.h)49
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips-inl.h (renamed from deps/v8/src/mips/assembler-mips-inl.h)52
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.cc (renamed from deps/v8/src/mips/assembler-mips.cc)640
-rw-r--r--deps/v8/src/codegen/mips/assembler-mips.h (renamed from deps/v8/src/mips/assembler-mips.h)100
-rw-r--r--deps/v8/src/codegen/mips/constants-mips.cc (renamed from deps/v8/src/mips/constants-mips.cc)35
-rw-r--r--deps/v8/src/codegen/mips/constants-mips.h (renamed from deps/v8/src/mips/constants-mips.h)142
-rw-r--r--deps/v8/src/codegen/mips/cpu-mips.cc (renamed from deps/v8/src/mips/cpu-mips.cc)11
-rw-r--r--deps/v8/src/codegen/mips/interface-descriptors-mips.cc (renamed from deps/v8/src/mips/interface-descriptors-mips.cc)25
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.cc (renamed from deps/v8/src/mips/macro-assembler-mips.cc)206
-rw-r--r--deps/v8/src/codegen/mips/macro-assembler-mips.h (renamed from deps/v8/src/mips/macro-assembler-mips.h)127
-rw-r--r--deps/v8/src/codegen/mips/register-mips.h (renamed from deps/v8/src/mips/register-mips.h)18
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64-inl.h (renamed from deps/v8/src/mips64/assembler-mips64-inl.h)53
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.cc (renamed from deps/v8/src/mips64/assembler-mips64.cc)700
-rw-r--r--deps/v8/src/codegen/mips64/assembler-mips64.h (renamed from deps/v8/src/mips64/assembler-mips64.h)100
-rw-r--r--deps/v8/src/codegen/mips64/constants-mips64.cc (renamed from deps/v8/src/mips64/constants-mips64.cc)35
-rw-r--r--deps/v8/src/codegen/mips64/constants-mips64.h (renamed from deps/v8/src/mips64/constants-mips64.h)120
-rw-r--r--deps/v8/src/codegen/mips64/cpu-mips64.cc (renamed from deps/v8/src/mips64/cpu-mips64.cc)11
-rw-r--r--deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc (renamed from deps/v8/src/mips64/interface-descriptors-mips64.cc)13
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.cc (renamed from deps/v8/src/mips64/macro-assembler-mips64.cc)220
-rw-r--r--deps/v8/src/codegen/mips64/macro-assembler-mips64.h (renamed from deps/v8/src/mips64/macro-assembler-mips64.h)114
-rw-r--r--deps/v8/src/codegen/mips64/register-mips64.h (renamed from deps/v8/src/mips64/register-mips64.h)18
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.cc (renamed from deps/v8/src/optimized-compilation-info.cc)23
-rw-r--r--deps/v8/src/codegen/optimized-compilation-info.h (renamed from deps/v8/src/optimized-compilation-info.h)24
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc-inl.h (renamed from deps/v8/src/ppc/assembler-ppc-inl.h)72
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.cc (renamed from deps/v8/src/ppc/assembler-ppc.cc)245
-rw-r--r--deps/v8/src/codegen/ppc/assembler-ppc.h (renamed from deps/v8/src/ppc/assembler-ppc.h)88
-rw-r--r--deps/v8/src/codegen/ppc/code-stubs-ppc.cc (renamed from deps/v8/src/ppc/code-stubs-ppc.cc)20
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.cc (renamed from deps/v8/src/ppc/constants-ppc.cc)6
-rw-r--r--deps/v8/src/codegen/ppc/constants-ppc.h (renamed from deps/v8/src/ppc/constants-ppc.h)4513
-rw-r--r--deps/v8/src/codegen/ppc/cpu-ppc.cc (renamed from deps/v8/src/ppc/cpu-ppc.cc)10
-rw-r--r--deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc (renamed from deps/v8/src/ppc/interface-descriptors-ppc.cc)6
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.cc (renamed from deps/v8/src/ppc/macro-assembler-ppc.cc)133
-rw-r--r--deps/v8/src/codegen/ppc/macro-assembler-ppc.h (renamed from deps/v8/src/ppc/macro-assembler-ppc.h)40
-rw-r--r--deps/v8/src/codegen/ppc/register-ppc.h (renamed from deps/v8/src/ppc/register-ppc.h)19
-rw-r--r--deps/v8/src/codegen/register-arch.h31
-rw-r--r--deps/v8/src/codegen/register-configuration.cc (renamed from deps/v8/src/register-configuration.cc)16
-rw-r--r--deps/v8/src/codegen/register-configuration.h (renamed from deps/v8/src/register-configuration.h)14
-rw-r--r--deps/v8/src/codegen/register.h (renamed from deps/v8/src/register.h)8
-rw-r--r--deps/v8/src/codegen/reglist.h (renamed from deps/v8/src/reglist.h)10
-rw-r--r--deps/v8/src/codegen/reloc-info.cc (renamed from deps/v8/src/reloc-info.cc)73
-rw-r--r--deps/v8/src/codegen/reloc-info.h (renamed from deps/v8/src/reloc-info.h)48
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390-inl.h (renamed from deps/v8/src/s390/assembler-s390-inl.h)48
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.cc (renamed from deps/v8/src/s390/assembler-s390.cc)37
-rw-r--r--deps/v8/src/codegen/s390/assembler-s390.h (renamed from deps/v8/src/s390/assembler-s390.h)950
-rw-r--r--deps/v8/src/codegen/s390/code-stubs-s390.cc (renamed from deps/v8/src/s390/code-stubs-s390.cc)18
-rw-r--r--deps/v8/src/codegen/s390/constants-s390.cc (renamed from deps/v8/src/s390/constants-s390.cc)2
-rw-r--r--deps/v8/src/codegen/s390/constants-s390.h (renamed from deps/v8/src/s390/constants-s390.h)81
-rw-r--r--deps/v8/src/codegen/s390/cpu-s390.cc (renamed from deps/v8/src/s390/cpu-s390.cc)2
-rw-r--r--deps/v8/src/codegen/s390/interface-descriptors-s390.cc (renamed from deps/v8/src/s390/interface-descriptors-s390.cc)4
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.cc (renamed from deps/v8/src/s390/macro-assembler-s390.cc)152
-rw-r--r--deps/v8/src/codegen/s390/macro-assembler-s390.h (renamed from deps/v8/src/s390/macro-assembler-s390.h)59
-rw-r--r--deps/v8/src/codegen/s390/register-s390.h (renamed from deps/v8/src/s390/register-s390.h)14
-rw-r--r--deps/v8/src/codegen/safepoint-table.cc (renamed from deps/v8/src/safepoint-table.cc)108
-rw-r--r--deps/v8/src/codegen/safepoint-table.h (renamed from deps/v8/src/safepoint-table.h)91
-rw-r--r--deps/v8/src/codegen/signature.h (renamed from deps/v8/src/signature.h)10
-rw-r--r--deps/v8/src/codegen/source-position-table.cc (renamed from deps/v8/src/source-position-table.cc)12
-rw-r--r--deps/v8/src/codegen/source-position-table.h (renamed from deps/v8/src/source-position-table.h)14
-rw-r--r--deps/v8/src/codegen/source-position.cc (renamed from deps/v8/src/source-position.cc)34
-rw-r--r--deps/v8/src/codegen/source-position.h (renamed from deps/v8/src/source-position.h)24
-rw-r--r--deps/v8/src/codegen/string-constants.cc (renamed from deps/v8/src/string-constants.cc)6
-rw-r--r--deps/v8/src/codegen/string-constants.h (renamed from deps/v8/src/string-constants.h)8
-rw-r--r--deps/v8/src/codegen/turbo-assembler.cc (renamed from deps/v8/src/turbo-assembler.cc)6
-rw-r--r--deps/v8/src/codegen/turbo-assembler.h (renamed from deps/v8/src/turbo-assembler.h)22
-rw-r--r--deps/v8/src/codegen/unoptimized-compilation-info.cc (renamed from deps/v8/src/unoptimized-compilation-info.cc)8
-rw-r--r--deps/v8/src/codegen/unoptimized-compilation-info.h (renamed from deps/v8/src/unoptimized-compilation-info.h)18
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64-inl.h (renamed from deps/v8/src/x64/assembler-x64-inl.h)108
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.cc (renamed from deps/v8/src/x64/assembler-x64.cc)293
-rw-r--r--deps/v8/src/codegen/x64/assembler-x64.h (renamed from deps/v8/src/x64/assembler-x64.h)131
-rw-r--r--deps/v8/src/codegen/x64/constants-x64.h (renamed from deps/v8/src/x64/constants-x64.h)8
-rw-r--r--deps/v8/src/codegen/x64/cpu-x64.cc (renamed from deps/v8/src/x64/cpu-x64.cc)2
-rw-r--r--deps/v8/src/codegen/x64/interface-descriptors-x64.cc (renamed from deps/v8/src/x64/interface-descriptors-x64.cc)6
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.cc (renamed from deps/v8/src/x64/macro-assembler-x64.cc)240
-rw-r--r--deps/v8/src/codegen/x64/macro-assembler-x64.h (renamed from deps/v8/src/x64/macro-assembler-x64.h)94
-rw-r--r--deps/v8/src/codegen/x64/register-x64.h (renamed from deps/v8/src/x64/register-x64.h)16
-rw-r--r--deps/v8/src/codegen/x64/sse-instr.h (renamed from deps/v8/src/x64/sse-instr.h)6
-rw-r--r--deps/v8/src/common/assert-scope.cc (renamed from deps/v8/src/assert-scope.cc)11
-rw-r--r--deps/v8/src/common/assert-scope.h (renamed from deps/v8/src/assert-scope.h)120
-rw-r--r--deps/v8/src/common/checks.h (renamed from deps/v8/src/checks.h)10
-rw-r--r--deps/v8/src/common/globals.h (renamed from deps/v8/src/globals.h)204
-rw-r--r--deps/v8/src/common/ptr-compr-inl.h (renamed from deps/v8/src/ptr-compr-inl.h)75
-rw-r--r--deps/v8/src/common/ptr-compr.h (renamed from deps/v8/src/ptr-compr.h)8
-rw-r--r--deps/v8/src/common/v8memory.h (renamed from deps/v8/src/v8memory.h)28
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc14
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher.h12
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc18
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h6
-rw-r--r--deps/v8/src/compiler/access-builder.cc509
-rw-r--r--deps/v8/src/compiler/access-builder.h25
-rw-r--r--deps/v8/src/compiler/access-info.cc469
-rw-r--r--deps/v8/src/compiler/access-info.h147
-rw-r--r--deps/v8/src/compiler/backend/arm/code-generator-arm.cc112
-rw-r--r--deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc10
-rw-r--r--deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.h4
-rw-r--r--deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc128
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h2
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc2
-rw-r--r--deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc162
-rw-r--r--deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.h4
-rw-r--r--deps/v8/src/compiler/backend/code-generator-impl.h2
-rw-r--r--deps/v8/src/compiler/backend/code-generator.cc56
-rw-r--r--deps/v8/src/compiler/backend/code-generator.h10
-rw-r--r--deps/v8/src/compiler/backend/gap-resolver.cc2
-rw-r--r--deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc78
-rw-r--r--deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc2
-rw-r--r--deps/v8/src/compiler/backend/instruction-codes.h4
-rw-r--r--deps/v8/src/compiler/backend/instruction-scheduler.cc2
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector-impl.h4
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.cc25
-rw-r--r--deps/v8/src/compiler/backend/instruction-selector.h5
-rw-r--r--deps/v8/src/compiler/backend/instruction.cc5
-rw-r--r--deps/v8/src/compiler/backend/instruction.h10
-rw-r--r--deps/v8/src/compiler/backend/mips/OWNERS1
-rw-r--r--deps/v8/src/compiler/backend/mips/code-generator-mips.cc16
-rw-r--r--deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc7
-rw-r--r--deps/v8/src/compiler/backend/mips64/OWNERS1
-rw-r--r--deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc14
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc2
-rw-r--r--deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc13
-rw-r--r--deps/v8/src/compiler/backend/move-optimizer.cc2
-rw-r--r--deps/v8/src/compiler/backend/move-optimizer.h2
-rw-r--r--deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc19
-rw-r--r--deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc3
-rw-r--r--deps/v8/src/compiler/backend/register-allocator-verifier.cc4
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.cc7
-rw-r--r--deps/v8/src/compiler/backend/register-allocator.h8
-rw-r--r--deps/v8/src/compiler/backend/s390/OWNERS4
-rw-r--r--deps/v8/src/compiler/backend/s390/code-generator-s390.cc16
-rw-r--r--deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc2
-rw-r--r--deps/v8/src/compiler/backend/unwinding-info-writer.h2
-rw-r--r--deps/v8/src/compiler/backend/x64/code-generator-x64.cc86
-rw-r--r--deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc57
-rw-r--r--deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.h4
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc4
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.h4
-rw-r--r--deps/v8/src/compiler/branch-elimination.h2
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.cc10
-rw-r--r--deps/v8/src/compiler/bytecode-analysis.h21
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc703
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h455
-rw-r--r--deps/v8/src/compiler/bytecode-liveness-map.h2
-rw-r--r--deps/v8/src/compiler/c-linkage.cc6
-rw-r--r--deps/v8/src/compiler/checkpoint-elimination.h2
-rw-r--r--deps/v8/src/compiler/code-assembler.cc140
-rw-r--r--deps/v8/src/compiler/code-assembler.h85
-rw-r--r--deps/v8/src/compiler/common-node-cache.cc2
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc14
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h3
-rw-r--r--deps/v8/src/compiler/common-operator.cc5
-rw-r--r--deps/v8/src/compiler/common-operator.h13
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.cc134
-rw-r--r--deps/v8/src/compiler/compilation-dependencies.h22
-rw-r--r--deps/v8/src/compiler/compiler-source-position-table.h4
-rw-r--r--deps/v8/src/compiler/constant-folding-reducer.cc2
-rw-r--r--deps/v8/src/compiler/control-equivalence.h2
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.h2
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.cc31
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.h5
-rw-r--r--deps/v8/src/compiler/decompression-elimination.cc219
-rw-r--r--deps/v8/src/compiler/decompression-elimination.h78
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc501
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h250
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc7
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h3
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc5
-rw-r--r--deps/v8/src/compiler/escape-analysis.h2
-rw-r--r--deps/v8/src/compiler/frame-states.cc17
-rw-r--r--deps/v8/src/compiler/frame-states.h8
-rw-r--r--deps/v8/src/compiler/frame.h4
-rw-r--r--deps/v8/src/compiler/graph-assembler.cc85
-rw-r--r--deps/v8/src/compiler/graph-assembler.h12
-rw-r--r--deps/v8/src/compiler/graph-reducer.h2
-rw-r--r--deps/v8/src/compiler/graph-trimmer.h2
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc46
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h4
-rw-r--r--deps/v8/src/compiler/graph.h2
-rw-r--r--deps/v8/src/compiler/int64-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc1429
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h8
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc2
-rw-r--r--deps/v8/src/compiler/js-context-specialization.h2
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc42
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc4
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-graph.cc4
-rw-r--r--deps/v8/src/compiler/js-graph.h4
-rw-r--r--deps/v8/src/compiler/js-heap-broker.cc311
-rw-r--r--deps/v8/src/compiler/js-heap-broker.h113
-rw-r--r--deps/v8/src/compiler/js-heap-copy-reducer.cc2
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc27
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h5
-rw-r--r--deps/v8/src/compiler/js-inlining.cc229
-rw-r--r--deps/v8/src/compiler/js-inlining.h9
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc53
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h13
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc648
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h19
-rw-r--r--deps/v8/src/compiler/js-operator.cc6
-rw-r--r--deps/v8/src/compiler/js-operator.h10
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.cc9
-rw-r--r--deps/v8/src/compiler/js-type-hint-lowering.h4
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc22
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h2
-rw-r--r--deps/v8/src/compiler/linkage.cc15
-rw-r--r--deps/v8/src/compiler/linkage.h16
-rw-r--r--deps/v8/src/compiler/load-elimination.cc236
-rw-r--r--deps/v8/src/compiler/load-elimination.h89
-rw-r--r--deps/v8/src/compiler/loop-analysis.h2
-rw-r--r--deps/v8/src/compiler/loop-peeling.h2
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc118
-rw-r--r--deps/v8/src/compiler/machine-graph.cc2
-rw-r--r--deps/v8/src/compiler/machine-graph.h2
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc22
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h2
-rw-r--r--deps/v8/src/compiler/machine-operator.cc9
-rw-r--r--deps/v8/src/compiler/machine-operator.h10
-rw-r--r--deps/v8/src/compiler/map-inference.cc149
-rw-r--r--deps/v8/src/compiler/map-inference.h108
-rw-r--r--deps/v8/src/compiler/memory-optimizer.cc242
-rw-r--r--deps/v8/src/compiler/memory-optimizer.h9
-rw-r--r--deps/v8/src/compiler/node-cache.cc2
-rw-r--r--deps/v8/src/compiler/node-matchers.h15
-rw-r--r--deps/v8/src/compiler/node-origin-table.h4
-rw-r--r--deps/v8/src/compiler/node-properties.cc55
-rw-r--r--deps/v8/src/compiler/node-properties.h11
-rw-r--r--deps/v8/src/compiler/node.cc2
-rw-r--r--deps/v8/src/compiler/node.h2
-rw-r--r--deps/v8/src/compiler/opcodes.h25
-rw-r--r--deps/v8/src/compiler/operation-typer.cc15
-rw-r--r--deps/v8/src/compiler/operation-typer.h3
-rw-r--r--deps/v8/src/compiler/operator-properties.h2
-rw-r--r--deps/v8/src/compiler/operator.h4
-rw-r--r--deps/v8/src/compiler/osr.cc6
-rw-r--r--deps/v8/src/compiler/per-isolate-compiler-cache.h2
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc5
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h2
-rw-r--r--deps/v8/src/compiler/pipeline.cc155
-rw-r--r--deps/v8/src/compiler/pipeline.h12
-rw-r--r--deps/v8/src/compiler/property-access-builder.cc173
-rw-r--r--deps/v8/src/compiler/property-access-builder.h22
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc13
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h180
-rw-r--r--deps/v8/src/compiler/refs-map.h2
-rw-r--r--deps/v8/src/compiler/representation-change.cc99
-rw-r--r--deps/v8/src/compiler/representation-change.h2
-rw-r--r--deps/v8/src/compiler/schedule.cc2
-rw-r--r--deps/v8/src/compiler/schedule.h2
-rw-r--r--deps/v8/src/compiler/scheduler.cc3
-rw-r--r--deps/v8/src/compiler/scheduler.h2
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.cc360
-rw-r--r--deps/v8/src/compiler/serializer-for-background-compilation.h47
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc96
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc2
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h2
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc108
-rw-r--r--deps/v8/src/compiler/simplified-operator.h84
-rw-r--r--deps/v8/src/compiler/state-values-utils.cc2
-rw-r--r--deps/v8/src/compiler/state-values-utils.h2
-rw-r--r--deps/v8/src/compiler/type-cache.h9
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc33
-rw-r--r--deps/v8/src/compiler/typed-optimization.h2
-rw-r--r--deps/v8/src/compiler/typer.cc54
-rw-r--r--deps/v8/src/compiler/typer.h2
-rw-r--r--deps/v8/src/compiler/types.cc15
-rw-r--r--deps/v8/src/compiler/types.h10
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.h2
-rw-r--r--deps/v8/src/compiler/vector-slot-pair.cc (renamed from deps/v8/src/vector-slot-pair.cc)4
-rw-r--r--deps/v8/src/compiler/vector-slot-pair.h (renamed from deps/v8/src/vector-slot-pair.h)12
-rw-r--r--deps/v8/src/compiler/verifier.cc16
-rw-r--r--deps/v8/src/compiler/verifier.h2
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc670
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h86
-rw-r--r--deps/v8/src/compiler/write-barrier-kind.h52
-rw-r--r--deps/v8/src/compiler/zone-stats.h2
-rw-r--r--deps/v8/src/constants-arch.h28
-rw-r--r--deps/v8/src/d8/OWNERS5
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.cc (renamed from deps/v8/src/async-hooks-wrapper.cc)6
-rw-r--r--deps/v8/src/d8/async-hooks-wrapper.h (renamed from deps/v8/src/async-hooks-wrapper.h)10
-rw-r--r--deps/v8/src/d8/d8-console.cc (renamed from deps/v8/src/d8-console.cc)6
-rw-r--r--deps/v8/src/d8/d8-console.h (renamed from deps/v8/src/d8-console.h)6
-rw-r--r--deps/v8/src/d8/d8-js.cc (renamed from deps/v8/src/d8-js.cc)2
-rw-r--r--deps/v8/src/d8/d8-platforms.cc (renamed from deps/v8/src/d8-platforms.cc)2
-rw-r--r--deps/v8/src/d8/d8-platforms.h (renamed from deps/v8/src/d8-platforms.h)6
-rw-r--r--deps/v8/src/d8/d8-posix.cc (renamed from deps/v8/src/d8-posix.cc)113
-rw-r--r--deps/v8/src/d8/d8-windows.cc (renamed from deps/v8/src/d8-windows.cc)4
-rw-r--r--deps/v8/src/d8/d8.cc (renamed from deps/v8/src/d8.cc)185
-rw-r--r--deps/v8/src/d8/d8.h (renamed from deps/v8/src/d8.h)127
-rw-r--r--deps/v8/src/date/OWNERS3
-rw-r--r--deps/v8/src/date/date.cc (renamed from deps/v8/src/date.cc)60
-rw-r--r--deps/v8/src/date/date.h (renamed from deps/v8/src/date.h)15
-rw-r--r--deps/v8/src/date/dateparser-inl.h (renamed from deps/v8/src/dateparser-inl.h)48
-rw-r--r--deps/v8/src/date/dateparser.cc (renamed from deps/v8/src/dateparser.cc)100
-rw-r--r--deps/v8/src/date/dateparser.h (renamed from deps/v8/src/dateparser.h)61
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc8
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc8
-rw-r--r--deps/v8/src/debug/debug-coverage.cc204
-rw-r--r--deps/v8/src/debug/debug-coverage.h9
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc42
-rw-r--r--deps/v8/src/debug/debug-evaluate.h2
-rw-r--r--deps/v8/src/debug/debug-frames.cc16
-rw-r--r--deps/v8/src/debug/debug-frames.h10
-rw-r--r--deps/v8/src/debug/debug-interface.h15
-rw-r--r--deps/v8/src/debug/debug-property-iterator.cc17
-rw-r--r--deps/v8/src/debug/debug-property-iterator.h6
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.cc6
-rw-r--r--deps/v8/src/debug/debug-scope-iterator.h2
-rw-r--r--deps/v8/src/debug/debug-scopes.cc80
-rw-r--r--deps/v8/src/debug/debug-scopes.h2
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.cc13
-rw-r--r--deps/v8/src/debug/debug-stack-trace-iterator.h2
-rw-r--r--deps/v8/src/debug/debug-type-profile.cc33
-rw-r--r--deps/v8/src/debug/debug-type-profile.h4
-rw-r--r--deps/v8/src/debug/debug.cc180
-rw-r--r--deps/v8/src/debug/debug.h14
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc4
-rw-r--r--deps/v8/src/debug/interface-types.h2
-rw-r--r--deps/v8/src/debug/liveedit.cc105
-rw-r--r--deps/v8/src/debug/liveedit.h4
-rw-r--r--deps/v8/src/debug/mips/OWNERS1
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc4
-rw-r--r--deps/v8/src/debug/mips64/OWNERS1
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc4
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc4
-rw-r--r--deps/v8/src/debug/s390/OWNERS4
-rw-r--r--deps/v8/src/debug/s390/debug-s390.cc6
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc8
-rw-r--r--deps/v8/src/deoptimizer/OWNERS5
-rw-r--r--deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc (renamed from deps/v8/src/arm/deoptimizer-arm.cc)24
-rw-r--r--deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc (renamed from deps/v8/src/arm64/deoptimizer-arm64.cc)32
-rw-r--r--deps/v8/src/deoptimizer/deoptimize-reason.cc (renamed from deps/v8/src/deoptimize-reason.cc)2
-rw-r--r--deps/v8/src/deoptimizer/deoptimize-reason.h (renamed from deps/v8/src/deoptimize-reason.h)8
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.cc (renamed from deps/v8/src/deoptimizer.cc)454
-rw-r--r--deps/v8/src/deoptimizer/deoptimizer.h (renamed from deps/v8/src/deoptimizer.h)70
-rw-r--r--deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc (renamed from deps/v8/src/ia32/deoptimizer-ia32.cc)21
-rw-r--r--deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc (renamed from deps/v8/src/mips/deoptimizer-mips.cc)17
-rw-r--r--deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc (renamed from deps/v8/src/mips64/deoptimizer-mips64.cc)15
-rw-r--r--deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc (renamed from deps/v8/src/ppc/deoptimizer-ppc.cc)13
-rw-r--r--deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc (renamed from deps/v8/src/s390/deoptimizer-s390.cc)8
-rw-r--r--deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc (renamed from deps/v8/src/x64/deoptimizer-x64.cc)25
-rw-r--r--deps/v8/src/diagnostics/OWNERS1
-rw-r--r--deps/v8/src/diagnostics/arm/disasm-arm.cc (renamed from deps/v8/src/arm/disasm-arm.cc)225
-rw-r--r--deps/v8/src/diagnostics/arm/eh-frame-arm.cc (renamed from deps/v8/src/arm/eh-frame-arm.cc)2
-rw-r--r--deps/v8/src/diagnostics/arm64/disasm-arm64.cc (renamed from deps/v8/src/arm64/disasm-arm64.cc)949
-rw-r--r--deps/v8/src/diagnostics/arm64/disasm-arm64.h (renamed from deps/v8/src/arm64/disasm-arm64.h)28
-rw-r--r--deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc (renamed from deps/v8/src/arm64/eh-frame-arm64.cc)4
-rw-r--r--deps/v8/src/diagnostics/basic-block-profiler.cc (renamed from deps/v8/src/basic-block-profiler.cc)12
-rw-r--r--deps/v8/src/diagnostics/basic-block-profiler.h (renamed from deps/v8/src/basic-block-profiler.h)10
-rw-r--r--deps/v8/src/diagnostics/code-tracer.h (renamed from deps/v8/src/code-tracer.h)20
-rw-r--r--deps/v8/src/diagnostics/compilation-statistics.cc (renamed from deps/v8/src/compilation-statistics.cc)35
-rw-r--r--deps/v8/src/diagnostics/compilation-statistics.h (renamed from deps/v8/src/compilation-statistics.h)14
-rw-r--r--deps/v8/src/diagnostics/disasm.h (renamed from deps/v8/src/disasm.h)10
-rw-r--r--deps/v8/src/diagnostics/disassembler.cc (renamed from deps/v8/src/disassembler.cc)82
-rw-r--r--deps/v8/src/diagnostics/disassembler.h (renamed from deps/v8/src/disassembler.h)10
-rw-r--r--deps/v8/src/diagnostics/eh-frame.cc (renamed from deps/v8/src/eh-frame.cc)14
-rw-r--r--deps/v8/src/diagnostics/eh-frame.h (renamed from deps/v8/src/eh-frame.h)12
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.cc (renamed from deps/v8/src/gdb-jit.cc)443
-rw-r--r--deps/v8/src/diagnostics/gdb-jit.h (renamed from deps/v8/src/gdb-jit.h)6
-rw-r--r--deps/v8/src/diagnostics/ia32/disasm-ia32.cc (renamed from deps/v8/src/ia32/disasm-ia32.cc)1125
-rw-r--r--deps/v8/src/diagnostics/mips/disasm-mips.cc (renamed from deps/v8/src/mips/disasm-mips.cc)138
-rw-r--r--deps/v8/src/diagnostics/mips64/disasm-mips64.cc (renamed from deps/v8/src/mips64/disasm-mips64.cc)121
-rw-r--r--deps/v8/src/diagnostics/objects-debug.cc (renamed from deps/v8/src/objects-debug.cc)1413
-rw-r--r--deps/v8/src/diagnostics/objects-printer.cc (renamed from deps/v8/src/objects-printer.cc)660
-rw-r--r--deps/v8/src/diagnostics/perf-jit.cc (renamed from deps/v8/src/perf-jit.cc)88
-rw-r--r--deps/v8/src/diagnostics/perf-jit.h (renamed from deps/v8/src/perf-jit.h)8
-rw-r--r--deps/v8/src/diagnostics/ppc/disasm-ppc.cc (renamed from deps/v8/src/ppc/disasm-ppc.cc)52
-rw-r--r--deps/v8/src/diagnostics/s390/disasm-s390.cc (renamed from deps/v8/src/s390/disasm-s390.cc)308
-rw-r--r--deps/v8/src/diagnostics/unwinder.cc (renamed from deps/v8/src/unwinder.cc)7
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.cc (renamed from deps/v8/src/unwinding-info-win64.cc)102
-rw-r--r--deps/v8/src/diagnostics/unwinding-info-win64.h (renamed from deps/v8/src/unwinding-info-win64.h)8
-rw-r--r--deps/v8/src/diagnostics/x64/disasm-x64.cc (renamed from deps/v8/src/x64/disasm-x64.cc)667
-rw-r--r--deps/v8/src/diagnostics/x64/eh-frame-x64.cc (renamed from deps/v8/src/x64/eh-frame-x64.cc)2
-rw-r--r--deps/v8/src/execution/OWNERS10
-rw-r--r--deps/v8/src/execution/arguments-inl.h (renamed from deps/v8/src/arguments-inl.h)14
-rw-r--r--deps/v8/src/execution/arguments.cc (renamed from deps/v8/src/arguments.cc)2
-rw-r--r--deps/v8/src/execution/arguments.h (renamed from deps/v8/src/arguments.h)19
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.cc (renamed from deps/v8/src/arm/frame-constants-arm.cc)8
-rw-r--r--deps/v8/src/execution/arm/frame-constants-arm.h (renamed from deps/v8/src/arm/frame-constants-arm.h)8
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.cc (renamed from deps/v8/src/arm/simulator-arm.cc)484
-rw-r--r--deps/v8/src/execution/arm/simulator-arm.h (renamed from deps/v8/src/arm/simulator-arm.h)161
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.cc (renamed from deps/v8/src/arm64/frame-constants-arm64.cc)10
-rw-r--r--deps/v8/src/execution/arm64/frame-constants-arm64.h (renamed from deps/v8/src/arm64/frame-constants-arm64.h)10
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.cc (renamed from deps/v8/src/arm64/simulator-arm64.cc)968
-rw-r--r--deps/v8/src/execution/arm64/simulator-arm64.h (renamed from deps/v8/src/arm64/simulator-arm64.h)127
-rw-r--r--deps/v8/src/execution/arm64/simulator-logic-arm64.cc (renamed from deps/v8/src/arm64/simulator-logic-arm64.cc)2
-rw-r--r--deps/v8/src/execution/execution.cc (renamed from deps/v8/src/execution.cc)55
-rw-r--r--deps/v8/src/execution/execution.h (renamed from deps/v8/src/execution.h)36
-rw-r--r--deps/v8/src/execution/frame-constants.h (renamed from deps/v8/src/frame-constants.h)37
-rw-r--r--deps/v8/src/execution/frames-inl.h (renamed from deps/v8/src/frames-inl.h)25
-rw-r--r--deps/v8/src/execution/frames.cc (renamed from deps/v8/src/frames.cc)465
-rw-r--r--deps/v8/src/execution/frames.h (renamed from deps/v8/src/frames.h)77
-rw-r--r--deps/v8/src/execution/futex-emulation.cc (renamed from deps/v8/src/futex-emulation.cc)18
-rw-r--r--deps/v8/src/execution/futex-emulation.h (renamed from deps/v8/src/futex-emulation.h)11
-rw-r--r--deps/v8/src/execution/ia32/frame-constants-ia32.cc (renamed from deps/v8/src/ia32/frame-constants-ia32.cc)10
-rw-r--r--deps/v8/src/execution/ia32/frame-constants-ia32.h (renamed from deps/v8/src/ia32/frame-constants-ia32.h)8
-rw-r--r--deps/v8/src/execution/isolate-data.h (renamed from deps/v8/src/isolate-data.h)16
-rw-r--r--deps/v8/src/execution/isolate-inl.h (renamed from deps/v8/src/isolate-inl.h)82
-rw-r--r--deps/v8/src/execution/isolate.cc (renamed from deps/v8/src/isolate.cc)503
-rw-r--r--deps/v8/src/execution/isolate.h (renamed from deps/v8/src/isolate.h)188
-rw-r--r--deps/v8/src/execution/message-template.h (renamed from deps/v8/src/message-template.h)23
-rw-r--r--deps/v8/src/execution/messages.cc (renamed from deps/v8/src/messages.cc)161
-rw-r--r--deps/v8/src/execution/messages.h (renamed from deps/v8/src/messages.h)23
-rw-r--r--deps/v8/src/execution/microtask-queue.cc (renamed from deps/v8/src/microtask-queue.cc)12
-rw-r--r--deps/v8/src/execution/microtask-queue.h (renamed from deps/v8/src/microtask-queue.h)6
-rw-r--r--deps/v8/src/execution/mips/frame-constants-mips.cc (renamed from deps/v8/src/mips/frame-constants-mips.cc)10
-rw-r--r--deps/v8/src/execution/mips/frame-constants-mips.h (renamed from deps/v8/src/mips/frame-constants-mips.h)8
-rw-r--r--deps/v8/src/execution/mips/simulator-mips.cc (renamed from deps/v8/src/mips/simulator-mips.cc)567
-rw-r--r--deps/v8/src/execution/mips/simulator-mips.h (renamed from deps/v8/src/mips/simulator-mips.h)103
-rw-r--r--deps/v8/src/execution/mips64/frame-constants-mips64.cc (renamed from deps/v8/src/mips64/frame-constants-mips64.cc)10
-rw-r--r--deps/v8/src/execution/mips64/frame-constants-mips64.h (renamed from deps/v8/src/mips64/frame-constants-mips64.h)8
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.cc (renamed from deps/v8/src/mips64/simulator-mips64.cc)428
-rw-r--r--deps/v8/src/execution/mips64/simulator-mips64.h (renamed from deps/v8/src/mips64/simulator-mips64.h)102
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.cc (renamed from deps/v8/src/ppc/frame-constants-ppc.cc)9
-rw-r--r--deps/v8/src/execution/ppc/frame-constants-ppc.h (renamed from deps/v8/src/ppc/frame-constants-ppc.h)8
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.cc (renamed from deps/v8/src/ppc/simulator-ppc.cc)130
-rw-r--r--deps/v8/src/execution/ppc/simulator-ppc.h (renamed from deps/v8/src/ppc/simulator-ppc.h)19
-rw-r--r--deps/v8/src/execution/runtime-profiler.cc (renamed from deps/v8/src/runtime-profiler.cc)85
-rw-r--r--deps/v8/src/execution/runtime-profiler.h (renamed from deps/v8/src/runtime-profiler.h)8
-rw-r--r--deps/v8/src/execution/s390/frame-constants-s390.cc (renamed from deps/v8/src/s390/frame-constants-s390.cc)9
-rw-r--r--deps/v8/src/execution/s390/frame-constants-s390.h (renamed from deps/v8/src/s390/frame-constants-s390.h)8
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.cc (renamed from deps/v8/src/s390/simulator-s390.cc)1352
-rw-r--r--deps/v8/src/execution/s390/simulator-s390.h (renamed from deps/v8/src/s390/simulator-s390.h)83
-rw-r--r--deps/v8/src/execution/simulator-base.cc (renamed from deps/v8/src/simulator-base.cc)6
-rw-r--r--deps/v8/src/execution/simulator-base.h (renamed from deps/v8/src/simulator-base.h)10
-rw-r--r--deps/v8/src/execution/simulator.h (renamed from deps/v8/src/simulator.h)24
-rw-r--r--deps/v8/src/execution/thread-id.cc (renamed from deps/v8/src/thread-id.cc)2
-rw-r--r--deps/v8/src/execution/thread-id.h (renamed from deps/v8/src/thread-id.h)6
-rw-r--r--deps/v8/src/execution/thread-local-top.cc (renamed from deps/v8/src/thread-local-top.cc)6
-rw-r--r--deps/v8/src/execution/thread-local-top.h (renamed from deps/v8/src/thread-local-top.h)12
-rw-r--r--deps/v8/src/execution/v8threads.cc (renamed from deps/v8/src/v8threads.cc)78
-rw-r--r--deps/v8/src/execution/v8threads.h (renamed from deps/v8/src/v8threads.h)20
-rw-r--r--deps/v8/src/execution/vm-state-inl.h (renamed from deps/v8/src/vm-state-inl.h)22
-rw-r--r--deps/v8/src/execution/vm-state.h (renamed from deps/v8/src/vm-state.h)11
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.cc (renamed from deps/v8/src/x64/frame-constants-x64.cc)7
-rw-r--r--deps/v8/src/execution/x64/frame-constants-x64.h (renamed from deps/v8/src/x64/frame-constants-x64.h)8
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc17
-rw-r--r--deps/v8/src/extensions/free-buffer-extension.cc2
-rw-r--r--deps/v8/src/extensions/gc-extension.h2
-rw-r--r--deps/v8/src/extensions/ignition-statistics-extension.cc2
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc18
-rw-r--r--deps/v8/src/extensions/trigger-failure-extension.cc2
-rw-r--r--deps/v8/src/flags/flag-definitions.h (renamed from deps/v8/src/flag-definitions.h)244
-rw-r--r--deps/v8/src/flags/flags.cc (renamed from deps/v8/src/flags.cc)103
-rw-r--r--deps/v8/src/flags/flags.h (renamed from deps/v8/src/flags.h)16
-rw-r--r--deps/v8/src/handles/OWNERS4
-rw-r--r--deps/v8/src/handles/global-handles.cc (renamed from deps/v8/src/global-handles.cc)116
-rw-r--r--deps/v8/src/handles/global-handles.h (renamed from deps/v8/src/global-handles.h)15
-rw-r--r--deps/v8/src/handles/handles-inl.h (renamed from deps/v8/src/handles-inl.h)68
-rw-r--r--deps/v8/src/handles/handles.cc (renamed from deps/v8/src/handles.cc)32
-rw-r--r--deps/v8/src/handles/handles.h (renamed from deps/v8/src/handles.h)65
-rw-r--r--deps/v8/src/handles/maybe-handles-inl.h (renamed from deps/v8/src/maybe-handles-inl.h)15
-rw-r--r--deps/v8/src/handles/maybe-handles.h (renamed from deps/v8/src/maybe-handles.h)11
-rw-r--r--deps/v8/src/heap/array-buffer-collector.cc4
-rw-r--r--deps/v8/src/heap/array-buffer-tracker-inl.h16
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc4
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h4
-rw-r--r--deps/v8/src/heap/code-stats.cc32
-rw-r--r--deps/v8/src/heap/combined-heap.cc19
-rw-r--r--deps/v8/src/heap/combined-heap.h41
-rw-r--r--deps/v8/src/heap/concurrent-marking.cc130
-rw-r--r--deps/v8/src/heap/concurrent-marking.h8
-rw-r--r--deps/v8/src/heap/embedder-tracing.cc31
-rw-r--r--deps/v8/src/heap/embedder-tracing.h39
-rw-r--r--deps/v8/src/heap/factory-inl.h8
-rw-r--r--deps/v8/src/heap/factory.cc1263
-rw-r--r--deps/v8/src/heap/factory.h216
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc6
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.h2
-rw-r--r--deps/v8/src/heap/gc-tracer.cc116
-rw-r--r--deps/v8/src/heap/gc-tracer.h42
-rw-r--r--deps/v8/src/heap/heap-controller.cc167
-rw-r--r--deps/v8/src/heap/heap-controller.h80
-rw-r--r--deps/v8/src/heap/heap-inl.h75
-rw-r--r--deps/v8/src/heap/heap-write-barrier-inl.h41
-rw-r--r--deps/v8/src/heap/heap-write-barrier.h15
-rw-r--r--deps/v8/src/heap/heap.cc1192
-rw-r--r--deps/v8/src/heap/heap.h142
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h47
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc6
-rw-r--r--deps/v8/src/heap/incremental-marking-job.h2
-rw-r--r--deps/v8/src/heap/incremental-marking.cc126
-rw-r--r--deps/v8/src/heap/incremental-marking.h18
-rw-r--r--deps/v8/src/heap/invalidated-slots-inl.h18
-rw-r--r--deps/v8/src/heap/invalidated-slots.cc2
-rw-r--r--deps/v8/src/heap/invalidated-slots.h4
-rw-r--r--deps/v8/src/heap/item-parallel-job.cc4
-rw-r--r--deps/v8/src/heap/item-parallel-job.h4
-rw-r--r--deps/v8/src/heap/local-allocator-inl.h6
-rw-r--r--deps/v8/src/heap/local-allocator.h2
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h98
-rw-r--r--deps/v8/src/heap/mark-compact.cc404
-rw-r--r--deps/v8/src/heap/mark-compact.h16
-rw-r--r--deps/v8/src/heap/marking.h2
-rw-r--r--deps/v8/src/heap/memory-reducer.cc9
-rw-r--r--deps/v8/src/heap/memory-reducer.h4
-rw-r--r--deps/v8/src/heap/object-stats.cc312
-rw-r--r--deps/v8/src/heap/object-stats.h2
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h32
-rw-r--r--deps/v8/src/heap/objects-visiting.cc30
-rw-r--r--deps/v8/src/heap/objects-visiting.h18
-rw-r--r--deps/v8/src/heap/read-only-heap.cc145
-rw-r--r--deps/v8/src/heap/read-only-heap.h75
-rw-r--r--deps/v8/src/heap/remembered-set.h33
-rw-r--r--deps/v8/src/heap/scavenge-job.cc6
-rw-r--r--deps/v8/src/heap/scavenge-job.h4
-rw-r--r--deps/v8/src/heap/scavenger-inl.h62
-rw-r--r--deps/v8/src/heap/scavenger.cc31
-rw-r--r--deps/v8/src/heap/setup-heap-internal.cc244
-rw-r--r--deps/v8/src/heap/slot-set.h7
-rw-r--r--deps/v8/src/heap/spaces-inl.h39
-rw-r--r--deps/v8/src/heap/spaces.cc416
-rw-r--r--deps/v8/src/heap/spaces.h197
-rw-r--r--deps/v8/src/heap/store-buffer.cc10
-rw-r--r--deps/v8/src/heap/store-buffer.h6
-rw-r--r--deps/v8/src/heap/stress-scavenge-observer.cc2
-rw-r--r--deps/v8/src/heap/sweeper.cc64
-rw-r--r--deps/v8/src/heap/sweeper.h6
-rw-r--r--deps/v8/src/ic/accessor-assembler.cc556
-rw-r--r--deps/v8/src/ic/accessor-assembler.h8
-rw-r--r--deps/v8/src/ic/binary-op-assembler.cc2
-rw-r--r--deps/v8/src/ic/binary-op-assembler.h2
-rw-r--r--deps/v8/src/ic/call-optimization.cc30
-rw-r--r--deps/v8/src/ic/call-optimization.h4
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h12
-rw-r--r--deps/v8/src/ic/handler-configuration.cc6
-rw-r--r--deps/v8/src/ic/handler-configuration.h12
-rw-r--r--deps/v8/src/ic/ic-inl.h34
-rw-r--r--deps/v8/src/ic/ic-stats.cc18
-rw-r--r--deps/v8/src/ic/ic.cc373
-rw-r--r--deps/v8/src/ic/ic.h21
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc22
-rw-r--r--deps/v8/src/ic/keyed-store-generic.h2
-rw-r--r--deps/v8/src/ic/stub-cache.cc10
-rw-r--r--deps/v8/src/init/OWNERS5
-rw-r--r--deps/v8/src/init/bootstrapper.cc (renamed from deps/v8/src/bootstrapper.cc)731
-rw-r--r--deps/v8/src/init/bootstrapper.h (renamed from deps/v8/src/bootstrapper.h)15
-rw-r--r--deps/v8/src/init/heap-symbols.h (renamed from deps/v8/src/heap-symbols.h)446
-rw-r--r--deps/v8/src/init/icu_util.cc (renamed from deps/v8/src/icu_util.cc)8
-rw-r--r--deps/v8/src/init/icu_util.h (renamed from deps/v8/src/icu_util.h)7
-rw-r--r--deps/v8/src/init/isolate-allocator.cc (renamed from deps/v8/src/isolate-allocator.cc)8
-rw-r--r--deps/v8/src/init/isolate-allocator.h (renamed from deps/v8/src/isolate-allocator.h)10
-rw-r--r--deps/v8/src/init/setup-isolate-deserialize.cc (renamed from deps/v8/src/setup-isolate-deserialize.cc)7
-rw-r--r--deps/v8/src/init/setup-isolate-full.cc (renamed from deps/v8/src/setup-isolate-full.cc)4
-rw-r--r--deps/v8/src/init/setup-isolate.h (renamed from deps/v8/src/setup-isolate.h)6
-rw-r--r--deps/v8/src/init/startup-data-util.cc (renamed from deps/v8/src/startup-data-util.cc)14
-rw-r--r--deps/v8/src/init/startup-data-util.h (renamed from deps/v8/src/startup-data-util.h)7
-rw-r--r--deps/v8/src/init/v8.cc (renamed from deps/v8/src/v8.cc)33
-rw-r--r--deps/v8/src/init/v8.h (renamed from deps/v8/src/v8.h)8
-rw-r--r--deps/v8/src/inspector/BUILD.gn15
-rw-r--r--deps/v8/src/inspector/DEPS9
-rw-r--r--deps/v8/src/inspector/OWNERS3
-rw-r--r--deps/v8/src/inspector/js_protocol.pdl22
-rw-r--r--deps/v8/src/inspector/string-16.cc390
-rw-r--r--deps/v8/src/inspector/string-16.h1
-rw-r--r--deps/v8/src/inspector/string-util.cc3
-rw-r--r--deps/v8/src/inspector/v8-console.cc1
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc89
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h7
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc9
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h1
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc3
-rw-r--r--deps/v8/src/inspector/v8-inspector-protocol-encoding.cc51
-rw-r--r--deps/v8/src/inspector/v8-inspector-protocol-encoding.h26
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc63
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h1
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc4
-rw-r--r--deps/v8/src/inspector/v8-string-conversions.cc403
-rw-r--r--deps/v8/src/inspector/v8-string-conversions.h17
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.cc14
-rw-r--r--deps/v8/src/interpreter/bytecode-array-accessor.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc30
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-random-iterator.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc7
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.cc4
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-flags.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc141
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h6
-rw-r--r--deps/v8/src/interpreter/bytecode-jump-table.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-label.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-node.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-node.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h2
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-source-info.cc2
-rw-r--r--deps/v8/src/interpreter/bytecode-source-info.h2
-rw-r--r--deps/v8/src/interpreter/bytecodes.h2
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc5
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h4
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc2
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.cc4
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.h2
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc11
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h6
-rw-r--r--deps/v8/src/interpreter/interpreter-generator.cc22
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics-generator.cc23
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h4
-rw-r--r--deps/v8/src/interpreter/interpreter.cc29
-rw-r--r--deps/v8/src/json-parser.cc959
-rw-r--r--deps/v8/src/json-parser.h167
-rw-r--r--deps/v8/src/json/OWNERS3
-rw-r--r--deps/v8/src/json/json-parser.cc1194
-rw-r--r--deps/v8/src/json/json-parser.h358
-rw-r--r--deps/v8/src/json/json-stringifier.cc (renamed from deps/v8/src/json-stringifier.cc)80
-rw-r--r--deps/v8/src/json/json-stringifier.h (renamed from deps/v8/src/json-stringifier.h)8
-rw-r--r--deps/v8/src/libplatform/default-worker-threads-task-runner.cc14
-rw-r--r--deps/v8/src/libplatform/default-worker-threads-task-runner.h6
-rw-r--r--deps/v8/src/libplatform/tracing/DEPS4
-rw-r--r--deps/v8/src/libplatform/tracing/json-trace-event-listener.cc166
-rw-r--r--deps/v8/src/libplatform/tracing/json-trace-event-listener.h45
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-consumer.cc44
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-consumer.h80
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-producer.cc45
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-producer.h70
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc28
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-shared-memory.h45
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tasks.cc52
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tasks.h55
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc130
-rw-r--r--deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h86
-rw-r--r--deps/v8/src/libplatform/tracing/trace-event-listener.h34
-rw-r--r--deps/v8/src/libplatform/tracing/trace-writer.cc1
-rw-r--r--deps/v8/src/libplatform/tracing/tracing-controller.cc198
-rw-r--r--deps/v8/src/libsampler/sampler.cc50
-rw-r--r--deps/v8/src/libsampler/sampler.h4
-rw-r--r--deps/v8/src/logging/OWNERS1
-rw-r--r--deps/v8/src/logging/code-events.h (renamed from deps/v8/src/code-events.h)10
-rw-r--r--deps/v8/src/logging/counters-definitions.h (renamed from deps/v8/src/counters-definitions.h)111
-rw-r--r--deps/v8/src/logging/counters-inl.h (renamed from deps/v8/src/counters-inl.h)8
-rw-r--r--deps/v8/src/logging/counters.cc (renamed from deps/v8/src/counters.cc)23
-rw-r--r--deps/v8/src/logging/counters.h (renamed from deps/v8/src/counters.h)53
-rw-r--r--deps/v8/src/logging/log-inl.h (renamed from deps/v8/src/log-inl.h)14
-rw-r--r--deps/v8/src/logging/log-utils.cc (renamed from deps/v8/src/log-utils.cc)33
-rw-r--r--deps/v8/src/logging/log-utils.h (renamed from deps/v8/src/log-utils.h)12
-rw-r--r--deps/v8/src/logging/log.cc (renamed from deps/v8/src/log.cc)389
-rw-r--r--deps/v8/src/logging/log.h (renamed from deps/v8/src/log.h)28
-rw-r--r--deps/v8/src/macro-assembler-inl.h15
-rw-r--r--deps/v8/src/mips/OWNERS1
-rw-r--r--deps/v8/src/mips64/OWNERS1
-rw-r--r--deps/v8/src/numbers/OWNERS5
-rw-r--r--deps/v8/src/numbers/bignum-dtoa.cc (renamed from deps/v8/src/bignum-dtoa.cc)110
-rw-r--r--deps/v8/src/numbers/bignum-dtoa.h (renamed from deps/v8/src/bignum-dtoa.h)8
-rw-r--r--deps/v8/src/numbers/bignum.cc (renamed from deps/v8/src/bignum.cc)51
-rw-r--r--deps/v8/src/numbers/bignum.h (renamed from deps/v8/src/bignum.h)12
-rw-r--r--deps/v8/src/numbers/cached-powers.cc (renamed from deps/v8/src/cached-powers.cc)13
-rw-r--r--deps/v8/src/numbers/cached-powers.h (renamed from deps/v8/src/cached-powers.h)8
-rw-r--r--deps/v8/src/numbers/conversions-inl.h (renamed from deps/v8/src/conversions-inl.h)52
-rw-r--r--deps/v8/src/numbers/conversions.cc (renamed from deps/v8/src/conversions.cc)121
-rw-r--r--deps/v8/src/numbers/conversions.h (renamed from deps/v8/src/conversions.h)18
-rw-r--r--deps/v8/src/numbers/diy-fp.cc (renamed from deps/v8/src/diy-fp.cc)2
-rw-r--r--deps/v8/src/numbers/diy-fp.h (renamed from deps/v8/src/diy-fp.h)7
-rw-r--r--deps/v8/src/numbers/double.h (renamed from deps/v8/src/double.h)21
-rw-r--r--deps/v8/src/numbers/dtoa.cc (renamed from deps/v8/src/dtoa.cc)26
-rw-r--r--deps/v8/src/numbers/dtoa.h (renamed from deps/v8/src/dtoa.h)8
-rw-r--r--deps/v8/src/numbers/fast-dtoa.cc (renamed from deps/v8/src/fast-dtoa.cc)133
-rw-r--r--deps/v8/src/numbers/fast-dtoa.h (renamed from deps/v8/src/fast-dtoa.h)8
-rw-r--r--deps/v8/src/numbers/fixed-dtoa.cc (renamed from deps/v8/src/fixed-dtoa.cc)37
-rw-r--r--deps/v8/src/numbers/fixed-dtoa.h (renamed from deps/v8/src/fixed-dtoa.h)8
-rw-r--r--deps/v8/src/numbers/hash-seed-inl.h (renamed from deps/v8/src/hash-seed-inl.h)10
-rw-r--r--deps/v8/src/numbers/math-random.cc (renamed from deps/v8/src/math-random.cc)24
-rw-r--r--deps/v8/src/numbers/math-random.h (renamed from deps/v8/src/math-random.h)10
-rw-r--r--deps/v8/src/numbers/strtod.cc (renamed from deps/v8/src/strtod.cc)55
-rw-r--r--deps/v8/src/numbers/strtod.h (renamed from deps/v8/src/strtod.h)8
-rw-r--r--deps/v8/src/objects/allocation-site-inl.h22
-rw-r--r--deps/v8/src/objects/allocation-site-scopes-inl.h (renamed from deps/v8/src/allocation-site-scopes-inl.h)10
-rw-r--r--deps/v8/src/objects/allocation-site-scopes.h (renamed from deps/v8/src/allocation-site-scopes.h)20
-rw-r--r--deps/v8/src/objects/allocation-site.h45
-rw-r--r--deps/v8/src/objects/api-callbacks-inl.h10
-rw-r--r--deps/v8/src/objects/arguments-inl.h20
-rw-r--r--deps/v8/src/objects/arguments.h2
-rw-r--r--deps/v8/src/objects/bigint.cc64
-rw-r--r--deps/v8/src/objects/bigint.h8
-rw-r--r--deps/v8/src/objects/cell-inl.h2
-rw-r--r--deps/v8/src/objects/cell.h2
-rw-r--r--deps/v8/src/objects/code-inl.h207
-rw-r--r--deps/v8/src/objects/code.cc143
-rw-r--r--deps/v8/src/objects/code.h27
-rw-r--r--deps/v8/src/objects/compilation-cache-inl.h32
-rw-r--r--deps/v8/src/objects/compilation-cache.h2
-rw-r--r--deps/v8/src/objects/compressed-slots-inl.h28
-rw-r--r--deps/v8/src/objects/contexts-inl.h (renamed from deps/v8/src/contexts-inl.h)42
-rw-r--r--deps/v8/src/objects/contexts.cc (renamed from deps/v8/src/contexts.cc)95
-rw-r--r--deps/v8/src/objects/contexts.h (renamed from deps/v8/src/contexts.h)17
-rw-r--r--deps/v8/src/objects/data-handler-inl.h10
-rw-r--r--deps/v8/src/objects/debug-objects-inl.h12
-rw-r--r--deps/v8/src/objects/debug-objects.cc75
-rw-r--r--deps/v8/src/objects/debug-objects.h37
-rw-r--r--deps/v8/src/objects/descriptor-array-inl.h34
-rw-r--r--deps/v8/src/objects/descriptor-array.h27
-rw-r--r--deps/v8/src/objects/dictionary-inl.h50
-rw-r--r--deps/v8/src/objects/dictionary.h16
-rw-r--r--deps/v8/src/objects/elements-inl.h (renamed from deps/v8/src/elements-inl.h)14
-rw-r--r--deps/v8/src/objects/elements-kind.cc (renamed from deps/v8/src/elements-kind.cc)68
-rw-r--r--deps/v8/src/objects/elements-kind.h (renamed from deps/v8/src/elements-kind.h)79
-rw-r--r--deps/v8/src/objects/elements.cc (renamed from deps/v8/src/elements.cc)1819
-rw-r--r--deps/v8/src/objects/elements.h (renamed from deps/v8/src/elements.h)24
-rw-r--r--deps/v8/src/objects/embedder-data-array.cc2
-rw-r--r--deps/v8/src/objects/embedder-data-array.h6
-rw-r--r--deps/v8/src/objects/embedder-data-slot-inl.h8
-rw-r--r--deps/v8/src/objects/embedder-data-slot.h4
-rw-r--r--deps/v8/src/objects/feedback-cell-inl.h13
-rw-r--r--deps/v8/src/objects/feedback-cell.h19
-rw-r--r--deps/v8/src/objects/feedback-vector-inl.h (renamed from deps/v8/src/feedback-vector-inl.h)65
-rw-r--r--deps/v8/src/objects/feedback-vector.cc (renamed from deps/v8/src/feedback-vector.cc)124
-rw-r--r--deps/v8/src/objects/feedback-vector.h (renamed from deps/v8/src/feedback-vector.h)60
-rw-r--r--deps/v8/src/objects/field-index-inl.h (renamed from deps/v8/src/field-index-inl.h)20
-rw-r--r--deps/v8/src/objects/field-index.h (renamed from deps/v8/src/field-index.h)27
-rw-r--r--deps/v8/src/objects/field-type.cc (renamed from deps/v8/src/field-type.cc)26
-rw-r--r--deps/v8/src/objects/field-type.h (renamed from deps/v8/src/field-type.h)10
-rw-r--r--deps/v8/src/objects/fixed-array-inl.h523
-rw-r--r--deps/v8/src/objects/fixed-array.h153
-rw-r--r--deps/v8/src/objects/foreign-inl.h8
-rw-r--r--deps/v8/src/objects/foreign.h2
-rw-r--r--deps/v8/src/objects/frame-array-inl.h6
-rw-r--r--deps/v8/src/objects/frame-array.h2
-rw-r--r--deps/v8/src/objects/free-space-inl.h10
-rw-r--r--deps/v8/src/objects/free-space.h2
-rw-r--r--deps/v8/src/objects/function-kind.h (renamed from deps/v8/src/function-kind.h)8
-rw-r--r--deps/v8/src/objects/hash-table-inl.h8
-rw-r--r--deps/v8/src/objects/hash-table.h6
-rw-r--r--deps/v8/src/objects/heap-number-inl.h18
-rw-r--r--deps/v8/src/objects/heap-object-inl.h11
-rw-r--r--deps/v8/src/objects/heap-object.h14
-rw-r--r--deps/v8/src/objects/instance-type-inl.h15
-rw-r--r--deps/v8/src/objects/instance-type.h40
-rw-r--r--deps/v8/src/objects/intl-objects.cc223
-rw-r--r--deps/v8/src/objects/intl-objects.h34
-rw-r--r--deps/v8/src/objects/intl-objects.tq64
-rw-r--r--deps/v8/src/objects/js-array-buffer-inl.h71
-rw-r--r--deps/v8/src/objects/js-array-buffer.cc52
-rw-r--r--deps/v8/src/objects/js-array-buffer.h56
-rw-r--r--deps/v8/src/objects/js-array-inl.h10
-rw-r--r--deps/v8/src/objects/js-array.h2
-rw-r--r--deps/v8/src/objects/js-break-iterator-inl.h2
-rw-r--r--deps/v8/src/objects/js-break-iterator.cc10
-rw-r--r--deps/v8/src/objects/js-break-iterator.h2
-rw-r--r--deps/v8/src/objects/js-collator-inl.h2
-rw-r--r--deps/v8/src/objects/js-collator.cc6
-rw-r--r--deps/v8/src/objects/js-collator.h4
-rw-r--r--deps/v8/src/objects/js-collection-inl.h8
-rw-r--r--deps/v8/src/objects/js-collection-iterator.h4
-rw-r--r--deps/v8/src/objects/js-collection.h14
-rw-r--r--deps/v8/src/objects/js-date-time-format-inl.h8
-rw-r--r--deps/v8/src/objects/js-date-time-format.cc349
-rw-r--r--deps/v8/src/objects/js-date-time-format.h15
-rw-r--r--deps/v8/src/objects/js-generator-inl.h2
-rw-r--r--deps/v8/src/objects/js-list-format-inl.h4
-rw-r--r--deps/v8/src/objects/js-list-format.cc39
-rw-r--r--deps/v8/src/objects/js-list-format.h15
-rw-r--r--deps/v8/src/objects/js-locale-inl.h6
-rw-r--r--deps/v8/src/objects/js-locale.cc215
-rw-r--r--deps/v8/src/objects/js-locale.h14
-rw-r--r--deps/v8/src/objects/js-number-format-inl.h50
-rw-r--r--deps/v8/src/objects/js-number-format.cc1151
-rw-r--r--deps/v8/src/objects/js-number-format.h101
-rw-r--r--deps/v8/src/objects/js-objects-inl.h288
-rw-r--r--deps/v8/src/objects/js-objects.cc926
-rw-r--r--deps/v8/src/objects/js-objects.h81
-rw-r--r--deps/v8/src/objects/js-plural-rules-inl.h8
-rw-r--r--deps/v8/src/objects/js-plural-rules.cc31
-rw-r--r--deps/v8/src/objects/js-plural-rules.h17
-rw-r--r--deps/v8/src/objects/js-promise-inl.h4
-rw-r--r--deps/v8/src/objects/js-proxy-inl.h4
-rw-r--r--deps/v8/src/objects/js-proxy.h2
-rw-r--r--deps/v8/src/objects/js-regexp-inl.h32
-rw-r--r--deps/v8/src/objects/js-regexp-string-iterator-inl.h2
-rw-r--r--deps/v8/src/objects/js-regexp.h8
-rw-r--r--deps/v8/src/objects/js-relative-time-format-inl.h4
-rw-r--r--deps/v8/src/objects/js-relative-time-format.cc56
-rw-r--r--deps/v8/src/objects/js-relative-time-format.h14
-rw-r--r--deps/v8/src/objects/js-segment-iterator-inl.h4
-rw-r--r--deps/v8/src/objects/js-segment-iterator.cc16
-rw-r--r--deps/v8/src/objects/js-segment-iterator.h16
-rw-r--r--deps/v8/src/objects/js-segmenter-inl.h4
-rw-r--r--deps/v8/src/objects/js-segmenter.cc4
-rw-r--r--deps/v8/src/objects/js-segmenter.h16
-rw-r--r--deps/v8/src/objects/js-weak-refs-inl.h100
-rw-r--r--deps/v8/src/objects/keys.cc (renamed from deps/v8/src/keys.cc)113
-rw-r--r--deps/v8/src/objects/keys.h (renamed from deps/v8/src/keys.h)8
-rw-r--r--deps/v8/src/objects/layout-descriptor-inl.h (renamed from deps/v8/src/layout-descriptor-inl.h)40
-rw-r--r--deps/v8/src/objects/layout-descriptor.cc (renamed from deps/v8/src/layout-descriptor.cc)33
-rw-r--r--deps/v8/src/objects/layout-descriptor.h (renamed from deps/v8/src/layout-descriptor.h)6
-rw-r--r--deps/v8/src/objects/literal-objects-inl.h4
-rw-r--r--deps/v8/src/objects/literal-objects.cc50
-rw-r--r--deps/v8/src/objects/lookup-cache-inl.h (renamed from deps/v8/src/lookup-cache-inl.h)14
-rw-r--r--deps/v8/src/objects/lookup-cache.cc (renamed from deps/v8/src/lookup-cache.cc)2
-rw-r--r--deps/v8/src/objects/lookup-cache.h (renamed from deps/v8/src/lookup-cache.h)8
-rw-r--r--deps/v8/src/objects/lookup-inl.h (renamed from deps/v8/src/lookup-inl.h)45
-rw-r--r--deps/v8/src/objects/lookup.cc (renamed from deps/v8/src/lookup.cc)207
-rw-r--r--deps/v8/src/objects/lookup.h (renamed from deps/v8/src/lookup.h)20
-rw-r--r--deps/v8/src/objects/managed.h8
-rw-r--r--deps/v8/src/objects/map-inl.h142
-rw-r--r--deps/v8/src/objects/map-updater.cc (renamed from deps/v8/src/map-updater.cc)91
-rw-r--r--deps/v8/src/objects/map-updater.h (renamed from deps/v8/src/map-updater.h)16
-rw-r--r--deps/v8/src/objects/map.cc672
-rw-r--r--deps/v8/src/objects/map.h83
-rw-r--r--deps/v8/src/objects/maybe-object-inl.h123
-rw-r--r--deps/v8/src/objects/maybe-object.h126
-rw-r--r--deps/v8/src/objects/microtask-inl.h2
-rw-r--r--deps/v8/src/objects/microtask.h2
-rw-r--r--deps/v8/src/objects/module-inl.h16
-rw-r--r--deps/v8/src/objects/module.cc102
-rw-r--r--deps/v8/src/objects/module.h2
-rw-r--r--deps/v8/src/objects/name-inl.h24
-rw-r--r--deps/v8/src/objects/name.h6
-rw-r--r--deps/v8/src/objects/object-list-macros.h270
-rw-r--r--deps/v8/src/objects/object-macros-undef.h32
-rw-r--r--deps/v8/src/objects/object-macros.h248
-rw-r--r--deps/v8/src/objects/objects-body-descriptors-inl.h (renamed from deps/v8/src/objects-body-descriptors-inl.h)181
-rw-r--r--deps/v8/src/objects/objects-body-descriptors.h (renamed from deps/v8/src/objects-body-descriptors.h)12
-rw-r--r--deps/v8/src/objects/objects-definitions.h (renamed from deps/v8/src/objects-definitions.h)121
-rw-r--r--deps/v8/src/objects/objects-inl.h (renamed from deps/v8/src/objects-inl.h)228
-rw-r--r--deps/v8/src/objects/objects.cc (renamed from deps/v8/src/objects.cc)1794
-rw-r--r--deps/v8/src/objects/objects.h (renamed from deps/v8/src/objects.h)481
-rw-r--r--deps/v8/src/objects/oddball-inl.h29
-rw-r--r--deps/v8/src/objects/oddball.h37
-rw-r--r--deps/v8/src/objects/ordered-hash-table-inl.h10
-rw-r--r--deps/v8/src/objects/ordered-hash-table.cc204
-rw-r--r--deps/v8/src/objects/ordered-hash-table.h10
-rw-r--r--deps/v8/src/objects/promise.h12
-rw-r--r--deps/v8/src/objects/property-array-inl.h13
-rw-r--r--deps/v8/src/objects/property-array.h6
-rw-r--r--deps/v8/src/objects/property-cell.h2
-rw-r--r--deps/v8/src/objects/property-descriptor-object-inl.h2
-rw-r--r--deps/v8/src/objects/property-descriptor-object.h2
-rw-r--r--deps/v8/src/objects/property-descriptor.cc (renamed from deps/v8/src/property-descriptor.cc)28
-rw-r--r--deps/v8/src/objects/property-descriptor.h (renamed from deps/v8/src/property-descriptor.h)12
-rw-r--r--deps/v8/src/objects/property-details.h (renamed from deps/v8/src/property-details.h)47
-rw-r--r--deps/v8/src/objects/property.cc (renamed from deps/v8/src/property.cc)33
-rw-r--r--deps/v8/src/objects/property.h (renamed from deps/v8/src/property.h)16
-rw-r--r--deps/v8/src/objects/prototype-info-inl.h10
-rw-r--r--deps/v8/src/objects/prototype-info.h2
-rw-r--r--deps/v8/src/objects/prototype-inl.h (renamed from deps/v8/src/prototype-inl.h)38
-rw-r--r--deps/v8/src/objects/prototype.h (renamed from deps/v8/src/prototype.h)11
-rw-r--r--deps/v8/src/objects/regexp-match-info.h2
-rw-r--r--deps/v8/src/objects/scope-info.cc152
-rw-r--r--deps/v8/src/objects/scope-info.h15
-rw-r--r--deps/v8/src/objects/script-inl.h14
-rw-r--r--deps/v8/src/objects/script.h2
-rw-r--r--deps/v8/src/objects/shared-function-info-inl.h186
-rw-r--r--deps/v8/src/objects/shared-function-info.h64
-rw-r--r--deps/v8/src/objects/slots-atomic-inl.h1
-rw-r--r--deps/v8/src/objects/slots-inl.h19
-rw-r--r--deps/v8/src/objects/slots.h4
-rw-r--r--deps/v8/src/objects/smi-inl.h4
-rw-r--r--deps/v8/src/objects/smi.h8
-rw-r--r--deps/v8/src/objects/stack-frame-info-inl.h8
-rw-r--r--deps/v8/src/objects/stack-frame-info.cc36
-rw-r--r--deps/v8/src/objects/stack-frame-info.h16
-rw-r--r--deps/v8/src/objects/string-comparator.cc2
-rw-r--r--deps/v8/src/objects/string-comparator.h4
-rw-r--r--deps/v8/src/objects/string-inl.h265
-rw-r--r--deps/v8/src/objects/string-table-inl.h31
-rw-r--r--deps/v8/src/objects/string-table.h29
-rw-r--r--deps/v8/src/objects/string.cc349
-rw-r--r--deps/v8/src/objects/string.h126
-rw-r--r--deps/v8/src/objects/struct-inl.h25
-rw-r--r--deps/v8/src/objects/struct.h37
-rw-r--r--deps/v8/src/objects/tagged-impl-inl.h257
-rw-r--r--deps/v8/src/objects/tagged-impl.cc39
-rw-r--r--deps/v8/src/objects/tagged-impl.h181
-rw-r--r--deps/v8/src/objects/tagged-value-inl.h39
-rw-r--r--deps/v8/src/objects/tagged-value.h42
-rw-r--r--deps/v8/src/objects/template-objects-inl.h2
-rw-r--r--deps/v8/src/objects/template-objects.cc16
-rw-r--r--deps/v8/src/objects/template-objects.h13
-rw-r--r--deps/v8/src/objects/templates-inl.h54
-rw-r--r--deps/v8/src/objects/templates.h18
-rw-r--r--deps/v8/src/objects/transitions-inl.h (renamed from deps/v8/src/transitions-inl.h)110
-rw-r--r--deps/v8/src/objects/transitions.cc (renamed from deps/v8/src/transitions.cc)207
-rw-r--r--deps/v8/src/objects/transitions.h (renamed from deps/v8/src/transitions.h)39
-rw-r--r--deps/v8/src/objects/type-hints.cc (renamed from deps/v8/src/type-hints.cc)2
-rw-r--r--deps/v8/src/objects/type-hints.h (renamed from deps/v8/src/type-hints.h)8
-rw-r--r--deps/v8/src/objects/value-serializer.cc (renamed from deps/v8/src/value-serializer.cc)109
-rw-r--r--deps/v8/src/objects/value-serializer.h (renamed from deps/v8/src/value-serializer.h)14
-rw-r--r--deps/v8/src/objects/visitors.cc (renamed from deps/v8/src/visitors.cc)4
-rw-r--r--deps/v8/src/objects/visitors.h (renamed from deps/v8/src/visitors.h)10
-rw-r--r--deps/v8/src/parsing/expression-scope-reparenter.cc2
-rw-r--r--deps/v8/src/parsing/expression-scope.h4
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.cc2
-rw-r--r--deps/v8/src/parsing/func-name-inferrer.h2
-rw-r--r--deps/v8/src/parsing/literal-buffer.cc80
-rw-r--r--deps/v8/src/parsing/literal-buffer.h104
-rw-r--r--deps/v8/src/parsing/parse-info.cc15
-rw-r--r--deps/v8/src/parsing/parse-info.h14
-rw-r--r--deps/v8/src/parsing/parser-base.h164
-rw-r--r--deps/v8/src/parsing/parser.cc103
-rw-r--r--deps/v8/src/parsing/parser.h31
-rw-r--r--deps/v8/src/parsing/parsing.cc4
-rw-r--r--deps/v8/src/parsing/parsing.h2
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.cc (renamed from deps/v8/src/pending-compilation-error-handler.cc)11
-rw-r--r--deps/v8/src/parsing/pending-compilation-error-handler.h (renamed from deps/v8/src/pending-compilation-error-handler.h)12
-rw-r--r--deps/v8/src/parsing/preparse-data-impl.h2
-rw-r--r--deps/v8/src/parsing/preparse-data.cc6
-rw-r--r--deps/v8/src/parsing/preparse-data.h8
-rw-r--r--deps/v8/src/parsing/preparser.cc12
-rw-r--r--deps/v8/src/parsing/preparser.h53
-rw-r--r--deps/v8/src/parsing/rewriter.cc2
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc41
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.h2
-rw-r--r--deps/v8/src/parsing/scanner-inl.h4
-rw-r--r--deps/v8/src/parsing/scanner.cc80
-rw-r--r--deps/v8/src/parsing/scanner.h111
-rw-r--r--deps/v8/src/parsing/token.h4
-rw-r--r--deps/v8/src/ppc/OWNERS4
-rw-r--r--deps/v8/src/profiler/OWNERS1
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc35
-rw-r--r--deps/v8/src/profiler/allocation-tracker.h6
-rw-r--r--deps/v8/src/profiler/circular-queue.h2
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc82
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h62
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc22
-rw-r--r--deps/v8/src/profiler/heap-profiler.h2
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator-inl.h2
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc653
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h8
-rw-r--r--deps/v8/src/profiler/profile-generator.cc120
-rw-r--r--deps/v8/src/profiler/profile-generator.h46
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc116
-rw-r--r--deps/v8/src/profiler/profiler-listener.h9
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc57
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.h77
-rw-r--r--deps/v8/src/profiler/strings-storage.cc26
-rw-r--r--deps/v8/src/profiler/strings-storage.h2
-rw-r--r--deps/v8/src/profiler/tick-sample.cc31
-rw-r--r--deps/v8/src/profiler/tick-sample.h6
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.cc6
-rw-r--r--deps/v8/src/protobuf/DEPS3
-rw-r--r--deps/v8/src/protobuf/OWNERS1
-rw-r--r--deps/v8/src/protobuf/protobuf-compiler-main.cc28
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc21
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h4
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc20
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h4
-rw-r--r--deps/v8/src/regexp/gen-regexp-special-case.cc125
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc29
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h4
-rw-r--r--deps/v8/src/regexp/interpreter-irregexp.cc8
-rw-r--r--deps/v8/src/regexp/jsregexp-inl.h4
-rw-r--r--deps/v8/src/regexp/jsregexp.cc230
-rw-r--r--deps/v8/src/regexp/jsregexp.h4
-rw-r--r--deps/v8/src/regexp/mips/OWNERS1
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc17
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h4
-rw-r--r--deps/v8/src/regexp/mips64/OWNERS1
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc17
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h4
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc17
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h4
-rw-r--r--deps/v8/src/regexp/property-sequences.h2
-rw-r--r--deps/v8/src/regexp/regexp-ast.cc2
-rw-r--r--deps/v8/src/regexp/regexp-ast.h4
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h8
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc10
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.cc2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc64
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h2
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc14
-rw-r--r--deps/v8/src/regexp/regexp-parser.h2
-rw-r--r--deps/v8/src/regexp/regexp-stack.cc3
-rw-r--r--deps/v8/src/regexp/regexp-stack.h2
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc44
-rw-r--r--deps/v8/src/regexp/regexp-utils.h2
-rw-r--r--deps/v8/src/regexp/s390/OWNERS4
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc19
-rw-r--r--deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h4
-rw-r--r--deps/v8/src/regexp/special-case.h79
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc27
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h4
-rw-r--r--deps/v8/src/register-arch.h31
-rw-r--r--deps/v8/src/roots.cc89
-rw-r--r--deps/v8/src/roots/roots-inl.h (renamed from deps/v8/src/roots-inl.h)65
-rw-r--r--deps/v8/src/roots/roots.cc47
-rw-r--r--deps/v8/src/roots/roots.h (renamed from deps/v8/src/roots.h)73
-rw-r--r--deps/v8/src/runtime/runtime-array.cc549
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc20
-rw-r--r--deps/v8/src/runtime/runtime-bigint.cc6
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc125
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc6
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc62
-rw-r--r--deps/v8/src/runtime/runtime-date.cc10
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc62
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc12
-rw-r--r--deps/v8/src/runtime/runtime-function.cc26
-rw-r--r--deps/v8/src/runtime/runtime-futex.cc12
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc28
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc56
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc18
-rw-r--r--deps/v8/src/runtime/runtime-intl.cc16
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc40
-rw-r--r--deps/v8/src/runtime/runtime-module.cc15
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc25
-rw-r--r--deps/v8/src/runtime/runtime-object.cc180
-rw-r--r--deps/v8/src/runtime/runtime-operators.cc10
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc16
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc36
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc56
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc54
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc42
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc14
-rw-r--r--deps/v8/src/runtime/runtime-test.cc268
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc78
-rw-r--r--deps/v8/src/runtime/runtime-utils.h38
-rw-r--r--deps/v8/src/runtime/runtime-wasm.cc118
-rw-r--r--deps/v8/src/runtime/runtime-weak-refs.cc12
-rw-r--r--deps/v8/src/runtime/runtime.cc21
-rw-r--r--deps/v8/src/runtime/runtime.h48
-rw-r--r--deps/v8/src/s390/OWNERS4
-rw-r--r--deps/v8/src/sanitizer/OWNERS3
-rw-r--r--deps/v8/src/sanitizer/asan.h (renamed from deps/v8/src/asan.h)8
-rw-r--r--deps/v8/src/sanitizer/lsan-page-allocator.cc (renamed from deps/v8/src/base/lsan-page-allocator.cc)2
-rw-r--r--deps/v8/src/sanitizer/lsan-page-allocator.h (renamed from deps/v8/src/base/lsan-page-allocator.h)12
-rw-r--r--deps/v8/src/sanitizer/msan.h (renamed from deps/v8/src/msan.h)8
-rw-r--r--deps/v8/src/sanitizer/tsan.h (renamed from deps/v8/src/base/tsan.h)6
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc98
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.cc17
-rw-r--r--deps/v8/src/snapshot/deserializer-allocator.h2
-rw-r--r--deps/v8/src/snapshot/deserializer.cc173
-rw-r--r--deps/v8/src/snapshot/deserializer.h6
-rw-r--r--deps/v8/src/snapshot/embedded-file-writer.cc843
-rw-r--r--deps/v8/src/snapshot/embedded-file-writer.h483
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.cc (renamed from deps/v8/src/snapshot/embedded-data.cc)40
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-data.h (renamed from deps/v8/src/snapshot/embedded-data.h)13
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-empty.cc (renamed from deps/v8/src/snapshot/embedded-empty.cc)0
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.cc214
-rw-r--r--deps/v8/src/snapshot/embedded/embedded-file-writer.h221
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc132
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.h64
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc156
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h105
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc140
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.h63
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc109
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h61
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc615
-rw-r--r--deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.h78
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc116
-rw-r--r--deps/v8/src/snapshot/natives-common.cc4
-rw-r--r--deps/v8/src/snapshot/natives-external.cc10
-rw-r--r--deps/v8/src/snapshot/natives.h4
-rw-r--r--deps/v8/src/snapshot/object-deserializer.cc18
-rw-r--r--deps/v8/src/snapshot/partial-deserializer.cc2
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc73
-rw-r--r--deps/v8/src/snapshot/partial-serializer.h4
-rw-r--r--deps/v8/src/snapshot/read-only-deserializer.cc23
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.cc27
-rw-r--r--deps/v8/src/snapshot/read-only-serializer.h5
-rw-r--r--deps/v8/src/snapshot/references.h4
-rw-r--r--deps/v8/src/snapshot/roots-serializer.cc8
-rw-r--r--deps/v8/src/snapshot/roots-serializer.h2
-rw-r--r--deps/v8/src/snapshot/serializer-allocator.cc2
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc16
-rw-r--r--deps/v8/src/snapshot/serializer-common.h22
-rw-r--r--deps/v8/src/snapshot/serializer.cc187
-rw-r--r--deps/v8/src/snapshot/serializer.h17
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc73
-rw-r--r--deps/v8/src/snapshot/snapshot-external.cc3
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.cc4
-rw-r--r--deps/v8/src/snapshot/snapshot-source-sink.h4
-rw-r--r--deps/v8/src/snapshot/snapshot.h10
-rw-r--r--deps/v8/src/snapshot/startup-deserializer.cc6
-rw-r--r--deps/v8/src/snapshot/startup-serializer.cc67
-rw-r--r--deps/v8/src/string-hasher-inl.h164
-rw-r--r--deps/v8/src/string-hasher.h102
-rw-r--r--deps/v8/src/strings/OWNERS5
-rw-r--r--deps/v8/src/strings/char-predicates-inl.h (renamed from deps/v8/src/char-predicates-inl.h)14
-rw-r--r--deps/v8/src/strings/char-predicates.cc (renamed from deps/v8/src/char-predicates.cc)2
-rw-r--r--deps/v8/src/strings/char-predicates.h (renamed from deps/v8/src/char-predicates.h)10
-rw-r--r--deps/v8/src/strings/string-builder-inl.h (renamed from deps/v8/src/string-builder-inl.h)30
-rw-r--r--deps/v8/src/strings/string-builder.cc (renamed from deps/v8/src/string-builder.cc)35
-rw-r--r--deps/v8/src/strings/string-case.cc (renamed from deps/v8/src/string-case.cc)8
-rw-r--r--deps/v8/src/strings/string-case.h (renamed from deps/v8/src/string-case.h)6
-rw-r--r--deps/v8/src/strings/string-hasher-inl.h81
-rw-r--r--deps/v8/src/strings/string-hasher.h58
-rw-r--r--deps/v8/src/strings/string-search.h (renamed from deps/v8/src/string-search.h)102
-rw-r--r--deps/v8/src/strings/string-stream.cc (renamed from deps/v8/src/string-stream.cc)299
-rw-r--r--deps/v8/src/strings/string-stream.h (renamed from deps/v8/src/string-stream.h)16
-rw-r--r--deps/v8/src/strings/unicode-decoder.cc81
-rw-r--r--deps/v8/src/strings/unicode-decoder.h74
-rw-r--r--deps/v8/src/strings/unicode-inl.h (renamed from deps/v8/src/unicode-inl.h)38
-rw-r--r--deps/v8/src/strings/unicode.cc (renamed from deps/v8/src/unicode.cc)1213
-rw-r--r--deps/v8/src/strings/unicode.h (renamed from deps/v8/src/unicode.h)81
-rw-r--r--deps/v8/src/strings/uri.cc (renamed from deps/v8/src/uri.cc)10
-rw-r--r--deps/v8/src/strings/uri.h (renamed from deps/v8/src/uri.h)12
-rw-r--r--deps/v8/src/tasks/cancelable-task.cc (renamed from deps/v8/src/cancelable-task.cc)4
-rw-r--r--deps/v8/src/tasks/cancelable-task.h (renamed from deps/v8/src/cancelable-task.h)8
-rw-r--r--deps/v8/src/tasks/task-utils.cc (renamed from deps/v8/src/task-utils.cc)4
-rw-r--r--deps/v8/src/tasks/task-utils.h (renamed from deps/v8/src/task-utils.h)6
-rw-r--r--deps/v8/src/third_party/vtune/ittnotify_config.h10
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.cc3
-rw-r--r--deps/v8/src/torque/OWNERS6
-rw-r--r--deps/v8/src/torque/ast.h132
-rw-r--r--deps/v8/src/torque/constants.h82
-rw-r--r--deps/v8/src/torque/contextual.h3
-rw-r--r--deps/v8/src/torque/csa-generator.cc37
-rw-r--r--deps/v8/src/torque/declarable.cc17
-rw-r--r--deps/v8/src/torque/declarable.h143
-rw-r--r--deps/v8/src/torque/declaration-visitor.cc478
-rw-r--r--deps/v8/src/torque/declaration-visitor.h137
-rw-r--r--deps/v8/src/torque/declarations.cc136
-rw-r--r--deps/v8/src/torque/declarations.h43
-rw-r--r--deps/v8/src/torque/earley-parser.h23
-rw-r--r--deps/v8/src/torque/file-visitor.cc34
-rw-r--r--deps/v8/src/torque/file-visitor.h38
-rw-r--r--deps/v8/src/torque/global-context.h33
-rw-r--r--deps/v8/src/torque/implementation-visitor.cc1499
-rw-r--r--deps/v8/src/torque/implementation-visitor.h37
-rw-r--r--deps/v8/src/torque/ls/json-parser.cc6
-rw-r--r--deps/v8/src/torque/ls/json-parser.h2
-rw-r--r--deps/v8/src/torque/ls/message-handler.cc211
-rw-r--r--deps/v8/src/torque/ls/message-handler.h13
-rw-r--r--deps/v8/src/torque/ls/message.h81
-rw-r--r--deps/v8/src/torque/ls/torque-language-server.cc1
-rw-r--r--deps/v8/src/torque/server-data.cc24
-rw-r--r--deps/v8/src/torque/server-data.h29
-rw-r--r--deps/v8/src/torque/torque-compiler.cc97
-rw-r--r--deps/v8/src/torque/torque-compiler.h16
-rw-r--r--deps/v8/src/torque/torque-parser.cc401
-rw-r--r--deps/v8/src/torque/torque.cc40
-rw-r--r--deps/v8/src/torque/type-oracle.cc7
-rw-r--r--deps/v8/src/torque/type-oracle.h28
-rw-r--r--deps/v8/src/torque/type-visitor.cc292
-rw-r--r--deps/v8/src/torque/type-visitor.h45
-rw-r--r--deps/v8/src/torque/types.cc210
-rw-r--r--deps/v8/src/torque/types.h176
-rw-r--r--deps/v8/src/torque/utils.cc43
-rw-r--r--deps/v8/src/torque/utils.h77
-rw-r--r--deps/v8/src/tracing/OWNERS1
-rw-r--r--deps/v8/src/tracing/trace-event.cc6
-rw-r--r--deps/v8/src/tracing/traced-value.cc4
-rw-r--r--deps/v8/src/tracing/tracing-category-observer.cc11
-rw-r--r--deps/v8/src/trap-handler/DEPS4
-rw-r--r--deps/v8/src/trap-handler/trap-handler.h4
-rw-r--r--deps/v8/src/unicode-cache.h33
-rw-r--r--deps/v8/src/unicode-decoder.cc88
-rw-r--r--deps/v8/src/unicode-decoder.h158
-rw-r--r--deps/v8/src/utils/OWNERS1
-rw-r--r--deps/v8/src/utils/address-map.cc (renamed from deps/v8/src/address-map.cc)8
-rw-r--r--deps/v8/src/utils/address-map.h (renamed from deps/v8/src/address-map.h)12
-rw-r--r--deps/v8/src/utils/allocation.cc (renamed from deps/v8/src/allocation.cc)29
-rw-r--r--deps/v8/src/utils/allocation.h (renamed from deps/v8/src/allocation.h)29
-rw-r--r--deps/v8/src/utils/bit-vector.cc (renamed from deps/v8/src/bit-vector.cc)6
-rw-r--r--deps/v8/src/utils/bit-vector.h (renamed from deps/v8/src/bit-vector.h)8
-rw-r--r--deps/v8/src/utils/boxed-float.h (renamed from deps/v8/src/boxed-float.h)8
-rw-r--r--deps/v8/src/utils/detachable-vector.cc (renamed from deps/v8/src/detachable-vector.cc)2
-rw-r--r--deps/v8/src/utils/detachable-vector.h (renamed from deps/v8/src/detachable-vector.h)6
-rw-r--r--deps/v8/src/utils/identity-map.cc (renamed from deps/v8/src/identity-map.cc)4
-rw-r--r--deps/v8/src/utils/identity-map.h (renamed from deps/v8/src/identity-map.h)10
-rw-r--r--deps/v8/src/utils/locked-queue-inl.h (renamed from deps/v8/src/locked-queue-inl.h)14
-rw-r--r--deps/v8/src/utils/locked-queue.h (renamed from deps/v8/src/locked-queue.h)8
-rw-r--r--deps/v8/src/utils/memcopy.cc (renamed from deps/v8/src/memcopy.cc)4
-rw-r--r--deps/v8/src/utils/memcopy.h (renamed from deps/v8/src/memcopy.h)22
-rw-r--r--deps/v8/src/utils/ostreams.cc (renamed from deps/v8/src/ostreams.cc)14
-rw-r--r--deps/v8/src/utils/ostreams.h (renamed from deps/v8/src/ostreams.h)10
-rw-r--r--deps/v8/src/utils/pointer-with-payload.h (renamed from deps/v8/src/pointer-with-payload.h)6
-rw-r--r--deps/v8/src/utils/splay-tree-inl.h (renamed from deps/v8/src/splay-tree-inl.h)8
-rw-r--r--deps/v8/src/utils/splay-tree.h (renamed from deps/v8/src/splay-tree.h)23
-rw-r--r--deps/v8/src/utils/utils-inl.h (renamed from deps/v8/src/utils-inl.h)12
-rw-r--r--deps/v8/src/utils/utils.cc (renamed from deps/v8/src/utils.cc)71
-rw-r--r--deps/v8/src/utils/utils.h (renamed from deps/v8/src/utils.h)244
-rw-r--r--deps/v8/src/utils/v8dll-main.cc (renamed from deps/v8/src/v8dll-main.cc)6
-rw-r--r--deps/v8/src/utils/vector.h (renamed from deps/v8/src/vector.h)148
-rw-r--r--deps/v8/src/utils/version.cc (renamed from deps/v8/src/version.cc)9
-rw-r--r--deps/v8/src/utils/version.h (renamed from deps/v8/src/version.h)6
-rw-r--r--deps/v8/src/wasm/DEPS2
-rw-r--r--deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h49
-rw-r--r--deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h57
-rw-r--r--deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h44
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler-defs.h4
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.cc6
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-assembler.h59
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.cc87
-rw-r--r--deps/v8/src/wasm/baseline/liftoff-compiler.h17
-rw-r--r--deps/v8/src/wasm/baseline/mips/OWNERS1
-rw-r--r--deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h19
-rw-r--r--deps/v8/src/wasm/baseline/mips64/OWNERS1
-rw-r--r--deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h51
-rw-r--r--deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h49
-rw-r--r--deps/v8/src/wasm/baseline/s390/OWNERS4
-rw-r--r--deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h49
-rw-r--r--deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h79
-rw-r--r--deps/v8/src/wasm/c-api.cc1730
-rw-r--r--deps/v8/src/wasm/c-api.h42
-rw-r--r--deps/v8/src/wasm/compilation-environment.h10
-rw-r--r--deps/v8/src/wasm/decoder.h15
-rw-r--r--deps/v8/src/wasm/function-body-decoder-impl.h310
-rw-r--r--deps/v8/src/wasm/function-body-decoder.cc8
-rw-r--r--deps/v8/src/wasm/function-body-decoder.h2
-rw-r--r--deps/v8/src/wasm/function-compiler.cc107
-rw-r--r--deps/v8/src/wasm/function-compiler.h38
-rw-r--r--deps/v8/src/wasm/graph-builder-interface.cc43
-rw-r--r--deps/v8/src/wasm/js-to-wasm-wrapper-cache.h2
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.cc4
-rw-r--r--deps/v8/src/wasm/jump-table-assembler.h2
-rw-r--r--deps/v8/src/wasm/local-decl-encoder.cc2
-rw-r--r--deps/v8/src/wasm/local-decl-encoder.h2
-rw-r--r--deps/v8/src/wasm/memory-tracing.cc25
-rw-r--r--deps/v8/src/wasm/memory-tracing.h2
-rw-r--r--deps/v8/src/wasm/module-compiler.cc1046
-rw-r--r--deps/v8/src/wasm/module-compiler.h26
-rw-r--r--deps/v8/src/wasm/module-decoder.cc61
-rw-r--r--deps/v8/src/wasm/module-decoder.h2
-rw-r--r--deps/v8/src/wasm/module-instantiate.cc445
-rw-r--r--deps/v8/src/wasm/object-access.h2
-rw-r--r--deps/v8/src/wasm/signature-map.cc2
-rw-r--r--deps/v8/src/wasm/signature-map.h6
-rw-r--r--deps/v8/src/wasm/streaming-decoder.cc18
-rw-r--r--deps/v8/src/wasm/streaming-decoder.h2
-rw-r--r--deps/v8/src/wasm/value-type.h3
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.cc703
-rw-r--r--deps/v8/src/wasm/wasm-code-manager.h217
-rw-r--r--deps/v8/src/wasm/wasm-constants.h6
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc129
-rw-r--r--deps/v8/src/wasm/wasm-engine.cc357
-rw-r--r--deps/v8/src/wasm/wasm-engine.h31
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.cc51
-rw-r--r--deps/v8/src/wasm/wasm-external-refs.h2
-rw-r--r--deps/v8/src/wasm/wasm-features.cc6
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.cc41
-rw-r--r--deps/v8/src/wasm/wasm-import-wrapper-cache.h40
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc235
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h3
-rw-r--r--deps/v8/src/wasm/wasm-js.cc416
-rw-r--r--deps/v8/src/wasm/wasm-js.h2
-rw-r--r--deps/v8/src/wasm/wasm-linkage.h6
-rw-r--r--deps/v8/src/wasm/wasm-memory.cc35
-rw-r--r--deps/v8/src/wasm/wasm-memory.h10
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc57
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h13
-rw-r--r--deps/v8/src/wasm/wasm-module.cc40
-rw-r--r--deps/v8/src/wasm/wasm-module.h20
-rw-r--r--deps/v8/src/wasm/wasm-objects-inl.h58
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc532
-rw-r--r--deps/v8/src/wasm/wasm-objects.h113
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc83
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h57
-rw-r--r--deps/v8/src/wasm/wasm-result.cc4
-rw-r--r--deps/v8/src/wasm/wasm-result.h2
-rw-r--r--deps/v8/src/wasm/wasm-serialization.cc34
-rw-r--r--deps/v8/src/wasm/wasm-text.cc8
-rw-r--r--deps/v8/src/wasm/wasm-tier.h13
-rw-r--r--deps/v8/src/wasm/wasm-value.h6
-rw-r--r--deps/v8/src/zone/accounting-allocator.cc2
-rw-r--r--deps/v8/src/zone/zone-allocator.h22
-rw-r--r--deps/v8/src/zone/zone-chunk-list.h4
-rw-r--r--deps/v8/src/zone/zone-containers.h4
-rw-r--r--deps/v8/src/zone/zone-handle-set.h14
-rw-r--r--deps/v8/src/zone/zone-list-inl.h10
-rw-r--r--deps/v8/src/zone/zone-segment.cc2
-rw-r--r--deps/v8/src/zone/zone-segment.h2
-rw-r--r--deps/v8/src/zone/zone-splay-tree.h2
-rw-r--r--deps/v8/src/zone/zone.cc6
-rw-r--r--deps/v8/src/zone/zone.h12
-rw-r--r--deps/v8/test/BUILD.gn4
-rw-r--r--deps/v8/test/OWNERS4
-rw-r--r--deps/v8/test/benchmarks/benchmarks.status21
-rw-r--r--deps/v8/test/cctest/BUILD.gn13
-rw-r--r--deps/v8/test/cctest/OWNERS1
-rw-r--r--deps/v8/test/cctest/assembler-helper-arm.cc11
-rw-r--r--deps/v8/test/cctest/assembler-helper-arm.h4
-rw-r--r--deps/v8/test/cctest/cctest.cc16
-rw-r--r--deps/v8/test/cctest/cctest.h25
-rw-r--r--deps/v8/test/cctest/cctest.status33
-rw-r--r--deps/v8/test/cctest/collector.h (renamed from deps/v8/src/collector.h)8
-rw-r--r--deps/v8/test/cctest/compiler/c-signature.h12
-rw-r--r--deps/v8/test/cctest/compiler/call-tester.h4
-rw-r--r--deps/v8/test/cctest/compiler/code-assembler-tester.h6
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.cc2
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h16
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc14
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h4
-rw-r--r--deps/v8/test/cctest/compiler/graph-and-builders.h43
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h286
-rw-r--r--deps/v8/test/cctest/compiler/serializer-tester.cc89
-rw-r--r--deps/v8/test/cctest/compiler/serializer-tester.h4
-rw-r--r--deps/v8/test/cctest/compiler/test-basic-block-profiler.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-branch-combine.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-code-assembler.cc24
-rw-r--r--deps/v8/test/cctest/compiler/test-code-generator.cc57
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-js-constant-cache.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc10
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc18
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc131
-rw-r--r--deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-deopt.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-intrinsics.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsbranches.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc12
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsexceptions.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsobjects.cc10
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsops.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-load-store.cc54
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc103
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-run-retpoline.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stackcheck.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-tail-calls.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-run-unwinding-info.cc8
-rw-r--r--deps/v8/test/cctest/compiler/test-run-variables.cc8
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h4
-rw-r--r--deps/v8/test/cctest/gay-fixed.cc2
-rw-r--r--deps/v8/test/cctest/gay-fixed.h2
-rw-r--r--deps/v8/test/cctest/gay-precision.cc2
-rw-r--r--deps/v8/test/cctest/gay-precision.h2
-rw-r--r--deps/v8/test/cctest/gay-shortest.cc2
-rw-r--r--deps/v8/test/cctest/gay-shortest.h2
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc2
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.h18
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc26
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc6
-rw-r--r--deps/v8/test/cctest/heap/test-compaction.cc4
-rw-r--r--deps/v8/test/cctest/heap/test-concurrent-marking.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-embedder-tracing.cc56
-rw-r--r--deps/v8/test/cctest/heap/test-external-string-tracker.cc8
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc766
-rw-r--r--deps/v8/test/cctest/heap/test-incremental-marking.cc6
-rw-r--r--deps/v8/test/cctest/heap/test-invalidated-slots.cc42
-rw-r--r--deps/v8/test/cctest/heap/test-iterators.cc101
-rw-r--r--deps/v8/test/cctest/heap/test-lab.cc14
-rw-r--r--deps/v8/test/cctest/heap/test-mark-compact.cc22
-rw-r--r--deps/v8/test/cctest/heap/test-page-promotion.cc4
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc26
-rw-r--r--deps/v8/test/cctest/heap/test-unmapper.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-weak-references.cc36
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc31
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden284
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden1
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden139
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden1
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden2
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc38
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.cc10
-rw-r--r--deps/v8/test/cctest/interpreter/interpreter-tester.h12
-rw-r--r--deps/v8/test/cctest/interpreter/source-position-matcher.cc4
-rw-r--r--deps/v8/test/cctest/interpreter/source-position-matcher.h6
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc48
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc37
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc100
-rw-r--r--deps/v8/test/cctest/interpreter/test-source-positions.cc12
-rw-r--r--deps/v8/test/cctest/libplatform/DEPS3
-rw-r--r--deps/v8/test/cctest/libplatform/test-tracing.cc272
-rw-r--r--deps/v8/test/cctest/parsing/test-parse-decision.cc16
-rw-r--r--deps/v8/test/cctest/parsing/test-preparser.cc51
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner-streams.cc2
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner.cc4
-rw-r--r--deps/v8/test/cctest/setup-isolate-for-tests.h2
-rw-r--r--deps/v8/test/cctest/test-accessor-assembler.cc4
-rw-r--r--deps/v8/test/cctest/test-accessors.cc15
-rw-r--r--deps/v8/test/cctest/test-allocation.cc4
-rw-r--r--deps/v8/test/cctest/test-api-accessors.cc16
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc32
-rw-r--r--deps/v8/test/cctest/test-api-stack-traces.cc808
-rw-r--r--deps/v8/test/cctest/test-api.cc1623
-rw-r--r--deps/v8/test/cctest/test-api.h18
-rw-r--r--deps/v8/test/cctest/test-array-list.cc2
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc217
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc327
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc107
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc476
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc507
-rw-r--r--deps/v8/test/cctest/test-assembler-ppc.cc56
-rw-r--r--deps/v8/test/cctest/test-assembler-s390.cc366
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc91
-rw-r--r--deps/v8/test/cctest/test-atomicops.cc2
-rw-r--r--deps/v8/test/cctest/test-bignum-dtoa.cc136
-rw-r--r--deps/v8/test/cctest/test-bignum.cc10
-rw-r--r--deps/v8/test/cctest/test-bit-vector.cc4
-rw-r--r--deps/v8/test/cctest/test-circular-queue.cc8
-rw-r--r--deps/v8/test/cctest/test-code-layout.cc12
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc255
-rw-r--r--deps/v8/test/cctest/test-compiler.cc126
-rw-r--r--deps/v8/test/cctest/test-constantpool.cc4
-rw-r--r--deps/v8/test/cctest/test-conversions.cc8
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc665
-rw-r--r--deps/v8/test/cctest/test-date.cc8
-rw-r--r--deps/v8/test/cctest/test-debug.cc220
-rw-r--r--deps/v8/test/cctest/test-decls.cc39
-rw-r--r--deps/v8/test/cctest/test-deoptimization.cc14
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc40
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc29
-rw-r--r--deps/v8/test/cctest/test-disasm-arm64.cc14
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc17
-rw-r--r--deps/v8/test/cctest/test-disasm-mips.cc20
-rw-r--r--deps/v8/test/cctest/test-disasm-mips64.cc20
-rw-r--r--deps/v8/test/cctest/test-disasm-ppc.cc20
-rw-r--r--deps/v8/test/cctest/test-disasm-s390.cc20
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc19
-rw-r--r--deps/v8/test/cctest/test-diy-fp.cc4
-rw-r--r--deps/v8/test/cctest/test-double.cc6
-rw-r--r--deps/v8/test/cctest/test-dtoa.cc135
-rw-r--r--deps/v8/test/cctest/test-elements-kind.cc136
-rw-r--r--deps/v8/test/cctest/test-factory.cc94
-rw-r--r--deps/v8/test/cctest/test-fast-dtoa.cc84
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc14
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.h4
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc657
-rw-r--r--deps/v8/test/cctest/test-fixed-dtoa.cc226
-rw-r--r--deps/v8/test/cctest/test-flags.cc14
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc14
-rw-r--r--deps/v8/test/cctest/test-fuzz-arm64.cc6
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc252
-rw-r--r--deps/v8/test/cctest/test-global-object.cc4
-rw-r--r--deps/v8/test/cctest/test-hashcode.cc54
-rw-r--r--deps/v8/test/cctest/test-hashmap.cc4
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc41
-rw-r--r--deps/v8/test/cctest/test-icache.cc8
-rw-r--r--deps/v8/test/cctest/test-identity-map.cc6
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc121
-rw-r--r--deps/v8/test/cctest/test-intl.cc4
-rw-r--r--deps/v8/test/cctest/test-javascript-arm64.cc16
-rw-r--r--deps/v8/test/cctest/test-js-arm64-variables.cc16
-rw-r--r--deps/v8/test/cctest/test-js-weak-refs.cc118
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc15
-rw-r--r--deps/v8/test/cctest/test-lockers.cc17
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc18
-rw-r--r--deps/v8/test/cctest/test-log.cc91
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc26
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc54
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc59
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc75
-rw-r--r--deps/v8/test/cctest/test-managed.cc2
-rw-r--r--deps/v8/test/cctest/test-mementos.cc10
-rw-r--r--deps/v8/test/cctest/test-modules.cc2
-rw-r--r--deps/v8/test/cctest/test-object.cc152
-rw-r--r--deps/v8/test/cctest/test-orderedhashtable.cc4
-rw-r--r--deps/v8/test/cctest/test-parsing.cc221
-rw-r--r--deps/v8/test/cctest/test-poison-disasm-arm.cc12
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc23
-rw-r--r--deps/v8/test/cctest/test-random-number-generator.cc6
-rw-r--r--deps/v8/test/cctest/test-regexp.cc44
-rw-r--r--deps/v8/test/cctest/test-representation.cc2
-rw-r--r--deps/v8/test/cctest/test-roots.cc4
-rw-r--r--deps/v8/test/cctest/test-sampler-api.cc18
-rw-r--r--deps/v8/test/cctest/test-serialize.cc158
-rw-r--r--deps/v8/test/cctest/test-smi-lexicographic-compare.cc8
-rw-r--r--deps/v8/test/cctest/test-stack-unwinding-x64.cc5
-rw-r--r--deps/v8/test/cctest/test-strings.cc73
-rw-r--r--deps/v8/test/cctest/test-strtod.cc32
-rw-r--r--deps/v8/test/cctest/test-symbols.cc6
-rw-r--r--deps/v8/test/cctest/test-sync-primitives-arm.cc10
-rw-r--r--deps/v8/test/cctest/test-sync-primitives-arm64.cc14
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc8
-rw-r--r--deps/v8/test/cctest/test-threads.cc2
-rw-r--r--deps/v8/test/cctest/test-trace-event.cc2
-rw-r--r--deps/v8/test/cctest/test-transitions.cc20
-rw-r--r--deps/v8/test/cctest/test-transitions.h2
-rw-r--r--deps/v8/test/cctest/test-typedarrays.cc6
-rw-r--r--deps/v8/test/cctest/test-types.cc10
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc175
-rw-r--r--deps/v8/test/cctest/test-unscopables-hidden-prototype.cc2
-rw-r--r--deps/v8/test/cctest/test-unwinder.cc34
-rw-r--r--deps/v8/test/cctest/test-usecounters.cc2
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.cc16
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.h9
-rw-r--r--deps/v8/test/cctest/test-utils.cc28
-rw-r--r--deps/v8/test/cctest/test-version.cc12
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc51
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc39
-rw-r--r--deps/v8/test/cctest/torque/test-torque.cc39
-rw-r--r--deps/v8/test/cctest/trace-extension.cc6
-rw-r--r--deps/v8/test/cctest/trace-extension.h2
-rw-r--r--deps/v8/test/cctest/unicode-helpers.cc2
-rw-r--r--deps/v8/test/cctest/unicode-helpers.h2
-rw-r--r--deps/v8/test/cctest/wasm/test-c-wasm-entry.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-jump-table-assembler.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc4
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc14
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc9
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc21
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc247
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc78
-rw-r--r--deps/v8/test/cctest/wasm/test-streaming-compilation.cc13
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc24
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc60
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc6
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-serialization.cc8
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc9
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc33
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-trap-position.cc27
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.cc64
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h14
-rw-r--r--deps/v8/test/common/assembler-tester.h4
-rw-r--r--deps/v8/test/common/types-fuzz.h8
-rw-r--r--deps/v8/test/common/wasm/test-signatures.h41
-rw-r--r--deps/v8/test/common/wasm/wasm-macro-gen.h20
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc23
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.h9
-rw-r--r--deps/v8/test/debugger/debug/compiler/debug-catch-prediction.js6
-rw-r--r--deps/v8/test/debugger/debug/compiler/osr-typing-debug-change.js4
-rw-r--r--deps/v8/test/debugger/debug/debug-break-inline.js1
-rw-r--r--deps/v8/test/debugger/debug/debug-compile-optimized.js3
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-arguments.js2
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-locals-optimized-double.js1
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-locals-optimized.js1
-rw-r--r--deps/v8/test/debugger/debug/debug-liveedit-inline.js3
-rw-r--r--deps/v8/test/debugger/debug/debug-materialized.js2
-rw-r--r--deps/v8/test/debugger/debug/debug-optimize.js1
-rw-r--r--deps/v8/test/debugger/debug/debug-scopes.js3
-rw-r--r--deps/v8/test/debugger/debug/debug-step-turbofan.js1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js3
-rw-r--r--deps/v8/test/debugger/debug/for-in-opt.js9
-rw-r--r--deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js1
-rw-r--r--deps/v8/test/debugger/debug/ignition/optimized-debug-frame.js1
-rw-r--r--deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-392114.js1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-4309-1.js2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-4309-2.js2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-4309-3.js2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-4320.js1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-514362.js1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-5279.js12
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-387599.js1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-633999.js1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-debug-code-recompilation.js2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-debug-deopt-while-recompile.js7
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js3
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js1
-rw-r--r--deps/v8/test/debugger/regress/regress-5901-1.js1
-rw-r--r--deps/v8/test/debugger/regress/regress-5901-2.js1
-rw-r--r--deps/v8/test/debugger/regress/regress-5950.js1
-rw-r--r--deps/v8/test/debugger/regress/regress-6526.js3
-rw-r--r--deps/v8/test/debugger/regress/regress-7421.js6
-rw-r--r--deps/v8/test/debugger/regress/regress-crbug-736758.js1
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.cc2
-rw-r--r--deps/v8/test/fuzzer/multi-return.cc12
-rw-r--r--deps/v8/test/fuzzer/parser.cc4
-rw-r--r--deps/v8/test/fuzzer/regexp-builtins.cc5
-rw-r--r--deps/v8/test/fuzzer/regexp.cc2
-rw-r--r--deps/v8/test/fuzzer/wasm-async.cc9
-rw-r--r--deps/v8/test/fuzzer/wasm-code.cc8
-rw-r--r--deps/v8/test/fuzzer/wasm-compile.cc28
-rw-r--r--deps/v8/test/fuzzer/wasm-fuzzer-common.cc12
-rw-r--r--deps/v8/test/fuzzer/wasm.cc4
-rw-r--r--deps/v8/test/inspector/DEPS10
-rw-r--r--deps/v8/test/inspector/OWNERS1
-rw-r--r--deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt85
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt17
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js1
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt12
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js1
-rw-r--r--deps/v8/test/inspector/debugger/framework-break-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/framework-break.js2
-rw-r--r--deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js1
-rw-r--r--deps/v8/test/inspector/debugger/script-on-after-compile-snapshot-expected.txt28
-rw-r--r--deps/v8/test/inspector/debugger/script-on-after-compile-snapshot.js39
-rw-r--r--deps/v8/test/inspector/debugger/set-instrumentation-breakpoint-expected.txt89
-rw-r--r--deps/v8/test/inspector/debugger/set-instrumentation-breakpoint.js131
-rw-r--r--deps/v8/test/inspector/debugger/step-into-optimized-blackbox.js3
-rw-r--r--deps/v8/test/inspector/debugger/wasm-anyref-global-expected.txt7
-rw-r--r--deps/v8/test/inspector/debugger/wasm-anyref-global.js83
-rw-r--r--deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt8
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt108
-rw-r--r--deps/v8/test/inspector/debugger/wasm-scope-info.js10
-rw-r--r--deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt6
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stepping-expected.txt18
-rw-r--r--deps/v8/test/inspector/inspector-test.cc83
-rw-r--r--deps/v8/test/inspector/inspector.status3
-rw-r--r--deps/v8/test/inspector/isolate-data.cc13
-rw-r--r--deps/v8/test/inspector/isolate-data.h4
-rw-r--r--deps/v8/test/inspector/runtime/enable-async-stack-expected.txt4
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-expected.txt10
-rw-r--r--deps/v8/test/inspector/runtime/get-properties.js17
-rw-r--r--deps/v8/test/inspector/task-runner.h4
-rw-r--r--deps/v8/test/intl/date-format/check-calendar.js60
-rw-r--r--deps/v8/test/intl/date-format/check-numbering-system.js68
-rw-r--r--deps/v8/test/intl/date-format/en-format-range-to-parts.js49
-rw-r--r--deps/v8/test/intl/date-format/format-range-to-parts.js63
-rw-r--r--deps/v8/test/intl/general/CanonicalizeLocaleListTakeLocale.js2
-rw-r--r--deps/v8/test/intl/general/supported-locales-of.js10
-rw-r--r--deps/v8/test/intl/intl.status9
-rw-r--r--deps/v8/test/intl/list-format/format.js4
-rw-r--r--deps/v8/test/intl/locale/locale-canonicalization.js2
-rw-r--r--deps/v8/test/intl/locale/locale-constructor.js2
-rw-r--r--deps/v8/test/intl/locale/locale-properties.js2
-rw-r--r--deps/v8/test/intl/locale/maximize_minimize.js2
-rw-r--r--deps/v8/test/intl/locale/property.js2
-rw-r--r--deps/v8/test/intl/locale/regress-8032.js2
-rw-r--r--deps/v8/test/intl/number-format/check-numbering-system.js68
-rw-r--r--deps/v8/test/intl/number-format/unified/compact-display.js15
-rw-r--r--deps/v8/test/intl/number-format/unified/constructor-order.js70
-rw-r--r--deps/v8/test/intl/number-format/unified/currency-display.js39
-rw-r--r--deps/v8/test/intl/number-format/unified/currency-sign.js41
-rw-r--r--deps/v8/test/intl/number-format/unified/no-compact-display.js30
-rw-r--r--deps/v8/test/intl/number-format/unified/notation.js89
-rw-r--r--deps/v8/test/intl/number-format/unified/sign-display.js28
-rw-r--r--deps/v8/test/intl/number-format/unified/style-unit.js180
-rw-r--r--deps/v8/test/intl/number-format/unified/unit-display.js36
-rw-r--r--deps/v8/test/intl/regress-7982.js2
-rw-r--r--deps/v8/test/intl/regress-8604.js8
-rw-r--r--deps/v8/test/intl/regress-8657.js2
-rw-r--r--deps/v8/test/intl/regress-966285.js10
-rw-r--r--deps/v8/test/intl/regress-971636.js10
-rw-r--r--deps/v8/test/intl/relative-time-format/numberingSystems.js46
-rw-r--r--deps/v8/test/js-perf-test/Array/slice.js141
-rw-r--r--deps/v8/test/js-perf-test/ArraySort/sort-base.js4
-rw-r--r--deps/v8/test/js-perf-test/BigInt/run.js28
-rw-r--r--deps/v8/test/js-perf-test/BigInt/to-boolean.js59
-rw-r--r--deps/v8/test/js-perf-test/Intl/constructor.js12
-rw-r--r--deps/v8/test/js-perf-test/JSTests1.json29
-rw-r--r--deps/v8/test/js-perf-test/JSTests2.json17
-rw-r--r--deps/v8/test/js-perf-test/JSTests3.json8
-rw-r--r--deps/v8/test/js-perf-test/JSTests5.json17
-rw-r--r--deps/v8/test/js-perf-test/ManyClosures/create-many-closures.js1
-rw-r--r--deps/v8/test/js-perf-test/ObjectFreeze/array-indexof-includes.js53
-rw-r--r--deps/v8/test/js-perf-test/ObjectFreeze/run.js2
-rw-r--r--deps/v8/test/js-perf-test/ObjectFreeze/spread-call.js59
-rw-r--r--deps/v8/test/js-perf-test/StackTrace/capture.js1
-rw-r--r--deps/v8/test/js-perf-test/StackTrace/serialize.js1
-rw-r--r--deps/v8/test/message/fail/class-field-constructor.js2
-rw-r--r--deps/v8/test/message/fail/class-field-constructor.out2
-rw-r--r--deps/v8/test/message/fail/class-field-static-constructor.js2
-rw-r--r--deps/v8/test/message/fail/class-field-static-constructor.out2
-rw-r--r--deps/v8/test/message/fail/class-field-static-prototype.js2
-rw-r--r--deps/v8/test/message/fail/class-field-static-prototype.out2
-rw-r--r--deps/v8/test/message/fail/class-fields-computed.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-computed.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-private-class-in-function.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-private-class-in-function.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-private-outside-class.js6
-rw-r--r--deps/v8/test/message/fail/class-fields-private-outside-class.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-private-source-positions.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-private-source-positions.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-early-2.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-early-2.out6
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-early.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-early.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-in-module.js1
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-in-module.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-read.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-read.out6
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-write.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-private-throw-write.out6
-rw-r--r--deps/v8/test/message/fail/class-fields-private-undeclared-lazy-class.js13
-rw-r--r--deps/v8/test/message/fail/class-fields-private-undeclared-lazy-class.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-private-undefined-inner-class.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-private-undefined-inner-class.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-static-throw.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-static-throw.out4
-rw-r--r--deps/v8/test/message/fail/class-fields-throw.js2
-rw-r--r--deps/v8/test/message/fail/class-fields-throw.out8
-rw-r--r--deps/v8/test/message/fail/destructuring-object-private-name.js2
-rw-r--r--deps/v8/test/message/fail/destructuring-object-private-name.out2
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-ellipsis.out1
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-max-display-depth.out1
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-proxy.out1
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular-substructure.out1
-rw-r--r--deps/v8/test/message/fail/json-stringify-circular.out1
-rw-r--r--deps/v8/test/message/fail/wasm-async-compile-fail.js10
-rw-r--r--deps/v8/test/message/fail/wasm-async-compile-fail.out5
-rw-r--r--deps/v8/test/message/fail/wasm-async-instantiate-fail.js10
-rw-r--r--deps/v8/test/message/fail/wasm-async-instantiate-fail.out5
-rw-r--r--deps/v8/test/message/fail/wasm-exception-rethrow.js21
-rw-r--r--deps/v8/test/message/fail/wasm-exception-rethrow.out4
-rw-r--r--deps/v8/test/message/fail/wasm-exception-throw.js17
-rw-r--r--deps/v8/test/message/fail/wasm-exception-throw.out4
-rw-r--r--deps/v8/test/message/fail/wasm-streaming-compile-fail.js12
-rw-r--r--deps/v8/test/message/fail/wasm-streaming-compile-fail.out5
-rw-r--r--deps/v8/test/message/fail/wasm-streaming-instantiate-fail.js12
-rw-r--r--deps/v8/test/message/fail/wasm-streaming-instantiate-fail.out5
-rw-r--r--deps/v8/test/message/fail/wasm-sync-compile-fail.js9
-rw-r--r--deps/v8/test/message/fail/wasm-sync-compile-fail.out6
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.js2
-rw-r--r--deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.out2
-rw-r--r--deps/v8/test/mjsunit/allocation-site-info.js6
-rw-r--r--deps/v8/test/mjsunit/arguments-deopt.js13
-rw-r--r--deps/v8/test/mjsunit/array-constructor-feedback.js8
-rw-r--r--deps/v8/test/mjsunit/array-literal-feedback.js16
-rw-r--r--deps/v8/test/mjsunit/array-literal-transitions.js10
-rw-r--r--deps/v8/test/mjsunit/array-methods-read-only-length.js6
-rw-r--r--deps/v8/test/mjsunit/array-natives-elements.js1
-rw-r--r--deps/v8/test/mjsunit/array-push12.js1
-rw-r--r--deps/v8/test/mjsunit/array-push3.js3
-rw-r--r--deps/v8/test/mjsunit/array-push9.js1
-rw-r--r--deps/v8/test/mjsunit/array-shift2.js1
-rw-r--r--deps/v8/test/mjsunit/array-shift4.js1
-rw-r--r--deps/v8/test/mjsunit/array-sort.js9
-rw-r--r--deps/v8/test/mjsunit/array-store-and-grow.js3
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces-promise-all.js2
-rw-r--r--deps/v8/test/mjsunit/async-stack-traces.js18
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block-opt.js9
-rw-r--r--deps/v8/test/mjsunit/code-coverage-block.js55
-rw-r--r--deps/v8/test/mjsunit/code-coverage-class-fields.js144
-rw-r--r--deps/v8/test/mjsunit/compare-known-objects-tostringtag.js4
-rw-r--r--deps/v8/test/mjsunit/compiler-regress-787301.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/array-access.js108
-rw-r--r--deps/v8/test/mjsunit/compiler/array-constructor.js77
-rw-r--r--deps/v8/test/mjsunit/compiler/array-every.js50
-rw-r--r--deps/v8/test/mjsunit/compiler/array-find.js50
-rw-r--r--deps/v8/test/mjsunit/compiler/array-findindex.js50
-rw-r--r--deps/v8/test/mjsunit/compiler/array-is-array.js44
-rw-r--r--deps/v8/test/mjsunit/compiler/array-length.js30
-rw-r--r--deps/v8/test/mjsunit/compiler/array-slice-clone.js52
-rw-r--r--deps/v8/test/mjsunit/compiler/array-some.js50
-rw-r--r--deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/constant-fold-add-static.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js50
-rw-r--r--deps/v8/test/mjsunit/compiler/field-representation-tracking.js16
-rw-r--r--deps/v8/test/mjsunit/compiler/load-elimination-const-field.js156
-rw-r--r--deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/number-comparison-truncations.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-for-in.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-alignment.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-array-len.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-assert.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope-id.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-block-scope.js7
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-for-let.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-forin-nested.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-infinite.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-labeled.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-literals-adapted.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-literals.js5
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-manual1.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-manual2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-maze1.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-maze2.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-nested2b.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-one.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-regex-id.js4
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-simple.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-try-catch.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-two.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/osr-while-let.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-607493.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-645851.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-650215.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-669517.js6
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-673244.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-803022.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9017.js39
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9137-1.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-9137-2.js28
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-919754.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-957559.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-958021.js22
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-958350.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-958420.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-961986.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-966560-1.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-966560-2.js14
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-crbug-965513.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-v8-9139.js20
-rw-r--r--deps/v8/test/mjsunit/compiler/spread-call.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/string-startswith.js81
-rw-r--r--deps/v8/test/mjsunit/compiler/try-osr.js4
-rw-r--r--deps/v8/test/mjsunit/concurrent-initial-prototype-change.js2
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking.js98
-rw-r--r--deps/v8/test/mjsunit/constant-folding-2.js1
-rw-r--r--deps/v8/test/mjsunit/constant-folding.js2
-rw-r--r--deps/v8/test/mjsunit/context-calls-maintained.js8
-rw-r--r--deps/v8/test/mjsunit/cross-realm-filtering.js12
-rw-r--r--deps/v8/test/mjsunit/dehoisted-array-index.js9
-rw-r--r--deps/v8/test/mjsunit/deopt-recursive-eager-once.js6
-rw-r--r--deps/v8/test/mjsunit/deopt-recursive-lazy-once.js6
-rw-r--r--deps/v8/test/mjsunit/deopt-recursive-soft-once.js6
-rw-r--r--deps/v8/test/mjsunit/deopt-unlinked.js8
-rw-r--r--deps/v8/test/mjsunit/deopt-with-fp-regs.js1
-rw-r--r--deps/v8/test/mjsunit/deserialize-optimize-inner.js1
-rw-r--r--deps/v8/test/mjsunit/dictionary-prototypes.js11
-rw-r--r--deps/v8/test/mjsunit/div-mul-minus-one.js3
-rw-r--r--deps/v8/test/mjsunit/elements-kind.js7
-rw-r--r--deps/v8/test/mjsunit/elements-transition-hoisting.js7
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-1.js2
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-10.js2
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-11.js1
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-12.js1
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-4.js1
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-5.js3
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-6.js1
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-7.js1
-rw-r--r--deps/v8/test/mjsunit/elide-double-hole-check-8.js1
-rw-r--r--deps/v8/test/mjsunit/ensure-growing-store-learns.js4
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator-detached.js1
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator-turbo.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js6
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-crankshaft.js9
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-declaration-sloppy.js26
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-declaration.js26
-rw-r--r--deps/v8/test/mjsunit/es6/block-scoping-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-scoping.js2
-rw-r--r--deps/v8/test/mjsunit/es6/call-with-spread-modify-array-iterator.js1
-rw-r--r--deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js1
-rw-r--r--deps/v8/test/mjsunit/es6/call-with-spread.js2
-rw-r--r--deps/v8/test/mjsunit/es6/classes.js6
-rw-r--r--deps/v8/test/mjsunit/es6/collection-iterator.js1
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js4
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js4
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js4
-rw-r--r--deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js4
-rw-r--r--deps/v8/test/mjsunit/es6/computed-property-names-deopt.js1
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount-nolazy.js2
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount.js2
-rw-r--r--deps/v8/test/mjsunit/es6/indexed-integer-exotics.js1
-rw-r--r--deps/v8/test/mjsunit/es6/instanceof-proxies.js2
-rw-r--r--deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect.js2
-rw-r--r--deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js3
-rw-r--r--deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js2
-rw-r--r--deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect4.js3
-rw-r--r--deps/v8/test/mjsunit/es6/math-clz32.js1
-rw-r--r--deps/v8/test/mjsunit/es6/math-fround.js1
-rw-r--r--deps/v8/test/mjsunit/es6/math-trunc.js4
-rw-r--r--deps/v8/test/mjsunit/es6/object-literals-method.js1
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-cross-realm-exception.js1
-rw-r--r--deps/v8/test/mjsunit/es6/proxies-get-own-property-descriptor.js4
-rw-r--r--deps/v8/test/mjsunit/es6/proxies.js1
-rw-r--r--deps/v8/test/mjsunit/es6/reflect-define-property.js6
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-347906.js1
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-3741.js1
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-411237.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-4160.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-508074.js1
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-5598.js1
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-666622.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-9234.js35
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-crbug-448730.js1
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-inlined-new-target.js1
-rw-r--r--deps/v8/test/mjsunit/es6/spread-array-misc.js10
-rw-r--r--deps/v8/test/mjsunit/es6/spread-array-mutated-prototype.js4
-rw-r--r--deps/v8/test/mjsunit/es6/spread-array-pristine-prototype.js1
-rw-r--r--deps/v8/test/mjsunit/es6/spread-array-prototype-proxy.js1
-rw-r--r--deps/v8/test/mjsunit/es6/spread-array-prototype-setter1.js1
-rw-r--r--deps/v8/test/mjsunit/es6/spread-array-prototype-setter2.js1
-rw-r--r--deps/v8/test/mjsunit/es6/spread-call.js1
-rw-r--r--deps/v8/test/mjsunit/es6/super-with-spread-modify-array-iterator.js1
-rw-r--r--deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js1
-rw-r--r--deps/v8/test/mjsunit/es6/super-with-spread.js1
-rw-r--r--deps/v8/test/mjsunit/es6/symbols.js2
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-detached.js (renamed from deps/v8/test/mjsunit/es6/typedarray-neutered.js)9
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-tostring.js6
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js10
-rw-r--r--deps/v8/test/mjsunit/es9/object-spread-basic.js48
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-866357.js1
-rw-r--r--deps/v8/test/mjsunit/es9/regress/regress-866727.js3
-rw-r--r--deps/v8/test/mjsunit/fast-prototype.js58
-rw-r--r--deps/v8/test/mjsunit/field-type-tracking.js14
-rw-r--r--deps/v8/test/mjsunit/filter-element-kinds.js1
-rw-r--r--deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js481
-rw-r--r--deps/v8/test/mjsunit/generated-transition-stub.js4
-rw-r--r--deps/v8/test/mjsunit/getters-on-elements.js9
-rw-r--r--deps/v8/test/mjsunit/global-infinity-strict.js2
-rw-r--r--deps/v8/test/mjsunit/global-nan-strict.js2
-rw-r--r--deps/v8/test/mjsunit/global-nan.js1
-rw-r--r--deps/v8/test/mjsunit/global-undefined-strict.js2
-rw-r--r--deps/v8/test/mjsunit/global-undefined.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/turbo.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/bigint/typedarray.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/block-lazy-compile.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-reduced.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/generators.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/modules-import-namespace.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields-ic.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields-special-object.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields-static.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/private-fields.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/private-methods.js95
-rw-r--r--deps/v8/test/mjsunit/harmony/public-instance-class-fields.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/public-static-class-fields.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regress-generators-resume.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-772649.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-8808.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-347528.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/sharedarraybuffer.js17
-rw-r--r--deps/v8/test/mjsunit/harmony/string-matchAll-deleted-matchAll.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/string-matchAll.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/to-string.js52
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js22
-rw-r--r--deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js2
-rw-r--r--deps/v8/test/mjsunit/ignition/osr-from-bytecode.js1
-rw-r--r--deps/v8/test/mjsunit/ignition/osr-from-generator.js4
-rw-r--r--deps/v8/test/mjsunit/ignition/throw-if-hole.js4
-rw-r--r--deps/v8/test/mjsunit/ignition/throw-if-not-hole.js6
-rw-r--r--deps/v8/test/mjsunit/ignition/throw-super-not-called.js4
-rw-r--r--deps/v8/test/mjsunit/immutable-context-slot-inlining.js1
-rw-r--r--deps/v8/test/mjsunit/induction-variable-turbofan.js1
-rw-r--r--deps/v8/test/mjsunit/integrity-level-map-update.js1
-rw-r--r--deps/v8/test/mjsunit/interrupt-budget-override.js2
-rw-r--r--deps/v8/test/mjsunit/json-parse-slice.js6
-rw-r--r--deps/v8/test/mjsunit/json-parser-recursive.js3
-rw-r--r--deps/v8/test/mjsunit/json-stringify-typedarray.js18
-rw-r--r--deps/v8/test/mjsunit/json.js3
-rw-r--r--deps/v8/test/mjsunit/keyed-has-ic.js34
-rw-r--r--deps/v8/test/mjsunit/keyed-ic.js67
-rw-r--r--deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js1
-rw-r--r--deps/v8/test/mjsunit/keyed-load-null-receiver.js42
-rw-r--r--deps/v8/test/mjsunit/keyed-load-with-string-key.js1
-rw-r--r--deps/v8/test/mjsunit/keyed-load-with-symbol-key.js1
-rw-r--r--deps/v8/test/mjsunit/large-object-literal-slow-elements.js1
-rw-r--r--deps/v8/test/mjsunit/lea-add.js4
-rw-r--r--deps/v8/test/mjsunit/lithium/DivI.js2
-rw-r--r--deps/v8/test/mjsunit/lithium/MathExp.js1
-rw-r--r--deps/v8/test/mjsunit/lithium/MulI.js3
-rw-r--r--deps/v8/test/mjsunit/lithium/StoreKeyed.js2
-rw-r--r--deps/v8/test/mjsunit/lithium/StoreKeyedExternal.js5
-rw-r--r--deps/v8/test/mjsunit/load_poly_effect.js1
-rw-r--r--deps/v8/test/mjsunit/math-abs.js5
-rw-r--r--deps/v8/test/mjsunit/math-ceil.js4
-rw-r--r--deps/v8/test/mjsunit/math-deopt.js10
-rw-r--r--deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js1
-rw-r--r--deps/v8/test/mjsunit/math-floor-of-div-nosudiv.js8
-rw-r--r--deps/v8/test/mjsunit/math-floor-part1.js6
-rw-r--r--deps/v8/test/mjsunit/math-floor-part4.js8
-rw-r--r--deps/v8/test/mjsunit/math-min-max.js2
-rw-r--r--deps/v8/test/mjsunit/math-pow.js1
-rw-r--r--deps/v8/test/mjsunit/math-round.js5
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js6
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status99
-rw-r--r--deps/v8/test/mjsunit/modules-turbo1.js1
-rw-r--r--deps/v8/test/mjsunit/modules-turbo2.js1
-rw-r--r--deps/v8/test/mjsunit/never-optimize.js3
-rw-r--r--deps/v8/test/mjsunit/number-isnan-opt.js5
-rw-r--r--deps/v8/test/mjsunit/object-freeze.js219
-rw-r--r--deps/v8/test/mjsunit/object-keys-typedarray.js17
-rw-r--r--deps/v8/test/mjsunit/object-prevent-extensions.js181
-rw-r--r--deps/v8/test/mjsunit/object-seal.js219
-rw-r--r--deps/v8/test/mjsunit/optimized-filter.js54
-rw-r--r--deps/v8/test/mjsunit/optimized-foreach-holey-2.js3
-rw-r--r--deps/v8/test/mjsunit/optimized-foreach-holey.js3
-rw-r--r--deps/v8/test/mjsunit/optimized-foreach-polymorph.js15
-rw-r--r--deps/v8/test/mjsunit/optimized-includes-polymorph.js1
-rw-r--r--deps/v8/test/mjsunit/optimized-map.js93
-rw-r--r--deps/v8/test/mjsunit/optimized-reduce.js5
-rw-r--r--deps/v8/test/mjsunit/parallel-optimize-disabled.js2
-rw-r--r--deps/v8/test/mjsunit/pixel-array-rounding.js1
-rw-r--r--deps/v8/test/mjsunit/promise-perform-all-resolve-lookup.js28
-rw-r--r--deps/v8/test/mjsunit/promise-perform-all-settled-resolve-lookup.js28
-rw-r--r--deps/v8/test/mjsunit/promise-perfrom-race-resolve-lookup.js28
-rw-r--r--deps/v8/test/mjsunit/prototype-non-existing.js2
-rw-r--r--deps/v8/test/mjsunit/regexp-override-symbol-match-all.js2
-rw-r--r--deps/v8/test/mjsunit/regress-906893.js1
-rw-r--r--deps/v8/test/mjsunit/regress-918763.js1
-rw-r--r--deps/v8/test/mjsunit/regress-958725.js25
-rw-r--r--deps/v8/test/mjsunit/regress-963346.js15
-rw-r--r--deps/v8/test/mjsunit/regress-966460.js11
-rw-r--r--deps/v8/test/mjsunit/regress-v8-8445-2.js2
-rw-r--r--deps/v8/test/mjsunit/regress-v8-8445.js2
-rw-r--r--deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1118.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1257.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2132.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2339.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2451.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-252797.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-298269.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2989.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3032.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3650-3.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3709.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-379770.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-385565.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3976.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4121.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4380.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5252.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5262.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-612412.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6607-1.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6607-2.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-666046.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6941.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6948.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6989.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6991.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7014-1.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7014-2.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-7135.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-852765.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-8913.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-9017.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-902552.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-903697.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-9105.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-9165.js47
-rw-r--r--deps/v8/test/mjsunit/regress/regress-961237.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-961508.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-963891.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-976627.js40
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-150545.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-319860.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-480807.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-513507.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-522895.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-554831.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-587068.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-594183.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-638551.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-640369.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-645888.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-662830.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-665587.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-668795.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-827013.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-899464.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-9161.js59
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-934166.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-935800.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-937734.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-941703.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-951400.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-959645-1.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-959645-2.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-959727.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-961522.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-961709-1.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-961709-2.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-963568.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-964833.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-964869.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-967065.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-967151.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-967254.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-967434.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-971383.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-980529.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-osr-context.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-param-local-type.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-store-uncacheable.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-trap-allocation-memento.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-4153-1.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-7682.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9243.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9267-1.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-9267-2.js23
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-834619.js1
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-9017.js38
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-952342.js18
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-956771.js28
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-956771b.js19
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-957405.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-961129.js18
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-964607.js29
-rw-r--r--deps/v8/test/mjsunit/reindexing-in-classes.js72
-rw-r--r--deps/v8/test/mjsunit/shared-function-tier-up-turbo.js3
-rw-r--r--deps/v8/test/mjsunit/shift-for-integer-div.js6
-rw-r--r--deps/v8/test/mjsunit/sin-cos.js1
-rw-r--r--deps/v8/test/mjsunit/smi-mul-const.js1
-rw-r--r--deps/v8/test/mjsunit/smi-mul.js2
-rw-r--r--deps/v8/test/mjsunit/smi-representation.js1
-rw-r--r--deps/v8/test/mjsunit/stack-trace-cpp-function-template-1.js37
-rw-r--r--deps/v8/test/mjsunit/stack-trace-cpp-function-template-2.js45
-rw-r--r--deps/v8/test/mjsunit/strict-mode.js10
-rw-r--r--deps/v8/test/mjsunit/string-charcodeat-external.js1
-rw-r--r--deps/v8/test/mjsunit/string-deopt.js16
-rw-r--r--deps/v8/test/mjsunit/strong-rooted-literals.js13
-rw-r--r--deps/v8/test/mjsunit/switch.js1
-rw-r--r--deps/v8/test/mjsunit/tools/trace-ic.js62
-rw-r--r--deps/v8/test/mjsunit/track-fields.js12
-rw-r--r--deps/v8/test/mjsunit/ubsan-fuzzerbugs.js18
-rw-r--r--deps/v8/test/mjsunit/unary-minus-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/unbox-double-arrays.js11
-rw-r--r--deps/v8/test/mjsunit/unbox-smi-field.js1
-rw-r--r--deps/v8/test/mjsunit/undetectable.js5
-rw-r--r--deps/v8/test/mjsunit/value-wrapper-accessor.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/anyfunc.js101
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-globals.js86
-rw-r--r--deps/v8/test/mjsunit/wasm/anyref-table.js16
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/README19
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast1047
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast.js445
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast308
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast.js470
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast130
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast.js170
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast392
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast.js505
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast5685
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast.js13859
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast673
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast.js440
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast947
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast.js866
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast1469
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast.js2651
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast1602
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast.js2096
-rw-r--r--deps/v8/test/mjsunit/wasm/bulk-memory.js11
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js32
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js56
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js113
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-lazy-validation.js38
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js53
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-streaming-lazy-validation.js33
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions-anyref.js45
-rw-r--r--deps/v8/test/mjsunit/wasm/import-table.js36
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js8
-rw-r--r--deps/v8/test/mjsunit/wasm/interpreter.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/js-api.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/multi-table-element-section.js134
-rw-r--r--deps/v8/test/mjsunit/wasm/return-calls.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/streaming-api.js43
-rw-r--r--deps/v8/test/mjsunit/wasm/table-access.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/table-fill.js200
-rw-r--r--deps/v8/test/mjsunit/wasm/table-get.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js240
-rw-r--r--deps/v8/test/mjsunit/wasm/table-grow.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/type-reflection.js100
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js57
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc158
-rw-r--r--deps/v8/test/mozilla/mozilla.status30
-rw-r--r--deps/v8/test/test262/test262.status42
-rw-r--r--deps/v8/test/test262/testcfg.py14
-rw-r--r--deps/v8/test/torque/test-torque.tq111
-rw-r--r--deps/v8/test/unittests/BUILD.gn46
-rw-r--r--deps/v8/test/unittests/api/exception-unittest.cc2
-rw-r--r--deps/v8/test/unittests/api/isolate-unittest.cc8
-rw-r--r--deps/v8/test/unittests/api/remote-object-unittest.cc8
-rw-r--r--deps/v8/test/unittests/api/v8-object-unittest.cc5
-rw-r--r--deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc2
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc118
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc119
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc4
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc6
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc6
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc6
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc6
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc4
-rw-r--r--deps/v8/test/unittests/base/division-by-constant-unittest.cc5
-rw-r--r--deps/v8/test/unittests/base/flags-unittest.cc7
-rw-r--r--deps/v8/test/unittests/base/functional-unittest.cc19
-rw-r--r--deps/v8/test/unittests/base/iterator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/logging-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/ostreams-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/platform/time-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/threaded-list-unittest.cc2
-rw-r--r--deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc (renamed from deps/v8/test/unittests/code-stub-assembler-unittest.cc)10
-rw-r--r--deps/v8/test/unittests/codegen/code-stub-assembler-unittest.h (renamed from deps/v8/test/unittests/code-stub-assembler-unittest.h)2
-rw-r--r--deps/v8/test/unittests/codegen/register-configuration-unittest.cc (renamed from deps/v8/test/unittests/register-configuration-unittest.cc)2
-rw-r--r--deps/v8/test/unittests/codegen/source-position-table-unittest.cc (renamed from deps/v8/test/unittests/source-position-table-unittest.cc)6
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc47
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc362
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h91
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h12
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/code-assembler-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/control-equivalence-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc1094
-rw-r--r--deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc30
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h2
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc31
-rw-r--r--deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc35
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc14
-rw-r--r--deps/v8/test/unittests/compiler/mips/OWNERS1
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc90
-rw-r--r--deps/v8/test/unittests/compiler/mips64/OWNERS1
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc77
-rw-r--r--deps/v8/test/unittests/compiler/node-cache-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc6
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h2
-rw-r--r--deps/v8/test/unittests/compiler/node-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc21
-rw-r--r--deps/v8/test/unittests/compiler/s390/OWNERS4
-rw-r--r--deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/schedule-unittest.cc7
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc19
-rw-r--r--deps/v8/test/unittests/compiler/state-values-utils-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc27
-rw-r--r--deps/v8/test/unittests/date/date-cache-unittest.cc109
-rw-r--r--deps/v8/test/unittests/diagnostics/eh-frame-iterator-unittest.cc (renamed from deps/v8/test/unittests/eh-frame-iterator-unittest.cc)2
-rw-r--r--deps/v8/test/unittests/diagnostics/eh-frame-writer-unittest.cc (renamed from deps/v8/test/unittests/eh-frame-writer-unittest.cc)2
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc (renamed from deps/v8/test/unittests/microtask-queue-unittest.cc)14
-rw-r--r--deps/v8/test/unittests/heap/code-object-registry-unittest.cc92
-rw-r--r--deps/v8/test/unittests/heap/embedder-tracing-unittest.cc21
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc176
-rw-r--r--deps/v8/test/unittests/heap/heap-controller-unittest.cc121
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/marking-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/memory-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/object-stats-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/scavenge-job-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc6
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc4
-rw-r--r--deps/v8/test/unittests/heap/unmapper-unittest.cc8
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc8
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc10
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc18
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc12
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-operands-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-source-info-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-utils.h2
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc28
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc8
-rw-r--r--deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc20
-rw-r--r--deps/v8/test/unittests/logging/counters-unittest.cc (renamed from deps/v8/test/unittests/counters-unittest.cc)25
-rw-r--r--deps/v8/test/unittests/numbers/bigint-unittest.cc (renamed from deps/v8/test/unittests/bigint-unittest.cc)8
-rw-r--r--deps/v8/test/unittests/numbers/conversions-unittest.cc (renamed from deps/v8/test/unittests/conversions-unittest.cc)4
-rw-r--r--deps/v8/test/unittests/objects/object-unittest.cc (renamed from deps/v8/test/unittests/object-unittest.cc)14
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc (renamed from deps/v8/test/unittests/value-serializer-unittest.cc)6
-rw-r--r--deps/v8/test/unittests/parser/ast-value-unittest.cc4
-rw-r--r--deps/v8/test/unittests/parser/preparser-unittest.cc4
-rw-r--r--deps/v8/test/unittests/profiler/strings-storage-unittest.cc (renamed from deps/v8/test/unittests/strings-storage-unittest.cc)2
-rw-r--r--deps/v8/test/unittests/regress/regress-crbug-938251-unittest.cc2
-rw-r--r--deps/v8/test/unittests/run-all-unittests.cc1
-rw-r--r--deps/v8/test/unittests/strings/char-predicates-unittest.cc (renamed from deps/v8/test/unittests/char-predicates-unittest.cc)9
-rw-r--r--deps/v8/test/unittests/strings/unicode-unittest.cc (renamed from deps/v8/test/unittests/unicode-unittest.cc)39
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc (renamed from deps/v8/test/unittests/background-compile-task-unittest.cc)14
-rw-r--r--deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc (renamed from deps/v8/test/unittests/cancelable-tasks-unittest.cc)2
-rw-r--r--deps/v8/test/unittests/test-helpers.cc10
-rw-r--r--deps/v8/test/unittests/test-utils.cc14
-rw-r--r--deps/v8/test/unittests/test-utils.h12
-rw-r--r--deps/v8/test/unittests/torque/ls-message-unittest.cc93
-rw-r--r--deps/v8/test/unittests/torque/ls-server-data-unittest.cc156
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc197
-rw-r--r--deps/v8/test/unittests/torque/torque-utils-unittest.cc11
-rw-r--r--deps/v8/test/unittests/unittests.status5
-rw-r--r--deps/v8/test/unittests/utils/allocation-unittest.cc (renamed from deps/v8/test/unittests/allocation-unittest.cc)2
-rw-r--r--deps/v8/test/unittests/utils/detachable-vector-unittest.cc (renamed from deps/v8/test/unittests/detachable-vector-unittest.cc)2
-rw-r--r--deps/v8/test/unittests/utils/locked-queue-unittest.cc (renamed from deps/v8/test/unittests/locked-queue-unittest.cc)8
-rw-r--r--deps/v8/test/unittests/utils/utils-unittest.cc (renamed from deps/v8/test/unittests/utils-unittest.cc)22
-rw-r--r--deps/v8/test/unittests/wasm/control-transfer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc154
-rw-r--r--deps/v8/test/unittests/wasm/leb-helper-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc8
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc87
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc10
-rw-r--r--deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc102
-rw-r--r--deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc4
-rw-r--r--deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc4
-rw-r--r--deps/v8/test/wasm-api-tests/BUILD.gn35
-rw-r--r--deps/v8/test/wasm-api-tests/DEPS5
-rw-r--r--deps/v8/test/wasm-api-tests/OWNERS1
-rw-r--r--deps/v8/test/wasm-api-tests/callbacks.cc195
-rw-r--r--deps/v8/test/wasm-api-tests/run-all-wasm-api-tests.cc17
-rw-r--r--deps/v8/test/wasm-api-tests/testcfg.py85
-rw-r--r--deps/v8/test/wasm-api-tests/wasm-api-tests.status16
-rw-r--r--deps/v8/test/wasm-spec-tests/OWNERS1
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/webkit/webkit.status9
-rw-r--r--deps/v8/third_party/inspector_protocol/.clang-format36
-rw-r--r--deps/v8/third_party/inspector_protocol/BUILD.gn34
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v82
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/code_generator.py32
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/convert_protocol_to_json.py2
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding.cc2190
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding.h510
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc1878
-rw-r--r--deps/v8/third_party/inspector_protocol/encoding/encoding_test_helper.h33
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_cpp.template9
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template69
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template6
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template395
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/encoding_h.template60
-rw-r--r--deps/v8/third_party/inspector_protocol/pdl.py24
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/roll.py47
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template4
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template4
-rw-r--r--deps/v8/third_party/v8/builtins/array-sort.tq395
-rw-r--r--deps/v8/tools/BUILD.gn1
-rw-r--r--deps/v8/tools/OWNERS4
-rwxr-xr-xdeps/v8/tools/bash-completion.sh6
-rw-r--r--deps/v8/tools/cfi/blacklist.txt2
-rwxr-xr-xdeps/v8/tools/check-static-initializers.sh4
-rw-r--r--deps/v8/tools/clusterfuzz/OWNERS4
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/failure_output.txt4
-rw-r--r--deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt7
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie.py13
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie_test.py2
-rw-r--r--deps/v8/tools/clusterfuzz/v8_fuzz_config.py7
-rw-r--r--deps/v8/tools/clusterfuzz/v8_sanity_checks.js3
-rw-r--r--deps/v8/tools/codemap.js14
-rwxr-xr-xdeps/v8/tools/dev/gm.py10
-rw-r--r--deps/v8/tools/dumpcpp-driver.js3
-rw-r--r--deps/v8/tools/gcmole/BUILD.gn2
-rw-r--r--deps/v8/tools/gcmole/README2
-rw-r--r--deps/v8/tools/gcmole/gcmole-test.cc6
-rwxr-xr-x[-rw-r--r--]deps/v8/tools/gcmole/package.sh0
-rwxr-xr-xdeps/v8/tools/gcmole/run-gcmole.py2
-rw-r--r--deps/v8/tools/gdbinit85
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py23
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py4
-rwxr-xr-xdeps/v8/tools/js2c.py13
-rwxr-xr-xdeps/v8/tools/mb/mb.py54
-rwxr-xr-xdeps/v8/tools/mb/mb_unittest.py26
-rwxr-xr-xdeps/v8/tools/node/build_gn.py143
-rwxr-xr-xdeps/v8/tools/node/test_update_node.py125
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/.gitignore7
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me1
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo1
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/delete_me1
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h20
-rw-r--r--deps/v8/tools/node/testdata/node/deps/v8/v8_foo1
-rw-r--r--deps/v8/tools/node/testdata/v8/.gitignore3
-rw-r--r--deps/v8/tools/node/testdata/v8/base/trace_event/common/common0
-rw-r--r--deps/v8/tools/node/testdata/v8/baz/v8_foo1
-rw-r--r--deps/v8/tools/node/testdata/v8/baz/v8_new1
-rw-r--r--deps/v8/tools/node/testdata/v8/new/v8_new1
-rw-r--r--deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo1
-rw-r--r--deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new1
-rw-r--r--deps/v8/tools/node/testdata/v8/testing/gtest/gtest_bar1
-rw-r--r--deps/v8/tools/node/testdata/v8/testing/gtest/gtest_new1
-rw-r--r--deps/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new1
-rw-r--r--deps/v8/tools/node/testdata/v8/v8_foo1
-rw-r--r--deps/v8/tools/node/testdata/v8/v8_new1
-rwxr-xr-xdeps/v8/tools/node/update_node.py180
-rw-r--r--deps/v8/tools/profviz/worker.js2
-rwxr-xr-x[-rw-r--r--]deps/v8/tools/run-wasm-api-tests.py16
-rw-r--r--[-rwxr-xr-x]deps/v8/tools/run_perf.py937
-rw-r--r--deps/v8/tools/shell-utils.h2
-rw-r--r--deps/v8/tools/testrunner/OWNERS4
-rw-r--r--deps/v8/tools/testrunner/base_runner.py27
-rw-r--r--deps/v8/tools/testrunner/local/command.py9
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/variants.py4
-rw-r--r--deps/v8/tools/testrunner/objects/output.py16
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py9
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py5
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py63
-rw-r--r--deps/v8/tools/tick-processor.html3
-rw-r--r--deps/v8/tools/tickprocessor-driver.js3
-rw-r--r--deps/v8/tools/tickprocessor.js15
-rwxr-xr-xdeps/v8/tools/torque/format-torque.py21
-rwxr-xr-xdeps/v8/tools/torque/make-torque-parser.py71
-rw-r--r--deps/v8/tools/torque/vim-torque/syntax/torque.vim2
-rw-r--r--deps/v8/tools/torque/vscode-torque/package.json8
-rw-r--r--deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json350
-rwxr-xr-xdeps/v8/tools/unittests/run_perf_test.py277
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json1
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json1
-rwxr-xr-xdeps/v8/tools/v8_presubmit.py8
-rw-r--r--deps/v8/tools/v8heapconst.py462
-rw-r--r--deps/v8/tools/vim/ninja-build.vim5
-rw-r--r--deps/v8/tools/wasm-compilation-hints/OWNERS2
-rwxr-xr-xdeps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py60
-rwxr-xr-xdeps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py47
-rw-r--r--deps/v8/tools/wasm-compilation-hints/wasm.py108
-rw-r--r--deps/v8/tools/whitespace.txt2
2692 files changed, 122870 insertions, 67915 deletions
diff --git a/deps/v8/.git-blame-ignore-revs b/deps/v8/.git-blame-ignore-revs
index 58d0039ab9..5ae3977031 100644
--- a/deps/v8/.git-blame-ignore-revs
+++ b/deps/v8/.git-blame-ignore-revs
@@ -20,3 +20,6 @@
# Update of quotations in DEPS file.
e50b49a0e38b34e2b28e026f4d1c7e0da0c7bb1a
+
+# Rewrite code base to use "." instead of "->" to access Object members.
+878ccb33bd3cf0e6dc018ff8d15843f585ac07be
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 7fc0f66b37..6350393ebf 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -94,7 +94,6 @@ GTAGS
TAGS
bsuite
compile_commands.json
-d8
!/test/mjsunit/d8
d8_g
gccauses
diff --git a/deps/v8/.gn b/deps/v8/.gn
index 573fd030d8..328778fb46 100644
--- a/deps/v8/.gn
+++ b/deps/v8/.gn
@@ -7,11 +7,6 @@ import("//build/dotfile_settings.gni")
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
-# The secondary source root is a parallel directory tree where
-# GN build files are placed when they can not be placed directly
-# in the source tree, e.g. for third party source trees.
-secondary_source = "//build/secondary/"
-
# These are the targets to check headers for by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will have
# their includes checked for proper dependencies when you run either
diff --git a/deps/v8/.vpython b/deps/v8/.vpython
index f8d3b7278a..3b7cb32468 100644
--- a/deps/v8/.vpython
+++ b/deps/v8/.vpython
@@ -66,3 +66,11 @@ wheel: <
name: "infra/python/wheels/mock-py2_py3"
version: "version:2.0.0"
>
+
+# Used by:
+# tools/run_perf.py
+# tools/unittests/run_perf_test.py
+wheel: <
+ name: "infra/python/wheels/numpy/${vpython_platform}"
+ version: "version:1.11.3"
+>
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 5182ae5201..1c8424243d 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -22,8 +22,10 @@ Wave Computing, Inc. <*@wavecomp.com>
Loongson Technology Corporation Limited <*@loongson.cn>
Code Aurora Forum <*@codeaurora.org>
Home Jinni Inc. <*@homejinni.com>
-IBM Inc. <*@*ibm.com>
+IBM Inc. <*@*.ibm.com>
+IBM Inc. <*@ibm.com>
Samsung <*@*.samsung.com>
+Samsung <*@samsung.com>
Joyent, Inc <*@joyent.com>
RT-RK Computer Based System <*@rt-rk.com>
Amazon, Inc <*@amazon.com>
@@ -44,6 +46,7 @@ Alessandro Pignotti <alessandro@leaningtech.com>
Alex Kodat <akodat@rocketsoftware.com>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
+Alexander Neville <dark@volatile.bz>
Alexandre Vassalotti <avassalotti@gmail.com>
Alexis Campailla <alexis@janeasystems.com>
Allan Sandfeld Jensen <allan.jensen@qt.io>
@@ -99,6 +102,7 @@ Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
James M Snell <jasnell@gmail.com>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
+Jiawen Geng <technicalcute@gmail.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
@@ -136,6 +140,7 @@ Noj Vek <nojvek@gmail.com>
Oleksandr Chekhovskyi <oleksandr.chekhovskyi@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
+Peng Fei <pfgenyun@gmail.com>
Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Peter Wong <peter.wm.wong@gmail.com>
@@ -145,11 +150,13 @@ PhistucK <phistuck@gmail.com>
Qingyan Li <qingyan.liqy@alibaba-inc.com>
Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
Rafal Krypa <rafal@krypa.net>
+Raul Tambre <raul@tambre.ee>
Ray Glover <ray@rayglover.net>
Refael Ackermann <refack@gmail.com>
Rene Rebe <rene@exactcode.de>
Rick Waldron <waldron.rick@gmail.com>
Rob Wu <rob@robwu.nl>
+Robert Meijer <robert.s.meijer@gmail.com>
Robert Mustacchi <rm@fingolfin.org>
Robert Nagy <robert.nagy@gmail.com>
Ruben Bridgewater <ruben@bridgewater.de>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 10fee26420..8640517ae5 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -15,8 +15,8 @@ if (is_android) {
import("//build/config/android/rules.gni")
}
+import("gni/snapshot_toolchain.gni")
import("gni/v8.gni")
-import("snapshot_toolchain.gni")
# Specifies if the target build is a simulator build. Comparing target cpu
# with v8 target cpu to not affect simulator builds for making cross-compile
@@ -85,11 +85,14 @@ declare_args() {
# Enable fast mksnapshot runs.
v8_enable_fast_mksnapshot = false
+ # Optimize code for Torque executable, even during a debug build.
+ v8_enable_fast_torque = ""
+
# Enable embedded builtins.
v8_enable_embedded_builtins = true
# Enable the registration of unwinding info for Windows/x64.
- v8_win64_unwinding_info = false
+ v8_win64_unwinding_info = true
# Enable code comments for builtins in the snapshot (impacts performance).
v8_enable_snapshot_code_comments = false
@@ -136,10 +139,6 @@ declare_args() {
# Use Siphash as added protection against hash flooding attacks.
v8_use_siphash = false
- # Use Perfetto (https://perfetto.dev) as the default TracingController. Not
- # currently implemented.
- v8_use_perfetto = false
-
# Switches off inlining in V8.
v8_no_inline = false
@@ -186,7 +185,8 @@ declare_args() {
v8_check_header_includes = false
# Enable sharing read-only space across isolates.
- v8_enable_shared_ro_heap = false
+ # Sets -DV8_SHARED_RO_HEAP.
+ v8_enable_shared_ro_heap = ""
}
# We reuse the snapshot toolchain for building torque and other generators to
@@ -224,6 +224,12 @@ if (v8_check_microtasks_scopes_consistency == "") {
if (v8_enable_snapshot_native_code_counters == "") {
v8_enable_snapshot_native_code_counters = v8_enable_debugging_features
}
+if (v8_enable_shared_ro_heap == "") {
+ v8_enable_shared_ro_heap = v8_enable_lite_mode
+}
+if (v8_enable_fast_torque == "") {
+ v8_enable_fast_torque = v8_enable_fast_mksnapshot
+}
assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
"Untrusted code mitigations are unsupported on ia32")
@@ -233,8 +239,9 @@ assert(!v8_enable_lite_mode || v8_enable_embedded_builtins,
assert(!v8_enable_lite_mode || v8_use_snapshot,
"Lite mode requires a snapshot build")
-assert(v8_use_snapshot || !v8_enable_shared_ro_heap,
- "Nosnapshot builds are not supported with shared read-only heap enabled")
+assert(
+ !v8_enable_pointer_compression || !v8_enable_shared_ro_heap,
+ "Pointer compression is not supported with shared read-only heap enabled")
v8_random_seed = "314159265"
v8_toolset_for_shell = "host"
@@ -850,8 +857,8 @@ action("postmortem-metadata") {
# NOSORT
sources = [
- "src/objects.h",
- "src/objects-inl.h",
+ "src/objects/objects.h",
+ "src/objects/objects-inl.h",
"src/objects/allocation-site-inl.h",
"src/objects/allocation-site.h",
"src/objects/cell-inl.h",
@@ -912,11 +919,7 @@ action("postmortem-metadata") {
}
torque_files = [
- "src/builtins/base.tq",
- "src/builtins/growable-fixed-array.tq",
- "src/builtins/frames.tq",
"src/builtins/arguments.tq",
- "src/builtins/array.tq",
"src/builtins/array-copywithin.tq",
"src/builtins/array-every.tq",
"src/builtins/array-filter.tq",
@@ -925,32 +928,45 @@ torque_files = [
"src/builtins/array-foreach.tq",
"src/builtins/array-join.tq",
"src/builtins/array-lastindexof.tq",
- "src/builtins/array-of.tq",
"src/builtins/array-map.tq",
- "src/builtins/array-reduce.tq",
+ "src/builtins/array-of.tq",
"src/builtins/array-reduce-right.tq",
+ "src/builtins/array-reduce.tq",
"src/builtins/array-reverse.tq",
"src/builtins/array-shift.tq",
"src/builtins/array-slice.tq",
"src/builtins/array-some.tq",
"src/builtins/array-splice.tq",
"src/builtins/array-unshift.tq",
+ "src/builtins/array.tq",
+ "src/builtins/base.tq",
+ "src/builtins/boolean.tq",
"src/builtins/collections.tq",
"src/builtins/data-view.tq",
"src/builtins/extras-utils.tq",
+ "src/builtins/frames.tq",
+ "src/builtins/growable-fixed-array.tq",
+ "src/builtins/internal-coverage.tq",
"src/builtins/iterator.tq",
+ "src/builtins/math.tq",
"src/builtins/object-fromentries.tq",
- "src/builtins/proxy.tq",
"src/builtins/proxy-constructor.tq",
+ "src/builtins/proxy-get-property.tq",
+ "src/builtins/proxy-has-property.tq",
"src/builtins/proxy-revocable.tq",
"src/builtins/proxy-revoke.tq",
- "src/builtins/regexp.tq",
+ "src/builtins/proxy-set-property.tq",
+ "src/builtins/proxy.tq",
"src/builtins/regexp-replace.tq",
+ "src/builtins/regexp.tq",
+ "src/builtins/string.tq",
"src/builtins/string-endswith.tq",
"src/builtins/string-html.tq",
+ "src/builtins/string-iterator.tq",
"src/builtins/string-repeat.tq",
+ "src/builtins/string-slice.tq",
"src/builtins/string-startswith.tq",
- "src/builtins/typed-array.tq",
+ "src/builtins/string-substring.tq",
"src/builtins/typed-array-createtypedarray.tq",
"src/builtins/typed-array-every.tq",
"src/builtins/typed-array-filter.tq",
@@ -962,10 +978,16 @@ torque_files = [
"src/builtins/typed-array-slice.tq",
"src/builtins/typed-array-some.tq",
"src/builtins/typed-array-subarray.tq",
+ "src/builtins/typed-array.tq",
+ "src/objects/intl-objects.tq",
"test/torque/test-torque.tq",
"third_party/v8/builtins/array-sort.tq",
]
+if (!v8_enable_i18n_support) {
+ torque_files -= [ "src/objects/intl-objects.tq" ]
+}
+
torque_namespaces = [
"arguments",
"array",
@@ -984,18 +1006,24 @@ torque_namespaces = [
"array-unshift",
"array-lastindexof",
"base",
+ "boolean",
"collections",
"data-view",
"extras-utils",
"growable-fixed-array",
+ "internal-coverage",
"iterator",
+ "math",
"object",
"proxy",
"regexp",
"regexp-replace",
"string",
"string-html",
+ "string-iterator",
"string-repeat",
+ "string-slice",
+ "string-substring",
"test",
"typed-array",
"typed-array-createtypedarray",
@@ -1027,14 +1055,22 @@ action("run_torque") {
sources = torque_files
outputs = [
- "$target_gen_dir/torque-generated/builtin-definitions-from-dsl.h",
- "$target_gen_dir/torque-generated/class-definitions-from-dsl.h",
- "$target_gen_dir/torque-generated/objects-printer-from-dsl.cc",
+ "$target_gen_dir/torque-generated/builtin-definitions-tq.h",
+ "$target_gen_dir/torque-generated/field-offsets-tq.h",
+ "$target_gen_dir/torque-generated/class-verifiers-tq.cc",
+ "$target_gen_dir/torque-generated/class-verifiers-tq.h",
+ "$target_gen_dir/torque-generated/objects-printer-tq.cc",
+ "$target_gen_dir/torque-generated/class-definitions-tq.cc",
+ "$target_gen_dir/torque-generated/class-definitions-tq-inl.h",
+ "$target_gen_dir/torque-generated/class-definitions-tq.h",
+ "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc",
+ "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h",
+ "$target_gen_dir/torque-generated/csa-types-tq.h",
]
foreach(namespace, torque_namespaces) {
outputs += [
- "$target_gen_dir/torque-generated/builtins-$namespace-from-dsl-gen.cc",
- "$target_gen_dir/torque-generated/builtins-$namespace-from-dsl-gen.h",
+ "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.cc",
+ "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.h",
]
}
@@ -1071,11 +1107,15 @@ v8_source_set("torque_generated_initializers") {
":v8_maybe_icu",
]
- sources = []
+ sources = [
+ "$target_gen_dir/torque-generated/csa-types-tq.h",
+ "$target_gen_dir/torque-generated/exported-macros-assembler-tq.cc",
+ "$target_gen_dir/torque-generated/exported-macros-assembler-tq.h",
+ ]
foreach(namespace, torque_namespaces) {
sources += [
- "$target_gen_dir/torque-generated/builtins-$namespace-from-dsl-gen.cc",
- "$target_gen_dir/torque-generated/builtins-$namespace-from-dsl-gen.h",
+ "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.cc",
+ "$target_gen_dir/torque-generated/builtins-$namespace-gen-tq.h",
]
}
@@ -1095,7 +1135,10 @@ v8_source_set("torque_generated_definitions") {
]
sources = [
- "$target_gen_dir/torque-generated/objects-printer-from-dsl.cc",
+ "$target_gen_dir/torque-generated/class-definitions-tq.cc",
+ "$target_gen_dir/torque-generated/class-verifiers-tq.cc",
+ "$target_gen_dir/torque-generated/class-verifiers-tq.h",
+ "$target_gen_dir/torque-generated/objects-printer-tq.cc",
]
configs = [ ":internal_config" ]
@@ -1163,14 +1206,11 @@ template("run_mksnapshot") {
# mksnapshot needs to know which target OS to use at runtime. It's weird,
# but the target OS is really |current_os|.
"--target_os=$current_os",
+ "--target_arch=$current_cpu",
]
args += invoker.args
- if (v8_win64_unwinding_info) {
- args += [ "--win64-unwinding-info" ]
- }
-
if (v8_enable_embedded_builtins) {
outputs += [ "$target_gen_dir/embedded${suffix}.S" ]
args += [
@@ -1278,6 +1318,7 @@ action("v8_dump_build_config") {
"$root_out_dir/v8_build_config.json",
]
is_gcov_coverage = v8_code_coverage && !is_clang
+ is_full_debug = v8_enable_debugging_features && !v8_optimized_debug
args = [
rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
"current_cpu=\"$current_cpu\"",
@@ -1288,6 +1329,7 @@ action("v8_dump_build_config") {
"is_clang=$is_clang",
"is_component_build=$is_component_build",
"is_debug=$v8_enable_debugging_features",
+ "is_full_debug=$is_full_debug",
"is_gcov_coverage=$is_gcov_coverage",
"is_msan=$is_msan",
"is_tsan=$is_tsan",
@@ -1349,7 +1391,7 @@ v8_source_set("v8_nosnapshot") {
sources = [
"$target_gen_dir/extras-libraries.cc",
- "src/snapshot/embedded-empty.cc",
+ "src/snapshot/embedded/embedded-empty.cc",
"src/snapshot/snapshot-empty.cc",
]
@@ -1382,7 +1424,7 @@ if (v8_use_snapshot && !v8_use_external_startup_data) {
sources = [
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/snapshot.cc",
- "src/setup-isolate-deserialize.cc",
+ "src/init/setup-isolate-deserialize.cc",
]
if (v8_enable_embedded_builtins && emit_builtins_as_inline_asm) {
@@ -1391,7 +1433,7 @@ if (v8_use_snapshot && !v8_use_external_startup_data) {
} else if (v8_enable_embedded_builtins) {
sources += [ "$target_gen_dir/embedded.S" ]
} else {
- sources += [ "src/snapshot/embedded-empty.cc" ]
+ sources += [ "src/snapshot/embedded/embedded-empty.cc" ]
}
configs = [ ":internal_config" ]
@@ -1416,7 +1458,7 @@ if (v8_use_snapshot && v8_use_external_startup_data) {
}
sources = [
- "src/setup-isolate-deserialize.cc",
+ "src/init/setup-isolate-deserialize.cc",
"src/snapshot/natives-external.cc",
"src/snapshot/snapshot-external.cc",
]
@@ -1444,7 +1486,7 @@ if (v8_use_snapshot && v8_use_external_startup_data) {
]
}
} else {
- sources += [ "src/snapshot/embedded-empty.cc" ]
+ sources += [ "src/snapshot/embedded/embedded-empty.cc" ]
}
configs = [ ":internal_config" ]
@@ -1520,8 +1562,8 @@ v8_source_set("v8_initializers") {
"src/builtins/growable-fixed-array-gen.cc",
"src/builtins/growable-fixed-array-gen.h",
"src/builtins/setup-builtins-internal.cc",
- "src/code-stub-assembler.cc",
- "src/code-stub-assembler.h",
+ "src/codegen/code-stub-assembler.cc",
+ "src/codegen/code-stub-assembler.h",
"src/heap/setup-heap-internal.cc",
"src/ic/accessor-assembler.cc",
"src/ic/accessor-assembler.h",
@@ -1545,7 +1587,7 @@ v8_source_set("v8_initializers") {
# These source files take an unusually large amount of time to
# compile. Build them separately to avoid bottlenecks.
"src/builtins/builtins-regexp-gen.cc",
- "src/code-stub-assembler.cc",
+ "src/codegen/code-stub-assembler.cc",
]
if (v8_current_cpu == "x86") {
@@ -1606,7 +1648,7 @@ v8_source_set("v8_init") {
sources = [
### gcmole(all) ###
- "src/setup-isolate-full.cc",
+ "src/init/setup-isolate-full.cc",
]
public_deps = [
@@ -1659,7 +1701,7 @@ v8_header_set("v8_shared_internal_headers") {
configs = [ ":internal_config" ]
sources = [
- "src/globals.h",
+ "src/common/globals.h",
]
deps = [
@@ -1668,6 +1710,7 @@ v8_header_set("v8_shared_internal_headers") {
}
v8_compiler_sources = [
+ ### gcmole(all) ###
"src/compiler/access-builder.cc",
"src/compiler/access-builder.h",
"src/compiler/access-info.cc",
@@ -1735,6 +1778,8 @@ v8_compiler_sources = [
"src/compiler/control-flow-optimizer.h",
"src/compiler/dead-code-elimination.cc",
"src/compiler/dead-code-elimination.h",
+ "src/compiler/decompression-elimination.cc",
+ "src/compiler/decompression-elimination.h",
"src/compiler/diamond.h",
"src/compiler/effect-control-linearizer.cc",
"src/compiler/effect-control-linearizer.h",
@@ -1805,6 +1850,8 @@ v8_compiler_sources = [
"src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
"src/compiler/machine-operator.h",
+ "src/compiler/map-inference.cc",
+ "src/compiler/map-inference.h",
"src/compiler/memory-optimizer.cc",
"src/compiler/memory-optimizer.h",
"src/compiler/node-aux-data.h",
@@ -1878,10 +1925,13 @@ v8_compiler_sources = [
"src/compiler/types.h",
"src/compiler/value-numbering-reducer.cc",
"src/compiler/value-numbering-reducer.h",
+ "src/compiler/vector-slot-pair.cc",
+ "src/compiler/vector-slot-pair.h",
"src/compiler/verifier.cc",
"src/compiler/verifier.h",
"src/compiler/wasm-compiler.cc",
"src/compiler/wasm-compiler.h",
+ "src/compiler/write-barrier-kind.h",
"src/compiler/zone-stats.cc",
"src/compiler/zone-stats.h",
]
@@ -1958,25 +2008,13 @@ v8_source_set("v8_base_without_compiler") {
"include/v8-wasm-trap-handler-posix.h",
"include/v8.h",
"include/v8config.h",
- "src/accessors.cc",
- "src/accessors.h",
- "src/address-map.cc",
- "src/address-map.h",
- "src/allocation-site-scopes-inl.h",
- "src/allocation-site-scopes.h",
- "src/allocation.cc",
- "src/allocation.h",
- "src/api-arguments-inl.h",
- "src/api-arguments.cc",
- "src/api-arguments.h",
- "src/api-natives.cc",
- "src/api-natives.h",
- "src/api.cc",
- "src/api.h",
- "src/arguments-inl.h",
- "src/arguments.cc",
- "src/arguments.h",
- "src/asan.h",
+ "src/api/api-arguments-inl.h",
+ "src/api/api-arguments.cc",
+ "src/api/api-arguments.h",
+ "src/api/api-natives.cc",
+ "src/api/api-natives.h",
+ "src/api/api.cc",
+ "src/api/api.h",
"src/asmjs/asm-js.cc",
"src/asmjs/asm-js.h",
"src/asmjs/asm-names.h",
@@ -1986,12 +2024,6 @@ v8_source_set("v8_base_without_compiler") {
"src/asmjs/asm-scanner.h",
"src/asmjs/asm-types.cc",
"src/asmjs/asm-types.h",
- "src/assembler-arch.h",
- "src/assembler-inl.h",
- "src/assembler.cc",
- "src/assembler.h",
- "src/assert-scope.cc",
- "src/assert-scope.h",
"src/ast/ast-function-literal-id-reindexer.cc",
"src/ast/ast-function-literal-id-reindexer.h",
"src/ast/ast-source-ranges.h",
@@ -2010,24 +2042,12 @@ v8_source_set("v8_base_without_compiler") {
"src/ast/source-range-ast-visitor.h",
"src/ast/variables.cc",
"src/ast/variables.h",
- "src/bailout-reason.cc",
- "src/bailout-reason.h",
- "src/basic-block-profiler.cc",
- "src/basic-block-profiler.h",
- "src/bignum-dtoa.cc",
- "src/bignum-dtoa.h",
- "src/bignum.cc",
- "src/bignum.h",
- "src/bit-vector.cc",
- "src/bit-vector.h",
- "src/bootstrapper.cc",
- "src/bootstrapper.h",
- "src/boxed-float.h",
+ "src/builtins/accessors.cc",
+ "src/builtins/accessors.h",
"src/builtins/builtins-api.cc",
"src/builtins/builtins-array.cc",
"src/builtins/builtins-arraybuffer.cc",
"src/builtins/builtins-bigint.cc",
- "src/builtins/builtins-boolean.cc",
"src/builtins/builtins-call.cc",
"src/builtins/builtins-callsite.cc",
"src/builtins/builtins-collections.cc",
@@ -2063,55 +2083,81 @@ v8_source_set("v8_base_without_compiler") {
"src/builtins/builtins.h",
"src/builtins/constants-table-builder.cc",
"src/builtins/constants-table-builder.h",
- "src/cached-powers.cc",
- "src/cached-powers.h",
- "src/callable.h",
- "src/cancelable-task.cc",
- "src/cancelable-task.h",
- "src/char-predicates-inl.h",
- "src/char-predicates.cc",
- "src/char-predicates.h",
- "src/checks.h",
- "src/code-comments.cc",
- "src/code-comments.h",
- "src/code-desc.cc",
- "src/code-desc.h",
- "src/code-events.h",
- "src/code-factory.cc",
- "src/code-factory.h",
- "src/code-reference.cc",
- "src/code-reference.h",
- "src/code-tracer.h",
- "src/collector.h",
- "src/compilation-cache.cc",
- "src/compilation-cache.h",
- "src/compilation-statistics.cc",
- "src/compilation-statistics.h",
+ "src/codegen/assembler-arch.h",
+ "src/codegen/assembler-inl.h",
+ "src/codegen/assembler.cc",
+ "src/codegen/assembler.h",
+ "src/codegen/bailout-reason.cc",
+ "src/codegen/bailout-reason.h",
+ "src/codegen/callable.h",
+ "src/codegen/code-comments.cc",
+ "src/codegen/code-comments.h",
+ "src/codegen/code-desc.cc",
+ "src/codegen/code-desc.h",
+ "src/codegen/code-factory.cc",
+ "src/codegen/code-factory.h",
+ "src/codegen/code-reference.cc",
+ "src/codegen/code-reference.h",
+ "src/codegen/compilation-cache.cc",
+ "src/codegen/compilation-cache.h",
+ "src/codegen/compiler.cc",
+ "src/codegen/compiler.h",
+ "src/codegen/constant-pool.cc",
+ "src/codegen/constant-pool.h",
+ "src/codegen/constants-arch.h",
+ "src/codegen/cpu-features.h",
+ "src/codegen/external-reference-table.cc",
+ "src/codegen/external-reference-table.h",
+ "src/codegen/external-reference.cc",
+ "src/codegen/external-reference.h",
+ "src/codegen/flush-instruction-cache.cc",
+ "src/codegen/flush-instruction-cache.h",
+ "src/codegen/handler-table.cc",
+ "src/codegen/handler-table.h",
+ "src/codegen/interface-descriptors.cc",
+ "src/codegen/interface-descriptors.h",
+ "src/codegen/label.h",
+ "src/codegen/machine-type.cc",
+ "src/codegen/machine-type.h",
+ "src/codegen/macro-assembler-inl.h",
+ "src/codegen/macro-assembler.h",
+ "src/codegen/optimized-compilation-info.cc",
+ "src/codegen/optimized-compilation-info.h",
+ "src/codegen/register-arch.h",
+ "src/codegen/register-configuration.cc",
+ "src/codegen/register-configuration.h",
+ "src/codegen/register.h",
+ "src/codegen/reglist.h",
+ "src/codegen/reloc-info.cc",
+ "src/codegen/reloc-info.h",
+ "src/codegen/safepoint-table.cc",
+ "src/codegen/safepoint-table.h",
+ "src/codegen/signature.h",
+ "src/codegen/source-position-table.cc",
+ "src/codegen/source-position-table.h",
+ "src/codegen/source-position.cc",
+ "src/codegen/source-position.h",
+ "src/codegen/string-constants.cc",
+ "src/codegen/string-constants.h",
+ "src/codegen/turbo-assembler.cc",
+ "src/codegen/turbo-assembler.h",
+ "src/codegen/unoptimized-compilation-info.cc",
+ "src/codegen/unoptimized-compilation-info.h",
+ "src/common/assert-scope.cc",
+ "src/common/assert-scope.h",
+ "src/common/checks.h",
+ "src/common/ptr-compr-inl.h",
+ "src/common/ptr-compr.h",
+ "src/common/v8memory.h",
"src/compiler-dispatcher/compiler-dispatcher.cc",
"src/compiler-dispatcher/compiler-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
- "src/compiler.cc",
- "src/compiler.h",
- "src/constant-pool.cc",
- "src/constant-pool.h",
- "src/constants-arch.h",
- "src/contexts-inl.h",
- "src/contexts.cc",
- "src/contexts.h",
- "src/conversions-inl.h",
- "src/conversions.cc",
- "src/conversions.h",
- "src/counters-definitions.h",
- "src/counters-inl.h",
- "src/counters.cc",
- "src/counters.h",
- "src/cpu-features.h",
- "src/date.cc",
- "src/date.h",
- "src/dateparser-inl.h",
- "src/dateparser.cc",
- "src/dateparser.h",
+ "src/date/date.cc",
+ "src/date/date.h",
+ "src/date/dateparser-inl.h",
+ "src/date/dateparser.cc",
+ "src/date/dateparser.h",
"src/debug/debug-coverage.cc",
"src/debug/debug-coverage.h",
"src/debug/debug-evaluate.cc",
@@ -2134,29 +2180,60 @@ v8_source_set("v8_base_without_compiler") {
"src/debug/interface-types.h",
"src/debug/liveedit.cc",
"src/debug/liveedit.h",
- "src/deoptimize-reason.cc",
- "src/deoptimize-reason.h",
- "src/deoptimizer.cc",
- "src/deoptimizer.h",
- "src/detachable-vector.cc",
- "src/detachable-vector.h",
- "src/disasm.h",
- "src/disassembler.cc",
- "src/disassembler.h",
- "src/diy-fp.cc",
- "src/diy-fp.h",
- "src/double.h",
- "src/dtoa.cc",
- "src/dtoa.h",
- "src/eh-frame.cc",
- "src/eh-frame.h",
- "src/elements-inl.h",
- "src/elements-kind.cc",
- "src/elements-kind.h",
- "src/elements.cc",
- "src/elements.h",
- "src/execution.cc",
- "src/execution.h",
+ "src/deoptimizer/deoptimize-reason.cc",
+ "src/deoptimizer/deoptimize-reason.h",
+ "src/deoptimizer/deoptimizer.cc",
+ "src/deoptimizer/deoptimizer.h",
+ "src/diagnostics/basic-block-profiler.cc",
+ "src/diagnostics/basic-block-profiler.h",
+ "src/diagnostics/code-tracer.h",
+ "src/diagnostics/compilation-statistics.cc",
+ "src/diagnostics/compilation-statistics.h",
+ "src/diagnostics/disasm.h",
+ "src/diagnostics/disassembler.cc",
+ "src/diagnostics/disassembler.h",
+ "src/diagnostics/eh-frame.cc",
+ "src/diagnostics/eh-frame.h",
+ "src/diagnostics/gdb-jit.cc",
+ "src/diagnostics/gdb-jit.h",
+ "src/diagnostics/objects-debug.cc",
+ "src/diagnostics/objects-printer.cc",
+ "src/diagnostics/perf-jit.cc",
+ "src/diagnostics/perf-jit.h",
+ "src/diagnostics/unwinder.cc",
+ "src/execution/arguments-inl.h",
+ "src/execution/arguments.cc",
+ "src/execution/arguments.h",
+ "src/execution/execution.cc",
+ "src/execution/execution.h",
+ "src/execution/frame-constants.h",
+ "src/execution/frames-inl.h",
+ "src/execution/frames.cc",
+ "src/execution/frames.h",
+ "src/execution/futex-emulation.cc",
+ "src/execution/futex-emulation.h",
+ "src/execution/isolate-data.h",
+ "src/execution/isolate-inl.h",
+ "src/execution/isolate.cc",
+ "src/execution/isolate.h",
+ "src/execution/message-template.h",
+ "src/execution/messages.cc",
+ "src/execution/messages.h",
+ "src/execution/microtask-queue.cc",
+ "src/execution/microtask-queue.h",
+ "src/execution/runtime-profiler.cc",
+ "src/execution/runtime-profiler.h",
+ "src/execution/simulator-base.cc",
+ "src/execution/simulator-base.h",
+ "src/execution/simulator.h",
+ "src/execution/thread-id.cc",
+ "src/execution/thread-id.h",
+ "src/execution/thread-local-top.cc",
+ "src/execution/thread-local-top.h",
+ "src/execution/v8threads.cc",
+ "src/execution/v8threads.h",
+ "src/execution/vm-state-inl.h",
+ "src/execution/vm-state.h",
"src/extensions/externalize-string-extension.cc",
"src/extensions/externalize-string-extension.h",
"src/extensions/free-buffer-extension.cc",
@@ -2169,44 +2246,16 @@ v8_source_set("v8_base_without_compiler") {
"src/extensions/statistics-extension.h",
"src/extensions/trigger-failure-extension.cc",
"src/extensions/trigger-failure-extension.h",
- "src/external-reference-table.cc",
- "src/external-reference-table.h",
- "src/external-reference.cc",
- "src/external-reference.h",
- "src/fast-dtoa.cc",
- "src/fast-dtoa.h",
- "src/feedback-vector-inl.h",
- "src/feedback-vector.cc",
- "src/feedback-vector.h",
- "src/field-index-inl.h",
- "src/field-index.h",
- "src/field-type.cc",
- "src/field-type.h",
- "src/fixed-dtoa.cc",
- "src/fixed-dtoa.h",
- "src/flag-definitions.h",
- "src/flags.cc",
- "src/flags.h",
- "src/flush-instruction-cache.cc",
- "src/flush-instruction-cache.h",
- "src/frame-constants.h",
- "src/frames-inl.h",
- "src/frames.cc",
- "src/frames.h",
- "src/function-kind.h",
- "src/futex-emulation.cc",
- "src/futex-emulation.h",
- "src/gdb-jit.cc",
- "src/gdb-jit.h",
- "src/global-handles.cc",
- "src/global-handles.h",
- "src/handler-table.cc",
- "src/handler-table.h",
- "src/handles-inl.h",
- "src/handles.cc",
- "src/handles.h",
- "src/hash-seed-inl.h",
- "src/heap-symbols.h",
+ "src/flags/flag-definitions.h",
+ "src/flags/flags.cc",
+ "src/flags/flags.h",
+ "src/handles/global-handles.cc",
+ "src/handles/global-handles.h",
+ "src/handles/handles-inl.h",
+ "src/handles/handles.cc",
+ "src/handles/handles.h",
+ "src/handles/maybe-handles-inl.h",
+ "src/handles/maybe-handles.h",
"src/heap/array-buffer-collector.cc",
"src/heap/array-buffer-collector.h",
"src/heap/array-buffer-tracker-inl.h",
@@ -2215,6 +2264,8 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/barrier.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
+ "src/heap/combined-heap.cc",
+ "src/heap/combined-heap.h",
"src/heap/concurrent-marking.cc",
"src/heap/concurrent-marking.h",
"src/heap/embedder-tracing.cc",
@@ -2292,12 +2343,18 @@ v8_source_set("v8_base_without_compiler") {
"src/ic/ic.h",
"src/ic/stub-cache.cc",
"src/ic/stub-cache.h",
- "src/icu_util.cc",
- "src/icu_util.h",
- "src/identity-map.cc",
- "src/identity-map.h",
- "src/interface-descriptors.cc",
- "src/interface-descriptors.h",
+ "src/init/bootstrapper.cc",
+ "src/init/bootstrapper.h",
+ "src/init/heap-symbols.h",
+ "src/init/icu_util.cc",
+ "src/init/icu_util.h",
+ "src/init/isolate-allocator.cc",
+ "src/init/isolate-allocator.h",
+ "src/init/setup-isolate.h",
+ "src/init/startup-data-util.cc",
+ "src/init/startup-data-util.h",
+ "src/init/v8.cc",
+ "src/init/v8.h",
"src/interpreter/block-coverage-builder.h",
"src/interpreter/bytecode-array-accessor.cc",
"src/interpreter/bytecode-array-accessor.h",
@@ -2343,61 +2400,46 @@ v8_source_set("v8_base_without_compiler") {
"src/interpreter/interpreter-intrinsics.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
- "src/isolate-allocator.cc",
- "src/isolate-allocator.h",
- "src/isolate-data.h",
- "src/isolate-inl.h",
- "src/isolate.cc",
- "src/isolate.h",
- "src/json-parser.cc",
- "src/json-parser.h",
- "src/json-stringifier.cc",
- "src/json-stringifier.h",
- "src/keys.cc",
- "src/keys.h",
- "src/label.h",
- "src/layout-descriptor-inl.h",
- "src/layout-descriptor.cc",
- "src/layout-descriptor.h",
- "src/locked-queue-inl.h",
- "src/locked-queue.h",
- "src/log-inl.h",
- "src/log-utils.cc",
- "src/log-utils.h",
- "src/log.cc",
- "src/log.h",
- "src/lookup-cache-inl.h",
- "src/lookup-cache.cc",
- "src/lookup-cache.h",
- "src/lookup-inl.h",
- "src/lookup.cc",
- "src/lookup.h",
- "src/machine-type.cc",
- "src/machine-type.h",
- "src/macro-assembler-inl.h",
- "src/macro-assembler.h",
- "src/map-updater.cc",
- "src/map-updater.h",
- "src/math-random.cc",
- "src/math-random.h",
- "src/maybe-handles-inl.h",
- "src/maybe-handles.h",
- "src/memcopy.cc",
- "src/memcopy.h",
- "src/message-template.h",
- "src/messages.cc",
- "src/messages.h",
- "src/microtask-queue.cc",
- "src/microtask-queue.h",
- "src/msan.h",
- "src/objects-body-descriptors-inl.h",
- "src/objects-body-descriptors.h",
- "src/objects-debug.cc",
- "src/objects-inl.h",
- "src/objects-printer.cc",
- "src/objects.cc",
- "src/objects.h",
+ "src/json/json-parser.cc",
+ "src/json/json-parser.h",
+ "src/json/json-stringifier.cc",
+ "src/json/json-stringifier.h",
+ "src/logging/code-events.h",
+ "src/logging/counters-definitions.h",
+ "src/logging/counters-inl.h",
+ "src/logging/counters.cc",
+ "src/logging/counters.h",
+ "src/logging/log-inl.h",
+ "src/logging/log-utils.cc",
+ "src/logging/log-utils.h",
+ "src/logging/log.cc",
+ "src/logging/log.h",
+ "src/numbers/bignum-dtoa.cc",
+ "src/numbers/bignum-dtoa.h",
+ "src/numbers/bignum.cc",
+ "src/numbers/bignum.h",
+ "src/numbers/cached-powers.cc",
+ "src/numbers/cached-powers.h",
+ "src/numbers/conversions-inl.h",
+ "src/numbers/conversions.cc",
+ "src/numbers/conversions.h",
+ "src/numbers/diy-fp.cc",
+ "src/numbers/diy-fp.h",
+ "src/numbers/double.h",
+ "src/numbers/dtoa.cc",
+ "src/numbers/dtoa.h",
+ "src/numbers/fast-dtoa.cc",
+ "src/numbers/fast-dtoa.h",
+ "src/numbers/fixed-dtoa.cc",
+ "src/numbers/fixed-dtoa.h",
+ "src/numbers/hash-seed-inl.h",
+ "src/numbers/math-random.cc",
+ "src/numbers/math-random.h",
+ "src/numbers/strtod.cc",
+ "src/numbers/strtod.h",
"src/objects/allocation-site-inl.h",
+ "src/objects/allocation-site-scopes-inl.h",
+ "src/objects/allocation-site-scopes.h",
"src/objects/allocation-site.h",
"src/objects/api-callbacks-inl.h",
"src/objects/api-callbacks.h",
@@ -2414,6 +2456,9 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/compilation-cache.h",
"src/objects/compressed-slots-inl.h",
"src/objects/compressed-slots.h",
+ "src/objects/contexts-inl.h",
+ "src/objects/contexts.cc",
+ "src/objects/contexts.h",
"src/objects/data-handler.h",
"src/objects/debug-objects-inl.h",
"src/objects/debug-objects.cc",
@@ -2422,6 +2467,11 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/descriptor-array.h",
"src/objects/dictionary-inl.h",
"src/objects/dictionary.h",
+ "src/objects/elements-inl.h",
+ "src/objects/elements-kind.cc",
+ "src/objects/elements-kind.h",
+ "src/objects/elements.cc",
+ "src/objects/elements.h",
"src/objects/embedder-data-array-inl.h",
"src/objects/embedder-data-array.cc",
"src/objects/embedder-data-array.h",
@@ -2429,10 +2479,18 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/embedder-data-slot.h",
"src/objects/feedback-cell-inl.h",
"src/objects/feedback-cell.h",
+ "src/objects/feedback-vector-inl.h",
+ "src/objects/feedback-vector.cc",
+ "src/objects/feedback-vector.h",
+ "src/objects/field-index-inl.h",
+ "src/objects/field-index.h",
+ "src/objects/field-type.cc",
+ "src/objects/field-type.h",
"src/objects/fixed-array-inl.h",
"src/objects/fixed-array.h",
"src/objects/frame-array-inl.h",
"src/objects/frame-array.h",
+ "src/objects/function-kind.h",
"src/objects/hash-table-inl.h",
"src/objects/hash-table.h",
"src/objects/heap-number-inl.h",
@@ -2496,12 +2554,25 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/js-segmenter.h",
"src/objects/js-weak-refs-inl.h",
"src/objects/js-weak-refs.h",
+ "src/objects/keys.cc",
+ "src/objects/keys.h",
+ "src/objects/layout-descriptor-inl.h",
+ "src/objects/layout-descriptor.cc",
+ "src/objects/layout-descriptor.h",
"src/objects/literal-objects-inl.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
+ "src/objects/lookup-cache-inl.h",
+ "src/objects/lookup-cache.cc",
+ "src/objects/lookup-cache.h",
+ "src/objects/lookup-inl.h",
+ "src/objects/lookup.cc",
+ "src/objects/lookup.h",
"src/objects/managed.cc",
"src/objects/managed.h",
"src/objects/map-inl.h",
+ "src/objects/map-updater.cc",
+ "src/objects/map-updater.h",
"src/objects/map.cc",
"src/objects/map.h",
"src/objects/maybe-object-inl.h",
@@ -2513,8 +2584,14 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/module.h",
"src/objects/name-inl.h",
"src/objects/name.h",
+ "src/objects/object-list-macros.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
+ "src/objects/objects-body-descriptors-inl.h",
+ "src/objects/objects-body-descriptors.h",
+ "src/objects/objects-inl.h",
+ "src/objects/objects.cc",
+ "src/objects/objects.h",
"src/objects/oddball-inl.h",
"src/objects/oddball.h",
"src/objects/ordered-hash-table-inl.h",
@@ -2528,8 +2605,14 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/property-cell.h",
"src/objects/property-descriptor-object-inl.h",
"src/objects/property-descriptor-object.h",
+ "src/objects/property-descriptor.cc",
+ "src/objects/property-descriptor.h",
+ "src/objects/property-details.h",
+ "src/objects/property.cc",
+ "src/objects/property.h",
"src/objects/prototype-info-inl.h",
"src/objects/prototype-info.h",
+ "src/objects/prototype.h",
"src/objects/regexp-match-info.h",
"src/objects/scope-info.cc",
"src/objects/scope-info.h",
@@ -2552,20 +2635,32 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/string.h",
"src/objects/struct-inl.h",
"src/objects/struct.h",
+ "src/objects/tagged-impl-inl.h",
+ "src/objects/tagged-impl.cc",
+ "src/objects/tagged-impl.h",
+ "src/objects/tagged-value-inl.h",
+ "src/objects/tagged-value.h",
"src/objects/template-objects-inl.h",
"src/objects/template-objects.cc",
"src/objects/template-objects.h",
"src/objects/templates-inl.h",
"src/objects/templates.h",
- "src/optimized-compilation-info.cc",
- "src/optimized-compilation-info.h",
- "src/ostreams.cc",
- "src/ostreams.h",
+ "src/objects/transitions-inl.h",
+ "src/objects/transitions.cc",
+ "src/objects/transitions.h",
+ "src/objects/type-hints.cc",
+ "src/objects/type-hints.h",
+ "src/objects/value-serializer.cc",
+ "src/objects/value-serializer.h",
+ "src/objects/visitors.cc",
+ "src/objects/visitors.h",
"src/parsing/expression-scope-reparenter.cc",
"src/parsing/expression-scope-reparenter.h",
"src/parsing/expression-scope.h",
"src/parsing/func-name-inferrer.cc",
"src/parsing/func-name-inferrer.h",
+ "src/parsing/literal-buffer.cc",
+ "src/parsing/literal-buffer.h",
"src/parsing/parse-info.cc",
"src/parsing/parse-info.h",
"src/parsing/parser-base.h",
@@ -2573,6 +2668,8 @@ v8_source_set("v8_base_without_compiler") {
"src/parsing/parser.h",
"src/parsing/parsing.cc",
"src/parsing/parsing.h",
+ "src/parsing/pending-compilation-error-handler.cc",
+ "src/parsing/pending-compilation-error-handler.h",
"src/parsing/preparse-data-impl.h",
"src/parsing/preparse-data.cc",
"src/parsing/preparse-data.h",
@@ -2587,11 +2684,6 @@ v8_source_set("v8_base_without_compiler") {
"src/parsing/scanner.h",
"src/parsing/token.cc",
"src/parsing/token.h",
- "src/pending-compilation-error-handler.cc",
- "src/pending-compilation-error-handler.h",
- "src/perf-jit.cc",
- "src/perf-jit.h",
- "src/pointer-with-payload.h",
"src/profiler/allocation-tracker.cc",
"src/profiler/allocation-tracker.h",
"src/profiler/circular-queue-inl.h",
@@ -2617,14 +2709,6 @@ v8_source_set("v8_base_without_compiler") {
"src/profiler/tick-sample.h",
"src/profiler/tracing-cpu-profiler.cc",
"src/profiler/tracing-cpu-profiler.h",
- "src/property-descriptor.cc",
- "src/property-descriptor.h",
- "src/property-details.h",
- "src/property.cc",
- "src/property.h",
- "src/prototype.h",
- "src/ptr-compr-inl.h",
- "src/ptr-compr.h",
"src/regexp/bytecodes-irregexp.h",
"src/regexp/interpreter-irregexp.cc",
"src/regexp/interpreter-irregexp.h",
@@ -2644,22 +2728,14 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/regexp-macro-assembler.h",
"src/regexp/regexp-parser.cc",
"src/regexp/regexp-parser.h",
+ "src/regexp/regexp-special-case.h",
"src/regexp/regexp-stack.cc",
"src/regexp/regexp-stack.h",
"src/regexp/regexp-utils.cc",
"src/regexp/regexp-utils.h",
- "src/register-arch.h",
- "src/register-configuration.cc",
- "src/register-configuration.h",
- "src/register.h",
- "src/reglist.h",
- "src/reloc-info.cc",
- "src/reloc-info.h",
- "src/roots-inl.h",
- "src/roots.cc",
- "src/roots.h",
- "src/runtime-profiler.cc",
- "src/runtime-profiler.h",
+ "src/roots/roots-inl.h",
+ "src/roots/roots.cc",
+ "src/roots/roots.h",
"src/runtime/runtime-array.cc",
"src/runtime/runtime-atomics.cc",
"src/runtime/runtime-bigint.cc",
@@ -2693,21 +2769,19 @@ v8_source_set("v8_base_without_compiler") {
"src/runtime/runtime-weak-refs.cc",
"src/runtime/runtime.cc",
"src/runtime/runtime.h",
- "src/safepoint-table.cc",
- "src/safepoint-table.h",
- "src/setup-isolate.h",
- "src/signature.h",
- "src/simulator-base.cc",
- "src/simulator-base.h",
- "src/simulator.h",
+ "src/sanitizer/asan.h",
+ "src/sanitizer/lsan-page-allocator.cc",
+ "src/sanitizer/lsan-page-allocator.h",
+ "src/sanitizer/msan.h",
+ "src/sanitizer/tsan.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/deserializer-allocator.cc",
"src/snapshot/deserializer-allocator.h",
"src/snapshot/deserializer.cc",
"src/snapshot/deserializer.h",
- "src/snapshot/embedded-data.cc",
- "src/snapshot/embedded-data.h",
+ "src/snapshot/embedded/embedded-data.cc",
+ "src/snapshot/embedded/embedded-data.h",
"src/snapshot/natives-common.cc",
"src/snapshot/natives.h",
"src/snapshot/object-deserializer.cc",
@@ -2737,85 +2811,69 @@ v8_source_set("v8_base_without_compiler") {
"src/snapshot/startup-deserializer.h",
"src/snapshot/startup-serializer.cc",
"src/snapshot/startup-serializer.h",
- "src/source-position-table.cc",
- "src/source-position-table.h",
- "src/source-position.cc",
- "src/source-position.h",
- "src/splay-tree-inl.h",
- "src/splay-tree.h",
- "src/startup-data-util.cc",
- "src/startup-data-util.h",
- "src/string-builder-inl.h",
- "src/string-builder.cc",
- "src/string-case.cc",
- "src/string-case.h",
- "src/string-constants.cc",
- "src/string-constants.h",
- "src/string-hasher-inl.h",
- "src/string-hasher.h",
- "src/string-search.h",
- "src/string-stream.cc",
- "src/string-stream.h",
- "src/strtod.cc",
- "src/strtod.h",
- "src/task-utils.cc",
- "src/task-utils.h",
+ "src/strings/char-predicates-inl.h",
+ "src/strings/char-predicates.cc",
+ "src/strings/char-predicates.h",
+ "src/strings/string-builder-inl.h",
+ "src/strings/string-builder.cc",
+ "src/strings/string-case.cc",
+ "src/strings/string-case.h",
+ "src/strings/string-hasher-inl.h",
+ "src/strings/string-hasher.h",
+ "src/strings/string-search.h",
+ "src/strings/string-stream.cc",
+ "src/strings/string-stream.h",
+ "src/strings/unicode-decoder.cc",
+ "src/strings/unicode-decoder.h",
+ "src/strings/unicode-inl.h",
+ "src/strings/unicode.cc",
+ "src/strings/unicode.h",
+ "src/strings/uri.cc",
+ "src/strings/uri.h",
+ "src/tasks/cancelable-task.cc",
+ "src/tasks/cancelable-task.h",
+ "src/tasks/task-utils.cc",
+ "src/tasks/task-utils.h",
"src/third_party/siphash/halfsiphash.cc",
"src/third_party/siphash/halfsiphash.h",
"src/third_party/utf8-decoder/utf8-decoder.h",
- "src/thread-id.cc",
- "src/thread-id.h",
- "src/thread-local-top.cc",
- "src/thread-local-top.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/tracing/traced-value.cc",
"src/tracing/traced-value.h",
"src/tracing/tracing-category-observer.cc",
"src/tracing/tracing-category-observer.h",
- "src/transitions-inl.h",
- "src/transitions.cc",
- "src/transitions.h",
"src/trap-handler/handler-inside.cc",
"src/trap-handler/handler-outside.cc",
"src/trap-handler/handler-shared.cc",
"src/trap-handler/trap-handler-internal.h",
"src/trap-handler/trap-handler.h",
- "src/turbo-assembler.cc",
- "src/turbo-assembler.h",
- "src/type-hints.cc",
- "src/type-hints.h",
- "src/type-traits.h",
- "src/unicode-cache.h",
- "src/unicode-decoder.cc",
- "src/unicode-decoder.h",
- "src/unicode-inl.h",
- "src/unicode.cc",
- "src/unicode.h",
- "src/unoptimized-compilation-info.cc",
- "src/unoptimized-compilation-info.h",
- "src/unwinder.cc",
- "src/uri.cc",
- "src/uri.h",
- "src/utils-inl.h",
- "src/utils.cc",
- "src/utils.h",
- "src/v8.cc",
- "src/v8.h",
- "src/v8memory.h",
- "src/v8threads.cc",
- "src/v8threads.h",
- "src/value-serializer.cc",
- "src/value-serializer.h",
- "src/vector-slot-pair.cc",
- "src/vector-slot-pair.h",
- "src/vector.h",
- "src/version.cc",
- "src/version.h",
- "src/visitors.cc",
- "src/visitors.h",
- "src/vm-state-inl.h",
- "src/vm-state.h",
+ "src/utils/address-map.cc",
+ "src/utils/address-map.h",
+ "src/utils/allocation.cc",
+ "src/utils/allocation.h",
+ "src/utils/bit-vector.cc",
+ "src/utils/bit-vector.h",
+ "src/utils/boxed-float.h",
+ "src/utils/detachable-vector.cc",
+ "src/utils/detachable-vector.h",
+ "src/utils/identity-map.cc",
+ "src/utils/identity-map.h",
+ "src/utils/locked-queue-inl.h",
+ "src/utils/locked-queue.h",
+ "src/utils/memcopy.cc",
+ "src/utils/memcopy.h",
+ "src/utils/ostreams.cc",
+ "src/utils/ostreams.h",
+ "src/utils/pointer-with-payload.h",
+ "src/utils/splay-tree-inl.h",
+ "src/utils/splay-tree.h",
+ "src/utils/utils-inl.h",
+ "src/utils/utils.cc",
+ "src/utils/utils.h",
+ "src/utils/vector.h",
+ "src/utils/version.cc",
+ "src/utils/version.h",
"src/wasm/baseline/liftoff-assembler-defs.h",
"src/wasm/baseline/liftoff-assembler.cc",
"src/wasm/baseline/liftoff-assembler.h",
@@ -2916,40 +2974,50 @@ v8_source_set("v8_base_without_compiler") {
# These source files take an unusually large amount of time to
# compile. Build them separately to avoid bottlenecks.
- "src/api.cc",
- "src/elements.cc",
+ "src/api/api.cc",
"src/heap/heap.cc",
- "src/objects.cc",
+ "src/objects/elements.cc",
+ "src/objects/objects.cc",
"src/parsing/parser.cc",
]
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
+ "src/codegen/ia32/assembler-ia32-inl.h",
+ "src/codegen/ia32/assembler-ia32.cc",
+ "src/codegen/ia32/assembler-ia32.h",
+ "src/codegen/ia32/constants-ia32.h",
+ "src/codegen/ia32/cpu-ia32.cc",
+ "src/codegen/ia32/interface-descriptors-ia32.cc",
+ "src/codegen/ia32/macro-assembler-ia32.cc",
+ "src/codegen/ia32/macro-assembler-ia32.h",
+ "src/codegen/ia32/register-ia32.h",
+ "src/codegen/ia32/sse-instr.h",
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-codes-ia32.h",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
"src/debug/ia32/debug-ia32.cc",
- "src/ia32/assembler-ia32-inl.h",
- "src/ia32/assembler-ia32.cc",
- "src/ia32/assembler-ia32.h",
- "src/ia32/constants-ia32.h",
- "src/ia32/cpu-ia32.cc",
- "src/ia32/deoptimizer-ia32.cc",
- "src/ia32/disasm-ia32.cc",
- "src/ia32/frame-constants-ia32.cc",
- "src/ia32/frame-constants-ia32.h",
- "src/ia32/interface-descriptors-ia32.cc",
- "src/ia32/macro-assembler-ia32.cc",
- "src/ia32/macro-assembler-ia32.h",
- "src/ia32/register-ia32.h",
- "src/ia32/sse-instr.h",
+ "src/deoptimizer/ia32/deoptimizer-ia32.cc",
+ "src/diagnostics/ia32/disasm-ia32.cc",
+ "src/execution/ia32/frame-constants-ia32.cc",
+ "src/execution/ia32/frame-constants-ia32.h",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
+ "src/codegen/x64/assembler-x64-inl.h",
+ "src/codegen/x64/assembler-x64.cc",
+ "src/codegen/x64/assembler-x64.h",
+ "src/codegen/x64/constants-x64.h",
+ "src/codegen/x64/cpu-x64.cc",
+ "src/codegen/x64/interface-descriptors-x64.cc",
+ "src/codegen/x64/macro-assembler-x64.cc",
+ "src/codegen/x64/macro-assembler-x64.h",
+ "src/codegen/x64/register-x64.h",
+ "src/codegen/x64/sse-instr.h",
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-codes-x64.h",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
@@ -2957,26 +3025,17 @@ v8_source_set("v8_base_without_compiler") {
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.h",
"src/debug/x64/debug-x64.cc",
+ "src/deoptimizer/x64/deoptimizer-x64.cc",
+ "src/diagnostics/x64/disasm-x64.cc",
+ "src/diagnostics/x64/eh-frame-x64.cc",
+ "src/execution/x64/frame-constants-x64.cc",
+ "src/execution/x64/frame-constants-x64.h",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/third_party/valgrind/valgrind.h",
"src/wasm/baseline/x64/liftoff-assembler-x64.h",
- "src/x64/assembler-x64-inl.h",
- "src/x64/assembler-x64.cc",
- "src/x64/assembler-x64.h",
- "src/x64/constants-x64.h",
- "src/x64/cpu-x64.cc",
- "src/x64/deoptimizer-x64.cc",
- "src/x64/disasm-x64.cc",
- "src/x64/eh-frame-x64.cc",
- "src/x64/frame-constants-x64.cc",
- "src/x64/frame-constants-x64.h",
- "src/x64/interface-descriptors-x64.cc",
- "src/x64/macro-assembler-x64.cc",
- "src/x64/macro-assembler-x64.h",
- "src/x64/register-x64.h",
- "src/x64/sse-instr.h",
]
+
# iOS Xcode simulator builds run on an x64 target. iOS and macOS are both
# based on Darwin and thus POSIX-compliant to a similar degree.
if (is_linux || is_mac || is_ios) {
@@ -2988,32 +3047,25 @@ v8_source_set("v8_base_without_compiler") {
}
if (is_win) {
sources += [
+ "src/diagnostics/unwinding-info-win64.cc",
+ "src/diagnostics/unwinding-info-win64.h",
"src/trap-handler/handler-inside-win.cc",
"src/trap-handler/handler-inside-win.h",
"src/trap-handler/handler-outside-win.cc",
- "src/unwinding-info-win64.cc",
- "src/unwinding-info-win64.h",
]
}
} else if (v8_current_cpu == "arm") {
sources += [ ### gcmole(arch:arm) ###
- "src/arm/assembler-arm-inl.h",
- "src/arm/assembler-arm.cc",
- "src/arm/assembler-arm.h",
- "src/arm/constants-arm.cc",
- "src/arm/constants-arm.h",
- "src/arm/cpu-arm.cc",
- "src/arm/deoptimizer-arm.cc",
- "src/arm/disasm-arm.cc",
- "src/arm/eh-frame-arm.cc",
- "src/arm/frame-constants-arm.cc",
- "src/arm/frame-constants-arm.h",
- "src/arm/interface-descriptors-arm.cc",
- "src/arm/macro-assembler-arm.cc",
- "src/arm/macro-assembler-arm.h",
- "src/arm/register-arm.h",
- "src/arm/simulator-arm.cc",
- "src/arm/simulator-arm.h",
+ "src/codegen/arm/assembler-arm-inl.h",
+ "src/codegen/arm/assembler-arm.cc",
+ "src/codegen/arm/assembler-arm.h",
+ "src/codegen/arm/constants-arm.cc",
+ "src/codegen/arm/constants-arm.h",
+ "src/codegen/arm/cpu-arm.cc",
+ "src/codegen/arm/interface-descriptors-arm.cc",
+ "src/codegen/arm/macro-assembler-arm.cc",
+ "src/codegen/arm/macro-assembler-arm.h",
+ "src/codegen/arm/register-arm.h",
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-codes-arm.h",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
@@ -3021,42 +3073,40 @@ v8_source_set("v8_base_without_compiler") {
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.h",
"src/debug/arm/debug-arm.cc",
+ "src/deoptimizer/arm/deoptimizer-arm.cc",
+ "src/diagnostics/arm/disasm-arm.cc",
+ "src/diagnostics/arm/eh-frame-arm.cc",
+ "src/execution/arm/frame-constants-arm.cc",
+ "src/execution/arm/frame-constants-arm.h",
+ "src/execution/arm/simulator-arm.cc",
+ "src/execution/arm/simulator-arm.h",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
"src/wasm/baseline/arm/liftoff-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
- "src/arm64/assembler-arm64-inl.h",
- "src/arm64/assembler-arm64.cc",
- "src/arm64/assembler-arm64.h",
- "src/arm64/constants-arm64.h",
- "src/arm64/cpu-arm64.cc",
- "src/arm64/decoder-arm64-inl.h",
- "src/arm64/decoder-arm64.cc",
- "src/arm64/decoder-arm64.h",
- "src/arm64/deoptimizer-arm64.cc",
- "src/arm64/disasm-arm64.cc",
- "src/arm64/disasm-arm64.h",
- "src/arm64/eh-frame-arm64.cc",
- "src/arm64/frame-constants-arm64.cc",
- "src/arm64/frame-constants-arm64.h",
- "src/arm64/instructions-arm64-constants.cc",
- "src/arm64/instructions-arm64.cc",
- "src/arm64/instructions-arm64.h",
- "src/arm64/instrument-arm64.cc",
- "src/arm64/instrument-arm64.h",
- "src/arm64/interface-descriptors-arm64.cc",
- "src/arm64/macro-assembler-arm64-inl.h",
- "src/arm64/macro-assembler-arm64.cc",
- "src/arm64/macro-assembler-arm64.h",
- "src/arm64/register-arm64.cc",
- "src/arm64/register-arm64.h",
- "src/arm64/simulator-arm64.cc",
- "src/arm64/simulator-arm64.h",
- "src/arm64/simulator-logic-arm64.cc",
- "src/arm64/utils-arm64.cc",
- "src/arm64/utils-arm64.h",
+ "src/codegen/arm64/assembler-arm64-inl.h",
+ "src/codegen/arm64/assembler-arm64.cc",
+ "src/codegen/arm64/assembler-arm64.h",
+ "src/codegen/arm64/constants-arm64.h",
+ "src/codegen/arm64/cpu-arm64.cc",
+ "src/codegen/arm64/decoder-arm64-inl.h",
+ "src/codegen/arm64/decoder-arm64.cc",
+ "src/codegen/arm64/decoder-arm64.h",
+ "src/codegen/arm64/instructions-arm64-constants.cc",
+ "src/codegen/arm64/instructions-arm64.cc",
+ "src/codegen/arm64/instructions-arm64.h",
+ "src/codegen/arm64/instrument-arm64.cc",
+ "src/codegen/arm64/instrument-arm64.h",
+ "src/codegen/arm64/interface-descriptors-arm64.cc",
+ "src/codegen/arm64/macro-assembler-arm64-inl.h",
+ "src/codegen/arm64/macro-assembler-arm64.cc",
+ "src/codegen/arm64/macro-assembler-arm64.h",
+ "src/codegen/arm64/register-arm64.cc",
+ "src/codegen/arm64/register-arm64.h",
+ "src/codegen/arm64/utils-arm64.cc",
+ "src/codegen/arm64/utils-arm64.h",
"src/compiler/backend/arm64/code-generator-arm64.cc",
"src/compiler/backend/arm64/instruction-codes-arm64.h",
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
@@ -3064,6 +3114,15 @@ v8_source_set("v8_base_without_compiler") {
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
"src/debug/arm64/debug-arm64.cc",
+ "src/deoptimizer/arm64/deoptimizer-arm64.cc",
+ "src/diagnostics/arm64/disasm-arm64.cc",
+ "src/diagnostics/arm64/disasm-arm64.h",
+ "src/diagnostics/arm64/eh-frame-arm64.cc",
+ "src/execution/arm64/frame-constants-arm64.cc",
+ "src/execution/arm64/frame-constants-arm64.h",
+ "src/execution/arm64/simulator-arm64.cc",
+ "src/execution/arm64/simulator-arm64.h",
+ "src/execution/arm64/simulator-logic-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
@@ -3071,114 +3130,114 @@ v8_source_set("v8_base_without_compiler") {
jumbo_excluded_sources += [
# TODO(mostynb@vewd.com): fix this code so it doesn't need
# to be excluded, see the comments inside.
- "src/arm64/instructions-arm64-constants.cc",
+ "src/codegen/arm64/instructions-arm64-constants.cc",
]
} else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") {
sources += [ ### gcmole(arch:mipsel) ###
+ "src/codegen/mips/assembler-mips-inl.h",
+ "src/codegen/mips/assembler-mips.cc",
+ "src/codegen/mips/assembler-mips.h",
+ "src/codegen/mips/constants-mips.cc",
+ "src/codegen/mips/constants-mips.h",
+ "src/codegen/mips/cpu-mips.cc",
+ "src/codegen/mips/interface-descriptors-mips.cc",
+ "src/codegen/mips/macro-assembler-mips.cc",
+ "src/codegen/mips/macro-assembler-mips.h",
+ "src/codegen/mips/register-mips.h",
"src/compiler/backend/mips/code-generator-mips.cc",
"src/compiler/backend/mips/instruction-codes-mips.h",
"src/compiler/backend/mips/instruction-scheduler-mips.cc",
"src/compiler/backend/mips/instruction-selector-mips.cc",
"src/debug/mips/debug-mips.cc",
- "src/mips/assembler-mips-inl.h",
- "src/mips/assembler-mips.cc",
- "src/mips/assembler-mips.h",
- "src/mips/constants-mips.cc",
- "src/mips/constants-mips.h",
- "src/mips/cpu-mips.cc",
- "src/mips/deoptimizer-mips.cc",
- "src/mips/disasm-mips.cc",
- "src/mips/frame-constants-mips.cc",
- "src/mips/frame-constants-mips.h",
- "src/mips/interface-descriptors-mips.cc",
- "src/mips/macro-assembler-mips.cc",
- "src/mips/macro-assembler-mips.h",
- "src/mips/register-mips.h",
- "src/mips/simulator-mips.cc",
- "src/mips/simulator-mips.h",
+ "src/deoptimizer/mips/deoptimizer-mips.cc",
+ "src/diagnostics/mips/disasm-mips.cc",
+ "src/execution/mips/frame-constants-mips.cc",
+ "src/execution/mips/frame-constants-mips.h",
+ "src/execution/mips/simulator-mips.cc",
+ "src/execution/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
"src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
+ "src/codegen/mips64/assembler-mips64-inl.h",
+ "src/codegen/mips64/assembler-mips64.cc",
+ "src/codegen/mips64/assembler-mips64.h",
+ "src/codegen/mips64/constants-mips64.cc",
+ "src/codegen/mips64/constants-mips64.h",
+ "src/codegen/mips64/cpu-mips64.cc",
+ "src/codegen/mips64/interface-descriptors-mips64.cc",
+ "src/codegen/mips64/macro-assembler-mips64.cc",
+ "src/codegen/mips64/macro-assembler-mips64.h",
+ "src/codegen/mips64/register-mips64.h",
"src/compiler/backend/mips64/code-generator-mips64.cc",
"src/compiler/backend/mips64/instruction-codes-mips64.h",
"src/compiler/backend/mips64/instruction-scheduler-mips64.cc",
"src/compiler/backend/mips64/instruction-selector-mips64.cc",
"src/debug/mips64/debug-mips64.cc",
- "src/mips64/assembler-mips64-inl.h",
- "src/mips64/assembler-mips64.cc",
- "src/mips64/assembler-mips64.h",
- "src/mips64/constants-mips64.cc",
- "src/mips64/constants-mips64.h",
- "src/mips64/cpu-mips64.cc",
- "src/mips64/deoptimizer-mips64.cc",
- "src/mips64/disasm-mips64.cc",
- "src/mips64/frame-constants-mips64.cc",
- "src/mips64/frame-constants-mips64.h",
- "src/mips64/interface-descriptors-mips64.cc",
- "src/mips64/macro-assembler-mips64.cc",
- "src/mips64/macro-assembler-mips64.h",
- "src/mips64/register-mips64.h",
- "src/mips64/simulator-mips64.cc",
- "src/mips64/simulator-mips64.h",
+ "src/deoptimizer/mips64/deoptimizer-mips64.cc",
+ "src/diagnostics/mips64/disasm-mips64.cc",
+ "src/execution/mips64/frame-constants-mips64.cc",
+ "src/execution/mips64/frame-constants-mips64.h",
+ "src/execution/mips64/simulator-mips64.cc",
+ "src/execution/mips64/simulator-mips64.h",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
+ "src/codegen/ppc/assembler-ppc-inl.h",
+ "src/codegen/ppc/assembler-ppc.cc",
+ "src/codegen/ppc/assembler-ppc.h",
+ "src/codegen/ppc/constants-ppc.cc",
+ "src/codegen/ppc/constants-ppc.h",
+ "src/codegen/ppc/cpu-ppc.cc",
+ "src/codegen/ppc/interface-descriptors-ppc.cc",
+ "src/codegen/ppc/macro-assembler-ppc.cc",
+ "src/codegen/ppc/macro-assembler-ppc.h",
+ "src/codegen/ppc/register-ppc.h",
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/debug/ppc/debug-ppc.cc",
- "src/ppc/assembler-ppc-inl.h",
- "src/ppc/assembler-ppc.cc",
- "src/ppc/assembler-ppc.h",
- "src/ppc/constants-ppc.cc",
- "src/ppc/constants-ppc.h",
- "src/ppc/cpu-ppc.cc",
- "src/ppc/deoptimizer-ppc.cc",
- "src/ppc/disasm-ppc.cc",
- "src/ppc/frame-constants-ppc.cc",
- "src/ppc/frame-constants-ppc.h",
- "src/ppc/interface-descriptors-ppc.cc",
- "src/ppc/macro-assembler-ppc.cc",
- "src/ppc/macro-assembler-ppc.h",
- "src/ppc/register-ppc.h",
- "src/ppc/simulator-ppc.cc",
- "src/ppc/simulator-ppc.h",
+ "src/deoptimizer/ppc/deoptimizer-ppc.cc",
+ "src/diagnostics/ppc/disasm-ppc.cc",
+ "src/execution/ppc/frame-constants-ppc.cc",
+ "src/execution/ppc/frame-constants-ppc.h",
+ "src/execution/ppc/simulator-ppc.cc",
+ "src/execution/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
"src/regexp/ppc/regexp-macro-assembler-ppc.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
+ "src/codegen/s390/assembler-s390-inl.h",
+ "src/codegen/s390/assembler-s390.cc",
+ "src/codegen/s390/assembler-s390.h",
+ "src/codegen/s390/constants-s390.cc",
+ "src/codegen/s390/constants-s390.h",
+ "src/codegen/s390/cpu-s390.cc",
+ "src/codegen/s390/interface-descriptors-s390.cc",
+ "src/codegen/s390/macro-assembler-s390.cc",
+ "src/codegen/s390/macro-assembler-s390.h",
+ "src/codegen/s390/register-s390.h",
"src/compiler/backend/s390/code-generator-s390.cc",
"src/compiler/backend/s390/instruction-codes-s390.h",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/debug/s390/debug-s390.cc",
+ "src/deoptimizer/s390/deoptimizer-s390.cc",
+ "src/diagnostics/s390/disasm-s390.cc",
+ "src/execution/s390/frame-constants-s390.cc",
+ "src/execution/s390/frame-constants-s390.h",
+ "src/execution/s390/simulator-s390.cc",
+ "src/execution/s390/simulator-s390.h",
"src/regexp/s390/regexp-macro-assembler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.h",
- "src/s390/assembler-s390-inl.h",
- "src/s390/assembler-s390.cc",
- "src/s390/assembler-s390.h",
- "src/s390/constants-s390.cc",
- "src/s390/constants-s390.h",
- "src/s390/cpu-s390.cc",
- "src/s390/deoptimizer-s390.cc",
- "src/s390/disasm-s390.cc",
- "src/s390/frame-constants-s390.cc",
- "src/s390/frame-constants-s390.h",
- "src/s390/interface-descriptors-s390.cc",
- "src/s390/macro-assembler-s390.cc",
- "src/s390/macro-assembler-s390.h",
- "src/s390/register-s390.h",
- "src/s390/simulator-s390.cc",
- "src/s390/simulator-s390.h",
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
}
@@ -3203,13 +3262,14 @@ v8_source_set("v8_base_without_compiler") {
]
if (v8_enable_i18n_support) {
+ deps += [ ":run_gen-regexp-special-case" ]
+ sources += [ "$target_gen_dir/src/regexp/special-case.cc" ]
if (is_win) {
deps += [ "//third_party/icu:icudata" ]
}
} else {
sources -= [
"src/builtins/builtins-intl.cc",
- "src/char-predicates.cc",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-break-iterator-inl.h",
@@ -3243,6 +3303,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/js-segmenter.cc",
"src/objects/js-segmenter.h",
"src/runtime/runtime-intl.cc",
+ "src/strings/char-predicates.cc",
]
}
@@ -3275,6 +3336,7 @@ v8_source_set("torque_base") {
"src/torque/ast.h",
"src/torque/cfg.cc",
"src/torque/cfg.h",
+ "src/torque/constants.h",
"src/torque/contextual.h",
"src/torque/csa-generator.cc",
"src/torque/csa-generator.h",
@@ -3286,8 +3348,6 @@ v8_source_set("torque_base") {
"src/torque/declarations.h",
"src/torque/earley-parser.cc",
"src/torque/earley-parser.h",
- "src/torque/file-visitor.cc",
- "src/torque/file-visitor.h",
"src/torque/global-context.h",
"src/torque/implementation-visitor.cc",
"src/torque/implementation-visitor.h",
@@ -3303,6 +3363,8 @@ v8_source_set("torque_base") {
"src/torque/torque-parser.h",
"src/torque/type-oracle.cc",
"src/torque/type-oracle.h",
+ "src/torque/type-visitor.cc",
+ "src/torque/type-visitor.h",
"src/torque/types.cc",
"src/torque/types.h",
"src/torque/utils.cc",
@@ -3329,8 +3391,16 @@ v8_source_set("torque_base") {
]
if (is_win && is_asan) {
+ # Due to a bug in ASAN on Windows (chromium:893437), we disable ASAN for
+ # Torque on Windows.
remove_configs += [ "//build/config/sanitizers:default_sanitizer_flags" ]
}
+
+ if (is_debug && !v8_optimized_debug && v8_enable_fast_torque) {
+ # The :no_optimize config is added to v8_add_configs in v8.gni.
+ remove_configs += [ "//build/config/compiler:no_optimize" ]
+ configs += [ ":always_optimize" ]
+ }
}
v8_source_set("torque_ls_base") {
@@ -3394,7 +3464,6 @@ v8_component("v8_libbase") {
"src/base/file-utils.cc",
"src/base/file-utils.h",
"src/base/flags.h",
- "src/base/format-macros.h",
"src/base/free_deleter.h",
"src/base/functional.cc",
"src/base/functional.h",
@@ -3407,8 +3476,6 @@ v8_component("v8_libbase") {
"src/base/list.h",
"src/base/logging.cc",
"src/base/logging.h",
- "src/base/lsan-page-allocator.cc",
- "src/base/lsan-page-allocator.h",
"src/base/macros.h",
"src/base/once.cc",
"src/base/once.h",
@@ -3436,7 +3503,7 @@ v8_component("v8_libbase") {
"src/base/sys-info.h",
"src/base/template-utils.h",
"src/base/timezone-cache.h",
- "src/base/tsan.h",
+ "src/base/type-traits.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
]
@@ -3601,7 +3668,25 @@ v8_component("v8_libplatform") {
":v8_libbase",
]
if (v8_use_perfetto) {
- deps += [ "third_party/perfetto:libperfetto" ]
+ sources += [
+ "src/libplatform/tracing/json-trace-event-listener.cc",
+ "src/libplatform/tracing/json-trace-event-listener.h",
+ "src/libplatform/tracing/perfetto-consumer.cc",
+ "src/libplatform/tracing/perfetto-consumer.h",
+ "src/libplatform/tracing/perfetto-producer.cc",
+ "src/libplatform/tracing/perfetto-producer.h",
+ "src/libplatform/tracing/perfetto-shared-memory.cc",
+ "src/libplatform/tracing/perfetto-shared-memory.h",
+ "src/libplatform/tracing/perfetto-tasks.cc",
+ "src/libplatform/tracing/perfetto-tasks.h",
+ "src/libplatform/tracing/perfetto-tracing-controller.cc",
+ "src/libplatform/tracing/perfetto-tracing-controller.h",
+ "src/libplatform/tracing/trace-event-listener.h",
+ ]
+ deps += [
+ "//third_party/perfetto:libperfetto",
+ "//third_party/perfetto/protos/perfetto/trace/chrome:minimal_complete_lite",
+ ]
}
}
@@ -3679,9 +3764,11 @@ v8_static_library("wee8") {
configs = [ ":internal_config" ]
sources = [
+ ### gcmole(all) ###
"src/wasm/c-api.cc",
- "third_party/wasm-c-api/wasm.h",
- "third_party/wasm-c-api/wasm.hh",
+ "src/wasm/c-api.h",
+ "third_party/wasm-api/wasm.h",
+ "third_party/wasm-api/wasm.hh",
]
}
@@ -3717,8 +3804,18 @@ if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
- "src/snapshot/embedded-file-writer.cc",
- "src/snapshot/embedded-file-writer.h",
+ "src/snapshot/embedded/embedded-file-writer.cc",
+ "src/snapshot/embedded/embedded-file-writer.h",
+ "src/snapshot/embedded/platform-embedded-file-writer-aix.cc",
+ "src/snapshot/embedded/platform-embedded-file-writer-aix.h",
+ "src/snapshot/embedded/platform-embedded-file-writer-base.cc",
+ "src/snapshot/embedded/platform-embedded-file-writer-base.h",
+ "src/snapshot/embedded/platform-embedded-file-writer-generic.cc",
+ "src/snapshot/embedded/platform-embedded-file-writer-generic.h",
+ "src/snapshot/embedded/platform-embedded-file-writer-mac.cc",
+ "src/snapshot/embedded/platform-embedded-file-writer-mac.h",
+ "src/snapshot/embedded/platform-embedded-file-writer-win.cc",
+ "src/snapshot/embedded/platform-embedded-file-writer-win.h",
"src/snapshot/mksnapshot.cc",
]
@@ -3733,12 +3830,6 @@ if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
":v8_nosnapshot",
"//build/win:default_exe_manifest",
]
-
- if (target_os == "fuchsia") {
- defines = [ "V8_TARGET_OS_FUCHSIA" ]
- } else if (target_os == "win") {
- defines = [ "V8_TARGET_OS_WIN" ]
- }
}
}
@@ -3801,6 +3892,50 @@ v8_executable("torque-language-server") {
}
}
+if (current_toolchain == v8_generator_toolchain) {
+ v8_executable("gen-regexp-special-case") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ sources = [
+ "src/regexp/gen-regexp-special-case.cc",
+ ]
+
+ deps = [
+ ":v8_libbase",
+ "//build/win:default_exe_manifest",
+ "//third_party/icu",
+ ]
+
+ configs = [ ":internal_config" ]
+ }
+}
+
+action("run_gen-regexp-special-case") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ script = "tools/run.py"
+
+ sources = v8_extra_library_files
+
+ deps = [
+ ":gen-regexp-special-case($v8_generator_toolchain)",
+ ]
+
+ output_file = "$target_gen_dir/src/regexp/special-case.cc"
+
+ outputs = [
+ output_file,
+ ]
+
+ args = [
+ "./" + rebase_path(
+ get_label_info(":gen-regexp-special-case($v8_generator_toolchain)",
+ "root_out_dir") + "/gen-regexp-special-case",
+ root_build_dir),
+ rebase_path(output_file, root_build_dir),
+ ]
+}
+
###############################################################################
# Public targets
#
@@ -3833,6 +3968,12 @@ group("gn_all") {
}
}
+group("v8_python_base") {
+ data = [
+ ".vpython",
+ ]
+}
+
group("v8_clusterfuzz") {
testonly = true
@@ -3899,7 +4040,7 @@ group("v8_fuzzers") {
if (is_component_build) {
v8_component("v8") {
sources = [
- "src/v8dll-main.cc",
+ "src/utils/v8dll-main.cc",
]
public_deps = [
@@ -3916,7 +4057,7 @@ if (is_component_build) {
testonly = true
sources = [
- "src/v8dll-main.cc",
+ "src/utils/v8dll-main.cc",
]
public_deps = [
@@ -3965,15 +4106,15 @@ if (is_component_build) {
v8_executable("d8") {
sources = [
- "src/async-hooks-wrapper.cc",
- "src/async-hooks-wrapper.h",
- "src/d8-console.cc",
- "src/d8-console.h",
- "src/d8-js.cc",
- "src/d8-platforms.cc",
- "src/d8-platforms.h",
- "src/d8.cc",
- "src/d8.h",
+ "src/d8/async-hooks-wrapper.cc",
+ "src/d8/async-hooks-wrapper.h",
+ "src/d8/d8-console.cc",
+ "src/d8/d8-console.h",
+ "src/d8/d8-js.cc",
+ "src/d8/d8-platforms.cc",
+ "src/d8/d8-platforms.h",
+ "src/d8/d8.cc",
+ "src/d8/d8.h",
]
configs = [
@@ -3991,9 +4132,9 @@ v8_executable("d8") {
]
if (is_posix || is_fuchsia) {
- sources += [ "src/d8-posix.cc" ]
+ sources += [ "src/d8/d8-posix.cc" ]
} else if (is_win) {
- sources += [ "src/d8-windows.cc" ]
+ sources += [ "src/d8/d8-windows.cc" ]
}
if (v8_correctness_fuzzer) {
@@ -4326,16 +4467,13 @@ if (!build_with_chromium && v8_use_perfetto) {
"GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER",
]
cflags = [
- # Using -isystem instead of include_dirs (-I), so we don't need to suppress
- # warnings coming from libprotobuf headers. Doing so would mask warnings in
- # our own code.
- "-isystem",
- rebase_path("third_party/protobuf/src", root_build_dir),
"-Wno-unknown-warning-option",
"-Wno-deprecated",
"-Wno-undef",
"-Wno-zero-as-null-pointer-constant",
+ "-Wno-thread-safety-attributes",
]
+ include_dirs = [ "third_party/protobuf/src" ]
}
# Configuration used to build libprotobuf_* and the protoc compiler.
@@ -4343,7 +4481,9 @@ if (!build_with_chromium && v8_use_perfetto) {
# Apply the lighter supressions and macro definitions from above.
configs = [ ":protobuf_gen_config" ]
- defines = [ "HAVE_PTHREAD=1" ]
+ if (!is_win) {
+ defines = [ "HAVE_PTHREAD=1" ]
+ }
if (is_clang) {
cflags = [
"-Wno-unused-private-field",
@@ -4355,6 +4495,9 @@ if (!build_with_chromium && v8_use_perfetto) {
"-Wno-tautological-constant-compare",
]
}
+ if (is_win) {
+ cflags += [ "-Wno-microsoft-unqualified-friend" ]
+ }
}
source_set("protobuf_lite") {
@@ -4389,6 +4532,9 @@ if (!build_with_chromium && v8_use_perfetto) {
"//build/config/compiler:no_chromium_code",
":protobuf_config",
]
+ if (is_win) {
+ configs -= [ "//build/config/win:lean_and_mean" ]
+ }
public_configs = [ ":protobuf_gen_config" ]
}
@@ -4458,6 +4604,9 @@ if (!build_with_chromium && v8_use_perfetto) {
"//build/config/compiler:no_chromium_code",
":protobuf_config",
]
+ if (is_win) {
+ configs -= [ "//build/config/win:lean_and_mean" ]
+ }
public_configs = [ ":protobuf_gen_config" ]
}
@@ -4483,69 +4632,8 @@ if (!build_with_chromium && v8_use_perfetto) {
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_primitive_field.cc",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_service.cc",
"third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_string_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_doc_comment.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_enum.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_enum_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_field_base.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_generator.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_helpers.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_map_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_message_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_primitive_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_reflection_class.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_repeated_enum_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_repeated_message_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_repeated_primitive_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_source_generator_base.cc",
- "third_party/protobuf/src/google/protobuf/compiler/csharp/csharp_wrapper_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_context.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_doc_comment.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_enum.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_enum_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_enum_field_lite.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_enum_lite.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_extension.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_extension_lite.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_file.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_generator.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_generator_factory.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_helpers.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_map_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_map_field_lite.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_message.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_message_builder.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_message_builder_lite.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_message_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_message_field_lite.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_message_lite.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_name_resolver.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_primitive_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_primitive_field_lite.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_service.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_shared_code_generator.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_string_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/java/java_string_field_lite.cc",
- "third_party/protobuf/src/google/protobuf/compiler/js/js_generator.cc",
- "third_party/protobuf/src/google/protobuf/compiler/js/well_known_types_embed.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_enum.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_enum_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_extension.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_file.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_generator.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_helpers.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_map_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_message.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_message_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_oneof.cc",
- "third_party/protobuf/src/google/protobuf/compiler/objectivec/objectivec_primitive_field.cc",
- "third_party/protobuf/src/google/protobuf/compiler/php/php_generator.cc",
"third_party/protobuf/src/google/protobuf/compiler/plugin.cc",
"third_party/protobuf/src/google/protobuf/compiler/plugin.pb.cc",
- "third_party/protobuf/src/google/protobuf/compiler/python/python_generator.cc",
- "third_party/protobuf/src/google/protobuf/compiler/ruby/ruby_generator.cc",
"third_party/protobuf/src/google/protobuf/compiler/subprocess.cc",
"third_party/protobuf/src/google/protobuf/compiler/zip_writer.cc",
]
@@ -4554,6 +4642,9 @@ if (!build_with_chromium && v8_use_perfetto) {
"//build/config/compiler:no_chromium_code",
":protobuf_config",
]
+ if (is_win) {
+ configs -= [ "//build/config/win:lean_and_mean" ]
+ }
public_configs = [ ":protobuf_gen_config" ]
}
@@ -4563,7 +4654,7 @@ if (!build_with_chromium && v8_use_perfetto) {
"//build/win:default_exe_manifest",
]
sources = [
- "third_party/protobuf/src/google/protobuf/compiler/main.cc",
+ "src/protobuf/protobuf-compiler-main.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS
new file mode 100644
index 0000000000..79f1428658
--- /dev/null
+++ b/deps/v8/COMMON_OWNERS
@@ -0,0 +1,38 @@
+adamk@chromium.org
+ahaas@chromium.org
+bbudge@chromium.org
+binji@chromium.org
+bmeurer@chromium.org
+cbruni@chromium.org
+clemensh@chromium.org
+danno@chromium.org
+delphick@chromium.org
+gdeepti@chromium.org
+gsathya@chromium.org
+hablich@chromium.org
+hpayer@chromium.org
+ishell@chromium.org
+jarin@chromium.org
+jgruber@chromium.org
+jkummerow@chromium.org
+leszeks@chromium.org
+machenbach@chromium.org
+mathias@chromium.org
+marja@chromium.org
+mlippautz@chromium.org
+mslekova@chromium.org
+mstarzinger@chromium.org
+mvstanton@chromium.org
+mythria@chromium.org
+neis@chromium.org
+petermarshall@chromium.org
+rmcilroy@chromium.org
+sergiyb@chromium.org
+sigurds@chromium.org
+solanes@chromium.org
+szuend@chromium.org
+tebbi@chromium.org
+titzer@chromium.org
+ulan@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 6d315e6a9e..c21ac11760 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1523 @@
+2019-05-28: Version 7.6.303
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.302
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.301
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.300
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.299
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.298
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.297
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.296
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.295
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.294
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-28: Version 7.6.293
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.292
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.291
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.290
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.289
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.288
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.287
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.286
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.285
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.284
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.283
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.282
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.281
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.280
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.279
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.278
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.277
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.276
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-27: Version 7.6.275
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-24: Version 7.6.274
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-24: Version 7.6.273
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-24: Version 7.6.272
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-24: Version 7.6.271
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-24: Version 7.6.270
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-24: Version 7.6.269
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-24: Version 7.6.268
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-23: Version 7.6.267
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-23: Version 7.6.266
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-23: Version 7.6.265
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-23: Version 7.6.264
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-23: Version 7.6.263
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-23: Version 7.6.262
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-23: Version 7.6.261
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.260
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.259
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.258
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.257
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.256
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.255
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.254
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.253
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.252
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.251
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.250
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.249
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-22: Version 7.6.248
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.247
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.246
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.245
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.244
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.243
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.242
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.241
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.240
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.239
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.238
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.237
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.236
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.235
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.234
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.233
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.232
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.231
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.230
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-21: Version 7.6.229
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.228
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.227
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.226
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.225
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.224
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.223
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.222
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.221
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.220
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.219
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.218
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.217
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.216
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.215
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.214
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.213
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.212
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-20: Version 7.6.211
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-17: Version 7.6.210
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-17: Version 7.6.209
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-17: Version 7.6.208
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-17: Version 7.6.207
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-17: Version 7.6.206
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-17: Version 7.6.205
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-16: Version 7.6.204
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-16: Version 7.6.203
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-16: Version 7.6.202
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-16: Version 7.6.201
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-16: Version 7.6.200
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-16: Version 7.6.199
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-16: Version 7.6.198
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-16: Version 7.6.197
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-16: Version 7.6.196
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.195
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.194
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.193
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.192
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.191
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.190
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.189
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.188
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.187
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.186
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-15: Version 7.6.185
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.184
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.183
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.182
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.181
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.180
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.179
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.178
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.177
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.176
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.175
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.174
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.173
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-14: Version 7.6.172
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.171
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.170
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.169
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.168
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.167
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.166
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.165
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.164
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.163
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.162
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-13: Version 7.6.161
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.160
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.159
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.158
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.157
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.156
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.155
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.154
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.153
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.152
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.151
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.150
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.149
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.148
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.147
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.146
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-10: Version 7.6.145
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.144
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.143
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.142
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.141
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.140
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.139
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.138
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.137
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.136
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.135
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-09: Version 7.6.134
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.133
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.132
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.131
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.130
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.129
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.128
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.127
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.126
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.125
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.124
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.123
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.122
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.121
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.120
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-08: Version 7.6.119
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.118
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.117
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.116
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.115
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.114
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.113
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.112
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.111
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.110
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-07: Version 7.6.109
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.108
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.107
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.106
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.105
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.104
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.103
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.102
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.101
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.100
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-06: Version 7.6.99
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-04: Version 7.6.98
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-04: Version 7.6.97
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-03: Version 7.6.96
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-03: Version 7.6.95
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-03: Version 7.6.94
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-03: Version 7.6.93
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-03: Version 7.6.92
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-03: Version 7.6.91
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-03: Version 7.6.90
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-02: Version 7.6.89
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-02: Version 7.6.88
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-02: Version 7.6.87
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-02: Version 7.6.86
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-02: Version 7.6.85
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-02: Version 7.6.84
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-02: Version 7.6.83
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-01: Version 7.6.82
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-01: Version 7.6.81
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-01: Version 7.6.80
+
+ Performance and stability improvements on all platforms.
+
+
+2019-05-01: Version 7.6.79
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.78
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.77
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.76
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.75
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.74
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.73
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.72
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.71
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.70
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-30: Version 7.6.69
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.68
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.67
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.66
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.65
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.64
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.63
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.62
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.61
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.60
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.59
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.58
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.57
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.56
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-29: Version 7.6.55
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-28: Version 7.6.54
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-27: Version 7.6.53
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-27: Version 7.6.52
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.51
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.50
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.49
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.48
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.47
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.46
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.45
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.44
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.43
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.42
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.41
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-26: Version 7.6.40
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.39
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.38
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.37
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.36
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.35
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.34
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.33
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.32
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.31
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.30
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-25: Version 7.6.29
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.28
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.27
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.26
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.25
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.24
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.23
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.22
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.21
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.20
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.19
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.18
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.17
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.16
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.15
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.14
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-24: Version 7.6.13
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.12
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.11
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.10
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.9
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.8
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.7
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.6
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.5
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.4
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.3
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-23: Version 7.6.2
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-19: Version 7.6.1
+
+ Performance and stability improvements on all platforms.
+
+
+2019-04-17: Version 7.5.289
+
+ Performance and stability improvements on all platforms.
+
+
2019-04-17: Version 7.5.288
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 450bfd7862..bca59b724f 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -12,7 +12,7 @@ vars = {
'check_v8_header_includes': False,
# GN CIPD package version.
- 'gn_version': 'git_revision:64b846c96daeb3eaf08e26d8a84d8451c6cb712b',
+ 'gn_version': 'git_revision:81ee1967d3fcbc829bac1c005c3da59739c88df9',
# luci-go CIPD package version.
'luci_go': 'git_revision:25958d48e89e980e2a97daeddc977fb5e2e1fb8c',
@@ -57,15 +57,15 @@ vars = {
deps = {
'v8/build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + 'a0b2e3b2708bcf81ec00ac1738b586bcc5e04eea',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '4cebfa34c79bcfbce6a3f55d1b4f7628bb70ea8a',
'v8/third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '7e7523be4e21b0841ae815ef37521a5476f68549',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '26af0d34d281440ad0dc6d2e43fe60f32ef62da0',
'v8/third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '35f7e139f33f1ddbfdb68b65dda29aff430c3f6f',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '64e5d7d43a1ff205e3787ab6150bbc1a1837332b',
'v8/third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a959e4f0cb643003f2d75d179cede449979e3e77',
'v8/buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'd5c58b84d50d256968271db459cd29b22bff1ba2',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '0218c0f9ac9fdba00e5c27b5aca94d3a64c74f34',
'v8/buildtools/clang_format/script':
Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917',
'v8/buildtools/linux64': {
@@ -89,7 +89,7 @@ deps = {
'condition': 'host_os == "mac"',
},
'v8/buildtools/third_party/libc++/trunk':
- Var('chromium_url') + '/chromium/llvm-project/libcxx.git' + '@' + '9b96c3dbd4e89c10d9fd8364da4b65f93c6f4276',
+ Var('chromium_url') + '/chromium/llvm-project/libcxx.git' + '@' + '5938e0582bac570a41edb3d6a2217c299adc1bc6',
'v8/buildtools/third_party/libc++abi/trunk':
Var('chromium_url') + '/chromium/llvm-project/libcxxabi.git' + '@' + '0d529660e32d77d9111912d73f2c74fc5fa2a858',
'v8/buildtools/third_party/libunwind/trunk':
@@ -105,7 +105,7 @@ deps = {
'condition': 'host_os == "win"',
},
'v8/base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'ebb658ab38d1b23183458ed0430f5b11853a25a3',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'cfe8887fa6ac3170e23a68949930e28d4705a16f',
'v8/third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '4e2cea441bfd43f0863d14f57b1e1844260b9884',
'condition': 'checkout_android',
@@ -158,7 +158,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'acbf095c15e9524a0a1116792c3b6698f8e9b85b',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'a7b33124672f301cebe0ca94a67ca7d0362e3d6a',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -166,25 +166,25 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + 'a42c2f604f3ae23099e73605df7864988d289d98',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + 'ae68779f84fc36bd88ba4fe0ff78ed9ea3c91d73',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'b617b277186e03b1065ac6d43912b1c4147c2982',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'f71fb4f9a912ec945401cc49a287a759b6131026',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + 'aa60736aded9fc32a0e21a81f5fc51f6009d01f3',
+ Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '779c4f0f8488c64587b75dbb001d18c3c0c4cda9',
'v8/test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '8e5ab69e8c31135265cba570d54d41f6ade19e45',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a9abd418ccc7999b00b8c7df60b25620a7d3c541',
'v8/test/test262/harness':
- Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '9bd99c6f33be10561970bfe16f2f16a8a3d88722',
+ Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b',
'v8/third_party/qemu-linux-x64': {
'packages': [
{
@@ -206,7 +206,7 @@ deps = {
'dep_type': 'cipd',
},
'v8/tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'edee5c0b3641ab345cbe3cf29f1b1cdbd6819549',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'fe8ba88894e4b3927d3cd9e24274a0f1a688cf71',
'v8/tools/luci-go': {
'packages': [
{
@@ -236,9 +236,9 @@ deps = {
'dep_type': 'cipd',
},
'v8/test/wasm-js/data':
- Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'd14d538e5fccdc03a02948963addad10ad45b50d',
+ Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'bc7d3006bbda0de5031c2a1b9266a62fa7895019',
'v8/third_party/perfetto':
- Var('android_url') + '/platform/external/perfetto.git' + '@' + '21a33afeef568f72668acf77668a32307a363d6e',
+ Var('android_url') + '/platform/external/perfetto.git' + '@' + '10c98fe0cfae669f71610d97e9da94260a6da173',
'v8/third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91',
}
diff --git a/deps/v8/ENG_REVIEW_OWNERS b/deps/v8/ENG_REVIEW_OWNERS
new file mode 100644
index 0000000000..6b189307ad
--- /dev/null
+++ b/deps/v8/ENG_REVIEW_OWNERS
@@ -0,0 +1,9 @@
+# Eng reviewers. This is to define an escalation path for potential
+# disagreement among owners. Please consult before adding top-level
+# directories.
+
+adamk@chromium.org
+danno@chromium.org
+hpayer@chromium.org
+rmcilroy@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/INFRA_OWNERS b/deps/v8/INFRA_OWNERS
new file mode 100644
index 0000000000..4b847b21f7
--- /dev/null
+++ b/deps/v8/INFRA_OWNERS
@@ -0,0 +1,3 @@
+machenbach@chromium.org
+sergiyb@chromium.org
+tmrts@chromium.org
diff --git a/deps/v8/src/builtins/mips/OWNERS b/deps/v8/MIPS_OWNERS
index cab3679d65..cab3679d65 100644
--- a/deps/v8/src/builtins/mips/OWNERS
+++ b/deps/v8/MIPS_OWNERS
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index b2161c06ca..c428ba6d0b 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -1,42 +1,31 @@
-adamk@chromium.org
-ahaas@chromium.org
-aseemgarg@chromium.org
-bbudge@chromium.org
-binji@chromium.org
-bmeurer@chromium.org
-cbruni@chromium.org
-clemensh@chromium.org
-danno@chromium.org
-delphick@chromium.org
-gdeepti@chromium.org
-gsathya@chromium.org
-hablich@chromium.org
-herhut@chromium.org
-hpayer@chromium.org
-ishell@chromium.org
-jarin@chromium.org
-jgruber@chromium.org
-jkummerow@chromium.org
-leszeks@chromium.org
-machenbach@chromium.org
-mathias@chromium.org
-marja@chromium.org
-mlippautz@chromium.org
-mslekova@chromium.org
-mstarzinger@chromium.org
-mvstanton@chromium.org
-mythria@chromium.org
-neis@chromium.org
-petermarshall@chromium.org
-rmcilroy@chromium.org
-sergiyb@chromium.org
-sigurds@chromium.org
-szuend@chromium.org
-tebbi@chromium.org
-titzer@chromium.org
-ulan@chromium.org
-verwaest@chromium.org
-yangguo@chromium.org
+# Eng reviewer. Please reach out before adding new top-level directories.
+# Disagreement among owners should be escalated to eng reviewers.
+file://ENG_REVIEW_OWNERS
+
+# TODO(9247) remove this.
+file://COMMON_OWNERS
+
+per-file .clang-format=file://INFRA_OWNERS
+per-file .clang-tidy=file://INFRA_OWNERS
+per-file .editorconfig=file://INFRA_OWNERS
+per-file .git-blame-ignore-revs=file://INFRA_OWNERS
+per-file .gitattributes=file://INFRA_OWNERS
+per-file .gitignore=file://INFRA_OWNERS
+per-file .gn=file://INFRA_OWNERS
+per-file .vpython=file://INFRA_OWNERS
+per-file .ycm_extra_conf.py=file://INFRA_OWNERS
+per-file BUILD.gn=file://INFRA_OWNERS
+per-file DEPS=file://INFRA_OWNERS
+per-file PRESUBMIT=file://INFRA_OWNERS
+per-file codereview.settings=file://INFRA_OWNERS
+
+per-file AUTHORS=file://COMMON_OWNERS
+per-file WATCHLIST=file://COMMON_OWNERS
+
+per-file *-mips*=file://MIPS_OWNERS
+per-file *-mips64*=file://MIPS_OWNERS
+per-file *-ppc*=file://PPC_OWNERS
+per-file *-s390*=file://S390_OWNERS
# TEAM: v8-dev@googlegroups.com
# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/builtins/ppc/OWNERS b/deps/v8/PPC_OWNERS
index 6d1a8fc472..6d1a8fc472 100644
--- a/deps/v8/src/builtins/ppc/OWNERS
+++ b/deps/v8/PPC_OWNERS
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 8aea920ef4..201bf55f71 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -433,6 +433,8 @@ def _CheckMacroUndefs(input_api, output_api):
undef_match = undef_pattern.match(line)
if undef_match:
+ if "// NOLINT" in line:
+ continue
name = undef_match.group(1)
if not name in defined_macros:
errors.append('{}:{}: Macro named \'{}\' was not defined before.'
diff --git a/deps/v8/src/builtins/s390/OWNERS b/deps/v8/S390_OWNERS
index 6d1a8fc472..6d1a8fc472 100644
--- a/deps/v8/src/builtins/s390/OWNERS
+++ b/deps/v8/S390_OWNERS
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index f9b9ad3b01..f1878a18da 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -420,6 +420,9 @@
INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
TRACE_EVENT_FLAG_NONE, "value", \
static_cast<int>(value))
+#define TRACE_COUNTER_WITH_FLAG1(category_group, name, flag, value) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+ flag, "value", static_cast<int>(value))
#define TRACE_COPY_COUNTER1(category_group, name, value) \
INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
TRACE_EVENT_FLAG_COPY, "value", \
@@ -1069,7 +1072,6 @@
#define TRACE_EVENT_FLAG_HAS_PROCESS_ID (static_cast<unsigned int>(1 << 11))
#define TRACE_EVENT_FLAG_HAS_LOCAL_ID (static_cast<unsigned int>(1 << 12))
#define TRACE_EVENT_FLAG_HAS_GLOBAL_ID (static_cast<unsigned int>(1 << 13))
-#define TRACE_EVENT_FLAG_DISALLOW_POSTTASK (static_cast<unsigned int>(1 << 14))
// TODO(eseckler): Remove once we have native support for typed proto events in
// TRACE_EVENT macros.
#define TRACE_EVENT_FLAG_TYPED_PROTO_ARGS (static_cast<unsigned int>(1 << 15))
diff --git a/deps/v8/benchmarks/micro/slice-perf.js b/deps/v8/benchmarks/micro/slice-perf.js
deleted file mode 100644
index 300d212666..0000000000
--- a/deps/v8/benchmarks/micro/slice-perf.js
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-const kIterations = 1000000;
-const kIterationShort = 10000;
-const kArraySize = 64;
-
-let smi_array = [];
-for (let i = 0; i < kArraySize; ++i) smi_array[i] = Math.floor(Math.random() * 100);
-
-let start = performance.now();
-for (let x = 0; x < kIterations; ++x) {
- smi_array.slice(0);
-}
-let stop = performance.now();
-print("smi_array copy: " + (Math.floor((stop - start)*10)/10) + " ms");
-
-start = performance.now();
-for (let x = 0; x < kIterations; ++x) {
- smi_array.slice(x % kArraySize);
-}
-stop = performance.now();
-print("smi_array: " + (Math.floor((stop - start)*10)/10) + " ms");
-
-let double_array = [];
-for (let i = 0; i < kArraySize; ++i) double_array[i] = Math.random() * 100;
-start = performance.now();
-for (let x = 0; x < kIterations; ++x) {
- double_array.slice(x % kArraySize);
-}
-stop = performance.now();
-print("double_array: " + (Math.floor((stop - start)*10)/10) + " ms");
-
-let object_array = [];
-for (let i = 0; i < kArraySize; ++i) object_array[i] = new Object();
-start = performance.now();
-for (let x = 0; x < kIterations; ++x) {
- object_array.slice(x % kArraySize);
-}
-stop = performance.now();
-print("object_array: " + (Math.floor((stop - start)*10)/10) + " ms");
-
-let dictionary_array = [];
-for (let i = 0; i < kArraySize; ++i) dictionary_array[i] = new Object();
-dictionary_array[100000] = new Object();
-start = performance.now();
-for (let x = 0; x < kIterationShort; ++x) {
- dictionary_array.slice(x % kArraySize);
-}
-stop = performance.now();
-print("dictionary: " + (Math.floor((stop - start)*10)/10) + " ms");
-
-let arguments_array;
-function sloppy() {
- arguments_array = arguments;
-}
-sloppy.apply(null, smi_array);
-start = performance.now();
-for (let x = 0; x < kIterations; ++x) {
- let r = Array.prototype.slice.call(arguments_array, x % kArraySize);
-}
-stop = performance.now();
-print("arguments_array (sloppy): " + (Math.floor((stop - start)*10)/10) + " ms");
-
-function sloppy2 (a) {
- arguments_array = arguments;
-}
-sloppy2.apply(null, smi_array);
-start = performance.now();
-for (let x = 0; x < kIterations; ++x) {
- Array.prototype.slice.call(arguments_array, x % kArraySize);
-}
-stop = performance.now();
-print("arguments_array (fast aliased): " + (Math.floor((stop - start)*10)/10) + " ms");
-
-delete arguments_array[5];
-start = performance.now();
-for (let x = 0; x < kIterationShort; ++x) {
- Array.prototype.slice.call(arguments_array, x % kArraySize);
-}
-stop = performance.now();
-print("arguments_array (slow aliased): " + (Math.floor((stop - start)*10)/10) + " ms");
diff --git a/deps/v8/build_overrides/OWNERS b/deps/v8/build_overrides/OWNERS
new file mode 100644
index 0000000000..bdb1d555a4
--- /dev/null
+++ b/deps/v8/build_overrides/OWNERS
@@ -0,0 +1 @@
+file://INFRA_OWNERS
diff --git a/deps/v8/custom_deps/OWNERS b/deps/v8/custom_deps/OWNERS
index 76719caca0..bdb1d555a4 100644
--- a/deps/v8/custom_deps/OWNERS
+++ b/deps/v8/custom_deps/OWNERS
@@ -1,2 +1 @@
-machenbach@chromium.org
-sergiyb@chromium.org \ No newline at end of file
+file://INFRA_OWNERS
diff --git a/deps/v8/docs/OWNERS b/deps/v8/docs/OWNERS
new file mode 100644
index 0000000000..39b706f0cc
--- /dev/null
+++ b/deps/v8/docs/OWNERS
@@ -0,0 +1,2 @@
+hablich@chromium.org
+mathias@chromium.org
diff --git a/deps/v8/gni/OWNERS b/deps/v8/gni/OWNERS
new file mode 100644
index 0000000000..bdb1d555a4
--- /dev/null
+++ b/deps/v8/gni/OWNERS
@@ -0,0 +1 @@
+file://INFRA_OWNERS
diff --git a/deps/v8/gni/proto_library.gni b/deps/v8/gni/proto_library.gni
index 6a00276289..cf581ed46e 100644
--- a/deps/v8/gni/proto_library.gni
+++ b/deps/v8/gni/proto_library.gni
@@ -13,6 +13,12 @@ template("proto_library") {
set_sources_assignment_filter([])
+ if (host_os == "win") {
+ host_executable_suffix = ".exe"
+ } else {
+ host_executable_suffix = ""
+ }
+
# All the proto imports should be relative to the project root.
proto_in_dir = "//"
if (defined(invoker.proto_in_dir)) {
@@ -42,8 +48,9 @@ template("proto_library") {
if (defined(invoker.generator_plugin_label)) {
plugin_host_label = invoker.generator_plugin_label + "($host_toolchain)"
- plugin_path = get_label_info(plugin_host_label, "root_out_dir") + "/" +
- get_label_info(plugin_host_label, "name")
+ plugin_path =
+ get_label_info(plugin_host_label, "root_out_dir") + "/" +
+ get_label_info(plugin_host_label, "name") + host_executable_suffix
generate_with_plugin = true
} else if (defined(invoker.generator_plugin_script)) {
plugin_path = invoker.generator_plugin_script
@@ -107,7 +114,8 @@ template("proto_library") {
outputs = get_path_info(protogens, "abspath")
protoc_label = "//:protoc($host_toolchain)"
- protoc_path = get_label_info(protoc_label, "root_out_dir") + "/protoc"
+ protoc_path = get_label_info(protoc_label, "root_out_dir") + "/protoc" +
+ host_executable_suffix
args = [
# Path should be rebased because |root_build_dir| for current toolchain
# may be different from |root_out_dir| of protoc built on host toolchain.
diff --git a/deps/v8/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni
index f4f1f1d88e..f4f1f1d88e 100644
--- a/deps/v8/snapshot_toolchain.gni
+++ b/deps/v8/gni/snapshot_toolchain.gni
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 0a120df8e1..506b8428ee 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -63,6 +63,10 @@ declare_args() {
# Expose symbols for dynamic linking.
v8_expose_symbols = false
+
+ # Use Perfetto (https://perfetto.dev) as the default TracingController. Not
+ # currently implemented.
+ v8_use_perfetto = false
}
if (v8_use_external_startup_data == "") {
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index bc249cb9ec..ccdca0a8c5 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -23,6 +23,9 @@ class Mutex;
namespace platform {
namespace tracing {
+class PerfettoTracingController;
+class TraceEventListener;
+
const int kTraceMaxNumArgs = 2;
class V8_PLATFORM_EXPORT TraceObject {
@@ -238,6 +241,14 @@ class V8_PLATFORM_EXPORT TracingController
TracingController();
~TracingController() override;
void Initialize(TraceBuffer* trace_buffer);
+#ifdef V8_USE_PERFETTO
+ // Must be called before StartTracing() if V8_USE_PERFETTO is true. Provides
+ // the output stream for the JSON trace data.
+ void InitializeForPerfetto(std::ostream* output_stream);
+ // Provide an optional listener for testing that will receive trace events.
+ // Must be called before StartTracing().
+ void SetTraceEventListenerForTesting(TraceEventListener* listener);
+#endif
// v8::TracingController implementation.
const uint8_t* GetCategoryGroupEnabled(const char* category_group) override;
@@ -280,6 +291,13 @@ class V8_PLATFORM_EXPORT TracingController
std::unique_ptr<base::Mutex> mutex_;
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_;
std::atomic_bool recording_{false};
+#ifdef V8_USE_PERFETTO
+ std::atomic_bool perfetto_recording_{false};
+ std::unique_ptr<PerfettoTracingController> perfetto_tracing_controller_;
+ std::ostream* output_stream_ = nullptr;
+ std::unique_ptr<TraceEventListener> json_listener_;
+ TraceEventListener* listener_for_testing_ = nullptr;
+#endif
// Disallow copy and assign
TracingController(const TracingController&) = delete;
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index 702013588c..b96a6e29ac 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -87,7 +87,6 @@ class V8_EXPORT V8ContextInfo {
static int executionContextId(v8::Local<v8::Context> context);
- private:
// Disallow copying and allocating this one.
enum NotNullTagEnum { NotNullLiteral };
void* operator new(size_t) = delete;
@@ -131,7 +130,11 @@ class V8_EXPORT V8InspectorSession {
// Dispatching protocol messages.
static bool canDispatchMethod(const StringView& method);
virtual void dispatchProtocolMessage(const StringView& message) = 0;
- virtual std::unique_ptr<StringBuffer> stateJSON() = 0;
+ virtual V8_DEPRECATED("Use state() instead",
+ std::unique_ptr<StringBuffer> stateJSON()) {
+ return nullptr;
+ }
+ virtual std::vector<uint8_t> state() = 0;
virtual std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
supportedDomains() = 0;
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 8e700a4d4d..ef13006d13 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -48,28 +48,32 @@ const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
template <size_t tagged_ptr_size>
struct SmiTagging;
+constexpr intptr_t kIntptrAllBitsSet = intptr_t{-1};
+constexpr uintptr_t kUintptrAllBitsSet =
+ static_cast<uintptr_t>(kIntptrAllBitsSet);
+
// Smi constants for systems where tagged pointer is a 32-bit value.
template <>
struct SmiTagging<4> {
enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
+
+ static constexpr intptr_t kSmiMinValue =
+ static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
+ static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
+
V8_INLINE static int SmiToInt(const internal::Address value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down (requires >> to be sign extending).
return static_cast<int>(static_cast<intptr_t>(value)) >> shift_bits;
}
V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
- // To be representable as an tagged small integer, the two
- // most-significant bits of 'value' must be either 00 or 11 due to
- // sign-extension. To check this we add 01 to the two
- // most-significant bits, and check if the most-significant bit is 0.
- //
- // CAUTION: The original code below:
- // bool result = ((value + 0x40000000) & 0x80000000) == 0;
- // may lead to incorrect results according to the C language spec, and
- // in fact doesn't work correctly with gcc4.1.1 in some cases: The
- // compiler may produce undefined results in case of signed integer
- // overflow. The computation must be done w/ unsigned ints.
- return static_cast<uintptr_t>(value) + 0x40000000U < 0x80000000U;
+ // Is value in range [kSmiMinValue, kSmiMaxValue].
+ // Use unsigned operations in order to avoid undefined behaviour in case of
+ // signed integer overflow.
+ return (static_cast<uintptr_t>(value) -
+ static_cast<uintptr_t>(kSmiMinValue)) <=
+ (static_cast<uintptr_t>(kSmiMaxValue) -
+ static_cast<uintptr_t>(kSmiMinValue));
}
};
@@ -77,6 +81,11 @@ struct SmiTagging<4> {
template <>
struct SmiTagging<8> {
enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
+
+ static constexpr intptr_t kSmiMinValue =
+ static_cast<intptr_t>(kUintptrAllBitsSet << (kSmiValueSize - 1));
+ static constexpr intptr_t kSmiMaxValue = -(kSmiMinValue + 1);
+
V8_INLINE static int SmiToInt(const internal::Address value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
@@ -98,15 +107,15 @@ const int kApiTaggedSize = kApiSystemPointerSize;
#endif
#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
-typedef SmiTagging<kApiInt32Size> PlatformSmiTagging;
+using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
#else
-typedef SmiTagging<kApiTaggedSize> PlatformSmiTagging;
+using PlatformSmiTagging = SmiTagging<kApiTaggedSize>;
#endif
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
-const int kSmiMinValue = (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
-const int kSmiMaxValue = -(kSmiMinValue + 1);
+const int kSmiMinValue = static_cast<int>(PlatformSmiTagging::kSmiMinValue);
+const int kSmiMaxValue = static_cast<int>(PlatformSmiTagging::kSmiMaxValue);
constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
@@ -165,8 +174,6 @@ class Internals {
static const int kNodeStateMask = 0x7;
static const int kNodeStateIsWeakValue = 2;
static const int kNodeStateIsPendingValue = 3;
- static const int kNodeIsIndependentShift = 3;
- static const int kNodeIsActiveShift = 4;
static const int kFirstNonstringType = 0x40;
static const int kOddballType = 0x43;
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 556407d876..b707fafc49 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -109,7 +109,6 @@ class TaskRunner {
TaskRunner() = default;
virtual ~TaskRunner() = default;
- private:
TaskRunner(const TaskRunner&) = delete;
TaskRunner& operator=(const TaskRunner&) = delete;
};
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 672a694e07..46d3eb8aa4 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -5,6 +5,7 @@
#ifndef V8_V8_PROFILER_H_
#define V8_V8_PROFILER_H_
+#include <limits.h>
#include <unordered_set>
#include <vector>
#include "v8.h" // NOLINT(build/include)
@@ -297,6 +298,53 @@ enum CpuProfilingMode {
kCallerLineNumbers,
};
+// Determines how names are derived for functions sampled.
+enum CpuProfilingNamingMode {
+ // Use the immediate name of functions at compilation time.
+ kStandardNaming,
+ // Use more verbose naming for functions without names, inferred from scope
+ // where possible.
+ kDebugNaming,
+};
+
+/**
+ * Optional profiling attributes.
+ */
+class V8_EXPORT CpuProfilingOptions {
+ public:
+ // Indicates that the sample buffer size should not be explicitly limited.
+ static const unsigned kNoSampleLimit = UINT_MAX;
+
+ /**
+ * \param mode Type of computation of stack frame line numbers.
+ * \param max_samples The maximum number of samples that should be recorded by
+ * the profiler. Samples obtained after this limit will be
+ * discarded.
+ * \param sampling_interval_us controls the profile-specific target
+ * sampling interval. The provided sampling
+ * interval will be snapped to the next lowest
+ * non-zero multiple of the profiler's sampling
+ * interval, set via SetSamplingInterval(). If
+ * zero, the sampling interval will be equal to
+ * the profiler's sampling interval.
+ */
+ CpuProfilingOptions(CpuProfilingMode mode = kLeafNodeLineNumbers,
+ unsigned max_samples = kNoSampleLimit,
+ int sampling_interval_us = 0)
+ : mode_(mode),
+ max_samples_(max_samples),
+ sampling_interval_us_(sampling_interval_us) {}
+
+ CpuProfilingMode mode() const { return mode_; }
+ unsigned max_samples() const { return max_samples_; }
+ int sampling_interval_us() const { return sampling_interval_us_; }
+
+ private:
+ CpuProfilingMode mode_;
+ unsigned max_samples_;
+ int sampling_interval_us_;
+};
+
/**
* Interface for controlling CPU profiling. Instance of the
* profiler can be created using v8::CpuProfiler::New method.
@@ -308,7 +356,8 @@ class V8_EXPORT CpuProfiler {
* initialized. The profiler object must be disposed after use by calling
* |Dispose| method.
*/
- static CpuProfiler* New(Isolate* isolate);
+ static CpuProfiler* New(Isolate* isolate,
+ CpuProfilingNamingMode = kDebugNaming);
/**
* Synchronously collect current stack sample in all profilers attached to
@@ -339,18 +388,26 @@ class V8_EXPORT CpuProfiler {
void SetUsePreciseSampling(bool);
/**
- * Starts collecting CPU profile. Title may be an empty string. It
- * is allowed to have several profiles being collected at
- * once. Attempts to start collecting several profiles with the same
- * title are silently ignored. While collecting a profile, functions
- * from all security contexts are included in it. The token-based
- * filtering is only performed when querying for a profile.
+ * Starts collecting a CPU profile. Title may be an empty string. Several
+ * profiles may be collected at once. Attempts to start collecting several
+ * profiles with the same title are silently ignored.
+ */
+ void StartProfiling(Local<String> title, CpuProfilingOptions options);
+
+ /**
+ * Starts profiling with the same semantics as above, except with expanded
+ * parameters.
*
* |record_samples| parameter controls whether individual samples should
* be recorded in addition to the aggregated tree.
+ *
+ * |max_samples| controls the maximum number of samples that should be
+ * recorded by the profiler. Samples obtained after this limit will be
+ * discarded.
*/
- void StartProfiling(Local<String> title, CpuProfilingMode mode,
- bool record_samples = false);
+ void StartProfiling(
+ Local<String> title, CpuProfilingMode mode, bool record_samples = false,
+ unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
/**
* The same as StartProfiling above, but the CpuProfilingMode defaults to
* kLeafNodeLineNumbers mode, which was the previous default behavior of the
@@ -391,7 +448,6 @@ class V8_EXPORT CpuProfiler {
CpuProfiler& operator=(const CpuProfiler&);
};
-
/**
* HeapSnapshotEdge represents a directed connection between heap
* graph nodes: from retainers to retained nodes.
@@ -742,7 +798,6 @@ class V8_EXPORT EmbedderGraph {
*/
virtual const char* NamePrefix() { return nullptr; }
- private:
Node(const Node&) = delete;
Node& operator=(const Node&) = delete;
};
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 2496260707..29d813e427 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -195,14 +195,6 @@ class PersistentValueMapBase {
}
/**
- * Call V8::RegisterExternallyReferencedObject with the map value for given
- * key.
- */
- V8_DEPRECATED(
- "Used TracedGlobal and EmbedderHeapTracer::RegisterEmbedderReference",
- inline void RegisterExternallyReferencedObject(K& key));
-
- /**
* Return value for key and remove it from the map.
*/
Global<V> Remove(const K& key) {
@@ -353,16 +345,6 @@ class PersistentValueMapBase {
};
template <typename K, typename V, typename Traits>
-inline void
-PersistentValueMapBase<K, V, Traits>::RegisterExternallyReferencedObject(
- K& key) {
- assert(Contains(key));
- V8::RegisterExternallyReferencedObject(
- reinterpret_cast<internal::Address*>(FromVal(Traits::Get(&impl_, key))),
- reinterpret_cast<internal::Isolate*>(GetIsolate()));
-}
-
-template <typename K, typename V, typename Traits>
class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
public:
explicit PersistentValueMap(Isolate* isolate)
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index dfcd5b467d..483bdd166f 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 7
-#define V8_MINOR_VERSION 5
-#define V8_BUILD_NUMBER 288
-#define V8_PATCH_LEVEL 22
+#define V8_MINOR_VERSION 6
+#define V8_BUILD_NUMBER 303
+#define V8_PATCH_LEVEL 28
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 5c99f67258..c54b088404 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -122,7 +122,6 @@ class ExternalString;
class Isolate;
class LocalEmbedderHeapTracer;
class MicrotaskQueue;
-class NeverReadOnlySpaceObject;
struct ScriptStreamingData;
template<typename T> class CustomArguments;
class PropertyCallbackArguments;
@@ -545,38 +544,6 @@ template <class T> class PersistentBase {
*/
V8_INLINE void AnnotateStrongRetainer(const char* label);
- /**
- * Allows the embedder to tell the v8 garbage collector that a certain object
- * is alive. Only allowed when the embedder is asked to trace its heap by
- * EmbedderHeapTracer.
- */
- V8_DEPRECATED(
- "Used TracedGlobal and EmbedderHeapTracer::RegisterEmbedderReference",
- V8_INLINE void RegisterExternalReference(Isolate* isolate) const);
-
- /**
- * Marks the reference to this object independent. Garbage collector is free
- * to ignore any object groups containing this object. Weak callback for an
- * independent handle should not assume that it will be preceded by a global
- * GC prologue callback or followed by a global GC epilogue callback.
- */
- V8_DEPRECATED(
- "Weak objects are always considered independent. "
- "Use TracedGlobal when trying to use EmbedderHeapTracer. "
- "Use a strong handle when trying to keep an object alive.",
- V8_INLINE void MarkIndependent());
-
- /**
- * Marks the reference to this object as active. The scavenge garbage
- * collection should not reclaim the objects marked as active, even if the
- * object held by the handle is otherwise unreachable.
- *
- * This bit is cleared after the each garbage collection pass.
- */
- V8_DEPRECATED("Use TracedGlobal.", V8_INLINE void MarkActive());
-
- V8_DEPRECATED("See MarkIndependent.", V8_INLINE bool IsIndependent() const);
-
/** Returns true if the handle's reference is weak. */
V8_INLINE bool IsWeak() const;
@@ -1701,8 +1668,7 @@ class V8_EXPORT ScriptCompiler {
Local<String> arguments[], size_t context_extension_count,
Local<Object> context_extensions[],
CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason,
- Local<ScriptOrModule>* script_or_module_out = nullptr);
+ NoCacheReason no_cache_reason = kNoCacheNoReason);
/**
* Creates and returns code cache for the specified unbound_script.
@@ -1934,6 +1900,11 @@ class V8_EXPORT StackFrame {
* Returns whether or not the associated functions is defined in wasm.
*/
bool IsWasm() const;
+
+ /**
+ * Returns whether or not the associated function is defined by the user.
+ */
+ bool IsUserJavaScript() const;
};
@@ -1952,10 +1923,11 @@ enum StateTag {
// A RegisterState represents the current state of registers used
// by the sampling profiler API.
struct RegisterState {
- RegisterState() : pc(nullptr), sp(nullptr), fp(nullptr) {}
+ RegisterState() : pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {}
void* pc; // Instruction pointer.
void* sp; // Stack pointer.
void* fp; // Frame pointer.
+ void* lr; // Link register (or nullptr on platforms without a link register).
};
// The output structure filled up by GetStackSample API function.
@@ -2121,10 +2093,10 @@ class V8_EXPORT ValueSerializer {
void WriteDouble(double value);
void WriteRawBytes(const void* source, size_t length);
- private:
ValueSerializer(const ValueSerializer&) = delete;
void operator=(const ValueSerializer&) = delete;
+ private:
struct PrivateData;
PrivateData* private_;
};
@@ -2223,10 +2195,10 @@ class V8_EXPORT ValueDeserializer {
V8_WARN_UNUSED_RESULT bool ReadDouble(double* value);
V8_WARN_UNUSED_RESULT bool ReadRawBytes(size_t length, const void** data);
- private:
ValueDeserializer(const ValueDeserializer&) = delete;
void operator=(const ValueDeserializer&) = delete;
+ private:
struct PrivateData;
PrivateData* private_;
};
@@ -2521,9 +2493,6 @@ class V8_EXPORT Value : public Data {
V8_WARN_UNUSED_RESULT MaybeLocal<BigInt> ToBigInt(
Local<Context> context) const;
- V8_DEPRECATED("ToBoolean can never throw. Use Local version.",
- V8_WARN_UNUSED_RESULT MaybeLocal<Boolean> ToBoolean(
- Local<Context> context) const);
V8_WARN_UNUSED_RESULT MaybeLocal<Number> ToNumber(
Local<Context> context) const;
V8_WARN_UNUSED_RESULT MaybeLocal<String> ToString(
@@ -2539,16 +2508,6 @@ class V8_EXPORT Value : public Data {
V8_WARN_UNUSED_RESULT MaybeLocal<Int32> ToInt32(Local<Context> context) const;
Local<Boolean> ToBoolean(Isolate* isolate) const;
- V8_DEPRECATED("Use maybe version",
- Local<Number> ToNumber(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<String> ToString(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<Object> ToObject(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<Integer> ToInteger(Isolate* isolate) const);
- V8_DEPRECATED("Use maybe version",
- Local<Int32> ToInt32(Isolate* isolate) const);
/**
* Attempts to convert a string to an array index.
@@ -2559,9 +2518,6 @@ class V8_EXPORT Value : public Data {
bool BooleanValue(Isolate* isolate) const;
- V8_DEPRECATED("BooleanValue can never throw. Use Isolate version.",
- V8_WARN_UNUSED_RESULT Maybe<bool> BooleanValue(
- Local<Context> context) const);
V8_WARN_UNUSED_RESULT Maybe<double> NumberValue(Local<Context> context) const;
V8_WARN_UNUSED_RESULT Maybe<int64_t> IntegerValue(
Local<Context> context) const;
@@ -2765,6 +2721,10 @@ class V8_EXPORT String : public Name {
*/
virtual bool IsCacheable() const { return true; }
+ // Disallow copying and assigning.
+ ExternalStringResourceBase(const ExternalStringResourceBase&) = delete;
+ void operator=(const ExternalStringResourceBase&) = delete;
+
protected:
ExternalStringResourceBase() = default;
@@ -2794,10 +2754,6 @@ class V8_EXPORT String : public Name {
*/
virtual void Unlock() const {}
- // Disallow copying and assigning.
- ExternalStringResourceBase(const ExternalStringResourceBase&) = delete;
- void operator=(const ExternalStringResourceBase&) = delete;
-
private:
friend class internal::ExternalString;
friend class v8::String;
@@ -2881,43 +2837,23 @@ class V8_EXPORT String : public Name {
V8_INLINE static String* Cast(v8::Value* obj);
- // TODO(dcarney): remove with deprecation of New functions.
- enum NewStringType {
- kNormalString = static_cast<int>(v8::NewStringType::kNormal),
- kInternalizedString = static_cast<int>(v8::NewStringType::kInternalized)
- };
-
- /** Allocates a new string from UTF-8 data.*/
- static V8_DEPRECATED(
- "Use maybe version",
- Local<String> NewFromUtf8(Isolate* isolate, const char* data,
- NewStringType type = kNormalString,
- int length = -1));
-
/** Allocates a new string from UTF-8 data. Only returns an empty value when
* length > kMaxLength. **/
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromUtf8(
- Isolate* isolate, const char* data, v8::NewStringType type,
- int length = -1);
+ Isolate* isolate, const char* data,
+ NewStringType type = NewStringType::kNormal, int length = -1);
/** Allocates a new string from Latin-1 data. Only returns an empty value
* when length > kMaxLength. **/
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromOneByte(
- Isolate* isolate, const uint8_t* data, v8::NewStringType type,
- int length = -1);
-
- /** Allocates a new string from UTF-16 data.*/
- static V8_DEPRECATED(
- "Use maybe version",
- Local<String> NewFromTwoByte(Isolate* isolate, const uint16_t* data,
- NewStringType type = kNormalString,
- int length = -1));
+ Isolate* isolate, const uint8_t* data,
+ NewStringType type = NewStringType::kNormal, int length = -1);
/** Allocates a new string from UTF-16 data. Only returns an empty value when
* length > kMaxLength. **/
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromTwoByte(
- Isolate* isolate, const uint16_t* data, v8::NewStringType type,
- int length = -1);
+ Isolate* isolate, const uint16_t* data,
+ NewStringType type = NewStringType::kNormal, int length = -1);
/**
* Creates a new string by concatenating the left and the right strings
@@ -2956,10 +2892,6 @@ class V8_EXPORT String : public Name {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static V8_DEPRECATED(
- "Use maybe version",
- Local<String> NewExternal(Isolate* isolate,
- ExternalOneByteStringResource* resource));
static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalOneByte(
Isolate* isolate, ExternalOneByteStringResource* resource);
@@ -3356,8 +3288,8 @@ enum class IntegrityLevel { kFrozen, kSealed };
*/
class V8_EXPORT Object : public Value {
public:
- V8_DEPRECATE_SOON("Use maybe version",
- bool Set(Local<Value> key, Local<Value> value));
+ V8_DEPRECATED("Use maybe version",
+ bool Set(Local<Value> key, Local<Value> value));
/**
* Set only return Just(true) or Empty(), so if it should never fail, use
* result.Check().
@@ -3365,8 +3297,8 @@ class V8_EXPORT Object : public Value {
V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context,
Local<Value> key, Local<Value> value);
- V8_DEPRECATE_SOON("Use maybe version",
- bool Set(uint32_t index, Local<Value> value));
+ V8_DEPRECATED("Use maybe version",
+ bool Set(uint32_t index, Local<Value> value));
V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context, uint32_t index,
Local<Value> value);
@@ -3410,11 +3342,11 @@ class V8_EXPORT Object : public Value {
V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
- V8_DEPRECATE_SOON("Use maybe version", Local<Value> Get(Local<Value> key));
+ V8_DEPRECATED("Use maybe version", Local<Value> Get(Local<Value> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
Local<Value> key);
- V8_DEPRECATE_SOON("Use maybe version", Local<Value> Get(uint32_t index));
+ V8_DEPRECATED("Use maybe version", Local<Value> Get(uint32_t index));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
uint32_t index);
@@ -3898,9 +3830,6 @@ class ReturnValue {
}
// Local setters
template <typename S>
- V8_INLINE V8_DEPRECATED("Use Global<> instead",
- void Set(const Persistent<S>& handle));
- template <typename S>
V8_INLINE void Set(const Global<S>& handle);
template <typename S>
V8_INLINE void Set(const TracedGlobal<S>& handle);
@@ -5288,38 +5217,6 @@ class V8_EXPORT Date : public Object {
V8_INLINE static Date* Cast(Value* obj);
- /**
- * Time zone redetection indicator for
- * DateTimeConfigurationChangeNotification.
- *
- * kSkip indicates V8 that the notification should not trigger redetecting
- * host time zone. kRedetect indicates V8 that host time zone should be
- * redetected, and used to set the default time zone.
- *
- * The host time zone detection may require file system access or similar
- * operations unlikely to be available inside a sandbox. If v8 is run inside a
- * sandbox, the host time zone has to be detected outside the sandbox before
- * calling DateTimeConfigurationChangeNotification function.
- */
- enum class TimeZoneDetection { kSkip, kRedetect };
-
- /**
- * Notification that the embedder has changed the time zone,
- * daylight savings time, or other date / time configuration
- * parameters. V8 keeps a cache of various values used for
- * date / time computation. This notification will reset
- * those cached values for the current context so that date /
- * time configuration changes would be reflected in the Date
- * object.
- *
- * This API should not be called more than needed as it will
- * negatively impact the performance of date operations.
- */
- V8_DEPRECATED("Use Isolate::DateTimeConfigurationChangeNotification",
- static void DateTimeConfigurationChangeNotification(
- Isolate* isolate, TimeZoneDetection time_zone_detection =
- TimeZoneDetection::kSkip));
-
private:
static void CheckCast(Value* obj);
};
@@ -6006,21 +5903,6 @@ class V8_EXPORT FunctionTemplate : public Template {
void SetAcceptAnyReceiver(bool value);
/**
- * Determines whether the __proto__ accessor ignores instances of
- * the function template. If instances of the function template are
- * ignored, __proto__ skips all instances and instead returns the
- * next object in the prototype chain.
- *
- * Call with a value of true to make the __proto__ accessor ignore
- * instances of the function template. Call with a value of false
- * to make the __proto__ accessor not ignore instances of the
- * function template. By default, instances of a function template
- * are not ignored.
- */
- V8_DEPRECATED("This feature is incompatible with ES6+.",
- void SetHiddenPrototype(bool value));
-
- /**
* Sets the ReadOnly flag in the attributes of the 'prototype' property
* of functions created from this FunctionTemplate to true.
*/
@@ -6734,7 +6616,8 @@ class PromiseRejectMessage {
typedef void (*PromiseRejectCallback)(PromiseRejectMessage message);
// --- Microtasks Callbacks ---
-typedef void (*MicrotasksCompletedCallback)(Isolate*);
+V8_DEPRECATE_SOON("Use *WithData version.",
+ typedef void (*MicrotasksCompletedCallback)(Isolate*));
typedef void (*MicrotasksCompletedCallbackWithData)(Isolate*, void*);
typedef void (*MicrotaskCallback)(void* data);
@@ -6823,11 +6706,12 @@ class V8_EXPORT MicrotaskQueue {
*/
virtual int GetMicrotasksScopeDepth() const = 0;
+ MicrotaskQueue(const MicrotaskQueue&) = delete;
+ MicrotaskQueue& operator=(const MicrotaskQueue&) = delete;
+
private:
friend class internal::MicrotaskQueue;
MicrotaskQueue() = default;
- MicrotaskQueue(const MicrotaskQueue&) = delete;
- MicrotaskQueue& operator=(const MicrotaskQueue&) = delete;
};
/**
@@ -7218,6 +7102,11 @@ enum class MemoryPressureLevel { kNone, kModerate, kCritical };
*/
class V8_EXPORT EmbedderHeapTracer {
public:
+ enum TraceFlags : uint64_t {
+ kNoFlags = 0,
+ kReduceMemory = 1 << 0,
+ };
+
// Indicator for the stack state of the embedder.
enum EmbedderStackState {
kUnknown,
@@ -7234,6 +7123,24 @@ class V8_EXPORT EmbedderHeapTracer {
virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& value) = 0;
};
+ /**
+ * Summary of a garbage collection cycle. See |TraceEpilogue| on how the
+ * summary is reported.
+ */
+ struct TraceSummary {
+ /**
+ * Time spent managing the retained memory in milliseconds. This can e.g.
+ * include the time tracing through objects in the embedder.
+ */
+ double time = 0.0;
+
+ /**
+ * Memory retained by the embedder through the |EmbedderHeapTracer|
+ * mechanism in bytes.
+ */
+ size_t allocated_size = 0;
+ };
+
virtual ~EmbedderHeapTracer() = default;
/**
@@ -7256,7 +7163,8 @@ class V8_EXPORT EmbedderHeapTracer {
/**
* Called at the beginning of a GC cycle.
*/
- virtual void TracePrologue() = 0;
+ V8_DEPRECATE_SOON("Use version with flags.", virtual void TracePrologue()) {}
+ virtual void TracePrologue(TraceFlags flags);
/**
* Called to advance tracing in the embedder.
@@ -7279,9 +7187,12 @@ class V8_EXPORT EmbedderHeapTracer {
/**
* Called at the end of a GC cycle.
*
- * Note that allocation is *not* allowed within |TraceEpilogue|.
+ * Note that allocation is *not* allowed within |TraceEpilogue|. Can be
+ * overriden to fill a |TraceSummary| that is used by V8 to schedule future
+ * garbage collections.
*/
- virtual void TraceEpilogue() = 0;
+ virtual void TraceEpilogue() {}
+ virtual void TraceEpilogue(TraceSummary* trace_summary) { TraceEpilogue(); }
/**
* Called upon entering the final marking pause. No more incremental marking
@@ -7319,6 +7230,14 @@ class V8_EXPORT EmbedderHeapTracer {
void GarbageCollectionForTesting(EmbedderStackState stack_state);
/*
+ * Called by the embedder to signal newly allocated memory. Not bound to
+ * tracing phases. Embedders should trade off when increments are reported as
+ * V8 may consult global heuristics on whether to trigger garbage collection
+ * on this change.
+ */
+ void IncreaseAllocatedSize(size_t bytes);
+
+ /*
* Returns the v8::Isolate this tracer is attached too and |nullptr| if it
* is not attached to any v8::Isolate.
*/
@@ -8675,7 +8594,10 @@ class V8_EXPORT V8 {
/**
* Sets V8 flags from a string.
*/
- static void SetFlagsFromString(const char* str, int length);
+ static void SetFlagsFromString(const char* str);
+ static void SetFlagsFromString(const char* str, size_t length);
+ V8_DEPRECATED("use size_t version",
+ static void SetFlagsFromString(const char* str, int length));
/**
* Sets V8 flags from the command line.
@@ -8846,9 +8768,6 @@ class V8_EXPORT V8 {
const char* label);
static Value* Eternalize(Isolate* isolate, Value* handle);
- static void RegisterExternallyReferencedObject(internal::Address* location,
- internal::Isolate* isolate);
-
template <class K, class V, class T>
friend class PersistentValueMapBase;
@@ -9796,14 +9715,6 @@ void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
}
template <class T>
-bool PersistentBase<T>::IsIndependent() const {
- typedef internal::Internals I;
- if (this->IsEmpty()) return false;
- return I::GetNodeFlag(reinterpret_cast<internal::Address*>(this->val_),
- I::kNodeIsIndependentShift);
-}
-
-template <class T>
bool PersistentBase<T>::IsWeak() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
@@ -9870,31 +9781,6 @@ void PersistentBase<T>::AnnotateStrongRetainer(const char* label) {
}
template <class T>
-void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
- if (IsEmpty()) return;
- V8::RegisterExternallyReferencedObject(
- reinterpret_cast<internal::Address*>(this->val_),
- reinterpret_cast<internal::Isolate*>(isolate));
-}
-
-template <class T>
-void PersistentBase<T>::MarkIndependent() {
- typedef internal::Internals I;
- if (this->IsEmpty()) return;
- I::UpdateNodeFlag(reinterpret_cast<internal::Address*>(this->val_), true,
- I::kNodeIsIndependentShift);
-}
-
-template <class T>
-void PersistentBase<T>::MarkActive() {
- typedef internal::Internals I;
- if (this->IsEmpty()) return;
- I::UpdateNodeFlag(reinterpret_cast<internal::Address*>(this->val_), true,
- I::kNodeIsActiveShift);
-}
-
-
-template <class T>
void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
typedef internal::Internals I;
if (this->IsEmpty()) return;
@@ -10019,17 +9905,6 @@ void TracedGlobal<T>::SetFinalizationCallback(
template <typename T>
ReturnValue<T>::ReturnValue(internal::Address* slot) : value_(slot) {}
-template<typename T>
-template<typename S>
-void ReturnValue<T>::Set(const Persistent<S>& handle) {
- TYPE_CHECK(T, S);
- if (V8_UNLIKELY(handle.IsEmpty())) {
- *value_ = GetDefaultValue();
- } else {
- *value_ = *reinterpret_cast<internal::Address*>(*handle);
- }
-}
-
template <typename T>
template <typename S>
void ReturnValue<T>::Set(const Global<S>& handle) {
@@ -10949,7 +10824,8 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
*external_memory = amount;
int64_t allocation_diff_since_last_mc =
- *external_memory - *external_memory_at_last_mc;
+ static_cast<int64_t>(static_cast<uint64_t>(*external_memory) -
+ static_cast<uint64_t>(*external_memory_at_last_mc));
// Only check memory pressure and potentially trigger GC if the amount of
// external memory increased.
if (allocation_diff_since_last_mc > kMemoryReducerActivationLimit) {
diff --git a/deps/v8/infra/OWNERS b/deps/v8/infra/OWNERS
index c05d1d3921..a75a43666e 100644
--- a/deps/v8/infra/OWNERS
+++ b/deps/v8/infra/OWNERS
@@ -1,4 +1,3 @@
-machenbach@chromium.org
-sergiyb@chromium.org
+file://INFRA_OWNERS
+
tandrii@chromium.org
-tmrts@chromium.org \ No newline at end of file
diff --git a/deps/v8/infra/mb/gn_isolate_map.pyl b/deps/v8/infra/mb/gn_isolate_map.pyl
index 8f13079ea3..05b147d503 100644
--- a/deps/v8/infra/mb/gn_isolate_map.pyl
+++ b/deps/v8/infra/mb/gn_isolate_map.pyl
@@ -47,6 +47,10 @@
"label": "//test:v8_perf",
"type": "script",
},
+ "perf_integration": {
+ "label": "//test:v8_perf",
+ "type": "script",
+ },
"jsfunfuzz": {
"label": "//tools/jsfunfuzz:v8_jsfunfuzz",
"type": "script",
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 6d05b7f237..354415ef43 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -92,6 +92,8 @@
'V8 Linux gcc': 'release_x86_gcc',
'V8 Linux64 gcc - debug': 'debug_x64_gcc',
# FYI.
+ 'V8 iOS - sim': 'release_x64_ios_simulator',
+ 'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
'V8 Linux64 - pointer compression': 'release_x64_pointer_compression',
'V8 Linux64 - arm64 - sim - pointer compression - builder':
'release_simulate_arm64_pointer_compression',
@@ -151,6 +153,7 @@
# Arm64.
'V8 Android Arm64 - builder': 'release_android_arm64',
'V8 Android Arm64 - debug builder': 'debug_android_arm64',
+ 'V8 Arm64 - builder': 'release_arm64',
'V8 Linux - arm64 - sim': 'release_simulate_arm64',
'V8 Linux - arm64 - sim - debug': 'debug_simulate_arm64',
'V8 Linux - arm64 - sim - nosnap - debug':
@@ -191,6 +194,7 @@
'v8_android_arm64_compile_dbg': 'debug_android_arm64',
'v8_android_arm64_n5x_rel_ng': 'release_android_arm64',
'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot',
+ 'v8_ios_simulator': 'release_x64_ios_simulator',
'v8_linux_noembed_rel_ng': 'release_x86_noembed_trybot',
'v8_linux_rel_ng': 'release_x86_gcmole_trybot',
'v8_linux_optional_rel_ng': 'release_x86_trybot',
@@ -212,6 +216,8 @@
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot',
+ 'v8_linux64_nodcheck_rel_ng': 'release_x64',
+ 'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto',
'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression',
'v8_linux64_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
@@ -390,6 +396,8 @@
# Release configs for arm.
'release_arm': [
'release_bot', 'arm', 'hard_float'],
+ 'release_arm64': [
+ 'release_bot', 'arm64', 'hard_float'],
'release_android_arm': [
'release_bot', 'arm', 'android', 'minimal_symbols',
'android_strip_outputs'],
@@ -433,6 +441,8 @@
'release_x64_gcc_coverage': [
'release_bot', 'x64', 'coverage', 'gcc', 'no_custom_libcxx',
'no_sysroot'],
+ 'release_x64_ios_simulator': [
+ 'release_bot', 'x64', 'ios_simulator'],
'release_x64_internal': [
'release_bot', 'x64', 'v8_snapshot_internal'],
'release_x64_jumbo': [
@@ -490,6 +500,8 @@
'debug_bot', 'x64', 'jumbo_limited'],
'debug_x64_minimal_symbols': [
'debug_bot', 'x64', 'minimal_symbols'],
+ 'debug_x64_perfetto': [
+ 'debug_bot', 'x64', 'perfetto'],
'debug_x64_trybot': [
'debug_trybot', 'x64'],
'debug_x64_trybot_custom': [
@@ -646,6 +658,10 @@
'gn_args': 'arm_float_abi="hard"',
},
+ 'ios_simulator': {
+ 'gn_args': 'target_cpu="x64" target_os="ios"',
+ },
+
'jumbo': {
'gn_args': 'use_jumbo_build=true',
},
@@ -699,6 +715,10 @@
'gn_args': 'use_sysroot=false',
},
+ 'perfetto': {
+ 'gn_args': 'v8_use_perfetto=true',
+ },
+
'release': {
'gn_args': 'is_debug=false',
},
diff --git a/deps/v8/infra/testing/OWNERS b/deps/v8/infra/testing/OWNERS
index c8693c972c..50b5741785 100644
--- a/deps/v8/infra/testing/OWNERS
+++ b/deps/v8/infra/testing/OWNERS
@@ -1,5 +1,3 @@
set noparent
-machenbach@chromium.org
-sergiyb@chromium.org
-tmrts@chromium.org \ No newline at end of file
+file://INFRA_OWNERS
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index 00e385711a..0d39ea31f7 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -51,6 +51,7 @@
'v8_linux_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -65,6 +66,9 @@
],
},
'v8_linux_gc_stress_dbg': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mjsunit', 'variant': 'slow_path', 'test_args': ['--gc-stress'], 'shards': 2},
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 5},
@@ -81,6 +85,7 @@
'v8_linux_nodcheck_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -94,11 +99,17 @@
],
},
'v8_linux_noembed_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 2},
],
},
'v8_linux_noi18n_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla', 'variant': 'default'},
{'name': 'test262', 'variant': 'default'},
@@ -106,6 +117,9 @@
],
},
'v8_linux_nosnap_rel': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 6},
],
@@ -121,6 +135,7 @@
'v8_linux_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -140,6 +155,7 @@
'v8_linux_optional_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
# Code serializer.
@@ -193,6 +209,9 @@
],
},
'v8_linux_verify_csa_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 2},
],
@@ -200,6 +219,9 @@
##############################################################################
# Linux32 with arm simulators
'v8_linux_arm_dbg': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla'},
@@ -210,11 +232,17 @@
],
},
'v8_linux_arm_lite_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
],
},
'v8_linux_arm_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mjsunit_sp_frame_access', 'shards': 2},
{'name': 'mozilla', 'shards': 2},
@@ -227,6 +255,9 @@
##############################################################################
# Linux64
'v8_linux64_asan_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'test262_variants', 'shards': 7},
{'name': 'v8testing', 'shards': 3},
@@ -235,6 +266,9 @@
],
},
'v8_linux64_cfi_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'benchmarks'},
{'name': 'mozilla'},
@@ -246,6 +280,7 @@
'v8_linux64_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -261,6 +296,9 @@
],
},
'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{
'name': 'mjsunit',
@@ -270,6 +308,9 @@
],
},
'v8_linux64_fyi_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
# Stress sampling.
{'name': 'mjsunit', 'variant': 'stress_sampling'},
@@ -280,12 +321,43 @@
],
},
'v8_linux64_msan_rel': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'test262', 'shards': 2},
{'name': 'v8testing', 'shards': 5},
],
},
+ 'v8_linux64_nodcheck_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
+ },
+ 'tests': [
+ {'name': 'benchmarks'},
+ {'name': 'benchmarks', 'variant': 'extra'},
+ {'name': 'mozilla'},
+ {'name': 'mozilla', 'variant': 'extra'},
+ {'name': 'perf_integration'},
+ {'name': 'test262_variants', 'shards': 2},
+ {'name': 'test262_variants', 'variant': 'extra', 'shards': 2},
+ {'name': 'v8testing', 'shards': 2},
+ {'name': 'v8testing', 'variant': 'extra'},
+ ],
+ },
+ 'v8_linux64_perfetto_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'v8_linux64_pointer_compression_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 3},
],
@@ -293,6 +365,7 @@
'v8_linux64_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
# TODO(machenbach): Add benchmarks.
@@ -313,6 +386,7 @@
'v8_linux64_rel_xg': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
{'name': 'v8initializers'},
@@ -320,11 +394,17 @@
],
},
'v8_linux64_sanitizer_coverage_rel': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 3},
],
},
'v8_linux64_tsan_rel': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'benchmarks'},
{'name': 'mozilla'},
@@ -335,16 +415,25 @@
],
},
'v8_linux64_tsan_isolates_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7},
],
},
'v8_linux64_ubsan_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 2},
],
},
'v8_linux64_verify_csa_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 2},
],
@@ -352,6 +441,9 @@
##############################################################################
# Linux64 with arm64 simulators
'v8_linux_arm64_dbg': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla', 'shards': 2},
@@ -362,11 +454,17 @@
],
},
'v8_linux_arm64_gc_stress_dbg': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 10},
],
},
'v8_linux_arm64_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mjsunit_sp_frame_access', 'shards': 2},
{'name': 'mozilla', 'shards': 2},
@@ -377,6 +475,9 @@
],
},
'v8_linux64_arm64_pointer_compression_rel_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -530,6 +631,9 @@
##############################################################################
# Main.
'V8 Fuzzer': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -542,6 +646,7 @@
'V8 Linux': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -582,6 +687,9 @@
],
},
'V8 Linux - arm64 - sim - MSAN': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'test262', 'shards': 3},
{'name': 'v8testing', 'shards': 4},
@@ -590,6 +698,7 @@
'V8 Linux - debug': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -649,26 +758,38 @@
],
},
'V8 Linux - noembed': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing'},
],
},
'V8 Linux - noembed - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 3},
],
},
'V8 Linux - full debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
'priority': 35,
},
'tests': [
- {'name': 'v8testing', 'variant': 'default', 'shards': 3},
+ {'name': 'v8testing', 'variant': 'default', 'shards': 4},
],
},
'V8 Linux - gc stress': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{
'name': 'd8testing',
@@ -684,6 +805,9 @@
],
},
'V8 Linux - noi18n - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla', 'variant': 'default'},
{'name': 'test262', 'variant': 'default'},
@@ -691,6 +815,9 @@
],
},
'V8 Linux - nosnap': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -703,6 +830,9 @@
],
},
'V8 Linux - nosnap - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -713,6 +843,9 @@
],
},
'V8 Linux - predictable': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'benchmarks'},
{'name': 'd8testing'},
@@ -720,6 +853,9 @@
],
},
'V8 Linux - shared': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -727,6 +863,9 @@
],
},
'V8 Linux - verify csa': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing'},
],
@@ -742,6 +881,7 @@
'V8 Linux64': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -750,6 +890,7 @@
{'name': 'mozilla'},
{'name': 'mozilla', 'variant': 'extra'},
{'name': 'optimize_for_size'},
+ {'name': 'perf_integration'},
{'name': 'test262_variants', 'shards': 2},
{'name': 'test262_variants', 'variant': 'extra'},
{'name': 'v8initializers'},
@@ -775,6 +916,9 @@
],
},
'V8 Linux64 - cfi': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'benchmarks'},
{'name': 'mozilla'},
@@ -784,6 +928,9 @@
],
},
'V8 Linux64 - custom snapshot - debug': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mjsunit', 'test_args': ['--no-harness']},
],
@@ -791,6 +938,7 @@
'V8 Linux64 - debug': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-14.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -825,6 +973,9 @@
],
},
'V8 Linux64 - debug - fyi': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
# Infra staging.
{'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
@@ -833,7 +984,23 @@
{'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1},
],
},
+ 'V8 Linux64 - debug - perfetto': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 3600,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 2},
+ ],
+ },
'V8 Linux64 - fyi': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
# Infra staging.
{'name': 'v8testing', 'variant': 'infra_staging', 'shards': 1},
@@ -843,21 +1010,33 @@
],
},
'V8 Linux64 - gcov coverage': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing'},
],
},
'V8 Linux64 - internal snapshot': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing'},
],
},
'V8 Linux64 - pointer compression': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 2},
],
},
'V8 Linux64 - shared': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -865,11 +1044,17 @@
],
},
'V8 Linux64 - verify csa': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing'},
],
},
'V8 Linux64 ASAN': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'test262_variants', 'shards': 5},
{'name': 'v8testing', 'shards': 2},
@@ -878,6 +1063,9 @@
],
},
'V8 Linux64 GC Stress - custom snapshot': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{
'name': 'mjsunit',
@@ -887,6 +1075,9 @@
],
},
'V8 Linux64 TSAN': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'benchmarks'},
{'name': 'mozilla'},
@@ -897,6 +1088,9 @@
],
},
'V8 Linux64 TSAN - concurrent marking': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -924,11 +1118,17 @@
],
},
'V8 Linux64 TSAN - isolates': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7},
],
},
'V8 Linux64 UBSan': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262', 'shards': 2},
@@ -1068,6 +1268,7 @@
'swarming_dimensions': {
'cores': '2',
'cpu': 'armv7l',
+ 'os': 'Ubuntu-14.04',
},
'swarming_task_attrs': {
'expiration': 21600,
@@ -1114,6 +1315,7 @@
'swarming_dimensions': {
'cores': '2',
'cpu': 'armv7l',
+ 'os': 'Ubuntu-14.04',
},
'swarming_task_attrs': {
'expiration': 21600,
@@ -1163,6 +1365,7 @@
'swarming_dimensions': {
'cores': '2',
'cpu': 'armv7l',
+ 'os': 'Ubuntu-14.04',
},
'swarming_task_attrs': {
'expiration': 21600,
@@ -1190,6 +1393,9 @@
],
},
'V8 Linux - arm - sim': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla'},
@@ -1226,6 +1432,9 @@
],
},
'V8 Linux - arm - sim - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla'},
@@ -1273,16 +1482,25 @@
],
},
'V8 Linux - arm - sim - lite': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 2},
],
},
'V8 Linux - arm - sim - lite - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
],
},
'V8 Linux - arm64 - sim': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla'},
@@ -1293,6 +1511,9 @@
],
},
'V8 Linux - arm64 - sim - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
# TODO(machenbach): Remove longer timeout when this builder scales better.
'swarming_task_attrs': {
'hard_timeout': 3600,
@@ -1307,6 +1528,9 @@
],
},
'V8 Linux - arm64 - sim - gc stress': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 7200,
@@ -1321,6 +1545,9 @@
],
},
'V8 Linux - mips64el - sim': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -1332,6 +1559,9 @@
],
},
'V8 Linux - mipsel - sim': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -1343,6 +1573,9 @@
],
},
'V8 Linux - ppc64 - sim': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -1353,6 +1586,9 @@
],
},
'V8 Linux - s390x - sim': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -1363,6 +1599,9 @@
],
},
'V8 Linux64 - arm64 - sim - pointer compression': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 14400,
'hard_timeout': 3600,
@@ -1388,6 +1627,9 @@
##############################################################################
# Clusterfuzz.
'V8 NumFuzz': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 13800,
'hard_timeout': 4200,
@@ -1402,6 +1644,9 @@
],
},
'V8 NumFuzz - TSAN': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 13800,
'hard_timeout': 4200,
@@ -1447,6 +1692,9 @@
],
},
'V8 NumFuzz - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'swarming_task_attrs': {
'expiration': 13800,
'hard_timeout': 4200,
@@ -1501,6 +1749,9 @@
##############################################################################
# Branches.
'V8 Linux - beta branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1508,6 +1759,9 @@
],
},
'V8 Linux - beta branch - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1515,6 +1769,9 @@
],
},
'V8 Linux - stable branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1522,6 +1779,9 @@
],
},
'V8 Linux - stable branch - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1529,6 +1789,9 @@
],
},
'V8 Linux64 - beta branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1536,6 +1799,9 @@
],
},
'V8 Linux64 - beta branch - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1543,6 +1809,9 @@
],
},
'V8 Linux64 - stable branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1550,6 +1819,9 @@
],
},
'V8 Linux64 - stable branch - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1557,6 +1829,9 @@
],
},
'V8 arm - sim - beta branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1564,6 +1839,9 @@
],
},
'V8 arm - sim - beta branch - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1571,6 +1849,9 @@
],
},
'V8 arm - sim - stable branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1578,6 +1859,9 @@
],
},
'V8 arm - sim - stable branch - debug': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'mozilla'},
{'name': 'test262'},
@@ -1585,41 +1869,65 @@
],
},
'V8 mips64el - sim - beta branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'unittests'},
],
},
'V8 mips64el - sim - stable branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'unittests'},
],
},
'V8 mipsel - sim - beta branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 4},
],
},
'V8 mipsel - sim - stable branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'v8testing', 'shards': 4},
],
},
'V8 ppc64 - sim - beta branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'unittests'},
],
},
'V8 ppc64 - sim - stable branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'unittests'},
],
},
'V8 s390x - sim - beta branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'unittests'},
],
},
'V8 s390x - sim - stable branch': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-14.04',
+ },
'tests': [
{'name': 'unittests'},
],
diff --git a/deps/v8/samples/OWNERS b/deps/v8/samples/OWNERS
new file mode 100644
index 0000000000..9c4f2439aa
--- /dev/null
+++ b/deps/v8/samples/OWNERS
@@ -0,0 +1,2 @@
+mathias@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 74c48a6ddd..d24e647b24 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -8,6 +8,7 @@ include_rules = [
"+src/compiler/code-assembler.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
+ "+src/heap/combined-heap.h",
"+src/heap/embedder-tracing.h",
"+src/heap/factory.h",
"+src/heap/factory-inl.h",
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api/api-arguments-inl.h
index 7f83708b96..05bb35786a 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api/api-arguments-inl.h
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_API_ARGUMENTS_INL_H_
-#define V8_API_ARGUMENTS_INL_H_
+#ifndef V8_API_API_ARGUMENTS_INL_H_
+#define V8_API_API_ARGUMENTS_INL_H_
-#include "src/api-arguments.h"
+#include "src/api/api-arguments.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/debug/debug.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/slots-inl.h"
#include "src/tracing/trace-event.h"
-#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -42,7 +43,7 @@ Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) {
// Check the ReturnValue.
FullObjectSlot slot = slot_at(kReturnValueOffset);
// Nothing was set, return empty handle as per previous behaviour.
- if ((*slot)->IsTheHole(isolate)) return Handle<V>();
+ if ((*slot).IsTheHole(isolate)) return Handle<V>();
Handle<V> result = Handle<V>::cast(Handle<Object>(slot.location()));
result->VerifyApiCallResultType();
return result;
@@ -143,7 +144,7 @@ Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo handler) {
LOG(isolate, ApiObjectAccess("call", holder()));
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
v8::FunctionCallback f =
- v8::ToCData<v8::FunctionCallback>(handler->callback());
+ v8::ToCData<v8::FunctionCallback>(handler.callback());
Handle<Object> receiver_check_unsupported;
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects &&
!isolate->debug()->PerformSideEffectCheckForCallback(
@@ -367,4 +368,4 @@ Handle<Object> PropertyCallbackArguments::CallAccessorSetter(
} // namespace internal
} // namespace v8
-#endif // V8_API_ARGUMENTS_INL_H_
+#endif // V8_API_API_ARGUMENTS_INL_H_
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api/api-arguments.cc
index 76e821cad7..51317fd0bf 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api/api-arguments.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-arguments.h"
+#include "src/api/api-arguments.h"
-#include "src/api-arguments-inl.h"
+#include "src/api/api-arguments-inl.h"
namespace v8 {
namespace internal {
@@ -28,8 +28,8 @@ PropertyCallbackArguments::PropertyCallbackArguments(
HeapObject the_hole = ReadOnlyRoots(isolate).the_hole_value();
slot_at(T::kReturnValueDefaultValueIndex).store(the_hole);
slot_at(T::kReturnValueIndex).store(the_hole);
- DCHECK((*slot_at(T::kHolderIndex))->IsHeapObject());
- DCHECK((*slot_at(T::kIsolateIndex))->IsSmi());
+ DCHECK((*slot_at(T::kHolderIndex)).IsHeapObject());
+ DCHECK((*slot_at(T::kIsolateIndex)).IsSmi());
}
FunctionCallbackArguments::FunctionCallbackArguments(
@@ -46,8 +46,8 @@ FunctionCallbackArguments::FunctionCallbackArguments(
HeapObject the_hole = ReadOnlyRoots(isolate).the_hole_value();
slot_at(T::kReturnValueDefaultValueIndex).store(the_hole);
slot_at(T::kReturnValueIndex).store(the_hole);
- DCHECK((*slot_at(T::kHolderIndex))->IsHeapObject());
- DCHECK((*slot_at(T::kIsolateIndex))->IsSmi());
+ DCHECK((*slot_at(T::kHolderIndex)).IsHeapObject());
+ DCHECK((*slot_at(T::kIsolateIndex)).IsSmi());
}
} // namespace internal
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api/api-arguments.h
index 4f1ea8c85a..794681b71d 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api/api-arguments.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_API_ARGUMENTS_H_
-#define V8_API_ARGUMENTS_H_
+#ifndef V8_API_API_ARGUMENTS_H_
+#define V8_API_API_ARGUMENTS_H_
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/debug/debug.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "src/objects/slots.h"
-#include "src/visitors.h"
+#include "src/objects/visitors.h"
namespace v8 {
namespace internal {
@@ -60,8 +60,8 @@ class CustomArguments : public CustomArgumentsBase {
class PropertyCallbackArguments
: public CustomArguments<PropertyCallbackInfo<Value> > {
public:
- typedef PropertyCallbackInfo<Value> T;
- typedef CustomArguments<T> Super;
+ using T = PropertyCallbackInfo<Value>;
+ using Super = CustomArguments<T>;
static const int kArgsLength = T::kArgsLength;
static const int kThisIndex = T::kThisIndex;
static const int kHolderIndex = T::kHolderIndex;
@@ -150,8 +150,8 @@ class PropertyCallbackArguments
class FunctionCallbackArguments
: public CustomArguments<FunctionCallbackInfo<Value> > {
public:
- typedef FunctionCallbackInfo<Value> T;
- typedef CustomArguments<T> Super;
+ using T = FunctionCallbackInfo<Value>;
+ using Super = CustomArguments<T>;
static const int kArgsLength = T::kArgsLength;
static const int kHolderIndex = T::kHolderIndex;
static const int kDataIndex = T::kDataIndex;
@@ -186,4 +186,4 @@ class FunctionCallbackArguments
} // namespace internal
} // namespace v8
-#endif // V8_API_ARGUMENTS_H_
+#endif // V8_API_API_ARGUMENTS_H_
diff --git a/deps/v8/src/api-inl.h b/deps/v8/src/api/api-inl.h
index 9ccb9e4a6a..d152412b47 100644
--- a/deps/v8/src/api-inl.h
+++ b/deps/v8/src/api/api-inl.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_API_INL_H_
-#define V8_API_INL_H_
+#ifndef V8_API_API_INL_H_
+#define V8_API_API_INL_H_
-#include "src/api.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/foreign-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/stack-frame-info.h"
namespace v8 {
@@ -18,13 +18,13 @@ inline T ToCData(v8::internal::Object obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
if (obj == v8::internal::Smi::kZero) return nullptr;
return reinterpret_cast<T>(
- v8::internal::Foreign::cast(obj)->foreign_address());
+ v8::internal::Foreign::cast(obj).foreign_address());
}
template <>
inline v8::internal::Address ToCData(v8::internal::Object obj) {
if (obj == v8::internal::Smi::kZero) return v8::internal::kNullAddress;
- return v8::internal::Foreign::cast(obj)->foreign_address();
+ return v8::internal::Foreign::cast(obj).foreign_address();
}
template <typename T>
@@ -117,7 +117,7 @@ MAKE_TO_LOCAL(ScriptOrModuleToLocal, Script, ScriptOrModule)
DCHECK(that == nullptr || \
v8::internal::Object( \
*reinterpret_cast<const v8::internal::Address*>(that)) \
- ->Is##To()); \
+ .Is##To()); \
return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::Address*>( \
const_cast<v8::From*>(that))); \
@@ -151,4 +151,4 @@ Handle<Context> HandleScopeImplementer::LastEnteredOrMicrotaskContext() {
} // namespace internal
} // namespace v8
-#endif // V8_API_INL_H_
+#endif // V8_API_API_INL_H_
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api/api-natives.cc
index 7b5541a1c3..c22b7c47f9 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api/api-natives.cc
@@ -2,21 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-natives.h"
+#include "src/api/api-natives.h"
-#include "src/api-inl.h"
-#include "src/isolate-inl.h"
-#include "src/lookup.h"
-#include "src/message-template.h"
+#include "src/api/api-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/lookup.h"
#include "src/objects/property-cell.h"
#include "src/objects/templates.h"
namespace v8 {
namespace internal {
-
namespace {
class InvokeScope {
@@ -66,12 +65,12 @@ MaybeHandle<Object> DefineAccessorProperty(
Handle<Object> getter, Handle<Object> setter, PropertyAttributes attributes,
bool force_instantiate) {
DCHECK(!getter->IsFunctionTemplateInfo() ||
- !FunctionTemplateInfo::cast(*getter)->do_not_cache());
+ !FunctionTemplateInfo::cast(*getter).do_not_cache());
DCHECK(!setter->IsFunctionTemplateInfo() ||
- !FunctionTemplateInfo::cast(*setter)->do_not_cache());
+ !FunctionTemplateInfo::cast(*setter).do_not_cache());
if (getter->IsFunctionTemplateInfo()) {
if (force_instantiate ||
- FunctionTemplateInfo::cast(*getter)->BreakAtEntry()) {
+ FunctionTemplateInfo::cast(*getter).BreakAtEntry()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, getter,
InstantiateFunction(isolate,
@@ -81,7 +80,7 @@ MaybeHandle<Object> DefineAccessorProperty(
}
if (setter->IsFunctionTemplateInfo()) {
if (force_instantiate ||
- FunctionTemplateInfo::cast(*setter)->BreakAtEntry()) {
+ FunctionTemplateInfo::cast(*setter).BreakAtEntry()) {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, setter,
InstantiateFunction(isolate,
@@ -96,7 +95,6 @@ MaybeHandle<Object> DefineAccessorProperty(
return object;
}
-
MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> name,
@@ -126,7 +124,6 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
return value;
}
-
void DisableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
Handle<Map> old_map(object->map(), isolate);
// Copy map so it won't interfere constructor's initial map.
@@ -135,7 +132,6 @@ void DisableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
}
-
void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
Handle<Map> old_map(object->map(), isolate);
// Copy map so it won't interfere constructor's initial map.
@@ -145,12 +141,11 @@ void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
JSObject::MigrateToMap(object, new_map);
}
-
class AccessCheckDisableScope {
public:
AccessCheckDisableScope(Isolate* isolate, Handle<JSObject> obj)
: isolate_(isolate),
- disabled_(obj->map()->is_access_check_needed()),
+ disabled_(obj->map().is_access_check_needed()),
obj_(obj) {
if (disabled_) {
DisableAccessChecks(isolate_, obj_);
@@ -193,11 +188,11 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
int max_number_of_properties = 0;
TemplateInfoT info = *data;
while (!info.is_null()) {
- Object props = info->property_accessors();
- if (!props->IsUndefined(isolate)) {
- max_number_of_properties += TemplateList::cast(props)->length();
+ Object props = info.property_accessors();
+ if (!props.IsUndefined(isolate)) {
+ max_number_of_properties += TemplateList::cast(props).length();
}
- info = info->GetParent(isolate);
+ info = info.GetParent(isolate);
}
if (max_number_of_properties > 0) {
@@ -210,7 +205,7 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
temp = handle(temp->GetParent(isolate), isolate)) {
// Accumulate accessors.
Object maybe_properties = temp->property_accessors();
- if (!maybe_properties->IsUndefined(isolate)) {
+ if (!maybe_properties.IsUndefined(isolate)) {
valid_descriptors = AccessorInfo::AppendUnique(
isolate, handle(maybe_properties, isolate), array,
valid_descriptors);
@@ -228,7 +223,7 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
}
Object maybe_property_list = data->property_list();
- if (maybe_property_list->IsUndefined(isolate)) return obj;
+ if (maybe_property_list.IsUndefined(isolate)) return obj;
Handle<TemplateList> properties(TemplateList::cast(maybe_property_list),
isolate);
if (properties->length() == 0) return obj;
@@ -237,22 +232,24 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
for (int c = 0; c < data->number_of_properties(); c++) {
auto name = handle(Name::cast(properties->get(i++)), isolate);
Object bit = properties->get(i++);
- if (bit->IsSmi()) {
+ if (bit.IsSmi()) {
PropertyDetails details(Smi::cast(bit));
PropertyAttributes attributes = details.attributes();
PropertyKind kind = details.kind();
if (kind == kData) {
auto prop_data = handle(properties->get(i++), isolate);
- RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
- prop_data, attributes),
- JSObject);
+ RETURN_ON_EXCEPTION(
+ isolate,
+ DefineDataProperty(isolate, obj, name, prop_data, attributes),
+ JSObject);
} else {
auto getter = handle(properties->get(i++), isolate);
auto setter = handle(properties->get(i++), isolate);
RETURN_ON_EXCEPTION(
- isolate, DefineAccessorProperty(isolate, obj, name, getter, setter,
- attributes, is_hidden_prototype),
+ isolate,
+ DefineAccessorProperty(isolate, obj, name, getter, setter,
+ attributes, is_hidden_prototype),
JSObject);
}
} else {
@@ -266,9 +263,10 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
static_cast<v8::Intrinsic>(Smi::ToInt(properties->get(i++)));
auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate);
- RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
- prop_data, attributes),
- JSObject);
+ RETURN_ON_EXCEPTION(
+ isolate,
+ DefineDataProperty(isolate, obj, name, prop_data, attributes),
+ JSObject);
}
}
return obj;
@@ -289,20 +287,20 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
Handle<FixedArray> fast_cache =
isolate->fast_template_instantiations_cache();
- return fast_cache->GetValue<JSObject>(isolate, serial_number - 1);
- } else if (caching_mode == CachingMode::kUnlimited ||
- (serial_number <=
- TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
+ Handle<Object> object{fast_cache->get(serial_number - 1), isolate};
+ if (object->IsUndefined(isolate)) return {};
+ return Handle<JSObject>::cast(object);
+ }
+ if (caching_mode == CachingMode::kUnlimited ||
+ (serial_number <= TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<SimpleNumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
int entry = slow_cache->FindEntry(isolate, serial_number);
- if (entry == SimpleNumberDictionary::kNotFound) {
- return MaybeHandle<JSObject>();
+ if (entry != SimpleNumberDictionary::kNotFound) {
+ return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
}
- return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
- } else {
- return MaybeHandle<JSObject>();
}
+ return {};
}
void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
@@ -338,7 +336,7 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
Handle<FixedArray> fast_cache =
isolate->fast_template_instantiations_cache();
- DCHECK(!fast_cache->get(serial_number - 1)->IsUndefined(isolate));
+ DCHECK(!fast_cache->get(serial_number - 1).IsUndefined(isolate));
fast_cache->set_undefined(serial_number - 1);
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
@@ -356,11 +354,11 @@ bool IsSimpleInstantiation(Isolate* isolate, ObjectTemplateInfo info,
JSReceiver new_target) {
DisallowHeapAllocation no_gc;
- if (!new_target->IsJSFunction()) return false;
+ if (!new_target.IsJSFunction()) return false;
JSFunction fun = JSFunction::cast(new_target);
- if (fun->shared()->function_data() != info->constructor()) return false;
- if (info->immutable_proto()) return false;
- return fun->context()->native_context() == isolate->raw_native_context();
+ if (fun.shared().function_data() != info.constructor()) return false;
+ if (info.immutable_proto()) return false;
+ return fun.context().native_context() == isolate->raw_native_context();
}
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
@@ -389,7 +387,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
if (constructor.is_null()) {
Object maybe_constructor_info = info->constructor();
- if (maybe_constructor_info->IsUndefined(isolate)) {
+ if (maybe_constructor_info.IsUndefined(isolate)) {
constructor = isolate->object_function();
} else {
// Enter a new scope. Recursion could otherwise create a lot of handles.
@@ -473,9 +471,9 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<Object> prototype;
if (!data->remove_prototype()) {
Object prototype_templ = data->GetPrototypeTemplate();
- if (prototype_templ->IsUndefined(isolate)) {
+ if (prototype_templ.IsUndefined(isolate)) {
Object protoype_provider_templ = data->GetPrototypeProviderTemplate();
- if (protoype_provider_templ->IsUndefined(isolate)) {
+ if (protoype_provider_templ.IsUndefined(isolate)) {
prototype = isolate->factory()->NewJSObject(isolate->object_function());
} else {
ASSIGN_RETURN_ON_EXCEPTION(
@@ -488,11 +486,11 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
InstantiateObject(
isolate,
handle(ObjectTemplateInfo::cast(prototype_templ), isolate),
- Handle<JSReceiver>(), data->hidden_prototype(), true),
+ Handle<JSReceiver>(), false, true),
JSFunction);
}
Object parent = data->GetParentTemplate();
- if (!parent->IsUndefined(isolate)) {
+ if (!parent.IsUndefined(isolate)) {
Handle<Object> parent_prototype;
ASSIGN_RETURN_ON_EXCEPTION(isolate, parent_prototype,
GetInstancePrototype(isolate, parent),
@@ -504,8 +502,8 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
InstanceType function_type =
(!data->needs_access_check() &&
- data->GetNamedPropertyHandler()->IsUndefined(isolate) &&
- data->GetIndexedPropertyHandler()->IsUndefined(isolate))
+ data->GetNamedPropertyHandler().IsUndefined(isolate) &&
+ data->GetIndexedPropertyHandler().IsUndefined(isolate))
? JS_API_OBJECT_TYPE
: JS_SPECIAL_API_OBJECT_TYPE;
@@ -517,7 +515,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
function);
}
MaybeHandle<JSObject> result =
- ConfigureInstance(isolate, function, data, data->hidden_prototype());
+ ConfigureInstance(isolate, function, data, false);
if (result.is_null()) {
// Uncache on error.
if (serial_number) {
@@ -529,12 +527,11 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
return function;
}
-
void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
int length, Handle<Object>* data) {
Object maybe_list = templ->property_list();
Handle<TemplateList> list;
- if (maybe_list->IsUndefined(isolate)) {
+ if (maybe_list.IsUndefined(isolate)) {
list = TemplateList::New(isolate, length);
} else {
list = handle(TemplateList::cast(maybe_list), isolate);
@@ -598,7 +595,6 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
}
-
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, v8::Intrinsic intrinsic,
PropertyAttributes attributes) {
@@ -610,7 +606,6 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
}
-
void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<TemplateInfo> info,
Handle<Name> name,
@@ -623,13 +618,12 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
}
-
void ApiNatives::AddNativeDataProperty(Isolate* isolate,
Handle<TemplateInfo> info,
Handle<AccessorInfo> property) {
Object maybe_list = info->property_accessors();
Handle<TemplateList> list;
- if (maybe_list->IsUndefined(isolate)) {
+ if (maybe_list.IsUndefined(isolate)) {
list = TemplateList::New(isolate, 1);
} else {
list = handle(TemplateList::cast(maybe_list), isolate);
@@ -653,7 +647,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
if (obj->remove_prototype()) {
DCHECK(prototype.is_null());
- DCHECK(result->shared()->IsApiFunction());
+ DCHECK(result->shared().IsApiFunction());
DCHECK(!result->IsConstructor());
DCHECK(!result->has_prototype_slot());
return result;
@@ -669,7 +663,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
if (prototype->IsTheHole(isolate)) {
prototype = isolate->factory()->NewFunctionPrototype(result);
- } else if (obj->GetPrototypeProviderTemplate()->IsUndefined(isolate)) {
+ } else if (obj->GetPrototypeProviderTemplate().IsUndefined(isolate)) {
JSObject::AddProperty(isolate, Handle<JSObject>::cast(prototype),
isolate->factory()->constructor_string(), result,
DONT_ENUM);
@@ -677,7 +671,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
int embedder_field_count = 0;
bool immutable_proto = false;
- if (!obj->GetInstanceTemplate()->IsUndefined(isolate)) {
+ if (!obj->GetInstanceTemplate().IsUndefined(isolate)) {
Handle<ObjectTemplateInfo> GetInstanceTemplate = Handle<ObjectTemplateInfo>(
ObjectTemplateInfo::cast(obj->GetInstanceTemplate()), isolate);
embedder_field_count = GetInstanceTemplate->embedder_field_count();
@@ -700,7 +694,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// undetectable and callable. If we ever see the need to have an object
// that is undetectable but not callable, we need to update the types.h
// to allow encoding this.
- CHECK(!obj->GetInstanceCallHandler()->IsUndefined(isolate));
+ CHECK(!obj->GetInstanceCallHandler().IsUndefined(isolate));
map->set_is_undetectable(true);
}
@@ -711,16 +705,16 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
}
// Set interceptor information in the map.
- if (!obj->GetNamedPropertyHandler()->IsUndefined(isolate)) {
+ if (!obj->GetNamedPropertyHandler().IsUndefined(isolate)) {
map->set_has_named_interceptor(true);
map->set_may_have_interesting_symbols(true);
}
- if (!obj->GetIndexedPropertyHandler()->IsUndefined(isolate)) {
+ if (!obj->GetIndexedPropertyHandler().IsUndefined(isolate)) {
map->set_has_indexed_interceptor(true);
}
// Mark instance as callable in the map.
- if (!obj->GetInstanceCallHandler()->IsUndefined(isolate)) {
+ if (!obj->GetInstanceCallHandler().IsUndefined(isolate)) {
map->set_is_callable(true);
map->set_is_constructor(!obj->undetectable());
}
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api/api-natives.h
index 9a9ae50da8..153212cc6c 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api/api-natives.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_API_NATIVES_H_
-#define V8_API_NATIVES_H_
+#ifndef V8_API_API_NATIVES_H_
+#define V8_API_API_NATIVES_H_
#include "include/v8.h"
#include "src/base/macros.h"
-#include "src/handles.h"
-#include "src/maybe-handles.h"
-#include "src/objects.h"
-#include "src/property-details.h"
+#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
+#include "src/objects/objects.h"
+#include "src/objects/property-details.h"
namespace v8 {
namespace internal {
@@ -61,4 +61,4 @@ class ApiNatives {
} // namespace internal
} // namespace v8
-#endif // V8_API_NATIVES_H_
+#endif // V8_API_API_NATIVES_H_
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api/api.cc
index dc92ff2e79..399aca7eb6 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -2,58 +2,62 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api/api.h"
#include <string.h> // For memcpy, strlen.
#include <cmath> // For isnan.
#include <limits>
#include <vector>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
#include "include/v8-util.h"
-#include "src/accessors.h"
-#include "src/api-natives.h"
-#include "src/assert-scope.h"
+#include "src/api/api-natives.h"
#include "src/base/functional.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/safe_conversions.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/bootstrapper.h"
+#include "src/builtins/accessors.h"
#include "src/builtins/builtins-utils.h"
-#include "src/char-predicates-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/cpu-features.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
-#include "src/compiler.h"
-#include "src/contexts.h"
-#include "src/conversions-inl.h"
-#include "src/counters.h"
-#include "src/cpu-features.h"
-#include "src/date.h"
+#include "src/date/date.h"
#include "src/debug/debug-coverage.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-type-profile.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
-#include "src/deoptimizer.h"
-#include "src/detachable-vector.h"
-#include "src/execution.h"
-#include "src/frames-inl.h"
-#include "src/gdb-jit.h"
-#include "src/global-handles.h"
-#include "src/globals.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/diagnostics/gdb-jit.h"
+#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/messages.h"
+#include "src/execution/microtask-queue.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/simulator.h"
+#include "src/execution/v8threads.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/handles/global-handles.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-inl.h"
-#include "src/icu_util.h"
-#include "src/isolate-inl.h"
-#include "src/json-parser.h"
-#include "src/json-stringifier.h"
-#include "src/messages.h"
-#include "src/microtask-queue.h"
-#include "src/objects-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/init/icu_util.h"
+#include "src/init/startup-data-util.h"
+#include "src/init/v8.h"
+#include "src/json/json-parser.h"
+#include "src/json/json-stringifier.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/contexts.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/embedder-data-slot-inl.h"
#include "src/objects/frame-array-inl.h"
@@ -65,44 +69,41 @@
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
+#include "src/objects/property-descriptor.h"
+#include "src/objects/property-details.h"
+#include "src/objects/property.h"
+#include "src/objects/prototype.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/templates.h"
+#include "src/objects/value-serializer.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
+#include "src/parsing/pending-compilation-error-handler.h"
#include "src/parsing/scanner-character-streams.h"
-#include "src/pending-compilation-error-handler.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/profiler/profile-generator-inl.h"
#include "src/profiler/tick-sample.h"
-#include "src/property-descriptor.h"
-#include "src/property-details.h"
-#include "src/property.h"
-#include "src/prototype.h"
-#include "src/runtime-profiler.h"
#include "src/runtime/runtime.h"
-#include "src/simulator.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/read-only-serializer.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/startup-data-util.h"
-#include "src/string-hasher.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/strings/string-hasher.h"
+#include "src/strings/unicode-inl.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/unicode-inl.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
-#include "src/value-serializer.h"
-#include "src/version.h"
-#include "src/vm-state-inl.h"
+#include "src/utils/detachable-vector.h"
+#include "src/utils/version.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -121,7 +122,7 @@
#include "include/v8-wasm-trap-handler-win.h"
#include "src/trap-handler/handler-inside-win.h"
#if V8_TARGET_ARCH_X64
-#include "src/unwinding-info-win64.h"
+#include "src/diagnostics/unwinding-info-win64.h"
#endif // V8_TARGET_ARCH_X64
#endif // V8_OS_WIN
@@ -239,7 +240,6 @@ namespace v8 {
#define RETURN_TO_LOCAL_UNCHECKED(maybe_local, T) \
return maybe_local.FromMaybe(Local<T>());
-
#define RETURN_ESCAPED(value) return handle_scope.Escape(value);
namespace {
@@ -286,7 +286,7 @@ class CallDepthScope {
i::Handle<i::Context> env = Utils::OpenHandle(*context);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
if (!isolate->context().is_null() &&
- isolate->context()->native_context() == env->native_context()) {
+ isolate->context().native_context() == env->native_context()) {
context_ = Local<Context>();
} else {
impl->SaveContext(isolate->context());
@@ -302,7 +302,7 @@ class CallDepthScope {
isolate_->set_context(impl->RestoreContext());
i::Handle<i::Context> env = Utils::OpenHandle(*context_);
- microtask_queue = env->native_context()->microtask_queue();
+ microtask_queue = env->native_context().microtask_queue();
}
if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
if (do_callback) isolate_->FireCallCompletedCallback(microtask_queue);
@@ -335,7 +335,6 @@ class CallDepthScope {
} // namespace
-
static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
i::Handle<i::Script> script) {
i::Handle<i::Object> scriptName(script->GetNameOrSourceURL(), isolate);
@@ -358,7 +357,6 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
return origin;
}
-
// --- E x c e p t i o n B e h a v i o r ---
void i::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location) {
@@ -462,7 +460,6 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location,
FATAL("API fatal error handler returned after process out of memory");
}
-
void Utils::ReportApiFailure(const char* location, const char* message) {
i::Isolate* isolate = i::Isolate::Current();
FatalErrorCallback callback = nullptr;
@@ -510,12 +507,10 @@ static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
return false;
}
-
void V8::SetNativesDataBlob(StartupData* natives_blob) {
i::V8::SetNativesBlob(natives_blob);
}
-
void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
i::V8::SetSnapshotBlob(snapshot_blob);
}
@@ -648,7 +643,7 @@ size_t SnapshotCreator::AddData(i::Address object) {
i::HandleScope scope(isolate);
i::Handle<i::Object> obj(i::Object(object), isolate);
i::Handle<i::ArrayList> list;
- if (!isolate->heap()->serialized_objects()->IsArrayList()) {
+ if (!isolate->heap()->serialized_objects().IsArrayList()) {
list = i::ArrayList::New(isolate, 1);
} else {
list = i::Handle<i::ArrayList>(
@@ -668,7 +663,7 @@ size_t SnapshotCreator::AddData(Local<Context> context, i::Address object) {
i::HandleScope scope(isolate);
i::Handle<i::Object> obj(i::Object(object), isolate);
i::Handle<i::ArrayList> list;
- if (!ctx->serialized_objects()->IsArrayList()) {
+ if (!ctx->serialized_objects().IsArrayList()) {
list = i::ArrayList::New(isolate, 1);
} else {
list = i::Handle<i::ArrayList>(
@@ -684,7 +679,7 @@ namespace {
void ConvertSerializedObjectsToFixedArray(Local<Context> context) {
i::Handle<i::Context> ctx = Utils::OpenHandle(*context);
i::Isolate* isolate = ctx->GetIsolate();
- if (!ctx->serialized_objects()->IsArrayList()) {
+ if (!ctx->serialized_objects().IsArrayList()) {
ctx->set_serialized_objects(i::ReadOnlyRoots(isolate).empty_fixed_array());
} else {
i::Handle<i::ArrayList> list(i::ArrayList::cast(ctx->serialized_objects()),
@@ -695,7 +690,7 @@ void ConvertSerializedObjectsToFixedArray(Local<Context> context) {
}
void ConvertSerializedObjectsToFixedArray(i::Isolate* isolate) {
- if (!isolate->heap()->serialized_objects()->IsArrayList()) {
+ if (!isolate->heap()->serialized_objects().IsArrayList()) {
isolate->heap()->SetSerializedObjects(
i::ReadOnlyRoots(isolate).empty_fixed_array());
} else {
@@ -737,7 +732,7 @@ StartupData SnapshotCreator::CreateBlob(
i::Handle<i::Context> context =
v8::Utils::OpenHandle(*data->contexts_.Get(i));
global_proxy_sizes->set(i,
- i::Smi::FromInt(context->global_proxy()->Size()));
+ i::Smi::FromInt(context->global_proxy().Size()));
}
isolate->heap()->SetSerializedGlobalProxySizes(*global_proxy_sizes);
}
@@ -754,8 +749,6 @@ StartupData SnapshotCreator::CreateBlob(
isolate->heap()->CompactWeakArrayLists(internal::AllocationType::kOld);
}
- isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
-
if (function_code_handling == FunctionCodeHandling::kClear) {
// Clear out re-compilable data from all shared function infos. Any
// JSFunctions using these SFIs will have their code pointers reset by the
@@ -774,16 +767,16 @@ StartupData SnapshotCreator::CreateBlob(
i::HeapIterator heap_iterator(isolate->heap());
for (i::HeapObject current_obj = heap_iterator.next();
!current_obj.is_null(); current_obj = heap_iterator.next()) {
- if (current_obj->IsSharedFunctionInfo()) {
+ if (current_obj.IsSharedFunctionInfo()) {
i::SharedFunctionInfo shared =
i::SharedFunctionInfo::cast(current_obj);
- if (shared->CanDiscardCompiled()) {
+ if (shared.CanDiscardCompiled()) {
sfis_to_clear.emplace_back(shared, isolate);
}
- } else if (current_obj->IsJSRegExp()) {
+ } else if (current_obj.IsJSRegExp()) {
i::JSRegExp regexp = i::JSRegExp::cast(current_obj);
- if (regexp->HasCompiledCode()) {
- regexp->DiscardCompiledCodeForSerialization();
+ if (regexp.HasCompiledCode()) {
+ regexp.DiscardCompiledCodeForSerialization();
}
}
}
@@ -820,23 +813,22 @@ StartupData SnapshotCreator::CreateBlob(
i::HeapIterator heap_iterator(isolate->heap());
for (i::HeapObject current_obj = heap_iterator.next(); !current_obj.is_null();
current_obj = heap_iterator.next()) {
- if (current_obj->IsJSFunction()) {
+ if (current_obj.IsJSFunction()) {
i::JSFunction fun = i::JSFunction::cast(current_obj);
// Complete in-object slack tracking for all functions.
- fun->CompleteInobjectSlackTrackingIfActive();
+ fun.CompleteInobjectSlackTrackingIfActive();
// Also, clear out feedback vectors, or any optimized code.
- if (!fun->raw_feedback_cell()->value()->IsUndefined()) {
- fun->raw_feedback_cell()->set_value(
+ if (!fun.raw_feedback_cell().value().IsUndefined()) {
+ fun.raw_feedback_cell().set_value(
i::ReadOnlyRoots(isolate).undefined_value());
- fun->set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy));
+ fun.set_code(isolate->builtins()->builtin(i::Builtins::kCompileLazy));
}
if (function_code_handling == FunctionCodeHandling::kClear) {
- DCHECK(fun->shared()->HasWasmExportedFunctionData() ||
- fun->shared()->HasBuiltinId() ||
- fun->shared()->IsApiFunction() ||
- fun->shared()->HasUncompiledDataWithoutPreparseData());
+ DCHECK(fun.shared().HasWasmExportedFunctionData() ||
+ fun.shared().HasBuiltinId() || fun.shared().IsApiFunction() ||
+ fun.shared().HasUncompiledDataWithoutPreparseData());
}
}
}
@@ -896,11 +888,19 @@ void V8::SetDcheckErrorHandler(DcheckErrorCallback that) {
v8::base::SetDcheckFunction(that);
}
-void V8::SetFlagsFromString(const char* str, int length) {
+void V8::SetFlagsFromString(const char* str) {
+ SetFlagsFromString(str, strlen(str));
+}
+
+void V8::SetFlagsFromString(const char* str, size_t length) {
i::FlagList::SetFlagsFromString(str, length);
i::FlagList::EnforceFlagImplications();
}
+void V8::SetFlagsFromString(const char* str, int length) {
+ CHECK_LE(0, length);
+ SetFlagsFromString(str, static_cast<size_t>(length));
+}
void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) {
i::FlagList::SetFlagsFromCommandLine(argc, argv, remove_flags);
@@ -950,15 +950,12 @@ void RegisterExtension(std::unique_ptr<Extension> extension) {
RegisteredExtension::Register(std::move(extension));
}
-Extension::Extension(const char* name,
- const char* source,
- int dep_count,
- const char** deps,
- int source_length)
+Extension::Extension(const char* name, const char* source, int dep_count,
+ const char** deps, int source_length)
: name_(name),
- source_length_(source_length >= 0 ?
- source_length :
- (source ? static_cast<int>(strlen(source)) : 0)),
+ source_length_(source_length >= 0
+ ? source_length
+ : (source ? static_cast<int>(strlen(source)) : 0)),
dep_count_(dep_count),
deps_(deps),
auto_enable_(false) {
@@ -1009,7 +1006,7 @@ i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
- i::Object(*obj)->ObjectVerify(isolate);
+ i::Object(*obj).ObjectVerify(isolate);
}
#endif // VERIFY_HEAP
return result.location();
@@ -1022,7 +1019,7 @@ i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
isolate->global_handles()->CreateTraced(*obj, slot);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
- i::Object(*obj)->ObjectVerify(isolate);
+ i::Object(*obj).ObjectVerify(isolate);
}
#endif // VERIFY_HEAP
return result.location();
@@ -1042,11 +1039,6 @@ void V8::MoveTracedGlobalReference(internal::Address** from,
i::GlobalHandles::MoveTracedGlobal(from, to);
}
-void V8::RegisterExternallyReferencedObject(i::Address* location,
- i::Isolate* isolate) {
- isolate->heap()->RegisterExternallyReferencedObject(location);
-}
-
void V8::MakeWeak(i::Address* location, void* parameter,
WeakCallbackInfo<void>::Callback weak_callback,
WeakCallbackType type) {
@@ -1089,12 +1081,10 @@ Value* V8::Eternalize(Isolate* v8_isolate, Value* value) {
isolate->eternal_handles()->Get(index).location());
}
-
void V8::FromJustIsNothing() {
Utils::ApiCheck(false, "v8::FromJust", "Maybe value is Nothing.");
}
-
void V8::ToLocalEmpty() {
Utils::ApiCheck(false, "v8::ToLocalChecked", "Empty MaybeLocal.");
}
@@ -1105,14 +1095,9 @@ void V8::InternalFieldOutOfBounds(int index) {
"Internal field out of bounds.");
}
-
// --- H a n d l e s ---
-
-HandleScope::HandleScope(Isolate* isolate) {
- Initialize(isolate);
-}
-
+HandleScope::HandleScope(Isolate* isolate) { Initialize(isolate); }
void HandleScope::Initialize(Isolate* isolate) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -1134,7 +1119,6 @@ void HandleScope::Initialize(Isolate* isolate) {
current->level++;
}
-
HandleScope::~HandleScope() {
i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
}
@@ -1156,16 +1140,16 @@ i::Address* HandleScope::CreateHandle(i::Isolate* isolate, i::Address value) {
EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
escape_slot_ =
- CreateHandle(isolate, i::ReadOnlyRoots(isolate).the_hole_value()->ptr());
+ CreateHandle(isolate, i::ReadOnlyRoots(isolate).the_hole_value().ptr());
Initialize(v8_isolate);
}
i::Address* EscapableHandleScope::Escape(i::Address* escape_value) {
i::Heap* heap = reinterpret_cast<i::Isolate*>(GetIsolate())->heap();
- Utils::ApiCheck(i::Object(*escape_slot_)->IsTheHole(heap->isolate()),
+ Utils::ApiCheck(i::Object(*escape_slot_).IsTheHole(heap->isolate()),
"EscapableHandleScope::Escape", "Escape value set twice");
if (escape_value == nullptr) {
- *escape_slot_ = i::ReadOnlyRoots(heap).undefined_value()->ptr();
+ *escape_slot_ = i::ReadOnlyRoots(heap).undefined_value().ptr();
return nullptr;
}
*escape_slot_ = *escape_value;
@@ -1188,7 +1172,6 @@ SealHandleScope::SealHandleScope(Isolate* isolate)
current->sealed_level = current->level;
}
-
SealHandleScope::~SealHandleScope() {
i::HandleScopeData* current = isolate_->handle_scope_data();
DCHECK_EQ(current->next, current->limit);
@@ -1256,11 +1239,9 @@ static i::Handle<i::EmbedderDataArray> EmbedderDataFor(Context* context,
const char* location) {
i::Handle<i::Context> env = Utils::OpenHandle(context);
i::Isolate* isolate = env->GetIsolate();
- bool ok =
- Utils::ApiCheck(env->IsNativeContext(),
- location,
- "Not a native context") &&
- Utils::ApiCheck(index >= 0, location, "Negative index");
+ bool ok = Utils::ApiCheck(env->IsNativeContext(), location,
+ "Not a native context") &&
+ Utils::ApiCheck(index >= 0, location, "Negative index");
if (!ok) return i::Handle<i::EmbedderDataArray>();
// TODO(ishell): remove cast once embedder_data slot has a proper type.
i::Handle<i::EmbedderDataArray> data(
@@ -1280,7 +1261,7 @@ uint32_t Context::GetNumberOfEmbedderDataFields() {
CHECK(context->IsNativeContext());
// TODO(ishell): remove cast once embedder_data slot has a proper type.
return static_cast<uint32_t>(
- i::EmbedderDataArray::cast(context->embedder_data())->length());
+ i::EmbedderDataArray::cast(context->embedder_data()).length());
}
v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
@@ -1294,7 +1275,6 @@ v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
return Utils::ToLocal(result);
}
-
void Context::SetEmbedderData(int index, v8::Local<Value> value) {
const char* location = "v8::Context::SetEmbedderData()";
i::Handle<i::EmbedderDataArray> data =
@@ -1306,7 +1286,6 @@ void Context::SetEmbedderData(int index, v8::Local<Value> value) {
*Utils::OpenHandle(*GetEmbedderData(index)));
}
-
void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
i::Handle<i::EmbedderDataArray> data =
@@ -1318,7 +1297,6 @@ void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
return result;
}
-
void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
i::Handle<i::EmbedderDataArray> data =
@@ -1328,16 +1306,13 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
DCHECK_EQ(value, GetAlignedPointerFromEmbedderData(index));
}
-
// --- T e m p l a t e ---
-
static void InitializeTemplate(i::Handle<i::TemplateInfo> that, int type) {
that->set_number_of_properties(0);
that->set_tag(i::Smi::FromInt(type));
}
-
void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
v8::PropertyAttribute attribute) {
auto templ = Utils::OpenHandle(this);
@@ -1363,12 +1338,11 @@ void Template::SetPrivate(v8::Local<Private> name, v8::Local<Data> value,
attribute);
}
-void Template::SetAccessorProperty(
- v8::Local<v8::Name> name,
- v8::Local<FunctionTemplate> getter,
- v8::Local<FunctionTemplate> setter,
- v8::PropertyAttribute attribute,
- v8::AccessControl access_control) {
+void Template::SetAccessorProperty(v8::Local<v8::Name> name,
+ v8::Local<FunctionTemplate> getter,
+ v8::Local<FunctionTemplate> setter,
+ v8::PropertyAttribute attribute,
+ v8::AccessControl access_control) {
// TODO(verwaest): Remove |access_control|.
DCHECK_EQ(v8::DEFAULT, access_control);
auto templ = Utils::OpenHandle(this);
@@ -1383,7 +1357,6 @@ void Template::SetAccessorProperty(
static_cast<i::PropertyAttributes>(attribute));
}
-
// --- F u n c t i o n T e m p l a t e ---
static void InitializeFunctionTemplate(
i::Handle<i::FunctionTemplateInfo> info) {
@@ -1416,8 +1389,8 @@ void FunctionTemplate::SetPrototypeProviderTemplate(
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> result = Utils::OpenHandle(*prototype_provider);
auto info = Utils::OpenHandle(this);
- CHECK(info->GetPrototypeTemplate()->IsUndefined(i_isolate));
- CHECK(info->GetParentTemplate()->IsUndefined(i_isolate));
+ CHECK(info->GetPrototypeTemplate().IsUndefined(i_isolate));
+ CHECK(info->GetParentTemplate().IsUndefined(i_isolate));
i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, info,
result);
}
@@ -1428,13 +1401,12 @@ static void EnsureNotInstantiated(i::Handle<i::FunctionTemplateInfo> info,
"FunctionTemplate already instantiated");
}
-
void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::Inherit");
i::Isolate* i_isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- CHECK(info->GetPrototypeProviderTemplate()->IsUndefined(i_isolate));
+ CHECK(info->GetPrototypeProviderTemplate().IsUndefined(i_isolate));
i::FunctionTemplateInfo::SetParentTemplate(i_isolate, info,
Utils::OpenHandle(*value));
}
@@ -1492,9 +1464,9 @@ MaybeLocal<FunctionTemplate> FunctionTemplate::FromSnapshot(Isolate* isolate,
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::FixedArray serialized_objects = i_isolate->heap()->serialized_objects();
int int_index = static_cast<int>(index);
- if (int_index < serialized_objects->length()) {
- i::Object info = serialized_objects->get(int_index);
- if (info->IsFunctionTemplateInfo()) {
+ if (int_index < serialized_objects.length()) {
+ i::Object info = serialized_objects.get(int_index);
+ if (info.IsFunctionTemplateInfo()) {
return Utils::ToLocal(i::Handle<i::FunctionTemplateInfo>(
i::FunctionTemplateInfo::cast(info), i_isolate));
}
@@ -1518,7 +1490,6 @@ Local<Signature> Signature::New(Isolate* isolate,
return Utils::SignatureToLocal(Utils::OpenHandle(*receiver));
}
-
Local<AccessorSignature> AccessorSignature::New(
Isolate* isolate, Local<FunctionTemplate> receiver) {
return Utils::AccessorSignatureToLocal(Utils::OpenHandle(*receiver));
@@ -1549,7 +1520,6 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
info->set_call_code(*obj);
}
-
namespace {
template <typename Getter, typename Setter>
@@ -1602,7 +1572,7 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
}
i::Isolate* isolate = handle->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- if (handle->GetInstanceTemplate()->IsUndefined(isolate)) {
+ if (handle->GetInstanceTemplate().IsUndefined(isolate)) {
Local<ObjectTemplate> templ =
ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle));
i::FunctionTemplateInfo::SetInstanceTemplate(isolate, handle,
@@ -1613,7 +1583,6 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
return Utils::ToLocal(result);
}
-
void FunctionTemplate::SetLength(int length) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetLength");
@@ -1622,7 +1591,6 @@ void FunctionTemplate::SetLength(int length) {
info->set_length(length);
}
-
void FunctionTemplate::SetClassName(Local<String> name) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetClassName");
@@ -1631,7 +1599,6 @@ void FunctionTemplate::SetClassName(Local<String> name) {
info->set_class_name(*Utils::OpenHandle(*name));
}
-
void FunctionTemplate::SetAcceptAnyReceiver(bool value) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetAcceptAnyReceiver");
@@ -1640,16 +1607,6 @@ void FunctionTemplate::SetAcceptAnyReceiver(bool value) {
info->set_accept_any_receiver(value);
}
-
-void FunctionTemplate::SetHiddenPrototype(bool value) {
- auto info = Utils::OpenHandle(this);
- EnsureNotInstantiated(info, "v8::FunctionTemplate::SetHiddenPrototype");
- auto isolate = info->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- info->set_hidden_prototype(value);
-}
-
-
void FunctionTemplate::ReadOnlyPrototype() {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::ReadOnlyPrototype");
@@ -1658,7 +1615,6 @@ void FunctionTemplate::ReadOnlyPrototype() {
info->set_read_only_prototype(true);
}
-
void FunctionTemplate::RemovePrototype() {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::RemovePrototype");
@@ -1667,16 +1623,13 @@ void FunctionTemplate::RemovePrototype() {
info->set_remove_prototype(true);
}
-
// --- O b j e c t T e m p l a t e ---
-
Local<ObjectTemplate> ObjectTemplate::New(
Isolate* isolate, v8::Local<FunctionTemplate> constructor) {
return New(reinterpret_cast<i::Isolate*>(isolate), constructor);
}
-
static Local<ObjectTemplate> ObjectTemplateNew(
i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
bool do_not_cache) {
@@ -1708,9 +1661,9 @@ MaybeLocal<ObjectTemplate> ObjectTemplate::FromSnapshot(Isolate* isolate,
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::FixedArray serialized_objects = i_isolate->heap()->serialized_objects();
int int_index = static_cast<int>(index);
- if (int_index < serialized_objects->length()) {
- i::Object info = serialized_objects->get(int_index);
- if (info->IsObjectTemplateInfo()) {
+ if (int_index < serialized_objects.length()) {
+ i::Object info = serialized_objects.get(int_index);
+ if (info.IsObjectTemplateInfo()) {
return Utils::ToLocal(i::Handle<i::ObjectTemplateInfo>(
i::ObjectTemplateInfo::cast(info), i_isolate));
}
@@ -1721,10 +1674,9 @@ MaybeLocal<ObjectTemplate> ObjectTemplate::FromSnapshot(Isolate* isolate,
// Ensure that the object template has a constructor. If no
// constructor is available we create one.
static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
- i::Isolate* isolate,
- ObjectTemplate* object_template) {
+ i::Isolate* isolate, ObjectTemplate* object_template) {
i::Object obj = Utils::OpenHandle(object_template)->constructor();
- if (!obj->IsUndefined(isolate)) {
+ if (!obj.IsUndefined(isolate)) {
i::FunctionTemplateInfo info = i::FunctionTemplateInfo::cast(obj);
return i::Handle<i::FunctionTemplateInfo>(info, isolate);
}
@@ -1917,7 +1869,6 @@ void ObjectTemplate::SetHandler(
config.flags);
}
-
void ObjectTemplate::MarkAsUndetectable() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -1927,7 +1878,6 @@ void ObjectTemplate::MarkAsUndetectable() {
cons->set_undetectable(true);
}
-
void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
Local<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -2057,7 +2007,6 @@ void ObjectTemplate::SetImmutableProto() {
// --- S c r i p t s ---
-
// Internally, UnboundScript is a SharedFunctionInfo, and Script is a
// JSFunction.
@@ -2068,7 +2017,6 @@ ScriptCompiler::CachedData::CachedData(const uint8_t* data_, int length_,
rejected(false),
buffer_policy(buffer_policy_) {}
-
ScriptCompiler::CachedData::~CachedData() {
if (buffer_policy == BufferOwned) {
delete[] data;
@@ -2110,13 +2058,12 @@ int UnboundScript::GetId() {
return script->id();
}
-
int UnboundScript::GetLineNumber(int code_pos) {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, UnboundScript, GetLineNumber);
- if (obj->script()->IsScript()) {
+ if (obj->script().IsScript()) {
i::Handle<i::Script> script(i::Script::cast(obj->script()), isolate);
return i::Script::GetLineNumber(script, code_pos);
} else {
@@ -2124,49 +2071,45 @@ int UnboundScript::GetLineNumber(int code_pos) {
}
}
-
Local<Value> UnboundScript::GetScriptName() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, UnboundScript, GetName);
- if (obj->script()->IsScript()) {
- i::Object name = i::Script::cast(obj->script())->name();
+ if (obj->script().IsScript()) {
+ i::Object name = i::Script::cast(obj->script()).name();
return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
} else {
return Local<String>();
}
}
-
Local<Value> UnboundScript::GetSourceURL() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, UnboundScript, GetSourceURL);
- if (obj->script()->IsScript()) {
- i::Object url = i::Script::cast(obj->script())->source_url();
+ if (obj->script().IsScript()) {
+ i::Object url = i::Script::cast(obj->script()).source_url();
return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
} else {
return Local<String>();
}
}
-
Local<Value> UnboundScript::GetSourceMappingURL() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, UnboundScript, GetSourceMappingURL);
- if (obj->script()->IsScript()) {
- i::Object url = i::Script::cast(obj->script())->source_mapping_url();
+ if (obj->script().IsScript()) {
+ i::Object url = i::Script::cast(obj->script()).source_mapping_url();
return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
} else {
return Local<String>();
}
}
-
MaybeLocal<Value> Script::Run(Local<Context> context) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
@@ -2186,7 +2129,6 @@ MaybeLocal<Value> Script::Run(Local<Context> context) {
RETURN_ESCAPED(result);
}
-
Local<Value> ScriptOrModule::GetResourceName() {
i::Handle<i::Script> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
@@ -2205,8 +2147,8 @@ Local<PrimitiveArray> ScriptOrModule::GetHostDefinedOptions() {
Local<UnboundScript> Script::GetUnboundScript() {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::SharedFunctionInfo sfi = i::JSFunction::cast(*obj)->shared();
- i::Isolate* isolate = sfi->GetIsolate();
+ i::SharedFunctionInfo sfi = i::JSFunction::cast(*obj).shared();
+ i::Isolate* isolate = sfi.GetIsolate();
return ToApiHandle<UnboundScript>(i::handle(sfi, isolate));
}
@@ -2280,14 +2222,14 @@ Local<Value> Module::GetException() const {
int Module::GetModuleRequestsLength() const {
i::Handle<i::Module> self = Utils::OpenHandle(this);
- return self->info()->module_requests()->length();
+ return self->info().module_requests().length();
}
Local<String> Module::GetModuleRequest(int i) const {
CHECK_GE(i, 0);
i::Handle<i::Module> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
- i::Handle<i::FixedArray> module_requests(self->info()->module_requests(),
+ i::Handle<i::FixedArray> module_requests(self->info().module_requests(),
isolate);
CHECK_LT(i, module_requests->length());
return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
@@ -2299,7 +2241,7 @@ Location Module::GetModuleRequestLocation(int i) const {
i::HandleScope scope(isolate);
i::Handle<i::Module> self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> module_request_positions(
- self->info()->module_request_positions(), isolate);
+ self->info().module_request_positions(), isolate);
CHECK_LT(i, module_request_positions->length());
int position = i::Smi::ToInt(module_request_positions->get(i));
i::Handle<i::Script> script(self->script(), isolate);
@@ -2499,83 +2441,70 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Local<Context> v8_context, Source* source, size_t arguments_count,
Local<String> arguments[], size_t context_extension_count,
Local<Object> context_extensions[], CompileOptions options,
- NoCacheReason no_cache_reason,
- Local<ScriptOrModule>* script_or_module_out) {
- Local<Function> result;
-
- {
- PREPARE_FOR_EXECUTION(v8_context, ScriptCompiler, CompileFunctionInContext,
- Function);
- TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
-
- DCHECK(options == CompileOptions::kConsumeCodeCache ||
- options == CompileOptions::kEagerCompile ||
- options == CompileOptions::kNoCompileOptions);
-
- i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
-
- DCHECK(context->IsNativeContext());
+ NoCacheReason no_cache_reason) {
+ PREPARE_FOR_EXECUTION(v8_context, ScriptCompiler, CompileFunctionInContext,
+ Function);
+ TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
- i::Handle<i::FixedArray> arguments_list =
- isolate->factory()->NewFixedArray(static_cast<int>(arguments_count));
- for (int i = 0; i < static_cast<int>(arguments_count); i++) {
- i::Handle<i::String> argument = Utils::OpenHandle(*arguments[i]);
- if (!IsIdentifier(isolate, argument)) return Local<Function>();
- arguments_list->set(i, *argument);
- }
+ DCHECK(options == CompileOptions::kConsumeCodeCache ||
+ options == CompileOptions::kEagerCompile ||
+ options == CompileOptions::kNoCompileOptions);
- for (size_t i = 0; i < context_extension_count; ++i) {
- i::Handle<i::JSReceiver> extension =
- Utils::OpenHandle(*context_extensions[i]);
- if (!extension->IsJSObject()) return Local<Function>();
- context = isolate->factory()->NewWithContext(
- context,
- i::ScopeInfo::CreateForWithScope(
- isolate,
- context->IsNativeContext()
- ? i::Handle<i::ScopeInfo>::null()
- : i::Handle<i::ScopeInfo>(context->scope_info(), isolate)),
- extension);
- }
+ i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
- i::Compiler::ScriptDetails script_details = GetScriptDetails(
- isolate, source->resource_name, source->resource_line_offset,
- source->resource_column_offset, source->source_map_url,
- source->host_defined_options);
+ DCHECK(context->IsNativeContext());
+ i::Handle<i::SharedFunctionInfo> outer_info(
+ context->empty_function().shared(), isolate);
+
+ i::Handle<i::JSFunction> fun;
+ i::Handle<i::FixedArray> arguments_list =
+ isolate->factory()->NewFixedArray(static_cast<int>(arguments_count));
+ for (int i = 0; i < static_cast<int>(arguments_count); i++) {
+ i::Handle<i::String> argument = Utils::OpenHandle(*arguments[i]);
+ if (!IsIdentifier(isolate, argument)) return Local<Function>();
+ arguments_list->set(i, *argument);
+ }
+
+ for (size_t i = 0; i < context_extension_count; ++i) {
+ i::Handle<i::JSReceiver> extension =
+ Utils::OpenHandle(*context_extensions[i]);
+ if (!extension->IsJSObject()) return Local<Function>();
+ context = isolate->factory()->NewWithContext(
+ context,
+ i::ScopeInfo::CreateForWithScope(
+ isolate,
+ context->IsNativeContext()
+ ? i::Handle<i::ScopeInfo>::null()
+ : i::Handle<i::ScopeInfo>(context->scope_info(), isolate)),
+ extension);
+ }
- i::ScriptData* script_data = nullptr;
- if (options == kConsumeCodeCache) {
- DCHECK(source->cached_data);
- // ScriptData takes care of pointer-aligning the data.
- script_data = new i::ScriptData(source->cached_data->data,
- source->cached_data->length);
- }
+ i::Compiler::ScriptDetails script_details = GetScriptDetails(
+ isolate, source->resource_name, source->resource_line_offset,
+ source->resource_column_offset, source->source_map_url,
+ source->host_defined_options);
- i::Handle<i::JSFunction> scoped_result;
- has_pending_exception =
- !i::Compiler::GetWrappedFunction(
- Utils::OpenHandle(*source->source_string), arguments_list, context,
- script_details, source->resource_options, script_data, options,
- no_cache_reason)
- .ToHandle(&scoped_result);
- if (options == kConsumeCodeCache) {
- source->cached_data->rejected = script_data->rejected();
- }
- delete script_data;
- RETURN_ON_FAILED_EXECUTION(Function);
- result = handle_scope.Escape(Utils::CallableToLocal(scoped_result));
+ i::ScriptData* script_data = nullptr;
+ if (options == kConsumeCodeCache) {
+ DCHECK(source->cached_data);
+ // ScriptData takes care of pointer-aligning the data.
+ script_data = new i::ScriptData(source->cached_data->data,
+ source->cached_data->length);
}
- if (script_or_module_out != nullptr) {
- i::Handle<i::JSFunction> function =
- i::Handle<i::JSFunction>::cast(Utils::OpenHandle(*result));
- i::Isolate* isolate = function->GetIsolate();
- i::Handle<i::SharedFunctionInfo> shared(function->shared(), isolate);
- i::Handle<i::Script> script(i::Script::cast(shared->script()), isolate);
- *script_or_module_out = v8::Utils::ScriptOrModuleToLocal(script);
+ i::Handle<i::JSFunction> result;
+ has_pending_exception =
+ !i::Compiler::GetWrappedFunction(
+ Utils::OpenHandle(*source->source_string), arguments_list, context,
+ script_details, source->resource_options, script_data, options,
+ no_cache_reason)
+ .ToHandle(&result);
+ if (options == kConsumeCodeCache) {
+ source->cached_data->rejected = script_data->rejected();
}
-
- return result;
+ delete script_data;
+ RETURN_ON_FAILED_EXECUTION(Function);
+ RETURN_ESCAPED(Utils::CallableToLocal(result));
}
void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); }
@@ -2674,7 +2603,6 @@ MaybeLocal<Script> Script::Compile(Local<Context> context, Local<String> source,
return ScriptCompiler::Compile(context, &script_source);
}
-
// --- E x c e p t i o n s ---
v8::TryCatch::TryCatch(v8::Isolate* isolate)
@@ -2692,7 +2620,6 @@ v8::TryCatch::TryCatch(v8::Isolate* isolate)
isolate_->RegisterTryCatchHandler(this);
}
-
v8::TryCatch::~TryCatch() {
if (rethrow_) {
v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_);
@@ -2729,19 +2656,12 @@ void v8::TryCatch::operator delete[](void*, size_t) { base::OS::Abort(); }
bool v8::TryCatch::HasCaught() const {
return !i::Object(reinterpret_cast<i::Address>(exception_))
- ->IsTheHole(isolate_);
+ .IsTheHole(isolate_);
}
+bool v8::TryCatch::CanContinue() const { return can_continue_; }
-bool v8::TryCatch::CanContinue() const {
- return can_continue_;
-}
-
-
-bool v8::TryCatch::HasTerminated() const {
- return has_terminated_;
-}
-
+bool v8::TryCatch::HasTerminated() const { return has_terminated_; }
v8::Local<v8::Value> v8::TryCatch::ReThrow() {
if (!HasCaught()) return v8::Local<v8::Value>();
@@ -2749,7 +2669,6 @@ v8::Local<v8::Value> v8::TryCatch::ReThrow() {
return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate_));
}
-
v8::Local<Value> v8::TryCatch::Exception() const {
if (HasCaught()) {
// Check for out of memory exception.
@@ -2760,11 +2679,10 @@ v8::Local<Value> v8::TryCatch::Exception() const {
}
}
-
MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
if (!HasCaught()) return v8::Local<Value>();
i::Object raw_obj(reinterpret_cast<i::Address>(exception_));
- if (!raw_obj->IsJSObject()) return v8::Local<Value>();
+ if (!raw_obj.IsJSObject()) return v8::Local<Value>();
PREPARE_FOR_EXECUTION(context, TryCatch, StackTrace, Value);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
i::Handle<i::String> name = isolate->factory()->stack_string();
@@ -2779,18 +2697,16 @@ MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
RETURN_ESCAPED(result);
}
-
v8::Local<v8::Message> v8::TryCatch::Message() const {
i::Object message(reinterpret_cast<i::Address>(message_obj_));
- DCHECK(message->IsJSMessageObject() || message->IsTheHole(isolate_));
- if (HasCaught() && !message->IsTheHole(isolate_)) {
+ DCHECK(message.IsJSMessageObject() || message.IsTheHole(isolate_));
+ if (HasCaught() && !message.IsTheHole(isolate_)) {
return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
} else {
return v8::Local<v8::Message>();
}
}
-
void v8::TryCatch::Reset() {
if (!rethrow_ && HasCaught() && isolate_->has_scheduled_exception()) {
// If an exception was caught but is still scheduled because no API call
@@ -2801,28 +2717,20 @@ void v8::TryCatch::Reset() {
ResetInternal();
}
-
void v8::TryCatch::ResetInternal() {
i::Object the_hole = i::ReadOnlyRoots(isolate_).the_hole_value();
- exception_ = reinterpret_cast<void*>(the_hole->ptr());
- message_obj_ = reinterpret_cast<void*>(the_hole->ptr());
+ exception_ = reinterpret_cast<void*>(the_hole.ptr());
+ message_obj_ = reinterpret_cast<void*>(the_hole.ptr());
}
-
-void v8::TryCatch::SetVerbose(bool value) {
- is_verbose_ = value;
-}
+void v8::TryCatch::SetVerbose(bool value) { is_verbose_ = value; }
bool v8::TryCatch::IsVerbose() const { return is_verbose_; }
-void v8::TryCatch::SetCaptureMessage(bool value) {
- capture_message_ = value;
-}
-
+void v8::TryCatch::SetCaptureMessage(bool value) { capture_message_ = value; }
// --- M e s s a g e ---
-
Local<String> Message::Get() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -2846,12 +2754,10 @@ ScriptOrigin Message::GetScriptOrigin() const {
return GetScriptOriginForScript(isolate, script);
}
-
v8::Local<Value> Message::GetScriptResourceName() const {
return GetScriptOrigin().ResourceName();
}
-
v8::Local<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -2863,26 +2769,31 @@ v8::Local<v8::StackTrace> Message::GetStackTrace() const {
return scope.Escape(Utils::StackTraceToLocal(stackTrace));
}
-
Maybe<int> Message::GetLineNumber(Local<Context> context) const {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
- auto msg = i::Handle<i::JSMessageObject>::cast(self);
- return Just(msg->GetLineNumber());
+ i::JSMessageObject::EnsureSourcePositionsAvailable(isolate, self);
+ return Just(self->GetLineNumber());
}
-
int Message::GetStartPosition() const {
auto self = Utils::OpenHandle(this);
- return self->start_position();
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
+ i::JSMessageObject::EnsureSourcePositionsAvailable(isolate, self);
+ return self->GetStartPosition();
}
-
int Message::GetEndPosition() const {
auto self = Utils::OpenHandle(this);
- return self->end_position();
+ i::Isolate* isolate = self->GetIsolate();
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
+ i::JSMessageObject::EnsureSourcePositionsAvailable(isolate, self);
+ return self->GetEndPosition();
}
int Message::ErrorLevel() const {
@@ -2895,8 +2806,8 @@ int Message::GetStartColumn() const {
i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
- auto msg = i::Handle<i::JSMessageObject>::cast(self);
- return msg->GetColumnNumber();
+ i::JSMessageObject::EnsureSourcePositionsAvailable(isolate, self);
+ return self->GetColumnNumber();
}
Maybe<int> Message::GetStartColumn(Local<Context> context) const {
@@ -2908,11 +2819,11 @@ int Message::GetEndColumn() const {
i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
- auto msg = i::Handle<i::JSMessageObject>::cast(self);
- const int column_number = msg->GetColumnNumber();
+ i::JSMessageObject::EnsureSourcePositionsAvailable(isolate, self);
+ const int column_number = self->GetColumnNumber();
if (column_number == -1) return -1;
- const int start = self->start_position();
- const int end = self->end_position();
+ const int start = self->GetStartPosition();
+ const int end = self->GetEndPosition();
return column_number + (end - start);
}
@@ -2920,40 +2831,36 @@ Maybe<int> Message::GetEndColumn(Local<Context> context) const {
return Just(GetEndColumn());
}
-
bool Message::IsSharedCrossOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
return Utils::OpenHandle(this)
->script()
- ->origin_options()
+ .origin_options()
.IsSharedCrossOrigin();
}
bool Message::IsOpaque() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- return Utils::OpenHandle(this)->script()->origin_options().IsOpaque();
+ return Utils::OpenHandle(this)->script().origin_options().IsOpaque();
}
-
MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope handle_scope(reinterpret_cast<Isolate*>(isolate));
- auto msg = i::Handle<i::JSMessageObject>::cast(self);
- RETURN_ESCAPED(Utils::ToLocal(msg->GetSourceLine()));
+ i::JSMessageObject::EnsureSourcePositionsAvailable(isolate, self);
+ RETURN_ESCAPED(Utils::ToLocal(self->GetSourceLine()));
}
-
void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i_isolate->PrintCurrentStackTrace(out);
}
-
// --- S t a c k T r a c e ---
Local<StackFrame> StackTrace::GetFrame(Isolate* v8_isolate,
@@ -2970,11 +2877,9 @@ int StackTrace::GetFrameCount() const {
return Utils::OpenHandle(this)->length();
}
-
-Local<StackTrace> StackTrace::CurrentStackTrace(
- Isolate* isolate,
- int frame_limit,
- StackTraceOptions options) {
+Local<StackTrace> StackTrace::CurrentStackTrace(Isolate* isolate,
+ int frame_limit,
+ StackTraceOptions options) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::FixedArray> stackTrace =
@@ -2982,19 +2887,16 @@ Local<StackTrace> StackTrace::CurrentStackTrace(
return Utils::StackTraceToLocal(stackTrace);
}
-
// --- S t a c k F r a m e ---
int StackFrame::GetLineNumber() const {
return i::StackTraceFrame::GetLineNumber(Utils::OpenHandle(this));
}
-
int StackFrame::GetColumn() const {
return i::StackTraceFrame::GetColumnNumber(Utils::OpenHandle(this));
}
-
int StackFrame::GetScriptId() const {
return i::StackTraceFrame::GetScriptId(Utils::OpenHandle(this));
}
@@ -3009,7 +2911,6 @@ Local<String> StackFrame::GetScriptName() const {
: Local<String>();
}
-
Local<String> StackFrame::GetScriptNameOrSourceURL() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
@@ -3020,7 +2921,6 @@ Local<String> StackFrame::GetScriptNameOrSourceURL() const {
: Local<String>();
}
-
Local<String> StackFrame::GetFunctionName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
@@ -3043,6 +2943,10 @@ bool StackFrame::IsWasm() const {
return i::StackTraceFrame::IsWasm(Utils::OpenHandle(this));
}
+bool StackFrame::IsUserJavaScript() const {
+ return i::StackTraceFrame::IsUserJavaScript(Utils::OpenHandle(this));
+}
+
// --- J S O N ---
MaybeLocal<Value> JSON::Parse(Local<Context> context,
@@ -3051,9 +2955,9 @@ MaybeLocal<Value> JSON::Parse(Local<Context> context,
i::Handle<i::String> string = Utils::OpenHandle(*json_string);
i::Handle<i::String> source = i::String::Flatten(isolate, string);
i::Handle<i::Object> undefined = isolate->factory()->undefined_value();
- auto maybe = source->IsSeqOneByteString()
- ? i::JsonParser<true>::Parse(isolate, source, undefined)
- : i::JsonParser<false>::Parse(isolate, source, undefined);
+ auto maybe = source->IsOneByteRepresentation()
+ ? i::JsonParser<uint8_t>::Parse(isolate, source, undefined)
+ : i::JsonParser<uint16_t>::Parse(isolate, source, undefined);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(maybe, &result);
RETURN_ON_FAILED_EXECUTION(Value);
@@ -3332,7 +3236,6 @@ bool Value::FullIsUndefined() const {
return result;
}
-
bool Value::FullIsNull() const {
i::Handle<i::Object> object = Utils::OpenHandle(this);
bool result = object->IsNull();
@@ -3340,28 +3243,21 @@ bool Value::FullIsNull() const {
return result;
}
-
bool Value::IsTrue() const {
i::Handle<i::Object> object = Utils::OpenHandle(this);
if (object->IsSmi()) return false;
return object->IsTrue();
}
-
bool Value::IsFalse() const {
i::Handle<i::Object> object = Utils::OpenHandle(this);
if (object->IsSmi()) return false;
return object->IsFalse();
}
-
bool Value::IsFunction() const { return Utils::OpenHandle(this)->IsCallable(); }
-
-bool Value::IsName() const {
- return Utils::OpenHandle(this)->IsName();
-}
-
+bool Value::IsName() const { return Utils::OpenHandle(this)->IsName(); }
bool Value::FullIsString() const {
bool result = Utils::OpenHandle(this)->IsString();
@@ -3369,61 +3265,46 @@ bool Value::FullIsString() const {
return result;
}
+bool Value::IsSymbol() const { return Utils::OpenHandle(this)->IsSymbol(); }
-bool Value::IsSymbol() const {
- return Utils::OpenHandle(this)->IsSymbol();
-}
-
-
-bool Value::IsArray() const {
- return Utils::OpenHandle(this)->IsJSArray();
-}
-
+bool Value::IsArray() const { return Utils::OpenHandle(this)->IsJSArray(); }
bool Value::IsArrayBuffer() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsJSArrayBuffer() && !i::JSArrayBuffer::cast(*obj)->is_shared();
+ return obj->IsJSArrayBuffer() && !i::JSArrayBuffer::cast(*obj).is_shared();
}
-
bool Value::IsArrayBufferView() const {
return Utils::OpenHandle(this)->IsJSArrayBufferView();
}
-
bool Value::IsTypedArray() const {
return Utils::OpenHandle(this)->IsJSTypedArray();
}
-#define VALUE_IS_TYPED_ARRAY(Type, typeName, TYPE, ctype) \
- bool Value::Is##Type##Array() const { \
- i::Handle<i::Object> obj = Utils::OpenHandle(this); \
- return obj->IsJSTypedArray() && \
- i::JSTypedArray::cast(*obj)->type() == i::kExternal##Type##Array; \
+#define VALUE_IS_TYPED_ARRAY(Type, typeName, TYPE, ctype) \
+ bool Value::Is##Type##Array() const { \
+ i::Handle<i::Object> obj = Utils::OpenHandle(this); \
+ return obj->IsJSTypedArray() && \
+ i::JSTypedArray::cast(*obj).type() == i::kExternal##Type##Array; \
}
TYPED_ARRAYS(VALUE_IS_TYPED_ARRAY)
#undef VALUE_IS_TYPED_ARRAY
-
bool Value::IsDataView() const {
return Utils::OpenHandle(this)->IsJSDataView();
}
-
bool Value::IsSharedArrayBuffer() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->IsJSArrayBuffer() && i::JSArrayBuffer::cast(*obj)->is_shared();
+ return obj->IsJSArrayBuffer() && i::JSArrayBuffer::cast(*obj).is_shared();
}
-
bool Value::IsObject() const { return Utils::OpenHandle(this)->IsJSReceiver(); }
-
-bool Value::IsNumber() const {
- return Utils::OpenHandle(this)->IsNumber();
-}
+bool Value::IsNumber() const { return Utils::OpenHandle(this)->IsNumber(); }
bool Value::IsBigInt() const { return Utils::OpenHandle(this)->IsBigInt(); }
@@ -3450,10 +3331,7 @@ VALUE_IS_SPECIFIC_TYPE(WebAssemblyCompiledModule, WasmModuleObject)
#undef VALUE_IS_SPECIFIC_TYPE
-
-bool Value::IsBoolean() const {
- return Utils::OpenHandle(this)->IsBoolean();
-}
+bool Value::IsBoolean() const { return Utils::OpenHandle(this)->IsBoolean(); }
bool Value::IsExternal() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -3462,12 +3340,11 @@ bool Value::IsExternal() const {
// Check the instance type is JS_OBJECT (instance type of Externals) before
// attempting to get the Isolate since that guarantees the object is writable
// and GetIsolate will work.
- if (heap_obj->map()->instance_type() != i::JS_OBJECT_TYPE) return false;
- i::Isolate* isolate = i::JSObject::cast(*heap_obj)->GetIsolate();
+ if (heap_obj->map().instance_type() != i::JS_OBJECT_TYPE) return false;
+ i::Isolate* isolate = i::JSObject::cast(*heap_obj).GetIsolate();
return heap_obj->IsExternal(isolate);
}
-
bool Value::IsInt32() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return true;
@@ -3477,26 +3354,21 @@ bool Value::IsInt32() const {
return false;
}
-
bool Value::IsUint32() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return i::Smi::ToInt(*obj) >= 0;
if (obj->IsNumber()) {
double value = obj->Number();
- return !i::IsMinusZero(value) &&
- value >= 0 &&
- value <= i::kMaxUInt32 &&
- value == i::FastUI2D(i::FastD2UI(value));
+ return !i::IsMinusZero(value) && value >= 0 && value <= i::kMaxUInt32 &&
+ value == i::FastUI2D(i::FastD2UI(value));
}
return false;
}
-
bool Value::IsNativeError() const {
return Utils::OpenHandle(this)->IsJSError();
}
-
bool Value::IsRegExp() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsJSRegExp();
@@ -3506,27 +3378,24 @@ bool Value::IsAsyncFunction() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (!obj->IsJSFunction()) return false;
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(obj);
- return i::IsAsyncFunction(func->shared()->kind());
+ return i::IsAsyncFunction(func->shared().kind());
}
bool Value::IsGeneratorFunction() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (!obj->IsJSFunction()) return false;
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(obj);
- return i::IsGeneratorFunction(func->shared()->kind());
+ return i::IsGeneratorFunction(func->shared().kind());
}
-
bool Value::IsGeneratorObject() const {
return Utils::OpenHandle(this)->IsJSGeneratorObject();
}
-
bool Value::IsMapIterator() const {
return Utils::OpenHandle(this)->IsJSMapIterator();
}
-
bool Value::IsSetIterator() const {
return Utils::OpenHandle(this)->IsJSSetIterator();
}
@@ -3548,12 +3417,6 @@ MaybeLocal<String> Value::ToString(Local<Context> context) const {
RETURN_ESCAPED(result);
}
-
-Local<String> Value::ToString(Isolate* isolate) const {
- RETURN_TO_LOCAL_UNCHECKED(ToString(isolate->GetCurrentContext()), String);
-}
-
-
MaybeLocal<String> Value::ToDetailString(Local<Context> context) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsString()) return ToApiHandle<String>(obj);
@@ -3564,7 +3427,6 @@ MaybeLocal<String> Value::ToDetailString(Local<Context> context) const {
RETURN_ESCAPED(result);
}
-
MaybeLocal<Object> Value::ToObject(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsJSReceiver()) return ToApiHandle<Object>(obj);
@@ -3576,11 +3438,6 @@ MaybeLocal<Object> Value::ToObject(Local<Context> context) const {
RETURN_ESCAPED(result);
}
-
-Local<v8::Object> Value::ToObject(Isolate* isolate) const {
- RETURN_TO_LOCAL_UNCHECKED(ToObject(isolate->GetCurrentContext()), Object);
-}
-
MaybeLocal<BigInt> Value::ToBigInt(Local<Context> context) const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsBigInt()) return ToApiHandle<BigInt>(obj);
@@ -3597,18 +3454,12 @@ bool Value::BooleanValue(Isolate* v8_isolate) const {
reinterpret_cast<i::Isolate*>(v8_isolate));
}
-MaybeLocal<Boolean> Value::ToBoolean(Local<Context> context) const {
- return ToBoolean(context->GetIsolate());
-}
-
-
Local<Boolean> Value::ToBoolean(Isolate* v8_isolate) const {
auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
return ToApiHandle<Boolean>(
isolate->factory()->ToBoolean(BooleanValue(v8_isolate)));
}
-
MaybeLocal<Number> Value::ToNumber(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return ToApiHandle<Number>(obj);
@@ -3620,12 +3471,6 @@ MaybeLocal<Number> Value::ToNumber(Local<Context> context) const {
RETURN_ESCAPED(result);
}
-
-Local<Number> Value::ToNumber(Isolate* isolate) const {
- RETURN_TO_LOCAL_UNCHECKED(ToNumber(isolate->GetCurrentContext()), Number);
-}
-
-
MaybeLocal<Integer> Value::ToInteger(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return ToApiHandle<Integer>(obj);
@@ -3637,12 +3482,6 @@ MaybeLocal<Integer> Value::ToInteger(Local<Context> context) const {
RETURN_ESCAPED(result);
}
-
-Local<Integer> Value::ToInteger(Isolate* isolate) const {
- RETURN_TO_LOCAL_UNCHECKED(ToInteger(isolate->GetCurrentContext()), Integer);
-}
-
-
MaybeLocal<Int32> Value::ToInt32(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return ToApiHandle<Int32>(obj);
@@ -3654,12 +3493,6 @@ MaybeLocal<Int32> Value::ToInt32(Local<Context> context) const {
RETURN_ESCAPED(result);
}
-
-Local<Int32> Value::ToInt32(Isolate* isolate) const {
- RETURN_TO_LOCAL_UNCHECKED(ToInt32(isolate->GetCurrentContext()), Int32);
-}
-
-
MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return ToApiHandle<Uint32>(obj);
@@ -3688,84 +3521,70 @@ void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
"Isolate is not initialized or V8 has died");
}
-
void External::CheckCast(v8::Value* that) {
Utils::ApiCheck(that->IsExternal(), "v8::External::Cast",
"Could not convert to external");
}
-
void v8::Object::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsJSReceiver(), "v8::Object::Cast",
"Could not convert to object");
}
-
void v8::Function::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsCallable(), "v8::Function::Cast",
"Could not convert to function");
}
-
void v8::Boolean::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsBoolean(), "v8::Boolean::Cast",
"Could not convert to boolean");
}
-
void v8::Name::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsName(), "v8::Name::Cast", "Could not convert to name");
}
-
void v8::String::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsString(), "v8::String::Cast",
"Could not convert to string");
}
-
void v8::Symbol::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsSymbol(), "v8::Symbol::Cast",
"Could not convert to symbol");
}
-
void v8::Private::CheckCast(v8::Data* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsSymbol() &&
- i::Handle<i::Symbol>::cast(obj)->is_private(),
- "v8::Private::Cast",
- "Could not convert to private");
+ Utils::ApiCheck(
+ obj->IsSymbol() && i::Handle<i::Symbol>::cast(obj)->is_private(),
+ "v8::Private::Cast", "Could not convert to private");
}
-
void v8::Number::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsNumber(),
- "v8::Number::Cast()",
+ Utils::ApiCheck(obj->IsNumber(), "v8::Number::Cast()",
"Could not convert to number");
}
-
void v8::Integer::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsNumber(), "v8::Integer::Cast",
"Could not convert to number");
}
-
void v8::Int32::CheckCast(v8::Value* that) {
Utils::ApiCheck(that->IsInt32(), "v8::Int32::Cast",
"Could not convert to 32-bit signed integer");
}
-
void v8::Uint32::CheckCast(v8::Value* that) {
Utils::ApiCheck(that->IsUint32(), "v8::Uint32::Cast",
"Could not convert to 32-bit unsigned integer");
@@ -3782,31 +3601,26 @@ void v8::Array::CheckCast(Value* that) {
"Could not convert to array");
}
-
void v8::Map::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsJSMap(), "v8::Map::Cast", "Could not convert to Map");
}
-
void v8::Set::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsJSSet(), "v8_Set_Cast", "Could not convert to Set");
}
-
void v8::Promise::CheckCast(Value* that) {
Utils::ApiCheck(that->IsPromise(), "v8::Promise::Cast",
"Could not convert to promise");
}
-
void v8::Promise::Resolver::CheckCast(Value* that) {
Utils::ApiCheck(that->IsPromise(), "v8::Promise::Resolver::Cast",
"Could not convert to promise resolver");
}
-
void v8::Proxy::CheckCast(Value* that) {
Utils::ApiCheck(that->IsProxy(), "v8::Proxy::Cast",
"Could not convert to proxy");
@@ -3821,23 +3635,19 @@ void v8::WasmModuleObject::CheckCast(Value* that) {
void v8::ArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(
- obj->IsJSArrayBuffer() && !i::JSArrayBuffer::cast(*obj)->is_shared(),
+ obj->IsJSArrayBuffer() && !i::JSArrayBuffer::cast(*obj).is_shared(),
"v8::ArrayBuffer::Cast()", "Could not convert to ArrayBuffer");
}
-
void v8::ArrayBufferView::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSArrayBufferView(),
- "v8::ArrayBufferView::Cast()",
+ Utils::ApiCheck(obj->IsJSArrayBufferView(), "v8::ArrayBufferView::Cast()",
"Could not convert to ArrayBufferView");
}
-
void v8::TypedArray::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSTypedArray(),
- "v8::TypedArray::Cast()",
+ Utils::ApiCheck(obj->IsJSTypedArray(), "v8::TypedArray::Cast()",
"Could not convert to TypedArray");
}
@@ -3846,7 +3656,7 @@ void v8::TypedArray::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that); \
Utils::ApiCheck( \
obj->IsJSTypedArray() && \
- i::JSTypedArray::cast(*obj)->type() == i::kExternal##Type##Array, \
+ i::JSTypedArray::cast(*obj).type() == i::kExternal##Type##Array, \
"v8::" #Type "Array::Cast()", "Could not convert to " #Type "Array"); \
}
@@ -3854,45 +3664,38 @@ TYPED_ARRAYS(CHECK_TYPED_ARRAY_CAST)
#undef CHECK_TYPED_ARRAY_CAST
-
void v8::DataView::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSDataView(),
- "v8::DataView::Cast()",
+ Utils::ApiCheck(obj->IsJSDataView(), "v8::DataView::Cast()",
"Could not convert to DataView");
}
-
void v8::SharedArrayBuffer::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(
- obj->IsJSArrayBuffer() && i::JSArrayBuffer::cast(*obj)->is_shared(),
+ obj->IsJSArrayBuffer() && i::JSArrayBuffer::cast(*obj).is_shared(),
"v8::SharedArrayBuffer::Cast()",
"Could not convert to SharedArrayBuffer");
}
-
void v8::Date::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsJSDate(), "v8::Date::Cast()",
"Could not convert to date");
}
-
void v8::StringObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsStringWrapper(), "v8::StringObject::Cast()",
"Could not convert to StringObject");
}
-
void v8::SymbolObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsSymbolWrapper(), "v8::SymbolObject::Cast()",
"Could not convert to SymbolObject");
}
-
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsNumberWrapper(), "v8::NumberObject::Cast()",
@@ -3911,21 +3714,12 @@ void v8::BooleanObject::CheckCast(v8::Value* that) {
"Could not convert to BooleanObject");
}
-
void v8::RegExp::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- Utils::ApiCheck(obj->IsJSRegExp(),
- "v8::RegExp::Cast()",
+ Utils::ApiCheck(obj->IsJSRegExp(), "v8::RegExp::Cast()",
"Could not convert to regular expression");
}
-
-Maybe<bool> Value::BooleanValue(Local<Context> context) const {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- return Just(Utils::OpenHandle(this)->BooleanValue(isolate));
-}
-
-
Maybe<double> Value::NumberValue(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(obj->Number());
@@ -3938,7 +3732,6 @@ Maybe<double> Value::NumberValue(Local<Context> context) const {
return Just(num->Number());
}
-
Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) {
@@ -3953,7 +3746,6 @@ Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
return Just(NumberToInt64(*num));
}
-
Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(NumberToInt32(*obj));
@@ -3967,7 +3759,6 @@ Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
: static_cast<int32_t>(num->Number()));
}
-
Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) return Just(NumberToUint32(*obj));
@@ -3981,7 +3772,6 @@ Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
: static_cast<uint32_t>(num->Number()));
}
-
MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
auto self = Utils::OpenHandle(this);
if (self->IsSmi()) {
@@ -4007,7 +3797,6 @@ MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
return Local<Uint32>();
}
-
Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
auto self = Utils::OpenHandle(this);
@@ -4015,14 +3804,12 @@ Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
return i::Object::Equals(isolate, self, other);
}
-
bool Value::StrictEquals(Local<Value> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
return self->StrictEquals(*other);
}
-
bool Value::SameValue(Local<Value> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
@@ -4066,13 +3853,11 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
return Just(true);
}
-
bool v8::Object::Set(v8::Local<Value> key, v8::Local<Value> value) {
auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return Set(context, key, value).FromMaybe(false);
}
-
Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
v8::Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4086,13 +3871,11 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
return Just(true);
}
-
bool v8::Object::Set(uint32_t index, v8::Local<Value> value) {
auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
return Set(context, index, value).FromMaybe(false);
}
-
Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
v8::Local<Value> value) {
@@ -4110,7 +3893,6 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
return result;
}
-
Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
uint32_t index,
v8::Local<Value> value) {
@@ -4299,7 +4081,6 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
return Just(true);
}
-
MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
Local<Value> key) {
PREPARE_FOR_EXECUTION(context, Object, Get, Value);
@@ -4312,13 +4093,11 @@ MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
RETURN_ESCAPED(Utils::ToLocal(result));
}
-
Local<Value> v8::Object::Get(v8::Local<Value> key) {
auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Get(context, key), Value);
}
-
MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
PREPARE_FOR_EXECUTION(context, Object, Get, Value);
auto self = Utils::OpenHandle(this);
@@ -4329,19 +4108,16 @@ MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
RETURN_ESCAPED(Utils::ToLocal(result));
}
-
Local<Value> v8::Object::Get(uint32_t index) {
auto context = ContextFromNeverReadOnlySpaceObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(Get(context, index), Value);
}
-
MaybeLocal<Value> v8::Object::GetPrivate(Local<Context> context,
Local<Private> key) {
return Get(context, Local<Value>(reinterpret_cast<Value*>(*key)));
}
-
Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
Local<Context> context, Local<Value> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4364,7 +4140,6 @@ Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
return Just(static_cast<PropertyAttribute>(result.FromJust()));
}
-
MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
Local<Name> key) {
PREPARE_FOR_EXECUTION(context, Object, GetOwnPropertyDescriptor, Value);
@@ -4382,7 +4157,6 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
RETURN_ESCAPED(Utils::ToLocal(desc.ToObject(isolate)));
}
-
Local<Value> v8::Object::GetPrototype() {
auto isolate = Utils::OpenHandle(this)->GetIsolate();
auto self = Utils::OpenHandle(this);
@@ -4390,7 +4164,6 @@ Local<Value> v8::Object::GetPrototype() {
return Utils::ToLocal(i::PrototypeIterator::GetCurrent(iter));
}
-
Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4408,17 +4181,16 @@ Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
return Just(true);
}
-
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Local<FunctionTemplate> tmpl) {
auto self = Utils::OpenHandle(this);
auto isolate = self->GetIsolate();
i::PrototypeIterator iter(isolate, *self, i::kStartAtReceiver);
auto tmpl_info = *Utils::OpenHandle(*tmpl);
- while (!tmpl_info->IsTemplateFor(iter.GetCurrent<i::JSObject>())) {
+ while (!tmpl_info.IsTemplateFor(iter.GetCurrent<i::JSObject>())) {
iter.Advance();
if (iter.IsAtEnd()) return Local<Object>();
- if (!iter.GetCurrent()->IsJSObject()) return Local<Object>();
+ if (!iter.GetCurrent().IsJSObject()) return Local<Object>();
}
// IsTemplateFor() ensures that iter.GetCurrent() can't be a Proxy here.
return Utils::ToLocal(i::handle(iter.GetCurrent<i::JSObject>(), isolate));
@@ -4446,9 +4218,9 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(
RETURN_ON_FAILED_EXECUTION(Array);
value =
accumulator.GetKeys(static_cast<i::GetKeysConversion>(key_conversion));
- DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
- self->map()->EnumLength() == 0 ||
- self->map()->instance_descriptors()->enum_cache()->keys() != *value);
+ DCHECK(self->map().EnumLength() == i::kInvalidEnumCacheSentinel ||
+ self->map().EnumLength() == 0 ||
+ self->map().instance_descriptors().enum_cache().keys() != *value);
auto result = isolate->factory()->NewJSArrayWithElements(value);
RETURN_ESCAPED(Utils::ToLocal(result));
}
@@ -4469,15 +4241,14 @@ MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, Object, ObjectProtoToString, String);
auto self = Utils::OpenHandle(this);
Local<Value> result;
- has_pending_exception =
- !ToLocal<Value>(i::Execution::Call(isolate, isolate->object_to_string(),
- self, 0, nullptr),
- &result);
+ has_pending_exception = !ToLocal<Value>(
+ i::Execution::CallBuiltin(isolate, isolate->object_to_string(), self, 0,
+ nullptr),
+ &result);
RETURN_ON_FAILED_EXECUTION(String);
RETURN_ESCAPED(Local<String>::Cast(result));
}
-
Local<String> v8::Object::GetConstructorName() {
auto self = Utils::OpenHandle(this);
i::Handle<i::String> name = i::JSReceiver::GetConstructorName(self);
@@ -4561,12 +4332,10 @@ Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
return maybe;
}
-
Maybe<bool> v8::Object::HasPrivate(Local<Context> context, Local<Private> key) {
return HasOwnProperty(context, Local<Name>(reinterpret_cast<Name*>(*key)));
}
-
Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Object, Delete, Nothing<bool>(), i::HandleScope);
@@ -4577,7 +4346,6 @@ Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
return result;
}
-
Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Object, Has, Nothing<bool>(), i::HandleScope);
@@ -4637,7 +4405,6 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
getter_side_effect_type, setter_side_effect_type);
}
-
void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
Local<Function> setter,
PropertyAttribute attribute,
@@ -4718,7 +4485,6 @@ Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
return result;
}
-
Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
uint32_t index) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4754,14 +4520,12 @@ bool v8::Object::HasNamedLookupInterceptor() {
i::Handle<i::JSObject>::cast(self)->HasNamedInterceptor();
}
-
bool v8::Object::HasIndexedLookupInterceptor() {
auto self = Utils::OpenHandle(this);
return self->IsJSObject() &&
i::Handle<i::JSObject>::cast(self)->HasIndexedInterceptor();
}
-
MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Local<Context> context, Local<Name> key) {
PREPARE_FOR_EXECUTION(context, Object, GetRealNamedPropertyInPrototypeChain,
@@ -4783,7 +4547,6 @@ MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
RETURN_ESCAPED(result);
}
-
Maybe<PropertyAttribute>
v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
Local<Context> context, Local<Name> key) {
@@ -4809,7 +4572,6 @@ v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
return Just(static_cast<PropertyAttribute>(result.FromJust()));
}
-
MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
Local<Name> key) {
PREPARE_FOR_EXECUTION(context, Object, GetRealNamedProperty, Value);
@@ -4825,7 +4587,6 @@ MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
RETURN_ESCAPED(result);
}
-
Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
Local<Context> context, Local<Name> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4846,7 +4607,6 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
static_cast<PropertyAttribute>(result.FromJust()));
}
-
Local<v8::Object> v8::Object::Clone() {
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto isolate = self->GetIsolate();
@@ -4856,23 +4616,20 @@ Local<v8::Object> v8::Object::Clone() {
return Utils::ToLocal(result);
}
-
Local<v8::Context> v8::Object::CreationContext() {
auto self = Utils::OpenHandle(this);
i::Handle<i::Context> context = self->GetCreationContext();
return Utils::ToLocal(context);
}
-
int v8::Object::GetIdentityHash() {
i::DisallowHeapAllocation no_gc;
auto isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope scope(isolate);
auto self = Utils::OpenHandle(this);
- return self->GetOrCreateIdentityHash(isolate)->value();
+ return self->GetOrCreateIdentityHash(isolate).value();
}
-
bool v8::Object::IsCallable() {
auto self = Utils::OpenHandle(this);
return self->IsCallable();
@@ -4902,7 +4659,6 @@ MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
RETURN_ESCAPED(result);
}
-
MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
Local<Value> argv[]) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -4955,13 +4711,13 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
isolate->debug_execution_mode() == i::DebugInfo::kSideEffects;
if (should_set_has_no_side_effect) {
CHECK(self->IsJSFunction() &&
- i::JSFunction::cast(*self)->shared()->IsApiFunction());
+ i::JSFunction::cast(*self).shared().IsApiFunction());
i::Object obj =
- i::JSFunction::cast(*self)->shared()->get_api_func_data()->call_code();
- if (obj->IsCallHandlerInfo()) {
+ i::JSFunction::cast(*self).shared().get_api_func_data().call_code();
+ if (obj.IsCallHandlerInfo()) {
i::CallHandlerInfo handler_info = i::CallHandlerInfo::cast(obj);
- if (!handler_info->IsSideEffectFreeCallHandlerInfo()) {
- handler_info->SetNextCallHasNoSideEffect();
+ if (!handler_info.IsSideEffectFreeCallHandlerInfo()) {
+ handler_info.SetNextCallHasNoSideEffect();
}
}
}
@@ -4971,15 +4727,15 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
i::Execution::New(isolate, self, self, argc, args), &result);
if (should_set_has_no_side_effect) {
i::Object obj =
- i::JSFunction::cast(*self)->shared()->get_api_func_data()->call_code();
- if (obj->IsCallHandlerInfo()) {
+ i::JSFunction::cast(*self).shared().get_api_func_data().call_code();
+ if (obj.IsCallHandlerInfo()) {
i::CallHandlerInfo handler_info = i::CallHandlerInfo::cast(obj);
if (has_pending_exception) {
// Restore the map if an exception prevented restoration.
- handler_info->NextCallHasNoSideEffect();
+ handler_info.NextCallHasNoSideEffect();
} else {
- DCHECK(handler_info->IsSideEffectCallHandlerInfo() ||
- handler_info->IsSideEffectFreeCallHandlerInfo());
+ DCHECK(handler_info.IsSideEffectCallHandlerInfo() ||
+ handler_info.IsSideEffectFreeCallHandlerInfo());
}
}
}
@@ -4987,7 +4743,6 @@ MaybeLocal<Object> Function::NewInstanceWithSideEffectType(
RETURN_ESCAPED(result);
}
-
MaybeLocal<v8::Value> Function::Call(Local<Context> context,
v8::Local<v8::Value> recv, int argc,
v8::Local<v8::Value> argv[]) {
@@ -5013,10 +4768,9 @@ void Function::SetName(v8::Local<v8::String> name) {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) return;
auto func = i::Handle<i::JSFunction>::cast(self);
- func->shared()->SetName(*Utils::OpenHandle(*name));
+ func->shared().SetName(*Utils::OpenHandle(*name));
}
-
Local<Value> Function::GetName() const {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
@@ -5030,12 +4784,11 @@ Local<Value> Function::GetName() const {
}
if (self->IsJSFunction()) {
auto func = i::Handle<i::JSFunction>::cast(self);
- return Utils::ToLocal(handle(func->shared()->Name(), isolate));
+ return Utils::ToLocal(handle(func->shared().Name(), isolate));
}
return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
-
Local<Value> Function::GetInferredName() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
@@ -5043,11 +4796,10 @@ Local<Value> Function::GetInferredName() const {
self->GetIsolate()->factory()->undefined_value());
}
auto func = i::Handle<i::JSFunction>::cast(self);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(),
- func->GetIsolate()));
+ return Utils::ToLocal(
+ i::Handle<i::Object>(func->shared().inferred_name(), func->GetIsolate()));
}
-
Local<Value> Function::GetDebugName() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
@@ -5059,7 +4811,6 @@ Local<Value> Function::GetDebugName() const {
return Utils::ToLocal(i::Handle<i::Object>(*name, self->GetIsolate()));
}
-
Local<Value> Function::GetDisplayName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@@ -5079,70 +4830,64 @@ Local<Value> Function::GetDisplayName() const {
return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
-
ScriptOrigin Function::GetScriptOrigin() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
return v8::ScriptOrigin(Local<Value>());
}
auto func = i::Handle<i::JSFunction>::cast(self);
- if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()),
+ if (func->shared().script().IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(func->shared().script()),
func->GetIsolate());
return GetScriptOriginForScript(func->GetIsolate(), script);
}
return v8::ScriptOrigin(Local<Value>());
}
-
const int Function::kLineOffsetNotFound = -1;
-
int Function::GetScriptLineNumber() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
return kLineOffsetNotFound;
}
auto func = i::Handle<i::JSFunction>::cast(self);
- if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()),
+ if (func->shared().script().IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(func->shared().script()),
func->GetIsolate());
- return i::Script::GetLineNumber(script, func->shared()->StartPosition());
+ return i::Script::GetLineNumber(script, func->shared().StartPosition());
}
return kLineOffsetNotFound;
}
-
int Function::GetScriptColumnNumber() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
return kLineOffsetNotFound;
}
auto func = i::Handle<i::JSFunction>::cast(self);
- if (func->shared()->script()->IsScript()) {
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()),
+ if (func->shared().script().IsScript()) {
+ i::Handle<i::Script> script(i::Script::cast(func->shared().script()),
func->GetIsolate());
- return i::Script::GetColumnNumber(script, func->shared()->StartPosition());
+ return i::Script::GetColumnNumber(script, func->shared().StartPosition());
}
return kLineOffsetNotFound;
}
-
int Function::ScriptId() const {
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
return v8::UnboundScript::kNoScriptId;
}
auto func = i::Handle<i::JSFunction>::cast(self);
- if (!func->shared()->script()->IsScript()) {
+ if (!func->shared().script().IsScript()) {
return v8::UnboundScript::kNoScriptId;
}
- i::Handle<i::Script> script(i::Script::cast(func->shared()->script()),
+ i::Handle<i::Script> script(i::Script::cast(func->shared().script()),
func->GetIsolate());
return script->id();
}
-
Local<v8::Value> Function::GetBoundFunction() const {
auto self = Utils::OpenHandle(this);
if (self->IsJSBoundFunction()) {
@@ -5159,25 +4904,25 @@ int Name::GetIdentityHash() {
return static_cast<int>(self->Hash());
}
-
int String::Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
return str->length();
}
-
bool String::IsOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
return str->IsOneByteRepresentation();
}
-
// Helpers for ContainsOnlyOneByteHelper
-template<size_t size> struct OneByteMask;
-template<> struct OneByteMask<4> {
+template <size_t size>
+struct OneByteMask;
+template <>
+struct OneByteMask<4> {
static const uint32_t value = 0xFF00FF00;
};
-template<> struct OneByteMask<8> {
+template <>
+struct OneByteMask<8> {
static const uint64_t value = V8_2PART_UINT64_C(0xFF00FF00, FF00FF00);
};
static const uintptr_t kOneByteMask = OneByteMask<sizeof(uintptr_t)>::value;
@@ -5186,10 +4931,9 @@ static inline bool Unaligned(const uint16_t* chars) {
return reinterpret_cast<const uintptr_t>(chars) & kAlignmentMask;
}
-
static inline const uint16_t* Align(const uint16_t* chars) {
- return reinterpret_cast<uint16_t*>(
- reinterpret_cast<uintptr_t>(chars) & ~kAlignmentMask);
+ return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(chars) &
+ ~kAlignmentMask);
}
class ContainsOnlyOneByteHelper {
@@ -5214,9 +4958,9 @@ class ContainsOnlyOneByteHelper {
// Read word aligned in blocks,
// checking the return value at the end of each block.
const uint16_t* aligned_end = Align(end);
- const int increment = sizeof(uintptr_t)/sizeof(uint16_t);
+ const int increment = sizeof(uintptr_t) / sizeof(uint16_t);
const int inner_loops = 16;
- while (chars + inner_loops*increment < aligned_end) {
+ while (chars + inner_loops * increment < aligned_end) {
for (int i = 0; i < inner_loops; i++) {
acc |= *reinterpret_cast<const uintptr_t*>(chars);
chars += increment;
@@ -5239,16 +4983,16 @@ class ContainsOnlyOneByteHelper {
bool CheckCons(i::ConsString cons_string) {
while (true) {
// Check left side if flat.
- i::String left = cons_string->first();
+ i::String left = cons_string.first();
i::ConsString left_as_cons = i::String::VisitFlat(this, left, 0);
if (!is_one_byte_) return false;
// Check right side if flat.
- i::String right = cons_string->second();
+ i::String right = cons_string.second();
i::ConsString right_as_cons = i::String::VisitFlat(this, right, 0);
if (!is_one_byte_) return false;
// Standard recurse/iterate trick.
if (!left_as_cons.is_null() && !right_as_cons.is_null()) {
- if (left->length() < right->length()) {
+ if (left.length() < right.length()) {
CheckCons(left_as_cons);
cons_string = right_as_cons;
} else {
@@ -5278,7 +5022,6 @@ class ContainsOnlyOneByteHelper {
DISALLOW_COPY_AND_ASSIGN(ContainsOnlyOneByteHelper);
};
-
bool String::ContainsOnlyOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (str->IsOneByteRepresentation()) return true;
@@ -5327,7 +5070,7 @@ static int WriteUtf8Impl(i::Vector<const Char> string, char* write_start,
bool write_null = !(options & v8::String::NO_NULL_TERMINATION);
bool replace_invalid_utf8 = (options & v8::String::REPLACE_INVALID_UTF8);
char* current_write = write_start;
- const Char* read_start = string.start();
+ const Char* read_start = string.begin();
int read_index = 0;
int read_length = string.length();
int prev_char = unibrow::Utf16::kNoPreviousCharacter;
@@ -5466,8 +5209,7 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string,
i::Handle<i::String> str = Utils::OpenHandle(string);
str = i::String::Flatten(isolate, str);
int end = start + length;
- if ((length == -1) || (length > str->length() - start) )
- end = str->length();
+ if ((length == -1) || (length > str->length() - start)) end = str->length();
if (end < 0) return 0;
i::String::WriteToFlat(*str, buffer, start, end);
if (!(options & String::NO_NULL_TERMINATION) &&
@@ -5477,45 +5219,40 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string,
return end - start;
}
-
int String::WriteOneByte(Isolate* isolate, uint8_t* buffer, int start,
int length, int options) const {
return WriteHelper(reinterpret_cast<i::Isolate*>(isolate), this, buffer,
start, length, options);
}
-
int String::Write(Isolate* isolate, uint16_t* buffer, int start, int length,
int options) const {
return WriteHelper(reinterpret_cast<i::Isolate*>(isolate), this, buffer,
start, length, options);
}
-
bool v8::String::IsExternal() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
return i::StringShape(*str).IsExternalTwoByte();
}
-
bool v8::String::IsExternalOneByte() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
return i::StringShape(*str).IsExternalOneByte();
}
-
void v8::String::VerifyExternalStringResource(
v8::String::ExternalStringResource* value) const {
i::DisallowHeapAllocation no_allocation;
i::String str = *Utils::OpenHandle(this);
const v8::String::ExternalStringResource* expected;
- if (str->IsThinString()) {
- str = i::ThinString::cast(str)->actual();
+ if (str.IsThinString()) {
+ str = i::ThinString::cast(str).actual();
}
if (i::StringShape(str).IsExternalTwoByte()) {
- const void* resource = i::ExternalTwoByteString::cast(str)->resource();
+ const void* resource = i::ExternalTwoByteString::cast(str).resource();
expected = reinterpret_cast<const ExternalStringResource*>(resource);
} else {
expected = nullptr;
@@ -5530,22 +5267,22 @@ void v8::String::VerifyExternalStringResourceBase(
const v8::String::ExternalStringResourceBase* expected;
Encoding expectedEncoding;
- if (str->IsThinString()) {
- str = i::ThinString::cast(str)->actual();
+ if (str.IsThinString()) {
+ str = i::ThinString::cast(str).actual();
}
if (i::StringShape(str).IsExternalOneByte()) {
- const void* resource = i::ExternalOneByteString::cast(str)->resource();
+ const void* resource = i::ExternalOneByteString::cast(str).resource();
expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
expectedEncoding = ONE_BYTE_ENCODING;
} else if (i::StringShape(str).IsExternalTwoByte()) {
- const void* resource = i::ExternalTwoByteString::cast(str)->resource();
+ const void* resource = i::ExternalTwoByteString::cast(str).resource();
expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
expectedEncoding = TWO_BYTE_ENCODING;
} else {
expected = nullptr;
expectedEncoding =
- str->IsOneByteRepresentation() ? ONE_BYTE_ENCODING : TWO_BYTE_ENCODING;
+ str.IsOneByteRepresentation() ? ONE_BYTE_ENCODING : TWO_BYTE_ENCODING;
}
CHECK_EQ(expected, value);
CHECK_EQ(expectedEncoding, encoding);
@@ -5553,11 +5290,11 @@ void v8::String::VerifyExternalStringResourceBase(
String::ExternalStringResource* String::GetExternalStringResourceSlow() const {
i::DisallowHeapAllocation no_allocation;
- typedef internal::Internals I;
+ using I = internal::Internals;
i::String str = *Utils::OpenHandle(this);
- if (str->IsThinString()) {
- str = i::ThinString::cast(str)->actual();
+ if (str.IsThinString()) {
+ str = i::ThinString::cast(str).actual();
}
if (i::StringShape(str).IsExternalTwoByte()) {
@@ -5570,12 +5307,12 @@ String::ExternalStringResource* String::GetExternalStringResourceSlow() const {
String::ExternalStringResourceBase* String::GetExternalStringResourceBaseSlow(
String::Encoding* encoding_out) const {
i::DisallowHeapAllocation no_allocation;
- typedef internal::Internals I;
+ using I = internal::Internals;
ExternalStringResourceBase* resource = nullptr;
i::String str = *Utils::OpenHandle(this);
- if (str->IsThinString()) {
- str = i::ThinString::cast(str)->actual();
+ if (str.IsThinString()) {
+ str = i::ThinString::cast(str).actual();
}
internal::Address string = str.ptr();
@@ -5594,17 +5331,16 @@ v8::String::GetExternalOneByteStringResource() const {
i::DisallowHeapAllocation no_allocation;
i::String str = *Utils::OpenHandle(this);
if (i::StringShape(str).IsExternalOneByte()) {
- return i::ExternalOneByteString::cast(str)->resource();
- } else if (str->IsThinString()) {
- str = i::ThinString::cast(str)->actual();
+ return i::ExternalOneByteString::cast(str).resource();
+ } else if (str.IsThinString()) {
+ str = i::ThinString::cast(str).actual();
if (i::StringShape(str).IsExternalOneByte()) {
- return i::ExternalOneByteString::cast(str)->resource();
+ return i::ExternalOneByteString::cast(str).resource();
}
}
return nullptr;
}
-
Local<Value> Symbol::Name() const {
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
@@ -5630,24 +5366,20 @@ Local<Value> Symbol::Name() const {
return Utils::ToLocal(name);
}
-
Local<Value> Private::Name() const {
return reinterpret_cast<const Symbol*>(this)->Name();
}
-
double Number::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->Number();
}
-
bool Boolean::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->IsTrue();
}
-
int64_t Integer::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
@@ -5657,7 +5389,6 @@ int64_t Integer::Value() const {
}
}
-
int32_t Int32::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
@@ -5667,7 +5398,6 @@ int32_t Int32::Value() const {
}
}
-
uint32_t Uint32::Value() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsSmi()) {
@@ -5695,7 +5425,7 @@ Local<Value> v8::Object::SlowGetInternalField(int index) {
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
const char* location = "v8::Object::GetInternalField()";
if (!InternalFieldOK(obj, index, location)) return Local<Value>();
- i::Handle<i::Object> value(i::JSObject::cast(*obj)->GetEmbedderField(index),
+ i::Handle<i::Object> value(i::JSObject::cast(*obj).GetEmbedderField(index),
obj->GetIsolate());
return Utils::ToLocal(value);
}
@@ -5735,7 +5465,7 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
const char* location = "v8::Object::SetAlignedPointerInInternalFields()";
i::DisallowHeapAllocation no_gc;
i::JSObject js_obj = i::JSObject::cast(*obj);
- int nof_embedder_fields = js_obj->GetEmbedderFieldCount();
+ int nof_embedder_fields = js_obj.GetEmbedderFieldCount();
for (int i = 0; i < argc; i++) {
int index = indices[i];
if (!Utils::ApiCheck(index < nof_embedder_fields, location,
@@ -5752,25 +5482,20 @@ void v8::Object::SetAlignedPointerInInternalFields(int argc, int indices[],
static void* ExternalValue(i::Object obj) {
// Obscure semantics for undefined, but somehow checked in our unit tests...
- if (obj->IsUndefined()) {
+ if (obj.IsUndefined()) {
return nullptr;
}
- i::Object foreign = i::JSObject::cast(obj)->GetEmbedderField(0);
- return reinterpret_cast<void*>(i::Foreign::cast(foreign)->foreign_address());
+ i::Object foreign = i::JSObject::cast(obj).GetEmbedderField(0);
+ return reinterpret_cast<void*>(i::Foreign::cast(foreign).foreign_address());
}
// --- E n v i r o n m e n t ---
-
void v8::V8::InitializePlatform(Platform* platform) {
i::V8::InitializePlatform(platform);
}
-
-void v8::V8::ShutdownPlatform() {
- i::V8::ShutdownPlatform();
-}
-
+void v8::V8::ShutdownPlatform() { i::V8::ShutdownPlatform(); }
bool v8::V8::Initialize() {
i::V8::Initialize();
@@ -5825,7 +5550,6 @@ void v8::V8::SetEntropySource(EntropySource entropy_source) {
base::RandomNumberGenerator::SetEntropySource(entropy_source);
}
-
void v8::V8::SetReturnAddressLocationResolver(
ReturnAddressLocationResolver return_address_resolver) {
i::StackFrame::SetReturnAddressLocationResolver(return_address_resolver);
@@ -5884,16 +5608,12 @@ void v8::V8::InitializeExternalStartupData(const char* directory_path) {
i::InitializeExternalStartupData(directory_path);
}
-
void v8::V8::InitializeExternalStartupData(const char* natives_blob,
const char* snapshot_blob) {
i::InitializeExternalStartupData(natives_blob, snapshot_blob);
}
-
-const char* v8::V8::GetVersion() {
- return i::Version::GetVersion();
-}
+const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
template <typename ObjectType>
struct InvokeBootstrapper;
@@ -5953,8 +5673,8 @@ static i::Handle<ObjectType> CreateEnvironment(
global_constructor = EnsureConstructor(isolate, *global_template);
// Create a fresh template for the global proxy object.
- proxy_template = ObjectTemplate::New(
- reinterpret_cast<v8::Isolate*>(isolate));
+ proxy_template =
+ ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate));
proxy_constructor = EnsureConstructor(isolate, *proxy_template);
// Set the global template to be the prototype template of
@@ -5968,7 +5688,7 @@ static i::Handle<ObjectType> CreateEnvironment(
// Migrate security handlers from global_template to
// proxy_template. Temporarily removing access check
// information from the global template.
- if (!global_constructor->GetAccessCheckInfo()->IsUndefined(isolate)) {
+ if (!global_constructor->GetAccessCheckInfo().IsUndefined(isolate)) {
i::FunctionTemplateInfo::SetAccessCheckInfo(
isolate, proxy_constructor,
i::handle(global_constructor->GetAccessCheckInfo(), isolate));
@@ -5984,15 +5704,14 @@ static i::Handle<ObjectType> CreateEnvironment(
// interceptors, we need to replace them temporarily with noop
// interceptors, so the map is correctly marked as having interceptors,
// but we don't invoke any.
- if (!global_constructor->GetNamedPropertyHandler()->IsUndefined(
- isolate)) {
+ if (!global_constructor->GetNamedPropertyHandler().IsUndefined(isolate)) {
named_interceptor =
handle(global_constructor->GetNamedPropertyHandler(), isolate);
i::FunctionTemplateInfo::SetNamedPropertyHandler(
isolate, global_constructor,
i::ReadOnlyRoots(isolate).noop_interceptor_info_handle());
}
- if (!global_constructor->GetIndexedPropertyHandler()->IsUndefined(
+ if (!global_constructor->GetIndexedPropertyHandler().IsUndefined(
isolate)) {
indexed_interceptor =
handle(global_constructor->GetIndexedPropertyHandler(), isolate);
@@ -6043,7 +5762,7 @@ Local<Context> NewContext(
// TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't
// fail.
// Sanity-check that the isolate is initialized and usable.
- CHECK(isolate->builtins()->builtin(i::Builtins::kIllegal)->IsCode());
+ CHECK(isolate->builtins()->builtin(i::Builtins::kIllegal).IsCode());
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext");
LOG_API(isolate, Context, New);
@@ -6120,13 +5839,11 @@ void v8::Context::SetSecurityToken(Local<Value> token) {
env->set_security_token(*token_handle);
}
-
void v8::Context::UseDefaultSecurityToken() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
env->set_security_token(env->global_object());
}
-
Local<Value> v8::Context::GetSecurityToken() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
@@ -6135,7 +5852,6 @@ Local<Value> v8::Context::GetSecurityToken() {
return Utils::ToLocal(token_handle);
}
-
v8::Isolate* Context::GetIsolate() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
return reinterpret_cast<Isolate*>(env->GetIsolate());
@@ -6147,14 +5863,13 @@ v8::Local<v8::Object> Context::Global() {
i::Handle<i::Object> global(context->global_proxy(), isolate);
// TODO(dcarney): This should always return the global proxy
// but can't presently as calls to GetProtoype will return the wrong result.
- if (i::Handle<i::JSGlobalProxy>::cast(
- global)->IsDetachedFrom(context->global_object())) {
+ if (i::Handle<i::JSGlobalProxy>::cast(global)->IsDetachedFrom(
+ context->global_object())) {
global = i::Handle<i::Object>(context->global_object(), isolate);
}
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
-
void Context::DetachGlobal() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -6162,7 +5877,6 @@ void Context::DetachGlobal() {
isolate->bootstrapper()->DetachGlobal(context);
}
-
Local<v8::Object> Context::GetExtrasBindingObject() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -6170,7 +5884,6 @@ Local<v8::Object> Context::GetExtrasBindingObject() {
return Utils::ToLocal(binding);
}
-
void Context::AllowCodeGenerationFromStrings(bool allow) {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -6180,14 +5893,11 @@ void Context::AllowCodeGenerationFromStrings(bool allow) {
: i::ReadOnlyRoots(isolate).false_value());
}
-
bool Context::IsCodeGenerationFromStringsAllowed() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
- return !context->allow_code_gen_from_strings()->IsFalse(
- context->GetIsolate());
+ return !context->allow_code_gen_from_strings().IsFalse(context->GetIsolate());
}
-
void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Handle<i::String> error_handle = Utils::OpenHandle(*error);
@@ -6197,17 +5907,17 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(Local<String> error) {
namespace {
i::Address* GetSerializedDataFromFixedArray(i::Isolate* isolate,
i::FixedArray list, size_t index) {
- if (index < static_cast<size_t>(list->length())) {
+ if (index < static_cast<size_t>(list.length())) {
int int_index = static_cast<int>(index);
- i::Object object = list->get(int_index);
- if (!object->IsTheHole(isolate)) {
- list->set_the_hole(isolate, int_index);
+ i::Object object = list.get(int_index);
+ if (!object.IsTheHole(isolate)) {
+ list.set_the_hole(isolate, int_index);
// Shrink the list so that the last element is not the hole (unless it's
// the first element, because we don't want to end up with a non-canonical
// empty FixedArray).
- int last = list->length() - 1;
- while (last >= 0 && list->is_the_hole(isolate, last)) last--;
- if (last != -1) list->Shrink(isolate, last + 1);
+ int last = list.length() - 1;
+ while (last >= 0 && list.is_the_hole(isolate, last)) last--;
+ if (last != -1) list.Shrink(isolate, last + 1);
return i::Handle<i::Object>(object, isolate).location();
}
}
@@ -6303,7 +6013,7 @@ bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
// If it's a global proxy, then test with the global object. Note that the
// inner global object may not necessarily be a JSGlobalObject.
i::PrototypeIterator iter(self->GetIsolate(),
- i::JSObject::cast(*obj)->map());
+ i::JSObject::cast(*obj).map());
// The global proxy should always have a prototype, as it is a bug to call
// this on a detached JSGlobalProxy.
DCHECK(!iter.IsAtEnd());
@@ -6312,7 +6022,6 @@ bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
return false;
}
-
Local<External> v8::External::New(Isolate* isolate, void* value) {
STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -6322,37 +6031,35 @@ Local<External> v8::External::New(Isolate* isolate, void* value) {
return Utils::ExternalToLocal(external);
}
-
void* External::Value() const {
return ExternalValue(*Utils::OpenHandle(this));
}
-
// anonymous namespace for string creation helper functions
namespace {
inline int StringLength(const char* string) {
- return i::StrLength(string);
+ size_t len = strlen(string);
+ CHECK_GE(i::kMaxInt, len);
+ return static_cast<int>(len);
}
-
inline int StringLength(const uint8_t* string) {
- return i::StrLength(reinterpret_cast<const char*>(string));
+ return StringLength(reinterpret_cast<const char*>(string));
}
-
inline int StringLength(const uint16_t* string) {
- int length = 0;
- while (string[length] != '\0')
- length++;
- return length;
+ size_t length = 0;
+ while (string[length] != '\0') length++;
+ CHECK_GE(i::kMaxInt, length);
+ return static_cast<int>(length);
}
V8_WARN_UNUSED_RESULT
inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
- v8::NewStringType type,
+ NewStringType type,
i::Vector<const char> string) {
- if (type == v8::NewStringType::kInternalized) {
+ if (type == NewStringType::kInternalized) {
return factory->InternalizeUtf8String(string);
}
return factory->NewStringFromUtf8(string);
@@ -6360,25 +6067,24 @@ inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
V8_WARN_UNUSED_RESULT
inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
- v8::NewStringType type,
+ NewStringType type,
i::Vector<const uint8_t> string) {
- if (type == v8::NewStringType::kInternalized) {
- return factory->InternalizeOneByteString(string);
+ if (type == NewStringType::kInternalized) {
+ return factory->InternalizeString(string);
}
return factory->NewStringFromOneByte(string);
}
V8_WARN_UNUSED_RESULT
inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
- v8::NewStringType type,
+ NewStringType type,
i::Vector<const uint16_t> string) {
- if (type == v8::NewStringType::kInternalized) {
- return factory->InternalizeTwoByteString(string);
+ if (type == NewStringType::kInternalized) {
+ return factory->InternalizeString(string);
}
return factory->NewStringFromTwoByte(string);
}
-
STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength);
} // anonymous namespace
@@ -6403,43 +6109,21 @@ STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength);
result = Utils::ToLocal(handle_result); \
}
-Local<String> String::NewFromUtf8(Isolate* isolate,
- const char* data,
- NewStringType type,
- int length) {
- NEW_STRING(isolate, String, NewFromUtf8, char, data,
- static_cast<v8::NewStringType>(type), length);
- RETURN_TO_LOCAL_UNCHECKED(result, String);
-}
-
-
MaybeLocal<String> String::NewFromUtf8(Isolate* isolate, const char* data,
- v8::NewStringType type, int length) {
+ NewStringType type, int length) {
NEW_STRING(isolate, String, NewFromUtf8, char, data, type, length);
return result;
}
-
MaybeLocal<String> String::NewFromOneByte(Isolate* isolate, const uint8_t* data,
- v8::NewStringType type, int length) {
+ NewStringType type, int length) {
NEW_STRING(isolate, String, NewFromOneByte, uint8_t, data, type, length);
return result;
}
-
-Local<String> String::NewFromTwoByte(Isolate* isolate,
- const uint16_t* data,
- NewStringType type,
- int length) {
- NEW_STRING(isolate, String, NewFromTwoByte, uint16_t, data,
- static_cast<v8::NewStringType>(type), length);
- RETURN_TO_LOCAL_UNCHECKED(result, String);
-}
-
-
MaybeLocal<String> String::NewFromTwoByte(Isolate* isolate,
const uint16_t* data,
- v8::NewStringType type, int length) {
+ NewStringType type, int length) {
NEW_STRING(isolate, String, NewFromTwoByte, uint16_t, data, type, length);
return result;
}
@@ -6456,8 +6140,9 @@ Local<String> v8::String::Concat(Isolate* v8_isolate, Local<String> left,
if (left_string->length() + right_string->length() > i::String::kMaxLength) {
return Local<String>();
}
- i::Handle<i::String> result = isolate->factory()->NewConsString(
- left_string, right_string).ToHandleChecked();
+ i::Handle<i::String> result = isolate->factory()
+ ->NewConsString(left_string, right_string)
+ .ToHandleChecked();
return Utils::ToLocal(result);
}
@@ -6483,7 +6168,6 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
}
}
-
MaybeLocal<String> v8::String::NewExternalOneByte(
Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
CHECK(resource && resource->data());
@@ -6506,23 +6190,16 @@ MaybeLocal<String> v8::String::NewExternalOneByte(
}
}
-
-Local<String> v8::String::NewExternal(
- Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
- RETURN_TO_LOCAL_UNCHECKED(NewExternalOneByte(isolate, resource), String);
-}
-
-
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::DisallowHeapAllocation no_allocation;
i::String obj = *Utils::OpenHandle(this);
- if (obj->IsThinString()) {
- obj = i::ThinString::cast(obj)->actual();
+ if (obj.IsThinString()) {
+ obj = i::ThinString::cast(obj).actual();
}
- if (!obj->SupportsExternalization()) {
+ if (!obj.SupportsExternalization()) {
return false;
}
@@ -6534,24 +6211,23 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
CHECK(resource && resource->data());
- bool result = obj->MakeExternal(resource);
+ bool result = obj.MakeExternal(resource);
DCHECK(result);
- DCHECK(obj->IsExternalString());
+ DCHECK(obj.IsExternalString());
return result;
}
-
bool v8::String::MakeExternal(
v8::String::ExternalOneByteStringResource* resource) {
i::DisallowHeapAllocation no_allocation;
i::String obj = *Utils::OpenHandle(this);
- if (obj->IsThinString()) {
- obj = i::ThinString::cast(obj)->actual();
+ if (obj.IsThinString()) {
+ obj = i::ThinString::cast(obj).actual();
}
- if (!obj->SupportsExternalization()) {
+ if (!obj.SupportsExternalization()) {
return false;
}
@@ -6563,21 +6239,20 @@ bool v8::String::MakeExternal(
CHECK(resource && resource->data());
- bool result = obj->MakeExternal(resource);
- DCHECK_IMPLIES(result, obj->IsExternalString());
+ bool result = obj.MakeExternal(resource);
+ DCHECK_IMPLIES(result, obj.IsExternalString());
return result;
}
-
bool v8::String::CanMakeExternal() {
i::DisallowHeapAllocation no_allocation;
i::String obj = *Utils::OpenHandle(this);
- if (obj->IsThinString()) {
- obj = i::ThinString::cast(obj)->actual();
+ if (obj.IsThinString()) {
+ obj = i::ThinString::cast(obj).actual();
}
- if (!obj->SupportsExternalization()) {
+ if (!obj.SupportsExternalization()) {
return false;
}
@@ -6596,7 +6271,6 @@ Isolate* v8::Object::GetIsolate() {
return reinterpret_cast<Isolate*>(i_isolate);
}
-
Local<v8::Object> v8::Object::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Object, New);
@@ -6675,13 +6349,12 @@ Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
return Utils::ToLocal(obj);
}
-
double v8::NumberObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
i::Isolate* isolate = jsvalue->GetIsolate();
LOG_API(isolate, NumberObject, NumberValue);
- return jsvalue->value()->Number();
+ return jsvalue->value().Number();
}
Local<v8::Value> v8::BigIntObject::New(Isolate* isolate, int64_t value) {
@@ -6716,16 +6389,14 @@ Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
return Utils::ToLocal(obj);
}
-
bool v8::BooleanObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
i::Isolate* isolate = jsvalue->GetIsolate();
LOG_API(isolate, BooleanObject, BooleanValue);
- return jsvalue->value()->IsTrue(isolate);
+ return jsvalue->value().IsTrue(isolate);
}
-
Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
Local<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
@@ -6737,7 +6408,6 @@ Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
return Utils::ToLocal(obj);
}
-
Local<v8::String> v8::StringObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6747,17 +6417,16 @@ Local<v8::String> v8::StringObject::ValueOf() const {
i::Handle<i::String>(i::String::cast(jsvalue->value()), isolate));
}
-
Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, SymbolObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::Object> obj = i::Object::ToObject(
- i_isolate, Utils::OpenHandle(*value)).ToHandleChecked();
+ i::Handle<i::Object> obj =
+ i::Object::ToObject(i_isolate, Utils::OpenHandle(*value))
+ .ToHandleChecked();
return Utils::ToLocal(obj);
}
-
Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6767,7 +6436,6 @@ Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value()), isolate));
}
-
MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
if (std::isnan(time)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
@@ -6782,34 +6450,24 @@ MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
RETURN_ESCAPED(result);
}
-
double v8::Date::ValueOf() const {
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
i::Isolate* isolate = jsdate->GetIsolate();
LOG_API(isolate, Date, NumberValue);
- return jsdate->value()->Number();
+ return jsdate->value().Number();
}
// Assert that the static TimeZoneDetection cast in
// DateTimeConfigurationChangeNotification is valid.
-#define TIME_ZONE_DETECTION_ASSERT_EQ(value) \
- STATIC_ASSERT( \
- static_cast<int>(v8::Isolate::TimeZoneDetection::value) == \
- static_cast<int>(base::TimezoneCache::TimeZoneDetection::value)); \
- STATIC_ASSERT(static_cast<int>(v8::Isolate::TimeZoneDetection::value) == \
- static_cast<int>(v8::Date::TimeZoneDetection::value));
+#define TIME_ZONE_DETECTION_ASSERT_EQ(value) \
+ STATIC_ASSERT( \
+ static_cast<int>(v8::Isolate::TimeZoneDetection::value) == \
+ static_cast<int>(base::TimezoneCache::TimeZoneDetection::value));
TIME_ZONE_DETECTION_ASSERT_EQ(kSkip)
TIME_ZONE_DETECTION_ASSERT_EQ(kRedetect)
#undef TIME_ZONE_DETECTION_ASSERT_EQ
-// static
-void v8::Date::DateTimeConfigurationChangeNotification(
- Isolate* isolate, TimeZoneDetection time_zone_detection) {
- isolate->DateTimeConfigurationChangeNotification(
- static_cast<v8::Isolate::TimeZoneDetection>(time_zone_detection));
-}
-
MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
Local<String> pattern, Flags flags) {
PREPARE_FOR_EXECUTION(context, RegExp, New, RegExp);
@@ -6822,14 +6480,12 @@ MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
RETURN_ESCAPED(result);
}
-
Local<v8::String> v8::RegExp::GetSource() const {
i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
return Utils::ToLocal(
i::Handle<i::String>(obj->Pattern(), obj->GetIsolate()));
}
-
// Assert that the static flags cast in GetFlags is valid.
#define REGEXP_FLAG_ASSERT_EQ(flag) \
STATIC_ASSERT(static_cast<int>(v8::RegExp::flag) == \
@@ -6847,7 +6503,6 @@ v8::RegExp::Flags v8::RegExp::GetFlags() const {
return RegExp::Flags(static_cast<int>(obj->GetFlags()));
}
-
Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Array, New);
@@ -6881,14 +6536,13 @@ Local<v8::Array> v8::Array::New(Isolate* isolate, Local<Value>* elements,
uint32_t v8::Array::Length() const {
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
i::Object length = obj->length();
- if (length->IsSmi()) {
+ if (length.IsSmi()) {
return i::Smi::ToInt(length);
} else {
- return static_cast<uint32_t>(length->Number());
+ return static_cast<uint32_t>(length.Number());
}
}
-
Local<v8::Map> v8::Map::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Map, New);
@@ -6897,13 +6551,11 @@ Local<v8::Map> v8::Map::New(Isolate* isolate) {
return Utils::ToLocal(obj);
}
-
size_t v8::Map::Size() const {
i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
- return i::OrderedHashMap::cast(obj->table())->NumberOfElements();
+ return i::OrderedHashMap::cast(obj->table()).NumberOfElements();
}
-
void Map::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
@@ -6912,21 +6564,19 @@ void Map::Clear() {
i::JSMap::Clear(isolate, self);
}
-
MaybeLocal<Value> Map::Get(Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION(context, Map, Get, Value);
auto self = Utils::OpenHandle(this);
Local<Value> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
has_pending_exception =
- !ToLocal<Value>(i::Execution::Call(isolate, isolate->map_get(), self,
- arraysize(argv), argv),
+ !ToLocal<Value>(i::Execution::CallBuiltin(isolate, isolate->map_get(),
+ self, arraysize(argv), argv),
&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
-
MaybeLocal<Map> Map::Set(Local<Context> context, Local<Value> key,
Local<Value> value) {
PREPARE_FOR_EXECUTION(context, Map, Set, Map);
@@ -6934,37 +6584,38 @@ MaybeLocal<Map> Map::Set(Local<Context> context, Local<Value> key,
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
Utils::OpenHandle(*value)};
- has_pending_exception = !i::Execution::Call(isolate, isolate->map_set(), self,
- arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->map_set(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Map);
RETURN_ESCAPED(Local<Map>::Cast(Utils::ToLocal(result)));
}
-
Maybe<bool> Map::Has(Local<Context> context, Local<Value> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Map, Has, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception = !i::Execution::Call(isolate, isolate->map_has(), self,
- arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->map_has(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->IsTrue(isolate));
}
-
Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Map, Delete, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception = !i::Execution::Call(isolate, isolate->map_delete(),
- self, arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->map_delete(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->IsTrue(isolate));
}
@@ -7024,7 +6675,6 @@ Local<Array> Map::AsArray() const {
MapAsArray(isolate, obj->table(), 0, MapAsArrayKind::kEntries));
}
-
Local<v8::Set> v8::Set::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Set, New);
@@ -7033,13 +6683,11 @@ Local<v8::Set> v8::Set::New(Isolate* isolate) {
return Utils::ToLocal(obj);
}
-
size_t v8::Set::Size() const {
i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
- return i::OrderedHashSet::cast(obj->table())->NumberOfElements();
+ return i::OrderedHashSet::cast(obj->table()).NumberOfElements();
}
-
void Set::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
@@ -7048,43 +6696,43 @@ void Set::Clear() {
i::JSSet::Clear(isolate, self);
}
-
MaybeLocal<Set> Set::Add(Local<Context> context, Local<Value> key) {
PREPARE_FOR_EXECUTION(context, Set, Add, Set);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception = !i::Execution::Call(isolate, isolate->set_add(), self,
- arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->set_add(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Set);
RETURN_ESCAPED(Local<Set>::Cast(Utils::ToLocal(result)));
}
-
Maybe<bool> Set::Has(Local<Context> context, Local<Value> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Set, Has, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception = !i::Execution::Call(isolate, isolate->set_has(), self,
- arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->set_has(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->IsTrue(isolate));
}
-
Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8(isolate, context, Set, Delete, Nothing<bool>(), i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception = !i::Execution::Call(isolate, isolate->set_delete(),
- self, arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->set_delete(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->IsTrue(isolate));
}
@@ -7129,7 +6777,6 @@ Local<Array> Set::AsArray() const {
SetAsArray(isolate, obj->table(), 0, SetAsArrayKind::kValues));
}
-
MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, Promise_Resolver, New, Resolver);
Local<Promise::Resolver> result;
@@ -7139,13 +6786,11 @@ MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
RETURN_ESCAPED(result);
}
-
Local<Promise> Promise::Resolver::GetPromise() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
return Local<Promise>::Cast(Utils::ToLocal(promise));
}
-
Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -7164,7 +6809,6 @@ Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
return Just(true);
}
-
Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
@@ -7183,30 +6827,30 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
return Just(true);
}
-
MaybeLocal<Promise> Promise::Catch(Local<Context> context,
Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, Promise, Catch, Promise);
auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*handler)};
i::Handle<i::Object> result;
- has_pending_exception = !i::Execution::Call(isolate, isolate->promise_catch(),
- self, arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->promise_catch(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Promise);
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
-
MaybeLocal<Promise> Promise::Then(Local<Context> context,
Local<Function> handler) {
PREPARE_FOR_EXECUTION(context, Promise, Then, Promise);
auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*handler)};
i::Handle<i::Object> result;
- has_pending_exception = !i::Execution::Call(isolate, isolate->promise_then(),
- self, arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->promise_then(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Promise);
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
@@ -7219,9 +6863,10 @@ MaybeLocal<Promise> Promise::Then(Local<Context> context,
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*on_fulfilled),
Utils::OpenHandle(*on_rejected)};
i::Handle<i::Object> result;
- has_pending_exception = !i::Execution::Call(isolate, isolate->promise_then(),
- self, arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->promise_then(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Promise);
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
@@ -7268,26 +6913,22 @@ Local<Value> Proxy::GetTarget() {
return Utils::ToLocal(target);
}
-
Local<Value> Proxy::GetHandler() {
i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
i::Handle<i::Object> handler(self->handler(), self->GetIsolate());
return Utils::ToLocal(handler);
}
-
bool Proxy::IsRevoked() {
i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
return self->IsRevoked();
}
-
void Proxy::Revoke() {
i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
i::JSProxy::Revoke(self);
}
-
MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
Local<Object> local_handler) {
PREPARE_FOR_EXECUTION(context, Proxy, New, Proxy);
@@ -7317,7 +6958,7 @@ OwnedBuffer CompiledWasmModule::Serialize() {
MemorySpan<const uint8_t> CompiledWasmModule::GetWireBytesRef() {
i::Vector<const uint8_t> bytes_vec = native_module_->wire_bytes();
- return {bytes_vec.start(), bytes_vec.size()};
+ return {bytes_vec.begin(), bytes_vec.size()};
}
WasmModuleObject::TransferrableModule
@@ -7452,14 +7093,11 @@ WasmModuleObjectBuilderStreaming::WasmModuleObjectBuilderStreaming(
Local<Promise> WasmModuleObjectBuilderStreaming::GetPromise() { return {}; }
void WasmModuleObjectBuilderStreaming::OnBytesReceived(const uint8_t* bytes,
- size_t size) {
-}
+ size_t size) {}
-void WasmModuleObjectBuilderStreaming::Finish() {
-}
+void WasmModuleObjectBuilderStreaming::Finish() {}
-void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {
-}
+void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {}
// static
v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
@@ -7507,7 +7145,7 @@ v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length,
void WasmMemoryDeleter(void* buffer, size_t lenght, void* info) {
internal::wasm::WasmEngine* engine =
reinterpret_cast<internal::wasm::WasmEngine*>(info);
- CHECK(engine->memory_tracker()->FreeMemoryIfIsWasmMemory(nullptr, buffer));
+ CHECK(engine->memory_tracker()->FreeWasmMemory(nullptr, buffer));
}
void ArrayBufferDeleter(void* buffer, size_t length, void* info) {
@@ -7547,7 +7185,6 @@ size_t v8::ArrayBuffer::ByteLength() const {
return obj->byte_length();
}
-
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
@@ -7562,7 +7199,6 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
return Utils::ToLocal(obj);
}
-
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
size_t byte_length,
ArrayBufferCreationMode mode) {
@@ -7580,24 +7216,22 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
return Utils::ToLocal(obj);
}
-
Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
i::Handle<i::JSArrayBuffer> buffer;
if (obj->IsJSDataView()) {
i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj),
obj->GetIsolate());
- DCHECK(data_view->buffer()->IsJSArrayBuffer());
+ DCHECK(data_view->buffer().IsJSArrayBuffer());
buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()),
data_view->GetIsolate());
} else {
DCHECK(obj->IsJSTypedArray());
- buffer = i::JSTypedArray::cast(*obj)->GetBuffer();
+ buffer = i::JSTypedArray::cast(*obj).GetBuffer();
}
return Utils::ToLocal(buffer);
}
-
size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
size_t byte_offset = self->byte_offset();
@@ -7612,16 +7246,13 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
DCHECK(self->IsJSTypedArray());
i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*self),
isolate);
- i::Handle<i::FixedTypedArrayBase> fixed_array(
- i::FixedTypedArrayBase::cast(typed_array->elements()), isolate);
- source = reinterpret_cast<char*>(fixed_array->DataPtr());
+ source = reinterpret_cast<char*>(typed_array->DataPtr());
}
memcpy(dest, source + byte_offset, bytes_to_copy);
}
return bytes_to_copy;
}
-
bool v8::ArrayBufferView::HasBuffer() const {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()),
@@ -7629,22 +7260,19 @@ bool v8::ArrayBufferView::HasBuffer() const {
return buffer->backing_store() != nullptr;
}
-
size_t v8::ArrayBufferView::ByteOffset() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
return obj->WasDetached() ? 0 : obj->byte_offset();
}
-
size_t v8::ArrayBufferView::ByteLength() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
return obj->WasDetached() ? 0 : obj->byte_length();
}
-
size_t v8::TypedArray::Length() {
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
- return obj->WasDetached() ? 0 : obj->length_value();
+ return obj->WasDetached() ? 0 : obj->length();
}
static_assert(v8::TypedArray::kMaxLength == i::Smi::kMaxValue,
@@ -7703,7 +7331,6 @@ Local<DataView> DataView::New(Local<ArrayBuffer> array_buffer,
return Utils::ToLocal(obj);
}
-
Local<DataView> DataView::New(Local<SharedArrayBuffer> shared_array_buffer,
size_t byte_offset, size_t byte_length) {
CHECK(i::FLAG_harmony_sharedarraybuffer);
@@ -7810,7 +7437,6 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
return Utils::ToLocalShared(obj);
}
-
Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
Isolate* isolate, void* data, size_t byte_length,
ArrayBufferCreationMode mode) {
@@ -7836,7 +7462,6 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
return Utils::ToLocal(result);
}
-
Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
@@ -7844,7 +7469,6 @@ Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
i_isolate->SymbolFor(i::RootIndex::kPublicSymbolTable, i_name, false));
}
-
Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
@@ -7886,7 +7510,6 @@ Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
return v8::Local<Private>(reinterpret_cast<Private*>(*result));
}
-
Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
@@ -7895,7 +7518,6 @@ Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
return v8::Local<Private>(reinterpret_cast<Private*>(*result));
}
-
Local<Number> v8::Number::New(Isolate* isolate, double value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (std::isnan(value)) {
@@ -7907,19 +7529,17 @@ Local<Number> v8::Number::New(Isolate* isolate, double value) {
return Utils::NumberToLocal(result);
}
-
Local<Integer> v8::Integer::New(Isolate* isolate, int32_t value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (i::Smi::IsValid(value)) {
- return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
- internal_isolate));
+ return Utils::IntegerToLocal(
+ i::Handle<i::Object>(i::Smi::FromInt(value), internal_isolate));
}
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
return Utils::IntegerToLocal(result);
}
-
Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
bool fits_into_int32_t = (value & (1 << 31)) == 0;
@@ -8012,17 +7632,15 @@ bool Isolate::InContext() {
return !isolate->context().is_null();
}
-
v8::Local<v8::Context> Isolate::GetCurrentContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Context context = isolate->context();
if (context.is_null()) return Local<Context>();
- i::Context native_context = context->native_context();
+ i::Context native_context = context.native_context();
if (native_context.is_null()) return Local<Context>();
return Utils::ToLocal(i::Handle<i::Context>(native_context, isolate));
}
-
v8::Local<v8::Context> Isolate::GetEnteredContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Handle<i::Object> last =
@@ -8129,26 +7747,22 @@ void Isolate::TerminateExecution() {
isolate->stack_guard()->RequestTerminateExecution();
}
-
bool Isolate::IsExecutionTerminating() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return IsExecutionTerminatingCheck(isolate);
}
-
void Isolate::CancelTerminateExecution() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->stack_guard()->ClearTerminateExecution();
isolate->CancelTerminateExecution();
}
-
void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->RequestInterrupt(callback, data);
}
-
void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
CHECK(i::FLAG_expose_gc);
if (type == kMinorGarbageCollection) {
@@ -8163,7 +7777,6 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
}
}
-
Isolate* Isolate::GetCurrent() {
i::Isolate* isolate = i::Isolate::Current();
return reinterpret_cast<Isolate*>(isolate);
@@ -8244,8 +7857,7 @@ Isolate* Isolate::New(const Isolate::CreateParams& params) {
void Isolate::Dispose() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- if (!Utils::ApiCheck(!isolate->IsInUse(),
- "v8::Isolate::Dispose()",
+ if (!Utils::ApiCheck(!isolate->IsInUse(), "v8::Isolate::Dispose()",
"Disposing the isolate that is entered by a thread.")) {
return;
}
@@ -8262,19 +7874,16 @@ void Isolate::DiscardThreadSpecificMetadata() {
isolate->DiscardPerThreadDataForThisThread();
}
-
void Isolate::Enter() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->Enter();
}
-
void Isolate::Exit() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->Exit();
}
-
void Isolate::SetAbortOnUncaughtExceptionCallback(
AbortOnUncaughtExceptionCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8319,11 +7928,9 @@ Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
break;
default:
UNREACHABLE();
- break;
}
}
-
Isolate::DisallowJavascriptExecutionScope::~DisallowJavascriptExecutionScope() {
switch (on_failure_) {
case CRASH_ON_FAILURE:
@@ -8337,23 +7944,20 @@ Isolate::DisallowJavascriptExecutionScope::~DisallowJavascriptExecutionScope() {
break;
default:
UNREACHABLE();
- break;
}
}
-
Isolate::AllowJavascriptExecutionScope::AllowJavascriptExecutionScope(
Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- internal_assert_ = reinterpret_cast<void*>(
- new i::AllowJavascriptExecution(i_isolate));
- internal_throws_ = reinterpret_cast<void*>(
- new i::NoThrowOnJavascriptExecution(i_isolate));
+ internal_assert_ =
+ reinterpret_cast<void*>(new i::AllowJavascriptExecution(i_isolate));
+ internal_throws_ =
+ reinterpret_cast<void*>(new i::NoThrowOnJavascriptExecution(i_isolate));
internal_dump_ =
reinterpret_cast<void*>(new i::NoDumpOnJavascriptExecution(i_isolate));
}
-
Isolate::AllowJavascriptExecutionScope::~AllowJavascriptExecutionScope() {
delete reinterpret_cast<i::AllowJavascriptExecution*>(internal_assert_);
delete reinterpret_cast<i::NoThrowOnJavascriptExecution*>(internal_throws_);
@@ -8392,12 +7996,18 @@ i::Address* Isolate::GetDataFromSnapshotOnce(size_t index) {
void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Heap* heap = isolate->heap();
- heap_statistics->total_heap_size_ = heap->CommittedMemory();
+ i::ReadOnlySpace* ro_space = heap->read_only_space();
+
+ heap_statistics->total_heap_size_ =
+ heap->CommittedMemory() + ro_space->CommittedMemory();
heap_statistics->total_heap_size_executable_ =
heap->CommittedMemoryExecutable();
- heap_statistics->total_physical_size_ = heap->CommittedPhysicalMemory();
- heap_statistics->total_available_size_ = heap->Available();
- heap_statistics->used_heap_size_ = heap->SizeOfObjects();
+ heap_statistics->total_physical_size_ =
+ heap->CommittedPhysicalMemory() + ro_space->CommittedPhysicalMemory();
+ heap_statistics->total_available_size_ =
+ heap->Available() + ro_space->Available();
+ heap_statistics->used_heap_size_ =
+ heap->SizeOfObjects() + ro_space->SizeOfObjects();
heap_statistics->heap_size_limit_ = heap->MaxReserved();
// TODO(7424): There is no public API for the {WasmEngine} yet. Once such an
// API becomes available we should report the malloced memory separately. For
@@ -8415,12 +8025,10 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->does_zap_garbage_ = heap->ShouldZapGarbage();
}
-
size_t Isolate::NumberOfHeapSpaces() {
return i::LAST_SPACE - i::FIRST_SPACE + 1;
}
-
bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
size_t index) {
if (!space_statistics) return false;
@@ -8440,14 +8048,12 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
return true;
}
-
size_t Isolate::NumberOfTrackedHeapObjectTypes() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Heap* heap = isolate->heap();
return heap->NumberOfTrackedHeapObjectTypes();
}
-
bool Isolate::GetHeapObjectStatisticsAtLastGC(
HeapObjectStatistics* object_statistics, size_t type_index) {
if (!object_statistics) return false;
@@ -8514,28 +8120,24 @@ void Isolate::SetEventLogger(LogEventCallback that) {
isolate->set_event_logger(that);
}
-
void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
if (callback == nullptr) return;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->AddBeforeCallEnteredCallback(callback);
}
-
void Isolate::RemoveBeforeCallEnteredCallback(
BeforeCallEnteredCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->RemoveBeforeCallEnteredCallback(callback);
}
-
void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
if (callback == nullptr) return;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->AddCallCompletedCallback(callback);
}
-
void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->RemoveCallCompletedCallback(callback);
@@ -8561,7 +8163,6 @@ void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
isolate->SetPromiseRejectCallback(callback);
}
-
void Isolate::RunMicrotasks() {
DCHECK_NE(MicrotasksPolicy::kScoped, GetMicrotasksPolicy());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8588,13 +8189,11 @@ void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
isolate->default_microtask_queue()->EnqueueMicrotask(*microtask);
}
-
void Isolate::SetMicrotasksPolicy(MicrotasksPolicy policy) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->default_microtask_queue()->set_microtasks_policy(policy);
}
-
MicrotasksPolicy Isolate::GetMicrotasksPolicy() const {
i::Isolate* isolate =
reinterpret_cast<i::Isolate*>(const_cast<Isolate*>(this));
@@ -8644,19 +8243,16 @@ void Isolate::SetUseCounterCallback(UseCounterCallback callback) {
reinterpret_cast<i::Isolate*>(this)->SetUseCounterCallback(callback);
}
-
void Isolate::SetCounterFunction(CounterLookupCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->counters()->ResetCounterFunction(callback);
}
-
void Isolate::SetCreateHistogramFunction(CreateHistogramCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->counters()->ResetCreateHistogramFunction(callback);
}
-
void Isolate::SetAddHistogramSampleFunction(
AddHistogramSampleCallback callback) {
reinterpret_cast<i::Isolate*>(this)
@@ -8664,7 +8260,6 @@ void Isolate::SetAddHistogramSampleFunction(
->SetAddHistogramSampleFunction(callback);
}
-
bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
@@ -8686,14 +8281,13 @@ void Isolate::LowMemoryNotification() {
i::HeapIterator iterator(isolate->heap());
for (i::HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (obj->IsAbstractCode()) {
- i::AbstractCode::cast(obj)->DropStackFrameCache();
+ if (obj.IsAbstractCode()) {
+ i::AbstractCode::cast(obj).DropStackFrameCache();
}
}
}
}
-
int Isolate::ContextDisposedNotification(bool dependant_context) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (!dependant_context) {
@@ -8705,13 +8299,11 @@ int Isolate::ContextDisposedNotification(bool dependant_context) {
return isolate->heap()->NotifyContextDisposed(dependant_context);
}
-
void Isolate::IsolateInForegroundNotification() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->IsolateInForegroundNotification();
}
-
void Isolate::IsolateInBackgroundNotification() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->IsolateInBackgroundNotification();
@@ -8759,7 +8351,6 @@ void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
isolate->logger()->SetCodeEventHandler(options, event_handler);
}
-
void Isolate::SetStackLimit(uintptr_t stack_limit) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
CHECK(stack_limit);
@@ -8788,8 +8379,8 @@ UnwindState Isolate::GetUnwindState() {
i::Code js_entry = isolate->heap()->builtin(i::Builtins::kJSEntry);
unwind_state.js_entry_stub.code.start =
- reinterpret_cast<const void*>(js_entry->InstructionStart());
- unwind_state.js_entry_stub.code.length_in_bytes = js_entry->InstructionSize();
+ reinterpret_cast<const void*>(js_entry.InstructionStart());
+ unwind_state.js_entry_stub.code.length_in_bytes = js_entry.InstructionSize();
return unwind_state;
}
@@ -8863,31 +8454,28 @@ bool Isolate::AddMessageListenerWithErrorLevel(MessageCallback that,
return true;
}
-
void Isolate::RemoveMessageListeners(MessageCallback that) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::DisallowHeapAllocation no_gc;
i::TemplateList listeners = isolate->heap()->message_listeners();
- for (int i = 0; i < listeners->length(); i++) {
- if (listeners->get(i)->IsUndefined(isolate)) continue; // skip deleted ones
- i::FixedArray listener = i::FixedArray::cast(listeners->get(i));
- i::Foreign callback_obj = i::Foreign::cast(listener->get(0));
- if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
- listeners->set(i, i::ReadOnlyRoots(isolate).undefined_value());
+ for (int i = 0; i < listeners.length(); i++) {
+ if (listeners.get(i).IsUndefined(isolate)) continue; // skip deleted ones
+ i::FixedArray listener = i::FixedArray::cast(listeners.get(i));
+ i::Foreign callback_obj = i::Foreign::cast(listener.get(0));
+ if (callback_obj.foreign_address() == FUNCTION_ADDR(that)) {
+ listeners.set(i, i::ReadOnlyRoots(isolate).undefined_value());
}
}
}
-
void Isolate::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetFailedAccessCheckCallback(callback);
}
-
void Isolate::SetCaptureStackTraceForUncaughtExceptions(
bool capture, int frame_limit, StackTrace::StackTraceOptions options) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8895,19 +8483,16 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions(
options);
}
-
void Isolate::VisitExternalResources(ExternalResourceVisitor* visitor) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->VisitExternalResources(visitor);
}
-
bool Isolate::IsInUse() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->IsInUse();
}
-
void Isolate::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::DisallowHeapAllocation no_allocation;
@@ -9024,9 +8609,7 @@ String::Utf8Value::Utf8Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
str->WriteUtf8(isolate, str_);
}
-String::Utf8Value::~Utf8Value() {
- i::DeleteArray(str_);
-}
+String::Utf8Value::~Utf8Value() { i::DeleteArray(str_); }
String::Value::Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
: str_(nullptr), length_(0) {
@@ -9043,9 +8626,7 @@ String::Value::Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
str->Write(isolate, str_);
}
-String::Value::~Value() {
- i::DeleteArray(str_);
-}
+String::Value::~Value() { i::DeleteArray(str_); }
#define DEFINE_ERROR(NAME, name) \
Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
@@ -9071,7 +8652,6 @@ DEFINE_ERROR(Error, error)
#undef DEFINE_ERROR
-
Local<Message> Exception::CreateMessage(Isolate* isolate,
Local<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
@@ -9082,7 +8662,6 @@ Local<Message> Exception::CreateMessage(Isolate* isolate,
scope.CloseAndEscape(i_isolate->CreateMessage(obj, nullptr)));
}
-
Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
if (!obj->IsJSObject()) return Local<StackTrace>();
@@ -9092,7 +8671,6 @@ Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
return Utils::StackTraceToLocal(isolate->GetDetailedStackTrace(js_obj));
}
-
// --- D e b u g S u p p o r t ---
void debug::SetContextId(Local<Context> context, int id) {
@@ -9101,7 +8679,7 @@ void debug::SetContextId(Local<Context> context, int id) {
int debug::GetContextId(Local<Context> context) {
i::Object value = Utils::OpenHandle(*context)->debug_context_id();
- return (value->IsSmi()) ? i::Smi::ToInt(value) : 0;
+ return (value.IsSmi()) ? i::Smi::ToInt(value) : 0;
}
void debug::SetInspector(Isolate* isolate,
@@ -9147,6 +8725,14 @@ MaybeLocal<Array> debug::GetPrivateFields(Local<Context> context,
RETURN_ESCAPED(Utils::ToLocal(result));
}
+Local<Context> debug::GetCreationContext(Local<Object> value) {
+ i::Handle<i::Object> val = Utils::OpenHandle(*value);
+ if (val->IsJSGlobalProxy()) {
+ return Local<Context>();
+ }
+ return value->CreationContext();
+}
+
void debug::ChangeBreakOnException(Isolate* isolate, ExceptionBreakState type) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal_isolate->debug()->ChangeBreakOnException(
@@ -9228,13 +8814,13 @@ std::vector<int> debug::Script::LineEnds() const {
i::Isolate* isolate = script->GetIsolate();
i::HandleScope scope(isolate);
i::Script::InitLineEnds(script);
- CHECK(script->line_ends()->IsFixedArray());
+ CHECK(script->line_ends().IsFixedArray());
i::Handle<i::FixedArray> line_ends(i::FixedArray::cast(script->line_ends()),
isolate);
std::vector<int> result(line_ends->length());
for (int i = 0; i < line_ends->length(); ++i) {
i::Smi line_end = i::Smi::cast(line_ends->get(i));
- result[i] = line_end->value();
+ result[i] = line_end.value();
}
return result;
}
@@ -9274,7 +8860,7 @@ Maybe<int> debug::Script::ContextId() const {
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Object value = script->context_data();
- if (value->IsSmi()) return Just(i::Smi::ToInt(value));
+ if (value.IsSmi()) return Just(i::Smi::ToInt(value));
return Nothing<int>();
}
@@ -9317,11 +8903,11 @@ bool debug::Script::GetPossibleBreakpoints(
this->SourceMappingURL().IsEmpty()) {
i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- return module_object->GetPossibleBreakpoints(start, end, locations);
+ return module_object.GetPossibleBreakpoints(start, end, locations);
}
i::Script::InitLineEnds(script);
- CHECK(script->line_ends()->IsFixedArray());
+ CHECK(script->line_ends().IsFixedArray());
i::Isolate* isolate = script->GetIsolate();
i::Handle<i::FixedArray> line_ends =
i::Handle<i::FixedArray>::cast(i::handle(script->line_ends(), isolate));
@@ -9367,7 +8953,7 @@ int debug::Script::GetSourceOffset(const debug::Location& location) const {
if (script->type() == i::Script::TYPE_WASM) {
if (this->SourceMappingURL().IsEmpty()) {
return i::WasmModuleObject::cast(script->wasm_module_object())
- ->GetFunctionOffset(location.GetLineNumber()) +
+ .GetFunctionOffset(location.GetLineNumber()) +
location.GetColumnNumber();
}
DCHECK_EQ(0, location.GetLineNumber());
@@ -9381,7 +8967,7 @@ int debug::Script::GetSourceOffset(const debug::Location& location) const {
}
i::Script::InitLineEnds(script);
- CHECK(script->line_ends()->IsFixedArray());
+ CHECK(script->line_ends().IsFixedArray());
i::Handle<i::FixedArray> line_ends = i::Handle<i::FixedArray>::cast(
i::handle(script->line_ends(), script->GetIsolate()));
CHECK(line_ends->length());
@@ -9423,6 +9009,19 @@ bool debug::Script::SetBreakpoint(v8::Local<v8::String> condition,
return true;
}
+bool debug::Script::SetBreakpointOnScriptEntry(BreakpointId* id) const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::SharedFunctionInfo::ScriptIterator it(isolate, *script);
+ for (i::SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
+ if (sfi.is_toplevel()) {
+ return isolate->debug()->SetBreakpointForFunction(
+ handle(sfi, isolate), isolate->factory()->empty_string(), id);
+ }
+ }
+ return false;
+}
+
void debug::RemoveBreakpoint(Isolate* v8_isolate, BreakpointId id) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::HandleScope handle_scope(isolate);
@@ -9444,7 +9043,7 @@ int debug::WasmScript::NumFunctions() const {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- const i::wasm::WasmModule* module = module_object->module();
+ const i::wasm::WasmModule* module = module_object.module();
DCHECK_GE(i::kMaxInt, module->functions.size());
return static_cast<int>(module->functions.size());
}
@@ -9455,7 +9054,7 @@ int debug::WasmScript::NumImportedFunctions() const {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- const i::wasm::WasmModule* module = module_object->module();
+ const i::wasm::WasmModule* module = module_object.module();
DCHECK_GE(i::kMaxInt, module->num_imported_functions);
return static_cast<int>(module->num_imported_functions);
}
@@ -9467,7 +9066,7 @@ std::pair<int, int> debug::WasmScript::GetFunctionRange(
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- const i::wasm::WasmModule* module = module_object->module();
+ const i::wasm::WasmModule* module = module_object.module();
DCHECK_LE(0, function_index);
DCHECK_GT(module->functions.size(), function_index);
const i::wasm::WasmFunction& func = module->functions[function_index];
@@ -9483,15 +9082,15 @@ uint32_t debug::WasmScript::GetFunctionHash(int function_index) {
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- const i::wasm::WasmModule* module = module_object->module();
+ const i::wasm::WasmModule* module = module_object.module();
DCHECK_LE(0, function_index);
DCHECK_GT(module->functions.size(), function_index);
const i::wasm::WasmFunction& func = module->functions[function_index];
i::wasm::ModuleWireBytes wire_bytes(
- module_object->native_module()->wire_bytes());
+ module_object.native_module()->wire_bytes());
i::Vector<const i::byte> function_bytes = wire_bytes.GetFunctionBytes(&func);
// TODO(herhut): Maybe also take module, name and signature into account.
- return i::StringHasher::HashSequentialString(function_bytes.start(),
+ return i::StringHasher::HashSequentialString(function_bytes.begin(),
function_bytes.length(), 0);
}
@@ -9502,7 +9101,7 @@ debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmModuleObject module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
- return module_object->DisassembleFunction(function_index);
+ return module_object.DisassembleFunction(function_index);
}
debug::Location::Location(int line_number, int column_number)
@@ -9536,8 +9135,8 @@ void debug::GetLoadedScripts(v8::Isolate* v8_isolate,
i::Script::Iterator iterator(isolate);
for (i::Script script = iterator.Next(); !script.is_null();
script = iterator.Next()) {
- if (!script->IsUserJavaScript()) continue;
- if (script->HasValidSource()) {
+ if (!script.IsUserJavaScript()) continue;
+ if (script.HasValidSource()) {
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script_handle(script, isolate);
scripts.Append(ToApiHandle<Script>(script_handle));
@@ -9588,8 +9187,8 @@ void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
*Utils::OpenHandle(*script));
for (i::SharedFunctionInfo info = iter.Next(); !info.is_null();
info = iter.Next()) {
- if (info->HasDebugInfo()) {
- info->GetDebugInfo()->set_computed_debug_is_blackboxed(false);
+ if (info.HasDebugInfo()) {
+ info.GetDebugInfo().set_computed_debug_is_blackboxed(false);
}
}
}
@@ -9625,7 +9224,7 @@ v8::MaybeLocal<v8::Array> v8::Object::PreviewEntries(bool* is_key_value) {
if (object->IsJSMapIterator()) {
i::Handle<i::JSMapIterator> it = i::Handle<i::JSMapIterator>::cast(object);
MapAsArrayKind const kind =
- static_cast<MapAsArrayKind>(it->map()->instance_type());
+ static_cast<MapAsArrayKind>(it->map().instance_type());
*is_key_value = kind == MapAsArrayKind::kEntries;
if (!it->HasMore()) return v8::Array::New(v8_isolate);
return Utils::ToLocal(
@@ -9634,7 +9233,7 @@ v8::MaybeLocal<v8::Array> v8::Object::PreviewEntries(bool* is_key_value) {
if (object->IsJSSetIterator()) {
i::Handle<i::JSSetIterator> it = i::Handle<i::JSSetIterator>::cast(object);
SetAsArrayKind const kind =
- static_cast<SetAsArrayKind>(it->map()->instance_type());
+ static_cast<SetAsArrayKind>(it->map().instance_type());
*is_key_value = kind == SetAsArrayKind::kEntries;
if (!it->HasMore()) return v8::Array::New(v8_isolate);
return Utils::ToLocal(
@@ -9661,8 +9260,8 @@ Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
name, builtin_id, i::LanguageMode::kStrict);
i::Handle<i::JSFunction> fun = isolate->factory()->NewFunction(args);
- fun->shared()->set_internal_formal_parameter_count(0);
- fun->shared()->set_length(0);
+ fun->shared().set_internal_formal_parameter_count(0);
+ fun->shared().set_length(0);
return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
}
@@ -9703,8 +9302,8 @@ v8::Local<v8::StackTrace> debug::GetDetailedStackTrace(
MaybeLocal<debug::Script> debug::GeneratorObject::Script() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
- i::Object maybe_script = obj->function()->shared()->script();
- if (!maybe_script->IsScript()) return MaybeLocal<debug::Script>();
+ i::Object maybe_script = obj->function().shared().script();
+ if (!maybe_script.IsScript()) return MaybeLocal<debug::Script>();
i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
return ToApiHandle<debug::Script>(script);
}
@@ -9717,13 +9316,13 @@ Local<Function> debug::GeneratorObject::Function() {
debug::Location debug::GeneratorObject::SuspendedLocation() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
CHECK(obj->is_suspended());
- i::Object maybe_script = obj->function()->shared()->script();
- if (!maybe_script->IsScript()) return debug::Location();
+ i::Object maybe_script = obj->function().shared().script();
+ if (!maybe_script.IsScript()) return debug::Location();
i::Isolate* isolate = obj->GetIsolate();
i::Handle<i::Script> script(i::Script::cast(maybe_script), isolate);
i::Script::PositionInfo info;
i::SharedFunctionInfo::EnsureSourcePositionsAvailable(
- isolate, i::handle(obj->function()->shared(), isolate));
+ isolate, i::handle(obj->function().shared(), isolate));
i::Script::GetPositionInfo(script, obj->source_position(), &info,
i::Script::WITH_OFFSET);
return debug::Location(info.line, info.column);
@@ -9768,7 +9367,7 @@ void debug::GlobalLexicalScopeNames(
i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
i::Isolate* isolate = context->GetIsolate();
i::Handle<i::ScriptContextTable> table(
- context->global_object()->native_context()->script_context_table(),
+ context->global_object().native_context().script_context_table(),
isolate);
for (int i = 0; i < table->used(); i++) {
i::Handle<i::Context> context =
@@ -9816,8 +9415,8 @@ bool debug::SetFunctionBreakpoint(v8::Local<v8::Function> function,
i::Handle<i::String> condition_string =
condition.IsEmpty() ? isolate->factory()->empty_string()
: Utils::OpenHandle(*condition);
- return isolate->debug()->SetBreakpointForFunction(jsfunction,
- condition_string, id);
+ return isolate->debug()->SetBreakpointForFunction(
+ handle(jsfunction->shared(), isolate), condition_string, id);
}
debug::PostponeInterruptsScope::PostponeInterruptsScope(v8::Isolate* isolate)
@@ -9956,8 +9555,8 @@ v8::MaybeLocal<v8::Value> debug::WeakMap::Get(v8::Local<v8::Context> context,
Local<Value> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
has_pending_exception =
- !ToLocal<Value>(i::Execution::Call(isolate, isolate->weakmap_get(), self,
- arraysize(argv), argv),
+ !ToLocal<Value>(i::Execution::CallBuiltin(isolate, isolate->weakmap_get(),
+ self, arraysize(argv), argv),
&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
@@ -9971,9 +9570,10 @@ v8::MaybeLocal<debug::WeakMap> debug::WeakMap::Set(
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
Utils::OpenHandle(*value)};
- has_pending_exception = !i::Execution::Call(isolate, isolate->weakmap_set(),
- self, arraysize(argv), argv)
- .ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::CallBuiltin(isolate, isolate->weakmap_set(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(WeakMap);
RETURN_ESCAPED(Local<WeakMap>::Cast(Utils::ToLocal(result)));
}
@@ -10022,42 +9622,36 @@ int CpuProfileNode::GetLineNumber() const {
return reinterpret_cast<const i::ProfileNode*>(this)->line_number();
}
-
int CpuProfileNode::GetColumnNumber() const {
- return reinterpret_cast<const i::ProfileNode*>(this)->
- entry()->column_number();
+ return reinterpret_cast<const i::ProfileNode*>(this)
+ ->entry()
+ ->column_number();
}
-
unsigned int CpuProfileNode::GetHitLineCount() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->GetHitLineCount();
}
-
bool CpuProfileNode::GetLineTicks(LineTick* entries,
unsigned int length) const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->GetLineTicks(entries, length);
}
-
const char* CpuProfileNode::GetBailoutReason() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->entry()->bailout_reason();
}
-
unsigned CpuProfileNode::GetHitCount() const {
return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
}
-
unsigned CpuProfileNode::GetCallUid() const {
return reinterpret_cast<const i::ProfileNode*>(this)->function_id();
}
-
unsigned CpuProfileNode::GetNodeId() const {
return reinterpret_cast<const i::ProfileNode*>(this)->id();
}
@@ -10071,7 +9665,6 @@ int CpuProfileNode::GetChildrenCount() const {
reinterpret_cast<const i::ProfileNode*>(this)->children()->size());
}
-
const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
const i::ProfileNode* child =
reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index);
@@ -10089,7 +9682,6 @@ const std::vector<CpuProfileDeoptInfo>& CpuProfileNode::GetDeoptInfos() const {
return node->deopt_infos();
}
-
void CpuProfile::Delete() {
i::CpuProfile* profile = reinterpret_cast<i::CpuProfile*>(this);
i::CpuProfiler* profiler = profile->cpu_profiler();
@@ -10097,53 +9689,46 @@ void CpuProfile::Delete() {
profiler->DeleteProfile(profile);
}
-
Local<String> CpuProfile::GetTitle() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
i::Isolate* isolate = profile->top_down()->isolate();
- return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
- profile->title()));
+ return ToApiHandle<String>(
+ isolate->factory()->InternalizeUtf8String(profile->title()));
}
-
const CpuProfileNode* CpuProfile::GetTopDownRoot() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root());
}
-
const CpuProfileNode* CpuProfile::GetSample(int index) const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return reinterpret_cast<const CpuProfileNode*>(profile->sample(index).node);
}
-
int64_t CpuProfile::GetSampleTimestamp(int index) const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return (profile->sample(index).timestamp - base::TimeTicks())
.InMicroseconds();
}
-
int64_t CpuProfile::GetStartTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return (profile->start_time() - base::TimeTicks()).InMicroseconds();
}
-
int64_t CpuProfile::GetEndTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
return (profile->end_time() - base::TimeTicks()).InMicroseconds();
}
-
int CpuProfile::GetSamplesCount() const {
return reinterpret_cast<const i::CpuProfile*>(this)->samples_count();
}
-CpuProfiler* CpuProfiler::New(Isolate* isolate) {
+CpuProfiler* CpuProfiler::New(Isolate* isolate, CpuProfilingNamingMode mode) {
return reinterpret_cast<CpuProfiler*>(
- new i::CpuProfiler(reinterpret_cast<i::Isolate*>(isolate)));
+ new i::CpuProfiler(reinterpret_cast<i::Isolate*>(isolate), mode));
}
void CpuProfiler::Dispose() { delete reinterpret_cast<i::CpuProfiler*>(this); }
@@ -10168,15 +9753,25 @@ void CpuProfiler::CollectSample() {
reinterpret_cast<i::CpuProfiler*>(this)->CollectSample();
}
+void CpuProfiler::StartProfiling(Local<String> title,
+ CpuProfilingOptions options) {
+ reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
+ *Utils::OpenHandle(*title), options);
+}
+
void CpuProfiler::StartProfiling(Local<String> title, bool record_samples) {
+ CpuProfilingOptions options(
+ kLeafNodeLineNumbers,
+ record_samples ? CpuProfilingOptions::kNoSampleLimit : 0);
reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
- *Utils::OpenHandle(*title), record_samples, kLeafNodeLineNumbers);
+ *Utils::OpenHandle(*title), options);
}
void CpuProfiler::StartProfiling(Local<String> title, CpuProfilingMode mode,
- bool record_samples) {
+ bool record_samples, unsigned max_samples) {
+ CpuProfilingOptions options(mode, record_samples ? max_samples : 0);
reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
- *Utils::OpenHandle(*title), record_samples, mode);
+ *Utils::OpenHandle(*title), options);
}
CpuProfile* CpuProfiler::StopProfiling(Local<String> title) {
@@ -10185,7 +9780,6 @@ CpuProfile* CpuProfiler::StopProfiling(Local<String> title) {
*Utils::OpenHandle(*title)));
}
-
void CpuProfiler::SetIdle(bool is_idle) {
i::CpuProfiler* profiler = reinterpret_cast<i::CpuProfiler*>(this);
i::Isolate* isolate = profiler->isolate();
@@ -10271,12 +9865,10 @@ static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
reinterpret_cast<const i::HeapGraphEdge*>(edge));
}
-
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
}
-
Local<Value> HeapGraphEdge::GetName() const {
i::HeapGraphEdge* edge = ToInternal(this);
i::Isolate* isolate = edge->isolate();
@@ -10292,68 +9884,56 @@ Local<Value> HeapGraphEdge::GetName() const {
case i::HeapGraphEdge::kHidden:
return ToApiHandle<Number>(
isolate->factory()->NewNumberFromInt(edge->index()));
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
-
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
const i::HeapEntry* from = ToInternal(this)->from();
return reinterpret_cast<const HeapGraphNode*>(from);
}
-
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
const i::HeapEntry* to = ToInternal(this)->to();
return reinterpret_cast<const HeapGraphNode*>(to);
}
-
static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
return const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(entry));
}
-
HeapGraphNode::Type HeapGraphNode::GetType() const {
return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
}
-
Local<String> HeapGraphNode::GetName() const {
i::Isolate* isolate = ToInternal(this)->isolate();
return ToApiHandle<String>(
isolate->factory()->InternalizeUtf8String(ToInternal(this)->name()));
}
-
-SnapshotObjectId HeapGraphNode::GetId() const {
- return ToInternal(this)->id();
-}
-
+SnapshotObjectId HeapGraphNode::GetId() const { return ToInternal(this)->id(); }
size_t HeapGraphNode::GetShallowSize() const {
return ToInternal(this)->self_size();
}
-
int HeapGraphNode::GetChildrenCount() const {
return ToInternal(this)->children_count();
}
-
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
return reinterpret_cast<const HeapGraphEdge*>(ToInternal(this)->child(index));
}
-
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
return const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
}
-
void HeapSnapshot::Delete() {
i::Isolate* isolate = ToInternal(this)->profiler()->isolate();
if (isolate->heap_profiler()->GetSnapshotsCount() > 1) {
@@ -10364,69 +9944,56 @@ void HeapSnapshot::Delete() {
}
}
-
const HeapGraphNode* HeapSnapshot::GetRoot() const {
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
}
-
const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
return reinterpret_cast<const HeapGraphNode*>(
ToInternal(this)->GetEntryById(id));
}
-
int HeapSnapshot::GetNodesCount() const {
return static_cast<int>(ToInternal(this)->entries().size());
}
-
const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
return reinterpret_cast<const HeapGraphNode*>(
&ToInternal(this)->entries().at(index));
}
-
SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
return ToInternal(this)->max_snapshot_js_object_id();
}
-
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
- Utils::ApiCheck(format == kJSON,
- "v8::HeapSnapshot::Serialize",
+ Utils::ApiCheck(format == kJSON, "v8::HeapSnapshot::Serialize",
"Unknown serialization format");
- Utils::ApiCheck(stream->GetChunkSize() > 0,
- "v8::HeapSnapshot::Serialize",
+ Utils::ApiCheck(stream->GetChunkSize() > 0, "v8::HeapSnapshot::Serialize",
"Invalid stream chunk size");
i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
serializer.Serialize(stream);
}
-
// static
STATIC_CONST_MEMBER_DEFINITION const SnapshotObjectId
HeapProfiler::kUnknownObjectId;
-
int HeapProfiler::GetSnapshotCount() {
return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotsCount();
}
-
const HeapSnapshot* HeapProfiler::GetHeapSnapshot(int index) {
return reinterpret_cast<const HeapSnapshot*>(
reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshot(index));
}
-
SnapshotObjectId HeapProfiler::GetObjectId(Local<Value> value) {
i::Handle<i::Object> obj = Utils::OpenHandle(*value);
return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(obj);
}
-
Local<Value> HeapProfiler::FindObjectById(SnapshotObjectId id) {
i::Handle<i::Object> obj =
reinterpret_cast<i::HeapProfiler*>(this)->FindHeapObjectById(id);
@@ -10434,31 +10001,26 @@ Local<Value> HeapProfiler::FindObjectById(SnapshotObjectId id) {
return Utils::ToLocal(obj);
}
-
void HeapProfiler::ClearObjectIds() {
reinterpret_cast<i::HeapProfiler*>(this)->ClearHeapObjectMap();
}
-
const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
ActivityControl* control, ObjectNameResolver* resolver) {
return reinterpret_cast<const HeapSnapshot*>(
- reinterpret_cast<i::HeapProfiler*>(this)
- ->TakeSnapshot(control, resolver));
+ reinterpret_cast<i::HeapProfiler*>(this)->TakeSnapshot(control,
+ resolver));
}
-
void HeapProfiler::StartTrackingHeapObjects(bool track_allocations) {
reinterpret_cast<i::HeapProfiler*>(this)->StartHeapObjectsTracking(
track_allocations);
}
-
void HeapProfiler::StopTrackingHeapObjects() {
reinterpret_cast<i::HeapProfiler*>(this)->StopHeapObjectsTracking();
}
-
SnapshotObjectId HeapProfiler::GetHeapStats(OutputStream* stream,
int64_t* timestamp_us) {
i::HeapProfiler* heap_profiler = reinterpret_cast<i::HeapProfiler*>(this);
@@ -10472,12 +10034,10 @@ bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
sample_interval, stack_depth, flags);
}
-
void HeapProfiler::StopSamplingHeapProfiler() {
reinterpret_cast<i::HeapProfiler*>(this)->StopSamplingHeapProfiler();
}
-
AllocationProfile* HeapProfiler::GetAllocationProfile() {
return reinterpret_cast<i::HeapProfiler*>(this)->GetAllocationProfile();
}
@@ -10501,12 +10061,10 @@ void HeapProfiler::RemoveBuildEmbedderGraphCallback(
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
-
void Testing::SetStressRunType(Testing::StressType type) {
internal::Testing::set_stress_type(type);
}
-
int Testing::GetStressRuns() {
if (internal::FLAG_stress_runs != 0) return internal::FLAG_stress_runs;
#ifdef DEBUG
@@ -10518,12 +10076,6 @@ int Testing::GetStressRuns() {
#endif
}
-
-static void SetFlagsFromString(const char* flags) {
- V8::SetFlagsFromString(flags, i::StrLength(flags));
-}
-
-
void Testing::PrepareStressRun(int run) {
static const char* kLazyOptimizations =
"--prepare-always-opt "
@@ -10537,33 +10089,43 @@ void Testing::PrepareStressRun(int run) {
static const char* kDeoptEvery13Times = "--deopt-every-n-times=13";
if (internal::Testing::stress_type() == Testing::kStressTypeDeopt &&
internal::FLAG_deopt_every_n_times == 0) {
- SetFlagsFromString(kDeoptEvery13Times);
+ V8::SetFlagsFromString(kDeoptEvery13Times);
}
#ifdef DEBUG
// As stressing in debug mode only make two runs skip the deopt stressing
// here.
if (run == GetStressRuns() - 1) {
- SetFlagsFromString(kForcedOptimizations);
+ V8::SetFlagsFromString(kForcedOptimizations);
} else {
- SetFlagsFromString(kLazyOptimizations);
+ V8::SetFlagsFromString(kLazyOptimizations);
}
#else
if (run == GetStressRuns() - 1) {
- SetFlagsFromString(kForcedOptimizations);
+ V8::SetFlagsFromString(kForcedOptimizations);
} else if (run != GetStressRuns() - 2) {
- SetFlagsFromString(kLazyOptimizations);
+ V8::SetFlagsFromString(kLazyOptimizations);
}
#endif
}
-
void Testing::DeoptimizeAll(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::HandleScope scope(i_isolate);
i::Deoptimizer::DeoptimizeAll(i_isolate);
}
+void EmbedderHeapTracer::TracePrologue(TraceFlags flags) {
+#if __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated"
+#endif
+ TracePrologue();
+#if __clang__
+#pragma clang diagnostic pop
+#endif
+}
+
void EmbedderHeapTracer::FinalizeTracing() {
if (isolate_) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(isolate_);
@@ -10585,6 +10147,17 @@ void EmbedderHeapTracer::GarbageCollectionForTesting(
kGCCallbackFlagForced);
}
+void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) {
+ if (isolate_) {
+ i::LocalEmbedderHeapTracer* const tracer =
+ reinterpret_cast<i::Isolate*>(isolate_)
+ ->heap()
+ ->local_embedder_heap_tracer();
+ DCHECK_NOT_NULL(tracer);
+ tracer->IncreaseAllocatedSize(bytes);
+ }
+}
+
void EmbedderHeapTracer::RegisterEmbedderReference(
const TracedGlobal<v8::Value>& ref) {
if (ref.IsEmpty()) return;
@@ -10608,10 +10181,7 @@ const size_t HandleScopeImplementer::kEnteredContextsOffset =
const size_t HandleScopeImplementer::kIsMicrotaskContextOffset =
offsetof(HandleScopeImplementer, is_microtask_context_);
-void HandleScopeImplementer::FreeThreadResources() {
- Free();
-}
-
+void HandleScopeImplementer::FreeThreadResources() { Free(); }
char* HandleScopeImplementer::ArchiveThread(char* storage) {
HandleScopeData* current = isolate_->handle_scope_data();
@@ -10624,12 +10194,10 @@ char* HandleScopeImplementer::ArchiveThread(char* storage) {
return storage + ArchiveSpacePerThread();
}
-
int HandleScopeImplementer::ArchiveSpacePerThread() {
return sizeof(HandleScopeImplementer);
}
-
char* HandleScopeImplementer::RestoreThread(char* storage) {
MemCopy(this, storage, sizeof(*this));
*isolate_->handle_scope_data() = handle_scope_data_;
@@ -10723,13 +10291,11 @@ DeferredHandles* HandleScopeImplementer::Detach(Address* prev_limit) {
return deferred;
}
-
void HandleScopeImplementer::BeginDeferredScope() {
DCHECK_NULL(last_handle_before_deferred_block_);
last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next;
}
-
DeferredHandles::~DeferredHandles() {
isolate_->UnlinkDeferredHandles(this);
@@ -10764,7 +10330,6 @@ void DeferredHandles::Iterate(RootVisitor* v) {
}
}
-
void InvokeAccessorGetterCallback(
v8::Local<v8::Name> property,
const v8::PropertyCallbackInfo<v8::Value>& info,
@@ -10779,7 +10344,6 @@ void InvokeAccessorGetterCallback(
getter(property, info);
}
-
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback) {
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
diff --git a/deps/v8/src/api.h b/deps/v8/src/api/api.h
index e8b3dcf55b..e041a5daf0 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api/api.h
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_API_H_
-#define V8_API_H_
+#ifndef V8_API_API_H_
+#define V8_API_API_H_
#include "include/v8-testing.h"
-#include "src/contexts.h"
-#include "src/detachable-vector.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/bigint.h"
+#include "src/objects/contexts.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-proxy.h"
#include "src/objects/module.h"
+#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
+#include "src/utils/detachable-vector.h"
#include "src/objects/templates.h"
@@ -39,10 +39,7 @@ class WeakMap;
// visible file.
class Consts {
public:
- enum TemplateType {
- FUNCTION_TEMPLATE = 0,
- OBJECT_TEMPLATE = 1
- };
+ enum TemplateType { FUNCTION_TEMPLATE = 0, OBJECT_TEMPLATE = 1 };
};
template <typename T>
@@ -61,14 +58,13 @@ inline v8::internal::Handle<v8::internal::Object> FromCData(
class ApiFunction {
public:
- explicit ApiFunction(v8::internal::Address addr) : addr_(addr) { }
+ explicit ApiFunction(v8::internal::Address addr) : addr_(addr) {}
v8::internal::Address address() { return addr_; }
+
private:
v8::internal::Address addr_;
};
-
-
class RegisteredExtension {
public:
static void Register(std::unique_ptr<Extension>);
@@ -76,6 +72,7 @@ class RegisteredExtension {
Extension* extension() const { return extension_.get(); }
RegisteredExtension* next() const { return next_; }
static RegisteredExtension* first_extension() { return first_extension_; }
+
private:
explicit RegisteredExtension(Extension*);
explicit RegisteredExtension(std::unique_ptr<Extension>);
@@ -135,8 +132,7 @@ class RegisteredExtension {
class Utils {
public:
- static inline bool ApiCheck(bool condition,
- const char* location,
+ static inline bool ApiCheck(bool condition, const char* location,
const char* message) {
if (!condition) Utils::ReportApiFailure(location, message);
return condition;
@@ -290,7 +286,6 @@ inline v8::Local<T> ToApiHandle(
return Utils::Convert<v8::internal::Object, T>(obj);
}
-
template <class T>
inline bool ToLocal(v8::internal::MaybeHandle<v8::internal::Object> maybe,
Local<T>* local) {
@@ -329,7 +324,6 @@ class V8_EXPORT_PRIVATE DeferredHandles {
friend class Isolate;
};
-
// This class is here in order to be able to declare it a friend of
// HandleScope. Moving these methods to be members of HandleScope would be
// neat in some ways, but it would expose internal implementation details in
@@ -361,12 +355,9 @@ class HandleScopeImplementer {
: isolate_(isolate),
spare_(nullptr),
call_depth_(0),
- last_handle_before_deferred_block_(nullptr) {
- }
+ last_handle_before_deferred_block_(nullptr) {}
- ~HandleScopeImplementer() {
- DeleteArray(spare_);
- }
+ ~HandleScopeImplementer() { DeleteArray(spare_); }
// Threading support for handle data.
static int ArchiveSpacePerThread();
@@ -383,8 +374,8 @@ class HandleScopeImplementer {
inline void DeleteExtensions(internal::Address* prev_limit);
// Call depth represents nested v8 api calls.
- inline void IncrementCallDepth() {call_depth_++;}
- inline void DecrementCallDepth() {call_depth_--;}
+ inline void IncrementCallDepth() { call_depth_++; }
+ inline void DecrementCallDepth() { call_depth_--; }
inline bool CallDepthIsZero() { return call_depth_ == 0; }
inline void EnterContext(Context context);
@@ -489,7 +480,6 @@ Context HandleScopeImplementer::RestoreContext() {
return last_context;
}
-
bool HandleScopeImplementer::HasSavedContexts() {
return !saved_contexts_.empty();
}
@@ -581,4 +571,4 @@ class Testing {
} // namespace internal
} // namespace v8
-#endif // V8_API_H_
+#endif // V8_API_API_H_
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
deleted file mode 100644
index 66ce3f9da4..0000000000
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// CPU specific code for arm independent of OS goes here.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/arm64/utils-arm64.h"
-#include "src/cpu-features.h"
-
-namespace v8 {
-namespace internal {
-
-class CacheLineSizes {
- public:
- CacheLineSizes() {
-#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN)
- cache_type_register_ = 0;
-#else
- // Copy the content of the cache type register to a core register.
- __asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT
- : [ctr] "=r"(cache_type_register_));
-#endif
- }
-
- uint32_t icache_line_size() const { return ExtractCacheLineSize(0); }
- uint32_t dcache_line_size() const { return ExtractCacheLineSize(16); }
-
- private:
- uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
- // The cache type register holds the size of cache lines in words as a
- // power of two.
- return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xF);
- }
-
- uint32_t cache_type_register_;
-};
-
-void CpuFeatures::FlushICache(void* address, size_t length) {
-#if defined(V8_HOST_ARCH_ARM64)
-#if defined(V8_OS_WIN)
- ::FlushInstructionCache(GetCurrentProcess(), address, length);
-#else
- // The code below assumes user space cache operations are allowed. The goal
- // of this routine is to make sure the code generated is visible to the I
- // side of the CPU.
-
- uintptr_t start = reinterpret_cast<uintptr_t>(address);
- // Sizes will be used to generate a mask big enough to cover a pointer.
- CacheLineSizes sizes;
- uintptr_t dsize = sizes.dcache_line_size();
- uintptr_t isize = sizes.icache_line_size();
- // Cache line sizes are always a power of 2.
- DCHECK_EQ(CountSetBits(dsize, 64), 1);
- DCHECK_EQ(CountSetBits(isize, 64), 1);
- uintptr_t dstart = start & ~(dsize - 1);
- uintptr_t istart = start & ~(isize - 1);
- uintptr_t end = start + length;
-
- __asm__ __volatile__ ( // NOLINT
- // Clean every line of the D cache containing the target data.
- "0: \n\t"
- // dc : Data Cache maintenance
- // c : Clean
- // i : Invalidate
- // va : by (Virtual) Address
- // c : to the point of Coherency
- // See ARM DDI 0406B page B2-12 for more information.
- // We would prefer to use "cvau" (clean to the point of unification) here
- // but we use "civac" to work around Cortex-A53 errata 819472, 826319,
- // 827319 and 824069.
- "dc civac, %[dline] \n\t"
- "add %[dline], %[dline], %[dsize] \n\t"
- "cmp %[dline], %[end] \n\t"
- "b.lt 0b \n\t"
- // Barrier to make sure the effect of the code above is visible to the rest
- // of the world.
- // dsb : Data Synchronisation Barrier
- // ish : Inner SHareable domain
- // The point of unification for an Inner Shareable shareability domain is
- // the point by which the instruction and data caches of all the processors
- // in that Inner Shareable shareability domain are guaranteed to see the
- // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
- // information.
- "dsb ish \n\t"
- // Invalidate every line of the I cache containing the target data.
- "1: \n\t"
- // ic : instruction cache maintenance
- // i : invalidate
- // va : by address
- // u : to the point of unification
- "ic ivau, %[iline] \n\t"
- "add %[iline], %[iline], %[isize] \n\t"
- "cmp %[iline], %[end] \n\t"
- "b.lt 1b \n\t"
- // Barrier to make sure the effect of the code above is visible to the rest
- // of the world.
- "dsb ish \n\t"
- // Barrier to ensure any prefetching which happened before this code is
- // discarded.
- // isb : Instruction Synchronisation Barrier
- "isb \n\t"
- : [dline] "+r" (dstart),
- [iline] "+r" (istart)
- : [dsize] "r" (dsize),
- [isize] "r" (isize),
- [end] "r" (end)
- // This code does not write to memory but without the dependency gcc might
- // move this code before the code is generated.
- : "cc", "memory"
- ); // NOLINT
-#endif // V8_OS_WIN
-#endif // V8_HOST_ARCH_ARM64
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 44a3f439f6..5a38eeef36 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -6,24 +6,24 @@
#include "src/asmjs/asm-names.h"
#include "src/asmjs/asm-parser.h"
-#include "src/assert-scope.h"
#include "src/ast/ast.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
-#include "src/compiler.h"
-#include "src/counters.h"
-#include "src/execution.h"
-#include "src/handles.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/common/assert-scope.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate.h"
+#include "src/execution/message-template.h"
+#include "src/handles/handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/message-template.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
-#include "src/unoptimized-compilation-info.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
@@ -42,7 +42,7 @@ namespace {
Handle<Object> StdlibMathMember(Isolate* isolate, Handle<JSReceiver> stdlib,
Handle<Name> name) {
Handle<Name> math_name(
- isolate->factory()->InternalizeOneByteString(StaticCharVector("Math")));
+ isolate->factory()->InternalizeString(StaticCharVector("Math")));
Handle<Object> math = JSReceiver::GetDataProperty(stdlib, math_name);
if (!math->IsJSReceiver()) return isolate->factory()->undefined_value();
Handle<JSReceiver> math_receiver = Handle<JSReceiver>::cast(math);
@@ -68,16 +68,16 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
if (members.contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
- Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
- StaticCharVector(#fname))); \
+ Handle<Name> name( \
+ isolate->factory()->InternalizeString(StaticCharVector(#fname))); \
Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
if (!value->IsJSFunction()) return false; \
SharedFunctionInfo shared = Handle<JSFunction>::cast(value)->shared(); \
- if (!shared->HasBuiltinId() || \
- shared->builtin_id() != Builtins::kMath##FName) { \
+ if (!shared.HasBuiltinId() || \
+ shared.builtin_id() != Builtins::kMath##FName) { \
return false; \
} \
- DCHECK_EQ(shared->GetCode(), \
+ DCHECK_EQ(shared.GetCode(), \
isolate->builtins()->builtin(Builtins::kMath##FName)); \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
@@ -85,23 +85,23 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
#define STDLIB_MATH_CONST(cname, const_value) \
if (members.contains(wasm::AsmJsParser::StandardMember::kMath##cname)) { \
members.Remove(wasm::AsmJsParser::StandardMember::kMath##cname); \
- Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
- StaticCharVector(#cname))); \
+ Handle<Name> name( \
+ isolate->factory()->InternalizeString(StaticCharVector(#cname))); \
Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
if (!value->IsNumber() || value->Number() != const_value) return false; \
}
STDLIB_MATH_VALUE_LIST(STDLIB_MATH_CONST)
#undef STDLIB_MATH_CONST
-#define STDLIB_ARRAY_TYPE(fname, FName) \
- if (members.contains(wasm::AsmJsParser::StandardMember::k##FName)) { \
- members.Remove(wasm::AsmJsParser::StandardMember::k##FName); \
- *is_typed_array = true; \
- Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
- StaticCharVector(#FName))); \
- Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name); \
- if (!value->IsJSFunction()) return false; \
- Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
- if (!func.is_identical_to(isolate->fname())) return false; \
+#define STDLIB_ARRAY_TYPE(fname, FName) \
+ if (members.contains(wasm::AsmJsParser::StandardMember::k##FName)) { \
+ members.Remove(wasm::AsmJsParser::StandardMember::k##FName); \
+ *is_typed_array = true; \
+ Handle<Name> name( \
+ isolate->factory()->InternalizeString(StaticCharVector(#FName))); \
+ Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name); \
+ if (!value->IsJSFunction()) return false; \
+ Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
+ if (!func.is_identical_to(isolate->fname())) return false; \
}
STDLIB_ARRAY_TYPE(int8_array_fun, Int8Array)
STDLIB_ARRAY_TYPE(uint8_array_fun, Uint8Array)
@@ -137,7 +137,7 @@ void ReportCompilationSuccess(Handle<Script> script, int position,
if (FLAG_suppress_asm_messages || !FLAG_trace_asm_time) return;
EmbeddedVector<char, 100> text;
int length = SNPrintF(
- text, "success, asm->wasm: %0.3f ms, compile: %0.3f ms, %" PRIuS " bytes",
+ text, "success, asm->wasm: %0.3f ms, compile: %0.3f ms, %zu bytes",
translate_time, compile_time, module_size);
CHECK_NE(-1, length);
text.Truncate(length);
@@ -264,7 +264,7 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
if (FLAG_trace_asm_parser) {
PrintF(
"[asm.js translation successful: time=%0.3fms, "
- "translate_zone=%" PRIuS "KB, compile_zone+=%" PRIuS "KB]\n",
+ "translate_zone=%zuKB, compile_zone+=%zuKB]\n",
translate_time_, translate_zone_size_ / KB, compile_zone_size / KB);
}
return SUCCEEDED;
@@ -408,7 +408,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
if (thrower.error()) {
ScopedVector<char> error_reason(100);
SNPrintF(error_reason, "Internal wasm failure: %s", thrower.error_msg());
- ReportInstantiationFailure(script, position, error_reason.start());
+ ReportInstantiationFailure(script, position, error_reason.begin());
} else {
ReportInstantiationFailure(script, position, "Internal wasm failure");
}
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index 05707cad98..46dd3f2e34 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -7,7 +7,7 @@
// Clients of this interface shouldn't depend on lots of asmjs internals.
// Do not include anything from src/asmjs here!
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index b17fc9f8a7..3d290a1fe1 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -13,8 +13,8 @@
#include "src/asmjs/asm-types.h"
#include "src/base/optional.h"
#include "src/base/overflowing-math.h"
-#include "src/conversions-inl.h"
-#include "src/flags.h"
+#include "src/flags/flags.h"
+#include "src/numbers/conversions-inl.h"
#include "src/parsing/scanner.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-opcodes.h"
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index 42d0aa8d6e..8740cdad11 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -11,7 +11,7 @@
#include "src/asmjs/asm-scanner.h"
#include "src/asmjs/asm-types.h"
#include "src/base/enum-set.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/zone/zone-containers.h"
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index 27d9eee6b4..a9e9c2ca56 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -4,10 +4,12 @@
#include "src/asmjs/asm-scanner.h"
-#include "src/char-predicates-inl.h"
-#include "src/conversions.h"
-#include "src/flags.h"
+#include <cinttypes>
+
+#include "src/flags/flags.h"
+#include "src/numbers/conversions.h"
#include "src/parsing/scanner.h"
+#include "src/strings/char-predicates-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/asmjs/asm-scanner.h b/deps/v8/src/asmjs/asm-scanner.h
index 1f38f0fc66..076a7607e3 100644
--- a/deps/v8/src/asmjs/asm-scanner.h
+++ b/deps/v8/src/asmjs/asm-scanner.h
@@ -11,7 +11,7 @@
#include "src/asmjs/asm-names.h"
#include "src/base/logging.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -29,7 +29,7 @@ class Utf16CharacterStream;
// (for performance).
class V8_EXPORT_PRIVATE AsmJsScanner {
public:
- typedef int32_t token_t;
+ using token_t = int32_t;
explicit AsmJsScanner(Utf16CharacterStream* stream);
diff --git a/deps/v8/src/asmjs/asm-types.cc b/deps/v8/src/asmjs/asm-types.cc
index 656f92a2dd..1fc12df2c9 100644
--- a/deps/v8/src/asmjs/asm-types.cc
+++ b/deps/v8/src/asmjs/asm-types.cc
@@ -6,8 +6,8 @@
#include <cinttypes>
-#include "src/utils.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/asmjs/asm-types.h b/deps/v8/src/asmjs/asm-types.h
index fb044a95f9..8bb9e5d8e0 100644
--- a/deps/v8/src/asmjs/asm-types.h
+++ b/deps/v8/src/asmjs/asm-types.h
@@ -9,7 +9,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@@ -60,7 +60,7 @@ class AsmOverloadedFunctionType;
class AsmValueType {
public:
- typedef uint32_t bitset_t;
+ using bitset_t = uint32_t;
enum : uint32_t {
#define DEFINE_TAG(CamelName, string_name, number, parent_types) \
diff --git a/deps/v8/src/assembler-arch.h b/deps/v8/src/assembler-arch.h
deleted file mode 100644
index 5858907537..0000000000
--- a/deps/v8/src/assembler-arch.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ASSEMBLER_ARCH_H_
-#define V8_ASSEMBLER_ARCH_H_
-
-#include "src/assembler.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/assembler-ppc.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/assembler-s390.h"
-#else
-#error Unknown architecture.
-#endif
-
-#endif // V8_ASSEMBLER_ARCH_H_
diff --git a/deps/v8/src/assembler-inl.h b/deps/v8/src/assembler-inl.h
deleted file mode 100644
index 5cf4fae63a..0000000000
--- a/deps/v8/src/assembler-inl.h
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ASSEMBLER_INL_H_
-#define V8_ASSEMBLER_INL_H_
-
-#include "src/assembler.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm-inl.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/assembler-ppc-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips-inl.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64-inl.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/assembler-s390-inl.h"
-#else
-#error Unknown architecture.
-#endif
-
-#endif // V8_ASSEMBLER_INL_H_
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
index 7e3a25890b..95bd94d8d4 100644
--- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/ast/ast-function-literal-id-reindexer.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/ast/ast.h"
@@ -17,13 +17,83 @@ AstFunctionLiteralIdReindexer::AstFunctionLiteralIdReindexer(size_t stack_limit,
AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() = default;
void AstFunctionLiteralIdReindexer::Reindex(Expression* pattern) {
+#ifdef DEBUG
+ visited_.clear();
+#endif
Visit(pattern);
+ CheckVisited(pattern);
}
void AstFunctionLiteralIdReindexer::VisitFunctionLiteral(FunctionLiteral* lit) {
+ // Make sure we're not already in the visited set.
+ DCHECK(visited_.insert(lit).second);
+
AstTraversalVisitor::VisitFunctionLiteral(lit);
lit->set_function_literal_id(lit->function_literal_id() + delta_);
}
+void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) {
+ // Manually visit the class literal so that we can change the property walk.
+ // This should be kept in-sync with AstTraversalVisitor::VisitClassLiteral.
+
+ if (expr->extends() != nullptr) {
+ Visit(expr->extends());
+ }
+ Visit(expr->constructor());
+ if (expr->static_fields_initializer() != nullptr) {
+ Visit(expr->static_fields_initializer());
+ }
+ if (expr->instance_members_initializer_function() != nullptr) {
+ Visit(expr->instance_members_initializer_function());
+ }
+ ZonePtrList<ClassLiteral::Property>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ClassLiteralProperty* prop = props->at(i);
+
+ // Private fields and public fields with computed names have both their key
+ // and value present in instance_members_initializer_function, so they will
+ // already have been visited.
+ if ((prop->is_computed_name() || prop->is_private()) &&
+ !prop->value()->IsFunctionLiteral()) {
+ if (!prop->key()->IsLiteral()) {
+ CheckVisited(prop->key());
+ }
+ CheckVisited(prop->value());
+ } else {
+ if (!prop->key()->IsLiteral()) {
+ Visit(prop->key());
+ }
+ Visit(prop->value());
+ }
+ }
+}
+
+#ifdef DEBUG
+namespace {
+
+class AstFunctionLiteralIdReindexChecker final
+ : public AstTraversalVisitor<AstFunctionLiteralIdReindexChecker> {
+ public:
+ AstFunctionLiteralIdReindexChecker(size_t stack_limit,
+ const std::set<FunctionLiteral*>* visited)
+ : AstTraversalVisitor(stack_limit), visited_(visited) {}
+
+ void VisitFunctionLiteral(FunctionLiteral* lit) {
+ // TODO(leszeks): It would be nice to print the unvisited function literal
+ // here, but that requires more advanced DCHECK support with formatting.
+ DCHECK(visited_->find(lit) != visited_->end());
+ }
+
+ private:
+ const std::set<FunctionLiteral*>* visited_;
+};
+
+} // namespace
+
+void AstFunctionLiteralIdReindexer::CheckVisited(Expression* expr) {
+ AstFunctionLiteralIdReindexChecker(stack_limit(), &visited_).Visit(expr);
+}
+#endif
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.h b/deps/v8/src/ast/ast-function-literal-id-reindexer.h
index 400196da68..f4dac7b01e 100644
--- a/deps/v8/src/ast/ast-function-literal-id-reindexer.h
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.h
@@ -8,6 +8,10 @@
#include "src/ast/ast-traversal-visitor.h"
#include "src/base/macros.h"
+#ifdef DEBUG
+#include <set>
+#endif
+
namespace v8 {
namespace internal {
@@ -23,10 +27,22 @@ class AstFunctionLiteralIdReindexer final
// AstTraversalVisitor implementation.
void VisitFunctionLiteral(FunctionLiteral* lit);
+ void VisitClassLiteral(ClassLiteral* lit);
private:
int delta_;
+#ifdef DEBUG
+ // Visited set, only used in DCHECKs for verification.
+ std::set<FunctionLiteral*> visited_;
+
+ // Visit all function literals, checking if they have already been visited
+ // (are in the visited set).
+ void CheckVisited(Expression* expr);
+#else
+ void CheckVisited(Expression* expr) {}
+#endif
+
DISALLOW_COPY_AND_ASSIGN(AstFunctionLiteralIdReindexer);
};
diff --git a/deps/v8/src/ast/ast-source-ranges.h b/deps/v8/src/ast/ast-source-ranges.h
index a04e68fa2f..1b42a055dd 100644
--- a/deps/v8/src/ast/ast-source-ranges.h
+++ b/deps/v8/src/ast/ast-source-ranges.h
@@ -25,6 +25,18 @@ struct SourceRange {
int end = kNoSourcePosition) {
return that.IsEmpty() ? Empty() : SourceRange(that.end, end);
}
+
+ static constexpr int kFunctionLiteralSourcePosition = -2;
+ STATIC_ASSERT(kFunctionLiteralSourcePosition == kNoSourcePosition - 1);
+
+ // Source ranges associated with a function literal do not contain real
+ // source positions; instead, they are created with special marker values.
+ // These are later recognized and rewritten during processing in
+ // Coverage::Collect().
+ static SourceRange FunctionLiteralMarkerRange() {
+ return {kFunctionLiteralSourcePosition, kFunctionLiteralSourcePosition};
+ }
+
int32_t start, end;
};
@@ -35,6 +47,7 @@ struct SourceRange {
V(Block) \
V(CaseClause) \
V(Conditional) \
+ V(FunctionLiteral) \
V(IfStatement) \
V(IterationStatement) \
V(JumpStatement) \
@@ -155,6 +168,18 @@ class ConditionalSourceRanges final : public AstNodeSourceRanges {
SourceRange else_range_;
};
+class FunctionLiteralSourceRanges final : public AstNodeSourceRanges {
+ public:
+ SourceRange GetRange(SourceRangeKind kind) override {
+ DCHECK(HasRange(kind));
+ return SourceRange::FunctionLiteralMarkerRange();
+ }
+
+ bool HasRange(SourceRangeKind kind) override {
+ return kind == SourceRangeKind::kBody;
+ }
+};
+
class IfStatementSourceRanges final : public AstNodeSourceRanges {
public:
explicit IfStatementSourceRanges(const SourceRange& then_range,
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index d6f12cb34a..55415f0bdd 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -27,11 +27,11 @@
#include "src/ast/ast-value-factory.h"
-#include "src/char-predicates-inl.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/string-hasher.h"
-#include "src/utils-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/strings/string-hasher.h"
+#include "src/utils/utils-inl.h"
namespace v8 {
namespace internal {
@@ -54,37 +54,16 @@ class OneByteStringStream {
} // namespace
-class AstRawStringInternalizationKey : public StringTableKey {
- public:
- explicit AstRawStringInternalizationKey(const AstRawString* string)
- : StringTableKey(string->hash_field()), string_(string) {}
-
- bool IsMatch(Object other) override {
- if (string_->is_one_byte())
- return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
- return String::cast(other)->IsTwoByteEqualTo(
- Vector<const uint16_t>::cast(string_->literal_bytes_));
- }
-
- Handle<String> AsHandle(Isolate* isolate) override {
- if (string_->is_one_byte())
- return isolate->factory()->NewOneByteInternalizedString(
- string_->literal_bytes_, string_->hash_field());
- return isolate->factory()->NewTwoByteInternalizedString(
- Vector<const uint16_t>::cast(string_->literal_bytes_),
- string_->hash_field());
- }
-
- private:
- const AstRawString* string_;
-};
-
void AstRawString::Internalize(Isolate* isolate) {
DCHECK(!has_string_);
if (literal_bytes_.length() == 0) {
set_string(isolate->factory()->empty_string());
+ } else if (is_one_byte()) {
+ OneByteStringKey key(hash_field_, literal_bytes_);
+ set_string(StringTable::LookupKey(isolate, &key));
} else {
- AstRawStringInternalizationKey key(this);
+ TwoByteStringKey key(hash_field_,
+ Vector<const uint16_t>::cast(literal_bytes_));
set_string(StringTable::LookupKey(isolate, &key));
}
}
@@ -108,13 +87,13 @@ bool AstRawString::IsOneByteEqualTo(const char* data) const {
size_t length = static_cast<size_t>(literal_bytes_.length());
if (length != strlen(data)) return false;
- return 0 == strncmp(reinterpret_cast<const char*>(literal_bytes_.start()),
+ return 0 == strncmp(reinterpret_cast<const char*>(literal_bytes_.begin()),
data, length);
}
uint16_t AstRawString::FirstCharacter() const {
if (is_one_byte()) return literal_bytes_[0];
- const uint16_t* c = reinterpret_cast<const uint16_t*>(literal_bytes_.start());
+ const uint16_t* c = reinterpret_cast<const uint16_t*>(literal_bytes_.begin());
return *c;
}
@@ -193,7 +172,7 @@ AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed)
Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
static_cast<int>(strlen(data))); \
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>( \
- literal.start(), literal.length(), hash_seed_); \
+ literal.begin(), literal.length(), hash_seed_); \
name##_string_ = new (&zone_) AstRawString(true, literal, hash_field); \
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
@@ -213,20 +192,20 @@ AstRawString* AstValueFactory::GetOneByteStringInternal(
int key = literal[0];
if (V8_UNLIKELY(one_character_strings_[key] == nullptr)) {
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
- literal.start(), literal.length(), hash_seed_);
+ literal.begin(), literal.length(), hash_seed_);
one_character_strings_[key] = GetString(hash_field, true, literal);
}
return one_character_strings_[key];
}
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
- literal.start(), literal.length(), hash_seed_);
+ literal.begin(), literal.length(), hash_seed_);
return GetString(hash_field, true, literal);
}
AstRawString* AstValueFactory::GetTwoByteStringInternal(
Vector<const uint16_t> literal) {
uint32_t hash_field = StringHasher::HashSequentialString<uint16_t>(
- literal.start(), literal.length(), hash_seed_);
+ literal.begin(), literal.length(), hash_seed_);
return GetString(hash_field, false, Vector<const byte>::cast(literal));
}
@@ -298,7 +277,7 @@ AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte,
// Copy literal contents for later comparison.
int length = literal_bytes.length();
byte* new_literal_bytes = zone_->NewArray<byte>(length);
- memcpy(new_literal_bytes, literal_bytes.start(), length);
+ memcpy(new_literal_bytes, literal_bytes.begin(), length);
AstRawString* new_string = new (zone_) AstRawString(
is_one_byte, Vector<const byte>(new_literal_bytes, length), hash_field);
CHECK_NOT_NULL(new_string);
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index f063c6942a..bc732ce994 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -31,10 +31,10 @@
#include <forward_list>
#include "src/base/hashmap.h"
-#include "src/conversions.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
+#include "src/numbers/conversions.h"
// Ast(Raw|Cons)String and AstValueFactory are for storing strings and
// values independent of the V8 heap and internalizing them later. During
@@ -60,9 +60,7 @@ class AstRawString final : public ZoneObject {
// Access the physical representation:
bool is_one_byte() const { return is_one_byte_; }
int byte_length() const { return literal_bytes_.length(); }
- const unsigned char* raw_data() const {
- return literal_bytes_.start();
- }
+ const unsigned char* raw_data() const { return literal_bytes_.begin(); }
// For storing AstRawStrings in a hash map.
uint32_t hash_field() const { return hash_field_; }
@@ -203,6 +201,7 @@ class AstBigInt {
F(bigint, "bigint") \
F(boolean, "boolean") \
F(computed, "<computed>") \
+ F(dot_brand, ".brand") \
F(constructor, "constructor") \
F(default, "default") \
F(done, "done") \
@@ -298,8 +297,7 @@ class AstValueFactory {
return GetOneByteStringInternal(literal);
}
const AstRawString* GetOneByteString(const char* string) {
- return GetOneByteString(Vector<const uint8_t>(
- reinterpret_cast<const uint8_t*>(string), StrLength(string)));
+ return GetOneByteString(OneByteVector(string));
}
const AstRawString* GetTwoByteString(Vector<const uint16_t> literal) {
return GetTwoByteStringInternal(literal);
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 7d2227e2c8..a930a374b8 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -12,17 +12,17 @@
#include "src/base/hashmap.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins.h"
-#include "src/contexts.h"
-#include "src/conversions-inl.h"
-#include "src/double.h"
-#include "src/elements.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions-inl.h"
+#include "src/numbers/double.h"
+#include "src/objects/contexts.h"
+#include "src/objects/elements.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/map.h"
-#include "src/property-details.h"
-#include "src/property.h"
-#include "src/string-stream.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-details.h"
+#include "src/objects/property.h"
+#include "src/strings/string-stream.h"
#include "src/zone/zone-list-inl.h"
namespace v8 {
@@ -282,6 +282,17 @@ std::unique_ptr<char[]> FunctionLiteral::GetDebugName() const {
return result;
}
+bool FunctionLiteral::requires_brand_initialization() const {
+ Scope* outer = scope_->outer_scope();
+
+ // If there are no variables declared in the outer scope other than
+ // the class name variable, the outer class scope may be elided when
+ // the function is deserialized after preparsing.
+ if (!outer->is_class_scope()) return false;
+
+ return outer->AsClassScope()->brand() != nullptr;
+}
+
ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
Kind kind, bool is_computed_name)
: LiteralProperty(key, value, is_computed_name),
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index eb72b4f243..27d298c88e 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -10,12 +10,12 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
#include "src/ast/variables.h"
-#include "src/bailout-reason.h"
#include "src/base/threaded-list.h"
-#include "src/globals.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/codegen/label.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/label.h"
#include "src/objects/literal-objects.h"
#include "src/objects/smi.h"
#include "src/parsing/token.h"
@@ -422,7 +422,7 @@ class DoExpression final : public Expression {
class Declaration : public AstNode {
public:
- typedef base::ThreadedList<Declaration> List;
+ using List = base::ThreadedList<Declaration>;
Variable* var() const { return var_; }
void set_var(Variable* var) { var_ = var; }
@@ -1300,7 +1300,7 @@ class ObjectLiteralProperty final : public LiteralProperty {
// for minimizing the work when constructing it at runtime.
class ObjectLiteral final : public AggregateLiteral {
public:
- typedef ObjectLiteralProperty Property;
+ using Property = ObjectLiteralProperty;
Handle<ObjectBoilerplateDescription> boilerplate_description() const {
DCHECK(!boilerplate_description_.is_null());
@@ -2342,6 +2342,8 @@ class FunctionLiteral final : public Expression {
return RequiresInstanceMembersInitializer::decode(bit_field_);
}
+ bool requires_brand_initialization() const;
+
ProducedPreparseData* produced_preparse_data() const {
return produced_preparse_data_;
}
@@ -2435,12 +2437,10 @@ class ClassLiteralProperty final : public LiteralProperty {
}
void set_private_name_var(Variable* var) {
- DCHECK_EQ(FIELD, kind());
DCHECK(is_private());
private_or_computed_name_var_ = var;
}
Variable* private_name_var() const {
- DCHECK_EQ(FIELD, kind());
DCHECK(is_private());
return private_or_computed_name_var_;
}
@@ -2459,7 +2459,7 @@ class ClassLiteralProperty final : public LiteralProperty {
class InitializeClassMembersStatement final : public Statement {
public:
- typedef ClassLiteralProperty Property;
+ using Property = ClassLiteralProperty;
ZonePtrList<Property>* fields() const { return fields_; }
@@ -2474,9 +2474,9 @@ class InitializeClassMembersStatement final : public Statement {
class ClassLiteral final : public Expression {
public:
- typedef ClassLiteralProperty Property;
+ using Property = ClassLiteralProperty;
- Scope* scope() const { return scope_; }
+ ClassScope* scope() const { return scope_; }
Variable* class_variable() const { return class_variable_; }
Expression* extends() const { return extends_; }
FunctionLiteral* constructor() const { return constructor_; }
@@ -2508,7 +2508,7 @@ class ClassLiteral final : public Expression {
private:
friend class AstNodeFactory;
- ClassLiteral(Scope* scope, Variable* class_variable, Expression* extends,
+ ClassLiteral(ClassScope* scope, Variable* class_variable, Expression* extends,
FunctionLiteral* constructor, ZonePtrList<Property>* properties,
FunctionLiteral* static_fields_initializer,
FunctionLiteral* instance_members_initializer_function,
@@ -2531,7 +2531,7 @@ class ClassLiteral final : public Expression {
}
int end_position_;
- Scope* scope_;
+ ClassScope* scope_;
Variable* class_variable_;
Expression* extends_;
FunctionLiteral* constructor_;
@@ -2753,6 +2753,9 @@ class AstVisitor {
return false; \
} \
\
+ protected: \
+ uintptr_t stack_limit() const { return stack_limit_; } \
+ \
private: \
void InitializeAstVisitor(Isolate* isolate) { \
stack_limit_ = isolate->stack_guard()->real_climit(); \
@@ -2938,6 +2941,13 @@ class AstNodeFactory final {
}
class ThisExpression* ThisExpression() {
+ // Clear any previously set "parenthesized" flag on this_expression_ so this
+ // particular token does not inherit the it. The flag is used to check
+ // during arrow function head parsing whether we came from parenthesized
+ // exprssion parsing, since additional arrow function verification was done
+ // there. It does not matter whether a flag is unset after arrow head
+ // verification, so clearing at this point is fine.
+ this_expression_->clear_parenthesized();
return this_expression_;
}
@@ -3226,7 +3236,7 @@ class AstNodeFactory final {
}
ClassLiteral* NewClassLiteral(
- Scope* scope, Variable* variable, Expression* extends,
+ ClassScope* scope, Variable* variable, Expression* extends,
FunctionLiteral* constructor,
ZonePtrList<ClassLiteral::Property>* properties,
FunctionLiteral* static_fields_initializer,
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index d1be965a4a..5e9bbc6332 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -5,9 +5,9 @@
#include "src/ast/modules.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
-#include "src/objects-inl.h"
#include "src/objects/module-inl.h"
-#include "src/pending-compilation-error-handler.h"
+#include "src/objects/objects-inl.h"
+#include "src/parsing/pending-compilation-error-handler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index 2c778ff349..c3aa2bd0ad 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -125,12 +125,12 @@ class ModuleDescriptor : public ZoneObject {
bool operator()(const AstRawString* lhs, const AstRawString* rhs) const;
};
- typedef ZoneMap<const AstRawString*, ModuleRequest, AstRawStringComparer>
- ModuleRequestMap;
- typedef ZoneMultimap<const AstRawString*, Entry*, AstRawStringComparer>
- RegularExportMap;
- typedef ZoneMap<const AstRawString*, Entry*, AstRawStringComparer>
- RegularImportMap;
+ using ModuleRequestMap =
+ ZoneMap<const AstRawString*, ModuleRequest, AstRawStringComparer>;
+ using RegularExportMap =
+ ZoneMultimap<const AstRawString*, Entry*, AstRawStringComparer>;
+ using RegularImportMap =
+ ZoneMap<const AstRawString*, Entry*, AstRawStringComparer>;
// Module requests.
const ModuleRequestMap& module_requests() const { return module_requests_; }
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index c7f6e3d9f0..eca091d61f 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -9,10 +9,10 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/base/platform/platform.h"
-#include "src/globals.h"
-#include "src/objects-inl.h"
-#include "src/string-builder-inl.h"
-#include "src/vector.h"
+#include "src/common/globals.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -756,7 +756,7 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info, Variable* var,
reinterpret_cast<void*>(var), VariableMode2String(var->mode()),
var->maybe_assigned() == kMaybeAssigned ? "true" : "false");
SNPrintF(buf + pos, ")");
- PrintLiteralIndented(buf.start(), value, true);
+ PrintLiteralIndented(buf.begin(), value, true);
}
}
@@ -1054,12 +1054,19 @@ void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
if (node->extends() != nullptr) {
PrintIndentedVisit("EXTENDS", node->extends());
}
+ Scope* outer = node->constructor()->scope()->outer_scope();
+ if (outer->is_class_scope()) {
+ Variable* brand = outer->AsClassScope()->brand();
+ if (brand != nullptr) {
+ PrintLiteralWithModeIndented("BRAND", brand, brand->raw_name());
+ }
+ }
if (node->static_fields_initializer() != nullptr) {
PrintIndentedVisit("STATIC FIELDS INITIALIZER",
node->static_fields_initializer());
}
if (node->instance_members_initializer_function() != nullptr) {
- PrintIndentedVisit("INSTANCE ELEMENTS INITIALIZER",
+ PrintIndentedVisit("INSTANCE MEMBERS INITIALIZER",
node->instance_members_initializer_function());
}
PrintClassProperties(node->properties());
@@ -1067,7 +1074,7 @@ void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
void AstPrinter::VisitInitializeClassMembersStatement(
InitializeClassMembersStatement* node) {
- IndentedScope indent(this, "INITIALIZE CLASS ELEMENTS", node->position());
+ IndentedScope indent(this, "INITIALIZE CLASS MEMBERS", node->position());
PrintClassProperties(node->fields());
}
@@ -1093,7 +1100,7 @@ void AstPrinter::PrintClassProperties(
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "PROPERTY%s%s - %s", property->is_static() ? " - STATIC" : "",
property->is_private() ? " - PRIVATE" : " - PUBLIC", prop_kind);
- IndentedScope prop(this, buf.start());
+ IndentedScope prop(this, buf.begin());
PrintIndentedVisit("KEY", properties->at(i)->key());
PrintIndentedVisit("VALUE", properties->at(i)->value());
}
@@ -1137,7 +1144,7 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
buf[i] = '\0';
PrintIndented("FLAGS ");
- Print("%s", buf.start());
+ Print("%s", buf.begin());
Print("\n");
}
@@ -1177,7 +1184,7 @@ void AstPrinter::PrintObjectProperties(
}
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "PROPERTY - %s", prop_kind);
- IndentedScope prop(this, buf.start());
+ IndentedScope prop(this, buf.begin());
PrintIndentedVisit("KEY", properties->at(i)->key());
PrintIndentedVisit("VALUE", properties->at(i)->value());
}
@@ -1201,7 +1208,7 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
if (!node->is_resolved()) {
SNPrintF(buf + pos, " unresolved");
- PrintLiteralWithModeIndented(buf.start(), nullptr, node->raw_name());
+ PrintLiteralWithModeIndented(buf.begin(), nullptr, node->raw_name());
} else {
Variable* var = node->var();
switch (var->location()) {
@@ -1224,7 +1231,7 @@ void AstPrinter::VisitVariableProxy(VariableProxy* node) {
SNPrintF(buf + pos, " module");
break;
}
- PrintLiteralWithModeIndented(buf.start(), var, node->raw_name());
+ PrintLiteralWithModeIndented(buf.begin(), var, node->raw_name());
}
}
@@ -1242,21 +1249,21 @@ void AstPrinter::VisitCompoundAssignment(CompoundAssignment* node) {
void AstPrinter::VisitYield(Yield* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "YIELD");
- IndentedScope indent(this, buf.start(), node->position());
+ IndentedScope indent(this, buf.begin(), node->position());
Visit(node->expression());
}
void AstPrinter::VisitYieldStar(YieldStar* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "YIELD_STAR");
- IndentedScope indent(this, buf.start(), node->position());
+ IndentedScope indent(this, buf.begin(), node->position());
Visit(node->expression());
}
void AstPrinter::VisitAwait(Await* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "AWAIT");
- IndentedScope indent(this, buf.start(), node->position());
+ IndentedScope indent(this, buf.begin(), node->position());
Visit(node->expression());
}
@@ -1268,7 +1275,7 @@ void AstPrinter::VisitThrow(Throw* node) {
void AstPrinter::VisitProperty(Property* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "PROPERTY");
- IndentedScope indent(this, buf.start(), node->position());
+ IndentedScope indent(this, buf.begin(), node->position());
Visit(node->obj());
AssignType property_kind = Property::GetAssignType(node);
@@ -1285,7 +1292,7 @@ void AstPrinter::VisitProperty(Property* node) {
void AstPrinter::VisitResolvedProperty(ResolvedProperty* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "RESOLVED-PROPERTY");
- IndentedScope indent(this, buf.start(), node->position());
+ IndentedScope indent(this, buf.begin(), node->position());
PrintIndentedVisit("RECEIVER", node->object());
PrintIndentedVisit("PROPERTY", node->property());
@@ -1294,7 +1301,7 @@ void AstPrinter::VisitResolvedProperty(ResolvedProperty* node) {
void AstPrinter::VisitCall(Call* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "CALL");
- IndentedScope indent(this, buf.start());
+ IndentedScope indent(this, buf.begin());
Visit(node->expression());
PrintArguments(node->arguments());
@@ -1312,7 +1319,7 @@ void AstPrinter::VisitCallRuntime(CallRuntime* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "CALL RUNTIME %s%s", node->debug_name(),
node->is_jsruntime() ? " (JS function)" : "");
- IndentedScope indent(this, buf.start(), node->position());
+ IndentedScope indent(this, buf.begin(), node->position());
PrintArguments(node->arguments());
}
@@ -1327,7 +1334,7 @@ void AstPrinter::VisitCountOperation(CountOperation* node) {
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "%s %s", (node->is_prefix() ? "PRE" : "POST"),
Token::Name(node->op()));
- IndentedScope indent(this, buf.start(), node->position());
+ IndentedScope indent(this, buf.begin(), node->position());
Visit(node->expression());
}
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index e1efdbfb88..cceb5fc269 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -5,10 +5,10 @@
#ifndef V8_AST_PRETTYPRINTER_H_
#define V8_AST_PRETTYPRINTER_H_
-#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
-#include "src/function-kind.h"
+#include "src/utils/allocation.h"
+#include "src/objects/function-kind.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index c3950af7ab..e45303c64b 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -6,14 +6,14 @@
#include <set>
-#include "src/accessors.h"
#include "src/ast/ast.h"
#include "src/base/optional.h"
-#include "src/bootstrapper.h"
-#include "src/counters.h"
-#include "src/message-template.h"
-#include "src/objects-inl.h"
+#include "src/builtins/accessors.h"
+#include "src/execution/message-template.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/scope-info.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
@@ -145,9 +145,16 @@ ClassScope::ClassScope(Zone* zone, Scope* outer_scope)
set_language_mode(LanguageMode::kStrict);
}
-ClassScope::ClassScope(Zone* zone, Handle<ScopeInfo> scope_info)
+ClassScope::ClassScope(Zone* zone, AstValueFactory* ast_value_factory,
+ Handle<ScopeInfo> scope_info)
: Scope(zone, CLASS_SCOPE, scope_info) {
set_language_mode(LanguageMode::kStrict);
+ if (scope_info->HasClassBrand()) {
+ Variable* brand =
+ LookupInScopeInfo(ast_value_factory->dot_brand_string(), this);
+ DCHECK_NOT_NULL(brand);
+ EnsureRareData()->brand = brand;
+ }
}
Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
@@ -303,8 +310,8 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
Scope* innermost_scope = nullptr;
Scope* outer_scope = nullptr;
while (!scope_info.is_null()) {
- if (scope_info->scope_type() == WITH_SCOPE) {
- if (scope_info->IsDebugEvaluateScope()) {
+ if (scope_info.scope_type() == WITH_SCOPE) {
+ if (scope_info.IsDebugEvaluateScope()) {
outer_scope = new (zone)
DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info, isolate));
outer_scope->set_is_debug_evaluate_scope();
@@ -314,45 +321,46 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
new (zone) Scope(zone, WITH_SCOPE, handle(scope_info, isolate));
}
- } else if (scope_info->scope_type() == SCRIPT_SCOPE) {
+ } else if (scope_info.scope_type() == SCRIPT_SCOPE) {
// If we reach a script scope, it's the outermost scope. Install the
// scope info of this script context onto the existing script scope to
// avoid nesting script scopes.
if (deserialization_mode == DeserializationMode::kIncludingVariables) {
script_scope->SetScriptScopeInfo(handle(scope_info, isolate));
}
- DCHECK(!scope_info->HasOuterScopeInfo());
+ DCHECK(!scope_info.HasOuterScopeInfo());
break;
- } else if (scope_info->scope_type() == FUNCTION_SCOPE) {
+ } else if (scope_info.scope_type() == FUNCTION_SCOPE) {
outer_scope = new (zone)
DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info, isolate));
- if (scope_info->IsAsmModule()) {
+ if (scope_info.IsAsmModule()) {
outer_scope->AsDeclarationScope()->set_is_asm_module();
}
- } else if (scope_info->scope_type() == EVAL_SCOPE) {
+ } else if (scope_info.scope_type() == EVAL_SCOPE) {
outer_scope = new (zone)
DeclarationScope(zone, EVAL_SCOPE, handle(scope_info, isolate));
- } else if (scope_info->scope_type() == CLASS_SCOPE) {
- outer_scope = new (zone) ClassScope(zone, handle(scope_info, isolate));
- } else if (scope_info->scope_type() == BLOCK_SCOPE) {
- if (scope_info->is_declaration_scope()) {
+ } else if (scope_info.scope_type() == CLASS_SCOPE) {
+ outer_scope = new (zone)
+ ClassScope(zone, ast_value_factory, handle(scope_info, isolate));
+ } else if (scope_info.scope_type() == BLOCK_SCOPE) {
+ if (scope_info.is_declaration_scope()) {
outer_scope = new (zone)
DeclarationScope(zone, BLOCK_SCOPE, handle(scope_info, isolate));
} else {
outer_scope =
new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info, isolate));
}
- } else if (scope_info->scope_type() == MODULE_SCOPE) {
+ } else if (scope_info.scope_type() == MODULE_SCOPE) {
outer_scope = new (zone)
ModuleScope(isolate, handle(scope_info, isolate), ast_value_factory);
} else {
- DCHECK_EQ(scope_info->scope_type(), CATCH_SCOPE);
- DCHECK_EQ(scope_info->ContextLocalCount(), 1);
- DCHECK_EQ(scope_info->ContextLocalMode(0), VariableMode::kVar);
- DCHECK_EQ(scope_info->ContextLocalInitFlag(0), kCreatedInitialized);
- String name = scope_info->ContextLocalName(0);
+ DCHECK_EQ(scope_info.scope_type(), CATCH_SCOPE);
+ DCHECK_EQ(scope_info.ContextLocalCount(), 1);
+ DCHECK_EQ(scope_info.ContextLocalMode(0), VariableMode::kVar);
+ DCHECK_EQ(scope_info.ContextLocalInitFlag(0), kCreatedInitialized);
+ String name = scope_info.ContextLocalName(0);
MaybeAssignedFlag maybe_assigned =
- scope_info->ContextLocalMaybeAssignedFlag(0);
+ scope_info.ContextLocalMaybeAssignedFlag(0);
outer_scope = new (zone)
Scope(zone, ast_value_factory->GetString(handle(name, isolate)),
maybe_assigned, handle(scope_info, isolate));
@@ -365,8 +373,8 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
}
current_scope = outer_scope;
if (innermost_scope == nullptr) innermost_scope = current_scope;
- scope_info = scope_info->HasOuterScopeInfo() ? scope_info->OuterScopeInfo()
- : ScopeInfo();
+ scope_info = scope_info.HasOuterScopeInfo() ? scope_info.OuterScopeInfo()
+ : ScopeInfo();
}
if (deserialization_mode == DeserializationMode::kIncludingVariables &&
@@ -1710,6 +1718,11 @@ void Scope::Print(int n) {
if (class_scope->rare_data_ != nullptr) {
PrintMap(n1, "// private name vars:\n",
&(class_scope->rare_data_->private_name_map), true, function);
+ Variable* brand = class_scope->brand();
+ if (brand != nullptr) {
+ Indent(n1, "// brand var:\n");
+ PrintVar(n1, brand);
+ }
}
}
@@ -2512,5 +2525,21 @@ VariableProxy* ClassScope::ResolvePrivateNamesPartially() {
return nullptr;
}
+Variable* ClassScope::DeclareBrandVariable(AstValueFactory* ast_value_factory,
+ int class_token_pos) {
+ DCHECK_IMPLIES(rare_data_ != nullptr, rare_data_->brand == nullptr);
+ bool was_added;
+ Variable* brand = Declare(zone(), ast_value_factory->dot_brand_string(),
+ VariableMode::kConst, NORMAL_VARIABLE,
+ InitializationFlag::kNeedsInitialization,
+ MaybeAssignedFlag::kMaybeAssigned, &was_added);
+ DCHECK(was_added);
+ brand->ForceContextAllocation();
+ brand->set_is_used();
+ EnsureRareData()->brand = brand;
+ brand->set_initializer_position(class_token_pos);
+ return brand;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 8d64fbf2ee..1feaad2a90 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -9,10 +9,10 @@
#include "src/base/compiler-specific.h"
#include "src/base/hashmap.h"
#include "src/base/threaded-list.h"
-#include "src/function-kind.h"
-#include "src/globals.h"
-#include "src/objects.h"
-#include "src/pointer-with-payload.h"
+#include "src/common/globals.h"
+#include "src/objects/function-kind.h"
+#include "src/objects/objects.h"
+#include "src/utils/pointer-with-payload.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -30,8 +30,8 @@ class Statement;
class StringSet;
class VariableProxy;
-typedef base::ThreadedList<VariableProxy, VariableProxy::UnresolvedNext>
- UnresolvedList;
+using UnresolvedList =
+ base::ThreadedList<VariableProxy, VariableProxy::UnresolvedNext>;
// A hash map to support fast variable declaration and lookup.
class VariableMap : public ZoneHashMap {
@@ -1169,7 +1169,8 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
public:
ClassScope(Zone* zone, Scope* outer_scope);
// Deserialization.
- ClassScope(Zone* zone, Handle<ScopeInfo> scope_info);
+ ClassScope(Zone* zone, AstValueFactory* ast_value_factory,
+ Handle<ScopeInfo> scope_info);
// Declare a private name in the private name map and add it to the
// local variables of this scope.
@@ -1205,6 +1206,11 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
// and the current tail.
void MigrateUnresolvedPrivateNameTail(AstNodeFactory* ast_node_factory,
UnresolvedList::Iterator tail);
+ Variable* DeclareBrandVariable(AstValueFactory* ast_value_factory,
+ int class_token_pos);
+ Variable* brand() {
+ return rare_data_ == nullptr ? nullptr : rare_data_->brand;
+ }
private:
friend class Scope;
@@ -1222,6 +1228,7 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope {
explicit RareData(Zone* zone) : private_name_map(zone) {}
UnresolvedList unresolved_private_names;
VariableMap private_name_map;
+ Variable* brand = nullptr;
};
V8_INLINE RareData* EnsureRareData() {
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index addcf8db2b..26f037ea68 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -5,8 +5,8 @@
#include "src/ast/variables.h"
#include "src/ast/scopes.h"
-#include "src/globals.h"
-#include "src/objects-inl.h"
+#include "src/common/globals.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 6dbb9dbac4..df40fee754 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -7,7 +7,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/base/threaded-list.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -193,7 +193,7 @@ class Variable final : public ZoneObject {
: kNeedsInitialization;
}
- typedef base::ThreadedList<Variable> List;
+ using List = base::ThreadedList<Variable>;
private:
Scope* scope_;
diff --git a/deps/v8/src/base/OWNERS b/deps/v8/src/base/OWNERS
index 5d24bda820..9c6fd3c859 100644
--- a/deps/v8/src/base/OWNERS
+++ b/deps/v8/src/base/OWNERS
@@ -1,3 +1,4 @@
+clemensh@chromium.org
mlippautz@chromium.org
# COMPONENT: Blink>JavaScript
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index 46859bff85..5d68f7e11b 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -7,9 +7,9 @@
#include "include/v8config.h"
-// Annotate a typedef or function indicating it's ok if it's not used.
-// Use like:
-// typedef Foo Bar ALLOW_UNUSED_TYPE;
+// Annotate a using ALLOW_UNUSED_TYPE = or function indicating it's ok if it's
+// not used. Use like:
+// using Bar = Foo;
#if V8_HAS_ATTRIBUTE_UNUSED
#define ALLOW_UNUSED_TYPE __attribute__((unused))
#else
diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h
index 2c30fe7613..055f0ff498 100644
--- a/deps/v8/src/base/flags.h
+++ b/deps/v8/src/base/flags.h
@@ -81,6 +81,8 @@ class Flags final {
constexpr operator mask_type() const { return mask_; }
constexpr bool operator!() const { return !mask_; }
+ Flags without(flag_type flag) { return *this & (~Flags(flag)); }
+
friend size_t hash_value(const Flags& flags) { return flags.mask_; }
private:
diff --git a/deps/v8/src/base/format-macros.h b/deps/v8/src/base/format-macros.h
deleted file mode 100644
index e2234684a8..0000000000
--- a/deps/v8/src/base/format-macros.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BASE_FORMAT_MACROS_H_
-#define V8_BASE_FORMAT_MACROS_H_
-
-// This file defines the format macros for some integer types.
-
-// To print a 64-bit value in a portable way:
-// int64_t value;
-// printf("xyz:%" PRId64, value);
-// The "d" in the macro corresponds to %d; you can also use PRIu64 etc.
-//
-// For wide strings, prepend "Wide" to the macro:
-// int64_t value;
-// StringPrintf(L"xyz: %" WidePRId64, value);
-//
-// To print a size_t value in a portable way:
-// size_t size;
-// printf("xyz: %" PRIuS, size);
-// The "u" in the macro corresponds to %u, and S is for "size".
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "src/base/build_config.h"
-
-#if defined(V8_OS_POSIX) && (defined(_INTTYPES_H) || defined(_INTTYPES_H_)) && \
- !defined(PRId64)
-#error "inttypes.h has already been included before this header file, but "
-#error "without __STDC_FORMAT_MACROS defined."
-#endif
-
-#if defined(V8_OS_POSIX) && !defined(__STDC_FORMAT_MACROS)
-#define __STDC_FORMAT_MACROS
-#endif
-
-#include <inttypes.h>
-
-#if defined(V8_OS_POSIX)
-
-// GCC will concatenate wide and narrow strings correctly, so nothing needs to
-// be done here.
-#define WidePRId64 PRId64
-#define WidePRIu64 PRIu64
-#define WidePRIx64 PRIx64
-
-#if !defined(PRIuS)
-#define PRIuS "zu"
-#endif
-
-// The size of NSInteger and NSUInteger varies between 32-bit and 64-bit
-// architectures and Apple does not provides standard format macros and
-// recommends casting. This has many drawbacks, so instead define macros
-// for formatting those types.
-#if defined(V8_OS_MACOSX)
-#if defined(V8_HOST_ARCH_64_BIT)
-#if !defined(PRIdNS)
-#define PRIdNS "ld"
-#endif
-#if !defined(PRIuNS)
-#define PRIuNS "lu"
-#endif
-#if !defined(PRIxNS)
-#define PRIxNS "lx"
-#endif
-#else // defined(V8_HOST_ARCH_64_BIT)
-#if !defined(PRIdNS)
-#define PRIdNS "d"
-#endif
-#if !defined(PRIuNS)
-#define PRIuNS "u"
-#endif
-#if !defined(PRIxNS)
-#define PRIxNS "x"
-#endif
-#endif
-#endif // defined(V8_OS_MACOSX)
-
-#else // V8_OS_WIN
-
-#if !defined(PRId64) || !defined(PRIu64) || !defined(PRIx64)
-#error "inttypes.h provided by win toolchain should define these."
-#endif
-
-#define WidePRId64 L"I64d"
-#define WidePRIu64 L"I64u"
-#define WidePRIx64 L"I64x"
-
-#if !defined(PRIuS)
-#define PRIuS "Iu"
-#endif
-
-#endif
-
-#endif // V8_BASE_FORMAT_MACROS_H_
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index 64f7fed413..4087eb3423 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -53,7 +53,12 @@ void PrettyPrintChar(std::ostream& os, int ch) {
}
void DefaultDcheckHandler(const char* file, int line, const char* message) {
+#ifdef DEBUG
V8_Fatal(file, line, "Debug check failed: %s.", message);
+#else
+ // This case happens only for unit tests.
+ V8_Fatal("Debug check failed: %s.", message);
+#endif
}
} // namespace
@@ -144,7 +149,13 @@ class FailureMessage {
} // namespace
+#ifdef DEBUG
void V8_Fatal(const char* file, int line, const char* format, ...) {
+#else
+void V8_Fatal(const char* format, ...) {
+ const char* file = "";
+ int line = 0;
+#endif
va_list arguments;
va_start(arguments, format);
// Format the error message into a stack object for later retrieveal by the
@@ -171,6 +182,15 @@ void V8_Fatal(const char* file, int line, const char* format, ...) {
v8::base::OS::Abort();
}
+#if !defined(DEBUG) && defined(OFFICIAL_BUILD)
+void V8_FatalNoContext() {
+ v8::base::OS::PrintError("V8 CHECK or FATAL\n");
+ if (v8::base::g_print_stack_trace) v8::base::g_print_stack_trace();
+ fflush(stderr);
+ v8::base::OS::Abort();
+}
+#endif
+
void V8_Dcheck(const char* file, int line, const char* message) {
v8::base::g_dcheck_function(file, line, message);
}
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 9a9538d065..f2f68725a6 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -14,17 +14,39 @@
#include "src/base/compiler-specific.h"
#include "src/base/template-utils.h"
-[[noreturn]] PRINTF_FORMAT(3, 4) V8_BASE_EXPORT V8_NOINLINE
- void V8_Fatal(const char* file, int line, const char* format, ...);
-
V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
const char* message);
#ifdef DEBUG
+// In debug, include file, line, and full error message for all
+// FATAL() calls.
+[[noreturn]] PRINTF_FORMAT(3, 4) V8_BASE_EXPORT V8_NOINLINE
+ void V8_Fatal(const char* file, int line, const char* format, ...);
#define FATAL(...) V8_Fatal(__FILE__, __LINE__, __VA_ARGS__)
+
+#elif !defined(OFFICIAL_BUILD)
+// In non-official release, include full error message, but drop file & line
+// numbers. It saves binary size to drop the |file| & |line| as opposed to just
+// passing in "", 0 for them.
+[[noreturn]] PRINTF_FORMAT(1, 2) V8_BASE_EXPORT V8_NOINLINE
+ void V8_Fatal(const char* format, ...);
+#define FATAL(...) V8_Fatal(__VA_ARGS__)
#else
-#define FATAL(...) V8_Fatal("", 0, __VA_ARGS__)
+// In official builds, include only messages that contain parameters because
+// single-message errors can always be derived from stack traces.
+[[noreturn]] V8_BASE_EXPORT V8_NOINLINE void V8_FatalNoContext();
+[[noreturn]] PRINTF_FORMAT(1, 2) V8_BASE_EXPORT V8_NOINLINE
+ void V8_Fatal(const char* format, ...);
+// FATAL(msg) -> V8_FatalNoContext()
+// FATAL(msg, ...) -> V8_Fatal()
+#define FATAL_HELPER(_7, _6, _5, _4, _3, _2, _1, _0, ...) _0
+#define FATAL_DISCARD_ARG(arg) V8_FatalNoContext()
+#define FATAL(...) \
+ FATAL_HELPER(__VA_ARGS__, V8_Fatal, V8_Fatal, V8_Fatal, V8_Fatal, V8_Fatal, \
+ V8_Fatal, V8_Fatal, FATAL_DISCARD_ARG) \
+ (__VA_ARGS__)
#endif
+
#define UNIMPLEMENTED() FATAL("unimplemented code")
#define UNREACHABLE() FATAL("unreachable code")
@@ -38,6 +60,14 @@ V8_BASE_EXPORT void SetPrintStackTrace(void (*print_stack_trace_)());
V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
const char*));
+// In official builds, assume all check failures can be debugged given just the
+// stack trace.
+#if !defined(DEBUG) && defined(OFFICIAL_BUILD)
+#define CHECK_FAILED_HANDLER(message) FATAL("ignored")
+#else
+#define CHECK_FAILED_HANDLER(message) FATAL("Check failed: %s.", message)
+#endif
+
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by DEBUG, so the check will be executed regardless of
// compilation mode.
@@ -47,7 +77,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
#define CHECK_WITH_MSG(condition, message) \
do { \
if (V8_UNLIKELY(!(condition))) { \
- FATAL("Check failed: %s.", message); \
+ CHECK_FAILED_HANDLER(message); \
} \
} while (false)
#define CHECK(condition) CHECK_WITH_MSG(condition, #condition)
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index 1276805182..ad70e9820d 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -8,7 +8,6 @@
#include <limits>
#include "src/base/compiler-specific.h"
-#include "src/base/format-macros.h"
#include "src/base/logging.h"
// No-op macro which is used to work around MSVC's funky VA_ARGS support.
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index 5d878c91a6..11499f572c 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -150,7 +150,12 @@ void OS::SignalCodeMovingGC() {
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
const auto kNanosPerMicrosecond = 1000ULL;
const auto kMicrosPerSecond = 1000000ULL;
- const zx_time_t nanos_since_thread_started = zx_clock_get(ZX_CLOCK_THREAD);
+ zx_time_t nanos_since_thread_started;
+ zx_status_t status =
+ zx_clock_get_new(ZX_CLOCK_THREAD, &nanos_since_thread_started);
+ if (status != ZX_OK) {
+ return -1;
+ }
// First convert to microseconds, rounding up.
const uint64_t micros_since_thread_started =
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 059f393eb6..3c22487058 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -106,9 +106,6 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
if (fscanf(fp, "%" V8PRIxPTR, &offset) != 1) break;
- // Adjust {start} based on {offset}.
- start -= offset;
-
int c;
if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
// Found a read-only executable entry. Skip characters until we reach
@@ -135,6 +132,21 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
snprintf(lib_name, kLibNameLen, "%08" V8PRIxPTR "-%08" V8PRIxPTR, start,
end);
}
+
+#ifdef V8_OS_ANDROID
+ size_t lib_name_length = strlen(lib_name);
+ if (lib_name_length < 4 ||
+ strncmp(&lib_name[lib_name_length - 4], ".apk", 4) != 0) {
+ // Only adjust {start} based on {offset} if the file isn't the APK,
+ // since we load the library directly from the APK and don't want to
+ // apply the offset of the .so in the APK as the libraries offset.
+ start -= offset;
+ }
+#else
+ // Adjust {start} based on {offset}.
+ start -= offset;
+#endif
+
result.push_back(SharedLibraryAddress(lib_name, start, end));
} else {
// Entry not describing executable data. Skip to end of line to set up
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 2301c26688..7f4ce192db 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -678,11 +678,6 @@ int OS::VSNPrintF(char* str,
// POSIX string support.
//
-char* OS::StrChr(char* str, int c) {
- return strchr(str, c);
-}
-
-
void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
strncpy(dest, src, n);
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 6dc2053fcd..d01b1c07fe 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -670,11 +670,6 @@ int OS::VSNPrintF(char* str, int length, const char* format, va_list args) {
}
-char* OS::StrChr(char* str, int c) {
- return const_cast<char*>(strchr(str, c));
-}
-
-
void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
// Use _TRUNCATE or strncpy_s crashes (by design) if buffer is too small.
size_t buffer_size = static_cast<size_t>(length);
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 78b1bcbaff..e073704b2c 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -207,7 +207,6 @@ class V8_BASE_EXPORT OS {
static PRINTF_FORMAT(3, 0) int VSNPrintF(char* str, int length,
const char* format, va_list args);
- static char* StrChr(char* str, int c);
static void StrNCpy(char* dest, int length, const char* src, size_t n);
// Support for the profiler. Can do nothing, in which case ticks
diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h
index 9c098c4829..bb024ca87e 100644
--- a/deps/v8/src/base/small-vector.h
+++ b/deps/v8/src/base/small-vector.h
@@ -5,7 +5,9 @@
#ifndef V8_BASE_SMALL_VECTOR_H_
#define V8_BASE_SMALL_VECTOR_H_
+#include <algorithm>
#include <type_traits>
+#include <utility>
#include "src/base/bits.h"
#include "src/base/macros.h"
@@ -29,6 +31,10 @@ class SmallVector {
explicit SmallVector(size_t size) { resize_no_init(size); }
SmallVector(const SmallVector& other) V8_NOEXCEPT { *this = other; }
SmallVector(SmallVector&& other) V8_NOEXCEPT { *this = std::move(other); }
+ SmallVector(std::initializer_list<T> init) {
+ resize_no_init(init.size());
+ memcpy(begin_, init.begin(), sizeof(T) * init.size());
+ }
~SmallVector() {
if (is_big()) free(begin_);
diff --git a/deps/v8/src/type-traits.h b/deps/v8/src/base/type-traits.h
index a2fe928bf7..9b6e8971a7 100644
--- a/deps/v8/src/type-traits.h
+++ b/deps/v8/src/base/type-traits.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TYPE_TRAITS_H_
-#define V8_TYPE_TRAITS_H_
+#ifndef V8_BASE_TYPE_TRAITS_H_
+#define V8_BASE_TYPE_TRAITS_H_
#include <type_traits>
@@ -45,4 +45,4 @@ struct negation : std::integral_constant<bool, !T::value> {};
} // namespace internal
} // namespace v8
-#endif // V8_TYPE_TRAITS_H_
+#endif // V8_BASE_TYPE_TRAITS_H_
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 221c1f5b92..25d37d73b4 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -2,23 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/accessors.h"
-
-#include "src/api-inl.h"
-#include "src/contexts.h"
-#include "src/counters.h"
-#include "src/deoptimizer.h"
-#include "src/execution.h"
-#include "src/field-index-inl.h"
-#include "src/frames-inl.h"
+#include "src/builtins/accessors.h"
+
+#include "src/api/api-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/messages.h"
#include "src/heap/factory.h"
-#include "src/isolate-inl.h"
-#include "src/messages.h"
+#include "src/logging/counters.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/contexts.h"
+#include "src/objects/field-index-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/module-inl.h"
-#include "src/property-details.h"
-#include "src/prototype.h"
+#include "src/objects/property-details.h"
+#include "src/objects/prototype.h"
namespace v8 {
namespace internal {
@@ -61,7 +61,6 @@ static V8_INLINE bool CheckForName(Isolate* isolate, Handle<Name> name,
return false;
}
-
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
@@ -99,7 +98,6 @@ Accessors::ReplaceAccessorWithDataProperty(Handle<Object> receiver,
return value;
}
-
//
// Accessors::ReconfigureToDataProperty
//
@@ -124,12 +122,10 @@ void Accessors::ReconfigureToDataProperty(
}
}
-
//
// Accessors::ArgumentsIterator
//
-
void Accessors::ArgumentsIteratorGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
@@ -144,22 +140,19 @@ Handle<AccessorInfo> Accessors::MakeArgumentsIteratorInfo(Isolate* isolate) {
return MakeAccessor(isolate, name, &ArgumentsIteratorGetter, nullptr);
}
-
//
// Accessors::ArrayLength
//
-
void Accessors::ArrayLengthGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kArrayLengthGetter);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
JSArray holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
- Object result = holder->length();
+ Object result = holder.length();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
@@ -187,7 +180,7 @@ void Accessors::ArrayLengthSetter(
}
if (!was_readonly && V8_UNLIKELY(JSArray::HasReadOnlyLength(array)) &&
- length != array->length()->Number()) {
+ length != array->length().Number()) {
// AnythingToArrayLength() may have called setter re-entrantly and modified
// its property descriptor. Don't perform this check if "length" was
// previously readonly, as this may have been called during
@@ -207,7 +200,7 @@ void Accessors::ArrayLengthSetter(
JSArray::SetLength(array, length);
uint32_t actual_new_len = 0;
- CHECK(array->length()->ToArrayLength(&actual_new_len));
+ CHECK(array->length().ToArrayLength(&actual_new_len));
// Fail if there were non-deletable elements.
if (actual_new_len != length) {
if (info.ShouldThrowOnError()) {
@@ -240,8 +233,7 @@ void Accessors::ModuleNamespaceEntryGetter(
JSModuleNamespace holder =
JSModuleNamespace::cast(*Utils::OpenHandle(*info.Holder()));
Handle<Object> result;
- if (!holder
- ->GetExport(isolate, Handle<String>::cast(Utils::OpenHandle(*name)))
+ if (!holder.GetExport(isolate, Handle<String>::cast(Utils::OpenHandle(*name)))
.ToHandle(&result)) {
isolate->OptionalRescheduleException(false);
} else {
@@ -274,14 +266,12 @@ Handle<AccessorInfo> Accessors::MakeModuleNamespaceEntryInfo(
&ModuleNamespaceEntrySetter);
}
-
//
// Accessors::StringLength
//
void Accessors::StringLengthGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kStringLengthGetter);
@@ -294,12 +284,12 @@ void Accessors::StringLengthGetter(
// in the hierarchy, in this case for String values.
Object value = *Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
- if (!value->IsString()) {
+ if (!value.IsString()) {
// Not a string value. That means that we either got a String wrapper or
// a Value with a String wrapper in its prototype chain.
- value = JSValue::cast(*Utils::OpenHandle(*info.Holder()))->value();
+ value = JSValue::cast(*Utils::OpenHandle(*info.Holder())).value();
}
- Object result = Smi::FromInt(String::cast(value)->length());
+ Object result = Smi::FromInt(String::cast(value).length());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
@@ -322,8 +312,7 @@ static Handle<Object> GetFunctionPrototype(Isolate* isolate,
}
void Accessors::FunctionPrototypeGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kFunctionPrototypeGetter);
@@ -355,15 +344,12 @@ Handle<AccessorInfo> Accessors::MakeFunctionPrototypeInfo(Isolate* isolate) {
&FunctionPrototypeGetter, &FunctionPrototypeSetter);
}
-
//
// Accessors::FunctionLength
//
-
void Accessors::FunctionLengthGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kFunctionLengthGetter);
@@ -380,15 +366,12 @@ Handle<AccessorInfo> Accessors::MakeFunctionLengthInfo(Isolate* isolate) {
&FunctionLengthGetter, &ReconfigureToDataProperty);
}
-
//
// Accessors::FunctionName
//
-
void Accessors::FunctionNameGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSFunction> function =
@@ -402,7 +385,6 @@ Handle<AccessorInfo> Accessors::MakeFunctionNameInfo(Isolate* isolate) {
&FunctionNameGetter, &ReconfigureToDataProperty);
}
-
//
// Accessors::FunctionArguments
//
@@ -496,10 +478,10 @@ Handle<JSObject> GetFrameArguments(Isolate* isolate,
DCHECK(array->length() == length);
for (int i = 0; i < length; i++) {
Object value = frame->GetParameter(i);
- if (value->IsTheHole(isolate)) {
+ if (value.IsTheHole(isolate)) {
// Generators currently use holes as dummy arguments when resuming. We
// must not leak those.
- DCHECK(IsResumableFunction(function->shared()->kind()));
+ DCHECK(IsResumableFunction(function->shared().kind()));
value = ReadOnlyRoots(isolate).undefined_value();
}
array->set(i, value);
@@ -526,16 +508,14 @@ Handle<JSObject> Accessors::FunctionGetArguments(JavaScriptFrame* frame,
return Handle<JSObject>();
}
-
void Accessors::FunctionArgumentsGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Object> result = isolate->factory()->null_value();
- if (!function->shared()->native()) {
+ if (!function->shared().native()) {
// Find the top invocation of the function by traversing frames.
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
@@ -554,14 +534,13 @@ Handle<AccessorInfo> Accessors::MakeFunctionArgumentsInfo(Isolate* isolate) {
&FunctionArgumentsGetter, nullptr);
}
-
//
// Accessors::FunctionCaller
//
static inline bool AllowAccessToFunction(Context current_context,
JSFunction function) {
- return current_context->HasSameSecurityTokenAs(function->context());
+ return current_context.HasSameSecurityTokenAs(function.context());
}
class FrameFunctionIterator {
@@ -585,7 +564,7 @@ class FrameFunctionIterator {
bool FindNextNonTopLevel() {
do {
if (!next().ToHandle(&function_)) return false;
- } while (function_->shared()->is_toplevel());
+ } while (function_->shared().is_toplevel());
return true;
}
@@ -594,8 +573,8 @@ class FrameFunctionIterator {
// unless directly exposed, in which case the native flag is set on them.
// Returns true if one is found, and false if the iterator ends before.
bool FindFirstNativeOrUserJavaScript() {
- while (!function_->shared()->native() &&
- !function_->shared()->IsUserJavaScript()) {
+ while (!function_->shared().native() &&
+ !function_->shared().IsUserJavaScript()) {
if (!next().ToHandle(&function_)) return false;
}
return true;
@@ -663,11 +642,10 @@ class FrameFunctionIterator {
int inlined_frame_index_;
};
-
MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
Handle<JSFunction> function) {
FrameFunctionIterator it(isolate);
- if (function->shared()->native()) {
+ if (function->shared().native()) {
return MaybeHandle<JSFunction>();
}
// Find the function from the frames. Return null in case no frame
@@ -694,7 +672,7 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
// Censor if the caller is not a sloppy mode function.
// Change from ES5, which used to throw, see:
// https://bugs.ecmascript.org/show_bug.cgi?id=310
- if (is_strict(caller->shared()->language_mode())) {
+ if (is_strict(caller->shared().language_mode())) {
return MaybeHandle<JSFunction>();
}
// Don't return caller from another security context.
@@ -704,10 +682,8 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
return caller;
}
-
void Accessors::FunctionCallerGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSFunction> function =
@@ -729,7 +705,6 @@ Handle<AccessorInfo> Accessors::MakeFunctionCallerInfo(Isolate* isolate) {
&FunctionCallerGetter, nullptr);
}
-
//
// Accessors::BoundFunctionLength
//
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/builtins/accessors.h
index 200e0f6880..43a6534296 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/builtins/accessors.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ACCESSORS_H_
-#define V8_ACCESSORS_H_
+#ifndef V8_BUILTINS_ACCESSORS_H_
+#define V8_BUILTINS_ACCESSORS_H_
#include "include/v8.h"
-#include "src/allocation.h"
-#include "src/globals.h"
-#include "src/property-details.h"
+#include "src/common/globals.h"
+#include "src/objects/property-details.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -110,9 +110,9 @@ class Accessors : public AllStatic {
// conventions as many of the internal methods in objects.cc:
// - The return value is unset iff there was an exception.
// - If the ShouldThrow argument is true, the return value must not be false.
- typedef void (*AccessorNameBooleanSetterCallback)(
- Local<v8::Name> property, Local<v8::Value> value,
- const PropertyCallbackInfo<v8::Boolean>& info);
+ using AccessorNameBooleanSetterCallback =
+ void (*)(Local<v8::Name> property, Local<v8::Value> value,
+ const PropertyCallbackInfo<v8::Boolean>& info);
V8_EXPORT_PRIVATE static Handle<AccessorInfo> MakeAccessor(
Isolate* isolate, Handle<Name> name, AccessorNameGetterCallback getter,
@@ -130,4 +130,4 @@ class Accessors : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_ACCESSORS_H_
+#endif // V8_BUILTINS_ACCESSORS_H_
diff --git a/deps/v8/src/builtins/arguments.tq b/deps/v8/src/builtins/arguments.tq
index 4ae22a66ac..add66917c0 100644
--- a/deps/v8/src/builtins/arguments.tq
+++ b/deps/v8/src/builtins/arguments.tq
@@ -31,6 +31,7 @@ namespace arguments {
// It is difficult to actually check/assert this, since interpreted or JITted
// frames are StandardFrames, but so are hand-written builtins. Doing that
// more refined check would be prohibitively expensive.
+ @export
macro GetArgumentsFrameAndCount(implicit context: Context)(f: JSFunction):
ArgumentsInfo {
let frame: Frame = LoadParentFramePointer();
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index e0a5a90978..54c16932fa 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -4,23 +4,23 @@
#if V8_TARGET_ARCH_ARM
-#include "src/api-arguments.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
-#include "src/macro-assembler-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -29,8 +29,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
#if defined(__thumb__)
// Thumb mode builtin.
DCHECK_EQ(1, reinterpret_cast<uintptr_t>(
@@ -38,14 +37,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
1);
#endif
__ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
- if (exit_frame_type == BUILTIN_EXIT) {
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK(exit_frame_type == EXIT);
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
- RelocInfo::CODE_TARGET);
- }
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
@@ -403,7 +396,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
- __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, r3,
+ __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
@@ -841,13 +834,12 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
-static void ReplaceClosureCodeWithOptimizedCode(
- MacroAssembler* masm, Register optimized_code, Register closure,
- Register scratch1, Register scratch2, Register scratch3) {
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure) {
// Store code entry in the closure.
__ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
- __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
+ __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
@@ -882,14 +874,14 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
- Register scratch1, Register scratch2,
- Register scratch3) {
+ Register scratch1,
+ Register scratch2) {
// ----------- S t a t e -------------
// -- r3 : new target (preserved for callee if needed, and caller)
// -- r1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, r1, r3, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, r1, r3, scratch1, scratch2));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -898,7 +890,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ ldr(
optimized_code_entry,
- FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
@@ -961,8 +954,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
- scratch2, scratch3, feedback_vector);
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
__ LoadCodeObjectEntry(r2, optimized_code_entry);
__ Jump(r2);
@@ -1081,7 +1073,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
@@ -1351,7 +1343,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ ldr(r2, MemOperand(r2));
__ bind(&trampoline_loaded);
- __ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value()));
+ __ add(lr, r2, Operand(interpreter_entry_return_pc_offset.value()));
// Initialize the dispatch table register.
__ Move(
@@ -2080,17 +2072,27 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- r4 : the number of [[BoundArguments]]
// -----------------------------------
- // Reserve stack space for the [[BoundArguments]].
+ Register scratch = r6;
+
{
- Label done;
- __ sub(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
- __ CompareRoot(sp, RootIndex::kRealStackLimit);
- __ b(hs, &done);
- // Restore the stack pointer.
- __ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+ Label done;
+ __ mov(scratch, Operand(r4, LSL, kPointerSizeLog2));
+ {
+ UseScratchRegisterScope temps(masm);
+ Register remaining_stack_size = temps.Acquire();
+
+ // Compute the space we have left. The stack might already be overflowed
+ // here which will cause remaining_stack_size to become negative.
+ __ LoadRoot(remaining_stack_size, RootIndex::kRealStackLimit);
+ __ sub(remaining_stack_size, sp, remaining_stack_size);
+
+ // Check if the arguments will overflow the stack.
+ __ cmp(remaining_stack_size, scratch);
+ }
+ __ b(gt, &done);
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2099,7 +2101,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
- Register scratch = r6;
+ // Reserve stack space for the [[BoundArguments]].
+ __ AllocateStackSpace(scratch);
// Relocate arguments down the stack.
{
@@ -2855,26 +2858,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ add(r6, r6, Operand(1));
__ str(r6, MemOperand(r9, kLevelOffset));
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1);
- __ Move(r0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
__ StoreReturnAddressAndCall(r3);
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1);
- __ Move(r0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -2956,7 +2941,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
- typedef FunctionCallbackArguments FCA;
+ using FCA = FunctionCallbackArguments;
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
@@ -2977,7 +2962,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// sp[5 * kPointerSize]: undefined (kNewTarget)
// Reserve space on the stack.
- __ sub(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+ __ AllocateStackSpace(FCA::kArgsLength * kPointerSize);
// kHolder.
__ str(holder, MemOperand(sp, 0 * kPointerSize));
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 36fa042324..f81a1955ee 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -4,23 +4,23 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/api-arguments.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
-#include "src/macro-assembler-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -29,17 +29,10 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ Mov(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
- if (exit_frame_type == BUILTIN_EXIT) {
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK(exit_frame_type == EXIT);
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
- RelocInfo::CODE_TARGET);
- }
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
@@ -108,26 +101,6 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(num_args, LSL, kSystemPointerSizeLog2));
__ B(le, stack_overflow);
-
-#if defined(V8_OS_WIN)
- // Simulate _chkstk to extend stack guard page on Windows ARM64.
- const int kPageSize = 4096;
- Label chkstk, chkstk_done;
- Register probe = temps.AcquireX();
-
- __ Sub(scratch, sp, Operand(num_args, LSL, kSystemPointerSizeLog2));
- __ Mov(probe, sp);
-
- // Loop start of stack probe.
- __ Bind(&chkstk);
- __ Sub(probe, probe, kPageSize);
- __ Cmp(probe, scratch);
- __ B(lo, &chkstk_done);
- __ Ldrb(xzr, MemOperand(probe));
- __ B(&chkstk);
-
- __ Bind(&chkstk_done);
-#endif
}
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
@@ -473,7 +446,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ StoreTaggedField(
x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
- __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
+ __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0,
kLRHasNotBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
@@ -957,14 +930,13 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
-static void ReplaceClosureCodeWithOptimizedCode(
- MacroAssembler* masm, Register optimized_code, Register closure,
- Register scratch1, Register scratch2, Register scratch3) {
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure) {
// Store code entry in the closure.
__ StoreTaggedField(optimized_code,
FieldMemOperand(closure, JSFunction::kCodeOffset));
- __ Mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
- __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
+ __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
@@ -1003,14 +975,14 @@ static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register feedback_vector,
- Register scratch1, Register scratch2,
- Register scratch3) {
+ Register scratch1,
+ Register scratch2) {
// ----------- S t a t e -------------
// -- x3 : new target (preserved for callee if needed, and caller)
// -- x1 : target function (preserved for callee if needed, and caller)
// -- feedback vector (preserved for caller if needed)
// -----------------------------------
- DCHECK(!AreAliased(feedback_vector, x1, x3, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(feedback_vector, x1, x3, scratch1, scratch2));
Label optimized_code_slot_is_weak_ref, fallthrough;
@@ -1019,7 +991,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadAnyTaggedField(
optimized_code_entry,
- FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret is at a weak reference to a code
@@ -1083,8 +1056,7 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// the optimized functions list, then tail call the optimized code.
// The feedback vector is no longer used, so re-use it as a scratch
// register.
- ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
- scratch2, scratch3, feedback_vector);
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ LoadCodeObjectEntry(x2, optimized_code_entry);
__ Jump(x2);
@@ -1206,7 +1178,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Read off the optimized code slot in the feedback vector, and if there
// is optimized code or an optimization marker, call that instead.
- MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
+ MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4);
// Increment invocation count for the function.
// MaybeTailCallOptimizedCodeSlot preserves feedback_vector, so safe to reuse
@@ -1510,7 +1482,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Ldr(x1, MemOperand(x1));
__ Bind(&trampoline_loaded);
- __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value()));
+ __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset.value()));
// Initialize the dispatch table register.
__ Mov(
@@ -2501,7 +2473,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Sub(x10, sp, x10);
// Check if the arguments will overflow the stack.
__ Cmp(x10, Operand(bound_argc, LSL, kSystemPointerSizeLog2));
- __ B(hs, &done);
+ __ B(gt, &done);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&done);
}
@@ -3463,25 +3435,9 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ Add(level_reg, level_reg, 1);
__ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ Mov(x0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
__ Mov(x10, x3); // TODO(arm64): Load target into x10 directly.
__ StoreReturnAddressAndCall(x10);
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ Mov(x0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -3574,7 +3530,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
- typedef FunctionCallbackArguments FCA;
+ using FCA = FunctionCallbackArguments;
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
@@ -3595,7 +3551,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// sp[5 * kSystemPointerSize]: undefined (kNewTarget)
// Reserve space on the stack.
- __ Sub(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
+ __ Claim(FCA::kArgsLength, kSystemPointerSize);
// kHolder.
__ Str(holder, MemOperand(sp, 0 * kSystemPointerSize));
diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq
index 8c531141f0..72e1a3661e 100644
--- a/deps/v8/src/builtins/array-join.tq
+++ b/deps/v8/src/builtins/array-join.tq
@@ -383,31 +383,31 @@ namespace array_join {
if (IsElementsKindGreaterThan(kind, UINT32_ELEMENTS)) {
if (kind == INT32_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedInt32Array>;
+ loadFn = LoadJoinTypedElement<typed_array::Int32Elements>;
} else if (kind == FLOAT32_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedFloat32Array>;
+ loadFn = LoadJoinTypedElement<typed_array::Float32Elements>;
} else if (kind == FLOAT64_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedFloat64Array>;
+ loadFn = LoadJoinTypedElement<typed_array::Float64Elements>;
} else if (kind == UINT8_CLAMPED_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedUint8ClampedArray>;
+ loadFn = LoadJoinTypedElement<typed_array::Uint8ClampedElements>;
} else if (kind == BIGUINT64_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedBigUint64Array>;
+ loadFn = LoadJoinTypedElement<typed_array::BigUint64Elements>;
} else if (kind == BIGINT64_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedBigInt64Array>;
+ loadFn = LoadJoinTypedElement<typed_array::BigInt64Elements>;
} else {
unreachable;
}
} else {
if (kind == UINT8_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedUint8Array>;
+ loadFn = LoadJoinTypedElement<typed_array::Uint8Elements>;
} else if (kind == INT8_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedInt8Array>;
+ loadFn = LoadJoinTypedElement<typed_array::Int8Elements>;
} else if (kind == UINT16_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedUint16Array>;
+ loadFn = LoadJoinTypedElement<typed_array::Uint16Elements>;
} else if (kind == INT16_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedInt16Array>;
+ loadFn = LoadJoinTypedElement<typed_array::Int16Elements>;
} else if (kind == UINT32_ELEMENTS) {
- loadFn = LoadJoinTypedElement<FixedUint32Array>;
+ loadFn = LoadJoinTypedElement<typed_array::Uint32Elements>;
} else {
unreachable;
}
@@ -624,7 +624,7 @@ namespace array_join {
// the algorithm.
const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
context, receiver, '%TypedArray%.prototype.join');
- const length: Smi = typedArray.length;
+ const length = Convert<Number>(typedArray.length);
return CycleProtectedArrayJoin<JSTypedArray>(
false, typedArray, length, separator, Undefined, Undefined);
@@ -640,7 +640,7 @@ namespace array_join {
// the algorithm.
const typedArray: JSTypedArray = typed_array::ValidateTypedArray(
context, receiver, '%TypedArray%.prototype.toLocaleString');
- const length: Smi = typedArray.length;
+ const length = Convert<Number>(typedArray.length);
return CycleProtectedArrayJoin<JSTypedArray>(
true, typedArray, length, ',', locales, options);
diff --git a/deps/v8/src/builtins/array-reverse.tq b/deps/v8/src/builtins/array-reverse.tq
index 80e9efe2f0..f1ba8fddf7 100644
--- a/deps/v8/src/builtins/array-reverse.tq
+++ b/deps/v8/src/builtins/array-reverse.tq
@@ -35,7 +35,7 @@ namespace array_reverse {
StoreElement<array::FastPackedSmiElements, Smi>(implicit context: Context)(
elements: FixedArrayBase, index: Smi, value: Smi) {
const elems: FixedArray = UnsafeCast<FixedArray>(elements);
- StoreFixedArrayElementSmi(elems, index, value, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(elems, index, value, SKIP_WRITE_BARRIER);
}
StoreElement<array::FastPackedObjectElements, Object>(
diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq
index 847729b607..5162329408 100644
--- a/deps/v8/src/builtins/array-slice.tq
+++ b/deps/v8/src/builtins/array-slice.tq
@@ -66,8 +66,12 @@ namespace array_slice {
const newElement: Object = e != Hole ?
argumentsContext[UnsafeCast<Smi>(e)] :
unmappedElements.objects[current];
- StoreFixedArrayElementSmi(
- resultElements, indexOut++, newElement, SKIP_WRITE_BARRIER);
+ // It is safe to skip the write barrier here because resultElements was
+ // allocated together with result in a folded allocation.
+ // TODO(tebbi): The verification of this fails at the moment due to
+ // missing load elimination.
+ StoreFixedArrayElement(
+ resultElements, indexOut++, newElement, UNSAFE_SKIP_WRITE_BARRIER);
}
// Fill in the rest of the result that contains the unmapped parameters
diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq
index dd061acf81..76e1a486c8 100644
--- a/deps/v8/src/builtins/base.tq
+++ b/deps/v8/src/builtins/base.tq
@@ -2,21 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include 'src/builtins/builtins-regexp-gen.h'
#include 'src/builtins/builtins-utils-gen.h'
#include 'src/builtins/builtins.h'
-#include 'src/code-factory.h'
-#include 'src/elements-kind.h'
+#include 'src/codegen/code-factory.h'
#include 'src/heap/factory-inl.h'
-#include 'src/objects.h'
#include 'src/objects/arguments.h'
#include 'src/objects/bigint.h'
+#include 'src/objects/elements-kind.h'
#include 'src/objects/free-space.h'
#include 'src/objects/js-generator.h'
#include 'src/objects/js-promise.h'
#include 'src/objects/js-regexp-string-iterator.h'
#include 'src/objects/module.h'
+#include 'src/objects/objects.h'
#include 'src/objects/stack-frame-info.h'
-#include 'src/builtins/builtins-regexp-gen.h'
+#include 'src/objects/template-objects.h'
type void;
type never;
@@ -30,7 +31,10 @@ type PositiveSmi extends Smi;
// The Smi value zero, which is often used as null for HeapObject types.
type Zero extends PositiveSmi;
-extern class HeapObject extends Tagged { map: Map; }
+@abstract
+extern class HeapObject extends Tagged {
+ map: Map;
+}
type Object = Smi | HeapObject;
type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
@@ -55,6 +59,8 @@ type bool generates 'TNode<BoolT>' constexpr 'bool';
type bint generates 'TNode<BInt>' constexpr 'BInt';
type string constexpr 'const char*';
+type NameDictionary extends FixedArray;
+
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
type Code extends HeapObject generates 'TNode<Code>';
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
@@ -66,6 +72,8 @@ extern class Context extends HeapObject {
native_context: Object;
}
type NativeContext extends Context;
+
+@generateCppClass
extern class Oddball extends HeapObject {
to_number_raw: float64;
to_string: String;
@@ -73,26 +81,35 @@ extern class Oddball extends HeapObject {
type_of: String;
kind: Smi;
}
+
extern class HeapNumber extends HeapObject { value: float64; }
type Number = Smi | HeapNumber;
type BigInt extends HeapObject generates 'TNode<BigInt>';
type Numeric = Number | BigInt;
-extern class Name extends HeapObject { hash_field: int32; }
+@abstract
+@noVerifier
+extern class Name extends HeapObject {
+ hash_field: int32;
+}
extern class Symbol extends Name {
flags: int32;
name: Object;
}
-// abstract
-extern class String extends Name { length: uint32; }
+@abstract
+extern class String extends Name {
+ length: uint32;
+}
extern class ConsString extends String {
first: String;
second: String;
}
+@abstract
+@noVerifier
extern class ExternalString extends String {
resource: RawPtr;
resource_data: RawPtr;
@@ -104,7 +121,10 @@ extern class ExternalTwoByteString extends ExternalString {}
extern class InternalizedString extends String {}
// TODO(v8:8983): Add declaration for variable-sized region.
-extern class SeqString extends String {}
+@abstract
+@noVerifier
+extern class SeqString extends String {
+}
extern class SeqOneByteString extends SeqString {}
extern class SeqTwoByteString extends SeqString {}
@@ -118,15 +138,26 @@ extern class ThinString extends String { actual: String; }
// The HeapNumber value NaN
type NaN extends HeapNumber;
-extern class Struct extends HeapObject {}
+@abstract
+@noVerifier
+@generatePrint
+@generateCppClass
+extern class Struct extends HeapObject {
+}
+@abstract
+@dirtyInstantiatedAbstractClass
@generatePrint
+@generateCppClass
extern class Tuple2 extends Struct {
value1: Object;
value2: Object;
}
+@abstract
+@dirtyInstantiatedAbstractClass
@generatePrint
+@generateCppClass
extern class Tuple3 extends Tuple2 {
value3: Object;
}
@@ -137,9 +168,12 @@ type DirectString extends String;
type RootIndex generates 'TNode<Int32T>' constexpr 'RootIndex';
-type Map extends HeapObject generates 'TNode<Map>';
-
-extern class FixedArrayBase extends HeapObject { length: Smi; }
+@abstract
+@noVerifier
+@generateCppClass
+extern class FixedArrayBase extends HeapObject {
+ length: Smi;
+}
extern class FixedArray extends FixedArrayBase { objects[length]: Object; }
@@ -151,6 +185,39 @@ extern class WeakFixedArray extends HeapObject { length: Smi; }
extern class ByteArray extends FixedArrayBase {}
+type LayoutDescriptor extends ByteArray
+ generates 'TNode<LayoutDescriptor>';
+type TransitionArray extends WeakFixedArray
+ generates 'TNode<TransitionArray>';
+
+// InstanceType actually extends uint16, but a bunch of methods in
+// CodeStubAssembler expect a TNode<Int32T>, so keeping it signed for now.
+type InstanceType extends int16 constexpr 'InstanceType';
+
+extern class Map extends HeapObject {
+ instance_size_in_words: uint8;
+ in_object_properties_start_or_constructor_function_index: uint8;
+ used_or_unused_instance_size_in_words: uint8;
+ visitor_id: uint8;
+ instance_type: InstanceType;
+ bit_field: uint8;
+ bit_field2: uint8;
+ bit_field3: uint32;
+
+ @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
+ @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
+
+ prototype: HeapObject;
+ constructor_or_back_pointer: Object;
+ instance_descriptors: DescriptorArray;
+ @if(V8_DOUBLE_FIELDS_UNBOXING) layout_descriptor: LayoutDescriptor;
+ @ifnot(V8_DOUBLE_FIELDS_UNBOXING) layout_descriptor: void;
+ dependent_code: DependentCode;
+ prototype_validity_cell: Smi | Cell;
+ weak transitions_or_prototype_info: Map | TransitionArray |
+ PrototypeInfo | Smi;
+}
+
type BytecodeArray extends FixedArrayBase;
@generatePrint
@@ -159,20 +226,74 @@ extern class EnumCache extends Struct {
indices: FixedArray;
}
+@generatePrint
+extern class SourcePositionTableWithFrameCache extends Struct {
+ source_position_table: ByteArray;
+ stack_frame_cache: Object;
+}
+
+// We make this class abstract because it is missing the variable-sized part,
+// which is still impossible to express in Torque.
+@abstract
+extern class DescriptorArray extends HeapObject {
+ number_of_all_descriptors: uint16;
+ number_of_descriptors: uint16;
+ raw_number_of_marked_descriptors: uint16;
+ filler16_bits: uint16;
+ enum_cache: EnumCache;
+ // DescriptorEntry needs to be a struct with three fields.
+  // descriptors : DescriptorEntry[number_of_all_descriptors]
+}
+
// These intrinsics should never be called from Torque code. They're used
// internally by the 'new' operator and only declared here because it's simpler
// than building the definition from C++.
intrinsic %GetAllocationBaseSize<Class: type>(map: Map): intptr;
intrinsic %Allocate<Class: type>(size: intptr): Class;
-intrinsic %AllocateInternalClass<Class: type>(slotCount: constexpr intptr): Class;
+intrinsic %AllocateInternalClass<Class: type>(slotCount: constexpr intptr):
+ Class;
+intrinsic %AddIndexedFieldSizeToObjectSize<T: type>(
+ baseSize: intptr, indexSize: T, fieldSize: int32): intptr {
+ const convertedIndexSize = Convert<int32>(indexSize);
+ const variableSize: int32 =
+ TryInt32Mul(convertedIndexSize, fieldSize) otherwise unreachable;
+ const convertedVariableSize = Convert<intptr>(variableSize);
+ return TryIntPtrAdd(baseSize, convertedVariableSize) otherwise unreachable;
+}
+
+intrinsic
+%InitializeFieldsFromIterator<Container: type, Index: type, Iterator: type>(
+ c: Container, length: Index, i: Iterator) {
+ try {
+ let mutableIterator = i;
+ let current: Index = 0;
+ while (current < length) {
+ // TODO(danno): The indexed accessor on the container requires that the
+ // '[]=' operator be defined explicitly for the Container
+ // (e.g. FixedArray). We should change this to use slice references
+ // once they are implemented.
+ c[current++] = mutableIterator.Next() otherwise NoMore;
+ }
+ }
+ label NoMore deferred {
+ unreachable;
+ }
+}
+
+@abstract
+@noVerifier
extern class JSReceiver extends HeapObject {
properties_or_hash: FixedArrayBase | Smi;
}
type Constructor extends JSReceiver;
-extern class JSObject extends JSReceiver { elements: FixedArrayBase; }
+@abstract
+@dirtyInstantiatedAbstractClass
+extern class JSObject extends JSReceiver {
+ @noVerifier elements: FixedArrayBase;
+}
macro NewJSObject(
map: Map, properties: FixedArrayBase | Smi,
@@ -190,12 +311,29 @@ macro NewJSObject(implicit context: Context)(): JSObject {
};
}
+macro GetDerivedMap(implicit context: Context)(
+ target: JSFunction, newTarget: JSReceiver): Map {
+ try {
+ const constructor = Cast<JSFunction>(newTarget) otherwise SlowPath;
+ const map =
+ Cast<Map>(constructor.prototype_or_initial_map) otherwise SlowPath;
+ if (LoadConstructorOrBackPointer(map) != target) {
+ goto SlowPath;
+ }
+
+ return map;
+ }
+ label SlowPath {
+ return runtime::GetDerivedMap(context, target, newTarget);
+ }
+}
+
extern class JSFunction extends JSObject {
shared_function_info: SharedFunctionInfo;
context: Context;
- feedback_cell: Smi;
+ feedback_cell: FeedbackCell;
weak code: Code;
- weak prototype_or_initial_map: JSReceiver | Map;
+ @noVerifier weak prototype_or_initial_map: JSReceiver | Map;
}
extern class JSProxy extends JSReceiver {
@@ -203,6 +341,7 @@ extern class JSProxy extends JSReceiver {
handler: Object;
}
+@noVerifier
extern class JSProxyRevocableResult extends JSObject {
proxy: Object;
revoke: Object;
@@ -224,9 +363,12 @@ extern class JSGlobalProxy extends JSObject { native_context: Object; }
extern class JSValue extends JSObject { value: Object; }
extern class JSArgumentsObject extends JSObject {}
+@noVerifier
+@hasSameInstanceTypeAsParent
extern class JSArgumentsObjectWithLength extends JSArgumentsObject {
length: Object;
}
+@hasSameInstanceTypeAsParent
extern class JSSloppyArgumentsObject extends JSArgumentsObjectWithLength {
callee: Object;
}
@@ -281,21 +423,36 @@ macro NewJSArray(implicit context: Context)(map: Map, length: Smi): JSArray {
// holey elements when the global NoElementsProtector is not invalidated.
transient type FastJSArray extends JSArray;
+// A HeapObject with a JSArray map, and either fast packed elements, or fast
+// holey elements or frozen, sealed elements when the global NoElementsProtector
+// is not invalidated.
+transient type FastJSArrayForRead extends JSArray;
+
// A FastJSArray when the global ArraySpeciesProtector is not invalidated.
transient type FastJSArrayForCopy extends FastJSArray;
// A FastJSArray when the global ArrayIteratorProtector is not invalidated.
transient type FastJSArrayWithNoCustomIteration extends FastJSArray;
+// A FastJSArrayForRead when the global ArrayIteratorProtector is not
+// invalidated.
+transient type FastJSArrayForReadWithNoCustomIteration extends
+ FastJSArrayForRead;
+
type NoSharedNameSentinel extends Smi;
type JSModuleNamespace extends JSObject;
type WeakArrayList extends HeapObject;
-extern class JSWeakCollection extends JSObject { table: Object; }
+@abstract
+@noVerifier
+extern class JSWeakCollection extends JSObject {
+ table: Object;
+}
extern class JSWeakSet extends JSWeakCollection {}
extern class JSWeakMap extends JSWeakCollection {}
+@noVerifier
extern class JSCollectionIterator extends JSObject {
table: Object;
index: Object;
@@ -307,8 +464,11 @@ extern class JSMessageObject extends JSObject {
arguments: Object;
script: Script;
stack_frames: Object;
+ shared_info: SharedFunctionInfo | Undefined;
+
// Raw data fields.
// TODO(ishell): store as int32 instead of Smi.
+ bytecode_offset: Smi;
start_position: Smi;
end_position: Smi;
error_level: Smi;
@@ -319,7 +479,7 @@ extern class PrototypeInfo extends Struct {
prototype_users: WeakArrayList | Zero;
registry_slot: Smi;
validity_cell: Object;
- object_create_map: Smi | WeakArrayList;
+ @noVerifier object_create_map: Smi | WeakArrayList;
bit_field: Smi;
}
@@ -340,7 +500,6 @@ extern class Script extends Struct {
source_mapping_url: Object;
host_defined_options: Object;
}
-type DebugInfo extends HeapObject;
extern class EmbedderDataArray extends HeapObject { length: Smi; }
@@ -361,17 +520,14 @@ extern class SharedFunctionInfo extends HeapObject {
weak function_data: Object;
name_or_scope_info: String | NoSharedNameSentinel | ScopeInfo;
outer_scope_info_or_feedback_metadata: HeapObject;
- script_or_debug_info: Script | DebugInfo;
+ script_or_debug_info: Script | DebugInfo | Undefined;
length: int16;
formal_parameter_count: uint16;
// Currently set to uint16, can be set to uint8 to save space.
expected_nof_properties: uint16;
function_token_offset: int16;
flags: int32;
-}
-
-extern class SharedFunctionInfoWithID extends SharedFunctionInfo {
- unique_id: int32;
+ @if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
}
extern class JSBoundFunction extends JSObject {
@@ -382,23 +538,16 @@ extern class JSBoundFunction extends JSObject {
type Callable = JSFunction | JSBoundFunction | JSProxy;
-extern class FixedTypedArrayBase extends FixedArrayBase {
- base_pointer: Smi;
- external_pointer: RawPtr;
-}
extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
FixedArrayBase): intptr;
-type FixedTypedArray extends FixedTypedArrayBase
- generates 'TNode<FixedTypedArray>';
-
-extern class SloppyArgumentsElements extends FixedArray {}
+type SloppyArgumentsElements extends FixedArray;
type NumberDictionary extends HeapObject
generates 'TNode<NumberDictionary>';
extern class FreeSpace extends HeapObject {
size: Smi;
- next: FreeSpace;
+ @noVerifier next: FreeSpace;
}
// %RawDownCast should *never* be used anywhere in Torque code except for
@@ -420,10 +569,18 @@ const OBJECT_FUNCTION_INDEX: constexpr NativeContextSlot
generates 'Context::OBJECT_FUNCTION_INDEX';
const ITERATOR_RESULT_MAP_INDEX: constexpr NativeContextSlot
generates 'Context::ITERATOR_RESULT_MAP_INDEX';
+const JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX: constexpr NativeContextSlot
+ generates 'Context::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX';
const JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX: constexpr NativeContextSlot
generates 'Context::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX';
const PROXY_REVOCABLE_RESULT_MAP_INDEX: constexpr NativeContextSlot
generates 'Context::PROXY_REVOCABLE_RESULT_MAP_INDEX';
+const REFLECT_APPLY_INDEX: constexpr NativeContextSlot
+ generates 'Context::REFLECT_APPLY_INDEX';
+const REGEXP_LAST_MATCH_INFO_INDEX: constexpr NativeContextSlot
+ generates 'Context::REGEXP_LAST_MATCH_INFO_INDEX';
+const INITIAL_STRING_ITERATOR_MAP_INDEX: constexpr NativeContextSlot
+ generates 'Context::INITIAL_STRING_ITERATOR_MAP_INDEX';
extern operator '[]' macro LoadContextElement(
NativeContext, NativeContextSlot): Object;
extern operator '[]=' macro StoreContextElement(
@@ -444,6 +601,7 @@ extern class JSArrayBuffer extends JSObject {
backing_store: RawPtr;
}
+@abstract
extern class JSArrayBufferView extends JSObject {
buffer: JSArrayBuffer;
byte_offset: uintptr;
@@ -451,9 +609,7 @@ extern class JSArrayBufferView extends JSObject {
}
extern class JSTypedArray extends JSArrayBufferView {
- AttachOffHeapBuffer(
- buffer: JSArrayBuffer, map: Map, length: PositiveSmi,
- byteOffset: uintptr): void {
+ AttachOffHeapBuffer(buffer: JSArrayBuffer, byteOffset: uintptr): void {
const basePointer: Smi = 0;
// The max byteOffset is 8 * MaxSmi on the particular platform. 32 bit
@@ -471,18 +627,18 @@ extern class JSTypedArray extends JSArrayBufferView {
IsMockArrayBufferAllocatorFlag() ||
Convert<uintptr>(externalPointer) >= Convert<uintptr>(backingStore));
+ this.elements = kEmptyByteArray;
this.buffer = buffer;
- this.elements = new FixedTypedArrayBase{
- map,
- length,
- base_pointer: basePointer,
- external_pointer: externalPointer
- };
+ this.external_pointer = externalPointer;
+ this.base_pointer = basePointer;
}
- length: Smi;
+ length: uintptr;
+ external_pointer: RawPtr;
+ base_pointer: ByteArray | Smi;
}
+@noVerifier
extern class JSAccessorPropertyDescriptor extends JSObject {
get: Object;
set: Object;
@@ -490,7 +646,11 @@ extern class JSAccessorPropertyDescriptor extends JSObject {
configurable: Object;
}
-extern class JSCollection extends JSObject { table: Object; }
+@abstract
+@noVerifier
+extern class JSCollection extends JSObject {
+ table: Object;
+}
extern class JSSet extends JSCollection {}
extern class JSMap extends JSCollection {}
@@ -506,6 +666,11 @@ extern class JSDate extends JSObject {
cache_stamp: Undefined | Smi | NaN;
}
+extern class JSGlobalObject extends JSObject {
+ native_context: NativeContext;
+ global_proxy: JSGlobalProxy;
+}
+
extern class JSAsyncFromSyncIterator extends JSObject {
sync_iterator: JSReceiver;
next: Object;
@@ -516,6 +681,7 @@ extern class JSStringIterator extends JSObject {
next_index: Smi;
}
+@noVerifier
extern class JSDataPropertyDescriptor extends JSObject {
value: Object;
writable: Object;
@@ -523,6 +689,7 @@ extern class JSDataPropertyDescriptor extends JSObject {
configurable: Object;
}
+@abstract
extern class TemplateInfo extends Struct {
tag: Object;
serial_number: Object;
@@ -531,6 +698,12 @@ extern class TemplateInfo extends Struct {
property_accessors: Object;
}
+@generatePrint
+extern class TemplateObjectDescription extends Struct {
+ raw_strings: FixedArray;
+ cooked_strings: FixedArray;
+}
+
extern class FunctionTemplateRareData extends Struct {
prototype_template: Object;
prototype_provider_template: Object;
@@ -549,7 +722,7 @@ extern class FunctionTemplateInfo extends TemplateInfo {
function_template_rare_data: Object;
shared_function_info: Object;
flag: Smi;
- length: Smi;
+ @noVerifier length: Smi;
cached_property_name: Object;
}
@@ -560,7 +733,7 @@ extern class ObjectTemplateInfo extends TemplateInfo {
extern class PropertyArray extends HeapObject { length_and_hash: Smi; }
-extern class DependentCode extends WeakFixedArray {}
+type DependentCode extends WeakFixedArray;
extern class PropertyCell extends HeapObject {
name: Name;
@@ -569,9 +742,8 @@ extern class PropertyCell extends HeapObject {
dependent_code: DependentCode;
}
-extern class JSDataView extends JSArrayBufferView {}
+extern class JSDataView extends JSArrayBufferView { data_pointer: RawPtr; }
-type InstanceType generates 'TNode<Int32T>' constexpr 'InstanceType';
type ElementsKind generates 'TNode<Int32T>' constexpr 'ElementsKind';
type LanguageMode extends Smi constexpr 'LanguageMode';
type ExtractFixedArrayFlags
@@ -583,20 +755,28 @@ type WriteBarrierMode
generates 'TNode<Int32T>' constexpr 'WriteBarrierMode';
type MessageTemplate constexpr 'MessageTemplate';
+type PrimitiveType constexpr 'PrimitiveType';
type ToIntegerTruncationMode
constexpr 'CodeStubAssembler::ToIntegerTruncationMode';
type AllocationFlags constexpr 'AllocationFlags';
+type SlackTrackingMode constexpr 'SlackTrackingMode';
+
+type UnicodeEncoding constexpr 'UnicodeEncoding';
+const UTF16:
+ constexpr UnicodeEncoding generates 'UnicodeEncoding::UTF16';
+const UTF32:
+ constexpr UnicodeEncoding generates 'UnicodeEncoding::UTF32';
extern class Foreign extends HeapObject { foreign_address: RawPtr; }
extern class InterceptorInfo extends Struct {
- getter: Foreign | Zero;
- setter: Foreign | Zero;
- query: Foreign | Zero;
- descriptor: Foreign | Zero;
- deleter: Foreign | Zero;
- enumerator: Foreign | Zero;
- definer: Foreign | Zero;
+ @noVerifier getter: Foreign | Zero;
+ @noVerifier setter: Foreign | Zero;
+ @noVerifier query: Foreign | Zero;
+ @noVerifier descriptor: Foreign | Zero;
+ @noVerifier deleter: Foreign | Zero;
+ @noVerifier enumerator: Foreign | Zero;
+ @noVerifier definer: Foreign | Zero;
data: Object;
flags: Smi;
}
@@ -620,11 +800,13 @@ extern class Cell extends HeapObject { value: Object; }
extern class DataHandler extends Struct {
smi_handler: Smi | Code;
validity_cell: Smi | Cell;
- weak data_1: Object;
- weak data_2: Object;
- weak data_3: Object;
+ @noVerifier weak data_1: Object;
+ @noVerifier weak data_2: Object;
+ @noVerifier weak data_3: Object;
}
+@abstract
+@dirtyInstantiatedAbstractClass
extern class JSGeneratorObject extends JSObject {
function: JSFunction;
context: Context;
@@ -649,7 +831,9 @@ extern class JSPromise extends JSObject {
flags: Smi;
}
-extern class Microtask extends Struct {}
+@abstract
+extern class Microtask extends Struct {
+}
extern class CallbackTask extends Microtask {
callback: Foreign;
@@ -664,10 +848,12 @@ extern class CallableTask extends Microtask {
extern class StackFrameInfo extends Struct {
line_number: Smi;
column_number: Smi;
+ promise_all_index: Smi;
script_id: Smi;
script_name: Object;
script_name_or_source_url: Object;
function_name: Object;
+ wasm_module_name: Object;
flag: Smi;
}
@@ -692,13 +878,21 @@ extern class WasmExportedFunctionData extends Struct {
function_index: Smi;
}
+extern class WasmJSFunctionData extends Struct { wrapper_code: Code; }
+
+extern class WasmCapiFunctionData extends Struct {
+ call_target: RawPtr;
+ embedder_data: RawPtr;
+ wrapper_code: Code;
+ serialized_signature: ByteArray; // PodArray<wasm::ValueType>
+}
+
extern class WasmDebugInfo extends Struct {
instance: WasmInstanceObject;
interpreter_handle: Foreign | Undefined;
- interpreted_functions: FixedArray;
- locals_names: FixedArray;
- c_wasm_entries: FixedArray;
- c_wasm_entry_map: Foreign; // Managed<wasm::SignatureMap>
+ locals_names: FixedArray | Undefined;
+ c_wasm_entries: FixedArray | Undefined;
+ c_wasm_entry_map: Foreign | Undefined; // Managed<wasm::SignatureMap>
}
extern class WasmExceptionTag extends Struct { index: Smi; }
@@ -719,6 +913,8 @@ const PACKED_DOUBLE_ELEMENTS:
constexpr ElementsKind generates 'PACKED_DOUBLE_ELEMENTS';
const HOLEY_DOUBLE_ELEMENTS:
constexpr ElementsKind generates 'HOLEY_DOUBLE_ELEMENTS';
+const LAST_FROZEN_ELEMENTS_KIND:
+ constexpr ElementsKind generates 'LAST_FROZEN_ELEMENTS_KIND';
const DICTIONARY_ELEMENTS:
constexpr ElementsKind generates 'DICTIONARY_ELEMENTS';
@@ -750,17 +946,10 @@ const kPretenured:
const kAllowLargeObjectAllocation: constexpr AllocationFlags
generates 'CodeStubAssembler::kAllowLargeObjectAllocation';
-type FixedUint8Array extends FixedTypedArray;
-type FixedInt8Array extends FixedTypedArray;
-type FixedUint16Array extends FixedTypedArray;
-type FixedInt16Array extends FixedTypedArray;
-type FixedUint32Array extends FixedTypedArray;
-type FixedInt32Array extends FixedTypedArray;
-type FixedFloat32Array extends FixedTypedArray;
-type FixedFloat64Array extends FixedTypedArray;
-type FixedUint8ClampedArray extends FixedTypedArray;
-type FixedBigUint64Array extends FixedTypedArray;
-type FixedBigInt64Array extends FixedTypedArray;
+const kWithSlackTracking: constexpr SlackTrackingMode
+ generates 'SlackTrackingMode::kWithSlackTracking';
+const kNoSlackTracking: constexpr SlackTrackingMode
+ generates 'SlackTrackingMode::kNoSlackTracking';
const kFixedDoubleArrays: constexpr ExtractFixedArrayFlags
generates 'CodeStubAssembler::ExtractFixedArrayFlag::kFixedDoubleArrays';
@@ -773,6 +962,8 @@ const kFixedArrayMapRootIndex:
constexpr RootIndex generates 'RootIndex::kFixedArrayMap';
const kFixedCOWArrayMapRootIndex:
constexpr RootIndex generates 'RootIndex::kFixedCOWArrayMap';
+const kEmptyByteArrayRootIndex:
+ constexpr RootIndex generates 'RootIndex::kEmptyByteArray';
const kEmptyFixedArrayRootIndex:
constexpr RootIndex generates 'RootIndex::kEmptyFixedArray';
const kTheHoleValueRootIndex:
@@ -806,26 +997,25 @@ const kTypedArrayTooShort: constexpr MessageTemplate
generates 'MessageTemplate::kTypedArrayTooShort';
const kInvalidCountValue: constexpr MessageTemplate
generates 'MessageTemplate::kInvalidCountValue';
-const kProxyNonObject: constexpr MessageTemplate
- generates 'MessageTemplate::kProxyNonObject';
-const kProxyHandlerOrTargetRevoked: constexpr MessageTemplate
- generates 'MessageTemplate::kProxyHandlerOrTargetRevoked';
const kConstructorNotFunction: constexpr MessageTemplate
generates 'MessageTemplate::kConstructorNotFunction';
+const kSymbolToString: constexpr MessageTemplate
+ generates 'MessageTemplate::kSymbolToString';
+const kPropertyNotFunction: constexpr MessageTemplate
+ generates 'MessageTemplate::kPropertyNotFunction';
const kMaxArrayIndex:
constexpr uint32 generates 'JSArray::kMaxArrayIndex';
-const kTypedArrayMaxByteLength:
- constexpr uintptr generates 'FixedTypedArrayBase::kMaxByteLength';
+const kArrayBufferMaxByteLength:
+ constexpr uintptr generates 'JSArrayBuffer::kMaxByteLength';
const V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP:
constexpr int31 generates 'V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP';
const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger';
const kSmiMaxValue: constexpr uintptr generates 'kSmiMaxValue';
+const kSmiMax: uintptr = kSmiMaxValue;
const kStringMaxLength: constexpr int31 generates 'String::kMaxLength';
const kFixedArrayMaxLength:
constexpr int31 generates 'FixedArray::kMaxLength';
-const kFixedTypedArrayBaseHeaderSize: constexpr intptr
- generates 'FixedTypedArrayBase::kHeaderSize';
const kObjectAlignmentMask: constexpr intptr
generates 'kObjectAlignmentMask';
const kMinAddedElementsCapacity:
@@ -861,6 +1051,9 @@ const kInvalidDataViewAccessorOffset: constexpr MessageTemplate
const kStrictReadOnlyProperty: constexpr MessageTemplate
generates 'MessageTemplate::kStrictReadOnlyProperty';
+const kString: constexpr PrimitiveType
+ generates 'PrimitiveType::kString';
+
type Hole extends Oddball;
type Null extends Oddball;
type Undefined extends Oddball;
@@ -880,6 +1073,7 @@ extern macro Int32TrueConstant(): bool;
extern macro Int32FalseConstant(): bool;
extern macro EmptyStringConstant(): EmptyString;
extern macro LengthStringConstant(): String;
+extern macro NanConstant(): NaN;
const Hole: Hole = TheHoleConstant();
const Null: Null = NullConstant();
@@ -888,6 +1082,7 @@ const True: True = TrueConstant();
const False: False = FalseConstant();
const kEmptyString: EmptyString = EmptyStringConstant();
const kLengthString: String = LengthStringConstant();
+const kNaN: NaN = NanConstant();
const true: constexpr bool generates 'true';
const false: constexpr bool generates 'false';
@@ -902,6 +1097,8 @@ const INTPTR_PARAMETERS: constexpr ParameterMode
const SKIP_WRITE_BARRIER:
constexpr WriteBarrierMode generates 'SKIP_WRITE_BARRIER';
+const UNSAFE_SKIP_WRITE_BARRIER:
+ constexpr WriteBarrierMode generates 'UNSAFE_SKIP_WRITE_BARRIER';
extern class AsyncGeneratorRequest extends Struct {
next: AsyncGeneratorRequest | Undefined;
@@ -933,10 +1130,11 @@ extern class PromiseReaction extends Struct {
promise_or_capability: JSPromise | PromiseCapability | Undefined;
}
+@abstract
extern class PromiseReactionJobTask extends Microtask {
argument: Object;
context: Context;
- handler: Callable | Undefined;
+ @noVerifier handler: Callable | Undefined;
promise_or_capability: JSPromise | PromiseCapability | Undefined;
}
@@ -957,6 +1155,23 @@ extern class JSRegExp extends JSObject {
flags: Smi | Undefined;
}
+@noVerifier
+extern class JSIteratorResult extends JSObject {
+ value: Object;
+ done: Boolean;
+}
+
+macro NewJSIteratorResult(implicit context: Context)(
+ value: Object, done: Boolean): JSIteratorResult {
+ return new JSIteratorResult{
+ map: GetIteratorResultMap(),
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ value,
+ done
+ };
+}
+
// Note: Although a condition for a FastJSRegExp is having a positive smi
// lastIndex (see RegExpBuiltinsAssembler::BranchIfFastRegExp), it is possible
// for this to change without transitioning the transient type. As a precaution,
@@ -972,6 +1187,7 @@ RegExpBuiltinsAssembler::FastLoadLastIndex(FastJSRegExp): Smi;
extern operator '.lastIndex=' macro
RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void;
+@hasSameInstanceTypeAsParent
extern class JSRegExpResult extends JSArray {
index: Object;
input: Object;
@@ -986,10 +1202,14 @@ extern class JSRegExpStringIterator extends JSObject {
const kRegExpMatchInfoFirstCaptureIndex:
constexpr int31 generates 'RegExpMatchInfo::kFirstCaptureIndex';
+const kRegExpMatchInfoNumberOfCapturesIndex:
+ constexpr int31 generates 'RegExpMatchInfo::kNumberOfCapturesIndex';
+
macro GetStartOfCaptureIndex(captureIndex: constexpr int31): constexpr int31 {
return kRegExpMatchInfoFirstCaptureIndex + (captureIndex * 2);
}
+@hasSameInstanceTypeAsParent
extern class RegExpMatchInfo extends FixedArray {
GetStartOfCapture(implicit context: Context)(captureIndex: constexpr int31):
Smi {
@@ -1001,15 +1221,18 @@ extern class RegExpMatchInfo extends FixedArray {
const index: constexpr int31 = GetStartOfCaptureIndex(captureIndex) + 1;
return UnsafeCast<Smi>(this.objects[index]);
}
+ NumberOfCaptures(implicit context: Context)(): Smi {
+ return UnsafeCast<Smi>(this.objects[kRegExpMatchInfoNumberOfCapturesIndex]);
+ }
}
extern class AccessorInfo extends Struct {
name: Object;
flags: Smi;
expected_receiver_type: Object;
- setter: Foreign | Zero;
- getter: Foreign | Zero;
- js_getter: Foreign | Zero;
+ @noVerifier setter: Foreign | Zero;
+ @noVerifier getter: Foreign | Zero;
+ @noVerifier js_getter: Foreign | Zero;
data: Object;
}
@@ -1020,18 +1243,54 @@ extern class AccessorPair extends Struct {
extern class BreakPoint extends Tuple2 {}
extern class BreakPointInfo extends Tuple2 {}
+type CoverageInfo extends FixedArray;
+
+extern class DebugInfo extends Struct {
+ shared_function_info: SharedFunctionInfo;
+ debugger_hints: Smi;
+ script: Undefined | Script;
+ original_bytecode_array: Undefined | BytecodeArray;
+ debug_bytecode_array: Undefined | BytecodeArray;
+ break_points: FixedArray;
+ flags: Smi;
+ coverage_info: CoverageInfo | Undefined;
+}
+
+extern class FeedbackVector extends HeapObject {
+ shared_function_info: SharedFunctionInfo;
+ // TODO(v8:9108): currently no support for MaybeObject in Torque
+ @noVerifier optimized_code_weak_or_smi: Object;
+ closure_feedback_cell_array: FixedArray;
+ length: int32;
+ invocation_count: int32;
+ profiler_ticks: int32;
+ // TODO(v8:9287) The padding is not necessary on platforms with 4 bytes
+ // tagged pointers, we should make it conditional; however, platform-specific
+ // interacts badly with GCMole, so we need to address that first.
+ padding: uint32;
+}
+
+extern class FeedbackCell extends Struct {
+ value: Undefined | FeedbackVector | FixedArray;
+ interrupt_budget: int32;
+}
+
+type AllocationSite extends Struct;
+extern class AllocationMemento extends Struct {
+ @noVerifier allocation_site: AllocationSite;
+}
extern class WasmModuleObject extends JSObject {
native_module: Foreign;
export_wrappers: FixedArray;
script: Script;
weak_instance_list: WeakArrayList;
- asm_js_offset_table: ByteArray;
- break_point_infos: FixedArray;
+ asm_js_offset_table: ByteArray | Undefined;
+ break_point_infos: FixedArray | Undefined;
}
extern class WasmTableObject extends JSObject {
- elements: FixedArray;
+ entries: FixedArray;
maximum_length: Smi | HeapNumber | Undefined;
dispatch_tables: FixedArray;
raw_type: Smi;
@@ -1040,7 +1299,7 @@ extern class WasmTableObject extends JSObject {
extern class WasmMemoryObject extends JSObject {
array_buffer: JSArrayBuffer;
maximum_pages: Smi;
- instances: WeakArrayList;
+ instances: WeakArrayList | Undefined;
}
extern class WasmGlobalObject extends JSObject {
@@ -1055,9 +1314,11 @@ extern class WasmExceptionObject extends JSObject {
exception_tag: HeapObject;
}
-extern class WasmExceptionPackage extends JSReceiver {}
+@noVerifier
+extern class WasmExceptionPackage extends JSReceiver {
+}
-extern class WasmExportedFunction extends JSFunction {}
+type WasmExportedFunction extends JSFunction;
extern class AsmWasmData extends Struct {
managed_native_module: Foreign; // Managed<wasm::NativeModule>
@@ -1073,6 +1334,7 @@ extern macro SelectBooleanConstant(bool): Boolean;
extern macro Print(constexpr string);
extern macro Print(constexpr string, Object);
extern macro Comment(constexpr string);
+extern macro StaticAssert(bool);
extern macro Print(Object);
extern macro DebugBreak();
extern transitioning macro ToInteger_Inline(Context, Object): Number;
@@ -1087,6 +1349,8 @@ extern transitioning macro ToSmiLength(implicit context: Context)(Object):
extern transitioning macro ToString_Inline(Context, Object): String;
extern transitioning macro ToThisString(implicit context: Context)(
Object, String): String;
+extern transitioning macro ToThisValue(implicit context: Context)(
+ Object, constexpr PrimitiveType, constexpr string): Object;
extern transitioning macro GetProperty(implicit context: Context)(
Object, Object): Object;
extern transitioning builtin SetProperty(implicit context: Context)(
@@ -1096,7 +1360,7 @@ extern transitioning builtin SetPropertyInLiteral(implicit context: Context)(
extern transitioning builtin DeleteProperty(implicit context: Context)(
Object, Object, LanguageMode);
extern transitioning builtin HasProperty(implicit context: Context)(
- JSReceiver, Object): Boolean;
+ Object, Object): Boolean;
extern transitioning macro HasProperty_Inline(implicit context: Context)(
JSReceiver, Object): Boolean;
@@ -1111,7 +1375,12 @@ extern macro ThrowTypeError(implicit context: Context)(
extern macro ThrowTypeError(implicit context: Context)(
constexpr MessageTemplate, Object): never;
extern macro ThrowTypeError(implicit context: Context)(
+ constexpr MessageTemplate, Object, Object): never;
+extern macro ThrowTypeError(implicit context: Context)(
constexpr MessageTemplate, Object, Object, Object): never;
+extern transitioning runtime ThrowTypeErrorIfStrict(implicit context: Context)(
+ Smi, Object, Object): void;
+
extern macro ArraySpeciesCreate(Context, Object, Number): JSReceiver;
extern macro ArrayCreate(implicit context: Context)(Number): JSArray;
extern macro BuildAppendJSArray(
@@ -1139,7 +1408,12 @@ extern macro ToObject_Inline(Context, Object): JSReceiver;
extern macro IsNullOrUndefined(Object): bool;
extern macro IsTheHole(Object): bool;
extern macro IsString(HeapObject): bool;
-extern builtin ToString(Context, Object): String;
+transitioning builtin ToString(context: Context, o: Object): String {
+ return ToStringImpl(context, o);
+}
+extern transitioning runtime ToStringRT(Context, Object): String;
+extern transitioning builtin NonPrimitiveToPrimitive_String(
+ Context, Object): Object;
extern transitioning runtime NormalizeElements(Context, JSObject);
extern transitioning runtime TransitionElementsKindWithKind(
@@ -1148,7 +1422,6 @@ extern transitioning runtime TransitionElementsKindWithKind(
extern macro LoadBufferObject(RawPtr, constexpr int32): Object;
extern macro LoadBufferPointer(RawPtr, constexpr int32): RawPtr;
extern macro LoadBufferSmi(RawPtr, constexpr int32): Smi;
-extern macro LoadFixedTypedArrayOnHeapBackingStore(FixedTypedArrayBase): RawPtr;
extern macro LoadRoot(constexpr RootIndex): Object;
extern macro StoreRoot(constexpr RootIndex, Object): Object;
@@ -1157,6 +1430,7 @@ extern runtime StringEqual(Context, String, String): Oddball;
extern builtin StringLessThan(Context, String, String): Boolean;
extern macro StringCharCodeAt(String, intptr): int32;
extern runtime StringCompareSequence(Context, String, String, Number): Boolean;
+extern macro StringFromSingleCharCode(int32): String;
extern macro StrictEqual(Object, Object): Boolean;
extern macro SmiLexicographicCompare(Smi, Smi): Smi;
@@ -1336,6 +1610,10 @@ macro Max(x: Number, y: Number): Number {
return NumberMax(x, y);
}
+extern macro TryIntPtrAdd(intptr, intptr): intptr labels Overflow;
+extern macro TryIntPtrSub(intptr, intptr): intptr labels Overflow;
+extern macro TryInt32Mul(int32, int32): int32 labels Overflow;
+
extern operator '<<' macro ConstexprUintPtrShl(
constexpr uintptr, constexpr int31): constexpr uintptr;
extern operator '>>>' macro ConstexprUintPtrShr(
@@ -1376,6 +1654,8 @@ extern macro HeapObjectToJSDataView(HeapObject): JSDataView
labels CastError;
extern macro HeapObjectToJSProxy(HeapObject): JSProxy
labels CastError;
+extern macro HeapObjectToJSStringIterator(HeapObject): JSStringIterator
+ labels CastError;
extern macro HeapObjectToJSArrayBuffer(HeapObject): JSArrayBuffer
labels CastError;
extern macro TaggedToHeapObject(Object): HeapObject
@@ -1403,6 +1683,8 @@ extern macro HeapObjectToHeapNumber(HeapObject): HeapNumber
extern macro HeapObjectToSloppyArgumentsElements(HeapObject):
SloppyArgumentsElements
labels CastError;
+extern macro HeapObjectToRegExpMatchInfo(HeapObject):
+ RegExpMatchInfo labels CastError;
extern macro TaggedToNumber(Object): Number
labels CastError;
@@ -1435,6 +1717,12 @@ Cast<HeapObject>(o: HeapObject): HeapObject
return o;
}
+Cast<Null>(o: HeapObject): Null
+ labels CastError {
+ if (o != Null) goto CastError;
+ return %RawDownCast<Null>(o);
+}
+
Cast<FixedArray>(o: HeapObject): FixedArray
labels CastError {
return HeapObjectToFixedArray(o) otherwise CastError;
@@ -1460,6 +1748,11 @@ Cast<JSProxy>(o: HeapObject): JSProxy
return HeapObjectToJSProxy(o) otherwise CastError;
}
+Cast<JSStringIterator>(o: HeapObject): JSStringIterator
+ labels CastError {
+ return HeapObjectToJSStringIterator(o) otherwise CastError;
+}
+
Cast<JSTypedArray>(o: HeapObject): JSTypedArray
labels CastError {
if (IsJSTypedArray(o)) return %RawDownCast<JSTypedArray>(o);
@@ -1511,15 +1804,21 @@ Cast<NumberDictionary>(o: HeapObject): NumberDictionary
goto CastError;
}
-Cast<FixedTypedArrayBase>(o: HeapObject): FixedTypedArrayBase
+Cast<String>(o: HeapObject): String
+ labels CastError {
+ return HeapObjectToString(o) otherwise CastError;
+}
+
+Cast<Oddball>(o: HeapObject): Oddball
labels CastError {
- if (IsFixedTypedArray(o)) return %RawDownCast<FixedTypedArrayBase>(o);
+ if (IsOddball(o)) return %RawDownCast<Oddball>(o);
goto CastError;
}
-Cast<String>(o: HeapObject): String
+Cast<Symbol>(o: HeapObject): Symbol
labels CastError {
- return HeapObjectToString(o) otherwise CastError;
+ if (IsSymbol(o)) return %RawDownCast<Symbol>(o);
+ goto CastError;
}
Cast<DirectString>(o: HeapObject): DirectString
@@ -1550,6 +1849,12 @@ Cast<Map>(implicit context: Context)(o: HeapObject): Map
goto CastError;
}
+Cast<JSValue>(o: HeapObject): JSValue
+ labels CastError {
+ if (IsJSValue(o)) return %RawDownCast<JSValue>(o);
+ goto CastError;
+}
+
Cast<JSArgumentsObjectWithLength>(implicit context: Context)(o: HeapObject):
JSArgumentsObjectWithLength
labels CastError {
@@ -1588,6 +1893,24 @@ Cast<FastJSArray>(implicit context: Context)(o: HeapObject): FastJSArray
return %RawDownCast<FastJSArray>(o);
}
+Cast<FastJSArrayForRead>(implicit context: Context)(o: HeapObject):
+ FastJSArrayForRead
+ labels CastError {
+ const map: Map = o.map;
+ if (!IsJSArrayMap(map)) goto CastError;
+
+ // Bailout if receiver has slow elements.
+ const elementsKind: ElementsKind = LoadMapElementsKind(map);
+ if (!IsElementsKindLessThanOrEqual(elementsKind, LAST_FROZEN_ELEMENTS_KIND))
+ goto CastError;
+
+ // Verify that our prototype is the initial array prototype.
+ if (!IsPrototypeInitialArrayPrototype(map)) goto CastError;
+
+ if (IsNoElementsProtectorCellInvalid()) goto CastError;
+ return %RawDownCast<FastJSArrayForRead>(o);
+}
+
Cast<FastJSArrayForCopy>(implicit context: Context)(o: HeapObject):
FastJSArrayForCopy
labels CastError {
@@ -1604,6 +1927,14 @@ Cast<FastJSArrayWithNoCustomIteration>(implicit context: Context)(
return %RawDownCast<FastJSArrayWithNoCustomIteration>(o);
}
+Cast<FastJSArrayForReadWithNoCustomIteration>(implicit context: Context)(
+ o: HeapObject): FastJSArrayForReadWithNoCustomIteration
+ labels CastError {
+ if (IsArrayIteratorProtectorCellInvalid()) goto CastError;
+ const a: FastJSArrayForRead = Cast<FastJSArrayForRead>(o) otherwise CastError;
+ return %RawDownCast<FastJSArrayForReadWithNoCustomIteration>(o);
+}
+
Cast<JSReceiver>(implicit context: Context)(o: HeapObject): JSReceiver
labels CastError {
if (IsJSReceiver(o)) return %RawDownCast<JSReceiver>(o);
@@ -1616,6 +1947,21 @@ Cast<JSFunction>(implicit context: Context)(o: HeapObject): JSFunction
goto CastError;
}
+extern macro IsDebugInfo(HeapObject): bool;
+Cast<DebugInfo>(implicit context: Context)(o: HeapObject): DebugInfo
+ labels CastError {
+ if (IsDebugInfo(o)) return %RawDownCast<DebugInfo>(o);
+ goto CastError;
+}
+
+extern macro IsCoverageInfo(HeapObject): bool;
+Cast<CoverageInfo>(implicit context: Context)(o: HeapObject): CoverageInfo
+ labels CastError {
+ // TODO(jgruber): Assign an instance type.
+ if (IsFixedArray(o)) return %RawDownCast<CoverageInfo>(o);
+ goto CastError;
+}
+
extern macro AllocateHeapNumberWithValue(float64): HeapNumber;
extern macro ChangeInt32ToTagged(int32): Number;
extern macro ChangeUint32ToTagged(uint32): Number;
@@ -1643,15 +1989,12 @@ extern macro ChangeFloat64ToUintPtr(float64): uintptr;
extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
extern macro LoadNativeContext(Context): NativeContext;
+extern macro TruncateFloat64ToFloat32(float64): float32;
+extern macro TruncateHeapNumberValueToWord32(Number): int32;
extern macro LoadJSArrayElementsMap(constexpr ElementsKind, Context): Map;
extern macro LoadJSArrayElementsMap(ElementsKind, Context): Map;
extern macro ChangeNonnegativeNumberToUintPtr(Number): uintptr;
extern macro TryNumberToUintPtr(Number): uintptr labels IfNegative;
-macro TryUintPtrToPositiveSmi(ui: uintptr): PositiveSmi labels IfOverflow {
- if (ui > kSmiMaxValue) goto IfOverflow;
- return %RawDownCast<PositiveSmi>(SmiTag(Signed(ui)));
-}
-
extern macro NumberConstant(constexpr float64): Number;
extern macro NumberConstant(constexpr int32): Number;
extern macro NumberConstant(constexpr uint32): Number;
@@ -1663,6 +2006,7 @@ extern macro Float64Constant(constexpr int31): float64;
extern macro Float64Constant(constexpr float64): float64;
extern macro SmiConstant(constexpr int31): Smi;
extern macro SmiConstant(constexpr Smi): Smi;
+extern macro SmiConstant(constexpr MessageTemplate): Smi;
extern macro BoolConstant(constexpr bool): bool;
extern macro StringConstant(constexpr string): String;
extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode;
@@ -1768,6 +2112,10 @@ macro Convert<To: type, From: type>(i: From): To {
return i;
}
+macro Convert<To: type, From: type>(i: From): To labels Overflow {
+ return i;
+}
+
extern macro ConvertElementsKindToInt(ElementsKind): int32;
Convert<int32, ElementsKind>(elementsKind: ElementsKind): int32 {
return ConvertElementsKindToInt(elementsKind);
@@ -1823,6 +2171,20 @@ Convert<PositiveSmi, intptr>(i: intptr): PositiveSmi {
assert(IsValidPositiveSmi(i));
return %RawDownCast<PositiveSmi>(SmiTag(i));
}
+Convert<PositiveSmi, uintptr>(ui: uintptr): PositiveSmi labels IfOverflow {
+ if (ui > kSmiMaxValue) deferred {
+ goto IfOverflow;
+ }
+ return %RawDownCast<PositiveSmi>(SmiTag(Signed(ui)));
+}
+Convert<PositiveSmi, intptr>(i: intptr): PositiveSmi labels IfOverflow {
+ if (IsValidPositiveSmi(i)) {
+ return %RawDownCast<PositiveSmi>(SmiTag(i));
+ } else
+ deferred {
+ goto IfOverflow;
+ }
+}
Convert<int32, Smi>(s: Smi): int32 {
return SmiToInt32(s);
}
@@ -1838,6 +2200,12 @@ Convert<uintptr, Number>(n: Number): uintptr {
Convert<float64, float32>(f: float32): float64 {
return ChangeFloat32ToFloat64(f);
}
+Convert<float32, float64>(f: float64): float32 {
+ return TruncateFloat64ToFloat32(f);
+}
+Convert<float32, Number>(n: Number): float32 {
+ return Convert<float32>(ChangeNumberToFloat64(n));
+}
Convert<Number, float64>(d: float64): Number {
return AllocateHeapNumberWithValue(d);
}
@@ -1899,6 +2267,8 @@ UnsafeCast<Object>(o: Object): Object {
const kFixedArrayMap: Map =
%RawDownCast<Map>(LoadRoot(kFixedArrayMapRootIndex));
const kCOWMap: Map = %RawDownCast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
+const kEmptyByteArray: ByteArray =
+ %RawDownCast<ByteArray>(LoadRoot(kEmptyByteArrayRootIndex));
const kEmptyFixedArray: FixedArray =
%RawDownCast<FixedArray>(LoadRoot(kEmptyFixedArrayRootIndex));
@@ -1913,8 +2283,8 @@ extern macro IsMockArrayBufferAllocatorFlag(): bool;
extern macro IsPrototypeTypedArrayPrototype(implicit context: Context)(Map):
bool;
-extern operator '.data_ptr' macro TypedArrayBuiltinsAssembler::LoadDataPtr(
- JSTypedArray): RawPtr;
+extern operator '.data_ptr' macro LoadJSTypedArrayBackingStore(JSTypedArray):
+ RawPtr;
extern operator '.elements_kind' macro LoadMapElementsKind(Map): ElementsKind;
extern operator '.elements_kind' macro LoadElementsKind(JSTypedArray):
@@ -1945,12 +2315,20 @@ extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, Smi): void;
extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, constexpr int31, HeapObject): void;
-extern operator '.objects[]=' macro StoreFixedArrayElementSmi(
+extern operator '.objects[]=' macro StoreFixedArrayElement(
FixedArray, Smi, Object): void;
-extern operator '.objects[]=' macro StoreFixedArrayElementSmi(
+extern macro StoreFixedArrayElement(
FixedArray, Smi, Object, constexpr WriteBarrierMode): void;
extern macro StoreFixedArrayElement(
+ FixedArray, Smi, Smi, constexpr WriteBarrierMode): void;
+extern macro StoreFixedArrayElement(
+ FixedArray, constexpr int31, Object, constexpr WriteBarrierMode): void;
+extern macro StoreFixedArrayElement(
+ FixedArray, constexpr int31, Smi, constexpr WriteBarrierMode): void;
+extern macro StoreFixedArrayElement(
FixedArray, intptr, Object, constexpr WriteBarrierMode): void;
+extern macro StoreFixedArrayElement(
+ FixedArray, intptr, Smi, constexpr WriteBarrierMode): void;
extern operator '.floats[]=' macro StoreFixedDoubleArrayElement(
FixedDoubleArray, intptr, float64): void;
extern operator '.floats[]=' macro StoreFixedDoubleArrayElementSmi(
@@ -1967,12 +2345,12 @@ operator '[]=' macro StoreFixedArrayDirect(a: FixedArray, i: Smi, v: Object) {
a.objects[i] = v;
}
-extern operator '.instance_type' macro LoadMapInstanceType(Map): int32;
-
extern macro GetNumberDictionaryNumberOfElements(NumberDictionary): Smi;
extern macro GetIteratorMethod(implicit context: Context)(HeapObject): Object
labels IfIteratorUndefined;
+extern macro LoadConstructorOrBackPointer(Map): Object;
+
extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr): Object
labels NotData, IfHole;
extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, Object)
@@ -2032,10 +2410,16 @@ extern macro CopyFixedArrayElements(
constexpr ElementsKind, FixedArray, constexpr ElementsKind, FixedArray, Smi,
Smi, Smi): void;
+extern macro AllocateJSArray(
+ constexpr ElementsKind, Map, intptr, Smi,
+ constexpr AllocationFlags): JSArray;
extern macro AllocateJSArray(constexpr ElementsKind, Map, intptr, Smi): JSArray;
extern macro AllocateJSArray(constexpr ElementsKind, Map, Smi, Smi): JSArray;
extern macro AllocateJSArray(Map, FixedArrayBase, Smi): JSArray;
extern macro AllocateJSObjectFromMap(Map): JSObject;
+extern macro AllocateJSObjectFromMap(
+ Map, FixedArray, FixedArray, constexpr AllocationFlags,
+ constexpr SlackTrackingMode): JSObject;
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
labels IfHole;
@@ -2055,16 +2439,32 @@ macro GetArrayBufferNoInitFunction(implicit context: Context)(): JSFunction {
return UnsafeCast<JSFunction>(
LoadNativeContext(context)[ARRAY_BUFFER_NOINIT_FUN_INDEX]);
}
-
+macro GetFastPackedElementsJSArrayMap(implicit context: Context)(): Map {
+ return UnsafeCast<Map>(
+ LoadNativeContext(context)[JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX]);
+}
macro GetFastPackedSmiElementsJSArrayMap(implicit context: Context)(): Map {
return UnsafeCast<Map>(
LoadNativeContext(context)[JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX]);
}
-
macro GetProxyRevocableResultMap(implicit context: Context)(): Map {
return UnsafeCast<Map>(
LoadNativeContext(context)[PROXY_REVOCABLE_RESULT_MAP_INDEX]);
}
+macro GetIteratorResultMap(implicit context: Context)(): Map {
+ return UnsafeCast<Map>(LoadNativeContext(context)[ITERATOR_RESULT_MAP_INDEX]);
+}
+macro GetInitialStringIteratorMap(implicit context: Context)(): Map {
+ return UnsafeCast<Map>(
+ LoadNativeContext(context)[INITIAL_STRING_ITERATOR_MAP_INDEX]);
+}
+macro GetReflectApply(implicit context: Context)(): Callable {
+ return UnsafeCast<Callable>(LoadNativeContext(context)[REFLECT_APPLY_INDEX]);
+}
+macro GetRegExpLastMatchInfo(implicit context: Context)(): RegExpMatchInfo {
+ return %RawDownCast<RegExpMatchInfo>(
+ LoadNativeContext(context)[REGEXP_LAST_MATCH_INFO_INDEX]);
+}
extern transitioning macro Call(Context, Callable, Object): Object;
extern transitioning macro Call(Context, Callable, Object, Object): Object;
@@ -2194,7 +2594,7 @@ struct FastJSArrayWitness {
} else {
const elements = Cast<FixedArray>(this.unstable.elements)
otherwise unreachable;
- StoreFixedArrayElementSmi(elements, k, Hole, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(elements, k, Hole);
}
}
@@ -2276,6 +2676,7 @@ macro NewFastJSArrayWitness(array: FastJSArray): FastJSArrayWitness {
extern macro TransitionElementsKind(
JSObject, Map, constexpr ElementsKind,
constexpr ElementsKind): void labels Bailout;
+extern macro PerformStackCheck(implicit context: Context)(): void;
extern macro IsCallable(HeapObject): bool;
extern macro IsConstructor(HeapObject): bool;
@@ -2287,20 +2688,22 @@ extern macro IsJSFunction(HeapObject): bool;
extern macro IsJSObject(HeapObject): bool;
extern macro IsJSTypedArray(HeapObject): bool;
extern macro IsNumberDictionary(HeapObject): bool;
-extern macro IsFixedTypedArray(HeapObject): bool;
extern macro IsContext(HeapObject): bool;
extern macro IsJSReceiver(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
extern macro IsDetachedBuffer(JSArrayBuffer): bool;
extern macro IsHeapNumber(HeapObject): bool;
extern macro IsFixedArray(HeapObject): bool;
+extern macro IsName(HeapObject): bool;
+extern macro IsPrivateSymbol(HeapObject): bool;
extern macro IsNumber(Object): bool;
extern macro IsNumberNormalized(Number): bool;
+extern macro IsOddball(HeapObject): bool;
+extern macro IsSymbol(HeapObject): bool;
extern macro IsJSArrayMap(Map): bool;
extern macro IsExtensibleMap(Map): bool;
+extern macro IsJSValue(HeapObject): bool;
extern macro IsCustomElementsReceiverInstanceType(int32): bool;
-extern macro IsFastJSArrayWithNoCustomIteration(implicit context: Context)(
- Object): bool;
extern macro Typeof(Object): Object;
// Return true iff number is NaN.
@@ -2362,14 +2765,20 @@ transitioning macro GetLengthProperty(implicit context: Context)(o: Object):
}
}
+transitioning macro GetMethod(implicit context: Context)(
+ o: Object, name: constexpr string): Callable labels IfNullOrUndefined {
+ const value = GetProperty(o, name);
+ if (value == Undefined || value == Null) goto IfNullOrUndefined;
+ return Cast<Callable>(value)
+ otherwise ThrowTypeError(kPropertyNotFunction, value, name, o);
+}
+
extern macro NumberToString(Number): String;
extern macro IsOneByteStringInstanceType(InstanceType): bool;
extern macro AllocateSeqOneByteString(implicit context: Context)(uint32):
String;
extern macro AllocateSeqTwoByteString(implicit context: Context)(uint32):
String;
-extern macro TryIntPtrAdd(intptr, intptr): intptr
- labels IfOverflow;
extern macro ConvertToRelativeIndex(implicit context: Context)(
Object, intptr): intptr;
@@ -2383,6 +2792,7 @@ struct KeyValuePair {
// Macro definitions for compatibility that expose functionality to the CSA
// using "legacy" APIs. In Torque code, these should not be used.
+@export
macro IsFastJSArray(o: Object, context: Context): bool {
try {
// Long-term, it's likely not a good idea to have this slow-path test here,
@@ -2396,14 +2806,24 @@ macro IsFastJSArray(o: Object, context: Context): bool {
return Is<FastJSArray>(o);
}
-macro BranchIfFastJSArray(o: Object, context: Context): never
- labels True, False {
+@export
+macro BranchIfFastJSArray(o: Object, context: Context): never labels True,
+ False {
// Long-term, it's likely not a good idea to have this slow-path test here,
// since it fundamentally breaks the type system.
GotoIfForceSlowPath() otherwise False;
BranchIf<FastJSArray>(o) otherwise True, False;
}
+@export
+macro BranchIfFastJSArrayForRead(o: Object, context: Context):
+ never labels True, False {
+ // Long-term, it's likely not a good idea to have this slow-path test here,
+ // since it fundamentally breaks the type system.
+ GotoIfForceSlowPath() otherwise False;
+ BranchIf<FastJSArrayForRead>(o) otherwise True, False;
+}
+
macro BranchIfNotFastJSArray(o: Object, context: Context): never
labels True, False {
BranchIfNot<FastJSArray>(o) otherwise True, False;
@@ -2417,13 +2837,25 @@ macro BranchIfFastJSArrayForCopy(o: Object, context: Context): never
BranchIf<FastJSArrayForCopy>(o) otherwise True, False;
}
+@export
macro IsFastJSArrayWithNoCustomIteration(context: Context, o: Object): bool {
return Is<FastJSArrayWithNoCustomIteration>(o);
}
+@export
+macro IsFastJSArrayForReadWithNoCustomIteration(context: Context, o: Object):
+ bool {
+ return Is<FastJSArrayForReadWithNoCustomIteration>(o);
+}
+
extern transitioning runtime
CreateDataProperty(implicit context: Context)(JSReceiver, Object, Object);
+namespace runtime {
+ extern runtime
+ GetDerivedMap(Context, JSFunction, JSReceiver): Map;
+}
+
transitioning builtin FastCreateDataProperty(implicit context: Context)(
receiver: JSReceiver, key: Object, value: Object): Object {
try {
@@ -2468,3 +2900,32 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)(
}
return Undefined;
}
+
+@export
+transitioning macro ToStringImpl(context: Context, o: Object): String {
+ let result: Object = o;
+ while (true) {
+ typeswitch (result) {
+ case (num: Number): {
+ return NumberToString(num);
+ }
+ case (str: String): {
+ return str;
+ }
+ case (oddball: Oddball): {
+ return oddball.to_string;
+ }
+ case (JSReceiver): {
+ result = NonPrimitiveToPrimitive_String(context, result);
+ continue;
+ }
+ case (Symbol): {
+ ThrowTypeError(kSymbolToString);
+ }
+ case (Object): {
+ return ToStringRT(context, o);
+ }
+ }
+ }
+ unreachable;
+}
diff --git a/deps/v8/src/builtins/boolean.tq b/deps/v8/src/builtins/boolean.tq
new file mode 100644
index 0000000000..a41ef76d21
--- /dev/null
+++ b/deps/v8/src/builtins/boolean.tq
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace boolean {
+ const kNameDictionaryInitialCapacity:
+ constexpr int32 generates 'NameDictionary::kInitialCapacity';
+
+ extern macro ConstructorBuiltinsAssembler::IsDictionaryMap(Map): bool;
+ extern macro CodeStubAssembler::AllocateNameDictionary(constexpr int32):
+ NameDictionary;
+
+ // TODO(v8:9120): This is a workaround to get access to target and new.target
+ // in javascript builtins. Requires cleanup once this is fully supported by
+ // torque.
+ const NEW_TARGET_INDEX:
+ constexpr int32 generates 'Descriptor::kJSNewTarget';
+ const TARGET_INDEX: constexpr int32 generates 'Descriptor::kJSTarget';
+ extern macro Parameter(constexpr int32): Object;
+
+ javascript builtin
+ BooleanConstructor(context: Context, receiver: Object, ...arguments): Object {
+ const value = SelectBooleanConstant(ToBoolean(arguments[0]));
+
+ const newTarget = Parameter(NEW_TARGET_INDEX);
+ if (newTarget == Undefined) {
+ return value;
+ }
+
+ const target = UnsafeCast<JSFunction>(Parameter(TARGET_INDEX));
+ const map = GetDerivedMap(target, UnsafeCast<JSReceiver>(newTarget));
+ let properties = kEmptyFixedArray;
+ if (IsDictionaryMap(map)) {
+ properties = AllocateNameDictionary(kNameDictionaryInitialCapacity);
+ }
+
+ const obj = UnsafeCast<JSValue>(AllocateJSObjectFromMap(
+ map, properties, kEmptyFixedArray, kNone, kWithSlackTracking));
+ obj.value = value;
+ return obj;
+ }
+}
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index ff77f32e4a..7ee879ab51 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-arguments-inl.h"
-#include "src/api-natives.h"
+#include "src/api/api-arguments-inl.h"
+#include "src/api/api-natives.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/prototype.h"
#include "src/objects/templates.h"
-#include "src/prototype.h"
-#include "src/visitors.h"
+#include "src/objects/visitors.h"
namespace v8 {
namespace internal {
@@ -23,23 +23,23 @@ namespace {
// TODO(dcarney): CallOptimization duplicates this logic, merge.
JSReceiver GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo info,
JSReceiver receiver) {
- Object recv_type = info->signature();
+ Object recv_type = info.signature();
// No signature, return holder.
- if (!recv_type->IsFunctionTemplateInfo()) return receiver;
+ if (!recv_type.IsFunctionTemplateInfo()) return receiver;
// A Proxy cannot have been created from the signature template.
- if (!receiver->IsJSObject()) return JSReceiver();
+ if (!receiver.IsJSObject()) return JSReceiver();
JSObject js_obj_receiver = JSObject::cast(receiver);
FunctionTemplateInfo signature = FunctionTemplateInfo::cast(recv_type);
// Check the receiver. Fast path for receivers with no hidden prototypes.
- if (signature->IsTemplateFor(js_obj_receiver)) return receiver;
- if (!js_obj_receiver->map()->has_hidden_prototype()) return JSReceiver();
+ if (signature.IsTemplateFor(js_obj_receiver)) return receiver;
+ if (!js_obj_receiver.map().has_hidden_prototype()) return JSReceiver();
for (PrototypeIterator iter(isolate, js_obj_receiver, kStartAtPrototype,
PrototypeIterator::END_AT_NON_HIDDEN);
!iter.IsAtEnd(); iter.Advance()) {
JSObject current = iter.GetCurrent<JSObject>();
- if (signature->IsTemplateFor(current)) return current;
+ if (signature.IsTemplateFor(current)) return current;
}
return JSReceiver();
}
@@ -53,7 +53,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
JSReceiver raw_holder;
if (is_construct) {
DCHECK(args.receiver()->IsTheHole(isolate));
- if (fun_data->GetInstanceTemplate()->IsUndefined(isolate)) {
+ if (fun_data->GetInstanceTemplate().IsUndefined(isolate)) {
v8::Local<ObjectTemplate> templ =
ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate),
ToApiHandle<v8::FunctionTemplate>(fun_data));
@@ -98,10 +98,10 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
}
Object raw_call_data = fun_data->call_code();
- if (!raw_call_data->IsUndefined(isolate)) {
- DCHECK(raw_call_data->IsCallHandlerInfo());
+ if (!raw_call_data.IsUndefined(isolate)) {
+ DCHECK(raw_call_data.IsCallHandlerInfo());
CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
- Object data_obj = call_data->data();
+ Object data_obj = call_data.data();
FunctionCallbackArguments custom(isolate, data_obj, *function, raw_holder,
*new_target, args.address_of_arg_at(1),
@@ -129,7 +129,7 @@ BUILTIN(HandleApiCall) {
Handle<JSFunction> function = args.target();
Handle<Object> receiver = args.receiver();
Handle<HeapObject> new_target = args.new_target();
- Handle<FunctionTemplateInfo> fun_data(function->shared()->get_api_func_data(),
+ Handle<FunctionTemplateInfo> fun_data(function->shared().get_api_func_data(),
isolate);
if (new_target->IsJSReceiver()) {
RETURN_RESULT_OR_FAILURE(
@@ -171,12 +171,12 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
RuntimeCallCounterId::kInvokeApiFunction);
DCHECK(function->IsFunctionTemplateInfo() ||
(function->IsJSFunction() &&
- JSFunction::cast(*function)->shared()->IsApiFunction()));
+ JSFunction::cast(*function).shared().IsApiFunction()));
// Do proper receiver conversion for non-strict mode api functions.
if (!is_construct && !receiver->IsJSReceiver()) {
if (function->IsFunctionTemplateInfo() ||
- is_sloppy(JSFunction::cast(*function)->shared()->language_mode())) {
+ is_sloppy(JSFunction::cast(*function).shared().language_mode())) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
Object::ConvertReceiver(isolate, receiver),
Object);
@@ -191,7 +191,7 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> fun_data =
function->IsFunctionTemplateInfo()
? Handle<FunctionTemplateInfo>::cast(function)
- : handle(JSFunction::cast(*function)->shared()->get_api_func_data(),
+ : handle(JSFunction::cast(*function).shared().get_api_func_data(),
isolate);
// Construct BuiltinArguments object:
// new target, function, arguments reversed, receiver.
@@ -211,8 +211,8 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
}
DCHECK_EQ(cursor, BuiltinArguments::kPaddingOffset);
argv[BuiltinArguments::kPaddingOffset] =
- ReadOnlyRoots(isolate).the_hole_value()->ptr();
- argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc)->ptr();
+ ReadOnlyRoots(isolate).the_hole_value().ptr();
+ argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc).ptr();
argv[BuiltinArguments::kTargetOffset] = function->ptr();
argv[BuiltinArguments::kNewTargetOffset] = new_target->ptr();
MaybeHandle<Object> result;
@@ -254,12 +254,12 @@ V8_WARN_UNUSED_RESULT static Object HandleApiCallAsFunctionOrConstructor(
// Get the invocation callback from the function descriptor that was
// used to create the called object.
- DCHECK(obj->map()->is_callable());
- JSFunction constructor = JSFunction::cast(obj->map()->GetConstructor());
- DCHECK(constructor->shared()->IsApiFunction());
+ DCHECK(obj.map().is_callable());
+ JSFunction constructor = JSFunction::cast(obj.map().GetConstructor());
+ DCHECK(constructor.shared().IsApiFunction());
Object handler =
- constructor->shared()->get_api_func_data()->GetInstanceCallHandler();
- DCHECK(!handler->IsUndefined(isolate));
+ constructor.shared().get_api_func_data().GetInstanceCallHandler();
+ DCHECK(!handler.IsUndefined(isolate));
CallHandlerInfo call_data = CallHandlerInfo::cast(handler);
// Get the data for the call and perform the callback.
@@ -267,7 +267,7 @@ V8_WARN_UNUSED_RESULT static Object HandleApiCallAsFunctionOrConstructor(
{
HandleScope scope(isolate);
LOG(isolate, ApiObjectAccess("call non-function", obj));
- FunctionCallbackArguments custom(isolate, call_data->data(), constructor,
+ FunctionCallbackArguments custom(isolate, call_data.data(), constructor,
obj, new_target, args.address_of_arg_at(1),
args.length() - 1);
Handle<Object> result_handle = custom.Call(call_data);
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.cc b/deps/v8/src/builtins/builtins-arguments-gen.cc
index 2d25cdc32a..6cc9fd9623 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.cc
+++ b/deps/v8/src/builtins/builtins-arguments-gen.cc
@@ -4,20 +4,20 @@
#include "src/builtins/builtins-arguments-gen.h"
-#include "src/arguments.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/frame-constants.h"
-#include "src/interface-descriptors.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/arguments.h"
+#include "src/execution/frame-constants.h"
#include "src/objects/arguments.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
-typedef compiler::Node Node;
+using Node = compiler::Node;
std::tuple<Node*, Node*, Node*>
ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
@@ -112,9 +112,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
ParameterMode mode = OptimalParameterMode();
Node* zero = IntPtrOrSmiConstant(0, mode);
- ArgumentsBuiltinsFromDSLAssembler::ArgumentsInfo info =
- GetArgumentsFrameAndCount(CAST(context),
- UncheckedCast<JSFunction>(function));
+ TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(
+ CAST(context), UncheckedCast<JSFunction>(function));
VARIABLE(result, MachineRepresentation::kTagged);
Label no_rest_parameters(this), runtime(this, Label::kDeferred),
@@ -167,9 +166,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
ParameterMode mode = OptimalParameterMode();
Node* zero = IntPtrOrSmiConstant(0, mode);
- ArgumentsBuiltinsFromDSLAssembler::ArgumentsInfo info =
- GetArgumentsFrameAndCount(CAST(context),
- UncheckedCast<JSFunction>(function));
+ TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(
+ CAST(context), UncheckedCast<JSFunction>(function));
GotoIfFixedArraySizeDoesntFitInNewSpace(
info.argument_count, &runtime,
@@ -216,9 +214,8 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
Label done(this, &result), empty(this), no_parameters(this),
runtime(this, Label::kDeferred);
- ArgumentsBuiltinsFromDSLAssembler::ArgumentsInfo info =
- GetArgumentsFrameAndCount(CAST(context),
- UncheckedCast<JSFunction>(function));
+ TorqueStructArgumentsInfo info = GetArgumentsFrameAndCount(
+ CAST(context), UncheckedCast<JSFunction>(function));
GotoIf(WordEqual(info.argument_count, zero), &empty);
diff --git a/deps/v8/src/builtins/builtins-arguments-gen.h b/deps/v8/src/builtins/builtins-arguments-gen.h
index 0f921c1ca6..4eeae4bf86 100644
--- a/deps/v8/src/builtins/builtins-arguments-gen.h
+++ b/deps/v8/src/builtins/builtins-arguments-gen.h
@@ -5,21 +5,19 @@
#ifndef V8_BUILTINS_BUILTINS_ARGUMENTS_GEN_H_
#define V8_BUILTINS_BUILTINS_ARGUMENTS_GEN_H_
-#include "src/code-stub-assembler.h"
-#include "torque-generated/builtins-arguments-from-dsl-gen.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
-typedef compiler::Node Node;
-typedef compiler::CodeAssemblerState CodeAssemblerState;
-typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+using Node = compiler::Node;
+using CodeAssemblerState = compiler::CodeAssemblerState;
+using CodeAssemblerLabel = compiler::CodeAssemblerLabel;
-class ArgumentsBuiltinsAssembler : public CodeStubAssembler,
- public ArgumentsBuiltinsFromDSLAssembler {
+class ArgumentsBuiltinsAssembler : public CodeStubAssembler {
public:
explicit ArgumentsBuiltinsAssembler(CodeAssemblerState* state)
- : CodeStubAssembler(state), ArgumentsBuiltinsFromDSLAssembler(state) {}
+ : CodeStubAssembler(state) {}
Node* EmitFastNewStrictArguments(Node* context, Node* function);
Node* EmitFastNewSloppyArguments(Node* context, Node* function);
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 57f2f776ea..29bcae6feb 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -9,19 +9,18 @@
#include "src/builtins/builtins-typed-array-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
-#include "src/frame-constants.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/execution/frame-constants.h"
#include "src/heap/factory-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/property-cell.h"
-#include "torque-generated/builtins-typed-array-createtypedarray-from-dsl-gen.h"
namespace v8 {
namespace internal {
using Node = compiler::Node;
-using IteratorRecord = IteratorBuiltinsFromDSLAssembler::IteratorRecord;
+using IteratorRecord = TorqueStructIteratorRecord;
ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
compiler::CodeAssemblerState* state)
@@ -37,15 +36,14 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
TNode<Smi> length = CAST(len_);
const char* method_name = "%TypedArray%.prototype.map";
- TypedArrayCreatetypedarrayBuiltinsFromDSLAssembler typedarray_asm(state());
- TNode<JSTypedArray> a = typedarray_asm.TypedArraySpeciesCreateByLength(
+ TNode<JSTypedArray> a = TypedArraySpeciesCreateByLength(
context(), method_name, original_array, length);
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
- CSA_ASSERT(this, SmiLessThanOrEqual(CAST(len_), LoadJSTypedArrayLength(a)));
+ CSA_ASSERT(this, UintPtrLessThanOrEqual(SmiUntag(CAST(len_)),
+ LoadJSTypedArrayLength(a)));
fast_typed_array_target_ =
- Word32Equal(LoadInstanceType(LoadElements(original_array)),
- LoadInstanceType(LoadElements(a)));
+ Word32Equal(LoadElementsKind(original_array), LoadElementsKind(a));
a_.Bind(a);
}
@@ -149,8 +147,8 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
Label throw_not_typed_array(this, Label::kDeferred);
GotoIf(TaggedIsSmi(receiver_), &throw_not_typed_array);
- GotoIfNot(HasInstanceType(CAST(receiver_), JS_TYPED_ARRAY_TYPE),
- &throw_not_typed_array);
+ TNode<Map> typed_array_map = LoadMap(CAST(receiver_));
+ GotoIfNot(IsJSTypedArrayMap(typed_array_map), &throw_not_typed_array);
TNode<JSTypedArray> typed_array = CAST(receiver_);
o_ = typed_array;
@@ -159,7 +157,7 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
LoadJSArrayBufferViewBuffer(typed_array);
ThrowIfArrayBufferIsDetached(context_, array_buffer, name_);
- len_ = LoadJSTypedArrayLength(typed_array);
+ len_ = ChangeUintPtrToTagged(LoadJSTypedArrayLength(typed_array));
Label throw_not_callable(this, Label::kDeferred);
Label distinguish_types(this);
@@ -177,13 +175,13 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
BIND(&unexpected_instance_type);
Unreachable();
- std::vector<int32_t> instance_types = {
-#define INSTANCE_TYPE(Type, type, TYPE, ctype) FIXED_##TYPE##_ARRAY_TYPE,
- TYPED_ARRAYS(INSTANCE_TYPE)
-#undef INSTANCE_TYPE
+ std::vector<int32_t> elements_kinds = {
+#define ELEMENTS_KIND(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
+ TYPED_ARRAYS(ELEMENTS_KIND)
+#undef ELEMENTS_KIND
};
std::list<Label> labels;
- for (size_t i = 0; i < instance_types.size(); ++i) {
+ for (size_t i = 0; i < elements_kinds.size(); ++i) {
labels.emplace_back(this);
}
std::vector<Label*> label_ptrs;
@@ -201,16 +199,15 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
k_.Bind(NumberDec(len()));
}
CSA_ASSERT(this, IsSafeInteger(k()));
- Node* instance_type = LoadInstanceType(LoadElements(typed_array));
- Switch(instance_type, &unexpected_instance_type, instance_types.data(),
+ TNode<Int32T> elements_kind = LoadMapElementsKind(typed_array_map);
+ Switch(elements_kind, &unexpected_instance_type, elements_kinds.data(),
label_ptrs.data(), labels.size());
size_t i = 0;
for (auto it = labels.begin(); it != labels.end(); ++i, ++it) {
BIND(&*it);
Label done(this);
- source_elements_kind_ = ElementsKindForInstanceType(
- static_cast<InstanceType>(instance_types[i]));
+ source_elements_kind_ = static_cast<ElementsKind>(elements_kinds[i]);
// TODO(tebbi): Silently cancelling the loop on buffer detachment is a
// spec violation. Should go to &throw_detached and throw a TypeError
// instead.
@@ -224,21 +221,6 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
}
}
- ElementsKind ArrayBuiltinsAssembler::ElementsKindForInstanceType(
- InstanceType type) {
- switch (type) {
-#define INSTANCE_TYPE_TO_ELEMENTS_KIND(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- return TYPE##_ELEMENTS;
-
- TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENTS_KIND)
-#undef INSTANCE_TYPE_TO_ELEMENTS_KIND
-
- default:
- UNREACHABLE();
- }
- }
-
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
Node* array_buffer, const CallResultProcessor& processor, Label* detached,
ForEachDirection direction, TNode<JSTypedArray> typed_array) {
@@ -246,13 +228,7 @@ ArrayBuiltinsAssembler::ArrayBuiltinsAssembler(
FastLoopBody body = [&](Node* index) {
GotoIf(IsDetachedBuffer(array_buffer), detached);
- Node* elements = LoadElements(typed_array);
- Node* base_ptr =
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* external_ptr =
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
- MachineType::Pointer());
- Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
Node* value = LoadFixedTypedArrayElementAsTagged(
data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
k_.Bind(index);
@@ -551,8 +527,8 @@ TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) {
TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
CSA_ASSERT(this,
- Word32Or(Word32BinaryNot(
- IsHoleyFastElementsKind(LoadElementsKind(array))),
+ Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
+ LoadElementsKind(array))),
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
ParameterMode mode = OptimalParameterMode();
@@ -571,8 +547,8 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) {
TNode<JSArray> array = CAST(Parameter(Descriptor::kSource));
CSA_ASSERT(this,
- Word32Or(Word32BinaryNot(
- IsHoleyFastElementsKind(LoadElementsKind(array))),
+ Word32Or(Word32BinaryNot(IsHoleyFastElementsKindForRead(
+ LoadElementsKind(array))),
Word32BinaryNot(IsNoElementsProtectorCellInvalid())));
ParameterMode mode = OptimalParameterMode();
@@ -936,7 +912,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
// Take slow path if not a JSArray, if retrieving elements requires
// traversing prototype, or if access checks are required.
- BranchIfFastJSArray(receiver, context, &init_index, &call_runtime);
+ BranchIfFastJSArrayForRead(receiver, context, &init_index, &call_runtime);
BIND(&init_index);
VARIABLE(index_var, MachineType::PointerRepresentation(), intptr_zero);
@@ -994,12 +970,16 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(PACKED_ELEMENTS == 2);
STATIC_ASSERT(HOLEY_ELEMENTS == 3);
- GotoIf(Uint32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_ELEMENTS)),
+ GotoIf(IsElementsKindLessThanOrEqual(elements_kind, HOLEY_ELEMENTS),
&if_smiorobjects);
- GotoIf(Word32Equal(elements_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
- &if_packed_doubles);
- GotoIf(Word32Equal(elements_kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
+ GotoIf(
+ ElementsKindEqual(elements_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
+ &if_packed_doubles);
+ GotoIf(ElementsKindEqual(elements_kind, Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
&if_holey_doubles);
+ GotoIf(
+ IsElementsKindLessThanOrEqual(elements_kind, LAST_FROZEN_ELEMENTS_KIND),
+ &if_smiorobjects);
Goto(&return_not_found);
BIND(&if_smiorobjects);
@@ -1637,6 +1617,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
BIND(&if_typedarray);
{
// If {array} is a JSTypedArray, the {index} must always be a Smi.
+ // TODO(v8:4153): Update this and the relevant TurboFan code.
CSA_ASSERT(this, TaggedIsSmi(index));
// Check that the {array}s buffer wasn't detached.
@@ -1646,8 +1627,9 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
// [[ArrayIteratorNextIndex]] anymore, since a JSTypedArray's
// length cannot change anymore, so this {iterator} will never
// produce values again anyways.
- TNode<Smi> length = LoadJSTypedArrayLength(CAST(array));
- GotoIfNot(SmiBelow(CAST(index), length), &allocate_iterator_result);
+ TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(array));
+ GotoIfNot(UintPtrLessThan(SmiUntag(CAST(index)), length),
+ &allocate_iterator_result);
StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
SmiInc(CAST(index)));
@@ -1660,14 +1642,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
&allocate_iterator_result);
TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
- Node* elements = LoadElements(CAST(array));
- Node* base_ptr =
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* external_ptr =
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
- MachineType::Pointer());
- TNode<WordT> data_ptr =
- IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(CAST(array));
var_value.Bind(LoadFixedTypedArrayElementAsTagged(data_ptr, CAST(index),
elements_kind));
Goto(&allocate_entry_if_needed);
@@ -2200,7 +2175,7 @@ void ArrayBuiltinsAssembler::GenerateConstructor(
void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
ElementsKind kind, AllocationSiteOverrideMode mode) {
- typedef ArrayNoArgumentConstructorDescriptor Descriptor;
+ using Descriptor = ArrayNoArgumentConstructorDescriptor;
Node* native_context = LoadObjectField(Parameter(Descriptor::kFunction),
JSFunction::kContextOffset);
bool track_allocation_site =
@@ -2216,7 +2191,7 @@ void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
ElementsKind kind, AllocationSiteOverrideMode mode) {
- typedef ArraySingleArgumentConstructorDescriptor Descriptor;
+ using Descriptor = ArraySingleArgumentConstructorDescriptor;
Node* context = Parameter(Descriptor::kContext);
Node* function = Parameter(Descriptor::kFunction);
Node* native_context = LoadObjectField(function, JSFunction::kContextOffset);
@@ -2310,7 +2285,7 @@ GENERATE_ARRAY_CTOR(SingleArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS,
#undef GENERATE_ARRAY_CTOR
TF_BUILTIN(InternalArrayNoArgumentConstructor_Packed, ArrayBuiltinsAssembler) {
- typedef ArrayNoArgumentConstructorDescriptor Descriptor;
+ using Descriptor = ArrayNoArgumentConstructorDescriptor;
TNode<Map> array_map =
CAST(LoadObjectField(Parameter(Descriptor::kFunction),
JSFunction::kPrototypeOrInitialMapOffset));
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 8a9846fac2..6b8c704038 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
#define V8_BUILTINS_BUILTINS_ARRAY_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -14,14 +14,13 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
public:
explicit ArrayBuiltinsAssembler(compiler::CodeAssemblerState* state);
- typedef std::function<void(ArrayBuiltinsAssembler* masm)>
- BuiltinResultGenerator;
+ using BuiltinResultGenerator =
+ std::function<void(ArrayBuiltinsAssembler* masm)>;
- typedef std::function<Node*(ArrayBuiltinsAssembler* masm, Node* k_value,
- Node* k)>
- CallResultProcessor;
+ using CallResultProcessor = std::function<Node*(ArrayBuiltinsAssembler* masm,
+ Node* k_value, Node* k)>;
- typedef std::function<void(ArrayBuiltinsAssembler* masm)> PostLoopAction;
+ using PostLoopAction = std::function<void(ArrayBuiltinsAssembler* masm)>;
void FindResultGenerator();
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index 921b2c1f2f..e6ab965a7e 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -4,19 +4,19 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/contexts.h"
-#include "src/counters.h"
+#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
-#include "src/elements-inl.h"
-#include "src/global-handles.h"
-#include "src/isolate.h"
-#include "src/lookup.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
+#include "src/logging/counters.h"
+#include "src/objects/contexts.h"
+#include "src/objects/elements-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/prototype.h"
#include "src/objects/smi.h"
-#include "src/prototype.h"
namespace v8 {
namespace internal {
@@ -29,8 +29,8 @@ inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
}
inline bool HasSimpleElements(JSObject current) {
- return !current->map()->IsCustomElementsReceiverMap() &&
- !current->GetElementsAccessor()->HasAccessors(current);
+ return !current.map().IsCustomElementsReceiverMap() &&
+ !current.GetElementsAccessor()->HasAccessors(current);
}
inline bool HasOnlySimpleReceiverElements(Isolate* isolate, JSObject receiver) {
@@ -43,7 +43,7 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver receiver) {
DisallowHeapAllocation no_gc;
PrototypeIterator iter(isolate, receiver, kStartAtReceiver);
for (; !iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent()->IsJSProxy()) return false;
+ if (iter.GetCurrent().IsJSProxy()) return false;
JSObject current = iter.GetCurrent<JSObject>();
if (!HasSimpleElements(current)) return false;
}
@@ -70,8 +70,8 @@ void MatchArrayElementsKindToArguments(Isolate* isolate, Handle<JSArray> array,
int last_arg_index = std::min(first_arg_index + num_arguments, args_length);
for (int i = first_arg_index; i < last_arg_index; i++) {
Object arg = (*args)[i];
- if (arg->IsHeapObject()) {
- if (arg->IsHeapNumber()) {
+ if (arg.IsHeapObject()) {
+ if (arg.IsHeapNumber()) {
target_kind = PACKED_DOUBLE_ELEMENTS;
} else {
target_kind = PACKED_ELEMENTS;
@@ -101,7 +101,7 @@ inline bool EnsureJSArrayWithWritableFastElements(Isolate* isolate,
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
ElementsKind origin_kind = array->GetElementsKind();
if (IsDictionaryElementsKind(origin_kind)) return false;
- if (!array->map()->is_extensible()) return false;
+ if (!array->map().is_extensible()) return false;
if (args == nullptr) return true;
// If there may be elements accessors in the prototype chain, the fast path
@@ -148,7 +148,7 @@ V8_WARN_UNUSED_RESULT Maybe<double> GetLengthProperty(
Isolate* isolate, Handle<JSReceiver> receiver) {
if (receiver->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- double length = array->length()->Number();
+ double length = array->length().Number();
DCHECK(0 <= length && length <= kMaxSafeInteger);
return Just(length);
@@ -373,7 +373,7 @@ BUILTIN(ArrayPush) {
// Fast Elements Path
int to_add = args.length() - 1;
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- uint32_t len = static_cast<uint32_t>(array->length()->Number());
+ uint32_t len = static_cast<uint32_t>(array->length().Number());
if (to_add == 0) return *isolate->factory()->NewNumberFromUint(len);
// Currently fixed arrays cannot grow too big, so we should never hit this.
@@ -457,7 +457,7 @@ BUILTIN(ArrayPop) {
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- uint32_t len = static_cast<uint32_t>(array->length()->Number());
+ uint32_t len = static_cast<uint32_t>(array->length().Number());
if (len == 0) return ReadOnlyRoots(isolate).undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
@@ -597,7 +597,7 @@ BUILTIN(ArrayUnshift) {
Handle<JSArray> array = Handle<JSArray>::cast(args.receiver());
// These are checked in the Torque builtin.
- DCHECK(array->map()->is_extensible());
+ DCHECK(array->map().is_extensible());
DCHECK(!IsDictionaryElementsKind(array->GetElementsKind()));
DCHECK(IsJSArrayFastElementMovingAllowed(isolate, *array));
DCHECK(!isolate->IsAnyInitialArrayPrototype(array));
@@ -644,7 +644,7 @@ class ArrayConcatVisitor {
IsFixedArrayField::encode(storage->IsFixedArray()) |
HasSimpleElementsField::encode(
storage->IsFixedArray() ||
- !storage->map()->IsCustomElementsReceiverMap())) {
+ !storage->map().IsCustomElementsReceiverMap())) {
DCHECK(!(this->fast_elements() && !is_fixed_array()));
}
@@ -708,7 +708,7 @@ class ArrayConcatVisitor {
// provided-for index range, go to dictionary mode now.
if (fast_elements() &&
index_offset_ >
- static_cast<uint32_t>(FixedArrayBase::cast(*storage_)->length())) {
+ static_cast<uint32_t>(FixedArrayBase::cast(*storage_).length())) {
SetDictionaryMode();
}
}
@@ -811,7 +811,7 @@ class ArrayConcatVisitor {
uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
DisallowHeapAllocation no_gc;
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
+ uint32_t length = static_cast<uint32_t>(array->length().Number());
int element_count = 0;
switch (array->GetElementsKind()) {
case PACKED_SMI_ELEMENTS:
@@ -819,6 +819,8 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case HOLEY_ELEMENTS: {
// Fast elements can't have lengths that are not representable by
// a 32-bit signed integer.
@@ -826,7 +828,7 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
int fast_length = static_cast<int>(length);
FixedArray elements = FixedArray::cast(array->elements());
for (int i = 0; i < fast_length; i++) {
- if (!elements->get(i)->IsTheHole(isolate)) element_count++;
+ if (!elements.get(i).IsTheHole(isolate)) element_count++;
}
break;
}
@@ -836,23 +838,23 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
// a 32-bit signed integer.
DCHECK_GE(static_cast<int32_t>(FixedDoubleArray::kMaxLength), 0);
int fast_length = static_cast<int>(length);
- if (array->elements()->IsFixedArray()) {
- DCHECK_EQ(FixedArray::cast(array->elements())->length(), 0);
+ if (array->elements().IsFixedArray()) {
+ DCHECK_EQ(FixedArray::cast(array->elements()).length(), 0);
break;
}
FixedDoubleArray elements = FixedDoubleArray::cast(array->elements());
for (int i = 0; i < fast_length; i++) {
- if (!elements->is_the_hole(i)) element_count++;
+ if (!elements.is_the_hole(i)) element_count++;
}
break;
}
case DICTIONARY_ELEMENTS: {
NumberDictionary dictionary = NumberDictionary::cast(array->elements());
- int capacity = dictionary->Capacity();
+ int capacity = dictionary.Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < capacity; i++) {
- Object key = dictionary->KeyAt(i);
- if (dictionary->IsKey(roots, key)) {
+ Object key = dictionary.KeyAt(i);
+ if (dictionary.IsKey(roots, key)) {
element_count++;
}
}
@@ -886,13 +888,15 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case HOLEY_ELEMENTS: {
DisallowHeapAllocation no_gc;
FixedArray elements = FixedArray::cast(object->elements());
- uint32_t length = static_cast<uint32_t>(elements->length());
+ uint32_t length = static_cast<uint32_t>(elements.length());
if (range < length) length = range;
for (uint32_t i = 0; i < length; i++) {
- if (!elements->get(i)->IsTheHole(isolate)) {
+ if (!elements.get(i).IsTheHole(isolate)) {
indices->push_back(i);
}
}
@@ -900,8 +904,8 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
}
case HOLEY_DOUBLE_ELEMENTS:
case PACKED_DOUBLE_ELEMENTS: {
- if (object->elements()->IsFixedArray()) {
- DCHECK_EQ(object->elements()->length(), 0);
+ if (object->elements().IsFixedArray()) {
+ DCHECK_EQ(object->elements().length(), 0);
break;
}
Handle<FixedDoubleArray> elements(
@@ -918,13 +922,13 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
case DICTIONARY_ELEMENTS: {
DisallowHeapAllocation no_gc;
NumberDictionary dict = NumberDictionary::cast(object->elements());
- uint32_t capacity = dict->Capacity();
+ uint32_t capacity = dict.Capacity();
ReadOnlyRoots roots(isolate);
FOR_WITH_HANDLE_SCOPE(isolate, uint32_t, j = 0, j, j < capacity, j++, {
- Object k = dict->KeyAt(j);
- if (!dict->IsKey(roots, k)) continue;
- DCHECK(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
+ Object k = dict.KeyAt(j);
+ if (!dict.IsKey(roots, k)) continue;
+ DCHECK(k.IsNumber());
+ uint32_t index = static_cast<uint32_t>(k.Number());
if (index < range) {
indices->push_back(index);
}
@@ -936,7 +940,9 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
{
- uint32_t length = static_cast<uint32_t>(object->elements()->length());
+ // TODO(bmeurer, v8:4153): Change this to size_t later.
+ uint32_t length =
+ static_cast<uint32_t>(Handle<JSTypedArray>::cast(object)->length());
if (range <= length) {
length = range;
// We will add all indices, so we might as well clear it first
@@ -966,7 +972,7 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
case SLOW_STRING_WRAPPER_ELEMENTS: {
DCHECK(object->IsJSValue());
Handle<JSValue> js_value = Handle<JSValue>::cast(object);
- DCHECK(js_value->value()->IsString());
+ DCHECK(js_value->value().IsString());
Handle<String> string(String::cast(js_value->value()), isolate);
uint32_t length = static_cast<uint32_t>(string->length());
uint32_t i = 0;
@@ -1027,7 +1033,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
if (receiver->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- length = static_cast<uint32_t>(array->length()->Number());
+ length = static_cast<uint32_t>(array->length().Number());
} else {
Handle<Object> val;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -1057,6 +1063,8 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case HOLEY_ELEMENTS: {
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
@@ -1088,8 +1096,8 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
if (length == 0) break;
// Run through the elements FixedArray and use HasElement and GetElement
// to check the prototype for missing elements.
- if (array->elements()->IsFixedArray()) {
- DCHECK_EQ(array->elements()->length(), 0);
+ if (array->elements().IsFixedArray()) {
+ DCHECK_EQ(array->elements().length(), 0);
break;
}
Handle<FixedDoubleArray> elements(
@@ -1165,7 +1173,6 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
case SLOW_STRING_WRAPPER_ELEMENTS:
// |array| is guaranteed to be an array or typed array.
UNREACHABLE();
- break;
}
visitor->increase_index_offset(length);
return true;
@@ -1190,7 +1197,7 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
Isolate* isolate) {
int argument_count = args->length();
- bool is_array_species = *species == isolate->context()->array_function();
+ bool is_array_species = *species == isolate->context().array_function();
// Pass 1: estimate the length and number of elements of the result.
// The actual length can be larger if any of the arguments have getters
@@ -1207,11 +1214,11 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
uint32_t element_estimate;
if (obj->IsJSArray()) {
Handle<JSArray> array(Handle<JSArray>::cast(obj));
- length_estimate = static_cast<uint32_t>(array->length()->Number());
+ length_estimate = static_cast<uint32_t>(array->length().Number());
if (length_estimate != 0) {
ElementsKind array_kind =
GetPackedElementsKind(array->GetElementsKind());
- if (IsPackedFrozenOrSealedElementsKind(array_kind)) {
+ if (IsFrozenOrSealedElementsKind(array_kind)) {
array_kind = PACKED_ELEMENTS;
}
kind = GetMoreGeneralElementsKind(kind, array_kind);
@@ -1264,16 +1271,16 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
} else {
DisallowHeapAllocation no_gc;
JSArray array = JSArray::cast(*obj);
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
- switch (array->GetElementsKind()) {
+ uint32_t length = static_cast<uint32_t>(array.length().Number());
+ switch (array.GetElementsKind()) {
case HOLEY_DOUBLE_ELEMENTS:
case PACKED_DOUBLE_ELEMENTS: {
// Empty array is FixedArray but not FixedDoubleArray.
if (length == 0) break;
FixedDoubleArray elements =
- FixedDoubleArray::cast(array->elements());
+ FixedDoubleArray::cast(array.elements());
for (uint32_t i = 0; i < length; i++) {
- if (elements->is_the_hole(i)) {
+ if (elements.is_the_hole(i)) {
// TODO(jkummerow/verwaest): We could be a bit more clever
// here: Check if there are no elements/getters on the
// prototype chain, and if so, allow creation of a holey
@@ -1282,7 +1289,7 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
failure = true;
break;
}
- double double_value = elements->get_scalar(i);
+ double double_value = elements.get_scalar(i);
double_storage->set(j, double_value);
j++;
}
@@ -1291,9 +1298,9 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
case HOLEY_SMI_ELEMENTS:
case PACKED_SMI_ELEMENTS: {
Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
- FixedArray elements(FixedArray::cast(array->elements()));
+ FixedArray elements(FixedArray::cast(array.elements()));
for (uint32_t i = 0; i < length; i++) {
- Object element = elements->get(i);
+ Object element = elements.get(i);
if (element == the_hole) {
failure = true;
break;
@@ -1305,6 +1312,8 @@ Object Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
break;
}
case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
@@ -1376,9 +1385,8 @@ bool IsSimpleArray(Isolate* isolate, Handle<JSArray> obj) {
DisallowHeapAllocation no_gc;
Map map = obj->map();
// If there is only the 'length' property we are fine.
- if (map->prototype() ==
- isolate->native_context()->initial_array_prototype() &&
- map->NumberOfOwnDescriptors() == 1) {
+ if (map.prototype() == isolate->native_context()->initial_array_prototype() &&
+ map.NumberOfOwnDescriptors() == 1) {
return true;
}
// TODO(cbruni): slower lookup for array subclasses and support slow
@@ -1405,12 +1413,12 @@ MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate,
// and calculating total length.
for (int i = 0; i < n_arguments; i++) {
Object arg = (*args)[i];
- if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
+ if (!arg.IsJSArray()) return MaybeHandle<JSArray>();
if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
return MaybeHandle<JSArray>();
}
// TODO(cbruni): support fast concatenation of DICTIONARY_ELEMENTS.
- if (!JSObject::cast(arg)->HasFastElements()) {
+ if (!JSObject::cast(arg).HasFastElements()) {
return MaybeHandle<JSArray>();
}
Handle<JSArray> array(JSArray::cast(arg), isolate);
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index a966122c97..9ecb1815bc 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -4,12 +4,12 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/conversions.h"
-#include "src/counters.h"
+#include "src/handles/maybe-handles-inl.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/maybe-handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -43,7 +43,7 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
SharedFlag shared_flag =
- (*target == target->native_context()->array_buffer_fun())
+ (*target == target->native_context().array_buffer_fun())
? SharedFlag::kNotShared
: SharedFlag::kShared;
if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result),
@@ -61,12 +61,12 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
BUILTIN(ArrayBufferConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
- DCHECK(*target == target->native_context()->array_buffer_fun() ||
- *target == target->native_context()->shared_array_buffer_fun());
+ DCHECK(*target == target->native_context().array_buffer_fun() ||
+ *target == target->native_context().shared_array_buffer_fun());
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- handle(target->shared()->Name(), isolate)));
+ handle(target->shared().Name(), isolate)));
}
// [[Construct]]
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
@@ -120,7 +120,7 @@ BUILTIN(ArrayBufferIsView) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
Object arg = args[1];
- return isolate->heap()->ToBoolean(arg->IsJSArrayBufferView());
+ return isolate->heap()->ToBoolean(arg.IsJSArrayBufferView());
}
static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
@@ -203,7 +203,7 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
Handle<Object> new_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, new_obj, Execution::New(isolate, ctor, argc, argv.start()));
+ isolate, new_obj, Execution::New(isolate, ctor, argc, argv.begin()));
new_ = Handle<JSReceiver>::cast(new_obj);
}
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index eb17d743a7..03df1aaaad 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -5,10 +5,10 @@
#include "src/builtins/builtins-async-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-async-gen.cc b/deps/v8/src/builtins/builtins-async-gen.cc
index 7ba72844e8..6c04037a63 100644
--- a/deps/v8/src/builtins/builtins-async-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-gen.cc
@@ -190,15 +190,7 @@ Node* AsyncBuiltinsAssembler::Await(Node* context, Node* generator, Node* value,
Label if_old(this), if_new(this), done(this),
if_slow_constructor(this, Label::kDeferred);
- STATIC_ASSERT(sizeof(FLAG_harmony_await_optimization) == 1);
- TNode<Word32T> flag_value = UncheckedCast<Word32T>(Load(
- MachineType::Uint8(),
- ExternalConstant(
- ExternalReference::address_of_harmony_await_optimization_flag())));
- GotoIf(Word32Equal(flag_value, Int32Constant(0)), &if_old);
-
- // We're running with --harmony-await-optimization enabled, which means
- // we do the `PromiseResolve(%Promise%,value)` avoiding to unnecessarily
+ // We do the `PromiseResolve(%Promise%,value)` avoiding to unnecessarily
// create wrapper promises. Now if {value} is already a promise with the
// intrinsics %Promise% constructor as its "constructor", we don't need
// to allocate the wrapper promise and can just use the `AwaitOptimized`
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index de19d24bac..d14e811db8 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -5,9 +5,9 @@
#include "src/builtins/builtins-async-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/frames-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/execution/frames-inl.h"
#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
@@ -389,7 +389,7 @@ TF_BUILTIN(AsyncGeneratorAwaitCaught, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
- typedef AsyncGeneratorResumeNextDescriptor Descriptor;
+ using Descriptor = AsyncGeneratorResumeNextDescriptor;
Node* const generator = Parameter(Descriptor::kGenerator);
Node* const context = Parameter(Descriptor::kContext);
@@ -552,7 +552,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
}
TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) {
- typedef AsyncGeneratorRejectDescriptor Descriptor;
+ using Descriptor = AsyncGeneratorRejectDescriptor;
Node* const generator = Parameter(Descriptor::kGenerator);
Node* const value = Parameter(Descriptor::kValue);
Node* const context = Parameter(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index d4937c0e1f..215faa73b1 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -5,9 +5,9 @@
#include "src/builtins/builtins-async-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/frames-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/execution/frames-inl.h"
namespace v8 {
namespace internal {
@@ -25,10 +25,9 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
Variable* var_exception,
const char* method_name);
- typedef std::function<void(Node* const context, Node* const promise,
- Label* if_exception)>
- UndefinedMethodHandler;
- typedef std::function<Node*(Node*)> SyncIteratorNodeGenerator;
+ using UndefinedMethodHandler = std::function<void(
+ Node* const context, Node* const promise, Label* if_exception)>;
+ using SyncIteratorNodeGenerator = std::function<Node*(Node*)>;
void Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
const SyncIteratorNodeGenerator& get_method,
diff --git a/deps/v8/src/builtins/builtins-bigint-gen.cc b/deps/v8/src/builtins/builtins-bigint-gen.cc
index 9b21320086..8a752f2517 100644
--- a/deps/v8/src/builtins/builtins-bigint-gen.cc
+++ b/deps/v8/src/builtins/builtins-bigint-gen.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-bigint.cc b/deps/v8/src/builtins/builtins-bigint.cc
index 5c8a7e4871..a8a847ef47 100644
--- a/deps/v8/src/builtins/builtins-bigint.cc
+++ b/deps/v8/src/builtins/builtins-bigint.cc
@@ -4,9 +4,9 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/conversions.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif
@@ -83,8 +83,8 @@ MaybeHandle<BigInt> ThisBigIntValue(Isolate* isolate, Handle<Object> value,
if (value->IsJSValue()) {
// 2a. Assert: value.[[BigIntData]] is a BigInt value.
// 2b. Return value.[[BigIntData]].
- Object data = JSValue::cast(*value)->value();
- if (data->IsBigInt()) return handle(BigInt::cast(data), isolate);
+ Object data = JSValue::cast(*value).value();
+ if (data.IsBigInt()) return handle(BigInt::cast(data), isolate);
}
// 3. Throw a TypeError exception.
THROW_NEW_ERROR(
diff --git a/deps/v8/src/builtins/builtins-boolean-gen.cc b/deps/v8/src/builtins/builtins-boolean-gen.cc
index 8f723d09cf..30cf7ba0c1 100644
--- a/deps/v8/src/builtins/builtins-boolean-gen.cc
+++ b/deps/v8/src/builtins/builtins-boolean-gen.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/objects/oddball.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
deleted file mode 100644
index b10f013020..0000000000
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils-inl.h"
-#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// ES #sec-boolean-objects
-
-// ES #sec-boolean-constructor
-BUILTIN(BooleanConstructor) {
- HandleScope scope(isolate);
- if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- return isolate->heap()->ToBoolean(value->BooleanValue(isolate));
- }
- // [[Construct]]
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target();
- Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
- DCHECK(*target == target->native_context()->boolean_function());
- Handle<JSObject> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- JSObject::New(target, new_target, Handle<AllocationSite>::null()));
- Handle<JSValue>::cast(result)->set_value(
- isolate->heap()->ToBoolean(value->BooleanValue(isolate)));
- return *result;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 51870cbc4e..05142a8f07 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -6,9 +6,9 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/globals.h"
-#include "src/isolate.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments.h"
#include "src/objects/property-cell.h"
@@ -151,7 +151,7 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
TNode<Int32T> kind = LoadMapElementsKind(arguments_list_map);
- GotoIf(Int32GreaterThan(kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ GotoIf(IsElementsKindGreaterThan(kind, LAST_FROZEN_ELEMENTS_KIND),
&if_runtime);
Branch(Word32And(kind, Int32Constant(1)), &if_holey_array, &if_done);
}
@@ -306,11 +306,13 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
var_elements = LoadElements(spread_array);
// Check elements kind of {spread}.
- GotoIf(Int32LessThan(spread_kind, Int32Constant(PACKED_DOUBLE_ELEMENTS)),
+ GotoIf(IsElementsKindLessThanOrEqual(spread_kind, HOLEY_ELEMENTS),
&if_smiorobject);
+ GotoIf(IsElementsKindLessThanOrEqual(spread_kind, LAST_FAST_ELEMENTS_KIND),
+ &if_double);
Branch(
- Int32GreaterThan(spread_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
- &if_generic, &if_double);
+ IsElementsKindLessThanOrEqual(spread_kind, LAST_FROZEN_ELEMENTS_KIND),
+ &if_smiorobject, &if_generic);
}
BIND(&if_generic);
@@ -478,7 +480,7 @@ TNode<JSReceiver> CallOrConstructBuiltinsAssembler::GetCompatibleReceiver(
// the receiver did not pass the {signature} check.
TNode<Map> holder_map = LoadMap(holder);
var_holder = LoadMapPrototype(holder_map);
- GotoIf(IsSetWord32(LoadMapBitField3(holder_map),
+ GotoIf(IsSetWord32(LoadMapBitField2(holder_map),
Map::HasHiddenPrototypeBit::kMask),
&holder_loop);
ThrowTypeError(context, MessageTemplate::kIllegalInvocation);
diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h
index 5a64d36c34..a15f31dd09 100644
--- a/deps/v8/src/builtins/builtins-call-gen.h
+++ b/deps/v8/src/builtins/builtins-call-gen.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_CALL_GEN_H_
#define V8_BUILTINS_BUILTINS_CALL_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-call.cc b/deps/v8/src/builtins/builtins-call.cc
index bd199218a6..36732ba398 100644
--- a/deps/v8/src/builtins/builtins-call.cc
+++ b/deps/v8/src/builtins/builtins-call.cc
@@ -4,9 +4,9 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-#include "src/handles-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-callsite.cc b/deps/v8/src/builtins/builtins-callsite.cc
index 51580899d2..d98eba4eeb 100644
--- a/deps/v8/src/builtins/builtins-callsite.cc
+++ b/deps/v8/src/builtins/builtins-callsite.cc
@@ -4,10 +4,10 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
#include "src/heap/heap-inl.h" // For ToBoolean.
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc
index 19a6ae0729..b5a9851c70 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.cc
+++ b/deps/v8/src/builtins/builtins-collections-gen.cc
@@ -7,14 +7,12 @@
#include "src/builtins/builtins-constructor-gen.h"
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection.h"
#include "src/objects/ordered-hash-table.h"
-#include "torque-generated/builtins-base-from-dsl-gen.h"
-#include "torque-generated/builtins-collections-from-dsl-gen.h"
namespace v8 {
namespace internal {
@@ -25,11 +23,10 @@ using TNode = compiler::TNode<T>;
template <class T>
using TVariable = compiler::TypedCodeAssemblerVariable<T>;
-class BaseCollectionsAssembler : public CodeStubAssembler,
- public CollectionsBuiltinsFromDSLAssembler {
+class BaseCollectionsAssembler : public CodeStubAssembler {
public:
explicit BaseCollectionsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state), CollectionsBuiltinsFromDSLAssembler(state) {}
+ : CodeStubAssembler(state) {}
virtual ~BaseCollectionsAssembler() = default;
@@ -158,7 +155,7 @@ void BaseCollectionsAssembler::AddConstructorEntry(
var_exception);
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(key_value)));
if (variant == kMap || variant == kWeakMap) {
- BaseBuiltinsFromDSLAssembler::KeyValuePair pair =
+ TorqueStructKeyValuePair pair =
if_may_have_side_effects != nullptr
? LoadKeyValuePairNoSideEffects(context, key_value,
if_may_have_side_effects)
@@ -318,7 +315,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable(
TNode<Object> add_func = GetAddFunction(variant, context, collection);
IteratorBuiltinsAssembler iterator_assembler(this->state());
- IteratorBuiltinsAssembler::IteratorRecord iterator =
+ TorqueStructIteratorRecord iterator =
iterator_assembler.GetIterator(context, iterable);
CSA_ASSERT(this, Word32BinaryNot(IsUndefined(iterator.object)));
@@ -598,8 +595,8 @@ class CollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
// Transitions the iterator to the non obsolete backing store.
// This is a NOP if the [table] is not obsolete.
- typedef std::function<void(Node* const table, Node* const index)>
- UpdateInTransition;
+ using UpdateInTransition =
+ std::function<void(Node* const table, Node* const index)>;
template <typename TableType>
std::pair<TNode<TableType>, TNode<IntPtrT>> Transition(
TNode<TableType> const table, TNode<IntPtrT> const index,
@@ -830,7 +827,7 @@ void CollectionsBuiltinsAssembler::SameValueZeroSmi(Node* key_smi,
void CollectionsBuiltinsAssembler::BranchIfMapIteratorProtectorValid(
Label* if_true, Label* if_false) {
Node* protector_cell = LoadRoot(RootIndex::kMapIteratorProtector);
- DCHECK(isolate()->heap()->map_iterator_protector()->IsPropertyCell());
+ DCHECK(isolate()->heap()->map_iterator_protector().IsPropertyCell());
Branch(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
SmiConstant(Isolate::kProtectorValid)),
if_true, if_false);
@@ -887,7 +884,7 @@ void BranchIfIterableWithOriginalKeyOrValueMapIterator(
void CollectionsBuiltinsAssembler::BranchIfSetIteratorProtectorValid(
Label* if_true, Label* if_false) {
Node* const protector_cell = LoadRoot(RootIndex::kSetIteratorProtector);
- DCHECK(isolate()->heap()->set_iterator_protector()->IsPropertyCell());
+ DCHECK(isolate()->heap()->set_iterator_protector().IsPropertyCell());
Branch(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
SmiConstant(Isolate::kProtectorValid)),
if_true, if_false);
@@ -1576,8 +1573,8 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashMapNewEntry(
Node* const hash, Node* const number_of_buckets, Node* const occupancy) {
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
- Node* const bucket_entry = UnsafeLoadFixedArrayElement(
- table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize);
+ TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement(
+ table, bucket, OrderedHashMap::HashTableStartIndex() * kTaggedSize));
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
@@ -1750,8 +1747,8 @@ void CollectionsBuiltinsAssembler::StoreOrderedHashSetNewEntry(
Node* const number_of_buckets, Node* const occupancy) {
Node* const bucket =
WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1)));
- Node* const bucket_entry = UnsafeLoadFixedArrayElement(
- table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize);
+ TNode<Smi> bucket_entry = CAST(UnsafeLoadFixedArrayElement(
+ table, bucket, OrderedHashSet::HashTableStartIndex() * kTaggedSize));
// Store the entry elements.
Node* const entry_start = IntPtrAdd(
@@ -2299,8 +2296,8 @@ class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler {
// Builds code that finds the EphemeronHashTable entry for a {key} using the
// comparison code generated by {key_compare}. The key index is returned if
// the {key} is found.
- typedef std::function<void(TNode<Object> entry_key, Label* if_same)>
- KeyComparator;
+ using KeyComparator =
+ std::function<void(TNode<Object> entry_key, Label* if_same)>;
TNode<IntPtrT> FindKeyIndex(TNode<HeapObject> table, TNode<IntPtrT> key_hash,
TNode<IntPtrT> entry_mask,
const KeyComparator& key_compare);
diff --git a/deps/v8/src/builtins/builtins-collections-gen.h b/deps/v8/src/builtins/builtins-collections-gen.h
index a78ad5a4a7..2bde108e9a 100644
--- a/deps/v8/src/builtins/builtins-collections-gen.h
+++ b/deps/v8/src/builtins/builtins-collections-gen.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_COLLECTIONS_GEN_H_
#define V8_BUILTINS_BUILTINS_COLLECTIONS_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-collections.cc b/deps/v8/src/builtins/builtins-collections.cc
index be7a47290b..d201091a61 100644
--- a/deps/v8/src/builtins/builtins-collections.cc
+++ b/deps/v8/src/builtins/builtins-collections.cc
@@ -4,9 +4,9 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-console-gen.cc b/deps/v8/src/builtins/builtins-console-gen.cc
index c3a997af9a..8dc7e5e8f6 100644
--- a/deps/v8/src/builtins/builtins-console-gen.cc
+++ b/deps/v8/src/builtins/builtins-console-gen.cc
@@ -4,9 +4,9 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
-#include "src/frame-constants.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
index edfb0a45c7..973f1785d1 100644
--- a/deps/v8/src/builtins/builtins-console.cc
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
#include "src/debug/interface-types.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -67,7 +67,7 @@ void LogTimerEvent(Isolate* isolate, BuiltinArguments args,
HandleScope scope(isolate);
std::unique_ptr<char[]> name;
const char* raw_name = "default";
- if (args.length() > 1 && args[1]->IsString()) {
+ if (args.length() > 1 && args[1].IsString()) {
// Try converting the first argument to a string.
name = args.at<String>(1)->ToCString();
raw_name = name.get();
@@ -119,9 +119,9 @@ void InstallContextFunction(Isolate* isolate, Handle<JSObject> target,
name_string, builtin_id, i::LanguageMode::kSloppy);
Handle<JSFunction> fun = factory->NewFunction(args);
- fun->shared()->set_native(true);
- fun->shared()->DontAdaptArguments();
- fun->shared()->set_length(1);
+ fun->shared().set_native(true);
+ fun->shared().DontAdaptArguments();
+ fun->shared().set_length(1);
JSObject::AddProperty(isolate, fun, factory->console_context_id_symbol(),
handle(Smi::FromInt(context_id), isolate), NONE);
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 5de4eca89e..a725f3c4a1 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -9,12 +9,12 @@
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/counters.h"
-#include "src/interface-descriptors.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -54,7 +54,7 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) {
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
-typedef compiler::Node Node;
+using Node = compiler::Node;
TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
Node* shared_function_info = Parameter(Descriptor::kSharedFunctionInfo);
@@ -738,8 +738,7 @@ TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) {
}
}
-TF_BUILTIN(GenericConstructorLazyDeoptContinuation,
- ConstructorBuiltinsAssembler) {
+TF_BUILTIN(GenericLazyDeoptContinuation, ConstructorBuiltinsAssembler) {
Node* result = Parameter(Descriptor::kResult);
Return(result);
}
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 3079d9a4f7..9093a5a77b 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_CONSTRUCTOR_GEN_H_
#define V8_BUILTINS_BUILTINS_CONSTRUCTOR_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-constructor.h b/deps/v8/src/builtins/builtins-constructor.h
index 428c8cea8e..e3fc416771 100644
--- a/deps/v8/src/builtins/builtins-constructor.h
+++ b/deps/v8/src/builtins/builtins-constructor.h
@@ -5,10 +5,10 @@
#ifndef V8_BUILTINS_BUILTINS_CONSTRUCTOR_H_
#define V8_BUILTINS_BUILTINS_CONSTRUCTOR_H_
-#include "src/contexts.h"
-#include "src/objects.h"
+#include "src/objects/contexts.h"
#include "src/objects/dictionary.h"
#include "src/objects/js-array.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 20344cf8ef..bc7e349ce1 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -4,9 +4,9 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
namespace v8 {
@@ -131,7 +131,7 @@ TF_BUILTIN(ToName, CodeStubAssembler) {
{
// We don't have a fast-path for BigInt currently, so just
// tail call to the %ToString runtime function here for now.
- TailCallRuntime(Runtime::kToString, context, input);
+ TailCallRuntime(Runtime::kToStringRT, context, input);
}
BIND(&if_inputisname);
@@ -211,14 +211,6 @@ TF_BUILTIN(NumberToString, CodeStubAssembler) {
Return(NumberToString(input));
}
-// ES section #sec-tostring
-TF_BUILTIN(ToString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* input = Parameter(Descriptor::kArgument);
-
- Return(ToString(context, input));
-}
-
// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive(
Node* context, Node* input, OrdinaryToPrimitiveHint hint) {
diff --git a/deps/v8/src/builtins/builtins-data-view-gen.h b/deps/v8/src/builtins/builtins-data-view-gen.h
index 3be41ddf94..eeb84f34db 100644
--- a/deps/v8/src/builtins/builtins-data-view-gen.h
+++ b/deps/v8/src/builtins/builtins-data-view-gen.h
@@ -5,9 +5,9 @@
#ifndef V8_BUILTINS_BUILTINS_DATA_VIEW_GEN_H_
#define V8_BUILTINS_BUILTINS_DATA_VIEW_GEN_H_
-#include "src/code-stub-assembler.h"
-#include "src/elements-kind.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/objects/bigint.h"
+#include "src/objects/elements-kind.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index f40cd0f68e..cf46a9f031 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -4,12 +4,12 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/conversions.h"
-#include "src/counters.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -101,6 +101,8 @@ BUILTIN(DataViewConstructor) {
// 13. Set O's [[ByteOffset]] internal slot to offset.
Handle<JSDataView>::cast(result)->set_byte_offset(view_byte_offset);
+ Handle<JSDataView>::cast(result)->set_data_pointer(
+ static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);
// 14. Return O.
return *result;
diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc
index 3a09f9f52d..ca84948d48 100644
--- a/deps/v8/src/builtins/builtins-date-gen.cc
+++ b/deps/v8/src/builtins/builtins-date-gen.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 6701db28a3..d333873542 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -4,17 +4,17 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/conversions.h"
-#include "src/counters.h"
-#include "src/date.h"
-#include "src/dateparser-inl.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/date/date.h"
+#include "src/date/dateparser-inl.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#include "src/objects/js-date-time-format.h"
#endif
-#include "src/string-stream.h"
+#include "src/strings/string-stream.h"
namespace v8 {
namespace internal {
@@ -123,12 +123,12 @@ double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
result = DateParser::Parse(isolate, str_content.ToUC16Vector(), *tmp);
}
if (!result) return std::numeric_limits<double>::quiet_NaN();
- double const day = MakeDay(tmp->get(0)->Number(), tmp->get(1)->Number(),
- tmp->get(2)->Number());
- double const time = MakeTime(tmp->get(3)->Number(), tmp->get(4)->Number(),
- tmp->get(5)->Number(), tmp->get(6)->Number());
+ double const day =
+ MakeDay(tmp->get(0).Number(), tmp->get(1).Number(), tmp->get(2).Number());
+ double const time = MakeTime(tmp->get(3).Number(), tmp->get(4).Number(),
+ tmp->get(5).Number(), tmp->get(6).Number());
double date = MakeDate(day, time);
- if (tmp->get(7)->IsNull(isolate)) {
+ if (tmp->get(7).IsNull(isolate)) {
if (date >= -DateCache::kMaxTimeBeforeUTCInMs &&
date <= DateCache::kMaxTimeBeforeUTCInMs) {
date = isolate->date_cache()->ToUTC(static_cast<int64_t>(date));
@@ -136,14 +136,14 @@ double ParseDateTimeString(Isolate* isolate, Handle<String> str) {
return std::numeric_limits<double>::quiet_NaN();
}
} else {
- date -= tmp->get(7)->Number() * 1000.0;
+ date -= tmp->get(7).Number() * 1000.0;
}
return DateCache::TimeClip(date);
}
enum ToDateStringMode { kDateOnly, kTimeOnly, kDateAndTime };
-typedef base::SmallVector<char, 128> DateBuffer;
+using DateBuffer = base::SmallVector<char, 128>;
template <class... Args>
DateBuffer FormatDate(const char* format, Args... args) {
@@ -222,7 +222,7 @@ BUILTIN(DateConstructor) {
} else if (argc == 1) {
Handle<Object> value = args.at(1);
if (value->IsJSDate()) {
- time_val = Handle<JSDate>::cast(value)->value()->Number();
+ time_val = Handle<JSDate>::cast(value)->value().Number();
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
Object::ToPrimitive(value));
@@ -374,7 +374,7 @@ BUILTIN(DatePrototypeSetDate) {
Handle<Object> value = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
Object::ToNumber(isolate, value));
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
@@ -397,8 +397,8 @@ BUILTIN(DatePrototypeSetFullYear) {
Object::ToNumber(isolate, year));
double y = year->Number(), m = 0.0, dt = 1.0;
int time_within_day = 0;
- if (!std::isnan(date->value()->Number())) {
- int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ if (!std::isnan(date->value().Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value().Number());
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
@@ -432,7 +432,7 @@ BUILTIN(DatePrototypeSetHours) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour,
Object::ToNumber(isolate, hour));
double h = hour->Number();
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
@@ -471,7 +471,7 @@ BUILTIN(DatePrototypeSetMilliseconds) {
Handle<Object> ms = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
Object::ToNumber(isolate, ms));
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
@@ -493,7 +493,7 @@ BUILTIN(DatePrototypeSetMinutes) {
Handle<Object> min = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min,
Object::ToNumber(isolate, min));
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
@@ -528,7 +528,7 @@ BUILTIN(DatePrototypeSetMonth) {
Handle<Object> month = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
Object::ToNumber(isolate, month));
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
@@ -557,7 +557,7 @@ BUILTIN(DatePrototypeSetSeconds) {
Handle<Object> sec = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec,
Object::ToNumber(isolate, sec));
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
@@ -595,8 +595,8 @@ BUILTIN(DatePrototypeSetUTCDate) {
Handle<Object> value = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
Object::ToNumber(isolate, value));
- if (std::isnan(date->value()->Number())) return date->value();
- int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ if (std::isnan(date->value().Number())) return date->value();
+ int64_t const time_ms = static_cast<int64_t>(date->value().Number());
int const days = isolate->date_cache()->DaysFromTime(time_ms);
int const time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
int year, month, day;
@@ -616,8 +616,8 @@ BUILTIN(DatePrototypeSetUTCFullYear) {
Object::ToNumber(isolate, year));
double y = year->Number(), m = 0.0, dt = 1.0;
int time_within_day = 0;
- if (!std::isnan(date->value()->Number())) {
- int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ if (!std::isnan(date->value().Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value().Number());
int const days = isolate->date_cache()->DaysFromTime(time_ms);
time_within_day = isolate->date_cache()->TimeInDay(time_ms, days);
int year, month, day;
@@ -650,7 +650,7 @@ BUILTIN(DatePrototypeSetUTCHours) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hour,
Object::ToNumber(isolate, hour));
double h = hour->Number();
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int day = isolate->date_cache()->DaysFromTime(time_ms);
@@ -688,7 +688,7 @@ BUILTIN(DatePrototypeSetUTCMilliseconds) {
Handle<Object> ms = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms,
Object::ToNumber(isolate, ms));
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int day = isolate->date_cache()->DaysFromTime(time_ms);
@@ -709,7 +709,7 @@ BUILTIN(DatePrototypeSetUTCMinutes) {
Handle<Object> min = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min,
Object::ToNumber(isolate, min));
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int day = isolate->date_cache()->DaysFromTime(time_ms);
@@ -743,7 +743,7 @@ BUILTIN(DatePrototypeSetUTCMonth) {
Handle<Object> month = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month,
Object::ToNumber(isolate, month));
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int days = isolate->date_cache()->DaysFromTime(time_ms);
@@ -771,7 +771,7 @@ BUILTIN(DatePrototypeSetUTCSeconds) {
Handle<Object> sec = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec,
Object::ToNumber(isolate, sec));
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (!std::isnan(time_val)) {
int64_t const time_ms = static_cast<int64_t>(time_val);
int day = isolate->date_cache()->DaysFromTime(time_ms);
@@ -796,7 +796,7 @@ BUILTIN(DatePrototypeToDateString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toDateString");
DateBuffer buffer =
- ToDateString(date->value()->Number(), isolate->date_cache(), kDateOnly);
+ ToDateString(date->value().Number(), isolate->date_cache(), kDateOnly);
RETURN_RESULT_OR_FAILURE(
isolate, isolate->factory()->NewStringFromUtf8(VectorOf(buffer)));
}
@@ -805,7 +805,7 @@ BUILTIN(DatePrototypeToDateString) {
BUILTIN(DatePrototypeToISOString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toISOString");
- double const time_val = date->value()->Number();
+ double const time_val = date->value().Number();
if (std::isnan(time_val)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidTimeValue));
@@ -833,7 +833,7 @@ BUILTIN(DatePrototypeToString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toString");
DateBuffer buffer =
- ToDateString(date->value()->Number(), isolate->date_cache());
+ ToDateString(date->value().Number(), isolate->date_cache());
RETURN_RESULT_OR_FAILURE(
isolate, isolate->factory()->NewStringFromUtf8(VectorOf(buffer)));
}
@@ -843,7 +843,7 @@ BUILTIN(DatePrototypeToTimeString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toTimeString");
DateBuffer buffer =
- ToDateString(date->value()->Number(), isolate->date_cache(), kTimeOnly);
+ ToDateString(date->value().Number(), isolate->date_cache(), kTimeOnly);
RETURN_RESULT_OR_FAILURE(
isolate, isolate->factory()->NewStringFromUtf8(VectorOf(buffer)));
}
@@ -908,7 +908,7 @@ BUILTIN(DatePrototypeToLocaleTimeString) {
BUILTIN(DatePrototypeToUTCString) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.toUTCString");
- double const time_val = date->value()->Number();
+ double const time_val = date->value().Number();
if (std::isnan(time_val)) {
return *isolate->factory()->NewStringFromAsciiChecked("Invalid Date");
}
@@ -929,7 +929,7 @@ BUILTIN(DatePrototypeToUTCString) {
BUILTIN(DatePrototypeGetYear) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSDate, date, "Date.prototype.getYear");
- double time_val = date->value()->Number();
+ double time_val = date->value().Number();
if (std::isnan(time_val)) return date->value();
int64_t time_ms = static_cast<int64_t>(time_val);
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
@@ -954,8 +954,8 @@ BUILTIN(DatePrototypeSetYear) {
}
}
int time_within_day = 0;
- if (!std::isnan(date->value()->Number())) {
- int64_t const time_ms = static_cast<int64_t>(date->value()->Number());
+ if (!std::isnan(date->value().Number())) {
+ int64_t const time_ms = static_cast<int64_t>(date->value().Number());
int64_t local_time_ms = isolate->date_cache()->ToLocal(time_ms);
int const days = isolate->date_cache()->DaysFromTime(local_time_ms);
time_within_day = isolate->date_cache()->TimeInDay(local_time_ms, days);
diff --git a/deps/v8/src/builtins/builtins-debug-gen.cc b/deps/v8/src/builtins/builtins-debug-gen.cc
index 4bdf876478..9d47cf1600 100644
--- a/deps/v8/src/builtins/builtins-debug-gen.cc
+++ b/deps/v8/src/builtins/builtins-debug-gen.cc
@@ -5,7 +5,7 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index aea41b8635..3412edb89d 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -8,15 +8,13 @@
#include "builtins-generated/bytecodes-builtins-list.h"
// include generated header
-#include "torque-generated/builtin-definitions-from-dsl.h"
+#include "torque-generated/builtin-definitions-tq.h"
namespace v8 {
namespace internal {
// CPP: Builtin in C++. Entered via BUILTIN_EXIT frame.
// Args: name
-// API: Builtin in C++ for API callbacks. Entered via EXIT frame.
-// Args: name
// TFJ: Builtin in Turbofan, with JS linkage (callable as Javascript function).
// Args: name, arguments count, explicit argument names...
// TFS: Builtin in Turbofan, with CodeStub linkage.
@@ -33,13 +31,12 @@ namespace internal {
// TODO(jgruber): Remove DummyDescriptor once all ASM builtins have been
// properly associated with their descriptor.
-#define BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
+#define BUILTIN_LIST_BASE(CPP, TFJ, TFC, TFS, TFH, ASM) \
/* GC write barrirer */ \
TFC(RecordWrite, RecordWrite) \
TFC(EphemeronKeyBarrier, EphemeronKeyBarrier) \
\
- /* Adaptors for CPP/API builtin */ \
- TFC(AdaptorWithExitFrame, CppBuiltinAdaptor) \
+ /* Adaptor for CPP builtin */ \
TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor) \
\
/* Calls */ \
@@ -167,9 +164,9 @@ namespace internal {
/* API callback handling */ \
ASM(CallApiCallback, ApiCallback) \
ASM(CallApiGetter, ApiGetter) \
- API(HandleApiCall) \
- API(HandleApiCallAsFunction) \
- API(HandleApiCallAsConstructor) \
+ CPP(HandleApiCall) \
+ CPP(HandleApiCallAsFunction) \
+ CPP(HandleApiCallAsConstructor) \
\
/* Adapters for Turbofan into runtime */ \
TFC(AllocateInYoungGeneration, Allocate) \
@@ -202,7 +199,6 @@ namespace internal {
TFC(ToNumberConvertBigInt, TypeConversion) \
TFC(ToNumeric, TypeConversion) \
TFC(NumberToString, TypeConversion) \
- TFC(ToString, TypeConversion) \
TFC(ToInteger, TypeConversion) \
TFC(ToInteger_TruncateMinusZero, TypeConversion) \
TFC(ToLength, TypeConversion) \
@@ -264,6 +260,9 @@ namespace internal {
/* Object property helpers */ \
TFS(HasProperty, kObject, kKey) \
TFS(DeleteProperty, kObject, kKey, kLanguageMode) \
+ /* ES #sec-copydataproperties */ \
+ TFS(CopyDataProperties, kTarget, kSource) \
+ TFS(SetDataProperties, kTarget, kSource) \
\
/* Abort */ \
TFC(Abort, Abort) \
@@ -396,8 +395,6 @@ namespace internal {
CPP(BigIntPrototypeValueOf) \
\
/* Boolean */ \
- /* ES #sec-boolean-constructor */ \
- CPP(BooleanConstructor) \
/* ES6 #sec-boolean.prototype.tostring */ \
TFJ(BooleanPrototypeToString, 0, kReceiver) \
/* ES6 #sec-boolean.prototype.valueof */ \
@@ -641,50 +638,14 @@ namespace internal {
/* Math */ \
/* ES6 #sec-math.abs */ \
TFJ(MathAbs, 1, kReceiver, kX) \
- /* ES6 #sec-math.acos */ \
- TFJ(MathAcos, 1, kReceiver, kX) \
- /* ES6 #sec-math.acosh */ \
- TFJ(MathAcosh, 1, kReceiver, kX) \
- /* ES6 #sec-math.asin */ \
- TFJ(MathAsin, 1, kReceiver, kX) \
- /* ES6 #sec-math.asinh */ \
- TFJ(MathAsinh, 1, kReceiver, kX) \
- /* ES6 #sec-math.atan */ \
- TFJ(MathAtan, 1, kReceiver, kX) \
- /* ES6 #sec-math.atanh */ \
- TFJ(MathAtanh, 1, kReceiver, kX) \
- /* ES6 #sec-math.atan2 */ \
- TFJ(MathAtan2, 2, kReceiver, kY, kX) \
- /* ES6 #sec-math.cbrt */ \
- TFJ(MathCbrt, 1, kReceiver, kX) \
/* ES6 #sec-math.ceil */ \
TFJ(MathCeil, 1, kReceiver, kX) \
- /* ES6 #sec-math.clz32 */ \
- TFJ(MathClz32, 1, kReceiver, kX) \
- /* ES6 #sec-math.cos */ \
- TFJ(MathCos, 1, kReceiver, kX) \
- /* ES6 #sec-math.cosh */ \
- TFJ(MathCosh, 1, kReceiver, kX) \
- /* ES6 #sec-math.exp */ \
- TFJ(MathExp, 1, kReceiver, kX) \
- /* ES6 #sec-math.expm1 */ \
- TFJ(MathExpm1, 1, kReceiver, kX) \
/* ES6 #sec-math.floor */ \
TFJ(MathFloor, 1, kReceiver, kX) \
- /* ES6 #sec-math.fround */ \
- TFJ(MathFround, 1, kReceiver, kX) \
/* ES6 #sec-math.hypot */ \
CPP(MathHypot) \
/* ES6 #sec-math.imul */ \
TFJ(MathImul, 2, kReceiver, kX, kY) \
- /* ES6 #sec-math.log */ \
- TFJ(MathLog, 1, kReceiver, kX) \
- /* ES6 #sec-math.log1p */ \
- TFJ(MathLog1p, 1, kReceiver, kX) \
- /* ES6 #sec-math.log10 */ \
- TFJ(MathLog10, 1, kReceiver, kX) \
- /* ES6 #sec-math.log2 */ \
- TFJ(MathLog2, 1, kReceiver, kX) \
/* ES6 #sec-math.max */ \
TFJ(MathMax, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-math.min */ \
@@ -695,18 +656,6 @@ namespace internal {
TFJ(MathRandom, 0, kReceiver) \
/* ES6 #sec-math.round */ \
TFJ(MathRound, 1, kReceiver, kX) \
- /* ES6 #sec-math.sign */ \
- TFJ(MathSign, 1, kReceiver, kX) \
- /* ES6 #sec-math.sin */ \
- TFJ(MathSin, 1, kReceiver, kX) \
- /* ES6 #sec-math.sinh */ \
- TFJ(MathSinh, 1, kReceiver, kX) \
- /* ES6 #sec-math.sqrt */ \
- TFJ(MathTan, 1, kReceiver, kX) \
- /* ES6 #sec-math.tan */ \
- TFJ(MathTanh, 1, kReceiver, kX) \
- /* ES6 #sec-math.tanh */ \
- TFJ(MathSqrt, 1, kReceiver, kX) \
/* ES6 #sec-math.trunc */ \
TFJ(MathTrunc, 1, kReceiver, kX) \
\
@@ -752,6 +701,7 @@ namespace internal {
TFC(GreaterThanOrEqual, Compare) \
TFC(Equal, Compare) \
TFC(SameValue, Compare) \
+ TFC(SameValueNumbersOnly, Compare) \
TFC(StrictEqual, Compare) \
TFS(BitwiseNot, kValue) \
TFS(Decrement, kValue) \
@@ -869,11 +819,6 @@ namespace internal {
/* V8 Extras: v8.resolvePromise(promise, resolution) */ \
TFJ(PromiseInternalResolve, 2, kReceiver, kPromise, kResolution) \
\
- /* Proxy */ \
- TFS(ProxyGetProperty, kProxy, kName, kReceiverValue, kOnNonExistent) \
- TFS(ProxyHasProperty, kProxy, kName) \
- TFS(ProxySetProperty, kProxy, kName, kValue, kReceiverValue) \
- \
/* Reflect */ \
ASM(ReflectApply, Dummy) \
ASM(ReflectConstruct, Dummy) \
@@ -995,14 +940,6 @@ namespace internal {
CPP(StringFromCodePoint) \
/* ES6 #sec-string.fromcharcode */ \
TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-string.prototype.charat */ \
- TFJ(StringPrototypeCharAt, 1, kReceiver, kPosition) \
- /* ES6 #sec-string.prototype.charcodeat */ \
- TFJ(StringPrototypeCharCodeAt, 1, kReceiver, kPosition) \
- /* ES6 #sec-string.prototype.codepointat */ \
- TFJ(StringPrototypeCodePointAt, 1, kReceiver, kPosition) \
- /* ES6 #sec-string.prototype.concat */ \
- TFJ(StringPrototypeConcat, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.includes */ \
TFJ(StringPrototypeIncludes, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
@@ -1025,32 +962,16 @@ namespace internal {
TFJ(StringPrototypeReplace, 2, kReceiver, kSearch, kReplace) \
/* ES6 #sec-string.prototype.search */ \
TFJ(StringPrototypeSearch, 1, kReceiver, kRegexp) \
- /* ES6 #sec-string.prototype.slice */ \
- TFJ(StringPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.substr */ \
TFJ(StringPrototypeSubstr, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-string.prototype.substring */ \
- TFJ(StringPrototypeSubstring, \
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-string.prototype.tostring */ \
- TFJ(StringPrototypeToString, 0, kReceiver) \
TFJ(StringPrototypeTrim, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFJ(StringPrototypeTrimEnd, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
TFJ(StringPrototypeTrimStart, \
SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 #sec-string.prototype.valueof */ \
- TFJ(StringPrototypeValueOf, 0, kReceiver) \
/* ES6 #sec-string.raw */ \
CPP(StringRaw) \
- /* ES6 #sec-string.prototype-@@iterator */ \
- TFJ(StringPrototypeIterator, 0, kReceiver) \
- \
- /* StringIterator */ \
- /* ES6 #sec-%stringiteratorprototype%.next */ \
- TFJ(StringIteratorPrototypeNext, 0, kReceiver) \
- TFS(StringToList, kSource) \
\
/* Symbol */ \
/* ES #sec-symbol-constructor */ \
@@ -1071,7 +992,7 @@ namespace internal {
/* TypedArray */ \
/* ES #sec-typedarray-constructors */ \
TFJ(TypedArrayBaseConstructor, 0, kReceiver) \
- TFJ(GenericConstructorLazyDeoptContinuation, 1, kReceiver, kResult) \
+ TFJ(GenericLazyDeoptContinuation, 1, kReceiver, kResult) \
TFJ(TypedArrayConstructor, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
@@ -1124,6 +1045,7 @@ namespace internal {
TFC(WasmStackOverflow, NoContext) \
TFC(WasmToNumber, TypeConversion) \
TFC(WasmThrow, WasmThrow) \
+ TFC(WasmRethrow, WasmThrow) \
TFS(ThrowWasmTrapUnreachable) \
TFS(ThrowWasmTrapMemOutOfBounds) \
TFS(ThrowWasmTrapUnalignedAccess) \
@@ -1396,10 +1318,10 @@ namespace internal {
CPP(StringPrototypeToUpperCase)
#endif // V8_INTL_SUPPORT
-#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM) \
- BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) \
- BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
+#define BUILTIN_LIST(CPP, TFJ, TFC, TFS, TFH, BCH, ASM) \
+ BUILTIN_LIST_BASE(CPP, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_FROM_TORQUE(CPP, TFJ, TFC, TFS, TFH, ASM) \
+ BUILTIN_LIST_INTL(CPP, TFJ, TFS) \
BUILTIN_LIST_BYTECODE_HANDLERS(BCH)
// The exception thrown in the following builtins are caught
@@ -1438,6 +1360,7 @@ namespace internal {
V(WasmStackOverflow) \
V(WasmToNumber) \
V(WasmThrow) \
+ V(WasmRethrow) \
V(DoubleToI) \
V(WasmI64ToBigInt) \
V(WasmBigIntToI64)
@@ -1448,25 +1371,25 @@ namespace internal {
#define IGNORE_BUILTIN(...)
-#define BUILTIN_LIST_C(V) \
- BUILTIN_LIST(V, V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+#define BUILTIN_LIST_C(V) \
+ BUILTIN_LIST(V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_A(V) \
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V)
+ IGNORE_BUILTIN, IGNORE_BUILTIN, V)
-#define BUILTIN_LIST_TFS(V) \
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, \
- V, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#define BUILTIN_LIST_TFS(V) \
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_TFJ(V) \
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ BUILTIN_LIST(IGNORE_BUILTIN, V, IGNORE_BUILTIN, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
#define BUILTIN_LIST_TFC(V) \
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, V, \
- IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, V, IGNORE_BUILTIN, \
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index 81c760a26d..c2eb44debe 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -6,8 +6,8 @@
#define V8_BUILTINS_BUILTINS_DESCRIPTORS_H_
#include "src/builtins/builtins.h"
+#include "src/codegen/interface-descriptors.h"
#include "src/compiler/code-assembler.h"
-#include "src/interface-descriptors.h"
#include "src/objects/shared-function-info.h"
namespace v8 {
@@ -31,19 +31,19 @@ namespace internal {
// Define interface descriptors for builtins with StubCall linkage.
#define DEFINE_TFC_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
- typedef InterfaceDescriptor##Descriptor Builtin_##Name##_InterfaceDescriptor;
+ using Builtin_##Name##_InterfaceDescriptor = InterfaceDescriptor##Descriptor;
#define DEFINE_TFS_INTERFACE_DESCRIPTOR(Name, ...) \
- typedef Name##Descriptor Builtin_##Name##_InterfaceDescriptor;
+ using Builtin_##Name##_InterfaceDescriptor = Name##Descriptor;
// Define interface descriptors for IC handlers/dispatchers.
#define DEFINE_TFH_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
- typedef InterfaceDescriptor##Descriptor Builtin_##Name##_InterfaceDescriptor;
+ using Builtin_##Name##_InterfaceDescriptor = InterfaceDescriptor##Descriptor;
#define DEFINE_ASM_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \
- typedef InterfaceDescriptor##Descriptor Builtin_##Name##_InterfaceDescriptor;
+ using Builtin_##Name##_InterfaceDescriptor = InterfaceDescriptor##Descriptor;
-BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DEFINE_TFJ_INTERFACE_DESCRIPTOR,
+BUILTIN_LIST(IGNORE_BUILTIN, DEFINE_TFJ_INTERFACE_DESCRIPTOR,
DEFINE_TFC_INTERFACE_DESCRIPTOR, DEFINE_TFS_INTERFACE_DESCRIPTOR,
DEFINE_TFH_INTERFACE_DESCRIPTOR, IGNORE_BUILTIN,
DEFINE_ASM_INTERFACE_DESCRIPTOR)
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index 48ffc3ba0c..e099baeb34 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/accessors.h"
+#include "src/builtins/accessors.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/isolate-inl.h"
-#include "src/messages.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/messages.h"
+#include "src/logging/counters.h"
#include "src/objects/api-callbacks.h"
-#include "src/property-descriptor.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-extras-utils.cc b/deps/v8/src/builtins/builtins-extras-utils.cc
index 31d5fd3069..10368c0484 100644
--- a/deps/v8/src/builtins/builtins-extras-utils.cc
+++ b/deps/v8/src/builtins/builtins-extras-utils.cc
@@ -4,10 +4,10 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/elements.h"
+#include "src/objects/elements.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -39,7 +39,7 @@ BUILTIN(ExtrasUtilsUncurryThis) {
DCHECK_EQ(2, args.length());
Handle<JSFunction> function = args.at<JSFunction>(1);
- Handle<NativeContext> native_context(isolate->context()->native_context(),
+ Handle<NativeContext> native_context(isolate->context().native_context(),
isolate);
Handle<Context> context = isolate->factory()->NewBuiltinContext(
native_context,
@@ -64,7 +64,7 @@ BUILTIN(ExtrasUtilsUncurryThis) {
BUILTIN(ExtrasUtilsCallReflectApply) {
HandleScope scope(isolate);
Handle<Context> context(isolate->context(), isolate);
- Handle<NativeContext> native_context(isolate->context()->native_context(),
+ Handle<NativeContext> native_context(isolate->context().native_context(),
isolate);
Handle<JSFunction> function(
JSFunction::cast(context->get(
diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc
index b8fb69256c..411d9a6930 100644
--- a/deps/v8/src/builtins/builtins-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-function-gen.cc
@@ -4,8 +4,8 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
-#include "src/frame-constants.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/execution/frame-constants.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/descriptor-array.h"
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 900ef9fd3b..f9a356f94b 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -4,14 +4,14 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/compiler.h"
-#include "src/conversions.h"
-#include "src/counters.h"
-#include "src/lookup.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/compiler.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
#include "src/objects/api-callbacks.h"
-#include "src/string-builder-inl.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/string-builder-inl.h"
namespace v8 {
namespace internal {
@@ -90,7 +90,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
Execution::Call(isolate, function, target_global_proxy, 0, nullptr),
Object);
function = Handle<JSFunction>::cast(result);
- function->shared()->set_name_should_print_as_anonymous(true);
+ function->shared().set_name_should_print_as_anonymous(true);
}
// If new.target is equal to target then the function created
@@ -149,7 +149,7 @@ BUILTIN(AsyncFunctionConstructor) {
// determined after the function is resumed.
Handle<JSFunction> func = Handle<JSFunction>::cast(maybe_func);
Handle<Script> script =
- handle(Script::cast(func->shared()->script()), isolate);
+ handle(Script::cast(func->shared().script()), isolate);
int position = Script::GetEvalPosition(isolate, script);
USE(position);
@@ -168,7 +168,7 @@ BUILTIN(AsyncGeneratorFunctionConstructor) {
// determined after the function is resumed.
Handle<JSFunction> func = Handle<JSFunction>::cast(maybe_func);
Handle<Script> script =
- handle(Script::cast(func->shared()->script()), isolate);
+ handle(Script::cast(func->shared().script()), isolate);
int position = Script::GetEvalPosition(isolate, script);
USE(position);
@@ -279,7 +279,7 @@ BUILTIN(FunctionPrototypeToString) {
// With the revised toString behavior, all callable objects are valid
// receivers for this method.
if (receiver->IsJSReceiver() &&
- JSReceiver::cast(*receiver)->map()->is_callable()) {
+ JSReceiver::cast(*receiver).map().is_callable()) {
return ReadOnlyRoots(isolate).function_native_code_string();
}
THROW_NEW_ERROR_RETURN_FAILURE(
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index 04b378db3a..7e75bbcee0 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -4,11 +4,11 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/execution/isolate.h"
#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-global-gen.cc b/deps/v8/src/builtins/builtins-global-gen.cc
index 5708fe67fb..fa21f81650 100644
--- a/deps/v8/src/builtins/builtins-global-gen.cc
+++ b/deps/v8/src/builtins/builtins-global-gen.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index 83820de135..53e974c452 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -4,11 +4,11 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/compiler.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
-#include "src/uri.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/compiler.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/uri.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index a722a6180b..d1b50f2cdc 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -4,10 +4,10 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/ic/ic.h"
#include "src/ic/keyed-store-generic.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -26,13 +26,13 @@ class HandlerBuiltinsAssembler : public CodeStubAssembler {
// compile-time types (int) by dispatching over the runtime type and
// emitting a specialized copy of the given case function for each elements
// kind. Use with caution. This produces a *lot* of code.
- typedef std::function<void(ElementsKind)> ElementsKindSwitchCase;
+ using ElementsKindSwitchCase = std::function<void(ElementsKind)>;
void DispatchByElementsKind(TNode<Int32T> elements_kind,
const ElementsKindSwitchCase& case_function);
// Dispatches over all possible combinations of {from,to} elements kinds.
- typedef std::function<void(ElementsKind, ElementsKind)>
- ElementsKindTransitionSwitchCase;
+ using ElementsKindTransitionSwitchCase =
+ std::function<void(ElementsKind, ElementsKind)>;
void DispatchForElementsKindTransition(
TNode<Int32T> from_kind, TNode<Int32T> to_kind,
const ElementsKindTransitionSwitchCase& case_function);
@@ -73,7 +73,7 @@ void Builtins::Generate_StoreIC_Uninitialized(
// TODO(mythria): Check if we can remove feedback vector and slot parameters in
// descriptor.
void HandlerBuiltinsAssembler::Generate_KeyedStoreIC_Slow() {
- typedef StoreWithVectorDescriptor Descriptor;
+ using Descriptor = StoreWithVectorDescriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
Node* name = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
@@ -106,7 +106,7 @@ TF_BUILTIN(KeyedStoreIC_Slow_NoTransitionHandleCOW, HandlerBuiltinsAssembler) {
}
void HandlerBuiltinsAssembler::Generate_StoreInArrayLiteralIC_Slow() {
- typedef StoreWithVectorDescriptor Descriptor;
+ using Descriptor = StoreWithVectorDescriptor;
Node* array = Parameter(Descriptor::kReceiver);
Node* index = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
@@ -204,7 +204,7 @@ void HandlerBuiltinsAssembler::DispatchForElementsKindTransition(
void HandlerBuiltinsAssembler::Generate_ElementsTransitionAndStore(
KeyedAccessStoreMode store_mode) {
- typedef StoreTransitionDescriptor Descriptor;
+ using Descriptor = StoreTransitionDescriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
@@ -243,17 +243,17 @@ TF_BUILTIN(ElementsTransitionAndStore_Standard, HandlerBuiltinsAssembler) {
TF_BUILTIN(ElementsTransitionAndStore_GrowNoTransitionHandleCOW,
HandlerBuiltinsAssembler) {
- Generate_ElementsTransitionAndStore(STORE_AND_GROW_NO_TRANSITION_HANDLE_COW);
+ Generate_ElementsTransitionAndStore(STORE_AND_GROW_HANDLE_COW);
}
TF_BUILTIN(ElementsTransitionAndStore_NoTransitionIgnoreOOB,
HandlerBuiltinsAssembler) {
- Generate_ElementsTransitionAndStore(STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS);
+ Generate_ElementsTransitionAndStore(STORE_IGNORE_OUT_OF_BOUNDS);
}
TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW,
HandlerBuiltinsAssembler) {
- Generate_ElementsTransitionAndStore(STORE_NO_TRANSITION_HANDLE_COW);
+ Generate_ElementsTransitionAndStore(STORE_HANDLE_COW);
}
// All elements kinds handled by EmitElementStore. Specifically, this includes
@@ -264,6 +264,7 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW,
V(PACKED_ELEMENTS) \
V(PACKED_SEALED_ELEMENTS) \
V(HOLEY_ELEMENTS) \
+ V(HOLEY_SEALED_ELEMENTS) \
V(PACKED_DOUBLE_ELEMENTS) \
V(HOLEY_DOUBLE_ELEMENTS) \
V(UINT8_ELEMENTS) \
@@ -302,11 +303,17 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind(
Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels,
arraysize(elements_kinds));
-#define ELEMENTS_KINDS_CASE(KIND) \
- BIND(&if_##KIND); \
- { \
- case_function(KIND); \
- Goto(&next); \
+#define ELEMENTS_KINDS_CASE(KIND) \
+ BIND(&if_##KIND); \
+ { \
+ if (!FLAG_enable_sealed_frozen_elements_kind && \
+ IsFrozenOrSealedElementsKindUnchecked(KIND)) { \
+ /* Disable support for frozen or sealed elements kinds. */ \
+ Unreachable(); \
+ } else { \
+ case_function(KIND); \
+ Goto(&next); \
+ } \
}
ELEMENTS_KINDS(ELEMENTS_KINDS_CASE)
#undef ELEMENTS_KINDS_CASE
@@ -321,7 +328,7 @@ void HandlerBuiltinsAssembler::DispatchByElementsKind(
void HandlerBuiltinsAssembler::Generate_StoreFastElementIC(
KeyedAccessStoreMode store_mode) {
- typedef StoreWithVectorDescriptor Descriptor;
+ using Descriptor = StoreWithVectorDescriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
@@ -352,15 +359,15 @@ TF_BUILTIN(StoreFastElementIC_Standard, HandlerBuiltinsAssembler) {
TF_BUILTIN(StoreFastElementIC_GrowNoTransitionHandleCOW,
HandlerBuiltinsAssembler) {
- Generate_StoreFastElementIC(STORE_AND_GROW_NO_TRANSITION_HANDLE_COW);
+ Generate_StoreFastElementIC(STORE_AND_GROW_HANDLE_COW);
}
TF_BUILTIN(StoreFastElementIC_NoTransitionIgnoreOOB, HandlerBuiltinsAssembler) {
- Generate_StoreFastElementIC(STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS);
+ Generate_StoreFastElementIC(STORE_IGNORE_OUT_OF_BOUNDS);
}
TF_BUILTIN(StoreFastElementIC_NoTransitionHandleCOW, HandlerBuiltinsAssembler) {
- Generate_StoreFastElementIC(STORE_NO_TRANSITION_HANDLE_COW);
+ Generate_StoreFastElementIC(STORE_HANDLE_COW);
}
TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
@@ -429,7 +436,7 @@ TF_BUILTIN(KeyedLoadIC_SloppyArguments, CodeStubAssembler) {
}
void HandlerBuiltinsAssembler::Generate_KeyedStoreIC_SloppyArguments() {
- typedef StoreWithVectorDescriptor Descriptor;
+ using Descriptor = StoreWithVectorDescriptor;
Node* receiver = Parameter(Descriptor::kReceiver);
Node* key = Parameter(Descriptor::kName);
Node* value = Parameter(Descriptor::kValue);
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 1b4805ef19..baaadb722a 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
-#include "src/counters.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/macro-assembler.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/ic/accessor-assembler.h"
#include "src/ic/keyed-store-generic.h"
-#include "src/macro-assembler.h"
+#include "src/logging/counters.h"
#include "src/objects/debug-objects.h"
#include "src/objects/shared-function-info.h"
#include "src/runtime/runtime.h"
@@ -599,6 +599,113 @@ TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
}
}
+namespace {
+
+class SetOrCopyDataPropertiesAssembler : public CodeStubAssembler {
+ public:
+ explicit SetOrCopyDataPropertiesAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ TNode<Object> SetOrCopyDataProperties(TNode<Context> context,
+ TNode<JSReceiver> target,
+ TNode<Object> source, Label* if_runtime,
+ bool use_set = true) {
+ Label if_done(this), if_noelements(this),
+ if_sourcenotjsobject(this, Label::kDeferred);
+
+ // JSValue wrappers for numbers don't have any enumerable own properties,
+ // so we can immediately skip the whole operation if {source} is a Smi.
+ GotoIf(TaggedIsSmi(source), &if_done);
+
+ // Otherwise check if {source} is a proper JSObject, and if not, defer
+ // to testing for non-empty strings below.
+ TNode<Map> source_map = LoadMap(CAST(source));
+ TNode<Int32T> source_instance_type = LoadMapInstanceType(source_map);
+ GotoIfNot(IsJSObjectInstanceType(source_instance_type),
+ &if_sourcenotjsobject);
+
+ TNode<FixedArrayBase> source_elements = LoadElements(CAST(source));
+ GotoIf(IsEmptyFixedArray(source_elements), &if_noelements);
+ Branch(IsEmptySlowElementDictionary(source_elements), &if_noelements,
+ if_runtime);
+
+ BIND(&if_noelements);
+ {
+ // If the target is deprecated, the object will be updated on first store.
+ // If the source for that store equals the target, this will invalidate
+ // the cached representation of the source. Handle this case in runtime.
+ TNode<Map> target_map = LoadMap(target);
+ GotoIf(IsDeprecatedMap(target_map), if_runtime);
+
+ if (use_set) {
+ TNode<BoolT> target_is_simple_receiver = IsSimpleObjectMap(target_map);
+ ForEachEnumerableOwnProperty(
+ context, source_map, CAST(source), kEnumerationOrder,
+ [=](TNode<Name> key, TNode<Object> value) {
+ KeyedStoreGenericGenerator::SetProperty(
+ state(), context, target, target_is_simple_receiver, key,
+ value, LanguageMode::kStrict);
+ },
+ if_runtime);
+ } else {
+ ForEachEnumerableOwnProperty(
+ context, source_map, CAST(source), kEnumerationOrder,
+ [=](TNode<Name> key, TNode<Object> value) {
+ CallBuiltin(Builtins::kSetPropertyInLiteral, context, target, key,
+ value);
+ },
+ if_runtime);
+ }
+ Goto(&if_done);
+ }
+
+ BIND(&if_sourcenotjsobject);
+ {
+ // Handle other JSReceivers in the runtime.
+ GotoIf(IsJSReceiverInstanceType(source_instance_type), if_runtime);
+
+ // Non-empty strings are the only non-JSReceivers that need to be
+ // handled explicitly by Object.assign() and CopyDataProperties.
+ GotoIfNot(IsStringInstanceType(source_instance_type), &if_done);
+ TNode<IntPtrT> source_length = LoadStringLengthAsWord(CAST(source));
+ Branch(WordEqual(source_length, IntPtrConstant(0)), &if_done, if_runtime);
+ }
+
+ BIND(&if_done);
+ return UndefinedConstant();
+ }
+};
+
+} // namespace
+
+// ES #sec-copydataproperties
+TF_BUILTIN(CopyDataProperties, SetOrCopyDataPropertiesAssembler) {
+ TNode<JSObject> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<Object> source = CAST(Parameter(Descriptor::kSource));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ CSA_ASSERT(this, WordNotEqual(target, source));
+
+ Label if_runtime(this, Label::kDeferred);
+ Return(SetOrCopyDataProperties(context, target, source, &if_runtime, false));
+
+ BIND(&if_runtime);
+ TailCallRuntime(Runtime::kCopyDataProperties, context, target, source);
+}
+
+TF_BUILTIN(SetDataProperties, SetOrCopyDataPropertiesAssembler) {
+ TNode<JSReceiver> target = CAST(Parameter(Descriptor::kTarget));
+ TNode<Object> source = CAST(Parameter(Descriptor::kSource));
+ TNode<Context> context = CAST(Parameter(Descriptor::kContext));
+
+ Label if_runtime(this, Label::kDeferred);
+ Return(SetOrCopyDataProperties(context, target, source, &if_runtime, true));
+
+ BIND(&if_runtime);
+ TailCallRuntime(Runtime::kSetDataProperties, context, target, source);
+}
+
TF_BUILTIN(ForInEnumerate, CodeStubAssembler) {
Node* receiver = Parameter(Descriptor::kReceiver);
Node* context = Parameter(Descriptor::kContext);
@@ -646,19 +753,21 @@ TF_BUILTIN(SameValue, CodeStubAssembler) {
Return(FalseConstant());
}
-class InternalBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit InternalBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+TF_BUILTIN(SameValueNumbersOnly, CodeStubAssembler) {
+ Node* lhs = Parameter(Descriptor::kLeft);
+ Node* rhs = Parameter(Descriptor::kRight);
- template <typename Descriptor>
- void GenerateAdaptorWithExitFrameType(
- Builtins::ExitFrameType exit_frame_type);
-};
+ Label if_true(this), if_false(this);
+ BranchIfSameValue(lhs, rhs, &if_true, &if_false, SameValueMode::kNumbersOnly);
-template <typename Descriptor>
-void InternalBuiltinsAssembler::GenerateAdaptorWithExitFrameType(
- Builtins::ExitFrameType exit_frame_type) {
+ BIND(&if_true);
+ Return(TrueConstant());
+
+ BIND(&if_false);
+ Return(FalseConstant());
+}
+
+TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
TNode<WordT> c_function =
@@ -682,9 +791,9 @@ void InternalBuiltinsAssembler::GenerateAdaptorWithExitFrameType(
argc,
Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
- TNode<Code> code = HeapConstant(
- CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
- exit_frame_type == Builtins::BUILTIN_EXIT));
+ const bool builtin_exit_frame = true;
+ TNode<Code> code = HeapConstant(CodeFactory::CEntry(
+ isolate(), 1, kDontSaveFPRegs, kArgvOnStack, builtin_exit_frame));
// Unconditionally push argc, target and new target as extra stack arguments.
// They will be used by stack frame iterators when constructing stack trace.
@@ -697,14 +806,6 @@ void InternalBuiltinsAssembler::GenerateAdaptorWithExitFrameType(
new_target); // additional stack argument 4
}
-TF_BUILTIN(AdaptorWithExitFrame, InternalBuiltinsAssembler) {
- GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::EXIT);
-}
-
-TF_BUILTIN(AdaptorWithBuiltinExitFrame, InternalBuiltinsAssembler) {
- GenerateAdaptorWithExitFrameType<Descriptor>(Builtins::BUILTIN_EXIT);
-}
-
TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) {
TNode<IntPtrT> requested_size =
UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc
index 3a1eb078d0..75fa21b3f4 100644
--- a/deps/v8/src/builtins/builtins-internal.cc
+++ b/deps/v8/src/builtins/builtins-internal.cc
@@ -4,9 +4,9 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/interface-descriptors.h"
-#include "src/objects-inl.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-interpreter-gen.cc b/deps/v8/src/builtins/builtins-interpreter-gen.cc
index f0d5160330..d01fbe98f7 100644
--- a/deps/v8/src/builtins/builtins-interpreter-gen.cc
+++ b/deps/v8/src/builtins/builtins-interpreter-gen.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "src/builtins/builtins.h"
-#include "src/globals.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
index 9a95bb33c2..991790b490 100644
--- a/deps/v8/src/builtins/builtins-intl-gen.cc
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -8,11 +8,11 @@
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-utils-gen.h"
-#include "src/code-stub-assembler.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/objects/js-list-format-inl.h"
#include "src/objects/js-list-format.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index a5ef6a3c7c..882afa3c32 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -12,10 +12,9 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/date.h"
-#include "src/elements.h"
-#include "src/objects-inl.h"
+#include "src/date/date.h"
+#include "src/logging/counters.h"
+#include "src/objects/elements.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-break-iterator-inl.h"
@@ -28,8 +27,9 @@
#include "src/objects/js-relative-time-format-inl.h"
#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/js-segmenter-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/objects/smi.h"
-#include "src/property-descriptor.h"
#include "unicode/brkiter.h"
@@ -227,7 +227,7 @@ namespace {
Handle<JSFunction> CreateBoundFunction(Isolate* isolate,
Handle<JSObject> object,
Builtins::Name builtin_id, int len) {
- Handle<NativeContext> native_context(isolate->context()->native_context(),
+ Handle<NativeContext> native_context(isolate->context().native_context(),
isolate);
Handle<Context> context = isolate->factory()->NewBuiltinContext(
native_context,
@@ -482,13 +482,14 @@ BUILTIN(NumberFormatInternalFormatNumber) {
Object::ToNumber(isolate, value));
}
- icu::NumberFormat* icu_number_format =
- number_format->icu_number_format()->raw();
- CHECK_NOT_NULL(icu_number_format);
+ icu::number::LocalizedNumberFormatter* icu_localized_number_formatter =
+ number_format->icu_number_formatter().raw();
+ CHECK_NOT_NULL(icu_localized_number_formatter);
+ // Return FormatNumber(nf, x).
RETURN_RESULT_OR_FAILURE(
- isolate,
- JSNumberFormat::FormatNumeric(isolate, *icu_number_format, numeric_obj));
+ isolate, JSNumberFormat::FormatNumeric(
+ isolate, *icu_localized_number_formatter, numeric_obj));
}
BUILTIN(DateTimeFormatConstructor) {
@@ -969,7 +970,7 @@ BUILTIN(CollatorInternalCompare) {
Object::ToString(isolate, y));
// 7. Return CompareStrings(collator, X, Y).
- icu::Collator* icu_collator = collator->icu_collator()->raw();
+ icu::Collator* icu_collator = collator->icu_collator().raw();
CHECK_NOT_NULL(icu_collator);
return *Intl::CompareStrings(isolate, *icu_collator, string_x, string_y);
}
@@ -1071,7 +1072,7 @@ BUILTIN(SegmenterPrototypeSegment) {
RETURN_RESULT_OR_FAILURE(
isolate,
JSSegmentIterator::Create(
- isolate, segmenter_holder->icu_break_iterator()->raw()->clone(),
+ isolate, segmenter_holder->icu_break_iterator().raw()->clone(),
segmenter_holder->granularity(), text));
}
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.cc b/deps/v8/src/builtins/builtins-iterator-gen.cc
index ec8cfd1d78..0484501bfb 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-iterator-gen.cc
@@ -9,15 +9,13 @@
#include "src/builtins/builtins-string-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/heap/factory-inl.h"
-#include "torque-generated/builtins-base-from-dsl-gen.h"
namespace v8 {
namespace internal {
-typedef IteratorBuiltinsFromDSLAssembler::IteratorRecord IteratorRecord;
-
+using IteratorRecord = TorqueStructIteratorRecord;
using compiler::Node;
TNode<Object> IteratorBuiltinsAssembler::GetIteratorMethod(Node* context,
@@ -270,8 +268,10 @@ void IteratorBuiltinsAssembler::FastIterableToList(
TVariable<Object>* var_result, Label* slow) {
Label done(this), check_string(this), check_map(this), check_set(this);
- GotoIfNot(IsFastJSArrayWithNoCustomIteration(context, iterable),
- &check_string);
+ GotoIfNot(
+ Word32Or(IsFastJSArrayWithNoCustomIteration(context, iterable),
+ IsFastJSArrayForReadWithNoCustomIteration(context, iterable)),
+ &check_string);
// Fast path for fast JSArray.
*var_result =
diff --git a/deps/v8/src/builtins/builtins-iterator-gen.h b/deps/v8/src/builtins/builtins-iterator-gen.h
index 2c79e9095a..cf421dc5b7 100644
--- a/deps/v8/src/builtins/builtins-iterator-gen.h
+++ b/deps/v8/src/builtins/builtins-iterator-gen.h
@@ -5,20 +5,19 @@
#ifndef V8_BUILTINS_BUILTINS_ITERATOR_GEN_H_
#define V8_BUILTINS_BUILTINS_ITERATOR_GEN_H_
-#include "src/code-stub-assembler.h"
-#include "torque-generated/builtins-base-from-dsl-gen.h"
-#include "torque-generated/builtins-iterator-from-dsl-gen.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
using compiler::Node;
-class IteratorBuiltinsAssembler : public CodeStubAssembler,
- public IteratorBuiltinsFromDSLAssembler {
+class IteratorBuiltinsAssembler : public CodeStubAssembler {
public:
explicit IteratorBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state), IteratorBuiltinsFromDSLAssembler(state) {}
+ : CodeStubAssembler(state) {}
+
+ using IteratorRecord = TorqueStructIteratorRecord;
// Returns object[Symbol.iterator].
TNode<Object> GetIteratorMethod(Node* context, Node* object);
diff --git a/deps/v8/src/builtins/builtins-json.cc b/deps/v8/src/builtins/builtins-json.cc
index c3f6672b0f..896a45389c 100644
--- a/deps/v8/src/builtins/builtins-json.cc
+++ b/deps/v8/src/builtins/builtins-json.cc
@@ -4,10 +4,10 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/json-parser.h"
-#include "src/json-stringifier.h"
-#include "src/objects-inl.h"
+#include "src/json/json-parser.h"
+#include "src/json/json-stringifier.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -22,9 +22,9 @@ BUILTIN(JsonParse) {
Object::ToString(isolate, source));
string = String::Flatten(isolate, string);
RETURN_RESULT_OR_FAILURE(
- isolate, string->IsSeqOneByteString()
- ? JsonParser<true>::Parse(isolate, string, reviver)
- : JsonParser<false>::Parse(isolate, string, reviver));
+ isolate, String::IsOneByteRepresentationUnderneath(*string)
+ ? JsonParser<uint8_t>::Parse(isolate, string, reviver)
+ : JsonParser<uint16_t>::Parse(isolate, string, reviver));
}
// ES6 section 24.3.2 JSON.stringify.
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index c1715542fc..c73cbee1bc 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -6,8 +6,8 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/feedback-vector.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/shared-function-info.h"
namespace v8 {
@@ -44,7 +44,7 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
Label fallthrough(this);
TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
- feedback_vector, FeedbackVector::kOptimizedCodeOffset);
+ feedback_vector, FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.h b/deps/v8/src/builtins/builtins-lazy-gen.h
index 7f64aa096b..6036da4661 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.h
+++ b/deps/v8/src/builtins/builtins-lazy-gen.h
@@ -5,14 +5,14 @@
#ifndef V8_BUILTINS_BUILTINS_LAZY_GEN_H_
#define V8_BUILTINS_BUILTINS_LAZY_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
class LazyBuiltinsAssembler : public CodeStubAssembler {
public:
- typedef JSTrampolineDescriptor Descriptor;
+ using Descriptor = JSTrampolineDescriptor;
explicit LazyBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
diff --git a/deps/v8/src/builtins/builtins-math-gen.cc b/deps/v8/src/builtins/builtins-math-gen.cc
index 16d1e7d234..46195e74ed 100644
--- a/deps/v8/src/builtins/builtins-math-gen.cc
+++ b/deps/v8/src/builtins/builtins-math-gen.cc
@@ -6,8 +6,8 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -141,15 +141,6 @@ void MathBuiltinsAssembler::MathRoundingOperation(
}
}
-void MathBuiltinsAssembler::MathUnaryOperation(
- Node* context, Node* x,
- TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>)) {
- Node* x_value = TruncateTaggedToFloat64(context, x);
- Node* value = (this->*float64op)(x_value);
- Node* result = AllocateHeapNumberWithValue(value);
- Return(result);
-}
-
void MathBuiltinsAssembler::MathMaxMin(
Node* context, Node* argc,
TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>,
@@ -170,61 +161,6 @@ void MathBuiltinsAssembler::MathMaxMin(
arguments.PopAndReturn(ChangeFloat64ToTagged(result.value()));
}
-// ES6 #sec-math.acos
-TF_BUILTIN(MathAcos, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Acos);
-}
-
-// ES6 #sec-math.acosh
-TF_BUILTIN(MathAcosh, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Acosh);
-}
-
-// ES6 #sec-math.asin
-TF_BUILTIN(MathAsin, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Asin);
-}
-
-// ES6 #sec-math.asinh
-TF_BUILTIN(MathAsinh, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Asinh);
-}
-
-// ES6 #sec-math.atan
-TF_BUILTIN(MathAtan, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Atan);
-}
-
-// ES6 #sec-math.atanh
-TF_BUILTIN(MathAtanh, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Atanh);
-}
-
-// ES6 #sec-math.atan2
-TF_BUILTIN(MathAtan2, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* y = Parameter(Descriptor::kY);
- Node* x = Parameter(Descriptor::kX);
-
- Node* y_value = TruncateTaggedToFloat64(context, y);
- Node* x_value = TruncateTaggedToFloat64(context, x);
- Node* value = Float64Atan2(y_value, x_value);
- Node* result = AllocateHeapNumberWithValue(value);
- Return(result);
-}
-
// ES6 #sec-math.ceil
TF_BUILTIN(MathCeil, MathBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -232,99 +168,6 @@ TF_BUILTIN(MathCeil, MathBuiltinsAssembler) {
MathRoundingOperation(context, x, &CodeStubAssembler::Float64Ceil);
}
-// ES6 #sec-math.cbrt
-TF_BUILTIN(MathCbrt, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Cbrt);
-}
-
-// ES6 #sec-math.clz32
-TF_BUILTIN(MathClz32, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
-
- // Shared entry point for the clz32 operation.
- VARIABLE(var_clz32_x, MachineRepresentation::kWord32);
- Label do_clz32(this);
-
- // We might need to loop once for ToNumber conversion.
- VARIABLE(var_x, MachineRepresentation::kTagged);
- Label loop(this, &var_x);
- var_x.Bind(Parameter(Descriptor::kX));
- Goto(&loop);
- BIND(&loop);
- {
- // Load the current {x} value.
- Node* x = var_x.value();
-
- // Check if {x} is a Smi or a HeapObject.
- Label if_xissmi(this), if_xisnotsmi(this);
- Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
-
- BIND(&if_xissmi);
- {
- var_clz32_x.Bind(SmiToInt32(x));
- Goto(&do_clz32);
- }
-
- BIND(&if_xisnotsmi);
- {
- // Check if {x} is a HeapNumber.
- Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
- Branch(IsHeapNumber(x), &if_xisheapnumber, &if_xisnotheapnumber);
-
- BIND(&if_xisheapnumber);
- {
- var_clz32_x.Bind(TruncateHeapNumberValueToWord32(x));
- Goto(&do_clz32);
- }
-
- BIND(&if_xisnotheapnumber);
- {
- // Need to convert {x} to a Number first.
- var_x.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, x));
- Goto(&loop);
- }
- }
- }
-
- BIND(&do_clz32);
- {
- Node* x_value = var_clz32_x.value();
- Node* value = Word32Clz(x_value);
- Node* result = ChangeInt32ToTagged(value);
- Return(result);
- }
-}
-
-// ES6 #sec-math.cos
-TF_BUILTIN(MathCos, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Cos);
-}
-
-// ES6 #sec-math.cosh
-TF_BUILTIN(MathCosh, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Cosh);
-}
-
-// ES6 #sec-math.exp
-TF_BUILTIN(MathExp, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Exp);
-}
-
-// ES6 #sec-math.expm1
-TF_BUILTIN(MathExpm1, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Expm1);
-}
-
// ES6 #sec-math.floor
TF_BUILTIN(MathFloor, MathBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -332,17 +175,6 @@ TF_BUILTIN(MathFloor, MathBuiltinsAssembler) {
MathRoundingOperation(context, x, &CodeStubAssembler::Float64Floor);
}
-// ES6 #sec-math.fround
-TF_BUILTIN(MathFround, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- Node* x_value = TruncateTaggedToFloat64(context, x);
- Node* value32 = TruncateFloat64ToFloat32(x_value);
- Node* value = ChangeFloat32ToFloat64(value32);
- Node* result = AllocateHeapNumberWithValue(value);
- Return(result);
-}
-
// ES6 #sec-math.imul
TF_BUILTIN(MathImul, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -355,34 +187,6 @@ TF_BUILTIN(MathImul, CodeStubAssembler) {
Return(result);
}
-// ES6 #sec-math.log
-TF_BUILTIN(MathLog, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Log);
-}
-
-// ES6 #sec-math.log1p
-TF_BUILTIN(MathLog1p, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Log1p);
-}
-
-// ES6 #sec-math.log10
-TF_BUILTIN(MathLog10, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Log10);
-}
-
-// ES6 #sec-math.log2
-TF_BUILTIN(MathLog2, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Log2);
-}
-
CodeStubAssembler::Node* MathBuiltinsAssembler::MathPow(Node* context,
Node* base,
Node* exponent) {
@@ -446,61 +250,6 @@ TF_BUILTIN(MathRound, MathBuiltinsAssembler) {
MathRoundingOperation(context, x, &CodeStubAssembler::Float64Round);
}
-// ES6 #sec-math.sign
-TF_BUILTIN(MathSign, CodeStubAssembler) {
- // Convert the {x} value to a Number.
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- Node* x_value = TruncateTaggedToFloat64(context, x);
-
- // Return -1 if {x} is negative, 1 if {x} is positive, or {x} itself.
- Label if_xisnegative(this), if_xispositive(this);
- GotoIf(Float64LessThan(x_value, Float64Constant(0.0)), &if_xisnegative);
- GotoIf(Float64LessThan(Float64Constant(0.0), x_value), &if_xispositive);
- Return(ChangeFloat64ToTagged(x_value));
-
- BIND(&if_xisnegative);
- Return(SmiConstant(-1));
-
- BIND(&if_xispositive);
- Return(SmiConstant(1));
-}
-
-// ES6 #sec-math.sin
-TF_BUILTIN(MathSin, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Sin);
-}
-
-// ES6 #sec-math.sinh
-TF_BUILTIN(MathSinh, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Sinh);
-}
-
-// ES6 #sec-math.sqrt
-TF_BUILTIN(MathSqrt, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Sqrt);
-}
-
-// ES6 #sec-math.tan
-TF_BUILTIN(MathTan, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Tan);
-}
-
-// ES6 #sec-math.tanh
-TF_BUILTIN(MathTanh, MathBuiltinsAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* x = Parameter(Descriptor::kX);
- MathUnaryOperation(context, x, &CodeStubAssembler::Float64Tanh);
-}
-
// ES6 #sec-math.trunc
TF_BUILTIN(MathTrunc, MathBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
diff --git a/deps/v8/src/builtins/builtins-math-gen.h b/deps/v8/src/builtins/builtins-math-gen.h
index 7b9079b6e9..4bb76d9692 100644
--- a/deps/v8/src/builtins/builtins-math-gen.h
+++ b/deps/v8/src/builtins/builtins-math-gen.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_MATH_GEN_H_
#define V8_BUILTINS_BUILTINS_MATH_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -21,9 +21,6 @@ class MathBuiltinsAssembler : public CodeStubAssembler {
void MathRoundingOperation(
Node* context, Node* x,
TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>));
- void MathUnaryOperation(
- Node* context, Node* x,
- TNode<Float64T> (CodeStubAssembler::*float64op)(SloppyTNode<Float64T>));
void MathMaxMin(Node* context, Node* argc,
TNode<Float64T> (CodeStubAssembler::*float64op)(
SloppyTNode<Float64T>, SloppyTNode<Float64T>),
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
index ae2bf03d69..6d3274a4a5 100644
--- a/deps/v8/src/builtins/builtins-math.cc
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -4,8 +4,8 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
index 08c5a96193..4987787c35 100644
--- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/builtins/builtins-utils-gen.h"
-#include "src/code-stub-assembler.h"
-#include "src/microtask-queue.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/execution/microtask-queue.h"
#include "src/objects/js-weak-refs.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/promise.h"
diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc
index 582f6242ad..5b3af79f00 100644
--- a/deps/v8/src/builtins/builtins-number-gen.cc
+++ b/deps/v8/src/builtins/builtins-number-gen.cc
@@ -5,7 +5,7 @@
#include "src/builtins/builtins-math-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/ic/binary-op-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index d1c13307e8..929e686604 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -4,10 +4,10 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/conversions.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index da265356fd..314331d498 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -6,14 +6,14 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/heap/factory-inl.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/keyed-store-generic.h"
#include "src/objects/js-generator.h"
#include "src/objects/property-descriptor-object.h"
+#include "src/objects/property-details.h"
#include "src/objects/shared-function-info.h"
-#include "src/property-details.h"
namespace v8 {
namespace internal {
@@ -21,7 +21,7 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 19.1 Object Objects
-typedef compiler::Node Node;
+using Node = compiler::Node;
template <class T>
using TNode = CodeStubAssembler::TNode<T>;
@@ -48,9 +48,6 @@ class ObjectBuiltinsAssembler : public CodeStubAssembler {
Node* IsSpecialReceiverMap(SloppyTNode<Map> map);
TNode<Word32T> IsStringWrapperElementsKind(TNode<Map> map);
-
- void ObjectAssignFast(TNode<Context> context, TNode<JSReceiver> to,
- TNode<Object> from, Label* slow);
};
class ObjectEntriesValuesBuiltinsAssembler : public ObjectBuiltinsAssembler {
@@ -190,8 +187,8 @@ TNode<BoolT> ObjectEntriesValuesBuiltinsAssembler::IsPropertyKindData(
TNode<Uint32T> ObjectEntriesValuesBuiltinsAssembler::HasHiddenPrototype(
TNode<Map> map) {
- TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
- return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field3);
+ TNode<Uint32T> bit_field2 = Unsigned(LoadMapBitField2(map));
+ return DecodeWord32<Map::HasHiddenPrototypeBit>(bit_field2);
}
void ObjectEntriesValuesBuiltinsAssembler::GetOwnValuesOrEntries(
@@ -499,18 +496,8 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
// second argument.
// 4. For each element nextSource of sources, in ascending index order,
args.ForEach(
- [=](Node* next_source_) {
- TNode<Object> next_source = CAST(next_source_);
- Label slow(this), cont(this);
- ObjectAssignFast(context, to, next_source, &slow);
- Goto(&cont);
-
- BIND(&slow);
- {
- CallRuntime(Runtime::kSetDataProperties, context, to, next_source);
- Goto(&cont);
- }
- BIND(&cont);
+ [=](Node* next_source) {
+ CallBuiltin(Builtins::kSetDataProperties, context, to, next_source);
},
IntPtrConstant(1));
Goto(&done);
@@ -520,53 +507,6 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
args.PopAndReturn(to);
}
-// This function mimics what FastAssign() function does for C++ implementation.
-void ObjectBuiltinsAssembler::ObjectAssignFast(TNode<Context> context,
- TNode<JSReceiver> to,
- TNode<Object> from,
- Label* slow) {
- Label done(this);
-
- // Non-empty strings are the only non-JSReceivers that need to be handled
- // explicitly by Object.assign.
- GotoIf(TaggedIsSmi(from), &done);
- TNode<Map> from_map = LoadMap(CAST(from));
- TNode<Int32T> from_instance_type = LoadMapInstanceType(from_map);
- {
- Label cont(this);
- GotoIf(IsJSReceiverInstanceType(from_instance_type), &cont);
- GotoIfNot(IsStringInstanceType(from_instance_type), &done);
- {
- Branch(
- Word32Equal(LoadStringLengthAsWord32(CAST(from)), Int32Constant(0)),
- &done, slow);
- }
- BIND(&cont);
- }
-
- // If the target is deprecated, the object will be updated on first store. If
- // the source for that store equals the target, this will invalidate the
- // cached representation of the source. Handle this case in runtime.
- TNode<Map> to_map = LoadMap(to);
- GotoIf(IsDeprecatedMap(to_map), slow);
- TNode<BoolT> to_is_simple_receiver = IsSimpleObjectMap(to_map);
-
- GotoIfNot(IsJSObjectInstanceType(from_instance_type), slow);
- GotoIfNot(IsEmptyFixedArray(LoadElements(CAST(from))), slow);
-
- ForEachEnumerableOwnProperty(
- context, from_map, CAST(from), kEnumerationOrder,
- [=](TNode<Name> key, TNode<Object> value) {
- KeyedStoreGenericGenerator::SetProperty(state(), context, to,
- to_is_simple_receiver, key,
- value, LanguageMode::kStrict);
- },
- slow);
-
- Goto(&done);
- BIND(&done);
-}
-
// ES #sec-object.keys
TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
Node* object = Parameter(Descriptor::kObject);
diff --git a/deps/v8/src/builtins/builtins-object-gen.h b/deps/v8/src/builtins/builtins-object-gen.h
index 9489f0d1e0..fa0024cde2 100644
--- a/deps/v8/src/builtins/builtins-object-gen.h
+++ b/deps/v8/src/builtins/builtins-object-gen.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_OBJECT_GEN_H_
#define V8_BUILTINS_BUILTINS_OBJECT_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 6aa20e07a4..59e4373f98 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -4,14 +4,14 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/code-factory.h"
+#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/keys.h"
-#include "src/lookup.h"
-#include "src/message-template.h"
-#include "src/objects-inl.h"
-#include "src/property-descriptor.h"
+#include "src/logging/counters.h"
+#include "src/objects/keys.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-promise-gen.cc b/deps/v8/src/builtins/builtins-promise-gen.cc
index 513a595822..ad70fb1dd1 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.cc
+++ b/deps/v8/src/builtins/builtins-promise-gen.cc
@@ -9,19 +9,19 @@
#include "src/builtins/builtins-promise.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/objects/js-promise.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
namespace v8 {
namespace internal {
-typedef compiler::Node Node;
+using Node = compiler::Node;
template <class T>
using TNode = CodeStubAssembler::TNode<T>;
-using IteratorRecord = IteratorBuiltinsAssembler::IteratorRecord;
+using IteratorRecord = TorqueStructIteratorRecord;
Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
Node* const native_context = LoadNativeContext(context);
@@ -516,7 +516,8 @@ Node* PromiseBuiltinsAssembler::AllocatePromiseReaction(
Node* PromiseBuiltinsAssembler::AllocatePromiseReactionJobTask(
Node* map, Node* context, Node* argument, Node* handler,
Node* promise_or_capability) {
- Node* const microtask = Allocate(PromiseReactionJobTask::kSize);
+ Node* const microtask =
+ Allocate(PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks);
StoreMapNoWriteBarrier(microtask, map);
StoreObjectFieldNoWriteBarrier(
microtask, PromiseReactionJobTask::kArgumentOffset, argument);
@@ -640,8 +641,10 @@ Node* PromiseBuiltinsAssembler::TriggerPromiseReactions(
// Morph {current} from a PromiseReaction into a PromiseReactionJobTask
// and schedule that on the microtask queue. We try to minimize the number
// of stores here to avoid screwing up the store buffer.
- STATIC_ASSERT(static_cast<int>(PromiseReaction::kSize) ==
- static_cast<int>(PromiseReactionJobTask::kSize));
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kSize) ==
+ static_cast<int>(
+ PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks));
if (type == PromiseReaction::kFulfill) {
StoreMapNoWriteBarrier(current,
RootIndex::kPromiseFulfillReactionJobTaskMap);
@@ -722,20 +725,18 @@ Node* PromiseBuiltinsAssembler::InvokeThen(Node* native_context, Node* receiver,
return var_result.value();
}
-Node* PromiseBuiltinsAssembler::InvokeResolve(Node* native_context,
- Node* constructor, Node* value,
- Label* if_exception,
- Variable* var_exception) {
+Node* PromiseBuiltinsAssembler::CallResolve(Node* native_context,
+ Node* constructor, Node* resolve,
+ Node* value, Label* if_exception,
+ Variable* var_exception) {
CSA_ASSERT(this, IsNativeContext(native_context));
-
+ CSA_ASSERT(this, IsConstructor(constructor));
VARIABLE(var_result, MachineRepresentation::kTagged);
Label if_fast(this), if_slow(this, Label::kDeferred), done(this, &var_result);
- // We can skip the "resolve" lookup on {constructor} if it's the
- // Promise constructor and the Promise.resolve protector is intact,
- // as that guards the lookup path for the "resolve" property on the
- // Promise constructor.
- BranchIfPromiseResolveLookupChainIntact(native_context, constructor, &if_fast,
- &if_slow);
+
+ // Undefined can never be a valid value for the resolve function,
+ // instead it is used as a special marker for the fast path.
+ Branch(IsUndefined(resolve), &if_fast, &if_slow);
BIND(&if_fast);
{
@@ -749,9 +750,7 @@ Node* PromiseBuiltinsAssembler::InvokeResolve(Node* native_context,
BIND(&if_slow);
{
- Node* const resolve =
- GetProperty(native_context, constructor, factory()->resolve_string());
- GotoIfException(resolve, if_exception, var_exception);
+ CSA_ASSERT(this, IsCallable(resolve));
Node* const result = CallJS(
CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
@@ -2047,8 +2046,32 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
TVARIABLE(Smi, var_index, SmiConstant(1));
Label loop(this, &var_index), done_loop(this),
too_many_elements(this, Label::kDeferred),
- close_iterator(this, Label::kDeferred);
+ close_iterator(this, Label::kDeferred), if_slow(this, Label::kDeferred);
+
+ // We can skip the "resolve" lookup on {constructor} if it's the
+ // Promise constructor and the Promise.resolve protector is intact,
+ // as that guards the lookup path for the "resolve" property on the
+ // Promise constructor.
+ TVARIABLE(Object, var_promise_resolve_function, UndefinedConstant());
+ GotoIfNotPromiseResolveLookupChainIntact(native_context, constructor,
+ &if_slow);
Goto(&loop);
+
+ BIND(&if_slow);
+ {
+ // 5. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
+ TNode<Object> resolve =
+ GetProperty(native_context, constructor, factory()->resolve_string());
+ GotoIfException(resolve, if_exception, var_exception);
+
+ // 6. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError*
+ // exception.
+ ThrowIfNotCallable(CAST(context), resolve, "resolve");
+
+ var_promise_resolve_function = resolve;
+ Goto(&loop);
+ }
+
BIND(&loop);
{
// Let next be IteratorStep(iteratorRecord.[[Iterator]]).
@@ -2120,8 +2143,7 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
// the PromiseReaction (aka we can pass undefined to PerformPromiseThen),
// since this is only necessary for DevTools and PromiseHooks.
Label if_fast(this), if_slow(this);
- GotoIfNotPromiseResolveLookupChainIntact(native_context, constructor,
- &if_slow);
+ GotoIfNot(IsUndefined(var_promise_resolve_function.value()), &if_slow);
GotoIf(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
&if_slow);
GotoIf(IsPromiseSpeciesProtectorCellInvalid(), &if_slow);
@@ -2142,10 +2164,11 @@ Node* PromiseBuiltinsAssembler::PerformPromiseAll(
BIND(&if_slow);
{
- // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
- Node* const next_promise =
- InvokeResolve(native_context, constructor, next_value,
- &close_iterator, var_exception);
+ // Let nextPromise be ? Call(constructor, _promiseResolve_, « nextValue
+ // »).
+ Node* const next_promise = CallResolve(
+ native_context, constructor, var_promise_resolve_function.value(),
+ next_value, &close_iterator, var_exception);
// Perform ? Invoke(nextPromise, "then", « resolveElement,
// resultCapability.[[Reject]] »).
@@ -2587,11 +2610,34 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
// Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability).
{
- Label loop(this), break_loop(this);
+ // We can skip the "resolve" lookup on {constructor} if it's the
+ // Promise constructor and the Promise.resolve protector is intact,
+ // as that guards the lookup path for the "resolve" property on the
+ // Promise constructor.
+ Label loop(this), break_loop(this), if_slow(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ TVARIABLE(Object, var_promise_resolve_function, UndefinedConstant());
+ GotoIfNotPromiseResolveLookupChainIntact(native_context, receiver,
+ &if_slow);
Goto(&loop);
+
+ BIND(&if_slow);
+ {
+ // 3. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`).
+ TNode<Object> resolve =
+ GetProperty(native_context, receiver, factory()->resolve_string());
+ GotoIfException(resolve, &reject_promise, &var_exception);
+
+ // 4. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError*
+ // exception.
+ ThrowIfNotCallable(CAST(context), resolve, "resolve");
+
+ var_promise_resolve_function = resolve;
+ Goto(&loop);
+ }
+
BIND(&loop);
{
- Node* const native_context = LoadNativeContext(context);
Node* const fast_iterator_result_map = LoadContextElement(
native_context, Context::ITERATOR_RESULT_MAP_INDEX);
@@ -2610,10 +2656,11 @@ TF_BUILTIN(PromiseRace, PromiseBuiltinsAssembler) {
iter_assembler.IteratorValue(context, next, fast_iterator_result_map,
&reject_promise, &var_exception);
- // Let nextPromise be ? Invoke(constructor, "resolve", « nextValue »).
- Node* const next_promise =
- InvokeResolve(native_context, receiver, next_value, &close_iterator,
- &var_exception);
+ // Let nextPromise be ? Call(constructor, _promiseResolve_, « nextValue
+ // »).
+ Node* const next_promise = CallResolve(
+ native_context, receiver, var_promise_resolve_function.value(),
+ next_value, &close_iterator, &var_exception);
// Perform ? Invoke(nextPromise, "then", « resolveElement,
// resultCapability.[[Reject]] »).
diff --git a/deps/v8/src/builtins/builtins-promise-gen.h b/deps/v8/src/builtins/builtins-promise-gen.h
index b0555b2594..71443ca920 100644
--- a/deps/v8/src/builtins/builtins-promise-gen.h
+++ b/deps/v8/src/builtins/builtins-promise-gen.h
@@ -5,15 +5,13 @@
#ifndef V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
#define V8_BUILTINS_BUILTINS_PROMISE_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/objects/promise.h"
-#include "torque-generated/builtins-base-from-dsl-gen.h"
-#include "torque-generated/builtins-iterator-from-dsl-gen.h"
namespace v8 {
namespace internal {
-typedef compiler::CodeAssemblerState CodeAssemblerState;
+using CodeAssemblerState = compiler::CodeAssemblerState;
class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
public:
@@ -115,8 +113,10 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* receiver_map, Label* if_fast,
Label* if_slow);
- Node* InvokeResolve(Node* native_context, Node* constructor, Node* value,
- Label* if_exception, Variable* var_exception);
+ // If resolve is Undefined, we use the builtin %PromiseResolve%
+ // intrinsic, otherwise we use the given resolve function.
+ Node* CallResolve(Node* native_context, Node* constructor, Node* resolve,
+ Node* value, Label* if_exception, Variable* var_exception);
template <typename... TArgs>
Node* InvokeThen(Node* native_context, Node* receiver, TArgs... args);
@@ -131,14 +131,14 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
Node* CreateThrowerFunction(Node* reason, Node* native_context);
- typedef std::function<TNode<Object>(TNode<Context> context, TNode<Smi> index,
- TNode<NativeContext> native_context,
- TNode<PromiseCapability> capability)>
- PromiseAllResolvingElementFunction;
+ using PromiseAllResolvingElementFunction =
+ std::function<TNode<Object>(TNode<Context> context, TNode<Smi> index,
+ TNode<NativeContext> native_context,
+ TNode<PromiseCapability> capability)>;
Node* PerformPromiseAll(
Node* context, Node* constructor, Node* capability,
- const IteratorBuiltinsFromDSLAssembler::IteratorRecord& record,
+ const TorqueStructIteratorRecord& record,
const PromiseAllResolvingElementFunction& create_resolve_element_function,
const PromiseAllResolvingElementFunction& create_reject_element_function,
Label* if_exception, Variable* var_exception);
@@ -170,10 +170,10 @@ class V8_EXPORT_PRIVATE PromiseBuiltinsAssembler : public CodeStubAssembler {
const PromiseAllResolvingElementFunction& create_resolve_element_function,
const PromiseAllResolvingElementFunction& create_reject_element_function);
- typedef std::function<TNode<Object>(TNode<Context> context,
- TNode<NativeContext> native_context,
- TNode<Object> value)>
- CreatePromiseAllResolveElementFunctionValue;
+ using CreatePromiseAllResolveElementFunctionValue =
+ std::function<TNode<Object>(TNode<Context> context,
+ TNode<NativeContext> native_context,
+ TNode<Object> value)>;
void Generate_PromiseAllResolveElementClosure(
TNode<Context> context, TNode<Object> value, TNode<JSFunction> function,
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
index f742252050..5eca1eb9c0 100644
--- a/deps/v8/src/builtins/builtins-promise.cc
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -6,9 +6,9 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-promise.h b/deps/v8/src/builtins/builtins-promise.h
index 66545feafe..a97ab7ad1d 100644
--- a/deps/v8/src/builtins/builtins-promise.h
+++ b/deps/v8/src/builtins/builtins-promise.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_PROMISE_H_
#define V8_BUILTINS_BUILTINS_PROMISE_H_
-#include "src/contexts.h"
+#include "src/objects/contexts.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 825e229451..a1a2f6308f 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -7,8 +7,8 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -262,246 +262,22 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
{ ThrowTypeError(context, MessageTemplate::kProxyRevoked, "construct"); }
}
-TF_BUILTIN(ProxyHasProperty, ProxiesCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* proxy = Parameter(Descriptor::kProxy);
- Node* name = Parameter(Descriptor::kName);
-
- CSA_ASSERT(this, IsJSProxy(proxy));
-
- PerformStackCheck(CAST(context));
-
- // 1. Assert: IsPropertyKey(P) is true.
- CSA_ASSERT(this, IsName(name));
- CSA_ASSERT(this, Word32Equal(IsPrivateSymbol(name), Int32Constant(0)));
-
- Label throw_proxy_handler_revoked(this, Label::kDeferred),
- trap_undefined(this),
- if_try_get_own_property_bailout(this, Label::kDeferred),
- trap_not_callable(this, Label::kDeferred), return_true(this),
- return_false(this), check_target_desc(this);
-
- // 2. Let handler be O.[[ProxyHandler]].
- Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
-
- // 3. If handler is null, throw a TypeError exception.
- // 4. Assert: Type(handler) is Object.
- GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
-
- // 5. Let target be O.[[ProxyTarget]].
- Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
-
- // 6. Let trap be ? GetMethod(handler, "has").
- // 7. If trap is undefined, then (see 7.a below).
- Handle<Name> trap_name = factory()->has_string();
- Node* trap = GetMethod(context, handler, trap_name, &trap_undefined);
-
- GotoIf(TaggedIsSmi(trap), &trap_not_callable);
- GotoIfNot(IsCallable(trap), &trap_not_callable);
-
- // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, « target, P
- // »)).
- BranchIfToBooleanIsTrue(CallJS(CodeFactory::Call(isolate()), context, trap,
- handler, target, name),
- &return_true, &check_target_desc);
-
- BIND(&check_target_desc);
- {
- // 9. If booleanTrapResult is false, then (see 9.a. in CheckHasTrapResult).
- CheckHasTrapResult(context, target, proxy, name, &return_false,
- &if_try_get_own_property_bailout);
- }
-
- BIND(&if_try_get_own_property_bailout);
- {
- CallRuntime(Runtime::kCheckProxyHasTrap, context, name, target);
- Return(FalseConstant());
- }
-
- BIND(&trap_undefined);
- {
- // 7.a. Return ? target.[[HasProperty]](P).
- TailCallBuiltin(Builtins::kHasProperty, context, target, name);
- }
-
- BIND(&return_false);
- Return(FalseConstant());
-
- BIND(&return_true);
- Return(TrueConstant());
-
- BIND(&throw_proxy_handler_revoked);
- ThrowTypeError(context, MessageTemplate::kProxyRevoked, "has");
-
- BIND(&trap_not_callable);
- ThrowTypeError(context, MessageTemplate::kPropertyNotFunction, trap,
- StringConstant("has"), proxy);
-}
-
-TF_BUILTIN(ProxyGetProperty, ProxiesCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* proxy = Parameter(Descriptor::kProxy);
- Node* name = Parameter(Descriptor::kName);
- Node* receiver = Parameter(Descriptor::kReceiverValue);
- Node* on_non_existent = Parameter(Descriptor::kOnNonExistent);
-
- CSA_ASSERT(this, IsJSProxy(proxy));
-
- // 1. Assert: IsPropertyKey(P) is true.
- CSA_ASSERT(this, TaggedIsNotSmi(name));
- CSA_ASSERT(this, IsName(name));
- CSA_ASSERT(this, Word32Equal(IsPrivateSymbol(name), Int32Constant(0)));
-
- Label throw_proxy_handler_revoked(this, Label::kDeferred),
- trap_undefined(this);
-
- // 2. Let handler be O.[[ProxyHandler]].
- Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
-
- // 3. If handler is null, throw a TypeError exception.
- GotoIf(IsNull(handler), &throw_proxy_handler_revoked);
-
- // 4. Assert: Type(handler) is Object.
- CSA_ASSERT(this, IsJSReceiver(handler));
-
- // 5. Let target be O.[[ProxyTarget]].
- Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
-
- // 6. Let trap be ? GetMethod(handler, "get").
- // 7. If trap is undefined, then (see 7.a below).
- Handle<Name> trap_name = factory()->get_string();
- Node* trap = GetMethod(context, handler, trap_name, &trap_undefined);
-
- // 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
- Node* trap_result = CallJS(
- CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
- context, trap, handler, target, name, receiver);
-
- // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
- Label return_result(this);
- CheckGetSetTrapResult(context, target, proxy, name, trap_result,
- &return_result, JSProxy::kGet);
-
- BIND(&return_result);
- {
- // 11. Return trapResult.
- Return(trap_result);
- }
-
- BIND(&trap_undefined);
- {
- // 7.a. Return ? target.[[Get]](P, Receiver).
- // TODO(mslekova): Introduce GetPropertyWithReceiver stub
- Return(CallRuntime(Runtime::kGetPropertyWithReceiver, context, target, name,
- receiver, on_non_existent));
- }
-
- BIND(&throw_proxy_handler_revoked);
- ThrowTypeError(context, MessageTemplate::kProxyRevoked, "get");
-}
-
-TF_BUILTIN(ProxySetProperty, ProxiesCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* proxy = Parameter(Descriptor::kProxy);
- Node* name = Parameter(Descriptor::kName);
- Node* value = Parameter(Descriptor::kValue);
- Node* receiver = Parameter(Descriptor::kReceiverValue);
-
- CSA_ASSERT(this, IsJSProxy(proxy));
-
- // 1. Assert: IsPropertyKey(P) is true.
- CSA_ASSERT(this, TaggedIsNotSmi(name));
- CSA_ASSERT(this, IsName(name));
-
- Label throw_proxy_handler_revoked(this, Label::kDeferred),
- trap_undefined(this), failure(this, Label::kDeferred),
- continue_checks(this), success(this),
- private_symbol(this, Label::kDeferred);
-
- GotoIf(IsPrivateSymbol(name), &private_symbol);
-
- // 2. Let handler be O.[[ProxyHandler]].
- Node* handler = LoadObjectField(proxy, JSProxy::kHandlerOffset);
-
- // 3. If handler is null, throw a TypeError exception.
- GotoIfNot(IsJSReceiver(handler), &throw_proxy_handler_revoked);
-
- // 4. Assert: Type(handler) is Object.
- CSA_ASSERT(this, IsJSReceiver(handler));
-
- // 5. Let target be O.[[ProxyTarget]].
- Node* target = LoadObjectField(proxy, JSProxy::kTargetOffset);
-
- // 6. Let trap be ? GetMethod(handler, "set").
- // 7. If trap is undefined, then (see 7.a below).
- Handle<Name> set_string = factory()->set_string();
- Node* trap = GetMethod(context, handler, set_string, &trap_undefined);
-
- // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
- // « target, P, V, Receiver »)).
- // 9. If booleanTrapResult is false, return false.
- BranchIfToBooleanIsTrue(
- CallJS(CodeFactory::Call(isolate(),
- ConvertReceiverMode::kNotNullOrUndefined),
- context, trap, handler, target, name, value, receiver),
- &continue_checks, &failure);
-
- BIND(&continue_checks);
- {
- // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
- Label return_result(this);
- CheckGetSetTrapResult(context, target, proxy, name, value, &success,
- JSProxy::kSet);
- }
-
- BIND(&failure);
- {
- CallRuntime(Runtime::kThrowTypeErrorIfStrict, context,
- SmiConstant(MessageTemplate::kProxyTrapReturnedFalsishFor),
- HeapConstant(set_string), name);
- Goto(&success);
- }
-
- // 12. Return true.
- BIND(&success);
- Return(value);
-
- BIND(&private_symbol);
- {
- Label failure(this);
-
- CallRuntime(Runtime::kThrowTypeErrorIfStrict, context,
- SmiConstant(MessageTemplate::kProxyPrivate));
- Return(UndefinedConstant());
- }
-
- BIND(&trap_undefined);
- {
- // 7.a. Return ? target.[[Set]](P, V, Receiver).
- CallRuntime(Runtime::kSetPropertyWithReceiver, context, target, name, value,
- receiver);
- Return(value);
- }
-
- BIND(&throw_proxy_handler_revoked);
- ThrowTypeError(context, MessageTemplate::kProxyRevoked, "set");
-}
-
-void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
+Node* ProxiesCodeStubAssembler::CheckGetSetTrapResult(
Node* context, Node* target, Node* proxy, Node* name, Node* trap_result,
- Label* check_passed, JSProxy::AccessKind access_kind) {
+ JSProxy::AccessKind access_kind) {
Node* map = LoadMap(target);
VARIABLE(var_value, MachineRepresentation::kTagged);
VARIABLE(var_details, MachineRepresentation::kWord32);
VARIABLE(var_raw_value, MachineRepresentation::kTagged);
- Label if_found_value(this), check_in_runtime(this, Label::kDeferred);
+ Label if_found_value(this), check_in_runtime(this, Label::kDeferred),
+ check_passed(this);
GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime);
Node* instance_type = LoadInstanceType(target);
TryGetOwnProperty(context, target, target, map, instance_type, name,
&if_found_value, &var_value, &var_details, &var_raw_value,
- check_passed, &check_in_runtime, kReturnAccessorPair);
+ &check_passed, &check_in_runtime, kReturnAccessorPair);
BIND(&if_found_value);
{
@@ -513,7 +289,7 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
// false, then:
GotoIfNot(IsSetWord32(var_details.value(),
PropertyDetails::kAttributesDontDeleteMask),
- check_passed);
+ &check_passed);
// If IsDataDescriptor(targetDesc) is true and
// targetDesc.[[Writable]] is false, then:
@@ -523,11 +299,11 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
{
Node* read_only = IsSetWord32(var_details.value(),
PropertyDetails::kAttributesReadOnlyMask);
- GotoIfNot(read_only, check_passed);
+ GotoIfNot(read_only, &check_passed);
// If SameValue(trapResult, targetDesc.[[Value]]) is false,
// throw a TypeError exception.
- BranchIfSameValue(trap_result, var_value.value(), check_passed,
+ BranchIfSameValue(trap_result, var_value.value(), &check_passed,
&throw_non_configurable_data);
}
@@ -545,7 +321,7 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
// defined it's set as null.
GotoIf(IsUndefined(getter), &continue_check);
GotoIf(IsNull(getter), &continue_check);
- Goto(check_passed);
+ Goto(&check_passed);
// 10.b.i. If trapResult is not undefined, throw a TypeError exception.
BIND(&continue_check);
@@ -558,14 +334,7 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
GotoIf(IsUndefined(setter), &throw_non_configurable_accessor);
GotoIf(IsNull(setter), &throw_non_configurable_accessor);
}
- Goto(check_passed);
- }
-
- BIND(&check_in_runtime);
- {
- CallRuntime(Runtime::kCheckProxyGetSetTrapResult, context, name, target,
- trap_result, SmiConstant(access_kind));
- Return(trap_result);
+ Goto(&check_passed);
}
BIND(&throw_non_configurable_data);
@@ -588,13 +357,21 @@ void ProxiesCodeStubAssembler::CheckGetSetTrapResult(
ThrowTypeError(context, MessageTemplate::kProxySetFrozenAccessor, name);
}
}
+
+ BIND(&check_in_runtime);
+ {
+ CallRuntime(Runtime::kCheckProxyGetSetTrapResult, context, name, target,
+ trap_result, SmiConstant(access_kind));
+ Goto(&check_passed);
+ }
+
+ BIND(&check_passed);
+ return trap_result;
}
}
-void ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
- Node* proxy, Node* name,
- Label* check_passed,
- Label* if_bailout) {
+Node* ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
+ Node* proxy, Node* name) {
Node* target_map = LoadMap(target);
VARIABLE(var_value, MachineRepresentation::kTagged);
VARIABLE(var_details, MachineRepresentation::kWord32);
@@ -602,14 +379,15 @@ void ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
Label if_found_value(this, Label::kDeferred),
throw_non_configurable(this, Label::kDeferred),
- throw_non_extensible(this, Label::kDeferred);
+ throw_non_extensible(this, Label::kDeferred), check_passed(this),
+ check_in_runtime(this, Label::kDeferred);
// 9.a. Let targetDesc be ? target.[[GetOwnProperty]](P).
- GotoIfNot(IsUniqueNameNoIndex(CAST(name)), if_bailout);
+ GotoIfNot(IsUniqueNameNoIndex(CAST(name)), &check_in_runtime);
Node* instance_type = LoadInstanceType(target);
TryGetOwnProperty(context, target, target, target_map, instance_type, name,
&if_found_value, &var_value, &var_details, &var_raw_value,
- check_passed, if_bailout, kReturnAccessorPair);
+ &check_passed, &check_in_runtime, kReturnAccessorPair);
// 9.b. If targetDesc is not undefined, then (see 9.b.i. below).
BIND(&if_found_value);
@@ -625,7 +403,7 @@ void ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
// 9.b.iii. If extensibleTarget is false, throw a TypeError exception.
GotoIfNot(target_extensible, &throw_non_extensible);
- Goto(check_passed);
+ Goto(&check_passed);
}
BIND(&throw_non_configurable);
@@ -633,6 +411,15 @@ void ProxiesCodeStubAssembler::CheckHasTrapResult(Node* context, Node* target,
BIND(&throw_non_extensible);
{ ThrowTypeError(context, MessageTemplate::kProxyHasNonExtensible, name); }
+
+ BIND(&check_in_runtime);
+ {
+ CallRuntime(Runtime::kCheckProxyHasTrapResult, context, name, target);
+ Goto(&check_passed);
+ }
+
+ BIND(&check_passed);
+ return FalseConstant();
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h
index 01479d9999..fcaac7df66 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.h
+++ b/deps/v8/src/builtins/builtins-proxy-gen.h
@@ -5,9 +5,8 @@
#ifndef V8_BUILTINS_BUILTINS_PROXY_GEN_H_
#define V8_BUILTINS_BUILTINS_PROXY_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/objects/js-proxy.h"
-#include "torque-generated/builtins-proxy-from-dsl-gen.h"
namespace v8 {
namespace internal {
@@ -18,16 +17,6 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
explicit ProxiesCodeStubAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
- // ES6 section 9.5.8 [[Get]] ( P, Receiver )
- // name should not be an index.
- Node* ProxyGetProperty(Node* context, Node* proxy, Node* name,
- Node* receiver);
-
- // ES6 section 9.5.9 [[Set]] ( P, V, Receiver )
- // name should not be an index.
- Node* ProxySetProperty(Node* context, Node* proxy, Node* name, Node* value,
- Node* receiver);
-
Node* AllocateProxy(Node* target, Node* handler, Node* context);
Node* AllocateProxyRevokeFunction(Node* proxy, Node* context);
@@ -35,6 +24,13 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
// TODO(v8:9120): Remove this once torque support exists
Node* GetProxyConstructorJSNewTarget();
+ Node* CheckGetSetTrapResult(Node* context, Node* target, Node* proxy,
+ Node* name, Node* trap_result,
+ JSProxy::AccessKind access_kind);
+
+ Node* CheckHasTrapResult(Node* context, Node* target, Node* proxy,
+ Node* name);
+
protected:
enum ProxyRevokeFunctionContextSlot {
kProxySlot = Context::MIN_CONTEXT_SLOTS,
@@ -44,12 +40,6 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler {
Node* AllocateJSArrayForCodeStubArguments(Node* context,
CodeStubArguments& args, Node* argc,
ParameterMode mode);
- void CheckHasTrapResult(Node* context, Node* target, Node* proxy, Node* name,
- Label* check_passed, Label* if_bailout);
-
- void CheckGetSetTrapResult(Node* context, Node* target, Node* proxy,
- Node* name, Node* trap_result, Label* if_not_found,
- JSProxy::AccessKind access_kind);
private:
Node* CreateProxyRevokeFunctionContext(Node* proxy, Node* native_context);
diff --git a/deps/v8/src/builtins/builtins-reflect-gen.cc b/deps/v8/src/builtins/builtins-reflect-gen.cc
index 52e08275c7..dade25b7c7 100644
--- a/deps/v8/src/builtins/builtins-reflect-gen.cc
+++ b/deps/v8/src/builtins/builtins-reflect-gen.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index a81d5173a0..e998652dad 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -4,11 +4,11 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/keys.h"
-#include "src/lookup.h"
-#include "src/objects-inl.h"
-#include "src/property-descriptor.h"
+#include "src/logging/counters.h"
+#include "src/objects/keys.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 1a0e014e3e..51ee2796e6 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -8,10 +8,10 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/builtins/growable-fixed-array-gen.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/counters.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/heap/factory-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
@@ -35,61 +35,46 @@ TNode<IntPtrT> RegExpBuiltinsAssembler::IntPtrZero() {
TNode<JSRegExpResult> RegExpBuiltinsAssembler::AllocateRegExpResult(
TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
- TNode<String> input) {
-#ifdef DEBUG
- TNode<Smi> max_length = SmiConstant(JSArray::kInitialMaxFastElementArray);
- CSA_ASSERT(this, SmiLessThanOrEqual(length, max_length));
-#endif // DEBUG
+ TNode<String> input, TNode<FixedArray>* elements_out) {
+ CSA_ASSERT(this, SmiLessThanOrEqual(
+ length, SmiConstant(JSArray::kMaxFastArrayLength)));
+ CSA_ASSERT(this, SmiGreaterThan(length, SmiConstant(0)));
- // Allocate the JSRegExpResult together with its elements fixed array.
- // Initial preparations first.
+ // Allocate.
- TNode<IntPtrT> length_intptr = SmiUntag(length);
const ElementsKind elements_kind = PACKED_ELEMENTS;
+ TNode<Map> map = CAST(LoadContextElement(LoadNativeContext(context),
+ Context::REGEXP_RESULT_MAP_INDEX));
+ Node* no_allocation_site = nullptr;
+ TNode<IntPtrT> length_intptr = SmiUntag(length);
+ TNode<IntPtrT> capacity = length_intptr;
- TNode<IntPtrT> elements_size = GetFixedArrayAllocationSize(
- length_intptr, elements_kind, INTPTR_PARAMETERS);
- TNode<IntPtrT> total_size =
- IntPtrAdd(elements_size, IntPtrConstant(JSRegExpResult::kSize));
-
- static const int kRegExpResultOffset = 0;
- static const int kElementsOffset =
- kRegExpResultOffset + JSRegExpResult::kSize;
-
- // The folded allocation.
-
- TNode<HeapObject> result = Allocate(total_size);
- TNode<HeapObject> elements = InnerAllocate(result, kElementsOffset);
-
- // Initialize the JSRegExpResult.
+ // Note: The returned `elements` may be in young large object space, but
+ // `array` is guaranteed to be in new space so we could skip write barriers
+ // below.
+ TNode<JSArray> array;
+ TNode<FixedArrayBase> elements;
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ elements_kind, map, length, no_allocation_site, capacity,
+ INTPTR_PARAMETERS, kAllowLargeObjectAllocation, JSRegExpResult::kSize);
- TNode<Context> native_context = LoadNativeContext(context);
- TNode<Map> map = CAST(
- LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX));
- StoreMapNoWriteBarrier(result, map);
+ // Finish result initialization.
- StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOrHashOffset,
- EmptyFixedArrayConstant());
- StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, elements);
- StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, length);
+ TNode<JSRegExpResult> result = CAST(array);
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index);
- StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kInputOffset, input);
+ // TODO(jgruber,tebbi): Could skip barrier but the MemoryOptimizer complains.
+ StoreObjectField(result, JSRegExpResult::kInputOffset, input);
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kGroupsOffset,
UndefinedConstant());
- // Initialize the elements.
-
- DCHECK(!IsDoubleElementsKind(elements_kind));
- const RootIndex map_index = RootIndex::kFixedArrayMap;
- DCHECK(RootsTable::IsImmortalImmovable(map_index));
- StoreMapNoWriteBarrier(elements, map_index);
- StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
+ // Finish elements initialization.
FillFixedArrayWithValue(elements_kind, elements, IntPtrZero(), length_intptr,
RootIndex::kUndefinedValue);
- return CAST(result);
+ if (elements_out) *elements_out = CAST(elements);
+ return result;
}
TNode<Object> RegExpBuiltinsAssembler::RegExpCreate(
@@ -177,11 +162,11 @@ TNode<JSRegExpResult> RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
TNode<String> first =
CAST(CallBuiltin(Builtins::kSubString, context, string, start, end));
- TNode<JSRegExpResult> result =
- AllocateRegExpResult(context, num_results, start, string);
- TNode<FixedArray> result_elements = CAST(LoadElements(result));
+ TNode<FixedArray> result_elements;
+ TNode<JSRegExpResult> result = AllocateRegExpResult(
+ context, num_results, start, string, &result_elements);
- UnsafeStoreFixedArrayElement(result_elements, 0, first, SKIP_WRITE_BARRIER);
+ UnsafeStoreFixedArrayElement(result_elements, 0, first);
// If no captures exist we can skip named capture handling as well.
GotoIf(SmiEqual(num_results, SmiConstant(1)), &out);
@@ -1923,6 +1908,22 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
Variable* vars[] = {array.var_array(), array.var_length(),
array.var_capacity()};
Label loop(this, 3, vars), out(this);
+
+ // Check if the regexp is an ATOM type. If then, keep the literal string to
+ // search for so that we can avoid calling substring in the loop below.
+ TVARIABLE(BoolT, var_atom, Int32FalseConstant());
+ TVARIABLE(String, var_search_string, EmptyStringConstant());
+ if (is_fastpath) {
+ TNode<JSRegExp> maybe_atom_regexp = CAST(regexp);
+ TNode<FixedArray> data =
+ CAST(LoadObjectField(maybe_atom_regexp, JSRegExp::kDataOffset));
+ GotoIfNot(SmiEqual(CAST(LoadFixedArrayElement(data, JSRegExp::kTagIndex)),
+ SmiConstant(JSRegExp::ATOM)),
+ &loop);
+ var_search_string =
+ CAST(LoadFixedArrayElement(data, JSRegExp::kAtomPatternIndex));
+ var_atom = Int32TrueConstant();
+ }
Goto(&loop);
BIND(&loop);
@@ -1937,13 +1938,22 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
RegExpPrototypeExecBodyWithoutResult(CAST(context), CAST(regexp),
string, &if_didnotmatch, true);
- Node* const match_from = UnsafeLoadFixedArrayElement(
- match_indices, RegExpMatchInfo::kFirstCaptureIndex);
- Node* const match_to = UnsafeLoadFixedArrayElement(
- match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+ Label dosubstring(this), donotsubstring(this);
+ Branch(var_atom.value(), &donotsubstring, &dosubstring);
- var_match.Bind(CallBuiltin(Builtins::kSubString, context, string,
- match_from, match_to));
+ BIND(&dosubstring);
+ {
+ Node* const match_from = UnsafeLoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+ Node* const match_to = UnsafeLoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+ var_match.Bind(CallBuiltin(Builtins::kSubString, context, string,
+ match_from, match_to));
+ Goto(&if_didmatch);
+ }
+
+ BIND(&donotsubstring);
+ var_match.Bind(var_search_string.value());
Goto(&if_didmatch);
} else {
DCHECK(!is_fastpath);
@@ -2685,213 +2695,6 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
string, maybe_limit));
}
-Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
- Node* context, Node* regexp, Node* string, Node* replace_callable) {
- // The fast path is reached only if {receiver} is a global unmodified
- // JSRegExp instance and {replace_callable} is callable.
-
- CSA_ASSERT(this, IsFastRegExp(context, regexp));
- CSA_ASSERT(this, IsCallable(replace_callable));
- CSA_ASSERT(this, IsString(string));
-
- Isolate* const isolate = this->isolate();
-
- Node* const undefined = UndefinedConstant();
- TNode<IntPtrT> int_one = IntPtrConstant(1);
-
- Node* const native_context = LoadNativeContext(context);
-
- Label out(this);
- VARIABLE(var_result, MachineRepresentation::kTagged);
-
- // Set last index to 0.
- FastStoreLastIndex(regexp, SmiZero());
-
- // Allocate {result_array}.
- Node* result_array;
- {
- ElementsKind kind = PACKED_ELEMENTS;
- TNode<Map> array_map = LoadJSArrayElementsMap(kind, native_context);
- TNode<IntPtrT> capacity = IntPtrConstant(16);
- TNode<Smi> length = SmiZero();
- Node* const allocation_site = nullptr;
- ParameterMode capacity_mode = CodeStubAssembler::INTPTR_PARAMETERS;
-
- result_array = AllocateJSArray(kind, array_map, capacity, length,
- allocation_site, capacity_mode);
- }
-
- // Call into runtime for RegExpExecMultiple.
- TNode<FixedArray> last_match_info = CAST(LoadContextElement(
- native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX));
- Node* const res = CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
- string, last_match_info, result_array);
-
- // Reset last index to 0.
- FastStoreLastIndex(regexp, SmiZero());
-
- // If no matches, return the subject string.
- var_result.Bind(string);
- GotoIf(IsNull(res), &out);
-
- // Reload last match info since it might have changed.
- last_match_info = CAST(LoadContextElement(
- native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX));
-
- Node* const res_length = LoadJSArrayLength(res);
- TNode<FixedArray> const res_elems = CAST(LoadElements(res));
-
- TNode<Smi> const num_capture_registers = CAST(LoadFixedArrayElement(
- last_match_info, RegExpMatchInfo::kNumberOfCapturesIndex));
-
- Label if_hasexplicitcaptures(this), if_noexplicitcaptures(this),
- create_result(this);
- Branch(SmiEqual(num_capture_registers, SmiConstant(2)),
- &if_noexplicitcaptures, &if_hasexplicitcaptures);
-
- BIND(&if_noexplicitcaptures);
- {
- // If the number of captures is two then there are no explicit captures in
- // the regexp, just the implicit capture that captures the whole match. In
- // this case we can simplify quite a bit and end up with something faster.
- // The builder will consist of some integers that indicate slices of the
- // input string and some replacements that were returned from the replace
- // function.
-
- TVARIABLE(Smi, var_match_start, SmiZero());
-
- TNode<IntPtrT> const end = SmiUntag(res_length);
- TVARIABLE(IntPtrT, var_i, IntPtrZero());
-
- Variable* vars[] = {&var_i, &var_match_start};
- Label loop(this, 2, vars);
- Goto(&loop);
- BIND(&loop);
- {
- GotoIfNot(IntPtrLessThan(var_i.value(), end), &create_result);
-
- Node* const elem = LoadFixedArrayElement(res_elems, var_i.value());
-
- Label if_issmi(this), if_isstring(this), loop_epilogue(this);
- Branch(TaggedIsSmi(elem), &if_issmi, &if_isstring);
-
- BIND(&if_issmi);
- {
- TNode<Smi> smi_elem = CAST(elem);
- // Integers represent slices of the original string.
- Label if_isnegativeorzero(this), if_ispositive(this);
- BranchIfSmiLessThanOrEqual(smi_elem, SmiZero(), &if_isnegativeorzero,
- &if_ispositive);
-
- BIND(&if_ispositive);
- {
- TNode<IntPtrT> int_elem = SmiUntag(smi_elem);
- TNode<IntPtrT> new_match_start =
- Signed(IntPtrAdd(WordShr(int_elem, IntPtrConstant(11)),
- WordAnd(int_elem, IntPtrConstant(0x7FF))));
- var_match_start = SmiTag(new_match_start);
- Goto(&loop_epilogue);
- }
-
- BIND(&if_isnegativeorzero);
- {
- var_i = IntPtrAdd(var_i.value(), int_one);
-
- TNode<Smi> const next_elem =
- CAST(LoadFixedArrayElement(res_elems, var_i.value()));
-
- var_match_start = SmiSub(next_elem, smi_elem);
- Goto(&loop_epilogue);
- }
- }
-
- BIND(&if_isstring);
- {
- CSA_ASSERT(this, IsString(elem));
-
- Callable call_callable = CodeFactory::Call(isolate);
- TNode<Smi> match_start = var_match_start.value();
- Node* const replacement_obj =
- CallJS(call_callable, context, replace_callable, undefined, elem,
- match_start, string);
-
- TNode<String> const replacement_str =
- ToString_Inline(context, replacement_obj);
- StoreFixedArrayElement(res_elems, var_i.value(), replacement_str);
-
- TNode<Smi> const elem_length = LoadStringLengthAsSmi(elem);
- var_match_start = SmiAdd(match_start, elem_length);
-
- Goto(&loop_epilogue);
- }
-
- BIND(&loop_epilogue);
- {
- var_i = IntPtrAdd(var_i.value(), int_one);
- Goto(&loop);
- }
- }
- }
-
- BIND(&if_hasexplicitcaptures);
- {
- Node* const from = IntPtrZero();
- Node* const to = SmiUntag(res_length);
- const int increment = 1;
-
- BuildFastLoop(
- from, to,
- [this, res_elems, isolate, native_context, context, undefined,
- replace_callable](Node* index) {
- Node* const elem = LoadFixedArrayElement(res_elems, index);
-
- Label do_continue(this);
- GotoIf(TaggedIsSmi(elem), &do_continue);
-
- // elem must be an Array.
- // Use the apply argument as backing for global RegExp
- // properties.
-
- CSA_ASSERT(this, HasInstanceType(elem, JS_ARRAY_TYPE));
-
- // TODO(jgruber): Remove indirection through
- // Call->ReflectApply.
- Callable call_callable = CodeFactory::Call(isolate);
- Node* const reflect_apply =
- LoadContextElement(native_context, Context::REFLECT_APPLY_INDEX);
-
- Node* const replacement_obj =
- CallJS(call_callable, context, reflect_apply, undefined,
- replace_callable, undefined, elem);
-
- // Overwrite the i'th element in the results with the string
- // we got back from the callback function.
-
- TNode<String> const replacement_str =
- ToString_Inline(context, replacement_obj);
- StoreFixedArrayElement(res_elems, index, replacement_str);
-
- Goto(&do_continue);
- BIND(&do_continue);
- },
- increment, CodeStubAssembler::INTPTR_PARAMETERS,
- CodeStubAssembler::IndexAdvanceMode::kPost);
-
- Goto(&create_result);
- }
-
- BIND(&create_result);
- {
- Node* const result = CallRuntime(Runtime::kStringBuilderConcat, context,
- res, res_length, string);
- var_result.Bind(result);
- Goto(&out);
- }
-
- BIND(&out);
- return var_result.value();
-}
-
class RegExpStringIteratorAssembler : public RegExpBuiltinsAssembler {
public:
explicit RegExpStringIteratorAssembler(compiler::CodeAssemblerState* state)
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 879138867b..88c00095b9 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -6,8 +6,8 @@
#define V8_BUILTINS_BUILTINS_REGEXP_GEN_H_
#include "src/base/optional.h"
-#include "src/code-stub-assembler.h"
-#include "src/message-template.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/execution/message-template.h"
namespace v8 {
namespace internal {
@@ -38,10 +38,9 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
// Allocate a RegExpResult with the given length (the number of captures,
// including the match itself), index (the index where the match starts),
// and input string.
- TNode<JSRegExpResult> AllocateRegExpResult(TNode<Context> context,
- TNode<Smi> length,
- TNode<Smi> index,
- TNode<String> input);
+ TNode<JSRegExpResult> AllocateRegExpResult(
+ TNode<Context> context, TNode<Smi> length, TNode<Smi> index,
+ TNode<String> input, TNode<FixedArray>* elements_out = nullptr);
TNode<Object> FastLoadLastIndex(TNode<JSRegExp> regexp);
TNode<Object> SlowLoadLastIndex(TNode<Context> context, TNode<Object> regexp);
@@ -148,9 +147,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
TNode<String> const string,
TNode<Smi> const limit);
-
- Node* ReplaceGlobalCallableFastPath(Node* context, Node* regexp, Node* string,
- Node* replace_callable);
};
class RegExpMatchAllAssembler : public RegExpBuiltinsAssembler {
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
index e763ab10ea..3e0f7182c7 100644
--- a/deps/v8/src/builtins/builtins-regexp.cc
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -4,11 +4,11 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-utils.h"
-#include "src/string-builder-inl.h"
+#include "src/strings/string-builder-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index d92e988aef..1e9ac8377c 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -4,8 +4,8 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
-#include "src/objects.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -21,12 +21,12 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
: CodeStubAssembler(state) {}
protected:
- typedef Node* (CodeAssembler::*AssemblerFunction)(MachineType type,
- Node* base, Node* offset,
- Node* value,
- Node* value_high);
+ using AssemblerFunction = Node* (CodeAssembler::*)(MachineType type,
+ Node* base, Node* offset,
+ Node* value,
+ Node* value_high);
void ValidateSharedTypedArray(Node* tagged, Node* context,
- Node** out_instance_type,
+ Node** out_elements_kind,
Node** out_backing_store);
Node* ConvertTaggedAtomicIndexToWord32(Node* tagged, Node* context,
Node** number_index);
@@ -46,7 +46,7 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
};
void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
- Node* tagged, Node* context, Node** out_instance_type,
+ Node* tagged, Node* context, Node** out_elements_kind,
Node** out_backing_store) {
Label not_float_or_clamped(this), invalid(this);
@@ -54,8 +54,8 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
GotoIf(TaggedIsSmi(tagged), &invalid);
// Fail if the array's instance type is not JSTypedArray.
- GotoIfNot(InstanceTypeEqual(LoadInstanceType(tagged), JS_TYPED_ARRAY_TYPE),
- &invalid);
+ Node* tagged_map = LoadMap(tagged);
+ GotoIfNot(IsJSTypedArrayMap(tagged_map), &invalid);
// Fail if the array's JSArrayBuffer is not shared.
TNode<JSArrayBuffer> array_buffer = LoadJSArrayBufferViewBuffer(CAST(tagged));
@@ -63,20 +63,18 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
GotoIfNot(IsSetWord32<JSArrayBuffer::IsSharedBit>(bitfield), &invalid);
// Fail if the array's element type is float32, float64 or clamped.
- Node* elements_instance_type = LoadInstanceType(LoadElements(tagged));
- STATIC_ASSERT(FIXED_INT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_INT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_INT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_UINT8_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_UINT16_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_UINT32_ARRAY_TYPE < FIXED_FLOAT32_ARRAY_TYPE);
- GotoIf(Int32LessThan(elements_instance_type,
- Int32Constant(FIXED_FLOAT32_ARRAY_TYPE)),
+ STATIC_ASSERT(INT8_ELEMENTS < FLOAT32_ELEMENTS);
+ STATIC_ASSERT(INT16_ELEMENTS < FLOAT32_ELEMENTS);
+ STATIC_ASSERT(INT32_ELEMENTS < FLOAT32_ELEMENTS);
+ STATIC_ASSERT(UINT8_ELEMENTS < FLOAT32_ELEMENTS);
+ STATIC_ASSERT(UINT16_ELEMENTS < FLOAT32_ELEMENTS);
+ STATIC_ASSERT(UINT32_ELEMENTS < FLOAT32_ELEMENTS);
+ Node* elements_kind = LoadMapElementsKind(tagged_map);
+ GotoIf(Int32LessThan(elements_kind, Int32Constant(FLOAT32_ELEMENTS)),
&not_float_or_clamped);
- STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT8_CLAMPED_ARRAY_TYPE);
- Branch(Int32GreaterThan(elements_instance_type,
- Int32Constant(FIXED_UINT8_CLAMPED_ARRAY_TYPE)),
+ STATIC_ASSERT(BIGINT64_ELEMENTS > UINT8_CLAMPED_ELEMENTS);
+ STATIC_ASSERT(BIGUINT64_ELEMENTS > UINT8_CLAMPED_ELEMENTS);
+ Branch(Int32GreaterThan(elements_kind, Int32Constant(UINT8_CLAMPED_ELEMENTS)),
&not_float_or_clamped, &invalid);
BIND(&invalid);
@@ -86,7 +84,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
}
BIND(&not_float_or_clamped);
- *out_instance_type = elements_instance_type;
+ *out_elements_kind = elements_kind;
TNode<RawPtrT> backing_store = LoadJSArrayBufferBackingStore(array_buffer);
TNode<UintPtrT> byte_offset = LoadJSArrayBufferViewByteOffset(CAST(tagged));
@@ -116,13 +114,14 @@ Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
}
void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
- Node* index_word,
+ Node* index,
Node* context) {
// Check if the index is in bounds. If not, throw RangeError.
Label check_passed(this);
- Node* array_length_word32 =
- TruncateTaggedToWord32(context, LoadJSTypedArrayLength(CAST(array)));
- GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed);
+ TNode<UintPtrT> array_length = LoadJSTypedArrayLength(CAST(array));
+ // TODO(v8:4153): Use UintPtr for the {index} as well.
+ GotoIf(UintPtrLessThan(ChangeUint32ToWord(index), array_length),
+ &check_passed);
ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex);
@@ -136,10 +135,8 @@ void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
// ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be
// detached and the TypedArray length can't change either, so skipping this
// check in Release mode is safe.
- CSA_ASSERT(this,
- Uint32LessThan(index_word,
- TruncateTaggedToWord32(
- context, LoadJSTypedArrayLength(CAST(array)))));
+ CSA_ASSERT(this, UintPtrLessThan(ChangeUint32ToWord(index_word),
+ LoadJSTypedArrayLength(CAST(array))));
}
#endif
@@ -170,9 +167,9 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
Node* index = Parameter(Descriptor::kIndex);
Node* context = Parameter(Descriptor::kContext);
- Node* instance_type;
+ Node* elements_kind;
Node* backing_store;
- ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+ ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
Node* index_integer;
Node* index_word32 =
@@ -183,13 +180,11 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
i64(this), u64(this), other(this);
int32_t case_values[] = {
- FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE,
- FIXED_INT16_ARRAY_TYPE, FIXED_UINT16_ARRAY_TYPE,
- FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
- FIXED_BIGINT64_ARRAY_TYPE, FIXED_BIGUINT64_ARRAY_TYPE,
+ INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, UINT16_ELEMENTS,
+ INT32_ELEMENTS, UINT32_ELEMENTS, BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS,
};
Label* case_labels[] = {&i8, &u8, &i16, &u16, &i32, &u32, &i64, &u64};
- Switch(instance_type, &other, case_values, case_labels,
+ Switch(elements_kind, &other, case_values, case_labels,
arraysize(case_labels));
BIND(&i8);
@@ -244,9 +239,9 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
Node* value = Parameter(Descriptor::kValue);
Node* context = Parameter(Descriptor::kContext);
- Node* instance_type;
+ Node* elements_kind;
Node* backing_store;
- ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+ ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
Node* index_integer;
Node* index_word32 =
@@ -255,11 +250,9 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
Node* index_word = ChangeUint32ToWord(index_word32);
Label u8(this), u16(this), u32(this), u64(this), other(this);
- STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
- GotoIf(
- Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
- &u64);
+ STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
+ STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
+ GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &u64);
Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
@@ -269,11 +262,11 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
#endif
int32_t case_values[] = {
- FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
- FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+ INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS,
+ UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS,
};
Label* case_labels[] = {&u8, &u8, &u16, &u16, &u32, &u32};
- Switch(instance_type, &other, case_values, case_labels,
+ Switch(elements_kind, &other, case_values, case_labels,
arraysize(case_labels));
BIND(&u8);
@@ -320,9 +313,9 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
Node* value = Parameter(Descriptor::kValue);
Node* context = Parameter(Descriptor::kContext);
- Node* instance_type;
+ Node* elements_kind;
Node* backing_store;
- ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+ ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
Node* index_integer;
Node* index_word32 =
@@ -337,11 +330,9 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
i64(this), u64(this), big(this), other(this);
- STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
- GotoIf(
- Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
- &big);
+ STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
+ STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
+ GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);
Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
@@ -350,13 +341,13 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
int32_t case_values[] = {
- FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
- FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+ INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS,
+ UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS,
};
Label* case_labels[] = {
&i8, &u8, &i16, &u16, &i32, &u32,
};
- Switch(instance_type, &other, case_values, case_labels,
+ Switch(elements_kind, &other, case_values, case_labels,
arraysize(case_labels));
BIND(&i8);
@@ -394,10 +385,8 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
TVARIABLE(UintPtrT, var_high);
BigIntToRawBytes(value_bigint, &var_low, &var_high);
Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
- GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
- &i64);
- GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
- &u64);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(BIGINT64_ELEMENTS)), &i64);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(BIGUINT64_ELEMENTS)), &u64);
Unreachable();
BIND(&i64);
@@ -426,9 +415,9 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
Node* new_value = Parameter(Descriptor::kNewValue);
Node* context = Parameter(Descriptor::kContext);
- Node* instance_type;
+ Node* elements_kind;
Node* backing_store;
- ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+ ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
Node* index_integer;
Node* index_word32 =
@@ -444,11 +433,9 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
i64(this), u64(this), big(this), other(this);
- STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
- GotoIf(
- Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
- &big);
+ STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
+ STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
+ GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);
Node* old_value_integer = ToInteger_Inline(CAST(context), CAST(old_value));
Node* new_value_integer = ToInteger_Inline(CAST(context), CAST(new_value));
@@ -459,13 +446,13 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
Node* new_value_word32 = TruncateTaggedToWord32(context, new_value_integer);
int32_t case_values[] = {
- FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
- FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+ INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS,
+ UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS,
};
Label* case_labels[] = {
&i8, &u8, &i16, &u16, &i32, &u32,
};
- Switch(instance_type, &other, case_values, case_labels,
+ Switch(elements_kind, &other, case_values, case_labels,
arraysize(case_labels));
BIND(&i8);
@@ -512,10 +499,8 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
BigIntToRawBytes(new_value_bigint, &var_new_low, &var_new_high);
Node* old_high = Is64() ? nullptr : static_cast<Node*>(var_old_high.value());
Node* new_high = Is64() ? nullptr : static_cast<Node*>(var_new_high.value());
- GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
- &i64);
- GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
- &u64);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(BIGINT64_ELEMENTS)), &i64);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(BIGUINT64_ELEMENTS)), &u64);
Unreachable();
BIND(&i64);
@@ -558,9 +543,9 @@ BINOP_BUILTIN(Xor)
void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
Node* array, Node* index, Node* value, Node* context,
AssemblerFunction function, Runtime::FunctionId runtime_function) {
- Node* instance_type;
+ Node* elements_kind;
Node* backing_store;
- ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+ ValidateSharedTypedArray(array, context, &elements_kind, &backing_store);
Node* index_integer;
Node* index_word32 =
@@ -576,11 +561,9 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
i64(this), u64(this), big(this), other(this);
- STATIC_ASSERT(FIXED_BIGINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
- STATIC_ASSERT(FIXED_BIGUINT64_ARRAY_TYPE > FIXED_UINT32_ARRAY_TYPE);
- GotoIf(
- Int32GreaterThan(instance_type, Int32Constant(FIXED_UINT32_ARRAY_TYPE)),
- &big);
+ STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS);
+ STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS);
+ GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big);
Node* value_integer = ToInteger_Inline(CAST(context), CAST(value));
#if DEBUG
@@ -589,13 +572,13 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
int32_t case_values[] = {
- FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
- FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
+ INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS,
+ UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS,
};
Label* case_labels[] = {
&i8, &u8, &i16, &u16, &i32, &u32,
};
- Switch(instance_type, &other, case_values, case_labels,
+ Switch(elements_kind, &other, case_values, case_labels,
arraysize(case_labels));
BIND(&i8);
@@ -635,10 +618,8 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
TVARIABLE(UintPtrT, var_high);
BigIntToRawBytes(value_bigint, &var_low, &var_high);
Node* high = Is64() ? nullptr : static_cast<Node*>(var_high.value());
- GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGINT64_ARRAY_TYPE)),
- &i64);
- GotoIf(Word32Equal(instance_type, Int32Constant(FIXED_BIGUINT64_ARRAY_TYPE)),
- &u64);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(BIGINT64_ELEMENTS)), &i64);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(BIGUINT64_ELEMENTS)), &u64);
Unreachable();
BIND(&i64);
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 96c5558536..fa6534d463 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -7,14 +7,14 @@
#include "src/base/platform/time.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/conversions-inl.h"
-#include "src/counters.h"
-#include "src/futex-emulation.h"
-#include "src/globals.h"
+#include "src/codegen/code-factory.h"
+#include "src/common/globals.h"
+#include "src/execution/futex-emulation.h"
#include "src/heap/factory.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -74,8 +74,7 @@ V8_WARN_UNUSED_RESULT Maybe<size_t> ValidateAtomicAccess(
size_t access_index;
if (!TryNumberToSize(*access_index_obj, &access_index) ||
- typed_array->WasDetached() ||
- access_index >= typed_array->length_value()) {
+ typed_array->WasDetached() || access_index >= typed_array->length()) {
isolate->Throw(*isolate->factory()->NewRangeError(
MessageTemplate::kInvalidAtomicAccessIndex));
return Nothing<size_t>();
@@ -164,13 +163,13 @@ BUILTIN(AtomicsWait) {
double timeout_number;
if (timeout->IsUndefined(isolate)) {
- timeout_number = ReadOnlyRoots(isolate).infinity_value()->Number();
+ timeout_number = ReadOnlyRoots(isolate).infinity_value().Number();
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, timeout,
Object::ToNumber(isolate, timeout));
timeout_number = timeout->Number();
if (std::isnan(timeout_number))
- timeout_number = ReadOnlyRoots(isolate).infinity_value()->Number();
+ timeout_number = ReadOnlyRoots(isolate).infinity_value().Number();
else if (timeout_number < 0)
timeout_number = 0;
}
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 74b2db0542..5689b42619 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -7,16 +7,16 @@
#include "src/builtins/builtins-regexp-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/objects/property-cell.h"
namespace v8 {
namespace internal {
-typedef compiler::Node Node;
+using Node = compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
@@ -302,7 +302,8 @@ TF_BUILTIN(StringAdd_ConvertLeft, StringBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
// TODO(danno): The ToString and JSReceiverToPrimitive below could be
// combined to avoid duplicate smi and instance type checks.
- left = ToString(context, JSReceiverToPrimitive(context, left));
+ left =
+ ToStringImpl(CAST(context), CAST(JSReceiverToPrimitive(context, left)));
TailCallBuiltin(Builtins::kStringAdd_CheckNone, context, left, right);
}
@@ -312,7 +313,8 @@ TF_BUILTIN(StringAdd_ConvertRight, StringBuiltinsAssembler) {
Node* context = Parameter(Descriptor::kContext);
// TODO(danno): The ToString and JSReceiverToPrimitive below could be
// combined to avoid duplicate smi and instance type checks.
- right = ToString(context, JSReceiverToPrimitive(context, right));
+ right =
+ ToStringImpl(CAST(context), CAST(JSReceiverToPrimitive(context, right)));
TailCallBuiltin(Builtins::kStringAdd_CheckNone, context, left, right);
}
@@ -323,29 +325,6 @@ TF_BUILTIN(SubString, StringBuiltinsAssembler) {
Return(SubString(string, SmiUntag(from), SmiUntag(to)));
}
-void StringBuiltinsAssembler::GenerateStringAt(
- char const* method_name, TNode<Context> context, TNode<Object> receiver,
- TNode<Object> maybe_position, TNode<Object> default_return,
- const StringAtAccessor& accessor) {
- // Check that {receiver} is coercible to Object and convert it to a String.
- TNode<String> string = ToThisString(context, receiver, method_name);
-
- // Convert the {position} to a Smi and check that it's in bounds of the
- // {string}.
- Label if_outofbounds(this, Label::kDeferred);
- TNode<Number> position = ToInteger_Inline(
- context, maybe_position, CodeStubAssembler::kTruncateMinusZero);
- GotoIfNot(TaggedIsSmi(position), &if_outofbounds);
- TNode<IntPtrT> index = SmiUntag(CAST(position));
- TNode<IntPtrT> length = LoadStringLengthAsWord(string);
- GotoIfNot(UintPtrLessThan(index, length), &if_outofbounds);
- TNode<Object> result = accessor(string, length, index);
- Return(result);
-
- BIND(&if_outofbounds);
- Return(default_return);
-}
-
void StringBuiltinsAssembler::GenerateStringRelationalComparison(Node* context,
Node* left,
Node* right,
@@ -707,81 +686,6 @@ TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
}
}
-// ES6 #sec-string.prototype.charat
-TF_BUILTIN(StringPrototypeCharAt, StringBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
-
- GenerateStringAt("String.prototype.charAt", context, receiver, maybe_position,
- EmptyStringConstant(),
- [this](TNode<String> string, TNode<IntPtrT> length,
- TNode<IntPtrT> index) {
- TNode<Int32T> code = StringCharCodeAt(string, index);
- return StringFromSingleCharCode(code);
- });
-}
-
-// ES6 #sec-string.prototype.charcodeat
-TF_BUILTIN(StringPrototypeCharCodeAt, StringBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
-
- GenerateStringAt("String.prototype.charCodeAt", context, receiver,
- maybe_position, NanConstant(),
- [this](TNode<String> receiver, TNode<IntPtrT> length,
- TNode<IntPtrT> index) {
- Node* value = StringCharCodeAt(receiver, index);
- return SmiFromInt32(value);
- });
-}
-
-// ES6 #sec-string.prototype.codepointat
-TF_BUILTIN(StringPrototypeCodePointAt, StringBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
- TNode<Object> maybe_position = CAST(Parameter(Descriptor::kPosition));
-
- GenerateStringAt("String.prototype.codePointAt", context, receiver,
- maybe_position, UndefinedConstant(),
- [this](TNode<String> receiver, TNode<IntPtrT> length,
- TNode<IntPtrT> index) {
- // This is always a call to a builtin from Javascript,
- // so we need to produce UTF32.
- Node* value = LoadSurrogatePairAt(receiver, length, index,
- UnicodeEncoding::UTF32);
- return SmiFromInt32(value);
- });
-}
-
-// ES6 String.prototype.concat(...args)
-// ES6 #sec-string.prototype.concat
-TF_BUILTIN(StringPrototypeConcat, CodeStubAssembler) {
- // TODO(ishell): use constants from Descriptor once the JSFunction linkage
- // arguments are reordered.
- CodeStubArguments arguments(
- this,
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount)));
- TNode<Object> receiver = arguments.GetReceiver();
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- receiver = ToThisString(context, receiver, "String.prototype.concat");
-
- // Concatenate all the arguments passed to this builtin.
- VARIABLE(var_result, MachineRepresentation::kTagged);
- var_result.Bind(receiver);
- arguments.ForEach(
- CodeStubAssembler::VariableList({&var_result}, zone()),
- [this, context, &var_result](Node* arg) {
- arg = ToString_Inline(context, arg);
- var_result.Bind(CallStub(CodeFactory::StringAdd(isolate()), context,
- var_result.value(), arg));
- });
- arguments.PopAndReturn(var_result.value());
-}
-
void StringBuiltinsAssembler::StringIndexOf(
Node* const subject_string, Node* const search_string, Node* const position,
const std::function<void(Node*)>& f_return) {
@@ -844,8 +748,8 @@ void StringBuiltinsAssembler::StringIndexOf(
search_to_direct.instance_type(), &one_one,
&one_two, &two_one, &two_two);
- typedef const uint8_t onebyte_t;
- typedef const uc16 twobyte_t;
+ using onebyte_t = const uint8_t;
+ using twobyte_t = const uc16;
BIND(&one_one);
{
@@ -1591,57 +1495,6 @@ TF_BUILTIN(StringPrototypeSearch, StringMatchSearchAssembler) {
Generate(kSearch, "String.prototype.search", receiver, maybe_regexp, context);
}
-// ES6 section 21.1.3.18 String.prototype.slice ( start, end )
-TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
- Label out(this);
- TVARIABLE(IntPtrT, var_start);
- TVARIABLE(IntPtrT, var_end);
-
- const int kStart = 0;
- const int kEnd = 1;
- Node* argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
- Node* const receiver = args.GetReceiver();
- TNode<Object> start = args.GetOptionalArgumentValue(kStart);
- TNode<Object> end = args.GetOptionalArgumentValue(kEnd);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- // 1. Let O be ? RequireObjectCoercible(this value).
- RequireObjectCoercible(context, receiver, "String.prototype.slice");
-
- // 2. Let S be ? ToString(O).
- TNode<String> const subject_string =
- CAST(CallBuiltin(Builtins::kToString, context, receiver));
-
- // 3. Let len be the number of elements in S.
- TNode<IntPtrT> const length = LoadStringLengthAsWord(subject_string);
-
- // Convert {start} to a relative index.
- var_start = ConvertToRelativeIndex(context, start, length);
-
- // 5. If end is undefined, let intEnd be len;
- var_end = length;
- GotoIf(IsUndefined(end), &out);
-
- // Convert {end} to a relative index.
- var_end = ConvertToRelativeIndex(context, end, length);
- Goto(&out);
-
- Label return_emptystring(this);
- BIND(&out);
- {
- GotoIf(IntPtrLessThanOrEqual(var_end.value(), var_start.value()),
- &return_emptystring);
- TNode<String> const result =
- SubString(subject_string, var_start.value(), var_end.value());
- args.PopAndReturn(result);
- }
-
- BIND(&return_emptystring);
- args.PopAndReturn(EmptyStringConstant());
-}
-
TNode<JSArray> StringBuiltinsAssembler::StringToArray(
TNode<Context> context, TNode<String> subject_string,
TNode<Smi> subject_length, TNode<Number> limit_number) {
@@ -1918,56 +1771,6 @@ TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
}
}
-TNode<Smi> StringBuiltinsAssembler::ToSmiBetweenZeroAnd(
- SloppyTNode<Context> context, SloppyTNode<Object> value,
- SloppyTNode<Smi> limit) {
- Label out(this);
- TVARIABLE(Smi, var_result);
-
- TNode<Number> const value_int =
- ToInteger_Inline(context, value, CodeStubAssembler::kTruncateMinusZero);
-
- Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
- Branch(TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
-
- BIND(&if_issmi);
- {
- TNode<Smi> value_smi = CAST(value_int);
- Label if_isinbounds(this), if_isoutofbounds(this, Label::kDeferred);
- Branch(SmiAbove(value_smi, limit), &if_isoutofbounds, &if_isinbounds);
-
- BIND(&if_isinbounds);
- {
- var_result = CAST(value_int);
- Goto(&out);
- }
-
- BIND(&if_isoutofbounds);
- {
- TNode<Smi> const zero = SmiConstant(0);
- var_result =
- SelectConstant<Smi>(SmiLessThan(value_smi, zero), zero, limit);
- Goto(&out);
- }
- }
-
- BIND(&if_isnotsmi);
- {
- // {value} is a heap number - in this case, it is definitely out of bounds.
- TNode<HeapNumber> value_int_hn = CAST(value_int);
-
- TNode<Float64T> const float_zero = Float64Constant(0.);
- TNode<Smi> const smi_zero = SmiConstant(0);
- TNode<Float64T> const value_float = LoadHeapNumberValue(value_int_hn);
- var_result = SelectConstant<Smi>(Float64LessThan(value_float, float_zero),
- smi_zero, limit);
- Goto(&out);
- }
-
- BIND(&out);
- return var_result.value();
-}
-
TF_BUILTIN(StringSubstring, CodeStubAssembler) {
TNode<String> string = CAST(Parameter(Descriptor::kString));
TNode<IntPtrT> from = UncheckedCast<IntPtrT>(Parameter(Descriptor::kFrom));
@@ -1976,61 +1779,6 @@ TF_BUILTIN(StringSubstring, CodeStubAssembler) {
Return(SubString(string, from, to));
}
-// ES6 #sec-string.prototype.substring
-TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
- const int kStartArg = 0;
- const int kEndArg = 1;
-
- Node* const argc =
- ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
- CodeStubArguments args(this, argc);
-
- TNode<Object> receiver = args.GetReceiver();
- TNode<Object> start = args.GetOptionalArgumentValue(kStartArg);
- TNode<Object> end = args.GetOptionalArgumentValue(kEndArg);
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
-
- Label out(this);
-
- TVARIABLE(Smi, var_start);
- TVARIABLE(Smi, var_end);
-
- // Check that {receiver} is coercible to Object and convert it to a String.
- TNode<String> const string =
- ToThisString(context, receiver, "String.prototype.substring");
-
- TNode<Smi> const length = LoadStringLengthAsSmi(string);
-
- // Conversion and bounds-checks for {start}.
- var_start = ToSmiBetweenZeroAnd(context, start, length);
-
- // Conversion and bounds-checks for {end}.
- {
- var_end = length;
- GotoIf(IsUndefined(end), &out);
-
- var_end = ToSmiBetweenZeroAnd(context, end, length);
-
- Label if_endislessthanstart(this);
- Branch(SmiLessThan(var_end.value(), var_start.value()),
- &if_endislessthanstart, &out);
-
- BIND(&if_endislessthanstart);
- {
- TNode<Smi> const tmp = var_end.value();
- var_end = var_start.value();
- var_start = tmp;
- Goto(&out);
- }
- }
-
- BIND(&out);
- {
- args.PopAndReturn(SubString(string, SmiUntag(var_start.value()),
- SmiUntag(var_end.value())));
- }
-}
-
// ES6 #sec-string.prototype.trim
TF_BUILTIN(StringPrototypeTrim, StringTrimAssembler) {
TNode<IntPtrT> argc =
@@ -2201,50 +1949,6 @@ void StringTrimAssembler::GotoIfNotWhiteSpaceOrLineTerminator(
BIND(&out);
}
-// ES6 #sec-string.prototype.tostring
-TF_BUILTIN(StringPrototypeToString, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
-
- Node* result = ToThisValue(context, receiver, PrimitiveType::kString,
- "String.prototype.toString");
- Return(result);
-}
-
-// ES6 #sec-string.prototype.valueof
-TF_BUILTIN(StringPrototypeValueOf, CodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
-
- Node* result = ToThisValue(context, receiver, PrimitiveType::kString,
- "String.prototype.valueOf");
- Return(result);
-}
-
-TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<Object> receiver = CAST(Parameter(Descriptor::kReceiver));
-
- Node* string =
- ToThisString(context, receiver, "String.prototype[Symbol.iterator]");
-
- Node* native_context = LoadNativeContext(context);
- Node* map = LoadContextElement(native_context,
- Context::INITIAL_STRING_ITERATOR_MAP_INDEX);
- Node* iterator = Allocate(JSStringIterator::kSize);
- StoreMapNoWriteBarrier(iterator, map);
- StoreObjectFieldRoot(iterator, JSValue::kPropertiesOrHashOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
- RootIndex::kEmptyFixedArray);
- StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kStringOffset,
- string);
- Node* index = SmiConstant(0);
- StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
- index);
- Return(iterator);
-}
-
// Return the |word32| codepoint at {index}. Supports SeqStrings and
// ExternalStrings.
TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
@@ -2310,59 +2014,6 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
return var_result.value();
}
-// ES6 #sec-%stringiteratorprototype%.next
-TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
- VARIABLE(var_value, MachineRepresentation::kTagged);
- VARIABLE(var_done, MachineRepresentation::kTagged);
-
- var_value.Bind(UndefinedConstant());
- var_done.Bind(TrueConstant());
-
- Label throw_bad_receiver(this), next_codepoint(this), return_result(this);
-
- Node* context = Parameter(Descriptor::kContext);
- Node* iterator = Parameter(Descriptor::kReceiver);
-
- GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
- GotoIfNot(
- InstanceTypeEqual(LoadInstanceType(iterator), JS_STRING_ITERATOR_TYPE),
- &throw_bad_receiver);
-
- Node* string = LoadObjectField(iterator, JSStringIterator::kStringOffset);
- TNode<IntPtrT> position = SmiUntag(
- CAST(LoadObjectField(iterator, JSStringIterator::kNextIndexOffset)));
- TNode<IntPtrT> length = LoadStringLengthAsWord(string);
-
- Branch(IntPtrLessThan(position, length), &next_codepoint, &return_result);
-
- BIND(&next_codepoint);
- {
- UnicodeEncoding encoding = UnicodeEncoding::UTF16;
- TNode<Int32T> ch = LoadSurrogatePairAt(string, length, position, encoding);
- TNode<String> value = StringFromSingleCodePoint(ch, encoding);
- var_value.Bind(value);
- TNode<IntPtrT> length = LoadStringLengthAsWord(value);
- StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
- SmiTag(Signed(IntPtrAdd(position, length))));
- var_done.Bind(FalseConstant());
- Goto(&return_result);
- }
-
- BIND(&return_result);
- {
- Node* result =
- AllocateJSIteratorResult(context, var_value.value(), var_done.value());
- Return(result);
- }
-
- BIND(&throw_bad_receiver);
- {
- // The {receiver} is not a valid JSGeneratorObject.
- ThrowTypeError(context, MessageTemplate::kIncompatibleMethodReceiver,
- StringConstant("String Iterator.prototype.next"), iterator);
- }
-}
-
void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration(
TNode<Object> object, TNode<Context> context, Label* if_true,
Label* if_false) {
@@ -2372,71 +2023,11 @@ void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration(
// Check that the String iterator hasn't been modified in a way that would
// affect iteration.
Node* protector_cell = LoadRoot(RootIndex::kStringIteratorProtector);
- DCHECK(isolate()->heap()->string_iterator_protector()->IsPropertyCell());
+ DCHECK(isolate()->heap()->string_iterator_protector().IsPropertyCell());
Branch(WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
SmiConstant(Isolate::kProtectorValid)),
if_true, if_false);
}
-// This function assumes StringPrimitiveWithNoCustomIteration is true.
-TNode<JSArray> StringBuiltinsAssembler::StringToList(TNode<Context> context,
- TNode<String> string) {
- const ElementsKind kind = PACKED_ELEMENTS;
- const TNode<IntPtrT> length = LoadStringLengthAsWord(string);
-
- TNode<Map> array_map =
- LoadJSArrayElementsMap(kind, LoadNativeContext(context));
- TNode<JSArray> array =
- AllocateJSArray(kind, array_map, length, SmiTag(length), nullptr,
- INTPTR_PARAMETERS, kAllowLargeObjectAllocation);
- TNode<FixedArrayBase> elements = LoadElements(array);
-
- const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
- TNode<IntPtrT> first_to_element_offset =
- ElementOffsetFromIndex(IntPtrConstant(0), kind, INTPTR_PARAMETERS, 0);
- TNode<IntPtrT> first_offset =
- IntPtrAdd(first_to_element_offset, IntPtrConstant(first_element_offset));
- TVARIABLE(IntPtrT, var_offset, first_offset);
- TVARIABLE(IntPtrT, var_position, IntPtrConstant(0));
- Label done(this), next_codepoint(this, {&var_position, &var_offset});
-
- Goto(&next_codepoint);
-
- BIND(&next_codepoint);
- {
- // Loop condition.
- GotoIfNot(IntPtrLessThan(var_position.value(), length), &done);
- const UnicodeEncoding encoding = UnicodeEncoding::UTF16;
- TNode<Int32T> ch =
- LoadSurrogatePairAt(string, length, var_position.value(), encoding);
- TNode<String> value = StringFromSingleCodePoint(ch, encoding);
-
- Store(elements, var_offset.value(), value);
-
- // Increment the position.
- TNode<IntPtrT> ch_length = LoadStringLengthAsWord(value);
- var_position = IntPtrAdd(var_position.value(), ch_length);
- // Increment the array offset and continue the loop.
- var_offset = IntPtrAdd(var_offset.value(), IntPtrConstant(kTaggedSize));
- Goto(&next_codepoint);
- }
-
- BIND(&done);
- TNode<IntPtrT> new_length = IntPtrDiv(
- IntPtrSub(var_offset.value(), first_offset), IntPtrConstant(kTaggedSize));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(new_length, IntPtrConstant(0)));
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(length, new_length));
- StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset,
- SmiTag(new_length));
-
- return UncheckedCast<JSArray>(array);
-}
-
-TF_BUILTIN(StringToList, StringBuiltinsAssembler) {
- TNode<Context> context = CAST(Parameter(Descriptor::kContext));
- TNode<String> string = CAST(Parameter(Descriptor::kSource));
- Return(StringToList(context, string));
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
index 9485d40011..92ebd3803b 100644
--- a/deps/v8/src/builtins/builtins-string-gen.h
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_BUILTINS_STRING_GEN_H_
#define V8_BUILTINS_BUILTINS_STRING_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -28,9 +28,12 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
Label* if_true,
Label* if_false);
- protected:
- TNode<JSArray> StringToList(TNode<Context> context, TNode<String> string);
+ TNode<Int32T> LoadSurrogatePairAt(SloppyTNode<String> string,
+ SloppyTNode<IntPtrT> length,
+ SloppyTNode<IntPtrT> index,
+ UnicodeEncoding encoding);
+ protected:
void StringEqual_Loop(Node* lhs, Node* lhs_instance_type,
MachineType lhs_type, Node* rhs,
Node* rhs_instance_type, MachineType rhs_type,
@@ -59,23 +62,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
void GenerateStringRelationalComparison(Node* context, Node* left,
Node* right, Operation op);
- TNode<Smi> ToSmiBetweenZeroAnd(SloppyTNode<Context> context,
- SloppyTNode<Object> value,
- SloppyTNode<Smi> limit);
-
- typedef std::function<TNode<Object>(
- TNode<String> receiver, TNode<IntPtrT> length, TNode<IntPtrT> index)>
- StringAtAccessor;
-
- void GenerateStringAt(const char* method_name, TNode<Context> context,
- TNode<Object> receiver, TNode<Object> maybe_position,
- TNode<Object> default_return,
- const StringAtAccessor& accessor);
-
- TNode<Int32T> LoadSurrogatePairAt(SloppyTNode<String> string,
- SloppyTNode<IntPtrT> length,
- SloppyTNode<IntPtrT> index,
- UnicodeEncoding encoding);
+ using StringAtAccessor = std::function<TNode<Object>(
+ TNode<String> receiver, TNode<IntPtrT> length, TNode<IntPtrT> index)>;
void StringIndexOf(Node* const subject_string, Node* const search_string,
Node* const position,
@@ -107,8 +95,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
//
// Contains fast paths for Smi and RegExp objects.
// Important: {regexp_call} may not contain any code that can call into JS.
- typedef std::function<void()> NodeFunction0;
- typedef std::function<void(Node* fn)> NodeFunction1;
+ using NodeFunction0 = std::function<void()>;
+ using NodeFunction1 = std::function<void(Node* fn)>;
void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
Node* const maybe_string,
Handle<Symbol> symbol,
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 43edd628d7..74b15cf99b 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -4,18 +4,18 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/conversions.h"
-#include "src/counters.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif
#include "src/regexp/regexp-utils.h"
-#include "src/string-builder-inl.h"
-#include "src/string-case.h"
-#include "src/unicode-inl.h"
-#include "src/unicode.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-case.h"
+#include "src/strings/unicode-inl.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -261,23 +261,23 @@ V8_WARN_UNUSED_RESULT static Object ConvertCaseHelper(
unibrow::uchar chars[Converter::kMaxWidth];
// We can assume that the string is not empty
uc32 current = stream.GetNext();
- bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
+ bool ignore_overflow = Converter::kIsToLower || result.IsSeqTwoByteString();
for (int i = 0; i < result_length;) {
bool has_next = stream.HasMore();
uc32 next = has_next ? stream.GetNext() : 0;
int char_length = mapping->get(current, next, chars);
if (char_length == 0) {
// The case conversion of this character is the character itself.
- result->Set(i, current);
+ result.Set(i, current);
i++;
} else if (char_length == 1 &&
(ignore_overflow || !ToUpperOverflows(current))) {
// Common case: converting the letter resulted in one character.
DCHECK(static_cast<uc32>(chars[0]) != current);
- result->Set(i, chars[0]);
+ result.Set(i, chars[0]);
has_changed_character = true;
i++;
- } else if (result_length == string->length()) {
+ } else if (result_length == string.length()) {
bool overflows = ToUpperOverflows(current);
// We've assumed that the result would be as long as the
// input but here is a character that converts to several
@@ -318,7 +318,7 @@ V8_WARN_UNUSED_RESULT static Object ConvertCaseHelper(
: Smi::FromInt(current_length);
} else {
for (int j = 0; j < char_length; j++) {
- result->Set(i, chars[j]);
+ result.Set(i, chars[j]);
i++;
}
has_changed_character = true;
@@ -361,7 +361,7 @@ V8_WARN_UNUSED_RESULT static Object ConvertCase(
bool has_changed_character = false;
int index_to_first_unprocessed = FastAsciiConvert<Converter::kIsToLower>(
reinterpret_cast<char*>(result->GetChars(no_gc)),
- reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
+ reinterpret_cast<const char*>(flat_content.ToOneByteVector().begin()),
length, &has_changed_character);
// If not ASCII, we discard the result and take the 2 byte path.
if (index_to_first_unprocessed == length)
@@ -376,9 +376,9 @@ V8_WARN_UNUSED_RESULT static Object ConvertCase(
}
Object answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
- if (answer->IsException(isolate) || answer->IsString()) return answer;
+ if (answer.IsException(isolate) || answer.IsString()) return answer;
- DCHECK(answer->IsSmi());
+ DCHECK(answer.IsSmi());
length = Smi::ToInt(answer);
if (s->IsOneByteRepresentation() && length > 0) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
diff --git a/deps/v8/src/builtins/builtins-symbol-gen.cc b/deps/v8/src/builtins/builtins-symbol-gen.cc
index 0a9a1f26c7..4e8c9f9850 100644
--- a/deps/v8/src/builtins/builtins-symbol-gen.cc
+++ b/deps/v8/src/builtins/builtins-symbol-gen.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 66fa69afff..ea6e594706 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -4,9 +4,9 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
#include "src/heap/heap-inl.h" // For public_symbol_table().
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -56,11 +56,11 @@ BUILTIN(SymbolKeyFor) {
Object result;
if (symbol->is_public()) {
result = symbol->name();
- DCHECK(result->IsString());
+ DCHECK(result.IsString());
} else {
result = ReadOnlyRoots(isolate).undefined_value();
}
- DCHECK_EQ(isolate->heap()->public_symbol_table()->SlowReverseLookup(*symbol),
+ DCHECK_EQ(isolate->heap()->public_symbol_table().SlowReverseLookup(*symbol),
result);
return result;
}
diff --git a/deps/v8/src/builtins/builtins-trace.cc b/deps/v8/src/builtins/builtins-trace.cc
index 0cda07a27a..b067bb0249 100644
--- a/deps/v8/src/builtins/builtins-trace.cc
+++ b/deps/v8/src/builtins/builtins-trace.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/json-stringifier.h"
-#include "src/objects-inl.h"
+#include "src/json/json-stringifier.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index 1206785526..8484685a6a 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -8,9 +8,8 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/builtins/growable-fixed-array-gen.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory-inl.h"
-#include "torque-generated/builtins-typed-array-createtypedarray-from-dsl-gen.h"
namespace v8 {
namespace internal {
@@ -28,34 +27,17 @@ using TNode = compiler::TNode<T>;
// -----------------------------------------------------------------------------
// ES6 section 22.2 TypedArray Objects
-TNode<Map> TypedArrayBuiltinsAssembler::LoadMapForType(
- TNode<JSTypedArray> array) {
- TVARIABLE(Map, var_typed_map);
- TNode<Map> array_map = LoadMap(array);
- TNode<Int32T> elements_kind = LoadMapElementsKind(array_map);
- ReadOnlyRoots roots(isolate());
-
- DispatchTypedArrayByElementsKind(
- elements_kind,
- [&](ElementsKind kind, int size, int typed_array_fun_index) {
- Handle<Map> map(roots.MapForFixedTypedArray(kind), isolate());
- var_typed_map = HeapConstant(map);
- });
-
- return var_typed_map.value();
-}
-
// Setup the TypedArray which is under construction.
// - Set the length.
// - Set the byte_offset.
// - Set the byte_length.
// - Set EmbedderFields to 0.
void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
- TNode<Smi> length,
+ TNode<UintPtrT> length,
TNode<UintPtrT> byte_offset,
TNode<UintPtrT> byte_length) {
- CSA_ASSERT(this, TaggedIsPositiveSmi(length));
- StoreObjectField(holder, JSTypedArray::kLengthOffset, length);
+ StoreObjectFieldNoWriteBarrier(holder, JSTypedArray::kLengthOffset, length,
+ MachineType::PointerRepresentation());
StoreObjectFieldNoWriteBarrier(holder, JSArrayBufferView::kByteOffsetOffset,
byte_offset,
MachineType::PointerRepresentation());
@@ -70,6 +52,7 @@ void TypedArrayBuiltinsAssembler::SetupTypedArray(TNode<JSTypedArray> holder,
// Allocate a new ArrayBuffer and initialize it with empty properties and
// elements.
+// TODO(bmeurer,v8:4153): Rename this and maybe fix up the implementation a bit.
TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
TNode<Context> context, TNode<JSTypedArray> holder,
TNode<UintPtrT> byte_length) {
@@ -115,43 +98,16 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0));
}
- StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
- return buffer;
-}
-
-TNode<FixedTypedArrayBase> TypedArrayBuiltinsAssembler::AllocateOnHeapElements(
- TNode<Map> map, TNode<IntPtrT> total_size, TNode<Number> length) {
- CSA_ASSERT(this, IntPtrGreaterThanOrEqual(total_size, IntPtrConstant(0)));
-
- // Allocate a FixedTypedArray and set the length, base pointer and external
- // pointer.
- CSA_ASSERT(this, IsRegularHeapObjectSize(total_size));
-
- TNode<Object> elements;
+ StoreObjectField(holder, JSTypedArray::kBufferOffset, buffer);
- if (UnalignedLoadSupported(MachineRepresentation::kFloat64) &&
- UnalignedStoreSupported(MachineRepresentation::kFloat64)) {
- elements = AllocateInNewSpace(total_size);
- } else {
- elements = AllocateInNewSpace(total_size, kDoubleAlignment);
- }
-
- StoreMapNoWriteBarrier(elements, map);
- StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
- StoreObjectFieldNoWriteBarrier(
- elements, FixedTypedArrayBase::kBasePointerOffset, elements);
+ TNode<ByteArray> elements = AllocateByteArray(byte_length);
+ StoreObjectField(holder, JSTypedArray::kElementsOffset, elements);
+ StoreObjectField(holder, JSTypedArray::kBasePointerOffset, elements);
StoreObjectFieldNoWriteBarrier(
- elements, FixedTypedArrayBase::kExternalPointerOffset,
- IntPtrConstant(FixedTypedArrayBase::ExternalPointerValueForOnHeapArray()),
+ holder, JSTypedArray::kExternalPointerOffset,
+ PointerConstant(JSTypedArray::ExternalPointerForOnHeapArray()),
MachineType::PointerRepresentation());
- return CAST(elements);
-}
-
-TNode<RawPtrT> TypedArrayBuiltinsAssembler::LoadDataPtr(
- TNode<JSTypedArray> typed_array) {
- TNode<FixedArrayBase> elements = LoadElements(typed_array);
- CSA_ASSERT(this, IsFixedTypedArray(elements));
- return LoadFixedTypedArrayBackingStore(CAST(elements));
+ return buffer;
}
TF_BUILTIN(TypedArrayBaseConstructor, TypedArrayBuiltinsAssembler) {
@@ -238,10 +194,10 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
// Default to zero if the {receiver}s buffer was detached.
TNode<JSArrayBuffer> receiver_buffer =
LoadJSArrayBufferViewBuffer(CAST(receiver));
- TNode<Smi> length = Select<Smi>(
- IsDetachedBuffer(receiver_buffer), [=] { return SmiConstant(0); },
+ TNode<UintPtrT> length = Select<UintPtrT>(
+ IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
[=] { return LoadJSTypedArrayLength(CAST(receiver)); });
- Return(length);
+ Return(ChangeUintPtrToTagged(length));
}
TNode<Word32T> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
@@ -269,7 +225,7 @@ TNode<IntPtrT> TypedArrayBuiltinsAssembler::GetTypedArrayElementSize(
return element_size.value();
}
-TypedArrayBuiltinsFromDSLAssembler::TypedArrayElementsInfo
+TorqueStructTypedArrayElementsInfo
TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
TNode<JSTypedArray> typed_array) {
TNode<Int32T> elements_kind = LoadElementsKind(typed_array);
@@ -282,13 +238,10 @@ TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(
[&](ElementsKind kind, int size, int typed_array_fun_index) {
DCHECK_GT(size, 0);
var_size_log2 = UintPtrConstant(ElementsKindToShiftSize(kind));
-
- Handle<Map> map(roots.MapForFixedTypedArray(kind), isolate());
- var_map = HeapConstant(map);
});
- return TypedArrayBuiltinsFromDSLAssembler::TypedArrayElementsInfo{
- var_size_log2.value(), var_map.value(), elements_kind};
+ return TorqueStructTypedArrayElementsInfo{var_size_log2.value(),
+ elements_kind};
}
TNode<JSFunction> TypedArrayBuiltinsAssembler::GetDefaultConstructor(
@@ -328,8 +281,9 @@ void TypedArrayBuiltinsAssembler::ThrowIfLengthLessThan(
TNode<Smi> min_length) {
// If typed_array.[[ArrayLength]] < min_length, throw a TypeError exception.
Label if_length_is_not_short(this);
- TNode<Smi> new_length = LoadJSTypedArrayLength(typed_array);
- GotoIfNot(SmiLessThan(new_length, min_length), &if_length_is_not_short);
+ TNode<UintPtrT> new_length = LoadJSTypedArrayLength(typed_array);
+ GotoIfNot(UintPtrLessThan(new_length, SmiUntag(min_length)),
+ &if_length_is_not_short);
ThrowTypeError(context, MessageTemplate::kTypedArrayTooShort);
BIND(&if_length_is_not_short);
@@ -383,8 +337,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
// Check for possible range errors.
- TNode<IntPtrT> source_length = SmiUntag(LoadJSTypedArrayLength(source));
- TNode<IntPtrT> target_length = SmiUntag(LoadJSTypedArrayLength(target));
+ TNode<IntPtrT> source_length = Signed(LoadJSTypedArrayLength(source));
+ TNode<IntPtrT> target_length = Signed(LoadJSTypedArrayLength(target));
TNode<IntPtrT> required_target_length = IntPtrAdd(source_length, offset);
GotoIf(IntPtrGreaterThan(required_target_length, target_length),
@@ -392,8 +346,8 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
// Grab pointers and byte lengths we need later on.
- TNode<RawPtrT> target_data_ptr = LoadDataPtr(target);
- TNode<RawPtrT> source_data_ptr = LoadDataPtr(source);
+ TNode<RawPtrT> target_data_ptr = LoadJSTypedArrayBackingStore(target);
+ TNode<RawPtrT> source_data_ptr = LoadJSTypedArrayBackingStore(source);
TNode<Word32T> source_el_kind = LoadElementsKind(source);
TNode<Word32T> target_el_kind = LoadElementsKind(target);
@@ -434,7 +388,7 @@ void TypedArrayBuiltinsAssembler::SetTypedArraySource(
IsBigInt64ElementsKind(target_el_kind)),
&exception);
- TNode<IntPtrT> source_length = SmiUntag(LoadJSTypedArrayLength(source));
+ TNode<IntPtrT> source_length = Signed(LoadJSTypedArrayLength(source));
CallCCopyTypedArrayElementsToTypedArray(source, target, source_length,
offset);
Goto(&out);
@@ -455,7 +409,7 @@ void TypedArrayBuiltinsAssembler::SetJSArraySource(
IntPtrLessThanOrEqual(offset, IntPtrConstant(Smi::kMaxValue)));
TNode<IntPtrT> source_length = SmiUntag(LoadFastJSArrayLength(source));
- TNode<IntPtrT> target_length = SmiUntag(LoadJSTypedArrayLength(target));
+ TNode<IntPtrT> target_length = Signed(LoadJSTypedArrayLength(target));
// Maybe out of bounds?
GotoIf(IntPtrGreaterThan(IntPtrAdd(source_length, offset), target_length),
@@ -811,34 +765,25 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
DispatchTypedArrayByElementsKind(
elements_kind,
[&](ElementsKind kind, int size, int typed_array_fun_index) {
- TNode<FixedTypedArrayBase> elements =
- CAST(LoadElements(new_typed_array));
BuildFastLoop(
IntPtrConstant(0), length,
[&](Node* index) {
TNode<Object> item = args.AtIndex(index, INTPTR_PARAMETERS);
- TNode<IntPtrT> intptr_index = UncheckedCast<IntPtrT>(index);
- if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
- EmitBigTypedArrayElementStore(new_typed_array, elements,
- intptr_index, item, context,
- &if_detached);
- } else {
- Node* value =
- PrepareValueForWriteToTypedArray(item, kind, context);
-
- // ToNumber may execute JavaScript code, which could detach
- // the array's buffer.
- Node* buffer = LoadObjectField(new_typed_array,
- JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), &if_detached);
-
- // GC may move backing store in ToNumber, thus load backing
- // store everytime in this loop.
- TNode<RawPtrT> backing_store =
- LoadFixedTypedArrayBackingStore(elements);
- StoreElement(backing_store, kind, index, value,
- INTPTR_PARAMETERS);
- }
+ Node* value =
+ PrepareValueForWriteToTypedArray(item, kind, context);
+
+ // ToNumber/ToBigInt may execute JavaScript code, which could
+ // detach the array's buffer.
+ Node* buffer =
+ LoadObjectField(new_typed_array, JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_detached);
+
+ // GC may move backing store in ToNumber, thus load backing
+ // store everytime in this loop.
+ TNode<RawPtrT> backing_store =
+ LoadJSTypedArrayBackingStore(new_typed_array);
+ StoreElement(backing_store, kind, index, value,
+ INTPTR_PARAMETERS);
},
1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
});
@@ -956,7 +901,8 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
// Source is a TypedArray with unmodified iterator behavior. Use the
// source object directly, taking advantage of the special-case code in
// TypedArrayCopyElements
- final_length = LoadJSTypedArrayLength(CAST(source));
+ // TODO(v8:4153): This needs to be handle to huge TypedArrays.
+ final_length = SmiTag(Signed(LoadJSTypedArrayLength(CAST(source))));
final_source = source;
Goto(&create_typed_array);
}
@@ -1033,7 +979,6 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
TNode<Word32T> elements_kind = LoadElementsKind(target_obj.value());
// 7e/13 : Copy the elements
- TNode<FixedTypedArrayBase> elements = CAST(LoadElements(target_obj.value()));
BuildFastLoop(
SmiConstant(0), final_length.value(),
[&](Node* index) {
@@ -1044,31 +989,24 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
CAST(CallJS(CodeFactory::Call(isolate()), context, map_fn, this_arg,
k_value, index));
- TNode<IntPtrT> intptr_index = SmiUntag(index);
DispatchTypedArrayByElementsKind(
elements_kind,
[&](ElementsKind kind, int size, int typed_array_fun_index) {
- if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
- EmitBigTypedArrayElementStore(target_obj.value(), elements,
- intptr_index, mapped_value,
- context, &if_detached);
- } else {
- Node* const final_value = PrepareValueForWriteToTypedArray(
- mapped_value, kind, context);
-
- // ToNumber may execute JavaScript code, which could detach
- // the array's buffer.
- Node* buffer = LoadObjectField(target_obj.value(),
- JSTypedArray::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), &if_detached);
-
- // GC may move backing store in map_fn, thus load backing
- // store in each iteration of this loop.
- TNode<RawPtrT> backing_store =
- LoadFixedTypedArrayBackingStore(elements);
- StoreElement(backing_store, kind, index, final_value,
- SMI_PARAMETERS);
- }
+ Node* const final_value =
+ PrepareValueForWriteToTypedArray(mapped_value, kind, context);
+
+ // ToNumber/ToBigInt may execute JavaScript code, which could
+ // detach the array's buffer.
+ Node* buffer = LoadObjectField(target_obj.value(),
+ JSTypedArray::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), &if_detached);
+
+ // GC may move backing store in map_fn, thus load backing
+ // store in each iteration of this loop.
+ TNode<RawPtrT> backing_store =
+ LoadJSTypedArrayBackingStore(target_obj.value());
+ StoreElement(backing_store, kind, index, final_value,
+ SMI_PARAMETERS);
});
},
1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index ab0ee6016d..6fb02a657c 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -5,16 +5,14 @@
#ifndef V8_BUILTINS_BUILTINS_TYPED_ARRAY_GEN_H_
#define V8_BUILTINS_BUILTINS_TYPED_ARRAY_GEN_H_
-#include "src/code-stub-assembler.h"
-#include "torque-generated/builtins-typed-array-from-dsl-gen.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
public:
- using ElementsInfo =
- TypedArrayBuiltinsFromDSLAssembler::TypedArrayElementsInfo;
+ using ElementsInfo = TorqueStructTypedArrayElementsInfo;
explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
@@ -29,7 +27,7 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
const char* method_name,
IterationKind iteration_kind);
- void SetupTypedArray(TNode<JSTypedArray> holder, TNode<Smi> length,
+ void SetupTypedArray(TNode<JSTypedArray> holder, TNode<UintPtrT> length,
TNode<UintPtrT> byte_offset,
TNode<UintPtrT> byte_length);
void AttachBuffer(TNode<JSTypedArray> holder, TNode<JSArrayBuffer> buffer,
@@ -40,15 +38,10 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<JSTypedArray> holder,
TNode<UintPtrT> byte_length);
- TNode<FixedTypedArrayBase> AllocateOnHeapElements(TNode<Map> map,
- TNode<IntPtrT> byte_length,
- TNode<Number> length);
-
TNode<Map> LoadMapForType(TNode<JSTypedArray> array);
TNode<BoolT> IsMockArrayBufferAllocatorFlag();
TNode<UintPtrT> CalculateExternalPointer(TNode<UintPtrT> backing_store,
TNode<UintPtrT> byte_offset);
- TNode<RawPtrT> LoadDataPtr(TNode<JSTypedArray> typed_array);
// Returns true if kind is either UINT8_ELEMENTS or UINT8_CLAMPED_ELEMENTS.
TNode<Word32T> IsUint8ElementsKind(TNode<Word32T> kind);
@@ -114,7 +107,7 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
TNode<IntPtrT> start,
TNode<IntPtrT> end);
- typedef std::function<void(ElementsKind, int, int)> TypedArraySwitchCase;
+ using TypedArraySwitchCase = std::function<void(ElementsKind, int, int)>;
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc
index ac1b23c8d3..326841bb4a 100644
--- a/deps/v8/src/builtins/builtins-typed-array.cc
+++ b/deps/v8/src/builtins/builtins-typed-array.cc
@@ -4,11 +4,11 @@
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
-#include "src/counters.h"
-#include "src/elements.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/elements.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -33,7 +33,7 @@ int64_t CapRelativeIndex(Handle<Object> num, int64_t minimum, int64_t maximum) {
: std::min<int64_t>(relative, maximum);
} else {
DCHECK(num->IsHeapNumber());
- double relative = HeapNumber::cast(*num)->value();
+ double relative = HeapNumber::cast(*num).value();
DCHECK(!std::isnan(relative));
return static_cast<int64_t>(
relative < 0 ? std::max<double>(relative + maximum, minimum)
@@ -51,7 +51,7 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
- int64_t len = array->length_value();
+ int64_t len = array->length();
int64_t to = 0;
int64_t from = 0;
int64_t final = len;
@@ -93,14 +93,12 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
DCHECK_LT(to, len);
DCHECK_GE(len - count, 0);
- Handle<FixedTypedArrayBase> elements(
- FixedTypedArrayBase::cast(array->elements()), isolate);
size_t element_size = array->element_size();
to = to * element_size;
from = from * element_size;
count = count * element_size;
- uint8_t* data = static_cast<uint8_t*>(elements->DataPtr());
+ uint8_t* data = static_cast<uint8_t*>(array->DataPtr());
std::memmove(data + to, data + from, count);
return *array;
@@ -124,7 +122,7 @@ BUILTIN(TypedArrayPrototypeFill) {
Object::ToNumber(isolate, obj_value));
}
- int64_t len = array->length_value();
+ int64_t len = array->length();
int64_t start = 0;
int64_t end = len;
@@ -171,7 +169,7 @@ BUILTIN(TypedArrayPrototypeIncludes) {
if (args.length() < 2) return ReadOnlyRoots(isolate).false_value();
- int64_t len = array->length_value();
+ int64_t len = array->length();
if (len == 0) return ReadOnlyRoots(isolate).false_value();
int64_t index = 0;
@@ -203,7 +201,7 @@ BUILTIN(TypedArrayPrototypeIndexOf) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
- int64_t len = array->length_value();
+ int64_t len = array->length();
if (len == 0) return Smi::FromInt(-1);
int64_t index = 0;
@@ -234,7 +232,7 @@ BUILTIN(TypedArrayPrototypeLastIndexOf) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
- int64_t len = array->length_value();
+ int64_t len = array->length();
if (len == 0) return Smi::FromInt(-1);
int64_t index = len - 1;
diff --git a/deps/v8/src/builtins/builtins-utils-gen.h b/deps/v8/src/builtins/builtins-utils-gen.h
index a9b040b040..f9e2ba74fa 100644
--- a/deps/v8/src/builtins/builtins-utils-gen.h
+++ b/deps/v8/src/builtins/builtins-utils-gen.h
@@ -29,7 +29,7 @@ class CodeAssemblerState;
#define TF_BUILTIN(Name, AssemblerBase) \
class Name##Assembler : public AssemblerBase { \
public: \
- typedef Builtin_##Name##_InterfaceDescriptor Descriptor; \
+ using Descriptor = Builtin_##Name##_InterfaceDescriptor; \
\
explicit Name##Assembler(compiler::CodeAssemblerState* state) \
: AssemblerBase(state) {} \
diff --git a/deps/v8/src/builtins/builtins-utils-inl.h b/deps/v8/src/builtins/builtins-utils-inl.h
index 6696324dbd..c9d15f09dd 100644
--- a/deps/v8/src/builtins/builtins-utils-inl.h
+++ b/deps/v8/src/builtins/builtins-utils-inl.h
@@ -7,7 +7,7 @@
#include "src/builtins/builtins-utils.h"
-#include "src/arguments-inl.h"
+#include "src/execution/arguments-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index ebae29ea42..822f9df6ec 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -5,11 +5,11 @@
#ifndef V8_BUILTINS_BUILTINS_UTILS_H_
#define V8_BUILTINS_BUILTINS_UTILS_H_
-#include "src/arguments.h"
#include "src/base/logging.h"
#include "src/builtins/builtins.h"
+#include "src/execution/arguments.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
namespace v8 {
namespace internal {
@@ -66,31 +66,31 @@ class BuiltinArguments : public Arguments {
// through the BuiltinArguments object args.
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
-#define BUILTIN(name) \
- V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
- BuiltinArguments args, Isolate* isolate); \
- \
- V8_NOINLINE static Address Builtin_Impl_Stats_##name( \
- int args_length, Address* args_object, Isolate* isolate) { \
- BuiltinArguments args(args_length, args_object); \
- RuntimeCallTimerScope timer(isolate, \
- RuntimeCallCounterId::kBuiltin_##name); \
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
- "V8.Builtin_" #name); \
- return Builtin_Impl_##name(args, isolate)->ptr(); \
- } \
- \
- V8_WARN_UNUSED_RESULT Address Builtin_##name( \
- int args_length, Address* args_object, Isolate* isolate) { \
- DCHECK(isolate->context().is_null() || isolate->context()->IsContext()); \
- if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) { \
- return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \
- } \
- BuiltinArguments args(args_length, args_object); \
- return Builtin_Impl_##name(args, isolate)->ptr(); \
- } \
- \
- V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
+#define BUILTIN(name) \
+ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
+ BuiltinArguments args, Isolate* isolate); \
+ \
+ V8_NOINLINE static Address Builtin_Impl_Stats_##name( \
+ int args_length, Address* args_object, Isolate* isolate) { \
+ BuiltinArguments args(args_length, args_object); \
+ RuntimeCallTimerScope timer(isolate, \
+ RuntimeCallCounterId::kBuiltin_##name); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
+ "V8.Builtin_" #name); \
+ return Builtin_Impl_##name(args, isolate).ptr(); \
+ } \
+ \
+ V8_WARN_UNUSED_RESULT Address Builtin_##name( \
+ int args_length, Address* args_object, Isolate* isolate) { \
+ DCHECK(isolate->context().is_null() || isolate->context().IsContext()); \
+ if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) { \
+ return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \
+ } \
+ BuiltinArguments args(args_length, args_object); \
+ return Builtin_Impl_##name(args, isolate).ptr(); \
+ } \
+ \
+ V8_WARN_UNUSED_RESULT static Object Builtin_Impl_##name( \
BuiltinArguments args, Isolate* isolate)
// ----------------------------------------------------------------------------
diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc
index 373e9dbfef..0b86d35853 100644
--- a/deps/v8/src/builtins/builtins-wasm-gen.cc
+++ b/deps/v8/src/builtins/builtins-wasm-gen.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "src/builtins/builtins-utils-gen.h"
-#include "src/code-stub-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
@@ -105,6 +105,14 @@ TF_BUILTIN(WasmThrow, WasmBuiltinsAssembler) {
TailCallRuntimeWithCEntry(Runtime::kThrow, centry, context, exception);
}
+TF_BUILTIN(WasmRethrow, WasmBuiltinsAssembler) {
+ TNode<Object> exception = UncheckedParameter(Descriptor::kException);
+ TNode<Object> instance = LoadInstanceFromFrame();
+ TNode<Code> centry = LoadCEntryFromInstance(instance);
+ TNode<Object> context = LoadContextFromInstance(instance);
+ TailCallRuntimeWithCEntry(Runtime::kReThrow, centry, context, exception);
+}
+
TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) {
TNode<Uint32T> address =
UncheckedCast<Uint32T>(Parameter(Descriptor::kAddress));
diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc
index 1d8a6f39f6..78f37c0cf5 100644
--- a/deps/v8/src/builtins/builtins-weak-refs.cc
+++ b/deps/v8/src/builtins/builtins-weak-refs.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/builtins/builtins-utils-inl.h"
-#include "src/counters.h"
+#include "src/logging/counters.h"
#include "src/objects/js-weak-refs-inl.h"
namespace v8 {
@@ -15,7 +15,7 @@ BUILTIN(FinalizationGroupConstructor) {
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- handle(target->shared()->Name(), isolate)));
+ handle(target->shared().Name(), isolate)));
}
// [[Construct]]
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
@@ -38,9 +38,9 @@ BUILTIN(FinalizationGroupConstructor) {
finalization_group->set_flags(
JSFinalizationGroup::ScheduledForCleanupField::encode(false));
- DCHECK(finalization_group->active_cells()->IsUndefined(isolate));
- DCHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
- DCHECK(finalization_group->key_map()->IsUndefined(isolate));
+ DCHECK(finalization_group->active_cells().IsUndefined(isolate));
+ DCHECK(finalization_group->cleared_cells().IsUndefined(isolate));
+ DCHECK(finalization_group->key_map().IsUndefined(isolate));
return *finalization_group;
}
@@ -125,7 +125,7 @@ BUILTIN(WeakRefConstructor) {
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
- handle(target->shared()->Name(), isolate)));
+ handle(target->shared().Name(), isolate)));
}
// [[Construct]]
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
@@ -155,14 +155,14 @@ BUILTIN(WeakRefConstructor) {
BUILTIN(WeakRefDeref) {
HandleScope scope(isolate);
CHECK_RECEIVER(JSWeakRef, weak_ref, "WeakRef.prototype.deref");
- if (weak_ref->target()->IsJSReceiver()) {
+ if (weak_ref->target().IsJSReceiver()) {
Handle<JSReceiver> target =
handle(JSReceiver::cast(weak_ref->target()), isolate);
// AddKeepDuringJobTarget might allocate and cause a GC, but it won't clear
// weak_ref since we hold a Handle to its target.
isolate->heap()->AddKeepDuringJobTarget(target);
} else {
- DCHECK(weak_ref->target()->IsUndefined(isolate));
+ DCHECK(weak_ref->target().IsUndefined(isolate));
}
return weak_ref->target();
}
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 4eee41febd..ed4a844c98 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -4,18 +4,21 @@
#include "src/builtins/builtins.h"
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
#include "src/builtins/builtins-descriptors.h"
-#include "src/callable.h"
-#include "src/code-tracer.h"
-#include "src/isolate.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/code-tracer.h"
+#include "src/execution/isolate.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/logging/code-events.h" // For CodeCreateEvent.
+#include "src/logging/log.h" // For Logger.
#include "src/objects/fixed-array.h"
-#include "src/ostreams.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/visitors.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/visitors.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -32,28 +35,49 @@ namespace {
struct BuiltinMetadata {
const char* name;
Builtins::Kind kind;
- // For CPP and API builtins it's cpp_entry address and for TFJ it's a
- // parameter count.
- Address cpp_entry_or_parameter_count;
+
+ struct BytecodeAndScale {
+ interpreter::Bytecode bytecode : 8;
+ interpreter::OperandScale scale : 8;
+ };
+
+ STATIC_ASSERT(sizeof(interpreter::Bytecode) == 1);
+ STATIC_ASSERT(sizeof(interpreter::OperandScale) == 1);
+ STATIC_ASSERT(sizeof(BytecodeAndScale) <= sizeof(Address));
+
+ // The `data` field has kind-specific contents.
+ union KindSpecificData {
+ // TODO(jgruber): Union constructors are needed since C++11 does not support
+ // designated initializers (e.g.: {.parameter_count = count}). Update once
+ // we're at C++20 :)
+ // The constructors are marked constexpr to avoid the need for a static
+ // initializer for builtins.cc (see check-static-initializers.sh).
+ constexpr KindSpecificData() : cpp_entry(kNullAddress) {}
+ constexpr KindSpecificData(Address cpp_entry) : cpp_entry(cpp_entry) {}
+ constexpr KindSpecificData(int parameter_count,
+ int /* To disambiguate from above */)
+ : parameter_count(static_cast<int16_t>(parameter_count)) {}
+ constexpr KindSpecificData(interpreter::Bytecode bytecode,
+ interpreter::OperandScale scale)
+ : bytecode_and_scale{bytecode, scale} {}
+ Address cpp_entry; // For CPP builtins.
+ int16_t parameter_count; // For TFJ builtins.
+ BytecodeAndScale bytecode_and_scale; // For BCH builtins.
+ } data;
};
#define DECL_CPP(Name, ...) \
- {#Name, Builtins::CPP, FUNCTION_ADDR(Builtin_##Name)},
-#define DECL_API(Name, ...) \
- {#Name, Builtins::API, FUNCTION_ADDR(Builtin_##Name)},
-#define DECL_TFJ(Name, Count, ...) \
- {#Name, Builtins::TFJ, static_cast<Address>(Count)},
-#define DECL_TFC(Name, ...) {#Name, Builtins::TFC, kNullAddress},
-#define DECL_TFS(Name, ...) {#Name, Builtins::TFS, kNullAddress},
-#define DECL_TFH(Name, ...) {#Name, Builtins::TFH, kNullAddress},
-#define DECL_BCH(Name, ...) {#Name, Builtins::BCH, kNullAddress},
-#define DECL_ASM(Name, ...) {#Name, Builtins::ASM, kNullAddress},
-const BuiltinMetadata builtin_metadata[] = {
- BUILTIN_LIST(DECL_CPP, DECL_API, DECL_TFJ, DECL_TFC, DECL_TFS, DECL_TFH,
- DECL_BCH, DECL_ASM)
-};
+ {#Name, Builtins::CPP, {FUNCTION_ADDR(Builtin_##Name)}},
+#define DECL_TFJ(Name, Count, ...) {#Name, Builtins::TFJ, {Count, 0}},
+#define DECL_TFC(Name, ...) {#Name, Builtins::TFC, {}},
+#define DECL_TFS(Name, ...) {#Name, Builtins::TFS, {}},
+#define DECL_TFH(Name, ...) {#Name, Builtins::TFH, {}},
+#define DECL_BCH(Name, OperandScale, Bytecode) \
+ {#Name, Builtins::BCH, {Bytecode, OperandScale}},
+#define DECL_ASM(Name, ...) {#Name, Builtins::ASM, {}},
+const BuiltinMetadata builtin_metadata[] = {BUILTIN_LIST(
+ DECL_CPP, DECL_TFJ, DECL_TFC, DECL_TFS, DECL_TFH, DECL_BCH, DECL_ASM)};
#undef DECL_CPP
-#undef DECL_API
#undef DECL_TFJ
#undef DECL_TFC
#undef DECL_TFS
@@ -81,13 +105,13 @@ const char* Builtins::Lookup(Address pc) {
// Off-heap pc's can be looked up through binary search.
if (FLAG_embedded_builtins) {
Code maybe_builtin = InstructionStream::TryLookupCode(isolate_, pc);
- if (!maybe_builtin.is_null()) return name(maybe_builtin->builtin_index());
+ if (!maybe_builtin.is_null()) return name(maybe_builtin.builtin_index());
}
// May be called during initialization (disassembler).
if (initialized_) {
for (int i = 0; i < builtin_count; i++) {
- if (isolate_->heap()->builtin(i)->contains(pc)) return name(i);
+ if (isolate_->heap()->builtin(i).contains(pc)) return name(i);
}
}
return nullptr;
@@ -130,7 +154,7 @@ Handle<Code> Builtins::builtin_handle(int index) {
// static
int Builtins::GetStackParameterCount(Name name) {
DCHECK(Builtins::KindOf(name) == TFJ);
- return static_cast<int>(builtin_metadata[name].cpp_entry_or_parameter_count);
+ return builtin_metadata[name].data.parameter_count;
}
// static
@@ -145,8 +169,8 @@ Callable Builtins::CallableFor(Isolate* isolate, Name name) {
key = Builtin_##Name##_InterfaceDescriptor::key(); \
break; \
}
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER,
- CASE_OTHER, CASE_OTHER, IGNORE_BUILTIN, CASE_OTHER)
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, CASE_OTHER, CASE_OTHER,
+ CASE_OTHER, IGNORE_BUILTIN, CASE_OTHER)
#undef CASE_OTHER
default:
Builtins::Kind kind = Builtins::KindOf(name);
@@ -190,19 +214,19 @@ void Builtins::PrintBuiltinSize() {
const char* kind = KindNameOf(i);
Code code = builtin(i);
PrintF(stdout, "%s Builtin, %s, %d\n", kind, builtin_name,
- code->InstructionSize());
+ code.InstructionSize());
}
}
// static
Address Builtins::CppEntryOf(int index) {
- DCHECK(Builtins::HasCppImplementation(index));
- return builtin_metadata[index].cpp_entry_or_parameter_count;
+ DCHECK(Builtins::IsCpp(index));
+ return builtin_metadata[index].data.cpp_entry;
}
// static
bool Builtins::IsBuiltin(const Code code) {
- return Builtins::IsBuiltinId(code->builtin_index());
+ return Builtins::IsBuiltinId(code.builtin_index());
}
bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
@@ -221,7 +245,7 @@ bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
// static
bool Builtins::IsIsolateIndependentBuiltin(const Code code) {
if (FLAG_embedded_builtins) {
- const int builtin_index = code->builtin_index();
+ const int builtin_index = code.builtin_index();
return Builtins::IsBuiltinId(builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
} else {
@@ -250,7 +274,36 @@ void Builtins::UpdateBuiltinEntryTable(Isolate* isolate) {
Heap* heap = isolate->heap();
Address* builtin_entry_table = isolate->builtin_entry_table();
for (int i = 0; i < builtin_count; i++) {
- builtin_entry_table[i] = heap->builtin(i)->InstructionStart();
+ builtin_entry_table[i] = heap->builtin(i).InstructionStart();
+ }
+}
+
+// static
+void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
+ if (!isolate->logger()->is_listening_to_code_events() &&
+ !isolate->is_profiling()) {
+ return; // No need to iterate the entire table in this case.
+ }
+
+ Address* builtins = isolate->builtins_table();
+ int i = 0;
+ for (; i < kFirstBytecodeHandler; i++) {
+ auto code = AbstractCode::cast(Object(builtins[i]));
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG, code,
+ Builtins::name(i)));
+ }
+
+ STATIC_ASSERT(kLastBytecodeHandlerPlusOne == builtin_count);
+ for (; i < builtin_count; i++) {
+ auto code = AbstractCode::cast(Object(builtins[i]));
+ interpreter::Bytecode bytecode =
+ builtin_metadata[i].data.bytecode_and_scale.bytecode;
+ interpreter::OperandScale scale =
+ builtin_metadata[i].data.bytecode_and_scale.scale;
+ PROFILE(isolate,
+ CodeCreateEvent(
+ CodeEventListener::BYTECODE_HANDLER_TAG, code,
+ interpreter::Bytecodes::ToString(bytecode, scale).c_str()));
}
}
@@ -291,16 +344,18 @@ constexpr int OffHeapTrampolineGenerator::kBufferSize;
} // namespace
// static
-Handle<Code> Builtins::GenerateOffHeapTrampolineFor(Isolate* isolate,
- Address off_heap_entry) {
+Handle<Code> Builtins::GenerateOffHeapTrampolineFor(
+ Isolate* isolate, Address off_heap_entry, int32_t kind_specfic_flags) {
DCHECK_NOT_NULL(isolate->embedded_blob());
DCHECK_NE(0, isolate->embedded_blob_size());
OffHeapTrampolineGenerator generator(isolate);
CodeDesc desc = generator.Generate(off_heap_entry);
- return isolate->factory()->NewCode(desc, Code::BUILTIN,
- generator.CodeObject());
+ return Factory::CodeBuilder(isolate, desc, Code::BUILTIN)
+ .set_self_reference(generator.CodeObject())
+ .set_read_only_data_container(kind_specfic_flags)
+ .Build();
}
// static
@@ -330,7 +385,6 @@ const char* Builtins::KindNameOf(int index) {
// clang-format off
switch (kind) {
case CPP: return "CPP";
- case API: return "API";
case TFJ: return "TFJ";
case TFC: return "TFC";
case TFS: return "TFS";
@@ -346,12 +400,6 @@ const char* Builtins::KindNameOf(int index) {
bool Builtins::IsCpp(int index) { return Builtins::KindOf(index) == CPP; }
// static
-bool Builtins::HasCppImplementation(int index) {
- Kind kind = Builtins::KindOf(index);
- return (kind == CPP || kind == API);
-}
-
-// static
bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
Handle<JSObject> target_global_proxy) {
if (FLAG_allow_unsafe_function_constructor) return true;
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index e077bc1b15..f885c6f29f 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -7,7 +7,7 @@
#include "src/base/flags.h"
#include "src/builtins/builtins-definitions.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -47,7 +47,7 @@ class Builtins {
enum Name : int32_t {
#define DEF_ENUM(Name, ...) k##Name,
BUILTIN_LIST(DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM,
- DEF_ENUM, DEF_ENUM)
+ DEF_ENUM)
#undef DEF_ENUM
builtin_count,
@@ -60,12 +60,20 @@ class Builtins {
static const int32_t kNoBuiltinId = -1;
+ static constexpr int kFirstWideBytecodeHandler =
+ kFirstBytecodeHandler + kNumberOfBytecodeHandlers;
+ static constexpr int kFirstExtraWideBytecodeHandler =
+ kFirstWideBytecodeHandler + kNumberOfWideBytecodeHandlers;
+ static constexpr int kLastBytecodeHandlerPlusOne =
+ kFirstExtraWideBytecodeHandler + kNumberOfWideBytecodeHandlers;
+ STATIC_ASSERT(kLastBytecodeHandlerPlusOne == builtin_count);
+
static constexpr bool IsBuiltinId(int maybe_id) {
return 0 <= maybe_id && maybe_id < builtin_count;
}
// The different builtin kinds are documented in builtins-definitions.h.
- enum Kind { CPP, API, TFJ, TFC, TFS, TFH, BCH, ASM };
+ enum Kind { CPP, TFJ, TFC, TFS, TFH, BCH, ASM };
static BailoutId GetContinuationBailoutId(Name name);
static Name GetBuiltinFromBailoutId(BailoutId);
@@ -102,7 +110,6 @@ class Builtins {
static const char* KindNameOf(int index);
static bool IsCpp(int index);
- static bool HasCppImplementation(int index);
// True, iff the given code object is a builtin. Note that this does not
// necessarily mean that its kind is Code::BUILTIN.
@@ -115,14 +122,6 @@ class Builtins {
// True, iff the given code object is a builtin with off-heap embedded code.
static bool IsIsolateIndependentBuiltin(const Code code);
- static constexpr int kFirstWideBytecodeHandler =
- kFirstBytecodeHandler + kNumberOfBytecodeHandlers;
- static constexpr int kFirstExtraWideBytecodeHandler =
- kFirstWideBytecodeHandler + kNumberOfWideBytecodeHandlers;
- STATIC_ASSERT(kFirstExtraWideBytecodeHandler +
- kNumberOfWideBytecodeHandlers ==
- builtin_count);
-
// True, iff the given builtin contains no isolate-specific code and can be
// embedded into the binary.
static constexpr bool kAllBuiltinsAreIsolateIndependent = true;
@@ -143,6 +142,9 @@ class Builtins {
// the builtins table.
static void UpdateBuiltinEntryTable(Isolate* isolate);
+ // Emits a CodeCreateEvent for every builtin.
+ static void EmitCodeCreateEvents(Isolate* isolate);
+
bool is_initialized() const { return initialized_; }
// Used by SetupIsolateDelegate and Deserializer.
@@ -156,10 +158,7 @@ class Builtins {
Handle<Object> receiver, int argc, Handle<Object> args[],
Handle<HeapObject> new_target);
- enum ExitFrameType { EXIT, BUILTIN_EXIT };
-
- static void Generate_Adaptor(MacroAssembler* masm, Address builtin_address,
- ExitFrameType exit_frame_type);
+ static void Generate_Adaptor(MacroAssembler* masm, Address builtin_address);
static void Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
@@ -172,7 +171,8 @@ class Builtins {
// The result should not be used directly, but only from the related Factory
// function.
static Handle<Code> GenerateOffHeapTrampolineFor(Isolate* isolate,
- Address off_heap_entry);
+ Address off_heap_entry,
+ int32_t kind_specific_flags);
// Generate the RelocInfo ByteArray that would be generated for an offheap
// trampoline.
@@ -230,8 +230,8 @@ class Builtins {
#define DECLARE_TF(Name, ...) \
static void Generate_##Name(compiler::CodeAssemblerState* state);
- BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
- DECLARE_TF, DECLARE_TF, IGNORE_BUILTIN, DECLARE_ASM)
+ BUILTIN_LIST(IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF, DECLARE_TF, DECLARE_TF,
+ IGNORE_BUILTIN, DECLARE_ASM)
#undef DECLARE_ASM
#undef DECLARE_TF
diff --git a/deps/v8/src/builtins/collections.tq b/deps/v8/src/builtins/collections.tq
index 8cf9f3b90d..eb95a77023 100644
--- a/deps/v8/src/builtins/collections.tq
+++ b/deps/v8/src/builtins/collections.tq
@@ -5,9 +5,9 @@
#include 'src/builtins/builtins-collections-gen.h'
namespace collections {
+ @export
macro LoadKeyValuePairNoSideEffects(implicit context: Context)(o: Object):
- KeyValuePair
- labels MayHaveSideEffects {
+ KeyValuePair labels MayHaveSideEffects {
typeswitch (o) {
case (a: FastJSArray): {
const length: Smi = a.length;
@@ -42,6 +42,7 @@ namespace collections {
}
}
+ @export
transitioning macro LoadKeyValuePair(implicit context: Context)(o: Object):
KeyValuePair {
try {
diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc
index 6c779e74f6..94e8dc05ec 100644
--- a/deps/v8/src/builtins/constants-table-builder.cc
+++ b/deps/v8/src/builtins/constants-table-builder.cc
@@ -4,10 +4,10 @@
#include "src/builtins/constants-table-builder.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
#include "src/objects/oddball-inl.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
@@ -72,7 +72,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
DCHECK(isolate_->IsGeneratingEmbeddedBuiltins());
DCHECK(self_reference->IsOddball());
- DCHECK(Oddball::cast(*self_reference)->kind() ==
+ DCHECK(Oddball::cast(*self_reference).kind() ==
Oddball::kSelfReferenceMarker);
#endif
@@ -101,20 +101,20 @@ void BuiltinsConstantsTableBuilder::Finalize() {
for (auto it = it_scope.begin(); it != it_scope.end(); ++it) {
uint32_t index = *it.entry();
Object value = it.key();
- if (value->IsCode() && Code::cast(value)->kind() == Code::BUILTIN) {
+ if (value.IsCode() && Code::cast(value).kind() == Code::BUILTIN) {
// Replace placeholder code objects with the real builtin.
// See also: SetupIsolateDelegate::PopulateWithPlaceholders.
// TODO(jgruber): Deduplicate placeholders and their corresponding
// builtin.
- value = builtins->builtin(Code::cast(value)->builtin_index());
+ value = builtins->builtin(Code::cast(value).builtin_index());
}
- DCHECK(value->IsHeapObject());
+ DCHECK(value.IsHeapObject());
table->set(index, value);
}
#ifdef DEBUG
for (int i = 0; i < map_.size(); i++) {
- DCHECK(table->get(i)->IsHeapObject());
+ DCHECK(table->get(i).IsHeapObject());
DCHECK_NE(ReadOnlyRoots(isolate_).undefined_value(), table->get(i));
DCHECK_NE(ReadOnlyRoots(isolate_).self_reference_marker(), table->get(i));
}
diff --git a/deps/v8/src/builtins/constants-table-builder.h b/deps/v8/src/builtins/constants-table-builder.h
index 53cf2b4d49..89c95912a1 100644
--- a/deps/v8/src/builtins/constants-table-builder.h
+++ b/deps/v8/src/builtins/constants-table-builder.h
@@ -5,10 +5,10 @@
#ifndef V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
#define V8_BUILTINS_CONSTANTS_TABLE_BUILDER_H_
-#include "src/allocation.h"
#include "src/base/macros.h"
-#include "src/handles.h"
-#include "src/identity-map.h"
+#include "src/utils/allocation.h"
+#include "src/utils/identity-map.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
@@ -42,7 +42,7 @@ class BuiltinsConstantsTableBuilder final {
Isolate* isolate_;
// Maps objects to corresponding indices within the constants list.
- typedef IdentityMap<uint32_t, FreeStoreAllocationPolicy> ConstantsMap;
+ using ConstantsMap = IdentityMap<uint32_t, FreeStoreAllocationPolicy>;
ConstantsMap map_;
DISALLOW_COPY_AND_ASSIGN(BuiltinsConstantsTableBuilder);
diff --git a/deps/v8/src/builtins/data-view.tq b/deps/v8/src/builtins/data-view.tq
index 383be19a9d..842e9527ee 100644
--- a/deps/v8/src/builtins/data-view.tq
+++ b/deps/v8/src/builtins/data-view.tq
@@ -527,7 +527,6 @@ namespace data_view {
extern macro ToNumber(Context, Object): Number;
extern macro ToBigInt(Context, Object): BigInt;
- extern macro TruncateFloat64ToFloat32(float64): float32;
extern macro TruncateFloat64ToWord32(float64): uint32;
extern macro DataViewBuiltinsAssembler::StoreWord8(RawPtr, uintptr, uint32):
diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq
index 6113e41f91..5559188347 100644
--- a/deps/v8/src/builtins/frames.tq
+++ b/deps/v8/src/builtins/frames.tq
@@ -141,6 +141,7 @@ Cast<ArgumentsAdaptorFrame>(implicit context: Context)(f: Frame):
// beginning of builtin code while the target value is still in the register
// and the former should be used in slow paths in order to reduce register
// pressure on the fast path.
+@export
macro LoadTargetFromFrame(): JSFunction {
return LoadFramePointer().function;
}
diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.h b/deps/v8/src/builtins/growable-fixed-array-gen.h
index f720659dee..42f2afb281 100644
--- a/deps/v8/src/builtins/growable-fixed-array-gen.h
+++ b/deps/v8/src/builtins/growable-fixed-array-gen.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
#define V8_BUILTINS_GROWABLE_FIXED_ARRAY_GEN_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 40a37b6a9f..0d80c681fb 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -4,24 +4,24 @@
#if V8_TARGET_ARCH_IA32
-#include "src/api-arguments.h"
+#include "src/api/api-arguments.h"
#include "src/base/adapters.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
-#include "src/macro-assembler-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
@@ -30,18 +30,11 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ Move(kJavaScriptCallExtraArg1Register,
Immediate(ExternalReference::Create(address)));
- if (exit_frame_type == BUILTIN_EXIT) {
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK(exit_frame_type == EXIT);
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
- RelocInfo::CODE_TARGET);
- }
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -405,7 +398,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ push(Immediate(StackFrame::TypeToMarker(type)));
// Reserve a slot for the context. It is filled after the root register has
// been set up.
- __ sub(esp, Immediate(kSystemPointerSize));
+ __ AllocateStackSpace(kSystemPointerSize);
// Save callee-saved registers (C calling conventions).
__ push(edi);
__ push(esi);
@@ -810,7 +803,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
// Load the optimized code from the feedback vector and re-use the register.
Register optimized_code_entry = scratch;
__ mov(optimized_code_entry,
- FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+ FieldOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
@@ -1242,7 +1236,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ lea(scratch1,
Operand(num_args, times_system_pointer_size, kSystemPointerSize));
- __ AllocateStackFrame(scratch1);
+ __ AllocateStackSpace(scratch1);
// Step 2 move return_address and slots around it to the correct locations.
// Move from top to bottom, otherwise we may overwrite when num_args = 0 or 1,
@@ -1388,7 +1382,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ bind(&trampoline_loaded);
__ Pop(eax);
- __ add(scratch, Immediate(interpreter_entry_return_pc_offset->value()));
+ __ add(scratch, Immediate(interpreter_entry_return_pc_offset.value()));
__ push(scratch);
// Initialize the dispatch table register.
@@ -2219,7 +2213,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label done;
__ lea(ecx, Operand(edx, times_system_pointer_size, 0));
- __ sub(esp, ecx);
+ __ sub(esp, ecx); // Not Windows-friendly, but corrected below.
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack
// limit".
@@ -2235,6 +2229,19 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+#if V8_OS_WIN
+ // Correctly allocate the stack space that was checked above.
+ {
+ Label win_done;
+ __ cmp(ecx, TurboAssemblerBase::kStackPageSize);
+ __ j(less_equal, &win_done, Label::kNear);
+ // Reset esp and walk through the range touching every page.
+ __ lea(esp, Operand(esp, edx, times_system_pointer_size, 0));
+ __ AllocateStackSpace(ecx);
+ __ bind(&win_done);
+ }
+#endif
+
// Adjust effective number of arguments to include return address.
__ inc(eax);
@@ -2648,7 +2655,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
arraysize(wasm::kFpParamRegisters),
"frame size mismatch");
- __ sub(esp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
+ __ AllocateStackSpace(kSimd128Size * arraysize(wasm::kFpParamRegisters));
int offset = 0;
for (DoubleRegister reg : wasm::kFpParamRegisters) {
__ movdqu(Operand(esp, offset), reg);
@@ -2882,7 +2889,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
if (CpuFeatures::IsSupported(SSE3)) {
CpuFeatureScope scope(masm, SSE3);
// Reserve space for 64 bit answer.
- __ sub(esp, Immediate(kDoubleSize)); // Nolint.
+ __ AllocateStackSpace(kDoubleSize); // Nolint.
// Do conversion, which cannot fail because we checked the exponent.
__ fisttp_d(Operand(esp, 0));
__ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
@@ -3005,16 +3012,6 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ mov(esi, __ ExternalReferenceAsOperand(next_address, esi));
__ mov(edi, __ ExternalReferenceAsOperand(limit_address, edi));
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, eax);
- __ Move(Operand(esp, 0),
- Immediate(ExternalReference::isolate_address(isolate)));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
Label profiler_disabled;
Label end_profiler_check;
__ Move(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
@@ -3033,16 +3030,6 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ call(function_address);
__ bind(&end_profiler_check);
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, eax);
- __ mov(eax, Immediate(ExternalReference::isolate_address(isolate)));
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
Label prologue;
// Load the value from ReturnValue
__ mov(eax, return_value_operand);
@@ -3164,7 +3151,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(!AreAliased(api_function_address, argc, holder));
- typedef FunctionCallbackArguments FCA;
+ using FCA = FunctionCallbackArguments;
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
diff --git a/deps/v8/src/builtins/internal-coverage.tq b/deps/v8/src/builtins/internal-coverage.tq
new file mode 100644
index 0000000000..4e75c6d837
--- /dev/null
+++ b/deps/v8/src/builtins/internal-coverage.tq
@@ -0,0 +1,64 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-regexp-gen.h'
+
+namespace internal_coverage {
+
+ const kHasCoverageInfo:
+ constexpr int31 generates 'DebugInfo::kHasCoverageInfo';
+
+ const kFirstSlotIndex:
+ constexpr int31 generates 'CoverageInfo::kFirstSlotIndex';
+ const kSlotBlockCountIndex:
+ constexpr int31 generates 'CoverageInfo::kSlotBlockCountIndex';
+ const kSlotIndexCountLog2:
+ constexpr int31 generates 'CoverageInfo::kSlotIndexCountLog2';
+ const kSlotIndexCountMask:
+ constexpr int31 generates 'CoverageInfo::kSlotIndexCountMask';
+
+ macro GetCoverageInfo(implicit context: Context)(function: JSFunction):
+ CoverageInfo labels IfNoCoverageInfo {
+ const shared: SharedFunctionInfo = function.shared_function_info;
+ const debugInfo = Cast<DebugInfo>(shared.script_or_debug_info)
+ otherwise goto IfNoCoverageInfo;
+
+ if ((debugInfo.flags & kHasCoverageInfo) == 0) goto IfNoCoverageInfo;
+ return UnsafeCast<CoverageInfo>(debugInfo.coverage_info);
+ }
+
+ macro SlotCount(coverageInfo: CoverageInfo): Smi {
+ assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
+ assert(kFirstSlotIndex == (coverageInfo.length & kSlotIndexCountMask));
+ return coverageInfo.length >> kSlotIndexCountLog2;
+ }
+
+ macro FirstIndexForSlot(implicit context: Context)(slot: Smi): Smi {
+ assert(kFirstSlotIndex == 0); // Otherwise we'd have to consider it below.
+ return slot << kSlotIndexCountLog2;
+ }
+
+ macro IncrementBlockCount(implicit context: Context)(
+ coverageInfo: CoverageInfo, slot: Smi) {
+ assert(slot < SlotCount(coverageInfo));
+ const slotStart: Smi = FirstIndexForSlot(slot);
+ const index: Smi = slotStart + kSlotBlockCountIndex;
+ coverageInfo.objects[index] =
+ UnsafeCast<Smi>(coverageInfo.objects[index]) + 1;
+ }
+
+ builtin IncBlockCounter(implicit context: Context)(
+ function: JSFunction, coverageArraySlotIndex: Smi): Object {
+ // It's quite possible that a function contains IncBlockCounter bytecodes,
+ // but no coverage info exists. This happens e.g. by selecting the
+ // best-effort coverage collection mode, which triggers deletion of all
+ // coverage infos in order to avoid memory leaks.
+
+ const coverageInfo: CoverageInfo =
+ GetCoverageInfo(function) otherwise return Undefined;
+ IncrementBlockCount(coverageInfo, coverageArraySlotIndex);
+ return Undefined;
+ }
+
+} // namespace internal_coverage
diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq
new file mode 100644
index 0000000000..84dd1261fa
--- /dev/null
+++ b/deps/v8/src/builtins/math.tq
@@ -0,0 +1,238 @@
+// Copyright 2019 the V8 project authors. All rights reserved. Use of this
+// source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+namespace math {
+ // ES6 #sec-math.acos
+ extern macro Float64Acos(float64): float64;
+
+ transitioning javascript builtin
+ MathAcos(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Acos(value));
+ }
+
+ // ES6 #sec-math.acosh
+ extern macro Float64Acosh(float64): float64;
+
+ transitioning javascript builtin
+ MathAcosh(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Acosh(value));
+ }
+
+ // ES6 #sec-math.asin
+ extern macro Float64Asin(float64): float64;
+
+ transitioning javascript builtin
+ MathAsin(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Asin(value));
+ }
+
+ // ES6 #sec-math.asinh
+ extern macro Float64Asinh(float64): float64;
+
+ transitioning javascript builtin
+ MathAsinh(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Asinh(value));
+ }
+
+ // ES6 #sec-math.atan
+ extern macro Float64Atan(float64): float64;
+
+ transitioning javascript builtin
+ MathAtan(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Atan(value));
+ }
+
+ // ES6 #sec-math.atan2
+ extern macro Float64Atan2(float64, float64): float64;
+
+ transitioning javascript builtin
+ MathAtan2(context: Context, receiver: Object, y: Object, x: Object): Number {
+ const yValue = Convert<float64>(ToNumber_Inline(context, y));
+ const xValue = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Atan2(yValue, xValue));
+ }
+
+ // ES6 #sec-math.atanh
+ extern macro Float64Atanh(float64): float64;
+
+ transitioning javascript builtin
+ MathAtanh(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Atanh(value));
+ }
+
+ // ES6 #sec-math.cbrt
+ extern macro Float64Cbrt(float64): float64;
+
+ transitioning javascript builtin
+ MathCbrt(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Cbrt(value));
+ }
+
+ // ES6 #sec-math.clz32
+ extern macro Word32Clz(int32): int32;
+
+ transitioning javascript builtin
+ MathClz32(context: Context, receiver: Object, x: Object): Number {
+ const num = ToNumber_Inline(context, x);
+
+ let value: int32;
+ typeswitch (num) {
+ case (s: Smi): {
+ value = Convert<int32>(s);
+ }
+ case (h: HeapNumber): {
+ value = TruncateHeapNumberValueToWord32(h);
+ }
+ }
+
+ return Convert<Number>(Word32Clz(value));
+ }
+
+ // ES6 #sec-math.cos
+ extern macro Float64Cos(float64): float64;
+
+ transitioning javascript builtin
+ MathCos(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Cos(value));
+ }
+
+ // ES6 #sec-math.cosh
+ extern macro Float64Cosh(float64): float64;
+
+ transitioning javascript builtin
+ MathCosh(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Cosh(value));
+ }
+
+ // ES6 #sec-math.exp
+ extern macro Float64Exp(float64): float64;
+
+ transitioning javascript builtin
+ MathExp(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Exp(value));
+ }
+
+ // ES6 #sec-math.expm1
+ extern macro Float64Expm1(float64): float64;
+
+ transitioning javascript builtin
+ MathExpm1(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Expm1(value));
+ }
+
+ // ES6 #sec-math.fround
+ transitioning javascript builtin
+ MathFround(context: Context, receiver: Object, x: Object): Number {
+ const x32 = Convert<float32>(ToNumber_Inline(context, x));
+ const x64 = Convert<float64>(x32);
+ return Convert<Number>(x64);
+ }
+
+ // ES6 #sec-math.log
+ extern macro Float64Log(float64): float64;
+
+ transitioning javascript builtin
+ MathLog(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Log(value));
+ }
+
+ // ES6 #sec-math.log1p
+ extern macro Float64Log1p(float64): float64;
+
+ transitioning javascript builtin
+ MathLog1p(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Log1p(value));
+ }
+
+ // ES6 #sec-math.log10
+ extern macro Float64Log10(float64): float64;
+
+ transitioning javascript builtin
+ MathLog10(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Log10(value));
+ }
+
+ // ES6 #sec-math.log2
+ extern macro Float64Log2(float64): float64;
+
+ transitioning javascript builtin
+ MathLog2(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Log2(value));
+ }
+
+ // ES6 #sec-math.sin
+ extern macro Float64Sin(float64): float64;
+
+ transitioning javascript builtin
+ MathSin(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Sin(value));
+ }
+
+ // ES6 #sec-math.sign
+ transitioning javascript builtin
+ MathSign(context: Context, receiver: Object, x: Object): Number {
+ const num = ToNumber_Inline(context, x);
+ const value = Convert<float64>(num);
+
+ if (value < 0) {
+ return -1;
+ } else if (value > 0) {
+ return 1;
+ } else {
+ return num;
+ }
+ }
+
+ // ES6 #sec-math.sinh
+ extern macro Float64Sinh(float64): float64;
+
+ transitioning javascript builtin
+ MathSinh(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Sinh(value));
+ }
+
+ // ES6 #sec-math.sqrt
+ extern macro Float64Sqrt(float64): float64;
+
+ transitioning javascript builtin
+ MathSqrt(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Sqrt(value));
+ }
+
+ // ES6 #sec-math.tan
+ extern macro Float64Tan(float64): float64;
+
+ transitioning javascript builtin
+ MathTan(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Tan(value));
+ }
+
+ // ES6 #sec-math.tanh
+ extern macro Float64Tanh(float64): float64;
+
+ transitioning javascript builtin
+ MathTanh(context: Context, receiver: Object, x: Object): Number {
+ const value = Convert<float64>(ToNumber_Inline(context, x));
+ return Convert<Number>(Float64Tanh(value));
+ }
+}
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 00ae715719..ec65c78ee9 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -4,24 +4,24 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/api-arguments.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/mips/constants-mips.h"
+#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
-#include "src/macro-assembler-inl.h"
-#include "src/mips/constants-mips.h"
-#include "src/objects-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -30,17 +30,10 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
- if (exit_frame_type == BUILTIN_EXIT) {
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK(exit_frame_type == EXIT);
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
- RelocInfo::CODE_TARGET);
- }
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
@@ -874,7 +867,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry = scratch1;
__ lw(optimized_code_entry,
- FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak cell to a code
@@ -1310,7 +1304,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ lw(t0, MemOperand(t0));
__ bind(&trampoline_loaded);
- __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value()));
+ __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
// Initialize the dispatch table register.
__ li(kInterpreterDispatchTableRegister,
@@ -2853,26 +2847,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ Addu(s2, s2, Operand(1));
__ sw(s2, MemOperand(s5, kLevelOffset));
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, a0);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
__ StoreReturnAddressAndCall(t9);
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, a0);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -2965,7 +2941,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(!AreAliased(api_function_address, argc, call_data,
holder, scratch, base));
- typedef FunctionCallbackArguments FCA;
+ using FCA = FunctionCallbackArguments;
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
@@ -3082,7 +3058,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register api_function_address = a2;
// Here and below +1 is for name() pushed after the args_ array.
- typedef PropertyCallbackArguments PCA;
+ using PCA = PropertyCallbackArguments;
__ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
__ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
__ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
diff --git a/deps/v8/src/builtins/mips64/OWNERS b/deps/v8/src/builtins/mips64/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/builtins/mips64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index bba01d5668..34a5774d65 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -4,24 +4,24 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/api-arguments.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/mips64/constants-mips64.h"
+#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
-#include "src/macro-assembler-inl.h"
-#include "src/mips64/constants-mips64.h"
-#include "src/objects-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -30,17 +30,10 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
- if (exit_frame_type == BUILTIN_EXIT) {
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK(exit_frame_type == EXIT);
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
- RelocInfo::CODE_TARGET);
- }
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
@@ -891,7 +884,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
Register optimized_code_entry = scratch1;
__ Ld(optimized_code_entry,
- FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
@@ -1327,7 +1321,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Ld(t0, MemOperand(t0));
__ bind(&trampoline_loaded);
- __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value()));
+ __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
// Initialize the dispatch table register.
__ li(kInterpreterDispatchTableRegister,
@@ -2892,26 +2886,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ Addu(s2, s2, Operand(1));
__ Sw(s2, MemOperand(s5, kLevelOffset));
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, a0);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
__ StoreReturnAddressAndCall(t9);
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, a0);
- __ li(a0, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -3001,7 +2977,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(!AreAliased(api_function_address, argc, call_data,
holder, scratch, base));
- typedef FunctionCallbackArguments FCA;
+ using FCA = FunctionCallbackArguments;
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
@@ -3122,7 +3098,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register api_function_address = a2;
// Here and below +1 is for name() pushed after the args_ array.
- typedef PropertyCallbackArguments PCA;
+ using PCA = PropertyCallbackArguments;
__ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
__ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
__ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
diff --git a/deps/v8/src/builtins/object-fromentries.tq b/deps/v8/src/builtins/object-fromentries.tq
index 2557b082a7..93851d4e11 100644
--- a/deps/v8/src/builtins/object-fromentries.tq
+++ b/deps/v8/src/builtins/object-fromentries.tq
@@ -42,9 +42,7 @@ namespace object {
}
label IfSlow {
const result: JSObject = NewJSObject();
- const fastIteratorResultMap: Map =
- Cast<Map>(LoadNativeContext(context)[ITERATOR_RESULT_MAP_INDEX])
- otherwise unreachable;
+ const fastIteratorResultMap: Map = GetIteratorResultMap();
let i: iterator::IteratorRecord = iterator::GetIterator(iterable);
try {
assert(!IsNullOrUndefined(i.object));
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index f089f086af..e3c6ce6407 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -4,22 +4,22 @@
#if V8_TARGET_ARCH_PPC
-#include "src/api-arguments.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
-#include "src/macro-assembler-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -28,17 +28,10 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
- if (exit_frame_type == BUILTIN_EXIT) {
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK(exit_frame_type == EXIT);
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
- RelocInfo::CODE_TARGET);
- }
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
@@ -92,6 +85,21 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch, Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ sub(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftImm(r0, num_args, Operand(kPointerSizeLog2));
+ __ cmp(scratch, r0);
+ __ ble(stack_overflow); // Signed comparison.
+}
+
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -102,6 +110,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ Label stack_overflow;
+
+ Generate_StackOverflowCheck(masm, r3, r8, &stack_overflow);
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
@@ -164,21 +175,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ add(sp, sp, r4);
__ addi(sp, sp, Operand(kPointerSize));
__ blr();
-}
-void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch, Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(scratch, RootIndex::kRealStackLimit);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ sub(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftImm(r0, num_args, Operand(kPointerSizeLog2));
- __ cmp(scratch, r0);
- __ ble(stack_overflow); // Signed comparison.
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bkpt(0); // Unreachable code.
+ }
}
} // namespace
@@ -906,7 +909,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadP(
optimized_code_entry,
- FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
@@ -1361,7 +1365,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ LoadP(r5, MemOperand(r5));
__ bind(&trampoline_loaded);
- __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value()));
+ __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset.value()));
__ mtlr(r0);
// Initialize the dispatch table register.
@@ -2979,26 +2983,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ addi(r16, r16, Operand(1));
__ stw(r16, MemOperand(r17, kLevelOffset));
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r3);
- __ Move(r3, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
__ StoreReturnAddressAndCall(scratch);
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r3);
- __ Move(r3, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -3077,7 +3063,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
Register scratch = r7;
DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
- typedef FunctionCallbackArguments FCA;
+ using FCA = FunctionCallbackArguments;
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq
new file mode 100644
index 0000000000..0915a66d5f
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-get-property.tq
@@ -0,0 +1,63 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ extern transitioning runtime
+ GetPropertyWithReceiver(implicit context: Context)(Object, Name, Object, Smi):
+ Object;
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
+ // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-get-p-receiver
+ transitioning builtin
+ ProxyGetProperty(implicit context: Context)(
+ proxy: JSProxy, name: Name, receiverValue: Object,
+ onNonExistent: Smi): Object {
+ // 1. Assert: IsPropertyKey(P) is true.
+ assert(TaggedIsNotSmi(name));
+ assert(IsName(name));
+ assert(!IsPrivateSymbol(name));
+
+ // 2. Let handler be O.[[ProxyHandler]].
+ const handler: Object = proxy.handler;
+
+ // 3. If handler is null, throw a TypeError exception.
+ if (handler == Null) {
+ ThrowTypeError(kProxyRevoked, 'get');
+ }
+
+ // 4. Assert: Type(handler) is Object.
+ const handlerJSReceiver = UnsafeCast<JSReceiver>(handler);
+
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 6. Let trap be ? GetMethod(handler, "get").
+ // 7. If trap is undefined, then (see 7.a below).
+ // 7.a. Return ? target.[[Get]](P, Receiver).
+ // TODO(mslekova): Introduce GetPropertyWithReceiver stub
+ const trap: Callable = GetMethod(handlerJSReceiver, 'get')
+ otherwise return GetPropertyWithReceiver(
+ target, name, receiverValue, onNonExistent);
+
+ // 8. Let trapResult be ? Call(trap, handler, « target, P, Receiver »).
+ const trapResult =
+ Call(context, trap, handlerJSReceiver, target, name, receiverValue);
+
+ // 9. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ // 10. If targetDesc is not undefined and targetDesc.[[Configurable]] is
+ // false, then
+ // a. If IsDataDescriptor(targetDesc) is true and targetDesc.[[Writable]]
+ // is false, then
+ // i. If SameValue(trapResult, targetDesc.[[Value]]) is false, throw a
+ // TypeError exception.
+ // b. If IsAccessorDescriptor(targetDesc) is true and targetDesc.[[Get]]
+ // is undefined, then
+ // i. If trapResult is not undefined, throw a TypeError exception.
+ // 11. Return trapResult.
+ return CheckGetSetTrapResult(target, proxy, name, trapResult, kProxyGet);
+ }
+}
diff --git a/deps/v8/src/builtins/proxy-has-property.tq b/deps/v8/src/builtins/proxy-has-property.tq
new file mode 100644
index 0000000000..ab3898a9c7
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-has-property.tq
@@ -0,0 +1,55 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
+ // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p
+ transitioning builtin ProxyHasProperty(implicit context: Context)(
+ proxy: JSProxy, name: Name): Object {
+ assert(IsJSProxy(proxy));
+
+ PerformStackCheck();
+
+ // 1. Assert: IsPropertyKey(P) is true.
+ assert(IsName(name));
+ assert(!IsPrivateSymbol(name));
+
+ try {
+ // 2. Let handler be O.[[ProxyHandler]].
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ const handler =
+ Cast<JSReceiver>(proxy.handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 6. Let trap be ? GetMethod(handler, "has").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handler, 'has')
+ otherwise goto TrapUndefined(target);
+
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler, «
+ // target»)).
+ // 9. If booleanTrapResult is false, then (see 9.a. in
+ // CheckHasTrapResult).
+ // 10. Return booleanTrapResult.
+ const trapResult = Call(context, trap, handler, target, name);
+ if (BranchIfToBooleanIsTrue(trapResult)) {
+ return True;
+ }
+ return CheckHasTrapResult(target, proxy, name);
+ }
+ label TrapUndefined(target: Object) {
+ // 7.a. Return ? target.[[HasProperty]](P).
+ tail HasProperty(target, name);
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ ThrowTypeError(kProxyRevoked, 'has');
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy-revoke.tq b/deps/v8/src/builtins/proxy-revoke.tq
index 8ab5d8dd51..400f586b21 100644
--- a/deps/v8/src/builtins/proxy-revoke.tq
+++ b/deps/v8/src/builtins/proxy-revoke.tq
@@ -8,9 +8,8 @@ namespace proxy {
// Proxy Revocation Functions
// https://tc39.github.io/ecma262/#sec-proxy-revocation-functions
- // TODO(v8:9007) remove receiver in argument since we don't use it
transitioning javascript builtin
- ProxyRevoke(context: Context, receiver: Object): Undefined {
+ ProxyRevoke(implicit context: Context)(): Undefined {
// 1. Let p be F.[[RevocableProxy]].
const proxyObject: Object = context[PROXY_SLOT];
diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq
new file mode 100644
index 0000000000..72181e08a8
--- /dev/null
+++ b/deps/v8/src/builtins/proxy-set-property.tq
@@ -0,0 +1,84 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-proxy-gen.h'
+
+namespace proxy {
+
+ extern transitioning runtime
+ SetPropertyWithReceiver(implicit context:
+ Context)(Object, Name, Object, Object): void;
+
+ transitioning macro CallThrowTypeErrorIfStrict(implicit context: Context)(
+ message: constexpr MessageTemplate) {
+ ThrowTypeErrorIfStrict(SmiConstant(message), Null, Null);
+ }
+
+ // ES #sec-proxy-object-internal-methods-and-internal-slots-set-p-v-receiver
+ // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-set-p-v-receiver
+ transitioning builtin
+ ProxySetProperty(implicit context: Context)(
+ proxy: JSProxy, name: Name, value: Object,
+ receiverValue: Object): Object {
+ // 1. Assert: IsPropertyKey(P) is true.
+ assert(TaggedIsNotSmi(name));
+ assert(IsName(name));
+
+ if (IsPrivateSymbol(name)) {
+ CallThrowTypeErrorIfStrict(kProxyPrivate);
+ return Undefined;
+ }
+
+ // 2. Let handler be O.[[ProxyHandler]].
+ const handler: Object = proxy.handler;
+
+ try {
+ // 3. If handler is null, throw a TypeError exception.
+ // 4. Assert: Type(handler) is Object.
+ const handlerJSReceiver =
+ Cast<JSReceiver>(handler) otherwise ThrowProxyHandlerRevoked;
+
+ // 5. Let target be O.[[ProxyTarget]].
+ const target = proxy.target;
+
+ // 6. Let trap be ? GetMethod(handler, "set").
+ // 7. If trap is undefined, then (see 7.a below).
+ const trap: Callable = GetMethod(handlerJSReceiver, 'set')
+ otherwise goto TrapUndefined(target);
+
+ // 8. Let booleanTrapResult be ToBoolean(? Call(trap, handler,
+ // « target, P, V, Receiver »)).
+ // 9. If booleanTrapResult is false, return false.
+ // 10. Let targetDesc be ? target.[[GetOwnProperty]](P).
+ // 11. If targetDesc is not undefined and targetDesc.[[Configurable]] is
+ // false, then
+ // a. If IsDataDescriptor(targetDesc) is true and
+ // targetDesc.[[Writable]] is false, then
+ // i. If SameValue(V, targetDesc.[[Value]]) is false, throw a
+ // TypeError exception.
+ // b. If IsAccessorDescriptor(targetDesc) is true, then
+ // i. If targetDesc.[[Set]] is undefined, throw a TypeError
+ // exception.
+ // 12. Return true.
+ const trapResult = Call(
+ context, trap, handlerJSReceiver, target, name, value, receiverValue);
+ if (BranchIfToBooleanIsTrue(trapResult)) {
+ return CheckGetSetTrapResult(
+ target, proxy, name, trapResult, kProxySet);
+ }
+ ThrowTypeErrorIfStrict(
+ SmiConstant(kProxyTrapReturnedFalsishFor), 'set', name);
+ return value;
+ }
+ label TrapUndefined(target: Object) {
+ // 7.a. Return ? target.[[Set]](P, V, Receiver).
+ SetPropertyWithReceiver(target, name, value, receiverValue);
+ return value;
+ }
+ label ThrowProxyHandlerRevoked deferred {
+ assert(handler == Null);
+ ThrowTypeError(kProxyRevoked, 'set');
+ }
+ }
+}
diff --git a/deps/v8/src/builtins/proxy.tq b/deps/v8/src/builtins/proxy.tq
index 95e05be14f..16bba85292 100644
--- a/deps/v8/src/builtins/proxy.tq
+++ b/deps/v8/src/builtins/proxy.tq
@@ -19,4 +19,27 @@ namespace proxy {
Cast<JSReceiver>(proxy.handler) otherwise return true;
return false;
}
+
+ extern transitioning macro ProxiesCodeStubAssembler::CheckGetSetTrapResult(
+ implicit context:
+ Context)(Object, JSProxy, Name, Object, constexpr int31): Object;
+
+ extern transitioning macro ProxiesCodeStubAssembler::CheckHasTrapResult(
+ implicit context: Context)(Object, JSProxy, Name): Object;
+
+ const kProxyNonObject: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyNonObject';
+ const kProxyHandlerOrTargetRevoked: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyHandlerOrTargetRevoked';
+ const kProxyRevoked: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyRevoked';
+ const kProxyTrapReturnedFalsishFor: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyTrapReturnedFalsishFor';
+ const kProxyPrivate: constexpr MessageTemplate
+ generates 'MessageTemplate::kProxyPrivate';
+
+ const kProxyGet: constexpr int31
+ generates 'JSProxy::AccessKind::kGet';
+ const kProxySet: constexpr int31
+ generates 'JSProxy::AccessKind::kSet';
}
diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq
index 4b5d542b60..9b95f99f41 100644
--- a/deps/v8/src/builtins/regexp-replace.tq
+++ b/deps/v8/src/builtins/regexp-replace.tq
@@ -11,23 +11,125 @@ namespace regexp_replace {
extern builtin
SubString(implicit context: Context)(String, Smi, Smi): String;
- extern transitioning macro
- RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
- implicit context: Context)(JSRegExp, String, Callable): String;
- extern macro
- RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Number, bool): Smi;
-
+ extern runtime RegExpExecMultiple(implicit context: Context)(
+ JSRegExp, String, RegExpMatchInfo, JSArray): Null | JSArray;
extern transitioning runtime
RegExpReplaceRT(Context, JSReceiver, String, Object): String;
extern transitioning runtime
+ StringBuilderConcat(implicit context: Context)(JSArray, Smi, String): String;
+ extern transitioning runtime
StringReplaceNonGlobalRegExpWithFunction(implicit context: Context)(
String, JSRegExp, Callable): String;
extern macro
+ RegExpBuiltinsAssembler::AdvanceStringIndexFast(String, Number, bool): Smi;
+ extern macro
RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResultFast(
implicit context: Context)(JSReceiver, String):
RegExpMatchInfo labels IfDidNotMatch;
+ transitioning macro RegExpReplaceCallableNoExplicitCaptures(implicit context:
+ Context)(
+ matchesElements: FixedArray, matchesLength: intptr, string: String,
+ replaceFn: Callable) {
+ let matchStart: Smi = 0;
+ for (let i: intptr = 0; i < matchesLength; i++) {
+ typeswitch (matchesElements.objects[i]) {
+ // Element represents a slice.
+ case (elSmi: Smi): {
+ // The slice's match start and end is either encoded as one or two
+ // smis. A positive smi indicates a single smi encoding (see
+ // ReplacementStringBuilder::AddSubjectSlice()).
+ if (elSmi > 0) {
+ // For single smi encoding, see
+ // StringBuilderSubstringLength::encode() and
+ // StringBuilderSubstringPosition::encode().
+ const elInt: intptr = Convert<intptr>(elSmi);
+ const newMatchStart: intptr = (elInt >> 11) + (elInt & 0x7FF);
+ matchStart = Convert<Smi>(newMatchStart);
+ } else {
+ // For two smi encoding, the length is negative followed by the
+ // match start.
+ const nextEl: Smi = UnsafeCast<Smi>(matchesElements.objects[++i]);
+ matchStart = nextEl - elSmi;
+ }
+ }
+ // Element represents the matched substring, which is then passed to the
+ // replace function.
+ case (elString: String): {
+ const replacementObj: Object =
+ Call(context, replaceFn, Undefined, elString, matchStart, string);
+ const replacement: String = ToString_Inline(context, replacementObj);
+ matchesElements.objects[i] = replacement;
+ matchStart += elString.length_smi;
+ }
+ case (Object): deferred {
+ unreachable;
+ }
+ }
+ }
+ }
+
+ transitioning macro
+ RegExpReplaceCallableWithExplicitCaptures(implicit context: Context)(
+ matchesElements: FixedArray, matchesLength: intptr, string: String,
+ replaceFn: Callable) {
+ for (let i: intptr = 0; i < matchesLength; i++) {
+ const elArray =
+ Cast<JSArray>(matchesElements.objects[i]) otherwise continue;
+
+ // The JSArray is expanded into the function args by Reflect.apply().
+ // TODO(jgruber): Remove indirection through Call->ReflectApply.
+ const replacementObj: Object = Call(
+ context, GetReflectApply(), Undefined, replaceFn, Undefined, elArray);
+
+ // Overwrite the i'th element in the results with the string
+ // we got back from the callback function.
+ matchesElements.objects[i] = ToString_Inline(context, replacementObj);
+ }
+ }
+
+ transitioning macro RegExpReplaceFastGlobalCallable(implicit context:
+ Context)(
+ regexp: FastJSRegExp, string: String, replaceFn: Callable): String {
+ regexp.lastIndex = 0;
+
+ const kInitialCapacity: Smi = 16;
+ const kInitialLength: Smi = 0;
+ const result: Null | JSArray = RegExpExecMultiple(
+ regexp, string, GetRegExpLastMatchInfo(),
+ AllocateJSArray(
+ PACKED_ELEMENTS, GetFastPackedElementsJSArrayMap(),
+ kInitialCapacity, kInitialLength));
+
+ regexp.lastIndex = 0;
+
+ // If no matches, return the subject string.
+ if (result == Null) return string;
+
+ const matches: JSArray = UnsafeCast<JSArray>(result);
+ const matchesLength: Smi = Cast<Smi>(matches.length) otherwise unreachable;
+ const matchesLengthInt: intptr = Convert<intptr>(matchesLength);
+ const matchesElements: FixedArray =
+ UnsafeCast<FixedArray>(matches.elements);
+
+ // Reload last match info since it might have changed.
+ const nofCaptures: Smi = GetRegExpLastMatchInfo().NumberOfCaptures();
+
+ // If the number of captures is two then there are no explicit captures in
+ // the regexp, just the implicit capture that captures the whole match. In
+ // this case we can simplify quite a bit and end up with something faster.
+ if (nofCaptures == 2) {
+ RegExpReplaceCallableNoExplicitCaptures(
+ matchesElements, matchesLengthInt, string, replaceFn);
+ } else {
+ RegExpReplaceCallableWithExplicitCaptures(
+ matchesElements, matchesLengthInt, string, replaceFn);
+ }
+
+ return StringBuilderConcat(matches, matchesLength, string);
+ }
+
transitioning macro RegExpReplaceFastString(implicit context: Context)(
regexp: FastJSRegExp, string: String, replaceString: String): String {
// The fast path is reached only if {receiver} is an unmodified JSRegExp
@@ -80,7 +182,7 @@ namespace regexp_replace {
typeswitch (replaceValue) {
case (replaceFn: Callable): {
return regexp.global ?
- ReplaceGlobalCallableFastPath(regexp, string, replaceFn) :
+ RegExpReplaceFastGlobalCallable(regexp, string, replaceFn) :
StringReplaceNonGlobalRegExpWithFunction(string, regexp, replaceFn);
}
case (Object): {
@@ -116,7 +218,7 @@ namespace regexp_replace {
// if (IsCallable(replace)) {
// if (IsGlobal(receiver)) {
// // Called 'fast-path' but contains several runtime calls.
- // ReplaceGlobalCallableFastPath()
+ // RegExpReplaceFastGlobalCallable()
// } else {
// CallRuntime(StringReplaceNonGlobalRegExpWithFunction)
// }
@@ -124,7 +226,7 @@ namespace regexp_replace {
// if (replace.contains("$")) {
// CallRuntime(RegExpReplace)
// } else {
- // ReplaceSimpleStringFastPath()
+ // RegExpReplaceFastString()
// }
// }
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index b242e8921a..bf8c0cb68a 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -4,22 +4,22 @@
#if V8_TARGET_ARCH_S390
-#include "src/api-arguments.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
-#include "src/macro-assembler-inl.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"
@@ -28,17 +28,10 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
- if (exit_frame_type == BUILTIN_EXIT) {
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK(exit_frame_type == EXIT);
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
- RelocInfo::CODE_TARGET);
- }
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
@@ -92,6 +85,21 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
+ Register scratch, Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ __ LoadRoot(scratch, RootIndex::kRealStackLimit);
+ // Make scratch the space we have left. The stack might already be overflowed
+ // here which will cause scratch to become negative.
+ __ SubP(scratch, sp, scratch);
+ // Check if the arguments will overflow the stack.
+ __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2));
+ __ CmpP(scratch, r0);
+ __ ble(stack_overflow); // Signed comparison.
+}
+
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
@@ -102,6 +110,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
+ Label stack_overflow;
+
+ Generate_StackOverflowCheck(masm, r2, r7, &stack_overflow);
+
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
@@ -158,21 +170,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ AddP(sp, sp, r3);
__ AddP(sp, sp, Operand(kPointerSize));
__ Ret();
-}
-void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
- Register scratch, Label* stack_overflow) {
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- __ LoadRoot(scratch, RootIndex::kRealStackLimit);
- // Make scratch the space we have left. The stack might already be overflowed
- // here which will cause scratch to become negative.
- __ SubP(scratch, sp, scratch);
- // Check if the arguments will overflow the stack.
- __ ShiftLeftP(r0, num_args, Operand(kPointerSizeLog2));
- __ CmpP(scratch, r0);
- __ ble(stack_overflow); // Signed comparison.
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ bkpt(0); // Unreachable code.
+ }
}
} // namespace
@@ -960,7 +964,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadP(
optimized_code_entry,
- FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
// Check if the code entry is a Smi. If yes, we interpret it as an
// optimisation marker. Otherwise, interpret it as a weak reference to a code
@@ -3016,26 +3021,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ AddP(r8, Operand(1));
__ StoreW(r8, MemOperand(r9, kLevelOffset));
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r2);
- __ Move(r2, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
__ StoreReturnAddressAndCall(scratch);
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1, r2);
- __ Move(r2, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -3114,7 +3101,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
Register scratch = r6;
DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
- typedef FunctionCallbackArguments FCA;
+ using FCA = FunctionCallbackArguments;
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index a9d260372c..7b4a068300 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -2,21 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/setup-isolate.h"
+#include "src/init/setup-isolate.h"
-#include "src/assembler-inl.h"
#include "src/builtins/builtins.h"
-#include "src/code-events.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/macro-assembler.h"
#include "src/compiler/code-assembler.h"
-#include "src/handles-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h" // For MemoryAllocator::code_range.
-#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-generator.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
@@ -31,11 +30,6 @@ BUILTIN_LIST_C(FORWARD_DECLARE)
namespace {
-void PostBuildProfileAndTracing(Isolate* isolate, Code code, const char* name) {
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::BUILTIN_TAG,
- AbstractCode::cast(code), name));
-}
-
AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
int32_t builtin_index) {
AssemblerOptions options = AssemblerOptions::Default(isolate);
@@ -62,8 +56,8 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
return options;
}
-typedef void (*MacroAssemblerGenerator)(MacroAssembler*);
-typedef void (*CodeAssemblerGenerator)(compiler::CodeAssemblerState*);
+using MacroAssemblerGenerator = void (*)(MacroAssembler*);
+using CodeAssemblerGenerator = void (*)(compiler::CodeAssemblerState*);
Handle<Code> BuildPlaceholder(Isolate* isolate, int32_t builtin_index) {
HandleScope scope(isolate);
@@ -81,8 +75,10 @@ Handle<Code> BuildPlaceholder(Isolate* isolate, int32_t builtin_index) {
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::BUILTIN, masm.CodeObject(), builtin_index);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::BUILTIN)
+ .set_self_reference(masm.CodeObject())
+ .set_builtin_index(builtin_index)
+ .Build();
return scope.CloseAndEscape(code);
}
@@ -110,9 +106,7 @@ Code BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
DCHECK_EQ(Builtins::KindOf(Builtins::kJSConstructEntry), Builtins::ASM);
DCHECK_EQ(Builtins::KindOf(Builtins::kJSRunMicrotasksEntry), Builtins::ASM);
if (Builtins::IsJSEntryVariant(builtin_index)) {
- static constexpr int kJSEntryHandlerCount = 1;
- handler_table_offset =
- HandlerTable::EmitReturnTableStart(&masm, kJSEntryHandlerCount);
+ handler_table_offset = HandlerTable::EmitReturnTableStart(&masm);
HandlerTable::EmitReturnEntry(
&masm, 0, isolate->builtins()->js_entry_handler_offset());
}
@@ -121,23 +115,18 @@ Code BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index,
masm.GetCode(isolate, &desc, MacroAssembler::kNoSafepointTable,
handler_table_offset);
- static constexpr bool kIsNotTurbofanned = false;
- static constexpr int kStackSlots = 0;
-
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::BUILTIN, masm.CodeObject(), builtin_index,
- MaybeHandle<ByteArray>(), DeoptimizationData::Empty(isolate), kMovable,
- kIsNotTurbofanned, kStackSlots);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::BUILTIN)
+ .set_self_reference(masm.CodeObject())
+ .set_builtin_index(builtin_index)
+ .Build();
#if defined(V8_OS_WIN_X64)
isolate->SetBuiltinUnwindData(builtin_index, masm.GetUnwindInfo());
#endif
- PostBuildProfileAndTracing(isolate, *code, s_name);
return *code;
}
Code BuildAdaptor(Isolate* isolate, int32_t builtin_index,
- Address builtin_address,
- Builtins::ExitFrameType exit_frame_type, const char* name) {
+ Address builtin_address, const char* name) {
HandleScope scope(isolate);
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
@@ -149,12 +138,13 @@ Code BuildAdaptor(Isolate* isolate, int32_t builtin_index,
ExternalAssemblerBuffer(buffer, kBufferSize));
masm.set_builtin_index(builtin_index);
DCHECK(!masm.has_frame());
- Builtins::Generate_Adaptor(&masm, builtin_address, exit_frame_type);
+ Builtins::Generate_Adaptor(&masm, builtin_address);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::BUILTIN, masm.CodeObject(), builtin_index);
- PostBuildProfileAndTracing(isolate, *code, name);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::BUILTIN)
+ .set_self_reference(masm.CodeObject())
+ .set_builtin_index(builtin_index)
+ .Build();
return *code;
}
@@ -179,7 +169,6 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin_index));
- PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
@@ -207,7 +196,6 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin_index));
- PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
@@ -216,7 +204,7 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// static
void SetupIsolateDelegate::AddBuiltin(Builtins* builtins, int index,
Code code) {
- DCHECK_EQ(index, code->builtin_index());
+ DCHECK_EQ(index, code.builtin_index());
builtins->set_builtin(index, code);
}
@@ -241,12 +229,13 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
static const int kRelocMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
HeapIterator iterator(isolate->heap());
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (!obj->IsCode()) continue;
+ if (!obj.IsCode()) continue;
Code code = Code::cast(obj);
bool flush_icache = false;
for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
@@ -254,26 +243,26 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
if (RelocInfo::IsCodeTargetMode(rinfo->rmode())) {
Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
DCHECK_IMPLIES(RelocInfo::IsRelativeCodeTarget(rinfo->rmode()),
- Builtins::IsIsolateIndependent(target->builtin_index()));
- if (!target->is_builtin()) continue;
- Code new_target = builtins->builtin(target->builtin_index());
- rinfo->set_target_address(new_target->raw_instruction_start(),
+ Builtins::IsIsolateIndependent(target.builtin_index()));
+ if (!target.is_builtin()) continue;
+ Code new_target = builtins->builtin(target.builtin_index());
+ rinfo->set_target_address(new_target.raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else {
- DCHECK(RelocInfo::IsEmbeddedObject(rinfo->rmode()));
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
Object object = rinfo->target_object();
- if (!object->IsCode()) continue;
+ if (!object.IsCode()) continue;
Code target = Code::cast(object);
- if (!target->is_builtin()) continue;
- Code new_target = builtins->builtin(target->builtin_index());
+ if (!target.is_builtin()) continue;
+ Code new_target = builtins->builtin(target.builtin_index());
rinfo->set_target_object(isolate->heap(), new_target,
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
}
flush_icache = true;
}
if (flush_icache) {
- FlushInstructionCache(code->raw_instruction_start(),
- code->raw_instruction_size());
+ FlushInstructionCache(code.raw_instruction_start(),
+ code.raw_instruction_size());
}
}
}
@@ -285,22 +274,14 @@ Code GenerateBytecodeHandler(Isolate* isolate, int builtin_index,
interpreter::OperandScale operand_scale,
interpreter::Bytecode bytecode) {
DCHECK(interpreter::Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
-
Handle<Code> code = interpreter::GenerateBytecodeHandler(
isolate, bytecode, operand_scale, builtin_index,
BuiltinAssemblerOptions(isolate, builtin_index));
-
- PostBuildProfileAndTracing(isolate, *code, name);
-
return *code;
}
} // namespace
-#ifdef _MSC_VER
-#pragma optimize( "", off )
-#endif
-
// static
void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
Builtins* builtins = isolate->builtins();
@@ -313,13 +294,8 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
int index = 0;
Code code;
-#define BUILD_CPP(Name) \
- code = BuildAdaptor(isolate, index, FUNCTION_ADDR(Builtin_##Name), \
- Builtins::BUILTIN_EXIT, #Name); \
- AddBuiltin(builtins, index++, code);
-#define BUILD_API(Name) \
- code = BuildAdaptor(isolate, index, FUNCTION_ADDR(Builtin_##Name), \
- Builtins::EXIT, #Name); \
+#define BUILD_CPP(Name) \
+ code = BuildAdaptor(isolate, index, FUNCTION_ADDR(Builtin_##Name), #Name); \
AddBuiltin(builtins, index++, code);
#define BUILD_TFJ(Name, Argc, ...) \
code = BuildWithCodeStubAssemblerJS( \
@@ -354,11 +330,10 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
#Name); \
AddBuiltin(builtins, index++, code);
- BUILTIN_LIST(BUILD_CPP, BUILD_API, BUILD_TFJ, BUILD_TFC, BUILD_TFS, BUILD_TFH,
- BUILD_BCH, BUILD_ASM);
+ BUILTIN_LIST(BUILD_CPP, BUILD_TFJ, BUILD_TFC, BUILD_TFS, BUILD_TFH, BUILD_BCH,
+ BUILD_ASM);
#undef BUILD_CPP
-#undef BUILD_API
#undef BUILD_TFJ
#undef BUILD_TFC
#undef BUILD_TFS
@@ -370,13 +345,13 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
ReplacePlaceholders(isolate);
#define SET_PROMISE_REJECTION_PREDICTION(Name) \
- builtins->builtin(Builtins::k##Name)->set_is_promise_rejection(true);
+ builtins->builtin(Builtins::k##Name).set_is_promise_rejection(true);
BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(SET_PROMISE_REJECTION_PREDICTION)
#undef SET_PROMISE_REJECTION_PREDICTION
#define SET_EXCEPTION_CAUGHT_PREDICTION(Name) \
- builtins->builtin(Builtins::k##Name)->set_is_exception_caught(true);
+ builtins->builtin(Builtins::k##Name).set_is_exception_caught(true);
BUILTIN_EXCEPTION_CAUGHT_PREDICTION_LIST(SET_EXCEPTION_CAUGHT_PREDICTION)
#undef SET_EXCEPTION_CAUGHT_PREDICTION
@@ -384,10 +359,5 @@ void SetupIsolateDelegate::SetupBuiltinsInternal(Isolate* isolate) {
builtins->MarkInitialized();
}
-#ifdef _MSC_VER
-#pragma optimize( "", on )
-#endif
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/string-iterator.tq b/deps/v8/src/builtins/string-iterator.tq
new file mode 100644
index 0000000000..f5c6099c25
--- /dev/null
+++ b/deps/v8/src/builtins/string-iterator.tq
@@ -0,0 +1,46 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace string_iterator {
+
+ macro NewJSStringIterator(implicit context: Context)(
+ string: String, nextIndex: Smi): JSStringIterator {
+ return new JSStringIterator{
+ map: GetInitialStringIteratorMap(),
+ properties_or_hash: kEmptyFixedArray,
+ elements: kEmptyFixedArray,
+ string: string,
+ next_index: nextIndex
+ };
+ }
+
+ // ES6 #sec-string.prototype-@@iterator
+ transitioning javascript builtin StringPrototypeIterator(
+ implicit context: Context)(receiver: Object): JSStringIterator {
+ const name: String =
+ ToThisString(receiver, 'String.prototype[Symbol.iterator]');
+ const index: Smi = 0;
+ return NewJSStringIterator(name, index);
+ }
+
+ // ES6 #sec-%stringiteratorprototype%.next
+ transitioning javascript builtin StringIteratorPrototypeNext(
+ implicit context: Context)(receiver: Object): JSIteratorResult {
+ const iterator = Cast<JSStringIterator>(receiver) otherwise ThrowTypeError(
+ kIncompatibleMethodReceiver, 'String Iterator.prototype.next',
+ receiver);
+ const string = iterator.string;
+ const position: intptr = SmiUntag(iterator.next_index);
+ const length: intptr = string.length_intptr;
+ if (position >= length) {
+ return NewJSIteratorResult(Undefined, True);
+ }
+ // Move to next codepoint.
+ const encoding = UTF16;
+ const ch = string::LoadSurrogatePairAt(string, length, position, encoding);
+ const value: String = string::StringFromSingleCodePoint(ch, encoding);
+ iterator.next_index = SmiTag(position + value.length_intptr);
+ return NewJSIteratorResult(value, False);
+ }
+}
diff --git a/deps/v8/src/builtins/string-slice.tq b/deps/v8/src/builtins/string-slice.tq
new file mode 100644
index 0000000000..41eb38b0ad
--- /dev/null
+++ b/deps/v8/src/builtins/string-slice.tq
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace string_slice {
+
+ extern macro SubString(String, intptr, intptr): String;
+
+ // ES6 #sec-string.prototype.slice ( start, end )
+ // https://tc39.github.io/ecma262/#sec-string.prototype.slice
+ transitioning javascript builtin StringPrototypeSlice(
+ implicit context: Context)(receiver: Object, ...arguments): String {
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ // 2. Let S be ? ToString(O).
+ const string: String = ToThisString(receiver, 'String.prototype.slice');
+
+ // 3. Let len be the number of elements in S.
+ const length: intptr = string.length_intptr;
+
+ // Convert {start} to a relative index.
+ const start: intptr = ConvertToRelativeIndex(arguments[0], length);
+
+ // 5. If end is undefined, let intEnd be len;
+ // else Convert {end} to a relative index.
+ const temp = arguments[1];
+ const end: intptr =
+ temp == Undefined ? length : ConvertToRelativeIndex(temp, length);
+
+ if (end <= start) {
+ return kEmptyString;
+ }
+
+ return SubString(string, start, end);
+ }
+}
diff --git a/deps/v8/src/builtins/string-substring.tq b/deps/v8/src/builtins/string-substring.tq
new file mode 100644
index 0000000000..f322eeed06
--- /dev/null
+++ b/deps/v8/src/builtins/string-substring.tq
@@ -0,0 +1,50 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+namespace string_substring {
+
+ extern macro SubString(String, intptr, intptr): String;
+
+ transitioning macro ToSmiBetweenZeroAnd(implicit context: Context)(
+ value: Object, limit: Smi): Smi {
+ const valueInt: Number =
+ ToInteger_Inline(context, value, kTruncateMinusZero);
+ typeswitch (valueInt) {
+ case (valueSmi: Smi): {
+ if (SmiAbove(valueSmi, limit)) {
+ return valueSmi < 0 ? 0 : limit;
+ }
+ return valueSmi;
+ }
+ // {value} is a heap number - in this case, it is definitely out of
+ // bounds.
+ case (hn: HeapNumber): {
+ const valueFloat: float64 = LoadHeapNumberValue(hn);
+ return valueFloat < 0. ? 0 : limit;
+ }
+ }
+ }
+
+ // ES6 #sec-string.prototype.substring
+ transitioning javascript builtin StringPrototypeSubstring(
+ implicit context: Context)(receiver: Object, ...arguments): String {
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ const string: String = ToThisString(receiver, 'String.prototype.substring');
+ const length = string.length_smi;
+
+ // Conversion and bounds-checks for {start}.
+ let start: Smi = ToSmiBetweenZeroAnd(arguments[0], length);
+
+ // Conversion and bounds-checks for {end}.
+ let end: Smi = arguments[1] == Undefined ?
+ length :
+ ToSmiBetweenZeroAnd(arguments[1], length);
+ if (end < start) {
+ const tmp: Smi = end;
+ end = start;
+ start = tmp;
+ }
+ return SubString(string, SmiUntag(start), SmiUntag(end));
+ }
+}
diff --git a/deps/v8/src/builtins/string.tq b/deps/v8/src/builtins/string.tq
new file mode 100644
index 0000000000..1e5a74eb49
--- /dev/null
+++ b/deps/v8/src/builtins/string.tq
@@ -0,0 +1,136 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/builtins/builtins-string-gen.h'
+
+namespace string {
+ // ES6 #sec-string.prototype.tostring
+ transitioning javascript builtin
+ StringPrototypeToString(implicit context: Context)(receiver: Object): Object {
+ return ToThisValue(receiver, kString, 'String.prototype.toString');
+ }
+
+ // ES6 #sec-string.prototype.valueof
+ transitioning javascript builtin
+ StringPrototypeValueOf(implicit context: Context)(receiver: Object): Object {
+ return ToThisValue(receiver, kString, 'String.prototype.valueOf');
+ }
+
+ extern macro StringBuiltinsAssembler::LoadSurrogatePairAt(
+ String, intptr, intptr, constexpr UnicodeEncoding): int32;
+ extern macro StringFromSingleCodePoint(int32, constexpr UnicodeEncoding):
+ String;
+
+ // This function assumes StringPrimitiveWithNoCustomIteration is true.
+ transitioning builtin StringToList(implicit context: Context)(string: String):
+ JSArray {
+ const kind = PACKED_ELEMENTS;
+ const stringLength: intptr = string.length_intptr;
+
+ const map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context));
+ const array: JSArray = AllocateJSArray(
+ kind, map, stringLength, SmiTag(stringLength),
+ kAllowLargeObjectAllocation);
+ const elements = UnsafeCast<FixedArray>(array.elements);
+ const encoding = UTF16;
+ let arrayLength: Smi = 0;
+ let i: intptr = 0;
+ while (i < stringLength) {
+ const ch: int32 = LoadSurrogatePairAt(string, stringLength, i, encoding);
+ const value: String = StringFromSingleCodePoint(ch, encoding);
+ elements[arrayLength] = value;
+ // Increment and continue the loop.
+ i = i + value.length_intptr;
+ arrayLength++;
+ }
+ assert(arrayLength >= 0);
+ assert(SmiTag(stringLength) >= arrayLength);
+ array.length = arrayLength;
+
+ return array;
+ }
+
+ transitioning macro GenerateStringAt(implicit context: Context)(
+ receiver: Object, position: Object, methodName: constexpr string):
+ never labels IfInBounds(String, intptr, intptr),
+ IfOutOfBounds {
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ const string: String = ToThisString(receiver, methodName);
+ // Convert the {position} to a Smi and check that it's in bounds of
+ // the {string}.
+ const indexNumber: Number =
+ ToInteger_Inline(context, position, kTruncateMinusZero);
+ if (TaggedIsNotSmi(indexNumber)) goto IfOutOfBounds;
+ const index: intptr = SmiUntag(UnsafeCast<Smi>(indexNumber));
+ const length: intptr = string.length_intptr;
+ if (Convert<uintptr>(index) >= Convert<uintptr>(length)) goto IfOutOfBounds;
+ goto IfInBounds(string, index, length);
+ }
+
+ // ES6 #sec-string.prototype.charat
+ transitioning javascript builtin StringPrototypeCharAt(
+ implicit context: Context)(receiver: Object, position: Object): Object {
+ try {
+ GenerateStringAt(receiver, position, 'String.prototype.charAt')
+ otherwise IfInBounds, IfOutOfBounds;
+ }
+ label IfInBounds(string: String, index: intptr, length: intptr) {
+ const code: int32 = StringCharCodeAt(string, index);
+ return StringFromSingleCharCode(code);
+ }
+ label IfOutOfBounds {
+ return kEmptyString;
+ }
+ }
+
+ // ES6 #sec-string.prototype.charcodeat
+ transitioning javascript builtin StringPrototypeCharCodeAt(
+ implicit context: Context)(receiver: Object, position: Object): Object {
+ try {
+ GenerateStringAt(receiver, position, 'String.prototype.charCodeAt')
+ otherwise IfInBounds, IfOutOfBounds;
+ }
+ label IfInBounds(string: String, index: intptr, length: intptr) {
+ const code: int32 = StringCharCodeAt(string, index);
+ return Convert<Smi>(code);
+ }
+ label IfOutOfBounds {
+ return kNaN;
+ }
+ }
+
+ // ES6 #sec-string.prototype.codepointat
+ transitioning javascript builtin StringPrototypeCodePointAt(
+ implicit context: Context)(receiver: Object, position: Object): Object {
+ try {
+ GenerateStringAt(receiver, position, 'String.prototype.codePointAt')
+ otherwise IfInBounds, IfOutOfBounds;
+ }
+ label IfInBounds(string: String, index: intptr, length: intptr) {
+ // This is always a call to a builtin from Javascript, so we need to
+ // produce UTF32.
+ const code: int32 = LoadSurrogatePairAt(string, length, index, UTF32);
+ return Convert<Smi>(code);
+ }
+ label IfOutOfBounds {
+ return Undefined;
+ }
+ }
+
+ // ES6 String.prototype.concat(...args)
+ // ES6 #sec-string.prototype.concat
+ transitioning javascript builtin StringPrototypeConcat(
+ implicit context: Context)(receiver: Object, ...arguments): Object {
+ // Check that {receiver} is coercible to Object and convert it to a String.
+ let string: String = ToThisString(receiver, 'String.prototype.concat');
+
+ // Concatenate all the arguments passed to this builtin.
+ const length: intptr = Convert<intptr>(arguments.length);
+ for (let i: intptr = 0; i < length; i++) {
+ const temp: String = ToString_Inline(context, arguments[i]);
+ string = string + temp;
+ }
+ return string;
+ }
+}
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 31be407e52..a0d745b2f4 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -12,26 +12,18 @@ namespace typed_array_createtypedarray {
implicit context: Context)(JSFunction, JSReceiver): JSTypedArray;
extern macro TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
implicit context: Context)(JSTypedArray, uintptr): JSArrayBuffer;
- extern macro TypedArrayBuiltinsAssembler::AllocateOnHeapElements(
- Map, intptr, Number): FixedTypedArrayBase;
extern macro TypedArrayBuiltinsAssembler::GetDefaultConstructor(
implicit context: Context)(JSTypedArray): JSFunction;
extern macro TypedArrayBuiltinsAssembler::IsSharedArrayBuffer(JSArrayBuffer):
bool;
extern macro TypedArrayBuiltinsAssembler::SetupTypedArray(
- JSTypedArray, Smi, uintptr, uintptr): void;
+ JSTypedArray, uintptr, uintptr, uintptr): void;
extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)(
Map, String): never;
extern runtime TypedArrayCopyElements(Context, JSTypedArray, Object, Number):
void;
- macro CalculateTotalElementsByteSize(byteLength: intptr): intptr {
- return (kFixedTypedArrayBaseHeaderSize + kObjectAlignmentMask +
- byteLength) &
- ~kObjectAlignmentMask;
- }
-
transitioning macro TypedArrayInitialize(implicit context: Context)(
initialize: constexpr bool, typedArray: JSTypedArray, length: PositiveSmi,
elementsInfo: typed_array::TypedArrayElementsInfo,
@@ -51,14 +43,8 @@ namespace typed_array_createtypedarray {
AllocateEmptyOnHeapBuffer(typedArray, byteLength);
- const totalSize =
- CalculateTotalElementsByteSize(Convert<intptr>(byteLength));
- const elements =
- AllocateOnHeapElements(elementsInfo.map, totalSize, length);
- typedArray.elements = elements;
-
if constexpr (initialize) {
- const backingStore = LoadFixedTypedArrayOnHeapBackingStore(elements);
+ const backingStore = typedArray.data_ptr;
typed_array::CallCMemset(backingStore, 0, byteLength);
}
}
@@ -73,12 +59,12 @@ namespace typed_array_createtypedarray {
label AttachOffHeapBuffer(bufferObj: Object) {
const buffer = Cast<JSArrayBuffer>(bufferObj) otherwise unreachable;
const byteOffset: uintptr = 0;
- typedArray.AttachOffHeapBuffer(
- buffer, elementsInfo.map, length, byteOffset);
+ typedArray.AttachOffHeapBuffer(buffer, byteOffset);
}
const byteOffset: uintptr = 0;
- SetupTypedArray(typedArray, length, byteOffset, byteLength);
+ SetupTypedArray(
+ typedArray, Convert<uintptr>(length), byteOffset, byteLength);
return byteLength;
}
@@ -126,7 +112,7 @@ namespace typed_array_createtypedarray {
goto IfSlow;
} else if (length > 0) {
- assert(byteLength <= kTypedArrayMaxByteLength);
+ assert(byteLength <= kArrayBufferMaxByteLength);
typed_array::CallCMemcpy(typedArray.data_ptr, src.data_ptr, byteLength);
}
}
@@ -157,7 +143,9 @@ namespace typed_array_createtypedarray {
let bufferConstructor: JSReceiver = GetArrayBufferFunction();
const srcBuffer: JSArrayBuffer = srcTypedArray.buffer;
// TODO(petermarshall): Throw on detached typedArray.
- let length: Smi = IsDetachedBuffer(srcBuffer) ? 0 : srcTypedArray.length;
+ // TODO(v8:4156): Update this to support huge TypedArrays.
+ let length =
+ IsDetachedBuffer(srcBuffer) ? 0 : Convert<Number>(srcTypedArray.length);
// The spec requires that constructing a typed array using a SAB-backed
// typed array use the ArrayBuffer constructor, not the species constructor.
@@ -236,9 +224,9 @@ namespace typed_array_createtypedarray {
goto IfInvalidLength;
}
- SetupTypedArray(typedArray, newLength, offset, newByteLength);
- typedArray.AttachOffHeapBuffer(
- buffer, elementsInfo.map, newLength, offset);
+ SetupTypedArray(
+ typedArray, Convert<uintptr>(newLength), offset, newByteLength);
+ typedArray.AttachOffHeapBuffer(buffer, offset);
}
label IfInvalidAlignment(problemString: String) deferred {
ThrowInvalidTypedArrayAlignment(typedArray.map, problemString);
@@ -288,11 +276,14 @@ namespace typed_array_createtypedarray {
const array: JSTypedArray = EmitFastNewObject(target, newTarget);
// We need to set the byte_offset / byte_length to some sane values
// to keep the heap verifier happy.
- // TODO(bmeurer): Fix this initialization to not use EmitFastNewObject,
- // which causes the problem, since it puts Undefined into all slots of
- // the object even though that doesn't make any sense for these fields.
+ // TODO(bmeurer, v8:4153): Fix this initialization to not use
+ // EmitFastNewObject, which causes the problem, since it puts
+ // Undefined into all slots of the object even though that
+ // doesn't make any sense for these fields.
array.byte_offset = 0;
array.byte_length = 0;
+ array.length = 0;
+ array.base_pointer = Convert<Smi>(0);
// 5. Let elementSize be the Number value of the Element Size value in Table
// 56 for constructorName.
@@ -371,15 +362,15 @@ namespace typed_array_createtypedarray {
}
}
+ @export
transitioning macro TypedArraySpeciesCreateByLength(implicit context:
Context)(
methodName: constexpr string, exemplar: JSTypedArray,
- length: Smi): JSTypedArray {
- assert(Is<PositiveSmi>(length));
+ length: PositiveSmi): JSTypedArray {
const numArgs: constexpr int31 = 1;
const typedArray: JSTypedArray = TypedArraySpeciesCreate(
methodName, numArgs, exemplar, length, Undefined, Undefined);
- if (typedArray.length < length) deferred {
+ if (typedArray.length < Convert<uintptr>(length)) deferred {
ThrowTypeError(kTypedArrayTooShort);
}
diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq
index 85b4a77618..4f8804880e 100644
--- a/deps/v8/src/builtins/typed-array-every.tq
+++ b/deps/v8/src/builtins/typed-array-every.tq
@@ -11,7 +11,9 @@ namespace typed_array_every {
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
thisArg: Object): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: Smi = Convert<Smi>(witness.Get().length);
+ // TODO(v8:4153): Support huge TypedArrays here.
+ const length =
+ Cast<Smi>(Convert<Number>(witness.Get().length)) otherwise unreachable;
for (let k: Smi = 0; k < length; k++) {
// BUG(4895): We should throw on detached buffers rather than simply exit.
witness.Recheck() otherwise break;
diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq
index d73f21efa1..9407c3a7af 100644
--- a/deps/v8/src/builtins/typed-array-filter.tq
+++ b/deps/v8/src/builtins/typed-array-filter.tq
@@ -21,7 +21,8 @@ namespace typed_array_filter {
const src = typed_array::EnsureAttached(array) otherwise IsDetached;
// 3. Let len be O.[[ArrayLength]].
- const len: Smi = src.length;
+ // TODO(v8:4153): Support huge TypedArrays here.
+ const len = Cast<Smi>(Convert<Number>(src.length)) otherwise unreachable;
// 4. If IsCallable(callbackfn) is false, throw a TypeError exception.
const callbackfn = Cast<Callable>(arguments[0])
@@ -58,7 +59,7 @@ namespace typed_array_filter {
}
// 10. Let A be ? TypedArraySpeciesCreate(O, captured).
- const lengthSmi: Smi = Convert<Smi>(kept.length);
+ const lengthSmi = Convert<PositiveSmi>(kept.length);
const typedArray: JSTypedArray =
typed_array_createtypedarray::TypedArraySpeciesCreateByLength(
kBuiltinName, array, lengthSmi);
diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq
index 19b0602241..3c331eb3bb 100644
--- a/deps/v8/src/builtins/typed-array-find.tq
+++ b/deps/v8/src/builtins/typed-array-find.tq
@@ -11,7 +11,9 @@ namespace typed_array_find {
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
thisArg: Object): Object {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: Smi = Convert<Smi>(witness.Get().length);
+ // TODO(v8:4153): Support huge TypedArrays here.
+ const length =
+ Cast<Smi>(Convert<Number>(witness.Get().length)) otherwise unreachable;
for (let k: Smi = 0; k < length; k++) {
// BUG(4895): We should throw on detached buffers rather than simply exit.
witness.Recheck() otherwise break;
diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq
index fd77774e21..05f112d0d5 100644
--- a/deps/v8/src/builtins/typed-array-findindex.tq
+++ b/deps/v8/src/builtins/typed-array-findindex.tq
@@ -11,7 +11,9 @@ namespace typed_array_findindex {
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
thisArg: Object): Number {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: Smi = Convert<Smi>(witness.Get().length);
+ // TODO(v8:4153): Support huge TypedArrays here.
+ const length =
+ Cast<Smi>(Convert<Number>(witness.Get().length)) otherwise unreachable;
for (let k: Smi = 0; k < length; k++) {
// BUG(4895): We should throw on detached buffers rather than simply exit.
witness.Recheck() otherwise break;
diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq
index 49ed0a67c0..dbf1a121da 100644
--- a/deps/v8/src/builtins/typed-array-foreach.tq
+++ b/deps/v8/src/builtins/typed-array-foreach.tq
@@ -11,7 +11,9 @@ namespace typed_array_foreach {
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
thisArg: Object): Object {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: Smi = Convert<Smi>(array.length);
+ // TODO(v8:4153): Support huge TypedArrays here.
+ const length =
+ Cast<Smi>(Convert<Number>(witness.Get().length)) otherwise unreachable;
for (let k: Smi = 0; k < length; k++) {
// BUG(4895): We should throw on detached buffers rather than simply exit.
witness.Recheck() otherwise break;
diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq
index 232f2fc570..7af918a07b 100644
--- a/deps/v8/src/builtins/typed-array-reduce.tq
+++ b/deps/v8/src/builtins/typed-array-reduce.tq
@@ -11,7 +11,9 @@ namespace typed_array_reduce {
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
initialValue: Object): Object {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: Smi = Convert<Smi>(witness.Get().length);
+ // TODO(v8:4153): Support huge TypedArrays here.
+ const length =
+ Cast<Smi>(Convert<Number>(witness.Get().length)) otherwise unreachable;
let accumulator = initialValue;
for (let k: Smi = 0; k < length; k++) {
// BUG(4895): We should throw on detached buffers rather than simply exit.
diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq
index 3aa9511a06..59ce7ff55b 100644
--- a/deps/v8/src/builtins/typed-array-reduceright.tq
+++ b/deps/v8/src/builtins/typed-array-reduceright.tq
@@ -11,7 +11,9 @@ namespace typed_array_reduceright {
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
initialValue: Object): Object {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: Smi = Convert<Smi>(array.length);
+ // TODO(v8:4153): Support huge TypedArrays here.
+ const length =
+ Cast<Smi>(Convert<Number>(witness.Get().length)) otherwise unreachable;
let accumulator = initialValue;
for (let k: Smi = length - 1; k >= 0; k--) {
// BUG(4895): We should throw on detached buffers rather than simply exit.
diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq
index f56d63c071..991cad6b1b 100644
--- a/deps/v8/src/builtins/typed-array-some.tq
+++ b/deps/v8/src/builtins/typed-array-some.tq
@@ -11,7 +11,9 @@ namespace typed_array_some {
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
thisArg: Object): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
- const length: Smi = Convert<Smi>(witness.Get().length);
+ // TODO(v8:4153): Support huge TypedArrays here.
+ const length =
+ Cast<Smi>(Convert<Number>(witness.Get().length)) otherwise unreachable;
for (let k: Smi = 0; k < length; k++) {
// BUG(4895): We should throw on detached buffers rather than simply exit.
witness.Recheck() otherwise break;
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index d86b48da05..8f923947f1 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -5,6 +5,21 @@
#include 'src/builtins/builtins-typed-array-gen.h'
namespace typed_array {
+ // Naming convention from elements.cc. We have a similar intent but implement
+ // fastpaths using generics instead of using a class hierarchy for elements
+ // kinds specific implementations.
+ type Uint8Elements;
+ type Int8Elements;
+ type Uint16Elements;
+ type Int16Elements;
+ type Uint32Elements;
+ type Int32Elements;
+ type Float32Elements;
+ type Float64Elements;
+ type Uint8ClampedElements;
+ type BigUint64Elements;
+ type BigInt64Elements;
+
struct TypedArrayElementsInfo {
// Calculates the number of bytes required for specified number of elements.
CalculateByteLength(lengthSmi: PositiveSmi): uintptr labels IfInvalid {
@@ -19,7 +34,7 @@ namespace typed_array {
// Calculates the maximum number of elements supported by a specified number
// of bytes.
CalculateLength(byteLength: uintptr): PositiveSmi labels IfInvalid {
- return TryUintPtrToPositiveSmi(byteLength >>> this.sizeLog2)
+ return Convert<PositiveSmi>(byteLength >>> this.sizeLog2)
otherwise IfInvalid;
}
@@ -34,7 +49,6 @@ namespace typed_array {
}
sizeLog2: uintptr;
- map: Map;
kind: ElementsKind;
}
extern runtime TypedArraySortFast(Context, Object): JSTypedArray;
@@ -55,8 +69,8 @@ namespace typed_array {
ElementsKind): bool;
extern macro LoadFixedTypedArrayElementAsTagged(
RawPtr, Smi, constexpr ElementsKind, constexpr ParameterMode): Object;
- extern macro StoreFixedTypedArrayElementFromTagged(
- Context, FixedTypedArrayBase, Smi, Object, constexpr ElementsKind,
+ extern macro StoreJSTypedArrayElementFromTagged(
+ Context, JSTypedArray, Smi, Object, constexpr ElementsKind,
constexpr ParameterMode);
type LoadFn = builtin(Context, JSTypedArray, Smi) => Object;
@@ -120,31 +134,31 @@ namespace typed_array {
macro GetLoadFnForElementsKind(elementsKind: ElementsKind): LoadFn {
if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) {
if (elementsKind == INT32_ELEMENTS) {
- return LoadFixedElement<FixedInt32Array>;
+ return LoadFixedElement<Int32Elements>;
} else if (elementsKind == FLOAT32_ELEMENTS) {
- return LoadFixedElement<FixedFloat32Array>;
+ return LoadFixedElement<Float32Elements>;
} else if (elementsKind == FLOAT64_ELEMENTS) {
- return LoadFixedElement<FixedFloat64Array>;
+ return LoadFixedElement<Float64Elements>;
} else if (elementsKind == UINT8_CLAMPED_ELEMENTS) {
- return LoadFixedElement<FixedUint8ClampedArray>;
+ return LoadFixedElement<Uint8ClampedElements>;
} else if (elementsKind == BIGUINT64_ELEMENTS) {
- return LoadFixedElement<FixedBigUint64Array>;
+ return LoadFixedElement<BigUint64Elements>;
} else if (elementsKind == BIGINT64_ELEMENTS) {
- return LoadFixedElement<FixedBigInt64Array>;
+ return LoadFixedElement<BigInt64Elements>;
} else {
unreachable;
}
} else {
if (elementsKind == UINT8_ELEMENTS) {
- return LoadFixedElement<FixedUint8Array>;
+ return LoadFixedElement<Uint8Elements>;
} else if (elementsKind == INT8_ELEMENTS) {
- return LoadFixedElement<FixedInt8Array>;
+ return LoadFixedElement<Int8Elements>;
} else if (elementsKind == UINT16_ELEMENTS) {
- return LoadFixedElement<FixedUint16Array>;
+ return LoadFixedElement<Uint16Elements>;
} else if (elementsKind == INT16_ELEMENTS) {
- return LoadFixedElement<FixedInt16Array>;
+ return LoadFixedElement<Int16Elements>;
} else if (elementsKind == UINT32_ELEMENTS) {
- return LoadFixedElement<FixedUint32Array>;
+ return LoadFixedElement<Uint32Elements>;
} else {
unreachable;
}
@@ -152,37 +166,37 @@ namespace typed_array {
}
macro KindForArrayType<T: type>(): constexpr ElementsKind;
- KindForArrayType<FixedUint8Array>(): constexpr ElementsKind {
+ KindForArrayType<Uint8Elements>(): constexpr ElementsKind {
return UINT8_ELEMENTS;
}
- KindForArrayType<FixedInt8Array>(): constexpr ElementsKind {
+ KindForArrayType<Int8Elements>(): constexpr ElementsKind {
return INT8_ELEMENTS;
}
- KindForArrayType<FixedUint16Array>(): constexpr ElementsKind {
+ KindForArrayType<Uint16Elements>(): constexpr ElementsKind {
return UINT16_ELEMENTS;
}
- KindForArrayType<FixedInt16Array>(): constexpr ElementsKind {
+ KindForArrayType<Int16Elements>(): constexpr ElementsKind {
return INT16_ELEMENTS;
}
- KindForArrayType<FixedUint32Array>(): constexpr ElementsKind {
+ KindForArrayType<Uint32Elements>(): constexpr ElementsKind {
return UINT32_ELEMENTS;
}
- KindForArrayType<FixedInt32Array>(): constexpr ElementsKind {
+ KindForArrayType<Int32Elements>(): constexpr ElementsKind {
return INT32_ELEMENTS;
}
- KindForArrayType<FixedFloat32Array>(): constexpr ElementsKind {
+ KindForArrayType<Float32Elements>(): constexpr ElementsKind {
return FLOAT32_ELEMENTS;
}
- KindForArrayType<FixedFloat64Array>(): constexpr ElementsKind {
+ KindForArrayType<Float64Elements>(): constexpr ElementsKind {
return FLOAT64_ELEMENTS;
}
- KindForArrayType<FixedUint8ClampedArray>(): constexpr ElementsKind {
+ KindForArrayType<Uint8ClampedElements>(): constexpr ElementsKind {
return UINT8_CLAMPED_ELEMENTS;
}
- KindForArrayType<FixedBigUint64Array>(): constexpr ElementsKind {
+ KindForArrayType<BigUint64Elements>(): constexpr ElementsKind {
return BIGUINT64_ELEMENTS;
}
- KindForArrayType<FixedBigInt64Array>(): constexpr ElementsKind {
+ KindForArrayType<BigInt64Elements>(): constexpr ElementsKind {
return BIGINT64_ELEMENTS;
}
@@ -193,12 +207,11 @@ namespace typed_array {
}
builtin StoreFixedElement<T: type>(
- context: Context, array: JSTypedArray, index: Smi,
+ context: Context, typedArray: JSTypedArray, index: Smi,
value: Object): Object {
- const elements: FixedTypedArrayBase =
- UnsafeCast<FixedTypedArrayBase>(array.elements);
- StoreFixedTypedArrayElementFromTagged(
- context, elements, index, value, KindForArrayType<T>(), SMI_PARAMETERS);
+ StoreJSTypedArrayElementFromTagged(
+ context, typedArray, index, value, KindForArrayType<T>(),
+ SMI_PARAMETERS);
return Undefined;
}
@@ -298,7 +311,8 @@ namespace typed_array {
}
// 4. Let len be obj.[[ArrayLength]].
- const len: Smi = array.length;
+ // TODO(v8:4153): Support huge TypedArrays here.
+ const len = Cast<Smi>(Convert<Number>(array.length)) otherwise unreachable;
// Arrays of length 1 or less are considered sorted.
if (len < 2) return array;
@@ -312,42 +326,42 @@ namespace typed_array {
if (IsElementsKindGreaterThan(elementsKind, UINT32_ELEMENTS)) {
if (elementsKind == INT32_ELEMENTS) {
- loadfn = LoadFixedElement<FixedInt32Array>;
- storefn = StoreFixedElement<FixedInt32Array>;
+ loadfn = LoadFixedElement<Int32Elements>;
+ storefn = StoreFixedElement<Int32Elements>;
} else if (elementsKind == FLOAT32_ELEMENTS) {
- loadfn = LoadFixedElement<FixedFloat32Array>;
- storefn = StoreFixedElement<FixedFloat32Array>;
+ loadfn = LoadFixedElement<Float32Elements>;
+ storefn = StoreFixedElement<Float32Elements>;
} else if (elementsKind == FLOAT64_ELEMENTS) {
- loadfn = LoadFixedElement<FixedFloat64Array>;
- storefn = StoreFixedElement<FixedFloat64Array>;
+ loadfn = LoadFixedElement<Float64Elements>;
+ storefn = StoreFixedElement<Float64Elements>;
} else if (elementsKind == UINT8_CLAMPED_ELEMENTS) {
- loadfn = LoadFixedElement<FixedUint8ClampedArray>;
- storefn = StoreFixedElement<FixedUint8ClampedArray>;
+ loadfn = LoadFixedElement<Uint8ClampedElements>;
+ storefn = StoreFixedElement<Uint8ClampedElements>;
} else if (elementsKind == BIGUINT64_ELEMENTS) {
- loadfn = LoadFixedElement<FixedBigUint64Array>;
- storefn = StoreFixedElement<FixedBigUint64Array>;
+ loadfn = LoadFixedElement<BigUint64Elements>;
+ storefn = StoreFixedElement<BigUint64Elements>;
} else if (elementsKind == BIGINT64_ELEMENTS) {
- loadfn = LoadFixedElement<FixedBigInt64Array>;
- storefn = StoreFixedElement<FixedBigInt64Array>;
+ loadfn = LoadFixedElement<BigInt64Elements>;
+ storefn = StoreFixedElement<BigInt64Elements>;
} else {
unreachable;
}
} else {
if (elementsKind == UINT8_ELEMENTS) {
- loadfn = LoadFixedElement<FixedUint8Array>;
- storefn = StoreFixedElement<FixedUint8Array>;
+ loadfn = LoadFixedElement<Uint8Elements>;
+ storefn = StoreFixedElement<Uint8Elements>;
} else if (elementsKind == INT8_ELEMENTS) {
- loadfn = LoadFixedElement<FixedInt8Array>;
- storefn = StoreFixedElement<FixedInt8Array>;
+ loadfn = LoadFixedElement<Int8Elements>;
+ storefn = StoreFixedElement<Int8Elements>;
} else if (elementsKind == UINT16_ELEMENTS) {
- loadfn = LoadFixedElement<FixedUint16Array>;
- storefn = StoreFixedElement<FixedUint16Array>;
+ loadfn = LoadFixedElement<Uint16Elements>;
+ storefn = StoreFixedElement<Uint16Elements>;
} else if (elementsKind == INT16_ELEMENTS) {
- loadfn = LoadFixedElement<FixedInt16Array>;
- storefn = StoreFixedElement<FixedInt16Array>;
+ loadfn = LoadFixedElement<Int16Elements>;
+ storefn = StoreFixedElement<Int16Elements>;
} else if (elementsKind == UINT32_ELEMENTS) {
- loadfn = LoadFixedElement<FixedUint32Array>;
- storefn = StoreFixedElement<FixedUint32Array>;
+ loadfn = LoadFixedElement<Uint32Elements>;
+ storefn = StoreFixedElement<Uint32Elements>;
} else {
unreachable;
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index bcdf5928e1..5c09b3a8de 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -4,24 +4,24 @@
#if V8_TARGET_ARCH_X64
-#include "src/api-arguments.h"
+#include "src/api/api-arguments.h"
#include "src/base/adapters.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/codegen/code-factory.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/heap/heap-inl.h"
-#include "src/macro-assembler-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/cell.h"
#include "src/objects/debug-objects.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
@@ -30,18 +30,11 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
- ExitFrameType exit_frame_type) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
__ LoadAddress(kJavaScriptCallExtraArg1Register,
ExternalReference::Create(address));
- if (exit_frame_type == BUILTIN_EXIT) {
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
- RelocInfo::CODE_TARGET);
- } else {
- DCHECK(exit_frame_type == EXIT);
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
- RelocInfo::CODE_TARGET);
- }
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -390,7 +383,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ Push(Immediate(StackFrame::TypeToMarker(type)));
// Reserve a slot for the context. It is filled after the root register has
// been set up.
- __ subq(rsp, Immediate(kSystemPointerSize));
+ __ AllocateStackSpace(kSystemPointerSize);
// Save callee-saved registers (X64/X32/Win64 calling conventions).
__ pushq(r12);
__ pushq(r13);
@@ -404,7 +397,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
#ifdef _WIN64
// On Win64 XMM6-XMM15 are callee-save.
- __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ AllocateStackSpace(EntryFrameConstants::kXMMRegistersBlockSize);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
@@ -909,7 +902,8 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadAnyTaggedField(
optimized_code_entry,
- FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset),
+ FieldOperand(feedback_vector,
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset),
decompr_scratch);
// Check if the code entry is a Smi. If yes, we interpret it as an
@@ -1396,7 +1390,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
kScratchRegister));
__ bind(&trampoline_loaded);
- __ addq(rbx, Immediate(interpreter_entry_return_pc_offset->value()));
+ __ addq(rbx, Immediate(interpreter_entry_return_pc_offset.value()));
__ Push(rbx);
// Initialize dispatch table register.
@@ -2331,18 +2325,16 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// -- rbx : the number of [[BoundArguments]] (checked to be non-zero)
// -----------------------------------
- // Reserve stack space for the [[BoundArguments]].
+ // Check the stack for overflow.
{
Label done;
- __ leaq(kScratchRegister, Operand(rbx, times_system_pointer_size, 0));
- __ subq(rsp, kScratchRegister);
- // Check the stack for overflow. We are not trying to catch interruptions
- // (i.e. debug break and preemption) here, so check the "real stack
- // limit".
- __ CompareRoot(rsp, RootIndex::kRealStackLimit);
+ __ shlq(rbx, Immediate(kSystemPointerSizeLog2));
+ __ movq(kScratchRegister, rsp);
+ __ subq(kScratchRegister, rbx);
+ // We are not trying to catch interruptions (i.e. debug break and
+ // preemption) here, so check the "real stack limit".
+ __ CompareRoot(kScratchRegister, RootIndex::kRealStackLimit);
__ j(above_equal, &done, Label::kNear);
- // Restore the stack pointer.
- __ leaq(rsp, Operand(rsp, rbx, times_system_pointer_size, 0));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
@@ -2351,6 +2343,10 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ bind(&done);
}
+ // Reserve stack space for the [[BoundArguments]].
+ __ movq(kScratchRegister, rbx);
+ __ AllocateStackSpace(kScratchRegister);
+
// Adjust effective number of arguments to include return address.
__ incl(rax);
@@ -2358,7 +2354,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
Label loop;
__ Set(rcx, 0);
- __ leaq(rbx, Operand(rsp, rbx, times_system_pointer_size, 0));
+ __ addq(rbx, rsp);
__ bind(&loop);
__ movq(kScratchRegister,
Operand(rbx, rcx, times_system_pointer_size, 0));
@@ -2647,7 +2643,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
arraysize(wasm::kFpParamRegisters),
"frame size mismatch");
- __ subq(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
+ __ AllocateStackSpace(kSimd128Size * arraysize(wasm::kFpParamRegisters));
int offset = 0;
for (DoubleRegister reg : wasm::kFpParamRegisters) {
__ movdqu(Operand(rsp, offset), reg);
@@ -3006,15 +3002,6 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
__ addl(Operand(base_reg, kLevelOffset), Immediate(1));
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1);
- __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_enter_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
Label profiler_disabled;
Label end_profiler_check;
__ Move(rax, ExternalReference::is_profiling_address(isolate));
@@ -3035,15 +3022,6 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// Call the api function!
__ call(rax);
- if (FLAG_log_timer_events) {
- FrameScope frame(masm, StackFrame::MANUAL);
- __ PushSafepointRegisters();
- __ PrepareCallCFunction(1);
- __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
- __ CallCFunction(ExternalReference::log_leave_external_function(), 1);
- __ PopSafepointRegisters();
- }
-
// Load the value from ReturnValue
__ movq(rax, return_value_operand);
__ bind(&prologue);
@@ -3160,7 +3138,7 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
DCHECK(!AreAliased(api_function_address, argc, holder, call_data,
kScratchRegister));
- typedef FunctionCallbackArguments FCA;
+ using FCA = FunctionCallbackArguments;
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
new file mode 100644
index 0000000000..345e80a16e
--- /dev/null
+++ b/deps/v8/src/codegen/OWNERS
@@ -0,0 +1,13 @@
+ahaas@chromium.org
+bmeurer@chromium.org
+clemensh@chromium.org
+jarin@chromium.org
+jgruber@chromium.org
+jkummerow@chromium.org
+mslekova@chromium.org
+mstarzinger@chromium.org
+mvstanton@chromium.org
+neis@chromium.org
+rmcilroy@chromium.org
+sigurds@chromium.org
+tebbi@chromium.org
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h
index 17a38cbbfe..3fbd679104 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h
@@ -34,14 +34,14 @@
// significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
-#define V8_ARM_ASSEMBLER_ARM_INL_H_
+#ifndef V8_CODEGEN_ARM_ASSEMBLER_ARM_INL_H_
+#define V8_CODEGEN_ARM_ASSEMBLER_ARM_INL_H_
-#include "src/arm/assembler-arm.h"
+#include "src/codegen/arm/assembler-arm.h"
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -55,7 +55,6 @@ int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
}
-
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
@@ -63,12 +62,11 @@ void RelocInfo::apply(intptr_t delta) {
*p += delta; // relocate entry
} else if (RelocInfo::IsRelativeCodeTarget(rmode_)) {
Instruction* branch = Instruction::At(pc_);
- int32_t branch_offset = branch->GetBranchOffset() + delta;
+ int32_t branch_offset = branch->GetBranchOffset() - delta;
branch->SetBranchOffset(branch_offset);
}
}
-
Address RelocInfo::target_address() {
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
IsWasmCall(rmode_));
@@ -88,25 +86,25 @@ Address RelocInfo::target_address_address() {
}
}
-
Address RelocInfo::constant_pool_entry_address() {
DCHECK(IsInConstantPool());
return Assembler::constant_pool_entry_address(pc_, constant_pool_);
}
-
-int RelocInfo::target_address_size() {
- return kPointerSize;
-}
+int RelocInfo::target_address_size() { return kPointerSize; }
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- if (IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT) {
+ if (IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT) {
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
@@ -117,8 +115,8 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
@@ -142,7 +140,6 @@ Address RelocInfo::target_internal_reference() {
return Memory<Address>(pc_);
}
-
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return pc_;
@@ -167,7 +164,7 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
@@ -184,10 +181,6 @@ Handle<Code> Assembler::relative_code_target_object_handle_at(
return GetCodeTarget(code_target_index);
}
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) : rmode_(rmode) {
- value_.immediate = immediate;
-}
-
Operand Operand::Zero() { return Operand(static_cast<int32_t>(0)); }
Operand::Operand(const ExternalReference& f)
@@ -208,76 +201,12 @@ void Assembler::CheckBuffer() {
MaybeCheckConstPool();
}
-
void Assembler::emit(Instr x) {
CheckBuffer();
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
}
-
-Address Assembler::target_address_from_return_address(Address pc) {
- // Returns the address of the call target from the return address that will
- // be returned to after a call.
- // Call sequence on V7 or later is:
- // movw ip, #... @ call address low 16
- // movt ip, #... @ call address high 16
- // blx ip
- // @ return address
- // For V6 when the constant pool is unavailable, it is:
- // mov ip, #... @ call address low 8
- // orr ip, ip, #... @ call address 2nd 8
- // orr ip, ip, #... @ call address 3rd 8
- // orr ip, ip, #... @ call address high 8
- // blx ip
- // @ return address
- // In cases that need frequent patching, the address is in the
- // constant pool. It could be a small constant pool load:
- // ldr ip, [pc, #...] @ call address
- // blx ip
- // @ return address
- Address candidate = pc - 2 * kInstrSize;
- Instr candidate_instr(Memory<int32_t>(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr)) {
- return candidate;
- } else {
- if (CpuFeatures::IsSupported(ARMv7)) {
- candidate -= 1 * kInstrSize;
- DCHECK(IsMovW(Memory<int32_t>(candidate)) &&
- IsMovT(Memory<int32_t>(candidate + kInstrSize)));
- } else {
- candidate -= 3 * kInstrSize;
- DCHECK(IsMovImmed(Memory<int32_t>(candidate)) &&
- IsOrrImmed(Memory<int32_t>(candidate + kInstrSize)) &&
- IsOrrImmed(Memory<int32_t>(candidate + 2 * kInstrSize)) &&
- IsOrrImmed(Memory<int32_t>(candidate + 3 * kInstrSize)));
- }
- return candidate;
- }
-}
-
-
-Address Assembler::return_address_from_call_start(Address pc) {
- if (IsLdrPcImmediateOffset(Memory<int32_t>(pc))) {
- // Load from constant pool, small section.
- return pc + kInstrSize * 2;
- } else {
- if (CpuFeatures::IsSupported(ARMv7)) {
- DCHECK(IsMovW(Memory<int32_t>(pc)));
- DCHECK(IsMovT(Memory<int32_t>(pc + kInstrSize)));
- // A movw / movt load immediate.
- return pc + kInstrSize * 3;
- } else {
- DCHECK(IsMovImmed(Memory<int32_t>(pc)));
- DCHECK(IsOrrImmed(Memory<int32_t>(pc + kInstrSize)));
- DCHECK(IsOrrImmed(Memory<int32_t>(pc + 2 * kInstrSize)));
- DCHECK(IsOrrImmed(Memory<int32_t>(pc + 3 * kInstrSize)));
- // A mov / orr load immediate.
- return pc + kInstrSize * 5;
- }
- }
-}
-
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code code, Address target) {
DCHECK(!Builtins::IsIsolateIndependentBuiltin(code));
@@ -293,12 +222,10 @@ void Assembler::deserialization_set_target_internal_reference_at(
Memory<Address>(pc) = target;
}
-
bool Assembler::is_constant_pool_load(Address pc) {
return IsLdrPcImmediateOffset(Memory<int32_t>(pc));
}
-
Address Assembler::constant_pool_entry_address(Address pc,
Address constant_pool) {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory<int32_t>(pc)));
@@ -306,7 +233,6 @@ Address Assembler::constant_pool_entry_address(Address pc,
return pc + GetLdrRegisterImmediateOffset(instr) + Instruction::kPcLoadDelta;
}
-
Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
@@ -432,4 +358,4 @@ T UseScratchRegisterScope::AcquireVfp() {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
+#endif // V8_CODEGEN_ARM_ASSEMBLER_ARM_INL_H_
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 23c19fcfe2..c8ef586fc1 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -34,18 +34,18 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "src/arm/assembler-arm.h"
+#include "src/codegen/arm/assembler-arm.h"
#if V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm-inl.h"
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/string-constants.h"
+#include "src/codegen/arm/assembler-arm-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -196,7 +196,6 @@ static constexpr unsigned CpuFeaturesFromCompiler() {
#endif
}
-
void CpuFeatures::ProbeImpl(bool cross_compile) {
dcache_line_size_ = 64;
@@ -249,7 +248,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
DCHECK_IMPLIES(IsSupported(ARMv8), IsSupported(ARMv7_SUDIV));
}
-
void CpuFeatures::PrintTarget() {
const char* arm_arch = nullptr;
const char* arm_target_type = "";
@@ -277,11 +275,11 @@ void CpuFeatures::PrintTarget() {
#if defined CAN_USE_NEON
arm_fpu = " neon";
#elif defined CAN_USE_VFP3_INSTRUCTIONS
-# if defined CAN_USE_VFP32DREGS
+#if defined CAN_USE_VFP32DREGS
arm_fpu = " vfp3";
-# else
+#else
arm_fpu = " vfp3-d16";
-# endif
+#endif
#else
arm_fpu = " vfp2";
#endif
@@ -298,12 +296,10 @@ void CpuFeatures::PrintTarget() {
arm_thumb = " thumb";
#endif
- printf("target%s%s %s%s%s %s\n",
- arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
- arm_float_abi);
+ printf("target%s%s %s%s%s %s\n", arm_target_type, arm_no_probe, arm_arch,
+ arm_fpu, arm_thumb, arm_float_abi);
}
-
void CpuFeatures::PrintFeatures() {
printf("ARMv8=%d ARMv7=%d VFPv3=%d VFP32DREGS=%d NEON=%d SUDIV=%d",
CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
@@ -319,7 +315,6 @@ void CpuFeatures::PrintFeatures() {
printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -351,10 +346,9 @@ uint32_t RelocInfo::wasm_call_tag() const {
Operand::Operand(Handle<HeapObject> handle) {
rm_ = no_reg;
value_.immediate = static_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
}
-
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
DCHECK(is_uint5(shift_imm));
@@ -375,7 +369,6 @@ Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
}
}
-
Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
DCHECK(shift_op != RRX);
rm_ = rm;
@@ -387,14 +380,14 @@ Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(str);
return result;
@@ -450,7 +443,6 @@ void NeonMemOperand::SetAlignment(int align) {
break;
default:
UNREACHABLE();
- break;
}
}
@@ -499,8 +491,7 @@ const Instr kVldrDPCPattern = 13 * B24 | L | pc.code() * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
-const Instr kBlxRegPattern =
- B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
+const Instr kBlxRegPattern = B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6D * B21 | 0xF * B16;
const Instr kMovMvnPattern = 0xD * B21;
@@ -555,9 +546,7 @@ Assembler::Assembler(const AssemblerOptions& options,
}
}
-Assembler::~Assembler() {
- DCHECK_EQ(const_pool_blocked_nesting_, 0);
-}
+Assembler::~Assembler() { DCHECK_EQ(const_pool_blocked_nesting_, 0); }
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
@@ -600,13 +589,11 @@ void Assembler::Align(int m) {
}
}
-
void Assembler::CodeTargetAlign() {
// Preferred alignment of jump targets on some ARM chips.
Align(8);
}
-
Condition Assembler::GetCondition(Instr instr) {
return Instruction::ConditionField(instr);
}
@@ -615,12 +602,10 @@ bool Assembler::IsLdrRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}
-
bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
}
-
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
DCHECK(IsLdrRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
@@ -628,7 +613,6 @@ int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
return positive ? offset : -offset;
}
-
int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
DCHECK(IsVldrDRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
@@ -637,7 +621,6 @@ int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
return positive ? offset : -offset;
}
-
Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
DCHECK(IsLdrRegisterImmediate(instr));
bool positive = offset >= 0;
@@ -649,7 +632,6 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
return (instr & ~kOff12Mask) | offset;
}
-
Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
DCHECK(IsVldrDRegisterImmediate(instr));
DCHECK((offset & ~3) == offset); // Must be 64-bit aligned.
@@ -662,12 +644,10 @@ Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
return (instr & ~kOff8Mask) | (offset >> 2);
}
-
bool Assembler::IsStrRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}
-
Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
DCHECK(IsStrRegisterImmediate(instr));
bool positive = offset >= 0;
@@ -679,12 +659,10 @@ Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
return (instr & ~kOff12Mask) | offset;
}
-
bool Assembler::IsAddRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}
-
Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
DCHECK(IsAddRegisterImmediate(instr));
DCHECK_GE(offset, 0);
@@ -693,52 +671,42 @@ Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
return (instr & ~kOff12Mask) | offset;
}
-
Register Assembler::GetRd(Instr instr) {
return Register::from_code(Instruction::RdValue(instr));
}
-
Register Assembler::GetRn(Instr instr) {
return Register::from_code(Instruction::RnValue(instr));
}
-
Register Assembler::GetRm(Instr instr) {
return Register::from_code(Instruction::RmValue(instr));
}
-
bool Assembler::IsPush(Instr instr) {
return ((instr & ~kRdMask) == kPushRegPattern);
}
-
bool Assembler::IsPop(Instr instr) {
return ((instr & ~kRdMask) == kPopRegPattern);
}
-
bool Assembler::IsStrRegFpOffset(Instr instr) {
return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}
-
bool Assembler::IsLdrRegFpOffset(Instr instr) {
return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}
-
bool Assembler::IsStrRegFpNegOffset(Instr instr) {
return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}
-
bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}
-
bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// ldr<cond> <Rd>, [pc +/- offset_12].
@@ -755,51 +723,41 @@ bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
return (instr & kVldrDPCMask) == kVldrDPCPattern;
}
-
bool Assembler::IsBlxReg(Instr instr) {
// Check the instruction is indeed a
// blxcc <Rm>
return (instr & kBlxRegMask) == kBlxRegPattern;
}
-
bool Assembler::IsBlxIp(Instr instr) {
// Check the instruction is indeed a
// blx ip
return instr == kBlxIp;
}
-
bool Assembler::IsTstImmediate(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
- (I | TST | S);
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == (I | TST | S);
}
-
bool Assembler::IsCmpRegister(Instr instr) {
return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
- (CMP | S);
+ (CMP | S);
}
-
bool Assembler::IsCmpImmediate(Instr instr) {
- return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
- (I | CMP | S);
+ return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == (I | CMP | S);
}
-
Register Assembler::GetCmpImmediateRegister(Instr instr) {
DCHECK(IsCmpImmediate(instr));
return GetRn(instr);
}
-
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
DCHECK(IsCmpImmediate(instr));
return instr & kOff12Mask;
}
-
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -817,7 +775,6 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
// The link chain is terminated by a branch offset pointing to the
// same position.
-
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
if (is_uint24(instr)) {
@@ -834,7 +791,6 @@ int Assembler::target_at(int pos) {
return pos + Instruction::kPcLoadDelta + imm26;
}
-
void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
if (is_uint24(instr)) {
@@ -955,21 +911,51 @@ void Assembler::print(const Label* L) {
b = "b";
switch (cond) {
- case eq: c = "eq"; break;
- case ne: c = "ne"; break;
- case hs: c = "hs"; break;
- case lo: c = "lo"; break;
- case mi: c = "mi"; break;
- case pl: c = "pl"; break;
- case vs: c = "vs"; break;
- case vc: c = "vc"; break;
- case hi: c = "hi"; break;
- case ls: c = "ls"; break;
- case ge: c = "ge"; break;
- case lt: c = "lt"; break;
- case gt: c = "gt"; break;
- case le: c = "le"; break;
- case al: c = ""; break;
+ case eq:
+ c = "eq";
+ break;
+ case ne:
+ c = "ne";
+ break;
+ case hs:
+ c = "hs";
+ break;
+ case lo:
+ c = "lo";
+ break;
+ case mi:
+ c = "mi";
+ break;
+ case pl:
+ c = "pl";
+ break;
+ case vs:
+ c = "vs";
+ break;
+ case vc:
+ c = "vc";
+ break;
+ case hi:
+ c = "hi";
+ break;
+ case ls:
+ c = "ls";
+ break;
+ case ge:
+ c = "ge";
+ break;
+ case lt:
+ c = "lt";
+ break;
+ case gt:
+ c = "gt";
+ break;
+ case le:
+ c = "le";
+ break;
+ case al:
+ c = "";
+ break;
default:
c = "";
UNREACHABLE();
@@ -984,7 +970,6 @@ void Assembler::print(const Label* L) {
}
}
-
void Assembler::bind_to(Label* L, int pos) {
DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
while (L->is_linked()) {
@@ -996,17 +981,14 @@ void Assembler::bind_to(Label* L, int pos) {
// Keep track of the last bound label so we don't eliminate any instructions
// before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
+ if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
-
void Assembler::bind(Label* L) {
DCHECK(!L->is_bound()); // label can only be bound once
bind_to(L, pc_offset());
}
-
void Assembler::next(Label* L) {
DCHECK(L->is_linked());
int link = target_at(L->pos());
@@ -1061,15 +1043,13 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
}
} else {
Instr alu_insn = (*instr & kALUMask);
- if (alu_insn == ADD ||
- alu_insn == SUB) {
+ if (alu_insn == ADD || alu_insn == SUB) {
if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8,
nullptr)) {
*instr ^= kAddSubFlip;
return true;
}
- } else if (alu_insn == AND ||
- alu_insn == BIC) {
+ } else if (alu_insn == AND || alu_insn == BIC) {
if (FitsShifter(~imm32, rotate_imm, immed_8, nullptr)) {
*instr ^= kAndBicFlip;
return true;
@@ -1310,10 +1290,10 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
// register offset the constructors make sure than both shift_imm_
// and shift_op_ are initialized.
DCHECK(x.rm_ != pc);
- instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ instr |= B25 | x.shift_imm_ * B7 | x.shift_op_ | x.rm_.code();
}
DCHECK((am & (P | W)) == P || x.rn_ != pc); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+ emit(instr | am | x.rn_.code() * B16 | rd.code() * B12);
}
void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
@@ -1363,14 +1343,14 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
instr |= x.rm_.code();
}
DCHECK((am & (P | W)) == P || x.rn_ != pc); // no pc base with writeback
- emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+ emit(instr | am | x.rn_.code() * B16 | rd.code() * B12);
}
void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
DCHECK_NE(rl, 0);
DCHECK(rn != pc);
- emit(instr | rn.code()*B16 | rl);
+ emit(instr | rn.code() * B16 | rl);
}
void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
@@ -1390,14 +1370,12 @@ void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
DCHECK((am & (P | W)) == P || x.rn_ != pc); // no pc base with writeback
// Post-indexed addressing requires W == 1; different than in AddrMode2/3.
- if ((am & P) == 0)
- am |= W;
+ if ((am & P) == 0) am |= W;
DCHECK_GE(offset_8, 0); // no masking needed
- emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
+ emit(instr | am | x.rn_.code() * B16 | crd.code() * B12 | offset_8);
}
-
int Assembler::branch_offset(Label* L) {
int target_pos;
if (L->is_bound()) {
@@ -1420,7 +1398,6 @@ int Assembler::branch_offset(Label* L) {
return target_pos - (pc_offset() + Instruction::kPcLoadDelta);
}
-
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
@@ -1447,7 +1424,7 @@ void Assembler::bl(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
void Assembler::blx(int branch_offset) {
DCHECK_EQ(branch_offset & 1, 0);
- int h = ((branch_offset & 2) >> 1)*B24;
+ int h = ((branch_offset & 2) >> 1) * B24;
int imm24 = branch_offset >> 2;
const bool blx_imm_check = is_int24(imm24);
CHECK(blx_imm_check);
@@ -1456,37 +1433,33 @@ void Assembler::blx(int branch_offset) {
void Assembler::blx(Register target, Condition cond) {
DCHECK(target != pc);
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
+ emit(cond | B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX | target.code());
}
void Assembler::bx(Register target, Condition cond) {
DCHECK(target != pc); // use of pc is actually allowed, but discouraged
- emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
+ emit(cond | B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BX | target.code());
}
-
void Assembler::b(Label* L, Condition cond) {
CheckBuffer();
b(branch_offset(L), cond);
}
-
void Assembler::bl(Label* L, Condition cond) {
CheckBuffer();
bl(branch_offset(L), cond);
}
-
void Assembler::blx(Label* L) {
CheckBuffer();
blx(branch_offset(L));
}
-
// Data-processing instructions.
-void Assembler::and_(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::and_(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | AND | s, dst, src1, src2);
}
@@ -1495,8 +1468,8 @@ void Assembler::and_(Register dst, Register src1, Register src2, SBit s,
and_(dst, src1, Operand(src2), s, cond);
}
-void Assembler::eor(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::eor(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | EOR | s, dst, src1, src2);
}
@@ -1505,8 +1478,8 @@ void Assembler::eor(Register dst, Register src1, Register src2, SBit s,
AddrMode1(cond | EOR | s, dst, src1, Operand(src2));
}
-void Assembler::sub(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::sub(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | SUB | s, dst, src1, src2);
}
@@ -1515,14 +1488,13 @@ void Assembler::sub(Register dst, Register src1, Register src2, SBit s,
sub(dst, src1, Operand(src2), s, cond);
}
-void Assembler::rsb(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::rsb(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | RSB | s, dst, src1, src2);
}
-
-void Assembler::add(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::add(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | ADD | s, dst, src1, src2);
}
@@ -1531,24 +1503,21 @@ void Assembler::add(Register dst, Register src1, Register src2, SBit s,
add(dst, src1, Operand(src2), s, cond);
}
-void Assembler::adc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::adc(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | ADC | s, dst, src1, src2);
}
-
-void Assembler::sbc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::sbc(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | SBC | s, dst, src1, src2);
}
-
-void Assembler::rsc(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::rsc(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | RSC | s, dst, src1, src2);
}
-
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
AddrMode1(cond | TST | S, no_reg, src1, src2);
}
@@ -1561,7 +1530,6 @@ void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
AddrMode1(cond | TEQ | S, no_reg, src1, src2);
}
-
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
AddrMode1(cond | CMP | S, no_reg, src1, src2);
}
@@ -1570,20 +1538,18 @@ void Assembler::cmp(Register src1, Register src2, Condition cond) {
cmp(src1, Operand(src2), cond);
}
-void Assembler::cmp_raw_immediate(
- Register src, int raw_immediate, Condition cond) {
+void Assembler::cmp_raw_immediate(Register src, int raw_immediate,
+ Condition cond) {
DCHECK(is_uint12(raw_immediate));
emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
-
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
AddrMode1(cond | CMN | S, no_reg, src1, src2);
}
-
-void Assembler::orr(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::orr(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | ORR | s, dst, src1, src2);
}
@@ -1641,25 +1607,21 @@ void Assembler::mov_label_offset(Register dst, Label* label) {
}
}
-
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
DCHECK(IsEnabled(ARMv7));
- emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
+ emit(cond | 0x30 * B20 | reg.code() * B12 | EncodeMovwImmediate(immediate));
}
-
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
DCHECK(IsEnabled(ARMv7));
- emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
+ emit(cond | 0x34 * B20 | reg.code() * B12 | EncodeMovwImmediate(immediate));
}
-
-void Assembler::bic(Register dst, Register src1, const Operand& src2,
- SBit s, Condition cond) {
+void Assembler::bic(Register dst, Register src1, const Operand& src2, SBit s,
+ Condition cond) {
AddrMode1(cond | BIC | s, dst, src1, src2);
}
-
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
AddrMode1(cond | MVN | s, dst, no_reg, src);
}
@@ -1695,20 +1657,18 @@ void Assembler::lsr(Register dst, Register src1, const Operand& src2, SBit s,
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
SBit s, Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
- emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
+ emit(cond | A | s | dst.code() * B16 | srcA.code() * B12 | src2.code() * B8 |
+ B7 | B4 | src1.code());
}
-
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
DCHECK(IsEnabled(ARMv7));
- emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
+ emit(cond | B22 | B21 | dst.code() * B16 | srcA.code() * B12 |
+ src2.code() * B8 | B7 | B4 | src1.code());
}
-
void Assembler::sdiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
@@ -1717,7 +1677,6 @@ void Assembler::sdiv(Register dst, Register src1, Register src2,
src2.code() * B8 | B4 | src1.code());
}
-
void Assembler::udiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
@@ -1726,7 +1685,6 @@ void Assembler::udiv(Register dst, Register src1, Register src2,
src2.code() * B8 | B4 | src1.code());
}
-
void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
@@ -1734,7 +1692,6 @@ void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
}
-
void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc && srcA != pc);
@@ -1742,7 +1699,6 @@ void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
}
-
void Assembler::smmul(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
@@ -1750,73 +1706,49 @@ void Assembler::smmul(Register dst, Register src1, Register src2,
src2.code() * B8 | B4 | src1.code());
}
-
-void Assembler::smlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
+void Assembler::smlal(Register dstL, Register dstH, Register src1,
+ Register src2, SBit s, Condition cond) {
DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
DCHECK(dstL != dstH);
- emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
+ emit(cond | B23 | B22 | A | s | dstH.code() * B16 | dstL.code() * B12 |
+ src2.code() * B8 | B7 | B4 | src1.code());
}
-
-void Assembler::smull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
+void Assembler::smull(Register dstL, Register dstH, Register src1,
+ Register src2, SBit s, Condition cond) {
DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
DCHECK(dstL != dstH);
- emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
+ emit(cond | B23 | B22 | s | dstH.code() * B16 | dstL.code() * B12 |
+ src2.code() * B8 | B7 | B4 | src1.code());
}
-
-void Assembler::umlal(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
+void Assembler::umlal(Register dstL, Register dstH, Register src1,
+ Register src2, SBit s, Condition cond) {
DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
DCHECK(dstL != dstH);
- emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
+ emit(cond | B23 | A | s | dstH.code() * B16 | dstL.code() * B12 |
+ src2.code() * B8 | B7 | B4 | src1.code());
}
-
-void Assembler::umull(Register dstL,
- Register dstH,
- Register src1,
- Register src2,
- SBit s,
- Condition cond) {
+void Assembler::umull(Register dstL, Register dstH, Register src1,
+ Register src2, SBit s, Condition cond) {
DCHECK(dstL != pc && dstH != pc && src1 != pc && src2 != pc);
DCHECK(dstL != dstH);
- emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
- src2.code()*B8 | B7 | B4 | src1.code());
+ emit(cond | B23 | s | dstH.code() * B16 | dstL.code() * B12 |
+ src2.code() * B8 | B7 | B4 | src1.code());
}
-
// Miscellaneous arithmetic instructions.
void Assembler::clz(Register dst, Register src, Condition cond) {
DCHECK(dst != pc && src != pc);
- emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
- 15*B8 | CLZ | src.code());
+ emit(cond | B24 | B22 | B21 | 15 * B16 | dst.code() * B12 | 15 * B8 | CLZ |
+ src.code());
}
-
// Saturating instructions.
// Unsigned saturate.
-void Assembler::usat(Register dst,
- int satpos,
- const Operand& src,
+void Assembler::usat(Register dst, int satpos, const Operand& src,
Condition cond) {
DCHECK(dst != pc && src.rm_ != pc);
DCHECK((satpos >= 0) && (satpos <= 31));
@@ -1825,24 +1757,20 @@ void Assembler::usat(Register dst,
int sh = 0;
if (src.shift_op_ == ASR) {
- sh = 1;
+ sh = 1;
}
emit(cond | 0x6 * B24 | 0xE * B20 | satpos * B16 | dst.code() * B12 |
src.shift_imm_ * B7 | sh * B6 | 0x1 * B4 | src.rm_.code());
}
-
// Bitfield manipulation instructions.
// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register.
// ubfx dst, src, #lsb, #width
-void Assembler::ubfx(Register dst,
- Register src,
- int lsb,
- int width,
+void Assembler::ubfx(Register dst, Register src, int lsb, int width,
Condition cond) {
DCHECK(IsEnabled(ARMv7));
DCHECK(dst != pc && src != pc);
@@ -1852,16 +1780,12 @@ void Assembler::ubfx(Register dst,
lsb * B7 | B6 | B4 | src.code());
}
-
// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
// sbfx dst, src, #lsb, #width
-void Assembler::sbfx(Register dst,
- Register src,
- int lsb,
- int width,
+void Assembler::sbfx(Register dst, Register src, int lsb, int width,
Condition cond) {
DCHECK(IsEnabled(ARMv7));
DCHECK(dst != pc && src != pc);
@@ -1871,7 +1795,6 @@ void Assembler::sbfx(Register dst,
lsb * B7 | B6 | B4 | src.code());
}
-
// Bit field clear.
// Sets #width adjacent bits at position #lsb in the destination register
// to zero, preserving the value of the other bits.
@@ -1885,15 +1808,11 @@ void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 | 0xF);
}
-
// Bit field insert.
// Inserts #width adjacent bits from the low bits of the source register
// into position #lsb of the destination register.
// bfi dst, src, #lsb, #width
-void Assembler::bfi(Register dst,
- Register src,
- int lsb,
- int width,
+void Assembler::bfi(Register dst, Register src, int lsb, int width,
Condition cond) {
DCHECK(IsEnabled(ARMv7));
DCHECK(dst != pc && src != pc);
@@ -1904,11 +1823,8 @@ void Assembler::bfi(Register dst,
src.code());
}
-
-void Assembler::pkhbt(Register dst,
- Register src1,
- const Operand& src2,
- Condition cond ) {
+void Assembler::pkhbt(Register dst, Register src1, const Operand& src2,
+ Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.125.
// cond(31-28) | 01101000(27-20) | Rn(19-16) |
// Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
@@ -1918,14 +1834,11 @@ void Assembler::pkhbt(Register dst,
DCHECK(src2.rm() != pc);
DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
DCHECK(src2.shift_op() == LSL);
- emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
- src2.shift_imm_*B7 | B4 | src2.rm().code());
+ emit(cond | 0x68 * B20 | src1.code() * B16 | dst.code() * B12 |
+ src2.shift_imm_ * B7 | B4 | src2.rm().code());
}
-
-void Assembler::pkhtb(Register dst,
- Register src1,
- const Operand& src2,
+void Assembler::pkhtb(Register dst, Register src1, const Operand& src2,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.125.
// cond(31-28) | 01101000(27-20) | Rn(19-16) |
@@ -1937,11 +1850,10 @@ void Assembler::pkhtb(Register dst,
DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
DCHECK(src2.shift_op() == ASR);
int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
- emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
- asr*B7 | B6 | B4 | src2.rm().code());
+ emit(cond | 0x68 * B20 | src1.code() * B16 | dst.code() * B12 | asr * B7 |
+ B6 | B4 | src2.rm().code());
}
-
void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.233.
// cond(31-28) | 01101010(27-20) | 1111(19-16) |
@@ -1953,7 +1865,6 @@ void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
-
void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.233.
@@ -1967,7 +1878,6 @@ void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
-
void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.235.
// cond(31-28) | 01101011(27-20) | 1111(19-16) |
@@ -1979,7 +1889,6 @@ void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
-
void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.235.
@@ -1993,7 +1902,6 @@ void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
-
void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.274.
// cond(31-28) | 01101110(27-20) | 1111(19-16) |
@@ -2005,7 +1913,6 @@ void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
-
void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.271.
@@ -2019,7 +1926,6 @@ void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
-
void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.275.
// cond(31-28) | 01101100(27-20) | 1111(19-16) |
@@ -2031,7 +1937,6 @@ void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
-
void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.276.
// cond(31-28) | 01101111(27-20) | 1111(19-16) |
@@ -2043,7 +1948,6 @@ void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}
-
void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.273.
@@ -2057,7 +1961,6 @@ void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
-
void Assembler::rbit(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.144.
// cond(31-28) | 011011111111(27-16) | Rd(15-12) | 11110011(11-4) | Rm(3-0)
@@ -2078,10 +1981,9 @@ void Assembler::rev(Register dst, Register src, Condition cond) {
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
DCHECK(dst != pc);
- emit(cond | B24 | s | 15*B16 | dst.code()*B12);
+ emit(cond | B24 | s | 15 * B16 | dst.code() * B12);
}
-
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
DCHECK_NE(fields & 0x000F0000, 0); // At least one field must be set.
@@ -2101,58 +2003,49 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
msr(fields, Operand(scratch), cond);
return;
}
- instr = I | rotate_imm*B8 | immed_8;
+ instr = I | rotate_imm * B8 | immed_8;
} else {
DCHECK(src.IsRegister()); // Only rm is allowed.
instr = src.rm_.code();
}
- emit(cond | instr | B24 | B21 | fields | 15*B12);
+ emit(cond | instr | B24 | B21 | fields | 15 * B12);
}
-
// Load/Store instructions.
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
AddrMode2(cond | B26 | L, dst, src);
}
-
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
AddrMode2(cond | B26, src, dst);
}
-
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
AddrMode2(cond | B26 | B | L, dst, src);
}
-
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
AddrMode2(cond | B26 | B, src, dst);
}
-
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
AddrMode3(cond | L | B7 | H | B4, dst, src);
}
-
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
AddrMode3(cond | B7 | H | B4, src, dst);
}
-
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
AddrMode3(cond | L | B7 | S6 | B4, dst, src);
}
-
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
AddrMode3(cond | L | B7 | S6 | H | B4, dst, src);
}
-
-void Assembler::ldrd(Register dst1, Register dst2,
- const MemOperand& src, Condition cond) {
+void Assembler::ldrd(Register dst1, Register dst2, const MemOperand& src,
+ Condition cond) {
DCHECK(src.rm() == no_reg);
DCHECK(dst1 != lr); // r14.
DCHECK_EQ(0, dst1.code() % 2);
@@ -2160,9 +2053,8 @@ void Assembler::ldrd(Register dst1, Register dst2,
AddrMode3(cond | B7 | B6 | B4, dst1, src);
}
-
-void Assembler::strd(Register src1, Register src2,
- const MemOperand& dst, Condition cond) {
+void Assembler::strd(Register src1, Register src2, const MemOperand& dst,
+ Condition cond) {
DCHECK(dst.rm() == no_reg);
DCHECK(src1 != lr); // r14.
DCHECK_EQ(0, src1.code() % 2);
@@ -2291,11 +2183,8 @@ void Assembler::pld(const MemOperand& address) {
address.rn().code() * B16 | 0xF * B12 | offset);
}
-
// Load/Store multiple instructions.
-void Assembler::ldm(BlockAddrMode am,
- Register base,
- RegList dst,
+void Assembler::ldm(BlockAddrMode am, Register base, RegList dst,
Condition cond) {
// ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
DCHECK(base == sp || (dst & sp.bit()) == 0);
@@ -2313,15 +2202,11 @@ void Assembler::ldm(BlockAddrMode am,
}
}
-
-void Assembler::stm(BlockAddrMode am,
- Register base,
- RegList src,
+void Assembler::stm(BlockAddrMode am, Register base, RegList src,
Condition cond) {
AddrMode4(cond | B27 | am, base, src);
}
-
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-arm.h .
@@ -2336,7 +2221,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {
svc(kStopCode + kMaxStopCode, cond);
}
}
-#else // def __arm__
+#else // def __arm__
if (cond != al) {
Label skip;
b(&skip, NegateCondition(cond));
@@ -2353,13 +2238,11 @@ void Assembler::bkpt(uint32_t imm16) {
emit(al | B24 | B21 | (imm16 >> 4) * B8 | BKPT | (imm16 & 0xF));
}
-
void Assembler::svc(uint32_t imm24, Condition cond) {
DCHECK(is_uint24(imm24));
- emit(cond | 15*B24 | imm24);
+ emit(cond | 15 * B24 | imm24);
}
-
void Assembler::dmb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-378.
@@ -2371,7 +2254,6 @@ void Assembler::dmb(BarrierOption option) {
}
}
-
void Assembler::dsb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-380.
@@ -2383,7 +2265,6 @@ void Assembler::dsb(BarrierOption option) {
}
}
-
void Assembler::isb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-389.
@@ -2402,16 +2283,12 @@ void Assembler::csdb() {
}
// Coprocessor instructions.
-void Assembler::cdp(Coprocessor coproc,
- int opcode_1,
- CRegister crd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
+void Assembler::cdp(Coprocessor coproc, int opcode_1, CRegister crd,
+ CRegister crn, CRegister crm, int opcode_2,
Condition cond) {
DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
- crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 15) * B20 | crn.code() * B16 |
+ crd.code() * B12 | coproc * B8 | (opcode_2 & 7) * B5 | crm.code());
}
void Assembler::cdp2(Coprocessor coproc, int opcode_1, CRegister crd,
@@ -2419,17 +2296,12 @@ void Assembler::cdp2(Coprocessor coproc, int opcode_1, CRegister crd,
cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}
-
-void Assembler::mcr(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
+void Assembler::mcr(Coprocessor coproc, int opcode_1, Register rd,
+ CRegister crn, CRegister crm, int opcode_2,
Condition cond) {
DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7) * B21 | crn.code() * B16 |
+ rd.code() * B12 | coproc * B8 | (opcode_2 & 7) * B5 | B4 | crm.code());
}
void Assembler::mcr2(Coprocessor coproc, int opcode_1, Register rd,
@@ -2437,17 +2309,12 @@ void Assembler::mcr2(Coprocessor coproc, int opcode_1, Register rd,
mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
-
-void Assembler::mrc(Coprocessor coproc,
- int opcode_1,
- Register rd,
- CRegister crn,
- CRegister crm,
- int opcode_2,
+void Assembler::mrc(Coprocessor coproc, int opcode_1, Register rd,
+ CRegister crn, CRegister crm, int opcode_2,
Condition cond) {
DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
- emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
- rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7) * B21 | L | crn.code() * B16 |
+ rd.code() * B12 | coproc * B8 | (opcode_2 & 7) * B5 | B4 | crm.code());
}
void Assembler::mrc2(Coprocessor coproc, int opcode_1, Register rd,
@@ -2455,26 +2322,17 @@ void Assembler::mrc2(Coprocessor coproc, int opcode_1, Register rd,
mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& src,
- LFlag l,
- Condition cond) {
+void Assembler::ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l, Condition cond) {
AddrMode5(cond | B27 | B26 | l | L | coproc * B8, crd, src);
}
-
-void Assembler::ldc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
+void Assembler::ldc(Coprocessor coproc, CRegister crd, Register rn, int option,
+ LFlag l, Condition cond) {
// Unindexed addressing.
DCHECK(is_uint8(option));
- emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
+ emit(cond | B27 | B26 | U | l | L | rn.code() * B16 | crd.code() * B12 |
+ coproc * B8 | (option & 255));
}
void Assembler::ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
@@ -2487,12 +2345,9 @@ void Assembler::ldc2(Coprocessor coproc, CRegister crd, Register rn, int option,
ldc(coproc, crd, rn, option, l, kSpecialCondition);
}
-
// Support for VFP.
-void Assembler::vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
+void Assembler::vldr(const DwVfpRegister dst, const Register base, int offset,
const Condition cond) {
// Ddst = MEM(Rbase + offset).
// Instruction details available in ARM DDI 0406C.b, A8-924.
@@ -2510,8 +2365,8 @@ void Assembler::vldr(const DwVfpRegister dst,
DCHECK_GE(offset, 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ emit(cond | 0xD * B24 | u * B23 | d * B22 | B20 | base.code() * B16 |
+ vd * B12 | 0xB * B8 | ((offset / 4) & 255));
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -2528,9 +2383,7 @@ void Assembler::vldr(const DwVfpRegister dst,
}
}
-
-void Assembler::vldr(const DwVfpRegister dst,
- const MemOperand& operand,
+void Assembler::vldr(const DwVfpRegister dst, const MemOperand& operand,
const Condition cond) {
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(operand.am_ == Offset);
@@ -2545,10 +2398,7 @@ void Assembler::vldr(const DwVfpRegister dst,
}
}
-
-void Assembler::vldr(const SwVfpRegister dst,
- const Register base,
- int offset,
+void Assembler::vldr(const SwVfpRegister dst, const Register base, int offset,
const Condition cond) {
// Sdst = MEM(Rbase + offset).
// Instruction details available in ARM DDI 0406A, A8-628.
@@ -2564,8 +2414,8 @@ void Assembler::vldr(const SwVfpRegister dst,
DCHECK_GE(offset, 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
+ emit(cond | u * B23 | d * B22 | 0xD1 * B20 | base.code() * B16 | sd * B12 |
+ 0xA * B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address in a
// scratch register.
@@ -2582,9 +2432,7 @@ void Assembler::vldr(const SwVfpRegister dst,
}
}
-
-void Assembler::vldr(const SwVfpRegister dst,
- const MemOperand& operand,
+void Assembler::vldr(const SwVfpRegister dst, const MemOperand& operand,
const Condition cond) {
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
@@ -2598,10 +2446,7 @@ void Assembler::vldr(const SwVfpRegister dst,
}
}
-
-void Assembler::vstr(const DwVfpRegister src,
- const Register base,
- int offset,
+void Assembler::vstr(const DwVfpRegister src, const Register base, int offset,
const Condition cond) {
// MEM(Rbase + offset) = Dsrc.
// Instruction details available in ARM DDI 0406C.b, A8-1082.
@@ -2619,8 +2464,8 @@ void Assembler::vstr(const DwVfpRegister src,
src.split_code(&vd, &d);
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
- ((offset / 4) & 255));
+ emit(cond | 0xD * B24 | u * B23 | d * B22 | base.code() * B16 | vd * B12 |
+ 0xB * B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address in the a
// scratch register.
@@ -2637,9 +2482,7 @@ void Assembler::vstr(const DwVfpRegister src,
}
}
-
-void Assembler::vstr(const DwVfpRegister src,
- const MemOperand& operand,
+void Assembler::vstr(const DwVfpRegister src, const MemOperand& operand,
const Condition cond) {
DCHECK(VfpRegisterIsAvailable(src));
DCHECK(operand.am_ == Offset);
@@ -2654,10 +2497,7 @@ void Assembler::vstr(const DwVfpRegister src,
}
}
-
-void Assembler::vstr(const SwVfpRegister src,
- const Register base,
- int offset,
+void Assembler::vstr(const SwVfpRegister src, const Register base, int offset,
const Condition cond) {
// MEM(Rbase + offset) = SSrc.
// Instruction details available in ARM DDI 0406A, A8-786.
@@ -2673,8 +2513,8 @@ void Assembler::vstr(const SwVfpRegister src,
src.split_code(&sd, &d);
DCHECK_GE(offset, 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | ((offset / 4) & 255));
+ emit(cond | u * B23 | d * B22 | 0xD0 * B20 | base.code() * B16 | sd * B12 |
+ 0xA * B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address in a
// scratch register.
@@ -2691,9 +2531,7 @@ void Assembler::vstr(const SwVfpRegister src,
}
}
-
-void Assembler::vstr(const SwVfpRegister src,
- const MemOperand& operand,
+void Assembler::vstr(const SwVfpRegister src, const MemOperand& operand,
const Condition cond) {
DCHECK(operand.am_ == Offset);
if (operand.rm().is_valid()) {
@@ -2721,8 +2559,8 @@ void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
DCHECK_LE(count, 16);
- emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
- 0xB*B8 | count*2);
+ emit(cond | B27 | B26 | am | d * B22 | B20 | base.code() * B16 | sd * B12 |
+ 0xB * B8 | count * 2);
}
void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
@@ -2739,8 +2577,8 @@ void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
DCHECK_LE(count, 16);
- emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
- 0xB*B8 | count*2);
+ emit(cond | B27 | B26 | am | d * B22 | base.code() * B16 | sd * B12 |
+ 0xB * B8 | count * 2);
}
void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
@@ -2755,8 +2593,8 @@ void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
- emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
- 0xA*B8 | count);
+ emit(cond | B27 | B26 | am | d * B22 | B20 | base.code() * B16 | sd * B12 |
+ 0xA * B8 | count);
}
void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
@@ -2771,8 +2609,8 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
- emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
- 0xA*B8 | count);
+ emit(cond | B27 | B26 | am | d * B22 | base.code() * B16 | sd * B12 |
+ 0xA * B8 | count);
}
static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) {
@@ -2866,7 +2704,8 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
// Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
int vd, d;
dst.split_code(&vd, &d);
- emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
+ emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B8 |
+ enc);
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@@ -2900,20 +2739,18 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
}
}
-void Assembler::vmov(const SwVfpRegister dst,
- const SwVfpRegister src,
+void Assembler::vmov(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
- emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
+ emit(cond | 0xE * B24 | d * B22 | 0xB * B20 | sd * B12 | 0xA * B8 | B6 |
+ m * B5 | sm);
}
-
-void Assembler::vmov(const DwVfpRegister dst,
- const DwVfpRegister src,
+void Assembler::vmov(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// Dd = Dm
// Instruction details available in ARM DDI 0406C.b, A8-938.
@@ -2925,14 +2762,12 @@ void Assembler::vmov(const DwVfpRegister dst,
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
- vm);
+ emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B8 | B6 |
+ m * B5 | vm);
}
-void Assembler::vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
- const Condition cond) {
+void Assembler::vmov(const DwVfpRegister dst, const Register src1,
+ const Register src2, const Condition cond) {
// Dm = <Rt,Rt2>.
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
@@ -2941,15 +2776,12 @@ void Assembler::vmov(const DwVfpRegister dst,
DCHECK(src1 != pc && src2 != pc);
int vm, m;
dst.split_code(&vm, &m);
- emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
+ emit(cond | 0xC * B24 | B22 | src2.code() * B16 | src1.code() * B12 |
+ 0xB * B8 | m * B5 | B4 | vm);
}
-
-void Assembler::vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
- const Condition cond) {
+void Assembler::vmov(const Register dst1, const Register dst2,
+ const DwVfpRegister src, const Condition cond) {
// <Rt,Rt2> = Dm.
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
@@ -2958,13 +2790,11 @@ void Assembler::vmov(const Register dst1,
DCHECK(dst1 != pc && dst2 != pc);
int vm, m;
src.split_code(&vm, &m);
- emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
+ emit(cond | 0xC * B24 | B22 | B20 | dst2.code() * B16 | dst1.code() * B12 |
+ 0xB * B8 | m * B5 | B4 | vm);
}
-
-void Assembler::vmov(const SwVfpRegister dst,
- const Register src,
+void Assembler::vmov(const SwVfpRegister dst, const Register src,
const Condition cond) {
// Sn = Rt.
// Instruction details available in ARM DDI 0406A, A8-642.
@@ -2973,12 +2803,10 @@ void Assembler::vmov(const SwVfpRegister dst,
DCHECK(src != pc);
int sn, n;
dst.split_code(&sn, &n);
- emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
+ emit(cond | 0xE * B24 | sn * B16 | src.code() * B12 | 0xA * B8 | n * B7 | B4);
}
-
-void Assembler::vmov(const Register dst,
- const SwVfpRegister src,
+void Assembler::vmov(const Register dst, const SwVfpRegister src,
const Condition cond) {
// Rt = Sn.
// Instruction details available in ARM DDI 0406A, A8-642.
@@ -2987,14 +2815,14 @@ void Assembler::vmov(const Register dst,
DCHECK(dst != pc);
int sn, n;
src.split_code(&sn, &n);
- emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
+ emit(cond | 0xE * B24 | B20 | sn * B16 | dst.code() * B12 | 0xA * B8 |
+ n * B7 | B4);
}
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
enum VFPType { S32, U32, F32, F64 };
-
static bool IsSignedVFPType(VFPType type) {
switch (type) {
case S32:
@@ -3006,7 +2834,6 @@ static bool IsSignedVFPType(VFPType type) {
}
}
-
static bool IsIntegerVFPType(VFPType type) {
switch (type) {
case S32:
@@ -3020,7 +2847,6 @@ static bool IsIntegerVFPType(VFPType type) {
}
}
-
static bool IsDoubleVFPType(VFPType type) {
switch (type) {
case F32:
@@ -3032,15 +2858,11 @@ static bool IsDoubleVFPType(VFPType type) {
}
}
-
// Split five bit reg_code based on size of reg_type.
// 32-bit register codes are Vm:M
// 64-bit register codes are M:Vm
// where Vm is four bits, and M is a single bit.
-static void SplitRegCode(VFPType reg_type,
- int reg_code,
- int* vm,
- int* m) {
+static void SplitRegCode(VFPType reg_type, int reg_code, int* vm, int* m) {
DCHECK((reg_code >= 0) && (reg_code <= 31));
if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
SwVfpRegister::split_code(reg_code, vm, m);
@@ -3049,14 +2871,10 @@ static void SplitRegCode(VFPType reg_type,
}
}
-
// Encode vcvt.src_type.dst_type instruction.
-static Instr EncodeVCVT(const VFPType dst_type,
- const int dst_code,
- const VFPType src_type,
- const int src_code,
- VFPConversionMode mode,
- const Condition cond) {
+static Instr EncodeVCVT(const VFPType dst_type, const int dst_code,
+ const VFPType src_type, const int src_code,
+ VFPConversionMode mode, const Condition cond) {
DCHECK(src_type != dst_type);
int D, Vd, M, Vm;
SplitRegCode(src_type, src_code, &Vm, &M);
@@ -3082,102 +2900,76 @@ static Instr EncodeVCVT(const VFPType dst_type,
op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
}
- return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
- Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
+ return (cond | 0xE * B24 | B23 | D * B22 | 0x3 * B20 | B19 | opc2 * B16 |
+ Vd * B12 | 0x5 * B9 | sz * B8 | op * B7 | B6 | M * B5 | Vm);
} else {
// Conversion between IEEE double and single precision.
// Instruction details available in ARM DDI 0406B, A8.6.298.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
// Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
- return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
- Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
+ return (cond | 0xE * B24 | B23 | D * B22 | 0x3 * B20 | 0x7 * B16 |
+ Vd * B12 | 0x5 * B9 | sz * B8 | B7 | B6 | M * B5 | Vm);
}
}
-
-void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
+void Assembler::vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
DCHECK(VfpRegisterIsAvailable(dst));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
-
-void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
+void Assembler::vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
-
-void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
+void Assembler::vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
DCHECK(VfpRegisterIsAvailable(dst));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
-
void Assembler::vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode, const Condition cond) {
emit(EncodeVCVT(F32, dst.code(), U32, src.code(), mode, cond));
}
-
void Assembler::vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode, const Condition cond) {
emit(EncodeVCVT(S32, dst.code(), F32, src.code(), mode, cond));
}
-
void Assembler::vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode, const Condition cond) {
emit(EncodeVCVT(U32, dst.code(), F32, src.code(), mode, cond));
}
-
-void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
+void Assembler::vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
-
-void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
+void Assembler::vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
-
-void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
- const SwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
+void Assembler::vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
DCHECK(VfpRegisterIsAvailable(dst));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
-
-void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
- VFPConversionMode mode,
- const Condition cond) {
+void Assembler::vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
-
-void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
- int fraction_bits,
+void Assembler::vcvt_f64_s32(const DwVfpRegister dst, int fraction_bits,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-874.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
@@ -3190,13 +2982,11 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
int imm5 = 32 - fraction_bits;
int i = imm5 & 1;
int imm4 = (imm5 >> 1) & 0xF;
- emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
- vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
+ emit(cond | 0xE * B24 | B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
+ vd * B12 | 0x5 * B9 | B8 | B7 | B6 | i * B5 | imm4);
}
-
-void Assembler::vneg(const DwVfpRegister dst,
- const DwVfpRegister src,
+void Assembler::vneg(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-968.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
@@ -3208,11 +2998,10 @@ void Assembler::vneg(const DwVfpRegister dst,
int vm, m;
src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
- m*B5 | vm);
+ emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
+ B8 | B6 | m * B5 | vm);
}
-
void Assembler::vneg(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-968.
@@ -3227,9 +3016,7 @@ void Assembler::vneg(const SwVfpRegister dst, const SwVfpRegister src,
B6 | m * B5 | vm);
}
-
-void Assembler::vabs(const DwVfpRegister dst,
- const DwVfpRegister src,
+void Assembler::vabs(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-524.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
@@ -3240,11 +3027,10 @@ void Assembler::vabs(const DwVfpRegister dst,
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
- m*B5 | vm);
+ emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B8 | B7 |
+ B6 | m * B5 | vm);
}
-
void Assembler::vabs(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-524.
@@ -3258,11 +3044,8 @@ void Assembler::vabs(const SwVfpRegister dst, const SwVfpRegister src,
m * B5 | vm);
}
-
-void Assembler::vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
+void Assembler::vadd(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond) {
// Dd = vadd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406C.b, A8-830.
@@ -3277,11 +3060,10 @@ void Assembler::vadd(const DwVfpRegister dst,
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
- n*B7 | m*B5 | vm);
+ emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
+ 0x5 * B9 | B8 | n * B7 | m * B5 | vm);
}
-
void Assembler::vadd(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Sd = vadd(Sn, Sm) single precision floating point addition.
@@ -3299,11 +3081,8 @@ void Assembler::vadd(const SwVfpRegister dst, const SwVfpRegister src1,
0x5 * B9 | n * B7 | m * B5 | vm);
}
-
-void Assembler::vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
+void Assembler::vsub(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond) {
// Dd = vsub(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406C.b, A8-1086.
@@ -3318,11 +3097,10 @@ void Assembler::vsub(const DwVfpRegister dst,
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
- n*B7 | B6 | m*B5 | vm);
+ emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
+ 0x5 * B9 | B8 | n * B7 | B6 | m * B5 | vm);
}
-
void Assembler::vsub(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Sd = vsub(Sn, Sm) single precision floating point subtraction.
@@ -3340,11 +3118,8 @@ void Assembler::vsub(const SwVfpRegister dst, const SwVfpRegister src1,
0x5 * B9 | n * B7 | B6 | m * B5 | vm);
}
-
-void Assembler::vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
+void Assembler::vmul(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond) {
// Dd = vmul(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406C.b, A8-960.
@@ -3359,11 +3134,10 @@ void Assembler::vmul(const DwVfpRegister dst,
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
- n*B7 | m*B5 | vm);
+ emit(cond | 0x1C * B23 | d * B22 | 0x2 * B20 | vn * B16 | vd * B12 |
+ 0x5 * B9 | B8 | n * B7 | m * B5 | vm);
}
-
void Assembler::vmul(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Sd = vmul(Sn, Sm) single precision floating point multiplication.
@@ -3381,11 +3155,8 @@ void Assembler::vmul(const SwVfpRegister dst, const SwVfpRegister src1,
0x5 * B9 | n * B7 | m * B5 | vm);
}
-
-void Assembler::vmla(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
+void Assembler::vmla(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-932.
// cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
@@ -3398,11 +3169,10 @@ void Assembler::vmla(const DwVfpRegister dst,
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
- vm);
+ emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | B8 |
+ n * B7 | m * B5 | vm);
}
-
void Assembler::vmla(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-932.
@@ -3418,11 +3188,8 @@ void Assembler::vmla(const SwVfpRegister dst, const SwVfpRegister src1,
m * B5 | vm);
}
-
-void Assembler::vmls(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
+void Assembler::vmls(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-932.
// cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
@@ -3435,11 +3202,10 @@ void Assembler::vmls(const DwVfpRegister dst,
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
- emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
- m*B5 | vm);
+ emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | B8 |
+ n * B7 | B6 | m * B5 | vm);
}
-
void Assembler::vmls(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-932.
@@ -3455,11 +3221,8 @@ void Assembler::vmls(const SwVfpRegister dst, const SwVfpRegister src1,
B6 | m * B5 | vm);
}
-
-void Assembler::vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
+void Assembler::vdiv(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond) {
// Dd = vdiv(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
// Instruction details available in ARM DDI 0406C.b, A8-882.
@@ -3474,11 +3237,10 @@ void Assembler::vdiv(const DwVfpRegister dst,
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
- vm);
+ emit(cond | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | B8 |
+ n * B7 | m * B5 | vm);
}
-
void Assembler::vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond) {
// Sd = vdiv(Sn, Sm) single precision floating point division.
@@ -3496,9 +3258,7 @@ void Assembler::vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
m * B5 | vm);
}
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
+void Assembler::vcmp(const DwVfpRegister src1, const DwVfpRegister src2,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
// Instruction details available in ARM DDI 0406C.b, A8-864.
@@ -3510,11 +3270,10 @@ void Assembler::vcmp(const DwVfpRegister src1,
src1.split_code(&vd, &d);
int vm, m;
src2.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
- m*B5 | vm);
+ emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x4 * B16 | vd * B12 |
+ 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
-
void Assembler::vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
const Condition cond) {
// vcmp(Sd, Sm) single precision floating point comparison.
@@ -3529,9 +3288,7 @@ void Assembler::vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
0x5 * B9 | B6 | m * B5 | vm);
}
-
-void Assembler::vcmp(const DwVfpRegister src1,
- const double src2,
+void Assembler::vcmp(const DwVfpRegister src1, const double src2,
const Condition cond) {
// vcmp(Dd, #0.0) double precision floating point comparison.
// Instruction details available in ARM DDI 0406C.b, A8-864.
@@ -3541,10 +3298,10 @@ void Assembler::vcmp(const DwVfpRegister src1,
DCHECK_EQ(src2, 0.0);
int vd, d;
src1.split_code(&vd, &d);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
+ emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x5 * B16 | vd * B12 |
+ 0x5 * B9 | B8 | B6);
}
-
void Assembler::vcmp(const SwVfpRegister src1, const float src2,
const Condition cond) {
// vcmp(Sd, #0.0) single precision floating point comparison.
@@ -3686,8 +3443,7 @@ void Assembler::vsel(Condition cond, const SwVfpRegister dst,
vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
}
-void Assembler::vsqrt(const DwVfpRegister dst,
- const DwVfpRegister src,
+void Assembler::vsqrt(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-1058.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
@@ -3698,11 +3454,10 @@ void Assembler::vsqrt(const DwVfpRegister dst,
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
- emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
- m*B5 | vm);
+ emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
+ B8 | 0x3 * B6 | m * B5 | vm);
}
-
void Assembler::vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8-1058.
@@ -3716,7 +3471,6 @@ void Assembler::vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
0x3 * B6 | m * B5 | vm);
}
-
void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
@@ -3724,7 +3478,6 @@ void Assembler::vmsr(Register dst, Condition cond) {
emit(cond | 0xE * B24 | 0xE * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
}
-
void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
@@ -3732,7 +3485,6 @@ void Assembler::vmrs(Register dst, Condition cond) {
emit(cond | 0xE * B24 | 0xF * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
}
-
void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
@@ -3746,7 +3498,6 @@ void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
0x5 * B9 | B6 | m * B5 | vm);
}
-
void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3760,7 +3511,6 @@ void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
0x5 * B9 | B8 | B6 | m * B5 | vm);
}
-
void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
@@ -3774,7 +3524,6 @@ void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
-
void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3788,7 +3537,6 @@ void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
-
void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
@@ -3802,7 +3550,6 @@ void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
-
void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3816,7 +3563,6 @@ void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
-
void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
@@ -3830,7 +3576,6 @@ void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
-
void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
// cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
// 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
@@ -3844,7 +3589,6 @@ void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
-
void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
@@ -3858,7 +3602,6 @@ void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
0x5 * B9 | B7 | B6 | m * B5 | vm);
}
-
void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond) {
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
@@ -3872,11 +3615,9 @@ void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
}
-
// Support for NEON.
-void Assembler::vld1(NeonSize size,
- const NeonListOperand& dst,
+void Assembler::vld1(NeonSize size, const NeonListOperand& dst,
const NeonMemOperand& src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.320.
// 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
@@ -3884,8 +3625,9 @@ void Assembler::vld1(NeonSize size,
DCHECK(IsEnabled(NEON));
int vd, d;
dst.base().split_code(&vd, &d);
- emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
- dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
+ emit(0xFU * B28 | 4 * B24 | d * B22 | 2 * B20 | src.rn().code() * B16 |
+ vd * B12 | dst.type() * B8 | size * B6 | src.align() * B4 |
+ src.rm().code());
}
void Assembler::vst1(NeonSize size, const NeonListOperand& src,
@@ -3896,11 +3638,10 @@ void Assembler::vst1(NeonSize size, const NeonListOperand& src,
DCHECK(IsEnabled(NEON));
int vd, d;
src.base().split_code(&vd, &d);
- emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
- size*B6 | dst.align()*B4 | dst.rm().code());
+ emit(0xFU * B28 | 4 * B24 | d * B22 | dst.rn().code() * B16 | vd * B12 |
+ src.type() * B8 | size * B6 | dst.align() * B4 | dst.rm().code());
}
-
void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.346.
// 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
@@ -3952,7 +3693,6 @@ static int EncodeScalar(NeonDataType dt, int index) {
break;
default:
UNREACHABLE();
- break;
}
return (opc1_opc2 >> 2) * B21 | (opc1_opc2 & 0x3) * B5;
}
@@ -4003,7 +3743,6 @@ void Assembler::vdup(NeonSize size, QwNeonRegister dst, Register src) {
break;
default:
UNREACHABLE();
- break;
}
int vd, d;
dst.split_code(&vd, &d);
@@ -4137,7 +3876,6 @@ static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
break;
default:
UNREACHABLE();
- break;
}
int vd, d;
NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
@@ -4231,7 +3969,6 @@ static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op, NeonRegType reg_type,
break;
default:
UNREACHABLE();
- break;
}
int vd, d;
NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
@@ -4338,7 +4075,6 @@ static Instr EncodeNeonBinOp(FPBinOp op, QwNeonRegister dst,
break;
default:
UNREACHABLE();
- break;
}
int vd, d;
dst.split_code(&vd, &d);
@@ -4404,7 +4140,6 @@ static Instr EncodeNeonBinOp(IntegerBinOp op, NeonDataType dt,
break;
default:
UNREACHABLE();
- break;
}
int vd, d;
dst.split_code(&vd, &d);
@@ -4560,7 +4295,6 @@ static Instr EncodeNeonShiftOp(NeonShiftOp op, NeonSize size, bool is_unsigned,
}
default:
UNREACHABLE();
- break;
}
int vd, d;
@@ -4667,7 +4401,6 @@ static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
break;
default:
UNREACHABLE();
- break;
}
int vd, d;
dst.split_code(&vd, &d);
@@ -4819,7 +4552,6 @@ static Instr EncodeNeonSizedOp(NeonSizedOp op, NeonRegType reg_type,
break;
default:
UNREACHABLE();
- break;
}
int vd, d;
NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
@@ -4940,52 +4672,45 @@ void Assembler::nop(int type) {
// We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
// a type.
DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
- emit(al | 13*B21 | type*B12 | type);
+ emit(al | 13 * B21 | type * B12 | type);
}
void Assembler::pop() { add(sp, sp, Operand(kPointerSize)); }
bool Assembler::IsMovT(Instr instr) {
instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
- ((kNumRegisters-1)*B12) | // mask out register
+ ((kNumRegisters - 1) * B12) | // mask out register
EncodeMovwImmediate(0xFFFF)); // mask out immediate value
return instr == kMovtPattern;
}
-
bool Assembler::IsMovW(Instr instr) {
instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
- ((kNumRegisters-1)*B12) | // mask out destination
+ ((kNumRegisters - 1) * B12) | // mask out destination
EncodeMovwImmediate(0xFFFF)); // mask out immediate value
return instr == kMovwPattern;
}
-
Instr Assembler::GetMovTPattern() { return kMovtPattern; }
-
Instr Assembler::GetMovWPattern() { return kMovwPattern; }
-
Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
DCHECK_LT(immediate, 0x10000);
return ((immediate & 0xF000) << 4) | (immediate & 0xFFF);
}
-
Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
instruction &= ~EncodeMovwImmediate(0xFFFF);
return instruction | EncodeMovwImmediate(immediate);
}
-
int Assembler::DecodeShiftImm(Instr instr) {
int rotate = Instruction::RotateValue(instr) * 2;
int immed8 = Instruction::Immed8Value(instr);
return base::bits::RotateRight32(immed8, rotate);
}
-
Instr Assembler::PatchShiftImm(Instr instr, int immed) {
uint32_t rotate_imm = 0;
uint32_t immed_8 = 0;
@@ -4995,24 +4720,20 @@ Instr Assembler::PatchShiftImm(Instr instr, int immed) {
return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
}
-
bool Assembler::IsNop(Instr instr, int type) {
DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
// Check for mov rx, rx where x = type.
- return instr == (al | 13*B21 | type*B12 | type);
+ return instr == (al | 13 * B21 | type * B12 | type);
}
-
bool Assembler::IsMovImmed(Instr instr) {
return (instr & kMovImmedMask) == kMovImmedPattern;
}
-
bool Assembler::IsOrrImmed(Instr instr) {
return (instr & kOrrImmedMask) == kOrrImmedPattern;
}
-
// static
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
@@ -5020,12 +4741,10 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
return FitsShifter(imm32, &dummy1, &dummy2, nullptr);
}
-
bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
return is_uint12(abs(imm32));
}
-
// Debugging.
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
@@ -5033,7 +4752,6 @@ void Assembler::RecordConstPool(int size) {
RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
-
void Assembler::GrowBuffer() {
DCHECK_EQ(buffer_start_, buffer_->start());
@@ -5072,7 +4790,6 @@ void Assembler::GrowBuffer() {
// to relocate any emitted relocation entries.
}
-
void Assembler::db(uint8_t data) {
// db is used to write raw data. The constant pool should be emitted or
// blocked before using db.
@@ -5082,7 +4799,6 @@ void Assembler::db(uint8_t data) {
pc_ += sizeof(uint8_t);
}
-
void Assembler::dd(uint32_t data) {
// dd is used to write raw data. The constant pool should be emitted or
// blocked before using dd.
@@ -5092,7 +4808,6 @@ void Assembler::dd(uint32_t data) {
pc_ += sizeof(uint32_t);
}
-
void Assembler::dq(uint64_t value) {
// dq is used to write raw data. The constant pool should be emitted or
// blocked before using dq.
@@ -5168,7 +4883,6 @@ void Assembler::BlockConstPoolFor(int instructions) {
}
}
-
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Some short sequence of instruction mustn't be broken up by constant pool
// emission, such sequences are protected by calls to BlockConstPoolFor and
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h
index c62c604177..4db825fa97 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/codegen/arm/assembler-arm.h
@@ -37,18 +37,18 @@
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
-#ifndef V8_ARM_ASSEMBLER_ARM_H_
-#define V8_ARM_ASSEMBLER_ARM_H_
+#ifndef V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
+#define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
#include <stdio.h>
#include <vector>
-#include "src/arm/constants-arm.h"
-#include "src/arm/register-arm.h"
-#include "src/assembler.h"
-#include "src/boxed-float.h"
-#include "src/constant-pool.h"
-#include "src/double.h"
+#include "src/codegen/arm/constants-arm.h"
+#include "src/codegen/arm/register-arm.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/constant-pool.h"
+#include "src/numbers/double.h"
+#include "src/utils/boxed-float.h"
namespace v8 {
namespace internal {
@@ -57,16 +57,16 @@ class SafepointTableBuilder;
// Coprocessor number
enum Coprocessor {
- p0 = 0,
- p1 = 1,
- p2 = 2,
- p3 = 3,
- p4 = 4,
- p5 = 5,
- p6 = 6,
- p7 = 7,
- p8 = 8,
- p9 = 9,
+ p0 = 0,
+ p1 = 1,
+ p2 = 2,
+ p3 = 3,
+ p4 = 4,
+ p5 = 5,
+ p6 = 6,
+ p7 = 7,
+ p8 = 8,
+ p9 = 9,
p10 = 10,
p11 = 11,
p12 = 12,
@@ -83,7 +83,10 @@ class V8_EXPORT_PRIVATE Operand {
public:
// immediate
V8_INLINE explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : rmode_(rmode) {
+ value_.immediate = immediate;
+ }
V8_INLINE static Operand Zero();
V8_INLINE explicit Operand(const ExternalReference& f);
explicit Operand(Handle<HeapObject> handle);
@@ -145,9 +148,7 @@ class V8_EXPORT_PRIVATE Operand {
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
- bool IsImmediate() const {
- return !rm_.is_valid();
- }
+ bool IsImmediate() const { return !rm_.is_valid(); }
HeapObjectRequest heap_object_request() const {
DCHECK(IsHeapObjectRequest());
@@ -156,8 +157,8 @@ class V8_EXPORT_PRIVATE Operand {
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
- rmode_ == RelocInfo::EMBEDDED_OBJECT ||
- rmode_ == RelocInfo::CODE_TARGET);
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
@@ -165,12 +166,11 @@ class V8_EXPORT_PRIVATE Operand {
Register rs() const { return rs_; }
ShiftOp shift_op() const { return shift_op_; }
-
private:
Register rm_ = no_reg;
Register rs_ = no_reg;
ShiftOp shift_op_;
- int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
@@ -201,8 +201,8 @@ class V8_EXPORT_PRIVATE MemOperand {
// [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
// [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
// [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
- explicit MemOperand(Register rn, Register rm,
- ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+ explicit MemOperand(Register rn, Register rm, ShiftOp shift_op, int shift_imm,
+ AddrMode am = Offset);
V8_INLINE static MemOperand PointerAddressFromSmiKey(Register array,
Register key,
AddrMode am = Offset) {
@@ -229,12 +229,12 @@ class V8_EXPORT_PRIVATE MemOperand {
}
private:
- Register rn_; // base
- Register rm_; // register offset
+ Register rn_; // base
+ Register rm_; // register offset
int32_t offset_; // valid if rm_ == no_reg
ShiftOp shift_op_;
int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
- AddrMode am_; // bits P, U, and W
+ AddrMode am_; // bits P, U, and W
friend class Assembler;
};
@@ -266,22 +266,28 @@ class V8_EXPORT_PRIVATE NeonMemOperand {
class NeonListOperand {
public:
explicit NeonListOperand(DoubleRegister base, int register_count = 1)
- : base_(base), register_count_(register_count) {}
+ : base_(base), register_count_(register_count) {}
explicit NeonListOperand(QwNeonRegister q_reg)
- : base_(q_reg.low()), register_count_(2) {}
+ : base_(q_reg.low()), register_count_(2) {}
DoubleRegister base() const { return base_; }
int register_count() { return register_count_; }
int length() const { return register_count_ - 1; }
NeonListType type() const {
switch (register_count_) {
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
// Fall through.
- case 1: return nlt_1;
- case 2: return nlt_2;
- case 3: return nlt_3;
- case 4: return nlt_4;
+ case 1:
+ return nlt_1;
+ case 2:
+ return nlt_2;
+ case 3:
+ return nlt_3;
+ case 4:
+ return nlt_4;
}
}
+
private:
DoubleRegister base_;
int register_count_;
@@ -301,9 +307,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
virtual ~Assembler();
- virtual void AbortedCodeGeneration() {
- pending_32_bit_constants_.clear();
- }
+ virtual void AbortedCodeGeneration() { pending_32_bit_constants_.clear(); }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@@ -355,14 +359,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- V8_INLINE static Address target_address_from_return_address(Address pc);
-
- // Given the address of the beginning of a call, return the address
- // in the instruction stream that the call will return from.
- V8_INLINE static Address return_address_from_call_start(Address pc);
-
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -404,9 +400,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
RelocInfo::Mode rmode = RelocInfo::NONE);
void bl(int branch_offset, Condition cond = al,
RelocInfo::Mode rmode = RelocInfo::NONE);
- void blx(int branch_offset); // v5 and above
+ void blx(int branch_offset); // v5 and above
void blx(Register target, Condition cond = al); // v5 and above
- void bx(Register target, Condition cond = al); // v5 and above, plus v4t
+ void bx(Register target, Condition cond = al); // v5 and above, plus v4t
// Convenience branch instructions using labels
void b(Label* L, Condition cond = al);
@@ -417,37 +413,37 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Data-processing instructions
- void and_(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
+ void and_(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
Condition cond = al);
- void eor(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
+ void eor(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
void eor(Register dst, Register src1, Register src2, SBit s = LeaveCC,
Condition cond = al);
- void sub(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void sub(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
+ void sub(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
+ void sub(Register dst, Register src1, Register src2, SBit s = LeaveCC,
+ Condition cond = al);
- void rsb(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
+ void rsb(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
- void add(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void add(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
+ void add(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
+ void add(Register dst, Register src1, Register src2, SBit s = LeaveCC,
+ Condition cond = al);
- void adc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
+ void adc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
- void sbc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
+ void sbc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
- void rsc(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
+ void rsc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
void tst(Register src1, const Operand& src2, Condition cond = al);
void tst(Register src1, Register src2, Condition cond = al);
@@ -461,13 +457,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cmn(Register src1, const Operand& src2, Condition cond = al);
- void orr(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
- void orr(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
+ void orr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
+ void orr(Register dst, Register src1, Register src2, SBit s = LeaveCC,
+ Condition cond = al);
- void mov(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
+ void mov(Register dst, const Operand& src, SBit s = LeaveCC,
+ Condition cond = al);
void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al);
// Load the position of the label relative to the generated code object
@@ -479,11 +475,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movw(Register reg, uint32_t immediate, Condition cond = al);
void movt(Register reg, uint32_t immediate, Condition cond = al);
- void bic(Register dst, Register src1, const Operand& src2,
- SBit s = LeaveCC, Condition cond = al);
+ void bic(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+ Condition cond = al);
- void mvn(Register dst, const Operand& src,
- SBit s = LeaveCC, Condition cond = al);
+ void mvn(Register dst, const Operand& src, SBit s = LeaveCC,
+ Condition cond = al);
// Shift instructions
@@ -504,13 +500,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
- void sdiv(Register dst, Register src1, Register src2,
- Condition cond = al);
+ void sdiv(Register dst, Register src1, Register src2, Condition cond = al);
void udiv(Register dst, Register src1, Register src2, Condition cond = al);
- void mul(Register dst, Register src1, Register src2,
- SBit s = LeaveCC, Condition cond = al);
+ void mul(Register dst, Register src1, Register src2, SBit s = LeaveCC,
+ Condition cond = al);
void smmla(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
@@ -562,8 +557,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void bfc(Register dst, int lsb, int width, Condition cond = al);
- void bfi(Register dst, Register src, int lsb, int width,
- Condition cond = al);
+ void bfi(Register dst, Register src, int lsb, int width, Condition cond = al);
void pkhbt(Register dst, Register src1, const Operand& src2,
Condition cond = al);
@@ -604,12 +598,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void strh(Register src, const MemOperand& dst, Condition cond = al);
void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
- void ldrd(Register dst1,
- Register dst2,
- const MemOperand& src, Condition cond = al);
- void strd(Register src1,
- Register src2,
- const MemOperand& dst, Condition cond = al);
+ void ldrd(Register dst1, Register dst2, const MemOperand& src,
+ Condition cond = al);
+ void strd(Register src1, Register src2, const MemOperand& dst,
+ Condition cond = al);
// Load literal from a pc relative address.
void ldr_pcrel(Register dst, int imm12, Condition cond = al);
@@ -633,8 +625,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
// Exception-generating instructions and debugging support
- void stop(const char* msg,
- Condition cond = al,
+ void stop(const char* msg, Condition cond = al,
int32_t code = kDefaultStopCode);
void bkpt(uint32_t imm16); // v5 and above
@@ -651,28 +642,25 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Coprocessor instructions
- void cdp(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
- int opcode_2, Condition cond = al);
+ void cdp(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn,
+ CRegister crm, int opcode_2, Condition cond = al);
- void cdp2(Coprocessor coproc, int opcode_1,
- CRegister crd, CRegister crn, CRegister crm,
+ void cdp2(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn,
+ CRegister crm,
int opcode_2); // v5 and above
- void mcr(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
+ void mcr(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
+ CRegister crm, int opcode_2 = 0, Condition cond = al);
- void mcr2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
+ void mcr2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
+ CRegister crm,
int opcode_2 = 0); // v5 and above
- void mrc(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
- int opcode_2 = 0, Condition cond = al);
+ void mrc(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
+ CRegister crm, int opcode_2 = 0, Condition cond = al);
- void mrc2(Coprocessor coproc, int opcode_1,
- Register rd, CRegister crn, CRegister crm,
+ void mrc2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn,
+ CRegister crm,
int opcode_2 = 0); // v5 and above
void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
@@ -688,215 +676,146 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Support for VFP.
// All these APIs support S0 to S31 and D0 to D31.
- void vldr(const DwVfpRegister dst,
- const Register base,
- int offset,
+ void vldr(const DwVfpRegister dst, const Register base, int offset,
const Condition cond = al);
- void vldr(const DwVfpRegister dst,
- const MemOperand& src,
+ void vldr(const DwVfpRegister dst, const MemOperand& src,
const Condition cond = al);
- void vldr(const SwVfpRegister dst,
- const Register base,
- int offset,
+ void vldr(const SwVfpRegister dst, const Register base, int offset,
const Condition cond = al);
- void vldr(const SwVfpRegister dst,
- const MemOperand& src,
+ void vldr(const SwVfpRegister dst, const MemOperand& src,
const Condition cond = al);
- void vstr(const DwVfpRegister src,
- const Register base,
- int offset,
+ void vstr(const DwVfpRegister src, const Register base, int offset,
const Condition cond = al);
- void vstr(const DwVfpRegister src,
- const MemOperand& dst,
+ void vstr(const DwVfpRegister src, const MemOperand& dst,
const Condition cond = al);
- void vstr(const SwVfpRegister src,
- const Register base,
- int offset,
+ void vstr(const SwVfpRegister src, const Register base, int offset,
const Condition cond = al);
- void vstr(const SwVfpRegister src,
- const MemOperand& dst,
+ void vstr(const SwVfpRegister src, const MemOperand& dst,
const Condition cond = al);
- void vldm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond = al);
+ void vldm(BlockAddrMode am, Register base, DwVfpRegister first,
+ DwVfpRegister last, Condition cond = al);
- void vstm(BlockAddrMode am,
- Register base,
- DwVfpRegister first,
- DwVfpRegister last,
- Condition cond = al);
+ void vstm(BlockAddrMode am, Register base, DwVfpRegister first,
+ DwVfpRegister last, Condition cond = al);
- void vldm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond = al);
+ void vldm(BlockAddrMode am, Register base, SwVfpRegister first,
+ SwVfpRegister last, Condition cond = al);
- void vstm(BlockAddrMode am,
- Register base,
- SwVfpRegister first,
- SwVfpRegister last,
- Condition cond = al);
+ void vstm(BlockAddrMode am, Register base, SwVfpRegister first,
+ SwVfpRegister last, Condition cond = al);
void vmov(const SwVfpRegister dst, Float32 imm);
- void vmov(const DwVfpRegister dst,
- Double imm,
+ void vmov(const DwVfpRegister dst, Double imm,
const Register extra_scratch = no_reg);
- void vmov(const SwVfpRegister dst,
- const SwVfpRegister src,
+ void vmov(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const DwVfpRegister src,
+ void vmov(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
- void vmov(const DwVfpRegister dst,
- const Register src1,
- const Register src2,
+ void vmov(const DwVfpRegister dst, const Register src1, const Register src2,
const Condition cond = al);
- void vmov(const Register dst1,
- const Register dst2,
- const DwVfpRegister src,
+ void vmov(const Register dst1, const Register dst2, const DwVfpRegister src,
const Condition cond = al);
- void vmov(const SwVfpRegister dst,
- const Register src,
+ void vmov(const SwVfpRegister dst, const Register src,
const Condition cond = al);
- void vmov(const Register dst,
- const SwVfpRegister src,
+ void vmov(const Register dst, const SwVfpRegister src,
const Condition cond = al);
- void vcvt_f64_s32(const DwVfpRegister dst,
- const SwVfpRegister src,
+ void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_f32_s32(const SwVfpRegister dst,
- const SwVfpRegister src,
+ void vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_f64_u32(const DwVfpRegister dst,
- const SwVfpRegister src,
+ void vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_f32_u32(const SwVfpRegister dst,
- const SwVfpRegister src,
+ void vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_s32_f32(const SwVfpRegister dst,
- const SwVfpRegister src,
+ void vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_u32_f32(const SwVfpRegister dst,
- const SwVfpRegister src,
+ void vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_s32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
+ void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_u32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
+ void vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_f64_f32(const DwVfpRegister dst,
- const SwVfpRegister src,
+ void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_f32_f64(const SwVfpRegister dst,
- const DwVfpRegister src,
+ void vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
- void vcvt_f64_s32(const DwVfpRegister dst,
- int fraction_bits,
+ void vcvt_f64_s32(const DwVfpRegister dst, int fraction_bits,
const Condition cond = al);
void vmrs(const Register dst, const Condition cond = al);
void vmsr(const Register dst, const Condition cond = al);
- void vneg(const DwVfpRegister dst,
- const DwVfpRegister src,
+ void vneg(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
void vneg(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
- void vabs(const DwVfpRegister dst,
- const DwVfpRegister src,
+ void vabs(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
void vabs(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
- void vadd(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
+ void vadd(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond = al);
void vadd(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
- void vsub(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
+ void vsub(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond = al);
void vsub(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
- void vmul(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
+ void vmul(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond = al);
void vmul(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
- void vmla(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
+ void vmla(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond = al);
void vmla(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
- void vmls(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
+ void vmls(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond = al);
void vmls(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
- void vdiv(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
+ void vdiv(const DwVfpRegister dst, const DwVfpRegister src1,
+ const DwVfpRegister src2, const Condition cond = al);
void vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2, const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const DwVfpRegister src2,
+ void vcmp(const DwVfpRegister src1, const DwVfpRegister src2,
const Condition cond = al);
void vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
const Condition cond = al);
- void vcmp(const DwVfpRegister src1,
- const double src2,
+ void vcmp(const DwVfpRegister src1, const double src2,
const Condition cond = al);
void vcmp(const SwVfpRegister src1, const float src2,
const Condition cond = al);
- void vmaxnm(const DwVfpRegister dst,
- const DwVfpRegister src1,
+ void vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2);
- void vmaxnm(const SwVfpRegister dst,
- const SwVfpRegister src1,
+ void vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2);
- void vminnm(const DwVfpRegister dst,
- const DwVfpRegister src1,
+ void vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
const DwVfpRegister src2);
- void vminnm(const SwVfpRegister dst,
- const SwVfpRegister src1,
+ void vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
const SwVfpRegister src2);
// VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
- void vsel(const Condition cond,
- const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2);
- void vsel(const Condition cond,
- const SwVfpRegister dst,
- const SwVfpRegister src1,
- const SwVfpRegister src2);
-
- void vsqrt(const DwVfpRegister dst,
- const DwVfpRegister src,
+ void vsel(const Condition cond, const DwVfpRegister dst,
+ const DwVfpRegister src1, const DwVfpRegister src2);
+ void vsel(const Condition cond, const SwVfpRegister dst,
+ const SwVfpRegister src1, const SwVfpRegister src2);
+
+ void vsqrt(const DwVfpRegister dst, const DwVfpRegister src,
const Condition cond = al);
void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
const Condition cond = al);
@@ -918,11 +837,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Support for NEON.
// All these APIs support D0 to D31 and Q0 to Q15.
- void vld1(NeonSize size,
- const NeonListOperand& dst,
+ void vld1(NeonSize size, const NeonListOperand& dst,
const NeonMemOperand& src);
- void vst1(NeonSize size,
- const NeonListOperand& src,
+ void vst1(NeonSize size, const NeonListOperand& src,
const NeonMemOperand& dst);
// dt represents the narrower type
void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
@@ -966,16 +883,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
QwNeonRegister src2);
void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
- void vmul(QwNeonRegister dst, QwNeonRegister src1,
- QwNeonRegister src2);
+ void vmul(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
- void vmin(NeonDataType dt, QwNeonRegister dst,
- QwNeonRegister src1, QwNeonRegister src2);
+ void vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
- void vmax(NeonDataType dt, QwNeonRegister dst,
- QwNeonRegister src1, QwNeonRegister src2);
+ void vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2);
void vpadd(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
void vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
@@ -1035,7 +951,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
};
- void nop(int type = 0); // 0 is the default non-marking type.
+ void nop(int type = 0); // 0 is the default non-marking type.
void push(Register src, Condition cond = al) {
str(src, MemOperand(sp, 4, NegPreIndex), cond);
@@ -1088,9 +1004,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockConstPool();
}
- ~BlockConstPoolScope() {
- assem_->EndBlockConstPool();
- }
+ ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
private:
Assembler* assem_;
@@ -1327,7 +1241,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_const_pool_before_; // Block emission before this pc offset.
+ int no_const_pool_before_; // Block emission before this pc offset.
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
@@ -1439,4 +1353,4 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_ASSEMBLER_ARM_H_
+#endif // V8_CODEGEN_ARM_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/codegen/arm/constants-arm.cc
index b50948fc36..ecde19bf9a 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/codegen/arm/constants-arm.cc
@@ -4,8 +4,7 @@
#if V8_TARGET_ARCH_ARM
-#include "src/arm/constants-arm.h"
-
+#include "src/codegen/arm/constants-arm.h"
namespace v8 {
namespace internal {
@@ -19,24 +18,22 @@ Float64 Instruction::DoubleImmedVmov() const {
//
// where B = ~b. Only the high 16 bits are affected.
uint64_t high16;
- high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
- high16 |= (0xFF * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
- high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
- high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
+ high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
+ high16 |= (0xFF * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
+ high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
+ high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
uint64_t imm = high16 << 48;
return Float64::FromBits(imm);
}
-
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumRegisters] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
};
-
// List of alias names which can be used when referring to ARM registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{10, "sl"}, {11, "r11"}, {12, "r12"}, {13, "r13"},
@@ -47,23 +44,18 @@ const Registers::RegisterAlias Registers::aliases_[] = {
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* VFPRegisters::names_[kNumVFPRegisters] = {
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
- "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
- "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
- "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
- "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
- "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
-};
-
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10",
+ "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21",
+ "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", "d0",
+ "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
+ "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
+ "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
const char* VFPRegisters::Name(int reg, bool is_double) {
DCHECK((0 <= reg) && (reg < kNumVFPRegisters));
return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
}
-
int VFPRegisters::Number(const char* name, bool* is_double) {
for (int i = 0; i < kNumVFPRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
@@ -81,7 +73,6 @@ int VFPRegisters::Number(const char* name, bool* is_double) {
return kNoRegister;
}
-
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
@@ -103,7 +94,6 @@ int Registers::Number(const char* name) {
return kNoRegister;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/codegen/arm/constants-arm.h
index 48eaa3484a..66eea2180b 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/codegen/arm/constants-arm.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_CONSTANTS_ARM_H_
-#define V8_ARM_CONSTANTS_ARM_H_
+#ifndef V8_CODEGEN_ARM_CONSTANTS_ARM_H_
+#define V8_CODEGEN_ARM_CONSTANTS_ARM_H_
#include <stdint.h>
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/boxed-float.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/common/globals.h"
+#include "src/utils/boxed-float.h"
+#include "src/utils/utils.h"
// ARM EABI is required.
#if defined(__arm__) && !defined(__ARM_EABI__)
@@ -76,37 +76,35 @@ constexpr int kRootRegisterBias = 4095;
enum Condition {
kNoCondition = -1,
- eq = 0 << 28, // Z set Equal.
- ne = 1 << 28, // Z clear Not equal.
- cs = 2 << 28, // C set Unsigned higher or same.
- cc = 3 << 28, // C clear Unsigned lower.
- mi = 4 << 28, // N set Negative.
- pl = 5 << 28, // N clear Positive or zero.
- vs = 6 << 28, // V set Overflow.
- vc = 7 << 28, // V clear No overflow.
- hi = 8 << 28, // C set, Z clear Unsigned higher.
- ls = 9 << 28, // C clear or Z set Unsigned lower or same.
- ge = 10 << 28, // N == V Greater or equal.
- lt = 11 << 28, // N != V Less than.
- gt = 12 << 28, // Z clear, N == V Greater than.
- le = 13 << 28, // Z set or N != V Less then or equal
- al = 14 << 28, // Always.
+ eq = 0 << 28, // Z set Equal.
+ ne = 1 << 28, // Z clear Not equal.
+ cs = 2 << 28, // C set Unsigned higher or same.
+ cc = 3 << 28, // C clear Unsigned lower.
+ mi = 4 << 28, // N set Negative.
+ pl = 5 << 28, // N clear Positive or zero.
+ vs = 6 << 28, // V set Overflow.
+ vc = 7 << 28, // V clear No overflow.
+ hi = 8 << 28, // C set, Z clear Unsigned higher.
+ ls = 9 << 28, // C clear or Z set Unsigned lower or same.
+ ge = 10 << 28, // N == V Greater or equal.
+ lt = 11 << 28, // N != V Less than.
+ gt = 12 << 28, // Z clear, N == V Greater than.
+ le = 13 << 28, // Z set or N != V Less then or equal
+ al = 14 << 28, // Always.
kSpecialCondition = 15 << 28, // Special condition (refer to section A3.2.1).
kNumberOfConditions = 16,
// Aliases.
- hs = cs, // C set Unsigned higher or same.
- lo = cc // C clear Unsigned lower.
+ hs = cs, // C set Unsigned higher or same.
+ lo = cc // C clear Unsigned lower.
};
-
inline Condition NegateCondition(Condition cond) {
DCHECK(cond != al);
return static_cast<Condition>(cond ^ ne);
}
-
// -----------------------------------------------------------------------------
// Instructions encoding.
@@ -114,22 +112,21 @@ inline Condition NegateCondition(Condition cond) {
// representing instructions from usual 32 bit values.
// Instruction objects are pointers to 32bit values, and provide methods to
// access the various ISA fields.
-typedef int32_t Instr;
-
+using Instr = int32_t;
// Opcodes for Data-processing instructions (instructions with a type 0 and 1)
// as defined in section A3.4
enum Opcode {
- AND = 0 << 21, // Logical AND.
- EOR = 1 << 21, // Logical Exclusive OR.
- SUB = 2 << 21, // Subtract.
- RSB = 3 << 21, // Reverse Subtract.
- ADD = 4 << 21, // Add.
- ADC = 5 << 21, // Add with Carry.
- SBC = 6 << 21, // Subtract with Carry.
- RSC = 7 << 21, // Reverse Subtract with Carry.
- TST = 8 << 21, // Test.
- TEQ = 9 << 21, // Test Equivalence.
+ AND = 0 << 21, // Logical AND.
+ EOR = 1 << 21, // Logical Exclusive OR.
+ SUB = 2 << 21, // Subtract.
+ RSB = 3 << 21, // Reverse Subtract.
+ ADD = 4 << 21, // Add.
+ ADC = 5 << 21, // Add with Carry.
+ SBC = 6 << 21, // Subtract with Carry.
+ RSC = 7 << 21, // Reverse Subtract with Carry.
+ TST = 8 << 21, // Test.
+ TEQ = 9 << 21, // Test Equivalence.
CMP = 10 << 21, // Compare.
CMN = 11 << 21, // Compare Negated.
ORR = 12 << 21, // Logical (inclusive) OR.
@@ -138,20 +135,18 @@ enum Opcode {
MVN = 15 << 21 // Move Not.
};
-
// The bits for bit 7-4 for some type 0 miscellaneous instructions.
enum MiscInstructionsBits74 {
// With bits 22-21 01.
- BX = 1 << 4,
- BXJ = 2 << 4,
- BLX = 3 << 4,
- BKPT = 7 << 4,
+ BX = 1 << 4,
+ BXJ = 2 << 4,
+ BLX = 3 << 4,
+ BKPT = 7 << 4,
// With bits 22-21 11.
- CLZ = 1 << 4
+ CLZ = 1 << 4
};
-
// Instruction encoding bits and masks.
enum {
H = 1 << 5, // Halfword (or byte).
@@ -216,30 +211,24 @@ enum BarrierOption {
SY = 0xf,
};
-
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.
// Condition code updating mode.
enum SBit {
- SetCC = 1 << 20, // Set condition code.
- LeaveCC = 0 << 20 // Leave condition code unchanged.
+ SetCC = 1 << 20, // Set condition code.
+ LeaveCC = 0 << 20 // Leave condition code unchanged.
};
-
// Status register selection.
-enum SRegister {
- CPSR = 0 << 22,
- SPSR = 1 << 22
-};
-
+enum SRegister { CPSR = 0 << 22, SPSR = 1 << 22 };
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum ShiftOp {
- LSL = 0 << 5, // Logical shift left.
- LSR = 1 << 5, // Logical shift right.
- ASR = 2 << 5, // Arithmetic shift right.
- ROR = 3 << 5, // Rotate right.
+ LSL = 0 << 5, // Logical shift left.
+ LSR = 1 << 5, // Logical shift right.
+ ASR = 2 << 5, // Arithmetic shift right.
+ ROR = 3 << 5, // Rotate right.
// RRX is encoded as ROR with shift_imm == 0.
// Use a special code to make the distinction. The RRX ShiftOp is only used
@@ -249,7 +238,6 @@ enum ShiftOp {
kNumberOfShifts = 4
};
-
// Status register fields.
enum SRegisterField {
CPSR_c = CPSR | 1 << 16,
@@ -263,47 +251,45 @@ enum SRegisterField {
};
// Status register field mask (or'ed SRegisterField enum values).
-typedef uint32_t SRegisterFieldMask;
-
+using SRegisterFieldMask = uint32_t;
// Memory operand addressing mode.
enum AddrMode {
// Bit encoding P U W.
- Offset = (8|4|0) << 21, // Offset (without writeback to base).
- PreIndex = (8|4|1) << 21, // Pre-indexed addressing with writeback.
- PostIndex = (0|4|0) << 21, // Post-indexed addressing with writeback.
- NegOffset = (8|0|0) << 21, // Negative offset (without writeback to base).
- NegPreIndex = (8|0|1) << 21, // Negative pre-indexed with writeback.
- NegPostIndex = (0|0|0) << 21 // Negative post-indexed with writeback.
+ Offset = (8 | 4 | 0) << 21, // Offset (without writeback to base).
+ PreIndex = (8 | 4 | 1) << 21, // Pre-indexed addressing with writeback.
+ PostIndex = (0 | 4 | 0) << 21, // Post-indexed addressing with writeback.
+ NegOffset =
+ (8 | 0 | 0) << 21, // Negative offset (without writeback to base).
+ NegPreIndex = (8 | 0 | 1) << 21, // Negative pre-indexed with writeback.
+ NegPostIndex = (0 | 0 | 0) << 21 // Negative post-indexed with writeback.
};
-
// Load/store multiple addressing mode.
enum BlockAddrMode {
// Bit encoding P U W .
- da = (0|0|0) << 21, // Decrement after.
- ia = (0|4|0) << 21, // Increment after.
- db = (8|0|0) << 21, // Decrement before.
- ib = (8|4|0) << 21, // Increment before.
- da_w = (0|0|1) << 21, // Decrement after with writeback to base.
- ia_w = (0|4|1) << 21, // Increment after with writeback to base.
- db_w = (8|0|1) << 21, // Decrement before with writeback to base.
- ib_w = (8|4|1) << 21, // Increment before with writeback to base.
+ da = (0 | 0 | 0) << 21, // Decrement after.
+ ia = (0 | 4 | 0) << 21, // Increment after.
+ db = (8 | 0 | 0) << 21, // Decrement before.
+ ib = (8 | 4 | 0) << 21, // Increment before.
+ da_w = (0 | 0 | 1) << 21, // Decrement after with writeback to base.
+ ia_w = (0 | 4 | 1) << 21, // Increment after with writeback to base.
+ db_w = (8 | 0 | 1) << 21, // Decrement before with writeback to base.
+ ib_w = (8 | 4 | 1) << 21, // Increment before with writeback to base.
// Alias modes for comparison when writeback does not matter.
- da_x = (0|0|0) << 21, // Decrement after.
- ia_x = (0|4|0) << 21, // Increment after.
- db_x = (8|0|0) << 21, // Decrement before.
- ib_x = (8|4|0) << 21, // Increment before.
+ da_x = (0 | 0 | 0) << 21, // Decrement after.
+ ia_x = (0 | 4 | 0) << 21, // Increment after.
+ db_x = (8 | 0 | 0) << 21, // Decrement before.
+ ib_x = (8 | 4 | 0) << 21, // Increment before.
- kBlockAddrModeMask = (8|4|1) << 21
+ kBlockAddrModeMask = (8 | 4 | 1) << 21
};
-
// Coprocessor load/store operand size.
enum LFlag {
- Long = 1 << 22, // Long load/store coprocessor.
- Short = 0 << 22 // Short load/store coprocessor.
+ Long = 1 << 22, // Long load/store coprocessor.
+ Short = 0 << 22 // Short load/store coprocessor.
};
// Neon sizes.
@@ -333,12 +319,7 @@ inline NeonSize NeonDataTypeToSize(NeonDataType dt) {
return static_cast<NeonSize>(NeonSz(dt));
}
-enum NeonListType {
- nlt_1 = 0x7,
- nlt_2 = 0xA,
- nlt_3 = 0x6,
- nlt_4 = 0x2
-};
+enum NeonListType { nlt_1 = 0x7, nlt_2 = 0xA, nlt_3 = 0x6, nlt_4 = 0x2 };
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
@@ -357,8 +338,7 @@ enum SoftwareInterruptCodes {
};
const uint32_t kStopCodeMask = kStopCode - 1;
const uint32_t kMaxStopCode = kStopCode - 1;
-const int32_t kDefaultStopCode = -1;
-
+const int32_t kDefaultStopCode = -1;
// Type of VFP register. Determines register encoding.
enum VFPRegPrecision {
@@ -368,10 +348,7 @@ enum VFPRegPrecision {
};
// VFP FPSCR constants.
-enum VFPConversionMode {
- kFPSCRRounding = 0,
- kDefaultRoundToZero = 1
-};
+enum VFPConversionMode { kFPSCRRounding = 0, kDefaultRoundToZero = 1 };
// This mask does not include the "inexact" or "input denormal" cumulative
// exceptions flags, because we usually don't want to check for it.
@@ -388,13 +365,12 @@ const uint32_t kVFPZConditionFlagBit = 1 << 30;
const uint32_t kVFPCConditionFlagBit = 1 << 29;
const uint32_t kVFPVConditionFlagBit = 1 << 28;
-
// VFP rounding modes. See ARM DDI 0406B Page A2-29.
enum VFPRoundingMode {
- RN = 0 << 22, // Round to Nearest.
- RP = 1 << 22, // Round towards Plus Infinity.
- RM = 2 << 22, // Round towards Minus Infinity.
- RZ = 3 << 22, // Round towards zero.
+ RN = 0 << 22, // Round to Nearest.
+ RP = 1 << 22, // Round towards Plus Infinity.
+ RM = 2 << 22, // Round towards Minus Infinity.
+ RZ = 3 << 22, // Round towards zero.
// Aliases.
kRoundToNearest = RN,
@@ -421,7 +397,6 @@ enum Hint { no_hint };
// Hints are not used on the arm. Negating is trivial.
inline Hint NegateHint(Hint ignored) { return no_hint; }
-
// -----------------------------------------------------------------------------
// Instruction abstraction.
@@ -470,9 +445,7 @@ class Instruction {
// Extract a single bit from the instruction bits and return it as bit 0 in
// the result.
- inline int Bit(int nr) const {
- return (InstructionBits() >> nr) & 1;
- }
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
// Extract a bit field <hi:lo> from the instruction bits and return it in the
// least-significant bits of the result.
@@ -489,9 +462,7 @@ class Instruction {
// Extract a single bit from the instruction bits and return it as bit 0 in
// the result.
- static inline int Bit(Instr instr, int nr) {
- return (instr >> nr) & 1;
- }
+ static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
// Extract a bit field <hi:lo> from the instruction bits and return it in the
// least-significant bits of the result.
@@ -516,7 +487,6 @@ class Instruction {
// e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
// 0xC0810002 ConditionField(instr) will return 0xC.
-
// Generally applicable fields
inline int ConditionValue() const { return Bits(31, 28); }
inline Condition ConditionField() const {
@@ -564,14 +534,12 @@ class Instruction {
}
// Fields used in Data processing instructions
- inline int OpcodeValue() const {
- return static_cast<Opcode>(Bits(24, 21));
- }
+ inline int OpcodeValue() const { return static_cast<Opcode>(Bits(24, 21)); }
inline Opcode OpcodeField() const {
return static_cast<Opcode>(BitField(24, 21));
}
inline int SValue() const { return Bit(20); }
- // with register
+ // with register
inline int RmValue() const { return Bits(3, 0); }
DECLARE_STATIC_ACCESSOR(RmValue)
inline int ShiftValue() const { return static_cast<ShiftOp>(Bits(6, 5)); }
@@ -581,28 +549,29 @@ class Instruction {
inline int RegShiftValue() const { return Bit(4); }
inline int RsValue() const { return Bits(11, 8); }
inline int ShiftAmountValue() const { return Bits(11, 7); }
- // with immediate
+ // with immediate
inline int RotateValue() const { return Bits(11, 8); }
DECLARE_STATIC_ACCESSOR(RotateValue)
inline int Immed8Value() const { return Bits(7, 0); }
DECLARE_STATIC_ACCESSOR(Immed8Value)
inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const {
- return Immed4Value() << 12 | Offset12Value(); }
+ return Immed4Value() << 12 | Offset12Value();
+ }
DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue)
// Fields used in Load/Store instructions
inline int PUValue() const { return Bits(24, 23); }
inline int PUField() const { return BitField(24, 23); }
- inline int BValue() const { return Bit(22); }
- inline int WValue() const { return Bit(21); }
- inline int LValue() const { return Bit(20); }
- // with register uses same fields as Data processing instructions above
- // with immediate
+ inline int BValue() const { return Bit(22); }
+ inline int WValue() const { return Bit(21); }
+ inline int LValue() const { return Bit(20); }
+ // with register uses same fields as Data processing instructions above
+ // with immediate
inline int Offset12Value() const { return Bits(11, 0); }
- // multiple
+ // multiple
inline int RlistValue() const { return Bits(15, 0); }
- // extra loads and stores
+ // extra loads and stores
inline int SignValue() const { return Bit(6); }
inline int HValue() const { return Bit(5); }
inline int ImmedHValue() const { return Bits(11, 8); }
@@ -640,10 +609,10 @@ class Instruction {
inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
// Test for miscellaneous instructions encodings of type 0 instructions.
- inline bool IsMiscType0() const { return (Bit(24) == 1)
- && (Bit(23) == 0)
- && (Bit(20) == 0)
- && ((Bit(7) == 0)); }
+ inline bool IsMiscType0() const {
+ return (Bit(24) == 1) && (Bit(23) == 0) && (Bit(20) == 0) &&
+ ((Bit(7) == 0));
+ }
// Test for nop-like instructions which fall under type 1.
inline bool IsNopLikeType1() const { return Bits(24, 8) == 0x120F0; }
@@ -654,13 +623,13 @@ class Instruction {
}
// Special accessors that test for existence of a value.
- inline bool HasS() const { return SValue() == 1; }
- inline bool HasB() const { return BValue() == 1; }
- inline bool HasW() const { return WValue() == 1; }
- inline bool HasL() const { return LValue() == 1; }
- inline bool HasU() const { return UValue() == 1; }
+ inline bool HasS() const { return SValue() == 1; }
+ inline bool HasB() const { return BValue() == 1; }
+ inline bool HasW() const { return WValue() == 1; }
+ inline bool HasL() const { return LValue() == 1; }
+ inline bool HasU() const { return UValue() == 1; }
inline bool HasSign() const { return SignValue() == 1; }
- inline bool HasH() const { return HValue() == 1; }
+ inline bool HasH() const { return HValue() == 1; }
inline bool HasLink() const { return LinkValue() == 1; }
// Decode the double immediate from a vmov instruction.
@@ -674,7 +643,6 @@ class Instruction {
return reinterpret_cast<Instruction*>(pc);
}
-
private:
// Join split register codes, depending on register precision.
// four_bit is the position of the least-significant bit of the four
@@ -698,7 +666,6 @@ class Instruction {
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
-
// Helper functions for converting between register numbers and names.
class Registers {
public:
@@ -739,4 +706,4 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 32;
} // namespace internal
} // namespace v8
-#endif // V8_ARM_CONSTANTS_ARM_H_
+#endif // V8_CODEGEN_ARM_CONSTANTS_ARM_H_
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/codegen/arm/cpu-arm.cc
index c0898bca86..868f360d5e 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/codegen/arm/cpu-arm.cc
@@ -6,7 +6,7 @@
#ifdef __arm__
#ifdef __QNXNTO__
#include <sys/mman.h> // for cache flushing.
-#undef MAP_TYPE
+#undef MAP_TYPE // NOLINT
#else
#include <sys/syscall.h> // for cache flushing.
#endif
@@ -14,15 +14,14 @@
#if V8_TARGET_ARCH_ARM
-#include "src/cpu-features.h"
+#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
-// The inlining of this seems to trigger an LTO bug that clobbers a register on
-// arm, see https://crbug.com/952759#c6.
-__attribute__((noinline)) void CpuFeatures::FlushICache(void* start,
- size_t size) {
+// The inlining of this seems to trigger an LTO bug that clobbers a register,
+// see https://crbug.com/952759 and https://bugs.llvm.org/show_bug.cgi?id=41575.
+V8_NOINLINE void CpuFeatures::FlushICache(void* start, size_t size) {
#if !defined(USE_SIMULATOR)
#if V8_OS_QNX
msync(start, size, MS_SYNC | MS_INVALIDATE_ICACHE);
@@ -44,21 +43,21 @@ __attribute__((noinline)) void CpuFeatures::FlushICache(void* start,
// Use a different variant of the asm with GCC because some versions doesn't
// support r7 as an asm input.
asm volatile(
- // This assembly works for both ARM and Thumb targets.
+ // This assembly works for both ARM and Thumb targets.
- // Preserve r7; it is callee-saved, and GCC uses it as a frame pointer for
- // Thumb targets.
- " push {r7}\n"
- // r0 = beg
- // r1 = end
- // r2 = flags (0)
- " ldr r7, =%c[scno]\n" // r7 = syscall number
- " svc 0\n"
+ // Preserve r7; it is callee-saved, and GCC uses it as a frame pointer for
+ // Thumb targets.
+ " push {r7}\n"
+ // r0 = beg
+ // r1 = end
+ // r2 = flags (0)
+ " ldr r7, =%c[scno]\n" // r7 = syscall number
+ " svc 0\n"
- " pop {r7}\n"
- :
- : "r" (beg), "r" (end), "r" (flg), [scno] "i" (__ARM_NR_cacheflush)
- : "memory");
+ " pop {r7}\n"
+ :
+ : "r"(beg), "r"(end), "r"(flg), [scno] "i"(__ARM_NR_cacheflush)
+ : "memory");
#endif
#endif
#endif // !USE_SIMULATOR
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
index 5acfe1ba0a..575fd27805 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc
@@ -4,9 +4,9 @@
#if V8_TARGET_ARCH_ARM
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
@@ -74,7 +74,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 56f323a32f..bcda320f8b 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -6,31 +6,31 @@
#if V8_TARGET_ARCH_ARM
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/bootstrapper.h"
-#include "src/callable.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
-#include "src/double.h"
-#include "src/external-reference-table.h"
-#include "src/frames-inl.h"
+#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/register-configuration.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/numbers/double.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
-#include "src/arm/macro-assembler-arm.h"
+#include "src/codegen/arm/macro-assembler-arm.h"
#endif
namespace v8 {
@@ -535,7 +535,6 @@ void MacroAssembler::Mls(Register dst, Register src1, Register src2,
}
}
-
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.IsRegister() && !src2.MustOutputRelocInfo(this) &&
@@ -547,13 +546,12 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
base::bits::IsPowerOfTwo(src2.immediate() + 1)) {
CpuFeatureScope scope(this, ARMv7);
ubfx(dst, src1, 0,
- WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
+ WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
} else {
and_(dst, src1, src2, LeaveCC, cond);
}
}
-
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
@@ -569,7 +567,6 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
}
}
-
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
@@ -590,7 +587,6 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
}
}
-
void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
Condition cond) {
DCHECK_LT(lsb, 32);
@@ -610,9 +606,8 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), cond);
}
-
void MacroAssembler::RecordWriteField(Register object, int offset,
- Register value, Register dst,
+ Register value,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
@@ -630,26 +625,21 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so so offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
- add(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- tst(dst, Operand(kPointerSize - 1));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ add(scratch, object, Operand(offset - kHeapObjectTag));
+ tst(scratch, Operand(kPointerSize - 1));
b(eq, &ok);
stop("Unaligned cell in write barrier");
bind(&ok);
}
- RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
+ save_fp, remembered_set_action, OMIT_SMI_CHECK);
bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
- mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
- }
}
void TurboAssembler::SaveRegisters(RegList registers) {
@@ -675,7 +665,7 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
ldm(ia_w, sp, regs);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode) {
EphemeronKeyBarrierDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
@@ -689,7 +679,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
Register fp_mode_parameter(
descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
- MovePair(object_parameter, object, slot_parameter, address);
+ MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
RelocInfo::CODE_TARGET);
@@ -697,26 +687,24 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
}
void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ Register object, Operand offset, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode) {
CallRecordWriteStub(
- object, address, remembered_set_action, fp_mode,
+ object, offset, remembered_set_action, fp_mode,
isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
kNullAddress);
}
void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Address wasm_target) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Register object, Operand offset, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target) {
+ CallRecordWriteStub(object, offset, remembered_set_action, fp_mode,
Handle<Code>::null(), wasm_target);
}
void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Handle<Code> code_target, Address wasm_target) {
+ Register object, Operand offset, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target, Address wasm_target) {
DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. always emit remember set and save FP registers in RecordWriteStub. If
@@ -737,7 +725,7 @@ void TurboAssembler::CallRecordWriteStub(
Register fp_mode_parameter(
descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
- MovePair(object_parameter, object, slot_parameter, address);
+ MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
@@ -750,20 +738,54 @@ void TurboAssembler::CallRecordWriteStub(
RestoreRegisters(registers);
}
-// Will clobber 3 registers: object, address, and value. The register 'object'
-// contains a heap object pointer. The heap object tag is shifted away.
-// A scratch register also needs to be available.
-void MacroAssembler::RecordWrite(Register object, Register address,
+void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset) {
+ DCHECK_NE(dst_object, dst_slot);
+ DCHECK(offset.IsRegister() || offset.IsImmediate());
+ // If `offset` is a register, it cannot overlap with `object`.
+ DCHECK_IMPLIES(offset.IsRegister(), offset.rm() != object);
+
+ // If the slot register does not overlap with the object register, we can
+ // overwrite it.
+ if (dst_slot != object) {
+ add(dst_slot, object, offset);
+ Move(dst_object, object);
+ return;
+ }
+
+ DCHECK_EQ(dst_slot, object);
+
+ // If the destination object register does not overlap with the offset
+ // register, we can overwrite it.
+ if (!offset.IsRegister() || (offset.rm() != dst_object)) {
+ Move(dst_object, dst_slot);
+ add(dst_slot, dst_slot, offset);
+ return;
+ }
+
+ DCHECK_EQ(dst_object, offset.rm());
+
+ // We only have `dst_slot` and `dst_object` left as distinct registers so we
+ // have to swap them. We write this as a add+sub sequence to avoid using a
+ // scratch register.
+ add(dst_slot, dst_slot, dst_object);
+ sub(dst_object, dst_slot, dst_object);
+}
+
+// The register 'object' contains a heap object pointer. The heap object tag is
+// shifted away. A scratch register also needs to be available.
+void MacroAssembler::RecordWrite(Register object, Operand offset,
Register value, LinkRegisterStatus lr_status,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- DCHECK(object != value);
+ DCHECK_NE(object, value);
if (emit_debug_code()) {
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- ldr(scratch, MemOperand(address));
+ add(scratch, object, offset);
+ ldr(scratch, MemOperand(scratch));
cmp(scratch, value);
}
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
@@ -782,32 +804,21 @@ void MacroAssembler::RecordWrite(Register object, Register address,
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
- CheckPageFlag(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- eq,
+ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &done);
+ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
&done);
// Record the actual write.
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+ CallRecordWriteStub(object, offset, remembered_set_action, fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
}
bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
- mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
- }
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
@@ -828,32 +839,14 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) {
void TurboAssembler::PushStandardFrame(Register function_reg) {
DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
- stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
- fp.bit() | lr.bit());
+ stm(db_w, sp,
+ (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() | fp.bit() |
+ lr.bit());
int offset = -StandardFrameConstants::kContextOffset;
offset += function_reg.is_valid() ? kPointerSize : 0;
add(fp, sp, Operand(offset));
}
-
-// Push and pop all registers that can hold pointers.
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of contiguous register values starting with r0.
- DCHECK_EQ(kSafepointSavedRegisters, (1 << kNumSafepointSavedRegisters) - 1);
- // Safepoints expect a block of kNumSafepointRegisters values on the
- // stack, so adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK_GE(num_unsaved, 0);
- sub(sp, sp, Operand(num_unsaved * kPointerSize));
- stm(db_w, sp, kSafepointSavedRegisters);
-}
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ldm(ia_w, sp, kSafepointSavedRegisters);
- add(sp, sp, Operand(num_unsaved * kPointerSize));
-}
-
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
@@ -1307,6 +1300,44 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type) {
return frame_ends;
}
+#ifdef V8_OS_WIN
+void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
+ // "Functions that allocate 4 KB or more on the stack must ensure that each
+ // page prior to the final page is touched in order." Source:
+ // https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions?view=vs-2019#stack
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
+ Label check_offset;
+ Label touch_next_page;
+ jmp(&check_offset);
+ bind(&touch_next_page);
+ sub(sp, sp, Operand(kStackPageSize));
+ // Just to touch the page, before we increment further.
+ vldr(scratch, MemOperand(sp));
+ sub(bytes_scratch, bytes_scratch, Operand(kStackPageSize));
+
+ bind(&check_offset);
+ cmp(bytes_scratch, Operand(kStackPageSize));
+ b(gt, &touch_next_page);
+
+ sub(sp, sp, bytes_scratch);
+}
+
+void TurboAssembler::AllocateStackSpace(int bytes) {
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = no_dreg;
+ while (bytes > kStackPageSize) {
+ if (scratch == no_dreg) {
+ scratch = temps.AcquireD();
+ }
+ sub(sp, sp, Operand(kStackPageSize));
+ vldr(scratch, MemOperand(sp));
+ bytes -= kStackPageSize;
+ }
+ sub(sp, sp, Operand(bytes));
+}
+#endif
+
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
@@ -1347,7 +1378,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Reserve place for the return address and stack space and align the frame
// preparing for calling the runtime function.
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
+ AllocateStackSpace((stack_space + 1) * kPointerSize);
if (frame_alignment > 0) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
@@ -1366,7 +1397,7 @@ int TurboAssembler::ActivationFrameAlignment() {
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
return base::OS::ActivationFrameAlignment();
-#else // V8_HOST_ARCH_ARM
+#else // V8_HOST_ARCH_ARM
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
@@ -1426,7 +1457,6 @@ void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) {
}
}
-
// On ARM this is just a synonym to make the purpose clear.
void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) {
MovFromFloatResult(dst);
@@ -1710,7 +1740,6 @@ void MacroAssembler::PushStackHandler() {
str(sp, MemOperand(r6));
}
-
void MacroAssembler::PopStackHandler() {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -1722,11 +1751,8 @@ void MacroAssembler::PopStackHandler() {
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
}
-
-void MacroAssembler::CompareObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type) {
+void MacroAssembler::CompareObjectType(Register object, Register map,
+ Register type_reg, InstanceType type) {
UseScratchRegisterScope temps(this);
const Register temp = type_reg == no_reg ? temps.Acquire() : type_reg;
@@ -1734,9 +1760,7 @@ void MacroAssembler::CompareObjectType(Register object,
CompareInstanceType(map, temp, type);
}
-
-void MacroAssembler::CompareInstanceType(Register map,
- Register type_reg,
+void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
@@ -1764,16 +1788,6 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
b(ls, on_in_range);
}
-void MacroAssembler::TryDoubleToInt32Exact(Register result,
- DwVfpRegister double_input,
- LowDwVfpRegister double_scratch) {
- DCHECK(double_input != double_scratch);
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
- vcvt_f64_s32(double_scratch, double_scratch.low());
- VFPCompareAndSetFlags(double_input, double_scratch);
-}
-
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DwVfpRegister double_input,
Label* done) {
@@ -1809,7 +1823,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
// If we fell through then inline version didn't succeed - call stub instead.
push(lr);
- sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
+ AllocateStackSpace(kDoubleSize); // Put input on stack.
vstr(double_input, MemOperand(sp, 0));
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
@@ -1838,8 +1852,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
CallCodeObject(centry);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments,
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// All parameters are on the stack. r0 has the return value after call.
@@ -1908,7 +1921,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -1921,8 +1933,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
- if (emit_debug_code())
- Check(cond, reason);
+ if (emit_debug_code()) Check(cond, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
@@ -1987,7 +1998,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
ldr(dst, ContextMemOperand(dst, index));
}
-
void TurboAssembler::InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
mov(kRootRegister, Operand(isolate_root));
@@ -2001,13 +2011,6 @@ void MacroAssembler::SmiTag(Register dst, Register src, SBit s) {
add(dst, src, Operand(src), s);
}
-void MacroAssembler::UntagAndJumpIfSmi(
- Register dst, Register src, Label* smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- SmiUntag(dst, src, SetCC);
- b(cc, smi_case); // Shifter carry is not set for a smi.
-}
-
void MacroAssembler::SmiTst(Register value) {
tst(value, Operand(kSmiTagMask));
}
@@ -2032,15 +2035,6 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
b(ne, not_smi_label);
}
-void MacroAssembler::JumpIfEitherSmi(Register reg1,
- Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- tst(reg1, Operand(kSmiTagMask));
- tst(reg2, Operand(kSmiTagMask), ne);
- b(eq, on_either_smi);
-}
-
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2049,7 +2043,6 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2084,7 +2077,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
-
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2140,7 +2132,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
-
void TurboAssembler::CheckFor32DRegs(Register scratch) {
Move(scratch, ExternalReference::cpu_features());
ldr(scratch, MemOperand(scratch));
@@ -2330,20 +2321,20 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
int num_double_arguments,
Register scratch) {
int frame_alignment = ActivationFrameAlignment();
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
UseScratchRegisterScope temps(this);
if (!scratch.is_valid()) scratch = temps.Acquire();
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
mov(scratch, sp);
- sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ AllocateStackSpace((stack_passed_arguments + 1) * kPointerSize);
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
and_(sp, sp, Operand(-frame_alignment));
str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else if (stack_passed_arguments > 0) {
- sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ AllocateStackSpace(stack_passed_arguments * kPointerSize);
}
}
@@ -2354,7 +2345,6 @@ void TurboAssembler::MovToFloatParameter(DwVfpRegister src) {
}
}
-
// On ARM this is just a synonym to make the purpose clear.
void TurboAssembler::MovToFloatResult(DwVfpRegister src) {
MovToFloatParameter(src);
@@ -2449,8 +2439,8 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Pop(scratch1);
}
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (ActivationFrameAlignment() > kPointerSize) {
ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
@@ -2458,8 +2448,10 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
-void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
- Condition cc, Label* condition_met) {
+void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc,
+ Label* condition_met) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
@@ -2467,11 +2459,8 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
b(cc, condition_met);
}
-Register GetRegisterThatIsNotOneOf(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5,
Register reg6) {
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index ce05b98f88..4f497dcea4 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -6,13 +6,13 @@
#error This header must be included via macro-assembler.h
#endif
-#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
-#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
+#ifndef V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_
+#define V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_
-#include "src/arm/assembler-arm.h"
-#include "src/bailout-reason.h"
-#include "src/contexts.h"
-#include "src/globals.h"
+#include "src/codegen/arm/assembler-arm.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/common/globals.h"
+#include "src/objects/contexts.h"
namespace v8 {
namespace internal {
@@ -29,9 +29,7 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-
-Register GetRegisterThatIsNotOneOf(Register reg1,
- Register reg2 = no_reg,
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
@@ -52,6 +50,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type);
+// Allocate stack space of given size (i.e. decrement {sp} by the value
+// stored in the given register, or by a constant). If you need to perform a
+// stack check, do it before calling this function because this function may
+// write into the newly allocated space. It may also overwrite the given
+// register's value, in the version that takes a register.
+#ifdef V8_OS_WIN
+ void AllocateStackSpace(Register bytes_scratch);
+ void AllocateStackSpace(int bytes);
+#else
+ void AllocateStackSpace(Register bytes) { sub(sp, sp, bytes); }
+ void AllocateStackSpace(int bytes) { sub(sp, sp, Operand(bytes)); }
+#endif
+
// Push a fixed frame, consisting of lr, fp
void PushCommonFrame(Register marker_reg = no_reg);
@@ -337,7 +348,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void VmovLow(Register dst, DwVfpRegister src);
void VmovLow(DwVfpRegister dst, Register src);
- void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+ void CheckPageFlag(Register object, int mask, Condition cc,
Label* condition_met);
// Check whether d16-d31 are available on the CPU. The result is given by the
@@ -347,15 +358,25 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Register address,
+ void CallRecordWriteStub(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Register address,
+ void CallRecordWriteStub(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode, Address wasm_target);
- void CallEphemeronKeyBarrier(Register object, Register address,
+ void CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode);
+ // For a given |object| and |offset|:
+ // - Move |object| to |dst_object|.
+ // - Compute the address of the slot pointed to by |offset| in |object| and
+ // write it to |dst_slot|. |offset| can be either an immediate or a
+ // register.
+ // This method makes sure |object| and |offset| are allowed to overlap with
+ // the destination registers.
+ void MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset);
+
// Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
// values to location, saving [d0..(d15|d31)].
void SaveFPRegs(Register location, Register scratch);
@@ -541,7 +562,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallCFunctionHelper(Register function, int num_reg_arguments,
int num_double_arguments);
- void CallRecordWriteStub(Register object, Register address,
+ void CallRecordWriteStub(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode, Handle<Code> code_target,
Address wasm_target);
@@ -566,29 +587,23 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
+ // stored.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
- Register object, int offset, Register value, Register scratch,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ Register object, int offset, Register value, LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
+ // For a given |object| notify the garbage collector that the slot at |offset|
+ // has been written. |value| is the object being stored.
void RecordWrite(
- Register object, Register address, Register value,
+ Register object, Operand offset, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
-
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
void EnterExitFrame(bool save_doubles, int stack_space = 0,
@@ -649,17 +664,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// register unless the heap_object register is the same register as one of the
// other registers.
// Type_reg can be no_reg. In that case a scratch register is used.
- void CompareObjectType(Register heap_object,
- Register map,
- Register type_reg,
+ void CompareObjectType(Register heap_object, Register map, Register type_reg,
InstanceType type);
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
- void CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type);
+ void CompareInstanceType(Register map, Register type_reg, InstanceType type);
// Compare the object in a register to a value from the root list.
// Acquires a scratch register.
@@ -688,18 +699,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
- // Try to convert a double to a signed 32-bit integer.
- // Z flag set to one and result assigned if the conversion is exact.
- void TryDoubleToInt32Exact(Register result,
- DwVfpRegister double_input,
- LowDwVfpRegister double_scratch);
-
// ---------------------------------------------------------------------------
// Runtime calls
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f,
- int num_arguments,
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Convenience function: Same as above, but takes the fid instead.
@@ -732,10 +736,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// StatsCounter support
- void IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
// ---------------------------------------------------------------------------
// Smi utilities
@@ -743,16 +747,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void SmiTag(Register reg, SBit s = LeaveCC);
void SmiTag(Register dst, Register src, SBit s = LeaveCC);
- // Untag the source value into destination and jump if source is a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
// Test if the register contains a smi (Z == 0 (eq) if true).
void SmiTst(Register value);
// Jump if either of the registers contain a non-smi.
void JumpIfNotSmi(Register value, Label* not_smi_label);
- // Jump if either of the registers contain a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
@@ -776,12 +774,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- template<typename Field>
+ template <typename Field>
void DecodeField(Register dst, Register src) {
Ubfx(dst, src, Field::kShift, Field::kSize);
}
- template<typename Field>
+ template <typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
@@ -809,7 +807,6 @@ inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
-
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@@ -819,4 +816,4 @@ inline MemOperand NativeContextMemOperand() {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
+#endif // V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h
index 3584a6b19f..f3639a8f27 100644
--- a/deps/v8/src/arm/register-arm.h
+++ b/deps/v8/src/codegen/arm/register-arm.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_REGISTER_ARM_H_
-#define V8_ARM_REGISTER_ARM_H_
+#ifndef V8_CODEGEN_ARM_REGISTER_ARM_H_
+#define V8_CODEGEN_ARM_REGISTER_ARM_H_
-#include "src/register.h"
-#include "src/reglist.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
namespace v8 {
namespace internal {
@@ -151,7 +151,7 @@ enum SwVfpRegisterCode {
// This way, we make sure no registers in the list ever overlap. However, a list
// may represent multiple different sets of registers,
// e.g. [d0 s2 s3] <=> [s0 s1 d1].
-typedef uint64_t VfpRegList;
+using VfpRegList = uint64_t;
// Single word VFP register.
class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
@@ -179,7 +179,7 @@ ASSERT_TRIVIALLY_COPYABLE(SwVfpRegister);
static_assert(sizeof(SwVfpRegister) == sizeof(int),
"SwVfpRegister can efficiently be passed by value");
-typedef SwVfpRegister FloatRegister;
+using FloatRegister = SwVfpRegister;
enum DoubleRegisterCode {
#define REGISTER_CODE(R) kDoubleCode_##R,
@@ -217,7 +217,7 @@ ASSERT_TRIVIALLY_COPYABLE(DwVfpRegister);
static_assert(sizeof(DwVfpRegister) == sizeof(int),
"DwVfpRegister can efficiently be passed by value");
-typedef DwVfpRegister DoubleRegister;
+using DoubleRegister = DwVfpRegister;
// Double word VFP register d0-15.
class LowDwVfpRegister
@@ -272,9 +272,9 @@ class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
explicit constexpr QwNeonRegister(int code) : RegisterBase(code) {}
};
-typedef QwNeonRegister QuadRegister;
+using QuadRegister = QwNeonRegister;
-typedef QwNeonRegister Simd128Register;
+using Simd128Register = QwNeonRegister;
enum CRegisterCode {
#define REGISTER_CODE(R) kCCode_##R,
@@ -366,4 +366,4 @@ constexpr Register kRootRegister = r10; // Roots array pointer.
} // namespace internal
} // namespace v8
-#endif // V8_ARM_REGISTER_ARM_H_
+#endif // V8_CODEGEN_ARM_REGISTER_ARM_H_
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
index fc8e31aac3..5680d8b054 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_ASSEMBLER_ARM64_INL_H_
-#define V8_ARM64_ASSEMBLER_ARM64_INL_H_
+#ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
+#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
-#include "src/arm64/assembler-arm64.h"
-#include "src/assembler.h"
+#include "src/codegen/arm64/assembler-arm64.h"
+#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -35,24 +35,20 @@ void RelocInfo::apply(intptr_t delta) {
}
}
-
inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
return (reg_size_ == other.reg_size_) && (reg_type_ == other.reg_type_);
}
-
inline bool CPURegister::IsZero() const {
DCHECK(IsValid());
return IsRegister() && (reg_code_ == kZeroRegCode);
}
-
inline bool CPURegister::IsSP() const {
DCHECK(IsValid());
return IsRegister() && (reg_code_ == kSPRegInternalCode);
}
-
inline void CPURegList::Combine(const CPURegList& other) {
DCHECK(IsValid());
DCHECK(other.type() == type_);
@@ -60,7 +56,6 @@ inline void CPURegList::Combine(const CPURegList& other) {
list_ |= other.list();
}
-
inline void CPURegList::Remove(const CPURegList& other) {
DCHECK(IsValid());
if (other.type() == type_) {
@@ -68,14 +63,12 @@ inline void CPURegList::Remove(const CPURegList& other) {
}
}
-
inline void CPURegList::Combine(const CPURegister& other) {
DCHECK(other.type() == type_);
DCHECK(other.SizeInBits() == size_);
Combine(other.code());
}
-
inline void CPURegList::Remove(const CPURegister& other1,
const CPURegister& other2,
const CPURegister& other3,
@@ -86,21 +79,18 @@ inline void CPURegList::Remove(const CPURegister& other1,
if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
}
-
inline void CPURegList::Combine(int code) {
DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ |= (1ULL << code);
}
-
inline void CPURegList::Remove(int code) {
DCHECK(IsValid());
DCHECK(CPURegister::Create(code, size_, type_).IsValid());
list_ &= ~(1ULL << code);
}
-
inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return sp;
@@ -110,7 +100,6 @@ inline Register Register::XRegFromCode(unsigned code) {
}
}
-
inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wsp;
@@ -200,10 +189,9 @@ inline VRegister CPURegister::Q() const {
return VRegister::QRegFromCode(reg_code_);
}
-
// Immediate.
// Default initializer is for int types
-template<typename T>
+template <typename T>
struct ImmediateInitializer {
static const bool kIsIntType = true;
static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
@@ -222,51 +210,43 @@ struct ImmediateInitializer<Smi> {
}
};
-
-template<>
+template <>
struct ImmediateInitializer<ExternalReference> {
static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(ExternalReference t) {
return RelocInfo::EXTERNAL_REFERENCE;
}
- static inline int64_t immediate_for(ExternalReference t) {;
+ static inline int64_t immediate_for(ExternalReference t) {
return static_cast<int64_t>(t.address());
}
};
-
-template<typename T>
+template <typename T>
Immediate::Immediate(Handle<T> value) {
InitializeHandle(value);
}
-
-template<typename T>
+template <typename T>
Immediate::Immediate(T t)
: value_(ImmediateInitializer<T>::immediate_for(t)),
rmode_(ImmediateInitializer<T>::rmode_for(t)) {}
-
-template<typename T>
+template <typename T>
Immediate::Immediate(T t, RelocInfo::Mode rmode)
- : value_(ImmediateInitializer<T>::immediate_for(t)),
- rmode_(rmode) {
+ : value_(ImmediateInitializer<T>::immediate_for(t)), rmode_(rmode) {
STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
}
// Operand.
-template<typename T>
+template <typename T>
Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
-
-template<typename T>
+template <typename T>
Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}
-
-template<typename T>
+template <typename T>
Operand::Operand(T t, RelocInfo::Mode rmode)
- : immediate_(t, rmode),
- reg_(NoReg) {}
+ : immediate_(t, rmode), reg_(NoReg) {}
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
: immediate_(0),
@@ -279,7 +259,6 @@ Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
DCHECK_IMPLIES(reg.IsSP(), shift_amount == 0);
}
-
Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
: immediate_(0),
reg_(reg),
@@ -297,7 +276,7 @@ Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
bool Operand::IsHeapObjectRequest() const {
DCHECK_IMPLIES(heap_object_request_.has_value(), reg_.Is(NoReg));
DCHECK_IMPLIES(heap_object_request_.has_value(),
- immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT ||
+ immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT ||
immediate_.rmode() == RelocInfo::CODE_TARGET);
return heap_object_request_.has_value();
}
@@ -311,17 +290,14 @@ bool Operand::IsImmediate() const {
return reg_.Is(NoReg) && !IsHeapObjectRequest();
}
-
bool Operand::IsShiftedRegister() const {
return reg_.IsValid() && (shift_ != NO_SHIFT);
}
-
bool Operand::IsExtendedRegister() const {
return reg_.IsValid() && (extend_ != NO_EXTEND);
}
-
bool Operand::IsZero() const {
if (IsImmediate()) {
return ImmediateValue() == 0;
@@ -330,7 +306,6 @@ bool Operand::IsZero() const {
}
}
-
Operand Operand::ToExtendedRegister() const {
DCHECK(IsShiftedRegister());
DCHECK((shift_ == LSL) && (shift_amount_ <= 4));
@@ -339,9 +314,9 @@ Operand Operand::ToExtendedRegister() const {
Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
- immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT) ||
+ immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT) ||
(heap_object_request().kind() == HeapObjectRequest::kStringConstant &&
- immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT));
+ immediate_.rmode() == RelocInfo::FULL_EMBEDDED_OBJECT));
return immediate_;
}
@@ -350,7 +325,6 @@ Immediate Operand::immediate() const {
return immediate_;
}
-
int64_t Operand::ImmediateValue() const {
DCHECK(IsImmediate());
return immediate_.value();
@@ -366,64 +340,50 @@ Register Operand::reg() const {
return reg_;
}
-
Shift Operand::shift() const {
DCHECK(IsShiftedRegister());
return shift_;
}
-
Extend Operand::extend() const {
DCHECK(IsExtendedRegister());
return extend_;
}
-
unsigned Operand::shift_amount() const {
DCHECK(IsShiftedRegister() || IsExtendedRegister());
return shift_amount_;
}
-
-Operand Operand::UntagSmi(Register smi) {
- DCHECK(smi.Is64Bits());
- DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- return Operand(smi, ASR, kSmiShift);
-}
-
-
-Operand Operand::UntagSmiAndScale(Register smi, int scale) {
- DCHECK(smi.Is64Bits());
- DCHECK((scale >= 0) && (scale <= (64 - kSmiValueSize)));
- DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
- if (scale > kSmiShift) {
- return Operand(smi, LSL, scale - kSmiShift);
- } else if (scale < kSmiShift) {
- return Operand(smi, ASR, kSmiShift - scale);
- }
- return Operand(smi);
-}
-
-
MemOperand::MemOperand()
- : base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset),
- shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
-}
-
+ : base_(NoReg),
+ regoffset_(NoReg),
+ offset_(0),
+ addrmode_(Offset),
+ shift_(NO_SHIFT),
+ extend_(NO_EXTEND),
+ shift_amount_(0) {}
MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
- : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
- shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+ : base_(base),
+ regoffset_(NoReg),
+ offset_(offset),
+ addrmode_(addrmode),
+ shift_(NO_SHIFT),
+ extend_(NO_EXTEND),
+ shift_amount_(0) {
DCHECK(base.Is64Bits() && !base.IsZero());
}
-
-MemOperand::MemOperand(Register base,
- Register regoffset,
- Extend extend,
+MemOperand::MemOperand(Register base, Register regoffset, Extend extend,
unsigned shift_amount)
- : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
- shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
+ : base_(base),
+ regoffset_(regoffset),
+ offset_(0),
+ addrmode_(Offset),
+ shift_(NO_SHIFT),
+ extend_(extend),
+ shift_amount_(shift_amount) {
DCHECK(base.Is64Bits() && !base.IsZero());
DCHECK(!regoffset.IsSP());
DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
@@ -432,13 +392,15 @@ MemOperand::MemOperand(Register base,
DCHECK(regoffset.Is64Bits() || (extend != SXTX));
}
-
-MemOperand::MemOperand(Register base,
- Register regoffset,
- Shift shift,
+MemOperand::MemOperand(Register base, Register regoffset, Shift shift,
unsigned shift_amount)
- : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
- shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
+ : base_(base),
+ regoffset_(regoffset),
+ offset_(0),
+ addrmode_(Offset),
+ shift_(shift),
+ extend_(NO_EXTEND),
+ shift_amount_(shift_amount) {
DCHECK(base.Is64Bits() && !base.IsZero());
DCHECK(regoffset.Is64Bits() && !regoffset.IsSP());
DCHECK(shift == LSL);
@@ -485,20 +447,13 @@ bool MemOperand::IsImmediateOffset() const {
return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}
-
bool MemOperand::IsRegisterOffset() const {
return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}
+bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }
-bool MemOperand::IsPreIndex() const {
- return addrmode_ == PreIndex;
-}
-
-
-bool MemOperand::IsPostIndex() const {
- return addrmode_ == PostIndex;
-}
+bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }
Operand MemOperand::OffsetAsOperand() const {
if (IsImmediateOffset()) {
@@ -513,7 +468,6 @@ Operand MemOperand::OffsetAsOperand() const {
}
}
-
void Assembler::Unreachable() {
#ifdef USE_SIMULATOR
debug("UNREACHABLE", __LINE__, BREAK);
@@ -523,14 +477,12 @@ void Assembler::Unreachable() {
#endif
}
-
Address Assembler::target_pointer_address_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
DCHECK(instr->IsLdrLiteralX());
return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}
-
// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc, Address constant_pool) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
@@ -554,6 +506,12 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
}
}
+Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(pc);
+ CHECK(!instr->IsLdrLiteralX());
+ return GetCompressedEmbeddedObject(ReadUnalignedValue<int32_t>(pc));
+}
+
Address Assembler::runtime_entry_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
if (instr->IsLdrLiteralX()) {
@@ -564,19 +522,6 @@ Address Assembler::runtime_entry_at(Address pc) {
}
}
-Address Assembler::target_address_from_return_address(Address pc) {
- // Returns the address of the call target from the return address that will
- // be returned to after a call.
- // Call sequence on ARM64 is:
- // ldr ip0, #... @ load from literal pool
- // blr ip0
- Address candidate = pc - 2 * kInstrSize;
- Instruction* instr = reinterpret_cast<Instruction*>(candidate);
- USE(instr);
- DCHECK(instr->IsLdrLiteralX());
- return candidate;
-}
-
int Assembler::deserialization_special_target_size(Address location) {
Instruction* instr = reinterpret_cast<Instruction*>(location);
if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) {
@@ -649,7 +594,6 @@ int RelocInfo::target_address_size() {
}
}
-
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@@ -679,20 +623,23 @@ Address RelocInfo::target_address_address() {
}
}
-
Address RelocInfo::constant_pool_entry_address() {
DCHECK(IsInConstantPool());
return Assembler::target_pointer_address_at(pc_);
}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- if (rmode_ == EMBEDDED_OBJECT) {
+ if (IsFullEmbeddedObject(rmode_)) {
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
} else {
@@ -704,8 +651,8 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
@@ -729,7 +676,6 @@ Address RelocInfo::target_internal_reference() {
return Memory<Address>(pc_);
}
-
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return pc_;
@@ -755,7 +701,7 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
@@ -787,7 +733,6 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
}
}
-
LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
DCHECK(rt.IsValid());
if (rt.IsRegister()) {
@@ -837,7 +782,6 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
}
}
-
LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
@@ -847,7 +791,6 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
}
}
-
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
DCHECK_EQ(kStartOfLabelLinkChain, 0);
int offset = LinkAndGetByteOffsetTo(label);
@@ -855,7 +798,6 @@ int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
return offset >> kInstrSizeLog2;
}
-
Instr Assembler::Flags(FlagsUpdate S) {
if (S == SetFlags) {
return 1 << FlagsUpdate_offset;
@@ -865,11 +807,7 @@ Instr Assembler::Flags(FlagsUpdate S) {
UNREACHABLE();
}
-
-Instr Assembler::Cond(Condition cond) {
- return cond << Condition_offset;
-}
-
+Instr Assembler::Cond(Condition cond) { return cond << Condition_offset; }
Instr Assembler::ImmPCRelAddress(int imm21) {
CHECK(is_int21(imm21));
@@ -879,31 +817,26 @@ Instr Assembler::ImmPCRelAddress(int imm21) {
return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}
-
Instr Assembler::ImmUncondBranch(int imm26) {
CHECK(is_int26(imm26));
return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}
-
Instr Assembler::ImmCondBranch(int imm19) {
CHECK(is_int19(imm19));
return truncate_to_int19(imm19) << ImmCondBranch_offset;
}
-
Instr Assembler::ImmCmpBranch(int imm19) {
CHECK(is_int19(imm19));
return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}
-
Instr Assembler::ImmTestBranch(int imm14) {
CHECK(is_int14(imm14));
return truncate_to_int14(imm14) << ImmTestBranch_offset;
}
-
Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
DCHECK(is_uint6(bit_pos));
// Subtract five from the shift offset, as we need bit 5 from bit_pos.
@@ -914,12 +847,10 @@ Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
return b5 | b40;
}
-
Instr Assembler::SF(Register rd) {
- return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
}
-
Instr Assembler::ImmAddSub(int imm) {
DCHECK(IsImmAddSub(imm));
if (is_uint12(imm)) { // No shift required.
@@ -930,7 +861,6 @@ Instr Assembler::ImmAddSub(int imm) {
return imm;
}
-
Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
((reg_size == kWRegSizeInBits) && is_uint5(imms)));
@@ -938,7 +868,6 @@ Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
return imms << ImmS_offset;
}
-
Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
((reg_size == kWRegSizeInBits) && is_uint5(immr)));
@@ -947,7 +876,6 @@ Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
return immr << ImmR_offset;
}
-
Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
DCHECK(is_uint6(imms));
@@ -956,7 +884,6 @@ Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
return imms << ImmSetBits_offset;
}
-
Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
@@ -965,13 +892,11 @@ Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
return immr << ImmRotate_offset;
}
-
Instr Assembler::ImmLLiteral(int imm19) {
CHECK(is_int19(imm19));
return truncate_to_int19(imm19) << ImmLLiteral_offset;
}
-
Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0));
@@ -979,47 +904,39 @@ Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
return bitn << BitN_offset;
}
-
Instr Assembler::ShiftDP(Shift shift) {
DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
return shift << ShiftDP_offset;
}
-
Instr Assembler::ImmDPShift(unsigned amount) {
DCHECK(is_uint6(amount));
return amount << ImmDPShift_offset;
}
-
Instr Assembler::ExtendMode(Extend extend) {
return extend << ExtendMode_offset;
}
-
Instr Assembler::ImmExtendShift(unsigned left_shift) {
DCHECK_LE(left_shift, 4);
return left_shift << ImmExtendShift_offset;
}
-
Instr Assembler::ImmCondCmp(unsigned imm) {
DCHECK(is_uint5(imm));
return imm << ImmCondCmp_offset;
}
-
Instr Assembler::Nzcv(StatusFlags nzcv) {
return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}
-
Instr Assembler::ImmLSUnsigned(int imm12) {
DCHECK(is_uint12(imm12));
return imm12 << ImmLSUnsigned_offset;
}
-
Instr Assembler::ImmLS(int imm9) {
DCHECK(is_int9(imm9));
return truncate_to_int9(imm9) << ImmLS_offset;
@@ -1032,37 +949,31 @@ Instr Assembler::ImmLSPair(int imm7, unsigned size) {
return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}
-
Instr Assembler::ImmShiftLS(unsigned shift_amount) {
DCHECK(is_uint1(shift_amount));
return shift_amount << ImmShiftLS_offset;
}
-
Instr Assembler::ImmException(int imm16) {
DCHECK(is_uint16(imm16));
return imm16 << ImmException_offset;
}
-
Instr Assembler::ImmSystemRegister(int imm15) {
DCHECK(is_uint15(imm15));
return imm15 << ImmSystemRegister_offset;
}
-
Instr Assembler::ImmHint(int imm7) {
DCHECK(is_uint7(imm7));
return imm7 << ImmHint_offset;
}
-
Instr Assembler::ImmBarrierDomain(int imm2) {
DCHECK(is_uint2(imm2));
return imm2 << ImmBarrierDomain_offset;
}
-
Instr Assembler::ImmBarrierType(int imm2) {
DCHECK(is_uint2(imm2));
return imm2 << ImmBarrierType_offset;
@@ -1081,13 +992,11 @@ unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
return size;
}
-
Instr Assembler::ImmMoveWide(int imm) {
DCHECK(is_uint16(imm));
return imm << ImmMoveWide_offset;
}
-
Instr Assembler::ShiftMoveWide(int shift) {
DCHECK(is_uint2(shift));
return shift << ShiftMoveWide_offset;
@@ -1100,12 +1009,10 @@ Instr Assembler::FPScale(unsigned scale) {
return scale << FPScale_offset;
}
-
const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
return reg.Is64Bits() ? xzr : wzr;
}
-
inline void Assembler::CheckBufferSpace() {
DCHECK_LT(pc_, buffer_start_ + buffer_->size());
if (buffer_space() < kGap) {
@@ -1113,7 +1020,6 @@ inline void Assembler::CheckBufferSpace() {
}
}
-
inline void Assembler::CheckBuffer() {
CheckBufferSpace();
if (pc_offset() >= next_veneer_pool_check_) {
@@ -1127,4 +1033,4 @@ inline void Assembler::CheckBuffer() {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_ASSEMBLER_ARM64_INL_H_
+#endif // V8_CODEGEN_ARM64_ASSEMBLER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc
index 2763a647c7..1806f82b46 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc
@@ -28,14 +28,14 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64.h"
+#include "src/codegen/arm64/assembler-arm64.h"
-#include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/frame-constants.h"
-#include "src/register-configuration.h"
-#include "src/string-constants.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/string-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -55,7 +55,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// the feature any more.
}
-void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {}
// -----------------------------------------------------------------------------
@@ -72,7 +72,6 @@ CPURegister CPURegList::PopLowestIndex() {
return CPURegister::Create(index, size_, type_);
}
-
CPURegister CPURegList::PopHighestIndex() {
DCHECK(IsValid());
if (IsEmpty()) {
@@ -85,7 +84,6 @@ CPURegister CPURegList::PopHighestIndex() {
return CPURegister::Create(index, size_, type_);
}
-
void CPURegList::RemoveCalleeSaved() {
if (type() == CPURegister::kRegister) {
Remove(GetCalleeSaved(RegisterSizeInBits()));
@@ -98,6 +96,18 @@ void CPURegList::RemoveCalleeSaved() {
}
}
+void CPURegList::Align() {
+ // Use padreg, if necessary, to maintain stack alignment.
+ if (Count() % 2 != 0) {
+ if (IncludesAliasOf(padreg)) {
+ Remove(padreg);
+ } else {
+ Combine(padreg);
+ }
+ }
+
+ DCHECK_EQ(Count() % 2, 0);
+}
CPURegList CPURegList::GetCalleeSaved(int size) {
return CPURegList(CPURegister::kRegister, size, 19, 29);
@@ -107,7 +117,6 @@ CPURegList CPURegList::GetCalleeSavedV(int size) {
return CPURegList(CPURegister::kVRegister, size, 8, 15);
}
-
CPURegList CPURegList::GetCallerSaved(int size) {
// x18 is the platform register and is reserved for the use of platform ABIs.
// Registers x0-x17 and lr (x30) are caller-saved.
@@ -123,7 +132,6 @@ CPURegList CPURegList::GetCallerSavedV(int size) {
return list;
}
-
// This function defines the list of registers which are associated with a
// safepoint slot. Safepoint register slots are saved contiguously on the stack.
// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
@@ -152,7 +160,6 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
return list;
}
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -173,7 +180,6 @@ bool RelocInfo::IsCodedSpecially() {
}
}
-
bool RelocInfo::IsInConstantPool() {
Instruction* instr = reinterpret_cast<Instruction*>(pc_);
return instr->IsLdrLiteralX();
@@ -216,9 +222,9 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
}
int number_of_unique_regs =
- CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
+ CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
int number_of_unique_fpregs =
- CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
+ CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
DCHECK(number_of_valid_regs >= number_of_unique_regs);
DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);
@@ -227,7 +233,6 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
(number_of_valid_fpregs != number_of_unique_fpregs);
}
-
bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
const CPURegister& reg3, const CPURegister& reg4,
const CPURegister& reg5, const CPURegister& reg6,
@@ -280,10 +285,9 @@ bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
void Immediate::InitializeHandle(Handle<HeapObject> handle) {
value_ = static_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
}
-
bool Operand::NeedsRelocation(const Assembler* assembler) const {
RelocInfo::Mode rmode = immediate_.rmode();
@@ -339,13 +343,11 @@ bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
return write_reloc_info;
}
-
int ConstPool::DistanceToFirstUse() {
DCHECK_GE(first_use_, 0);
return assm_->pc_offset() - first_use_;
}
-
int ConstPool::MaxPcOffset() {
// There are no pending entries in the pool so we can never get out of
// range.
@@ -356,7 +358,6 @@ int ConstPool::MaxPcOffset() {
return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
}
-
int ConstPool::WorstCaseSize() {
if (IsEmpty()) return 0;
@@ -369,7 +370,6 @@ int ConstPool::WorstCaseSize() {
return 4 * kInstrSize + EntryCount() * kSystemPointerSize;
}
-
int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
if (IsEmpty()) return 0;
@@ -387,7 +387,6 @@ int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
return prologue_size + EntryCount() * kSystemPointerSize;
}
-
void ConstPool::Emit(bool require_jump) {
DCHECK(!assm_->is_const_pool_blocked());
// Prevent recursive pool emission and protect from veneer pools.
@@ -442,7 +441,6 @@ void ConstPool::Emit(bool require_jump) {
static_cast<unsigned>(size));
}
-
void ConstPool::Clear() {
shared_entries_.clear();
handle_to_index_map_.clear();
@@ -450,30 +448,25 @@ void ConstPool::Clear() {
first_use_ = -1;
}
-
void ConstPool::EmitMarker() {
// A constant pool size is expressed in number of 32-bits words.
// Currently all entries are 64-bit.
// + 1 is for the crash guard.
// + 0/1 for alignment.
- int word_count = EntryCount() * 2 + 1 +
- (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
- assm_->Emit(LDR_x_lit |
- Assembler::ImmLLiteral(word_count) |
+ int word_count =
+ EntryCount() * 2 + 1 + (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
+ assm_->Emit(LDR_x_lit | Assembler::ImmLLiteral(word_count) |
Assembler::Rt(xzr));
}
-
MemOperand::PairResult MemOperand::AreConsistentForPair(
- const MemOperand& operandA,
- const MemOperand& operandB,
+ const MemOperand& operandA, const MemOperand& operandB,
int access_size_log2) {
DCHECK_GE(access_size_log2, 0);
DCHECK_LE(access_size_log2, 3);
// Step one: check that they share the same base, that the mode is Offset
// and that the offset is a multiple of access size.
- if (!operandA.base().Is(operandB.base()) ||
- (operandA.addrmode() != Offset) ||
+ if (!operandA.base().Is(operandB.base()) || (operandA.addrmode() != Offset) ||
(operandB.addrmode() != Offset) ||
((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
return kNotPair;
@@ -491,7 +484,6 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
return kNotPair;
}
-
void ConstPool::EmitGuard() {
#ifdef DEBUG
Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
@@ -501,7 +493,6 @@ void ConstPool::EmitGuard() {
assm_->EmitPoolGuard();
}
-
void ConstPool::EmitEntries() {
DCHECK(IsAligned(assm_->pc_offset(), 8));
@@ -520,7 +511,6 @@ void ConstPool::EmitEntries() {
Clear();
}
-
// Assembler
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
@@ -623,7 +613,7 @@ void Assembler::CodeTargetAlign() {
Align(8);
}
-void Assembler::CheckLabelLinkChain(Label const * label) {
+void Assembler::CheckLabelLinkChain(Label const* label) {
#ifdef DEBUG
if (label->is_linked()) {
static const int kMaxLinksToCheck = 64; // Avoid O(n2) behaviour.
@@ -632,7 +622,7 @@ void Assembler::CheckLabelLinkChain(Label const * label) {
bool end_of_chain = false;
while (!end_of_chain) {
if (++links_checked > kMaxLinksToCheck) break;
- Instruction * link = InstructionAt(linkoffset);
+ Instruction* link = InstructionAt(linkoffset);
int64_t linkpcoffset = link->ImmPCOffset();
int64_t prevlinkoffset = linkoffset + linkpcoffset;
@@ -643,7 +633,6 @@ void Assembler::CheckLabelLinkChain(Label const * label) {
#endif
}
-
void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
Label* label,
Instruction* label_veneer) {
@@ -724,7 +713,6 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
CheckLabelLinkChain(label);
}
-
void Assembler::bind(Label* label) {
// Bind label to the address at pc_. All instructions (most likely branches)
// that are linked to this label will be updated to point to the newly-bound
@@ -787,7 +775,6 @@ void Assembler::bind(Label* label) {
DCHECK(!label->is_linked());
}
-
int Assembler::LinkAndGetByteOffsetTo(Label* label) {
DCHECK_EQ(sizeof(*pc_), 1);
CheckLabelLinkChain(label);
@@ -830,7 +817,6 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
return offset;
}
-
void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
DCHECK(label->is_linked());
CheckLabelLinkChain(label);
@@ -840,7 +826,7 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
bool end_of_chain = false;
while (!end_of_chain) {
- Instruction * link = InstructionAt(link_offset);
+ Instruction* link = InstructionAt(link_offset);
link_pcoffset = static_cast<int>(link->ImmPCOffset());
// ADR instructions are not handled by veneers.
@@ -848,7 +834,7 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
int max_reachable_pc =
static_cast<int>(InstructionOffset(link) +
Instruction::ImmBranchRange(link->BranchType()));
- typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
+ using unresolved_info_it = std::multimap<int, FarBranchInfo>::iterator;
std::pair<unresolved_info_it, unresolved_info_it> range;
range = unresolved_branches_.equal_range(max_reachable_pc);
unresolved_info_it it;
@@ -865,7 +851,6 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
}
}
-
void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
if (unresolved_branches_.empty()) {
DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
@@ -881,11 +866,10 @@ void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
next_veneer_pool_check_ = kMaxInt;
} else {
next_veneer_pool_check_ =
- unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
}
}
-
void Assembler::StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
// Prevent constant pool checks happening by setting the next check to
@@ -894,7 +878,6 @@ void Assembler::StartBlockConstPool() {
}
}
-
void Assembler::EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
@@ -908,13 +891,11 @@ void Assembler::EndBlockConstPool() {
}
}
-
bool Assembler::is_const_pool_blocked() const {
return (const_pool_blocked_nesting_ > 0) ||
(pc_offset() < no_const_pool_before_);
}
-
bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
@@ -929,7 +910,6 @@ bool Assembler::IsConstantPoolAt(Instruction* instr) {
return result;
}
-
int Assembler::ConstantPoolSizeAt(Instruction* instr) {
#ifdef USE_SIMULATOR
// Assembler::debug() embeds constants directly into the instruction stream.
@@ -937,9 +917,8 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
// disassembling the constants.
if ((instr->Mask(ExceptionMask) == HLT) &&
(instr->ImmException() == kImmExceptionIsDebug)) {
- const char* message =
- reinterpret_cast<const char*>(
- instr->InstructionAtOffset(kDebugMessageOffset));
+ const char* message = reinterpret_cast<const char*>(
+ instr->InstructionAtOffset(kDebugMessageOffset));
int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
return RoundUp(size, kInstrSize) / kInstrSize;
}
@@ -956,18 +935,13 @@ int Assembler::ConstantPoolSizeAt(Instruction* instr) {
}
}
-
void Assembler::EmitPoolGuard() {
// We must generate only one instruction as this is used in scopes that
// control the size of the code generated.
Emit(BLR | Rn(xzr));
}
-
-void Assembler::StartBlockVeneerPool() {
- ++veneer_pool_blocked_nesting_;
-}
-
+void Assembler::StartBlockVeneerPool() { ++veneer_pool_blocked_nesting_; }
void Assembler::EndBlockVeneerPool() {
if (--veneer_pool_blocked_nesting_ == 0) {
@@ -977,13 +951,11 @@ void Assembler::EndBlockVeneerPool() {
}
}
-
void Assembler::br(const Register& xn) {
DCHECK(xn.Is64Bits());
Emit(BR | Rn(xn));
}
-
void Assembler::blr(const Register& xn) {
DCHECK(xn.Is64Bits());
// The pattern 'blr xzr' is used as a guard to detect when execution falls
@@ -992,624 +964,447 @@ void Assembler::blr(const Register& xn) {
Emit(BLR | Rn(xn));
}
-
void Assembler::ret(const Register& xn) {
DCHECK(xn.Is64Bits());
Emit(RET | Rn(xn));
}
+void Assembler::b(int imm26) { Emit(B | ImmUncondBranch(imm26)); }
-void Assembler::b(int imm26) {
- Emit(B | ImmUncondBranch(imm26));
-}
-
-
-void Assembler::b(Label* label) {
- b(LinkAndGetInstructionOffsetTo(label));
-}
-
+void Assembler::b(Label* label) { b(LinkAndGetInstructionOffsetTo(label)); }
void Assembler::b(int imm19, Condition cond) {
Emit(B_cond | ImmCondBranch(imm19) | cond);
}
-
void Assembler::b(Label* label, Condition cond) {
b(LinkAndGetInstructionOffsetTo(label), cond);
}
+void Assembler::bl(int imm26) { Emit(BL | ImmUncondBranch(imm26)); }
-void Assembler::bl(int imm26) {
- Emit(BL | ImmUncondBranch(imm26));
-}
-
+void Assembler::bl(Label* label) { bl(LinkAndGetInstructionOffsetTo(label)); }
-void Assembler::bl(Label* label) {
- bl(LinkAndGetInstructionOffsetTo(label));
-}
-
-
-void Assembler::cbz(const Register& rt,
- int imm19) {
+void Assembler::cbz(const Register& rt, int imm19) {
Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}
-
-void Assembler::cbz(const Register& rt,
- Label* label) {
+void Assembler::cbz(const Register& rt, Label* label) {
cbz(rt, LinkAndGetInstructionOffsetTo(label));
}
-
-void Assembler::cbnz(const Register& rt,
- int imm19) {
+void Assembler::cbnz(const Register& rt, int imm19) {
Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}
-
-void Assembler::cbnz(const Register& rt,
- Label* label) {
+void Assembler::cbnz(const Register& rt, Label* label) {
cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}
-
-void Assembler::tbz(const Register& rt,
- unsigned bit_pos,
- int imm14) {
+void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14) {
DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
-
-void Assembler::tbz(const Register& rt,
- unsigned bit_pos,
- Label* label) {
+void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}
-
-void Assembler::tbnz(const Register& rt,
- unsigned bit_pos,
- int imm14) {
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14) {
DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
-
-void Assembler::tbnz(const Register& rt,
- unsigned bit_pos,
- Label* label) {
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}
-
void Assembler::adr(const Register& rd, int imm21) {
DCHECK(rd.Is64Bits());
Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}
-
void Assembler::adr(const Register& rd, Label* label) {
adr(rd, LinkAndGetByteOffsetTo(label));
}
-
void Assembler::nop(NopMarkerTypes n) {
DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
}
-
-void Assembler::add(const Register& rd,
- const Register& rn,
+void Assembler::add(const Register& rd, const Register& rn,
const Operand& operand) {
AddSub(rd, rn, operand, LeaveFlags, ADD);
}
-
-void Assembler::adds(const Register& rd,
- const Register& rn,
+void Assembler::adds(const Register& rd, const Register& rn,
const Operand& operand) {
AddSub(rd, rn, operand, SetFlags, ADD);
}
-
-void Assembler::cmn(const Register& rn,
- const Operand& operand) {
+void Assembler::cmn(const Register& rn, const Operand& operand) {
Register zr = AppropriateZeroRegFor(rn);
adds(zr, rn, operand);
}
-
-void Assembler::sub(const Register& rd,
- const Register& rn,
+void Assembler::sub(const Register& rd, const Register& rn,
const Operand& operand) {
AddSub(rd, rn, operand, LeaveFlags, SUB);
}
-
-void Assembler::subs(const Register& rd,
- const Register& rn,
+void Assembler::subs(const Register& rd, const Register& rn,
const Operand& operand) {
AddSub(rd, rn, operand, SetFlags, SUB);
}
-
void Assembler::cmp(const Register& rn, const Operand& operand) {
Register zr = AppropriateZeroRegFor(rn);
subs(zr, rn, operand);
}
-
void Assembler::neg(const Register& rd, const Operand& operand) {
Register zr = AppropriateZeroRegFor(rd);
sub(rd, zr, operand);
}
-
void Assembler::negs(const Register& rd, const Operand& operand) {
Register zr = AppropriateZeroRegFor(rd);
subs(rd, zr, operand);
}
-
-void Assembler::adc(const Register& rd,
- const Register& rn,
+void Assembler::adc(const Register& rd, const Register& rn,
const Operand& operand) {
AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}
-
-void Assembler::adcs(const Register& rd,
- const Register& rn,
+void Assembler::adcs(const Register& rd, const Register& rn,
const Operand& operand) {
AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}
-
-void Assembler::sbc(const Register& rd,
- const Register& rn,
+void Assembler::sbc(const Register& rd, const Register& rn,
const Operand& operand) {
AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}
-
-void Assembler::sbcs(const Register& rd,
- const Register& rn,
+void Assembler::sbcs(const Register& rd, const Register& rn,
const Operand& operand) {
AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}
-
void Assembler::ngc(const Register& rd, const Operand& operand) {
Register zr = AppropriateZeroRegFor(rd);
sbc(rd, zr, operand);
}
-
void Assembler::ngcs(const Register& rd, const Operand& operand) {
Register zr = AppropriateZeroRegFor(rd);
sbcs(rd, zr, operand);
}
-
// Logical instructions.
-void Assembler::and_(const Register& rd,
- const Register& rn,
+void Assembler::and_(const Register& rd, const Register& rn,
const Operand& operand) {
Logical(rd, rn, operand, AND);
}
-
-void Assembler::ands(const Register& rd,
- const Register& rn,
+void Assembler::ands(const Register& rd, const Register& rn,
const Operand& operand) {
Logical(rd, rn, operand, ANDS);
}
-
-void Assembler::tst(const Register& rn,
- const Operand& operand) {
+void Assembler::tst(const Register& rn, const Operand& operand) {
ands(AppropriateZeroRegFor(rn), rn, operand);
}
-
-void Assembler::bic(const Register& rd,
- const Register& rn,
+void Assembler::bic(const Register& rd, const Register& rn,
const Operand& operand) {
Logical(rd, rn, operand, BIC);
}
-
-void Assembler::bics(const Register& rd,
- const Register& rn,
+void Assembler::bics(const Register& rd, const Register& rn,
const Operand& operand) {
Logical(rd, rn, operand, BICS);
}
-
-void Assembler::orr(const Register& rd,
- const Register& rn,
+void Assembler::orr(const Register& rd, const Register& rn,
const Operand& operand) {
Logical(rd, rn, operand, ORR);
}
-
-void Assembler::orn(const Register& rd,
- const Register& rn,
+void Assembler::orn(const Register& rd, const Register& rn,
const Operand& operand) {
Logical(rd, rn, operand, ORN);
}
-
-void Assembler::eor(const Register& rd,
- const Register& rn,
+void Assembler::eor(const Register& rd, const Register& rn,
const Operand& operand) {
Logical(rd, rn, operand, EOR);
}
-
-void Assembler::eon(const Register& rd,
- const Register& rn,
+void Assembler::eon(const Register& rd, const Register& rn,
const Operand& operand) {
Logical(rd, rn, operand, EON);
}
-
-void Assembler::lslv(const Register& rd,
- const Register& rn,
+void Assembler::lslv(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}
-
-void Assembler::lsrv(const Register& rd,
- const Register& rn,
+void Assembler::lsrv(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}
-
-void Assembler::asrv(const Register& rd,
- const Register& rn,
+void Assembler::asrv(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}
-
-void Assembler::rorv(const Register& rd,
- const Register& rn,
+void Assembler::rorv(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}
-
// Bitfield operations.
void Assembler::bfm(const Register& rd, const Register& rn, int immr,
int imms) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
- Emit(SF(rd) | BFM | N |
- ImmR(immr, rd.SizeInBits()) |
- ImmS(imms, rn.SizeInBits()) |
- Rn(rn) | Rd(rd));
+ Emit(SF(rd) | BFM | N | ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}
-
void Assembler::sbfm(const Register& rd, const Register& rn, int immr,
int imms) {
DCHECK(rd.Is64Bits() || rn.Is32Bits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
- Emit(SF(rd) | SBFM | N |
- ImmR(immr, rd.SizeInBits()) |
- ImmS(imms, rn.SizeInBits()) |
- Rn(rn) | Rd(rd));
+ Emit(SF(rd) | SBFM | N | ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}
-
void Assembler::ubfm(const Register& rd, const Register& rn, int immr,
int imms) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
- Emit(SF(rd) | UBFM | N |
- ImmR(immr, rd.SizeInBits()) |
- ImmS(imms, rn.SizeInBits()) |
- Rn(rn) | Rd(rd));
+ Emit(SF(rd) | UBFM | N | ImmR(immr, rd.SizeInBits()) |
+ ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}
-
void Assembler::extr(const Register& rd, const Register& rn, const Register& rm,
int lsb) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
- Emit(SF(rd) | EXTR | N | Rm(rm) |
- ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
+ Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.SizeInBits()) | Rn(rn) |
+ Rd(rd));
}
-
-void Assembler::csel(const Register& rd,
- const Register& rn,
- const Register& rm,
+void Assembler::csel(const Register& rd, const Register& rn, const Register& rm,
Condition cond) {
ConditionalSelect(rd, rn, rm, cond, CSEL);
}
-
-void Assembler::csinc(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
+void Assembler::csinc(const Register& rd, const Register& rn,
+ const Register& rm, Condition cond) {
ConditionalSelect(rd, rn, rm, cond, CSINC);
}
-
-void Assembler::csinv(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
+void Assembler::csinv(const Register& rd, const Register& rn,
+ const Register& rm, Condition cond) {
ConditionalSelect(rd, rn, rm, cond, CSINV);
}
-
-void Assembler::csneg(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
+void Assembler::csneg(const Register& rd, const Register& rn,
+ const Register& rm, Condition cond) {
ConditionalSelect(rd, rn, rm, cond, CSNEG);
}
-
-void Assembler::cset(const Register &rd, Condition cond) {
+void Assembler::cset(const Register& rd, Condition cond) {
DCHECK((cond != al) && (cond != nv));
Register zr = AppropriateZeroRegFor(rd);
csinc(rd, zr, zr, NegateCondition(cond));
}
-
-void Assembler::csetm(const Register &rd, Condition cond) {
+void Assembler::csetm(const Register& rd, Condition cond) {
DCHECK((cond != al) && (cond != nv));
Register zr = AppropriateZeroRegFor(rd);
csinv(rd, zr, zr, NegateCondition(cond));
}
-
-void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
+void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) {
DCHECK((cond != al) && (cond != nv));
csinc(rd, rn, rn, NegateCondition(cond));
}
-
-void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
+void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) {
DCHECK((cond != al) && (cond != nv));
csinv(rd, rn, rn, NegateCondition(cond));
}
-
-void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
+void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) {
DCHECK((cond != al) && (cond != nv));
csneg(rd, rn, rn, NegateCondition(cond));
}
-
-void Assembler::ConditionalSelect(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond,
+void Assembler::ConditionalSelect(const Register& rd, const Register& rn,
+ const Register& rm, Condition cond,
ConditionalSelectOp op) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}
-
-void Assembler::ccmn(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond) {
+void Assembler::ccmn(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond) {
ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}
-
-void Assembler::ccmp(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond) {
+void Assembler::ccmp(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond) {
ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}
-
-void Assembler::DataProcessing3Source(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra,
+void Assembler::DataProcessing3Source(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra,
DataProcessing3SourceOp op) {
Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}
-
-void Assembler::mul(const Register& rd,
- const Register& rn,
+void Assembler::mul(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(AreSameSizeAndType(rd, rn, rm));
Register zr = AppropriateZeroRegFor(rn);
DataProcessing3Source(rd, rn, rm, zr, MADD);
}
-
-void Assembler::madd(const Register& rd,
- const Register& rn,
- const Register& rm,
+void Assembler::madd(const Register& rd, const Register& rn, const Register& rm,
const Register& ra) {
DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
DataProcessing3Source(rd, rn, rm, ra, MADD);
}
-
-void Assembler::mneg(const Register& rd,
- const Register& rn,
+void Assembler::mneg(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(AreSameSizeAndType(rd, rn, rm));
Register zr = AppropriateZeroRegFor(rn);
DataProcessing3Source(rd, rn, rm, zr, MSUB);
}
-
-void Assembler::msub(const Register& rd,
- const Register& rn,
- const Register& rm,
+void Assembler::msub(const Register& rd, const Register& rn, const Register& rm,
const Register& ra) {
DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
DataProcessing3Source(rd, rn, rm, ra, MSUB);
}
-
-void Assembler::smaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
+void Assembler::smaddl(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
DCHECK(rd.Is64Bits() && ra.Is64Bits());
DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}
-
-void Assembler::smsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
+void Assembler::smsubl(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
DCHECK(rd.Is64Bits() && ra.Is64Bits());
DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}
-
-void Assembler::umaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
+void Assembler::umaddl(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
DCHECK(rd.Is64Bits() && ra.Is64Bits());
DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}
-
-void Assembler::umsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
+void Assembler::umsubl(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
DCHECK(rd.Is64Bits() && ra.Is64Bits());
DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}
-
-void Assembler::smull(const Register& rd,
- const Register& rn,
+void Assembler::smull(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(rd.Is64Bits());
DCHECK(rn.Is32Bits() && rm.Is32Bits());
DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}
-
-void Assembler::smulh(const Register& rd,
- const Register& rn,
+void Assembler::smulh(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(AreSameSizeAndType(rd, rn, rm));
DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}
-
-void Assembler::sdiv(const Register& rd,
- const Register& rn,
+void Assembler::sdiv(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}
-
-void Assembler::udiv(const Register& rd,
- const Register& rn,
+void Assembler::udiv(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}
-
-void Assembler::rbit(const Register& rd,
- const Register& rn) {
+void Assembler::rbit(const Register& rd, const Register& rn) {
DataProcessing1Source(rd, rn, RBIT);
}
-
-void Assembler::rev16(const Register& rd,
- const Register& rn) {
+void Assembler::rev16(const Register& rd, const Register& rn) {
DataProcessing1Source(rd, rn, REV16);
}
-
-void Assembler::rev32(const Register& rd,
- const Register& rn) {
+void Assembler::rev32(const Register& rd, const Register& rn) {
DCHECK(rd.Is64Bits());
DataProcessing1Source(rd, rn, REV);
}
-
-void Assembler::rev(const Register& rd,
- const Register& rn) {
+void Assembler::rev(const Register& rd, const Register& rn) {
DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}
-
-void Assembler::clz(const Register& rd,
- const Register& rn) {
+void Assembler::clz(const Register& rd, const Register& rn) {
DataProcessing1Source(rd, rn, CLZ);
}
-
-void Assembler::cls(const Register& rd,
- const Register& rn) {
+void Assembler::cls(const Register& rd, const Register& rn) {
DataProcessing1Source(rd, rn, CLS);
}
-
-void Assembler::ldp(const CPURegister& rt,
- const CPURegister& rt2,
+void Assembler::ldp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src) {
LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
}
-
-void Assembler::stp(const CPURegister& rt,
- const CPURegister& rt2,
+void Assembler::stp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& dst) {
LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
}
-
-void Assembler::ldpsw(const Register& rt,
- const Register& rt2,
+void Assembler::ldpsw(const Register& rt, const Register& rt2,
const MemOperand& src) {
DCHECK(rt.Is64Bits());
LoadStorePair(rt, rt2, src, LDPSW_x);
}
-
-void Assembler::LoadStorePair(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairOp op) {
+void Assembler::LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& addr, LoadStorePairOp op) {
// 'rt' and 'rt2' can only be aliased for stores.
DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
@@ -1637,54 +1432,44 @@ void Assembler::LoadStorePair(const CPURegister& rt,
Emit(addrmodeop | memop);
}
-
// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
LoadStore(rt, src, LDRB_w);
}
-
void Assembler::strb(const Register& rt, const MemOperand& dst) {
LoadStore(rt, dst, STRB_w);
}
-
void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
}
-
void Assembler::ldrh(const Register& rt, const MemOperand& src) {
LoadStore(rt, src, LDRH_w);
}
-
void Assembler::strh(const Register& rt, const MemOperand& dst) {
LoadStore(rt, dst, STRH_w);
}
-
void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
}
-
void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
LoadStore(rt, src, LoadOpFor(rt));
}
-
void Assembler::str(const CPURegister& rt, const MemOperand& src) {
LoadStore(rt, src, StoreOpFor(rt));
}
-
void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
DCHECK(rt.Is64Bits());
LoadStore(rt, src, LDRSW_x);
}
-
void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
// The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
// constant pool. It should not be emitted.
@@ -1697,14 +1482,14 @@ Operand Operand::EmbeddedNumber(double number) {
if (DoubleToSmiInteger(number, &smi)) {
return Operand(Immediate(Smi::FromInt(smi)));
}
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.heap_object_request_.emplace(number);
DCHECK(result.IsHeapObjectRequest());
return result;
}
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.heap_object_request_.emplace(str);
DCHECK(result.IsHeapObjectRequest());
return result;
@@ -4066,7 +3851,6 @@ void Assembler::EmitStringData(const char* string) {
EmitData(pad, RoundUp(pc_offset(), kInstrSize) - pc_offset());
}
-
void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
if (options().enable_simulator_code) {
@@ -4102,11 +3886,8 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
}
}
-
-void Assembler::Logical(const Register& rd,
- const Register& rn,
- const Operand& operand,
- LogicalOp op) {
+void Assembler::Logical(const Register& rd, const Register& rn,
+ const Operand& operand, LogicalOp op) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(!operand.NeedsRelocation(this));
if (operand.IsImmediate()) {
@@ -4139,12 +3920,8 @@ void Assembler::Logical(const Register& rd,
}
}
-
-void Assembler::LogicalImmediate(const Register& rd,
- const Register& rn,
- unsigned n,
- unsigned imm_s,
- unsigned imm_r,
+void Assembler::LogicalImmediate(const Register& rd, const Register& rn,
+ unsigned n, unsigned imm_s, unsigned imm_r,
LogicalOp op) {
unsigned reg_size = rd.SizeInBits();
Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
@@ -4153,11 +3930,8 @@ void Assembler::LogicalImmediate(const Register& rd,
Rn(rn));
}
-
-void Assembler::ConditionalCompare(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond,
+void Assembler::ConditionalCompare(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond,
ConditionalCompareOp op) {
Instr ccmpop;
DCHECK(!operand.NeedsRelocation(this));
@@ -4173,9 +3947,7 @@ void Assembler::ConditionalCompare(const Register& rn,
Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
}
-
-void Assembler::DataProcessing1Source(const Register& rd,
- const Register& rn,
+void Assembler::DataProcessing1Source(const Register& rd, const Register& rn,
DataProcessing1SourceOp op) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
Emit(SF(rn) | op | Rn(rn) | Rd(rd));
@@ -4251,9 +4023,7 @@ void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8,
Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
}
-void Assembler::EmitShift(const Register& rd,
- const Register& rn,
- Shift shift,
+void Assembler::EmitShift(const Register& rd, const Register& rn, Shift shift,
unsigned shift_amount) {
switch (shift) {
case LSL:
@@ -4273,11 +4043,8 @@ void Assembler::EmitShift(const Register& rd,
}
}
-
-void Assembler::EmitExtendShift(const Register& rd,
- const Register& rn,
- Extend extend,
- unsigned left_shift) {
+void Assembler::EmitExtendShift(const Register& rd, const Register& rn,
+ Extend extend, unsigned left_shift) {
DCHECK(rd.SizeInBits() >= rn.SizeInBits());
unsigned reg_size = rd.SizeInBits();
// Use the correct size of register.
@@ -4291,10 +4058,14 @@ void Assembler::EmitExtendShift(const Register& rd,
switch (extend) {
case UXTB:
case UXTH:
- case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
+ case UXTW:
+ ubfm(rd, rn_, non_shift_bits, high_bit);
+ break;
case SXTB:
case SXTH:
- case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
+ case SXTW:
+ sbfm(rd, rn_, non_shift_bits, high_bit);
+ break;
case UXTX:
case SXTX: {
DCHECK_EQ(rn.SizeInBits(), kXRegSizeInBits);
@@ -4302,7 +4073,8 @@ void Assembler::EmitExtendShift(const Register& rd,
lsl(rd, rn_, left_shift);
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
} else {
// No need to extend as the extended bits would be shifted away.
@@ -4310,25 +4082,19 @@ void Assembler::EmitExtendShift(const Register& rd,
}
}
-
-void Assembler::DataProcShiftedRegister(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
+void Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S,
Instr op) {
DCHECK(operand.IsShiftedRegister());
DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
DCHECK(!operand.NeedsRelocation(this));
- Emit(SF(rd) | op | Flags(S) |
- ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
- Rm(operand.reg()) | Rn(rn) | Rd(rd));
+ Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.shift()) |
+ ImmDPShift(operand.shift_amount()) | Rm(operand.reg()) | Rn(rn) |
+ Rd(rd));
}
-
-void Assembler::DataProcExtendedRegister(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
+void Assembler::DataProcExtendedRegister(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S,
Instr op) {
DCHECK(!operand.NeedsRelocation(this));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
@@ -4337,14 +4103,12 @@ void Assembler::DataProcExtendedRegister(const Register& rd,
dest_reg | RnSP(rn));
}
-
bool Assembler::IsImmAddSub(int64_t immediate) {
return is_uint12(immediate) ||
(is_uint12(immediate >> 12) && ((immediate & 0xFFF) == 0));
}
-void Assembler::LoadStore(const CPURegister& rt,
- const MemOperand& addr,
+void Assembler::LoadStore(const CPURegister& rt, const MemOperand& addr,
LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base());
@@ -4397,10 +4161,7 @@ void Assembler::LoadStore(const CPURegister& rt,
}
}
-
-bool Assembler::IsImmLSUnscaled(int64_t offset) {
- return is_int9(offset);
-}
+bool Assembler::IsImmLSUnscaled(int64_t offset) { return is_int9(offset); }
bool Assembler::IsImmLSScaled(int64_t offset, unsigned size) {
bool offset_is_size_multiple = (((offset >> size) << size) == offset);
@@ -4412,7 +4173,6 @@ bool Assembler::IsImmLSPair(int64_t offset, unsigned size) {
return offset_is_size_multiple && is_int7(offset >> size);
}
-
bool Assembler::IsImmLLiteral(int64_t offset) {
int inst_size = static_cast<int>(kInstrSizeLog2);
bool offset_is_inst_multiple =
@@ -4422,7 +4182,6 @@ bool Assembler::IsImmLLiteral(int64_t offset) {
return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
}
-
// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
@@ -4430,11 +4189,8 @@ bool Assembler::IsImmLLiteral(int64_t offset) {
// by the corresponding fields in the logical instruction.
// If it can not be encoded, the function returns false, and the values pointed
// to by n, imm_s and imm_r are undefined.
-bool Assembler::IsImmLogical(uint64_t value,
- unsigned width,
- unsigned* n,
- unsigned* imm_s,
- unsigned* imm_r) {
+bool Assembler::IsImmLogical(uint64_t value, unsigned width, unsigned* n,
+ unsigned* imm_s, unsigned* imm_r) {
DCHECK((n != nullptr) && (imm_s != nullptr) && (imm_r != nullptr));
DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
@@ -4568,12 +4324,8 @@ bool Assembler::IsImmLogical(uint64_t value,
// (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
// be derived using a table lookup on CLZ(d).
static const uint64_t multipliers[] = {
- 0x0000000000000001UL,
- 0x0000000100000001UL,
- 0x0001000100010001UL,
- 0x0101010101010101UL,
- 0x1111111111111111UL,
- 0x5555555555555555UL,
+ 0x0000000000000001UL, 0x0000000100000001UL, 0x0001000100010001UL,
+ 0x0101010101010101UL, 0x1111111111111111UL, 0x5555555555555555UL,
};
int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
// Ensure that the index to the multipliers array is within bounds.
@@ -4631,12 +4383,10 @@ bool Assembler::IsImmLogical(uint64_t value,
return true;
}
-
bool Assembler::IsImmConditionalCompare(int64_t immediate) {
return is_uint5(immediate);
}
-
bool Assembler::IsImmFP32(float imm) {
// Valid values will have the form:
// aBbb.bbbc.defg.h000.0000.0000.0000.0000
@@ -4660,7 +4410,6 @@ bool Assembler::IsImmFP32(float imm) {
return true;
}
-
bool Assembler::IsImmFP64(double imm) {
// Valid values will have the form:
// aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
@@ -4685,7 +4434,6 @@ bool Assembler::IsImmFP64(double imm) {
return true;
}
-
void Assembler::GrowBuffer() {
// Compute new buffer size.
int old_size = buffer_->size();
@@ -4792,7 +4540,6 @@ void Assembler::BlockConstPoolFor(int instructions) {
}
}
-
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Some short sequence of instruction mustn't be broken up by constant pool
// emission, such sequences are protected by calls to BlockConstPoolFor and
@@ -4804,7 +4551,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// There is nothing to do if there are no pending constant pool entries.
- if (constpool_.IsEmpty()) {
+ if (constpool_.IsEmpty()) {
// Calculate the offset of the next check.
SetNextConstPoolCheckIn(kCheckConstPoolInterval);
return;
@@ -4817,18 +4564,15 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
int dist = constpool_.DistanceToFirstUse();
int count = constpool_.EntryCount();
- if (!force_emit &&
- (dist < kApproxMaxDistToConstPool) &&
+ if (!force_emit && (dist < kApproxMaxDistToConstPool) &&
(count < kApproxMaxPoolEntryCount)) {
return;
}
-
// Emit veneers for branches that would go out of range during emission of the
// constant pool.
int worst_case_size = constpool_.WorstCaseSize();
- CheckVeneerPool(false, require_jump,
- kVeneerDistanceMargin + worst_case_size);
+ CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + worst_case_size);
// Check that the code buffer is large enough before emitting the constant
// pool (this includes the gap to the relocation information).
@@ -4848,22 +4592,20 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
SetNextConstPoolCheckIn(kCheckConstPoolInterval);
}
-
bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
// Account for the branch around the veneers and the guard.
int protection_offset = 2 * kInstrSize;
- return pc_offset() > max_reachable_pc - margin - protection_offset -
- static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+ return pc_offset() >
+ max_reachable_pc - margin - protection_offset -
+ static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}
-
void Assembler::RecordVeneerPool(int location_offset, int size) {
RelocInfo rinfo(reinterpret_cast<Address>(buffer_start_) + location_offset,
RelocInfo::VENEER_POOL, static_cast<intptr_t>(size), Code());
reloc_info_writer.Write(&rinfo);
}
-
void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
BlockPoolsScope scope(this);
RecordComment("[ Veneers");
@@ -4926,7 +4668,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
next_veneer_pool_check_ = kMaxInt;
} else {
next_veneer_pool_check_ =
- unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
}
bind(&end);
@@ -4934,11 +4676,10 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
RecordComment("]");
}
-
void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
int margin) {
// There is nothing to do if there are no pending veneer pool entries.
- if (unresolved_branches_.empty()) {
+ if (unresolved_branches_.empty()) {
DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
return;
}
@@ -4961,23 +4702,20 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
EmitVeneers(force_emit, require_jump, margin);
} else {
next_veneer_pool_check_ =
- unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
}
}
-
int Assembler::buffer_space() const {
return static_cast<int>(reloc_info_writer.pos() - pc_);
}
-
void Assembler::RecordConstPool(int size) {
// We only need this for debugger support, to correctly compute offsets in the
// code.
RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
-
void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
// The code at the current instruction should be:
// adr rd, 0
@@ -4994,8 +4732,7 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
}
Instruction* expected_movz =
InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstrSize);
- CHECK(expected_movz->IsMovz() &&
- (expected_movz->ImmMoveWide() == 0) &&
+ CHECK(expected_movz->IsMovz() && (expected_movz->ImmMoveWide() == 0) &&
(expected_movz->ShiftMoveWide() == 0));
int scratch_code = expected_movz->Rd();
@@ -5020,6 +4757,17 @@ void PatchingAssembler::PatchSubSp(uint32_t immediate) {
sub(sp, sp, immediate);
}
+#undef NEON_3DIFF_LONG_LIST
+#undef NEON_3DIFF_HN_LIST
+#undef NEON_ACROSSLANES_LIST
+#undef NEON_FP2REGMISC_FCVT_LIST
+#undef NEON_FP2REGMISC_LIST
+#undef NEON_3SAME_LIST
+#undef NEON_FP3SAME_LIST_V2
+#undef NEON_BYELEMENT_LIST
+#undef NEON_FPBYELEMENT_LIST
+#undef NEON_BYELEMENT_LONG_LIST
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h
index 7dd97809e3..04cd422241 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/assembler-arm64.h
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
-#define V8_ARM64_ASSEMBLER_ARM64_H_
+#ifndef V8_CODEGEN_ARM64_ASSEMBLER_ARM64_H_
+#define V8_CODEGEN_ARM64_ASSEMBLER_ARM64_H_
#include <deque>
#include <list>
#include <map>
#include <vector>
-#include "src/arm64/constants-arm64.h"
-#include "src/arm64/instructions-arm64.h"
-#include "src/arm64/register-arm64.h"
-#include "src/assembler.h"
#include "src/base/optional.h"
-#include "src/constant-pool.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/codegen/arm64/constants-arm64.h"
+#include "src/codegen/arm64/instructions-arm64.h"
+#include "src/codegen/arm64/register-arm64.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/constant-pool.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
// Windows arm64 SDK defines mvn to NEON intrinsic neon_not which will not
// be used here.
@@ -34,15 +34,15 @@ class SafepointTableBuilder;
// Immediates.
class Immediate {
public:
- template<typename T>
+ template <typename T>
inline explicit Immediate(Handle<T> handle);
// This is allowed to be an implicit constructor because Immediate is
// a wrapper class that doesn't normally perform any type conversion.
- template<typename T>
+ template <typename T>
inline Immediate(T value); // NOLINT(runtime/explicit)
- template<typename T>
+ template <typename T>
inline Immediate(T value, RelocInfo::Mode rmode);
int64_t value() const { return value_; }
@@ -55,7 +55,6 @@ class Immediate {
RelocInfo::Mode rmode_;
};
-
// -----------------------------------------------------------------------------
// Operands.
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -71,16 +70,13 @@ class Operand {
// <shift_amount> is uint6_t.
// This is allowed to be an implicit constructor because Operand is
// a wrapper class that doesn't normally perform any type conversion.
- inline Operand(Register reg,
- Shift shift = LSL,
+ inline Operand(Register reg, Shift shift = LSL,
unsigned shift_amount = 0); // NOLINT(runtime/explicit)
// rm, <extend> {#<shift_amount>}
// where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
// <shift_amount> is uint2_t.
- inline Operand(Register reg,
- Extend extend,
- unsigned shift_amount = 0);
+ inline Operand(Register reg, Extend extend, unsigned shift_amount = 0);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedStringConstant(const StringConstantBase* str);
@@ -89,15 +85,15 @@ class Operand {
inline HeapObjectRequest heap_object_request() const;
inline Immediate immediate_for_heap_object_request() const;
- template<typename T>
+ template <typename T>
inline explicit Operand(Handle<T> handle);
// Implicit constructor for all int types, ExternalReference, and Smi.
- template<typename T>
+ template <typename T>
inline Operand(T t); // NOLINT(runtime/explicit)
// Implicit constructor for int types.
- template<typename T>
+ template <typename T>
inline Operand(T t, RelocInfo::Mode rmode);
inline bool IsImmediate() const;
@@ -120,10 +116,6 @@ class Operand {
// Relocation information.
bool NeedsRelocation(const Assembler* assembler) const;
- // Helpers
- inline static Operand UntagSmi(Register smi);
- inline static Operand UntagSmiAndScale(Register smi, int scale);
-
private:
base::Optional<HeapObjectRequest> heap_object_request_;
Immediate immediate_;
@@ -133,24 +125,17 @@ class Operand {
unsigned shift_amount_;
};
-
// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
public:
inline MemOperand();
- inline explicit MemOperand(Register base,
- int64_t offset = 0,
+ inline explicit MemOperand(Register base, int64_t offset = 0,
AddrMode addrmode = Offset);
- inline explicit MemOperand(Register base,
- Register regoffset,
- Shift shift = LSL,
+ inline explicit MemOperand(Register base, Register regoffset,
+ Shift shift = LSL, unsigned shift_amount = 0);
+ inline explicit MemOperand(Register base, Register regoffset, Extend extend,
unsigned shift_amount = 0);
- inline explicit MemOperand(Register base,
- Register regoffset,
- Extend extend,
- unsigned shift_amount = 0);
- inline explicit MemOperand(Register base,
- const Operand& offset,
+ inline explicit MemOperand(Register base, const Operand& offset,
AddrMode addrmode = Offset);
const Register& base() const { return base_; }
@@ -170,9 +155,9 @@ class MemOperand {
inline Operand OffsetAsOperand() const;
enum PairResult {
- kNotPair, // Can't use a pair instruction.
- kPairAB, // Can use a pair instruction (operandA has lower address).
- kPairBA // Can use a pair instruction (operandB has lower address).
+ kNotPair, // Can't use a pair instruction.
+ kPairAB, // Can use a pair instruction (operandA has lower address).
+ kPairBA // Can use a pair instruction (operandB has lower address).
};
// Check if two MemOperand are consistent for stp/ldp use.
static PairResult AreConsistentForPair(const MemOperand& operandA,
@@ -189,7 +174,6 @@ class MemOperand {
unsigned shift_amount_;
};
-
class ConstPool {
public:
explicit ConstPool(Assembler* assm) : assm_(assm), first_use_(-1) {}
@@ -219,7 +203,7 @@ class ConstPool {
void EmitGuard();
void EmitEntries();
- typedef std::map<uint64_t, int> SharedEntryMap;
+ using SharedEntryMap = std::map<uint64_t, int>;
// Adds a shared entry to entries_, using 'entry_map' to determine whether we
// already track this entry. Returns true if this is the first time we add
// this entry, false otherwise.
@@ -244,7 +228,6 @@ class ConstPool {
std::vector<std::pair<uint64_t, std::vector<int> > > entries_;
};
-
// -----------------------------------------------------------------------------
// Assembler.
@@ -302,7 +285,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// before they go out of scope.
void bind(Label* label);
-
// RelocInfo and pools ------------------------------------------------------
// Record relocation information for current pc_.
@@ -338,6 +320,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// This might need to be temporarily encoded as an offset into code_targets_.
inline Handle<Code> code_target_object_handle_at(Address pc);
+ inline Handle<HeapObject> compressed_embedded_object_handle_at(Address pc);
+
// Returns the target address for a runtime function for the call encoded
// at 'pc'.
// Runtime entries can be temporarily encoded as the offset between the
@@ -346,10 +330,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// before it is moved into the code space.
inline Address runtime_entry_at(Address pc);
- // Return the code target address at a call site from the return address of
- // that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
// This sets the branch destination. 'location' here can be either the pc of
// an immediate branch or the address of an entry in the constant pool.
// This is for calls and branches within generated code.
@@ -508,90 +488,62 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Data Processing instructions.
// Add.
- void add(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void add(const Register& rd, const Register& rn, const Operand& operand);
// Add and update status flags.
- void adds(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void adds(const Register& rd, const Register& rn, const Operand& operand);
// Compare negative.
void cmn(const Register& rn, const Operand& operand);
// Subtract.
- void sub(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void sub(const Register& rd, const Register& rn, const Operand& operand);
// Subtract and update status flags.
- void subs(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void subs(const Register& rd, const Register& rn, const Operand& operand);
// Compare.
void cmp(const Register& rn, const Operand& operand);
// Negate.
- void neg(const Register& rd,
- const Operand& operand);
+ void neg(const Register& rd, const Operand& operand);
// Negate and update status flags.
- void negs(const Register& rd,
- const Operand& operand);
+ void negs(const Register& rd, const Operand& operand);
// Add with carry bit.
- void adc(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void adc(const Register& rd, const Register& rn, const Operand& operand);
// Add with carry bit and update status flags.
- void adcs(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void adcs(const Register& rd, const Register& rn, const Operand& operand);
// Subtract with carry bit.
- void sbc(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void sbc(const Register& rd, const Register& rn, const Operand& operand);
// Subtract with carry bit and update status flags.
- void sbcs(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void sbcs(const Register& rd, const Register& rn, const Operand& operand);
// Negate with carry bit.
- void ngc(const Register& rd,
- const Operand& operand);
+ void ngc(const Register& rd, const Operand& operand);
// Negate with carry bit and update status flags.
- void ngcs(const Register& rd,
- const Operand& operand);
+ void ngcs(const Register& rd, const Operand& operand);
// Logical instructions.
// Bitwise and (A & B).
- void and_(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void and_(const Register& rd, const Register& rn, const Operand& operand);
// Bitwise and (A & B) and update status flags.
- void ands(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void ands(const Register& rd, const Register& rn, const Operand& operand);
// Bit test, and set flags.
void tst(const Register& rn, const Operand& operand);
// Bit clear (A & ~B).
- void bic(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void bic(const Register& rd, const Register& rn, const Operand& operand);
// Bit clear (A & ~B) and update status flags.
- void bics(const Register& rd,
- const Register& rn,
- const Operand& operand);
+ void bics(const Register& rd, const Register& rn, const Operand& operand);
// Bitwise and.
void and_(const VRegister& vd, const VRegister& vn, const VRegister& vm);
@@ -756,19 +708,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Signed extend byte.
- void sxtb(const Register& rd, const Register& rn) {
- sbfm(rd, rn, 0, 7);
- }
+ void sxtb(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 7); }
// Signed extend halfword.
- void sxth(const Register& rd, const Register& rn) {
- sbfm(rd, rn, 0, 15);
- }
+ void sxth(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 15); }
// Signed extend word.
- void sxtw(const Register& rd, const Register& rn) {
- sbfm(rd, rn, 0, 31);
- }
+ void sxtw(const Register& rd, const Register& rn) { sbfm(rd, rn, 0, 31); }
// Ubfm aliases.
// Logical shift left.
@@ -799,46 +745,32 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Unsigned extend byte.
- void uxtb(const Register& rd, const Register& rn) {
- ubfm(rd, rn, 0, 7);
- }
+ void uxtb(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 7); }
// Unsigned extend halfword.
- void uxth(const Register& rd, const Register& rn) {
- ubfm(rd, rn, 0, 15);
- }
+ void uxth(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 15); }
// Unsigned extend word.
- void uxtw(const Register& rd, const Register& rn) {
- ubfm(rd, rn, 0, 31);
- }
+ void uxtw(const Register& rd, const Register& rn) { ubfm(rd, rn, 0, 31); }
// Extract.
void extr(const Register& rd, const Register& rn, const Register& rm,
int lsb);
// Conditional select: rd = cond ? rn : rm.
- void csel(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void csel(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
// Conditional select increment: rd = cond ? rn : rm + 1.
- void csinc(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void csinc(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
// Conditional select inversion: rd = cond ? rn : ~rm.
- void csinv(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void csinv(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
// Conditional select negation: rd = cond ? rn : -rm.
- void csneg(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void csneg(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
// Conditional set: rd = cond ? 1 : 0.
@@ -863,15 +795,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Conditional comparison.
// Conditional compare negative.
- void ccmn(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
+ void ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
// Conditional compare.
- void ccmp(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
+ void ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
// Multiplication.
@@ -879,18 +807,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void mul(const Register& rd, const Register& rn, const Register& rm);
// 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
- void madd(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void madd(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
void mneg(const Register& rd, const Register& rn, const Register& rm);
// 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
- void msub(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void msub(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// 32 x 32 -> 64-bit multiply.
@@ -900,27 +824,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void smulh(const Register& rd, const Register& rn, const Register& rm);
// Signed 32 x 32 -> 64-bit multiply and accumulate.
- void smaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void smaddl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// Unsigned 32 x 32 -> 64-bit multiply and accumulate.
- void umaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void umaddl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// Signed 32 x 32 -> 64-bit multiply and subtract.
- void smsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void smsubl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// Unsigned 32 x 32 -> 64-bit multiply and subtract.
- void umsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
+ void umsubl(const Register& rd, const Register& rn, const Register& rm,
const Register& ra);
// Signed integer divide.
@@ -2301,11 +2217,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline static Instr Nzcv(StatusFlags nzcv);
static bool IsImmAddSub(int64_t immediate);
- static bool IsImmLogical(uint64_t value,
- unsigned width,
- unsigned* n,
- unsigned* imm_s,
- unsigned* imm_r);
+ static bool IsImmLogical(uint64_t value, unsigned width, unsigned* n,
+ unsigned* imm_s, unsigned* imm_r);
// MemOperand offset encoding.
inline static Instr ImmLSUnsigned(int imm12);
@@ -2498,9 +2411,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockConstPool();
}
- ~BlockConstPoolScope() {
- assem_->EndBlockConstPool();
- }
+ ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
private:
Assembler* assem_;
@@ -2544,9 +2455,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockPools();
}
- ~BlockPoolsScope() {
- assem_->EndBlockPools();
- }
+ ~BlockPoolsScope() { assem_->EndBlockPools(); }
private:
Assembler* assem_;
@@ -2557,9 +2466,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
- void LoadStore(const CPURegister& rt,
- const MemOperand& addr,
- LoadStoreOp op);
+ void LoadStore(const CPURegister& rt, const MemOperand& addr, LoadStoreOp op);
void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op);
void LoadStoreStruct(const VRegister& vt, const MemOperand& addr,
@@ -2577,46 +2484,29 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsImmLSPair(int64_t offset, unsigned size);
- void Logical(const Register& rd,
- const Register& rn,
- const Operand& operand,
+ void Logical(const Register& rd, const Register& rn, const Operand& operand,
LogicalOp op);
- void LogicalImmediate(const Register& rd,
- const Register& rn,
- unsigned n,
- unsigned imm_s,
- unsigned imm_r,
- LogicalOp op);
-
- void ConditionalCompare(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond,
+ void LogicalImmediate(const Register& rd, const Register& rn, unsigned n,
+ unsigned imm_s, unsigned imm_r, LogicalOp op);
+
+ void ConditionalCompare(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond,
ConditionalCompareOp op);
static bool IsImmConditionalCompare(int64_t immediate);
- void AddSubWithCarry(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
+ void AddSubWithCarry(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S,
AddSubWithCarryOp op);
// Functions for emulating operands not directly supported by the instruction
// set.
- void EmitShift(const Register& rd,
- const Register& rn,
- Shift shift,
+ void EmitShift(const Register& rd, const Register& rn, Shift shift,
unsigned amount);
- void EmitExtendShift(const Register& rd,
- const Register& rn,
- Extend extend,
+ void EmitExtendShift(const Register& rd, const Register& rn, Extend extend,
unsigned left_shift);
- void AddSub(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- AddSubOp op);
+ void AddSub(const Register& rd, const Register& rn, const Operand& operand,
+ FlagsUpdate S, AddSubOp op);
static bool IsImmFP32(float imm);
static bool IsImmFP64(double imm);
@@ -2642,32 +2532,20 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static uint32_t FPToImm8(double imm);
// Instruction helpers.
- void MoveWide(const Register& rd,
- uint64_t imm,
- int shift,
+ void MoveWide(const Register& rd, uint64_t imm, int shift,
MoveWideImmediateOp mov_op);
- void DataProcShiftedRegister(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
- Instr op);
- void DataProcExtendedRegister(const Register& rd,
- const Register& rn,
- const Operand& operand,
- FlagsUpdate S,
+ void DataProcShiftedRegister(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S, Instr op);
+ void DataProcExtendedRegister(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S,
Instr op);
- void ConditionalSelect(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond,
+ void ConditionalSelect(const Register& rd, const Register& rn,
+ const Register& rm, Condition cond,
ConditionalSelectOp op);
- void DataProcessing1Source(const Register& rd,
- const Register& rn,
+ void DataProcessing1Source(const Register& rd, const Register& rn,
DataProcessing1SourceOp op);
- void DataProcessing3Source(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra,
+ void DataProcessing3Source(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra,
DataProcessing3SourceOp op);
void FPDataProcessing1Source(const VRegister& fd, const VRegister& fn,
FPDataProcessing1SourceOp op);
@@ -2742,7 +2620,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kStartOfLabelLinkChain = 0;
// Verify that a label's link chain is intact.
- void CheckLabelLinkChain(Label const * label);
+ void CheckLabelLinkChain(Label const* label);
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -2765,7 +2643,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Emit data inline in the instruction stream.
- void EmitData(void const * data, unsigned size) {
+ void EmitData(void const* data, unsigned size) {
DCHECK_EQ(sizeof(*pc_), 1);
DCHECK_LE(pc_ + size, buffer_start_ + buffer_->size());
@@ -2812,7 +2690,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
- int no_const_pool_before_; // Block emission before this pc offset.
+ int no_const_pool_before_; // Block emission before this pc offset.
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -2968,12 +2846,10 @@ class PatchingAssembler : public Assembler {
class EnsureSpace {
public:
- explicit EnsureSpace(Assembler* assembler) {
- assembler->CheckBufferSpace();
- }
+ explicit EnsureSpace(Assembler* assembler) { assembler->CheckBufferSpace(); }
};
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_ASSEMBLER_ARM64_H_
+#endif // V8_CODEGEN_ARM64_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h
index c93aad9f61..eb3fb3a6be 100644
--- a/deps/v8/src/arm64/constants-arm64.h
+++ b/deps/v8/src/codegen/arm64/constants-arm64.h
@@ -2,31 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_CONSTANTS_ARM64_H_
-#define V8_ARM64_CONSTANTS_ARM64_H_
+#ifndef V8_CODEGEN_ARM64_CONSTANTS_ARM64_H_
+#define V8_CODEGEN_ARM64_CONSTANTS_ARM64_H_
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
// Assert that this is an LP64 system, or LLP64 on Windows.
STATIC_ASSERT(sizeof(int) == sizeof(int32_t));
#if defined(V8_OS_WIN)
STATIC_ASSERT(sizeof(1L) == sizeof(int32_t));
#else
-STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
+STATIC_ASSERT(sizeof(long) == sizeof(int64_t)); // NOLINT(runtime/int)
STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
#endif
-STATIC_ASSERT(sizeof(void *) == sizeof(int64_t));
+STATIC_ASSERT(sizeof(void*) == sizeof(int64_t));
STATIC_ASSERT(sizeof(1) == sizeof(int32_t));
-
// Get the standard printf format macros for C99 stdint types.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
-
namespace v8 {
namespace internal {
@@ -148,7 +146,7 @@ const unsigned kFloat16ExponentBias = 15;
// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 256;
-typedef uint16_t float16;
+using float16 = uint16_t;
#define INSTRUCTION_FIELDS_LIST(V_) \
/* Register fields */ \
@@ -269,29 +267,28 @@ typedef uint16_t float16;
V_(ImmNEONImmh, 22, 19, Bits) \
V_(ImmNEONImmb, 18, 16, Bits)
-#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
-/* NZCV */ \
-V_(Flags, 31, 28, Bits, uint32_t) \
-V_(N, 31, 31, Bits, bool) \
-V_(Z, 30, 30, Bits, bool) \
-V_(C, 29, 29, Bits, bool) \
-V_(V, 28, 28, Bits, bool) \
-M_(NZCV, Flags_mask) \
- \
-/* FPCR */ \
-V_(AHP, 26, 26, Bits, bool) \
-V_(DN, 25, 25, Bits, bool) \
-V_(FZ, 24, 24, Bits, bool) \
-V_(RMode, 23, 22, Bits, FPRounding) \
-M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
-
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+ /* NZCV */ \
+ V_(Flags, 31, 28, Bits, uint32_t) \
+ V_(N, 31, 31, Bits, bool) \
+ V_(Z, 30, 30, Bits, bool) \
+ V_(C, 29, 29, Bits, bool) \
+ V_(V, 28, 28, Bits, bool) \
+ M_(NZCV, Flags_mask) \
+ \
+ /* FPCR */ \
+ V_(AHP, 26, 26, Bits, bool) \
+ V_(DN, 25, 25, Bits, bool) \
+ V_(FZ, 24, 24, Bits, bool) \
+ V_(RMode, 23, 22, Bits, FPRounding) \
+ M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
// Fields offsets.
-#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2) \
- const int Name##_offset = LowBit; \
- const int Name##_width = HighBit - LowBit + 1; \
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2) \
+ const int Name##_offset = LowBit; \
+ const int Name##_width = HighBit - LowBit + 1; \
const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
-#define DECLARE_INSTRUCTION_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1) \
+#define DECLARE_INSTRUCTION_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1) \
DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2)
INSTRUCTION_FIELDS_LIST(DECLARE_INSTRUCTION_FIELDS_OFFSETS)
SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
@@ -306,8 +303,10 @@ const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
enum Condition {
eq = 0,
ne = 1,
- hs = 2, cs = hs,
- lo = 3, cc = lo,
+ hs = 2,
+ cs = hs,
+ lo = 3,
+ cc = lo,
mi = 4,
pl = 5,
vs = 6,
@@ -329,36 +328,33 @@ inline Condition NegateCondition(Condition cond) {
return static_cast<Condition>(cond ^ 1);
}
-enum FlagsUpdate {
- SetFlags = 1,
- LeaveFlags = 0
-};
+enum FlagsUpdate { SetFlags = 1, LeaveFlags = 0 };
enum StatusFlags {
- NoFlag = 0,
+ NoFlag = 0,
// Derive the flag combinations from the system register bit descriptions.
- NFlag = N_mask,
- ZFlag = Z_mask,
- CFlag = C_mask,
- VFlag = V_mask,
- NZFlag = NFlag | ZFlag,
- NCFlag = NFlag | CFlag,
- NVFlag = NFlag | VFlag,
- ZCFlag = ZFlag | CFlag,
- ZVFlag = ZFlag | VFlag,
- CVFlag = CFlag | VFlag,
- NZCFlag = NFlag | ZFlag | CFlag,
- NZVFlag = NFlag | ZFlag | VFlag,
- NCVFlag = NFlag | CFlag | VFlag,
- ZCVFlag = ZFlag | CFlag | VFlag,
- NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
// Floating-point comparison results.
- FPEqualFlag = ZCFlag,
- FPLessThanFlag = NFlag,
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
FPGreaterThanFlag = CFlag,
- FPUnorderedFlag = CVFlag
+ FPUnorderedFlag = CVFlag
};
enum Shift {
@@ -372,14 +368,14 @@ enum Shift {
enum Extend {
NO_EXTEND = -1,
- UXTB = 0,
- UXTH = 1,
- UXTW = 2,
- UXTX = 3,
- SXTB = 4,
- SXTH = 5,
- SXTW = 6,
- SXTX = 7
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
};
enum SystemHint {
@@ -394,32 +390,28 @@ enum SystemHint {
enum BarrierDomain {
OuterShareable = 0,
- NonShareable = 1,
+ NonShareable = 1,
InnerShareable = 2,
- FullSystem = 3
+ FullSystem = 3
};
enum BarrierType {
- BarrierOther = 0,
- BarrierReads = 1,
+ BarrierOther = 0,
+ BarrierReads = 1,
BarrierWrites = 2,
- BarrierAll = 3
+ BarrierAll = 3
};
// System/special register names.
// This information is not encoded as one field but as the concatenation of
// multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
enum SystemRegister {
- NZCV = ((0x1 << SysO0_offset) |
- (0x3 << SysOp1_offset) |
- (0x4 << CRn_offset) |
- (0x2 << CRm_offset) |
- (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset,
- FPCR = ((0x1 << SysO0_offset) |
- (0x3 << SysOp1_offset) |
- (0x4 << CRn_offset) |
- (0x4 << CRm_offset) |
- (0x0 << SysOp2_offset)) >> ImmSystemRegister_offset
+ NZCV = ((0x1 << SysO0_offset) | (0x3 << SysOp1_offset) | (0x4 << CRn_offset) |
+ (0x2 << CRm_offset) | (0x0 << SysOp2_offset)) >>
+ ImmSystemRegister_offset,
+ FPCR = ((0x1 << SysO0_offset) | (0x3 << SysOp1_offset) | (0x4 << CRn_offset) |
+ (0x4 << CRm_offset) | (0x0 << SysOp2_offset)) >>
+ ImmSystemRegister_offset
};
// Instruction enumerations.
@@ -448,10 +440,10 @@ const uint32_t kUnallocatedInstruction = 0xffffffff;
// Generic fields.
enum GenericInstrField : uint32_t {
- SixtyFourBits = 0x80000000,
- ThirtyTwoBits = 0x00000000,
- FP32 = 0x00000000,
- FP64 = 0x00400000
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
};
enum NEONFormatField : uint32_t {
@@ -499,153 +491,148 @@ enum NEONScalarFormatField : uint32_t {
enum PCRelAddressingOp : uint32_t {
PCRelAddressingFixed = 0x10000000,
PCRelAddressingFMask = 0x1F000000,
- PCRelAddressingMask = 0x9F000000,
- ADR = PCRelAddressingFixed | 0x00000000,
- ADRP = PCRelAddressingFixed | 0x80000000
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
};
// Add/sub (immediate, shifted and extended.)
const int kSFOffset = 31;
enum AddSubOp : uint32_t {
- AddSubOpMask = 0x60000000,
+ AddSubOpMask = 0x60000000,
AddSubSetFlagsBit = 0x20000000,
- ADD = 0x00000000,
- ADDS = ADD | AddSubSetFlagsBit,
- SUB = 0x40000000,
- SUBS = SUB | AddSubSetFlagsBit
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
};
-#define ADD_SUB_OP_LIST(V) \
- V(ADD), \
- V(ADDS), \
- V(SUB), \
- V(SUBS)
+#define ADD_SUB_OP_LIST(V) V(ADD), V(ADDS), V(SUB), V(SUBS)
enum AddSubImmediateOp : uint32_t {
AddSubImmediateFixed = 0x11000000,
AddSubImmediateFMask = 0x1F000000,
- AddSubImmediateMask = 0xFF000000,
- #define ADD_SUB_IMMEDIATE(A) \
- A##_w_imm = AddSubImmediateFixed | A, \
+ AddSubImmediateMask = 0xFF000000,
+#define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
- #undef ADD_SUB_IMMEDIATE
+#undef ADD_SUB_IMMEDIATE
};
enum AddSubShiftedOp : uint32_t {
- AddSubShiftedFixed = 0x0B000000,
- AddSubShiftedFMask = 0x1F200000,
- AddSubShiftedMask = 0xFF200000,
- #define ADD_SUB_SHIFTED(A) \
- A##_w_shift = AddSubShiftedFixed | A, \
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+#define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
- #undef ADD_SUB_SHIFTED
+#undef ADD_SUB_SHIFTED
};
enum AddSubExtendedOp : uint32_t {
- AddSubExtendedFixed = 0x0B200000,
- AddSubExtendedFMask = 0x1F200000,
- AddSubExtendedMask = 0xFFE00000,
- #define ADD_SUB_EXTENDED(A) \
- A##_w_ext = AddSubExtendedFixed | A, \
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+#define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
- #undef ADD_SUB_EXTENDED
+#undef ADD_SUB_EXTENDED
};
// Add/sub with carry.
enum AddSubWithCarryOp : uint32_t {
AddSubWithCarryFixed = 0x1A000000,
AddSubWithCarryFMask = 0x1FE00000,
- AddSubWithCarryMask = 0xFFE0FC00,
- ADC_w = AddSubWithCarryFixed | ADD,
- ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
- ADC = ADC_w,
- ADCS_w = AddSubWithCarryFixed | ADDS,
- ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
- SBC_w = AddSubWithCarryFixed | SUB,
- SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
- SBC = SBC_w,
- SBCS_w = AddSubWithCarryFixed | SUBS,
- SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
};
-
// Logical (immediate and shifted register).
enum LogicalOp : uint32_t {
LogicalOpMask = 0x60200000,
- NOT = 0x00200000,
- AND = 0x00000000,
- BIC = AND | NOT,
- ORR = 0x20000000,
- ORN = ORR | NOT,
- EOR = 0x40000000,
- EON = EOR | NOT,
- ANDS = 0x60000000,
- BICS = ANDS | NOT
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
};
// Logical immediate.
enum LogicalImmediateOp : uint32_t {
LogicalImmediateFixed = 0x12000000,
LogicalImmediateFMask = 0x1F800000,
- LogicalImmediateMask = 0xFF800000,
- AND_w_imm = LogicalImmediateFixed | AND,
- AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
- ORR_w_imm = LogicalImmediateFixed | ORR,
- ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
- EOR_w_imm = LogicalImmediateFixed | EOR,
- EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
- ANDS_w_imm = LogicalImmediateFixed | ANDS,
- ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
};
// Logical shifted register.
enum LogicalShiftedOp : uint32_t {
LogicalShiftedFixed = 0x0A000000,
LogicalShiftedFMask = 0x1F000000,
- LogicalShiftedMask = 0xFF200000,
- AND_w = LogicalShiftedFixed | AND,
- AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
- AND_shift = AND_w,
- BIC_w = LogicalShiftedFixed | BIC,
- BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
- BIC_shift = BIC_w,
- ORR_w = LogicalShiftedFixed | ORR,
- ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
- ORR_shift = ORR_w,
- ORN_w = LogicalShiftedFixed | ORN,
- ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
- ORN_shift = ORN_w,
- EOR_w = LogicalShiftedFixed | EOR,
- EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
- EOR_shift = EOR_w,
- EON_w = LogicalShiftedFixed | EON,
- EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
- EON_shift = EON_w,
- ANDS_w = LogicalShiftedFixed | ANDS,
- ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
- ANDS_shift = ANDS_w,
- BICS_w = LogicalShiftedFixed | BICS,
- BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
- BICS_shift = BICS_w
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
};
// Move wide immediate.
enum MoveWideImmediateOp : uint32_t {
MoveWideImmediateFixed = 0x12800000,
MoveWideImmediateFMask = 0x1F800000,
- MoveWideImmediateMask = 0xFF800000,
- MOVN = 0x00000000,
- MOVZ = 0x40000000,
- MOVK = 0x60000000,
- MOVN_w = MoveWideImmediateFixed | MOVN,
- MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
- MOVZ_w = MoveWideImmediateFixed | MOVZ,
- MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
- MOVK_w = MoveWideImmediateFixed | MOVK,
- MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
};
// Bitfield.
@@ -653,16 +640,16 @@ const int kBitfieldNOffset = 22;
enum BitfieldOp : uint32_t {
BitfieldFixed = 0x13000000,
BitfieldFMask = 0x1F800000,
- BitfieldMask = 0xFF800000,
- SBFM_w = BitfieldFixed | 0x00000000,
- SBFM_x = BitfieldFixed | 0x80000000,
- SBFM = SBFM_w,
- BFM_w = BitfieldFixed | 0x20000000,
- BFM_x = BitfieldFixed | 0xA0000000,
- BFM = BFM_w,
- UBFM_w = BitfieldFixed | 0x40000000,
- UBFM_x = BitfieldFixed | 0xC0000000,
- UBFM = UBFM_w
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
// Bitfield N field.
};
@@ -670,59 +657,59 @@ enum BitfieldOp : uint32_t {
enum ExtractOp : uint32_t {
ExtractFixed = 0x13800000,
ExtractFMask = 0x1F800000,
- ExtractMask = 0xFFA00000,
- EXTR_w = ExtractFixed | 0x00000000,
- EXTR_x = ExtractFixed | 0x80000000,
- EXTR = EXTR_w
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
};
// Unconditional branch.
enum UnconditionalBranchOp : uint32_t {
UnconditionalBranchFixed = 0x14000000,
UnconditionalBranchFMask = 0x7C000000,
- UnconditionalBranchMask = 0xFC000000,
- B = UnconditionalBranchFixed | 0x00000000,
- BL = UnconditionalBranchFixed | 0x80000000
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
};
// Unconditional branch to register.
enum UnconditionalBranchToRegisterOp : uint32_t {
UnconditionalBranchToRegisterFixed = 0xD6000000,
UnconditionalBranchToRegisterFMask = 0xFE000000,
- UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
- BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
- BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
- RET = UnconditionalBranchToRegisterFixed | 0x005F0000
+ UnconditionalBranchToRegisterMask = 0xFFFFFC1F,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000
};
// Compare and branch.
enum CompareBranchOp : uint32_t {
CompareBranchFixed = 0x34000000,
CompareBranchFMask = 0x7E000000,
- CompareBranchMask = 0xFF000000,
- CBZ_w = CompareBranchFixed | 0x00000000,
- CBZ_x = CompareBranchFixed | 0x80000000,
- CBZ = CBZ_w,
- CBNZ_w = CompareBranchFixed | 0x01000000,
- CBNZ_x = CompareBranchFixed | 0x81000000,
- CBNZ = CBNZ_w
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
};
// Test and branch.
enum TestBranchOp : uint32_t {
TestBranchFixed = 0x36000000,
TestBranchFMask = 0x7E000000,
- TestBranchMask = 0x7F000000,
- TBZ = TestBranchFixed | 0x00000000,
- TBNZ = TestBranchFixed | 0x01000000
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
};
// Conditional branch.
enum ConditionalBranchOp : uint32_t {
ConditionalBranchFixed = 0x54000000,
ConditionalBranchFMask = 0xFE000000,
- ConditionalBranchMask = 0xFF000010,
- B_cond = ConditionalBranchFixed | 0x00000000
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
};
// System.
@@ -730,39 +717,36 @@ enum ConditionalBranchOp : uint32_t {
// and CR fields to encode parameters. To handle this cleanly, the system
// instructions are split into more than one enum.
-enum SystemOp : uint32_t {
- SystemFixed = 0xD5000000,
- SystemFMask = 0xFFC00000
-};
+enum SystemOp : uint32_t { SystemFixed = 0xD5000000, SystemFMask = 0xFFC00000 };
enum SystemSysRegOp : uint32_t {
SystemSysRegFixed = 0xD5100000,
SystemSysRegFMask = 0xFFD00000,
- SystemSysRegMask = 0xFFF00000,
- MRS = SystemSysRegFixed | 0x00200000,
- MSR = SystemSysRegFixed | 0x00000000
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
};
enum SystemHintOp : uint32_t {
SystemHintFixed = 0xD503201F,
SystemHintFMask = 0xFFFFF01F,
- SystemHintMask = 0xFFFFF01F,
- HINT = SystemHintFixed | 0x00000000
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
};
// Exception.
enum ExceptionOp : uint32_t {
ExceptionFixed = 0xD4000000,
ExceptionFMask = 0xFF000000,
- ExceptionMask = 0xFFE0001F,
- HLT = ExceptionFixed | 0x00400000,
- BRK = ExceptionFixed | 0x00200000,
- SVC = ExceptionFixed | 0x00000001,
- HVC = ExceptionFixed | 0x00000002,
- SMC = ExceptionFixed | 0x00000003,
- DCPS1 = ExceptionFixed | 0x00A00001,
- DCPS2 = ExceptionFixed | 0x00A00002,
- DCPS3 = ExceptionFixed | 0x00A00003
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
};
// Code used to spot hlt instructions that should not be hit.
const int kHltBadCode = 0xbad;
@@ -770,10 +754,10 @@ const int kHltBadCode = 0xbad;
enum MemBarrierOp : uint32_t {
MemBarrierFixed = 0xD503309F,
MemBarrierFMask = 0xFFFFF09F,
- MemBarrierMask = 0xFFFFF0FF,
- DSB = MemBarrierFixed | 0x00000000,
- DMB = MemBarrierFixed | 0x00000020,
- ISB = MemBarrierFixed | 0x00000040
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
};
// Any load or store (including pair).
@@ -799,53 +783,52 @@ enum LoadStorePairAnyOp : uint32_t {
enum LoadStorePairOp : uint32_t {
LoadStorePairMask = 0xC4400000,
LoadStorePairLBit = 1 << 22,
- #define LOAD_STORE_PAIR(A, B, C) \
- A##_##B = C
+#define LOAD_STORE_PAIR(A, B, C) A##_##B = C
LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
- #undef LOAD_STORE_PAIR
+#undef LOAD_STORE_PAIR
};
enum LoadStorePairPostIndexOp : uint32_t {
LoadStorePairPostIndexFixed = 0x28800000,
LoadStorePairPostIndexFMask = 0x3B800000,
- LoadStorePairPostIndexMask = 0xFFC00000,
- #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ LoadStorePairPostIndexMask = 0xFFC00000,
+#define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
- #undef LOAD_STORE_PAIR_POST_INDEX
+#undef LOAD_STORE_PAIR_POST_INDEX
};
enum LoadStorePairPreIndexOp : uint32_t {
LoadStorePairPreIndexFixed = 0x29800000,
LoadStorePairPreIndexFMask = 0x3B800000,
- LoadStorePairPreIndexMask = 0xFFC00000,
- #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ LoadStorePairPreIndexMask = 0xFFC00000,
+#define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
- #undef LOAD_STORE_PAIR_PRE_INDEX
+#undef LOAD_STORE_PAIR_PRE_INDEX
};
enum LoadStorePairOffsetOp : uint32_t {
LoadStorePairOffsetFixed = 0x29000000,
LoadStorePairOffsetFMask = 0x3B800000,
- LoadStorePairOffsetMask = 0xFFC00000,
- #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ LoadStorePairOffsetMask = 0xFFC00000,
+#define LOAD_STORE_PAIR_OFFSET(A, B, C) \
A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
- #undef LOAD_STORE_PAIR_OFFSET
+#undef LOAD_STORE_PAIR_OFFSET
};
// Load literal.
enum LoadLiteralOp : uint32_t {
LoadLiteralFixed = 0x18000000,
LoadLiteralFMask = 0x3B000000,
- LoadLiteralMask = 0xFF000000,
- LDR_w_lit = LoadLiteralFixed | 0x00000000,
- LDR_x_lit = LoadLiteralFixed | 0x40000000,
- LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
- PRFM_lit = LoadLiteralFixed | 0xC0000000,
- LDR_s_lit = LoadLiteralFixed | 0x04000000,
- LDR_d_lit = LoadLiteralFixed | 0x44000000
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000
};
// clang-format off
@@ -881,11 +864,11 @@ enum LoadLiteralOp : uint32_t {
enum LoadStoreUnscaledOffsetOp : uint32_t {
LoadStoreUnscaledOffsetFixed = 0x38000000,
LoadStoreUnscaledOffsetFMask = 0x3B200C00,
- LoadStoreUnscaledOffsetMask = 0xFFE00C00,
- #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+#define LOAD_STORE_UNSCALED(A, B, C, D) \
A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
- #undef LOAD_STORE_UNSCALED
+#undef LOAD_STORE_UNSCALED
};
// Load/store (post, pre, offset and unsigned.)
@@ -901,46 +884,46 @@ enum LoadStoreOp : uint32_t {
enum LoadStorePostIndex : uint32_t {
LoadStorePostIndexFixed = 0x38000400,
LoadStorePostIndexFMask = 0x3B200C00,
- LoadStorePostIndexMask = 0xFFE00C00,
- #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ LoadStorePostIndexMask = 0xFFE00C00,
+#define LOAD_STORE_POST_INDEX(A, B, C, D) \
A##B##_##C##_post = LoadStorePostIndexFixed | D
LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
- #undef LOAD_STORE_POST_INDEX
+#undef LOAD_STORE_POST_INDEX
};
// Load/store pre index.
enum LoadStorePreIndex : uint32_t {
LoadStorePreIndexFixed = 0x38000C00,
LoadStorePreIndexFMask = 0x3B200C00,
- LoadStorePreIndexMask = 0xFFE00C00,
- #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ LoadStorePreIndexMask = 0xFFE00C00,
+#define LOAD_STORE_PRE_INDEX(A, B, C, D) \
A##B##_##C##_pre = LoadStorePreIndexFixed | D
LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
- #undef LOAD_STORE_PRE_INDEX
+#undef LOAD_STORE_PRE_INDEX
};
// Load/store unsigned offset.
enum LoadStoreUnsignedOffset : uint32_t {
LoadStoreUnsignedOffsetFixed = 0x39000000,
LoadStoreUnsignedOffsetFMask = 0x3B000000,
- LoadStoreUnsignedOffsetMask = 0xFFC00000,
- PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
- #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+#define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
- #undef LOAD_STORE_UNSIGNED_OFFSET
+#undef LOAD_STORE_UNSIGNED_OFFSET
};
// Load/store register offset.
enum LoadStoreRegisterOffset : uint32_t {
LoadStoreRegisterOffsetFixed = 0x38200800,
LoadStoreRegisterOffsetFMask = 0x3B200C00,
- LoadStoreRegisterOffsetMask = 0xFFE00C00,
- PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
- #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+#define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
- #undef LOAD_STORE_REGISTER_OFFSET
+#undef LOAD_STORE_REGISTER_OFFSET
};
// Load/store acquire/release.
@@ -950,34 +933,34 @@ enum LoadStoreAcquireReleaseOp : uint32_t {
LoadStoreAcquireReleaseMask = 0xCFC08000,
STLXR_b = LoadStoreAcquireReleaseFixed | 0x00008000,
LDAXR_b = LoadStoreAcquireReleaseFixed | 0x00408000,
- STLR_b = LoadStoreAcquireReleaseFixed | 0x00808000,
- LDAR_b = LoadStoreAcquireReleaseFixed | 0x00C08000,
+ STLR_b = LoadStoreAcquireReleaseFixed | 0x00808000,
+ LDAR_b = LoadStoreAcquireReleaseFixed | 0x00C08000,
STLXR_h = LoadStoreAcquireReleaseFixed | 0x40008000,
LDAXR_h = LoadStoreAcquireReleaseFixed | 0x40408000,
- STLR_h = LoadStoreAcquireReleaseFixed | 0x40808000,
- LDAR_h = LoadStoreAcquireReleaseFixed | 0x40C08000,
+ STLR_h = LoadStoreAcquireReleaseFixed | 0x40808000,
+ LDAR_h = LoadStoreAcquireReleaseFixed | 0x40C08000,
STLXR_w = LoadStoreAcquireReleaseFixed | 0x80008000,
LDAXR_w = LoadStoreAcquireReleaseFixed | 0x80408000,
- STLR_w = LoadStoreAcquireReleaseFixed | 0x80808000,
- LDAR_w = LoadStoreAcquireReleaseFixed | 0x80C08000,
+ STLR_w = LoadStoreAcquireReleaseFixed | 0x80808000,
+ LDAR_w = LoadStoreAcquireReleaseFixed | 0x80C08000,
STLXR_x = LoadStoreAcquireReleaseFixed | 0xC0008000,
LDAXR_x = LoadStoreAcquireReleaseFixed | 0xC0408000,
- STLR_x = LoadStoreAcquireReleaseFixed | 0xC0808000,
- LDAR_x = LoadStoreAcquireReleaseFixed | 0xC0C08000,
+ STLR_x = LoadStoreAcquireReleaseFixed | 0xC0808000,
+ LDAR_x = LoadStoreAcquireReleaseFixed | 0xC0C08000,
};
// Conditional compare.
enum ConditionalCompareOp : uint32_t {
ConditionalCompareMask = 0x60000000,
- CCMN = 0x20000000,
- CCMP = 0x60000000
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
};
// Conditional compare register.
enum ConditionalCompareRegisterOp : uint32_t {
ConditionalCompareRegisterFixed = 0x1A400000,
ConditionalCompareRegisterFMask = 0x1FE00800,
- ConditionalCompareRegisterMask = 0xFFE00C10,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
CCMN_w = ConditionalCompareRegisterFixed | CCMN,
CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
CCMP_w = ConditionalCompareRegisterFixed | CCMP,
@@ -988,7 +971,7 @@ enum ConditionalCompareRegisterOp : uint32_t {
enum ConditionalCompareImmediateOp : uint32_t {
ConditionalCompareImmediateFixed = 0x1A400800,
ConditionalCompareImmediateFMask = 0x1FE00800,
- ConditionalCompareImmediateMask = 0xFFE00C10,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
@@ -999,71 +982,71 @@ enum ConditionalCompareImmediateOp : uint32_t {
enum ConditionalSelectOp : uint32_t {
ConditionalSelectFixed = 0x1A800000,
ConditionalSelectFMask = 0x1FE00000,
- ConditionalSelectMask = 0xFFE00C00,
- CSEL_w = ConditionalSelectFixed | 0x00000000,
- CSEL_x = ConditionalSelectFixed | 0x80000000,
- CSEL = CSEL_w,
- CSINC_w = ConditionalSelectFixed | 0x00000400,
- CSINC_x = ConditionalSelectFixed | 0x80000400,
- CSINC = CSINC_w,
- CSINV_w = ConditionalSelectFixed | 0x40000000,
- CSINV_x = ConditionalSelectFixed | 0xC0000000,
- CSINV = CSINV_w,
- CSNEG_w = ConditionalSelectFixed | 0x40000400,
- CSNEG_x = ConditionalSelectFixed | 0xC0000400,
- CSNEG = CSNEG_w
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
};
// Data processing 1 source.
enum DataProcessing1SourceOp : uint32_t {
DataProcessing1SourceFixed = 0x5AC00000,
DataProcessing1SourceFMask = 0x5FE00000,
- DataProcessing1SourceMask = 0xFFFFFC00,
- RBIT = DataProcessing1SourceFixed | 0x00000000,
- RBIT_w = RBIT,
- RBIT_x = RBIT | SixtyFourBits,
- REV16 = DataProcessing1SourceFixed | 0x00000400,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
REV16_w = REV16,
REV16_x = REV16 | SixtyFourBits,
- REV = DataProcessing1SourceFixed | 0x00000800,
- REV_w = REV,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
REV32_x = REV | SixtyFourBits,
- REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
- CLZ = DataProcessing1SourceFixed | 0x00001000,
- CLZ_w = CLZ,
- CLZ_x = CLZ | SixtyFourBits,
- CLS = DataProcessing1SourceFixed | 0x00001400,
- CLS_w = CLS,
- CLS_x = CLS | SixtyFourBits
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits
};
// Data processing 2 source.
enum DataProcessing2SourceOp : uint32_t {
DataProcessing2SourceFixed = 0x1AC00000,
DataProcessing2SourceFMask = 0x5FE00000,
- DataProcessing2SourceMask = 0xFFE0FC00,
- UDIV_w = DataProcessing2SourceFixed | 0x00000800,
- UDIV_x = DataProcessing2SourceFixed | 0x80000800,
- UDIV = UDIV_w,
- SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
- SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
- SDIV = SDIV_w,
- LSLV_w = DataProcessing2SourceFixed | 0x00002000,
- LSLV_x = DataProcessing2SourceFixed | 0x80002000,
- LSLV = LSLV_w,
- LSRV_w = DataProcessing2SourceFixed | 0x00002400,
- LSRV_x = DataProcessing2SourceFixed | 0x80002400,
- LSRV = LSRV_w,
- ASRV_w = DataProcessing2SourceFixed | 0x00002800,
- ASRV_x = DataProcessing2SourceFixed | 0x80002800,
- ASRV = ASRV_w,
- RORV_w = DataProcessing2SourceFixed | 0x00002C00,
- RORV_x = DataProcessing2SourceFixed | 0x80002C00,
- RORV = RORV_w,
- CRC32B = DataProcessing2SourceFixed | 0x00004000,
- CRC32H = DataProcessing2SourceFixed | 0x00004400,
- CRC32W = DataProcessing2SourceFixed | 0x00004800,
- CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
CRC32CB = DataProcessing2SourceFixed | 0x00005000,
CRC32CH = DataProcessing2SourceFixed | 0x00005400,
CRC32CW = DataProcessing2SourceFixed | 0x00005800,
@@ -1074,68 +1057,68 @@ enum DataProcessing2SourceOp : uint32_t {
enum DataProcessing3SourceOp : uint32_t {
DataProcessing3SourceFixed = 0x1B000000,
DataProcessing3SourceFMask = 0x1F000000,
- DataProcessing3SourceMask = 0xFFE08000,
- MADD_w = DataProcessing3SourceFixed | 0x00000000,
- MADD_x = DataProcessing3SourceFixed | 0x80000000,
- MADD = MADD_w,
- MSUB_w = DataProcessing3SourceFixed | 0x00008000,
- MSUB_x = DataProcessing3SourceFixed | 0x80008000,
- MSUB = MSUB_w,
- SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
- SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
- SMULH_x = DataProcessing3SourceFixed | 0x80400000,
- UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
- UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
- UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
};
// Floating point compare.
enum FPCompareOp : uint32_t {
FPCompareFixed = 0x1E202000,
FPCompareFMask = 0x5F203C00,
- FPCompareMask = 0xFFE0FC1F,
- FCMP_s = FPCompareFixed | 0x00000000,
- FCMP_d = FPCompareFixed | FP64 | 0x00000000,
- FCMP = FCMP_s,
- FCMP_s_zero = FPCompareFixed | 0x00000008,
- FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
- FCMP_zero = FCMP_s_zero,
- FCMPE_s = FPCompareFixed | 0x00000010,
- FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
- FCMPE_s_zero = FPCompareFixed | 0x00000018,
- FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018
};
// Floating point conditional compare.
enum FPConditionalCompareOp : uint32_t {
FPConditionalCompareFixed = 0x1E200400,
FPConditionalCompareFMask = 0x5F200C00,
- FPConditionalCompareMask = 0xFFE00C10,
- FCCMP_s = FPConditionalCompareFixed | 0x00000000,
- FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
- FCCMP = FCCMP_s,
- FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
- FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
- FCCMPE = FCCMPE_s
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
};
// Floating point conditional select.
enum FPConditionalSelectOp : uint32_t {
FPConditionalSelectFixed = 0x1E200C00,
FPConditionalSelectFMask = 0x5F200C00,
- FPConditionalSelectMask = 0xFFE00C00,
- FCSEL_s = FPConditionalSelectFixed | 0x00000000,
- FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
- FCSEL = FCSEL_s
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
};
// Floating point immediate.
enum FPImmediateOp : uint32_t {
FPImmediateFixed = 0x1E201000,
FPImmediateFMask = 0x5F201C00,
- FPImmediateMask = 0xFFE01C00,
- FMOV_s_imm = FPImmediateFixed | 0x00000000,
- FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
};
// Floating point data processing 1 source.
@@ -1188,49 +1171,49 @@ enum FPDataProcessing1SourceOp : uint32_t {
enum FPDataProcessing2SourceOp : uint32_t {
FPDataProcessing2SourceFixed = 0x1E200800,
FPDataProcessing2SourceFMask = 0x5F200C00,
- FPDataProcessing2SourceMask = 0xFFE0FC00,
- FMUL = FPDataProcessing2SourceFixed | 0x00000000,
- FMUL_s = FMUL,
- FMUL_d = FMUL | FP64,
- FDIV = FPDataProcessing2SourceFixed | 0x00001000,
- FDIV_s = FDIV,
- FDIV_d = FDIV | FP64,
- FADD = FPDataProcessing2SourceFixed | 0x00002000,
- FADD_s = FADD,
- FADD_d = FADD | FP64,
- FSUB = FPDataProcessing2SourceFixed | 0x00003000,
- FSUB_s = FSUB,
- FSUB_d = FSUB | FP64,
- FMAX = FPDataProcessing2SourceFixed | 0x00004000,
- FMAX_s = FMAX,
- FMAX_d = FMAX | FP64,
- FMIN = FPDataProcessing2SourceFixed | 0x00005000,
- FMIN_s = FMIN,
- FMIN_d = FMIN | FP64,
- FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
FMAXNM_s = FMAXNM,
FMAXNM_d = FMAXNM | FP64,
- FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
FMINNM_s = FMINNM,
FMINNM_d = FMINNM | FP64,
- FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
- FNMUL_s = FNMUL,
- FNMUL_d = FNMUL | FP64
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
};
// Floating point data processing 3 source.
enum FPDataProcessing3SourceOp : uint32_t {
FPDataProcessing3SourceFixed = 0x1F000000,
FPDataProcessing3SourceFMask = 0x5F000000,
- FPDataProcessing3SourceMask = 0xFFE08000,
- FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
- FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
- FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
- FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
- FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
- FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
- FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
- FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
};
// Conversion between floating point and integer.
@@ -1310,27 +1293,27 @@ enum FPIntegerConvertOp : uint32_t {
enum FPFixedPointConvertOp : uint32_t {
FPFixedPointConvertFixed = 0x1E000000,
FPFixedPointConvertFMask = 0x5F200000,
- FPFixedPointConvertMask = 0xFFFF0000,
- FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
FCVTZS_ws_fixed = FCVTZS_fixed,
FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
- FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
FCVTZU_ws_fixed = FCVTZU_fixed,
FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
- SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
- SCVTF_sw_fixed = SCVTF_fixed,
- SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
- SCVTF_dw_fixed = SCVTF_fixed | FP64,
- SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
- UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
- UCVTF_sw_fixed = UCVTF_fixed,
- UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
- UCVTF_dw_fixed = UCVTF_fixed | FP64,
- UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
};
// NEON instructions with two register operands.
@@ -2099,4 +2082,4 @@ enum UnallocatedOp : uint32_t {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_CONSTANTS_ARM64_H_
+#endif // V8_CODEGEN_ARM64_CONSTANTS_ARM64_H_
diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc
new file mode 100644
index 0000000000..e0ab589914
--- /dev/null
+++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc
@@ -0,0 +1,116 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for arm independent of OS goes here.
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/codegen/arm64/utils-arm64.h"
+#include "src/codegen/cpu-features.h"
+
+namespace v8 {
+namespace internal {
+
+class CacheLineSizes {
+ public:
+ CacheLineSizes() {
+#if !defined(V8_HOST_ARCH_ARM64) || defined(V8_OS_WIN)
+ cache_type_register_ = 0;
+#else
+ // Copy the content of the cache type register to a core register.
+ __asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r"(cache_type_register_));
+#endif
+ }
+
+ uint32_t icache_line_size() const { return ExtractCacheLineSize(0); }
+ uint32_t dcache_line_size() const { return ExtractCacheLineSize(16); }
+
+ private:
+ uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
+ // The cache type register holds the size of cache lines in words as a
+ // power of two.
+ return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xF);
+ }
+
+ uint32_t cache_type_register_;
+};
+
+void CpuFeatures::FlushICache(void* address, size_t length) {
+#if defined(V8_HOST_ARCH_ARM64)
+#if defined(V8_OS_WIN)
+ ::FlushInstructionCache(GetCurrentProcess(), address, length);
+#else
+ // The code below assumes user space cache operations are allowed. The goal
+ // of this routine is to make sure the code generated is visible to the I
+ // side of the CPU.
+
+ uintptr_t start = reinterpret_cast<uintptr_t>(address);
+ // Sizes will be used to generate a mask big enough to cover a pointer.
+ CacheLineSizes sizes;
+ uintptr_t dsize = sizes.dcache_line_size();
+ uintptr_t isize = sizes.icache_line_size();
+ // Cache line sizes are always a power of 2.
+ DCHECK_EQ(CountSetBits(dsize, 64), 1);
+ DCHECK_EQ(CountSetBits(isize, 64), 1);
+ uintptr_t dstart = start & ~(dsize - 1);
+ uintptr_t istart = start & ~(isize - 1);
+ uintptr_t end = start + length;
+
+ __asm__ __volatile__( // NOLINT
+ // Clean every line of the D cache containing the
+ // target data.
+ "0: \n\t"
+ // dc : Data Cache maintenance
+ // c : Clean
+ // i : Invalidate
+ // va : by (Virtual) Address
+ // c : to the point of Coherency
+ // See ARM DDI 0406B page B2-12 for more information.
+ // We would prefer to use "cvau" (clean to the point of unification) here
+ // but we use "civac" to work around Cortex-A53 errata 819472, 826319,
+ // 827319 and 824069.
+ "dc civac, %[dline] \n\t"
+ "add %[dline], %[dline], %[dsize] \n\t"
+ "cmp %[dline], %[end] \n\t"
+ "b.lt 0b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the
+ // rest of the world. dsb : Data Synchronisation Barrier
+ // ish : Inner SHareable domain
+ // The point of unification for an Inner Shareable shareability domain is
+ // the point by which the instruction and data caches of all the
+ // processors in that Inner Shareable shareability domain are guaranteed
+ // to see the same copy of a memory location. See ARM DDI 0406B page
+ // B2-12 for more information.
+ "dsb ish \n\t"
+ // Invalidate every line of the I cache containing the target data.
+ "1: \n\t"
+ // ic : instruction cache maintenance
+ // i : invalidate
+ // va : by address
+ // u : to the point of unification
+ "ic ivau, %[iline] \n\t"
+ "add %[iline], %[iline], %[isize] \n\t"
+ "cmp %[iline], %[end] \n\t"
+ "b.lt 1b \n\t"
+ // Barrier to make sure the effect of the code above is visible to the
+ // rest of the world.
+ "dsb ish \n\t"
+ // Barrier to ensure any prefetching which happened before this code is
+ // discarded.
+ // isb : Instruction Synchronisation Barrier
+ "isb \n\t"
+ : [dline] "+r"(dstart), [iline] "+r"(istart)
+ : [dsize] "r"(dsize), [isize] "r"(isize), [end] "r"(end)
+ // This code does not write to memory but without the dependency gcc might
+ // move this code before the code is generated.
+ : "cc", "memory"); // NOLINT
+#endif // V8_OS_WIN
+#endif // V8_HOST_ARCH_ARM64
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/decoder-arm64-inl.h b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h
index c2181ddc40..25d69b3898 100644
--- a/deps/v8/src/arm64/decoder-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h
@@ -2,30 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DECODER_ARM64_INL_H_
-#define V8_ARM64_DECODER_ARM64_INL_H_
-
-#include "src/arm64/decoder-arm64.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#ifndef V8_CODEGEN_ARM64_DECODER_ARM64_INL_H_
+#define V8_CODEGEN_ARM64_DECODER_ARM64_INL_H_
+#include "src/codegen/arm64/decoder-arm64.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
-
// Top-level instruction decode function.
-template<typename V>
-void Decoder<V>::Decode(Instruction *instr) {
+template <typename V>
+void Decoder<V>::Decode(Instruction* instr) {
if (instr->Bits(28, 27) == 0) {
V::VisitUnallocated(instr);
} else {
switch (instr->Bits(27, 24)) {
// 0: PC relative addressing.
- case 0x0: DecodePCRelAddressing(instr); break;
+ case 0x0:
+ DecodePCRelAddressing(instr);
+ break;
// 1: Add/sub immediate.
- case 0x1: DecodeAddSubImmediate(instr); break;
+ case 0x1:
+ DecodeAddSubImmediate(instr);
+ break;
// A: Logical shifted register.
// Add/sub with carry.
@@ -38,15 +40,21 @@ void Decoder<V>::Decode(Instruction *instr) {
// Add/sub extended register.
// Data processing 3 source.
case 0xA:
- case 0xB: DecodeDataProcessing(instr); break;
+ case 0xB:
+ DecodeDataProcessing(instr);
+ break;
// 2: Logical immediate.
// Move wide immediate.
- case 0x2: DecodeLogical(instr); break;
+ case 0x2:
+ DecodeLogical(instr);
+ break;
// 3: Bitfield.
// Extract.
- case 0x3: DecodeBitfieldExtract(instr); break;
+ case 0x3:
+ DecodeBitfieldExtract(instr);
+ break;
// 4: Unconditional branch immediate.
// Exception generation.
@@ -59,7 +67,9 @@ void Decoder<V>::Decode(Instruction *instr) {
case 0x4:
case 0x5:
case 0x6:
- case 0x7: DecodeBranchSystemException(instr); break;
+ case 0x7:
+ DecodeBranchSystemException(instr);
+ break;
// 8,9: Load/store register pair post-index.
// Load register literal.
@@ -74,7 +84,9 @@ void Decoder<V>::Decode(Instruction *instr) {
case 0x8:
case 0x9:
case 0xC:
- case 0xD: DecodeLoadStore(instr); break;
+ case 0xD:
+ DecodeLoadStore(instr);
+ break;
// E: FP fixed point conversion.
// FP integer conversion.
@@ -88,13 +100,14 @@ void Decoder<V>::Decode(Instruction *instr) {
// F: FP data processing 3 source.
// Advanced SIMD.
case 0xE:
- case 0xF: DecodeFP(instr); break;
+ case 0xF:
+ DecodeFP(instr);
+ break;
}
}
}
-
-template<typename V>
+template <typename V>
void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
DCHECK_EQ(0x0, instr->Bits(27, 24));
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
@@ -103,8 +116,7 @@ void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
V::VisitPCRelAddressing(instr);
}
-
-template<typename V>
+template <typename V>
void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
DCHECK_EQ(0x4, instr->Bits(27, 24) & 0xC); // 0x4, 0x5, 0x6, 0x7
@@ -176,12 +188,9 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
}
}
} else {
- if ((instr->Bit(24) == 0x1) ||
- (instr->Bits(20, 16) != 0x1F) ||
- (instr->Bits(15, 10) != 0) ||
- (instr->Bits(4, 0) != 0) ||
- (instr->Bits(24, 21) == 0x3) ||
- (instr->Bits(24, 22) == 0x3)) {
+ if ((instr->Bit(24) == 0x1) || (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) || (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) || (instr->Bits(24, 22) == 0x3)) {
V::VisitUnallocated(instr);
} else {
V::VisitUnconditionalBranchToRegister(instr);
@@ -197,8 +206,7 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
}
}
-
-template<typename V>
+template <typename V>
void Decoder<V>::DecodeLoadStore(Instruction* instr) {
DCHECK_EQ(0x8, instr->Bits(27, 24) & 0xA); // 0x8, 0x9, 0xC, 0xD
@@ -325,8 +333,7 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
}
}
-
-template<typename V>
+template <typename V>
void Decoder<V>::DecodeLogical(Instruction* instr) {
DCHECK_EQ(0x2, instr->Bits(27, 24));
@@ -345,8 +352,7 @@ void Decoder<V>::DecodeLogical(Instruction* instr) {
}
}
-
-template<typename V>
+template <typename V>
void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
DCHECK_EQ(0x3, instr->Bits(27, 24));
@@ -371,8 +377,7 @@ void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
}
}
-
-template<typename V>
+template <typename V>
void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
DCHECK_EQ(0x1, instr->Bits(27, 24));
if (instr->Bit(23) == 1) {
@@ -382,11 +387,9 @@ void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
}
}
-
-template<typename V>
+template <typename V>
void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
- DCHECK((instr->Bits(27, 24) == 0xA) ||
- (instr->Bits(27, 24) == 0xB) );
+ DCHECK((instr->Bits(27, 24) == 0xA) || (instr->Bits(27, 24) == 0xB));
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
@@ -406,8 +409,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
break;
}
case 2: {
- if ((instr->Bit(29) == 0) ||
- (instr->Mask(0x00000410) != 0)) {
+ if ((instr->Bit(29) == 0) || (instr->Mask(0x00000410) != 0)) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(11) == 0) {
@@ -431,8 +433,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
V::VisitUnallocated(instr);
} else {
if (instr->Bit(30) == 0) {
- if ((instr->Bit(15) == 0x1) ||
- (instr->Bits(15, 11) == 0) ||
+ if ((instr->Bit(15) == 0x1) || (instr->Bits(15, 11) == 0) ||
(instr->Bits(15, 12) == 0x1) ||
(instr->Bits(15, 12) == 0x3) ||
(instr->Bits(15, 13) == 0x3) ||
@@ -444,8 +445,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
V::VisitDataProcessing2Source(instr);
}
} else {
- if ((instr->Bit(13) == 1) ||
- (instr->Bits(20, 16) != 0) ||
+ if ((instr->Bit(13) == 1) || (instr->Bits(20, 16) != 0) ||
(instr->Bits(15, 14) != 0) ||
(instr->Mask(0xA01FFC00) == 0x00000C00) ||
(instr->Mask(0x201FF800) == 0x00001800)) {
@@ -461,12 +461,14 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
case 1:
case 3:
case 5:
- case 7: V::VisitUnallocated(instr); break;
+ case 7:
+ V::VisitUnallocated(instr);
+ break;
}
}
} else {
if (instr->Bit(28) == 0) {
- if (instr->Bit(21) == 0) {
+ if (instr->Bit(21) == 0) {
if ((instr->Bits(23, 22) == 0x3) ||
(instr->Mask(0x80008000) == 0x00008000)) {
V::VisitUnallocated(instr);
@@ -483,8 +485,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
}
}
} else {
- if ((instr->Bit(30) == 0x1) ||
- (instr->Bits(30, 29) == 0x1) ||
+ if ((instr->Bit(30) == 0x1) || (instr->Bits(30, 29) == 0x1) ||
(instr->Mask(0xE0600000) == 0x00200000) ||
(instr->Mask(0xE0608000) == 0x00400000) ||
(instr->Mask(0x60608000) == 0x00408000) ||
@@ -499,11 +500,9 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
}
}
-
-template<typename V>
+template <typename V>
void Decoder<V>::DecodeFP(Instruction* instr) {
- DCHECK((instr->Bits(27, 24) == 0xE) ||
- (instr->Bits(27, 24) == 0xF) );
+ DCHECK((instr->Bits(27, 24) == 0xE) || (instr->Bits(27, 24) == 0xF));
if (instr->Bit(28) == 0) {
DecodeNEONVectorDataProcessing(instr);
@@ -516,8 +515,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
if (instr->Bit(29) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
- if ((instr->Bit(23) == 1) ||
- (instr->Bit(18) == 1) ||
+ if ((instr->Bit(23) == 1) || (instr->Bit(18) == 1) ||
(instr->Mask(0x80008000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x00000000) ||
(instr->Mask(0x000E0000) == 0x000A0000) ||
@@ -569,8 +567,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
V::VisitFPDataProcessing1Source(instr);
}
} else if (instr->Bits(13, 10) == 8) {
- if ((instr->Bits(15, 14) != 0) ||
- (instr->Bits(2, 0) != 0) ||
+ if ((instr->Bits(15, 14) != 0) || (instr->Bits(2, 0) != 0) ||
(instr->Mask(0x80800000) != 0x00000000)) {
V::VisitUnallocated(instr);
} else {
@@ -606,7 +603,8 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
V::VisitFPConditionalSelect(instr);
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
}
}
@@ -806,8 +804,7 @@ void Decoder<V>::DecodeNEONScalarDataProcessing(Instruction* instr) {
}
}
-
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_DECODER_ARM64_INL_H_
+#endif // V8_CODEGEN_ARM64_DECODER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/decoder-arm64.cc b/deps/v8/src/codegen/arm64/decoder-arm64.cc
index 56b3e0255e..af8bbf9655 100644
--- a/deps/v8/src/arm64/decoder-arm64.cc
+++ b/deps/v8/src/codegen/arm64/decoder-arm64.cc
@@ -4,27 +4,23 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/decoder-arm64.h"
-#include "src/globals.h"
-#include "src/utils.h"
-
+#include "src/codegen/arm64/decoder-arm64.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
-
void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
visitors_.push_back(new_visitor);
}
-
void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor);
visitors_.push_front(new_visitor);
}
-
void DispatchingDecoderVisitor::InsertVisitorBefore(
DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
@@ -41,7 +37,6 @@ void DispatchingDecoderVisitor::InsertVisitorBefore(
visitors_.insert(it, new_visitor);
}
-
void DispatchingDecoderVisitor::InsertVisitorAfter(
DecoderVisitor* new_visitor, DecoderVisitor* registered_visitor) {
visitors_.remove(new_visitor);
@@ -59,12 +54,10 @@ void DispatchingDecoderVisitor::InsertVisitorAfter(
visitors_.push_back(new_visitor);
}
-
void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
visitors_.remove(visitor);
}
-
#define DEFINE_VISITOR_CALLERS(A) \
void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
if (!(instr->Mask(A##FMask) == A##Fixed)) { \
@@ -78,7 +71,6 @@ void DispatchingDecoderVisitor::RemoveVisitor(DecoderVisitor* visitor) {
VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/decoder-arm64.h b/deps/v8/src/codegen/arm64/decoder-arm64.h
index 6597c1788f..7621c516ce 100644
--- a/deps/v8/src/arm64/decoder-arm64.h
+++ b/deps/v8/src/codegen/arm64/decoder-arm64.h
@@ -2,18 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DECODER_ARM64_H_
-#define V8_ARM64_DECODER_ARM64_H_
+#ifndef V8_CODEGEN_ARM64_DECODER_ARM64_H_
+#define V8_CODEGEN_ARM64_DECODER_ARM64_H_
#include <list>
-#include "src/arm64/instructions-arm64.h"
-#include "src/globals.h"
+#include "src/codegen/arm64/instructions-arm64.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
-
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V) \
@@ -90,13 +89,13 @@ class V8_EXPORT_PRIVATE DecoderVisitor {
public:
virtual ~DecoderVisitor() {}
- #define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
+#define DECLARE(A) virtual void Visit##A(Instruction* instr) = 0;
VISITOR_LIST(DECLARE)
- #undef DECLARE
+#undef DECLARE
};
// A visitor that dispatches to a list of visitors.
-class DispatchingDecoderVisitor : public DecoderVisitor {
+class V8_EXPORT_PRIVATE DispatchingDecoderVisitor : public DecoderVisitor {
public:
DispatchingDecoderVisitor() {}
virtual ~DispatchingDecoderVisitor() {}
@@ -120,7 +119,7 @@ class DispatchingDecoderVisitor : public DecoderVisitor {
//
// will call in order visitor methods in V3, V2, V1, V4.
void AppendVisitor(DecoderVisitor* visitor);
- V8_EXPORT_PRIVATE void PrependVisitor(DecoderVisitor* visitor);
+ void PrependVisitor(DecoderVisitor* visitor);
void InsertVisitorBefore(DecoderVisitor* new_visitor,
DecoderVisitor* registered_visitor);
void InsertVisitorAfter(DecoderVisitor* new_visitor,
@@ -132,16 +131,16 @@ class DispatchingDecoderVisitor : public DecoderVisitor {
void VisitNEONShiftImmediate(const Instruction* instr);
- #define DECLARE(A) void Visit##A(Instruction* instr);
+#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
- #undef DECLARE
+#undef DECLARE
private:
// Visitors are registered in a list.
std::list<DecoderVisitor*> visitors_;
};
-template<typename V>
+template <typename V>
class Decoder : public V {
public:
Decoder() {}
@@ -149,7 +148,7 @@ class Decoder : public V {
// Top-level instruction decoder function. Decodes an instruction and calls
// the visitor functions registered with the Decoder class.
- virtual void Decode(Instruction *instr);
+ virtual void Decode(Instruction* instr);
private:
// Decode the PC relative addressing instruction, and call the corresponding
@@ -208,8 +207,7 @@ class Decoder : public V {
void DecodeNEONScalarDataProcessing(Instruction* instr);
};
-
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_DECODER_ARM64_H_
+#endif // V8_CODEGEN_ARM64_DECODER_ARM64_H_
diff --git a/deps/v8/src/arm64/instructions-arm64-constants.cc b/deps/v8/src/codegen/arm64/instructions-arm64-constants.cc
index 5c0d42a8c6..5c0d42a8c6 100644
--- a/deps/v8/src/arm64/instructions-arm64-constants.cc
+++ b/deps/v8/src/codegen/arm64/instructions-arm64-constants.cc
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/codegen/arm64/instructions-arm64.cc
index 11b59a9e9b..dfc2ef1323 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.cc
@@ -4,8 +4,8 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/instructions-arm64.h"
+#include "src/codegen/arm64/instructions-arm64.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
namespace v8 {
namespace internal {
@@ -35,12 +35,12 @@ bool Instruction::IsLoad() const {
case LDR_d:
case LDR_q:
return true;
- default: return false;
+ default:
+ return false;
}
}
}
-
bool Instruction::IsStore() const {
if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
return false;
@@ -61,14 +61,13 @@ bool Instruction::IsStore() const {
case STR_d:
case STR_q:
return true;
- default: return false;
+ default:
+ return false;
}
}
}
-
-static uint64_t RotateRight(uint64_t value,
- unsigned int rotate,
+static uint64_t RotateRight(uint64_t value, unsigned int rotate,
unsigned int width) {
DCHECK_LE(width, 64);
rotate &= 63;
@@ -76,9 +75,7 @@ static uint64_t RotateRight(uint64_t value,
(value >> rotate);
}
-
-static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
- uint64_t value,
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size, uint64_t value,
unsigned width) {
DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
(width == 32));
@@ -90,7 +87,6 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
return result;
}
-
// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case. Specifically, where the constraints on imm_s are not
// met.
@@ -134,9 +130,8 @@ uint64_t Instruction::ImmLogical() {
return 0;
}
uint64_t bits = (1ULL << ((imm_s & mask) + 1)) - 1;
- return RepeatBitsAcrossReg(reg_size,
- RotateRight(bits, imm_r & mask, width),
- width);
+ return RepeatBitsAcrossReg(
+ reg_size, RotateRight(bits, imm_r & mask, width), width);
}
}
}
@@ -188,7 +183,6 @@ unsigned CalcLSPairDataSize(LoadStorePairOp op) {
}
}
-
int64_t Instruction::ImmPCOffset() {
int64_t offset;
if (IsPCRelAddressing()) {
@@ -211,18 +205,15 @@ int64_t Instruction::ImmPCOffset() {
return offset;
}
-
Instruction* Instruction::ImmPCOffsetTarget() {
return InstructionAtOffset(ImmPCOffset());
}
-
bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
ptrdiff_t offset) {
return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}
-
bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}
@@ -258,7 +249,6 @@ void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
}
}
-
void Instruction::SetBranchImmTarget(Instruction* target) {
DCHECK(IsAligned(DistanceTo(target), kInstrSize));
DCHECK(
@@ -287,7 +277,8 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
imm_mask = ImmTestBranch_mask;
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
@@ -307,7 +298,6 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(
patcher.brk(low16);
}
-
void Instruction::SetImmLLiteral(Instruction* source) {
DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstrSize));
@@ -319,7 +309,6 @@ void Instruction::SetImmLLiteral(Instruction* source) {
SetInstructionBits(Mask(~mask) | imm);
}
-
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
@@ -331,7 +320,6 @@ bool InstructionSequence::IsInlineData() const {
// to update this method too.
}
-
// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/codegen/arm64/instructions-arm64.h
index 8514469227..5c3cf687e7 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/codegen/arm64/instructions-arm64.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
-#define V8_ARM64_INSTRUCTIONS_ARM64_H_
+#ifndef V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
+#define V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
-#include "src/arm64/constants-arm64.h"
-#include "src/arm64/register-arm64.h"
-#include "src/arm64/utils-arm64.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/codegen/arm64/constants-arm64.h"
+#include "src/codegen/arm64/register-arm64.h"
+#include "src/codegen/arm64/utils-arm64.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -18,7 +18,7 @@ struct AssemblerOptions;
// ISA constants. --------------------------------------------------------------
-typedef uint32_t Instr;
+using Instr = uint32_t;
#if defined(V8_OS_WIN)
extern "C" {
@@ -54,17 +54,13 @@ unsigned CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
- CondBranchType = 1,
- UncondBranchType = 2,
+ CondBranchType = 1,
+ UncondBranchType = 2,
CompareBranchType = 3,
- TestBranchType = 4
+ TestBranchType = 4
};
-enum AddrMode {
- Offset,
- PreIndex,
- PostIndex
-};
+enum AddrMode { Offset, PreIndex, PostIndex };
enum FPRounding {
// The first four values are encodable directly by FPCR<RMode>.
@@ -79,10 +75,7 @@ enum FPRounding {
FPRoundOdd
};
-enum Reg31Mode {
- Reg31IsStackPointer,
- Reg31IsZeroRegister
-};
+enum Reg31Mode { Reg31IsStackPointer, Reg31IsZeroRegister };
// Instructions. ---------------------------------------------------------------
@@ -96,9 +89,7 @@ class Instruction {
*reinterpret_cast<Instr*>(this) = new_instr;
}
- int Bit(int pos) const {
- return (InstructionBits() >> pos) & 1;
- }
+ int Bit(int pos) const { return (InstructionBits() >> pos) & 1; }
uint32_t Bits(int msb, int lsb) const {
return unsigned_bitextract_32(msb, lsb, InstructionBits());
@@ -109,9 +100,7 @@ class Instruction {
return signed_bitextract_32(msb, lsb, bits);
}
- Instr Mask(uint32_t mask) const {
- return InstructionBits() & mask;
- }
+ Instr Mask(uint32_t mask) const { return InstructionBits() & mask; }
V8_INLINE const Instruction* following(int count = 1) const {
return InstructionAtOffset(count * static_cast<int>(kInstrSize));
@@ -125,14 +114,12 @@ class Instruction {
return following(-count);
}
- V8_INLINE Instruction* preceding(int count = 1) {
- return following(-count);
- }
+ V8_INLINE Instruction* preceding(int count = 1) { return following(-count); }
#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
int32_t Name() const { return Func(HighBit, LowBit); }
INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
- #undef DEFINE_GETTER
+#undef DEFINE_GETTER
// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
// formed from ImmPCRelLo and ImmPCRelHi.
@@ -180,13 +167,9 @@ class Instruction {
return Mask(CompareBranchFMask) == CompareBranchFixed;
}
- bool IsTestBranch() const {
- return Mask(TestBranchFMask) == TestBranchFixed;
- }
+ bool IsTestBranch() const { return Mask(TestBranchFMask) == TestBranchFixed; }
- bool IsImmBranch() const {
- return BranchType() != UnknownBranchType;
- }
+ bool IsImmBranch() const { return BranchType() != UnknownBranchType; }
static float Imm8ToFP32(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
@@ -219,17 +202,13 @@ class Instruction {
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
- bool IsLdrLiteralX() const {
- return Mask(LoadLiteralMask) == LDR_x_lit;
- }
+ bool IsLdrLiteralX() const { return Mask(LoadLiteralMask) == LDR_x_lit; }
bool IsPCRelAddressing() const {
return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
}
- bool IsAdr() const {
- return Mask(PCRelAddressingMask) == ADR;
- }
+ bool IsAdr() const { return Mask(PCRelAddressingMask) == ADR; }
bool IsBrk() const { return Mask(ExceptionMask) == BRK; }
@@ -345,11 +324,16 @@ class Instruction {
int ImmBranch() const {
switch (BranchType()) {
- case CondBranchType: return ImmCondBranch();
- case UncondBranchType: return ImmUncondBranch();
- case CompareBranchType: return ImmCmpBranch();
- case TestBranchType: return ImmTestBranch();
- default: UNREACHABLE();
+ case CondBranchType:
+ return ImmCondBranch();
+ case UncondBranchType:
+ return ImmUncondBranch();
+ case CompareBranchType:
+ return ImmCmpBranch();
+ case TestBranchType:
+ return ImmTestBranch();
+ default:
+ UNREACHABLE();
}
return 0;
}
@@ -393,9 +377,7 @@ class Instruction {
// mov r<n>, r<n>
// which is encoded as
// orr r<n>, xzr, r<n>
- return (Mask(LogicalShiftedMask) == ORR_x) &&
- (Rd() == Rm()) &&
- (Rd() == n);
+ return (Mask(LogicalShiftedMask) == ORR_x) && (Rd() == Rm()) && (Rd() == n);
}
// Find the PC offset encoded in this instruction. 'this' may be a branch or
@@ -439,7 +421,8 @@ class Instruction {
return this + offset;
}
- template<typename T> V8_INLINE static Instruction* Cast(T src) {
+ template <typename T>
+ V8_INLINE static Instruction* Cast(T src) {
return reinterpret_cast<Instruction*>(src);
}
@@ -447,7 +430,6 @@ class Instruction {
return reinterpret_cast<Address>(target) - reinterpret_cast<Address>(this);
}
-
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(const AssemblerOptions& options, Instruction* target);
@@ -468,7 +450,6 @@ class InstructionSequence : public Instruction {
uint64_t InlineData() const;
};
-
// Simulator/Debugger debug instructions ---------------------------------------
// Each debug marker is represented by a HLT instruction. The immediate comment
// field in the instruction is used to identify the type of debug marker. Each
@@ -751,5 +732,4 @@ class NEONFormatDecoder {
} // namespace internal
} // namespace v8
-
-#endif // V8_ARM64_INSTRUCTIONS_ARM64_H_
+#endif // V8_CODEGEN_ARM64_INSTRUCTIONS_ARM64_H_
diff --git a/deps/v8/src/arm64/instrument-arm64.cc b/deps/v8/src/codegen/arm64/instrument-arm64.cc
index 10d8ee4bc7..a399cd3ad1 100644
--- a/deps/v8/src/arm64/instrument-arm64.cc
+++ b/deps/v8/src/codegen/arm64/instrument-arm64.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/instrument-arm64.h"
+#include "src/codegen/arm64/instrument-arm64.h"
namespace v8 {
namespace internal {
@@ -13,21 +13,11 @@ Counter::Counter(const char* name, CounterType type)
strncpy(name_, name, kCounterNameMaxLength);
}
+void Counter::Enable() { enabled_ = true; }
-void Counter::Enable() {
- enabled_ = true;
-}
-
-
-void Counter::Disable() {
- enabled_ = false;
-}
-
-
-bool Counter::IsEnabled() {
- return enabled_;
-}
+void Counter::Disable() { enabled_ = false; }
+bool Counter::IsEnabled() { return enabled_; }
void Counter::Increment() {
if (enabled_) {
@@ -35,7 +25,6 @@ void Counter::Increment() {
}
}
-
uint64_t Counter::count() {
uint64_t result = count_;
if (type_ == Gauge) {
@@ -45,21 +34,14 @@ uint64_t Counter::count() {
return result;
}
+const char* Counter::name() { return name_; }
-const char* Counter::name() {
- return name_;
-}
+CounterType Counter::type() { return type_; }
-
-CounterType Counter::type() {
- return type_;
-}
-
-
-typedef struct {
+struct CounterDescriptor {
const char* name;
CounterType type;
-} CounterDescriptor;
+};
static const CounterDescriptor kCounterList[] = {
{"Instruction", Cumulative},
@@ -121,7 +103,6 @@ Instrument::Instrument(const char* datafile, uint64_t sample_period)
DumpCounterNames();
}
-
Instrument::~Instrument() {
// Dump any remaining instruction data to the output file.
DumpCounters();
@@ -137,7 +118,6 @@ Instrument::~Instrument() {
}
}
-
void Instrument::Update() {
// Increment the instruction counter, and dump all counters if a sample period
// has elapsed.
@@ -150,7 +130,6 @@ void Instrument::Update() {
}
}
-
void Instrument::DumpCounters() {
// Iterate through the counter objects, dumping their values to the output
// stream.
@@ -162,7 +141,6 @@ void Instrument::DumpCounters() {
fflush(output_stream_);
}
-
void Instrument::DumpCounterNames() {
// Iterate through the counter objects, dumping the counter names to the
// output stream.
@@ -174,16 +152,19 @@ void Instrument::DumpCounterNames() {
fflush(output_stream_);
}
-
void Instrument::HandleInstrumentationEvent(unsigned event) {
switch (event) {
- case InstrumentStateEnable: Enable(); break;
- case InstrumentStateDisable: Disable(); break;
- default: DumpEventMarker(event);
+ case InstrumentStateEnable:
+ Enable();
+ break;
+ case InstrumentStateDisable:
+ Disable();
+ break;
+ default:
+ DumpEventMarker(event);
}
}
-
void Instrument::DumpEventMarker(unsigned marker) {
// Dumpan event marker to the output stream as a specially formatted comment
// line.
@@ -193,7 +174,6 @@ void Instrument::DumpEventMarker(unsigned marker) {
(marker >> 8) & 0xFF, counter->count());
}
-
Counter* Instrument::GetCounter(const char* name) {
// Get a Counter object by name from the counter list.
std::list<Counter*>::const_iterator it;
@@ -206,13 +186,12 @@ Counter* Instrument::GetCounter(const char* name) {
// A Counter by that name does not exist: print an error message to stderr
// and the output file, and exit.
static const char* error_message =
- "# Error: Unknown counter \"%s\". Exiting.\n";
+ "# Error: Unknown counter \"%s\". Exiting.\n";
fprintf(stderr, error_message, name);
fprintf(output_stream_, error_message, name);
exit(1);
}
-
void Instrument::Enable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
@@ -220,7 +199,6 @@ void Instrument::Enable() {
}
}
-
void Instrument::Disable() {
std::list<Counter*>::iterator it;
for (it = counters_.begin(); it != counters_.end(); it++) {
@@ -228,28 +206,24 @@ void Instrument::Disable() {
}
}
-
void Instrument::VisitPCRelAddressing(Instruction* instr) {
Update();
static Counter* counter = GetCounter("PC Addressing");
counter->Increment();
}
-
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
-
void Instrument::VisitLogicalImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Logical DP");
counter->Increment();
}
-
void Instrument::VisitMoveWideImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Move Immediate");
@@ -262,70 +236,60 @@ void Instrument::VisitMoveWideImmediate(Instruction* instr) {
}
}
-
void Instrument::VisitBitfield(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
-
void Instrument::VisitExtract(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
-
void Instrument::VisitUnconditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
-
void Instrument::VisitUnconditionalBranchToRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Unconditional Branch");
counter->Increment();
}
-
void Instrument::VisitCompareBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Compare and Branch");
counter->Increment();
}
-
void Instrument::VisitTestBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Test and Branch");
counter->Increment();
}
-
void Instrument::VisitConditionalBranch(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Branch");
counter->Increment();
}
-
void Instrument::VisitSystem(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
-
void Instrument::VisitException(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
-
void Instrument::InstrumentLoadStorePair(Instruction* instr) {
static Counter* load_pair_counter = GetCounter("Load Pair");
static Counter* store_pair_counter = GetCounter("Store Pair");
@@ -336,32 +300,27 @@ void Instrument::InstrumentLoadStorePair(Instruction* instr) {
}
}
-
void Instrument::VisitLoadStorePairPostIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
-
void Instrument::VisitLoadStorePairOffset(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
-
void Instrument::VisitLoadStorePairPreIndex(Instruction* instr) {
Update();
InstrumentLoadStorePair(instr);
}
-
void Instrument::VisitLoadLiteral(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Load Literal");
counter->Increment();
}
-
void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* load_int_counter = GetCounter("Load Integer");
static Counter* store_int_counter = GetCounter("Store Integer");
@@ -369,52 +328,56 @@ void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* store_fp_counter = GetCounter("Store FP");
switch (instr->Mask(LoadStoreMask)) {
- case STRB_w: // Fall through.
- case STRH_w: // Fall through.
- case STR_w: // Fall through.
- case STR_x: store_int_counter->Increment(); break;
- case STR_s: // Fall through.
- case STR_d: store_fp_counter->Increment(); break;
- case LDRB_w: // Fall through.
- case LDRH_w: // Fall through.
- case LDR_w: // Fall through.
- case LDR_x: // Fall through.
- case LDRSB_x: // Fall through.
- case LDRSH_x: // Fall through.
- case LDRSW_x: // Fall through.
- case LDRSB_w: // Fall through.
- case LDRSH_w: load_int_counter->Increment(); break;
- case LDR_s: // Fall through.
- case LDR_d: load_fp_counter->Increment(); break;
- default: UNREACHABLE();
+ case STRB_w: // Fall through.
+ case STRH_w: // Fall through.
+ case STR_w: // Fall through.
+ case STR_x:
+ store_int_counter->Increment();
+ break;
+ case STR_s: // Fall through.
+ case STR_d:
+ store_fp_counter->Increment();
+ break;
+ case LDRB_w: // Fall through.
+ case LDRH_w: // Fall through.
+ case LDR_w: // Fall through.
+ case LDR_x: // Fall through.
+ case LDRSB_x: // Fall through.
+ case LDRSH_x: // Fall through.
+ case LDRSW_x: // Fall through.
+ case LDRSB_w: // Fall through.
+ case LDRSH_w:
+ load_int_counter->Increment();
+ break;
+ case LDR_s: // Fall through.
+ case LDR_d:
+ load_fp_counter->Increment();
+ break;
+ default:
+ UNREACHABLE();
}
}
-
void Instrument::VisitLoadStoreUnscaledOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
-
void Instrument::VisitLoadStorePostIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
-
void Instrument::VisitLoadStorePreIndex(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
-
void Instrument::VisitLoadStoreRegisterOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
}
-
void Instrument::VisitLoadStoreUnsignedOffset(Instruction* instr) {
Update();
InstrumentLoadStore(instr);
@@ -433,7 +396,9 @@ void Instrument::VisitLoadStoreAcquireRelease(Instruction* instr) {
case LDAXR_b: // Fall-through.
case LDAXR_h: // Fall-through.
case LDAXR_w: // Fall-through.
- case LDAXR_x: load_counter->Increment(); break;
+ case LDAXR_x:
+ load_counter->Increment();
+ break;
case STLR_b: // Fall-through.
case STLR_h: // Fall-through.
case STLR_w: // Fall-through.
@@ -441,8 +406,11 @@ void Instrument::VisitLoadStoreAcquireRelease(Instruction* instr) {
case STLXR_b: // Fall-through.
case STLXR_h: // Fall-through.
case STLXR_w: // Fall-through.
- case STLXR_x: store_counter->Increment(); break;
- default: UNREACHABLE();
+ case STLXR_x:
+ store_counter->Increment();
+ break;
+ default:
+ UNREACHABLE();
}
}
@@ -452,126 +420,108 @@ void Instrument::VisitLogicalShifted(Instruction* instr) {
counter->Increment();
}
-
void Instrument::VisitAddSubShifted(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
-
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
-
void Instrument::VisitAddSubWithCarry(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
-
void Instrument::VisitConditionalCompareRegister(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
-
void Instrument::VisitConditionalCompareImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
-
void Instrument::VisitConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
-
void Instrument::VisitDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
-
void Instrument::VisitDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
-
void Instrument::VisitDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other Int DP");
counter->Increment();
}
-
void Instrument::VisitFPCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
-
void Instrument::VisitFPConditionalCompare(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Compare");
counter->Increment();
}
-
void Instrument::VisitFPConditionalSelect(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Conditional Select");
counter->Increment();
}
-
void Instrument::VisitFPImmediate(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
-
void Instrument::VisitFPDataProcessing1Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
-
void Instrument::VisitFPDataProcessing2Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
-
void Instrument::VisitFPDataProcessing3Source(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
-
void Instrument::VisitFPIntegerConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
counter->Increment();
}
-
void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
Update();
static Counter* counter = GetCounter("FP DP");
@@ -738,13 +688,11 @@ void Instrument::VisitUnallocated(Instruction* instr) {
counter->Increment();
}
-
void Instrument::VisitUnimplemented(Instruction* instr) {
Update();
static Counter* counter = GetCounter("Other");
counter->Increment();
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/instrument-arm64.h b/deps/v8/src/codegen/arm64/instrument-arm64.h
index 8b3d7e6023..690cb7a49f 100644
--- a/deps/v8/src/arm64/instrument-arm64.h
+++ b/deps/v8/src/codegen/arm64/instrument-arm64.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_INSTRUMENT_ARM64_H_
-#define V8_ARM64_INSTRUMENT_ARM64_H_
+#ifndef V8_CODEGEN_ARM64_INSTRUMENT_ARM64_H_
+#define V8_CODEGEN_ARM64_INSTRUMENT_ARM64_H_
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
-#include "src/arm64/constants-arm64.h"
-#include "src/arm64/decoder-arm64.h"
+#include "src/codegen/arm64/constants-arm64.h"
+#include "src/codegen/arm64/decoder-arm64.h"
namespace v8 {
namespace internal {
@@ -17,19 +17,13 @@ namespace internal {
const int kCounterNameMaxLength = 256;
const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
-
-enum InstrumentState {
- InstrumentStateDisable = 0,
- InstrumentStateEnable = 1
-};
-
+enum InstrumentState { InstrumentStateDisable = 0, InstrumentStateEnable = 1 };
enum CounterType {
Gauge = 0, // Gauge counters reset themselves after reading.
Cumulative = 1 // Cumulative counters keep their value after reading.
};
-
class Counter {
public:
explicit Counter(const char* name, CounterType type = Gauge);
@@ -49,18 +43,17 @@ class Counter {
CounterType type_;
};
-
-class Instrument: public DecoderVisitor {
+class Instrument : public DecoderVisitor {
public:
explicit Instrument(
const char* datafile = nullptr,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
- // Declare all Visitor functions.
- #define DECLARE(A) void Visit##A(Instruction* instr);
+// Declare all Visitor functions.
+#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
- #undef DECLARE
+#undef DECLARE
private:
void Update();
@@ -77,11 +70,11 @@ class Instrument: public DecoderVisitor {
std::list<Counter*> counters_;
- FILE *output_stream_;
+ FILE* output_stream_;
uint64_t sample_period_;
};
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_INSTRUMENT_ARM64_H_
+#endif // V8_CODEGEN_ARM64_INSTRUMENT_ARM64_H_
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
index b16fa8ae67..2d86ace4bc 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc
@@ -4,9 +4,9 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
@@ -74,7 +74,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
diff --git a/deps/v8/src/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index be6cd4c933..62bd9c26bf 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64-inl.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -2,28 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
-#define V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
#include <ctype.h>
-#include "src/globals.h"
+#include "src/common/globals.h"
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/assembler-arm64.h"
-#include "src/arm64/instrument-arm64.h"
#include "src/base/bits.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/assembler-arm64.h"
+#include "src/codegen/arm64/instrument-arm64.h"
+#include "src/codegen/macro-assembler.h"
namespace v8 {
namespace internal {
-
MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-
void TurboAssembler::And(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
@@ -50,9 +48,7 @@ void TurboAssembler::Bic(const Register& rd, const Register& rn,
LogicalMacro(rd, rn, operand, BIC);
}
-
-void MacroAssembler::Bics(const Register& rd,
- const Register& rn,
+void MacroAssembler::Bics(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -97,11 +93,8 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand,
}
}
-
-void MacroAssembler::Ccmn(const Register& rn,
- const Operand& operand,
- StatusFlags nzcv,
- Condition cond) {
+void MacroAssembler::Ccmn(const Register& rn, const Operand& operand,
+ StatusFlags nzcv, Condition cond) {
DCHECK(allow_macro_instructions());
if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP);
@@ -186,45 +179,35 @@ void TurboAssembler::Adc(const Register& rd, const Register& rn,
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}
-
-void MacroAssembler::Adcs(const Register& rd,
- const Register& rn,
+void MacroAssembler::Adcs(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}
-
-void MacroAssembler::Sbc(const Register& rd,
- const Register& rn,
+void MacroAssembler::Sbc(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}
-
-void MacroAssembler::Sbcs(const Register& rd,
- const Register& rn,
+void MacroAssembler::Sbcs(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}
-
-void MacroAssembler::Ngc(const Register& rd,
- const Operand& operand) {
+void MacroAssembler::Ngc(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
Register zr = AppropriateZeroRegFor(rd);
Sbc(rd, zr, operand);
}
-
-void MacroAssembler::Ngcs(const Register& rd,
- const Operand& operand) {
+void MacroAssembler::Ngcs(const Register& rd, const Operand& operand) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
Register zr = AppropriateZeroRegFor(rd);
@@ -303,10 +286,7 @@ void TurboAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb,
bfi(rd, rn, lsb, width);
}
-
-void MacroAssembler::Bfxil(const Register& rd,
- const Register& rn,
- unsigned lsb,
+void MacroAssembler::Bfxil(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -340,9 +320,7 @@ void TurboAssembler::Brk(int code) {
brk(code);
}
-
-void MacroAssembler::Cinc(const Register& rd,
- const Register& rn,
+void MacroAssembler::Cinc(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -350,9 +328,7 @@ void MacroAssembler::Cinc(const Register& rd,
cinc(rd, rn, cond);
}
-
-void MacroAssembler::Cinv(const Register& rd,
- const Register& rn,
+void MacroAssembler::Cinv(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -380,18 +356,15 @@ void TurboAssembler::Cneg(const Register& rd, const Register& rn,
cneg(rd, rn, cond);
}
-
// Conditionally zero the destination register. Only X registers are supported
// due to the truncation side-effect when used on W registers.
-void MacroAssembler::CzeroX(const Register& rd,
- Condition cond) {
+void MacroAssembler::CzeroX(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP() && rd.Is64Bits());
DCHECK((cond != al) && (cond != nv));
csel(rd, xzr, rd, cond);
}
-
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
void TurboAssembler::CmovX(const Register& rd, const Register& rn,
@@ -432,22 +405,16 @@ void TurboAssembler::Csinc(const Register& rd, const Register& rn,
csinc(rd, rn, rm, cond);
}
-
-void MacroAssembler::Csinv(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
+void MacroAssembler::Csinv(const Register& rd, const Register& rn,
+ const Register& rm, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
csinv(rd, rn, rm, cond);
}
-
-void MacroAssembler::Csneg(const Register& rd,
- const Register& rn,
- const Register& rm,
- Condition cond) {
+void MacroAssembler::Csneg(const Register& rd, const Register& rn,
+ const Register& rm, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@@ -469,11 +436,8 @@ void TurboAssembler::Debug(const char* message, uint32_t code, Instr params) {
debug(message, code, params);
}
-
-void MacroAssembler::Extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb) {
+void MacroAssembler::Extr(const Register& rd, const Register& rn,
+ const Register& rm, unsigned lsb) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
extr(rd, rn, rm, lsb);
@@ -716,13 +680,11 @@ void TurboAssembler::Fsub(const VRegister& fd, const VRegister& fn,
fsub(fd, fn, fm);
}
-
void MacroAssembler::Hint(SystemHint code) {
DCHECK(allow_macro_instructions());
hint(code);
}
-
void MacroAssembler::Hlt(int code) {
DCHECK(allow_macro_instructions());
hlt(code);
@@ -792,7 +754,6 @@ void TurboAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
mrs(rt, sysreg);
}
-
void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
DCHECK(allow_macro_instructions());
msr(sysreg, rt);
@@ -831,7 +792,6 @@ void TurboAssembler::Ret(const Register& xn) {
CheckVeneerPool(false, false);
}
-
void MacroAssembler::Rev(const Register& rd, const Register& rn) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -864,10 +824,7 @@ void TurboAssembler::Ror(const Register& rd, const Register& rn,
rorv(rd, rn, rm);
}
-
-void MacroAssembler::Sbfiz(const Register& rd,
- const Register& rn,
- unsigned lsb,
+void MacroAssembler::Sbfiz(const Register& rd, const Register& rn, unsigned lsb,
unsigned width) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -894,21 +851,15 @@ void TurboAssembler::Sdiv(const Register& rd, const Register& rn,
sdiv(rd, rn, rm);
}
-
-void MacroAssembler::Smaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
+void MacroAssembler::Smaddl(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smaddl(rd, rn, rm, ra);
}
-
-void MacroAssembler::Smsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
+void MacroAssembler::Smsubl(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
smsubl(rd, rn, rm, ra);
@@ -921,9 +872,7 @@ void TurboAssembler::Smull(const Register& rd, const Register& rn,
smull(rd, rn, rm);
}
-
-void MacroAssembler::Smulh(const Register& rd,
- const Register& rn,
+void MacroAssembler::Smulh(const Register& rd, const Register& rn,
const Register& rm) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@@ -982,21 +931,15 @@ void TurboAssembler::Udiv(const Register& rd, const Register& rn,
udiv(rd, rn, rm);
}
-
-void MacroAssembler::Umaddl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
+void MacroAssembler::Umaddl(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
umaddl(rd, rn, rm, ra);
}
-
-void MacroAssembler::Umsubl(const Register& rd,
- const Register& rn,
- const Register& rm,
- const Register& ra) {
+void MacroAssembler::Umsubl(const Register& rd, const Register& rn,
+ const Register& rm, const Register& ra) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
umsubl(rd, rn, rm, ra);
@@ -1025,7 +968,6 @@ void TurboAssembler::InitializeRootRegister() {
Mov(kRootRegister, Operand(isolate_root));
}
-
void MacroAssembler::SmiTag(Register dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
@@ -1100,70 +1042,6 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
JumpIfSmi(value, nullptr, not_smi_label);
}
-
-void MacroAssembler::JumpIfBothSmi(Register value1,
- Register value2,
- Label* both_smi_label,
- Label* not_smi_label) {
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- // Check if both tag bits are clear.
- Orr(tmp, value1, value2);
- JumpIfSmi(tmp, both_smi_label, not_smi_label);
-}
-
-
-void MacroAssembler::JumpIfEitherSmi(Register value1,
- Register value2,
- Label* either_smi_label,
- Label* not_smi_label) {
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- // Check if either tag bit is clear.
- And(tmp, value1, value2);
- JumpIfSmi(tmp, either_smi_label, not_smi_label);
-}
-
-
-void MacroAssembler::JumpIfEitherNotSmi(Register value1,
- Register value2,
- Label* not_smi_label) {
- JumpIfBothSmi(value1, value2, nullptr, not_smi_label);
-}
-
-
-void MacroAssembler::JumpIfBothNotSmi(Register value1,
- Register value2,
- Label* not_smi_label) {
- JumpIfEitherSmi(value1, value2, nullptr, not_smi_label);
-}
-
-
-void MacroAssembler::ObjectTag(Register tagged_obj, Register obj) {
- STATIC_ASSERT(kHeapObjectTag == 1);
- if (emit_debug_code()) {
- Label ok;
- Tbz(obj, 0, &ok);
- Abort(AbortReason::kObjectTagged);
- Bind(&ok);
- }
- Orr(tagged_obj, obj, kHeapObjectTag);
-}
-
-
-void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
- STATIC_ASSERT(kHeapObjectTag == 1);
- if (emit_debug_code()) {
- Label ok;
- Tbnz(obj, 0, &ok);
- Abort(AbortReason::kObjectNotTagged);
- Bind(&ok);
- }
- Bic(untagged_obj, obj, kHeapObjectTag);
-}
-
void TurboAssembler::jmp(Label* L) { B(L); }
void TurboAssembler::Push(Handle<HeapObject> handle) {
@@ -1191,7 +1069,13 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
return;
}
DCHECK_EQ(size % 16, 0);
-
+#if V8_OS_WIN
+ while (size > kStackPageSize) {
+ Sub(sp, sp, kStackPageSize);
+ Str(xzr, MemOperand(sp));
+ size -= kStackPageSize;
+ }
+#endif
Sub(sp, sp, size);
}
@@ -1207,22 +1091,33 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
}
AssertPositiveOrZero(count);
- Sub(sp, sp, size);
-}
-
-
-void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
- DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
- const Operand size(count_smi,
- (shift >= 0) ? (LSL) : (LSR),
- (shift >= 0) ? (shift) : (-shift));
-
- if (size.IsZero()) {
- return;
- }
+#if V8_OS_WIN
+ // "Functions that allocate 4k or more worth of stack must ensure that each
+ // page prior to the final page is touched in order." Source:
+ // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=vs-2019#stack
+ // Callers expect count register to not be clobbered, so copy it.
+ UseScratchRegisterScope temps(this);
+ Register bytes_scratch = temps.AcquireX();
+ Mov(bytes_scratch, size);
+
+ Label check_offset;
+ Label touch_next_page;
+ B(&check_offset);
+ Bind(&touch_next_page);
+ Sub(sp, sp, kStackPageSize);
+ // Just to touch the page, before we increment further.
+ Str(xzr, MemOperand(sp));
+ Sub(bytes_scratch, bytes_scratch, kStackPageSize);
+
+ Bind(&check_offset);
+ Cmp(bytes_scratch, kStackPageSize);
+ B(gt, &touch_next_page);
+
+ Sub(sp, sp, bytes_scratch);
+#else
Sub(sp, sp, size);
+#endif
}
void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
@@ -1280,25 +1175,8 @@ void TurboAssembler::DropSlots(int64_t count) {
void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }
-void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
- DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
- const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
- const Operand size(count_smi,
- (shift >= 0) ? (LSL) : (LSR),
- (shift >= 0) ? (shift) : (-shift));
-
- if (size.IsZero()) {
- return;
- }
-
- Add(sp, sp, size);
-}
-
-
-void MacroAssembler::CompareAndBranch(const Register& lhs,
- const Operand& rhs,
- Condition cond,
- Label* label) {
+void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs,
+ Condition cond, Label* label) {
if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
((cond == eq) || (cond == ne))) {
if (cond == eq) {
@@ -1338,26 +1216,22 @@ void TurboAssembler::TestAndBranchIfAllClear(const Register& reg,
}
}
-
void MacroAssembler::InlineData(uint64_t data) {
DCHECK(is_uint16(data));
InstructionAccurateScope scope(this, 1);
movz(xzr, data);
}
-
void MacroAssembler::EnableInstrumentation() {
InstructionAccurateScope scope(this, 1);
movn(xzr, InstrumentStateEnable);
}
-
void MacroAssembler::DisableInstrumentation() {
InstructionAccurateScope scope(this, 1);
movn(xzr, InstrumentStateDisable);
}
-
void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
DCHECK_EQ(strlen(marker_name), 2);
@@ -1372,4 +1246,4 @@ void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
+#endif // V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_INL_H_
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index fcf5f3c811..aab9fc79a2 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -4,29 +4,29 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/assembler.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
-#include "src/bootstrapper.h"
-#include "src/callable.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
-#include "src/external-reference-table.h"
-#include "src/frame-constants.h"
-#include "src/frames-inl.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
-#include "src/macro-assembler-inl.h"
-#include "src/register-configuration.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
-#include "src/arm64/macro-assembler-arm64.h"
+#include "src/codegen/arm64/macro-assembler-arm64.h"
#endif
namespace v8 {
@@ -40,25 +40,11 @@ CPURegList TurboAssembler::DefaultFPTmpList() {
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
- int bytes = 0;
auto list = kCallerSaved;
- // We only allow one exclusion register, so if the list is of even length
- // before exclusions, it must still be afterwards, to maintain alignment.
- // Therefore, we can ignore the exclusion register in the computation.
- // However, we leave it in the argument list to mirror the prototype for
- // Push/PopCallerSaved().
-
- // X18 is excluded from caller-saved register list on ARM64 which makes
- // caller-saved registers in odd number. padreg is used accordingly to
- // maintain the alignment.
- DCHECK_EQ(list.Count() % 2, 1);
- if (exclusion.Is(no_reg)) {
- bytes += kXRegSizeInBits / 8;
- } else {
- bytes -= kXRegSizeInBits / 8;
- }
+ list.Remove(exclusion);
+ list.Align();
- bytes += list.Count() * kXRegSizeInBits / 8;
+ int bytes = list.Count() * kXRegSizeInBits / 8;
if (fp_mode == kSaveFPRegs) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
@@ -69,20 +55,13 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) {
- int bytes = 0;
auto list = kCallerSaved;
+ list.Remove(exclusion);
+ list.Align();
- // X18 is excluded from caller-saved register list on ARM64, use padreg
- // accordingly to maintain alignment.
- if (!exclusion.Is(no_reg)) {
- list.Remove(exclusion);
- } else {
- list.Combine(padreg);
- }
-
- DCHECK_EQ(list.Count() % 2, 0);
PushCPURegList(list);
- bytes += list.Count() * kXRegSizeInBits / 8;
+
+ int bytes = list.Count() * kXRegSizeInBits / 8;
if (fp_mode == kSaveFPRegs) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
@@ -101,16 +80,9 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
}
auto list = kCallerSaved;
+ list.Remove(exclusion);
+ list.Align();
- // X18 is excluded from caller-saved register list on ARM64, use padreg
- // accordingly to maintain alignment.
- if (!exclusion.Is(no_reg)) {
- list.Remove(exclusion);
- } else {
- list.Combine(padreg);
- }
-
- DCHECK_EQ(list.Count() % 2, 0);
PopCPURegList(list);
bytes += list.Count() * kXRegSizeInBits / 8;
@@ -319,7 +291,8 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
ExternalReference reference = bit_cast<ExternalReference>(addr);
IndirectLoadExternalReference(rd, reference);
return;
- } else if (operand.ImmediateRMode() == RelocInfo::EMBEDDED_OBJECT) {
+ } else if (operand.ImmediateRMode() ==
+ RelocInfo::FULL_EMBEDDED_OBJECT) {
Handle<HeapObject> x(
reinterpret_cast<Address*>(operand.ImmediateValue()));
IndirectLoadConstant(rd, x);
@@ -351,8 +324,8 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
// this case, the instruction is discarded.
//
// If sp is an operand, add #0 is emitted, otherwise, orr #0.
- if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
- (discard_mode == kDontDiscardForSameWReg))) {
+ if (!rd.Is(operand.reg()) ||
+ (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
Assembler::mov(rd, operand.reg());
}
// This case can handle writes into the system stack pointer directly.
@@ -571,7 +544,6 @@ unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
return count;
}
-
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000.
bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
@@ -727,7 +699,7 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
Ldr(temp, operand.immediate());
AddSubMacro(rd, rn, temp, S, op);
} else if ((operand.IsImmediate() &&
- !IsImmAddSub(operand.ImmediateValue())) ||
+ !IsImmAddSub(operand.ImmediateValue())) ||
(rn.IsZero() && !operand.IsShiftedRegister()) ||
(operand.IsShiftedRegister() && (operand.shift() == ROR))) {
UseScratchRegisterScope temps(this);
@@ -781,9 +753,9 @@ void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
// Add/sub with carry (shifted register).
DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
DCHECK(operand.shift() != ROR);
- DCHECK(is_uintn(operand.shift_amount(),
- rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
- : kWRegSizeInBitsLog2));
+ DCHECK(is_uintn(operand.shift_amount(), rd.SizeInBits() == kXRegSizeInBits
+ ? kXRegSizeInBitsLog2
+ : kWRegSizeInBitsLog2));
Register temp = temps.AcquireSameSizeAs(rn);
EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
AddSubWithCarry(rd, rn, temp, S, op);
@@ -880,17 +852,15 @@ bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch(
// instruction in the chain is too far away.
if (label->is_bound() || label->is_linked()) {
need_longer_range =
- !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
+ !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
}
if (!need_longer_range && !label->is_bound()) {
int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
- unresolved_branches_.insert(
- std::pair<int, FarBranchInfo>(max_reachable_pc,
- FarBranchInfo(pc_offset(), label)));
+ unresolved_branches_.insert(std::pair<int, FarBranchInfo>(
+ max_reachable_pc, FarBranchInfo(pc_offset(), label)));
// Also maintain the next pool check.
- next_veneer_pool_check_ =
- Min(next_veneer_pool_check_,
- max_reachable_pc - kVeneerDistanceCheckMargin);
+ next_veneer_pool_check_ = Min(
+ next_veneer_pool_check_, max_reachable_pc - kVeneerDistanceCheckMargin);
}
return need_longer_range;
}
@@ -919,8 +889,8 @@ void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
- InstructionAccurateScope scope(
- this, PatchingAssembler::kAdrFarPatchableNInstrs);
+ InstructionAccurateScope scope(this,
+ PatchingAssembler::kAdrFarPatchableNInstrs);
adr(rd, label);
for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
nop(ADR_FAR_NOP);
@@ -936,12 +906,23 @@ void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) {
B(static_cast<Condition>(type), label);
} else {
switch (type) {
- case always: B(label); break;
- case never: break;
- case reg_zero: Cbz(reg, label); break;
- case reg_not_zero: Cbnz(reg, label); break;
- case reg_bit_clear: Tbz(reg, bit, label); break;
- case reg_bit_set: Tbnz(reg, bit, label); break;
+ case always:
+ B(label);
+ break;
+ case never:
+ break;
+ case reg_zero:
+ Cbz(reg, label);
+ break;
+ case reg_not_zero:
+ Cbnz(reg, label);
+ break;
+ case reg_bit_clear:
+ Tbz(reg, bit, label);
+ break;
+ case reg_bit_set:
+ Tbnz(reg, bit, label);
+ break;
default:
UNREACHABLE();
}
@@ -954,7 +935,7 @@ void TurboAssembler::B(Label* label, Condition cond) {
Label done;
bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
+ NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
if (need_extra_instructions) {
b(&done, NegateCondition(cond));
@@ -970,7 +951,7 @@ void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
Label done;
bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
if (need_extra_instructions) {
tbz(rt, bit_pos, &done);
@@ -986,7 +967,7 @@ void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
Label done;
bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
if (need_extra_instructions) {
tbnz(rt, bit_pos, &done);
@@ -1002,7 +983,7 @@ void TurboAssembler::Cbnz(const Register& rt, Label* label) {
Label done;
bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
if (need_extra_instructions) {
cbz(rt, &done);
@@ -1018,7 +999,7 @@ void TurboAssembler::Cbz(const Register& rt, Label* label) {
Label done;
bool need_extra_instructions =
- NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
if (need_extra_instructions) {
cbnz(rt, &done);
@@ -1029,7 +1010,6 @@ void TurboAssembler::Cbz(const Register& rt, Label* label) {
bind(&done);
}
-
// Pseudo-instructions.
void TurboAssembler::Abs(const Register& rd, const Register& rm,
@@ -1053,7 +1033,6 @@ void TurboAssembler::Abs(const Register& rd, const Register& rm,
}
}
-
// Abstracted stack operations.
void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1,
@@ -1124,53 +1103,6 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
str(src0, MemOperand(sp, src1.SizeInBytes()));
}
-void MacroAssembler::PushPopQueue::PushQueued() {
- DCHECK_EQ(0, size_ % 16);
- if (queued_.empty()) return;
-
- size_t count = queued_.size();
- size_t index = 0;
- while (index < count) {
- // PushHelper can only handle registers with the same size and type, and it
- // can handle only four at a time. Batch them up accordingly.
- CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
- int batch_index = 0;
- do {
- batch[batch_index++] = queued_[index++];
- } while ((batch_index < 4) && (index < count) &&
- batch[0].IsSameSizeAndType(queued_[index]));
-
- masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
- batch[0], batch[1], batch[2], batch[3]);
- }
-
- queued_.clear();
-}
-
-
-void MacroAssembler::PushPopQueue::PopQueued() {
- DCHECK_EQ(0, size_ % 16);
- if (queued_.empty()) return;
-
- size_t count = queued_.size();
- size_t index = 0;
- while (index < count) {
- // PopHelper can only handle registers with the same size and type, and it
- // can handle only four at a time. Batch them up accordingly.
- CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
- int batch_index = 0;
- do {
- batch[batch_index++] = queued_[index++];
- } while ((batch_index < 4) && (index < count) &&
- batch[0].IsSameSizeAndType(queued_[index]));
-
- masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
- batch[0], batch[1], batch[2], batch[3]);
- }
-
- queued_.clear();
-}
-
void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
DCHECK_EQ(0, (size * registers.Count()) % 16);
@@ -1351,16 +1283,13 @@ void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
Stp(src1, src2, MemOperand(sp, offset));
}
-
-void MacroAssembler::PeekPair(const CPURegister& dst1,
- const CPURegister& dst2,
+void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2,
int offset) {
DCHECK(AreSameSizeAndType(dst1, dst2));
DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
Ldp(dst1, dst2, MemOperand(sp, offset));
}
-
void MacroAssembler::PushCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
@@ -1380,7 +1309,6 @@ void MacroAssembler::PushCalleeSavedRegisters() {
stp(x19, x20, tos);
}
-
void MacroAssembler::PopCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
@@ -1535,7 +1463,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
-
void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
AllowDeferredHandleDereference heap_object_check;
if (object->IsHeapObject()) {
@@ -1635,7 +1562,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
-
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
@@ -1712,8 +1638,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
CallCodeObject(centry);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments,
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// All arguments must be on the stack before this function is called.
// x0 holds the return value after the call.
@@ -1764,7 +1689,7 @@ int TurboAssembler::ActivationFrameAlignment() {
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
return base::OS::ActivationFrameAlignment();
-#else // V8_HOST_ARCH_ARM64
+#else // V8_HOST_ARCH_ARM64
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
@@ -2138,23 +2063,6 @@ void TurboAssembler::CallForDeoptimization(Address target, int deopt_id) {
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
}
-void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
- VRegister scratch_d,
- Label* on_successful_conversion,
- Label* on_failed_conversion) {
- // Convert to an int and back again, then compare with the original value.
- Fcvtzs(as_int, value);
- Scvtf(scratch_d, as_int);
- Fcmp(value, scratch_d);
-
- if (on_successful_conversion) {
- B(on_successful_conversion, eq);
- }
- if (on_failed_conversion) {
- B(on_failed_conversion, ne);
- }
-}
-
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
@@ -2494,7 +2402,8 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// sp[1] : type
// sp[0] : for alignment
} else if (type == StackFrame::WASM_COMPILED ||
- type == StackFrame::WASM_COMPILE_LAZY) {
+ type == StackFrame::WASM_COMPILE_LAZY ||
+ type == StackFrame::WASM_EXIT) {
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
@@ -2531,13 +2440,11 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
Pop(fp, lr);
}
-
void MacroAssembler::ExitFramePreserveFPRegs() {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PushCPURegList(kCallerSavedV);
}
-
void MacroAssembler::ExitFrameRestoreFPRegs() {
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
@@ -2613,7 +2520,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-
// Leave the current exit frame.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
@@ -2673,7 +2579,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
IncrementCounter(counter, -value, scratch1, scratch2);
@@ -2688,36 +2593,27 @@ void MacroAssembler::MaybeDropFrames() {
ne);
}
-void MacroAssembler::JumpIfObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type,
- Label* if_cond_pass,
- Condition cond) {
+void MacroAssembler::JumpIfObjectType(Register object, Register map,
+ Register type_reg, InstanceType type,
+ Label* if_cond_pass, Condition cond) {
CompareObjectType(object, map, type_reg, type);
B(cond, if_cond_pass);
}
-
// Sets condition flags based on comparison, and returns type in type_reg.
-void MacroAssembler::CompareObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type) {
+void MacroAssembler::CompareObjectType(Register object, Register map,
+ Register type_reg, InstanceType type) {
LoadTaggedPointerField(map, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(map, type_reg, type);
}
-
// Sets condition flags based on comparison, and returns type in type_reg.
-void MacroAssembler::CompareInstanceType(Register map,
- Register type_reg,
+void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
Cmp(type_reg, type);
}
-
void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
// Load the map's "bit field 2".
Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
@@ -2801,6 +2697,13 @@ void TurboAssembler::DecompressTaggedSigned(const Register& destination,
RecordComment("]");
}
+void TurboAssembler::DecompressTaggedSigned(const Register& destination,
+ const Register& source) {
+ RecordComment("[ DecompressTaggedSigned");
+ Sxtw(destination, source);
+ RecordComment("]");
+}
+
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedPointer");
@@ -2809,6 +2712,13 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination,
RecordComment("]");
}
+void TurboAssembler::DecompressTaggedPointer(const Register& destination,
+ const Register& source) {
+ RecordComment("[ DecompressTaggedPointer");
+ Add(destination, kRootRegister, Operand(source, SXTW));
+ RecordComment("]");
+}
+
void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
@@ -2834,65 +2744,29 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
RecordComment("]");
}
-void MacroAssembler::CompareAndSplit(const Register& lhs,
- const Operand& rhs,
- Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if ((if_true == if_false) && (if_false == fall_through)) {
- // Fall through.
- } else if (if_true == if_false) {
- B(if_true);
- } else if (if_false == fall_through) {
- CompareAndBranch(lhs, rhs, cond, if_true);
- } else if (if_true == fall_through) {
- CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
- } else {
- CompareAndBranch(lhs, rhs, cond, if_true);
- B(if_false);
- }
-}
-
-
-void MacroAssembler::TestAndSplit(const Register& reg,
- uint64_t bit_pattern,
- Label* if_all_clear,
- Label* if_any_set,
- Label* fall_through) {
- if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
- // Fall through.
- } else if (if_all_clear == if_any_set) {
- B(if_all_clear);
- } else if (if_all_clear == fall_through) {
- TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
- } else if (if_any_set == fall_through) {
- TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
+void TurboAssembler::DecompressAnyTagged(const Register& destination,
+ const Register& source) {
+ RecordComment("[ DecompressAnyTagged");
+ if (kUseBranchlessPtrDecompression) {
+ UseScratchRegisterScope temps(this);
+ // Branchlessly compute |masked_root|:
+ // masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
+ STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+ Register masked_root = temps.AcquireX();
+ // Sign extend tag bit to entire register.
+ Sbfx(masked_root, source, 0, kSmiTagSize);
+ And(masked_root, masked_root, kRootRegister);
+ // Now this add operation will either leave the value unchanged if it is a
+ // smi or add the isolate root if it is a heap object.
+ Add(destination, masked_root, Operand(source, SXTW));
} else {
- TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
- B(if_all_clear);
+ Label done;
+ Sxtw(destination, source);
+ JumpIfSmi(destination, &done);
+ Add(destination, kRootRegister, destination);
+ bind(&done);
}
-}
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK_GE(num_unsaved, 0);
- DCHECK_EQ(num_unsaved % 2, 0);
- DCHECK_EQ(kSafepointSavedRegisters % 2, 0);
- PopXRegList(kSafepointSavedRegisters);
- Drop(num_unsaved);
-}
-
-
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
- // adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK_GE(num_unsaved, 0);
- DCHECK_EQ(num_unsaved % 2, 0);
- DCHECK_EQ(kSafepointSavedRegisters % 2, 0);
- Claim(num_unsaved);
- PushXRegList(kSafepointSavedRegisters);
+ RecordComment("]");
}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
@@ -2919,36 +2793,22 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
}
}
-void MacroAssembler::CheckPageFlag(const Register& object,
- const Register& scratch, int mask,
+void TurboAssembler::CheckPageFlag(const Register& object, int mask,
Condition cc, Label* condition_met) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
And(scratch, object, ~kPageAlignmentMask);
Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
if (cc == eq) {
TestAndBranchIfAnySet(scratch, mask, condition_met);
} else {
+ DCHECK_EQ(cc, ne);
TestAndBranchIfAllClear(scratch, mask, condition_met);
}
}
-void TurboAssembler::CheckPageFlagSet(const Register& object,
- const Register& scratch, int mask,
- Label* if_any_set) {
- And(scratch, object, ~kPageAlignmentMask);
- Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- TestAndBranchIfAnySet(scratch, mask, if_any_set);
-}
-
-void TurboAssembler::CheckPageFlagClear(const Register& object,
- const Register& scratch, int mask,
- Label* if_all_clear) {
- And(scratch, object, ~kPageAlignmentMask);
- Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
- TestAndBranchIfAllClear(scratch, mask, if_all_clear);
-}
-
void MacroAssembler::RecordWriteField(Register object, int offset,
- Register value, Register scratch,
+ Register value,
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
@@ -2966,26 +2826,21 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
- Add(scratch, object, offset - kHeapObjectTag);
if (emit_debug_code()) {
Label ok;
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ Add(scratch, object, offset - kHeapObjectTag);
Tst(scratch, kTaggedSize - 1);
B(eq, &ok);
Abort(AbortReason::kUnalignedCellInWriteBarrier);
Bind(&ok);
}
- RecordWrite(object, scratch, value, lr_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
+ save_fp, remembered_set_action, OMIT_SMI_CHECK);
Bind(&done);
-
- // Clobber clobbered input registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
- Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
- }
}
void TurboAssembler::SaveRegisters(RegList registers) {
@@ -3012,7 +2867,7 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
PopCPURegList(regs);
}
-void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode) {
EphemeronKeyBarrierDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
@@ -3026,7 +2881,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
Register fp_mode_parameter(
descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
- MovePair(object_parameter, object, slot_parameter, address);
+ MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
@@ -3035,26 +2890,24 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
}
void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
+ Register object, Operand offset, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode) {
CallRecordWriteStub(
- object, address, remembered_set_action, fp_mode,
+ object, offset, remembered_set_action, fp_mode,
isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
kNullAddress);
}
void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Address wasm_target) {
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
+ Register object, Operand offset, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Address wasm_target) {
+ CallRecordWriteStub(object, offset, remembered_set_action, fp_mode,
Handle<Code>::null(), wasm_target);
}
void TurboAssembler::CallRecordWriteStub(
- Register object, Register address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- Handle<Code> code_target, Address wasm_target) {
+ Register object, Operand offset, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, Handle<Code> code_target, Address wasm_target) {
DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
// TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
// i.e. always emit remember set and save FP registers in RecordWriteStub. If
@@ -3075,7 +2928,7 @@ void TurboAssembler::CallRecordWriteStub(
Register fp_mode_parameter(
descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
- MovePair(object_parameter, object, slot_parameter, address);
+ MoveObjectAndSlot(object_parameter, slot_parameter, object, offset);
Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
@@ -3088,12 +2941,44 @@ void TurboAssembler::CallRecordWriteStub(
RestoreRegisters(registers);
}
-// Will clobber: object, address, value.
-// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
+void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset) {
+ DCHECK_NE(dst_object, dst_slot);
+ // If `offset` is a register, it cannot overlap with `object`.
+ DCHECK_IMPLIES(!offset.IsImmediate(), offset.reg() != object);
+
+ // If the slot register does not overlap with the object register, we can
+ // overwrite it.
+ if (dst_slot != object) {
+ Add(dst_slot, object, offset);
+ Mov(dst_object, object);
+ return;
+ }
+
+ DCHECK_EQ(dst_slot, object);
+
+ // If the destination object register does not overlap with the offset
+ // register, we can overwrite it.
+ if (offset.IsImmediate() || (offset.reg() != dst_object)) {
+ Mov(dst_object, dst_slot);
+ Add(dst_slot, dst_slot, offset);
+ return;
+ }
+
+ DCHECK_EQ(dst_object, offset.reg());
+
+ // We only have `dst_slot` and `dst_object` left as distinct registers so we
+ // have to swap them. We write this as a add+sub sequence to avoid using a
+ // scratch register.
+ Add(dst_slot, dst_slot, dst_object);
+ Sub(dst_object, dst_slot, dst_object);
+}
+
+// If lr_status is kLRHasBeenSaved, lr will be clobbered.
//
// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away.
-void MacroAssembler::RecordWrite(Register object, Register address,
+void MacroAssembler::RecordWrite(Register object, Operand offset,
Register value, LinkRegisterStatus lr_status,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
@@ -3105,7 +2990,8 @@ void MacroAssembler::RecordWrite(Register object, Register address,
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- LoadTaggedPointerField(temp, MemOperand(address));
+ Add(temp, object, offset);
+ LoadTaggedPointerField(temp, MemOperand(temp));
Cmp(temp, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
@@ -3118,32 +3004,22 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
+ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, ne,
+ &done);
- CheckPageFlagClear(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask, &done);
- CheckPageFlagClear(object,
- value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- &done);
+ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &done);
// Record the actual write.
if (lr_status == kLRHasNotBeenSaved) {
Push(padreg, lr);
}
- CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
+ CallRecordWriteStub(object, offset, remembered_set_action, fp_mode);
if (lr_status == kLRHasNotBeenSaved) {
Pop(lr, padreg);
}
Bind(&done);
-
- // Clobber clobbered registers when running with the debug-code flag
- // turned on to provoke errors.
- if (emit_debug_code()) {
- Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
- Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
- }
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
@@ -3219,10 +3095,9 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadTaggedPointerField(dst, ContextMemOperand(dst, index));
}
-
// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
-void MacroAssembler::PrintfNoPreserve(const char * format,
+void MacroAssembler::PrintfNoPreserve(const char* format,
const CPURegister& arg0,
const CPURegister& arg1,
const CPURegister& arg2,
@@ -3247,7 +3122,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
// We can use caller-saved registers as scratch values, except for the
// arguments and the PCS registers where they might need to go.
CPURegList tmp_list = kCallerSaved;
- tmp_list.Remove(x0); // Used to pass the format string.
+ tmp_list.Remove(x0); // Used to pass the format string.
tmp_list.Remove(kPCSVarargs);
tmp_list.Remove(arg0, arg1, arg2, arg3);
@@ -3332,7 +3207,8 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Adr(x0, &format_address);
// Emit the format string directly in the instruction stream.
- { BlockPoolsScope scope(this);
+ {
+ BlockPoolsScope scope(this);
Label after_data;
B(&after_data);
Bind(&format_address);
@@ -3352,7 +3228,7 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
{
InstructionAccurateScope scope(this, kPrintfLength / kInstrSize);
hlt(kImmExceptionIsPrintf);
- dc32(arg_count); // kPrintfArgCountOffset
+ dc32(arg_count); // kPrintfArgCountOffset
// Determine the argument pattern.
uint32_t arg_pattern_list = 0;
@@ -3367,18 +3243,15 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
}
- dc32(arg_pattern_list); // kPrintfArgPatternListOffset
+ dc32(arg_pattern_list); // kPrintfArgPatternListOffset
}
#else
Call(ExternalReference::printf_function());
#endif
}
-
-void MacroAssembler::Printf(const char * format,
- CPURegister arg0,
- CPURegister arg1,
- CPURegister arg2,
+void MacroAssembler::Printf(const char* format, CPURegister arg0,
+ CPURegister arg1, CPURegister arg2,
CPURegister arg3) {
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
@@ -3387,11 +3260,8 @@ void MacroAssembler::Printf(const char * format,
TmpList()->set_list(0);
FPTmpList()->set_list(0);
- // x18 is the platform register and is reserved for the use of platform ABIs.
- // It is not part of the kCallerSaved list, but we add it here anyway to
- // ensure `reg_list.Count() % 2 == 0` which is required in multiple spots.
CPURegList saved_registers = kCallerSaved;
- saved_registers.Combine(x18.code());
+ saved_registers.Align();
// Preserve all caller-saved registers as well as NZCV.
// PushCPURegList asserts that the size of each list is a multiple of 16
@@ -3407,7 +3277,8 @@ void MacroAssembler::Printf(const char * format,
TmpList()->set_list(tmp_list.list());
FPTmpList()->set_list(fp_tmp_list.list());
- { UseScratchRegisterScope temps(this);
+ {
+ UseScratchRegisterScope temps(this);
// If any of the arguments are the current stack pointer, allocate a new
// register for them, and adjust the value to compensate for pushing the
// caller-saved registers.
@@ -3429,7 +3300,8 @@ void MacroAssembler::Printf(const char * format,
}
// Preserve NZCV.
- { UseScratchRegisterScope temps(this);
+ {
+ UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Mrs(tmp, NZCV);
Push(tmp, xzr);
@@ -3438,7 +3310,8 @@ void MacroAssembler::Printf(const char * format,
PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
// Restore NZCV.
- { UseScratchRegisterScope temps(this);
+ {
+ UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Pop(xzr, tmp);
Msr(NZCV, tmp);
@@ -3457,7 +3330,6 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
availablefp_->set_list(old_availablefp_);
}
-
Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
int code = AcquireNextAvailable(available_).code();
return Register::Create(code, reg.SizeInBits());
@@ -3468,7 +3340,6 @@ VRegister UseScratchRegisterScope::AcquireSameSizeAs(const VRegister& reg) {
return VRegister::Create(code, reg.SizeInBits());
}
-
CPURegister UseScratchRegisterScope::AcquireNextAvailable(
CPURegList* available) {
CHECK(!available->IsEmpty());
@@ -3477,7 +3348,6 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
return result;
}
-
MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
@@ -3486,50 +3356,6 @@ MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
-#define __ masm->
-
-void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
- const Label* smi_check) {
- Assembler::BlockPoolsScope scope(masm);
- if (reg.IsValid()) {
- DCHECK(smi_check->is_bound());
- DCHECK(reg.Is64Bits());
-
- // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
- // 'check' in the other bits. The possible offset is limited in that we
- // use BitField to pack the data, and the underlying data type is a
- // uint32_t.
- uint32_t delta =
- static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
- __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
- } else {
- DCHECK(!smi_check->is_bound());
-
- // An offset of 0 indicates that there is no patch site.
- __ InlineData(0);
- }
-}
-
-InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
- : reg_(NoReg), smi_check_delta_(0), smi_check_(nullptr) {
- InstructionSequence* inline_data = InstructionSequence::At(info);
- DCHECK(inline_data->IsInlineData());
- if (inline_data->IsInlineData()) {
- uint64_t payload = inline_data->InlineData();
- // We use BitField to decode the payload, and BitField can only handle
- // 32-bit values.
- DCHECK(is_uint32(payload));
- if (payload != 0) {
- uint32_t payload32 = static_cast<uint32_t>(payload);
- int reg_code = RegisterBits::decode(payload32);
- reg_ = Register::XRegFromCode(reg_code);
- smi_check_delta_ = DeltaBits::decode(payload32);
- DCHECK_NE(0, smi_check_delta_);
- smi_check_ = inline_data->preceding(smi_check_delta_);
- }
- }
-}
-
void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
// We can use adr to load a pc relative location.
adr(rd, -pc_offset());
@@ -3539,9 +3365,6 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
Mov(kSpeculationPoisonRegister, -1);
}
-#undef __
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 9ba2b11a72..6961428f35 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -6,24 +6,23 @@
#error This header must be included via macro-assembler.h
#endif
-#ifndef V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
-#define V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
+#ifndef V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
+#define V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
#include <vector>
-#include "src/arm64/assembler-arm64.h"
-#include "src/bailout-reason.h"
#include "src/base/bits.h"
-#include "src/globals.h"
+#include "src/codegen/arm64/assembler-arm64.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/common/globals.h"
// Simulator specific helpers.
#if USE_SIMULATOR
- // TODO(all): If possible automatically prepend an indicator like
- // UNIMPLEMENTED or LOCATION.
- #define ASM_UNIMPLEMENTED(message) \
- __ Debug(message, __LINE__, NO_PARAM)
- #define ASM_UNIMPLEMENTED_BREAK(message) \
- __ Debug(message, __LINE__, \
+// TODO(all): If possible automatically prepend an indicator like
+// UNIMPLEMENTED or LOCATION.
+#define ASM_UNIMPLEMENTED(message) __ Debug(message, __LINE__, NO_PARAM)
+#define ASM_UNIMPLEMENTED_BREAK(message) \
+ __ Debug(message, __LINE__, \
FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
#if DEBUG
#define ASM_LOCATION(message) __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
@@ -40,7 +39,6 @@
#define ASM_LOCATION_IN_ASSEMBLER(message)
#endif
-
namespace v8 {
namespace internal {
@@ -110,11 +108,14 @@ enum BranchType {
// 'always' is used to generate unconditional branches.
// 'never' is used to not generate a branch (generally as the inverse
// branch type of 'always).
- always, never,
+ always,
+ never,
// cbz and cbnz
- reg_zero, reg_not_zero,
+ reg_zero,
+ reg_not_zero,
// tbz and tbnz
- reg_bit_clear, reg_bit_set,
+ reg_bit_clear,
+ reg_bit_set,
// Aliases.
kBranchTypeFirstCondition = eq,
@@ -388,6 +389,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
V(fmla, Fmla) \
V(fmls, Fmls) \
V(fmulx, Fmulx) \
+ V(fnmul, Fnmul) \
V(frecps, Frecps) \
V(frsqrts, Frsqrts) \
V(mla, Mla) \
@@ -646,10 +648,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load a literal from the inline constant pool.
inline void Ldr(const CPURegister& rt, const Operand& imm);
- // Claim or drop stack space without actually accessing memory.
+ // Claim or drop stack space.
//
- // In debug mode, both of these will write invalid data into the claimed or
- // dropped space.
+ // On Windows, Claim will write a value every 4k, as is required by the stack
+ // expansion mechanism.
//
// The stack pointer must be aligned to 16 bytes and the size claimed or
// dropped must be a multiple of 16 bytes.
@@ -740,15 +742,24 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SaveRegisters(RegList registers);
void RestoreRegisters(RegList registers);
- void CallRecordWriteStub(Register object, Register address,
+ void CallRecordWriteStub(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode);
- void CallRecordWriteStub(Register object, Register address,
+ void CallRecordWriteStub(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode, Address wasm_target);
- void CallEphemeronKeyBarrier(Register object, Register address,
+ void CallEphemeronKeyBarrier(Register object, Operand offset,
SaveFPRegsMode fp_mode);
+ // For a given |object| and |offset|:
+ // - Move |object| to |dst_object|.
+ // - Compute the address of the slot pointed to by |offset| in |object| and
+ // write it to |dst_slot|.
+ // This method makes sure |object| and |offset| are allowed to overlap with
+ // the destination registers.
+ void MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset);
+
// Alternative forms of Push and Pop, taking a RegList or CPURegList that
// specifies the registers that are to be pushed or popped. Higher-numbered
// registers are associated with higher memory addresses (as in the A32 push
@@ -783,11 +794,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
PreShiftImmMode mode);
- void CheckPageFlagSet(const Register& object, const Register& scratch,
- int mask, Label* if_any_set);
-
- void CheckPageFlagClear(const Register& object, const Register& scratch,
- int mask, Label* if_all_clear);
+ void CheckPageFlag(const Register& object, int mask, Condition cc,
+ Label* condition_met);
// Test the bits of register defined by bit_pattern, and branch if ANY of
// those bits are set. May corrupt the status flags.
@@ -1199,10 +1207,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
+ void DecompressTaggedSigned(const Register& destination,
+ const Register& source);
void DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand);
+ void DecompressTaggedPointer(const Register& destination,
+ const Register& source);
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
+ void DecompressAnyTagged(const Register& destination, const Register& source);
protected:
// The actual Push and Pop implementations. These don't generate any code
@@ -1271,7 +1284,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
- void CallRecordWriteStub(Register object, Register address,
+ void CallRecordWriteStub(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode, Handle<Code> code_target,
Address wasm_target);
@@ -1535,11 +1548,13 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void LoadObject(Register result, Handle<Object> object);
- inline void PushSizeRegList(RegList registers, unsigned reg_size,
+ inline void PushSizeRegList(
+ RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
PushCPURegList(CPURegList(type, reg_size, registers));
}
- inline void PopSizeRegList(RegList registers, unsigned reg_size,
+ inline void PopSizeRegList(
+ RegList registers, unsigned reg_size,
CPURegister::RegisterType type = CPURegister::kRegister) {
PopCPURegList(CPURegList(type, reg_size, registers));
}
@@ -1571,54 +1586,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Push the specified register 'count' times.
void PushMultipleTimes(CPURegister src, Register count);
- // Sometimes callers need to push or pop multiple registers in a way that is
- // difficult to structure efficiently for fixed Push or Pop calls. This scope
- // allows push requests to be queued up, then flushed at once. The
- // MacroAssembler will try to generate the most efficient sequence required.
- //
- // Unlike the other Push and Pop macros, PushPopQueue can handle mixed sets of
- // register sizes and types.
- class PushPopQueue {
- public:
- explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) {}
-
- ~PushPopQueue() {
- DCHECK(queued_.empty());
- }
-
- void Queue(const CPURegister& rt) {
- size_ += rt.SizeInBytes();
- queued_.push_back(rt);
- }
-
- void PushQueued();
- void PopQueued();
-
- private:
- MacroAssembler* masm_;
- int size_;
- std::vector<CPURegister> queued_;
- };
-
// Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
// values peeked will be adjacent, with the value in 'dst2' being from a
// higher address than 'dst1'. The offset is in bytes. The stack pointer must
// be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
- // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
- // register.
- inline void ClaimBySMI(const Register& count_smi,
- uint64_t unit_size = kXRegSize);
- inline void DropBySMI(const Register& count_smi,
- uint64_t unit_size = kXRegSize);
-
// Compare a register with an operand, and branch to label depending on the
// condition. May corrupt the status flags.
- inline void CompareAndBranch(const Register& lhs,
- const Operand& rhs,
- Condition cond,
- Label* label);
+ inline void CompareAndBranch(const Register& lhs, const Operand& rhs,
+ Condition cond, Label* label);
// Insert one or more instructions into the instruction stream that encode
// some caller-defined data. The instructions used will be executable with no
@@ -1660,14 +1637,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
static int SafepointRegisterStackIndex(int reg_code);
- template<typename Field>
+ template <typename Field>
void DecodeField(Register dst, Register src) {
static const int shift = Field::kShift;
static const int setbits = CountSetBits(Field::kMask, 32);
Ubfx(dst, src, shift, setbits);
}
- template<typename Field>
+ template <typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
@@ -1678,26 +1655,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
inline void SmiTag(Register smi);
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
- inline void JumpIfBothSmi(Register value1, Register value2,
- Label* both_smi_label,
- Label* not_smi_label = nullptr);
- inline void JumpIfEitherSmi(Register value1, Register value2,
- Label* either_smi_label,
- Label* not_smi_label = nullptr);
- inline void JumpIfEitherNotSmi(Register value1,
- Register value2,
- Label* not_smi_label);
- inline void JumpIfBothNotSmi(Register value1,
- Register value2,
- Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object,
AbortReason reason = AbortReason::kOperandIsASmi);
- inline void ObjectTag(Register tagged_obj, Register obj);
- inline void ObjectUntag(Register untagged_obj, Register obj);
-
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);
@@ -1716,24 +1678,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
- // Try to represent a double as a signed 64-bit int.
- // This succeeds if the result compares equal to the input, so inputs of -0.0
- // are represented as 0 and handled as a success.
- //
- // On output the Z flag is set if the operation was successful.
- void TryRepresentDoubleAsInt64(Register as_int, VRegister value,
- VRegister scratch_d,
- Label* on_successful_conversion = nullptr,
- Label* on_failed_conversion = nullptr) {
- DCHECK(as_int.Is64Bits());
- TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
- on_failed_conversion);
- }
-
// ---- Calling / Jumping helpers ----
- void CallRuntime(const Runtime::Function* f,
- int num_arguments,
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Convenience function: Same as above, but takes the fid instead.
@@ -1798,12 +1745,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// are the same register). It leaves the heap object in the heap_object
// register unless the heap_object register is the same register as one of the
// other registers.
- void CompareObjectType(Register heap_object,
- Register map,
- Register type_reg,
+ void CompareObjectType(Register heap_object, Register map, Register type_reg,
InstanceType type);
-
// Compare object type for heap object, and branch if equal (or not.)
// heap_object contains a non-Smi whose object type should be compared with
// the given type. This both sets the flags and leaves the object type in
@@ -1811,19 +1755,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// type_reg and map register are the same register). It leaves the heap
// object in the heap_object register unless the heap_object register is the
// same register as one of the other registers.
- void JumpIfObjectType(Register object,
- Register map,
- Register type_reg,
- InstanceType type,
- Label* if_cond_pass,
+ void JumpIfObjectType(Register object, Register map, Register type_reg,
+ InstanceType type, Label* if_cond_pass,
Condition cond = eq);
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
- void CompareInstanceType(Register map,
- Register type_reg,
- InstanceType type);
+ void CompareInstanceType(Register map, Register type_reg, InstanceType type);
// Load the elements kind field from a map, and return it in the result
// register.
@@ -1843,23 +1782,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpIfIsInRange(const Register& value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
- // Compare the contents of a register with an operand, and branch to true,
- // false or fall through, depending on condition.
- void CompareAndSplit(const Register& lhs,
- const Operand& rhs,
- Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through);
-
- // Test the bits of register defined by bit_pattern, and branch to
- // if_any_set, if_all_clear or fall_through accordingly.
- void TestAndSplit(const Register& reg,
- uint64_t bit_pattern,
- Label* if_all_clear,
- Label* if_any_set,
- Label* fall_through);
-
// ---------------------------------------------------------------------------
// Frames.
@@ -1920,30 +1842,21 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Garbage collector support (GC).
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
-
- void CheckPageFlag(const Register& object, const Register& scratch, int mask,
- Condition cc, Label* condition_met);
-
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
+ // stored.
// The offset is the offset from the start of the object, not the offset from
// the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
void RecordWriteField(
- Register object, int offset, Register value, Register scratch,
- LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+ Register object, int offset, Register value, LinkRegisterStatus lr_status,
+ SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
+ // For a given |object| notify the garbage collector that the slot at |offset|
+ // has been written. |value| is the object being stored.
void RecordWrite(
- Register object, Register address, Register value,
+ Register object, Operand offset, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
@@ -1955,12 +1868,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register reg, RootIndex index,
AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);
- // Abort if the specified register contains the invalid color bit pattern.
- // The pattern must be in bits [1:0] of 'reg' register.
- //
- // If emit_debug_code() is false, this emits no code.
- void AssertHasValidColor(const Register& reg);
-
void LoadNativeContextSlot(int index, Register dst);
// Like printf, but print at run-time from generated code.
@@ -1978,37 +1885,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// a problem, preserve the important registers manually and then call
// PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
// implicitly preserved.
- void Printf(const char * format,
- CPURegister arg0 = NoCPUReg,
- CPURegister arg1 = NoCPUReg,
- CPURegister arg2 = NoCPUReg,
+ void Printf(const char* format, CPURegister arg0 = NoCPUReg,
+ CPURegister arg1 = NoCPUReg, CPURegister arg2 = NoCPUReg,
CPURegister arg3 = NoCPUReg);
// Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
//
// The return code from the system printf call will be returned in x0.
- void PrintfNoPreserve(const char * format,
- const CPURegister& arg0 = NoCPUReg,
+ void PrintfNoPreserve(const char* format, const CPURegister& arg0 = NoCPUReg,
const CPURegister& arg1 = NoCPUReg,
const CPURegister& arg2 = NoCPUReg,
const CPURegister& arg3 = NoCPUReg);
- private:
- // Try to represent a double as an int so that integer fast-paths may be
- // used. Not every valid integer value is guaranteed to be caught.
- // It supports both 32-bit and 64-bit integers depending whether 'as_int'
- // is a W or X register.
- //
- // This does not distinguish between +0 and -0, so if this distinction is
- // important it must be checked separately.
- //
- // On output the Z flag is set if the operation was successful.
- void TryRepresentDoubleAsInt(Register as_int, VRegister value,
- VRegister scratch_d,
- Label* on_successful_conversion = nullptr,
- Label* on_failed_conversion = nullptr);
-
- public:
// Far branches resolving.
//
// The various classes of branch instructions with immediate offsets have
@@ -2025,7 +1913,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
-
// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
@@ -2081,7 +1968,7 @@ class InstructionAccurateScope {
// original state, even if the lists were modified by some other means. Note
// that this scope can be nested but the destructors need to run in the opposite
// order as the constructors. We do not have assertions for this.
-class UseScratchRegisterScope {
+class V8_EXPORT_PRIVATE UseScratchRegisterScope {
public:
explicit UseScratchRegisterScope(TurboAssembler* tasm)
: available_(tasm->TmpList()),
@@ -2092,7 +1979,7 @@ class UseScratchRegisterScope {
DCHECK_EQ(availablefp_->type(), CPURegister::kVRegister);
}
- V8_EXPORT_PRIVATE ~UseScratchRegisterScope();
+ ~UseScratchRegisterScope();
// Take a register from the appropriate temps list. It will be returned
// automatically when the scope ends.
@@ -2109,73 +1996,23 @@ class UseScratchRegisterScope {
VRegister AcquireSameSizeAs(const VRegister& reg);
private:
- V8_EXPORT_PRIVATE static CPURegister AcquireNextAvailable(
- CPURegList* available);
+ static CPURegister AcquireNextAvailable(CPURegList* available);
// Available scratch registers.
- CPURegList* available_; // kRegister
- CPURegList* availablefp_; // kVRegister
+ CPURegList* available_; // kRegister
+ CPURegList* availablefp_; // kVRegister
// The state of the available lists at the start of this scope.
- RegList old_available_; // kRegister
- RegList old_availablefp_; // kVRegister
+ RegList old_available_; // kRegister
+ RegList old_availablefp_; // kVRegister
};
MemOperand ContextMemOperand(Register context, int index = 0);
MemOperand NativeContextMemOperand();
-// Encode and decode information about patchable inline SMI checks.
-class InlineSmiCheckInfo {
- public:
- explicit InlineSmiCheckInfo(Address info);
-
- bool HasSmiCheck() const { return smi_check_ != nullptr; }
-
- const Register& SmiRegister() const {
- return reg_;
- }
-
- Instruction* SmiCheck() const {
- return smi_check_;
- }
-
- int SmiCheckDelta() const { return smi_check_delta_; }
-
- // Use MacroAssembler::InlineData to emit information about patchable inline
- // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
- // indicate that there is no inline SMI check. Note that 'reg' cannot be sp.
- //
- // The generated patch information can be read using the InlineSMICheckInfo
- // class.
- static void Emit(MacroAssembler* masm, const Register& reg,
- const Label* smi_check);
-
- // Emit information to indicate that there is no inline SMI check.
- static void EmitNotInlined(MacroAssembler* masm) {
- Label unbound;
- Emit(masm, NoReg, &unbound);
- }
-
- private:
- Register reg_;
- int smi_check_delta_;
- Instruction* smi_check_;
-
- // Fields in the data encoded by InlineData.
-
- // A width of 5 (Rd_width) for the SMI register precludes the use of sp,
- // since kSPRegInternalCode is 63. However, sp should never hold a SMI or be
- // used in a patchable check. The Emit() method checks this.
- //
- // Note that the total size of the fields is restricted by the underlying
- // storage size handled by the BitField class, which is a uint32_t.
- class RegisterBits : public BitField<unsigned, 0, 5> {};
- class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
-};
-
} // namespace internal
} // namespace v8
#define ACCESS_MASM(masm) masm->
-#endif // V8_ARM64_MACRO_ASSEMBLER_ARM64_H_
+#endif // V8_CODEGEN_ARM64_MACRO_ASSEMBLER_ARM64_H_
diff --git a/deps/v8/src/arm64/register-arm64.cc b/deps/v8/src/codegen/arm64/register-arm64.cc
index cf1b320624..6a56ce18f6 100644
--- a/deps/v8/src/arm64/register-arm64.cc
+++ b/deps/v8/src/codegen/arm64/register-arm64.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/register-arm64.h"
+#include "src/codegen/arm64/register-arm64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index f5a79d46f1..741866dfd6 100644
--- a/deps/v8/src/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_REGISTER_ARM64_H_
-#define V8_ARM64_REGISTER_ARM64_H_
+#ifndef V8_CODEGEN_ARM64_REGISTER_ARM64_H_
+#define V8_CODEGEN_ARM64_REGISTER_ARM64_H_
-#include "src/arm64/utils-arm64.h"
-#include "src/globals.h"
-#include "src/register.h"
-#include "src/reglist.h"
+#include "src/codegen/arm64/utils-arm64.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -551,16 +551,14 @@ V8_EXPORT_PRIVATE bool AreConsecutive(const VRegister& reg1,
const VRegister& reg3 = NoVReg,
const VRegister& reg4 = NoVReg);
-typedef VRegister FloatRegister;
-typedef VRegister DoubleRegister;
-typedef VRegister Simd128Register;
+using FloatRegister = VRegister;
+using DoubleRegister = VRegister;
+using Simd128Register = VRegister;
// -----------------------------------------------------------------------------
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
@@ -628,6 +626,9 @@ class V8_EXPORT_PRIVATE CPURegList {
// preparing registers for an AAPCS64 function call, for example.
void RemoveCalleeSaved();
+ // Align the list to 16 bytes.
+ void Align();
+
CPURegister PopLowestIndex();
CPURegister PopHighestIndex();
@@ -747,4 +748,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = x8;
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_REGISTER_ARM64_H_
+#endif // V8_CODEGEN_ARM64_REGISTER_ARM64_H_
diff --git a/deps/v8/src/arm64/utils-arm64.cc b/deps/v8/src/codegen/arm64/utils-arm64.cc
index 6d200be18a..2f972ce502 100644
--- a/deps/v8/src/arm64/utils-arm64.cc
+++ b/deps/v8/src/codegen/arm64/utils-arm64.cc
@@ -4,8 +4,7 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/utils-arm64.h"
-
+#include "src/codegen/arm64/utils-arm64.h"
namespace v8 {
namespace internal {
@@ -81,7 +80,6 @@ int CountLeadingZeros(uint64_t value, int width) {
return base::bits::CountLeadingZeros64(value << (64 - width));
}
-
int CountLeadingSignBits(int64_t value, int width) {
DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64));
if (value >= 0) {
@@ -91,7 +89,6 @@ int CountLeadingSignBits(int64_t value, int width) {
}
}
-
int CountTrailingZeros(uint64_t value, int width) {
DCHECK((width == 32) || (width == 64));
if (width == 64) {
@@ -101,7 +98,6 @@ int CountTrailingZeros(uint64_t value, int width) {
static_cast<uint32_t>(value & 0xFFFFFFFFF)));
}
-
int CountSetBits(uint64_t value, int width) {
DCHECK((width == 32) || (width == 64));
if (width == 64) {
@@ -121,12 +117,10 @@ int HighestSetBitPosition(uint64_t value) {
return 63 - CountLeadingZeros(value, 64);
}
-
uint64_t LargestPowerOf2Divisor(uint64_t value) {
return value & (-(int64_t)value);
}
-
int MaskToBit(uint64_t mask) {
DCHECK_EQ(CountSetBits(mask, 64), 1);
return CountTrailingZeros(mask, 64);
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/codegen/arm64/utils-arm64.h
index 00ed1c20c3..6bddce6fff 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/codegen/arm64/utils-arm64.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_UTILS_ARM64_H_
-#define V8_ARM64_UTILS_ARM64_H_
+#ifndef V8_CODEGEN_ARM64_UTILS_ARM64_H_
+#define V8_CODEGEN_ARM64_UTILS_ARM64_H_
#include <cmath>
-#include "src/arm64/constants-arm64.h"
-#include "src/utils.h"
+#include "src/codegen/arm64/constants-arm64.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -40,7 +40,6 @@ int HighestSetBitPosition(uint64_t value);
uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
-
template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
@@ -70,7 +69,6 @@ T ReverseBytes(T value, int block_bytes_log2) {
return result;
}
-
// NaN tests.
inline bool IsSignallingNaN(double num) {
uint64_t raw = bit_cast<uint64_t>(num);
@@ -80,7 +78,6 @@ inline bool IsSignallingNaN(double num) {
return false;
}
-
inline bool IsSignallingNaN(float num) {
uint32_t raw = bit_cast<uint32_t>(num);
if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
@@ -99,27 +96,23 @@ inline bool IsQuietNaN(T num) {
return std::isnan(num) && !IsSignallingNaN(num);
}
-
// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
DCHECK(std::isnan(num));
return bit_cast<double>(bit_cast<uint64_t>(num) | kDQuietNanMask);
}
-
inline float ToQuietNaN(float num) {
DCHECK(std::isnan(num));
return bit_cast<float>(bit_cast<uint32_t>(num) |
static_cast<uint32_t>(kSQuietNanMask));
}
-
// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
return fma(op1, op2, a);
}
-
inline float FusedMultiplyAdd(float op1, float op2, float a) {
return fmaf(op1, op2, a);
}
@@ -127,4 +120,4 @@ inline float FusedMultiplyAdd(float op1, float op2, float a) {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_UTILS_ARM64_H_
+#endif // V8_CODEGEN_ARM64_UTILS_ARM64_H_
diff --git a/deps/v8/src/codegen/assembler-arch.h b/deps/v8/src/codegen/assembler-arch.h
new file mode 100644
index 0000000000..cab4cbfc3b
--- /dev/null
+++ b/deps/v8/src/codegen/assembler-arch.h
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ASSEMBLER_ARCH_H_
+#define V8_CODEGEN_ASSEMBLER_ARCH_H_
+
+#include "src/codegen/assembler.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/codegen/ia32/assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/codegen/x64/assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/codegen/arm64/assembler-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/codegen/arm/assembler-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/codegen/ppc/assembler-ppc.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/codegen/mips/assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/codegen/mips64/assembler-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/codegen/s390/assembler-s390.h"
+#else
+#error Unknown architecture.
+#endif
+
+#endif // V8_CODEGEN_ASSEMBLER_ARCH_H_
diff --git a/deps/v8/src/codegen/assembler-inl.h b/deps/v8/src/codegen/assembler-inl.h
new file mode 100644
index 0000000000..fd08a38555
--- /dev/null
+++ b/deps/v8/src/codegen/assembler-inl.h
@@ -0,0 +1,30 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ASSEMBLER_INL_H_
+#define V8_CODEGEN_ASSEMBLER_INL_H_
+
+#include "src/codegen/assembler.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/codegen/ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/codegen/x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/codegen/arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/codegen/ppc/assembler-ppc-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/codegen/mips/assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/codegen/mips64/assembler-mips64-inl.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/codegen/s390/assembler-s390-inl.h"
+#else
+#error Unknown architecture.
+#endif
+
+#endif // V8_CODEGEN_ASSEMBLER_INL_H_
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/codegen/assembler.cc
index 82d0fb9794..687ae98bfe 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -32,36 +32,23 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
-#include "src/assembler-inl.h"
-#include "src/deoptimizer.h"
-#include "src/disassembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h" // For MemoryAllocator. TODO(jkummerow): Drop.
-#include "src/isolate.h"
-#include "src/ostreams.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
-#include "src/string-constants.h"
-#include "src/vector.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
-AssemblerOptions AssemblerOptions::EnableV8AgnosticCode() const {
- AssemblerOptions options = *this;
- options.v8_agnostic_code = true;
- options.record_reloc_info_for_serialization = false;
- options.enable_root_array_delta_access = false;
- // Inherit |enable_simulator_code| value.
- options.isolate_independent_code = false;
- options.inline_offheap_trampolines = false;
- // Inherit |code_range_start| value.
- // Inherit |use_pc_relative_calls_and_jumps| value.
- return options;
-}
-
AssemblerOptions AssemblerOptions::Default(
Isolate* isolate, bool explicitly_support_serialization) {
AssemblerOptions options;
@@ -167,23 +154,6 @@ void AssemblerBase::Print(Isolate* isolate) {
}
// -----------------------------------------------------------------------------
-// Implementation of PredictableCodeSizeScope
-
-PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
- int expected_size)
- : assembler_(assembler),
- expected_size_(expected_size),
- start_offset_(assembler->pc_offset()),
- old_value_(assembler->predictable_code_size()) {
- assembler_->set_predictable_code_size(true);
-}
-
-PredictableCodeSizeScope::~PredictableCodeSizeScope() {
- CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
- assembler_->set_predictable_code_size(old_value_);
-}
-
-// -----------------------------------------------------------------------------
// Implementation of CpuFeatureScope
#ifdef DEBUG
@@ -240,13 +210,11 @@ void Assembler::DataAlign(int m) {
}
void AssemblerBase::RequestHeapObject(HeapObjectRequest request) {
- DCHECK(!options().v8_agnostic_code);
request.set_offset(pc_offset());
heap_object_requests_.push_front(request);
}
int AssemblerBase::AddCodeTarget(Handle<Code> target) {
- DCHECK(!options().v8_agnostic_code);
int current = static_cast<int>(code_targets_.size());
if (current > 0 && !target.is_null() &&
code_targets_.back().address() == target.address()) {
@@ -258,19 +226,21 @@ int AssemblerBase::AddCodeTarget(Handle<Code> target) {
}
}
-Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
- DCHECK(!options().v8_agnostic_code);
- DCHECK_LE(0, code_target_index);
- DCHECK_LT(code_target_index, code_targets_.size());
- return code_targets_[code_target_index];
+int AssemblerBase::AddCompressedEmbeddedObject(Handle<HeapObject> object) {
+ int current = static_cast<int>(compressed_embedded_objects_.size());
+ compressed_embedded_objects_.push_back(object);
+ return current;
}
-void AssemblerBase::UpdateCodeTarget(intptr_t code_target_index,
- Handle<Code> code) {
- DCHECK(!options().v8_agnostic_code);
- DCHECK_LE(0, code_target_index);
- DCHECK_LT(code_target_index, code_targets_.size());
- code_targets_[code_target_index] = code;
+Handle<HeapObject> AssemblerBase::GetCompressedEmbeddedObject(
+ intptr_t index) const {
+ DCHECK_LT(static_cast<size_t>(index), compressed_embedded_objects_.size());
+ return compressed_embedded_objects_[index];
+}
+
+Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
+ DCHECK_LT(static_cast<size_t>(code_target_index), code_targets_.size());
+ return code_targets_[code_target_index];
}
int Assembler::WriteCodeComments() {
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/codegen/assembler.h
index cefd4ae8d8..eae5d53a4f 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -32,21 +32,21 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#ifndef V8_ASSEMBLER_H_
-#define V8_ASSEMBLER_H_
+#ifndef V8_CODEGEN_ASSEMBLER_H_
+#define V8_CODEGEN_ASSEMBLER_H_
#include <forward_list>
-#include "src/code-comments.h"
-#include "src/cpu-features.h"
-#include "src/deoptimize-reason.h"
-#include "src/external-reference.h"
-#include "src/flags.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/objects.h"
-#include "src/reglist.h"
-#include "src/reloc-info.h"
+#include "src/codegen/code-comments.h"
+#include "src/codegen/cpu-features.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/reglist.h"
+#include "src/codegen/reloc-info.h"
+#include "src/common/globals.h"
+#include "src/deoptimizer/deoptimize-reason.h"
+#include "src/flags/flags.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects.h"
namespace v8 {
@@ -135,9 +135,6 @@ class HeapObjectRequest {
enum class CodeObjectRequired { kNo, kYes };
struct V8_EXPORT_PRIVATE AssemblerOptions {
- // Prohibits using any V8-specific features of assembler like (isolates,
- // heap objects, external references, etc.).
- bool v8_agnostic_code = false;
// Recording reloc info for external references and off-heap targets is
// needed whenever code is serialized, e.g. into the snapshot or as a WASM
// module. This flag allows this reloc info to be disabled for code that
@@ -171,9 +168,6 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
// on a function prologue/epilogue.
bool collect_win64_unwind_info = false;
- // Constructs V8-agnostic set of options from current state.
- AssemblerOptions EnableV8AgnosticCode() const;
-
static AssemblerOptions Default(
Isolate* isolate, bool explicitly_support_serialization = false);
};
@@ -257,7 +251,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// This function is called when code generation is aborted, so that
// the assembler could clean up internal data structures.
- virtual void AbortedCodeGeneration() { }
+ virtual void AbortedCodeGeneration() {}
// Debugging
void Print(Isolate* isolate);
@@ -270,15 +264,16 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
}
}
- static const int kMinimalBufferSize = 4*KB;
+ static const int kMinimalBufferSize = 4 * KB;
protected:
// Add 'target' to the {code_targets_} vector, if necessary, and return the
// offset at which it is stored.
int AddCodeTarget(Handle<Code> target);
Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
- // Update to the code target at {code_target_index} to {target}.
- void UpdateCodeTarget(intptr_t code_target_index, Handle<Code> target);
+
+ int AddCompressedEmbeddedObject(Handle<HeapObject> object);
+ Handle<HeapObject> GetCompressedEmbeddedObject(intptr_t index) const;
// The buffer into which code and relocation info are generated.
std::unique_ptr<AssemblerBuffer> buffer_;
@@ -326,6 +321,13 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// the code handle in the vector instead.
std::vector<Handle<Code>> code_targets_;
+ // When pointer compression is enabled, we need to store indexes to this
+ // table in the code until we are ready to copy the code and embed the real
+ // object pointers. We don't need to do the same thing for non-compressed
+ // embedded objects, because we've got enough space (kPointerSize) in the
+ // code stream to just embed the address of the object handle.
+ std::vector<Handle<HeapObject>> compressed_embedded_objects_;
+
const AssemblerOptions options_;
uint64_t enabled_cpu_features_;
bool emit_debug_code_;
@@ -349,30 +351,13 @@ class DontEmitDebugCodeScope {
: assembler_(assembler), old_value_(assembler->emit_debug_code()) {
assembler_->set_emit_debug_code(false);
}
- ~DontEmitDebugCodeScope() {
- assembler_->set_emit_debug_code(old_value_);
- }
+ ~DontEmitDebugCodeScope() { assembler_->set_emit_debug_code(old_value_); }
+
private:
AssemblerBase* assembler_;
bool old_value_;
};
-
-// Avoids using instructions that vary in size in unpredictable ways between the
-// snapshot and the running VM.
-class PredictableCodeSizeScope {
- public:
- PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
- ~PredictableCodeSizeScope();
-
- private:
- AssemblerBase* const assembler_;
- int const expected_size_;
- int const start_offset_;
- bool const old_value_;
-};
-
-
// Enable a specified feature within a scope.
class V8_EXPORT_PRIVATE CpuFeatureScope {
public:
@@ -400,4 +385,4 @@ class V8_EXPORT_PRIVATE CpuFeatureScope {
} // namespace internal
} // namespace v8
-#endif // V8_ASSEMBLER_H_
+#endif // V8_CODEGEN_ASSEMBLER_H_
diff --git a/deps/v8/src/bailout-reason.cc b/deps/v8/src/codegen/bailout-reason.cc
index 54b3dbda54..f4573fbe9c 100644
--- a/deps/v8/src/bailout-reason.cc
+++ b/deps/v8/src/codegen/bailout-reason.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/bailout-reason.h"
+#include "src/codegen/bailout-reason.h"
#include "src/base/logging.h"
namespace v8 {
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h
index 139ee14931..e928a634ac 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/codegen/bailout-reason.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BAILOUT_REASON_H_
-#define V8_BAILOUT_REASON_H_
+#ifndef V8_CODEGEN_BAILOUT_REASON_H_
+#define V8_CODEGEN_BAILOUT_REASON_H_
#include <cstdint>
@@ -53,7 +53,6 @@ namespace internal {
V(kPromiseAlreadySettled, "Promise already settled") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
- V(kRegisterWasClobbered, "Register was clobbered") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function") \
@@ -126,4 +125,4 @@ bool IsValidAbortReason(int reason_id);
} // namespace internal
} // namespace v8
-#endif // V8_BAILOUT_REASON_H_
+#endif // V8_CODEGEN_BAILOUT_REASON_H_
diff --git a/deps/v8/src/callable.h b/deps/v8/src/codegen/callable.h
index c24c9ae554..49ee70717e 100644
--- a/deps/v8/src/callable.h
+++ b/deps/v8/src/codegen/callable.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CALLABLE_H_
-#define V8_CALLABLE_H_
+#ifndef V8_CODEGEN_CALLABLE_H_
+#define V8_CODEGEN_CALLABLE_H_
-#include "src/allocation.h"
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -30,4 +30,4 @@ class Callable final {
} // namespace internal
} // namespace v8
-#endif // V8_CALLABLE_H_
+#endif // V8_CODEGEN_CALLABLE_H_
diff --git a/deps/v8/src/code-comments.cc b/deps/v8/src/codegen/code-comments.cc
index db5611c8dd..7888863373 100644
--- a/deps/v8/src/code-comments.cc
+++ b/deps/v8/src/codegen/code-comments.cc
@@ -5,8 +5,8 @@
#include <cstring>
#include <iomanip>
-#include "src/assembler-inl.h"
-#include "src/code-comments.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/code-comments.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/code-comments.h b/deps/v8/src/codegen/code-comments.h
index 0c247fd247..f366cd5547 100644
--- a/deps/v8/src/code-comments.h
+++ b/deps/v8/src/codegen/code-comments.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CODE_COMMENTS_H_
-#define V8_CODE_COMMENTS_H_
+#ifndef V8_CODEGEN_CODE_COMMENTS_H_
+#define V8_CODEGEN_CODE_COMMENTS_H_
#include <ostream>
#include <string>
@@ -68,4 +68,4 @@ void PrintCodeCommentsSection(std::ostream& out, Address code_comments_start,
} // namespace internal
} // namespace v8
-#endif // V8_CODE_COMMENTS_H_
+#endif // V8_CODEGEN_CODE_COMMENTS_H_
diff --git a/deps/v8/src/code-desc.cc b/deps/v8/src/codegen/code-desc.cc
index f66b73f0a0..ea2e4712b6 100644
--- a/deps/v8/src/code-desc.cc
+++ b/deps/v8/src/codegen/code-desc.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/code-desc.h"
+#include "src/codegen/code-desc.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/code-desc.h b/deps/v8/src/codegen/code-desc.h
index 4da4ee395c..9a2ee2d868 100644
--- a/deps/v8/src/code-desc.h
+++ b/deps/v8/src/codegen/code-desc.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CODE_DESC_H_
-#define V8_CODE_DESC_H_
+#ifndef V8_CODEGEN_CODE_DESC_H_
+#define V8_CODEGEN_CODE_DESC_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -80,4 +80,4 @@ class CodeDesc {
} // namespace internal
} // namespace v8
-#endif // V8_CODE_DESC_H_
+#endif // V8_CODEGEN_CODE_DESC_H_
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index 3c52fb0752..931b783730 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
-#include "src/bootstrapper.h"
#include "src/builtins/builtins-descriptors.h"
#include "src/ic/ic.h"
-#include "src/objects-inl.h"
+#include "src/init/bootstrapper.h"
#include "src/objects/allocation-site-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -104,15 +104,15 @@ Callable CodeFactory::KeyedStoreIC_SloppyArguments(Isolate* isolate,
case STANDARD_STORE:
builtin_index = Builtins::kKeyedStoreIC_SloppyArguments_Standard;
break;
- case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ case STORE_AND_GROW_HANDLE_COW:
builtin_index =
Builtins::kKeyedStoreIC_SloppyArguments_GrowNoTransitionHandleCOW;
break;
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ case STORE_IGNORE_OUT_OF_BOUNDS:
builtin_index =
Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB;
break;
- case STORE_NO_TRANSITION_HANDLE_COW:
+ case STORE_HANDLE_COW:
builtin_index =
Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW;
break;
@@ -129,13 +129,13 @@ Callable CodeFactory::KeyedStoreIC_Slow(Isolate* isolate,
case STANDARD_STORE:
builtin_index = Builtins::kKeyedStoreIC_Slow_Standard;
break;
- case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ case STORE_AND_GROW_HANDLE_COW:
builtin_index = Builtins::kKeyedStoreIC_Slow_GrowNoTransitionHandleCOW;
break;
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ case STORE_IGNORE_OUT_OF_BOUNDS:
builtin_index = Builtins::kKeyedStoreIC_Slow_NoTransitionIgnoreOOB;
break;
- case STORE_NO_TRANSITION_HANDLE_COW:
+ case STORE_HANDLE_COW:
builtin_index = Builtins::kKeyedStoreIC_Slow_NoTransitionHandleCOW;
break;
default:
@@ -151,15 +151,15 @@ Callable CodeFactory::StoreInArrayLiteralIC_Slow(Isolate* isolate,
case STANDARD_STORE:
builtin_index = Builtins::kStoreInArrayLiteralIC_Slow_Standard;
break;
- case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ case STORE_AND_GROW_HANDLE_COW:
builtin_index =
Builtins::kStoreInArrayLiteralIC_Slow_GrowNoTransitionHandleCOW;
break;
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ case STORE_IGNORE_OUT_OF_BOUNDS:
builtin_index =
Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB;
break;
- case STORE_NO_TRANSITION_HANDLE_COW:
+ case STORE_HANDLE_COW:
builtin_index =
Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionHandleCOW;
break;
@@ -176,15 +176,15 @@ Callable CodeFactory::ElementsTransitionAndStore(Isolate* isolate,
case STANDARD_STORE:
builtin_index = Builtins::kElementsTransitionAndStore_Standard;
break;
- case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ case STORE_AND_GROW_HANDLE_COW:
builtin_index =
Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW;
break;
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ case STORE_IGNORE_OUT_OF_BOUNDS:
builtin_index =
Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB;
break;
- case STORE_NO_TRANSITION_HANDLE_COW:
+ case STORE_HANDLE_COW:
builtin_index =
Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW;
break;
@@ -201,13 +201,13 @@ Callable CodeFactory::StoreFastElementIC(Isolate* isolate,
case STANDARD_STORE:
builtin_index = Builtins::kStoreFastElementIC_Standard;
break;
- case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ case STORE_AND_GROW_HANDLE_COW:
builtin_index = Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW;
break;
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ case STORE_IGNORE_OUT_OF_BOUNDS:
builtin_index = Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB;
break;
- case STORE_NO_TRANSITION_HANDLE_COW:
+ case STORE_HANDLE_COW:
builtin_index = Builtins::kStoreFastElementIC_NoTransitionHandleCOW;
break;
default:
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/codegen/code-factory.h
index 8a4f13e91e..57484e89ec 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CODE_FACTORY_H_
-#define V8_CODE_FACTORY_H_
+#ifndef V8_CODEGEN_CODE_FACTORY_H_
+#define V8_CODEGEN_CODE_FACTORY_H_
-#include "src/allocation.h"
-#include "src/callable.h"
-#include "src/globals.h"
-#include "src/interface-descriptors.h"
-#include "src/type-hints.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/common/globals.h"
+#include "src/objects/type-hints.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -108,4 +108,4 @@ class V8_EXPORT_PRIVATE CodeFactory final {
} // namespace internal
} // namespace v8
-#endif // V8_CODE_FACTORY_H_
+#endif // V8_CODEGEN_CODE_FACTORY_H_
diff --git a/deps/v8/src/code-reference.cc b/deps/v8/src/codegen/code-reference.cc
index 12279aacce..63c8d37497 100644
--- a/deps/v8/src/code-reference.cc
+++ b/deps/v8/src/codegen/code-reference.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/code-reference.h"
+#include "src/codegen/code-reference.h"
-#include "src/code-desc.h"
-#include "src/globals.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/codegen/code-desc.h"
+#include "src/common/globals.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
@@ -33,16 +33,16 @@ struct WasmOps {
Address constant_pool() const { return code->constant_pool(); }
Address instruction_start() const {
- return reinterpret_cast<Address>(code->instructions().start());
+ return reinterpret_cast<Address>(code->instructions().begin());
}
Address instruction_end() const {
- return reinterpret_cast<Address>(code->instructions().start() +
+ return reinterpret_cast<Address>(code->instructions().begin() +
code->instructions().size());
}
int instruction_size() const { return code->instructions().length(); }
- const byte* relocation_start() const { return code->reloc_info().start(); }
+ const byte* relocation_start() const { return code->reloc_info().begin(); }
const byte* relocation_end() const {
- return code->reloc_info().start() + code->reloc_info().length();
+ return code->reloc_info().begin() + code->reloc_info().length();
}
int relocation_size() const { return code->reloc_info().length(); }
Address code_comments() const { return code->code_comments(); }
diff --git a/deps/v8/src/code-reference.h b/deps/v8/src/codegen/code-reference.h
index e53a9356ee..4326cf0b96 100644
--- a/deps/v8/src/code-reference.h
+++ b/deps/v8/src/codegen/code-reference.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CODE_REFERENCE_H_
-#define V8_CODE_REFERENCE_H_
+#ifndef V8_CODEGEN_CODE_REFERENCE_H_
+#define V8_CODEGEN_CODE_REFERENCE_H_
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/objects/code.h"
namespace v8 {
@@ -67,4 +67,4 @@ ASSERT_TRIVIALLY_COPYABLE(CodeReference);
} // namespace internal
} // namespace v8
-#endif // V8_CODE_REFERENCE_H_
+#endif // V8_CODEGEN_CODE_REFERENCE_H_
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index 56192e4df1..d967d84874 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
-#include "src/frames-inl.h"
-#include "src/frames.h"
-#include "src/function-kind.h"
+#include "src/codegen/code-factory.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/frames.h"
#include "src/heap/heap-inl.h" // For Page/MemoryChunk. TODO(jkummerow): Drop.
+#include "src/logging/counters.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/function-kind.h"
#include "src/objects/heap-number.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -29,7 +29,8 @@ template <class T>
using SloppyTNode = compiler::SloppyTNode<T>;
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
- : compiler::CodeAssembler(state), BaseBuiltinsFromDSLAssembler(state) {
+ : compiler::CodeAssembler(state),
+ TorqueGeneratedExportedMacrosAssembler(state) {
if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) {
HandleBreakOnNode();
}
@@ -168,14 +169,13 @@ void CodeStubAssembler::FailAssert(
const char* extra_node4_name, Node* extra_node5,
const char* extra_node5_name) {
DCHECK_NOT_NULL(message);
- char chars[1024];
- Vector<char> buffer(chars);
+ EmbeddedVector<char, 1024> chars;
if (file != nullptr) {
- SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
+ SNPrintF(chars, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
} else {
- SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
+ SNPrintF(chars, "CSA_ASSERT failed: %s\n", message);
}
- Node* message_node = StringConstant(&(buffer[0]));
+ Node* message_node = StringConstant(chars.begin());
#ifdef DEBUG
// Only print the extra nodes in debug builds.
@@ -294,7 +294,7 @@ bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
}
} else {
DCHECK_EQ(mode, SMI_PARAMETERS);
- if (ToSmiConstant(test, &smi_test) && smi_test->value() == 0) {
+ if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) {
return true;
}
}
@@ -640,6 +640,23 @@ TNode<IntPtrT> CodeStubAssembler::TryIntPtrAdd(TNode<IntPtrT> a,
return Projection<0>(pair);
}
+TNode<IntPtrT> CodeStubAssembler::TryIntPtrSub(TNode<IntPtrT> a,
+ TNode<IntPtrT> b,
+ Label* if_overflow) {
+ TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(a, b);
+ TNode<BoolT> overflow = Projection<1>(pair);
+ GotoIf(overflow, if_overflow);
+ return Projection<0>(pair);
+}
+
+TNode<Int32T> CodeStubAssembler::TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b,
+ Label* if_overflow) {
+ TNode<PairT<Int32T, BoolT>> pair = Int32MulWithOverflow(a, b);
+ TNode<BoolT> overflow = Projection<1>(pair);
+ GotoIf(overflow, if_overflow);
+ return Projection<0>(pair);
+}
+
TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
@@ -1233,15 +1250,25 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags) {
Comment("Allocate");
bool const new_space = !(flags & kPretenured);
- if (!(flags & kAllowLargeObjectAllocation)) {
+ bool const allow_large_objects = flags & kAllowLargeObjectAllocation;
+ // For optimized allocations, we don't allow the allocation to happen in a
+ // different generation than requested.
+ bool const always_allocated_in_requested_space =
+ !new_space || !allow_large_objects || FLAG_young_generation_large_objects;
+ if (!allow_large_objects) {
intptr_t size_constant;
if (ToIntPtrConstant(size_in_bytes, size_constant)) {
CHECK_LE(size_constant, kMaxRegularHeapObjectSize);
+ } else {
+ CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
}
}
- if (!(flags & kDoubleAlignment) && !(flags & kAllowLargeObjectAllocation)) {
- return OptimizedAllocate(size_in_bytes, new_space ? AllocationType::kYoung
- : AllocationType::kOld);
+ if (!(flags & kDoubleAlignment) && always_allocated_in_requested_space) {
+ return OptimizedAllocate(
+ size_in_bytes,
+ new_space ? AllocationType::kYoung : AllocationType::kOld,
+ allow_large_objects ? AllowLargeObjects::kTrue
+ : AllowLargeObjects::kFalse);
}
TNode<ExternalReference> top_address = ExternalConstant(
new_space
@@ -1346,35 +1373,35 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
BIND(&if_bigint);
{
- Node* result =
- CallRuntime(Runtime::kBigIntToBoolean, NoContextConstant(), value);
- CSA_ASSERT(this, IsBoolean(result));
- Branch(WordEqual(result, TrueConstant()), if_true, if_false);
+ TNode<BigInt> bigint = CAST(value);
+ TNode<Word32T> bitfield = LoadBigIntBitfield(bigint);
+ TNode<Uint32T> length = DecodeWord32<BigIntBase::LengthBits>(bitfield);
+ Branch(Word32Equal(length, Int32Constant(0)), if_false, if_true);
}
}
}
-Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
+Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType type) {
Node* frame_pointer = LoadParentFramePointer();
- return Load(rep, frame_pointer, IntPtrConstant(offset));
+ return Load(type, frame_pointer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
- MachineType rep) {
- return Load(rep, buffer, IntPtrConstant(offset));
+ MachineType type) {
+ return Load(type, buffer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
- int offset, MachineType rep) {
+ int offset, MachineType type) {
CSA_ASSERT(this, IsStrong(object));
- return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
+ return Load(type, object, IntPtrConstant(offset - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset,
- MachineType rep) {
+ MachineType type) {
CSA_ASSERT(this, IsStrong(object));
- return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
+ return Load(type, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
@@ -1475,18 +1502,19 @@ TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object))));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
- return Select<HeapObject>(TaggedIsSmi(properties),
- [=] { return EmptyFixedArrayConstant(); },
- [=] { return CAST(properties); });
+ return Select<HeapObject>(
+ TaggedIsSmi(properties), [=] { return EmptyFixedArrayConstant(); },
+ [=] { return CAST(properties); });
}
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object);
- return Select<HeapObject>(TaggedIsSmi(properties),
- [=] { return EmptyPropertyDictionaryConstant(); },
- [=] { return CAST(properties); });
+ return Select<HeapObject>(
+ TaggedIsSmi(properties),
+ [=] { return EmptyPropertyDictionaryConstant(); },
+ [=] { return CAST(properties); });
}
TNode<Number> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
@@ -1505,7 +1533,7 @@ TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
CSA_ASSERT(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)),
IsElementsKindInRange(LoadElementsKind(array),
PACKED_SEALED_ELEMENTS,
- PACKED_FROZEN_ELEMENTS)));
+ HOLEY_FROZEN_ELEMENTS)));
// JSArray length is always a positive Smi for fast arrays.
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
return UncheckedCast<Smi>(length);
@@ -1582,7 +1610,7 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind(
TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- return CAST(LoadObjectField(map, Map::kDescriptorsOffset));
+ return CAST(LoadObjectField(map, Map::kInstanceDescriptorsOffset));
}
TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
@@ -1665,8 +1693,9 @@ Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
TNode<Object> CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
TNode<HeapObject> object =
CAST(LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
- return Select<Object>(IsMap(object), [=] { return object; },
- [=] { return UndefinedConstant(); });
+ return Select<Object>(
+ IsMap(object), [=] { return object; },
+ [=] { return UndefinedConstant(); });
}
TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties(
@@ -1674,11 +1703,12 @@ TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties(
// This check can have false positives, since it applies to any JSValueType.
GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout);
- TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
- GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask |
- Map::HasHiddenPrototypeBit::kMask),
+ GotoIf(IsSetWord32(LoadMapBitField2(map), Map::HasHiddenPrototypeBit::kMask),
bailout);
+ TNode<Uint32T> bit_field3 = LoadMapBitField3(map);
+ GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask), bailout);
+
return bit_field3;
}
@@ -2004,39 +2034,18 @@ TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
return Signed(DecodeWord<PropertyArray::LengthField>(value));
}
-TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayBackingStore(
- TNode<FixedTypedArrayBase> typed_array) {
+TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayBackingStore(
+ TNode<JSTypedArray> typed_array) {
// Backing store = external_pointer + base_pointer.
Node* external_pointer =
- LoadObjectField(typed_array, FixedTypedArrayBase::kExternalPointerOffset,
+ LoadObjectField(typed_array, JSTypedArray::kExternalPointerOffset,
MachineType::Pointer());
Node* base_pointer =
- LoadObjectField(typed_array, FixedTypedArrayBase::kBasePointerOffset);
+ LoadObjectField(typed_array, JSTypedArray::kBasePointerOffset);
return UncheckedCast<RawPtrT>(
IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
}
-TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayOnHeapBackingStore(
- TNode<FixedTypedArrayBase> typed_array) {
- // This is specialized method of retrieving the backing store pointer for on
- // heap allocated typed array buffer. On heap allocated buffer's backing
- // stores are a fixed offset from the pointer to a typed array's elements. See
- // TypedArrayBuiltinsAssembler::AllocateOnHeapElements().
- TNode<WordT> backing_store =
- IntPtrAdd(BitcastTaggedToWord(typed_array),
- IntPtrConstant(
- FixedTypedArrayBase::ExternalPointerValueForOnHeapArray()));
-
-#ifdef DEBUG
- // Verify that this is an on heap backing store.
- TNode<RawPtrT> expected_backing_store_pointer =
- LoadFixedTypedArrayBackingStore(typed_array);
- CSA_ASSERT(this, WordEqual(backing_store, expected_backing_store_pointer));
-#endif
-
- return UncheckedCast<RawPtrT>(backing_store);
-}
-
Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
Node* data_pointer, Node* offset) {
if (Is64()) {
@@ -2312,11 +2321,11 @@ TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
return var_result.value();
}
-void CodeStubAssembler::StoreFixedTypedArrayElementFromTagged(
- TNode<Context> context, TNode<FixedTypedArrayBase> elements,
+void CodeStubAssembler::StoreJSTypedArrayElementFromTagged(
+ TNode<Context> context, TNode<JSTypedArray> typed_array,
TNode<Object> index_node, TNode<Object> value, ElementsKind elements_kind,
ParameterMode parameter_mode) {
- TNode<RawPtrT> data_pointer = LoadFixedTypedArrayBackingStore(elements);
+ TNode<RawPtrT> data_pointer = LoadJSTypedArrayBackingStore(typed_array);
switch (elements_kind) {
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
@@ -2341,13 +2350,10 @@ void CodeStubAssembler::StoreFixedTypedArrayElementFromTagged(
LoadHeapNumberValue(CAST(value)), parameter_mode);
break;
case BIGUINT64_ELEMENTS:
- case BIGINT64_ELEMENTS: {
- TNode<IntPtrT> offset =
- ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
- EmitBigTypedArrayElementStore(elements, data_pointer, offset,
- CAST(value));
+ case BIGINT64_ELEMENTS:
+ StoreElement(data_pointer, elements_kind, index_node,
+ UncheckedCast<BigInt>(value), parameter_mode);
break;
- }
default:
UNREACHABLE();
}
@@ -2436,7 +2442,8 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
// Handled by if_holey.
- HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
+ HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS, HOLEY_SEALED_ELEMENTS,
+ HOLEY_FROZEN_ELEMENTS,
// Handled by if_packed_double.
PACKED_DOUBLE_ELEMENTS,
// Handled by if_holey_double.
@@ -2444,7 +2451,7 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged(
Label* labels[] = {// PACKED_{SMI,}_ELEMENTS
&if_packed, &if_packed, &if_packed, &if_packed,
// HOLEY_{SMI,}_ELEMENTS
- &if_holey, &if_holey,
+ &if_holey, &if_holey, &if_holey, &if_holey,
// PACKED_DOUBLE_ELEMENTS
&if_packed_double,
// HOLEY_DOUBLE_ELEMENTS
@@ -2714,8 +2721,7 @@ void CodeStubAssembler::StoreObjectField(Node* object, int offset,
DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
OptimizedStoreField(MachineRepresentation::kTagged,
- UncheckedCast<HeapObject>(object), offset, value,
- WriteBarrierKind::kFullWriteBarrier);
+ UncheckedCast<HeapObject>(object), offset, value);
}
void CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
@@ -2730,12 +2736,24 @@ void CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
void CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value, MachineRepresentation rep) {
- OptimizedStoreField(rep, UncheckedCast<HeapObject>(object), offset, value,
- WriteBarrierKind::kNoWriteBarrier);
+ if (CanBeTaggedPointer(rep)) {
+ OptimizedStoreFieldAssertNoWriteBarrier(
+ rep, UncheckedCast<HeapObject>(object), offset, value);
+ } else {
+ OptimizedStoreFieldUnsafeNoWriteBarrier(
+ rep, UncheckedCast<HeapObject>(object), offset, value);
+ }
+}
+
+void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier(
+ TNode<HeapObject> object, int offset, TNode<Object> value) {
+ OptimizedStoreFieldUnsafeNoWriteBarrier(MachineRepresentation::kTagged,
+ object, offset, value);
}
void CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
- Node* object, Node* offset, Node* value, MachineRepresentation rep) {
+ Node* object, SloppyTNode<IntPtrT> offset, Node* value,
+ MachineRepresentation rep) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep);
@@ -2755,9 +2773,9 @@ void CodeStubAssembler::StoreMapNoWriteBarrier(Node* object,
void CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
- OptimizedStoreField(MachineRepresentation::kTaggedPointer,
- UncheckedCast<HeapObject>(object), HeapObject::kMapOffset,
- map, WriteBarrierKind::kNoWriteBarrier);
+ OptimizedStoreFieldAssertNoWriteBarrier(MachineRepresentation::kTaggedPointer,
+ UncheckedCast<HeapObject>(object),
+ HeapObject::kMapOffset, map);
}
void CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
@@ -2786,6 +2804,7 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
this, Word32Or(IsFixedArraySubclass(object), IsPropertyArray(object)));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
+ barrier_mode == UNSAFE_SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER ||
barrier_mode == UPDATE_EPHEMERON_KEY_WRITE_BARRIER);
DCHECK(IsAligned(additional_offset, kTaggedSize));
@@ -2820,6 +2839,9 @@ void CodeStubAssembler::StoreFixedArrayOrPropertyArrayElement(
FixedArray::kHeaderSize));
if (barrier_mode == SKIP_WRITE_BARRIER) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value);
+ } else if (barrier_mode == UNSAFE_SKIP_WRITE_BARRIER) {
+ UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
+ value);
} else if (barrier_mode == UPDATE_EPHEMERON_KEY_WRITE_BARRIER) {
StoreEphemeronKey(object, offset, value);
} else {
@@ -2854,6 +2876,7 @@ void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
DCHECK(IsAligned(additional_offset, kTaggedSize));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
+ barrier_mode == UNSAFE_SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
int header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
@@ -2865,6 +2888,9 @@ void CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
FeedbackVector::kHeaderSize));
if (barrier_mode == SKIP_WRITE_BARRIER) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value);
+ } else if (barrier_mode == UNSAFE_SKIP_WRITE_BARRIER) {
+ UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
+ value);
} else {
Store(object, offset, value);
}
@@ -3148,6 +3174,55 @@ TNode<UintPtrT> CodeStubAssembler::LoadBigIntDigit(TNode<BigInt> bigint,
MachineType::UintPtr()));
}
+TNode<ByteArray> CodeStubAssembler::AllocateByteArray(TNode<UintPtrT> length,
+ AllocationFlags flags) {
+ Comment("AllocateByteArray");
+ VARIABLE(var_result, MachineRepresentation::kTagged);
+
+ // Compute the ByteArray size and check if it fits into new space.
+ Label if_lengthiszero(this), if_sizeissmall(this),
+ if_notsizeissmall(this, Label::kDeferred), if_join(this);
+ GotoIf(WordEqual(length, UintPtrConstant(0)), &if_lengthiszero);
+
+ Node* raw_size =
+ GetArrayAllocationSize(Signed(length), UINT8_ELEMENTS, INTPTR_PARAMETERS,
+ ByteArray::kHeaderSize + kObjectAlignmentMask);
+ TNode<WordT> size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
+ Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
+ &if_sizeissmall, &if_notsizeissmall);
+
+ BIND(&if_sizeissmall);
+ {
+ // Just allocate the ByteArray in new space.
+ TNode<Object> result =
+ AllocateInNewSpace(UncheckedCast<IntPtrT>(size), flags);
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kByteArrayMap));
+ StoreMapNoWriteBarrier(result, RootIndex::kByteArrayMap);
+ StoreObjectFieldNoWriteBarrier(result, ByteArray::kLengthOffset,
+ SmiTag(Signed(length)));
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+
+ BIND(&if_notsizeissmall);
+ {
+ // We might need to allocate in large object space, go to the runtime.
+ Node* result = CallRuntime(Runtime::kAllocateByteArray, NoContextConstant(),
+ ChangeUintPtrToTagged(length));
+ var_result.Bind(result);
+ Goto(&if_join);
+ }
+
+ BIND(&if_lengthiszero);
+ {
+ var_result.Bind(LoadRoot(RootIndex::kEmptyByteArray));
+ Goto(&if_join);
+ }
+
+ BIND(&if_join);
+ return CAST(var_result.value());
+}
+
TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
uint32_t length, AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
@@ -3167,9 +3242,9 @@ TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
}
TNode<BoolT> CodeStubAssembler::IsZeroOrContext(SloppyTNode<Object> object) {
- return Select<BoolT>(WordEqual(object, SmiConstant(0)),
- [=] { return Int32TrueConstant(); },
- [=] { return IsContext(CAST(object)); });
+ return Select<BoolT>(
+ WordEqual(object, SmiConstant(0)), [=] { return Int32TrueConstant(); },
+ [=] { return IsContext(CAST(object)); });
}
TNode<String> CodeStubAssembler::AllocateSeqOneByteString(
@@ -3800,7 +3875,8 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
BuildFastLoop(
start_address, end_address,
[this, value](Node* current) {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
+ UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, current,
+ value);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
}
@@ -3814,11 +3890,11 @@ TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
TNode<JSArray> CodeStubAssembler::AllocateJSArray(
TNode<Map> array_map, TNode<FixedArrayBase> elements, TNode<Smi> length,
- Node* allocation_site) {
+ Node* allocation_site, int array_header_size) {
Comment("begin allocation of JSArray passing in elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
- int base_size = JSArray::kSize;
+ int base_size = array_header_size;
if (allocation_site != nullptr) {
base_size += AllocationMemento::kSize;
}
@@ -3834,7 +3910,7 @@ std::pair<TNode<JSArray>, TNode<FixedArrayBase>>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
Node* allocation_site, Node* capacity, ParameterMode capacity_mode,
- AllocationFlags allocation_flags) {
+ AllocationFlags allocation_flags, int array_header_size) {
Comment("begin allocation of JSArray with elements");
CHECK_EQ(allocation_flags & ~kAllowLargeObjectAllocation, 0);
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
@@ -3842,28 +3918,35 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
TVARIABLE(JSArray, array);
TVARIABLE(FixedArrayBase, elements);
- if (IsIntPtrOrSmiConstantZero(capacity, capacity_mode)) {
- TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
- array = AllocateJSArray(array_map, empty_array, length, allocation_site);
- return {array.value(), empty_array};
- }
-
Label out(this), empty(this), nonempty(this);
- Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
- &empty, &nonempty);
+ int capacity_int;
+ if (TryGetIntPtrOrSmiConstantValue(capacity, &capacity_int, capacity_mode)) {
+ if (capacity_int == 0) {
+ TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
+ array = AllocateJSArray(array_map, empty_array, length, allocation_site,
+ array_header_size);
+ return {array.value(), empty_array};
+ } else {
+ Goto(&nonempty);
+ }
+ } else {
+ Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
+ &empty, &nonempty);
- BIND(&empty);
- {
- TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
- array = AllocateJSArray(array_map, empty_array, length, allocation_site);
- elements = empty_array;
- Goto(&out);
+ BIND(&empty);
+ {
+ TNode<FixedArrayBase> empty_array = EmptyFixedArrayConstant();
+ array = AllocateJSArray(array_map, empty_array, length, allocation_site,
+ array_header_size);
+ elements = empty_array;
+ Goto(&out);
+ }
}
BIND(&nonempty);
{
- int base_size = JSArray::kSize;
+ int base_size = array_header_size;
if (allocation_site != nullptr) base_size += AllocationMemento::kSize;
const int elements_offset = base_size;
@@ -3890,8 +3973,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
AllocateFixedArray(kind, capacity, capacity_mode, allocation_flags);
if (IsDoubleElementsKind(kind)) {
- FillFixedDoubleArrayWithZero(CAST(elements.value()),
- ParameterToIntPtr(capacity, capacity_mode));
+ FillFixedDoubleArrayWithZero(
+ CAST(elements.value()), ParameterToIntPtr(capacity, capacity_mode));
} else {
FillFixedArrayWithSmiZero(CAST(elements.value()),
ParameterToIntPtr(capacity, capacity_mode));
@@ -3900,8 +3983,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
// The JSArray and possibly allocation memento next. Note that
// allocation_flags are *not* passed on here and the resulting JSArray
// will always be in new space.
- array =
- AllocateJSArray(array_map, elements.value(), length, allocation_site);
+ array = AllocateJSArray(array_map, elements.value(), length,
+ allocation_site, array_header_size);
Goto(&out);
@@ -3915,20 +3998,20 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
InnerAllocate(array.value(), elements_offset));
StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
- elements.value());
+ elements.value());
// Setup elements object.
STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
RootIndex elements_map_index = IsDoubleElementsKind(kind)
- ? RootIndex::kFixedDoubleArrayMap
- : RootIndex::kFixedArrayMap;
+ ? RootIndex::kFixedDoubleArrayMap
+ : RootIndex::kFixedArrayMap;
DCHECK(RootsTable::IsImmortalImmovable(elements_map_index));
StoreMapNoWriteBarrier(elements.value(), elements_map_index);
TNode<Smi> capacity_smi = ParameterToTagged(capacity, capacity_mode);
CSA_ASSERT(this, SmiGreaterThan(capacity_smi, SmiConstant(0)));
StoreObjectFieldNoWriteBarrier(elements.value(), FixedArray::kLengthOffset,
- capacity_smi);
+ capacity_smi);
Goto(&out);
}
@@ -3973,8 +4056,8 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
Label out(this), nonempty(this);
- Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
- &out, &nonempty);
+ Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
+ &out, &nonempty);
BIND(&nonempty);
{
@@ -4022,13 +4105,15 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
VARIABLE(var_new_elements, MachineRepresentation::kTagged);
TVARIABLE(Int32T, var_elements_kind, LoadMapElementsKind(LoadMap(array)));
- Label allocate_jsarray(this), holey_extract(this);
+ Label allocate_jsarray(this), holey_extract(this),
+ allocate_jsarray_main(this);
bool need_conversion =
convert_holes == HoleConversionMode::kConvertToUndefined;
if (need_conversion) {
// We need to take care of holes, if the array is of holey elements kind.
- GotoIf(IsHoleyFastElementsKind(var_elements_kind.value()), &holey_extract);
+ GotoIf(IsHoleyFastElementsKindForRead(var_elements_kind.value()),
+ &holey_extract);
}
// Simple extraction that preserves holes.
@@ -4063,6 +4148,17 @@ Node* CodeStubAssembler::CloneFastJSArray(Node* context, Node* array,
}
BIND(&allocate_jsarray);
+
+ // Handle sealed, frozen elements kinds
+ CSA_ASSERT(this, IsElementsKindLessThanOrEqual(var_elements_kind.value(),
+ LAST_FROZEN_ELEMENTS_KIND));
+ GotoIf(IsElementsKindLessThanOrEqual(var_elements_kind.value(),
+ LAST_FAST_ELEMENTS_KIND),
+ &allocate_jsarray_main);
+ var_elements_kind = Int32Constant(PACKED_ELEMENTS);
+ Goto(&allocate_jsarray_main);
+
+ BIND(&allocate_jsarray_main);
// Use the cannonical map for the chosen elements kind.
Node* native_context = LoadNativeContext(context);
TNode<Map> array_map =
@@ -4530,13 +4626,13 @@ void CodeStubAssembler::FillPropertyArrayWithUndefined(Node* array,
CSA_SLOW_ASSERT(this, IsPropertyArray(array));
ElementsKind kind = PACKED_ELEMENTS;
Node* value = UndefinedConstant();
- BuildFastFixedArrayForEach(array, kind, from_node, to_node,
- [this, value](Node* array, Node* offset) {
- StoreNoWriteBarrier(
- MachineRepresentation::kTagged, array,
- offset, value);
- },
- mode);
+ BuildFastFixedArrayForEach(
+ array, kind, from_node, to_node,
+ [this, value](Node* array, Node* offset) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
+ value);
+ },
+ mode);
}
void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, Node* array,
@@ -4839,8 +4935,8 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("[ CopyFixedArrayElements");
// Typed array elements are not supported.
- DCHECK(!IsFixedTypedArrayElementsKind(from_kind));
- DCHECK(!IsFixedTypedArrayElementsKind(to_kind));
+ DCHECK(!IsTypedArrayElementsKind(from_kind));
+ DCHECK(!IsTypedArrayElementsKind(to_kind));
Label done(this);
bool from_double_elements = IsDoubleElementsKind(from_kind);
@@ -4953,8 +5049,8 @@ void CodeStubAssembler::CopyFixedArrayElements(
StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array_adjusted,
to_offset, value);
} else {
- StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array_adjusted,
- to_offset, value);
+ UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged,
+ to_array_adjusted, to_offset, value);
}
Goto(&next_iter);
@@ -5107,18 +5203,19 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
(ToInt32Constant(from_index, from_index_constant) &&
ToInt32Constant(to_index, to_index_constant) &&
from_index_constant == to_index_constant));
- BuildFastLoop(vars, from_offset, limit_offset,
- [this, from_string, to_string, &current_to_offset, to_increment,
- type, rep, index_same](Node* offset) {
- Node* value = Load(type, from_string, offset);
- StoreNoWriteBarrier(
- rep, to_string,
- index_same ? offset : current_to_offset.value(), value);
- if (!index_same) {
- Increment(&current_to_offset, to_increment);
- }
- },
- from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+ BuildFastLoop(
+ vars, from_offset, limit_offset,
+ [this, from_string, to_string, &current_to_offset, to_increment, type,
+ rep, index_same](Node* offset) {
+ Node* value = Load(type, from_string, offset);
+ StoreNoWriteBarrier(rep, to_string,
+ index_same ? offset : current_to_offset.value(),
+ value);
+ if (!index_same) {
+ Increment(&current_to_offset, to_increment);
+ }
+ },
+ from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
}
Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
@@ -5659,26 +5756,27 @@ TNode<UintPtrT> CodeStubAssembler::TryNumberToUintPtr(TNode<Number> value,
Label* if_negative) {
TVARIABLE(UintPtrT, result);
Label done(this, &result);
- Branch(TaggedIsSmi(value),
- [&] {
- TNode<Smi> value_smi = CAST(value);
- if (if_negative == nullptr) {
- CSA_SLOW_ASSERT(this, SmiLessThan(SmiConstant(-1), value_smi));
- } else {
- GotoIfNot(TaggedIsPositiveSmi(value), if_negative);
- }
- result = UncheckedCast<UintPtrT>(SmiToIntPtr(value_smi));
- Goto(&done);
- },
- [&] {
- TNode<HeapNumber> value_hn = CAST(value);
- TNode<Float64T> value = LoadHeapNumberValue(value_hn);
- if (if_negative != nullptr) {
- GotoIf(Float64LessThan(value, Float64Constant(0.0)), if_negative);
- }
- result = ChangeFloat64ToUintPtr(value);
- Goto(&done);
- });
+ Branch(
+ TaggedIsSmi(value),
+ [&] {
+ TNode<Smi> value_smi = CAST(value);
+ if (if_negative == nullptr) {
+ CSA_SLOW_ASSERT(this, SmiLessThan(SmiConstant(-1), value_smi));
+ } else {
+ GotoIfNot(TaggedIsPositiveSmi(value), if_negative);
+ }
+ result = UncheckedCast<UintPtrT>(SmiToIntPtr(value_smi));
+ Goto(&done);
+ },
+ [&] {
+ TNode<HeapNumber> value_hn = CAST(value);
+ TNode<Float64T> value = LoadHeapNumberValue(value_hn);
+ if (if_negative != nullptr) {
+ GotoIf(Float64LessThan(value, Float64Constant(0.0)), if_negative);
+ }
+ result = ChangeFloat64ToUintPtr(value);
+ Goto(&done);
+ });
BIND(&done);
return result.value();
@@ -5827,6 +5925,21 @@ Node* CodeStubAssembler::ThrowIfNotJSReceiver(Node* context, Node* value,
return var_value_map.value();
}
+void CodeStubAssembler::ThrowIfNotCallable(TNode<Context> context,
+ TNode<Object> value,
+ const char* method_name) {
+ Label out(this), throw_exception(this, Label::kDeferred);
+
+ GotoIf(TaggedIsSmi(value), &throw_exception);
+ Branch(IsCallable(CAST(value)), &out, &throw_exception);
+
+ // The {value} is not a compatible receiver for this method.
+ BIND(&throw_exception);
+ ThrowTypeError(context, MessageTemplate::kCalledNonCallable, method_name);
+
+ BIND(&out);
+}
+
void CodeStubAssembler::ThrowRangeError(Node* context, MessageTemplate message,
Node* arg0, Node* arg1, Node* arg2) {
Node* template_index = SmiConstant(static_cast<int>(message));
@@ -5884,11 +5997,11 @@ TNode<BoolT> CodeStubAssembler::IsExtensibleMap(SloppyTNode<Map> map) {
return IsSetWord32<Map::IsExtensibleBit>(LoadMapBitField2(map));
}
-TNode<BoolT> CodeStubAssembler::IsPackedFrozenOrSealedElementsKindMap(
+TNode<BoolT> CodeStubAssembler::IsFrozenOrSealedElementsKindMap(
SloppyTNode<Map> map) {
CSA_ASSERT(this, IsMap(map));
return IsElementsKindInRange(LoadMapElementsKind(map), PACKED_SEALED_ELEMENTS,
- PACKED_FROZEN_ELEMENTS);
+ HOLEY_FROZEN_ELEMENTS);
}
TNode<BoolT> CodeStubAssembler::IsExtensibleNonPrototypeMap(TNode<Map> map) {
@@ -5903,6 +6016,10 @@ TNode<BoolT> CodeStubAssembler::IsCallableMap(SloppyTNode<Map> map) {
return IsSetWord32<Map::IsCallableBit>(LoadMapBitField(map));
}
+TNode<BoolT> CodeStubAssembler::IsDebugInfo(TNode<HeapObject> object) {
+ return HasInstanceType(object, DEBUG_INFO_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsDeprecatedMap(SloppyTNode<Map> map) {
CSA_ASSERT(this, IsMap(map));
return IsSetWord32<Map::IsDeprecatedBit>(LoadMapBitField3(map));
@@ -6182,6 +6299,11 @@ TNode<BoolT> CodeStubAssembler::IsJSProxy(SloppyTNode<HeapObject> object) {
return HasInstanceType(object, JS_PROXY_TYPE);
}
+TNode<BoolT> CodeStubAssembler::IsJSStringIterator(
+ SloppyTNode<HeapObject> object) {
+ return HasInstanceType(object, JS_STRING_ITERATOR_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSGlobalProxy(
SloppyTNode<HeapObject> object) {
return HasInstanceType(object, JS_GLOBAL_PROXY_TYPE);
@@ -6429,14 +6551,15 @@ TNode<BoolT> CodeStubAssembler::IsPrimitiveInstanceType(
TNode<BoolT> CodeStubAssembler::IsPrivateSymbol(
SloppyTNode<HeapObject> object) {
- return Select<BoolT>(IsSymbol(object),
- [=] {
- TNode<Symbol> symbol = CAST(object);
- TNode<Uint32T> flags = LoadObjectField<Uint32T>(
- symbol, Symbol::kFlagsOffset);
- return IsSetWord32<Symbol::IsPrivateBit>(flags);
- },
- [=] { return Int32FalseConstant(); });
+ return Select<BoolT>(
+ IsSymbol(object),
+ [=] {
+ TNode<Symbol> symbol = CAST(object);
+ TNode<Uint32T> flags =
+ LoadObjectField<Uint32T>(symbol, Symbol::kFlagsOffset);
+ return IsSetWord32<Symbol::IsPrivateBit>(flags);
+ },
+ [=] { return Int32FalseConstant(); });
}
TNode<BoolT> CodeStubAssembler::IsNativeContext(
@@ -6501,8 +6624,17 @@ TNode<BoolT> CodeStubAssembler::IsJSFunctionMap(SloppyTNode<Map> map) {
return IsJSFunctionInstanceType(LoadMapInstanceType(map));
}
+TNode<BoolT> CodeStubAssembler::IsJSTypedArrayInstanceType(
+ SloppyTNode<Int32T> instance_type) {
+ return InstanceTypeEqual(instance_type, JS_TYPED_ARRAY_TYPE);
+}
+
+TNode<BoolT> CodeStubAssembler::IsJSTypedArrayMap(SloppyTNode<Map> map) {
+ return IsJSTypedArrayInstanceType(LoadMapInstanceType(map));
+}
+
TNode<BoolT> CodeStubAssembler::IsJSTypedArray(SloppyTNode<HeapObject> object) {
- return HasInstanceType(object, JS_TYPED_ARRAY_TYPE);
+ return IsJSTypedArrayMap(LoadMap(object));
}
TNode<BoolT> CodeStubAssembler::IsJSArrayBuffer(
@@ -6514,23 +6646,14 @@ TNode<BoolT> CodeStubAssembler::IsJSDataView(TNode<HeapObject> object) {
return HasInstanceType(object, JS_DATA_VIEW_TYPE);
}
-TNode<BoolT> CodeStubAssembler::IsFixedTypedArray(
- SloppyTNode<HeapObject> object) {
- TNode<Int32T> instance_type = LoadInstanceType(object);
- return UncheckedCast<BoolT>(Word32And(
- Int32GreaterThanOrEqual(instance_type,
- Int32Constant(FIRST_FIXED_TYPED_ARRAY_TYPE)),
- Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_FIXED_TYPED_ARRAY_TYPE))));
-}
-
TNode<BoolT> CodeStubAssembler::IsJSRegExp(SloppyTNode<HeapObject> object) {
return HasInstanceType(object, JS_REGEXP_TYPE);
}
TNode<BoolT> CodeStubAssembler::IsNumber(SloppyTNode<Object> object) {
- return Select<BoolT>(TaggedIsSmi(object), [=] { return Int32TrueConstant(); },
- [=] { return IsHeapNumber(CAST(object)); });
+ return Select<BoolT>(
+ TaggedIsSmi(object), [=] { return Int32TrueConstant(); },
+ [=] { return IsHeapNumber(CAST(object)); });
}
TNode<BoolT> CodeStubAssembler::IsNumeric(SloppyTNode<Object> object) {
@@ -6566,9 +6689,9 @@ TNode<BoolT> CodeStubAssembler::IsNumberNormalized(SloppyTNode<Number> number) {
}
TNode<BoolT> CodeStubAssembler::IsNumberPositive(SloppyTNode<Number> number) {
- return Select<BoolT>(TaggedIsSmi(number),
- [=] { return TaggedIsPositiveSmi(number); },
- [=] { return IsHeapNumberPositive(CAST(number)); });
+ return Select<BoolT>(
+ TaggedIsSmi(number), [=] { return TaggedIsPositiveSmi(number); },
+ [=] { return IsHeapNumberPositive(CAST(number)); });
}
// TODO(cbruni): Use TNode<HeapNumber> instead of custom name.
@@ -6585,9 +6708,10 @@ TNode<BoolT> CodeStubAssembler::IsNumberNonNegativeSafeInteger(
TaggedIsSmi(number), [=] { return TaggedIsPositiveSmi(number); },
[=] {
TNode<HeapNumber> heap_number = CAST(number);
- return Select<BoolT>(IsInteger(heap_number),
- [=] { return IsHeapNumberPositive(heap_number); },
- [=] { return Int32FalseConstant(); });
+ return Select<BoolT>(
+ IsInteger(heap_number),
+ [=] { return IsHeapNumberPositive(heap_number); },
+ [=] { return Int32FalseConstant(); });
});
}
@@ -6652,9 +6776,9 @@ TNode<BoolT> CodeStubAssembler::IsHeapNumberUint32(TNode<HeapNumber> number) {
}
TNode<BoolT> CodeStubAssembler::IsNumberArrayIndex(TNode<Number> number) {
- return Select<BoolT>(TaggedIsSmi(number),
- [=] { return TaggedIsPositiveSmi(number); },
- [=] { return IsHeapNumberUint32(CAST(number)); });
+ return Select<BoolT>(
+ TaggedIsSmi(number), [=] { return TaggedIsPositiveSmi(number); },
+ [=] { return IsHeapNumberUint32(CAST(number)); });
}
Node* CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace(Node* element_count,
@@ -7600,12 +7724,12 @@ TNode<Number> CodeStubAssembler::ToNumber_Inline(SloppyTNode<Context> context,
BIND(&not_smi);
{
- var_result =
- Select<Number>(IsHeapNumber(CAST(input)), [=] { return CAST(input); },
- [=] {
- return CAST(CallBuiltin(Builtins::kNonNumberToNumber,
- context, input));
- });
+ var_result = Select<Number>(
+ IsHeapNumber(CAST(input)), [=] { return CAST(input); },
+ [=] {
+ return CAST(
+ CallBuiltin(Builtins::kNonNumberToNumber, context, input));
+ });
Goto(&end);
}
@@ -7818,71 +7942,6 @@ TNode<Number> CodeStubAssembler::ToUint32(SloppyTNode<Context> context,
return CAST(var_result.value());
}
-TNode<String> CodeStubAssembler::ToString(SloppyTNode<Context> context,
- SloppyTNode<Object> input) {
- TVARIABLE(Object, result, input);
- Label loop(this, &result), done(this);
- Goto(&loop);
- BIND(&loop);
- {
- // Load the current {input} value.
- TNode<Object> input = result.value();
-
- // Dispatch based on the type of the {input.}
- Label if_inputisnumber(this), if_inputisoddball(this),
- if_inputissymbol(this), if_inputisreceiver(this, Label::kDeferred),
- runtime(this, Label::kDeferred);
- GotoIf(TaggedIsSmi(input), &if_inputisnumber);
- TNode<Int32T> input_instance_type = LoadInstanceType(CAST(input));
- GotoIf(IsStringInstanceType(input_instance_type), &done);
- GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver);
- GotoIf(IsHeapNumberInstanceType(input_instance_type), &if_inputisnumber);
- GotoIf(IsOddballInstanceType(input_instance_type), &if_inputisoddball);
- Branch(IsSymbolInstanceType(input_instance_type), &if_inputissymbol,
- &runtime);
-
- BIND(&if_inputisnumber);
- {
- // Convert the Number {input} to a String.
- TNode<Number> number_input = CAST(input);
- result = NumberToString(number_input);
- Goto(&done);
- }
-
- BIND(&if_inputisoddball);
- {
- // Just return the {input}'s string representation.
- result = LoadObjectField(CAST(input), Oddball::kToStringOffset);
- Goto(&done);
- }
-
- BIND(&if_inputissymbol);
- {
- // Throw a type error when {input} is a Symbol.
- ThrowTypeError(context, MessageTemplate::kSymbolToString);
- }
-
- BIND(&if_inputisreceiver);
- {
- // Convert the JSReceiver {input} to a primitive first,
- // and then run the loop again with the new {input},
- // which is then a primitive value.
- result = CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String, context,
- input);
- Goto(&loop);
- }
-
- BIND(&runtime);
- {
- result = CallRuntime(Runtime::kToString, context, input);
- Goto(&done);
- }
- }
-
- BIND(&done);
- return CAST(result.value());
-}
-
TNode<String> CodeStubAssembler::ToString_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input) {
VARIABLE(var_result, MachineRepresentation::kTagged, input);
@@ -8171,9 +8230,7 @@ void CodeStubAssembler::Increment(Variable* variable, int value,
ParameterMode mode) {
DCHECK_IMPLIES(mode == INTPTR_PARAMETERS,
variable->rep() == MachineType::PointerRepresentation());
- DCHECK_IMPLIES(mode == SMI_PARAMETERS,
- variable->rep() == MachineRepresentation::kTagged ||
- variable->rep() == MachineRepresentation::kTaggedSigned);
+ DCHECK_IMPLIES(mode == SMI_PARAMETERS, CanBeTaggedSigned(variable->rep()));
variable->Bind(IntPtrOrSmiAdd(variable->value(),
IntPtrOrSmiConstant(value, mode), mode));
}
@@ -8735,15 +8792,16 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
first_inclusive,
IntPtrMul(ChangeInt32ToIntPtr(number_of_valid_entries), factor));
- BuildFastLoop(last_exclusive, first_inclusive,
- [=](SloppyTNode<IntPtrT> name_index) {
- TNode<MaybeObject> element =
- LoadArrayElement(array, Array::kHeaderSize, name_index);
- TNode<Name> candidate_name = CAST(element);
- *var_name_index = name_index;
- GotoIf(WordEqual(candidate_name, unique_name), if_found);
- },
- -Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre);
+ BuildFastLoop(
+ last_exclusive, first_inclusive,
+ [=](SloppyTNode<IntPtrT> name_index) {
+ TNode<MaybeObject> element =
+ LoadArrayElement(array, Array::kHeaderSize, name_index);
+ TNode<Name> candidate_name = CAST(element);
+ *var_name_index = name_index;
+ GotoIf(WordEqual(candidate_name, unique_name), if_found);
+ },
+ -Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre);
Goto(if_not_found);
}
@@ -9061,10 +9119,10 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
// Check if |object| is still stable, i.e. we can proceed using
// property details from preloaded |descriptors|.
- var_stable =
- Select<BoolT>(var_stable.value(),
- [=] { return WordEqual(LoadMap(object), map); },
- [=] { return Int32FalseConstant(); });
+ var_stable = Select<BoolT>(
+ var_stable.value(),
+ [=] { return WordEqual(LoadMap(object), map); },
+ [=] { return Int32FalseConstant(); });
Goto(&next_iteration);
}
@@ -9694,7 +9752,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
GotoIf(IsDetachedBuffer(buffer), if_absent);
- Node* length = SmiUntag(LoadJSTypedArrayLength(CAST(object)));
+ TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
Branch(UintPtrLessThan(intptr_index, length), if_found, if_absent);
}
BIND(&if_oob);
@@ -9990,7 +10048,7 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
element_size_shift -= kSmiShiftBits;
Smi smi_index;
constant_index = ToSmiConstant(index_node, &smi_index);
- if (constant_index) index = smi_index->value();
+ if (constant_index) index = smi_index.value();
index_node = BitcastTaggedToWord(index_node);
} else {
DCHECK(mode == INTPTR_PARAMETERS);
@@ -10335,7 +10393,32 @@ MachineRepresentation ElementsKindToMachineRepresentation(ElementsKind kind) {
void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
Node* index, Node* value,
ParameterMode mode) {
- if (IsFixedTypedArrayElementsKind(kind)) {
+ if (kind == BIGINT64_ELEMENTS || kind == BIGUINT64_ELEMENTS) {
+ TNode<IntPtrT> offset = ElementOffsetFromIndex(index, kind, mode, 0);
+ TVARIABLE(UintPtrT, var_low);
+ // Only used on 32-bit platforms.
+ TVARIABLE(UintPtrT, var_high);
+ BigIntToRawBytes(CAST(value), &var_low, &var_high);
+
+ MachineRepresentation rep = WordT::kMachineRepresentation;
+#if defined(V8_TARGET_BIG_ENDIAN)
+ if (!Is64()) {
+ StoreNoWriteBarrier(rep, elements, offset, var_high.value());
+ StoreNoWriteBarrier(rep, elements,
+ IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize)),
+ var_low.value());
+ } else {
+ StoreNoWriteBarrier(rep, elements, offset, var_low.value());
+ }
+#else
+ StoreNoWriteBarrier(rep, elements, offset, var_low.value());
+ if (!Is64()) {
+ StoreNoWriteBarrier(rep, elements,
+ IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize)),
+ var_high.value());
+ }
+#endif
+ } else if (IsTypedArrayElementsKind(kind)) {
if (kind == UINT8_CLAMPED_ELEMENTS) {
CSA_ASSERT(this,
Word32Equal(value, Word32And(Int32Constant(0xFF), value)));
@@ -10349,8 +10432,9 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
TNode<Float64T> value_float64 = UncheckedCast<Float64T>(value);
StoreFixedDoubleArrayElement(CAST(elements), index, value_float64, mode);
} else {
- WriteBarrierMode barrier_mode =
- IsSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ WriteBarrierMode barrier_mode = IsSmiElementsKind(kind)
+ ? UNSAFE_SKIP_WRITE_BARRIER
+ : UPDATE_WRITE_BARRIER;
StoreFixedArrayElement(CAST(elements), index, value, barrier_mode, 0, mode);
}
}
@@ -10386,7 +10470,7 @@ Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
TNode<Object> input, ElementsKind elements_kind, TNode<Context> context) {
- DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
+ DCHECK(IsTypedArrayElementsKind(elements_kind));
MachineRepresentation rep;
switch (elements_kind) {
@@ -10474,24 +10558,6 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
return var_result.value();
}
-void CodeStubAssembler::EmitBigTypedArrayElementStore(
- TNode<JSTypedArray> object, TNode<FixedTypedArrayBase> elements,
- TNode<IntPtrT> intptr_key, TNode<Object> value, TNode<Context> context,
- Label* opt_if_detached) {
- TNode<BigInt> bigint_value = ToBigInt(context, value);
-
- if (opt_if_detached != nullptr) {
- // Check if buffer has been detached. Must happen after {ToBigInt}!
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), opt_if_detached);
- }
-
- TNode<RawPtrT> backing_store = LoadFixedTypedArrayBackingStore(elements);
- TNode<IntPtrT> offset = ElementOffsetFromIndex(intptr_key, BIGINT64_ELEMENTS,
- INTPTR_PARAMETERS, 0);
- EmitBigTypedArrayElementStore(elements, backing_store, offset, bigint_value);
-}
-
void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
TVariable<UintPtrT>* var_low,
TVariable<UintPtrT>* var_high) {
@@ -10525,40 +10591,6 @@ void CodeStubAssembler::BigIntToRawBytes(TNode<BigInt> bigint,
BIND(&done);
}
-void CodeStubAssembler::EmitBigTypedArrayElementStore(
- TNode<FixedTypedArrayBase> elements, TNode<RawPtrT> backing_store,
- TNode<IntPtrT> offset, TNode<BigInt> bigint_value) {
- TVARIABLE(UintPtrT, var_low);
- // Only used on 32-bit platforms.
- TVARIABLE(UintPtrT, var_high);
- BigIntToRawBytes(bigint_value, &var_low, &var_high);
-
- // Assert that offset < elements.length. Given that it's an offset for a raw
- // pointer we correct it by the usual kHeapObjectTag offset.
- CSA_ASSERT(
- this, IsOffsetInBounds(offset, LoadAndUntagFixedArrayBaseLength(elements),
- kHeapObjectTag, BIGINT64_ELEMENTS));
-
- MachineRepresentation rep = WordT::kMachineRepresentation;
-#if defined(V8_TARGET_BIG_ENDIAN)
- if (!Is64()) {
- StoreNoWriteBarrier(rep, backing_store, offset, var_high.value());
- StoreNoWriteBarrier(rep, backing_store,
- IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize)),
- var_low.value());
- } else {
- StoreNoWriteBarrier(rep, backing_store, offset, var_low.value());
- }
-#else
- StoreNoWriteBarrier(rep, backing_store, offset, var_low.value());
- if (!Is64()) {
- StoreNoWriteBarrier(rep, backing_store,
- IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize)),
- var_high.value());
- }
-#endif
-}
-
void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode,
@@ -10577,7 +10609,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
ParameterMode parameter_mode = INTPTR_PARAMETERS;
TNode<IntPtrT> intptr_key = TryToIntptr(key, bailout);
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ if (IsTypedArrayElementsKind(elements_kind)) {
Label done(this);
// IntegerIndexedElementSet converts value to a Number/BigInt prior to the
@@ -10595,10 +10627,9 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
GotoIf(IsDetachedBuffer(buffer), bailout);
// Bounds check.
- Node* length =
- TaggedToParameter(LoadJSTypedArrayLength(CAST(object)), parameter_mode);
+ TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
- if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
// Skip the store if we write beyond the length or
// to a property with a negative integer index.
GotoIfNot(UintPtrLessThan(intptr_key, length), &done);
@@ -10612,33 +10643,22 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
DebugBreak();
}
- if (elements_kind == BIGINT64_ELEMENTS ||
- elements_kind == BIGUINT64_ELEMENTS) {
- TNode<BigInt> bigint_value = UncheckedCast<BigInt>(value);
-
- TNode<RawPtrT> backing_store =
- LoadFixedTypedArrayBackingStore(CAST(elements));
- TNode<IntPtrT> offset = ElementOffsetFromIndex(
- intptr_key, BIGINT64_ELEMENTS, INTPTR_PARAMETERS, 0);
- EmitBigTypedArrayElementStore(CAST(elements), backing_store, offset,
- bigint_value);
- } else {
- Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
- StoreElement(backing_store, elements_kind, intptr_key, value,
- parameter_mode);
- }
+ TNode<RawPtrT> backing_store = LoadJSTypedArrayBackingStore(CAST(object));
+ StoreElement(backing_store, elements_kind, intptr_key, value,
+ parameter_mode);
Goto(&done);
BIND(&done);
return;
}
- DCHECK(IsFastElementsKind(elements_kind) ||
- elements_kind == PACKED_SEALED_ELEMENTS);
+ DCHECK(
+ IsFastElementsKind(elements_kind) ||
+ IsInRange(elements_kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS));
- Node* length =
- SelectImpl(IsJSArray(object), [=]() { return LoadJSArrayLength(object); },
- [=]() { return LoadFixedArrayBaseLength(elements); },
- MachineRepresentation::kTagged);
+ Node* length = SelectImpl(
+ IsJSArray(object), [=]() { return LoadJSArrayLength(object); },
+ [=]() { return LoadFixedArrayBaseLength(elements); },
+ MachineRepresentation::kTagged);
length = TaggedToParameter(length, parameter_mode);
// In case value is stored into a fast smi array, assure that the value is
@@ -10651,7 +10671,8 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
}
if (IsGrowStoreMode(store_mode) &&
- !(elements_kind == PACKED_SEALED_ELEMENTS)) {
+ !(IsInRange(elements_kind, PACKED_SEALED_ELEMENTS,
+ HOLEY_SEALED_ELEMENTS))) {
elements = CheckForCapacityGrow(object, elements, elements_kind, length,
intptr_key, parameter_mode, bailout);
} else {
@@ -10965,9 +10986,7 @@ Node* CodeStubAssembler::BuildFastLoop(
ParameterMode parameter_mode, IndexAdvanceMode advance_mode) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(start_index, parameter_mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(end_index, parameter_mode));
- MachineRepresentation index_rep = (parameter_mode == INTPTR_PARAMETERS)
- ? MachineType::PointerRepresentation()
- : MachineRepresentation::kTaggedSigned;
+ MachineRepresentation index_rep = ParameterRepresentation(parameter_mode);
VARIABLE(var, index_rep, start_index);
VariableList vars_copy(vars.begin(), vars.end(), zone());
vars_copy.push_back(&var);
@@ -11361,7 +11380,8 @@ Node* CodeStubAssembler::RelationalComparison(Operation op, Node* left,
Node* right_map = LoadMap(right);
Label if_left_heapnumber(this), if_left_bigint(this, Label::kDeferred),
- if_left_string(this), if_left_other(this, Label::kDeferred);
+ if_left_string(this, Label::kDeferred),
+ if_left_other(this, Label::kDeferred);
GotoIf(IsHeapNumberMap(left_map), &if_left_heapnumber);
Node* left_instance_type = LoadMapInstanceType(left_map);
GotoIf(IsBigIntInstanceType(left_instance_type), &if_left_bigint);
@@ -11849,7 +11869,8 @@ Node* CodeStubAssembler::Equal(Node* left, Node* right, Node* context,
{
GotoIf(TaggedIsSmi(right), &use_symmetry);
- Label if_left_symbol(this), if_left_number(this), if_left_string(this),
+ Label if_left_symbol(this), if_left_number(this),
+ if_left_string(this, Label::kDeferred),
if_left_bigint(this, Label::kDeferred), if_left_oddball(this),
if_left_receiver(this);
@@ -12185,9 +12206,12 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
// }
// }
- Label if_equal(this), if_notequal(this), end(this);
+ Label if_equal(this), if_notequal(this), if_not_equivalent_types(this),
+ end(this);
VARIABLE(result, MachineRepresentation::kTagged);
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kNone);
+
// Check if {lhs} and {rhs} refer to the same object.
Label if_same(this), if_notsame(this);
Branch(WordEqual(lhs, rhs), &if_same, &if_notsame);
@@ -12196,9 +12220,6 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
{
// The {lhs} and {rhs} reference the exact same value, yet we need special
// treatment for HeapNumber, as NaN is not equal to NaN.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
- }
GenerateEqual_Same(lhs, &if_equal, &if_notequal, var_type_feedback);
}
@@ -12207,10 +12228,6 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
// The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
// BigInt and String they can still be considered equal.
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
- }
-
// Check if {lhs} is a Smi or a HeapObject.
Label if_lhsissmi(this), if_lhsisnotsmi(this);
Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
@@ -12236,10 +12253,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Node* lhs_value = LoadHeapNumberValue(lhs);
Node* rhs_value = SmiToFloat64(rhs);
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
// Perform a floating point comparison of {lhs} and {rhs}.
Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
@@ -12260,17 +12274,15 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Node* lhs_value = LoadHeapNumberValue(lhs);
Node* rhs_value = LoadHeapNumberValue(rhs);
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kNumber);
// Perform a floating point comparison of {lhs} and {rhs}.
Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
}
BIND(&if_rhsisnotnumber);
- Goto(&if_notequal);
+ Goto(&if_not_equivalent_types);
}
}
@@ -12281,7 +12293,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
BIND(&if_rhsissmi);
- Goto(&if_notequal);
+ Goto(&if_not_equivalent_types);
BIND(&if_rhsisnotsmi);
{
@@ -12289,7 +12301,7 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
// Check if {lhs} is a String.
- Label if_lhsisstring(this), if_lhsisnotstring(this);
+ Label if_lhsisstring(this, Label::kDeferred), if_lhsisnotstring(this);
Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
&if_lhsisnotstring);
@@ -12319,92 +12331,94 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
}
BIND(&if_rhsisnotstring);
- Goto(&if_notequal);
+ Goto(&if_not_equivalent_types);
}
BIND(&if_lhsisnotstring);
-
- // Check if {lhs} is a BigInt.
- Label if_lhsisbigint(this), if_lhsisnotbigint(this);
- Branch(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint,
- &if_lhsisnotbigint);
-
- BIND(&if_lhsisbigint);
{
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
-
- // Check if {rhs} is also a BigInt.
- Label if_rhsisbigint(this, Label::kDeferred),
- if_rhsisnotbigint(this);
- Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
- &if_rhsisnotbigint);
+ // Check if {lhs} is a BigInt.
+ Label if_lhsisbigint(this), if_lhsisnotbigint(this);
+ Branch(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint,
+ &if_lhsisnotbigint);
- BIND(&if_rhsisbigint);
+ BIND(&if_lhsisbigint);
{
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kBigInt));
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
+ // Check if {rhs} is also a BigInt.
+ Label if_rhsisbigint(this, Label::kDeferred),
+ if_rhsisnotbigint(this);
+ Branch(IsBigIntInstanceType(rhs_instance_type), &if_rhsisbigint,
+ &if_rhsisnotbigint);
+
+ BIND(&if_rhsisbigint);
+ {
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kBigInt);
+ result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), lhs, rhs));
+ Goto(&end);
}
- result.Bind(CallRuntime(Runtime::kBigIntEqualToBigInt,
- NoContextConstant(), lhs, rhs));
- Goto(&end);
- }
- BIND(&if_rhsisnotbigint);
- Goto(&if_notequal);
- }
-
- BIND(&if_lhsisnotbigint);
- if (var_type_feedback != nullptr) {
- // Load the instance type of {rhs}.
- Node* rhs_map = LoadMap(rhs);
- Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
-
- Label if_lhsissymbol(this), if_lhsisreceiver(this),
- if_lhsisoddball(this);
- GotoIf(IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver);
- GotoIf(IsBooleanMap(lhs_map), &if_notequal);
- GotoIf(IsOddballInstanceType(lhs_instance_type), &if_lhsisoddball);
- Branch(IsSymbolInstanceType(lhs_instance_type), &if_lhsissymbol,
- &if_notequal);
-
- BIND(&if_lhsisreceiver);
- {
- GotoIf(IsBooleanMap(rhs_map), &if_notequal);
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kReceiver));
- GotoIf(IsJSReceiverInstanceType(rhs_instance_type), &if_notequal);
- var_type_feedback->Bind(SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined));
- GotoIf(IsOddballInstanceType(rhs_instance_type), &if_notequal);
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kAny));
- Goto(&if_notequal);
+ BIND(&if_rhsisnotbigint);
+ Goto(&if_not_equivalent_types);
}
- BIND(&if_lhsisoddball);
- {
- STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE);
- GotoIf(IsBooleanMap(rhs_map), &if_notequal);
- GotoIf(
- Int32LessThan(rhs_instance_type, Int32Constant(ODDBALL_TYPE)),
- &if_notequal);
- var_type_feedback->Bind(SmiConstant(
- CompareOperationFeedback::kReceiverOrNullOrUndefined));
- Goto(&if_notequal);
- }
+ BIND(&if_lhsisnotbigint);
+ if (var_type_feedback != nullptr) {
+ // Load the instance type of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+ Label if_lhsissymbol(this), if_lhsisreceiver(this),
+ if_lhsisoddball(this);
+ GotoIf(IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver);
+ GotoIf(IsBooleanMap(lhs_map), &if_not_equivalent_types);
+ GotoIf(IsOddballInstanceType(lhs_instance_type),
+ &if_lhsisoddball);
+ Branch(IsSymbolInstanceType(lhs_instance_type), &if_lhsissymbol,
+ &if_not_equivalent_types);
+
+ BIND(&if_lhsisreceiver);
+ {
+ GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types);
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kReceiver);
+ GotoIf(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_notequal);
+ OverwriteFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
+ GotoIf(IsOddballInstanceType(rhs_instance_type), &if_notequal);
+ Goto(&if_not_equivalent_types);
+ }
- BIND(&if_lhsissymbol);
- {
- GotoIfNot(IsSymbolInstanceType(rhs_instance_type), &if_notequal);
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kSymbol));
+ BIND(&if_lhsisoddball);
+ {
+ STATIC_ASSERT(LAST_PRIMITIVE_TYPE == ODDBALL_TYPE);
+ GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types);
+ GotoIf(Int32LessThan(rhs_instance_type,
+ Int32Constant(ODDBALL_TYPE)),
+ &if_not_equivalent_types);
+ OverwriteFeedback(
+ var_type_feedback,
+ CompareOperationFeedback::kReceiverOrNullOrUndefined);
+ Goto(&if_notequal);
+ }
+
+ BIND(&if_lhsissymbol);
+ {
+ GotoIfNot(IsSymbolInstanceType(rhs_instance_type),
+ &if_not_equivalent_types);
+ OverwriteFeedback(var_type_feedback,
+ CompareOperationFeedback::kSymbol);
+ Goto(&if_notequal);
+ }
+ } else {
Goto(&if_notequal);
}
- } else {
- Goto(&if_notequal);
}
}
}
@@ -12421,10 +12435,8 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
BIND(&if_rhsissmi);
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kSignedSmall));
- }
+ CombineFeedback(var_type_feedback,
+ CompareOperationFeedback::kSignedSmall);
Goto(&if_notequal);
BIND(&if_rhsisnotsmi);
@@ -12442,17 +12454,14 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Node* lhs_value = SmiToFloat64(lhs);
Node* rhs_value = LoadHeapNumberValue(rhs);
- if (var_type_feedback != nullptr) {
- var_type_feedback->Bind(
- SmiConstant(CompareOperationFeedback::kNumber));
- }
+ CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber);
// Perform a floating point comparison of {lhs} and {rhs}.
Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
}
BIND(&if_rhsisnotnumber);
- Goto(&if_notequal);
+ Goto(&if_not_equivalent_types);
}
}
}
@@ -12463,6 +12472,12 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
Goto(&end);
}
+ BIND(&if_not_equivalent_types);
+ {
+ OverwriteFeedback(var_type_feedback, CompareOperationFeedback::kAny);
+ Goto(&if_notequal);
+ }
+
BIND(&if_notequal);
{
result.Bind(FalseConstant());
@@ -12505,60 +12520,60 @@ void CodeStubAssembler::BranchIfSameValue(Node* lhs, Node* rhs, Label* if_true,
BIND(&if_lhsisheapobject);
{
// Check if the {rhs} is a Smi.
- Branch(TaggedIsSmi(rhs),
- [&] {
- // Since {rhs} is a Smi, the comparison can only yield true
- // iff the {lhs} is a HeapNumber with the same float64 value.
- GotoIfNot(IsHeapNumber(lhs), if_false);
- var_lhs_value.Bind(LoadHeapNumberValue(lhs));
- var_rhs_value.Bind(SmiToFloat64(rhs));
- Goto(&do_fcmp);
- },
- [&] {
- // Now this can only yield true if either both {lhs} and {rhs} are
- // HeapNumbers with the same value, or both are Strings with the
- // same character sequence, or both are BigInts with the same
- // value.
- Label if_lhsisheapnumber(this), if_lhsisstring(this),
- if_lhsisbigint(this);
- Node* const lhs_map = LoadMap(lhs);
- GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
- if (mode != SameValueMode::kNumbersOnly) {
- Node* const lhs_instance_type = LoadMapInstanceType(lhs_map);
- GotoIf(IsStringInstanceType(lhs_instance_type), &if_lhsisstring);
- GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint);
- }
- Goto(if_false);
-
- BIND(&if_lhsisheapnumber);
- {
- GotoIfNot(IsHeapNumber(rhs), if_false);
- var_lhs_value.Bind(LoadHeapNumberValue(lhs));
- var_rhs_value.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
- }
-
- if (mode != SameValueMode::kNumbersOnly) {
- BIND(&if_lhsisstring);
- {
- // Now we can only yield true if {rhs} is also a String
- // with the same sequence of characters.
- GotoIfNot(IsString(rhs), if_false);
- Node* const result = CallBuiltin(
- Builtins::kStringEqual, NoContextConstant(), lhs, rhs);
- Branch(IsTrue(result), if_true, if_false);
- }
-
- BIND(&if_lhsisbigint);
- {
- GotoIfNot(IsBigInt(rhs), if_false);
- Node* const result =
- CallRuntime(Runtime::kBigIntEqualToBigInt,
- NoContextConstant(), lhs, rhs);
- Branch(IsTrue(result), if_true, if_false);
- }
- }
- });
+ Branch(
+ TaggedIsSmi(rhs),
+ [&] {
+ // Since {rhs} is a Smi, the comparison can only yield true
+ // iff the {lhs} is a HeapNumber with the same float64 value.
+ GotoIfNot(IsHeapNumber(lhs), if_false);
+ var_lhs_value.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_value.Bind(SmiToFloat64(rhs));
+ Goto(&do_fcmp);
+ },
+ [&] {
+ // Now this can only yield true if either both {lhs} and {rhs} are
+ // HeapNumbers with the same value, or both are Strings with the
+ // same character sequence, or both are BigInts with the same
+ // value.
+ Label if_lhsisheapnumber(this), if_lhsisstring(this),
+ if_lhsisbigint(this);
+ Node* const lhs_map = LoadMap(lhs);
+ GotoIf(IsHeapNumberMap(lhs_map), &if_lhsisheapnumber);
+ if (mode != SameValueMode::kNumbersOnly) {
+ Node* const lhs_instance_type = LoadMapInstanceType(lhs_map);
+ GotoIf(IsStringInstanceType(lhs_instance_type), &if_lhsisstring);
+ GotoIf(IsBigIntInstanceType(lhs_instance_type), &if_lhsisbigint);
+ }
+ Goto(if_false);
+
+ BIND(&if_lhsisheapnumber);
+ {
+ GotoIfNot(IsHeapNumber(rhs), if_false);
+ var_lhs_value.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_value.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ if (mode != SameValueMode::kNumbersOnly) {
+ BIND(&if_lhsisstring);
+ {
+ // Now we can only yield true if {rhs} is also a String
+ // with the same sequence of characters.
+ GotoIfNot(IsString(rhs), if_false);
+ Node* const result = CallBuiltin(Builtins::kStringEqual,
+ NoContextConstant(), lhs, rhs);
+ Branch(IsTrue(result), if_true, if_false);
+ }
+
+ BIND(&if_lhsisbigint);
+ {
+ GotoIfNot(IsBigInt(rhs), if_false);
+ Node* const result = CallRuntime(Runtime::kBigIntEqualToBigInt,
+ NoContextConstant(), lhs, rhs);
+ Branch(IsTrue(result), if_true, if_false);
+ }
+ }
+ });
}
BIND(&do_fcmp);
@@ -13211,9 +13226,9 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSArrayBufferViewByteOffset(
JSArrayBufferView::kByteOffsetOffset);
}
-TNode<Smi> CodeStubAssembler::LoadJSTypedArrayLength(
+TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength(
TNode<JSTypedArray> typed_array) {
- return LoadObjectField<Smi>(typed_array, JSTypedArray::kLengthOffset);
+ return LoadObjectField<UintPtrT>(typed_array, JSTypedArray::kLengthOffset);
}
CodeStubArguments::CodeStubArguments(
@@ -13247,7 +13262,7 @@ void CodeStubArguments::SetReceiver(TNode<Object> object) const {
TNode<WordT> CodeStubArguments::AtIndexPtr(
Node* index, CodeStubAssembler::ParameterMode mode) const {
- typedef compiler::Node Node;
+ using Node = compiler::Node;
Node* negated_index = assembler_->IntPtrOrSmiSub(
assembler_->IntPtrOrSmiConstant(0, mode), index, mode);
Node* offset = assembler_->ElementOffsetFromIndex(
@@ -13389,6 +13404,19 @@ Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
return IsSetWord32(elements_kind, 1);
}
+Node* CodeStubAssembler::IsHoleyFastElementsKindForRead(Node* elements_kind) {
+ CSA_ASSERT(this,
+ Uint32LessThanOrEqual(elements_kind,
+ Int32Constant(LAST_FROZEN_ELEMENTS_KIND)));
+
+ STATIC_ASSERT(HOLEY_SMI_ELEMENTS == (PACKED_SMI_ELEMENTS | 1));
+ STATIC_ASSERT(HOLEY_ELEMENTS == (PACKED_ELEMENTS | 1));
+ STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == (PACKED_DOUBLE_ELEMENTS | 1));
+ STATIC_ASSERT(HOLEY_SEALED_ELEMENTS == (PACKED_SEALED_ELEMENTS | 1));
+ STATIC_ASSERT(HOLEY_FROZEN_ELEMENTS == (PACKED_FROZEN_ELEMENTS | 1));
+ return IsSetWord32(elements_kind, 1);
+}
+
Node* CodeStubAssembler::IsElementsKindGreaterThan(
Node* target_kind, ElementsKind reference_kind) {
return Int32GreaterThan(target_kind, Int32Constant(reference_kind));
@@ -13402,10 +13430,9 @@ TNode<BoolT> CodeStubAssembler::IsElementsKindLessThanOrEqual(
TNode<BoolT> CodeStubAssembler::IsElementsKindInRange(
TNode<Int32T> target_kind, ElementsKind lower_reference_kind,
ElementsKind higher_reference_kind) {
- return Int32LessThanOrEqual(
+ return Uint32LessThanOrEqual(
Int32Sub(target_kind, Int32Constant(lower_reference_kind)),
- Int32Sub(Int32Constant(higher_reference_kind),
- Int32Constant(lower_reference_kind)));
+ Int32Constant(higher_reference_kind - lower_reference_kind));
}
Node* CodeStubAssembler::IsDebugActive() {
@@ -13504,7 +13531,9 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
ASM_WASM_DATA_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
- FUNCTION_TEMPLATE_INFO_TYPE};
+ FUNCTION_TEMPLATE_INFO_TYPE,
+ WASM_JS_FUNCTION_DATA_TYPE,
+ WASM_CAPI_FUNCTION_DATA_TYPE};
Label check_is_bytecode_array(this);
Label check_is_exported_function_data(this);
Label check_is_asm_wasm_data(this);
@@ -13512,12 +13541,16 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
Label check_is_uncompiled_data_with_preparse_data(this);
Label check_is_function_template_info(this);
Label check_is_interpreter_data(this);
+ Label check_is_wasm_js_function_data(this);
+ Label check_is_wasm_capi_function_data(this);
Label* case_labels[] = {&check_is_bytecode_array,
&check_is_exported_function_data,
&check_is_asm_wasm_data,
&check_is_uncompiled_data_without_preparse_data,
&check_is_uncompiled_data_with_preparse_data,
- &check_is_function_template_info};
+ &check_is_function_template_info,
+ &check_is_wasm_js_function_data,
+ &check_is_wasm_capi_function_data};
STATIC_ASSERT(arraysize(case_values) == arraysize(case_labels));
Switch(data_type, &check_is_interpreter_data, case_values, case_labels,
arraysize(case_labels));
@@ -13560,6 +13593,18 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
CAST(sfi_data), InterpreterData::kInterpreterTrampolineOffset));
Goto(&done);
+ // IsWasmJSFunctionData: Use the wrapper code.
+ BIND(&check_is_wasm_js_function_data);
+ sfi_code = CAST(
+ LoadObjectField(CAST(sfi_data), WasmJSFunctionData::kWrapperCodeOffset));
+ Goto(&done);
+
+ // IsWasmCapiFunctionData: Use the wrapper code.
+ BIND(&check_is_wasm_capi_function_data);
+ sfi_code = CAST(LoadObjectField(CAST(sfi_data),
+ WasmCapiFunctionData::kWrapperCodeOffset));
+ Goto(&done);
+
BIND(&done);
return sfi_code.value();
}
@@ -13683,12 +13728,12 @@ Node* CodeStubAssembler::CheckEnumCache(Node* receiver, Label* if_empty,
return receiver_map;
}
-TNode<Object> CodeStubAssembler::GetArgumentValue(
- BaseBuiltinsFromDSLAssembler::Arguments args, TNode<IntPtrT> index) {
+TNode<Object> CodeStubAssembler::GetArgumentValue(TorqueStructArguments args,
+ TNode<IntPtrT> index) {
return CodeStubArguments(this, args).GetOptionalArgumentValue(index);
}
-BaseBuiltinsFromDSLAssembler::Arguments CodeStubAssembler::GetFrameArguments(
+TorqueStructArguments CodeStubAssembler::GetFrameArguments(
TNode<RawPtrT> frame, TNode<IntPtrT> argc) {
return CodeStubArguments(this, argc, frame, INTPTR_PARAMETERS)
.GetTorqueArguments();
@@ -13855,43 +13900,40 @@ void CodeStubAssembler::GotoIfInitialPrototypePropertiesModified(
TNode<Map> prototype_map = LoadMap(LoadMapPrototype(object_map));
GotoIfNot(WordEqual(prototype_map, initial_prototype_map), if_modified);
- if (FLAG_track_constant_fields) {
- // With constant field tracking, we need to make sure that important
- // properties in the prototype has not been tampered with. We do this by
- // checking that their slots in the prototype's descriptor array are still
- // marked as const.
- TNode<DescriptorArray> descriptors = LoadMapDescriptors(prototype_map);
-
- TNode<Uint32T> combined_details;
- for (int i = 0; i < properties.length(); i++) {
- // Assert the descriptor index is in-bounds.
- int descriptor = properties[i].descriptor_index;
- CSA_ASSERT(this, Int32LessThan(Int32Constant(descriptor),
- LoadNumberOfDescriptors(descriptors)));
- // Assert that the name is correct. This essentially checks that
- // the descriptor index corresponds to the insertion order in
- // the bootstrapper.
- CSA_ASSERT(this,
- WordEqual(LoadKeyByDescriptorEntry(descriptors, descriptor),
- LoadRoot(properties[i].name_root_index)));
-
- TNode<Uint32T> details =
- DescriptorArrayGetDetails(descriptors, Uint32Constant(descriptor));
- if (i == 0) {
- combined_details = details;
- } else {
- combined_details = Unsigned(Word32And(combined_details, details));
- }
+ // We need to make sure that relevant properties in the prototype have
+ // not been tampered with. We do this by checking that their slots
+ // in the prototype's descriptor array are still marked as const.
+ TNode<DescriptorArray> descriptors = LoadMapDescriptors(prototype_map);
+
+ TNode<Uint32T> combined_details;
+ for (int i = 0; i < properties.length(); i++) {
+ // Assert the descriptor index is in-bounds.
+ int descriptor = properties[i].descriptor_index;
+ CSA_ASSERT(this, Int32LessThan(Int32Constant(descriptor),
+ LoadNumberOfDescriptors(descriptors)));
+ // Assert that the name is correct. This essentially checks that
+ // the descriptor index corresponds to the insertion order in
+ // the bootstrapper.
+ CSA_ASSERT(this,
+ WordEqual(LoadKeyByDescriptorEntry(descriptors, descriptor),
+ LoadRoot(properties[i].name_root_index)));
+
+ TNode<Uint32T> details =
+ DescriptorArrayGetDetails(descriptors, Uint32Constant(descriptor));
+ if (i == 0) {
+ combined_details = details;
+ } else {
+ combined_details = Unsigned(Word32And(combined_details, details));
}
+ }
- TNode<Uint32T> constness =
- DecodeWord32<PropertyDetails::ConstnessField>(combined_details);
+ TNode<Uint32T> constness =
+ DecodeWord32<PropertyDetails::ConstnessField>(combined_details);
- GotoIfNot(
- Word32Equal(constness,
- Int32Constant(static_cast<int>(PropertyConstness::kConst))),
- if_modified);
- }
+ GotoIfNot(
+ Word32Equal(constness,
+ Int32Constant(static_cast<int>(PropertyConstness::kConst))),
+ if_modified);
}
TNode<String> CodeStubAssembler::TaggedToDirectString(TNode<Object> value,
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 2c8e6e1ad3..207eb509e1 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -2,25 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CODE_STUB_ASSEMBLER_H_
-#define V8_CODE_STUB_ASSEMBLER_H_
+#ifndef V8_CODEGEN_CODE_STUB_ASSEMBLER_H_
+#define V8_CODEGEN_CODE_STUB_ASSEMBLER_H_
#include <functional>
-#include "src/bailout-reason.h"
#include "src/base/macros.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/common/globals.h"
#include "src/compiler/code-assembler.h"
-#include "src/frames.h"
-#include "src/globals.h"
-#include "src/message-template.h"
-#include "src/objects.h"
+#include "src/execution/frames.h"
+#include "src/execution/message-template.h"
#include "src/objects/arguments.h"
#include "src/objects/bigint.h"
+#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/smi.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
-#include "torque-generated/builtins-base-from-dsl-gen.h"
+#include "torque-generated/exported-macros-assembler-tq.h"
namespace v8 {
namespace internal {
@@ -205,7 +205,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
// from a compiler directory OWNER).
class V8_EXPORT_PRIVATE CodeStubAssembler
: public compiler::CodeAssembler,
- public BaseBuiltinsFromDSLAssembler {
+ public TorqueGeneratedExportedMacrosAssembler {
public:
using Node = compiler::Node;
template <class T>
@@ -227,7 +227,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum SlackTrackingMode { kWithSlackTracking, kNoSlackTracking };
- typedef base::Flags<AllocationFlag> AllocationFlags;
+ using AllocationFlags = base::Flags<AllocationFlag>;
enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS };
@@ -278,7 +278,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
if (mode == ParameterMode::SMI_PARAMETERS) {
Smi constant;
if (ToSmiConstant(node, &constant)) {
- *out = static_cast<intptr_t>(constant->value());
+ *out = static_cast<intptr_t>(constant.value());
return true;
}
} else {
@@ -365,6 +365,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return CAST(heap_object);
}
+ TNode<JSStringIterator> HeapObjectToJSStringIterator(
+ TNode<HeapObject> heap_object, Label* fail) {
+ GotoIfNot(IsJSStringIterator(heap_object), fail);
+ return CAST(heap_object);
+ }
+
TNode<JSReceiver> HeapObjectToCallable(TNode<HeapObject> heap_object,
Label* fail) {
GotoIfNot(IsCallable(heap_object), fail);
@@ -509,6 +515,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<IntPtrT> TryIntPtrAdd(TNode<IntPtrT> a, TNode<IntPtrT> b,
Label* if_overflow);
+ TNode<IntPtrT> TryIntPtrSub(TNode<IntPtrT> a, TNode<IntPtrT> b,
+ Label* if_overflow);
+ TNode<Int32T> TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b,
+ Label* if_overflow);
TNode<Smi> TrySmiAdd(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
TNode<Smi> TrySmiSub(TNode<Smi> a, TNode<Smi> b, Label* if_overflow);
@@ -614,8 +624,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsRegularHeapObjectSize(TNode<IntPtrT> size);
- typedef std::function<void(Label*, Label*)> BranchGenerator;
- typedef std::function<Node*()> NodeGenerator;
+ using BranchGenerator = std::function<void(Label*, Label*)>;
+ using NodeGenerator = std::function<Node*()>;
void Assert(const BranchGenerator& branch, const char* message = nullptr,
const char* file = nullptr, int line = 0,
@@ -706,8 +716,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
template <class A>
TNode<A> SelectConstant(TNode<BoolT> condition, TNode<A> true_value,
TNode<A> false_value) {
- return Select<A>(condition, [=] { return true_value; },
- [=] { return false_value; });
+ return Select<A>(
+ condition, [=] { return true_value; }, [=] { return false_value; });
}
TNode<Int32T> SelectInt32Constant(SloppyTNode<BoolT> condition,
@@ -798,11 +808,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Load value from current parent frame by given offset in bytes.
Node* LoadFromParentFrame(int offset,
- MachineType rep = MachineType::AnyTagged());
+ MachineType type = MachineType::AnyTagged());
// Load an object pointer from a buffer that isn't in the heap.
Node* LoadBufferObject(Node* buffer, int offset,
- MachineType rep = MachineType::AnyTagged());
+ MachineType type = MachineType::AnyTagged());
TNode<RawPtrT> LoadBufferPointer(TNode<RawPtrT> buffer, int offset) {
return UncheckedCast<RawPtrT>(
LoadBufferObject(buffer, offset, MachineType::Pointer()));
@@ -812,7 +822,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Load a field from an object on the heap.
Node* LoadObjectField(SloppyTNode<HeapObject> object, int offset,
- MachineType rep);
+ MachineType type);
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<Object>>::value,
int>::type = 0>
@@ -831,7 +841,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
Node* LoadObjectField(SloppyTNode<HeapObject> object,
- SloppyTNode<IntPtrT> offset, MachineType rep);
+ SloppyTNode<IntPtrT> offset, MachineType type);
TNode<Object> LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset) {
return UncheckedCast<Object>(
@@ -858,6 +868,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
+ TNode<Object> LoadConstructorOrBackPointer(TNode<Map> map) {
+ return LoadObjectField(map, Map::kConstructorOrBackPointerOffset);
+ }
+
// Reference is the CSA-equivalent of a Torque reference value,
// representing an inner pointer into a HeapObject.
struct Reference {
@@ -873,37 +887,36 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_convertible<TNode<T>, TNode<Object>>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
- return CAST(LoadObjectField(reference.object, reference.offset,
- MachineTypeOf<T>::value));
+ return CAST(LoadFromObject(MachineTypeOf<T>::value, reference.object,
+ reference.offset));
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
- return UncheckedCast<T>(LoadObjectField(reference.object, reference.offset,
- MachineTypeOf<T>::value));
+ return UncheckedCast<T>(LoadFromObject(MachineTypeOf<T>::value,
+ reference.object, reference.offset));
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<Object>>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
- int const_offset;
+ MachineRepresentation rep = MachineRepresentationOf<T>::value;
+ StoreToObjectWriteBarrier write_barrier = StoreToObjectWriteBarrier::kFull;
if (std::is_same<T, Smi>::value) {
- StoreObjectFieldNoWriteBarrier(reference.object, reference.offset, value);
- } else if (std::is_same<T, Map>::value &&
- ToInt32Constant(reference.offset, const_offset) &&
- const_offset == HeapObject::kMapOffset) {
- StoreMap(reference.object, value);
- } else {
- StoreObjectField(reference.object, reference.offset, value);
+ write_barrier = StoreToObjectWriteBarrier::kNone;
+ } else if (std::is_same<T, Map>::value) {
+ write_barrier = StoreToObjectWriteBarrier::kMap;
}
+ StoreToObject(rep, reference.object, reference.offset, value,
+ write_barrier);
}
template <class T, typename std::enable_if<
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
- StoreObjectFieldNoWriteBarrier<T>(reference.object, reference.offset,
- value);
+ StoreToObject(MachineRepresentationOf<T>::value, reference.object,
+ reference.offset, value, StoreToObjectWriteBarrier::kNone);
}
// Tag a smi and store it.
@@ -1219,10 +1232,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Float64T> LoadDoubleWithHoleCheck(
SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
- TNode<RawPtrT> LoadFixedTypedArrayBackingStore(
- TNode<FixedTypedArrayBase> typed_array);
- TNode<RawPtrT> LoadFixedTypedArrayOnHeapBackingStore(
- TNode<FixedTypedArrayBase> typed_array);
Node* LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
@@ -1239,10 +1248,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BigInt> BigIntFromInt32Pair(TNode<IntPtrT> low, TNode<IntPtrT> high);
TNode<BigInt> BigIntFromUint32Pair(TNode<UintPtrT> low, TNode<UintPtrT> high);
- void StoreFixedTypedArrayElementFromTagged(
- TNode<Context> context, TNode<FixedTypedArrayBase> elements,
- TNode<Object> index_node, TNode<Object> value, ElementsKind elements_kind,
- ParameterMode parameter_mode);
+ void StoreJSTypedArrayElementFromTagged(TNode<Context> context,
+ TNode<JSTypedArray> typed_array,
+ TNode<Object> index_node,
+ TNode<Object> value,
+ ElementsKind elements_kind,
+ ParameterMode parameter_mode);
// Context manipulation
TNode<Object> LoadContextElement(SloppyTNode<Context> context,
@@ -1298,18 +1309,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
+ void UnsafeStoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
+ int offset, TNode<Object> value);
void StoreObjectFieldNoWriteBarrier(
- Node* object, Node* offset, Node* value,
+ Node* object, SloppyTNode<IntPtrT> offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
template <class T = Object>
- void StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object,
- TNode<IntPtrT> offset, TNode<T> value) {
+ void StoreObjectFieldNoWriteBarrier(Node* object, SloppyTNode<IntPtrT> offset,
+ TNode<T> value) {
StoreObjectFieldNoWriteBarrier(object, offset, value,
MachineRepresentationOf<T>::value);
}
template <class T = Object>
- void StoreObjectFieldNoWriteBarrier(TNode<HeapObject> object, int offset,
+ void StoreObjectFieldNoWriteBarrier(Node* object, int offset,
TNode<T> value) {
StoreObjectFieldNoWriteBarrier(object, offset, value,
MachineRepresentationOf<T>::value);
@@ -1337,12 +1350,20 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return StoreFixedArrayElement(object, index, value, barrier_mode,
CheckBounds::kDebugOnly);
}
+ void UnsafeStoreFixedArrayElement(
+ TNode<FixedArray> object, int index, TNode<Smi> value,
+ WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER) {
+ DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
+ return StoreFixedArrayElement(object, index, value,
+ UNSAFE_SKIP_WRITE_BARRIER,
+ CheckBounds::kDebugOnly);
+ }
void StoreFixedArrayElement(TNode<FixedArray> object, int index,
TNode<Smi> value,
CheckBounds check_bounds = CheckBounds::kAlways) {
return StoreFixedArrayElement(object, IntPtrConstant(index), value,
- SKIP_WRITE_BARRIER, 0, INTPTR_PARAMETERS,
- check_bounds);
+ UNSAFE_SKIP_WRITE_BARRIER, 0,
+ INTPTR_PARAMETERS, check_bounds);
}
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
@@ -1385,6 +1406,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
additional_offset, parameter_mode,
CheckBounds::kDebugOnly);
}
+ void UnsafeStoreFixedArrayElement(
+ TNode<FixedArray> array, Node* index, TNode<Smi> value,
+ WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS) {
+ DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
+ return StoreFixedArrayElement(array, index, value,
+ UNSAFE_SKIP_WRITE_BARRIER, additional_offset,
+ parameter_mode, CheckBounds::kDebugOnly);
+ }
void StorePropertyArrayElement(
TNode<PropertyArray> array, Node* index, SloppyTNode<Object> value,
@@ -1395,19 +1426,27 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
additional_offset, parameter_mode);
}
- void StoreFixedArrayElementSmi(
+ void StoreFixedArrayElement(
TNode<FixedArray> array, TNode<Smi> index, TNode<Object> value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
StoreFixedArrayElement(array, index, value, barrier_mode, 0,
SMI_PARAMETERS);
}
- void StoreFixedArrayElement(TNode<FixedArray> array, TNode<IntPtrT> index,
- TNode<Smi> value) {
- StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0);
+ void StoreFixedArrayElement(
+ TNode<FixedArray> array, TNode<IntPtrT> index, TNode<Smi> value,
+ WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER,
+ int additional_offset = 0) {
+ DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
+ StoreFixedArrayElement(array, index, TNode<Object>{value},
+ UNSAFE_SKIP_WRITE_BARRIER, additional_offset);
}
- void StoreFixedArrayElement(TNode<FixedArray> array, TNode<Smi> index,
- TNode<Smi> value) {
- StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER, 0,
+ void StoreFixedArrayElement(
+ TNode<FixedArray> array, TNode<Smi> index, TNode<Smi> value,
+ WriteBarrierMode barrier_mode = SKIP_WRITE_BARRIER,
+ int additional_offset = 0) {
+ DCHECK_EQ(SKIP_WRITE_BARRIER, barrier_mode);
+ StoreFixedArrayElement(array, index, TNode<Object>{value},
+ UNSAFE_SKIP_WRITE_BARRIER, additional_offset,
SMI_PARAMETERS);
}
@@ -1500,6 +1539,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Word32T> LoadBigIntBitfield(TNode<BigInt> bigint);
TNode<UintPtrT> LoadBigIntDigit(TNode<BigInt> bigint, int digit_index);
+ // Allocate a ByteArray with the given length.
+ TNode<ByteArray> AllocateByteArray(TNode<UintPtrT> length,
+ AllocationFlags flags = kNone);
+
// Allocate a SeqOneByteString with the given length.
TNode<String> AllocateSeqOneByteString(uint32_t length,
AllocationFlags flags = kNone);
@@ -1589,7 +1632,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
ElementsKind kind, TNode<Map> array_map, TNode<Smi> length,
Node* allocation_site, Node* capacity,
ParameterMode capacity_mode = INTPTR_PARAMETERS,
- AllocationFlags allocation_flags = kNone);
+ AllocationFlags allocation_flags = kNone,
+ int array_header_size = JSArray::kSize);
// Allocate a JSArray and fill elements with the hole.
// The ParameterMode argument is only used for the capacity parameter.
@@ -1606,16 +1650,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
TNode<JSArray> AllocateJSArray(ElementsKind kind, TNode<Map> array_map,
- TNode<IntPtrT> capacity, TNode<Smi> length) {
+ TNode<IntPtrT> capacity, TNode<Smi> length,
+ AllocationFlags allocation_flags = kNone) {
return AllocateJSArray(kind, array_map, capacity, length, nullptr,
- INTPTR_PARAMETERS);
+ INTPTR_PARAMETERS, allocation_flags);
}
// Allocate a JSArray and initialize the header fields.
TNode<JSArray> AllocateJSArray(TNode<Map> array_map,
TNode<FixedArrayBase> elements,
TNode<Smi> length,
- Node* allocation_site = nullptr);
+ Node* allocation_site = nullptr,
+ int array_header_size = JSArray::kSize);
enum class HoleConversionMode { kDontConvert, kConvertToUndefined };
// Clone a fast JSArray |array| into a new fast JSArray.
@@ -1840,7 +1886,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
kAllFixedArraysDontCopyCOW = kAllFixedArrays | kDontCopyCOW
};
- typedef base::Flags<ExtractFixedArrayFlag> ExtractFixedArrayFlags;
+ using ExtractFixedArrayFlags = base::Flags<ExtractFixedArrayFlag>;
// Copy a portion of an existing FixedArray or FixedDoubleArray into a new
// array, including special appropriate handling for empty arrays and COW
@@ -2114,6 +2160,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* ThrowIfNotJSReceiver(Node* context, Node* value,
MessageTemplate msg_template,
const char* method_name = nullptr);
+ void ThrowIfNotCallable(TNode<Context> context, TNode<Object> value,
+ const char* method_name);
void ThrowRangeError(Node* context, MessageTemplate message,
Node* arg0 = nullptr, Node* arg1 = nullptr,
@@ -2144,11 +2192,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsConsStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsConstructorMap(SloppyTNode<Map> map);
TNode<BoolT> IsConstructor(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsDebugInfo(TNode<HeapObject> object);
TNode<BoolT> IsDeprecatedMap(SloppyTNode<Map> map);
TNode<BoolT> IsNameDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsGlobalDictionary(SloppyTNode<HeapObject> object);
TNode<BoolT> IsExtensibleMap(SloppyTNode<Map> map);
- TNode<BoolT> IsPackedFrozenOrSealedElementsKindMap(SloppyTNode<Map> map);
+ TNode<BoolT> IsFrozenOrSealedElementsKindMap(SloppyTNode<Map> map);
TNode<BoolT> IsExtensibleNonPrototypeMap(TNode<Map> map);
TNode<BoolT> IsExternalStringInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsFeedbackCell(SloppyTNode<HeapObject> object);
@@ -2161,7 +2210,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsFixedArrayWithKindOrEmpty(SloppyTNode<HeapObject> object,
ElementsKind kind);
TNode<BoolT> IsFixedDoubleArray(SloppyTNode<HeapObject> object);
- TNode<BoolT> IsFixedTypedArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsFunctionWithPrototypeSlotMap(SloppyTNode<Map> map);
TNode<BoolT> IsHashTable(SloppyTNode<HeapObject> object);
TNode<BoolT> IsEphemeronHashTable(SloppyTNode<HeapObject> object);
@@ -2190,10 +2238,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSPromiseMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSPromise(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSProxy(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSStringIterator(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSReceiverInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSReceiverMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSReceiver(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSRegExp(SloppyTNode<HeapObject> object);
+ TNode<BoolT> IsJSTypedArrayInstanceType(SloppyTNode<Int32T> instance_type);
+ TNode<BoolT> IsJSTypedArrayMap(SloppyTNode<Map> map);
TNode<BoolT> IsJSTypedArray(SloppyTNode<HeapObject> object);
TNode<BoolT> IsJSValueInstanceType(SloppyTNode<Int32T> instance_type);
TNode<BoolT> IsJSValueMap(SloppyTNode<Map> map);
@@ -2318,6 +2369,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Node* IsFastSmiOrTaggedElementsKind(Node* elements_kind);
Node* IsFastSmiElementsKind(Node* elements_kind);
Node* IsHoleyFastElementsKind(Node* elements_kind);
+ Node* IsHoleyFastElementsKindForRead(Node* elements_kind);
Node* IsElementsKindGreaterThan(Node* target_kind,
ElementsKind reference_kind);
TNode<BoolT> IsElementsKindLessThanOrEqual(TNode<Int32T> target_kind,
@@ -2399,8 +2451,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SloppyTNode<Object> input);
// Convert any object to a String.
- TNode<String> ToString(SloppyTNode<Context> context,
- SloppyTNode<Object> input);
TNode<String> ToString_Inline(SloppyTNode<Context> context,
SloppyTNode<Object> input);
@@ -2880,10 +2930,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// This is a type of a lookup in holder generator function. In case of a
// property lookup the {key} is guaranteed to be an unique name and in case of
// element lookup the key is an Int32 index.
- typedef std::function<void(Node* receiver, Node* holder, Node* map,
- Node* instance_type, Node* key, Label* next_holder,
- Label* if_bailout)>
- LookupInHolder;
+ using LookupInHolder = std::function<void(
+ Node* receiver, Node* holder, Node* map, Node* instance_type, Node* key,
+ Label* next_holder, Label* if_bailout)>;
// For integer indexed exotic cases, check if the given string cannot be a
// special index. If we are not sure that the given string is not a special
@@ -2990,20 +3039,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Context> context);
// Store value to an elements array with given elements kind.
+ // TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS
+ // we pass {value} as BigInt object instead of int64_t. We should
+ // teach TurboFan to handle int64_t on 32-bit platforms eventually.
void StoreElement(Node* elements, ElementsKind kind, Node* index, Node* value,
ParameterMode mode);
- void EmitBigTypedArrayElementStore(TNode<JSTypedArray> object,
- TNode<FixedTypedArrayBase> elements,
- TNode<IntPtrT> intptr_key,
- TNode<Object> value,
- TNode<Context> context,
- Label* opt_if_detached);
- // Part of the above, refactored out to reuse in another place.
- void EmitBigTypedArrayElementStore(TNode<FixedTypedArrayBase> elements,
- TNode<RawPtrT> backing_store,
- TNode<IntPtrT> offset,
- TNode<BigInt> bigint_value);
// Implements the BigInt part of
// https://tc39.github.io/proposal-bigint/#sec-numbertorawbytes,
// including truncation to 64 bits (i.e. modulo 2^64).
@@ -3048,7 +3089,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class IndexAdvanceMode { kPre, kPost };
- typedef std::function<void(Node* index)> FastLoopBody;
+ using FastLoopBody = std::function<void(Node* index)>;
Node* BuildFastLoop(const VariableList& var_list, Node* start_index,
Node* end_index, const FastLoopBody& body, int increment,
@@ -3065,8 +3106,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class ForEachDirection { kForward, kReverse };
- typedef std::function<void(Node* fixed_array, Node* offset)>
- FastFixedArrayForEachBody;
+ using FastFixedArrayForEachBody =
+ std::function<void(Node* fixed_array, Node* offset)>;
void BuildFastFixedArrayForEach(
const CodeStubAssembler::VariableList& vars, Node* fixed_array,
@@ -3230,7 +3271,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
const char* method_name);
// JSTypedArray helpers
- TNode<Smi> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
+ TNode<UintPtrT> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
+ TNode<RawPtrT> LoadJSTypedArrayBackingStore(TNode<JSTypedArray> typed_array);
TNode<IntPtrT> ElementOffsetFromIndex(Node* index, ElementsKind kind,
ParameterMode mode, int base_size = 0);
@@ -3270,11 +3312,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* if_fast, Label* if_slow);
Node* CheckEnumCache(Node* receiver, Label* if_empty, Label* if_runtime);
- TNode<Object> GetArgumentValue(BaseBuiltinsFromDSLAssembler::Arguments args,
+ TNode<Object> GetArgumentValue(TorqueStructArguments args,
TNode<IntPtrT> index);
- BaseBuiltinsFromDSLAssembler::Arguments GetFrameArguments(
- TNode<RawPtrT> frame, TNode<IntPtrT> argc);
+ TorqueStructArguments GetFrameArguments(TNode<RawPtrT> frame,
+ TNode<IntPtrT> argc);
// Support for printf-style debugging
void Print(const char* s);
@@ -3398,8 +3440,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Uint32T> DescriptorArrayGetDetails(TNode<DescriptorArray> descriptors,
TNode<Uint32T> descriptor_number);
- typedef std::function<void(TNode<IntPtrT> descriptor_key_index)>
- ForEachDescriptorBodyFunction;
+ using ForEachDescriptorBodyFunction =
+ std::function<void(TNode<IntPtrT> descriptor_key_index)>;
// Descriptor array accessors based on key_index, which is equal to
// DescriptorArray::ToKeyIndex(descriptor).
@@ -3428,8 +3470,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<MaybeObject> LoadFieldTypeByDescriptorEntry(
TNode<DescriptorArray> descriptors, TNode<IntPtrT> descriptor);
- typedef std::function<void(TNode<Name> key, TNode<Object> value)>
- ForEachKeyValueFunction;
+ using ForEachKeyValueFunction =
+ std::function<void(TNode<Name> key, TNode<Object> value)>;
enum ForEachEnumerationMode {
// String and then Symbol properties according to the spec
@@ -3554,7 +3596,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
class V8_EXPORT_PRIVATE CodeStubArguments {
public:
- typedef compiler::Node Node;
+ using Node = compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
template <class T>
@@ -3579,7 +3621,7 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
// Used by Torque to construct arguments based on a Torque-defined
// struct of values.
CodeStubArguments(CodeStubAssembler* assembler,
- BaseBuiltinsFromDSLAssembler::Arguments torque_arguments)
+ TorqueStructArguments torque_arguments)
: assembler_(assembler),
argc_mode_(CodeStubAssembler::INTPTR_PARAMETERS),
receiver_mode_(ReceiverMode::kHasReceiver),
@@ -3616,11 +3658,10 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
return argc_;
}
- BaseBuiltinsFromDSLAssembler::Arguments GetTorqueArguments() const {
+ TorqueStructArguments GetTorqueArguments() const {
DCHECK_EQ(argc_mode_, CodeStubAssembler::INTPTR_PARAMETERS);
- return BaseBuiltinsFromDSLAssembler::Arguments{
- assembler_->UncheckedCast<RawPtrT>(fp_), base_,
- assembler_->UncheckedCast<IntPtrT>(argc_)};
+ return TorqueStructArguments{assembler_->UncheckedCast<RawPtrT>(fp_), base_,
+ assembler_->UncheckedCast<IntPtrT>(argc_)};
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index) {
@@ -3633,7 +3674,7 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
return assembler_->UncheckedCast<IntPtrT>(argc_);
}
- typedef std::function<void(Node* arg)> ForEachBodyFunction;
+ using ForEachBodyFunction = std::function<void(Node* arg)>;
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const ForEachBodyFunction& body, Node* first = nullptr,
@@ -3672,7 +3713,7 @@ class ToDirectStringAssembler : public CodeStubAssembler {
enum Flag {
kDontUnpackSlicedStrings = 1 << 0,
};
- typedef base::Flags<Flag> Flags;
+ using Flags = base::Flags<Flag>;
ToDirectStringAssembler(compiler::CodeAssemblerState* state, Node* string,
Flags flags = Flags());
@@ -3717,4 +3758,4 @@ DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags)
} // namespace internal
} // namespace v8
-#endif // V8_CODE_STUB_ASSEMBLER_H_
+#endif // V8_CODEGEN_CODE_STUB_ASSEMBLER_H_
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index e210dd4025..6e9613005e 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compilation-cache.h"
+#include "src/codegen/compilation-cache.h"
-#include "src/counters.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/heap/factory.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/objects/compilation-cache-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
-#include "src/visitors.h"
+#include "src/objects/visitors.h"
namespace v8 {
namespace internal {
@@ -29,8 +29,8 @@ CompilationCache::CompilationCache(Isolate* isolate)
eval_contextual_(isolate),
reg_exp_(isolate, kRegExpGenerations),
enabled_(true) {
- CompilationSubCache* subcaches[kSubCacheCount] =
- {&script_, &eval_global_, &eval_contextual_, &reg_exp_};
+ CompilationSubCache* subcaches[kSubCacheCount] = {
+ &script_, &eval_global_, &eval_contextual_, &reg_exp_};
for (int i = 0; i < kSubCacheCount; ++i) {
subcaches_[i] = subcaches[i];
}
@@ -39,7 +39,7 @@ CompilationCache::CompilationCache(Isolate* isolate)
Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
DCHECK(generation < generations_);
Handle<CompilationCacheTable> result;
- if (tables_[generation]->IsUndefined(isolate())) {
+ if (tables_[generation].IsUndefined(isolate())) {
result = CompilationCacheTable::New(isolate(), kInitialCacheSize);
tables_[generation] = *result;
} else {
@@ -53,8 +53,8 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
void CompilationSubCache::Age() {
// Don't directly age single-generation caches.
if (generations_ == 1) {
- if (!tables_[0]->IsUndefined(isolate())) {
- CompilationCacheTable::cast(tables_[0])->Age();
+ if (!tables_[0].IsUndefined(isolate())) {
+ CompilationCacheTable::cast(tables_[0]).Age();
}
return;
}
@@ -76,14 +76,14 @@ void CompilationSubCache::Iterate(RootVisitor* v) {
void CompilationSubCache::Clear() {
MemsetPointer(reinterpret_cast<Address*>(tables_),
- ReadOnlyRoots(isolate()).undefined_value()->ptr(),
- generations_);
+ ReadOnlyRoots(isolate()).undefined_value().ptr(), generations_);
}
void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
- { HandleScope scope(isolate());
+ {
+ HandleScope scope(isolate());
for (int generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
table->Remove(*function_info);
@@ -107,13 +107,13 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
// an undefined name to have the same origin.
Handle<Object> name;
if (!maybe_name.ToHandle(&name)) {
- return script->name()->IsUndefined(isolate());
+ return script->name().IsUndefined(isolate());
}
// Do the fast bailout checks first.
if (line_offset != script->line_offset()) return false;
if (column_offset != script->column_offset()) return false;
// Check that both names are strings. If not, no match.
- if (!name->IsString() || !script->name()->IsString()) return false;
+ if (!name->IsString() || !script->name().IsString()) return false;
// Are the origin_options same?
if (resource_options.Flags() != script->origin_options().Flags())
return false;
@@ -135,7 +135,8 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheScript::Lookup(
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
- { HandleScope scope(isolate());
+ {
+ HandleScope scope(isolate());
const int generation = 0;
DCHECK_EQ(generations(), 1);
Handle<CompilationCacheTable> table = GetTable(generation);
@@ -218,9 +219,8 @@ void CompilationCacheEval::Put(Handle<String> source,
SetFirstTable(table);
}
-MaybeHandle<FixedArray> CompilationCacheRegExp::Lookup(
- Handle<String> source,
- JSRegExp::Flags flags) {
+MaybeHandle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
+ JSRegExp::Flags flags) {
HandleScope scope(isolate());
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
@@ -245,8 +245,7 @@ MaybeHandle<FixedArray> CompilationCacheRegExp::Lookup(
}
}
-void CompilationCacheRegExp::Put(Handle<String> source,
- JSRegExp::Flags flags,
+void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data) {
HandleScope scope(isolate());
Handle<CompilationCacheTable> table = GetFirstTable();
@@ -343,8 +342,7 @@ void CompilationCache::PutEval(Handle<String> source,
LOG(isolate(), CompilationCacheEvent("put", cache_type, *function_info));
}
-void CompilationCache::PutRegExp(Handle<String> source,
- JSRegExp::Flags flags,
+void CompilationCache::PutRegExp(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data) {
if (!IsEnabled()) return;
@@ -369,9 +367,7 @@ void CompilationCache::MarkCompactPrologue() {
}
}
-void CompilationCache::Enable() {
- enabled_ = true;
-}
+void CompilationCache::Enable() { enabled_ = true; }
void CompilationCache::Disable() {
enabled_ = false;
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index a387c05cb6..35595b1985 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILATION_CACHE_H_
-#define V8_COMPILATION_CACHE_H_
+#ifndef V8_CODEGEN_COMPILATION_CACHE_H_
+#define V8_CODEGEN_COMPILATION_CACHE_H_
-#include "src/allocation.h"
#include "src/objects/compilation-cache.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -24,8 +24,7 @@ class RootVisitor;
class CompilationSubCache {
public:
CompilationSubCache(Isolate* isolate, int generations)
- : isolate_(isolate),
- generations_(generations) {
+ : isolate_(isolate), generations_(generations) {
tables_ = NewArray<Object>(generations);
}
@@ -73,7 +72,6 @@ class CompilationSubCache {
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
};
-
// Sub-cache for scripts.
class CompilationCacheScript : public CompilationSubCache {
public:
@@ -98,7 +96,6 @@ class CompilationCacheScript : public CompilationSubCache {
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
};
-
// Sub-cache for eval scripts. Two caches for eval are used. One for eval calls
// in native contexts and one for eval calls in other contexts. The cache
// considers the following pieces of information when checking for matching
@@ -111,7 +108,7 @@ class CompilationCacheScript : public CompilationSubCache {
// More specifically these are the CompileString, DebugEvaluate and
// DebugEvaluateGlobal runtime functions.
// 4. The start position of the calling scope.
-class CompilationCacheEval: public CompilationSubCache {
+class CompilationCacheEval : public CompilationSubCache {
public:
explicit CompilationCacheEval(Isolate* isolate)
: CompilationSubCache(isolate, 1) {}
@@ -130,18 +127,17 @@ class CompilationCacheEval: public CompilationSubCache {
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
-
// Sub-cache for regular expressions.
-class CompilationCacheRegExp: public CompilationSubCache {
+class CompilationCacheRegExp : public CompilationSubCache {
public:
CompilationCacheRegExp(Isolate* isolate, int generations)
- : CompilationSubCache(isolate, generations) { }
+ : CompilationSubCache(isolate, generations) {}
MaybeHandle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
- void Put(Handle<String> source,
- JSRegExp::Flags flags,
+ void Put(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
@@ -170,8 +166,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
- MaybeHandle<FixedArray> LookupRegExp(
- Handle<String> source, JSRegExp::Flags flags);
+ MaybeHandle<FixedArray> LookupRegExp(Handle<String> source,
+ JSRegExp::Flags flags);
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
@@ -188,8 +184,7 @@ class V8_EXPORT_PRIVATE CompilationCache {
// Associate the (source, flags) pair to the given regexp data.
// This may overwrite an existing mapping.
- void PutRegExp(Handle<String> source,
- JSRegExp::Flags flags,
+ void PutRegExp(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data);
// Clear the cache - also used to initialize the cache at startup.
@@ -243,4 +238,4 @@ class V8_EXPORT_PRIVATE CompilationCache {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILATION_CACHE_H_
+#endif // V8_CODEGEN_COMPILATION_CACHE_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/codegen/compiler.cc
index d3cebfd58e..5197dd3a2f 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -2,44 +2,44 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler.h"
+#include "src/codegen/compiler.h"
#include <algorithm>
#include <memory>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/asmjs/asm-js.h"
-#include "src/assembler-inl.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/base/optional.h"
-#include "src/bootstrapper.h"
-#include "src/compilation-cache.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/common/globals.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler/pipeline.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/globals.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/vm-state-inl.h"
#include "src/heap/heap-inl.h"
+#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate-inl.h"
-#include "src/log-inl.h"
-#include "src/message-template.h"
+#include "src/logging/log-inl.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/map.h"
-#include "src/optimized-compilation-info.h"
-#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
-#include "src/runtime-profiler.h"
#include "src/snapshot/code-serializer.h"
-#include "src/unoptimized-compilation-info.h"
-#include "src/vm-state-inl.h"
+#include "src/utils/ostreams.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
namespace v8 {
@@ -94,7 +94,7 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
int line_num = Script::GetLineNumber(script, shared->StartPosition()) + 1;
int column_num = Script::GetColumnNumber(script, shared->StartPosition()) + 1;
- String script_name = script->name()->IsString()
+ String script_name = script->name().IsString()
? String::cast(script->name())
: ReadOnlyRoots(isolate).empty_string();
CodeEventListener::LogEventsAndTags log_tag =
@@ -127,9 +127,9 @@ void LogFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
}
ScriptOriginOptions OriginOptionsForEval(Object script) {
- if (!script->IsScript()) return ScriptOriginOptions();
+ if (!script.IsScript()) return ScriptOriginOptions();
- const auto outer_origin_options = Script::cast(script)->origin_options();
+ const auto outer_origin_options = Script::cast(script).origin_options();
return ScriptOriginOptions(outer_origin_options.IsSharedCrossOrigin(),
outer_origin_options.IsOpaque());
}
@@ -270,7 +270,7 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
compiled_functions++;
- code_size += function->shared()->SourceSize();
+ code_size += function->shared().SourceSize();
PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
compiled_functions, code_size, compilation_time);
}
@@ -305,8 +305,12 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode,
switch (mode) {
case OptimizedCompilationJob::kConcurrent:
time_background += time_taken_to_execute_;
+ counters->turbofan_optimize_concurrent_total_time()->AddSample(
+ static_cast<int>(ElapsedTime().InMicroseconds()));
break;
case OptimizedCompilationJob::kSynchronous:
+ counters->turbofan_optimize_non_concurrent_total_time()->AddSample(
+ static_cast<int>(ElapsedTime().InMicroseconds()));
time_foreground += time_taken_to_execute_;
break;
}
@@ -379,7 +383,7 @@ void InstallBytecodeArray(Handle<BytecodeArray> bytecode_array,
Script::GetLineNumber(script, shared_info->StartPosition()) + 1;
int column_num =
Script::GetColumnNumber(script, shared_info->StartPosition()) + 1;
- String script_name = script->name()->IsString()
+ String script_name = script->name().IsString()
? String::cast(script->name())
: ReadOnlyRoots(isolate).empty_string();
CodeEventListener::LogEventsAndTags log_tag = Logger::ToNativeByScript(
@@ -429,8 +433,8 @@ void EnsureSharedFunctionInfosArrayOnScript(ParseInfo* parse_info,
Isolate* isolate) {
DCHECK(parse_info->is_toplevel());
DCHECK(!parse_info->script().is_null());
- if (parse_info->script()->shared_function_infos()->length() > 0) {
- DCHECK_EQ(parse_info->script()->shared_function_infos()->length(),
+ if (parse_info->script()->shared_function_infos().length() > 0) {
+ DCHECK_EQ(parse_info->script()->shared_function_infos().length(),
parse_info->max_function_literal_id() + 1);
return;
}
@@ -655,15 +659,15 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
if (osr_offset.IsNone()) {
if (function->has_feedback_vector()) {
FeedbackVector feedback_vector = function->feedback_vector();
- feedback_vector->EvictOptimizedCodeMarkedForDeoptimization(
+ feedback_vector.EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "GetCodeFromOptimizedCodeCache");
- Code code = feedback_vector->optimized_code();
+ Code code = feedback_vector.optimized_code();
if (!code.is_null()) {
// Caching of optimized code enabled and optimized code found.
- DCHECK(!code->marked_for_deoptimization());
- DCHECK(function->shared()->is_compiled());
- return Handle<Code>(code, feedback_vector->GetIsolate());
+ DCHECK(!code.marked_for_deoptimization());
+ DCHECK(function->shared().is_compiled());
+ return Handle<Code>(code, feedback_vector.GetIsolate());
}
}
}
@@ -696,7 +700,7 @@ void InsertCodeIntoOptimizedCodeCache(
// Cache optimized context-specific code.
Handle<JSFunction> function = compilation_info->closure();
Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
- Handle<Context> native_context(function->context()->native_context(),
+ Handle<Context> native_context(function->context().native_context(),
function->GetIsolate());
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
@@ -796,7 +800,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// If code was pending optimization for testing, delete remove the strong root
// that was preventing the bytecode from being flushed between marking and
// optimization.
- if (!isolate->heap()->pending_optimize_for_test_bytecode()->IsUndefined()) {
+ if (!isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
Handle<ObjectHashTable> table =
handle(ObjectHashTable::cast(
isolate->heap()->pending_optimize_for_test_bytecode()),
@@ -823,7 +827,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Reset profiler ticks, function is no longer considered hot.
DCHECK(shared->is_compiled());
- function->feedback_vector()->set_profiler_ticks(0);
+ function->feedback_vector().set_profiler_ticks(0);
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
@@ -833,7 +837,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
- bool has_script = shared->script()->IsScript();
+ bool has_script = shared->script().IsScript();
// BUG(5946): This DCHECK is necessary to make certain that we won't
// tolerate the lack of a script without bytecode.
DCHECK_IMPLIES(!has_script, shared->HasBytecodeArray());
@@ -877,8 +881,8 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// Set the optimization marker and return a code object which checks it.
function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
DCHECK(function->IsInterpreted() ||
- (!function->is_compiled() && function->shared()->IsInterpreted()));
- DCHECK(function->shared()->HasBytecodeArray());
+ (!function->is_compiled() && function->shared().IsInterpreted()));
+ DCHECK(function->shared().HasBytecodeArray());
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
} else {
@@ -1145,7 +1149,6 @@ void BackgroundCompileTask::Run() {
}
}
-
// ----------------------------------------------------------------------------
// Implementation of Compiler
@@ -1175,7 +1178,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info) {
DCHECK(shared_info->is_compiled());
DCHECK(shared_info->HasBytecodeArray());
- DCHECK(!shared_info->GetBytecodeArray()->HasSourcePositionTable());
+ DCHECK(!shared_info->GetBytecodeArray().HasSourcePositionTable());
// Collecting source positions requires allocating a new source position
// table.
@@ -1263,8 +1266,8 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
// If debugging, make sure that instrumented bytecode has the source position
// table set on it as well.
if (shared_info->HasDebugInfo() &&
- shared_info->GetDebugInfo()->HasInstrumentedBytecodeArray()) {
- shared_info->GetDebugBytecodeArray()->set_source_position_table(
+ shared_info->GetDebugInfo().HasInstrumentedBytecodeArray()) {
+ shared_info->GetDebugBytecodeArray().set_source_position_table(
source_position_table);
}
@@ -1312,7 +1315,7 @@ bool Compiler::Compile(Handle<SharedFunctionInfo> shared_info,
parse_info.set_consumed_preparse_data(ConsumedPreparseData::For(
isolate,
handle(
- shared_info->uncompiled_data_with_preparse_data()->preparse_data(),
+ shared_info->uncompiled_data_with_preparse_data().preparse_data(),
isolate)));
}
@@ -1374,7 +1377,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
JSFunction::InitializeFeedbackCell(function);
// Optimize now if --always-opt is enabled.
- if (FLAG_always_opt && !function->shared()->HasAsmWasmData()) {
+ if (FLAG_always_opt && !function->shared().HasAsmWasmData()) {
if (FLAG_trace_opt) {
PrintF("[optimizing ");
function->ShortPrint();
@@ -1392,7 +1395,7 @@ bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag,
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
- DCHECK(function->shared()->is_compiled());
+ DCHECK(function->shared().is_compiled());
DCHECK(function->is_compiled());
return true;
}
@@ -1446,8 +1449,8 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
// Optimization failed, get unoptimized code. Unoptimized code must exist
// already if we are optimizing.
DCHECK(!isolate->has_pending_exception());
- DCHECK(function->shared()->is_compiled());
- DCHECK(function->shared()->IsInterpreted());
+ DCHECK(function->shared().is_compiled());
+ DCHECK(function->shared().IsInterpreted());
code = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
@@ -1456,7 +1459,7 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
- DCHECK(function->shared()->is_compiled());
+ DCHECK(function->shared().is_compiled());
DCHECK(function->is_compiled());
DCHECK_IMPLIES(function->HasOptimizationMarker(),
function->IsInOptimizationQueue());
@@ -1596,11 +1599,10 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
return result;
}
-
bool Compiler::CodeGenerationFromStringsAllowed(Isolate* isolate,
Handle<Context> context,
Handle<String> source) {
- DCHECK(context->allow_code_gen_from_strings()->IsFalse(isolate));
+ DCHECK(context->allow_code_gen_from_strings().IsFalse(isolate));
// Check with callback if set.
AllowCodeGenerationFromStringsCallback callback =
isolate->allow_code_gen_callback();
@@ -1622,20 +1624,21 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
// Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings()->IsFalse(isolate) &&
+ if (native_context->allow_code_gen_from_strings().IsFalse(isolate) &&
!CodeGenerationFromStringsAllowed(isolate, native_context, source)) {
Handle<Object> error_message =
native_context->ErrorMessageForCodeGenerationFromStrings();
- THROW_NEW_ERROR(isolate, NewEvalError(MessageTemplate::kCodeGenFromStrings,
- error_message),
- JSFunction);
+ THROW_NEW_ERROR(
+ isolate,
+ NewEvalError(MessageTemplate::kCodeGenFromStrings, error_message),
+ JSFunction);
}
// Compile source string in the native context.
int eval_scope_position = 0;
int eval_position = kNoSourcePosition;
Handle<SharedFunctionInfo> outer_info(
- native_context->empty_function()->shared(), isolate);
+ native_context->empty_function().shared(), isolate);
return Compiler::GetFunctionFromEval(
source, outer_info, native_context, LanguageMode::kSloppy, restriction,
parameters_end_pos, eval_scope_position, eval_position);
@@ -2034,7 +2037,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
SharedFunctionInfo::ScriptIterator infos(isolate, *script);
for (SharedFunctionInfo info = infos.Next(); !info.is_null();
info = infos.Next()) {
- if (info->is_wrapped()) {
+ if (info.is_wrapped()) {
wrapped = Handle<SharedFunctionInfo>(info, isolate);
break;
}
@@ -2159,7 +2162,7 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
// Reset profiler ticks, function is no longer considered hot.
- compilation_info->closure()->feedback_vector()->set_profiler_ticks(0);
+ compilation_info->closure()->feedback_vector().set_profiler_ticks(0);
DCHECK(!shared->HasBreakInfo());
@@ -2214,12 +2217,12 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
JSFunction::InitializeFeedbackCell(function);
Code code = function->has_feedback_vector()
- ? function->feedback_vector()->optimized_code()
+ ? function->feedback_vector().optimized_code()
: Code();
if (!code.is_null()) {
// Caching of optimized code enabled and optimized code found.
- DCHECK(!code->marked_for_deoptimization());
- DCHECK(function->shared()->is_compiled());
+ DCHECK(!code.marked_for_deoptimization());
+ DCHECK(function->shared().is_compiled());
function->set_code(code);
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/codegen/compiler.h
index 2894f565fd..a598706373 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_H_
-#define V8_COMPILER_H_
+#ifndef V8_CODEGEN_COMPILER_H_
+#define V8_CODEGEN_COMPILER_H_
#include <forward_list>
#include <memory>
-#include "src/allocation.h"
-#include "src/bailout-reason.h"
#include "src/base/platform/elapsed-timer.h"
-#include "src/code-events.h"
-#include "src/contexts.h"
-#include "src/isolate.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/execution/isolate.h"
+#include "src/logging/code-events.h"
+#include "src/utils/allocation.h"
+#include "src/objects/contexts.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -35,8 +35,8 @@ class UnoptimizedCompilationInfo;
class UnoptimizedCompilationJob;
class WorkerThreadRuntimeCallStats;
-typedef std::forward_list<std::unique_ptr<UnoptimizedCompilationJob>>
- UnoptimizedCompilationJobList;
+using UnoptimizedCompilationJobList =
+ std::forward_list<std::unique_ptr<UnoptimizedCompilationJob>>;
// The V8 compiler API.
//
@@ -405,4 +405,4 @@ struct ScriptStreamingData {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_H_
+#endif // V8_CODEGEN_COMPILER_H_
diff --git a/deps/v8/src/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index ebb7099120..613a142f24 100644
--- a/deps/v8/src/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/constant-pool.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/constant-pool.h"
+#include "src/codegen/assembler-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/constant-pool.h b/deps/v8/src/codegen/constant-pool.h
index 5b87d9a4a5..4399f6fc1f 100644
--- a/deps/v8/src/constant-pool.h
+++ b/deps/v8/src/codegen/constant-pool.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CONSTANT_POOL_H_
-#define V8_CONSTANT_POOL_H_
+#ifndef V8_CODEGEN_CONSTANT_POOL_H_
+#define V8_CODEGEN_CONSTANT_POOL_H_
#include <map>
-#include "src/double.h"
-#include "src/globals.h"
-#include "src/label.h"
-#include "src/reloc-info.h"
+#include "src/codegen/label.h"
+#include "src/codegen/reloc-info.h"
+#include "src/common/globals.h"
+#include "src/numbers/double.h"
namespace v8 {
namespace internal {
@@ -164,4 +164,4 @@ class ConstantPoolBuilder {
} // namespace internal
} // namespace v8
-#endif // V8_CONSTANT_POOL_H_
+#endif // V8_CODEGEN_CONSTANT_POOL_H_
diff --git a/deps/v8/src/codegen/constants-arch.h b/deps/v8/src/codegen/constants-arch.h
new file mode 100644
index 0000000000..b49d2b64f2
--- /dev/null
+++ b/deps/v8/src/codegen/constants-arch.h
@@ -0,0 +1,28 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_CONSTANTS_ARCH_H_
+#define V8_CODEGEN_CONSTANTS_ARCH_H_
+
+#if V8_TARGET_ARCH_ARM
+#include "src/codegen/arm/constants-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/codegen/arm64/constants-arm64.h" // NOLINT
+#elif V8_TARGET_ARCH_IA32
+#include "src/codegen/ia32/constants-ia32.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/codegen/mips/constants-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/codegen/mips64/constants-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/codegen/ppc/constants-ppc.h" // NOLINT
+#elif V8_TARGET_ARCH_S390
+#include "src/codegen/s390/constants-s390.h" // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/codegen/x64/constants-x64.h" // NOLINT
+#else
+#error Unsupported target architecture.
+#endif
+
+#endif // V8_CODEGEN_CONSTANTS_ARCH_H_
diff --git a/deps/v8/src/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index 23023707a7..b2f792e339 100644
--- a/deps/v8/src/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CPU_FEATURES_H_
-#define V8_CPU_FEATURES_H_
+#ifndef V8_CODEGEN_CPU_FEATURES_H_
+#define V8_CODEGEN_CPU_FEATURES_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
@@ -48,6 +48,7 @@ enum CpuFeature {
GENERAL_INSTR_EXT,
FLOATING_POINT_EXT,
VECTOR_FACILITY,
+ VECTOR_ENHANCE_FACILITY_1,
MISC_INSTR_EXT2,
NUMBER_OF_CPU_FEATURES,
@@ -121,4 +122,4 @@ class V8_EXPORT_PRIVATE CpuFeatures : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_CPU_FEATURES_H_
+#endif // V8_CODEGEN_CPU_FEATURES_H_
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/codegen/external-reference-table.cc
index a54dce2ea5..b43f1a2405 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/codegen/external-reference-table.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/external-reference-table.h"
+#include "src/codegen/external-reference-table.h"
-#include "src/accessors.h"
-#include "src/counters.h"
-#include "src/external-reference.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/external-reference.h"
#include "src/ic/stub-cache.h"
+#include "src/logging/counters.h"
#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
#define SYMBOLIZE_FUNCTION
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/codegen/external-reference-table.h
index 45d2f95c7e..798859b185 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/codegen/external-reference-table.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_EXTERNAL_REFERENCE_TABLE_H_
-#define V8_EXTERNAL_REFERENCE_TABLE_H_
+#ifndef V8_CODEGEN_EXTERNAL_REFERENCE_TABLE_H_
+#define V8_CODEGEN_EXTERNAL_REFERENCE_TABLE_H_
#include <vector>
-#include "src/accessors.h"
+#include "src/builtins/accessors.h"
#include "src/builtins/builtins.h"
-#include "src/counters-definitions.h"
-#include "src/external-reference.h"
+#include "src/codegen/external-reference.h"
+#include "src/logging/counters-definitions.h"
namespace v8 {
namespace internal {
@@ -108,4 +108,4 @@ STATIC_ASSERT(ExternalReferenceTable::kSizeInBytes ==
} // namespace internal
} // namespace v8
-#endif // V8_EXTERNAL_REFERENCE_TABLE_H_
+#endif // V8_CODEGEN_EXTERNAL_REFERENCE_TABLE_H_
diff --git a/deps/v8/src/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index a4d50ae21f..5538f361f0 100644
--- a/deps/v8/src/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -2,32 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/external-reference.h"
+#include "src/codegen/external-reference.h"
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/base/ieee754.h"
+#include "src/codegen/cpu-features.h"
#include "src/compiler/code-assembler.h"
-#include "src/counters.h"
-#include "src/cpu-features.h"
-#include "src/date.h"
+#include "src/date/date.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/elements.h"
-#include "src/hash-seed-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/heap/heap.h"
+#include "src/logging/counters.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/elements.h"
#include "src/objects/ordered-hash-table.h"
// For IncrementalMarking::RecordWriteFromCode. TODO(jkummerow): Drop.
+#include "src/execution/isolate.h"
+#include "src/execution/microtask-queue.h"
+#include "src/execution/simulator-base.h"
#include "src/heap/heap-inl.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
-#include "src/log.h"
-#include "src/math-random.h"
-#include "src/microtask-queue.h"
-#include "src/objects-inl.h"
+#include "src/logging/log.h"
+#include "src/numbers/math-random.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-stack.h"
-#include "src/simulator-base.h"
-#include "src/string-search.h"
+#include "src/strings/string-search.h"
#include "src/wasm/wasm-external-refs.h"
// Include native regexp-macro-assembler.
@@ -338,9 +338,6 @@ FUNCTION_REFERENCE(f64_mod_wrapper_function, f64_mod_wrapper)
FUNCTION_REFERENCE(wasm_call_trap_callback_for_testing,
wasm::call_trap_callback_for_testing)
-FUNCTION_REFERENCE(log_enter_external_function, Logger::EnterExternal)
-FUNCTION_REFERENCE(log_leave_external_function, Logger::LeaveExternal)
-
ExternalReference ExternalReference::isolate_root(Isolate* isolate) {
return ExternalReference(isolate->isolate_root());
}
@@ -415,11 +412,6 @@ ExternalReference ExternalReference::address_of_pending_message_obj(
FUNCTION_REFERENCE(abort_with_reason, i::abort_with_reason)
-ExternalReference
-ExternalReference::address_of_harmony_await_optimization_flag() {
- return ExternalReference(&FLAG_harmony_await_optimization);
-}
-
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<Address>(&double_min_int_constant));
}
@@ -500,7 +492,7 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#elif V8_TARGET_ARCH_S390
#define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
#else
- UNREACHABLE();
+UNREACHABLE();
#endif
FUNCTION_REFERENCE_WITH_ISOLATE(re_check_stack_guard_state, re_stack_check_func)
@@ -642,7 +634,7 @@ FUNCTION_REFERENCE(orderedhashmap_gethash_raw, OrderedHashMap::GetHash)
Address GetOrCreateHash(Isolate* isolate, Address raw_key) {
DisallowHeapAllocation no_gc;
- return Object(raw_key)->GetOrCreateHash(isolate).ptr();
+ return Object(raw_key).GetOrCreateHash(isolate).ptr();
}
FUNCTION_REFERENCE(get_or_create_hash_raw, GetOrCreateHash)
diff --git a/deps/v8/src/external-reference.h b/deps/v8/src/codegen/external-reference.h
index 532c3b660d..4c83a9b33a 100644
--- a/deps/v8/src/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_EXTERNAL_REFERENCE_H_
-#define V8_EXTERNAL_REFERENCE_H_
+#ifndef V8_CODEGEN_EXTERNAL_REFERENCE_H_
+#define V8_CODEGEN_EXTERNAL_REFERENCE_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -89,8 +89,6 @@ class StatsCounter;
V(address_of_double_neg_constant, "double_negate_constant") \
V(address_of_float_abs_constant, "float_absolute_constant") \
V(address_of_float_neg_constant, "float_negate_constant") \
- V(address_of_harmony_await_optimization_flag, \
- "FLAG_harmony_await_optimization") \
V(address_of_min_int, "LDoubleConstant::min_int") \
V(address_of_mock_arraybuffer_allocator_flag, \
"FLAG_mock_arraybuffer_allocator") \
@@ -150,8 +148,6 @@ class StatsCounter;
V(libc_memcpy_function, "libc_memcpy") \
V(libc_memmove_function, "libc_memmove") \
V(libc_memset_function, "libc_memset") \
- V(log_enter_external_function, "Logger::EnterExternal") \
- V(log_leave_external_function, "Logger::LeaveExternal") \
V(mod_two_doubles_operation, "mod_two_doubles") \
V(new_deoptimizer_function, "Deoptimizer::New()") \
V(orderedhashmap_gethash_raw, "orderedhashmap_gethash_raw") \
@@ -274,8 +270,6 @@ class ExternalReference {
EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(COUNT_EXTERNAL_REFERENCE);
#undef COUNT_EXTERNAL_REFERENCE
- typedef Address ExternalReferenceRedirector(Address original, Type type);
-
ExternalReference() : address_(kNullAddress) {}
static ExternalReference Create(const SCTableReference& table_ref);
static ExternalReference Create(StatsCounter* counter);
@@ -330,4 +324,4 @@ void abort_with_reason(int reason);
} // namespace internal
} // namespace v8
-#endif // V8_EXTERNAL_REFERENCE_H_
+#endif // V8_CODEGEN_EXTERNAL_REFERENCE_H_
diff --git a/deps/v8/src/flush-instruction-cache.cc b/deps/v8/src/codegen/flush-instruction-cache.cc
index 54f3f6c6ff..cb4088af13 100644
--- a/deps/v8/src/flush-instruction-cache.cc
+++ b/deps/v8/src/codegen/flush-instruction-cache.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/flush-instruction-cache.h"
+#include "src/codegen/flush-instruction-cache.h"
#include "src/base/platform/mutex.h"
-#include "src/cpu-features.h"
-#include "src/simulator.h"
+#include "src/codegen/cpu-features.h"
+#include "src/execution/simulator.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/flush-instruction-cache.h b/deps/v8/src/codegen/flush-instruction-cache.h
index 48adc5a95a..88e5bd38cc 100644
--- a/deps/v8/src/flush-instruction-cache.h
+++ b/deps/v8/src/codegen/flush-instruction-cache.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FLUSH_INSTRUCTION_CACHE_H_
-#define V8_FLUSH_INSTRUCTION_CACHE_H_
+#ifndef V8_CODEGEN_FLUSH_INSTRUCTION_CACHE_H_
+#define V8_CODEGEN_FLUSH_INSTRUCTION_CACHE_H_
#include "include/v8-internal.h"
#include "src/base/macros.h"
@@ -20,4 +20,4 @@ V8_EXPORT_PRIVATE V8_INLINE void FlushInstructionCache(Address start,
} // namespace internal
} // namespace v8
-#endif // V8_FLUSH_INSTRUCTION_CACHE_H_
+#endif // V8_CODEGEN_FLUSH_INSTRUCTION_CACHE_H_
diff --git a/deps/v8/src/handler-table.cc b/deps/v8/src/codegen/handler-table.cc
index 56c5cefecb..12a05e1fba 100644
--- a/deps/v8/src/handler-table.cc
+++ b/deps/v8/src/codegen/handler-table.cc
@@ -2,51 +2,44 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/handler-table.h"
+#include "src/codegen/handler-table.h"
#include <iomanip>
-#include "src/assembler-inl.h"
-#include "src/objects-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/objects/code-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
HandlerTable::HandlerTable(Code code)
- : HandlerTable(code->InstructionStart(), code->has_handler_table()
- ? code->handler_table_offset()
- : 0) {}
+ : HandlerTable(code.InstructionStart() + code.handler_table_offset(),
+ code.handler_table_size()) {}
HandlerTable::HandlerTable(BytecodeArray bytecode_array)
- : HandlerTable(bytecode_array->handler_table()) {}
+ : HandlerTable(bytecode_array.handler_table()) {}
HandlerTable::HandlerTable(ByteArray byte_array)
- : number_of_entries_(byte_array->length() / kRangeEntrySize /
+ : number_of_entries_(byte_array.length() / kRangeEntrySize /
sizeof(int32_t)),
#ifdef DEBUG
mode_(kRangeBasedEncoding),
#endif
raw_encoded_data_(
- reinterpret_cast<Address>(byte_array->GetDataStartAddress())) {
+ reinterpret_cast<Address>(byte_array.GetDataStartAddress())) {
+ DCHECK_EQ(0, byte_array.length() % (kRangeEntrySize * sizeof(int32_t)));
}
-// TODO(jgruber,v8:8758): This constructor should eventually take the handler
-// table size in addition to the offset. That way the {HandlerTable} class
-// remains independent of how the offset/size is encoded in the various code
-// objects. This could even allow us to change the encoding to no longer expect
-// the "number of entries" in the beginning.
-HandlerTable::HandlerTable(Address instruction_start,
- size_t handler_table_offset)
- : number_of_entries_(0),
+HandlerTable::HandlerTable(Address handler_table, int handler_table_size)
+ : number_of_entries_(handler_table_size / kReturnEntrySize /
+ sizeof(int32_t)),
#ifdef DEBUG
mode_(kReturnAddressBasedEncoding),
#endif
- raw_encoded_data_(instruction_start + handler_table_offset) {
- if (handler_table_offset > 0) {
- number_of_entries_ = Memory<int32_t>(raw_encoded_data_);
- raw_encoded_data_ += sizeof(int32_t);
- }
+ raw_encoded_data_(handler_table) {
+ static_assert(4 < kReturnEntrySize * sizeof(int32_t), "allowed padding");
+ DCHECK_GE(4, handler_table_size % (kReturnEntrySize * sizeof(int32_t)));
}
int HandlerTable::GetRangeStart(int index) const {
@@ -131,11 +124,10 @@ int HandlerTable::LengthForRange(int entries) {
}
// static
-int HandlerTable::EmitReturnTableStart(Assembler* masm, int entries) {
+int HandlerTable::EmitReturnTableStart(Assembler* masm) {
masm->DataAlign(sizeof(int32_t)); // Make sure entries are aligned.
masm->RecordComment(";;; Exception handler table.");
int table_start = masm->pc_offset();
- masm->dd(entries);
return table_start;
}
diff --git a/deps/v8/src/handler-table.h b/deps/v8/src/codegen/handler-table.h
index b4ea8b6ed7..eaa062873b 100644
--- a/deps/v8/src/handler-table.h
+++ b/deps/v8/src/codegen/handler-table.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HANDLER_TABLE_H_
-#define V8_HANDLER_TABLE_H_
+#ifndef V8_CODEGEN_HANDLER_TABLE_H_
+#define V8_CODEGEN_HANDLER_TABLE_H_
-#include "src/assert-scope.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -49,7 +49,7 @@ class V8_EXPORT_PRIVATE HandlerTable {
explicit HandlerTable(Code code);
explicit HandlerTable(ByteArray byte_array);
explicit HandlerTable(BytecodeArray bytecode_array);
- explicit HandlerTable(Address instruction_start, size_t handler_table_offset);
+ explicit HandlerTable(Address handler_table, int handler_table_size);
// Getters for handler table based on ranges.
int GetRangeStart(int index) const;
@@ -67,7 +67,7 @@ class V8_EXPORT_PRIVATE HandlerTable {
static int LengthForRange(int entries);
// Emitters for handler table based on return addresses.
- static int EmitReturnTableStart(Assembler* masm, int entries);
+ static int EmitReturnTableStart(Assembler* masm);
static void EmitReturnEntry(Assembler* masm, int offset, int handler);
// Lookup handler in a table based on ranges. The {pc_offset} is an offset to
@@ -106,7 +106,7 @@ class V8_EXPORT_PRIVATE HandlerTable {
EncodingMode mode_;
#endif
- // Direct pointer into the encoded data. This pointer points into object on
+ // Direct pointer into the encoded data. This pointer points into objects on
// the GC heap (either {ByteArray} or {Code}) and hence would become stale
// during a collection. Hence we disallow any allocation.
Address raw_encoded_data_;
@@ -132,4 +132,4 @@ class V8_EXPORT_PRIVATE HandlerTable {
} // namespace internal
} // namespace v8
-#endif // V8_HANDLER_TABLE_H_
+#endif // V8_CODEGEN_HANDLER_TABLE_H_
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
index 8e7fb59975..e274b41fa3 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32-inl.h
@@ -34,14 +34,14 @@
// A light-weight IA32 Assembler.
-#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
-#define V8_IA32_ASSEMBLER_IA32_INL_H_
+#ifndef V8_CODEGEN_IA32_ASSEMBLER_IA32_INL_H_
+#define V8_CODEGEN_IA32_ASSEMBLER_IA32_INL_H_
-#include "src/ia32/assembler-ia32.h"
+#include "src/codegen/ia32/assembler-ia32.h"
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -50,7 +50,6 @@ bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
-
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
DCHECK_EQ(kApplyMask, (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
@@ -68,7 +67,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
-
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@@ -79,31 +77,29 @@ Address RelocInfo::target_address_address() {
return pc_;
}
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
-Address RelocInfo::constant_pool_entry_address() {
- UNREACHABLE();
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kSpecialTargetSize;
-}
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- WriteUnalignedValue(pc_, target->ptr());
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
+ WriteUnalignedValue(pc_, target.ptr());
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
@@ -131,7 +127,6 @@ Address RelocInfo::target_internal_reference() {
return ReadUnalignedValue<Address>(pc_);
}
-
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return pc_;
@@ -157,7 +152,7 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ if (IsFullEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_)) {
WriteUnalignedValue(pc_, kNullAddress);
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
@@ -175,14 +170,13 @@ void Assembler::emit(uint32_t x) {
pc_ += sizeof(uint32_t);
}
-
void Assembler::emit_q(uint64_t x) {
WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint64_t);
}
void Assembler::emit(Handle<HeapObject> handle) {
- emit(handle.address(), RelocInfo::EMBEDDED_OBJECT);
+ emit(handle.address(), RelocInfo::FULL_EMBEDDED_OBJECT);
}
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode) {
@@ -196,7 +190,6 @@ void Assembler::emit(Handle<Code> code, RelocInfo::Mode rmode) {
emit(code.address(), rmode);
}
-
void Assembler::emit(const Immediate& x) {
if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) {
Label* label = reinterpret_cast<Label*>(x.immediate());
@@ -212,7 +205,6 @@ void Assembler::emit(const Immediate& x) {
}
}
-
void Assembler::emit_code_relative_offset(Label* label) {
if (label->is_bound()) {
int32_t pos;
@@ -236,7 +228,6 @@ void Assembler::emit_w(const Immediate& x) {
pc_ += sizeof(uint16_t);
}
-
Address Assembler::target_address_at(Address pc, Address constant_pool) {
return pc + sizeof(int32_t) + ReadUnalignedValue<int32_t>(pc);
}
@@ -250,14 +241,10 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
}
}
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
-
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- !code.is_null() ? code->constant_pool() : kNullAddress,
+ !code.is_null() ? code.constant_pool() : kNullAddress,
target);
}
@@ -270,19 +257,16 @@ Displacement Assembler::disp_at(Label* L) {
return Displacement(long_at(L->pos()));
}
-
void Assembler::disp_at_put(Label* L, Displacement disp) {
long_at_put(L->pos(), disp.data());
}
-
void Assembler::emit_disp(Label* L, Displacement::Type type) {
Displacement disp(L, type);
L->link_to(pc_offset());
emit(static_cast<int>(disp.data()));
}
-
void Assembler::emit_near_disp(Label* L) {
byte disp = 0x00;
if (L->is_near_linked()) {
@@ -299,7 +283,6 @@ void Assembler::deserialization_set_target_internal_reference_at(
WriteUnalignedValue(pc, target);
}
-
void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
DCHECK_EQ(len_, 1);
DCHECK_EQ(scale & -4, 0);
@@ -309,7 +292,6 @@ void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
len_ = 2;
}
-
void Operand::set_disp8(int8_t disp) {
DCHECK(len_ == 1 || len_ == 2);
*reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
@@ -318,4 +300,4 @@ void Operand::set_disp8(int8_t disp) {
} // namespace internal
} // namespace v8
-#endif // V8_IA32_ASSEMBLER_IA32_INL_H_
+#endif // V8_CODEGEN_IA32_ASSEMBLER_IA32_INL_H_
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 0667d787c2..99d38890e3 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -34,7 +34,7 @@
// significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "src/ia32/assembler-ia32.h"
+#include "src/codegen/ia32/assembler-ia32.h"
#include <cstring>
@@ -47,15 +47,15 @@
#include <sys/sysctl.h>
#endif
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/conversions-inl.h"
-#include "src/deoptimizer.h"
-#include "src/disassembler.h"
-#include "src/macro-assembler.h"
-#include "src/string-constants.h"
-#include "src/v8.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/init/v8.h"
+#include "src/numbers/conversions-inl.h"
namespace v8 {
namespace internal {
@@ -63,14 +63,14 @@ namespace internal {
Immediate Immediate::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Immediate(Smi::FromInt(smi));
- Immediate result(0, RelocInfo::EMBEDDED_OBJECT);
+ Immediate result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
Immediate Immediate::EmbeddedStringConstant(const StringConstantBase* str) {
- Immediate result(0, RelocInfo::EMBEDDED_OBJECT);
+ Immediate result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(str);
return result;
@@ -97,7 +97,6 @@ V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
#endif // !V8_LIBC_MSVCRT
-
bool OSHasAVXSupport() {
#if V8_OS_MACOSX
// Mac OS X up to 10.9 has a bug where AVX transitions were indeed being
@@ -121,8 +120,9 @@ bool OSHasAVXSupport() {
return (feature_mask & 0x6) == 0x6;
}
-} // namespace
+#undef _XCR_XFEATURE_ENABLED_MASK
+} // namespace
void CpuFeatures::ProbeImpl(bool cross_compile) {
base::CPU cpu;
@@ -154,8 +154,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
}
}
-
-void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {
printf(
"SSE3=%d SSSE3=%d SSE4_1=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d LZCNT=%d "
@@ -167,7 +166,6 @@ void CpuFeatures::PrintFeatures() {
CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM));
}
-
// -----------------------------------------------------------------------------
// Implementation of Displacement
@@ -183,7 +181,6 @@ void Displacement::init(Label* L, Type type) {
data_ = NextField::encode(next) | TypeField::encode(type);
}
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -201,9 +198,7 @@ bool RelocInfo::IsCodedSpecially() {
return RelocInfo::ModeMask(rmode_) & kApplyMask;
}
-bool RelocInfo::IsInConstantPool() {
- return false;
-}
+bool RelocInfo::IsInConstantPool() { return false; }
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
@@ -232,11 +227,7 @@ Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
}
}
-
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp,
+Operand::Operand(Register base, Register index, ScaleFactor scale, int32_t disp,
RelocInfo::Mode rmode) {
DCHECK(index != esp); // illegal addressing mode
// [base + index*scale + disp/r]
@@ -257,10 +248,7 @@ Operand::Operand(Register base,
}
}
-
-Operand::Operand(Register index,
- ScaleFactor scale,
- int32_t disp,
+Operand::Operand(Register index, ScaleFactor scale, int32_t disp,
RelocInfo::Mode rmode) {
DCHECK(index != esp); // illegal addressing mode
// [index*scale + disp/r]
@@ -269,12 +257,10 @@ Operand::Operand(Register index,
set_dispr(disp, rmode);
}
-
bool Operand::is_reg_only() const {
return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only.
}
-
Register Operand::reg() const {
DCHECK(is_reg_only());
return Register::from_code(buf_[0] & 0x07);
@@ -305,8 +291,7 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// Implementation of Assembler.
// Emit a single byte. Must always be inlined.
-#define EMIT(x) \
- *pc_++ = (x)
+#define EMIT(x) *pc_++ = (x)
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
@@ -379,7 +364,6 @@ void Assembler::Align(int m) {
Nop((m - (addr & mask)) & mask);
}
-
bool Assembler::IsNop(Address addr) {
byte* a = reinterpret_cast<byte*>(addr);
while (*a == 0x66) a++;
@@ -388,7 +372,6 @@ bool Assembler::IsNop(Address addr) {
return false;
}
-
void Assembler::Nop(int bytes) {
EnsureSpace ensure_space(this);
// Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf
@@ -457,43 +440,36 @@ void Assembler::Nop(int bytes) {
}
}
-
void Assembler::CodeTargetAlign() {
Align(16); // Preferred alignment of jump targets on ia32.
}
-
void Assembler::cpuid() {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0xA2);
}
-
void Assembler::pushad() {
EnsureSpace ensure_space(this);
EMIT(0x60);
}
-
void Assembler::popad() {
EnsureSpace ensure_space(this);
EMIT(0x61);
}
-
void Assembler::pushfd() {
EnsureSpace ensure_space(this);
EMIT(0x9C);
}
-
void Assembler::popfd() {
EnsureSpace ensure_space(this);
EMIT(0x9D);
}
-
void Assembler::push(const Immediate& x) {
EnsureSpace ensure_space(this);
if (x.is_int8()) {
@@ -505,14 +481,12 @@ void Assembler::push(const Immediate& x) {
}
}
-
void Assembler::push_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0x68);
emit(imm32);
}
-
void Assembler::push(Register src) {
EnsureSpace ensure_space(this);
EMIT(0x50 | src.code());
@@ -524,7 +498,6 @@ void Assembler::push(Operand src) {
emit_operand(esi, src);
}
-
void Assembler::pop(Register dst) {
DCHECK_NOT_NULL(reloc_info_writer.last_pc());
EnsureSpace ensure_space(this);
@@ -537,7 +510,6 @@ void Assembler::pop(Operand dst) {
emit_operand(eax, dst);
}
-
void Assembler::enter(const Immediate& size) {
EnsureSpace ensure_space(this);
EMIT(0xC8);
@@ -545,7 +517,6 @@ void Assembler::enter(const Immediate& size) {
EMIT(0);
}
-
void Assembler::leave() {
EnsureSpace ensure_space(this);
EMIT(0xC9);
@@ -595,14 +566,12 @@ void Assembler::mov_w(Operand dst, const Immediate& src) {
EMIT(static_cast<int8_t>(src.immediate() >> 8));
}
-
void Assembler::mov(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
emit(imm32);
}
-
void Assembler::mov(Register dst, const Immediate& x) {
EnsureSpace ensure_space(this);
EMIT(0xB8 | dst.code());
@@ -621,7 +590,6 @@ void Assembler::mov(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::mov(Register dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x89);
@@ -701,33 +669,28 @@ void Assembler::cmov(Condition cc, Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::cld() {
EnsureSpace ensure_space(this);
EMIT(0xFC);
}
-
void Assembler::rep_movs() {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0xA5);
}
-
void Assembler::rep_stos() {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0xAB);
}
-
void Assembler::stos() {
EnsureSpace ensure_space(this);
EMIT(0xAB);
}
-
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (src == eax || dst == eax) { // Single-byte encoding.
@@ -835,12 +798,10 @@ void Assembler::add(Operand dst, const Immediate& x) {
emit_arith(0, dst, x);
}
-
void Assembler::and_(Register dst, int32_t imm32) {
and_(dst, Immediate(imm32));
}
-
void Assembler::and_(Register dst, const Immediate& x) {
EnsureSpace ensure_space(this);
emit_arith(4, Operand(dst), x);
@@ -946,18 +907,17 @@ void Assembler::cmp(Operand op, Handle<HeapObject> handle) {
void Assembler::cmpb_al(Operand op) {
EnsureSpace ensure_space(this);
- EMIT(0x38); // CMP r/m8, r8
+ EMIT(0x38); // CMP r/m8, r8
emit_operand(eax, op); // eax has same code as register al.
}
void Assembler::cmpw_ax(Operand op) {
EnsureSpace ensure_space(this);
EMIT(0x66);
- EMIT(0x39); // CMP r/m16, r16
+ EMIT(0x39); // CMP r/m16, r16
emit_operand(eax, op); // eax has same code as register ax.
}
-
void Assembler::dec_b(Register dst) {
CHECK(dst.is_byte_register());
EnsureSpace ensure_space(this);
@@ -971,7 +931,6 @@ void Assembler::dec_b(Operand dst) {
emit_operand(ecx, dst);
}
-
void Assembler::dec(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0x48 | dst.code());
@@ -983,7 +942,6 @@ void Assembler::dec(Operand dst) {
emit_operand(ecx, dst);
}
-
void Assembler::cdq() {
EnsureSpace ensure_space(this);
EMIT(0x99);
@@ -1001,7 +959,6 @@ void Assembler::div(Operand src) {
emit_operand(esi, src);
}
-
void Assembler::imul(Register reg) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
@@ -1015,7 +972,6 @@ void Assembler::imul(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::imul(Register dst, Register src, int32_t imm32) {
imul(dst, Operand(src), imm32);
}
@@ -1033,7 +989,6 @@ void Assembler::imul(Register dst, Operand src, int32_t imm32) {
}
}
-
void Assembler::inc(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0x40 | dst.code());
@@ -1051,14 +1006,12 @@ void Assembler::lea(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
EMIT(0xE0 | src.code());
}
-
void Assembler::neg(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
@@ -1071,7 +1024,6 @@ void Assembler::neg(Operand dst) {
emit_operand(ebx, dst);
}
-
void Assembler::not_(Register dst) {
EnsureSpace ensure_space(this);
EMIT(0xF7);
@@ -1084,7 +1036,6 @@ void Assembler::not_(Operand dst) {
emit_operand(edx, dst);
}
-
void Assembler::or_(Register dst, int32_t imm32) {
EnsureSpace ensure_space(this);
emit_arith(1, Operand(dst), Immediate(imm32));
@@ -1107,7 +1058,6 @@ void Assembler::or_(Operand dst, Register src) {
emit_operand(src, dst);
}
-
void Assembler::rcl(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
@@ -1121,7 +1071,6 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
}
}
-
void Assembler::rcr(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
DCHECK(is_uint5(imm8)); // illegal shift count
@@ -1438,25 +1387,21 @@ void Assembler::bsf(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::hlt() {
EnsureSpace ensure_space(this);
EMIT(0xF4);
}
-
void Assembler::int3() {
EnsureSpace ensure_space(this);
EMIT(0xCC);
}
-
void Assembler::nop() {
EnsureSpace ensure_space(this);
EMIT(0x90);
}
-
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
DCHECK(is_uint16(imm16));
@@ -1469,14 +1414,12 @@ void Assembler::ret(int imm16) {
}
}
-
void Assembler::ud2() {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x0B);
}
-
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -1508,7 +1451,6 @@ void Assembler::print(const Label* L) {
}
}
-
void Assembler::bind_to(Label* L, int pos) {
EnsureSpace ensure_space(this);
DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
@@ -1564,7 +1506,6 @@ void Assembler::bind_to(Label* L, int pos) {
L->bind_to(pos);
}
-
void Assembler::bind(Label* L) {
EnsureSpace ensure_space(this);
DCHECK(!L->is_bound()); // label can only be bound once
@@ -1696,7 +1637,6 @@ void Assembler::jmp(Operand adr) {
emit_operand(esp, adr);
}
-
void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -1704,13 +1644,12 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
emit(code, rmode);
}
-
void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
DCHECK(0 <= cc && static_cast<int>(cc) < 16);
if (L->is_bound()) {
const int short_size = 2;
- const int long_size = 6;
+ const int long_size = 6;
int offs = L->pos() - pc_offset();
DCHECK_LE(offs, 0);
if (is_int8(offs - short_size)) {
@@ -1749,7 +1688,6 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
}
}
-
void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
DCHECK((0 <= cc) && (static_cast<int>(cc) < 16));
@@ -1763,7 +1701,6 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
}
}
-
void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
// 0000 1111 1000 tttn #32-bit disp
@@ -1772,7 +1709,6 @@ void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) {
emit(code, rmode);
}
-
// FPU instructions.
void Assembler::fld(int i) {
@@ -1780,34 +1716,29 @@ void Assembler::fld(int i) {
emit_farith(0xD9, 0xC0, i);
}
-
void Assembler::fstp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDD, 0xD8, i);
}
-
void Assembler::fld1() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xE8);
}
-
void Assembler::fldpi() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xEB);
}
-
void Assembler::fldz() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xEE);
}
-
void Assembler::fldln2() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
@@ -1894,88 +1825,75 @@ void Assembler::fistp_d(Operand adr) {
emit_operand(edi, adr);
}
-
void Assembler::fabs() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xE1);
}
-
void Assembler::fchs() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xE0);
}
-
void Assembler::fcos() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xFF);
}
-
void Assembler::fsin() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xFE);
}
-
void Assembler::fptan() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF2);
}
-
void Assembler::fyl2x() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF1);
}
-
void Assembler::f2xm1() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF0);
}
-
void Assembler::fscale() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xFD);
}
-
void Assembler::fninit() {
EnsureSpace ensure_space(this);
EMIT(0xDB);
EMIT(0xE3);
}
-
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
}
-
void Assembler::fadd_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xC0, i);
}
-
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xE8, i);
}
-
void Assembler::fsub_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xE0, i);
@@ -1987,168 +1905,142 @@ void Assembler::fisub_s(Operand adr) {
emit_operand(esp, adr);
}
-
void Assembler::fmul_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xC8, i);
}
-
void Assembler::fmul(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC8, i);
}
-
void Assembler::fdiv(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xF8, i);
}
-
void Assembler::fdiv_i(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD8, 0xF0, i);
}
-
void Assembler::faddp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xC0, i);
}
-
void Assembler::fsubp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xE8, i);
}
-
void Assembler::fsubrp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xE0, i);
}
-
void Assembler::fmulp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xC8, i);
}
-
void Assembler::fdivp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xF8, i);
}
-
void Assembler::fprem() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF8);
}
-
void Assembler::fprem1() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF5);
}
-
void Assembler::fxch(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD9, 0xC8, i);
}
-
void Assembler::fincstp() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xF7);
}
-
void Assembler::ffree(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDD, 0xC0, i);
}
-
void Assembler::ftst() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xE4);
}
-
void Assembler::fucomp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDD, 0xE8, i);
}
-
void Assembler::fucompp() {
EnsureSpace ensure_space(this);
EMIT(0xDA);
EMIT(0xE9);
}
-
void Assembler::fucomi(int i) {
EnsureSpace ensure_space(this);
EMIT(0xDB);
EMIT(0xE8 + i);
}
-
void Assembler::fucomip() {
EnsureSpace ensure_space(this);
EMIT(0xDF);
EMIT(0xE9);
}
-
void Assembler::fcompp() {
EnsureSpace ensure_space(this);
EMIT(0xDE);
EMIT(0xD9);
}
-
void Assembler::fnstsw_ax() {
EnsureSpace ensure_space(this);
EMIT(0xDF);
EMIT(0xE0);
}
-
void Assembler::fwait() {
EnsureSpace ensure_space(this);
EMIT(0x9B);
}
-
void Assembler::frndint() {
EnsureSpace ensure_space(this);
EMIT(0xD9);
EMIT(0xFC);
}
-
void Assembler::fnclex() {
EnsureSpace ensure_space(this);
EMIT(0xDB);
EMIT(0xE2);
}
-
void Assembler::sahf() {
EnsureSpace ensure_space(this);
EMIT(0x9E);
}
-
void Assembler::setcc(Condition cc, Register reg) {
DCHECK(reg.is_byte_register());
EnsureSpace ensure_space(this);
@@ -2177,7 +2069,6 @@ void Assembler::cvttsd2si(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@@ -2406,7 +2297,6 @@ void Assembler::ucomisd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2419,7 +2309,6 @@ void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EMIT(static_cast<byte>(mode) | 0x8);
}
-
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2432,7 +2321,6 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
EMIT(static_cast<byte>(mode) | 0x8);
}
-
void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2441,7 +2329,6 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
void Assembler::movmskps(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2465,7 +2352,6 @@ void Assembler::minsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@@ -2595,7 +2481,6 @@ void Assembler::movd(Operand dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
DCHECK(IsEnabled(SSE4_1));
DCHECK(is_uint8(imm8));
@@ -2671,7 +2556,6 @@ void Assembler::psllq(XMMRegister reg, uint8_t shift) {
EMIT(shift);
}
-
void Assembler::psllq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2689,7 +2573,6 @@ void Assembler::psrlq(XMMRegister reg, uint8_t shift) {
EMIT(shift);
}
-
void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2886,7 +2769,6 @@ void Assembler::minss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
Operand src2) {
@@ -3137,22 +3019,18 @@ void Assembler::emit_sse_operand(XMMRegister reg, Operand adr) {
emit_operand(ireg, adr);
}
-
void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
EMIT(0xC0 | dst.code() << 3 | src.code());
}
-
void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
EMIT(0xC0 | dst.code() << 3 | src.code());
}
-
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
EMIT(0xC0 | (dst.code() << 3) | src.code());
}
-
void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
if (mm != k0F || w != kW0) {
@@ -3166,14 +3044,12 @@ void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
}
}
-
void Assembler::emit_vex_prefix(Register vreg, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
XMMRegister ivreg = XMMRegister::from_code(vreg.code());
emit_vex_prefix(ivreg, l, pp, mm, w);
}
-
void Assembler::GrowBuffer() {
DCHECK(buffer_overflow());
DCHECK_EQ(buffer_start_, buffer_->start());
@@ -3227,7 +3103,6 @@ void Assembler::GrowBuffer() {
DCHECK(!buffer_overflow());
}
-
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
DCHECK(is_uint8(op1) && is_uint8(op2)); // wrong opcode
DCHECK(is_uint8(imm8));
@@ -3237,7 +3112,6 @@ void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
EMIT(imm8);
}
-
void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
DCHECK((0 <= sel) && (sel <= 7));
Register ireg = Register::from_code(sel);
@@ -3269,7 +3143,7 @@ void Assembler::emit_operand(int code, Operand adr) {
DCHECK(!options().isolate_independent_code ||
adr.rmode_ != RelocInfo::CODE_TARGET);
DCHECK(!options().isolate_independent_code ||
- adr.rmode_ != RelocInfo::EMBEDDED_OBJECT);
+ adr.rmode_ != RelocInfo::FULL_EMBEDDED_OBJECT);
DCHECK(!options().isolate_independent_code ||
adr.rmode_ != RelocInfo::EXTERNAL_REFERENCE);
@@ -3295,7 +3169,6 @@ void Assembler::emit_operand(int code, Operand adr) {
}
}
-
void Assembler::emit_label(Label* label) {
if (label->is_bound()) {
internal_reference_positions_.push_back(pc_offset());
@@ -3305,46 +3178,42 @@ void Assembler::emit_label(Label* label) {
}
}
-
void Assembler::emit_farith(int b1, int b2, int i) {
DCHECK(is_uint8(b1) && is_uint8(b2)); // wrong opcode
- DCHECK(0 <= i && i < 8); // illegal stack offset
+ DCHECK(0 <= i && i < 8); // illegal stack offset
EMIT(b1);
EMIT(b2 + i);
}
-
void Assembler::db(uint8_t data) {
EnsureSpace ensure_space(this);
EMIT(data);
}
-
void Assembler::dd(uint32_t data) {
EnsureSpace ensure_space(this);
emit(data);
}
-
void Assembler::dq(uint64_t data) {
EnsureSpace ensure_space(this);
emit_q(data);
}
-
void Assembler::dd(Label* label) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
emit_label(label);
}
-
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
reloc_info_writer.Write(&rinfo);
}
+#undef EMIT
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index e06b716610..d2dcb0f348 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -34,19 +34,19 @@
// A light-weight IA32 Assembler.
-#ifndef V8_IA32_ASSEMBLER_IA32_H_
-#define V8_IA32_ASSEMBLER_IA32_H_
+#ifndef V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
+#define V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
#include <deque>
-#include "src/assembler.h"
-#include "src/ia32/constants-ia32.h"
-#include "src/ia32/register-ia32.h"
-#include "src/ia32/sse-instr.h"
-#include "src/isolate.h"
-#include "src/label.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/ia32/constants-ia32.h"
+#include "src/codegen/ia32/register-ia32.h"
+#include "src/codegen/ia32/sse-instr.h"
+#include "src/codegen/label.h"
+#include "src/execution/isolate.h"
#include "src/objects/smi.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -55,35 +55,34 @@ class SafepointTableBuilder;
enum Condition {
// any value < 0 is considered no_condition
- no_condition = -1,
-
- overflow = 0,
- no_overflow = 1,
- below = 2,
- above_equal = 3,
- equal = 4,
- not_equal = 5,
- below_equal = 6,
- above = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
+ no_condition = -1,
+
+ overflow = 0,
+ no_overflow = 1,
+ below = 2,
+ above_equal = 3,
+ equal = 4,
+ not_equal = 5,
+ below_equal = 6,
+ above = 7,
+ negative = 8,
+ positive = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
greater_equal = 13,
- less_equal = 14,
- greater = 15,
+ less_equal = 14,
+ greater = 15,
// aliases
- carry = below,
- not_carry = above_equal,
- zero = equal,
- not_zero = not_equal,
- sign = negative,
- not_sign = positive
+ carry = below,
+ not_carry = above_equal,
+ zero = equal,
+ not_zero = not_equal,
+ sign = negative,
+ not_sign = positive
};
-
// Returns the equivalent of !cc.
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
@@ -92,7 +91,6 @@ inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
-
enum RoundingMode {
kRoundToNearest = 0x0,
kRoundDown = 0x1,
@@ -113,20 +111,18 @@ class Immediate {
inline explicit Immediate(const ExternalReference& ext)
: Immediate(ext.address(), RelocInfo::EXTERNAL_REFERENCE) {}
inline explicit Immediate(Handle<HeapObject> handle)
- : Immediate(handle.address(), RelocInfo::EMBEDDED_OBJECT) {}
+ : Immediate(handle.address(), RelocInfo::FULL_EMBEDDED_OBJECT) {}
inline explicit Immediate(Smi value)
: Immediate(static_cast<intptr_t>(value.ptr())) {}
static Immediate EmbeddedNumber(double number); // Smi or HeapNumber.
static Immediate EmbeddedStringConstant(const StringConstantBase* str);
- static Immediate CodeRelativeOffset(Label* label) {
- return Immediate(label);
- }
+ static Immediate CodeRelativeOffset(Label* label) { return Immediate(label); }
bool is_heap_object_request() const {
DCHECK_IMPLIES(is_heap_object_request_,
- rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
@@ -142,7 +138,8 @@ class Immediate {
}
bool is_embedded_object() const {
- return !is_heap_object_request() && rmode() == RelocInfo::EMBEDDED_OBJECT;
+ return !is_heap_object_request() &&
+ rmode() == RelocInfo::FULL_EMBEDDED_OBJECT;
}
Handle<HeapObject> embedded_object() const {
@@ -194,7 +191,6 @@ class Immediate {
friend class MacroAssembler;
};
-
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -340,14 +336,14 @@ class Displacement {
void print() {
PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
- NextField::decode(data_));
+ NextField::decode(data_));
}
private:
int data_;
- class TypeField: public BitField<Type, 0, 2> {};
- class NextField: public BitField<int, 2, 32-2> {};
+ class TypeField : public BitField<Type, 0, 2> {};
+ class NextField : public BitField<int, 2, 32 - 2> {};
void init(Label* L, Type type);
};
@@ -401,10 +397,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
// This sets the branch destination (which is in the instruction on x86).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -421,10 +413,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kSpecialTargetSize = kSystemPointerSize;
- // Distance between the address of the code target in the call instruction
- // and the return address
- static constexpr int kCallTargetAddressOffset = kSystemPointerSize;
-
// One byte opcode for test al, 0xXX.
static constexpr byte kTestAlByte = 0xA8;
// One byte opcode for nop.
@@ -614,7 +602,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void div(Operand src);
// Signed multiply instructions.
- void imul(Register src); // edx:eax = eax * src.
+ void imul(Register src); // edx:eax = eax * src.
void imul(Register dst, Register src) { imul(dst, Operand(src)); }
void imul(Register dst, Operand src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
@@ -626,7 +614,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void lea(Register dst, Operand src);
// Unsigned multiply instruction.
- void mul(Register src); // edx:eax = eax * reg.
+ void mul(Register src); // edx:eax = eax * reg.
void neg(Register dst);
void neg(Operand dst);
@@ -751,14 +739,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(Operand adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
- // unconditionoal jump relative to the current address. Low-level rountine,
+ // Unconditional jump relative to the current address. Low-level routine,
// use with caution!
void jmp_rel(int offset);
// Conditional jumps
- void j(Condition cc,
- Label* L,
- Label::Distance distance = Label::kFar);
+ void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
@@ -1678,10 +1664,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
private:
- uint32_t long_at(int pos) {
+ uint32_t long_at(int pos) {
return ReadUnalignedValue<uint32_t>(addr_at(pos));
}
- void long_at_put(int pos, uint32_t x) {
+ void long_at_put(int pos, uint32_t x) {
WriteUnalignedValue(addr_at(pos), x);
}
@@ -1776,7 +1762,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
std::map<Label*, std::vector<int>> label_farjmp_maps_;
};
-
// Helper class that ensures that there is enough space for generating
// instructions and relocation information. The constructor makes
// sure that there is enough space and (in debug mode) the destructor
@@ -1807,4 +1792,4 @@ class EnsureSpace {
} // namespace internal
} // namespace v8
-#endif // V8_IA32_ASSEMBLER_IA32_H_
+#endif // V8_CODEGEN_IA32_ASSEMBLER_IA32_H_
diff --git a/deps/v8/src/ia32/constants-ia32.h b/deps/v8/src/codegen/ia32/constants-ia32.h
index 38ad1280f1..af3bd09330 100644
--- a/deps/v8/src/ia32/constants-ia32.h
+++ b/deps/v8/src/codegen/ia32/constants-ia32.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_CONSTANTS_IA32_H_
-#define V8_IA32_CONSTANTS_IA32_H_
+#ifndef V8_CODEGEN_IA32_CONSTANTS_IA32_H_
+#define V8_CODEGEN_IA32_CONSTANTS_IA32_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -20,4 +20,4 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
} // namespace internal
} // namespace v8
-#endif // V8_IA32_CONSTANTS_IA32_H_
+#endif // V8_CODEGEN_IA32_CONSTANTS_IA32_H_
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/codegen/ia32/cpu-ia32.cc
index 73b71e8dde..5e6d8a6207 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/codegen/ia32/cpu-ia32.cc
@@ -10,7 +10,7 @@
#if V8_TARGET_ARCH_IA32
-#include "src/cpu-features.h"
+#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
index d69366c099..428912c7bd 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/codegen/ia32/interface-descriptors-ia32.cc
@@ -4,9 +4,9 @@
#if V8_TARGET_ARCH_IA32
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index b599d9ff3c..6a0be9386e 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -7,25 +7,25 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/bootstrapper.h"
-#include "src/callable.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/ia32/assembler-ia32-inl.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/external-reference-table.h"
-#include "src/frame-constants.h"
-#include "src/frames-inl.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
-#include "src/ia32/assembler-ia32-inl.h"
-#include "src/macro-assembler.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
-#include "src/ia32/macro-assembler-ia32.h"
+#include "src/codegen/ia32/macro-assembler-ia32.h"
#endif
namespace v8 {
@@ -276,7 +276,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
if (fp_mode == kSaveFPRegs) {
// Save all XMM registers except XMM0.
int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
- sub(esp, Immediate(delta));
+ AllocateStackSpace(delta);
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
@@ -312,17 +312,6 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
-void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, Label* lost_precision,
- Label* is_nan, Label::Distance dst) {
- DCHECK(input_reg != scratch);
- cvttsd2si(result_reg, Operand(input_reg));
- Cvtsi2sd(scratch, Operand(result_reg));
- ucomisd(scratch, input_reg);
- j(not_equal, lost_precision, dst);
- j(parity_even, is_nan, dst);
-}
-
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
SaveFPRegsMode save_fp,
@@ -504,9 +493,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Label::kNear);
CheckPageFlag(object,
value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- zero,
- &done,
+ MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
Label::kNear);
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
@@ -660,14 +647,12 @@ void TurboAssembler::SarPair_cl(Register high, Register low) {
bind(&done);
}
-void MacroAssembler::CmpObjectType(Register heap_object,
- InstanceType type,
+void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
Register map) {
mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
-
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
@@ -703,7 +688,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
-
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -761,7 +745,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
-
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
test(object, Immediate(kSmiTagMask));
@@ -798,27 +781,35 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
}
#ifdef V8_OS_WIN
-void TurboAssembler::AllocateStackFrame(Register bytes_scratch) {
+void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
// In windows, we cannot increment the stack size by more than one page
// (minimum page size is 4KB) without accessing at least one byte on the
// page. Check this:
// https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
- constexpr int kPageSize = 4 * 1024;
Label check_offset;
Label touch_next_page;
jmp(&check_offset);
bind(&touch_next_page);
- sub(esp, Immediate(kPageSize));
+ sub(esp, Immediate(kStackPageSize));
// Just to touch the page, before we increment further.
mov(Operand(esp, 0), Immediate(0));
- sub(bytes_scratch, Immediate(kPageSize));
+ sub(bytes_scratch, Immediate(kStackPageSize));
bind(&check_offset);
- cmp(bytes_scratch, kPageSize);
+ cmp(bytes_scratch, kStackPageSize);
j(greater, &touch_next_page);
sub(esp, bytes_scratch);
}
+
+void TurboAssembler::AllocateStackSpace(int bytes) {
+ while (bytes > kStackPageSize) {
+ sub(esp, Immediate(kStackPageSize));
+ mov(Operand(esp, 0), Immediate(0));
+ bytes -= kStackPageSize;
+ }
+ sub(esp, Immediate(bytes));
+}
#endif
void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
@@ -855,20 +846,19 @@ void MacroAssembler::EnterExitFramePrologue(StackFrame::Type frame_type,
mov(ExternalReferenceAsOperand(c_function_address, scratch), edx);
}
-
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
int space =
XMMRegister::kNumRegisters * kDoubleSize + argc * kSystemPointerSize;
- sub(esp, Immediate(space));
+ AllocateStackSpace(space);
const int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
- sub(esp, Immediate(argc * kSystemPointerSize));
+ AllocateStackSpace(argc * kSystemPointerSize);
}
// Get the required frame alignment for the OS.
@@ -900,7 +890,6 @@ void MacroAssembler::EnterApiExitFrame(int argc, Register scratch) {
EnterExitFrameEpilogue(argc, false);
}
-
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Optionally restore all XMM registers.
if (save_doubles) {
@@ -978,8 +967,7 @@ void MacroAssembler::PopStackHandler(Register scratch) {
add(esp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments,
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
@@ -1631,7 +1619,7 @@ void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
// We don't have an xmm scratch register, so move the data via the stack. This
// path is rarely required, so it's acceptable to be slow.
DCHECK_LT(imm8, 2);
- sub(esp, Immediate(kDoubleSize));
+ AllocateStackSpace(kDoubleSize);
movsd(Operand(esp, 0), src);
mov(dst, Operand(esp, imm8 * kUInt32Size));
add(esp, Immediate(kDoubleSize));
@@ -1652,7 +1640,7 @@ void TurboAssembler::Pinsrd(XMMRegister dst, Operand src, uint8_t imm8) {
// We don't have an xmm scratch register, so move the data via the stack. This
// path is rarely required, so it's acceptable to be slow.
DCHECK_LT(imm8, 2);
- sub(esp, Immediate(kDoubleSize));
+ AllocateStackSpace(kDoubleSize);
// Write original content of {dst} to the stack.
movsd(Operand(esp, 0), dst);
// Overwrite the portion specified in {imm8}.
@@ -1805,19 +1793,18 @@ void TurboAssembler::Abort(AbortReason reason) {
int3();
}
-
void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
- sub(esp, Immediate((num_arguments + 1) * kSystemPointerSize));
+ AllocateStackSpace((num_arguments + 1) * kSystemPointerSize);
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kSystemPointerSize), scratch);
} else {
- sub(esp, Immediate(num_arguments * kSystemPointerSize));
+ AllocateStackSpace(num_arguments * kSystemPointerSize);
}
}
@@ -1839,16 +1826,12 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
// Save the frame pointer and PC so that the stack layout remains iterable,
// even without an ExitFrame which normally exists between JS and C frames.
if (isolate() != nullptr) {
- // Get the current PC via call, pop. This gets the return address pushed to
- // the stack by call.
- Label get_pc;
- call(&get_pc);
- bind(&get_pc);
// Find two caller-saved scratch registers.
Register scratch1 = eax;
Register scratch2 = ecx;
if (function == eax) scratch1 = edx;
if (function == ecx) scratch2 = edx;
+ PushPC();
pop(scratch1);
mov(ExternalReferenceAsOperand(
ExternalReference::fast_c_call_caller_pc_address(isolate()),
@@ -1876,6 +1859,14 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
}
}
+void TurboAssembler::PushPC() {
+ // Push the current PC onto the stack as "return address" via calling
+ // the next instruction.
+ Label get_pc;
+ call(&get_pc);
+ bind(&get_pc);
+}
+
void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
DCHECK_IMPLIES(options().isolate_independent_code,
Builtins::IsIsolateIndependentBuiltin(*code_object));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 7a7591c6f0..345ae815af 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -6,20 +6,20 @@
#error This header must be included via macro-assembler.h
#endif
-#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
-#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
+#ifndef V8_CODEGEN_IA32_MACRO_ASSEMBLER_IA32_H_
+#define V8_CODEGEN_IA32_MACRO_ASSEMBLER_IA32_H_
-#include "src/assembler.h"
-#include "src/bailout-reason.h"
-#include "src/globals.h"
-#include "src/ia32/assembler-ia32.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/codegen/ia32/assembler-ia32.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
-typedef Operand MemOperand;
+using MemOperand = Operand;
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
@@ -40,17 +40,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void LeaveFrame(StackFrame::Type type);
-// Allocate a stack frame of given size (i.e. decrement {esp} by the value
-// stored in the given register).
+// Allocate stack space of given size (i.e. decrement {esp} by the value
+// stored in the given register, or by a constant). If you need to perform a
+// stack check, do it before calling this function because this function may
+// write into the newly allocated space. It may also overwrite the given
+// register's value, in the version that takes a register.
#ifdef V8_OS_WIN
- // On win32, take special care if the number of bytes is greater than 4096:
- // Ensure that each page within the new stack frame is touched once in
- // decreasing order. See
- // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
- // Use {bytes_scratch} as scratch register for this procedure.
- void AllocateStackFrame(Register bytes_scratch);
+ void AllocateStackSpace(Register bytes_scratch);
+ void AllocateStackSpace(int bytes);
#else
- void AllocateStackFrame(Register bytes) { sub(esp, bytes); }
+ void AllocateStackSpace(Register bytes) { sub(esp, bytes); }
+ void AllocateStackSpace(int bytes) { sub(esp, Immediate(bytes)); }
#endif
// Print a message to stdout and abort execution.
@@ -195,6 +195,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
+ void PushPC();
+
// Operand pointing to an external reference.
// May emit code to set up the scratch register. The operand is
// only guaranteed to be correct as long as the scratch register
@@ -441,7 +443,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
mov(dst, Immediate(x));
}
}
- void Set(Operand dst, int32_t x) { mov(dst, Immediate(x)); }
void PushRoot(RootIndex index);
@@ -515,14 +516,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
- // Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { pushad(); }
- void PopSafepointRegisters() { popad(); }
-
// ---------------------------------------------------------------------------
// JavaScript invokes
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
@@ -547,10 +543,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
- void DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, Label* lost_precision, Label* is_nan,
- Label::Distance dst = Label::kFar);
-
// Smi tagging support.
void SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
@@ -558,14 +550,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
add(reg, reg);
}
- // Modifies the register even if it does not contain a Smi!
- void UntagSmi(Register reg, Label* is_smi) {
- STATIC_ASSERT(kSmiTagSize == 1);
- sar(reg, kSmiTagSize);
- STATIC_ASSERT(kSmiTag == 0);
- j(not_carry, is_smi);
- }
-
// Jump if register contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label,
Label::Distance distance = Label::kFar) {
@@ -579,7 +563,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
j(not_zero, smi_label, distance);
}
- template<typename Field>
+ template <typename Field>
void DecodeField(Register reg) {
static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift;
@@ -731,4 +715,4 @@ inline Operand NativeContextOperand() {
} // namespace internal
} // namespace v8
-#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
+#endif // V8_CODEGEN_IA32_MACRO_ASSEMBLER_IA32_H_
diff --git a/deps/v8/src/ia32/register-ia32.h b/deps/v8/src/codegen/ia32/register-ia32.h
index b1e213b4d7..aa24cf6511 100644
--- a/deps/v8/src/ia32/register-ia32.h
+++ b/deps/v8/src/codegen/ia32/register-ia32.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_REGISTER_IA32_H_
-#define V8_IA32_REGISTER_IA32_H_
+#ifndef V8_CODEGEN_IA32_REGISTER_IA32_H_
+#define V8_CODEGEN_IA32_REGISTER_IA32_H_
-#include "src/register.h"
-#include "src/reglist.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
namespace v8 {
namespace internal {
@@ -92,11 +92,11 @@ class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
};
-typedef XMMRegister FloatRegister;
+using FloatRegister = XMMRegister;
-typedef XMMRegister DoubleRegister;
+using DoubleRegister = XMMRegister;
-typedef XMMRegister Simd128Register;
+using Simd128Register = XMMRegister;
#define DEFINE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
@@ -163,4 +163,4 @@ constexpr Register kSpeculationPoisonRegister = no_reg;
} // namespace internal
} // namespace v8
-#endif // V8_IA32_REGISTER_IA32_H_
+#endif // V8_CODEGEN_IA32_REGISTER_IA32_H_
diff --git a/deps/v8/src/ia32/sse-instr.h b/deps/v8/src/codegen/ia32/sse-instr.h
index f9d4c59e07..87c333d188 100644
--- a/deps/v8/src/ia32/sse-instr.h
+++ b/deps/v8/src/codegen/ia32/sse-instr.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_SSE_INSTR_H_
-#define V8_IA32_SSE_INSTR_H_
+#ifndef V8_CODEGEN_IA32_SSE_INSTR_H_
+#define V8_CODEGEN_IA32_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
V(packsswb, 66, 0F, 63) \
@@ -79,4 +79,4 @@
V(pmovzxwd, 66, 0F, 38, 33) \
V(ptest, 66, 0F, 38, 17)
-#endif // V8_IA32_SSE_INSTR_H_
+#endif // V8_CODEGEN_IA32_SSE_INSTR_H_
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/codegen/interface-descriptors.cc
index 01c179ee7e..f8f874359b 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/codegen/interface-descriptors.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -194,7 +194,6 @@ const Register FastNewObjectDescriptor::NewTargetRegister() {
return kJavaScriptCallNewTargetRegister;
}
-
void LoadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index abebe2b8bd..d166b477d8 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INTERFACE_DESCRIPTORS_H_
-#define V8_INTERFACE_DESCRIPTORS_H_
+#ifndef V8_CODEGEN_INTERFACE_DESCRIPTORS_H_
+#define V8_CODEGEN_INTERFACE_DESCRIPTORS_H_
#include <memory>
-#include "src/globals.h"
-#include "src/isolate.h"
-#include "src/machine-type.h"
-#include "src/register-arch.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/register-arch.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
@@ -99,7 +99,7 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
// untagged stack arguments.
kNoStackScan = 1u << 1,
};
- typedef base::Flags<Flag> Flags;
+ using Flags = base::Flags<Flag>;
CallInterfaceDescriptorData() = default;
@@ -225,7 +225,7 @@ class V8_EXPORT_PRIVATE CallDescriptors : public AllStatic {
class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
public:
- typedef CallInterfaceDescriptorData::Flags Flags;
+ using Flags = CallInterfaceDescriptorData::Flags;
CallInterfaceDescriptor() : data_(nullptr) {}
virtual ~CallInterfaceDescriptor() = default;
@@ -485,10 +485,10 @@ class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
// Dummy descriptor used to mark builtins that don't yet have their proper
// descriptor associated.
-typedef VoidDescriptor DummyDescriptor;
+using DummyDescriptor = VoidDescriptor;
// Dummy descriptor that marks builtins with C calling convention.
-typedef VoidDescriptor CCallDescriptor;
+using CCallDescriptor = VoidDescriptor;
class AllocateDescriptor : public CallInterfaceDescriptor {
public:
@@ -951,7 +951,6 @@ class CompareDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CompareDescriptor, CallInterfaceDescriptor)
};
-
class BinaryOpDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kLeft, kRight)
@@ -1283,4 +1282,4 @@ INTERFACE_DESCRIPTOR_LIST(DEF_KEY)
} // namespace internal
} // namespace v8
-#endif // V8_INTERFACE_DESCRIPTORS_H_
+#endif // V8_CODEGEN_INTERFACE_DESCRIPTORS_H_
diff --git a/deps/v8/src/label.h b/deps/v8/src/codegen/label.h
index a70f17292e..430958d190 100644
--- a/deps/v8/src/label.h
+++ b/deps/v8/src/codegen/label.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LABEL_H_
-#define V8_LABEL_H_
+#ifndef V8_CODEGEN_LABEL_H_
+#define V8_CODEGEN_LABEL_H_
#include "src/base/macros.h"
@@ -109,4 +109,4 @@ class Label {
} // namespace internal
} // namespace v8
-#endif // V8_LABEL_H_
+#endif // V8_CODEGEN_LABEL_H_
diff --git a/deps/v8/src/machine-type.cc b/deps/v8/src/codegen/machine-type.cc
index f4e7117838..5d2ecb48cd 100644
--- a/deps/v8/src/machine-type.cc
+++ b/deps/v8/src/codegen/machine-type.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/machine-type.h"
-#include "src/ostreams.h"
+#include "src/codegen/machine-type.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -84,7 +84,6 @@ std::ostream& operator<<(std::ostream& os, MachineSemantic type) {
UNREACHABLE();
}
-
std::ostream& operator<<(std::ostream& os, MachineType type) {
if (type == MachineType::None()) {
return os;
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/codegen/machine-type.h
index 73e9780ac2..15e3df65c5 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/codegen/machine-type.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MACHINE_TYPE_H_
-#define V8_MACHINE_TYPE_H_
+#ifndef V8_CODEGEN_MACHINE_TYPE_H_
+#define V8_CODEGEN_MACHINE_TYPE_H_
#include <iosfwd>
#include "src/base/bits.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -235,6 +235,77 @@ class MachineType {
return MachineType(MachineRepresentation::kBit, MachineSemantic::kNone);
}
+ // These methods return compressed representations when the compressed
+ // pointer flag is enabled. Otherwise, they returned the corresponding tagged
+ // one.
+ constexpr static MachineRepresentation RepCompressedTagged() {
+#ifdef V8_COMPRESS_POINTERS
+ return MachineRepresentation::kCompressed;
+#else
+ return MachineRepresentation::kTagged;
+#endif
+ }
+ constexpr static MachineRepresentation RepCompressedTaggedSigned() {
+#ifdef V8_COMPRESS_POINTERS
+ return MachineRepresentation::kCompressedSigned;
+#else
+ return MachineRepresentation::kTaggedSigned;
+#endif
+ }
+ constexpr static MachineRepresentation RepCompressedTaggedPointer() {
+#ifdef V8_COMPRESS_POINTERS
+ return MachineRepresentation::kCompressedPointer;
+#else
+ return MachineRepresentation::kTaggedPointer;
+#endif
+ }
+
+ constexpr static MachineType TypeCompressedTagged() {
+#ifdef V8_COMPRESS_POINTERS
+ return MachineType::AnyCompressed();
+#else
+ return MachineType::AnyTagged();
+#endif
+ }
+ constexpr static MachineType TypeCompressedTaggedSigned() {
+#ifdef V8_COMPRESS_POINTERS
+ return MachineType::CompressedSigned();
+#else
+ return MachineType::TaggedSigned();
+#endif
+ }
+ constexpr static MachineType TypeCompressedTaggedPointer() {
+#ifdef V8_COMPRESS_POINTERS
+ return MachineType::CompressedPointer();
+#else
+ return MachineType::TaggedPointer();
+#endif
+ }
+
+ constexpr bool IsCompressedTagged() const {
+#ifdef V8_COMPRESS_POINTERS
+ return IsCompressed();
+#else
+ return IsTagged();
+#endif
+ }
+
+ constexpr bool IsCompressedTaggedSigned() const {
+#ifdef V8_COMPRESS_POINTERS
+ return IsCompressedSigned();
+#else
+ return IsTaggedSigned();
+#endif
+ }
+
+ constexpr bool IsCompressedTaggedPointer() const {
+#ifdef V8_COMPRESS_POINTERS
+ return IsCompressedPointer();
+#else
+ return IsTaggedPointer();
+#endif
+ }
+
static MachineType TypeForRepresentation(const MachineRepresentation& rep,
bool isSigned = true) {
switch (rep) {
@@ -333,6 +404,14 @@ inline bool IsAnyCompressed(MachineRepresentation rep) {
rep == MachineRepresentation::kCompressedSigned;
}
+inline bool IsAnyCompressedTagged(MachineRepresentation rep) {
+#ifdef V8_COMPRESS_POINTERS
+ return IsAnyCompressed(rep);
+#else
+ return IsAnyTagged(rep);
+#endif
+}
+
// Gets the log2 of the element size in bytes of the machine type.
V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) {
switch (rep) {
@@ -352,6 +431,7 @@ V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ return kSystemPointerSizeLog2;
case MachineRepresentation::kCompressedSigned:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
@@ -375,4 +455,4 @@ V8_EXPORT_PRIVATE inline constexpr int RepresentationBit(
} // namespace internal
} // namespace v8
-#endif // V8_MACHINE_TYPE_H_
+#endif // V8_CODEGEN_MACHINE_TYPE_H_
diff --git a/deps/v8/src/codegen/macro-assembler-inl.h b/deps/v8/src/codegen/macro-assembler-inl.h
new file mode 100644
index 0000000000..3e237efc6f
--- /dev/null
+++ b/deps/v8/src/codegen/macro-assembler-inl.h
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_MACRO_ASSEMBLER_INL_H_
+#define V8_CODEGEN_MACRO_ASSEMBLER_INL_H_
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+
+#if V8_TARGET_ARCH_ARM64
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#endif
+
+#endif // V8_CODEGEN_MACRO_ASSEMBLER_INL_H_
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index 440fb4a27e..29da269e8c 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -2,19 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MACRO_ASSEMBLER_H_
-#define V8_MACRO_ASSEMBLER_H_
+#ifndef V8_CODEGEN_MACRO_ASSEMBLER_H_
+#define V8_CODEGEN_MACRO_ASSEMBLER_H_
-#include "src/frames.h"
+#include "src/codegen/turbo-assembler.h"
+#include "src/execution/frames.h"
#include "src/heap/heap.h"
-#include "src/turbo-assembler.h"
// Helper types to make boolean flag easier to read at call-site.
-enum InvokeFlag {
- CALL_FUNCTION,
- JUMP_FUNCTION
-};
-
+enum InvokeFlag { CALL_FUNCTION, JUMP_FUNCTION };
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
@@ -35,27 +31,27 @@ enum AllocationFlags {
// This is the only place allowed to include the platform-specific headers.
#define INCLUDED_FROM_MACRO_ASSEMBLER_H
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/macro-assembler-ia32.h"
+#include "src/codegen/ia32/macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "src/x64/macro-assembler-x64.h"
+#include "src/codegen/x64/macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/constants-arm64.h"
-#include "src/arm64/macro-assembler-arm64.h"
+#include "src/codegen/arm64/constants-arm64.h"
+#include "src/codegen/arm64/macro-assembler-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/constants-arm.h"
-#include "src/arm/macro-assembler-arm.h"
+#include "src/codegen/arm/constants-arm.h"
+#include "src/codegen/arm/macro-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/constants-ppc.h"
-#include "src/ppc/macro-assembler-ppc.h"
+#include "src/codegen/ppc/constants-ppc.h"
+#include "src/codegen/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/constants-mips.h"
-#include "src/mips/macro-assembler-mips.h"
+#include "src/codegen/mips/constants-mips.h"
+#include "src/codegen/mips/macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/constants-mips64.h"
-#include "src/mips64/macro-assembler-mips64.h"
+#include "src/codegen/mips64/constants-mips64.h"
+#include "src/codegen/mips64/macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
-#include "src/s390/constants-s390.h"
-#include "src/s390/macro-assembler-s390.h"
+#include "src/codegen/s390/constants-s390.h"
+#include "src/codegen/s390/macro-assembler-s390.h"
#else
#error Unsupported target architecture.
#endif
@@ -168,11 +164,10 @@ class ConstantPoolUnavailableScope {
DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
};
-
-class AllowExternalCallThatCantCauseGC: public FrameScope {
+class AllowExternalCallThatCantCauseGC : public FrameScope {
public:
explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
- : FrameScope(masm, StackFrame::NONE) { }
+ : FrameScope(masm, StackFrame::NONE) {}
};
// Prevent the use of the RootArray during the lifetime of this
@@ -222,4 +217,4 @@ class ParameterCount {
} // namespace internal
} // namespace v8
-#endif // V8_MACRO_ASSEMBLER_H_
+#endif // V8_CODEGEN_MACRO_ASSEMBLER_H_
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/codegen/mips/assembler-mips-inl.h
index f134fb3e85..d8181ad8f5 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/codegen/mips/assembler-mips-inl.h
@@ -33,15 +33,14 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
+#ifndef V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_
+#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_
-#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
-#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#include "src/codegen/mips/assembler-mips.h"
-#include "src/mips/assembler-mips.h"
-
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -53,9 +52,7 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
-bool Operand::is_reg() const {
- return rm_.is_valid();
-}
+bool Operand::is_reg() const { return rm_.is_valid(); }
int32_t Operand::immediate() const {
DCHECK(!is_reg());
@@ -75,7 +72,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
-
Address RelocInfo::target_address() {
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
IsWasmCall(rmode_));
@@ -108,23 +104,15 @@ Address RelocInfo::target_address_address() {
}
}
-Address RelocInfo::constant_pool_entry_address() {
- UNREACHABLE();
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kSpecialTargetSize;
-}
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- !code.is_null() ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code.constant_pool() : kNullAddress,
+ target);
}
int Assembler::deserialization_special_target_size(
@@ -171,13 +159,17 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObject(rmode_));
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- if (IsCodeTarget(rmode_) || IsEmbeddedObject(rmode_)) {
+ if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
@@ -188,8 +180,8 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObject(rmode_));
- Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
@@ -227,7 +219,6 @@ Address RelocInfo::target_internal_reference() {
}
}
-
Address RelocInfo::target_internal_reference_address() {
DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
@@ -252,7 +243,7 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
@@ -283,14 +274,12 @@ Handle<Code> Assembler::relative_code_target_object_handle_at(
// -----------------------------------------------------------------------------
// Assembler.
-
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
-
void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
@@ -305,7 +294,6 @@ void Assembler::CheckForEmitInForbiddenSlot() {
}
}
-
void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
if (IsPrevInstrCompactBranch()) {
if (Instruction::IsForbiddenAfterBranchInstr(x)) {
@@ -355,4 +343,4 @@ EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#endif // V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 0d957d80ce..d6337aefb6 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -32,16 +32,16 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "src/mips/assembler-mips.h"
+#include "src/codegen/mips/assembler-mips.h"
#if V8_TARGET_ARCH_MIPS
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/deoptimizer.h"
-#include "src/mips/assembler-mips-inl.h"
+#include "src/codegen/mips/assembler-mips-inl.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/heap-number-inl.h"
-#include "src/string-constants.h"
namespace v8 {
namespace internal {
@@ -66,15 +66,14 @@ static unsigned CpuFeaturesImpliedByCompiler() {
return answer;
}
-
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
- // If the compiler is allowed to use fpu then we can use fpu too in our
- // code generation.
+ // If the compiler is allowed to use fpu then we can use fpu too in our
+ // code generation.
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= 1u << FPU;
@@ -117,71 +116,56 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#endif
}
-
-void CpuFeatures::PrintTarget() { }
-void CpuFeatures::PrintFeatures() { }
-
+void CpuFeatures::PrintTarget() {}
+void CpuFeatures::PrintFeatures() {}
int ToNumber(Register reg) {
DCHECK(reg.is_valid());
const int kNumbers[] = {
- 0, // zero_reg
- 1, // at
- 2, // v0
- 3, // v1
- 4, // a0
- 5, // a1
- 6, // a2
- 7, // a3
- 8, // t0
- 9, // t1
- 10, // t2
- 11, // t3
- 12, // t4
- 13, // t5
- 14, // t6
- 15, // t7
- 16, // s0
- 17, // s1
- 18, // s2
- 19, // s3
- 20, // s4
- 21, // s5
- 22, // s6
- 23, // s7
- 24, // t8
- 25, // t9
- 26, // k0
- 27, // k1
- 28, // gp
- 29, // sp
- 30, // fp
- 31, // ra
+ 0, // zero_reg
+ 1, // at
+ 2, // v0
+ 3, // v1
+ 4, // a0
+ 5, // a1
+ 6, // a2
+ 7, // a3
+ 8, // t0
+ 9, // t1
+ 10, // t2
+ 11, // t3
+ 12, // t4
+ 13, // t5
+ 14, // t6
+ 15, // t7
+ 16, // s0
+ 17, // s1
+ 18, // s2
+ 19, // s3
+ 20, // s4
+ 21, // s5
+ 22, // s6
+ 23, // s7
+ 24, // t8
+ 25, // t9
+ 26, // k0
+ 27, // k1
+ 28, // gp
+ 29, // sp
+ 30, // fp
+ 31, // ra
};
return kNumbers[reg.code()];
}
-
Register ToRegister(int num) {
DCHECK(num >= 0 && num < kNumRegisters);
const Register kRegisters[] = {
- zero_reg,
- at,
- v0, v1,
- a0, a1, a2, a3,
- t0, t1, t2, t3, t4, t5, t6, t7,
- s0, s1, s2, s3, s4, s5, s6, s7,
- t8, t9,
- k0, k1,
- gp,
- sp,
- fp,
- ra
- };
+ zero_reg, at, v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7, t8, t9, k0, k1, gp, sp, fp, ra};
return kRegisters[num];
}
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
@@ -197,10 +181,7 @@ bool RelocInfo::IsCodedSpecially() {
return true;
}
-
-bool RelocInfo::IsInConstantPool() {
- return false;
-}
+bool RelocInfo::IsInConstantPool() { return false; }
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
@@ -213,21 +194,21 @@ uint32_t RelocInfo::wasm_call_tag() const {
// See assembler-mips-inl.h for inlined constructors.
Operand::Operand(Handle<HeapObject> handle)
- : rm_(no_reg), rmode_(RelocInfo::EMBEDDED_OBJECT) {
+ : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
value_.immediate = static_cast<intptr_t>(handle.address());
}
Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(str);
return result;
@@ -237,9 +218,9 @@ MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
}
-
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
- OffsetAddend offset_addend) : Operand(rm) {
+ OffsetAddend offset_addend)
+ : Operand(rm) {
offset_ = unit * multiplier + offset_addend;
}
@@ -297,7 +278,7 @@ const Instr kSwRegFpNegOffsetPattern =
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
-const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
+const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
Assembler::Assembler(const AssemblerOptions& options,
@@ -312,7 +293,8 @@ Assembler::Assembler(const AssemblerOptions& options,
// We leave space (16 * kTrampolineSlotsSize)
// for BlockTrampolinePoolScope buffer.
next_buffer_check_ = FLAG_force_long_branches
- ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ ? kMaxInt
+ : kMaxBranchOffset - kTrampolineSlotsSize * 16;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
@@ -362,126 +344,88 @@ void Assembler::Align(int m) {
}
}
-
void Assembler::CodeTargetAlign() {
// No advantage to aligning branch/call targets to more than
// single instruction, that I am aware of.
Align(4);
}
-
Register Assembler::GetRtReg(Instr instr) {
return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}
-
Register Assembler::GetRsReg(Instr instr) {
return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}
-
Register Assembler::GetRdReg(Instr instr) {
return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}
-
uint32_t Assembler::GetRt(Instr instr) {
return (instr & kRtFieldMask) >> kRtShift;
}
-
-uint32_t Assembler::GetRtField(Instr instr) {
- return instr & kRtFieldMask;
-}
-
+uint32_t Assembler::GetRtField(Instr instr) { return instr & kRtFieldMask; }
uint32_t Assembler::GetRs(Instr instr) {
return (instr & kRsFieldMask) >> kRsShift;
}
-
-uint32_t Assembler::GetRsField(Instr instr) {
- return instr & kRsFieldMask;
-}
-
+uint32_t Assembler::GetRsField(Instr instr) { return instr & kRsFieldMask; }
uint32_t Assembler::GetRd(Instr instr) {
- return (instr & kRdFieldMask) >> kRdShift;
-}
-
-
-uint32_t Assembler::GetRdField(Instr instr) {
- return instr & kRdFieldMask;
+ return (instr & kRdFieldMask) >> kRdShift;
}
+uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }
uint32_t Assembler::GetSa(Instr instr) {
return (instr & kSaFieldMask) >> kSaShift;
}
+uint32_t Assembler::GetSaField(Instr instr) { return instr & kSaFieldMask; }
-uint32_t Assembler::GetSaField(Instr instr) {
- return instr & kSaFieldMask;
-}
-
-
-uint32_t Assembler::GetOpcodeField(Instr instr) {
- return instr & kOpcodeMask;
-}
-
+uint32_t Assembler::GetOpcodeField(Instr instr) { return instr & kOpcodeMask; }
uint32_t Assembler::GetFunction(Instr instr) {
return (instr & kFunctionFieldMask) >> kFunctionShift;
}
-
uint32_t Assembler::GetFunctionField(Instr instr) {
return instr & kFunctionFieldMask;
}
+uint32_t Assembler::GetImmediate16(Instr instr) { return instr & kImm16Mask; }
-uint32_t Assembler::GetImmediate16(Instr instr) {
- return instr & kImm16Mask;
-}
-
-
-uint32_t Assembler::GetLabelConst(Instr instr) {
- return instr & ~kImm16Mask;
-}
-
+uint32_t Assembler::GetLabelConst(Instr instr) { return instr & ~kImm16Mask; }
bool Assembler::IsPop(Instr instr) {
return (instr & ~kRtMask) == kPopRegPattern;
}
-
bool Assembler::IsPush(Instr instr) {
return (instr & ~kRtMask) == kPushRegPattern;
}
-
bool Assembler::IsSwRegFpOffset(Instr instr) {
return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}
-
bool Assembler::IsLwRegFpOffset(Instr instr) {
return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}
-
bool Assembler::IsSwRegFpNegOffset(Instr instr) {
return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
kSwRegFpNegOffsetPattern);
}
-
bool Assembler::IsLwRegFpNegOffset(Instr instr) {
return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
kLwRegFpNegOffsetPattern);
}
-
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -526,7 +470,7 @@ bool Assembler::IsMsaBranch(Instr instr) {
}
bool Assembler::IsBranch(Instr instr) {
- uint32_t opcode = GetOpcodeField(instr);
+ uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
uint32_t rs_field = GetRsField(instr);
// Checks if the instruction is a branch.
@@ -549,7 +493,6 @@ bool Assembler::IsBranch(Instr instr) {
return isBranch;
}
-
bool Assembler::IsBc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
// Checks if the instruction is a BC or BALC.
@@ -570,35 +513,25 @@ bool Assembler::IsBzc(Instr instr) {
(opcode == POP76 && GetRsField(instr) != 0);
}
-
bool Assembler::IsEmittedConstant(Instr instr) {
uint32_t label_constant = GetLabelConst(instr);
return label_constant == 0; // Emitted label const in reg-exp engine.
}
+bool Assembler::IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; }
-bool Assembler::IsBeq(Instr instr) {
- return GetOpcodeField(instr) == BEQ;
-}
-
-
-bool Assembler::IsBne(Instr instr) {
- return GetOpcodeField(instr) == BNE;
-}
-
+bool Assembler::IsBne(Instr instr) { return GetOpcodeField(instr) == BNE; }
bool Assembler::IsBeqzc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
return opcode == POP66 && GetRsField(instr) != 0;
}
-
bool Assembler::IsBnezc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
return opcode == POP76 && GetRsField(instr) != 0;
}
-
bool Assembler::IsBeqc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rs = GetRsField(instr);
@@ -606,7 +539,6 @@ bool Assembler::IsBeqc(Instr instr) {
return opcode == POP10 && rs != 0 && rs < rt; // && rt != 0
}
-
bool Assembler::IsBnec(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rs = GetRsField(instr);
@@ -621,14 +553,15 @@ bool Assembler::IsJicOrJialc(Instr instr) {
}
bool Assembler::IsJump(Instr instr) {
- uint32_t opcode = GetOpcodeField(instr);
+ uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
uint32_t rd_field = GetRdField(instr);
uint32_t function_field = GetFunctionField(instr);
// Checks if the instruction is a jump.
return opcode == J || opcode == JAL ||
- (opcode == SPECIAL && rt_field == 0 &&
- ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
+ (opcode == SPECIAL && rt_field == 0 &&
+ ((function_field == JALR) ||
+ (rd_field == 0 && (function_field == JR))));
}
bool Assembler::IsJ(Instr instr) {
@@ -637,35 +570,28 @@ bool Assembler::IsJ(Instr instr) {
return opcode == J;
}
-
-bool Assembler::IsJal(Instr instr) {
- return GetOpcodeField(instr) == JAL;
-}
-
+bool Assembler::IsJal(Instr instr) { return GetOpcodeField(instr) == JAL; }
bool Assembler::IsJr(Instr instr) {
- if (!IsMipsArchVariant(kMips32r6)) {
+ if (!IsMipsArchVariant(kMips32r6)) {
return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
} else {
- return GetOpcodeField(instr) == SPECIAL &&
- GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
+ return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) == 0 &&
+ GetFunctionField(instr) == JALR;
}
}
-
bool Assembler::IsJalr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL &&
- GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
+ return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) != 0 &&
+ GetFunctionField(instr) == JALR;
}
-
bool Assembler::IsLui(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
// Checks if the instruction is a load upper immediate.
return opcode == LUI;
}
-
bool Assembler::IsOri(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
// Checks if the instruction is a load upper immediate.
@@ -717,68 +643,57 @@ bool Assembler::IsNop(Instr instr, unsigned int type) {
Register nop_rt_reg = (type == 0) ? zero_reg : at;
bool ret = (opcode == SPECIAL && function == SLL &&
rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
- sa == type);
+ rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) && sa == type);
return ret;
}
-
int32_t Assembler::GetBranchOffset(Instr instr) {
DCHECK(IsBranch(instr));
return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
-
bool Assembler::IsLw(Instr instr) {
return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}
-
int16_t Assembler::GetLwOffset(Instr instr) {
DCHECK(IsLw(instr));
return ((instr & kImm16Mask));
}
-
Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
DCHECK(IsLw(instr));
// We actually create a new lw instruction based on the original one.
- Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
- | (offset & kImm16Mask);
+ Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) |
+ (offset & kImm16Mask);
return temp_instr;
}
-
bool Assembler::IsSw(Instr instr) {
return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}
-
Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
DCHECK(IsSw(instr));
return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}
-
bool Assembler::IsAddImmediate(Instr instr) {
return ((instr & kOpcodeMask) == ADDIU);
}
-
Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
DCHECK(IsAddImmediate(instr));
return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}
-
bool Assembler::IsAndImmediate(Instr instr) {
return GetOpcodeField(instr) == ANDI;
}
-
static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
if (IsMipsArchVariant(kMips32r6)) {
if (Assembler::IsBc(instr)) {
@@ -790,7 +705,6 @@ static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
return Assembler::OffsetSize::kOffset16;
}
-
static inline int32_t AddBranchOffset(int pos, Instr instr) {
int bits = OffsetSizeInBits(instr);
const int32_t mask = (1 << bits) - 1;
@@ -894,11 +808,11 @@ int Assembler::target_at(int pos, bool is_internal) {
if ((instr & ~kImm16Mask) == 0) {
// Emitted label constant, not part of a branch.
if (instr == 0) {
- return kEndOfChain;
- } else {
- int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- return (imm18 + pos);
- }
+ return kEndOfChain;
+ } else {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
}
// Check we have a branch or jump instruction.
DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra));
@@ -951,7 +865,6 @@ int Assembler::target_at(int pos, bool is_internal) {
return 0;
}
-
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
Instr instr) {
int32_t bits = OffsetSizeInBits(instr);
@@ -966,7 +879,6 @@ static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
return instr | (imm & mask);
}
-
void Assembler::target_at_put(int32_t pos, int32_t target_pos,
bool is_internal) {
Instr instr = instr_at(pos);
@@ -1099,7 +1011,6 @@ void Assembler::print(const Label* L) {
}
}
-
void Assembler::bind_to(Label* L, int pos) {
DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
int32_t trampoline_pos = kInvalidSlotPos;
@@ -1142,17 +1053,14 @@ void Assembler::bind_to(Label* L, int pos) {
// Keep track of the last bound label so we don't eliminate any instructions
// before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
+ if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
-
void Assembler::bind(Label* L) {
DCHECK(!L->is_bound()); // Label can only be bound once.
bind_to(L, pc_offset());
}
-
void Assembler::next(Label* L, bool is_internal) {
DCHECK(L->is_linked());
int link = target_at(L->pos(), is_internal);
@@ -1164,25 +1072,21 @@ void Assembler::next(Label* L, bool is_internal) {
}
}
-
bool Assembler::is_near(Label* L) {
DCHECK(L->is_bound());
return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}
-
bool Assembler::is_near(Label* L, OffsetSize bits) {
if (L == nullptr || !L->is_bound()) return true;
return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize;
}
-
bool Assembler::is_near_branch(Label* L) {
DCHECK(L->is_bound());
return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L);
}
-
int Assembler::BranchOffset(Instr instr) {
// At pre-R6 and for other R6 branches the offset is 16 bits.
int bits = OffsetSize::kOffset16;
@@ -1209,7 +1113,6 @@ int Assembler::BranchOffset(Instr instr) {
return (1 << (bits + 2 - 1)) - 1;
}
-
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
@@ -1218,91 +1121,67 @@ bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
return !RelocInfo::IsNone(rmode);
}
-void Assembler::GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- Register rd,
- uint16_t sa,
+void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
+ Register rd, uint16_t sa,
SecondaryField func) {
DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (sa << kSaShift) | func;
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa << kSaShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- uint16_t msb,
- uint16_t lsb,
+void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
+ uint16_t msb, uint16_t lsb,
SecondaryField func) {
DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (msb << kRdShift) | (lsb << kSaShift) | func;
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (msb << kRdShift) | (lsb << kSaShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
+void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt,
+ FPURegister ft, FPURegister fs, FPURegister fd,
SecondaryField func) {
DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
- Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
- | (fd.code() << kFdShift) | func;
+ Instr instr = opcode | fmt | (ft.code() << kFtShift) |
+ (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- FPURegister fr,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
+void Assembler::GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft,
+ FPURegister fs, FPURegister fd,
SecondaryField func) {
DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
- Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
- | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) |
+ (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPURegister fs,
- FPURegister fd,
+void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
+ FPURegister fs, FPURegister fd,
SecondaryField func) {
DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
- Instr instr = opcode | fmt | (rt.code() << kRtShift)
- | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ Instr instr = opcode | fmt | (rt.code() << kRtShift) |
+ (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPUControlRegister fs,
- SecondaryField func) {
+void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
+ FPUControlRegister fs, SecondaryField func) {
DCHECK(fs.is_valid() && rt.is_valid());
Instr instr =
opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
}
-
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
int32_t j,
CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (j & kImm16Mask);
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (j & kImm16Mask);
emit(instr, is_compact_branch);
}
@@ -1325,17 +1204,15 @@ void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
emit(instr, is_compact_branch);
}
-
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
int32_t j,
CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
- | (j & kImm16Mask);
+ Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
+ (j & kImm16Mask);
emit(instr, is_compact_branch);
}
-
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && (is_int21(offset21)));
@@ -1343,7 +1220,6 @@ void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
emit(instr, is_compact_branch);
}
-
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
uint32_t offset21) {
DCHECK(rs.is_valid() && (is_uint21(offset21)));
@@ -1351,7 +1227,6 @@ void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
emit(instr);
}
-
void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
CompactBranchType is_compact_branch) {
DCHECK(is_int26(offset26));
@@ -1359,9 +1234,7 @@ void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
emit(instr, is_compact_branch);
}
-
-void Assembler::GenInstrJump(Opcode opcode,
- uint32_t address) {
+void Assembler::GenInstrJump(Opcode opcode, uint32_t address) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint26(address));
Instr instr = opcode | address;
@@ -1497,7 +1370,7 @@ int32_t Assembler::get_trampoline_entry(int32_t pos) {
if (!internal_trampoline_exception_) {
if (trampoline_.start() > pos) {
- trampoline_entry = trampoline_.take_slot();
+ trampoline_entry = trampoline_.take_slot();
}
if (kInvalidSlotPos == trampoline_entry) {
@@ -1507,7 +1380,6 @@ int32_t Assembler::get_trampoline_entry(int32_t pos) {
return trampoline_entry;
}
-
uint32_t Assembler::jump_address(Label* L) {
int32_t target_pos;
@@ -1579,7 +1451,6 @@ int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
return offset;
}
-
void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
@@ -1605,52 +1476,40 @@ void Assembler::label_at_put(Label* L, int at_offset) {
}
}
-
//------- Branch and jump instructions --------
-void Assembler::b(int16_t offset) {
- beq(zero_reg, zero_reg, offset);
-}
-
-
-void Assembler::bal(int16_t offset) {
- bgezal(zero_reg, offset);
-}
+void Assembler::b(int16_t offset) { beq(zero_reg, zero_reg, offset); }
+void Assembler::bal(int16_t offset) { bgezal(zero_reg, offset); }
void Assembler::bc(int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::balc(int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::beq(Register rs, Register rt, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BEQ, rs, rt, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bgez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZ, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bgezc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs != zero_reg);
@@ -1659,7 +1518,6 @@ void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs != zero_reg);
@@ -1668,7 +1526,6 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgezal(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
DCHECK(rs != ra);
@@ -1677,14 +1534,12 @@ void Assembler::bgezal(Register rs, int16_t offset) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bgtz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BGTZ, rs, zero_reg, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bgtzc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
@@ -1692,14 +1547,12 @@ void Assembler::bgtzc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::blez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::blezc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
@@ -1707,14 +1560,12 @@ void Assembler::blezc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bltzc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs != zero_reg);
@@ -1723,7 +1574,6 @@ void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs != zero_reg);
@@ -1732,14 +1582,12 @@ void Assembler::bltc(Register rs, Register rt, int16_t offset) {
GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bltz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bltzal(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg);
DCHECK(rs != ra);
@@ -1748,14 +1596,12 @@ void Assembler::bltzal(Register rs, int16_t offset) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bne(Register rs, Register rt, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BNE, rs, rt, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
if (rs.code() >= rt.code()) {
@@ -1765,7 +1611,6 @@ void Assembler::bovc(Register rs, Register rt, int16_t offset) {
}
}
-
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
if (rs.code() >= rt.code()) {
@@ -1775,7 +1620,6 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
}
}
-
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
@@ -1784,7 +1628,6 @@ void Assembler::blezalc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
@@ -1792,7 +1635,6 @@ void Assembler::bgezalc(Register rt, int16_t offset) {
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK(!IsMipsArchVariant(kMips32r6));
DCHECK(rs != zero_reg);
@@ -1802,7 +1644,6 @@ void Assembler::bgezall(Register rs, int16_t offset) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
@@ -1810,7 +1651,6 @@ void Assembler::bltzalc(Register rt, int16_t offset) {
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
@@ -1819,7 +1659,6 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
@@ -1828,7 +1667,6 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rt != zero_reg);
@@ -1837,7 +1675,6 @@ void Assembler::bnezalc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
@@ -1848,14 +1685,12 @@ void Assembler::beqc(Register rs, Register rt, int16_t offset) {
}
}
-
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs != zero_reg);
GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
@@ -1866,14 +1701,12 @@ void Assembler::bnec(Register rs, Register rt, int16_t offset) {
}
}
-
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs != zero_reg);
GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
@@ -1887,7 +1720,6 @@ void Assembler::j(int32_t target) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::jr(Register rs) {
if (!IsMipsArchVariant(kMips32r6)) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1898,7 +1730,6 @@ void Assembler::jr(Register rs) {
}
}
-
void Assembler::jal(int32_t target) {
#ifdef DEBUG
// Get pc of delay slot.
@@ -1912,7 +1743,6 @@ void Assembler::jal(int32_t target) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::jalr(Register rs, Register rd) {
DCHECK(rs.code() != rd.code());
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1920,19 +1750,16 @@ void Assembler::jalr(Register rs, Register rd) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::jic(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrImmediate(POP66, zero_reg, rt, offset);
}
-
void Assembler::jialc(Register rt, int16_t offset) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrImmediate(POP76, zero_reg, rt, offset);
}
-
// -------Data-processing-instructions---------
// Arithmetic.
@@ -1941,17 +1768,14 @@ void Assembler::addu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
-
void Assembler::addiu(Register rd, Register rs, int32_t j) {
GenInstrImmediate(ADDIU, rs, rd, j);
}
-
void Assembler::subu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
-
void Assembler::mul(Register rd, Register rs, Register rt) {
if (!IsMipsArchVariant(kMips32r6)) {
GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
@@ -1960,113 +1784,92 @@ void Assembler::mul(Register rd, Register rs, Register rt) {
}
}
-
void Assembler::mulu(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}
-
void Assembler::muh(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}
-
void Assembler::muhu(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}
-
void Assembler::mod(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
-
void Assembler::modu(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
-
void Assembler::mult(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
-
void Assembler::multu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
-
void Assembler::div(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}
-
void Assembler::div(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}
-
void Assembler::divu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
-
void Assembler::divu(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
-
// Logical.
void Assembler::and_(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}
-
void Assembler::andi(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
GenInstrImmediate(ANDI, rs, rt, j);
}
-
void Assembler::or_(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}
-
void Assembler::ori(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
GenInstrImmediate(ORI, rs, rt, j);
}
-
void Assembler::xor_(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}
-
void Assembler::xori(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
GenInstrImmediate(XORI, rs, rt, j);
}
-
void Assembler::nor(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
-
// Shifts.
-void Assembler::sll(Register rd,
- Register rt,
- uint16_t sa,
+void Assembler::sll(Register rd, Register rt, uint16_t sa,
bool coming_from_nop) {
// Don't allow nop instructions in the form sll zero_reg, zero_reg to be
// generated using the sll instruction. They must be generated using
@@ -2075,52 +1878,44 @@ void Assembler::sll(Register rd,
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
-
void Assembler::sllv(Register rd, Register rt, Register rs) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}
-
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}
-
void Assembler::srlv(Register rd, Register rt, Register rs) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}
-
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}
-
void Assembler::srav(Register rd, Register rt, Register rs) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
-
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
}
-
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
}
-
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
DCHECK_LE(sa, 3);
@@ -2130,7 +1925,6 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
emit(instr);
}
-
// ------------Memory-instructions-------------
void Assembler::AdjustBaseAndOffset(MemOperand& src,
@@ -2253,35 +2047,30 @@ void Assembler::lb(Register rd, const MemOperand& rs) {
GenInstrImmediate(LB, source.rm(), rd, source.offset());
}
-
void Assembler::lbu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
GenInstrImmediate(LBU, source.rm(), rd, source.offset());
}
-
void Assembler::lh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
GenInstrImmediate(LH, source.rm(), rd, source.offset());
}
-
void Assembler::lhu(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
GenInstrImmediate(LHU, source.rm(), rd, source.offset());
}
-
void Assembler::lw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
GenInstrImmediate(LW, source.rm(), rd, source.offset());
}
-
void Assembler::lwl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
@@ -2289,7 +2078,6 @@ void Assembler::lwl(Register rd, const MemOperand& rs) {
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
-
void Assembler::lwr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
@@ -2297,28 +2085,24 @@ void Assembler::lwr(Register rd, const MemOperand& rs) {
GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
-
void Assembler::sb(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
GenInstrImmediate(SB, source.rm(), rd, source.offset());
}
-
void Assembler::sh(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
GenInstrImmediate(SH, source.rm(), rd, source.offset());
}
-
void Assembler::sw(Register rd, const MemOperand& rs) {
MemOperand source = rs;
AdjustBaseAndOffset(source);
GenInstrImmediate(SW, source.rm(), rd, source.offset());
}
-
void Assembler::swl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
@@ -2326,7 +2110,6 @@ void Assembler::swl(Register rd, const MemOperand& rs) {
GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
-
void Assembler::swr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
@@ -2374,7 +2157,6 @@ void Assembler::lui(Register rd, int32_t j) {
GenInstrImmediate(LUI, zero_reg, rd, j);
}
-
void Assembler::aui(Register rt, Register rs, int32_t j) {
// This instruction uses same opcode as 'lui'. The difference in encoding is
// 'lui' has zero reg. for rs field.
@@ -2393,7 +2175,6 @@ void Assembler::addiupc(Register rs, int32_t imm19) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
void Assembler::lwpc(Register rs, int32_t offset19) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs.is_valid() && is_int19(offset19));
@@ -2401,7 +2182,6 @@ void Assembler::lwpc(Register rs, int32_t offset19) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
void Assembler::auipc(Register rs, int16_t imm16) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs.is_valid());
@@ -2409,7 +2189,6 @@ void Assembler::auipc(Register rs, int16_t imm16) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
void Assembler::aluipc(Register rs, int16_t imm16) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(rs.is_valid());
@@ -2417,7 +2196,6 @@ void Assembler::aluipc(Register rs, int16_t imm16) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -2426,17 +2204,13 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
// We need to invalidate breaks that could be stops as well because the
// simulator expects a char pointer after the stop instruction.
// See constants-mips.h for explanation.
- DCHECK((break_as_stop &&
- code <= kMaxStopCode &&
- code > kMaxWatchpointCode) ||
- (!break_as_stop &&
- (code > kMaxStopCode ||
- code <= kMaxWatchpointCode)));
+ DCHECK(
+ (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
+ (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
Instr break_instr = SPECIAL | BREAK | (code << 6);
emit(break_instr);
}
-
void Assembler::stop(const char* msg, uint32_t code) {
DCHECK_GT(code, kMaxWatchpointCode);
DCHECK_LE(code, kMaxStopCode);
@@ -2447,23 +2221,20 @@ void Assembler::stop(const char* msg, uint32_t code) {
#endif
}
-
void Assembler::tge(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
- Instr instr = SPECIAL | TGE | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
+ Instr instr =
+ SPECIAL | TGE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
emit(instr);
}
-
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
- Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
+ Instr instr = SPECIAL | TGEU | rs.code() << kRsShift | rt.code() << kRtShift |
+ code << 6;
emit(instr);
}
-
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
Instr instr =
@@ -2471,16 +2242,13 @@ void Assembler::tlt(Register rs, Register rt, uint16_t code) {
emit(instr);
}
-
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
- Instr instr =
- SPECIAL | TLTU | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
+ Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | rt.code() << kRtShift |
+ code << 6;
emit(instr);
}
-
void Assembler::teq(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
Instr instr =
@@ -2488,7 +2256,6 @@ void Assembler::teq(Register rs, Register rt, uint16_t code) {
emit(instr);
}
-
void Assembler::tne(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
Instr instr =
@@ -2507,62 +2274,51 @@ void Assembler::mfhi(Register rd) {
GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}
-
void Assembler::mflo(Register rd) {
GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}
-
// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}
-
void Assembler::sltu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}
-
void Assembler::slti(Register rt, Register rs, int32_t j) {
GenInstrImmediate(SLTI, rs, rt, j);
}
-
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
GenInstrImmediate(SLTIU, rs, rt, j);
}
-
// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}
-
void Assembler::movn(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}
-
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
-
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
-
void Assembler::seleqz(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
-
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
if (!IsMipsArchVariant(kMips32r6)) {
@@ -2573,7 +2329,6 @@ void Assembler::clz(Register rd, Register rs) {
}
}
-
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
@@ -2581,7 +2336,6 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
-
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
@@ -2589,22 +2343,19 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
-
void Assembler::bitswap(Register rd, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}
-
void Assembler::pref(int32_t hint, const MemOperand& rs) {
DCHECK(!IsMipsArchVariant(kLoongson));
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
- Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
- | (rs.offset_);
+ Instr instr =
+ PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_);
emit(instr);
}
-
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(is_uint3(bp));
@@ -2637,56 +2388,46 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset());
}
-
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
MemOperand tmp = src;
AdjustBaseAndOffset(tmp);
GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset());
}
-
void Assembler::mtc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MTC1, rt, fs, f0);
}
-
void Assembler::mthc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}
-
void Assembler::mfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
-
void Assembler::mfhc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}
-
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CTC1, rt, fs);
}
-
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CFC1, rt, fs);
}
-
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}
-
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
-
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
@@ -2695,17 +2436,14 @@ void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}
-
void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
sel(S, fd, fs, ft);
}
-
void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
sel(D, fd, fs, ft);
}
-
void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
@@ -2713,13 +2451,11 @@ void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
}
-
void Assembler::selnez(Register rd, Register rs, Register rt) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
-
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
@@ -2727,94 +2463,78 @@ void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
-
void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
seleqz(D, fd, fs, ft);
}
-
void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
seleqz(S, fd, fs, ft);
}
-
void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
selnez(D, fd, fs, ft);
}
-
void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
selnez(S, fd, fs, ft);
}
-
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}
-
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
DCHECK(!IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}
-
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
-
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
-
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
-
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK(!IsMipsArchVariant(kMips32r6));
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
-
// Arithmetic.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, ADD_S);
}
-
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}
-
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, SUB_S);
}
-
void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}
-
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, MUL_S);
}
-
void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
@@ -2826,7 +2546,7 @@ void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
}
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
- FPURegister ft) {
+ FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}
@@ -2867,223 +2587,184 @@ void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, DIV_S);
}
-
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
-
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, ABS_S);
}
-
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}
-
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}
-
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}
-
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, NEG_S);
}
-
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
-
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S);
}
-
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
-
void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}
-
void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}
-
void Assembler::recip_d(FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}
-
void Assembler::recip_s(FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
-
// Conversions.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}
-
void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}
-
void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}
-
void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}
-
void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}
-
void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}
-
void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}
-
void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}
-
void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}
-
void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}
-
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
-
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK((fmt == D) || (fmt == S));
GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
-
void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
-
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
-
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
-
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
-
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
-
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}
-
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}
-
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}
-
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}
-
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}
-
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
-
void Assembler::class_s(FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}
-
void Assembler::class_d(FPURegister fd, FPURegister fs) {
DCHECK(IsMipsArchVariant(kMips32r6));
GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
-
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
@@ -3091,7 +2772,6 @@ void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
-
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
@@ -3099,7 +2779,6 @@ void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}
-
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
@@ -3107,7 +2786,6 @@ void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}
-
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
@@ -3115,92 +2793,76 @@ void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
-
void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
min(S, fd, fs, ft);
}
-
void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
min(D, fd, fs, ft);
}
-
void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
max(S, fd, fs, ft);
}
-
void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
max(D, fd, fs, ft);
}
-
void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
mina(S, fd, fs, ft);
}
-
void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
mina(D, fd, fs, ft);
}
-
void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
maxa(S, fd, fs, ft);
}
-
void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
maxa(D, fd, fs, ft);
}
-
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
-
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
-
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}
-
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}
-
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
IsFp64Mode());
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
-
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
-
// Conditions for >= MIPSr6.
-void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
- FPURegister fd, FPURegister fs, FPURegister ft) {
+void Assembler::cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
+ FPURegister fs, FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
- Instr instr = COP1 | fmt | ft.code() << kFtShift |
- fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
+ Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
+ fd.code() << kFdShift | (0 << 5) | cond;
emit(instr);
}
-
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
FPURegister ft) {
cmp(cond, W, fd, fs, ft);
@@ -3211,7 +2873,6 @@ void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
cmp(cond, L, fd, fs, ft);
}
-
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3220,7 +2881,6 @@ void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
DCHECK(IsMipsArchVariant(kMips32r6));
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3229,40 +2889,34 @@ void Assembler::bc1nez(int16_t offset, FPURegister ft) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
// Conditions for < MIPSr6.
-void Assembler::c(FPUCondition cond, SecondaryField fmt,
- FPURegister fs, FPURegister ft, uint16_t cc) {
+void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs,
+ FPURegister ft, uint16_t cc) {
DCHECK(is_uint3(cc));
DCHECK(fmt == S || fmt == D);
DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
- Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
- | cc << 8 | 3 << 4 | cond;
+ Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift | cc << 8 |
+ 3 << 4 | cond;
emit(instr);
}
-
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
uint16_t cc) {
c(cond, S, fs, ft, cc);
}
-
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
uint16_t cc) {
c(cond, D, fs, ft, cc);
}
-
-void Assembler::fcmp(FPURegister src1, const double src2,
- FPUCondition cond) {
+void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) {
DCHECK_EQ(src2, 0.0);
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
c(cond, D, src1, f14, 0);
}
-
void Assembler::bc1f(int16_t offset, uint16_t cc) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint3(cc));
@@ -3271,7 +2925,6 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bc1t(int16_t offset, uint16_t cc) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint3(cc));
@@ -3329,7 +2982,7 @@ MSA_BRANCH_LIST(MSA_BRANCH)
MSA_LD_ST_LIST(MSA_LD_ST)
#undef MSA_LD_ST
-#undef MSA_BRANCH_LIST
+#undef MSA_LD_ST_LIST
#define MSA_I10_LIST(V) \
V(ldi_b, I5_DF_b) \
@@ -3858,7 +3511,7 @@ void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
if (imm == kEndOfJumpChain) {
return;
}
- imm += pc_delta;
+ imm -= pc_delta;
DCHECK_EQ(imm & 3, 0);
PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2, ori_offset);
return;
@@ -3911,25 +3564,21 @@ void Assembler::GrowBuffer() {
DCHECK(!overflow());
}
-
void Assembler::db(uint8_t data) {
CheckForEmitInForbiddenSlot();
EmitHelper(data);
}
-
void Assembler::dd(uint32_t data) {
CheckForEmitInForbiddenSlot();
EmitHelper(data);
}
-
void Assembler::dq(uint64_t data) {
CheckForEmitInForbiddenSlot();
EmitHelper(data);
}
-
void Assembler::dd(Label* label) {
uint32_t data;
CheckForEmitInForbiddenSlot();
@@ -3944,7 +3593,6 @@ void Assembler::dd(Label* label) {
EmitHelper(data);
}
-
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
@@ -3958,7 +3606,6 @@ void Assembler::BlockTrampolinePoolFor(int instructions) {
BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
-
void Assembler::CheckTrampolinePool() {
// Some small sequences of instructions must not be broken up by the
// insertion of a trampoline pool; such sequences are protected by setting
@@ -3981,7 +3628,8 @@ void Assembler::CheckTrampolinePool() {
DCHECK_GE(unbound_labels_count_, 0);
if (unbound_labels_count_ > 0) {
// First we emit jump (2 instructions), then we emit trampoline pool.
- { BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label after_pool;
if (IsMipsArchVariant(kMips32r6)) {
bc(&after_pool);
@@ -4013,13 +3661,12 @@ void Assembler::CheckTrampolinePool() {
} else {
// Number of branches to unbound label at this point is zero, so we can
// move next buffer check to maximum.
- next_buffer_check_ = pc_offset() +
- kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ next_buffer_check_ =
+ pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
}
return;
}
-
Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
@@ -4050,7 +3697,6 @@ Address Assembler::target_address_at(Address pc) {
UNREACHABLE();
}
-
// On Mips, a target address is stored in a lui/ori instruction pair, each
// of which load 16 bits of the 32-bit address to a register.
// Patching the address must replace both instr, and flush the i-cache.
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/codegen/mips/assembler-mips.h
index 5cbf871630..640e11cf1a 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/codegen/mips/assembler-mips.h
@@ -32,19 +32,18 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-
-#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
-#define V8_MIPS_ASSEMBLER_MIPS_H_
+#ifndef V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
+#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
#include <stdio.h>
#include <set>
-#include "src/assembler.h"
-#include "src/external-reference.h"
-#include "src/label.h"
-#include "src/mips/constants-mips.h"
-#include "src/mips/register-mips.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/label.h"
+#include "src/codegen/mips/constants-mips.h"
+#include "src/codegen/mips/register-mips.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -98,7 +97,7 @@ class Operand {
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
- rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
@@ -121,25 +120,19 @@ class Operand {
// friend class MacroAssembler;
};
-
// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
// Immediate value attached to offset.
- enum OffsetAddend {
- offset_minus_one = -1,
- offset_zero = 0
- };
+ enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };
explicit MemOperand(Register rn, int32_t offset = 0);
explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
OffsetAddend offset_addend = offset_zero);
int32_t offset() const { return offset_; }
- bool OffsetIsInt16Encodable() const {
- return is_int16(offset_);
- }
+ bool OffsetIsInt16Encodable() const { return is_int16(offset_); }
private:
int32_t offset_;
@@ -159,7 +152,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler() { }
+ virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@@ -262,10 +255,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, uint32_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the lui/ori instructions etc.
@@ -318,14 +307,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// pair.
static constexpr int kInstructionsFor32BitConstant = 2;
- // Distance between the instruction referring to the address of the call
- // target and the return address.
-#ifdef _MIPS_ARCH_MIPS32R6
- static constexpr int kCallTargetAddressOffset = 2 * kInstrSize;
-#else
- static constexpr int kCallTargetAddressOffset = 4 * kInstrSize;
-#endif
-
// Max offset for instructions with 16-bit offset field
static constexpr int kMaxBranchOffset = (1 << (18 - 1)) - 1;
@@ -373,7 +354,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
sll(zero_reg, nop_rt_reg, type, true);
}
-
// --------Branch-and-jump-instructions----------
// We don't use likely variant of instructions.
void b(int16_t offset);
@@ -496,7 +476,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void jic(Register rt, int16_t offset);
void jialc(Register rt, int16_t offset);
-
// -------Data-processing-instructions---------
// Arithmetic.
@@ -571,12 +550,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void auipc(Register rs, int16_t imm16);
void aluipc(Register rs, int16_t imm16);
-
// ----------------Prefetch--------------------
void pref(int32_t hint, const MemOperand& rs);
-
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -737,8 +714,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvt_d_s(FPURegister fd, FPURegister fs);
// Conditions and branches for MIPSr6.
- void cmp(FPUCondition cond, SecondaryField fmt,
- FPURegister fd, FPURegister ft, FPURegister fs);
+ void cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
+ FPURegister ft, FPURegister fs);
void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
@@ -752,8 +729,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Conditions and branches for non MIPSr6.
- void c(FPUCondition cond, SecondaryField fmt,
- FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c(FPUCondition cond, SecondaryField fmt, FPURegister ft, FPURegister fs,
+ uint16_t cc = 0);
void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
@@ -1370,9 +1347,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockTrampolinePool();
}
- ~BlockTrampolinePoolScope() {
- assem_->EndBlockTrampolinePool();
- }
+ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
private:
Assembler* assem_;
@@ -1389,9 +1364,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockGrowBuffer();
}
- ~BlockGrowBufferScope() {
- assem_->EndBlockGrowBuffer();
- }
+ ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }
private:
Assembler* assem_;
@@ -1567,9 +1540,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
no_trampoline_pool_before_ = pc_offset;
}
- void StartBlockTrampolinePool() {
- trampoline_pool_blocked_nesting_++;
- }
+ void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
void EndBlockTrampolinePool() {
trampoline_pool_blocked_nesting_--;
@@ -1582,13 +1553,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return trampoline_pool_blocked_nesting_ > 0;
}
- bool has_exception() const {
- return internal_trampoline_exception_;
- }
+ bool has_exception() const { return internal_trampoline_exception_; }
- bool is_trampoline_emitted() const {
- return trampoline_emitted_;
- }
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
// Temporarily block automatic assembly buffer growth.
void StartBlockGrowBuffer() {
@@ -1601,9 +1568,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
block_buffer_growth_ = false;
}
- bool is_buffer_growth_blocked() const {
- return block_buffer_growth_;
- }
+ bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
void EmitForbiddenSlotInstruction() {
if (IsPrevInstrCompactBranch()) {
@@ -1696,12 +1661,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrRegister(Opcode opcode, Register rs, Register rt, Register rd,
uint16_t sa = 0, SecondaryField func = nullptrSF);
- void GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- uint16_t msb,
- uint16_t lsb,
- SecondaryField func);
+ void GenInstrRegister(Opcode opcode, Register rs, Register rt, uint16_t msb,
+ uint16_t lsb, SecondaryField func);
void GenInstrRegister(Opcode opcode, SecondaryField fmt, FPURegister ft,
FPURegister fs, FPURegister fd,
@@ -1737,9 +1698,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Opcode opcode, int32_t offset26,
CompactBranchType is_compact_branch = CompactBranchType::NO);
-
- void GenInstrJump(Opcode opcode,
- uint32_t address);
+ void GenInstrJump(Opcode opcode, uint32_t address);
// MSA
void GenInstrMsaI8(SecondaryField operation, uint32_t imm8, MSARegister ws,
@@ -1845,12 +1804,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
free_slot_count_ = slot_count;
end_ = start + slot_count * kTrampolineSlotsSize;
}
- int start() {
- return start_;
- }
- int end() {
- return end_;
- }
+ int start() { return start_; }
+ int end() { return end_; }
int take_slot() {
int trampoline_slot = kInvalidSlotPos;
if (free_slot_count_ <= 0) {
@@ -1928,8 +1883,7 @@ class UseScratchRegisterScope {
RegList old_available_;
};
-
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_ASSEMBLER_MIPS_H_
+#endif // V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/codegen/mips/constants-mips.cc
index 1955d593ed..4411387060 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/codegen/mips/constants-mips.cc
@@ -4,35 +4,21 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/mips/constants-mips.h"
+#include "src/codegen/mips/constants-mips.h"
namespace v8 {
namespace internal {
-
// -----------------------------------------------------------------------------
// Registers.
-
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
- "zero_reg",
- "at",
- "v0", "v1",
- "a0", "a1", "a2", "a3",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9",
- "k0", "k1",
- "gp",
- "sp",
- "fp",
- "ra",
- "LO", "HI",
- "pc"
-};
-
+ "zero_reg", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0",
+ "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1",
+ "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0",
+ "k1", "gp", "sp", "fp", "ra", "LO", "HI", "pc"};
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
@@ -52,7 +38,6 @@ const char* Registers::Name(int reg) {
return result;
}
-
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
@@ -74,13 +59,10 @@ int Registers::Number(const char* name) {
return kInvalidRegister;
}
-
const char* FPURegisters::names_[kNumFPURegisters] = {
- "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
- "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
- "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
-};
-
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
+ "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
@@ -96,7 +78,6 @@ const char* FPURegisters::Name(int creg) {
return result;
}
-
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegisters; i++) {
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/codegen/mips/constants-mips.h
index e1a3f2bb38..d2b3f6b08f 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/codegen/mips/constants-mips.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_CONSTANTS_MIPS_H_
-#define V8_MIPS_CONSTANTS_MIPS_H_
-#include "src/cpu-features.h"
+#ifndef V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_
+#define V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_
+#include "src/codegen/cpu-features.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
-#define UNIMPLEMENTED_MIPS() \
- v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+#define UNIMPLEMENTED_MIPS() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_MIPS()
@@ -24,45 +24,38 @@ enum ArchVariants {
};
#ifdef _MIPS_ARCH_MIPS32R2
- static const ArchVariants kArchVariant = kMips32r2;
+static const ArchVariants kArchVariant = kMips32r2;
#elif _MIPS_ARCH_MIPS32R6
- static const ArchVariants kArchVariant = kMips32r6;
+static const ArchVariants kArchVariant = kMips32r6;
#elif _MIPS_ARCH_LOONGSON
// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
// which predates (and is a subset of) the mips32r2 and r1 architectures.
- static const ArchVariants kArchVariant = kLoongson;
+static const ArchVariants kArchVariant = kLoongson;
#elif _MIPS_ARCH_MIPS32RX
// This flags referred to compatibility mode that creates universal code that
// can run on any MIPS32 architecture revision. The dynamically generated code
// by v8 is specialized for the MIPS host detected in runtime probing.
- static const ArchVariants kArchVariant = kMips32r1;
+static const ArchVariants kArchVariant = kMips32r1;
#else
- static const ArchVariants kArchVariant = kMips32r1;
+static const ArchVariants kArchVariant = kMips32r1;
#endif
-enum Endianness {
- kLittle,
- kBig
-};
+enum Endianness { kLittle, kBig };
#if defined(V8_TARGET_LITTLE_ENDIAN)
- static const Endianness kArchEndian = kLittle;
+static const Endianness kArchEndian = kLittle;
#elif defined(V8_TARGET_BIG_ENDIAN)
- static const Endianness kArchEndian = kBig;
+static const Endianness kArchEndian = kBig;
#else
#error Unknown endianness
#endif
-enum FpuMode {
- kFP32,
- kFP64,
- kFPXX
-};
+enum FpuMode { kFP32, kFP64, kFPXX };
#if defined(FPU_MODE_FP32)
- static const FpuMode kFpuMode = kFP32;
+static const FpuMode kFpuMode = kFP32;
#elif defined(FPU_MODE_FP64)
- static const FpuMode kFpuMode = kFP64;
+static const FpuMode kFpuMode = kFP64;
#elif defined(FPU_MODE_FPXX)
#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R6)
static const FpuMode kFpuMode = kFPXX;
@@ -73,11 +66,11 @@ static const FpuMode kFpuMode = kFPXX;
static const FpuMode kFpuMode = kFP32;
#endif
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+#if defined(__mips_hard_float) && __mips_hard_float != 0
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false;
-#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+#elif defined(__mips_soft_float) && __mips_soft_float != 0
// This flag is raised when -msoft-float is passed to the compiler.
// Although FPU is a base requirement for v8, soft-float ABI is used
// on soft-float systems with FPU kernel emulation.
@@ -101,8 +94,7 @@ const uint32_t kHoleNanLower32Offset = 4;
#define IsFpxxMode() (kFpuMode == kFPXX)
#ifndef _MIPS_ARCH_MIPS32RX
-#define IsMipsArchVariant(check) \
- (kArchVariant == check)
+#define IsMipsArchVariant(check) (kArchVariant == check)
#else
#define IsMipsArchVariant(check) \
(CpuFeatures::IsSupported(static_cast<CpuFeature>(check)))
@@ -203,11 +195,8 @@ const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit;
const uint32_t kFCSRFlagMask =
- kFCSRInexactFlagMask |
- kFCSRUnderflowFlagMask |
- kFCSROverflowFlagMask |
- kFCSRDivideByZeroFlagMask |
- kFCSRInvalidOpFlagMask;
+ kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
@@ -290,7 +279,7 @@ class MSARegisters {
// Instructions encoding constants.
// On MIPS all instructions are 32 bits.
-typedef int32_t Instr;
+using Instr = int32_t;
// Special Software Interrupt codes when used in the presence of the MIPS
// simulator.
@@ -310,22 +299,21 @@ const uint32_t kMaxWatchpointCode = 31;
const uint32_t kMaxStopCode = 127;
STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
-
// ----- Fields offset and length.
-const int kOpcodeShift = 26;
-const int kOpcodeBits = 6;
-const int kRsShift = 21;
-const int kRsBits = 5;
-const int kRtShift = 16;
-const int kRtBits = 5;
-const int kRdShift = 11;
-const int kRdBits = 5;
-const int kSaShift = 6;
-const int kSaBits = 5;
+const int kOpcodeShift = 26;
+const int kOpcodeBits = 6;
+const int kRsShift = 21;
+const int kRsBits = 5;
+const int kRtShift = 16;
+const int kRtBits = 5;
+const int kRdShift = 11;
+const int kRdBits = 5;
+const int kSaShift = 6;
+const int kSaBits = 5;
const int kLsaSaBits = 2;
const int kFunctionShift = 0;
-const int kFunctionBits = 6;
-const int kLuiShift = 16;
+const int kFunctionBits = 6;
+const int kLuiShift = 16;
const int kBp2Shift = 6;
const int kBp2Bits = 2;
const int kBaseShift = 21;
@@ -336,19 +324,19 @@ const int kBit6Bits = 1;
const int kImm9Shift = 7;
const int kImm9Bits = 9;
const int kImm16Shift = 0;
-const int kImm16Bits = 16;
+const int kImm16Bits = 16;
const int kImm18Shift = 0;
const int kImm18Bits = 18;
const int kImm19Shift = 0;
const int kImm19Bits = 19;
const int kImm21Shift = 0;
-const int kImm21Bits = 21;
+const int kImm21Bits = 21;
const int kImm26Shift = 0;
-const int kImm26Bits = 26;
+const int kImm26Bits = 26;
const int kImm28Shift = 0;
-const int kImm28Bits = 28;
+const int kImm28Bits = 28;
const int kImm32Shift = 0;
-const int kImm32Bits = 32;
+const int kImm32Bits = 32;
const int kMsaImm8Shift = 16;
const int kMsaImm8Bits = 8;
const int kMsaImm5Shift = 16;
@@ -362,20 +350,20 @@ const int kMsaImmMI10Bits = 10;
// and are therefore shifted by 2.
const int kImmFieldShift = 2;
-const int kFrBits = 5;
-const int kFrShift = 21;
-const int kFsShift = 11;
-const int kFsBits = 5;
-const int kFtShift = 16;
-const int kFtBits = 5;
-const int kFdShift = 6;
-const int kFdBits = 5;
-const int kFCccShift = 8;
-const int kFCccBits = 3;
-const int kFBccShift = 18;
-const int kFBccBits = 3;
-const int kFBtrueShift = 16;
-const int kFBtrueBits = 1;
+const int kFrBits = 5;
+const int kFrShift = 21;
+const int kFsShift = 11;
+const int kFsBits = 5;
+const int kFtShift = 16;
+const int kFtBits = 5;
+const int kFdShift = 6;
+const int kFdBits = 5;
+const int kFCccShift = 8;
+const int kFCccBits = 3;
+const int kFBccShift = 18;
+const int kFBccBits = 3;
+const int kFBtrueShift = 16;
+const int kFBtrueBits = 1;
const int kWtBits = 5;
const int kWtShift = 16;
const int kWsBits = 5;
@@ -1045,7 +1033,6 @@ enum Condition {
cc_default = kNoCondition
};
-
// Returns the equivalent of !cc.
// Negation of the default kNoCondition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
@@ -1055,7 +1042,6 @@ inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
-
inline Condition NegateFpuCondition(Condition cc) {
DCHECK(cc != cc_always);
switch (cc) {
@@ -1119,7 +1105,6 @@ enum MSABranchDF {
MSA_BRANCH_V
};
-
// ----- Coprocessor conditions.
enum FPUCondition {
kNoFPUCondition = -1,
@@ -1141,7 +1126,6 @@ enum FPUCondition {
NE = 0x13, // Ordered Greater Than or Less Than. on Mips >= 6 only.
};
-
// FPU rounding modes.
enum FPURoundingMode {
RN = 0 << 0, // Round to Nearest.
@@ -1176,15 +1160,9 @@ enum class MaxMinKind : int { kMin = 0, kMax = 1 };
// Branch hints are not used on the MIPS. They are defined so that they can
// appear in shared function signatures, but will be ignored in MIPS
// implementations.
-enum Hint {
- no_hint = 0
-};
-
-
-inline Hint NegateHint(Hint hint) {
- return no_hint;
-}
+enum Hint { no_hint = 0 };
+inline Hint NegateHint(Hint hint) { return no_hint; }
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -1244,16 +1222,13 @@ class InstructionBase {
}
// Read one particular bit out of the instruction bits.
- inline int Bit(int nr) const {
- return (InstructionBits() >> nr) & 1;
- }
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
}
-
static constexpr uint64_t kOpcodeImmediateTypeMask =
OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
@@ -1677,7 +1652,6 @@ class Instruction : public InstructionGetters<InstructionBase> {
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
-
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
@@ -1793,7 +1767,7 @@ InstructionBase::Type InstructionBase::InstructionType() const {
}
default:
- return kImmediateType;
+ return kImmediateType;
}
}
@@ -1924,4 +1898,4 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_CONSTANTS_MIPS_H_
+#endif // V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/codegen/mips/cpu-mips.cc
index a8feba60db..a7120d1c7a 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/codegen/mips/cpu-mips.cc
@@ -13,12 +13,11 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/cpu-features.h"
+#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
-
void CpuFeatures::FlushICache(void* start, size_t size) {
#if !defined(USE_SIMULATOR)
// Nothing to do, flushing no instructions.
@@ -28,10 +27,10 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#if defined(ANDROID)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
- char *end = reinterpret_cast<char *>(start) + size;
- cacheflush(
- reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
-#else // ANDROID
+ char* end = reinterpret_cast<char*>(start) + size;
+ cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
+ 0);
+#else // ANDROID
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
index 5ef0bd08ac..0a36e26577 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/codegen/mips/interface-descriptors-mips.cc
@@ -4,9 +4,9 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
@@ -61,6 +61,18 @@ void RecordWriteDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
}
+void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
+
+ data->RestrictAllocatableRegisters(default_stub_registers,
+ arraysize(default_stub_registers));
+
+ CHECK_LE(static_cast<size_t>(kParameterCount),
+ arraysize(default_stub_registers));
+ data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
+}
+
const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() {
return a1;
}
@@ -89,7 +101,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
@@ -239,10 +250,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- a1, // kApiFunctionAddress
- a2, // kArgc
- a3, // kCallData
- a0, // kHolder
+ a1, // kApiFunctionAddress
+ a2, // kArgc
+ a3, // kCallData
+ a0, // kHolder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index f4cfdbe33c..483b7e895b 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -6,29 +6,29 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
-#include "src/bootstrapper.h"
-#include "src/callable.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
-#include "src/external-reference-table.h"
-#include "src/frames-inl.h"
+#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
-#include "src/macro-assembler.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
-#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
-#include "src/mips/macro-assembler-mips.h"
+#include "src/codegen/mips/macro-assembler-mips.h"
#endif
namespace v8 {
@@ -133,7 +133,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)));
}
-
void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
@@ -155,34 +154,12 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Addu(fp, sp, Operand(offset));
}
-// Push and pop all registers that can hold pointers.
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of kNumSafepointRegisters values on the
- // stack, so adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK_GE(num_unsaved, 0);
- if (num_unsaved > 0) {
- Subu(sp, sp, Operand(num_unsaved * kPointerSize));
- }
- MultiPush(kSafepointSavedRegisters);
-}
-
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- MultiPop(kSafepointSavedRegisters);
- if (num_unsaved > 0) {
- Addu(sp, sp, Operand(num_unsaved * kPointerSize));
- }
-}
-
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
return kSafepointRegisterStackIndexMap[reg_code];
}
-
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
@@ -251,6 +228,32 @@ void TurboAssembler::RestoreRegisters(RegList registers) {
MultiPop(regs);
}
+void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
+ SaveFPRegsMode fp_mode) {
+ EphemeronKeyBarrierDescriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
+
+ SaveRegisters(registers);
+
+ Register object_parameter(
+ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
+ Register slot_parameter(descriptor.GetRegisterParameter(
+ EphemeronKeyBarrierDescriptor::kSlotAddress));
+ Register fp_mode_parameter(
+ descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
+
+ Push(object);
+ Push(address);
+
+ Pop(slot_parameter);
+ Pop(object_parameter);
+
+ Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
+ Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
+ RelocInfo::CODE_TARGET);
+ RestoreRegisters(registers);
+}
+
void TurboAssembler::CallRecordWriteStub(
Register object, Register address,
RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
@@ -346,9 +349,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- eq,
- &done);
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
// Record the actual write.
if (ra_status == kRAHasNotBeenSaved) {
@@ -938,7 +939,6 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
}
}
-
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
if (IsMipsArchVariant(kLoongson)) {
lw(zero_reg, rs);
@@ -1405,7 +1405,6 @@ void TurboAssembler::MultiPush(RegList regs) {
}
}
-
void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
@@ -1418,7 +1417,6 @@ void TurboAssembler::MultiPop(RegList regs) {
addiu(sp, sp, stack_offset);
}
-
void TurboAssembler::MultiPushFPU(RegList regs) {
int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -1432,7 +1430,6 @@ void TurboAssembler::MultiPushFPU(RegList regs) {
}
}
-
void TurboAssembler::MultiPopFPU(RegList regs) {
int16_t stack_offset = 0;
@@ -1459,8 +1456,7 @@ void TurboAssembler::AddPair(Register dst_low, Register dst_high,
}
void TurboAssembler::AddPair(Register dst_low, Register dst_high,
- Register left_low, Register left_high,
- int32_t imm,
+ Register left_low, Register left_high, int32_t imm,
Register scratch1, Register scratch2) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch3 = t8;
@@ -2675,13 +2671,10 @@ void TurboAssembler::Popcnt(Register rd, Register rs) {
srl(rd, rd, shift);
}
-void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
- DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
- Register except_flag,
- CheckForInexactConversion check_inexact) {
+void MacroAssembler::EmitFPUTruncate(
+ FPURoundingMode rounding_mode, Register result, DoubleRegister double_input,
+ Register scratch, DoubleRegister double_scratch, Register except_flag,
+ CheckForInexactConversion check_inexact) {
DCHECK(result != scratch);
DCHECK(double_input != double_scratch);
DCHECK(except_flag != scratch);
@@ -2758,8 +2751,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
cfc1(scratch, FCSR);
ctc1(scratch2, FCSR);
// Check for overflow and NaNs.
- And(scratch,
- scratch,
+ And(scratch, scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
Branch(done, eq, scratch, Operand(zero_reg));
@@ -2872,8 +2864,7 @@ void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
b(offset);
// Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+ if (bdslot == PROTECT) nop();
}
void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
@@ -2900,7 +2891,6 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
}
}
-
int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
@@ -3280,8 +3270,7 @@ bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
}
}
// Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+ if (bdslot == PROTECT) nop();
return true;
}
@@ -3376,8 +3365,7 @@ void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
bal(offset);
// Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+ if (bdslot == PROTECT) nop();
}
void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
@@ -3509,7 +3497,6 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
}
break;
-
// Unsigned comparison.
case Ugreater:
// rs > r2
@@ -3633,8 +3620,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
}
// Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+ if (bdslot == PROTECT) nop();
return true;
}
@@ -4101,7 +4087,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
Label skip;
if (cond != al) {
- Branch(&skip, NegateCondition(cond), reg, op);
+ Branch(&skip, NegateCondition(cond), reg, op);
}
Addu(sp, sp, Operand(count * kPointerSize));
@@ -4111,11 +4097,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
}
}
-
-
-void MacroAssembler::Swap(Register reg1,
- Register reg2,
- Register scratch) {
+void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
if (scratch == no_reg) {
Xor(reg1, reg1, Operand(reg2));
Xor(reg2, reg2, Operand(reg1));
@@ -4171,7 +4153,6 @@ void MacroAssembler::PushStackHandler() {
sw(sp, MemOperand(t2));
}
-
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(a1);
@@ -4258,7 +4239,6 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
-
// -----------------------------------------------------------------------------
// JavaScript invokes.
@@ -4508,18 +4488,15 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(a1, no_reg, expected, actual, flag);
}
-
// ---------------------------------------------------------------------------
// Support functions.
-void MacroAssembler::GetObjectType(Register object,
- Register map,
+void MacroAssembler::GetObjectType(Register object, Register map,
Register type_reg) {
lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
-
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -4686,7 +4663,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -4698,14 +4674,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-
// -----------------------------------------------------------------------------
// Debugging.
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
- if (emit_debug_code())
- Check(cc, reason, rs, rt);
+ if (emit_debug_code()) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@@ -4942,7 +4916,7 @@ int TurboAssembler::ActivationFrameAlignment() {
// Note: This will break if we ever start generating snapshots on one Mips
// platform for another Mips platform with a different alignment.
return base::OS::ActivationFrameAlignment();
-#else // V8_HOST_ARCH_MIPS
+#else // V8_HOST_ARCH_MIPS
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
@@ -4951,33 +4925,23 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_MIPS
}
-
void MacroAssembler::AssertStackIsAligned() {
if (emit_debug_code()) {
- const int frame_alignment = ActivationFrameAlignment();
- const int frame_alignment_mask = frame_alignment - 1;
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- Label alignment_as_expected;
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- andi(scratch, sp, frame_alignment_mask);
- Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
- // Don't use Check here, as it will call Runtime_Abort re-entering here.
- stop("Unexpected stack alignment");
- bind(&alignment_as_expected);
- }
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ // Don't use Check here, as it will call Runtime_Abort re-entering here.
+ stop("Unexpected stack alignment");
+ bind(&alignment_as_expected);
}
-}
-
-void MacroAssembler::UntagAndJumpIfSmi(Register dst,
- Register src,
- Label* smi_case) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- JumpIfSmi(src, smi_case, scratch, USE_DELAY_SLOT);
- SmiUntag(dst, src);
+ }
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
@@ -4987,28 +4951,13 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
-void MacroAssembler::JumpIfNotSmi(Register value,
- Label* not_smi_label,
- Register scratch,
- BranchDelaySlot bd) {
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
+ Register scratch, BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
-
-void MacroAssembler::JumpIfEitherSmi(Register reg1,
- Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(1, kSmiTagMask);
- // Both Smi tags must be 1 (not Smi).
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- and_(scratch, reg1, reg2);
- JumpIfSmi(scratch, on_either_smi);
-}
-
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -5019,7 +4968,6 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -5058,7 +5006,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
-
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5112,7 +5059,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
-
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1 == src2) {
@@ -5336,8 +5282,8 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
// mips, even though those argument slots are not normally used.
// Remaining arguments are pushed on the stack, above (higher address than)
// the argument slots.
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
@@ -5460,8 +5406,8 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
}
}
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -5470,7 +5416,6 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
}
}
-
#undef BRANCH_ARGS_CHECK
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
@@ -5481,11 +5426,8 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
-Register GetRegisterThatIsNotOneOf(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5,
Register reg6) {
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index 73b6b30ed6..f394e01769 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -6,13 +6,13 @@
#error This header must be included via macro-assembler.h
#endif
-#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
-#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#ifndef V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#define V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_
-#include "src/assembler.h"
-#include "src/contexts.h"
-#include "src/globals.h"
-#include "src/mips/assembler-mips.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/mips/assembler-mips.h"
+#include "src/common/globals.h"
+#include "src/objects/contexts.h"
namespace v8 {
namespace internal {
@@ -32,12 +32,8 @@ enum class AbortReason : uint8_t;
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
-
// Flags used for LeaveExitFrame function.
-enum LeaveExitFrameMode {
- EMIT_RETURN = true,
- NO_EMIT_RETURN = false
-};
+enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
// Flags used for the li macro-assembler function.
enum LiFlags {
@@ -49,13 +45,11 @@ enum LiFlags {
CONSTANT_SIZE = 1
};
-
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
-Register GetRegisterThatIsNotOneOf(Register reg1,
- Register reg2 = no_reg,
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
@@ -68,18 +62,15 @@ inline MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
-
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
-
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
@@ -131,22 +122,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Abort(AbortReason msg);
// Arguments macros.
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
#define COND_ARGS cond, r1, r2
// Cases when relocation is not needed.
-#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
- void Name(target_type target, BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, target_type target) { \
- Name(target, bd); \
- } \
- void Name(target_type target, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- COND_TYPED_ARGS) { \
- Name(target, COND_ARGS, bd); \
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target, BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, target_type target) { \
+ Name(target, bd); \
+ } \
+ void Name(target_type target, COND_TYPED_ARGS, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, target_type target, COND_TYPED_ARGS) { \
+ Name(target, COND_ARGS, bd); \
}
#define DECLARE_BRANCH_PROTOTYPES(Name) \
@@ -207,8 +195,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRelative(Register destination, int32_t offset) override;
// Jump, Call, and Ret pseudo instructions implementing inter-working.
-#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
- const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+#define COND_ARGS \
+ Condition cond = al, Register rs = zero_reg, \
+ const Operand &rt = Operand(zero_reg), \
+ BranchDelaySlot bd = PROTECT
void Jump(Register target, int16_t offset = 0, COND_ARGS);
void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS);
@@ -219,8 +209,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Call(Register target, int16_t offset = 0, COND_ARGS);
void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- void Call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Label* target);
@@ -249,25 +238,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
- Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
+ Register rs = zero_reg,
+ const Operand& rt = Operand(zero_reg)) {
Ret(cond, rs, rt, bd);
}
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
- void Drop(int count,
- Condition cond = cc_always,
- Register reg = no_reg,
+ void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
const Operand& op = Operand(no_reg));
// Trivial case of DropAndRet that utilizes the delay slot and only emits
// 2 instructions.
void DropAndRet(int drop);
- void DropAndRet(int drop,
- Condition cond,
- Register reg,
- const Operand& op);
+ void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
void push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
@@ -329,6 +314,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CallRecordWriteStub(Register object, Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode, Address wasm_target);
+ void CallEphemeronKeyBarrier(Register object, Register address,
+ SaveFPRegsMode fp_mode);
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
@@ -402,13 +389,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
-#define DEFINE_INSTRUCTION3(instr) \
- void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
- void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
- instr(rd_hi, rd_lo, rs, Operand(rt)); \
- } \
- void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
- instr(rd_hi, rd_lo, rs, Operand(j)); \
+#define DEFINE_INSTRUCTION3(instr) \
+ void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
+ void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
+ instr(rd_hi, rd_lo, rs, Operand(rt)); \
+ } \
+ void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
+ instr(rd_hi, rd_lo, rs, Operand(j)); \
}
DEFINE_INSTRUCTION(Addu)
@@ -550,8 +537,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch1, Register scratch2);
void AddPair(Register dst_low, Register dst_high, Register left_low,
- Register left_high, int32_t imm,
- Register scratch1, Register scratch2);
+ Register left_high, int32_t imm, Register scratch1,
+ Register scratch2);
void SubPair(Register dst_low, Register dst_high, Register left_low,
Register left_high, Register right_low, Register right_high,
@@ -961,11 +948,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Pref(int32_t hint, const MemOperand& rs);
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
-
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// The except_flag will contain any exceptions caused by the instruction.
@@ -1035,9 +1017,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// Support functions.
- void GetObjectType(Register function,
- Register map,
- Register type_reg);
+ void GetObjectType(Register function, Register map, Register type_reg);
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1077,17 +1057,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// StatsCounter support.
- void IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
// -------------------------------------------------------------------------
// Smi utilities.
- void SmiTag(Register reg) {
- Addu(reg, reg, reg);
- }
+ void SmiTag(Register reg) { Addu(reg, reg, reg); }
void SmiTag(Register dst, Register src) { Addu(dst, src, src); }
@@ -1096,19 +1074,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
And(scratch, value, Operand(kSmiTagMask));
}
- // Untag the source value into destination and jump if source is a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value,
- Label* not_smi_label,
- Register scratch = at,
+ void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
BranchDelaySlot bd = PROTECT);
- // Jump if either of the registers contain a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
-
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -1131,12 +1100,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- template<typename Field>
+ template <typename Field>
void DecodeField(Register dst, Register src) {
Ext(dst, src, Field::kShift, Field::kSize);
}
- template<typename Field>
+ template <typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
@@ -1189,4 +1158,4 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
+#endif // V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_
diff --git a/deps/v8/src/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h
index 24ddd588a9..0c8496c6ce 100644
--- a/deps/v8/src/mips/register-mips.h
+++ b/deps/v8/src/codegen/mips/register-mips.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_REGISTER_MIPS_H_
-#define V8_MIPS_REGISTER_MIPS_H_
+#ifndef V8_CODEGEN_MIPS_REGISTER_MIPS_H_
+#define V8_CODEGEN_MIPS_REGISTER_MIPS_H_
-#include "src/mips/constants-mips.h"
-#include "src/register.h"
-#include "src/reglist.h"
+#include "src/codegen/mips/constants-mips.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
namespace v8 {
namespace internal {
@@ -260,9 +260,9 @@ class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
// but it is not in common use. Someday we will want to support this in v8.)
// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef FPURegister FloatRegister;
+using FloatRegister = FPURegister;
-typedef FPURegister DoubleRegister;
+using DoubleRegister = FPURegister;
#define DECLARE_DOUBLE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
@@ -272,7 +272,7 @@ DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// SIMD registers.
-typedef MSARegister Simd128Register;
+using Simd128Register = MSARegister;
#define DECLARE_SIMD128_REGISTER(R) \
constexpr Simd128Register R = Simd128Register::from_code<kMsaCode_##R>();
@@ -379,4 +379,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_REGISTER_MIPS_H_
+#endif // V8_CODEGEN_MIPS_REGISTER_MIPS_H_
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
index acbf3cb1a1..7b9946d16e 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64-inl.h
@@ -33,14 +33,14 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#ifndef V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
-#define V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
+#ifndef V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_INL_H_
+#define V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_INL_H_
-#include "src/mips64/assembler-mips64.h"
+#include "src/codegen/mips64/assembler-mips64.h"
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -52,9 +52,7 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
-bool Operand::is_reg() const {
- return rm_.is_valid();
-}
+bool Operand::is_reg() const { return rm_.is_valid(); }
int64_t Operand::immediate() const {
DCHECK(!is_reg());
@@ -72,7 +70,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
-
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@@ -97,24 +94,15 @@ Address RelocInfo::target_address_address() {
return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
}
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
-Address RelocInfo::constant_pool_entry_address() {
- UNREACHABLE();
-}
-
-
-int RelocInfo::target_address_size() {
- return Assembler::kSpecialTargetSize;
-}
-
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- !code.is_null() ? code->constant_pool() : kNullAddress, target);
+ !code.is_null() ? code.constant_pool() : kNullAddress,
+ target);
}
int Assembler::deserialization_special_target_size(
@@ -150,13 +138,17 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
@@ -164,8 +156,8 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
@@ -198,7 +190,6 @@ Address RelocInfo::target_internal_reference() {
}
}
-
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
return pc_;
@@ -223,7 +214,7 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
@@ -239,14 +230,12 @@ void RelocInfo::WipeOut() {
// -----------------------------------------------------------------------------
// Assembler.
-
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
-
void Assembler::CheckForEmitInForbiddenSlot() {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
@@ -261,7 +250,6 @@ void Assembler::CheckForEmitInForbiddenSlot() {
}
}
-
void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) {
if (IsPrevInstrCompactBranch()) {
if (Instruction::IsForbiddenAfterBranchInstr(x)) {
@@ -306,7 +294,6 @@ void Assembler::emit(Instr x, CompactBranchType is_compact_branch) {
EmitHelper(x, is_compact_branch);
}
-
void Assembler::emit(uint64_t data) {
CheckForEmitInForbiddenSlot();
EmitHelper(data);
@@ -317,4 +304,4 @@ EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
-#endif // V8_MIPS64_ASSEMBLER_MIPS64_INL_H_
+#endif // V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_INL_H_
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/codegen/mips64/assembler-mips64.cc
index 3518b2c657..cb8e3dd7d1 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.cc
@@ -32,20 +32,19 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#include "src/mips64/assembler-mips64.h"
+#include "src/codegen/mips64/assembler-mips64.h"
#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
-#include "src/deoptimizer.h"
-#include "src/mips64/assembler-mips64-inl.h"
+#include "src/codegen/mips64/assembler-mips64-inl.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/objects/heap-number-inl.h"
-#include "src/string-constants.h"
namespace v8 {
namespace internal {
-
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
@@ -66,15 +65,14 @@ static unsigned CpuFeaturesImpliedByCompiler() {
return answer;
}
-
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
- // If the compiler is allowed to use fpu then we can use fpu too in our
- // code generation.
+ // If the compiler is allowed to use fpu then we can use fpu too in our
+ // code generation.
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= 1u << FPU;
@@ -95,71 +93,56 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#endif
}
-
-void CpuFeatures::PrintTarget() { }
-void CpuFeatures::PrintFeatures() { }
-
+void CpuFeatures::PrintTarget() {}
+void CpuFeatures::PrintFeatures() {}
int ToNumber(Register reg) {
DCHECK(reg.is_valid());
const int kNumbers[] = {
- 0, // zero_reg
- 1, // at
- 2, // v0
- 3, // v1
- 4, // a0
- 5, // a1
- 6, // a2
- 7, // a3
- 8, // a4
- 9, // a5
- 10, // a6
- 11, // a7
- 12, // t0
- 13, // t1
- 14, // t2
- 15, // t3
- 16, // s0
- 17, // s1
- 18, // s2
- 19, // s3
- 20, // s4
- 21, // s5
- 22, // s6
- 23, // s7
- 24, // t8
- 25, // t9
- 26, // k0
- 27, // k1
- 28, // gp
- 29, // sp
- 30, // fp
- 31, // ra
+ 0, // zero_reg
+ 1, // at
+ 2, // v0
+ 3, // v1
+ 4, // a0
+ 5, // a1
+ 6, // a2
+ 7, // a3
+ 8, // a4
+ 9, // a5
+ 10, // a6
+ 11, // a7
+ 12, // t0
+ 13, // t1
+ 14, // t2
+ 15, // t3
+ 16, // s0
+ 17, // s1
+ 18, // s2
+ 19, // s3
+ 20, // s4
+ 21, // s5
+ 22, // s6
+ 23, // s7
+ 24, // t8
+ 25, // t9
+ 26, // k0
+ 27, // k1
+ 28, // gp
+ 29, // sp
+ 30, // fp
+ 31, // ra
};
return kNumbers[reg.code()];
}
-
Register ToRegister(int num) {
DCHECK(num >= 0 && num < kNumRegisters);
const Register kRegisters[] = {
- zero_reg,
- at,
- v0, v1,
- a0, a1, a2, a3, a4, a5, a6, a7,
- t0, t1, t2, t3,
- s0, s1, s2, s3, s4, s5, s6, s7,
- t8, t9,
- k0, k1,
- gp,
- sp,
- fp,
- ra
- };
+ zero_reg, at, v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3,
+ s0, s1, s2, s3, s4, s5, s6, s7, t8, t9, k0, k1, gp, sp, fp, ra};
return kRegisters[num];
}
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
@@ -174,10 +157,7 @@ bool RelocInfo::IsCodedSpecially() {
return true;
}
-
-bool RelocInfo::IsInConstantPool() {
- return false;
-}
+bool RelocInfo::IsInConstantPool() { return false; }
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
@@ -190,21 +170,21 @@ uint32_t RelocInfo::wasm_call_tag() const {
// See assembler-mips-inl.h for inlined constructors.
Operand::Operand(Handle<HeapObject> handle)
- : rm_(no_reg), rmode_(RelocInfo::EMBEDDED_OBJECT) {
+ : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
value_.immediate = static_cast<intptr_t>(handle.address());
}
Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(str);
return result;
@@ -214,7 +194,6 @@ MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
}
-
MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
OffsetAddend offset_addend)
: Operand(rm) {
@@ -274,7 +253,7 @@ const Instr kSwRegFpNegOffsetPattern =
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
-const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
+const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
Assembler::Assembler(const AssemblerOptions& options,
@@ -289,7 +268,8 @@ Assembler::Assembler(const AssemblerOptions& options,
// We leave space (16 * kTrampolineSlotsSize)
// for BlockTrampolinePoolScope buffer.
next_buffer_check_ = FLAG_force_long_branches
- ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ ? kMaxInt
+ : kMaxBranchOffset - kTrampolineSlotsSize * 16;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
@@ -339,126 +319,88 @@ void Assembler::Align(int m) {
}
}
-
void Assembler::CodeTargetAlign() {
// No advantage to aligning branch/call targets to more than
// single instruction, that I am aware of.
Align(4);
}
-
Register Assembler::GetRtReg(Instr instr) {
return Register::from_code((instr & kRtFieldMask) >> kRtShift);
}
-
Register Assembler::GetRsReg(Instr instr) {
return Register::from_code((instr & kRsFieldMask) >> kRsShift);
}
-
Register Assembler::GetRdReg(Instr instr) {
return Register::from_code((instr & kRdFieldMask) >> kRdShift);
}
-
uint32_t Assembler::GetRt(Instr instr) {
return (instr & kRtFieldMask) >> kRtShift;
}
-
-uint32_t Assembler::GetRtField(Instr instr) {
- return instr & kRtFieldMask;
-}
-
+uint32_t Assembler::GetRtField(Instr instr) { return instr & kRtFieldMask; }
uint32_t Assembler::GetRs(Instr instr) {
return (instr & kRsFieldMask) >> kRsShift;
}
-
-uint32_t Assembler::GetRsField(Instr instr) {
- return instr & kRsFieldMask;
-}
-
+uint32_t Assembler::GetRsField(Instr instr) { return instr & kRsFieldMask; }
uint32_t Assembler::GetRd(Instr instr) {
- return (instr & kRdFieldMask) >> kRdShift;
-}
-
-
-uint32_t Assembler::GetRdField(Instr instr) {
- return instr & kRdFieldMask;
+ return (instr & kRdFieldMask) >> kRdShift;
}
+uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }
uint32_t Assembler::GetSa(Instr instr) {
return (instr & kSaFieldMask) >> kSaShift;
}
+uint32_t Assembler::GetSaField(Instr instr) { return instr & kSaFieldMask; }
-uint32_t Assembler::GetSaField(Instr instr) {
- return instr & kSaFieldMask;
-}
-
-
-uint32_t Assembler::GetOpcodeField(Instr instr) {
- return instr & kOpcodeMask;
-}
-
+uint32_t Assembler::GetOpcodeField(Instr instr) { return instr & kOpcodeMask; }
uint32_t Assembler::GetFunction(Instr instr) {
return (instr & kFunctionFieldMask) >> kFunctionShift;
}
-
uint32_t Assembler::GetFunctionField(Instr instr) {
return instr & kFunctionFieldMask;
}
+uint32_t Assembler::GetImmediate16(Instr instr) { return instr & kImm16Mask; }
-uint32_t Assembler::GetImmediate16(Instr instr) {
- return instr & kImm16Mask;
-}
-
-
-uint32_t Assembler::GetLabelConst(Instr instr) {
- return instr & ~kImm16Mask;
-}
-
+uint32_t Assembler::GetLabelConst(Instr instr) { return instr & ~kImm16Mask; }
bool Assembler::IsPop(Instr instr) {
return (instr & ~kRtMask) == kPopRegPattern;
}
-
bool Assembler::IsPush(Instr instr) {
return (instr & ~kRtMask) == kPushRegPattern;
}
-
bool Assembler::IsSwRegFpOffset(Instr instr) {
return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}
-
bool Assembler::IsLwRegFpOffset(Instr instr) {
return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}
-
bool Assembler::IsSwRegFpNegOffset(Instr instr) {
return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
kSwRegFpNegOffsetPattern);
}
-
bool Assembler::IsLwRegFpNegOffset(Instr instr) {
return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
kLwRegFpNegOffsetPattern);
}
-
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -503,7 +445,7 @@ bool Assembler::IsMsaBranch(Instr instr) {
}
bool Assembler::IsBranch(Instr instr) {
- uint32_t opcode = GetOpcodeField(instr);
+ uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
uint32_t rs_field = GetRsField(instr);
// Checks if the instruction is a branch.
@@ -526,7 +468,6 @@ bool Assembler::IsBranch(Instr instr) {
return isBranch;
}
-
bool Assembler::IsBc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
// Checks if the instruction is a BC or BALC.
@@ -547,35 +488,25 @@ bool Assembler::IsBzc(Instr instr) {
(opcode == POP76 && GetRsField(instr) != 0);
}
-
bool Assembler::IsEmittedConstant(Instr instr) {
uint32_t label_constant = GetLabelConst(instr);
return label_constant == 0; // Emitted label const in reg-exp engine.
}
+bool Assembler::IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; }
-bool Assembler::IsBeq(Instr instr) {
- return GetOpcodeField(instr) == BEQ;
-}
-
-
-bool Assembler::IsBne(Instr instr) {
- return GetOpcodeField(instr) == BNE;
-}
-
+bool Assembler::IsBne(Instr instr) { return GetOpcodeField(instr) == BNE; }
bool Assembler::IsBeqzc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
return opcode == POP66 && GetRsField(instr) != 0;
}
-
bool Assembler::IsBnezc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
return opcode == POP76 && GetRsField(instr) != 0;
}
-
bool Assembler::IsBeqc(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rs = GetRsField(instr);
@@ -583,7 +514,6 @@ bool Assembler::IsBeqc(Instr instr) {
return opcode == POP10 && rs != 0 && rs < rt; // && rt != 0
}
-
bool Assembler::IsBnec(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
uint32_t rs = GetRsField(instr);
@@ -606,53 +536,45 @@ bool Assembler::IsMov(Instr instr, Register rd, Register rs) {
}
bool Assembler::IsJump(Instr instr) {
- uint32_t opcode = GetOpcodeField(instr);
+ uint32_t opcode = GetOpcodeField(instr);
uint32_t rt_field = GetRtField(instr);
uint32_t rd_field = GetRdField(instr);
uint32_t function_field = GetFunctionField(instr);
// Checks if the instruction is a jump.
return opcode == J || opcode == JAL ||
- (opcode == SPECIAL && rt_field == 0 &&
- ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
+ (opcode == SPECIAL && rt_field == 0 &&
+ ((function_field == JALR) ||
+ (rd_field == 0 && (function_field == JR))));
}
-
bool Assembler::IsJ(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
// Checks if the instruction is a jump.
return opcode == J;
}
-
-bool Assembler::IsJal(Instr instr) {
- return GetOpcodeField(instr) == JAL;
-}
-
+bool Assembler::IsJal(Instr instr) { return GetOpcodeField(instr) == JAL; }
bool Assembler::IsJr(Instr instr) {
return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}
-
bool Assembler::IsJalr(Instr instr) {
return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}
-
bool Assembler::IsLui(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
// Checks if the instruction is a load upper immediate.
return opcode == LUI;
}
-
bool Assembler::IsOri(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
// Checks if the instruction is a load upper immediate.
return opcode == ORI;
}
-
bool Assembler::IsNop(Instr instr, unsigned int type) {
// See Assembler::nop(type).
DCHECK_LT(type, 32);
@@ -670,68 +592,57 @@ bool Assembler::IsNop(Instr instr, unsigned int type) {
Register nop_rt_reg = (type == 0) ? zero_reg : at;
bool ret = (opcode == SPECIAL && function == SLL &&
rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
- rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
- sa == type);
+ rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) && sa == type);
return ret;
}
-
int32_t Assembler::GetBranchOffset(Instr instr) {
DCHECK(IsBranch(instr));
return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
-
bool Assembler::IsLw(Instr instr) {
return (static_cast<uint32_t>(instr & kOpcodeMask) == LW);
}
-
int16_t Assembler::GetLwOffset(Instr instr) {
DCHECK(IsLw(instr));
return ((instr & kImm16Mask));
}
-
Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
DCHECK(IsLw(instr));
// We actually create a new lw instruction based on the original one.
- Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
- | (offset & kImm16Mask);
+ Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) |
+ (offset & kImm16Mask);
return temp_instr;
}
-
bool Assembler::IsSw(Instr instr) {
return (static_cast<uint32_t>(instr & kOpcodeMask) == SW);
}
-
Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
DCHECK(IsSw(instr));
return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}
-
bool Assembler::IsAddImmediate(Instr instr) {
return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}
-
Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
DCHECK(IsAddImmediate(instr));
return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}
-
bool Assembler::IsAndImmediate(Instr instr) {
return GetOpcodeField(instr) == ANDI;
}
-
static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
if (kArchVariant == kMips64r6) {
if (Assembler::IsBc(instr)) {
@@ -743,7 +654,6 @@ static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
return Assembler::OffsetSize::kOffset16;
}
-
static inline int32_t AddBranchOffset(int pos, Instr instr) {
int bits = OffsetSizeInBits(instr);
const int32_t mask = (1 << bits) - 1;
@@ -761,7 +671,6 @@ static inline int32_t AddBranchOffset(int pos, Instr instr) {
}
}
-
int Assembler::target_at(int pos, bool is_internal) {
if (is_internal) {
int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
@@ -780,11 +689,11 @@ int Assembler::target_at(int pos, bool is_internal) {
if ((instr & ~kImm16Mask) == 0) {
// Emitted label constant, not part of a branch.
if (instr == 0) {
- return kEndOfChain;
- } else {
- int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- return (imm18 + pos);
- }
+ return kEndOfChain;
+ } else {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
}
// Check we have a branch or jump instruction.
DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr) ||
@@ -859,7 +768,6 @@ int Assembler::target_at(int pos, bool is_internal) {
}
}
-
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
Instr instr) {
int32_t bits = OffsetSizeInBits(instr);
@@ -874,7 +782,6 @@ static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
return instr | (imm & mask);
}
-
void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
if (is_internal) {
uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
@@ -1024,7 +931,6 @@ void Assembler::print(const Label* L) {
}
}
-
void Assembler::bind_to(Label* L, int pos) {
DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
int trampoline_pos = kInvalidSlotPos;
@@ -1069,17 +975,14 @@ void Assembler::bind_to(Label* L, int pos) {
// Keep track of the last bound label so we don't eliminate any instructions
// before a bound label.
- if (pos > last_bound_pos_)
- last_bound_pos_ = pos;
+ if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
-
void Assembler::bind(Label* L) {
DCHECK(!L->is_bound()); // Label can only be bound once.
bind_to(L, pc_offset());
}
-
void Assembler::next(Label* L, bool is_internal) {
DCHECK(L->is_linked());
int link = target_at(L->pos(), is_internal);
@@ -1091,26 +994,22 @@ void Assembler::next(Label* L, bool is_internal) {
}
}
-
bool Assembler::is_near(Label* L) {
DCHECK(L->is_bound());
return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
}
-
bool Assembler::is_near(Label* L, OffsetSize bits) {
if (L == nullptr || !L->is_bound()) return true;
return ((pc_offset() - L->pos()) <
(1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
}
-
bool Assembler::is_near_branch(Label* L) {
DCHECK(L->is_bound());
return kArchVariant == kMips64r6 ? is_near_r6(L) : is_near_pre_r6(L);
}
-
int Assembler::BranchOffset(Instr instr) {
// At pre-R6 and for other R6 branches the offset is 16 bits.
int bits = OffsetSize::kOffset16;
@@ -1137,7 +1036,6 @@ int Assembler::BranchOffset(Instr instr) {
return (1 << (bits + 2 - 1)) - 1;
}
-
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
@@ -1146,91 +1044,67 @@ bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
return !RelocInfo::IsNone(rmode);
}
-void Assembler::GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- Register rd,
- uint16_t sa,
+void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
+ Register rd, uint16_t sa,
SecondaryField func) {
DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (sa << kSaShift) | func;
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa << kSaShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- uint16_t msb,
- uint16_t lsb,
+void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt,
+ uint16_t msb, uint16_t lsb,
SecondaryField func) {
DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (msb << kRdShift) | (lsb << kSaShift) | func;
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (msb << kRdShift) | (lsb << kSaShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
+void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt,
+ FPURegister ft, FPURegister fs, FPURegister fd,
SecondaryField func) {
DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
- Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
- | (fd.code() << kFdShift) | func;
+ Instr instr = opcode | fmt | (ft.code() << kFtShift) |
+ (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- FPURegister fr,
- FPURegister ft,
- FPURegister fs,
- FPURegister fd,
+void Assembler::GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft,
+ FPURegister fs, FPURegister fd,
SecondaryField func) {
DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
- Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
- | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) |
+ (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPURegister fs,
- FPURegister fd,
+void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
+ FPURegister fs, FPURegister fd,
SecondaryField func) {
DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
- Instr instr = opcode | fmt | (rt.code() << kRtShift)
- | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ Instr instr = opcode | fmt | (rt.code() << kRtShift) |
+ (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
}
-
-void Assembler::GenInstrRegister(Opcode opcode,
- SecondaryField fmt,
- Register rt,
- FPUControlRegister fs,
- SecondaryField func) {
+void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
+ FPUControlRegister fs, SecondaryField func) {
DCHECK(fs.is_valid() && rt.is_valid());
Instr instr =
opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
}
-
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt,
int32_t j,
CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (j & kImm16Mask);
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (j & kImm16Mask);
emit(instr, is_compact_branch);
}
@@ -1253,17 +1127,15 @@ void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF,
emit(instr, is_compact_branch);
}
-
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft,
int32_t j,
CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
- Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
- | (j & kImm16Mask);
+ Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) |
+ (j & kImm16Mask);
emit(instr, is_compact_branch);
}
-
void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
CompactBranchType is_compact_branch) {
DCHECK(rs.is_valid() && (is_int21(offset21)));
@@ -1271,7 +1143,6 @@ void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21,
emit(instr, is_compact_branch);
}
-
void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
uint32_t offset21) {
DCHECK(rs.is_valid() && (is_uint21(offset21)));
@@ -1279,7 +1150,6 @@ void Assembler::GenInstrImmediate(Opcode opcode, Register rs,
emit(instr);
}
-
void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
CompactBranchType is_compact_branch) {
DCHECK(is_int26(offset26));
@@ -1287,9 +1157,7 @@ void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26,
emit(instr, is_compact_branch);
}
-
-void Assembler::GenInstrJump(Opcode opcode,
- uint32_t address) {
+void Assembler::GenInstrJump(Opcode opcode, uint32_t address) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint26(address));
Instr instr = opcode | address;
@@ -1424,7 +1292,7 @@ int32_t Assembler::get_trampoline_entry(int32_t pos) {
int32_t trampoline_entry = kInvalidSlotPos;
if (!internal_trampoline_exception_) {
if (trampoline_.start() > pos) {
- trampoline_entry = trampoline_.take_slot();
+ trampoline_entry = trampoline_.take_slot();
}
if (kInvalidSlotPos == trampoline_entry) {
@@ -1434,7 +1302,6 @@ int32_t Assembler::get_trampoline_entry(int32_t pos) {
return trampoline_entry;
}
-
uint64_t Assembler::jump_address(Label* L) {
int64_t target_pos;
if (L->is_bound()) {
@@ -1454,7 +1321,6 @@ uint64_t Assembler::jump_address(Label* L) {
return imm;
}
-
uint64_t Assembler::jump_offset(Label* L) {
int64_t target_pos;
int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
@@ -1523,7 +1389,6 @@ int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
return offset;
}
-
void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
@@ -1549,52 +1414,40 @@ void Assembler::label_at_put(Label* L, int at_offset) {
}
}
-
//------- Branch and jump instructions --------
-void Assembler::b(int16_t offset) {
- beq(zero_reg, zero_reg, offset);
-}
-
-
-void Assembler::bal(int16_t offset) {
- bgezal(zero_reg, offset);
-}
+void Assembler::b(int16_t offset) { beq(zero_reg, zero_reg, offset); }
+void Assembler::bal(int16_t offset) { bgezal(zero_reg, offset); }
void Assembler::bc(int32_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::balc(int32_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::beq(Register rs, Register rt, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BEQ, rs, rt, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bgez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZ, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bgezc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
@@ -1603,7 +1456,6 @@ void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgec(Register rs, Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
@@ -1612,7 +1464,6 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgezal(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
DCHECK(rs != ra);
@@ -1621,14 +1472,12 @@ void Assembler::bgezal(Register rs, int16_t offset) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bgtz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BGTZ, rs, zero_reg, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bgtzc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
@@ -1636,14 +1485,12 @@ void Assembler::bgtzc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::blez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::blezc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
@@ -1651,14 +1498,12 @@ void Assembler::blezc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bltzc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
@@ -1667,7 +1512,6 @@ void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bltc(Register rs, Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
@@ -1676,14 +1520,12 @@ void Assembler::bltc(Register rs, Register rt, int16_t offset) {
GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bltz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bltzal(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6 || rs == zero_reg);
DCHECK(rs != ra);
@@ -1692,14 +1534,12 @@ void Assembler::bltzal(Register rs, int16_t offset) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bne(Register rs, Register rt, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BNE, rs, rt, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bovc(Register rs, Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
if (rs.code() >= rt.code()) {
@@ -1709,7 +1549,6 @@ void Assembler::bovc(Register rs, Register rt, int16_t offset) {
}
}
-
void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
if (rs.code() >= rt.code()) {
@@ -1719,7 +1558,6 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
}
}
-
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
@@ -1728,7 +1566,6 @@ void Assembler::blezalc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
@@ -1736,7 +1573,6 @@ void Assembler::bgezalc(Register rt, int16_t offset) {
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK_NE(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
@@ -1746,7 +1582,6 @@ void Assembler::bgezall(Register rs, int16_t offset) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
@@ -1754,7 +1589,6 @@ void Assembler::bltzalc(Register rt, int16_t offset) {
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
@@ -1763,7 +1597,6 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
@@ -1772,7 +1605,6 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rt != zero_reg);
@@ -1781,7 +1613,6 @@ void Assembler::bnezalc(Register rt, int16_t offset) {
CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::beqc(Register rs, Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
@@ -1792,14 +1623,12 @@ void Assembler::beqc(Register rs, Register rt, int16_t offset) {
}
}
-
void Assembler::beqzc(Register rs, int32_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::bnec(Register rs, Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
@@ -1810,26 +1639,22 @@ void Assembler::bnec(Register rs, Register rt, int16_t offset) {
}
}
-
void Assembler::bnezc(Register rs, int32_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs != zero_reg);
GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
}
-
void Assembler::j(int64_t target) {
// Deprecated. Use PC-relative jumps instead.
UNREACHABLE();
}
-
void Assembler::j(Label* target) {
// Deprecated. Use PC-relative jumps instead.
UNREACHABLE();
}
-
void Assembler::jal(Label* target) {
// Deprecated. Use PC-relative jumps instead.
UNREACHABLE();
@@ -1840,7 +1665,6 @@ void Assembler::jal(int64_t target) {
UNREACHABLE();
}
-
void Assembler::jr(Register rs) {
if (kArchVariant != kMips64r6) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1851,7 +1675,6 @@ void Assembler::jr(Register rs) {
}
}
-
void Assembler::jalr(Register rs, Register rd) {
DCHECK(rs.code() != rd.code());
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1859,19 +1682,16 @@ void Assembler::jalr(Register rs, Register rd) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::jic(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrImmediate(POP66, zero_reg, rt, offset);
}
-
void Assembler::jialc(Register rt, int16_t offset) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrImmediate(POP76, zero_reg, rt, offset);
}
-
// -------Data-processing-instructions---------
// Arithmetic.
@@ -1880,217 +1700,178 @@ void Assembler::addu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
-
void Assembler::addiu(Register rd, Register rs, int32_t j) {
GenInstrImmediate(ADDIU, rs, rd, j);
}
-
void Assembler::subu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}
-
void Assembler::mul(Register rd, Register rs, Register rt) {
if (kArchVariant == kMips64r6) {
- GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
} else {
- GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}
}
-
void Assembler::muh(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
}
-
void Assembler::mulu(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
}
-
void Assembler::muhu(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
}
-
void Assembler::dmul(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
}
-
void Assembler::dmuh(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
}
-
void Assembler::dmulu(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
}
-
void Assembler::dmuhu(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
}
-
void Assembler::mult(Register rs, Register rt) {
DCHECK_NE(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}
-
void Assembler::multu(Register rs, Register rt) {
DCHECK_NE(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}
-
void Assembler::daddiu(Register rd, Register rs, int32_t j) {
GenInstrImmediate(DADDIU, rs, rd, j);
}
-
void Assembler::div(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}
-
void Assembler::div(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
}
-
void Assembler::mod(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
}
-
void Assembler::divu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
-
void Assembler::divu(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
}
-
void Assembler::modu(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
-
void Assembler::daddu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
}
-
void Assembler::dsubu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
}
-
void Assembler::dmult(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
}
-
void Assembler::dmultu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
}
-
void Assembler::ddiv(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
}
-
void Assembler::ddiv(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
}
-
void Assembler::dmod(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
}
-
void Assembler::ddivu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
}
-
void Assembler::ddivu(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
}
-
void Assembler::dmodu(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
}
-
// Logical.
void Assembler::and_(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}
-
void Assembler::andi(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
GenInstrImmediate(ANDI, rs, rt, j);
}
-
void Assembler::or_(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}
-
void Assembler::ori(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
GenInstrImmediate(ORI, rs, rt, j);
}
-
void Assembler::xor_(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}
-
void Assembler::xori(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
GenInstrImmediate(XORI, rs, rt, j);
}
-
void Assembler::nor(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
-
// Shifts.
-void Assembler::sll(Register rd,
- Register rt,
- uint16_t sa,
+void Assembler::sll(Register rd, Register rt, uint16_t sa,
bool coming_from_nop) {
// Don't allow nop instructions in the form sll zero_reg, zero_reg to be
// generated using the sll instruction. They must be generated using
@@ -2099,76 +1880,64 @@ void Assembler::sll(Register rd,
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
}
-
void Assembler::sllv(Register rd, Register rt, Register rs) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}
-
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
}
-
void Assembler::srlv(Register rd, Register rt, Register rs) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}
-
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
}
-
void Assembler::srav(Register rd, Register rt, Register rs) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}
-
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
- Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
}
-
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
- Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
}
-
void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL);
}
-
void Assembler::dsllv(Register rd, Register rt, Register rs) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
}
-
void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL);
}
-
void Assembler::dsrlv(Register rd, Register rt, Register rs) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
}
-
void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
emit(instr);
}
@@ -2180,38 +1949,32 @@ void Assembler::drotr32(Register rd, Register rt, uint16_t sa) {
}
void Assembler::drotrv(Register rd, Register rt, Register rs) {
- DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
- | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
+ DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
+ (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
emit(instr);
}
-
void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA);
}
-
void Assembler::dsrav(Register rd, Register rt, Register rs) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
}
-
void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSLL32);
}
-
void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRL32);
}
-
void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, DSRA32);
}
-
void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
DCHECK_LE(sa, 3);
@@ -2221,7 +1984,6 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
emit(instr);
}
-
void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
DCHECK_LE(sa, 3);
@@ -2231,7 +1993,6 @@ void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
emit(instr);
}
-
// ------------Memory-instructions-------------
void Assembler::AdjustBaseAndOffset(MemOperand& src,
@@ -2366,68 +2127,56 @@ void Assembler::lb(Register rd, const MemOperand& rs) {
GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
}
-
void Assembler::lbu(Register rd, const MemOperand& rs) {
GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
}
-
void Assembler::lh(Register rd, const MemOperand& rs) {
GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
}
-
void Assembler::lhu(Register rd, const MemOperand& rs) {
GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
}
-
void Assembler::lw(Register rd, const MemOperand& rs) {
GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
}
-
void Assembler::lwu(Register rd, const MemOperand& rs) {
GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
}
-
void Assembler::lwl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
-
void Assembler::lwr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
-
void Assembler::sb(Register rd, const MemOperand& rs) {
GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
}
-
void Assembler::sh(Register rd, const MemOperand& rs) {
GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
}
-
void Assembler::sw(Register rd, const MemOperand& rs) {
GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
}
-
void Assembler::swl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}
-
void Assembler::swr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK_EQ(kArchVariant, kMips64r2);
@@ -2481,7 +2230,6 @@ void Assembler::lui(Register rd, int32_t j) {
GenInstrImmediate(LUI, zero_reg, rd, j);
}
-
void Assembler::aui(Register rt, Register rs, int32_t j) {
// This instruction uses same opcode as 'lui'. The difference in encoding is
// 'lui' has zero reg. for rs field.
@@ -2489,64 +2237,54 @@ void Assembler::aui(Register rt, Register rs, int32_t j) {
GenInstrImmediate(LUI, rs, rt, j);
}
-
void Assembler::daui(Register rt, Register rs, int32_t j) {
DCHECK(is_uint16(j));
DCHECK(rs != zero_reg);
GenInstrImmediate(DAUI, rs, rt, j);
}
-
void Assembler::dahi(Register rs, int32_t j) {
DCHECK(is_uint16(j));
GenInstrImmediate(REGIMM, rs, DAHI, j);
}
-
void Assembler::dati(Register rs, int32_t j) {
DCHECK(is_uint16(j));
GenInstrImmediate(REGIMM, rs, DATI, j);
}
-
void Assembler::ldl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
}
-
void Assembler::ldr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
}
-
void Assembler::sdl(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
}
-
void Assembler::sdr(Register rd, const MemOperand& rs) {
DCHECK(is_int16(rs.offset_));
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
}
-
void Assembler::ld(Register rd, const MemOperand& rs) {
GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
}
-
void Assembler::sd(Register rd, const MemOperand& rs) {
GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
}
-
// ---------PC-Relative instructions-----------
void Assembler::addiupc(Register rs, int32_t imm19) {
@@ -2556,7 +2294,6 @@ void Assembler::addiupc(Register rs, int32_t imm19) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
void Assembler::lwpc(Register rs, int32_t offset19) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid() && is_int19(offset19));
@@ -2564,7 +2301,6 @@ void Assembler::lwpc(Register rs, int32_t offset19) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
void Assembler::lwupc(Register rs, int32_t offset19) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid() && is_int19(offset19));
@@ -2572,7 +2308,6 @@ void Assembler::lwupc(Register rs, int32_t offset19) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
void Assembler::ldpc(Register rs, int32_t offset18) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid() && is_int18(offset18));
@@ -2580,7 +2315,6 @@ void Assembler::ldpc(Register rs, int32_t offset18) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
void Assembler::auipc(Register rs, int16_t imm16) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid());
@@ -2588,7 +2322,6 @@ void Assembler::auipc(Register rs, int16_t imm16) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
void Assembler::aluipc(Register rs, int16_t imm16) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(rs.is_valid());
@@ -2596,7 +2329,6 @@ void Assembler::aluipc(Register rs, int16_t imm16) {
GenInstrImmediate(PCREL, rs, imm21);
}
-
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -2605,17 +2337,13 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
// We need to invalidate breaks that could be stops as well because the
// simulator expects a char pointer after the stop instruction.
// See constants-mips.h for explanation.
- DCHECK((break_as_stop &&
- code <= kMaxStopCode &&
- code > kMaxWatchpointCode) ||
- (!break_as_stop &&
- (code > kMaxStopCode ||
- code <= kMaxWatchpointCode)));
+ DCHECK(
+ (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
+ (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
Instr break_instr = SPECIAL | BREAK | (code << 6);
emit(break_instr);
}
-
void Assembler::stop(const char* msg, uint32_t code) {
DCHECK_GT(code, kMaxWatchpointCode);
DCHECK_LE(code, kMaxStopCode);
@@ -2626,23 +2354,20 @@ void Assembler::stop(const char* msg, uint32_t code) {
#endif
}
-
void Assembler::tge(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
- Instr instr = SPECIAL | TGE | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
+ Instr instr =
+ SPECIAL | TGE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
emit(instr);
}
-
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
- Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
+ Instr instr = SPECIAL | TGEU | rs.code() << kRsShift | rt.code() << kRtShift |
+ code << 6;
emit(instr);
}
-
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
Instr instr =
@@ -2650,16 +2375,13 @@ void Assembler::tlt(Register rs, Register rt, uint16_t code) {
emit(instr);
}
-
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
- Instr instr =
- SPECIAL | TLTU | rs.code() << kRsShift
- | rt.code() << kRtShift | code << 6;
+ Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | rt.code() << kRtShift |
+ code << 6;
emit(instr);
}
-
void Assembler::teq(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
Instr instr =
@@ -2667,7 +2389,6 @@ void Assembler::teq(Register rs, Register rt, uint16_t code) {
emit(instr);
}
-
void Assembler::tne(Register rs, Register rt, uint16_t code) {
DCHECK(is_uint10(code));
Instr instr =
@@ -2686,96 +2407,78 @@ void Assembler::mfhi(Register rd) {
GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}
-
void Assembler::mflo(Register rd) {
GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}
-
// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}
-
void Assembler::sltu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}
-
void Assembler::slti(Register rt, Register rs, int32_t j) {
GenInstrImmediate(SLTI, rs, rt, j);
}
-
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
GenInstrImmediate(SLTIU, rs, rt, j);
}
-
// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}
-
void Assembler::movn(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}
-
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
Register rt = Register::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
-
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
Register rt = Register::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}
-
void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) {
min(S, fd, fs, ft);
}
-
void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) {
min(D, fd, fs, ft);
}
-
void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) {
max(S, fd, fs, ft);
}
-
void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) {
max(D, fd, fs, ft);
}
-
void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) {
mina(S, fd, fs, ft);
}
-
void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) {
mina(D, fd, fs, ft);
}
-
void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) {
maxa(S, fd, fs, ft);
}
-
void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) {
maxa(D, fd, fs, ft);
}
-
void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
@@ -2783,7 +2486,6 @@ void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
}
-
void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
@@ -2791,21 +2493,18 @@ void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
}
-
// GPR.
void Assembler::seleqz(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
}
-
// GPR.
void Assembler::selnez(Register rd, Register rs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
}
-
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
if (kArchVariant != kMips64r6) {
@@ -2816,7 +2515,6 @@ void Assembler::clz(Register rd, Register rs) {
}
}
-
void Assembler::dclz(Register rd, Register rs) {
if (kArchVariant != kMips64r6) {
// dclz instr requires same GPR number in 'rd' and 'rt' fields.
@@ -2826,7 +2524,6 @@ void Assembler::dclz(Register rd, Register rs) {
}
}
-
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// ins instr has 'rt' field as dest, and two uint5: msb, lsb.
@@ -2834,7 +2531,6 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
-
void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dins.
// dins instr has 'rt' field as dest, and two uint5: msb, lsb.
@@ -2863,7 +2559,6 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
-
void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dext.
// dext instr has 'rt' field as dest, and two uint5: msbd, lsb.
@@ -2885,27 +2580,23 @@ void Assembler::dextu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
}
-
void Assembler::bitswap(Register rd, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL);
}
-
void Assembler::dbitswap(Register rd, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, DBSHFL);
}
-
void Assembler::pref(int32_t hint, const MemOperand& rs) {
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
- Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
- | (rs.offset_);
+ Instr instr =
+ PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_);
emit(instr);
}
-
void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(is_uint3(bp));
@@ -2913,7 +2604,6 @@ void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) {
GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL);
}
-
void Assembler::dalign(Register rd, Register rs, Register rt, uint8_t bp) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK(is_uint3(bp));
@@ -2953,7 +2643,6 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}
-
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}
@@ -2966,47 +2655,38 @@ void Assembler::sdc1(FPURegister fs, const MemOperand& src) {
GenInstrImmediate(SDC1, src.rm(), fs, src.offset_);
}
-
void Assembler::mtc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MTC1, rt, fs, f0);
}
-
void Assembler::mthc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}
-
void Assembler::dmtc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, DMTC1, rt, fs, f0);
}
-
void Assembler::mfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
-
void Assembler::mfhc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}
-
void Assembler::dmfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, DMFC1, rt, fs, f0);
}
-
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CTC1, rt, fs);
}
-
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CFC1, rt, fs);
}
-
void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
@@ -3015,17 +2695,14 @@ void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, SEL);
}
-
void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) {
sel(S, fd, fs, ft);
}
-
void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) {
sel(D, fd, fs, ft);
}
-
// FPR.
void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
@@ -3033,79 +2710,66 @@ void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C);
}
-
void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) {
seleqz(D, fd, fs, ft);
}
-
void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) {
seleqz(S, fd, fs, ft);
}
-
void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) {
selnez(D, fd, fs, ft);
}
-
void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) {
selnez(S, fd, fs, ft);
}
-
void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C);
}
-
void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C);
}
-
void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK_EQ(kArchVariant, kMips64r2);
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
-
void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK_EQ(kArchVariant, kMips64r2);
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
-
void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK_EQ(kArchVariant, kMips64r2);
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, S, ft, fs, fd, MOVF);
}
-
void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) {
DCHECK_EQ(kArchVariant, kMips64r2);
FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0);
GenInstrRegister(COP1, D, ft, fs, fd, MOVF);
}
-
void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C);
}
-
void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) {
DCHECK_EQ(kArchVariant, kMips64r2);
GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C);
}
-
// FPR.
void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
@@ -3114,34 +2778,28 @@ void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C);
}
-
// Arithmetic.
void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, ADD_D);
}
-
void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}
-
void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, SUB_D);
}
-
void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}
-
void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, MUL_D);
}
-
void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}
@@ -3155,7 +2813,7 @@ void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
}
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
- FPURegister ft) {
+ FPURegister ft) {
// On Loongson 3A (MIPS64R2), MADD.D instruction is actually fused MADD.D and
// this causes failure in some of the tests. Since this optimization is rarely
// used, and not used at all on MIPS64R6, this isntruction is removed.
@@ -3198,201 +2856,162 @@ void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, S, ft, fs, fd, DIV_D);
}
-
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
-
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
}
-
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}
-
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}
-
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}
-
void Assembler::neg_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, NEG_D);
}
-
void Assembler::neg_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}
-
void Assembler::sqrt_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, SQRT_D);
}
-
void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
-
void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S);
}
-
void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D);
}
-
void Assembler::recip_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D);
}
-
void Assembler::recip_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S);
}
-
// Conversions.
void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}
-
void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}
-
void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}
-
void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}
-
void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}
-
void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}
-
void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}
-
void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}
-
void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}
-
void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}
-
void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); }
-
void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); }
-
void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, fmt, f0, fs, fd, RINT);
}
-
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
-
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
-
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
-
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
-
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}
-
void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}
-
void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}
-
void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}
-
void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}
-
void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}
-
void Assembler::class_s(FPURegister fd, FPURegister fs) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S);
}
-
void Assembler::class_d(FPURegister fd, FPURegister fs) {
DCHECK_EQ(kArchVariant, kMips64r6);
GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D);
}
-
void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
@@ -3400,7 +3019,6 @@ void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
}
-
void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
@@ -3408,50 +3026,42 @@ void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs,
GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
}
-
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
-
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
-
void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}
-
void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}
-
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
-
void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
-
// Conditions for >= MIPSr6.
-void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
- FPURegister fd, FPURegister fs, FPURegister ft) {
+void Assembler::cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
+ FPURegister fs, FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
- Instr instr = COP1 | fmt | ft.code() << kFtShift |
- fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
+ Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
+ fd.code() << kFdShift | (0 << 5) | cond;
emit(instr);
}
-
void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs,
FPURegister ft) {
cmp(cond, W, fd, fs, ft);
@@ -3462,7 +3072,6 @@ void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs,
cmp(cond, L, fd, fs, ft);
}
-
void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3471,7 +3080,6 @@ void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bc1nez(int16_t offset, FPURegister ft) {
DCHECK_EQ(kArchVariant, kMips64r6);
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -3480,41 +3088,35 @@ void Assembler::bc1nez(int16_t offset, FPURegister ft) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
// Conditions for < MIPSr6.
-void Assembler::c(FPUCondition cond, SecondaryField fmt,
- FPURegister fs, FPURegister ft, uint16_t cc) {
+void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs,
+ FPURegister ft, uint16_t cc) {
DCHECK_NE(kArchVariant, kMips64r6);
DCHECK(is_uint3(cc));
DCHECK(fmt == S || fmt == D);
DCHECK_EQ(fmt & ~(31 << kRsShift), 0);
- Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
- | cc << 8 | 3 << 4 | cond;
+ Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift |
+ cc << 8 | 3 << 4 | cond;
emit(instr);
}
-
void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft,
uint16_t cc) {
c(cond, S, fs, ft, cc);
}
-
void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft,
uint16_t cc) {
c(cond, D, fs, ft, cc);
}
-
-void Assembler::fcmp(FPURegister src1, const double src2,
- FPUCondition cond) {
+void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) {
DCHECK_EQ(src2, 0.0);
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
c(cond, D, src1, f14, 0);
}
-
void Assembler::bc1f(int16_t offset, uint16_t cc) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint3(cc));
@@ -3523,7 +3125,6 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
BlockTrampolinePoolFor(1); // For associated delay slot.
}
-
void Assembler::bc1t(int16_t offset, uint16_t cc) {
BlockTrampolinePoolScope block_trampoline_pool(this);
DCHECK(is_uint3(cc));
@@ -3581,7 +3182,7 @@ MSA_BRANCH_LIST(MSA_BRANCH)
MSA_LD_ST_LIST(MSA_LD_ST)
#undef MSA_LD_ST
-#undef MSA_BRANCH_LIST
+#undef MSA_LD_ST_LIST
#define MSA_I10_LIST(V) \
V(ldi_b, I5_DF_b) \
@@ -4114,7 +3715,6 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
}
}
-
void Assembler::GrowBuffer() {
// Compute new buffer size.
int old_size = buffer_->size();
@@ -4158,25 +3758,21 @@ void Assembler::GrowBuffer() {
DCHECK(!overflow());
}
-
void Assembler::db(uint8_t data) {
CheckForEmitInForbiddenSlot();
EmitHelper(data);
}
-
void Assembler::dd(uint32_t data) {
CheckForEmitInForbiddenSlot();
EmitHelper(data);
}
-
void Assembler::dq(uint64_t data) {
CheckForEmitInForbiddenSlot();
EmitHelper(data);
}
-
void Assembler::dd(Label* label) {
uint64_t data;
CheckForEmitInForbiddenSlot();
@@ -4191,7 +3787,6 @@ void Assembler::dd(Label* label) {
EmitHelper(data);
}
-
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
@@ -4200,13 +3795,11 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
reloc_info_writer.Write(&rinfo);
}
-
void Assembler::BlockTrampolinePoolFor(int instructions) {
CheckTrampolinePoolQuick(instructions);
BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
-
void Assembler::CheckTrampolinePool() {
// Some small sequences of instructions must not be broken up by the
// insertion of a trampoline pool; such sequences are protected by setting
@@ -4229,7 +3822,8 @@ void Assembler::CheckTrampolinePool() {
DCHECK_GE(unbound_labels_count_, 0);
if (unbound_labels_count_ > 0) {
// First we emit jump (2 instructions), then we emit trampoline pool.
- { BlockTrampolinePoolScope block_trampoline_pool(this);
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label after_pool;
if (kArchVariant == kMips64r6) {
bc(&after_pool);
@@ -4270,13 +3864,12 @@ void Assembler::CheckTrampolinePool() {
} else {
// Number of branches to unbound label at this point is zero, so we can
// move next buffer check to maximum.
- next_buffer_check_ = pc_offset() +
- kMaxBranchOffset - kTrampolineSlotsSize * 16;
+ next_buffer_check_ =
+ pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
}
return;
}
-
Address Assembler::target_address_at(Address pc) {
Instr instr0 = instr_at(pc);
Instr instr1 = instr_at(pc + 1 * kInstrSize);
@@ -4287,10 +3880,10 @@ Address Assembler::target_address_at(Address pc) {
if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
(GetOpcodeField(instr3) == ORI)) {
// Assemble the 48 bit value.
- int64_t addr = static_cast<int64_t>(
- ((uint64_t)(GetImmediate16(instr0)) << 32) |
- ((uint64_t)(GetImmediate16(instr1)) << 16) |
- ((uint64_t)(GetImmediate16(instr3))));
+ int64_t addr =
+ static_cast<int64_t>(((uint64_t)(GetImmediate16(instr0)) << 32) |
+ ((uint64_t)(GetImmediate16(instr1)) << 16) |
+ ((uint64_t)(GetImmediate16(instr3))));
// Sign extend to get canonical address.
addr = (addr << 16) >> 16;
@@ -4300,7 +3893,6 @@ Address Assembler::target_address_at(Address pc) {
UNREACHABLE();
}
-
// On Mips64, a target address is stored in a 4-instruction sequence:
// 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
// 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/codegen/mips64/assembler-mips64.h
index 940f588eba..c7c027eef7 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/assembler-mips64.h
@@ -32,19 +32,19 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-#ifndef V8_MIPS64_ASSEMBLER_MIPS64_H_
-#define V8_MIPS64_ASSEMBLER_MIPS64_H_
+#ifndef V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_
+#define V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_
#include <stdio.h>
#include <set>
-#include "src/assembler.h"
-#include "src/contexts.h"
-#include "src/external-reference.h"
-#include "src/label.h"
-#include "src/mips64/constants-mips64.h"
-#include "src/mips64/register-mips64.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/label.h"
+#include "src/codegen/mips64/constants-mips64.h"
+#include "src/codegen/mips64/register-mips64.h"
+#include "src/objects/contexts.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -96,7 +96,7 @@ class Operand {
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
- rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
@@ -119,25 +119,19 @@ class Operand {
friend class MacroAssembler;
};
-
// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
public:
// Immediate value attached to offset.
- enum OffsetAddend {
- offset_minus_one = -1,
- offset_zero = 0
- };
+ enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };
explicit MemOperand(Register rn, int32_t offset = 0);
explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
OffsetAddend offset_addend = offset_zero);
int32_t offset() const { return offset_; }
- bool OffsetIsInt16Encodable() const {
- return is_int16(offset_);
- }
+ bool OffsetIsInt16Encodable() const { return is_int16(offset_); }
private:
int32_t offset_;
@@ -157,7 +151,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit Assembler(const AssemblerOptions&,
std::unique_ptr<AssemblerBuffer> = {});
- virtual ~Assembler() { }
+ virtual ~Assembler() {}
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@@ -261,10 +255,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, uint64_t target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
static void JumpLabelToJumpRegister(Address pc);
// This sets the branch destination (which gets loaded at the call address).
@@ -311,14 +301,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kInstructionsFor32BitConstant = 2;
static constexpr int kInstructionsFor64BitConstant = 4;
- // Distance between the instruction referring to the address of the call
- // target and the return address.
-#ifdef _MIPS_ARCH_MIPS64R6
- static constexpr int kCallTargetAddressOffset = 5 * kInstrSize;
-#else
- static constexpr int kCallTargetAddressOffset = 6 * kInstrSize;
-#endif
-
// Difference between address of current opcode and value read from pc
// register.
static constexpr int kPcLoadDelta = 4;
@@ -370,7 +352,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
sll(zero_reg, nop_rt_reg, type, true);
}
-
// --------Branch-and-jump-instructions----------
// We don't use likely variant of instructions.
void b(int16_t offset);
@@ -612,12 +593,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void auipc(Register rs, int16_t imm16);
void aluipc(Register rs, int16_t imm16);
-
// ----------------Prefetch--------------------
void pref(int32_t hint, const MemOperand& rs);
-
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -758,7 +737,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void rint_d(FPURegister fd, FPURegister fs);
void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);
-
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
void trunc_l_s(FPURegister fd, FPURegister fs);
@@ -795,8 +773,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvt_d_s(FPURegister fd, FPURegister fs);
// Conditions and branches for MIPSr6.
- void cmp(FPUCondition cond, SecondaryField fmt,
- FPURegister fd, FPURegister ft, FPURegister fs);
+ void cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
+ FPURegister ft, FPURegister fs);
void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
@@ -810,8 +788,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
// Conditions and branches for non MIPSr6.
- void c(FPUCondition cond, SecondaryField fmt,
- FPURegister ft, FPURegister fs, uint16_t cc = 0);
+ void c(FPUCondition cond, SecondaryField fmt, FPURegister ft, FPURegister fs,
+ uint16_t cc = 0);
void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
@@ -1431,9 +1409,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockTrampolinePool();
}
- ~BlockTrampolinePoolScope() {
- assem_->EndBlockTrampolinePool();
- }
+ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
private:
Assembler* assem_;
@@ -1450,9 +1426,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
assem_->StartBlockGrowBuffer();
}
- ~BlockGrowBufferScope() {
- assem_->EndBlockGrowBuffer();
- }
+ ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }
private:
Assembler* assem_;
@@ -1516,7 +1490,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsBeqc(Instr instr);
static bool IsBnec(Instr instr);
-
static bool IsJump(Instr instr);
static bool IsJ(Instr instr);
static bool IsLui(Instr instr);
@@ -1614,9 +1587,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
no_trampoline_pool_before_ = pc_offset;
}
- void StartBlockTrampolinePool() {
- trampoline_pool_blocked_nesting_++;
- }
+ void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
void EndBlockTrampolinePool() {
trampoline_pool_blocked_nesting_--;
@@ -1629,13 +1600,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return trampoline_pool_blocked_nesting_ > 0;
}
- bool has_exception() const {
- return internal_trampoline_exception_;
- }
+ bool has_exception() const { return internal_trampoline_exception_; }
- bool is_trampoline_emitted() const {
- return trampoline_emitted_;
- }
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
// Temporarily block automatic assembly buffer growth.
void StartBlockGrowBuffer() {
@@ -1648,9 +1615,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
block_buffer_growth_ = false;
}
- bool is_buffer_growth_blocked() const {
- return block_buffer_growth_;
- }
+ bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
void EmitForbiddenSlotInstruction() {
if (IsPrevInstrCompactBranch()) {
@@ -1731,12 +1696,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrRegister(Opcode opcode, Register rs, Register rt, Register rd,
uint16_t sa = 0, SecondaryField func = nullptrSF);
- void GenInstrRegister(Opcode opcode,
- Register rs,
- Register rt,
- uint16_t msb,
- uint16_t lsb,
- SecondaryField func);
+ void GenInstrRegister(Opcode opcode, Register rs, Register rt, uint16_t msb,
+ uint16_t lsb, SecondaryField func);
void GenInstrRegister(Opcode opcode, SecondaryField fmt, FPURegister ft,
FPURegister fs, FPURegister fd,
@@ -1772,8 +1733,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Opcode opcode, int32_t offset26,
CompactBranchType is_compact_branch = CompactBranchType::NO);
- void GenInstrJump(Opcode opcode,
- uint32_t address);
+ void GenInstrJump(Opcode opcode, uint32_t address);
// MSA
void GenInstrMsaI8(SecondaryField operation, uint32_t imm8, MSARegister ws,
@@ -1871,12 +1831,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
free_slot_count_ = slot_count;
end_ = start + slot_count * kTrampolineSlotsSize;
}
- int start() {
- return start_;
- }
- int end() {
- return end_;
- }
+ int start() { return start_; }
+ int end() { return end_; }
int take_slot() {
int trampoline_slot = kInvalidSlotPos;
if (free_slot_count_ <= 0) {
@@ -1959,4 +1915,4 @@ class UseScratchRegisterScope {
} // namespace internal
} // namespace v8
-#endif // V8_MIPS64_ASSEMBLER_MIPS64_H_
+#endif // V8_CODEGEN_MIPS64_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/mips64/constants-mips64.cc b/deps/v8/src/codegen/mips64/constants-mips64.cc
index c087753aee..e4ee3c182a 100644
--- a/deps/v8/src/mips64/constants-mips64.cc
+++ b/deps/v8/src/codegen/mips64/constants-mips64.cc
@@ -4,35 +4,21 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/mips64/constants-mips64.h"
+#include "src/codegen/mips64/constants-mips64.h"
namespace v8 {
namespace internal {
-
// -----------------------------------------------------------------------------
// Registers.
-
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
- "zero_reg",
- "at",
- "v0", "v1",
- "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
- "t0", "t1", "t2", "t3",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9",
- "k0", "k1",
- "gp",
- "sp",
- "fp",
- "ra",
- "LO", "HI",
- "pc"
-};
-
+ "zero_reg", "at", "v0", "v1", "a0", "a1", "a2", "a3", "a4",
+ "a5", "a6", "a7", "t0", "t1", "t2", "t3", "s0", "s1",
+ "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0",
+ "k1", "gp", "sp", "fp", "ra", "LO", "HI", "pc"};
// List of alias names which can be used when referring to MIPS registers.
const Registers::RegisterAlias Registers::aliases_[] = {
@@ -52,7 +38,6 @@ const char* Registers::Name(int reg) {
return result;
}
-
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
@@ -74,13 +59,10 @@ int Registers::Number(const char* name) {
return kInvalidRegister;
}
-
const char* FPURegisters::names_[kNumFPURegisters] = {
- "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
- "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
- "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
-};
-
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
+ "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
// List of alias names which can be used when referring to MIPS registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
@@ -96,7 +78,6 @@ const char* FPURegisters::Name(int creg) {
return result;
}
-
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegisters; i++) {
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/codegen/mips64/constants-mips64.h
index 66e0c8470e..751fa3c35e 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/codegen/mips64/constants-mips64.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS64_CONSTANTS_MIPS64_H_
-#define V8_MIPS64_CONSTANTS_MIPS64_H_
+#ifndef V8_CODEGEN_MIPS64_CONSTANTS_MIPS64_H_
+#define V8_CODEGEN_MIPS64_CONSTANTS_MIPS64_H_
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
// UNIMPLEMENTED_ macro for MIPS.
#ifdef DEBUG
-#define UNIMPLEMENTED_MIPS() \
- v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+#define UNIMPLEMENTED_MIPS() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
#else
#define UNIMPLEMENTED_MIPS()
@@ -20,38 +20,32 @@
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
-enum ArchVariants {
- kMips64r2,
- kMips64r6
-};
-
+enum ArchVariants { kMips64r2, kMips64r6 };
#ifdef _MIPS_ARCH_MIPS64R2
- static const ArchVariants kArchVariant = kMips64r2;
-#elif _MIPS_ARCH_MIPS64R6
- static const ArchVariants kArchVariant = kMips64r6;
+static const ArchVariants kArchVariant = kMips64r2;
+#elif _MIPS_ARCH_MIPS64R6
+static const ArchVariants kArchVariant = kMips64r6;
#else
- static const ArchVariants kArchVariant = kMips64r2;
+static const ArchVariants kArchVariant = kMips64r2;
#endif
-
- enum Endianness { kLittle, kBig };
+enum Endianness { kLittle, kBig };
#if defined(V8_TARGET_LITTLE_ENDIAN)
- static const Endianness kArchEndian = kLittle;
+static const Endianness kArchEndian = kLittle;
#elif defined(V8_TARGET_BIG_ENDIAN)
- static const Endianness kArchEndian = kBig;
+static const Endianness kArchEndian = kBig;
#else
#error Unknown endianness
#endif
-
// TODO(plind): consider renaming these ...
-#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+#if defined(__mips_hard_float) && __mips_hard_float != 0
// Use floating-point coprocessor instructions. This flag is raised when
// -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false;
-#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+#elif defined(__mips_soft_float) && __mips_soft_float != 0
// This flag is raised when -msoft-float is passed to the compiler.
// Although FPU is a base requirement for v8, soft-float ABI is used
// on soft-float systems with FPU kernel emulation.
@@ -166,11 +160,8 @@ const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit;
const uint32_t kFCSRFlagMask =
- kFCSRInexactFlagMask |
- kFCSRUnderflowFlagMask |
- kFCSROverflowFlagMask |
- kFCSRDivideByZeroFlagMask |
- kFCSRInvalidOpFlagMask;
+ kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
@@ -253,7 +244,7 @@ class MSARegisters {
// Instructions encoding constants.
// On MIPS all instructions are 32 bits.
-typedef int32_t Instr;
+using Instr = int32_t;
// Special Software Interrupt codes when used in the presence of the MIPS
// simulator.
@@ -273,22 +264,21 @@ const uint32_t kMaxWatchpointCode = 31;
const uint32_t kMaxStopCode = 127;
STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
-
// ----- Fields offset and length.
-const int kOpcodeShift = 26;
-const int kOpcodeBits = 6;
-const int kRsShift = 21;
-const int kRsBits = 5;
-const int kRtShift = 16;
-const int kRtBits = 5;
-const int kRdShift = 11;
-const int kRdBits = 5;
-const int kSaShift = 6;
-const int kSaBits = 5;
+const int kOpcodeShift = 26;
+const int kOpcodeBits = 6;
+const int kRsShift = 21;
+const int kRsBits = 5;
+const int kRtShift = 16;
+const int kRtBits = 5;
+const int kRdShift = 11;
+const int kRdBits = 5;
+const int kSaShift = 6;
+const int kSaBits = 5;
const int kLsaSaBits = 2;
const int kFunctionShift = 0;
-const int kFunctionBits = 6;
-const int kLuiShift = 16;
+const int kFunctionBits = 6;
+const int kLuiShift = 16;
const int kBp2Shift = 6;
const int kBp2Bits = 2;
const int kBp3Shift = 6;
@@ -327,20 +317,20 @@ const int kMsaImmMI10Bits = 10;
// and are therefore shifted by 2.
const int kImmFieldShift = 2;
-const int kFrBits = 5;
-const int kFrShift = 21;
-const int kFsShift = 11;
-const int kFsBits = 5;
-const int kFtShift = 16;
-const int kFtBits = 5;
-const int kFdShift = 6;
-const int kFdBits = 5;
-const int kFCccShift = 8;
-const int kFCccBits = 3;
-const int kFBccShift = 18;
-const int kFBccBits = 3;
-const int kFBtrueShift = 16;
-const int kFBtrueBits = 1;
+const int kFrBits = 5;
+const int kFrShift = 21;
+const int kFsShift = 11;
+const int kFsBits = 5;
+const int kFtShift = 16;
+const int kFtBits = 5;
+const int kFdShift = 6;
+const int kFdBits = 5;
+const int kFCccShift = 8;
+const int kFCccBits = 3;
+const int kFBccShift = 18;
+const int kFBccBits = 3;
+const int kFBtrueShift = 16;
+const int kFBtrueBits = 1;
const int kWtBits = 5;
const int kWtShift = 16;
const int kWsBits = 5;
@@ -1080,7 +1070,6 @@ enum Condition {
cc_default = kNoCondition
};
-
// Returns the equivalent of !cc.
// Negation of the default kNoCondition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
@@ -1090,7 +1079,6 @@ inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
-
inline Condition NegateFpuCondition(Condition cc) {
DCHECK(cc != cc_always);
switch (cc) {
@@ -1154,7 +1142,6 @@ enum MSABranchDF {
MSA_BRANCH_V
};
-
// ----- Coprocessor conditions.
enum FPUCondition {
kNoFPUCondition = -1,
@@ -1176,7 +1163,6 @@ enum FPUCondition {
NE = 0x13, // Ordered Greater Than or Less Than. on Mips >= 6 only.
};
-
// FPU rounding modes.
enum FPURoundingMode {
RN = 0 << 0, // Round to Nearest.
@@ -1211,15 +1197,9 @@ enum class MaxMinKind : int { kMin = 0, kMax = 1 };
// Branch hints are not used on the MIPS. They are defined so that they can
// appear in shared function signatures, but will be ignored in MIPS
// implementations.
-enum Hint {
- no_hint = 0
-};
-
-
-inline Hint NegateHint(Hint hint) {
- return no_hint;
-}
+enum Hint { no_hint = 0 };
+inline Hint NegateHint(Hint hint) { return no_hint; }
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -1279,9 +1259,7 @@ class InstructionBase {
}
// Read one particular bit out of the instruction bits.
- inline int Bit(int nr) const {
- return (InstructionBits() >> nr) & 1;
- }
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
// Read a bit field out of the instruction bits.
inline int Bits(int hi, int lo) const {
@@ -1344,7 +1322,6 @@ class InstructionBase {
FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC);
-
// Accessors for the different named fields used in the MIPS encoding.
inline Opcode OpcodeValue() const {
return static_cast<Opcode>(
@@ -1735,7 +1712,6 @@ class Instruction : public InstructionGetters<InstructionBase> {
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
-
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
@@ -2007,4 +1983,4 @@ bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
} // namespace internal
} // namespace v8
-#endif // V8_MIPS64_CONSTANTS_MIPS64_H_
+#endif // V8_CODEGEN_MIPS64_CONSTANTS_MIPS64_H_
diff --git a/deps/v8/src/mips64/cpu-mips64.cc b/deps/v8/src/codegen/mips64/cpu-mips64.cc
index db2002d5ed..0f3713b937 100644
--- a/deps/v8/src/mips64/cpu-mips64.cc
+++ b/deps/v8/src/codegen/mips64/cpu-mips64.cc
@@ -13,12 +13,11 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/cpu-features.h"
+#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
-
void CpuFeatures::FlushICache(void* start, size_t size) {
#if !defined(USE_SIMULATOR)
// Nothing to do, flushing no instructions.
@@ -28,10 +27,10 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
#if defined(ANDROID) && !defined(__LP64__)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
- char *end = reinterpret_cast<char *>(start) + size;
- cacheflush(
- reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
-#else // ANDROID
+ char* end = reinterpret_cast<char*>(start) + size;
+ cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
+ 0);
+#else // ANDROID
long res; // NOLINT(runtime/int)
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
index 851d1d32cc..e32d6c6d6e 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/codegen/mips64/interface-descriptors-mips64.cc
@@ -4,9 +4,9 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
@@ -101,7 +101,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
@@ -251,10 +250,10 @@ void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
- a1, // kApiFunctionAddress
- a2, // kArgc
- a3, // kCallData
- a0, // kHolder
+ a1, // kApiFunctionAddress
+ a2, // kArgc
+ a3, // kCallData
+ a0, // kHolder
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index f358dd811a..65c0b592eb 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -6,29 +6,29 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
-#include "src/bootstrapper.h"
-#include "src/callable.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
-#include "src/external-reference-table.h"
-#include "src/frames-inl.h"
+#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
-#include "src/macro-assembler.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
-#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
-#include "src/mips64/macro-assembler-mips64.h"
+#include "src/codegen/mips64/macro-assembler-mips64.h"
#endif
namespace v8 {
@@ -131,7 +131,6 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
}
-
void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
Push(ra, fp, marker_reg);
@@ -153,34 +152,12 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
Daddu(fp, sp, Operand(offset));
}
-// Push and pop all registers that can hold pointers.
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of kNumSafepointRegisters values on the
- // stack, so adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK_GE(num_unsaved, 0);
- if (num_unsaved > 0) {
- Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
- }
- MultiPush(kSafepointSavedRegisters);
-}
-
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- MultiPop(kSafepointSavedRegisters);
- if (num_unsaved > 0) {
- Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
- }
-}
-
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
return kSafepointRegisterStackIndexMap[reg_code];
}
-
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
@@ -370,9 +347,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
CheckPageFlag(object,
value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- eq,
- &done);
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
// Record the actual write.
if (ra_status == kRAHasNotBeenSaved) {
@@ -1096,9 +1071,8 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
}
}
-
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
- pref(hint, rs);
+ pref(hint, rs);
}
void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
@@ -1353,7 +1327,6 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
}
}
-
// Load consequent 32-bit word pair in 64-bit reg. and put first word in low
// bits,
// second word in high bits.
@@ -1381,7 +1354,6 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
}
}
-
// Do 64-bit store as two consequent 32-bit stores to unaligned address.
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
Register scratch) {
@@ -1947,7 +1919,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
ori(rd, rd, (immediate >> 16) & kImm16Mask);
dsll(rd, rd, 16);
ori(rd, rd, immediate & kImm16Mask);
- } else if (mode == ADDRESS_LOAD) {
+ } else if (mode == ADDRESS_LOAD) {
// We always need the same number of instructions as we may need to patch
// this code to load another value which may need all 4 instructions.
lui(rd, (j.immediate() >> 32) & kImm16Mask);
@@ -1988,7 +1960,6 @@ void TurboAssembler::MultiPush(RegList regs) {
}
}
-
void TurboAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
@@ -2001,7 +1972,6 @@ void TurboAssembler::MultiPop(RegList regs) {
daddiu(sp, sp, stack_offset);
}
-
void TurboAssembler::MultiPushFPU(RegList regs) {
int16_t num_to_push = base::bits::CountPopulation(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -2015,7 +1985,6 @@ void TurboAssembler::MultiPushFPU(RegList regs) {
}
}
-
void TurboAssembler::MultiPopFPU(RegList regs) {
int16_t stack_offset = 0;
@@ -2028,7 +1997,6 @@ void TurboAssembler::MultiPopFPU(RegList regs) {
daddiu(sp, sp, stack_offset);
}
-
void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK_LT(pos, 32);
@@ -2263,29 +2231,23 @@ void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
bind(&conversion_done);
}
-
void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
round_l_d(fd, fs);
}
-
void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
floor_l_d(fd, fs);
}
-
void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
ceil_l_d(fd, fs);
}
-
void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
trunc_l_d(fd, fs);
}
-
-void MacroAssembler::Trunc_l_ud(FPURegister fd,
- FPURegister fs,
+void MacroAssembler::Trunc_l_ud(FPURegister fd, FPURegister fs,
FPURegister scratch) {
BlockTrampolinePoolScope block_trampoline_pool(this);
// Load to GPR.
@@ -2329,22 +2291,18 @@ void TurboAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
dmtc1(t8, fd);
}
-
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
trunc_w_d(fd, fs);
}
-
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
round_w_d(fd, fs);
}
-
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
floor_w_d(fd, fs);
}
-
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
ceil_w_d(fd, fs);
}
@@ -2659,7 +2617,7 @@ void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
}
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
- FPURegister ft, FPURegister scratch) {
+ FPURegister ft, FPURegister scratch) {
DCHECK(fr != scratch && fs != scratch && ft != scratch);
mul_d(scratch, fs, ft);
add_d(fd, fr, scratch);
@@ -3161,13 +3119,10 @@ void TurboAssembler::Dpopcnt(Register rd, Register rs) {
dsrl32(rd, rd, shift);
}
-void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
- DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
- Register except_flag,
- CheckForInexactConversion check_inexact) {
+void MacroAssembler::EmitFPUTruncate(
+ FPURoundingMode rounding_mode, Register result, DoubleRegister double_input,
+ Register scratch, DoubleRegister double_scratch, Register except_flag,
+ CheckForInexactConversion check_inexact) {
DCHECK(result != scratch);
DCHECK(double_input != double_scratch);
DCHECK(except_flag != scratch);
@@ -3244,8 +3199,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
cfc1(scratch, FCSR);
ctc1(scratch2, FCSR);
// Check for overflow and NaNs.
- And(scratch,
- scratch,
+ And(scratch, scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
Branch(done, eq, scratch, Operand(zero_reg));
@@ -3358,8 +3312,7 @@ void TurboAssembler::BranchShortHelper(int16_t offset, Label* L,
b(offset);
// Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+ if (bdslot == PROTECT) nop();
}
void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
@@ -3386,7 +3339,6 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
}
}
-
int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
if (L) {
offset = branch_offset_helper(L, bits) >> 2;
@@ -3769,8 +3721,7 @@ bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
}
// Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+ if (bdslot == PROTECT) nop();
return true;
}
@@ -3866,8 +3817,7 @@ void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
bal(offset);
// Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+ if (bdslot == PROTECT) nop();
}
void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
@@ -3999,7 +3949,6 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
}
break;
-
// Unsigned comparison.
case Ugreater:
// rs > r2
@@ -4123,8 +4072,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
}
// Emit a nop in the branch delay slot if required.
- if (bdslot == PROTECT)
- nop();
+ if (bdslot == PROTECT) nop();
return true;
}
@@ -4461,7 +4409,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
Label skip;
if (cond != al) {
- Branch(&skip, NegateCondition(cond), reg, op);
+ Branch(&skip, NegateCondition(cond), reg, op);
}
Daddu(sp, sp, Operand(count * kPointerSize));
@@ -4471,11 +4419,7 @@ void TurboAssembler::Drop(int count, Condition cond, Register reg,
}
}
-
-
-void MacroAssembler::Swap(Register reg1,
- Register reg2,
- Register scratch) {
+void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
if (scratch == no_reg) {
Xor(reg1, reg1, Operand(reg2));
Xor(reg2, reg2, Operand(reg1));
@@ -4531,12 +4475,12 @@ void MacroAssembler::PushStackHandler() {
Sd(sp, MemOperand(t2));
}
-
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(a1);
- Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
- kPointerSize)));
+ Daddu(sp, sp,
+ Operand(
+ static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
li(scratch,
@@ -4620,7 +4564,6 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
}
}
-
// -----------------------------------------------------------------------------
// JavaScript invokes.
@@ -4870,18 +4813,15 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(a1, no_reg, expected, actual, flag);
}
-
// ---------------------------------------------------------------------------
// Support functions.
-void MacroAssembler::GetObjectType(Register object,
- Register map,
+void MacroAssembler::GetObjectType(Register object, Register map,
Register type_reg) {
Ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
-
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -5052,7 +4992,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -5067,14 +5006,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-
// -----------------------------------------------------------------------------
// Debugging.
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
- if (emit_debug_code())
- Check(cc, reason, rs, rt);
+ if (emit_debug_code()) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@@ -5142,7 +5079,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
Ld(dst, ContextMemOperand(dst, index));
}
-
void TurboAssembler::StubPrologue(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5261,8 +5197,9 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
- Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
- kNumOfSavedRegisters * kDoubleSize));
+ Dsubu(t8, fp,
+ Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
+ kNumOfSavedRegisters * kDoubleSize));
for (int i = 0; i < kNumOfSavedRegisters; i++) {
FPURegister reg = FPURegister::from_code(2 * i);
Ldc1(reg, MemOperand(t8, i * kDoubleSize));
@@ -5312,7 +5249,7 @@ int TurboAssembler::ActivationFrameAlignment() {
// Note: This will break if we ever start generating snapshots on one Mips
// platform for another Mips platform with a different alignment.
return base::OS::ActivationFrameAlignment();
-#else // V8_HOST_ARCH_MIPS
+#else // V8_HOST_ARCH_MIPS
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
// if the target platform will need alignment, so this is controlled from a
@@ -5321,26 +5258,25 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_MIPS
}
-
void MacroAssembler::AssertStackIsAligned() {
if (emit_debug_code()) {
- const int frame_alignment = ActivationFrameAlignment();
- const int frame_alignment_mask = frame_alignment - 1;
-
- if (frame_alignment > kPointerSize) {
- Label alignment_as_expected;
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- andi(scratch, sp, frame_alignment_mask);
- Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
- }
- // Don't use Check here, as it will call Runtime_Abort re-entering here.
- stop("Unexpected stack alignment");
- bind(&alignment_as_expected);
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
+
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
}
+ // Don't use Check here, as it will call Runtime_Abort re-entering here.
+ stop("Unexpected stack alignment");
+ bind(&alignment_as_expected);
}
+ }
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
@@ -5353,16 +5289,6 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
-void MacroAssembler::UntagAndJumpIfSmi(Register dst,
- Register src,
- Label* smi_case) {
- // DCHECK(dst!=src);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- JumpIfSmi(src, smi_case, scratch, USE_DELAY_SLOT);
- SmiUntag(dst, src);
-}
-
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Register scratch, BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
@@ -5370,33 +5296,13 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
-void MacroAssembler::JumpIfNotSmi(Register value,
- Label* not_smi_label,
- Register scratch,
- BranchDelaySlot bd) {
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
+ Register scratch, BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
-
-void MacroAssembler::JumpIfEitherSmi(Register reg1,
- Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- // TODO(plind): Find some better to fix this assert issue.
-#if defined(__APPLE__)
- DCHECK_EQ(1, kSmiTagMask);
-#else
- DCHECK_EQ((int64_t)1, kSmiTagMask);
-#endif
- // Both Smi tags must be 1 (not Smi).
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- and_(scratch, reg1, reg2);
- JumpIfSmi(scratch, on_either_smi);
-}
-
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -5407,7 +5313,6 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -5446,7 +5351,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
-
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -5500,7 +5404,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
-
void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
FPURegister src2, Label* out_of_line) {
if (src1 == src2) {
@@ -5727,8 +5630,8 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
// Both ABIs: Remaining arguments are pushed on the stack, above (higher
// address than) the (O32) argument slots. (arg slot calculation handled by
// CalculateStackPassedWords()).
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (frame_alignment > kPointerSize) {
// Make stack end at alignment and make room for num_arguments - 4 words
// and the original value of sp.
@@ -5843,8 +5746,8 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
- int stack_passed_arguments = CalculateStackPassedWords(
- num_reg_arguments, num_double_arguments);
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -5853,7 +5756,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
-
#undef BRANCH_ARGS_CHECK
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
@@ -5864,12 +5766,8 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
-
-Register GetRegisterThatIsNotOneOf(Register reg1,
- Register reg2,
- Register reg3,
- Register reg4,
- Register reg5,
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5,
Register reg6) {
RegList regs = 0;
if (reg1.is_valid()) regs |= reg1.bit();
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index a6b5283c58..d0f9b7f5bc 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -6,12 +6,12 @@
#error This header must be included via macro-assembler.h
#endif
-#ifndef V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
-#define V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
+#ifndef V8_CODEGEN_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
+#define V8_CODEGEN_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
-#include "src/assembler.h"
-#include "src/globals.h"
-#include "src/mips64/assembler-mips64.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/mips64/assembler-mips64.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -31,18 +31,11 @@ enum class AbortReason : uint8_t;
// trying to update gp register for position-independent-code. Whenever
// MIPS generated code calls C code, it must be via t9 register.
-
// Flags used for LeaveExitFrame function.
-enum LeaveExitFrameMode {
- EMIT_RETURN = true,
- NO_EMIT_RETURN = false
-};
+enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
-enum BranchDelaySlot {
- USE_DELAY_SLOT,
- PROTECT
-};
+enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT };
// Flags used for the li macro-assembler function.
enum LiFlags {
@@ -66,8 +59,7 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
-Register GetRegisterThatIsNotOneOf(Register reg1,
- Register reg2 = no_reg,
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
Register reg5 = no_reg,
@@ -82,23 +74,19 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
#define SmiWordOffset(offset) offset
#endif
-
inline MemOperand ContextMemOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
-
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
-
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
// TODO(plind): Currently ONLY used for O32. Should be fixed for
@@ -153,22 +141,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Abort(AbortReason msg);
// Arguments macros.
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
#define COND_ARGS cond, r1, r2
// Cases when relocation is not needed.
-#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
- void Name(target_type target, BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, target_type target) { \
- Name(target, bd); \
- } \
- void Name(target_type target, \
- COND_TYPED_ARGS, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, \
- target_type target, \
- COND_TYPED_ARGS) { \
- Name(target, COND_ARGS, bd); \
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target, BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, target_type target) { \
+ Name(target, bd); \
+ } \
+ void Name(target_type target, COND_TYPED_ARGS, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, target_type target, COND_TYPED_ARGS) { \
+ Name(target, COND_ARGS, bd); \
}
#define DECLARE_BRANCH_PROTOTYPES(Name) \
@@ -235,8 +220,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRelative(Register destination, int32_t offset) override;
// Jump, Call, and Ret pseudo instructions implementing inter-working.
-#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
- const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+#define COND_ARGS \
+ Condition cond = al, Register rs = zero_reg, \
+ const Operand &rt = Operand(zero_reg), \
+ BranchDelaySlot bd = PROTECT
void Jump(Register target, COND_ARGS);
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
@@ -244,8 +231,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
void Call(Register target, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- void Call(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
COND_ARGS);
void Call(Label* target);
@@ -274,25 +260,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ret(COND_ARGS);
inline void Ret(BranchDelaySlot bd, Condition cond = al,
- Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
+ Register rs = zero_reg,
+ const Operand& rt = Operand(zero_reg)) {
Ret(cond, rs, rt, bd);
}
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
- void Drop(int count,
- Condition cond = cc_always,
- Register reg = no_reg,
+ void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
const Operand& op = Operand(no_reg));
// Trivial case of DropAndRet that utilizes the delay slot and only emits
// 2 instructions.
void DropAndRet(int drop);
- void DropAndRet(int drop,
- Condition cond,
- Register reg,
- const Operand& op);
+ void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
void Ld(Register rd, const MemOperand& rs);
void Sd(Register rd, const MemOperand& rs);
@@ -357,7 +339,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode, Address wasm_target);
void CallEphemeronKeyBarrier(Register object, Register address,
- SaveFPRegsMode fp_mode);
+ SaveFPRegsMode fp_mode);
// Push multiple registers on the stack.
// Registers are saved in numerical order, with higher numbered registers
@@ -979,11 +961,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
-
// Convert double to unsigned long.
void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
@@ -1044,8 +1021,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
- void LoadGlobalFunctionInitialMap(Register function,
- Register map,
+ void LoadGlobalFunctionInitialMap(Register function, Register map,
Register scratch);
// -------------------------------------------------------------------------
@@ -1084,9 +1060,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// Support functions.
- void GetObjectType(Register function,
- Register map,
- Register type_reg);
+ void GetObjectType(Register function, Register map, Register type_reg);
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1126,10 +1100,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// -------------------------------------------------------------------------
// StatsCounter support.
- void IncrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
- void DecrementCounter(StatsCounter* counter, int value,
- Register scratch1, Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
// -------------------------------------------------------------------------
// Smi utilities.
@@ -1144,9 +1118,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
}
- void SmiTag(Register reg) {
- SmiTag(reg, reg);
- }
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
// Left-shifted from int32 equivalent of Smi.
void SmiScale(Register dst, Register src, int scale) {
@@ -1164,19 +1136,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
}
- // Untag the source value into destination and jump if source is a smi.
- // Source and destination can be the same register.
- void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value,
- Label* not_smi_label,
- Register scratch = at,
+ void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
BranchDelaySlot bd = PROTECT);
- // Jump if either of the registers contain a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
-
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
@@ -1199,12 +1163,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
- template<typename Field>
+ template <typename Field>
void DecodeField(Register dst, Register src) {
Ext(dst, src, Field::kShift, Field::kSize);
}
- template<typename Field>
+ template <typename Field>
void DecodeField(Register reg) {
DecodeField<Field>(reg, reg);
}
@@ -1265,4 +1229,4 @@ void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
} // namespace internal
} // namespace v8
-#endif // V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
+#endif // V8_CODEGEN_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
diff --git a/deps/v8/src/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h
index 5da1b7a087..976b7bf3f0 100644
--- a/deps/v8/src/mips64/register-mips64.h
+++ b/deps/v8/src/codegen/mips64/register-mips64.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS64_REGISTER_MIPS64_H_
-#define V8_MIPS64_REGISTER_MIPS64_H_
+#ifndef V8_CODEGEN_MIPS64_REGISTER_MIPS64_H_
+#define V8_CODEGEN_MIPS64_REGISTER_MIPS64_H_
-#include "src/mips64/constants-mips64.h"
-#include "src/register.h"
-#include "src/reglist.h"
+#include "src/codegen/mips64/constants-mips64.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
namespace v8 {
namespace internal {
@@ -266,9 +266,9 @@ class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
// but it is not in common use. Someday we will want to support this in v8.)
// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
-typedef FPURegister FloatRegister;
+using FloatRegister = FPURegister;
-typedef FPURegister DoubleRegister;
+using DoubleRegister = FPURegister;
#define DECLARE_DOUBLE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
@@ -278,7 +278,7 @@ DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
// SIMD registers.
-typedef MSARegister Simd128Register;
+using Simd128Register = MSARegister;
#define DECLARE_SIMD128_REGISTER(R) \
constexpr Simd128Register R = Simd128Register::from_code<kMsaCode_##R>();
@@ -386,4 +386,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
} // namespace internal
} // namespace v8
-#endif // V8_MIPS64_REGISTER_MIPS64_H_
+#endif // V8_CODEGEN_MIPS64_REGISTER_MIPS64_H_
diff --git a/deps/v8/src/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index 9c0fdd5448..596d5c261e 100644
--- a/deps/v8/src/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/optimized-compilation-info.h"
+#include "src/codegen/optimized-compilation-info.h"
-#include "src/api.h"
+#include "src/api/api.h"
+#include "src/codegen/source-position.h"
#include "src/debug/debug.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
-#include "src/source-position.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
#include "src/wasm/function-compiler.h"
@@ -83,6 +83,7 @@ void OptimizedCompilationInfo::ConfigureFlags() {
#endif // ENABLE_GDB_JIT_INTERFACE && DEBUG
break;
case Code::WASM_FUNCTION:
+ case Code::WASM_TO_CAPI_FUNCTION:
SetFlag(kSwitchJumpTableEnabled);
break;
default:
@@ -91,8 +92,7 @@ void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_turbo_control_flow_aware_allocation) {
MarkAsTurboControlFlowAwareAllocation();
- }
- if (FLAG_turbo_preprocess_ranges) {
+ } else {
MarkAsTurboPreprocessRanges();
}
}
@@ -151,12 +151,12 @@ void OptimizedCompilationInfo::RetryOptimization(BailoutReason reason) {
std::unique_ptr<char[]> OptimizedCompilationInfo::GetDebugName() const {
if (!shared_info().is_null()) {
- return shared_info()->DebugName()->ToCString();
+ return shared_info()->DebugName().ToCString();
}
Vector<const char> name_vec = debug_name_;
if (name_vec.empty()) name_vec = ArrayVector("unknown");
std::unique_ptr<char[]> name(new char[name_vec.length() + 1]);
- memcpy(name.get(), name_vec.start(), name_vec.length());
+ memcpy(name.get(), name_vec.begin(), name_vec.length());
name[name_vec.length()] = '\0';
return name;
}
@@ -169,6 +169,8 @@ StackFrame::Type OptimizedCompilationInfo::GetOutputStackFrameType() const {
return StackFrame::STUB;
case Code::WASM_FUNCTION:
return StackFrame::WASM_COMPILED;
+ case Code::WASM_TO_CAPI_FUNCTION:
+ return StackFrame::WASM_EXIT;
case Code::JS_TO_WASM_FUNCTION:
return StackFrame::JS_TO_WASM;
case Code::WASM_TO_JS_FUNCTION:
@@ -215,7 +217,7 @@ bool OptimizedCompilationInfo::has_global_object() const {
JSGlobalObject OptimizedCompilationInfo::global_object() const {
DCHECK(has_global_object());
- return native_context()->global_object();
+ return native_context().global_object();
}
int OptimizedCompilationInfo::AddInlinedFunction(
@@ -238,7 +240,6 @@ OptimizedCompilationInfo::InlinedFunctionHolder::InlinedFunctionHolder(
Handle<SharedFunctionInfo> inlined_shared_info,
Handle<BytecodeArray> inlined_bytecode, SourcePosition pos)
: shared_info(inlined_shared_info), bytecode_array(inlined_bytecode) {
- DCHECK_EQ(shared_info->GetBytecodeArray(), *bytecode_array);
position.position = pos;
// initialized when generating the deoptimization literals
position.inlined_function_id = DeoptimizationData::kNotInlinedIndex;
diff --git a/deps/v8/src/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index c722a928dc..eca3a8fa32 100644
--- a/deps/v8/src/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OPTIMIZED_COMPILATION_INFO_H_
-#define V8_OPTIMIZED_COMPILATION_INFO_H_
+#ifndef V8_CODEGEN_OPTIMIZED_COMPILATION_INFO_H_
+#define V8_CODEGEN_OPTIMIZED_COMPILATION_INFO_H_
#include <memory>
-#include "src/bailout-reason.h"
-#include "src/frames.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/objects.h"
-#include "src/source-position-table.h"
-#include "src/utils.h"
-#include "src/vector.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/codegen/source-position-table.h"
+#include "src/common/globals.h"
+#include "src/execution/frames.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
namespace v8 {
@@ -259,7 +259,7 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
}
};
- typedef std::vector<InlinedFunctionHolder> InlinedFunctionList;
+ using InlinedFunctionList = std::vector<InlinedFunctionHolder>;
InlinedFunctionList& inlined_functions() { return inlined_functions_; }
// Returns the inlining id for source position tracking.
@@ -339,4 +339,4 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
} // namespace internal
} // namespace v8
-#endif // V8_OPTIMIZED_COMPILATION_INFO_H_
+#endif // V8_CODEGEN_OPTIMIZED_COMPILATION_INFO_H_
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
index 31aaf7ae80..166b9d4423 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc-inl.h
@@ -34,14 +34,14 @@
// significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
-#ifndef V8_PPC_ASSEMBLER_PPC_INL_H_
-#define V8_PPC_ASSEMBLER_PPC_INL_H_
+#ifndef V8_CODEGEN_PPC_ASSEMBLER_PPC_INL_H_
+#define V8_CODEGEN_PPC_ASSEMBLER_PPC_INL_H_
-#include "src/ppc/assembler-ppc.h"
+#include "src/codegen/ppc/assembler-ppc.h"
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -65,7 +65,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
-
Address RelocInfo::target_internal_reference() {
if (IsInternalReference(rmode_)) {
// Jump table entry
@@ -77,13 +76,11 @@ Address RelocInfo::target_internal_reference() {
}
}
-
Address RelocInfo::target_internal_reference_address() {
DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
}
-
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@@ -112,7 +109,6 @@ Address RelocInfo::target_address_address() {
return pc_;
}
-
Address RelocInfo::constant_pool_entry_address() {
if (FLAG_enable_embedded_constant_pool) {
DCHECK(constant_pool_);
@@ -124,49 +120,20 @@ Address RelocInfo::constant_pool_entry_address() {
UNREACHABLE();
}
-
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
-Address Assembler::target_address_from_return_address(Address pc) {
-// Returns the address of the call target from the return address that will
-// be returned to after a call.
-// Call sequence is :
-// mov ip, @ call address
-// mtlr ip
-// blrl
-// @ return address
- int len;
- ConstantPoolEntry::Access access;
- if (FLAG_enable_embedded_constant_pool &&
- IsConstantPoolLoadEnd(pc - 3 * kInstrSize, &access)) {
- len = (access == ConstantPoolEntry::OVERFLOWED) ? 2 : 1;
- } else {
- len = kMovInstructionsNoConstantPool;
- }
- return pc - (len + 2) * kInstrSize;
-}
-
-
-Address Assembler::return_address_from_call_start(Address pc) {
- int len;
- ConstantPoolEntry::Access access;
- if (FLAG_enable_embedded_constant_pool &&
- IsConstantPoolLoadStart(pc, &access)) {
- len = (access == ConstantPoolEntry::OVERFLOWED) ? 2 : 1;
- } else {
- len = kMovInstructionsNoConstantPool;
- }
- return pc + (len + 2) * kInstrSize;
-}
-
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
@@ -174,8 +141,8 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
@@ -213,7 +180,7 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
@@ -276,7 +243,6 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
UNREACHABLE();
}
-
#if V8_TARGET_ARCH_PPC64
const uint32_t kLoadIntptrOpcode = LD;
#else
@@ -309,7 +275,6 @@ bool Assembler::IsConstantPoolLoadStart(Address pc,
return true;
}
-
bool Assembler::IsConstantPoolLoadEnd(Address pc,
ConstantPoolEntry::Access* access) {
Instr instr = instr_at(pc);
@@ -331,7 +296,6 @@ bool Assembler::IsConstantPoolLoadEnd(Address pc,
return true;
}
-
int Assembler::GetConstantPoolOffset(Address pc,
ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
@@ -353,7 +317,6 @@ int Assembler::GetConstantPoolOffset(Address pc,
return offset;
}
-
void Assembler::PatchConstantPoolAccessInstruction(
int pc_offset, int offset, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
@@ -387,7 +350,6 @@ void Assembler::PatchConstantPoolAccessInstruction(
}
}
-
Address Assembler::target_constant_pool_address_at(
Address pc, Address constant_pool, ConstantPoolEntry::Access access,
ConstantPoolEntry::Type type) {
@@ -397,7 +359,6 @@ Address Assembler::target_constant_pool_address_at(
return addr;
}
-
// This sets the branch destination (which gets loaded at the call address).
// This is for calls and branches within generated code. The serializer
// has already deserialized the mov instructions etc.
@@ -405,7 +366,7 @@ Address Assembler::target_constant_pool_address_at(
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- !code.is_null() ? code->constant_pool() : kNullAddress,
+ !code.is_null() ? code.constant_pool() : kNullAddress,
target);
}
@@ -423,7 +384,6 @@ void Assembler::deserialization_set_target_internal_reference_at(
}
}
-
// This code assumes the FIXED_SEQUENCE of lis/ori
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
@@ -494,4 +454,4 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
} // namespace internal
} // namespace v8
-#endif // V8_PPC_ASSEMBLER_PPC_INL_H_
+#endif // V8_CODEGEN_PPC_ASSEMBLER_PPC_INL_H_
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index c38aabcfca..3241f821f9 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -34,16 +34,16 @@
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
-#include "src/ppc/assembler-ppc.h"
+#include "src/codegen/ppc/assembler-ppc.h"
#if V8_TARGET_ARCH_PPC
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/ppc/assembler-ppc-inl.h"
-#include "src/string-constants.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/ppc/assembler-ppc-inl.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
@@ -54,7 +54,6 @@ static unsigned CpuFeaturesImpliedByCompiler() {
return answer;
}
-
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
icache_line_size_ = 128;
@@ -110,7 +109,6 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#endif
}
-
void CpuFeatures::PrintTarget() {
const char* ppc_arch = nullptr;
@@ -123,12 +121,10 @@ void CpuFeatures::PrintTarget() {
printf("target %s\n", ppc_arch);
}
-
void CpuFeatures::PrintFeatures() {
printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
}
-
Register ToRegister(int num) {
DCHECK(num >= 0 && num < kNumRegisters);
const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
@@ -138,7 +134,6 @@ Register ToRegister(int num) {
return kRegisters[num];
}
-
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@@ -154,7 +149,6 @@ bool RelocInfo::IsCodedSpecially() {
return true;
}
-
bool RelocInfo::IsInConstantPool() {
if (FLAG_enable_embedded_constant_pool && constant_pool_ != kNullAddress) {
return Assembler::IsConstantPoolLoadStart(pc_);
@@ -175,20 +169,20 @@ uint32_t RelocInfo::wasm_call_tag() const {
Operand::Operand(Handle<HeapObject> handle) {
rm_ = no_reg;
value_.immediate = static_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
}
Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(str);
return result;
@@ -286,10 +280,8 @@ void Assembler::Align(int m) {
}
}
-
void Assembler::CodeTargetAlign() { Align(8); }
-
Condition Assembler::GetCondition(Instr instr) {
switch (instr & kCondMask) {
case BT:
@@ -302,36 +294,28 @@ Condition Assembler::GetCondition(Instr instr) {
return al;
}
-
bool Assembler::IsLis(Instr instr) {
return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
}
-
bool Assembler::IsLi(Instr instr) {
return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
}
-
bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }
-
bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }
-
bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
-
Register Assembler::GetRA(Instr instr) {
return Register::from_code(Instruction::RAValue(instr));
}
-
Register Assembler::GetRB(Instr instr) {
return Register::from_code(Instruction::RBValue(instr));
}
-
#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
@@ -356,21 +340,17 @@ bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
}
#endif
-
bool Assembler::IsCmpRegister(Instr instr) {
return (((instr & kOpcodeMask) == EXT2) &&
((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}
-
bool Assembler::IsRlwinm(Instr instr) {
return ((instr & kOpcodeMask) == RLWINMX);
}
-
bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }
-
#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
return (((instr & kOpcodeMask) == EXT5) &&
@@ -378,30 +358,25 @@ bool Assembler::IsRldicl(Instr instr) {
}
#endif
-
bool Assembler::IsCmpImmediate(Instr instr) {
return ((instr & kOpcodeMask) == CMPI);
}
-
bool Assembler::IsCrSet(Instr instr) {
return (((instr & kOpcodeMask) == EXT1) &&
((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}
-
Register Assembler::GetCmpImmediateRegister(Instr instr) {
DCHECK(IsCmpImmediate(instr));
return GetRA(instr);
}
-
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
DCHECK(IsCmpImmediate(instr));
return instr & kOff16Mask;
}
-
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -412,11 +387,9 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
// to be generated; pos() is the position of the last
// instruction using the label.
-
// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
-
// Dummy opcodes for unbound label mov instructions or jump table entries.
enum {
kUnboundMovLabelOffsetOpcode = 0 << 26,
@@ -457,7 +430,6 @@ int Assembler::target_at(int pos) {
return pos + link;
}
-
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Instr instr = instr_at(pos);
uint32_t opcode = instr & kOpcodeMask;
@@ -498,9 +470,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
// pointer in a register.
Register dst = Register::from_code(instr_at(pos + kInstrSize));
int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- PatchingAssembler patcher(options(),
- reinterpret_cast<byte*>(buffer_start_ + pos),
- 2);
+ PatchingAssembler patcher(
+ options(), reinterpret_cast<byte*>(buffer_start_ + pos), 2);
patcher.bitwise_mov32(dst, offset);
break;
}
@@ -545,7 +516,6 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
}
}
-
int Assembler::max_reach_from(int pos) {
Instr instr = instr_at(pos);
uint32_t opcode = instr & kOpcodeMask;
@@ -567,7 +537,6 @@ int Assembler::max_reach_from(int pos) {
return 0;
}
-
void Assembler::bind_to(Label* L, int pos) {
DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
int32_t trampoline_pos = kInvalidSlotPos;
@@ -599,13 +568,11 @@ void Assembler::bind_to(Label* L, int pos) {
if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
-
void Assembler::bind(Label* L) {
DCHECK(!L->is_bound()); // label can only be bound once
bind_to(L, pc_offset());
}
-
void Assembler::next(Label* L) {
DCHECK(L->is_linked());
int link = target_at(L->pos());
@@ -617,7 +584,6 @@ void Assembler::next(Label* L) {
}
}
-
bool Assembler::is_near(Label* L, Condition cond) {
DCHECK(L->is_bound());
if (L->is_bound() == false) return false;
@@ -628,13 +594,11 @@ bool Assembler::is_near(Label* L, Condition cond) {
return is_intn(offset, maxReach);
}
-
void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
DoubleRegister frb, RCBit r) {
emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}
-
void Assembler::d_form(Instr instr, Register rt, Register ra,
const intptr_t val, bool signed_disp) {
if (signed_disp) {
@@ -669,7 +633,6 @@ void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
m5 * B5 | sh5 * B1 | r);
}
-
void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
int maskbit, RCBit r) {
int m0_4 = maskbit & 0x1F;
@@ -679,7 +642,6 @@ void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
m5 * B5 | r);
}
-
// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry() {
int32_t trampoline_entry = kInvalidSlotPos;
@@ -694,7 +656,6 @@ int32_t Assembler::get_trampoline_entry() {
return trampoline_entry;
}
-
int Assembler::link(Label* L) {
int position;
if (L->is_bound()) {
@@ -715,55 +676,44 @@ int Assembler::link(Label* L) {
return position;
}
-
// Branch instructions.
-
void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}
-
void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}
-
// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }
-
// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }
-
void Assembler::bctrl() { bcctr(BA, 0, SetLK); }
-
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
int imm16 = branch_offset;
CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}
-
void Assembler::b(int branch_offset, LKBit lk) {
int imm26 = branch_offset;
CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
emit(BX | (imm26 & kImm26Mask) | lk);
}
-
void Assembler::xori(Register dst, Register src, const Operand& imm) {
d_form(XORI, src, dst, imm.immediate(), false);
}
-
void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
d_form(XORIS, rs, ra, imm.immediate(), false);
}
-
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
RCBit rc) {
sh &= 0x1F;
@@ -773,7 +723,6 @@ void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
me << 1 | rc);
}
-
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
RCBit rc) {
mb &= 0x1F;
@@ -782,7 +731,6 @@ void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
me << 1 | rc);
}
-
void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
RCBit rc) {
sh &= 0x1F;
@@ -792,48 +740,40 @@ void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
me << 1 | rc);
}
-
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}
-
void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}
-
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
RCBit rc) {
DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
}
-
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
RCBit rc) {
DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
rlwinm(dst, src, 0, val.immediate(), 31, rc);
}
-
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
rlwnm(ra, rs, rb, 0, 31, r);
}
-
void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
rlwinm(ra, rs, sh, 0, 31, r);
}
-
void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
rlwinm(ra, rs, 32 - sh, 0, 31, r);
}
-
void Assembler::subi(Register dst, Register src, const Operand& imm) {
addi(dst, src, Operand(-(imm.immediate())));
}
@@ -853,7 +793,6 @@ void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}
-
void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
@@ -873,83 +812,69 @@ void Assembler::subfic(Register dst, Register src, const Operand& imm) {
d_form(SUBFIC, dst, src, imm.immediate(), true);
}
-
void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}
-
// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}
-
// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}
-
// Multiply hi word unsigned
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}
-
// Divide word
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}
-
// Divide word unsigned
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
-
void Assembler::addi(Register dst, Register src, const Operand& imm) {
DCHECK(src != r0); // use li instead to show intent
d_form(ADDI, dst, src, imm.immediate(), true);
}
-
void Assembler::addis(Register dst, Register src, const Operand& imm) {
DCHECK(src != r0); // use lis instead to show intent
d_form(ADDIS, dst, src, imm.immediate(), true);
}
-
void Assembler::addic(Register dst, Register src, const Operand& imm) {
d_form(ADDIC, dst, src, imm.immediate(), true);
}
-
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
d_form(ANDIx, rs, ra, imm.immediate(), false);
}
-
void Assembler::andis(Register ra, Register rs, const Operand& imm) {
d_form(ANDISx, rs, ra, imm.immediate(), false);
}
-
void Assembler::ori(Register ra, Register rs, const Operand& imm) {
d_form(ORI, rs, ra, imm.immediate(), false);
}
-
void Assembler::oris(Register dst, Register src, const Operand& imm) {
d_form(ORIS, src, dst, imm.immediate(), false);
}
-
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
@@ -963,7 +888,6 @@ void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
-
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
uintptr_t uimm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
@@ -977,7 +901,6 @@ void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
-
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
intptr_t imm16 = src2.immediate();
int L = 0;
@@ -995,7 +918,6 @@ void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
-
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
uintptr_t uimm16 = src2.immediate();
int L = 0;
@@ -1005,61 +927,51 @@ void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
-
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
cb * B6);
}
-
// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
d_form(ADDI, dst, r0, imm.immediate(), true);
}
-
void Assembler::lis(Register dst, const Operand& imm) {
d_form(ADDIS, dst, r0, imm.immediate(), true);
}
-
// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
// actually or(dst, src, src)
orx(dst, src, src);
}
-
void Assembler::lbz(Register dst, const MemOperand& src) {
DCHECK(src.ra_ != r0);
d_form(LBZ, dst, src.ra(), src.offset(), true);
}
-
void Assembler::lhz(Register dst, const MemOperand& src) {
DCHECK(src.ra_ != r0);
d_form(LHZ, dst, src.ra(), src.offset(), true);
}
-
void Assembler::lwz(Register dst, const MemOperand& src) {
DCHECK(src.ra_ != r0);
d_form(LWZ, dst, src.ra(), src.offset(), true);
}
-
void Assembler::lwzu(Register dst, const MemOperand& src) {
DCHECK(src.ra_ != r0);
d_form(LWZU, dst, src.ra(), src.offset(), true);
}
-
void Assembler::lha(Register dst, const MemOperand& src) {
DCHECK(src.ra_ != r0);
d_form(LHA, dst, src.ra(), src.offset(), true);
}
-
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
int offset = src.offset();
@@ -1077,30 +989,25 @@ void Assembler::stb(Register dst, const MemOperand& src) {
d_form(STB, dst, src.ra(), src.offset(), true);
}
-
void Assembler::sth(Register dst, const MemOperand& src) {
DCHECK(src.ra_ != r0);
d_form(STH, dst, src.ra(), src.offset(), true);
}
-
void Assembler::stw(Register dst, const MemOperand& src) {
DCHECK(src.ra_ != r0);
d_form(STW, dst, src.ra(), src.offset(), true);
}
-
void Assembler::stwu(Register dst, const MemOperand& src) {
DCHECK(src.ra_ != r0);
d_form(STWU, dst, src.ra(), src.offset(), true);
}
-
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
-
#if V8_TARGET_ARCH_PPC64
// 64bit specific instructions
void Assembler::ld(Register rd, const MemOperand& src) {
@@ -1111,7 +1018,6 @@ void Assembler::ld(Register rd, const MemOperand& src) {
emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}
-
void Assembler::ldu(Register rd, const MemOperand& src) {
int offset = src.offset();
DCHECK(src.ra_ != r0);
@@ -1120,7 +1026,6 @@ void Assembler::ldu(Register rd, const MemOperand& src) {
emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}
-
void Assembler::std(Register rs, const MemOperand& src) {
int offset = src.offset();
DCHECK(src.ra_ != r0);
@@ -1129,7 +1034,6 @@ void Assembler::std(Register rs, const MemOperand& src) {
emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}
-
void Assembler::stdu(Register rs, const MemOperand& src) {
int offset = src.offset();
DCHECK(src.ra_ != r0);
@@ -1138,58 +1042,48 @@ void Assembler::stdu(Register rs, const MemOperand& src) {
emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}
-
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}
-
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}
-
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}
-
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}
-
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}
-
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}
-
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
RCBit rc) {
DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
rldicr(dst, src, 0, 63 - val.immediate(), rc);
}
-
void Assembler::clrldi(Register dst, Register src, const Operand& val,
RCBit rc) {
DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
rldicl(dst, src, 0, val.immediate(), rc);
}
-
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
-
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
int sh0_4 = sh & 0x1F;
int sh5 = (sh >> 5) & 0x1;
@@ -1198,41 +1092,34 @@ void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
sh5 * B1 | r);
}
-
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
rldcl(ra, rs, rb, 0, r);
}
-
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
rldicl(ra, rs, sh, 0, r);
}
-
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
rldicl(ra, rs, 64 - sh, 0, r);
}
-
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}
-
void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
-
void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
#endif
-
// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
@@ -1247,7 +1134,6 @@ void Assembler::function_descriptor() {
}
}
-
int Assembler::instructions_required_for_mov(Register dst,
const Operand& src) const {
bool canOptimize =
@@ -1262,7 +1148,6 @@ int Assembler::instructions_required_for_mov(Register dst,
return kMovInstructionsNoConstantPool;
}
-
bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
bool canOptimize) const {
if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
@@ -1289,14 +1174,12 @@ bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
return true;
}
-
void Assembler::EnsureSpaceFor(int space_needed) {
if (buffer_space() <= (kGap + space_needed)) {
GrowBuffer(space_needed);
}
}
-
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
if (assembler != nullptr && assembler->predictable_code_size()) return true;
@@ -1307,7 +1190,6 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
return true;
}
-
// Primarily used for loading constants
// This should really move to be in macro-assembler as it
// is really a pseudo instruction
@@ -1395,30 +1277,28 @@ void Assembler::mov(Register dst, const Operand& src) {
bitwise_mov(dst, value);
}
-
void Assembler::bitwise_mov(Register dst, intptr_t value) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
- int32_t hi_32 = static_cast<int32_t>(value >> 32);
- int32_t lo_32 = static_cast<int32_t>(value);
- int hi_word = static_cast<int>(hi_32 >> 16);
- int lo_word = static_cast<int>(hi_32 & 0xFFFF);
- lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
- ori(dst, dst, Operand(lo_word));
- sldi(dst, dst, Operand(32));
- hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
- lo_word = static_cast<int>(lo_32 & 0xFFFF);
- oris(dst, dst, Operand(hi_word));
- ori(dst, dst, Operand(lo_word));
+ int32_t hi_32 = static_cast<int32_t>(value >> 32);
+ int32_t lo_32 = static_cast<int32_t>(value);
+ int hi_word = static_cast<int>(hi_32 >> 16);
+ int lo_word = static_cast<int>(hi_32 & 0xFFFF);
+ lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
+ ori(dst, dst, Operand(lo_word));
+ sldi(dst, dst, Operand(32));
+ hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
+ lo_word = static_cast<int>(lo_32 & 0xFFFF);
+ oris(dst, dst, Operand(hi_word));
+ ori(dst, dst, Operand(lo_word));
#else
- int hi_word = static_cast<int>(value >> 16);
- int lo_word = static_cast<int>(value & 0xFFFF);
- lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
- ori(dst, dst, Operand(lo_word));
+ int hi_word = static_cast<int>(value >> 16);
+ int lo_word = static_cast<int>(value & 0xFFFF);
+ lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
+ ori(dst, dst, Operand(lo_word));
#endif
}
-
void Assembler::bitwise_mov32(Register dst, int32_t value) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int hi_word = static_cast<int>(value >> 16);
@@ -1427,7 +1307,6 @@ void Assembler::bitwise_mov32(Register dst, int32_t value) {
ori(dst, dst, Operand(lo_word));
}
-
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (is_int16(value)) {
@@ -1442,7 +1321,6 @@ void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
}
}
-
void Assembler::mov_label_offset(Register dst, Label* label) {
int position = link(label);
if (label->is_bound()) {
@@ -1469,7 +1347,6 @@ void Assembler::mov_label_offset(Register dst, Label* label) {
}
}
-
void Assembler::add_label_offset(Register dst, Register base, Label* label,
int delta) {
int position = link(label);
@@ -1498,7 +1375,6 @@ void Assembler::add_label_offset(Register dst, Register base, Label* label,
}
}
-
void Assembler::mov_label_addr(Register dst, Label* label) {
CheckBuffer();
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
@@ -1528,7 +1404,6 @@ void Assembler::mov_label_addr(Register dst, Label* label) {
}
}
-
void Assembler::emit_label_addr(Label* label) {
CheckBuffer();
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
@@ -1556,38 +1431,31 @@ void Assembler::emit_label_addr(Label* label) {
}
}
-
// Special register instructions
void Assembler::crxor(int bt, int ba, int bb) {
emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}
-
void Assembler::creqv(int bt, int ba, int bb) {
emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}
-
void Assembler::mflr(Register dst) {
emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11); // Ignore RC bit
}
-
void Assembler::mtlr(Register src) {
emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11); // Ignore RC bit
}
-
void Assembler::mtctr(Register src) {
emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11); // Ignore RC bit
}
-
void Assembler::mtxer(Register src) {
emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}
-
void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
DCHECK_LT(static_cast<int>(bit), 32);
int bf = cr.code();
@@ -1595,37 +1463,30 @@ void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}
-
void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
-
#if V8_TARGET_ARCH_PPC64
void Assembler::mffprd(Register dst, DoubleRegister src) {
emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}
-
void Assembler::mffprwz(Register dst, DoubleRegister src) {
emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}
-
void Assembler::mtfprd(DoubleRegister dst, Register src) {
emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}
-
void Assembler::mtfprwz(DoubleRegister dst, Register src) {
emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}
-
void Assembler::mtfprwa(DoubleRegister dst, Register src) {
emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif
-
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h .
@@ -1647,21 +1508,16 @@ void Assembler::dcbf(Register ra, Register rb) {
emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}
-
void Assembler::sync() { emit(EXT2 | SYNC); }
-
void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }
-
void Assembler::icbi(Register ra, Register rb) {
emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}
-
void Assembler::isync() { emit(EXT1 | ISYNC); }
-
// Floating point support
void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
@@ -1674,7 +1530,6 @@ void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}
-
void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -1685,7 +1540,6 @@ void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}
-
void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -1696,7 +1550,6 @@ void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}
-
void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -1707,7 +1560,6 @@ void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}
-
void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -1718,7 +1570,6 @@ void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}
-
void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -1729,7 +1580,6 @@ void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}
-
void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -1740,7 +1590,6 @@ void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}
-
void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
int offset = src.offset();
Register ra = src.ra();
@@ -1751,133 +1600,111 @@ void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}
-
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frb, RCBit rc) {
a_form(EXT4 | FSUB, frt, fra, frb, rc);
}
-
void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frb, RCBit rc) {
a_form(EXT4 | FADD, frt, fra, frb, rc);
}
-
void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, RCBit rc) {
emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
rc);
}
-
void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frb, RCBit rc) {
a_form(EXT4 | FDIV, frt, fra, frb, rc);
}
-
void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
CRegister cr) {
DCHECK(cr.code() >= 0 && cr.code() <= 7);
emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}
-
void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}
-
void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}
-
void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc) {
@@ -1885,55 +1712,46 @@ void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
frc.code() * B6 | rc);
}
-
void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
DCHECK_LT(static_cast<int>(bit), 32);
int bt = bit;
emit(EXT4 | MTFSB0 | bt * B21 | rc);
}
-
void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
DCHECK_LT(static_cast<int>(bit), 32);
int bt = bit;
emit(EXT4 | MTFSB1 | bt * B21 | rc);
}
-
void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}
-
void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
emit(EXT4 | MFFS | frt.code() * B21 | rc);
}
-
void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
RCBit rc) {
emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}
-
void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
RCBit rc) {
emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}
-
void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc) {
@@ -1941,7 +1759,6 @@ void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
frc.code() * B6 | rc);
}
-
void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc) {
@@ -1969,7 +1786,6 @@ void Assembler::nop(int type) {
ori(reg, reg, Operand::Zero());
}
-
bool Assembler::IsNop(Instr instr, int type) {
int reg = 0;
switch (type) {
@@ -1988,7 +1804,6 @@ bool Assembler::IsNop(Instr instr, int type) {
return instr == (ORI | reg * B21 | reg * B16);
}
-
void Assembler::GrowBuffer(int needed) {
DCHECK_EQ(buffer_start_, buffer_->start());
@@ -2029,42 +1844,36 @@ void Assembler::GrowBuffer(int needed) {
// to relocate any emitted relocation entries.
}
-
void Assembler::db(uint8_t data) {
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
}
-
void Assembler::dd(uint32_t data) {
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
}
-
void Assembler::dq(uint64_t value) {
CheckBuffer();
*reinterpret_cast<uint64_t*>(pc_) = value;
pc_ += sizeof(uint64_t);
}
-
void Assembler::dp(uintptr_t data) {
CheckBuffer();
*reinterpret_cast<uintptr_t*>(pc_) = data;
pc_ += sizeof(uintptr_t);
}
-
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (!ShouldRecordRelocInfo(rmode)) return;
DeferredRelocInfo rinfo(pc_offset(), rmode, data);
relocations_.push_back(rinfo);
}
-
void Assembler::EmitRelocations() {
EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
@@ -2091,12 +1900,10 @@ void Assembler::EmitRelocations() {
}
}
-
void Assembler::BlockTrampolinePoolFor(int instructions) {
BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
-
void Assembler::CheckTrampolinePool() {
// Some small sequences of instructions must not be broken up by the
// insertion of a trampoline pool; such sequences are protected by setting
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index 0773484c79..2c4225849f 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -37,20 +37,20 @@
// A light-weight PPC Assembler
// Generates user mode instructions for the PPC architecture up
-#ifndef V8_PPC_ASSEMBLER_PPC_H_
-#define V8_PPC_ASSEMBLER_PPC_H_
+#ifndef V8_CODEGEN_PPC_ASSEMBLER_PPC_H_
+#define V8_CODEGEN_PPC_ASSEMBLER_PPC_H_
#include <stdio.h>
#include <vector>
-#include "src/assembler.h"
-#include "src/constant-pool.h"
-#include "src/double.h"
-#include "src/external-reference.h"
-#include "src/label.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/constant-pool.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/label.h"
+#include "src/codegen/ppc/constants-ppc.h"
+#include "src/codegen/ppc/register-ppc.h"
+#include "src/numbers/double.h"
#include "src/objects/smi.h"
-#include "src/ppc/constants-ppc.h"
-#include "src/ppc/register-ppc.h"
namespace v8 {
namespace internal {
@@ -61,7 +61,7 @@ class SafepointTableBuilder;
// Machine instruction Operands
// Class Operand represents a shifter operand in data processing instructions
-class Operand {
+class V8_EXPORT_PRIVATE Operand {
public:
// immediate
V8_INLINE explicit Operand(intptr_t immediate,
@@ -106,7 +106,7 @@ class Operand {
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
- rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
@@ -126,28 +126,21 @@ class Operand {
friend class MacroAssembler;
};
-
// Class MemOperand represents a memory operand in load and store instructions
// On PowerPC we have base register + 16bit signed value
// Alternatively we can have a 16bit signed value immediate
-class MemOperand {
+class V8_EXPORT_PRIVATE MemOperand {
public:
explicit MemOperand(Register rn, int32_t offset = 0);
explicit MemOperand(Register ra, Register rb);
- int32_t offset() const {
- return offset_;
- }
+ int32_t offset() const { return offset_; }
// PowerPC - base register
- Register ra() const {
- return ra_;
- }
+ Register ra() const { return ra_; }
- Register rb() const {
- return rb_;
- }
+ Register rb() const { return rb_; }
private:
Register ra_; // base
@@ -157,7 +150,6 @@ class MemOperand {
friend class Assembler;
};
-
class DeferredRelocInfo {
public:
DeferredRelocInfo() {}
@@ -174,7 +166,6 @@ class DeferredRelocInfo {
intptr_t data_;
};
-
class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
@@ -266,14 +257,6 @@ class Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
- // Given the address of the beginning of a call, return the address
- // in the instruction stream that the call will return to.
- V8_INLINE static Address return_address_from_call_start(Address pc);
-
// This sets the branch destination.
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -314,37 +297,26 @@ class Assembler : public AssemblerBase {
? kMovInstructionsConstantPool
: kMovInstructionsNoConstantPool;
- // Distance between the instruction referring to the address of the call
- // target and the return address.
-
- // Call sequence is a FIXED_SEQUENCE:
- // mov r8, @ call address
- // mtlr r8
- // blrl
- // @ return address
- static constexpr int kCallTargetAddressOffset =
- (kMovInstructions + 2) * kInstrSize;
-
static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
return ((cr.code() * CRWIDTH) + crbit);
}
#define DECLARE_PPC_X_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
- inline void name(const Register rt, const Register ra, \
- const Register rb, const RCBit rc = LeaveRC) { \
+ inline void name(const Register rt, const Register ra, const Register rb, \
+ const RCBit rc = LeaveRC) { \
x_form(instr_name, rt, ra, rb, rc); \
}
#define DECLARE_PPC_X_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
- inline void name(const Register ra, const Register rs, \
- const Register rb, const RCBit rc = LeaveRC) { \
+ inline void name(const Register ra, const Register rs, const Register rb, \
+ const RCBit rc = LeaveRC) { \
x_form(instr_name, rs, ra, rb, rc); \
}
-#define DECLARE_PPC_X_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
- inline void name(const Register dst, const Register src, \
- const RCBit rc = LeaveRC) { \
- x_form(instr_name, src, dst, r0, rc); \
+#define DECLARE_PPC_X_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
+ inline void name(const Register dst, const Register src, \
+ const RCBit rc = LeaveRC) { \
+ x_form(instr_name, src, dst, r0, rc); \
}
#define DECLARE_PPC_X_INSTRUCTIONS_D_FORM(name, instr_name, instr_value) \
@@ -358,10 +330,10 @@ class Assembler : public AssemblerBase {
name(dst, src.ra(), src.rb()); \
}
-#define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
- inline void name(const Register dst, const Register src, \
- const int sh, const RCBit rc = LeaveRC) { \
- x_form(instr_name, src.code(), dst.code(), sh, rc); \
+#define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
+ inline void name(const Register dst, const Register src, const int sh, \
+ const RCBit rc = LeaveRC) { \
+ x_form(instr_name, src.code(), dst.code(), sh, rc); \
}
#define DECLARE_PPC_X_INSTRUCTIONS_F_FORM(name, instr_name, instr_value) \
@@ -402,8 +374,8 @@ class Assembler : public AssemblerBase {
#else
int L = 0;
#endif
- emit(instr | cr.code() * B23 | L * B21 | s1.code() * B16 |
- s2.code() * B11 | rc);
+ emit(instr | cr.code() * B23 | L * B21 | s1.code() * B16 | s2.code() * B11 |
+ rc);
}
PPC_X_OPCODE_A_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_A_FORM)
@@ -1342,4 +1314,4 @@ class PatchingAssembler : public Assembler {
} // namespace internal
} // namespace v8
-#endif // V8_PPC_ASSEMBLER_PPC_H_
+#endif // V8_CODEGEN_PPC_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/codegen/ppc/code-stubs-ppc.cc
index c0d7b58b0f..937c745662 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/codegen/ppc/code-stubs-ppc.cc
@@ -4,27 +4,25 @@
#if V8_TARGET_ARCH_PPC
-#include "src/api-arguments-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-arguments-inl.h"
#include "src/base/bits.h"
-#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/double.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/macro-assembler.h"
+#include "src/init/bootstrapper.h"
+#include "src/numbers/double.h"
#include "src/objects/api-callbacks.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
-namespace internal {
-
-} // namespace internal
+namespace internal {} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/constants-ppc.cc b/deps/v8/src/codegen/ppc/constants-ppc.cc
index e6eec643f4..4cee2cbcb5 100644
--- a/deps/v8/src/ppc/constants-ppc.cc
+++ b/deps/v8/src/codegen/ppc/constants-ppc.cc
@@ -4,8 +4,7 @@
#if V8_TARGET_ARCH_PPC
-#include "src/ppc/constants-ppc.h"
-
+#include "src/codegen/ppc/constants-ppc.h"
namespace v8 {
namespace internal {
@@ -17,13 +16,11 @@ const char* Registers::names_[kNumRegisters] = {
"r11", "ip", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
"r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "fp"};
-
const char* DoubleRegisters::names_[kNumDoubleRegisters] = {
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
"d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
"d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
-
int DoubleRegisters::Number(const char* name) {
for (int i = 0; i < kNumDoubleRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
@@ -35,7 +32,6 @@ int DoubleRegisters::Number(const char* name) {
return kNoRegister;
}
-
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index 60a3c57322..f6ebc6a7ba 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_CONSTANTS_PPC_H_
-#define V8_PPC_CONSTANTS_PPC_H_
+#ifndef V8_CODEGEN_PPC_CONSTANTS_PPC_H_
+#define V8_CODEGEN_PPC_CONSTANTS_PPC_H_
#include <stdint.h>
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
// UNIMPLEMENTED_ macro for PPC.
#ifdef DEBUG
@@ -20,9 +20,9 @@
#define UNIMPLEMENTED_PPC()
#endif
-#if V8_HOST_ARCH_PPC && \
+#if V8_HOST_ARCH_PPC && \
(V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \
- (!defined(_CALL_ELF) || _CALL_ELF == 1)))
+ (!defined(_CALL_ELF) || _CALL_ELF == 1)))
#define ABI_USES_FUNCTION_DESCRIPTORS 1
#else
#define ABI_USES_FUNCTION_DESCRIPTORS 0
@@ -34,15 +34,16 @@
#define ABI_PASSES_HANDLES_IN_REGS 0
#endif
-#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || \
- V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2)
+#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN || \
+ (defined(_CALL_ELF) && _CALL_ELF == 2)
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
#else
#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
#endif
-#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && (V8_TARGET_LITTLE_ENDIAN || \
- (defined(_CALL_ELF) && _CALL_ELF == 2)))
+#if !V8_HOST_ARCH_PPC || \
+ (V8_TARGET_ARCH_PPC64 && \
+ (V8_TARGET_LITTLE_ENDIAN || (defined(_CALL_ELF) && _CALL_ELF == 2)))
#define ABI_CALL_VIA_IP 1
#else
#define ABI_CALL_VIA_IP 0
@@ -114,13 +115,11 @@ enum Condition {
al = 10 // Always.
};
-
inline Condition NegateCondition(Condition cond) {
DCHECK(cond != al);
return static_cast<Condition>(cond ^ ne);
}
-
// -----------------------------------------------------------------------------
// Instructions encoding.
@@ -128,990 +127,990 @@ inline Condition NegateCondition(Condition cond) {
// representing instructions from usual 32 bit values.
// Instruction objects are pointers to 32bit values, and provide methods to
// access the various ISA fields.
-typedef uint32_t Instr;
-
-#define PPC_XX3_OPCODE_LIST(V) \
- /* VSX Scalar Add Double-Precision */ \
- V(xsadddp, XSADDDP, 0xF0000100) \
- /* VSX Scalar Add Single-Precision */ \
- V(xsaddsp, XSADDSP, 0xF0000000) \
- /* VSX Scalar Compare Ordered Double-Precision */ \
- V(xscmpodp, XSCMPODP, 0xF0000158) \
- /* VSX Scalar Compare Unordered Double-Precision */ \
- V(xscmpudp, XSCMPUDP, 0xF0000118) \
- /* VSX Scalar Copy Sign Double-Precision */ \
- V(xscpsgndp, XSCPSGNDP, 0xF0000580) \
- /* VSX Scalar Divide Double-Precision */ \
- V(xsdivdp, XSDIVDP, 0xF00001C0) \
- /* VSX Scalar Divide Single-Precision */ \
- V(xsdivsp, XSDIVSP, 0xF00000C0) \
- /* VSX Scalar Multiply-Add Type-A Double-Precision */ \
- V(xsmaddadp, XSMADDADP, 0xF0000108) \
- /* VSX Scalar Multiply-Add Type-A Single-Precision */ \
- V(xsmaddasp, XSMADDASP, 0xF0000008) \
- /* VSX Scalar Multiply-Add Type-M Double-Precision */ \
- V(xsmaddmdp, XSMADDMDP, 0xF0000148) \
- /* VSX Scalar Multiply-Add Type-M Single-Precision */ \
- V(xsmaddmsp, XSMADDMSP, 0xF0000048) \
- /* VSX Scalar Maximum Double-Precision */ \
- V(xsmaxdp, XSMAXDP, 0xF0000500) \
- /* VSX Scalar Minimum Double-Precision */ \
- V(xsmindp, XSMINDP, 0xF0000540) \
- /* VSX Scalar Multiply-Subtract Type-A Double-Precision */ \
- V(xsmsubadp, XSMSUBADP, 0xF0000188) \
- /* VSX Scalar Multiply-Subtract Type-A Single-Precision */ \
- V(xsmsubasp, XSMSUBASP, 0xF0000088) \
- /* VSX Scalar Multiply-Subtract Type-M Double-Precision */ \
- V(xsmsubmdp, XSMSUBMDP, 0xF00001C8) \
- /* VSX Scalar Multiply-Subtract Type-M Single-Precision */ \
- V(xsmsubmsp, XSMSUBMSP, 0xF00000C8) \
- /* VSX Scalar Multiply Double-Precision */ \
- V(xsmuldp, XSMULDP, 0xF0000180) \
- /* VSX Scalar Multiply Single-Precision */ \
- V(xsmulsp, XSMULSP, 0xF0000080) \
- /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */ \
- V(xsnmaddadp, XSNMADDADP, 0xF0000508) \
- /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */ \
- V(xsnmaddasp, XSNMADDASP, 0xF0000408) \
- /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */ \
- V(xsnmaddmdp, XSNMADDMDP, 0xF0000548) \
- /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */ \
- V(xsnmaddmsp, XSNMADDMSP, 0xF0000448) \
- /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \
- V(xsnmsubadp, XSNMSUBADP, 0xF0000588) \
- /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \
- V(xsnmsubasp, XSNMSUBASP, 0xF0000488) \
- /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \
- V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8) \
- /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \
- V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \
- /* VSX Scalar Reciprocal Estimate Double-Precision */ \
- V(xsredp, XSREDP, 0xF0000168) \
- /* VSX Scalar Reciprocal Estimate Single-Precision */ \
- V(xsresp, XSRESP, 0xF0000068) \
- /* VSX Scalar Subtract Double-Precision */ \
- V(xssubdp, XSSUBDP, 0xF0000140) \
- /* VSX Scalar Subtract Single-Precision */ \
- V(xssubsp, XSSUBSP, 0xF0000040) \
- /* VSX Scalar Test for software Divide Double-Precision */ \
- V(xstdivdp, XSTDIVDP, 0xF00001E8) \
- /* VSX Vector Add Double-Precision */ \
- V(xvadddp, XVADDDP, 0xF0000300) \
- /* VSX Vector Add Single-Precision */ \
- V(xvaddsp, XVADDSP, 0xF0000200) \
- /* VSX Vector Compare Equal To Double-Precision */ \
- V(xvcmpeqdp, XVCMPEQDP, 0xF0000318) \
- /* VSX Vector Compare Equal To Double-Precision & record CR6 */ \
- V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718) \
- /* VSX Vector Compare Equal To Single-Precision */ \
- V(xvcmpeqsp, XVCMPEQSP, 0xF0000218) \
- /* VSX Vector Compare Equal To Single-Precision & record CR6 */ \
- V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618) \
- /* VSX Vector Compare Greater Than or Equal To Double-Precision */ \
- V(xvcmpgedp, XVCMPGEDP, 0xF0000398) \
- /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */ \
- /* CR6 */ \
- V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798) \
- /* VSX Vector Compare Greater Than or Equal To Single-Precision */ \
- V(xvcmpgesp, XVCMPGESP, 0xF0000298) \
- /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */ \
- /* CR6 */ \
- V(xvcmpgespx, XVCMPGESPx, 0xF0000698) \
- /* VSX Vector Compare Greater Than Double-Precision */ \
- V(xvcmpgtdp, XVCMPGTDP, 0xF0000358) \
- /* VSX Vector Compare Greater Than Double-Precision & record CR6 */ \
- V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758) \
- /* VSX Vector Compare Greater Than Single-Precision */ \
- V(xvcmpgtsp, XVCMPGTSP, 0xF0000258) \
- /* VSX Vector Compare Greater Than Single-Precision & record CR6 */ \
- V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658) \
- /* VSX Vector Copy Sign Double-Precision */ \
- V(xvcpsgndp, XVCPSGNDP, 0xF0000780) \
- /* VSX Vector Copy Sign Single-Precision */ \
- V(xvcpsgnsp, XVCPSGNSP, 0xF0000680) \
- /* VSX Vector Divide Double-Precision */ \
- V(xvdivdp, XVDIVDP, 0xF00003C0) \
- /* VSX Vector Divide Single-Precision */ \
- V(xvdivsp, XVDIVSP, 0xF00002C0) \
- /* VSX Vector Multiply-Add Type-A Double-Precision */ \
- V(xvmaddadp, XVMADDADP, 0xF0000308) \
- /* VSX Vector Multiply-Add Type-A Single-Precision */ \
- V(xvmaddasp, XVMADDASP, 0xF0000208) \
- /* VSX Vector Multiply-Add Type-M Double-Precision */ \
- V(xvmaddmdp, XVMADDMDP, 0xF0000348) \
- /* VSX Vector Multiply-Add Type-M Single-Precision */ \
- V(xvmaddmsp, XVMADDMSP, 0xF0000248) \
- /* VSX Vector Maximum Double-Precision */ \
- V(xvmaxdp, XVMAXDP, 0xF0000700) \
- /* VSX Vector Maximum Single-Precision */ \
- V(xvmaxsp, XVMAXSP, 0xF0000600) \
- /* VSX Vector Minimum Double-Precision */ \
- V(xvmindp, XVMINDP, 0xF0000740) \
- /* VSX Vector Minimum Single-Precision */ \
- V(xvminsp, XVMINSP, 0xF0000640) \
- /* VSX Vector Multiply-Subtract Type-A Double-Precision */ \
- V(xvmsubadp, XVMSUBADP, 0xF0000388) \
- /* VSX Vector Multiply-Subtract Type-A Single-Precision */ \
- V(xvmsubasp, XVMSUBASP, 0xF0000288) \
- /* VSX Vector Multiply-Subtract Type-M Double-Precision */ \
- V(xvmsubmdp, XVMSUBMDP, 0xF00003C8) \
- /* VSX Vector Multiply-Subtract Type-M Single-Precision */ \
- V(xvmsubmsp, XVMSUBMSP, 0xF00002C8) \
- /* VSX Vector Multiply Double-Precision */ \
- V(xvmuldp, XVMULDP, 0xF0000380) \
- /* VSX Vector Multiply Single-Precision */ \
- V(xvmulsp, XVMULSP, 0xF0000280) \
- /* VSX Vector Negative Multiply-Add Type-A Double-Precision */ \
- V(xvnmaddadp, XVNMADDADP, 0xF0000708) \
- /* VSX Vector Negative Multiply-Add Type-A Single-Precision */ \
- V(xvnmaddasp, XVNMADDASP, 0xF0000608) \
- /* VSX Vector Negative Multiply-Add Type-M Double-Precision */ \
- V(xvnmaddmdp, XVNMADDMDP, 0xF0000748) \
- /* VSX Vector Negative Multiply-Add Type-M Single-Precision */ \
- V(xvnmaddmsp, XVNMADDMSP, 0xF0000648) \
- /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */ \
- V(xvnmsubadp, XVNMSUBADP, 0xF0000788) \
- /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */ \
- V(xvnmsubasp, XVNMSUBASP, 0xF0000688) \
- /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */ \
- V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8) \
- /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */ \
- V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8) \
- /* VSX Vector Reciprocal Estimate Double-Precision */ \
- V(xvredp, XVREDP, 0xF0000368) \
- /* VSX Vector Reciprocal Estimate Single-Precision */ \
- V(xvresp, XVRESP, 0xF0000268) \
- /* VSX Vector Subtract Double-Precision */ \
- V(xvsubdp, XVSUBDP, 0xF0000340) \
- /* VSX Vector Subtract Single-Precision */ \
- V(xvsubsp, XVSUBSP, 0xF0000240) \
- /* VSX Vector Test for software Divide Double-Precision */ \
- V(xvtdivdp, XVTDIVDP, 0xF00003E8) \
- /* VSX Vector Test for software Divide Single-Precision */ \
- V(xvtdivsp, XVTDIVSP, 0xF00002E8) \
- /* VSX Logical AND */ \
- V(xxland, XXLAND, 0xF0000410) \
- /* VSX Logical AND with Complement */ \
- V(xxlandc, XXLANDC, 0xF0000450) \
- /* VSX Logical Equivalence */ \
- V(xxleqv, XXLEQV, 0xF00005D0) \
- /* VSX Logical NAND */ \
- V(xxlnand, XXLNAND, 0xF0000590) \
- /* VSX Logical NOR */ \
- V(xxlnor, XXLNOR, 0xF0000510) \
- /* VSX Logical OR */ \
- V(xxlor, XXLOR, 0xF0000490) \
- /* VSX Logical OR with Complement */ \
- V(xxlorc, XXLORC, 0xF0000550) \
- /* VSX Logical XOR */ \
- V(xxlxor, XXLXOR, 0xF00004D0) \
- /* VSX Merge High Word */ \
- V(xxmrghw, XXMRGHW, 0xF0000090) \
- /* VSX Merge Low Word */ \
- V(xxmrglw, XXMRGLW, 0xF0000190) \
- /* VSX Permute Doubleword Immediate */ \
- V(xxpermdi, XXPERMDI, 0xF0000050) \
- /* VSX Shift Left Double by Word Immediate */ \
- V(xxsldwi, XXSLDWI, 0xF0000010) \
- /* VSX Splat Word */ \
+using Instr = uint32_t;
+
+#define PPC_XX3_OPCODE_LIST(V) \
+ /* VSX Scalar Add Double-Precision */ \
+ V(xsadddp, XSADDDP, 0xF0000100) \
+ /* VSX Scalar Add Single-Precision */ \
+ V(xsaddsp, XSADDSP, 0xF0000000) \
+ /* VSX Scalar Compare Ordered Double-Precision */ \
+ V(xscmpodp, XSCMPODP, 0xF0000158) \
+ /* VSX Scalar Compare Unordered Double-Precision */ \
+ V(xscmpudp, XSCMPUDP, 0xF0000118) \
+ /* VSX Scalar Copy Sign Double-Precision */ \
+ V(xscpsgndp, XSCPSGNDP, 0xF0000580) \
+ /* VSX Scalar Divide Double-Precision */ \
+ V(xsdivdp, XSDIVDP, 0xF00001C0) \
+ /* VSX Scalar Divide Single-Precision */ \
+ V(xsdivsp, XSDIVSP, 0xF00000C0) \
+ /* VSX Scalar Multiply-Add Type-A Double-Precision */ \
+ V(xsmaddadp, XSMADDADP, 0xF0000108) \
+ /* VSX Scalar Multiply-Add Type-A Single-Precision */ \
+ V(xsmaddasp, XSMADDASP, 0xF0000008) \
+ /* VSX Scalar Multiply-Add Type-M Double-Precision */ \
+ V(xsmaddmdp, XSMADDMDP, 0xF0000148) \
+ /* VSX Scalar Multiply-Add Type-M Single-Precision */ \
+ V(xsmaddmsp, XSMADDMSP, 0xF0000048) \
+ /* VSX Scalar Maximum Double-Precision */ \
+ V(xsmaxdp, XSMAXDP, 0xF0000500) \
+ /* VSX Scalar Minimum Double-Precision */ \
+ V(xsmindp, XSMINDP, 0xF0000540) \
+ /* VSX Scalar Multiply-Subtract Type-A Double-Precision */ \
+ V(xsmsubadp, XSMSUBADP, 0xF0000188) \
+ /* VSX Scalar Multiply-Subtract Type-A Single-Precision */ \
+ V(xsmsubasp, XSMSUBASP, 0xF0000088) \
+ /* VSX Scalar Multiply-Subtract Type-M Double-Precision */ \
+ V(xsmsubmdp, XSMSUBMDP, 0xF00001C8) \
+ /* VSX Scalar Multiply-Subtract Type-M Single-Precision */ \
+ V(xsmsubmsp, XSMSUBMSP, 0xF00000C8) \
+ /* VSX Scalar Multiply Double-Precision */ \
+ V(xsmuldp, XSMULDP, 0xF0000180) \
+ /* VSX Scalar Multiply Single-Precision */ \
+ V(xsmulsp, XSMULSP, 0xF0000080) \
+ /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */ \
+ V(xsnmaddadp, XSNMADDADP, 0xF0000508) \
+ /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */ \
+ V(xsnmaddasp, XSNMADDASP, 0xF0000408) \
+ /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */ \
+ V(xsnmaddmdp, XSNMADDMDP, 0xF0000548) \
+ /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */ \
+ V(xsnmaddmsp, XSNMADDMSP, 0xF0000448) \
+ /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */ \
+ V(xsnmsubadp, XSNMSUBADP, 0xF0000588) \
+ /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */ \
+ V(xsnmsubasp, XSNMSUBASP, 0xF0000488) \
+ /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */ \
+ V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8) \
+ /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */ \
+ V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8) \
+ /* VSX Scalar Reciprocal Estimate Double-Precision */ \
+ V(xsredp, XSREDP, 0xF0000168) \
+ /* VSX Scalar Reciprocal Estimate Single-Precision */ \
+ V(xsresp, XSRESP, 0xF0000068) \
+ /* VSX Scalar Subtract Double-Precision */ \
+ V(xssubdp, XSSUBDP, 0xF0000140) \
+ /* VSX Scalar Subtract Single-Precision */ \
+ V(xssubsp, XSSUBSP, 0xF0000040) \
+ /* VSX Scalar Test for software Divide Double-Precision */ \
+ V(xstdivdp, XSTDIVDP, 0xF00001E8) \
+ /* VSX Vector Add Double-Precision */ \
+ V(xvadddp, XVADDDP, 0xF0000300) \
+ /* VSX Vector Add Single-Precision */ \
+ V(xvaddsp, XVADDSP, 0xF0000200) \
+ /* VSX Vector Compare Equal To Double-Precision */ \
+ V(xvcmpeqdp, XVCMPEQDP, 0xF0000318) \
+ /* VSX Vector Compare Equal To Double-Precision & record CR6 */ \
+ V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718) \
+ /* VSX Vector Compare Equal To Single-Precision */ \
+ V(xvcmpeqsp, XVCMPEQSP, 0xF0000218) \
+ /* VSX Vector Compare Equal To Single-Precision & record CR6 */ \
+ V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618) \
+ /* VSX Vector Compare Greater Than or Equal To Double-Precision */ \
+ V(xvcmpgedp, XVCMPGEDP, 0xF0000398) \
+ /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */ \
+ /* CR6 */ \
+ V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798) \
+ /* VSX Vector Compare Greater Than or Equal To Single-Precision */ \
+ V(xvcmpgesp, XVCMPGESP, 0xF0000298) \
+ /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */ \
+ /* CR6 */ \
+ V(xvcmpgespx, XVCMPGESPx, 0xF0000698) \
+ /* VSX Vector Compare Greater Than Double-Precision */ \
+ V(xvcmpgtdp, XVCMPGTDP, 0xF0000358) \
+ /* VSX Vector Compare Greater Than Double-Precision & record CR6 */ \
+ V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758) \
+ /* VSX Vector Compare Greater Than Single-Precision */ \
+ V(xvcmpgtsp, XVCMPGTSP, 0xF0000258) \
+ /* VSX Vector Compare Greater Than Single-Precision & record CR6 */ \
+ V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658) \
+ /* VSX Vector Copy Sign Double-Precision */ \
+ V(xvcpsgndp, XVCPSGNDP, 0xF0000780) \
+ /* VSX Vector Copy Sign Single-Precision */ \
+ V(xvcpsgnsp, XVCPSGNSP, 0xF0000680) \
+ /* VSX Vector Divide Double-Precision */ \
+ V(xvdivdp, XVDIVDP, 0xF00003C0) \
+ /* VSX Vector Divide Single-Precision */ \
+ V(xvdivsp, XVDIVSP, 0xF00002C0) \
+ /* VSX Vector Multiply-Add Type-A Double-Precision */ \
+ V(xvmaddadp, XVMADDADP, 0xF0000308) \
+ /* VSX Vector Multiply-Add Type-A Single-Precision */ \
+ V(xvmaddasp, XVMADDASP, 0xF0000208) \
+ /* VSX Vector Multiply-Add Type-M Double-Precision */ \
+ V(xvmaddmdp, XVMADDMDP, 0xF0000348) \
+ /* VSX Vector Multiply-Add Type-M Single-Precision */ \
+ V(xvmaddmsp, XVMADDMSP, 0xF0000248) \
+ /* VSX Vector Maximum Double-Precision */ \
+ V(xvmaxdp, XVMAXDP, 0xF0000700) \
+ /* VSX Vector Maximum Single-Precision */ \
+ V(xvmaxsp, XVMAXSP, 0xF0000600) \
+ /* VSX Vector Minimum Double-Precision */ \
+ V(xvmindp, XVMINDP, 0xF0000740) \
+ /* VSX Vector Minimum Single-Precision */ \
+ V(xvminsp, XVMINSP, 0xF0000640) \
+ /* VSX Vector Multiply-Subtract Type-A Double-Precision */ \
+ V(xvmsubadp, XVMSUBADP, 0xF0000388) \
+ /* VSX Vector Multiply-Subtract Type-A Single-Precision */ \
+ V(xvmsubasp, XVMSUBASP, 0xF0000288) \
+ /* VSX Vector Multiply-Subtract Type-M Double-Precision */ \
+ V(xvmsubmdp, XVMSUBMDP, 0xF00003C8) \
+ /* VSX Vector Multiply-Subtract Type-M Single-Precision */ \
+ V(xvmsubmsp, XVMSUBMSP, 0xF00002C8) \
+ /* VSX Vector Multiply Double-Precision */ \
+ V(xvmuldp, XVMULDP, 0xF0000380) \
+ /* VSX Vector Multiply Single-Precision */ \
+ V(xvmulsp, XVMULSP, 0xF0000280) \
+ /* VSX Vector Negative Multiply-Add Type-A Double-Precision */ \
+ V(xvnmaddadp, XVNMADDADP, 0xF0000708) \
+ /* VSX Vector Negative Multiply-Add Type-A Single-Precision */ \
+ V(xvnmaddasp, XVNMADDASP, 0xF0000608) \
+ /* VSX Vector Negative Multiply-Add Type-M Double-Precision */ \
+ V(xvnmaddmdp, XVNMADDMDP, 0xF0000748) \
+ /* VSX Vector Negative Multiply-Add Type-M Single-Precision */ \
+ V(xvnmaddmsp, XVNMADDMSP, 0xF0000648) \
+ /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */ \
+ V(xvnmsubadp, XVNMSUBADP, 0xF0000788) \
+ /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */ \
+ V(xvnmsubasp, XVNMSUBASP, 0xF0000688) \
+ /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */ \
+ V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8) \
+ /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */ \
+ V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8) \
+ /* VSX Vector Reciprocal Estimate Double-Precision */ \
+ V(xvredp, XVREDP, 0xF0000368) \
+ /* VSX Vector Reciprocal Estimate Single-Precision */ \
+ V(xvresp, XVRESP, 0xF0000268) \
+ /* VSX Vector Subtract Double-Precision */ \
+ V(xvsubdp, XVSUBDP, 0xF0000340) \
+ /* VSX Vector Subtract Single-Precision */ \
+ V(xvsubsp, XVSUBSP, 0xF0000240) \
+ /* VSX Vector Test for software Divide Double-Precision */ \
+ V(xvtdivdp, XVTDIVDP, 0xF00003E8) \
+ /* VSX Vector Test for software Divide Single-Precision */ \
+ V(xvtdivsp, XVTDIVSP, 0xF00002E8) \
+ /* VSX Logical AND */ \
+ V(xxland, XXLAND, 0xF0000410) \
+ /* VSX Logical AND with Complement */ \
+ V(xxlandc, XXLANDC, 0xF0000450) \
+ /* VSX Logical Equivalence */ \
+ V(xxleqv, XXLEQV, 0xF00005D0) \
+ /* VSX Logical NAND */ \
+ V(xxlnand, XXLNAND, 0xF0000590) \
+ /* VSX Logical NOR */ \
+ V(xxlnor, XXLNOR, 0xF0000510) \
+ /* VSX Logical OR */ \
+ V(xxlor, XXLOR, 0xF0000490) \
+ /* VSX Logical OR with Complement */ \
+ V(xxlorc, XXLORC, 0xF0000550) \
+ /* VSX Logical XOR */ \
+ V(xxlxor, XXLXOR, 0xF00004D0) \
+ /* VSX Merge High Word */ \
+ V(xxmrghw, XXMRGHW, 0xF0000090) \
+ /* VSX Merge Low Word */ \
+ V(xxmrglw, XXMRGLW, 0xF0000190) \
+ /* VSX Permute Doubleword Immediate */ \
+ V(xxpermdi, XXPERMDI, 0xF0000050) \
+ /* VSX Shift Left Double by Word Immediate */ \
+ V(xxsldwi, XXSLDWI, 0xF0000010) \
+ /* VSX Splat Word */ \
V(xxspltw, XXSPLTW, 0xF0000290)
-#define PPC_Z23_OPCODE_LIST(V) \
- /* Decimal Quantize */ \
- V(dqua, DQUA, 0xEC000006) \
- /* Decimal Quantize Immediate */ \
- V(dquai, DQUAI, 0xEC000086) \
- /* Decimal Quantize Immediate Quad */ \
- V(dquaiq, DQUAIQ, 0xFC000086) \
- /* Decimal Quantize Quad */ \
- V(dquaq, DQUAQ, 0xFC000006) \
- /* Decimal Floating Round To FP Integer Without Inexact */ \
- V(drintn, DRINTN, 0xEC0001C6) \
- /* Decimal Floating Round To FP Integer Without Inexact Quad */ \
- V(drintnq, DRINTNQ, 0xFC0001C6) \
- /* Decimal Floating Round To FP Integer With Inexact */ \
- V(drintx, DRINTX, 0xEC0000C6) \
- /* Decimal Floating Round To FP Integer With Inexact Quad */ \
- V(drintxq, DRINTXQ, 0xFC0000C6) \
- /* Decimal Floating Reround */ \
- V(drrnd, DRRND, 0xEC000046) \
- /* Decimal Floating Reround Quad */ \
+#define PPC_Z23_OPCODE_LIST(V) \
+ /* Decimal Quantize */ \
+ V(dqua, DQUA, 0xEC000006) \
+ /* Decimal Quantize Immediate */ \
+ V(dquai, DQUAI, 0xEC000086) \
+ /* Decimal Quantize Immediate Quad */ \
+ V(dquaiq, DQUAIQ, 0xFC000086) \
+ /* Decimal Quantize Quad */ \
+ V(dquaq, DQUAQ, 0xFC000006) \
+ /* Decimal Floating Round To FP Integer Without Inexact */ \
+ V(drintn, DRINTN, 0xEC0001C6) \
+ /* Decimal Floating Round To FP Integer Without Inexact Quad */ \
+ V(drintnq, DRINTNQ, 0xFC0001C6) \
+ /* Decimal Floating Round To FP Integer With Inexact */ \
+ V(drintx, DRINTX, 0xEC0000C6) \
+ /* Decimal Floating Round To FP Integer With Inexact Quad */ \
+ V(drintxq, DRINTXQ, 0xFC0000C6) \
+ /* Decimal Floating Reround */ \
+ V(drrnd, DRRND, 0xEC000046) \
+ /* Decimal Floating Reround Quad */ \
V(drrndq, DRRNDQ, 0xFC000046)
-#define PPC_Z22_OPCODE_LIST(V) \
- /* Decimal Floating Shift Coefficient Left Immediate */ \
- V(dscli, DSCLI, 0xEC000084) \
- /* Decimal Floating Shift Coefficient Left Immediate Quad */ \
- V(dscliq, DSCLIQ, 0xFC000084) \
- /* Decimal Floating Shift Coefficient Right Immediate */ \
- V(dscri, DSCRI, 0xEC0000C4) \
- /* Decimal Floating Shift Coefficient Right Immediate Quad */ \
- V(dscriq, DSCRIQ, 0xFC0000C4) \
- /* Decimal Floating Test Data Class */ \
- V(dtstdc, DTSTDC, 0xEC000184) \
- /* Decimal Floating Test Data Class Quad */ \
- V(dtstdcq, DTSTDCQ, 0xFC000184) \
- /* Decimal Floating Test Data Group */ \
- V(dtstdg, DTSTDG, 0xEC0001C4) \
- /* Decimal Floating Test Data Group Quad */ \
+#define PPC_Z22_OPCODE_LIST(V) \
+ /* Decimal Floating Shift Coefficient Left Immediate */ \
+ V(dscli, DSCLI, 0xEC000084) \
+ /* Decimal Floating Shift Coefficient Left Immediate Quad */ \
+ V(dscliq, DSCLIQ, 0xFC000084) \
+ /* Decimal Floating Shift Coefficient Right Immediate */ \
+ V(dscri, DSCRI, 0xEC0000C4) \
+ /* Decimal Floating Shift Coefficient Right Immediate Quad */ \
+ V(dscriq, DSCRIQ, 0xFC0000C4) \
+ /* Decimal Floating Test Data Class */ \
+ V(dtstdc, DTSTDC, 0xEC000184) \
+ /* Decimal Floating Test Data Class Quad */ \
+ V(dtstdcq, DTSTDCQ, 0xFC000184) \
+ /* Decimal Floating Test Data Group */ \
+ V(dtstdg, DTSTDG, 0xEC0001C4) \
+ /* Decimal Floating Test Data Group Quad */ \
V(dtstdgq, DTSTDGQ, 0xFC0001C4)
-#define PPC_XX2_OPCODE_LIST(V) \
- /* Move To VSR Doubleword */ \
- V(mtvsrd, MTVSRD, 0x7C000166) \
- /* Move To VSR Word Algebraic */ \
- V(mtvsrwa, MTVSRWA, 0x7C0001A6) \
- /* Move To VSR Word and Zero */ \
- V(mtvsrwz, MTVSRWZ, 0x7C0001E6) \
- /* VSX Scalar Absolute Value Double-Precision */ \
- V(xsabsdp, XSABSDP, 0xF0000564) \
- /* VSX Scalar Convert Double-Precision to Single-Precision */ \
- V(xscvdpsp, XSCVDPSP, 0xF0000424) \
- /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
- /* signalling */ \
- V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
- /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
- /* Saturate */ \
- V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \
- /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */ \
- /* Saturate */ \
- V(xscvdpsxws, XSCVDPSXWS, 0xF0000160) \
- /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */ \
- /* Doubleword Saturate */ \
- V(xscvdpuxds, XSCVDPUXDS, 0xF0000520) \
- /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */ \
- /* Saturate */ \
- V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \
- /* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */ \
- V(xscvspdp, XSCVSPDP, 0xF0000524) \
- /* Scalar Convert Single-Precision to Double-Precision format Non- */ \
- /* signalling */ \
- V(xscvspdpn, XSCVSPDPN, 0xF000052C) \
- /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
- V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \
- /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
- V(xscvsxdsp, XSCVSXDSP, 0xF00004E0) \
- /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */ \
- /* Precision */ \
- V(xscvuxddp, XSCVUXDDP, 0xF00005A0) \
- /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */ \
- /* Precision */ \
- V(xscvuxdsp, XSCVUXDSP, 0xF00004A0) \
- /* VSX Scalar Negative Absolute Value Double-Precision */ \
- V(xsnabsdp, XSNABSDP, 0xF00005A4) \
- /* VSX Scalar Negate Double-Precision */ \
- V(xsnegdp, XSNEGDP, 0xF00005E4) \
- /* VSX Scalar Round to Double-Precision Integer */ \
- V(xsrdpi, XSRDPI, 0xF0000124) \
- /* VSX Scalar Round to Double-Precision Integer using Current rounding */ \
- /* mode */ \
- V(xsrdpic, XSRDPIC, 0xF00001AC) \
- /* VSX Scalar Round to Double-Precision Integer toward -Infinity */ \
- V(xsrdpim, XSRDPIM, 0xF00001E4) \
- /* VSX Scalar Round to Double-Precision Integer toward +Infinity */ \
- V(xsrdpip, XSRDPIP, 0xF00001A4) \
- /* VSX Scalar Round to Double-Precision Integer toward Zero */ \
- V(xsrdpiz, XSRDPIZ, 0xF0000164) \
- /* VSX Scalar Round to Single-Precision */ \
- V(xsrsp, XSRSP, 0xF0000464) \
- /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */ \
- V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128) \
- /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */ \
- V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \
- /* VSX Scalar Square Root Double-Precision */ \
- V(xssqrtdp, XSSQRTDP, 0xF000012C) \
- /* VSX Scalar Square Root Single-Precision */ \
- V(xssqrtsp, XSSQRTSP, 0xF000002C) \
- /* VSX Scalar Test for software Square Root Double-Precision */ \
- V(xstsqrtdp, XSTSQRTDP, 0xF00001A8) \
- /* VSX Vector Absolute Value Double-Precision */ \
- V(xvabsdp, XVABSDP, 0xF0000764) \
- /* VSX Vector Absolute Value Single-Precision */ \
- V(xvabssp, XVABSSP, 0xF0000664) \
- /* VSX Vector Convert Double-Precision to Single-Precision */ \
- V(xvcvdpsp, XVCVDPSP, 0xF0000624) \
- /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \
- /* Saturate */ \
- V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760) \
- /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360) \
- /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */ \
- /* Doubleword Saturate */ \
- V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720) \
- /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320) \
- /* VSX Vector Convert Single-Precision to Double-Precision */ \
- V(xvcvspdp, XVCVSPDP, 0xF0000724) \
- /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \
- /* Saturate */ \
- V(xvcvspsxds, XVCVSPSXDS, 0xF0000660) \
- /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \
- /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */ \
- /* Doubleword Saturate */ \
- V(xvcvspuxds, XVCVSPUXDS, 0xF0000620) \
- /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \
- /* Saturate */ \
- V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \
- /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
- V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \
- /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \
- V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0) \
- /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */ \
- V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0) \
- /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \
- V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \
- /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \
- /* Precision */ \
- V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \
- /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */ \
- /* Precision */ \
- V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0) \
- /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */ \
- V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0) \
- /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \
- V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \
- /* VSX Vector Negative Absolute Value Double-Precision */ \
- V(xvnabsdp, XVNABSDP, 0xF00007A4) \
- /* VSX Vector Negative Absolute Value Single-Precision */ \
- V(xvnabssp, XVNABSSP, 0xF00006A4) \
- /* VSX Vector Negate Double-Precision */ \
- V(xvnegdp, XVNEGDP, 0xF00007E4) \
- /* VSX Vector Negate Single-Precision */ \
- V(xvnegsp, XVNEGSP, 0xF00006E4) \
- /* VSX Vector Round to Double-Precision Integer */ \
- V(xvrdpi, XVRDPI, 0xF0000324) \
- /* VSX Vector Round to Double-Precision Integer using Current rounding */ \
- /* mode */ \
- V(xvrdpic, XVRDPIC, 0xF00003AC) \
- /* VSX Vector Round to Double-Precision Integer toward -Infinity */ \
- V(xvrdpim, XVRDPIM, 0xF00003E4) \
- /* VSX Vector Round to Double-Precision Integer toward +Infinity */ \
- V(xvrdpip, XVRDPIP, 0xF00003A4) \
- /* VSX Vector Round to Double-Precision Integer toward Zero */ \
- V(xvrdpiz, XVRDPIZ, 0xF0000364) \
- /* VSX Vector Round to Single-Precision Integer */ \
- V(xvrspi, XVRSPI, 0xF0000224) \
- /* VSX Vector Round to Single-Precision Integer using Current rounding */ \
- /* mode */ \
- V(xvrspic, XVRSPIC, 0xF00002AC) \
- /* VSX Vector Round to Single-Precision Integer toward -Infinity */ \
- V(xvrspim, XVRSPIM, 0xF00002E4) \
- /* VSX Vector Round to Single-Precision Integer toward +Infinity */ \
- V(xvrspip, XVRSPIP, 0xF00002A4) \
- /* VSX Vector Round to Single-Precision Integer toward Zero */ \
- V(xvrspiz, XVRSPIZ, 0xF0000264) \
- /* VSX Vector Reciprocal Square Root Estimate Double-Precision */ \
- V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328) \
- /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \
- V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \
- /* VSX Vector Square Root Double-Precision */ \
- V(xvsqrtdp, XVSQRTDP, 0xF000032C) \
- /* VSX Vector Square Root Single-Precision */ \
- V(xvsqrtsp, XVSQRTSP, 0xF000022C) \
- /* VSX Vector Test for software Square Root Double-Precision */ \
- V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8) \
- /* VSX Vector Test for software Square Root Single-Precision */ \
+#define PPC_XX2_OPCODE_LIST(V) \
+ /* Move To VSR Doubleword */ \
+ V(mtvsrd, MTVSRD, 0x7C000166) \
+ /* Move To VSR Word Algebraic */ \
+ V(mtvsrwa, MTVSRWA, 0x7C0001A6) \
+ /* Move To VSR Word and Zero */ \
+ V(mtvsrwz, MTVSRWZ, 0x7C0001E6) \
+ /* VSX Scalar Absolute Value Double-Precision */ \
+ V(xsabsdp, XSABSDP, 0xF0000564) \
+ /* VSX Scalar Convert Double-Precision to Single-Precision */ \
+ V(xscvdpsp, XSCVDPSP, 0xF0000424) \
+ /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
+ /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
+ /* Saturate */ \
+ V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \
+ /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xscvdpsxws, XSCVDPSXWS, 0xF0000160) \
+ /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */ \
+ /* Doubleword Saturate */ \
+ V(xscvdpuxds, XSCVDPUXDS, 0xF0000520) \
+ /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \
+ /* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */ \
+ V(xscvspdp, XSCVSPDP, 0xF0000524) \
+ /* Scalar Convert Single-Precision to Double-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvspdpn, XSCVSPDPN, 0xF000052C) \
+ /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
+ V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \
+ /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
+ V(xscvsxdsp, XSCVSXDSP, 0xF00004E0) \
+ /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */ \
+ /* Precision */ \
+ V(xscvuxddp, XSCVUXDDP, 0xF00005A0) \
+ /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */ \
+ /* Precision */ \
+ V(xscvuxdsp, XSCVUXDSP, 0xF00004A0) \
+ /* VSX Scalar Negative Absolute Value Double-Precision */ \
+ V(xsnabsdp, XSNABSDP, 0xF00005A4) \
+ /* VSX Scalar Negate Double-Precision */ \
+ V(xsnegdp, XSNEGDP, 0xF00005E4) \
+ /* VSX Scalar Round to Double-Precision Integer */ \
+ V(xsrdpi, XSRDPI, 0xF0000124) \
+ /* VSX Scalar Round to Double-Precision Integer using Current rounding */ \
+ /* mode */ \
+ V(xsrdpic, XSRDPIC, 0xF00001AC) \
+ /* VSX Scalar Round to Double-Precision Integer toward -Infinity */ \
+ V(xsrdpim, XSRDPIM, 0xF00001E4) \
+ /* VSX Scalar Round to Double-Precision Integer toward +Infinity */ \
+ V(xsrdpip, XSRDPIP, 0xF00001A4) \
+ /* VSX Scalar Round to Double-Precision Integer toward Zero */ \
+ V(xsrdpiz, XSRDPIZ, 0xF0000164) \
+ /* VSX Scalar Round to Single-Precision */ \
+ V(xsrsp, XSRSP, 0xF0000464) \
+ /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */ \
+ V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128) \
+ /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */ \
+ V(xsrsqrtesp, XSRSQRTESP, 0xF0000028) \
+ /* VSX Scalar Square Root Double-Precision */ \
+ V(xssqrtdp, XSSQRTDP, 0xF000012C) \
+ /* VSX Scalar Square Root Single-Precision */ \
+ V(xssqrtsp, XSSQRTSP, 0xF000002C) \
+ /* VSX Scalar Test for software Square Root Double-Precision */ \
+ V(xstsqrtdp, XSTSQRTDP, 0xF00001A8) \
+ /* VSX Vector Absolute Value Double-Precision */ \
+ V(xvabsdp, XVABSDP, 0xF0000764) \
+ /* VSX Vector Absolute Value Single-Precision */ \
+ V(xvabssp, XVABSSP, 0xF0000664) \
+ /* VSX Vector Convert Double-Precision to Single-Precision */ \
+ V(xvcvdpsp, XVCVDPSP, 0xF0000624) \
+ /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */ \
+ /* Saturate */ \
+ V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760) \
+ /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360) \
+ /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */ \
+ /* Doubleword Saturate */ \
+ V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720) \
+ /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320) \
+ /* VSX Vector Convert Single-Precision to Double-Precision */ \
+ V(xvcvspdp, XVCVSPDP, 0xF0000724) \
+ /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */ \
+ /* Saturate */ \
+ V(xvcvspsxds, XVCVSPSXDS, 0xF0000660) \
+ /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvspsxws, XVCVSPSXWS, 0xF0000260) \
+ /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */ \
+ /* Doubleword Saturate */ \
+ V(xvcvspuxds, XVCVSPUXDS, 0xF0000620) \
+ /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(xvcvspuxws, XVCVSPUXWS, 0xF0000220) \
+ /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */ \
+ V(xvcvsxddp, XVCVSXDDP, 0xF00007E0) \
+ /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */ \
+ V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0) \
+ /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */ \
+ V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0) \
+ /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */ \
+ V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */ \
+ /* Precision */ \
+ V(xvcvuxddp, XVCVUXDDP, 0xF00007A0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */ \
+ /* Precision */ \
+ V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */ \
+ V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0) \
+ /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */ \
+ V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0) \
+ /* VSX Vector Negative Absolute Value Double-Precision */ \
+ V(xvnabsdp, XVNABSDP, 0xF00007A4) \
+ /* VSX Vector Negative Absolute Value Single-Precision */ \
+ V(xvnabssp, XVNABSSP, 0xF00006A4) \
+ /* VSX Vector Negate Double-Precision */ \
+ V(xvnegdp, XVNEGDP, 0xF00007E4) \
+ /* VSX Vector Negate Single-Precision */ \
+ V(xvnegsp, XVNEGSP, 0xF00006E4) \
+ /* VSX Vector Round to Double-Precision Integer */ \
+ V(xvrdpi, XVRDPI, 0xF0000324) \
+ /* VSX Vector Round to Double-Precision Integer using Current rounding */ \
+ /* mode */ \
+ V(xvrdpic, XVRDPIC, 0xF00003AC) \
+ /* VSX Vector Round to Double-Precision Integer toward -Infinity */ \
+ V(xvrdpim, XVRDPIM, 0xF00003E4) \
+ /* VSX Vector Round to Double-Precision Integer toward +Infinity */ \
+ V(xvrdpip, XVRDPIP, 0xF00003A4) \
+ /* VSX Vector Round to Double-Precision Integer toward Zero */ \
+ V(xvrdpiz, XVRDPIZ, 0xF0000364) \
+ /* VSX Vector Round to Single-Precision Integer */ \
+ V(xvrspi, XVRSPI, 0xF0000224) \
+ /* VSX Vector Round to Single-Precision Integer using Current rounding */ \
+ /* mode */ \
+ V(xvrspic, XVRSPIC, 0xF00002AC) \
+ /* VSX Vector Round to Single-Precision Integer toward -Infinity */ \
+ V(xvrspim, XVRSPIM, 0xF00002E4) \
+ /* VSX Vector Round to Single-Precision Integer toward +Infinity */ \
+ V(xvrspip, XVRSPIP, 0xF00002A4) \
+ /* VSX Vector Round to Single-Precision Integer toward Zero */ \
+ V(xvrspiz, XVRSPIZ, 0xF0000264) \
+ /* VSX Vector Reciprocal Square Root Estimate Double-Precision */ \
+ V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328) \
+ /* VSX Vector Reciprocal Square Root Estimate Single-Precision */ \
+ V(xvrsqrtesp, XVRSQRTESP, 0xF0000228) \
+ /* VSX Vector Square Root Double-Precision */ \
+ V(xvsqrtdp, XVSQRTDP, 0xF000032C) \
+ /* VSX Vector Square Root Single-Precision */ \
+ V(xvsqrtsp, XVSQRTSP, 0xF000022C) \
+ /* VSX Vector Test for software Square Root Double-Precision */ \
+ V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8) \
+ /* VSX Vector Test for software Square Root Single-Precision */ \
V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8)
-#define PPC_EVX_OPCODE_LIST(V) \
- /* Vector Load Double Word into Double Word by External PID Indexed */ \
- V(evlddepx, EVLDDEPX, 0x7C00063E) \
- /* Vector Store Double of Double by External PID Indexed */ \
- V(evstddepx, EVSTDDEPX, 0x7C00073E) \
- /* Bit Reversed Increment */ \
- V(brinc, BRINC, 0x1000020F) \
- /* Vector Absolute Value */ \
- V(evabs, EVABS, 0x10000208) \
- /* Vector Add Immediate Word */ \
- V(evaddiw, EVADDIW, 0x10000202) \
- /* Vector Add Signed, Modulo, Integer to Accumulator Word */ \
- V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9) \
- /* Vector Add Signed, Saturate, Integer to Accumulator Word */ \
- V(evaddssiaaw, EVADDSSIAAW, 0x100004C1) \
- /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */ \
- V(evaddumiaaw, EVADDUMIAAW, 0x100004C8) \
- /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */ \
- V(evaddusiaaw, EVADDUSIAAW, 0x100004C0) \
- /* Vector Add Word */ \
- V(evaddw, EVADDW, 0x10000200) \
- /* Vector AND */ \
- V(evand, EVAND, 0x10000211) \
- /* Vector AND with Complement */ \
- V(evandc, EVANDC, 0x10000212) \
- /* Vector Compare Equal */ \
- V(evcmpeq, EVCMPEQ, 0x10000234) \
- /* Vector Compare Greater Than Signed */ \
- V(evcmpgts, EVCMPGTS, 0x10000231) \
- /* Vector Compare Greater Than Unsigned */ \
- V(evcmpgtu, EVCMPGTU, 0x10000230) \
- /* Vector Compare Less Than Signed */ \
- V(evcmplts, EVCMPLTS, 0x10000233) \
- /* Vector Compare Less Than Unsigned */ \
- V(evcmpltu, EVCMPLTU, 0x10000232) \
- /* Vector Count Leading Signed Bits Word */ \
- V(evcntlsw, EVCNTLSW, 0x1000020E) \
- /* Vector Count Leading Zeros Word */ \
- V(evcntlzw, EVCNTLZW, 0x1000020D) \
- /* Vector Divide Word Signed */ \
- V(evdivws, EVDIVWS, 0x100004C6) \
- /* Vector Divide Word Unsigned */ \
- V(evdivwu, EVDIVWU, 0x100004C7) \
- /* Vector Equivalent */ \
- V(eveqv, EVEQV, 0x10000219) \
- /* Vector Extend Sign Byte */ \
- V(evextsb, EVEXTSB, 0x1000020A) \
- /* Vector Extend Sign Half Word */ \
- V(evextsh, EVEXTSH, 0x1000020B) \
- /* Vector Load Double Word into Double Word */ \
- V(evldd, EVLDD, 0x10000301) \
- /* Vector Load Double Word into Double Word Indexed */ \
- V(evlddx, EVLDDX, 0x10000300) \
- /* Vector Load Double into Four Half Words */ \
- V(evldh, EVLDH, 0x10000305) \
- /* Vector Load Double into Four Half Words Indexed */ \
- V(evldhx, EVLDHX, 0x10000304) \
- /* Vector Load Double into Two Words */ \
- V(evldw, EVLDW, 0x10000303) \
- /* Vector Load Double into Two Words Indexed */ \
- V(evldwx, EVLDWX, 0x10000302) \
- /* Vector Load Half Word into Half Words Even and Splat */ \
- V(evlhhesplat, EVLHHESPLAT, 0x10000309) \
- /* Vector Load Half Word into Half Words Even and Splat Indexed */ \
- V(evlhhesplatx, EVLHHESPLATX, 0x10000308) \
- /* Vector Load Half Word into Half Word Odd Signed and Splat */ \
- V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F) \
- /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */ \
- V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E) \
- /* Vector Load Half Word into Half Word Odd Unsigned and Splat */ \
- V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D) \
- /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */ \
- V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C) \
- /* Vector Load Word into Two Half Words Even */ \
- V(evlwhe, EVLWHE, 0x10000311) \
- /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \
- V(evlwhos, EVLWHOS, 0x10000317) \
- /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */ \
- /* extension) */ \
- V(evlwhosx, EVLWHOSX, 0x10000316) \
- /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */ \
- V(evlwhou, EVLWHOU, 0x10000315) \
- /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */ \
- /* extended) */ \
- V(evlwhoux, EVLWHOUX, 0x10000314) \
- /* Vector Load Word into Two Half Words and Splat */ \
- V(evlwhsplat, EVLWHSPLAT, 0x1000031D) \
- /* Vector Load Word into Two Half Words and Splat Indexed */ \
- V(evlwhsplatx, EVLWHSPLATX, 0x1000031C) \
- /* Vector Load Word into Word and Splat */ \
- V(evlwwsplat, EVLWWSPLAT, 0x10000319) \
- /* Vector Load Word into Word and Splat Indexed */ \
- V(evlwwsplatx, EVLWWSPLATX, 0x10000318) \
- /* Vector Merge High */ \
- V(evmergehi, EVMERGEHI, 0x1000022C) \
- /* Vector Merge High/Low */ \
- V(evmergehilo, EVMERGEHILO, 0x1000022E) \
- /* Vector Merge Low */ \
- V(evmergelo, EVMERGELO, 0x1000022D) \
- /* Vector Merge Low/High */ \
- V(evmergelohi, EVMERGELOHI, 0x1000022F) \
- /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
- /* and Accumulate */ \
- V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B) \
- /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
- /* and Accumulate Negative */ \
- V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB) \
- /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
- /* and Accumulate */ \
- V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529) \
- /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
- /* and Accumulate Negative */ \
- V(evmhegsmian, EVMHEGSMIAN, 0x100005A9) \
- /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
- /* and Accumulate */ \
- V(evmhegumiaa, EVMHEGUMIAA, 0x10000528) \
- /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
- /* and Accumulate Negative */ \
- V(evmhegumian, EVMHEGUMIAN, 0x100005A8) \
- /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */ \
- V(evmhesmf, EVMHESMF, 0x1000040B) \
- /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */ \
- /* Accumulator */ \
- V(evmhesmfa, EVMHESMFA, 0x1000042B) \
- /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
- /* Accumulate into Words */ \
- V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B) \
- /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
- /* Accumulate Negative into Words */ \
- V(evmhesmfanw, EVMHESMFANW, 0x1000058B) \
- /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */ \
- V(evmhesmi, EVMHESMI, 0x10000409) \
- /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */ \
- /* Accumulator */ \
- V(evmhesmia, EVMHESMIA, 0x10000429) \
- /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
- /* Accumulate into Words */ \
- V(evmhesmiaaw, EVMHESMIAAW, 0x10000509) \
- /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
- /* Accumulate Negative into Words */ \
- V(evmhesmianw, EVMHESMIANW, 0x10000589) \
- /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */ \
- V(evmhessf, EVMHESSF, 0x10000403) \
- /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */ \
- /* Accumulator */ \
- V(evmhessfa, EVMHESSFA, 0x10000423) \
- /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
- /* Accumulate into Words */ \
- V(evmhessfaaw, EVMHESSFAAW, 0x10000503) \
- /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
- /* Accumulate Negative into Words */ \
- V(evmhessfanw, EVMHESSFANW, 0x10000583) \
- /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
- /* Accumulate into Words */ \
- V(evmhessiaaw, EVMHESSIAAW, 0x10000501) \
- /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
- /* Accumulate Negative into Words */ \
- V(evmhessianw, EVMHESSIANW, 0x10000581) \
- /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */ \
- V(evmheumi, EVMHEUMI, 0x10000408) \
- /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */ \
- /* Accumulator */ \
- V(evmheumia, EVMHEUMIA, 0x10000428) \
- /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
- /* Accumulate into Words */ \
- V(evmheumiaaw, EVMHEUMIAAW, 0x10000508) \
- /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
- /* Accumulate Negative into Words */ \
- V(evmheumianw, EVMHEUMIANW, 0x10000588) \
- /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
- /* Accumulate into Words */ \
- V(evmheusiaaw, EVMHEUSIAAW, 0x10000500) \
- /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
- /* Accumulate Negative into Words */ \
- V(evmheusianw, EVMHEUSIANW, 0x10000580) \
- /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
- /* and Accumulate */ \
- V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F) \
- /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
- /* and Accumulate Negative */ \
- V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF) \
- /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer, */ \
- /* and Accumulate */ \
- V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D) \
- /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */ \
- /* Accumulate Negative */ \
- V(evmhogsmian, EVMHOGSMIAN, 0x100005AD) \
- /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
- /* and Accumulate */ \
- V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C) \
- /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
- /* and Accumulate Negative */ \
- V(evmhogumian, EVMHOGUMIAN, 0x100005AC) \
- /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */ \
- V(evmhosmf, EVMHOSMF, 0x1000040F) \
- /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */ \
- /* Accumulator */ \
- V(evmhosmfa, EVMHOSMFA, 0x1000042F) \
- /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
- /* Accumulate into Words */ \
- V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F) \
- /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
- /* Accumulate Negative into Words */ \
- V(evmhosmfanw, EVMHOSMFANW, 0x1000058F) \
- /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */ \
- V(evmhosmi, EVMHOSMI, 0x1000040D) \
- /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */ \
- /* Accumulator */ \
- V(evmhosmia, EVMHOSMIA, 0x1000042D) \
- /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
- /* Accumulate into Words */ \
- V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D) \
- /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
- /* Accumulate Negative into Words */ \
- V(evmhosmianw, EVMHOSMIANW, 0x1000058D) \
- /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */ \
- V(evmhossf, EVMHOSSF, 0x10000407) \
- /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */ \
- /* Accumulator */ \
- V(evmhossfa, EVMHOSSFA, 0x10000427) \
- /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
- /* Accumulate into Words */ \
- V(evmhossfaaw, EVMHOSSFAAW, 0x10000507) \
- /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
- /* Accumulate Negative into Words */ \
- V(evmhossfanw, EVMHOSSFANW, 0x10000587) \
- /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
- /* Accumulate into Words */ \
- V(evmhossiaaw, EVMHOSSIAAW, 0x10000505) \
- /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
- /* Accumulate Negative into Words */ \
- V(evmhossianw, EVMHOSSIANW, 0x10000585) \
- /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer */ \
- V(evmhoumi, EVMHOUMI, 0x1000040C) \
- /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */ \
- /* Accumulator */ \
- V(evmhoumia, EVMHOUMIA, 0x1000042C) \
- /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
- /* Accumulate into Words */ \
- V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C) \
- /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
- /* Accumulate Negative into Words */ \
- V(evmhoumianw, EVMHOUMIANW, 0x1000058C) \
- /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
- /* Accumulate into Words */ \
- V(evmhousiaaw, EVMHOUSIAAW, 0x10000504) \
- /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
- /* Accumulate Negative into Words */ \
- V(evmhousianw, EVMHOUSIANW, 0x10000584) \
- /* Initialize Accumulator */ \
- V(evmra, EVMRA, 0x100004C4) \
- /* Vector Multiply Word High Signed, Modulo, Fractional */ \
- V(evmwhsmf, EVMWHSMF, 0x1000044F) \
- /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */ \
- V(evmwhsmfa, EVMWHSMFA, 0x1000046F) \
- /* Vector Multiply Word High Signed, Modulo, Integer */ \
- V(evmwhsmi, EVMWHSMI, 0x1000044D) \
- /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */ \
- V(evmwhsmia, EVMWHSMIA, 0x1000046D) \
- /* Vector Multiply Word High Signed, Saturate, Fractional */ \
- V(evmwhssf, EVMWHSSF, 0x10000447) \
- /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */ \
- V(evmwhssfa, EVMWHSSFA, 0x10000467) \
- /* Vector Multiply Word High Unsigned, Modulo, Integer */ \
- V(evmwhumi, EVMWHUMI, 0x1000044C) \
- /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */ \
- V(evmwhumia, EVMWHUMIA, 0x1000046C) \
- /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */ \
- /* Words */ \
- V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549) \
- /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */ \
- /* Negative in Words */ \
- V(evmwlsmianw, EVMWLSMIANW, 0x100005C9) \
- /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */ \
- /* Words */ \
- V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541) \
- /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */ \
- /* Negative in Words */ \
- V(evmwlssianw, EVMWLSSIANW, 0x100005C1) \
- /* Vector Multiply Word Low Unsigned, Modulo, Integer */ \
- V(evmwlumi, EVMWLUMI, 0x10000448) \
- /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */ \
- V(evmwlumia, EVMWLUMIA, 0x10000468) \
- /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */ \
- /* Words */ \
- V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548) \
- /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */ \
- /* Negative in Words */ \
- V(evmwlumianw, EVMWLUMIANW, 0x100005C8) \
- /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
- /* in Words */ \
- V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540) \
- /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
- /* Negative in Words */ \
- V(evmwlusianw, EVMWLUSIANW, 0x100005C0) \
- /* Vector Multiply Word Signed, Modulo, Fractional */ \
- V(evmwsmf, EVMWSMF, 0x1000045B) \
- /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */ \
- V(evmwsmfa, EVMWSMFA, 0x1000047B) \
- /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
- V(evmwsmfaa, EVMWSMFAA, 0x1000055B) \
- /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
- /* Negative */ \
- V(evmwsmfan, EVMWSMFAN, 0x100005DB) \
- /* Vector Multiply Word Signed, Modulo, Integer */ \
- V(evmwsmi, EVMWSMI, 0x10000459) \
- /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */ \
- V(evmwsmia, EVMWSMIA, 0x10000479) \
- /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */ \
- V(evmwsmiaa, EVMWSMIAA, 0x10000559) \
- /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */ \
- V(evmwsmian, EVMWSMIAN, 0x100005D9) \
- /* Vector Multiply Word Signed, Saturate, Fractional */ \
- V(evmwssf, EVMWSSF, 0x10000453) \
- /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */ \
- V(evmwssfa, EVMWSSFA, 0x10000473) \
- /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
- V(evmwssfaa, EVMWSSFAA, 0x10000553) \
- /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
- /* Negative */ \
- V(evmwssfan, EVMWSSFAN, 0x100005D3) \
- /* Vector Multiply Word Unsigned, Modulo, Integer */ \
- V(evmwumi, EVMWUMI, 0x10000458) \
- /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */ \
- V(evmwumia, EVMWUMIA, 0x10000478) \
- /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
- V(evmwumiaa, EVMWUMIAA, 0x10000558) \
- /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
- /* Negative */ \
- V(evmwumian, EVMWUMIAN, 0x100005D8) \
- /* Vector NAND */ \
- V(evnand, EVNAND, 0x1000021E) \
- /* Vector Negate */ \
- V(evneg, EVNEG, 0x10000209) \
- /* Vector NOR */ \
- V(evnor, EVNOR, 0x10000218) \
- /* Vector OR */ \
- V(evor, EVOR, 0x10000217) \
- /* Vector OR with Complement */ \
- V(evorc, EVORC, 0x1000021B) \
- /* Vector Rotate Left Word */ \
- V(evrlw, EVRLW, 0x10000228) \
- /* Vector Rotate Left Word Immediate */ \
- V(evrlwi, EVRLWI, 0x1000022A) \
- /* Vector Round Word */ \
- V(evrndw, EVRNDW, 0x1000020C) \
- /* Vector Shift Left Word */ \
- V(evslw, EVSLW, 0x10000224) \
- /* Vector Shift Left Word Immediate */ \
- V(evslwi, EVSLWI, 0x10000226) \
- /* Vector Splat Fractional Immediate */ \
- V(evsplatfi, EVSPLATFI, 0x1000022B) \
- /* Vector Splat Immediate */ \
- V(evsplati, EVSPLATI, 0x10000229) \
- /* Vector Shift Right Word Immediate Signed */ \
- V(evsrwis, EVSRWIS, 0x10000223) \
- /* Vector Shift Right Word Immediate Unsigned */ \
- V(evsrwiu, EVSRWIU, 0x10000222) \
- /* Vector Shift Right Word Signed */ \
- V(evsrws, EVSRWS, 0x10000221) \
- /* Vector Shift Right Word Unsigned */ \
- V(evsrwu, EVSRWU, 0x10000220) \
- /* Vector Store Double of Double */ \
- V(evstdd, EVSTDD, 0x10000321) \
- /* Vector Store Double of Double Indexed */ \
- V(evstddx, EVSTDDX, 0x10000320) \
- /* Vector Store Double of Four Half Words */ \
- V(evstdh, EVSTDH, 0x10000325) \
- /* Vector Store Double of Four Half Words Indexed */ \
- V(evstdhx, EVSTDHX, 0x10000324) \
- /* Vector Store Double of Two Words */ \
- V(evstdw, EVSTDW, 0x10000323) \
- /* Vector Store Double of Two Words Indexed */ \
- V(evstdwx, EVSTDWX, 0x10000322) \
- /* Vector Store Word of Two Half Words from Even */ \
- V(evstwhe, EVSTWHE, 0x10000331) \
- /* Vector Store Word of Two Half Words from Even Indexed */ \
- V(evstwhex, EVSTWHEX, 0x10000330) \
- /* Vector Store Word of Two Half Words from Odd */ \
- V(evstwho, EVSTWHO, 0x10000335) \
- /* Vector Store Word of Two Half Words from Odd Indexed */ \
- V(evstwhox, EVSTWHOX, 0x10000334) \
- /* Vector Store Word of Word from Even */ \
- V(evstwwe, EVSTWWE, 0x10000339) \
- /* Vector Store Word of Word from Even Indexed */ \
- V(evstwwex, EVSTWWEX, 0x10000338) \
- /* Vector Store Word of Word from Odd */ \
- V(evstwwo, EVSTWWO, 0x1000033D) \
- /* Vector Store Word of Word from Odd Indexed */ \
- V(evstwwox, EVSTWWOX, 0x1000033C) \
- /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */ \
- V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB) \
- /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */ \
- V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3) \
- /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */ \
- V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA) \
- /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */ \
- V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2) \
- /* Vector Subtract from Word */ \
- V(evsubfw, EVSUBFW, 0x10000204) \
- /* Vector Subtract Immediate from Word */ \
- V(evsubifw, EVSUBIFW, 0x10000206) \
- /* Vector XOR */ \
- V(evxor, EVXOR, 0x10000216) \
- /* Floating-Point Double-Precision Absolute Value */ \
- V(efdabs, EFDABS, 0x100002E4) \
- /* Floating-Point Double-Precision Add */ \
- V(efdadd, EFDADD, 0x100002E0) \
- /* Floating-Point Double-Precision Convert from Single-Precision */ \
- V(efdcfs, EFDCFS, 0x100002EF) \
- /* Convert Floating-Point Double-Precision from Signed Fraction */ \
- V(efdcfsf, EFDCFSF, 0x100002F3) \
- /* Convert Floating-Point Double-Precision from Signed Integer */ \
- V(efdcfsi, EFDCFSI, 0x100002F1) \
- /* Convert Floating-Point Double-Precision from Signed Integer */ \
- /* Doubleword */ \
- V(efdcfsid, EFDCFSID, 0x100002E3) \
- /* Convert Floating-Point Double-Precision from Unsigned Fraction */ \
- V(efdcfuf, EFDCFUF, 0x100002F2) \
- /* Convert Floating-Point Double-Precision from Unsigned Integer */ \
- V(efdcfui, EFDCFUI, 0x100002F0) \
- /* Convert Floating-Point Double-Precision fromUnsigned Integer */ \
- /* Doubleword */ \
- V(efdcfuid, EFDCFUID, 0x100002E2) \
- /* Floating-Point Double-Precision Compare Equal */ \
- V(efdcmpeq, EFDCMPEQ, 0x100002EE) \
- /* Floating-Point Double-Precision Compare Greater Than */ \
- V(efdcmpgt, EFDCMPGT, 0x100002EC) \
- /* Floating-Point Double-Precision Compare Less Than */ \
- V(efdcmplt, EFDCMPLT, 0x100002ED) \
- /* Convert Floating-Point Double-Precision to Signed Fraction */ \
- V(efdctsf, EFDCTSF, 0x100002F7) \
- /* Convert Floating-Point Double-Precision to Signed Integer */ \
- V(efdctsi, EFDCTSI, 0x100002F5) \
- /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */ \
- /* with Round toward Zero */ \
- V(efdctsidz, EFDCTSIDZ, 0x100002EB) \
- /* Convert Floating-Point Double-Precision to Signed Integer with Round */ \
- /* toward Zero */ \
- V(efdctsiz, EFDCTSIZ, 0x100002FA) \
- /* Convert Floating-Point Double-Precision to Unsigned Fraction */ \
- V(efdctuf, EFDCTUF, 0x100002F6) \
- /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
- V(efdctui, EFDCTUI, 0x100002F4) \
- /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
- /* Doubleword with Round toward Zero */ \
- V(efdctuidz, EFDCTUIDZ, 0x100002EA) \
- /* Convert Floating-Point Double-Precision to Unsigned Integer with */ \
- /* Round toward Zero */ \
- V(efdctuiz, EFDCTUIZ, 0x100002F8) \
- /* Floating-Point Double-Precision Divide */ \
- V(efddiv, EFDDIV, 0x100002E9) \
- /* Floating-Point Double-Precision Multiply */ \
- V(efdmul, EFDMUL, 0x100002E8) \
- /* Floating-Point Double-Precision Negative Absolute Value */ \
- V(efdnabs, EFDNABS, 0x100002E5) \
- /* Floating-Point Double-Precision Negate */ \
- V(efdneg, EFDNEG, 0x100002E6) \
- /* Floating-Point Double-Precision Subtract */ \
- V(efdsub, EFDSUB, 0x100002E1) \
- /* Floating-Point Double-Precision Test Equal */ \
- V(efdtsteq, EFDTSTEQ, 0x100002FE) \
- /* Floating-Point Double-Precision Test Greater Than */ \
- V(efdtstgt, EFDTSTGT, 0x100002FC) \
- /* Floating-Point Double-Precision Test Less Than */ \
- V(efdtstlt, EFDTSTLT, 0x100002FD) \
- /* Floating-Point Single-Precision Convert from Double-Precision */ \
- V(efscfd, EFSCFD, 0x100002CF) \
- /* Floating-Point Absolute Value */ \
- V(efsabs, EFSABS, 0x100002C4) \
- /* Floating-Point Add */ \
- V(efsadd, EFSADD, 0x100002C0) \
- /* Convert Floating-Point from Signed Fraction */ \
- V(efscfsf, EFSCFSF, 0x100002D3) \
- /* Convert Floating-Point from Signed Integer */ \
- V(efscfsi, EFSCFSI, 0x100002D1) \
- /* Convert Floating-Point from Unsigned Fraction */ \
- V(efscfuf, EFSCFUF, 0x100002D2) \
- /* Convert Floating-Point from Unsigned Integer */ \
- V(efscfui, EFSCFUI, 0x100002D0) \
- /* Floating-Point Compare Equal */ \
- V(efscmpeq, EFSCMPEQ, 0x100002CE) \
- /* Floating-Point Compare Greater Than */ \
- V(efscmpgt, EFSCMPGT, 0x100002CC) \
- /* Floating-Point Compare Less Than */ \
- V(efscmplt, EFSCMPLT, 0x100002CD) \
- /* Convert Floating-Point to Signed Fraction */ \
- V(efsctsf, EFSCTSF, 0x100002D7) \
- /* Convert Floating-Point to Signed Integer */ \
- V(efsctsi, EFSCTSI, 0x100002D5) \
- /* Convert Floating-Point to Signed Integer with Round toward Zero */ \
- V(efsctsiz, EFSCTSIZ, 0x100002DA) \
- /* Convert Floating-Point to Unsigned Fraction */ \
- V(efsctuf, EFSCTUF, 0x100002D6) \
- /* Convert Floating-Point to Unsigned Integer */ \
- V(efsctui, EFSCTUI, 0x100002D4) \
- /* Convert Floating-Point to Unsigned Integer with Round toward Zero */ \
- V(efsctuiz, EFSCTUIZ, 0x100002D8) \
- /* Floating-Point Divide */ \
- V(efsdiv, EFSDIV, 0x100002C9) \
- /* Floating-Point Multiply */ \
- V(efsmul, EFSMUL, 0x100002C8) \
- /* Floating-Point Negative Absolute Value */ \
- V(efsnabs, EFSNABS, 0x100002C5) \
- /* Floating-Point Negate */ \
- V(efsneg, EFSNEG, 0x100002C6) \
- /* Floating-Point Subtract */ \
- V(efssub, EFSSUB, 0x100002C1) \
- /* Floating-Point Test Equal */ \
- V(efststeq, EFSTSTEQ, 0x100002DE) \
- /* Floating-Point Test Greater Than */ \
- V(efststgt, EFSTSTGT, 0x100002DC) \
- /* Floating-Point Test Less Than */ \
- V(efststlt, EFSTSTLT, 0x100002DD) \
- /* Vector Floating-Point Absolute Value */ \
- V(evfsabs, EVFSABS, 0x10000284) \
- /* Vector Floating-Point Add */ \
- V(evfsadd, EVFSADD, 0x10000280) \
- /* Vector Convert Floating-Point from Signed Fraction */ \
- V(evfscfsf, EVFSCFSF, 0x10000293) \
- /* Vector Convert Floating-Point from Signed Integer */ \
- V(evfscfsi, EVFSCFSI, 0x10000291) \
- /* Vector Convert Floating-Point from Unsigned Fraction */ \
- V(evfscfuf, EVFSCFUF, 0x10000292) \
- /* Vector Convert Floating-Point from Unsigned Integer */ \
- V(evfscfui, EVFSCFUI, 0x10000290) \
- /* Vector Floating-Point Compare Equal */ \
- V(evfscmpeq, EVFSCMPEQ, 0x1000028E) \
- /* Vector Floating-Point Compare Greater Than */ \
- V(evfscmpgt, EVFSCMPGT, 0x1000028C) \
- /* Vector Floating-Point Compare Less Than */ \
- V(evfscmplt, EVFSCMPLT, 0x1000028D) \
- /* Vector Convert Floating-Point to Signed Fraction */ \
- V(evfsctsf, EVFSCTSF, 0x10000297) \
- /* Vector Convert Floating-Point to Signed Integer */ \
- V(evfsctsi, EVFSCTSI, 0x10000295) \
- /* Vector Convert Floating-Point to Signed Integer with Round toward */ \
- /* Zero */ \
- V(evfsctsiz, EVFSCTSIZ, 0x1000029A) \
- /* Vector Convert Floating-Point to Unsigned Fraction */ \
- V(evfsctuf, EVFSCTUF, 0x10000296) \
- /* Vector Convert Floating-Point to Unsigned Integer */ \
- V(evfsctui, EVFSCTUI, 0x10000294) \
- /* Vector Convert Floating-Point to Unsigned Integer with Round toward */ \
- /* Zero */ \
- V(evfsctuiz, EVFSCTUIZ, 0x10000298) \
- /* Vector Floating-Point Divide */ \
- V(evfsdiv, EVFSDIV, 0x10000289) \
- /* Vector Floating-Point Multiply */ \
- V(evfsmul, EVFSMUL, 0x10000288) \
- /* Vector Floating-Point Negative Absolute Value */ \
- V(evfsnabs, EVFSNABS, 0x10000285) \
- /* Vector Floating-Point Negate */ \
- V(evfsneg, EVFSNEG, 0x10000286) \
- /* Vector Floating-Point Subtract */ \
- V(evfssub, EVFSSUB, 0x10000281) \
- /* Vector Floating-Point Test Equal */ \
- V(evfststeq, EVFSTSTEQ, 0x1000029E) \
- /* Vector Floating-Point Test Greater Than */ \
- V(evfststgt, EVFSTSTGT, 0x1000029C) \
- /* Vector Floating-Point Test Less Than */ \
+#define PPC_EVX_OPCODE_LIST(V) \
+ /* Vector Load Double Word into Double Word by External PID Indexed */ \
+ V(evlddepx, EVLDDEPX, 0x7C00063E) \
+ /* Vector Store Double of Double by External PID Indexed */ \
+ V(evstddepx, EVSTDDEPX, 0x7C00073E) \
+ /* Bit Reversed Increment */ \
+ V(brinc, BRINC, 0x1000020F) \
+ /* Vector Absolute Value */ \
+ V(evabs, EVABS, 0x10000208) \
+ /* Vector Add Immediate Word */ \
+ V(evaddiw, EVADDIW, 0x10000202) \
+ /* Vector Add Signed, Modulo, Integer to Accumulator Word */ \
+ V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9) \
+ /* Vector Add Signed, Saturate, Integer to Accumulator Word */ \
+ V(evaddssiaaw, EVADDSSIAAW, 0x100004C1) \
+ /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */ \
+ V(evaddumiaaw, EVADDUMIAAW, 0x100004C8) \
+ /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */ \
+ V(evaddusiaaw, EVADDUSIAAW, 0x100004C0) \
+ /* Vector Add Word */ \
+ V(evaddw, EVADDW, 0x10000200) \
+ /* Vector AND */ \
+ V(evand, EVAND, 0x10000211) \
+ /* Vector AND with Complement */ \
+ V(evandc, EVANDC, 0x10000212) \
+ /* Vector Compare Equal */ \
+ V(evcmpeq, EVCMPEQ, 0x10000234) \
+ /* Vector Compare Greater Than Signed */ \
+ V(evcmpgts, EVCMPGTS, 0x10000231) \
+ /* Vector Compare Greater Than Unsigned */ \
+ V(evcmpgtu, EVCMPGTU, 0x10000230) \
+ /* Vector Compare Less Than Signed */ \
+ V(evcmplts, EVCMPLTS, 0x10000233) \
+ /* Vector Compare Less Than Unsigned */ \
+ V(evcmpltu, EVCMPLTU, 0x10000232) \
+ /* Vector Count Leading Signed Bits Word */ \
+ V(evcntlsw, EVCNTLSW, 0x1000020E) \
+ /* Vector Count Leading Zeros Word */ \
+ V(evcntlzw, EVCNTLZW, 0x1000020D) \
+ /* Vector Divide Word Signed */ \
+ V(evdivws, EVDIVWS, 0x100004C6) \
+ /* Vector Divide Word Unsigned */ \
+ V(evdivwu, EVDIVWU, 0x100004C7) \
+ /* Vector Equivalent */ \
+ V(eveqv, EVEQV, 0x10000219) \
+ /* Vector Extend Sign Byte */ \
+ V(evextsb, EVEXTSB, 0x1000020A) \
+ /* Vector Extend Sign Half Word */ \
+ V(evextsh, EVEXTSH, 0x1000020B) \
+ /* Vector Load Double Word into Double Word */ \
+ V(evldd, EVLDD, 0x10000301) \
+ /* Vector Load Double Word into Double Word Indexed */ \
+ V(evlddx, EVLDDX, 0x10000300) \
+ /* Vector Load Double into Four Half Words */ \
+ V(evldh, EVLDH, 0x10000305) \
+ /* Vector Load Double into Four Half Words Indexed */ \
+ V(evldhx, EVLDHX, 0x10000304) \
+ /* Vector Load Double into Two Words */ \
+ V(evldw, EVLDW, 0x10000303) \
+ /* Vector Load Double into Two Words Indexed */ \
+ V(evldwx, EVLDWX, 0x10000302) \
+ /* Vector Load Half Word into Half Words Even and Splat */ \
+ V(evlhhesplat, EVLHHESPLAT, 0x10000309) \
+ /* Vector Load Half Word into Half Words Even and Splat Indexed */ \
+ V(evlhhesplatx, EVLHHESPLATX, 0x10000308) \
+ /* Vector Load Half Word into Half Word Odd Signed and Splat */ \
+ V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F) \
+ /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */ \
+ V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E) \
+ /* Vector Load Half Word into Half Word Odd Unsigned and Splat */ \
+ V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D) \
+ /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */ \
+ V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C) \
+ /* Vector Load Word into Two Half Words Even */ \
+ V(evlwhe, EVLWHE, 0x10000311) \
+ /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */ \
+ V(evlwhos, EVLWHOS, 0x10000317) \
+ /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */ \
+ /* extension) */ \
+ V(evlwhosx, EVLWHOSX, 0x10000316) \
+ /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */ \
+ V(evlwhou, EVLWHOU, 0x10000315) \
+ /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */ \
+ /* extended) */ \
+ V(evlwhoux, EVLWHOUX, 0x10000314) \
+ /* Vector Load Word into Two Half Words and Splat */ \
+ V(evlwhsplat, EVLWHSPLAT, 0x1000031D) \
+ /* Vector Load Word into Two Half Words and Splat Indexed */ \
+ V(evlwhsplatx, EVLWHSPLATX, 0x1000031C) \
+ /* Vector Load Word into Word and Splat */ \
+ V(evlwwsplat, EVLWWSPLAT, 0x10000319) \
+ /* Vector Load Word into Word and Splat Indexed */ \
+ V(evlwwsplatx, EVLWWSPLATX, 0x10000318) \
+ /* Vector Merge High */ \
+ V(evmergehi, EVMERGEHI, 0x1000022C) \
+ /* Vector Merge High/Low */ \
+ V(evmergehilo, EVMERGEHILO, 0x1000022E) \
+ /* Vector Merge Low */ \
+ V(evmergelo, EVMERGELO, 0x1000022D) \
+ /* Vector Merge Low/High */ \
+ V(evmergelohi, EVMERGELOHI, 0x1000022F) \
+ /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
+ /* and Accumulate */ \
+ V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B) \
+ /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */ \
+ /* and Accumulate Negative */ \
+ V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB) \
+ /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
+ /* and Accumulate */ \
+ V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529) \
+ /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */ \
+ /* and Accumulate Negative */ \
+ V(evmhegsmian, EVMHEGSMIAN, 0x100005A9) \
+ /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
+ /* and Accumulate */ \
+ V(evmhegumiaa, EVMHEGUMIAA, 0x10000528) \
+ /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */ \
+ /* and Accumulate Negative */ \
+ V(evmhegumian, EVMHEGUMIAN, 0x100005A8) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */ \
+ V(evmhesmf, EVMHESMF, 0x1000040B) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */ \
+ /* Accumulator */ \
+ V(evmhesmfa, EVMHESMFA, 0x1000042B) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
+ /* Accumulate into Words */ \
+ V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhesmfanw, EVMHESMFANW, 0x1000058B) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */ \
+ V(evmhesmi, EVMHESMI, 0x10000409) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */ \
+ /* Accumulator */ \
+ V(evmhesmia, EVMHESMIA, 0x10000429) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhesmiaaw, EVMHESMIAAW, 0x10000509) \
+ /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhesmianw, EVMHESMIANW, 0x10000589) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */ \
+ V(evmhessf, EVMHESSF, 0x10000403) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */ \
+ /* Accumulator */ \
+ V(evmhessfa, EVMHESSFA, 0x10000423) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
+ /* Accumulate into Words */ \
+ V(evmhessfaaw, EVMHESSFAAW, 0x10000503) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhessfanw, EVMHESSFANW, 0x10000583) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhessiaaw, EVMHESSIAAW, 0x10000501) \
+ /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhessianw, EVMHESSIANW, 0x10000581) \
+ /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */ \
+ V(evmheumi, EVMHEUMI, 0x10000408) \
+ /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */ \
+ /* Accumulator */ \
+ V(evmheumia, EVMHEUMIA, 0x10000428) \
+ /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmheumiaaw, EVMHEUMIAAW, 0x10000508) \
+ /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmheumianw, EVMHEUMIANW, 0x10000588) \
+ /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmheusiaaw, EVMHEUSIAAW, 0x10000500) \
+ /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmheusianw, EVMHEUSIANW, 0x10000580) \
+ /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
+ /* and Accumulate */ \
+ V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F) \
+ /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */ \
+ /* and Accumulate Negative */ \
+ V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF) \
+ /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer, */ \
+ /* and Accumulate */ \
+ V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D) \
+ /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */ \
+ /* Accumulate Negative */ \
+ V(evmhogsmian, EVMHOGSMIAN, 0x100005AD) \
+ /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
+ /* and Accumulate */ \
+ V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C) \
+ /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */ \
+ /* and Accumulate Negative */ \
+ V(evmhogumian, EVMHOGUMIAN, 0x100005AC) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */ \
+ V(evmhosmf, EVMHOSMF, 0x1000040F) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */ \
+ /* Accumulator */ \
+ V(evmhosmfa, EVMHOSMFA, 0x1000042F) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
+ /* Accumulate into Words */ \
+ V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhosmfanw, EVMHOSMFANW, 0x1000058F) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */ \
+ V(evmhosmi, EVMHOSMI, 0x1000040D) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */ \
+ /* Accumulator */ \
+ V(evmhosmia, EVMHOSMIA, 0x1000042D) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D) \
+ /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhosmianw, EVMHOSMIANW, 0x1000058D) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */ \
+ V(evmhossf, EVMHOSSF, 0x10000407) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */ \
+ /* Accumulator */ \
+ V(evmhossfa, EVMHOSSFA, 0x10000427) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
+ /* Accumulate into Words */ \
+ V(evmhossfaaw, EVMHOSSFAAW, 0x10000507) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhossfanw, EVMHOSSFANW, 0x10000587) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhossiaaw, EVMHOSSIAAW, 0x10000505) \
+ /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhossianw, EVMHOSSIANW, 0x10000585) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer */ \
+ V(evmhoumi, EVMHOUMI, 0x1000040C) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */ \
+ /* Accumulator */ \
+ V(evmhoumia, EVMHOUMIA, 0x1000042C) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhoumianw, EVMHOUMIANW, 0x1000058C) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
+ /* Accumulate into Words */ \
+ V(evmhousiaaw, EVMHOUSIAAW, 0x10000504) \
+ /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */ \
+ /* Accumulate Negative into Words */ \
+ V(evmhousianw, EVMHOUSIANW, 0x10000584) \
+ /* Initialize Accumulator */ \
+ V(evmra, EVMRA, 0x100004C4) \
+ /* Vector Multiply Word High Signed, Modulo, Fractional */ \
+ V(evmwhsmf, EVMWHSMF, 0x1000044F) \
+ /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */ \
+ V(evmwhsmfa, EVMWHSMFA, 0x1000046F) \
+ /* Vector Multiply Word High Signed, Modulo, Integer */ \
+ V(evmwhsmi, EVMWHSMI, 0x1000044D) \
+ /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */ \
+ V(evmwhsmia, EVMWHSMIA, 0x1000046D) \
+ /* Vector Multiply Word High Signed, Saturate, Fractional */ \
+ V(evmwhssf, EVMWHSSF, 0x10000447) \
+ /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */ \
+ V(evmwhssfa, EVMWHSSFA, 0x10000467) \
+ /* Vector Multiply Word High Unsigned, Modulo, Integer */ \
+ V(evmwhumi, EVMWHUMI, 0x1000044C) \
+ /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */ \
+ V(evmwhumia, EVMWHUMIA, 0x1000046C) \
+ /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */ \
+ /* Words */ \
+ V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549) \
+ /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */ \
+ /* Negative in Words */ \
+ V(evmwlsmianw, EVMWLSMIANW, 0x100005C9) \
+ /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */ \
+ /* Words */ \
+ V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541) \
+ /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */ \
+ /* Negative in Words */ \
+ V(evmwlssianw, EVMWLSSIANW, 0x100005C1) \
+ /* Vector Multiply Word Low Unsigned, Modulo, Integer */ \
+ V(evmwlumi, EVMWLUMI, 0x10000448) \
+ /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */ \
+ V(evmwlumia, EVMWLUMIA, 0x10000468) \
+ /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */ \
+ /* Words */ \
+ V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548) \
+ /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */ \
+ /* Negative in Words */ \
+ V(evmwlumianw, EVMWLUMIANW, 0x100005C8) \
+ /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
+ /* in Words */ \
+ V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540) \
+ /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */ \
+ /* Negative in Words */ \
+ V(evmwlusianw, EVMWLUSIANW, 0x100005C0) \
+ /* Vector Multiply Word Signed, Modulo, Fractional */ \
+ V(evmwsmf, EVMWSMF, 0x1000045B) \
+ /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */ \
+ V(evmwsmfa, EVMWSMFA, 0x1000047B) \
+ /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
+ V(evmwsmfaa, EVMWSMFAA, 0x1000055B) \
+ /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */ \
+ /* Negative */ \
+ V(evmwsmfan, EVMWSMFAN, 0x100005DB) \
+ /* Vector Multiply Word Signed, Modulo, Integer */ \
+ V(evmwsmi, EVMWSMI, 0x10000459) \
+ /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */ \
+ V(evmwsmia, EVMWSMIA, 0x10000479) \
+ /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */ \
+ V(evmwsmiaa, EVMWSMIAA, 0x10000559) \
+ /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */ \
+ V(evmwsmian, EVMWSMIAN, 0x100005D9) \
+ /* Vector Multiply Word Signed, Saturate, Fractional */ \
+ V(evmwssf, EVMWSSF, 0x10000453) \
+ /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */ \
+ V(evmwssfa, EVMWSSFA, 0x10000473) \
+ /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
+ V(evmwssfaa, EVMWSSFAA, 0x10000553) \
+ /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */ \
+ /* Negative */ \
+ V(evmwssfan, EVMWSSFAN, 0x100005D3) \
+ /* Vector Multiply Word Unsigned, Modulo, Integer */ \
+ V(evmwumi, EVMWUMI, 0x10000458) \
+ /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */ \
+ V(evmwumia, EVMWUMIA, 0x10000478) \
+ /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
+ V(evmwumiaa, EVMWUMIAA, 0x10000558) \
+ /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */ \
+ /* Negative */ \
+ V(evmwumian, EVMWUMIAN, 0x100005D8) \
+ /* Vector NAND */ \
+ V(evnand, EVNAND, 0x1000021E) \
+ /* Vector Negate */ \
+ V(evneg, EVNEG, 0x10000209) \
+ /* Vector NOR */ \
+ V(evnor, EVNOR, 0x10000218) \
+ /* Vector OR */ \
+ V(evor, EVOR, 0x10000217) \
+ /* Vector OR with Complement */ \
+ V(evorc, EVORC, 0x1000021B) \
+ /* Vector Rotate Left Word */ \
+ V(evrlw, EVRLW, 0x10000228) \
+ /* Vector Rotate Left Word Immediate */ \
+ V(evrlwi, EVRLWI, 0x1000022A) \
+ /* Vector Round Word */ \
+ V(evrndw, EVRNDW, 0x1000020C) \
+ /* Vector Shift Left Word */ \
+ V(evslw, EVSLW, 0x10000224) \
+ /* Vector Shift Left Word Immediate */ \
+ V(evslwi, EVSLWI, 0x10000226) \
+ /* Vector Splat Fractional Immediate */ \
+ V(evsplatfi, EVSPLATFI, 0x1000022B) \
+ /* Vector Splat Immediate */ \
+ V(evsplati, EVSPLATI, 0x10000229) \
+ /* Vector Shift Right Word Immediate Signed */ \
+ V(evsrwis, EVSRWIS, 0x10000223) \
+ /* Vector Shift Right Word Immediate Unsigned */ \
+ V(evsrwiu, EVSRWIU, 0x10000222) \
+ /* Vector Shift Right Word Signed */ \
+ V(evsrws, EVSRWS, 0x10000221) \
+ /* Vector Shift Right Word Unsigned */ \
+ V(evsrwu, EVSRWU, 0x10000220) \
+ /* Vector Store Double of Double */ \
+ V(evstdd, EVSTDD, 0x10000321) \
+ /* Vector Store Double of Double Indexed */ \
+ V(evstddx, EVSTDDX, 0x10000320) \
+ /* Vector Store Double of Four Half Words */ \
+ V(evstdh, EVSTDH, 0x10000325) \
+ /* Vector Store Double of Four Half Words Indexed */ \
+ V(evstdhx, EVSTDHX, 0x10000324) \
+ /* Vector Store Double of Two Words */ \
+ V(evstdw, EVSTDW, 0x10000323) \
+ /* Vector Store Double of Two Words Indexed */ \
+ V(evstdwx, EVSTDWX, 0x10000322) \
+ /* Vector Store Word of Two Half Words from Even */ \
+ V(evstwhe, EVSTWHE, 0x10000331) \
+ /* Vector Store Word of Two Half Words from Even Indexed */ \
+ V(evstwhex, EVSTWHEX, 0x10000330) \
+ /* Vector Store Word of Two Half Words from Odd */ \
+ V(evstwho, EVSTWHO, 0x10000335) \
+ /* Vector Store Word of Two Half Words from Odd Indexed */ \
+ V(evstwhox, EVSTWHOX, 0x10000334) \
+ /* Vector Store Word of Word from Even */ \
+ V(evstwwe, EVSTWWE, 0x10000339) \
+ /* Vector Store Word of Word from Even Indexed */ \
+ V(evstwwex, EVSTWWEX, 0x10000338) \
+ /* Vector Store Word of Word from Odd */ \
+ V(evstwwo, EVSTWWO, 0x1000033D) \
+ /* Vector Store Word of Word from Odd Indexed */ \
+ V(evstwwox, EVSTWWOX, 0x1000033C) \
+ /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */ \
+ V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB) \
+ /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */ \
+ V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3) \
+ /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */ \
+ V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA) \
+ /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */ \
+ V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2) \
+ /* Vector Subtract from Word */ \
+ V(evsubfw, EVSUBFW, 0x10000204) \
+ /* Vector Subtract Immediate from Word */ \
+ V(evsubifw, EVSUBIFW, 0x10000206) \
+ /* Vector XOR */ \
+ V(evxor, EVXOR, 0x10000216) \
+ /* Floating-Point Double-Precision Absolute Value */ \
+ V(efdabs, EFDABS, 0x100002E4) \
+ /* Floating-Point Double-Precision Add */ \
+ V(efdadd, EFDADD, 0x100002E0) \
+ /* Floating-Point Double-Precision Convert from Single-Precision */ \
+ V(efdcfs, EFDCFS, 0x100002EF) \
+ /* Convert Floating-Point Double-Precision from Signed Fraction */ \
+ V(efdcfsf, EFDCFSF, 0x100002F3) \
+ /* Convert Floating-Point Double-Precision from Signed Integer */ \
+ V(efdcfsi, EFDCFSI, 0x100002F1) \
+ /* Convert Floating-Point Double-Precision from Signed Integer */ \
+ /* Doubleword */ \
+ V(efdcfsid, EFDCFSID, 0x100002E3) \
+ /* Convert Floating-Point Double-Precision from Unsigned Fraction */ \
+ V(efdcfuf, EFDCFUF, 0x100002F2) \
+ /* Convert Floating-Point Double-Precision from Unsigned Integer */ \
+ V(efdcfui, EFDCFUI, 0x100002F0) \
+ /* Convert Floating-Point Double-Precision fromUnsigned Integer */ \
+ /* Doubleword */ \
+ V(efdcfuid, EFDCFUID, 0x100002E2) \
+ /* Floating-Point Double-Precision Compare Equal */ \
+ V(efdcmpeq, EFDCMPEQ, 0x100002EE) \
+ /* Floating-Point Double-Precision Compare Greater Than */ \
+ V(efdcmpgt, EFDCMPGT, 0x100002EC) \
+ /* Floating-Point Double-Precision Compare Less Than */ \
+ V(efdcmplt, EFDCMPLT, 0x100002ED) \
+ /* Convert Floating-Point Double-Precision to Signed Fraction */ \
+ V(efdctsf, EFDCTSF, 0x100002F7) \
+ /* Convert Floating-Point Double-Precision to Signed Integer */ \
+ V(efdctsi, EFDCTSI, 0x100002F5) \
+ /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */ \
+ /* with Round toward Zero */ \
+ V(efdctsidz, EFDCTSIDZ, 0x100002EB) \
+ /* Convert Floating-Point Double-Precision to Signed Integer with Round */ \
+ /* toward Zero */ \
+ V(efdctsiz, EFDCTSIZ, 0x100002FA) \
+ /* Convert Floating-Point Double-Precision to Unsigned Fraction */ \
+ V(efdctuf, EFDCTUF, 0x100002F6) \
+ /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
+ V(efdctui, EFDCTUI, 0x100002F4) \
+ /* Convert Floating-Point Double-Precision to Unsigned Integer */ \
+ /* Doubleword with Round toward Zero */ \
+ V(efdctuidz, EFDCTUIDZ, 0x100002EA) \
+ /* Convert Floating-Point Double-Precision to Unsigned Integer with */ \
+ /* Round toward Zero */ \
+ V(efdctuiz, EFDCTUIZ, 0x100002F8) \
+ /* Floating-Point Double-Precision Divide */ \
+ V(efddiv, EFDDIV, 0x100002E9) \
+ /* Floating-Point Double-Precision Multiply */ \
+ V(efdmul, EFDMUL, 0x100002E8) \
+ /* Floating-Point Double-Precision Negative Absolute Value */ \
+ V(efdnabs, EFDNABS, 0x100002E5) \
+ /* Floating-Point Double-Precision Negate */ \
+ V(efdneg, EFDNEG, 0x100002E6) \
+ /* Floating-Point Double-Precision Subtract */ \
+ V(efdsub, EFDSUB, 0x100002E1) \
+ /* Floating-Point Double-Precision Test Equal */ \
+ V(efdtsteq, EFDTSTEQ, 0x100002FE) \
+ /* Floating-Point Double-Precision Test Greater Than */ \
+ V(efdtstgt, EFDTSTGT, 0x100002FC) \
+ /* Floating-Point Double-Precision Test Less Than */ \
+ V(efdtstlt, EFDTSTLT, 0x100002FD) \
+ /* Floating-Point Single-Precision Convert from Double-Precision */ \
+ V(efscfd, EFSCFD, 0x100002CF) \
+ /* Floating-Point Absolute Value */ \
+ V(efsabs, EFSABS, 0x100002C4) \
+ /* Floating-Point Add */ \
+ V(efsadd, EFSADD, 0x100002C0) \
+ /* Convert Floating-Point from Signed Fraction */ \
+ V(efscfsf, EFSCFSF, 0x100002D3) \
+ /* Convert Floating-Point from Signed Integer */ \
+ V(efscfsi, EFSCFSI, 0x100002D1) \
+ /* Convert Floating-Point from Unsigned Fraction */ \
+ V(efscfuf, EFSCFUF, 0x100002D2) \
+ /* Convert Floating-Point from Unsigned Integer */ \
+ V(efscfui, EFSCFUI, 0x100002D0) \
+ /* Floating-Point Compare Equal */ \
+ V(efscmpeq, EFSCMPEQ, 0x100002CE) \
+ /* Floating-Point Compare Greater Than */ \
+ V(efscmpgt, EFSCMPGT, 0x100002CC) \
+ /* Floating-Point Compare Less Than */ \
+ V(efscmplt, EFSCMPLT, 0x100002CD) \
+ /* Convert Floating-Point to Signed Fraction */ \
+ V(efsctsf, EFSCTSF, 0x100002D7) \
+ /* Convert Floating-Point to Signed Integer */ \
+ V(efsctsi, EFSCTSI, 0x100002D5) \
+ /* Convert Floating-Point to Signed Integer with Round toward Zero */ \
+ V(efsctsiz, EFSCTSIZ, 0x100002DA) \
+ /* Convert Floating-Point to Unsigned Fraction */ \
+ V(efsctuf, EFSCTUF, 0x100002D6) \
+ /* Convert Floating-Point to Unsigned Integer */ \
+ V(efsctui, EFSCTUI, 0x100002D4) \
+ /* Convert Floating-Point to Unsigned Integer with Round toward Zero */ \
+ V(efsctuiz, EFSCTUIZ, 0x100002D8) \
+ /* Floating-Point Divide */ \
+ V(efsdiv, EFSDIV, 0x100002C9) \
+ /* Floating-Point Multiply */ \
+ V(efsmul, EFSMUL, 0x100002C8) \
+ /* Floating-Point Negative Absolute Value */ \
+ V(efsnabs, EFSNABS, 0x100002C5) \
+ /* Floating-Point Negate */ \
+ V(efsneg, EFSNEG, 0x100002C6) \
+ /* Floating-Point Subtract */ \
+ V(efssub, EFSSUB, 0x100002C1) \
+ /* Floating-Point Test Equal */ \
+ V(efststeq, EFSTSTEQ, 0x100002DE) \
+ /* Floating-Point Test Greater Than */ \
+ V(efststgt, EFSTSTGT, 0x100002DC) \
+ /* Floating-Point Test Less Than */ \
+ V(efststlt, EFSTSTLT, 0x100002DD) \
+ /* Vector Floating-Point Absolute Value */ \
+ V(evfsabs, EVFSABS, 0x10000284) \
+ /* Vector Floating-Point Add */ \
+ V(evfsadd, EVFSADD, 0x10000280) \
+ /* Vector Convert Floating-Point from Signed Fraction */ \
+ V(evfscfsf, EVFSCFSF, 0x10000293) \
+ /* Vector Convert Floating-Point from Signed Integer */ \
+ V(evfscfsi, EVFSCFSI, 0x10000291) \
+ /* Vector Convert Floating-Point from Unsigned Fraction */ \
+ V(evfscfuf, EVFSCFUF, 0x10000292) \
+ /* Vector Convert Floating-Point from Unsigned Integer */ \
+ V(evfscfui, EVFSCFUI, 0x10000290) \
+ /* Vector Floating-Point Compare Equal */ \
+ V(evfscmpeq, EVFSCMPEQ, 0x1000028E) \
+ /* Vector Floating-Point Compare Greater Than */ \
+ V(evfscmpgt, EVFSCMPGT, 0x1000028C) \
+ /* Vector Floating-Point Compare Less Than */ \
+ V(evfscmplt, EVFSCMPLT, 0x1000028D) \
+ /* Vector Convert Floating-Point to Signed Fraction */ \
+ V(evfsctsf, EVFSCTSF, 0x10000297) \
+ /* Vector Convert Floating-Point to Signed Integer */ \
+ V(evfsctsi, EVFSCTSI, 0x10000295) \
+ /* Vector Convert Floating-Point to Signed Integer with Round toward */ \
+ /* Zero */ \
+ V(evfsctsiz, EVFSCTSIZ, 0x1000029A) \
+ /* Vector Convert Floating-Point to Unsigned Fraction */ \
+ V(evfsctuf, EVFSCTUF, 0x10000296) \
+ /* Vector Convert Floating-Point to Unsigned Integer */ \
+ V(evfsctui, EVFSCTUI, 0x10000294) \
+ /* Vector Convert Floating-Point to Unsigned Integer with Round toward */ \
+ /* Zero */ \
+ V(evfsctuiz, EVFSCTUIZ, 0x10000298) \
+ /* Vector Floating-Point Divide */ \
+ V(evfsdiv, EVFSDIV, 0x10000289) \
+ /* Vector Floating-Point Multiply */ \
+ V(evfsmul, EVFSMUL, 0x10000288) \
+ /* Vector Floating-Point Negative Absolute Value */ \
+ V(evfsnabs, EVFSNABS, 0x10000285) \
+ /* Vector Floating-Point Negate */ \
+ V(evfsneg, EVFSNEG, 0x10000286) \
+ /* Vector Floating-Point Subtract */ \
+ V(evfssub, EVFSSUB, 0x10000281) \
+ /* Vector Floating-Point Test Equal */ \
+ V(evfststeq, EVFSTSTEQ, 0x1000029E) \
+ /* Vector Floating-Point Test Greater Than */ \
+ V(evfststgt, EVFSTSTGT, 0x1000029C) \
+ /* Vector Floating-Point Test Less Than */ \
V(evfststlt, EVFSTSTLT, 0x1000029D)
-#define PPC_VC_OPCODE_LIST(V) \
- /* Vector Compare Bounds Single-Precision */ \
- V(vcmpbfp, VCMPBFP, 0x100003C6) \
- /* Vector Compare Equal To Single-Precision */ \
- V(vcmpeqfp, VCMPEQFP, 0x100000C6) \
- /* Vector Compare Equal To Unsigned Byte */ \
- V(vcmpequb, VCMPEQUB, 0x10000006) \
- /* Vector Compare Equal To Unsigned Doubleword */ \
- V(vcmpequd, VCMPEQUD, 0x100000C7) \
- /* Vector Compare Equal To Unsigned Halfword */ \
- V(vcmpequh, VCMPEQUH, 0x10000046) \
- /* Vector Compare Equal To Unsigned Word */ \
- V(vcmpequw, VCMPEQUW, 0x10000086) \
- /* Vector Compare Greater Than or Equal To Single-Precision */ \
- V(vcmpgefp, VCMPGEFP, 0x100001C6) \
- /* Vector Compare Greater Than Single-Precision */ \
- V(vcmpgtfp, VCMPGTFP, 0x100002C6) \
- /* Vector Compare Greater Than Signed Byte */ \
- V(vcmpgtsb, VCMPGTSB, 0x10000306) \
- /* Vector Compare Greater Than Signed Doubleword */ \
- V(vcmpgtsd, VCMPGTSD, 0x100003C7) \
- /* Vector Compare Greater Than Signed Halfword */ \
- V(vcmpgtsh, VCMPGTSH, 0x10000346) \
- /* Vector Compare Greater Than Signed Word */ \
- V(vcmpgtsw, VCMPGTSW, 0x10000386) \
- /* Vector Compare Greater Than Unsigned Byte */ \
- V(vcmpgtub, VCMPGTUB, 0x10000206) \
- /* Vector Compare Greater Than Unsigned Doubleword */ \
- V(vcmpgtud, VCMPGTUD, 0x100002C7) \
- /* Vector Compare Greater Than Unsigned Halfword */ \
- V(vcmpgtuh, VCMPGTUH, 0x10000246) \
- /* Vector Compare Greater Than Unsigned Word */ \
+#define PPC_VC_OPCODE_LIST(V) \
+ /* Vector Compare Bounds Single-Precision */ \
+ V(vcmpbfp, VCMPBFP, 0x100003C6) \
+ /* Vector Compare Equal To Single-Precision */ \
+ V(vcmpeqfp, VCMPEQFP, 0x100000C6) \
+ /* Vector Compare Equal To Unsigned Byte */ \
+ V(vcmpequb, VCMPEQUB, 0x10000006) \
+ /* Vector Compare Equal To Unsigned Doubleword */ \
+ V(vcmpequd, VCMPEQUD, 0x100000C7) \
+ /* Vector Compare Equal To Unsigned Halfword */ \
+ V(vcmpequh, VCMPEQUH, 0x10000046) \
+ /* Vector Compare Equal To Unsigned Word */ \
+ V(vcmpequw, VCMPEQUW, 0x10000086) \
+ /* Vector Compare Greater Than or Equal To Single-Precision */ \
+ V(vcmpgefp, VCMPGEFP, 0x100001C6) \
+ /* Vector Compare Greater Than Single-Precision */ \
+ V(vcmpgtfp, VCMPGTFP, 0x100002C6) \
+ /* Vector Compare Greater Than Signed Byte */ \
+ V(vcmpgtsb, VCMPGTSB, 0x10000306) \
+ /* Vector Compare Greater Than Signed Doubleword */ \
+ V(vcmpgtsd, VCMPGTSD, 0x100003C7) \
+ /* Vector Compare Greater Than Signed Halfword */ \
+ V(vcmpgtsh, VCMPGTSH, 0x10000346) \
+ /* Vector Compare Greater Than Signed Word */ \
+ V(vcmpgtsw, VCMPGTSW, 0x10000386) \
+ /* Vector Compare Greater Than Unsigned Byte */ \
+ V(vcmpgtub, VCMPGTUB, 0x10000206) \
+ /* Vector Compare Greater Than Unsigned Doubleword */ \
+ V(vcmpgtud, VCMPGTUD, 0x100002C7) \
+ /* Vector Compare Greater Than Unsigned Halfword */ \
+ V(vcmpgtuh, VCMPGTUH, 0x10000246) \
+ /* Vector Compare Greater Than Unsigned Word */ \
V(vcmpgtuw, VCMPGTUW, 0x10000286)
#define PPC_X_OPCODE_A_FORM_LIST(V) \
@@ -1254,459 +1253,459 @@ typedef uint32_t Instr;
/* Load Doubleword And Reserve Indexed */ \
V(ldarx, LDARX, 0x7C0000A8)
-#define PPC_X_OPCODE_UNUSED_LIST(V) \
- /* Bit Permute Doubleword */ \
- V(bpermd, BPERMD, 0x7C0001F8) \
- /* Extend Sign Word */ \
- V(extsw, EXTSW, 0x7C0007B4) \
- /* Load Word Algebraic with Update Indexed */ \
- V(lwaux, LWAUX, 0x7C0002EA) \
- /* Load Word Algebraic Indexed */ \
- V(lwax, LWAX, 0x7C0002AA) \
- /* Parity Doubleword */ \
- V(prtyd, PRTYD, 0x7C000174) \
- /* Store Doubleword Byte-Reverse Indexed */ \
- V(stdbrx, STDBRX, 0x7C000528) \
- /* Trap Doubleword */ \
- V(td, TD, 0x7C000088) \
- /* Branch Conditional to Branch Target Address Register */ \
- V(bctar, BCTAR, 0x4C000460) \
- /* Compare Byte */ \
- V(cmpb, CMPB, 0x7C0003F8) \
- /* Data Cache Block Flush */ \
- V(dcbf, DCBF, 0x7C0000AC) \
- /* Data Cache Block Store */ \
- V(dcbst, DCBST, 0x7C00006C) \
- /* Data Cache Block Touch */ \
- V(dcbt, DCBT, 0x7C00022C) \
- /* Data Cache Block Touch for Store */ \
- V(dcbtst, DCBTST, 0x7C0001EC) \
- /* Data Cache Block Zero */ \
- V(dcbz, DCBZ, 0x7C0007EC) \
- /* Equivalent */ \
- V(eqv, EQV, 0x7C000238) \
- /* Instruction Cache Block Invalidate */ \
- V(icbi, ICBI, 0x7C0007AC) \
- /* NAND */ \
- V(nand, NAND, 0x7C0003B8) \
- /* Parity Word */ \
- V(prtyw, PRTYW, 0x7C000134) \
- /* Store Halfword Byte-Reverse Indexed */ \
- V(sthbrx, STHBRX, 0x7C00072C) \
- /* Store Word Byte-Reverse Indexed */ \
- V(stwbrx, STWBRX, 0x7C00052C) \
- /* Synchronize */ \
- V(sync, SYNC, 0x7C0004AC) \
- /* Trap Word */ \
- V(tw, TW, 0x7C000008) \
- /* ExecuExecuted No Operation */ \
- V(xnop, XNOP, 0x68000000) \
- /* Convert Binary Coded Decimal To Declets */ \
- V(cbcdtd, CBCDTD, 0x7C000274) \
- /* Convert Declets To Binary Coded Decimal */ \
- V(cdtbcd, CDTBCD, 0x7C000234) \
- /* Decimal Floating Add */ \
- V(dadd, DADD, 0xEC000004) \
- /* Decimal Floating Add Quad */ \
- V(daddq, DADDQ, 0xFC000004) \
- /* Decimal Floating Convert From Fixed */ \
- V(dcffix, DCFFIX, 0xEC000644) \
- /* Decimal Floating Convert From Fixed Quad */ \
- V(dcffixq, DCFFIXQ, 0xFC000644) \
- /* Decimal Floating Compare Ordered */ \
- V(dcmpo, DCMPO, 0xEC000104) \
- /* Decimal Floating Compare Ordered Quad */ \
- V(dcmpoq, DCMPOQ, 0xFC000104) \
- /* Decimal Floating Compare Unordered */ \
- V(dcmpu, DCMPU, 0xEC000504) \
- /* Decimal Floating Compare Unordered Quad */ \
- V(dcmpuq, DCMPUQ, 0xFC000504) \
- /* Decimal Floating Convert To DFP Long */ \
- V(dctdp, DCTDP, 0xEC000204) \
- /* Decimal Floating Convert To Fixed */ \
- V(dctfix, DCTFIX, 0xEC000244) \
- /* Decimal Floating Convert To Fixed Quad */ \
- V(dctfixq, DCTFIXQ, 0xFC000244) \
- /* Decimal Floating Convert To DFP Extended */ \
- V(dctqpq, DCTQPQ, 0xFC000204) \
- /* Decimal Floating Decode DPD To BCD */ \
- V(ddedpd, DDEDPD, 0xEC000284) \
- /* Decimal Floating Decode DPD To BCD Quad */ \
- V(ddedpdq, DDEDPDQ, 0xFC000284) \
- /* Decimal Floating Divide */ \
- V(ddiv, DDIV, 0xEC000444) \
- /* Decimal Floating Divide Quad */ \
- V(ddivq, DDIVQ, 0xFC000444) \
- /* Decimal Floating Encode BCD To DPD */ \
- V(denbcd, DENBCD, 0xEC000684) \
- /* Decimal Floating Encode BCD To DPD Quad */ \
- V(denbcdq, DENBCDQ, 0xFC000684) \
- /* Decimal Floating Insert Exponent */ \
- V(diex, DIEX, 0xEC0006C4) \
- /* Decimal Floating Insert Exponent Quad */ \
- V(diexq, DIEXQ, 0xFC0006C4) \
- /* Decimal Floating Multiply */ \
- V(dmul, DMUL, 0xEC000044) \
- /* Decimal Floating Multiply Quad */ \
- V(dmulq, DMULQ, 0xFC000044) \
- /* Decimal Floating Round To DFP Long */ \
- V(drdpq, DRDPQ, 0xFC000604) \
- /* Decimal Floating Round To DFP Short */ \
- V(drsp, DRSP, 0xEC000604) \
- /* Decimal Floating Subtract */ \
- V(dsub, DSUB, 0xEC000404) \
- /* Decimal Floating Subtract Quad */ \
- V(dsubq, DSUBQ, 0xFC000404) \
- /* Decimal Floating Test Exponent */ \
- V(dtstex, DTSTEX, 0xEC000144) \
- /* Decimal Floating Test Exponent Quad */ \
- V(dtstexq, DTSTEXQ, 0xFC000144) \
- /* Decimal Floating Test Significance */ \
- V(dtstsf, DTSTSF, 0xEC000544) \
- /* Decimal Floating Test Significance Quad */ \
- V(dtstsfq, DTSTSFQ, 0xFC000544) \
- /* Decimal Floating Extract Exponent */ \
- V(dxex, DXEX, 0xEC0002C4) \
- /* Decimal Floating Extract Exponent Quad */ \
- V(dxexq, DXEXQ, 0xFC0002C4) \
- /* Decorated Storage Notify */ \
- V(dsn, DSN, 0x7C0003C6) \
- /* Load Byte with Decoration Indexed */ \
- V(lbdx, LBDX, 0x7C000406) \
- /* Load Doubleword with Decoration Indexed */ \
- V(lddx, LDDX, 0x7C0004C6) \
- /* Load Floating Doubleword with Decoration Indexed */ \
- V(lfddx, LFDDX, 0x7C000646) \
- /* Load Halfword with Decoration Indexed */ \
- V(lhdx, LHDX, 0x7C000446) \
- /* Load Word with Decoration Indexed */ \
- V(lwdx, LWDX, 0x7C000486) \
- /* Store Byte with Decoration Indexed */ \
- V(stbdx, STBDX, 0x7C000506) \
- /* Store Doubleword with Decoration Indexed */ \
- V(stddx, STDDX, 0x7C0005C6) \
- /* Store Floating Doubleword with Decoration Indexed */ \
- V(stfddx, STFDDX, 0x7C000746) \
- /* Store Halfword with Decoration Indexed */ \
- V(sthdx, STHDX, 0x7C000546) \
- /* Store Word with Decoration Indexed */ \
- V(stwdx, STWDX, 0x7C000586) \
- /* Data Cache Block Allocate */ \
- V(dcba, DCBA, 0x7C0005EC) \
- /* Data Cache Block Invalidate */ \
- V(dcbi, DCBI, 0x7C0003AC) \
- /* Instruction Cache Block Touch */ \
- V(icbt, ICBT, 0x7C00002C) \
- /* Move to Condition Register from XER */ \
- V(mcrxr, MCRXR, 0x7C000400) \
- /* TLB Invalidate Local Indexed */ \
- V(tlbilx, TLBILX, 0x7C000024) \
- /* TLB Invalidate Virtual Address Indexed */ \
- V(tlbivax, TLBIVAX, 0x7C000624) \
- /* TLB Read Entry */ \
- V(tlbre, TLBRE, 0x7C000764) \
- /* TLB Search Indexed */ \
- V(tlbsx, TLBSX, 0x7C000724) \
- /* TLB Write Entry */ \
- V(tlbwe, TLBWE, 0x7C0007A4) \
- /* Write External Enable */ \
- V(wrtee, WRTEE, 0x7C000106) \
- /* Write External Enable Immediate */ \
- V(wrteei, WRTEEI, 0x7C000146) \
- /* Data Cache Read */ \
- V(dcread, DCREAD, 0x7C00028C) \
- /* Instruction Cache Read */ \
- V(icread, ICREAD, 0x7C0007CC) \
- /* Data Cache Invalidate */ \
- V(dci, DCI, 0x7C00038C) \
- /* Instruction Cache Invalidate */ \
- V(ici, ICI, 0x7C00078C) \
- /* Move From Device Control Register User Mode Indexed */ \
- V(mfdcrux, MFDCRUX, 0x7C000246) \
- /* Move From Device Control Register Indexed */ \
- V(mfdcrx, MFDCRX, 0x7C000206) \
- /* Move To Device Control Register User Mode Indexed */ \
- V(mtdcrux, MTDCRUX, 0x7C000346) \
- /* Move To Device Control Register Indexed */ \
- V(mtdcrx, MTDCRX, 0x7C000306) \
- /* Return From Debug Interrupt */ \
- V(rfdi, RFDI, 0x4C00004E) \
- /* Data Cache Block Flush by External PID */ \
- V(dcbfep, DCBFEP, 0x7C0000FE) \
- /* Data Cache Block Store by External PID */ \
- V(dcbstep, DCBSTEP, 0x7C00007E) \
- /* Data Cache Block Touch by External PID */ \
- V(dcbtep, DCBTEP, 0x7C00027E) \
- /* Data Cache Block Touch for Store by External PID */ \
- V(dcbtstep, DCBTSTEP, 0x7C0001FE) \
- /* Data Cache Block Zero by External PID */ \
- V(dcbzep, DCBZEP, 0x7C0007FE) \
- /* Instruction Cache Block Invalidate by External PID */ \
- V(icbiep, ICBIEP, 0x7C0007BE) \
- /* Load Byte and Zero by External PID Indexed */ \
- V(lbepx, LBEPX, 0x7C0000BE) \
- /* Load Floating-Point Double by External PID Indexed */ \
- V(lfdepx, LFDEPX, 0x7C0004BE) \
- /* Load Halfword and Zero by External PID Indexed */ \
- V(lhepx, LHEPX, 0x7C00023E) \
- /* Load Vector by External PID Indexed */ \
- V(lvepx, LVEPX, 0x7C00024E) \
- /* Load Vector by External PID Indexed Last */ \
- V(lvepxl, LVEPXL, 0x7C00020E) \
- /* Load Word and Zero by External PID Indexed */ \
- V(lwepx, LWEPX, 0x7C00003E) \
- /* Store Byte by External PID Indexed */ \
- V(stbepx, STBEPX, 0x7C0001BE) \
- /* Store Floating-Point Double by External PID Indexed */ \
- V(stfdepx, STFDEPX, 0x7C0005BE) \
- /* Store Halfword by External PID Indexed */ \
- V(sthepx, STHEPX, 0x7C00033E) \
- /* Store Vector by External PID Indexed */ \
- V(stvepx, STVEPX, 0x7C00064E) \
- /* Store Vector by External PID Indexed Last */ \
- V(stvepxl, STVEPXL, 0x7C00060E) \
- /* Store Word by External PID Indexed */ \
- V(stwepx, STWEPX, 0x7C00013E) \
- /* Load Doubleword by External PID Indexed */ \
- V(ldepx, LDEPX, 0x7C00003A) \
- /* Store Doubleword by External PID Indexed */ \
- V(stdepx, STDEPX, 0x7C00013A) \
- /* TLB Search and Reserve Indexed */ \
- V(tlbsrx, TLBSRX, 0x7C0006A5) \
- /* External Control In Word Indexed */ \
- V(eciwx, ECIWX, 0x7C00026C) \
- /* External Control Out Word Indexed */ \
- V(ecowx, ECOWX, 0x7C00036C) \
- /* Data Cache Block Lock Clear */ \
- V(dcblc, DCBLC, 0x7C00030C) \
- /* Data Cache Block Lock Query */ \
- V(dcblq, DCBLQ, 0x7C00034D) \
- /* Data Cache Block Touch and Lock Set */ \
- V(dcbtls, DCBTLS, 0x7C00014C) \
- /* Data Cache Block Touch for Store and Lock Set */ \
- V(dcbtstls, DCBTSTLS, 0x7C00010C) \
- /* Instruction Cache Block Lock Clear */ \
- V(icblc, ICBLC, 0x7C0001CC) \
- /* Instruction Cache Block Lock Query */ \
- V(icblq, ICBLQ, 0x7C00018D) \
- /* Instruction Cache Block Touch and Lock Set */ \
- V(icbtls, ICBTLS, 0x7C0003CC) \
- /* Floating Compare Ordered */ \
- V(fcmpo, FCMPO, 0xFC000040) \
- /* Floating Compare Unordered */ \
- V(fcmpu, FCMPU, 0xFC000000) \
- /* Floating Test for software Divide */ \
- V(ftdiv, FTDIV, 0xFC000100) \
- /* Floating Test for software Square Root */ \
- V(ftsqrt, FTSQRT, 0xFC000140) \
- /* Load Floating-Point as Integer Word Algebraic Indexed */ \
- V(lfiwax, LFIWAX, 0x7C0006AE) \
- /* Load Floating-Point as Integer Word and Zero Indexed */ \
- V(lfiwzx, LFIWZX, 0x7C0006EE) \
- /* Move To Condition Register from FPSCR */ \
- V(mcrfs, MCRFS, 0xFC000080) \
- /* Store Floating-Point as Integer Word Indexed */ \
- V(stfiwx, STFIWX, 0x7C0007AE) \
- /* Load Floating-Point Double Pair Indexed */ \
- V(lfdpx, LFDPX, 0x7C00062E) \
- /* Store Floating-Point Double Pair Indexed */ \
- V(stfdpx, STFDPX, 0x7C00072E) \
- /* Floating Absolute Value */ \
- V(fabs, FABS, 0xFC000210) \
- /* Floating Convert From Integer Doubleword */ \
- V(fcfid, FCFID, 0xFC00069C) \
- /* Floating Convert From Integer Doubleword Single */ \
- V(fcfids, FCFIDS, 0xEC00069C) \
- /* Floating Convert From Integer Doubleword Unsigned */ \
- V(fcfidu, FCFIDU, 0xFC00079C) \
- /* Floating Convert From Integer Doubleword Unsigned Single */ \
- V(fcfidus, FCFIDUS, 0xEC00079C) \
- /* Floating Copy Sign */ \
- V(fcpsgn, FCPSGN, 0xFC000010) \
- /* Floating Convert To Integer Doubleword */ \
- V(fctid, FCTID, 0xFC00065C) \
- /* Floating Convert To Integer Doubleword Unsigned */ \
- V(fctidu, FCTIDU, 0xFC00075C) \
- /* Floating Convert To Integer Doubleword Unsigned with round toward */ \
- /* Zero */ \
- V(fctiduz, FCTIDUZ, 0xFC00075E) \
- /* Floating Convert To Integer Doubleword with round toward Zero */ \
- V(fctidz, FCTIDZ, 0xFC00065E) \
- /* Floating Convert To Integer Word */ \
- V(fctiw, FCTIW, 0xFC00001C) \
- /* Floating Convert To Integer Word Unsigned */ \
- V(fctiwu, FCTIWU, 0xFC00011C) \
- /* Floating Convert To Integer Word Unsigned with round toward Zero */ \
- V(fctiwuz, FCTIWUZ, 0xFC00011E) \
- /* Floating Convert To Integer Word with round to Zero */ \
- V(fctiwz, FCTIWZ, 0xFC00001E) \
- /* Floating Move Register */ \
- V(fmr, FMR, 0xFC000090) \
- /* Floating Negative Absolute Value */ \
- V(fnabs, FNABS, 0xFC000110) \
- /* Floating Negate */ \
- V(fneg, FNEG, 0xFC000050) \
- /* Floating Round to Single-Precision */ \
- V(frsp, FRSP, 0xFC000018) \
- /* Move From FPSCR */ \
- V(mffs, MFFS, 0xFC00048E) \
- /* Move To FPSCR Bit 0 */ \
- V(mtfsb0, MTFSB0, 0xFC00008C) \
- /* Move To FPSCR Bit 1 */ \
- V(mtfsb1, MTFSB1, 0xFC00004C) \
- /* Move To FPSCR Field Immediate */ \
- V(mtfsfi, MTFSFI, 0xFC00010C) \
- /* Floating Round To Integer Minus */ \
- V(frim, FRIM, 0xFC0003D0) \
- /* Floating Round To Integer Nearest */ \
- V(frin, FRIN, 0xFC000310) \
- /* Floating Round To Integer Plus */ \
- V(frip, FRIP, 0xFC000390) \
- /* Floating Round To Integer toward Zero */ \
- V(friz, FRIZ, 0xFC000350) \
- /* Multiply Cross Halfword to Word Signed */ \
- V(mulchw, MULCHW, 0x10000150) \
- /* Multiply Cross Halfword to Word Unsigned */ \
- V(mulchwu, MULCHWU, 0x10000110) \
- /* Multiply High Halfword to Word Signed */ \
- V(mulhhw, MULHHW, 0x10000050) \
- /* Multiply High Halfword to Word Unsigned */ \
- V(mulhhwu, MULHHWU, 0x10000010) \
- /* Multiply Low Halfword to Word Signed */ \
- V(mullhw, MULLHW, 0x10000350) \
- /* Multiply Low Halfword to Word Unsigned */ \
- V(mullhwu, MULLHWU, 0x10000310) \
- /* Determine Leftmost Zero Byte DQ 56 E0000000 P 58 LSQ lq Load Quadword */ \
- V(dlmzb, DLMZB, 0x7C00009C) \
- /* Load Quadword And Reserve Indexed */ \
- V(lqarx, LQARX, 0x7C000228) \
- /* Store Quadword Conditional Indexed and record CR0 */ \
- V(stqcx, STQCX, 0x7C00016D) \
- /* Load String Word Immediate */ \
- V(lswi, LSWI, 0x7C0004AA) \
- /* Load String Word Indexed */ \
- V(lswx, LSWX, 0x7C00042A) \
- /* Store String Word Immediate */ \
- V(stswi, STSWI, 0x7C0005AA) \
- /* Store String Word Indexed */ \
- V(stswx, STSWX, 0x7C00052A) \
- /* Clear BHRB */ \
- V(clrbhrb, CLRBHRB, 0x7C00035C) \
- /* Enforce In-order Execution of I/O */ \
- V(eieio, EIEIO, 0x7C0006AC) \
- /* Load Byte and Zero Caching Inhibited Indexed */ \
- V(lbzcix, LBZCIX, 0x7C0006AA) \
- /* Load Doubleword Caching Inhibited Indexed */ \
- V(ldcix, LDCIX, 0x7C0006EA) \
- /* Load Halfword and Zero Caching Inhibited Indexed */ \
- V(lhzcix, LHZCIX, 0x7C00066A) \
- /* Load Word and Zero Caching Inhibited Indexed */ \
- V(lwzcix, LWZCIX, 0x7C00062A) \
- /* Move From Segment Register */ \
- V(mfsr, MFSR, 0x7C0004A6) \
- /* Move From Segment Register Indirect */ \
- V(mfsrin, MFSRIN, 0x7C000526) \
- /* Move To Machine State Register Doubleword */ \
- V(mtmsrd, MTMSRD, 0x7C000164) \
- /* Move To Split Little Endian */ \
- V(mtsle, MTSLE, 0x7C000126) \
- /* Move To Segment Register */ \
- V(mtsr, MTSR, 0x7C0001A4) \
- /* Move To Segment Register Indirect */ \
- V(mtsrin, MTSRIN, 0x7C0001E4) \
- /* SLB Find Entry ESID */ \
- V(slbfee, SLBFEE, 0x7C0007A7) \
- /* SLB Invalidate All */ \
- V(slbia, SLBIA, 0x7C0003E4) \
- /* SLB Invalidate Entry */ \
- V(slbie, SLBIE, 0x7C000364) \
- /* SLB Move From Entry ESID */ \
- V(slbmfee, SLBMFEE, 0x7C000726) \
- /* SLB Move From Entry VSID */ \
- V(slbmfev, SLBMFEV, 0x7C0006A6) \
- /* SLB Move To Entry */ \
- V(slbmte, SLBMTE, 0x7C000324) \
- /* Store Byte Caching Inhibited Indexed */ \
- V(stbcix, STBCIX, 0x7C0007AA) \
- /* Store Doubleword Caching Inhibited Indexed */ \
- V(stdcix, STDCIX, 0x7C0007EA) \
- /* Store Halfword and Zero Caching Inhibited Indexed */ \
- V(sthcix, STHCIX, 0x7C00076A) \
- /* Store Word and Zero Caching Inhibited Indexed */ \
- V(stwcix, STWCIX, 0x7C00072A) \
- /* TLB Invalidate All */ \
- V(tlbia, TLBIA, 0x7C0002E4) \
- /* TLB Invalidate Entry */ \
- V(tlbie, TLBIE, 0x7C000264) \
- /* TLB Invalidate Entry Local */ \
- V(tlbiel, TLBIEL, 0x7C000224) \
- /* Message Clear Privileged */ \
- V(msgclrp, MSGCLRP, 0x7C00015C) \
- /* Message Send Privileged */ \
- V(msgsndp, MSGSNDP, 0x7C00011C) \
- /* Message Clear */ \
- V(msgclr, MSGCLR, 0x7C0001DC) \
- /* Message Send */ \
- V(msgsnd, MSGSND, 0x7C00019C) \
- /* Move From Machine State Register */ \
- V(mfmsr, MFMSR, 0x7C0000A6) \
- /* Move To Machine State Register */ \
- V(mtmsr, MTMSR, 0x7C000124) \
- /* TLB Synchronize */ \
- V(tlbsync, TLBSYNC, 0x7C00046C) \
- /* Transaction Abort */ \
- V(tabort, TABORT, 0x7C00071D) \
- /* Transaction Abort Doubleword Conditional */ \
- V(tabortdc, TABORTDC, 0x7C00065D) \
- /* Transaction Abort Doubleword Conditional Immediate */ \
- V(tabortdci, TABORTDCI, 0x7C0006DD) \
- /* Transaction Abort Word Conditional */ \
- V(tabortwc, TABORTWC, 0x7C00061D) \
- /* Transaction Abort Word Conditional Immediate */ \
- V(tabortwci, TABORTWCI, 0x7C00069D) \
- /* Transaction Begin */ \
- V(tbegin, TBEGIN, 0x7C00051D) \
- /* Transaction Check */ \
- V(tcheck, TCHECK, 0x7C00059C) \
- /* Transaction End */ \
- V(tend, TEND, 0x7C00055C) \
- /* Transaction Recheckpoint */ \
- V(trechkpt, TRECHKPT, 0x7C0007DD) \
- /* Transaction Reclaim */ \
- V(treclaim, TRECLAIM, 0x7C00075D) \
- /* Transaction Suspend or Resume */ \
- V(tsr, TSR, 0x7C0005DC) \
- /* Load Vector Element Byte Indexed */ \
- V(lvebx, LVEBX, 0x7C00000E) \
- /* Load Vector Element Halfword Indexed */ \
- V(lvehx, LVEHX, 0x7C00004E) \
- /* Load Vector Element Word Indexed */ \
- V(lvewx, LVEWX, 0x7C00008E) \
- /* Load Vector for Shift Left */ \
- V(lvsl, LVSL, 0x7C00000C) \
- /* Load Vector for Shift Right */ \
- V(lvsr, LVSR, 0x7C00004C) \
- /* Load Vector Indexed */ \
- V(lvx, LVX, 0x7C0000CE) \
- /* Load Vector Indexed Last */ \
- V(lvxl, LVXL, 0x7C0002CE) \
- /* Store Vector Element Byte Indexed */ \
- V(stvebx, STVEBX, 0x7C00010E) \
- /* Store Vector Element Halfword Indexed */ \
- V(stvehx, STVEHX, 0x7C00014E) \
- /* Store Vector Element Word Indexed */ \
- V(stvewx, STVEWX, 0x7C00018E) \
- /* Store Vector Indexed */ \
- V(stvx, STVX, 0x7C0001CE) \
- /* Store Vector Indexed Last */ \
- V(stvxl, STVXL, 0x7C0003CE) \
- /* Vector Minimum Signed Doubleword */ \
- V(vminsd, VMINSD, 0x100003C2) \
- /* Floating Merge Even Word */ \
- V(fmrgew, FMRGEW, 0xFC00078C) \
- /* Floating Merge Odd Word */ \
- V(fmrgow, FMRGOW, 0xFC00068C) \
- /* Wait for Interrupt */ \
+#define PPC_X_OPCODE_UNUSED_LIST(V) \
+ /* Bit Permute Doubleword */ \
+ V(bpermd, BPERMD, 0x7C0001F8) \
+ /* Extend Sign Word */ \
+ V(extsw, EXTSW, 0x7C0007B4) \
+ /* Load Word Algebraic with Update Indexed */ \
+ V(lwaux, LWAUX, 0x7C0002EA) \
+ /* Load Word Algebraic Indexed */ \
+ V(lwax, LWAX, 0x7C0002AA) \
+ /* Parity Doubleword */ \
+ V(prtyd, PRTYD, 0x7C000174) \
+ /* Store Doubleword Byte-Reverse Indexed */ \
+ V(stdbrx, STDBRX, 0x7C000528) \
+ /* Trap Doubleword */ \
+ V(td, TD, 0x7C000088) \
+ /* Branch Conditional to Branch Target Address Register */ \
+ V(bctar, BCTAR, 0x4C000460) \
+ /* Compare Byte */ \
+ V(cmpb, CMPB, 0x7C0003F8) \
+ /* Data Cache Block Flush */ \
+ V(dcbf, DCBF, 0x7C0000AC) \
+ /* Data Cache Block Store */ \
+ V(dcbst, DCBST, 0x7C00006C) \
+ /* Data Cache Block Touch */ \
+ V(dcbt, DCBT, 0x7C00022C) \
+ /* Data Cache Block Touch for Store */ \
+ V(dcbtst, DCBTST, 0x7C0001EC) \
+ /* Data Cache Block Zero */ \
+ V(dcbz, DCBZ, 0x7C0007EC) \
+ /* Equivalent */ \
+ V(eqv, EQV, 0x7C000238) \
+ /* Instruction Cache Block Invalidate */ \
+ V(icbi, ICBI, 0x7C0007AC) \
+ /* NAND */ \
+ V(nand, NAND, 0x7C0003B8) \
+ /* Parity Word */ \
+ V(prtyw, PRTYW, 0x7C000134) \
+ /* Store Halfword Byte-Reverse Indexed */ \
+ V(sthbrx, STHBRX, 0x7C00072C) \
+ /* Store Word Byte-Reverse Indexed */ \
+ V(stwbrx, STWBRX, 0x7C00052C) \
+ /* Synchronize */ \
+ V(sync, SYNC, 0x7C0004AC) \
+ /* Trap Word */ \
+ V(tw, TW, 0x7C000008) \
+ /* Executed No Operation */ \
+ V(xnop, XNOP, 0x68000000) \
+ /* Convert Binary Coded Decimal To Declets */ \
+ V(cbcdtd, CBCDTD, 0x7C000274) \
+ /* Convert Declets To Binary Coded Decimal */ \
+ V(cdtbcd, CDTBCD, 0x7C000234) \
+ /* Decimal Floating Add */ \
+ V(dadd, DADD, 0xEC000004) \
+ /* Decimal Floating Add Quad */ \
+ V(daddq, DADDQ, 0xFC000004) \
+ /* Decimal Floating Convert From Fixed */ \
+ V(dcffix, DCFFIX, 0xEC000644) \
+ /* Decimal Floating Convert From Fixed Quad */ \
+ V(dcffixq, DCFFIXQ, 0xFC000644) \
+ /* Decimal Floating Compare Ordered */ \
+ V(dcmpo, DCMPO, 0xEC000104) \
+ /* Decimal Floating Compare Ordered Quad */ \
+ V(dcmpoq, DCMPOQ, 0xFC000104) \
+ /* Decimal Floating Compare Unordered */ \
+ V(dcmpu, DCMPU, 0xEC000504) \
+ /* Decimal Floating Compare Unordered Quad */ \
+ V(dcmpuq, DCMPUQ, 0xFC000504) \
+ /* Decimal Floating Convert To DFP Long */ \
+ V(dctdp, DCTDP, 0xEC000204) \
+ /* Decimal Floating Convert To Fixed */ \
+ V(dctfix, DCTFIX, 0xEC000244) \
+ /* Decimal Floating Convert To Fixed Quad */ \
+ V(dctfixq, DCTFIXQ, 0xFC000244) \
+ /* Decimal Floating Convert To DFP Extended */ \
+ V(dctqpq, DCTQPQ, 0xFC000204) \
+ /* Decimal Floating Decode DPD To BCD */ \
+ V(ddedpd, DDEDPD, 0xEC000284) \
+ /* Decimal Floating Decode DPD To BCD Quad */ \
+ V(ddedpdq, DDEDPDQ, 0xFC000284) \
+ /* Decimal Floating Divide */ \
+ V(ddiv, DDIV, 0xEC000444) \
+ /* Decimal Floating Divide Quad */ \
+ V(ddivq, DDIVQ, 0xFC000444) \
+ /* Decimal Floating Encode BCD To DPD */ \
+ V(denbcd, DENBCD, 0xEC000684) \
+ /* Decimal Floating Encode BCD To DPD Quad */ \
+ V(denbcdq, DENBCDQ, 0xFC000684) \
+ /* Decimal Floating Insert Exponent */ \
+ V(diex, DIEX, 0xEC0006C4) \
+ /* Decimal Floating Insert Exponent Quad */ \
+ V(diexq, DIEXQ, 0xFC0006C4) \
+ /* Decimal Floating Multiply */ \
+ V(dmul, DMUL, 0xEC000044) \
+ /* Decimal Floating Multiply Quad */ \
+ V(dmulq, DMULQ, 0xFC000044) \
+ /* Decimal Floating Round To DFP Long */ \
+ V(drdpq, DRDPQ, 0xFC000604) \
+ /* Decimal Floating Round To DFP Short */ \
+ V(drsp, DRSP, 0xEC000604) \
+ /* Decimal Floating Subtract */ \
+ V(dsub, DSUB, 0xEC000404) \
+ /* Decimal Floating Subtract Quad */ \
+ V(dsubq, DSUBQ, 0xFC000404) \
+ /* Decimal Floating Test Exponent */ \
+ V(dtstex, DTSTEX, 0xEC000144) \
+ /* Decimal Floating Test Exponent Quad */ \
+ V(dtstexq, DTSTEXQ, 0xFC000144) \
+ /* Decimal Floating Test Significance */ \
+ V(dtstsf, DTSTSF, 0xEC000544) \
+ /* Decimal Floating Test Significance Quad */ \
+ V(dtstsfq, DTSTSFQ, 0xFC000544) \
+ /* Decimal Floating Extract Exponent */ \
+ V(dxex, DXEX, 0xEC0002C4) \
+ /* Decimal Floating Extract Exponent Quad */ \
+ V(dxexq, DXEXQ, 0xFC0002C4) \
+ /* Decorated Storage Notify */ \
+ V(dsn, DSN, 0x7C0003C6) \
+ /* Load Byte with Decoration Indexed */ \
+ V(lbdx, LBDX, 0x7C000406) \
+ /* Load Doubleword with Decoration Indexed */ \
+ V(lddx, LDDX, 0x7C0004C6) \
+ /* Load Floating Doubleword with Decoration Indexed */ \
+ V(lfddx, LFDDX, 0x7C000646) \
+ /* Load Halfword with Decoration Indexed */ \
+ V(lhdx, LHDX, 0x7C000446) \
+ /* Load Word with Decoration Indexed */ \
+ V(lwdx, LWDX, 0x7C000486) \
+ /* Store Byte with Decoration Indexed */ \
+ V(stbdx, STBDX, 0x7C000506) \
+ /* Store Doubleword with Decoration Indexed */ \
+ V(stddx, STDDX, 0x7C0005C6) \
+ /* Store Floating Doubleword with Decoration Indexed */ \
+ V(stfddx, STFDDX, 0x7C000746) \
+ /* Store Halfword with Decoration Indexed */ \
+ V(sthdx, STHDX, 0x7C000546) \
+ /* Store Word with Decoration Indexed */ \
+ V(stwdx, STWDX, 0x7C000586) \
+ /* Data Cache Block Allocate */ \
+ V(dcba, DCBA, 0x7C0005EC) \
+ /* Data Cache Block Invalidate */ \
+ V(dcbi, DCBI, 0x7C0003AC) \
+ /* Instruction Cache Block Touch */ \
+ V(icbt, ICBT, 0x7C00002C) \
+ /* Move to Condition Register from XER */ \
+ V(mcrxr, MCRXR, 0x7C000400) \
+ /* TLB Invalidate Local Indexed */ \
+ V(tlbilx, TLBILX, 0x7C000024) \
+ /* TLB Invalidate Virtual Address Indexed */ \
+ V(tlbivax, TLBIVAX, 0x7C000624) \
+ /* TLB Read Entry */ \
+ V(tlbre, TLBRE, 0x7C000764) \
+ /* TLB Search Indexed */ \
+ V(tlbsx, TLBSX, 0x7C000724) \
+ /* TLB Write Entry */ \
+ V(tlbwe, TLBWE, 0x7C0007A4) \
+ /* Write External Enable */ \
+ V(wrtee, WRTEE, 0x7C000106) \
+ /* Write External Enable Immediate */ \
+ V(wrteei, WRTEEI, 0x7C000146) \
+ /* Data Cache Read */ \
+ V(dcread, DCREAD, 0x7C00028C) \
+ /* Instruction Cache Read */ \
+ V(icread, ICREAD, 0x7C0007CC) \
+ /* Data Cache Invalidate */ \
+ V(dci, DCI, 0x7C00038C) \
+ /* Instruction Cache Invalidate */ \
+ V(ici, ICI, 0x7C00078C) \
+ /* Move From Device Control Register User Mode Indexed */ \
+ V(mfdcrux, MFDCRUX, 0x7C000246) \
+ /* Move From Device Control Register Indexed */ \
+ V(mfdcrx, MFDCRX, 0x7C000206) \
+ /* Move To Device Control Register User Mode Indexed */ \
+ V(mtdcrux, MTDCRUX, 0x7C000346) \
+ /* Move To Device Control Register Indexed */ \
+ V(mtdcrx, MTDCRX, 0x7C000306) \
+ /* Return From Debug Interrupt */ \
+ V(rfdi, RFDI, 0x4C00004E) \
+ /* Data Cache Block Flush by External PID */ \
+ V(dcbfep, DCBFEP, 0x7C0000FE) \
+ /* Data Cache Block Store by External PID */ \
+ V(dcbstep, DCBSTEP, 0x7C00007E) \
+ /* Data Cache Block Touch by External PID */ \
+ V(dcbtep, DCBTEP, 0x7C00027E) \
+ /* Data Cache Block Touch for Store by External PID */ \
+ V(dcbtstep, DCBTSTEP, 0x7C0001FE) \
+ /* Data Cache Block Zero by External PID */ \
+ V(dcbzep, DCBZEP, 0x7C0007FE) \
+ /* Instruction Cache Block Invalidate by External PID */ \
+ V(icbiep, ICBIEP, 0x7C0007BE) \
+ /* Load Byte and Zero by External PID Indexed */ \
+ V(lbepx, LBEPX, 0x7C0000BE) \
+ /* Load Floating-Point Double by External PID Indexed */ \
+ V(lfdepx, LFDEPX, 0x7C0004BE) \
+ /* Load Halfword and Zero by External PID Indexed */ \
+ V(lhepx, LHEPX, 0x7C00023E) \
+ /* Load Vector by External PID Indexed */ \
+ V(lvepx, LVEPX, 0x7C00024E) \
+ /* Load Vector by External PID Indexed Last */ \
+ V(lvepxl, LVEPXL, 0x7C00020E) \
+ /* Load Word and Zero by External PID Indexed */ \
+ V(lwepx, LWEPX, 0x7C00003E) \
+ /* Store Byte by External PID Indexed */ \
+ V(stbepx, STBEPX, 0x7C0001BE) \
+ /* Store Floating-Point Double by External PID Indexed */ \
+ V(stfdepx, STFDEPX, 0x7C0005BE) \
+ /* Store Halfword by External PID Indexed */ \
+ V(sthepx, STHEPX, 0x7C00033E) \
+ /* Store Vector by External PID Indexed */ \
+ V(stvepx, STVEPX, 0x7C00064E) \
+ /* Store Vector by External PID Indexed Last */ \
+ V(stvepxl, STVEPXL, 0x7C00060E) \
+ /* Store Word by External PID Indexed */ \
+ V(stwepx, STWEPX, 0x7C00013E) \
+ /* Load Doubleword by External PID Indexed */ \
+ V(ldepx, LDEPX, 0x7C00003A) \
+ /* Store Doubleword by External PID Indexed */ \
+ V(stdepx, STDEPX, 0x7C00013A) \
+ /* TLB Search and Reserve Indexed */ \
+ V(tlbsrx, TLBSRX, 0x7C0006A5) \
+ /* External Control In Word Indexed */ \
+ V(eciwx, ECIWX, 0x7C00026C) \
+ /* External Control Out Word Indexed */ \
+ V(ecowx, ECOWX, 0x7C00036C) \
+ /* Data Cache Block Lock Clear */ \
+ V(dcblc, DCBLC, 0x7C00030C) \
+ /* Data Cache Block Lock Query */ \
+ V(dcblq, DCBLQ, 0x7C00034D) \
+ /* Data Cache Block Touch and Lock Set */ \
+ V(dcbtls, DCBTLS, 0x7C00014C) \
+ /* Data Cache Block Touch for Store and Lock Set */ \
+ V(dcbtstls, DCBTSTLS, 0x7C00010C) \
+ /* Instruction Cache Block Lock Clear */ \
+ V(icblc, ICBLC, 0x7C0001CC) \
+ /* Instruction Cache Block Lock Query */ \
+ V(icblq, ICBLQ, 0x7C00018D) \
+ /* Instruction Cache Block Touch and Lock Set */ \
+ V(icbtls, ICBTLS, 0x7C0003CC) \
+ /* Floating Compare Ordered */ \
+ V(fcmpo, FCMPO, 0xFC000040) \
+ /* Floating Compare Unordered */ \
+ V(fcmpu, FCMPU, 0xFC000000) \
+ /* Floating Test for software Divide */ \
+ V(ftdiv, FTDIV, 0xFC000100) \
+ /* Floating Test for software Square Root */ \
+ V(ftsqrt, FTSQRT, 0xFC000140) \
+ /* Load Floating-Point as Integer Word Algebraic Indexed */ \
+ V(lfiwax, LFIWAX, 0x7C0006AE) \
+ /* Load Floating-Point as Integer Word and Zero Indexed */ \
+ V(lfiwzx, LFIWZX, 0x7C0006EE) \
+ /* Move To Condition Register from FPSCR */ \
+ V(mcrfs, MCRFS, 0xFC000080) \
+ /* Store Floating-Point as Integer Word Indexed */ \
+ V(stfiwx, STFIWX, 0x7C0007AE) \
+ /* Load Floating-Point Double Pair Indexed */ \
+ V(lfdpx, LFDPX, 0x7C00062E) \
+ /* Store Floating-Point Double Pair Indexed */ \
+ V(stfdpx, STFDPX, 0x7C00072E) \
+ /* Floating Absolute Value */ \
+ V(fabs, FABS, 0xFC000210) \
+ /* Floating Convert From Integer Doubleword */ \
+ V(fcfid, FCFID, 0xFC00069C) \
+ /* Floating Convert From Integer Doubleword Single */ \
+ V(fcfids, FCFIDS, 0xEC00069C) \
+ /* Floating Convert From Integer Doubleword Unsigned */ \
+ V(fcfidu, FCFIDU, 0xFC00079C) \
+ /* Floating Convert From Integer Doubleword Unsigned Single */ \
+ V(fcfidus, FCFIDUS, 0xEC00079C) \
+ /* Floating Copy Sign */ \
+ V(fcpsgn, FCPSGN, 0xFC000010) \
+ /* Floating Convert To Integer Doubleword */ \
+ V(fctid, FCTID, 0xFC00065C) \
+ /* Floating Convert To Integer Doubleword Unsigned */ \
+ V(fctidu, FCTIDU, 0xFC00075C) \
+ /* Floating Convert To Integer Doubleword Unsigned with round toward */ \
+ /* Zero */ \
+ V(fctiduz, FCTIDUZ, 0xFC00075E) \
+ /* Floating Convert To Integer Doubleword with round toward Zero */ \
+ V(fctidz, FCTIDZ, 0xFC00065E) \
+ /* Floating Convert To Integer Word */ \
+ V(fctiw, FCTIW, 0xFC00001C) \
+ /* Floating Convert To Integer Word Unsigned */ \
+ V(fctiwu, FCTIWU, 0xFC00011C) \
+ /* Floating Convert To Integer Word Unsigned with round toward Zero */ \
+ V(fctiwuz, FCTIWUZ, 0xFC00011E) \
+ /* Floating Convert To Integer Word with round to Zero */ \
+ V(fctiwz, FCTIWZ, 0xFC00001E) \
+ /* Floating Move Register */ \
+ V(fmr, FMR, 0xFC000090) \
+ /* Floating Negative Absolute Value */ \
+ V(fnabs, FNABS, 0xFC000110) \
+ /* Floating Negate */ \
+ V(fneg, FNEG, 0xFC000050) \
+ /* Floating Round to Single-Precision */ \
+ V(frsp, FRSP, 0xFC000018) \
+ /* Move From FPSCR */ \
+ V(mffs, MFFS, 0xFC00048E) \
+ /* Move To FPSCR Bit 0 */ \
+ V(mtfsb0, MTFSB0, 0xFC00008C) \
+ /* Move To FPSCR Bit 1 */ \
+ V(mtfsb1, MTFSB1, 0xFC00004C) \
+ /* Move To FPSCR Field Immediate */ \
+ V(mtfsfi, MTFSFI, 0xFC00010C) \
+ /* Floating Round To Integer Minus */ \
+ V(frim, FRIM, 0xFC0003D0) \
+ /* Floating Round To Integer Nearest */ \
+ V(frin, FRIN, 0xFC000310) \
+ /* Floating Round To Integer Plus */ \
+ V(frip, FRIP, 0xFC000390) \
+ /* Floating Round To Integer toward Zero */ \
+ V(friz, FRIZ, 0xFC000350) \
+ /* Multiply Cross Halfword to Word Signed */ \
+ V(mulchw, MULCHW, 0x10000150) \
+ /* Multiply Cross Halfword to Word Unsigned */ \
+ V(mulchwu, MULCHWU, 0x10000110) \
+ /* Multiply High Halfword to Word Signed */ \
+ V(mulhhw, MULHHW, 0x10000050) \
+ /* Multiply High Halfword to Word Unsigned */ \
+ V(mulhhwu, MULHHWU, 0x10000010) \
+ /* Multiply Low Halfword to Word Signed */ \
+ V(mullhw, MULLHW, 0x10000350) \
+ /* Multiply Low Halfword to Word Unsigned */ \
+ V(mullhwu, MULLHWU, 0x10000310) \
+ /* Determine Leftmost Zero Byte DQ 56 E0000000 P 58 LSQ lq Load Quadword */ \
+ V(dlmzb, DLMZB, 0x7C00009C) \
+ /* Load Quadword And Reserve Indexed */ \
+ V(lqarx, LQARX, 0x7C000228) \
+ /* Store Quadword Conditional Indexed and record CR0 */ \
+ V(stqcx, STQCX, 0x7C00016D) \
+ /* Load String Word Immediate */ \
+ V(lswi, LSWI, 0x7C0004AA) \
+ /* Load String Word Indexed */ \
+ V(lswx, LSWX, 0x7C00042A) \
+ /* Store String Word Immediate */ \
+ V(stswi, STSWI, 0x7C0005AA) \
+ /* Store String Word Indexed */ \
+ V(stswx, STSWX, 0x7C00052A) \
+ /* Clear BHRB */ \
+ V(clrbhrb, CLRBHRB, 0x7C00035C) \
+ /* Enforce In-order Execution of I/O */ \
+ V(eieio, EIEIO, 0x7C0006AC) \
+ /* Load Byte and Zero Caching Inhibited Indexed */ \
+ V(lbzcix, LBZCIX, 0x7C0006AA) \
+ /* Load Doubleword Caching Inhibited Indexed */ \
+ V(ldcix, LDCIX, 0x7C0006EA) \
+ /* Load Halfword and Zero Caching Inhibited Indexed */ \
+ V(lhzcix, LHZCIX, 0x7C00066A) \
+ /* Load Word and Zero Caching Inhibited Indexed */ \
+ V(lwzcix, LWZCIX, 0x7C00062A) \
+ /* Move From Segment Register */ \
+ V(mfsr, MFSR, 0x7C0004A6) \
+ /* Move From Segment Register Indirect */ \
+ V(mfsrin, MFSRIN, 0x7C000526) \
+ /* Move To Machine State Register Doubleword */ \
+ V(mtmsrd, MTMSRD, 0x7C000164) \
+ /* Move To Split Little Endian */ \
+ V(mtsle, MTSLE, 0x7C000126) \
+ /* Move To Segment Register */ \
+ V(mtsr, MTSR, 0x7C0001A4) \
+ /* Move To Segment Register Indirect */ \
+ V(mtsrin, MTSRIN, 0x7C0001E4) \
+ /* SLB Find Entry ESID */ \
+ V(slbfee, SLBFEE, 0x7C0007A7) \
+ /* SLB Invalidate All */ \
+ V(slbia, SLBIA, 0x7C0003E4) \
+ /* SLB Invalidate Entry */ \
+ V(slbie, SLBIE, 0x7C000364) \
+ /* SLB Move From Entry ESID */ \
+ V(slbmfee, SLBMFEE, 0x7C000726) \
+ /* SLB Move From Entry VSID */ \
+ V(slbmfev, SLBMFEV, 0x7C0006A6) \
+ /* SLB Move To Entry */ \
+ V(slbmte, SLBMTE, 0x7C000324) \
+ /* Store Byte Caching Inhibited Indexed */ \
+ V(stbcix, STBCIX, 0x7C0007AA) \
+ /* Store Doubleword Caching Inhibited Indexed */ \
+ V(stdcix, STDCIX, 0x7C0007EA) \
+ /* Store Halfword and Zero Caching Inhibited Indexed */ \
+ V(sthcix, STHCIX, 0x7C00076A) \
+ /* Store Word and Zero Caching Inhibited Indexed */ \
+ V(stwcix, STWCIX, 0x7C00072A) \
+ /* TLB Invalidate All */ \
+ V(tlbia, TLBIA, 0x7C0002E4) \
+ /* TLB Invalidate Entry */ \
+ V(tlbie, TLBIE, 0x7C000264) \
+ /* TLB Invalidate Entry Local */ \
+ V(tlbiel, TLBIEL, 0x7C000224) \
+ /* Message Clear Privileged */ \
+ V(msgclrp, MSGCLRP, 0x7C00015C) \
+ /* Message Send Privileged */ \
+ V(msgsndp, MSGSNDP, 0x7C00011C) \
+ /* Message Clear */ \
+ V(msgclr, MSGCLR, 0x7C0001DC) \
+ /* Message Send */ \
+ V(msgsnd, MSGSND, 0x7C00019C) \
+ /* Move From Machine State Register */ \
+ V(mfmsr, MFMSR, 0x7C0000A6) \
+ /* Move To Machine State Register */ \
+ V(mtmsr, MTMSR, 0x7C000124) \
+ /* TLB Synchronize */ \
+ V(tlbsync, TLBSYNC, 0x7C00046C) \
+ /* Transaction Abort */ \
+ V(tabort, TABORT, 0x7C00071D) \
+ /* Transaction Abort Doubleword Conditional */ \
+ V(tabortdc, TABORTDC, 0x7C00065D) \
+ /* Transaction Abort Doubleword Conditional Immediate */ \
+ V(tabortdci, TABORTDCI, 0x7C0006DD) \
+ /* Transaction Abort Word Conditional */ \
+ V(tabortwc, TABORTWC, 0x7C00061D) \
+ /* Transaction Abort Word Conditional Immediate */ \
+ V(tabortwci, TABORTWCI, 0x7C00069D) \
+ /* Transaction Begin */ \
+ V(tbegin, TBEGIN, 0x7C00051D) \
+ /* Transaction Check */ \
+ V(tcheck, TCHECK, 0x7C00059C) \
+ /* Transaction End */ \
+ V(tend, TEND, 0x7C00055C) \
+ /* Transaction Recheckpoint */ \
+ V(trechkpt, TRECHKPT, 0x7C0007DD) \
+ /* Transaction Reclaim */ \
+ V(treclaim, TRECLAIM, 0x7C00075D) \
+ /* Transaction Suspend or Resume */ \
+ V(tsr, TSR, 0x7C0005DC) \
+ /* Load Vector Element Byte Indexed */ \
+ V(lvebx, LVEBX, 0x7C00000E) \
+ /* Load Vector Element Halfword Indexed */ \
+ V(lvehx, LVEHX, 0x7C00004E) \
+ /* Load Vector Element Word Indexed */ \
+ V(lvewx, LVEWX, 0x7C00008E) \
+ /* Load Vector for Shift Left */ \
+ V(lvsl, LVSL, 0x7C00000C) \
+ /* Load Vector for Shift Right */ \
+ V(lvsr, LVSR, 0x7C00004C) \
+ /* Load Vector Indexed */ \
+ V(lvx, LVX, 0x7C0000CE) \
+ /* Load Vector Indexed Last */ \
+ V(lvxl, LVXL, 0x7C0002CE) \
+ /* Store Vector Element Byte Indexed */ \
+ V(stvebx, STVEBX, 0x7C00010E) \
+ /* Store Vector Element Halfword Indexed */ \
+ V(stvehx, STVEHX, 0x7C00014E) \
+ /* Store Vector Element Word Indexed */ \
+ V(stvewx, STVEWX, 0x7C00018E) \
+ /* Store Vector Indexed */ \
+ V(stvx, STVX, 0x7C0001CE) \
+ /* Store Vector Indexed Last */ \
+ V(stvxl, STVXL, 0x7C0003CE) \
+ /* Vector Minimum Signed Doubleword */ \
+ V(vminsd, VMINSD, 0x100003C2) \
+ /* Floating Merge Even Word */ \
+ V(fmrgew, FMRGEW, 0xFC00078C) \
+ /* Floating Merge Odd Word */ \
+ V(fmrgow, FMRGOW, 0xFC00068C) \
+ /* Wait for Interrupt */ \
V(wait, WAIT, 0x7C00007C)
#define PPC_X_OPCODE_LIST(V) \
@@ -1719,831 +1718,830 @@ typedef uint32_t Instr;
PPC_X_OPCODE_EH_L_FORM_LIST(V) \
PPC_X_OPCODE_UNUSED_LIST(V)
-#define PPC_EVS_OPCODE_LIST(V) \
- /* Vector Select */ \
+#define PPC_EVS_OPCODE_LIST(V) \
+ /* Vector Select */ \
V(evsel, EVSEL, 0x10000278)
-#define PPC_DS_OPCODE_LIST(V) \
- /* Load Doubleword */ \
- V(ld, LD, 0xE8000000) \
- /* Load Doubleword with Update */ \
- V(ldu, LDU, 0xE8000001) \
- /* Load Word Algebraic */ \
- V(lwa, LWA, 0xE8000002) \
- /* Store Doubleword */ \
- V(std, STD, 0xF8000000) \
- /* Store Doubleword with Update */ \
- V(stdu, STDU, 0xF8000001) \
- /* Load Floating-Point Double Pair */ \
- V(lfdp, LFDP, 0xE4000000) \
- /* Store Floating-Point Double Pair */ \
- V(stfdp, STFDP, 0xF4000000) \
- /* Store Quadword */ \
+#define PPC_DS_OPCODE_LIST(V) \
+ /* Load Doubleword */ \
+ V(ld, LD, 0xE8000000) \
+ /* Load Doubleword with Update */ \
+ V(ldu, LDU, 0xE8000001) \
+ /* Load Word Algebraic */ \
+ V(lwa, LWA, 0xE8000002) \
+ /* Store Doubleword */ \
+ V(std, STD, 0xF8000000) \
+ /* Store Doubleword with Update */ \
+ V(stdu, STDU, 0xF8000001) \
+ /* Load Floating-Point Double Pair */ \
+ V(lfdp, LFDP, 0xE4000000) \
+ /* Store Floating-Point Double Pair */ \
+ V(stfdp, STFDP, 0xF4000000) \
+ /* Store Quadword */ \
V(stq, STQ, 0xF8000002)
-#define PPC_DQ_OPCODE_LIST(V) \
- V(lsq, LSQ, 0xE0000000)
-
-#define PPC_D_OPCODE_LIST(V) \
- /* Trap Doubleword Immediate */ \
- V(tdi, TDI, 0x08000000) \
- /* Add Immediate */ \
- V(addi, ADDI, 0x38000000) \
- /* Add Immediate Carrying */ \
- V(addic, ADDIC, 0x30000000) \
- /* Add Immediate Carrying & record CR0 */ \
- V(addicx, ADDICx, 0x34000000) \
- /* Add Immediate Shifted */ \
- V(addis, ADDIS, 0x3C000000) \
- /* AND Immediate & record CR0 */ \
- V(andix, ANDIx, 0x70000000) \
- /* AND Immediate Shifted & record CR0 */ \
- V(andisx, ANDISx, 0x74000000) \
- /* Compare Immediate */ \
- V(cmpi, CMPI, 0x2C000000) \
- /* Compare Logical Immediate */ \
- V(cmpli, CMPLI, 0x28000000) \
- /* Load Byte and Zero */ \
- V(lbz, LBZ, 0x88000000) \
- /* Load Byte and Zero with Update */ \
- V(lbzu, LBZU, 0x8C000000) \
- /* Load Halfword Algebraic */ \
- V(lha, LHA, 0xA8000000) \
- /* Load Halfword Algebraic with Update */ \
- V(lhau, LHAU, 0xAC000000) \
- /* Load Halfword and Zero */ \
- V(lhz, LHZ, 0xA0000000) \
- /* Load Halfword and Zero with Update */ \
- V(lhzu, LHZU, 0xA4000000) \
- /* Load Multiple Word */ \
- V(lmw, LMW, 0xB8000000) \
- /* Load Word and Zero */ \
- V(lwz, LWZ, 0x80000000) \
- /* Load Word and Zero with Update */ \
- V(lwzu, LWZU, 0x84000000) \
- /* Multiply Low Immediate */ \
- V(mulli, MULLI, 0x1C000000) \
- /* OR Immediate */ \
- V(ori, ORI, 0x60000000) \
- /* OR Immediate Shifted */ \
- V(oris, ORIS, 0x64000000) \
- /* Store Byte */ \
- V(stb, STB, 0x98000000) \
- /* Store Byte with Update */ \
- V(stbu, STBU, 0x9C000000) \
- /* Store Halfword */ \
- V(sth, STH, 0xB0000000) \
- /* Store Halfword with Update */ \
- V(sthu, STHU, 0xB4000000) \
- /* Store Multiple Word */ \
- V(stmw, STMW, 0xBC000000) \
- /* Store Word */ \
- V(stw, STW, 0x90000000) \
- /* Store Word with Update */ \
- V(stwu, STWU, 0x94000000) \
- /* Subtract From Immediate Carrying */ \
- V(subfic, SUBFIC, 0x20000000) \
- /* Trap Word Immediate */ \
- V(twi, TWI, 0x0C000000) \
- /* XOR Immediate */ \
- V(xori, XORI, 0x68000000) \
- /* XOR Immediate Shifted */ \
- V(xoris, XORIS, 0x6C000000) \
- /* Load Floating-Point Double */ \
- V(lfd, LFD, 0xC8000000) \
- /* Load Floating-Point Double with Update */ \
- V(lfdu, LFDU, 0xCC000000) \
- /* Load Floating-Point Single */ \
- V(lfs, LFS, 0xC0000000) \
- /* Load Floating-Point Single with Update */ \
- V(lfsu, LFSU, 0xC4000000) \
- /* Store Floating-Point Double */ \
- V(stfd, STFD, 0xD8000000) \
- /* Store Floating-Point Double with Update */ \
- V(stfdu, STFDU, 0xDC000000) \
- /* Store Floating-Point Single */ \
- V(stfs, STFS, 0xD0000000) \
- /* Store Floating-Point Single with Update */ \
+#define PPC_DQ_OPCODE_LIST(V) V(lsq, LSQ, 0xE0000000)
+
+#define PPC_D_OPCODE_LIST(V) \
+ /* Trap Doubleword Immediate */ \
+ V(tdi, TDI, 0x08000000) \
+ /* Add Immediate */ \
+ V(addi, ADDI, 0x38000000) \
+ /* Add Immediate Carrying */ \
+ V(addic, ADDIC, 0x30000000) \
+ /* Add Immediate Carrying & record CR0 */ \
+ V(addicx, ADDICx, 0x34000000) \
+ /* Add Immediate Shifted */ \
+ V(addis, ADDIS, 0x3C000000) \
+ /* AND Immediate & record CR0 */ \
+ V(andix, ANDIx, 0x70000000) \
+ /* AND Immediate Shifted & record CR0 */ \
+ V(andisx, ANDISx, 0x74000000) \
+ /* Compare Immediate */ \
+ V(cmpi, CMPI, 0x2C000000) \
+ /* Compare Logical Immediate */ \
+ V(cmpli, CMPLI, 0x28000000) \
+ /* Load Byte and Zero */ \
+ V(lbz, LBZ, 0x88000000) \
+ /* Load Byte and Zero with Update */ \
+ V(lbzu, LBZU, 0x8C000000) \
+ /* Load Halfword Algebraic */ \
+ V(lha, LHA, 0xA8000000) \
+ /* Load Halfword Algebraic with Update */ \
+ V(lhau, LHAU, 0xAC000000) \
+ /* Load Halfword and Zero */ \
+ V(lhz, LHZ, 0xA0000000) \
+ /* Load Halfword and Zero with Update */ \
+ V(lhzu, LHZU, 0xA4000000) \
+ /* Load Multiple Word */ \
+ V(lmw, LMW, 0xB8000000) \
+ /* Load Word and Zero */ \
+ V(lwz, LWZ, 0x80000000) \
+ /* Load Word and Zero with Update */ \
+ V(lwzu, LWZU, 0x84000000) \
+ /* Multiply Low Immediate */ \
+ V(mulli, MULLI, 0x1C000000) \
+ /* OR Immediate */ \
+ V(ori, ORI, 0x60000000) \
+ /* OR Immediate Shifted */ \
+ V(oris, ORIS, 0x64000000) \
+ /* Store Byte */ \
+ V(stb, STB, 0x98000000) \
+ /* Store Byte with Update */ \
+ V(stbu, STBU, 0x9C000000) \
+ /* Store Halfword */ \
+ V(sth, STH, 0xB0000000) \
+ /* Store Halfword with Update */ \
+ V(sthu, STHU, 0xB4000000) \
+ /* Store Multiple Word */ \
+ V(stmw, STMW, 0xBC000000) \
+ /* Store Word */ \
+ V(stw, STW, 0x90000000) \
+ /* Store Word with Update */ \
+ V(stwu, STWU, 0x94000000) \
+ /* Subtract From Immediate Carrying */ \
+ V(subfic, SUBFIC, 0x20000000) \
+ /* Trap Word Immediate */ \
+ V(twi, TWI, 0x0C000000) \
+ /* XOR Immediate */ \
+ V(xori, XORI, 0x68000000) \
+ /* XOR Immediate Shifted */ \
+ V(xoris, XORIS, 0x6C000000) \
+ /* Load Floating-Point Double */ \
+ V(lfd, LFD, 0xC8000000) \
+ /* Load Floating-Point Double with Update */ \
+ V(lfdu, LFDU, 0xCC000000) \
+ /* Load Floating-Point Single */ \
+ V(lfs, LFS, 0xC0000000) \
+ /* Load Floating-Point Single with Update */ \
+ V(lfsu, LFSU, 0xC4000000) \
+ /* Store Floating-Point Double */ \
+ V(stfd, STFD, 0xD8000000) \
+ /* Store Floating-Point Double with Update */ \
+ V(stfdu, STFDU, 0xDC000000) \
+ /* Store Floating-Point Single */ \
+ V(stfs, STFS, 0xD0000000) \
+ /* Store Floating-Point Single with Update */ \
V(stfsu, STFSU, 0xD4000000)
-#define PPC_XFL_OPCODE_LIST(V) \
- /* Move To FPSCR Fields */ \
+#define PPC_XFL_OPCODE_LIST(V) \
+ /* Move To FPSCR Fields */ \
V(mtfsf, MTFSF, 0xFC00058E)
-#define PPC_XFX_OPCODE_LIST(V) \
- /* Move From Condition Register */ \
- V(mfcr, MFCR, 0x7C000026) \
- /* Move From One Condition Register Field */ \
- V(mfocrf, MFOCRF, 0x7C100026) \
- /* Move From Special Purpose Register */ \
- V(mfspr, MFSPR, 0x7C0002A6) \
- /* Move To Condition Register Fields */ \
- V(mtcrf, MTCRF, 0x7C000120) \
- /* Move To One Condition Register Field */ \
- V(mtocrf, MTOCRF, 0x7C100120) \
- /* Move To Special Purpose Register */ \
- V(mtspr, MTSPR, 0x7C0003A6) \
- /* Debugger Notify Halt */ \
- V(dnh, DNH, 0x4C00018C) \
- /* Move From Device Control Register */ \
- V(mfdcr, MFDCR, 0x7C000286) \
- /* Move To Device Control Register */ \
- V(mtdcr, MTDCR, 0x7C000386) \
- /* Move from Performance Monitor Register */ \
- V(mfpmr, MFPMR, 0x7C00029C) \
- /* Move To Performance Monitor Register */ \
- V(mtpmr, MTPMR, 0x7C00039C) \
- /* Move From Branch History Rolling Buffer */ \
- V(mfbhrbe, MFBHRBE, 0x7C00025C) \
- /* Move From Time Base */ \
+#define PPC_XFX_OPCODE_LIST(V) \
+ /* Move From Condition Register */ \
+ V(mfcr, MFCR, 0x7C000026) \
+ /* Move From One Condition Register Field */ \
+ V(mfocrf, MFOCRF, 0x7C100026) \
+ /* Move From Special Purpose Register */ \
+ V(mfspr, MFSPR, 0x7C0002A6) \
+ /* Move To Condition Register Fields */ \
+ V(mtcrf, MTCRF, 0x7C000120) \
+ /* Move To One Condition Register Field */ \
+ V(mtocrf, MTOCRF, 0x7C100120) \
+ /* Move To Special Purpose Register */ \
+ V(mtspr, MTSPR, 0x7C0003A6) \
+ /* Debugger Notify Halt */ \
+ V(dnh, DNH, 0x4C00018C) \
+ /* Move From Device Control Register */ \
+ V(mfdcr, MFDCR, 0x7C000286) \
+ /* Move To Device Control Register */ \
+ V(mtdcr, MTDCR, 0x7C000386) \
+ /* Move from Performance Monitor Register */ \
+ V(mfpmr, MFPMR, 0x7C00029C) \
+ /* Move To Performance Monitor Register */ \
+ V(mtpmr, MTPMR, 0x7C00039C) \
+ /* Move From Branch History Rolling Buffer */ \
+ V(mfbhrbe, MFBHRBE, 0x7C00025C) \
+ /* Move From Time Base */ \
V(mftb, MFTB, 0x7C0002E6)
-#define PPC_MDS_OPCODE_LIST(V) \
- /* Rotate Left Doubleword then Clear Left */ \
- V(rldcl, RLDCL, 0x78000010) \
- /* Rotate Left Doubleword then Clear Right */ \
+#define PPC_MDS_OPCODE_LIST(V) \
+ /* Rotate Left Doubleword then Clear Left */ \
+ V(rldcl, RLDCL, 0x78000010) \
+ /* Rotate Left Doubleword then Clear Right */ \
V(rldcr, RLDCR, 0x78000012)
-#define PPC_A_OPCODE_LIST(V) \
- /* Integer Select */ \
- V(isel, ISEL, 0x7C00001E) \
- /* Floating Add */ \
- V(fadd, FADD, 0xFC00002A) \
- /* Floating Add Single */ \
- V(fadds, FADDS, 0xEC00002A) \
- /* Floating Divide */ \
- V(fdiv, FDIV, 0xFC000024) \
- /* Floating Divide Single */ \
- V(fdivs, FDIVS, 0xEC000024) \
- /* Floating Multiply-Add */ \
- V(fmadd, FMADD, 0xFC00003A) \
- /* Floating Multiply-Add Single */ \
- V(fmadds, FMADDS, 0xEC00003A) \
- /* Floating Multiply-Subtract */ \
- V(fmsub, FMSUB, 0xFC000038) \
- /* Floating Multiply-Subtract Single */ \
- V(fmsubs, FMSUBS, 0xEC000038) \
- /* Floating Multiply */ \
- V(fmul, FMUL, 0xFC000032) \
- /* Floating Multiply Single */ \
- V(fmuls, FMULS, 0xEC000032) \
- /* Floating Negative Multiply-Add */ \
- V(fnmadd, FNMADD, 0xFC00003E) \
- /* Floating Negative Multiply-Add Single */ \
- V(fnmadds, FNMADDS, 0xEC00003E) \
- /* Floating Negative Multiply-Subtract */ \
- V(fnmsub, FNMSUB, 0xFC00003C) \
- /* Floating Negative Multiply-Subtract Single */ \
- V(fnmsubs, FNMSUBS, 0xEC00003C) \
- /* Floating Reciprocal Estimate Single */ \
- V(fres, FRES, 0xEC000030) \
- /* Floating Reciprocal Square Root Estimate */ \
- V(frsqrte, FRSQRTE, 0xFC000034) \
- /* Floating Select */ \
- V(fsel, FSEL, 0xFC00002E) \
- /* Floating Square Root */ \
- V(fsqrt, FSQRT, 0xFC00002C) \
- /* Floating Square Root Single */ \
- V(fsqrts, FSQRTS, 0xEC00002C) \
- /* Floating Subtract */ \
- V(fsub, FSUB, 0xFC000028) \
- /* Floating Subtract Single */ \
- V(fsubs, FSUBS, 0xEC000028) \
- /* Floating Reciprocal Estimate */ \
- V(fre, FRE, 0xFC000030) \
- /* Floating Reciprocal Square Root Estimate Single */ \
+#define PPC_A_OPCODE_LIST(V) \
+ /* Integer Select */ \
+ V(isel, ISEL, 0x7C00001E) \
+ /* Floating Add */ \
+ V(fadd, FADD, 0xFC00002A) \
+ /* Floating Add Single */ \
+ V(fadds, FADDS, 0xEC00002A) \
+ /* Floating Divide */ \
+ V(fdiv, FDIV, 0xFC000024) \
+ /* Floating Divide Single */ \
+ V(fdivs, FDIVS, 0xEC000024) \
+ /* Floating Multiply-Add */ \
+ V(fmadd, FMADD, 0xFC00003A) \
+ /* Floating Multiply-Add Single */ \
+ V(fmadds, FMADDS, 0xEC00003A) \
+ /* Floating Multiply-Subtract */ \
+ V(fmsub, FMSUB, 0xFC000038) \
+ /* Floating Multiply-Subtract Single */ \
+ V(fmsubs, FMSUBS, 0xEC000038) \
+ /* Floating Multiply */ \
+ V(fmul, FMUL, 0xFC000032) \
+ /* Floating Multiply Single */ \
+ V(fmuls, FMULS, 0xEC000032) \
+ /* Floating Negative Multiply-Add */ \
+ V(fnmadd, FNMADD, 0xFC00003E) \
+ /* Floating Negative Multiply-Add Single */ \
+ V(fnmadds, FNMADDS, 0xEC00003E) \
+ /* Floating Negative Multiply-Subtract */ \
+ V(fnmsub, FNMSUB, 0xFC00003C) \
+ /* Floating Negative Multiply-Subtract Single */ \
+ V(fnmsubs, FNMSUBS, 0xEC00003C) \
+ /* Floating Reciprocal Estimate Single */ \
+ V(fres, FRES, 0xEC000030) \
+ /* Floating Reciprocal Square Root Estimate */ \
+ V(frsqrte, FRSQRTE, 0xFC000034) \
+ /* Floating Select */ \
+ V(fsel, FSEL, 0xFC00002E) \
+ /* Floating Square Root */ \
+ V(fsqrt, FSQRT, 0xFC00002C) \
+ /* Floating Square Root Single */ \
+ V(fsqrts, FSQRTS, 0xEC00002C) \
+ /* Floating Subtract */ \
+ V(fsub, FSUB, 0xFC000028) \
+ /* Floating Subtract Single */ \
+ V(fsubs, FSUBS, 0xEC000028) \
+ /* Floating Reciprocal Estimate */ \
+ V(fre, FRE, 0xFC000030) \
+ /* Floating Reciprocal Square Root Estimate Single */ \
V(frsqrtes, FRSQRTES, 0xEC000034)
-#define PPC_VA_OPCODE_LIST(V) \
- /* Vector Add Extended & write Carry Unsigned Quadword */ \
- V(vaddecuq, VADDECUQ, 0x1000003D) \
- /* Vector Add Extended Unsigned Quadword Modulo */ \
- V(vaddeuqm, VADDEUQM, 0x1000003C) \
- /* Vector Multiply-Add Single-Precision */ \
- V(vmaddfp, VMADDFP, 0x1000002E) \
- /* Vector Multiply-High-Add Signed Halfword Saturate */ \
- V(vmhaddshs, VMHADDSHS, 0x10000020) \
- /* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
- V(vmhraddshs, VMHRADDSHS, 0x10000021) \
- /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
- V(vmladduhm, VMLADDUHM, 0x10000022) \
- /* Vector Multiply-Sum Mixed Byte Modulo */ \
- V(vmsummbm, VMSUMMBM, 0x10000025) \
- /* Vector Multiply-Sum Signed Halfword Modulo */ \
- V(vmsumshm, VMSUMSHM, 0x10000028) \
- /* Vector Multiply-Sum Signed Halfword Saturate */ \
- V(vmsumshs, VMSUMSHS, 0x10000029) \
- /* Vector Multiply-Sum Unsigned Byte Modulo */ \
- V(vmsumubm, VMSUMUBM, 0x10000024) \
- /* Vector Multiply-Sum Unsigned Halfword Modulo */ \
- V(vmsumuhm, VMSUMUHM, 0x10000026) \
- /* Vector Multiply-Sum Unsigned Halfword Saturate */ \
- V(vmsumuhs, VMSUMUHS, 0x10000027) \
- /* Vector Negative Multiply-Subtract Single-Precision */ \
- V(vnmsubfp, VNMSUBFP, 0x1000002F) \
- /* Vector Permute */ \
- V(vperm, VPERM, 0x1000002B) \
- /* Vector Select */ \
- V(vsel, VSEL, 0x1000002A) \
- /* Vector Shift Left Double by Octet Immediate */ \
- V(vsldoi, VSLDOI, 0x1000002C) \
- /* Vector Subtract Extended & write Carry Unsigned Quadword */ \
- V(vsubecuq, VSUBECUQ, 0x1000003F) \
- /* Vector Subtract Extended Unsigned Quadword Modulo */ \
- V(vsubeuqm, VSUBEUQM, 0x1000003E) \
- /* Vector Permute and Exclusive-OR */ \
+#define PPC_VA_OPCODE_LIST(V) \
+ /* Vector Add Extended & write Carry Unsigned Quadword */ \
+ V(vaddecuq, VADDECUQ, 0x1000003D) \
+ /* Vector Add Extended Unsigned Quadword Modulo */ \
+ V(vaddeuqm, VADDEUQM, 0x1000003C) \
+ /* Vector Multiply-Add Single-Precision */ \
+ V(vmaddfp, VMADDFP, 0x1000002E) \
+ /* Vector Multiply-High-Add Signed Halfword Saturate */ \
+ V(vmhaddshs, VMHADDSHS, 0x10000020) \
+ /* Vector Multiply-High-Round-Add Signed Halfword Saturate */ \
+ V(vmhraddshs, VMHRADDSHS, 0x10000021) \
+ /* Vector Multiply-Low-Add Unsigned Halfword Modulo */ \
+ V(vmladduhm, VMLADDUHM, 0x10000022) \
+ /* Vector Multiply-Sum Mixed Byte Modulo */ \
+ V(vmsummbm, VMSUMMBM, 0x10000025) \
+ /* Vector Multiply-Sum Signed Halfword Modulo */ \
+ V(vmsumshm, VMSUMSHM, 0x10000028) \
+ /* Vector Multiply-Sum Signed Halfword Saturate */ \
+ V(vmsumshs, VMSUMSHS, 0x10000029) \
+ /* Vector Multiply-Sum Unsigned Byte Modulo */ \
+ V(vmsumubm, VMSUMUBM, 0x10000024) \
+ /* Vector Multiply-Sum Unsigned Halfword Modulo */ \
+ V(vmsumuhm, VMSUMUHM, 0x10000026) \
+ /* Vector Multiply-Sum Unsigned Halfword Saturate */ \
+ V(vmsumuhs, VMSUMUHS, 0x10000027) \
+ /* Vector Negative Multiply-Subtract Single-Precision */ \
+ V(vnmsubfp, VNMSUBFP, 0x1000002F) \
+ /* Vector Permute */ \
+ V(vperm, VPERM, 0x1000002B) \
+ /* Vector Select */ \
+ V(vsel, VSEL, 0x1000002A) \
+ /* Vector Shift Left Double by Octet Immediate */ \
+ V(vsldoi, VSLDOI, 0x1000002C) \
+ /* Vector Subtract Extended & write Carry Unsigned Quadword */ \
+ V(vsubecuq, VSUBECUQ, 0x1000003F) \
+ /* Vector Subtract Extended Unsigned Quadword Modulo */ \
+ V(vsubeuqm, VSUBEUQM, 0x1000003E) \
+ /* Vector Permute and Exclusive-OR */ \
V(vpermxor, VPERMXOR, 0x1000002D)
-#define PPC_XX1_OPCODE_LIST(V) \
- /* Load VSR Scalar Doubleword Indexed */ \
- V(lxsdx, LXSDX, 0x7C000498) \
- /* Load VSX Scalar as Integer Word Algebraic Indexed */ \
- V(lxsiwax, LXSIWAX, 0x7C000098) \
- /* Load VSX Scalar as Integer Word and Zero Indexed */ \
- V(lxsiwzx, LXSIWZX, 0x7C000018) \
- /* Load VSX Scalar Single-Precision Indexed */ \
- V(lxsspx, LXSSPX, 0x7C000418) \
- /* Load VSR Vector Doubleword*2 Indexed */ \
- V(lxvd, LXVD, 0x7C000698) \
- /* Load VSR Vector Doubleword & Splat Indexed */ \
- V(lxvdsx, LXVDSX, 0x7C000298) \
- /* Load VSR Vector Word*4 Indexed */ \
- V(lxvw, LXVW, 0x7C000618) \
- /* Move From VSR Doubleword */ \
- V(mfvsrd, MFVSRD, 0x7C000066) \
- /* Move From VSR Word and Zero */ \
- V(mfvsrwz, MFVSRWZ, 0x7C0000E6) \
- /* Store VSR Scalar Doubleword Indexed */ \
- V(stxsdx, STXSDX, 0x7C000598) \
- /* Store VSX Scalar as Integer Word Indexed */ \
- V(stxsiwx, STXSIWX, 0x7C000118) \
- /* Store VSR Scalar Word Indexed */ \
- V(stxsspx, STXSSPX, 0x7C000518) \
- /* Store VSR Vector Doubleword*2 Indexed */ \
- V(stxvd, STXVD, 0x7C000798) \
- /* Store VSR Vector Word*4 Indexed */ \
+#define PPC_XX1_OPCODE_LIST(V) \
+ /* Load VSR Scalar Doubleword Indexed */ \
+ V(lxsdx, LXSDX, 0x7C000498) \
+ /* Load VSX Scalar as Integer Word Algebraic Indexed */ \
+ V(lxsiwax, LXSIWAX, 0x7C000098) \
+ /* Load VSX Scalar as Integer Word and Zero Indexed */ \
+ V(lxsiwzx, LXSIWZX, 0x7C000018) \
+ /* Load VSX Scalar Single-Precision Indexed */ \
+ V(lxsspx, LXSSPX, 0x7C000418) \
+ /* Load VSR Vector Doubleword*2 Indexed */ \
+ V(lxvd, LXVD, 0x7C000698) \
+ /* Load VSR Vector Doubleword & Splat Indexed */ \
+ V(lxvdsx, LXVDSX, 0x7C000298) \
+ /* Load VSR Vector Word*4 Indexed */ \
+ V(lxvw, LXVW, 0x7C000618) \
+ /* Move From VSR Doubleword */ \
+ V(mfvsrd, MFVSRD, 0x7C000066) \
+ /* Move From VSR Word and Zero */ \
+ V(mfvsrwz, MFVSRWZ, 0x7C0000E6) \
+ /* Store VSR Scalar Doubleword Indexed */ \
+ V(stxsdx, STXSDX, 0x7C000598) \
+ /* Store VSX Scalar as Integer Word Indexed */ \
+ V(stxsiwx, STXSIWX, 0x7C000118) \
+ /* Store VSR Scalar Word Indexed */ \
+ V(stxsspx, STXSSPX, 0x7C000518) \
+ /* Store VSR Vector Doubleword*2 Indexed */ \
+ V(stxvd, STXVD, 0x7C000798) \
+ /* Store VSR Vector Word*4 Indexed */ \
V(stxvw, STXVW, 0x7C000718)
-#define PPC_B_OPCODE_LIST(V) \
- /* Branch Conditional */ \
+#define PPC_B_OPCODE_LIST(V) \
+ /* Branch Conditional */ \
V(bc, BCX, 0x40000000)
-#define PPC_XO_OPCODE_LIST(V) \
- /* Divide Doubleword */ \
- V(divd, DIVD, 0x7C0003D2) \
- /* Divide Doubleword Extended */ \
- V(divde, DIVDE, 0x7C000352) \
- /* Divide Doubleword Extended & record OV */ \
- V(divdeo, DIVDEO, 0x7C000752) \
- /* Divide Doubleword Extended Unsigned */ \
- V(divdeu, DIVDEU, 0x7C000312) \
- /* Divide Doubleword Extended Unsigned & record OV */ \
- V(divdeuo, DIVDEUO, 0x7C000712) \
- /* Divide Doubleword & record OV */ \
- V(divdo, DIVDO, 0x7C0007D2) \
- /* Divide Doubleword Unsigned */ \
- V(divdu, DIVDU, 0x7C000392) \
- /* Divide Doubleword Unsigned & record OV */ \
- V(divduo, DIVDUO, 0x7C000792) \
- /* Multiply High Doubleword */ \
- V(mulhd, MULHD, 0x7C000092) \
- /* Multiply High Doubleword Unsigned */ \
- V(mulhdu, MULHDU, 0x7C000012) \
- /* Multiply Low Doubleword */ \
- V(mulld, MULLD, 0x7C0001D2) \
- /* Multiply Low Doubleword & record OV */ \
- V(mulldo, MULLDO, 0x7C0005D2) \
- /* Add */ \
- V(add, ADDX, 0x7C000214) \
- /* Add Carrying */ \
- V(addc, ADDCX, 0x7C000014) \
- /* Add Carrying & record OV */ \
- V(addco, ADDCO, 0x7C000414) \
- /* Add Extended */ \
- V(adde, ADDEX, 0x7C000114) \
- /* Add Extended & record OV & record OV */ \
- V(addeo, ADDEO, 0x7C000514) \
- /* Add to Minus One Extended */ \
- V(addme, ADDME, 0x7C0001D4) \
- /* Add to Minus One Extended & record OV */ \
- V(addmeo, ADDMEO, 0x7C0005D4) \
- /* Add & record OV */ \
- V(addo, ADDO, 0x7C000614) \
- /* Add to Zero Extended */ \
- V(addze, ADDZEX, 0x7C000194) \
- /* Add to Zero Extended & record OV */ \
- V(addzeo, ADDZEO, 0x7C000594) \
- /* Divide Word Format */ \
- V(divw, DIVW, 0x7C0003D6) \
- /* Divide Word Extended */ \
- V(divwe, DIVWE, 0x7C000356) \
- /* Divide Word Extended & record OV */ \
- V(divweo, DIVWEO, 0x7C000756) \
- /* Divide Word Extended Unsigned */ \
- V(divweu, DIVWEU, 0x7C000316) \
- /* Divide Word Extended Unsigned & record OV */ \
- V(divweuo, DIVWEUO, 0x7C000716) \
- /* Divide Word & record OV */ \
- V(divwo, DIVWO, 0x7C0007D6) \
- /* Divide Word Unsigned */ \
- V(divwu, DIVWU, 0x7C000396) \
- /* Divide Word Unsigned & record OV */ \
- V(divwuo, DIVWUO, 0x7C000796) \
- /* Multiply High Word */ \
- V(mulhw, MULHWX, 0x7C000096) \
- /* Multiply High Word Unsigned */ \
- V(mulhwu, MULHWUX, 0x7C000016) \
- /* Multiply Low Word */ \
- V(mullw, MULLW, 0x7C0001D6) \
- /* Multiply Low Word & record OV */ \
- V(mullwo, MULLWO, 0x7C0005D6) \
- /* Negate */ \
- V(neg, NEGX, 0x7C0000D0) \
- /* Negate & record OV */ \
- V(nego, NEGO, 0x7C0004D0) \
- /* Subtract From */ \
- V(subf, SUBFX, 0x7C000050) \
- /* Subtract From Carrying */ \
- V(subfc, SUBFCX, 0x7C000010) \
- /* Subtract From Carrying & record OV */ \
- V(subfco, SUBFCO, 0x7C000410) \
- /* Subtract From Extended */ \
- V(subfe, SUBFEX, 0x7C000110) \
- /* Subtract From Extended & record OV */ \
- V(subfeo, SUBFEO, 0x7C000510) \
- /* Subtract From Minus One Extended */ \
- V(subfme, SUBFME, 0x7C0001D0) \
- /* Subtract From Minus One Extended & record OV */ \
- V(subfmeo, SUBFMEO, 0x7C0005D0) \
- /* Subtract From & record OV */ \
- V(subfo, SUBFO, 0x7C000450) \
- /* Subtract From Zero Extended */ \
- V(subfze, SUBFZE, 0x7C000190) \
- /* Subtract From Zero Extended & record OV */ \
- V(subfzeo, SUBFZEO, 0x7C000590) \
- /* Add and Generate Sixes */ \
- V(addg, ADDG, 0x7C000094) \
- /* Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
- V(macchw, MACCHW, 0x10000158) \
- /* Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
- V(macchws, MACCHWS, 0x100001D8) \
- /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */ \
- V(macchwsu, MACCHWSU, 0x10000198) \
- /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */ \
- V(macchwu, MACCHWU, 0x10000118) \
- /* Multiply Accumulate High Halfword to Word Modulo Signed */ \
- V(machhw, MACHHW, 0x10000058) \
- /* Multiply Accumulate High Halfword to Word Saturate Signed */ \
- V(machhws, MACHHWS, 0x100000D8) \
- /* Multiply Accumulate High Halfword to Word Saturate Unsigned */ \
- V(machhwsu, MACHHWSU, 0x10000098) \
- /* Multiply Accumulate High Halfword to Word Modulo Unsigned */ \
- V(machhwu, MACHHWU, 0x10000018) \
- /* Multiply Accumulate Low Halfword to Word Modulo Signed */ \
- V(maclhw, MACLHW, 0x10000358) \
- /* Multiply Accumulate Low Halfword to Word Saturate Signed */ \
- V(maclhws, MACLHWS, 0x100003D8) \
- /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */ \
- V(maclhwsu, MACLHWSU, 0x10000398) \
- /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */ \
- V(maclhwu, MACLHWU, 0x10000318) \
- /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
- V(nmacchw, NMACCHW, 0x1000015C) \
- /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
- V(nmacchws, NMACCHWS, 0x100001DC) \
- /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */ \
- V(nmachhw, NMACHHW, 0x1000005C) \
- /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */ \
- V(nmachhws, NMACHHWS, 0x100000DC) \
- /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */ \
- V(nmaclhw, NMACLHW, 0x1000035C) \
- /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */ \
- V(nmaclhws, NMACLHWS, 0x100003DC) \
-
-#define PPC_XL_OPCODE_LIST(V) \
- /* Branch Conditional to Count Register */ \
- V(bcctr, BCCTRX, 0x4C000420) \
- /* Branch Conditional to Link Register */ \
- V(bclr, BCLRX, 0x4C000020) \
- /* Condition Register AND */ \
- V(crand, CRAND, 0x4C000202) \
- /* Condition Register AND with Complement */ \
- V(crandc, CRANDC, 0x4C000102) \
- /* Condition Register Equivalent */ \
- V(creqv, CREQV, 0x4C000242) \
- /* Condition Register NAND */ \
- V(crnand, CRNAND, 0x4C0001C2) \
- /* Condition Register NOR */ \
- V(crnor, CRNOR, 0x4C000042) \
- /* Condition Register OR */ \
- V(cror, CROR, 0x4C000382) \
- /* Condition Register OR with Complement */ \
- V(crorc, CRORC, 0x4C000342) \
- /* Condition Register XOR */ \
- V(crxor, CRXOR, 0x4C000182) \
- /* Instruction Synchronize */ \
- V(isync, ISYNC, 0x4C00012C) \
- /* Move Condition Register Field */ \
- V(mcrf, MCRF, 0x4C000000) \
- /* Return From Critical Interrupt */ \
- V(rfci, RFCI, 0x4C000066) \
- /* Return From Interrupt */ \
- V(rfi, RFI, 0x4C000064) \
- /* Return From Machine Check Interrupt */ \
- V(rfmci, RFMCI, 0x4C00004C) \
- /* Embedded Hypervisor Privilege */ \
- V(ehpriv, EHPRIV, 0x7C00021C) \
- /* Return From Guest Interrupt */ \
- V(rfgi, RFGI, 0x4C0000CC) \
- /* Doze */ \
- V(doze, DOZE, 0x4C000324) \
- /* Return From Interrupt Doubleword Hypervisor */ \
- V(hrfid, HRFID, 0x4C000224) \
- /* Nap */ \
- V(nap, NAP, 0x4C000364) \
- /* Return from Event Based Branch */ \
- V(rfebb, RFEBB, 0x4C000124) \
- /* Return from Interrupt Doubleword */ \
- V(rfid, RFID, 0x4C000024) \
- /* Rip Van Winkle */ \
- V(rvwinkle, RVWINKLE, 0x4C0003E4) \
- /* Sleep */ \
+#define PPC_XO_OPCODE_LIST(V) \
+ /* Divide Doubleword */ \
+ V(divd, DIVD, 0x7C0003D2) \
+ /* Divide Doubleword Extended */ \
+ V(divde, DIVDE, 0x7C000352) \
+ /* Divide Doubleword Extended & record OV */ \
+ V(divdeo, DIVDEO, 0x7C000752) \
+ /* Divide Doubleword Extended Unsigned */ \
+ V(divdeu, DIVDEU, 0x7C000312) \
+ /* Divide Doubleword Extended Unsigned & record OV */ \
+ V(divdeuo, DIVDEUO, 0x7C000712) \
+ /* Divide Doubleword & record OV */ \
+ V(divdo, DIVDO, 0x7C0007D2) \
+ /* Divide Doubleword Unsigned */ \
+ V(divdu, DIVDU, 0x7C000392) \
+ /* Divide Doubleword Unsigned & record OV */ \
+ V(divduo, DIVDUO, 0x7C000792) \
+ /* Multiply High Doubleword */ \
+ V(mulhd, MULHD, 0x7C000092) \
+ /* Multiply High Doubleword Unsigned */ \
+ V(mulhdu, MULHDU, 0x7C000012) \
+ /* Multiply Low Doubleword */ \
+ V(mulld, MULLD, 0x7C0001D2) \
+ /* Multiply Low Doubleword & record OV */ \
+ V(mulldo, MULLDO, 0x7C0005D2) \
+ /* Add */ \
+ V(add, ADDX, 0x7C000214) \
+ /* Add Carrying */ \
+ V(addc, ADDCX, 0x7C000014) \
+ /* Add Carrying & record OV */ \
+ V(addco, ADDCO, 0x7C000414) \
+ /* Add Extended */ \
+ V(adde, ADDEX, 0x7C000114) \
+ /* Add Extended & record OV & record OV */ \
+ V(addeo, ADDEO, 0x7C000514) \
+ /* Add to Minus One Extended */ \
+ V(addme, ADDME, 0x7C0001D4) \
+ /* Add to Minus One Extended & record OV */ \
+ V(addmeo, ADDMEO, 0x7C0005D4) \
+ /* Add & record OV */ \
+ V(addo, ADDO, 0x7C000614) \
+ /* Add to Zero Extended */ \
+ V(addze, ADDZEX, 0x7C000194) \
+ /* Add to Zero Extended & record OV */ \
+ V(addzeo, ADDZEO, 0x7C000594) \
+ /* Divide Word Format */ \
+ V(divw, DIVW, 0x7C0003D6) \
+ /* Divide Word Extended */ \
+ V(divwe, DIVWE, 0x7C000356) \
+ /* Divide Word Extended & record OV */ \
+ V(divweo, DIVWEO, 0x7C000756) \
+ /* Divide Word Extended Unsigned */ \
+ V(divweu, DIVWEU, 0x7C000316) \
+ /* Divide Word Extended Unsigned & record OV */ \
+ V(divweuo, DIVWEUO, 0x7C000716) \
+ /* Divide Word & record OV */ \
+ V(divwo, DIVWO, 0x7C0007D6) \
+ /* Divide Word Unsigned */ \
+ V(divwu, DIVWU, 0x7C000396) \
+ /* Divide Word Unsigned & record OV */ \
+ V(divwuo, DIVWUO, 0x7C000796) \
+ /* Multiply High Word */ \
+ V(mulhw, MULHWX, 0x7C000096) \
+ /* Multiply High Word Unsigned */ \
+ V(mulhwu, MULHWUX, 0x7C000016) \
+ /* Multiply Low Word */ \
+ V(mullw, MULLW, 0x7C0001D6) \
+ /* Multiply Low Word & record OV */ \
+ V(mullwo, MULLWO, 0x7C0005D6) \
+ /* Negate */ \
+ V(neg, NEGX, 0x7C0000D0) \
+ /* Negate & record OV */ \
+ V(nego, NEGO, 0x7C0004D0) \
+ /* Subtract From */ \
+ V(subf, SUBFX, 0x7C000050) \
+ /* Subtract From Carrying */ \
+ V(subfc, SUBFCX, 0x7C000010) \
+ /* Subtract From Carrying & record OV */ \
+ V(subfco, SUBFCO, 0x7C000410) \
+ /* Subtract From Extended */ \
+ V(subfe, SUBFEX, 0x7C000110) \
+ /* Subtract From Extended & record OV */ \
+ V(subfeo, SUBFEO, 0x7C000510) \
+ /* Subtract From Minus One Extended */ \
+ V(subfme, SUBFME, 0x7C0001D0) \
+ /* Subtract From Minus One Extended & record OV */ \
+ V(subfmeo, SUBFMEO, 0x7C0005D0) \
+ /* Subtract From & record OV */ \
+ V(subfo, SUBFO, 0x7C000450) \
+ /* Subtract From Zero Extended */ \
+ V(subfze, SUBFZE, 0x7C000190) \
+ /* Subtract From Zero Extended & record OV */ \
+ V(subfzeo, SUBFZEO, 0x7C000590) \
+ /* Add and Generate Sixes */ \
+ V(addg, ADDG, 0x7C000094) \
+ /* Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
+ V(macchw, MACCHW, 0x10000158) \
+ /* Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
+ V(macchws, MACCHWS, 0x100001D8) \
+ /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */ \
+ V(macchwsu, MACCHWSU, 0x10000198) \
+ /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */ \
+ V(macchwu, MACCHWU, 0x10000118) \
+ /* Multiply Accumulate High Halfword to Word Modulo Signed */ \
+ V(machhw, MACHHW, 0x10000058) \
+ /* Multiply Accumulate High Halfword to Word Saturate Signed */ \
+ V(machhws, MACHHWS, 0x100000D8) \
+ /* Multiply Accumulate High Halfword to Word Saturate Unsigned */ \
+ V(machhwsu, MACHHWSU, 0x10000098) \
+ /* Multiply Accumulate High Halfword to Word Modulo Unsigned */ \
+ V(machhwu, MACHHWU, 0x10000018) \
+ /* Multiply Accumulate Low Halfword to Word Modulo Signed */ \
+ V(maclhw, MACLHW, 0x10000358) \
+ /* Multiply Accumulate Low Halfword to Word Saturate Signed */ \
+ V(maclhws, MACLHWS, 0x100003D8) \
+ /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */ \
+ V(maclhwsu, MACLHWSU, 0x10000398) \
+ /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */ \
+ V(maclhwu, MACLHWU, 0x10000318) \
+ /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */ \
+ V(nmacchw, NMACCHW, 0x1000015C) \
+ /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */ \
+ V(nmacchws, NMACCHWS, 0x100001DC) \
+ /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */ \
+ V(nmachhw, NMACHHW, 0x1000005C) \
+ /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */ \
+ V(nmachhws, NMACHHWS, 0x100000DC) \
+ /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */ \
+ V(nmaclhw, NMACLHW, 0x1000035C) \
+ /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */ \
+ V(nmaclhws, NMACLHWS, 0x100003DC)
+
+#define PPC_XL_OPCODE_LIST(V) \
+ /* Branch Conditional to Count Register */ \
+ V(bcctr, BCCTRX, 0x4C000420) \
+ /* Branch Conditional to Link Register */ \
+ V(bclr, BCLRX, 0x4C000020) \
+ /* Condition Register AND */ \
+ V(crand, CRAND, 0x4C000202) \
+ /* Condition Register AND with Complement */ \
+ V(crandc, CRANDC, 0x4C000102) \
+ /* Condition Register Equivalent */ \
+ V(creqv, CREQV, 0x4C000242) \
+ /* Condition Register NAND */ \
+ V(crnand, CRNAND, 0x4C0001C2) \
+ /* Condition Register NOR */ \
+ V(crnor, CRNOR, 0x4C000042) \
+ /* Condition Register OR */ \
+ V(cror, CROR, 0x4C000382) \
+ /* Condition Register OR with Complement */ \
+ V(crorc, CRORC, 0x4C000342) \
+ /* Condition Register XOR */ \
+ V(crxor, CRXOR, 0x4C000182) \
+ /* Instruction Synchronize */ \
+ V(isync, ISYNC, 0x4C00012C) \
+ /* Move Condition Register Field */ \
+ V(mcrf, MCRF, 0x4C000000) \
+ /* Return From Critical Interrupt */ \
+ V(rfci, RFCI, 0x4C000066) \
+ /* Return From Interrupt */ \
+ V(rfi, RFI, 0x4C000064) \
+ /* Return From Machine Check Interrupt */ \
+ V(rfmci, RFMCI, 0x4C00004C) \
+ /* Embedded Hypervisor Privilege */ \
+ V(ehpriv, EHPRIV, 0x7C00021C) \
+ /* Return From Guest Interrupt */ \
+ V(rfgi, RFGI, 0x4C0000CC) \
+ /* Doze */ \
+ V(doze, DOZE, 0x4C000324) \
+ /* Return From Interrupt Doubleword Hypervisor */ \
+ V(hrfid, HRFID, 0x4C000224) \
+ /* Nap */ \
+ V(nap, NAP, 0x4C000364) \
+ /* Return from Event Based Branch */ \
+ V(rfebb, RFEBB, 0x4C000124) \
+ /* Return from Interrupt Doubleword */ \
+ V(rfid, RFID, 0x4C000024) \
+ /* Rip Van Winkle */ \
+ V(rvwinkle, RVWINKLE, 0x4C0003E4) \
+ /* Sleep */ \
V(sleep, SLEEP, 0x4C0003A4)
-#define PPC_XX4_OPCODE_LIST(V) \
- /* VSX Select */ \
+#define PPC_XX4_OPCODE_LIST(V) \
+ /* VSX Select */ \
V(xxsel, XXSEL, 0xF0000030)
-#define PPC_I_OPCODE_LIST(V) \
- /* Branch */ \
+#define PPC_I_OPCODE_LIST(V) \
+ /* Branch */ \
V(b, BX, 0x48000000)
-#define PPC_M_OPCODE_LIST(V) \
- /* Rotate Left Word Immediate then Mask Insert */ \
- V(rlwimi, RLWIMIX, 0x50000000) \
- /* Rotate Left Word Immediate then AND with Mask */ \
- V(rlwinm, RLWINMX, 0x54000000) \
- /* Rotate Left Word then AND with Mask */ \
+#define PPC_M_OPCODE_LIST(V) \
+ /* Rotate Left Word Immediate then Mask Insert */ \
+ V(rlwimi, RLWIMIX, 0x50000000) \
+ /* Rotate Left Word Immediate then AND with Mask */ \
+ V(rlwinm, RLWINMX, 0x54000000) \
+ /* Rotate Left Word then AND with Mask */ \
V(rlwnm, RLWNMX, 0x5C000000)
-#define PPC_VX_OPCODE_LIST(V) \
- /* Decimal Add Modulo */ \
- V(bcdadd, BCDADD, 0xF0000400) \
- /* Decimal Subtract Modulo */ \
- V(bcdsub, BCDSUB, 0xF0000440) \
- /* Move From Vector Status and Control Register */ \
- V(mfvscr, MFVSCR, 0x10000604) \
- /* Move To Vector Status and Control Register */ \
- V(mtvscr, MTVSCR, 0x10000644) \
- /* Vector Add & write Carry Unsigned Quadword */ \
- V(vaddcuq, VADDCUQ, 0x10000140) \
- /* Vector Add and Write Carry-Out Unsigned Word */ \
- V(vaddcuw, VADDCUW, 0x10000180) \
- /* Vector Add Single-Precision */ \
- V(vaddfp, VADDFP, 0x1000000A) \
- /* Vector Add Signed Byte Saturate */ \
- V(vaddsbs, VADDSBS, 0x10000300) \
- /* Vector Add Signed Halfword Saturate */ \
- V(vaddshs, VADDSHS, 0x10000340) \
- /* Vector Add Signed Word Saturate */ \
- V(vaddsws, VADDSWS, 0x10000380) \
- /* Vector Add Unsigned Byte Modulo */ \
- V(vaddubm, VADDUBM, 0x10000000) \
- /* Vector Add Unsigned Byte Saturate */ \
- V(vaddubs, VADDUBS, 0x10000200) \
- /* Vector Add Unsigned Doubleword Modulo */ \
- V(vaddudm, VADDUDM, 0x100000C0) \
- /* Vector Add Unsigned Halfword Modulo */ \
- V(vadduhm, VADDUHM, 0x10000040) \
- /* Vector Add Unsigned Halfword Saturate */ \
- V(vadduhs, VADDUHS, 0x10000240) \
- /* Vector Add Unsigned Quadword Modulo */ \
- V(vadduqm, VADDUQM, 0x10000100) \
- /* Vector Add Unsigned Word Modulo */ \
- V(vadduwm, VADDUWM, 0x10000080) \
- /* Vector Add Unsigned Word Saturate */ \
- V(vadduws, VADDUWS, 0x10000280) \
- /* Vector Logical AND */ \
- V(vand, VAND, 0x10000404) \
- /* Vector Logical AND with Complement */ \
- V(vandc, VANDC, 0x10000444) \
- /* Vector Average Signed Byte */ \
- V(vavgsb, VAVGSB, 0x10000502) \
- /* Vector Average Signed Halfword */ \
- V(vavgsh, VAVGSH, 0x10000542) \
- /* Vector Average Signed Word */ \
- V(vavgsw, VAVGSW, 0x10000582) \
- /* Vector Average Unsigned Byte */ \
- V(vavgub, VAVGUB, 0x10000402) \
- /* Vector Average Unsigned Halfword */ \
- V(vavguh, VAVGUH, 0x10000442) \
- /* Vector Average Unsigned Word */ \
- V(vavguw, VAVGUW, 0x10000482) \
- /* Vector Bit Permute Quadword */ \
- V(vbpermq, VBPERMQ, 0x1000054C) \
- /* Vector Convert From Signed Fixed-Point Word To Single-Precision */ \
- V(vcfsx, VCFSX, 0x1000034A) \
- /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \
- V(vcfux, VCFUX, 0x1000030A) \
- /* Vector Count Leading Zeros Byte */ \
- V(vclzb, VCLZB, 0x10000702) \
- /* Vector Count Leading Zeros Doubleword */ \
- V(vclzd, VCLZD, 0x100007C2) \
- /* Vector Count Leading Zeros Halfword */ \
- V(vclzh, VCLZH, 0x10000742) \
- /* Vector Count Leading Zeros Word */ \
- V(vclzw, VCLZW, 0x10000782) \
- /* Vector Convert From Single-Precision To Signed Fixed-Point Word */ \
- /* Saturate */ \
- V(vctsxs, VCTSXS, 0x100003CA) \
- /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */ \
- /* Saturate */ \
- V(vctuxs, VCTUXS, 0x1000038A) \
- /* Vector Equivalence */ \
- V(veqv, VEQV, 0x10000684) \
- /* Vector 2 Raised to the Exponent Estimate Single-Precision */ \
- V(vexptefp, VEXPTEFP, 0x1000018A) \
- /* Vector Gather Bits by Byte by Doubleword */ \
- V(vgbbd, VGBBD, 0x1000050C) \
- /* Vector Log Base 2 Estimate Single-Precision */ \
- V(vlogefp, VLOGEFP, 0x100001CA) \
- /* Vector Maximum Single-Precision */ \
- V(vmaxfp, VMAXFP, 0x1000040A) \
- /* Vector Maximum Signed Byte */ \
- V(vmaxsb, VMAXSB, 0x10000102) \
- /* Vector Maximum Signed Doubleword */ \
- V(vmaxsd, VMAXSD, 0x100001C2) \
- /* Vector Maximum Signed Halfword */ \
- V(vmaxsh, VMAXSH, 0x10000142) \
- /* Vector Maximum Signed Word */ \
- V(vmaxsw, VMAXSW, 0x10000182) \
- /* Vector Maximum Unsigned Byte */ \
- V(vmaxub, VMAXUB, 0x10000002) \
- /* Vector Maximum Unsigned Doubleword */ \
- V(vmaxud, VMAXUD, 0x100000C2) \
- /* Vector Maximum Unsigned Halfword */ \
- V(vmaxuh, VMAXUH, 0x10000042) \
- /* Vector Maximum Unsigned Word */ \
- V(vmaxuw, VMAXUW, 0x10000082) \
- /* Vector Minimum Single-Precision */ \
- V(vminfp, VMINFP, 0x1000044A) \
- /* Vector Minimum Signed Byte */ \
- V(vminsb, VMINSB, 0x10000302) \
- /* Vector Minimum Signed Halfword */ \
- V(vminsh, VMINSH, 0x10000342) \
- /* Vector Minimum Signed Word */ \
- V(vminsw, VMINSW, 0x10000382) \
- /* Vector Minimum Unsigned Byte */ \
- V(vminub, VMINUB, 0x10000202) \
- /* Vector Minimum Unsigned Doubleword */ \
- V(vminud, VMINUD, 0x100002C2) \
- /* Vector Minimum Unsigned Halfword */ \
- V(vminuh, VMINUH, 0x10000242) \
- /* Vector Minimum Unsigned Word */ \
- V(vminuw, VMINUW, 0x10000282) \
- /* Vector Merge High Byte */ \
- V(vmrghb, VMRGHB, 0x1000000C) \
- /* Vector Merge High Halfword */ \
- V(vmrghh, VMRGHH, 0x1000004C) \
- /* Vector Merge High Word */ \
- V(vmrghw, VMRGHW, 0x1000008C) \
- /* Vector Merge Low Byte */ \
- V(vmrglb, VMRGLB, 0x1000010C) \
- /* Vector Merge Low Halfword */ \
- V(vmrglh, VMRGLH, 0x1000014C) \
- /* Vector Merge Low Word */ \
- V(vmrglw, VMRGLW, 0x1000018C) \
- /* Vector Multiply Even Signed Byte */ \
- V(vmulesb, VMULESB, 0x10000308) \
- /* Vector Multiply Even Signed Halfword */ \
- V(vmulesh, VMULESH, 0x10000348) \
- /* Vector Multiply Even Signed Word */ \
- V(vmulesw, VMULESW, 0x10000388) \
- /* Vector Multiply Even Unsigned Byte */ \
- V(vmuleub, VMULEUB, 0x10000208) \
- /* Vector Multiply Even Unsigned Halfword */ \
- V(vmuleuh, VMULEUH, 0x10000248) \
- /* Vector Multiply Even Unsigned Word */ \
- V(vmuleuw, VMULEUW, 0x10000288) \
- /* Vector Multiply Odd Signed Byte */ \
- V(vmulosb, VMULOSB, 0x10000108) \
- /* Vector Multiply Odd Signed Halfword */ \
- V(vmulosh, VMULOSH, 0x10000148) \
- /* Vector Multiply Odd Signed Word */ \
- V(vmulosw, VMULOSW, 0x10000188) \
- /* Vector Multiply Odd Unsigned Byte */ \
- V(vmuloub, VMULOUB, 0x10000008) \
- /* Vector Multiply Odd Unsigned Halfword */ \
- V(vmulouh, VMULOUH, 0x10000048) \
- /* Vector Multiply Odd Unsigned Word */ \
- V(vmulouw, VMULOUW, 0x10000088) \
- /* Vector Multiply Unsigned Word Modulo */ \
- V(vmuluwm, VMULUWM, 0x10000089) \
- /* Vector NAND */ \
- V(vnand, VNAND, 0x10000584) \
- /* Vector Logical NOR */ \
- V(vnor, VNOR, 0x10000504) \
- /* Vector Logical OR */ \
- V(vor, VOR, 0x10000484) \
- /* Vector OR with Complement */ \
- V(vorc, VORC, 0x10000544) \
- /* Vector Pack Pixel */ \
- V(vpkpx, VPKPX, 0x1000030E) \
- /* Vector Pack Signed Doubleword Signed Saturate */ \
- V(vpksdss, VPKSDSS, 0x100005CE) \
- /* Vector Pack Signed Doubleword Unsigned Saturate */ \
- V(vpksdus, VPKSDUS, 0x1000054E) \
- /* Vector Pack Signed Halfword Signed Saturate */ \
- V(vpkshss, VPKSHSS, 0x1000018E) \
- /* Vector Pack Signed Halfword Unsigned Saturate */ \
- V(vpkshus, VPKSHUS, 0x1000010E) \
- /* Vector Pack Signed Word Signed Saturate */ \
- V(vpkswss, VPKSWSS, 0x100001CE) \
- /* Vector Pack Signed Word Unsigned Saturate */ \
- V(vpkswus, VPKSWUS, 0x1000014E) \
- /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
- V(vpkudum, VPKUDUM, 0x1000044E) \
- /* Vector Pack Unsigned Doubleword Unsigned Saturate */ \
- V(vpkudus, VPKUDUS, 0x100004CE) \
- /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
- V(vpkuhum, VPKUHUM, 0x1000000E) \
- /* Vector Pack Unsigned Halfword Unsigned Saturate */ \
- V(vpkuhus, VPKUHUS, 0x1000008E) \
- /* Vector Pack Unsigned Word Unsigned Modulo */ \
- V(vpkuwum, VPKUWUM, 0x1000004E) \
- /* Vector Pack Unsigned Word Unsigned Saturate */ \
- V(vpkuwus, VPKUWUS, 0x100000CE) \
- /* Vector Polynomial Multiply-Sum Byte */ \
- V(vpmsumb, VPMSUMB, 0x10000408) \
- /* Vector Polynomial Multiply-Sum Doubleword */ \
- V(vpmsumd, VPMSUMD, 0x100004C8) \
- /* Vector Polynomial Multiply-Sum Halfword */ \
- V(vpmsumh, VPMSUMH, 0x10000448) \
- /* Vector Polynomial Multiply-Sum Word */ \
- V(vpmsumw, VPMSUMW, 0x10000488) \
- /* Vector Population Count Byte */ \
- V(vpopcntb, VPOPCNTB, 0x10000703) \
- /* Vector Population Count Doubleword */ \
- V(vpopcntd, VPOPCNTD, 0x100007C3) \
- /* Vector Population Count Halfword */ \
- V(vpopcnth, VPOPCNTH, 0x10000743) \
- /* Vector Population Count Word */ \
- V(vpopcntw, VPOPCNTW, 0x10000783) \
- /* Vector Reciprocal Estimate Single-Precision */ \
- V(vrefp, VREFP, 0x1000010A) \
- /* Vector Round to Single-Precision Integer toward -Infinity */ \
- V(vrfim, VRFIM, 0x100002CA) \
- /* Vector Round to Single-Precision Integer Nearest */ \
- V(vrfin, VRFIN, 0x1000020A) \
- /* Vector Round to Single-Precision Integer toward +Infinity */ \
- V(vrfip, VRFIP, 0x1000028A) \
- /* Vector Round to Single-Precision Integer toward Zero */ \
- V(vrfiz, VRFIZ, 0x1000024A) \
- /* Vector Rotate Left Byte */ \
- V(vrlb, VRLB, 0x10000004) \
- /* Vector Rotate Left Doubleword */ \
- V(vrld, VRLD, 0x100000C4) \
- /* Vector Rotate Left Halfword */ \
- V(vrlh, VRLH, 0x10000044) \
- /* Vector Rotate Left Word */ \
- V(vrlw, VRLW, 0x10000084) \
- /* Vector Reciprocal Square Root Estimate Single-Precision */ \
- V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
- /* Vector Shift Left */ \
- V(vsl, VSL, 0x100001C4) \
- /* Vector Shift Left Byte */ \
- V(vslb, VSLB, 0x10000104) \
- /* Vector Shift Left Doubleword */ \
- V(vsld, VSLD, 0x100005C4) \
- /* Vector Shift Left Halfword */ \
- V(vslh, VSLH, 0x10000144) \
- /* Vector Shift Left by Octet */ \
- V(vslo, VSLO, 0x1000040C) \
- /* Vector Shift Left Word */ \
- V(vslw, VSLW, 0x10000184) \
- /* Vector Splat Byte */ \
- V(vspltb, VSPLTB, 0x1000020C) \
- /* Vector Splat Halfword */ \
- V(vsplth, VSPLTH, 0x1000024C) \
- /* Vector Splat Immediate Signed Byte */ \
- V(vspltisb, VSPLTISB, 0x1000030C) \
- /* Vector Splat Immediate Signed Halfword */ \
- V(vspltish, VSPLTISH, 0x1000034C) \
- /* Vector Splat Immediate Signed Word */ \
- V(vspltisw, VSPLTISW, 0x1000038C) \
- /* Vector Splat Word */ \
- V(vspltw, VSPLTW, 0x1000028C) \
- /* Vector Shift Right */ \
- V(vsr, VSR, 0x100002C4) \
- /* Vector Shift Right Algebraic Byte */ \
- V(vsrab, VSRAB, 0x10000304) \
- /* Vector Shift Right Algebraic Doubleword */ \
- V(vsrad, VSRAD, 0x100003C4) \
- /* Vector Shift Right Algebraic Halfword */ \
- V(vsrah, VSRAH, 0x10000344) \
- /* Vector Shift Right Algebraic Word */ \
- V(vsraw, VSRAW, 0x10000384) \
- /* Vector Shift Right Byte */ \
- V(vsrb, VSRB, 0x10000204) \
- /* Vector Shift Right Doubleword */ \
- V(vsrd, VSRD, 0x100006C4) \
- /* Vector Shift Right Halfword */ \
- V(vsrh, VSRH, 0x10000244) \
- /* Vector Shift Right by Octet */ \
- V(vsro, VSRO, 0x1000044C) \
- /* Vector Shift Right Word */ \
- V(vsrw, VSRW, 0x10000284) \
- /* Vector Subtract & write Carry Unsigned Quadword */ \
- V(vsubcuq, VSUBCUQ, 0x10000540) \
- /* Vector Subtract and Write Carry-Out Unsigned Word */ \
- V(vsubcuw, VSUBCUW, 0x10000580) \
- /* Vector Subtract Single-Precision */ \
- V(vsubfp, VSUBFP, 0x1000004A) \
- /* Vector Subtract Signed Byte Saturate */ \
- V(vsubsbs, VSUBSBS, 0x10000700) \
- /* Vector Subtract Signed Halfword Saturate */ \
- V(vsubshs, VSUBSHS, 0x10000740) \
- /* Vector Subtract Signed Word Saturate */ \
- V(vsubsws, VSUBSWS, 0x10000780) \
- /* Vector Subtract Unsigned Byte Modulo */ \
- V(vsububm, VSUBUBM, 0x10000400) \
- /* Vector Subtract Unsigned Byte Saturate */ \
- V(vsububs, VSUBUBS, 0x10000600) \
- /* Vector Subtract Unsigned Doubleword Modulo */ \
- V(vsubudm, VSUBUDM, 0x100004C0) \
- /* Vector Subtract Unsigned Halfword Modulo */ \
- V(vsubuhm, VSUBUHM, 0x10000440) \
- /* Vector Subtract Unsigned Halfword Saturate */ \
- V(vsubuhs, VSUBUHS, 0x10000640) \
- /* Vector Subtract Unsigned Quadword Modulo */ \
- V(vsubuqm, VSUBUQM, 0x10000500) \
- /* Vector Subtract Unsigned Word Modulo */ \
- V(vsubuwm, VSUBUWM, 0x10000480) \
- /* Vector Subtract Unsigned Word Saturate */ \
- V(vsubuws, VSUBUWS, 0x10000680) \
- /* Vector Sum across Half Signed Word Saturate */ \
- V(vsum2sws, VSUM2SWS, 0x10000688) \
- /* Vector Sum across Quarter Signed Byte Saturate */ \
- V(vsum4sbs, VSUM4SBS, 0x10000708) \
- /* Vector Sum across Quarter Signed Halfword Saturate */ \
- V(vsum4shs, VSUM4SHS, 0x10000648) \
- /* Vector Sum across Quarter Unsigned Byte Saturate */ \
- V(vsum4bus, VSUM4BUS, 0x10000608) \
- /* Vector Sum across Signed Word Saturate */ \
- V(vsumsws, VSUMSWS, 0x10000788) \
- /* Vector Unpack High Pixel */ \
- V(vupkhpx, VUPKHPX, 0x1000034E) \
- /* Vector Unpack High Signed Byte */ \
- V(vupkhsb, VUPKHSB, 0x1000020E) \
- /* Vector Unpack High Signed Halfword */ \
- V(vupkhsh, VUPKHSH, 0x1000024E) \
- /* Vector Unpack High Signed Word */ \
- V(vupkhsw, VUPKHSW, 0x1000064E) \
- /* Vector Unpack Low Pixel */ \
- V(vupklpx, VUPKLPX, 0x100003CE) \
- /* Vector Unpack Low Signed Byte */ \
- V(vupklsb, VUPKLSB, 0x1000028E) \
- /* Vector Unpack Low Signed Halfword */ \
- V(vupklsh, VUPKLSH, 0x100002CE) \
- /* Vector Unpack Low Signed Word */ \
- V(vupklsw, VUPKLSW, 0x100006CE) \
- /* Vector Logical XOR */ \
- V(vxor, VXOR, 0x100004C4) \
- /* Vector AES Cipher */ \
- V(vcipher, VCIPHER, 0x10000508) \
- /* Vector AES Cipher Last */ \
- V(vcipherlast, VCIPHERLAST, 0x10000509) \
- /* Vector AES Inverse Cipher */ \
- V(vncipher, VNCIPHER, 0x10000548) \
- /* Vector AES Inverse Cipher Last */ \
- V(vncipherlast, VNCIPHERLAST, 0x10000549) \
- /* Vector AES S-Box */ \
- V(vsbox, VSBOX, 0x100005C8) \
- /* Vector SHA-512 Sigma Doubleword */ \
- V(vshasigmad, VSHASIGMAD, 0x100006C2) \
- /* Vector SHA-256 Sigma Word */ \
- V(vshasigmaw, VSHASIGMAW, 0x10000682) \
- /* Vector Merge Even Word */ \
- V(vmrgew, VMRGEW, 0x1000078C) \
- /* Vector Merge Odd Word */ \
+#define PPC_VX_OPCODE_LIST(V) \
+ /* Decimal Add Modulo */ \
+ V(bcdadd, BCDADD, 0xF0000400) \
+ /* Decimal Subtract Modulo */ \
+ V(bcdsub, BCDSUB, 0xF0000440) \
+ /* Move From Vector Status and Control Register */ \
+ V(mfvscr, MFVSCR, 0x10000604) \
+ /* Move To Vector Status and Control Register */ \
+ V(mtvscr, MTVSCR, 0x10000644) \
+ /* Vector Add & write Carry Unsigned Quadword */ \
+ V(vaddcuq, VADDCUQ, 0x10000140) \
+ /* Vector Add and Write Carry-Out Unsigned Word */ \
+ V(vaddcuw, VADDCUW, 0x10000180) \
+ /* Vector Add Single-Precision */ \
+ V(vaddfp, VADDFP, 0x1000000A) \
+ /* Vector Add Signed Byte Saturate */ \
+ V(vaddsbs, VADDSBS, 0x10000300) \
+ /* Vector Add Signed Halfword Saturate */ \
+ V(vaddshs, VADDSHS, 0x10000340) \
+ /* Vector Add Signed Word Saturate */ \
+ V(vaddsws, VADDSWS, 0x10000380) \
+ /* Vector Add Unsigned Byte Modulo */ \
+ V(vaddubm, VADDUBM, 0x10000000) \
+ /* Vector Add Unsigned Byte Saturate */ \
+ V(vaddubs, VADDUBS, 0x10000200) \
+ /* Vector Add Unsigned Doubleword Modulo */ \
+ V(vaddudm, VADDUDM, 0x100000C0) \
+ /* Vector Add Unsigned Halfword Modulo */ \
+ V(vadduhm, VADDUHM, 0x10000040) \
+ /* Vector Add Unsigned Halfword Saturate */ \
+ V(vadduhs, VADDUHS, 0x10000240) \
+ /* Vector Add Unsigned Quadword Modulo */ \
+ V(vadduqm, VADDUQM, 0x10000100) \
+ /* Vector Add Unsigned Word Modulo */ \
+ V(vadduwm, VADDUWM, 0x10000080) \
+ /* Vector Add Unsigned Word Saturate */ \
+ V(vadduws, VADDUWS, 0x10000280) \
+ /* Vector Logical AND */ \
+ V(vand, VAND, 0x10000404) \
+ /* Vector Logical AND with Complement */ \
+ V(vandc, VANDC, 0x10000444) \
+ /* Vector Average Signed Byte */ \
+ V(vavgsb, VAVGSB, 0x10000502) \
+ /* Vector Average Signed Halfword */ \
+ V(vavgsh, VAVGSH, 0x10000542) \
+ /* Vector Average Signed Word */ \
+ V(vavgsw, VAVGSW, 0x10000582) \
+ /* Vector Average Unsigned Byte */ \
+ V(vavgub, VAVGUB, 0x10000402) \
+ /* Vector Average Unsigned Halfword */ \
+ V(vavguh, VAVGUH, 0x10000442) \
+ /* Vector Average Unsigned Word */ \
+ V(vavguw, VAVGUW, 0x10000482) \
+ /* Vector Bit Permute Quadword */ \
+ V(vbpermq, VBPERMQ, 0x1000054C) \
+ /* Vector Convert From Signed Fixed-Point Word To Single-Precision */ \
+ V(vcfsx, VCFSX, 0x1000034A) \
+ /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */ \
+ V(vcfux, VCFUX, 0x1000030A) \
+ /* Vector Count Leading Zeros Byte */ \
+ V(vclzb, VCLZB, 0x10000702) \
+ /* Vector Count Leading Zeros Doubleword */ \
+ V(vclzd, VCLZD, 0x100007C2) \
+ /* Vector Count Leading Zeros Halfword */ \
+ V(vclzh, VCLZH, 0x10000742) \
+ /* Vector Count Leading Zeros Word */ \
+ V(vclzw, VCLZW, 0x10000782) \
+ /* Vector Convert From Single-Precision To Signed Fixed-Point Word */ \
+ /* Saturate */ \
+ V(vctsxs, VCTSXS, 0x100003CA) \
+ /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */ \
+ /* Saturate */ \
+ V(vctuxs, VCTUXS, 0x1000038A) \
+ /* Vector Equivalence */ \
+ V(veqv, VEQV, 0x10000684) \
+ /* Vector 2 Raised to the Exponent Estimate Single-Precision */ \
+ V(vexptefp, VEXPTEFP, 0x1000018A) \
+ /* Vector Gather Bits by Byte by Doubleword */ \
+ V(vgbbd, VGBBD, 0x1000050C) \
+ /* Vector Log Base 2 Estimate Single-Precision */ \
+ V(vlogefp, VLOGEFP, 0x100001CA) \
+ /* Vector Maximum Single-Precision */ \
+ V(vmaxfp, VMAXFP, 0x1000040A) \
+ /* Vector Maximum Signed Byte */ \
+ V(vmaxsb, VMAXSB, 0x10000102) \
+ /* Vector Maximum Signed Doubleword */ \
+ V(vmaxsd, VMAXSD, 0x100001C2) \
+ /* Vector Maximum Signed Halfword */ \
+ V(vmaxsh, VMAXSH, 0x10000142) \
+ /* Vector Maximum Signed Word */ \
+ V(vmaxsw, VMAXSW, 0x10000182) \
+ /* Vector Maximum Unsigned Byte */ \
+ V(vmaxub, VMAXUB, 0x10000002) \
+ /* Vector Maximum Unsigned Doubleword */ \
+ V(vmaxud, VMAXUD, 0x100000C2) \
+ /* Vector Maximum Unsigned Halfword */ \
+ V(vmaxuh, VMAXUH, 0x10000042) \
+ /* Vector Maximum Unsigned Word */ \
+ V(vmaxuw, VMAXUW, 0x10000082) \
+ /* Vector Minimum Single-Precision */ \
+ V(vminfp, VMINFP, 0x1000044A) \
+ /* Vector Minimum Signed Byte */ \
+ V(vminsb, VMINSB, 0x10000302) \
+ /* Vector Minimum Signed Halfword */ \
+ V(vminsh, VMINSH, 0x10000342) \
+ /* Vector Minimum Signed Word */ \
+ V(vminsw, VMINSW, 0x10000382) \
+ /* Vector Minimum Unsigned Byte */ \
+ V(vminub, VMINUB, 0x10000202) \
+ /* Vector Minimum Unsigned Doubleword */ \
+ V(vminud, VMINUD, 0x100002C2) \
+ /* Vector Minimum Unsigned Halfword */ \
+ V(vminuh, VMINUH, 0x10000242) \
+ /* Vector Minimum Unsigned Word */ \
+ V(vminuw, VMINUW, 0x10000282) \
+ /* Vector Merge High Byte */ \
+ V(vmrghb, VMRGHB, 0x1000000C) \
+ /* Vector Merge High Halfword */ \
+ V(vmrghh, VMRGHH, 0x1000004C) \
+ /* Vector Merge High Word */ \
+ V(vmrghw, VMRGHW, 0x1000008C) \
+ /* Vector Merge Low Byte */ \
+ V(vmrglb, VMRGLB, 0x1000010C) \
+ /* Vector Merge Low Halfword */ \
+ V(vmrglh, VMRGLH, 0x1000014C) \
+ /* Vector Merge Low Word */ \
+ V(vmrglw, VMRGLW, 0x1000018C) \
+ /* Vector Multiply Even Signed Byte */ \
+ V(vmulesb, VMULESB, 0x10000308) \
+ /* Vector Multiply Even Signed Halfword */ \
+ V(vmulesh, VMULESH, 0x10000348) \
+ /* Vector Multiply Even Signed Word */ \
+ V(vmulesw, VMULESW, 0x10000388) \
+ /* Vector Multiply Even Unsigned Byte */ \
+ V(vmuleub, VMULEUB, 0x10000208) \
+ /* Vector Multiply Even Unsigned Halfword */ \
+ V(vmuleuh, VMULEUH, 0x10000248) \
+ /* Vector Multiply Even Unsigned Word */ \
+ V(vmuleuw, VMULEUW, 0x10000288) \
+ /* Vector Multiply Odd Signed Byte */ \
+ V(vmulosb, VMULOSB, 0x10000108) \
+ /* Vector Multiply Odd Signed Halfword */ \
+ V(vmulosh, VMULOSH, 0x10000148) \
+ /* Vector Multiply Odd Signed Word */ \
+ V(vmulosw, VMULOSW, 0x10000188) \
+ /* Vector Multiply Odd Unsigned Byte */ \
+ V(vmuloub, VMULOUB, 0x10000008) \
+ /* Vector Multiply Odd Unsigned Halfword */ \
+ V(vmulouh, VMULOUH, 0x10000048) \
+ /* Vector Multiply Odd Unsigned Word */ \
+ V(vmulouw, VMULOUW, 0x10000088) \
+ /* Vector Multiply Unsigned Word Modulo */ \
+ V(vmuluwm, VMULUWM, 0x10000089) \
+ /* Vector NAND */ \
+ V(vnand, VNAND, 0x10000584) \
+ /* Vector Logical NOR */ \
+ V(vnor, VNOR, 0x10000504) \
+ /* Vector Logical OR */ \
+ V(vor, VOR, 0x10000484) \
+ /* Vector OR with Complement */ \
+ V(vorc, VORC, 0x10000544) \
+ /* Vector Pack Pixel */ \
+ V(vpkpx, VPKPX, 0x1000030E) \
+ /* Vector Pack Signed Doubleword Signed Saturate */ \
+ V(vpksdss, VPKSDSS, 0x100005CE) \
+ /* Vector Pack Signed Doubleword Unsigned Saturate */ \
+ V(vpksdus, VPKSDUS, 0x1000054E) \
+ /* Vector Pack Signed Halfword Signed Saturate */ \
+ V(vpkshss, VPKSHSS, 0x1000018E) \
+ /* Vector Pack Signed Halfword Unsigned Saturate */ \
+ V(vpkshus, VPKSHUS, 0x1000010E) \
+ /* Vector Pack Signed Word Signed Saturate */ \
+ V(vpkswss, VPKSWSS, 0x100001CE) \
+ /* Vector Pack Signed Word Unsigned Saturate */ \
+ V(vpkswus, VPKSWUS, 0x1000014E) \
+ /* Vector Pack Unsigned Doubleword Unsigned Modulo */ \
+ V(vpkudum, VPKUDUM, 0x1000044E) \
+ /* Vector Pack Unsigned Doubleword Unsigned Saturate */ \
+ V(vpkudus, VPKUDUS, 0x100004CE) \
+ /* Vector Pack Unsigned Halfword Unsigned Modulo */ \
+ V(vpkuhum, VPKUHUM, 0x1000000E) \
+ /* Vector Pack Unsigned Halfword Unsigned Saturate */ \
+ V(vpkuhus, VPKUHUS, 0x1000008E) \
+ /* Vector Pack Unsigned Word Unsigned Modulo */ \
+ V(vpkuwum, VPKUWUM, 0x1000004E) \
+ /* Vector Pack Unsigned Word Unsigned Saturate */ \
+ V(vpkuwus, VPKUWUS, 0x100000CE) \
+ /* Vector Polynomial Multiply-Sum Byte */ \
+ V(vpmsumb, VPMSUMB, 0x10000408) \
+ /* Vector Polynomial Multiply-Sum Doubleword */ \
+ V(vpmsumd, VPMSUMD, 0x100004C8) \
+ /* Vector Polynomial Multiply-Sum Halfword */ \
+ V(vpmsumh, VPMSUMH, 0x10000448) \
+ /* Vector Polynomial Multiply-Sum Word */ \
+ V(vpmsumw, VPMSUMW, 0x10000488) \
+ /* Vector Population Count Byte */ \
+ V(vpopcntb, VPOPCNTB, 0x10000703) \
+ /* Vector Population Count Doubleword */ \
+ V(vpopcntd, VPOPCNTD, 0x100007C3) \
+ /* Vector Population Count Halfword */ \
+ V(vpopcnth, VPOPCNTH, 0x10000743) \
+ /* Vector Population Count Word */ \
+ V(vpopcntw, VPOPCNTW, 0x10000783) \
+ /* Vector Reciprocal Estimate Single-Precision */ \
+ V(vrefp, VREFP, 0x1000010A) \
+ /* Vector Round to Single-Precision Integer toward -Infinity */ \
+ V(vrfim, VRFIM, 0x100002CA) \
+ /* Vector Round to Single-Precision Integer Nearest */ \
+ V(vrfin, VRFIN, 0x1000020A) \
+ /* Vector Round to Single-Precision Integer toward +Infinity */ \
+ V(vrfip, VRFIP, 0x1000028A) \
+ /* Vector Round to Single-Precision Integer toward Zero */ \
+ V(vrfiz, VRFIZ, 0x1000024A) \
+ /* Vector Rotate Left Byte */ \
+ V(vrlb, VRLB, 0x10000004) \
+ /* Vector Rotate Left Doubleword */ \
+ V(vrld, VRLD, 0x100000C4) \
+ /* Vector Rotate Left Halfword */ \
+ V(vrlh, VRLH, 0x10000044) \
+ /* Vector Rotate Left Word */ \
+ V(vrlw, VRLW, 0x10000084) \
+ /* Vector Reciprocal Square Root Estimate Single-Precision */ \
+ V(vrsqrtefp, VRSQRTEFP, 0x1000014A) \
+ /* Vector Shift Left */ \
+ V(vsl, VSL, 0x100001C4) \
+ /* Vector Shift Left Byte */ \
+ V(vslb, VSLB, 0x10000104) \
+ /* Vector Shift Left Doubleword */ \
+ V(vsld, VSLD, 0x100005C4) \
+ /* Vector Shift Left Halfword */ \
+ V(vslh, VSLH, 0x10000144) \
+ /* Vector Shift Left by Octet */ \
+ V(vslo, VSLO, 0x1000040C) \
+ /* Vector Shift Left Word */ \
+ V(vslw, VSLW, 0x10000184) \
+ /* Vector Splat Byte */ \
+ V(vspltb, VSPLTB, 0x1000020C) \
+ /* Vector Splat Halfword */ \
+ V(vsplth, VSPLTH, 0x1000024C) \
+ /* Vector Splat Immediate Signed Byte */ \
+ V(vspltisb, VSPLTISB, 0x1000030C) \
+ /* Vector Splat Immediate Signed Halfword */ \
+ V(vspltish, VSPLTISH, 0x1000034C) \
+ /* Vector Splat Immediate Signed Word */ \
+ V(vspltisw, VSPLTISW, 0x1000038C) \
+ /* Vector Splat Word */ \
+ V(vspltw, VSPLTW, 0x1000028C) \
+ /* Vector Shift Right */ \
+ V(vsr, VSR, 0x100002C4) \
+ /* Vector Shift Right Algebraic Byte */ \
+ V(vsrab, VSRAB, 0x10000304) \
+ /* Vector Shift Right Algebraic Doubleword */ \
+ V(vsrad, VSRAD, 0x100003C4) \
+ /* Vector Shift Right Algebraic Halfword */ \
+ V(vsrah, VSRAH, 0x10000344) \
+ /* Vector Shift Right Algebraic Word */ \
+ V(vsraw, VSRAW, 0x10000384) \
+ /* Vector Shift Right Byte */ \
+ V(vsrb, VSRB, 0x10000204) \
+ /* Vector Shift Right Doubleword */ \
+ V(vsrd, VSRD, 0x100006C4) \
+ /* Vector Shift Right Halfword */ \
+ V(vsrh, VSRH, 0x10000244) \
+ /* Vector Shift Right by Octet */ \
+ V(vsro, VSRO, 0x1000044C) \
+ /* Vector Shift Right Word */ \
+ V(vsrw, VSRW, 0x10000284) \
+ /* Vector Subtract & write Carry Unsigned Quadword */ \
+ V(vsubcuq, VSUBCUQ, 0x10000540) \
+ /* Vector Subtract and Write Carry-Out Unsigned Word */ \
+ V(vsubcuw, VSUBCUW, 0x10000580) \
+ /* Vector Subtract Single-Precision */ \
+ V(vsubfp, VSUBFP, 0x1000004A) \
+ /* Vector Subtract Signed Byte Saturate */ \
+ V(vsubsbs, VSUBSBS, 0x10000700) \
+ /* Vector Subtract Signed Halfword Saturate */ \
+ V(vsubshs, VSUBSHS, 0x10000740) \
+ /* Vector Subtract Signed Word Saturate */ \
+ V(vsubsws, VSUBSWS, 0x10000780) \
+ /* Vector Subtract Unsigned Byte Modulo */ \
+ V(vsububm, VSUBUBM, 0x10000400) \
+ /* Vector Subtract Unsigned Byte Saturate */ \
+ V(vsububs, VSUBUBS, 0x10000600) \
+ /* Vector Subtract Unsigned Doubleword Modulo */ \
+ V(vsubudm, VSUBUDM, 0x100004C0) \
+ /* Vector Subtract Unsigned Halfword Modulo */ \
+ V(vsubuhm, VSUBUHM, 0x10000440) \
+ /* Vector Subtract Unsigned Halfword Saturate */ \
+ V(vsubuhs, VSUBUHS, 0x10000640) \
+ /* Vector Subtract Unsigned Quadword Modulo */ \
+ V(vsubuqm, VSUBUQM, 0x10000500) \
+ /* Vector Subtract Unsigned Word Modulo */ \
+ V(vsubuwm, VSUBUWM, 0x10000480) \
+ /* Vector Subtract Unsigned Word Saturate */ \
+ V(vsubuws, VSUBUWS, 0x10000680) \
+ /* Vector Sum across Half Signed Word Saturate */ \
+ V(vsum2sws, VSUM2SWS, 0x10000688) \
+ /* Vector Sum across Quarter Signed Byte Saturate */ \
+ V(vsum4sbs, VSUM4SBS, 0x10000708) \
+ /* Vector Sum across Quarter Signed Halfword Saturate */ \
+ V(vsum4shs, VSUM4SHS, 0x10000648) \
+ /* Vector Sum across Quarter Unsigned Byte Saturate */ \
+ V(vsum4bus, VSUM4BUS, 0x10000608) \
+ /* Vector Sum across Signed Word Saturate */ \
+ V(vsumsws, VSUMSWS, 0x10000788) \
+ /* Vector Unpack High Pixel */ \
+ V(vupkhpx, VUPKHPX, 0x1000034E) \
+ /* Vector Unpack High Signed Byte */ \
+ V(vupkhsb, VUPKHSB, 0x1000020E) \
+ /* Vector Unpack High Signed Halfword */ \
+ V(vupkhsh, VUPKHSH, 0x1000024E) \
+ /* Vector Unpack High Signed Word */ \
+ V(vupkhsw, VUPKHSW, 0x1000064E) \
+ /* Vector Unpack Low Pixel */ \
+ V(vupklpx, VUPKLPX, 0x100003CE) \
+ /* Vector Unpack Low Signed Byte */ \
+ V(vupklsb, VUPKLSB, 0x1000028E) \
+ /* Vector Unpack Low Signed Halfword */ \
+ V(vupklsh, VUPKLSH, 0x100002CE) \
+ /* Vector Unpack Low Signed Word */ \
+ V(vupklsw, VUPKLSW, 0x100006CE) \
+ /* Vector Logical XOR */ \
+ V(vxor, VXOR, 0x100004C4) \
+ /* Vector AES Cipher */ \
+ V(vcipher, VCIPHER, 0x10000508) \
+ /* Vector AES Cipher Last */ \
+ V(vcipherlast, VCIPHERLAST, 0x10000509) \
+ /* Vector AES Inverse Cipher */ \
+ V(vncipher, VNCIPHER, 0x10000548) \
+ /* Vector AES Inverse Cipher Last */ \
+ V(vncipherlast, VNCIPHERLAST, 0x10000549) \
+ /* Vector AES S-Box */ \
+ V(vsbox, VSBOX, 0x100005C8) \
+ /* Vector SHA-512 Sigma Doubleword */ \
+ V(vshasigmad, VSHASIGMAD, 0x100006C2) \
+ /* Vector SHA-256 Sigma Word */ \
+ V(vshasigmaw, VSHASIGMAW, 0x10000682) \
+ /* Vector Merge Even Word */ \
+ V(vmrgew, VMRGEW, 0x1000078C) \
+ /* Vector Merge Odd Word */ \
V(vmrgow, VMRGOW, 0x1000068C)
-#define PPC_XS_OPCODE_LIST(V) \
- /* Shift Right Algebraic Doubleword Immediate */ \
+#define PPC_XS_OPCODE_LIST(V) \
+ /* Shift Right Algebraic Doubleword Immediate */ \
V(sradi, SRADIX, 0x7C000674)
-#define PPC_MD_OPCODE_LIST(V) \
- /* Rotate Left Doubleword Immediate then Clear */ \
- V(rldic, RLDIC, 0x78000008) \
- /* Rotate Left Doubleword Immediate then Clear Left */ \
- V(rldicl, RLDICL, 0x78000000) \
- /* Rotate Left Doubleword Immediate then Clear Right */ \
- V(rldicr, RLDICR, 0x78000004) \
- /* Rotate Left Doubleword Immediate then Mask Insert */ \
+#define PPC_MD_OPCODE_LIST(V) \
+ /* Rotate Left Doubleword Immediate then Clear */ \
+ V(rldic, RLDIC, 0x78000008) \
+ /* Rotate Left Doubleword Immediate then Clear Left */ \
+ V(rldicl, RLDICL, 0x78000000) \
+ /* Rotate Left Doubleword Immediate then Clear Right */ \
+ V(rldicr, RLDICR, 0x78000004) \
+ /* Rotate Left Doubleword Immediate then Mask Insert */ \
V(rldimi, RLDIMI, 0x7800000C)
-#define PPC_SC_OPCODE_LIST(V) \
- /* System Call */ \
+#define PPC_SC_OPCODE_LIST(V) \
+ /* System Call */ \
V(sc, SC, 0x44000002)
#define PPC_OPCODE_LIST(V) \
@@ -2577,16 +2575,16 @@ typedef uint32_t Instr;
PPC_XX4_OPCODE_LIST(V)
enum Opcode : uint32_t {
-#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
+#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
opcode_name = opcode_value,
PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
- EXT1 = 0x4C000000, // Extended code set 1
- EXT2 = 0x7C000000, // Extended code set 2
- EXT3 = 0xEC000000, // Extended code set 3
- EXT4 = 0xFC000000, // Extended code set 4
- EXT5 = 0x78000000, // Extended code set 5 - 64bit only
- EXT6 = 0xF0000000, // Extended code set 6
+ EXT1 = 0x4C000000, // Extended code set 1
+ EXT2 = 0x7C000000, // Extended code set 2
+ EXT3 = 0xEC000000, // Extended code set 3
+ EXT4 = 0xFC000000, // Extended code set 4
+ EXT5 = 0x78000000, // Extended code set 5 - 64bit only
+ EXT6 = 0xF0000000, // Extended code set 6
};
// Instruction encoding bits and masks.
@@ -2742,7 +2740,6 @@ enum CheckForInexactConversion {
// These constants are declared in assembler-arm.cc, as they use named registers
// and other constants.
-
// add(sp, sp, 4) instruction (aka Pop())
extern const Instr kPopInstruction;
@@ -2823,7 +2820,6 @@ class Instruction {
return (instr >> lo) & ((2 << (hi - lo)) - 1);
}
-
// Read a bit field out of the instruction bits.
static inline uint32_t BitField(Instr instr, int hi, int lo) {
return instr & (((2 << (hi - lo)) - 1) << lo);
@@ -2843,8 +2839,7 @@ class Instruction {
return static_cast<Opcode>(BitField(31, 26));
}
-#define OPCODE_CASES(name, opcode_name, opcode_value) \
- case opcode_name:
+#define OPCODE_CASES(name, opcode_name, opcode_value) case opcode_name:
inline Opcode OpcodeBase() const {
uint32_t opcode = OpcodeField();
@@ -2854,7 +2849,7 @@ class Instruction {
PPC_I_OPCODE_LIST(OPCODE_CASES)
PPC_B_OPCODE_LIST(OPCODE_CASES)
PPC_M_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(10, 0);
@@ -2866,12 +2861,12 @@ class Instruction {
opcode = extcode | BitField(9, 0);
switch (opcode) {
PPC_VC_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(10, 1) | BitField(20, 20);
switch (opcode) {
PPC_XFX_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(10, 1);
switch (opcode) {
@@ -2881,69 +2876,69 @@ class Instruction {
PPC_XX1_OPCODE_LIST(OPCODE_CASES)
PPC_XX2_OPCODE_LIST(OPCODE_CASES)
PPC_EVX_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(9, 1);
switch (opcode) {
PPC_XO_OPCODE_LIST(OPCODE_CASES)
PPC_Z22_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(10, 2);
switch (opcode) {
PPC_XS_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(10, 3);
switch (opcode) {
PPC_EVS_OPCODE_LIST(OPCODE_CASES)
PPC_XX3_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(8, 1);
switch (opcode) {
PPC_Z23_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
- }
+ return static_cast<Opcode>(opcode);
+ }
opcode = extcode | BitField(5, 0);
switch (opcode) {
PPC_VA_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(5, 1);
switch (opcode) {
PPC_A_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(4, 1);
switch (opcode) {
PPC_MDS_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(4, 2);
switch (opcode) {
PPC_MD_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(5, 4);
switch (opcode) {
PPC_XX4_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(2, 0);
switch (opcode) {
PPC_DQ_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(1, 0);
switch (opcode) {
PPC_DS_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(1, 1);
switch (opcode) {
PPC_SC_OPCODE_LIST(OPCODE_CASES)
- return static_cast<Opcode>(opcode);
+ return static_cast<Opcode>(opcode);
}
UNIMPLEMENTED();
return static_cast<Opcode>(0);
@@ -2964,13 +2959,11 @@ class Instruction {
return reinterpret_cast<Instruction*>(pc);
}
-
private:
// We need to prevent the creation of instances of class Instruction.
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
-
// Helper functions for converting between register numbers and names.
class Registers {
public:
@@ -2993,4 +2986,4 @@ class DoubleRegisters {
} // namespace internal
} // namespace v8
-#endif // V8_PPC_CONSTANTS_PPC_H_
+#endif // V8_CODEGEN_PPC_CONSTANTS_PPC_H_
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/codegen/ppc/cpu-ppc.cc
index cca8ebaf73..243fa29a46 100644
--- a/deps/v8/src/ppc/cpu-ppc.cc
+++ b/deps/v8/src/codegen/ppc/cpu-ppc.cc
@@ -6,7 +6,7 @@
#if V8_TARGET_ARCH_PPC
-#include "src/cpu-features.h"
+#include "src/codegen/cpu-features.h"
#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
@@ -28,10 +28,10 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) {
const int kCacheLineSize = CpuFeatures::icache_line_size();
intptr_t mask = kCacheLineSize - 1;
- byte *start =
- reinterpret_cast<byte *>(reinterpret_cast<intptr_t>(buffer) & ~mask);
- byte *end = static_cast<byte *>(buffer) + size;
- for (byte *pointer = start; pointer < end; pointer += kCacheLineSize) {
+ byte* start =
+ reinterpret_cast<byte*>(reinterpret_cast<intptr_t>(buffer) & ~mask);
+ byte* end = static_cast<byte*>(buffer) + size;
+ for (byte* pointer = start; pointer < end; pointer += kCacheLineSize) {
__asm__(
"dcbf 0, %0 \n"
"sync \n"
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
index a9f9635bd5..3d378d7a43 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/codegen/ppc/interface-descriptors-ppc.cc
@@ -4,9 +4,9 @@
#if V8_TARGET_ARCH_PPC
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
@@ -74,7 +74,6 @@ const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
@@ -203,7 +202,6 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4, r3};
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index f212e020fd..62f0fde3b8 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -9,25 +9,25 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
-#include "src/bootstrapper.h"
-#include "src/callable.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
-#include "src/external-reference-table.h"
-#include "src/frames-inl.h"
+#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
-#include "src/macro-assembler.h"
-#include "src/register-configuration.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
-#include "src/ppc/macro-assembler-ppc.h"
+#include "src/codegen/ppc/macro-assembler-ppc.h"
#endif
namespace v8 {
@@ -656,27 +656,6 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
mtlr(r0);
}
-// Push and pop all registers that can hold pointers.
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of kNumSafepointRegisters values on the
- // stack, so adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK_GE(num_unsaved, 0);
- if (num_unsaved > 0) {
- subi(sp, sp, Operand(num_unsaved * kPointerSize));
- }
- MultiPush(kSafepointSavedRegisters);
-}
-
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- MultiPop(kSafepointSavedRegisters);
- if (num_unsaved > 0) {
- addi(sp, sp, Operand(num_unsaved * kPointerSize));
- }
-}
-
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
@@ -694,7 +673,6 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
return index;
}
-
void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
// Turn potential sNaN into qNaN.
@@ -1108,7 +1086,6 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif
}
-
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
@@ -1445,20 +1422,18 @@ void MacroAssembler::PushStackHandler() {
StoreP(sp, MemOperand(r3));
}
-
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r4);
- Move(ip, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate()));
+ Move(ip,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
StoreP(r4, MemOperand(ip));
Drop(1); // Drop padding.
}
-
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
const Register temp = type_reg == no_reg ? r0 : type_reg;
@@ -1467,7 +1442,6 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
CompareInstanceType(map, temp, type);
}
-
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
@@ -1497,13 +1471,13 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
// C = A+B; C overflows if A/B have same sign and C has diff sign than A
if (dst == left) {
- mr(scratch, left); // Preserve left.
- add(dst, left, right); // Left is overwritten.
+ mr(scratch, left); // Preserve left.
+ add(dst, left, right); // Left is overwritten.
xor_(overflow_dst, dst, scratch, xorRC); // Original left.
if (!left_is_right) xor_(scratch, dst, right);
} else if (dst == right) {
- mr(scratch, right); // Preserve right.
- add(dst, left, right); // Right is overwritten.
+ mr(scratch, right); // Preserve right.
+ add(dst, left, right); // Right is overwritten.
xor_(overflow_dst, dst, left, xorRC);
if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
} else {
@@ -1585,32 +1559,6 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
ble(on_in_range);
}
-void MacroAssembler::TryDoubleToInt32Exact(Register result,
- DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch) {
- Label done;
- DCHECK(double_input != double_scratch);
-
- ConvertDoubleToInt64(double_input,
-#if !V8_TARGET_ARCH_PPC64
- scratch,
-#endif
- result, double_scratch);
-
-#if V8_TARGET_ARCH_PPC64
- TestIfInt32(result, r0);
-#else
- TestIfInt32(scratch, result, r0);
-#endif
- bne(&done);
-
- // convert back and compare
- fcfid(double_scratch, double_scratch);
- fcmpu(double_scratch, double_input);
- bind(&done);
-}
-
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
@@ -1708,7 +1656,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference::Create(fid));
}
-
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r4, builtin);
@@ -1745,7 +1692,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
}
}
-
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@@ -1760,8 +1706,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}
-void TurboAssembler::Assert(Condition cond, AbortReason reason,
- CRegister cr) {
+void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
if (emit_debug_code()) Check(cond, reason, cr);
}
@@ -1816,22 +1761,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadP(dst, ContextMemOperand(dst, index));
}
-
-void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
- Label* smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- TestBitRange(src, kSmiTagSize - 1, 0, r0);
- SmiUntag(dst, src);
- beq(smi_case, cr0);
-}
-
-void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- JumpIfSmi(reg1, on_either_smi);
- JumpIfSmi(reg2, on_either_smi);
-}
-
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1840,7 +1769,6 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1875,7 +1803,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
-
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1931,7 +1858,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
}
-
static const int kRegisterPassedArguments = 8;
int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -2078,7 +2004,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
}
}
-
void TurboAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
@@ -2104,7 +2029,6 @@ void TurboAssembler::ResetRoundingMode() {
mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
}
-
////////////////////////////////////////////////////////////////////////////////
//
// New MacroAssembler Interfaces added for PPC
@@ -2241,7 +2165,6 @@ void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
addi(sp, sp, Operand(kDoubleSize));
}
-
#if V8_TARGET_ARCH_PPC64
void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
Register src_hi,
@@ -2382,7 +2305,6 @@ void TurboAssembler::Add(Register dst, Register src, intptr_t value,
}
}
-
void TurboAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
CRegister cr) {
intptr_t value = src2.immediate();
@@ -2416,7 +2338,6 @@ void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
}
}
-
void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
Register scratch, CRegister cr) {
intptr_t value = src2.immediate();
@@ -2428,7 +2349,6 @@ void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
}
}
-
void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
RCBit rc) {
if (rb.is_reg()) {
@@ -2446,7 +2366,6 @@ void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
}
}
-
void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
if (rb.is_reg()) {
orx(ra, rs, rb.rm(), rc);
@@ -2463,7 +2382,6 @@ void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
}
}
-
void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
RCBit rc) {
if (rb.is_reg()) {
@@ -2531,7 +2449,6 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#endif
}
-
// Load a "pointer" sized value from the memory location
void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
Register scratch) {
@@ -2656,7 +2573,6 @@ void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
}
}
-
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
@@ -2672,7 +2588,6 @@ void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
}
}
-
// Variable length depending on whether offset fits into immediate field
// MemOperand current only supports d-form
void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
@@ -2688,7 +2603,6 @@ void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
}
}
-
void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
Register scratch) {
int offset = mem.offset();
@@ -2702,7 +2616,6 @@ void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
}
}
-
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
@@ -2719,7 +2632,6 @@ void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
}
}
-
// Variable length depending on whether offset fits into immediate field
// MemOperand current only supports d-form
void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
@@ -2735,7 +2647,6 @@ void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
}
}
-
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
@@ -2751,7 +2662,6 @@ void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
}
}
-
// Variable length depending on whether offset fits into immediate field
// MemOperand current only supports d-form
void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
@@ -2781,7 +2691,7 @@ void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
}
void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem,
- Register scratch) {
+ Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -2794,7 +2704,7 @@ void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem,
}
void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
- Register scratch) {
+ Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
@@ -3035,7 +2945,7 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
ShiftRightArithImm(builtin_pointer, builtin_pointer,
kSmiShift - kSystemPointerSizeLog2);
addi(builtin_pointer, builtin_pointer,
- Operand(IsolateData::builtin_entry_table_offset()));
+ Operand(IsolateData::builtin_entry_table_offset()));
LoadPX(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
Call(builtin_pointer);
}
@@ -3081,7 +2991,8 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
add(destination, destination, kRootRegister);
LoadP(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()), r0);
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()),
+ r0);
bind(&out);
} else {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index c010e6a2bd..ae24ef9a55 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -6,14 +6,14 @@
#error This header must be included via macro-assembler.h
#endif
-#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
-#define V8_PPC_MACRO_ASSEMBLER_PPC_H_
+#ifndef V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
+#define V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
-#include "src/bailout-reason.h"
-#include "src/contexts.h"
-#include "src/double.h"
-#include "src/globals.h"
-#include "src/ppc/assembler-ppc.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/codegen/ppc/assembler-ppc.h"
+#include "src/common/globals.h"
+#include "src/numbers/double.h"
+#include "src/objects/contexts.h"
namespace v8 {
namespace internal {
@@ -30,7 +30,6 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
Register reg3 = no_reg,
Register reg4 = no_reg,
@@ -152,7 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadDouble(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadFloat32(DoubleRegister dst, const MemOperand& mem,
- Register scratch = no_reg);
+ Register scratch = no_reg);
void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);
// load a literal signed int value <value> to GPR <dst>
@@ -674,11 +673,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
-
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
// At least one slot (for the return address) should be provided.
@@ -817,11 +811,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
- // Try to convert a double to a signed 32-bit integer.
- // CR_EQ in cr7 is set and result assigned if the conversion is exact.
- void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
- Register scratch, DoubleRegister double_scratch);
-
// ---------------------------------------------------------------------------
// Runtime calls
@@ -854,8 +843,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Convenience function: tail call a runtime routine (jump).
void TailCallRuntime(Runtime::FunctionId fid);
-
-
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
@@ -894,24 +881,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
#endif
}
- // Untag the source value into destination and jump if source is a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value, r0);
bne(not_smi_label, cr0);
}
- // Jump if either of the registers contain a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
void AssertSmi(Register object);
-
-
#if V8_TARGET_ARCH_PPC64
// Ensure it is permissible to read/write int value directly from
// upper half of the smi.
@@ -981,7 +960,6 @@ inline MemOperand ContextMemOperand(Register context, int index = 0) {
return MemOperand(context, Context::SlotOffset(index));
}
-
inline MemOperand NativeContextMemOperand() {
return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
@@ -991,4 +969,4 @@ inline MemOperand NativeContextMemOperand() {
} // namespace internal
} // namespace v8
-#endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_
+#endif // V8_CODEGEN_PPC_MACRO_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index cb2e0bc78b..63a9fd803c 100644
--- a/deps/v8/src/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_REGISTER_PPC_H_
-#define V8_PPC_REGISTER_PPC_H_
+#ifndef V8_CODEGEN_PPC_REGISTER_PPC_H_
+#define V8_CODEGEN_PPC_REGISTER_PPC_H_
-#include "src/register.h"
-#include "src/reglist.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
namespace v8 {
namespace internal {
@@ -145,8 +145,9 @@ const int kNumSafepointRegisters = 32;
// The following constants describe the stack frame linkage area as
// defined by the ABI. Note that kNumRequiredStackFrameSlots must
// satisfy alignment requirements (rounding up if required).
-#if V8_TARGET_ARCH_PPC64 && (V8_TARGET_LITTLE_ENDIAN || \
- (defined(_CALL_ELF) && _CALL_ELF == 2)) // ELFv2 ABI
+#if V8_TARGET_ARCH_PPC64 && \
+ (V8_TARGET_LITTLE_ENDIAN || \
+ (defined(_CALL_ELF) && _CALL_ELF == 2)) // ELFv2 ABI
// [0] back chain
// [1] condition register save area
// [2] link register save area
@@ -249,10 +250,10 @@ ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
static_assert(sizeof(DoubleRegister) == sizeof(int),
"DoubleRegister can efficiently be passed by value");
-typedef DoubleRegister FloatRegister;
+using FloatRegister = DoubleRegister;
// TODO(ppc) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
+using Simd128Register = DoubleRegister;
#define DEFINE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
@@ -319,4 +320,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = r15;
} // namespace internal
} // namespace v8
-#endif // V8_PPC_REGISTER_PPC_H_
+#endif // V8_CODEGEN_PPC_REGISTER_PPC_H_
diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h
new file mode 100644
index 0000000000..aa668a9158
--- /dev/null
+++ b/deps/v8/src/codegen/register-arch.h
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_REGISTER_ARCH_H_
+#define V8_CODEGEN_REGISTER_ARCH_H_
+
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/codegen/ia32/register-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/codegen/x64/register-x64.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/codegen/arm64/register-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/codegen/arm/register-arm.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/codegen/ppc/register-ppc.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/codegen/mips/register-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/codegen/mips64/register-mips64.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/codegen/s390/register-s390.h"
+#else
+#error Unknown architecture.
+#endif
+
+#endif // V8_CODEGEN_REGISTER_ARCH_H_
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc
index e7f4ada1e4..c8f768e6de 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/codegen/register-configuration.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/register-configuration.h"
+#include "src/codegen/register-configuration.h"
#include "src/base/lazy-instance.h"
-#include "src/cpu-features.h"
-#include "src/globals.h"
-#include "src/register-arch.h"
+#include "src/codegen/cpu-features.h"
+#include "src/codegen/register-arch.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -15,9 +15,9 @@ namespace {
#define REGISTER_COUNT(R) 1 +
static const int kMaxAllocatableGeneralRegisterCount =
- ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT)0;
+ ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
static const int kMaxAllocatableDoubleRegisterCount =
- ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT)0;
+ ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0;
static const int kAllocatableGeneralCodes[] = {
#define REGISTER_CODE(R) kRegCode_##R,
@@ -138,7 +138,7 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
RestrictedRegisterConfiguration(
int num_allocatable_general_registers,
std::unique_ptr<int[]> allocatable_general_register_codes,
- std::unique_ptr<char const* []> allocatable_general_register_names)
+ std::unique_ptr<char const*[]> allocatable_general_register_names)
: RegisterConfiguration(
Register::kNumRegisters, DoubleRegister::kNumRegisters,
num_allocatable_general_registers,
@@ -167,7 +167,7 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
private:
std::unique_ptr<int[]> allocatable_general_register_codes_;
- std::unique_ptr<char const* []> allocatable_general_register_names_;
+ std::unique_ptr<char const*[]> allocatable_general_register_names_;
};
} // namespace
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/codegen/register-configuration.h
index f1c2c6cbc0..0521599734 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/codegen/register-configuration.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGISTER_CONFIGURATION_H_
-#define V8_REGISTER_CONFIGURATION_H_
+#ifndef V8_CODEGEN_REGISTER_CONFIGURATION_H_
+#define V8_CODEGEN_REGISTER_CONFIGURATION_H_
#include "src/base/macros.h"
-#include "src/globals.h"
-#include "src/machine-type.h"
-#include "src/reglist.h"
-#include "src/utils.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/reglist.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -152,4 +152,4 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
} // namespace internal
} // namespace v8
-#endif // V8_REGISTER_CONFIGURATION_H_
+#endif // V8_CODEGEN_REGISTER_CONFIGURATION_H_
diff --git a/deps/v8/src/register.h b/deps/v8/src/codegen/register.h
index f1f803a340..619f4f2890 100644
--- a/deps/v8/src/register.h
+++ b/deps/v8/src/codegen/register.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGISTER_H_
-#define V8_REGISTER_H_
+#ifndef V8_CODEGEN_REGISTER_H_
+#define V8_CODEGEN_REGISTER_H_
-#include "src/reglist.h"
+#include "src/codegen/reglist.h"
namespace v8 {
@@ -123,4 +123,4 @@ inline std::ostream& operator<<(std::ostream& os, RegType reg) {
} // namespace internal
} // namespace v8
-#endif // V8_REGISTER_H_
+#endif // V8_CODEGEN_REGISTER_H_
diff --git a/deps/v8/src/reglist.h b/deps/v8/src/codegen/reglist.h
index 121fd2bea3..609e6b8845 100644
--- a/deps/v8/src/reglist.h
+++ b/deps/v8/src/codegen/reglist.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_REGLIST_H_
-#define V8_REGLIST_H_
+#ifndef V8_CODEGEN_REGLIST_H_
+#define V8_CODEGEN_REGLIST_H_
#include <cstdint>
@@ -15,9 +15,9 @@ namespace internal {
// Register configurations.
#if V8_TARGET_ARCH_ARM64
-typedef uint64_t RegList;
+using RegList = uint64_t;
#else
-typedef uint32_t RegList;
+using RegList = uint32_t;
#endif
// Get the number of registers in a given register list.
@@ -44,4 +44,4 @@ constexpr RegList CombineRegLists(RegLists... lists) {
} // namespace internal
} // namespace v8
-#endif // V8_REGLIST_H_
+#endif // V8_CODEGEN_REGLIST_H_
diff --git a/deps/v8/src/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 8da70da65e..a889a8b9c7 100644
--- a/deps/v8/src/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/reloc-info.h"
+#include "src/codegen/reloc-info.h"
-#include "src/assembler-inl.h"
-#include "src/code-reference.h"
-#include "src/deoptimize-reason.h"
-#include "src/deoptimizer.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/code-reference.h"
+#include "src/deoptimizer/deoptimize-reason.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/code-inl.h"
#include "src/snapshot/snapshot.h"
@@ -149,7 +149,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
static_cast<uint32_t>(rinfo->pc() - reinterpret_cast<Address>(last_pc_));
// The two most common modes are given small tags, and usually fit in a byte.
- if (rmode == RelocInfo::EMBEDDED_OBJECT) {
+ if (rmode == RelocInfo::FULL_EMBEDDED_OBJECT) {
WriteShortTaggedPC(pc_delta, kEmbeddedObjectTag);
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteShortTaggedPC(pc_delta, kCodeTargetTag);
@@ -233,7 +233,7 @@ void RelocIterator::next() {
int tag = AdvanceGetTag();
if (tag == kEmbeddedObjectTag) {
ReadShortTaggedPC();
- if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
+ if (SetMode(RelocInfo::FULL_EMBEDDED_OBJECT)) return;
} else if (tag == kCodeTargetTag) {
ReadShortTaggedPC();
if (SetMode(RelocInfo::CODE_TARGET)) return;
@@ -272,13 +272,13 @@ void RelocIterator::next() {
}
RelocIterator::RelocIterator(Code code, int mode_mask)
- : RelocIterator(code, code->unchecked_relocation_info(), mode_mask) {}
+ : RelocIterator(code, code.unchecked_relocation_info(), mode_mask) {}
RelocIterator::RelocIterator(Code code, ByteArray relocation_info,
int mode_mask)
- : RelocIterator(code, code->raw_instruction_start(), code->constant_pool(),
- relocation_info->GetDataEndAddress(),
- relocation_info->GetDataStartAddress(), mode_mask) {}
+ : RelocIterator(code, code.raw_instruction_start(), code.constant_pool(),
+ relocation_info.GetDataEndAddress(),
+ relocation_info.GetDataStartAddress(), mode_mask) {}
RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
: RelocIterator(Code(), code_reference.instruction_start(),
@@ -289,10 +289,10 @@ RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code code,
int mode_mask)
: RelocIterator(
- code, embedded_data->InstructionStartOfBuiltin(code->builtin_index()),
- code->constant_pool(),
- code->relocation_start() + code->relocation_size(),
- code->relocation_start(), mode_mask) {}
+ code, embedded_data->InstructionStartOfBuiltin(code.builtin_index()),
+ code.constant_pool(),
+ code.relocation_start() + code.relocation_size(),
+ code.relocation_start(), mode_mask) {}
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
: RelocIterator(Code(), reinterpret_cast<Address>(desc.buffer), 0,
@@ -303,9 +303,9 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
RelocIterator::RelocIterator(Vector<byte> instructions,
Vector<const byte> reloc_info, Address const_pool,
int mode_mask)
- : RelocIterator(Code(), reinterpret_cast<Address>(instructions.start()),
- const_pool, reloc_info.start() + reloc_info.size(),
- reloc_info.start(), mode_mask) {}
+ : RelocIterator(Code(), reinterpret_cast<Address>(instructions.begin()),
+ const_pool, reloc_info.begin() + reloc_info.size(),
+ reloc_info.begin(), mode_mask) {}
RelocIterator::RelocIterator(Code host, Address pc, Address constant_pool,
const byte* pos, const byte* end, int mode_mask)
@@ -377,13 +377,14 @@ bool RelocInfo::HasTargetAddressAddress() const {
// non-intel platforms now that wasm code is no longer on the heap.
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
static constexpr int kTargetAddressAddressModeMask =
- ModeMask(CODE_TARGET) | ModeMask(EMBEDDED_OBJECT) |
- ModeMask(EXTERNAL_REFERENCE) | ModeMask(OFF_HEAP_TARGET) |
- ModeMask(RUNTIME_ENTRY) | ModeMask(WASM_CALL) | ModeMask(WASM_STUB_CALL);
+ ModeMask(CODE_TARGET) | ModeMask(FULL_EMBEDDED_OBJECT) |
+ ModeMask(COMPRESSED_EMBEDDED_OBJECT) | ModeMask(EXTERNAL_REFERENCE) |
+ ModeMask(OFF_HEAP_TARGET) | ModeMask(RUNTIME_ENTRY) |
+ ModeMask(WASM_CALL) | ModeMask(WASM_STUB_CALL);
#else
static constexpr int kTargetAddressAddressModeMask =
ModeMask(CODE_TARGET) | ModeMask(RELATIVE_CODE_TARGET) |
- ModeMask(EMBEDDED_OBJECT) | ModeMask(EXTERNAL_REFERENCE) |
+ ModeMask(FULL_EMBEDDED_OBJECT) | ModeMask(EXTERNAL_REFERENCE) |
ModeMask(OFF_HEAP_TARGET) | ModeMask(RUNTIME_ENTRY) | ModeMask(WASM_CALL);
#endif
return (ModeMask(rmode_) & kTargetAddressAddressModeMask) != 0;
@@ -404,8 +405,10 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
case NONE:
return "no reloc";
- case EMBEDDED_OBJECT:
- return "embedded object";
+ case COMPRESSED_EMBEDDED_OBJECT:
+ return "compressed embedded object";
+ case FULL_EMBEDDED_OBJECT:
+ return "full embedded object";
case CODE_TARGET:
return "code target";
case RELATIVE_CODE_TARGET:
@@ -450,8 +453,10 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
} else if (rmode_ == DEOPT_REASON) {
os << " ("
<< DeoptimizeReasonToString(static_cast<DeoptimizeReason>(data_)) << ")";
- } else if (rmode_ == EMBEDDED_OBJECT) {
+ } else if (rmode_ == FULL_EMBEDDED_OBJECT) {
os << " (" << Brief(target_object()) << ")";
+ } else if (rmode_ == COMPRESSED_EMBEDDED_OBJECT) {
+ os << " (" << Brief(target_object()) << " compressed)";
} else if (rmode_ == EXTERNAL_REFERENCE) {
if (isolate) {
ExternalReferenceEncoder ref_encoder(isolate);
@@ -464,10 +469,10 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
} else if (IsCodeTargetMode(rmode_)) {
const Address code_target = target_address();
Code code = Code::GetCodeFromTargetAddress(code_target);
- DCHECK(code->IsCode());
- os << " (" << Code::Kind2String(code->kind());
+ DCHECK(code.IsCode());
+ os << " (" << Code::Kind2String(code.kind());
if (Builtins::IsBuiltin(code)) {
- os << " " << Builtins::name(code->builtin_index());
+ os << " " << Builtins::name(code.builtin_index());
}
os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
@@ -488,7 +493,8 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
#ifdef VERIFY_HEAP
void RelocInfo::Verify(Isolate* isolate) {
switch (rmode_) {
- case EMBEDDED_OBJECT:
+ case COMPRESSED_EMBEDDED_OBJECT:
+ case FULL_EMBEDDED_OBJECT:
Object::VerifyPointer(isolate, target_object());
break;
case CODE_TARGET:
@@ -499,8 +505,8 @@ void RelocInfo::Verify(Isolate* isolate) {
// Check that we can find the right code object.
Code code = Code::GetCodeFromTargetAddress(addr);
Object found = isolate->FindCodeObject(addr);
- CHECK(found->IsCode());
- CHECK(code->address() == HeapObject::cast(found)->address());
+ CHECK(found.IsCode());
+ CHECK(code.address() == HeapObject::cast(found).address());
break;
}
case INTERNAL_REFERENCE:
@@ -508,8 +514,8 @@ void RelocInfo::Verify(Isolate* isolate) {
Address target = target_internal_reference();
Address pc = target_internal_reference_address();
Code code = Code::cast(isolate->FindCodeObject(pc));
- CHECK(target >= code->InstructionStart());
- CHECK(target <= code->InstructionEnd());
+ CHECK(target >= code.InstructionStart());
+ CHECK(target <= code.InstructionEnd());
break;
}
case OFF_HEAP_TARGET: {
@@ -533,7 +539,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
- break;
}
}
#endif // VERIFY_HEAP
diff --git a/deps/v8/src/reloc-info.h b/deps/v8/src/codegen/reloc-info.h
index f5f61046ee..6e72e84f40 100644
--- a/deps/v8/src/reloc-info.h
+++ b/deps/v8/src/codegen/reloc-info.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_RELOC_INFO_H_
-#define V8_RELOC_INFO_H_
+#ifndef V8_CODEGEN_RELOC_INFO_H_
+#define V8_CODEGEN_RELOC_INFO_H_
-#include "src/flush-instruction-cache.h"
-#include "src/globals.h"
+#include "src/codegen/flush-instruction-cache.h"
+#include "src/common/globals.h"
#include "src/objects/code.h"
namespace v8 {
@@ -56,7 +56,8 @@ class RelocInfo {
CODE_TARGET,
RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
- EMBEDDED_OBJECT, // LAST_GCED_ENUM
+ COMPRESSED_EMBEDDED_OBJECT,
+ FULL_EMBEDDED_OBJECT, // LAST_GCED_ENUM
WASM_CALL, // FIRST_SHAREABLE_RELOC_MODE
WASM_STUB_CALL,
@@ -93,7 +94,9 @@ class RelocInfo {
LAST_CODE_TARGET_MODE = RELATIVE_CODE_TARGET,
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
- LAST_GCED_ENUM = EMBEDDED_OBJECT,
+ FIRST_EMBEDDED_OBJECT_RELOC_MODE = COMPRESSED_EMBEDDED_OBJECT,
+ LAST_EMBEDDED_OBJECT_RELOC_MODE = FULL_EMBEDDED_OBJECT,
+ LAST_GCED_ENUM = LAST_EMBEDDED_OBJECT_RELOC_MODE,
FIRST_SHAREABLE_RELOC_MODE = WASM_CALL,
};
@@ -107,7 +110,10 @@ class RelocInfo {
rmode_(rmode),
data_(data),
host_(host),
- constant_pool_(constant_pool) {}
+ constant_pool_(constant_pool) {
+ DCHECK_IMPLIES(!COMPRESS_POINTERS_BOOL,
+ rmode != COMPRESSED_EMBEDDED_OBJECT);
+ }
static constexpr bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
@@ -129,8 +135,15 @@ class RelocInfo {
static constexpr bool IsRelativeCodeTarget(Mode mode) {
return mode == RELATIVE_CODE_TARGET;
}
- static constexpr bool IsEmbeddedObject(Mode mode) {
- return mode == EMBEDDED_OBJECT;
+ static constexpr bool IsFullEmbeddedObject(Mode mode) {
+ return mode == FULL_EMBEDDED_OBJECT;
+ }
+ static constexpr bool IsCompressedEmbeddedObject(Mode mode) {
+ return COMPRESS_POINTERS_BOOL && mode == COMPRESSED_EMBEDDED_OBJECT;
+ }
+ static constexpr bool IsEmbeddedObjectMode(Mode mode) {
+ return IsInRange(mode, FIRST_EMBEDDED_OBJECT_RELOC_MODE,
+ LAST_EMBEDDED_OBJECT_RELOC_MODE);
}
static constexpr bool IsRuntimeEntry(Mode mode) {
return mode == RUNTIME_ENTRY;
@@ -223,7 +236,12 @@ class RelocInfo {
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
V8_INLINE Address target_address();
V8_INLINE HeapObject target_object();
+
+ // In GC operations, we don't have a host_ pointer. Retrieving a target
+ // for COMPRESSED_EMBEDDED_OBJECT mode requires an isolate.
+ V8_INLINE HeapObject target_object_no_host(Isolate* isolate);
V8_INLINE Handle<HeapObject> target_object_handle(Assembler* origin);
+
V8_INLINE void set_target_object(
Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
@@ -280,7 +298,7 @@ class RelocInfo {
template <typename ObjectVisitor>
void Visit(ObjectVisitor* visitor) {
Mode mode = rmode();
- if (IsEmbeddedObject(mode)) {
+ if (IsEmbeddedObjectMode(mode)) {
visitor->VisitEmbeddedPointer(host(), this);
} else if (IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
@@ -311,12 +329,18 @@ class RelocInfo {
static const int kApplyMask; // Modes affected by apply. Depends on arch.
+ static int EmbeddedObjectModeMask() {
+ return ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
+ ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
+ }
+
// In addition to modes covered by the apply mask (which is applied at GC
// time, among others), this covers all modes that are relocated by
// Code::CopyFromNoFlush after code generation.
static int PostCodegenRelocationMask() {
return ModeMask(RelocInfo::CODE_TARGET) |
- ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
+ ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
ModeMask(RelocInfo::RUNTIME_ENTRY) |
ModeMask(RelocInfo::RELATIVE_CODE_TARGET) | kApplyMask;
}
@@ -444,4 +468,4 @@ class V8_EXPORT_PRIVATE RelocIterator : public Malloced {
} // namespace internal
} // namespace v8
-#endif // V8_RELOC_INFO_H_
+#endif // V8_CODEGEN_RELOC_INFO_H_
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/codegen/s390/assembler-s390-inl.h
index d02f73ceeb..5e7b193c8a 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/codegen/s390/assembler-s390-inl.h
@@ -34,14 +34,14 @@
// significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
-#ifndef V8_S390_ASSEMBLER_S390_INL_H_
-#define V8_S390_ASSEMBLER_S390_INL_H_
+#ifndef V8_CODEGEN_S390_ASSEMBLER_S390_INL_H_
+#define V8_CODEGEN_S390_ASSEMBLER_S390_INL_H_
-#include "src/s390/assembler-s390.h"
+#include "src/codegen/s390/assembler-s390.h"
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -112,26 +112,10 @@ Address RelocInfo::target_address_address() {
return pc_;
}
-Address RelocInfo::constant_pool_entry_address() {
- UNREACHABLE();
-}
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
-Address Assembler::target_address_from_return_address(Address pc) {
- // Returns the address of the call target from the return address that will
- // be returned to after a call.
- // Sequence is:
- // BRASL r14, RI
- return pc - kCallTargetAddressOffset;
-}
-
-Address Assembler::return_address_from_call_start(Address pc) {
- // Sequence is:
- // BRASL r14, RI
- return pc + kCallTargetAddressOffset;
-}
-
Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
SixByteInstr instr =
Instruction::InstructionBits(reinterpret_cast<const byte*>(pc));
@@ -140,15 +124,19 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsRelativeCodeTarget(rmode_) || IsCodeTarget(rmode_) ||
- rmode_ == EMBEDDED_OBJECT);
- if (rmode_ == EMBEDDED_OBJECT) {
+ rmode_ == FULL_EMBEDDED_OBJECT);
+ if (rmode_ == FULL_EMBEDDED_OBJECT) {
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
} else {
@@ -159,8 +147,8 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- Assembler::set_target_address_at(pc_, constant_pool_, target->ptr(),
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == FULL_EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null()) {
WriteBarrierForCode(host(), this, target);
@@ -198,7 +186,7 @@ void RelocInfo::set_target_runtime_entry(Address target,
}
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
@@ -261,7 +249,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- !code.is_null() ? code->constant_pool() : kNullAddress,
+ !code.is_null() ? code.constant_pool() : kNullAddress,
target);
}
@@ -352,4 +340,4 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
} // namespace internal
} // namespace v8
-#endif // V8_S390_ASSEMBLER_S390_INL_H_
+#endif // V8_CODEGEN_S390_ASSEMBLER_S390_INL_H_
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 0a01648fc7..dbfdc9a32a 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -34,7 +34,7 @@
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
-#include "src/s390/assembler-s390.h"
+#include "src/codegen/s390/assembler-s390.h"
#include <sys/auxv.h>
#include <set>
#include <string>
@@ -47,10 +47,10 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/s390/assembler-s390-inl.h"
-#include "src/string-constants.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/s390/assembler-s390-inl.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
@@ -99,6 +99,9 @@ static bool supportsCPUFeature(const char* feature) {
return features.find(feature) != features.end();
}
+#undef CHECK_AVAILABILITY_FOR
+#undef HWCAP_S390_VX
+
// Check whether Store Facility STFLE instruction is available on the platform.
// Instruction returns a bit vector of the enabled hardware facilities.
static bool supportsSTFLE() {
@@ -210,6 +213,11 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
supportsCPUFeature("vx")) {
supported_ |= (1u << VECTOR_FACILITY);
}
+ // Test for Vector Enhancement Facility 1 - Bit 135
+ if (facilities[2] & (one << (63 - (135 - 128))) &&
+ supportsCPUFeature("vx")) {
+ supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
+ }
// Test for Miscellaneous Instruction Extension Facility - Bit 58
if (facilities[0] & (1lu << (63 - 58))) {
supported_ |= (1u << MISC_INSTR_EXT2);
@@ -225,6 +233,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
USE(performSTFLE); // To avoid assert
USE(supportsCPUFeature);
supported_ |= (1u << VECTOR_FACILITY);
+ supported_ |= (1u << VECTOR_ENHANCE_FACILITY_1);
#endif
supported_ |= (1u << FPU);
}
@@ -288,20 +297,20 @@ Operand::Operand(Handle<HeapObject> handle) {
AllowHandleDereference using_location;
rm_ = no_reg;
value_.immediate = static_cast<intptr_t>(handle.address());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
}
Operand Operand::EmbeddedNumber(double value) {
int32_t smi;
if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(value);
return result;
}
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
- Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(str);
return result;
@@ -468,9 +477,9 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
if (is_branch != nullptr) {
- *is_branch = (opcode == BRC || opcode == BRCT || opcode == BRCTG ||
- opcode == BRCL || opcode == BRASL || opcode == BRXH ||
- opcode == BRXHG);
+ *is_branch =
+ (opcode == BRC || opcode == BRCT || opcode == BRCTG || opcode == BRCL ||
+ opcode == BRASL || opcode == BRXH || opcode == BRXHG);
}
if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode) {
@@ -496,8 +505,8 @@ void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
} else if (BRXHG == opcode) {
// Immediate is in bits 16-31 of 48 bit instruction
int32_t imm16 = target_pos - pos;
- instr &= (0xFFFF0000FFFF); // clear bits 16-31
- imm16 &= 0xFFFF; // clear high halfword
+ instr &= (0xFFFF0000FFFF); // clear bits 16-31
+ imm16 &= 0xFFFF; // clear high halfword
imm16 <<= 16;
// Immediate is in # of halfwords
instr_at_put<SixByteInstr>(pos, instr | (imm16 >> 1));
@@ -511,7 +520,7 @@ int Assembler::max_reach_from(int pos) {
Opcode opcode = Instruction::S390OpcodeValue(buffer_start_ + pos);
// Check which type of instr. In theory, we can return
// the values below + 1, given offset is # of halfwords
- if (BRC == opcode || BRCT == opcode || BRCTG == opcode|| BRXH == opcode ||
+ if (BRC == opcode || BRCT == opcode || BRCTG == opcode || BRXH == opcode ||
BRXHG == opcode) {
return 16;
} else if (LLILF == opcode || BRCL == opcode || LARL == opcode ||
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/codegen/s390/assembler-s390.h
index 109ef53236..e22c037a31 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/codegen/s390/assembler-s390.h
@@ -37,8 +37,8 @@
// A light-weight S390 Assembler
// Generates user mode instructions for z/Architecture
-#ifndef V8_S390_ASSEMBLER_S390_H_
-#define V8_S390_ASSEMBLER_S390_H_
+#ifndef V8_CODEGEN_S390_ASSEMBLER_S390_H_
+#define V8_CODEGEN_S390_ASSEMBLER_S390_H_
#include <stdio.h>
#if V8_HOST_ARCH_S390
// elf.h include is required for auxv check for STFLE facility used
@@ -50,12 +50,12 @@
#include <unistd.h>
#include <vector>
-#include "src/assembler.h"
-#include "src/external-reference.h"
-#include "src/label.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/label.h"
+#include "src/codegen/s390/constants-s390.h"
+#include "src/codegen/s390/register-s390.h"
#include "src/objects/smi.h"
-#include "src/s390/constants-s390.h"
-#include "src/s390/register-s390.h"
#define ABI_USES_FUNCTION_DESCRIPTORS 0
@@ -88,7 +88,7 @@ class SafepointTableBuilder;
// Class Operand represents a shifter operand in data processing instructions
// defining immediate numbers and masks
-class Operand {
+class V8_EXPORT_PRIVATE Operand {
public:
// immediate
V8_INLINE explicit Operand(intptr_t immediate,
@@ -138,7 +138,7 @@ class Operand {
bool is_heap_object_request() const {
DCHECK_IMPLIES(is_heap_object_request_, !rm_.is_valid());
DCHECK_IMPLIES(is_heap_object_request_,
- rmode_ == RelocInfo::EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
@@ -160,14 +160,14 @@ class Operand {
friend class MacroAssembler;
};
-typedef int32_t Disp;
+using Disp = int32_t;
// Class MemOperand represents a memory operand in load and store instructions
// On S390, we have various flavours of memory operands:
// 1) a base register + 16 bit unsigned displacement
// 2) a base register + index register + 16 bit unsigned displacement
// 3) a base register + index register + 20 bit signed displacement
-class MemOperand {
+class V8_EXPORT_PRIVATE MemOperand {
public:
explicit MemOperand(Register rx, Disp offset = 0);
explicit MemOperand(Register rx, Register rb, Disp offset = 0);
@@ -285,14 +285,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- inline static Address target_address_from_return_address(Address pc);
-
- // Given the address of the beginning of a call, return the address
- // in the instruction stream that the call will return to.
- V8_INLINE static Address return_address_from_call_start(Address pc);
-
inline Handle<Object> code_target_object_handle_at(Address pc);
// This sets the branch destination.
// This is for calls and branches within generated code.
@@ -323,14 +315,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kBytesForPtrConstant = 6; // IILF
#endif
- // Distance between the instruction referring to the address of the call
- // target and the return address.
-
- // Offset between call target address and return address
- // for BRASL calls
- // Patch will be appiled to other FIXED_SEQUENCE call
- static constexpr int kCallTargetAddressOffset = 6;
-
// ---------------------------------------------------------------------------
// Code generation
@@ -418,8 +402,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#define DECLARE_S390_RX_INSTRUCTIONS(name, op_name, op_value) \
template <class R1> \
inline void name(R1 r1, Register x2, Register b2, const Operand& d2) { \
- rx_format(op_name, r1.code(), x2.code(), b2.code(), \
- d2.immediate()); \
+ rx_format(op_name, r1.code(), x2.code(), b2.code(), d2.immediate()); \
} \
template <class R1> \
inline void name(R1 r1, const MemOperand& opnd) { \
@@ -430,17 +413,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
inline void rx_format(Opcode opcode, int f1, int f2, int f3, int f4) {
DCHECK(is_uint8(opcode));
DCHECK(is_uint12(f4));
- emit4bytes(getfield<uint32_t, 4, 0, 8>(opcode) |
- getfield<uint32_t, 4, 8, 12>(f1) |
- getfield<uint32_t, 4, 12, 16>(f2) |
- getfield<uint32_t, 4, 16, 20>(f3) |
- getfield<uint32_t, 4, 20, 32>(f4));
+ emit4bytes(
+ getfield<uint32_t, 4, 0, 8>(opcode) | getfield<uint32_t, 4, 8, 12>(f1) |
+ getfield<uint32_t, 4, 12, 16>(f2) | getfield<uint32_t, 4, 16, 20>(f3) |
+ getfield<uint32_t, 4, 20, 32>(f4));
}
S390_RX_A_OPCODE_LIST(DECLARE_S390_RX_INSTRUCTIONS)
void bc(Condition cond, const MemOperand& opnd) {
- bc(cond, opnd.getIndexRegister(),
- opnd.getBaseRegister(), Operand(opnd.getDisplacement()));
+ bc(cond, opnd.getIndexRegister(), opnd.getBaseRegister(),
+ Operand(opnd.getDisplacement()));
}
void bc(Condition cond, Register x2, Register b2, const Operand& d2) {
rx_format(BC, cond, x2.code(), b2.code(), d2.immediate());
@@ -472,90 +454,84 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
S390_RXY_A_OPCODE_LIST(DECLARE_S390_RXY_INSTRUCTIONS)
void pfd(Condition cond, const MemOperand& opnd) {
- pfd(cond, opnd.getIndexRegister(),
- opnd.getBaseRegister(), Operand(opnd.getDisplacement()));
+ pfd(cond, opnd.getIndexRegister(), opnd.getBaseRegister(),
+ Operand(opnd.getDisplacement()));
}
void pfd(Condition cond, Register x2, Register b2, const Operand& d2) {
rxy_format(PFD, cond, x2.code(), b2.code(), d2.immediate());
}
#undef DECLARE_S390_RXY_INSTRUCTIONS
-
-inline void rsy_format(Opcode op, int f1, int f2, int f3, int f4) {
- DCHECK(is_int20(f4));
- DCHECK(is_uint16(op));
- uint64_t code = (getfield<uint64_t, 6, 0, 8>(op >> 8) |
- getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) |
- getfield<uint64_t, 6, 16, 20>(f3) |
- getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
- getfield<uint64_t, 6, 32, 40>(f4 >> 12) |
- getfield<uint64_t, 6, 40, 48>(op & 0xff));
- emit6bytes(code);
-}
-
-#define DECLARE_S390_RSY_A_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Register r3, Register b2, \
- const Operand& d2 = Operand::Zero()) { \
- rsy_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
- } \
- void name(Register r1, Register r3, Operand d2) { \
- name(r1, r3, r0, d2); \
- } \
- void name(Register r1, Register r3, const MemOperand& opnd) { \
- name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+ inline void rsy_format(Opcode op, int f1, int f2, int f3, int f4) {
+ DCHECK(is_int20(f4));
+ DCHECK(is_uint16(op));
+ uint64_t code =
+ (getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 12>(f1) | getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 20>(f3) |
+ getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
+ getfield<uint64_t, 6, 32, 40>(f4 >> 12) |
+ getfield<uint64_t, 6, 40, 48>(op & 0xff));
+ emit6bytes(code);
+ }
+
+#define DECLARE_S390_RSY_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r3, Register b2, \
+ const Operand& d2 = Operand::Zero()) { \
+ rsy_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
+ } \
+ void name(Register r1, Register r3, Operand d2) { name(r1, r3, r0, d2); } \
+ void name(Register r1, Register r3, const MemOperand& opnd) { \
+ name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RSY_A_OPCODE_LIST(DECLARE_S390_RSY_A_INSTRUCTIONS)
#undef DECLARE_S390_RSY_A_INSTRUCTIONS
-#define DECLARE_S390_RSY_B_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
- rsy_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
- } \
- void name(Register r1, Condition m3, const MemOperand& opnd) { \
- name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+#define DECLARE_S390_RSY_B_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
+ rsy_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
+ } \
+ void name(Register r1, Condition m3, const MemOperand& opnd) { \
+ name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RSY_B_OPCODE_LIST(DECLARE_S390_RSY_B_INSTRUCTIONS)
#undef DECLARE_S390_RSY_B_INSTRUCTIONS
+ inline void rs_format(Opcode op, int f1, int f2, int f3, const int f4) {
+ uint32_t code =
+ getfield<uint32_t, 4, 0, 8>(op) | getfield<uint32_t, 4, 8, 12>(f1) |
+ getfield<uint32_t, 4, 12, 16>(f2) | getfield<uint32_t, 4, 16, 20>(f3) |
+ getfield<uint32_t, 4, 20, 32>(f4);
+ emit4bytes(code);
+ }
-inline void rs_format(Opcode op, int f1, int f2, int f3, const int f4) {
- uint32_t code = getfield<uint32_t, 4, 0, 8>(op) |
- getfield<uint32_t, 4, 8, 12>(f1) |
- getfield<uint32_t, 4, 12, 16>(f2) |
- getfield<uint32_t, 4, 16, 20>(f3) |
- getfield<uint32_t, 4, 20, 32>(f4);
- emit4bytes(code);
-}
-
-#define DECLARE_S390_RS_A_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Register r3, Register b2, const Operand& d2) { \
- rs_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
- } \
- void name(Register r1, Register r3, const MemOperand& opnd) { \
- name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+#define DECLARE_S390_RS_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r3, Register b2, const Operand& d2) { \
+ rs_format(op_name, r1.code(), r3.code(), b2.code(), d2.immediate()); \
+ } \
+ void name(Register r1, Register r3, const MemOperand& opnd) { \
+ name(r1, r3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RS_A_OPCODE_LIST(DECLARE_S390_RS_A_INSTRUCTIONS)
#undef DECLARE_S390_RS_A_INSTRUCTIONS
-#define DECLARE_S390_RS_B_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
- rs_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
- } \
- void name(Register r1, Condition m3, const MemOperand& opnd) { \
- name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+#define DECLARE_S390_RS_B_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Condition m3, Register b2, const Operand& d2) { \
+ rs_format(op_name, r1.code(), m3, b2.code(), d2.immediate()); \
+ } \
+ void name(Register r1, Condition m3, const MemOperand& opnd) { \
+ name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_RS_B_OPCODE_LIST(DECLARE_S390_RS_B_INSTRUCTIONS)
#undef DECLARE_S390_RS_B_INSTRUCTIONS
-#define DECLARE_S390_RS_SHIFT_FORMAT(name, opcode) \
- void name(Register r1, Register r2, const Operand& opnd = \
- Operand::Zero()) { \
- DCHECK(r2 != r0); \
- rs_format(opcode, r1.code(), r0.code(), r2.code(), opnd.immediate()); \
- } \
- void name(Register r1, const Operand& opnd) { \
- rs_format(opcode, r1.code(), r0.code(), r0.code(), opnd.immediate()); \
+#define DECLARE_S390_RS_SHIFT_FORMAT(name, opcode) \
+ void name(Register r1, Register r2, const Operand& opnd = Operand::Zero()) { \
+ DCHECK(r2 != r0); \
+ rs_format(opcode, r1.code(), r0.code(), r2.code(), opnd.immediate()); \
+ } \
+ void name(Register r1, const Operand& opnd) { \
+ rs_format(opcode, r1.code(), r0.code(), r0.code(), opnd.immediate()); \
}
DECLARE_S390_RS_SHIFT_FORMAT(sll, SLL)
DECLARE_S390_RS_SHIFT_FORMAT(srl, SRL)
@@ -566,448 +542,408 @@ inline void rs_format(Opcode op, int f1, int f2, int f3, const int f4) {
DECLARE_S390_RS_SHIFT_FORMAT(srdl, SRDL)
#undef DECLARE_S390_RS_SHIFT_FORMAT
-
-inline void rxe_format(Opcode op, int f1, int f2, int f3, int f4, int f5 = 0) {
- DCHECK(is_uint12(f4));
- DCHECK(is_uint16(op));
- uint64_t code = (getfield<uint64_t, 6, 0, 8>(op >> 8) |
- getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) |
- getfield<uint64_t, 6, 16, 20>(f3) |
- getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
- getfield<uint64_t, 6, 32, 36>(f5) |
- getfield<uint64_t, 6, 40, 48>(op & 0xff));
- emit6bytes(code);
-}
-
-#define DECLARE_S390_RXE_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Register x2, Register b2, const Operand& d2, \
- Condition m3 = static_cast<Condition>(0)) { \
- rxe_format(op_name, r1.code(), x2.code(), b2.code(), d2.immediate(), \
- m3); \
- } \
- template<class _R1Type> \
- void name(_R1Type r1, const MemOperand& opnd) { \
- name(Register::from_code(r1.code()), opnd.rx(), opnd.rb(), \
- Operand(opnd.offset())); \
+ inline void rxe_format(Opcode op, int f1, int f2, int f3, int f4,
+ int f5 = 0) {
+ DCHECK(is_uint12(f4));
+ DCHECK(is_uint16(op));
+ uint64_t code =
+ (getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 12>(f1) | getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 20>(f3) |
+ getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
+ getfield<uint64_t, 6, 32, 36>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op & 0xff));
+ emit6bytes(code);
+ }
+
+#define DECLARE_S390_RXE_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register x2, Register b2, const Operand& d2, \
+ Condition m3 = static_cast<Condition>(0)) { \
+ rxe_format(op_name, r1.code(), x2.code(), b2.code(), d2.immediate(), m3); \
+ } \
+ template <class _R1Type> \
+ void name(_R1Type r1, const MemOperand& opnd) { \
+ name(Register::from_code(r1.code()), opnd.rx(), opnd.rb(), \
+ Operand(opnd.offset())); \
}
S390_RXE_OPCODE_LIST(DECLARE_S390_RXE_INSTRUCTIONS)
#undef DECLARE_S390_RXE_INSTRUCTIONS
+ inline void ri_format(Opcode opcode, int f1, int f2) {
+ uint32_t op1 = opcode >> 4;
+ uint32_t op2 = opcode & 0xf;
+ emit4bytes(
+ getfield<uint32_t, 4, 0, 8>(op1) | getfield<uint32_t, 4, 8, 12>(f1) |
+ getfield<uint32_t, 4, 12, 16>(op2) | getfield<uint32_t, 4, 16, 32>(f2));
+ }
-inline void ri_format(Opcode opcode, int f1, int f2) {
- uint32_t op1 = opcode >> 4;
- uint32_t op2 = opcode & 0xf;
- emit4bytes(getfield<uint32_t, 4, 0, 8>(op1) |
- getfield<uint32_t, 4, 8, 12>(f1) |
- getfield<uint32_t, 4, 12, 16>(op2) |
- getfield<uint32_t, 4, 16, 32>(f2));
-}
-
-#define DECLARE_S390_RI_A_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r, const Operand& i2) { \
- DCHECK(is_uint12(op_name)); \
- DCHECK(is_uint16(i2.immediate()) || is_int16(i2.immediate())); \
- ri_format(op_name, r.code(), i2.immediate()); \
+#define DECLARE_S390_RI_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r, const Operand& i2) { \
+ DCHECK(is_uint12(op_name)); \
+ DCHECK(is_uint16(i2.immediate()) || is_int16(i2.immediate())); \
+ ri_format(op_name, r.code(), i2.immediate()); \
}
S390_RI_A_OPCODE_LIST(DECLARE_S390_RI_A_INSTRUCTIONS)
#undef DECLARE_S390_RI_A_INSTRUCTIONS
-#define DECLARE_S390_RI_B_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, const Operand& imm) { \
- /* 2nd argument encodes # of halfwords, so divide by 2. */ \
- int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2; \
- Operand halfwordOp = Operand(numHalfwords); \
- halfwordOp.setBits(16); \
- ri_format(op_name, r1.code(), halfwordOp.immediate()); \
+#define DECLARE_S390_RI_B_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, const Operand& imm) { \
+ /* 2nd argument encodes # of halfwords, so divide by 2. */ \
+ int16_t numHalfwords = static_cast<int16_t>(imm.immediate()) / 2; \
+ Operand halfwordOp = Operand(numHalfwords); \
+ halfwordOp.setBits(16); \
+ ri_format(op_name, r1.code(), halfwordOp.immediate()); \
}
S390_RI_B_OPCODE_LIST(DECLARE_S390_RI_B_INSTRUCTIONS)
#undef DECLARE_S390_RI_B_INSTRUCTIONS
-#define DECLARE_S390_RI_C_INSTRUCTIONS(name, op_name, op_value) \
- void name(Condition m, const Operand& i2) { \
- DCHECK(is_uint12(op_name)); \
- DCHECK(is_uint4(m)); \
- DCHECK(op_name == BRC ? \
- is_int16(i2.immediate()) : is_uint16(i2.immediate())); \
- ri_format(op_name, m, i2.immediate()); \
+#define DECLARE_S390_RI_C_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Condition m, const Operand& i2) { \
+ DCHECK(is_uint12(op_name)); \
+ DCHECK(is_uint4(m)); \
+ DCHECK(op_name == BRC ? is_int16(i2.immediate()) \
+ : is_uint16(i2.immediate())); \
+ ri_format(op_name, m, i2.immediate()); \
}
S390_RI_C_OPCODE_LIST(DECLARE_S390_RI_C_INSTRUCTIONS)
#undef DECLARE_S390_RI_C_INSTRUCTIONS
+ inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
+ uint32_t code =
+ getfield<uint32_t, 4, 0, 16>(op) | getfield<uint32_t, 4, 16, 20>(f1) |
+ getfield<uint32_t, 4, 20, 24>(f2) | getfield<uint32_t, 4, 24, 28>(f3) |
+ getfield<uint32_t, 4, 28, 32>(f4);
+ emit4bytes(code);
+ }
-inline void rrf_format(Opcode op, int f1, int f2, int f3, int f4) {
- uint32_t code = getfield<uint32_t, 4, 0, 16>(op) |
- getfield<uint32_t, 4, 16, 20>(f1) |
- getfield<uint32_t, 4, 20, 24>(f2) |
- getfield<uint32_t, 4, 24, 28>(f3) |
- getfield<uint32_t, 4, 28, 32>(f4);
- emit4bytes(code);
-}
-
-#define DECLARE_S390_RRF_A_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Condition m4, Register r2, Register r3) { \
- rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
- } \
- void name(Register r1, Register r2, Register r3) { \
- name(r1, Condition(0), r2, r3); \
+#define DECLARE_S390_RRF_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Condition m4, Register r2, Register r3) { \
+ rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
+ } \
+ void name(Register r1, Register r2, Register r3) { \
+ name(r1, Condition(0), r2, r3); \
}
S390_RRF_A_OPCODE_LIST(DECLARE_S390_RRF_A_INSTRUCTIONS)
#undef DECLARE_S390_RRF_A_INSTRUCTIONS
-
-#define DECLARE_S390_RRF_B_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Condition m4, Register r2, Register r3) { \
- rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
- } \
- void name(Register r1, Register r2, Register r3) { \
- name(r1, Condition(0), r2, r3); \
+#define DECLARE_S390_RRF_B_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Condition m4, Register r2, Register r3) { \
+ rrf_format(op_name, r3.code(), m4, r1.code(), r2.code()); \
+ } \
+ void name(Register r1, Register r2, Register r3) { \
+ name(r1, Condition(0), r2, r3); \
}
S390_RRF_B_OPCODE_LIST(DECLARE_S390_RRF_B_INSTRUCTIONS)
#undef DECLARE_S390_RRF_B_INSTRUCTIONS
-
-#define DECLARE_S390_RRF_C_INSTRUCTIONS(name, op_name, op_value) \
- template <class R1, class R2> \
- void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
- rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
- } \
- template <class R1, class R2> \
- void name(Condition m3, R1 r1, R2 r2) { \
- name(m3, Condition(0), r1, r2); \
+#define DECLARE_S390_RRF_C_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1, class R2> \
+ void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
+ rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
+ } \
+ template <class R1, class R2> \
+ void name(Condition m3, R1 r1, R2 r2) { \
+ name(m3, Condition(0), r1, r2); \
}
S390_RRF_C_OPCODE_LIST(DECLARE_S390_RRF_C_INSTRUCTIONS)
#undef DECLARE_S390_RRF_C_INSTRUCTIONS
-
-#define DECLARE_S390_RRF_D_INSTRUCTIONS(name, op_name, op_value) \
- template <class R1, class R2> \
- void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
- rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
- } \
- template <class R1, class R2> \
- void name(Condition m3, R1 r1, R2 r2) { \
- name(m3, Condition(0), r1, r2); \
+#define DECLARE_S390_RRF_D_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1, class R2> \
+ void name(Condition m3, Condition m4, R1 r1, R2 r2) { \
+ rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
+ } \
+ template <class R1, class R2> \
+ void name(Condition m3, R1 r1, R2 r2) { \
+ name(m3, Condition(0), r1, r2); \
}
S390_RRF_D_OPCODE_LIST(DECLARE_S390_RRF_D_INSTRUCTIONS)
#undef DECLARE_S390_RRF_D_INSTRUCTIONS
-
-#define DECLARE_S390_RRF_E_INSTRUCTIONS(name, op_name, op_value) \
- template <class M3, class M4, class R1, class R2> \
- void name(M3 m3, M4 m4, R1 r1, R2 r2) { \
- rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
- } \
- template <class M3, class R1, class R2> \
- void name(M3 m3, R1 r1, R2 r2) { \
- name(m3, Condition(0), r1, r2); \
+#define DECLARE_S390_RRF_E_INSTRUCTIONS(name, op_name, op_value) \
+ template <class M3, class M4, class R1, class R2> \
+ void name(M3 m3, M4 m4, R1 r1, R2 r2) { \
+ rrf_format(op_name, m3, m4, r1.code(), r2.code()); \
+ } \
+ template <class M3, class R1, class R2> \
+ void name(M3 m3, R1 r1, R2 r2) { \
+ name(m3, Condition(0), r1, r2); \
}
S390_RRF_E_OPCODE_LIST(DECLARE_S390_RRF_E_INSTRUCTIONS)
#undef DECLARE_S390_RRF_E_INSTRUCTIONS
-enum FIDBRA_FLAGS {
- FIDBRA_CURRENT_ROUNDING_MODE = 0,
- FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0 = 1,
- // ...
- FIDBRA_ROUND_TOWARD_0 = 5,
- FIDBRA_ROUND_TOWARD_POS_INF = 6,
- FIDBRA_ROUND_TOWARD_NEG_INF = 7
-};
-
+ enum FIDBRA_FLAGS {
+ FIDBRA_CURRENT_ROUNDING_MODE = 0,
+ FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0 = 1,
+ // ...
+ FIDBRA_ROUND_TOWARD_0 = 5,
+ FIDBRA_ROUND_TOWARD_POS_INF = 6,
+ FIDBRA_ROUND_TOWARD_NEG_INF = 7
+ };
-inline void rsi_format(Opcode op, int f1, int f2, int f3) {
- DCHECK(is_uint8(op));
- DCHECK(is_uint16(f3) || is_int16(f3));
- uint32_t code = getfield<uint32_t, 4, 0, 8>(op) |
- getfield<uint32_t, 4, 8, 12>(f1) |
- getfield<uint32_t, 4, 12, 16>(f2) |
- getfield<uint32_t, 4, 16, 32>(f3);
- emit4bytes(code);
-}
+ inline void rsi_format(Opcode op, int f1, int f2, int f3) {
+ DCHECK(is_uint8(op));
+ DCHECK(is_uint16(f3) || is_int16(f3));
+ uint32_t code =
+ getfield<uint32_t, 4, 0, 8>(op) | getfield<uint32_t, 4, 8, 12>(f1) |
+ getfield<uint32_t, 4, 12, 16>(f2) | getfield<uint32_t, 4, 16, 32>(f3);
+ emit4bytes(code);
+ }
-#define DECLARE_S390_RSI_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Register r3, const Operand& i2) { \
- rsi_format(op_name, r1.code(), r3.code(), i2.immediate()); \
+#define DECLARE_S390_RSI_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r3, const Operand& i2) { \
+ rsi_format(op_name, r1.code(), r3.code(), i2.immediate()); \
}
S390_RSI_OPCODE_LIST(DECLARE_S390_RSI_INSTRUCTIONS)
#undef DECLARE_S390_RSI_INSTRUCTIONS
+ inline void rsl_format(Opcode op, uint16_t f1, int f2, int f3, int f4,
+ int f5) {
+ DCHECK(is_uint16(op));
+ uint64_t code =
+ getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 16>(f1) | getfield<uint64_t, 6, 16, 20>(f2) |
+ getfield<uint64_t, 6, 20, 32>(f3) | getfield<uint64_t, 6, 32, 36>(f4) |
+ getfield<uint64_t, 6, 36, 40>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
+ emit6bytes(code);
+ }
-inline void rsl_format(Opcode op, uint16_t f1, int f2, int f3, int f4,
- int f5) {
- DCHECK(is_uint16(op));
- uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
- getfield<uint64_t, 6, 8, 16>(f1) |
- getfield<uint64_t, 6, 16, 20>(f2) |
- getfield<uint64_t, 6, 20, 32>(f3) |
- getfield<uint64_t, 6, 32, 36>(f4) |
- getfield<uint64_t, 6, 36, 40>(f5) |
- getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
- emit6bytes(code);
-}
-
-#define DECLARE_S390_RSL_A_INSTRUCTIONS(name, op_name, op_value) \
- void name(const Operand& l1, Register b1, const Operand& d1) { \
- uint16_t L = static_cast<uint16_t>(l1.immediate() << 8); \
- rsl_format(op_name, L, b1.code(), d1.immediate(), 0, 0); \
+#define DECLARE_S390_RSL_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(const Operand& l1, Register b1, const Operand& d1) { \
+ uint16_t L = static_cast<uint16_t>(l1.immediate() << 8); \
+ rsl_format(op_name, L, b1.code(), d1.immediate(), 0, 0); \
}
S390_RSL_A_OPCODE_LIST(DECLARE_S390_RSL_A_INSTRUCTIONS)
#undef DECLARE_S390_RSL_A_INSTRUCTIONS
-#define DECLARE_S390_RSL_B_INSTRUCTIONS(name, op_name, op_value) \
- void name(const Operand& l2, Register b2, const Operand& d2, \
- Register r1, Condition m3) { \
- uint16_t L = static_cast<uint16_t>(l2.immediate()); \
- rsl_format(op_name, L, b2.code(), d2.immediate(), r1.code(), m3); \
+#define DECLARE_S390_RSL_B_INSTRUCTIONS(name, op_name, op_value) \
+ void name(const Operand& l2, Register b2, const Operand& d2, Register r1, \
+ Condition m3) { \
+ uint16_t L = static_cast<uint16_t>(l2.immediate()); \
+ rsl_format(op_name, L, b2.code(), d2.immediate(), r1.code(), m3); \
}
S390_RSL_B_OPCODE_LIST(DECLARE_S390_RSL_B_INSTRUCTIONS)
#undef DECLARE_S390_RSL_B_INSTRUCTIONS
-
-inline void s_format(Opcode op, int f1, int f2) {
- DCHECK_NE(op & 0xff00, 0);
- DCHECK(is_uint12(f2));
- uint32_t code = getfield<uint32_t, 4, 0, 16>(op) |
- getfield<uint32_t, 4, 16, 20>(f1) |
- getfield<uint32_t, 4, 20, 32>(f2);
- emit4bytes(code);
-}
-
-#define DECLARE_S390_S_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register b1, const Operand& d2) { \
- Opcode op = op_name; \
- if ((op & 0xFF00) == 0) { \
- op = (Opcode)(op << 8); \
- } \
- s_format(op, b1.code(), d2.immediate()); \
- } \
- void name(const MemOperand& opnd) { \
- Operand d2 = Operand(opnd.getDisplacement()); \
- name(opnd.getBaseRegister(), d2); \
+ inline void s_format(Opcode op, int f1, int f2) {
+ DCHECK_NE(op & 0xff00, 0);
+ DCHECK(is_uint12(f2));
+ uint32_t code = getfield<uint32_t, 4, 0, 16>(op) |
+ getfield<uint32_t, 4, 16, 20>(f1) |
+ getfield<uint32_t, 4, 20, 32>(f2);
+ emit4bytes(code);
+ }
+
+#define DECLARE_S390_S_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register b1, const Operand& d2) { \
+ Opcode op = op_name; \
+ if ((op & 0xFF00) == 0) { \
+ op = (Opcode)(op << 8); \
+ } \
+ s_format(op, b1.code(), d2.immediate()); \
+ } \
+ void name(const MemOperand& opnd) { \
+ Operand d2 = Operand(opnd.getDisplacement()); \
+ name(opnd.getBaseRegister(), d2); \
}
S390_S_OPCODE_LIST(DECLARE_S390_S_INSTRUCTIONS)
#undef DECLARE_S390_S_INSTRUCTIONS
+ inline void si_format(Opcode op, int f1, int f2, int f3) {
+ uint32_t code =
+ getfield<uint32_t, 4, 0, 8>(op) | getfield<uint32_t, 4, 8, 16>(f1) |
+ getfield<uint32_t, 4, 16, 20>(f2) | getfield<uint32_t, 4, 20, 32>(f3);
+ emit4bytes(code);
+ }
-inline void si_format(Opcode op, int f1, int f2, int f3) {
- uint32_t code = getfield<uint32_t, 4, 0, 8>(op) |
- getfield<uint32_t, 4, 8, 16>(f1) |
- getfield<uint32_t, 4, 16, 20>(f2) |
- getfield<uint32_t, 4, 20, 32>(f3);
- emit4bytes(code);
-}
-
-#define DECLARE_S390_SI_INSTRUCTIONS(name, op_name, op_value) \
- void name(const Operand& i2, Register b1, const Operand& d1) { \
- si_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
- } \
- void name(const MemOperand& opnd, const Operand& i2) { \
- name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+#define DECLARE_S390_SI_INSTRUCTIONS(name, op_name, op_value) \
+ void name(const Operand& i2, Register b1, const Operand& d1) { \
+ si_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
+ } \
+ void name(const MemOperand& opnd, const Operand& i2) { \
+ name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_SI_OPCODE_LIST(DECLARE_S390_SI_INSTRUCTIONS)
#undef DECLARE_S390_SI_INSTRUCTIONS
-
-inline void siy_format(Opcode op, int f1, int f2, int f3) {
- DCHECK(is_uint20(f3) || is_int20(f3));
- DCHECK(is_uint16(op));
- DCHECK(is_uint8(f1) || is_int8(f1));
- uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
- getfield<uint64_t, 6, 8, 16>(f1) |
- getfield<uint64_t, 6, 16, 20>(f2) |
- getfield<uint64_t, 6, 20, 32>(f3) |
- getfield<uint64_t, 6, 32, 40>(f3 >> 12) |
- getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
- emit6bytes(code);
-}
-
-#define DECLARE_S390_SIY_INSTRUCTIONS(name, op_name, op_value) \
- void name(const Operand& i2, Register b1, const Operand& d1) { \
- siy_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
- } \
- void name(const MemOperand& opnd, const Operand& i2) { \
- name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
+ inline void siy_format(Opcode op, int f1, int f2, int f3) {
+ DCHECK(is_uint20(f3) || is_int20(f3));
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint8(f1) || is_int8(f1));
+ uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 16>(f1) |
+ getfield<uint64_t, 6, 16, 20>(f2) |
+ getfield<uint64_t, 6, 20, 32>(f3) |
+ getfield<uint64_t, 6, 32, 40>(f3 >> 12) |
+ getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
+ emit6bytes(code);
+ }
+
+#define DECLARE_S390_SIY_INSTRUCTIONS(name, op_name, op_value) \
+ void name(const Operand& i2, Register b1, const Operand& d1) { \
+ siy_format(op_name, i2.immediate(), b1.code(), d1.immediate()); \
+ } \
+ void name(const MemOperand& opnd, const Operand& i2) { \
+ name(i2, opnd.getBaseRegister(), Operand(opnd.getDisplacement())); \
}
S390_SIY_OPCODE_LIST(DECLARE_S390_SIY_INSTRUCTIONS)
#undef DECLARE_S390_SIY_INSTRUCTIONS
-
-inline void rrs_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
- DCHECK(is_uint12(f4));
- DCHECK(is_uint16(op));
- uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
- getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) |
- getfield<uint64_t, 6, 16, 20>(f3) |
- getfield<uint64_t, 6, 20, 32>(f4) |
- getfield<uint64_t, 6, 32, 36>(f5) |
- getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
- emit6bytes(code);
-}
-
-#define DECLARE_S390_RRS_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Register r2, Register b4, const Operand& d4, \
- Condition m3) { \
- rrs_format(op_name, r1.code(), r2.code(), b4.code(), d4.immediate(), \
- m3); \
- } \
- void name(Register r1, Register r2, Condition m3, \
- const MemOperand& opnd) { \
- name(r1, r2, opnd.getBaseRegister(), \
- Operand(opnd.getDisplacement()), m3); \
+ inline void rrs_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
+ DCHECK(is_uint12(f4));
+ DCHECK(is_uint16(op));
+ uint64_t code =
+ getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 12>(f1) | getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 20>(f3) | getfield<uint64_t, 6, 20, 32>(f4) |
+ getfield<uint64_t, 6, 32, 36>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
+ emit6bytes(code);
+ }
+
+#define DECLARE_S390_RRS_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r2, Register b4, const Operand& d4, \
+ Condition m3) { \
+ rrs_format(op_name, r1.code(), r2.code(), b4.code(), d4.immediate(), m3); \
+ } \
+ void name(Register r1, Register r2, Condition m3, const MemOperand& opnd) { \
+ name(r1, r2, opnd.getBaseRegister(), Operand(opnd.getDisplacement()), m3); \
}
S390_RRS_OPCODE_LIST(DECLARE_S390_RRS_INSTRUCTIONS)
#undef DECLARE_S390_RRS_INSTRUCTIONS
-
-inline void ris_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
- DCHECK(is_uint12(f3));
- DCHECK(is_uint16(op));
- DCHECK(is_uint8(f5));
- uint64_t code = getfield<uint64_t, 6, 0, 8>(op >> 8) |
- getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) |
- getfield<uint64_t, 6, 16, 20>(f3) |
- getfield<uint64_t, 6, 20, 32>(f4) |
- getfield<uint64_t, 6, 32, 40>(f5) |
- getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
- emit6bytes(code);
-}
-
-#define DECLARE_S390_RIS_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Condition m3, Register b4, const Operand& d4, \
- const Operand& i2) { \
- ris_format(op_name, r1.code(), m3, b4.code(), d4.immediate(), \
- i2.immediate()); \
- } \
- void name(Register r1, const Operand& i2, Condition m3, \
- const MemOperand& opnd) { \
- name(r1, m3, opnd.getBaseRegister(), \
- Operand(opnd.getDisplacement()), i2); \
+ inline void ris_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
+ DCHECK(is_uint12(f3));
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint8(f5));
+ uint64_t code =
+ getfield<uint64_t, 6, 0, 8>(op >> 8) |
+ getfield<uint64_t, 6, 8, 12>(f1) | getfield<uint64_t, 6, 12, 16>(f2) |
+ getfield<uint64_t, 6, 16, 20>(f3) | getfield<uint64_t, 6, 20, 32>(f4) |
+ getfield<uint64_t, 6, 32, 40>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op & 0x00FF);
+ emit6bytes(code);
+ }
+
+#define DECLARE_S390_RIS_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Condition m3, Register b4, const Operand& d4, \
+ const Operand& i2) { \
+ ris_format(op_name, r1.code(), m3, b4.code(), d4.immediate(), \
+ i2.immediate()); \
+ } \
+ void name(Register r1, const Operand& i2, Condition m3, \
+ const MemOperand& opnd) { \
+ name(r1, m3, opnd.getBaseRegister(), Operand(opnd.getDisplacement()), i2); \
}
S390_RIS_OPCODE_LIST(DECLARE_S390_RIS_INSTRUCTIONS)
#undef DECLARE_S390_RIS_INSTRUCTIONS
+ inline void sil_format(Opcode op, int f1, int f2, int f3) {
+ DCHECK(is_uint12(f2));
+ DCHECK(is_uint16(op));
+ DCHECK(is_uint16(f3));
+ uint64_t code =
+ getfield<uint64_t, 6, 0, 16>(op) | getfield<uint64_t, 6, 16, 20>(f1) |
+ getfield<uint64_t, 6, 20, 32>(f2) | getfield<uint64_t, 6, 32, 48>(f3);
+ emit6bytes(code);
+ }
-inline void sil_format(Opcode op, int f1, int f2, int f3) {
- DCHECK(is_uint12(f2));
- DCHECK(is_uint16(op));
- DCHECK(is_uint16(f3));
- uint64_t code = getfield<uint64_t, 6, 0, 16>(op) |
- getfield<uint64_t, 6, 16, 20>(f1) |
- getfield<uint64_t, 6, 20, 32>(f2) |
- getfield<uint64_t, 6, 32, 48>(f3);
- emit6bytes(code);
-}
-
-#define DECLARE_S390_SIL_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register b1, const Operand& d1, const Operand& i2) { \
- sil_format(op_name, b1.code(), d1.immediate(), i2.immediate()); \
- } \
- void name(const MemOperand& opnd, const Operand& i2) { \
- name(opnd.getBaseRegister(), Operand(opnd.getDisplacement()), i2); \
+#define DECLARE_S390_SIL_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register b1, const Operand& d1, const Operand& i2) { \
+ sil_format(op_name, b1.code(), d1.immediate(), i2.immediate()); \
+ } \
+ void name(const MemOperand& opnd, const Operand& i2) { \
+ name(opnd.getBaseRegister(), Operand(opnd.getDisplacement()), i2); \
}
S390_SIL_OPCODE_LIST(DECLARE_S390_SIL_INSTRUCTIONS)
#undef DECLARE_S390_SIL_INSTRUCTIONS
+ inline void rie_d_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ uint64_t code =
+ getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
+ getfield<uint64_t, 6, 32, 40>(f4) | getfield<uint64_t, 6, 40, 48>(op2);
+ emit6bytes(code);
+ }
-inline void rie_d_format(Opcode opcode, int f1, int f2, int f3, int f4) {
- uint32_t op1 = opcode >> 8;
- uint32_t op2 = opcode & 0xff;
- uint64_t code = getfield<uint64_t, 6, 0, 8>(op1) |
- getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) |
- getfield<uint64_t, 6, 16, 32>(f3) |
- getfield<uint64_t, 6, 32, 40>(f4) |
- getfield<uint64_t, 6, 40, 48>(op2);
- emit6bytes(code);
-}
-
-#define DECLARE_S390_RIE_D_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Register r3, const Operand& i2) { \
- rie_d_format(op_name, r1.code(), r3.code(), i2.immediate(), 0); \
+#define DECLARE_S390_RIE_D_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r3, const Operand& i2) { \
+ rie_d_format(op_name, r1.code(), r3.code(), i2.immediate(), 0); \
}
S390_RIE_D_OPCODE_LIST(DECLARE_S390_RIE_D_INSTRUCTIONS)
#undef DECLARE_S390_RIE_D_INSTRUCTIONS
+ inline void rie_e_format(Opcode opcode, int f1, int f2, int f3) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ uint64_t code =
+ getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
+ getfield<uint64_t, 6, 40, 48>(op2);
+ emit6bytes(code);
+ }
-inline void rie_e_format(Opcode opcode, int f1, int f2, int f3) {
- uint32_t op1 = opcode >> 8;
- uint32_t op2 = opcode & 0xff;
- uint64_t code = getfield<uint64_t, 6, 0, 8>(op1) |
- getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) |
- getfield<uint64_t, 6, 16, 32>(f3) |
- getfield<uint64_t, 6, 40, 48>(op2);
- emit6bytes(code);
-}
-
-#define DECLARE_S390_RIE_E_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register r1, Register r3, const Operand& i2) { \
- rie_e_format(op_name, r1.code(), r3.code(), i2.immediate()); \
+#define DECLARE_S390_RIE_E_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register r1, Register r3, const Operand& i2) { \
+ rie_e_format(op_name, r1.code(), r3.code(), i2.immediate()); \
}
S390_RIE_E_OPCODE_LIST(DECLARE_S390_RIE_E_INSTRUCTIONS)
#undef DECLARE_S390_RIE_E_INSTRUCTIONS
+ inline void rie_f_format(Opcode opcode, int f1, int f2, int f3, int f4,
+ int f5) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ uint64_t code =
+ getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 24>(f3) |
+ getfield<uint64_t, 6, 24, 32>(f4) | getfield<uint64_t, 6, 32, 40>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op2);
+ emit6bytes(code);
+ }
-inline void rie_f_format(Opcode opcode, int f1, int f2, int f3, int f4,
- int f5) {
- uint32_t op1 = opcode >> 8;
- uint32_t op2 = opcode & 0xff;
- uint64_t code = getfield<uint64_t, 6, 0, 8>(op1) |
- getfield<uint64_t, 6, 8, 12>(f1) |
- getfield<uint64_t, 6, 12, 16>(f2) |
- getfield<uint64_t, 6, 16, 24>(f3) |
- getfield<uint64_t, 6, 24, 32>(f4) |
- getfield<uint64_t, 6, 32, 40>(f5) |
- getfield<uint64_t, 6, 40, 48>(op2);
- emit6bytes(code);
-}
-
-#define DECLARE_S390_RIE_F_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register dst, Register src, const Operand& startBit, \
- const Operand& endBit, const Operand& shiftAmt) { \
- DCHECK(is_uint8(startBit.immediate())); \
- DCHECK(is_uint8(endBit.immediate())); \
- DCHECK(is_uint8(shiftAmt.immediate())); \
- rie_f_format(op_name, dst.code(), src.code(), startBit.immediate(), \
- endBit.immediate(), shiftAmt.immediate()); \
+#define DECLARE_S390_RIE_F_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register dst, Register src, const Operand& startBit, \
+ const Operand& endBit, const Operand& shiftAmt) { \
+ DCHECK(is_uint8(startBit.immediate())); \
+ DCHECK(is_uint8(endBit.immediate())); \
+ DCHECK(is_uint8(shiftAmt.immediate())); \
+ rie_f_format(op_name, dst.code(), src.code(), startBit.immediate(), \
+ endBit.immediate(), shiftAmt.immediate()); \
}
S390_RIE_F_OPCODE_LIST(DECLARE_S390_RIE_F_INSTRUCTIONS)
#undef DECLARE_S390_RIE_F_INSTRUCTIONS
-
-inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
- DCHECK(is_uint12(f5));
- DCHECK(is_uint12(f3));
- DCHECK(is_uint8(f1));
- DCHECK(is_uint8(op));
- uint64_t code = getfield<uint64_t, 6, 0, 8>(op) |
- getfield<uint64_t, 6, 8, 16>(f1) |
- getfield<uint64_t, 6, 16, 20>(f2) |
- getfield<uint64_t, 6, 20, 32>(f3) |
- getfield<uint64_t, 6, 32, 36>(f4) |
- getfield<uint64_t, 6, 36, 48>(f5);
- emit6bytes(code);
-}
-
-#define DECLARE_S390_SS_A_INSTRUCTIONS(name, op_name, op_value) \
- void name(Register b1, const Operand& d1, Register b2, \
- const Operand& d2, const Operand& length) { \
- ss_a_format(op_name, length.immediate(), b1.code(), d1.immediate(), \
- b2.code(), d2.immediate()); \
- } \
- void name(const MemOperand& opnd1, const MemOperand& opnd2, \
- const Operand& length) { \
- ss_a_format(op_name, length.immediate(), \
- opnd1.getBaseRegister().code(), \
- opnd1.getDisplacement(), opnd2.getBaseRegister().code(), \
- opnd2.getDisplacement()); \
+ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
+ DCHECK(is_uint12(f5));
+ DCHECK(is_uint12(f3));
+ DCHECK(is_uint8(f1));
+ DCHECK(is_uint8(op));
+ uint64_t code =
+ getfield<uint64_t, 6, 0, 8>(op) | getfield<uint64_t, 6, 8, 16>(f1) |
+ getfield<uint64_t, 6, 16, 20>(f2) | getfield<uint64_t, 6, 20, 32>(f3) |
+ getfield<uint64_t, 6, 32, 36>(f4) | getfield<uint64_t, 6, 36, 48>(f5);
+ emit6bytes(code);
+ }
+
+#define DECLARE_S390_SS_A_INSTRUCTIONS(name, op_name, op_value) \
+ void name(Register b1, const Operand& d1, Register b2, const Operand& d2, \
+ const Operand& length) { \
+ ss_a_format(op_name, length.immediate(), b1.code(), d1.immediate(), \
+ b2.code(), d2.immediate()); \
+ } \
+ void name(const MemOperand& opnd1, const MemOperand& opnd2, \
+ const Operand& length) { \
+ ss_a_format(op_name, length.immediate(), opnd1.getBaseRegister().code(), \
+ opnd1.getDisplacement(), opnd2.getBaseRegister().code(), \
+ opnd2.getDisplacement()); \
}
S390_SS_A_OPCODE_LIST(DECLARE_S390_SS_A_INSTRUCTIONS)
#undef DECLARE_S390_SS_A_INSTRUCTIONS
-
// Helper for unconditional branch to Label with update to save register
void b(Register r, Label* l) {
int32_t halfwords = branch_offset(l) / 2;
@@ -1151,6 +1087,7 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
(static_cast<uint64_t>(m5 & 0xF)) * B20 | \
(static_cast<uint64_t>(m4 & 0xF)) * B16 | \
(static_cast<uint64_t>(m3 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
(static_cast<uint64_t>(opcode_value & 0x00FF)); \
emit6bytes(code); \
}
@@ -1167,12 +1104,141 @@ inline void ss_a_format(Opcode op, int f1, int f2, int f3, int f4, int f5) {
(static_cast<uint64_t>(m6 & 0xF)) * B20 | \
(static_cast<uint64_t>(m5 & 0xF)) * B16 | \
(static_cast<uint64_t>(m4 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
(static_cast<uint64_t>(opcode_value & 0x00FF)); \
emit6bytes(code); \
}
S390_VRR_C_OPCODE_LIST(DECLARE_VRR_C_INSTRUCTIONS)
#undef DECLARE_VRR_C_INSTRUCTIONS
+#define DECLARE_VRR_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3, \
+ Condition m5, Condition m4) { \
+ uint64_t code = (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(v1.code())) * B36 | \
+ (static_cast<uint64_t>(v2.code())) * B32 | \
+ (static_cast<uint64_t>(v3.code())) * B28 | \
+ (static_cast<uint64_t>(m5 & 0xF)) * B20 | \
+ (static_cast<uint64_t>(m4 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRR_B_OPCODE_LIST(DECLARE_VRR_B_INSTRUCTIONS)
+#undef DECLARE_VRR_B_INSTRUCTIONS
+
+#define DECLARE_VRR_E_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3, \
+ DoubleRegister v4, Condition m6, Condition m5) { \
+ uint64_t code = (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(v1.code())) * B36 | \
+ (static_cast<uint64_t>(v2.code())) * B32 | \
+ (static_cast<uint64_t>(v3.code())) * B28 | \
+ (static_cast<uint64_t>(m6 & 0xF)) * B24 | \
+ (static_cast<uint64_t>(m5 & 0xF)) * B16 | \
+ (static_cast<uint64_t>(v4.code())) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRR_E_OPCODE_LIST(DECLARE_VRR_E_INSTRUCTIONS)
+#undef DECLARE_VRR_E_INSTRUCTIONS
+
+#define DECLARE_VRX_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(DoubleRegister v1, const MemOperand& opnd, Condition m3) { \
+ uint64_t code = \
+ (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(v1.code())) * B36 | \
+ (static_cast<uint64_t>(opnd.getIndexRegister().code())) * B32 | \
+ (static_cast<uint64_t>(opnd.getBaseRegister().code())) * B28 | \
+ (static_cast<uint64_t>(opnd.getDisplacement())) * B16 | \
+ (static_cast<uint64_t>(m3 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRX_OPCODE_LIST(DECLARE_VRX_INSTRUCTIONS)
+#undef DECLARE_VRX_INSTRUCTIONS
+
+#define DECLARE_VRS_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(DoubleRegister v1, DoubleRegister v2, const MemOperand& opnd, \
+ Condition m4 = Condition(0)) { \
+ uint64_t code = \
+ (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(v1.code())) * B36 | \
+ (static_cast<uint64_t>(v2.code())) * B32 | \
+ (static_cast<uint64_t>(opnd.getBaseRegister().code())) * B28 | \
+ (static_cast<uint64_t>(opnd.getDisplacement())) * B16 | \
+ (static_cast<uint64_t>(m4 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRS_A_OPCODE_LIST(DECLARE_VRS_A_INSTRUCTIONS)
+#undef DECLARE_VRS_A_INSTRUCTIONS
+
+#define DECLARE_VRS_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(DoubleRegister v1, Register r1, const MemOperand& opnd, \
+ Condition m4 = Condition(0)) { \
+ uint64_t code = \
+ (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(v1.code())) * B36 | \
+ (static_cast<uint64_t>(r1.code())) * B32 | \
+ (static_cast<uint64_t>(opnd.getBaseRegister().code())) * B28 | \
+ (static_cast<uint64_t>(opnd.getDisplacement())) * B16 | \
+ (static_cast<uint64_t>(m4 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRS_B_OPCODE_LIST(DECLARE_VRS_B_INSTRUCTIONS)
+#undef DECLARE_VRS_B_INSTRUCTIONS
+
+#define DECLARE_VRS_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(Register r1, DoubleRegister v1, const MemOperand& opnd, \
+ Condition m4 = Condition(0)) { \
+ uint64_t code = \
+ (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(r1.code())) * B36 | \
+ (static_cast<uint64_t>(v1.code())) * B32 | \
+ (static_cast<uint64_t>(opnd.getBaseRegister().code())) * B28 | \
+ (static_cast<uint64_t>(opnd.getDisplacement())) * B16 | \
+ (static_cast<uint64_t>(m4 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRS_C_OPCODE_LIST(DECLARE_VRS_C_INSTRUCTIONS)
+#undef DECLARE_VRS_C_INSTRUCTIONS
+
+#define DECLARE_VRI_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(DoubleRegister v1, const Operand& i2, Condition m3) { \
+ uint64_t code = (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(v1.code())) * B36 | \
+ (static_cast<uint32_t>(i2.immediate())) * B16 | \
+ (static_cast<uint64_t>(m3 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRI_A_OPCODE_LIST(DECLARE_VRI_A_INSTRUCTIONS)
+#undef DECLARE_VRI_A_INSTRUCTIONS
+
+#define DECLARE_VRI_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(DoubleRegister v1, DoubleRegister v2, const Operand& i2, \
+ Condition m4) { \
+ uint64_t code = (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(v1.code())) * B36 | \
+ (static_cast<uint64_t>(v2.code())) * B32 | \
+ (static_cast<uint16_t>(i2.immediate())) * B16 | \
+ (static_cast<uint64_t>(m4 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(0)) * B8 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRI_C_OPCODE_LIST(DECLARE_VRI_C_INSTRUCTIONS)
+#undef DECLARE_VRI_C_INSTRUCTIONS
+
// Single Element format
void vfa(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3) {
vfa(v1, v2, v3, static_cast<Condition>(0), static_cast<Condition>(8),
@@ -1399,4 +1465,4 @@ class EnsureSpace {
} // namespace internal
} // namespace v8
-#endif // V8_S390_ASSEMBLER_S390_H_
+#endif // V8_CODEGEN_S390_ASSEMBLER_S390_H_
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/codegen/s390/code-stubs-s390.cc
index 688b6bc816..f85c309943 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/codegen/s390/code-stubs-s390.cc
@@ -4,26 +4,24 @@
#if V8_TARGET_ARCH_S390
-#include "src/api-arguments-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-arguments-inl.h"
#include "src/base/bits.h"
-#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/macro-assembler.h"
+#include "src/init/bootstrapper.h"
#include "src/objects/api-callbacks.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
-namespace internal {
-
-} // namespace internal
+namespace internal {} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/s390/constants-s390.cc b/deps/v8/src/codegen/s390/constants-s390.cc
index bda7f61cf4..81036c285b 100644
--- a/deps/v8/src/s390/constants-s390.cc
+++ b/deps/v8/src/codegen/s390/constants-s390.cc
@@ -4,7 +4,7 @@
#if V8_TARGET_ARCH_S390
-#include "src/s390/constants-s390.h"
+#include "src/codegen/s390/constants-s390.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/codegen/s390/constants-s390.h
index fff6efacab..1d23294ae4 100644
--- a/deps/v8/src/s390/constants-s390.h
+++ b/deps/v8/src/codegen/s390/constants-s390.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_S390_CONSTANTS_S390_H_
-#define V8_S390_CONSTANTS_S390_H_
+#ifndef V8_CODEGEN_S390_CONSTANTS_S390_H_
+#define V8_CODEGEN_S390_CONSTANTS_S390_H_
// Get the standard printf format macros for C99 stdint types.
#ifndef __STDC_FORMAT_MACROS
@@ -15,7 +15,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
// UNIMPLEMENTED_ macro for S390.
#ifdef DEBUG
@@ -147,10 +147,10 @@ inline Condition NegateCondition(Condition cond) {
// representing instructions from usual 32 bit values.
// Instruction objects are pointers to 32bit values, and provide methods to
// access the various ISA fields.
-typedef int32_t Instr;
-typedef uint16_t TwoByteInstr;
-typedef uint32_t FourByteInstr;
-typedef uint64_t SixByteInstr;
+using Instr = int32_t;
+using TwoByteInstr = uint16_t;
+using FourByteInstr = uint32_t;
+using SixByteInstr = uint64_t;
#define S390_RSY_A_OPCODE_LIST(V) \
V(lmg, LMG, 0xEB04) /* type = RSY_A LOAD MULTIPLE (64) */ \
@@ -559,6 +559,8 @@ typedef uint64_t SixByteInstr;
V(vfce, VFCE, 0xE7E8) /* type = VRR_C VECTOR FP COMPARE EQUAL */ \
V(vfche, VFCHE, 0xE7EA) /* type = VRR_C VECTOR FP COMPARE HIGH OR EQUAL */ \
V(vfch, VFCH, 0xE7EB) /* type = VRR_C VECTOR FP COMPARE HIGH */ \
+ V(vfmax, VFMAX, 0xE7EF) /* type = VRR_C VECTOR FP MAXIMUM */ \
+ V(vfmin, VFMIN, 0xE7EE) /* type = VRR_C VECTOR FP MINIMUM */ \
V(vavgl, VAVGL, 0xE7F0) /* type = VRR_C VECTOR AVERAGE LOGICAL */ \
V(vacc, VACC, 0xE7F1) /* type = VRR_C VECTOR ADD COMPUTE CARRY */ \
V(vavg, VAVG, 0xE7F2) /* type = VRR_C VECTOR AVERAGE */ \
@@ -1157,7 +1159,7 @@ typedef uint64_t SixByteInstr;
V(sth, STH, 0x40) /* type = RX_A STORE HALFWORD (16) */
#define S390_RX_B_OPCODE_LIST(V) \
- V(bc, BC, 0x47) /* type = RX_B BRANCH ON CONDITION */
+ V(bc, BC, 0x47) /* type = RX_B BRANCH ON CONDITION */
#define S390_RIE_A_OPCODE_LIST(V) \
V(cgit, CGIT, 0xEC70) /* type = RIE_A COMPARE IMMEDIATE AND TRAP (64<-16) */ \
@@ -2288,6 +2290,24 @@ class RIEInstruction : Instruction {
};
// VRR Instruction
+class VRR_A_Instruction : SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R2Value, int, 12, 16)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M5Value, uint32_t, 24, 28)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M4Value, uint32_t, 28, 32)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M3Value, uint32_t, 32, 36)
+};
+
+class VRR_B_Instruction : SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R2Value, int, 12, 16)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R3Value, int, 16, 20)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M5Value, uint32_t, 24, 28)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M4Value, uint32_t, 32, 36)
+};
+
class VRR_C_Instruction : SixByteInstruction {
public:
DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
@@ -2298,6 +2318,49 @@ class VRR_C_Instruction : SixByteInstruction {
DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M4Value, uint32_t, 32, 36)
};
+class VRR_E_Instruction : SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R2Value, int, 12, 16)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R3Value, int, 16, 20)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R4Value, int, 32, 36)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M6Value, uint32_t, 20, 24)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M5Value, uint32_t, 28, 32)
+};
+
+class VRX_Instruction : SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(X2Value, int, 12, 16)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(B2Value, int, 16, 20)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(D2Value, int, 20, 32)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M3Value, uint32_t, 32, 36)
+};
+
+class VRS_Instruction : SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R3Value, int, 12, 16)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(B2Value, int, 16, 20)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(D2Value, int, 20, 32)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M4Value, uint32_t, 32, 36)
+};
+
+class VRI_A_Instruction : SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(I2Value, int, 16, 32)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M3Value, uint32_t, 32, 36)
+};
+
+class VRI_C_Instruction : SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R3Value, int, 12, 16)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(I2Value, int, 16, 32)
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M4Value, uint32_t, 32, 36)
+};
+
// Helper functions for converting between register numbers and names.
class Registers {
public:
@@ -2321,4 +2384,4 @@ class DoubleRegisters {
} // namespace internal
} // namespace v8
-#endif // V8_S390_CONSTANTS_S390_H_
+#endif // V8_CODEGEN_S390_CONSTANTS_S390_H_
diff --git a/deps/v8/src/s390/cpu-s390.cc b/deps/v8/src/codegen/s390/cpu-s390.cc
index e00495ae09..748f4028b0 100644
--- a/deps/v8/src/s390/cpu-s390.cc
+++ b/deps/v8/src/codegen/s390/cpu-s390.cc
@@ -5,7 +5,7 @@
// CPU specific code for s390 independent of OS goes here.
#if V8_TARGET_ARCH_S390
-#include "src/cpu-features.h"
+#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
index 70033be214..1f65065fb7 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/codegen/s390/interface-descriptors-s390.cc
@@ -4,9 +4,9 @@
#if V8_TARGET_ARCH_S390
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 87f6499499..ff94fa839e 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -9,26 +9,26 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
-#include "src/bootstrapper.h"
-#include "src/callable.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
-#include "src/external-reference-table.h"
-#include "src/frames-inl.h"
+#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
-#include "src/macro-assembler.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-code-manager.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
-#include "src/s390/macro-assembler-s390.h"
+#include "src/codegen/s390/macro-assembler-s390.h"
#endif
namespace v8 {
@@ -175,7 +175,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependent(builtin_index);
if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ target_is_isolate_independent_builtin) {
Label skip;
if (cond != al) {
b(NegateCondition(cond), &skip, Label::kNear);
@@ -236,7 +236,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Builtins::IsIsolateIndependent(builtin_index);
if (options().inline_offheap_trampolines &&
- target_is_isolate_independent_builtin) {
+ target_is_isolate_independent_builtin) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
@@ -316,8 +316,7 @@ void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
}
// Wrapper around Assembler::mvc (SS-a format)
-void TurboAssembler::MoveChar(const MemOperand& opnd1,
- const MemOperand& opnd2,
+void TurboAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
const Operand& length) {
mvc(opnd1, opnd2, Operand(static_cast<intptr_t>(length.immediate() - 1)));
}
@@ -338,8 +337,10 @@ void TurboAssembler::ExclusiveOrChar(const MemOperand& opnd1,
// Wrapper around Assembler::risbg(n) (RIE-f)
void TurboAssembler::RotateInsertSelectBits(Register dst, Register src,
- const Operand& startBit, const Operand& endBit,
- const Operand& shiftAmt, bool zeroBits) {
+ const Operand& startBit,
+ const Operand& endBit,
+ const Operand& shiftAmt,
+ bool zeroBits) {
if (zeroBits)
// High tag the top bit of I4/EndBit to zero out any unselected bits
risbg(dst, src, startBit,
@@ -354,7 +355,7 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc,
brxhg(dst, inc, L);
#else
brxh(dst, inc, L);
-#endif // V8_TARGET_ARCH_S390X
+#endif // V8_TARGET_ARCH_S390X
}
void TurboAssembler::MultiPush(RegList regs, Register location) {
@@ -658,26 +659,6 @@ void TurboAssembler::RestoreFrameStateForTailCall() {
LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
-// Push and pop all registers that can hold pointers.
-void MacroAssembler::PushSafepointRegisters() {
- // Safepoints expect a block of kNumSafepointRegisters values on the
- // stack, so adjust the stack for unsaved registers.
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- DCHECK_GE(num_unsaved, 0);
- if (num_unsaved > 0) {
- lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize)));
- }
- MultiPush(kSafepointSavedRegisters);
-}
-
-void MacroAssembler::PopSafepointRegisters() {
- const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- MultiPop(kSafepointSavedRegisters);
- if (num_unsaved > 0) {
- la(sp, MemOperand(sp, num_unsaved * kPointerSize));
- }
-}
-
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
@@ -1485,8 +1466,8 @@ void MacroAssembler::PushStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Link the current handler as the next handler.
- Move(r7, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate()));
+ Move(r7,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
// Buy the full stack frame for 5 slots.
lay(sp, MemOperand(sp, -StackHandlerConstants::kSize));
@@ -1497,7 +1478,7 @@ void MacroAssembler::PushStackHandler() {
// Copy the old handler into the next handler slot.
MoveChar(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7),
- Operand(kPointerSize));
+ Operand(kPointerSize));
// Set this new handler as the current one.
StoreP(sp, MemOperand(r7));
}
@@ -1508,8 +1489,8 @@ void MacroAssembler::PopStackHandler() {
// Pop the Next Handler into r3 and store it into Handler Address reference.
Pop(r3);
- Move(ip, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
- isolate()));
+ Move(ip,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
StoreP(r3, MemOperand(ip));
Drop(1); // Drop padding.
@@ -1549,24 +1530,6 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
ble(on_in_range);
}
-void MacroAssembler::TryDoubleToInt32Exact(Register result,
- DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch) {
- Label done;
- DCHECK(double_input != double_scratch);
-
- ConvertDoubleToInt64(result, double_input);
-
- TestIfInt32(result);
- bne(&done);
-
- // convert back and compare
- cdfbr(double_scratch, result);
- cdbr(double_scratch, double_input);
- bind(&done);
-}
-
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input,
@@ -1750,24 +1713,6 @@ void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadP(dst, ContextMemOperand(dst, index));
}
-void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
- Label* smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- // this won't work if src == dst
- DCHECK(src.code() != dst.code());
- SmiUntag(dst, src);
- TestIfSmi(src);
- beq(smi_case);
-}
-
-void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
- Label* on_either_smi) {
- STATIC_ASSERT(kSmiTag == 0);
- JumpIfSmi(reg1, on_either_smi);
- JumpIfSmi(reg2, on_either_smi);
-}
-
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -1973,7 +1918,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
Call(dest);
-
if (isolate() != nullptr) {
// We don't unset the PC; the FP is the source of truth.
Register scratch1 = r6;
@@ -2249,7 +2193,7 @@ void TurboAssembler::Div32(Register dst, Register src1, Register src2) {
#define Generate_DivU32(instr) \
{ \
lr(r0, src1); \
- srdl(r0, Operand(32)); \
+ srdl(r0, Operand(32)); \
instr(r0, src2); \
LoadlW(dst, r1); \
}
@@ -2323,7 +2267,7 @@ void TurboAssembler::Mod32(Register dst, Register src1, Register src2) {
#define Generate_ModU32(instr) \
{ \
lr(r0, src1); \
- srdl(r0, Operand(32)); \
+ srdl(r0, Operand(32)); \
instr(r0, src2); \
LoadlW(dst, r0); \
}
@@ -3024,14 +2968,14 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) {
int endBit = 63 - trailing_zeros;
// Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
- Operand::Zero(), true);
+ Operand::Zero(), true);
return;
} else if (-1 == shifted_value) {
// A Special case in which all top bits up to MSB are 1's. In this case,
// we can set startBit to be 0.
int endBit = 63 - trailing_zeros;
RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
- Operand::Zero(), true);
+ Operand::Zero(), true);
return;
}
}
@@ -3785,7 +3729,6 @@ void TurboAssembler::LoadLogicalReversedHalfWordP(Register dst,
LoadLogicalHalfWordP(dst, dst);
}
-
// Load And Test (Reg <- Reg)
void TurboAssembler::LoadAndTest32(Register dst, Register src) {
ltr(dst, src);
@@ -3864,6 +3807,11 @@ void TurboAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
ldebr(dst, dst);
}
+void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem) {
+ DCHECK(is_uint12(mem.offset()));
+ vl(dst, mem, Condition(0));
+}
+
// Store Double Precision (64-bit) Floating Point number to memory
void TurboAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) {
if (is_uint12(mem.offset())) {
@@ -3891,6 +3839,11 @@ void TurboAssembler::StoreDoubleAsFloat32(DoubleRegister src,
StoreFloat32(scratch, mem);
}
+void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem) {
+ DCHECK(is_uint12(mem.offset()));
+ vst(src, mem, Condition(0));
+}
+
void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch) {
if (is_uint12(opnd.offset())) {
@@ -4199,7 +4152,7 @@ void TurboAssembler::ClearRightImm(Register dst, Register src,
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
int endBit = 63 - numBitsToClear;
RotateInsertSelectBits(dst, src, Operand::Zero(), Operand(endBit),
- Operand::Zero(), true);
+ Operand::Zero(), true);
return;
}
@@ -4330,6 +4283,31 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
StoreDouble(scratch_1, src);
}
+void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
+ Simd128Register scratch) {
+ if (src == dst) return;
+ vlr(scratch, src, Condition(0), Condition(0), Condition(0));
+ vlr(src, dst, Condition(0), Condition(0), Condition(0));
+ vlr(dst, scratch, Condition(0), Condition(0), Condition(0));
+}
+
+void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
+ Simd128Register scratch) {
+ DCHECK(!AreAliased(src, scratch));
+ vlr(scratch, src, Condition(0), Condition(0), Condition(0));
+ LoadSimd128(src, dst);
+ StoreSimd128(scratch, dst);
+}
+
+void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
+ Simd128Register scratch_0,
+ Simd128Register scratch_1) {
+ LoadSimd128(scratch_0, src);
+ LoadSimd128(scratch_1, dst);
+ StoreSimd128(scratch_0, dst);
+ StoreSimd128(scratch_1, src);
+}
+
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
@@ -4365,7 +4343,7 @@ void TurboAssembler::CallBuiltinPointer(Register builtin_pointer) {
ShiftRightArithP(builtin_pointer, builtin_pointer,
Operand(kSmiShift - kSystemPointerSizeLog2));
AddP(builtin_pointer, builtin_pointer,
- Operand(IsolateData::builtin_entry_table_offset()));
+ Operand(IsolateData::builtin_entry_table_offset()));
LoadP(builtin_pointer, MemOperand(kRootRegister, builtin_pointer));
Call(builtin_pointer);
}
@@ -4409,7 +4387,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
ShiftLeftP(destination, scratch, Operand(kSystemPointerSizeLog2));
AddP(destination, destination, kRootRegister);
LoadP(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
bind(&out);
} else {
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index be3785c035..ba870874c8 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -6,13 +6,13 @@
#error This header must be included via macro-assembler.h
#endif
-#ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
-#define V8_S390_MACRO_ASSEMBLER_S390_H_
+#ifndef V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
+#define V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
-#include "src/bailout-reason.h"
-#include "src/contexts.h"
-#include "src/globals.h"
-#include "src/s390/assembler-s390.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/codegen/s390/assembler-s390.h"
+#include "src/common/globals.h"
+#include "src/objects/contexts.h"
namespace v8 {
namespace internal {
@@ -180,17 +180,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(DoubleRegister dst, DoubleRegister src);
void MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
- const Operand& length);
+ const Operand& length);
void CompareLogicalChar(const MemOperand& opnd1, const MemOperand& opnd2,
- const Operand& length);
+ const Operand& length);
void ExclusiveOrChar(const MemOperand& opnd1, const MemOperand& opnd2,
- const Operand& length);
+ const Operand& length);
void RotateInsertSelectBits(Register dst, Register src,
- const Operand& startBit, const Operand& endBit,
- const Operand& shiftAmt, bool zeroBits);
+ const Operand& startBit, const Operand& endBit,
+ const Operand& shiftAmt, bool zeroBits);
void BranchRelativeOnIdxHighP(Register dst, Register inc, Label* L);
@@ -413,6 +413,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
+ void LoadSimd128(Simd128Register dst, const MemOperand& mem);
void AddFloat32(DoubleRegister dst, const MemOperand& opnd,
DoubleRegister scratch);
@@ -444,6 +445,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
DoubleRegister scratch);
+ void StoreSimd128(Simd128Register src, const MemOperand& mem);
void Branch(Condition c, const Operand& opnd);
void BranchOnCount(Register r1, Label* l);
@@ -508,12 +510,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#endif
}
+ void push(DoubleRegister src) {
+ lay(sp, MemOperand(sp, -kPointerSize));
+ StoreDouble(src, MemOperand(sp));
+ }
void push(Register src) {
lay(sp, MemOperand(sp, -kPointerSize));
StoreP(src, MemOperand(sp));
}
+ void pop(DoubleRegister dst) {
+ LoadDouble(dst, MemOperand(sp));
+ la(sp, MemOperand(sp, kPointerSize));
+ }
+
void pop(Register dst) {
LoadP(dst, MemOperand(sp));
la(sp, MemOperand(sp, kPointerSize));
@@ -774,6 +785,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
DoubleRegister scratch_1);
+ void SwapSimd128(Simd128Register src, Simd128Register dst,
+ Simd128Register scratch);
+ void SwapSimd128(Simd128Register src, MemOperand dst,
+ Simd128Register scratch);
+ void SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch_0,
+ Simd128Register scratch_1);
// Cleanse pointer address on 31bit by zero out top bit.
// This is a NOP on 64-bit.
@@ -869,7 +886,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
int endBit = 63; // End is always LSB after shifting.
int startBit = 63 - rangeStart + rangeEnd;
RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
- Operand(shiftAmount), true);
+ Operand(shiftAmount), true);
} else {
if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
ShiftRightP(dst, src, Operand(rangeEnd));
@@ -1077,11 +1094,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void JumpIfIsInRange(Register value, unsigned lower_limit,
unsigned higher_limit, Label* on_in_range);
- // Try to convert a double to a signed 32-bit integer.
- // CR_EQ in cr7 is set and result assigned if the conversion is exact.
- void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
- Register scratch, DoubleRegister double_scratch);
-
// ---------------------------------------------------------------------------
// In-place weak references.
void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
@@ -1175,17 +1187,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
#endif
}
- // Untag the source value into destination and jump if source is a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
// Jump if either of the registers contain a non-smi.
inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
TestIfSmi(value);
bne(not_smi_label /*, cr0*/);
}
- // Jump if either of the registers contain a smi.
- void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
@@ -1261,11 +1267,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
- // Push and pop the registers that can hold pointers, as defined by the
- // RegList constant kSafepointSavedRegisters.
- void PushSafepointRegisters();
- void PopSafepointRegisters();
-
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Helper functions for generating invokes.
@@ -1299,4 +1300,4 @@ inline MemOperand NativeContextMemOperand() {
} // namespace internal
} // namespace v8
-#endif // V8_S390_MACRO_ASSEMBLER_S390_H_
+#endif // V8_CODEGEN_S390_MACRO_ASSEMBLER_S390_H_
diff --git a/deps/v8/src/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index 06537bcb06..ccb35fcb68 100644
--- a/deps/v8/src/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_S390_REGISTER_S390_H_
-#define V8_S390_REGISTER_S390_H_
+#ifndef V8_CODEGEN_S390_REGISTER_S390_H_
+#define V8_CODEGEN_S390_REGISTER_S390_H_
-#include "src/register.h"
-#include "src/reglist.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
namespace v8 {
namespace internal {
@@ -210,10 +210,10 @@ ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
static_assert(sizeof(DoubleRegister) == sizeof(int),
"DoubleRegister can efficiently be passed by value");
-typedef DoubleRegister FloatRegister;
+using FloatRegister = DoubleRegister;
// TODO(john.yan) Define SIMD registers.
-typedef DoubleRegister Simd128Register;
+using Simd128Register = DoubleRegister;
#define DEFINE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
@@ -278,4 +278,4 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = r7;
} // namespace internal
} // namespace v8
-#endif // V8_S390_REGISTER_S390_H_
+#endif // V8_CODEGEN_S390_REGISTER_S390_H_
diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/codegen/safepoint-table.cc
index f3fc966b20..2afdb5f90c 100644
--- a/deps/v8/src/safepoint-table.cc
+++ b/deps/v8/src/codegen/safepoint-table.cc
@@ -2,38 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/safepoint-table.h"
+#include "src/codegen/safepoint-table.h"
-#include "src/assembler-inl.h"
-#include "src/deoptimizer.h"
-#include "src/disasm.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
-#include "src/ostreams.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/diagnostics/disasm.h"
+#include "src/execution/frames-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
-
-bool SafepointEntry::HasRegisters() const {
- DCHECK(is_valid());
- DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
- for (int i = 0; i < num_reg_bytes; i++) {
- if (bits_[i] != SafepointTable::kNoRegisters) return true;
- }
- return false;
-}
-
-
-bool SafepointEntry::HasRegisterAt(int reg_index) const {
- DCHECK(is_valid());
- DCHECK(reg_index >= 0 && reg_index < kNumSafepointRegisters);
- int byte_index = reg_index >> kBitsPerByteLog2;
- int bit_index = reg_index & (kBitsPerByte - 1);
- return (bits_[byte_index] & (1 << bit_index)) != 0;
-}
-
SafepointTable::SafepointTable(Address instruction_start,
size_t safepoint_table_offset,
uint32_t stack_slots, bool has_deopt)
@@ -45,14 +25,11 @@ SafepointTable::SafepointTable(Address instruction_start,
entry_size_ = Memory<uint32_t>(header + kEntrySizeOffset);
pc_and_deoptimization_indexes_ = header + kHeaderSize;
entries_ = pc_and_deoptimization_indexes_ + (length_ * kFixedEntrySize);
- DCHECK_GT(entry_size_, 0);
- STATIC_ASSERT(SafepointEntry::DeoptimizationIndexField::kMax ==
- Safepoint::kNoDeoptimizationIndex);
}
SafepointTable::SafepointTable(Code code)
- : SafepointTable(code->InstructionStart(), code->safepoint_table_offset(),
- code->stack_slots(), true) {}
+ : SafepointTable(code.InstructionStart(), code.safepoint_table_offset(),
+ code.stack_slots(), true) {}
unsigned SafepointTable::find_return_pc(unsigned pc_offset) {
for (unsigned i = 0; i < length(); i++) {
@@ -86,7 +63,6 @@ SafepointEntry SafepointTable::FindEntry(Address pc) const {
return SafepointEntry();
}
-
void SafepointTable::PrintEntry(unsigned index,
std::ostream& os) const { // NOLINT
disasm::NameConverter converter;
@@ -95,24 +71,14 @@ void SafepointTable::PrintEntry(unsigned index,
// Print the stack slot bits.
if (entry_size_ > 0) {
- DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
+ const int first = 0;
int last = entry_size_ - 1;
for (int i = first; i < last; i++) PrintBits(os, bits[i], kBitsPerByte);
int last_bits = stack_slots_ - ((last - first) * kBitsPerByte);
PrintBits(os, bits[last], last_bits);
-
- // Print the registers (if any).
- if (!entry.HasRegisters()) return;
- for (int j = 0; j < kNumSafepointRegisters; j++) {
- if (entry.HasRegisterAt(j)) {
- os << " | " << converter.NameOfCPURegister(j);
- }
- }
}
}
-
void SafepointTable::PrintBits(std::ostream& os, // NOLINT
uint8_t byte, int digits) {
DCHECK(digits >= 0 && digits <= kBitsPerByte);
@@ -121,22 +87,15 @@ void SafepointTable::PrintBits(std::ostream& os, // NOLINT
}
}
-void Safepoint::DefinePointerRegister(Register reg) {
- registers_->push_back(reg.code());
-}
-
-
Safepoint SafepointTableBuilder::DefineSafepoint(
- Assembler* assembler,
- Safepoint::Kind kind,
- Safepoint::DeoptMode deopt_mode) {
+ Assembler* assembler, Safepoint::DeoptMode deopt_mode) {
deoptimization_info_.push_back(
- DeoptimizationInfo(zone_, assembler->pc_offset(), kind));
+ DeoptimizationInfo(zone_, assembler->pc_offset()));
if (deopt_mode == Safepoint::kNoLazyDeopt) {
last_lazy_safepoint_ = deoptimization_info_.size();
}
DeoptimizationInfo& new_info = deoptimization_info_.back();
- return Safepoint(new_info.indexes, new_info.registers);
+ return Safepoint(new_info.indexes);
}
void SafepointTableBuilder::RecordLazyDeoptimizationIndex(int index) {
@@ -172,9 +131,6 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
assembler->RecordComment(";;; Safepoint table.");
offset_ = assembler->pc_offset();
- // Take the register bits into account.
- bits_per_entry += kNumSafepointRegisters;
-
// Compute the number of bytes per safepoint entry.
int bytes_per_entry =
RoundUp(bits_per_entry, kBitsPerByte) >> kBitsPerByteLog2;
@@ -195,7 +151,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
STATIC_ASSERT(SafepointTable::kFixedEntrySize == 3 * kIntSize);
for (const DeoptimizationInfo& info : deoptimization_info_) {
assembler->dd(info.pc);
- assembler->dd(EncodeExceptPC(info));
+ assembler->dd(info.deopt_index);
assembler->dd(info.trampoline);
}
@@ -203,25 +159,8 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
ZoneVector<uint8_t> bits(bytes_per_entry, 0, zone_);
for (const DeoptimizationInfo& info : deoptimization_info_) {
ZoneChunkList<int>* indexes = info.indexes;
- ZoneChunkList<int>* registers = info.registers;
std::fill(bits.begin(), bits.end(), 0);
- // Run through the registers (if any).
- DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte));
- if (registers == nullptr) {
- const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
- for (int j = 0; j < num_reg_bytes; j++) {
- bits[j] = SafepointTable::kNoRegisters;
- }
- } else {
- for (int index : *registers) {
- DCHECK(index >= 0 && index < kNumSafepointRegisters);
- int byte_index = index >> kBitsPerByteLog2;
- int bit_index = index & (kBitsPerByte - 1);
- bits[byte_index] |= (1 << bit_index);
- }
- }
-
// Run through the indexes and build a bitmap.
for (int idx : *indexes) {
int index = bits_per_entry - 1 - idx;
@@ -238,11 +177,6 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
emitted_ = true;
}
-uint32_t SafepointTableBuilder::EncodeExceptPC(const DeoptimizationInfo& info) {
- return SafepointEntry::DeoptimizationIndexField::encode(info.deopt_index) |
- SafepointEntry::SaveDoublesField::encode(info.has_doubles);
-}
-
void SafepointTableBuilder::RemoveDuplicates() {
// If the table contains more than one entry, and all entries are identical
// (except for the pc), replace the whole table by a single entry with pc =
@@ -266,7 +200,6 @@ void SafepointTableBuilder::RemoveDuplicates() {
bool SafepointTableBuilder::IsIdenticalExceptForPc(
const DeoptimizationInfo& info1, const DeoptimizationInfo& info2) const {
- if (info1.has_doubles != info2.has_doubles) return false;
if (info1.deopt_index != info2.deopt_index) return false;
ZoneChunkList<int>* indexes1 = info1.indexes;
@@ -276,19 +209,6 @@ bool SafepointTableBuilder::IsIdenticalExceptForPc(
return false;
}
- ZoneChunkList<int>* registers1 = info1.registers;
- ZoneChunkList<int>* registers2 = info2.registers;
- if (registers1) {
- if (!registers2) return false;
- if (registers1->size() != registers2->size()) return false;
- if (!std::equal(registers1->begin(), registers1->end(),
- registers2->begin())) {
- return false;
- }
- } else if (registers2) {
- return false;
- }
-
return true;
}
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/codegen/safepoint-table.h
index 462718a224..066f0123fc 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/codegen/safepoint-table.h
@@ -2,65 +2,51 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SAFEPOINT_TABLE_H_
-#define V8_SAFEPOINT_TABLE_H_
+#ifndef V8_CODEGEN_SAFEPOINT_TABLE_H_
+#define V8_CODEGEN_SAFEPOINT_TABLE_H_
-#include "src/allocation.h"
-#include "src/assert-scope.h"
-#include "src/utils.h"
-#include "src/v8memory.h"
+#include "src/common/assert-scope.h"
+#include "src/common/v8memory.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
#include "src/zone/zone-chunk-list.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
-class Register;
-
class SafepointEntry {
public:
- SafepointEntry() : info_(0), bits_(nullptr), trampoline_pc_(-1) {}
+ SafepointEntry() : deopt_index_(0), bits_(nullptr), trampoline_pc_(-1) {}
- SafepointEntry(unsigned info, uint8_t* bits, int trampoline_pc)
- : info_(info), bits_(bits), trampoline_pc_(trampoline_pc) {
+ SafepointEntry(unsigned deopt_index, uint8_t* bits, int trampoline_pc)
+ : deopt_index_(deopt_index), bits_(bits), trampoline_pc_(trampoline_pc) {
DCHECK(is_valid());
}
bool is_valid() const { return bits_ != nullptr; }
bool Equals(const SafepointEntry& other) const {
- return info_ == other.info_ && bits_ == other.bits_;
+ return deopt_index_ == other.deopt_index_ && bits_ == other.bits_;
}
void Reset() {
- info_ = 0;
+ deopt_index_ = 0;
bits_ = nullptr;
}
int trampoline_pc() { return trampoline_pc_; }
- static const int kSaveDoublesFieldBits = 1;
- static const int kDeoptIndexBits = 32 - kSaveDoublesFieldBits;
-
- class DeoptimizationIndexField : public BitField<int, 0, kDeoptIndexBits> {};
- class SaveDoublesField
- : public BitField<bool, DeoptimizationIndexField::kNext,
- kSaveDoublesFieldBits> {};
+ static const unsigned kNoDeoptIndex = kMaxUInt32;
int deoptimization_index() const {
DCHECK(is_valid() && has_deoptimization_index());
- return DeoptimizationIndexField::decode(info_);
+ return deopt_index_;
}
bool has_deoptimization_index() const {
DCHECK(is_valid());
- return DeoptimizationIndexField::decode(info_) !=
- DeoptimizationIndexField::kMax;
- }
-
- bool has_doubles() const {
- DCHECK(is_valid());
- return SaveDoublesField::decode(info_);
+ return deopt_index_ != kNoDeoptIndex;
}
uint8_t* bits() {
@@ -68,11 +54,8 @@ class SafepointEntry {
return bits_;
}
- bool HasRegisters() const;
- bool HasRegisterAt(int reg_index) const;
-
private:
- unsigned info_;
+ unsigned deopt_index_;
uint8_t* bits_;
// It needs to be an integer as it is -1 for eager deoptimizations.
int trampoline_pc_;
@@ -105,11 +88,11 @@ class SafepointTable {
SafepointEntry GetEntry(unsigned index) const {
DCHECK(index < length_);
- unsigned info = Memory<uint32_t>(GetEncodedInfoLocation(index));
+ unsigned deopt_index = Memory<uint32_t>(GetEncodedInfoLocation(index));
uint8_t* bits = &Memory<uint8_t>(entries_ + (index * entry_size_));
int trampoline_pc =
has_deopt_ ? Memory<int>(GetTrampolineLocation(index)) : -1;
- return SafepointEntry(info, bits, trampoline_pc);
+ return SafepointEntry(deopt_index, bits, trampoline_pc);
}
// Returns the entry for the given pc.
@@ -162,29 +145,15 @@ class SafepointTable {
class Safepoint {
public:
- typedef enum {
- kSimple = 0,
- kWithRegisters = 1 << 0,
- kWithDoubles = 1 << 1,
- kWithRegistersAndDoubles = kWithRegisters | kWithDoubles
- } Kind;
-
- enum DeoptMode {
- kNoLazyDeopt,
- kLazyDeopt
- };
+ enum DeoptMode { kNoLazyDeopt, kLazyDeopt };
- static const int kNoDeoptimizationIndex =
- SafepointEntry::DeoptimizationIndexField::kMax;
+ static const int kNoDeoptimizationIndex = SafepointEntry::kNoDeoptIndex;
void DefinePointerSlot(int index) { indexes_->push_back(index); }
- void DefinePointerRegister(Register reg);
private:
- Safepoint(ZoneChunkList<int>* indexes, ZoneChunkList<int>* registers)
- : indexes_(indexes), registers_(registers) {}
+ explicit Safepoint(ZoneChunkList<int>* indexes) : indexes_(indexes) {}
ZoneChunkList<int>* const indexes_;
- ZoneChunkList<int>* const registers_;
friend class SafepointTableBuilder;
};
@@ -201,9 +170,7 @@ class SafepointTableBuilder {
unsigned GetCodeOffset() const;
// Define a new safepoint for the current position in the body.
- Safepoint DefineSafepoint(Assembler* assembler,
- Safepoint::Kind kind,
- Safepoint::DeoptMode mode);
+ Safepoint DefineSafepoint(Assembler* assembler, Safepoint::DeoptMode mode);
// Record deoptimization index for lazy deoptimization for the last
// outstanding safepoints.
@@ -226,26 +193,16 @@ class SafepointTableBuilder {
struct DeoptimizationInfo {
unsigned pc;
unsigned deopt_index;
- bool has_doubles;
int trampoline;
ZoneChunkList<int>* indexes;
- ZoneChunkList<int>* registers;
- DeoptimizationInfo(Zone* zone, unsigned pc, Safepoint::Kind kind)
+ DeoptimizationInfo(Zone* zone, unsigned pc)
: pc(pc),
deopt_index(Safepoint::kNoDeoptimizationIndex),
- has_doubles(kind & Safepoint::kWithDoubles),
trampoline(-1),
indexes(new (zone) ZoneChunkList<int>(
- zone, ZoneChunkList<int>::StartMode::kSmall)),
- registers(kind & Safepoint::kWithRegisters
- ? new (zone) ZoneChunkList<int>(
- zone, ZoneChunkList<int>::StartMode::kSmall)
- : nullptr) {}
+ zone, ZoneChunkList<int>::StartMode::kSmall)) {}
};
- // Encodes all fields of a {DeoptimizationInfo} except {pc} and {trampoline}.
- uint32_t EncodeExceptPC(const DeoptimizationInfo&);
-
// Compares all fields of a {DeoptimizationInfo} except {pc} and {trampoline}.
bool IsIdenticalExceptForPc(const DeoptimizationInfo&,
const DeoptimizationInfo&) const;
@@ -267,4 +224,4 @@ class SafepointTableBuilder {
} // namespace internal
} // namespace v8
-#endif // V8_SAFEPOINT_TABLE_H_
+#endif // V8_CODEGEN_SAFEPOINT_TABLE_H_
diff --git a/deps/v8/src/signature.h b/deps/v8/src/codegen/signature.h
index 6890699ab4..c05c440530 100644
--- a/deps/v8/src/signature.h
+++ b/deps/v8/src/codegen/signature.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SIGNATURE_H_
-#define V8_SIGNATURE_H_
+#ifndef V8_CODEGEN_SIGNATURE_H_
+#define V8_CODEGEN_SIGNATURE_H_
#include "src/base/functional.h"
#include "src/base/iterator.h"
-#include "src/machine-type.h"
+#include "src/codegen/machine-type.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -105,7 +105,7 @@ class Signature : public ZoneObject {
const T* reps_;
};
-typedef Signature<MachineType> MachineSignature;
+using MachineSignature = Signature<MachineType>;
template <typename T>
size_t hash_value(const Signature<T>& sig) {
@@ -117,4 +117,4 @@ size_t hash_value(const Signature<T>& sig) {
} // namespace internal
} // namespace v8
-#endif // V8_SIGNATURE_H_
+#endif // V8_CODEGEN_SIGNATURE_H_
diff --git a/deps/v8/src/source-position-table.cc b/deps/v8/src/codegen/source-position-table.cc
index b27f08ac4e..6c0aa36b27 100644
--- a/deps/v8/src/source-position-table.cc
+++ b/deps/v8/src/codegen/source-position-table.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/source-position-table.h"
+#include "src/codegen/source-position-table.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -48,7 +48,7 @@ void SubtractFromEntry(PositionTableEntry& value,
// Helper: Encode an integer.
template <typename T>
void EncodeInt(std::vector<byte>& bytes, T value) {
- typedef typename std::make_unsigned<T>::type unsigned_type;
+ using unsigned_type = typename std::make_unsigned<T>::type;
// Zig-zag encoding.
static const int kShift = sizeof(T) * kBitsPerByte - 1;
value = ((static_cast<unsigned_type>(value) << 1) ^ (value >> kShift));
@@ -108,8 +108,8 @@ void DecodeEntry(Vector<const byte> bytes, int* index,
}
Vector<const byte> VectorFromByteArray(ByteArray byte_array) {
- return Vector<const byte>(byte_array->GetDataStartAddress(),
- byte_array->length());
+ return Vector<const byte>(byte_array.GetDataStartAddress(),
+ byte_array.length());
}
#ifdef ENABLE_SLOW_DCHECKS
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/codegen/source-position-table.h
index 772f163b4a..e4e506a114 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/codegen/source-position-table.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SOURCE_POSITION_TABLE_H_
-#define V8_SOURCE_POSITION_TABLE_H_
+#ifndef V8_CODEGEN_SOURCE_POSITION_TABLE_H_
+#define V8_CODEGEN_SOURCE_POSITION_TABLE_H_
-#include "src/assert-scope.h"
-#include "src/checks.h"
-#include "src/globals.h"
-#include "src/source-position.h"
+#include "src/codegen/source-position.h"
+#include "src/common/assert-scope.h"
+#include "src/common/checks.h"
+#include "src/common/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -135,4 +135,4 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
} // namespace internal
} // namespace v8
-#endif // V8_SOURCE_POSITION_TABLE_H_
+#endif // V8_CODEGEN_SOURCE_POSITION_TABLE_H_
diff --git a/deps/v8/src/source-position.cc b/deps/v8/src/codegen/source-position.cc
index 3d7ac98462..fa24127682 100644
--- a/deps/v8/src/source-position.cc
+++ b/deps/v8/src/codegen/source-position.cc
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/source-position.h"
-#include "src/objects-inl.h"
-#include "src/optimized-compilation-info.h"
+#include "src/codegen/source-position.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
std::ostream& operator<<(std::ostream& out, const SourcePositionInfo& pos) {
out << "<";
- if (!pos.script.is_null() && pos.script->name()->IsString()) {
- out << String::cast(pos.script->name())->ToCString(DISALLOW_NULLS).get();
+ if (!pos.script.is_null() && pos.script->name().IsString()) {
+ out << String::cast(pos.script->name()).ToCString(DISALLOW_NULLS).get();
} else {
out << "unknown";
}
@@ -68,7 +68,7 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
std::vector<SourcePositionInfo> stack;
while (pos.isInlined()) {
InliningPosition inl =
- deopt_data->InliningPositions()->get(pos.InliningId());
+ deopt_data->InliningPositions().get(pos.InliningId());
Handle<SharedFunctionInfo> function(
deopt_data->GetInlinedFunction(inl.inlined_function_id), isolate);
stack.push_back(SourcePositionInfo(pos, function));
@@ -84,15 +84,15 @@ void SourcePosition::Print(std::ostream& out,
SharedFunctionInfo function) const {
Script::PositionInfo pos;
Object source_name;
- if (function->script()->IsScript()) {
- Script script = Script::cast(function->script());
- source_name = script->name();
- script->GetPositionInfo(ScriptOffset(), &pos, Script::WITH_OFFSET);
+ if (function.script().IsScript()) {
+ Script script = Script::cast(function.script());
+ source_name = script.name();
+ script.GetPositionInfo(ScriptOffset(), &pos, Script::WITH_OFFSET);
}
out << "<";
- if (source_name->IsString()) {
+ if (source_name.IsString()) {
out << String::cast(source_name)
- ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
+ .ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
.get();
} else {
out << "unknown";
@@ -113,18 +113,18 @@ void SourcePosition::PrintJson(std::ostream& out) const {
void SourcePosition::Print(std::ostream& out, Code code) const {
DeoptimizationData deopt_data =
- DeoptimizationData::cast(code->deoptimization_data());
+ DeoptimizationData::cast(code.deoptimization_data());
if (!isInlined()) {
SharedFunctionInfo function(
- SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
+ SharedFunctionInfo::cast(deopt_data.SharedFunctionInfo()));
Print(out, function);
} else {
- InliningPosition inl = deopt_data->InliningPositions()->get(InliningId());
+ InliningPosition inl = deopt_data.InliningPositions().get(InliningId());
if (inl.inlined_function_id == -1) {
out << *this;
} else {
SharedFunctionInfo function =
- deopt_data->GetInlinedFunction(inl.inlined_function_id);
+ deopt_data.GetInlinedFunction(inl.inlined_function_id);
Print(out, function);
}
out << " inlined at ";
@@ -136,7 +136,7 @@ SourcePositionInfo::SourcePositionInfo(SourcePosition pos,
Handle<SharedFunctionInfo> f)
: position(pos),
shared(f),
- script(f.is_null() || !f->script()->IsScript()
+ script(f.is_null() || !f->script().IsScript()
? Handle<Script>::null()
: handle(Script::cast(f->script()), f->GetIsolate())) {
if (!script.is_null()) {
diff --git a/deps/v8/src/source-position.h b/deps/v8/src/codegen/source-position.h
index e62c7e5ddf..ad0132b827 100644
--- a/deps/v8/src/source-position.h
+++ b/deps/v8/src/codegen/source-position.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SOURCE_POSITION_H_
-#define V8_SOURCE_POSITION_H_
+#ifndef V8_CODEGEN_SOURCE_POSITION_H_
+#define V8_CODEGEN_SOURCE_POSITION_H_
#include <ostream>
-#include "src/flags.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/utils.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/handles/handles.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -142,18 +142,18 @@ class SourcePosition final {
void Print(std::ostream& out, SharedFunctionInfo function) const;
- typedef BitField64<bool, 0, 1> IsExternalField;
+ using IsExternalField = BitField64<bool, 0, 1>;
// The two below are only used if IsExternal() is true.
- typedef BitField64<int, 1, 20> ExternalLineField;
- typedef BitField64<int, 21, 10> ExternalFileIdField;
+ using ExternalLineField = BitField64<int, 1, 20>;
+ using ExternalFileIdField = BitField64<int, 21, 10>;
// ScriptOffsetField is only used if IsExternal() is false.
- typedef BitField64<int, 1, 30> ScriptOffsetField;
+ using ScriptOffsetField = BitField64<int, 1, 30>;
// InliningId is in the high bits for better compression in
// SourcePositionTable.
- typedef BitField64<int, 31, 16> InliningIdField;
+ using InliningIdField = BitField64<int, 31, 16>;
// Leaving the highest bit untouched to allow for signed conversion.
uint64_t value_;
@@ -194,4 +194,4 @@ std::ostream& operator<<(std::ostream& out,
} // namespace internal
} // namespace v8
-#endif // V8_SOURCE_POSITION_H_
+#endif // V8_CODEGEN_SOURCE_POSITION_H_
diff --git a/deps/v8/src/string-constants.cc b/deps/v8/src/codegen/string-constants.cc
index 26a5d2045f..2553789d7c 100644
--- a/deps/v8/src/string-constants.cc
+++ b/deps/v8/src/codegen/string-constants.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/string-constants.h"
+#include "src/codegen/string-constants.h"
#include "src/base/functional.h"
-#include "src/dtoa.h"
-#include "src/objects.h"
+#include "src/numbers/dtoa.h"
+#include "src/objects/objects.h"
#include "src/objects/string-inl.h"
namespace v8 {
diff --git a/deps/v8/src/string-constants.h b/deps/v8/src/codegen/string-constants.h
index 301a9bdd0b..8043c605c3 100644
--- a/deps/v8/src/string-constants.h
+++ b/deps/v8/src/codegen/string-constants.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STRING_CONSTANTS_H_
-#define V8_STRING_CONSTANTS_H_
+#ifndef V8_CODEGEN_STRING_CONSTANTS_H_
+#define V8_CODEGEN_STRING_CONSTANTS_H_
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/objects/string.h"
#include "src/zone/zone.h"
@@ -113,4 +113,4 @@ std::ostream& operator<<(std::ostream& os, StringCons const& parameters);
} // namespace internal
} // namespace v8
-#endif // V8_STRING_CONSTANTS_H_
+#endif // V8_CODEGEN_STRING_CONSTANTS_H_
diff --git a/deps/v8/src/turbo-assembler.cc b/deps/v8/src/codegen/turbo-assembler.cc
index 0a95775c2d..f46ab0ade5 100644
--- a/deps/v8/src/turbo-assembler.cc
+++ b/deps/v8/src/codegen/turbo-assembler.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/turbo-assembler.h"
+#include "src/codegen/turbo-assembler.h"
#include "src/builtins/builtins.h"
#include "src/builtins/constants-table-builder.h"
-#include "src/isolate-data.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-data.h"
+#include "src/execution/isolate-inl.h"
#include "src/snapshot/serializer-common.h"
namespace v8 {
diff --git a/deps/v8/src/turbo-assembler.h b/deps/v8/src/codegen/turbo-assembler.h
index 5ecd41c758..afdef22fe7 100644
--- a/deps/v8/src/turbo-assembler.h
+++ b/deps/v8/src/codegen/turbo-assembler.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TURBO_ASSEMBLER_H_
-#define V8_TURBO_ASSEMBLER_H_
+#ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_
+#define V8_CODEGEN_TURBO_ASSEMBLER_H_
-#include "src/assembler-arch.h"
#include "src/base/template-utils.h"
#include "src/builtins/builtins.h"
-#include "src/roots.h"
+#include "src/codegen/assembler-arch.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
@@ -19,11 +19,6 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
public:
// Constructors are declared public to inherit them in derived classes
// with `using` directive.
- TurboAssemblerBase(const AssemblerOptions& options,
- std::unique_ptr<AssemblerBuffer> buffer = {})
- : TurboAssemblerBase(nullptr, options.EnableV8AgnosticCode(),
- CodeObjectRequired::kNo, std::move(buffer)) {}
-
TurboAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
std::unique_ptr<AssemblerBuffer> buffer = {})
: TurboAssemblerBase(isolate, AssemblerOptions::Default(isolate),
@@ -34,7 +29,6 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
std::unique_ptr<AssemblerBuffer> buffer = {});
Isolate* isolate() const {
- DCHECK(!options().v8_agnostic_code);
return isolate_;
}
@@ -104,6 +98,12 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static bool IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference);
+#if V8_OS_WIN
+ // Minimum page size. We must touch memory once per page when expanding the
+ // stack, to avoid access violations.
+ static constexpr int kStackPageSize = 4 * KB;
+#endif
+
protected:
void RecordCommentForOffHeapTrampoline(int builtin_index);
@@ -169,4 +169,4 @@ inline bool AreAliased(RegType first_reg, RegTypes... regs) {
} // namespace internal
} // namespace v8
-#endif // V8_TURBO_ASSEMBLER_H_
+#endif // V8_CODEGEN_TURBO_ASSEMBLER_H_
diff --git a/deps/v8/src/unoptimized-compilation-info.cc b/deps/v8/src/codegen/unoptimized-compilation-info.cc
index 12ce9bfa0a..f46e9cda21 100644
--- a/deps/v8/src/unoptimized-compilation-info.cc
+++ b/deps/v8/src/codegen/unoptimized-compilation-info.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/unoptimized-compilation-info.h"
+#include "src/codegen/unoptimized-compilation-info.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/codegen/source-position.h"
#include "src/debug/debug.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
-#include "src/source-position.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/unoptimized-compilation-info.h b/deps/v8/src/codegen/unoptimized-compilation-info.h
index ddca691940..fe8dbe66c6 100644
--- a/deps/v8/src/unoptimized-compilation-info.h
+++ b/deps/v8/src/codegen/unoptimized-compilation-info.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UNOPTIMIZED_COMPILATION_INFO_H_
-#define V8_UNOPTIMIZED_COMPILATION_INFO_H_
+#ifndef V8_CODEGEN_UNOPTIMIZED_COMPILATION_INFO_H_
+#define V8_CODEGEN_UNOPTIMIZED_COMPILATION_INFO_H_
#include <memory>
-#include "src/feedback-vector.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/objects.h"
-#include "src/source-position-table.h"
-#include "src/utils.h"
+#include "src/codegen/source-position-table.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/objects.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -139,4 +139,4 @@ class V8_EXPORT_PRIVATE UnoptimizedCompilationInfo final {
} // namespace internal
} // namespace v8
-#endif // V8_UNOPTIMIZED_COMPILATION_INFO_H_
+#endif // V8_CODEGEN_UNOPTIMIZED_COMPILATION_INFO_H_
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 7b389c2456..67cf648c04 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_ASSEMBLER_X64_INL_H_
-#define V8_X64_ASSEMBLER_X64_INL_H_
+#ifndef V8_CODEGEN_X64_ASSEMBLER_X64_INL_H_
+#define V8_CODEGEN_X64_ASSEMBLER_X64_INL_H_
-#include "src/x64/assembler-x64.h"
+#include "src/codegen/x64/assembler-x64.h"
#include "src/base/cpu.h"
+#include "src/common/v8memory.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
-#include "src/v8memory.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -22,7 +22,6 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(SSE4_1); }
// -----------------------------------------------------------------------------
// Implementation of Assembler
-
void Assembler::emitl(uint32_t x) {
WriteUnalignedValue(reinterpret_cast<Address>(pc_), x);
pc_ += sizeof(uint32_t);
@@ -82,7 +81,6 @@ void Assembler::emit_rex_64(XMMRegister reg, Operand op) {
emit(0x48 | (reg.code() & 0x8) >> 1 | op.data().rex);
}
-
void Assembler::emit_rex_64(Register rm_reg) {
DCHECK_EQ(rm_reg.code() & 0xf, rm_reg.code());
emit(0x48 | rm_reg.high_bit());
@@ -98,10 +96,7 @@ void Assembler::emit_rex_32(Register reg, Operand op) {
emit(0x40 | reg.high_bit() << 2 | op.data().rex);
}
-
-void Assembler::emit_rex_32(Register rm_reg) {
- emit(0x40 | rm_reg.high_bit());
-}
+void Assembler::emit_rex_32(Register rm_reg) { emit(0x40 | rm_reg.high_bit()); }
void Assembler::emit_rex_32(Operand op) { emit(0x40 | op.data().rex); }
@@ -120,25 +115,21 @@ void Assembler::emit_optional_rex_32(XMMRegister reg, Operand op) {
if (rex_bits != 0) emit(0x40 | rex_bits);
}
-
void Assembler::emit_optional_rex_32(XMMRegister reg, XMMRegister base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+ byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
-
void Assembler::emit_optional_rex_32(XMMRegister reg, Register base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+ byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
-
void Assembler::emit_optional_rex_32(Register reg, XMMRegister base) {
- byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
+ byte rex_bits = (reg.code() & 0x8) >> 1 | (base.code() & 0x8) >> 3;
if (rex_bits != 0) emit(0x40 | rex_bits);
}
-
void Assembler::emit_optional_rex_32(Register rm_reg) {
if (rm_reg.high_bit()) emit(0x41);
}
@@ -151,7 +142,6 @@ void Assembler::emit_optional_rex_32(Operand op) {
if (op.data().rex != 0) emit(0x40 | op.data().rex);
}
-
// byte 1 of 3-byte VEX
void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
LeadingOpcode m) {
@@ -159,14 +149,12 @@ void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
emit(rxb | m);
}
-
// byte 1 of 3-byte VEX
void Assembler::emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m) {
byte rxb = static_cast<byte>(~((reg.high_bit() << 2) | rm.data().rex)) << 5;
emit(rxb | m);
}
-
// byte 1 of 2-byte VEX
void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
SIMDPrefix pp) {
@@ -174,14 +162,12 @@ void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
emit(rv | l | pp);
}
-
// byte 2 of 3-byte VEX
void Assembler::emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
SIMDPrefix pp) {
emit(w | ((~v.code() & 0xf) << 3) | l | pp);
}
-
void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
XMMRegister rm, VectorLength l, SIMDPrefix pp,
LeadingOpcode mm, VexW w) {
@@ -195,7 +181,6 @@ void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
}
}
-
void Assembler::emit_vex_prefix(Register reg, Register vreg, Register rm,
VectorLength l, SIMDPrefix pp, LeadingOpcode mm,
VexW w) {
@@ -226,7 +211,6 @@ void Assembler::emit_vex_prefix(Register reg, Register vreg, Operand rm,
emit_vex_prefix(ireg, ivreg, rm, l, pp, mm, w);
}
-
Address Assembler::target_address_at(Address pc, Address constant_pool) {
return ReadUnalignedValue<int32_t>(pc) + pc + 4;
}
@@ -245,15 +229,10 @@ void Assembler::deserialization_set_target_internal_reference_at(
WriteUnalignedValue(pc, target);
}
-
-Address Assembler::target_address_from_return_address(Address pc) {
- return pc - kCallTargetAddressOffset;
-}
-
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
- !code.is_null() ? code->constant_pool() : kNullAddress,
+ !code.is_null() ? code.constant_pool() : kNullAddress,
target);
}
@@ -266,6 +245,10 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
return GetCodeTarget(ReadUnalignedValue<int32_t>(pc));
}
+Handle<HeapObject> Assembler::compressed_embedded_object_handle_at(Address pc) {
+ return GetCompressedEmbeddedObject(ReadUnalignedValue<int32_t>(pc));
+}
+
Address Assembler::runtime_entry_at(Address pc) {
return ReadUnalignedValue<int32_t>(pc) + options().code_range_start;
}
@@ -284,7 +267,6 @@ void RelocInfo::apply(intptr_t delta) {
}
}
-
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
@@ -292,36 +274,54 @@ Address RelocInfo::target_address() {
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
- IsWasmStubCall(rmode_) || IsEmbeddedObject(rmode_) ||
- IsExternalReference(rmode_) || IsOffHeapTarget(rmode_));
+ IsWasmStubCall(rmode_) || IsFullEmbeddedObject(rmode_) ||
+ IsCompressedEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsOffHeapTarget(rmode_));
return pc_;
}
-
-Address RelocInfo::constant_pool_entry_address() {
- UNREACHABLE();
-}
-
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
} else {
- return kSystemPointerSize;
+ return IsCompressedEmbeddedObject(rmode_) ? kTaggedSize
+ : kSystemPointerSize;
}
}
HeapObject RelocInfo::target_object() {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ CHECK(!host_.is_null());
+ Object o = static_cast<Object>(DecompressTaggedPointer(
+ host_.ptr(), ReadUnalignedValue<Tagged_t>(pc_)));
+ return HeapObject::cast(o);
+ }
+ return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
+}
+
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ Tagged_t compressed = ReadUnalignedValue<Tagged_t>(pc_);
+ DCHECK(!HAS_SMI_TAG(compressed));
+ Object obj(DecompressTaggedPointer(isolate, compressed));
+ return HeapObject::cast(obj);
+ }
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- if (rmode_ == EMBEDDED_OBJECT) {
- return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
- } else {
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCodeTarget(rmode_)) {
return origin->code_target_object_handle_at(pc_);
+ } else {
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ return origin->compressed_embedded_object_handle_at(pc_);
+ }
+ return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
}
}
@@ -344,7 +344,6 @@ Address RelocInfo::target_internal_reference() {
return ReadUnalignedValue<Address>(pc_);
}
-
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return pc_;
@@ -353,8 +352,14 @@ Address RelocInfo::target_internal_reference_address() {
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
- DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- WriteUnalignedValue(pc_, target->ptr());
+ DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
+ if (IsCompressedEmbeddedObject(rmode_)) {
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ Tagged_t tagged = CompressTagged(target.ptr());
+ WriteUnalignedValue(pc_, tagged);
+ } else {
+ WriteUnalignedValue(pc_, target.ptr());
+ }
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
FlushInstructionCache(pc_, sizeof(Address));
}
@@ -383,9 +388,12 @@ Address RelocInfo::target_off_heap_target() {
}
void RelocInfo::WipeOut() {
- if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ if (IsFullEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsOffHeapTarget(rmode_)) {
WriteUnalignedValue(pc_, kNullAddress);
+ } else if (IsCompressedEmbeddedObject(rmode_)) {
+ Address smi_address = Smi::FromInt(0).ptr();
+ WriteUnalignedValue(pc_, CompressTagged(smi_address));
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
Assembler::set_target_address_at(pc_, constant_pool_,
@@ -398,4 +406,4 @@ void RelocInfo::WipeOut() {
} // namespace internal
} // namespace v8
-#endif // V8_X64_ASSEMBLER_X64_INL_H_
+#endif // V8_CODEGEN_X64_ASSEMBLER_X64_INL_H_
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 00eaf6b259..3236b0f52c 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/x64/assembler-x64.h"
+#include "src/codegen/x64/assembler-x64.h"
#include <cstring>
@@ -15,13 +15,13 @@
#include <sys/sysctl.h>
#endif
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/string-constants.h"
-#include "src/v8.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/init/v8.h"
namespace v8 {
namespace internal {
@@ -70,7 +70,6 @@ bool OSHasAVXSupport() {
} // namespace
-
void CpuFeatures::ProbeImpl(bool cross_compile) {
base::CPU cpu;
CHECK(cpu.has_sse2()); // SSE2 support is mandatory.
@@ -106,8 +105,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
}
}
-
-void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {
printf(
"SSE3=%d SSSE3=%d SSE4_1=%d SAHF=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d "
@@ -515,12 +513,10 @@ void Assembler::Align(int m) {
Nop(delta);
}
-
void Assembler::CodeTargetAlign() {
Align(16); // Preferred alignment of jump targets on x64.
}
-
bool Assembler::IsNop(Address addr) {
byte* a = reinterpret_cast<byte*>(addr);
while (*a == 0x66) a++;
@@ -529,9 +525,8 @@ bool Assembler::IsNop(Address addr) {
return false;
}
-
void Assembler::bind_to(Label* L, int pos) {
- DCHECK(!L->is_bound()); // Label may only be bound once.
+ DCHECK(!L->is_bound()); // Label may only be bound once.
DCHECK(0 <= pos && pos <= pc_offset()); // Position must be valid.
if (L->is_linked()) {
int current = L->pos();
@@ -594,10 +589,7 @@ void Assembler::bind_to(Label* L, int pos) {
L->bind_to(pos);
}
-
-void Assembler::bind(Label* L) {
- bind_to(L, pc_offset());
-}
+void Assembler::bind(Label* L) { bind_to(L, pc_offset()); }
void Assembler::record_farjmp_position(Label* L, int pos) {
auto& pos_vector = label_farjmp_maps_[L];
@@ -692,7 +684,6 @@ void Assembler::emit_operand(int code, Operand adr) {
}
}
-
// Assembler Instruction implementations.
void Assembler::arithmetic_op(byte opcode, Register reg, Operand op, int size) {
@@ -702,14 +693,11 @@ void Assembler::arithmetic_op(byte opcode, Register reg, Operand op, int size) {
emit_operand(reg, op);
}
-
-void Assembler::arithmetic_op(byte opcode,
- Register reg,
- Register rm_reg,
+void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg,
int size) {
EnsureSpace ensure_space(this);
DCHECK_EQ(opcode & 0xC6, 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
+ if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
emit_rex(rm_reg, reg, size);
emit(opcode ^ 0x02);
@@ -721,7 +709,6 @@ void Assembler::arithmetic_op(byte opcode,
}
}
-
void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
DCHECK_EQ(opcode & 0xC6, 2);
@@ -758,11 +745,10 @@ void Assembler::arithmetic_op_8(byte opcode, Register reg, Operand op) {
emit_operand(reg, op);
}
-
void Assembler::arithmetic_op_8(byte opcode, Register reg, Register rm_reg) {
EnsureSpace ensure_space(this);
DCHECK_EQ(opcode & 0xC6, 2);
- if (rm_reg.low_bits() == 4) { // Forces SIB byte.
+ if (rm_reg.low_bits() == 4) { // Forces SIB byte.
// Swap reg and rm_reg and change opcode operand order.
if (!rm_reg.is_byte_register() || !reg.is_byte_register()) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -780,11 +766,8 @@ void Assembler::arithmetic_op_8(byte opcode, Register reg, Register rm_reg) {
}
}
-
-void Assembler::immediate_arithmetic_op(byte subcode,
- Register dst,
- Immediate src,
- int size) {
+void Assembler::immediate_arithmetic_op(byte subcode, Register dst,
+ Immediate src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
@@ -816,9 +799,7 @@ void Assembler::immediate_arithmetic_op(byte subcode, Operand dst,
}
}
-
-void Assembler::immediate_arithmetic_op_16(byte subcode,
- Register dst,
+void Assembler::immediate_arithmetic_op_16(byte subcode, Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
emit(0x66); // Operand size override prefix.
@@ -863,9 +844,7 @@ void Assembler::immediate_arithmetic_op_8(byte subcode, Operand dst,
emit(src.value_);
}
-
-void Assembler::immediate_arithmetic_op_8(byte subcode,
- Register dst,
+void Assembler::immediate_arithmetic_op_8(byte subcode, Register dst,
Immediate src) {
EnsureSpace ensure_space(this);
if (!dst.is_byte_register()) {
@@ -878,10 +857,7 @@ void Assembler::immediate_arithmetic_op_8(byte subcode,
emit(src.value_);
}
-
-void Assembler::shift(Register dst,
- Immediate shift_amount,
- int subcode,
+void Assembler::shift(Register dst, Immediate shift_amount, int subcode,
int size) {
EnsureSpace ensure_space(this);
DCHECK(size == kInt64Size ? is_uint6(shift_amount.value_)
@@ -898,7 +874,6 @@ void Assembler::shift(Register dst,
}
}
-
void Assembler::shift(Operand dst, Immediate shift_amount, int subcode,
int size) {
EnsureSpace ensure_space(this);
@@ -916,7 +891,6 @@ void Assembler::shift(Operand dst, Immediate shift_amount, int subcode,
}
}
-
void Assembler::shift(Register dst, int subcode, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
@@ -924,7 +898,6 @@ void Assembler::shift(Register dst, int subcode, int size) {
emit_modrm(subcode, dst);
}
-
void Assembler::shift(Operand dst, int subcode, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
@@ -996,7 +969,6 @@ void Assembler::bsrl(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::bsrq(Register dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
@@ -1013,7 +985,6 @@ void Assembler::bsrq(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::bsfl(Register dst, Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -1030,7 +1001,6 @@ void Assembler::bsfl(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::bsfq(Register dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(dst, src);
@@ -1104,7 +1074,6 @@ void Assembler::call(Label* L) {
}
}
-
void Assembler::call(Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
EnsureSpace ensure_space(this);
@@ -1155,7 +1124,6 @@ void Assembler::call(Operand op) {
emit_operand(0x2, op);
}
-
// Calls directly to the given address using a relative offset.
// Should only ever be used in Code objects for calls within the
// same Code object. Should not be used when generating new code (use labels),
@@ -1170,13 +1138,11 @@ void Assembler::call(Address target) {
emitl(static_cast<int32_t>(displacement));
}
-
void Assembler::clc() {
EnsureSpace ensure_space(this);
emit(0xF8);
}
-
void Assembler::cld() {
EnsureSpace ensure_space(this);
emit(0xFC);
@@ -1187,7 +1153,6 @@ void Assembler::cdq() {
emit(0x99);
}
-
void Assembler::cmovq(Condition cc, Register dst, Register src) {
if (cc == always) {
movq(dst, src);
@@ -1220,7 +1185,6 @@ void Assembler::cmovq(Condition cc, Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::cmovl(Condition cc, Register dst, Register src) {
if (cc == always) {
movl(dst, src);
@@ -1251,7 +1215,6 @@ void Assembler::cmovl(Condition cc, Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::cmpb_al(Immediate imm8) {
DCHECK(is_int8(imm8.value_) || is_uint8(imm8.value_));
EnsureSpace ensure_space(this);
@@ -1307,14 +1270,12 @@ void Assembler::cpuid() {
emit(0xA2);
}
-
void Assembler::cqo() {
EnsureSpace ensure_space(this);
emit_rex_64();
emit(0x99);
}
-
void Assembler::emit_dec(Register dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
@@ -1329,7 +1290,6 @@ void Assembler::emit_dec(Operand dst, int size) {
emit_operand(1, dst);
}
-
void Assembler::decb(Register dst) {
EnsureSpace ensure_space(this);
if (!dst.is_byte_register()) {
@@ -1347,7 +1307,6 @@ void Assembler::decb(Operand dst) {
emit_operand(1, dst);
}
-
void Assembler::enter(Immediate size) {
EnsureSpace ensure_space(this);
emit(0xC8);
@@ -1355,13 +1314,11 @@ void Assembler::enter(Immediate size) {
emit(0);
}
-
void Assembler::hlt() {
EnsureSpace ensure_space(this);
emit(0xF4);
}
-
void Assembler::emit_idiv(Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, size);
@@ -1369,7 +1326,6 @@ void Assembler::emit_idiv(Register src, int size) {
emit_modrm(0x7, src);
}
-
void Assembler::emit_div(Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, size);
@@ -1377,7 +1333,6 @@ void Assembler::emit_div(Register src, int size) {
emit_modrm(0x6, src);
}
-
void Assembler::emit_imul(Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(src, size);
@@ -1392,7 +1347,6 @@ void Assembler::emit_imul(Operand src, int size) {
emit_operand(0x5, src);
}
-
void Assembler::emit_imul(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
@@ -1409,7 +1363,6 @@ void Assembler::emit_imul(Register dst, Operand src, int size) {
emit_operand(dst, src);
}
-
void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, src, size);
@@ -1438,7 +1391,6 @@ void Assembler::emit_imul(Register dst, Operand src, Immediate imm, int size) {
}
}
-
void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
@@ -1453,13 +1405,11 @@ void Assembler::emit_inc(Operand dst, int size) {
emit_operand(0, dst);
}
-
void Assembler::int3() {
EnsureSpace ensure_space(this);
emit(0xCC);
}
-
void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
if (cc == always) {
jmp(L);
@@ -1471,7 +1421,7 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
DCHECK(is_uint4(cc));
if (L->is_bound()) {
const int short_size = 2;
- const int long_size = 6;
+ const int long_size = 6;
int offs = L->pos() - pc_offset();
DCHECK_LE(offs, 0);
// Determine whether we can use 1-byte offsets for backwards branches,
@@ -1535,7 +1485,6 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
}
}
-
void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsRuntimeEntry(rmode));
EnsureSpace ensure_space(this);
@@ -1545,10 +1494,7 @@ void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
emit_runtime_entry(entry, rmode);
}
-
-void Assembler::j(Condition cc,
- Handle<Code> target,
- RelocInfo::Mode rmode) {
+void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
if (cc == always) {
jmp(target, rmode);
return;
@@ -1566,24 +1512,34 @@ void Assembler::j(Condition cc,
emitl(code_target_index);
}
-
-void Assembler::jmp(Label* L, Label::Distance distance) {
+void Assembler::jmp_rel(int offset) {
EnsureSpace ensure_space(this);
const int short_size = sizeof(int8_t);
const int long_size = sizeof(int32_t);
+ --offset; // This is how jumps are specified on x64.
+ if (is_int8(offset - short_size) && !predictable_code_size()) {
+ // 1110 1011 #8-bit disp.
+ emit(0xEB);
+ emit((offset - short_size) & 0xFF);
+ } else {
+ // 1110 1001 #32-bit disp.
+ emit(0xE9);
+ emitl(offset - long_size);
+ }
+}
+
+void Assembler::jmp(Label* L, Label::Distance distance) {
+ const int long_size = sizeof(int32_t);
+
if (L->is_bound()) {
- int offs = L->pos() - pc_offset() - 1;
- DCHECK_LE(offs, 0);
- if (is_int8(offs - short_size) && !predictable_code_size()) {
- // 1110 1011 #8-bit disp.
- emit(0xEB);
- emit((offs - short_size) & 0xFF);
- } else {
- // 1110 1001 #32-bit disp.
- emit(0xE9);
- emitl(offs - long_size);
- }
- } else if (distance == Label::kNear) {
+ int offset = L->pos() - pc_offset();
+ DCHECK_LE(offset, 0); // backward jump.
+ jmp_rel(offset);
+ return;
+ }
+
+ EnsureSpace ensure_space(this);
+ if (distance == Label::kNear) {
emit(0xEB);
byte disp = 0x00;
if (L->is_near_linked()) {
@@ -1622,7 +1578,6 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
}
}
-
void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
EnsureSpace ensure_space(this);
@@ -1633,7 +1588,6 @@ void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
emitl(code_target_index);
}
-
void Assembler::jmp(Register target) {
EnsureSpace ensure_space(this);
// Opcode FF/4 r64.
@@ -1685,7 +1639,6 @@ void Assembler::movb(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::movb(Register dst, Immediate imm) {
EnsureSpace ensure_space(this);
if (!dst.is_byte_register()) {
@@ -1749,7 +1702,6 @@ void Assembler::emit_mov(Register dst, Operand src, int size) {
emit_operand(dst, src);
}
-
void Assembler::emit_mov(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
@@ -1776,7 +1728,6 @@ void Assembler::emit_mov(Operand dst, Register src, int size) {
emit_operand(src, dst);
}
-
void Assembler::emit_mov(Register dst, Immediate value, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
@@ -1818,7 +1769,7 @@ void Assembler::movq_heap_number(Register dst, double value) {
emit_rex(dst, kInt64Size);
emit(0xB8 | dst.low_bits());
RequestHeapObject(HeapObjectRequest(value));
- emit(Immediate64(kNullAddress, RelocInfo::EMBEDDED_OBJECT));
+ emit(Immediate64(kNullAddress, RelocInfo::FULL_EMBEDDED_OBJECT));
}
void Assembler::movq_string(Register dst, const StringConstantBase* str) {
@@ -1826,7 +1777,7 @@ void Assembler::movq_string(Register dst, const StringConstantBase* str) {
emit_rex(dst, kInt64Size);
emit(0xB8 | dst.low_bits());
RequestHeapObject(HeapObjectRequest(str));
- emit(Immediate64(kNullAddress, RelocInfo::EMBEDDED_OBJECT));
+ emit(Immediate64(kNullAddress, RelocInfo::FULL_EMBEDDED_OBJECT));
}
// Loads the ip-relative location of the src label into the target location
@@ -1851,7 +1802,6 @@ void Assembler::movl(Operand dst, Label* src) {
}
}
-
void Assembler::movsxbl(Register dst, Register src) {
EnsureSpace ensure_space(this);
if (!src.is_byte_register()) {
@@ -1945,7 +1895,6 @@ void Assembler::emit_movzxb(Register dst, Operand src, int size) {
emit_operand(dst, src);
}
-
void Assembler::emit_movzxb(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
@@ -1971,7 +1920,6 @@ void Assembler::emit_movzxw(Register dst, Operand src, int size) {
emit_operand(dst, src);
}
-
void Assembler::emit_movzxw(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
@@ -1982,14 +1930,12 @@ void Assembler::emit_movzxw(Register dst, Register src, int size) {
emit_modrm(dst, src);
}
-
void Assembler::repmovsb() {
EnsureSpace ensure_space(this);
emit(0xF3);
emit(0xA4);
}
-
void Assembler::repmovsw() {
EnsureSpace ensure_space(this);
emit(0x66); // Operand size override.
@@ -1997,7 +1943,6 @@ void Assembler::repmovsw() {
emit(0xA4);
}
-
void Assembler::emit_repmovs(int size) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -2005,7 +1950,6 @@ void Assembler::emit_repmovs(int size) {
emit(0xA5);
}
-
void Assembler::mull(Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
@@ -2020,7 +1964,6 @@ void Assembler::mull(Operand src) {
emit_operand(0x4, src);
}
-
void Assembler::mulq(Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src);
@@ -2028,7 +1971,6 @@ void Assembler::mulq(Register src) {
emit_modrm(0x4, src);
}
-
void Assembler::emit_neg(Register dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
@@ -2043,13 +1985,11 @@ void Assembler::emit_neg(Operand dst, int size) {
emit_operand(3, dst);
}
-
void Assembler::nop() {
EnsureSpace ensure_space(this);
emit(0x90);
}
-
void Assembler::emit_not(Register dst, int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
@@ -2064,7 +2004,6 @@ void Assembler::emit_not(Operand dst, int size) {
emit_operand(2, dst);
}
-
void Assembler::Nop(int n) {
// The recommended muti-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
@@ -2146,7 +2085,6 @@ void Assembler::Nop(int n) {
}
}
-
void Assembler::popq(Register dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
@@ -2160,13 +2098,11 @@ void Assembler::popq(Operand dst) {
emit_operand(0, dst);
}
-
void Assembler::popfq() {
EnsureSpace ensure_space(this);
emit(0x9D);
}
-
void Assembler::pushq(Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
@@ -2186,7 +2122,6 @@ void Assembler::pushq(Operand src) {
emit_operand(6, src);
}
-
void Assembler::pushq(Immediate value) {
EnsureSpace ensure_space(this);
if (is_int8(value.value_)) {
@@ -2198,20 +2133,17 @@ void Assembler::pushq(Immediate value) {
}
}
-
void Assembler::pushq_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
emit(0x68);
emitl(imm32);
}
-
void Assembler::pushfq() {
EnsureSpace ensure_space(this);
emit(0x9C);
}
-
void Assembler::ret(int imm16) {
EnsureSpace ensure_space(this);
DCHECK(is_uint16(imm16));
@@ -2224,14 +2156,12 @@ void Assembler::ret(int imm16) {
}
}
-
void Assembler::ud2() {
EnsureSpace ensure_space(this);
emit(0x0F);
emit(0x0B);
}
-
void Assembler::setcc(Condition cc, Register reg) {
if (cc > last_condition) {
movb(reg, Immediate(cc == always ? 1 : 0));
@@ -2248,7 +2178,6 @@ void Assembler::setcc(Condition cc, Register reg) {
emit_modrm(0x0, reg);
}
-
void Assembler::shld(Register dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
@@ -2257,7 +2186,6 @@ void Assembler::shld(Register dst, Register src) {
emit_modrm(src, dst);
}
-
void Assembler::shrd(Register dst, Register src) {
EnsureSpace ensure_space(this);
emit_rex_64(src, dst);
@@ -2383,7 +2311,6 @@ void Assembler::emit_test(Register dst, Register src, int size) {
emit_modrm(dst, src);
}
-
void Assembler::emit_test(Register reg, Immediate mask, int size) {
if (is_uint8(mask.value_)) {
size = sizeof(int8_t);
@@ -2468,37 +2395,31 @@ void Assembler::emit_test(Operand op, Register reg, int size) {
emit_operand(reg, op);
}
-
// FPU instructions.
-
void Assembler::fld(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD9, 0xC0, i);
}
-
void Assembler::fld1() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xE8);
}
-
void Assembler::fldz() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xEE);
}
-
void Assembler::fldpi() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xEB);
}
-
void Assembler::fldln2() {
EnsureSpace ensure_space(this);
emit(0xD9);
@@ -2533,7 +2454,6 @@ void Assembler::fstp_d(Operand adr) {
emit_operand(3, adr);
}
-
void Assembler::fstp(int index) {
DCHECK(is_uint3(index));
EnsureSpace ensure_space(this);
@@ -2591,76 +2511,65 @@ void Assembler::fistp_d(Operand adr) {
emit_operand(7, adr);
}
-
void Assembler::fabs() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xE1);
}
-
void Assembler::fchs() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xE0);
}
-
void Assembler::fcos() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xFF);
}
-
void Assembler::fsin() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xFE);
}
-
void Assembler::fptan() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xF2);
}
-
void Assembler::fyl2x() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xF1);
}
-
void Assembler::f2xm1() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xF0);
}
-
void Assembler::fscale() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xFD);
}
-
void Assembler::fninit() {
EnsureSpace ensure_space(this);
emit(0xDB);
emit(0xE3);
}
-
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
}
-
void Assembler::fsub(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xE8, i);
@@ -2673,150 +2582,127 @@ void Assembler::fisub_s(Operand adr) {
emit_operand(4, adr);
}
-
void Assembler::fmul(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC8, i);
}
-
void Assembler::fdiv(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xF8, i);
}
-
void Assembler::faddp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xC0, i);
}
-
void Assembler::fsubp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xE8, i);
}
-
void Assembler::fsubrp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xE0, i);
}
-
void Assembler::fmulp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xC8, i);
}
-
void Assembler::fdivp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDE, 0xF8, i);
}
-
void Assembler::fprem() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xF8);
}
-
void Assembler::fprem1() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xF5);
}
-
void Assembler::fxch(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xD9, 0xC8, i);
}
-
void Assembler::fincstp() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xF7);
}
-
void Assembler::ffree(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDD, 0xC0, i);
}
-
void Assembler::ftst() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xE4);
}
-
void Assembler::fucomp(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDD, 0xE8, i);
}
-
void Assembler::fucompp() {
EnsureSpace ensure_space(this);
emit(0xDA);
emit(0xE9);
}
-
void Assembler::fucomi(int i) {
EnsureSpace ensure_space(this);
emit(0xDB);
emit(0xE8 + i);
}
-
void Assembler::fucomip() {
EnsureSpace ensure_space(this);
emit(0xDF);
emit(0xE9);
}
-
void Assembler::fcompp() {
EnsureSpace ensure_space(this);
emit(0xDE);
emit(0xD9);
}
-
void Assembler::fnstsw_ax() {
EnsureSpace ensure_space(this);
emit(0xDF);
emit(0xE0);
}
-
void Assembler::fwait() {
EnsureSpace ensure_space(this);
emit(0x9B);
}
-
void Assembler::frndint() {
EnsureSpace ensure_space(this);
emit(0xD9);
emit(0xFC);
}
-
void Assembler::fnclex() {
EnsureSpace ensure_space(this);
emit(0xDB);
emit(0xE2);
}
-
void Assembler::sahf() {
// TODO(X64): Test for presence. Not all 64-bit intel CPU's have sahf
// in 64-bit mode. Test CpuID.
@@ -2825,15 +2711,13 @@ void Assembler::sahf() {
emit(0x9E);
}
-
void Assembler::emit_farith(int b1, int b2, int i) {
DCHECK(is_uint8(b1) && is_uint8(b2)); // wrong opcode
- DCHECK(is_uint3(i)); // illegal stack offset
+ DCHECK(is_uint3(i)); // illegal stack offset
emit(b1);
emit(b2 + i);
}
-
// SSE operations.
void Assembler::andps(XMMRegister dst, XMMRegister src) {
@@ -2884,7 +2768,6 @@ void Assembler::orps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::xorps(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -2903,7 +2786,6 @@ void Assembler::xorps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::addps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -2920,7 +2802,6 @@ void Assembler::addps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::subps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -2937,7 +2818,6 @@ void Assembler::subps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::mulps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -2954,7 +2834,6 @@ void Assembler::mulps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::divps(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -2971,7 +2850,6 @@ void Assembler::divps(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
// SSE 2 operations.
void Assembler::movd(XMMRegister dst, Register src) {
@@ -2994,7 +2872,6 @@ void Assembler::movd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::movd(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3005,7 +2882,6 @@ void Assembler::movd(Register dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
void Assembler::movq(XMMRegister dst, Register src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3016,7 +2892,6 @@ void Assembler::movq(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
void Assembler::movq(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3027,7 +2902,6 @@ void Assembler::movq(Register dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
void Assembler::movq(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3083,7 +2957,6 @@ void Assembler::movdqu(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
DCHECK(IsEnabled(SSE4_1));
DCHECK(is_uint8(imm8));
@@ -3279,7 +3152,6 @@ void Assembler::movsd(Operand dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3300,7 +3172,6 @@ void Assembler::movsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3318,7 +3189,6 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
}
}
-
void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
@@ -3329,7 +3199,6 @@ void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
emit(imm8);
}
-
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3385,7 +3254,6 @@ void Assembler::addss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::subss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3404,7 +3272,6 @@ void Assembler::subss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3423,7 +3290,6 @@ void Assembler::mulss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::divss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3442,7 +3308,6 @@ void Assembler::divss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::maxss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3461,7 +3326,6 @@ void Assembler::maxss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::minss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3480,7 +3344,6 @@ void Assembler::minss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3499,7 +3362,6 @@ void Assembler::sqrtss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3518,7 +3380,6 @@ void Assembler::ucomiss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::movss(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3549,7 +3410,6 @@ void Assembler::movss(Operand src, XMMRegister dst) {
emit_sse_operand(dst, src);
}
-
void Assembler::psllq(XMMRegister reg, byte imm8) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3561,7 +3421,6 @@ void Assembler::psllq(XMMRegister reg, byte imm8) {
emit(imm8);
}
-
void Assembler::psrlq(XMMRegister reg, byte imm8) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3681,7 +3540,6 @@ void Assembler::cvttss2si(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::cvttss2si(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3702,7 +3560,6 @@ void Assembler::cvttsd2si(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::cvttsd2si(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3713,7 +3570,6 @@ void Assembler::cvttsd2si(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3734,7 +3590,6 @@ void Assembler::cvttss2siq(Register dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3783,7 +3638,6 @@ void Assembler::cvtlsi2sd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3804,7 +3658,6 @@ void Assembler::cvtlsi2ss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3824,7 +3677,6 @@ void Assembler::cvtqsi2ss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvtqsi2ss(XMMRegister dst, Register src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3845,7 +3697,6 @@ void Assembler::cvtqsi2sd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3856,7 +3707,6 @@ void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3877,7 +3727,6 @@ void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3898,7 +3747,6 @@ void Assembler::cvtsd2ss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3909,7 +3757,6 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3920,7 +3767,6 @@ void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3939,7 +3785,6 @@ void Assembler::addsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3958,7 +3803,6 @@ void Assembler::mulsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3977,7 +3821,6 @@ void Assembler::subsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -3996,7 +3839,6 @@ void Assembler::divsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::maxsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -4015,7 +3857,6 @@ void Assembler::maxsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::minsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -4034,7 +3875,6 @@ void Assembler::minsd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -4053,7 +3893,6 @@ void Assembler::andpd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -4072,7 +3911,6 @@ void Assembler::orpd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -4093,7 +3931,6 @@ void Assembler::xorpd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -4154,7 +3991,6 @@ void Assembler::ucomisd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -4165,7 +4001,6 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
emit(0x01); // LT == 1
}
-
void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(!IsEnabled(AVX));
DCHECK(IsEnabled(SSE4_1));
@@ -4180,7 +4015,6 @@ void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(static_cast<byte>(mode) | 0x8);
}
-
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(!IsEnabled(AVX));
DCHECK(IsEnabled(SSE4_1));
@@ -4195,7 +4029,6 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
emit(static_cast<byte>(mode) | 0x8);
}
-
void Assembler::movmskpd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -4205,7 +4038,6 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
void Assembler::movmskps(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst, src);
@@ -4214,7 +4046,6 @@ void Assembler::movmskps(Register dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
-
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
@@ -4234,7 +4065,6 @@ void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(FMA3));
@@ -4253,7 +4083,6 @@ void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
-
void Assembler::vmovd(XMMRegister dst, Register src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -4271,7 +4100,6 @@ void Assembler::vmovd(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::vmovd(Register dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -4281,7 +4109,6 @@ void Assembler::vmovd(Register dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
-
void Assembler::vmovq(XMMRegister dst, Register src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -4299,7 +4126,6 @@ void Assembler::vmovq(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::vmovq(Register dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -4328,7 +4154,6 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
emit_sse_operand(dst, src2);
}
-
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(AVX));
@@ -4346,7 +4171,6 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
emit_sse_operand(dst, src2);
}
-
void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(AVX));
@@ -4364,7 +4188,6 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
emit_sse_operand(dst, src2);
}
-
void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -4381,7 +4204,6 @@ void Assembler::vucomiss(XMMRegister dst, Operand src) {
emit_sse_operand(dst, src);
}
-
void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
DCHECK(IsEnabled(AVX));
@@ -4399,7 +4221,6 @@ void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
emit_sse_operand(dst, src2);
}
-
void Assembler::bmi1q(byte op, Register reg, Register vreg, Register rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
@@ -4416,7 +4237,6 @@ void Assembler::bmi1q(byte op, Register reg, Register vreg, Operand rm) {
emit_operand(reg, rm);
}
-
void Assembler::bmi1l(byte op, Register reg, Register vreg, Register rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
@@ -4433,7 +4253,6 @@ void Assembler::bmi1l(byte op, Register reg, Register vreg, Operand rm) {
emit_operand(reg, rm);
}
-
void Assembler::tzcntq(Register dst, Register src) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
@@ -4454,7 +4273,6 @@ void Assembler::tzcntq(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::tzcntl(Register dst, Register src) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
@@ -4475,7 +4293,6 @@ void Assembler::tzcntl(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::lzcntq(Register dst, Register src) {
DCHECK(IsEnabled(LZCNT));
EnsureSpace ensure_space(this);
@@ -4496,7 +4313,6 @@ void Assembler::lzcntq(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::lzcntl(Register dst, Register src) {
DCHECK(IsEnabled(LZCNT));
EnsureSpace ensure_space(this);
@@ -4517,7 +4333,6 @@ void Assembler::lzcntl(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::popcntq(Register dst, Register src) {
DCHECK(IsEnabled(POPCNT));
EnsureSpace ensure_space(this);
@@ -4538,7 +4353,6 @@ void Assembler::popcntq(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::popcntl(Register dst, Register src) {
DCHECK(IsEnabled(POPCNT));
EnsureSpace ensure_space(this);
@@ -4559,7 +4373,6 @@ void Assembler::popcntl(Register dst, Operand src) {
emit_operand(dst, src);
}
-
void Assembler::bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg,
Register rm) {
DCHECK(IsEnabled(BMI2));
@@ -4578,7 +4391,6 @@ void Assembler::bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg,
emit_operand(reg, rm);
}
-
void Assembler::bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg,
Register rm) {
DCHECK(IsEnabled(BMI2));
@@ -4597,7 +4409,6 @@ void Assembler::bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg,
emit_operand(reg, rm);
}
-
void Assembler::rorxq(Register dst, Register src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
@@ -4620,7 +4431,6 @@ void Assembler::rorxq(Register dst, Operand src, byte imm8) {
emit(imm8);
}
-
void Assembler::rorxl(Register dst, Register src, byte imm8) {
DCHECK(IsEnabled(BMI2));
DCHECK(is_uint8(imm8));
@@ -4933,17 +4743,14 @@ void Assembler::emit_sse_operand(Register reg, Operand adr) {
emit_operand(reg, adr);
}
-
void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
-
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
-
void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
@@ -4957,19 +4764,16 @@ void Assembler::db(uint8_t data) {
emit(data);
}
-
void Assembler::dd(uint32_t data) {
EnsureSpace ensure_space(this);
emitl(data);
}
-
void Assembler::dq(uint64_t data) {
EnsureSpace ensure_space(this);
emitq(data);
}
-
void Assembler::dq(Label* label) {
EnsureSpace ensure_space(this);
if (label->is_bound()) {
@@ -4991,7 +4795,6 @@ void Assembler::dq(Label* label) {
}
}
-
// Relocation information implementations.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
@@ -5013,9 +4816,7 @@ bool RelocInfo::IsCodedSpecially() {
return (1 << rmode_) & kApplyMask;
}
-bool RelocInfo::IsInConstantPool() {
- return false;
-}
+bool RelocInfo::IsInConstantPool() { return false; }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index e0e741ae98..dc6acb67f4 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -34,21 +34,21 @@
// A lightweight X64 Assembler.
-#ifndef V8_X64_ASSEMBLER_X64_H_
-#define V8_X64_ASSEMBLER_X64_H_
+#ifndef V8_CODEGEN_X64_ASSEMBLER_X64_H_
+#define V8_CODEGEN_X64_ASSEMBLER_X64_H_
#include <deque>
#include <map>
#include <vector>
-#include "src/assembler.h"
-#include "src/label.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/label.h"
+#include "src/codegen/x64/constants-x64.h"
+#include "src/codegen/x64/register-x64.h"
+#include "src/codegen/x64/sse-instr.h"
#include "src/objects/smi.h"
-#include "src/x64/constants-x64.h"
-#include "src/x64/register-x64.h"
-#include "src/x64/sse-instr.h"
#if defined(V8_OS_WIN_X64)
-#include "src/unwinding-info-win64.h"
+#include "src/diagnostics/unwinding-info-win64.h"
#endif
namespace v8 {
@@ -60,40 +60,39 @@ class SafepointTableBuilder;
enum Condition {
// any value < 0 is considered no_condition
- no_condition = -1,
-
- overflow = 0,
- no_overflow = 1,
- below = 2,
- above_equal = 3,
- equal = 4,
- not_equal = 5,
- below_equal = 6,
- above = 7,
- negative = 8,
- positive = 9,
- parity_even = 10,
- parity_odd = 11,
- less = 12,
+ no_condition = -1,
+
+ overflow = 0,
+ no_overflow = 1,
+ below = 2,
+ above_equal = 3,
+ equal = 4,
+ not_equal = 5,
+ below_equal = 6,
+ above = 7,
+ negative = 8,
+ positive = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
greater_equal = 13,
- less_equal = 14,
- greater = 15,
+ less_equal = 14,
+ greater = 15,
// Fake conditions that are handled by the
// opcodes using them.
- always = 16,
- never = 17,
+ always = 16,
+ never = 17,
// aliases
- carry = below,
- not_carry = above_equal,
- zero = equal,
- not_zero = not_equal,
- sign = negative,
- not_sign = positive,
+ carry = below,
+ not_carry = above_equal,
+ zero = equal,
+ not_zero = not_equal,
+ sign = negative,
+ not_sign = positive,
last_condition = greater
};
-
// Returns the equivalent of !cc.
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
@@ -102,7 +101,6 @@ inline Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
-
enum RoundingMode {
kRoundToNearest = 0x0,
kRoundDown = 0x1,
@@ -110,7 +108,6 @@ enum RoundingMode {
kRoundToZero = 0x3
};
-
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -175,15 +172,10 @@ class V8_EXPORT_PRIVATE Operand {
Operand(Register base, int32_t disp);
// [base + index*scale + disp/r]
- Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp);
+ Operand(Register base, Register index, ScaleFactor scale, int32_t disp);
// [index*scale + disp/r]
- Operand(Register index,
- ScaleFactor scale,
- int32_t disp);
+ Operand(Register index, ScaleFactor scale, int32_t disp);
// Offset from existing memory operand.
// Offset is added to existing displacement as 32-bit signed values and
@@ -292,7 +284,7 @@ class ConstPool {
Assembler* assm_;
// Values, pc offsets of entries.
- typedef std::multimap<uint64_t, int> EntryMap;
+ using EntryMap = std::multimap<uint64_t, int>;
EntryMap entries_;
// Number of bytes taken up by the displacement of rip-relative addressing.
@@ -365,10 +357,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- // Return the code target address at a call site from the return address
- // of that call in the instruction stream.
- static inline Address target_address_from_return_address(Address pc);
-
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
@@ -384,13 +372,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
inline Handle<Code> code_target_object_handle_at(Address pc);
+ inline Handle<HeapObject> compressed_embedded_object_handle_at(Address pc);
inline Address runtime_entry_at(Address pc);
// Number of bytes taken up by the branch target in the code.
static constexpr int kSpecialTargetSize = 4; // 32-bit displacement.
- // Distance between the address of the code target in the call instruction
- // and the return address pushed on the stack.
- static constexpr int kCallTargetAddressOffset = 4; // 32-bit displacement.
// One byte opcode for test eax,0xXXXXXXXX.
static constexpr byte kTestEaxByte = 0xA9;
@@ -567,9 +553,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cmpb_al(Immediate src);
- void cmpb(Register dst, Register src) {
- arithmetic_op_8(0x3A, dst, src);
- }
+ void cmpb(Register dst, Register src) { arithmetic_op_8(0x3A, dst, src); }
void cmpb(Register dst, Operand src) { arithmetic_op_8(0x3A, dst, src); }
@@ -589,9 +573,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cmpw(Register dst, Operand src) { arithmetic_op_16(0x3B, dst, src); }
- void cmpw(Register dst, Register src) {
- arithmetic_op_16(0x3B, dst, src);
- }
+ void cmpw(Register dst, Register src) { arithmetic_op_16(0x3B, dst, src); }
void cmpw(Operand dst, Register src) { arithmetic_op_16(0x39, src, dst); }
@@ -763,10 +745,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void jmp(Register adr);
void jmp(Operand src);
+ // Unconditional jump relative to the current address. Low-level routine,
+ // use with caution!
+ void jmp_rel(int offset);
+
// Conditional jumps
- void j(Condition cc,
- Label* L,
- Label::Distance distance = Label::kFar);
+ void j(Condition cc, Label* L, Label::Distance distance = Label::kFar);
void j(Condition cc, Address entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
@@ -1034,7 +1018,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvtqsi2sd(XMMRegister dst, Operand src);
void cvtqsi2sd(XMMRegister dst, Register src);
-
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtss2sd(XMMRegister dst, Operand src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -1753,7 +1736,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
int id);
-
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -1803,7 +1785,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
uint32_t long_at(int pos) {
return ReadUnalignedValue<uint32_t>(addr_at(pos));
}
- void long_at_put(int pos, uint32_t x) {
+ void long_at_put(int pos, uint32_t x) {
WriteUnalignedValue(addr_at(pos), x);
}
@@ -1911,7 +1893,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
- template<class P1>
+ template <class P1>
void emit_rex(P1 p1, int size) {
if (size == kInt64Size) {
emit_rex_64(p1);
@@ -1921,7 +1903,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
- template<class P1, class P2>
+ template <class P1, class P2>
void emit_rex(P1 p1, P2 p2, int size) {
if (size == kInt64Size) {
emit_rex_64(p1, p2);
@@ -2001,19 +1983,13 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
void arithmetic_op(byte opcode, Register reg, Operand rm_reg, int size);
// Operate on a byte in memory or register.
- void immediate_arithmetic_op_8(byte subcode,
- Register dst,
- Immediate src);
+ void immediate_arithmetic_op_8(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op_8(byte subcode, Operand dst, Immediate src);
// Operate on a word in memory or register.
- void immediate_arithmetic_op_16(byte subcode,
- Register dst,
- Immediate src);
+ void immediate_arithmetic_op_16(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op_16(byte subcode, Operand dst, Immediate src);
// Operate on operands/registers with pointer size, 32-bit or 64-bit size.
- void immediate_arithmetic_op(byte subcode,
- Register dst,
- Immediate src,
+ void immediate_arithmetic_op(byte subcode, Register dst, Immediate src,
int size);
void immediate_arithmetic_op(byte subcode, Operand dst, Immediate src,
int size);
@@ -2200,8 +2176,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void emit_xor(Register dst, Register src, int size) {
if (size == kInt64Size && dst.code() == src.code()) {
- // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
- // there is no need to make this a 64 bit operation.
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
arithmetic_op(0x33, dst, src, kInt32Size);
} else {
arithmetic_op(0x33, dst, src, size);
@@ -2268,7 +2244,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
#endif
};
-
// Helper class that ensures that there is enough space for generating
// instructions and relocation information. The constructor makes
// sure that there is enough space and (in debug mode) the destructor
@@ -2299,4 +2274,4 @@ class EnsureSpace {
} // namespace internal
} // namespace v8
-#endif // V8_X64_ASSEMBLER_X64_H_
+#endif // V8_CODEGEN_X64_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/x64/constants-x64.h b/deps/v8/src/codegen/x64/constants-x64.h
index 1f2b04248c..0e43b05034 100644
--- a/deps/v8/src/x64/constants-x64.h
+++ b/deps/v8/src/codegen/x64/constants-x64.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_CONSTANTS_X64_H_
-#define V8_X64_CONSTANTS_X64_H_
+#ifndef V8_CODEGEN_X64_CONSTANTS_X64_H_
+#define V8_CODEGEN_X64_CONSTANTS_X64_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -18,4 +18,4 @@ constexpr size_t kMaxPCRelativeCodeRangeInMB = 2048;
} // namespace internal
} // namespace v8
-#endif // V8_X64_CONSTANTS_X64_H_
+#endif // V8_CODEGEN_X64_CONSTANTS_X64_H_
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/codegen/x64/cpu-x64.cc
index 07e00023ff..cce76d8c6a 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/codegen/x64/cpu-x64.cc
@@ -10,7 +10,7 @@
#if V8_TARGET_ARCH_X64
-#include "src/cpu-features.h"
+#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
index aa0819dd46..0fd62d46a4 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64.cc
@@ -4,9 +4,9 @@
#if V8_TARGET_ARCH_X64
-#include "src/interface-descriptors.h"
+#include "src/codegen/interface-descriptors.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
@@ -76,14 +76,12 @@ const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
-
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 13fc25dd46..493c711009 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -7,28 +7,28 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/bootstrapper.h"
-#include "src/callable.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/string-constants.h"
+#include "src/codegen/x64/assembler-x64.h"
+#include "src/common/globals.h"
#include "src/debug/debug.h"
-#include "src/external-reference-table.h"
-#include "src/frames-inl.h"
-#include "src/globals.h"
+#include "src/execution/frames-inl.h"
#include "src/heap/heap-inl.h" // For MemoryChunk.
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/snapshot.h"
-#include "src/string-constants.h"
-#include "src/x64/assembler-x64.h"
// Satisfy cpplint check, but don't include platform-specific header. It is
// included recursively via macro-assembler.h.
#if 0
-#include "src/x64/macro-assembler-x64.h"
+#include "src/codegen/x64/macro-assembler-x64.h"
#endif
namespace v8 {
@@ -87,7 +87,6 @@ void MacroAssembler::Load(Register destination, ExternalReference source) {
}
}
-
void MacroAssembler::Store(ExternalReference destination, Register source) {
if (root_array_available_ && options().enable_root_array_delta_access) {
intptr_t delta =
@@ -293,6 +292,13 @@ void TurboAssembler::DecompressTaggedSigned(Register destination,
RecordComment("]");
}
+void TurboAssembler::DecompressTaggedSigned(Register destination,
+ Register source) {
+ RecordComment("[ DecompressTaggedSigned");
+ movsxlq(destination, source);
+ RecordComment("]");
+}
+
void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedPointer");
@@ -301,12 +307,16 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
RecordComment("]");
}
-void TurboAssembler::DecompressAnyTagged(Register destination,
- Operand field_operand,
- Register scratch) {
- DCHECK(!AreAliased(destination, scratch));
- RecordComment("[ DecompressAnyTagged");
- movsxlq(destination, field_operand);
+void TurboAssembler::DecompressTaggedPointer(Register destination,
+ Register source) {
+ RecordComment("[ DecompressTaggedPointer");
+ movsxlq(destination, source);
+ addq(destination, kRootRegister);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressRegisterAnyTagged(Register destination,
+ Register scratch) {
if (kUseBranchlessPtrDecompression) {
// Branchlessly compute |masked_root|:
// masked_root = HAS_SMI_TAG(destination) ? 0 : kRootRegister;
@@ -325,6 +335,24 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
addq(destination, kRootRegister);
bind(&done);
}
+}
+
+void TurboAssembler::DecompressAnyTagged(Register destination,
+ Operand field_operand,
+ Register scratch) {
+ DCHECK(!AreAliased(destination, scratch));
+ RecordComment("[ DecompressAnyTagged");
+ movsxlq(destination, field_operand);
+ DecompressRegisterAnyTagged(destination, scratch);
+ RecordComment("]");
+}
+
+void TurboAssembler::DecompressAnyTagged(Register destination, Register source,
+ Register scratch) {
+ DCHECK(!AreAliased(destination, scratch));
+ RecordComment("[ DecompressAnyTagged");
+ movsxlq(destination, source);
+ DecompressRegisterAnyTagged(destination, scratch);
RecordComment("]");
}
@@ -507,9 +535,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
CheckPageFlag(object,
value, // Used as scratch.
- MemoryChunk::kPointersFromHereAreInterestingMask,
- zero,
- &done,
+ MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
Label::kNear);
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
@@ -605,8 +631,7 @@ void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
CallCodeObject(centry);
}
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
- int num_arguments,
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
@@ -694,7 +719,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
int delta = kDoubleSize * XMMRegister::kNumRegisters;
- subq(rsp, Immediate(delta));
+ AllocateStackSpace(delta);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
Movsd(Operand(rsp, i * kDoubleSize), reg);
@@ -1074,13 +1099,12 @@ void TurboAssembler::Set(Operand dst, intptr_t x) {
}
}
-
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
Register TurboAssembler::GetSmiConstant(Smi source) {
STATIC_ASSERT(kSmiTag == 0);
- int value = source->value();
+ int value = source.value();
if (value == 0) {
xorl(kScratchRegister, kScratchRegister);
return kScratchRegister;
@@ -1091,7 +1115,7 @@ Register TurboAssembler::GetSmiConstant(Smi source) {
void TurboAssembler::Move(Register dst, Smi source) {
STATIC_ASSERT(kSmiTag == 0);
- int value = source->value();
+ int value = source.value();
if (value == 0) {
xorl(dst, dst);
} else {
@@ -1156,7 +1180,7 @@ void MacroAssembler::SmiCompare(Register dst, Smi src) {
void MacroAssembler::Cmp(Register dst, Smi src) {
DCHECK_NE(dst, kScratchRegister);
- if (src->value() == 0) {
+ if (src.value() == 0) {
test_tagged(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
@@ -1179,7 +1203,7 @@ void MacroAssembler::SmiCompare(Operand dst, Register src) {
void MacroAssembler::SmiCompare(Operand dst, Smi src) {
AssertSmi(dst);
if (SmiValuesAre32Bits()) {
- cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src.value()));
} else {
DCHECK(SmiValuesAre31Bits());
cmpl(dst, Immediate(src));
@@ -1193,7 +1217,6 @@ void MacroAssembler::Cmp(Operand dst, Smi src) {
cmp_tagged(dst, smi_reg);
}
-
Condition TurboAssembler::CheckSmi(Register src) {
STATIC_ASSERT(kSmiTag == 0);
testb(src, Immediate(kSmiTagMask));
@@ -1212,8 +1235,7 @@ void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
j(smi, on_smi, near_jump);
}
-void MacroAssembler::JumpIfNotSmi(Register src,
- Label* on_not_smi,
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi,
Label::Distance near_jump) {
Condition smi = CheckSmi(src);
j(NegateCondition(smi), on_not_smi, near_jump);
@@ -1226,10 +1248,9 @@ void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
}
void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
- if (constant->value() != 0) {
+ if (constant.value() != 0) {
if (SmiValuesAre32Bits()) {
- addl(Operand(dst, kSmiShift / kBitsPerByte),
- Immediate(constant->value()));
+ addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant.value()));
} else {
DCHECK(SmiValuesAre31Bits());
if (kTaggedSize == kInt64Size) {
@@ -1246,9 +1267,7 @@ void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) {
}
}
-SmiIndex MacroAssembler::SmiToIndex(Register dst,
- Register src,
- int shift) {
+SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
if (SmiValuesAre32Bits()) {
DCHECK(is_uint6(shift));
// There is a possible optimization if shift is in the range 60-63, but that
@@ -1448,7 +1467,13 @@ void TurboAssembler::Move(Register result, Handle<HeapObject> object,
return;
}
}
- movq(result, Immediate64(object.address(), rmode));
+ if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ int compressed_embedded_object_index = AddCompressedEmbeddedObject(object);
+ movl(result, Immediate(compressed_embedded_object_index, rmode));
+ } else {
+ DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
+ movq(result, Immediate64(object.address(), rmode));
+ }
}
void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
@@ -1469,7 +1494,6 @@ void MacroAssembler::Drop(int stack_elements) {
}
}
-
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
Register scratch) {
DCHECK_GT(stack_elements, 0);
@@ -1889,72 +1913,11 @@ void TurboAssembler::Popcntq(Register dst, Operand src) {
UNREACHABLE();
}
-
-void MacroAssembler::Pushad() {
- Push(rax);
- Push(rcx);
- Push(rdx);
- Push(rbx);
- // Not pushing rsp or rbp.
- Push(rsi);
- Push(rdi);
- Push(r8);
- Push(r9);
- // r10 is kScratchRegister.
- Push(r11);
- Push(r12);
- // r13 is kRootRegister.
- Push(r14);
- Push(r15);
- STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
- // Use lea for symmetry with Popad.
- int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
- kSystemPointerSize;
- leaq(rsp, Operand(rsp, -sp_delta));
-}
-
-
-void MacroAssembler::Popad() {
- // Popad must not change the flags, so use lea instead of addq.
- int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) *
- kSystemPointerSize;
- leaq(rsp, Operand(rsp, sp_delta));
- Pop(r15);
- Pop(r14);
- Pop(r12);
- Pop(r11);
- Pop(r9);
- Pop(r8);
- Pop(rdi);
- Pop(rsi);
- Pop(rbx);
- Pop(rdx);
- Pop(rcx);
- Pop(rax);
-}
-
-
// Order general registers are pushed by Pushad:
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
const int
-MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
- 0,
- 1,
- 2,
- 3,
- -1,
- -1,
- 4,
- 5,
- 6,
- 7,
- -1,
- 8,
- 9,
- -1,
- 10,
- 11
-};
+ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
+ 0, 1, 2, 3, -1, -1, 4, 5, 6, 7, -1, 8, 9, -1, 10, 11};
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
@@ -1972,7 +1935,6 @@ void MacroAssembler::PushStackHandler() {
movq(ExternalReferenceAsOperand(handler_address), rsp);
}
-
void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address =
@@ -1994,30 +1956,17 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
}
}
-void MacroAssembler::CmpObjectType(Register heap_object,
- InstanceType type,
+void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
Register map) {
LoadTaggedPointerField(map,
FieldOperand(heap_object, HeapObject::kMapOffset));
CmpInstanceType(map, type);
}
-
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
-void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, Label* lost_precision,
- Label* is_nan, Label::Distance dst) {
- Cvttsd2si(result_reg, input_reg);
- Cvtlsi2sd(kScratchDoubleReg, result_reg);
- Ucomisd(kScratchDoubleReg, input_reg);
- j(not_equal, lost_precision, dst);
- j(parity_even, is_nan, dst); // NaN.
-}
-
-
void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
@@ -2025,7 +1974,6 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
}
-
void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
Condition is_smi = CheckSmi(object);
@@ -2074,7 +2022,6 @@ void MacroAssembler::AssertFunction(Register object) {
}
}
-
void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
@@ -2149,7 +2096,6 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
}
}
-
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2323,7 +2269,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
definitely_matches = true;
} else {
if (expected.immediate() ==
- SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
// Don't worry about adapting arguments for built-ins that
// don't want that done. Skip adaption code by making it look
// like we have a match between expected and actual number of
@@ -2445,6 +2391,38 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
popq(rbp);
}
+#ifdef V8_OS_WIN
+void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
+ // In windows, we cannot increment the stack size by more than one page
+ // (minimum page size is 4KB) without accessing at least one byte on the
+ // page. Check this:
+ // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
+ Label check_offset;
+ Label touch_next_page;
+ jmp(&check_offset);
+ bind(&touch_next_page);
+ subq(rsp, Immediate(kStackPageSize));
+ // Just to touch the page, before we increment further.
+ movb(Operand(rsp, 0), Immediate(0));
+ subq(bytes_scratch, Immediate(kStackPageSize));
+
+ bind(&check_offset);
+ cmpq(bytes_scratch, Immediate(kStackPageSize));
+ j(greater, &touch_next_page);
+
+ subq(rsp, bytes_scratch);
+}
+
+void TurboAssembler::AllocateStackSpace(int bytes) {
+ while (bytes > kStackPageSize) {
+ subq(rsp, Immediate(kStackPageSize));
+ movb(Operand(rsp, 0), Immediate(0));
+ bytes -= kStackPageSize;
+ }
+ subq(rsp, Immediate(bytes));
+}
+#endif
+
void MacroAssembler::EnterExitFramePrologue(bool save_rax,
StackFrame::Type frame_type) {
DCHECK(frame_type == StackFrame::EXIT ||
@@ -2479,7 +2457,6 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax,
rbx);
}
-
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
bool save_doubles) {
#ifdef _WIN64
@@ -2490,7 +2467,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (save_doubles) {
int space = XMMRegister::kNumRegisters * kDoubleSize +
arg_stack_space * kSystemPointerSize;
- subq(rsp, Immediate(space));
+ AllocateStackSpace(space);
int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -2499,7 +2476,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kSystemPointerSize));
+ AllocateStackSpace(arg_stack_space * kSystemPointerSize);
}
// Get the required frame alignment for the OS.
@@ -2526,13 +2503,11 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
-
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
EnterExitFramePrologue(false, StackFrame::EXIT);
EnterExitFrameEpilogue(arg_stack_space, false);
}
-
void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
// Registers:
// r15 : argv
@@ -2588,20 +2563,17 @@ void MacroAssembler::LeaveExitFrameEpilogue() {
movq(c_entry_fp_operand, Immediate(0));
}
-
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadTaggedPointerField(dst, NativeContextOperand());
LoadTaggedPointerField(dst, ContextOperand(dst, index));
}
-
int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
// On Windows 64 stack slots are reserved by the caller for all arguments
// including the ones passed in registers, and space is always allocated for
@@ -2630,7 +2602,7 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) {
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kSystemPointerSize));
+ AllocateStackSpace((argument_slots_on_stack + 1) * kSystemPointerSize);
andq(rsp, Immediate(-frame_alignment));
movq(Operand(rsp, argument_slots_on_stack * kSystemPointerSize),
kScratchRegister);
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index af85126d8d..a5b8e60ec5 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -6,20 +6,20 @@
#error This header must be included via macro-assembler.h
#endif
-#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
-#define V8_X64_MACRO_ASSEMBLER_X64_H_
+#ifndef V8_CODEGEN_X64_MACRO_ASSEMBLER_X64_H_
+#define V8_CODEGEN_X64_MACRO_ASSEMBLER_X64_H_
-#include "src/bailout-reason.h"
#include "src/base/flags.h"
-#include "src/contexts.h"
-#include "src/globals.h"
-#include "src/x64/assembler-x64.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/codegen/x64/assembler-x64.h"
+#include "src/common/globals.h"
+#include "src/objects/contexts.h"
namespace v8 {
namespace internal {
// Convenience for platform-independent signatures.
-typedef Operand MemOperand;
+using MemOperand = Operand;
class StringConstantBase;
@@ -28,8 +28,7 @@ enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
- : reg(index_register),
- scale(scale) {}
+ : reg(index_register), scale(scale) {}
Register reg;
ScaleFactor scale;
};
@@ -41,17 +40,6 @@ enum StackArgumentsAccessorReceiverMode {
class StackArgumentsAccessor {
public:
- StackArgumentsAccessor(Register base_reg, int argument_count_immediate,
- StackArgumentsAccessorReceiverMode receiver_mode =
- ARGUMENTS_CONTAIN_RECEIVER,
- int extra_displacement_to_last_argument = 0)
- : base_reg_(base_reg),
- argument_count_reg_(no_reg),
- argument_count_immediate_(argument_count_immediate),
- receiver_mode_(receiver_mode),
- extra_displacement_to_last_argument_(
- extra_displacement_to_last_argument) {}
-
StackArgumentsAccessor(Register base_reg, Register argument_count_reg,
StackArgumentsAccessorReceiverMode receiver_mode =
ARGUMENTS_CONTAIN_RECEIVER,
@@ -308,9 +296,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Move(Register target, Register source);
void Move(Register dst, Handle<HeapObject> source,
- RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
+ RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
void Move(Operand dst, Handle<HeapObject> source,
- RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
+ RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
// Loads a pointer into a register with a relocation mode.
void Move(Register dst, Address ptr, RelocInfo::Mode rmode) {
@@ -323,8 +311,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Move src0 to dst0 and src1 to dst1, handling possible overlaps.
void MovePair(Register dst0, Register src0, Register dst1, Register src1);
- void MoveStringConstant(Register result, const StringConstantBase* string,
- RelocInfo::Mode rmode = RelocInfo::EMBEDDED_OBJECT);
+ void MoveStringConstant(
+ Register result, const StringConstantBase* string,
+ RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
// Convert smi to word-size sign-extended value.
void SmiUntag(Register dst, Register src);
@@ -415,6 +404,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
void LeaveFrame(StackFrame::Type type);
+// Allocate stack space of given size (i.e. decrement {rsp} by the value
+// stored in the given register, or by a constant). If you need to perform a
+// stack check, do it before calling this function because this function may
+// write into the newly allocated space. It may also overwrite the given
+// register's value, in the version that takes a register.
+#ifdef V8_OS_WIN
+ void AllocateStackSpace(Register bytes_scratch);
+ void AllocateStackSpace(int bytes);
+#else
+ void AllocateStackSpace(Register bytes) { subq(rsp, bytes); }
+ void AllocateStackSpace(int bytes) { subq(rsp, Immediate(bytes)); }
+#endif
+
// Removes current frame and its arguments from the stack preserving the
// arguments and a return address pushed to the stack for the next call. Both
// |callee_args_count| and |caller_args_count_reg| do not include receiver.
@@ -510,14 +512,19 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The following macros work even when pointer compression is not enabled.
void DecompressTaggedSigned(Register destination, Operand field_operand);
+ void DecompressTaggedSigned(Register destination, Register source);
void DecompressTaggedPointer(Register destination, Operand field_operand);
+ void DecompressTaggedPointer(Register destination, Register source);
+ // Auxiliary function used by DecompressAnyTagged to perform the actual
+ // decompression. Assumes destination is already signed extended.
+ void DecompressRegisterAnyTagged(Register destination, Register scratch);
void DecompressAnyTagged(Register destination, Operand field_operand,
Register scratch = kScratchRegister);
+ void DecompressAnyTagged(Register destination, Register source,
+ Register scratch = kScratchRegister);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
- int smi_count = 0;
- int heap_object_count = 0;
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
@@ -627,10 +634,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// register rax (untouched).
void LeaveApiExitFrame();
- // Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { Pushad(); }
- void PopSafepointRegisters() { Popad(); }
-
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -674,8 +677,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// above with a conditional jump.
// Jump to label if the value is not a tagged smi.
- void JumpIfNotSmi(Register src,
- Label* on_not_smi,
+ void JumpIfNotSmi(Register src, Label* on_not_smi,
Label::Distance near_jump = Label::kFar);
// Jump to label if the value is not a tagged smi.
@@ -741,13 +743,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Generates a trampoline to jump to the off-heap instruction stream.
void JumpToInstructionStream(Address entry);
- // Non-x64 instructions.
- // Push/pop all general purpose registers.
- // Does not push rsp/rbp nor any of the assembler's special purpose registers
- // (kScratchRegister, kRootRegister).
- void Pushad();
- void Popad();
-
// Compare object type for heap object.
// Always use unsigned comparisons: above and below, not less and greater.
// Incoming register is heap_object and outgoing register is map.
@@ -758,11 +753,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
- void DoubleToI(Register result_reg, XMMRegister input_reg,
- XMMRegister scratch, Label* lost_precision, Label* is_nan,
- Label::Distance dst = Label::kFar);
-
- template<typename Field>
+ template <typename Field>
void DecodeField(Register reg) {
static const int shift = Field::kShift;
static const int mask = Field::kMask >> Field::kShift;
@@ -821,8 +812,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Runtime calls
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f,
- int num_arguments,
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Convenience function: Same as above, but takes the fid instead.
@@ -901,32 +891,25 @@ inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
-
// Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
- Register index,
- ScaleFactor scale,
+inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
-
inline Operand ContextOperand(Register context, int index) {
return Operand(context, Context::SlotOffset(index));
}
-
inline Operand ContextOperand(Register context, Register index) {
return Operand(context, index, times_system_pointer_size,
Context::SlotOffset(0));
}
-
inline Operand NativeContextOperand() {
return ContextOperand(rsi, Context::NATIVE_CONTEXT_INDEX);
}
-
// Provides access to exit frame stack space (not GCed).
inline Operand StackSpaceOperand(int index) {
#ifdef _WIN64
@@ -937,7 +920,6 @@ inline Operand StackSpaceOperand(int index) {
#endif
}
-
inline Operand StackOperandForReturnAddress(int32_t disp) {
return Operand(rsp, disp);
}
@@ -947,4 +929,4 @@ inline Operand StackOperandForReturnAddress(int32_t disp) {
} // namespace internal
} // namespace v8
-#endif // V8_X64_MACRO_ASSEMBLER_X64_H_
+#endif // V8_CODEGEN_X64_MACRO_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index cd7614744d..199571f088 100644
--- a/deps/v8/src/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_REGISTER_X64_H_
-#define V8_X64_REGISTER_X64_H_
+#ifndef V8_CODEGEN_X64_REGISTER_X64_H_
+#define V8_CODEGEN_X64_REGISTER_X64_H_
-#include "src/register.h"
-#include "src/reglist.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
namespace v8 {
namespace internal {
@@ -169,11 +169,11 @@ ASSERT_TRIVIALLY_COPYABLE(XMMRegister);
static_assert(sizeof(XMMRegister) == sizeof(int),
"XMMRegister can efficiently be passed by value");
-typedef XMMRegister FloatRegister;
+using FloatRegister = XMMRegister;
-typedef XMMRegister DoubleRegister;
+using DoubleRegister = XMMRegister;
-typedef XMMRegister Simd128Register;
+using Simd128Register = XMMRegister;
#define DECLARE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
@@ -221,4 +221,4 @@ constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
} // namespace internal
} // namespace v8
-#endif // V8_X64_REGISTER_X64_H_
+#endif // V8_CODEGEN_X64_REGISTER_X64_H_
diff --git a/deps/v8/src/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index a864d294f4..ee20483cfe 100644
--- a/deps/v8/src/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_SSE_INSTR_H_
-#define V8_X64_SSE_INSTR_H_
+#ifndef V8_CODEGEN_X64_SSE_INSTR_H_
+#define V8_CODEGEN_X64_SSE_INSTR_H_
#define SSE2_INSTRUCTION_LIST(V) \
V(cvtps2dq, 66, 0F, 5B) \
@@ -82,4 +82,4 @@
V(pmaxud, 66, 0F, 38, 3F) \
V(pmulld, 66, 0F, 38, 40)
-#endif // V8_X64_SSE_INSTR_H_
+#endif // V8_CODEGEN_X64_SSE_INSTR_H_
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/common/assert-scope.cc
index 3655a5e599..5a299fa1ee 100644
--- a/deps/v8/src/assert-scope.cc
+++ b/deps/v8/src/common/assert-scope.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assert-scope.h"
+#include "src/common/assert-scope.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
-#include "src/isolate.h"
-#include "src/utils.h"
+#include "src/execution/isolate.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -20,7 +20,6 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(base::Thread::LocalStorageKey,
} // namespace
-
class PerThreadAssertData final {
public:
PerThreadAssertData() : nesting_level_(0) {
@@ -97,7 +96,6 @@ template <PerIsolateAssertType kType, bool kAllow>
class PerIsolateAssertScope<kType, kAllow>::DataBit
: public BitField<bool, kType, 1> {};
-
template <PerIsolateAssertType kType, bool kAllow>
PerIsolateAssertScope<kType, kAllow>::PerIsolateAssertScope(Isolate* isolate)
: isolate_(isolate), old_data_(isolate->per_isolate_assert_data()) {
@@ -106,20 +104,17 @@ PerIsolateAssertScope<kType, kAllow>::PerIsolateAssertScope(Isolate* isolate)
isolate_->set_per_isolate_assert_data(DataBit::update(old_data_, kAllow));
}
-
template <PerIsolateAssertType kType, bool kAllow>
PerIsolateAssertScope<kType, kAllow>::~PerIsolateAssertScope() {
isolate_->set_per_isolate_assert_data(old_data_);
}
-
// static
template <PerIsolateAssertType kType, bool kAllow>
bool PerIsolateAssertScope<kType, kAllow>::IsAllowed(Isolate* isolate) {
return DataBit::decode(isolate->per_isolate_assert_data());
}
-
// -----------------------------------------------------------------------------
// Instantiations.
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/common/assert-scope.h
index e32a3a10a9..606439d42b 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/common/assert-scope.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ASSERT_SCOPE_H_
-#define V8_ASSERT_SCOPE_H_
+#ifndef V8_COMMON_ASSERT_SCOPE_H_
+#define V8_COMMON_ASSERT_SCOPE_H_
#include <stdint.h>
#include "src/base/macros.h"
#include "src/base/optional.h"
-#include "src/globals.h"
-#include "src/pointer-with-payload.h"
+#include "src/common/globals.h"
+#include "src/utils/pointer-with-payload.h"
namespace v8 {
namespace internal {
@@ -72,7 +72,6 @@ class PerThreadAssertScope {
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertScope);
};
-
template <PerIsolateAssertType type, bool allow>
class PerIsolateAssertScope {
public:
@@ -90,11 +89,9 @@ class PerIsolateAssertScope {
DISALLOW_COPY_AND_ASSIGN(PerIsolateAssertScope);
};
-
template <PerThreadAssertType type, bool allow>
#ifdef DEBUG
-class PerThreadAssertScopeDebugOnly : public
- PerThreadAssertScope<type, allow> {
+class PerThreadAssertScopeDebugOnly : public PerThreadAssertScope<type, allow> {
#else
class PerThreadAssertScopeDebugOnly {
public:
@@ -105,34 +102,33 @@ class PerThreadAssertScopeDebugOnly {
#endif
};
-
template <PerIsolateAssertType type, bool allow>
#ifdef DEBUG
-class PerIsolateAssertScopeDebugOnly : public
- PerIsolateAssertScope<type, allow> {
+class PerIsolateAssertScopeDebugOnly
+ : public PerIsolateAssertScope<type, allow> {
public:
explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate)
- : PerIsolateAssertScope<type, allow>(isolate) { }
+ : PerIsolateAssertScope<type, allow>(isolate) {}
#else
class PerIsolateAssertScopeDebugOnly {
public:
- explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate) { }
+ explicit PerIsolateAssertScopeDebugOnly(Isolate* isolate) {}
#endif
};
// Per-thread assert scopes.
// Scope to document where we do not expect handles to be created.
-typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, false>
- DisallowHandleAllocation;
+using DisallowHandleAllocation =
+ PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, false>;
// Scope to introduce an exception to DisallowHandleAllocation.
-typedef PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>
- AllowHandleAllocation;
+using AllowHandleAllocation =
+ PerThreadAssertScopeDebugOnly<HANDLE_ALLOCATION_ASSERT, true>;
// Scope to document where we do not expect any allocation and GC.
-typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>
- DisallowHeapAllocation;
+using DisallowHeapAllocation =
+ PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>;
#ifdef DEBUG
#define DISALLOW_HEAP_ALLOCATION(name) DisallowHeapAllocation name;
#else
@@ -140,32 +136,32 @@ typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, false>
#endif
// Scope to introduce an exception to DisallowHeapAllocation.
-typedef PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, true>
- AllowHeapAllocation;
+using AllowHeapAllocation =
+ PerThreadAssertScopeDebugOnly<HEAP_ALLOCATION_ASSERT, true>;
// Scope to document where we do not expect any handle dereferences.
-typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, false>
- DisallowHandleDereference;
+using DisallowHandleDereference =
+ PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, false>;
// Scope to introduce an exception to DisallowHandleDereference.
-typedef PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, true>
- AllowHandleDereference;
+using AllowHandleDereference =
+ PerThreadAssertScopeDebugOnly<HANDLE_DEREFERENCE_ASSERT, true>;
// Scope to document where we do not expect deferred handles to be dereferenced.
-typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>
- DisallowDeferredHandleDereference;
+using DisallowDeferredHandleDereference =
+ PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>;
// Scope to introduce an exception to DisallowDeferredHandleDereference.
-typedef PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>
- AllowDeferredHandleDereference;
+using AllowDeferredHandleDereference =
+ PerThreadAssertScopeDebugOnly<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>;
// Scope to document where we do not expect deferred handles to be dereferenced.
-typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>
- DisallowCodeDependencyChange;
+using DisallowCodeDependencyChange =
+ PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, false>;
// Scope to introduce an exception to DisallowDeferredHandleDereference.
-typedef PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>
- AllowCodeDependencyChange;
+using AllowCodeDependencyChange =
+ PerThreadAssertScopeDebugOnly<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
class DisallowHeapAccess {
DisallowCodeDependencyChange no_dependency_change_;
@@ -187,60 +183,60 @@ class DisallowHeapAccessIf {
// Per-isolate assert scopes.
// Scope to document where we do not expect javascript execution.
-typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>
- DisallowJavascriptExecution;
+using DisallowJavascriptExecution =
+ PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
// Scope to introduce an exception to DisallowJavascriptExecution.
-typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>
- AllowJavascriptExecution;
+using AllowJavascriptExecution =
+ PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
// Scope to document where we do not expect javascript execution (debug only)
-typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, false>
- DisallowJavascriptExecutionDebugOnly;
+using DisallowJavascriptExecutionDebugOnly =
+ PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, false>;
// Scope to introduce an exception to DisallowJavascriptExecutionDebugOnly.
-typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, true>
- AllowJavascriptExecutionDebugOnly;
+using AllowJavascriptExecutionDebugOnly =
+ PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, true>;
// Scope in which javascript execution leads to exception being thrown.
-typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
- ThrowOnJavascriptExecution;
+using ThrowOnJavascriptExecution =
+ PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>;
// Scope to introduce an exception to ThrowOnJavascriptExecution.
-typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>
- NoThrowOnJavascriptExecution;
+using NoThrowOnJavascriptExecution =
+ PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>;
// Scope in which javascript execution causes dumps.
-typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, false>
- DumpOnJavascriptExecution;
+using DumpOnJavascriptExecution =
+ PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, false>;
// Scope in which javascript execution causes dumps.
-typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, true>
- NoDumpOnJavascriptExecution;
+using NoDumpOnJavascriptExecution =
+ PerIsolateAssertScope<JAVASCRIPT_EXECUTION_DUMP, true>;
// Scope to document where we do not expect deoptimization.
-typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, false>
- DisallowDeoptimization;
+using DisallowDeoptimization =
+ PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, false>;
// Scope to introduce an exception to DisallowDeoptimization.
-typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, true>
- AllowDeoptimization;
+using AllowDeoptimization =
+ PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, true>;
// Scope to document where we do not expect deoptimization.
-typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, false>
- DisallowCompilation;
+using DisallowCompilation =
+ PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, false>;
// Scope to introduce an exception to DisallowDeoptimization.
-typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true>
- AllowCompilation;
+using AllowCompilation =
+ PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true>;
// Scope to document where we do not expect exceptions.
-typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, false>
- DisallowExceptions;
+using DisallowExceptions =
+ PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, false>;
// Scope to introduce an exception to DisallowExceptions.
-typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, true>
- AllowExceptions;
+using AllowExceptions =
+ PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, true>;
// Explicit instantiation declarations.
extern template class PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>;
@@ -273,4 +269,4 @@ extern template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, true>;
} // namespace internal
} // namespace v8
-#endif // V8_ASSERT_SCOPE_H_
+#endif // V8_COMMON_ASSERT_SCOPE_H_
diff --git a/deps/v8/src/checks.h b/deps/v8/src/common/checks.h
index 4a0afa4f57..ef9eb27ca0 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/common/checks.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CHECKS_H_
-#define V8_CHECKS_H_
+#ifndef V8_COMMON_CHECKS_H_
+#define V8_COMMON_CHECKS_H_
#include "include/v8-internal.h"
#include "src/base/logging.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
@@ -20,7 +20,7 @@ namespace internal {
CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
V8_EXPORT_PRIVATE extern bool FLAG_enable_slow_asserts;
#else
-#define SLOW_DCHECK(condition) ((void) 0)
+#define SLOW_DCHECK(condition) ((void)0)
static const bool FLAG_enable_slow_asserts = false;
#endif
@@ -33,4 +33,4 @@ static const bool FLAG_enable_slow_asserts = false;
#define DCHECK_SIZE_TAG_ALIGNED(size) \
DCHECK((size & ::v8::internal::kHeapObjectTagMask) == 0)
-#endif // V8_CHECKS_H_
+#endif // V8_COMMON_CHECKS_H_
diff --git a/deps/v8/src/globals.h b/deps/v8/src/common/globals.h
index bc0c890606..5d4b957e84 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_GLOBALS_H_
-#define V8_GLOBALS_H_
+#ifndef V8_COMMON_GLOBALS_H_
+#define V8_COMMON_GLOBALS_H_
#include <stddef.h>
#include <stdint.h>
@@ -25,7 +25,7 @@ namespace v8 {
namespace base {
class Mutex;
class RecursiveMutex;
-}
+} // namespace base
namespace internal {
@@ -82,9 +82,19 @@ constexpr int kStackSpaceRequiredForCompilation = 40;
#define V8_DOUBLE_FIELDS_UNBOXING false
#endif
+// Determine whether tagged pointers are 8 bytes (used in Torque layouts for
+// choosing where to insert padding).
+#if V8_TARGET_ARCH_64_BIT && !defined(V8_COMPRESS_POINTERS)
+#define TAGGED_SIZE_8_BYTES true
+#else
+#define TAGGED_SIZE_8_BYTES false
+#endif
+
// Some types of tracing require the SFI to store a unique ID.
#if defined(V8_TRACE_MAPS) || defined(V8_TRACE_IGNITION)
#define V8_SFI_HAS_UNIQUE_ID true
+#else
+#define V8_SFI_HAS_UNIQUE_ID false
#endif
#if defined(V8_OS_WIN) && defined(V8_TARGET_ARCH_X64)
@@ -100,7 +110,7 @@ class AllStatic {
#endif
};
-typedef uint8_t byte;
+using byte = uint8_t;
// -----------------------------------------------------------------------------
// Constants
@@ -160,7 +170,6 @@ constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
constexpr int kSystemPointerSizeLog2 = 3;
constexpr intptr_t kIntptrSignBit =
static_cast<intptr_t>(uintptr_t{0x8000000000000000});
-constexpr uintptr_t kUintptrAllBitsSet = uintptr_t{0xFFFFFFFFFFFFFFFF};
constexpr bool kRequiresCodeRange = true;
#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
constexpr size_t kMaximalCodeRangeSize = 512 * MB;
@@ -182,7 +191,6 @@ constexpr size_t kReservedCodeRangePages = 0;
#else
constexpr int kSystemPointerSizeLog2 = 2;
constexpr intptr_t kIntptrSignBit = 0x80000000;
-constexpr uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
#if V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC && V8_OS_LINUX
constexpr bool kRequiresCodeRange = false;
constexpr size_t kMaximalCodeRangeSize = 0 * MB;
@@ -204,6 +212,15 @@ constexpr size_t kReservedCodeRangePages = 0;
STATIC_ASSERT(kSystemPointerSize == (1 << kSystemPointerSizeLog2));
+// This macro is used for declaring and defining HeapObject getter methods that
+// are a bit more efficient for the pointer compression case than the default
+// parameterless getters because isolate root doesn't have to be computed from
+// arbitrary field address but it comes "for free" instead.
+// These alternatives are always defined (in order to avoid #ifdef mess but
+// are not supposed to be used when pointer compression is not enabled.
+#define ROOT_VALUE isolate_for_root
+#define ROOT_PARAM Isolate* const ROOT_VALUE
+
#ifdef V8_COMPRESS_POINTERS
static_assert(
kSystemPointerSize == kInt64Size,
@@ -217,6 +234,11 @@ constexpr int kTaggedSizeLog2 = 2;
using Tagged_t = int32_t;
using AtomicTagged_t = base::Atomic32;
+#define DEFINE_ROOT_VALUE(isolate) ROOT_PARAM = isolate
+#define WITH_ROOT_PARAM(...) ROOT_PARAM, ##__VA_ARGS__
+#define WITH_ROOT_VALUE(...) ROOT_VALUE, ##__VA_ARGS__
+#define WITH_ROOT(isolate_for_root, ...) isolate_for_root, ##__VA_ARGS__
+
#else
constexpr int kTaggedSize = kSystemPointerSize;
@@ -227,6 +249,11 @@ constexpr int kTaggedSizeLog2 = kSystemPointerSizeLog2;
using Tagged_t = Address;
using AtomicTagged_t = base::AtomicWord;
+#define DEFINE_ROOT_VALUE(isolate)
+#define WITH_ROOT_PARAM(...) __VA_ARGS__
+#define WITH_ROOT_VALUE(...) __VA_ARGS__
+#define WITH_ROOT(isolate_for_root, ...) __VA_ARGS__
+
#endif // V8_COMPRESS_POINTERS
// Defines whether the branchless or branchful implementation of pointer
@@ -234,6 +261,7 @@ using AtomicTagged_t = base::AtomicWord;
constexpr bool kUseBranchlessPtrDecompression = true;
STATIC_ASSERT(kTaggedSize == (1 << kTaggedSizeLog2));
+STATIC_ASSERT((kTaggedSize == 8) == TAGGED_SIZE_8_BYTES);
using AsAtomicTagged = base::AsAtomicPointerImpl<AtomicTagged_t>;
STATIC_ASSERT(sizeof(Tagged_t) == kTaggedSize);
@@ -288,8 +316,8 @@ constexpr uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
// Latin1/UTF-16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
// Code units in UTF-16 are 16 bits wide.
-typedef uint16_t uc16;
-typedef int32_t uc32;
+using uc16 = uint16_t;
+using uc32 = int32_t;
constexpr int kOneByteSize = kCharSize;
constexpr int kUC16Size = sizeof(uc16); // NOLINT
@@ -311,13 +339,12 @@ F FUNCTION_CAST(Address addr) {
return reinterpret_cast<F>(addr);
}
-
// Determine whether the architecture uses function descriptors
// which provide a level of indirection between the function pointer
// and the function entrypoint.
-#if V8_HOST_ARCH_PPC && \
+#if V8_HOST_ARCH_PPC && \
(V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && \
- (!defined(_CALL_ELF) || _CALL_ELF == 1)))
+ (!defined(_CALL_ELF) || _CALL_ELF == 1)))
#define USES_FUNCTION_DESCRIPTORS 1
#define FUNCTION_ENTRYPOINT_ADDRESS(f) \
(reinterpret_cast<v8::internal::Address*>( \
@@ -326,7 +353,6 @@ F FUNCTION_CAST(Address addr) {
#define USES_FUNCTION_DESCRIPTORS 0
#endif
-
// -----------------------------------------------------------------------------
// Declarations for use in both the preparser and the rest of V8.
@@ -546,6 +572,11 @@ static const intptr_t kPageAlignmentMask = (intptr_t{1} << kPageSizeBits) - 1;
// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
constexpr uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
+enum class HeapObjectReferenceType {
+ WEAK,
+ STRONG,
+};
+
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
@@ -569,7 +600,8 @@ class Foreign;
class FreeStoreAllocationPolicy;
class FunctionTemplateInfo;
class GlobalDictionary;
-template <typename T> class Handle;
+template <typename T>
+class Handle;
class Heap;
class HeapObject;
class HeapObjectReference;
@@ -598,6 +630,10 @@ class NewSpace;
class NewLargeObjectSpace;
class NumberDictionary;
class Object;
+template <HeapObjectReferenceType kRefType, typename StorageType>
+class TaggedImpl;
+class StrongTaggedValue;
+class TaggedValue;
class CompressedObjectSlot;
class CompressedMaybeObjectSlot;
class CompressedMapWordSlot;
@@ -617,6 +653,7 @@ class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
class SplayTree;
class String;
+class StringStream;
class Struct;
class Symbol;
class Variable;
@@ -669,9 +706,9 @@ using MaybeObjectSlot = SlotTraits<SlotLocation::kOnHeap>::TMaybeObjectSlot;
// HeapObjectReference).
using HeapObjectSlot = SlotTraits<SlotLocation::kOnHeap>::THeapObjectSlot;
-typedef bool (*WeakSlotCallback)(FullObjectSlot pointer);
+using WeakSlotCallback = bool (*)(FullObjectSlot pointer);
-typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, FullObjectSlot pointer);
+using WeakSlotCallbackWithHeap = bool (*)(Heap* heap, FullObjectSlot pointer);
// -----------------------------------------------------------------------------
// Miscellaneous
@@ -732,34 +769,7 @@ enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
enum class AccessMode { ATOMIC, NON_ATOMIC };
-// Supported write barrier modes.
-enum WriteBarrierKind : uint8_t {
- kNoWriteBarrier,
- kMapWriteBarrier,
- kPointerWriteBarrier,
- kEphemeronKeyWriteBarrier,
- kFullWriteBarrier
-};
-
-inline size_t hash_value(WriteBarrierKind kind) {
- return static_cast<uint8_t>(kind);
-}
-
-inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
- switch (kind) {
- case kNoWriteBarrier:
- return os << "NoWriteBarrier";
- case kMapWriteBarrier:
- return os << "MapWriteBarrier";
- case kPointerWriteBarrier:
- return os << "PointerWriteBarrier";
- case kEphemeronKeyWriteBarrier:
- return os << "EphemeronKeyWriteBarrier";
- case kFullWriteBarrier:
- return os << "FullWriteBarrier";
- }
- UNREACHABLE();
-}
+enum class AllowLargeObjects { kFalse, kTrue };
enum MinimumCapacity {
USE_DEFAULT_MINIMUM_CAPACITY,
@@ -770,8 +780,6 @@ enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum Movability { kMovable, kImmovable };
-
enum VisitMode {
VISIT_ALL,
VISIT_ALL_IN_MINOR_MC_MARK,
@@ -789,11 +797,7 @@ enum class BytecodeFlushMode {
};
// Flag indicating whether code is built into the VM (one of the natives files).
-enum NativesFlag {
- NOT_NATIVES_CODE,
- EXTENSION_CODE,
- INSPECTOR_CODE
-};
+enum NativesFlag { NOT_NATIVES_CODE, EXTENSION_CODE, INSPECTOR_CODE };
// ParseRestriction is used to restrict the set of valid statements in a
// unit of compilation. Restriction violations cause a syntax error.
@@ -855,16 +859,14 @@ enum ShouldThrow {
};
// The Store Buffer (GC).
-typedef enum {
+enum StoreBufferEvent {
kStoreBufferFullEvent,
kStoreBufferStartScanningPagesEvent,
kStoreBufferScanningPageEvent
-} StoreBufferEvent;
-
+};
-typedef void (*StoreBufferCallback)(Heap* heap,
- MemoryChunk* page,
- StoreBufferEvent event);
+using StoreBufferCallback = void (*)(Heap* heap, MemoryChunk* page,
+ StoreBufferEvent event);
// Union used for customized checking of the IEEE double types
// inlined within v8 runtime, rather than going to the underlying
@@ -872,30 +874,29 @@ typedef void (*StoreBufferCallback)(Heap* heap,
union IeeeDoubleLittleEndianArchType {
double d;
struct {
- unsigned int man_low :32;
- unsigned int man_high :20;
- unsigned int exp :11;
- unsigned int sign :1;
+ unsigned int man_low : 32;
+ unsigned int man_high : 20;
+ unsigned int exp : 11;
+ unsigned int sign : 1;
} bits;
};
-
union IeeeDoubleBigEndianArchType {
double d;
struct {
- unsigned int sign :1;
- unsigned int exp :11;
- unsigned int man_high :20;
- unsigned int man_low :32;
+ unsigned int sign : 1;
+ unsigned int exp : 11;
+ unsigned int man_high : 20;
+ unsigned int man_low : 32;
} bits;
};
#if V8_TARGET_LITTLE_ENDIAN
-typedef IeeeDoubleLittleEndianArchType IeeeDoubleArchType;
+using IeeeDoubleArchType = IeeeDoubleLittleEndianArchType;
constexpr int kIeeeDoubleMantissaWordOffset = 0;
constexpr int kIeeeDoubleExponentWordOffset = 4;
#else
-typedef IeeeDoubleBigEndianArchType IeeeDoubleArchType;
+using IeeeDoubleArchType = IeeeDoubleBigEndianArchType;
constexpr int kIeeeDoubleMantissaWordOffset = 4;
constexpr int kIeeeDoubleExponentWordOffset = 0;
#endif
@@ -908,10 +909,14 @@ constexpr int kIeeeDoubleExponentWordOffset = 0;
#define HAS_SMI_TAG(value) \
((static_cast<intptr_t>(value) & ::i::kSmiTagMask) == ::i::kSmiTag)
-#define HAS_HEAP_OBJECT_TAG(value) \
+#define HAS_STRONG_HEAP_OBJECT_TAG(value) \
(((static_cast<intptr_t>(value) & ::i::kHeapObjectTagMask) == \
::i::kHeapObjectTag))
+#define HAS_WEAK_HEAP_OBJECT_TAG(value) \
+ (((static_cast<intptr_t>(value) & ::i::kHeapObjectTagMask) == \
+ ::i::kWeakHeapObjectTag))
+
// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
#define OBJECT_POINTER_ALIGN(value) \
(((value) + ::i::kObjectAlignmentMask) & ~::i::kObjectAlignmentMask)
@@ -1190,7 +1195,6 @@ enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
-
enum class InterpreterPushArgsMode : unsigned {
kArrayFunction,
kWithFinalSpread,
@@ -1380,7 +1384,7 @@ enum class DataPropertyInLiteralFlag {
kDontEnum = 1 << 0,
kSetFunctionName = 1 << 1
};
-typedef base::Flags<DataPropertyInLiteralFlag> DataPropertyInLiteralFlags;
+using DataPropertyInLiteralFlags = base::Flags<DataPropertyInLiteralFlag>;
DEFINE_OPERATORS_FOR_FLAGS(DataPropertyInLiteralFlags)
enum ExternalArrayType {
@@ -1476,17 +1480,6 @@ enum IsolateAddressId {
kIsolateAddressCount
};
-V8_INLINE static bool HasWeakHeapObjectTag(Address value) {
- // TODO(jkummerow): Consolidate integer types here.
- return ((static_cast<intptr_t>(value) & kHeapObjectTagMask) ==
- kWeakHeapObjectTag);
-}
-
-enum class HeapObjectReferenceType {
- WEAK,
- STRONG,
-};
-
enum class PoisoningMitigationLevel {
kPoisonAll,
kDontPoison,
@@ -1523,55 +1516,20 @@ enum KeyedAccessLoadMode {
enum KeyedAccessStoreMode {
STANDARD_STORE,
- STORE_TRANSITION_TO_OBJECT,
- STORE_TRANSITION_TO_DOUBLE,
- STORE_AND_GROW_NO_TRANSITION_HANDLE_COW,
- STORE_AND_GROW_TRANSITION_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_TO_DOUBLE,
- STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
- STORE_NO_TRANSITION_HANDLE_COW
+ STORE_AND_GROW_HANDLE_COW,
+ STORE_IGNORE_OUT_OF_BOUNDS,
+ STORE_HANDLE_COW
};
enum MutableMode { MUTABLE, IMMUTABLE };
-static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
- return store_mode == STORE_TRANSITION_TO_OBJECT ||
- store_mode == STORE_TRANSITION_TO_DOUBLE ||
- store_mode == STORE_AND_GROW_TRANSITION_TO_OBJECT ||
- store_mode == STORE_AND_GROW_TRANSITION_TO_DOUBLE;
-}
-
static inline bool IsCOWHandlingStoreMode(KeyedAccessStoreMode store_mode) {
- return store_mode == STORE_NO_TRANSITION_HANDLE_COW ||
- store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
-}
-
-static inline KeyedAccessStoreMode GetNonTransitioningStoreMode(
- KeyedAccessStoreMode store_mode, bool receiver_was_cow) {
- switch (store_mode) {
- case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
- case STORE_AND_GROW_TRANSITION_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_TO_DOUBLE:
- store_mode = STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
- break;
- case STANDARD_STORE:
- case STORE_TRANSITION_TO_OBJECT:
- case STORE_TRANSITION_TO_DOUBLE:
- store_mode =
- receiver_was_cow ? STORE_NO_TRANSITION_HANDLE_COW : STANDARD_STORE;
- break;
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
- case STORE_NO_TRANSITION_HANDLE_COW:
- break;
- }
- DCHECK(!IsTransitionStoreMode(store_mode));
- DCHECK_IMPLIES(receiver_was_cow, IsCOWHandlingStoreMode(store_mode));
- return store_mode;
+ return store_mode == STORE_HANDLE_COW ||
+ store_mode == STORE_AND_GROW_HANDLE_COW;
}
static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
- return store_mode >= STORE_AND_GROW_NO_TRANSITION_HANDLE_COW &&
- store_mode <= STORE_AND_GROW_TRANSITION_TO_DOUBLE;
+ return store_mode == STORE_AND_GROW_HANDLE_COW;
}
enum IcCheckType { ELEMENT, PROPERTY };
@@ -1601,4 +1559,4 @@ constexpr int kSmallOrderedHashMapMinCapacity = 4;
namespace i = v8::internal;
-#endif // V8_GLOBALS_H_
+#endif // V8_COMMON_GLOBALS_H_
diff --git a/deps/v8/src/ptr-compr-inl.h b/deps/v8/src/common/ptr-compr-inl.h
index 98f94b351a..fd0f97e904 100644
--- a/deps/v8/src/ptr-compr-inl.h
+++ b/deps/v8/src/common/ptr-compr-inl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PTR_COMPR_INL_H_
-#define V8_PTR_COMPR_INL_H_
-
+#ifndef V8_COMMON_PTR_COMPR_INL_H_
+#define V8_COMMON_PTR_COMPR_INL_H_
#include "include/v8-internal.h"
-#include "src/ptr-compr.h"
+#include "src/common/ptr-compr.h"
+#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
@@ -19,34 +19,49 @@ V8_INLINE Tagged_t CompressTagged(Address tagged) {
return static_cast<Tagged_t>(static_cast<uint32_t>(tagged));
}
-enum class OnHeapAddressKind {
- kAnyOnHeapAddress,
- kIsolateRoot,
-};
-
// Calculates isolate root value from any on-heap address.
-template <OnHeapAddressKind kAddressKind = OnHeapAddressKind::kAnyOnHeapAddress>
-V8_INLINE Address GetRootFromOnHeapAddress(Address on_heap_addr) {
- if (kAddressKind == OnHeapAddressKind::kIsolateRoot) return on_heap_addr;
- return RoundDown(on_heap_addr + kPtrComprIsolateRootBias,
- kPtrComprIsolateRootAlignment);
+template <typename TOnHeapAddress>
+V8_INLINE Address GetIsolateRoot(TOnHeapAddress on_heap_addr);
+
+template <>
+V8_INLINE Address GetIsolateRoot<Address>(Address on_heap_addr) {
+ return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr +
+ kPtrComprIsolateRootBias);
+}
+
+template <>
+V8_INLINE Address GetIsolateRoot<Isolate*>(Isolate* isolate) {
+ return isolate->isolate_root();
+}
+
+template <>
+V8_INLINE Address GetIsolateRoot<const Isolate*>(const Isolate* isolate) {
+ return isolate->isolate_root();
+}
+
+// Decompresses smi value.
+V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) {
+ // Current compression scheme requires |raw_value| to be sign-extended
+ // from int32_t to intptr_t.
+ intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
+ return static_cast<Address>(value);
}
// Decompresses weak or strong heap object pointer or forwarding pointer,
// preserving both weak- and smi- tags.
-template <OnHeapAddressKind kAddressKind = OnHeapAddressKind::kAnyOnHeapAddress>
-V8_INLINE Address DecompressTaggedPointer(Address on_heap_addr,
+template <typename TOnHeapAddress>
+V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
// Current compression scheme requires |raw_value| to be sign-extended
// from int32_t to intptr_t.
intptr_t value = static_cast<intptr_t>(static_cast<int32_t>(raw_value));
- Address root = GetRootFromOnHeapAddress<kAddressKind>(on_heap_addr);
+ Address root = GetIsolateRoot(on_heap_addr);
return root + static_cast<Address>(value);
}
// Decompresses any tagged value, preserving both weak- and smi- tags.
-template <OnHeapAddressKind kAddressKind = OnHeapAddressKind::kAnyOnHeapAddress>
-V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
+template <typename TOnHeapAddress>
+V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
Tagged_t raw_value) {
// Current compression scheme requires |raw_value| to be sign-extended
// from int32_t to intptr_t.
@@ -54,14 +69,12 @@ V8_INLINE Address DecompressTaggedAny(Address on_heap_addr,
if (kUseBranchlessPtrDecompression) {
// |root_mask| is 0 if the |value| was a smi or -1 otherwise.
Address root_mask = static_cast<Address>(-(value & kSmiTagMask));
- Address root_or_zero =
- root_mask & GetRootFromOnHeapAddress<kAddressKind>(on_heap_addr);
+ Address root_or_zero = root_mask & GetIsolateRoot(on_heap_addr);
return root_or_zero + static_cast<Address>(value);
} else {
return HAS_SMI_TAG(value)
? static_cast<Address>(value)
- : (GetRootFromOnHeapAddress<kAddressKind>(on_heap_addr) +
- static_cast<Address>(value));
+ : (GetIsolateRoot(on_heap_addr) + static_cast<Address>(value));
}
}
@@ -79,8 +92,22 @@ STATIC_ASSERT(kPtrComprIsolateRootAlignment ==
V8_INLINE Tagged_t CompressTagged(Address tagged) { UNREACHABLE(); }
+V8_INLINE Address DecompressTaggedSigned(Tagged_t raw_value) { UNREACHABLE(); }
+
+template <typename TOnHeapAddress>
+V8_INLINE Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
+ Tagged_t raw_value) {
+ UNREACHABLE();
+}
+
+template <typename TOnHeapAddress>
+V8_INLINE Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
+ Tagged_t raw_value) {
+ UNREACHABLE();
+}
+
#endif // V8_TARGET_ARCH_64_BIT
} // namespace internal
} // namespace v8
-#endif // V8_PTR_COMPR_INL_H_
+#endif // V8_COMMON_PTR_COMPR_INL_H_
diff --git a/deps/v8/src/ptr-compr.h b/deps/v8/src/common/ptr-compr.h
index a871e99a04..5b4a74e7e3 100644
--- a/deps/v8/src/ptr-compr.h
+++ b/deps/v8/src/common/ptr-compr.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PTR_COMPR_H_
-#define V8_PTR_COMPR_H_
+#ifndef V8_COMMON_PTR_COMPR_H_
+#define V8_COMMON_PTR_COMPR_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#if V8_TARGET_ARCH_64_BIT
@@ -22,4 +22,4 @@ constexpr size_t kPtrComprIsolateRootAlignment = size_t{4} * GB;
#endif // V8_TARGET_ARCH_64_BIT
-#endif // V8_PTR_COMPR_H_
+#endif // V8_COMMON_PTR_COMPR_H_
diff --git a/deps/v8/src/v8memory.h b/deps/v8/src/common/v8memory.h
index e927962296..02ba2de848 100644
--- a/deps/v8/src/v8memory.h
+++ b/deps/v8/src/common/v8memory.h
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_V8MEMORY_H_
-#define V8_V8MEMORY_H_
+#ifndef V8_COMMON_V8MEMORY_H_
+#define V8_COMMON_V8MEMORY_H_
-#include "src/globals.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -15,13 +16,12 @@ namespace internal {
// Note that this class currently relies on undefined behaviour. There is a
// proposal (http://wg21.link/p0593r2) to make it defined behaviour though.
template <class T>
-T& Memory(Address addr) {
- // {addr} must be aligned.
- DCHECK_EQ(0, addr & (alignof(T) - 1));
+inline T& Memory(Address addr) {
+ DCHECK(IsAligned(addr, alignof(T)));
return *reinterpret_cast<T*>(addr);
}
template <class T>
-T& Memory(byte* addr) {
+inline T& Memory(byte* addr) {
return Memory<T>(reinterpret_cast<Address>(addr));
}
@@ -39,18 +39,6 @@ static inline void WriteUnalignedValue(Address p, V value) {
memcpy(reinterpret_cast<void*>(p), &value, sizeof(V));
}
-static inline double ReadFloatValue(Address p) {
- return ReadUnalignedValue<float>(p);
-}
-
-static inline double ReadDoubleValue(Address p) {
- return ReadUnalignedValue<double>(p);
-}
-
-static inline void WriteDoubleValue(Address p, double value) {
- WriteUnalignedValue(p, value);
-}
-
static inline uint16_t ReadUnalignedUInt16(Address p) {
return ReadUnalignedValue<uint16_t>(p);
}
@@ -108,4 +96,4 @@ static inline void WriteLittleEndianValue(V* p, V value) {
} // namespace internal
} // namespace v8
-#endif // V8_V8MEMORY_H_
+#endif // V8_COMMON_V8MEMORY_H_
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 08c815787f..e1d47d30a6 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -7,14 +7,14 @@
#include "src/ast/ast.h"
#include "src/base/platform/time.h"
#include "src/base/template-utils.h"
-#include "src/cancelable-task.h"
-#include "src/compiler.h"
-#include "src/flags.h"
-#include "src/global-handles.h"
-#include "src/objects-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/flags/flags.h"
+#include "src/handles/global-handles.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
-#include "src/task-utils.h"
+#include "src/tasks/cancelable-task.h"
+#include "src/tasks/task-utils.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
namespace v8 {
@@ -104,7 +104,7 @@ void CompilerDispatcher::RegisterSharedFunctionInfo(
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: registering ");
- function->ShortPrint();
+ function.ShortPrint();
PrintF(" with job id %zu\n", job_id);
}
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
index 0bfbe9d719..544e9c8ba7 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -17,9 +17,9 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
-#include "src/globals.h"
-#include "src/identity-map.h"
-#include "src/maybe-handles.h"
+#include "src/common/globals.h"
+#include "src/handles/maybe-handles.h"
+#include "src/utils/identity-map.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
@@ -75,7 +75,7 @@ class Handle;
// thread.
class V8_EXPORT_PRIVATE CompilerDispatcher {
public:
- typedef uintptr_t JobId;
+ using JobId = uintptr_t;
CompilerDispatcher(Isolate* isolate, Platform* platform,
size_t max_stack_size);
@@ -136,8 +136,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
bool aborted;
};
- typedef std::map<JobId, std::unique_ptr<Job>> JobMap;
- typedef IdentityMap<JobId, FreeStoreAllocationPolicy> SharedToJobIdMap;
+ using JobMap = std::map<JobId, std::unique_ptr<Job>>;
+ using SharedToJobIdMap = IdentityMap<JobId, FreeStoreAllocationPolicy>;
void WaitForJobIfRunningOnBackground(Job* job);
JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index b84949acf7..fbaeaa73f8 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -6,15 +6,15 @@
#include "src/base/atomicops.h"
#include "src/base/template-utils.h"
-#include "src/cancelable-task.h"
-#include "src/compiler.h"
-#include "src/counters.h"
-#include "src/isolate.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
-#include "src/optimized-compilation-info.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/execution/isolate.h"
+#include "src/init/v8.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
+#include "src/tasks/cancelable-task.h"
#include "src/tracing/trace-event.h"
-#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -25,7 +25,7 @@ void DisposeCompilationJob(OptimizedCompilationJob* job,
bool restore_function_code) {
if (restore_function_code) {
Handle<JSFunction> function = job->compilation_info()->closure();
- function->set_code(function->shared()->GetCode());
+ function->set_code(function->shared().GetCode());
if (function->IsInOptimizationQueue()) {
function->ClearOptimizationMarker();
}
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 1011808e99..c1b92ff5c9 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -8,12 +8,12 @@
#include <atomic>
#include <queue>
-#include "src/allocation.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
-#include "src/flags.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index b4d5ad195b..726a81a465 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -5,17 +5,17 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/type-cache.h"
-#include "src/contexts.h"
-#include "src/frames.h"
-#include "src/handles-inl.h"
+#include "src/execution/frames.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap.h"
-#include "src/objects-inl.h"
#include "src/objects/arguments.h"
#include "src/objects/cell.h"
+#include "src/objects/contexts.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-generator.h"
#include "src/objects/module.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
namespace v8 {
@@ -23,9 +23,10 @@ namespace internal {
namespace compiler {
// static
-FieldAccess AccessBuilder::ForExternalIntPtr() {
- FieldAccess access = {kUntaggedBase, 0, MaybeHandle<Name>(),
- MaybeHandle<Map>(), Type::Any(), MachineType::IntPtr(),
+FieldAccess AccessBuilder::ForExternalTaggedValue() {
+ FieldAccess access = {kUntaggedBase, 0,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
return access;
}
@@ -44,22 +45,11 @@ FieldAccess AccessBuilder::ForExternalUint8Value() {
// static
FieldAccess AccessBuilder::ForMap() {
- FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kMapWriteBarrier};
- return access;
-}
-
-// static
-FieldAccess AccessBuilder::ForCompressedMap() {
- FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- // We use MachineType::Uint32() for the compressed
- // pointer load, because we want to examine it
- // as a compressed pointer in map checks.
- Type::OtherInternal(), MachineType::Uint32(),
- kMapWriteBarrier, LoadSensitivity::kUnsafe};
+ FieldAccess access = {
+ kTaggedBase, HeapObject::kMapOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
+ kMapWriteBarrier};
return access;
}
@@ -83,20 +73,21 @@ FieldAccess AccessBuilder::ForBigIntBitfield() {
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {
+ kTaggedBase, JSObject::kPropertiesOrHashOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TypeCompressedTagged(),
+ kPointerWriteBarrier, LoadSensitivity::kCritical};
return access;
}
-
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {
+ kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier, LoadSensitivity::kCritical};
return access;
}
@@ -105,29 +96,32 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
FieldAccess AccessBuilder::ForJSObjectInObjectProperty(const MapRef& map,
int index) {
int const offset = map.GetInObjectPropertyOffset(index);
- FieldAccess access = {kTaggedBase, offset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, offset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectOffset(
int offset, WriteBarrierKind write_barrier_kind) {
- FieldAccess access = {kTaggedBase, offset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- write_barrier_kind};
+ FieldAccess access = {
+ kTaggedBase, offset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ write_barrier_kind};
return access;
}
// static
FieldAccess AccessBuilder::ForJSCollectionTable() {
- FieldAccess access = {kTaggedBase, JSCollection::kTableOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSCollection::kTableOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -136,7 +130,7 @@ FieldAccess AccessBuilder::ForJSCollectionIteratorTable() {
FieldAccess access = {
kTaggedBase, JSCollectionIterator::kTableOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -148,7 +142,7 @@ FieldAccess AccessBuilder::ForJSCollectionIteratorIndex() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kFixedArrayLengthType,
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -158,17 +152,18 @@ FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
FieldAccess access = {
kTaggedBase, JSFunction::kPrototypeOrInitialMapOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+ Type::Any(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionContext() {
- FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::AnyTagged(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kContextOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TypeCompressedTagged(),
+ kPointerWriteBarrier};
return access;
}
@@ -178,26 +173,28 @@ FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
FieldAccess access = {
kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionFeedbackCell() {
- FieldAccess access = {kTaggedBase, JSFunction::kFeedbackCellOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kFeedbackCellOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionCode() {
- FieldAccess access = {kTaggedBase, JSFunction::kCodeOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kCodeOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -206,17 +203,18 @@ FieldAccess AccessBuilder::ForJSBoundFunctionBoundTargetFunction() {
FieldAccess access = {
kTaggedBase, JSBoundFunction::kBoundTargetFunctionOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Callable(), MachineType::TaggedPointer(),
+ Type::Callable(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSBoundFunctionBoundThis() {
- FieldAccess access = {kTaggedBase, JSBoundFunction::kBoundThisOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSBoundFunction::kBoundThisOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -225,35 +223,38 @@ FieldAccess AccessBuilder::ForJSBoundFunctionBoundArguments() {
FieldAccess access = {
kTaggedBase, JSBoundFunction::kBoundArgumentsOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
+ Type::Internal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
- FieldAccess access = {kTaggedBase, JSGeneratorObject::kContextOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kContextOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectFunction() {
- FieldAccess access = {kTaggedBase, JSGeneratorObject::kFunctionOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Function(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kFunctionOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Function(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectReceiver() {
- FieldAccess access = {kTaggedBase, JSGeneratorObject::kReceiverOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kReceiverOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -262,7 +263,7 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
FieldAccess access = {
kTaggedBase, JSGeneratorObject::kContinuationOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::SignedSmall(), MachineType::TaggedSigned(),
+ Type::SignedSmall(), MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -272,7 +273,7 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
FieldAccess access = {
kTaggedBase, JSGeneratorObject::kInputOrDebugPosOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
return access;
}
@@ -283,7 +284,7 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectParametersAndRegisters() {
FieldAccess access = {
kTaggedBase, JSGeneratorObject::kParametersAndRegistersOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::AnyTagged(),
+ Type::Internal(), MachineType::TypeCompressedTagged(),
kPointerWriteBarrier};
return access;
}
@@ -293,7 +294,7 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
FieldAccess access = {
kTaggedBase, JSGeneratorObject::kResumeModeOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::SignedSmall(), MachineType::TaggedSigned(),
+ Type::SignedSmall(), MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -303,7 +304,7 @@ FieldAccess AccessBuilder::ForJSAsyncFunctionObjectPromise() {
FieldAccess access = {
kTaggedBase, JSAsyncFunctionObject::kPromiseOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::OtherObject(), MachineType::TaggedPointer(),
+ Type::OtherObject(), MachineType::TypeCompressedTaggedPointer(),
kFullWriteBarrier};
return access;
}
@@ -313,7 +314,7 @@ FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectQueue() {
FieldAccess access = {
kTaggedBase, JSAsyncGeneratorObject::kQueueOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
return access;
}
@@ -323,7 +324,7 @@ FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectIsAwaiting() {
FieldAccess access = {
kTaggedBase, JSAsyncGeneratorObject::kIsAwaitingOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::SignedSmall(), MachineType::TaggedSigned(),
+ Type::SignedSmall(), MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -336,7 +337,7 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
Handle<Name>(),
MaybeHandle<Map>(),
type_cache->kJSArrayLengthType,
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kFullWriteBarrier};
if (IsDoubleElementsKind(elements_kind)) {
access.type = type_cache->kFixedDoubleArrayLengthType;
@@ -370,10 +371,11 @@ FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
// static
FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
- FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArrayBufferView::kBufferOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -408,7 +410,39 @@ FieldAccess AccessBuilder::ForJSTypedArrayLength() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kJSTypedArrayLengthType,
- MachineType::TaggedSigned(),
+ MachineType::UintPtr(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSTypedArrayBasePointer() {
+ FieldAccess access = {
+ kTaggedBase, JSTypedArray::kBasePointerOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTagged(),
+ kPointerWriteBarrier, LoadSensitivity::kCritical};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
+ FieldAccess access = {kTaggedBase,
+ JSTypedArray::kExternalPointerOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ LoadSensitivity::kCritical};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
+ FieldAccess access = {kTaggedBase, JSDataView::kDataPointerOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::Pointer(),
kNoWriteBarrier};
return access;
}
@@ -420,7 +454,7 @@ FieldAccess AccessBuilder::ForJSDateValue() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kJSDateValueType,
- MachineType::AnyTagged(),
+ MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
return access;
}
@@ -430,7 +464,7 @@ FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
FieldAccess access = {
kTaggedBase, JSDate::kValueOffset + index * kTaggedSize,
MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Number(), MachineType::AnyTagged(),
+ Type::Number(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
return access;
}
@@ -438,56 +472,62 @@ FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
// static
FieldAccess AccessBuilder::ForJSIteratorResultDone() {
- FieldAccess access = {kTaggedBase, JSIteratorResult::kDoneOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSIteratorResult::kDoneOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSIteratorResultValue() {
- FieldAccess access = {kTaggedBase, JSIteratorResult::kValueOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSIteratorResult::kValueOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpData() {
- FieldAccess access = {kTaggedBase, JSRegExp::kDataOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSRegExp::kDataOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpFlags() {
- FieldAccess access = {kTaggedBase, JSRegExp::kFlagsOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSRegExp::kFlagsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpLastIndex() {
- FieldAccess access = {kTaggedBase, JSRegExp::kLastIndexOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSRegExp::kLastIndexOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpSource() {
- FieldAccess access = {kTaggedBase, JSRegExp::kSourceOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSRegExp::kSourceOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -499,7 +539,7 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kFixedArrayLengthType,
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -509,40 +549,17 @@ FieldAccess AccessBuilder::ForPropertyArrayLengthAndHash() {
FieldAccess access = {
kTaggedBase, PropertyArray::kLengthAndHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::SignedSmall(), MachineType::TaggedSigned(),
+ Type::SignedSmall(), MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
// static
-FieldAccess AccessBuilder::ForFixedTypedArrayBaseBasePointer() {
- FieldAccess access = {
- kTaggedBase, FixedTypedArrayBase::kBasePointerOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::AnyTagged(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
- return access;
-}
-
-// static
-FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
- FieldAccess access = {kTaggedBase,
- FixedTypedArrayBase::kExternalPointerOffset,
- MaybeHandle<Name>(),
- MaybeHandle<Map>(),
- Type::ExternalPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier,
- LoadSensitivity::kCritical};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
FieldAccess access = {
kTaggedBase, DescriptorArray::kEnumCacheOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -577,10 +594,11 @@ FieldAccess AccessBuilder::ForMapBitField3() {
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
- FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Map::kInstanceDescriptorsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -597,28 +615,31 @@ FieldAccess AccessBuilder::ForMapInstanceType() {
// static
FieldAccess AccessBuilder::ForMapPrototype() {
- FieldAccess access = {kTaggedBase, Map::kPrototypeOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Map::kPrototypeOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForModuleRegularExports() {
- FieldAccess access = {kTaggedBase, Module::kRegularExportsOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Module::kRegularExportsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForModuleRegularImports() {
- FieldAccess access = {kTaggedBase, Module::kRegularImportsOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Module::kRegularImportsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -645,46 +666,51 @@ FieldAccess AccessBuilder::ForStringLength() {
// static
FieldAccess AccessBuilder::ForConsStringFirst() {
- FieldAccess access = {kTaggedBase, ConsString::kFirstOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::String(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, ConsString::kFirstOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForConsStringSecond() {
- FieldAccess access = {kTaggedBase, ConsString::kSecondOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::String(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, ConsString::kSecondOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForThinStringActual() {
- FieldAccess access = {kTaggedBase, ThinString::kActualOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::String(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, ThinString::kActualOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForSlicedStringOffset() {
- FieldAccess access = {kTaggedBase, SlicedString::kOffsetOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::SignedSmall(), MachineType::TaggedSigned(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, SlicedString::kOffsetOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::SignedSmall(), MachineType::TypeCompressedTaggedSigned(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForSlicedStringParent() {
- FieldAccess access = {kTaggedBase, SlicedString::kParentOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::String(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, SlicedString::kParentOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -732,10 +758,11 @@ ElementAccess AccessBuilder::ForSeqTwoByteStringCharacter() {
// static
FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
- FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Receiver(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Receiver(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -744,7 +771,7 @@ FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
FieldAccess access = {
kTaggedBase, JSGlobalObject::kNativeContextOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
+ Type::Internal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -754,7 +781,7 @@ FieldAccess AccessBuilder::ForJSGlobalProxyNativeContext() {
FieldAccess access = {
kTaggedBase, JSGlobalProxy::kNativeContextOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
+ Type::Internal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -764,7 +791,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorIteratedObject() {
FieldAccess access = {
kTaggedBase, JSArrayIterator::kIteratedObjectOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Receiver(), MachineType::TaggedPointer(),
+ Type::Receiver(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -778,7 +805,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorNextIndex() {
Handle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kPositiveSafeInteger,
- MachineType::AnyTagged(),
+ MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
return access;
}
@@ -790,17 +817,18 @@ FieldAccess AccessBuilder::ForJSArrayIteratorKind() {
Handle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kJSArrayIteratorKindType,
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSStringIteratorString() {
- FieldAccess access = {kTaggedBase, JSStringIterator::kStringOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::String(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSStringIterator::kStringOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -811,17 +839,18 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
Handle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kStringLengthType,
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForValue() {
- FieldAccess access = {kTaggedBase, JSValue::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSValue::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -831,7 +860,7 @@ FieldAccess AccessBuilder::ForArgumentsLength() {
FieldAccess access = {
kTaggedBase, JSArgumentsObjectWithLength::kLengthOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
return access;
}
@@ -842,7 +871,7 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
FieldAccess access = {
kTaggedBase, JSSloppyArgumentsObject::kCalleeOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
+ Type::NonInternal(), MachineType::TypeCompressedTagged(),
kPointerWriteBarrier};
return access;
}
@@ -854,7 +883,7 @@ FieldAccess AccessBuilder::ForFixedArraySlot(
int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
FieldAccess access = {kTaggedBase, offset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+ Type::Any(), MachineType::TypeCompressedTagged(),
write_barrier_kind};
return access;
}
@@ -864,7 +893,7 @@ FieldAccess AccessBuilder::ForFixedArraySlot(
FieldAccess AccessBuilder::ForCellValue() {
FieldAccess access = {kTaggedBase, Cell::kValueOffset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+ Type::Any(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier, LoadSensitivity::kCritical};
return access;
}
@@ -876,7 +905,7 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
FieldAccess access = {kTaggedBase, offset,
Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+ Type::Any(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
return access;
}
@@ -884,20 +913,22 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
- MachineType::AnyTagged(), kFullWriteBarrier};
+ MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier};
return access;
}
// static
ElementAccess AccessBuilder::ForFixedArrayElement(
ElementsKind kind, LoadSensitivity load_sensitivity) {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize,
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, load_sensitivity};
+ ElementAccess access = {
+ kTaggedBase, FixedArray::kHeaderSize,
+ Type::Any(), MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier, load_sensitivity};
switch (kind) {
case PACKED_SMI_ELEMENTS:
access.type = Type::SignedSmall();
- access.machine_type = MachineType::TaggedSigned();
+ access.machine_type = MachineType::TypeCompressedTaggedSigned();
access.write_barrier_kind = kNoWriteBarrier;
break;
case HOLEY_SMI_ELEMENTS:
@@ -920,7 +951,6 @@ ElementAccess AccessBuilder::ForFixedArrayElement(
break;
default:
UNREACHABLE();
- break;
}
return access;
}
@@ -930,7 +960,7 @@ ElementAccess AccessBuilder::ForStackArgument() {
ElementAccess access = {
kUntaggedBase,
CommonFrameConstants::kFixedFrameSizeAboveFp - kSystemPointerSize,
- Type::NonInternal(), MachineType::Pointer(),
+ Type::NonInternal(), MachineType::AnyTagged(),
WriteBarrierKind::kNoWriteBarrier};
return access;
}
@@ -945,19 +975,21 @@ ElementAccess AccessBuilder::ForFixedDoubleArrayElement() {
// static
FieldAccess AccessBuilder::ForEnumCacheKeys() {
- FieldAccess access = {kTaggedBase, EnumCache::kKeysOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, EnumCache::kKeysOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForEnumCacheIndices() {
- FieldAccess access = {kTaggedBase, EnumCache::kIndicesOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, EnumCache::kIndicesOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -966,7 +998,7 @@ ElementAccess AccessBuilder::ForTypedArrayElement(
ExternalArrayType type, bool is_external,
LoadSensitivity load_sensitivity) {
BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
- int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
+ int header_size = is_external ? 0 : ByteArray::kHeaderSize;
switch (type) {
case kExternalInt8Array: {
ElementAccess access = {taggedness, header_size,
@@ -1033,18 +1065,21 @@ FieldAccess AccessBuilder::ForHashTableBaseNumberOfElements() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
Type::SignedSmall(),
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForHashTableBaseNumberOfDeletedElement() {
- FieldAccess access = {
- kTaggedBase, FixedArray::OffsetOfElementAt(
- HashTableBase::kNumberOfDeletedElementsIndex),
- MaybeHandle<Name>(), MaybeHandle<Map>(), Type::SignedSmall(),
- MachineType::TaggedSigned(), kNoWriteBarrier};
+ FieldAccess access = {kTaggedBase,
+ FixedArray::OffsetOfElementAt(
+ HashTableBase::kNumberOfDeletedElementsIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::SignedSmall(),
+ MachineType::TypeCompressedTaggedSigned(),
+ kNoWriteBarrier};
return access;
}
@@ -1056,7 +1091,7 @@ FieldAccess AccessBuilder::ForHashTableBaseCapacity() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
Type::SignedSmall(),
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -1070,7 +1105,7 @@ FieldAccess AccessBuilder::ForOrderedHashMapOrSetNextTable() {
FieldAccess const access = {
kTaggedBase, OrderedHashMap::NextTableOffset(),
MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
+ Type::Any(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
return access;
}
@@ -1086,7 +1121,7 @@ FieldAccess AccessBuilder::ForOrderedHashMapOrSetNumberOfBuckets() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kFixedArrayLengthType,
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -1102,7 +1137,7 @@ FieldAccess AccessBuilder::ForOrderedHashMapOrSetNumberOfDeletedElements() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kFixedArrayLengthType,
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -1118,18 +1153,18 @@ FieldAccess AccessBuilder::ForOrderedHashMapOrSetNumberOfElements() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
TypeCache::Get()->kFixedArrayLengthType,
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
// static
ElementAccess AccessBuilder::ForOrderedHashMapEntryValue() {
- ElementAccess const access = {kTaggedBase,
- OrderedHashMap::HashTableStartOffset() +
- OrderedHashMap::kValueOffset * kTaggedSize,
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ ElementAccess const access = {
+ kTaggedBase,
+ OrderedHashMap::HashTableStartOffset() +
+ OrderedHashMap::kValueOffset * kTaggedSize,
+ Type::Any(), MachineType::TypeCompressedTagged(), kFullWriteBarrier};
return access;
}
@@ -1141,7 +1176,7 @@ FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
Type::Any(),
- MachineType::AnyTagged(),
+ MachineType::TypeCompressedTagged(),
kNoWriteBarrier};
return access;
}
@@ -1154,7 +1189,7 @@ FieldAccess AccessBuilder::ForDictionaryNextEnumerationIndex() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
Type::SignedSmall(),
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
@@ -1167,7 +1202,7 @@ FieldAccess AccessBuilder::ForDictionaryObjectHashIndex() {
MaybeHandle<Name>(),
MaybeHandle<Map>(),
Type::SignedSmall(),
- MachineType::TaggedSigned(),
+ MachineType::TypeCompressedTaggedSigned(),
kNoWriteBarrier};
return access;
}
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 02e640bd1d..e38c487b1a 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -7,8 +7,8 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/simplified-operator.h"
-#include "src/elements-kind.h"
-#include "src/globals.h"
+#include "src/compiler/write-barrier-kind.h"
+#include "src/objects/elements-kind.h"
#include "src/objects/js-objects.h"
namespace v8 {
@@ -24,8 +24,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// ===========================================================================
// Access to external values (based on external references).
- // Provides access to an intptr field identified by an external reference.
- static FieldAccess ForExternalIntPtr();
+ // Provides access to a tagged field identified by an external reference.
+ static FieldAccess ForExternalTaggedValue();
// Provides access to an uint8 field identified by an external reference.
static FieldAccess ForExternalUint8Value();
@@ -36,8 +36,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to HeapObject::map() field.
static FieldAccess ForMap();
- static FieldAccess ForCompressedMap();
-
// Provides access to HeapNumber::value() field.
static FieldAccess ForHeapNumberValue();
@@ -139,6 +137,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSTypedArray::length() field.
static FieldAccess ForJSTypedArrayLength();
+ // Provides access to JSTypedArray::base_pointer() field.
+ static FieldAccess ForJSTypedArrayBasePointer();
+
+ // Provides access to JSTypedArray::external_pointer() field.
+ static FieldAccess ForJSTypedArrayExternalPointer();
+
+ // Provides access to JSDataView::data_pointer() field.
+ static FieldAccess ForJSDataViewDataPointer();
+
// Provides access to JSDate::value() field.
static FieldAccess ForJSDateValue();
@@ -169,12 +176,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to PropertyArray::length() field.
static FieldAccess ForPropertyArrayLengthAndHash();
- // Provides access to FixedTypedArrayBase::base_pointer() field.
- static FieldAccess ForFixedTypedArrayBaseBasePointer();
-
- // Provides access to FixedTypedArrayBase::external_pointer() field.
- static FieldAccess ForFixedTypedArrayBaseExternalPointer();
-
// Provides access to DescriptorArray::enum_cache() field.
static FieldAccess ForDescriptorArrayEnumCache();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 3fbedb6cbb..713484f734 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -6,16 +6,16 @@
#include "src/compiler/access-info.h"
-#include "src/accessors.h"
+#include "src/builtins/accessors.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/type-cache.h"
-#include "src/counters.h"
-#include "src/field-index-inl.h"
-#include "src/field-type.h"
#include "src/ic/call-optimization.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell-inl.h"
+#include "src/objects/field-index-inl.h"
+#include "src/objects/field-type.h"
#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/struct-inl.h"
#include "src/objects/templates.h"
@@ -55,91 +55,112 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
UNREACHABLE();
}
-ElementAccessInfo::ElementAccessInfo() = default;
-
-ElementAccessInfo::ElementAccessInfo(MapHandles const& receiver_maps,
- ElementsKind elements_kind)
- : elements_kind_(elements_kind), receiver_maps_(receiver_maps) {
+ElementAccessInfo::ElementAccessInfo(ZoneVector<Handle<Map>>&& receiver_maps,
+ ElementsKind elements_kind, Zone* zone)
+ : elements_kind_(elements_kind),
+ receiver_maps_(receiver_maps),
+ transition_sources_(zone) {
CHECK(!receiver_maps.empty());
}
// static
-PropertyAccessInfo PropertyAccessInfo::NotFound(MapHandles const& receiver_maps,
- MaybeHandle<JSObject> holder) {
- return PropertyAccessInfo(kNotFound, holder, receiver_maps);
+PropertyAccessInfo PropertyAccessInfo::Invalid(Zone* zone) {
+ return PropertyAccessInfo(zone);
}
// static
-PropertyAccessInfo PropertyAccessInfo::DataConstant(
- MapHandles const& receiver_maps, Handle<Object> constant,
- MaybeHandle<JSObject> holder) {
- return PropertyAccessInfo(kDataConstant, holder, constant, receiver_maps);
+PropertyAccessInfo PropertyAccessInfo::NotFound(Zone* zone,
+ Handle<Map> receiver_map,
+ MaybeHandle<JSObject> holder) {
+ return PropertyAccessInfo(zone, kNotFound, holder, {{receiver_map}, zone});
}
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
- PropertyConstness constness, MapHandles const& receiver_maps,
- FieldIndex field_index, MachineRepresentation field_representation,
+ Zone* zone, Handle<Map> receiver_map,
+ ZoneVector<CompilationDependencies::Dependency const*>&& dependencies,
+ FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
- Kind kind =
- constness == PropertyConstness::kConst ? kDataConstantField : kDataField;
- return PropertyAccessInfo(kind, holder, transition_map, field_index,
+ return PropertyAccessInfo(kDataField, holder, transition_map, field_index,
field_representation, field_type, field_map,
- receiver_maps);
+ {{receiver_map}, zone}, std::move(dependencies));
+}
+
+// static
+PropertyAccessInfo PropertyAccessInfo::DataConstant(
+ Zone* zone, Handle<Map> receiver_map,
+ ZoneVector<CompilationDependencies::Dependency const*>&& dependencies,
+ FieldIndex field_index, Representation field_representation,
+ Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map) {
+ return PropertyAccessInfo(kDataConstant, holder, transition_map, field_index,
+ field_representation, field_type, field_map,
+ {{receiver_map}, zone}, std::move(dependencies));
}
// static
PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
- MapHandles const& receiver_maps, Handle<Object> constant,
+ Zone* zone, Handle<Map> receiver_map, Handle<Object> constant,
MaybeHandle<JSObject> holder) {
- return PropertyAccessInfo(kAccessorConstant, holder, constant, receiver_maps);
+ return PropertyAccessInfo(zone, kAccessorConstant, holder, constant,
+ {{receiver_map}, zone});
}
// static
-PropertyAccessInfo PropertyAccessInfo::ModuleExport(
- MapHandles const& receiver_maps, Handle<Cell> cell) {
- return PropertyAccessInfo(kModuleExport, MaybeHandle<JSObject>(), cell,
- receiver_maps);
+PropertyAccessInfo PropertyAccessInfo::ModuleExport(Zone* zone,
+ Handle<Map> receiver_map,
+ Handle<Cell> cell) {
+ return PropertyAccessInfo(zone, kModuleExport, MaybeHandle<JSObject>(), cell,
+ {{receiver_map}, zone});
}
// static
-PropertyAccessInfo PropertyAccessInfo::StringLength(
- MapHandles const& receiver_maps) {
- return PropertyAccessInfo(kStringLength, MaybeHandle<JSObject>(),
- receiver_maps);
+PropertyAccessInfo PropertyAccessInfo::StringLength(Zone* zone,
+ Handle<Map> receiver_map) {
+ return PropertyAccessInfo(zone, kStringLength, MaybeHandle<JSObject>(),
+ {{receiver_map}, zone});
}
-PropertyAccessInfo::PropertyAccessInfo()
+PropertyAccessInfo::PropertyAccessInfo(Zone* zone)
: kind_(kInvalid),
- field_representation_(MachineRepresentation::kNone),
+ receiver_maps_(zone),
+ unrecorded_dependencies_(zone),
+ field_representation_(Representation::None()),
field_type_(Type::None()) {}
-PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
- MapHandles const& receiver_maps)
+PropertyAccessInfo::PropertyAccessInfo(Zone* zone, Kind kind,
+ MaybeHandle<JSObject> holder,
+ ZoneVector<Handle<Map>>&& receiver_maps)
: kind_(kind),
receiver_maps_(receiver_maps),
+ unrecorded_dependencies_(zone),
holder_(holder),
- field_representation_(MachineRepresentation::kNone),
+ field_representation_(Representation::None()),
field_type_(Type::None()) {}
-PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
+PropertyAccessInfo::PropertyAccessInfo(Zone* zone, Kind kind,
+ MaybeHandle<JSObject> holder,
Handle<Object> constant,
- MapHandles const& receiver_maps)
+ ZoneVector<Handle<Map>>&& receiver_maps)
: kind_(kind),
receiver_maps_(receiver_maps),
+ unrecorded_dependencies_(zone),
constant_(constant),
holder_(holder),
- field_representation_(MachineRepresentation::kNone),
+ field_representation_(Representation::None()),
field_type_(Type::Any()) {}
PropertyAccessInfo::PropertyAccessInfo(
Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
- FieldIndex field_index, MachineRepresentation field_representation,
+ FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map,
- MapHandles const& receiver_maps)
+ ZoneVector<Handle<Map>>&& receiver_maps,
+ ZoneVector<CompilationDependencies::Dependency const*>&&
+ unrecorded_dependencies)
: kind_(kind),
receiver_maps_(receiver_maps),
+ unrecorded_dependencies_(std::move(unrecorded_dependencies)),
transition_map_(transition_map),
holder_(holder),
field_index_(field_index),
@@ -157,7 +178,7 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
return that->kind_ == kInvalid;
case kDataField:
- case kDataConstantField: {
+ case kDataConstant: {
// Check if we actually access the same field (we use the
// GetFieldAccessStubKey method here just like the ICs do
// since that way we only compare the relevant bits of the
@@ -167,12 +188,13 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
switch (access_mode) {
case AccessMode::kHas:
case AccessMode::kLoad: {
- if (this->field_representation_ != that->field_representation_) {
- if (!IsAnyTagged(this->field_representation_) ||
- !IsAnyTagged(that->field_representation_)) {
+ if (!this->field_representation_.Equals(
+ that->field_representation_)) {
+ if (this->field_representation_.IsDouble() ||
+ that->field_representation_.IsDouble()) {
return false;
}
- this->field_representation_ = MachineRepresentation::kTagged;
+ this->field_representation_ = Representation::Tagged();
}
if (this->field_map_.address() != that->field_map_.address()) {
this->field_map_ = MaybeHandle<Map>();
@@ -186,7 +208,8 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
// also need to make sure that in case of transitioning stores,
// the transition targets match.
if (this->field_map_.address() != that->field_map_.address() ||
- this->field_representation_ != that->field_representation_ ||
+ !this->field_representation_.Equals(
+ that->field_representation_) ||
this->transition_map_.address() !=
that->transition_map_.address()) {
return false;
@@ -194,22 +217,25 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
break;
}
}
- // Merge the field type.
this->field_type_ =
Type::Union(this->field_type_, that->field_type_, zone);
- // Merge the receiver maps.
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
that->receiver_maps_.end());
+ this->unrecorded_dependencies_.insert(
+ this->unrecorded_dependencies_.end(),
+ that->unrecorded_dependencies_.begin(),
+ that->unrecorded_dependencies_.end());
return true;
}
return false;
}
- case kDataConstant:
case kAccessorConstant: {
// Check if we actually access the same constant.
if (this->constant_.address() == that->constant_.address()) {
+ DCHECK(this->unrecorded_dependencies_.empty());
+ DCHECK(that->unrecorded_dependencies_.empty());
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
that->receiver_maps_.end());
@@ -220,6 +246,8 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
case kNotFound:
case kStringLength: {
+ DCHECK(this->unrecorded_dependencies_.empty());
+ DCHECK(that->unrecorded_dependencies_.empty());
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
that->receiver_maps_.end());
@@ -243,65 +271,57 @@ AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
type_cache_(TypeCache::Get()),
zone_(zone) {}
-bool AccessInfoFactory::ComputeElementAccessInfo(
- Handle<Map> map, AccessMode access_mode,
- ElementAccessInfo* access_info) const {
+base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
+ Handle<Map> map, AccessMode access_mode) const {
// Check if it is safe to inline element access for the {map}.
MapRef map_ref(broker(), map);
- if (!CanInlineElementAccess(map_ref)) return false;
+ if (!CanInlineElementAccess(map_ref)) return base::nullopt;
ElementsKind const elements_kind = map_ref.elements_kind();
- *access_info = ElementAccessInfo(MapHandles{map}, elements_kind);
- return true;
+ return ElementAccessInfo({{map}, zone()}, elements_kind, zone());
}
bool AccessInfoFactory::ComputeElementAccessInfos(
- FeedbackNexus nexus, MapHandles const& maps, AccessMode access_mode,
+ ElementAccessFeedback const& processed, AccessMode access_mode,
ZoneVector<ElementAccessInfo>* access_infos) const {
- ElementAccessFeedback const* processed =
- FLAG_concurrent_inlining
- ? broker()->GetElementAccessFeedback(FeedbackSource(nexus))
- : broker()->ProcessFeedbackMapsForElementAccess(maps);
- if (processed == nullptr) return false;
-
if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
// For polymorphic loads of similar elements kinds (i.e. all tagged or all
// double), always use the "worst case" code without a transition. This is
// much faster than transitioning the elements to the worst case, trading a
// TransitionElementsKind for a CheckMaps, avoiding mutation of the array.
- ElementAccessInfo access_info;
- if (ConsolidateElementLoad(*processed, &access_info)) {
- access_infos->push_back(access_info);
+ base::Optional<ElementAccessInfo> access_info =
+ ConsolidateElementLoad(processed);
+ if (access_info.has_value()) {
+ access_infos->push_back(*access_info);
return true;
}
}
- for (Handle<Map> receiver_map : processed->receiver_maps) {
+ for (Handle<Map> receiver_map : processed.receiver_maps) {
// Compute the element access information.
- ElementAccessInfo access_info;
- if (!ComputeElementAccessInfo(receiver_map, access_mode, &access_info)) {
- return false;
- }
+ base::Optional<ElementAccessInfo> access_info =
+ ComputeElementAccessInfo(receiver_map, access_mode);
+ if (!access_info.has_value()) return false;
// Collect the possible transitions for the {receiver_map}.
- for (auto transition : processed->transitions) {
+ for (auto transition : processed.transitions) {
if (transition.second.equals(receiver_map)) {
- access_info.AddTransitionSource(transition.first);
+ access_info->AddTransitionSource(transition.first);
}
}
// Schedule the access information.
- access_infos->push_back(access_info);
+ access_infos->push_back(*access_info);
}
return true;
}
PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
- int number, AccessMode access_mode) const {
- DCHECK_NE(number, DescriptorArray::kNotFound);
+ int descriptor, AccessMode access_mode) const {
+ DCHECK_NE(descriptor, DescriptorArray::kNotFound);
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- PropertyDetails const details = descriptors->GetDetails(number);
- int index = descriptors->GetFieldIndex(number);
+ PropertyDetails const details = descriptors->GetDetails(descriptor);
+ int index = descriptors->GetFieldIndex(descriptor);
Representation details_representation = details.representation();
if (details_representation.IsNone()) {
// The ICs collect feedback in PREMONOMORPHIC state already,
@@ -309,65 +329,77 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// fields for which the representation has not yet been
// determined by the runtime. So we need to catch this case
// here and fall back to use the regular IC logic instead.
- return {};
+ return PropertyAccessInfo::Invalid(zone());
}
FieldIndex field_index =
FieldIndex::ForPropertyIndex(*map, index, details_representation);
Type field_type = Type::NonInternal();
-#ifdef V8_COMPRESS_POINTERS
- MachineRepresentation field_representation =
- MachineRepresentation::kCompressed;
-#else
- MachineRepresentation field_representation = MachineRepresentation::kTagged;
-#endif
MaybeHandle<Map> field_map;
MapRef map_ref(broker(), map);
+ ZoneVector<CompilationDependencies::Dependency const*>
+ unrecorded_dependencies(zone());
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
-#ifdef V8_COMPRESS_POINTERS
- field_representation = MachineRepresentation::kCompressedSigned;
-#else
- field_representation = MachineRepresentation::kTaggedSigned;
-#endif
- map_ref.SerializeOwnDescriptors(); // TODO(neis): Remove later.
- dependencies()->DependOnFieldRepresentation(map_ref, number);
+ map_ref.SerializeOwnDescriptor(descriptor);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
+ descriptor));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
- field_representation = MachineRepresentation::kFloat64;
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
- field_representation = MachineRepresentation::kTaggedPointer;
- Handle<FieldType> descriptors_field_type(descriptors->GetFieldType(number),
- isolate());
+ Handle<FieldType> descriptors_field_type(
+ descriptors->GetFieldType(descriptor), isolate());
if (descriptors_field_type->IsNone()) {
// Store is not safe if the field type was cleared.
- if (access_mode == AccessMode::kStore) return {};
+ if (access_mode == AccessMode::kStore) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
// The field type was cleared by the GC, so we don't know anything
// about the contents now.
}
- map_ref.SerializeOwnDescriptors(); // TODO(neis): Remove later.
- dependencies()->DependOnFieldRepresentation(map_ref, number);
+ map_ref.SerializeOwnDescriptor(descriptor);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
+ descriptor));
if (descriptors_field_type->IsClass()) {
- dependencies()->DependOnFieldType(map_ref, number);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor));
// Remember the field map, and try to infer a useful type.
Handle<Map> map(descriptors_field_type->AsClass(), isolate());
field_type = Type::For(MapRef(broker(), map));
field_map = MaybeHandle<Map>(map);
}
}
- return PropertyAccessInfo::DataField(
- details.constness(), MapHandles{receiver_map}, field_index,
- field_representation, field_type, field_map, holder);
+ PropertyConstness constness;
+ if (details.IsReadOnly() && !details.IsConfigurable()) {
+ constness = PropertyConstness::kConst;
+ } else {
+ map_ref.SerializeOwnDescriptor(descriptor);
+ constness = dependencies()->DependOnFieldConstness(map_ref, descriptor);
+ }
+ switch (constness) {
+ case PropertyConstness::kMutable:
+ return PropertyAccessInfo::DataField(
+ zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
+ details_representation, field_type, field_map, holder);
+ case PropertyConstness::kConst:
+ return PropertyAccessInfo::DataConstant(
+ zone(), receiver_map, std::move(unrecorded_dependencies), field_index,
+ details_representation, field_type, field_map, holder);
+ }
+ UNREACHABLE();
}
PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
- MaybeHandle<JSObject> holder, int number, AccessMode access_mode) const {
- DCHECK_NE(number, DescriptorArray::kNotFound);
+ MaybeHandle<JSObject> holder, int descriptor,
+ AccessMode access_mode) const {
+ DCHECK_NE(descriptor, DescriptorArray::kNotFound);
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- SLOW_DCHECK(number == descriptors->Search(*name, *map));
+ SLOW_DCHECK(descriptor == descriptors->Search(*name, *map));
if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
DCHECK(map->is_prototype_map());
Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(map->prototype_info()),
@@ -375,22 +407,24 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
Handle<JSModuleNamespace> module_namespace(
JSModuleNamespace::cast(proto_info->module_namespace()), isolate());
Handle<Cell> cell(
- Cell::cast(module_namespace->module()->exports()->Lookup(
+ Cell::cast(module_namespace->module().exports().Lookup(
ReadOnlyRoots(isolate()), name, Smi::ToInt(name->GetHash()))),
isolate());
- if (cell->value()->IsTheHole(isolate())) {
+ if (cell->value().IsTheHole(isolate())) {
// This module has not been fully initialized yet.
- return {};
+ return PropertyAccessInfo::Invalid(zone());
}
- return PropertyAccessInfo::ModuleExport(MapHandles{receiver_map}, cell);
+ return PropertyAccessInfo::ModuleExport(zone(), receiver_map, cell);
}
if (access_mode == AccessMode::kHas) {
// HasProperty checks don't call getter/setters, existence is sufficient.
- return PropertyAccessInfo::AccessorConstant(MapHandles{receiver_map},
+ return PropertyAccessInfo::AccessorConstant(zone(), receiver_map,
Handle<Object>(), holder);
}
- Handle<Object> accessors(descriptors->GetStrongValue(number), isolate());
- if (!accessors->IsAccessorPair()) return {};
+ Handle<Object> accessors(descriptors->GetStrongValue(descriptor), isolate());
+ if (!accessors->IsAccessorPair()) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
Handle<Object> accessor(access_mode == AccessMode::kLoad
? Handle<AccessorPair>::cast(accessors)->getter()
: Handle<AccessorPair>::cast(accessors)->setter(),
@@ -400,16 +434,20 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
if (!optimization.is_simple_api_call() ||
optimization.IsCrossContextLazyAccessorPair(
*broker()->native_context().object(), *map)) {
- return {};
+ return PropertyAccessInfo::Invalid(zone());
}
CallOptimization::HolderLookup lookup;
holder = optimization.LookupHolderOfExpectedType(receiver_map, &lookup);
- if (lookup == CallOptimization::kHolderNotFound) return {};
+ if (lookup == CallOptimization::kHolderNotFound) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
holder.is_null());
DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null());
- if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) return {};
+ if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
}
if (access_mode == AccessMode::kLoad) {
Handle<Name> cached_property_name;
@@ -420,18 +458,22 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
if (!access_info.IsInvalid()) return access_info;
}
}
- return PropertyAccessInfo::AccessorConstant(MapHandles{receiver_map},
- accessor, holder);
+ return PropertyAccessInfo::AccessorConstant(zone(), receiver_map, accessor,
+ holder);
}
PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
Handle<Map> map, Handle<Name> name, AccessMode access_mode) const {
CHECK(name->IsUniqueName());
- if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) return {};
+ if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
// Check if it is safe to inline property access for the {map}.
- if (!CanInlinePropertyAccess(map)) return {};
+ if (!CanInlinePropertyAccess(map)) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
// We support fast inline cases for certain JSObject getters.
if (access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) {
@@ -451,7 +493,9 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
if (access_mode == AccessMode::kStore ||
access_mode == AccessMode::kStoreInLiteral) {
// Don't bother optimizing stores to read-only properties.
- if (details.IsReadOnly()) return {};
+ if (details.IsReadOnly()) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
if (details.kind() == kData && !holder.is_null()) {
// This is a store to a property not found on the receiver but on a
// prototype. According to ES6 section 9.1.9 [[Set]], we need to
@@ -467,20 +511,13 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
} else {
DCHECK_EQ(kAccessor, details.kind());
// TODO(turbofan): Add support for general accessors?
- return {};
+ return PropertyAccessInfo::Invalid(zone());
}
} else {
DCHECK_EQ(kDescriptor, details.location());
- if (details.kind() == kData) {
- DCHECK(!FLAG_track_constant_fields);
- return PropertyAccessInfo::DataConstant(
- MapHandles{receiver_map},
- handle(descriptors->GetStrongValue(number), isolate()), holder);
- } else {
- DCHECK_EQ(kAccessor, details.kind());
- return ComputeAccessorDescriptorAccessInfo(
- receiver_map, name, map, holder, number, access_mode);
- }
+ DCHECK_EQ(kAccessor, details.kind());
+ return ComputeAccessorDescriptorAccessInfo(receiver_map, name, map,
+ holder, number, access_mode);
}
UNREACHABLE();
}
@@ -491,7 +528,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// integer indexed exotic objects (see ES6 section 9.4.5).
if (map->IsJSTypedArrayMap() && name->IsString() &&
IsSpecialIndex(String::cast(*name))) {
- return {};
+ return PropertyAccessInfo::Invalid(zone());
}
// Don't search on the prototype when storing in literals.
@@ -500,18 +537,20 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
}
// Don't lookup private symbols on the prototype chain.
- if (name->IsPrivate()) return {};
+ if (name->IsPrivate()) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
// Walk up the prototype chain.
- if (!map->prototype()->IsJSObject()) {
+ if (!map->prototype().IsJSObject()) {
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
Handle<JSFunction> constructor;
if (Map::GetConstructorFunction(map, broker()->native_context().object())
.ToHandle(&constructor)) {
map = handle(constructor->initial_map(), isolate());
- DCHECK(map->prototype()->IsJSObject());
- } else if (map->prototype()->IsNull(isolate())) {
+ DCHECK(map->prototype().IsJSObject());
+ } else if (map->prototype().IsNull(isolate())) {
// Store to property not found on the receiver or any prototype, we need
// to transition to a new data property.
// Implemented according to ES6 section 9.1.9 [[Set]] (P, V, Receiver)
@@ -521,13 +560,13 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// The property was not found (access returns undefined or throws
// depending on the language mode of the load operation.
// Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
- return PropertyAccessInfo::NotFound(MapHandles{receiver_map}, holder);
+ return PropertyAccessInfo::NotFound(zone(), receiver_map, holder);
} else {
- return {};
+ return PropertyAccessInfo::Invalid(zone());
}
}
Handle<JSObject> map_prototype(JSObject::cast(map->prototype()), isolate());
- if (map_prototype->map()->is_deprecated()) {
+ if (map_prototype->map().is_deprecated()) {
// Try to migrate the prototype object so we don't embed the deprecated
// map into the optimized code.
JSObject::TryMigrateInstance(map_prototype);
@@ -535,26 +574,30 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
map = handle(map_prototype->map(), isolate());
holder = map_prototype;
- if (!CanInlinePropertyAccess(map)) return {};
+ if (!CanInlinePropertyAccess(map)) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
// Successful lookup on prototype chain needs to guarantee that all
// the prototypes up to the holder have stable maps. Let us make sure
// the prototype maps are stable here.
CHECK(map->is_stable());
}
+ UNREACHABLE();
}
-PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
- MapHandles const& maps, Handle<Name> name, AccessMode access_mode) const {
- ZoneVector<PropertyAccessInfo> raw_access_infos(zone());
- ComputePropertyAccessInfos(maps, name, access_mode, &raw_access_infos);
- ZoneVector<PropertyAccessInfo> access_infos(zone());
- if (FinalizePropertyAccessInfos(raw_access_infos, access_mode,
- &access_infos) &&
- access_infos.size() == 1) {
- return access_infos.front();
- }
- return {};
+PropertyAccessInfo AccessInfoFactory::FinalizePropertyAccessInfosAsOne(
+ ZoneVector<PropertyAccessInfo> access_infos, AccessMode access_mode) const {
+ ZoneVector<PropertyAccessInfo> merged_access_infos(zone());
+ MergePropertyAccessInfos(access_infos, access_mode, &merged_access_infos);
+ if (merged_access_infos.size() == 1) {
+ PropertyAccessInfo& result = merged_access_infos.front();
+ if (!result.IsInvalid()) {
+ result.RecordDependencies(dependencies());
+ return result;
+ }
+ }
+ return PropertyAccessInfo::Invalid(zone());
}
void AccessInfoFactory::ComputePropertyAccessInfos(
@@ -566,7 +609,29 @@ void AccessInfoFactory::ComputePropertyAccessInfos(
}
}
+void PropertyAccessInfo::RecordDependencies(
+ CompilationDependencies* dependencies) {
+ for (CompilationDependencies::Dependency const* d :
+ unrecorded_dependencies_) {
+ dependencies->RecordDependency(d);
+ }
+ unrecorded_dependencies_.clear();
+}
+
bool AccessInfoFactory::FinalizePropertyAccessInfos(
+ ZoneVector<PropertyAccessInfo> access_infos, AccessMode access_mode,
+ ZoneVector<PropertyAccessInfo>* result) const {
+ MergePropertyAccessInfos(access_infos, access_mode, result);
+ for (PropertyAccessInfo const& info : *result) {
+ if (info.IsInvalid()) return false;
+ }
+ for (PropertyAccessInfo& info : *result) {
+ info.RecordDependencies(dependencies());
+ }
+ return true;
+}
+
+void AccessInfoFactory::MergePropertyAccessInfos(
ZoneVector<PropertyAccessInfo> infos, AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* result) const {
DCHECK(result->empty());
@@ -578,11 +643,9 @@ bool AccessInfoFactory::FinalizePropertyAccessInfos(
break;
}
}
- if (it->IsInvalid()) return false;
if (!merged) result->push_back(*it);
}
CHECK(!result->empty());
- return true;
}
namespace {
@@ -608,28 +671,26 @@ Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
} // namespace
-bool AccessInfoFactory::ConsolidateElementLoad(
- ElementAccessFeedback const& processed,
- ElementAccessInfo* access_info) const {
+base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
+ ElementAccessFeedback const& processed) const {
ElementAccessFeedback::MapIterator it = processed.all_maps(broker());
MapRef first_map = it.current();
InstanceType instance_type = first_map.instance_type();
ElementsKind elements_kind = first_map.elements_kind();
- MapHandles maps;
+ ZoneVector<Handle<Map>> maps(zone());
for (; !it.done(); it.advance()) {
MapRef map = it.current();
if (map.instance_type() != instance_type || !CanInlineElementAccess(map)) {
- return false;
+ return base::nullopt;
}
if (!GeneralizeElementsKind(elements_kind, map.elements_kind())
.To(&elements_kind)) {
- return false;
+ return base::nullopt;
}
maps.push_back(map.object());
}
- *access_info = ElementAccessInfo(maps, elements_kind);
- return true;
+ return ElementAccessInfo(std::move(maps), elements_kind, zone());
}
PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
@@ -637,15 +698,15 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
// Check for String::length field accessor.
if (map->IsStringMap()) {
if (Name::Equals(isolate(), name, isolate()->factory()->length_string())) {
- return PropertyAccessInfo::StringLength(MapHandles{map});
+ return PropertyAccessInfo::StringLength(zone(), map);
}
- return {};
+ return PropertyAccessInfo::Invalid(zone());
}
// Check for special JSObject field accessors.
FieldIndex field_index;
if (Accessors::IsJSObjectFieldAccessor(isolate(), map, name, &field_index)) {
Type field_type = Type::NonInternal();
- MachineRepresentation field_representation = MachineRepresentation::kTagged;
+ Representation field_representation = Representation::Tagged();
if (map->IsJSArrayMap()) {
DCHECK(
Name::Equals(isolate(), isolate()->factory()->length_string(), name));
@@ -656,20 +717,19 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
// case of other arrays.
if (IsDoubleElementsKind(map->elements_kind())) {
field_type = type_cache_->kFixedDoubleArrayLengthType;
- field_representation = MachineRepresentation::kTaggedSigned;
+ field_representation = Representation::Smi();
} else if (IsFastElementsKind(map->elements_kind())) {
field_type = type_cache_->kFixedArrayLengthType;
- field_representation = MachineRepresentation::kTaggedSigned;
+ field_representation = Representation::Smi();
} else {
field_type = type_cache_->kJSArrayLengthType;
}
}
// Special fields are always mutable.
- return PropertyAccessInfo::DataField(PropertyConstness::kMutable,
- MapHandles{map}, field_index,
+ return PropertyAccessInfo::DataField(zone(), map, {{}, zone()}, field_index,
field_representation, field_type);
}
- return {};
+ return PropertyAccessInfo::Invalid(zone());
}
PropertyAccessInfo AccessInfoFactory::LookupTransition(
@@ -677,58 +737,81 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
// Check if the {map} has a data transition with the given {name}.
Map transition =
TransitionsAccessor(isolate(), map).SearchTransition(*name, kData, NONE);
- if (transition.is_null()) return {};
+ if (transition.is_null()) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
Handle<Map> transition_map(transition, isolate());
int const number = transition_map->LastAdded();
PropertyDetails const details =
- transition_map->instance_descriptors()->GetDetails(number);
+ transition_map->instance_descriptors().GetDetails(number);
// Don't bother optimizing stores to read-only properties.
- if (details.IsReadOnly()) return {};
+ if (details.IsReadOnly()) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
// TODO(bmeurer): Handle transition to data constant?
- if (details.location() != kField) return {};
+ if (details.location() != kField) {
+ return PropertyAccessInfo::Invalid(zone());
+ }
int const index = details.field_index();
Representation details_representation = details.representation();
FieldIndex field_index = FieldIndex::ForPropertyIndex(*transition_map, index,
details_representation);
Type field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
- MachineRepresentation field_representation = MachineRepresentation::kTagged;
MapRef transition_map_ref(broker(), transition_map);
+ ZoneVector<CompilationDependencies::Dependency const*>
+ unrecorded_dependencies(zone());
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
- field_representation = MachineRepresentation::kTaggedSigned;
- transition_map_ref.SerializeOwnDescriptors(); // TODO(neis): Remove later.
- dependencies()->DependOnFieldRepresentation(transition_map_ref, number);
+ transition_map_ref.SerializeOwnDescriptor(number);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ transition_map_ref, number));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
- field_representation = MachineRepresentation::kFloat64;
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
- field_representation = MachineRepresentation::kTaggedPointer;
Handle<FieldType> descriptors_field_type(
- transition_map->instance_descriptors()->GetFieldType(number),
- isolate());
+ transition_map->instance_descriptors().GetFieldType(number), isolate());
if (descriptors_field_type->IsNone()) {
// Store is not safe if the field type was cleared.
- return {};
+ return PropertyAccessInfo::Invalid(zone());
}
- transition_map_ref.SerializeOwnDescriptors(); // TODO(neis): Remove later.
- dependencies()->DependOnFieldRepresentation(transition_map_ref, number);
+ transition_map_ref.SerializeOwnDescriptor(number);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldRepresentationDependencyOffTheRecord(
+ transition_map_ref, number));
if (descriptors_field_type->IsClass()) {
- dependencies()->DependOnFieldType(transition_map_ref, number);
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldTypeDependencyOffTheRecord(transition_map_ref,
+ number));
// Remember the field map, and try to infer a useful type.
Handle<Map> map(descriptors_field_type->AsClass(), isolate());
field_type = Type::For(MapRef(broker(), map));
field_map = MaybeHandle<Map>(map);
}
}
- dependencies()->DependOnTransition(MapRef(broker(), transition_map));
- // Transitioning stores are never stores to constant fields.
- return PropertyAccessInfo::DataField(
- PropertyConstness::kMutable, MapHandles{map}, field_index,
- field_representation, field_type, field_map, holder, transition_map);
+ unrecorded_dependencies.push_back(
+ dependencies()->TransitionDependencyOffTheRecord(
+ MapRef(broker(), transition_map)));
+ // Transitioning stores *may* store to const fields. The resulting
+ // DataConstant access infos can be distinguished from later, i.e. redundant,
+ // stores to the same constant field by the presence of a transition map.
+ switch (details.constness()) {
+ case PropertyConstness::kMutable:
+ return PropertyAccessInfo::DataField(
+ zone(), map, std::move(unrecorded_dependencies), field_index,
+ details_representation, field_type, field_map, holder,
+ transition_map);
+ case PropertyConstness::kConst:
+ return PropertyAccessInfo::DataConstant(
+ zone(), map, std::move(unrecorded_dependencies), field_index,
+ details_representation, field_type, field_map, holder,
+ transition_map);
+ }
+ UNREACHABLE();
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 1945303929..3499069fc4 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -7,12 +7,13 @@
#include <iosfwd>
+#include "src/codegen/machine-type.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/types.h"
-#include "src/feedback-vector.h"
-#include "src/field-index.h"
-#include "src/machine-type.h"
-#include "src/objects.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/field-index.h"
#include "src/objects/map.h"
+#include "src/objects/objects.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -24,27 +25,25 @@ class Factory;
namespace compiler {
// Forward declarations.
-class CompilationDependencies;
class ElementAccessFeedback;
class Type;
class TypeCache;
-// Whether we are loading a property or storing to a property.
-// For a store during literal creation, do not walk up the prototype chain.
-enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
-
std::ostream& operator<<(std::ostream&, AccessMode);
// This class encapsulates all information required to access a certain element.
class ElementAccessInfo final {
public:
- ElementAccessInfo();
- ElementAccessInfo(MapHandles const& receiver_maps,
- ElementsKind elements_kind);
+ ElementAccessInfo(ZoneVector<Handle<Map>>&& receiver_maps,
+ ElementsKind elements_kind, Zone* zone);
ElementsKind elements_kind() const { return elements_kind_; }
- MapHandles const& receiver_maps() const { return receiver_maps_; }
- MapHandles const& transition_sources() const { return transition_sources_; }
+ ZoneVector<Handle<Map>> const& receiver_maps() const {
+ return receiver_maps_;
+ }
+ ZoneVector<Handle<Map>> const& transition_sources() const {
+ return transition_sources_;
+ }
void AddTransitionSource(Handle<Map> map) {
CHECK_EQ(receiver_maps_.size(), 1);
@@ -53,8 +52,8 @@ class ElementAccessInfo final {
private:
ElementsKind elements_kind_;
- MapHandles receiver_maps_;
- MapHandles transition_sources_;
+ ZoneVector<Handle<Map>> receiver_maps_;
+ ZoneVector<Handle<Map>> transition_sources_;
};
// This class encapsulates all information required to access a certain
@@ -64,44 +63,48 @@ class PropertyAccessInfo final {
enum Kind {
kInvalid,
kNotFound,
- kDataConstant,
kDataField,
- kDataConstantField,
+ kDataConstant,
kAccessorConstant,
kModuleExport,
kStringLength
};
- static PropertyAccessInfo NotFound(MapHandles const& receiver_maps,
+ static PropertyAccessInfo NotFound(Zone* zone, Handle<Map> receiver_map,
MaybeHandle<JSObject> holder);
- static PropertyAccessInfo DataConstant(MapHandles const& receiver_maps,
- Handle<Object> constant,
- MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
- PropertyConstness constness, MapHandles const& receiver_maps,
- FieldIndex field_index, MachineRepresentation field_representation,
+ Zone* zone, Handle<Map> receiver_map,
+ ZoneVector<CompilationDependencies::Dependency const*>&&
+ unrecorded_dependencies,
+ FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
- static PropertyAccessInfo AccessorConstant(MapHandles const& receiver_maps,
+ static PropertyAccessInfo DataConstant(
+ Zone* zone, Handle<Map> receiver_map,
+ ZoneVector<CompilationDependencies::Dependency const*>&&
+ unrecorded_dependencies,
+ FieldIndex field_index, Representation field_representation,
+ Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map = MaybeHandle<Map>());
+ static PropertyAccessInfo AccessorConstant(Zone* zone,
+ Handle<Map> receiver_map,
Handle<Object> constant,
MaybeHandle<JSObject> holder);
- static PropertyAccessInfo ModuleExport(MapHandles const& receiver_maps,
+ static PropertyAccessInfo ModuleExport(Zone* zone, Handle<Map> receiver_map,
Handle<Cell> cell);
- static PropertyAccessInfo StringLength(MapHandles const& receiver_maps);
-
- PropertyAccessInfo();
+ static PropertyAccessInfo StringLength(Zone* zone, Handle<Map> receiver_map);
+ static PropertyAccessInfo Invalid(Zone* zone);
bool Merge(PropertyAccessInfo const* that, AccessMode access_mode,
Zone* zone) V8_WARN_UNUSED_RESULT;
+ void RecordDependencies(CompilationDependencies* dependencies);
+
bool IsInvalid() const { return kind() == kInvalid; }
bool IsNotFound() const { return kind() == kNotFound; }
- bool IsDataConstant() const { return kind() == kDataConstant; }
bool IsDataField() const { return kind() == kDataField; }
- // TODO(ishell): rename to IsDataConstant() once constant field tracking
- // is done.
- bool IsDataConstantField() const { return kind() == kDataConstantField; }
+ bool IsDataConstant() const { return kind() == kDataConstant; }
bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
bool IsModuleExport() const { return kind() == kModuleExport; }
bool IsStringLength() const { return kind() == kStringLength; }
@@ -109,36 +112,46 @@ class PropertyAccessInfo final {
bool HasTransitionMap() const { return !transition_map().is_null(); }
Kind kind() const { return kind_; }
- MaybeHandle<JSObject> holder() const { return holder_; }
+ MaybeHandle<JSObject> holder() const {
+ // This CHECK tries to protect against using the access info without
+ // recording its dependencies first.
+ CHECK(unrecorded_dependencies_.empty());
+ return holder_;
+ }
MaybeHandle<Map> transition_map() const { return transition_map_; }
Handle<Object> constant() const { return constant_; }
FieldIndex field_index() const { return field_index_; }
Type field_type() const { return field_type_; }
- MachineRepresentation field_representation() const {
- return field_representation_;
- }
+ Representation field_representation() const { return field_representation_; }
MaybeHandle<Map> field_map() const { return field_map_; }
- MapHandles const& receiver_maps() const { return receiver_maps_; }
+ ZoneVector<Handle<Map>> const& receiver_maps() const {
+ return receiver_maps_;
+ }
Handle<Cell> export_cell() const;
private:
- PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
- MapHandles const& receiver_maps);
- PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
- Handle<Object> constant, MapHandles const& receiver_maps);
- PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
- MaybeHandle<Map> transition_map, FieldIndex field_index,
- MachineRepresentation field_representation,
- Type field_type, MaybeHandle<Map> field_map,
- MapHandles const& receiver_maps);
+ explicit PropertyAccessInfo(Zone* zone);
+ PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
+ ZoneVector<Handle<Map>>&& receiver_maps);
+ PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
+ Handle<Object> constant,
+ ZoneVector<Handle<Map>>&& receiver_maps);
+ PropertyAccessInfo(
+ Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
+ FieldIndex field_index, Representation field_representation,
+ Type field_type, MaybeHandle<Map> field_map,
+ ZoneVector<Handle<Map>>&& receiver_maps,
+ ZoneVector<CompilationDependencies::Dependency const*>&& dependencies);
Kind kind_;
- MapHandles receiver_maps_;
+ ZoneVector<Handle<Map>> receiver_maps_;
+ ZoneVector<CompilationDependencies::Dependency const*>
+ unrecorded_dependencies_;
Handle<Object> constant_;
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
FieldIndex field_index_;
- MachineRepresentation field_representation_;
+ Representation field_representation_;
Type field_type_;
MaybeHandle<Map> field_map_;
};
@@ -150,31 +163,38 @@ class AccessInfoFactory final {
AccessInfoFactory(JSHeapBroker* broker, CompilationDependencies* dependencies,
Zone* zone);
- bool ComputeElementAccessInfo(Handle<Map> map, AccessMode access_mode,
- ElementAccessInfo* access_info) const;
+ base::Optional<ElementAccessInfo> ComputeElementAccessInfo(
+ Handle<Map> map, AccessMode access_mode) const;
bool ComputeElementAccessInfos(
- FeedbackNexus nexus, MapHandles const& maps, AccessMode access_mode,
+ ElementAccessFeedback const& processed, AccessMode access_mode,
ZoneVector<ElementAccessInfo>* access_infos) const;
PropertyAccessInfo ComputePropertyAccessInfo(Handle<Map> map,
Handle<Name> name,
AccessMode access_mode) const;
- PropertyAccessInfo ComputePropertyAccessInfo(MapHandles const& maps,
- Handle<Name> name,
- AccessMode access_mode) const;
+
+ // Convenience wrapper around {ComputePropertyAccessInfo} for multiple maps.
void ComputePropertyAccessInfos(
MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos) const;
- // Merge as many of the given {infos} as possible. Return false iff
- // any of them was invalid.
+ // Merge as many of the given {infos} as possible and record any dependencies.
+ // Return false iff any of them was invalid, in which case no dependencies are
+ // recorded.
+ // TODO(neis): Make access_mode part of access info?
bool FinalizePropertyAccessInfos(
ZoneVector<PropertyAccessInfo> infos, AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* result) const;
+ // Merge the given {infos} to a single one and record any dependencies. If the
+ // merge is not possible, the result has kind {kInvalid} and no dependencies
+ // are recorded.
+ PropertyAccessInfo FinalizePropertyAccessInfosAsOne(
+ ZoneVector<PropertyAccessInfo> infos, AccessMode access_mode) const;
+
private:
- bool ConsolidateElementLoad(ElementAccessFeedback const& processed,
- ElementAccessInfo* access_info) const;
+ base::Optional<ElementAccessInfo> ConsolidateElementLoad(
+ ElementAccessFeedback const& processed) const;
PropertyAccessInfo LookupSpecialFieldAccessor(Handle<Map> map,
Handle<Name> name) const;
PropertyAccessInfo LookupTransition(Handle<Map> map, Handle<Name> name,
@@ -182,11 +202,16 @@ class AccessInfoFactory final {
PropertyAccessInfo ComputeDataFieldAccessInfo(Handle<Map> receiver_map,
Handle<Map> map,
MaybeHandle<JSObject> holder,
- int number,
+ int descriptor,
AccessMode access_mode) const;
PropertyAccessInfo ComputeAccessorDescriptorAccessInfo(
Handle<Map> receiver_map, Handle<Name> name, Handle<Map> map,
- MaybeHandle<JSObject> holder, int number, AccessMode access_mode) const;
+ MaybeHandle<JSObject> holder, int descriptor,
+ AccessMode access_mode) const;
+
+ void MergePropertyAccessInfos(ZoneVector<PropertyAccessInfo> infos,
+ AccessMode access_mode,
+ ZoneVector<PropertyAccessInfo>* result) const;
CompilationDependencies* dependencies() const { return dependencies_; }
JSHeapBroker* broker() const { return broker_; }
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 7f6c7f86cb..d93053c64b 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -4,16 +4,16 @@
#include "src/compiler/backend/code-generator.h"
-#include "src/assembler-inl.h"
-#include "src/boxed-float.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/double.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/macro-assembler.h"
-#include "src/optimized-compilation-info.h"
+#include "src/numbers/double.h"
+#include "src/utils/boxed-float.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -168,34 +168,14 @@ namespace {
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
- OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
- Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode, StubCallMode stub_mode,
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand offset,
+ Register value, RecordWriteMode mode,
+ StubCallMode stub_mode,
UnwindingInfoWriter* unwinding_info_writer)
: OutOfLineCode(gen),
object_(object),
- index_(index),
- index_immediate_(0),
+ offset_(offset),
value_(value),
- scratch0_(scratch0),
- scratch1_(scratch1),
- mode_(mode),
- stub_mode_(stub_mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()),
- unwinding_info_writer_(unwinding_info_writer),
- zone_(gen->zone()) {}
-
- OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
- Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode, StubCallMode stub_mode,
- UnwindingInfoWriter* unwinding_info_writer)
- : OutOfLineCode(gen),
- object_(object),
- index_(no_reg),
- index_immediate_(index),
- value_(value),
- scratch0_(scratch0),
- scratch1_(scratch1),
mode_(mode),
stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
@@ -206,15 +186,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
- if (index_ == no_reg) {
- __ add(scratch1_, object_, Operand(index_immediate_));
- } else {
- DCHECK_EQ(0, index_immediate_);
- __ add(scratch1_, object_, Operand(index_));
- }
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
@@ -226,12 +199,12 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
- __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+ __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ __ CallRecordWriteStub(object_, offset_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
} else {
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ __ CallRecordWriteStub(object_, offset_, remembered_set_action,
save_fp_mode);
}
if (must_save_lr_) {
@@ -242,11 +215,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
private:
Register const object_;
- Register const index_;
- int32_t const index_immediate_; // Valid if index_==no_reg.
+ Operand const offset_;
Register const value_;
- Register const scratch0_;
- Register const scratch1_;
RecordWriteMode const mode_;
StubCallMode stub_mode_;
bool must_save_lr_;
@@ -584,7 +554,6 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
break;
default:
UNREACHABLE();
- break;
}
frame_access_state->IncreaseSPDelta(pending_pushes->size());
pending_pushes->clear();
@@ -601,7 +570,7 @@ void AdjustStackPointerForTailCall(
if (pending_pushes != nullptr) {
FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- tasm->sub(sp, sp, Operand(stack_slot_delta * kSystemPointerSize));
+ tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
@@ -864,6 +833,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ str(pc, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -871,6 +844,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -972,29 +946,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
- Register scratch0 = i.TempRegister(0);
- Register scratch1 = i.TempRegister(1);
- OutOfLineRecordWrite* ool;
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
+ Operand offset(0);
if (addressing_mode == kMode_Offset_RI) {
- int32_t index = i.InputInt32(1);
- ool = new (zone()) OutOfLineRecordWrite(
- this, object, index, value, scratch0, scratch1, mode,
- DetermineStubCallMode(), &unwinding_info_writer_);
- __ str(value, MemOperand(object, index));
+ int32_t immediate = i.InputInt32(1);
+ offset = Operand(immediate);
+ __ str(value, MemOperand(object, immediate));
} else {
DCHECK_EQ(kMode_Offset_RR, addressing_mode);
- Register index(i.InputRegister(1));
- ool = new (zone()) OutOfLineRecordWrite(
- this, object, index, value, scratch0, scratch1, mode,
- DetermineStubCallMode(), &unwinding_info_writer_);
- __ str(value, MemOperand(object, index));
+ Register reg = i.InputRegister(1);
+ offset = Operand(reg);
+ __ str(value, MemOperand(object, reg));
}
- __ CheckPageFlag(object, scratch0,
- MemoryChunk::kPointersFromHereAreInterestingMask, ne,
- ool->entry());
+ auto ool = new (zone()) OutOfLineRecordWrite(
+ this, object, offset, value, mode, DetermineStubCallMode(),
+ &unwinding_info_writer_);
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ ne, ool->entry());
__ bind(ool->exit());
break;
}
@@ -2929,8 +2899,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
@@ -3035,7 +3004,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3045,6 +3015,10 @@ void CodeGenerator::AssembleConstructFrame() {
__ ldr(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ }
}
}
@@ -3098,8 +3072,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
}
@@ -3112,7 +3085,7 @@ void CodeGenerator::AssembleConstructFrame() {
required_slots -= frame()->GetReturnSlotCount();
required_slots -= 2 * base::bits::CountPopulation(saves_fp);
if (required_slots > 0) {
- __ sub(sp, sp, Operand(required_slots * kSystemPointerSize));
+ __ AllocateStackSpace(required_slots * kSystemPointerSize);
}
}
@@ -3134,7 +3107,7 @@ void CodeGenerator::AssembleConstructFrame() {
const int returns = frame()->GetReturnSlotCount();
if (returns != 0) {
// Create space for returns.
- __ sub(sp, sp, Operand(returns * kSystemPointerSize));
+ __ AllocateStackSpace(returns * kSystemPointerSize);
}
}
@@ -3468,7 +3441,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
default:
UNREACHABLE();
- break;
}
}
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 957e390c66..678d75ae5e 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -482,7 +482,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
@@ -528,12 +527,10 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ Emit(code, 0, nullptr, input_count, inputs);
} else {
InstructionCode opcode = kArchNop;
switch (rep) {
@@ -647,7 +644,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
default:
// All other cases should support unaligned accesses.
UNREACHABLE();
- return;
}
}
@@ -738,7 +734,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
default:
// All other cases should support unaligned accesses.
UNREACHABLE();
- return;
}
}
@@ -1661,7 +1656,6 @@ void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
break;
default:
UNREACHABLE();
- return;
}
if (selector->CanCover(*node, binop)) {
// The comparison is the only user of {node}.
@@ -2046,7 +2040,6 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
@@ -2071,7 +2064,6 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
AddressingMode addressing_mode = kMode_Offset_RR;
diff --git a/deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.h b/deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.h
index 237abe40ab..6b9ade0c48 100644
--- a/deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.h
+++ b/deps/v8/src/compiler/backend/arm/unwinding-info-writer-arm.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_BACKEND_ARM_UNWINDING_INFO_WRITER_ARM_H_
#define V8_COMPILER_BACKEND_ARM_UNWINDING_INFO_WRITER_ARM_H_
-#include "src/eh-frame.h"
-#include "src/flags.h"
+#include "src/diagnostics/eh-frame.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 29baddc3f1..53864ad2e9 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -4,15 +4,15 @@
#include "src/compiler/backend/code-generator.h"
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -260,16 +260,14 @@ namespace {
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
- OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
- Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode, StubCallMode stub_mode,
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand offset,
+ Register value, RecordWriteMode mode,
+ StubCallMode stub_mode,
UnwindingInfoWriter* unwinding_info_writer)
: OutOfLineCode(gen),
object_(object),
- index_(index),
+ offset_(offset),
value_(value),
- scratch0_(scratch0),
- scratch1_(scratch1),
mode_(mode),
stub_mode_(stub_mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
@@ -280,10 +278,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- __ CheckPageFlagClear(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask,
- exit());
- __ Add(scratch1_, object_, index_);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ DecompressTaggedPointer(value_, value_);
+ }
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, ne,
+ exit());
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
@@ -295,15 +294,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
}
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
- __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+ __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
} else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ __ CallRecordWriteStub(object_, offset_, remembered_set_action,
save_fp_mode, wasm::WasmCode::kWasmRecordWrite);
} else {
- __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
+ __ CallRecordWriteStub(object_, offset_, remembered_set_action,
save_fp_mode);
}
if (must_save_lr_) {
@@ -314,10 +313,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
private:
Register const object_;
- Operand const index_;
+ Operand const offset_;
Register const value_;
- Register const scratch0_;
- Register const scratch1_;
RecordWriteMode const mode_;
StubCallMode const stub_mode_;
bool must_save_lr_;
@@ -717,7 +714,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// by an unknown value, and it is safe to continue accessing the frame
// via the stack pointer.
UNREACHABLE();
- break;
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
@@ -748,6 +744,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ Register scratch = x8;
+ __ Adr(scratch, &return_location);
+ __ Str(scratch,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
+
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters, 0);
@@ -755,6 +760,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters, 0);
}
+ __ Bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -848,23 +855,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
- Operand index(0);
+ Operand offset(0);
if (addressing_mode == kMode_MRI) {
- index = Operand(i.InputInt64(1));
+ offset = Operand(i.InputInt64(1));
} else {
DCHECK_EQ(addressing_mode, kMode_MRR);
- index = Operand(i.InputRegister(1));
+ offset = Operand(i.InputRegister(1));
}
Register value = i.InputRegister(2);
- Register scratch0 = i.TempRegister(0);
- Register scratch1 = i.TempRegister(1);
auto ool = new (zone()) OutOfLineRecordWrite(
- this, object, index, value, scratch0, scratch1, mode,
- DetermineStubCallMode(), &unwinding_info_writer_);
- __ StoreTaggedField(value, MemOperand(object, index));
- __ CheckPageFlagSet(object, scratch0,
- MemoryChunk::kPointersFromHereAreInterestingMask,
- ool->entry());
+ this, object, offset, value, mode, DetermineStubCallMode(),
+ &unwinding_info_writer_);
+ __ StoreTaggedField(value, MemOperand(object, offset));
+ if (COMPRESS_POINTERS_BOOL) {
+ __ DecompressTaggedPointer(object, object);
+ }
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq, ool->entry());
__ Bind(ool->exit());
break;
}
@@ -1343,6 +1350,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Float32Sqrt:
__ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
+ case kArm64Float32Fnmul: {
+ __ Fnmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
+ i.InputFloat32Register(1));
+ break;
+ }
case kArm64Float64Cmp:
if (instr->InputAt(1)->IsFPRegister()) {
__ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
@@ -1408,6 +1420,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Float64Sqrt:
__ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArm64Float64Fnmul:
+ __ Fnmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kArm64Float32ToFloat64:
__ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
break;
@@ -1575,40 +1591,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
case kArm64DecompressSigned: {
- __ Sxtw(i.OutputRegister(), i.InputRegister(0));
+ __ DecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0));
break;
}
case kArm64DecompressPointer: {
- __ Add(i.OutputRegister(), kRootRegister,
- Operand(i.InputRegister(0), SXTW));
+ __ DecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0));
break;
}
case kArm64DecompressAny: {
- // TODO(solanes): Do branchful compute?
- // Branchlessly compute |masked_root|:
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
- UseScratchRegisterScope temps(tasm());
- Register masked_root = temps.AcquireX();
- // Sign extend tag bit to entire register.
- __ Sbfx(masked_root, i.InputRegister(0), 0, kSmiTagSize);
- __ And(masked_root, masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is a
- // smi or add the isolate root if it is a heap object.
- __ Add(i.OutputRegister(), masked_root,
- Operand(i.InputRegister(0), SXTW));
- break;
- }
- // TODO(solanes): Combine into one Compress? They seem to be identical.
- // TODO(solanes): We might get away with doing a no-op in these three cases.
- // The Uxtw instruction is the conservative way for the moment.
- case kArm64CompressSigned: {
- __ Uxtw(i.OutputRegister(), i.InputRegister(0));
- break;
- }
- case kArm64CompressPointer: {
- __ Uxtw(i.OutputRegister(), i.InputRegister(0));
+ __ DecompressAnyTagged(i.OutputRegister(), i.InputRegister(0));
break;
}
+ case kArm64CompressSigned: // Fall through.
+ case kArm64CompressPointer: // Fall through.
case kArm64CompressAny: {
__ Uxtw(i.OutputRegister(), i.InputRegister(0));
break;
@@ -2342,8 +2337,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
// The trap code should never return.
__ Brk(0);
@@ -2515,8 +2509,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ Brk(0);
}
@@ -2562,7 +2555,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ Str(kWasmInstanceRegister,
MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
} break;
- case CallDescriptor::kCallWasmImportWrapper: {
+ case CallDescriptor::kCallWasmImportWrapper:
+ case CallDescriptor::kCallWasmCapiFunction: {
UseScratchRegisterScope temps(tasm());
__ LoadTaggedPointerField(
kJSFunctionRegister,
@@ -2570,8 +2564,11 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadTaggedPointerField(
kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
- __ Claim(required_slots +
- 2); // Claim extra slots for marker + instance.
+ int extra_slots =
+ call_descriptor->kind() == CallDescriptor::kCallWasmImportWrapper
+ ? 2 // Import wrapper: marker + instance.
+ : 3; // C-API function: marker + instance + PC.
+ __ Claim(required_slots + extra_slots);
Register scratch = temps.AcquireX();
__ Mov(scratch,
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
@@ -2849,7 +2846,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
default:
UNREACHABLE();
- break;
}
}
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 2d729d1e65..4b7b017111 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -95,6 +95,7 @@ namespace compiler {
V(Arm64Float32Abs) \
V(Arm64Float32Neg) \
V(Arm64Float32Sqrt) \
+ V(Arm64Float32Fnmul) \
V(Arm64Float32RoundDown) \
V(Arm64Float32Max) \
V(Arm64Float32Min) \
@@ -109,6 +110,7 @@ namespace compiler {
V(Arm64Float64Abs) \
V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
+ V(Arm64Float64Fnmul) \
V(Arm64Float64RoundDown) \
V(Arm64Float32RoundUp) \
V(Arm64Float64RoundUp) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 2a0cf7c528..502b9d7d82 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -88,6 +88,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float32Abs:
case kArm64Float32Neg:
case kArm64Float32Sqrt:
+ case kArm64Float32Fnmul:
case kArm64Float32RoundDown:
case kArm64Float32Max:
case kArm64Float32Min:
@@ -101,6 +102,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64Abs:
case kArm64Float64Neg:
case kArm64Float64Sqrt:
+ case kArm64Float64Fnmul:
case kArm64Float64RoundDown:
case kArm64Float64RoundTiesAway:
case kArm64Float64RoundTruncate:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index c8364a5c88..69d82b4993 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -620,35 +620,19 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64LdrW;
immediate_mode = kLoadStoreImm32;
break;
-#ifdef V8_COMPRESS_POINTERS
- case MachineRepresentation::kTaggedSigned:
- opcode = kArm64LdrDecompressTaggedSigned;
- immediate_mode = kLoadStoreImm32;
- break;
- case MachineRepresentation::kTaggedPointer:
- opcode = kArm64LdrDecompressTaggedPointer;
- immediate_mode = kLoadStoreImm32;
- break;
- case MachineRepresentation::kTagged:
- opcode = kArm64LdrDecompressAnyTagged;
- immediate_mode = kLoadStoreImm32;
- break;
- case MachineRepresentation::kCompressedSigned:
- case MachineRepresentation::kCompressedPointer:
+ case MachineRepresentation::kCompressedSigned: // Fall through.
+ case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
+#ifdef V8_COMPRESS_POINTERS
opcode = kArm64LdrW;
immediate_mode = kLoadStoreImm32;
break;
#else
- case MachineRepresentation::kCompressedSigned: // Fall through.
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed:
UNREACHABLE();
- return;
+#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
-#endif
case MachineRepresentation::kWord64:
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
@@ -659,7 +643,6 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
@@ -688,7 +671,7 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(arm64): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK(CanBeTaggedPointer(rep));
+ DCHECK(CanBeTaggedOrCompressedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -706,12 +689,10 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ Emit(code, 0, nullptr, input_count, inputs);
} else {
InstructionOperand inputs[4];
size_t input_count = 0;
@@ -739,29 +720,19 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArm64StrW;
immediate_mode = kLoadStoreImm32;
break;
-#ifdef V8_COMPRESS_POINTERS
- case MachineRepresentation::kTaggedSigned:
- case MachineRepresentation::kTaggedPointer:
- case MachineRepresentation::kTagged:
- opcode = kArm64StrCompressTagged;
- immediate_mode = kLoadStoreImm32;
- break;
- case MachineRepresentation::kCompressedSigned:
- case MachineRepresentation::kCompressedPointer:
+ case MachineRepresentation::kCompressedSigned: // Fall through.
+ case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
+#ifdef V8_COMPRESS_POINTERS
opcode = kArm64StrW;
immediate_mode = kLoadStoreImm32;
break;
#else
- case MachineRepresentation::kCompressedSigned: // Fall through.
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed:
UNREACHABLE();
- return;
+#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
-#endif
case MachineRepresentation::kWord64:
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
@@ -772,7 +743,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
inputs[0] = g.UseRegisterOrImmediateZero(value);
@@ -1240,8 +1210,6 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Float64RoundTiesAway, kArm64Float64RoundTiesAway) \
V(Float32RoundTiesEven, kArm64Float32RoundTiesEven) \
V(Float64RoundTiesEven, kArm64Float64RoundTiesEven) \
- V(Float32Neg, kArm64Float32Neg) \
- V(Float64Neg, kArm64Float64Neg) \
V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32) \
V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
V(Float64SilenceNaN, kArm64Float64SilenceNaN)
@@ -1259,8 +1227,6 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Float64Add, kArm64Float64Add) \
V(Float32Sub, kArm64Float32Sub) \
V(Float64Sub, kArm64Float64Sub) \
- V(Float32Mul, kArm64Float32Mul) \
- V(Float64Mul, kArm64Float64Mul) \
V(Float32Div, kArm64Float32Div) \
V(Float64Div, kArm64Float64Div) \
V(Float32Max, kArm64Float32Max) \
@@ -1735,36 +1701,41 @@ void InstructionSelector::EmitPrepareArguments(
// `arguments` includes alignment "holes". This means that slots bigger than
// kSystemPointerSize, e.g. Simd128, will span across multiple arguments.
int claim_count = static_cast<int>(arguments->size());
+ bool needs_padding = claim_count % 2 != 0;
int slot = claim_count - 1;
claim_count = RoundUp(claim_count, 2);
- // Bump the stack pointer(s).
+ // Bump the stack pointer.
if (claim_count > 0) {
// TODO(titzer): claim and poke probably take small immediates.
// TODO(titzer): it would be better to bump the sp here only
// and emit paired stores with increment for non c frames.
Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
- }
- if (claim_count > 0) {
- // Store padding, which might be overwritten.
- Emit(kArm64Poke, g.NoOutput(), g.UseImmediate(0),
- g.TempImmediate(claim_count - 1));
+ if (needs_padding) {
+ Emit(kArm64Poke, g.NoOutput(), g.UseImmediate(0),
+ g.TempImmediate(claim_count - 1));
+ }
}
// Poke the arguments into the stack.
while (slot >= 0) {
- Node* input_node = (*arguments)[slot].node;
- // Skip any alignment holes in pushed nodes.
- if (input_node != nullptr) {
- Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input_node),
+ PushParameter input0 = (*arguments)[slot];
+ PushParameter input1 = slot > 0 ? (*arguments)[slot - 1] : PushParameter();
+ // Emit a poke-pair if consecutive parameters have the same type.
+ // TODO(arm): Support consecutive Simd128 parameters.
+ if (input0.node != nullptr && input1.node != nullptr &&
+ input0.location.GetType() == input1.location.GetType()) {
+ Emit(kArm64PokePair, g.NoOutput(), g.UseRegister(input0.node),
+ g.UseRegister(input1.node), g.TempImmediate(slot));
+ slot -= 2;
+ } else if (input0.node != nullptr) {
+ Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input0.node),
g.TempImmediate(slot));
+ slot--;
+ } else {
+ // Skip any alignment holes in pushed nodes.
+ slot--;
}
- slot--;
- // TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
- // same type.
- // Emit(kArm64PokePair, g.NoOutput(), g.UseRegister((*arguments)[slot]),
- // g.UseRegister((*arguments)[slot - 1]), g.TempImmediate(slot));
- // slot -= 2;
}
}
@@ -1905,7 +1876,6 @@ void MaybeReplaceCmpZeroWithFlagSettingBinop(InstructionSelector* selector,
break;
default:
UNREACHABLE();
- return;
}
if (selector->CanCover(*node, binop)) {
// The comparison is the only user of the add or and, so we can generate
@@ -2656,6 +2626,38 @@ void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
VisitWordCompare(this, node, kArm64Cmp, &cont, false, kArithmeticImm);
}
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* in = node->InputAt(0);
+ if (in->opcode() == IrOpcode::kFloat32Mul && CanCover(node, in)) {
+ Float32BinopMatcher m(in);
+ Emit(kArm64Float32Fnmul, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+ return;
+ }
+ VisitRR(this, kArm64Float32Neg, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ Arm64OperandGenerator g(this);
+ Float32BinopMatcher m(node);
+
+ if (m.left().IsFloat32Neg() && CanCover(node, m.left().node())) {
+ Emit(kArm64Float32Fnmul, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+
+ if (m.right().IsFloat32Neg() && CanCover(node, m.right().node())) {
+ Emit(kArm64Float32Fnmul, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()->InputAt(0)),
+ g.UseRegister(m.left().node()));
+ return;
+ }
+ return VisitRRR(this, kArm64Float32Mul, node);
+}
+
void InstructionSelector::VisitFloat32Equal(Node* node) {
FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
VisitFloat32Compare(this, node, &cont);
@@ -2721,6 +2723,38 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* in = node->InputAt(0);
+ if (in->opcode() == IrOpcode::kFloat64Mul && CanCover(node, in)) {
+ Float64BinopMatcher m(in);
+ Emit(kArm64Float64Fnmul, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+ return;
+ }
+ VisitRR(this, kArm64Float64Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ Arm64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
+
+ if (m.left().IsFloat64Neg() && CanCover(node, m.left().node())) {
+ Emit(kArm64Float64Fnmul, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseRegister(m.right().node()));
+ return;
+ }
+
+ if (m.right().IsFloat64Neg() && CanCover(node, m.right().node())) {
+ Emit(kArm64Float64Fnmul, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()->InputAt(0)),
+ g.UseRegister(m.left().node()));
+ return;
+ }
+ return VisitRRR(this, kArm64Float64Mul, node);
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
@@ -2738,7 +2772,6 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicLoad(this, node, opcode);
}
@@ -2761,7 +2794,6 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicLoad(this, node, opcode);
}
@@ -2781,7 +2813,6 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicStore(this, node, opcode);
}
@@ -2804,7 +2835,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicStore(this, node, opcode);
}
diff --git a/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.h b/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.h
index 6b67f0ff64..36788735de 100644
--- a/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/unwinding-info-writer-arm64.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_BACKEND_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
#define V8_COMPILER_BACKEND_ARM64_UNWINDING_INFO_WRITER_ARM64_H_
-#include "src/eh-frame.h"
-#include "src/flags.h"
+#include "src/diagnostics/eh-frame.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index 567d7920a9..75f8e70203 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -5,11 +5,11 @@
#ifndef V8_COMPILER_BACKEND_CODE_GENERATOR_IMPL_H_
#define V8_COMPILER_BACKEND_CODE_GENERATOR_IMPL_H_
+#include "src/codegen/macro-assembler.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/opcodes.h"
-#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index d16933ba89..bb83a8497b 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -4,21 +4,21 @@
#include "src/compiler/backend/code-generator.h"
-#include "src/address-map.h"
-#include "src/assembler-inl.h"
#include "src/base/adapters.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/string-constants.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/counters.h"
-#include "src/eh-frame.h"
-#include "src/frames.h"
-#include "src/log.h"
-#include "src/macro-assembler-inl.h"
+#include "src/diagnostics/eh-frame.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/objects/smi.h"
-#include "src/optimized-compilation-info.h"
-#include "src/string-constants.h"
+#include "src/utils/address-map.h"
namespace v8 {
namespace internal {
@@ -88,6 +88,7 @@ CodeGenerator::CodeGenerator(
tasm_.set_jump_optimization_info(jump_opt);
Code::Kind code_kind = info->code_kind();
if (code_kind == Code::WASM_FUNCTION ||
+ code_kind == Code::WASM_TO_CAPI_FUNCTION ||
code_kind == Code::WASM_TO_JS_FUNCTION ||
code_kind == Code::WASM_INTERPRETER_ENTRY ||
(Builtins::IsBuiltinId(builtin_index) &&
@@ -305,8 +306,7 @@ void CodeGenerator::AssembleCode() {
// Emit the exception handler table.
if (!handlers_.empty()) {
- handler_table_offset_ = HandlerTable::EmitReturnTableStart(
- tasm(), static_cast<int>(handlers_.size()));
+ handler_table_offset_ = HandlerTable::EmitReturnTableStart(tasm());
for (size_t i = 0; i < handlers_.size(); ++i) {
HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
handlers_[i].handler->pos());
@@ -344,7 +344,6 @@ void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
}
case kFlags_deoptimize_and_poison: {
UNREACHABLE();
- break;
}
default:
break;
@@ -408,10 +407,14 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
}
- MaybeHandle<Code> maybe_code = isolate()->factory()->TryNewCode(
- desc, info()->code_kind(), Handle<Object>(), info()->builtin_index(),
- source_positions, deopt_data, kMovable, true,
- frame()->GetTotalFrameSlotCount());
+ MaybeHandle<Code> maybe_code =
+ Factory::CodeBuilder(isolate(), desc, info()->code_kind())
+ .set_builtin_index(info()->builtin_index())
+ .set_source_position_table(source_positions)
+ .set_deoptimization_data(deopt_data)
+ .set_is_turbofanned()
+ .set_stack_slots(frame()->GetTotalFrameSlotCount())
+ .TryBuild();
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
@@ -437,9 +440,8 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
}
void CodeGenerator::RecordSafepoint(ReferenceMap* references,
- Safepoint::Kind kind,
Safepoint::DeoptMode deopt_mode) {
- Safepoint safepoint = safepoints()->DefineSafepoint(tasm(), kind, deopt_mode);
+ Safepoint safepoint = safepoints()->DefineSafepoint(tasm(), deopt_mode);
int stackSlotToSpillSlotDelta =
frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
@@ -453,9 +455,6 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
// knowledge about those fields anyway.
if (index < stackSlotToSpillSlotDelta) continue;
safepoint.DefinePointerSlot(index);
- } else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
- Register reg = LocationOperand::cast(operand).GetRegister();
- safepoint.DefinePointerRegister(reg);
}
}
}
@@ -762,6 +761,7 @@ bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
StubCallMode CodeGenerator::DetermineStubCallMode() const {
Code::Kind code_kind = info()->code_kind();
return (code_kind == Code::WASM_FUNCTION ||
+ code_kind == Code::WASM_TO_CAPI_FUNCTION ||
code_kind == Code::WASM_TO_JS_FUNCTION)
? StubCallMode::kCallWasmRuntimeStub
: StubCallMode::kCallCodeObject;
@@ -867,9 +867,9 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
- RecordSafepoint(
- instr->reference_map(), Safepoint::kSimple,
- needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
+ RecordSafepoint(instr->reference_map(), needs_frame_state
+ ? Safepoint::kLazyDeopt
+ : Safepoint::kNoLazyDeopt);
if (flags & CallDescriptor::kHasExceptionHandler) {
InstructionOperandConverter i(this, instr);
@@ -1157,8 +1157,8 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
// Smis.
DCHECK_EQ(4, kSystemPointerSize);
Smi smi(static_cast<Address>(constant.ToInt32()));
- DCHECK(smi->IsSmi());
- literal = DeoptimizationLiteral(smi->value());
+ DCHECK(smi.IsSmi());
+ literal = DeoptimizationLiteral(smi.value());
} else if (type.representation() == MachineRepresentation::kBit) {
if (constant.ToInt32() == 0) {
literal =
@@ -1192,8 +1192,8 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
// Smis.
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
Smi smi(static_cast<Address>(constant.ToInt64()));
- DCHECK(smi->IsSmi());
- literal = DeoptimizationLiteral(smi->value());
+ DCHECK(smi.IsSmi());
+ literal = DeoptimizationLiteral(smi.value());
}
break;
case Constant::kFloat32:
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index b78a02df17..74dd90c5de 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -6,14 +6,14 @@
#define V8_COMPILER_BACKEND_CODE_GENERATOR_H_
#include "src/base/optional.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/codegen/source-position-table.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/unwinding-info-writer.h"
#include "src/compiler/osr.h"
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/safepoint-table.h"
-#include "src/source-position-table.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/trap-handler/trap-handler.h"
namespace v8 {
@@ -129,7 +129,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
void AssembleSourcePosition(SourcePosition source_position);
// Record a safepoint with the given pointer map.
- void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
+ void RecordSafepoint(ReferenceMap* references,
Safepoint::DeoptMode deopt_mode);
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/backend/gap-resolver.cc b/deps/v8/src/compiler/backend/gap-resolver.cc
index e11a8b6434..e9aeb2fb2c 100644
--- a/deps/v8/src/compiler/backend/gap-resolver.cc
+++ b/deps/v8/src/compiler/backend/gap-resolver.cc
@@ -8,7 +8,7 @@
#include <set>
#include "src/base/enum-set.h"
-#include "src/register-configuration.h"
+#include "src/codegen/register-configuration.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index c4ca35472e..0e61c22cbb 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -4,20 +4,20 @@
#include "src/compiler/backend/code-generator.h"
-#include "src/assembler-inl.h"
#include "src/base/overflowing-math.h"
-#include "src/callable.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/ia32/assembler-ia32.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/ia32/assembler-ia32.h"
-#include "src/macro-assembler.h"
#include "src/objects/smi.h"
-#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -247,7 +247,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
zone_(gen->zone()) {}
void Generate() final {
- __ sub(esp, Immediate(kDoubleSize));
+ __ AllocateStackSpace(kDoubleSize);
__ movsd(MemOperand(esp, 0), input_);
if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
@@ -360,7 +360,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 4); \
/* Return value is in st(0) on ia32. */ \
/* Store it into the result register. */ \
- __ sub(esp, Immediate(kDoubleSize)); \
+ __ AllocateStackSpace(kDoubleSize); \
__ fstp_d(Operand(esp, 0)); \
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
__ add(esp, Immediate(kDoubleSize)); \
@@ -374,7 +374,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ CallCFunction(ExternalReference::ieee754_##name##_function(), 2); \
/* Return value is in st(0) on ia32. */ \
/* Store it into the result register. */ \
- __ sub(esp, Immediate(kDoubleSize)); \
+ __ AllocateStackSpace(kDoubleSize); \
__ fstp_d(Operand(esp, 0)); \
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
__ add(esp, Immediate(kDoubleSize)); \
@@ -522,7 +522,7 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- tasm->sub(esp, Immediate(stack_slot_delta * kSystemPointerSize));
+ tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
tasm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize));
@@ -813,6 +813,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ Register scratch = eax;
+ __ push(scratch);
+ __ PushPC();
+ int pc = __ pc_offset();
+ __ pop(scratch);
+ __ sub(scratch, Immediate(pc + Code::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, Immediate::CodeRelativeOffset(&return_location));
+ __ mov(MemOperand(ebp, WasmExitFrameConstants::kCallingPCOffset),
+ scratch);
+ __ pop(scratch);
+ }
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -820,6 +834,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1188,7 +1204,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchWordPoisonOnSpeculation:
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
- break;
case kLFence:
__ lfence();
break;
@@ -1366,7 +1381,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat64Mod: {
Register tmp = i.TempRegister(1);
__ mov(tmp, esp);
- __ sub(esp, Immediate(kDoubleSize));
+ __ AllocateStackSpace(kDoubleSize);
__ and_(esp, -8); // align to 8 byte boundary.
// Move values to st(0) and st(1).
__ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
@@ -1687,45 +1702,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32PushFloat32:
if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kFloatSize));
+ __ AllocateStackSpace(kFloatSize);
__ movss(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ Move(kScratchDoubleReg, i.InputFloat32(0));
- __ sub(esp, Immediate(kFloatSize));
+ __ AllocateStackSpace(kFloatSize);
__ movss(Operand(esp, 0), kScratchDoubleReg);
frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
} else {
__ movss(kScratchDoubleReg, i.InputOperand(0));
- __ sub(esp, Immediate(kFloatSize));
+ __ AllocateStackSpace(kFloatSize);
__ movss(Operand(esp, 0), kScratchDoubleReg);
frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
}
break;
case kIA32PushFloat64:
if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
+ __ AllocateStackSpace(kDoubleSize);
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ Move(kScratchDoubleReg, i.InputDouble(0));
- __ sub(esp, Immediate(kDoubleSize));
+ __ AllocateStackSpace(kDoubleSize);
__ movsd(Operand(esp, 0), kScratchDoubleReg);
frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
} else {
__ movsd(kScratchDoubleReg, i.InputOperand(0));
- __ sub(esp, Immediate(kDoubleSize));
+ __ AllocateStackSpace(kDoubleSize);
__ movsd(Operand(esp, 0), kScratchDoubleReg);
frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
}
break;
case kIA32PushSimd128:
if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kSimd128Size));
+ __ AllocateStackSpace(kSimd128Size);
__ movups(Operand(esp, 0), i.InputSimd128Register(0));
} else {
__ movups(kScratchDoubleReg, i.InputOperand(0));
- __ sub(esp, Immediate(kSimd128Size));
+ __ AllocateStackSpace(kSimd128Size);
__ movups(Operand(esp, 0), kScratchDoubleReg);
}
frame_access_state()->IncreaseSPDelta(kSimd128Size / kSystemPointerSize);
@@ -1737,7 +1752,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ push(operand);
frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
} else if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kFloatSize));
+ __ AllocateStackSpace(kFloatSize);
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kFloatSize / kSystemPointerSize);
} else if (HasImmediateInput(instr, 0)) {
@@ -3898,7 +3913,6 @@ static Condition FlagsConditionToCondition(FlagsCondition condition) {
break;
default:
UNREACHABLE();
- break;
}
}
@@ -3970,8 +3984,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ wasm_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -4220,7 +4233,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -4232,6 +4246,10 @@ void CodeGenerator::AssembleConstructFrame() {
Operand(kWasmInstanceRegister,
Tuple2::kValue1Offset - kHeapObjectTag));
__ push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ }
}
}
}
@@ -4281,8 +4299,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ wasm_call(wasm::WasmCode::kWasmStackOverflow,
RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
@@ -4291,7 +4308,7 @@ void CodeGenerator::AssembleConstructFrame() {
required_slots -= base::bits::CountPopulation(saves);
required_slots -= frame()->GetReturnSlotCount();
if (required_slots > 0) {
- __ sub(esp, Immediate(required_slots * kSystemPointerSize));
+ __ AllocateStackSpace(required_slots * kSystemPointerSize);
}
}
@@ -4304,7 +4321,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Allocate return slots (located after callee-saved).
if (frame()->GetReturnSlotCount() > 0) {
- __ sub(esp, Immediate(frame()->GetReturnSlotCount() * kSystemPointerSize));
+ __ AllocateStackSpace(frame()->GetReturnSlotCount() * kSystemPointerSize);
}
}
@@ -4592,7 +4609,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
default:
UNREACHABLE();
- break;
}
}
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 44420b3352..f81b88823e 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -310,7 +310,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
IA32OperandGenerator g(this);
@@ -1619,7 +1618,6 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
break;
default:
UNREACHABLE();
- break;
}
VisitAtomicExchange(this, node, opcode, rep);
}
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 9bce47b030..068164b57e 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -27,8 +27,8 @@
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
#endif
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/compiler/write-barrier-kind.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index b3220f07fd..b0637c175d 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -6,7 +6,7 @@
#include "src/base/adapters.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index 1fd454558d..21edc2f503 100644
--- a/deps/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -5,12 +5,12 @@
#ifndef V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_IMPL_H_
#define V8_COMPILER_BACKEND_INSTRUCTION_SELECTOR_IMPL_H_
+#include "src/codegen/macro-assembler.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/schedule.h"
-#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
@@ -33,7 +33,7 @@ class SwitchInfo {
BasicBlock* default_branch)
: cases_(cases),
min_value_(min_value),
- max_value_(min_value),
+ max_value_(max_value),
default_branch_(default_branch) {
if (cases.size() != 0) {
DCHECK_LE(min_value, max_value);
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 5cbc5d5ba6..2b748a188b 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -6,15 +6,15 @@
#include <limits>
-#include "src/assembler-inl.h"
#include "src/base/adapters.h"
+#include "src/codegen/assembler-inl.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
-#include "src/deoptimizer.h"
+#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
@@ -458,7 +458,7 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
case IrOpcode::kDelayedStringConstant:
return g->UseImmediate(input);
case IrOpcode::kHeapConstant: {
- if (!CanBeTaggedPointer(rep)) {
+ if (!CanBeTaggedOrCompressedPointer(rep)) {
// If we have inconsistent static and dynamic types, e.g. if we
// smi-check a string, we can get here with a heap object that
// says it is a smi. In that case, we return an invalid instruction
@@ -485,7 +485,6 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
case IrOpcode::kObjectState:
case IrOpcode::kTypedObjectState:
UNREACHABLE();
- break;
default:
switch (kind) {
case FrameStateInputKind::kStackSlot:
@@ -908,6 +907,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
? g.UseFixed(callee, kJavaScriptCallCodeStartRegister)
: g.UseRegister(callee));
break;
+ case CallDescriptor::kCallWasmCapiFunction:
case CallDescriptor::kCallWasmFunction:
case CallDescriptor::kCallWasmImportWrapper:
buffer->instruction_args.push_back(
@@ -1007,8 +1007,13 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
UnallocatedOperand unallocated = UnallocatedOperand::cast(op);
if (unallocated.HasFixedSlotPolicy() && !call_tail) {
int stack_index = -unallocated.fixed_slot_index() - 1;
+ // This can insert empty slots before stack_index and will insert enough
+ // slots after stack_index to store the parameter.
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
- buffer->pushed_nodes.resize(stack_index + 1);
+ int num_slots = std::max(
+ 1, (ElementSizeInBytes(location.GetType().representation()) /
+ kSystemPointerSize));
+ buffer->pushed_nodes.resize(stack_index + num_slots);
}
PushParameter param = {*iter, location};
buffer->pushed_nodes[stack_index] = param;
@@ -1227,7 +1232,6 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
default:
UNREACHABLE();
- break;
}
if (trace_turbo_ == kEnableTraceTurboJson && input) {
int instruction_start = static_cast<int>(instructions_.size());
@@ -1329,6 +1333,9 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUnreachable:
VisitUnreachable(node);
return;
+ case IrOpcode::kStaticAssert:
+ VisitStaticAssert(node);
+ return;
case IrOpcode::kDeadValue:
VisitDeadValue(node);
return;
@@ -2625,6 +2632,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
+ case CallDescriptor::kCallWasmCapiFunction:
case CallDescriptor::kCallWasmFunction:
case CallDescriptor::kCallWasmImportWrapper:
opcode = kArchCallWasmFunction | MiscField::encode(flags);
@@ -2839,6 +2847,11 @@ void InstructionSelector::VisitUnreachable(Node* node) {
Emit(kArchDebugBreak, g.NoOutput());
}
+void InstructionSelector::VisitStaticAssert(Node* node) {
+ node->InputAt(0)->Print();
+ FATAL("Expected turbofan static assert to hold, but got non-true input!\n");
+}
+
void InstructionSelector::VisitDeadValue(Node* node) {
OperandGenerator g(this);
MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node);
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index ecc3497498..4f6b1c5971 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -7,14 +7,14 @@
#include <map>
+#include "src/codegen/cpu-features.h"
+#include "src/common/globals.h"
#include "src/compiler/backend/instruction-scheduler.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
-#include "src/cpu-features.h"
-#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -628,6 +628,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitThrow(Node* node);
void VisitRetain(Node* node);
void VisitUnreachable(Node* node);
+ void VisitStaticAssert(Node* node);
void VisitDeadValue(Node* node);
void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 5346a730a3..c52dca61a1 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -6,12 +6,12 @@
#include <iomanip>
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/source-position.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
-#include "src/register-configuration.h"
-#include "src/source-position.h"
namespace v8 {
namespace internal {
@@ -56,7 +56,6 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
case kPositiveOrZero:
case kNegative:
UNREACHABLE();
- break;
case kEqual:
case kNotEqual:
case kOverflow:
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index a2084a294b..61875a1a17 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -11,15 +11,15 @@
#include <set>
#include "src/base/compiler-specific.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/register-arch.h"
+#include "src/codegen/source-position.h"
+#include "src/common/globals.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
#include "src/compiler/opcodes.h"
-#include "src/double.h"
-#include "src/external-reference.h"
-#include "src/globals.h"
-#include "src/register-arch.h"
-#include "src/source-position.h"
+#include "src/numbers/double.h"
#include "src/zone/zone-allocator.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/backend/mips/OWNERS b/deps/v8/src/compiler/backend/mips/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/compiler/backend/mips/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 561e3e85f4..1f79386821 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
-#include "src/callable.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/macro-assembler.h"
-#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
@@ -171,7 +171,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ Push(ra);
}
- if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
+ __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
+ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
@@ -3103,7 +3105,6 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
return;
default:
UNREACHABLE();
- break;
}
}
@@ -3154,8 +3155,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 8a5a3c8df3..0c7299d451 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -315,7 +315,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
@@ -1322,7 +1321,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
UNREACHABLE();
- break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
break;
@@ -1347,7 +1345,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
@@ -1383,7 +1380,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
UNREACHABLE();
- break;
case MachineRepresentation::kWord16:
opcode = kMipsUsh;
break;
@@ -1402,7 +1398,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
@@ -1800,7 +1795,6 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
@@ -1835,7 +1829,6 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
diff --git a/deps/v8/src/compiler/backend/mips64/OWNERS b/deps/v8/src/compiler/backend/mips64/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/compiler/backend/mips64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 61f0aef6a4..5cd9bc54eb 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
-#include "src/callable.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips64/constants-mips64.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/macro-assembler.h"
-#include "src/mips64/constants-mips64.h"
-#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
@@ -3250,7 +3250,6 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
return;
default:
UNREACHABLE();
- break;
}
}
@@ -3303,8 +3302,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index a3031cf698..499a3da05a 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/codegen/macro-assembler.h"
#include "src/compiler/backend/instruction-scheduler.h"
-#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 03f4e51420..9768a7da9b 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -201,8 +201,8 @@ struct ExtendingLoadMatcher {
DCHECK(m.IsWord64Sar());
if (m.left().IsLoad() && m.right().Is(32) &&
selector_->CanCover(m.node(), m.left().node())) {
- DCHECK_EQ(selector_->GetEffectiveLevel(node),
- selector_->GetEffectiveLevel(m.left().node()));
+ DCHECK_EQ(selector_->GetEffectLevel(node),
+ selector_->GetEffectLevel(m.left().node()));
MachineRepresentation rep =
LoadRepresentationOf(m.left().node()->op()).representation();
DCHECK_EQ(3, ElementSizeLog2Of(rep));
@@ -395,7 +395,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (node->opcode() == IrOpcode::kPoisonedLoad) {
CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
@@ -1673,7 +1672,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
UNREACHABLE();
- break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
break;
@@ -1694,7 +1692,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
@@ -1728,7 +1725,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
UNREACHABLE();
- break;
case MachineRepresentation::kWord16:
opcode = kMips64Ush;
break;
@@ -1749,7 +1745,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (g.CanBeImmediate(index, opcode)) {
@@ -2420,7 +2415,6 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicLoad(this, node, opcode);
}
@@ -2440,7 +2434,6 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicStore(this, node, opcode);
@@ -2464,7 +2457,6 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicLoad(this, node, opcode);
}
@@ -2487,7 +2479,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicStore(this, node, opcode);
diff --git a/deps/v8/src/compiler/backend/move-optimizer.cc b/deps/v8/src/compiler/backend/move-optimizer.cc
index 4d72c9340b..35d771e93d 100644
--- a/deps/v8/src/compiler/backend/move-optimizer.cc
+++ b/deps/v8/src/compiler/backend/move-optimizer.cc
@@ -4,7 +4,7 @@
#include "src/compiler/backend/move-optimizer.h"
-#include "src/register-configuration.h"
+#include "src/codegen/register-configuration.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/move-optimizer.h b/deps/v8/src/compiler/backend/move-optimizer.h
index ae99cb4ea4..ac3c407393 100644
--- a/deps/v8/src/compiler/backend/move-optimizer.h
+++ b/deps/v8/src/compiler/backend/move-optimizer.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_BACKEND_MOVE_OPTIMIZER_H_
#define V8_COMPILER_BACKEND_MOVE_OPTIMIZER_H_
+#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
-#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index fd954d94c8..30605df270 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -4,16 +4,16 @@
#include "src/compiler/backend/code-generator.h"
-#include "src/assembler-inl.h"
-#include "src/callable.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
-#include "src/double.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/macro-assembler.h"
-#include "src/optimized-compilation-info.h"
+#include "src/numbers/double.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -729,7 +729,6 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
break;
default:
UNREACHABLE();
- break;
}
frame_access_state->IncreaseSPDelta(pending_pushes->size());
pending_pushes->clear();
@@ -1959,7 +1958,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_AtomicStoreWord32:
case kPPC_AtomicStoreWord64:
UNREACHABLE();
- break;
case kWord32AtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
__ extsb(i.OutputRegister(0), i.OutputRegister(0));
@@ -2060,7 +2058,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_TARGET_ARCH_PPC64
default:
UNREACHABLE();
- break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2151,8 +2148,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
@@ -2391,8 +2387,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
}
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 4ccdc46b75..bb503763c2 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -6,7 +6,7 @@
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-#include "src/ppc/frame-constants-ppc.h"
+#include "src/execution/ppc/frame-constants-ppc.h"
namespace v8 {
namespace internal {
@@ -215,7 +215,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return;
}
if (node->opcode() == IrOpcode::kPoisonedLoad &&
diff --git a/deps/v8/src/compiler/backend/register-allocator-verifier.cc b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
index a66c35bd99..53349c9c2b 100644
--- a/deps/v8/src/compiler/backend/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/backend/register-allocator-verifier.cc
@@ -4,9 +4,9 @@
#include "src/compiler/backend/register-allocator-verifier.h"
-#include "src/bit-vector.h"
#include "src/compiler/backend/instruction.h"
-#include "src/ostreams.h"
+#include "src/utils/bit-vector.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 68cc963dbc..57ea2c1a26 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -6,12 +6,12 @@
#include <iomanip>
-#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/base/small-vector.h"
+#include "src/codegen/assembler-inl.h"
#include "src/compiler/linkage.h"
-#include "src/string-stream.h"
-#include "src/vector.h"
+#include "src/strings/string-stream.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -2093,7 +2093,6 @@ int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
break;
default:
UNREACHABLE();
- break;
}
return result;
}
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index c142d17de7..8929fb2ee6 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -7,11 +7,11 @@
#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
+#include "src/codegen/register-configuration.h"
+#include "src/common/globals.h"
#include "src/compiler/backend/instruction.h"
-#include "src/flags.h"
-#include "src/globals.h"
-#include "src/ostreams.h"
-#include "src/register-configuration.h"
+#include "src/flags/flags.h"
+#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/backend/s390/OWNERS b/deps/v8/src/compiler/backend/s390/OWNERS
deleted file mode 100644
index 6d1a8fc472..0000000000
--- a/deps/v8/src/compiler/backend/s390/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-jyan@ca.ibm.com
-joransiu@ca.ibm.com
-michael_dawson@ca.ibm.com
-miladfar@ca.ibm.com \ No newline at end of file
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index bd72664acc..595800268d 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -4,15 +4,15 @@
#include "src/compiler/backend/code-generator.h"
-#include "src/assembler-inl.h"
-#include "src/callable.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/macro-assembler.h"
-#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -1218,7 +1218,6 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
break;
default:
UNREACHABLE();
- break;
}
frame_access_state->IncreaseSPDelta(pending_pushes->size());
pending_pushes->clear();
@@ -2803,7 +2802,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
default:
UNREACHABLE();
- break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2891,8 +2889,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
@@ -3090,8 +3087,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
__ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
}
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 00f4a24f52..d982605efc 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -6,7 +6,7 @@
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/unwinding-info-writer.h b/deps/v8/src/compiler/backend/unwinding-info-writer.h
index 3383da99f7..590a839a06 100644
--- a/deps/v8/src/compiler/backend/unwinding-info-writer.h
+++ b/deps/v8/src/compiler/backend/unwinding-info-writer.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_BACKEND_UNWINDING_INFO_WRITER_H_
#define V8_COMPILER_BACKEND_UNWINDING_INFO_WRITER_H_
-#include "src/flags.h"
+#include "src/flags/flags.h"
#if V8_TARGET_ARCH_ARM
#include "src/compiler/backend/arm/unwinding-info-writer-arm.h"
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index cc17e30de1..c6667292fc 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -7,17 +7,17 @@
#include <limits>
#include "src/base/overflowing-math.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/x64/assembler-x64.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/macro-assembler.h"
#include "src/objects/smi.h"
-#include "src/optimized-compilation-info.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
-#include "src/x64/assembler-x64.h"
namespace v8 {
namespace internal {
@@ -201,7 +201,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
zone_(gen->zone()) {}
void Generate() final {
- __ subq(rsp, Immediate(kDoubleSize));
+ __ AllocateStackSpace(kDoubleSize);
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
@@ -247,6 +247,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
+ if (COMPRESS_POINTERS_BOOL) {
+ __ DecompressTaggedPointer(value_, value_);
+ }
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, zero,
exit());
@@ -322,8 +325,7 @@ class WasmOutOfLineTrap : public OutOfLineCode {
__ near_call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -612,7 +614,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(Assembler* assembler,
+void AdjustStackPointerForTailCall(TurboAssembler* assembler,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -620,7 +622,7 @@ void AdjustStackPointerForTailCall(Assembler* assembler,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- assembler->subq(rsp, Immediate(stack_slot_delta * kSystemPointerSize));
+ assembler->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
assembler->addq(rsp, Immediate(-stack_slot_delta * kSystemPointerSize));
@@ -899,6 +901,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ leaq(kScratchRegister, Operand(&return_location, 0));
+ __ movq(MemOperand(rbp, WasmExitFrameConstants::kCallingPCOffset),
+ kScratchRegister);
+ }
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -906,6 +915,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1018,6 +1029,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
OutOfLineRecordWrite(this, object, operand, value, scratch0, scratch1,
mode, DetermineStubCallMode());
__ StoreTaggedField(operand, value);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ DecompressTaggedPointer(object, object);
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
@@ -1354,7 +1368,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kSSEFloat64Mod: {
- __ subq(rsp, Immediate(kDoubleSize));
+ __ AllocateStackSpace(kDoubleSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
// Move values to st(0) and st(1).
@@ -1955,42 +1969,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64DecompressSigned: {
CHECK(instr->HasOutput());
- ASSEMBLE_MOVX(movsxlq);
+ ASSEMBLE_MOVX(DecompressTaggedSigned);
break;
}
case kX64DecompressPointer: {
CHECK(instr->HasOutput());
- ASSEMBLE_MOVX(movsxlq);
- __ addq(i.OutputRegister(), kRootRegister);
+ ASSEMBLE_MOVX(DecompressTaggedPointer);
break;
}
case kX64DecompressAny: {
CHECK(instr->HasOutput());
- ASSEMBLE_MOVX(movsxlq);
- // TODO(solanes): Do branchful compute?
- // Branchlessly compute |masked_root|:
- STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag < 32));
- Register masked_root = kScratchRegister;
- __ movl(masked_root, i.OutputRegister());
- __ andl(masked_root, Immediate(kSmiTagMask));
- __ negq(masked_root);
- __ andq(masked_root, kRootRegister);
- // Now this add operation will either leave the value unchanged if it is a
- // smi or add the isolate root if it is a heap object.
- __ addq(i.OutputRegister(), masked_root);
- break;
- }
- // TODO(solanes): Combine into one Compress? They seem to be identical.
- // TODO(solanes): We might get away with doing a no-op in these three cases.
- // The movl instruction is the conservative way for the moment.
- case kX64CompressSigned: {
- ASSEMBLE_MOVX(movl);
- break;
- }
- case kX64CompressPointer: {
- ASSEMBLE_MOVX(movl);
+ ASSEMBLE_MOVX(DecompressAnyTagged);
break;
}
+ case kX64CompressSigned: // Fall through.
+ case kX64CompressPointer: // Fall through.
case kX64CompressAny: {
ASSEMBLE_MOVX(movl);
break;
@@ -2184,14 +2177,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else if (instr->InputAt(0)->IsFloatRegister() ||
instr->InputAt(0)->IsDoubleRegister()) {
// TODO(titzer): use another machine instruction?
- __ subq(rsp, Immediate(kDoubleSize));
+ __ AllocateStackSpace(kDoubleSize);
frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
} else if (instr->InputAt(0)->IsSimd128Register()) {
// TODO(titzer): use another machine instruction?
- __ subq(rsp, Immediate(kSimd128Size));
+ __ AllocateStackSpace(kSimd128Size);
frame_access_state()->IncreaseSPDelta(kSimd128Size /
kSystemPointerSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
@@ -2208,7 +2201,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK(instr->InputAt(0)->IsSimd128StackSlot());
__ Movups(kScratchDoubleReg, i.InputOperand(0));
// TODO(titzer): use another machine instruction?
- __ subq(rsp, Immediate(kSimd128Size));
+ __ AllocateStackSpace(kSimd128Size);
frame_access_state()->IncreaseSPDelta(kSimd128Size /
kSystemPointerSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
@@ -3750,7 +3743,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ pushq(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3762,6 +3756,10 @@ void CodeGenerator::AssembleConstructFrame() {
kWasmInstanceRegister,
FieldOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ pushq(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ }
}
}
@@ -3813,8 +3811,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ near_call(wasm::WasmCode::kWasmStackOverflow,
RelocInfo::WASM_STUB_CALL);
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
- RecordSafepoint(reference_map, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
@@ -3825,7 +3822,7 @@ void CodeGenerator::AssembleConstructFrame() {
(kQuadWordSize / kSystemPointerSize);
required_slots -= frame()->GetReturnSlotCount();
if (required_slots > 0) {
- __ subq(rsp, Immediate(required_slots * kSystemPointerSize));
+ __ AllocateStackSpace(required_slots * kSystemPointerSize);
}
}
@@ -3833,7 +3830,7 @@ void CodeGenerator::AssembleConstructFrame() {
const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
const int stack_size = saves_fp_count * kQuadWordSize;
// Adjust the stack pointer.
- __ subq(rsp, Immediate(stack_size));
+ __ AllocateStackSpace(stack_size);
// Store the registers on the stack.
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
@@ -3853,7 +3850,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Allocate return slots (located after callee-saved).
if (frame()->GetReturnSlotCount() > 0) {
- __ subq(rsp, Immediate(frame()->GetReturnSlotCount() * kSystemPointerSize));
+ __ AllocateStackSpace(frame()->GetReturnSlotCount() * kSystemPointerSize);
}
}
@@ -4194,7 +4191,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
default:
UNREACHABLE();
- break;
}
}
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index a3cb90f42a..a20590b8d3 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -9,7 +9,7 @@
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
@@ -240,33 +240,18 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
case MachineRepresentation::kWord32:
opcode = kX64Movl;
break;
-#ifdef V8_COMPRESS_POINTERS
- case MachineRepresentation::kTaggedSigned:
- opcode = kX64MovqDecompressTaggedSigned;
- break;
- case MachineRepresentation::kTaggedPointer:
- opcode = kX64MovqDecompressTaggedPointer;
- break;
- case MachineRepresentation::kTagged:
- opcode = kX64MovqDecompressAnyTagged;
- break;
case MachineRepresentation::kCompressedSigned: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
+#ifdef V8_COMPRESS_POINTERS
opcode = kX64Movl;
break;
#else
- case MachineRepresentation::kCompressedSigned: // Fall through.
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed:
UNREACHABLE();
- break;
+#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged:
- opcode = kX64Movq;
- break;
-#endif
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
@@ -275,7 +260,6 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
break;
case MachineRepresentation::kNone:
UNREACHABLE();
- break;
}
return opcode;
}
@@ -284,46 +268,30 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
case MachineRepresentation::kFloat32:
return kX64Movss;
- break;
case MachineRepresentation::kFloat64:
return kX64Movsd;
- break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
return kX64Movb;
- break;
case MachineRepresentation::kWord16:
return kX64Movw;
- break;
case MachineRepresentation::kWord32:
return kX64Movl;
- break;
-#ifdef V8_COMPRESS_POINTERS
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged:
- return kX64MovqCompressTagged;
case MachineRepresentation::kCompressedSigned: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
- return kX64Movl;
+#ifdef V8_COMPRESS_POINTERS
+ return kX64MovqCompressTagged;
#else
- case MachineRepresentation::kCompressedSigned: // Fall through.
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed:
UNREACHABLE();
+#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged:
- return kX64Movq;
- break;
-#endif
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64:
return kX64Movq;
- break;
case MachineRepresentation::kSimd128: // Fall through.
return kX64Movdqu;
- break;
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -380,7 +348,7 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK(CanBeTaggedPointer(store_rep.representation()));
+ DCHECK(CanBeTaggedOrCompressedPointer(store_rep.representation()));
AddressingMode addressing_mode;
InstructionOperand inputs[] = {
g.UseUniqueRegister(base),
@@ -2405,7 +2373,6 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
@@ -2428,7 +2395,6 @@ void InstructionSelector::VisitWord64AtomicStore(Node* node) {
break;
default:
UNREACHABLE();
- return;
}
VisitAtomicExchange(this, node, opcode);
}
@@ -3029,7 +2995,10 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
// pshufd takes a single imm8 shuffle mask.
opcode = kX64S32x4Swizzle;
no_same_as_first = true;
- src0_needs_reg = false;
+ // TODO(v8:9083): This doesn't strictly require a register, forcing the
+ // swizzles to always use registers until generation of incorrect memory
+ // operands can be fixed.
+ src0_needs_reg = true;
imms[imm_count++] = shuffle_mask;
}
} else {
diff --git a/deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.h b/deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.h
index f460cbca99..c85ad46a63 100644
--- a/deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.h
+++ b/deps/v8/src/compiler/backend/x64/unwinding-info-writer-x64.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_BACKEND_X64_UNWINDING_INFO_WRITER_X64_H_
#define V8_COMPILER_BACKEND_X64_UNWINDING_INFO_WRITER_X64_H_
-#include "src/eh-frame.h"
-#include "src/flags.h"
+#include "src/diagnostics/eh-frame.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 803150fdfe..c2548b7726 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -6,14 +6,14 @@
#include <sstream>
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/schedule.h"
-#include "src/objects-inl.h"
-#include "src/optimized-compilation-info.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.h b/deps/v8/src/compiler/basic-block-instrumentor.h
index 620f38d535..c8bc94c16b 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.h
+++ b/deps/v8/src/compiler/basic-block-instrumentor.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
#define V8_COMPILER_BASIC_BLOCK_INSTRUMENTOR_H_
-#include "src/allocation.h"
-#include "src/basic-block-profiler.h"
+#include "src/diagnostics/basic-block-profiler.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 23881ebd0a..2730da9c75 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -6,10 +6,10 @@
#define V8_COMPILER_BRANCH_ELIMINATION_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/functional-list.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/node-aux-data.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 7da43cc375..9c23cd460a 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -6,8 +6,8 @@
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -538,6 +538,12 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
}
}
+ DCHECK(do_liveness_analysis_);
+ if (FLAG_trace_environment_liveness) {
+ StdoutStream of;
+ PrintLivenessTo(of);
+ }
+
DCHECK(LivenessIsValid());
}
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index bc788943d7..53f86ca306 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -6,11 +6,11 @@
#define V8_COMPILER_BYTECODE_ANALYSIS_H_
#include "src/base/hashmap.h"
-#include "src/bit-vector.h"
#include "src/compiler/bytecode-liveness-map.h"
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/interpreter/bytecode-register.h"
-#include "src/utils.h"
+#include "src/utils/bit-vector.h"
+#include "src/utils/utils.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -35,8 +35,8 @@ class V8_EXPORT_PRIVATE BytecodeLoopAssignments {
int local_count() const { return bit_vector_->length() - parameter_count_; }
private:
- int parameter_count_;
- BitVector* bit_vector_;
+ int const parameter_count_;
+ BitVector* const bit_vector_;
};
// Jump targets for resuming a suspended generator.
@@ -129,8 +129,6 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis {
// Gets the out-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetOutLivenessFor(int offset) const;
- std::ostream& PrintLivenessTo(std::ostream& os) const;
-
private:
struct LoopStackEntry {
int header_offset;
@@ -152,10 +150,11 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis {
Zone* zone() const { return zone_; }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
- private:
- Handle<BytecodeArray> bytecode_array_;
- bool do_liveness_analysis_;
- Zone* zone_;
+ std::ostream& PrintLivenessTo(std::ostream& os) const;
+
+ Handle<BytecodeArray> const bytecode_array_;
+ bool const do_liveness_analysis_;
+ Zone* const zone_;
ZoneStack<LoopStackEntry> loop_stack_;
ZoneVector<int> loop_end_index_queue_;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 086003276f..0ab8f85670 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -5,26 +5,448 @@
#include "src/compiler/bytecode-graph-builder.h"
#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
+#include "src/codegen/source-position-table.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/compiler/state-values-utils.h"
+#include "src/compiler/vector-slot-pair.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecodes.h"
-#include "src/objects-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/objects/template-objects-inl.h"
-#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
namespace compiler {
+class BytecodeGraphBuilder {
+ public:
+ BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone,
+ Handle<BytecodeArray> bytecode_array,
+ Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector,
+ BailoutId osr_offset, JSGraph* jsgraph,
+ CallFrequency const& invocation_frequency,
+ SourcePositionTable* source_positions,
+ Handle<Context> native_context, int inlining_id,
+ BytecodeGraphBuilderFlags flags);
+
+ // Creates a graph by visiting bytecodes.
+ void CreateGraph();
+
+ private:
+ class Environment;
+ class OsrIteratorState;
+ struct SubEnvironment;
+
+ void RemoveMergeEnvironmentsBeforeOffset(int limit_offset);
+ void AdvanceToOsrEntryAndPeelLoops();
+
+ // Advance {bytecode_iterator} to the given offset. If possible, also advance
+ // {source_position_iterator} while updating the source position table.
+ void AdvanceIteratorsTo(int bytecode_offset);
+
+ void VisitSingleBytecode();
+ void VisitBytecodes();
+
+ // Get or create the node that represents the outer function closure.
+ Node* GetFunctionClosure();
+
+ // Builder for loading a native context field.
+ Node* BuildLoadNativeContextField(int index);
+
+ // Helper function for creating a pair containing type feedback vector and
+ // a feedback slot.
+ VectorSlotPair CreateVectorSlotPair(int slot_id);
+
+ void set_environment(Environment* env) { environment_ = env; }
+ const Environment* environment() const { return environment_; }
+ Environment* environment() { return environment_; }
+
+ // Node creation helpers
+ Node* NewNode(const Operator* op, bool incomplete = false) {
+ return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1) {
+ Node* buffer[] = {n1};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+ Node* buffer[] = {n1, n2};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+ Node* buffer[] = {n1, n2, n3};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+ Node* buffer[] = {n1, n2, n3, n4};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+ Node* n5, Node* n6) {
+ Node* buffer[] = {n1, n2, n3, n4, n5, n6};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ // Helpers to create new control nodes.
+ Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+ Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewIfValue(int32_t value) { return NewNode(common()->IfValue(value)); }
+ Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
+ Node* NewMerge() { return NewNode(common()->Merge(1), true); }
+ Node* NewLoop() { return NewNode(common()->Loop(1), true); }
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone,
+ IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) {
+ return NewNode(common()->Branch(hint, is_safety_check), condition);
+ }
+ Node* NewSwitch(Node* condition, int control_output_count) {
+ return NewNode(common()->Switch(control_output_count), condition);
+ }
+
+ // Creates a new Phi node having {count} input values.
+ Node* NewPhi(int count, Node* input, Node* control);
+ Node* NewEffectPhi(int count, Node* input, Node* control);
+
+ // Helpers for merging control, effect or value dependencies.
+ Node* MergeControl(Node* control, Node* other);
+ Node* MergeEffect(Node* effect, Node* other_effect, Node* control);
+ Node* MergeValue(Node* value, Node* other_value, Node* control);
+
+ // The main node creation chokepoint. Adds context, frame state, effect,
+ // and control dependencies depending on the operator.
+ Node* MakeNode(const Operator* op, int value_input_count,
+ Node* const* value_inputs, bool incomplete);
+
+ Node** EnsureInputBufferSize(int size);
+
+ Node* const* GetCallArgumentsFromRegisters(Node* callee, Node* receiver,
+ interpreter::Register first_arg,
+ int arg_count);
+ Node* const* ProcessCallVarArgs(ConvertReceiverMode receiver_mode,
+ Node* callee, interpreter::Register first_reg,
+ int arg_count);
+ Node* ProcessCallArguments(const Operator* call_op, Node* const* args,
+ int arg_count);
+ Node* ProcessCallArguments(const Operator* call_op, Node* callee,
+ interpreter::Register receiver, size_t reg_count);
+ Node* const* GetConstructArgumentsFromRegister(
+ Node* target, Node* new_target, interpreter::Register first_arg,
+ int arg_count);
+ Node* ProcessConstructArguments(const Operator* op, Node* const* args,
+ int arg_count);
+ Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
+ interpreter::Register receiver,
+ size_t reg_count);
+
+ // Prepare information for eager deoptimization. This information is carried
+ // by dedicated {Checkpoint} nodes that are wired into the effect chain.
+ // Conceptually this frame state is "before" a given operation.
+ void PrepareEagerCheckpoint();
+
+ // Prepare information for lazy deoptimization. This information is attached
+ // to the given node and the output value produced by the node is combined.
+ // Conceptually this frame state is "after" a given operation.
+ void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
+
+ void BuildCreateArguments(CreateArgumentsType type);
+ Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
+ TypeofMode typeof_mode);
+
+ enum class StoreMode {
+ // Check the prototype chain before storing.
+ kNormal,
+ // Store value to the receiver without checking the prototype chain.
+ kOwn,
+ };
+ void BuildNamedStore(StoreMode store_mode);
+ void BuildLdaLookupSlot(TypeofMode typeof_mode);
+ void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
+ void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
+ void BuildCallVarArgs(ConvertReceiverMode receiver_mode);
+ void BuildCall(ConvertReceiverMode receiver_mode, Node* const* args,
+ size_t arg_count, int slot_id);
+ void BuildCall(ConvertReceiverMode receiver_mode,
+ std::initializer_list<Node*> args, int slot_id) {
+ BuildCall(receiver_mode, args.begin(), args.size(), slot_id);
+ }
+ void BuildUnaryOp(const Operator* op);
+ void BuildBinaryOp(const Operator* op);
+ void BuildBinaryOpWithImmediate(const Operator* op);
+ void BuildCompareOp(const Operator* op);
+ void BuildDelete(LanguageMode language_mode);
+ void BuildCastOperator(const Operator* op);
+ void BuildHoleCheckAndThrow(Node* condition, Runtime::FunctionId runtime_id,
+ Node* name = nullptr);
+
+ // Optional early lowering to the simplified operator level. Note that
+ // the result has already been wired into the environment just like
+ // any other invocation of {NewNode} would do.
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedUnaryOp(
+ const Operator* op, Node* operand, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedBinaryOp(
+ const Operator* op, Node* left, Node* right, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedForInNext(
+ Node* receiver, Node* cache_array, Node* cache_type, Node* index,
+ FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedForInPrepare(
+ Node* receiver, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedToNumber(
+ Node* input, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedCall(const Operator* op,
+ Node* const* args,
+ int arg_count,
+ FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedConstruct(
+ const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadNamed(
+ const Operator* op, Node* receiver, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadKeyed(
+ const Operator* op, Node* receiver, Node* key, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedStoreNamed(
+ const Operator* op, Node* receiver, Node* value, FeedbackSlot slot);
+ JSTypeHintLowering::LoweringResult TryBuildSimplifiedStoreKeyed(
+ const Operator* op, Node* receiver, Node* key, Node* value,
+ FeedbackSlot slot);
+
+ // Applies the given early reduction onto the current environment.
+ void ApplyEarlyReduction(JSTypeHintLowering::LoweringResult reduction);
+
+ // Check the context chain for extensions, for lookup fast paths.
+ Environment* CheckContextExtensions(uint32_t depth);
+
+ // Helper function to create binary operation hint from the recorded
+ // type feedback.
+ BinaryOperationHint GetBinaryOperationHint(int operand_index);
+
+ // Helper function to create compare operation hint from the recorded
+ // type feedback.
+ CompareOperationHint GetCompareOperationHint();
+
+ // Helper function to create for-in mode from the recorded type feedback.
+ ForInMode GetForInMode(int operand_index);
+
+ // Helper function to compute call frequency from the recorded type
+ // feedback.
+ CallFrequency ComputeCallFrequency(int slot_id) const;
+
+ // Helper function to extract the speculation mode from the recorded type
+ // feedback.
+ SpeculationMode GetSpeculationMode(int slot_id) const;
+
+ // Control flow plumbing.
+ void BuildJump();
+ void BuildJumpIf(Node* condition);
+ void BuildJumpIfNot(Node* condition);
+ void BuildJumpIfEqual(Node* comperand);
+ void BuildJumpIfNotEqual(Node* comperand);
+ void BuildJumpIfTrue();
+ void BuildJumpIfFalse();
+ void BuildJumpIfToBooleanTrue();
+ void BuildJumpIfToBooleanFalse();
+ void BuildJumpIfNotHole();
+ void BuildJumpIfJSReceiver();
+
+ void BuildSwitchOnSmi(Node* condition);
+ void BuildSwitchOnGeneratorState(
+ const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
+ bool allow_fallthrough_on_executing);
+
+ // Simulates control flow by forward-propagating environments.
+ void MergeIntoSuccessorEnvironment(int target_offset);
+ void BuildLoopHeaderEnvironment(int current_offset);
+ void SwitchToMergeEnvironment(int current_offset);
+
+ // Simulates control flow that exits the function body.
+ void MergeControlToLeaveFunction(Node* exit);
+
+ // Builds loop exit nodes for every exited loop between the current bytecode
+ // offset and {target_offset}.
+ void BuildLoopExitsForBranch(int target_offset);
+ void BuildLoopExitsForFunctionExit(const BytecodeLivenessState* liveness);
+ void BuildLoopExitsUntilLoop(int loop_offset,
+ const BytecodeLivenessState* liveness);
+
+ // Helper for building a return (from an actual return or a suspend).
+ void BuildReturn(const BytecodeLivenessState* liveness);
+
+ // Simulates entry and exit of exception handlers.
+ void ExitThenEnterExceptionHandlers(int current_offset);
+
+ // Update the current position of the {SourcePositionTable} to that of the
+ // bytecode at {offset}, if any.
+ void UpdateSourcePosition(int offset);
+
+ // Growth increment for the temporary buffer used to construct input lists to
+ // new nodes.
+ static const int kInputBufferSizeIncrement = 64;
+
+ // An abstract representation for an exception handler that is being
+ // entered and exited while the graph builder is iterating over the
+ // underlying bytecode. The exception handlers within the bytecode are
+ // well scoped, hence will form a stack during iteration.
+ struct ExceptionHandler {
+ int start_offset_; // Start offset of the handled area in the bytecode.
+ int end_offset_; // End offset of the handled area in the bytecode.
+ int handler_offset_; // Handler entry offset within the bytecode.
+ int context_register_; // Index of register holding handler context.
+ };
+
+ // Field accessors
+ Graph* graph() const { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+ Zone* graph_zone() const { return graph()->zone(); }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const { return jsgraph_->isolate(); }
+ JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
+ SimplifiedOperatorBuilder* simplified() const {
+ return jsgraph_->simplified();
+ }
+ Zone* local_zone() const { return local_zone_; }
+ const Handle<BytecodeArray>& bytecode_array() const {
+ return bytecode_array_;
+ }
+ const Handle<FeedbackVector>& feedback_vector() const {
+ return feedback_vector_;
+ }
+ const JSTypeHintLowering& type_hint_lowering() const {
+ return type_hint_lowering_;
+ }
+ const FrameStateFunctionInfo* frame_state_function_info() const {
+ return frame_state_function_info_;
+ }
+
+ SourcePositionTableIterator& source_position_iterator() {
+ return source_position_iterator_;
+ }
+
+ interpreter::BytecodeArrayIterator& bytecode_iterator() {
+ return bytecode_iterator_;
+ }
+
+ BytecodeAnalysis const& bytecode_analysis() const {
+ return bytecode_analysis_;
+ }
+
+ void RunBytecodeAnalysis() { bytecode_analysis_.Analyze(osr_offset_); }
+
+ int currently_peeled_loop_offset() const {
+ return currently_peeled_loop_offset_;
+ }
+
+ void set_currently_peeled_loop_offset(int offset) {
+ currently_peeled_loop_offset_ = offset;
+ }
+
+ bool skip_next_stack_check() const { return skip_next_stack_check_; }
+
+ void unset_skip_next_stack_check() { skip_next_stack_check_ = false; }
+
+ int current_exception_handler() { return current_exception_handler_; }
+
+ void set_current_exception_handler(int index) {
+ current_exception_handler_ = index;
+ }
+
+ bool needs_eager_checkpoint() const { return needs_eager_checkpoint_; }
+ void mark_as_needing_eager_checkpoint(bool value) {
+ needs_eager_checkpoint_ = value;
+ }
+
+ Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+
+ Handle<Context> native_context() const { return native_context_; }
+
+ JSHeapBroker* broker() const { return broker_; }
+
+#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
+ BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
+#undef DECLARE_VISIT_BYTECODE
+
+ JSHeapBroker* const broker_;
+ Zone* const local_zone_;
+ JSGraph* const jsgraph_;
+ CallFrequency const invocation_frequency_;
+ Handle<BytecodeArray> const bytecode_array_;
+ Handle<FeedbackVector> const feedback_vector_;
+ JSTypeHintLowering const type_hint_lowering_;
+ const FrameStateFunctionInfo* const frame_state_function_info_;
+ SourcePositionTableIterator source_position_iterator_;
+ interpreter::BytecodeArrayIterator bytecode_iterator_;
+ BytecodeAnalysis bytecode_analysis_;
+ Environment* environment_;
+ BailoutId const osr_offset_;
+ int currently_peeled_loop_offset_;
+ bool skip_next_stack_check_;
+
+ // Merge environments are snapshots of the environment at points where the
+ // control flow merges. This models a forward data flow propagation of all
+ // values from all predecessors of the merge in question. They are indexed by
+ // the bytecode offset.
+ ZoneMap<int, Environment*> merge_environments_;
+
+ // Generator merge environments are snapshots of the current resume
+ // environment, tracing back through loop headers to the resume switch of a
+ // generator. They allow us to model a single resume jump as several switch
+ // statements across loop headers, keeping those loop headers reducible,
+ // without having to merge the "executing" environments of the generator into
+ // the "resuming" ones. They are indexed by the suspend id of the resume.
+ ZoneMap<int, Environment*> generator_merge_environments_;
+
+ // Exception handlers currently entered by the iteration.
+ ZoneStack<ExceptionHandler> exception_handlers_;
+ int current_exception_handler_;
+
+ // Temporary storage for building node input lists.
+ int input_buffer_size_;
+ Node** input_buffer_;
+
+ // Optimization to only create checkpoints when the current position in the
+ // control-flow is not effect-dominated by another checkpoint already. All
+ // operations that do not have observable side-effects can be re-evaluated.
+ bool needs_eager_checkpoint_;
+
+ // Nodes representing values in the activation record.
+ SetOncePointer<Node> function_closure_;
+
+ // Control nodes that exit the function body.
+ ZoneVector<Node*> exit_controls_;
+
+ StateValuesCache state_values_cache_;
+
+ // The source position table, to be populated.
+ SourcePositionTable* const source_positions_;
+
+ SourcePosition const start_position_;
+
+ Handle<SharedFunctionInfo> const shared_info_;
+
+ // The native context for which we optimize.
+ Handle<Context> const native_context_;
+
+ static int const kBinaryOperationHintIndex = 1;
+ static int const kCountOperationHintIndex = 0;
+ static int const kBinaryOperationSmiHintIndex = 1;
+ static int const kUnaryOperationHintIndex = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
+};
+
// The abstract execution environment simulates the content of the interpreter
// register file. The environment performs SSA-renaming of all tracked nodes at
// split and merge points in the control flow.
@@ -516,30 +938,39 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
}
BytecodeGraphBuilder::BytecodeGraphBuilder(
- Zone* local_zone, Handle<BytecodeArray> bytecode_array,
+ JSHeapBroker* broker, Zone* local_zone,
+ Handle<BytecodeArray> bytecode_array,
Handle<SharedFunctionInfo> shared_info,
Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
- JSGraph* jsgraph, CallFrequency& invocation_frequency,
+ JSGraph* jsgraph, CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, Handle<Context> native_context,
- int inlining_id, JSTypeHintLowering::Flags flags, bool stack_check,
- bool analyze_environment_liveness)
- : local_zone_(local_zone),
+ int inlining_id, BytecodeGraphBuilderFlags flags)
+ : broker_(broker),
+ local_zone_(local_zone),
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
bytecode_array_(bytecode_array),
feedback_vector_(feedback_vector),
- type_hint_lowering_(jsgraph, feedback_vector, flags),
+ type_hint_lowering_(
+ jsgraph, feedback_vector,
+ (flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
+ ? JSTypeHintLowering::kBailoutOnUninitialized
+ : JSTypeHintLowering::kNoFlags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
bytecode_array->parameter_count(), bytecode_array->register_count(),
shared_info)),
- bytecode_iterator_(nullptr),
- bytecode_analysis_(nullptr),
+ source_position_iterator_(
+ handle(bytecode_array->SourcePositionTableIfCollected(), isolate())),
+ bytecode_iterator_(bytecode_array),
+ bytecode_analysis_(
+ bytecode_array, local_zone,
+ flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness),
environment_(nullptr),
osr_offset_(osr_offset),
currently_peeled_loop_offset_(-1),
- stack_check_(stack_check),
- analyze_environment_liveness_(analyze_environment_liveness),
+ skip_next_stack_check_(flags &
+ BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
merge_environments_(local_zone),
generator_merge_environments_(local_zone),
exception_handlers_(local_zone),
@@ -565,11 +996,9 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() {
}
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
- const Operator* op =
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
- Node* native_context = NewNode(op);
Node* result = NewNode(javascript()->LoadContext(0, index, true));
- NodeProperties::ReplaceContextInput(result, native_context);
+ NodeProperties::ReplaceContextInput(
+ result, jsgraph()->HeapConstant(native_context()));
return result;
}
@@ -580,18 +1009,21 @@ VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
}
void BytecodeGraphBuilder::CreateGraph() {
+ BytecodeArrayRef bytecode_array_ref(broker(), bytecode_array());
+
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
// Set up the basic structure of the graph. Outputs for {Start} are the formal
// parameters (including the receiver) plus new target, number of arguments,
// context and closure.
- int actual_parameter_count = bytecode_array()->parameter_count() + 4;
+ int actual_parameter_count = bytecode_array_ref.parameter_count() + 4;
graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
- Environment env(this, bytecode_array()->register_count(),
- bytecode_array()->parameter_count(),
- bytecode_array()->incoming_new_target_or_generator_register(),
- graph()->start());
+ Environment env(
+ this, bytecode_array_ref.register_count(),
+ bytecode_array_ref.parameter_count(),
+ bytecode_array_ref.incoming_new_target_or_generator_register(),
+ graph()->start());
set_environment(&env);
VisitBytecodes();
@@ -616,7 +1048,7 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
BailoutId bailout_id(bytecode_iterator().current_offset());
const BytecodeLivenessState* liveness_before =
- bytecode_analysis()->GetInLivenessFor(
+ bytecode_analysis().GetInLivenessFor(
bytecode_iterator().current_offset());
Node* frame_state_before = environment()->Checkpoint(
@@ -650,7 +1082,7 @@ void BytecodeGraphBuilder::PrepareFrameState(Node* node,
BailoutId bailout_id(bytecode_iterator().current_offset());
const BytecodeLivenessState* liveness_after =
- bytecode_analysis()->GetOutLivenessFor(
+ bytecode_analysis().GetOutLivenessFor(
bytecode_iterator().current_offset());
Node* frame_state_after =
@@ -659,6 +1091,13 @@ void BytecodeGraphBuilder::PrepareFrameState(Node* node,
}
}
+void BytecodeGraphBuilder::AdvanceIteratorsTo(int bytecode_offset) {
+ for (; bytecode_iterator().current_offset() != bytecode_offset;
+ bytecode_iterator().Advance()) {
+ UpdateSourcePosition(bytecode_iterator().current_offset());
+ }
+}
+
// Stores the state of the SourcePosition iterator, and the index to the
// current exception handlers stack. We need, during the OSR graph generation,
// to backup the states of these iterators at the LoopHeader offset of each
@@ -667,19 +1106,14 @@ void BytecodeGraphBuilder::PrepareFrameState(Node* node,
// the source position can be achieved.
class BytecodeGraphBuilder::OsrIteratorState {
public:
- OsrIteratorState(interpreter::BytecodeArrayIterator* iterator,
- SourcePositionTableIterator* source_position_iterator,
- BytecodeGraphBuilder* graph_builder)
- : iterator_(iterator),
- source_position_iterator_(source_position_iterator),
- graph_builder_(graph_builder),
+ explicit OsrIteratorState(BytecodeGraphBuilder* graph_builder)
+ : graph_builder_(graph_builder),
saved_states_(graph_builder->local_zone()) {}
void ProcessOsrPrelude() {
ZoneVector<int> outer_loop_offsets(graph_builder_->local_zone());
-
- const BytecodeAnalysis& bytecode_analysis =
- *(graph_builder_->bytecode_analysis());
+ BytecodeAnalysis const& bytecode_analysis =
+ graph_builder_->bytecode_analysis();
int osr_offset = bytecode_analysis.osr_entry_point();
// We find here the outermost loop which contains the OSR loop.
@@ -691,15 +1125,7 @@ class BytecodeGraphBuilder::OsrIteratorState {
}
outermost_loop_offset =
outer_loop_offsets.empty() ? osr_offset : outer_loop_offsets.back();
-
- // We will not processs any bytecode before the outermost_loop_offset, but
- // the source_position_iterator needs to be advanced step by step through
- // the bytecode.
- for (; iterator_->current_offset() != outermost_loop_offset;
- iterator_->Advance()) {
- graph_builder_->UpdateSourcePosition(source_position_iterator_,
- iterator_->current_offset());
- }
+ graph_builder_->AdvanceIteratorsTo(outermost_loop_offset);
// We save some iterators states at the offsets of the loop headers of the
// outer loops (the ones containing the OSR loop). They will be used for
@@ -707,24 +1133,16 @@ class BytecodeGraphBuilder::OsrIteratorState {
for (ZoneVector<int>::const_reverse_iterator it =
outer_loop_offsets.crbegin();
it != outer_loop_offsets.crend(); ++it) {
- int next_loop_offset = *it;
- for (; iterator_->current_offset() != next_loop_offset;
- iterator_->Advance()) {
- graph_builder_->UpdateSourcePosition(source_position_iterator_,
- iterator_->current_offset());
- }
+ graph_builder_->AdvanceIteratorsTo(*it);
graph_builder_->ExitThenEnterExceptionHandlers(
- iterator_->current_offset());
- saved_states_.push(
- IteratorsStates(graph_builder_->current_exception_handler(),
- source_position_iterator_->GetState()));
+ graph_builder_->bytecode_iterator().current_offset());
+ saved_states_.push(IteratorsStates(
+ graph_builder_->current_exception_handler(),
+ graph_builder_->source_position_iterator().GetState()));
}
// Finishing by advancing to the OSR entry
- for (; iterator_->current_offset() != osr_offset; iterator_->Advance()) {
- graph_builder_->UpdateSourcePosition(source_position_iterator_,
- iterator_->current_offset());
- }
+ graph_builder_->AdvanceIteratorsTo(osr_offset);
// Enters all remaining exception handler which end before the OSR loop
// so that on next call of VisitSingleBytecode they will get popped from
@@ -735,12 +1153,13 @@ class BytecodeGraphBuilder::OsrIteratorState {
}
void RestoreState(int target_offset, int new_parent_offset) {
- iterator_->SetOffset(target_offset);
+ graph_builder_->bytecode_iterator().SetOffset(target_offset);
// In case of a return, we must not build loop exits for
// not-yet-built outer loops.
graph_builder_->set_currently_peeled_loop_offset(new_parent_offset);
IteratorsStates saved_state = saved_states_.top();
- source_position_iterator_->RestoreState(saved_state.source_iterator_state_);
+ graph_builder_->source_position_iterator().RestoreState(
+ saved_state.source_iterator_state_);
graph_builder_->set_current_exception_handler(
saved_state.exception_handler_index_);
saved_states_.pop();
@@ -758,8 +1177,6 @@ class BytecodeGraphBuilder::OsrIteratorState {
source_iterator_state_(source_iterator_state) {}
};
- interpreter::BytecodeArrayIterator* iterator_;
- SourcePositionTableIterator* source_position_iterator_;
BytecodeGraphBuilder* graph_builder_;
ZoneStack<IteratorsStates> saved_states_;
};
@@ -778,15 +1195,11 @@ void BytecodeGraphBuilder::RemoveMergeEnvironmentsBeforeOffset(
// We will iterate through the OSR loop, then its parent, and so on
// until we have reached the outmost loop containing the OSR loop. We do
// not generate nodes for anything before the outermost loop.
-void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops(
- interpreter::BytecodeArrayIterator* iterator,
- SourcePositionTableIterator* source_position_iterator) {
- const BytecodeAnalysis& analysis = *(bytecode_analysis());
- int osr_offset = analysis.osr_entry_point();
- OsrIteratorState iterator_states(iterator, source_position_iterator, this);
-
+void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
+ OsrIteratorState iterator_states(this);
iterator_states.ProcessOsrPrelude();
- DCHECK_EQ(iterator->current_offset(), osr_offset);
+ int osr_offset = bytecode_analysis().osr_entry_point();
+ DCHECK_EQ(bytecode_iterator().current_offset(), osr_offset);
environment()->FillWithOsrValues();
@@ -804,27 +1217,29 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops(
// parent loop entirely, and so on.
int current_parent_offset =
- analysis.GetLoopInfoFor(osr_offset).parent_offset();
+ bytecode_analysis().GetLoopInfoFor(osr_offset).parent_offset();
while (current_parent_offset != -1) {
const LoopInfo& current_parent_loop =
- analysis.GetLoopInfoFor(current_parent_offset);
+ bytecode_analysis().GetLoopInfoFor(current_parent_offset);
// We iterate until the back edge of the parent loop, which we detect by
// the offset that the JumpLoop targets.
- for (; !iterator->done(); iterator->Advance()) {
- if (iterator->current_bytecode() == interpreter::Bytecode::kJumpLoop &&
- iterator->GetJumpTargetOffset() == current_parent_offset) {
+ for (; !bytecode_iterator().done(); bytecode_iterator().Advance()) {
+ if (bytecode_iterator().current_bytecode() ==
+ interpreter::Bytecode::kJumpLoop &&
+ bytecode_iterator().GetJumpTargetOffset() == current_parent_offset) {
// Reached the end of the current parent loop.
break;
}
- VisitSingleBytecode(source_position_iterator);
+ VisitSingleBytecode();
}
- DCHECK(!iterator->done()); // Should have found the loop's jump target.
+ DCHECK(!bytecode_iterator()
+ .done()); // Should have found the loop's jump target.
// We also need to take care of the merge environments and exceptions
// handlers here because the omitted JumpLoop bytecode can still be the
// target of jumps or the first bytecode after a try block.
- ExitThenEnterExceptionHandlers(iterator->current_offset());
- SwitchToMergeEnvironment(iterator->current_offset());
+ ExitThenEnterExceptionHandlers(bytecode_iterator().current_offset());
+ SwitchToMergeEnvironment(bytecode_iterator().current_offset());
// This jump is the jump of our parent loop, which is not yet created.
// So we do not build the jump nodes, but restore the bytecode and the
@@ -838,18 +1253,16 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops(
// Completely clearing the environment is not possible because merge
// environments for forward jumps out of the loop need to be preserved
// (e.g. a return or a labeled break in the middle of a loop).
- RemoveMergeEnvironmentsBeforeOffset(iterator->current_offset());
+ RemoveMergeEnvironmentsBeforeOffset(bytecode_iterator().current_offset());
iterator_states.RestoreState(current_parent_offset,
current_parent_loop.parent_offset());
current_parent_offset = current_parent_loop.parent_offset();
}
}
-void BytecodeGraphBuilder::VisitSingleBytecode(
- SourcePositionTableIterator* source_position_iterator) {
- const interpreter::BytecodeArrayIterator& iterator = bytecode_iterator();
- int current_offset = iterator.current_offset();
- UpdateSourcePosition(source_position_iterator, current_offset);
+void BytecodeGraphBuilder::VisitSingleBytecode() {
+ int current_offset = bytecode_iterator().current_offset();
+ UpdateSourcePosition(current_offset);
ExitThenEnterExceptionHandlers(current_offset);
DCHECK_GE(exception_handlers_.empty() ? current_offset
: exception_handlers_.top().end_offset_,
@@ -858,15 +1271,13 @@ void BytecodeGraphBuilder::VisitSingleBytecode(
if (environment() != nullptr) {
BuildLoopHeaderEnvironment(current_offset);
-
- // Skip the first stack check if stack_check is false
- if (!stack_check() &&
- iterator.current_bytecode() == interpreter::Bytecode::kStackCheck) {
- set_stack_check(true);
+ if (skip_next_stack_check() && bytecode_iterator().current_bytecode() ==
+ interpreter::Bytecode::kStackCheck) {
+ unset_skip_next_stack_check();
return;
}
- switch (iterator.current_bytecode()) {
+ switch (bytecode_iterator().current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
Visit##name(); \
@@ -878,41 +1289,28 @@ void BytecodeGraphBuilder::VisitSingleBytecode(
}
void BytecodeGraphBuilder::VisitBytecodes() {
- BytecodeAnalysis bytecode_analysis(bytecode_array(), local_zone(),
- analyze_environment_liveness());
- bytecode_analysis.Analyze(osr_offset_);
- set_bytecode_analysis(&bytecode_analysis);
+ RunBytecodeAnalysis();
- interpreter::BytecodeArrayIterator iterator(bytecode_array());
- set_bytecode_iterator(&iterator);
- SourcePositionTableIterator source_position_iterator(
- handle(bytecode_array()->SourcePositionTableIfCollected(), isolate()));
-
- if (analyze_environment_liveness() && FLAG_trace_environment_liveness) {
- StdoutStream of;
- bytecode_analysis.PrintLivenessTo(of);
- }
-
- if (!bytecode_analysis.resume_jump_targets().empty()) {
+ if (!bytecode_analysis().resume_jump_targets().empty()) {
environment()->BindGeneratorState(
jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
}
- if (bytecode_analysis.HasOsrEntryPoint()) {
+ if (bytecode_analysis().HasOsrEntryPoint()) {
// We peel the OSR loop and any outer loop containing it except that we
// leave the nodes corresponding to the whole outermost loop (including
// the last copies of the loops it contains) to be generated by the normal
// bytecode iteration below.
- AdvanceToOsrEntryAndPeelLoops(&iterator, &source_position_iterator);
+ AdvanceToOsrEntryAndPeelLoops();
}
bool has_one_shot_bytecode = false;
- for (; !iterator.done(); iterator.Advance()) {
+ for (; !bytecode_iterator().done(); bytecode_iterator().Advance()) {
if (interpreter::Bytecodes::IsOneShotBytecode(
- iterator.current_bytecode())) {
+ bytecode_iterator().current_bytecode())) {
has_one_shot_bytecode = true;
}
- VisitSingleBytecode(&source_position_iterator);
+ VisitSingleBytecode();
}
if (has_one_shot_bytecode) {
@@ -920,8 +1318,6 @@ void BytecodeGraphBuilder::VisitBytecodes() {
v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
}
- set_bytecode_analysis(nullptr);
- set_bytecode_iterator(nullptr);
DCHECK(exception_handlers_.empty());
}
@@ -1185,7 +1581,7 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
NewMerge();
} else {
slow_environment->Merge(environment(),
- bytecode_analysis()->GetInLivenessFor(
+ bytecode_analysis().GetInLivenessFor(
bytecode_iterator().current_offset()));
}
}
@@ -1237,7 +1633,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
}
fast_environment->Merge(environment(),
- bytecode_analysis()->GetOutLivenessFor(
+ bytecode_analysis().GetOutLivenessFor(
bytecode_iterator().current_offset()));
set_environment(fast_environment);
mark_as_needing_eager_checkpoint(true);
@@ -1290,7 +1686,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
}
fast_environment->Merge(environment(),
- bytecode_analysis()->GetOutLivenessFor(
+ bytecode_analysis().GetOutLivenessFor(
bytecode_iterator().current_offset()));
set_environment(fast_environment);
mark_as_needing_eager_checkpoint(true);
@@ -1630,7 +2026,7 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
// TODO(mstarzinger): Thread through number of elements. The below number is
// only an estimate and does not match {ArrayLiteral::values::length}.
int number_of_elements =
- array_boilerplate_description->constant_elements()->length();
+ array_boilerplate_description->constant_elements().length();
Node* literal = NewNode(javascript()->CreateLiteralArray(
array_boilerplate_description, pair, literal_flags, number_of_elements));
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
@@ -1700,7 +2096,7 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
// the JSArray constant here.
cached_value = TemplateObjectDescription::GetTemplateObject(
isolate(), native_context(), description, shared_info(), slot.ToInt());
- nexus.vector()->Set(slot, *cached_value);
+ nexus.vector().Set(slot, *cached_value);
} else {
cached_value =
handle(JSArray::cast(nexus.GetFeedback()->GetHeapObjectAssumeStrong()),
@@ -2130,7 +2526,7 @@ void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
}
void BytecodeGraphBuilder::VisitThrow() {
- BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
+ BuildLoopExitsForFunctionExit(bytecode_analysis().GetInLivenessFor(
bytecode_iterator().current_offset()));
Node* value = environment()->LookupAccumulator();
Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
@@ -2140,7 +2536,7 @@ void BytecodeGraphBuilder::VisitThrow() {
}
void BytecodeGraphBuilder::VisitAbort() {
- BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
+ BuildLoopExitsForFunctionExit(bytecode_analysis().GetInLivenessFor(
bytecode_iterator().current_offset()));
AbortReason reason =
static_cast<AbortReason>(bytecode_iterator().GetIndexOperand(0));
@@ -2150,7 +2546,7 @@ void BytecodeGraphBuilder::VisitAbort() {
}
void BytecodeGraphBuilder::VisitReThrow() {
- BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
+ BuildLoopExitsForFunctionExit(bytecode_analysis().GetInLivenessFor(
bytecode_iterator().current_offset()));
Node* value = environment()->LookupAccumulator();
NewNode(javascript()->CallRuntime(Runtime::kReThrow), value);
@@ -2166,7 +2562,7 @@ void BytecodeGraphBuilder::BuildHoleCheckAndThrow(
SubEnvironment sub_environment(this);
NewIfTrue();
- BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
+ BuildLoopExitsForFunctionExit(bytecode_analysis().GetInLivenessFor(
bytecode_iterator().current_offset()));
Node* node;
const Operator* op = javascript()->CallRuntime(runtime_id);
@@ -2796,7 +3192,7 @@ void BytecodeGraphBuilder::BuildReturn(const BytecodeLivenessState* liveness) {
}
void BytecodeGraphBuilder::VisitReturn() {
- BuildReturn(bytecode_analysis()->GetInLivenessFor(
+ BuildReturn(bytecode_analysis().GetInLivenessFor(
bytecode_iterator().current_offset()));
}
@@ -2817,7 +3213,9 @@ void BytecodeGraphBuilder::VisitIncBlockCounter() {
Node* coverage_array_slot =
jsgraph()->Constant(bytecode_iterator().GetIndexOperand(0));
- const Operator* op = javascript()->CallRuntime(Runtime::kIncBlockCounter);
+ // Lowered by js-intrinsic-lowering to call Builtins::kIncBlockCounter.
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kInlineIncBlockCounter);
NewNode(op, closure, coverage_array_slot);
}
@@ -2915,7 +3313,7 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
jsgraph()->Constant(bytecode_iterator().current_offset() +
(BytecodeArray::kHeaderSize - kHeapObjectTag));
- const BytecodeLivenessState* liveness = bytecode_analysis()->GetInLivenessFor(
+ const BytecodeLivenessState* liveness = bytecode_analysis().GetInLivenessFor(
bytecode_iterator().current_offset());
// Maybe overallocate the value list since we don't know how many registers
@@ -2958,7 +3356,7 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
// TODO(leszeks): This over-approximates the liveness at exit, only the
// accumulator should be live by this point.
- BuildReturn(bytecode_analysis()->GetInLivenessFor(
+ BuildReturn(bytecode_analysis().GetInLivenessFor(
bytecode_iterator().current_offset()));
}
@@ -3026,7 +3424,7 @@ void BytecodeGraphBuilder::VisitSwitchOnGeneratorState() {
NewNode(javascript()->GeneratorRestoreContext(), generator);
environment()->SetContext(generator_context);
- BuildSwitchOnGeneratorState(bytecode_analysis()->resume_jump_targets(),
+ BuildSwitchOnGeneratorState(bytecode_analysis().resume_jump_targets(),
false);
}
@@ -3041,9 +3439,8 @@ void BytecodeGraphBuilder::VisitResumeGenerator() {
// We assume we are restoring registers starting fromm index 0.
CHECK_EQ(0, first_reg.index());
- const BytecodeLivenessState* liveness =
- bytecode_analysis()->GetOutLivenessFor(
- bytecode_iterator().current_offset());
+ const BytecodeLivenessState* liveness = bytecode_analysis().GetOutLivenessFor(
+ bytecode_iterator().current_offset());
int parameter_count_without_receiver =
bytecode_array()->parameter_count() - 1;
@@ -3086,19 +3483,19 @@ void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
mark_as_needing_eager_checkpoint(true);
if (environment() != nullptr) {
it->second->Merge(environment(),
- bytecode_analysis()->GetInLivenessFor(current_offset));
+ bytecode_analysis().GetInLivenessFor(current_offset));
}
set_environment(it->second);
}
}
void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
- if (bytecode_analysis()->IsLoopHeader(current_offset)) {
+ if (bytecode_analysis().IsLoopHeader(current_offset)) {
mark_as_needing_eager_checkpoint(true);
const LoopInfo& loop_info =
- bytecode_analysis()->GetLoopInfoFor(current_offset);
+ bytecode_analysis().GetLoopInfoFor(current_offset);
const BytecodeLivenessState* liveness =
- bytecode_analysis()->GetInLivenessFor(current_offset);
+ bytecode_analysis().GetInLivenessFor(current_offset);
const auto& resume_jump_targets = loop_info.resume_jump_targets();
bool generate_suspend_switch = !resume_jump_targets.empty();
@@ -3140,7 +3537,7 @@ void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
} else {
// Merge any values which are live coming into the successor.
merge_environment->Merge(
- environment(), bytecode_analysis()->GetInLivenessFor(target_offset));
+ environment(), bytecode_analysis().GetInLivenessFor(target_offset));
}
set_environment(nullptr);
}
@@ -3155,15 +3552,15 @@ void BytecodeGraphBuilder::BuildLoopExitsForBranch(int target_offset) {
// Only build loop exits for forward edges.
if (target_offset > origin_offset) {
BuildLoopExitsUntilLoop(
- bytecode_analysis()->GetLoopOffsetFor(target_offset),
- bytecode_analysis()->GetInLivenessFor(target_offset));
+ bytecode_analysis().GetLoopOffsetFor(target_offset),
+ bytecode_analysis().GetInLivenessFor(target_offset));
}
}
void BytecodeGraphBuilder::BuildLoopExitsUntilLoop(
int loop_offset, const BytecodeLivenessState* liveness) {
int origin_offset = bytecode_iterator().current_offset();
- int current_loop = bytecode_analysis()->GetLoopOffsetFor(origin_offset);
+ int current_loop = bytecode_analysis().GetLoopOffsetFor(origin_offset);
// The limit_offset is the stop offset for building loop exists, used for OSR.
// It prevents the creations of loopexits for loops which do not exist.
loop_offset = std::max(loop_offset, currently_peeled_loop_offset_);
@@ -3171,7 +3568,7 @@ void BytecodeGraphBuilder::BuildLoopExitsUntilLoop(
while (loop_offset < current_loop) {
Node* loop_node = merge_environments_[current_loop]->GetControlDependency();
const LoopInfo& loop_info =
- bytecode_analysis()->GetLoopInfoFor(current_loop);
+ bytecode_analysis().GetLoopInfoFor(current_loop);
environment()->PrepareForLoopExit(loop_node, loop_info.assignments(),
liveness);
current_loop = loop_info.parent_offset();
@@ -3621,16 +4018,32 @@ Node* BytecodeGraphBuilder::MergeValue(Node* value, Node* other,
return value;
}
-void BytecodeGraphBuilder::UpdateSourcePosition(SourcePositionTableIterator* it,
- int offset) {
- if (it->done()) return;
- if (it->code_offset() == offset) {
+void BytecodeGraphBuilder::UpdateSourcePosition(int offset) {
+ if (source_position_iterator().done()) return;
+ if (source_position_iterator().code_offset() == offset) {
source_positions_->SetCurrentPosition(SourcePosition(
- it->source_position().ScriptOffset(), start_position_.InliningId()));
- it->Advance();
+ source_position_iterator().source_position().ScriptOffset(),
+ start_position_.InliningId()));
+ source_position_iterator().Advance();
} else {
- DCHECK_GT(it->code_offset(), offset);
- }
+ DCHECK_GT(source_position_iterator().code_offset(), offset);
+ }
+}
+
+void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
+ Handle<BytecodeArray> bytecode_array,
+ Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector,
+ BailoutId osr_offset, JSGraph* jsgraph,
+ CallFrequency const& invocation_frequency,
+ SourcePositionTable* source_positions,
+ Handle<Context> native_context, int inlining_id,
+ BytecodeGraphBuilderFlags flags) {
+ BytecodeGraphBuilder builder(broker, local_zone, bytecode_array, shared,
+ feedback_vector, osr_offset, jsgraph,
+ invocation_frequency, source_positions,
+ native_context, inlining_id, flags);
+ builder.CreateGraph();
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 99d9b64766..b9504a6086 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -5,447 +5,42 @@
#ifndef V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
#define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
-#include "src/compiler/bytecode-analysis.h"
-#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
#include "src/compiler/js-type-hint-lowering.h"
-#include "src/compiler/state-values-utils.h"
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/interpreter/bytecode-flags.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/source-position-table.h"
+#include "src/utils/utils.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
-class VectorSlotPair;
+class BytecodeArray;
+class FeedbackVector;
+class SharedFunctionInfo;
+class Zone;
namespace compiler {
-class Reduction;
+class JSGraph;
class SourcePositionTable;
-// The BytecodeGraphBuilder produces a high-level IR graph based on
-// interpreter bytecodes.
-class BytecodeGraphBuilder {
- public:
- BytecodeGraphBuilder(
- Zone* local_zone, Handle<BytecodeArray> bytecode_array,
- Handle<SharedFunctionInfo> shared, Handle<FeedbackVector> feedback_vector,
- BailoutId osr_offset, JSGraph* jsgraph,
- CallFrequency& invocation_frequency,
- SourcePositionTable* source_positions, Handle<Context> native_context,
- int inlining_id = SourcePosition::kNotInlined,
- JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags,
- bool stack_check = true, bool analyze_environment_liveness = true);
-
- // Creates a graph by visiting bytecodes.
- void CreateGraph();
-
- private:
- class Environment;
- class OsrIteratorState;
- struct SubEnvironment;
-
- void RemoveMergeEnvironmentsBeforeOffset(int limit_offset);
- void AdvanceToOsrEntryAndPeelLoops(
- interpreter::BytecodeArrayIterator* iterator,
- SourcePositionTableIterator* source_position_iterator);
-
- void VisitSingleBytecode(
- SourcePositionTableIterator* source_position_iterator);
- void VisitBytecodes();
-
- // Get or create the node that represents the outer function closure.
- Node* GetFunctionClosure();
-
- // Builder for loading the a native context field.
- Node* BuildLoadNativeContextField(int index);
-
- // Helper function for creating a pair containing type feedback vector and
- // a feedback slot.
- VectorSlotPair CreateVectorSlotPair(int slot_id);
-
- void set_environment(Environment* env) { environment_ = env; }
- const Environment* environment() const { return environment_; }
- Environment* environment() { return environment_; }
-
- // Node creation helpers
- Node* NewNode(const Operator* op, bool incomplete = false) {
- return MakeNode(op, 0, static_cast<Node**>(nullptr), incomplete);
- }
-
- Node* NewNode(const Operator* op, Node* n1) {
- Node* buffer[] = {n1};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2) {
- Node* buffer[] = {n1, n2};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
- Node* buffer[] = {n1, n2, n3};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
- Node* buffer[] = {n1, n2, n3, n4};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6) {
- Node* buffer[] = {n1, n2, n3, n4, n5, n6};
- return MakeNode(op, arraysize(buffer), buffer, false);
- }
-
- // Helpers to create new control nodes.
- Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
- Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
- Node* NewIfValue(int32_t value) { return NewNode(common()->IfValue(value)); }
- Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
- Node* NewMerge() { return NewNode(common()->Merge(1), true); }
- Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) {
- return NewNode(common()->Branch(hint, is_safety_check), condition);
- }
- Node* NewSwitch(Node* condition, int control_output_count) {
- return NewNode(common()->Switch(control_output_count), condition);
- }
-
- // Creates a new Phi node having {count} input values.
- Node* NewPhi(int count, Node* input, Node* control);
- Node* NewEffectPhi(int count, Node* input, Node* control);
-
- // Helpers for merging control, effect or value dependencies.
- Node* MergeControl(Node* control, Node* other);
- Node* MergeEffect(Node* effect, Node* other_effect, Node* control);
- Node* MergeValue(Node* value, Node* other_value, Node* control);
-
- // The main node creation chokepoint. Adds context, frame state, effect,
- // and control dependencies depending on the operator.
- Node* MakeNode(const Operator* op, int value_input_count,
- Node* const* value_inputs, bool incomplete);
-
- Node** EnsureInputBufferSize(int size);
-
- Node* const* GetCallArgumentsFromRegisters(Node* callee, Node* receiver,
- interpreter::Register first_arg,
- int arg_count);
- Node* const* ProcessCallVarArgs(ConvertReceiverMode receiver_mode,
- Node* callee, interpreter::Register first_reg,
- int arg_count);
- Node* ProcessCallArguments(const Operator* call_op, Node* const* args,
- int arg_count);
- Node* ProcessCallArguments(const Operator* call_op, Node* callee,
- interpreter::Register receiver, size_t reg_count);
- Node* const* GetConstructArgumentsFromRegister(
- Node* target, Node* new_target, interpreter::Register first_arg,
- int arg_count);
- Node* ProcessConstructArguments(const Operator* op, Node* const* args,
- int arg_count);
- Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
- interpreter::Register receiver,
- size_t reg_count);
-
- // Prepare information for eager deoptimization. This information is carried
- // by dedicated {Checkpoint} nodes that are wired into the effect chain.
- // Conceptually this frame state is "before" a given operation.
- void PrepareEagerCheckpoint();
-
- // Prepare information for lazy deoptimization. This information is attached
- // to the given node and the output value produced by the node is combined.
- // Conceptually this frame state is "after" a given operation.
- void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
-
- void BuildCreateArguments(CreateArgumentsType type);
- Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
- TypeofMode typeof_mode);
-
- enum class StoreMode {
- // Check the prototype chain before storing.
- kNormal,
- // Store value to the receiver without checking the prototype chain.
- kOwn,
- };
- void BuildNamedStore(StoreMode store_mode);
- void BuildLdaLookupSlot(TypeofMode typeof_mode);
- void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
- void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
- void BuildCallVarArgs(ConvertReceiverMode receiver_mode);
- void BuildCall(ConvertReceiverMode receiver_mode, Node* const* args,
- size_t arg_count, int slot_id);
- void BuildCall(ConvertReceiverMode receiver_mode,
- std::initializer_list<Node*> args, int slot_id) {
- BuildCall(receiver_mode, args.begin(), args.size(), slot_id);
- }
- void BuildUnaryOp(const Operator* op);
- void BuildBinaryOp(const Operator* op);
- void BuildBinaryOpWithImmediate(const Operator* op);
- void BuildCompareOp(const Operator* op);
- void BuildDelete(LanguageMode language_mode);
- void BuildCastOperator(const Operator* op);
- void BuildHoleCheckAndThrow(Node* condition, Runtime::FunctionId runtime_id,
- Node* name = nullptr);
-
- // Optional early lowering to the simplified operator level. Note that
- // the result has already been wired into the environment just like
- // any other invocation of {NewNode} would do.
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedUnaryOp(
- const Operator* op, Node* operand, FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedBinaryOp(
- const Operator* op, Node* left, Node* right, FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedForInNext(
- Node* receiver, Node* cache_array, Node* cache_type, Node* index,
- FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedForInPrepare(
- Node* receiver, FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedToNumber(
- Node* input, FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedCall(const Operator* op,
- Node* const* args,
- int arg_count,
- FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedConstruct(
- const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadNamed(
- const Operator* op, Node* receiver, FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedLoadKeyed(
- const Operator* op, Node* receiver, Node* key, FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedStoreNamed(
- const Operator* op, Node* receiver, Node* value, FeedbackSlot slot);
- JSTypeHintLowering::LoweringResult TryBuildSimplifiedStoreKeyed(
- const Operator* op, Node* receiver, Node* key, Node* value,
- FeedbackSlot slot);
-
- // Applies the given early reduction onto the current environment.
- void ApplyEarlyReduction(JSTypeHintLowering::LoweringResult reduction);
-
- // Check the context chain for extensions, for lookup fast paths.
- Environment* CheckContextExtensions(uint32_t depth);
-
- // Helper function to create binary operation hint from the recorded
- // type feedback.
- BinaryOperationHint GetBinaryOperationHint(int operand_index);
-
- // Helper function to create compare operation hint from the recorded
- // type feedback.
- CompareOperationHint GetCompareOperationHint();
-
- // Helper function to create for-in mode from the recorded type feedback.
- ForInMode GetForInMode(int operand_index);
-
- // Helper function to compute call frequency from the recorded type
- // feedback.
- CallFrequency ComputeCallFrequency(int slot_id) const;
-
- // Helper function to extract the speculation mode from the recorded type
- // feedback.
- SpeculationMode GetSpeculationMode(int slot_id) const;
-
- // Control flow plumbing.
- void BuildJump();
- void BuildJumpIf(Node* condition);
- void BuildJumpIfNot(Node* condition);
- void BuildJumpIfEqual(Node* comperand);
- void BuildJumpIfNotEqual(Node* comperand);
- void BuildJumpIfTrue();
- void BuildJumpIfFalse();
- void BuildJumpIfToBooleanTrue();
- void BuildJumpIfToBooleanFalse();
- void BuildJumpIfNotHole();
- void BuildJumpIfJSReceiver();
-
- void BuildSwitchOnSmi(Node* condition);
- void BuildSwitchOnGeneratorState(
- const ZoneVector<ResumeJumpTarget>& resume_jump_targets,
- bool allow_fallthrough_on_executing);
-
- // Simulates control flow by forward-propagating environments.
- void MergeIntoSuccessorEnvironment(int target_offset);
- void BuildLoopHeaderEnvironment(int current_offset);
- void SwitchToMergeEnvironment(int current_offset);
-
- // Simulates control flow that exits the function body.
- void MergeControlToLeaveFunction(Node* exit);
-
- // Builds loop exit nodes for every exited loop between the current bytecode
- // offset and {target_offset}.
- void BuildLoopExitsForBranch(int target_offset);
- void BuildLoopExitsForFunctionExit(const BytecodeLivenessState* liveness);
- void BuildLoopExitsUntilLoop(int loop_offset,
- const BytecodeLivenessState* liveness);
-
- // Helper for building a return (from an actual return or a suspend).
- void BuildReturn(const BytecodeLivenessState* liveness);
-
- // Simulates entry and exit of exception handlers.
- void ExitThenEnterExceptionHandlers(int current_offset);
-
- // Update the current position of the {SourcePositionTable} to that of the
- // bytecode at {offset}, if any.
- void UpdateSourcePosition(SourcePositionTableIterator* it, int offset);
-
- // Growth increment for the temporary buffer used to construct input lists to
- // new nodes.
- static const int kInputBufferSizeIncrement = 64;
-
- // An abstract representation for an exception handler that is being
- // entered and exited while the graph builder is iterating over the
- // underlying bytecode. The exception handlers within the bytecode are
- // well scoped, hence will form a stack during iteration.
- struct ExceptionHandler {
- int start_offset_; // Start offset of the handled area in the bytecode.
- int end_offset_; // End offset of the handled area in the bytecode.
- int handler_offset_; // Handler entry offset within the bytecode.
- int context_register_; // Index of register holding handler context.
- };
-
- // Field accessors
- Graph* graph() const { return jsgraph_->graph(); }
- CommonOperatorBuilder* common() const { return jsgraph_->common(); }
- Zone* graph_zone() const { return graph()->zone(); }
- JSGraph* jsgraph() const { return jsgraph_; }
- Isolate* isolate() const { return jsgraph_->isolate(); }
- JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
- SimplifiedOperatorBuilder* simplified() const {
- return jsgraph_->simplified();
- }
- Zone* local_zone() const { return local_zone_; }
- const Handle<BytecodeArray>& bytecode_array() const {
- return bytecode_array_;
- }
- const Handle<FeedbackVector>& feedback_vector() const {
- return feedback_vector_;
- }
- const JSTypeHintLowering& type_hint_lowering() const {
- return type_hint_lowering_;
- }
- const FrameStateFunctionInfo* frame_state_function_info() const {
- return frame_state_function_info_;
- }
-
- const interpreter::BytecodeArrayIterator& bytecode_iterator() const {
- return *bytecode_iterator_;
- }
-
- void set_bytecode_iterator(
- interpreter::BytecodeArrayIterator* bytecode_iterator) {
- bytecode_iterator_ = bytecode_iterator;
- }
-
- const BytecodeAnalysis* bytecode_analysis() const {
- return bytecode_analysis_;
- }
-
- void set_bytecode_analysis(const BytecodeAnalysis* bytecode_analysis) {
- bytecode_analysis_ = bytecode_analysis;
- }
-
- int currently_peeled_loop_offset() const {
- return currently_peeled_loop_offset_;
- }
-
- void set_currently_peeled_loop_offset(int offset) {
- currently_peeled_loop_offset_ = offset;
- }
-
- bool stack_check() const { return stack_check_; }
-
- void set_stack_check(bool stack_check) { stack_check_ = stack_check; }
-
- bool analyze_environment_liveness() const {
- return analyze_environment_liveness_;
- }
-
- int current_exception_handler() { return current_exception_handler_; }
-
- void set_current_exception_handler(int index) {
- current_exception_handler_ = index;
- }
-
- bool needs_eager_checkpoint() const { return needs_eager_checkpoint_; }
- void mark_as_needing_eager_checkpoint(bool value) {
- needs_eager_checkpoint_ = value;
- }
-
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
-
- Handle<Context> native_context() const { return native_context_; }
-
-#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
- BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
-#undef DECLARE_VISIT_BYTECODE
-
- Zone* local_zone_;
- JSGraph* jsgraph_;
- CallFrequency const invocation_frequency_;
- Handle<BytecodeArray> bytecode_array_;
- Handle<FeedbackVector> feedback_vector_;
- const JSTypeHintLowering type_hint_lowering_;
- const FrameStateFunctionInfo* frame_state_function_info_;
- const interpreter::BytecodeArrayIterator* bytecode_iterator_;
- const BytecodeAnalysis* bytecode_analysis_;
- Environment* environment_;
- BailoutId osr_offset_;
- int currently_peeled_loop_offset_;
- bool stack_check_;
- bool analyze_environment_liveness_;
-
- // Merge environments are snapshots of the environment at points where the
- // control flow merges. This models a forward data flow propagation of all
- // values from all predecessors of the merge in question. They are indexed by
- // the bytecode offset
- ZoneMap<int, Environment*> merge_environments_;
-
- // Generator merge environments are snapshots of the current resume
- // environment, tracing back through loop headers to the resume switch of a
- // generator. They allow us to model a single resume jump as several switch
- // statements across loop headers, keeping those loop headers reducible,
- // without having to merge the "executing" environments of the generator into
- // the "resuming" ones. They are indexed by the suspend id of the resume.
- ZoneMap<int, Environment*> generator_merge_environments_;
-
- // Exception handlers currently entered by the iteration.
- ZoneStack<ExceptionHandler> exception_handlers_;
- int current_exception_handler_;
-
- // Temporary storage for building node input lists.
- int input_buffer_size_;
- Node** input_buffer_;
-
- // Optimization to only create checkpoints when the current position in the
- // control-flow is not effect-dominated by another checkpoint already. All
- // operations that do not have observable side-effects can be re-evaluated.
- bool needs_eager_checkpoint_;
-
- // Nodes representing values in the activation record.
- SetOncePointer<Node> function_closure_;
-
- // Control nodes that exit the function body.
- ZoneVector<Node*> exit_controls_;
-
- StateValuesCache state_values_cache_;
-
- // The source position table, to be populated.
- SourcePositionTable* source_positions_;
-
- SourcePosition const start_position_;
-
- Handle<SharedFunctionInfo> const shared_info_;
-
- // The native context for which we optimize.
- Handle<Context> const native_context_;
-
- static int const kBinaryOperationHintIndex = 1;
- static int const kCountOperationHintIndex = 0;
- static int const kBinaryOperationSmiHintIndex = 1;
- static int const kUnaryOperationHintIndex = 0;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
+enum class BytecodeGraphBuilderFlag : uint8_t {
+ kSkipFirstStackCheck = 1 << 0,
+ kAnalyzeEnvironmentLiveness = 1 << 1,
+ kBailoutOnUninitialized = 1 << 2,
};
+using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
+
+// Note: {invocation_frequency} is taken by reference to work around a GCC bug
+// on AIX (v8:8193).
+void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
+ Handle<BytecodeArray> bytecode_array,
+ Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector,
+ BailoutId osr_offset, JSGraph* jsgraph,
+ CallFrequency const& invocation_frequency,
+ SourcePositionTable* source_positions,
+ Handle<Context> native_context, int inlining_id,
+ BytecodeGraphBuilderFlags flags);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/bytecode-liveness-map.h b/deps/v8/src/compiler/bytecode-liveness-map.h
index 03251f1367..b377b55ecb 100644
--- a/deps/v8/src/compiler/bytecode-liveness-map.h
+++ b/deps/v8/src/compiler/bytecode-liveness-map.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
#include "src/base/hashmap.h"
-#include "src/bit-vector.h"
+#include "src/utils/bit-vector.h"
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 1300cd258d..e472a6a72c 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
#include "src/compiler/linkage.h"
@@ -220,7 +220,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
// The target for C calls is always an address (i.e. machine pointer).
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor::Flags flags = CallDescriptor::kNoAllocate;
if (set_initialize_root_flag) {
flags |= CallDescriptor::kInitializeRootRegister;
}
diff --git a/deps/v8/src/compiler/checkpoint-elimination.h b/deps/v8/src/compiler/checkpoint-elimination.h
index 97e05c130d..a850dc8a14 100644
--- a/deps/v8/src/compiler/checkpoint-elimination.h
+++ b/deps/v8/src/compiler/checkpoint-elimination.h
@@ -6,8 +6,8 @@
#define V8_COMPILER_CHECKPOINT_ELIMINATION_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 7787892d32..d8a01d6308 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -6,7 +6,10 @@
#include <ostream>
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/macro-assembler.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
@@ -14,14 +17,11 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/schedule.h"
-#include "src/frames.h"
-#include "src/interface-descriptors.h"
+#include "src/execution/frames.h"
#include "src/interpreter/bytecodes.h"
-#include "src/machine-type.h"
-#include "src/macro-assembler.h"
-#include "src/memcopy.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
+#include "src/utils/memcopy.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -275,7 +275,7 @@ TNode<HeapObject> CodeAssembler::UntypedHeapConstant(
TNode<String> CodeAssembler::StringConstant(const char* str) {
Handle<String> internalized_string =
- factory()->InternalizeOneByteString(OneByteVector(str));
+ factory()->InternalizeString(OneByteVector(str));
return UncheckedCast<String>(HeapConstant(internalized_string));
}
@@ -425,6 +425,10 @@ void CodeAssembler::Comment(std::string str) {
raw_assembler()->Comment(str);
}
+void CodeAssembler::StaticAssert(TNode<BoolT> value) {
+ raw_assembler()->StaticAssert(value);
+}
+
void CodeAssembler::SetSourcePosition(const char* file, int line) {
raw_assembler()->SetSourcePosition(file, line);
}
@@ -941,14 +945,14 @@ Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
-Node* CodeAssembler::Load(MachineType rep, Node* base,
+Node* CodeAssembler::Load(MachineType type, Node* base,
LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(rep, base, needs_poisoning);
+ return raw_assembler()->Load(type, base, needs_poisoning);
}
-Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset,
+Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(rep, base, offset, needs_poisoning);
+ return raw_assembler()->Load(type, base, offset, needs_poisoning);
}
Node* CodeAssembler::LoadFullTagged(Node* base,
@@ -963,8 +967,13 @@ Node* CodeAssembler::LoadFullTagged(Node* base, Node* offset,
Load(MachineType::Pointer(), base, offset, needs_poisoning));
}
-Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
- return raw_assembler()->AtomicLoad(rep, base, offset);
+Node* CodeAssembler::AtomicLoad(MachineType type, Node* base, Node* offset) {
+ return raw_assembler()->AtomicLoad(type, base, offset);
+}
+
+Node* CodeAssembler::LoadFromObject(MachineType type, TNode<HeapObject> object,
+ TNode<IntPtrT> offset) {
+ return raw_assembler()->LoadFromObject(type, object, offset);
}
TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
@@ -992,13 +1001,51 @@ Node* CodeAssembler::Store(Node* base, Node* value) {
kFullWriteBarrier);
}
+void CodeAssembler::StoreToObject(MachineRepresentation rep,
+ TNode<HeapObject> object,
+ TNode<IntPtrT> offset, Node* value,
+ StoreToObjectWriteBarrier write_barrier) {
+ WriteBarrierKind write_barrier_kind;
+ switch (write_barrier) {
+ case StoreToObjectWriteBarrier::kFull:
+ write_barrier_kind = WriteBarrierKind::kFullWriteBarrier;
+ break;
+ case StoreToObjectWriteBarrier::kMap:
+ write_barrier_kind = WriteBarrierKind::kMapWriteBarrier;
+ break;
+ case StoreToObjectWriteBarrier::kNone:
+ if (CanBeTaggedPointer(rep)) {
+ write_barrier_kind = WriteBarrierKind::kAssertNoWriteBarrier;
+ } else {
+ write_barrier_kind = WriteBarrierKind::kNoWriteBarrier;
+ }
+ break;
+ }
+ raw_assembler()->StoreToObject(rep, object, offset, value,
+ write_barrier_kind);
+}
+
void CodeAssembler::OptimizedStoreField(MachineRepresentation rep,
TNode<HeapObject> object, int offset,
- Node* value,
- WriteBarrierKind write_barrier) {
+ Node* value) {
+ raw_assembler()->OptimizedStoreField(rep, object, offset, value,
+ WriteBarrierKind::kFullWriteBarrier);
+}
+
+void CodeAssembler::OptimizedStoreFieldAssertNoWriteBarrier(
+ MachineRepresentation rep, TNode<HeapObject> object, int offset,
+ Node* value) {
raw_assembler()->OptimizedStoreField(rep, object, offset, value,
- write_barrier);
+ WriteBarrierKind::kAssertNoWriteBarrier);
}
+
+void CodeAssembler::OptimizedStoreFieldUnsafeNoWriteBarrier(
+ MachineRepresentation rep, TNode<HeapObject> object, int offset,
+ Node* value) {
+ raw_assembler()->OptimizedStoreField(rep, object, offset, value,
+ WriteBarrierKind::kNoWriteBarrier);
+}
+
void CodeAssembler::OptimizedStoreMap(TNode<HeapObject> object,
TNode<Map> map) {
raw_assembler()->OptimizedStoreMap(object, map);
@@ -1016,11 +1063,26 @@ Node* CodeAssembler::StoreEphemeronKey(Node* base, Node* offset, Node* value) {
Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* value) {
- return raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
+ return raw_assembler()->Store(
+ rep, base, value,
+ CanBeTaggedPointer(rep) ? kAssertNoWriteBarrier : kNoWriteBarrier);
}
Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* offset, Node* value) {
+ return raw_assembler()->Store(
+ rep, base, offset, value,
+ CanBeTaggedPointer(rep) ? kAssertNoWriteBarrier : kNoWriteBarrier);
+}
+
+Node* CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
+ Node* base, Node* value) {
+ return raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
+}
+
+Node* CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
+ Node* base, Node* offset,
+ Node* value) {
return raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
}
@@ -1120,10 +1182,11 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
}
-TNode<HeapObject> CodeAssembler::OptimizedAllocate(TNode<IntPtrT> size,
- AllocationType allocation) {
- return UncheckedCast<HeapObject>(
- raw_assembler()->OptimizedAllocate(size, allocation));
+TNode<HeapObject> CodeAssembler::OptimizedAllocate(
+ TNode<IntPtrT> size, AllocationType allocation,
+ AllowLargeObjects allow_large_objects) {
+ return UncheckedCast<HeapObject>(raw_assembler()->OptimizedAllocate(
+ size, allocation, allow_large_objects));
}
void CodeAssembler::HandleException(Node* node) {
@@ -1186,7 +1249,8 @@ TNode<Object> CodeAssembler::CallRuntimeWithCEntryImpl(
int argc = static_cast<int>(args.size());
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
zone(), function, argc, Operator::kNoProperties,
- CallDescriptor::kNoFlags);
+ Runtime::MayAllocate(function) ? CallDescriptor::kNoFlags
+ : CallDescriptor::kNoAllocate);
Node* ref = ExternalConstant(ExternalReference::Create(function));
Node* arity = Int32Constant(argc);
@@ -1888,19 +1952,21 @@ Address CheckObjectType(Address raw_value, Address raw_type,
Smi type(raw_type);
String location = String::cast(Object(raw_location));
const char* expected;
- switch (static_cast<ObjectType>(type->value())) {
-#define TYPE_CASE(Name) \
- case ObjectType::k##Name: \
- if (value->Is##Name()) return Smi::FromInt(0).ptr(); \
- expected = #Name; \
+ switch (static_cast<ObjectType>(type.value())) {
+#define TYPE_CASE(Name) \
+ case ObjectType::k##Name: \
+ if (value.Is##Name()) return Smi::FromInt(0).ptr(); \
+ expected = #Name; \
break;
-#define TYPE_STRUCT_CASE(NAME, Name, name) \
- case ObjectType::k##Name: \
- if (value->Is##Name()) return Smi::FromInt(0).ptr(); \
- expected = #Name; \
+#define TYPE_STRUCT_CASE(NAME, Name, name) \
+ case ObjectType::k##Name: \
+ if (value.Is##Name()) return Smi::FromInt(0).ptr(); \
+ expected = #Name; \
break;
TYPE_CASE(Object)
+ TYPE_CASE(Smi)
+ TYPE_CASE(HeapObject)
OBJECT_TYPE_LIST(TYPE_CASE)
HEAP_OBJECT_TYPE_LIST(TYPE_CASE)
STRUCT_LIST(TYPE_STRUCT_CASE)
@@ -1908,11 +1974,11 @@ Address CheckObjectType(Address raw_value, Address raw_type,
#undef TYPE_STRUCT_CASE
}
std::stringstream value_description;
- value->Print(value_description);
- V8_Fatal(__FILE__, __LINE__,
- "Type cast failed in %s\n"
- " Expected %s but found %s",
- location->ToAsciiArray(), expected, value_description.str().c_str());
+ value.Print(value_description);
+ FATAL(
+ "Type cast failed in %s\n"
+ " Expected %s but found %s",
+ location.ToAsciiArray(), expected, value_description.str().c_str());
#else
UNREACHABLE();
#endif
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 166257d5ec..0f7ae64082 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -11,14 +11,13 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
-#include "src/allocation.h"
#include "src/base/macros.h"
+#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/globals.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/source-position.h"
#include "src/heap/heap.h"
-#include "src/machine-type.h"
-#include "src/objects.h"
#include "src/objects/arguments.h"
#include "src/objects/data-handler.h"
#include "src/objects/heap-number.h"
@@ -27,10 +26,10 @@
#include "src/objects/js-proxy.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/objects.h"
#include "src/objects/oddball.h"
#include "src/runtime/runtime.h"
-#include "src/source-position.h"
-#include "src/type-traits.h"
+#include "src/utils/allocation.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -67,14 +66,12 @@ class JSFinalizationGroupCleanupIterator;
class JSWeakMap;
class JSWeakRef;
class JSWeakSet;
-class MaybeObject;
class PromiseCapability;
class PromiseFulfillReactionJobTask;
class PromiseReaction;
class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
class WasmDebugInfo;
-class WeakCell;
class Zone;
template <typename T>
@@ -281,9 +278,12 @@ class int31_t {
#define ENUM_ELEMENT(Name) k##Name,
#define ENUM_STRUCT_ELEMENT(NAME, Name, name) k##Name,
enum class ObjectType {
- kObject,
- OBJECT_TYPE_LIST(ENUM_ELEMENT) HEAP_OBJECT_TYPE_LIST(ENUM_ELEMENT)
- STRUCT_LIST(ENUM_STRUCT_ELEMENT)
+ ENUM_ELEMENT(Object) //
+ ENUM_ELEMENT(Smi) //
+ ENUM_ELEMENT(HeapObject) //
+ OBJECT_TYPE_LIST(ENUM_ELEMENT) //
+ HEAP_OBJECT_TYPE_LIST(ENUM_ELEMENT) //
+ STRUCT_LIST(ENUM_STRUCT_ELEMENT) //
};
#undef ENUM_ELEMENT
#undef ENUM_STRUCT_ELEMENT
@@ -298,6 +298,8 @@ inline bool NeedsBoundsCheck(CheckBounds check_bounds) {
}
}
+enum class StoreToObjectWriteBarrier { kNone, kMap, kFull };
+
class AccessCheckNeeded;
class BigIntWrapper;
class ClassBoilerplate;
@@ -322,10 +324,12 @@ class StringWrapper;
class SymbolWrapper;
class Undetectable;
class UniqueName;
+class WasmCapiFunctionData;
class WasmExceptionObject;
class WasmExceptionTag;
class WasmExportedFunctionData;
class WasmGlobalObject;
+class WasmJSFunctionData;
class WasmMemoryObject;
class WasmModuleObject;
class WasmTableObject;
@@ -349,6 +353,8 @@ struct ObjectTypeOf {};
static const ObjectType value = ObjectType::k##Name; \
};
OBJECT_TYPE_CASE(Object)
+OBJECT_TYPE_CASE(Smi)
+OBJECT_TYPE_CASE(HeapObject)
OBJECT_TYPE_LIST(OBJECT_TYPE_CASE)
HEAP_OBJECT_ORDINARY_TYPE_LIST(OBJECT_TYPE_CASE)
STRUCT_LIST(OBJECT_TYPE_STRUCT_CASE)
@@ -470,12 +476,12 @@ struct types_have_common_values<MaybeObject, T> {
template <class T>
class TNode {
public:
- static_assert(is_valid_type_tag<T>::value, "invalid type tag");
-
template <class U,
typename std::enable_if<is_subtype<U, T>::value, int>::type = 0>
- TNode(const TNode<U>& other) : node_(other) {}
- TNode() : node_(nullptr) {}
+ TNode(const TNode<U>& other) : node_(other) {
+ LazyTemplateChecks();
+ }
+ TNode() : TNode(nullptr) {}
TNode operator=(TNode other) {
DCHECK_NOT_NULL(other.node_);
@@ -488,9 +494,14 @@ class TNode {
static TNode UncheckedCast(compiler::Node* node) { return TNode(node); }
protected:
- explicit TNode(compiler::Node* node) : node_(node) {}
+ explicit TNode(compiler::Node* node) : node_(node) { LazyTemplateChecks(); }
private:
+ // These checks shouldn't be checked before TNode is actually used.
+ void LazyTemplateChecks() {
+ static_assert(is_valid_type_tag<T>::value, "invalid type tag");
+ }
+
compiler::Node* node_;
};
@@ -795,6 +806,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<UintPtrT> UintPtrConstant(uintptr_t value) {
return Unsigned(IntPtrConstant(bit_cast<intptr_t>(value)));
}
+ TNode<RawPtrT> PointerConstant(void* value) {
+ return ReinterpretCast<RawPtrT>(IntPtrConstant(bit_cast<intptr_t>(value)));
+ }
TNode<Number> NumberConstant(double value);
TNode<Smi> SmiConstant(Smi value);
TNode<Smi> SmiConstant(int value);
@@ -874,6 +888,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Comment(s.str());
}
+ void StaticAssert(TNode<BoolT> value);
+
void SetSourcePosition(const char* file, int line);
void Bind(Label* label);
@@ -933,17 +949,17 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<WordT> WordPoisonOnSpeculation(SloppyTNode<WordT> value);
// Load raw memory location.
- Node* Load(MachineType rep, Node* base,
+ Node* Load(MachineType type, Node* base,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
template <class Type>
- TNode<Type> Load(MachineType rep, TNode<RawPtr<Type>> base) {
+ TNode<Type> Load(MachineType type, TNode<RawPtr<Type>> base) {
DCHECK(
- IsSubtype(rep.representation(), MachineRepresentationOf<Type>::value));
- return UncheckedCast<Type>(Load(rep, static_cast<Node*>(base)));
+ IsSubtype(type.representation(), MachineRepresentationOf<Type>::value));
+ return UncheckedCast<Type>(Load(type, static_cast<Node*>(base)));
}
- Node* Load(MachineType rep, Node* base, Node* offset,
+ Node* Load(MachineType type, Node* base, Node* offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
- Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
+ Node* AtomicLoad(MachineType type, Node* base, Node* offset);
// Load uncompressed tagged value from (most likely off JS heap) memory
// location.
Node* LoadFullTagged(
@@ -952,6 +968,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* base, Node* offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* LoadFromObject(MachineType type, TNode<HeapObject> object,
+ TNode<IntPtrT> offset);
+
// Load a value from the root array.
TNode<Object> LoadRoot(RootIndex root_index);
@@ -962,6 +981,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
+ Node* UnsafeStoreNoWriteBarrier(MachineRepresentation rep, Node* base,
+ Node* value);
+ Node* UnsafeStoreNoWriteBarrier(MachineRepresentation rep, Node* base,
+ Node* offset, Node* value);
+
// Stores uncompressed tagged value to (most likely off JS heap) memory
// location without write barrier.
Node* StoreFullTaggedNoWriteBarrier(Node* base, Node* tagged_value);
@@ -970,10 +994,19 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Optimized memory operations that map to Turbofan simplified nodes.
TNode<HeapObject> OptimizedAllocate(TNode<IntPtrT> size,
- AllocationType allocation);
+ AllocationType allocation,
+ AllowLargeObjects allow_large_objects);
+ void StoreToObject(MachineRepresentation rep, TNode<HeapObject> object,
+ TNode<IntPtrT> offset, Node* value,
+ StoreToObjectWriteBarrier write_barrier);
void OptimizedStoreField(MachineRepresentation rep, TNode<HeapObject> object,
- int offset, Node* value,
- WriteBarrierKind write_barrier);
+ int offset, Node* value);
+ void OptimizedStoreFieldAssertNoWriteBarrier(MachineRepresentation rep,
+ TNode<HeapObject> object,
+ int offset, Node* value);
+ void OptimizedStoreFieldUnsafeNoWriteBarrier(MachineRepresentation rep,
+ TNode<HeapObject> object,
+ int offset, Node* value);
void OptimizedStoreMap(TNode<HeapObject> object, TNode<Map>);
// {value_high} is used for 64-bit stores on 32-bit platforms, must be
// nullptr in other cases.
diff --git a/deps/v8/src/compiler/common-node-cache.cc b/deps/v8/src/compiler/common-node-cache.cc
index d9fd5ca013..92c9c78c71 100644
--- a/deps/v8/src/compiler/common-node-cache.cc
+++ b/deps/v8/src/compiler/common-node-cache.cc
@@ -4,8 +4,8 @@
#include "src/compiler/common-node-cache.h"
+#include "src/codegen/external-reference.h"
#include "src/compiler/node.h"
-#include "src/external-reference.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 57f1866bdb..fa727748f6 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -72,6 +72,8 @@ Reduction CommonOperatorReducer::Reduce(Node* node) {
return ReduceSelect(node);
case IrOpcode::kSwitch:
return ReduceSwitch(node);
+ case IrOpcode::kStaticAssert:
+ return ReduceStaticAssert(node);
default:
break;
}
@@ -459,6 +461,18 @@ Reduction CommonOperatorReducer::ReduceSwitch(Node* node) {
return NoChange();
}
+Reduction CommonOperatorReducer::ReduceStaticAssert(Node* node) {
+ DCHECK_EQ(IrOpcode::kStaticAssert, node->opcode());
+ Node* const cond = node->InputAt(0);
+ Decision decision = DecideCondition(broker(), cond);
+ if (decision == Decision::kTrue) {
+ RelaxEffectsAndControls(node);
+ return Changed(node);
+ } else {
+ return NoChange();
+ }
+}
+
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
Node* a) {
node->ReplaceInput(0, a);
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index b1d98e0558..4c7a06df16 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -6,8 +6,8 @@
#define V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -42,6 +42,7 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
Reduction ReduceReturn(Node* node);
Reduction ReduceSelect(Node* node);
Reduction ReduceSwitch(Node* node);
+ Reduction ReduceStaticAssert(Node* node);
Reduction Change(Node* node, Operator const* op, Node* a);
Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index f9d05ef851..45e558f609 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -9,7 +9,7 @@
#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -464,7 +464,8 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(LoopExitEffect, Operator::kNoThrow, 0, 1, 1, 0, 1, 0) \
V(Checkpoint, Operator::kKontrol, 0, 1, 1, 0, 1, 0) \
V(FinishRegion, Operator::kKontrol, 1, 1, 0, 1, 1, 0) \
- V(Retain, Operator::kKontrol, 1, 1, 0, 0, 1, 0)
+ V(Retain, Operator::kKontrol, 1, 1, 0, 0, 1, 0) \
+ V(StaticAssert, Operator::kFoldable, 1, 1, 0, 0, 1, 0)
#define CACHED_BRANCH_LIST(V) \
V(None, CriticalSafetyCheck) \
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 717b18c1ec..43a689b5c2 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -6,13 +6,13 @@
#define V8_COMPILER_COMMON_OPERATOR_H_
#include "src/base/compiler-specific.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/reloc-info.h"
+#include "src/codegen/string-constants.h"
+#include "src/common/globals.h"
#include "src/compiler/frame-states.h"
-#include "src/deoptimize-reason.h"
-#include "src/globals.h"
-#include "src/machine-type.h"
-#include "src/reloc-info.h"
-#include "src/string-constants.h"
-#include "src/vector-slot-pair.h"
+#include "src/compiler/vector-slot-pair.h"
+#include "src/deoptimizer/deoptimize-reason.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-handle-set.h"
@@ -454,6 +454,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Dead();
const Operator* DeadValue(MachineRepresentation rep);
const Operator* Unreachable();
+ const Operator* StaticAssert();
const Operator* End(size_t control_input_count);
const Operator* Branch(BranchHint = BranchHint::kNone,
IsSafetyCheck = IsSafetyCheck::kSafetyCheck);
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 012e78a7e4..f0bb797b68 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -4,9 +4,9 @@
#include "src/compiler/compilation-dependencies.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -20,8 +20,8 @@ CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
class CompilationDependencies::Dependency : public ZoneObject {
public:
virtual bool IsValid() const = 0;
- virtual void PrepareInstall() {}
- virtual void Install(const MaybeObjectHandle& code) = 0;
+ virtual void PrepareInstall() const {}
+ virtual void Install(const MaybeObjectHandle& code) const = 0;
#ifdef DEBUG
virtual bool IsPretenureModeDependency() const { return false; }
@@ -44,7 +44,7 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
function->initial_map() == *initial_map_.object();
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(function_.isolate(), code,
initial_map_.object(),
@@ -76,13 +76,13 @@ class PrototypePropertyDependency final
function->prototype() == *prototype_.object();
}
- void PrepareInstall() override {
+ void PrepareInstall() const override {
SLOW_DCHECK(IsValid());
Handle<JSFunction> function = function_.object();
if (!function->has_initial_map()) JSFunction::EnsureHasInitialMap(function);
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
Handle<JSFunction> function = function_.object();
DCHECK(function->has_initial_map());
@@ -104,7 +104,7 @@ class StableMapDependency final : public CompilationDependencies::Dependency {
bool IsValid() const override { return map_.object()->is_stable(); }
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(map_.isolate(), code, map_.object(),
DependentCode::kPrototypeCheckGroup);
@@ -122,7 +122,7 @@ class TransitionDependency final : public CompilationDependencies::Dependency {
bool IsValid() const override { return !map_.object()->is_deprecated(); }
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(map_.isolate(), code, map_.object(),
DependentCode::kTransitionGroup);
@@ -147,7 +147,7 @@ class PretenureModeDependency final
return allocation_ == site_.object()->GetAllocationType();
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
site_.isolate(), code, site_.object(),
@@ -181,12 +181,11 @@ class FieldRepresentationDependency final
bool IsValid() const override {
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
- return representation_.Equals(owner->instance_descriptors()
- ->GetDetails(descriptor_)
- .representation());
+ return representation_.Equals(
+ owner->instance_descriptors().GetDetails(descriptor_).representation());
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
DependentCode::kFieldOwnerGroup);
@@ -213,10 +212,10 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
Handle<Object> type = type_.object();
- return *type == owner->instance_descriptors()->GetFieldType(descriptor_);
+ return *type == owner->instance_descriptors().GetFieldType(descriptor_);
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
DependentCode::kFieldOwnerGroup);
@@ -242,10 +241,10 @@ class FieldConstnessDependency final
DisallowHeapAllocation no_heap_allocation;
Handle<Map> owner = owner_.object();
return PropertyConstness::kConst ==
- owner->instance_descriptors()->GetDetails(descriptor_).constness();
+ owner->instance_descriptors().GetDetails(descriptor_).constness();
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(owner_.isolate(), code, owner_.object(),
DependentCode::kFieldOwnerGroup);
@@ -283,7 +282,7 @@ class GlobalPropertyDependency final
read_only_ == cell->property_details().IsReadOnly();
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(cell_.isolate(), code, cell_.object(),
DependentCode::kPropertyCellChangedGroup);
@@ -306,7 +305,7 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
return cell->value() == Smi::FromInt(Isolate::kProtectorValid);
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(cell_.isolate(), code, cell_.object(),
DependentCode::kPropertyCellChangedGroup);
@@ -332,12 +331,12 @@ class ElementsKindDependency final
bool IsValid() const override {
Handle<AllocationSite> site = site_.object();
ElementsKind kind = site->PointsToLiteral()
- ? site->boilerplate()->GetElementsKind()
+ ? site->boilerplate().GetElementsKind()
: site->GetElementsKind();
return kind_ == kind;
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
DependentCode::InstallDependency(
site_.isolate(), code, site_.object(),
@@ -365,16 +364,15 @@ class InitialMapInstanceSizePredictionDependency final
return instance_size == instance_size_;
}
- void PrepareInstall() override {
+ void PrepareInstall() const override {
SLOW_DCHECK(IsValid());
function_.object()->CompleteInobjectSlackTrackingIfActive();
}
- void Install(const MaybeObjectHandle& code) override {
+ void Install(const MaybeObjectHandle& code) const override {
SLOW_DCHECK(IsValid());
- DCHECK(!function_.object()
- ->initial_map()
- ->IsInobjectSlackTrackingInProgress());
+ DCHECK(
+ !function_.object()->initial_map().IsInobjectSlackTrackingInProgress());
}
private:
@@ -382,42 +380,41 @@ class InitialMapInstanceSizePredictionDependency final
int instance_size_;
};
+void CompilationDependencies::RecordDependency(Dependency const* dependency) {
+ if (dependency != nullptr) dependencies_.push_front(dependency);
+}
+
MapRef CompilationDependencies::DependOnInitialMap(
const JSFunctionRef& function) {
MapRef map = function.initial_map();
- dependencies_.push_front(new (zone_) InitialMapDependency(function, map));
+ RecordDependency(new (zone_) InitialMapDependency(function, map));
return map;
}
ObjectRef CompilationDependencies::DependOnPrototypeProperty(
const JSFunctionRef& function) {
ObjectRef prototype = function.prototype();
- dependencies_.push_front(
- new (zone_) PrototypePropertyDependency(function, prototype));
+ RecordDependency(new (zone_)
+ PrototypePropertyDependency(function, prototype));
return prototype;
}
void CompilationDependencies::DependOnStableMap(const MapRef& map) {
if (map.CanTransition()) {
- dependencies_.push_front(new (zone_) StableMapDependency(map));
+ RecordDependency(new (zone_) StableMapDependency(map));
} else {
DCHECK(map.is_stable());
}
}
void CompilationDependencies::DependOnTransition(const MapRef& target_map) {
- if (target_map.CanBeDeprecated()) {
- dependencies_.push_front(new (zone_) TransitionDependency(target_map));
- } else {
- DCHECK(!target_map.is_deprecated());
- }
+ RecordDependency(TransitionDependencyOffTheRecord(target_map));
}
AllocationType CompilationDependencies::DependOnPretenureMode(
const AllocationSiteRef& site) {
AllocationType allocation = site.GetAllocationType();
- dependencies_.push_front(new (zone_)
- PretenureModeDependency(site, allocation));
+ RecordDependency(new (zone_) PretenureModeDependency(site, allocation));
return allocation;
}
@@ -440,41 +437,30 @@ PropertyConstness CompilationDependencies::DependOnFieldConstness(
}
DCHECK_EQ(constness, PropertyConstness::kConst);
- dependencies_.push_front(new (zone_)
- FieldConstnessDependency(owner, descriptor));
+ RecordDependency(new (zone_) FieldConstnessDependency(owner, descriptor));
return PropertyConstness::kConst;
}
void CompilationDependencies::DependOnFieldRepresentation(const MapRef& map,
int descriptor) {
- MapRef owner = map.FindFieldOwner(descriptor);
- PropertyDetails details = owner.GetPropertyDetails(descriptor);
- DCHECK(details.representation().Equals(
- map.GetPropertyDetails(descriptor).representation()));
- dependencies_.push_front(new (zone_) FieldRepresentationDependency(
- owner, descriptor, details.representation()));
+ RecordDependency(FieldRepresentationDependencyOffTheRecord(map, descriptor));
}
void CompilationDependencies::DependOnFieldType(const MapRef& map,
int descriptor) {
- MapRef owner = map.FindFieldOwner(descriptor);
- ObjectRef type = owner.GetFieldType(descriptor);
- DCHECK(type.equals(map.GetFieldType(descriptor)));
- dependencies_.push_front(new (zone_)
- FieldTypeDependency(owner, descriptor, type));
+ RecordDependency(FieldTypeDependencyOffTheRecord(map, descriptor));
}
void CompilationDependencies::DependOnGlobalProperty(
const PropertyCellRef& cell) {
PropertyCellType type = cell.property_details().cell_type();
bool read_only = cell.property_details().IsReadOnly();
- dependencies_.push_front(new (zone_)
- GlobalPropertyDependency(cell, type, read_only));
+ RecordDependency(new (zone_) GlobalPropertyDependency(cell, type, read_only));
}
bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
if (cell.value().AsSmi() != Isolate::kProtectorValid) return false;
- dependencies_.push_front(new (zone_) ProtectorDependency(cell));
+ RecordDependency(new (zone_) ProtectorDependency(cell));
return true;
}
@@ -521,7 +507,7 @@ void CompilationDependencies::DependOnElementsKind(
? site.boilerplate().value().GetElementsKind()
: site.GetElementsKind();
if (AllocationSite::ShouldTrack(kind)) {
- dependencies_.push_front(new (zone_) ElementsKindDependency(site, kind));
+ RecordDependency(new (zone_) ElementsKindDependency(site, kind));
}
}
@@ -611,7 +597,7 @@ void CompilationDependencies::DependOnStablePrototypeChains(
}
}
template void CompilationDependencies::DependOnStablePrototypeChains(
- MapHandles const& receiver_maps, WhereToStart start,
+ ZoneVector<Handle<Map>> const& receiver_maps, WhereToStart start,
base::Optional<JSObjectRef> last_prototype);
template void CompilationDependencies::DependOnStablePrototypeChains(
ZoneHandleSet<Map> const& receiver_maps, WhereToStart start,
@@ -643,13 +629,43 @@ CompilationDependencies::DependOnInitialMapInstanceSizePrediction(
// Currently, we always install the prediction dependency. If this turns out
// to be too expensive, we can only install the dependency if slack
// tracking is active.
- dependencies_.push_front(
- new (zone_)
- InitialMapInstanceSizePredictionDependency(function, instance_size));
+ RecordDependency(new (zone_) InitialMapInstanceSizePredictionDependency(
+ function, instance_size));
DCHECK_LE(instance_size, function.initial_map().instance_size());
return SlackTrackingPrediction(initial_map, instance_size);
}
+CompilationDependencies::Dependency const*
+CompilationDependencies::TransitionDependencyOffTheRecord(
+ const MapRef& target_map) const {
+ if (target_map.CanBeDeprecated()) {
+ return new (zone_) TransitionDependency(target_map);
+ } else {
+ DCHECK(!target_map.is_deprecated());
+ return nullptr;
+ }
+}
+
+CompilationDependencies::Dependency const*
+CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
+ const MapRef& map, int descriptor) const {
+ MapRef owner = map.FindFieldOwner(descriptor);
+ PropertyDetails details = owner.GetPropertyDetails(descriptor);
+ DCHECK(details.representation().Equals(
+ map.GetPropertyDetails(descriptor).representation()));
+ return new (zone_) FieldRepresentationDependency(owner, descriptor,
+ details.representation());
+}
+
+CompilationDependencies::Dependency const*
+CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map,
+ int descriptor) const {
+ MapRef owner = map.FindFieldOwner(descriptor);
+ ObjectRef type = owner.GetFieldType(descriptor);
+ DCHECK(type.equals(map.GetFieldType(descriptor)));
+ return new (zone_) FieldTypeDependency(owner, descriptor, type);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index ca4ca409a9..37a2bc3a28 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_COMPILATION_DEPENDENCIES_H_
#include "src/compiler/js-heap-broker.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -107,16 +107,28 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
SlackTrackingPrediction DependOnInitialMapInstanceSizePrediction(
const JSFunctionRef& function);
+ // The methods below allow for gathering dependencies without actually
+ // recording them. They can be recorded at a later time (or they can be
+ // ignored). For example,
+ // DependOnTransition(map);
+ // is equivalent to:
+ // RecordDependency(TransitionDependencyOffTheRecord(map));
+ class Dependency;
+ void RecordDependency(Dependency const* dependency);
+ Dependency const* TransitionDependencyOffTheRecord(
+ const MapRef& target_map) const;
+ Dependency const* FieldRepresentationDependencyOffTheRecord(
+ const MapRef& map, int descriptor) const;
+ Dependency const* FieldTypeDependencyOffTheRecord(const MapRef& map,
+ int descriptor) const;
+
// Exposed only for testing purposes.
bool AreValid() const;
- // Exposed only because C++.
- class Dependency;
-
private:
Zone* const zone_;
JSHeapBroker* const broker_;
- ZoneForwardList<Dependency*> dependencies_;
+ ZoneForwardList<Dependency const*> dependencies_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/compiler-source-position-table.h b/deps/v8/src/compiler/compiler-source-position-table.h
index e0314f5556..c067c68c72 100644
--- a/deps/v8/src/compiler/compiler-source-position-table.h
+++ b/deps/v8/src/compiler/compiler-source-position-table.h
@@ -6,9 +6,9 @@
#define V8_COMPILER_COMPILER_SOURCE_POSITION_TABLE_H_
#include "src/base/compiler-specific.h"
+#include "src/codegen/source-position.h"
+#include "src/common/globals.h"
#include "src/compiler/node-aux-data.h"
-#include "src/globals.h"
-#include "src/source-position.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/constant-folding-reducer.cc b/deps/v8/src/compiler/constant-folding-reducer.cc
index c9f838f814..5a903273ed 100644
--- a/deps/v8/src/compiler/constant-folding-reducer.cc
+++ b/deps/v8/src/compiler/constant-folding-reducer.cc
@@ -5,7 +5,7 @@
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/js-graph.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/control-equivalence.h b/deps/v8/src/compiler/control-equivalence.h
index d49454d9c9..6ad7976077 100644
--- a/deps/v8/src/compiler/control-equivalence.h
+++ b/deps/v8/src/compiler/control-equivalence.h
@@ -6,9 +6,9 @@
#define V8_COMPILER_CONTROL_EQUIVALENCE_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index 577c40d96d..0a688a7c39 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
#define V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
+#include "src/common/globals.h"
#include "src/compiler/node-marker.h"
-#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 2251121c7f..f39e6cabfb 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -61,7 +61,7 @@ Reduction DeadCodeElimination::Reduce(Node* node) {
case IrOpcode::kPhi:
return ReducePhi(node);
case IrOpcode::kEffectPhi:
- return PropagateDeadControl(node);
+ return ReduceEffectPhi(node);
case IrOpcode::kDeoptimize:
case IrOpcode::kReturn:
case IrOpcode::kTerminate:
@@ -109,7 +109,6 @@ Reduction DeadCodeElimination::ReduceEnd(Node* node) {
return NoChange();
}
-
Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
DCHECK(IrOpcode::IsMergeOpcode(node->opcode()));
Node::Inputs inputs = node->inputs();
@@ -233,6 +232,34 @@ Reduction DeadCodeElimination::ReducePhi(Node* node) {
return NoChange();
}
+Reduction DeadCodeElimination::ReduceEffectPhi(Node* node) {
+ DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+ Reduction reduction = PropagateDeadControl(node);
+ if (reduction.Changed()) return reduction;
+
+ Node* merge = NodeProperties::GetControlInput(node);
+ DCHECK(merge->opcode() == IrOpcode::kMerge ||
+ merge->opcode() == IrOpcode::kLoop);
+ int input_count = node->op()->EffectInputCount();
+ for (int i = 0; i < input_count; ++i) {
+ Node* effect = NodeProperties::GetEffectInput(node, i);
+ if (effect->opcode() == IrOpcode::kUnreachable) {
+ // If Unreachable hits an effect phi, we can re-connect the effect chain
+ // to the graph end and delete the corresponding inputs from the merge and
+ // phi nodes.
+ Node* control = NodeProperties::GetControlInput(merge, i);
+ Node* throw_node = graph_->NewNode(common_->Throw(), effect, control);
+ NodeProperties::MergeControlToEnd(graph_, common_, throw_node);
+ NodeProperties::ReplaceEffectInput(node, dead_, i);
+ NodeProperties::ReplaceControlInput(merge, dead_, i);
+ Revisit(merge);
+ Revisit(graph_->end());
+ reduction = Changed(node);
+ }
+ }
+ return reduction;
+}
+
Reduction DeadCodeElimination::ReducePureNode(Node* node) {
DCHECK_EQ(0, node->op()->EffectInputCount());
if (node->opcode() == IrOpcode::kDeadValue) return NoChange();
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index 95b9179595..5f2ba329e2 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -6,9 +6,9 @@
#define V8_COMPILER_DEAD_CODE_ELIMINATION_H_
#include "src/base/compiler-specific.h"
+#include "src/codegen/machine-type.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
-#include "src/machine-type.h"
namespace v8 {
namespace internal {
@@ -53,6 +53,7 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
Reduction ReduceLoopExit(Node* node);
Reduction ReduceNode(Node* node);
Reduction ReducePhi(Node* node);
+ Reduction ReduceEffectPhi(Node* node);
Reduction ReducePureNode(Node* node);
Reduction ReduceUnreachableOrIfException(Node* node);
Reduction ReduceEffectNode(Node* node);
diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc
new file mode 100644
index 0000000000..e69e61fac5
--- /dev/null
+++ b/deps/v8/src/compiler/decompression-elimination.cc
@@ -0,0 +1,219 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/decompression-elimination.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+DecompressionElimination::DecompressionElimination(
+ Editor* editor, Graph* graph, MachineOperatorBuilder* machine,
+ CommonOperatorBuilder* common)
+ : AdvancedReducer(editor),
+ graph_(graph),
+ machine_(machine),
+ common_(common) {}
+
+bool DecompressionElimination::IsReducibleConstantOpcode(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kInt64Constant:
+ return true;
+ // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
+ // exists, since it breaks with verify CSA on.
+ case IrOpcode::kHeapConstant:
+ default:
+ return false;
+ }
+}
+
+bool DecompressionElimination::IsValidDecompress(
+ IrOpcode::Value compressOpcode, IrOpcode::Value decompressOpcode) {
+ switch (compressOpcode) {
+ case IrOpcode::kChangeTaggedToCompressed:
+ return IrOpcode::IsDecompressOpcode(decompressOpcode);
+ case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+ return decompressOpcode ==
+ IrOpcode::kChangeCompressedSignedToTaggedSigned ||
+ decompressOpcode == IrOpcode::kChangeCompressedToTagged;
+ case IrOpcode::kChangeTaggedPointerToCompressedPointer:
+ return decompressOpcode ==
+ IrOpcode::kChangeCompressedPointerToTaggedPointer ||
+ decompressOpcode == IrOpcode::kChangeCompressedToTagged;
+ default:
+ UNREACHABLE();
+ }
+}
+
+Node* DecompressionElimination::GetCompressedConstant(Node* constant) {
+ switch (constant->opcode()) {
+ case IrOpcode::kInt64Constant:
+ return graph()->NewNode(common()->Int32Constant(
+ static_cast<int32_t>(OpParameter<int64_t>(constant->op()))));
+ break;
+ case IrOpcode::kHeapConstant:
+ // TODO(v8:8977): The HeapConstant remains as 64 bits. This does not
+ // affect the comparison and it will still work correctly. However, we are
+ // introducing a 64 bit value in the stream where a 32 bit one will
+ // suffice. Currently there is no "CompressedHeapConstant", and
+ // introducing a new opcode and handling it correctly throughout the
+ // pipeline seems that it will involve quite a bit of work.
+ return constant;
+ default:
+ UNREACHABLE();
+ }
+}
+
+Reduction DecompressionElimination::ReduceCompress(Node* node) {
+ DCHECK(IrOpcode::IsCompressOpcode(node->opcode()));
+
+ DCHECK_EQ(node->InputCount(), 1);
+ Node* input_node = node->InputAt(0);
+ IrOpcode::Value input_opcode = input_node->opcode();
+ if (IrOpcode::IsDecompressOpcode(input_opcode)) {
+ DCHECK(IsValidDecompress(node->opcode(), input_opcode));
+ DCHECK_EQ(input_node->InputCount(), 1);
+ return Replace(input_node->InputAt(0));
+ } else if (IsReducibleConstantOpcode(input_opcode)) {
+ return Replace(GetCompressedConstant(input_node));
+ } else {
+ return NoChange();
+ }
+}
+
+Reduction DecompressionElimination::ReducePhi(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kPhi);
+
+ const int value_input_count = node->op()->ValueInputCount();
+
+ // Check if all inputs are decompress nodes, and if all are the same.
+ bool same_decompresses = true;
+ IrOpcode::Value first_opcode = node->InputAt(0)->opcode();
+ for (int i = 0; i < value_input_count; ++i) {
+ Node* input = node->InputAt(i);
+ if (IrOpcode::IsDecompressOpcode(input->opcode())) {
+ same_decompresses &= first_opcode == input->opcode();
+ } else {
+ return NoChange();
+ }
+ }
+
+ // By now, we know that all inputs are decompress nodes. If all are the same,
+ // we can grab the first one to be used after the Phi. If we have different
+ // Decompress nodes as inputs, we need to use a conservative decompression
+ // after the Phi.
+ const Operator* op;
+ if (same_decompresses) {
+ op = node->InputAt(0)->op();
+ } else {
+ op = machine()->ChangeCompressedToTagged();
+ }
+
+ // Rewire phi's inputs to be the compressed inputs.
+ for (int i = 0; i < value_input_count; ++i) {
+ Node* input = node->InputAt(i);
+ DCHECK_EQ(input->InputCount(), 1);
+ node->ReplaceInput(i, input->InputAt(0));
+ }
+
+ // Update the MachineRepresentation on the Phi.
+ MachineRepresentation rep;
+ switch (op->opcode()) {
+ case IrOpcode::kChangeCompressedToTagged:
+ rep = MachineRepresentation::kCompressed;
+ break;
+ case IrOpcode::kChangeCompressedSignedToTaggedSigned:
+ rep = MachineRepresentation::kCompressedSigned;
+ break;
+ case IrOpcode::kChangeCompressedPointerToTaggedPointer:
+ rep = MachineRepresentation::kCompressedPointer;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ NodeProperties::ChangeOp(node, common()->Phi(rep, value_input_count));
+
+ // Add a decompress after the Phi. To do this, we need to replace the Phi with
+ // "Phi <- Decompress".
+ return Replace(graph()->NewNode(op, node));
+}
+
+Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kTypedStateValues);
+
+ bool any_change = false;
+ for (int i = 0; i < node->InputCount(); ++i) {
+ Node* input = node->InputAt(i);
+ if (IrOpcode::IsDecompressOpcode(input->opcode())) {
+ DCHECK_EQ(input->InputCount(), 1);
+ node->ReplaceInput(i, input->InputAt(0));
+ any_change = true;
+ }
+ }
+ return any_change ? Changed(node) : NoChange();
+}
+
+Reduction DecompressionElimination::ReduceWord64Equal(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kWord64Equal);
+
+ DCHECK_EQ(node->InputCount(), 2);
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ bool lhs_is_decompress = IrOpcode::IsDecompressOpcode(lhs->opcode());
+ bool rhs_is_decompress = IrOpcode::IsDecompressOpcode(rhs->opcode());
+
+ // Case where both of its inputs are Decompress nodes.
+ if (lhs_is_decompress && rhs_is_decompress) {
+ DCHECK_EQ(lhs->InputCount(), 1);
+ node->ReplaceInput(0, lhs->InputAt(0));
+ DCHECK_EQ(rhs->InputCount(), 1);
+ node->ReplaceInput(1, rhs->InputAt(0));
+ NodeProperties::ChangeOp(node, machine()->Word32Equal());
+ return Changed(node);
+ }
+
+ bool lhs_is_constant = IsReducibleConstantOpcode(lhs->opcode());
+ bool rhs_is_constant = IsReducibleConstantOpcode(rhs->opcode());
+
+ // Case where one input is a Decompress node and the other a constant.
+ if ((lhs_is_decompress && rhs_is_constant) ||
+ (lhs_is_constant && rhs_is_decompress)) {
+ node->ReplaceInput(
+ 0, lhs_is_decompress ? lhs->InputAt(0) : GetCompressedConstant(lhs));
+ node->ReplaceInput(
+ 1, lhs_is_decompress ? GetCompressedConstant(rhs) : rhs->InputAt(0));
+ NodeProperties::ChangeOp(node, machine()->Word32Equal());
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+Reduction DecompressionElimination::Reduce(Node* node) {
+ DisallowHeapAccess no_heap_access;
+
+ switch (node->opcode()) {
+ case IrOpcode::kChangeTaggedToCompressed:
+ case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+ case IrOpcode::kChangeTaggedPointerToCompressedPointer:
+ return ReduceCompress(node);
+ case IrOpcode::kPhi:
+ return ReducePhi(node);
+ case IrOpcode::kTypedStateValues:
+ return ReduceTypedStateValues(node);
+ case IrOpcode::kWord64Equal:
+ return ReduceWord64Equal(node);
+ default:
+ break;
+ }
+
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/decompression-elimination.h b/deps/v8/src/compiler/decompression-elimination.h
new file mode 100644
index 0000000000..c850f064a9
--- /dev/null
+++ b/deps/v8/src/compiler/decompression-elimination.h
@@ -0,0 +1,78 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DECOMPRESSION_ELIMINATION_H_
+#define V8_COMPILER_DECOMPRESSION_ELIMINATION_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Performs elimination of redundant decompressions within the graph.
+class V8_EXPORT_PRIVATE DecompressionElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ explicit DecompressionElimination(Editor* editor, Graph* graph,
+ MachineOperatorBuilder* machine,
+ CommonOperatorBuilder* common);
+ ~DecompressionElimination() final = default;
+
+ const char* reducer_name() const override {
+ return "DecompressionElimination";
+ }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ // Returns true if the decompress opcode is valid for the compressed one.
+ bool IsValidDecompress(IrOpcode::Value compressOpcode,
+ IrOpcode::Value decompressOpcode);
+
+ // Returns true if the constant opcode is a reducible one in decompression
+ // elimination.
+ bool IsReducibleConstantOpcode(IrOpcode::Value opcode);
+
+ // Get the new 32-bit constant node, given the 64-bit one.
+ Node* GetCompressedConstant(Node* constant);
+
+ // Removes direct Decompressions & Compressions, going from
+ // Parent <- Decompression <- Compression <- Child
+ // to
+ // Parent <- Child
+ // Can be used for Any, Signed, and Pointer compressions.
+ Reduction ReduceCompress(Node* node);
+
+ // Replaces Phi's input decompressions with their input node, if and only if
+ // all of the Phi's inputs are Decompress nodes.
+ Reduction ReducePhi(Node* node);
+
+ // Replaces TypedStateValues's input decompressions with their input node.
+ Reduction ReduceTypedStateValues(Node* node);
+
+ // Replaces a Word64Equal with a Word32Equal if both of its inputs are
+ // Decompress nodes, or if one is a Decompress node and the other a constant.
+ // In the case of two decompresses, it uses the original inputs before they
+ // are decompressed. In the case of having a constant, it uses the compressed
+ // value of that constant.
+ Reduction ReduceWord64Equal(Node* node);
+
+ Graph* graph() const { return graph_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ CommonOperatorBuilder* common() const { return common_; }
+
+ Graph* const graph_;
+ MachineOperatorBuilder* const machine_;
+ CommonOperatorBuilder* const common_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_DECOMPRESSION_ELIMINATION_H_
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 1beafc7926..ced078a178 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -4,9 +4,12 @@
#include "src/compiler/effect-control-linearizer.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/machine-type.h"
+#include "src/common/ptr-compr-inl.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/graph-assembler.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -15,47 +18,243 @@
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
#include "src/heap/factory-inl.h"
-#include "src/machine-type.h"
#include "src/objects/heap-number.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
-#include "src/ptr-compr-inl.h"
-
namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-bool UsingCompressedPointers() { return false; }
-
-} // namespace
-
-EffectControlLinearizer::EffectControlLinearizer(
- JSGraph* js_graph, Schedule* schedule, Zone* temp_zone,
- SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index,
- std::vector<Handle<Map>>* embedded_maps)
- : js_graph_(js_graph),
- schedule_(schedule),
- temp_zone_(temp_zone),
- mask_array_index_(mask_array_index),
- source_positions_(source_positions),
- node_origins_(node_origins),
- graph_assembler_(js_graph, nullptr, nullptr, temp_zone),
- frame_state_zapper_(nullptr),
- embedded_maps_(embedded_maps) {}
+class EffectControlLinearizer {
+ public:
+ EffectControlLinearizer(JSGraph* js_graph, Schedule* schedule,
+ Zone* temp_zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins,
+ MaskArrayIndexEnable mask_array_index)
+ : js_graph_(js_graph),
+ schedule_(schedule),
+ temp_zone_(temp_zone),
+ mask_array_index_(mask_array_index),
+ source_positions_(source_positions),
+ node_origins_(node_origins),
+ graph_assembler_(js_graph, nullptr, nullptr, temp_zone),
+ frame_state_zapper_(nullptr) {}
+
+ void Run();
-Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
-CommonOperatorBuilder* EffectControlLinearizer::common() const {
- return js_graph_->common();
-}
-SimplifiedOperatorBuilder* EffectControlLinearizer::simplified() const {
- return js_graph_->simplified();
-}
-MachineOperatorBuilder* EffectControlLinearizer::machine() const {
- return js_graph_->machine();
-}
+ private:
+ void ProcessNode(Node* node, Node** frame_state, Node** effect,
+ Node** control);
+
+ bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
+ Node** control);
+ Node* LowerChangeBitToTagged(Node* node);
+ Node* LowerChangeInt31ToTaggedSigned(Node* node);
+ Node* LowerChangeInt32ToTagged(Node* node);
+ Node* LowerChangeInt64ToTagged(Node* node);
+ Node* LowerChangeUint32ToTagged(Node* node);
+ Node* LowerChangeUint64ToTagged(Node* node);
+ Node* LowerChangeFloat64ToTagged(Node* node);
+ Node* LowerChangeFloat64ToTaggedPointer(Node* node);
+ Node* LowerChangeTaggedSignedToInt32(Node* node);
+ Node* LowerChangeTaggedSignedToInt64(Node* node);
+ Node* LowerChangeTaggedToBit(Node* node);
+ Node* LowerChangeTaggedToInt32(Node* node);
+ Node* LowerChangeTaggedToUint32(Node* node);
+ Node* LowerChangeTaggedToInt64(Node* node);
+ Node* LowerChangeTaggedToTaggedSigned(Node* node);
+ Node* LowerChangeCompressedToTaggedSigned(Node* node);
+ Node* LowerChangeTaggedToCompressedSigned(Node* node);
+ Node* LowerPoisonIndex(Node* node);
+ Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
+ void LowerCheckMaps(Node* node, Node* frame_state);
+ Node* LowerCompareMaps(Node* node);
+ Node* LowerCheckNumber(Node* node, Node* frame_state);
+ Node* LowerCheckReceiver(Node* node, Node* frame_state);
+ Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
+ Node* LowerCheckString(Node* node, Node* frame_state);
+ Node* LowerCheckSymbol(Node* node, Node* frame_state);
+ void LowerCheckIf(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Mod(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32Bounds(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedUint64Bounds(Node* node, Node* frame_state);
+ Node* LowerCheckedUint64ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedUint64ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedFloat64ToInt64(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToInt64(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
+ Node* LowerCheckedCompressedToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedCompressedToTaggedPointer(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToCompressedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToCompressedPointer(Node* node, Node* frame_state);
+ Node* LowerChangeTaggedToFloat64(Node* node);
+ void TruncateTaggedPointerToBit(Node* node, GraphAssemblerLabel<1>* done);
+ Node* LowerTruncateTaggedToBit(Node* node);
+ Node* LowerTruncateTaggedPointerToBit(Node* node);
+ Node* LowerTruncateTaggedToFloat64(Node* node);
+ Node* LowerTruncateTaggedToWord32(Node* node);
+ Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
+ Node* LowerAllocate(Node* node);
+ Node* LowerNumberToString(Node* node);
+ Node* LowerObjectIsArrayBufferView(Node* node);
+ Node* LowerObjectIsBigInt(Node* node);
+ Node* LowerObjectIsCallable(Node* node);
+ Node* LowerObjectIsConstructor(Node* node);
+ Node* LowerObjectIsDetectableCallable(Node* node);
+ Node* LowerObjectIsMinusZero(Node* node);
+ Node* LowerNumberIsMinusZero(Node* node);
+ Node* LowerObjectIsNaN(Node* node);
+ Node* LowerNumberIsNaN(Node* node);
+ Node* LowerObjectIsNonCallable(Node* node);
+ Node* LowerObjectIsNumber(Node* node);
+ Node* LowerObjectIsReceiver(Node* node);
+ Node* LowerObjectIsSmi(Node* node);
+ Node* LowerObjectIsString(Node* node);
+ Node* LowerObjectIsSymbol(Node* node);
+ Node* LowerObjectIsUndetectable(Node* node);
+ Node* LowerNumberIsFloat64Hole(Node* node);
+ Node* LowerNumberIsFinite(Node* node);
+ Node* LowerObjectIsFiniteNumber(Node* node);
+ Node* LowerNumberIsInteger(Node* node);
+ Node* LowerObjectIsInteger(Node* node);
+ Node* LowerNumberIsSafeInteger(Node* node);
+ Node* LowerObjectIsSafeInteger(Node* node);
+ Node* LowerArgumentsFrame(Node* node);
+ Node* LowerArgumentsLength(Node* node);
+ Node* LowerNewDoubleElements(Node* node);
+ Node* LowerNewSmiOrObjectElements(Node* node);
+ Node* LowerNewArgumentsElements(Node* node);
+ Node* LowerNewConsString(Node* node);
+ Node* LowerSameValue(Node* node);
+ Node* LowerSameValueNumbersOnly(Node* node);
+ Node* LowerNumberSameValue(Node* node);
+ Node* LowerDeadValue(Node* node);
+ Node* LowerStringConcat(Node* node);
+ Node* LowerStringToNumber(Node* node);
+ Node* LowerStringCharCodeAt(Node* node);
+ Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding);
+ Node* LowerStringToLowerCaseIntl(Node* node);
+ Node* LowerStringToUpperCaseIntl(Node* node);
+ Node* LowerStringFromSingleCharCode(Node* node);
+ Node* LowerStringFromSingleCodePoint(Node* node);
+ Node* LowerStringIndexOf(Node* node);
+ Node* LowerStringSubstring(Node* node);
+ Node* LowerStringLength(Node* node);
+ Node* LowerStringEqual(Node* node);
+ Node* LowerStringLessThan(Node* node);
+ Node* LowerStringLessThanOrEqual(Node* node);
+ Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
+ Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
+ Node* LowerConvertTaggedHoleToUndefined(Node* node);
+ void LowerCheckEqualsInternalizedString(Node* node, Node* frame_state);
+ void LowerCheckEqualsSymbol(Node* node, Node* frame_state);
+ Node* LowerTypeOf(Node* node);
+ Node* LowerToBoolean(Node* node);
+ Node* LowerPlainPrimitiveToNumber(Node* node);
+ Node* LowerPlainPrimitiveToWord32(Node* node);
+ Node* LowerPlainPrimitiveToFloat64(Node* node);
+ Node* LowerEnsureWritableFastElements(Node* node);
+ Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
+ void LowerTransitionElementsKind(Node* node);
+ Node* LowerLoadFieldByIndex(Node* node);
+ Node* LowerLoadTypedElement(Node* node);
+ Node* LowerLoadDataViewElement(Node* node);
+ void LowerStoreTypedElement(Node* node);
+ void LowerStoreDataViewElement(Node* node);
+ void LowerStoreSignedSmallElement(Node* node);
+ Node* LowerFindOrderedHashMapEntry(Node* node);
+ Node* LowerFindOrderedHashMapEntryForInt32Key(Node* node);
+ void LowerTransitionAndStoreElement(Node* node);
+ void LowerTransitionAndStoreNumberElement(Node* node);
+ void LowerTransitionAndStoreNonNumberElement(Node* node);
+ void LowerRuntimeAbort(Node* node);
+ Node* LowerConvertReceiver(Node* node);
+ Node* LowerDateNow(Node* node);
+
+ // Lowering of optional operators.
+ Maybe<Node*> LowerFloat64RoundUp(Node* node);
+ Maybe<Node*> LowerFloat64RoundDown(Node* node);
+ Maybe<Node*> LowerFloat64RoundTiesEven(Node* node);
+ Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
+
+ Node* AllocateHeapNumberWithValue(Node* node);
+ Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
+ const VectorSlotPair& feedback, Node* value,
+ Node* frame_state);
+ Node* BuildCheckedFloat64ToInt64(CheckForMinusZeroMode mode,
+ const VectorSlotPair& feedback, Node* value,
+ Node* frame_state);
+ Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
+ const VectorSlotPair& feedback,
+ Node* value,
+ Node* frame_state);
+ Node* BuildReverseBytes(ExternalArrayType type, Node* value);
+ Node* BuildFloat64RoundDown(Node* value);
+ Node* BuildFloat64RoundTruncate(Node* input);
+ Node* BuildUint32Mod(Node* lhs, Node* rhs);
+ Node* ComputeUnseededHash(Node* value);
+ Node* LowerStringComparison(Callable const& callable, Node* node);
+ Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
+
+ Node* ChangeInt32ToSmi(Node* value);
+ Node* ChangeInt32ToIntPtr(Node* value);
+ Node* ChangeInt64ToSmi(Node* value);
+ Node* ChangeIntPtrToInt32(Node* value);
+ Node* ChangeIntPtrToSmi(Node* value);
+ Node* ChangeUint32ToUintPtr(Node* value);
+ Node* ChangeUint32ToSmi(Node* value);
+ Node* ChangeSmiToIntPtr(Node* value);
+ Node* ChangeSmiToInt32(Node* value);
+ Node* ChangeSmiToInt64(Node* value);
+ Node* ObjectIsSmi(Node* value);
+ Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
+
+ Node* SmiMaxValueConstant();
+ Node* SmiShiftBitsConstant();
+ void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
+ ElementsKind to);
+
+ Factory* factory() const { return isolate()->factory(); }
+ Isolate* isolate() const { return jsgraph()->isolate(); }
+ JSGraph* jsgraph() const { return js_graph_; }
+ Graph* graph() const { return js_graph_->graph(); }
+ Schedule* schedule() const { return schedule_; }
+ Zone* temp_zone() const { return temp_zone_; }
+ CommonOperatorBuilder* common() const { return js_graph_->common(); }
+ SimplifiedOperatorBuilder* simplified() const {
+ return js_graph_->simplified();
+ }
+ MachineOperatorBuilder* machine() const { return js_graph_->machine(); }
+ GraphAssembler* gasm() { return &graph_assembler_; }
+
+ JSGraph* js_graph_;
+ Schedule* schedule_;
+ Zone* temp_zone_;
+ MaskArrayIndexEnable mask_array_index_;
+ RegionObservability region_observability_ = RegionObservability::kObservable;
+ SourcePositionTable* source_positions_;
+ NodeOriginTable* node_origins_;
+ GraphAssembler graph_assembler_;
+ Node* frame_state_zapper_; // For tracking down compiler::Node::New crashes.
+};
namespace {
@@ -890,6 +1089,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kSameValue:
result = LowerSameValue(node);
break;
+ case IrOpcode::kSameValueNumbersOnly:
+ result = LowerSameValueNumbersOnly(node);
+ break;
case IrOpcode::kNumberSameValue:
result = LowerNumberSameValue(node);
break;
@@ -995,12 +1197,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTransitionElementsKind:
LowerTransitionElementsKind(node);
break;
- case IrOpcode::kLoadMessage:
- result = LowerLoadMessage(node);
- break;
- case IrOpcode::kStoreMessage:
- LowerStoreMessage(node);
- break;
case IrOpcode::kLoadFieldByIndex:
result = LowerLoadFieldByIndex(node);
break;
@@ -1010,9 +1206,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kLoadDataViewElement:
result = LowerLoadDataViewElement(node);
break;
- case IrOpcode::kLoadStackArgument:
- result = LowerLoadStackArgument(node);
- break;
case IrOpcode::kStoreTypedElement:
LowerStoreTypedElement(node);
break;
@@ -1523,7 +1716,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
Node* EffectControlLinearizer::LowerPoisonIndex(Node* node) {
Node* index = node->InputAt(0);
- if (mask_array_index_ == kMaskArrayIndex) {
+ if (mask_array_index_ == MaskArrayIndexEnable::kMaskArrayIndex) {
index = __ Word32PoisonOnSpeculation(index);
}
return index;
@@ -1605,26 +1798,11 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
auto done = __ MakeLabel();
// Load the current map of the {value}.
- Node* value_map =
- UsingCompressedPointers()
- ? __ LoadField(AccessBuilder::ForCompressedMap(), value)
- : __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
for (size_t i = 0; i < map_count; ++i) {
- Node* check;
-
- if (UsingCompressedPointers()) {
- // We need the dereference scope to embed the map pointer value as an
- // int32. We don't visit the pointer.
- AllowHandleDereference allow_map_dereference;
- int32_t int32Map = static_cast<int32_t>(CompressTagged(maps[i]->ptr()));
- Node* map = __ Int32Constant(int32Map);
- check = __ Word32Equal(value_map, map);
- this->embedded_maps()->push_back(maps[i]);
- } else {
- Node* map = __ HeapConstant(maps[i]);
- check = __ WordEqual(value_map, map);
- }
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
@@ -1648,25 +1826,11 @@ Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
auto done = __ MakeLabel(MachineRepresentation::kBit);
// Load the current map of the {value}.
- Node* value_map = UsingCompressedPointers()
- ? __ LoadField(AccessBuilder::ForCompressedMap(), value)
- : __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
for (size_t i = 0; i < map_count; ++i) {
- Node* check;
-
- if (UsingCompressedPointers()) {
- // We need the dereference scope to embed the map pointer value as an
- // int32. We don't visit the pointer.
- AllowHandleDereference allow_map_dereference;
- int32_t int32Map = static_cast<int32_t>(CompressTagged(maps[i]->ptr()));
- Node* map = __ Int32Constant(int32Map);
- check = __ Word32Equal(value_map, map);
- this->embedded_maps()->push_back(maps[i]);
- } else {
- Node* map = __ HeapConstant(maps[i]);
- check = __ WordEqual(value_map, map);
- }
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ WordEqual(value_map, map);
auto next_map = __ MakeLabel();
auto passed = __ MakeLabel();
@@ -3124,7 +3288,7 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
__ Bind(&if_adaptor_frame);
Node* arguments_length = __ Load(
- MachineType::TaggedSigned(), arguments_frame,
+ MachineType::TypeCompressedTaggedSigned(), arguments_frame,
__ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset));
Node* rest_length =
@@ -3149,7 +3313,7 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
__ Bind(&if_adaptor_frame);
Node* arguments_length = __ Load(
- MachineType::TaggedSigned(), arguments_frame,
+ MachineType::TypeCompressedTaggedSigned(), arguments_frame,
__ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset));
__ Goto(&done, arguments_length);
@@ -3166,8 +3330,9 @@ Node* EffectControlLinearizer::LowerArgumentsFrame(Node* node) {
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
Node* parent_frame_type = __ Load(
- MachineType::AnyTagged(), parent_frame,
+ MachineType::TypeCompressedTagged(), parent_frame,
__ IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset));
+
__ GotoIf(__ WordEqual(parent_frame_type,
__ IntPtrConstant(StackFrame::TypeToMarker(
StackFrame::ARGUMENTS_ADAPTOR))),
@@ -3257,9 +3422,9 @@ Node* EffectControlLinearizer::LowerNewSmiOrObjectElements(Node* node) {
__ GotoIfNot(check, &done, result);
// Storing "the_hole" doesn't need a write barrier.
- ElementAccess const access = {kTaggedBase, FixedArray::kHeaderSize,
- Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier};
+ ElementAccess const access = {
+ kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ MachineType::TypeCompressedTagged(), kNoWriteBarrier};
__ StoreElement(access, result, index, the_hole);
// Advance the {index}.
@@ -3349,6 +3514,21 @@ Node* EffectControlLinearizer::LowerSameValue(Node* node) {
__ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerSameValueNumbersOnly(Node* node) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kSameValueNumbersOnly);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), lhs, rhs,
+ __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerNumberSameValue(Node* node) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
@@ -3420,27 +3600,38 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
receiver_instance_type, __ Int32Constant(kStringRepresentationMask));
// Dispatch on the current {receiver}s string representation.
+ auto if_lessthanoreq_cons = __ MakeLabel();
+ auto if_greaterthan_cons = __ MakeLabel();
auto if_seqstring = __ MakeLabel();
auto if_consstring = __ MakeLabel();
auto if_thinstring = __ MakeLabel();
auto if_externalstring = __ MakeLabel();
auto if_slicedstring = __ MakeLabel();
auto if_runtime = __ MakeDeferredLabel();
- __ GotoIf(__ Word32Equal(receiver_representation,
- __ Int32Constant(kSeqStringTag)),
- &if_seqstring);
- __ GotoIf(__ Word32Equal(receiver_representation,
- __ Int32Constant(kConsStringTag)),
- &if_consstring);
- __ GotoIf(__ Word32Equal(receiver_representation,
- __ Int32Constant(kThinStringTag)),
- &if_thinstring);
- __ GotoIf(__ Word32Equal(receiver_representation,
- __ Int32Constant(kExternalStringTag)),
- &if_externalstring);
- __ Branch(__ Word32Equal(receiver_representation,
- __ Int32Constant(kSlicedStringTag)),
- &if_slicedstring, &if_runtime);
+
+ __ Branch(__ Int32LessThanOrEqual(receiver_representation,
+ __ Int32Constant(kConsStringTag)),
+ &if_lessthanoreq_cons, &if_greaterthan_cons);
+
+ __ Bind(&if_lessthanoreq_cons);
+ {
+ __ Branch(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kConsStringTag)),
+ &if_consstring, &if_seqstring);
+ }
+
+ __ Bind(&if_greaterthan_cons);
+ {
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kThinStringTag)),
+ &if_thinstring);
+ __ GotoIf(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kExternalStringTag)),
+ &if_externalstring);
+ __ Branch(__ Word32Equal(receiver_representation,
+ __ Int32Constant(kSlicedStringTag)),
+ &if_slicedstring, &if_runtime);
+ }
__ Bind(&if_seqstring);
{
@@ -3453,13 +3644,6 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ Goto(&loop_done, result);
}
- __ Bind(&if_thinstring);
- {
- Node* receiver_actual =
- __ LoadField(AccessBuilder::ForThinStringActual(), receiver);
- __ Goto(&loop_next, receiver_actual, position);
- }
-
__ Bind(&if_consstring);
{
Node* receiver_second =
@@ -3471,6 +3655,13 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ Goto(&loop_next, receiver_first, position);
}
+ __ Bind(&if_thinstring);
+ {
+ Node* receiver_actual =
+ __ LoadField(AccessBuilder::ForThinStringActual(), receiver);
+ __ Goto(&loop_next, receiver_actual, position);
+ }
+
__ Bind(&if_externalstring);
{
// We need to bailout to the runtime for uncached external strings.
@@ -4311,20 +4502,6 @@ void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
__ Bind(&done);
}
-Node* EffectControlLinearizer::LowerLoadMessage(Node* node) {
- Node* offset = node->InputAt(0);
- Node* object_pattern =
- __ LoadField(AccessBuilder::ForExternalIntPtr(), offset);
- return __ BitcastWordToTagged(object_pattern);
-}
-
-void EffectControlLinearizer::LowerStoreMessage(Node* node) {
- Node* offset = node->InputAt(0);
- Node* object = node->InputAt(1);
- Node* object_pattern = __ BitcastTaggedToWord(object);
- __ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern);
-}
-
Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -4354,7 +4531,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* offset =
__ IntAdd(__ WordShl(index, __ IntPtrConstant(kTaggedSizeLog2 - 1)),
__ IntPtrConstant(JSObject::kHeaderSize - kHeapObjectTag));
- Node* result = __ Load(MachineType::AnyTagged(), object, offset);
+ Node* result =
+ __ Load(MachineType::TypeCompressedTagged(), object, offset);
__ Goto(&done, result);
}
@@ -4369,7 +4547,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
__ IntPtrConstant(kTaggedSizeLog2 - 1)),
__ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
kHeapObjectTag));
- Node* result = __ Load(MachineType::AnyTagged(), properties, offset);
+ Node* result =
+ __ Load(MachineType::TypeCompressedTagged(), properties, offset);
__ Goto(&done, result);
}
}
@@ -4395,7 +4574,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
Node* result = __ Load(MachineType::Float64(), object, offset);
__ Goto(&done_double, result);
} else {
- Node* result = __ Load(MachineType::AnyTagged(), object, offset);
+ Node* result =
+ __ Load(MachineType::TypeCompressedTagged(), object, offset);
result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
__ Goto(&done_double, result);
}
@@ -4410,7 +4590,8 @@ Node* EffectControlLinearizer::LowerLoadFieldByIndex(Node* node) {
__ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant((FixedArray::kHeaderSize - kTaggedSize) -
kHeapObjectTag));
- Node* result = __ Load(MachineType::AnyTagged(), properties, offset);
+ Node* result =
+ __ Load(MachineType::TypeCompressedTagged(), properties, offset);
result = __ LoadField(AccessBuilder::ForHeapNumberValue(), result);
__ Goto(&done_double, result);
}
@@ -4481,23 +4662,20 @@ Node* EffectControlLinearizer::BuildReverseBytes(ExternalArrayType type,
Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
- Node* buffer = node->InputAt(0);
+ Node* object = node->InputAt(0);
Node* storage = node->InputAt(1);
- Node* byte_offset = node->InputAt(2);
- Node* index = node->InputAt(3);
- Node* is_little_endian = node->InputAt(4);
-
- // We need to keep the {buffer} alive so that the GC will not release the
- // ArrayBuffer (if there's any) as long as we are still operating on it.
- __ Retain(buffer);
+ Node* index = node->InputAt(2);
+ Node* is_little_endian = node->InputAt(3);
- // Compute the effective offset.
- Node* offset = __ IntAdd(byte_offset, index);
+ // We need to keep the {object} (either the JSArrayBuffer or the JSDataView)
+ // alive so that the GC will not release the JSArrayBuffer (if there's any)
+ // as long as we are still operating on it.
+ __ Retain(object);
MachineType const machine_type =
AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;
- Node* value = __ LoadUnaligned(machine_type, storage, offset);
+ Node* value = __ LoadUnaligned(machine_type, storage, index);
auto big_endian = __ MakeLabel();
auto done = __ MakeLabel(machine_type.representation());
@@ -4524,31 +4702,18 @@ Node* EffectControlLinearizer::LowerLoadDataViewElement(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerLoadStackArgument(Node* node) {
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
-
- Node* argument =
- __ LoadElement(AccessBuilder::ForStackArgument(), base, index);
-
- return __ BitcastWordToTagged(argument);
-}
-
void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
ExternalArrayType element_type = ExternalArrayTypeOf(node->op());
- Node* buffer = node->InputAt(0);
+ Node* object = node->InputAt(0);
Node* storage = node->InputAt(1);
- Node* byte_offset = node->InputAt(2);
- Node* index = node->InputAt(3);
- Node* value = node->InputAt(4);
- Node* is_little_endian = node->InputAt(5);
-
- // We need to keep the {buffer} alive so that the GC will not release the
- // ArrayBuffer (if there's any) as long as we are still operating on it.
- __ Retain(buffer);
+ Node* index = node->InputAt(2);
+ Node* value = node->InputAt(3);
+ Node* is_little_endian = node->InputAt(4);
- // Compute the effective offset.
- Node* offset = __ IntAdd(byte_offset, index);
+ // We need to keep the {object} (either the JSArrayBuffer or the JSDataView)
+ // alive so that the GC will not release the JSArrayBuffer (if there's any)
+ // as long as we are still operating on it.
+ __ Retain(object);
MachineType const machine_type =
AccessBuilder::ForTypedArrayElement(element_type, true).machine_type;
@@ -4575,7 +4740,7 @@ void EffectControlLinearizer::LowerStoreDataViewElement(Node* node) {
}
__ Bind(&done);
- __ StoreUnaligned(machine_type.representation(), storage, offset,
+ __ StoreUnaligned(machine_type.representation(), storage, index,
done.PhiAt(0));
}
@@ -4968,7 +5133,7 @@ void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
// the ElementAccess information.
ElementAccess access = AccessBuilder::ForFixedArrayElement();
access.type = Type::SignedSmall();
- access.machine_type = MachineType::TaggedSigned();
+ access.machine_type = MachineType::TypeCompressedTaggedSigned();
access.write_barrier_kind = kNoWriteBarrier;
Node* smi_value = ChangeInt32ToSmi(value);
__ StoreElement(access, elements, index, smi_value);
@@ -5458,7 +5623,7 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
AccessBuilder::ForOrderedHashMapOrSetNumberOfBuckets(), table));
hash = __ WordAnd(hash, __ IntSub(number_of_buckets, __ IntPtrConstant(1)));
Node* first_entry = ChangeSmiToIntPtr(__ Load(
- MachineType::TaggedSigned(), table,
+ MachineType::TypeCompressedTaggedSigned(), table,
__ IntAdd(__ WordShl(hash, __ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant(OrderedHashMap::HashTableStartOffset() -
kHeapObjectTag))));
@@ -5477,7 +5642,7 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
number_of_buckets);
Node* candidate_key = __ Load(
- MachineType::AnyTagged(), table,
+ MachineType::TypeCompressedTagged(), table,
__ IntAdd(__ WordShl(entry, __ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant(OrderedHashMap::HashTableStartOffset() -
kHeapObjectTag)));
@@ -5505,7 +5670,7 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntryForInt32Key(
__ Bind(&if_notmatch);
{
Node* next_entry = ChangeSmiToIntPtr(__ Load(
- MachineType::TaggedSigned(), table,
+ MachineType::TypeCompressedTaggedSigned(), table,
__ IntAdd(
__ WordShl(entry, __ IntPtrConstant(kTaggedSizeLog2)),
__ IntPtrConstant(OrderedHashMap::HashTableStartOffset() +
@@ -5531,12 +5696,14 @@ Node* EffectControlLinearizer::LowerDateNow(Node* node) {
#undef __
-Factory* EffectControlLinearizer::factory() const {
- return isolate()->factory();
-}
-
-Isolate* EffectControlLinearizer::isolate() const {
- return jsgraph()->isolate();
+void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins,
+ MaskArrayIndexEnable mask_array_index) {
+ EffectControlLinearizer linearizer(graph, schedule, temp_zone,
+ source_positions, node_origins,
+ mask_array_index);
+ linearizer.Run();
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 70970b3d79..1942b870d5 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -5,259 +5,29 @@
#ifndef V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
#define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-assembler.h"
-#include "src/compiler/node.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/globals.h"
+#include <vector>
+
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
-// Forward declarations.
-class Callable;
+class Map;
class Zone;
namespace compiler {
-class CommonOperatorBuilder;
-class SimplifiedOperatorBuilder;
-class MachineOperatorBuilder;
class JSGraph;
-class Graph;
+class NodeOriginTable;
class Schedule;
class SourcePositionTable;
-class NodeOriginTable;
-
-class V8_EXPORT_PRIVATE EffectControlLinearizer {
- public:
- enum MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
-
- EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
- SourcePositionTable* source_positions,
- NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index,
- std::vector<Handle<Map>>* embedded_maps);
-
- void Run();
-
- private:
- void ProcessNode(Node* node, Node** frame_state, Node** effect,
- Node** control);
-
- bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
- Node** control);
- Node* LowerChangeBitToTagged(Node* node);
- Node* LowerChangeInt31ToTaggedSigned(Node* node);
- Node* LowerChangeInt32ToTagged(Node* node);
- Node* LowerChangeInt64ToTagged(Node* node);
- Node* LowerChangeUint32ToTagged(Node* node);
- Node* LowerChangeUint64ToTagged(Node* node);
- Node* LowerChangeFloat64ToTagged(Node* node);
- Node* LowerChangeFloat64ToTaggedPointer(Node* node);
- Node* LowerChangeTaggedSignedToInt32(Node* node);
- Node* LowerChangeTaggedSignedToInt64(Node* node);
- Node* LowerChangeTaggedToBit(Node* node);
- Node* LowerChangeTaggedToInt32(Node* node);
- Node* LowerChangeTaggedToUint32(Node* node);
- Node* LowerChangeTaggedToInt64(Node* node);
- Node* LowerChangeTaggedToTaggedSigned(Node* node);
- Node* LowerChangeCompressedToTaggedSigned(Node* node);
- Node* LowerChangeTaggedToCompressedSigned(Node* node);
- Node* LowerPoisonIndex(Node* node);
- Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
- void LowerCheckMaps(Node* node, Node* frame_state);
- Node* LowerCompareMaps(Node* node);
- Node* LowerCheckNumber(Node* node, Node* frame_state);
- Node* LowerCheckReceiver(Node* node, Node* frame_state);
- Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
- Node* LowerCheckString(Node* node, Node* frame_state);
- Node* LowerCheckSymbol(Node* node, Node* frame_state);
- void LowerCheckIf(Node* node, Node* frame_state);
- Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
- Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
- Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
- Node* LowerCheckedInt32Mod(Node* node, Node* frame_state);
- Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
- Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
- Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
- Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
- Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state);
- Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state);
- Node* LowerCheckedUint32Bounds(Node* node, Node* frame_state);
- Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
- Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
- Node* LowerCheckedUint64Bounds(Node* node, Node* frame_state);
- Node* LowerCheckedUint64ToInt32(Node* node, Node* frame_state);
- Node* LowerCheckedUint64ToTaggedSigned(Node* node, Node* frame_state);
- Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
- Node* LowerCheckedFloat64ToInt64(Node* node, Node* frame_state);
- Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
- Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
- Node* LowerCheckedTaggedToInt64(Node* node, Node* frame_state);
- Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
- Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
- Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
- Node* LowerCheckedCompressedToTaggedSigned(Node* node, Node* frame_state);
- Node* LowerCheckedCompressedToTaggedPointer(Node* node, Node* frame_state);
- Node* LowerCheckedTaggedToCompressedSigned(Node* node, Node* frame_state);
- Node* LowerCheckedTaggedToCompressedPointer(Node* node, Node* frame_state);
- Node* LowerChangeTaggedToFloat64(Node* node);
- void TruncateTaggedPointerToBit(Node* node, GraphAssemblerLabel<1>* done);
- Node* LowerTruncateTaggedToBit(Node* node);
- Node* LowerTruncateTaggedPointerToBit(Node* node);
- Node* LowerTruncateTaggedToFloat64(Node* node);
- Node* LowerTruncateTaggedToWord32(Node* node);
- Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
- Node* LowerAllocate(Node* node);
- Node* LowerNumberToString(Node* node);
- Node* LowerObjectIsArrayBufferView(Node* node);
- Node* LowerObjectIsBigInt(Node* node);
- Node* LowerObjectIsCallable(Node* node);
- Node* LowerObjectIsConstructor(Node* node);
- Node* LowerObjectIsDetectableCallable(Node* node);
- Node* LowerObjectIsMinusZero(Node* node);
- Node* LowerNumberIsMinusZero(Node* node);
- Node* LowerObjectIsNaN(Node* node);
- Node* LowerNumberIsNaN(Node* node);
- Node* LowerObjectIsNonCallable(Node* node);
- Node* LowerObjectIsNumber(Node* node);
- Node* LowerObjectIsReceiver(Node* node);
- Node* LowerObjectIsSmi(Node* node);
- Node* LowerObjectIsString(Node* node);
- Node* LowerObjectIsSymbol(Node* node);
- Node* LowerObjectIsUndetectable(Node* node);
- Node* LowerNumberIsFloat64Hole(Node* node);
- Node* LowerNumberIsFinite(Node* node);
- Node* LowerObjectIsFiniteNumber(Node* node);
- Node* LowerNumberIsInteger(Node* node);
- Node* LowerObjectIsInteger(Node* node);
- Node* LowerNumberIsSafeInteger(Node* node);
- Node* LowerObjectIsSafeInteger(Node* node);
- Node* LowerArgumentsFrame(Node* node);
- Node* LowerArgumentsLength(Node* node);
- Node* LowerNewDoubleElements(Node* node);
- Node* LowerNewSmiOrObjectElements(Node* node);
- Node* LowerNewArgumentsElements(Node* node);
- Node* LowerNewConsString(Node* node);
- Node* LowerSameValue(Node* node);
- Node* LowerNumberSameValue(Node* node);
- Node* LowerDeadValue(Node* node);
- Node* LowerStringConcat(Node* node);
- Node* LowerStringToNumber(Node* node);
- Node* LowerStringCharCodeAt(Node* node);
- Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding);
- Node* LowerStringToLowerCaseIntl(Node* node);
- Node* LowerStringToUpperCaseIntl(Node* node);
- Node* LowerStringFromSingleCharCode(Node* node);
- Node* LowerStringFromSingleCodePoint(Node* node);
- Node* LowerStringIndexOf(Node* node);
- Node* LowerStringSubstring(Node* node);
- Node* LowerStringLength(Node* node);
- Node* LowerStringEqual(Node* node);
- Node* LowerStringLessThan(Node* node);
- Node* LowerStringLessThanOrEqual(Node* node);
- Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
- Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
- Node* LowerConvertTaggedHoleToUndefined(Node* node);
- void LowerCheckEqualsInternalizedString(Node* node, Node* frame_state);
- void LowerCheckEqualsSymbol(Node* node, Node* frame_state);
- Node* LowerTypeOf(Node* node);
- Node* LowerToBoolean(Node* node);
- Node* LowerPlainPrimitiveToNumber(Node* node);
- Node* LowerPlainPrimitiveToWord32(Node* node);
- Node* LowerPlainPrimitiveToFloat64(Node* node);
- Node* LowerEnsureWritableFastElements(Node* node);
- Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
- void LowerTransitionElementsKind(Node* node);
- Node* LowerLoadFieldByIndex(Node* node);
- Node* LowerLoadMessage(Node* node);
- Node* LowerLoadTypedElement(Node* node);
- Node* LowerLoadDataViewElement(Node* node);
- Node* LowerLoadStackArgument(Node* node);
- void LowerStoreMessage(Node* node);
- void LowerStoreTypedElement(Node* node);
- void LowerStoreDataViewElement(Node* node);
- void LowerStoreSignedSmallElement(Node* node);
- Node* LowerFindOrderedHashMapEntry(Node* node);
- Node* LowerFindOrderedHashMapEntryForInt32Key(Node* node);
- void LowerTransitionAndStoreElement(Node* node);
- void LowerTransitionAndStoreNumberElement(Node* node);
- void LowerTransitionAndStoreNonNumberElement(Node* node);
- void LowerRuntimeAbort(Node* node);
- Node* LowerConvertReceiver(Node* node);
- Node* LowerDateNow(Node* node);
-
- // Lowering of optional operators.
- Maybe<Node*> LowerFloat64RoundUp(Node* node);
- Maybe<Node*> LowerFloat64RoundDown(Node* node);
- Maybe<Node*> LowerFloat64RoundTiesEven(Node* node);
- Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
-
- Node* AllocateHeapNumberWithValue(Node* node);
- Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
- const VectorSlotPair& feedback, Node* value,
- Node* frame_state);
- Node* BuildCheckedFloat64ToInt64(CheckForMinusZeroMode mode,
- const VectorSlotPair& feedback, Node* value,
- Node* frame_state);
- Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
- const VectorSlotPair& feedback,
- Node* value,
- Node* frame_state);
- Node* BuildReverseBytes(ExternalArrayType type, Node* value);
- Node* BuildFloat64RoundDown(Node* value);
- Node* BuildFloat64RoundTruncate(Node* input);
- Node* BuildUint32Mod(Node* lhs, Node* rhs);
- Node* ComputeUnseededHash(Node* value);
- Node* LowerStringComparison(Callable const& callable, Node* node);
- Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
-
- Node* ChangeInt32ToSmi(Node* value);
- Node* ChangeInt32ToIntPtr(Node* value);
- Node* ChangeInt64ToSmi(Node* value);
- Node* ChangeIntPtrToInt32(Node* value);
- Node* ChangeIntPtrToSmi(Node* value);
- Node* ChangeUint32ToUintPtr(Node* value);
- Node* ChangeUint32ToSmi(Node* value);
- Node* ChangeSmiToIntPtr(Node* value);
- Node* ChangeSmiToInt32(Node* value);
- Node* ChangeSmiToInt64(Node* value);
- Node* ObjectIsSmi(Node* value);
- Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
-
- Node* SmiMaxValueConstant();
- Node* SmiShiftBitsConstant();
- void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
- ElementsKind to);
-
- Factory* factory() const;
- Isolate* isolate() const;
- JSGraph* jsgraph() const { return js_graph_; }
- Graph* graph() const;
- Schedule* schedule() const { return schedule_; }
- Zone* temp_zone() const { return temp_zone_; }
- CommonOperatorBuilder* common() const;
- SimplifiedOperatorBuilder* simplified() const;
- MachineOperatorBuilder* machine() const;
- std::vector<Handle<Map>>* embedded_maps() { return embedded_maps_; }
-
- GraphAssembler* gasm() { return &graph_assembler_; }
- JSGraph* js_graph_;
- Schedule* schedule_;
- Zone* temp_zone_;
- MaskArrayIndexEnable mask_array_index_;
- RegionObservability region_observability_ = RegionObservability::kObservable;
- SourcePositionTable* source_positions_;
- NodeOriginTable* node_origins_;
- GraphAssembler graph_assembler_;
- Node* frame_state_zapper_; // For tracking down compiler::Node::New crashes.
+enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
- // embedded_maps_ keeps track of maps we've embedded as Uint32 constants.
- // We do this in order to notify the garbage collector at code-gen time.
- std::vector<Handle<Map>>* embedded_maps_;
-};
+V8_EXPORT_PRIVATE void LinearizeEffectControl(
+ JSGraph* graph, Schedule* schedule, Zone* temp_zone,
+ SourcePositionTable* source_positions, NodeOriginTable* node_origins,
+ MaskArrayIndexEnable mask_array_index);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index 688460abee..18ae069b21 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -7,7 +7,7 @@
#include "src/compiler/all-nodes.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -326,8 +326,9 @@ void EscapeAnalysisReducer::Finalize() {
TypeCache::Get()->kArgumentsLengthType);
NodeProperties::ReplaceValueInput(load, arguments_frame, 0);
NodeProperties::ReplaceValueInput(load, offset, 1);
- NodeProperties::ChangeOp(
- load, jsgraph()->simplified()->LoadStackArgument());
+ NodeProperties::ChangeOp(load,
+ jsgraph()->simplified()->LoadElement(
+ AccessBuilder::ForStackArgument()));
break;
}
case IrOpcode::kLoadField: {
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 976be6d906..1c1267b3c7 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -6,9 +6,10 @@
#define V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index fc1f5d2bac..dc0db4d780 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -4,12 +4,12 @@
#include "src/compiler/escape-analysis.h"
-#include "src/bootstrapper.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/init/bootstrapper.h"
#include "src/objects/map-inl.h"
#ifdef DEBUG
@@ -622,6 +622,7 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
OffsetOfElementsAccess(op, index).To(&offset) &&
vobject->FieldAt(offset).To(&var) && current->Get(var).To(&value)) {
current->SetReplacement(value);
+ break;
} else if (vobject && !vobject->HasEscaped()) {
// Compute the known length (aka the number of elements) of {object}
// based on the virtual object information.
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 2475c34916..c3dcd2f74d 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -6,10 +6,10 @@
#define V8_COMPILER_ESCAPE_ANALYSIS_H_
#include "src/base/functional.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/persistent-map.h"
-#include "src/globals.h"
#include "src/objects/name.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index 267da154e1..5fbf11cdbc 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -5,12 +5,12 @@
#include "src/compiler/frame-states.h"
#include "src/base/functional.h"
-#include "src/callable.h"
+#include "src/codegen/callable.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -206,6 +206,17 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
shared.object());
}
+Node* CreateGenericLazyDeoptContinuationFrameState(
+ JSGraph* graph, const SharedFunctionInfoRef& shared, Node* target,
+ Node* context, Node* receiver, Node* outer_frame_state) {
+ Node* stack_parameters[]{receiver};
+ const int stack_parameter_count = arraysize(stack_parameters);
+ return CreateJavaScriptBuiltinContinuationFrameState(
+ graph, shared, Builtins::kGenericLazyDeoptContinuation, target, context,
+ stack_parameters, stack_parameter_count, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index dbe4deeb20..3922a21a9c 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -6,9 +6,9 @@
#define V8_COMPILER_FRAME_STATES_H_
#include "src/builtins/builtins.h"
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/objects/shared-function-info.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -161,6 +161,10 @@ Node* CreateJavaScriptBuiltinContinuationFrameState(
int stack_parameter_count, Node* outer_frame_state,
ContinuationFrameStateMode mode);
+Node* CreateGenericLazyDeoptContinuationFrameState(
+ JSGraph* graph, const SharedFunctionInfoRef& shared, Node* target,
+ Node* context, Node* receiver, Node* outer_frame_state);
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 8ee5904d27..18f0df8c80 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_FRAME_H_
#define V8_COMPILER_FRAME_H_
-#include "src/bit-vector.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
+#include "src/utils/bit-vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 68f645150b..cc9dbd9dfd 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -4,7 +4,7 @@
#include "src/compiler/graph-assembler.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/linkage.h"
namespace v8 {
@@ -112,20 +112,26 @@ Node* GraphAssembler::Allocate(AllocationType allocation, Node* size) {
}
Node* GraphAssembler::LoadField(FieldAccess const& access, Node* object) {
- return current_effect_ =
- graph()->NewNode(simplified()->LoadField(access), object,
- current_effect_, current_control_);
+ Node* value = current_effect_ =
+ graph()->NewNode(simplified()->LoadField(access), object, current_effect_,
+ current_control_);
+ return InsertDecompressionIfNeeded(access.machine_type.representation(),
+ value);
}
Node* GraphAssembler::LoadElement(ElementAccess const& access, Node* object,
Node* index) {
- return current_effect_ =
- graph()->NewNode(simplified()->LoadElement(access), object, index,
- current_effect_, current_control_);
+ Node* value = current_effect_ =
+ graph()->NewNode(simplified()->LoadElement(access), object, index,
+ current_effect_, current_control_);
+ return InsertDecompressionIfNeeded(access.machine_type.representation(),
+ value);
}
Node* GraphAssembler::StoreField(FieldAccess const& access, Node* object,
Node* value) {
+ value =
+ InsertCompressionIfNeeded(access.machine_type.representation(), value);
return current_effect_ =
graph()->NewNode(simplified()->StoreField(access), object, value,
current_effect_, current_control_);
@@ -133,6 +139,8 @@ Node* GraphAssembler::StoreField(FieldAccess const& access, Node* object,
Node* GraphAssembler::StoreElement(ElementAccess const& access, Node* object,
Node* index, Node* value) {
+ value =
+ InsertCompressionIfNeeded(access.machine_type.representation(), value);
return current_effect_ =
graph()->NewNode(simplified()->StoreElement(access), object, index,
value, current_effect_, current_control_);
@@ -150,15 +158,16 @@ Node* GraphAssembler::Unreachable() {
Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
Node* value) {
+ value = InsertCompressionIfNeeded(rep.representation(), value);
return current_effect_ =
graph()->NewNode(machine()->Store(rep), object, offset, value,
current_effect_, current_control_);
}
-Node* GraphAssembler::Load(MachineType rep, Node* object, Node* offset) {
- return current_effect_ =
- graph()->NewNode(machine()->Load(rep), object, offset,
- current_effect_, current_control_);
+Node* GraphAssembler::Load(MachineType type, Node* object, Node* offset) {
+ Node* value = current_effect_ = graph()->NewNode(
+ machine()->Load(type), object, offset, current_effect_, current_control_);
+ return InsertDecompressionIfNeeded(type.representation(), value);
}
Node* GraphAssembler::StoreUnaligned(MachineRepresentation rep, Node* object,
@@ -172,13 +181,13 @@ Node* GraphAssembler::StoreUnaligned(MachineRepresentation rep, Node* object,
current_effect_, current_control_);
}
-Node* GraphAssembler::LoadUnaligned(MachineType rep, Node* object,
+Node* GraphAssembler::LoadUnaligned(MachineType type, Node* object,
Node* offset) {
Operator const* const op =
- (rep.representation() == MachineRepresentation::kWord8 ||
- machine()->UnalignedLoadSupported(rep.representation()))
- ? machine()->Load(rep)
- : machine()->UnalignedLoad(rep);
+ (type.representation() == MachineRepresentation::kWord8 ||
+ machine()->UnalignedLoadSupported(type.representation()))
+ ? machine()->Load(type)
+ : machine()->UnalignedLoad(type);
return current_effect_ = graph()->NewNode(op, object, offset, current_effect_,
current_control_);
}
@@ -274,6 +283,50 @@ Node* GraphAssembler::ExtractCurrentEffect() {
return result;
}
+Node* GraphAssembler::InsertDecompressionIfNeeded(MachineRepresentation rep,
+ Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ switch (rep) {
+ case MachineRepresentation::kCompressedPointer:
+ value = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), value);
+ break;
+ case MachineRepresentation::kCompressedSigned:
+ value = graph()->NewNode(
+ machine()->ChangeCompressedSignedToTaggedSigned(), value);
+ break;
+ case MachineRepresentation::kCompressed:
+ value = graph()->NewNode(machine()->ChangeCompressedToTagged(), value);
+ break;
+ default:
+ break;
+ }
+ }
+ return value;
+}
+
+Node* GraphAssembler::InsertCompressionIfNeeded(MachineRepresentation rep,
+ Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ switch (rep) {
+ case MachineRepresentation::kCompressedPointer:
+ value = graph()->NewNode(
+ machine()->ChangeTaggedPointerToCompressedPointer(), value);
+ break;
+ case MachineRepresentation::kCompressedSigned:
+ value = graph()->NewNode(
+ machine()->ChangeTaggedSignedToCompressedSigned(), value);
+ break;
+ case MachineRepresentation::kCompressed:
+ value = graph()->NewNode(machine()->ChangeTaggedToCompressed(), value);
+ break;
+ default:
+ break;
+ }
+ }
+ return value;
+}
+
void GraphAssembler::Reset(Node* effect, Node* control) {
current_effect_ = effect;
current_control_ = control;
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 8e330de68f..74b885b788 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -8,7 +8,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
-#include "src/vector-slot-pair.h"
+#include "src/compiler/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -233,11 +233,11 @@ class GraphAssembler {
Node* value);
Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
- Node* Load(MachineType rep, Node* object, Node* offset);
+ Node* Load(MachineType type, Node* object, Node* offset);
Node* StoreUnaligned(MachineRepresentation rep, Node* object, Node* offset,
Node* value);
- Node* LoadUnaligned(MachineType rep, Node* object, Node* offset);
+ Node* LoadUnaligned(MachineType type, Node* object, Node* offset);
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
@@ -284,6 +284,12 @@ class GraphAssembler {
Node* ExtractCurrentEffect();
private:
+ // Adds a decompression node if pointer compression is enabled and the
+ // representation loaded is a compressed one. To be used after loads.
+ Node* InsertDecompressionIfNeeded(MachineRepresentation rep, Node* value);
+ // Adds a compression node if pointer compression is enabled and the
+ // representation to be stored is a compressed one. To be used before stores.
+ Node* InsertCompressionIfNeeded(MachineRepresentation rep, Node* value);
template <typename... Vars>
void MergeState(GraphAssemblerLabel<sizeof...(Vars)>* label, Vars... vars);
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 36fee94fa9..3bb20a4625 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -6,8 +6,8 @@
#define V8_COMPILER_GRAPH_REDUCER_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/node-marker.h"
-#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/graph-trimmer.h b/deps/v8/src/compiler/graph-trimmer.h
index edabae0b8a..5a5f525ef4 100644
--- a/deps/v8/src/compiler/graph-trimmer.h
+++ b/deps/v8/src/compiler/graph-trimmer.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_GRAPH_TRIMMER_H_
#define V8_COMPILER_GRAPH_TRIMMER_H_
+#include "src/common/globals.h"
#include "src/compiler/node-marker.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index c14f66d165..85123261db 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -8,6 +8,8 @@
#include <sstream>
#include <string>
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/source-position.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/backend/register-allocator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -23,10 +25,8 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects/script-inl.h"
#include "src/objects/shared-function-info.h"
-#include "src/optimized-compilation-info.h"
-#include "src/ostreams.h"
-#include "src/source-position.h"
-#include "src/vector.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -102,9 +102,9 @@ void JsonPrintFunctionSource(std::ostream& os, int source_id,
if (!script.is_null() && !script->IsUndefined(isolate) && !shared.is_null()) {
Object source_name = script->name();
os << ", \"sourceName\": \"";
- if (source_name->IsString()) {
+ if (source_name.IsString()) {
std::ostringstream escaped_name;
- escaped_name << String::cast(source_name)->ToCString().get();
+ escaped_name << String::cast(source_name).ToCString().get();
os << JSONEscaped(escaped_name);
}
os << "\"";
@@ -173,7 +173,7 @@ void JsonPrintAllSourceWithPositions(std::ostream& os,
JsonPrintFunctionSource(os, -1,
info->shared_info().is_null()
? std::unique_ptr<char[]>(new char[1]{0})
- : info->shared_info()->DebugName()->ToCString(),
+ : info->shared_info()->DebugName().ToCString(),
script, isolate, info->shared_info(), true);
const auto& inlined = info->inlined_functions();
SourceIdAssigner id_assigner(info->inlined_functions().size());
@@ -181,7 +181,7 @@ void JsonPrintAllSourceWithPositions(std::ostream& os,
os << ", ";
Handle<SharedFunctionInfo> shared = inlined[id].shared_info;
const int source_id = id_assigner.GetIdFor(shared);
- JsonPrintFunctionSource(os, source_id, shared->DebugName()->ToCString(),
+ JsonPrintFunctionSource(os, source_id, shared->DebugName().ToCString(),
handle(Script::cast(shared->script()), isolate),
isolate, shared, true);
}
@@ -216,19 +216,19 @@ std::unique_ptr<char[]> GetVisualizerLogFileName(OptimizedCompilationInfo* info,
EmbeddedVector<char, 256> source_file(0);
bool source_available = false;
if (FLAG_trace_file_names && info->has_shared_info() &&
- info->shared_info()->script()->IsScript()) {
- Object source_name = Script::cast(info->shared_info()->script())->name();
- if (source_name->IsString()) {
+ info->shared_info()->script().IsScript()) {
+ Object source_name = Script::cast(info->shared_info()->script()).name();
+ if (source_name.IsString()) {
String str = String::cast(source_name);
- if (str->length() > 0) {
- SNPrintF(source_file, "%s", str->ToCString().get());
- std::replace(source_file.start(),
- source_file.start() + source_file.length(), '/', '_');
+ if (str.length() > 0) {
+ SNPrintF(source_file, "%s", str.ToCString().get());
+ std::replace(source_file.begin(),
+ source_file.begin() + source_file.length(), '/', '_');
source_available = true;
}
}
}
- std::replace(filename.start(), filename.start() + filename.length(), ' ',
+ std::replace(filename.begin(), filename.begin() + filename.length(), ' ',
'_');
EmbeddedVector<char, 256> base_dir;
@@ -241,21 +241,21 @@ std::unique_ptr<char[]> GetVisualizerLogFileName(OptimizedCompilationInfo* info,
EmbeddedVector<char, 256> full_filename;
if (phase == nullptr && !source_available) {
- SNPrintF(full_filename, "%s%s.%s", base_dir.start(), filename.start(),
+ SNPrintF(full_filename, "%s%s.%s", base_dir.begin(), filename.begin(),
suffix);
} else if (phase != nullptr && !source_available) {
- SNPrintF(full_filename, "%s%s-%s.%s", base_dir.start(), filename.start(),
+ SNPrintF(full_filename, "%s%s-%s.%s", base_dir.begin(), filename.begin(),
phase, suffix);
} else if (phase == nullptr && source_available) {
- SNPrintF(full_filename, "%s%s_%s.%s", base_dir.start(), filename.start(),
- source_file.start(), suffix);
+ SNPrintF(full_filename, "%s%s_%s.%s", base_dir.begin(), filename.begin(),
+ source_file.begin(), suffix);
} else {
- SNPrintF(full_filename, "%s%s_%s-%s.%s", base_dir.start(), filename.start(),
- source_file.start(), phase, suffix);
+ SNPrintF(full_filename, "%s%s_%s-%s.%s", base_dir.begin(), filename.begin(),
+ source_file.begin(), phase, suffix);
}
char* buffer = new char[full_filename.length() + 1];
- memcpy(buffer, full_filename.start(), full_filename.length());
+ memcpy(buffer, full_filename.begin(), full_filename.length());
buffer[full_filename.length()] = '\0';
return std::unique_ptr<char[]>(buffer);
}
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 0e1ed78652..05f522b6bc 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -10,8 +10,8 @@
#include <iosfwd>
#include <memory>
-#include "src/globals.h"
-#include "src/handles.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 16d41fa77b..038c9b457f 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -8,7 +8,7 @@
#include <array>
#include "src/base/compiler-specific.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index f440858c68..b083805771 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -5,11 +5,11 @@
#ifndef V8_COMPILER_INT64_LOWERING_H_
#define V8_COMPILER_INT64_LOWERING_H_
+#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-marker.h"
-#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index b947c061b1..d58331c85e 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -4,30 +4,33 @@
#include "src/compiler/js-call-reducer.h"
-#include "src/api-inl.h"
+#include <functional>
+
+#include "src/api/api-inl.h"
#include "src/builtins/builtins-promise.h"
#include "src/builtins/builtins-utils.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/map-inference.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/property-access-builder.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
-#include "src/counters.h"
-#include "src/feedback-vector-inl.h"
+#include "src/compiler/vector-slot-pair.h"
#include "src/ic/call-optimization.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/arguments-inl.h"
+#include "src/objects/feedback-vector-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-objects.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
-#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -389,6 +392,11 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// ES section #sec-function.prototype.bind
Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
// Value inputs to the {node} are as follows:
//
// - target, which is Function.prototype.bind JSFunction
@@ -407,12 +415,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// a JSFunction with the same [[Prototype]], and all maps we've
// seen for the {receiver} so far indicate that {receiver} is
// definitely a constructor or not a constructor.
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
+
MapRef first_receiver_map(broker(), receiver_maps[0]);
bool const is_constructor = first_receiver_map.is_constructor();
first_receiver_map.SerializePrototype();
@@ -426,12 +432,12 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
if (!receiver_map.prototype().equals(prototype) ||
receiver_map.is_constructor() != is_constructor ||
receiver_map.instance_type() < FIRST_FUNCTION_TYPE) {
- return NoChange();
+ return inference.NoChange();
}
// Disallow binding of slow-mode functions. We need to figure out
// whether the length and name property are in the original state.
- if (receiver_map.is_dictionary_map()) return NoChange();
+ if (receiver_map.is_dictionary_map()) return inference.NoChange();
// Check whether the length and name properties are still present
// as AccessorInfo objects. In that case, their values can be
@@ -440,22 +446,22 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// runtime otherwise.
Handle<DescriptorArray> descriptors(
receiver_map.object()->instance_descriptors(), isolate());
- if (descriptors->number_of_descriptors() < 2) return NoChange();
+ if (descriptors->number_of_descriptors() < 2) return inference.NoChange();
if (descriptors->GetKey(JSFunction::kLengthDescriptorIndex) !=
ReadOnlyRoots(isolate()).length_string()) {
- return NoChange();
+ return inference.NoChange();
}
if (!descriptors->GetStrongValue(JSFunction::kLengthDescriptorIndex)
- ->IsAccessorInfo()) {
- return NoChange();
+ .IsAccessorInfo()) {
+ return inference.NoChange();
}
if (descriptors->GetKey(JSFunction::kNameDescriptorIndex) !=
ReadOnlyRoots(isolate()).name_string()) {
- return NoChange();
+ return inference.NoChange();
}
if (!descriptors->GetStrongValue(JSFunction::kNameDescriptorIndex)
- ->IsAccessorInfo()) {
- return NoChange();
+ .IsAccessorInfo()) {
+ return inference.NoChange();
}
}
@@ -464,10 +470,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
MapRef map = is_constructor
? native_context().bound_function_with_constructor_map()
: native_context().bound_function_without_constructor_map();
- if (!map.prototype().equals(prototype)) return NoChange();
+ if (!map.prototype().equals(prototype)) return inference.NoChange();
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, VectorSlotPair(), receiver, effect, control);
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
// Replace the {node} with a JSCreateBoundFunction.
int const arity = std::max(0, node->op()->ValueInputCount() - 3);
@@ -569,45 +575,36 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
Node* effect = NodeProperties::GetEffectInput(node);
// Try to determine the {object} map.
- ZoneHandleSet<Map> object_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), object, effect, &object_maps);
- if (result != NodeProperties::kNoReceiverMaps) {
- MapRef candidate_map(broker(), object_maps[0]);
- candidate_map.SerializePrototype();
- ObjectRef candidate_prototype = candidate_map.prototype();
-
- // Check if we can constant-fold the {candidate_prototype}.
- for (size_t i = 0; i < object_maps.size(); ++i) {
- MapRef object_map(broker(), object_maps[i]);
- object_map.SerializePrototype();
- if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
- object_map.has_hidden_prototype() ||
- !object_map.prototype().equals(candidate_prototype)) {
- // We exclude special receivers, like JSProxy or API objects that
- // might require access checks here; we also don't want to deal
- // with hidden prototypes at this point.
- return NoChange();
- }
- // The above check also excludes maps for primitive values, which is
- // important because we are not applying [[ToObject]] here as expected.
- DCHECK(!object_map.IsPrimitiveMap() && object_map.IsJSReceiverMap());
- if (result == NodeProperties::kUnreliableReceiverMaps &&
- !object_map.is_stable()) {
- return NoChange();
- }
+ MapInference inference(broker(), object, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& object_maps = inference.GetMaps();
+
+ MapRef candidate_map(broker(), object_maps[0]);
+ candidate_map.SerializePrototype();
+ ObjectRef candidate_prototype = candidate_map.prototype();
+
+ // Check if we can constant-fold the {candidate_prototype}.
+ for (size_t i = 0; i < object_maps.size(); ++i) {
+ MapRef object_map(broker(), object_maps[i]);
+ object_map.SerializePrototype();
+ if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
+ object_map.has_hidden_prototype() ||
+ !object_map.prototype().equals(candidate_prototype)) {
+ // We exclude special receivers, like JSProxy or API objects that
+ // might require access checks here; we also don't want to deal
+ // with hidden prototypes at this point.
+ return inference.NoChange();
}
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- for (size_t i = 0; i < object_maps.size(); ++i) {
- dependencies()->DependOnStableMap(MapRef(broker(), object_maps[i]));
- }
- }
- Node* value = jsgraph()->Constant(candidate_prototype);
- ReplaceWithValue(node, value);
- return Replace(value);
+ // The above check also excludes maps for primitive values, which is
+ // important because we are not applying [[ToObject]] here as expected.
+ DCHECK(!object_map.IsPrimitiveMap() && object_map.IsJSReceiverMap());
}
-
- return NoChange();
+ if (!inference.RelyOnMapsViaStability(dependencies())) {
+ return inference.NoChange();
+ }
+ Node* value = jsgraph()->Constant(candidate_prototype);
+ ReplaceWithValue(node, value);
+ return Replace(value);
}
// ES6 section 19.1.2.11 Object.getPrototypeOf ( O )
@@ -730,14 +727,9 @@ Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) {
// Ensure that the {receiver} is known to be a JSReceiver (so that
// the ToObject step of Object.prototype.isPrototypeOf is a no-op).
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSReceiverMap()) return NoChange();
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAreJSReceiver()) {
+ return NoChange();
}
// We don't check whether {value} is a proper JSReceiver here explicitly,
@@ -996,7 +988,7 @@ void JSCallReducer::WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
namespace {
bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
- ZoneHandleSet<Map> receiver_maps,
+ MapHandles const& receiver_maps,
ElementsKind* kind_return) {
DCHECK_NE(0, receiver_maps.size());
*kind_return = MapRef(broker, receiver_maps[0]).elements_kind();
@@ -1011,7 +1003,7 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
}
bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
- ZoneHandleSet<Map> receiver_maps,
+ MapHandles const& receiver_maps,
ElementsKind* kind_return,
bool builtin_is_push = false) {
DCHECK_NE(0, receiver_maps.size());
@@ -1049,8 +1041,6 @@ Reduction JSCallReducer::ReduceArrayForEach(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
-
- // Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -1058,24 +1048,21 @@ Reduction JSCallReducer::ReduceArrayForEach(
Node* this_arg = node->op()->ValueInputCount() > 3
? NodeProperties::GetValueInput(node, 3)
: jsgraph()->UndefinedConstant();
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // Try to determine the {receiver} map.
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ bool const stability_dependency = inference.RelyOnMapsPreferStability(
+ dependencies(), jsgraph(), &effect, control, p.feedback());
Node* k = jsgraph()->ZeroConstant();
-
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
@@ -1109,25 +1096,25 @@ Reduction JSCallReducer::ReduceArrayForEach(
Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
control = if_true;
- Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), shared, Builtins::kArrayForEachLoopEagerDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
- outer_frame_state, ContinuationFrameStateMode::EAGER);
-
- effect =
- graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ {
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
- // Make sure the map hasn't changed during the iteration
- effect =
- graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
- receiver_maps, p.feedback()),
- receiver, effect, control);
+ // Deopt if the map has changed during the iteration.
+ if (!stability_dependency) {
+ inference.InsertMapChecks(jsgraph(), &effect, control, p.feedback());
+ }
Node* element =
SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
-
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
checkpoint_params[3] = next_k;
Node* hole_true = nullptr;
@@ -1157,7 +1144,7 @@ Reduction JSCallReducer::ReduceArrayForEach(
common()->TypeGuard(Type::NonInternal()), element, effect, control);
}
- frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayForEachLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -1206,18 +1193,6 @@ Reduction JSCallReducer::ReduceArrayForEach(
return Replace(jsgraph()->UndefinedConstant());
}
-Node* JSCallReducer::InsertMapChecksIfUnreliableReceiverMaps(
- NodeProperties::InferReceiverMapsResult result,
- ZoneHandleSet<Map> const& receiver_maps, VectorSlotPair const& feedback,
- Node* receiver, Node* effect, Node* control) {
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps, feedback),
- receiver, effect, control);
- }
- return effect;
-}
-
Reduction JSCallReducer::ReduceArrayReduce(
Node* node, ArrayReduceDirection direction,
const SharedFunctionInfoRef& shared) {
@@ -1233,37 +1208,23 @@ Reduction JSCallReducer::ReduceArrayReduce(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
-
- // Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
: jsgraph()->UndefinedConstant();
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ // Try to determine the {receiver} map.
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
- std::function<Node*(Node*)> hole_check = [this, kind](Node* element) {
- if (IsDoubleElementsKind(kind)) {
- return graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
- } else {
- return graph()->NewNode(simplified()->ReferenceEqual(), element,
- jsgraph()->TheHoleConstant());
- }
- };
-
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ bool const stability_dependency = inference.RelyOnMapsPreferStability(
+ dependencies(), jsgraph(), &effect, control, p.feedback());
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
@@ -1298,6 +1259,15 @@ Reduction JSCallReducer::ReduceArrayReduce(
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
&control, &check_fail, &check_throw);
+ std::function<Node*(Node*)> hole_check = [this, kind](Node* element) {
+ if (IsDoubleElementsKind(kind)) {
+ return graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ return graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ };
+
// Set initial accumulator value
Node* cur = jsgraph()->TheHoleConstant();
@@ -1387,16 +1357,16 @@ Reduction JSCallReducer::ReduceArrayReduce(
ContinuationFrameStateMode::EAGER);
effect =
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ inference.InsertMapChecks(jsgraph(), &effect, control, p.feedback());
}
- // Make sure the map hasn't changed during the iteration
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ // Deopt if the map has changed during the iteration.
+ if (!stability_dependency) {
+ inference.InsertMapChecks(jsgraph(), &effect, control, p.feedback());
+ }
Node* element =
SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
-
Node* next_k = graph()->NewNode(next_op, k, jsgraph()->OneConstant());
Node* hole_true = nullptr;
@@ -1495,8 +1465,6 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
-
- // Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -1504,30 +1472,27 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
Node* this_arg = node->op()->ValueInputCount() > 3
? NodeProperties::GetValueInput(node, 3)
: jsgraph()->UndefinedConstant();
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // Try to determine the {receiver} map.
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
- if (!dependencies()->DependOnArraySpeciesProtector()) return NoChange();
+ if (!dependencies()->DependOnArraySpeciesProtector())
+ return inference.NoChange();
if (IsHoleyElementsKind(kind)) {
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
+ bool const stability_dependency = inference.RelyOnMapsPreferStability(
+ dependencies(), jsgraph(), &effect, control, p.feedback());
Node* array_constructor = jsgraph()->Constant(
native_context().GetInitialJSArrayMap(kind).GetConstructor());
-
Node* k = jsgraph()->ZeroConstant();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
-
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
@@ -1576,23 +1541,22 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
control = if_true;
- Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), shared, Builtins::kArrayMapLoopEagerDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
- outer_frame_state, ContinuationFrameStateMode::EAGER);
-
- effect =
- graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ {
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), shared, Builtins::kArrayMapLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
- // Make sure the map hasn't changed during the iteration
- effect =
- graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
- receiver_maps, p.feedback()),
- receiver, effect, control);
+ // Deopt if the map has changed during the iteration.
+ if (!stability_dependency) {
+ inference.InsertMapChecks(jsgraph(), &effect, control, p.feedback());
+ }
Node* element =
SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
-
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
@@ -1625,7 +1589,7 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
// This frame state is dealt with by hand in
// ArrayMapLoopLazyDeoptContinuation.
- frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayMapLoopLazyDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::LAZY);
@@ -1693,7 +1657,6 @@ Reduction JSCallReducer::ReduceArrayFilter(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- // Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -1701,33 +1664,31 @@ Reduction JSCallReducer::ReduceArrayFilter(
Node* this_arg = node->op()->ValueInputCount() > 3
? NodeProperties::GetValueInput(node, 3)
: jsgraph()->UndefinedConstant();
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // Try to determine the {receiver} map.
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
- // The output array is packed (filter doesn't visit holes).
- const ElementsKind packed_kind = GetPackedElementsKind(kind);
-
- if (!dependencies()->DependOnArraySpeciesProtector()) return NoChange();
+ if (!dependencies()->DependOnArraySpeciesProtector())
+ return inference.NoChange();
if (IsHoleyElementsKind(kind)) {
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
+ bool const stability_dependency = inference.RelyOnMapsPreferStability(
+ dependencies(), jsgraph(), &effect, control, p.feedback());
+ // The output array is packed (filter doesn't visit holes).
+ const ElementsKind packed_kind = GetPackedElementsKind(kind);
MapRef initial_map = native_context().GetInitialJSArrayMap(packed_kind);
Node* k = jsgraph()->ZeroConstant();
Node* to = jsgraph()->ZeroConstant();
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
-
Node* a; // Construct the output array.
{
AllocationBuilder ab(jsgraph(), effect, control);
@@ -1792,25 +1753,21 @@ Reduction JSCallReducer::ReduceArrayFilter(
Node* checkpoint_params[] = {receiver, fncallback, this_arg, a,
k, original_length, to};
const int stack_parameters = arraysize(checkpoint_params);
-
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayFilterLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
-
effect =
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
}
- // Make sure the map hasn't changed during the iteration.
- effect =
- graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
- receiver_maps, p.feedback()),
- receiver, effect, control);
+ // Deopt if the map has changed during the iteration.
+ if (!stability_dependency) {
+ inference.InsertMapChecks(jsgraph(), &effect, control, p.feedback());
+ }
Node* element =
SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
-
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
@@ -1957,8 +1914,6 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
-
- // Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -1966,28 +1921,24 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
Node* this_arg = node->op()->ValueInputCount() > 3
? NodeProperties::GetValueInput(node, 3)
: jsgraph()->UndefinedConstant();
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // Try to determine the {receiver} map.
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ bool const stability_dependency = inference.RelyOnMapsPreferStability(
+ dependencies(), jsgraph(), &effect, control, p.feedback());
Node* k = jsgraph()->ZeroConstant();
-
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
-
Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
original_length};
const int stack_parameters = arraysize(checkpoint_params);
@@ -2021,27 +1972,22 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node, ArrayFindVariant variant,
if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
}
- // Check the map hasn't changed during the iteration.
{
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, eager_continuation_builtin, node->InputAt(0),
context, &checkpoint_params[0], stack_parameters, outer_frame_state,
ContinuationFrameStateMode::EAGER);
-
effect =
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
- effect =
- graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
- receiver_maps, p.feedback()),
- receiver, effect, control);
+ // Deopt if the map has changed during the iteration.
+ if (!stability_dependency) {
+ inference.InsertMapChecks(jsgraph(), &effect, control, p.feedback());
}
- // Load k-th element from receiver.
Node* element =
SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
-
- // Increment k for the next iteration.
Node* next_k = checkpoint_params[3] =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
@@ -2265,7 +2211,6 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- // Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -2273,27 +2218,25 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
Node* this_arg = node->op()->ValueInputCount() > 3
? NodeProperties::GetValueInput(node, 3)
: jsgraph()->UndefinedConstant();
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // Try to determine the {receiver} map.
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
- if (!dependencies()->DependOnArraySpeciesProtector()) return NoChange();
+ if (!dependencies()->DependOnArraySpeciesProtector())
+ return inference.NoChange();
if (IsHoleyElementsKind(kind)) {
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ bool const stability_dependency = inference.RelyOnMapsPreferStability(
+ dependencies(), jsgraph(), &effect, control, p.feedback());
Node* k = jsgraph()->ZeroConstant();
-
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
@@ -2335,25 +2278,21 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
original_length};
const int stack_parameters = arraysize(checkpoint_params);
-
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArrayEveryLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
-
effect =
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
}
- // Make sure the map hasn't changed during the iteration.
- effect =
- graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
- receiver_maps, p.feedback()),
- receiver, effect, control);
+ // Deopt if the map has changed during the iteration.
+ if (!stability_dependency) {
+ inference.InsertMapChecks(jsgraph(), &effect, control, p.feedback());
+ }
Node* element =
SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
-
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
@@ -2524,23 +2463,19 @@ Reduction JSCallReducer::ReduceArrayIndexOfIncludes(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
if (IsHoleyElementsKind(kind)) {
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
Callable const callable = search_variant == SearchVariant::kIndexOf
? GetCallableForArrayIndexOf(kind, isolate())
@@ -2601,7 +2536,6 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- // Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
? NodeProperties::GetValueInput(node, 2)
@@ -2609,27 +2543,25 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
Node* this_arg = node->op()->ValueInputCount() > 3
? NodeProperties::GetValueInput(node, 3)
: jsgraph()->UndefinedConstant();
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // Try to determine the {receiver} map.
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayIteratingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
- if (!dependencies()->DependOnArraySpeciesProtector()) return NoChange();
+ if (!dependencies()->DependOnArraySpeciesProtector())
+ return inference.NoChange();
if (IsHoleyElementsKind(kind)) {
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
+ bool const stability_dependency = inference.RelyOnMapsPreferStability(
+ dependencies(), jsgraph(), &effect, control, p.feedback());
Node* k = jsgraph()->ZeroConstant();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
-
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
effect, control);
@@ -2676,25 +2608,21 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
Node* checkpoint_params[] = {receiver, fncallback, this_arg, k,
original_length};
const int stack_parameters = arraysize(checkpoint_params);
-
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
jsgraph(), shared, Builtins::kArraySomeLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
-
effect =
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
}
- // Make sure the map hasn't changed during the iteration.
- effect =
- graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
- receiver_maps, p.feedback()),
- receiver, effect, control);
+ // Deopt if the map has changed during the iteration.
+ if (!stability_dependency) {
+ inference.InsertMapChecks(jsgraph(), &effect, control, p.feedback());
+ }
Node* element =
SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
-
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
@@ -2810,6 +2738,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int const argc = static_cast<int>(p.arity()) - 2;
+ Node* target = NodeProperties::GetValueInput(node, 0);
Node* global_proxy =
jsgraph()->Constant(native_context().global_proxy_object());
Node* receiver = (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
@@ -2818,6 +2747,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Node* holder;
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
// See if we can optimize this API call to {shared}.
Handle<FunctionTemplateInfo> function_template_info(
@@ -2826,11 +2757,10 @@ Reduction JSCallReducer::ReduceCallApiFunction(
if (!call_optimization.is_simple_api_call()) return NoChange();
// Try to infer the {receiver} maps from the graph.
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result != NodeProperties::kNoReceiverMaps) {
+ MapInference inference(broker(), receiver, effect);
+ if (inference.HaveMaps()) {
+ MapHandles const& receiver_maps = inference.GetMaps();
+
// Check that all {receiver_maps} are actually JSReceiver maps and
// that the {function_template_info} accepts them without access
// checks (even if "access check needed" is set for {receiver}).
@@ -2855,7 +2785,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
if (!receiver_map.IsJSReceiverMap() ||
(receiver_map.is_access_check_needed() &&
!function_template_info->accept_any_receiver())) {
- return NoChange();
+ return inference.NoChange();
}
}
@@ -2863,21 +2793,34 @@ Reduction JSCallReducer::ReduceCallApiFunction(
CallOptimization::HolderLookup lookup;
Handle<JSObject> api_holder =
call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup);
- if (lookup == CallOptimization::kHolderNotFound) return NoChange();
+ if (lookup == CallOptimization::kHolderNotFound)
+ return inference.NoChange();
for (size_t i = 1; i < receiver_maps.size(); ++i) {
CallOptimization::HolderLookup lookupi;
Handle<JSObject> holderi = call_optimization.LookupHolderOfExpectedType(
receiver_maps[i], &lookupi);
- if (lookup != lookupi) return NoChange();
- if (!api_holder.is_identical_to(holderi)) return NoChange();
+ if (lookup != lookupi) return inference.NoChange();
+ if (!api_holder.is_identical_to(holderi)) return inference.NoChange();
}
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation &&
+ !inference.RelyOnMapsViaStability(dependencies())) {
+ // We were not able to make the receiver maps reliable without map checks
+ // but doing map checks would lead to deopt loops, so give up.
+ return inference.NoChange();
+ }
+
+ // TODO(neis): The maps were used in a way that does not actually require
+ // map checks or stability dependencies.
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
+
// Determine the appropriate holder for the {lookup}.
holder = lookup == CallOptimization::kHolderFound
? jsgraph()->HeapConstant(api_holder)
: receiver;
} else if (function_template_info->accept_any_receiver() &&
- function_template_info->signature()->IsUndefined(isolate())) {
+ function_template_info->signature().IsUndefined(isolate())) {
// We haven't found any {receiver_maps}, but we might still be able to
// optimize the API call depending on the {function_template_info}.
// If the API function accepts any kind of {receiver}, we only need to
@@ -2903,7 +2846,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
// faster than the generic call sequence.
Builtins::Name builtin_name =
!function_template_info->accept_any_receiver()
- ? (function_template_info->signature()->IsUndefined(isolate())
+ ? (function_template_info->signature().IsUndefined(isolate())
? Builtins::kCallFunctionTemplate_CheckAccess
: Builtins::
kCallFunctionTemplate_CheckAccessAndCompatibleReceiver)
@@ -2944,6 +2887,10 @@ Reduction JSCallReducer::ReduceCallApiFunction(
ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
ExternalReference function_reference = ExternalReference::Create(
&api_function, ExternalReference::DIRECT_API_CALL);
+
+ Node* continuation_frame_state = CreateGenericLazyDeoptContinuationFrameState(
+ jsgraph(), shared, target, context, receiver, frame_state);
+
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(call_api_callback.code()));
node->ReplaceInput(1, jsgraph()->ExternalConstant(function_reference));
@@ -2951,6 +2898,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(data));
node->InsertInput(graph()->zone(), 4, holder);
node->ReplaceInput(5, receiver); // Update receiver input.
+ node->ReplaceInput(7 + argc, continuation_frame_state);
node->ReplaceInput(8 + argc, effect); // Update effect input.
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
@@ -3623,6 +3571,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceStringPrototypeSlice(node);
case Builtins::kStringPrototypeSubstr:
return ReduceStringPrototypeSubstr(node);
+ case Builtins::kStringPrototypeStartsWith:
+ return ReduceStringPrototypeStartsWith(node);
#ifdef V8_INTL_SUPPORT
case Builtins::kStringPrototypeToLowerCaseIntl:
return ReduceStringPrototypeToLowerCaseIntl(node);
@@ -4283,21 +4233,21 @@ Reduction JSCallReducer::ReduceReturnReceiver(Node* node) {
Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
DeoptimizeReason reason) {
- if (flags() & kBailoutOnUninitialized) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
- frame_state, effect, control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- node->TrimInputCount(0);
- NodeProperties::ChangeOp(node, common()->Dead());
- return Changed(node);
- }
- return NoChange();
+ if (!(flags() & kBailoutOnUninitialized)) return NoChange();
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state =
+ NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
+ return Changed(node);
}
// ES6 section 22.1.3.18 Array.prototype.push ( )
@@ -4313,22 +4263,17 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind, true)) {
- return NoChange();
+ return inference.NoChange();
}
-
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
// Collect the value inputs to push.
std::vector<Node*> values(num_values);
@@ -4411,22 +4356,17 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
// Load the "length" property of the {receiver}.
Node* length = effect = graph()->NewNode(
@@ -4514,22 +4454,17 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
ElementsKind kind;
if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
- return NoChange();
+ return inference.NoChange();
}
-
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
// Load length of the {receiver}.
Node* length = effect = graph()->NewNode(
@@ -4708,12 +4643,9 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
return NoChange();
}
- // Try to determine the {receiver} maps.
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
// Check that the maps are of JSArray (and more).
// TODO(turbofan): Consider adding special case for the common pattern
@@ -4721,20 +4653,20 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
bool can_be_holey = false;
for (Handle<Map> map : receiver_maps) {
MapRef receiver_map(broker(), map);
- if (!receiver_map.supports_fast_array_iteration()) return NoChange();
-
+ if (!receiver_map.supports_fast_array_iteration())
+ return inference.NoChange();
if (IsHoleyElementsKind(receiver_map.elements_kind())) {
can_be_holey = true;
}
}
- if (!dependencies()->DependOnArraySpeciesProtector()) return NoChange();
+ if (!dependencies()->DependOnArraySpeciesProtector())
+ return inference.NoChange();
if (can_be_holey) {
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
// TODO(turbofan): We can do even better here, either adding a CloneArray
// simplified operator, whose output type indicates that it's an Array,
@@ -4791,15 +4723,9 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
Node* control = NodeProperties::GetControlInput(node);
// Check if we know that {receiver} is a valid JSReceiver.
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSReceiverMap()) return NoChange();
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAreJSReceiver()) {
+ return NoChange();
}
// Morph the {node} into a JSCreateArrayIterator with the given {kind}.
@@ -4813,22 +4739,6 @@ Reduction JSCallReducer::ReduceArrayIterator(Node* node, IterationKind kind) {
return Changed(node);
}
-namespace {
-
-bool InferIteratedObjectMaps(JSHeapBroker* broker, Node* iterator,
- ZoneHandleSet<Map>* iterated_object_maps) {
- DCHECK_EQ(IrOpcode::kJSCreateArrayIterator, iterator->opcode());
- Node* iterated_object = NodeProperties::GetValueInput(iterator, 0);
- Node* effect = NodeProperties::GetEffectInput(iterator);
-
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker, iterated_object, effect,
- iterated_object_maps);
- return result != NodeProperties::kNoReceiverMaps;
-}
-
-} // namespace
-
// ES #sec-%arrayiteratorprototype%.next
Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -4842,58 +4752,48 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
return NoChange();
}
- // Check if the {iterator} is a JSCreateArrayIterator.
if (iterator->opcode() != IrOpcode::kJSCreateArrayIterator) return NoChange();
+
IterationKind const iteration_kind =
CreateArrayIteratorParametersOf(iterator->op()).kind();
+ Node* iterated_object = NodeProperties::GetValueInput(iterator, 0);
+ Node* iterator_effect = NodeProperties::GetEffectInput(iterator);
- // Try to infer the [[IteratedObject]] maps from the {iterator}.
- ZoneHandleSet<Map> iterated_object_maps;
- if (!InferIteratedObjectMaps(broker(), iterator, &iterated_object_maps)) {
- return NoChange();
- }
- DCHECK_NE(0, iterated_object_maps.size());
+ MapInference inference(broker(), iterated_object, iterator_effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& iterated_object_maps = inference.GetMaps();
// Check that various {iterated_object_maps} have compatible elements kinds.
ElementsKind elements_kind =
MapRef(broker(), iterated_object_maps[0]).elements_kind();
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ if (IsTypedArrayElementsKind(elements_kind)) {
// TurboFan doesn't support loading from BigInt typed arrays yet.
if (elements_kind == BIGUINT64_ELEMENTS ||
elements_kind == BIGINT64_ELEMENTS) {
- return NoChange();
+ return inference.NoChange();
}
for (Handle<Map> map : iterated_object_maps) {
MapRef iterated_object_map(broker(), map);
if (iterated_object_map.elements_kind() != elements_kind) {
- return NoChange();
+ return inference.NoChange();
}
}
} else {
if (!CanInlineArrayIteratingBuiltin(broker(), iterated_object_maps,
&elements_kind)) {
- return NoChange();
+ return inference.NoChange();
}
}
- // Install code dependency on the array protector for holey arrays.
if (IsHoleyElementsKind(elements_kind)) {
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
}
+ // Since the map inference was done relative to {iterator_effect} rather than
+ // {effect}, we need to guard the use of the map(s) even when the inference
+ // was reliable.
+ inference.InsertMapChecks(jsgraph(), &effect, control, p.feedback());
- // Load the (current) {iterated_object} from the {iterator}.
- Node* iterated_object = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayIteratorIteratedObject()),
- iterator, effect, control);
-
- // Ensure that the {iterated_object} map didn't change.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, iterated_object_maps,
- p.feedback()),
- iterated_object, effect, control);
-
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ if (IsTypedArrayElementsKind(elements_kind)) {
// See if we can skip the detaching check.
if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
// Bail out if the {iterated_object}s JSArrayBuffer was detached.
@@ -4921,9 +4821,9 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// {iterated_object} is either a JSArray or a JSTypedArray. For the
// latter case we even know that it's a Smi in UnsignedSmall range.
FieldAccess index_access = AccessBuilder::ForJSArrayIteratorNextIndex();
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ if (IsTypedArrayElementsKind(elements_kind)) {
index_access.type = TypeCache::Get()->kJSTypedArrayLengthType;
- index_access.machine_type = MachineType::TaggedSigned();
+ index_access.machine_type = MachineType::TypeCompressedTaggedSigned();
index_access.write_barrier_kind = kNoWriteBarrier;
} else {
index_access.type = TypeCache::Get()->kJSArrayLengthType;
@@ -4945,7 +4845,7 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// already know something about the length here, which we can leverage
// to generate Word32 operations below without additional checking.
FieldAccess length_access =
- IsFixedTypedArrayElementsKind(elements_kind)
+ IsTypedArrayElementsKind(elements_kind)
? AccessBuilder::ForJSTypedArrayLength()
: AccessBuilder::ForJSArrayLength(elements_kind);
Node* length = effect = graph()->NewNode(
@@ -4975,15 +4875,15 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
DCHECK(iteration_kind == IterationKind::kEntries ||
iteration_kind == IterationKind::kValues);
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
- Node* base_ptr = etrue = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
- elements, etrue, if_true);
+ if (IsTypedArrayElementsKind(elements_kind)) {
+ Node* base_ptr = etrue =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSTypedArrayBasePointer()),
+ iterated_object, etrue, if_true);
Node* external_ptr = etrue = graph()->NewNode(
simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
- elements, etrue, if_true);
+ AccessBuilder::ForJSTypedArrayExternalPointer()),
+ iterated_object, etrue, if_true);
ExternalArrayType array_type = kExternalInt8Array;
switch (elements_kind) {
@@ -5052,7 +4952,7 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
done_false = jsgraph()->TrueConstant();
value_false = jsgraph()->UndefinedConstant();
- if (!IsFixedTypedArrayElementsKind(elements_kind)) {
+ if (!IsTypedArrayElementsKind(elements_kind)) {
// Mark the {iterator} as exhausted by setting the [[NextIndex]] to a
// value that will never pass the length check again (aka the maximum
// value possible for the specific iterated object). Note that this is
@@ -5125,6 +5025,84 @@ Reduction JSCallReducer::ReduceStringPrototypeStringAt(
return Replace(value);
}
+// ES section 21.1.3.20
+// String.prototype.startsWith ( searchString [ , position ] )
+Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
+ Node* string = NodeProperties::GetValueInput(node, 1);
+ Node* search_string = NodeProperties::GetValueInput(node, 2);
+ Node* position = node->op()->ValueInputCount() >= 4
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->ZeroConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ HeapObjectMatcher m(search_string);
+ if (m.HasValue()) {
+ ObjectRef target_ref = m.Ref(broker());
+ if (target_ref.IsString()) {
+ StringRef str = target_ref.AsString();
+ if (str.length() == 1) {
+ string = effect = graph()->NewNode(
+ simplified()->CheckString(p.feedback()), string, effect, control);
+ position = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), position, effect, control);
+
+ Node* string_length =
+ graph()->NewNode(simplified()->StringLength(), string);
+ Node* unsigned_position = graph()->NewNode(
+ simplified()->NumberMax(), position, jsgraph()->ZeroConstant());
+
+ Node* check = graph()->NewNode(simplified()->NumberLessThan(),
+ unsigned_position, string_length);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kNone),
+ check, control);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = jsgraph()->FalseConstant();
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue;
+ {
+ Node* masked_position =
+ graph()->NewNode(simplified()->PoisonIndex(), unsigned_position);
+ Node* string_first = etrue =
+ graph()->NewNode(simplified()->StringCharCodeAt(), string,
+ masked_position, etrue, if_true);
+
+ Node* search_first = jsgraph()->Constant(str.GetFirstChar());
+ vtrue = graph()->NewNode(simplified()->NumberEqual(), string_first,
+ search_first);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+ }
+
+ return NoChange();
+}
+
// ES section 21.1.3.1 String.prototype.charAt ( pos )
Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -5282,68 +5260,70 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- if (NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
- JS_STRING_ITERATOR_TYPE)) {
- Node* string = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
- receiver, effect, control);
- Node* index = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
- receiver, effect, control);
- Node* length = graph()->NewNode(simplified()->StringLength(), string);
-
- // branch0: if (index < length)
- Node* check0 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kNone), check0, control);
-
- Node* etrue0 = effect;
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* done_true;
- Node* vtrue0;
- {
- done_true = jsgraph()->FalseConstant();
- Node* codepoint = etrue0 = graph()->NewNode(
- simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string,
- index, etrue0, if_true0);
- vtrue0 = graph()->NewNode(
- simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF16),
- codepoint);
-
- // Update iterator.[[NextIndex]]
- Node* char_length =
- graph()->NewNode(simplified()->StringLength(), vtrue0);
- index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
- etrue0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
- receiver, index, etrue0, if_true0);
- }
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* done_false;
- Node* vfalse0;
- {
- vfalse0 = jsgraph()->UndefinedConstant();
- done_false = jsgraph()->TrueConstant();
- }
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() ||
+ !inference.AllOfInstanceTypesAre(JS_STRING_ITERATOR_TYPE)) {
+ return NoChange();
+ }
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, effect, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
- Node* done =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- done_true, done_false, control);
+ Node* string = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSStringIteratorString()),
+ receiver, effect, control);
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
+ receiver, effect, control);
+ Node* length = graph()->NewNode(simplified()->StringLength(), string);
- value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
- value, done, context, effect);
+ // branch0: if (index < length)
+ Node* check0 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kNone), check0, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ Node* etrue0 = effect;
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* done_true;
+ Node* vtrue0;
+ {
+ done_true = jsgraph()->FalseConstant();
+ Node* codepoint = etrue0 = graph()->NewNode(
+ simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string, index,
+ etrue0, if_true0);
+ vtrue0 = graph()->NewNode(
+ simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF16),
+ codepoint);
+
+ // Update iterator.[[NextIndex]]
+ Node* char_length = graph()->NewNode(simplified()->StringLength(), vtrue0);
+ index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
+ etrue0 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
+ receiver, index, etrue0, if_true0);
}
- return NoChange();
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* done_false;
+ Node* vfalse0;
+ {
+ vfalse0 = jsgraph()->UndefinedConstant();
+ done_false = jsgraph()->TrueConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, effect, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
+ vfalse0, control);
+ Node* done =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ done_true, done_false, control);
+
+ value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+ value, done, context, effect);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
// ES #sec-string.prototype.concat
@@ -5656,34 +5636,26 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Check if we know something about {receiver} already.
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
// Check whether all {receiver_maps} are JSPromise maps and
// have the initial Promise.prototype as their [[Prototype]].
for (Handle<Map> map : receiver_maps) {
MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return NoChange();
+ if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
receiver_map.SerializePrototype();
if (!receiver_map.prototype().equals(
native_context().promise_prototype())) {
- return NoChange();
+ return inference.NoChange();
}
}
- // Check that the Promise.then protector is intact. This protector guards
- // that all JSPromise instances whose [[Prototype]] is the initial
- // %PromisePrototype% yield the initial %PromisePrototype%.then method
- // when looking up "then".
- if (!dependencies()->DependOnPromiseThenProtector()) return NoChange();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ if (!dependencies()->DependOnPromiseThenProtector())
+ return inference.NoChange();
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
// Massage the {node} to call "then" instead by first removing all inputs
// following the onRejected parameter, and then filling up the parameters
@@ -5717,43 +5689,30 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
return NoChange();
}
- // Check if we know something about {receiver} already.
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
// Check whether all {receiver_maps} are JSPromise maps and
// have the initial Promise.prototype as their [[Prototype]].
for (Handle<Map> map : receiver_maps) {
MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return NoChange();
+ if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
receiver_map.SerializePrototype();
if (!receiver_map.prototype().equals(
native_context().promise_prototype())) {
- return NoChange();
+ return inference.NoChange();
}
}
- // Check that promises aren't being observed through (debug) hooks.
- if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
-
- // Check that the Promise#then protector is intact. This protector guards
- // that all JSPromise instances whose [[Prototype]] is the initial
- // %PromisePrototype% yield the initial %PromisePrototype%.then method
- // when looking up "then".
- if (!dependencies()->DependOnPromiseThenProtector()) return NoChange();
-
- // Also check that the @@species protector is intact, which guards the
- // lookup of "constructor" on JSPromise instances, whoch [[Prototype]] is
- // the initial %PromisePrototype%, and the Symbol.species lookup on the
- // %PromisePrototype%.
- if (!dependencies()->DependOnPromiseSpeciesProtector()) return NoChange();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ if (!dependencies()->DependOnPromiseHookProtector())
+ return inference.NoChange();
+ if (!dependencies()->DependOnPromiseThenProtector())
+ return inference.NoChange();
+ if (!dependencies()->DependOnPromiseSpeciesProtector())
+ return inference.NoChange();
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
// Check if {on_finally} is callable, and if so wrap it into appropriate
// closures that perform the finalization.
@@ -5823,8 +5782,12 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
// At this point we definitely know that {receiver} has one of the
// {receiver_maps}, so insert a MapGuard as a hint for the lowering
// of the call to "then" below.
- effect = graph()->NewNode(simplified()->MapGuard(receiver_maps), receiver,
- effect, control);
+ {
+ ZoneHandleSet<Map> maps;
+ for (Handle<Map> map : receiver_maps) maps.insert(map, graph()->zone());
+ effect = graph()->NewNode(simplified()->MapGuard(maps), receiver, effect,
+ control);
+ }
// Massage the {node} to call "then" instead by first removing all inputs
// following the onFinally parameter, and then replacing the only parameter
@@ -5865,37 +5828,28 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
- // Check if we know something about {receiver} already.
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
// Check whether all {receiver_maps} are JSPromise maps and
// have the initial Promise.prototype as their [[Prototype]].
for (Handle<Map> map : receiver_maps) {
MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return NoChange();
+ if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
receiver_map.SerializePrototype();
if (!receiver_map.prototype().equals(
native_context().promise_prototype())) {
- return NoChange();
+ return inference.NoChange();
}
}
- // Check that promises aren't being observed through (debug) hooks.
- if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
-
- // Check if the @@species protector is intact. The @@species protector
- // guards the "constructor" lookup on all JSPromise instances and the
- // initial Promise.prototype, as well as the Symbol.species lookup on
- // the Promise constructor.
- if (!dependencies()->DependOnPromiseSpeciesProtector()) return NoChange();
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, receiver_maps, p.feedback(), receiver, effect, control);
+ if (!dependencies()->DependOnPromiseHookProtector())
+ return inference.NoChange();
+ if (!dependencies()->DependOnPromiseSpeciesProtector())
+ return inference.NoChange();
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
// Check that {on_fulfilled} is callable.
on_fulfilled = graph()->NewNode(
@@ -5945,21 +5899,11 @@ Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Check if we know something about {receiver} already.
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult infer_receiver_maps_result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (infer_receiver_maps_result == NodeProperties::kNoReceiverMaps) {
+ // Only reduce when the receiver is guaranteed to be a JSReceiver.
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAreJSReceiver()) {
return NoChange();
}
- DCHECK_NE(0, receiver_maps.size());
-
- // Only reduce when all {receiver_maps} are JSReceiver maps.
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSReceiverMap()) return NoChange();
- }
// Morph the {node} into a JSPromiseResolve operation.
node->ReplaceInput(0, receiver);
@@ -6004,8 +5948,8 @@ Reduction JSCallReducer::ReduceTypedArrayConstructor(
Node* const parameters[] = {jsgraph()->TheHoleConstant()};
int const num_parameters = static_cast<int>(arraysize(parameters));
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), shared, Builtins::kGenericConstructorLazyDeoptContinuation,
- target, context, parameters, num_parameters, frame_state,
+ jsgraph(), shared, Builtins::kGenericLazyDeoptContinuation, target,
+ context, parameters, num_parameters, frame_state,
ContinuationFrameStateMode::LAZY);
Node* result =
@@ -6145,9 +6089,10 @@ Reduction JSCallReducer::ReduceMapPrototypeGet(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
- JS_MAP_TYPE))
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAre(JS_MAP_TYPE)) {
return NoChange();
+ }
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
@@ -6190,9 +6135,10 @@ Reduction JSCallReducer::ReduceMapPrototypeHas(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* key = NodeProperties::GetValueInput(node, 2);
- if (!NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
- JS_MAP_TYPE))
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAre(JS_MAP_TYPE)) {
return NoChange();
+ }
Node* table = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
@@ -6230,16 +6176,18 @@ Reduction JSCallReducer::ReduceCollectionIteration(
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(
- broker(), receiver, effect,
- InstanceTypeForCollectionKind(collection_kind))) {
- Node* js_create_iterator = effect = graph()->NewNode(
- javascript()->CreateCollectionIterator(collection_kind, iteration_kind),
- receiver, context, effect, control);
- ReplaceWithValue(node, js_create_iterator, effect);
- return Replace(js_create_iterator);
+
+ InstanceType type = InstanceTypeForCollectionKind(collection_kind);
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAre(type)) {
+ return NoChange();
}
- return NoChange();
+
+ Node* js_create_iterator = effect = graph()->NewNode(
+ javascript()->CreateCollectionIterator(collection_kind, iteration_kind),
+ receiver, context, effect, control);
+ ReplaceWithValue(node, js_create_iterator, effect);
+ return Replace(js_create_iterator);
}
Reduction JSCallReducer::ReduceCollectionPrototypeSize(
@@ -6248,20 +6196,22 @@ Reduction JSCallReducer::ReduceCollectionPrototypeSize(
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(
- broker(), receiver, effect,
- InstanceTypeForCollectionKind(collection_kind))) {
- Node* table = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
- receiver, effect, control);
- Node* value = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForOrderedHashMapOrSetNumberOfElements()),
- table, effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+
+ InstanceType type = InstanceTypeForCollectionKind(collection_kind);
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAre(type)) {
+ return NoChange();
}
- return NoChange();
+
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
+ effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashMapOrSetNumberOfElements()),
+ table, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
@@ -6269,6 +6219,11 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
InstanceType collection_iterator_instance_type_first,
InstanceType collection_iterator_instance_type_last) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -6286,23 +6241,23 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
// how to update the escape analysis / arrange the graph in a way that
// this becomes possible.
- // Infer the {receiver} instance type.
InstanceType receiver_instance_type;
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, receiver_maps.size());
- receiver_instance_type = receiver_maps[0]->instance_type();
- for (size_t i = 1; i < receiver_maps.size(); ++i) {
- if (receiver_maps[i]->instance_type() != receiver_instance_type) {
- return NoChange();
+ {
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& receiver_maps = inference.GetMaps();
+ receiver_instance_type = receiver_maps[0]->instance_type();
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ if (receiver_maps[i]->instance_type() != receiver_instance_type) {
+ return inference.NoChange();
+ }
}
- }
- if (receiver_instance_type < collection_iterator_instance_type_first ||
- receiver_instance_type > collection_iterator_instance_type_last) {
- return NoChange();
+ if (receiver_instance_type < collection_iterator_instance_type_first ||
+ receiver_instance_type > collection_iterator_instance_type_last) {
+ return inference.NoChange();
+ }
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
}
// Transition the JSCollectionIterator {receiver} if necessary
@@ -6570,42 +6525,44 @@ Reduction JSCallReducer::ReduceArrayBufferViewAccessor(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
- instance_type)) {
- // Load the {receiver}s field.
- Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
- receiver, effect, control);
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() ||
+ !inference.AllOfInstanceTypesAre(instance_type)) {
+ return NoChange();
+ }
- // See if we can skip the detaching check.
- if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
- // Check whether {receiver}s JSArrayBuffer was detached.
- Node* buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* buffer_bit_field = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
- buffer, effect, control);
- Node* check = graph()->NewNode(
- simplified()->NumberEqual(),
- graph()->NewNode(
- simplified()->NumberBitwiseAnd(), buffer_bit_field,
- jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
- jsgraph()->ZeroConstant());
+ // Load the {receiver}s field.
+ Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
+ receiver, effect, control);
- // TODO(turbofan): Ideally we would bail out here if the {receiver}s
- // JSArrayBuffer was detached, but there's no way to guard against
- // deoptimization loops right now, since the JSCall {node} is usually
- // created from a LOAD_IC inlining, and so there's no CALL_IC slot
- // from which we could use the speculation bit.
- value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
- check, value, jsgraph()->ZeroConstant());
- }
+ // See if we can skip the detaching check.
+ if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
+ // Check whether {receiver}s JSArrayBuffer was detached.
+ Node* buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* buffer_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
+ buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bit_field,
+ jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
+ jsgraph()->ZeroConstant());
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ // TODO(turbofan): Ideally we would bail out here if the {receiver}s
+ // JSArrayBuffer was detached, but there's no way to guard against
+ // deoptimization loops right now, since the JSCall {node} is usually
+ // created from a LOAD_IC inlining, and so there's no CALL_IC slot
+ // from which we could use the speculation bit.
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kTrue),
+ check, value, jsgraph()->ZeroConstant());
}
- return NoChange();
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
namespace {
@@ -6651,122 +6608,117 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
}
// Only do stuff if the {receiver} is really a DataView.
- if (NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
- JS_DATA_VIEW_TYPE)) {
- Node* byte_offset;
-
- // Check that the {offset} is within range for the {receiver}.
- HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
- // We only deal with DataViews here whose [[ByteLength]] is at least
- // {element_size}, as for all other DataViews it'll be out-of-bounds.
- JSDataViewRef dataview = m.Ref(broker()).AsJSDataView();
- if (dataview.byte_length() < element_size) return NoChange();
-
- // Check that the {offset} is within range of the {byte_length}.
- Node* byte_length =
- jsgraph()->Constant(dataview.byte_length() - (element_size - 1));
- offset = effect =
- graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
- byte_length, effect, control);
-
- // Load the [[ByteOffset]] from the {dataview}.
- byte_offset = jsgraph()->Constant(dataview.byte_offset());
- } else {
- // We only deal with DataViews here that have Smi [[ByteLength]]s.
- Node* byte_length = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteLength()),
- receiver, effect, control);
-
- if (element_size > 1) {
- // For non-byte accesses we also need to check that the {offset}
- // plus the {element_size}-1 fits within the given {byte_length}.
- // So to keep this as a single check on the {offset}, we subtract
- // the {element_size}-1 from the {byte_length} here (clamped to
- // positive safe integer range), and perform a check against that
- // with the {offset} below.
- byte_length = graph()->NewNode(
- simplified()->NumberMax(), jsgraph()->ZeroConstant(),
- graph()->NewNode(simplified()->NumberSubtract(), byte_length,
- jsgraph()->Constant(element_size - 1)));
- }
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() ||
+ !inference.AllOfInstanceTypesAre(JS_DATA_VIEW_TYPE)) {
+ return NoChange();
+ }
- // Check that the {offset} is within range of the {byte_length}.
- offset = effect =
- graph()->NewNode(simplified()->CheckBounds(p.feedback()), offset,
- byte_length, effect, control);
+ // Check that the {offset} is within range for the {receiver}.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ // We only deal with DataViews here whose [[ByteLength]] is at least
+ // {element_size}, as for all other DataViews it'll be out-of-bounds.
+ JSDataViewRef dataview = m.Ref(broker()).AsJSDataView();
+ if (dataview.byte_length() < element_size) return NoChange();
+
+ // Check that the {offset} is within range of the {byte_length}.
+ Node* byte_length =
+ jsgraph()->Constant(dataview.byte_length() - (element_size - 1));
+ offset = effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
+ offset, byte_length, effect, control);
+ } else {
+ // We only deal with DataViews here that have Smi [[ByteLength]]s.
+ Node* byte_length = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewByteLength()),
+ receiver, effect, control);
- // Also load the [[ByteOffset]] from the {receiver}.
- byte_offset = effect =
- graph()->NewNode(simplified()->LoadField(
- AccessBuilder::ForJSArrayBufferViewByteOffset()),
- receiver, effect, control);
+ if (element_size > 1) {
+ // For non-byte accesses we also need to check that the {offset}
+ // plus the {element_size}-1 fits within the given {byte_length}.
+ // So to keep this as a single check on the {offset}, we subtract
+ // the {element_size}-1 from the {byte_length} here (clamped to
+ // positive safe integer range), and perform a check against that
+ // with the {offset} below.
+ byte_length = graph()->NewNode(
+ simplified()->NumberMax(), jsgraph()->ZeroConstant(),
+ graph()->NewNode(simplified()->NumberSubtract(), byte_length,
+ jsgraph()->Constant(element_size - 1)));
}
- // Coerce {is_little_endian} to boolean.
- is_little_endian =
- graph()->NewNode(simplified()->ToBoolean(), is_little_endian);
+ // Check that the {offset} is within range of the {byte_length}.
+ offset = effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
+ offset, byte_length, effect, control);
+ }
- // Coerce {value} to Number.
- if (access == DataViewAccess::kSet) {
- value = effect = graph()->NewNode(
- simplified()->SpeculativeToNumber(
- NumberOperationHint::kNumberOrOddball, p.feedback()),
- value, effect, control);
- }
+ // Coerce {is_little_endian} to boolean.
+ is_little_endian =
+ graph()->NewNode(simplified()->ToBoolean(), is_little_endian);
+ // Coerce {value} to Number.
+ if (access == DataViewAccess::kSet) {
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
+ p.feedback()),
+ value, effect, control);
+ }
+
+ // We need to retain either the {receiver} itself or it's backing
+ // JSArrayBuffer to make sure that the GC doesn't collect the raw
+ // memory. We default to {receiver} here, and only use the buffer
+ // if we anyways have to load it (to reduce register pressure).
+ Node* buffer_or_receiver = receiver;
+
+ if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
// Get the underlying buffer and check that it has not been detached.
Node* buffer = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
receiver, effect, control);
- if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
- // Bail out if the {buffer} was detached.
- Node* buffer_bit_field = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
- buffer, effect, control);
- Node* check = graph()->NewNode(
- simplified()->NumberEqual(),
- graph()->NewNode(
- simplified()->NumberBitwiseAnd(), buffer_bit_field,
- jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
- jsgraph()->ZeroConstant());
- effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached,
- p.feedback()),
- check, effect, control);
- }
-
- // Get the buffer's backing store.
- Node* backing_store = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBackingStore()),
+ // Bail out if the {buffer} was detached.
+ Node* buffer_bit_field = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()),
buffer, effect, control);
+ Node* check = graph()->NewNode(
+ simplified()->NumberEqual(),
+ graph()->NewNode(
+ simplified()->NumberBitwiseAnd(), buffer_bit_field,
+ jsgraph()->Constant(JSArrayBuffer::WasDetachedBit::kMask)),
+ jsgraph()->ZeroConstant());
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached,
+ p.feedback()),
+ check, effect, control);
- switch (access) {
- case DataViewAccess::kGet:
- // Perform the load.
- value = effect =
- graph()->NewNode(simplified()->LoadDataViewElement(element_type),
- buffer, backing_store, byte_offset, offset,
- is_little_endian, effect, control);
- break;
- case DataViewAccess::kSet:
- // Perform the store.
- effect =
- graph()->NewNode(simplified()->StoreDataViewElement(element_type),
- buffer, backing_store, byte_offset, offset, value,
- is_little_endian, effect, control);
- value = jsgraph()->UndefinedConstant();
- break;
- }
+ // We can reduce register pressure by holding on to the {buffer}
+ // now to retain the backing store memory.
+ buffer_or_receiver = buffer;
+ }
- // Continue on the regular path.
- ReplaceWithValue(node, value, effect, control);
- return Changed(value);
+ // Load the {receiver}s data pointer.
+ Node* data_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSDataViewDataPointer()),
+ receiver, effect, control);
+
+ switch (access) {
+ case DataViewAccess::kGet:
+ // Perform the load.
+ value = effect = graph()->NewNode(
+ simplified()->LoadDataViewElement(element_type), buffer_or_receiver,
+ data_pointer, offset, is_little_endian, effect, control);
+ break;
+ case DataViewAccess::kSet:
+ // Perform the store.
+ effect = graph()->NewNode(
+ simplified()->StoreDataViewElement(element_type), buffer_or_receiver,
+ data_pointer, offset, value, is_little_endian, effect, control);
+ value = jsgraph()->UndefinedConstant();
+ break;
}
- return NoChange();
+ ReplaceWithValue(node, value, effect, control);
+ return Changed(value);
}
// ES6 section 18.2.2 isFinite ( number )
@@ -6824,15 +6776,17 @@ Reduction JSCallReducer::ReduceDatePrototypeGetTime(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (NodeProperties::HasInstanceTypeWitness(broker(), receiver, effect,
- JS_DATE_TYPE)) {
- Node* value = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSDateValue()), receiver,
- effect, control);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAre(JS_DATE_TYPE)) {
+ return NoChange();
}
- return NoChange();
+
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForJSDateValue()),
+ receiver, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
// ES6 section 20.3.3.1 Date.now ( )
@@ -6885,41 +6839,36 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* regexp = NodeProperties::GetValueInput(node, 1);
- // Check if we know something about the {regexp}.
- ZoneHandleSet<Map> regexp_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), regexp, effect, &regexp_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
-
- for (auto map : regexp_maps) {
- MapRef receiver_map(broker(), map);
- if (receiver_map.instance_type() != JS_REGEXP_TYPE) return NoChange();
+ MapInference inference(broker(), regexp, effect);
+ if (!inference.HaveMaps() ||
+ !inference.AllOfInstanceTypes(InstanceTypeChecker::IsJSRegExp)) {
+ return inference.NoChange();
}
+ MapHandles const& regexp_maps = inference.GetMaps();
// Compute property access info for "exec" on {resolution}.
+ ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
- PropertyAccessInfo ai_exec = access_info_factory.ComputePropertyAccessInfo(
+ access_info_factory.ComputePropertyAccessInfos(
MapHandles(regexp_maps.begin(), regexp_maps.end()),
- factory()->exec_string(), AccessMode::kLoad);
- if (ai_exec.IsInvalid()) return NoChange();
+ factory()->exec_string(), AccessMode::kLoad, &access_infos);
+ PropertyAccessInfo ai_exec =
+ access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
+ AccessMode::kLoad);
+ if (ai_exec.IsInvalid()) return inference.NoChange();
// If "exec" has been modified on {regexp}, we can't do anything.
if (ai_exec.IsDataConstant()) {
- if (!ai_exec.constant().is_identical_to(
- isolate()->regexp_exec_function())) {
- return NoChange();
- }
- } else if (ai_exec.IsDataConstantField()) {
Handle<JSObject> holder;
// Do not reduce if the exec method is not on the prototype chain.
- if (!ai_exec.holder().ToHandle(&holder)) return NoChange();
+ if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();
// Bail out if the exec method is not the original one.
Handle<Object> constant = JSObject::FastPropertyAt(
- holder, Representation::Tagged(), ai_exec.field_index());
+ holder, ai_exec.field_representation(), ai_exec.field_index());
if (!constant.is_identical_to(isolate()->regexp_exec_function())) {
- return NoChange();
+ return inference.NoChange();
}
// Protect the exec method change in the holder.
@@ -6933,11 +6882,9 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
holder_map.SerializeOwnDescriptors();
dependencies()->DependOnFieldType(holder_map, descriptor_index);
} else {
- return NoChange();
+ return inference.NoChange();
}
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
-
// Add proper dependencies on the {regexp}s [[Prototype]]s.
Handle<JSObject> holder;
if (ai_exec.holder().ToHandle(&holder)) {
@@ -6945,9 +6892,8 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
ai_exec.receiver_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
}
-
- effect = InsertMapChecksIfUnreliableReceiverMaps(
- result, regexp_maps, p.feedback(), regexp, effect, control);
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
@@ -6998,9 +6944,8 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
int stack_parameter_count = arraysize(stack_parameters);
Node* continuation_frame_state =
CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), shared_info,
- Builtins::kGenericConstructorLazyDeoptContinuation, target, context,
- stack_parameters, stack_parameter_count, frame_state,
+ jsgraph(), shared_info, Builtins::kGenericLazyDeoptContinuation,
+ target, context, stack_parameters, stack_parameter_count, frame_state,
ContinuationFrameStateMode::LAZY);
// Convert the {value} to a Number.
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index e0a7340a36..02821ebb0d 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -9,7 +9,7 @@
#include "src/compiler/frame-states.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/node-properties.h"
-#include "src/deoptimize-reason.h"
+#include "src/deoptimizer/deoptimize-reason.h"
namespace v8 {
namespace internal {
@@ -122,6 +122,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceStringPrototypeStringAt(
const Operator* string_access_operator, Node* node);
Reduction ReduceStringPrototypeCharAt(Node* node);
+ Reduction ReduceStringPrototypeStartsWith(Node* node);
#ifdef V8_INTL_SUPPORT
Reduction ReduceStringPrototypeToLowerCaseIntl(Node* node);
@@ -190,11 +191,6 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceNumberConstructor(Node* node);
- Node* InsertMapChecksIfUnreliableReceiverMaps(
- NodeProperties::InferReceiverMapsResult result,
- ZoneHandleSet<Map> const& receiver_maps, VectorSlotPair const& feedback,
- Node* receiver, Node* effect, Node* control);
-
// Returns the updated {to} node, and updates control and effect along the
// way.
Node* DoFilterPostCallbackWork(ElementsKind kind, Node** control,
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 5455155050..dea6d7fc2b 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -10,7 +10,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-#include "src/contexts-inl.h"
+#include "src/objects/contexts-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index 2c5847639a..14a72a70b5 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
#include "src/compiler/graph-reducer.h"
-#include "src/maybe-handles.h"
+#include "src/handles/maybe-handles.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 18d59d5ed2..8fc8dd1308 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -4,7 +4,7 @@
#include "src/compiler/js-create-lowering.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder-inl.h"
#include "src/compiler/common-operator.h"
@@ -18,7 +18,6 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/state-values-utils.h"
-#include "src/objects-inl.h"
#include "src/objects/arguments.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number.h"
@@ -26,6 +25,7 @@
#include "src/objects/js-generator.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -901,7 +901,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
SharedFunctionInfoRef shared(broker(), p.shared_info());
- HeapObjectRef feedback_cell(broker(), p.feedback_cell());
+ FeedbackCellRef feedback_cell(broker(), p.feedback_cell());
HeapObjectRef code(broker(), p.code());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1616,17 +1616,39 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
DCHECK_EQ(kData, property_details.kind());
NameRef property_name = boilerplate_map.GetPropertyKey(i);
FieldIndex index = boilerplate_map.GetFieldIndexFor(i);
- FieldAccess access = {
- kTaggedBase, index.offset(), property_name.object(),
- MaybeHandle<Map>(), Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase,
+ index.offset(),
+ property_name.object(),
+ MaybeHandle<Map>(),
+ Type::Any(),
+ MachineType::TypeCompressedTagged(),
+ kFullWriteBarrier,
+ LoadSensitivity::kUnsafe,
+ property_details.constness()};
Node* value;
if (boilerplate_map.IsUnboxedDoubleField(i)) {
access.machine_type = MachineType::Float64();
access.type = Type::Number();
- value = jsgraph()->Constant(boilerplate.RawFastDoublePropertyAt(index));
+ uint64_t value_bits = boilerplate.RawFastDoublePropertyAsBitsAt(index);
+ if (value_bits == kHoleNanInt64) {
+ // This special case is analogous to is_uninitialized being true in the
+ // non-unboxed-double case below. The store of the hole NaN value here
+ // will always be followed by another store that actually initializes
+ // the field. The hole NaN should therefore be unobservable.
+ // Load elimination expects there to be at most one const store to any
+ // given field, so we always mark the unobservable ones as mutable.
+ access.constness = PropertyConstness::kMutable;
+ }
+ value = jsgraph()->Constant(bit_cast<double>(value_bits));
} else {
ObjectRef boilerplate_value = boilerplate.RawFastPropertyAt(index);
+ bool is_uninitialized =
+ boilerplate_value.IsHeapObject() &&
+ boilerplate_value.AsHeapObject().map().oddball_type() ==
+ OddballType::kUninitialized;
+ if (is_uninitialized) {
+ access.constness = PropertyConstness::kMutable;
+ }
if (boilerplate_value.IsJSObject()) {
JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
value = effect = AllocateFastLiteral(effect, control,
@@ -1643,10 +1665,6 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
value = effect = builder.Finish();
} else if (property_details.representation().IsSmi()) {
// Ensure that value is stored as smi.
- bool is_uninitialized =
- boilerplate_value.IsHeapObject() &&
- boilerplate_value.AsHeapObject().map().oddball_type() ==
- OddballType::kUninitialized;
value = is_uninitialized
? jsgraph()->ZeroConstant()
: jsgraph()->Constant(boilerplate_value.AsSmi());
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 021da52c68..44a3b213b7 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -6,8 +6,8 @@
#define V8_COMPILER_JS_CREATE_LOWERING_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index fb477eac5e..0a6f90975f 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -6,15 +6,15 @@
#include "src/ast/ast.h"
#include "src/builtins/builtins-constructor.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/feedback-vector.h"
#include "src/objects/feedback-cell.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/scope-info.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 2c70a1e1f5..2a395ca5e8 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -4,7 +4,7 @@
#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
#define V8_COMPILER_JS_GENERIC_LOWERING_H_
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/linkage.h"
#include "src/compiler/opcodes.h"
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 74275bde9e..a3805ec125 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -4,10 +4,10 @@
#include "src/compiler/js-graph.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/typer.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index fb769a402e..b5c80515ad 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -5,13 +5,13 @@
#ifndef V8_COMPILER_JS_GRAPH_H_
#define V8_COMPILER_JS_GRAPH_H_
+#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/machine-graph.h"
#include "src/compiler/node-properties.h"
-#include "src/globals.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 77fbe92eab..86250e9d1f 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -8,14 +8,14 @@
#include <algorithm>
#endif
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/modules.h"
-#include "src/bootstrapper.h"
-#include "src/boxed-float.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
+#include "src/compiler/access-info.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/per-isolate-compiler-cache.h"
-#include "src/objects-inl.h"
+#include "src/compiler/vector-slot-pair.h"
+#include "src/init/bootstrapper.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
@@ -25,9 +25,10 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/templates.h"
-#include "src/utils.h"
-#include "src/vector-slot-pair.h"
+#include "src/utils/boxed-float.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -165,26 +166,6 @@ void JSHeapBroker::IncrementTracingIndentation() { ++trace_indentation_; }
void JSHeapBroker::DecrementTracingIndentation() { --trace_indentation_; }
-class TraceScope {
- public:
- TraceScope(JSHeapBroker* broker, const char* label)
- : TraceScope(broker, static_cast<void*>(broker), label) {}
-
- TraceScope(JSHeapBroker* broker, ObjectData* data, const char* label)
- : TraceScope(broker, static_cast<void*>(data), label) {}
-
- ~TraceScope() { broker_->DecrementTracingIndentation(); }
-
- private:
- JSHeapBroker* const broker_;
-
- TraceScope(JSHeapBroker* broker, void* self, const char* label)
- : broker_(broker) {
- TRACE(broker_, "Running " << label << " on " << self);
- broker_->IncrementTracingIndentation();
- }
-};
-
PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
Handle<PropertyCell> object)
: HeapObjectData(broker, storage, object),
@@ -227,9 +208,13 @@ void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
class JSObjectField {
public:
bool IsDouble() const { return object_ == nullptr; }
+ uint64_t AsBitsOfDouble() const {
+ CHECK(IsDouble());
+ return number_bits_;
+ }
double AsDouble() const {
CHECK(IsDouble());
- return number_;
+ return bit_cast<double>(number_bits_);
}
bool IsObject() const { return object_ != nullptr; }
@@ -238,12 +223,12 @@ class JSObjectField {
return object_;
}
- explicit JSObjectField(double value) : number_(value) {}
+ explicit JSObjectField(uint64_t value_bits) : number_bits_(value_bits) {}
explicit JSObjectField(ObjectData* value) : object_(value) {}
private:
ObjectData* object_ = nullptr;
- double number_ = 0;
+ uint64_t number_bits_ = 0;
};
class JSObjectData : public HeapObjectData {
@@ -301,8 +286,8 @@ void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "JSObjectData::SerializeObjectCreateMap");
Handle<JSObject> jsobject = Handle<JSObject>::cast(object());
- if (jsobject->map()->is_prototype_map()) {
- Handle<Object> maybe_proto_info(jsobject->map()->prototype_info(),
+ if (jsobject->map().is_prototype_map()) {
+ Handle<Object> maybe_proto_info(jsobject->map().prototype_info(),
broker->isolate());
if (maybe_proto_info->IsPrototypeInfo()) {
auto proto_info = Handle<PrototypeInfo>::cast(maybe_proto_info);
@@ -354,8 +339,8 @@ class JSTypedArrayData : public JSObjectData {
Handle<JSTypedArray> object);
bool is_on_heap() const { return is_on_heap_; }
- size_t length_value() const { return length_value_; }
- void* elements_external_pointer() const { return elements_external_pointer_; }
+ size_t length() const { return length_; }
+ void* external_pointer() const { return external_pointer_; }
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
@@ -364,8 +349,8 @@ class JSTypedArrayData : public JSObjectData {
private:
bool const is_on_heap_;
- size_t const length_value_;
- void* const elements_external_pointer_;
+ size_t const length_;
+ void* const external_pointer_;
bool serialized_ = false;
HeapObjectData* buffer_ = nullptr;
@@ -375,9 +360,8 @@ JSTypedArrayData::JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSTypedArray> object)
: JSObjectData(broker, storage, object),
is_on_heap_(object->is_on_heap()),
- length_value_(object->length_value()),
- elements_external_pointer_(
- FixedTypedArrayBase::cast(object->elements())->external_pointer()) {}
+ length_(object->length()),
+ external_pointer_(object->external_pointer()) {}
void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
if (serialized_) return;
@@ -724,14 +708,14 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
// TODO(turbofan): Do we want to support out-of-object properties?
if (!(boilerplate->HasFastProperties() &&
- boilerplate->property_array()->length() == 0)) {
+ boilerplate->property_array().length() == 0)) {
return false;
}
// Check the in-object properties.
- Handle<DescriptorArray> descriptors(
- boilerplate->map()->instance_descriptors(), isolate);
- int limit = boilerplate->map()->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
+ isolate);
+ int limit = boilerplate->map().NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
@@ -853,11 +837,11 @@ class MapData : public HeapObjectData {
return elements_kind_generalizations_;
}
- // Serialize the own part of the descriptor array and, recursively, that of
- // any field owner.
+ // Serialize a single (or all) own slot(s) of the descriptor array and recurse
+ // on field owner(s).
+ void SerializeOwnDescriptor(JSHeapBroker* broker, int descriptor_index);
void SerializeOwnDescriptors(JSHeapBroker* broker);
DescriptorArrayData* instance_descriptors() const {
- CHECK(serialized_own_descriptors_);
return instance_descriptors_;
}
@@ -973,15 +957,15 @@ bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
Handle<Name> length_string = isolate->factory()->length_string();
DescriptorArray descriptors = jsarray_map->instance_descriptors();
- int number = descriptors->Search(*length_string, *jsarray_map);
+ int number = descriptors.Search(*length_string, *jsarray_map);
DCHECK_NE(DescriptorArray::kNotFound, number);
- return descriptors->GetDetails(number).IsReadOnly();
+ return descriptors.GetDetails(number).IsReadOnly();
}
bool SupportsFastArrayIteration(Isolate* isolate, Handle<Map> map) {
return map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(map->elements_kind()) &&
- map->prototype()->IsJSArray() &&
+ map->prototype().IsJSArray() &&
isolate->IsAnyInitialArrayPrototype(
handle(JSArray::cast(map->prototype()), isolate)) &&
isolate->IsNoElementsProtectorIntact();
@@ -1101,12 +1085,28 @@ class DescriptorArrayData : public HeapObjectData {
Handle<DescriptorArray> object)
: HeapObjectData(broker, storage, object), contents_(broker->zone()) {}
- ZoneVector<PropertyDescriptor>& contents() { return contents_; }
+ ZoneMap<int, PropertyDescriptor>& contents() { return contents_; }
private:
- ZoneVector<PropertyDescriptor> contents_;
+ ZoneMap<int, PropertyDescriptor> contents_;
};
+class FeedbackCellData : public HeapObjectData {
+ public:
+ FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FeedbackCell> object);
+
+ HeapObjectData* value() const { return value_; }
+
+ private:
+ HeapObjectData* const value_;
+};
+
+FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FeedbackCell> object)
+ : HeapObjectData(broker, storage, object),
+ value_(broker->GetOrCreateData(object->value())->AsHeapObject()) {}
+
class FeedbackVectorData : public HeapObjectData {
public:
const ZoneVector<ObjectData*>& feedback() { return feedback_; }
@@ -1277,14 +1277,23 @@ void FixedDoubleArrayData::SerializeContents(JSHeapBroker* broker) {
class BytecodeArrayData : public FixedArrayBaseData {
public:
int register_count() const { return register_count_; }
+ int parameter_count() const { return parameter_count_; }
+ interpreter::Register incoming_new_target_or_generator_register() const {
+ return incoming_new_target_or_generator_register_;
+ }
BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<BytecodeArray> object)
: FixedArrayBaseData(broker, storage, object),
- register_count_(object->register_count()) {}
+ register_count_(object->register_count()),
+ parameter_count_(object->parameter_count()),
+ incoming_new_target_or_generator_register_(
+ object->incoming_new_target_or_generator_register()) {}
private:
int const register_count_;
+ int const parameter_count_;
+ interpreter::Register const incoming_new_target_or_generator_register_;
};
class JSArrayData : public JSObjectData {
@@ -1407,7 +1416,7 @@ SharedFunctionInfoData::SharedFunctionInfoData(
void SharedFunctionInfoData::SetSerializedForCompilation(
JSHeapBroker* broker, FeedbackVectorRef feedback) {
CHECK(serialized_for_compilation_.insert(feedback.object()).second);
- TRACE(broker, "Set function " << object() << " with " << feedback.object()
+ TRACE(broker, "Set function " << this << " with " << feedback
<< " as serialized for compilation");
}
@@ -1448,7 +1457,6 @@ CellData* ModuleData::GetCell(int cell_index) const {
break;
case ModuleDescriptor::kInvalid:
UNREACHABLE();
- break;
}
CHECK_NOT_NULL(cell);
return cell;
@@ -1658,52 +1666,55 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptors");
Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(instance_descriptors_);
- instance_descriptors_ =
- broker->GetOrCreateData(map->instance_descriptors())->AsDescriptorArray();
-
int const number_of_own = map->NumberOfOwnDescriptors();
- ZoneVector<PropertyDescriptor>& contents = instance_descriptors_->contents();
- int const current_size = static_cast<int>(contents.size());
- if (number_of_own <= current_size) return;
+ for (int i = 0; i < number_of_own; ++i) {
+ SerializeOwnDescriptor(broker, i);
+ }
+}
+
+void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
+ int descriptor_index) {
+ TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
+ Handle<Map> map = Handle<Map>::cast(object());
+
+ if (instance_descriptors_ == nullptr) {
+ instance_descriptors_ = broker->GetOrCreateData(map->instance_descriptors())
+ ->AsDescriptorArray();
+ }
+
+ ZoneMap<int, PropertyDescriptor>& contents =
+ instance_descriptors_->contents();
+ CHECK_LT(descriptor_index, map->NumberOfOwnDescriptors());
+ if (contents.find(descriptor_index) != contents.end()) return;
Isolate* const isolate = broker->isolate();
auto descriptors =
Handle<DescriptorArray>::cast(instance_descriptors_->object());
CHECK_EQ(*descriptors, map->instance_descriptors());
- contents.reserve(number_of_own);
-
- // Copy the new descriptors.
- for (int i = current_size; i < number_of_own; ++i) {
- PropertyDescriptor d;
- d.key = broker->GetOrCreateData(descriptors->GetKey(i))->AsName();
- d.details = descriptors->GetDetails(i);
- if (d.details.location() == kField) {
- d.field_index = FieldIndex::ForDescriptor(*map, i);
- d.field_owner =
- broker->GetOrCreateData(map->FindFieldOwner(isolate, i))->AsMap();
- d.field_type = broker->GetOrCreateData(descriptors->GetFieldType(i));
- d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index);
- // Recurse.
- }
- contents.push_back(d);
- }
- CHECK_EQ(number_of_own, contents.size());
-
- // Recurse on the new owner maps.
- for (int i = current_size; i < number_of_own; ++i) {
- const PropertyDescriptor& d = contents[i];
- if (d.details.location() == kField) {
- CHECK_LE(
- Handle<Map>::cast(d.field_owner->object())->NumberOfOwnDescriptors(),
- number_of_own);
- d.field_owner->SerializeOwnDescriptors(broker);
- }
+
+ PropertyDescriptor d;
+ d.key =
+ broker->GetOrCreateData(descriptors->GetKey(descriptor_index))->AsName();
+ d.details = descriptors->GetDetails(descriptor_index);
+ if (d.details.location() == kField) {
+ d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
+ d.field_owner =
+ broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index))
+ ->AsMap();
+ d.field_type =
+ broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
+ d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index);
}
+ contents[descriptor_index] = d;
- TRACE(broker, "Copied " << number_of_own - current_size
- << " descriptors into " << instance_descriptors_
- << " (" << number_of_own << " total)");
+ if (d.details.location() == kField) {
+ // Recurse on the owner map.
+ d.field_owner->SerializeOwnDescriptor(broker, descriptor_index);
+ }
+
+ TRACE(broker, "Copied descriptor " << descriptor_index << " into "
+ << instance_descriptors_ << " ("
+ << contents.size() << " total)");
}
void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
@@ -1716,7 +1727,7 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
// We only serialize boilerplates that pass the IsInlinableFastLiteral
// check, so we only do a sanity check on the depth here.
CHECK_GT(depth, 0);
- CHECK(!boilerplate->map()->is_deprecated());
+ CHECK(!boilerplate->map().is_deprecated());
// Serialize the elements.
Isolate* const isolate = broker->isolate();
@@ -1767,13 +1778,13 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
// TODO(turbofan): Do we want to support out-of-object properties?
CHECK(boilerplate->HasFastProperties() &&
- boilerplate->property_array()->length() == 0);
+ boilerplate->property_array().length() == 0);
CHECK_EQ(inobject_fields_.size(), 0u);
// Check the in-object properties.
- Handle<DescriptorArray> descriptors(
- boilerplate->map()->instance_descriptors(), isolate);
- int const limit = boilerplate->map()->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descriptors(boilerplate->map().instance_descriptors(),
+ isolate);
+ int const limit = boilerplate->map().NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() != kField) continue;
@@ -1785,8 +1796,9 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
DCHECK_EQ(field_index.property_index(),
static_cast<int>(inobject_fields_.size()));
if (boilerplate->IsUnboxedDoubleField(field_index)) {
- double value = boilerplate->RawFastDoublePropertyAt(field_index);
- inobject_fields_.push_back(JSObjectField{value});
+ uint64_t value_bits =
+ boilerplate->RawFastDoublePropertyAsBitsAt(field_index);
+ inobject_fields_.push_back(JSObjectField{value_bits});
} else {
Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
isolate);
@@ -1970,14 +1982,14 @@ void JSHeapBroker::CollectArrayAndObjectPrototypes() {
CHECK(array_and_object_prototypes_.empty());
Object maybe_context = isolate()->heap()->native_contexts_list();
- while (!maybe_context->IsUndefined(isolate())) {
+ while (!maybe_context.IsUndefined(isolate())) {
Context context = Context::cast(maybe_context);
- Object array_prot = context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
- Object object_prot = context->get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
+ Object array_prot = context.get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ Object object_prot = context.get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
array_and_object_prototypes_.emplace(JSObject::cast(array_prot), isolate());
array_and_object_prototypes_.emplace(JSObject::cast(object_prot),
isolate());
- maybe_context = context->next_context_link();
+ maybe_context = context.next_context_link();
}
CHECK(!array_and_object_prototypes_.empty());
@@ -2353,6 +2365,16 @@ double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
return object_data->GetInobjectField(index.property_index()).AsDouble();
}
+uint64_t JSObjectRef::RawFastDoublePropertyAsBitsAt(FieldIndex index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference handle_dereference;
+ return object()->RawFastDoublePropertyAsBitsAt(index);
+ }
+ JSObjectData* object_data = data()->AsJSObject();
+ CHECK(index.is_inobject());
+ return object_data->GetInobjectField(index.property_index()).AsBitsOfDouble();
+}
+
ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
@@ -2419,7 +2441,7 @@ int MapRef::GetInObjectPropertyOffset(int i) const {
PropertyDetails MapRef::GetPropertyDetails(int descriptor_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return object()->instance_descriptors()->GetDetails(descriptor_index);
+ return object()->instance_descriptors().GetDetails(descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return descriptors->contents().at(descriptor_index).details;
@@ -2431,7 +2453,7 @@ NameRef MapRef::GetPropertyKey(int descriptor_index) const {
AllowHandleDereference allow_handle_dereference;
return NameRef(
broker(),
- handle(object()->instance_descriptors()->GetKey(descriptor_index),
+ handle(object()->instance_descriptors().GetKey(descriptor_index),
broker()->isolate()));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
@@ -2467,7 +2489,7 @@ ObjectRef MapRef::GetFieldType(int descriptor_index) const {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
Handle<FieldType> field_type(
- object()->instance_descriptors()->GetFieldType(descriptor_index),
+ object()->instance_descriptors().GetFieldType(descriptor_index),
broker()->isolate());
return ObjectRef(broker(), field_type);
}
@@ -2575,6 +2597,9 @@ BIMODAL_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
BIMODAL_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType)
BIMODAL_ACCESSOR_C(BytecodeArray, int, register_count)
+BIMODAL_ACCESSOR_C(BytecodeArray, int, parameter_count)
+BIMODAL_ACCESSOR_C(BytecodeArray, interpreter::Register,
+ incoming_new_target_or_generator_register)
BIMODAL_ACCESSOR(Cell, Object, value)
@@ -2601,17 +2626,17 @@ BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
BIMODAL_ACCESSOR_C(JSTypedArray, bool, is_on_heap)
-BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length_value)
+BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits)
BIMODAL_ACCESSOR_B(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
+BIMODAL_ACCESSOR_B(Map, bit_field2, has_hidden_prototype,
+ Map::HasHiddenPrototypeBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
Map::NumberOfOwnDescriptorsBits)
-BIMODAL_ACCESSOR_B(Map, bit_field3, has_hidden_prototype,
- Map::HasHiddenPrototypeBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_migration_target,
Map::IsMigrationTargetBit)
BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit)
@@ -2651,12 +2676,14 @@ BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
BIMODAL_ACCESSOR_C(String, int, length)
-void* JSTypedArrayRef::elements_external_pointer() const {
+BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value)
+
+void* JSTypedArrayRef::external_pointer() const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleDereference allow_handle_dereference;
- return FixedTypedArrayBase::cast(object()->elements())->external_pointer();
+ return object()->external_pointer();
}
- return data()->AsJSTypedArray()->elements_external_pointer();
+ return data()->AsJSTypedArray()->external_pointer();
}
bool MapRef::IsInobjectSlackTrackingInProgress() const {
@@ -2829,14 +2856,14 @@ base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(uint32_t index,
bool serialize) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
- if (!object()->elements()->IsCowArray()) return base::nullopt;
+ if (!object()->elements().IsCowArray()) return base::nullopt;
return GetOwnElementFromHeap(broker(), object(), index, false);
}
if (serialize) {
data()->AsJSObject()->SerializeElements(broker());
} else if (!data()->AsJSObject()->serialized_elements()) {
- TRACE(broker(), "'elements' on data " << this);
+ TRACE(broker(), "'elements' on " << this);
return base::nullopt;
}
if (!elements().map().IsFixedCowArrayMap()) return base::nullopt;
@@ -2897,7 +2924,7 @@ ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
namespace {
OddballType GetOddballType(Isolate* isolate, Map map) {
- if (map->instance_type() != ODDBALL_TYPE) {
+ if (map.instance_type() != ODDBALL_TYPE) {
return OddballType::kNone;
}
ReadOnlyRoots roots(isolate);
@@ -2928,9 +2955,9 @@ HeapObjectType HeapObjectRef::GetHeapObjectType() const {
AllowHandleDereference handle_dereference;
Map map = Handle<HeapObject>::cast(object())->map();
HeapObjectType::Flags flags(0);
- if (map->is_undetectable()) flags |= HeapObjectType::kUndetectable;
- if (map->is_callable()) flags |= HeapObjectType::kCallable;
- return HeapObjectType(map->instance_type(), flags,
+ if (map.is_undetectable()) flags |= HeapObjectType::kUndetectable;
+ if (map.is_callable()) flags |= HeapObjectType::kCallable;
+ return HeapObjectType(map.instance_type(), flags,
GetOddballType(broker()->isolate(), map));
}
HeapObjectType::Flags flags(0);
@@ -3131,6 +3158,12 @@ void MapRef::SerializeOwnDescriptors() {
data()->AsMap()->SerializeOwnDescriptors(broker());
}
+void MapRef::SerializeOwnDescriptor(int descriptor_index) {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index);
+}
+
void MapRef::SerializeBackPointer() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -3219,7 +3252,7 @@ bool CanInlineElementAccess(MapRef const& map) {
if (map.has_indexed_interceptor()) return false;
ElementsKind const elements_kind = map.elements_kind();
if (IsFastElementsKind(elements_kind)) return true;
- if (IsFixedTypedArrayElementsKind(elements_kind) &&
+ if (IsTypedArrayElementsKind(elements_kind) &&
elements_kind != BIGUINT64_ELEMENTS &&
elements_kind != BIGINT64_ELEMENTS) {
return true;
@@ -3227,6 +3260,9 @@ bool CanInlineElementAccess(MapRef const& map) {
return false;
}
+InsufficientFeedback::InsufficientFeedback()
+ : ProcessedFeedback(kInsufficient) {}
+
GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell)
: ProcessedFeedback(kGlobalAccess),
cell_or_context_(cell),
@@ -3307,6 +3343,14 @@ ElementAccessFeedback::MapIterator ElementAccessFeedback::all_maps(
return MapIterator(*this, broker);
}
+NamedAccessFeedback::NamedAccessFeedback(
+ NameRef const& name, ZoneVector<PropertyAccessInfo> const& access_infos)
+ : ProcessedFeedback(kNamedAccess),
+ name_(name),
+ access_infos_(access_infos) {
+ CHECK(!access_infos.empty());
+}
+
FeedbackSource::FeedbackSource(FeedbackNexus const& nexus)
: vector(nexus.vector_handle()), slot(nexus.slot()) {}
@@ -3330,14 +3374,6 @@ ProcessedFeedback const* JSHeapBroker::GetFeedback(
return it->second;
}
-ElementAccessFeedback const* JSHeapBroker::GetElementAccessFeedback(
- FeedbackSource const& source) const {
- ProcessedFeedback const* feedback = GetFeedback(source);
- if (feedback == nullptr) return nullptr;
- CHECK_EQ(feedback->kind(), ProcessedFeedback::kElementAccess);
- return static_cast<ElementAccessFeedback const*>(feedback);
-}
-
GlobalAccessFeedback const* JSHeapBroker::GetGlobalAccessFeedback(
FeedbackSource const& source) const {
ProcessedFeedback const* feedback = GetFeedback(source);
@@ -3348,6 +3384,8 @@ GlobalAccessFeedback const* JSHeapBroker::GetGlobalAccessFeedback(
ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
MapHandles const& maps) {
+ DCHECK(!maps.empty());
+
// Collect possible transition targets.
MapHandles possible_transition_targets;
possible_transition_targets.reserve(maps.size());
@@ -3359,8 +3397,6 @@ ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
}
}
- if (maps.empty()) return nullptr;
-
ElementAccessFeedback* result = new (zone()) ElementAccessFeedback(zone());
// Separate the actual receiver maps and the possible transition sources.
@@ -3446,6 +3482,23 @@ std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
return os << ref.data();
}
+base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
+ FeedbackNexus const& nexus) {
+ Name raw_name = nexus.GetName();
+ if (raw_name.is_null()) return base::nullopt;
+ return NameRef(this, handle(raw_name, isolate()));
+}
+
+ElementAccessFeedback const* ProcessedFeedback::AsElementAccess() const {
+ CHECK_EQ(kElementAccess, kind());
+ return static_cast<ElementAccessFeedback const*>(this);
+}
+
+NamedAccessFeedback const* ProcessedFeedback::AsNamedAccess() const {
+ CHECK_EQ(kNamedAccess, kind());
+ return static_cast<NamedAccessFeedback const*>(this);
+}
+
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 9ddc12a11e..2c4cc766bc 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -7,14 +7,14 @@
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
+#include "src/common/globals.h"
#include "src/compiler/refs-map.h"
-#include "src/feedback-vector.h"
-#include "src/function-kind.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/objects.h"
+#include "src/handles/handles.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/function-kind.h"
#include "src/objects/instance-type.h"
-#include "src/ostreams.h"
+#include "src/objects/objects.h"
+#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -37,6 +37,10 @@ class VectorSlotPair;
namespace compiler {
+// Whether we are loading a property or storing to a property.
+// For a store during literal creation, do not walk up the prototype chain.
+enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
+
enum class OddballType : uint8_t {
kNone, // Not an Oddball.
kBoolean, // True or False.
@@ -78,6 +82,7 @@ enum class OddballType : uint8_t {
V(Cell) \
V(Code) \
V(DescriptorArray) \
+ V(FeedbackCell) \
V(FeedbackVector) \
V(FixedArrayBase) \
V(FunctionTemplateInfo) \
@@ -96,6 +101,7 @@ class CompilationDependencies;
class JSHeapBroker;
class ObjectData;
class PerIsolateCompilerCache;
+class PropertyAccessInfo;
#define FORWARD_DECL(Name) class Name##Ref;
HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
@@ -216,6 +222,7 @@ class JSObjectRef : public HeapObjectRef {
using HeapObjectRef::HeapObjectRef;
Handle<JSObject> object() const;
+ uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
double RawFastDoublePropertyAt(FieldIndex index) const;
ObjectRef RawFastPropertyAt(FieldIndex index) const;
@@ -405,6 +412,14 @@ class DescriptorArrayRef : public HeapObjectRef {
Handle<DescriptorArray> object() const;
};
+class FeedbackCellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FeedbackCell> object() const;
+
+ HeapObjectRef value() const;
+};
+
class FeedbackVectorRef : public HeapObjectRef {
public:
using HeapObjectRef::HeapObjectRef;
@@ -514,6 +529,7 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
// Concerning the underlying instance_descriptors:
void SerializeOwnDescriptors();
+ void SerializeOwnDescriptor(int descriptor_index);
MapRef FindFieldOwner(int descriptor_index) const;
PropertyDetails GetPropertyDetails(int descriptor_index) const;
NameRef GetPropertyKey(int descriptor_index) const;
@@ -558,6 +574,8 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
Handle<BytecodeArray> object() const;
int register_count() const;
+ int parameter_count() const;
+ interpreter::Register incoming_new_target_or_generator_register() const;
};
class JSArrayRef : public JSObjectRef {
@@ -593,7 +611,8 @@ class ScopeInfoRef : public HeapObjectRef {
V(bool, construct_as_builtin) \
V(bool, HasBytecodeArray) \
V(bool, is_safe_to_skip_arguments_adaptor) \
- V(bool, IsInlineable)
+ V(bool, IsInlineable) \
+ V(bool, is_compiled)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
public:
@@ -635,8 +654,8 @@ class JSTypedArrayRef : public JSObjectRef {
Handle<JSTypedArray> object() const;
bool is_on_heap() const;
- size_t length_value() const;
- void* elements_external_pointer() const;
+ size_t length() const;
+ void* external_pointer() const;
void Serialize();
bool serialized() const;
@@ -690,11 +709,17 @@ class InternalizedStringRef : public StringRef {
Handle<InternalizedString> object() const;
};
+class ElementAccessFeedback;
+class NamedAccessFeedback;
+
class ProcessedFeedback : public ZoneObject {
public:
- enum Kind { kElementAccess, kGlobalAccess };
+ enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess };
Kind kind() const { return kind_; }
+ ElementAccessFeedback const* AsElementAccess() const;
+ NamedAccessFeedback const* AsNamedAccess() const;
+
protected:
explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
@@ -702,6 +727,11 @@ class ProcessedFeedback : public ZoneObject {
Kind const kind_;
};
+class InsufficientFeedback final : public ProcessedFeedback {
+ public:
+ InsufficientFeedback();
+};
+
class GlobalAccessFeedback : public ProcessedFeedback {
public:
explicit GlobalAccessFeedback(PropertyCellRef cell);
@@ -753,6 +783,21 @@ class ElementAccessFeedback : public ProcessedFeedback {
MapIterator all_maps(JSHeapBroker* broker) const;
};
+class NamedAccessFeedback : public ProcessedFeedback {
+ public:
+ NamedAccessFeedback(NameRef const& name,
+ ZoneVector<PropertyAccessInfo> const& access_infos);
+
+ NameRef const& name() const { return name_; }
+ ZoneVector<PropertyAccessInfo> const& access_infos() const {
+ return access_infos_;
+ }
+
+ private:
+ NameRef const name_;
+ ZoneVector<PropertyAccessInfo> const access_infos_;
+};
+
struct FeedbackSource {
FeedbackSource(Handle<FeedbackVector> vector_, FeedbackSlot slot_)
: vector(vector_), slot(slot_) {}
@@ -776,7 +821,18 @@ struct FeedbackSource {
};
};
-class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
+#define TRACE_BROKER(broker, x) \
+ do { \
+ if (FLAG_trace_heap_broker_verbose) broker->Trace() << x << '\n'; \
+ } while (false)
+
+#define TRACE_BROKER_MISSING(broker, x) \
+ do { \
+ if (FLAG_trace_heap_broker) \
+ broker->Trace() << __FUNCTION__ << ": missing " << x << '\n'; \
+ } while (false)
+
+class V8_EXPORT_PRIVATE JSHeapBroker {
public:
JSHeapBroker(Isolate* isolate, Zone* broker_zone);
@@ -814,8 +870,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
ProcessedFeedback const* GetFeedback(FeedbackSource const& source) const;
// Convenience wrappers around GetFeedback.
- ElementAccessFeedback const* GetElementAccessFeedback(
- FeedbackSource const& source) const;
GlobalAccessFeedback const* GetGlobalAccessFeedback(
FeedbackSource const& source) const;
@@ -825,6 +879,8 @@ class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(
FeedbackSource const& source);
+ base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus);
+
std::ostream& Trace();
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -857,6 +913,26 @@ class V8_EXPORT_PRIVATE JSHeapBroker : public NON_EXPORTED_BASE(ZoneObject) {
static const size_t kInitialRefsBucketCount = 1024; // must be power of 2
};
+class TraceScope {
+ public:
+ TraceScope(JSHeapBroker* broker, const char* label)
+ : TraceScope(broker, static_cast<void*>(broker), label) {}
+
+ TraceScope(JSHeapBroker* broker, ObjectData* data, const char* label)
+ : TraceScope(broker, static_cast<void*>(data), label) {}
+
+ TraceScope(JSHeapBroker* broker, void* subject, const char* label)
+ : broker_(broker) {
+ TRACE_BROKER(broker_, "Running " << label << " on " << subject);
+ broker_->IncrementTracingIndentation();
+ }
+
+ ~TraceScope() { broker_->DecrementTracingIndentation(); }
+
+ private:
+ JSHeapBroker* const broker_;
+};
+
#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
optionally_something) \
auto optionally_something_ = optionally_something; \
@@ -872,17 +948,6 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
// compilation is finished.
bool CanInlineElementAccess(MapRef const& map);
-#define TRACE_BROKER(broker, x) \
- do { \
- if (FLAG_trace_heap_broker_verbose) broker->Trace() << x << '\n'; \
- } while (false)
-
-#define TRACE_BROKER_MISSING(broker, x) \
- do { \
- if (FLAG_trace_heap_broker) \
- broker->Trace() << __FUNCTION__ << ": missing " << x << '\n'; \
- } while (false)
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 3d8dd7402b..cc48ae80cb 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -64,7 +64,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSCreateClosure: {
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
SharedFunctionInfoRef(broker(), p.shared_info());
- HeapObjectRef(broker(), p.feedback_cell());
+ FeedbackCellRef(broker(), p.feedback_cell());
HeapObjectRef(broker(), p.code());
break;
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index c79ae954e9..f78635b139 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -4,12 +4,12 @@
#include "src/compiler/js-inlining-heuristic.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
-#include "src/objects-inl.h"
-#include "src/optimized-compilation-info.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -118,13 +118,13 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// continue the inlining checks. Log a warning and continue.
if (candidate.functions[i].has_value()) {
TRACE_BROKER(broker(),
- "Missing bytecode array trying to inline function "
- << candidate.functions[i].value().object().address());
+ "Missing bytecode array trying to inline JSFunction "
+ << *candidate.functions[i]);
} else {
TRACE_BROKER(
broker(),
- "Missing bytecode array trying to inline function with SFI "
- << candidate.shared_info.value().object().address());
+ "Missing bytecode array trying to inline SharedFunctionInfo "
+ << *candidate.shared_info);
}
// Those functions that don't have their bytecode serialized probably
// don't have the SFI either, so we exit the loop early.
@@ -343,7 +343,6 @@ Node* JSInliningHeuristic::DuplicateFrameStateAndRename(Node* frame_state,
}
bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
- Candidate const& candidate,
Node** if_successes, Node** calls,
Node** inputs, int input_count) {
// We will try to reuse the control flow branch created for computing
@@ -351,11 +350,6 @@ bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
// is no side-effect between the call and the branch, and if the callee is
// only used as the target (and possibly also in the related frame states).
- int const num_calls = candidate.num_functions;
-
- DCHECK_EQ(IrOpcode::kPhi, callee->opcode());
- DCHECK_EQ(num_calls, callee->op()->ValueInputCount());
-
// We are trying to match the following pattern:
//
// C1 C2
@@ -438,6 +432,11 @@ bool JSInliningHeuristic::TryReuseDispatch(Node* node, Node* callee,
// |
// ...
+ // Bailout if the call is not polymorphic anymore (other reducers might
+ // have replaced the callee phi with a constant).
+ if (callee->opcode() != IrOpcode::kPhi) return false;
+ int const num_calls = callee->op()->ValueInputCount();
+
// If there is a control node between the callee computation
// and the call, bail out.
Node* merge = NodeProperties::GetControlInput(callee);
@@ -584,7 +583,7 @@ void JSInliningHeuristic::CreateOrReuseDispatch(Node* node, Node* callee,
int input_count) {
SourcePositionTable::Scope position(
source_positions_, source_positions_->GetSourcePosition(node));
- if (TryReuseDispatch(node, callee, candidate, if_successes, calls, inputs,
+ if (TryReuseDispatch(node, callee, if_successes, calls, inputs,
input_count)) {
return;
}
@@ -744,7 +743,7 @@ void JSInliningHeuristic::PrintCandidates() {
? candidate.functions[i].value().shared()
: candidate.shared_info.value();
PrintF(" - size:%d, name: %s\n", candidate.bytecode[i].value().length(),
- shared.object()->DebugName()->ToCString().get());
+ shared.object()->DebugName().ToCString().get());
}
}
}
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index b66fe7131e..99ad258c31 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -73,9 +73,8 @@ class JSInliningHeuristic final : public AdvancedReducer {
void CreateOrReuseDispatch(Node* node, Node* callee,
Candidate const& candidate, Node** if_successes,
Node** calls, Node** inputs, int input_count);
- bool TryReuseDispatch(Node* node, Node* callee, Candidate const& candidate,
- Node** if_successes, Node** calls, Node** inputs,
- int input_count);
+ bool TryReuseDispatch(Node* node, Node* callee, Node** if_successes,
+ Node** calls, Node** inputs, int input_count);
enum StateCloneMode { kCloneState, kChangeInPlace };
Node* DuplicateFrameStateAndRename(Node* frame_state, Node* from, Node* to,
StateCloneMode mode);
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 17b5d2e7cb..e43e710da7 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -5,7 +5,8 @@
#include "src/compiler/js-inlining.h"
#include "src/ast/ast.h"
-#include "src/compiler.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/common-operator.h"
@@ -16,9 +17,8 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/objects/feedback-cell-inl.h"
-#include "src/optimized-compilation-info.h"
#include "src/parsing/parse-info.h"
namespace v8 {
@@ -31,12 +31,13 @@ namespace {
static const int kMaxDepthForInlining = 50;
} // namespace
-#define TRACE(...) \
- do { \
- if (FLAG_trace_turbo_inlining) PrintF(__VA_ARGS__); \
+#define TRACE(x) \
+ do { \
+ if (FLAG_trace_turbo_inlining) { \
+ StdoutStream() << x << "\n"; \
+ } \
} while (false)
-
// Provides convenience accessors for the common layout of nodes having either
// the {JSCall} or the {JSConstruct} operator.
class JSCallAccessor {
@@ -73,7 +74,7 @@ class JSCallAccessor {
return call_->op()->ValueInputCount() - 2;
}
- CallFrequency frequency() const {
+ CallFrequency const& frequency() const {
return (call_->opcode() == IrOpcode::kJSCall)
? CallParametersOf(call_->op()).frequency()
: ConstructParametersOf(call_->op()).frequency();
@@ -147,10 +148,9 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
// Link uncaught calls in the inlinee to {exception_target}
int subcall_count = static_cast<int>(uncaught_subcalls.size());
if (subcall_count > 0) {
- TRACE(
- "Inlinee contains %d calls without local exception handler; "
- "linking to surrounding exception handler\n",
- subcall_count);
+ TRACE("Inlinee contains " << subcall_count
+ << " calls without local exception handler; "
+ << "linking to surrounding exception handler.");
}
NodeVector on_exception_nodes(local_zone_);
for (Node* subcall : uncaught_subcalls) {
@@ -235,11 +235,11 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count,
BailoutId bailout_id,
FrameStateType frame_state_type,
- Handle<SharedFunctionInfo> shared,
+ SharedFunctionInfoRef shared,
Node* context) {
const FrameStateFunctionInfo* state_info =
- common()->CreateFrameStateFunctionInfo(frame_state_type,
- parameter_count + 1, 0, shared);
+ common()->CreateFrameStateFunctionInfo(
+ frame_state_type, parameter_count + 1, 0, shared.object());
const Operator* op = common()->FrameState(
bailout_id, OutputFrameStateCombine::Ignore(), state_info);
@@ -263,13 +263,10 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
namespace {
// TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
-bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
+bool NeedsImplicitReceiver(SharedFunctionInfoRef shared_info) {
DisallowHeapAllocation no_gc;
- if (!shared_info->construct_as_builtin()) {
- return !IsDerivedConstructor(shared_info->kind());
- } else {
- return false;
- }
+ return !shared_info.construct_as_builtin() &&
+ !IsDerivedConstructor(shared_info.kind());
}
} // namespace
@@ -277,8 +274,8 @@ bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
// Determines whether the call target of the given call {node} is statically
// known and can be used as an inlining candidate. The {SharedFunctionInfo} of
// the call target is provided (the exact closure might be unknown).
-bool JSInliner::DetermineCallTarget(
- Node* node, Handle<SharedFunctionInfo>& shared_info_out) {
+base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
+ Node* node) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
HeapObjectMatcher match(node->InputAt(0));
@@ -286,11 +283,13 @@ bool JSInliner::DetermineCallTarget(
// calls whenever the target is a constant function object, as follows:
// - JSCall(target:constant, receiver, args...)
// - JSConstruct(target:constant, args..., new.target)
- if (match.HasValue() && match.Value()->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+ if (match.HasValue() && match.Ref(broker()).IsJSFunction()) {
+ JSFunctionRef function = match.Ref(broker()).AsJSFunction();
- // Don't inline if the function has never run.
- if (!function->has_feedback_vector()) return false;
+ // The function might have not been called yet.
+ if (!function.has_feedback_vector()) {
+ return base::nullopt;
+ }
// Disallow cross native-context inlining for now. This means that all parts
// of the resulting code will operate on the same global object. This also
@@ -300,12 +299,11 @@ bool JSInliner::DetermineCallTarget(
// TODO(turbofan): We might want to revisit this restriction later when we
// have a need for this, and we know how to model different native contexts
// in the same graph in a compositional way.
- if (function->native_context() != info_->native_context()) {
- return false;
+ if (!function.native_context().equals(broker()->native_context())) {
+ return base::nullopt;
}
- shared_info_out = handle(function->shared(), isolate());
- return true;
+ return function.shared();
}
// This reducer can also handle calls where the target is statically known to
@@ -315,19 +313,15 @@ bool JSInliner::DetermineCallTarget(
if (match.IsJSCreateClosure()) {
CreateClosureParameters const& p = CreateClosureParametersOf(match.op());
- // Disallow inlining in case the instantiation site was never run and hence
- // the vector cell does not contain a valid feedback vector for the call
- // target.
// TODO(turbofan): We might consider to eagerly create the feedback vector
// in such a case (in {DetermineCallContext} below) eventually.
- Handle<FeedbackCell> cell = p.feedback_cell();
- if (!cell->value()->IsFeedbackVector()) return false;
+ FeedbackCellRef cell(FeedbackCellRef(broker(), p.feedback_cell()));
+ if (!cell.value().IsFeedbackVector()) return base::nullopt;
- shared_info_out = p.shared_info();
- return true;
+ return SharedFunctionInfoRef(broker(), p.shared_info());
}
- return false;
+ return base::nullopt;
}
// Determines statically known information about the call target (assuming that
@@ -335,20 +329,19 @@ bool JSInliner::DetermineCallTarget(
// following static information is provided:
// - context : The context (as SSA value) bound by the call target.
// - feedback_vector : The target is guaranteed to use this feedback vector.
-void JSInliner::DetermineCallContext(
- Node* node, Node*& context_out,
- Handle<FeedbackVector>& feedback_vector_out) {
+FeedbackVectorRef JSInliner::DetermineCallContext(Node* node,
+ Node*& context_out) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
HeapObjectMatcher match(node->InputAt(0));
- if (match.HasValue() && match.Value()->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
- CHECK(function->has_feedback_vector());
+ if (match.HasValue() && match.Ref(broker()).IsJSFunction()) {
+ JSFunctionRef function = match.Ref(broker()).AsJSFunction();
+ // This was already ensured by DetermineCallTarget
+ CHECK(function.has_feedback_vector());
// The inlinee specializes to the context from the JSFunction object.
- context_out = jsgraph()->Constant(handle(function->context(), isolate()));
- feedback_vector_out = handle(function->feedback_vector(), isolate());
- return;
+ context_out = jsgraph()->Constant(function.context());
+ return function.feedback_vector();
}
if (match.IsJSCreateClosure()) {
@@ -356,44 +349,32 @@ void JSInliner::DetermineCallContext(
// Load the feedback vector of the target by looking up its vector cell at
// the instantiation site (we only decide to inline if it's populated).
- Handle<FeedbackCell> cell = p.feedback_cell();
- DCHECK(cell->value()->IsFeedbackVector());
+ FeedbackCellRef cell(FeedbackCellRef(broker(), p.feedback_cell()));
// The inlinee uses the locally provided context at instantiation.
context_out = NodeProperties::GetContextInput(match.node());
- feedback_vector_out =
- handle(FeedbackVector::cast(cell->value()), isolate());
- return;
+ return cell.value().AsFeedbackVector();
}
// Must succeed.
UNREACHABLE();
}
-Handle<Context> JSInliner::native_context() const {
- return handle(info_->native_context(), isolate());
-}
-
Reduction JSInliner::ReduceJSCall(Node* node) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
- Handle<SharedFunctionInfo> shared_info;
JSCallAccessor call(node);
- // TODO(mslekova): Remove those when inlining is brokerized.
- AllowHandleDereference allow_handle_deref;
- AllowHandleAllocation allow_handle_alloc;
-
// Determine the call target.
- if (!DetermineCallTarget(node, shared_info)) return NoChange();
+ base::Optional<SharedFunctionInfoRef> shared_info(DetermineCallTarget(node));
+ if (!shared_info.has_value()) return NoChange();
DCHECK(shared_info->IsInlineable());
// Constructor must be constructable.
if (node->opcode() == IrOpcode::kJSConstruct &&
!IsConstructable(shared_info->kind())) {
- TRACE("Not inlining %s into %s because constructor is not constructable.\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
+ TRACE("Not inlining " << *shared_info << " into " << info_->shared_info()
+ << " because constructor is not constructable.");
return NoChange();
}
@@ -401,9 +382,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
if (node->opcode() == IrOpcode::kJSCall &&
IsClassConstructor(shared_info->kind())) {
- TRACE("Not inlining %s into %s because callee is a class constructor.\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
+ TRACE("Not inlining " << *shared_info << " into " << info_->shared_info()
+ << " because callee is a class constructor.");
return NoChange();
}
@@ -415,56 +395,40 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
nesting_level++;
if (nesting_level > kMaxDepthForInlining) {
- TRACE(
- "Not inlining %s into %s because call has exceeded the maximum depth "
- "for function inlining\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
+ TRACE("Not inlining "
+ << *shared_info << " into " << info_->shared_info()
+ << " because call has exceeded the maximum depth for function "
+ "inlining.");
return NoChange();
}
}
- // Calls surrounded by a local try-block are only inlined if the appropriate
- // flag is active. We also discover the {IfException} projection this way.
Node* exception_target = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &exception_target) &&
- !FLAG_inline_into_try) {
- TRACE(
- "Try block surrounds #%d:%s and --no-inline-into-try active, so not "
- "inlining %s into %s.\n",
- exception_target->id(), exception_target->op()->mnemonic(),
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- }
-
- IsCompiledScope is_compiled_scope(shared_info->is_compiled_scope());
- // JSInliningHeuristic should have already filtered candidates without
- // a BytecodeArray by calling SharedFunctionInfo::IsInlineable. For the ones
- // passing the check, a reference to the bytecode was retained to make sure
- // it never gets flushed, so the following check should always hold true.
- CHECK(is_compiled_scope.is_compiled());
-
- if (info_->is_source_positions_enabled()) {
- SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(), shared_info);
+ NodeProperties::IsExceptionalCall(node, &exception_target);
+
+ // JSInliningHeuristic has already filtered candidates without a
+ // BytecodeArray by calling SharedFunctionInfoRef::IsInlineable. For the ones
+ // passing the IsInlineable check, The broker holds a reference to the
+ // bytecode array, which prevents it from getting flushed.
+ // Therefore, the following check should always hold true.
+ CHECK(shared_info.value().is_compiled());
+
+ if (!FLAG_concurrent_inlining && info_->is_source_positions_enabled()) {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(),
+ shared_info->object());
}
- TRACE("Inlining %s into %s%s\n", shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get(),
- (exception_target != nullptr) ? " (inside try-block)" : "");
-
+ TRACE("Inlining " << *shared_info << " into " << info_->shared_info()
+ << ((exception_target != nullptr) ? " (inside try-block)"
+ : ""));
// Determine the targets feedback vector and its context.
Node* context;
- Handle<FeedbackVector> feedback_vector;
- DetermineCallContext(node, context, feedback_vector);
+ FeedbackVectorRef feedback_vector = DetermineCallContext(node, context);
if (FLAG_concurrent_inlining) {
- SharedFunctionInfoRef sfi(broker(), shared_info);
- FeedbackVectorRef feedback(broker(), feedback_vector);
- if (!sfi.IsSerializedForCompilation(feedback)) {
- TRACE_BROKER(broker(), "Missed opportunity to inline a function ("
- << Brief(*sfi.object()) << " with "
- << Brief(*feedback.object()) << ")");
+ if (!shared_info.value().IsSerializedForCompilation(feedback_vector)) {
+ TRACE("Missed opportunity to inline a function ("
+ << *shared_info << " with " << feedback_vector << ")");
return NoChange();
}
}
@@ -473,12 +437,12 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// After this point, we've made a decision to inline this function.
// We shall not bailout from inlining if we got here.
- Handle<BytecodeArray> bytecode_array =
- handle(shared_info->GetBytecodeArray(), isolate());
+ BytecodeArrayRef bytecode_array = shared_info.value().GetBytecodeArray();
// Remember that we inlined this function.
int inlining_id = info_->AddInlinedFunction(
- shared_info, bytecode_array, source_positions_->GetSourcePosition(node));
+ shared_info.value().object(), bytecode_array.object(),
+ source_positions_->GetSourcePosition(node));
// Create the subgraph for the inlinee.
Node* start;
@@ -486,16 +450,31 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
{
// Run the BytecodeGraphBuilder to create the subgraph.
Graph::SubgraphScope scope(graph());
- JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags;
+ BytecodeGraphBuilderFlags flags(
+ BytecodeGraphBuilderFlag::kSkipFirstStackCheck);
+ if (info_->is_analyze_environment_liveness()) {
+ flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
+ }
if (info_->is_bailout_on_uninitialized()) {
- flags |= JSTypeHintLowering::kBailoutOnUninitialized;
+ flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
+ }
+ {
+ // TODO(mslekova): Remove the following once bytecode graph builder
+ // is brokerized. Also, remove the context argument from
+ // BuildGraphFromBytecode and extract it from the broker there.
+ AllowHandleDereference allow_handle_deref;
+ AllowHandleAllocation allow_handle_alloc;
+ AllowHeapAllocation allow_heap_alloc;
+ AllowCodeDependencyChange allow_code_dep_change;
+ Handle<Context> native_context =
+ handle(info_->native_context(), isolate());
+
+ BuildGraphFromBytecode(broker(), zone(), bytecode_array.object(),
+ shared_info.value().object(),
+ feedback_vector.object(), BailoutId::None(),
+ jsgraph(), call.frequency(), source_positions_,
+ native_context, inlining_id, flags);
}
- CallFrequency frequency = call.frequency();
- BytecodeGraphBuilder graph_builder(
- zone(), bytecode_array, shared_info, feedback_vector, BailoutId::None(),
- jsgraph(), frequency, source_positions_, native_context(), inlining_id,
- flags, false, info_->is_analyze_environment_liveness());
- graph_builder.CreateGraph();
// Extract the inlinee start/end nodes.
start = graph()->start();
@@ -542,13 +521,13 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// where execution continues at {construct_stub_create_deopt_pc_offset}).
Node* receiver = jsgraph()->TheHoleConstant(); // Implicit receiver.
Node* context = NodeProperties::GetContextInput(node);
- if (NeedsImplicitReceiver(shared_info)) {
+ if (NeedsImplicitReceiver(shared_info.value())) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state_inside = CreateArtificialFrameState(
node, frame_state, call.formal_arguments(),
BailoutId::ConstructStubCreate(), FrameStateType::kConstructStub,
- shared_info, context);
+ shared_info.value(), context);
Node* create =
graph()->NewNode(javascript()->Create(), call.target(), new_target,
context, frame_state_inside, effect, control);
@@ -603,7 +582,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
frame_state = CreateArtificialFrameState(
node, frame_state, call.formal_arguments(),
BailoutId::ConstructStubInvoke(), FrameStateType::kConstructStub,
- shared_info, context);
+ shared_info.value(), context);
}
// Insert a JSConvertReceiver node for sloppy callees. Note that the context
@@ -613,8 +592,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
if (NodeProperties::CanBePrimitive(broker(), call.receiver(), effect)) {
CallParameters const& p = CallParametersOf(node->op());
- Node* global_proxy = jsgraph()->HeapConstant(
- handle(info_->native_context()->global_proxy(), isolate()));
+ Node* global_proxy =
+ jsgraph()->Constant(broker()->native_context().global_proxy_object());
Node* receiver = effect =
graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
call.receiver(), global_proxy, effect, start);
@@ -632,7 +611,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
if (call.formal_arguments() != parameter_count) {
frame_state = CreateArtificialFrameState(
node, frame_state, call.formal_arguments(), BailoutId::None(),
- FrameStateType::kArgumentsAdaptor, shared_info);
+ FrameStateType::kArgumentsAdaptor, shared_info.value());
}
return InlineCall(node, new_target, context, frame_state, start, end,
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index ab481210af..94a9e71b2e 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -51,7 +51,6 @@ class JSInliner final : public AdvancedReducer {
// TODO(neis): Make heap broker a component of JSGraph?
JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const { return jsgraph_->isolate(); }
- Handle<Context> native_context() const;
Zone* const local_zone_;
OptimizedCompilationInfo* info_;
@@ -59,15 +58,13 @@ class JSInliner final : public AdvancedReducer {
JSHeapBroker* const broker_;
SourcePositionTable* const source_positions_;
- bool DetermineCallTarget(Node* node,
- Handle<SharedFunctionInfo>& shared_info_out);
- void DetermineCallContext(Node* node, Node*& context_out,
- Handle<FeedbackVector>& feedback_vector_out);
+ base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
+ FeedbackVectorRef DetermineCallContext(Node* node, Node*& context_out);
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
FrameStateType frame_state_type,
- Handle<SharedFunctionInfo> shared,
+ SharedFunctionInfoRef shared,
Node* context = nullptr);
Reduction InlineCall(Node* call, Node* new_target, Node* context,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 0ba3e7dfda..970a7e3ed6 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -6,16 +6,16 @@
#include <stack>
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -28,8 +28,12 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
const Runtime::Function* const f =
Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id());
+ if (f->function_id == Runtime::kTurbofanStaticAssert)
+ return ReduceTurbofanStaticAssert(node);
if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) return NoChange();
switch (f->function_id) {
+ case Runtime::kInlineCopyDataProperties:
+ return ReduceCopyDataProperties(node);
case Runtime::kInlineCreateIterResultObject:
return ReduceCreateIterResultObject(node);
case Runtime::kInlineDeoptimizeNow:
@@ -62,8 +66,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceGeneratorGetResumeMode(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
- case Runtime::kInlineIsTypedArray:
- return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
@@ -72,16 +74,22 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToLength(node);
case Runtime::kInlineToObject:
return ReduceToObject(node);
- case Runtime::kInlineToString:
+ case Runtime::kInlineToStringRT:
return ReduceToString(node);
case Runtime::kInlineCall:
return ReduceCall(node);
+ case Runtime::kInlineIncBlockCounter:
+ return ReduceIncBlockCounter(node);
default:
break;
}
return NoChange();
}
+Reduction JSIntrinsicLowering::ReduceCopyDataProperties(Node* node) {
+ return Change(
+ node, Builtins::CallableFor(isolate(), Builtins::kCopyDataProperties), 0);
+}
Reduction JSIntrinsicLowering::ReduceCreateIterResultObject(Node* node) {
Node* const value = NodeProperties::GetValueInput(node, 0);
@@ -260,6 +268,19 @@ Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
return Change(node, simplified()->ObjectIsSmi());
}
+Reduction JSIntrinsicLowering::ReduceTurbofanStaticAssert(Node* node) {
+ if (FLAG_always_opt) {
+ // Ignore static asserts, as we most likely won't have enough information
+ RelaxEffectsAndControls(node);
+ } else {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* assert = graph()->NewNode(common()->StaticAssert(), value, effect);
+ ReplaceWithValue(node, node, assert, nullptr);
+ }
+ return Changed(jsgraph_->UndefinedConstant());
+}
+
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
// Replace all effect uses of {node} with the effect dependency.
RelaxEffectsAndControls(node);
@@ -301,6 +322,14 @@ Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
return Changed(node);
}
+Reduction JSIntrinsicLowering::ReduceIncBlockCounter(Node* node) {
+ DCHECK(!Linkage::NeedsFrameStateInput(Runtime::kIncBlockCounter));
+ DCHECK(!Linkage::NeedsFrameStateInput(Runtime::kInlineIncBlockCounter));
+ return Change(node,
+ Builtins::CallableFor(isolate(), Builtins::kIncBlockCounter), 0,
+ kDoesNotNeedFrameState);
+}
+
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
RelaxControls(node);
@@ -336,19 +365,21 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
return Changed(node);
}
-
Reduction JSIntrinsicLowering::Change(Node* node, Callable const& callable,
- int stack_parameter_count) {
+ int stack_parameter_count,
+ enum FrameStateFlag frame_state_flag) {
+ CallDescriptor::Flags flags = frame_state_flag == kNeedsFrameState
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(), stack_parameter_count,
- CallDescriptor::kNeedsFrameState, node->op()->properties());
+ graph()->zone(), callable.descriptor(), stack_parameter_count, flags,
+ node->op()->properties());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
-
Graph* JSIntrinsicLowering::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 7313264c08..844e051d0a 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -6,9 +6,9 @@
#define V8_COMPILER_JS_INTRINSIC_LOWERING_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -39,6 +39,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceCopyDataProperties(Node* node);
Reduction ReduceCreateIterResultObject(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceCreateJSGeneratorObject(Node* node);
@@ -57,18 +58,26 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
+ Reduction ReduceTurbofanStaticAssert(Node* node);
Reduction ReduceToLength(Node* node);
Reduction ReduceToObject(Node* node);
Reduction ReduceToString(Node* node);
Reduction ReduceCall(Node* node);
+ Reduction ReduceIncBlockCounter(Node* node);
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c,
Node* d);
+
+ enum FrameStateFlag {
+ kNeedsFrameState,
+ kDoesNotNeedFrameState,
+ };
Reduction Change(Node* node, Callable const& callable,
- int stack_parameter_count);
+ int stack_parameter_count,
+ enum FrameStateFlag frame_state_flag = kNeedsFrameState);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 68f6541ab0..312ab38f51 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -4,9 +4,10 @@
#include "src/compiler/js-native-context-specialization.h"
-#include "src/accessors.h"
-#include "src/api-inl.h"
-#include "src/code-factory.h"
+#include "src/api/api-inl.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/string-constants.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
#include "src/compiler/allocation-builder.h"
@@ -14,19 +15,19 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/map-inference.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/property-access-builder.h"
#include "src/compiler/type-cache.h"
-#include "src/dtoa.h"
-#include "src/feedback-vector.h"
-#include "src/field-index-inl.h"
-#include "src/isolate-inl.h"
+#include "src/compiler/vector-slot-pair.h"
+#include "src/execution/isolate-inl.h"
+#include "src/numbers/dtoa.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/field-index-inl.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/templates.h"
-#include "src/string-constants.h"
-#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -40,7 +41,7 @@ namespace compiler {
namespace {
-bool HasNumberMaps(JSHeapBroker* broker, MapHandles const& maps) {
+bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps) {
for (auto map : maps) {
MapRef map_ref(broker, map);
if (map_ref.IsHeapNumberMap()) return true;
@@ -48,7 +49,8 @@ bool HasNumberMaps(JSHeapBroker* broker, MapHandles const& maps) {
return false;
}
-bool HasOnlyJSArrayMaps(JSHeapBroker* broker, MapHandles const& maps) {
+bool HasOnlyJSArrayMaps(JSHeapBroker* broker,
+ ZoneVector<Handle<Map>> const& maps) {
for (auto map : maps) {
MapRef map_ref(broker, map);
if (!map_ref.IsJSArrayMap()) return false;
@@ -56,6 +58,17 @@ bool HasOnlyJSArrayMaps(JSHeapBroker* broker, MapHandles const& maps) {
return true;
}
+void TryUpdateThenDropDeprecated(Isolate* isolate, MapHandles* maps) {
+ for (auto it = maps->begin(); it != maps->end();) {
+ if (Map::TryUpdate(isolate, *it).ToHandle(&*it)) {
+ DCHECK(!(*it)->is_deprecated());
+ ++it;
+ } else {
+ it = maps->erase(it);
+ }
+ }
+}
+
} // namespace
JSNativeContextSpecialization::JSNativeContextSpecialization(
@@ -233,7 +246,7 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked();
DCHECK(shared->is_compiled());
int register_count = shared->internal_formal_parameter_count() +
- shared->GetBytecodeArray()->register_count();
+ shared->GetBytecodeArray().register_count();
Node* value = effect =
graph()->NewNode(javascript()->CreateAsyncFunctionObject(register_count),
closure, receiver, promise, context, effect, control);
@@ -406,6 +419,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
access_info_factory.ComputePropertyAccessInfo(
receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad);
if (access_info.IsInvalid()) return NoChange();
+ access_info.RecordDependencies(dependencies());
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
@@ -418,8 +432,6 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
kStartAtPrototype);
// Monomorphic property access.
- constructor =
- access_builder.BuildCheckHeapObject(constructor, &effect, control);
access_builder.BuildCheckMaps(constructor, &effect, control,
access_info.receiver_maps());
@@ -432,39 +444,17 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
- if (access_info.IsDataConstant() || access_info.IsDataConstantField()) {
+ if (access_info.IsDataConstant()) {
// Determine actual holder.
Handle<JSObject> holder;
bool found_on_proto = access_info.holder().ToHandle(&holder);
if (!found_on_proto) holder = receiver;
- Handle<Object> constant;
- if (access_info.IsDataConstant()) {
- DCHECK(!FLAG_track_constant_fields);
- constant = access_info.constant();
- } else {
- DCHECK(FLAG_track_constant_fields);
- DCHECK(access_info.IsDataConstantField());
- FieldIndex field_index = access_info.field_index();
- constant = JSObject::FastPropertyAt(holder, Representation::Tagged(),
- field_index);
- if (!constant->IsCallable()) {
- return NoChange();
- }
-
- // Install dependency on constness. Unfortunately, access_info does not
- // track descriptor index, so we have to search for it.
- MapRef holder_map(broker(), handle(holder->map(), isolate()));
- Handle<DescriptorArray> descriptors(
- holder_map.object()->instance_descriptors(), isolate());
- int descriptor_index = descriptors->Search(
- *(factory()->has_instance_symbol()), *(holder_map.object()));
- CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
- holder_map.SerializeOwnDescriptors();
- if (dependencies()->DependOnFieldConstness(
- holder_map, descriptor_index) != PropertyConstness::kConst) {
- return NoChange();
- }
+ FieldIndex field_index = access_info.field_index();
+ Handle<Object> constant = JSObject::FastPropertyAt(
+ holder, access_info.field_representation(), field_index);
+ if (!constant->IsCallable()) {
+ return NoChange();
}
if (found_on_proto) {
@@ -552,8 +542,8 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
none = false;
break;
}
- if (!current->map()->is_stable() ||
- current->map()->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ if (!current->map().is_stable() ||
+ current->map().instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
return kMayBeInPrototypeChain;
}
}
@@ -570,7 +560,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
// might be a different object each time, so it's much simpler to include
// {prototype}. That does, however, mean that we must check {prototype}'s
// map stability.
- if (!prototype->map()->is_stable()) return kMayBeInPrototypeChain;
+ if (!prototype->map().is_stable()) return kMayBeInPrototypeChain;
last_prototype.emplace(broker(), Handle<JSObject>::cast(prototype));
}
WhereToStart start = result == NodeProperties::kUnreliableReceiverMaps
@@ -676,16 +666,11 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
return NoChange();
}
- // Check if we know something about the {value}.
- ZoneHandleSet<Map> value_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), value, effect, &value_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, value_maps.size());
-
- // Check that the {value} cannot be a JSPromise.
- for (Handle<Map> const value_map : value_maps) {
- if (value_map->IsJSPromiseMap()) return NoChange();
+ // Only optimize if {value} cannot be a JSPromise.
+ MapInference inference(broker(), value, effect);
+ if (!inference.HaveMaps() ||
+ inference.AnyOfInstanceTypesAre(JS_PROMISE_TYPE)) {
+ return NoChange();
}
if (!dependencies()->DependOnPromiseHookProtector()) return NoChange();
@@ -709,38 +694,31 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Check if we know something about the {resolution}.
- ZoneHandleSet<Map> resolution_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), resolution, effect,
- &resolution_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
- DCHECK_NE(0, resolution_maps.size());
-
- // When the {resolution_maps} information is unreliable, we can
- // still optimize if all individual {resolution_maps} are stable.
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- for (Handle<Map> resolution_map : resolution_maps) {
- if (!resolution_map->is_stable()) return NoChange();
- }
- }
+ MapInference inference(broker(), resolution, effect);
+ if (!inference.HaveMaps()) return NoChange();
+ MapHandles const& resolution_maps = inference.GetMaps();
// Compute property access info for "then" on {resolution}.
+ ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
+ access_info_factory.ComputePropertyAccessInfos(
+ resolution_maps, factory()->then_string(), AccessMode::kLoad,
+ &access_infos);
PropertyAccessInfo access_info =
- access_info_factory.ComputePropertyAccessInfo(
- MapHandles(resolution_maps.begin(), resolution_maps.end()),
- factory()->then_string(), AccessMode::kLoad);
- if (access_info.IsInvalid()) return NoChange();
+ access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
+ AccessMode::kLoad);
+ if (access_info.IsInvalid()) return inference.NoChange();
+
+ // Only optimize when {resolution} definitely doesn't have a "then" property.
+ if (!access_info.IsNotFound()) return inference.NoChange();
- // We can further optimize the case where {resolution}
- // definitely doesn't have a "then" property.
- if (!access_info.IsNotFound()) return NoChange();
+ if (!inference.RelyOnMapsViaStability(dependencies())) {
+ return inference.NoChange();
+ }
- dependencies()->DependOnStablePrototypeChains(
- access_info.receiver_maps(),
- result == NodeProperties::kUnreliableReceiverMaps ? kStartAtReceiver
- : kStartAtPrototype);
+ dependencies()->DependOnStablePrototypeChains(access_info.receiver_maps(),
+ kStartAtPrototype);
// Simply fulfill the {promise} with the {resolution}.
Node* value = effect =
@@ -770,9 +748,11 @@ FieldAccess ForPropertyCellValue(MachineRepresentation representation,
Type type, MaybeHandle<Map> map,
NameRef const& name) {
WriteBarrierKind kind = kFullWriteBarrier;
- if (representation == MachineRepresentation::kTaggedSigned) {
+ if (representation == MachineRepresentation::kTaggedSigned ||
+ representation == MachineRepresentation::kCompressedSigned) {
kind = kNoWriteBarrier;
- } else if (representation == MachineRepresentation::kTaggedPointer) {
+ } else if (representation == MachineRepresentation::kTaggedPointer ||
+ representation == MachineRepresentation::kCompressedPointer) {
kind = kPointerWriteBarrier;
}
MachineType r = MachineType::TypeForRepresentation(representation);
@@ -884,20 +864,21 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// Load from constant type cell can benefit from type feedback.
MaybeHandle<Map> map;
Type property_cell_value_type = Type::NonInternal();
- MachineRepresentation representation = MachineRepresentation::kTagged;
+ MachineRepresentation representation =
+ MachineType::RepCompressedTagged();
if (property_details.cell_type() == PropertyCellType::kConstantType) {
// Compute proper type based on the current value in the cell.
if (property_cell_value.IsSmi()) {
property_cell_value_type = Type::SignedSmall();
- representation = MachineRepresentation::kTaggedSigned;
+ representation = MachineType::RepCompressedTaggedSigned();
} else if (property_cell_value.IsHeapNumber()) {
property_cell_value_type = Type::Number();
- representation = MachineRepresentation::kTaggedPointer;
+ representation = MachineType::RepCompressedTaggedPointer();
} else {
MapRef property_cell_value_map =
property_cell_value.AsHeapObject().map();
property_cell_value_type = Type::For(property_cell_value_map);
- representation = MachineRepresentation::kTaggedPointer;
+ representation = MachineType::RepCompressedTaggedPointer();
// We can only use the property cell value map for map check
// elimination if it's stable, i.e. the HeapObject wasn't
@@ -940,7 +921,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
// cell.
dependencies()->DependOnGlobalProperty(property_cell);
Type property_cell_value_type;
- MachineRepresentation representation = MachineRepresentation::kTagged;
+ MachineRepresentation representation =
+ MachineType::RepCompressedTagged();
if (property_cell_value.IsHeapObject()) {
// We cannot do anything if the {property_cell_value}s map is no
// longer stable.
@@ -959,13 +941,13 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
ZoneHandleSet<Map>(property_cell_value_map.object())),
value, effect, control);
property_cell_value_type = Type::OtherInternal();
- representation = MachineRepresentation::kTaggedPointer;
+ representation = MachineType::RepCompressedTaggedPointer();
} else {
// Check that the {value} is a Smi.
value = effect = graph()->NewNode(
simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
property_cell_value_type = Type::SignedSmall();
- representation = MachineRepresentation::kTaggedSigned;
+ representation = MachineType::RepCompressedTaggedSigned();
}
effect = graph()->NewNode(simplified()->StoreField(ForPropertyCellValue(
representation, property_cell_value_type,
@@ -980,7 +962,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
dependencies()->DependOnGlobalProperty(property_cell);
effect = graph()->NewNode(
simplified()->StoreField(ForPropertyCellValue(
- MachineRepresentation::kTagged, Type::NonInternal(),
+ MachineType::RepCompressedTagged(), Type::NonInternal(),
MaybeHandle<Map>(), name)),
jsgraph()->Constant(property_cell), value, effect, control);
break;
@@ -1000,6 +982,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
if (!p.feedback().IsValid()) return NoChange();
FeedbackSource source(p.feedback());
+ // TODO(neis): Make consistent with other feedback processing code.
GlobalAccessFeedback const* processed =
FLAG_concurrent_inlining
? broker()->GetGlobalAccessFeedback(source)
@@ -1061,8 +1044,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
}
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
- Node* node, Node* value, MapHandles const& receiver_maps,
- NameRef const& name, AccessMode access_mode, Node* key) {
+ Node* node, Node* value, NamedAccessFeedback const& feedback,
+ AccessMode access_mode, Node* key) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -1075,31 +1058,27 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ AccessInfoFactory access_info_factory(broker(), dependencies(), zone());
+ if (!access_info_factory.FinalizePropertyAccessInfos(
+ feedback.access_infos(), access_mode, &access_infos)) {
+ return NoChange();
+ }
+
// Check if we have an access o.x or o.x=v where o is the current
// native contexts' global proxy, and turn that into a direct access
- // to the current native contexts' global object instead.
- if (receiver_maps.size() == 1) {
- MapRef receiver_map(broker(), receiver_maps.front());
+ // to the current native context's global object instead.
+ if (access_infos.size() == 1 && access_infos[0].receiver_maps().size() == 1) {
+ MapRef receiver_map(broker(), access_infos[0].receiver_maps()[0]);
if (receiver_map.IsMapOfCurrentGlobalProxy()) {
- return ReduceGlobalAccess(node, receiver, value, name, access_mode, key);
+ return ReduceGlobalAccess(node, receiver, value, feedback.name(),
+ access_mode, key);
}
}
- // Compute property access infos for the receiver maps.
- AccessInfoFactory access_info_factory(broker(), dependencies(),
- graph()->zone());
- ZoneVector<PropertyAccessInfo> raw_access_infos(zone());
- access_info_factory.ComputePropertyAccessInfos(
- receiver_maps, name.object(), access_mode, &raw_access_infos);
- ZoneVector<PropertyAccessInfo> access_infos(zone());
- if (!access_info_factory.FinalizePropertyAccessInfos(
- raw_access_infos, access_mode, &access_infos)) {
- return NoChange();
- }
-
- // Ensure that {key} matches the specified {name} (if {key} is given).
+ // Ensure that {key} matches the specified name (if {key} is given).
if (key != nullptr) {
- effect = BuildCheckEqualsName(name, key, effect, control);
+ effect = BuildCheckEqualsName(feedback.name(), key, effect, control);
}
// Collect call nodes to rewire exception edges.
@@ -1144,8 +1123,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
effect =
graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
} else {
- receiver =
- access_builder.BuildCheckHeapObject(receiver, &effect, control);
access_builder.BuildCheckMaps(receiver, &effect, control,
access_info.receiver_maps());
}
@@ -1153,7 +1130,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
- receiver, value, context, frame_state, effect, control, name,
+ receiver, value, context, frame_state, effect, control, feedback.name(),
if_exceptions, access_info, access_mode);
value = continuation.value();
effect = continuation.effect();
@@ -1174,7 +1151,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
}
- // Ensure that {receiver} is a heap object.
+ // Handle the case that {receiver} may be a number.
Node* receiverissmi_control = nullptr;
Node* receiverissmi_effect = effect;
if (receiverissmi_possible) {
@@ -1183,9 +1160,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
control = graph()->NewNode(common()->IfFalse(), branch);
receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
receiverissmi_effect = effect;
- } else {
- receiver =
- access_builder.BuildCheckHeapObject(receiver, &effect, control);
}
// Generate code for the various different property access patterns.
@@ -1198,7 +1172,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* this_control = fallthrough_control;
// Perform map check on {receiver}.
- MapHandles const& receiver_maps = access_info.receiver_maps();
+ ZoneVector<Handle<Map>> const& receiver_maps =
+ access_info.receiver_maps();
{
// Whether to insert a dedicated MapGuard node into the
// effect to be able to learn from the control flow.
@@ -1269,9 +1244,10 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Generate the actual property access.
- ValueEffectControl continuation = BuildPropertyAccess(
- this_receiver, this_value, context, frame_state, this_effect,
- this_control, name, if_exceptions, access_info, access_mode);
+ ValueEffectControl continuation =
+ BuildPropertyAccess(this_receiver, this_value, context, frame_state,
+ this_effect, this_control, feedback.name(),
+ if_exceptions, access_info, access_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -1328,28 +1304,16 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSStoreNamedOwn);
Node* const receiver = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- // Check if we are accessing the current native contexts' global proxy.
+ // Optimize accesses to the current native context's global proxy.
HeapObjectMatcher m(receiver);
if (m.HasValue() &&
m.Ref(broker()).equals(native_context().global_proxy_object())) {
- // Optimize accesses to the current native contexts' global proxy.
return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
}
- // Extract receiver maps from the IC using the {nexus}.
- MapHandles receiver_maps;
- if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
- return NoChange();
- } else if (receiver_maps.empty()) {
- return ReduceSoftDeoptimize(
- node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
- }
- DCHECK(!nexus.IsUninitialized());
-
- // Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccess(node, value, receiver_maps, name, access_mode);
+ return ReducePropertyAccessUsingProcessedFeedback(node, nullptr, name, value,
+ nexus, access_mode);
}
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
@@ -1472,8 +1436,8 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
} // namespace
Reduction JSNativeContextSpecialization::ReduceElementAccess(
- Node* node, Node* index, Node* value, FeedbackNexus const& nexus,
- MapHandles const& receiver_maps, AccessMode access_mode,
+ Node* node, Node* index, Node* value,
+ ElementAccessFeedback const& processed, AccessMode access_mode,
KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
@@ -1484,9 +1448,11 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Node* frame_state = NodeProperties::FindFrameStateBefore(node);
+ Node* frame_state =
+ NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
- if (HasOnlyStringMaps(broker(), receiver_maps)) {
+ if (HasOnlyStringMaps(broker(), processed.receiver_maps)) {
+ DCHECK(processed.transitions.empty());
return ReduceElementAccessOnString(node, index, value, access_mode,
load_mode);
}
@@ -1495,13 +1461,10 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
ZoneVector<ElementAccessInfo> access_infos(zone());
- if (!access_info_factory.ComputeElementAccessInfos(
- nexus, receiver_maps, access_mode, &access_infos)) {
+ if (!access_info_factory.ComputeElementAccessInfos(processed, access_mode,
+ &access_infos)) {
return NoChange();
- }
-
- // Nothing to do if we have no non-deprecated maps.
- if (access_infos.empty()) {
+ } else if (access_infos.empty()) {
return ReduceSoftDeoptimize(
node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
}
@@ -1545,13 +1508,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
}
- // Ensure that {receiver} is a heap object.
- PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
- receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
-
// Check if we have the necessary data for building element accesses.
for (ElementAccessInfo const& access_info : access_infos) {
- if (!IsFixedTypedArrayElementsKind(access_info.elements_kind())) continue;
+ if (!IsTypedArrayElementsKind(access_info.elements_kind())) continue;
base::Optional<JSTypedArrayRef> typed_array =
GetTypedArrayConstant(broker(), receiver);
if (typed_array.has_value()) {
@@ -1565,6 +1524,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Check for the monomorphic case.
+ PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
if (access_infos.size() == 1) {
ElementAccessInfo access_info = access_infos.front();
@@ -1635,7 +1595,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Perform map check(s) on {receiver}.
- MapHandles const& receiver_maps = access_info.receiver_maps();
+ ZoneVector<Handle<Map>> const& receiver_maps =
+ access_info.receiver_maps();
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
@@ -1774,25 +1735,16 @@ Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
return NoChange();
}
-namespace {
-base::Optional<NameRef> GetNameFeedback(JSHeapBroker* broker,
- FeedbackNexus const& nexus) {
- Name raw_name = nexus.GetName();
- if (raw_name.is_null()) return base::nullopt;
- return NameRef(broker, handle(raw_name, broker->isolate()));
-}
-} // namespace
-
Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
Node* node, Node* key, Node* value, FeedbackNexus const& nexus,
AccessMode access_mode, KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
+ node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
node->opcode() == IrOpcode::kJSHasProperty);
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) &&
receiver->opcode() == IrOpcode::kHeapConstant) {
@@ -1801,52 +1753,96 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
if (reduction.Changed()) return reduction;
}
- // Extract receiver maps from the {nexus}.
- MapHandles receiver_maps;
- if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
- return NoChange();
- } else if (receiver_maps.empty()) {
- return ReduceSoftDeoptimize(
- node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
- }
- DCHECK(!nexus.IsUninitialized());
+ return ReducePropertyAccessUsingProcessedFeedback(node, key, base::nullopt,
+ value, nexus, access_mode,
+ load_mode, store_mode);
+}
- // Check if we have feedback for a named access.
- base::Optional<NameRef> name = GetNameFeedback(broker(), nexus);
- if (name.has_value()) {
- DCHECK_EQ(nexus.GetKeyType(), PROPERTY);
- return ReduceNamedAccess(node, value, receiver_maps, *name, access_mode,
- key);
- }
+Reduction
+JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
+ Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
+ FeedbackNexus const& nexus, AccessMode access_mode,
+ KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ DCHECK_EQ(key == nullptr, static_name.has_value());
+ DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
+ node->opcode() == IrOpcode::kJSStoreProperty ||
+ node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
+ node->opcode() == IrOpcode::kJSHasProperty ||
+ node->opcode() == IrOpcode::kJSLoadNamed ||
+ node->opcode() == IrOpcode::kJSStoreNamed ||
+ node->opcode() == IrOpcode::kJSStoreNamedOwn);
+
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
- // Try to lower element access based on the {receiver_maps}.
- // Only do so if the feedback is not megamorphic so that we can learn
- // something when the ReduceElementAccess code deopts.
- if (nexus.GetKeyType() == ELEMENT && nexus.ic_state() != MEGAMORPHIC) {
- return ReduceElementAccess(node, key, value, nexus, receiver_maps,
- access_mode, load_mode, store_mode);
+ ProcessedFeedback const* processed = nullptr;
+ if (FLAG_concurrent_inlining) {
+ processed = broker()->GetFeedback(FeedbackSource(nexus));
+ // TODO(neis): Infer maps from the graph and consolidate with feedback/hints
+ // and filter impossible candidates based on inferred root map.
+ } else {
+ // TODO(neis): Try to unify this with the similar code in the serializer.
+ if (nexus.ic_state() == UNINITIALIZED) {
+ processed = new (zone()) InsufficientFeedback();
+ } else {
+ MapHandles receiver_maps;
+ if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
+ processed = new (zone()) InsufficientFeedback();
+ } else if (!receiver_maps.empty()) {
+ base::Optional<NameRef> name = static_name.has_value()
+ ? static_name
+ : broker()->GetNameFeedback(nexus);
+ if (name.has_value()) {
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ zone());
+ access_info_factory.ComputePropertyAccessInfos(
+ receiver_maps, name->object(), access_mode, &access_infos);
+ processed = new (zone()) NamedAccessFeedback(*name, access_infos);
+ } else if (nexus.GetKeyType() == ELEMENT &&
+ MEGAMORPHIC != nexus.ic_state()) {
+ processed =
+ broker()->ProcessFeedbackMapsForElementAccess(receiver_maps);
+ }
+ }
+ }
}
- return NoChange();
+ if (processed == nullptr) return NoChange();
+ switch (processed->kind()) {
+ case ProcessedFeedback::kInsufficient:
+ return ReduceSoftDeoptimize(
+ node,
+ DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
+ case ProcessedFeedback::kNamedAccess:
+ return ReduceNamedAccess(node, value, *processed->AsNamedAccess(),
+ access_mode, key);
+ case ProcessedFeedback::kElementAccess:
+ return ReduceElementAccess(node, key, value,
+ *processed->AsElementAccess(), access_mode,
+ load_mode, store_mode);
+ case ProcessedFeedback::kGlobalAccess:
+ UNREACHABLE();
+ }
}
Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
Node* node, DeoptimizeReason reason) {
- if (flags() & kBailoutOnUninitialized) {
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
- frame_state, effect, control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- node->TrimInputCount(0);
- NodeProperties::ChangeOp(node, common()->Dead());
- return Changed(node);
- }
- return NoChange();
+ if (!(flags() & kBailoutOnUninitialized)) return NoChange();
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state =
+ NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
+ return Changed(node);
}
Reduction JSNativeContextSpecialization::ReduceJSHasProperty(Node* node) {
@@ -2151,9 +2147,6 @@ JSNativeContextSpecialization::BuildPropertyLoad(
Node* value;
if (access_info.IsNotFound()) {
value = jsgraph()->UndefinedConstant();
- } else if (access_info.IsDataConstant()) {
- DCHECK(!FLAG_track_constant_fields);
- value = jsgraph()->Constant(access_info.constant());
} else if (access_info.IsAccessorConstant()) {
value = InlinePropertyGetterCall(receiver, context, frame_state, &effect,
&control, if_exceptions, access_info);
@@ -2165,7 +2158,7 @@ JSNativeContextSpecialization::BuildPropertyLoad(
} else if (access_info.IsStringLength()) {
value = graph()->NewNode(simplified()->StringLength(), receiver);
} else {
- DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
+ DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
value = access_builder.BuildLoadDataField(name, access_info, receiver,
&effect, &control);
@@ -2228,30 +2221,30 @@ JSNativeContextSpecialization::BuildPropertyStore(
DCHECK(!access_info.IsNotFound());
// Generate the actual property access.
- if (access_info.IsDataConstant()) {
- DCHECK(!FLAG_track_constant_fields);
- Node* constant_value = jsgraph()->Constant(access_info.constant());
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(), value, constant_value);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongValue),
- check, effect, control);
- value = constant_value;
- } else if (access_info.IsAccessorConstant()) {
+ if (access_info.IsAccessorConstant()) {
InlinePropertySetterCall(receiver, value, context, frame_state, &effect,
&control, if_exceptions, access_info);
} else {
- DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
+ DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
+ DCHECK(access_mode == AccessMode::kStore ||
+ access_mode == AccessMode::kStoreInLiteral);
FieldIndex const field_index = access_info.field_index();
Type const field_type = access_info.field_type();
MachineRepresentation const field_representation =
- access_info.field_representation();
+ PropertyAccessBuilder::ConvertRepresentation(
+ access_info.field_representation());
Node* storage = receiver;
if (!field_index.is_inobject()) {
storage = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectPropertiesOrHash()),
storage, effect, control);
}
+ PropertyConstness constness = access_info.IsDataConstant()
+ ? PropertyConstness::kConst
+ : PropertyConstness::kMutable;
+ bool store_to_existing_constant_field = access_info.IsDataConstant() &&
+ access_mode == AccessMode::kStore &&
+ !access_info.HasTransitionMap();
FieldAccess field_access = {
kTaggedBase,
field_index.offset(),
@@ -2259,20 +2252,16 @@ JSNativeContextSpecialization::BuildPropertyStore(
MaybeHandle<Map>(),
field_type,
MachineType::TypeForRepresentation(field_representation),
- kFullWriteBarrier};
- bool store_to_constant_field = FLAG_track_constant_fields &&
- (access_mode == AccessMode::kStore) &&
- access_info.IsDataConstantField();
+ kFullWriteBarrier,
+ LoadSensitivity::kUnsafe,
+ constness};
- DCHECK(access_mode == AccessMode::kStore ||
- access_mode == AccessMode::kStoreInLiteral);
switch (field_representation) {
case MachineRepresentation::kFloat64: {
value = effect =
graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
effect, control);
- if (!field_index.is_inobject() || field_index.is_hidden_field() ||
- !FLAG_unbox_double_fields) {
+ if (!field_index.is_inobject() || !FLAG_unbox_double_fields) {
if (access_info.HasTransitionMap()) {
// Allocate a MutableHeapNumber for the new property.
AllocationBuilder a(jsgraph(), effect, control);
@@ -2280,19 +2269,28 @@ JSNativeContextSpecialization::BuildPropertyStore(
Type::OtherInternal());
a.Store(AccessBuilder::ForMap(),
factory()->mutable_heap_number_map());
- a.Store(AccessBuilder::ForHeapNumberValue(), value);
+ FieldAccess value_field_access =
+ AccessBuilder::ForHeapNumberValue();
+ value_field_access.constness = field_access.constness;
+ a.Store(value_field_access, value);
value = effect = a.Finish();
field_access.type = Type::Any();
- field_access.machine_type = MachineType::TaggedPointer();
+ field_access.machine_type =
+ MachineType::TypeCompressedTaggedPointer();
field_access.write_barrier_kind = kPointerWriteBarrier;
} else {
// We just store directly to the MutableHeapNumber.
FieldAccess const storage_access = {
- kTaggedBase, field_index.offset(),
- name.object(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ kTaggedBase,
+ field_index.offset(),
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kUnsafe,
+ constness};
storage = effect =
graph()->NewNode(simplified()->LoadField(storage_access),
storage, effect, control);
@@ -2301,7 +2299,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
field_access.machine_type = MachineType::Float64();
}
}
- if (store_to_constant_field) {
+ if (store_to_existing_constant_field) {
DCHECK(!access_info.HasTransitionMap());
// If the field is constant check that the value we are going
// to store matches current value.
@@ -2323,14 +2321,14 @@ JSNativeContextSpecialization::BuildPropertyStore(
case MachineRepresentation::kCompressedSigned:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
- if (store_to_constant_field) {
+ if (store_to_existing_constant_field) {
DCHECK(!access_info.HasTransitionMap());
// If the field is constant check that the value we are going
// to store matches current value.
Node* current_value = effect = graph()->NewNode(
simplified()->LoadField(field_access), storage, effect, control);
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+ Node* check = graph()->NewNode(simplified()->SameValueNumbersOnly(),
current_value, value);
effect = graph()->NewNode(
simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
@@ -2348,8 +2346,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
MachineRepresentation::kTaggedPointer ||
field_representation ==
MachineRepresentation::kCompressedPointer) {
- // Ensure that {value} is a HeapObject.
- value = access_builder.BuildCheckHeapObject(value, &effect, control);
Handle<Map> field_map;
if (access_info.field_map().ToHandle(&field_map)) {
// Emit a map check for the value.
@@ -2357,6 +2353,10 @@ JSNativeContextSpecialization::BuildPropertyStore(
simplified()->CheckMaps(CheckMapsFlag::kNone,
ZoneHandleSet<Map>(field_map)),
value, effect, control);
+ } else {
+ // Ensure that {value} is a HeapObject.
+ value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ value, effect, control);
}
field_access.write_barrier_kind = kPointerWriteBarrier;
@@ -2459,6 +2459,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
access_info_factory.ComputePropertyAccessInfo(
receiver_map, cached_name.object(), AccessMode::kStoreInLiteral);
if (access_info.IsInvalid()) return NoChange();
+ access_info.RecordDependencies(dependencies());
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -2466,7 +2467,6 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
// Monomorphic property access.
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
- receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
access_builder.BuildCheckMaps(receiver, &effect, control,
access_info.receiver_maps());
@@ -2497,10 +2497,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode());
FeedbackParameter const& p = FeedbackParameterOf(node->op());
- Node* const receiver = NodeProperties::GetValueInput(node, 0);
Node* const index = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- Node* const effect = NodeProperties::GetEffectInput(node);
// Extract receiver maps from the keyed store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
@@ -2509,23 +2507,9 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
// Extract the keyed access store mode from the keyed store IC.
KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
- // Extract receiver maps from the {nexus}.
- MapHandles receiver_maps;
- if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
- return NoChange();
- } else if (receiver_maps.empty()) {
- return ReduceSoftDeoptimize(
- node, DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
- }
- DCHECK(!nexus.IsUninitialized());
- DCHECK_EQ(ELEMENT, nexus.GetKeyType());
-
- if (nexus.ic_state() == MEGAMORPHIC) return NoChange();
-
- // Try to lower the element access based on the {receiver_maps}.
- return ReduceElementAccess(node, index, value, nexus, receiver_maps,
- AccessMode::kStoreInLiteral, STANDARD_LOAD,
- store_mode);
+ return ReduceKeyedAccess(node, index, value, nexus,
+ AccessMode::kStoreInLiteral, STANDARD_LOAD,
+ store_mode);
}
Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) {
@@ -2533,14 +2517,9 @@ Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return NoChange();
-
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- if (!receiver_maps[i]->IsJSReceiverMap()) return NoChange();
+ MapInference inference(broker(), receiver, effect);
+ if (!inference.HaveMaps() || !inference.AllOfInstanceTypesAreJSReceiver()) {
+ return NoChange();
}
ReplaceWithValue(node, receiver, effect);
@@ -2572,10 +2551,10 @@ JSNativeContextSpecialization::BuildElementAccess(
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
- MapHandles const& receiver_maps = access_info.receiver_maps();
+ ZoneVector<Handle<Map>> const& receiver_maps = access_info.receiver_maps();
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
- Node* buffer;
+ if (IsTypedArrayElementsKind(elements_kind)) {
+ Node* buffer_or_receiver = receiver;
Node* length;
Node* base_pointer;
Node* external_pointer;
@@ -2585,32 +2564,20 @@ JSNativeContextSpecialization::BuildElementAccess(
base::Optional<JSTypedArrayRef> typed_array =
GetTypedArrayConstant(broker(), receiver);
if (typed_array.has_value()) {
- buffer = jsgraph()->Constant(typed_array->buffer());
- length =
- jsgraph()->Constant(static_cast<double>(typed_array->length_value()));
+ length = jsgraph()->Constant(static_cast<double>(typed_array->length()));
// Load the (known) base and external pointer for the {receiver}. The
// {external_pointer} might be invalid if the {buffer} was detached, so
// we need to make sure that any access is properly guarded.
base_pointer = jsgraph()->ZeroConstant();
external_pointer =
- jsgraph()->PointerConstant(typed_array->elements_external_pointer());
+ jsgraph()->PointerConstant(typed_array->external_pointer());
} else {
// Load the {receiver}s length.
length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
receiver, effect, control);
- // Load the buffer for the {receiver}.
- buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
-
- // Load the elements for the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, effect, control);
-
// Load the base pointer for the {receiver}. This will always be Smi
// zero unless we allow on-heap TypedArrays, which is only the case
// for Chrome. Node and Electron both set this limit to 0. Setting
@@ -2619,21 +2586,30 @@ JSNativeContextSpecialization::BuildElementAccess(
if (V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP == 0) {
base_pointer = jsgraph()->ZeroConstant();
} else {
- base_pointer = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
- elements, effect, control);
+ base_pointer = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSTypedArrayBasePointer()),
+ receiver, effect, control);
}
- // Load the external pointer for the {receiver}s {elements}.
- external_pointer = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
- elements, effect, control);
+ // Load the external pointer for the {receiver}.
+ external_pointer = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSTypedArrayExternalPointer()),
+ receiver, effect, control);
}
// See if we can skip the detaching check.
if (!dependencies()->DependOnArrayBufferDetachingProtector()) {
+ // Load the buffer for the {receiver}.
+ Node* buffer =
+ typed_array.has_value()
+ ? jsgraph()->Constant(typed_array->buffer())
+ : (effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control));
+
// Deopt if the {buffer} was detached.
// Note: A detached buffer leads to megamorphic feedback.
Node* buffer_bit_field = effect = graph()->NewNode(
@@ -2648,10 +2624,13 @@ JSNativeContextSpecialization::BuildElementAccess(
effect = graph()->NewNode(
simplified()->CheckIf(DeoptimizeReason::kArrayBufferWasDetached),
check, effect, control);
+
+ // Retain the {buffer} instead of {receiver} to reduce live ranges.
+ buffer_or_receiver = buffer;
}
if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
// Only check that the {index} is in SignedSmall range. We do the actual
// bounds check below and just skip the property access if it's out of
// bounds for the {receiver}.
@@ -2689,8 +2668,9 @@ JSNativeContextSpecialization::BuildElementAccess(
{
// Perform the actual load
vtrue = etrue = graph()->NewNode(
- simplified()->LoadTypedElement(external_array_type), buffer,
- base_pointer, external_pointer, index, etrue, if_true);
+ simplified()->LoadTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index,
+ etrue, if_true);
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -2710,8 +2690,9 @@ JSNativeContextSpecialization::BuildElementAccess(
} else {
// Perform the actual load.
value = effect = graph()->NewNode(
- simplified()->LoadTypedElement(external_array_type), buffer,
- base_pointer, external_pointer, index, effect, control);
+ simplified()->LoadTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, effect,
+ control);
}
break;
}
@@ -2735,7 +2716,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Check if we can skip the out-of-bounds store.
- if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+ if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -2746,8 +2727,9 @@ JSNativeContextSpecialization::BuildElementAccess(
{
// Perform the actual store.
etrue = graph()->NewNode(
- simplified()->StoreTypedElement(external_array_type), buffer,
- base_pointer, external_pointer, index, value, etrue, if_true);
+ simplified()->StoreTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index,
+ value, etrue, if_true);
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -2762,8 +2744,9 @@ JSNativeContextSpecialization::BuildElementAccess(
} else {
// Perform the actual store
effect = graph()->NewNode(
- simplified()->StoreTypedElement(external_array_type), buffer,
- base_pointer, external_pointer, index, value, effect, control);
+ simplified()->StoreTypedElement(external_array_type),
+ buffer_or_receiver, base_pointer, external_pointer, index, value,
+ effect, control);
}
break;
}
@@ -2829,13 +2812,13 @@ JSNativeContextSpecialization::BuildElementAccess(
// Compute the element access.
Type element_type = Type::NonInternal();
- MachineType element_machine_type = MachineType::AnyTagged();
+ MachineType element_machine_type = MachineType::TypeCompressedTagged();
if (IsDoubleElementsKind(elements_kind)) {
element_type = Type::Number();
element_machine_type = MachineType::Float64();
} else if (IsSmiElementsKind(elements_kind)) {
element_type = Type::SignedSmall();
- element_machine_type = MachineType::TaggedSigned();
+ element_machine_type = MachineType::TypeCompressedTaggedSigned();
}
ElementAccess element_access = {
kTaggedBase, FixedArray::kHeaderSize,
@@ -2852,7 +2835,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
if (elements_kind == HOLEY_ELEMENTS ||
elements_kind == HOLEY_SMI_ELEMENTS) {
- element_access.machine_type = MachineType::AnyTagged();
+ element_access.machine_type = MachineType::TypeCompressedTagged();
}
// Check if we can return undefined for out-of-bounds loads.
@@ -2960,7 +2943,7 @@ JSNativeContextSpecialization::BuildElementAccess(
if (elements_kind == HOLEY_ELEMENTS ||
elements_kind == HOLEY_SMI_ELEMENTS) {
- element_access.machine_type = MachineType::AnyTagged();
+ element_access.machine_type = MachineType::TypeCompressedTagged();
}
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
@@ -3028,7 +3011,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// Ensure that copy-on-write backing store is writable.
if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
+ store_mode == STORE_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
@@ -3070,7 +3053,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// If we didn't grow {elements}, it might still be COW, in which case we
// copy it now.
if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW) {
+ store_mode == STORE_AND_GROW_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
@@ -3251,7 +3234,7 @@ Node* JSNativeContextSpecialization::BuildCheckEqualsName(NameRef const& name,
}
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
- MapHandles const& receiver_maps) {
+ ZoneVector<Handle<Map>> const& receiver_maps) {
// Check if all {receiver_maps} have one of the initial Array.prototype
// or Object.prototype objects as their prototype (in any of the current
// native contexts, as the global Array protector works isolate-wide).
@@ -3269,24 +3252,12 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
return dependencies()->DependOnNoElementsProtector();
}
-namespace {
-void TryUpdateThenDropDeprecated(Isolate* isolate, MapHandles* maps) {
- for (auto it = maps->begin(); it != maps->end();) {
- if (Map::TryUpdate(isolate, *it).ToHandle(&*it)) {
- DCHECK(!(*it)->is_deprecated());
- ++it;
- } else {
- it = maps->erase(it);
- }
- }
-}
-} // namespace
-
+// Returns false iff we have insufficient feedback (uninitialized or obsolete).
bool JSNativeContextSpecialization::ExtractReceiverMaps(
Node* receiver, Node* effect, FeedbackNexus const& nexus,
MapHandles* receiver_maps) {
DCHECK(receiver_maps->empty());
- if (nexus.IsUninitialized()) return true;
+ if (nexus.IsUninitialized()) return false;
// See if we can infer a concrete type for the {receiver}. Solely relying on
// the inference is not safe for keyed stores, because we would potentially
@@ -3301,26 +3272,23 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
}
}
- // Try to extract some maps from the {nexus}.
- if (nexus.ExtractMaps(receiver_maps) != 0) {
- // Try to filter impossible candidates based on inferred root map.
- Handle<Map> root_map;
- if (InferReceiverRootMap(receiver).ToHandle(&root_map)) {
- DCHECK(!root_map->is_abandoned_prototype_map());
- Isolate* isolate = this->isolate();
- receiver_maps->erase(
- std::remove_if(receiver_maps->begin(), receiver_maps->end(),
- [root_map, isolate](Handle<Map> map) {
- return map->is_abandoned_prototype_map() ||
- map->FindRootMap(isolate) != *root_map;
- }),
- receiver_maps->end());
- }
- TryUpdateThenDropDeprecated(isolate(), receiver_maps);
- return true;
- }
+ if (nexus.ExtractMaps(receiver_maps) == 0) return true;
- return false;
+ // Try to filter impossible candidates based on inferred root map.
+ Handle<Map> root_map;
+ if (InferReceiverRootMap(receiver).ToHandle(&root_map)) {
+ DCHECK(!root_map->is_abandoned_prototype_map());
+ Isolate* isolate = this->isolate();
+ receiver_maps->erase(
+ std::remove_if(receiver_maps->begin(), receiver_maps->end(),
+ [root_map, isolate](Handle<Map> map) {
+ return map->is_abandoned_prototype_map() ||
+ map->FindRootMap(isolate) != *root_map;
+ }),
+ receiver_maps->end());
+ }
+ TryUpdateThenDropDeprecated(isolate(), receiver_maps);
+ return !receiver_maps->empty();
}
bool JSNativeContextSpecialization::InferReceiverMaps(
@@ -3352,7 +3320,7 @@ MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
Node* receiver) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
- return handle(m.Value()->map()->FindRootMap(isolate()), isolate());
+ return handle(m.Value()->map().FindRootMap(isolate()), isolate());
} else if (m.IsJSCreate()) {
base::Optional<MapRef> initial_map =
NodeProperties::GetJSCreateMap(broker(), receiver);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 1e9c44257f..7de2639966 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -7,7 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
-#include "src/deoptimize-reason.h"
+#include "src/deoptimizer/deoptimize-reason.h"
#include "src/objects/map.h"
namespace v8 {
@@ -93,11 +93,17 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSToObject(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
- FeedbackNexus const& nexus,
- MapHandles const& receiver_maps,
+ ElementAccessFeedback const& processed,
AccessMode access_mode,
KeyedAccessLoadMode load_mode,
KeyedAccessStoreMode store_mode);
+ // In the case of non-keyed (named) accesses, pass the name as {static_name}
+ // and use {nullptr} for {key} (load/store modes are irrelevant).
+ Reduction ReducePropertyAccessUsingProcessedFeedback(
+ Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
+ FeedbackNexus const& nexus, AccessMode access_mode,
+ KeyedAccessLoadMode load_mode = STANDARD_LOAD,
+ KeyedAccessStoreMode store_mode = STANDARD_STORE);
Reduction ReduceKeyedAccess(Node* node, Node* key, Node* value,
FeedbackNexus const& nexus,
AccessMode access_mode,
@@ -108,9 +114,8 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
NameRef const& name,
AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
- MapHandles const& receiver_maps,
- NameRef const& name, AccessMode access_mode,
- Node* key = nullptr);
+ NamedAccessFeedback const& processed,
+ AccessMode access_mode, Node* key = nullptr);
Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
NameRef const& name, AccessMode access_mode,
Node* key = nullptr);
@@ -214,7 +219,7 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
- bool CanTreatHoleAsUndefined(MapHandles const& receiver_maps);
+ bool CanTreatHoleAsUndefined(ZoneVector<Handle<Map>> const& receiver_maps);
// Extract receiver maps from {nexus} and filter based on {receiver} if
// possible.
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 2280084b76..a779790b8d 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -9,9 +9,9 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
-#include "src/vector-slot-pair.h"
+#include "src/compiler/vector-slot-pair.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 282c34ec93..0f315b1cb5 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -6,11 +6,11 @@
#define V8_COMPILER_JS_OPERATOR_H_
#include "src/base/compiler-specific.h"
-#include "src/globals.h"
-#include "src/maybe-handles.h"
+#include "src/common/globals.h"
+#include "src/compiler/vector-slot-pair.h"
+#include "src/handles/maybe-handles.h"
+#include "src/objects/type-hints.h"
#include "src/runtime/runtime.h"
-#include "src/type-hints.h"
-#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -106,7 +106,7 @@ class ConstructParameters final {
: arity_(arity), frequency_(frequency), feedback_(feedback) {}
uint32_t arity() const { return arity_; }
- CallFrequency frequency() const { return frequency_; }
+ CallFrequency const& frequency() const { return frequency_; }
VectorSlotPair const& feedback() const { return feedback_; }
private:
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index cfcfb4ce58..9d882e8238 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -8,8 +8,8 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/feedback-vector.h"
-#include "src/type-hints.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/type-hints.h"
namespace v8 {
namespace internal {
@@ -268,7 +268,6 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
}
default:
UNREACHABLE();
- break;
}
if (node != nullptr) {
@@ -354,7 +353,6 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
}
default:
UNREACHABLE();
- break;
}
return LoweringResult::NoChange();
}
@@ -501,7 +499,8 @@ Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect,
jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason,
VectorSlotPair()),
jsgraph()->Dead(), effect, control);
- Node* frame_state = NodeProperties::FindFrameStateBefore(deoptimize);
+ Node* frame_state =
+ NodeProperties::FindFrameStateBefore(deoptimize, jsgraph()->Dead());
deoptimize->ReplaceInput(0, frame_state);
return deoptimize;
}
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 7ce898f9d0..7164a0b708 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -7,8 +7,8 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
-#include "src/deoptimize-reason.h"
-#include "src/handles.h"
+#include "src/deoptimizer/deoptimize-reason.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 452609a4d5..ba50b75792 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -6,7 +6,7 @@
#include "src/ast/modules.h"
#include "src/builtins/builtins-utils.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/js-graph.h"
@@ -16,9 +16,9 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
-#include "src/objects-inl.h"
#include "src/objects/js-generator.h"
#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -1446,16 +1446,15 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
const bool is_construct = (node->opcode() == IrOpcode::kJSConstruct);
- DCHECK(Builtins::HasCppImplementation(builtin_index));
-
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = is_construct
? NodeProperties::GetValueInput(node, arity + 1)
: jsgraph->UndefinedConstant();
- // API and CPP builtins are implemented in C++, and we can inline both.
- // CPP builtins create a builtin exit frame, API builtins don't.
- const bool has_builtin_exit_frame = Builtins::IsCpp(builtin_index);
+ // CPP builtins are implemented in C++, and we can inline it.
+ // CPP builtins create a builtin exit frame.
+ DCHECK(Builtins::IsCpp(builtin_index));
+ const bool has_builtin_exit_frame = true;
Node* stub = jsgraph->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack,
has_builtin_exit_frame);
@@ -1720,8 +1719,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
common()->Call(Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(), 1 + arity, flags)));
}
- } else if (shared.HasBuiltinId() &&
- Builtins::HasCppImplementation(shared.builtin_id())) {
+ } else if (shared.HasBuiltinId() && Builtins::IsCpp(shared.builtin_id())) {
// Patch {node} to a direct CEntry call.
ReduceBuiltin(jsgraph(), node, shared.builtin_id(), arity, flags);
} else if (shared.HasBuiltinId() &&
@@ -2022,7 +2020,8 @@ Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
ExternalReference const ref =
ExternalReference::address_of_pending_message_obj(isolate());
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
- NodeProperties::ChangeOp(node, simplified()->LoadMessage());
+ NodeProperties::ChangeOp(
+ node, simplified()->LoadField(AccessBuilder::ForExternalTaggedValue()));
return Changed(node);
}
@@ -2033,7 +2032,8 @@ Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
node->ReplaceInput(1, value);
- NodeProperties::ChangeOp(node, simplified()->StoreMessage());
+ NodeProperties::ChangeOp(
+ node, simplified()->StoreField(AccessBuilder::ForExternalTaggedValue()));
return Changed(node);
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index d8164ac97d..bcbc3aa16f 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -6,9 +6,9 @@
#define V8_COMPILER_JS_TYPED_LOWERING_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/opcodes.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 5c671aca25..8bb47b43e9 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -4,14 +4,14 @@
#include "src/compiler/linkage.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
#include "src/compiler/node.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline.h"
-#include "src/macro-assembler.h"
-#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -37,6 +37,9 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
case CallDescriptor::kCallAddress:
os << "Addr";
break;
+ case CallDescriptor::kCallWasmCapiFunction:
+ os << "WasmExit";
+ break;
case CallDescriptor::kCallWasmFunction:
os << "WasmFunction";
break;
@@ -149,6 +152,8 @@ int CallDescriptor::CalculateFixedFrameSize() const {
case kCallWasmFunction:
case kCallWasmImportWrapper:
return WasmCompiledFrameConstants::kFixedSlotCount;
+ case kCallWasmCapiFunction:
+ return WasmExitFrameConstants::kFixedSlotCount;
}
UNREACHABLE();
}
@@ -161,7 +166,7 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
// plus the receiver.
SharedFunctionInfo shared = info->closure()->shared();
return GetJSCallDescriptor(zone, info->is_osr(),
- 1 + shared->internal_formal_parameter_count(),
+ 1 + shared.internal_formal_parameter_count(),
CallDescriptor::kCanUseRoots);
}
return nullptr; // TODO(titzer): ?
@@ -197,6 +202,7 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
// Some inline intrinsics are also safe to call without a FrameState.
case Runtime::kInlineCreateIterResultObject:
+ case Runtime::kInlineIncBlockCounter:
case Runtime::kInlineGeneratorClose:
case Runtime::kInlineGeneratorGetResumeMode:
case Runtime::kInlineCreateJSGeneratorObject:
@@ -204,7 +210,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineIsJSReceiver:
case Runtime::kInlineIsRegExp:
case Runtime::kInlineIsSmi:
- case Runtime::kInlineIsTypedArray:
return false;
default:
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 20feec79de..e4fa6f9f20 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -7,15 +7,15 @@
#include "src/base/compiler-specific.h"
#include "src/base/flags.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/register-arch.h"
+#include "src/codegen/reglist.h"
+#include "src/codegen/signature.h"
+#include "src/common/globals.h"
#include "src/compiler/frame.h"
#include "src/compiler/operator.h"
-#include "src/globals.h"
-#include "src/interface-descriptors.h"
-#include "src/machine-type.h"
-#include "src/register-arch.h"
-#include "src/reglist.h"
#include "src/runtime/runtime.h"
-#include "src/signature.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -175,6 +175,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kCallCodeObject, // target is a Code object
kCallJSFunction, // target is a JSFunction object
kCallAddress, // target is a machine pointer
+ kCallWasmCapiFunction, // target is a Wasm C API function
kCallWasmFunction, // target is a wasm function
kCallWasmImportWrapper, // target is a wasm import wrapper
kCallBuiltinPointer, // target is a builtin pointer
@@ -236,6 +237,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// Returns {true} if this descriptor is a call to a WebAssembly function.
bool IsWasmImportWrapper() const { return kind_ == kCallWasmImportWrapper; }
+ // Returns {true} if this descriptor is a call to a Wasm C API function.
+ bool IsWasmCapiFunction() const { return kind_ == kCallWasmCapiFunction; }
+
bool RequiresFrameAsIncoming() const {
return IsCFunctionCall() || IsJSFunctionCall() || IsWasmFunctionCall();
}
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 468ef201ec..c42bfd839a 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -10,7 +10,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/heap/factory.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -116,10 +116,6 @@ Reduction LoadElimination::Reduce(Node* node) {
return ReduceLoadField(node, FieldAccessOf(node->op()));
case IrOpcode::kStoreField:
return ReduceStoreField(node, FieldAccessOf(node->op()));
- case IrOpcode::kStoreMessage:
- return ReduceStoreField(node, AccessBuilder::ForExternalIntPtr());
- case IrOpcode::kLoadMessage:
- return ReduceLoadField(node, AccessBuilder::ForExternalIntPtr());
case IrOpcode::kLoadElement:
return ReduceLoadElement(node);
case IrOpcode::kStoreElement:
@@ -144,11 +140,14 @@ namespace {
bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
if (r1 == r2) return true;
- return IsAnyTagged(r1) && IsAnyTagged(r2);
+ return IsAnyCompressedTagged(r1) && IsAnyCompressedTagged(r2);
}
} // namespace
+LoadElimination::AbstractState const
+ LoadElimination::AbstractState::empty_state_;
+
Node* LoadElimination::AbstractElements::Lookup(
Node* object, Node* index, MachineRepresentation representation) const {
for (Element const element : elements_) {
@@ -250,10 +249,11 @@ void LoadElimination::AbstractElements::Print() const {
}
}
-Node* LoadElimination::AbstractField::Lookup(Node* object) const {
- for (auto pair : info_for_node_) {
+LoadElimination::FieldInfo const* LoadElimination::AbstractField::Lookup(
+ Node* object) const {
+ for (auto& pair : info_for_node_) {
if (pair.first->IsDead()) continue;
- if (MustAlias(object, pair.first)) return pair.second.value;
+ if (MustAlias(object, pair.first)) return &pair.second;
}
return nullptr;
}
@@ -305,9 +305,10 @@ LoadElimination::AbstractField const* LoadElimination::AbstractField::Kill(
void LoadElimination::AbstractField::Print() const {
for (auto pair : info_for_node_) {
- PrintF(" #%d:%s -> #%d:%s\n", pair.first->id(),
+ PrintF(" #%d:%s -> #%d:%s [repr=%s]\n", pair.first->id(),
pair.first->op()->mnemonic(), pair.second.value->id(),
- pair.second.value->op()->mnemonic());
+ pair.second.value->op()->mnemonic(),
+ MachineReprToString(pair.second.representation));
}
}
@@ -380,6 +381,21 @@ void LoadElimination::AbstractMaps::Print() const {
}
}
+bool LoadElimination::AbstractState::FieldsEquals(
+ AbstractFields const& this_fields,
+ AbstractFields const& that_fields) const {
+ for (size_t i = 0u; i < this_fields.size(); ++i) {
+ AbstractField const* this_field = this_fields[i];
+ AbstractField const* that_field = that_fields[i];
+ if (this_field) {
+ if (!that_field || !that_field->Equals(this_field)) return false;
+ } else if (that_field) {
+ return false;
+ }
+ }
+ return true;
+}
+
bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
if (this->elements_) {
if (!that->elements_ || !that->elements_->Equals(this->elements_)) {
@@ -388,14 +404,9 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
} else if (that->elements_) {
return false;
}
- for (size_t i = 0u; i < arraysize(fields_); ++i) {
- AbstractField const* this_field = this->fields_[i];
- AbstractField const* that_field = that->fields_[i];
- if (this_field) {
- if (!that_field || !that_field->Equals(this_field)) return false;
- } else if (that_field) {
- return false;
- }
+ if (!FieldsEquals(this->fields_, that->fields_) ||
+ !FieldsEquals(this->const_fields_, that->const_fields_)) {
+ return false;
}
if (this->maps_) {
if (!that->maps_ || !that->maps_->Equals(this->maps_)) {
@@ -407,6 +418,20 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
return true;
}
+void LoadElimination::AbstractState::FieldsMerge(
+ AbstractFields& this_fields, AbstractFields const& that_fields,
+ Zone* zone) {
+ for (size_t i = 0; i < this_fields.size(); ++i) {
+ if (this_fields[i]) {
+ if (that_fields[i]) {
+ this_fields[i] = this_fields[i]->Merge(that_fields[i], zone);
+ } else {
+ this_fields[i] = nullptr;
+ }
+ }
+ }
+}
+
void LoadElimination::AbstractState::Merge(AbstractState const* that,
Zone* zone) {
// Merge the information we have about the elements.
@@ -417,15 +442,8 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
}
// Merge the information we have about the fields.
- for (size_t i = 0; i < arraysize(fields_); ++i) {
- if (this->fields_[i]) {
- if (that->fields_[i]) {
- this->fields_[i] = this->fields_[i]->Merge(that->fields_[i], zone);
- } else {
- this->fields_[i] = nullptr;
- }
- }
- }
+ FieldsMerge(this->fields_, that->fields_, zone);
+ FieldsMerge(this->const_fields_, that->const_fields_, zone);
// Merge the information we have about the maps.
if (this->maps_) {
@@ -508,14 +526,16 @@ LoadElimination::AbstractState::KillElement(Node* object, Node* index,
}
LoadElimination::AbstractState const* LoadElimination::AbstractState::AddField(
- Node* object, size_t index, Node* value, MaybeHandle<Name> name,
- Zone* zone) const {
+ Node* object, size_t index, LoadElimination::FieldInfo info,
+ PropertyConstness constness, Zone* zone) const {
AbstractState* that = new (zone) AbstractState(*this);
- if (that->fields_[index]) {
- that->fields_[index] =
- that->fields_[index]->Extend(object, value, name, zone);
+ AbstractFields& fields = constness == PropertyConstness::kConst
+ ? that->const_fields_
+ : that->fields_;
+ if (fields[index]) {
+ fields[index] = fields[index]->Extend(object, info, zone);
} else {
- that->fields_[index] = new (zone) AbstractField(object, value, name, zone);
+ fields[index] = new (zone) AbstractField(object, info, zone);
}
return that;
}
@@ -545,14 +565,14 @@ LoadElimination::AbstractState::KillFields(Node* object, MaybeHandle<Name> name,
Zone* zone) const {
AliasStateInfo alias_info(this, object);
for (size_t i = 0;; ++i) {
- if (i == arraysize(fields_)) return this;
+ if (i == fields_.size()) return this;
if (AbstractField const* this_field = this->fields_[i]) {
AbstractField const* that_field =
this_field->Kill(alias_info, name, zone);
if (that_field != this_field) {
AbstractState* that = new (zone) AbstractState(*this);
that->fields_[i] = that_field;
- while (++i < arraysize(fields_)) {
+ while (++i < fields_.size()) {
if (this->fields_[i] != nullptr) {
that->fields_[i] = this->fields_[i]->Kill(alias_info, name, zone);
}
@@ -563,9 +583,24 @@ LoadElimination::AbstractState::KillFields(Node* object, MaybeHandle<Name> name,
}
}
-Node* LoadElimination::AbstractState::LookupField(Node* object,
- size_t index) const {
- if (AbstractField const* this_field = this->fields_[index]) {
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillAll(
+ Zone* zone) const {
+ // Kill everything except for const fields
+ for (size_t i = 0; i < const_fields_.size(); ++i) {
+ if (const_fields_[i]) {
+ AbstractState* that = new (zone) AbstractState();
+ that->const_fields_ = const_fields_;
+ return that;
+ }
+ }
+ return LoadElimination::empty_state();
+}
+
+LoadElimination::FieldInfo const* LoadElimination::AbstractState::LookupField(
+ Node* object, size_t index, PropertyConstness constness) const {
+ AbstractFields const& fields =
+ constness == PropertyConstness::kConst ? const_fields_ : fields_;
+ if (AbstractField const* this_field = fields[index]) {
return this_field->Lookup(object);
}
return nullptr;
@@ -604,12 +639,18 @@ void LoadElimination::AbstractState::Print() const {
PrintF(" elements:\n");
elements_->Print();
}
- for (size_t i = 0; i < arraysize(fields_); ++i) {
+ for (size_t i = 0; i < fields_.size(); ++i) {
if (AbstractField const* const field = fields_[i]) {
PrintF(" field %zu:\n", i);
field->Print();
}
}
+ for (size_t i = 0; i < const_fields_.size(); ++i) {
+ if (AbstractField const* const const_field = const_fields_[i]) {
+ PrintF(" const field %zu:\n", i);
+ const_field->Print();
+ }
+ }
}
LoadElimination::AbstractState const*
@@ -694,8 +735,9 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
MaybeHandle<Name>(), zone());
// Add the new elements on {object}.
- state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset), node,
- MaybeHandle<Name>(), zone());
+ state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset),
+ {node, MachineType::RepCompressedTaggedPointer()},
+ PropertyConstness::kMutable, zone());
return UpdateState(node, state);
}
@@ -720,8 +762,9 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
state = state->KillField(object, FieldIndexOf(JSObject::kElementsOffset),
MaybeHandle<Name>(), zone());
// Add the new elements on {object}.
- state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset), node,
- MaybeHandle<Name>(), zone());
+ state = state->AddField(object, FieldIndexOf(JSObject::kElementsOffset),
+ {node, MachineType::RepCompressedTaggedPointer()},
+ PropertyConstness::kMutable, zone());
return UpdateState(node, state);
}
@@ -798,7 +841,7 @@ Reduction LoadElimination::ReduceLoadField(Node* node,
if (state == nullptr) return NoChange();
if (access.offset == HeapObject::kMapOffset &&
access.base_is_tagged == kTaggedBase) {
- DCHECK(IsAnyTagged(access.machine_type.representation()));
+ DCHECK(IsAnyCompressedTagged(access.machine_type.representation()));
ZoneHandleSet<Map> object_maps;
if (state->LookupMaps(object, &object_maps) && object_maps.size() == 1) {
Node* value = jsgraph()->HeapConstant(object_maps[0]);
@@ -809,9 +852,21 @@ Reduction LoadElimination::ReduceLoadField(Node* node,
} else {
int field_index = FieldIndexOf(access);
if (field_index >= 0) {
- if (Node* replacement = state->LookupField(object, field_index)) {
- // Make sure we don't resurrect dead {replacement} nodes.
- if (!replacement->IsDead()) {
+ PropertyConstness constness = access.constness;
+ MachineRepresentation representation =
+ access.machine_type.representation();
+ FieldInfo const* lookup_result =
+ state->LookupField(object, field_index, constness);
+ if (!lookup_result && constness == PropertyConstness::kConst) {
+ lookup_result = state->LookupField(object, field_index,
+ PropertyConstness::kMutable);
+ }
+ if (lookup_result) {
+ // Make sure we don't reuse values that were recorded with a different
+ // representation or resurrect dead {replacement} nodes.
+ Node* replacement = lookup_result->value;
+ if (IsCompatible(representation, lookup_result->representation) &&
+ !replacement->IsDead()) {
// Introduce a TypeGuard if the type of the {replacement} node is not
// a subtype of the original {node}'s type.
if (!NodeProperties::GetType(replacement)
@@ -828,7 +883,8 @@ Reduction LoadElimination::ReduceLoadField(Node* node,
return Replace(replacement);
}
}
- state = state->AddField(object, field_index, node, access.name, zone());
+ FieldInfo info(node, access.name, representation);
+ state = state->AddField(object, field_index, info, constness, zone());
}
}
Handle<Map> field_map;
@@ -847,7 +903,7 @@ Reduction LoadElimination::ReduceStoreField(Node* node,
if (state == nullptr) return NoChange();
if (access.offset == HeapObject::kMapOffset &&
access.base_is_tagged == kTaggedBase) {
- DCHECK(IsAnyTagged(access.machine_type.representation()));
+ DCHECK(IsAnyCompressedTagged(access.machine_type.representation()));
// Kill all potential knowledge about the {object}s map.
state = state->KillMaps(object, zone());
Type const new_value_type = NodeProperties::GetType(new_value);
@@ -861,15 +917,49 @@ Reduction LoadElimination::ReduceStoreField(Node* node,
} else {
int field_index = FieldIndexOf(access);
if (field_index >= 0) {
- Node* const old_value = state->LookupField(object, field_index);
- if (old_value == new_value) {
- // This store is fully redundant.
- return Replace(effect);
+ PropertyConstness constness = access.constness;
+ MachineRepresentation representation =
+ access.machine_type.representation();
+ FieldInfo const* lookup_result =
+ state->LookupField(object, field_index, constness);
+
+ if (lookup_result && constness == PropertyConstness::kMutable) {
+ // At runtime, we should never encounter
+ // - any store replacing existing info with a different, incompatible
+ // representation, nor
+ // - two consecutive const stores.
+ // However, we may see such code statically, so we guard against
+ // executing it by emitting Unreachable.
+ // TODO(gsps): Re-enable the double const store check once we have
+ // identified other FieldAccesses that should be marked mutable
+ // instead of const (cf. JSCreateLowering::AllocateFastLiteral).
+ bool incompatible_representation =
+ !lookup_result->name.is_null() &&
+ !IsCompatible(representation, lookup_result->representation);
+ if (incompatible_representation) {
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* unreachable =
+ graph()->NewNode(common()->Unreachable(), effect, control);
+ return Replace(unreachable);
+ }
+ if (lookup_result->value == new_value) {
+ // This store is fully redundant.
+ return Replace(effect);
+ }
}
+
// Kill all potentially aliasing fields and record the new value.
+ FieldInfo new_info(new_value, access.name, representation);
state = state->KillField(object, field_index, access.name, zone());
- state =
- state->AddField(object, field_index, new_value, access.name, zone());
+ state = state->AddField(object, field_index, new_info,
+ PropertyConstness::kMutable, zone());
+ if (constness == PropertyConstness::kConst) {
+ // For const stores, we track information in both the const and the
+ // mutable world to guard against field accesses that should have
+ // been marked const, but were not.
+ state =
+ state->AddField(object, field_index, new_info, constness, zone());
+ }
} else {
// Unsupported StoreField operator.
state = state->KillFields(object, access.name, zone());
@@ -890,12 +980,6 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
switch (access.machine_type.representation()) {
case MachineRepresentation::kNone:
case MachineRepresentation::kBit:
- // TODO(solanes): Create the code for the compressed values
- case MachineRepresentation::kCompressedSigned:
- case MachineRepresentation::kCompressedPointer:
- case MachineRepresentation::kCompressed:
- UNREACHABLE();
- break;
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
@@ -908,6 +992,9 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kCompressedSigned:
+ case MachineRepresentation::kCompressedPointer:
+ case MachineRepresentation::kCompressed:
if (Node* replacement = state->LookupElement(
object, index, access.machine_type.representation())) {
// Make sure we don't resurrect dead {replacement} nodes.
@@ -948,12 +1035,6 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
switch (access.machine_type.representation()) {
case MachineRepresentation::kNone:
case MachineRepresentation::kBit:
- // TODO(solanes): Create the code for the compressed values
- case MachineRepresentation::kCompressedSigned:
- case MachineRepresentation::kCompressedPointer:
- case MachineRepresentation::kCompressed:
- UNREACHABLE();
- break;
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
@@ -966,6 +1047,9 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kCompressedSigned:
+ case MachineRepresentation::kCompressedPointer:
+ case MachineRepresentation::kCompressed:
state = state->AddElement(object, index, new_value,
access.machine_type.representation(), zone());
break;
@@ -1057,7 +1141,7 @@ Reduction LoadElimination::ReduceOtherNode(Node* node) {
if (state == nullptr) return NoChange();
// Check if this {node} has some uncontrolled side effects.
if (!node->op()->HasProperty(Operator::kNoWrite)) {
- state = empty_state();
+ state = state->KillAll(zone());
}
return UpdateState(node, state);
} else {
@@ -1158,14 +1242,11 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
MaybeHandle<Name>(), zone());
break;
}
- case IrOpcode::kStoreField:
- state = ComputeLoopStateForStoreField(current, state,
- FieldAccessOf(current->op()));
- break;
- case IrOpcode::kStoreMessage:
- state = ComputeLoopStateForStoreField(
- current, state, AccessBuilder::ForExternalIntPtr());
+ case IrOpcode::kStoreField: {
+ FieldAccess access = FieldAccessOf(current->op());
+ state = ComputeLoopStateForStoreField(current, state, access);
break;
+ }
case IrOpcode::kStoreElement: {
Node* const object = NodeProperties::GetValueInput(current, 0);
Node* const index = NodeProperties::GetValueInput(current, 1);
@@ -1177,7 +1258,7 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
break;
}
default:
- return empty_state();
+ return state->KillAll(zone());
}
}
for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
@@ -1247,7 +1328,6 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
case MachineRepresentation::kBit:
case MachineRepresentation::kSimd128:
UNREACHABLE();
- break;
case MachineRepresentation::kWord32:
if (kInt32Size != kTaggedSize) {
return -1; // We currently only track tagged pointer size fields.
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index e18c3a7602..7658d01365 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -6,10 +6,10 @@
#define V8_COMPILER_LOAD_ELIMINATION_H_
#include "src/base/compiler-specific.h"
+#include "src/codegen/machine-type.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
-#include "src/machine-type.h"
-#include "src/maybe-handles.h"
+#include "src/handles/maybe-handles.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -98,24 +98,42 @@ class V8_EXPORT_PRIVATE LoadElimination final
// not alias.
class AliasStateInfo;
+ struct FieldInfo {
+ FieldInfo() = default;
+ FieldInfo(Node* value, MachineRepresentation representation)
+ : value(value), name(), representation(representation) {}
+ FieldInfo(Node* value, MaybeHandle<Name> name,
+ MachineRepresentation representation)
+ : value(value), name(name), representation(representation) {}
+
+ bool operator==(const FieldInfo& other) const {
+ return value == other.value && name.address() == other.name.address() &&
+ representation == other.representation;
+ }
+
+ Node* value = nullptr;
+ MaybeHandle<Name> name;
+ MachineRepresentation representation = MachineRepresentation::kNone;
+ };
+
// Abstract state to approximate the current state of a certain field along
// the effect paths through the graph.
class AbstractField final : public ZoneObject {
public:
explicit AbstractField(Zone* zone) : info_for_node_(zone) {}
- AbstractField(Node* object, Node* value, MaybeHandle<Name> name, Zone* zone)
+ AbstractField(Node* object, FieldInfo info, Zone* zone)
: info_for_node_(zone) {
- info_for_node_.insert(std::make_pair(object, Field(value, name)));
+ info_for_node_.insert(std::make_pair(object, info));
}
- AbstractField const* Extend(Node* object, Node* value,
- MaybeHandle<Name> name, Zone* zone) const {
+ AbstractField const* Extend(Node* object, FieldInfo info,
+ Zone* zone) const {
AbstractField* that = new (zone) AbstractField(zone);
that->info_for_node_ = this->info_for_node_;
- that->info_for_node_.insert(std::make_pair(object, Field(value, name)));
+ that->info_for_node_[object] = info;
return that;
}
- Node* Lookup(Node* object) const;
+ FieldInfo const* Lookup(Node* object) const;
AbstractField const* Kill(const AliasStateInfo& alias_info,
MaybeHandle<Name> name, Zone* zone) const;
bool Equals(AbstractField const* that) const {
@@ -126,7 +144,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractField* copy = new (zone) AbstractField(zone);
for (auto this_it : this->info_for_node_) {
Node* this_object = this_it.first;
- Field this_second = this_it.second;
+ FieldInfo this_second = this_it.second;
if (this_object->IsDead()) continue;
auto that_it = that->info_for_node_.find(this_object);
if (that_it != that->info_for_node_.end() &&
@@ -140,19 +158,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
void Print() const;
private:
- struct Field {
- Field() = default;
- Field(Node* value, MaybeHandle<Name> name) : value(value), name(name) {}
-
- bool operator==(const Field& other) const {
- return value == other.value && name.address() == other.name.address();
- }
-
- Node* value = nullptr;
- MaybeHandle<Name> name;
- };
-
- ZoneMap<Node*, Field> info_for_node_;
+ ZoneMap<Node*, FieldInfo> info_for_node_;
};
static size_t const kMaxTrackedFields = 32;
@@ -182,11 +188,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
class AbstractState final : public ZoneObject {
public:
- AbstractState() {
- for (size_t i = 0; i < arraysize(fields_); ++i) {
- fields_[i] = nullptr;
- }
- }
+ AbstractState() {}
bool Equals(AbstractState const* that) const;
void Merge(AbstractState const* that, Zone* zone);
@@ -198,8 +200,9 @@ class V8_EXPORT_PRIVATE LoadElimination final
Zone* zone) const;
bool LookupMaps(Node* object, ZoneHandleSet<Map>* object_maps) const;
- AbstractState const* AddField(Node* object, size_t index, Node* value,
- MaybeHandle<Name> name, Zone* zone) const;
+ AbstractState const* AddField(Node* object, size_t index, FieldInfo info,
+ PropertyConstness constness,
+ Zone* zone) const;
AbstractState const* KillField(const AliasStateInfo& alias_info,
size_t index, MaybeHandle<Name> name,
Zone* zone) const;
@@ -207,7 +210,9 @@ class V8_EXPORT_PRIVATE LoadElimination final
MaybeHandle<Name> name, Zone* zone) const;
AbstractState const* KillFields(Node* object, MaybeHandle<Name> name,
Zone* zone) const;
- Node* LookupField(Node* object, size_t index) const;
+ AbstractState const* KillAll(Zone* zone) const;
+ FieldInfo const* LookupField(Node* object, size_t index,
+ PropertyConstness constness) const;
AbstractState const* AddElement(Node* object, Node* index, Node* value,
MachineRepresentation representation,
@@ -219,9 +224,21 @@ class V8_EXPORT_PRIVATE LoadElimination final
void Print() const;
+ static AbstractState const* empty_state() { return &empty_state_; }
+
private:
+ static AbstractState const empty_state_;
+
+ using AbstractFields = std::array<AbstractField const*, kMaxTrackedFields>;
+
+ bool FieldsEquals(AbstractFields const& this_fields,
+ AbstractFields const& that_fields) const;
+ void FieldsMerge(AbstractFields& this_fields,
+ AbstractFields const& that_fields, Zone* zone);
+
AbstractElements const* elements_ = nullptr;
- AbstractField const* fields_[kMaxTrackedFields];
+ AbstractFields fields_{};
+ AbstractFields const_fields_{};
AbstractMaps const* maps_ = nullptr;
};
@@ -251,8 +268,6 @@ class V8_EXPORT_PRIVATE LoadElimination final
Reduction ReduceStoreTypedElement(Node* node);
Reduction ReduceEffectPhi(Node* node);
Reduction ReduceStart(Node* node);
- Reduction ReduceStoreMessage(Node* node);
- Reduction ReduceLoadMessage(Node* node);
Reduction ReduceOtherNode(Node* node);
Reduction UpdateState(Node* node, AbstractState const* state);
@@ -268,15 +283,17 @@ class V8_EXPORT_PRIVATE LoadElimination final
static int FieldIndexOf(int offset);
static int FieldIndexOf(FieldAccess const& access);
+ static AbstractState const* empty_state() {
+ return AbstractState::empty_state();
+ }
+
CommonOperatorBuilder* common() const;
- AbstractState const* empty_state() const { return &empty_state_; }
Isolate* isolate() const;
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Zone* zone() const { return node_states_.zone(); }
- AbstractState const empty_state_;
AbstractStateForEffectNodes node_states_;
JSGraph* const jsgraph_;
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 05c3b57212..620a9554e0 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -6,9 +6,9 @@
#define V8_COMPILER_LOOP_ANALYSIS_H_
#include "src/base/iterator.h"
+#include "src/common/globals.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index a3408ec81d..730900af54 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -6,8 +6,8 @@
#define V8_COMPILER_LOOP_PEELING_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/loop-analysis.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 8521666681..f8e78b2169 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -206,8 +206,29 @@ class MachineRepresentationInferrer {
case IrOpcode::kChangeUint32ToTagged:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kTaggedPoisonOnSpeculation:
+ case IrOpcode::kChangeCompressedToTagged:
representation_vector_[node->id()] = MachineRepresentation::kTagged;
break;
+ case IrOpcode::kChangeCompressedPointerToTaggedPointer:
+ representation_vector_[node->id()] =
+ MachineRepresentation::kTaggedPointer;
+ break;
+ case IrOpcode::kChangeCompressedSignedToTaggedSigned:
+ representation_vector_[node->id()] =
+ MachineRepresentation::kTaggedSigned;
+ break;
+ case IrOpcode::kChangeTaggedToCompressed:
+ representation_vector_[node->id()] =
+ MachineRepresentation::kCompressed;
+ break;
+ case IrOpcode::kChangeTaggedPointerToCompressedPointer:
+ representation_vector_[node->id()] =
+ MachineRepresentation::kCompressedPointer;
+ break;
+ case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+ representation_vector_[node->id()] =
+ MachineRepresentation::kCompressedSigned;
+ break;
case IrOpcode::kWord32PoisonOnSpeculation:
representation_vector_[node->id()] = MachineRepresentation::kWord32;
break;
@@ -363,6 +384,29 @@ class MachineRepresentationChecker {
CHECK_EQ(MachineRepresentation::kTagged,
inferrer_->GetRepresentation(node->InputAt(0)));
break;
+ case IrOpcode::kChangeCompressedToTagged:
+ CHECK(IsAnyCompressed(
+ inferrer_->GetRepresentation(node->InputAt(0))));
+ break;
+ case IrOpcode::kChangeCompressedPointerToTaggedPointer:
+ CHECK(CanBeCompressedPointer(
+ inferrer_->GetRepresentation(node->InputAt(0))));
+ break;
+ case IrOpcode::kChangeCompressedSignedToTaggedSigned:
+ CHECK(CanBeCompressedSigned(
+ inferrer_->GetRepresentation(node->InputAt(0))));
+ break;
+ case IrOpcode::kChangeTaggedToCompressed:
+ CHECK(IsAnyTagged(inferrer_->GetRepresentation(node->InputAt(0))));
+ break;
+ case IrOpcode::kChangeTaggedPointerToCompressedPointer:
+ CHECK(CanBeTaggedPointer(
+ inferrer_->GetRepresentation(node->InputAt(0))));
+ break;
+ case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+ CHECK(CanBeTaggedSigned(
+ inferrer_->GetRepresentation(node->InputAt(0))));
+ break;
case IrOpcode::kRoundInt64ToFloat64:
case IrOpcode::kRoundUint64ToFloat64:
case IrOpcode::kRoundInt64ToFloat32:
@@ -454,8 +498,8 @@ class MachineRepresentationChecker {
node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
}
} else {
- CheckValueInputForInt32Op(node, 0);
- CheckValueInputForInt32Op(node, 1);
+ CheckValueIsCompressedOrInt32(node, 0);
+ CheckValueIsCompressedOrInt32(node, 1);
}
break;
@@ -558,6 +602,11 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedSigned:
CheckValueInputIsTagged(node, 2);
break;
+ case MachineRepresentation::kCompressed:
+ case MachineRepresentation::kCompressedPointer:
+ case MachineRepresentation::kCompressedSigned:
+ CheckValueInputIsCompressed(node, 2);
+ break;
default:
CheckValueInputRepresentationIs(
node, 2, inferrer_->GetRepresentation(node));
@@ -597,6 +646,13 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, i);
}
break;
+ case MachineRepresentation::kCompressed:
+ case MachineRepresentation::kCompressedPointer:
+ case MachineRepresentation::kCompressedSigned:
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ CheckValueInputIsCompressed(node, i);
+ }
+ break;
case MachineRepresentation::kWord32:
for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
CheckValueInputForInt32Op(node, i);
@@ -685,6 +741,24 @@ class MachineRepresentationChecker {
}
}
+ void CheckValueInputIsCompressed(Node const* node, int index) {
+ Node const* input = node->InputAt(index);
+ switch (inferrer_->GetRepresentation(input)) {
+ case MachineRepresentation::kCompressed:
+ case MachineRepresentation::kCompressedPointer:
+ case MachineRepresentation::kCompressedSigned:
+ return;
+ default:
+ break;
+ }
+ std::ostringstream str;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op()
+ << " which doesn't have a compressed representation.";
+ PrintDebugHelp(str, node);
+ FATAL("%s", str.str().c_str());
+ }
+
void CheckValueInputIsTagged(Node const* node, int index) {
Node const* input = node->InputAt(index);
switch (inferrer_->GetRepresentation(input)) {
@@ -764,6 +838,38 @@ class MachineRepresentationChecker {
FATAL("%s", str.str().c_str());
}
+ void CheckValueIsCompressedOrInt32(Node const* node, int index) {
+ Node const* input = node->InputAt(index);
+ switch (inferrer_->GetRepresentation(input)) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return;
+ case MachineRepresentation::kCompressed:
+ case MachineRepresentation::kCompressedSigned:
+ case MachineRepresentation::kCompressedPointer:
+ return;
+ case MachineRepresentation::kNone: {
+ std::ostringstream str;
+ str << "TypeError: node #" << input->id() << ":" << *input->op()
+ << " is untyped.";
+ PrintDebugHelp(str, node);
+ FATAL("%s", str.str().c_str());
+ break;
+ }
+ default:
+ break;
+ }
+ std::ostringstream str;
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op()
+ << " which doesn't have a compressed or int32-compatible "
+ "representation.";
+ PrintDebugHelp(str, node);
+ FATAL("%s", str.str().c_str());
+ }
+
void CheckValueInputForInt64Op(Node const* node, int index) {
Node const* input = node->InputAt(index);
MachineRepresentation input_representation =
@@ -882,13 +988,9 @@ class MachineRepresentationChecker {
MachineRepresentation actual) {
switch (expected) {
case MachineRepresentation::kTagged:
- return (actual == MachineRepresentation::kTagged ||
- actual == MachineRepresentation::kTaggedSigned ||
- actual == MachineRepresentation::kTaggedPointer);
+ return IsAnyTagged(actual);
case MachineRepresentation::kCompressed:
- return (actual == MachineRepresentation::kCompressed ||
- actual == MachineRepresentation::kCompressedSigned ||
- actual == MachineRepresentation::kCompressedPointer);
+ return IsAnyCompressed(actual);
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kCompressedSigned:
diff --git a/deps/v8/src/compiler/machine-graph.cc b/deps/v8/src/compiler/machine-graph.cc
index 92bf3910c2..3f05d56a9a 100644
--- a/deps/v8/src/compiler/machine-graph.cc
+++ b/deps/v8/src/compiler/machine-graph.cc
@@ -4,8 +4,8 @@
#include "src/compiler/machine-graph.h"
+#include "src/codegen/external-reference.h"
#include "src/compiler/node-properties.h"
-#include "src/external-reference.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/machine-graph.h b/deps/v8/src/compiler/machine-graph.h
index 83d27e03b9..86c3847211 100644
--- a/deps/v8/src/compiler/machine-graph.h
+++ b/deps/v8/src/compiler/machine-graph.h
@@ -6,11 +6,11 @@
#define V8_COMPILER_MACHINE_GRAPH_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/common-node-cache.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
-#include "src/globals.h"
#include "src/runtime/runtime.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 50142003dc..a6a8e87cf4 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -14,7 +14,7 @@
#include "src/compiler/machine-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-#include "src/conversions-inl.h"
+#include "src/numbers/conversions-inl.h"
namespace v8 {
namespace internal {
@@ -751,6 +751,16 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
}
+ // (x + Int32Constant(a)) + Int32Constant(b)) => x + Int32Constant(a + b)
+ if (m.right().HasValue() && m.left().IsInt32Add()) {
+ Int32BinopMatcher n(m.left().node());
+ if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(1, Int32Constant(base::AddWithWraparound(
+ m.right().Value(), n.right().Value())));
+ node->ReplaceInput(0, n.left().node());
+ return Changed(node);
+ }
+ }
return NoChange();
}
@@ -762,6 +772,16 @@ Reduction MachineOperatorReducer::ReduceInt64Add(Node* node) {
return ReplaceInt64(
base::AddWithWraparound(m.left().Value(), m.right().Value()));
}
+ // (x + Int64Constant(a)) + Int64Constant(b)) => x + Int64Constant(a + b)
+ if (m.right().HasValue() && m.left().IsInt64Add()) {
+ Int64BinopMatcher n(m.left().node());
+ if (n.right().HasValue() && m.OwnsInput(m.left().node())) {
+ node->ReplaceInput(1, Int64Constant(base::AddWithWraparound(
+ m.right().Value(), n.right().Value())));
+ node->ReplaceInput(0, n.left().node());
+ return Changed(node);
+ }
+ }
return NoChange();
}
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 8c0d4c810d..a8e4cd5749 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -6,9 +6,9 @@
#define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/machine-operator.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 845717baf8..d2ddedc8fa 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -550,6 +550,11 @@ struct MachineOperatorGlobalCache {
Store##Type##NoWriteBarrier##Operator() \
: Store##Type##Operator(kNoWriteBarrier) {} \
}; \
+ struct Store##Type##AssertNoWriteBarrier##Operator final \
+ : public Store##Type##Operator { \
+ Store##Type##AssertNoWriteBarrier##Operator() \
+ : Store##Type##Operator(kAssertNoWriteBarrier) {} \
+ }; \
struct Store##Type##MapWriteBarrier##Operator final \
: public Store##Type##Operator { \
Store##Type##MapWriteBarrier##Operator() \
@@ -590,6 +595,8 @@ struct MachineOperatorGlobalCache {
kNoWriteBarrier)) {} \
}; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
+ Store##Type##AssertNoWriteBarrier##Operator \
+ kStore##Type##AssertNoWriteBarrier; \
Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
Store##Type##PointerWriteBarrier##Operator \
kStore##Type##PointerWriteBarrier; \
@@ -945,6 +952,8 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
switch (store_rep.write_barrier_kind()) { \
case kNoWriteBarrier: \
return &cache_.k##Store##kRep##NoWriteBarrier; \
+ case kAssertNoWriteBarrier: \
+ return &cache_.k##Store##kRep##AssertNoWriteBarrier; \
case kMapWriteBarrier: \
return &cache_.k##Store##kRep##MapWriteBarrier; \
case kPointerWriteBarrier: \
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 255b927c84..8b1250dd30 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -8,8 +8,8 @@
#include "src/base/compiler-specific.h"
#include "src/base/enum-set.h"
#include "src/base/flags.h"
-#include "src/globals.h"
-#include "src/machine-type.h"
+#include "src/codegen/machine-type.h"
+#include "src/compiler/write-barrier-kind.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -645,9 +645,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-or [base + index], value
const Operator* Word32AtomicOr(MachineType type);
// atomic-xor [base + index], value
- const Operator* Word32AtomicXor(MachineType rep);
+ const Operator* Word32AtomicXor(MachineType type);
// atomic-add [base + index], value
- const Operator* Word64AtomicAdd(MachineType rep);
+ const Operator* Word64AtomicAdd(MachineType type);
// atomic-sub [base + index], value
const Operator* Word64AtomicSub(MachineType type);
// atomic-and [base + index], value
@@ -655,7 +655,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-or [base + index], value
const Operator* Word64AtomicOr(MachineType type);
// atomic-xor [base + index], value
- const Operator* Word64AtomicXor(MachineType rep);
+ const Operator* Word64AtomicXor(MachineType type);
// atomic-pair-load [base + index]
const Operator* Word32AtomicPairLoad();
// atomic-pair-sub [base + index], value_high, value-low
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
new file mode 100644
index 0000000000..f43ba0d155
--- /dev/null
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -0,0 +1,149 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/map-inference.h"
+
+#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/vector-slot-pair.h"
+#include "src/objects/map-inl.h"
+#include "src/zone/zone-handle-set.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect)
+ : broker_(broker), object_(object) {
+ ZoneHandleSet<Map> maps;
+ auto result =
+ NodeProperties::InferReceiverMaps(broker_, object_, effect, &maps);
+ maps_.insert(maps_.end(), maps.begin(), maps.end());
+ maps_state_ = (result == NodeProperties::kUnreliableReceiverMaps)
+ ? kUnreliableDontNeedGuard
+ : kReliableOrGuarded;
+ DCHECK_EQ(maps_.empty(), result == NodeProperties::kNoReceiverMaps);
+}
+
+MapInference::~MapInference() { CHECK(Safe()); }
+
+bool MapInference::Safe() const { return maps_state_ != kUnreliableNeedGuard; }
+
+void MapInference::SetNeedGuardIfUnreliable() {
+ CHECK(HaveMaps());
+ if (maps_state_ == kUnreliableDontNeedGuard) {
+ maps_state_ = kUnreliableNeedGuard;
+ }
+}
+
+void MapInference::SetGuarded() { maps_state_ = kReliableOrGuarded; }
+
+bool MapInference::HaveMaps() const { return !maps_.empty(); }
+
+bool MapInference::AllOfInstanceTypesAreJSReceiver() const {
+ return AllOfInstanceTypesUnsafe(InstanceTypeChecker::IsJSReceiver);
+}
+
+bool MapInference::AllOfInstanceTypesAre(InstanceType type) const {
+ CHECK(!InstanceTypeChecker::IsString(type));
+ return AllOfInstanceTypesUnsafe(
+ [type](InstanceType other) { return type == other; });
+}
+
+bool MapInference::AnyOfInstanceTypesAre(InstanceType type) const {
+ CHECK(!InstanceTypeChecker::IsString(type));
+ return AnyOfInstanceTypesUnsafe(
+ [type](InstanceType other) { return type == other; });
+}
+
+bool MapInference::AllOfInstanceTypes(std::function<bool(InstanceType)> f) {
+ SetNeedGuardIfUnreliable();
+ return AllOfInstanceTypesUnsafe(f);
+}
+
+bool MapInference::AllOfInstanceTypesUnsafe(
+ std::function<bool(InstanceType)> f) const {
+ // TODO(neis): Brokerize the MapInference.
+ AllowHandleDereference allow_handle_deref;
+ CHECK(HaveMaps());
+
+ return std::all_of(maps_.begin(), maps_.end(),
+ [f](Handle<Map> map) { return f(map->instance_type()); });
+}
+
+bool MapInference::AnyOfInstanceTypesUnsafe(
+ std::function<bool(InstanceType)> f) const {
+ AllowHandleDereference allow_handle_deref;
+ CHECK(HaveMaps());
+
+ return std::any_of(maps_.begin(), maps_.end(),
+ [f](Handle<Map> map) { return f(map->instance_type()); });
+}
+
+MapHandles const& MapInference::GetMaps() {
+ SetNeedGuardIfUnreliable();
+ return maps_;
+}
+
+void MapInference::InsertMapChecks(JSGraph* jsgraph, Node** effect,
+ Node* control,
+ const VectorSlotPair& feedback) {
+ CHECK(HaveMaps());
+ CHECK(feedback.IsValid());
+ ZoneHandleSet<Map> maps;
+ for (Handle<Map> map : maps_) maps.insert(map, jsgraph->graph()->zone());
+ *effect = jsgraph->graph()->NewNode(
+ jsgraph->simplified()->CheckMaps(CheckMapsFlag::kNone, maps, feedback),
+ object_, *effect, control);
+ SetGuarded();
+}
+
+bool MapInference::RelyOnMapsViaStability(
+ CompilationDependencies* dependencies) {
+ CHECK(HaveMaps());
+ return RelyOnMapsHelper(dependencies, nullptr, nullptr, nullptr, {});
+}
+
+bool MapInference::RelyOnMapsPreferStability(
+ CompilationDependencies* dependencies, JSGraph* jsgraph, Node** effect,
+ Node* control, const VectorSlotPair& feedback) {
+ CHECK(HaveMaps());
+ if (Safe()) return false;
+ if (RelyOnMapsViaStability(dependencies)) return true;
+ CHECK(RelyOnMapsHelper(nullptr, jsgraph, effect, control, feedback));
+ return false;
+}
+
+bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies,
+ JSGraph* jsgraph, Node** effect,
+ Node* control,
+ const VectorSlotPair& feedback) {
+ if (Safe()) return true;
+
+ auto is_stable = [](Handle<Map> map) { return map->is_stable(); };
+ if (dependencies != nullptr &&
+ std::all_of(maps_.cbegin(), maps_.cend(), is_stable)) {
+ for (Handle<Map> map : maps_) {
+ dependencies->DependOnStableMap(MapRef(broker_, map));
+ }
+ SetGuarded();
+ return true;
+ } else if (feedback.IsValid()) {
+ InsertMapChecks(jsgraph, effect, control, feedback);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+Reduction MapInference::NoChange() {
+ SetGuarded();
+  maps_.clear();  // Just to make some CHECKs fail if {this} gets used again.
+ return Reducer::NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/map-inference.h b/deps/v8/src/compiler/map-inference.h
new file mode 100644
index 0000000000..64cec77f2b
--- /dev/null
+++ b/deps/v8/src/compiler/map-inference.h
@@ -0,0 +1,108 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MAP_INFERENCE_H_
+#define V8_COMPILER_MAP_INFERENCE_H_
+
+#include "include/v8config.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/objects/instance-type.h"
+#include "src/objects/map.h"
+
+namespace v8 {
+namespace internal {
+
+class VectorSlotPair;
+
+namespace compiler {
+
+class CompilationDependencies;
+class JSGraph;
+class JSHeapBroker;
+class Node;
+
+// The MapInference class provides access to the "inferred" maps of an
+// {object}. This information can be either "reliable", meaning that the object
+// is guaranteed to have one of these maps at runtime, or "unreliable", meaning
+// that the object is guaranteed to have HAD one of these maps.
+//
+// The MapInference class does not expose whether or not the information is
+// reliable. A client is expected to eventually make the information reliable by
+// calling one of several methods that will either insert map checks, or record
+// stability dependencies (or do nothing if the information was already
+// reliable).
+class MapInference {
+ public:
+ MapInference(JSHeapBroker* broker, Node* object, Node* effect);
+
+ // The destructor checks that the information has been made reliable (if
+ // necessary) and force-crashes if not.
+ ~MapInference();
+
+ // Is there any information at all?
+ V8_WARN_UNUSED_RESULT bool HaveMaps() const;
+
+ // These queries don't require a guard.
+ //
+ V8_WARN_UNUSED_RESULT bool AllOfInstanceTypesAreJSReceiver() const;
+ // Here, {type} must not be a String type.
+ V8_WARN_UNUSED_RESULT bool AllOfInstanceTypesAre(InstanceType type) const;
+ V8_WARN_UNUSED_RESULT bool AnyOfInstanceTypesAre(InstanceType type) const;
+
+ // These queries require a guard. (Even instance types are generally not
+ // reliable because of how the representation of a string can change.)
+ V8_WARN_UNUSED_RESULT MapHandles const& GetMaps();
+ V8_WARN_UNUSED_RESULT bool AllOfInstanceTypes(
+ std::function<bool(InstanceType)> f);
+
+ // These methods provide a guard.
+ //
+ // Returns true iff maps were already reliable or stability dependencies were
+ // successfully recorded.
+ V8_WARN_UNUSED_RESULT bool RelyOnMapsViaStability(
+ CompilationDependencies* dependencies);
+ // Records stability dependencies if possible, otherwise it inserts map
+ // checks. Does nothing if maps were already reliable. Returns true iff
+ // dependencies were taken.
+ bool RelyOnMapsPreferStability(CompilationDependencies* dependencies,
+ JSGraph* jsgraph, Node** effect, Node* control,
+ const VectorSlotPair& feedback);
+ // Inserts map checks even if maps were already reliable.
+ void InsertMapChecks(JSGraph* jsgraph, Node** effect, Node* control,
+ const VectorSlotPair& feedback);
+
+ // Internally marks the maps as reliable (thus bypassing the safety check) and
+ // returns the NoChange reduction. USE THIS ONLY WHEN RETURNING, e.g.:
+ // if (foo) return inference.NoChange();
+ V8_WARN_UNUSED_RESULT Reduction NoChange();
+
+ private:
+ JSHeapBroker* const broker_;
+ Node* const object_;
+
+ MapHandles maps_;
+ enum {
+ kReliableOrGuarded,
+ kUnreliableDontNeedGuard,
+ kUnreliableNeedGuard
+ } maps_state_;
+
+ bool Safe() const;
+ void SetNeedGuardIfUnreliable();
+ void SetGuarded();
+
+ V8_WARN_UNUSED_RESULT bool AllOfInstanceTypesUnsafe(
+ std::function<bool(InstanceType)> f) const;
+ V8_WARN_UNUSED_RESULT bool AnyOfInstanceTypesUnsafe(
+ std::function<bool(InstanceType)> f) const;
+ V8_WARN_UNUSED_RESULT bool RelyOnMapsHelper(
+ CompilationDependencies* dependencies, JSGraph* jsgraph, Node** effect,
+ Node* control, const VectorSlotPair& feedback);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_MAP_INFERENCE_H_
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index a3f47a8acb..29cbb4d26c 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -4,13 +4,14 @@
#include "src/compiler/memory-optimizer.h"
+#include "src/codegen/interface-descriptors.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
-#include "src/interface-descriptors.h"
+#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
@@ -18,7 +19,8 @@ namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
- AllocationFolding allocation_folding)
+ AllocationFolding allocation_folding,
+ const char* function_debug_name)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
@@ -26,7 +28,8 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
zone_(zone),
graph_assembler_(jsgraph, nullptr, nullptr, zone),
poisoning_level_(poisoning_level),
- allocation_folding_(allocation_folding) {}
+ allocation_folding_(allocation_folding),
+ function_debug_name_(function_debug_name) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -58,7 +61,21 @@ void MemoryOptimizer::AllocationGroup::Add(Node* node) {
}
bool MemoryOptimizer::AllocationGroup::Contains(Node* node) const {
- return node_ids_.find(node->id()) != node_ids_.end();
+ // Additions should stay within the same allocated object, so it's safe to
+ // ignore them.
+ while (node_ids_.find(node->id()) == node_ids_.end()) {
+ switch (node->opcode()) {
+ case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastWordToTagged:
+ case IrOpcode::kInt32Add:
+ case IrOpcode::kInt64Add:
+ node = NodeProperties::GetValueInput(node, 0);
+ break;
+ default:
+ return false;
+ }
+ }
+ return true;
}
MemoryOptimizer::AllocationState::AllocationState()
@@ -86,6 +103,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
+ case IrOpcode::kEffectPhi:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
case IrOpcode::kLoadElement:
@@ -94,6 +112,10 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
+ // TODO(tebbi): Store nodes might do a bump-pointer allocation.
+ // We should introduce a special bump-pointer store node to
+ // differentiate that.
+ case IrOpcode::kStore:
case IrOpcode::kStoreElement:
case IrOpcode::kStoreField:
case IrOpcode::kTaggedPoisonOnSpeculation:
@@ -101,6 +123,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnsafePointerAdd:
case IrOpcode::kUnreachable:
+ case IrOpcode::kStaticAssert:
case IrOpcode::kWord32AtomicAdd:
case IrOpcode::kWord32AtomicAnd:
case IrOpcode::kWord32AtomicCompareExchange:
@@ -136,29 +159,17 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kCallWithCallerSavedRegisters:
return !(CallDescriptorOf(node->op())->flags() &
CallDescriptor::kNoAllocate);
-
- case IrOpcode::kStore:
- // Store is not safe because it could be part of CSA's bump pointer
- // allocation(?).
- return true;
-
default:
break;
}
return true;
}
-bool CanLoopAllocate(Node* loop_effect_phi, Zone* temp_zone) {
- Node* const control = NodeProperties::GetControlInput(loop_effect_phi);
-
+Node* SearchAllocatingNode(Node* start, Node* limit, Zone* temp_zone) {
ZoneQueue<Node*> queue(temp_zone);
ZoneSet<Node*> visited(temp_zone);
- visited.insert(loop_effect_phi);
-
- // Start the effect chain walk from the loop back edges.
- for (int i = 1; i < control->InputCount(); ++i) {
- queue.push(loop_effect_phi->InputAt(i));
- }
+ visited.insert(limit);
+ queue.push(start);
while (!queue.empty()) {
Node* const current = queue.front();
@@ -166,16 +177,40 @@ bool CanLoopAllocate(Node* loop_effect_phi, Zone* temp_zone) {
if (visited.find(current) == visited.end()) {
visited.insert(current);
- if (CanAllocate(current)) return true;
+ if (CanAllocate(current)) {
+ return current;
+ }
for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
queue.push(NodeProperties::GetEffectInput(current, i));
}
}
}
+ return nullptr;
+}
+
+bool CanLoopAllocate(Node* loop_effect_phi, Zone* temp_zone) {
+ Node* const control = NodeProperties::GetControlInput(loop_effect_phi);
+ // Start the effect chain walk from the loop back edges.
+ for (int i = 1; i < control->InputCount(); ++i) {
+ if (SearchAllocatingNode(loop_effect_phi->InputAt(i), loop_effect_phi,
+ temp_zone) != nullptr) {
+ return true;
+ }
+ }
return false;
}
+Node* EffectPhiForPhi(Node* phi) {
+ Node* control = NodeProperties::GetControlInput(phi);
+ for (Node* use : control->uses()) {
+ if (use->opcode() == IrOpcode::kEffectPhi) {
+ return use;
+ }
+ }
+ return nullptr;
+}
+
} // namespace
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
@@ -192,10 +227,14 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
return VisitCall(node, state);
case IrOpcode::kCallWithCallerSavedRegisters:
return VisitCallWithCallerSavedRegisters(node, state);
+ case IrOpcode::kLoadFromObject:
+ return VisitLoadFromObject(node, state);
case IrOpcode::kLoadElement:
return VisitLoadElement(node, state);
case IrOpcode::kLoadField:
return VisitLoadField(node, state);
+ case IrOpcode::kStoreToObject:
+ return VisitStoreToObject(node, state);
case IrOpcode::kStoreElement:
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
@@ -223,13 +262,14 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
gasm()->Reset(effect, control);
- AllocationType allocation = AllocationTypeOf(node->op());
+ const AllocateParameters& allocation = AllocateParametersOf(node->op());
+ AllocationType allocation_type = allocation.allocation_type();
// Propagate tenuring from outer allocations to inner allocations, i.e.
// when we allocate an object in old space and store a newly allocated
// child object into the pretenured object, then the newly allocated
// child object also should get pretenured to old space.
- if (allocation == AllocationType::kOld) {
+ if (allocation_type == AllocationType::kOld) {
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
@@ -242,14 +282,14 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
}
}
} else {
- DCHECK_EQ(AllocationType::kYoung, allocation);
+ DCHECK_EQ(AllocationType::kYoung, allocation_type);
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
Node* const parent = user->InputAt(0);
if (parent->opcode() == IrOpcode::kAllocateRaw &&
AllocationTypeOf(parent->op()) == AllocationType::kOld) {
- allocation = AllocationType::kOld;
+ allocation_type = AllocationType::kOld;
break;
}
}
@@ -258,22 +298,22 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
- allocation == AllocationType::kYoung
+ allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = __ ExternalConstant(
- allocation == AllocationType::kYoung
+ allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
// Check if we can fold this allocation into a previous allocation represented
// by the incoming {state}.
IntPtrMatcher m(size);
- if (m.IsInRange(0, kMaxRegularHeapObjectSize)) {
+ if (m.IsInRange(0, kMaxRegularHeapObjectSize) && FLAG_inline_new) {
intptr_t const object_size = m.Value();
if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
state->size() <= kMaxRegularHeapObjectSize - object_size &&
- state->group()->allocation() == allocation) {
+ state->group()->allocation() == allocation_type) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
@@ -331,7 +371,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ Bind(&call_runtime);
{
- Node* target = allocation == AllocationType::kYoung
+ Node* target = allocation_type == AllocationType::kYoung
? __
AllocateInYoungGenerationStubConstant()
: __
@@ -363,7 +403,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Start a new allocation group.
AllocationGroup* group =
- new (zone()) AllocationGroup(value, allocation, size, zone());
+ new (zone()) AllocationGroup(value, allocation_type, size, zone());
state = AllocationState::Open(group, object_size, top, zone());
}
} else {
@@ -382,6 +422,11 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
__ GotoIfNot(check, &call_runtime);
+ if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+ __ GotoIfNot(
+ __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
+ &call_runtime);
+ }
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), new_top);
@@ -389,7 +434,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
- Node* target = allocation == AllocationType::kYoung
+ Node* target = allocation_type == AllocationType::kYoung
? __
AllocateInYoungGenerationStubConstant()
: __
@@ -408,7 +453,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Create an unfoldable allocation group.
AllocationGroup* group =
- new (zone()) AllocationGroup(value, allocation, zone());
+ new (zone()) AllocationGroup(value, allocation_type, zone());
state = AllocationState::Closed(group, zone());
}
@@ -434,6 +479,32 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
node->Kill();
}
+void MemoryOptimizer::VisitLoadFromObject(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ Node* offset = node->InputAt(1);
+ node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ EnqueueUses(node, state);
+}
+
+void MemoryOptimizer::VisitStoreToObject(Node* node,
+ AllocationState const* state) {
+ DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
+ ObjectAccess const& access = ObjectAccessOf(node->op());
+ Node* object = node->InputAt(0);
+ Node* offset = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type.representation(), write_barrier_kind)));
+ EnqueueUses(node, state);
+}
+
#undef __
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
@@ -461,13 +532,13 @@ void MemoryOptimizer::VisitLoadElement(Node* node,
ElementAccess const& access = ElementAccessOf(node->op());
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
+ MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity) &&
- access.machine_type.representation() !=
- MachineRepresentation::kTaggedPointer) {
- NodeProperties::ChangeOp(node,
- machine()->PoisonedLoad(access.machine_type));
+ type.representation() != MachineRepresentation::kTaggedPointer &&
+ type.representation() != MachineRepresentation::kCompressedPointer) {
+ NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ NodeProperties::ChangeOp(node, machine()->Load(type));
}
EnqueueUses(node, state);
}
@@ -477,13 +548,13 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
+ MachineType type = access.machine_type;
if (NeedsPoisoning(access.load_sensitivity) &&
- access.machine_type.representation() !=
- MachineRepresentation::kTaggedPointer) {
- NodeProperties::ChangeOp(node,
- machine()->PoisonedLoad(access.machine_type));
+ type.representation() != MachineRepresentation::kTaggedPointer &&
+ type.representation() != MachineRepresentation::kCompressedPointer) {
+ NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
- NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+ NodeProperties::ChangeOp(node, machine()->Load(type));
}
EnqueueUses(node, state);
}
@@ -494,8 +565,9 @@ void MemoryOptimizer::VisitStoreElement(Node* node,
ElementAccess const& access = ElementAccessOf(node->op());
Node* object = node->InputAt(0);
Node* index = node->InputAt(1);
- WriteBarrierKind write_barrier_kind =
- ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+ Node* value = node->InputAt(2);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
node->ReplaceInput(1, ComputeIndex(access, index));
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
@@ -508,8 +580,9 @@ void MemoryOptimizer::VisitStoreField(Node* node,
DCHECK_EQ(IrOpcode::kStoreField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
Node* object = node->InputAt(0);
- WriteBarrierKind write_barrier_kind =
- ComputeWriteBarrierKind(object, state, access.write_barrier_kind);
+ Node* value = node->InputAt(1);
+ WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
+ node, object, value, state, access.write_barrier_kind);
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
NodeProperties::ChangeOp(
@@ -522,8 +595,9 @@ void MemoryOptimizer::VisitStore(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kStore, node->opcode());
StoreRepresentation representation = StoreRepresentationOf(node->op());
Node* object = node->InputAt(0);
+ Node* value = node->InputAt(2);
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
- object, state, representation.write_barrier_kind());
+ node, object, value, state, representation.write_barrier_kind());
if (write_barrier_kind != representation.write_barrier_kind()) {
NodeProperties::ChangeOp(
node, machine()->Store(StoreRepresentation(
@@ -552,13 +626,85 @@ Node* MemoryOptimizer::ComputeIndex(ElementAccess const& access, Node* index) {
return index;
}
+namespace {
+
+bool ValueNeedsWriteBarrier(Node* value, Isolate* isolate) {
+ while (true) {
+ switch (value->opcode()) {
+ case IrOpcode::kBitcastWordToTaggedSigned:
+ case IrOpcode::kChangeTaggedSignedToCompressedSigned:
+ case IrOpcode::kChangeTaggedToCompressedSigned:
+ return false;
+ case IrOpcode::kChangeTaggedPointerToCompressedPointer:
+ case IrOpcode::kChangeTaggedToCompressed:
+ value = NodeProperties::GetValueInput(value, 0);
+ continue;
+ case IrOpcode::kHeapConstant: {
+ RootIndex root_index;
+ if (isolate->roots_table().IsRootHandle(HeapConstantOf(value->op()),
+ &root_index) &&
+ RootsTable::IsImmortalImmovable(root_index)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return true;
+ }
+}
+
+void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
+ Zone* temp_zone) {
+ std::stringstream str;
+ str << "MemoryOptimizer could not remove write barrier for node #"
+ << node->id() << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << node->id() << " to break in CSA code.\n";
+ Node* object_position = object;
+ if (object_position->opcode() == IrOpcode::kPhi) {
+ object_position = EffectPhiForPhi(object_position);
+ }
+ Node* allocating_node = nullptr;
+ if (object_position && object_position->op()->EffectOutputCount() > 0) {
+ allocating_node = SearchAllocatingNode(node, object_position, temp_zone);
+ }
+ if (allocating_node) {
+ str << "\n There is a potentially allocating node in between:\n";
+ str << " " << *allocating_node << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << allocating_node->id() << " to break there.\n";
+ if (allocating_node->opcode() == IrOpcode::kCall) {
+ str << " If this is a never-allocating runtime call, you can add an "
+ "exception to Runtime::MayAllocate.\n";
+ }
+ } else {
+ str << "\n It seems the store happened to something different than a "
+ "direct "
+ "allocation:\n";
+ str << " " << *object << "\n";
+ str << " Run mksnapshot with --csa-trap-on-node=" << name << ","
+ << object->id() << " to break there.\n";
+ }
+ FATAL("%s", str.str().c_str());
+}
+
+} // namespace
+
WriteBarrierKind MemoryOptimizer::ComputeWriteBarrierKind(
- Node* object, AllocationState const* state,
+ Node* node, Node* object, Node* value, AllocationState const* state,
WriteBarrierKind write_barrier_kind) {
if (state->IsYoungGenerationAllocation() &&
state->group()->Contains(object)) {
write_barrier_kind = kNoWriteBarrier;
}
+ if (!ValueNeedsWriteBarrier(value, isolate())) {
+ write_barrier_kind = kNoWriteBarrier;
+ }
+ if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
+ WriteBarrierAssertFailed(node, object, function_debug_name_, zone());
+ }
return write_barrier_kind;
}
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 81e4696d93..cbefcb67de 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -35,7 +35,8 @@ class MemoryOptimizer final {
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
- AllocationFolding allocation_folding);
+ AllocationFolding allocation_folding,
+ const char* function_debug_name);
~MemoryOptimizer() = default;
void Optimize();
@@ -115,15 +116,18 @@ class MemoryOptimizer final {
void VisitAllocateRaw(Node*, AllocationState const*);
void VisitCall(Node*, AllocationState const*);
void VisitCallWithCallerSavedRegisters(Node*, AllocationState const*);
+ void VisitLoadFromObject(Node*, AllocationState const*);
void VisitLoadElement(Node*, AllocationState const*);
void VisitLoadField(Node*, AllocationState const*);
+ void VisitStoreToObject(Node*, AllocationState const*);
void VisitStoreElement(Node*, AllocationState const*);
void VisitStoreField(Node*, AllocationState const*);
void VisitStore(Node*, AllocationState const*);
void VisitOtherEffect(Node*, AllocationState const*);
Node* ComputeIndex(ElementAccess const&, Node*);
- WriteBarrierKind ComputeWriteBarrierKind(Node* object,
+ WriteBarrierKind ComputeWriteBarrierKind(Node* node, Node* object,
+ Node* value,
AllocationState const* state,
WriteBarrierKind);
@@ -153,6 +157,7 @@ class MemoryOptimizer final {
GraphAssembler graph_assembler_;
PoisoningMitigationLevel poisoning_level_;
AllocationFolding allocation_folding_;
+ const char* function_debug_name_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
index 78d7eccbb3..70e497ae60 100644
--- a/deps/v8/src/compiler/node-cache.cc
+++ b/deps/v8/src/compiler/node-cache.cc
@@ -6,7 +6,7 @@
#include <cstring>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 02d7b50e07..7c0c702e3f 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -8,11 +8,11 @@
#include <cmath>
#include "src/base/compiler-specific.h"
+#include "src/codegen/external-reference.h"
+#include "src/common/globals.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
-#include "src/double.h"
-#include "src/external-reference.h"
-#include "src/globals.h"
+#include "src/numbers/double.h"
#include "src/objects/heap-object.h"
namespace v8 {
@@ -254,6 +254,15 @@ struct BinopMatcher : public NodeMatcher {
bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
bool LeftEqualsRight() const { return left().node() == right().node(); }
+ bool OwnsInput(Node* input) {
+ for (Node* use : input->uses()) {
+ if (use != node()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
protected:
void SwapInputs() {
std::swap(left_, right_);
diff --git a/deps/v8/src/compiler/node-origin-table.h b/deps/v8/src/compiler/node-origin-table.h
index 4f0f29f3ed..4bb66a769d 100644
--- a/deps/v8/src/compiler/node-origin-table.h
+++ b/deps/v8/src/compiler/node-origin-table.h
@@ -8,9 +8,9 @@
#include <limits>
#include "src/base/compiler-specific.h"
+#include "src/codegen/source-position.h"
+#include "src/common/globals.h"
#include "src/compiler/node-aux-data.h"
-#include "src/globals.h"
-#include "src/source-position.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 23fb74a5ce..d6528c553a 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -7,12 +7,13 @@
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/map-inference.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/verifier.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -254,10 +255,15 @@ void NodeProperties::ChangeOp(Node* node, const Operator* new_op) {
// static
-Node* NodeProperties::FindFrameStateBefore(Node* node) {
+Node* NodeProperties::FindFrameStateBefore(Node* node,
+ Node* unreachable_sentinel) {
Node* effect = NodeProperties::GetEffectInput(node);
while (effect->opcode() != IrOpcode::kCheckpoint) {
- if (effect->opcode() == IrOpcode::kDead) return effect;
+ if (effect->opcode() == IrOpcode::kDead ||
+ effect->opcode() == IrOpcode::kUnreachable) {
+ return unreachable_sentinel;
+ }
+ DCHECK(effect->op()->HasProperty(Operator::kNoWrite));
DCHECK_EQ(1, effect->op()->EffectInputCount());
effect = NodeProperties::GetEffectInput(effect);
}
@@ -524,30 +530,6 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
}
// static
-bool NodeProperties::HasInstanceTypeWitness(JSHeapBroker* broker,
- Node* receiver, Node* effect,
- InstanceType instance_type) {
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker, receiver, effect,
- &receiver_maps);
- switch (result) {
- case NodeProperties::kUnreliableReceiverMaps:
- case NodeProperties::kReliableReceiverMaps:
- DCHECK_NE(0, receiver_maps.size());
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- MapRef map(broker, receiver_maps[i]);
- if (map.instance_type() != instance_type) return false;
- }
- return true;
-
- case NodeProperties::kNoReceiverMaps:
- return false;
- }
- UNREACHABLE();
-}
-
-// static
bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
Node* dominator) {
while (effect != dominator) {
@@ -579,20 +561,9 @@ bool NodeProperties::CanBePrimitive(JSHeapBroker* broker, Node* receiver,
return value.map().IsPrimitiveMap();
}
default: {
- // We don't really care about the exact maps here,
- // just the instance types, which don't change
- // across potential side-effecting operations.
- ZoneHandleSet<Map> maps;
- if (InferReceiverMaps(broker, receiver, effect, &maps) !=
- kNoReceiverMaps) {
- // Check if one of the {maps} is not a JSReceiver map.
- for (size_t i = 0; i < maps.size(); ++i) {
- MapRef map(broker, maps[i]);
- if (!map.IsJSReceiverMap()) return true;
- }
- return false;
- }
- return true;
+ MapInference inference(broker, receiver, effect);
+ return !inference.HaveMaps() ||
+ !inference.AllOfInstanceTypesAreJSReceiver();
}
}
}
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 2e00d03ace..4a23b6781d 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -5,9 +5,9 @@
#ifndef V8_COMPILER_NODE_PROPERTIES_H_
#define V8_COMPILER_NODE_PROPERTIES_H_
+#include "src/common/globals.h"
#include "src/compiler/node.h"
#include "src/compiler/types.h"
-#include "src/globals.h"
#include "src/objects/map.h"
#include "src/zone/zone-handle-set.h"
@@ -118,7 +118,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// Find the last frame state that is effect-wise before the given node. This
// assumes a linear effect-chain up to a {CheckPoint} node in the graph.
- static Node* FindFrameStateBefore(Node* node);
+ // Returns {unreachable_sentinel} if {node} is determined to be unreachable.
+ static Node* FindFrameStateBefore(Node* node, Node* unreachable_sentinel);
// Collect the output-value projection for the given output index.
static Node* FindProjection(Node* node, size_t projection_index);
@@ -148,8 +149,7 @@ class V8_EXPORT_PRIVATE NodeProperties final {
enum InferReceiverMapsResult {
kNoReceiverMaps, // No receiver maps inferred.
kReliableReceiverMaps, // Receiver maps can be trusted.
- kUnreliableReceiverMaps // Receiver maps might have changed (side-effect),
- // but instance type is reliable.
+ kUnreliableReceiverMaps // Receiver maps might have changed (side-effect).
};
static InferReceiverMapsResult InferReceiverMaps(
JSHeapBroker* broker, Node* receiver, Node* effect,
@@ -159,9 +159,6 @@ class V8_EXPORT_PRIVATE NodeProperties final {
static base::Optional<MapRef> GetJSCreateMap(JSHeapBroker* broker,
Node* receiver);
- static bool HasInstanceTypeWitness(JSHeapBroker* broker, Node* receiver,
- Node* effect, InstanceType instance_type);
-
// Walks up the {effect} chain to check that there's no observable side-effect
// between the {effect} and it's {dominator}. Aborts the walk if there's join
// in the effect chain.
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 1da13e5786..50cfdf6248 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -56,7 +56,6 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
Node* node;
bool is_inline;
-#if DEBUG
// Verify that none of the inputs are {nullptr}.
for (int i = 0; i < input_count; i++) {
if (inputs[i] == nullptr) {
@@ -64,7 +63,6 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
op->mnemonic(), i);
}
}
-#endif
if (input_count > kMaxInlineCapacity) {
// Allocate out-of-line inputs.
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index e89e8b29b6..d7daca38ef 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -5,10 +5,10 @@
#ifndef V8_COMPILER_NODE_H_
#define V8_COMPILER_NODE_H_
+#include "src/common/globals.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
-#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index b38e4e4628..9ac8ec581f 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -7,7 +7,7 @@
#include <iosfwd>
-#include "src/globals.h"
+#include "src/common/globals.h"
// Opcodes for control operators.
#define CONTROL_OP_LIST(V) \
@@ -81,7 +81,8 @@
INNER_OP_LIST(V) \
V(Unreachable) \
V(DeadValue) \
- V(Dead)
+ V(Dead) \
+ V(StaticAssert)
// Opcodes for JavaScript operators.
#define JS_COMPARE_BINOP_LIST(V) \
@@ -293,6 +294,7 @@
V(SpeculativeNumberLessThanOrEqual) \
V(ReferenceEqual) \
V(SameValue) \
+ V(SameValueNumbersOnly) \
V(NumberSameValue) \
V(StringEqual) \
V(StringLessThan) \
@@ -409,14 +411,13 @@
V(LoadFieldByIndex) \
V(LoadField) \
V(LoadElement) \
- V(LoadMessage) \
V(LoadTypedElement) \
+ V(LoadFromObject) \
V(LoadDataViewElement) \
- V(LoadStackArgument) \
V(StoreField) \
V(StoreElement) \
- V(StoreMessage) \
V(StoreTypedElement) \
+ V(StoreToObject) \
V(StoreDataViewElement) \
V(StoreSignedSmallElement) \
V(TransitionAndStoreElement) \
@@ -885,7 +886,7 @@ class V8_EXPORT_PRIVATE IrOpcode {
// Returns true if opcode for common operator.
static bool IsCommonOpcode(Value value) {
- return kStart <= value && value <= kDead;
+ return kStart <= value && value <= kStaticAssert;
}
// Returns true if opcode for control operator.
@@ -934,6 +935,18 @@ class V8_EXPORT_PRIVATE IrOpcode {
(kWord32Equal <= value && value <= kFloat64LessThanOrEqual);
}
+ // Returns true if opcode for decompress operator.
+ static bool IsDecompressOpcode(Value value) {
+ return kChangeCompressedToTagged <= value &&
+ value <= kChangeCompressedSignedToTaggedSigned;
+ }
+
+ // Returns true if opcode for compress operator.
+ static bool IsCompressOpcode(Value value) {
+ return kChangeTaggedToCompressed <= value &&
+ value <= kChangeTaggedSignedToCompressedSigned;
+ }
+
static bool IsContextChainExtendingOpcode(Value value) {
return kJSCreateFunctionContext <= value && value <= kJSCreateBlockContext;
}
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 070c17c8e9..475623f76b 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -7,10 +7,10 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -1124,7 +1124,7 @@ Type OperationTyper::ToPrimitive(Type type) {
Type OperationTyper::Invert(Type type) {
DCHECK(type.Is(Type::Boolean()));
- DCHECK(!type.IsNone());
+ CHECK(!type.IsNone());
if (type.Is(singleton_false())) return singleton_true();
if (type.Is(singleton_true())) return singleton_false();
return type;
@@ -1187,7 +1187,16 @@ Type OperationTyper::SameValue(Type lhs, Type rhs) {
return Type::Boolean();
}
+Type OperationTyper::SameValueNumbersOnly(Type lhs, Type rhs) {
+ // SameValue and SamevalueNumbersOnly only differ in treatment of
+ // strings and biginits. Since the SameValue typer does not do anything
+ // special about strings or bigints, we can just use it here.
+ return SameValue(lhs, rhs);
+}
+
Type OperationTyper::StrictEqual(Type lhs, Type rhs) {
+ CHECK(!lhs.IsNone());
+ CHECK(!rhs.IsNone());
if (!JSType(lhs).Maybe(JSType(rhs))) return singleton_false();
if (lhs.Is(Type::NaN()) || rhs.Is(Type::NaN())) return singleton_false();
if (lhs.Is(Type::Number()) && rhs.Is(Type::Number()) &&
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index 52a2c52c1b..a905662ad1 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -8,7 +8,7 @@
#include "src/base/flags.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/types.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -55,6 +55,7 @@ class V8_EXPORT_PRIVATE OperationTyper {
// Comparison operators.
Type SameValue(Type lhs, Type rhs);
+ Type SameValueNumbersOnly(Type lhs, Type rhs);
Type StrictEqual(Type lhs, Type rhs);
// Check operators.
diff --git a/deps/v8/src/compiler/operator-properties.h b/deps/v8/src/compiler/operator-properties.h
index eb9e683f63..47db81df98 100644
--- a/deps/v8/src/compiler/operator-properties.h
+++ b/deps/v8/src/compiler/operator-properties.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_OPERATOR_PROPERTIES_H_
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index db1b16c9b8..c393aa257b 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -10,8 +10,8 @@
#include "src/base/compiler-specific.h"
#include "src/base/flags.h"
#include "src/base/functional.h"
-#include "src/globals.h"
-#include "src/handles.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index 659b3f4c8f..d497fc5669 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -4,11 +4,11 @@
#include "src/compiler/osr.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/frame.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
-#include "src/optimized-compilation-info.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/per-isolate-compiler-cache.h b/deps/v8/src/compiler/per-isolate-compiler-cache.h
index 70f53c38e1..b715950c0c 100644
--- a/deps/v8/src/compiler/per-isolate-compiler-cache.h
+++ b/deps/v8/src/compiler/per-isolate-compiler-cache.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_PER_ISOLATE_COMPILER_CACHE_H_
#include "src/compiler/refs-map.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index e89a3e56ca..9cfd1cf94d 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -4,11 +4,11 @@
#include <memory>
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/zone-stats.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
-#include "src/optimized-compilation-info.h"
#include "src/tracing/trace-event.h"
namespace v8 {
@@ -66,8 +66,7 @@ PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
phase_name_(nullptr) {
if (info->has_shared_info()) {
source_size_ = static_cast<size_t>(info->shared_info()->SourceSize());
- std::unique_ptr<char[]> name =
- info->shared_info()->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = info->shared_info()->DebugName().ToCString();
function_name_ = name.get();
}
total_stats_.Begin(this);
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index 21ef2b02aa..5d874f0588 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -9,8 +9,8 @@
#include <string>
#include "src/base/platform/elapsed-timer.h"
-#include "src/compilation-statistics.h"
#include "src/compiler/zone-stats.h"
+#include "src/diagnostics/compilation-statistics.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 95c066aeaf..e771cef123 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -9,13 +9,13 @@
#include <memory>
#include <sstream>
-#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
-#include "src/bootstrapper.h"
-#include "src/code-tracer.h"
-#include "src/compiler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/register-configuration.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
@@ -35,6 +35,7 @@
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
#include "src/compiler/dead-code-elimination.h"
+#include "src/compiler/decompression-elimination.h"
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/escape-analysis-reducer.h"
#include "src/compiler/escape-analysis.h"
@@ -76,16 +77,16 @@
#include "src/compiler/verifier.h"
#include "src/compiler/wasm-compiler.h"
#include "src/compiler/zone-stats.h"
-#include "src/disassembler.h"
-#include "src/isolate-inl.h"
+#include "src/diagnostics/code-tracer.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/isolate-inl.h"
+#include "src/init/bootstrapper.h"
#include "src/objects/shared-function-info.h"
-#include "src/optimized-compilation-info.h"
-#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
-#include "src/register-configuration.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
-#include "src/utils.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/utils.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
@@ -113,6 +114,7 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, ZONE_NAME),
codegen_zone_(codegen_zone_scope_.zone()),
+ broker_(new JSHeapBroker(isolate_, info_->zone())),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(AssemblerOptions::Default(isolate)) {
@@ -131,7 +133,6 @@ class PipelineData {
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
- broker_ = new (info_->zone()) JSHeapBroker(isolate_, info_->zone());
dependencies_ =
new (info_->zone()) CompilationDependencies(broker_, info_->zone());
}
@@ -226,7 +227,6 @@ class PipelineData {
delete code_generator_;
code_generator_ = nullptr;
DeleteTyper();
-
DeleteRegisterAllocationZone();
DeleteInstructionZone();
DeleteCodegenZone();
@@ -274,6 +274,11 @@ class PipelineData {
}
JSHeapBroker* broker() const { return broker_; }
+ std::unique_ptr<JSHeapBroker> ReleaseBroker() {
+ std::unique_ptr<JSHeapBroker> broker(broker_);
+ broker_ = nullptr;
+ return broker;
+ }
Schedule* schedule() const { return schedule_; }
void set_schedule(Schedule* schedule) {
@@ -286,7 +291,6 @@ class PipelineData {
Zone* codegen_zone() const { return codegen_zone_; }
InstructionSequence* sequence() const { return sequence_; }
Frame* frame() const { return frame_; }
- std::vector<Handle<Map>>* embedded_maps() { return &embedded_maps_; }
Zone* register_allocation_zone() const { return register_allocation_zone_; }
RegisterAllocationData* register_allocation_data() const {
@@ -362,6 +366,7 @@ class PipelineData {
codegen_zone_scope_.Destroy();
codegen_zone_ = nullptr;
dependencies_ = nullptr;
+ delete broker_;
broker_ = nullptr;
frame_ = nullptr;
}
@@ -419,7 +424,6 @@ class PipelineData {
void InitializeCodeGenerator(Linkage* linkage,
std::unique_ptr<AssemblerBuffer> buffer) {
DCHECK_NULL(code_generator_);
-
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
@@ -490,10 +494,6 @@ class PipelineData {
JSHeapBroker* broker_ = nullptr;
Frame* frame_ = nullptr;
- // embedded_maps_ keeps track of maps we've embedded as Uint32 constants.
- // We do this in order to notify the garbage collector at code-gen time.
- std::vector<Handle<Map>> embedded_maps_;
-
// All objects in the following group of fields are allocated in
// register_allocation_zone_. They are all set to nullptr when the zone is
// destroyed.
@@ -562,18 +562,18 @@ namespace {
void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
int source_id, Handle<SharedFunctionInfo> shared) {
- if (!shared->script()->IsUndefined(isolate)) {
+ if (!shared->script().IsUndefined(isolate)) {
Handle<Script> script(Script::cast(shared->script()), isolate);
- if (!script->source()->IsUndefined(isolate)) {
+ if (!script->source().IsUndefined(isolate)) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
Object source_name = script->name();
OFStream os(tracing_scope.file());
os << "--- FUNCTION SOURCE (";
- if (source_name->IsString()) {
- os << String::cast(source_name)->ToCString().get() << ":";
+ if (source_name.IsString()) {
+ os << String::cast(source_name).ToCString().get() << ":";
}
- os << shared->DebugName()->ToCString().get() << ") id{";
+ os << shared->DebugName().ToCString().get() << ") id{";
os << info->optimization_id() << "," << source_id << "} start{";
os << shared->StartPosition() << "} ---\n";
{
@@ -599,7 +599,7 @@ void PrintInlinedFunctionInfo(
int inlining_id, const OptimizedCompilationInfo::InlinedFunctionHolder& h) {
CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
OFStream os(tracing_scope.file());
- os << "INLINE (" << h.shared_info->DebugName()->ToCString().get() << ") id{"
+ os << "INLINE (" << h.shared_info->DebugName().ToCString().get() << ") id{"
<< info->optimization_id() << "," << source_id << "} AS " << inlining_id
<< " AT ";
const SourcePosition position = h.position.position;
@@ -649,11 +649,11 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION;
if (print_source) {
Handle<SharedFunctionInfo> shared = info->shared_info();
- if (shared->script()->IsScript() &&
- !Script::cast(shared->script())->source()->IsUndefined(isolate)) {
+ if (shared->script().IsScript() &&
+ !Script::cast(shared->script()).source().IsUndefined(isolate)) {
os << "--- Raw source ---\n";
StringCharacterStream stream(
- String::cast(Script::cast(shared->script())->source()),
+ String::cast(Script::cast(shared->script()).source()),
shared->StartPosition());
// fun->end_position() points to the last character in the stream. We
// need to compensate by adding one to calculate the length.
@@ -951,7 +951,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
compilation_info()->MarkAsAllocationFoldingEnabled();
}
- if (compilation_info()->closure()->raw_feedback_cell()->map() ==
+ if (compilation_info()->closure()->raw_feedback_cell().map() ==
ReadOnlyRoots(isolate).one_closure_cell_map()) {
compilation_info()->MarkAsFunctionContextSpecializing();
}
@@ -1011,42 +1011,32 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
}
compilation_info()->SetCode(code);
- compilation_info()->native_context()->AddOptimizedCode(*code);
+ compilation_info()->native_context().AddOptimizedCode(*code);
RegisterWeakObjectsInOptimizedCode(code, isolate);
return SUCCEEDED;
}
void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
Handle<Code> code, Isolate* isolate) {
+ std::vector<Handle<Map>> maps;
DCHECK(code->is_optimized_code());
- std::vector<Handle<Map>> retained_maps;
{
DisallowHeapAllocation no_gc;
- int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ int const mode_mask = RelocInfo::EmbeddedObjectModeMask();
for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT &&
- code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
+ if (code->IsWeakObjectInOptimizedCode(it.rinfo()->target_object())) {
Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
isolate);
if (object->IsMap()) {
- retained_maps.push_back(Handle<Map>::cast(object));
+ maps.push_back(Handle<Map>::cast(object));
}
}
}
}
-
- for (Handle<Map> map : retained_maps) {
+ for (Handle<Map> map : maps) {
isolate->heap()->AddRetainedMap(map);
}
-
- // Additionally, gather embedded maps if we have any.
- for (Handle<Map> map : *data_.embedded_maps()) {
- if (code->IsWeakObjectInOptimizedCode(*map)) {
- isolate->heap()->AddRetainedMap(map);
- }
- }
-
code->set_can_have_weak_objects(true);
}
@@ -1061,19 +1051,21 @@ struct GraphBuilderPhase {
static const char* phase_name() { return "V8.TFBytecodeGraphBuilder"; }
void Run(PipelineData* data, Zone* temp_zone) {
- JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags;
+ BytecodeGraphBuilderFlags flags;
+ if (data->info()->is_analyze_environment_liveness()) {
+ flags |= BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness;
+ }
if (data->info()->is_bailout_on_uninitialized()) {
- flags |= JSTypeHintLowering::kBailoutOnUninitialized;
+ flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
}
- CallFrequency frequency = CallFrequency(1.0f);
- BytecodeGraphBuilder graph_builder(
- temp_zone, data->info()->bytecode_array(), data->info()->shared_info(),
+ CallFrequency frequency(1.0f);
+ BuildGraphFromBytecode(
+ data->broker(), temp_zone, data->info()->bytecode_array(),
+ data->info()->shared_info(),
handle(data->info()->closure()->feedback_vector(), data->isolate()),
data->info()->osr_offset(), data->jsgraph(), frequency,
data->source_positions(), data->native_context(),
- SourcePosition::kNotInlined, flags, true,
- data->info()->is_analyze_environment_liveness());
- graph_builder.CreateGraph();
+ SourcePosition::kNotInlined, flags);
}
};
@@ -1082,12 +1074,12 @@ namespace {
Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
Context current = closure->context();
size_t distance = 0;
- while (!current->IsNativeContext()) {
- if (current->IsModuleContext()) {
+ while (!current.IsNativeContext()) {
+ if (current.IsModuleContext()) {
return Just(
- OuterContext(handle(current, current->GetIsolate()), distance));
+ OuterContext(handle(current, current.GetIsolate()), distance));
}
- current = current->previous();
+ current = current.previous();
distance++;
}
return Nothing<OuterContext>();
@@ -1243,8 +1235,19 @@ struct SerializationPhase {
static const char* phase_name() { return "V8.TFSerializeBytecode"; }
void Run(PipelineData* data, Zone* temp_zone) {
- SerializerForBackgroundCompilation serializer(data->broker(), temp_zone,
- data->info()->closure());
+ SerializerForBackgroundCompilationFlags flags;
+ if (data->info()->is_bailout_on_uninitialized()) {
+ flags |= SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized;
+ }
+ if (data->info()->is_source_positions_enabled()) {
+ flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
+ }
+ if (data->info()->is_osr()) {
+ flags |= SerializerForBackgroundCompilationFlag::kOsr;
+ }
+ SerializerForBackgroundCompilation serializer(
+ data->broker(), data->dependencies(), temp_zone,
+ data->info()->closure(), flags);
serializer.Run();
}
};
@@ -1409,20 +1412,19 @@ struct EffectControlLinearizationPhase {
TraceSchedule(data->info(), data, schedule,
"effect linearization schedule");
- EffectControlLinearizer::MaskArrayIndexEnable mask_array_index =
+ MaskArrayIndexEnable mask_array_index =
(data->info()->GetPoisoningMitigationLevel() !=
PoisoningMitigationLevel::kDontPoison)
- ? EffectControlLinearizer::kMaskArrayIndex
- : EffectControlLinearizer::kDoNotMaskArrayIndex;
+ ? MaskArrayIndexEnable::kMaskArrayIndex
+ : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
// Post-pass for wiring the control/effects
// - connect allocating representation changes into the control&effect
// chains and lower them,
// - get rid of the region markers,
// - introduce effect phis and rewire effects to get SSA again.
- EffectControlLinearizer linearizer(
- data->jsgraph(), schedule, temp_zone, data->source_positions(),
- data->node_origins(), mask_array_index, data->embedded_maps());
- linearizer.Run();
+ LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
+ data->source_positions(), data->node_origins(),
+ mask_array_index);
}
{
// The {EffectControlLinearizer} might leave {Dead} nodes behind, so we
@@ -1510,7 +1512,8 @@ struct MemoryOptimizationPhase {
data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
data->info()->is_allocation_folding_enabled()
? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
- : MemoryOptimizer::AllocationFolding::kDontAllocationFolding);
+ : MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
+ data->debug_name());
optimizer.Optimize();
}
};
@@ -1532,6 +1535,11 @@ struct LateOptimizationPhase {
data->machine(), temp_zone);
SelectLowering select_lowering(data->jsgraph()->graph(),
data->jsgraph()->common());
+#ifdef V8_COMPRESS_POINTERS
+ DecompressionElimination decompression_elimination(
+ &graph_reducer, data->graph(), data->machine(), data->common());
+ AddReducer(data, &graph_reducer, &decompression_elimination);
+#endif
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
@@ -1571,6 +1579,11 @@ struct CsaOptimizationPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
+#ifdef V8_COMPRESS_POINTERS
+ DecompressionElimination decompression_elimination(
+ &graph_reducer, data->graph(), data->machine(), data->common());
+ AddReducer(data, &graph_reducer, &decompression_elimination);
+#endif
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
@@ -1934,6 +1947,7 @@ struct VerifyGraphPhase {
Verifier::CodeType code_type;
switch (data->info()->code_kind()) {
case Code::WASM_FUNCTION:
+ case Code::WASM_TO_CAPI_FUNCTION:
case Code::WASM_TO_JS_FUNCTION:
case Code::JS_TO_WASM_FUNCTION:
case Code::WASM_INTERPRETER_ENTRY:
@@ -2371,16 +2385,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForWasmHeapStub(
// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
- JSHeapBroker** out_broker) {
+ std::unique_ptr<JSHeapBroker>* out_broker) {
ZoneStats zone_stats(isolate->allocator());
std::unique_ptr<PipelineStatistics> pipeline_statistics(
CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
&zone_stats));
PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
- if (out_broker != nullptr) {
- *out_broker = data.broker();
- }
-
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
@@ -2392,6 +2402,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
Handle<Code> code;
if (pipeline.FinalizeCode(out_broker == nullptr).ToHandle(&code) &&
pipeline.CommitDependencies(code)) {
+ if (out_broker != nullptr) *out_broker = data.ReleaseBroker();
return code;
}
return MaybeHandle<Code>();
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 334b89df77..7f9a242d98 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -7,9 +7,9 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
#include "src/objects/code.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -79,12 +79,12 @@ class Pipeline : public AllStatic {
// The following methods are for testing purposes only. Avoid production use.
// ---------------------------------------------------------------------------
- // Run the pipeline on JavaScript bytecode and generate code.
- // If requested, hands out the heap broker, which is allocated
- // in {info}'s zone.
+ // Run the pipeline on JavaScript bytecode and generate code. If requested,
+ // hands out the heap broker on success, transferring its ownership to the
+ // caller.
V8_EXPORT_PRIVATE static MaybeHandle<Code> GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
- JSHeapBroker** out_broker = nullptr);
+ std::unique_ptr<JSHeapBroker>* out_broker = nullptr);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 75b3c70f65..dafd481797 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -10,11 +10,11 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
-#include "src/lookup.h"
#include "src/objects/heap-number.h"
+#include "src/objects/lookup.h"
-#include "src/field-index-inl.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/objects/field-index-inl.h"
namespace v8 {
namespace internal {
@@ -32,7 +32,8 @@ SimplifiedOperatorBuilder* PropertyAccessBuilder::simplified() const {
return jsgraph()->simplified();
}
-bool HasOnlyStringMaps(JSHeapBroker* broker, MapHandles const& maps) {
+bool HasOnlyStringMaps(JSHeapBroker* broker,
+ ZoneVector<Handle<Map>> const& maps) {
for (auto map : maps) {
MapRef map_ref(broker, map);
if (!map_ref.IsStringMap()) return false;
@@ -42,7 +43,8 @@ bool HasOnlyStringMaps(JSHeapBroker* broker, MapHandles const& maps) {
namespace {
-bool HasOnlyNumberMaps(JSHeapBroker* broker, MapHandles const& maps) {
+bool HasOnlyNumberMaps(JSHeapBroker* broker,
+ ZoneVector<Handle<Map>> const& maps) {
for (auto map : maps) {
MapRef map_ref(broker, map);
if (map_ref.instance_type() != HEAP_NUMBER_TYPE) return false;
@@ -52,10 +54,9 @@ bool HasOnlyNumberMaps(JSHeapBroker* broker, MapHandles const& maps) {
} // namespace
-bool PropertyAccessBuilder::TryBuildStringCheck(JSHeapBroker* broker,
- MapHandles const& maps,
- Node** receiver, Node** effect,
- Node* control) {
+bool PropertyAccessBuilder::TryBuildStringCheck(
+ JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps, Node** receiver,
+ Node** effect, Node* control) {
if (HasOnlyStringMaps(broker, maps)) {
// Monormorphic string access (ignoring the fact that there are multiple
// String maps).
@@ -67,10 +68,9 @@ bool PropertyAccessBuilder::TryBuildStringCheck(JSHeapBroker* broker,
return false;
}
-bool PropertyAccessBuilder::TryBuildNumberCheck(JSHeapBroker* broker,
- MapHandles const& maps,
- Node** receiver, Node** effect,
- Node* control) {
+bool PropertyAccessBuilder::TryBuildNumberCheck(
+ JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps, Node** receiver,
+ Node** effect, Node* control) {
if (HasOnlyNumberMaps(broker, maps)) {
// Monomorphic number access (we also deal with Smis here).
*receiver = *effect =
@@ -81,63 +81,9 @@ bool PropertyAccessBuilder::TryBuildNumberCheck(JSHeapBroker* broker,
return false;
}
-namespace {
-
-bool NeedsCheckHeapObject(Node* receiver) {
- switch (receiver->opcode()) {
- case IrOpcode::kConvertReceiver:
- case IrOpcode::kHeapConstant:
- case IrOpcode::kJSCloneObject:
- case IrOpcode::kJSConstruct:
- case IrOpcode::kJSConstructForwardVarargs:
- case IrOpcode::kJSConstructWithArrayLike:
- case IrOpcode::kJSConstructWithSpread:
- case IrOpcode::kJSCreate:
- case IrOpcode::kJSCreateArguments:
- case IrOpcode::kJSCreateArray:
- case IrOpcode::kJSCreateArrayFromIterable:
- case IrOpcode::kJSCreateArrayIterator:
- case IrOpcode::kJSCreateAsyncFunctionObject:
- case IrOpcode::kJSCreateBoundFunction:
- case IrOpcode::kJSCreateClosure:
- case IrOpcode::kJSCreateCollectionIterator:
- case IrOpcode::kJSCreateEmptyLiteralArray:
- case IrOpcode::kJSCreateEmptyLiteralObject:
- case IrOpcode::kJSCreateGeneratorObject:
- case IrOpcode::kJSCreateIterResultObject:
- case IrOpcode::kJSCreateKeyValueArray:
- case IrOpcode::kJSCreateLiteralArray:
- case IrOpcode::kJSCreateLiteralObject:
- case IrOpcode::kJSCreateLiteralRegExp:
- case IrOpcode::kJSCreateObject:
- case IrOpcode::kJSCreatePromise:
- case IrOpcode::kJSCreateStringIterator:
- case IrOpcode::kJSCreateTypedArray:
- case IrOpcode::kJSGetSuperConstructor:
- case IrOpcode::kJSToName:
- case IrOpcode::kJSToObject:
- case IrOpcode::kJSToString:
- case IrOpcode::kTypeOf:
- return false;
- default:
- return true;
- }
-}
-
-} // namespace
-
-Node* PropertyAccessBuilder::BuildCheckHeapObject(Node* receiver, Node** effect,
- Node* control) {
- if (NeedsCheckHeapObject(receiver)) {
- receiver = *effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, *effect, control);
- }
- return receiver;
-}
-
-void PropertyAccessBuilder::BuildCheckMaps(Node* receiver, Node** effect,
- Node* control,
- MapHandles const& receiver_maps) {
+void PropertyAccessBuilder::BuildCheckMaps(
+ Node* receiver, Node** effect, Node* control,
+ ZoneVector<Handle<Map>> const& receiver_maps) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
MapRef receiver_map = m.Ref(broker()).map();
@@ -186,11 +132,26 @@ Node* PropertyAccessBuilder::ResolveHolder(
return receiver;
}
+MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
+ Representation representation) {
+ switch (representation.kind()) {
+ case Representation::kSmi:
+ return MachineType::RepCompressedTaggedSigned();
+ case Representation::kDouble:
+ return MachineRepresentation::kFloat64;
+ case Representation::kHeapObject:
+ return MachineType::RepCompressedTaggedPointer();
+ case Representation::kTagged:
+ return MachineType::RepCompressedTagged();
+ default:
+ UNREACHABLE();
+ }
+}
+
Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
Node* receiver) {
- // Optimize immutable property loads.
-
+ if (!access_info.IsDataConstant()) return nullptr;
// First, determine if we have a constant holder to load from.
Handle<JSObject> holder;
// If {access_info} has a holder, just use it.
@@ -212,47 +173,15 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
holder = Handle<JSObject>::cast(m.Value());
}
- // TODO(ishell): Use something simpler like
- //
- // Handle<Object> value =
- // JSObject::FastPropertyAt(Handle<JSObject>::cast(m.Value()),
- // Representation::Tagged(), field_index);
- //
- // here, once we have the immutable bit in the access_info.
-
- // TODO(turbofan): Given that we already have the field_index here, we
- // might be smarter in the future and not rely on the LookupIterator.
- LookupIterator it(isolate(), holder, name.object(),
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.state() == LookupIterator::DATA) {
- bool is_readonly_non_configurable = it.IsReadOnly() && !it.IsConfigurable();
- if (is_readonly_non_configurable ||
- (FLAG_track_constant_fields && access_info.IsDataConstantField())) {
- Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
- if (!is_readonly_non_configurable) {
- // It's necessary to add dependency on the map that introduced
- // the field.
- DCHECK(access_info.IsDataConstantField());
- DCHECK(!it.is_dictionary_holder());
- MapRef map(broker(),
- handle(it.GetHolder<HeapObject>()->map(), isolate()));
- map.SerializeOwnDescriptors(); // TODO(neis): Remove later.
- if (dependencies()->DependOnFieldConstness(
- map, it.GetFieldDescriptorIndex()) !=
- PropertyConstness::kConst) {
- return nullptr;
- }
- }
- return value;
- }
- }
- return nullptr;
+ Handle<Object> value = JSObject::FastPropertyAt(
+ holder, access_info.field_representation(), access_info.field_index());
+ return jsgraph()->Constant(value);
}
Node* PropertyAccessBuilder::BuildLoadDataField(
NameRef const& name, PropertyAccessInfo const& access_info, Node* receiver,
Node** effect, Node** control) {
- DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
+ DCHECK(access_info.IsDataField() || access_info.IsDataConstant());
if (Node* value =
TryBuildLoadConstantDataField(name, access_info, receiver)) {
return value;
@@ -261,13 +190,16 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
FieldIndex const field_index = access_info.field_index();
Type const field_type = access_info.field_type();
MachineRepresentation const field_representation =
- access_info.field_representation();
+ ConvertRepresentation(access_info.field_representation());
Node* storage = ResolveHolder(access_info, receiver);
if (!field_index.is_inobject()) {
storage = *effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectPropertiesOrHash()),
storage, *effect, *control);
}
+ PropertyConstness constness = access_info.IsDataConstant()
+ ? PropertyConstness::kConst
+ : PropertyConstness::kMutable;
FieldAccess field_access = {
kTaggedBase,
field_index.offset(),
@@ -276,21 +208,28 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kCritical};
+ LoadSensitivity::kCritical,
+ constness};
if (field_representation == MachineRepresentation::kFloat64) {
- if (!field_index.is_inobject() || field_index.is_hidden_field() ||
- !FLAG_unbox_double_fields) {
+ if (!field_index.is_inobject() || !FLAG_unbox_double_fields) {
FieldAccess const storage_access = {
- kTaggedBase, field_index.offset(),
- name.object(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ kTaggedBase,
+ field_index.offset(),
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TypeCompressedTaggedPointer(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kCritical,
+ constness};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
field_access.offset = HeapNumber::kValueOffset;
field_access.name = MaybeHandle<Name>();
}
- } else if (field_representation == MachineRepresentation::kTaggedPointer) {
+ } else if (field_representation == MachineRepresentation::kTaggedPointer ||
+ field_representation ==
+ MachineRepresentation::kCompressedPointer) {
// Remember the map of the field value, if its map is stable. This is
// used by the LoadElimination to eliminate map checks on the result.
Handle<Map> field_map;
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
index 61c075f789..f3c071a88a 100644
--- a/deps/v8/src/compiler/property-access-builder.h
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -7,8 +7,9 @@
#include <vector>
+#include "src/codegen/machine-type.h"
#include "src/compiler/js-heap-broker.h"
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/objects/map.h"
#include "src/zone/zone-containers.h"
@@ -33,15 +34,16 @@ class PropertyAccessBuilder {
// Builds the appropriate string check if the maps are only string
// maps.
- bool TryBuildStringCheck(JSHeapBroker* broker, MapHandles const& maps,
- Node** receiver, Node** effect, Node* control);
+ bool TryBuildStringCheck(JSHeapBroker* broker,
+ ZoneVector<Handle<Map>> const& maps, Node** receiver,
+ Node** effect, Node* control);
// Builds a number check if all maps are number maps.
- bool TryBuildNumberCheck(JSHeapBroker* broker, MapHandles const& maps,
- Node** receiver, Node** effect, Node* control);
+ bool TryBuildNumberCheck(JSHeapBroker* broker,
+ ZoneVector<Handle<Map>> const& maps, Node** receiver,
+ Node** effect, Node* control);
- Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
void BuildCheckMaps(Node* receiver, Node** effect, Node* control,
- std::vector<Handle<Map>> const& receiver_maps);
+ ZoneVector<Handle<Map>> const& receiver_maps);
Node* BuildCheckValue(Node* receiver, Node** effect, Node* control,
Handle<HeapObject> value);
@@ -51,6 +53,9 @@ class PropertyAccessBuilder {
PropertyAccessInfo const& access_info,
Node* receiver, Node** effect, Node** control);
+ static MachineRepresentation ConvertRepresentation(
+ Representation representation);
+
private:
JSGraph* jsgraph() const { return jsgraph_; }
JSHeapBroker* broker() const { return broker_; }
@@ -72,7 +77,8 @@ class PropertyAccessBuilder {
CompilationDependencies* dependencies_;
};
-bool HasOnlyStringMaps(JSHeapBroker* broker, MapHandles const& maps);
+bool HasOnlyStringMaps(JSHeapBroker* broker,
+ ZoneVector<Handle<Map>> const& maps);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 49e47475ed..dc1edc710d 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -69,9 +69,12 @@ Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
: RelocatableInt32Constant(static_cast<int>(value), rmode);
}
-Node* RawMachineAssembler::OptimizedAllocate(Node* size,
- AllocationType allocation) {
- return AddNode(simplified()->AllocateRaw(Type::Any(), allocation), size);
+Node* RawMachineAssembler::OptimizedAllocate(
+ Node* size, AllocationType allocation,
+ AllowLargeObjects allow_large_objects) {
+ return AddNode(
+ simplified()->AllocateRaw(Type::Any(), allocation, allow_large_objects),
+ size);
}
Schedule* RawMachineAssembler::Export() {
@@ -572,6 +575,10 @@ void RawMachineAssembler::Comment(const std::string& msg) {
AddNode(machine()->Comment(zone_buffer));
}
+void RawMachineAssembler::StaticAssert(Node* value) {
+ AddNode(common()->StaticAssert(), value);
+}
+
Node* RawMachineAssembler::CallN(CallDescriptor* call_descriptor,
int input_count, Node* const* inputs) {
DCHECK(!call_descriptor->NeedsFrameState());
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 0dc11888ac..67326ac730 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -7,7 +7,9 @@
#include <initializer_list>
-#include "src/assembler.h"
+#include "src/base/type-traits.h"
+#include "src/codegen/assembler.h"
+#include "src/common/globals.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
@@ -16,10 +18,9 @@
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/compiler/simplified-operator.h"
-#include "src/globals.h"
+#include "src/compiler/write-barrier-kind.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/type-traits.h"
namespace v8 {
namespace internal {
@@ -126,57 +127,108 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Memory Operations.
- Node* Load(MachineType rep, Node* base,
+ std::pair<MachineType, const Operator*> InsertDecompressionIfNeeded(
+ MachineType type) {
+ const Operator* decompress_op = nullptr;
+ if (COMPRESS_POINTERS_BOOL) {
+ switch (type.representation()) {
+ case MachineRepresentation::kTaggedPointer:
+ type = MachineType::CompressedPointer();
+ decompress_op = machine()->ChangeCompressedPointerToTaggedPointer();
+ break;
+ case MachineRepresentation::kTaggedSigned:
+ type = MachineType::CompressedSigned();
+ decompress_op = machine()->ChangeCompressedSignedToTaggedSigned();
+ break;
+ case MachineRepresentation::kTagged:
+ type = MachineType::AnyCompressed();
+ decompress_op = machine()->ChangeCompressedToTagged();
+ break;
+ default:
+ break;
+ }
+ }
+ return std::make_pair(type, decompress_op);
+ }
+ Node* Load(MachineType type, Node* base,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return Load(rep, base, IntPtrConstant(0), needs_poisoning);
+ return Load(type, base, IntPtrConstant(0), needs_poisoning);
}
- Node* Load(MachineType rep, Node* base, Node* index,
+ Node* Load(MachineType type, Node* base, Node* index,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- // change_op is used below to change to the correct Tagged representation
- const Operator* change_op = nullptr;
-#ifdef V8_COMPRESS_POINTERS
- switch (rep.representation()) {
- case MachineRepresentation::kTaggedPointer:
- rep = MachineType::CompressedPointer();
- change_op = machine()->ChangeCompressedPointerToTaggedPointer();
- break;
- case MachineRepresentation::kTaggedSigned:
- rep = MachineType::CompressedSigned();
- change_op = machine()->ChangeCompressedSignedToTaggedSigned();
- break;
- case MachineRepresentation::kTagged:
- rep = MachineType::AnyCompressed();
- change_op = machine()->ChangeCompressedToTagged();
- break;
- default:
- break;
- }
-#endif
-
- const Operator* op = machine()->Load(rep);
+ const Operator* decompress_op;
+ std::tie(type, decompress_op) = InsertDecompressionIfNeeded(type);
+ const Operator* op = machine()->Load(type);
CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level_);
if (needs_poisoning == LoadSensitivity::kCritical &&
poisoning_level_ == PoisoningMitigationLevel::kPoisonCriticalOnly) {
- op = machine()->PoisonedLoad(rep);
+ op = machine()->PoisonedLoad(type);
}
Node* load = AddNode(op, base, index);
- if (change_op != nullptr) {
- load = AddNode(change_op, load);
+ if (decompress_op != nullptr) {
+ load = AddNode(decompress_op, load);
+ }
+ return load;
+ }
+ Node* LoadFromObject(
+ MachineType type, Node* base, Node* offset,
+ LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ const Operator* decompress_op;
+ std::tie(type, decompress_op) = InsertDecompressionIfNeeded(type);
+ CHECK_EQ(needs_poisoning, LoadSensitivity::kSafe);
+ ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier};
+ Node* load = AddNode(simplified()->LoadFromObject(access), base, offset);
+ if (decompress_op != nullptr) {
+ load = AddNode(decompress_op, load);
}
return load;
}
+
+ std::pair<MachineRepresentation, Node*> InsertCompressionIfNeeded(
+ MachineRepresentation rep, Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ switch (rep) {
+ case MachineRepresentation::kTaggedPointer:
+ rep = MachineRepresentation::kCompressedPointer;
+ value = AddNode(machine()->ChangeTaggedPointerToCompressedPointer(),
+ value);
+ break;
+ case MachineRepresentation::kTaggedSigned:
+ rep = MachineRepresentation::kCompressedSigned;
+ value =
+ AddNode(machine()->ChangeTaggedSignedToCompressedSigned(), value);
+ break;
+ case MachineRepresentation::kTagged:
+ rep = MachineRepresentation::kCompressed;
+ value = AddNode(machine()->ChangeTaggedToCompressed(), value);
+ break;
+ default:
+ break;
+ }
+ }
+ return std::make_pair(rep, value);
+ }
Node* Store(MachineRepresentation rep, Node* base, Node* value,
WriteBarrierKind write_barrier) {
return Store(rep, base, IntPtrConstant(0), value, write_barrier);
}
Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value,
WriteBarrierKind write_barrier) {
+ std::tie(rep, value) = InsertCompressionIfNeeded(rep, value);
return AddNode(machine()->Store(StoreRepresentation(rep, write_barrier)),
base, index, value);
}
+ void StoreToObject(MachineRepresentation rep, Node* object, Node* offset,
+ Node* value, WriteBarrierKind write_barrier) {
+ std::tie(rep, value) = InsertCompressionIfNeeded(rep, value);
+ ObjectAccess access = {MachineType::TypeForRepresentation(rep),
+ write_barrier};
+ AddNode(simplified()->StoreToObject(access), object, offset, value);
+ }
void OptimizedStoreField(MachineRepresentation rep, Node* object, int offset,
Node* value, WriteBarrierKind write_barrier) {
+ std::tie(rep, value) = InsertCompressionIfNeeded(rep, value);
AddNode(simplified()->StoreField(FieldAccess(
BaseTaggedness::kTaggedBase, offset, MaybeHandle<Name>(),
MaybeHandle<Map>(), Type::Any(),
@@ -184,18 +236,27 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
object, value);
}
void OptimizedStoreMap(Node* object, Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ DCHECK(AccessBuilder::ForMap().machine_type.IsCompressedPointer());
+ value =
+ AddNode(machine()->ChangeTaggedPointerToCompressedPointer(), value);
+ }
AddNode(simplified()->StoreField(AccessBuilder::ForMap()), object, value);
}
Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }
- Node* OptimizedAllocate(Node* size, AllocationType allocation);
+ Node* OptimizedAllocate(Node* size, AllocationType allocation,
+ AllowLargeObjects allow_large_objects);
// Unaligned memory operations
Node* UnalignedLoad(MachineType type, Node* base) {
return UnalignedLoad(type, base, IntPtrConstant(0));
}
Node* UnalignedLoad(MachineType type, Node* base, Node* index) {
- if (machine()->UnalignedLoadSupported(type.representation())) {
+ MachineRepresentation rep = type.representation();
+ // Tagged or compressed should never be unaligned
+ DCHECK(!(IsAnyTagged(rep) || IsAnyCompressed(rep)));
+ if (machine()->UnalignedLoadSupported(rep)) {
return AddNode(machine()->Load(type), base, index);
} else {
return AddNode(machine()->UnalignedLoad(type), base, index);
@@ -206,6 +267,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
Node* UnalignedStore(MachineRepresentation rep, Node* base, Node* index,
Node* value) {
+ // Tagged or compressed should never be unaligned
+ DCHECK(!(IsAnyTagged(rep) || IsAnyCompressed(rep)));
if (machine()->UnalignedStoreSupported(rep)) {
return AddNode(machine()->Store(StoreRepresentation(
rep, WriteBarrierKind::kNoWriteBarrier)),
@@ -249,21 +312,21 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
DCHECK_NULL(value_high);
return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
}
-#define ATOMIC_FUNCTION(name) \
- Node* Atomic##name(MachineType rep, Node* base, Node* index, Node* value, \
- Node* value_high) { \
- if (rep.representation() == MachineRepresentation::kWord64) { \
- if (machine()->Is64()) { \
- DCHECK_NULL(value_high); \
- return AddNode(machine()->Word64Atomic##name(rep), base, index, \
- value); \
- } else { \
- return AddNode(machine()->Word32AtomicPair##name(), base, index, \
- VALUE_HALVES); \
- } \
- } \
- DCHECK_NULL(value_high); \
- return AddNode(machine()->Word32Atomic##name(rep), base, index, value); \
+#define ATOMIC_FUNCTION(name) \
+ Node* Atomic##name(MachineType type, Node* base, Node* index, Node* value, \
+ Node* value_high) { \
+ if (type.representation() == MachineRepresentation::kWord64) { \
+ if (machine()->Is64()) { \
+ DCHECK_NULL(value_high); \
+ return AddNode(machine()->Word64Atomic##name(type), base, index, \
+ value); \
+ } else { \
+ return AddNode(machine()->Word32AtomicPair##name(), base, index, \
+ VALUE_HALVES); \
+ } \
+ } \
+ DCHECK_NULL(value_high); \
+ return AddNode(machine()->Word32Atomic##name(type), base, index, value); \
}
ATOMIC_FUNCTION(Exchange)
ATOMIC_FUNCTION(Add)
@@ -274,15 +337,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
#undef ATOMIC_FUNCTION
#undef VALUE_HALVES
- Node* AtomicCompareExchange(MachineType rep, Node* base, Node* index,
+ Node* AtomicCompareExchange(MachineType type, Node* base, Node* index,
Node* old_value, Node* old_value_high,
Node* new_value, Node* new_value_high) {
- if (rep.representation() == MachineRepresentation::kWord64) {
+ if (type.representation() == MachineRepresentation::kWord64) {
if (machine()->Is64()) {
DCHECK_NULL(old_value_high);
DCHECK_NULL(new_value_high);
- return AddNode(machine()->Word64AtomicCompareExchange(rep), base, index,
- old_value, new_value);
+ return AddNode(machine()->Word64AtomicCompareExchange(type), base,
+ index, old_value, new_value);
} else {
return AddNode(machine()->Word32AtomicPairCompareExchange(), base,
index, old_value, old_value_high, new_value,
@@ -291,7 +354,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
DCHECK_NULL(old_value_high);
DCHECK_NULL(new_value_high);
- return AddNode(machine()->Word32AtomicCompareExchange(rep), base, index,
+ return AddNode(machine()->Word32AtomicCompareExchange(type), base, index,
old_value, new_value);
}
@@ -852,15 +915,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Parameter(size_t index);
// Pointer utilities.
- Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
- return Load(rep, PointerConstant(address), Int32Constant(offset));
+ Node* LoadFromPointer(void* address, MachineType type, int32_t offset = 0) {
+ return Load(type, PointerConstant(address), Int32Constant(offset));
}
Node* StoreToPointer(void* address, MachineRepresentation rep, Node* node) {
return Store(rep, PointerConstant(address), node, kNoWriteBarrier);
}
- Node* UnalignedLoadFromPointer(void* address, MachineType rep,
+ Node* UnalignedLoadFromPointer(void* address, MachineType type,
int32_t offset = 0) {
- return UnalignedLoad(rep, PointerConstant(address), Int32Constant(offset));
+ return UnalignedLoad(type, PointerConstant(address), Int32Constant(offset));
}
Node* UnalignedStoreToPointer(void* address, MachineRepresentation rep,
Node* node) {
@@ -957,6 +1020,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void DebugBreak();
void Unreachable();
void Comment(const std::string& msg);
+ void StaticAssert(Node* value);
#if DEBUG
void Bind(RawMachineLabel* label, AssemblerDebugInfo info);
diff --git a/deps/v8/src/compiler/refs-map.h b/deps/v8/src/compiler/refs-map.h
index daaf433049..68beeb28f3 100644
--- a/deps/v8/src/compiler/refs-map.h
+++ b/deps/v8/src/compiler/refs-map.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_REFS_MAP_H_
#include "src/base/hashmap.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 79447f0809..cebd87e73d 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -7,7 +7,7 @@
#include <sstream>
#include "src/base/bits.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/type-cache.h"
@@ -355,7 +355,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
}
} else if (output_rep == MachineRepresentation::kCompressedSigned) {
op = machine()->ChangeCompressedSignedToTaggedSigned();
- } else if (output_rep == MachineRepresentation::kCompressed) {
+ } else if (CanBeCompressedPointer(output_rep)) {
if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedCompressedToTaggedSigned(use_info.feedback());
} else if (output_type.Is(Type::SignedSmall())) {
@@ -449,10 +449,10 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback());
} else if (output_rep == MachineRepresentation::kCompressedPointer) {
op = machine()->ChangeCompressedPointerToTaggedPointer();
- } else if (output_rep == MachineRepresentation::kCompressed &&
+ } else if (CanBeCompressedSigned(output_rep) &&
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type.Maybe(Type::SignedSmall())) {
- return node;
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
}
// TODO(turbofan): Consider adding a Bailout operator that just deopts
// for CompressedSigned output representation.
@@ -477,7 +477,6 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
case IrOpcode::kFloat64Constant:
case IrOpcode::kFloat32Constant:
UNREACHABLE();
- break;
default:
break;
}
@@ -592,7 +591,7 @@ Node* RepresentationChanger::GetCompressedSignedRepresentationFor(
node);
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
op = machine()->ChangeTaggedSignedToCompressedSigned();
- } else if (output_rep == MachineRepresentation::kTagged) {
+ } else if (CanBeTaggedPointer(output_rep)) {
if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedTaggedToCompressedSigned(use_info.feedback());
} else if (output_type.Is(Type::SignedSmall())) {
@@ -642,14 +641,36 @@ Node* RepresentationChanger::GetCompressedPointerRepresentationFor(
node);
} else if (output_rep == MachineRepresentation::kTaggedPointer) {
op = machine()->ChangeTaggedPointerToCompressedPointer();
- } else if (output_rep == MachineRepresentation::kTagged &&
+ } else if (CanBeTaggedSigned(output_rep) &&
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type.Maybe(Type::SignedSmall())) {
- return node;
+ op = machine()->ChangeTaggedPointerToCompressedPointer();
}
// TODO(turbofan): Consider adding a Bailout operator that just deopts
// for TaggedSigned output representation.
op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback());
+ } else if (output_rep == MachineRepresentation::kBit) {
+ // TODO(v8:8977): specialize here and below
+ node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = machine()->ChangeTaggedPointerToCompressedPointer();
+ } else if (IsWord(output_rep)) {
+ node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = machine()->ChangeTaggedPointerToCompressedPointer();
+ } else if (output_rep == MachineRepresentation::kWord64) {
+ node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = machine()->ChangeTaggedPointerToCompressedPointer();
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = machine()->ChangeTaggedPointerToCompressedPointer();
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = machine()->ChangeTaggedPointerToCompressedPointer();
+ } else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kCompressedPointer);
}
@@ -713,7 +734,6 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
case IrOpcode::kFloat64Constant:
case IrOpcode::kFloat32Constant:
UNREACHABLE();
- break;
default:
break;
}
@@ -762,6 +782,12 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
node = jsgraph()->graph()->NewNode(op, node);
return GetFloat32RepresentationFor(
node, MachineRepresentation::kTaggedSigned, output_type, truncation);
+ } else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ // TODO(v8:8977): Specialise here
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ node = jsgraph()->graph()->NewNode(op, node);
+ return GetFloat32RepresentationFor(
+ node, MachineRepresentation::kTaggedPointer, output_type, truncation);
} else if (output_rep == MachineRepresentation::kFloat64) {
op = machine()->TruncateFloat64ToFloat32();
} else if (output_rep == MachineRepresentation::kWord64) {
@@ -808,6 +834,8 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
use_info.truncation().IdentifiesZeroAndMinusZero())) {
op = machine()->ChangeInt32ToFloat64();
} else if (output_type.Is(Type::Unsigned32()) ||
+ (output_type.Is(Type::Unsigned32OrMinusZero()) &&
+ use_info.truncation().IdentifiesZeroAndMinusZero()) ||
use_info.truncation().IsUsedAsWord32()) {
// Either the output is uint32 or the uses only care about the
// low 32 bits (so we can pick uint32 safely).
@@ -815,7 +843,10 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kBit) {
CHECK(output_type.Is(Type::Boolean()));
- if (use_info.truncation().IsUsedAsFloat64()) {
+ // TODO(tebbi): TypeCheckKind::kNumberOrOddball should imply Float64
+ // truncation, since this exactly means that we treat Oddballs as Numbers.
+ if (use_info.truncation().IsUsedAsFloat64() ||
+ use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = machine()->ChangeUint32ToFloat64();
} else {
CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
@@ -825,9 +856,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
jsgraph()->common()->DeadValue(MachineRepresentation::kFloat64),
unreachable);
}
- } else if (output_rep == MachineRepresentation::kTagged ||
- output_rep == MachineRepresentation::kTaggedSigned ||
- output_rep == MachineRepresentation::kTaggedPointer) {
+ } else if (IsAnyTagged(output_rep)) {
if (output_type.Is(Type::Undefined())) {
return jsgraph()->Float64Constant(
std::numeric_limits<double>::quiet_NaN());
@@ -837,8 +866,17 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
op = machine()->ChangeInt32ToFloat64();
} else if (output_type.Is(Type::Number())) {
op = simplified()->ChangeTaggedToFloat64();
- } else if (output_type.Is(Type::NumberOrOddball())) {
- // TODO(jarin) Here we should check that truncation is Number.
+ } else if ((output_type.Is(Type::NumberOrOddball()) &&
+ use_info.truncation().IsUsedAsFloat64()) ||
+ output_type.Is(Type::NumberOrHole())) {
+ // JavaScript 'null' is an Oddball that results in +0 when truncated to
+ // Number. In a context like -0 == null, which must evaluate to false,
+ // this truncation must not happen. For this reason we restrict this case
+ // to when either the user explicitly requested a float (and thus wants
+ // +0 if null is the input) or we know from the types that the input can
+ // only be Number | Hole. The latter is necessary to handle the operator
+ // CheckFloat64Hole. We did not put in the type (Number | Oddball \ Null)
+ // to discover more bugs related to this conversion via crashes.
op = simplified()->TruncateTaggedToFloat64();
} else if (use_info.type_check() == TypeCheckKind::kNumber ||
(use_info.type_check() == TypeCheckKind::kNumberOrOddball &&
@@ -862,6 +900,13 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
return GetFloat64RepresentationFor(node,
MachineRepresentation::kTaggedSigned,
output_type, use_node, use_info);
+ } else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ // TODO(v8:8977): Specialise here
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ node = jsgraph()->graph()->NewNode(op, node);
+ return GetFloat64RepresentationFor(node,
+ MachineRepresentation::kTaggedPointer,
+ output_type, use_node, use_info);
} else if (output_rep == MachineRepresentation::kFloat32) {
op = machine()->ChangeFloat32ToFloat64();
} else if (output_rep == MachineRepresentation::kWord64) {
@@ -903,7 +948,6 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
UNREACHABLE();
- break;
case IrOpcode::kNumberConstant: {
double const fv = OpParameter<double>(node->op());
if (use_info.type_check() == TypeCheckKind::kNone ||
@@ -934,6 +978,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
CHECK(Truncation::Any(kIdentifyZeros)
.IsLessGeneralThan(use_info.truncation()));
CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
+ CHECK_NE(use_info.type_check(), TypeCheckKind::kNumberOrOddball);
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
return jsgraph()->graph()->NewNode(
@@ -1023,6 +1068,13 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
return GetWord32RepresentationFor(node,
MachineRepresentation::kTaggedSigned,
output_type, use_node, use_info);
+ } else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ // TODO(v8:8977): Specialise here
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ node = jsgraph()->graph()->NewNode(op, node);
+ return GetWord32RepresentationFor(node,
+ MachineRepresentation::kTaggedPointer,
+ output_type, use_node, use_info);
} else if (output_rep == MachineRepresentation::kWord32) {
// Only the checked case should get here, the non-checked case is
// handled in GetRepresentationFor.
@@ -1148,6 +1200,12 @@ Node* RepresentationChanger::GetBitRepresentationFor(
node = jsgraph()->graph()->NewNode(op, node);
return GetBitRepresentationFor(node, MachineRepresentation::kTaggedSigned,
output_type);
+ } else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ // TODO(v8:8977): Specialise here
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ node = jsgraph()->graph()->NewNode(op, node);
+ return GetBitRepresentationFor(node, MachineRepresentation::kTaggedPointer,
+ output_type);
} else if (IsWord(output_rep)) {
node = jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
jsgraph()->Int32Constant(0));
@@ -1183,7 +1241,6 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
UNREACHABLE();
- break;
case IrOpcode::kNumberConstant: {
double const fv = OpParameter<double>(node->op());
using limits = std::numeric_limits<int64_t>;
@@ -1208,6 +1265,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
} else if (output_rep == MachineRepresentation::kBit) {
CHECK(output_type.Is(Type::Boolean()));
CHECK_NE(use_info.type_check(), TypeCheckKind::kNone);
+ CHECK_NE(use_info.type_check(), TypeCheckKind::kNumberOrOddball);
Node* unreachable =
InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotASmi);
return jsgraph()->graph()->NewNode(
@@ -1291,6 +1349,13 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
return GetWord64RepresentationFor(node,
MachineRepresentation::kTaggedSigned,
output_type, use_node, use_info);
+ } else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ // TODO(v8:8977): Specialise here
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ node = jsgraph()->graph()->NewNode(op, node);
+ return GetWord64RepresentationFor(node,
+ MachineRepresentation::kTaggedPointer,
+ output_type, use_node, use_info);
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 5e682624e8..e8bb3f12ac 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -240,6 +240,8 @@ class UseInfo {
}
static UseInfo CheckedNumberOrOddballAsFloat64(
IdentifyZeros identify_zeros, const VectorSlotPair& feedback) {
+ // TODO(tebbi): We should use Float64 truncation here, since this exactly
+ // means that we treat Oddballs as Numbers.
return UseInfo(MachineRepresentation::kFloat64,
Truncation::Any(identify_zeros),
TypeCheckKind::kNumberOrOddball, feedback);
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index f547f584ae..84d74b4685 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -6,7 +6,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 6f64cae5dd..aae2cd3ad8 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -8,7 +8,7 @@
#include <iosfwd>
#include "src/base/compiler-specific.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 1dd0ddfce7..b57162f7f5 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -7,13 +7,13 @@
#include <iomanip>
#include "src/base/adapters.h"
-#include "src/bit-vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
+#include "src/utils/bit-vector.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -134,7 +134,6 @@ void Scheduler::UpdatePlacement(Node* node, Placement placement) {
case IrOpcode::kParameter:
// Parameters are fixed once and for all.
UNREACHABLE();
- break;
case IrOpcode::kPhi:
case IrOpcode::kEffectPhi: {
// Phis and effect phis are coupled to their respective blocks.
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index 7ec9872c31..bd2f2780dd 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -6,11 +6,11 @@
#define V8_COMPILER_SCHEDULER_H_
#include "src/base/flags.h"
+#include "src/common/globals.h"
#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/schedule.h"
#include "src/compiler/zone-stats.h"
-#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index 3d9e11324c..ecbd9cc030 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -7,11 +7,11 @@
#include <sstream>
#include "src/compiler/js-heap-broker.h"
-#include "src/handles-inl.h"
+#include "src/compiler/vector-slot-pair.h"
+#include "src/handles/handles-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects/code.h"
#include "src/objects/shared-function-info-inl.h"
-#include "src/vector-slot-pair.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -67,16 +67,15 @@ std::ostream& operator<<(std::ostream& out,
}
std::ostream& operator<<(std::ostream& out, const Hints& hints) {
- !hints.constants().empty() &&
- out << "\t\tConstants (" << hints.constants().size() << "):" << std::endl;
- for (auto x : hints.constants()) out << Brief(*x) << std::endl;
- !hints.maps().empty() && out << "\t\tMaps (" << hints.maps().size()
- << "):" << std::endl;
- for (auto x : hints.maps()) out << Brief(*x) << std::endl;
- !hints.function_blueprints().empty() &&
- out << "\t\tBlueprints (" << hints.function_blueprints().size()
- << "):" << std::endl;
- for (auto x : hints.function_blueprints()) out << x;
+ for (Handle<Object> constant : hints.constants()) {
+ out << " constant " << Brief(*constant) << std::endl;
+ }
+ for (Handle<Map> map : hints.maps()) {
+ out << " map " << Brief(*map) << std::endl;
+ }
+ for (FunctionBlueprint const& blueprint : hints.function_blueprints()) {
+ out << " blueprint " << blueprint << std::endl;
+ }
return out;
}
@@ -93,20 +92,35 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
Environment(Zone* zone, Isolate* isolate, CompilationSubject function,
base::Optional<Hints> new_target, const HintsVector& arguments);
+ bool IsDead() const { return environment_hints_.empty(); }
+
+ void Kill() {
+ DCHECK(!IsDead());
+ environment_hints_.clear();
+ DCHECK(IsDead());
+ }
+
+ void Revive() {
+ DCHECK(IsDead());
+ environment_hints_.resize(environment_hints_size(), Hints(zone()));
+ DCHECK(!IsDead());
+ }
+
// When control flow bytecodes are encountered, e.g. a conditional jump,
// the current environment needs to be stashed together with the target jump
// address. Later, when this target bytecode is handled, the stashed
// environment will be merged into the current one.
void Merge(Environment* other);
- friend std::ostream& operator<<(std::ostream& out, const Environment& env);
-
FunctionBlueprint function() const { return function_; }
- Hints& accumulator_hints() { return environment_hints_[accumulator_index()]; }
+ Hints& accumulator_hints() {
+ CHECK_LT(accumulator_index(), environment_hints_.size());
+ return environment_hints_[accumulator_index()];
+ }
Hints& register_hints(interpreter::Register reg) {
int local_index = RegisterToLocalIndex(reg);
- DCHECK_LT(local_index, environment_hints_.size());
+ CHECK_LT(local_index, environment_hints_.size());
return environment_hints_[local_index];
}
Hints& return_value_hints() { return return_value_hints_; }
@@ -124,6 +138,8 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
HintsVector& dst);
private:
+ friend std::ostream& operator<<(std::ostream& out, const Environment& env);
+
int RegisterToLocalIndex(interpreter::Register reg) const;
Zone* zone() const { return zone_; }
@@ -154,8 +170,8 @@ SerializerForBackgroundCompilation::Environment::Environment(
Zone* zone, CompilationSubject function)
: zone_(zone),
function_(function.blueprint()),
- parameter_count_(function_.shared->GetBytecodeArray()->parameter_count()),
- register_count_(function_.shared->GetBytecodeArray()->register_count()),
+ parameter_count_(function_.shared->GetBytecodeArray().parameter_count()),
+ register_count_(function_.shared->GetBytecodeArray().register_count()),
environment_hints_(environment_hints_size(), Hints(zone), zone),
return_value_hints_(zone) {
Handle<JSFunction> closure;
@@ -187,7 +203,7 @@ SerializerForBackgroundCompilation::Environment::Environment(
interpreter::Register new_target_reg =
function_.shared->GetBytecodeArray()
- ->incoming_new_target_or_generator_register();
+ .incoming_new_target_or_generator_register();
if (new_target_reg.is_valid()) {
DCHECK(register_hints(new_target_reg).IsEmpty());
if (new_target.has_value()) {
@@ -198,11 +214,17 @@ SerializerForBackgroundCompilation::Environment::Environment(
void SerializerForBackgroundCompilation::Environment::Merge(
Environment* other) {
- // Presumably the source and the target would have the same layout
- // so this is enforced here.
+ // {other} is guaranteed to have the same layout because it comes from an
+ // earlier bytecode in the same function.
CHECK_EQ(parameter_count(), other->parameter_count());
CHECK_EQ(register_count(), other->register_count());
- CHECK_EQ(environment_hints_size(), other->environment_hints_size());
+
+ if (IsDead()) {
+ environment_hints_ = other->environment_hints_;
+ CHECK(!IsDead());
+ return;
+ }
+ CHECK_EQ(environment_hints_.size(), other->environment_hints_.size());
for (size_t i = 0; i < environment_hints_.size(); ++i) {
environment_hints_[i].Add(other->environment_hints_[i]);
@@ -214,21 +236,43 @@ std::ostream& operator<<(
std::ostream& out,
const SerializerForBackgroundCompilation::Environment& env) {
std::ostringstream output_stream;
- output_stream << "Function ";
- env.function_.shared->Name()->Print(output_stream);
- output_stream << "Parameter count: " << env.parameter_count() << std::endl;
- output_stream << "Register count: " << env.register_count() << std::endl;
-
- output_stream << "Hints (" << env.environment_hints_.size() << "):\n";
- for (size_t i = 0; i < env.environment_hints_.size(); ++i) {
- if (env.environment_hints_[i].IsEmpty()) continue;
- output_stream << "\tSlot " << i << std::endl;
- output_stream << env.environment_hints_[i];
+ for (size_t i = 0; i << env.parameter_count(); ++i) {
+ Hints const& hints = env.environment_hints_[i];
+ if (!hints.IsEmpty()) {
+ output_stream << "Hints for a" << i << ":\n" << hints;
+ }
+ }
+ for (size_t i = 0; i << env.register_count(); ++i) {
+ Hints const& hints = env.environment_hints_[env.parameter_count() + i];
+ if (!hints.IsEmpty()) {
+ output_stream << "Hints for r" << i << ":\n" << hints;
+ }
+ }
+ {
+ Hints const& hints = env.environment_hints_[env.accumulator_index()];
+ if (!hints.IsEmpty()) {
+ output_stream << "Hints for <accumulator>:\n" << hints;
+ }
+ }
+ {
+ Hints const& hints = env.environment_hints_[env.function_closure_index()];
+ if (!hints.IsEmpty()) {
+ output_stream << "Hints for <closure>:\n" << hints;
+ }
+ }
+ {
+ Hints const& hints = env.environment_hints_[env.current_context_index()];
+ if (!hints.IsEmpty()) {
+ output_stream << "Hints for <context>:\n" << hints;
+ }
+ }
+ {
+ Hints const& hints = env.return_value_hints_;
+ if (!hints.IsEmpty()) {
+ output_stream << "Hints for {return value}:\n" << hints;
+ }
}
- output_stream << "Return value:\n";
- output_stream << env.return_value_hints_
- << "===========================================\n";
out << output_stream.str();
return out;
@@ -247,49 +291,147 @@ int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex(
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- JSHeapBroker* broker, Zone* zone, Handle<JSFunction> closure)
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags)
: broker_(broker),
+ dependencies_(dependencies),
zone_(zone),
environment_(new (zone) Environment(zone, {closure, broker_->isolate()})),
- stashed_environments_(zone) {
+ stashed_environments_(zone),
+ flags_(flags) {
JSFunctionRef(broker, closure).Serialize();
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
- JSHeapBroker* broker, Zone* zone, CompilationSubject function,
- base::Optional<Hints> new_target, const HintsVector& arguments)
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ CompilationSubject function, base::Optional<Hints> new_target,
+ const HintsVector& arguments, SerializerForBackgroundCompilationFlags flags)
: broker_(broker),
+ dependencies_(dependencies),
zone_(zone),
environment_(new (zone) Environment(zone, broker_->isolate(), function,
new_target, arguments)),
- stashed_environments_(zone) {
+ stashed_environments_(zone),
+ flags_(flags) {
+ DCHECK(!(flags_ & SerializerForBackgroundCompilationFlag::kOsr));
+ TraceScope tracer(
+ broker_, this,
+ "SerializerForBackgroundCompilation::SerializerForBackgroundCompilation");
+ TRACE_BROKER(broker_, "Initial environment:\n" << *environment_);
Handle<JSFunction> closure;
if (function.closure().ToHandle(&closure)) {
JSFunctionRef(broker, closure).Serialize();
}
}
+bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
+ FeedbackSlot slot) {
+ DCHECK(!environment()->IsDead());
+ if (!(flags() &
+ SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) {
+ return false;
+ }
+ if (flags() & SerializerForBackgroundCompilationFlag::kOsr) {
+ // Exclude OSR from this optimization because we might end up skipping the
+ // OSR entry point. TODO(neis): Support OSR?
+ return false;
+ }
+ FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+ if (!slot.IsInvalid() && nexus.IsUninitialized()) {
+ FeedbackSource source(nexus);
+ if (broker()->HasFeedback(source)) {
+ DCHECK_EQ(broker()->GetFeedback(source)->kind(),
+ ProcessedFeedback::kInsufficient);
+ } else {
+ broker()->SetFeedback(source,
+ new (broker()->zone()) InsufficientFeedback());
+ }
+ environment()->Kill();
+ return true;
+ }
+ return false;
+}
+
Hints SerializerForBackgroundCompilation::Run() {
+ TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run");
SharedFunctionInfoRef shared(broker(), environment()->function().shared);
FeedbackVectorRef feedback_vector(broker(),
environment()->function().feedback_vector);
if (shared.IsSerializedForCompilation(feedback_vector)) {
+ TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo "
+ << Brief(*shared.object())
+ << ", bailing out.\n");
return Hints(zone());
}
shared.SetSerializedForCompilation(feedback_vector);
+
+ // We eagerly call the {EnsureSourcePositionsAvailable} for all serialized
+ // SFIs while still on the main thread. Source positions will later be used
+ // by JSInliner::ReduceJSCall.
+ if (flags() &
+ SerializerForBackgroundCompilationFlag::kCollectSourcePositions) {
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(broker()->isolate(),
+ shared.object());
+ }
+
feedback_vector.SerializeSlots();
TraverseBytecode();
return environment()->return_value_hints();
}
+class ExceptionHandlerMatcher {
+ public:
+ explicit ExceptionHandlerMatcher(
+ BytecodeArrayIterator const& bytecode_iterator)
+ : bytecode_iterator_(bytecode_iterator) {
+ HandlerTable table(*bytecode_iterator_.bytecode_array());
+ for (int i = 0, n = table.NumberOfRangeEntries(); i < n; ++i) {
+ handlers_.insert(table.GetRangeHandler(i));
+ }
+ handlers_iterator_ = handlers_.cbegin();
+ }
+
+ bool CurrentBytecodeIsExceptionHandlerStart() {
+ CHECK(!bytecode_iterator_.done());
+ while (handlers_iterator_ != handlers_.cend() &&
+ *handlers_iterator_ < bytecode_iterator_.current_offset()) {
+ handlers_iterator_++;
+ }
+ return handlers_iterator_ != handlers_.cend() &&
+ *handlers_iterator_ == bytecode_iterator_.current_offset();
+ }
+
+ private:
+ BytecodeArrayIterator const& bytecode_iterator_;
+ std::set<int> handlers_;
+ std::set<int>::const_iterator handlers_iterator_;
+};
+
void SerializerForBackgroundCompilation::TraverseBytecode() {
BytecodeArrayRef bytecode_array(
broker(), handle(environment()->function().shared->GetBytecodeArray(),
broker()->isolate()));
BytecodeArrayIterator iterator(bytecode_array.object());
+ ExceptionHandlerMatcher handler_matcher(iterator);
for (; !iterator.done(); iterator.Advance()) {
MergeAfterJump(&iterator);
+
+ if (environment()->IsDead()) {
+ if (iterator.current_bytecode() ==
+ interpreter::Bytecode::kResumeGenerator ||
+ handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) {
+ environment()->Revive();
+ } else {
+ continue; // Skip this bytecode since TF won't generate code for it.
+ }
+ }
+
+ TRACE_BROKER(broker(),
+ "Handling bytecode: " << iterator.current_offset() << " "
+ << iterator.current_bytecode());
+ TRACE_BROKER(broker(), "Current environment:\n" << *environment());
+
switch (iterator.current_bytecode()) {
#define DEFINE_BYTECODE_CASE(name) \
case interpreter::Bytecode::k##name: \
@@ -329,7 +471,7 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
// For JSNativeContextSpecialization::ReduceJSGetSuperConstructor.
if (!constant->IsJSFunction()) continue;
MapRef map(broker(),
- handle(HeapObject::cast(*constant)->map(), broker()->isolate()));
+ handle(HeapObject::cast(*constant).map(), broker()->isolate()));
map.SerializePrototype();
ObjectRef proto = map.prototype();
if (proto.IsHeapObject() && proto.AsHeapObject().map().is_constructor()) {
@@ -425,7 +567,9 @@ void SerializerForBackgroundCompilation::VisitCreateClosure(
Handle<FeedbackCell> feedback_cell =
environment()->function().feedback_vector->GetClosureFeedbackCell(
iterator->GetIndexOperand(1));
+ FeedbackCellRef feedback_cell_ref(broker(), feedback_cell);
Handle<Object> cell_value(feedback_cell->value(), broker()->isolate());
+ ObjectRef cell_value_ref(broker(), cell_value);
environment()->accumulator_hints().Clear();
if (cell_value->IsFeedbackVector()) {
@@ -556,16 +700,14 @@ Hints SerializerForBackgroundCompilation::RunChildSerializer(
padded.pop_back(); // Remove the spread element.
// Fill the rest with empty hints.
padded.resize(
- function.blueprint().shared->GetBytecodeArray()->parameter_count(),
+ function.blueprint().shared->GetBytecodeArray().parameter_count(),
Hints(zone()));
return RunChildSerializer(function, new_target, padded, false);
}
- TRACE_BROKER(broker(), "Will run child serializer with environment:\n"
- << *environment());
-
SerializerForBackgroundCompilation child_serializer(
- broker(), zone(), function, new_target, arguments);
+ broker(), dependencies(), zone(), function, new_target, arguments,
+ flags().without(SerializerForBackgroundCompilationFlag::kOsr));
return child_serializer.Run();
}
@@ -587,6 +729,9 @@ base::Optional<HeapObjectRef> GetHeapObjectFeedback(
void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
Hints callee, base::Optional<Hints> new_target,
const HintsVector& arguments, FeedbackSlot slot, bool with_spread) {
+ // TODO(neis): Make this part of ProcessFeedback*?
+ if (BailoutOnUninitialized(slot)) return;
+
// Incorporate feedback into hints.
base::Optional<HeapObjectRef> feedback = GetHeapObjectFeedback(
broker(), environment()->function().feedback_vector, slot);
@@ -607,7 +752,7 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
if (!hint->IsJSFunction()) continue;
Handle<JSFunction> function = Handle<JSFunction>::cast(hint);
- if (!function->shared()->IsInlineable() || !function->has_feedback_vector())
+ if (!function->shared().IsInlineable() || !function->has_feedback_vector())
continue;
environment()->accumulator_hints().Add(RunChildSerializer(
@@ -727,6 +872,7 @@ SerializerForBackgroundCompilation::ProcessFeedbackForGlobalAccess(
void SerializerForBackgroundCompilation::VisitLdaGlobal(
BytecodeArrayIterator* iterator) {
FeedbackSlot slot = iterator->GetSlotOperand(1);
+
environment()->accumulator_hints().Clear();
GlobalAccessFeedback const* feedback = ProcessFeedbackForGlobalAccess(slot);
if (feedback != nullptr) {
@@ -774,32 +920,12 @@ MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
}
} // namespace
-// Note: We never use the same feedback slot for multiple access modes.
-void SerializerForBackgroundCompilation::ProcessFeedbackForKeyedPropertyAccess(
- FeedbackSlot slot, AccessMode mode) {
- if (slot.IsInvalid()) return;
- if (environment()->function().feedback_vector.is_null()) return;
-
- FeedbackNexus nexus(environment()->function().feedback_vector, slot);
- FeedbackSource source(nexus);
- if (broker()->HasFeedback(source)) return;
-
- if (nexus.GetKeyType() == PROPERTY) {
- CHECK_NE(mode, AccessMode::kStoreInLiteral);
- return; // TODO(neis): Support named access.
- }
- DCHECK_EQ(nexus.GetKeyType(), ELEMENT);
- CHECK(nexus.GetName().is_null());
-
- MapHandles maps;
- nexus.ExtractMaps(&maps);
- ElementAccessFeedback const* processed =
- broker()->ProcessFeedbackMapsForElementAccess(
- GetRelevantReceiverMaps(broker()->isolate(), maps));
- broker()->SetFeedback(source, processed);
- if (processed == nullptr) return;
-
- for (ElementAccessFeedback::MapIterator it = processed->all_maps(broker());
+ElementAccessFeedback const*
+SerializerForBackgroundCompilation::ProcessFeedbackMapsForElementAccess(
+ const MapHandles& maps, AccessMode mode) {
+ ElementAccessFeedback const* result =
+ broker()->ProcessFeedbackMapsForElementAccess(maps);
+ for (ElementAccessFeedback::MapIterator it = result->all_maps(broker());
!it.done(); it.advance()) {
switch (mode) {
case AccessMode::kHas:
@@ -814,12 +940,69 @@ void SerializerForBackgroundCompilation::ProcessFeedbackForKeyedPropertyAccess(
break;
}
}
+ return result;
+}
+
+NamedAccessFeedback const*
+SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
+ const MapHandles& maps, AccessMode mode, NameRef const& name) {
+ ZoneVector<PropertyAccessInfo> access_infos(broker()->zone());
+ for (Handle<Map> map : maps) {
+ MapRef map_ref(broker(), map);
+ ProcessMapForNamedPropertyAccess(map_ref, name);
+ AccessInfoFactory access_info_factory(broker(), dependencies(),
+ broker()->zone());
+ access_infos.push_back(access_info_factory.ComputePropertyAccessInfo(
+ map, name.object(), mode));
+ }
+ DCHECK(!access_infos.empty());
+ return new (broker()->zone()) NamedAccessFeedback(name, access_infos);
+}
+
+void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
+ FeedbackSlot slot, AccessMode mode, base::Optional<NameRef> static_name) {
+ if (slot.IsInvalid()) return;
+ if (environment()->function().feedback_vector.is_null()) return;
+
+ FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+ FeedbackSource source(nexus);
+ if (broker()->HasFeedback(source)) return;
+
+ if (nexus.ic_state() == UNINITIALIZED) {
+ broker()->SetFeedback(source,
+ new (broker()->zone()) InsufficientFeedback());
+ return;
+ }
+
+ MapHandles maps;
+ if (nexus.ExtractMaps(&maps) == 0) { // Megamorphic.
+ broker()->SetFeedback(source, nullptr);
+ return;
+ }
+
+ maps = GetRelevantReceiverMaps(broker()->isolate(), maps);
+ if (maps.empty()) {
+ broker()->SetFeedback(source,
+ new (broker()->zone()) InsufficientFeedback());
+ return;
+ }
+
+ ProcessedFeedback const* processed = nullptr;
+ base::Optional<NameRef> name =
+ static_name.has_value() ? static_name : broker()->GetNameFeedback(nexus);
+ if (name.has_value()) {
+ processed = ProcessFeedbackMapsForNamedAccess(maps, mode, *name);
+ } else if (nexus.GetKeyType() == ELEMENT && nexus.ic_state() != MEGAMORPHIC) {
+ processed = ProcessFeedbackMapsForElementAccess(maps, mode);
+ }
+ broker()->SetFeedback(source, processed);
}
void SerializerForBackgroundCompilation::ProcessKeyedPropertyAccess(
Hints const& receiver, Hints const& key, FeedbackSlot slot,
AccessMode mode) {
- ProcessFeedbackForKeyedPropertyAccess(slot, mode);
+ if (BailoutOnUninitialized(slot)) return;
+ ProcessFeedbackForPropertyAccess(slot, mode, base::nullopt);
for (Handle<Object> hint : receiver.constants()) {
ObjectRef receiver_ref(broker(), hint);
@@ -860,27 +1043,6 @@ void SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
}
}
-// Note: We never use the same feedback slot for multiple names.
-void SerializerForBackgroundCompilation::ProcessFeedbackForNamedPropertyAccess(
- FeedbackSlot slot, NameRef const& name) {
- if (slot.IsInvalid()) return;
- if (environment()->function().feedback_vector.is_null()) return;
-
- FeedbackNexus nexus(environment()->function().feedback_vector, slot);
- FeedbackSource source(nexus);
- if (broker()->HasFeedback(source)) return;
-
- MapHandles maps;
- nexus.ExtractMaps(&maps);
- for (Handle<Map> map : GetRelevantReceiverMaps(broker()->isolate(), maps)) {
- ProcessMapForNamedPropertyAccess(MapRef(broker(), map), name);
- }
-
- // NamedProperty support is still WIP. For now we don't have any actual data
- // to store, so use nullptr to at least record that we processed the feedback.
- broker()->SetFeedback(source, nullptr);
-}
-
void SerializerForBackgroundCompilation::VisitLdaKeyedProperty(
BytecodeArrayIterator* iterator) {
Hints const& key = environment()->accumulator_hints();
@@ -893,7 +1055,8 @@ void SerializerForBackgroundCompilation::VisitLdaKeyedProperty(
void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
Hints const& receiver, NameRef const& name, FeedbackSlot slot,
AccessMode mode) {
- if (!slot.IsInvalid()) ProcessFeedbackForNamedPropertyAccess(slot, name);
+ if (BailoutOnUninitialized(slot)) return;
+ ProcessFeedbackForPropertyAccess(slot, mode, name);
for (Handle<Map> map :
GetRelevantReceiverMaps(broker()->isolate(), receiver.maps())) {
@@ -940,6 +1103,11 @@ void SerializerForBackgroundCompilation::VisitStaNamedProperty(
ProcessNamedPropertyAccess(iterator, AccessMode::kStore);
}
+void SerializerForBackgroundCompilation::VisitStaNamedOwnProperty(
+ BytecodeArrayIterator* iterator) {
+ ProcessNamedPropertyAccess(iterator, AccessMode::kStoreInLiteral);
+}
+
void SerializerForBackgroundCompilation::VisitTestIn(
BytecodeArrayIterator* iterator) {
Hints const& receiver = environment()->accumulator_hints();
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
index 17db48f906..0ee37ef280 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -7,9 +7,9 @@
#include "src/base/optional.h"
#include "src/compiler/access-info.h"
-#include "src/handles.h"
-#include "src/maybe-handles.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
+#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -189,6 +189,7 @@ namespace compiler {
V(StaGlobal) \
V(StaInArrayLiteral) \
V(StaKeyedProperty) \
+ V(StaNamedOwnProperty) \
V(StaNamedProperty) \
V(Star) \
V(TestIn) \
@@ -262,25 +263,37 @@ class Hints {
MapsSet maps_;
BlueprintsSet function_blueprints_;
};
-
using HintsVector = ZoneVector<Hints>;
+enum class SerializerForBackgroundCompilationFlag : uint8_t {
+ kBailoutOnUninitialized = 1 << 0,
+ kCollectSourcePositions = 1 << 1,
+ kOsr = 1 << 2,
+};
+using SerializerForBackgroundCompilationFlags =
+ base::Flags<SerializerForBackgroundCompilationFlag>;
+
// The SerializerForBackgroundCompilation makes sure that the relevant function
// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
// optimizations in the compiler, is copied to the heap broker.
class SerializerForBackgroundCompilation {
public:
- SerializerForBackgroundCompilation(JSHeapBroker* broker, Zone* zone,
- Handle<JSFunction> closure);
+ SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure,
+ SerializerForBackgroundCompilationFlags flags);
Hints Run(); // NOTE: Returns empty for an already-serialized function.
class Environment;
private:
- SerializerForBackgroundCompilation(JSHeapBroker* broker, Zone* zone,
- CompilationSubject function,
- base::Optional<Hints> new_target,
- const HintsVector& arguments);
+ SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ CompilationSubject function, base::Optional<Hints> new_target,
+ const HintsVector& arguments,
+ SerializerForBackgroundCompilationFlags flags);
+
+ bool BailoutOnUninitialized(FeedbackSlot slot);
void TraverseBytecode();
@@ -307,10 +320,12 @@ class SerializerForBackgroundCompilation {
FeedbackSlot slot, AccessMode mode);
GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot);
- void ProcessFeedbackForKeyedPropertyAccess(FeedbackSlot slot,
- AccessMode mode);
- void ProcessFeedbackForNamedPropertyAccess(FeedbackSlot slot,
- NameRef const& name);
+ NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess(
+ const MapHandles& maps, AccessMode mode, NameRef const& name);
+ ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
+ const MapHandles& maps, AccessMode mode);
+ void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode,
+ base::Optional<NameRef> static_name);
void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name);
Hints RunChildSerializer(CompilationSubject function,
@@ -318,13 +333,17 @@ class SerializerForBackgroundCompilation {
const HintsVector& arguments, bool with_spread);
JSHeapBroker* broker() const { return broker_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
Environment* environment() const { return environment_; }
+ SerializerForBackgroundCompilationFlags flags() const { return flags_; }
JSHeapBroker* const broker_;
+ CompilationDependencies* const dependencies_;
Zone* const zone_;
Environment* const environment_;
ZoneUnorderedMap<int, Environment*> stashed_environments_;
+ SerializerForBackgroundCompilationFlags const flags_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 30b402291d..8bc0e7af7b 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -6,9 +6,8 @@
#include <limits>
-#include "src/address-map.h"
#include "src/base/bits.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -22,8 +21,9 @@
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
-#include "src/conversions-inl.h"
-#include "src/objects.h"
+#include "src/numbers/conversions-inl.h"
+#include "src/utils/address-map.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -116,7 +116,6 @@ UseInfo CheckedUseInfoAsFloat64FromHint(
case NumberOperationHint::kSigned32:
// Not used currently.
UNREACHABLE();
- break;
case NumberOperationHint::kNumber:
return UseInfo::CheckedNumberAsFloat64(identify_zeros, feedback);
case NumberOperationHint::kNumberOrOddball:
@@ -1233,10 +1232,12 @@ class RepresentationSelector {
MachineRepresentation field_representation, Type field_type,
MachineRepresentation value_representation, Node* value) {
if (base_taggedness == kTaggedBase &&
- CanBeTaggedPointer(field_representation)) {
+ CanBeTaggedOrCompressedPointer(field_representation)) {
Type value_type = NodeProperties::GetType(value);
if (field_representation == MachineRepresentation::kTaggedSigned ||
- value_representation == MachineRepresentation::kTaggedSigned) {
+ value_representation == MachineRepresentation::kTaggedSigned ||
+ field_representation == MachineRepresentation::kCompressedSigned ||
+ value_representation == MachineRepresentation::kCompressedSigned) {
// Write barriers are only for stores of heap objects.
return kNoWriteBarrier;
}
@@ -1258,7 +1259,9 @@ class RepresentationSelector {
}
}
if (field_representation == MachineRepresentation::kTaggedPointer ||
- value_representation == MachineRepresentation::kTaggedPointer) {
+ value_representation == MachineRepresentation::kTaggedPointer ||
+ field_representation == MachineRepresentation::kCompressedPointer ||
+ value_representation == MachineRepresentation::kCompressedPointer) {
// Write barriers for heap objects are cheaper.
return kPointerWriteBarrier;
}
@@ -1742,6 +1745,10 @@ class RepresentationSelector {
// BooleanNot(x: kRepTagged) => WordEqual(x, #false)
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ } else if (CanBeCompressedPointer(input_info->representation())) {
+ // BooleanNot(x: kRepCompressed) => Word32Equal(x, #false)
+ node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
+ NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
} else {
DCHECK(TypeOf(node->InputAt(0)).IsNone());
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
@@ -2583,6 +2590,11 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kSameValueNumbersOnly: {
+ VisitBinop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kSameValue: {
if (truncation.IsUnused()) return VisitUnused(node);
if (BothInputsAre(node, Type::Number())) {
@@ -2760,18 +2772,6 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
- case IrOpcode::kLoadMessage: {
- if (truncation.IsUnused()) return VisitUnused(node);
- VisitUnop(node, UseInfo::Word(), MachineRepresentation::kTagged);
- return;
- }
- case IrOpcode::kStoreMessage: {
- ProcessInput(node, 0, UseInfo::Word());
- ProcessInput(node, 1, UseInfo::AnyTagged());
- ProcessRemainingInputs(node, 2);
- SetOutput(node, MachineRepresentation::kNone);
- return;
- }
case IrOpcode::kLoadFieldByIndex: {
if (truncation.IsUnused()) return VisitUnused(node);
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
@@ -2794,9 +2794,9 @@ class RepresentationSelector {
access.machine_type.representation();
// Convert to Smi if possible, such that we can avoid a write barrier.
- if (field_representation == MachineRepresentation::kTagged &&
+ if (field_representation == MachineType::RepCompressedTagged() &&
TypeOf(value_node).Is(Type::SignedSmall())) {
- field_representation = MachineRepresentation::kTaggedSigned;
+ field_representation = MachineType::RepCompressedTaggedSigned();
}
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, field_representation, access.offset,
@@ -2823,11 +2823,6 @@ class RepresentationSelector {
access.machine_type.representation());
return;
}
- case IrOpcode::kLoadStackArgument: {
- if (truncation.IsUnused()) return VisitUnused(node);
- VisitBinop(node, UseInfo::Word(), MachineRepresentation::kTagged);
- return;
- }
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
Node* value_node = node->InputAt(2);
@@ -2836,9 +2831,9 @@ class RepresentationSelector {
access.machine_type.representation();
// Convert to Smi if possible, such that we can avoid a write barrier.
- if (element_representation == MachineRepresentation::kTagged &&
+ if (element_representation == MachineType::RepCompressedTagged() &&
TypeOf(value_node).Is(Type::SignedSmall())) {
- element_representation = MachineRepresentation::kTaggedSigned;
+ element_representation = MachineType::RepCompressedTaggedSigned();
}
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
access.base_is_tagged, element_representation, access.type,
@@ -2914,12 +2909,11 @@ class RepresentationSelector {
case IrOpcode::kLoadDataViewElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
- ProcessInput(node, 1, UseInfo::Word()); // external pointer
- ProcessInput(node, 2, UseInfo::Word()); // byte offset
- ProcessInput(node, 3, UseInfo::Word()); // index
- ProcessInput(node, 4, UseInfo::Bool()); // little-endian
- ProcessRemainingInputs(node, 5);
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // object
+ ProcessInput(node, 1, UseInfo::Word()); // base
+ ProcessInput(node, 2, UseInfo::Word()); // index
+ ProcessInput(node, 3, UseInfo::Bool()); // little-endian
+ ProcessRemainingInputs(node, 4);
SetOutput(node, rep);
return;
}
@@ -2939,14 +2933,13 @@ class RepresentationSelector {
case IrOpcode::kStoreDataViewElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
- ProcessInput(node, 0, UseInfo::AnyTagged()); // buffer
- ProcessInput(node, 1, UseInfo::Word()); // external pointer
- ProcessInput(node, 2, UseInfo::Word()); // byte offset
- ProcessInput(node, 3, UseInfo::Word()); // index
- ProcessInput(node, 4,
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // object
+ ProcessInput(node, 1, UseInfo::Word()); // base
+ ProcessInput(node, 2, UseInfo::Word()); // index
+ ProcessInput(node, 3,
TruncatingUseInfoFromRepresentation(rep)); // value
- ProcessInput(node, 5, UseInfo::Bool()); // little-endian
- ProcessRemainingInputs(node, 6);
+ ProcessInput(node, 4, UseInfo::Bool()); // little-endian
+ ProcessRemainingInputs(node, 5);
SetOutput(node, MachineRepresentation::kNone);
return;
}
@@ -3292,14 +3285,21 @@ class RepresentationSelector {
case IrOpcode::kMapGuard:
// Eliminate MapGuard nodes here.
return VisitUnused(node);
- case IrOpcode::kCheckMaps:
+ case IrOpcode::kCheckMaps: {
+ CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
+ return VisitUnop(
+ node, UseInfo::CheckedHeapObjectAsTaggedPointer(p.feedback()),
+ MachineRepresentation::kNone);
+ }
case IrOpcode::kTransitionElementsKind: {
- VisitInputs(node);
- return SetOutput(node, MachineRepresentation::kNone);
+ return VisitUnop(
+ node, UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()),
+ MachineRepresentation::kNone);
}
case IrOpcode::kCompareMaps:
- return VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kBit);
+ return VisitUnop(
+ node, UseInfo::CheckedHeapObjectAsTaggedPointer(VectorSlotPair()),
+ MachineRepresentation::kBit);
case IrOpcode::kEnsureWritableFastElements:
return VisitBinop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
@@ -3429,6 +3429,8 @@ class RepresentationSelector {
case IrOpcode::kDeadValue:
ProcessInput(node, 0, UseInfo::Any());
return SetOutput(node, MachineRepresentation::kNone);
+ case IrOpcode::kStaticAssert:
+ return VisitUnop(node, UseInfo::Any(), MachineRepresentation::kTagged);
default:
FATAL(
"Representation inference: unsupported opcode %i (%s), node #%i\n.",
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 96c434a595..c3cca499ac 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -10,7 +10,7 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
-#include "src/conversions-inl.h"
+#include "src/numbers/conversions-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 4024a3e439..4344f87dc9 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -6,8 +6,8 @@
#define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 6f69d5dd99..ed3cfa8617 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -8,10 +8,10 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/map.h"
#include "src/objects/name.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -78,7 +78,7 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
}
#endif
os << access.type << ", " << access.machine_type << ", "
- << access.write_barrier_kind;
+ << access.write_barrier_kind << ", " << access.constness;
if (FLAG_untrusted_code_mitigations) {
os << ", " << access.load_sensitivity;
}
@@ -113,7 +113,6 @@ size_t hash_value(ElementAccess const& access) {
access.machine_type);
}
-
std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", "
<< access.type << ", " << access.machine_type << ", "
@@ -124,6 +123,20 @@ std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
return os;
}
+bool operator==(ObjectAccess const& lhs, ObjectAccess const& rhs) {
+ return lhs.machine_type == rhs.machine_type &&
+ lhs.write_barrier_kind == rhs.write_barrier_kind;
+}
+
+size_t hash_value(ObjectAccess const& access) {
+ return base::hash_combine(access.machine_type, access.write_barrier_kind);
+}
+
+std::ostream& operator<<(std::ostream& os, ObjectAccess const& access) {
+ os << access.machine_type << ", " << access.write_barrier_kind;
+ return os;
+}
+
const FieldAccess& FieldAccessOf(const Operator* op) {
DCHECK_NOT_NULL(op);
DCHECK(op->opcode() == IrOpcode::kLoadField ||
@@ -131,7 +144,6 @@ const FieldAccess& FieldAccessOf(const Operator* op) {
return OpParameter<FieldAccess>(op);
}
-
const ElementAccess& ElementAccessOf(const Operator* op) {
DCHECK_NOT_NULL(op);
DCHECK(op->opcode() == IrOpcode::kLoadElement ||
@@ -139,6 +151,13 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
return OpParameter<ElementAccess>(op);
}
+const ObjectAccess& ObjectAccessOf(const Operator* op) {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->opcode() == IrOpcode::kLoadFromObject ||
+ op->opcode() == IrOpcode::kStoreToObject);
+ return OpParameter<ObjectAccess>(op);
+}
+
ExternalArrayType ExternalArrayTypeOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kLoadTypedElement ||
op->opcode() == IrOpcode::kLoadDataViewElement ||
@@ -547,19 +566,23 @@ bool operator==(AllocateParameters const& lhs, AllocateParameters const& rhs) {
lhs.type() == rhs.type();
}
+const AllocateParameters& AllocateParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kAllocate ||
+ op->opcode() == IrOpcode::kAllocateRaw);
+ return OpParameter<AllocateParameters>(op);
+}
+
AllocationType AllocationTypeOf(const Operator* op) {
if (op->opcode() == IrOpcode::kNewDoubleElements ||
op->opcode() == IrOpcode::kNewSmiOrObjectElements) {
return OpParameter<AllocationType>(op);
}
- DCHECK(op->opcode() == IrOpcode::kAllocate ||
- op->opcode() == IrOpcode::kAllocateRaw);
- return OpParameter<AllocateParameters>(op).allocation_type();
+ return AllocateParametersOf(op).allocation_type();
}
Type AllocateTypeOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
- return OpParameter<AllocateParameters>(op).type();
+ return AllocateParametersOf(op).type();
}
UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
@@ -736,6 +759,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
V(SameValue, Operator::kCommutative, 2, 0) \
+ V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
V(NumberSameValue, Operator::kCommutative, 2, 0) \
V(ReferenceEqual, Operator::kCommutative, 2, 0) \
V(StringEqual, Operator::kCommutative, 2, 0) \
@@ -1103,39 +1127,6 @@ struct SimplifiedOperatorGlobalCache final {
};
LoadFieldByIndexOperator kLoadFieldByIndex;
- struct LoadStackArgumentOperator final : public Operator {
- LoadStackArgumentOperator()
- : Operator( // --
- IrOpcode::kLoadStackArgument, // opcode
- Operator::kNoDeopt | Operator::kNoThrow |
- Operator::kNoWrite, // flags
- "LoadStackArgument", // name
- 2, 1, 1, 1, 1, 0) {} // counts
- };
- LoadStackArgumentOperator kLoadStackArgument;
-
- struct LoadMessageOperator final : public Operator {
- LoadMessageOperator()
- : Operator( // --
- IrOpcode::kLoadMessage, // opcode
- Operator::kNoDeopt | Operator::kNoThrow |
- Operator::kNoWrite, // flags
- "LoadMessage", // name
- 1, 1, 1, 1, 1, 0) {} // counts
- };
- LoadMessageOperator kLoadMessage;
-
- struct StoreMessageOperator final : public Operator {
- StoreMessageOperator()
- : Operator( // --
- IrOpcode::kStoreMessage, // opcode
- Operator::kNoDeopt | Operator::kNoThrow |
- Operator::kNoRead, // flags
- "StoreMessage", // name
- 2, 1, 1, 0, 1, 0) {} // counts
- };
- StoreMessageOperator kStoreMessage;
-
#define SPECULATIVE_NUMBER_BINOP(Name) \
template <NumberOperationHint kHint> \
struct Name##Operator final : public Operator1<NumberOperationHint> { \
@@ -1405,7 +1396,7 @@ const Operator* SimplifiedOperatorBuilder::CompareMaps(
DCHECK_LT(0, maps.size());
return new (zone()) Operator1<ZoneHandleSet<Map>>( // --
IrOpcode::kCompareMaps, // opcode
- Operator::kEliminatable, // flags
+ Operator::kNoThrow | Operator::kNoWrite, // flags
"CompareMaps", // name
1, 1, 1, 1, 1, 0, // counts
maps); // parameter
@@ -1490,7 +1481,7 @@ const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
ElementsTransition transition) {
return new (zone()) Operator1<ElementsTransition>( // --
IrOpcode::kTransitionElementsKind, // opcode
- Operator::kNoDeopt | Operator::kNoThrow, // flags
+ Operator::kNoThrow, // flags
"TransitionElementsKind", // name
1, 1, 1, 0, 1, 0, // counts
transition); // parameter
@@ -1650,11 +1641,18 @@ const Operator* SimplifiedOperatorBuilder::Allocate(Type type,
}
const Operator* SimplifiedOperatorBuilder::AllocateRaw(
- Type type, AllocationType allocation) {
+ Type type, AllocationType allocation,
+ AllowLargeObjects allow_large_objects) {
+ // We forbid optimized allocations to allocate in a different generation than
+ // requested.
+ DCHECK(!(allow_large_objects == AllowLargeObjects::kTrue &&
+ allocation == AllocationType::kYoung &&
+ !FLAG_young_generation_large_objects));
return new (zone()) Operator1<AllocateParameters>(
IrOpcode::kAllocateRaw,
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
- "AllocateRaw", 1, 1, 1, 1, 1, 1, AllocateParameters(type, allocation));
+ "AllocateRaw", 1, 1, 1, 1, 1, 1,
+ AllocateParameters(type, allocation, allow_large_objects));
}
const Operator* SimplifiedOperatorBuilder::StringCodePointAt(
@@ -1705,9 +1703,11 @@ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0) \
V(LoadTypedElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
+ V(LoadFromObject, ObjectAccess, Operator::kNoWrite, 2, 1, 1) \
V(StoreTypedElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0) \
- V(LoadDataViewElement, ExternalArrayType, Operator::kNoWrite, 5, 1, 1) \
- V(StoreDataViewElement, ExternalArrayType, Operator::kNoRead, 6, 1, 0)
+ V(StoreToObject, ObjectAccess, Operator::kNoRead, 3, 1, 0) \
+ V(LoadDataViewElement, ExternalArrayType, Operator::kNoWrite, 4, 1, 1) \
+ V(StoreDataViewElement, ExternalArrayType, Operator::kNoRead, 5, 1, 0)
#define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
output_count) \
@@ -1721,18 +1721,6 @@ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
ACCESS_OP_LIST(ACCESS)
#undef ACCESS
-const Operator* SimplifiedOperatorBuilder::LoadMessage() {
- return &cache_.kLoadMessage;
-}
-
-const Operator* SimplifiedOperatorBuilder::StoreMessage() {
- return &cache_.kStoreMessage;
-}
-
-const Operator* SimplifiedOperatorBuilder::LoadStackArgument() {
- return &cache_.kLoadStackArgument;
-}
-
const Operator* SimplifiedOperatorBuilder::TransitionAndStoreElement(
Handle<Map> double_map, Handle<Map> fast_map) {
TransitionAndStoreElementParameters parameters(double_map, fast_map);
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 476991af3b..d93544c5cd 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -8,16 +8,17 @@
#include <iosfwd>
#include "src/base/compiler-specific.h"
+#include "src/codegen/machine-type.h"
+#include "src/common/globals.h"
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
-#include "src/deoptimize-reason.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/machine-type.h"
-#include "src/maybe-handles.h"
-#include "src/objects.h"
-#include "src/type-hints.h"
-#include "src/vector-slot-pair.h"
+#include "src/compiler/vector-slot-pair.h"
+#include "src/compiler/write-barrier-kind.h"
+#include "src/deoptimizer/deoptimize-reason.h"
+#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
+#include "src/objects/objects.h"
+#include "src/objects/type-hints.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -55,6 +56,7 @@ struct FieldAccess {
MachineType machine_type; // machine type of the field.
WriteBarrierKind write_barrier_kind; // write barrier hint.
LoadSensitivity load_sensitivity; // load safety for poisoning.
+ PropertyConstness constness; // whether the field is assigned only once
FieldAccess()
: base_is_tagged(kTaggedBase),
@@ -62,12 +64,14 @@ struct FieldAccess {
type(Type::None()),
machine_type(MachineType::None()),
write_barrier_kind(kFullWriteBarrier),
- load_sensitivity(LoadSensitivity::kUnsafe) {}
+ load_sensitivity(LoadSensitivity::kUnsafe),
+ constness(PropertyConstness::kMutable) {}
FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle<Name> name,
MaybeHandle<Map> map, Type type, MachineType machine_type,
WriteBarrierKind write_barrier_kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe)
+ LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe,
+ PropertyConstness constness = PropertyConstness::kMutable)
: base_is_tagged(base_is_tagged),
offset(offset),
name(name),
@@ -75,7 +79,8 @@ struct FieldAccess {
type(type),
machine_type(machine_type),
write_barrier_kind(write_barrier_kind),
- load_sensitivity(load_sensitivity) {}
+ load_sensitivity(load_sensitivity),
+ constness(constness) {}
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -137,6 +142,30 @@ V8_EXPORT_PRIVATE ElementAccess const& ElementAccessOf(const Operator* op)
ExternalArrayType ExternalArrayTypeOf(const Operator* op) V8_WARN_UNUSED_RESULT;
+// An access descriptor for loads/stores of CSA-accessible structures.
+struct ObjectAccess {
+ MachineType machine_type; // machine type of the field.
+ WriteBarrierKind write_barrier_kind; // write barrier hint.
+
+ ObjectAccess()
+ : machine_type(MachineType::None()),
+ write_barrier_kind(kFullWriteBarrier) {}
+
+ ObjectAccess(MachineType machine_type, WriteBarrierKind write_barrier_kind)
+ : machine_type(machine_type), write_barrier_kind(write_barrier_kind) {}
+
+ int tag() const { return kHeapObjectTag; }
+};
+
+V8_EXPORT_PRIVATE bool operator==(ObjectAccess const&, ObjectAccess const&);
+
+size_t hash_value(ObjectAccess const&);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ObjectAccess const&);
+
+V8_EXPORT_PRIVATE ObjectAccess const& ObjectAccessOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
+
// The ConvertReceiverMode is used as parameter by ConvertReceiver operators.
ConvertReceiverMode ConvertReceiverModeOf(Operator const* op)
V8_WARN_UNUSED_RESULT;
@@ -480,15 +509,21 @@ bool IsRestLengthOf(const Operator* op) V8_WARN_UNUSED_RESULT;
class AllocateParameters {
public:
- AllocateParameters(Type type, AllocationType allocation_type)
- : type_(type), allocation_type_(allocation_type) {}
+ AllocateParameters(
+ Type type, AllocationType allocation_type,
+ AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse)
+ : type_(type),
+ allocation_type_(allocation_type),
+ allow_large_objects_(allow_large_objects) {}
Type type() const { return type_; }
AllocationType allocation_type() const { return allocation_type_; }
+ AllowLargeObjects allow_large_objects() const { return allow_large_objects_; }
private:
Type type_;
AllocationType allocation_type_;
+ AllowLargeObjects allow_large_objects_;
};
bool IsCheckedWithFeedback(const Operator* op);
@@ -499,6 +534,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AllocateParameters);
bool operator==(AllocateParameters const&, AllocateParameters const&);
+const AllocateParameters& AllocateParametersOf(const Operator* op)
+ V8_WARN_UNUSED_RESULT;
+
AllocationType AllocationTypeOf(const Operator* op) V8_WARN_UNUSED_RESULT;
Type AllocateTypeOf(const Operator* op) V8_WARN_UNUSED_RESULT;
@@ -617,6 +655,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ReferenceEqual();
const Operator* SameValue();
+ const Operator* SameValueNumbersOnly();
const Operator* TypeOf();
@@ -788,21 +827,16 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* Allocate(Type type,
AllocationType allocation = AllocationType::kYoung);
const Operator* AllocateRaw(
- Type type, AllocationType allocation = AllocationType::kYoung);
+ Type type, AllocationType allocation = AllocationType::kYoung,
+ AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse);
const Operator* LoadFieldByIndex();
const Operator* LoadField(FieldAccess const&);
const Operator* StoreField(FieldAccess const&);
- const Operator* LoadMessage();
- const Operator* StoreMessage();
-
// load-element [base + index]
const Operator* LoadElement(ElementAccess const&);
- // load-stack-argument [base + index]
- const Operator* LoadStackArgument();
-
// store-element [base + index], value
const Operator* StoreElement(ElementAccess const&);
@@ -819,16 +853,22 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TransitionAndStoreNonNumberElement(Handle<Map> fast_map,
Type value_type);
+ // load-from-object [base + offset]
+ const Operator* LoadFromObject(ObjectAccess const&);
+
+ // store-to-object [base + offset], value
+ const Operator* StoreToObject(ObjectAccess const&);
+
// load-typed-element buffer, [base + external + index]
const Operator* LoadTypedElement(ExternalArrayType const&);
- // load-data-view-element buffer, [base + byte_offset + index]
+ // load-data-view-element object, [base + index]
const Operator* LoadDataViewElement(ExternalArrayType const&);
// store-typed-element buffer, [base + external + index], value
const Operator* StoreTypedElement(ExternalArrayType const&);
- // store-data-view-element buffer, [base + byte_offset + index], value
+ // store-data-view-element object, [base + index], value
const Operator* StoreDataViewElement(ExternalArrayType const&);
// Abort (for terminating execution on internal error).
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index 26c47e0cb5..c00613c232 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -4,7 +4,7 @@
#include "src/compiler/state-values-utils.h"
-#include "src/bit-vector.h"
+#include "src/utils/bit-vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 49768a4d22..00ec3bb351 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -6,9 +6,9 @@
#define V8_COMPILER_STATE_VALUES_UTILS_H_
#include <array>
+#include "src/common/globals.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 1a1555d29f..3d053484e0 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_TYPE_CACHE_H_
#include "src/compiler/types.h"
-#include "src/date.h"
+#include "src/date/date.h"
#include "src/objects/code.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/string.h"
@@ -113,9 +113,10 @@ class V8_EXPORT_PRIVATE TypeCache final {
// JSArrayBuffer::byte_length above.
Type const kJSArrayBufferViewByteOffsetType = kJSArrayBufferByteLengthType;
- // The JSTypedArray::length property always contains a tagged number in the
- // range [0, kMaxSmiValue].
- Type const kJSTypedArrayLengthType = Type::UnsignedSmall();
+ // The JSTypedArray::length property always contains an untagged number in
+ // the range [0, kMaxSmiValue].
+ Type const kJSTypedArrayLengthType =
+ CreateRange(0.0, JSTypedArray::kMaxLength);
// The String::length property always contains a smi in the range
// [0, String::kMaxLength].
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index a555d7f63b..0c81670f0b 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -12,7 +12,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -113,6 +113,26 @@ base::Optional<MapRef> GetStableMapFromObjectType(JSHeapBroker* broker,
return {};
}
+Node* ResolveSameValueRenames(Node* node) {
+ while (true) {
+ switch (node->opcode()) {
+ case IrOpcode::kCheckHeapObject:
+ case IrOpcode::kCheckNumber:
+ case IrOpcode::kCheckSmi:
+ case IrOpcode::kFinishRegion:
+ case IrOpcode::kTypeGuard:
+ if (node->IsDead()) {
+ return node;
+ } else {
+ node = node->InputAt(0);
+ continue;
+ }
+ default:
+ return node;
+ }
+ }
+}
+
} // namespace
Reduction TypedOptimization::ReduceConvertReceiver(Node* node) {
@@ -309,6 +329,12 @@ Reduction TypedOptimization::ReducePhi(Node* node) {
// after lowering based on types, i.e. a SpeculativeNumberAdd has a more
// precise type than the JSAdd that was in the graph when the Typer was run.
DCHECK_EQ(IrOpcode::kPhi, node->opcode());
+ // Prevent new types from being propagated through loop-related Phis for now.
+ // This is to avoid slow convergence of type narrowing when we learn very
+ // precise information about loop variables.
+ if (NodeProperties::GetControlInput(node, 0)->opcode() == IrOpcode::kLoop) {
+ return NoChange();
+ }
int arity = node->op()->ValueInputCount();
Type type = NodeProperties::GetType(node->InputAt(0));
for (int i = 1; i < arity; ++i) {
@@ -507,7 +533,10 @@ Reduction TypedOptimization::ReduceSameValue(Node* node) {
Node* const rhs = NodeProperties::GetValueInput(node, 1);
Type const lhs_type = NodeProperties::GetType(lhs);
Type const rhs_type = NodeProperties::GetType(rhs);
- if (lhs == rhs) {
+ if (ResolveSameValueRenames(lhs) == ResolveSameValueRenames(rhs)) {
+ if (NodeProperties::GetType(node).IsNone()) {
+ return NoChange();
+ }
// SameValue(x,x) => #true
return Replace(jsgraph()->TrueConstant());
} else if (lhs_type.Is(Type::Unique()) && rhs_type.Is(Type::Unique())) {
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index ed9c56e59f..58efff918f 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -6,8 +6,8 @@
#define V8_COMPILER_TYPED_OPTIMIZATION_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index ac8a1d4bfb..4cf2c38bdb 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -7,7 +7,6 @@
#include <iomanip>
#include "src/base/flags.h"
-#include "src/bootstrapper.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
@@ -18,7 +17,8 @@
#include "src/compiler/operation-typer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
-#include "src/objects-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -317,6 +317,7 @@ class Typer::Visitor : public Reducer {
static Type NumberLessThanOrEqualTyper(Type, Type, Typer*);
static Type ReferenceEqualTyper(Type, Type, Typer*);
static Type SameValueTyper(Type, Type, Typer*);
+ static Type SameValueNumbersOnlyTyper(Type, Type, Typer*);
static Type StringFromSingleCharCodeTyper(Type, Typer*);
static Type StringFromSingleCodePointTyper(Type, Typer*);
@@ -485,12 +486,12 @@ Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
}
Type Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
+ if (outcome == 0) return Type::None();
if ((outcome & kComparisonFalse) != 0 ||
(outcome & kComparisonUndefined) != 0) {
return (outcome & kComparisonTrue) != 0 ? Type::Boolean()
: t->singleton_false_;
}
- // Type should be non empty, so we know it should be true.
DCHECK_NE(0, outcome & kComparisonTrue);
return t->singleton_true_;
}
@@ -623,17 +624,20 @@ Type Typer::Visitor::ToString(Type type, Typer* t) {
Type Typer::Visitor::ObjectIsArrayBufferView(Type type, Typer* t) {
// TODO(turbofan): Introduce a Type::ArrayBufferView?
+ CHECK(!type.IsNone());
if (!type.Maybe(Type::OtherObject())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsBigInt(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::BigInt())) return t->singleton_true_;
if (!type.Maybe(Type::BigInt())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsCallable(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::Callable())) return t->singleton_true_;
if (!type.Maybe(Type::Callable())) return t->singleton_false_;
return Type::Boolean();
@@ -641,53 +645,62 @@ Type Typer::Visitor::ObjectIsCallable(Type type, Typer* t) {
Type Typer::Visitor::ObjectIsConstructor(Type type, Typer* t) {
// TODO(turbofan): Introduce a Type::Constructor?
+ CHECK(!type.IsNone());
if (!type.Maybe(Type::Callable())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsDetectableCallable(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::DetectableCallable())) return t->singleton_true_;
if (!type.Maybe(Type::DetectableCallable())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsMinusZero(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::MinusZero())) return t->singleton_true_;
if (!type.Maybe(Type::MinusZero())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::NumberIsMinusZero(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::MinusZero())) return t->singleton_true_;
if (!type.Maybe(Type::MinusZero())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsNaN(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::NaN())) return t->singleton_true_;
if (!type.Maybe(Type::NaN())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::NumberIsNaN(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::NaN())) return t->singleton_true_;
if (!type.Maybe(Type::NaN())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsNonCallable(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::NonCallable())) return t->singleton_true_;
if (!type.Maybe(Type::NonCallable())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsNumber(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::Number())) return t->singleton_true_;
if (!type.Maybe(Type::Number())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsReceiver(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::Receiver())) return t->singleton_true_;
if (!type.Maybe(Type::Receiver())) return t->singleton_false_;
return Type::Boolean();
@@ -699,18 +712,21 @@ Type Typer::Visitor::ObjectIsSmi(Type type, Typer* t) {
}
Type Typer::Visitor::ObjectIsString(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::String())) return t->singleton_true_;
if (!type.Maybe(Type::String())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsSymbol(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::Symbol())) return t->singleton_true_;
if (!type.Maybe(Type::Symbol())) return t->singleton_false_;
return Type::Boolean();
}
Type Typer::Visitor::ObjectIsUndetectable(Type type, Typer* t) {
+ CHECK(!type.IsNone());
if (type.Is(Type::Undetectable())) return t->singleton_true_;
if (!type.Maybe(Type::Undetectable())) return t->singleton_false_;
return Type::Boolean();
@@ -994,9 +1010,12 @@ Type Typer::Visitor::TypeDeadValue(Node* node) { return Type::None(); }
Type Typer::Visitor::TypeUnreachable(Node* node) { return Type::None(); }
+Type Typer::Visitor::TypeStaticAssert(Node* node) { UNREACHABLE(); }
+
// JS comparison operators.
Type Typer::Visitor::JSEqualTyper(Type lhs, Type rhs, Typer* t) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
if (lhs.Is(Type::NaN()) || rhs.Is(Type::NaN())) return t->singleton_false_;
if (lhs.Is(Type::NullOrUndefined()) && rhs.Is(Type::NullOrUndefined())) {
return t->singleton_true_;
@@ -1046,6 +1065,8 @@ Typer::Visitor::ComparisonOutcome Typer::Visitor::NumberCompareTyper(Type lhs,
DCHECK(lhs.Is(Type::Number()));
DCHECK(rhs.Is(Type::Number()));
+ if (lhs.IsNone() || rhs.IsNone()) return {};
+
// Shortcut for NaNs.
if (lhs.Is(Type::NaN()) || rhs.Is(Type::NaN())) return kComparisonUndefined;
@@ -1058,11 +1079,9 @@ Typer::Visitor::ComparisonOutcome Typer::Visitor::NumberCompareTyper(Type lhs,
} else if (lhs.Max() < rhs.Min()) {
result = kComparisonTrue;
} else {
- // We cannot figure out the result, return both true and false. (We do not
- // have to return undefined because that cannot affect the result of
- // FalsifyUndefined.)
return ComparisonOutcome(kComparisonTrue) |
- ComparisonOutcome(kComparisonFalse);
+ ComparisonOutcome(kComparisonFalse) |
+ ComparisonOutcome(kComparisonUndefined);
}
// Add the undefined if we could see NaN.
if (lhs.Maybe(Type::NaN()) || rhs.Maybe(Type::NaN())) {
@@ -1804,7 +1823,6 @@ Type Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineIsSmi:
return TypeUnaryOp(node, ObjectIsSmi);
case Runtime::kInlineIsArray:
- case Runtime::kInlineIsTypedArray:
case Runtime::kInlineIsRegExp:
return Type::Boolean();
case Runtime::kInlineCreateIterResultObject:
@@ -1815,7 +1833,7 @@ Type Typer::Visitor::TypeJSCallRuntime(Node* node) {
return TypeUnaryOp(node, ToNumber);
case Runtime::kInlineToObject:
return TypeUnaryOp(node, ToObject);
- case Runtime::kInlineToString:
+ case Runtime::kInlineToStringRT:
return TypeUnaryOp(node, ToString);
case Runtime::kHasInPrototypeChain:
return Type::Boolean();
@@ -1987,10 +2005,19 @@ Type Typer::Visitor::SameValueTyper(Type lhs, Type rhs, Typer* t) {
return t->operation_typer()->SameValue(lhs, rhs);
}
+// static
+Type Typer::Visitor::SameValueNumbersOnlyTyper(Type lhs, Type rhs, Typer* t) {
+ return t->operation_typer()->SameValueNumbersOnly(lhs, rhs);
+}
+
Type Typer::Visitor::TypeSameValue(Node* node) {
return TypeBinaryOp(node, SameValueTyper);
}
+Type Typer::Visitor::TypeSameValueNumbersOnly(Node* node) {
+ return TypeBinaryOp(node, SameValueNumbersOnlyTyper);
+}
+
Type Typer::Visitor::TypeNumberSameValue(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStringEqual(Node* node) { return Type::Boolean(); }
@@ -2137,15 +2164,11 @@ Type Typer::Visitor::TypeLoadField(Node* node) {
return FieldAccessOf(node->op()).type;
}
-Type Typer::Visitor::TypeLoadMessage(Node* node) { return Type::Any(); }
-
Type Typer::Visitor::TypeLoadElement(Node* node) {
return ElementAccessOf(node->op()).type;
}
-Type Typer::Visitor::TypeLoadStackArgument(Node* node) {
- return Type::NonInternal();
-}
+Type Typer::Visitor::TypeLoadFromObject(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeLoadTypedElement(Node* node) {
switch (ExternalArrayTypeOf(node->op())) {
@@ -2170,10 +2193,11 @@ Type Typer::Visitor::TypeLoadDataViewElement(Node* node) {
}
Type Typer::Visitor::TypeStoreField(Node* node) { UNREACHABLE(); }
-Type Typer::Visitor::TypeStoreMessage(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeStoreElement(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeStoreToObject(Node* node) { UNREACHABLE(); }
+
Type Typer::Visitor::TypeTransitionAndStoreElement(Node* node) {
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 62cbd1d35a..fa87d81f1e 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -5,9 +5,9 @@
#ifndef V8_COMPILER_TYPER_H_
#define V8_COMPILER_TYPER_H_
+#include "src/common/globals.h"
#include "src/compiler/graph.h"
#include "src/compiler/operation-typer.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 14496cff53..edf07a4ffd 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -6,9 +6,9 @@
#include "src/compiler/types.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -324,11 +324,6 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
// require bit set types, they should get kOtherInternal.
case MUTABLE_HEAP_NUMBER_TYPE:
case FREE_SPACE_TYPE:
-#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE:
-
- TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
-#undef FIXED_TYPED_ARRAY_CASE
case FILLER_TYPE:
case ACCESS_CHECK_INFO_TYPE:
case ASM_WASM_DATA_TYPE:
@@ -348,12 +343,16 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
case PROTOTYPE_INFO_TYPE:
case INTERPRETER_DATA_TYPE:
+ case TEMPLATE_OBJECT_DESCRIPTION_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
case ENUM_CACHE_TYPE:
+ case SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE:
+ case WASM_CAPI_FUNCTION_DATA_TYPE:
case WASM_DEBUG_INFO_TYPE:
case WASM_EXCEPTION_TAG_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
+ case WASM_JS_FUNCTION_DATA_TYPE:
case LOAD_HANDLER_TYPE:
case STORE_HANDLER_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index a4219ec4f7..21aaab5036 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -6,12 +6,12 @@
#define V8_COMPILER_TYPES_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/js-heap-broker.h"
-#include "src/conversions.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/objects.h"
-#include "src/ostreams.h"
+#include "src/handles/handles.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/objects.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/value-numbering-reducer.h b/deps/v8/src/compiler/value-numbering-reducer.h
index 489ab71d74..41ff2ae5b7 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.h
+++ b/deps/v8/src/compiler/value-numbering-reducer.h
@@ -6,8 +6,8 @@
#define V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
#include "src/base/compiler-specific.h"
+#include "src/common/globals.h"
#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/vector-slot-pair.cc b/deps/v8/src/compiler/vector-slot-pair.cc
index 9a1d13c697..97f53648a4 100644
--- a/deps/v8/src/vector-slot-pair.cc
+++ b/deps/v8/src/compiler/vector-slot-pair.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/vector-slot-pair.h"
+#include "src/compiler/vector-slot-pair.h"
-#include "src/feedback-vector.h"
+#include "src/objects/feedback-vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/vector-slot-pair.h b/deps/v8/src/compiler/vector-slot-pair.h
index cb99d06112..9944544a13 100644
--- a/deps/v8/src/vector-slot-pair.h
+++ b/deps/v8/src/compiler/vector-slot-pair.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_VECTOR_SLOT_PAIR_H_
-#define V8_VECTOR_SLOT_PAIR_H_
+#ifndef V8_COMPILER_VECTOR_SLOT_PAIR_H_
+#define V8_COMPILER_VECTOR_SLOT_PAIR_H_
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/utils.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -48,4 +48,4 @@ size_t hash_value(VectorSlotPair const&);
} // namespace internal
} // namespace v8
-#endif // V8_VECTOR_SLOT_PAIR_H_
+#endif // V8_COMPILER_VECTOR_SLOT_PAIR_H_
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 6cd248cfd9..3f1b2e9f13 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -10,7 +10,6 @@
#include <sstream>
#include <string>
-#include "src/bit-vector.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
@@ -23,7 +22,8 @@
#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
-#include "src/ostreams.h"
+#include "src/utils/bit-vector.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -490,7 +490,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kInductionVariablePhi: {
// This is only a temporary node for the typer.
UNREACHABLE();
- break;
}
case IrOpcode::kEffectPhi: {
// EffectPhi input count matches parent control node.
@@ -1185,6 +1184,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Boolean());
break;
case IrOpcode::kSameValue:
+ case IrOpcode::kSameValueNumbersOnly:
// (Any, Any) -> Boolean
CheckValueInputIs(node, 0, Type::Any());
CheckValueInputIs(node, 1, Type::Any());
@@ -1552,25 +1552,25 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::NonInternal());
break;
case IrOpcode::kLoadField:
- case IrOpcode::kLoadMessage:
// Object -> fieldtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
// CheckTypeIs(node, FieldAccessOf(node->op()).type));
break;
case IrOpcode::kLoadElement:
- case IrOpcode::kLoadStackArgument:
// Object -> elementtype
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
// CheckTypeIs(node, ElementAccessOf(node->op()).type));
break;
+ case IrOpcode::kLoadFromObject:
+ // TODO(gsps): Can we check some types here?
+ break;
case IrOpcode::kLoadTypedElement:
break;
case IrOpcode::kLoadDataViewElement:
break;
case IrOpcode::kStoreField:
- case IrOpcode::kStoreMessage:
// (Object, fieldtype) -> _|_
// TODO(rossberg): activate once machine ops are typed.
// CheckValueInputIs(node, 0, Type::Object());
@@ -1584,6 +1584,9 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckValueInputIs(node, 1, ElementAccessOf(node->op()).type));
CheckNotTyped(node);
break;
+ case IrOpcode::kStoreToObject:
+ // TODO(gsps): Can we check some types here?
+ break;
case IrOpcode::kTransitionAndStoreElement:
CheckNotTyped(node);
break;
@@ -1829,6 +1832,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kSignExtendWord8ToInt64:
case IrOpcode::kSignExtendWord16ToInt64:
case IrOpcode::kSignExtendWord32ToInt64:
+ case IrOpcode::kStaticAssert:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index 2cf851cadd..308b44060a 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -6,7 +6,7 @@
#define V8_COMPILER_VERIFIER_H_
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index dd4da53124..3396214e58 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -6,14 +6,16 @@
#include <memory>
-#include "src/assembler-inl.h"
-#include "src/assembler.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
#include "src/base/v8-fallthrough.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/common-operator.h"
@@ -31,16 +33,14 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/simd-scalar-lowering.h"
#include "src/compiler/zone-stats.h"
-#include "src/counters.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
-#include "src/interface-descriptors.h"
-#include "src/isolate-inl.h"
-#include "src/log.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/objects/heap-number.h"
-#include "src/optimized-compilation-info.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/graph-builder-interface.h"
@@ -84,53 +84,66 @@ MachineType assert_size(int expected_size, MachineType type) {
#define WASM_INSTANCE_OBJECT_OFFSET(name) \
wasm::ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
-#define LOAD_RAW(base_pointer, byte_offset, type) \
- SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type), base_pointer, \
- mcgraph()->Int32Constant(byte_offset), Effect(), \
- Control()))
+#define LOAD_RAW(base_pointer, byte_offset, type) \
+ InsertDecompressionIfNeeded( \
+ type, SetEffect(graph()->NewNode( \
+ mcgraph()->machine()->Load(type), base_pointer, \
+ mcgraph()->Int32Constant(byte_offset), Effect(), Control())))
+
+#define LOAD_RAW_NODE_OFFSET(base_pointer, node_offset, type) \
+ InsertDecompressionIfNeeded( \
+ type, SetEffect(graph()->NewNode(mcgraph()->machine()->Load(type), \
+ base_pointer, node_offset, Effect(), \
+ Control())))
#define LOAD_INSTANCE_FIELD(name, type) \
LOAD_RAW(instance_node_.get(), WASM_INSTANCE_OBJECT_OFFSET(name), \
assert_size(WASM_INSTANCE_OBJECT_SIZE(name), type))
#define LOAD_TAGGED_POINTER(base_pointer, byte_offset) \
- LOAD_RAW(base_pointer, byte_offset, MachineType::TaggedPointer())
+ LOAD_RAW(base_pointer, byte_offset, \
+ MachineType::TypeCompressedTaggedPointer())
#define LOAD_TAGGED_ANY(base_pointer, byte_offset) \
- LOAD_RAW(base_pointer, byte_offset, MachineType::AnyTagged())
+ LOAD_RAW(base_pointer, byte_offset, MachineType::TypeCompressedTagged())
#define LOAD_FIXED_ARRAY_SLOT(array_node, index, type) \
LOAD_RAW(array_node, \
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), type)
#define LOAD_FIXED_ARRAY_SLOT_SMI(array_node, index) \
- LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedSigned())
+ LOAD_FIXED_ARRAY_SLOT(array_node, index, \
+ MachineType::TypeCompressedTaggedSigned())
#define LOAD_FIXED_ARRAY_SLOT_PTR(array_node, index) \
- LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TaggedPointer())
+ LOAD_FIXED_ARRAY_SLOT(array_node, index, \
+ MachineType::TypeCompressedTaggedPointer())
#define LOAD_FIXED_ARRAY_SLOT_ANY(array_node, index) \
- LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::AnyTagged())
+ LOAD_FIXED_ARRAY_SLOT(array_node, index, MachineType::TypeCompressedTagged())
+
+#define STORE_RAW(base, offset, val, rep, barrier) \
+ SetEffect(graph()->NewNode( \
+ mcgraph()->machine()->Store(StoreRepresentation(rep, barrier)), base, \
+ mcgraph()->Int32Constant(offset), InsertCompressionIfNeeded(rep, val), \
+ Effect(), Control()))
+
+#define STORE_RAW_NODE_OFFSET(base, node_offset, val, rep, barrier) \
+ SetEffect(graph()->NewNode( \
+ mcgraph()->machine()->Store(StoreRepresentation(rep, barrier)), base, \
+ node_offset, InsertCompressionIfNeeded(rep, val), Effect(), Control()))
// This can be used to store tagged Smi values only.
-#define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value) \
- SetEffect(graph()->NewNode( \
- mcgraph()->machine()->Store(StoreRepresentation( \
- MachineRepresentation::kTaggedSigned, kNoWriteBarrier)), \
- array_node, \
- mcgraph()->Int32Constant( \
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index)), \
- value, Effect(), Control()))
+#define STORE_FIXED_ARRAY_SLOT_SMI(array_node, index, value) \
+ STORE_RAW(array_node, \
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), value, \
+ MachineType::RepCompressedTaggedSigned(), kNoWriteBarrier)
// This can be used to store any tagged (Smi and HeapObject) value.
-#define STORE_FIXED_ARRAY_SLOT_ANY(array_node, index, value) \
- SetEffect(graph()->NewNode( \
- mcgraph()->machine()->Store(StoreRepresentation( \
- MachineRepresentation::kTagged, kFullWriteBarrier)), \
- array_node, \
- mcgraph()->Int32Constant( \
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index)), \
- value, Effect(), Control()))
+#define STORE_FIXED_ARRAY_SLOT_ANY(array_node, index, value) \
+ STORE_RAW(array_node, \
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), value, \
+ MachineType::RepCompressedTagged(), kFullWriteBarrier)
void MergeControlToEnd(MachineGraph* mcgraph, Node* node) {
Graph* g = mcgraph->graph();
@@ -263,7 +276,16 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
}
Node* WasmGraphBuilder::RefNull() {
- return LOAD_INSTANCE_FIELD(NullValue, MachineType::TaggedPointer());
+ return LOAD_INSTANCE_FIELD(NullValue,
+ MachineType::TypeCompressedTaggedPointer());
+}
+
+Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
+ Node* args[] = {
+ graph()->NewNode(mcgraph()->common()->NumberConstant(function_index))};
+ Node* result =
+ BuildCallToRuntime(Runtime::kWasmRefFunc, args, arraysize(args));
+ return result;
}
Node* WasmGraphBuilder::NoContextConstant() {
@@ -345,6 +367,56 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
*effect = ephi;
}
+Node* WasmGraphBuilder::InsertDecompressionIfNeeded(MachineType type,
+ Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ switch (type.representation()) {
+ case MachineRepresentation::kCompressedPointer:
+ value = graph()->NewNode(
+ mcgraph()->machine()->ChangeCompressedPointerToTaggedPointer(),
+ value);
+ break;
+ case MachineRepresentation::kCompressedSigned:
+ value = graph()->NewNode(
+ mcgraph()->machine()->ChangeCompressedSignedToTaggedSigned(),
+ value);
+ break;
+ case MachineRepresentation::kCompressed:
+ value = graph()->NewNode(
+ mcgraph()->machine()->ChangeCompressedToTagged(), value);
+ break;
+ default:
+ break;
+ }
+ }
+ return value;
+}
+
+Node* WasmGraphBuilder::InsertCompressionIfNeeded(MachineRepresentation rep,
+ Node* value) {
+ if (COMPRESS_POINTERS_BOOL) {
+ switch (rep) {
+ case MachineRepresentation::kCompressedPointer:
+ value = graph()->NewNode(
+ mcgraph()->machine()->ChangeTaggedPointerToCompressedPointer(),
+ value);
+ break;
+ case MachineRepresentation::kCompressedSigned:
+ value = graph()->NewNode(
+ mcgraph()->machine()->ChangeTaggedSignedToCompressedSigned(),
+ value);
+ break;
+ case MachineRepresentation::kCompressed:
+ value = graph()->NewNode(
+ mcgraph()->machine()->ChangeTaggedToCompressed(), value);
+ break;
+ default:
+ break;
+ }
+ }
+ return value;
+}
+
void WasmGraphBuilder::PatchInStackCheckIfNeeded() {
if (!needs_stack_check_) return;
@@ -1159,7 +1231,6 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(
break;
default:
UNREACHABLE();
- break;
}
if (mem_rep == MachineRepresentation::kWord8) {
@@ -1313,7 +1384,6 @@ Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
break;
default:
UNREACHABLE();
- break;
}
int i;
@@ -2073,7 +2143,8 @@ Node* WasmGraphBuilder::MemoryGrow(Node* input) {
Node* WasmGraphBuilder::Throw(uint32_t exception_index,
const wasm::WasmException* exception,
- const Vector<Node*> values) {
+ const Vector<Node*> values,
+ wasm::WasmCodePosition position) {
needs_stack_check_ = true;
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
Node* create_parameters[] = {
@@ -2082,6 +2153,7 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
Node* except_obj =
BuildCallToRuntime(Runtime::kWasmThrowCreate, create_parameters,
arraysize(create_parameters));
+ SetSourcePosition(except_obj, position);
Node* values_array =
BuildCallToRuntime(Runtime::kWasmExceptionGetValues, &except_obj, 1);
uint32_t index = 0;
@@ -2123,6 +2195,8 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
graph()->NewNode(m->I32x4ExtractLane(3), value));
break;
case wasm::kWasmAnyRef:
+ case wasm::kWasmAnyFunc:
+ case wasm::kWasmExceptRef:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
@@ -2138,9 +2212,11 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmThrow, RelocInfo::WASM_STUB_CALL);
- return SetEffect(SetControl(
+ Node* call = SetEffect(SetControl(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
except_obj, Effect(), Control())));
+ SetSourcePosition(call, position);
+ return call;
}
void WasmGraphBuilder::BuildEncodeException32BitValue(Node* values_array,
@@ -2184,13 +2260,16 @@ Node* WasmGraphBuilder::BuildDecodeException64BitValue(Node* values_array,
Node* WasmGraphBuilder::Rethrow(Node* except_obj) {
needs_stack_check_ = true;
+ // TODO(v8:8091): Currently the message of the original exception is not being
+ // preserved when rethrown to the console. The pending message will need to be
+ // saved when caught and restored here while being rethrown.
WasmThrowDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), interface_descriptor,
interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
Node* call_target = mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmThrow, RelocInfo::WASM_STUB_CALL);
+ wasm::WasmCode::kWasmRethrow, RelocInfo::WASM_STUB_CALL);
return SetEffect(SetControl(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), call_target,
except_obj, Effect(), Control())));
@@ -2203,8 +2282,8 @@ Node* WasmGraphBuilder::ExceptionTagEqual(Node* caught_tag,
}
Node* WasmGraphBuilder::LoadExceptionTagFromTable(uint32_t exception_index) {
- Node* exceptions_table =
- LOAD_INSTANCE_FIELD(ExceptionsTable, MachineType::TaggedPointer());
+ Node* exceptions_table = LOAD_INSTANCE_FIELD(
+ ExceptionsTable, MachineType::TypeCompressedTaggedPointer());
Node* tag = LOAD_FIXED_ARRAY_SLOT_PTR(exceptions_table, exception_index);
return tag;
}
@@ -2255,6 +2334,8 @@ Node** WasmGraphBuilder::GetExceptionValues(
BuildDecodeException32BitValue(values_array, &index));
break;
case wasm::kWasmAnyRef:
+ case wasm::kWasmAnyFunc:
+ case wasm::kWasmExceptRef:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
@@ -2676,8 +2757,8 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
int func_index,
IsReturnCall continuation) {
// Load the imported function refs array from the instance.
- Node* imported_function_refs =
- LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
+ Node* imported_function_refs = LOAD_INSTANCE_FIELD(
+ ImportedFunctionRefs, MachineType::TypeCompressedTaggedPointer());
Node* ref_node =
LOAD_FIXED_ARRAY_SLOT_PTR(imported_function_refs, func_index);
@@ -2707,8 +2788,8 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
Node* func_index,
IsReturnCall continuation) {
// Load the imported function refs array from the instance.
- Node* imported_function_refs =
- LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
+ Node* imported_function_refs = LOAD_INSTANCE_FIELD(
+ ImportedFunctionRefs, MachineType::TypeCompressedTaggedPointer());
// Access fixed array at {header_size - tag + func_index * kTaggedSize}.
Node* imported_instances_data = graph()->NewNode(
mcgraph()->machine()->IntAdd(), imported_function_refs,
@@ -2717,10 +2798,9 @@ Node* WasmGraphBuilder::BuildImportCall(wasm::FunctionSig* sig, Node** args,
Node* func_index_times_tagged_size = graph()->NewNode(
mcgraph()->machine()->IntMul(), Uint32ToUintptr(func_index),
mcgraph()->Int32Constant(kTaggedSize));
- Node* ref_node = SetEffect(
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::TaggedPointer()),
- imported_instances_data, func_index_times_tagged_size,
- Effect(), Control()));
+ Node* ref_node = LOAD_RAW_NODE_OFFSET(
+ imported_instances_data, func_index_times_tagged_size,
+ MachineType::TypeCompressedTaggedPointer());
// Load the target from the imported_targets array at the offset of
// {func_index}.
@@ -2831,8 +2911,8 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
Node* ift_targets =
LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, MachineType::Pointer());
- Node* ift_instances = LOAD_INSTANCE_FIELD(IndirectFunctionTableRefs,
- MachineType::TaggedPointer());
+ Node* ift_instances = LOAD_INSTANCE_FIELD(
+ IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer());
Node* tagged_scaled_key;
if (kTaggedSize == kInt32Size) {
@@ -2843,11 +2923,10 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
int32_scaled_key);
}
- Node* target_instance = SetEffect(graph()->NewNode(
- machine->Load(MachineType::TaggedPointer()),
+ Node* target_instance = LOAD_RAW(
graph()->NewNode(machine->IntAdd(), ift_instances, tagged_scaled_key),
- Int32Constant(wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)),
- Effect(), Control()));
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0),
+ MachineType::TypeCompressedTaggedPointer());
Node* intptr_scaled_key;
if (kSystemPointerSize == kTaggedSize) {
@@ -3194,8 +3273,9 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableAnyRefGlobal(
const wasm::WasmGlobal& global, Node** base, Node** offset) {
// Load the base from the ImportedMutableGlobalsBuffer of the instance.
- Node* buffers = LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
- MachineType::TaggedPointer());
+ Node* buffers =
+ LOAD_INSTANCE_FIELD(ImportedMutableGlobalsBuffers,
+ MachineType::TypeCompressedTaggedPointer());
*base = LOAD_FIXED_ARRAY_SLOT_ANY(buffers, global.index);
// For the offset we need the index of the global in the buffer, and then
@@ -3263,8 +3343,8 @@ Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(
// The CEntryStub is loaded from the instance_node so that generated code is
// Isolate independent. At the moment this is only done for CEntryStub(1).
DCHECK_EQ(1, fun->result_size);
- Node* centry_stub =
- LOAD_INSTANCE_FIELD(CEntryStub, MachineType::TaggedPointer());
+ Node* centry_stub = LOAD_INSTANCE_FIELD(
+ CEntryStub, MachineType::TypeCompressedTaggedPointer());
// TODO(titzer): allow arbitrary number of runtime arguments
// At the moment we only allow 5 parameters. If more parameters are needed,
// increase this constant accordingly.
@@ -3303,12 +3383,11 @@ Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
Node* base = nullptr;
Node* offset = nullptr;
GetBaseAndOffsetForImportedMutableAnyRefGlobal(global, &base, &offset);
- return SetEffect(
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::AnyTagged()),
- base, offset, Effect(), Control()));
+ return LOAD_RAW_NODE_OFFSET(base, offset,
+ MachineType::TypeCompressedTagged());
}
- Node* globals_buffer =
- LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
+ Node* globals_buffer = LOAD_INSTANCE_FIELD(
+ TaggedGlobalsBuffer, MachineType::TypeCompressedTaggedPointer());
return LOAD_FIXED_ARRAY_SLOT_ANY(globals_buffer, global.offset);
}
@@ -3335,13 +3414,11 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
Node* offset = nullptr;
GetBaseAndOffsetForImportedMutableAnyRefGlobal(global, &base, &offset);
- return SetEffect(graph()->NewNode(
- mcgraph()->machine()->Store(StoreRepresentation(
- MachineRepresentation::kTagged, kFullWriteBarrier)),
- base, offset, val, Effect(), Control()));
+ return STORE_RAW_NODE_OFFSET(
+ base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier);
}
- Node* globals_buffer =
- LOAD_INSTANCE_FIELD(TaggedGlobalsBuffer, MachineType::TaggedPointer());
+ Node* globals_buffer = LOAD_INSTANCE_FIELD(
+ TaggedGlobalsBuffer, MachineType::TypeCompressedTaggedPointer());
return STORE_FIXED_ARRAY_SLOT_ANY(globals_buffer,
env_->module->globals[index].offset, val);
}
@@ -3366,20 +3443,23 @@ void WasmGraphBuilder::BoundsCheckTable(uint32_t table_index, Node* entry_index,
wasm::WasmCodePosition position,
wasm::TrapReason trap_reason,
Node** base_node) {
- Node* tables = LOAD_INSTANCE_FIELD(Tables, MachineType::TaggedPointer());
+ Node* tables =
+ LOAD_INSTANCE_FIELD(Tables, MachineType::TypeCompressedTaggedPointer());
Node* table = LOAD_FIXED_ARRAY_SLOT_ANY(tables, table_index);
- int storage_field_size = WasmTableObject::kElementsOffsetEnd -
- WasmTableObject::kElementsOffset + 1;
+ int storage_field_size =
+ WasmTableObject::kEntriesOffsetEnd - WasmTableObject::kEntriesOffset + 1;
Node* storage = LOAD_RAW(
- table, wasm::ObjectAccess::ToTagged(WasmTableObject::kElementsOffset),
- assert_size(storage_field_size, MachineType::TaggedPointer()));
+ table, wasm::ObjectAccess::ToTagged(WasmTableObject::kEntriesOffset),
+ assert_size(storage_field_size,
+ MachineType::TypeCompressedTaggedPointer()));
int length_field_size =
FixedArray::kLengthOffsetEnd - FixedArray::kLengthOffset + 1;
Node* storage_size =
LOAD_RAW(storage, wasm::ObjectAccess::ToTagged(FixedArray::kLengthOffset),
- assert_size(length_field_size, MachineType::TaggedSigned()));
+ assert_size(length_field_size,
+ MachineType::TypeCompressedTaggedSigned()));
storage_size = BuildChangeSmiToInt32(storage_size);
// Bounds check against the table size.
@@ -3418,9 +3498,8 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
Node* base = nullptr;
Node* offset = nullptr;
GetTableBaseAndOffset(table_index, index, position, &base, &offset);
- return SetEffect(
- graph()->NewNode(mcgraph()->machine()->Load(MachineType::AnyTagged()),
- base, offset, Effect(), Control()));
+ return LOAD_RAW_NODE_OFFSET(base, offset,
+ MachineType::TypeCompressedTagged());
}
// We access anyfunc tables through runtime calls.
WasmTableGetDescriptor interface_descriptor;
@@ -3448,12 +3527,8 @@ Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val,
Node* base = nullptr;
Node* offset = nullptr;
GetTableBaseAndOffset(table_index, index, position, &base, &offset);
-
- const Operator* op = mcgraph()->machine()->Store(
- StoreRepresentation(MachineRepresentation::kTagged, kFullWriteBarrier));
-
- Node* store = graph()->NewNode(op, base, offset, val, Effect(), Control());
- return SetEffect(store);
+ return STORE_RAW_NODE_OFFSET(
+ base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier);
} else {
// We access anyfunc tables through runtime calls.
WasmTableSetDescriptor interface_descriptor;
@@ -3912,8 +3987,9 @@ void WasmGraphBuilder::SimdScalarLoweringForTesting() {
void WasmGraphBuilder::SetSourcePosition(Node* node,
wasm::WasmCodePosition position) {
DCHECK_NE(position, wasm::kNoCodePosition);
- if (source_position_table_)
+ if (source_position_table_) {
source_position_table_->SetSourcePosition(node, SourcePosition(position));
+ }
}
Node* WasmGraphBuilder::S128Zero() {
@@ -4729,6 +4805,49 @@ Node* WasmGraphBuilder::TableCopy(uint32_t table_src_index,
return result;
}
+Node* WasmGraphBuilder::TableGrow(uint32_t table_index, Node* value,
+ Node* delta) {
+ Node* args[] = {
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)), value,
+ BuildConvertUint32ToSmiWithSaturation(delta, FLAG_wasm_max_table_size)};
+ Node* result =
+ BuildCallToRuntime(Runtime::kWasmTableGrow, args, arraysize(args));
+ return BuildChangeSmiToInt32(result);
+}
+
+Node* WasmGraphBuilder::TableSize(uint32_t table_index) {
+ Node* tables =
+ LOAD_INSTANCE_FIELD(Tables, MachineType::TypeCompressedTaggedPointer());
+ Node* table = LOAD_FIXED_ARRAY_SLOT_ANY(tables, table_index);
+
+ int storage_field_size = WasmTableObject::kElementsOffsetEnd -
+ WasmTableObject::kElementsOffset + 1;
+ Node* storage = LOAD_RAW(
+ table, wasm::ObjectAccess::ToTagged(WasmTableObject::kEntriesOffset),
+ assert_size(storage_field_size,
+ MachineType::TypeCompressedTaggedPointer()));
+
+ int length_field_size =
+ FixedArray::kLengthOffsetEnd - FixedArray::kLengthOffset + 1;
+ Node* table_size =
+ LOAD_RAW(storage, wasm::ObjectAccess::ToTagged(FixedArray::kLengthOffset),
+ assert_size(length_field_size,
+ MachineType::TypeCompressedTaggedSigned()));
+
+ return BuildChangeSmiToInt32(table_size);
+}
+
+Node* WasmGraphBuilder::TableFill(uint32_t table_index, Node* start,
+ Node* value, Node* count) {
+ Node* args[] = {
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
+ BuildConvertUint32ToSmiWithSaturation(start, FLAG_wasm_max_table_size),
+ value,
+ BuildConvertUint32ToSmiWithSaturation(count, FLAG_wasm_max_table_size)};
+
+ return BuildCallToRuntime(Runtime::kWasmTableFill, args, arraysize(args));
+}
+
class WasmDecorator final : public GraphDecorator {
public:
explicit WasmDecorator(NodeOriginTable* origins, wasm::Decoder* decoder)
@@ -4777,7 +4896,7 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
CHECK_LT(0, len);
va_end(arguments);
Handle<String> name_str =
- isolate->factory()->NewStringFromAsciiChecked(buffer.start());
+ isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *name_str));
}
@@ -5007,8 +5126,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetControl(is_heap_object.if_true);
Node* orig_effect = Effect();
- Node* undefined_node =
- LOAD_INSTANCE_FIELD(UndefinedValue, MachineType::TaggedPointer());
+ Node* undefined_node = LOAD_INSTANCE_FIELD(
+ UndefinedValue, MachineType::TypeCompressedTaggedPointer());
Node* check_undefined =
graph()->NewNode(machine->WordEqual(), value, undefined_node);
Node* effect_tagged = Effect();
@@ -5214,42 +5333,35 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
Node* BuildLoadFunctionDataFromExportedFunction(Node* closure) {
- Node* shared = SetEffect(graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->Int32Constant(
- wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()),
- Effect(), Control()));
- return SetEffect(graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), shared,
- jsgraph()->Int32Constant(SharedFunctionInfo::kFunctionDataOffset -
- kHeapObjectTag),
- Effect(), Control()));
+ Node* shared = LOAD_RAW(
+ closure,
+ wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction(),
+ MachineType::TypeCompressedTagged());
+ return LOAD_RAW(shared,
+ SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag,
+ MachineType::TypeCompressedTagged());
}
Node* BuildLoadInstanceFromExportedFunctionData(Node* function_data) {
- return SetEffect(graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
- jsgraph()->Int32Constant(WasmExportedFunctionData::kInstanceOffset -
- kHeapObjectTag),
- Effect(), Control()));
+ return LOAD_RAW(function_data,
+ WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag,
+ MachineType::TypeCompressedTagged());
}
Node* BuildLoadFunctionIndexFromExportedFunctionData(Node* function_data) {
- Node* function_index_smi = SetEffect(graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
- jsgraph()->Int32Constant(
- WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag),
- Effect(), Control()));
+ Node* function_index_smi = LOAD_RAW(
+ function_data,
+ WasmExportedFunctionData::kFunctionIndexOffset - kHeapObjectTag,
+ MachineType::TypeCompressedTagged());
Node* function_index = BuildChangeSmiToInt32(function_index_smi);
return function_index;
}
Node* BuildLoadJumpTableOffsetFromExportedFunctionData(Node* function_data) {
- Node* jump_table_offset_smi = SetEffect(graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), function_data,
- jsgraph()->Int32Constant(
- WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag),
- Effect(), Control()));
+ Node* jump_table_offset_smi = LOAD_RAW(
+ function_data,
+ WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag,
+ MachineType::TypeCompressedTagged());
Node* jump_table_offset = BuildChangeSmiToInt32(jump_table_offset_smi);
return jump_table_offset;
}
@@ -5340,8 +5452,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
instance_node_.set(Param(wasm::kWasmInstanceParameterIndex));
- Node* native_context =
- LOAD_INSTANCE_FIELD(NativeContext, MachineType::TaggedPointer());
+ Node* native_context = LOAD_INSTANCE_FIELD(
+ NativeContext, MachineType::TypeCompressedTaggedPointer());
if (kind == WasmImportCallKind::kRuntimeTypeError) {
// =======================================================================
@@ -5359,8 +5471,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// The callable is passed as the last parameter, after WASM arguments.
Node* callable_node = Param(wasm_count + 1);
- Node* undefined_node =
- LOAD_INSTANCE_FIELD(UndefinedValue, MachineType::TaggedPointer());
+ Node* undefined_node = LOAD_INSTANCE_FIELD(
+ UndefinedValue, MachineType::TypeCompressedTaggedPointer());
Node* call = nullptr;
bool sloppy_receiver = true;
@@ -5378,12 +5490,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case WasmImportCallKind::kJSFunctionArityMatchSloppy: {
Node** args = Buffer(wasm_count + 9);
int pos = 0;
- Node* function_context = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::TaggedPointer()),
- callable_node,
- mcgraph()->Int32Constant(
- wasm::ObjectAccess::ContextOffsetInTaggedJSFunction()),
- Effect(), Control()));
+ Node* function_context =
+ LOAD_RAW(callable_node,
+ wasm::ObjectAccess::ContextOffsetInTaggedJSFunction(),
+ MachineType::TypeCompressedTaggedPointer());
args[pos++] = callable_node; // target callable.
// Receiver.
if (sloppy_receiver) {
@@ -5419,12 +5529,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case WasmImportCallKind::kJSFunctionArityMismatchSloppy: {
Node** args = Buffer(wasm_count + 9);
int pos = 0;
- Node* function_context = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::TaggedPointer()),
- callable_node,
- mcgraph()->Int32Constant(
- wasm::ObjectAccess::ContextOffsetInTaggedJSFunction()),
- Effect(), Control()));
+ Node* function_context =
+ LOAD_RAW(callable_node,
+ wasm::ObjectAccess::ContextOffsetInTaggedJSFunction(),
+ MachineType::TypeCompressedTaggedPointer());
args[pos++] =
BuildLoadBuiltinFromInstance(Builtins::kArgumentsAdaptorTrampoline);
args[pos++] = callable_node; // target callable
@@ -5432,13 +5540,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] = mcgraph()->Int32Constant(wasm_count); // argument count
// Load shared function info, and then the formal parameter count.
- Node* shared_function_info = SetEffect(graph()->NewNode(
- mcgraph()->machine()->Load(MachineType::TaggedPointer()),
+ Node* shared_function_info = LOAD_RAW(
callable_node,
- mcgraph()->Int32Constant(
- wasm::ObjectAccess::
- SharedFunctionInfoOffsetInTaggedJSFunction()),
- Effect(), Control()));
+ wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction(),
+ MachineType::TypeCompressedTaggedPointer());
Node* formal_param_count = SetEffect(graph()->NewNode(
mcgraph()->machine()->Load(MachineType::Uint16()),
shared_function_info,
@@ -5530,7 +5635,112 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return true;
}
- void BuildWasmInterpreterEntry(uint32_t func_index) {
+ void BuildCapiCallWrapper(Address address) {
+ // Store arguments on our stack, then align the stack for calling to C.
+ int param_bytes = 0;
+ for (wasm::ValueType type : sig_->parameters()) {
+ param_bytes += wasm::ValueTypes::MemSize(type);
+ }
+ int return_bytes = 0;
+ for (wasm::ValueType type : sig_->returns()) {
+ return_bytes += wasm::ValueTypes::MemSize(type);
+ }
+
+ int stack_slot_bytes = std::max(param_bytes, return_bytes);
+ Node* values = stack_slot_bytes == 0
+ ? mcgraph()->IntPtrConstant(0)
+ : graph()->NewNode(mcgraph()->machine()->StackSlot(
+ stack_slot_bytes, kDoubleAlignment));
+
+ int offset = 0;
+ int param_count = static_cast<int>(sig_->parameter_count());
+ for (int i = 0; i < param_count; ++i) {
+ wasm::ValueType type = sig_->GetParam(i);
+ // Start from the parameter with index 1 to drop the instance_node.
+ // TODO(jkummerow): When a values is a reference type, we should pass it
+ // in a GC-safe way, not just as a raw pointer.
+ SetEffect(graph()->NewNode(GetSafeStoreOperator(offset, type), values,
+ Int32Constant(offset), Param(i + 1), Effect(),
+ Control()));
+ offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ }
+ // The function is passed as the last parameter, after WASM arguments.
+ Node* function_node = Param(param_count + 1);
+ Node* shared = LOAD_RAW(
+ function_node,
+ wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction(),
+ MachineType::TypeCompressedTagged());
+ Node* sfi_data = LOAD_RAW(
+ shared, SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag,
+ MachineType::TypeCompressedTagged());
+ Node* host_data = LOAD_RAW(
+ sfi_data, WasmCapiFunctionData::kEmbedderDataOffset - kHeapObjectTag,
+ MachineType::Pointer());
+
+ BuildModifyThreadInWasmFlag(false);
+ Node* isolate_root =
+ LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer());
+ STORE_RAW(isolate_root, Isolate::c_entry_fp_offset(), fp_value,
+ MachineType::PointerRepresentation(), kNoWriteBarrier);
+
+ // TODO(jkummerow): Load the address from the {host_data}, and cache
+ // wrappers per signature.
+ const ExternalReference ref = ExternalReference::Create(address);
+ Node* function =
+ graph()->NewNode(mcgraph()->common()->ExternalConstant(ref));
+
+ // Parameters: void* data, Address arguments.
+ MachineType host_sig_types[] = {
+ MachineType::Pointer(), MachineType::Pointer(), MachineType::Pointer()};
+ MachineSignature host_sig(1, 2, host_sig_types);
+ Node* return_value = BuildCCall(&host_sig, function, host_data, values);
+
+ BuildModifyThreadInWasmFlag(true);
+
+ Node* exception_branch =
+ graph()->NewNode(mcgraph()->common()->Branch(BranchHint::kTrue),
+ graph()->NewNode(mcgraph()->machine()->WordEqual(),
+ return_value, IntPtrConstant(0)),
+ Control());
+ SetControl(
+ graph()->NewNode(mcgraph()->common()->IfFalse(), exception_branch));
+ WasmThrowDescriptor interface_descriptor;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ mcgraph()->zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kNoProperties, StubCallMode::kCallWasmRuntimeStub);
+ Node* call_target = mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmRethrow, RelocInfo::WASM_STUB_CALL);
+ Node* throw_effect =
+ graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
+ call_target, return_value, Effect(), Control());
+ TerminateThrow(throw_effect, Control());
+
+ SetControl(
+ graph()->NewNode(mcgraph()->common()->IfTrue(), exception_branch));
+ DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionMultiReturns);
+ int return_count = static_cast<int>(sig_->return_count());
+ if (return_count == 0) {
+ Return(Int32Constant(0));
+ } else {
+ Node** returns = Buffer(return_count);
+ offset = 0;
+ for (int i = 0; i < return_count; ++i) {
+ wasm::ValueType type = sig_->GetReturn(i);
+ Node* val = SetEffect(
+ graph()->NewNode(GetSafeLoadOperator(offset, type), values,
+ Int32Constant(offset), Effect(), Control()));
+ returns[i] = val;
+ offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ }
+ Return(return_count, returns);
+ }
+
+ if (ContainsInt64(sig_)) LowerInt64();
+ }
+
+ void BuildWasmInterpreterEntry(int func_index) {
int param_count = static_cast<int>(sig_->parameter_count());
// Build the start and the parameter nodes.
@@ -5758,8 +5968,8 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
bool has_bigint_feature) {
if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
auto imported_function = WasmExportedFunction::cast(*target);
- auto func_index = imported_function->function_index();
- auto module = imported_function->instance()->module();
+ auto func_index = imported_function.function_index();
+ auto module = imported_function.instance().module();
wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
if (*imported_sig != *expected_sig) {
return WasmImportCallKind::kLinkError;
@@ -5771,6 +5981,13 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
}
return WasmImportCallKind::kWasmToWasm;
}
+ if (WasmCapiFunction::IsWasmCapiFunction(*target)) {
+ WasmCapiFunction capi_function = WasmCapiFunction::cast(*target);
+ if (!capi_function.IsSignatureEqual(expected_sig)) {
+ return WasmImportCallKind::kLinkError;
+ }
+ return WasmImportCallKind::kWasmToCapi;
+ }
// Assuming we are calling to JS, check whether this would be a runtime error.
if (!wasm::IsJSCompatibleSignature(expected_sig, has_bigint_feature)) {
return WasmImportCallKind::kRuntimeTypeError;
@@ -5799,8 +6016,8 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
COMPARE_SIG_FOR_BUILTIN(F32##name); \
break;
- if (FLAG_wasm_math_intrinsics && shared->HasBuiltinId()) {
- switch (shared->builtin_id()) {
+ if (FLAG_wasm_math_intrinsics && shared.HasBuiltinId()) {
+ switch (shared.builtin_id()) {
COMPARE_SIG_FOR_BUILTIN_F64(Acos);
COMPARE_SIG_FOR_BUILTIN_F64(Asin);
COMPARE_SIG_FOR_BUILTIN_F64(Atan);
@@ -5832,12 +6049,12 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
#undef COMPARE_SIG_FOR_BUILTIN_F64
#undef COMPARE_SIG_FOR_BUILTIN_F32_F64
- if (IsClassConstructor(shared->kind())) {
+ if (IsClassConstructor(shared.kind())) {
// Class constructor will throw anyway.
return WasmImportCallKind::kUseCallBuiltin;
}
- bool sloppy = is_sloppy(shared->language_mode()) && !shared->native();
- if (shared->internal_formal_parameter_count() ==
+ bool sloppy = is_sloppy(shared.language_mode()) && !shared.native();
+ if (shared.internal_formal_parameter_count() ==
expected_sig->parameter_count()) {
return sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
: WasmImportCallKind::kJSFunctionArityMatch;
@@ -5936,7 +6153,6 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
break;
default:
UNREACHABLE();
- break;
}
builder.Return(node);
@@ -6008,7 +6224,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
// Schedule and compile to machine code.
CallDescriptor* incoming =
GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
- WasmGraphBuilder::kExtraCallableParam);
+ WasmCallKind::kWasmImportWrapper);
if (machine.Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
@@ -6025,6 +6241,63 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
return native_module->PublishCode(std::move(wasm_code));
}
+wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
+ wasm::NativeModule* native_module,
+ wasm::FunctionSig* sig,
+ Address address) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileWasmCapiFunction");
+
+ Zone zone(wasm_engine->allocator(), ZONE_NAME);
+
+ // TODO(jkummerow): Extract common code into helper method.
+ SourcePositionTable* source_positions = nullptr;
+ MachineGraph* mcgraph = new (&zone) MachineGraph(
+ new (&zone) Graph(&zone), new (&zone) CommonOperatorBuilder(&zone),
+ new (&zone) MachineOperatorBuilder(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()));
+ JSGraph jsgraph(nullptr, mcgraph->graph(), mcgraph->common(), nullptr,
+ nullptr, mcgraph->machine());
+
+ WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_positions,
+ StubCallMode::kCallWasmRuntimeStub,
+ native_module->enabled_features());
+
+ // Set up the graph start.
+ int param_count = static_cast<int>(sig->parameter_count()) +
+ 1 /* offset for first parameter index being -1 */ +
+ 1 /* Wasm instance */ + 1 /* kExtraCallableParam */;
+ Node* start = builder.Start(param_count);
+ Node* effect = start;
+ Node* control = start;
+ builder.set_effect_ptr(&effect);
+ builder.set_control_ptr(&control);
+ builder.set_instance_node(builder.Param(wasm::kWasmInstanceParameterIndex));
+ builder.BuildCapiCallWrapper(address);
+
+ // Run the compiler pipeline to generate machine code.
+ CallDescriptor* call_descriptor =
+ GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
+ WasmCallKind::kWasmCapiFunction);
+ if (mcgraph->machine()->Is32()) {
+ call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
+ }
+
+ const char* debug_name = "WasmCapiCall";
+ wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
+ wasm_engine, call_descriptor, mcgraph, Code::WASM_TO_CAPI_FUNCTION,
+ wasm::WasmCode::kWasmToCapiWrapper, debug_name,
+ WasmStubAssemblerOptions(), source_positions);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
+ result.frame_slot_count, result.tagged_parameter_slots,
+ std::move(result.protected_instructions),
+ std::move(result.source_positions), wasm::WasmCode::kWasmToCapiWrapper,
+ wasm::ExecutionTier::kNone);
+ return native_module->PublishCode(std::move(wasm_code));
+}
+
wasm::WasmCompilationResult CompileWasmInterpreterEntry(
wasm::WasmEngine* wasm_engine, const wasm::WasmFeatures& enabled_features,
uint32_t func_index, wasm::FunctionSig* sig) {
@@ -6062,7 +6335,7 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
wasm_engine, incoming, &jsgraph, Code::WASM_INTERPRETER_ENTRY,
- wasm::WasmCode::kInterpreterEntry, func_name.start(),
+ wasm::WasmCode::kInterpreterEntry, func_name.begin(),
WasmStubAssemblerOptions());
result.result_tier = wasm::ExecutionTier::kInterpreter;
@@ -6117,23 +6390,13 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
return code;
}
-TurbofanWasmCompilationUnit::TurbofanWasmCompilationUnit(
- wasm::WasmCompilationUnit* wasm_unit)
- : wasm_unit_(wasm_unit) {}
-
-// Clears unique_ptrs, but (part of) the type is forward declared in the header.
-TurbofanWasmCompilationUnit::~TurbofanWasmCompilationUnit() = default;
-
-bool TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
- AccountingAllocator* allocator, wasm::CompilationEnv* env,
- const wasm::FunctionBody& func_body, wasm::WasmFeatures* detected,
- double* decode_ms, MachineGraph* mcgraph, NodeOriginTable* node_origins,
- SourcePositionTable* source_positions) {
- base::ElapsedTimer decode_timer;
- if (FLAG_trace_wasm_decode_time) {
- decode_timer.Start();
- }
-
+bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
+ wasm::CompilationEnv* env,
+ const wasm::FunctionBody& func_body,
+ int func_index, wasm::WasmFeatures* detected,
+ MachineGraph* mcgraph,
+ NodeOriginTable* node_origins,
+ SourcePositionTable* source_positions) {
// Create a TF graph during decoding.
WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, func_body.sig,
source_positions);
@@ -6158,13 +6421,10 @@ bool TurbofanWasmCompilationUnit::BuildGraphForWasmFunction(
.LowerGraph();
}
- if (wasm_unit_->func_index_ >= FLAG_trace_wasm_ast_start &&
- wasm_unit_->func_index_ < FLAG_trace_wasm_ast_end) {
+ if (func_index >= FLAG_trace_wasm_ast_start &&
+ func_index < FLAG_trace_wasm_ast_end) {
PrintRawWasmCode(allocator, func_body, env->module, wasm::kPrintLocals);
}
- if (FLAG_trace_wasm_decode_time) {
- *decode_ms = decode_timer.Elapsed().InMillisecondsF();
- }
return true;
}
@@ -6178,20 +6438,19 @@ Vector<const char> GetDebugName(Zone* zone, int index) {
DCHECK(name_len > 0 && name_len < name_vector.length());
char* index_name = zone->NewArray<char>(name_len);
- memcpy(index_name, name_vector.start(), name_len);
+ memcpy(index_name, name_vector.begin(), name_len);
return Vector<const char>(index_name, name_len);
}
} // namespace
-wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
+wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
- const wasm::FunctionBody& func_body, Counters* counters,
+ const wasm::FunctionBody& func_body, int func_index, Counters* counters,
wasm::WasmFeatures* detected) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "ExecuteTurbofanCompilation");
- double decode_ms = 0;
- size_t node_count = 0;
-
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "ExecuteTurbofanCompilation", "func_index", func_index,
+ "body_size",
+ static_cast<uint32_t>(func_body.end - func_body.start));
Zone zone(wasm_engine->allocator(), ZONE_NAME);
MachineGraph* mcgraph = new (&zone) MachineGraph(
new (&zone) Graph(&zone), new (&zone) CommonOperatorBuilder(&zone),
@@ -6200,8 +6459,8 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()));
- OptimizedCompilationInfo info(GetDebugName(&zone, wasm_unit_->func_index_),
- &zone, Code::WASM_FUNCTION);
+ OptimizedCompilationInfo info(GetDebugName(&zone, func_index), &zone,
+ Code::WASM_FUNCTION);
if (env->runtime_exception_support) {
info.SetWasmRuntimeExceptionSupport();
}
@@ -6218,7 +6477,7 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
SourcePositionTable* source_positions =
new (mcgraph->zone()) SourcePositionTable(mcgraph->graph());
if (!BuildGraphForWasmFunction(wasm_engine->allocator(), env, func_body,
- detected, &decode_ms, mcgraph, node_origins,
+ func_index, detected, mcgraph, node_origins,
source_positions)) {
return wasm::WasmCompilationResult{};
}
@@ -6227,12 +6486,6 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
node_origins->AddDecorator();
}
- base::ElapsedTimer pipeline_timer;
- if (FLAG_trace_wasm_decode_time) {
- node_count = mcgraph->graph()->NodeCount();
- pipeline_timer.Start();
- }
-
// Run the compiler pipeline to generate machine code.
auto call_descriptor = GetWasmCallDescriptor(&zone, func_body.sig);
if (mcgraph->machine()->Is32()) {
@@ -6241,16 +6494,8 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
Pipeline::GenerateCodeForWasmFunction(
&info, wasm_engine, mcgraph, call_descriptor, source_positions,
- node_origins, func_body, env->module, wasm_unit_->func_index_);
+ node_origins, func_body, env->module, func_index);
- if (FLAG_trace_wasm_decode_time) {
- double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
- PrintF(
- "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
- "%0.3f ms pipeline\n",
- static_cast<unsigned>(func_body.end - func_body.start), decode_ms,
- node_count, pipeline_ms);
- }
// TODO(bradnelson): Improve histogram handling of size_t.
counters->wasm_compile_function_peak_memory_bytes()->AddSample(
static_cast<int>(mcgraph->graph()->zone()->allocation_size()));
@@ -6259,9 +6504,9 @@ wasm::WasmCompilationResult TurbofanWasmCompilationUnit::ExecuteCompilation(
return std::move(*result);
}
-wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
+wasm::WasmCompilationResult ExecuteInterpreterEntryCompilation(
wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
- const wasm::FunctionBody& func_body, Counters* counters,
+ const wasm::FunctionBody& func_body, int func_index, Counters* counters,
wasm::WasmFeatures* detected) {
Zone zone(wasm_engine->allocator(), ZONE_NAME);
const wasm::WasmModule* module = env ? env->module : nullptr;
@@ -6270,9 +6515,8 @@ wasm::WasmCompilationResult InterpreterCompilationUnit::ExecuteCompilation(
decoder.Decode();
if (decoder.failed()) return wasm::WasmCompilationResult{};
- wasm::WasmCompilationResult result =
- CompileWasmInterpreterEntry(wasm_engine, env->enabled_features,
- wasm_unit_->func_index_, func_body.sig);
+ wasm::WasmCompilationResult result = CompileWasmInterpreterEntry(
+ wasm_engine, env->enabled_features, func_index, func_body.sig);
DCHECK(result.succeeded());
DCHECK_EQ(wasm::ExecutionTier::kInterpreter, result.result_tier);
@@ -6315,10 +6559,11 @@ class LinkageLocationAllocator {
// General code uses the above configuration data.
CallDescriptor* GetWasmCallDescriptor(
Zone* zone, wasm::FunctionSig* fsig,
- WasmGraphBuilder::UseRetpoline use_retpoline,
- WasmGraphBuilder::ExtraCallableParam extra_callable_param) {
+ WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind) {
// The extra here is to accomodate the instance object as first parameter
- // and, in the case of an import wrapper, the additional callable.
+ // and, when specified, the additional callable.
+ bool extra_callable_param =
+ call_kind == kWasmImportWrapper || call_kind == kWasmCapiFunction;
int extra_params = extra_callable_param ? 2 : 1;
LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count() + extra_params);
@@ -6383,14 +6628,20 @@ CallDescriptor* GetWasmCallDescriptor(
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Kind kind = extra_callable_param
- ? CallDescriptor::kCallWasmImportWrapper
- : CallDescriptor::kCallWasmFunction;
+ CallDescriptor::Kind descriptor_kind;
+ if (call_kind == kWasmFunction) {
+ descriptor_kind = CallDescriptor::kCallWasmFunction;
+ } else if (call_kind == kWasmImportWrapper) {
+ descriptor_kind = CallDescriptor::kCallWasmImportWrapper;
+ } else {
+ DCHECK_EQ(call_kind, kWasmCapiFunction);
+ descriptor_kind = CallDescriptor::kCallWasmCapiFunction;
+ }
CallDescriptor::Flags flags =
use_retpoline ? CallDescriptor::kRetpoline : CallDescriptor::kNoFlags;
return new (zone) CallDescriptor( // --
- kind, // kind
+ descriptor_kind, // kind
target_type, // target MachineType
target_loc, // target location
locations.Build(), // location_sig
@@ -6505,6 +6756,7 @@ AssemblerOptions WasmStubAssemblerOptions() {
#undef WASM_INSTANCE_OBJECT_SIZE
#undef WASM_INSTANCE_OBJECT_OFFSET
#undef LOAD_RAW
+#undef LOAD_RAW_NODE_OFFSET
#undef LOAD_INSTANCE_FIELD
#undef LOAD_TAGGED_POINTER
#undef LOAD_TAGGED_ANY
@@ -6512,6 +6764,8 @@ AssemblerOptions WasmStubAssemblerOptions() {
#undef LOAD_FIXED_ARRAY_SLOT_SMI
#undef LOAD_FIXED_ARRAY_SLOT_PTR
#undef LOAD_FIXED_ARRAY_SLOT_ANY
+#undef STORE_RAW
+#undef STORE_RAW_NODE_OFFSET
#undef STORE_FIXED_ARRAY_SLOT_SMI
#undef STORE_FIXED_ARRAY_SLOT_ANY
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 8db91affc1..460d0d2f1b 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -45,47 +45,21 @@ struct WasmFeatures;
namespace compiler {
-class TurbofanWasmCompilationUnit {
- public:
- explicit TurbofanWasmCompilationUnit(wasm::WasmCompilationUnit* wasm_unit);
- ~TurbofanWasmCompilationUnit();
-
- bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
- wasm::CompilationEnv* env,
- const wasm::FunctionBody& func_body,
- wasm::WasmFeatures* detected,
- double* decode_ms, MachineGraph* mcgraph,
- NodeOriginTable* node_origins,
- SourcePositionTable* source_positions);
-
- wasm::WasmCompilationResult ExecuteCompilation(wasm::WasmEngine*,
- wasm::CompilationEnv*,
- const wasm::FunctionBody&,
- Counters*,
- wasm::WasmFeatures* detected);
-
- private:
- wasm::WasmCompilationUnit* const wasm_unit_;
-
- DISALLOW_COPY_AND_ASSIGN(TurbofanWasmCompilationUnit);
-};
-
-class InterpreterCompilationUnit final {
- public:
- explicit InterpreterCompilationUnit(wasm::WasmCompilationUnit* wasm_unit)
- : wasm_unit_(wasm_unit) {}
-
- wasm::WasmCompilationResult ExecuteCompilation(wasm::WasmEngine*,
- wasm::CompilationEnv*,
- const wasm::FunctionBody&,
- Counters*,
- wasm::WasmFeatures* detected);
-
- private:
- wasm::WasmCompilationUnit* const wasm_unit_;
-
- DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationUnit);
-};
+bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
+ wasm::CompilationEnv* env,
+ const wasm::FunctionBody& func_body,
+ int func_index, wasm::WasmFeatures* detected,
+ MachineGraph* mcgraph,
+ NodeOriginTable* node_origins,
+ SourcePositionTable* source_positions);
+
+wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
+ wasm::WasmEngine*, wasm::CompilationEnv*, const wasm::FunctionBody&,
+ int func_index, Counters*, wasm::WasmFeatures* detected);
+
+wasm::WasmCompilationResult ExecuteInterpreterEntryCompilation(
+ wasm::WasmEngine*, wasm::CompilationEnv*, const wasm::FunctionBody&,
+ int func_index, Counters*, wasm::WasmFeatures* detected);
// Calls to WASM imports are handled in several different ways, depending on the
// type of the target function/callable and whether the signature matches the
@@ -93,6 +67,7 @@ class InterpreterCompilationUnit final {
enum class WasmImportCallKind : uint8_t {
kLinkError, // static WASM->WASM type error
kRuntimeTypeError, // runtime WASM->JS type error
+ kWasmToCapi, // fast WASM->C-API call
kWasmToWasm, // fast WASM->WASM call
kJSFunctionArityMatch, // fast WASM->JS call
kJSFunctionArityMatchSloppy, // fast WASM->JS call, sloppy receiver
@@ -137,6 +112,11 @@ V8_EXPORT_PRIVATE wasm::WasmCode* CompileWasmImportCallWrapper(
wasm::WasmEngine*, wasm::NativeModule*, WasmImportCallKind,
wasm::FunctionSig*, bool source_positions);
+// Compiles a host call wrapper, which allows WASM to call host functions.
+wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,
+ wasm::NativeModule*,
+ wasm::FunctionSig*, Address address);
+
// Creates a code object calling a wasm function with the given signature,
// callable from JS.
V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(Isolate*,
@@ -184,10 +164,6 @@ class WasmGraphBuilder {
kRetpoline = true,
kNoRetpoline = false
};
- enum ExtraCallableParam : bool { // --
- kExtraCallableParam = true,
- kNoExtraCallableParam = false
- };
V8_EXPORT_PRIVATE WasmGraphBuilder(
wasm::CompilationEnv* env, Zone* zone, MachineGraph* mcgraph,
@@ -219,6 +195,7 @@ class WasmGraphBuilder {
Node* CreateOrMergeIntoEffectPhi(Node* merge, Node* tnode, Node* fnode);
Node* EffectPhi(unsigned count, Node** effects, Node* control);
Node* RefNull();
+ Node* RefFunc(uint32_t function_index);
Node* Uint32Constant(uint32_t value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
@@ -231,7 +208,7 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position = wasm::kNoCodePosition);
Node* MemoryGrow(Node* input);
Node* Throw(uint32_t exception_index, const wasm::WasmException* exception,
- const Vector<Node*> values);
+ const Vector<Node*> values, wasm::WasmCodePosition position);
Node* Rethrow(Node* except_obj);
Node* ExceptionTagEqual(Node* caught_tag, Node* expected_tag);
Node* LoadExceptionTagFromTable(uint32_t exception_index);
@@ -248,6 +225,15 @@ class WasmGraphBuilder {
void PatchInStackCheckIfNeeded();
+ // TODO(v8:8977, v8:7703): move this somewhere? This should be where it
+ // can be used in many places (e.g graph assembler, wasm compiler).
+ // Adds a decompression node if pointer compression is enabled and the type
+ // loaded is a compressed one. To be used after loads.
+ Node* InsertDecompressionIfNeeded(MachineType type, Node* value);
+ // Adds a compression node if pointer compression is enabled and the
+ // representation to be stored is a compressed one. To be used before stores.
+ Node* InsertCompressionIfNeeded(MachineRepresentation rep, Node* value);
+
//-----------------------------------------------------------------------
// Operations that read and/or write {control} and {effect}.
//-----------------------------------------------------------------------
@@ -411,6 +397,9 @@ class WasmGraphBuilder {
Node* ElemDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
Node* TableCopy(uint32_t table_src_index, uint32_t table_dst_index, Node* dst,
Node* src, Node* size, wasm::WasmCodePosition position);
+ Node* TableGrow(uint32_t table_index, Node* value, Node* delta);
+ Node* TableSize(uint32_t table_index);
+ Node* TableFill(uint32_t table_index, Node* start, Node* value, Node* count);
bool has_simd() const { return has_simd_; }
@@ -617,12 +606,13 @@ class WasmGraphBuilder {
TrapId GetTrapIdForTrap(wasm::TrapReason reason);
};
+enum WasmCallKind { kWasmFunction, kWasmImportWrapper, kWasmCapiFunction };
+
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, wasm::FunctionSig* signature,
WasmGraphBuilder::UseRetpoline use_retpoline =
WasmGraphBuilder::kNoRetpoline,
- WasmGraphBuilder::ExtraCallableParam callable_param =
- WasmGraphBuilder::kNoExtraCallableParam);
+ WasmCallKind kind = kWasmFunction);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, CallDescriptor* call_descriptor);
diff --git a/deps/v8/src/compiler/write-barrier-kind.h b/deps/v8/src/compiler/write-barrier-kind.h
new file mode 100644
index 0000000000..1f38049f2d
--- /dev/null
+++ b/deps/v8/src/compiler/write-barrier-kind.h
@@ -0,0 +1,52 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_WRITE_BARRIER_KIND_H_
+#define V8_COMPILER_WRITE_BARRIER_KIND_H_
+
+#include <ostream>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Write barrier kinds supported by compiler.
+enum WriteBarrierKind : uint8_t {
+ kNoWriteBarrier,
+ kAssertNoWriteBarrier,
+ kMapWriteBarrier,
+ kPointerWriteBarrier,
+ kEphemeronKeyWriteBarrier,
+ kFullWriteBarrier
+};
+
+inline size_t hash_value(WriteBarrierKind kind) {
+ return static_cast<uint8_t>(kind);
+}
+
+inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
+ switch (kind) {
+ case kNoWriteBarrier:
+ return os << "NoWriteBarrier";
+ case kAssertNoWriteBarrier:
+ return os << "AssertNoWriteBarrier";
+ case kMapWriteBarrier:
+ return os << "MapWriteBarrier";
+ case kPointerWriteBarrier:
+ return os << "PointerWriteBarrier";
+ case kEphemeronKeyWriteBarrier:
+ return os << "EphemeronKeyWriteBarrier";
+ case kFullWriteBarrier:
+ return os << "FullWriteBarrier";
+ }
+ UNREACHABLE();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_WRITE_BARRIER_KIND_H_
diff --git a/deps/v8/src/compiler/zone-stats.h b/deps/v8/src/compiler/zone-stats.h
index 79adc05cb8..63d58eb99f 100644
--- a/deps/v8/src/compiler/zone-stats.h
+++ b/deps/v8/src/compiler/zone-stats.h
@@ -9,7 +9,7 @@
#include <set>
#include <vector>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/constants-arch.h b/deps/v8/src/constants-arch.h
deleted file mode 100644
index 546d316cf4..0000000000
--- a/deps/v8/src/constants-arch.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CONSTANTS_ARCH_H_
-#define V8_CONSTANTS_ARCH_H_
-
-#if V8_TARGET_ARCH_ARM
-#include "src/arm/constants-arm.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/constants-arm64.h" // NOLINT
-#elif V8_TARGET_ARCH_IA32
-#include "src/ia32/constants-ia32.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/constants-mips.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/constants-mips64.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/constants-ppc.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/constants-s390.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/constants-x64.h" // NOLINT
-#else
-#error Unsupported target architecture.
-#endif
-
-#endif // V8_CONSTANTS_ARCH_H_
diff --git a/deps/v8/src/d8/OWNERS b/deps/v8/src/d8/OWNERS
new file mode 100644
index 0000000000..ff3b6d7372
--- /dev/null
+++ b/deps/v8/src/d8/OWNERS
@@ -0,0 +1,5 @@
+binji@chromium.org
+bmeurer@chromium.org
+clemensh@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/async-hooks-wrapper.cc b/deps/v8/src/d8/async-hooks-wrapper.cc
index bff7965171..a3fc9dba1f 100644
--- a/deps/v8/src/async-hooks-wrapper.cc
+++ b/deps/v8/src/d8/async-hooks-wrapper.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/async-hooks-wrapper.h"
-#include "src/d8.h"
-#include "src/isolate-inl.h"
+#include "src/d8/async-hooks-wrapper.h"
+#include "src/d8/d8.h"
+#include "src/execution/isolate-inl.h"
namespace v8 {
diff --git a/deps/v8/src/async-hooks-wrapper.h b/deps/v8/src/d8/async-hooks-wrapper.h
index 68aafa5225..f339b6e316 100644
--- a/deps/v8/src/async-hooks-wrapper.h
+++ b/deps/v8/src/d8/async-hooks-wrapper.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ASYNC_HOOKS_WRAPPER_H_
-#define V8_ASYNC_HOOKS_WRAPPER_H_
+#ifndef V8_D8_ASYNC_HOOKS_WRAPPER_H_
+#define V8_D8_ASYNC_HOOKS_WRAPPER_H_
#include <stack>
#include "include/v8.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
namespace v8 {
-typedef double async_id_t;
+using async_id_t = double;
struct AsyncContext {
async_id_t execution_async_id;
@@ -93,4 +93,4 @@ class AsyncHooks {
} // namespace v8
-#endif // V8_ASYNC_HOOKS_WRAPPER_H_
+#endif // V8_D8_ASYNC_HOOKS_WRAPPER_H_
diff --git a/deps/v8/src/d8-console.cc b/deps/v8/src/d8/d8-console.cc
index f08aa3bfae..8175d608b4 100644
--- a/deps/v8/src/d8-console.cc
+++ b/deps/v8/src/d8/d8-console.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/d8-console.h"
-#include "src/d8.h"
-#include "src/isolate.h"
+#include "src/d8/d8-console.h"
+#include "src/d8/d8.h"
+#include "src/execution/isolate.h"
namespace v8 {
diff --git a/deps/v8/src/d8-console.h b/deps/v8/src/d8/d8-console.h
index 5c7569b3ce..be14d99219 100644
--- a/deps/v8/src/d8-console.h
+++ b/deps/v8/src/d8/d8-console.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_D8_CONSOLE_H_
-#define V8_D8_CONSOLE_H_
+#ifndef V8_D8_D8_CONSOLE_H_
+#define V8_D8_D8_CONSOLE_H_
#include "src/base/platform/time.h"
#include "src/debug/debug-interface.h"
@@ -44,4 +44,4 @@ class D8Console : public debug::ConsoleDelegate {
} // namespace v8
-#endif // V8_D8_CONSOLE_H_
+#endif // V8_D8_D8_CONSOLE_H_
diff --git a/deps/v8/src/d8-js.cc b/deps/v8/src/d8/d8-js.cc
index c1dac77075..eb18a00ff6 100644
--- a/deps/v8/src/d8-js.cc
+++ b/deps/v8/src/d8/d8-js.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/d8.h"
+#include "src/d8/d8.h"
const char* v8::Shell::stringify_source_ = R"D8(
(function() {
diff --git a/deps/v8/src/d8-platforms.cc b/deps/v8/src/d8/d8-platforms.cc
index 0c179bbdd2..42ce14f4f7 100644
--- a/deps/v8/src/d8-platforms.cc
+++ b/deps/v8/src/d8/d8-platforms.cc
@@ -13,7 +13,7 @@
#include "src/base/platform/time.h"
#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/d8-platforms.h"
+#include "src/d8/d8-platforms.h"
namespace v8 {
diff --git a/deps/v8/src/d8-platforms.h b/deps/v8/src/d8/d8-platforms.h
index d78207a5e1..a658f0a47c 100644
--- a/deps/v8/src/d8-platforms.h
+++ b/deps/v8/src/d8/d8-platforms.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_D8_PLATFORMS_H_
-#define V8_D8_PLATFORMS_H_
+#ifndef V8_D8_D8_PLATFORMS_H_
+#define V8_D8_D8_PLATFORMS_H_
#include <cstdint>
#include <memory>
@@ -26,4 +26,4 @@ std::unique_ptr<Platform> MakeDelayedTasksPlatform(
} // namespace v8
-#endif // V8_D8_PLATFORMS_H_
+#endif // V8_D8_D8_PLATFORMS_H_
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8/d8-posix.cc
index 57a8a0d5a5..23767ba2b5 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8/d8-posix.cc
@@ -16,11 +16,10 @@
#include <sys/wait.h>
#include <unistd.h>
-#include "src/d8.h"
+#include "src/d8/d8.h"
namespace v8 {
-
// If the buffer ends in the middle of a UTF-8 sequence then we return
// the length of the string up to but not including the incomplete UTF-8
// sequence. If the buffer ends with a valid UTF-8 sequence then we
@@ -75,12 +74,9 @@ static int LengthWithoutIncompleteUtf8(char* buffer, int len) {
return 0;
}
-
// Suspends the thread until there is data available from the child process.
// Returns false on timeout, true on data ready.
-static bool WaitOnFD(int fd,
- int read_timeout,
- int total_timeout,
+static bool WaitOnFD(int fd, int read_timeout, int total_timeout,
const struct timeval& start_time) {
fd_set readfds, writefds, exceptfds;
struct timeval timeout;
@@ -109,7 +105,6 @@ static bool WaitOnFD(int fd,
return number_of_fds_ready == 1;
}
-
// Checks whether we ran out of time on the timeout. Returns true if we ran out
// of time, false if we still have time.
static bool TimeIsOut(const struct timeval& start_time, const int& total_time) {
@@ -129,32 +124,31 @@ static bool TimeIsOut(const struct timeval& start_time, const int& total_time) {
return false;
}
-
// A utility class that does a non-hanging waitpid on the child process if we
// bail out of the System() function early. If you don't ever do a waitpid on
// a subprocess then it turns into one of those annoying 'zombie processes'.
class ZombieProtector {
public:
- explicit ZombieProtector(int pid): pid_(pid) { }
+ explicit ZombieProtector(int pid) : pid_(pid) {}
~ZombieProtector() {
if (pid_ != 0) waitpid(pid_, nullptr, 0);
}
void ChildIsDeadNow() { pid_ = 0; }
+
private:
int pid_;
};
-
// A utility class that closes a file descriptor when it goes out of scope.
class OpenFDCloser {
public:
- explicit OpenFDCloser(int fd): fd_(fd) { }
+ explicit OpenFDCloser(int fd) : fd_(fd) {}
~OpenFDCloser() { close(fd_); }
+
private:
int fd_;
};
-
// A utility class that takes the array of command arguments and puts then in an
// array of new[]ed UTF-8 C strings. Deallocates them again when it goes out of
// scope.
@@ -178,8 +172,9 @@ class ExecArgs {
int i = 1;
for (unsigned j = 0; j < command_args->Length(); i++, j++) {
Local<Value> arg(
- command_args->Get(isolate->GetCurrentContext(),
- Integer::New(isolate, j)).ToLocalChecked());
+ command_args
+ ->Get(isolate->GetCurrentContext(), Integer::New(isolate, j))
+ .ToLocalChecked());
String::Utf8Value utf8_arg(isolate, arg);
if (*utf8_arg == nullptr) {
exec_args_[i] = nullptr; // Consistent state for destructor.
@@ -203,7 +198,7 @@ class ExecArgs {
if (exec_args_[i] == nullptr) {
return;
}
- delete [] exec_args_[i];
+ delete[] exec_args_[i];
exec_args_[i] = nullptr;
}
}
@@ -215,11 +210,9 @@ class ExecArgs {
char* exec_args_[kMaxArgs + 1];
};
-
// Gets the optional timeouts from the arguments to the system() call.
static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
- int* read_timeout,
- int* total_timeout) {
+ int* read_timeout, int* total_timeout) {
if (args.Length() > 3) {
if (args[3]->IsNumber()) {
*total_timeout = args[3]
@@ -229,7 +222,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(),
"system: Argument 4 must be a number",
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
}
@@ -242,23 +236,21 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(),
"system: Argument 3 must be a number",
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
return false;
}
}
return true;
}
-
static const int kReadFD = 0;
static const int kWriteFD = 1;
-
// This is run in the child process after fork() but before exec(). It normally
// ends with the child process being replaced with the desired child program.
// It only returns if an error occurred.
-static void ExecSubprocess(int* exec_error_fds,
- int* stdout_fds,
+static void ExecSubprocess(int* exec_error_fds, int* stdout_fds,
const ExecArgs& exec_args) {
close(exec_error_fds[kReadFD]); // Don't need this in the child.
close(stdout_fds[kReadFD]); // Don't need this in the child.
@@ -277,7 +269,6 @@ static void ExecSubprocess(int* exec_error_fds,
// Return (and exit child process).
}
-
// Runs in the parent process. Checks that the child was able to exec (closing
// the file desriptor), or reports an error if it failed.
static bool ChildLaunchedOK(Isolate* isolate, int* exec_error_fds) {
@@ -295,7 +286,6 @@ static bool ChildLaunchedOK(Isolate* isolate, int* exec_error_fds) {
return true;
}
-
// Accumulates the output from the child in a string handle. Returns true if it
// succeeded or false if an exception was thrown.
static Local<Value> GetStdout(Isolate* isolate, int child_fd,
@@ -319,14 +309,12 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
read(child_fd, buffer + fullness, kStdoutReadBufferSize - fullness));
if (bytes_read == -1) {
if (errno == EAGAIN) {
- if (!WaitOnFD(child_fd,
- read_timeout,
- total_timeout,
- start_time) ||
+ if (!WaitOnFD(child_fd, read_timeout, total_timeout, start_time) ||
(TimeIsOut(start_time, total_timeout))) {
return isolate->ThrowException(
String::NewFromUtf8(isolate, "Timed out waiting for output",
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
}
continue;
} else if (errno == EINTR) {
@@ -336,9 +324,9 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
}
}
if (bytes_read + fullness > 0) {
- int length = bytes_read == 0 ?
- bytes_read + fullness :
- LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
+ int length = bytes_read == 0 ? bytes_read + fullness
+ : LengthWithoutIncompleteUtf8(
+ buffer, bytes_read + fullness);
Local<String> addition =
String::NewFromUtf8(isolate, buffer, NewStringType::kNormal, length)
.ToLocalChecked();
@@ -350,7 +338,6 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
return accumulator;
}
-
// Modern Linux has the waitid call, which is like waitpid, but more useful
// if you want a timeout. If we don't have waitid we can't limit the time
// waiting for the process to exit without losing the information about
@@ -368,13 +355,10 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
#endif
#endif
-
// Get exit status of child.
-static bool WaitForChild(Isolate* isolate,
- int pid,
+static bool WaitForChild(Isolate* isolate, int pid,
ZombieProtector& child_waiter, // NOLINT
- const struct timeval& start_time,
- int read_timeout,
+ const struct timeval& start_time, int read_timeout,
int total_timeout) {
#ifdef HAS_WAITID
@@ -391,16 +375,15 @@ static bool WaitForChild(Isolate* isolate,
isolate->ThrowException(
String::NewFromUtf8(isolate,
"Timed out waiting for process to terminate",
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
kill(pid, SIGINT);
return false;
}
}
if (child_info.si_code == CLD_KILLED) {
char message[999];
- snprintf(message,
- sizeof(message),
- "Child killed by signal %d",
+ snprintf(message, sizeof(message), "Child killed by signal %d",
child_info.si_status);
isolate->ThrowException(
String::NewFromUtf8(isolate, message, NewStringType::kNormal)
@@ -409,9 +392,7 @@ static bool WaitForChild(Isolate* isolate,
}
if (child_info.si_code == CLD_EXITED && child_info.si_status != 0) {
char message[999];
- snprintf(message,
- sizeof(message),
- "Child exited with status %d",
+ snprintf(message, sizeof(message), "Child exited with status %d",
child_info.si_status);
isolate->ThrowException(
String::NewFromUtf8(isolate, message, NewStringType::kNormal)
@@ -426,9 +407,7 @@ static bool WaitForChild(Isolate* isolate,
child_waiter.ChildIsDeadNow();
if (WIFSIGNALED(child_status)) {
char message[999];
- snprintf(message,
- sizeof(message),
- "Child killed by signal %d",
+ snprintf(message, sizeof(message), "Child killed by signal %d",
WTERMSIG(child_status));
isolate->ThrowException(
String::NewFromUtf8(isolate, message, NewStringType::kNormal)
@@ -438,9 +417,7 @@ static bool WaitForChild(Isolate* isolate,
if (WEXITSTATUS(child_status) != 0) {
char message[999];
int exit_status = WEXITSTATUS(child_status);
- snprintf(message,
- sizeof(message),
- "Child exited with status %d",
+ snprintf(message, sizeof(message), "Child exited with status %d",
exit_status);
isolate->ThrowException(
String::NewFromUtf8(isolate, message, NewStringType::kNormal)
@@ -453,6 +430,7 @@ static bool WaitForChild(Isolate* isolate,
return true;
}
+#undef HAS_WAITID
// Implementation of the system() function (see d8.h for details).
void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -466,7 +444,8 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(),
"system: Argument 2 must be an array",
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
command_args = Local<Array>::Cast(args[1]);
@@ -476,13 +455,15 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (command_args->Length() > ExecArgs::kMaxArgs) {
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(), "Too many arguments to system()",
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
if (args.Length() < 1) {
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(), "Too few arguments to system()",
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
@@ -499,13 +480,15 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (pipe(exec_error_fds) != 0) {
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed.",
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
if (pipe(stdout_fds) != 0) {
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(), "pipe syscall failed.",
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
@@ -541,7 +524,6 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(accumulator);
}
-
void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "chdir() takes one argument";
@@ -561,12 +543,12 @@ void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (chdir(*directory) != 0) {
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(), strerror(errno),
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
}
-
void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "umask() takes one argument";
@@ -589,7 +571,6 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
-
static bool CheckItsADirectory(Isolate* isolate, char* directory) {
struct stat stat_buf;
int stat_result = stat(directory, &stat_buf);
@@ -606,7 +587,6 @@ static bool CheckItsADirectory(Isolate* isolate, char* directory) {
return false;
}
-
// Returns true for success. Creates intermediate directories as needed. No
// error if the directory exists already.
static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
@@ -642,7 +622,6 @@ static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
}
}
-
void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mode_t mask = 0777;
if (args.Length() == 2) {
@@ -654,7 +633,8 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
const char* message = "mkdirp() second argument must be numeric";
args.GetIsolate()->ThrowException(
String::NewFromUtf8(args.GetIsolate(), message,
- NewStringType::kNormal).ToLocalChecked());
+ NewStringType::kNormal)
+ .ToLocalChecked());
return;
}
} else if (args.Length() != 1) {
@@ -675,7 +655,6 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
mkdirp(args.GetIsolate(), *directory, mask);
}
-
void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "rmdir() takes one or two arguments";
@@ -695,7 +674,6 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
rmdir(*directory);
}
-
void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
const char* message = "setenv() takes two arguments";
@@ -725,7 +703,6 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
setenv(*var, *value, 1);
}
-
void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
const char* message = "unsetenv() takes one argument";
diff --git a/deps/v8/src/d8-windows.cc b/deps/v8/src/d8/d8-windows.cc
index 4dee20f440..12e4f8b513 100644
--- a/deps/v8/src/d8-windows.cc
+++ b/deps/v8/src/d8/d8-windows.cc
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/d8.h"
-
+#include "src/d8/d8.h"
namespace v8 {
-
void Shell::AddOSMethods(Isolate* isolate, Local<ObjectTemplate> os_templ) {}
char* Shell::ReadCharsFromTcpPort(const char* name, int* size_out) {
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8/d8.cc
index be90dd0859..a29c596909 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -21,37 +21,42 @@
#include "include/libplatform/libplatform.h"
#include "include/libplatform/v8-tracing.h"
#include "include/v8-inspector.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
-#include "src/basic-block-profiler.h"
-#include "src/d8-console.h"
-#include "src/d8-platforms.h"
-#include "src/d8.h"
+#include "src/d8/d8-console.h"
+#include "src/d8/d8-platforms.h"
+#include "src/d8/d8.h"
#include "src/debug/debug-interface.h"
+#include "src/diagnostics/basic-block-profiler.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
-#include "src/msan.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/ostreams.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/scanner-character-streams.h"
+#include "src/sanitizer/msan.h"
#include "src/snapshot/natives.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/utils.h"
-#include "src/v8.h"
-#include "src/vm-state-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/utils.h"
#include "src/wasm/wasm-engine.h"
+#ifdef V8_INTL_SUPPORT
+#include "unicode/locid.h"
+#endif // V8_INTL_SUPPORT
+
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#else
#include <windows.h> // NOLINT
-#endif // !defined(_WIN32) && !defined(_WIN64)
+#endif // !defined(_WIN32) && !defined(_WIN64)
#ifndef DCHECK
#define DCHECK(condition) assert(condition)
@@ -342,7 +347,6 @@ static platform::tracing::TraceConfig* CreateTraceConfigFromJSON(
} // namespace tracing
-
class ExternalOwningOneByteStringResource
: public String::ExternalOneByteStringResource {
public:
@@ -680,10 +684,7 @@ class ModuleEmbedderData {
module_to_specifier_map;
};
-enum {
- kModuleEmbedderDataIndex,
- kInspectorClientIndex
-};
+enum { kModuleEmbedderDataIndex, kInspectorClientIndex };
void InitializeModuleEmbedderData(Local<Context> context) {
context->SetAlignedPointerInEmbedderData(
@@ -956,7 +957,6 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->isolate_->GetEnteredOrMicrotaskContext());
}
-
PerIsolateData::RealmScope::~RealmScope() {
// Drop realms to avoid keeping them alive. We don't dispose the
// module embedder data for the first realm here, but instead do
@@ -970,7 +970,6 @@ PerIsolateData::RealmScope::~RealmScope() {
delete[] data_->realms_;
}
-
int PerIsolateData::RealmFind(Local<Context> context) {
for (int i = 0; i < realm_count_; ++i) {
if (realms_[i] == context) return i;
@@ -978,10 +977,8 @@ int PerIsolateData::RealmFind(Local<Context> context) {
return -1;
}
-
int PerIsolateData::RealmIndexOrThrow(
- const v8::FunctionCallbackInfo<v8::Value>& args,
- int arg_offset) {
+ const v8::FunctionCallbackInfo<v8::Value>& args, int arg_offset) {
if (args.Length() < arg_offset || !args[arg_offset]->IsNumber()) {
Throw(args.GetIsolate(), "Invalid argument");
return -1;
@@ -996,7 +993,6 @@ int PerIsolateData::RealmIndexOrThrow(
return index;
}
-
// performance.now() returns a time stamp as double, measured in milliseconds.
// When FLAG_verify_predictable mode is enabled it returns result of
// v8::Platform::MonotonicallyIncreasingTime().
@@ -1010,7 +1006,6 @@ void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
-
// Realm.current() returns the index of the currently active realm.
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -1020,7 +1015,6 @@ void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(index);
}
-
// Realm.owner(o) returns the index of the realm that created o.
void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -1037,7 +1031,6 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(index);
}
-
// Realm.global(i) returns the global object of realm i.
// (Note that properties of global objects cannot be read/written cross-realm.)
void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -1155,15 +1148,14 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData* data = PerIsolateData::Get(isolate);
int index = data->RealmIndexOrThrow(args, 0);
if (index == -1) return;
- if (index == 0 ||
- index == data->realm_current_ || index == data->realm_switch_) {
+ if (index == 0 || index == data->realm_current_ ||
+ index == data->realm_switch_) {
Throw(args.GetIsolate(), "Invalid realm index");
return;
}
DisposeRealm(args, index);
}
-
// Realm.switch(i) switches to the realm i for consecutive interactive inputs.
void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -1173,7 +1165,6 @@ void Shell::RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args) {
data->realm_switch_ = index;
}
-
// Realm.eval(i, s) evaluates s in realm i and returns the result.
void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -1206,7 +1197,6 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(result);
}
-
// Realm.shared is an accessor for a single shared value across realms.
void Shell::RealmSharedGet(Local<String> property,
const PropertyCallbackInfo<Value>& info) {
@@ -1216,8 +1206,7 @@ void Shell::RealmSharedGet(Local<String> property,
info.GetReturnValue().Set(data->realm_shared_);
}
-void Shell::RealmSharedSet(Local<String> property,
- Local<Value> value,
+void Shell::RealmSharedSet(Local<String> property, Local<Value> value,
const PropertyCallbackInfo<void>& info) {
Isolate* isolate = info.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
@@ -1323,7 +1312,6 @@ void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(source);
}
-
Local<String> Shell::ReadFromStdin(Isolate* isolate) {
static const int kBufferSize = 256;
char buffer[kBufferSize];
@@ -1340,13 +1328,13 @@ Local<String> Shell::ReadFromStdin(Isolate* isolate) {
length = static_cast<int>(strlen(buffer));
if (length == 0) {
return accumulator;
- } else if (buffer[length-1] != '\n') {
+ } else if (buffer[length - 1] != '\n') {
accumulator = String::Concat(
isolate, accumulator,
String::NewFromUtf8(isolate, buffer, NewStringType::kNormal, length)
.ToLocalChecked());
- } else if (length > 1 && buffer[length-2] == '\\') {
- buffer[length-2] = '\n';
+ } else if (length > 1 && buffer[length - 2] == '\\') {
+ buffer[length - 2] = '\n';
accumulator =
String::Concat(isolate, accumulator,
String::NewFromUtf8(isolate, buffer,
@@ -1362,7 +1350,6 @@ Local<String> Shell::ReadFromStdin(Isolate* isolate) {
}
}
-
void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
@@ -1472,7 +1459,6 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
-
void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope handle_scope(isolate);
@@ -1497,7 +1483,6 @@ void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
-
void Shell::WorkerGetMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope handle_scope(isolate);
@@ -1515,7 +1500,6 @@ void Shell::WorkerGetMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
-
void Shell::WorkerTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope handle_scope(isolate);
@@ -1527,7 +1511,6 @@ void Shell::WorkerTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
worker->Terminate();
}
-
void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
int exit_code = (*args)[0]
->Int32Value(args->GetIsolate()->GetCurrentContext())
@@ -1538,7 +1521,6 @@ void Shell::QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args) {
base::OS::ExitProcess(exit_code);
}
-
void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
base::CallOnce(&quit_once_, &QuitOnce,
const_cast<v8::FunctionCallbackInfo<v8::Value>*>(&args));
@@ -1553,12 +1535,12 @@ void Shell::NotifyDone(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(
- String::NewFromUtf8(args.GetIsolate(), V8::GetVersion(),
- NewStringType::kNormal).ToLocalChecked());
+ args.GetReturnValue().Set(String::NewFromUtf8(args.GetIsolate(),
+ V8::GetVersion(),
+ NewStringType::kNormal)
+ .ToLocalChecked());
}
-
void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
Local<Context> context = isolate->GetCurrentContext();
@@ -1621,7 +1603,6 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
if (enter_context) context->Exit();
}
-
int32_t* Counter::Bind(const char* name, bool is_histogram) {
int i;
for (i = 0; i < kMaxNameSize - 1 && name[i]; i++)
@@ -1631,13 +1612,11 @@ int32_t* Counter::Bind(const char* name, bool is_histogram) {
return ptr();
}
-
void Counter::AddSample(int32_t sample) {
count_++;
sample_total_ += sample;
}
-
CounterCollection::CounterCollection() {
magic_number_ = 0xDEADFACE;
max_counters_ = kMaxCounters;
@@ -1645,13 +1624,11 @@ CounterCollection::CounterCollection() {
counters_in_use_ = 0;
}
-
Counter* CounterCollection::GetNextCounter() {
if (counters_in_use_ == kMaxCounters) return nullptr;
return &counters_[counters_in_use_++];
}
-
void Shell::MapCounters(v8::Isolate* isolate, const char* name) {
counters_file_ = base::OS::MemoryMappedFile::create(
name, sizeof(CounterCollection), &local_counters_);
@@ -1684,7 +1661,6 @@ Counter* Shell::GetCounter(const char* name, bool is_histogram) {
return counter;
}
-
int* Shell::LookupCounter(const char* name) {
Counter* counter = GetCounter(name, false);
@@ -1695,15 +1671,11 @@ int* Shell::LookupCounter(const char* name) {
}
}
-
-void* Shell::CreateHistogram(const char* name,
- int min,
- int max,
+void* Shell::CreateHistogram(const char* name, int min, int max,
size_t buckets) {
return GetCounter(name, true);
}
-
void Shell::AddHistogramSample(void* histogram, int sample) {
Counter* counter = reinterpret_cast<Counter*>(histogram);
counter->AddSample(sample);
@@ -1734,7 +1706,6 @@ Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
return result.ToLocalChecked().As<String>();
}
-
Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
global_template->Set(
@@ -1950,7 +1921,6 @@ static void PrintNonErrorsMessageCallback(Local<Message> message,
default: {
UNREACHABLE();
- break;
}
}
// Converts a V8 value to a C string.
@@ -1971,8 +1941,9 @@ static void PrintNonErrorsMessageCallback(Local<Message> message,
void Shell::Initialize(Isolate* isolate) {
// Set up counters
- if (i::StrLength(i::FLAG_map_counters) != 0)
+ if (i::FLAG_map_counters[0] != '\0') {
MapCounters(isolate, i::FLAG_map_counters);
+ }
// Disable default message reporting.
isolate->AddMessageListenerWithErrorLevel(
PrintNonErrorsMessageCallback,
@@ -1981,7 +1952,6 @@ void Shell::Initialize(Isolate* isolate) {
v8::Isolate::kMessageLog);
}
-
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
// This needs to be a critical section since this is not thread-safe
base::MutexGuard lock_guard(context_mutex_.Pointer());
@@ -2176,7 +2146,6 @@ void Shell::OnExit(v8::Isolate* isolate) {
delete counter_map_;
}
-
static FILE* FOpen(const char* path, const char* mode) {
#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
FILE* result;
@@ -2224,14 +2193,12 @@ static char* ReadChars(const char* name, int* size_out) {
return chars;
}
-
struct DataAndPersistent {
uint8_t* data;
int byte_length;
Global<ArrayBuffer> handle;
};
-
static void ReadBufferWeakCallback(
const v8::WeakCallbackInfo<DataAndPersistent>& data) {
int byte_length = data.GetParameter()->byte_length;
@@ -2243,7 +2210,6 @@ static void ReadBufferWeakCallback(
delete data.GetParameter();
}
-
void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
static_assert(sizeof(char) == sizeof(uint8_t),
"char and uint8_t should both have 1 byte");
@@ -2293,7 +2259,6 @@ Local<String> Shell::ReadFile(Isolate* isolate, const char* name) {
return result;
}
-
void Shell::RunShell(Isolate* isolate) {
HandleScope outer_scope(isolate);
v8::Local<v8::Context> context =
@@ -2576,7 +2541,6 @@ void SourceGroup::ExecuteInThread() {
isolate->Dispose();
}
-
void SourceGroup::StartExecuteInThread() {
if (thread_ == nullptr) {
thread_ = new IsolateThread(this);
@@ -2585,13 +2549,11 @@ void SourceGroup::StartExecuteInThread() {
next_semaphore_.Signal();
}
-
void SourceGroup::WaitForThread() {
if (thread_ == nullptr) return;
done_semaphore_.Wait();
}
-
void SourceGroup::JoinThread() {
if (thread_ == nullptr) return;
thread_->Join();
@@ -2618,13 +2580,11 @@ bool SerializationDataQueue::Dequeue(
return true;
}
-
bool SerializationDataQueue::IsEmpty() {
base::MutexGuard lock_guard(&mutex_);
return data_.empty();
}
-
void SerializationDataQueue::Clear() {
base::MutexGuard lock_guard(&mutex_);
data_.clear();
@@ -2646,7 +2606,6 @@ Worker::~Worker() {
out_queue_.Clear();
}
-
void Worker::StartExecuteInThread(const char* script) {
running_ = true;
script_ = i::StrDup(script);
@@ -2670,7 +2629,6 @@ std::unique_ptr<SerializationData> Worker::GetMessage() {
return result;
}
-
void Worker::Terminate() {
base::Relaxed_Store(&running_, false);
// Post nullptr to wake the Worker thread message loop, and tell it to stop
@@ -2678,13 +2636,11 @@ void Worker::Terminate() {
PostMessage(nullptr);
}
-
void Worker::WaitForThread() {
Terminate();
thread_->Join();
}
-
void Worker::ExecuteInThread() {
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
@@ -2711,12 +2667,15 @@ void Worker::ExecuteInThread() {
FunctionTemplate::New(isolate, PostMessageOut, this_value);
Local<Function> postmessage_fun;
- if (postmessage_fun_template->GetFunction(context)
- .ToLocal(&postmessage_fun)) {
- global->Set(context, String::NewFromUtf8(isolate, "postMessage",
- NewStringType::kNormal)
- .ToLocalChecked(),
- postmessage_fun).FromJust();
+ if (postmessage_fun_template->GetFunction(context).ToLocal(
+ &postmessage_fun)) {
+ global
+ ->Set(context,
+ String::NewFromUtf8(isolate, "postMessage",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ postmessage_fun)
+ .FromJust();
}
// First run the script
@@ -2731,9 +2690,11 @@ void Worker::ExecuteInThread() {
Shell::kReportExceptions, Shell::kProcessMessageQueue)) {
// Get the message handler
Local<Value> onmessage =
- global->Get(context, String::NewFromUtf8(isolate, "onmessage",
- NewStringType::kNormal)
- .ToLocalChecked()).ToLocalChecked();
+ global
+ ->Get(context, String::NewFromUtf8(isolate, "onmessage",
+ NewStringType::kNormal)
+ .ToLocalChecked())
+ .ToLocalChecked();
if (onmessage->IsFunction()) {
Local<Function> onmessage_fun = Local<Function>::Cast(onmessage);
// Now wait for messages
@@ -2771,7 +2732,6 @@ void Worker::ExecuteInThread() {
out_semaphore_.Signal();
}
-
void Worker::PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope handle_scope(isolate);
@@ -2794,12 +2754,6 @@ void Worker::PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
-
-void SetFlagsFromString(const char* flags) {
- v8::V8::SetFlagsFromString(flags, static_cast<int>(strlen(flags)));
-}
-
-
bool Shell::SetOptions(int argc, char* argv[]) {
bool logfile_per_isolate = false;
for (int i = 0; i < argc; i++) {
@@ -2876,6 +2830,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
options.icu_data_file = argv[i] + 16;
argv[i] = nullptr;
+ } else if (strncmp(argv[i], "--icu-locale=", 13) == 0) {
+ options.icu_locale = argv[i] + 13;
+ argv[i] = nullptr;
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
} else if (strncmp(argv[i], "--natives_blob=", 15) == 0) {
options.natives_blob = argv[i] + 15;
@@ -2976,7 +2933,7 @@ bool Shell::SetOptions(int argc, char* argv[]) {
current->End(argc);
if (!logfile_per_isolate && options.num_isolates) {
- SetFlagsFromString("--nologfile_per_isolate");
+ V8::SetFlagsFromString("--no-logfile-per-isolate");
}
return true;
@@ -3024,7 +2981,6 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
return success == Shell::options.expected_to_throw ? 1 : 0;
}
-
void Shell::CollectGarbage(Isolate* isolate) {
if (options.send_idle_notification) {
const double kLongIdlePauseInSeconds = 1.0;
@@ -3351,7 +3307,6 @@ MaybeLocal<Value> Shell::DeserializeValue(
return deserializer.ReadValue(context);
}
-
void Shell::CleanupWorkers() {
// Make a copy of workers_, because we don't want to call Worker::Terminate
// while holding the workers_mutex_ lock. Otherwise, if a worker is about to
@@ -3375,26 +3330,45 @@ void Shell::CleanupWorkers() {
}
int Shell::Main(int argc, char* argv[]) {
- std::ofstream trace_file;
v8::base::EnsureConsoleOutput();
if (!SetOptions(argc, argv)) return 1;
+
v8::V8::InitializeICUDefaultLocation(argv[0], options.icu_data_file);
+#ifdef V8_INTL_SUPPORT
+ if (options.icu_locale != nullptr) {
+ icu::Locale locale(options.icu_locale);
+ UErrorCode error_code = U_ZERO_ERROR;
+ icu::Locale::setDefault(locale, error_code);
+ }
+#endif // V8_INTL_SUPPORT
+
v8::platform::InProcessStackDumping in_process_stack_dumping =
options.disable_in_process_stack_traces
? v8::platform::InProcessStackDumping::kDisabled
: v8::platform::InProcessStackDumping::kEnabled;
std::unique_ptr<platform::tracing::TracingController> tracing;
+ std::ofstream trace_file;
+#ifdef V8_USE_PERFETTO
+ std::ofstream perfetto_trace_file;
+#endif // V8_USE_PERFETTO
if (options.trace_enabled && !i::FLAG_verify_predictable) {
tracing = base::make_unique<platform::tracing::TracingController>();
trace_file.open(options.trace_path ? options.trace_path : "v8_trace.json");
+ DCHECK(trace_file.good());
platform::tracing::TraceBuffer* trace_buffer =
platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
platform::tracing::TraceBuffer::kRingBufferChunks,
platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
tracing->Initialize(trace_buffer);
+
+#ifdef V8_USE_PERFETTO
+ perfetto_trace_file.open("v8_perfetto_trace.json");
+ DCHECK(trace_file.good());
+ tracing->InitializeForPerfetto(&perfetto_trace_file);
+#endif // V8_USE_PERFETTO
}
platform::tracing::TracingController* tracing_controller = tracing.get();
@@ -3414,10 +3388,10 @@ int Shell::Main(int argc, char* argv[]) {
}
if (i::FLAG_trace_turbo_cfg_file == nullptr) {
- SetFlagsFromString("--trace-turbo-cfg-file=turbo.cfg");
+ V8::SetFlagsFromString("--trace-turbo-cfg-file=turbo.cfg");
}
if (i::FLAG_redirect_code_traces_to == nullptr) {
- SetFlagsFromString("--redirect-code-traces-to=code.asm");
+ V8::SetFlagsFromString("--redirect-code-traces-to=code.asm");
}
v8::V8::InitializePlatform(g_platform.get());
v8::V8::Initialize();
@@ -3498,9 +3472,8 @@ int Shell::Main(int argc, char* argv[]) {
}
if (options.stress_opt || options.stress_deopt) {
- Testing::SetStressRunType(options.stress_opt
- ? Testing::kStressTypeOpt
- : Testing::kStressTypeDeopt);
+ Testing::SetStressRunType(options.stress_opt ? Testing::kStressTypeOpt
+ : Testing::kStressTypeDeopt);
options.stress_runs = Testing::GetStressRuns();
for (int i = 0; i < options.stress_runs && result == 0; i++) {
printf("============ Stress %d/%d ============\n", i + 1,
@@ -3582,17 +3555,17 @@ int Shell::Main(int argc, char* argv[]) {
// Delete the platform explicitly here to write the tracing output to the
// tracing file.
+ if (options.trace_enabled) {
+ tracing_controller->StopTracing();
+ }
g_platform.reset();
return result;
}
} // namespace v8
-
#ifndef GOOGLE3
-int main(int argc, char* argv[]) {
- return v8::Shell::Main(argc, argv);
-}
+int main(int argc, char* argv[]) { return v8::Shell::Main(argc, argv); }
#endif
#undef CHECK
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8/d8.h
index 3223b12fde..1e0dd43c2d 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_D8_H_
-#define V8_D8_H_
+#ifndef V8_D8_D8_H_
+#define V8_D8_D8_H_
#include <iterator>
#include <map>
@@ -13,18 +13,15 @@
#include <unordered_map>
#include <vector>
-#include "src/allocation.h"
-#include "src/async-hooks-wrapper.h"
-#include "src/base/platform/time.h"
-#include "src/string-hasher.h"
-#include "src/utils.h"
-
#include "src/base/once.h"
-
+#include "src/base/platform/time.h"
+#include "src/d8/async-hooks-wrapper.h"
+#include "src/strings/string-hasher.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
namespace v8 {
-
// A single counter in a counter collection.
class Counter {
public:
@@ -35,6 +32,7 @@ class Counter {
int32_t sample_total() { return sample_total_; }
bool is_histogram() { return is_histogram_; }
void AddSample(int32_t sample);
+
private:
int32_t count_;
int32_t sample_total_;
@@ -42,7 +40,6 @@ class Counter {
uint8_t name_[kMaxNameSize];
};
-
// A set of counters and associated information. An instance of this
// class is stored directly in the memory-mapped counters file if
// the --map-counters options is used
@@ -50,6 +47,7 @@ class CounterCollection {
public:
CounterCollection();
Counter* GetNextCounter();
+
private:
static const unsigned kMaxCounters = 512;
uint32_t magic_number_;
@@ -59,7 +57,7 @@ class CounterCollection {
Counter counters_[kMaxCounters];
};
-typedef std::unordered_map<std::string, Counter*> CounterMap;
+using CounterMap = std::unordered_map<std::string, Counter*>;
class SourceGroup {
public:
@@ -195,7 +193,6 @@ class SerializationData {
DISALLOW_COPY_AND_ASSIGN(SerializationData);
};
-
class SerializationDataQueue {
public:
void Enqueue(std::unique_ptr<SerializationData> data);
@@ -208,7 +205,6 @@ class SerializationDataQueue {
std::vector<std::unique_ptr<SerializationData>> data_;
};
-
class Worker {
public:
Worker();
@@ -311,65 +307,37 @@ class ShellOptions {
kProduceCacheAfterExecute
};
- ShellOptions()
- : send_idle_notification(false),
- invoke_weak_callbacks(false),
- omit_quit(false),
- wait_for_wasm(true),
- stress_opt(false),
- stress_deopt(false),
- stress_runs(1),
- interactive_shell(false),
- test_shell(false),
- expected_to_throw(false),
- mock_arraybuffer_allocator(false),
- enable_inspector(false),
- num_isolates(1),
- compile_options(v8::ScriptCompiler::kNoCompileOptions),
- stress_background_compile(false),
- code_cache_options(CodeCacheOptions::kNoProduceCache),
- isolate_sources(nullptr),
- icu_data_file(nullptr),
- natives_blob(nullptr),
- snapshot_blob(nullptr),
- trace_enabled(false),
- trace_path(nullptr),
- trace_config(nullptr),
- lcov_file(nullptr),
- disable_in_process_stack_traces(false),
- read_from_tcp_port(-1) {}
-
- ~ShellOptions() {
- delete[] isolate_sources;
- }
-
- bool send_idle_notification;
- bool invoke_weak_callbacks;
- bool omit_quit;
- bool wait_for_wasm;
- bool stress_opt;
- bool stress_deopt;
- int stress_runs;
- bool interactive_shell;
- bool test_shell;
- bool expected_to_throw;
- bool mock_arraybuffer_allocator;
+ ~ShellOptions() { delete[] isolate_sources; }
+
+ bool send_idle_notification = false;
+ bool invoke_weak_callbacks = false;
+ bool omit_quit = false;
+ bool wait_for_wasm = true;
+ bool stress_opt = false;
+ bool stress_deopt = false;
+ int stress_runs = 1;
+ bool interactive_shell = false;
+ bool test_shell = false;
+ bool expected_to_throw = false;
+ bool mock_arraybuffer_allocator = false;
size_t mock_arraybuffer_allocator_limit = 0;
- bool enable_inspector;
- int num_isolates;
- v8::ScriptCompiler::CompileOptions compile_options;
- bool stress_background_compile;
- CodeCacheOptions code_cache_options;
- SourceGroup* isolate_sources;
- const char* icu_data_file;
- const char* natives_blob;
- const char* snapshot_blob;
- bool trace_enabled;
- const char* trace_path;
- const char* trace_config;
- const char* lcov_file;
- bool disable_in_process_stack_traces;
- int read_from_tcp_port;
+ bool enable_inspector = false;
+ int num_isolates = 1;
+ v8::ScriptCompiler::CompileOptions compile_options =
+ v8::ScriptCompiler::kNoCompileOptions;
+ bool stress_background_compile = false;
+ CodeCacheOptions code_cache_options = CodeCacheOptions::kNoProduceCache;
+ SourceGroup* isolate_sources = nullptr;
+ const char* icu_data_file = nullptr;
+ const char* icu_locale = nullptr;
+ const char* natives_blob = nullptr;
+ const char* snapshot_blob = nullptr;
+ bool trace_enabled = false;
+ const char* trace_path = nullptr;
+ const char* trace_config = nullptr;
+ const char* lcov_file = nullptr;
+ bool disable_in_process_stack_traces = false;
+ int read_from_tcp_port = -1;
bool enable_os_system = false;
bool quiet_load = false;
int thread_pool_size = 0;
@@ -412,9 +380,7 @@ class Shell : public i::AllStatic {
Isolate* isolate, std::unique_ptr<SerializationData> data);
static void CleanupWorkers();
static int* LookupCounter(const char* name);
- static void* CreateHistogram(const char* name,
- int min,
- int max,
+ static void* CreateHistogram(const char* name, int min, int max,
size_t buckets);
static void AddHistogramSample(void* histogram, int sample);
static void MapCounters(v8::Isolate* isolate, const char* name);
@@ -434,10 +400,9 @@ class Shell : public i::AllStatic {
static void RealmSwitch(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmSharedGet(Local<String> property,
- const PropertyCallbackInfo<Value>& info);
- static void RealmSharedSet(Local<String> property,
- Local<Value> value,
- const PropertyCallbackInfo<void>& info);
+ const PropertyCallbackInfo<Value>& info);
+ static void RealmSharedSet(Local<String> property, Local<Value> value,
+ const PropertyCallbackInfo<void>& info);
static void AsyncHooksCreateHook(
const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -580,8 +545,6 @@ class Shell : public i::AllStatic {
cached_code_map_;
};
-
} // namespace v8
-
-#endif // V8_D8_H_
+#endif // V8_D8_D8_H_
diff --git a/deps/v8/src/date/OWNERS b/deps/v8/src/date/OWNERS
new file mode 100644
index 0000000000..fc4aa8d5ac
--- /dev/null
+++ b/deps/v8/src/date/OWNERS
@@ -0,0 +1,3 @@
+ishell@chromium.org
+jshin@chromium.org
+ulan@chromium.org
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date/date.cc
index 7b6c9e3394..928e3279db 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date/date.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/date.h"
+#include "src/date/date.h"
#include "src/base/overflowing-math.h"
-#include "src/conversions.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif
@@ -14,16 +14,15 @@
namespace v8 {
namespace internal {
-
static const int kDaysIn4Years = 4 * 365 + 1;
static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
static const int kDays1970to2000 = 30 * 365 + 7;
-static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
- kDays1970to2000;
+static const int kDaysOffset =
+ 1000 * kDaysIn400Years + 5 * kDaysIn400Years - kDays1970to2000;
static const int kYearsOffset = 400000;
-static const char kDaysInMonths[] =
- {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+static const char kDaysInMonths[] = {31, 28, 31, 30, 31, 30,
+ 31, 31, 30, 31, 30, 31};
DateCache::DateCache()
: stamp_(kNullAddress),
@@ -33,16 +32,16 @@ DateCache::DateCache()
#else
base::OS::CreateTimezoneCache()
#endif
- ) {
+ ) {
ResetDateCache(base::TimezoneCache::TimeZoneDetection::kSkip);
}
void DateCache::ResetDateCache(
base::TimezoneCache::TimeZoneDetection time_zone_detection) {
- if (stamp_->value() >= Smi::kMaxValue) {
+ if (stamp_.value() >= Smi::kMaxValue) {
stamp_ = Smi::zero();
} else {
- stamp_ = Smi::FromInt(stamp_->value() + 1);
+ stamp_ = Smi::FromInt(stamp_.value() + 1);
}
DCHECK(stamp_ != Smi::FromInt(kInvalidStamp));
for (int i = 0; i < kDSTSize; ++i) {
@@ -79,9 +78,8 @@ void DateCache::ClearSegment(DST* segment) {
segment->last_used = 0;
}
-
-void DateCache::YearMonthDayFromDays(
- int days, int* year, int* month, int* day) {
+void DateCache::YearMonthDayFromDays(int days, int* year, int* month,
+ int* day) {
if (ymd_valid_) {
// Check conservatively if the given 'days' has
// the same year and month as the cached 'days'.
@@ -118,7 +116,6 @@ void DateCache::YearMonthDayFromDays(
days %= 365;
*year += yd3;
-
bool is_leap = (!yd1 || yd2) && !yd3;
DCHECK_GE(days, -1);
@@ -160,11 +157,10 @@ void DateCache::YearMonthDayFromDays(
ymd_days_ = save_days;
}
-
int DateCache::DaysFromYearMonth(int year, int month) {
- static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
+ static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
181, 212, 243, 273, 304, 334};
- static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
+ static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
182, 213, 244, 274, 305, 335};
year += month / 12;
@@ -186,17 +182,13 @@ int DateCache::DaysFromYearMonth(int year, int month) {
// c) there shouldn't be an overflow for 32-bit integers in the following
// operations.
static const int year_delta = 399999;
- static const int base_day = 365 * (1970 + year_delta) +
- (1970 + year_delta) / 4 -
- (1970 + year_delta) / 100 +
- (1970 + year_delta) / 400;
+ static const int base_day =
+ 365 * (1970 + year_delta) + (1970 + year_delta) / 4 -
+ (1970 + year_delta) / 100 + (1970 + year_delta) / 400;
int year1 = year + year_delta;
- int day_from_year = 365 * year1 +
- year1 / 4 -
- year1 / 100 +
- year1 / 400 -
- base_day;
+ int day_from_year =
+ 365 * year1 + year1 / 4 - year1 / 100 + year1 / 400 - base_day;
if ((year % 4 != 0) || (year % 100 == 0 && year % 400 != 0)) {
return day_from_year + day_from_month[month];
@@ -204,7 +196,6 @@ int DateCache::DaysFromYearMonth(int year, int month) {
return day_from_year + day_from_month_leap[month];
}
-
void DateCache::BreakDownTime(int64_t time_ms, int* year, int* month, int* day,
int* weekday, int* hour, int* min, int* sec,
int* ms) {
@@ -302,11 +293,10 @@ void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
}
}
-
int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
int time_sec = (time_ms >= 0 && time_ms <= kMaxEpochTimeInMs)
- ? static_cast<int>(time_ms / 1000)
- : static_cast<int>(EquivalentTime(time_ms) / 1000);
+ ? static_cast<int>(time_ms / 1000)
+ : static_cast<int>(EquivalentTime(time_ms) / 1000);
// Invalidate cache if the usage counter is close to overflow.
// Note that dst_usage_counter is incremented less than ten times
@@ -319,8 +309,7 @@ int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
}
// Optimistic fast check.
- if (before_->start_sec <= time_sec &&
- time_sec <= before_->end_sec) {
+ if (before_->start_sec <= time_sec && time_sec <= before_->end_sec) {
// Cache hit.
before_->last_used = ++dst_usage_counter_;
return before_->offset_ms;
@@ -414,7 +403,6 @@ int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
return 0;
}
-
void DateCache::ProbeDST(int time_sec) {
DST* before = nullptr;
DST* after = nullptr;
@@ -439,7 +427,8 @@ void DateCache::ProbeDST(int time_sec) {
}
if (after == nullptr) {
after = InvalidSegment(after_) && before != after_
- ? after_ : LeastRecentlyUsedDST(before);
+ ? after_
+ : LeastRecentlyUsedDST(before);
}
DCHECK_NOT_NULL(before);
@@ -454,7 +443,6 @@ void DateCache::ProbeDST(int time_sec) {
after_ = after;
}
-
DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
DST* result = nullptr;
for (int i = 0; i < kDSTSize; ++i) {
diff --git a/deps/v8/src/date.h b/deps/v8/src/date/date.h
index 58bc3bef47..1f6c79c5d4 100644
--- a/deps/v8/src/date.h
+++ b/deps/v8/src/date/date.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DATE_H_
-#define V8_DATE_H_
+#ifndef V8_DATE_DATE_H_
+#define V8_DATE_DATE_H_
#include "src/base/timezone-cache.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -21,8 +21,7 @@ class V8_EXPORT_PRIVATE DateCache {
// The largest time that can be passed to OS date-time library functions.
static const int kMaxEpochTimeInSec = kMaxInt;
- static const int64_t kMaxEpochTimeInMs =
- static_cast<int64_t>(kMaxInt) * 1000;
+ static const int64_t kMaxEpochTimeInMs = static_cast<int64_t>(kMaxInt) * 1000;
// The largest time that can be stored in JSDate.
static const int64_t kMaxTimeInMs =
@@ -55,7 +54,6 @@ class V8_EXPORT_PRIVATE DateCache {
return static_cast<int>(time_ms / kMsPerDay);
}
-
// Computes modulo(time_ms, kMsPerDay) given that
// days = floor(time_ms / kMsPerDay).
static int TimeInDay(int64_t time_ms, int days) {
@@ -72,7 +70,6 @@ class V8_EXPORT_PRIVATE DateCache {
return result >= 0 ? result : result + 7;
}
-
bool IsLeap(int year) {
return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
}
@@ -82,7 +79,6 @@ class V8_EXPORT_PRIVATE DateCache {
return GetLocalOffsetFromOS(time, is_utc);
}
-
const char* LocalTimezone(int64_t time_ms) {
if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
time_ms = EquivalentTime(time_ms);
@@ -113,7 +109,6 @@ class V8_EXPORT_PRIVATE DateCache {
return time_ms - LocalOffsetInMs(time_ms, false);
}
-
// Computes a time equivalent to the given time according
// to ECMA 262 - 15.9.1.9.
// The issue here is that some library calls don't work right for dates
@@ -244,4 +239,4 @@ class V8_EXPORT_PRIVATE DateCache {
} // namespace internal
} // namespace v8
-#endif // V8_DATE_H_
+#endif // V8_DATE_DATE_H_
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/date/dateparser-inl.h
index b4376b4789..b2099ca88d 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/date/dateparser-inl.h
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DATEPARSER_INL_H_
-#define V8_DATEPARSER_INL_H_
+#ifndef V8_DATE_DATEPARSER_INL_H_
+#define V8_DATE_DATEPARSER_INL_H_
-#include "src/char-predicates-inl.h"
-#include "src/dateparser.h"
-#include "src/isolate.h"
+#include "src/date/dateparser.h"
+#include "src/execution/isolate.h"
+#include "src/strings/char-predicates-inl.h"
namespace v8 {
namespace internal {
template <typename Char>
bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray out) {
- DCHECK(out->length() >= OUTPUT_SIZE);
+ DCHECK(out.length() >= OUTPUT_SIZE);
InputReader<Char> in(str);
DateStringTokenizer<Char> scanner(&in);
TimeZoneComposer tz;
@@ -75,8 +75,7 @@ bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray out) {
bool has_read_number = !day.IsEmpty();
// If there's anything left, continue with the legacy parser.
bool legacy_parser = false;
- for (DateToken token = next_unhandled_token;
- !token.IsEndOfInput();
+ for (DateToken token = next_unhandled_token; !token.IsEndOfInput();
token = scanner.Next()) {
if (token.IsNumber()) {
legacy_parser = true;
@@ -106,10 +105,9 @@ bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray out) {
// Require end, white space, "Z", "+" or "-" immediately after
// finalizing time.
DateToken peek = scanner.Peek();
- if (!peek.IsEndOfInput() &&
- !peek.IsWhiteSpace() &&
- !peek.IsKeywordZ() &&
- !peek.IsAsciiSign()) return false;
+ if (!peek.IsEndOfInput() && !peek.IsWhiteSpace() &&
+ !peek.IsKeywordZ() && !peek.IsAsciiSign())
+ return false;
} else {
if (!day.Add(n)) return false;
scanner.SkipSymbol('-');
@@ -181,7 +179,7 @@ bool DateParser::Parse(Isolate* isolate, Vector<Char> str, FixedArray out) {
return success;
}
-template<typename CharType>
+template <typename CharType>
DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
int pre_pos = in_->position();
if (in_->IsEnd()) return DateToken::EndOfInput();
@@ -201,8 +199,7 @@ DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
int length = in_->ReadWord(buffer, 3);
int index = KeywordTable::Lookup(buffer, length);
return DateToken::Keyword(KeywordTable::GetType(index),
- KeywordTable::GetValue(index),
- length);
+ KeywordTable::GetValue(index), length);
}
if (in_->SkipWhiteSpace()) {
return DateToken::WhiteSpace(in_->position() - pre_pos);
@@ -214,7 +211,6 @@ DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
return DateToken::Unknown();
}
-
template <typename Char>
bool DateParser::InputReader<Char>::SkipWhiteSpace() {
if (IsWhiteSpaceOrLineTerminator(ch_)) {
@@ -224,20 +220,20 @@ bool DateParser::InputReader<Char>::SkipWhiteSpace() {
return false;
}
-
template <typename Char>
bool DateParser::InputReader<Char>::SkipParentheses() {
if (ch_ != '(') return false;
int balance = 0;
do {
- if (ch_ == ')') --balance;
- else if (ch_ == '(') ++balance;
+ if (ch_ == ')')
+ --balance;
+ else if (ch_ == '(')
+ ++balance;
Next();
} while (balance > 0 && ch_);
return true;
}
-
template <typename Char>
DateParser::DateToken DateParser::ParseES5DateTime(
DateStringTokenizer<Char>* scanner, DayComposer* day, TimeComposer* time,
@@ -263,11 +259,13 @@ DateParser::DateToken DateParser::ParseES5DateTime(
}
if (scanner->SkipSymbol('-')) {
if (!scanner->Peek().IsFixedLengthNumber(2) ||
- !DayComposer::IsMonth(scanner->Peek().number())) return scanner->Next();
+ !DayComposer::IsMonth(scanner->Peek().number()))
+ return scanner->Next();
day->Add(scanner->Next().number());
if (scanner->SkipSymbol('-')) {
if (!scanner->Peek().IsFixedLengthNumber(2) ||
- !DayComposer::IsDay(scanner->Peek().number())) return scanner->Next();
+ !DayComposer::IsDay(scanner->Peek().number()))
+ return scanner->Next();
day->Add(scanner->Next().number());
}
}
@@ -311,8 +309,7 @@ DateParser::DateToken DateParser::ParseES5DateTime(
if (scanner->Peek().IsKeywordZ()) {
scanner->Next();
tz->Set(0);
- } else if (scanner->Peek().IsSymbol('+') ||
- scanner->Peek().IsSymbol('-')) {
+ } else if (scanner->Peek().IsSymbol('+') || scanner->Peek().IsSymbol('-')) {
tz->SetSign(scanner->Next().symbol() == '+' ? 1 : -1);
if (scanner->Peek().IsFixedLengthNumber(4)) {
// hhmm extension syntax.
@@ -352,8 +349,7 @@ DateParser::DateToken DateParser::ParseES5DateTime(
return DateToken::EndOfInput();
}
-
} // namespace internal
} // namespace v8
-#endif // V8_DATEPARSER_INL_H_
+#endif // V8_DATE_DATEPARSER_INL_H_
diff --git a/deps/v8/src/dateparser.cc b/deps/v8/src/date/dateparser.cc
index cf99a8c0c1..252fe54e5b 100644
--- a/deps/v8/src/dateparser.cc
+++ b/deps/v8/src/date/dateparser.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/dateparser.h"
+#include "src/date/dateparser.h"
-#include "src/char-predicates-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/char-predicates-inl.h"
namespace v8 {
namespace internal {
@@ -50,15 +50,17 @@ bool DateParser::DayComposer::Write(FixedArray output) {
}
if (!is_iso_date_) {
- if (Between(year, 0, 49)) year += 2000;
- else if (Between(year, 50, 99)) year += 1900;
+ if (Between(year, 0, 49))
+ year += 2000;
+ else if (Between(year, 50, 99))
+ year += 1900;
}
if (!Smi::IsValid(year) || !IsMonth(month) || !IsDay(day)) return false;
- output->set(YEAR, Smi::FromInt(year));
- output->set(MONTH, Smi::FromInt(month - 1)); // 0-based
- output->set(DAY, Smi::FromInt(day));
+ output.set(YEAR, Smi::FromInt(year));
+ output.set(MONTH, Smi::FromInt(month - 1)); // 0-based
+ output.set(DAY, Smi::FromInt(day));
return true;
}
@@ -79,18 +81,18 @@ bool DateParser::TimeComposer::Write(FixedArray output) {
hour += hour_offset_;
}
- if (!IsHour(hour) || !IsMinute(minute) ||
- !IsSecond(second) || !IsMillisecond(millisecond)) {
+ if (!IsHour(hour) || !IsMinute(minute) || !IsSecond(second) ||
+ !IsMillisecond(millisecond)) {
// A 24th hour is allowed if minutes, seconds, and milliseconds are 0
if (hour != 24 || minute != 0 || second != 0 || millisecond != 0) {
return false;
}
}
- output->set(HOUR, Smi::FromInt(hour));
- output->set(MINUTE, Smi::FromInt(minute));
- output->set(SECOND, Smi::FromInt(second));
- output->set(MILLISECOND, Smi::FromInt(millisecond));
+ output.set(HOUR, Smi::FromInt(hour));
+ output.set(MINUTE, Smi::FromInt(minute));
+ output.set(SECOND, Smi::FromInt(second));
+ output.set(MILLISECOND, Smi::FromInt(millisecond));
return true;
}
@@ -107,53 +109,51 @@ bool DateParser::TimeZoneComposer::Write(FixedArray output) {
total_seconds = -total_seconds;
}
DCHECK(Smi::IsValid(total_seconds));
- output->set(UTC_OFFSET, Smi::FromInt(total_seconds));
+ output.set(UTC_OFFSET, Smi::FromInt(total_seconds));
} else {
- output->set_null(UTC_OFFSET);
+ output.set_null(UTC_OFFSET);
}
return true;
}
-const int8_t DateParser::KeywordTable::
- array[][DateParser::KeywordTable::kEntrySize] = {
- {'j', 'a', 'n', DateParser::MONTH_NAME, 1},
- {'f', 'e', 'b', DateParser::MONTH_NAME, 2},
- {'m', 'a', 'r', DateParser::MONTH_NAME, 3},
- {'a', 'p', 'r', DateParser::MONTH_NAME, 4},
- {'m', 'a', 'y', DateParser::MONTH_NAME, 5},
- {'j', 'u', 'n', DateParser::MONTH_NAME, 6},
- {'j', 'u', 'l', DateParser::MONTH_NAME, 7},
- {'a', 'u', 'g', DateParser::MONTH_NAME, 8},
- {'s', 'e', 'p', DateParser::MONTH_NAME, 9},
- {'o', 'c', 't', DateParser::MONTH_NAME, 10},
- {'n', 'o', 'v', DateParser::MONTH_NAME, 11},
- {'d', 'e', 'c', DateParser::MONTH_NAME, 12},
- {'a', 'm', '\0', DateParser::AM_PM, 0},
- {'p', 'm', '\0', DateParser::AM_PM, 12},
- {'u', 't', '\0', DateParser::TIME_ZONE_NAME, 0},
- {'u', 't', 'c', DateParser::TIME_ZONE_NAME, 0},
- {'z', '\0', '\0', DateParser::TIME_ZONE_NAME, 0},
- {'g', 'm', 't', DateParser::TIME_ZONE_NAME, 0},
- {'c', 'd', 't', DateParser::TIME_ZONE_NAME, -5},
- {'c', 's', 't', DateParser::TIME_ZONE_NAME, -6},
- {'e', 'd', 't', DateParser::TIME_ZONE_NAME, -4},
- {'e', 's', 't', DateParser::TIME_ZONE_NAME, -5},
- {'m', 'd', 't', DateParser::TIME_ZONE_NAME, -6},
- {'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
- {'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
- {'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
- {'t', '\0', '\0', DateParser::TIME_SEPARATOR, 0},
- {'\0', '\0', '\0', DateParser::INVALID, 0},
+const int8_t
+ DateParser::KeywordTable::array[][DateParser::KeywordTable::kEntrySize] = {
+ {'j', 'a', 'n', DateParser::MONTH_NAME, 1},
+ {'f', 'e', 'b', DateParser::MONTH_NAME, 2},
+ {'m', 'a', 'r', DateParser::MONTH_NAME, 3},
+ {'a', 'p', 'r', DateParser::MONTH_NAME, 4},
+ {'m', 'a', 'y', DateParser::MONTH_NAME, 5},
+ {'j', 'u', 'n', DateParser::MONTH_NAME, 6},
+ {'j', 'u', 'l', DateParser::MONTH_NAME, 7},
+ {'a', 'u', 'g', DateParser::MONTH_NAME, 8},
+ {'s', 'e', 'p', DateParser::MONTH_NAME, 9},
+ {'o', 'c', 't', DateParser::MONTH_NAME, 10},
+ {'n', 'o', 'v', DateParser::MONTH_NAME, 11},
+ {'d', 'e', 'c', DateParser::MONTH_NAME, 12},
+ {'a', 'm', '\0', DateParser::AM_PM, 0},
+ {'p', 'm', '\0', DateParser::AM_PM, 12},
+ {'u', 't', '\0', DateParser::TIME_ZONE_NAME, 0},
+ {'u', 't', 'c', DateParser::TIME_ZONE_NAME, 0},
+ {'z', '\0', '\0', DateParser::TIME_ZONE_NAME, 0},
+ {'g', 'm', 't', DateParser::TIME_ZONE_NAME, 0},
+ {'c', 'd', 't', DateParser::TIME_ZONE_NAME, -5},
+ {'c', 's', 't', DateParser::TIME_ZONE_NAME, -6},
+ {'e', 'd', 't', DateParser::TIME_ZONE_NAME, -4},
+ {'e', 's', 't', DateParser::TIME_ZONE_NAME, -5},
+ {'m', 'd', 't', DateParser::TIME_ZONE_NAME, -6},
+ {'m', 's', 't', DateParser::TIME_ZONE_NAME, -7},
+ {'p', 'd', 't', DateParser::TIME_ZONE_NAME, -7},
+ {'p', 's', 't', DateParser::TIME_ZONE_NAME, -8},
+ {'t', '\0', '\0', DateParser::TIME_SEPARATOR, 0},
+ {'\0', '\0', '\0', DateParser::INVALID, 0},
};
-
// We could use perfect hashing here, but this is not a bottleneck.
int DateParser::KeywordTable::Lookup(const uint32_t* pre, int len) {
int i;
for (i = 0; array[i][kTypeOffset] != INVALID; i++) {
int j = 0;
- while (j < kPrefixLength &&
- pre[j] == static_cast<uint32_t>(array[i][j])) {
+ while (j < kPrefixLength && pre[j] == static_cast<uint32_t>(array[i][j])) {
j++;
}
// Check if we have a match and the length is legal.
@@ -166,7 +166,6 @@ int DateParser::KeywordTable::Lookup(const uint32_t* pre, int len) {
return i;
}
-
int DateParser::ReadMilliseconds(DateToken token) {
// Read first three significant digits of the original numeral,
// as inferred from the value and the number of digits.
@@ -197,6 +196,5 @@ int DateParser::ReadMilliseconds(DateToken token) {
return number;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/date/dateparser.h
index 557750283d..ac6be47692 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/date/dateparser.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DATEPARSER_H_
-#define V8_DATEPARSER_H_
+#ifndef V8_DATE_DATEPARSER_H_
+#define V8_DATE_DATEPARSER_H_
-#include "src/allocation.h"
-#include "src/char-predicates.h"
+#include "src/strings/char-predicates.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -28,7 +28,15 @@ class DateParser : public AllStatic {
static bool Parse(Isolate* isolate, Vector<Char> str, FixedArray output);
enum {
- YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, MILLISECOND, UTC_OFFSET, OUTPUT_SIZE
+ YEAR,
+ MONTH,
+ DAY,
+ HOUR,
+ MINUTE,
+ SECOND,
+ MILLISECOND,
+ UTC_OFFSET,
+ OUTPUT_SIZE
};
private:
@@ -113,7 +121,11 @@ class DateParser : public AllStatic {
};
enum KeywordType {
- INVALID, MONTH_NAME, TIME_ZONE_NAME, TIME_SEPARATOR, AM_PM
+ INVALID,
+ MONTH_NAME,
+ TIME_ZONE_NAME,
+ TIME_SEPARATOR,
+ AM_PM
};
struct DateToken {
@@ -147,9 +159,7 @@ class DateParser : public AllStatic {
bool IsSymbol(char symbol) {
return IsSymbol() && this->symbol() == symbol;
}
- bool IsKeywordType(KeywordType tag) {
- return tag_ == tag;
- }
+ bool IsKeywordType(KeywordType tag) { return tag_ == tag; }
bool IsFixedLengthNumber(int length) {
return IsNumber() && length_ == length;
}
@@ -163,9 +173,7 @@ class DateParser : public AllStatic {
bool IsKeywordZ() {
return IsKeywordType(TIME_ZONE_NAME) && length_ == 1 && value_ == 0;
}
- bool IsUnknown(int character) {
- return IsUnknown() && value_ == character;
- }
+ bool IsUnknown(int character) { return IsUnknown() && value_ == character; }
// Factory functions.
static DateToken Keyword(KeywordType tag, int value, int length) {
return DateToken(tag, length, value);
@@ -176,18 +184,12 @@ class DateParser : public AllStatic {
static DateToken Symbol(char symbol) {
return DateToken(kSymbolTag, 1, symbol);
}
- static DateToken EndOfInput() {
- return DateToken(kEndOfInputTag, 0, -1);
- }
+ static DateToken EndOfInput() { return DateToken(kEndOfInputTag, 0, -1); }
static DateToken WhiteSpace(int length) {
return DateToken(kWhiteSpaceTag, length, -1);
}
- static DateToken Unknown() {
- return DateToken(kUnknownTokenTag, 1, -1);
- }
- static DateToken Invalid() {
- return DateToken(kInvalidTokenTag, 0, -1);
- }
+ static DateToken Unknown() { return DateToken(kUnknownTokenTag, 1, -1); }
+ static DateToken Invalid() { return DateToken(kInvalidTokenTag, 0, -1); }
private:
enum TagType {
@@ -200,9 +202,7 @@ class DateParser : public AllStatic {
kKeywordTagStart = 0
};
DateToken(int tag, int length, int value)
- : tag_(tag),
- length_(length),
- value_(value) { }
+ : tag_(tag), length_(length), value_(value) {}
int tag_;
int length_; // Number of characters.
@@ -213,16 +213,14 @@ class DateParser : public AllStatic {
class DateStringTokenizer {
public:
explicit DateStringTokenizer(InputReader<Char>* in)
- : in_(in), next_(Scan()) { }
+ : in_(in), next_(Scan()) {}
DateToken Next() {
DateToken result = next_;
next_ = Scan();
return result;
}
- DateToken Peek() {
- return next_;
- }
+ DateToken Peek() { return next_; }
bool SkipSymbol(char symbol) {
if (next_.IsSymbol(symbol)) {
next_ = Scan();
@@ -278,6 +276,7 @@ class DateParser : public AllStatic {
bool IsUTC() const { return hour_ == 0 && minute_ == 0; }
bool Write(FixedArray output);
bool IsEmpty() { return hour_ == kNone; }
+
private:
int sign_;
int hour_;
@@ -289,8 +288,7 @@ class DateParser : public AllStatic {
TimeComposer() : index_(0), hour_offset_(kNone) {}
bool IsEmpty() const { return index_ == 0; }
bool IsExpecting(int n) const {
- return (index_ == 1 && IsMinute(n)) ||
- (index_ == 2 && IsSecond(n)) ||
+ return (index_ == 1 && IsMinute(n)) || (index_ == 2 && IsSecond(n)) ||
(index_ == 3 && IsMillisecond(n));
}
bool Add(int n) {
@@ -356,8 +354,7 @@ class DateParser : public AllStatic {
TimeZoneComposer* tz);
};
-
} // namespace internal
} // namespace v8
-#endif // V8_DATEPARSER_H_
+#endif // V8_DATE_DATEPARSER_H_
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index 6844fe28a9..542ff5c4df 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -6,11 +6,11 @@
#include "src/debug/debug.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index e617964e51..de72ce68e8 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -6,11 +6,11 @@
#include "src/debug/debug.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/debug/liveedit.h"
-#include "src/frame-constants.h"
-#include "src/frames-inl.h"
-#include "src/objects-inl.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/debug-coverage.cc b/deps/v8/src/debug/debug-coverage.cc
index f4a7e5ac77..4021cd5038 100644
--- a/deps/v8/src/debug/debug-coverage.cc
+++ b/deps/v8/src/debug/debug-coverage.cc
@@ -4,14 +4,15 @@
#include "src/debug/debug-coverage.h"
+#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/base/hashmap.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/frames-inl.h"
-#include "src/isolate.h"
-#include "src/objects.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -48,15 +49,15 @@ class SharedToCounterMap
namespace {
int StartPosition(SharedFunctionInfo info) {
- int start = info->function_token_position();
- if (start == kNoSourcePosition) start = info->StartPosition();
+ int start = info.function_token_position();
+ if (start == kNoSourcePosition) start = info.StartPosition();
return start;
}
bool CompareSharedFunctionInfo(SharedFunctionInfo a, SharedFunctionInfo b) {
int a_start = StartPosition(a);
int b_start = StartPosition(b);
- if (a_start == b_start) return a->EndPosition() > b->EndPosition();
+ if (a_start == b_start) return a.EndPosition() > b.EndPosition();
return a_start < b_start;
}
@@ -73,18 +74,18 @@ void SortBlockData(std::vector<CoverageBlock>& v) {
}
std::vector<CoverageBlock> GetSortedBlockData(SharedFunctionInfo shared) {
- DCHECK(shared->HasCoverageInfo());
+ DCHECK(shared.HasCoverageInfo());
CoverageInfo coverage_info =
- CoverageInfo::cast(shared->GetDebugInfo()->coverage_info());
+ CoverageInfo::cast(shared.GetDebugInfo().coverage_info());
std::vector<CoverageBlock> result;
- if (coverage_info->SlotCount() == 0) return result;
+ if (coverage_info.SlotCount() == 0) return result;
- for (int i = 0; i < coverage_info->SlotCount(); i++) {
- const int start_pos = coverage_info->StartSourcePosition(i);
- const int until_pos = coverage_info->EndSourcePosition(i);
- const int count = coverage_info->BlockCount(i);
+ for (int i = 0; i < coverage_info.SlotCount(); i++) {
+ const int start_pos = coverage_info.StartSourcePosition(i);
+ const int until_pos = coverage_info.EndSourcePosition(i);
+ const int count = coverage_info.BlockCount(i);
DCHECK_NE(kNoSourcePosition, start_pos);
result.emplace_back(start_pos, until_pos, count);
@@ -103,11 +104,7 @@ std::vector<CoverageBlock> GetSortedBlockData(SharedFunctionInfo shared) {
class CoverageBlockIterator final {
public:
explicit CoverageBlockIterator(CoverageFunction* function)
- : function_(function),
- ended_(false),
- delete_current_(false),
- read_index_(-1),
- write_index_(-1) {
+ : function_(function) {
DCHECK(std::is_sorted(function_->blocks.begin(), function_->blocks.end(),
CompareCoverageBlock));
}
@@ -223,10 +220,10 @@ class CoverageBlockIterator final {
CoverageFunction* function_;
std::vector<CoverageBlock> nesting_stack_;
- bool ended_;
- bool delete_current_;
- int read_index_;
- int write_index_;
+ bool ended_ = false;
+ bool delete_current_ = false;
+ int read_index_ = -1;
+ int write_index_ = -1;
};
bool HaveSameSourceRange(const CoverageBlock& lhs, const CoverageBlock& rhs) {
@@ -312,6 +309,30 @@ void MergeNestedRanges(CoverageFunction* function) {
}
}
+void RewriteFunctionScopeCounter(CoverageFunction* function) {
+ // Every function must have at least the top-level function counter.
+ DCHECK(!function->blocks.empty());
+
+ CoverageBlockIterator iter(function);
+ if (iter.Next()) {
+ DCHECK(iter.IsTopLevel());
+
+ CoverageBlock& block = iter.GetBlock();
+ if (block.start == SourceRange::kFunctionLiteralSourcePosition &&
+ block.end == SourceRange::kFunctionLiteralSourcePosition) {
+ // If a function-scope block exists, overwrite the function count. It has
+ // a more reliable count than what we get from the FeedbackVector (which
+ // is imprecise e.g. for generator functions and optimized code).
+ function->count = block.count;
+
+ // Then delete it; for compatibility with non-block coverage modes, the
+ // function-scope block is expected in CoverageFunction, not as a
+ // CoverageBlock.
+ iter.DeleteBlock();
+ }
+ }
+}
+
void FilterAliasedSingletons(CoverageFunction* function) {
CoverageBlockIterator iter(function);
@@ -365,13 +386,13 @@ void ClampToBinary(CoverageFunction* function) {
}
void ResetAllBlockCounts(SharedFunctionInfo shared) {
- DCHECK(shared->HasCoverageInfo());
+ DCHECK(shared.HasCoverageInfo());
CoverageInfo coverage_info =
- CoverageInfo::cast(shared->GetDebugInfo()->coverage_info());
+ CoverageInfo::cast(shared.GetDebugInfo().coverage_info());
- for (int i = 0; i < coverage_info->SlotCount(); i++) {
- coverage_info->ResetBlockCount(i);
+ for (int i = 0; i < coverage_info.SlotCount(); i++) {
+ coverage_info.ResetBlockCount(i);
}
}
@@ -395,16 +416,32 @@ bool IsBinaryMode(debug::CoverageMode mode) {
}
}
-void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo info,
- debug::CoverageMode mode) {
+void CollectBlockCoverageInternal(CoverageFunction* function,
+ SharedFunctionInfo info,
+ debug::CoverageMode mode) {
DCHECK(IsBlockMode(mode));
+ // Functions with empty source ranges are not interesting to report. This can
+ // happen e.g. for internally-generated functions like class constructors.
+ if (!function->HasNonEmptySourceRange()) return;
+
function->has_block_coverage = true;
function->blocks = GetSortedBlockData(info);
// If in binary mode, only report counts of 0/1.
if (mode == debug::CoverageMode::kBlockBinary) ClampToBinary(function);
+ // To stay compatible with non-block coverage modes, the function-scope count
+ // is expected to be in the CoverageFunction, not as part of its blocks.
+ // This finds the function-scope counter, overwrites CoverageFunction::count,
+ // and removes it from the block list.
+ //
+ // Important: Must be called before other transformation passes.
+ RewriteFunctionScopeCounter(function);
+
+ // Functions without blocks don't need to be processed further.
+ if (!function->HasBlocks()) return;
+
// Remove singleton ranges with the same start position as a full range and
// throw away their counts.
// Singleton ranges are only intended to split existing full ranges and should
@@ -435,6 +472,11 @@ void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo info,
// Filter out ranges of zero length.
FilterEmptyRanges(function);
+}
+
+void CollectBlockCoverage(CoverageFunction* function, SharedFunctionInfo info,
+ debug::CoverageMode mode) {
+ CollectBlockCoverageInternal(function, info, mode);
// Reset all counters on the DebugInfo to zero.
ResetAllBlockCounts(info);
@@ -479,10 +521,10 @@ std::unique_ptr<Coverage> Coverage::Collect(
isolate->factory()->feedback_vectors_for_profiling_tools());
for (int i = 0; i < list->Length(); i++) {
FeedbackVector vector = FeedbackVector::cast(list->Get(i));
- SharedFunctionInfo shared = vector->shared_function_info();
- DCHECK(shared->IsSubjectToDebugging());
- uint32_t count = static_cast<uint32_t>(vector->invocation_count());
- if (reset_count) vector->clear_invocation_count();
+ SharedFunctionInfo shared = vector.shared_function_info();
+ DCHECK(shared.IsSubjectToDebugging());
+ uint32_t count = static_cast<uint32_t>(vector.invocation_count());
+ if (reset_count) vector.clear_invocation_count();
counter_map.Add(shared, count);
}
break;
@@ -495,13 +537,35 @@ std::unique_ptr<Coverage> Coverage::Collect(
HeapIterator heap_iterator(isolate->heap());
for (HeapObject current_obj = heap_iterator.next();
!current_obj.is_null(); current_obj = heap_iterator.next()) {
- if (!current_obj->IsFeedbackVector()) continue;
- FeedbackVector vector = FeedbackVector::cast(current_obj);
- SharedFunctionInfo shared = vector->shared_function_info();
- if (!shared->IsSubjectToDebugging()) continue;
- uint32_t count = static_cast<uint32_t>(vector->invocation_count());
+ if (!current_obj.IsJSFunction()) continue;
+ JSFunction func = JSFunction::cast(current_obj);
+ SharedFunctionInfo shared = func.shared();
+ if (!shared.IsSubjectToDebugging()) continue;
+ if (!(func.has_feedback_vector() ||
+ func.has_closure_feedback_cell_array()))
+ continue;
+ uint32_t count = 0;
+ if (func.has_feedback_vector()) {
+ count =
+ static_cast<uint32_t>(func.feedback_vector().invocation_count());
+ } else if (func.raw_feedback_cell().interrupt_budget() <
+ FLAG_budget_for_feedback_vector_allocation) {
+ // We haven't allocated feedback vector, but executed the function
+ // atleast once. We don't have precise invocation count here.
+ count = 1;
+ }
counter_map.Add(shared, count);
}
+
+ // Also check functions on the stack to collect the count map. With lazy
+ // feedback allocation we may miss counting functions if the feedback
+ // vector wasn't allocated yet and the function's interrupt budget wasn't
+ // updated (i.e. it didn't execute return / jump).
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+ SharedFunctionInfo shared = it.frame()->function().shared();
+ if (counter_map.Get(shared) != 0) continue;
+ counter_map.Add(shared, 1);
+ }
break;
}
}
@@ -512,7 +576,7 @@ std::unique_ptr<Coverage> Coverage::Collect(
Script::Iterator scripts(isolate);
for (Script script = scripts.Next(); !script.is_null();
script = scripts.Next()) {
- if (!script->IsUserJavaScript()) continue;
+ if (!script.IsUserJavaScript()) continue;
// Create and add new script data.
Handle<Script> script_handle(script, isolate);
@@ -537,7 +601,7 @@ std::unique_ptr<Coverage> Coverage::Collect(
// Use sorted list to reconstruct function nesting.
for (SharedFunctionInfo info : sorted) {
int start = StartPosition(info);
- int end = info->EndPosition();
+ int end = info.EndPosition();
uint32_t count = counter_map.Get(info);
// Find the correct outer function based on start position.
while (!nesting.empty() && functions->at(nesting.back()).end <= start) {
@@ -550,8 +614,8 @@ std::unique_ptr<Coverage> Coverage::Collect(
break;
case v8::debug::CoverageMode::kBlockBinary:
case v8::debug::CoverageMode::kPreciseBinary:
- count = info->has_reported_binary_coverage() ? 0 : 1;
- info->set_has_reported_binary_coverage(true);
+ count = info.has_reported_binary_coverage() ? 0 : 1;
+ info.set_has_reported_binary_coverage(true);
break;
case v8::debug::CoverageMode::kBestEffort:
count = 1;
@@ -559,20 +623,25 @@ std::unique_ptr<Coverage> Coverage::Collect(
}
}
- Handle<String> name(info->DebugName(), isolate);
+ Handle<String> name(info.DebugName(), isolate);
CoverageFunction function(start, end, count, name);
- if (IsBlockMode(collectionMode) && info->HasCoverageInfo()) {
+ if (IsBlockMode(collectionMode) && info.HasCoverageInfo()) {
CollectBlockCoverage(&function, info, collectionMode);
}
// Only include a function range if itself or its parent function is
- // covered, or if it contains non-trivial block coverage.
+ // covered, or if it contains non-trivial block coverage. It must also
+ // have a non-empty source range (otherwise it is not interesting to
+ // report).
bool is_covered = (count != 0);
bool parent_is_covered =
(!nesting.empty() && functions->at(nesting.back()).count != 0);
bool has_block_coverage = !function.blocks.empty();
- if (is_covered || parent_is_covered || has_block_coverage) {
+ bool function_is_relevant =
+ (is_covered || parent_is_covered || has_block_coverage);
+
+ if (function.HasNonEmptySourceRange() && function_is_relevant) {
nesting.push_back(functions->size());
functions->emplace_back(function);
}
@@ -607,24 +676,37 @@ void Coverage::SelectMode(Isolate* isolate, debug::CoverageMode mode) {
// increment invocation count.
Deoptimizer::DeoptimizeAll(isolate);
- // Root all feedback vectors to avoid early collection.
- isolate->MaybeInitializeVectorListFromHeap();
-
- HeapIterator heap_iterator(isolate->heap());
- for (HeapObject o = heap_iterator.next(); !o.is_null();
- o = heap_iterator.next()) {
- if (IsBinaryMode(mode) && o->IsSharedFunctionInfo()) {
- // If collecting binary coverage, reset
- // SFI::has_reported_binary_coverage to avoid optimizing / inlining
- // functions before they have reported coverage.
- SharedFunctionInfo shared = SharedFunctionInfo::cast(o);
- shared->set_has_reported_binary_coverage(false);
- } else if (o->IsFeedbackVector()) {
- // In any case, clear any collected invocation counts.
- FeedbackVector::cast(o)->clear_invocation_count();
+ std::vector<Handle<JSFunction>> funcs_needing_feedback_vector;
+ {
+ HeapIterator heap_iterator(isolate->heap());
+ for (HeapObject o = heap_iterator.next(); !o.is_null();
+ o = heap_iterator.next()) {
+ if (o.IsJSFunction()) {
+ JSFunction func = JSFunction::cast(o);
+ if (func.has_closure_feedback_cell_array()) {
+ funcs_needing_feedback_vector.push_back(
+ Handle<JSFunction>(func, isolate));
+ }
+ } else if (IsBinaryMode(mode) && o.IsSharedFunctionInfo()) {
+ // If collecting binary coverage, reset
+ // SFI::has_reported_binary_coverage to avoid optimizing / inlining
+ // functions before they have reported coverage.
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(o);
+ shared.set_has_reported_binary_coverage(false);
+ } else if (o.IsFeedbackVector()) {
+ // In any case, clear any collected invocation counts.
+ FeedbackVector::cast(o).clear_invocation_count();
+ }
}
}
+ for (Handle<JSFunction> func : funcs_needing_feedback_vector) {
+ JSFunction::EnsureFeedbackVector(func);
+ }
+
+ // Root all feedback vectors to avoid early collection.
+ isolate->MaybeInitializeVectorListFromHeap();
+
break;
}
}
diff --git a/deps/v8/src/debug/debug-coverage.h b/deps/v8/src/debug/debug-coverage.h
index e319f01a32..9c1f0bcc2c 100644
--- a/deps/v8/src/debug/debug-coverage.h
+++ b/deps/v8/src/debug/debug-coverage.h
@@ -8,8 +8,8 @@
#include <vector>
#include "src/debug/debug-interface.h"
-#include "src/handles.h"
-#include "src/objects.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -20,6 +20,7 @@ class Isolate;
struct CoverageBlock {
CoverageBlock(int s, int e, uint32_t c) : start(s), end(e), count(c) {}
CoverageBlock() : CoverageBlock(kNoSourcePosition, kNoSourcePosition, 0) {}
+
int start;
int end;
uint32_t count;
@@ -28,6 +29,10 @@ struct CoverageBlock {
struct CoverageFunction {
CoverageFunction(int s, int e, uint32_t c, Handle<String> n)
: start(s), end(e), count(c), name(n), has_block_coverage(false) {}
+
+ bool HasNonEmptySourceRange() const { return start < end && start >= 0; }
+ bool HasBlocks() const { return !blocks.empty(); }
+
int start;
int end;
uint32_t count;
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 7bf444231e..65e62f2aac 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -4,18 +4,18 @@
#include "src/debug/debug-evaluate.h"
-#include "src/accessors.h"
-#include "src/assembler-inl.h"
-#include "src/compiler.h"
-#include "src/contexts.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
-#include "src/frames-inl.h"
-#include "src/globals.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecodes.h"
-#include "src/isolate-inl.h"
+#include "src/objects/contexts.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -90,7 +90,7 @@ MaybeHandle<Object> DebugEvaluate::WithTopmostArguments(Isolate* isolate,
// Get context and receiver.
Handle<Context> native_context(
- Context::cast(it.frame()->context())->native_context(), isolate);
+ Context::cast(it.frame()->context()).native_context(), isolate);
// Materialize arguments as property on an extension object.
Handle<JSObject> materialized = factory->NewJSObjectWithNullProto();
@@ -115,7 +115,7 @@ MaybeHandle<Object> DebugEvaluate::WithTopmostArguments(Isolate* isolate,
factory->NewDebugEvaluateContext(native_context, scope_info, materialized,
Handle<Context>(), Handle<StringSet>());
Handle<SharedFunctionInfo> outer_info(
- native_context->empty_function()->shared(), isolate);
+ native_context->empty_function().shared(), isolate);
Handle<JSObject> receiver(native_context->global_proxy(), isolate);
const bool throw_on_side_effect = false;
MaybeHandle<Object> maybe_result =
@@ -226,7 +226,7 @@ void DebugEvaluate::ContextBuilder::UpdateValues() {
.ToHandleChecked();
for (int i = 0; i < keys->length(); i++) {
- DCHECK(keys->get(i)->IsString());
+ DCHECK(keys->get(i).IsString());
Handle<String> key(String::cast(keys->get(i)), isolate_);
Handle<Object> value =
JSReceiver::GetDataProperty(element.materialized_object, key);
@@ -248,28 +248,21 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(ToLength) \
V(ToNumber) \
V(ToObject) \
- V(ToString) \
+ V(ToStringRT) \
/* Type checks */ \
V(IsArray) \
V(IsFunction) \
- V(IsJSProxy) \
V(IsJSReceiver) \
V(IsRegExp) \
V(IsSmi) \
- V(IsTypedArray) \
/* Loads */ \
V(LoadLookupSlotForCall) \
V(GetProperty) \
/* Arrays */ \
V(ArraySpeciesConstructor) \
- V(EstimateNumberOfElements) \
- V(GetArrayKeys) \
- V(HasComplexElements) \
V(HasFastPackedElements) \
V(NewArray) \
V(NormalizeElements) \
- V(PrepareElementsForSort) \
- V(TrySliceSimpleNonFastElements) \
V(TypedArrayGetBuffer) \
/* Errors */ \
V(NewTypeError) \
@@ -339,7 +332,6 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
V(IncrementUseCounter) \
V(MaxSmi) \
V(NewObject) \
- V(SmiLexicographicCompare) \
V(StringMaxLength) \
V(StringToArray) \
/* Test */ \
@@ -520,7 +512,6 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kArrayPrototypeKeys:
case Builtins::kArrayPrototypeLastIndexOf:
case Builtins::kArrayPrototypeSlice:
- case Builtins::kArrayPrototypeSort:
case Builtins::kArrayPrototypeToLocaleString:
case Builtins::kArrayPrototypeToString:
case Builtins::kArrayForEach:
@@ -794,6 +785,7 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
case Builtins::kArrayPrototypeReverse:
case Builtins::kArrayPrototypeShift:
case Builtins::kArrayPrototypeUnshift:
+ case Builtins::kArrayPrototypeSort:
case Builtins::kArrayPrototypeSplice:
case Builtins::kArrayUnshift:
// Map builtins.
@@ -846,7 +838,7 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
Isolate* isolate, Handle<SharedFunctionInfo> info) {
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] Checking function %s for side effect.\n",
- info->DebugName()->ToCString().get());
+ info->DebugName().ToCString().get());
}
DCHECK(info->is_compiled());
@@ -887,8 +879,8 @@ DebugInfo::SideEffectState DebugEvaluate::FunctionGetSideEffectState(
return requires_runtime_checks ? DebugInfo::kRequiresRuntimeChecks
: DebugInfo::kHasNoSideEffect;
} else if (info->IsApiFunction()) {
- if (info->GetCode()->is_builtin()) {
- return info->GetCode()->builtin_index() == Builtins::kHandleApiCall
+ if (info->GetCode().is_builtin()) {
+ return info->GetCode().builtin_index() == Builtins::kHandleApiCall
? DebugInfo::kHasNoSideEffect
: DebugInfo::kHasSideEffects;
}
@@ -1031,9 +1023,9 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
Code callee_code = isolate->heap()->GcSafeFindCodeForInnerPointer(
rinfo->target_address());
- if (!callee_code->is_builtin()) continue;
+ if (!callee_code.is_builtin()) continue;
Builtins::Name callee =
- static_cast<Builtins::Name>(callee_code->builtin_index());
+ static_cast<Builtins::Name>(callee_code.builtin_index());
if (BuiltinGetSideEffectState(callee) == DebugInfo::kHasNoSideEffect) {
continue;
}
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 9aaa959bc2..50817691d7 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -9,7 +9,7 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string-table.h"
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 7454623227..a6ee31738d 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -4,8 +4,8 @@
#include "src/debug/debug-frames.h"
-#include "src/accessors.h"
-#include "src/frames-inl.h"
+#include "src/builtins/accessors.h"
+#include "src/execution/frames-inl.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -47,7 +47,7 @@ FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
wasm_interpreted_frame_ =
WasmInterpreterEntryFrame::cast(frame_)
->debug_info()
- ->GetInterpretedFrame(frame_->fp(), inlined_frame_index);
+ .GetInterpretedFrame(frame_->fp(), inlined_frame_index);
DCHECK(wasm_interpreted_frame_);
}
}
@@ -97,9 +97,9 @@ bool FrameInspector::ParameterIsShadowedByContextLocal(
RedirectActiveFunctions::RedirectActiveFunctions(SharedFunctionInfo shared,
Mode mode)
: shared_(shared), mode_(mode) {
- DCHECK(shared->HasBytecodeArray());
+ DCHECK(shared.HasBytecodeArray());
if (mode == Mode::kUseDebugBytecode) {
- DCHECK(shared->HasDebugInfo());
+ DCHECK(shared.HasDebugInfo());
}
}
@@ -109,12 +109,12 @@ void RedirectActiveFunctions::VisitThread(Isolate* isolate,
JavaScriptFrame* frame = it.frame();
JSFunction function = frame->function();
if (!frame->is_interpreted()) continue;
- if (function->shared() != shared_) continue;
+ if (function.shared() != shared_) continue;
InterpretedFrame* interpreted_frame =
reinterpret_cast<InterpretedFrame*>(frame);
BytecodeArray bytecode = mode_ == Mode::kUseDebugBytecode
- ? shared_->GetDebugInfo()->DebugBytecodeArray()
- : shared_->GetBytecodeArray();
+ ? shared_.GetDebugInfo().DebugBytecodeArray()
+ : shared_.GetBytecodeArray();
interpreted_frame->PatchBytecodeArray(bytecode);
}
}
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index 9c4fafd404..5ee4f8b61f 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -5,11 +5,11 @@
#ifndef V8_DEBUG_DEBUG_FRAMES_H_
#define V8_DEBUG_DEBUG_FRAMES_H_
-#include "src/deoptimizer.h"
-#include "src/frames.h"
-#include "src/isolate.h"
-#include "src/objects.h"
-#include "src/v8threads.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
+#include "src/execution/v8threads.h"
+#include "src/objects/objects.h"
#include "src/wasm/wasm-interpreter.h"
namespace v8 {
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index bdefe78225..79222371f9 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -9,8 +9,8 @@
#include "include/v8-util.h"
#include "include/v8.h"
+#include "src/common/globals.h"
#include "src/debug/interface-types.h"
-#include "src/globals.h"
namespace v8 {
@@ -57,6 +57,12 @@ MaybeLocal<Array> GetInternalProperties(Isolate* isolate, Local<Value> value);
V8_EXPORT_PRIVATE MaybeLocal<Array> GetPrivateFields(Local<Context> context,
Local<Object> value);
+/**
+ * Forwards to v8::Object::CreationContext, but with special handling for
+ * JSGlobalProxy objects.
+ */
+Local<Context> GetCreationContext(Local<Object> value);
+
enum ExceptionBreakState {
NoBreakOnException = 0,
BreakOnUncaughtException = 1,
@@ -141,6 +147,7 @@ class V8_EXPORT_PRIVATE Script {
LiveEditResult* result) const;
bool SetBreakpoint(v8::Local<v8::String> condition, debug::Location* location,
BreakpointId* id) const;
+ bool SetBreakpointOnScriptEntry(BreakpointId* id) const;
};
// Specialization for wasm Scripts.
@@ -482,13 +489,13 @@ class PostponeInterruptsScope {
class WeakMap : public v8::Object {
public:
- V8_WARN_UNUSED_RESULT v8::MaybeLocal<v8::Value> Get(
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT v8::MaybeLocal<v8::Value> Get(
v8::Local<v8::Context> context, v8::Local<v8::Value> key);
- V8_WARN_UNUSED_RESULT v8::MaybeLocal<WeakMap> Set(
+ V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT v8::MaybeLocal<WeakMap> Set(
v8::Local<v8::Context> context, v8::Local<v8::Value> key,
v8::Local<v8::Value> value);
- static Local<WeakMap> New(v8::Isolate* isolate);
+ V8_EXPORT_PRIVATE static Local<WeakMap> New(v8::Isolate* isolate);
V8_INLINE static WeakMap* Cast(Value* obj);
private:
diff --git a/deps/v8/src/debug/debug-property-iterator.cc b/deps/v8/src/debug/debug-property-iterator.cc
index 1bef58192c..a445f55f6d 100644
--- a/deps/v8/src/debug/debug-property-iterator.cc
+++ b/deps/v8/src/debug/debug-property-iterator.cc
@@ -4,12 +4,12 @@
#include "src/debug/debug-property-iterator.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/flags.h"
-#include "src/keys.h"
#include "src/objects/js-array-buffer-inl.h"
-#include "src/property-descriptor.h"
-#include "src/property-details.h"
+#include "src/objects/keys.h"
+#include "src/objects/property-descriptor.h"
+#include "src/objects/property-details.h"
namespace v8 {
@@ -148,8 +148,13 @@ void DebugPropertyIterator::FillKeysForCurrentPrototypeAndStage() {
bool has_exotic_indices = receiver->IsJSTypedArray();
if (stage_ == kExoticIndices) {
if (!has_exotic_indices) return;
- exotic_length_ = static_cast<uint32_t>(
- Handle<JSTypedArray>::cast(receiver)->length_value());
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
+ if (typed_array->WasDetached()) {
+ exotic_length_ = 0;
+ } else {
+ // TODO(bmeurer, v8:4153): Change this to size_t later.
+ exotic_length_ = static_cast<uint32_t>(typed_array->length());
+ }
return;
}
bool skip_indices = has_exotic_indices;
diff --git a/deps/v8/src/debug/debug-property-iterator.h b/deps/v8/src/debug/debug-property-iterator.h
index 6a527f5dc7..822260afb6 100644
--- a/deps/v8/src/debug/debug-property-iterator.h
+++ b/deps/v8/src/debug/debug-property-iterator.h
@@ -6,9 +6,9 @@
#define V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
#include "src/debug/debug-interface.h"
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/prototype.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
+#include "src/objects/prototype.h"
#include "include/v8.h"
diff --git a/deps/v8/src/debug/debug-scope-iterator.cc b/deps/v8/src/debug/debug-scope-iterator.cc
index e71c1c07b3..72e7dc2e45 100644
--- a/deps/v8/src/debug/debug-scope-iterator.cc
+++ b/deps/v8/src/debug/debug-scope-iterator.cc
@@ -4,11 +4,11 @@
#include "src/debug/debug-scope-iterator.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/isolate.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate.h"
#include "src/objects/js-generator-inl.h"
#include "src/wasm/wasm-objects-inl.h"
diff --git a/deps/v8/src/debug/debug-scope-iterator.h b/deps/v8/src/debug/debug-scope-iterator.h
index 912d6858fd..3859e8cb41 100644
--- a/deps/v8/src/debug/debug-scope-iterator.h
+++ b/deps/v8/src/debug/debug-scope-iterator.h
@@ -8,7 +8,7 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug-interface.h"
#include "src/debug/debug-scopes.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 464e0aae33..3a58f0b458 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -8,16 +8,16 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/common/globals.h"
#include "src/debug/debug.h"
-#include "src/frames-inl.h"
-#include "src/globals.h"
-#include "src/isolate-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/module.h"
-#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -47,8 +47,8 @@ Handle<Object> ScopeIterator::GetFunctionDebugName() const {
if (!context_->IsNativeContext()) {
DisallowHeapAllocation no_gc;
- ScopeInfo closure_info = context_->closure_context()->scope_info();
- Handle<String> debug_name(closure_info->FunctionDebugName(), isolate_);
+ ScopeInfo closure_info = context_->closure_context().scope_info();
+ Handle<String> debug_name(closure_info.FunctionDebugName(), isolate_);
if (debug_name->length() > 0) return debug_name;
}
return isolate_->factory()->undefined_value();
@@ -56,11 +56,11 @@ Handle<Object> ScopeIterator::GetFunctionDebugName() const {
ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate), context_(function->context(), isolate) {
- if (!function->shared()->IsSubjectToDebugging()) {
+ if (!function->shared().IsSubjectToDebugging()) {
context_ = Handle<Context>();
return;
}
- script_ = handle(Script::cast(function->shared()->script()), isolate);
+ script_ = handle(Script::cast(function->shared().script()), isolate);
UnwrapEvaluationContext();
}
@@ -70,8 +70,8 @@ ScopeIterator::ScopeIterator(Isolate* isolate,
generator_(generator),
function_(generator->function(), isolate),
context_(generator->context(), isolate),
- script_(Script::cast(function_->shared()->script()), isolate) {
- CHECK(function_->shared()->IsSubjectToDebugging());
+ script_(Script::cast(function_->shared().script()), isolate) {
+ CHECK(function_->shared().IsSubjectToDebugging());
TryParseAndRetrieveScopes(DEFAULT);
}
@@ -88,7 +88,7 @@ void ScopeIterator::TryParseAndRetrieveScopes(ScopeIterator::Option option) {
// Catch the case when the debugger stops in an internal function.
Handle<SharedFunctionInfo> shared_info(function_->shared(), isolate_);
Handle<ScopeInfo> scope_info(shared_info->scope_info(), isolate_);
- if (shared_info->script()->IsUndefined(isolate_)) {
+ if (shared_info->script().IsUndefined(isolate_)) {
current_scope_ = closure_scope_ = nullptr;
context_ = handle(function_->context(), isolate_);
function_ = Handle<JSFunction>();
@@ -191,14 +191,14 @@ void ScopeIterator::UnwrapEvaluationContext() {
if (!context_->IsDebugEvaluateContext()) return;
Context current = *context_;
do {
- Object wrapped = current->get(Context::WRAPPED_CONTEXT_INDEX);
- if (wrapped->IsContext()) {
+ Object wrapped = current.get(Context::WRAPPED_CONTEXT_INDEX);
+ if (wrapped.IsContext()) {
current = Context::cast(wrapped);
} else {
- DCHECK(!current->previous().is_null());
- current = current->previous();
+ DCHECK(!current.previous().is_null());
+ current = current.previous();
}
- } while (current->IsDebugEvaluateContext());
+ } while (current.IsDebugEvaluateContext());
context_ = handle(current, isolate_);
}
@@ -232,13 +232,13 @@ bool ScopeIterator::HasPositionInfo() {
int ScopeIterator::start_position() {
if (InInnerScope()) return current_scope_->start_position();
if (context_->IsNativeContext()) return 0;
- return context_->closure_context()->scope_info()->StartPosition();
+ return context_->closure_context().scope_info().StartPosition();
}
int ScopeIterator::end_position() {
if (InInnerScope()) return current_scope_->end_position();
if (context_->IsNativeContext()) return 0;
- return context_->closure_context()->scope_info()->EndPosition();
+ return context_->closure_context().scope_info().EndPosition();
}
bool ScopeIterator::DeclaresLocals(Mode mode) const {
@@ -341,7 +341,7 @@ ScopeIterator::ScopeType ScopeIterator::Type() const {
UNREACHABLE();
}
if (context_->IsNativeContext()) {
- DCHECK(context_->global_object()->IsJSGlobalObject());
+ DCHECK(context_->global_object().IsJSGlobalObject());
// If we are at the native context and have not yet seen script scope,
// fake it.
return seen_script_scope_ ? ScopeTypeGlobal : ScopeTypeScript;
@@ -481,13 +481,13 @@ void ScopeIterator::DebugPrint() {
case ScopeIterator::ScopeTypeWith:
os << "With:\n";
- context_->extension()->Print(os);
+ context_->extension().Print(os);
break;
case ScopeIterator::ScopeTypeCatch:
os << "Catch:\n";
- context_->extension()->Print(os);
- context_->get(Context::THROWN_OBJECT_INDEX)->Print(os);
+ context_->extension().Print(os);
+ context_->get(Context::THROWN_OBJECT_INDEX).Print(os);
break;
case ScopeIterator::ScopeTypeClosure:
@@ -502,10 +502,8 @@ void ScopeIterator::DebugPrint() {
case ScopeIterator::ScopeTypeScript:
os << "Script:\n";
- context_->global_object()
- ->native_context()
- ->script_context_table()
- ->Print(os);
+ context_->global_object().native_context().script_context_table().Print(
+ os);
break;
default:
@@ -521,7 +519,7 @@ int ScopeIterator::GetSourcePosition() {
} else {
DCHECK(!generator_.is_null());
SharedFunctionInfo::EnsureSourcePositionsAvailable(
- isolate_, handle(generator_->function()->shared(), isolate_));
+ isolate_, handle(generator_->function().shared(), isolate_));
return generator_->source_position();
}
}
@@ -557,7 +555,7 @@ void ScopeIterator::RetrieveScopeChain(DeclarationScope* scope) {
void ScopeIterator::VisitScriptScope(const Visitor& visitor) const {
Handle<JSGlobalObject> global(context_->global_object(), isolate_);
Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table(), isolate_);
+ global->native_context().script_context_table(), isolate_);
// Skip the first script since that just declares 'this'.
for (int context_index = 1; context_index < script_contexts->used();
@@ -576,7 +574,7 @@ void ScopeIterator::VisitModuleScope(const Visitor& visitor) const {
if (VisitContextLocals(visitor, scope_info, context_)) return;
int count_index = scope_info->ModuleVariableCountIndex();
- int module_variable_count = Smi::cast(scope_info->get(count_index))->value();
+ int module_variable_count = Smi::cast(scope_info->get(count_index)).value();
Handle<Module> module(context_->module(), isolate_);
@@ -645,8 +643,8 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
DCHECK(!generator_.is_null());
FixedArray parameters_and_registers =
generator_->parameters_and_registers();
- DCHECK_LT(index, parameters_and_registers->length());
- value = handle(parameters_and_registers->get(index), isolate_);
+ DCHECK_LT(index, parameters_and_registers.length());
+ value = handle(parameters_and_registers.get(index), isolate_);
} else {
value = frame_inspector_->GetParameter(index);
@@ -664,10 +662,10 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
FixedArray parameters_and_registers =
generator_->parameters_and_registers();
int parameter_count =
- function_->shared()->scope_info()->ParameterCount();
+ function_->shared().scope_info().ParameterCount();
index += parameter_count;
- DCHECK_LT(index, parameters_and_registers->length());
- value = handle(parameters_and_registers->get(index), isolate_);
+ DCHECK_LT(index, parameters_and_registers.length());
+ value = handle(parameters_and_registers.get(index), isolate_);
if (value->IsTheHole(isolate_)) {
value = isolate_->factory()->undefined_value();
}
@@ -715,7 +713,7 @@ bool ScopeIterator::VisitLocals(const Visitor& visitor, Mode mode) const {
// a proxy, return an empty object.
Handle<JSObject> ScopeIterator::WithContextExtension() {
DCHECK(context_->IsWithContext());
- if (context_->extension_receiver()->IsJSProxy()) {
+ if (context_->extension_receiver().IsJSProxy()) {
return isolate_->factory()->NewJSObjectWithNullProto();
}
return handle(JSObject::cast(context_->extension_receiver()), isolate_);
@@ -761,7 +759,7 @@ void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
DCHECK(!context_->IsScriptContext());
DCHECK(!context_->IsNativeContext());
DCHECK(!context_->IsWithContext());
- if (!context_->scope_info()->CallsSloppyEval()) return;
+ if (!context_->scope_info().CallsSloppyEval()) return;
if (context_->extension_object().is_null()) return;
Handle<JSObject> extension(context_->extension_object(), isolate_);
Handle<FixedArray> keys =
@@ -771,7 +769,7 @@ void ScopeIterator::VisitLocalScope(const Visitor& visitor, Mode mode) const {
for (int i = 0; i < keys->length(); i++) {
// Names of variables introduced by eval are strings.
- DCHECK(keys->get(i)->IsString());
+ DCHECK(keys->get(i).IsString());
Handle<String> key(String::cast(keys->get(i)), isolate_);
Handle<Object> value = JSReceiver::GetDataProperty(extension, key);
if (visitor(key, value)) return;
@@ -817,7 +815,7 @@ bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
// Set the variable in the suspended generator.
DCHECK(!generator_.is_null());
int parameter_count =
- function_->shared()->scope_info()->ParameterCount();
+ function_->shared().scope_info().ParameterCount();
index += parameter_count;
Handle<FixedArray> parameters_and_registers(
generator_->parameters_and_registers(), isolate_);
@@ -854,7 +852,7 @@ bool ScopeIterator::SetContextExtensionValue(Handle<String> variable_name,
Handle<Object> new_value) {
if (!context_->has_extension()) return false;
- DCHECK(context_->extension_object()->IsJSContextExtensionObject());
+ DCHECK(context_->extension_object().IsJSContextExtensionObject());
Handle<JSObject> ext(context_->extension_object(), isolate_);
LookupIterator it(isolate_, ext, variable_name, LookupIterator::OWN);
Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
@@ -887,7 +885,7 @@ bool ScopeIterator::SetModuleVariableValue(Handle<String> variable_name,
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- cell_index = context_->scope_info()->ModuleIndex(
+ cell_index = context_->scope_info().ModuleIndex(
*variable_name, &mode, &init_flag, &maybe_assigned_flag);
// Setting imports is currently not supported.
@@ -904,7 +902,7 @@ bool ScopeIterator::SetModuleVariableValue(Handle<String> variable_name,
bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
Handle<ScriptContextTable> script_contexts(
- context_->global_object()->native_context()->script_context_table(),
+ context_->global_object().native_context().script_context_table(),
isolate_);
ScriptContextTable::LookupResult lookup_result;
if (ScriptContextTable::Lookup(isolate_, *script_contexts, *variable_name,
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 63a03753f6..6e1c8b27bc 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -8,7 +8,7 @@
#include <vector>
#include "src/debug/debug-frames.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.cc b/deps/v8/src/debug/debug-stack-trace-iterator.cc
index 214ef0d48b..2c2c438727 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.cc
@@ -4,13 +4,13 @@
#include "src/debug/debug-stack-trace-iterator.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-scope-iterator.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/isolate.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate.h"
namespace v8 {
@@ -69,9 +69,8 @@ int DebugStackTraceIterator::GetContextId() const {
DCHECK(!Done());
Handle<Object> context = frame_inspector_->GetContext();
if (context->IsContext()) {
- Object value =
- Context::cast(*context)->native_context()->debug_context_id();
- if (value->IsSmi()) return Smi::ToInt(value);
+ Object value = Context::cast(*context).native_context().debug_context_id();
+ if (value.IsSmi()) return Smi::ToInt(value);
}
return 0;
}
@@ -79,7 +78,7 @@ int DebugStackTraceIterator::GetContextId() const {
v8::MaybeLocal<v8::Value> DebugStackTraceIterator::GetReceiver() const {
DCHECK(!Done());
if (frame_inspector_->IsJavaScript() &&
- frame_inspector_->GetFunction()->shared()->kind() == kArrowFunction) {
+ frame_inspector_->GetFunction()->shared().kind() == kArrowFunction) {
// FrameInspector is not able to get receiver for arrow function.
// So let's try to fetch it using same logic as is used to retrieve 'this'
// during DebugEvaluate::Local.
diff --git a/deps/v8/src/debug/debug-stack-trace-iterator.h b/deps/v8/src/debug/debug-stack-trace-iterator.h
index 0c09afa87c..15b8a85c5e 100644
--- a/deps/v8/src/debug/debug-stack-trace-iterator.h
+++ b/deps/v8/src/debug/debug-stack-trace-iterator.h
@@ -7,7 +7,7 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug-interface.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/debug-type-profile.cc b/deps/v8/src/debug/debug-type-profile.cc
index c1fe308508..5ed2dfb116 100644
--- a/deps/v8/src/debug/debug-type-profile.cc
+++ b/deps/v8/src/debug/debug-type-profile.cc
@@ -4,10 +4,10 @@
#include "src/debug/debug-type-profile.h"
-#include "src/feedback-vector.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/execution/isolate.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -26,7 +26,7 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
for (Script script = scripts.Next(); !script.is_null();
script = scripts.Next()) {
- if (!script->IsUserJavaScript()) {
+ if (!script.IsUserJavaScript()) {
continue;
}
@@ -39,21 +39,20 @@ std::unique_ptr<TypeProfile> TypeProfile::Collect(Isolate* isolate) {
// the list multiple times.
for (int i = 0; i < list->Length(); i++) {
FeedbackVector vector = FeedbackVector::cast(list->Get(i));
- SharedFunctionInfo info = vector->shared_function_info();
- DCHECK(info->IsSubjectToDebugging());
+ SharedFunctionInfo info = vector.shared_function_info();
+ DCHECK(info.IsSubjectToDebugging());
// Match vectors with script.
- if (script != info->script()) {
+ if (script != info.script()) {
continue;
}
- if (!info->HasFeedbackMetadata() ||
- info->feedback_metadata()->is_empty() ||
- !info->feedback_metadata()->HasTypeProfileSlot()) {
+ if (!info.HasFeedbackMetadata() || info.feedback_metadata().is_empty() ||
+ !info.feedback_metadata().HasTypeProfileSlot()) {
continue;
}
- FeedbackSlot slot = vector->GetTypeProfileSlot();
+ FeedbackSlot slot = vector.GetTypeProfileSlot();
FeedbackNexus nexus(vector, slot);
- Handle<String> name(info->DebugName(), isolate);
+ Handle<String> name(info.DebugName(), isolate);
std::vector<int> source_positions = nexus.GetSourcePositions();
for (int position : source_positions) {
DCHECK_GE(position, 0);
@@ -89,10 +88,10 @@ void TypeProfile::SelectMode(Isolate* isolate, debug::TypeProfileMode mode) {
for (int i = 0; i < list->Length(); i++) {
FeedbackVector vector = FeedbackVector::cast(list->Get(i));
- SharedFunctionInfo info = vector->shared_function_info();
- DCHECK(info->IsSubjectToDebugging());
- if (info->feedback_metadata()->HasTypeProfileSlot()) {
- FeedbackSlot slot = vector->GetTypeProfileSlot();
+ SharedFunctionInfo info = vector.shared_function_info();
+ DCHECK(info.IsSubjectToDebugging());
+ if (info.feedback_metadata().HasTypeProfileSlot()) {
+ FeedbackSlot slot = vector.GetTypeProfileSlot();
FeedbackNexus nexus(vector, slot);
nexus.ResetTypeProfile();
}
diff --git a/deps/v8/src/debug/debug-type-profile.h b/deps/v8/src/debug/debug-type-profile.h
index 37f2b659d8..16f739e453 100644
--- a/deps/v8/src/debug/debug-type-profile.h
+++ b/deps/v8/src/debug/debug-type-profile.h
@@ -8,8 +8,8 @@
#include <vector>
#include "src/debug/debug-interface.h"
-#include "src/handles.h"
-#include "src/objects.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index d33fc31669..5cc200d552 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -7,29 +7,30 @@
#include <memory>
#include <unordered_set>
-#include "src/api-inl.h"
-#include "src/api-natives.h"
-#include "src/arguments.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
+#include "src/api/api-natives.h"
#include "src/base/platform/mutex.h"
-#include "src/bootstrapper.h"
#include "src/builtins/builtins.h"
-#include "src/compilation-cache.h"
-#include "src/compiler.h"
-#include "src/counters.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/liveedit.h"
-#include "src/deoptimizer.h"
-#include "src/execution.h"
-#include "src/frames-inl.h"
-#include "src/global-handles.h"
-#include "src/globals.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/arguments.h"
+#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
+#include "src/execution/v8threads.h"
+#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h" // For NextDebuggingId.
+#include "src/init/bootstrapper.h"
#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate-inl.h"
-#include "src/message-template.h"
+#include "src/logging/counters.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/js-generator-inl.h"
@@ -37,7 +38,6 @@
#include "src/objects/slots.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
-#include "src/v8threads.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
@@ -202,8 +202,8 @@ BreakIterator::BreakIterator(Handle<DebugInfo> debug_info)
: debug_info_(debug_info),
break_index_(-1),
source_position_iterator_(
- debug_info->DebugBytecodeArray()->SourcePositionTable()) {
- position_ = debug_info->shared()->StartPosition();
+ debug_info->DebugBytecodeArray().SourcePositionTable()) {
+ position_ = debug_info->shared().StartPosition();
statement_position_ = position_;
// There is at least one break location.
DCHECK(!Done());
@@ -251,12 +251,12 @@ void BreakIterator::Next() {
DebugBreakType BreakIterator::GetDebugBreakType() {
BytecodeArray bytecode_array = debug_info_->OriginalBytecodeArray();
interpreter::Bytecode bytecode =
- interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+ interpreter::Bytecodes::FromByte(bytecode_array.get(code_offset()));
// Make sure we read the actual bytecode, not a prefix scaling bytecode.
if (interpreter::Bytecodes::IsPrefixScalingBytecode(bytecode)) {
- bytecode = interpreter::Bytecodes::FromByte(
- bytecode_array->get(code_offset() + 1));
+ bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_array.get(code_offset() + 1));
}
if (bytecode == interpreter::Bytecode::kDebugger) {
@@ -296,7 +296,7 @@ void BreakIterator::ClearDebugBreak() {
DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
BytecodeArray bytecode_array = debug_info_->DebugBytecodeArray();
BytecodeArray original = debug_info_->OriginalBytecodeArray();
- bytecode_array->set(code_offset(), original->get(code_offset()));
+ bytecode_array.set(code_offset(), original.get(code_offset()));
}
BreakLocation BreakIterator::GetBreakLocation() {
@@ -541,8 +541,8 @@ bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
FrameSummary summary = FrameSummary::GetTop(frame);
DCHECK(!summary.IsWasm());
Handle<JSFunction> function = summary.AsJavaScript().function();
- if (!function->shared()->HasBreakInfo()) return false;
- Handle<DebugInfo> debug_info(function->shared()->GetDebugInfo(), isolate_);
+ if (!function->shared().HasBreakInfo()) return false;
+ Handle<DebugInfo> debug_info(function->shared().GetDebugInfo(), isolate_);
// Enter the debugger.
DebugScope debug_scope(this);
std::vector<BreakLocation> break_locations;
@@ -563,7 +563,7 @@ bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point,
bool is_break_at_entry) {
HandleScope scope(isolate_);
- if (!break_point->condition()->length()) return true;
+ if (!break_point->condition().length()) return true;
Handle<String> condition(break_point->condition(), isolate_);
MaybeHandle<Object> maybe_result;
Handle<Object> result;
@@ -589,13 +589,12 @@ bool Debug::CheckBreakPoint(Handle<BreakPoint> break_point,
return result->BooleanValue(isolate_);
}
-bool Debug::SetBreakPoint(Handle<JSFunction> function,
+bool Debug::SetBreakpoint(Handle<SharedFunctionInfo> shared,
Handle<BreakPoint> break_point,
int* source_position) {
HandleScope scope(isolate_);
// Make sure the function is compiled and has set up the debug info.
- Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
if (!EnsureBreakInfo(shared)) return false;
PrepareFunctionForDebugExecution(shared);
@@ -686,13 +685,13 @@ void Debug::ApplyBreakPoints(Handle<DebugInfo> debug_info) {
} else {
if (!debug_info->HasInstrumentedBytecodeArray()) return;
FixedArray break_points = debug_info->break_points();
- for (int i = 0; i < break_points->length(); i++) {
- if (break_points->get(i)->IsUndefined(isolate_)) continue;
- BreakPointInfo info = BreakPointInfo::cast(break_points->get(i));
- if (info->GetBreakPointCount(isolate_) == 0) continue;
+ for (int i = 0; i < break_points.length(); i++) {
+ if (break_points.get(i).IsUndefined(isolate_)) continue;
+ BreakPointInfo info = BreakPointInfo::cast(break_points.get(i));
+ if (info.GetBreakPointCount(isolate_) == 0) continue;
DCHECK(debug_info->HasInstrumentedBytecodeArray());
BreakIterator it(debug_info);
- it.SkipToPosition(info->source_position());
+ it.SkipToPosition(info.source_position());
it.SetDebugBreak();
}
}
@@ -750,13 +749,13 @@ int Debug::GetFunctionDebuggingId(Handle<JSFunction> function) {
return id;
}
-bool Debug::SetBreakpointForFunction(Handle<JSFunction> function,
+bool Debug::SetBreakpointForFunction(Handle<SharedFunctionInfo> shared,
Handle<String> condition, int* id) {
*id = ++thread_local_.last_breakpoint_id_;
Handle<BreakPoint> breakpoint =
isolate_->factory()->NewBreakPoint(*id, condition);
int source_position = 0;
- return SetBreakPoint(function, breakpoint, &source_position);
+ return SetBreakpoint(shared, breakpoint, &source_position);
}
void Debug::RemoveBreakpoint(int id) {
@@ -874,7 +873,7 @@ void Debug::PrepareStepInSuspendedGenerator() {
thread_local_.last_step_action_ = StepIn;
UpdateHookOnFunctionCall();
Handle<JSFunction> function(
- JSGeneratorObject::cast(thread_local_.suspended_generator_)->function(),
+ JSGeneratorObject::cast(thread_local_.suspended_generator_).function(),
isolate_);
FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared(), isolate_));
clear_suspended_generator();
@@ -978,12 +977,12 @@ void Debug::PrepareStep(StepAction step_action) {
if (frame->is_wasm_compiled()) return;
WasmInterpreterEntryFrame* wasm_frame =
WasmInterpreterEntryFrame::cast(frame);
- wasm_frame->debug_info()->PrepareStep(step_action);
+ wasm_frame->debug_info().PrepareStep(step_action);
return;
}
JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
- DCHECK(js_frame->function()->IsJSFunction());
+ DCHECK(js_frame->function().IsJSFunction());
// Get the debug info (create it if it does not exist).
auto summary = FrameSummary::GetTop(frame).AsJavaScript();
@@ -1024,7 +1023,6 @@ void Debug::PrepareStep(StepAction step_action) {
switch (step_action) {
case StepNone:
UNREACHABLE();
- break;
case StepOut: {
// Clear last position info. For stepping out it does not matter.
thread_local_.last_statement_position_ = kNoSourcePosition;
@@ -1091,15 +1089,15 @@ Handle<Object> Debug::GetSourceBreakLocations(
Handle<FixedArray> locations = isolate->factory()->NewFixedArray(
debug_info->GetBreakPointCount(isolate));
int count = 0;
- for (int i = 0; i < debug_info->break_points()->length(); ++i) {
- if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
+ for (int i = 0; i < debug_info->break_points().length(); ++i) {
+ if (!debug_info->break_points().get(i).IsUndefined(isolate)) {
BreakPointInfo break_point_info =
- BreakPointInfo::cast(debug_info->break_points()->get(i));
- int break_points = break_point_info->GetBreakPointCount(isolate);
+ BreakPointInfo::cast(debug_info->break_points().get(i));
+ int break_points = break_point_info.GetBreakPointCount(isolate);
if (break_points == 0) continue;
for (int j = 0; j < break_points; ++j) {
locations->set(count++,
- Smi::FromInt(break_point_info->source_position()));
+ Smi::FromInt(break_point_info.source_position()));
}
}
}
@@ -1150,8 +1148,8 @@ void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
do {
Code code = iterator.Next();
if (code.is_null()) break;
- if (code->Inlines(*shared)) {
- code->set_marked_for_deoptimization(true);
+ if (code.Inlines(*shared)) {
+ code.set_marked_for_deoptimization(true);
found_something = true;
}
} while (true);
@@ -1215,7 +1213,7 @@ void Debug::InstallDebugBreakTrampoline() {
current = current->next()) {
if (current->debug_info()->CanBreakAtEntry()) {
needs_to_use_trampoline = true;
- if (current->debug_info()->shared()->IsApiFunction()) {
+ if (current->debug_info()->shared().IsApiFunction()) {
needs_to_clear_ic = true;
break;
}
@@ -1231,23 +1229,23 @@ void Debug::InstallDebugBreakTrampoline() {
HeapIterator iterator(isolate_->heap());
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (needs_to_clear_ic && obj->IsFeedbackVector()) {
- FeedbackVector::cast(obj)->ClearSlots(isolate_);
+ if (needs_to_clear_ic && obj.IsFeedbackVector()) {
+ FeedbackVector::cast(obj).ClearSlots(isolate_);
continue;
- } else if (obj->IsJSFunction()) {
+ } else if (obj.IsJSFunction()) {
JSFunction fun = JSFunction::cast(obj);
- SharedFunctionInfo shared = fun->shared();
- if (!shared->HasDebugInfo()) continue;
- if (!shared->GetDebugInfo()->CanBreakAtEntry()) continue;
- if (!fun->is_compiled()) {
+ SharedFunctionInfo shared = fun.shared();
+ if (!shared.HasDebugInfo()) continue;
+ if (!shared.GetDebugInfo().CanBreakAtEntry()) continue;
+ if (!fun.is_compiled()) {
needs_compile.push_back(handle(fun, isolate_));
} else {
- fun->set_code(*trampoline);
+ fun.set_code(*trampoline);
}
- } else if (obj->IsAccessorPair()) {
+ } else if (obj.IsAccessorPair()) {
AccessorPair accessor_pair = AccessorPair::cast(obj);
- if (accessor_pair->getter()->IsFunctionTemplateInfo() ||
- accessor_pair->setter()->IsFunctionTemplateInfo()) {
+ if (accessor_pair.getter().IsFunctionTemplateInfo() ||
+ accessor_pair.setter().IsFunctionTemplateInfo()) {
needs_instantiate.push_back(handle(accessor_pair, isolate_));
}
}
@@ -1257,7 +1255,7 @@ void Debug::InstallDebugBreakTrampoline() {
// Forcibly instantiate all lazy accessor pairs to make sure that they
// properly hit the debug break trampoline.
for (Handle<AccessorPair> accessor_pair : needs_instantiate) {
- if (accessor_pair->getter()->IsFunctionTemplateInfo()) {
+ if (accessor_pair->getter().IsFunctionTemplateInfo()) {
Handle<JSFunction> fun =
ApiNatives::InstantiateFunction(
handle(FunctionTemplateInfo::cast(accessor_pair->getter()),
@@ -1265,7 +1263,7 @@ void Debug::InstallDebugBreakTrampoline() {
.ToHandleChecked();
accessor_pair->set_getter(*fun);
}
- if (accessor_pair->setter()->IsFunctionTemplateInfo()) {
+ if (accessor_pair->setter().IsFunctionTemplateInfo()) {
Handle<JSFunction> fun =
ApiNatives::InstantiateFunction(
handle(FunctionTemplateInfo::cast(accessor_pair->setter()),
@@ -1332,12 +1330,12 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
SharedFunctionInfo::ScriptIterator iterator(isolate_, *script);
for (SharedFunctionInfo info = iterator.Next(); !info.is_null();
info = iterator.Next()) {
- if (info->EndPosition() < start_position ||
- info->StartPosition() >= end_position) {
+ if (info.EndPosition() < start_position ||
+ info.StartPosition() >= end_position) {
continue;
}
- if (!info->IsSubjectToDebugging()) continue;
- if (!info->is_compiled() && !info->allows_lazy_compilation()) continue;
+ if (!info.IsSubjectToDebugging()) continue;
+ if (!info.is_compiled() && !info.allows_lazy_compilation()) continue;
candidates.push_back(i::handle(info, isolate_));
}
@@ -1396,26 +1394,26 @@ class SharedFunctionInfoFinder {
void NewCandidate(SharedFunctionInfo shared,
JSFunction closure = JSFunction()) {
- if (!shared->IsSubjectToDebugging()) return;
- int start_position = shared->function_token_position();
+ if (!shared.IsSubjectToDebugging()) return;
+ int start_position = shared.function_token_position();
if (start_position == kNoSourcePosition) {
- start_position = shared->StartPosition();
+ start_position = shared.StartPosition();
}
if (start_position > target_position_) return;
- if (target_position_ > shared->EndPosition()) return;
+ if (target_position_ > shared.EndPosition()) return;
if (!current_candidate_.is_null()) {
if (current_start_position_ == start_position &&
- shared->EndPosition() == current_candidate_->EndPosition()) {
+ shared.EndPosition() == current_candidate_.EndPosition()) {
// If we already have a matching closure, do not throw it away.
if (!current_candidate_closure_.is_null() && closure.is_null()) return;
// If a top-level function contains only one function
// declaration the source for the top-level and the function
// is the same. In that case prefer the non top-level function.
- if (!current_candidate_->is_toplevel() && shared->is_toplevel()) return;
+ if (!current_candidate_.is_toplevel() && shared.is_toplevel()) return;
} else if (start_position < current_start_position_ ||
- current_candidate_->EndPosition() < shared->EndPosition()) {
+ current_candidate_.EndPosition() < shared.EndPosition()) {
return;
}
}
@@ -1464,7 +1462,7 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
shared = finder.Result();
if (shared.is_null()) break;
// We found it if it's already compiled.
- is_compiled_scope = shared->is_compiled_scope();
+ is_compiled_scope = shared.is_compiled_scope();
if (is_compiled_scope.is_compiled()) {
Handle<SharedFunctionInfo> shared_handle(shared, isolate_);
// If the iteration count is larger than 1, we had to compile the outer
@@ -1481,7 +1479,7 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// If not, compile to reveal inner functions.
HandleScope scope(isolate_);
// Code that cannot be compiled lazily are internal and not debuggable.
- DCHECK(shared->allows_lazy_compilation());
+ DCHECK(shared.allows_lazy_compilation());
if (!Compiler::Compile(handle(shared, isolate_), Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
break;
@@ -1619,7 +1617,7 @@ void Debug::FreeDebugInfoListNode(DebugInfoListNode* prev,
// Pack script back into the
// SFI::script_or_debug_info field.
Handle<DebugInfo> debug_info(node->debug_info());
- debug_info->shared()->set_script_or_debug_info(debug_info->script());
+ debug_info->shared().set_script_or_debug_info(debug_info->script());
delete node;
}
@@ -1628,7 +1626,7 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
// Get the executing function in which the debug break occurred.
- Handle<SharedFunctionInfo> shared(frame->function()->shared(), isolate_);
+ Handle<SharedFunctionInfo> shared(frame->function().shared(), isolate_);
// With no debug info there are no break points, so we can't be at a return.
if (!shared->HasBreakInfo()) return false;
@@ -1675,7 +1673,7 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
Script::Iterator iterator(isolate_);
for (Script script = iterator.Next(); !script.is_null();
script = iterator.Next()) {
- if (script->HasValidSource()) results->set(length++, script);
+ if (script.HasValidSource()) results->set(length++, script);
}
}
return FixedArray::ShrinkOrEmpty(isolate_, results, length);
@@ -1808,7 +1806,7 @@ void Debug::OnDebugBreak(Handle<FixedArray> break_points_hit) {
// This array contains breakpoints installed using JS debug API.
for (int i = 0; i < break_points_hit->length(); ++i) {
BreakPoint break_point = BreakPoint::cast(break_points_hit->get(i));
- inspector_break_points_hit.push_back(break_point->id());
+ inspector_break_points_hit.push_back(break_point.id());
++inspector_break_points_count;
}
@@ -1836,13 +1834,13 @@ bool Debug::IsBlackboxed(Handle<SharedFunctionInfo> shared) {
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
if (!debug_info->computed_debug_is_blackboxed()) {
bool is_blackboxed =
- !shared->IsSubjectToDebugging() || !shared->script()->IsScript();
+ !shared->IsSubjectToDebugging() || !shared->script().IsScript();
if (!is_blackboxed) {
SuppressDebug while_processing(this);
HandleScope handle_scope(isolate_);
PostponeInterruptsScope no_interrupts(isolate_);
DisableBreak no_recursive_break(this);
- DCHECK(shared->script()->IsScript());
+ DCHECK(shared->script().IsScript());
Handle<Script> script(Script::cast(shared->script()), isolate_);
DCHECK(script->IsUserJavaScript());
debug::Location start = GetDebugLocation(script, shared->StartPosition());
@@ -1980,7 +1978,7 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
{ JavaScriptFrameIterator it(isolate_);
DCHECK(!it.done());
Object fun = it.frame()->function();
- if (fun->IsJSFunction()) {
+ if (fun.IsJSFunction()) {
HandleScope scope(isolate_);
Handle<JSFunction> function(JSFunction::cast(fun), isolate_);
// Don't stop in builtin and blackboxed functions.
@@ -2032,7 +2030,7 @@ void Debug::PrintBreakLocation() {
String::FlatContent content = source->GetFlatContent(no_gc);
if (content.IsOneByte()) {
PrintF("[debug] %.*s\n", line_end - line_start,
- content.ToOneByteVector().start() + line_start);
+ content.ToOneByteVector().begin() + line_start);
PrintF("[debug] ");
for (int i = 0; i < column; i++) PrintF(" ");
PrintF("^\n");
@@ -2093,7 +2091,7 @@ void Debug::UpdateDebugInfosForExecutionMode() {
Handle<DebugInfo> debug_info = current->debug_info();
if (debug_info->HasInstrumentedBytecodeArray() &&
debug_info->DebugExecutionMode() != isolate_->debug_execution_mode()) {
- DCHECK(debug_info->shared()->HasBytecodeArray());
+ DCHECK(debug_info->shared().HasBytecodeArray());
if (isolate_->debug_execution_mode() == DebugInfo::kBreakpoints) {
ClearSideEffectChecks(debug_info);
ApplyBreakPoints(debug_info);
@@ -2175,7 +2173,7 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function,
Handle<Object> receiver) {
DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
DisallowJavascriptExecution no_js(isolate_);
- IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
+ IsCompiledScope is_compiled_scope(function->shared().is_compiled_scope());
if (!function->is_compiled() &&
!Compiler::Compile(function, Compiler::KEEP_EXCEPTION,
&is_compiled_scope)) {
@@ -2190,7 +2188,7 @@ bool Debug::PerformSideEffectCheck(Handle<JSFunction> function,
case DebugInfo::kHasSideEffects:
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] Function %s failed side effect check.\n",
- function->shared()->DebugName()->ToCString().get());
+ function->shared().DebugName().ToCString().get());
}
side_effect_check_failed_ = true;
// Throw an uncatchable termination exception.
@@ -2227,7 +2225,7 @@ bool Debug::PerformSideEffectCheckForCallback(
DCHECK_EQ(!receiver.is_null(), callback_info->IsAccessorInfo());
DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
if (!callback_info.is_null() && callback_info->IsCallHandlerInfo() &&
- i::CallHandlerInfo::cast(*callback_info)->NextCallHasNoSideEffect()) {
+ i::CallHandlerInfo::cast(*callback_info).NextCallHasNoSideEffect()) {
return true;
}
// TODO(7515): always pass a valid callback info object.
@@ -2236,8 +2234,8 @@ bool Debug::PerformSideEffectCheckForCallback(
// List of whitelisted internal accessors can be found in accessors.h.
AccessorInfo info = AccessorInfo::cast(*callback_info);
DCHECK_NE(kNotAccessor, accessor_kind);
- switch (accessor_kind == kSetter ? info->setter_side_effect_type()
- : info->getter_side_effect_type()) {
+ switch (accessor_kind == kSetter ? info.setter_side_effect_type()
+ : info.getter_side_effect_type()) {
case SideEffectType::kHasNoSideEffect:
// We do not support setter accessors with no side effects, since
// calling set accessors go through a store bytecode. Store bytecodes
@@ -2254,18 +2252,18 @@ bool Debug::PerformSideEffectCheckForCallback(
}
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] API Callback '");
- info->name()->ShortPrint();
+ info.name().ShortPrint();
PrintF("' may cause side effect.\n");
}
} else if (callback_info->IsInterceptorInfo()) {
InterceptorInfo info = InterceptorInfo::cast(*callback_info);
- if (info->has_no_side_effect()) return true;
+ if (info.has_no_side_effect()) return true;
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] API Interceptor may cause side effect.\n");
}
} else if (callback_info->IsCallHandlerInfo()) {
CallHandlerInfo info = CallHandlerInfo::cast(*callback_info);
- if (info->IsSideEffectFreeCallHandlerInfo()) return true;
+ if (info.IsSideEffectFreeCallHandlerInfo()) return true;
if (FLAG_trace_side_effect_free_debug_evaluate) {
PrintF("[debug-evaluate] API CallHandlerInfo may cause side effect.\n");
}
@@ -2282,8 +2280,8 @@ bool Debug::PerformSideEffectCheckAtBytecode(InterpretedFrame* frame) {
using interpreter::Bytecode;
DCHECK_EQ(isolate_->debug_execution_mode(), DebugInfo::kSideEffects);
- SharedFunctionInfo shared = frame->function()->shared();
- BytecodeArray bytecode_array = shared->GetBytecodeArray();
+ SharedFunctionInfo shared = frame->function().shared();
+ BytecodeArray bytecode_array = shared.GetBytecodeArray();
int offset = frame->GetBytecodeOffset();
interpreter::BytecodeArrayAccessor bytecode_accessor(
handle(bytecode_array, isolate_), offset);
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index e0d675fc7d..8ac77e259d 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -7,14 +7,14 @@
#include <vector>
+#include "src/codegen/source-position-table.h"
+#include "src/common/globals.h"
#include "src/debug/debug-interface.h"
#include "src/debug/interface-types.h"
-#include "src/frames.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/isolate.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
#include "src/objects/debug-objects.h"
-#include "src/source-position-table.h"
namespace v8 {
namespace internal {
@@ -227,7 +227,7 @@ class V8_EXPORT_PRIVATE Debug {
Handle<FixedArray> GetLoadedScripts();
// Break point handling.
- bool SetBreakPoint(Handle<JSFunction> function,
+ bool SetBreakpoint(Handle<SharedFunctionInfo> shared,
Handle<BreakPoint> break_point, int* source_position);
void ClearBreakPoint(Handle<BreakPoint> break_point);
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
@@ -235,7 +235,7 @@ class V8_EXPORT_PRIVATE Debug {
bool SetBreakPointForScript(Handle<Script> script, Handle<String> condition,
int* source_position, int* id);
- bool SetBreakpointForFunction(Handle<JSFunction> function,
+ bool SetBreakpointForFunction(Handle<SharedFunctionInfo> shared,
Handle<String> condition, int* id);
void RemoveBreakpoint(int id);
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index a4466ee9eb..e438f3a640 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -6,9 +6,9 @@
#include "src/debug/debug.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
+#include "src/execution/frames-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 3a449dde62..2375827b1b 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -10,7 +10,7 @@
#include <vector>
#include "include/v8.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 013cb0ff50..9144e03be4 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -4,25 +4,25 @@
#include "src/debug/liveedit.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/compilation-cache.h"
-#include "src/compiler.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/source-position-table.h"
#include "src/debug/debug-interface.h"
#include "src/debug/debug.h"
-#include "src/frames-inl.h"
-#include "src/isolate-inl.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/v8threads.h"
+#include "src/init/v8.h"
+#include "src/logging/log.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-generator-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
-#include "src/source-position-table.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -811,12 +811,12 @@ class FunctionDataMap : public ThreadVisitor {
}
bool Lookup(SharedFunctionInfo sfi, FunctionData** data) {
- int start_position = sfi->StartPosition();
- if (!sfi->script()->IsScript() || start_position == -1) {
+ int start_position = sfi.StartPosition();
+ if (!sfi.script().IsScript() || start_position == -1) {
return false;
}
- Script script = Script::cast(sfi->script());
- return Lookup(GetFuncId(script->id(), sfi), data);
+ Script script = Script::cast(sfi.script());
+ return Lookup(GetFuncId(script.id(), sfi), data);
}
bool Lookup(Handle<Script> script, FunctionLiteral* literal,
@@ -829,21 +829,21 @@ class FunctionDataMap : public ThreadVisitor {
HeapIterator iterator(isolate->heap(), HeapIterator::kFilterUnreachable);
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (obj->IsSharedFunctionInfo()) {
+ if (obj.IsSharedFunctionInfo()) {
SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
FunctionData* data = nullptr;
if (!Lookup(sfi, &data)) continue;
data->shared = handle(sfi, isolate);
- } else if (obj->IsJSFunction()) {
+ } else if (obj.IsJSFunction()) {
JSFunction js_function = JSFunction::cast(obj);
- SharedFunctionInfo sfi = js_function->shared();
+ SharedFunctionInfo sfi = js_function.shared();
FunctionData* data = nullptr;
if (!Lookup(sfi, &data)) continue;
data->js_functions.emplace_back(js_function, isolate);
- } else if (obj->IsJSGeneratorObject()) {
+ } else if (obj.IsJSGeneratorObject()) {
JSGeneratorObject gen = JSGeneratorObject::cast(obj);
- if (gen->is_closed()) continue;
- SharedFunctionInfo sfi = gen->function()->shared();
+ if (gen.is_closed()) continue;
+ SharedFunctionInfo sfi = gen.function().shared();
FunctionData* data = nullptr;
if (!Lookup(sfi, &data)) continue;
data->running_generators.emplace_back(gen, isolate);
@@ -903,10 +903,10 @@ class FunctionDataMap : public ThreadVisitor {
}
FuncId GetFuncId(int script_id, SharedFunctionInfo sfi) {
- DCHECK_EQ(script_id, Script::cast(sfi->script())->id());
- int start_position = sfi->StartPosition();
+ DCHECK_EQ(script_id, Script::cast(sfi.script()).id());
+ int start_position = sfi.StartPosition();
DCHECK_NE(start_position, -1);
- if (sfi->is_toplevel()) {
+ if (sfi.is_toplevel()) {
// This is the top-level function, so special case its start position
DCHECK_EQ(start_position, 0);
start_position = -1;
@@ -1119,10 +1119,10 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
sfi->set_script(*new_script);
if (sfi->HasUncompiledData()) {
- sfi->uncompiled_data()->set_function_literal_id(
+ sfi->uncompiled_data().set_function_literal_id(
mapping.second->function_literal_id());
}
- new_script->shared_function_infos()->Set(
+ new_script->shared_function_infos().Set(
mapping.second->function_literal_id(), HeapObjectReference::Weak(*sfi));
DCHECK_EQ(sfi->FunctionLiteralId(isolate),
mapping.second->function_literal_id());
@@ -1144,11 +1144,11 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
}
if (!sfi->HasBytecodeArray()) continue;
- FixedArray constants = sfi->GetBytecodeArray()->constant_pool();
- for (int i = 0; i < constants->length(); ++i) {
- if (!constants->get(i)->IsSharedFunctionInfo()) continue;
+ FixedArray constants = sfi->GetBytecodeArray().constant_pool();
+ for (int i = 0; i < constants.length(); ++i) {
+ if (!constants.get(i).IsSharedFunctionInfo()) continue;
FunctionData* data = nullptr;
- if (!function_data_map.Lookup(SharedFunctionInfo::cast(constants->get(i)),
+ if (!function_data_map.Lookup(SharedFunctionInfo::cast(constants.get(i)),
&data)) {
continue;
}
@@ -1159,7 +1159,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
}
Handle<SharedFunctionInfo> new_sfi;
if (!data->shared.ToHandle(&new_sfi)) continue;
- constants->set(i, *new_sfi);
+ constants.set(i, *new_sfi);
}
}
for (const auto& mapping : changed) {
@@ -1176,7 +1176,7 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
isolate->compilation_cache()->Remove(sfi);
for (auto& js_function : data->js_functions) {
js_function->set_shared(*new_sfi);
- js_function->set_code(js_function->shared()->GetCode());
+ js_function->set_code(js_function->shared().GetCode());
js_function->set_raw_feedback_cell(
*isolate->factory()->many_closures_cell());
@@ -1186,30 +1186,29 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
}
SharedFunctionInfo::ScriptIterator it(isolate, *new_script);
for (SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
- if (!sfi->HasBytecodeArray()) continue;
- FixedArray constants = sfi->GetBytecodeArray()->constant_pool();
- for (int i = 0; i < constants->length(); ++i) {
- if (!constants->get(i)->IsSharedFunctionInfo()) continue;
- SharedFunctionInfo inner_sfi =
- SharedFunctionInfo::cast(constants->get(i));
+ if (!sfi.HasBytecodeArray()) continue;
+ FixedArray constants = sfi.GetBytecodeArray().constant_pool();
+ for (int i = 0; i < constants.length(); ++i) {
+ if (!constants.get(i).IsSharedFunctionInfo()) continue;
+ SharedFunctionInfo inner_sfi = SharedFunctionInfo::cast(constants.get(i));
// See if there is a mapping from this function's start position to a
// unchanged function's id.
auto unchanged_it =
- start_position_to_unchanged_id.find(inner_sfi->StartPosition());
+ start_position_to_unchanged_id.find(inner_sfi.StartPosition());
if (unchanged_it == start_position_to_unchanged_id.end()) continue;
// Grab that function id from the new script's SFI list, which should have
// already been updated in in the unchanged pass.
SharedFunctionInfo old_unchanged_inner_sfi =
SharedFunctionInfo::cast(new_script->shared_function_infos()
- ->Get(unchanged_it->second)
+ .Get(unchanged_it->second)
->GetHeapObject());
if (old_unchanged_inner_sfi == inner_sfi) continue;
DCHECK_NE(old_unchanged_inner_sfi, inner_sfi);
// Now some sanity checks. Make sure that the unchanged SFI has already
// been processed and patched to be on the new script ...
- DCHECK_EQ(old_unchanged_inner_sfi->script(), *new_script);
- constants->set(i, old_unchanged_inner_sfi);
+ DCHECK_EQ(old_unchanged_inner_sfi.script(), *new_script);
+ constants.set(i, old_unchanged_inner_sfi);
}
}
#ifdef DEBUG
@@ -1222,28 +1221,28 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
SharedFunctionInfo::ScriptIterator it(isolate, *new_script);
std::set<int> start_positions;
for (SharedFunctionInfo sfi = it.Next(); !sfi.is_null(); sfi = it.Next()) {
- DCHECK_EQ(sfi->script(), *new_script);
- DCHECK_EQ(sfi->FunctionLiteralId(isolate), it.CurrentIndex());
+ DCHECK_EQ(sfi.script(), *new_script);
+ DCHECK_EQ(sfi.FunctionLiteralId(isolate), it.CurrentIndex());
// Don't check the start position of the top-level function, as it can
// overlap with a function in the script.
- if (sfi->is_toplevel()) {
- DCHECK_EQ(start_positions.find(sfi->StartPosition()),
+ if (sfi.is_toplevel()) {
+ DCHECK_EQ(start_positions.find(sfi.StartPosition()),
start_positions.end());
- start_positions.insert(sfi->StartPosition());
+ start_positions.insert(sfi.StartPosition());
}
- if (!sfi->HasBytecodeArray()) continue;
+ if (!sfi.HasBytecodeArray()) continue;
// Check that all the functions in this function's constant pool are also
// on the new script, and that their id matches their index in the new
// scripts function list.
- FixedArray constants = sfi->GetBytecodeArray()->constant_pool();
- for (int i = 0; i < constants->length(); ++i) {
- if (!constants->get(i)->IsSharedFunctionInfo()) continue;
+ FixedArray constants = sfi.GetBytecodeArray().constant_pool();
+ for (int i = 0; i < constants.length(); ++i) {
+ if (!constants.get(i).IsSharedFunctionInfo()) continue;
SharedFunctionInfo inner_sfi =
- SharedFunctionInfo::cast(constants->get(i));
- DCHECK_EQ(inner_sfi->script(), *new_script);
+ SharedFunctionInfo::cast(constants.get(i));
+ DCHECK_EQ(inner_sfi.script(), *new_script);
DCHECK_EQ(inner_sfi, new_script->shared_function_infos()
- ->Get(inner_sfi->FunctionLiteralId(isolate))
+ .Get(inner_sfi.FunctionLiteralId(isolate))
->GetHeapObject());
}
}
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 0daba0da1f..578cf29254 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -7,8 +7,8 @@
#include <vector>
-#include "src/globals.h"
-#include "src/handles.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace debug {
diff --git a/deps/v8/src/debug/mips/OWNERS b/deps/v8/src/debug/mips/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/debug/mips/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index b84779a4fb..cc1adc2328 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -6,9 +6,9 @@
#include "src/debug/debug.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
+#include "src/execution/frames-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/mips64/OWNERS b/deps/v8/src/debug/mips64/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/debug/mips64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index ebd8db26d7..b93eb39c52 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -6,9 +6,9 @@
#include "src/debug/debug.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
+#include "src/execution/frames-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index 4a6d0a67d5..a5b41c46fe 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -6,9 +6,9 @@
#include "src/debug/debug.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
+#include "src/execution/frames-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/s390/OWNERS b/deps/v8/src/debug/s390/OWNERS
deleted file mode 100644
index 6d1a8fc472..0000000000
--- a/deps/v8/src/debug/s390/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-jyan@ca.ibm.com
-joransiu@ca.ibm.com
-michael_dawson@ca.ibm.com
-miladfar@ca.ibm.com \ No newline at end of file
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
index f7aabe39b6..b85b2cc219 100644
--- a/deps/v8/src/debug/s390/debug-s390.cc
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#if V8_TARGET_ARCH_S390
#include "src/debug/debug.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
+#include "src/execution/frames-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 6cdfba151f..a8b018d99f 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -6,11 +6,11 @@
#include "src/debug/debug.h"
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/deoptimizer/OWNERS b/deps/v8/src/deoptimizer/OWNERS
new file mode 100644
index 0000000000..97a194d7cf
--- /dev/null
+++ b/deps/v8/src/deoptimizer/OWNERS
@@ -0,0 +1,5 @@
+bmeurer@chromium.org
+jarin@chromium.org
+mstarzinger@chromium.org
+sigurds@chromium.org
+tebbi@chromium.org
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
index edfb9c6096..4004dfd90f 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/deoptimizer/arm/deoptimizer-arm.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -44,6 +44,8 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Push registers d0-d15, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
+ // Okay to not call AllocateStackSpace here because the size is a known
+ // small number and we need to use condition codes.
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d15);
@@ -54,7 +56,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
// handle this a bit differently.
- __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
+ __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
{
UseScratchRegisterScope temps(masm);
@@ -145,7 +147,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
- __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
+ __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
Label pop_loop_header;
__ b(&pop_loop_header);
@@ -171,8 +173,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r4 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
@@ -239,18 +240,15 @@ void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
index 78540e7d6f..a96b1263ab 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/deoptimizer/arm64/deoptimizer-arm64.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api.h"
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
+#include "src/api/api.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -55,12 +55,6 @@ void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
masm->Sub(dst, dst, dst_offset);
}
-// TODO(jgruber): There's a hack here to explicitly skip restoration of the
-// so-called 'arm64 platform register' x18. The register may be in use by the
-// OS, thus we should not clobber it. Instead of this hack, it would be nicer
-// not to add x18 to the list of saved registers in the first place. The
-// complication here is that we require `reg_list.Count() % 2 == 0` in multiple
-// spots.
void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
const Register& src_base, int src_offset) {
DCHECK_EQ(reg_list.Count() % 2, 0);
@@ -74,8 +68,8 @@ void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
Register src = temps.AcquireX();
masm->Add(src, src_base, src_offset);
- // x18 is the platform register and is reserved for the use of platform ABIs.
- restore_list.Remove(x18);
+ // No need to restore padreg.
+ restore_list.Remove(padreg);
// Restore every register in restore_list from src.
while (!restore_list.IsEmpty()) {
@@ -123,11 +117,14 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
DCHECK_EQ(saved_float_registers.Count() % 4, 0);
__ PushCPURegList(saved_float_registers);
- // We save all the registers except sp, lr and the masm scratches.
+ // We save all the registers except sp, lr, platform register (x18) and the
+ // masm scratches.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
saved_registers.Remove(ip0);
saved_registers.Remove(ip1);
+ saved_registers.Remove(x18);
saved_registers.Combine(fp);
+ saved_registers.Align();
DCHECK_EQ(saved_registers.Count() % 2, 0);
__ PushCPURegList(saved_registers);
@@ -294,18 +291,15 @@ void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/deoptimize-reason.cc b/deps/v8/src/deoptimizer/deoptimize-reason.cc
index 733fdfd883..ed5954bf9c 100644
--- a/deps/v8/src/deoptimize-reason.cc
+++ b/deps/v8/src/deoptimizer/deoptimize-reason.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/deoptimize-reason.h"
+#include "src/deoptimizer/deoptimize-reason.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimizer/deoptimize-reason.h
index 8fa2a25d2b..d556e89927 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimizer/deoptimize-reason.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DEOPTIMIZE_REASON_H_
-#define V8_DEOPTIMIZE_REASON_H_
+#ifndef V8_DEOPTIMIZER_DEOPTIMIZE_REASON_H_
+#define V8_DEOPTIMIZER_DEOPTIMIZE_REASON_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -74,4 +74,4 @@ V8_EXPORT_PRIVATE char const* DeoptimizeReasonToString(DeoptimizeReason reason);
} // namespace internal
} // namespace v8
-#endif // V8_DEOPTIMIZE_REASON_H_
+#endif // V8_DEOPTIMIZER_DEOPTIMIZE_REASON_H_
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index a00beee829..91556cfbdc 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -2,29 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/deoptimizer.h"
+#include "src/deoptimizer/deoptimizer.h"
#include <memory>
-#include "src/accessors.h"
-#include "src/assembler-inl.h"
#include "src/ast/prettyprinter.h"
-#include "src/callable.h"
-#include "src/counters.h"
-#include "src/disasm.h"
-#include "src/frames-inl.h"
-#include "src/global-handles.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/diagnostics/disasm.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/v8threads.h"
+#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h"
+#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
#include "src/tracing/trace-event.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
// Has to be the last include (doesn't have include guards)
#include "src/objects/object-macros.h"
@@ -54,7 +54,7 @@ class FrameWriter {
}
void PushRawObject(Object obj, const char* debug_hint) {
- intptr_t value = obj->ptr();
+ intptr_t value = obj.ptr();
PushValue(value);
if (trace_scope_ != nullptr) {
DebugPrintOutputObject(obj, top_offset_, debug_hint);
@@ -121,10 +121,10 @@ class FrameWriter {
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " " V8PRIxPTR_FMT ": [top + %3d] <- ",
output_address(output_offset), output_offset);
- if (obj->IsSmi()) {
- PrintF(V8PRIxPTR_FMT " <Smi %d>", obj->ptr(), Smi::cast(obj)->value());
+ if (obj.IsSmi()) {
+ PrintF(V8PRIxPTR_FMT " <Smi %d>", obj.ptr(), Smi::cast(obj).value());
} else {
- obj->ShortPrint(trace_scope_->file());
+ obj.ShortPrint(trace_scope_->file());
}
PrintF(trace_scope_->file(), " ; %s", debug_hint);
}
@@ -142,7 +142,6 @@ DeoptimizerData::DeoptimizerData(Heap* heap) : heap_(heap), current_(nullptr) {
heap_->RegisterStrongRoots(FullObjectSlot(start), FullObjectSlot(end));
}
-
DeoptimizerData::~DeoptimizerData() {
Code* start = &deopt_entry_code_[0];
heap_->UnregisterStrongRoots(FullObjectSlot(start));
@@ -157,22 +156,21 @@ void DeoptimizerData::set_deopt_entry_code(DeoptimizeKind kind, Code code) {
}
Code Deoptimizer::FindDeoptimizingCode(Address addr) {
- if (function_->IsHeapObject()) {
+ if (function_.IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
Isolate* isolate = isolate_;
- Context native_context = function_->context()->native_context();
- Object element = native_context->DeoptimizedCodeListHead();
- while (!element->IsUndefined(isolate)) {
+ Context native_context = function_.context().native_context();
+ Object element = native_context.DeoptimizedCodeListHead();
+ while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
- CHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- if (code->contains(addr)) return code;
- element = code->next_code_link();
+ CHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
+ if (code.contains(addr)) return code;
+ element = code.next_code_link();
}
}
return Code();
}
-
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(Address raw_function, DeoptimizeKind kind,
@@ -195,9 +193,7 @@ Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
}
DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
- JavaScriptFrame* frame,
- int jsframe_index,
- Isolate* isolate) {
+ JavaScriptFrame* frame, int jsframe_index, Isolate* isolate) {
CHECK(frame->is_optimized());
TranslatedState translated_values(frame);
@@ -248,15 +244,15 @@ class ActivationsFinder : public ThreadVisitor {
for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
- if (code->kind() == Code::OPTIMIZED_FUNCTION &&
- code->marked_for_deoptimization()) {
+ if (code.kind() == Code::OPTIMIZED_FUNCTION &&
+ code.marked_for_deoptimization()) {
codes_->erase(code);
// Obtain the trampoline to the deoptimizer call.
- SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
+ SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());
int trampoline_pc = safepoint.trampoline_pc();
DCHECK_IMPLIES(code == topmost_, safe_to_deopt_);
// Replace the current pc on the stack with the trampoline.
- it.frame()->set_pc(code->raw_instruction_start() + trampoline_pc);
+ it.frame()->set_pc(code.raw_instruction_start() + trampoline_pc);
}
}
}
@@ -277,15 +273,15 @@ class ActivationsFinder : public ThreadVisitor {
void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
DisallowHeapAllocation no_allocation;
- Isolate* isolate = context->GetIsolate();
+ Isolate* isolate = context.GetIsolate();
Code topmost_optimized_code;
bool safe_to_deopt_topmost_optimized_code = false;
#ifdef DEBUG
// Make sure all activations of optimized code can deopt at their current PC.
// The topmost optimized code has special handling because it cannot be
// deoptimized due to weak object dependency.
- for (StackFrameIterator it(isolate, isolate->thread_local_top());
- !it.done(); it.Advance()) {
+ for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
+ it.Advance()) {
StackFrame::Type type = it.frame()->type();
if (type == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
@@ -294,14 +290,14 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimizer found activation of function: ");
- function->PrintName(scope.file());
+ function.PrintName(scope.file());
PrintF(scope.file(), " / %" V8PRIxPTR "]\n", function.ptr());
}
- SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
+ SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc());
// Turbofan deopt is checked when we are patching addresses on stack.
bool safe_if_deopt_triggered = safepoint.has_deoptimization_index();
- bool is_builtin_code = code->kind() == Code::BUILTIN;
+ bool is_builtin_code = code.kind() == Code::BUILTIN;
DCHECK(topmost_optimized_code.is_null() || safe_if_deopt_triggered ||
is_builtin_code);
if (topmost_optimized_code.is_null()) {
@@ -319,26 +315,26 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
// Move marked code from the optimized code list to the deoptimized code list.
// Walk over all optimized code objects in this native context.
Code prev;
- Object element = context->OptimizedCodeListHead();
- while (!element->IsUndefined(isolate)) {
+ Object element = context.OptimizedCodeListHead();
+ while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
- CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
- Object next = code->next_code_link();
+ CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
+ Object next = code.next_code_link();
- if (code->marked_for_deoptimization()) {
+ if (code.marked_for_deoptimization()) {
codes.insert(code);
if (!prev.is_null()) {
// Skip this code in the optimized code list.
- prev->set_next_code_link(next);
+ prev.set_next_code_link(next);
} else {
// There was no previous node, the next node is the new head.
- context->SetOptimizedCodeListHead(next);
+ context.SetOptimizedCodeListHead(next);
}
// Move the code to the _deoptimized_ code list.
- code->set_next_code_link(context->DeoptimizedCodeListHead());
- context->SetDeoptimizedCodeListHead(code);
+ code.set_next_code_link(context.DeoptimizedCodeListHead());
+ context.SetDeoptimizedCodeListHead(code);
} else {
// Not marked; preserve this element.
prev = code;
@@ -363,7 +359,6 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context context) {
}
}
-
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
RuntimeCallCounterId::kDeoptimizeCode);
@@ -377,15 +372,14 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
DisallowHeapAllocation no_allocation;
// For all contexts, mark all code, then deoptimize.
Object context = isolate->heap()->native_contexts_list();
- while (!context->IsUndefined(isolate)) {
+ while (!context.IsUndefined(isolate)) {
Context native_context = Context::cast(context);
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
- context = native_context->next_context_link();
+ context = native_context.next_context_link();
}
}
-
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
RuntimeCallCounterId::kDeoptimizeCode);
@@ -398,47 +392,46 @@ void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
DisallowHeapAllocation no_allocation;
// For all contexts, deoptimize code already marked.
Object context = isolate->heap()->native_contexts_list();
- while (!context->IsUndefined(isolate)) {
+ while (!context.IsUndefined(isolate)) {
Context native_context = Context::cast(context);
DeoptimizeMarkedCodeForContext(native_context);
- context = native_context->next_context_link();
+ context = native_context.next_context_link();
}
}
void Deoptimizer::MarkAllCodeForContext(Context context) {
- Object element = context->OptimizedCodeListHead();
- Isolate* isolate = context->GetIsolate();
- while (!element->IsUndefined(isolate)) {
+ Object element = context.OptimizedCodeListHead();
+ Isolate* isolate = context.GetIsolate();
+ while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
- CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
- code->set_marked_for_deoptimization(true);
- element = code->next_code_link();
+ CHECK_EQ(code.kind(), Code::OPTIMIZED_FUNCTION);
+ code.set_marked_for_deoptimization(true);
+ element = code.next_code_link();
}
}
void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
- Isolate* isolate = function->GetIsolate();
+ Isolate* isolate = function.GetIsolate();
RuntimeCallTimerScope runtimeTimer(isolate,
RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
- function->ResetIfBytecodeFlushed();
- if (code.is_null()) code = function->code();
+ function.ResetIfBytecodeFlushed();
+ if (code.is_null()) code = function.code();
- if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ if (code.kind() == Code::OPTIMIZED_FUNCTION) {
// Mark the code for deoptimization and unlink any functions that also
// refer to that code. The code cannot be shared across native contexts,
// so we only need to search one.
- code->set_marked_for_deoptimization(true);
+ code.set_marked_for_deoptimization(true);
// The code in the function's optimized code feedback vector slot might
// be different from the code on the function - evict it if necessary.
- function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
- function->shared(), "unlinking code marked for deopt");
- if (!code->deopt_already_counted()) {
- function->feedback_vector()->increment_deopt_count();
- code->set_deopt_already_counted(true);
+ function.feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
+ function.shared(), "unlinking code marked for deopt");
+ if (!code.deopt_already_counted()) {
+ code.set_deopt_already_counted(true);
}
- DeoptimizeMarkedCodeForContext(function->context()->native_context());
+ DeoptimizeMarkedCodeForContext(function.context().native_context());
}
}
@@ -491,7 +484,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
compiled_code_ = FindOptimizedCode();
DCHECK(!compiled_code_.is_null());
- DCHECK(function->IsJSFunction());
+ DCHECK(function.IsJSFunction());
trace_scope_ = FLAG_trace_deopt
? new CodeTracer::Scope(isolate->GetCodeTracer())
: nullptr;
@@ -499,28 +492,18 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
DCHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
- if (compiled_code_->kind() != Code::OPTIMIZED_FUNCTION ||
- !compiled_code_->deopt_already_counted()) {
- // If the function is optimized, and we haven't counted that deopt yet, then
- // increment the function's deopt count so that we can avoid optimising
- // functions that deopt too often.
-
- if (deopt_kind_ == DeoptimizeKind::kSoft) {
- // Soft deopts shouldn't count against the overall deoptimization count
- // that can eventually lead to disabling optimization for a function.
- isolate->counters()->soft_deopts_executed()->Increment();
- } else if (!function.is_null()) {
- function->feedback_vector()->increment_deopt_count();
- }
- }
- if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
- compiled_code_->set_deopt_already_counted(true);
+ if ((compiled_code_.kind() != Code::OPTIMIZED_FUNCTION ||
+ !compiled_code_.deopt_already_counted()) &&
+ deopt_kind_ == DeoptimizeKind::kSoft) {
+ isolate->counters()->soft_deopts_executed()->Increment();
+ }
+ if (compiled_code_.kind() == Code::OPTIMIZED_FUNCTION) {
+ compiled_code_.set_deopt_already_counted(true);
PROFILE(isolate_,
CodeDeoptEvent(compiled_code_, kind, from_, fp_to_sp_delta_));
}
unsigned size = ComputeInputFrameSize();
- int parameter_count =
- function->shared()->internal_formal_parameter_count() + 1;
+ int parameter_count = function.shared().internal_formal_parameter_count() + 1;
input_ = new (size) FrameDescription(size, parameter_count);
}
@@ -530,13 +513,12 @@ Code Deoptimizer::FindOptimizedCode() {
: isolate_->FindCodeObject(from_);
}
-
void Deoptimizer::PrintFunctionName() {
- if (function_->IsHeapObject() && function_->IsJSFunction()) {
- function_->ShortPrint(trace_scope_->file());
+ if (function_.IsHeapObject() && function_.IsJSFunction()) {
+ function_.ShortPrint(trace_scope_->file());
} else {
- PrintF(trace_scope_->file(),
- "%s", Code::Kind2String(compiled_code_->kind()));
+ PrintF(trace_scope_->file(), "%s",
+ Code::Kind2String(compiled_code_.kind()));
}
}
@@ -553,7 +535,6 @@ Deoptimizer::~Deoptimizer() {
delete trace_scope_;
}
-
void Deoptimizer::DeleteFrameDescriptions() {
delete input_;
for (int i = 0; i < output_count_; ++i) {
@@ -575,7 +556,7 @@ Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(kind, DeoptimizerData::kLastDeoptimizeKind);
CHECK(!data->deopt_entry_code(kind).is_null());
- return data->deopt_entry_code(kind)->raw_instruction_start();
+ return data->deopt_entry_code(kind).raw_instruction_start();
}
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
@@ -584,7 +565,7 @@ bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
CHECK_LE(type, DeoptimizerData::kLastDeoptimizeKind);
Code code = data->deopt_entry_code(type);
if (code.is_null()) return false;
- return addr == code->raw_instruction_start();
+ return addr == code.raw_instruction_start();
}
bool Deoptimizer::IsDeoptimizationEntry(Isolate* isolate, Address addr,
@@ -608,18 +589,18 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
// Count all entries in the deoptimizing code list of every context.
Object context = isolate->heap()->native_contexts_list();
- while (!context->IsUndefined(isolate)) {
+ while (!context.IsUndefined(isolate)) {
Context native_context = Context::cast(context);
- Object element = native_context->DeoptimizedCodeListHead();
- while (!element->IsUndefined(isolate)) {
+ Object element = native_context.DeoptimizedCodeListHead();
+ while (!element.IsUndefined(isolate)) {
Code code = Code::cast(element);
- DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- if (!code->marked_for_deoptimization()) {
+ DCHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
+ if (!code.marked_for_deoptimization()) {
length++;
}
- element = code->next_code_link();
+ element = code.next_code_link();
}
- context = Context::cast(context)->next_context_link();
+ context = Context::cast(context).next_context_link();
}
return length;
}
@@ -631,7 +612,7 @@ int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
case TranslatedFrame::kInterpretedFunction: {
int bytecode_offset = translated_frame->node_id().ToInt();
HandlerTable table(
- translated_frame->raw_shared_info()->GetBytecodeArray());
+ translated_frame->raw_shared_info().GetBytecodeArray());
return table.LookupRange(bytecode_offset, data_out, nullptr);
}
case TranslatedFrame::kJavaScriptBuiltinContinuationWithCatch: {
@@ -657,7 +638,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
DeoptimizationData input_data =
- DeoptimizationData::cast(compiled_code_->deoptimization_data());
+ DeoptimizationData::cast(compiled_code_.deoptimization_data());
{
// Read caller's PC, caller's FP and caller's constant pool values
@@ -681,6 +662,10 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
+ StackGuard* const stack_guard = isolate()->stack_guard();
+ CHECK_GT(static_cast<uintptr_t>(caller_frame_top_),
+ stack_guard->real_jslimit());
+
if (trace_scope_ != nullptr) {
timer.Start();
PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
@@ -689,27 +674,26 @@ void Deoptimizer::DoComputeOutputFrames() {
PrintF(trace_scope_->file(),
" (opt #%d) @%d, FP to SP delta: %d, caller sp: " V8PRIxPTR_FMT
"]\n",
- input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_,
+ input_data.OptimizationId().value(), bailout_id_, fp_to_sp_delta_,
caller_frame_top_);
if (deopt_kind_ == DeoptimizeKind::kEager ||
deopt_kind_ == DeoptimizeKind::kSoft) {
- compiled_code_->PrintDeoptLocation(
+ compiled_code_.PrintDeoptLocation(
trace_scope_->file(), " ;;; deoptimize at ", from_);
}
}
- BailoutId node_id = input_data->BytecodeOffset(bailout_id_);
- ByteArray translations = input_data->TranslationByteArray();
- unsigned translation_index =
- input_data->TranslationIndex(bailout_id_)->value();
+ BailoutId node_id = input_data.BytecodeOffset(bailout_id_);
+ ByteArray translations = input_data.TranslationByteArray();
+ unsigned translation_index = input_data.TranslationIndex(bailout_id_).value();
TranslationIterator state_iterator(translations, translation_index);
translated_state_.Init(
isolate_, input_->GetFramePointerAddress(), &state_iterator,
- input_data->LiteralArray(), input_->GetRegisterValues(),
+ input_data.LiteralArray(), input_->GetRegisterValues(),
trace_scope_ == nullptr ? nullptr : trace_scope_->file(),
- function_->IsHeapObject()
- ? function_->shared()->internal_formal_parameter_count()
+ function_.IsHeapObject()
+ ? function_.shared().internal_formal_parameter_count()
: 0);
// Do the input frame to output frame(s) translation.
@@ -739,6 +723,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Translate each output frame.
int frame_index = 0; // output_frame_index
+ size_t total_output_frame_size = 0;
for (size_t i = 0; i < count; ++i, ++frame_index) {
// Read the ast node id, function, and frame height for this output frame.
TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
@@ -774,6 +759,7 @@ void Deoptimizer::DoComputeOutputFrames() {
FATAL("invalid frame");
break;
}
+ total_output_frame_size += output_[frame_index]->GetFrameSize();
}
FrameDescription* topmost = output_[count - 1];
@@ -793,6 +779,14 @@ void Deoptimizer::DoComputeOutputFrames() {
bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
caller_frame_top_, ms);
}
+
+ // TODO(jgruber,neis):
+ // The situation that the output frames do not fit into the stack space should
+ // be prevented by an optimized function's initial stack check: That check
+ // must fail if the (interpreter) frames generated upon deoptimization of the
+ // function would overflow the stack.
+ CHECK_GT(static_cast<uintptr_t>(caller_frame_top_) - total_output_frame_size,
+ stack_guard->real_jslimit());
}
void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
@@ -820,7 +814,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
TranslatedFrame::iterator function_iterator = value_iterator++;
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " translating interpreted frame ");
- std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared.DebugName().ToCString();
PrintF(trace_scope_->file(), "%s", name.get());
PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d%s\n",
bytecode_offset, height_in_bytes,
@@ -837,7 +831,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
- int parameter_count = shared->internal_formal_parameter_count() + 1;
+ int parameter_count = shared.internal_formal_parameter_count() + 1;
FrameDescription* output_frame = new (output_frame_size)
FrameDescription(output_frame_size, parameter_count);
FrameWriter frame_writer(this, output_frame, trace_scope_);
@@ -927,16 +921,16 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
// Read the context from the translations.
Object context = context_pos->GetRawValue();
- output_frame->SetContext(static_cast<intptr_t>(context->ptr()));
+ output_frame->SetContext(static_cast<intptr_t>(context.ptr()));
frame_writer.PushTranslatedValue(context_pos, "context");
// The function was mentioned explicitly in the BEGIN_FRAME.
frame_writer.PushTranslatedValue(function_iterator, "function");
// Set the bytecode array pointer.
- Object bytecode_array = shared->HasBreakInfo()
- ? shared->GetDebugInfo()->DebugBytecodeArray()
- : shared->GetBytecodeArray();
+ Object bytecode_array = shared.HasBreakInfo()
+ ? shared.GetDebugInfo().DebugBytecodeArray()
+ : shared.GetBytecodeArray();
frame_writer.PushRawObject(bytecode_array, "bytecode array\n");
// The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
@@ -1037,12 +1031,12 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
: builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
output_frame->SetPc(
- static_cast<intptr_t>(dispatch_builtin->InstructionStart()));
+ static_cast<intptr_t>(dispatch_builtin.InstructionStart()));
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
- static_cast<intptr_t>(dispatch_builtin->constant_pool());
+ static_cast<intptr_t>(dispatch_builtin.constant_pool());
output_frame->SetConstantPool(constant_pool_value);
if (is_topmost) {
Register constant_pool_reg =
@@ -1061,7 +1055,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Set the continuation for the topmost frame.
Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
- static_cast<intptr_t>(continuation->InstructionStart()));
+ static_cast<intptr_t>(continuation.InstructionStart()));
}
}
@@ -1158,12 +1152,12 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
Code adaptor_trampoline =
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
intptr_t pc_value = static_cast<intptr_t>(
- adaptor_trampoline->InstructionStart() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ adaptor_trampoline.InstructionStart() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset().value());
output_frame->SetPc(pc_value);
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
- static_cast<intptr_t>(adaptor_trampoline->constant_pool());
+ static_cast<intptr_t>(adaptor_trampoline.constant_pool());
output_frame->SetConstantPool(constant_pool_value);
}
}
@@ -1305,18 +1299,18 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// Compute this frame's PC.
DCHECK(bailout_id.IsValidForConstructStub());
- Address start = construct_stub->InstructionStart();
+ Address start = construct_stub.InstructionStart();
int pc_offset =
bailout_id == BailoutId::ConstructStubCreate()
- ? isolate_->heap()->construct_stub_create_deopt_pc_offset()->value()
- : isolate_->heap()->construct_stub_invoke_deopt_pc_offset()->value();
+ ? isolate_->heap()->construct_stub_create_deopt_pc_offset().value()
+ : isolate_->heap()->construct_stub_invoke_deopt_pc_offset().value();
intptr_t pc_value = static_cast<intptr_t>(start + pc_offset);
output_frame->SetPc(pc_value);
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
- static_cast<intptr_t>(construct_stub->constant_pool());
+ static_cast<intptr_t>(construct_stub.constant_pool());
output_frame->SetConstantPool(constant_pool_value);
if (is_topmost) {
Register constant_pool_reg =
@@ -1340,7 +1334,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
DCHECK_EQ(DeoptimizeKind::kLazy, deopt_kind_);
Code continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
- static_cast<intptr_t>(continuation->InstructionStart()));
+ static_cast<intptr_t>(continuation.InstructionStart()));
}
}
@@ -1565,7 +1559,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// Get the possible JSFunction for the case that this is a
// JavaScriptBuiltinContinuationFrame, which needs the JSFunction pointer
// like a normal JavaScriptFrame.
- const intptr_t maybe_function = value_iterator->GetRawValue()->ptr();
+ const intptr_t maybe_function = value_iterator->GetRawValue().ptr();
++value_iterator;
ReadOnlyRoots roots(isolate());
@@ -1617,7 +1611,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
// set (it was automatically added at the end of the FrameState by the
// instruction selector).
Object context = value_iterator->GetRawValue();
- const intptr_t value = context->ptr();
+ const intptr_t value = context.ptr();
TranslatedFrame::iterator context_register_value = value_iterator++;
register_values[kContextRegister.code()] = context_register_value;
output_frame->SetContext(value);
@@ -1687,7 +1681,7 @@ void Deoptimizer::DoComputeBuiltinContinuation(
}
}
frame_writer.PushTranslatedValue(
- register_values[code], trace_scope_ != nullptr ? str.start() : "");
+ register_values[code], trace_scope_ != nullptr ? str.begin() : "");
}
// Some architectures must pad the stack frame with extra stack slots
@@ -1731,12 +1725,12 @@ void Deoptimizer::DoComputeBuiltinContinuation(
Code continue_to_builtin = isolate()->builtins()->builtin(
TrampolineForBuiltinContinuation(mode, must_handle_result));
output_frame->SetPc(
- static_cast<intptr_t>(continue_to_builtin->InstructionStart()));
+ static_cast<intptr_t>(continue_to_builtin.InstructionStart()));
Code continuation =
isolate()->builtins()->builtin(Builtins::kNotifyDeoptimized);
output_frame->SetContinuation(
- static_cast<intptr_t>(continuation->InstructionStart()));
+ static_cast<intptr_t>(continuation.InstructionStart()));
}
void Deoptimizer::MaterializeHeapObjects() {
@@ -1767,8 +1761,8 @@ void Deoptimizer::MaterializeHeapObjects() {
bool feedback_updated = translated_state_.DoUpdateFeedback();
if (trace_scope_ != nullptr && feedback_updated) {
PrintF(trace_scope_->file(), "Feedback updated");
- compiled_code_->PrintDeoptLocation(trace_scope_->file(),
- " from deoptimization at ", from_);
+ compiled_code_.PrintDeoptLocation(trace_scope_->file(),
+ " from deoptimization at ", from_);
}
isolate_->materialized_object_store()->Remove(
@@ -1787,8 +1781,8 @@ unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const {
unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp;
// TODO(jkummerow): If {function_->IsSmi()} can indeed be true, then
// {function_} should not have type {JSFunction}.
- if (!function_->IsSmi()) {
- fixed_size += ComputeIncomingArgumentSize(function_->shared());
+ if (!function_.IsSmi()) {
+ fixed_size += ComputeIncomingArgumentSize(function_.shared());
}
return fixed_size;
}
@@ -1798,8 +1792,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// function into account so we have to avoid double counting them.
unsigned fixed_size_above_fp = ComputeInputFrameAboveFpFixedSize();
unsigned result = fixed_size_above_fp + fp_to_sp_delta_;
- if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
- unsigned stack_slots = compiled_code_->stack_slots();
+ if (compiled_code_.kind() == Code::OPTIMIZED_FUNCTION) {
+ unsigned stack_slots = compiled_code_.stack_slots();
unsigned outgoing_size = 0;
// ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
CHECK_EQ(fixed_size_above_fp + (stack_slots * kSystemPointerSize) -
@@ -1819,7 +1813,7 @@ unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo shared) {
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
- int parameter_slots = shared->internal_formal_parameter_count() + 1;
+ int parameter_slots = shared.internal_formal_parameter_count() + 1;
if (kPadArguments) parameter_slots = RoundUp(parameter_slots, 2);
return parameter_slots * kSystemPointerSize;
}
@@ -1841,9 +1835,8 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
// Allocate the code as immovable since the entry addresses will be used
// directly and there is no support for relocating them.
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::STUB, Handle<Object>(), Builtins::kNoBuiltinId,
- MaybeHandle<ByteArray>(), MaybeHandle<DeoptimizationData>(), kImmovable);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, Code::STUB).set_immovable().Build();
CHECK(isolate->heap()->IsImmovable(*code));
CHECK(data->deopt_entry_code(kind).is_null());
@@ -1904,7 +1897,7 @@ void TranslationBuffer::Add(int32_t value) {
TranslationIterator::TranslationIterator(ByteArray buffer, int index)
: buffer_(buffer), index_(index) {
- DCHECK(index >= 0 && index < buffer->length());
+ DCHECK(index >= 0 && index < buffer.length());
}
int32_t TranslationIterator::Next() {
@@ -1913,7 +1906,7 @@ int32_t TranslationIterator::Next() {
uint32_t bits = 0;
for (int i = 0; true; i += 7) {
DCHECK(HasNext());
- uint8_t next = buffer_->get(index_++);
+ uint8_t next = buffer_.get(index_++);
bits |= (next >> 1) << i;
if ((next & 1) == 0) break;
}
@@ -1923,7 +1916,7 @@ int32_t TranslationIterator::Next() {
return is_negative ? -result : result;
}
-bool TranslationIterator::HasNext() const { return index_ < buffer_->length(); }
+bool TranslationIterator::HasNext() const { return index_ < buffer_.length(); }
Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
Handle<ByteArray> result =
@@ -1966,7 +1959,6 @@ void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
buffer_->Add(height);
}
-
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
buffer_->Add(literal_id);
@@ -2000,19 +1992,16 @@ void Translation::BeginCapturedObject(int length) {
buffer_->Add(length);
}
-
void Translation::DuplicateObject(int object_index) {
buffer_->Add(DUPLICATED_OBJECT);
buffer_->Add(object_index);
}
-
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER);
buffer_->Add(reg.code());
}
-
void Translation::StoreInt32Register(Register reg) {
buffer_->Add(INT32_REGISTER);
buffer_->Add(reg.code());
@@ -2028,7 +2017,6 @@ void Translation::StoreUint32Register(Register reg) {
buffer_->Add(reg.code());
}
-
void Translation::StoreBoolRegister(Register reg) {
buffer_->Add(BOOL_REGISTER);
buffer_->Add(reg.code());
@@ -2044,13 +2032,11 @@ void Translation::StoreDoubleRegister(DoubleRegister reg) {
buffer_->Add(reg.code());
}
-
void Translation::StoreStackSlot(int index) {
buffer_->Add(STACK_SLOT);
buffer_->Add(index);
}
-
void Translation::StoreInt32StackSlot(int index) {
buffer_->Add(INT32_STACK_SLOT);
buffer_->Add(index);
@@ -2066,7 +2052,6 @@ void Translation::StoreUint32StackSlot(int index) {
buffer_->Add(index);
}
-
void Translation::StoreBoolStackSlot(int index) {
buffer_->Add(BOOL_STACK_SLOT);
buffer_->Add(index);
@@ -2082,7 +2067,6 @@ void Translation::StoreDoubleStackSlot(int index) {
buffer_->Add(index);
}
-
void Translation::StoreLiteral(int literal_id) {
buffer_->Add(LITERAL);
buffer_->Add(literal_id);
@@ -2138,21 +2122,19 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
return -1;
}
-
#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
const char* Translation::StringFor(Opcode opcode) {
-#define TRANSLATION_OPCODE_CASE(item) case item: return #item;
- switch (opcode) {
- TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE)
- }
+#define TRANSLATION_OPCODE_CASE(item) \
+ case item: \
+ return #item;
+ switch (opcode) { TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE) }
#undef TRANSLATION_OPCODE_CASE
UNREACHABLE();
}
#endif
-
Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
int index = StackIdToIndex(fp);
if (index == -1) {
@@ -2163,7 +2145,6 @@ Handle<FixedArray> MaterializedObjectStore::Get(Address fp) {
return Handle<FixedArray>::cast(Handle<Object>(array->get(index), isolate()));
}
-
void MaterializedObjectStore::Set(Address fp,
Handle<FixedArray> materialized_objects) {
int index = StackIdToIndex(fp);
@@ -2176,7 +2157,6 @@ void MaterializedObjectStore::Set(Address fp,
array->set(index, *materialized_objects);
}
-
bool MaterializedObjectStore::Remove(Address fp) {
auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp);
if (it == frame_fps_.end()) return false;
@@ -2185,16 +2165,15 @@ bool MaterializedObjectStore::Remove(Address fp) {
frame_fps_.erase(it);
FixedArray array = isolate()->heap()->materialized_objects();
- CHECK_LT(index, array->length());
+ CHECK_LT(index, array.length());
int fps_size = static_cast<int>(frame_fps_.size());
for (int i = index; i < fps_size; i++) {
- array->set(i, array->get(i + 1));
+ array.set(i, array.get(i + 1));
}
- array->set(fps_size, ReadOnlyRoots(isolate()).undefined_value());
+ array.set(fps_size, ReadOnlyRoots(isolate()).undefined_value());
return true;
}
-
int MaterializedObjectStore::StackIdToIndex(Address fp) {
auto it = std::find(frame_fps_.begin(), frame_fps_.end(), fp);
return it == frame_fps_.end()
@@ -2202,13 +2181,11 @@ int MaterializedObjectStore::StackIdToIndex(Address fp) {
: static_cast<int>(std::distance(frame_fps_.begin(), it));
}
-
Handle<FixedArray> MaterializedObjectStore::GetStackEntries() {
return Handle<FixedArray>(isolate()->heap()->materialized_objects(),
isolate());
}
-
Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
Handle<FixedArray> array = GetStackEntries();
if (array->length() >= length) {
@@ -2266,7 +2243,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
*frame_it->shared_info(), frame_it->node_id());
DCHECK_EQ(parameter_count,
- function_->shared()->internal_formal_parameter_count());
+ function_->shared().internal_formal_parameter_count());
parameters_.resize(static_cast<size_t>(parameter_count));
for (int i = 0; i < parameter_count; i++) {
@@ -2301,7 +2278,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
}
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
- CHECK(code->InstructionStart() <= pc && pc <= code->InstructionEnd());
+ CHECK(code.InstructionStart() <= pc && pc <= code.InstructionEnd());
SourcePosition last_position = SourcePosition::Unknown();
DeoptimizeReason last_reason = DeoptimizeReason::kUnknown;
int last_deopt_id = kNoDeoptimizationId;
@@ -2327,13 +2304,12 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code code, Address pc) {
return DeoptInfo(last_position, last_reason, last_deopt_id);
}
-
// static
int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
SharedFunctionInfo shared, BailoutId node_id) {
- DCHECK(shared->HasBytecodeArray());
- return AbstractCode::cast(shared->GetBytecodeArray())
- ->SourcePosition(node_id.ToInt());
+ DCHECK(shared.HasBytecodeArray());
+ return AbstractCode::cast(shared.GetBytecodeArray())
+ .SourcePosition(node_id.ToInt());
}
// static
@@ -2345,7 +2321,6 @@ TranslatedValue TranslatedValue::NewDeferredObject(TranslatedState* container,
return slot;
}
-
// static
TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container,
int id) {
@@ -2354,7 +2329,6 @@ TranslatedValue TranslatedValue::NewDuplicateObject(TranslatedState* container,
return slot;
}
-
// static
TranslatedValue TranslatedValue::NewFloat(TranslatedState* container,
Float32 value) {
@@ -2371,7 +2345,6 @@ TranslatedValue TranslatedValue::NewDouble(TranslatedState* container,
return slot;
}
-
// static
TranslatedValue TranslatedValue::NewInt32(TranslatedState* container,
int32_t value) {
@@ -2396,7 +2369,6 @@ TranslatedValue TranslatedValue::NewUInt32(TranslatedState* container,
return slot;
}
-
// static
TranslatedValue TranslatedValue::NewBool(TranslatedState* container,
uint32_t value) {
@@ -2405,7 +2377,6 @@ TranslatedValue TranslatedValue::NewBool(TranslatedState* container,
return slot;
}
-
// static
TranslatedValue TranslatedValue::NewTagged(TranslatedState* container,
Object literal) {
@@ -2419,7 +2390,6 @@ TranslatedValue TranslatedValue::NewInvalid(TranslatedState* container) {
return TranslatedValue(container, kInvalid);
}
-
Isolate* TranslatedValue::isolate() const { return container_->isolate(); }
Object TranslatedValue::raw_literal() const {
@@ -2452,13 +2422,11 @@ Float64 TranslatedValue::double_value() const {
return double_value_;
}
-
int TranslatedValue::object_length() const {
DCHECK_EQ(kind(), kCapturedObject);
return materialization_info_.length_;
}
-
int TranslatedValue::object_index() const {
DCHECK(kind() == kCapturedObject || kind() == kDuplicatedObject);
return materialization_info_.id_;
@@ -2620,7 +2588,6 @@ void TranslatedValue::MaterializeSimple() {
}
}
-
bool TranslatedValue::IsMaterializedObject() const {
switch (kind()) {
case kCapturedObject:
@@ -2727,7 +2694,7 @@ int TranslatedFrame::GetValueCount() {
switch (kind()) {
case kInterpretedFunction: {
int parameter_count =
- raw_shared_info_->internal_formal_parameter_count() + 1;
+ raw_shared_info_.internal_formal_parameter_count() + 1;
// + 2 for function and context.
return height_ + parameter_count + 2;
}
@@ -2741,16 +2708,14 @@ int TranslatedFrame::GetValueCount() {
case kInvalid:
UNREACHABLE();
- break;
}
UNREACHABLE();
}
-
void TranslatedFrame::Handlify() {
if (!raw_shared_info_.is_null()) {
shared_info_ = Handle<SharedFunctionInfo>(raw_shared_info_,
- raw_shared_info_->GetIsolate());
+ raw_shared_info_.GetIsolate());
raw_shared_info_ = SharedFunctionInfo();
}
for (auto& value : values_) {
@@ -2767,14 +2732,14 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::INTERPRETED_FRAME: {
BailoutId bytecode_offset = BailoutId(iterator->Next());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
int height = iterator->Next();
int return_value_offset = iterator->Next();
int return_value_count = iterator->Next();
if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
PrintF(trace_file, " reading input frame %s", name.get());
- int arg_count = shared_info->internal_formal_parameter_count() + 1;
+ int arg_count = shared_info.internal_formal_parameter_count() + 1;
PrintF(trace_file,
" => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); "
"inputs:\n",
@@ -2788,10 +2753,10 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::ARGUMENTS_ADAPTOR_FRAME: {
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
PrintF(trace_file, " reading arguments adaptor frame %s", name.get());
PrintF(trace_file, " => height=%d; inputs:\n", height);
}
@@ -2801,10 +2766,10 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::CONSTRUCT_STUB_FRAME: {
BailoutId bailout_id = BailoutId(iterator->Next());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
PrintF(trace_file, " reading construct stub frame %s", name.get());
PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
bailout_id.ToInt(), height);
@@ -2816,10 +2781,10 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::BUILTIN_CONTINUATION_FRAME: {
BailoutId bailout_id = BailoutId(iterator->Next());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
PrintF(trace_file, " reading builtin continuation frame %s",
name.get());
PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
@@ -2835,10 +2800,10 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME: {
BailoutId bailout_id = BailoutId(iterator->Next());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
PrintF(trace_file, " reading JavaScript builtin continuation frame %s",
name.get());
PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
@@ -2853,10 +2818,10 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
BailoutId bailout_id = BailoutId(iterator->Next());
SharedFunctionInfo shared_info =
- SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
+ SharedFunctionInfo::cast(literal_array.get(iterator->Next()));
int height = iterator->Next();
if (trace_file != nullptr) {
- std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared_info.DebugName().ToCString();
PrintF(trace_file,
" reading JavaScript builtin continuation frame with catch %s",
name.get());
@@ -2925,7 +2890,7 @@ Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
*length = Smi::cast(*FullObjectSlot(
parent_frame_pointer +
ArgumentsAdaptorFrameConstants::kLengthOffset))
- ->value();
+ .value();
arguments_frame = parent_frame_pointer;
} else {
if (length) *length = formal_parameter_count_;
@@ -3081,7 +3046,7 @@ int TranslatedState::CreateNextTranslatedValue(
if (trace_file != nullptr) {
PrintF(trace_file, V8PRIxPTR_FMT " ; %s ", uncompressed_value,
converter.NameOfCPURegister(input_reg));
- Object(uncompressed_value)->ShortPrint(trace_file);
+ Object(uncompressed_value).ShortPrint(trace_file);
}
TranslatedValue translated_value =
TranslatedValue::NewTagged(this, Object(uncompressed_value));
@@ -3210,7 +3175,7 @@ int TranslatedState::CreateNextTranslatedValue(
PrintF(trace_file, V8PRIxPTR_FMT " ; [fp %c %3d] ",
uncompressed_value, slot_offset < 0 ? '-' : '+',
std::abs(slot_offset));
- Object(uncompressed_value)->ShortPrint(trace_file);
+ Object(uncompressed_value).ShortPrint(trace_file);
}
TranslatedValue translated_value =
TranslatedValue::NewTagged(this, Object(uncompressed_value));
@@ -3302,11 +3267,11 @@ int TranslatedState::CreateNextTranslatedValue(
case Translation::LITERAL: {
int literal_index = iterator->Next();
- Object value = literal_array->get(literal_index);
+ Object value = literal_array.get(literal_index);
if (trace_file != nullptr) {
- PrintF(trace_file, V8PRIxPTR_FMT " ; (literal %2d) ", value->ptr(),
+ PrintF(trace_file, V8PRIxPTR_FMT " ; (literal %2d) ", value.ptr(),
literal_index);
- value->ShortPrint(trace_file);
+ value.ShortPrint(trace_file);
}
TranslatedValue translated_value =
@@ -3325,11 +3290,11 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame) {
static_cast<const OptimizedFrame*>(frame)->GetDeoptimizationData(
&deopt_index);
DCHECK(!data.is_null() && deopt_index != Safepoint::kNoDeoptimizationIndex);
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
- Init(frame->isolate(), frame->fp(), &it, data->LiteralArray(),
+ TranslationIterator it(data.TranslationByteArray(),
+ data.TranslationIndex(deopt_index).value());
+ Init(frame->isolate(), frame->fp(), &it, data.LiteralArray(),
nullptr /* registers */, nullptr /* trace file */,
- frame->function()->shared()->internal_formal_parameter_count());
+ frame->function().shared().internal_formal_parameter_count());
}
void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
@@ -3372,8 +3337,8 @@ void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
if (trace_file != nullptr) {
if (nested_counts.empty()) {
// For top level values, print the value number.
- PrintF(trace_file, " %3i: ",
- frame.GetValueCount() - values_to_process);
+ PrintF(trace_file,
+ " %3i: ", frame.GetValueCount() - values_to_process);
} else {
// Take care of indenting for nested values.
PrintF(trace_file, " ");
@@ -3405,9 +3370,8 @@ void TranslatedState::Init(Isolate* isolate, Address input_frame_pointer,
}
}
- CHECK(!iterator->HasNext() ||
- static_cast<Translation::Opcode>(iterator->Next()) ==
- Translation::BEGIN);
+ CHECK(!iterator->HasNext() || static_cast<Translation::Opcode>(
+ iterator->Next()) == Translation::BEGIN);
}
void TranslatedState::Prepare(Address stack_frame_pointer) {
@@ -3548,7 +3512,7 @@ void TranslatedState::MaterializeFixedDoubleArray(TranslatedFrame* frame,
int* value_index,
TranslatedValue* slot,
Handle<Map> map) {
- int length = Smi::cast(frame->values_[*value_index].GetRawValue())->value();
+ int length = Smi::cast(frame->values_[*value_index].GetRawValue()).value();
(*value_index)++;
Handle<FixedDoubleArray> array = Handle<FixedDoubleArray>::cast(
isolate()->factory()->NewFixedDoubleArray(length));
@@ -3659,13 +3623,13 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
case STRING_TABLE_TYPE: {
// Check we have the right size.
int array_length =
- Smi::cast(frame->values_[value_index].GetRawValue())->value();
+ Smi::cast(frame->values_[value_index].GetRawValue()).value();
int instance_size = FixedArray::SizeFor(array_length);
CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
// Canonicalize empty fixed array.
- if (*map == ReadOnlyRoots(isolate()).empty_fixed_array()->map() &&
+ if (*map == ReadOnlyRoots(isolate()).empty_fixed_array().map() &&
array_length == 0) {
slot->set_storage(isolate()->factory()->empty_fixed_array());
} else {
@@ -3680,7 +3644,7 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt(
case PROPERTY_ARRAY_TYPE: {
// Check we have the right size.
int length_or_hash =
- Smi::cast(frame->values_[value_index].GetRawValue())->value();
+ Smi::cast(frame->values_[value_index].GetRawValue()).value();
int array_length = PropertyArray::LengthField::decode(length_or_hash);
int instance_size = PropertyArray::SizeFor(array_length);
CHECK_EQ(instance_size, slot->GetChildrenCount() * kTaggedSize);
@@ -3842,16 +3806,16 @@ void TranslatedState::InitializeJSObjectAt(
// what the markers in the storage say (note that all heap numbers
// should be fully initialized by now).
int offset = i * kTaggedSize;
- uint8_t marker = READ_UINT8_FIELD(*object_storage, offset);
+ uint8_t marker = object_storage->ReadField<uint8_t>(offset);
if (marker == kStoreUnboxedDouble) {
double double_field_value;
if (field_value->IsSmi()) {
- double_field_value = Smi::cast(*field_value)->value();
+ double_field_value = Smi::cast(*field_value).value();
} else {
CHECK(field_value->IsHeapNumber());
- double_field_value = HeapNumber::cast(*field_value)->value();
+ double_field_value = HeapNumber::cast(*field_value).value();
}
- WRITE_DOUBLE_FIELD(*object_storage, offset, double_field_value);
+ object_storage->WriteField<double>(offset, double_field_value);
} else if (marker == kStoreMutableHeapNumber) {
CHECK(field_value->IsMutableHeapNumber());
WRITE_FIELD(*object_storage, offset, *field_value);
@@ -3886,7 +3850,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
for (int i = 1; i < slot->GetChildrenCount(); i++) {
Handle<Object> field_value = GetValueAndAdvance(frame, value_index);
int offset = i * kTaggedSize;
- uint8_t marker = READ_UINT8_FIELD(*object_storage, offset);
+ uint8_t marker = object_storage->ReadField<uint8_t>(offset);
if (i > 1 && marker == kStoreMutableHeapNumber) {
CHECK(field_value->IsMutableHeapNumber());
} else {
@@ -3942,8 +3906,32 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
*args_count = frames_[i - 1].height();
return &(frames_[i - 1]);
}
- *args_count =
- frames_[i].shared_info()->internal_formal_parameter_count() + 1;
+
+ // JavaScriptBuiltinContinuation frames that are not preceeded by
+ // a arguments adapter frame are currently only used by C++ API calls
+ // from TurboFan. Calls to C++ API functions from TurboFan need
+ // a special marker frame state, otherwise the API call wouldn't
+ // be shown in a stack trace.
+ if (frames_[i].kind() ==
+ TranslatedFrame::kJavaScriptBuiltinContinuation &&
+ frames_[i].shared_info()->internal_formal_parameter_count() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ DCHECK(frames_[i].shared_info()->IsApiFunction());
+
+ // The argument count for this special case is always the second
+ // to last value in the TranslatedFrame. It should also always be
+ // {1}, as the GenericLazyDeoptContinuation builtin only has one
+ // argument (the receiver).
+ const int height = frames_[i].height();
+ Object argc_object = frames_[i].ValueAt(height - 1)->GetRawValue();
+ CHECK(argc_object.IsSmi());
+ *args_count = Smi::ToInt(argc_object);
+
+ DCHECK_EQ(*args_count, 1);
+ } else {
+ *args_count =
+ frames_[i].shared_info()->internal_formal_parameter_count() + 1;
+ }
return &(frames_[i]);
}
}
@@ -4068,7 +4056,7 @@ void TranslatedState::ReadUpdateFeedback(TranslationIterator* iterator,
FixedArray literal_array,
FILE* trace_file) {
CHECK_EQ(Translation::UPDATE_FEEDBACK, iterator->Next());
- feedback_vector_ = FeedbackVector::cast(literal_array->get(iterator->Next()));
+ feedback_vector_ = FeedbackVector::cast(literal_array.get(iterator->Next()));
feedback_slot_ = FeedbackSlot(iterator->Next());
if (trace_file != nullptr) {
PrintF(trace_file, " reading FeedbackVector (slot %d)\n",
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h
index 98e8b7f855..67e3e54405 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer/deoptimizer.h
@@ -2,26 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DEOPTIMIZER_H_
-#define V8_DEOPTIMIZER_H_
+#ifndef V8_DEOPTIMIZER_DEOPTIMIZER_H_
+#define V8_DEOPTIMIZER_DEOPTIMIZER_H_
#include <stack>
#include <vector>
-#include "src/allocation.h"
#include "src/base/macros.h"
-#include "src/boxed-float.h"
-#include "src/code-tracer.h"
-#include "src/deoptimize-reason.h"
-#include "src/feedback-vector.h"
-#include "src/frame-constants.h"
-#include "src/frames.h"
-#include "src/globals.h"
-#include "src/isolate.h"
-#include "src/label.h"
+#include "src/codegen/label.h"
+#include "src/codegen/register-arch.h"
+#include "src/codegen/source-position.h"
+#include "src/common/globals.h"
+#include "src/deoptimizer/deoptimize-reason.h"
+#include "src/diagnostics/code-tracer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/shared-function-info.h"
-#include "src/register-arch.h"
-#include "src/source-position.h"
+#include "src/utils/allocation.h"
+#include "src/utils/boxed-float.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -154,7 +154,6 @@ class TranslatedValue {
int object_index() const;
};
-
class TranslatedFrame {
public:
enum Kind {
@@ -220,8 +219,8 @@ class TranslatedFrame {
int input_index_;
};
- typedef TranslatedValue& reference;
- typedef TranslatedValue const& const_reference;
+ using reference = TranslatedValue&;
+ using const_reference = TranslatedValue const&;
iterator begin() { return iterator(values_.begin()); }
iterator end() { return iterator(values_.end()); }
@@ -279,12 +278,11 @@ class TranslatedFrame {
int return_value_offset_;
int return_value_count_;
- typedef std::deque<TranslatedValue> ValuesContainer;
+ using ValuesContainer = std::deque<TranslatedValue>;
ValuesContainer values_;
};
-
// Auxiliary class for translating deoptimization values.
// Typical usage sequence:
//
@@ -310,11 +308,11 @@ class TranslatedState {
// Store newly materialized values into the isolate.
void StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame);
- typedef std::vector<TranslatedFrame>::iterator iterator;
+ using iterator = std::vector<TranslatedFrame>::iterator;
iterator begin() { return frames_.begin(); }
iterator end() { return frames_.end(); }
- typedef std::vector<TranslatedFrame>::const_iterator const_iterator;
+ using const_iterator = std::vector<TranslatedFrame>::const_iterator;
const_iterator begin() const { return frames_.begin(); }
const_iterator end() const { return frames_.end(); }
@@ -506,7 +504,6 @@ class Deoptimizer : public Malloced {
void QueueValueForMaterialization(Address output_address, Object obj,
const TranslatedFrame::iterator& iterator);
-
Deoptimizer(Isolate* isolate, JSFunction function, DeoptimizeKind kind,
unsigned bailout_id, Address from, int fp_to_sp_delta);
Code FindOptimizedCode();
@@ -616,7 +613,6 @@ class Deoptimizer : public Malloced {
friend class DeoptimizedFrameInfo;
};
-
class RegisterValues {
public:
intptr_t GetRegister(unsigned n) const {
@@ -667,7 +663,6 @@ class RegisterValues {
Float64 double_registers_[DoubleRegister::kNumRegisters];
};
-
class FrameDescription {
public:
explicit FrameDescription(uint32_t frame_size, int parameter_count = 0);
@@ -678,13 +673,9 @@ class FrameDescription {
return malloc(size + frame_size - kSystemPointerSize);
}
- void operator delete(void* pointer, uint32_t frame_size) {
- free(pointer);
- }
+ void operator delete(void* pointer, uint32_t frame_size) { free(pointer); }
- void operator delete(void* description) {
- free(description);
- }
+ void operator delete(void* description) { free(description); }
uint32_t GetFrameSize() const {
USE(frame_content_);
@@ -809,12 +800,11 @@ class FrameDescription {
intptr_t* GetFrameSlotPointer(unsigned offset) {
DCHECK(offset < frame_size_);
- return reinterpret_cast<intptr_t*>(
- reinterpret_cast<Address>(this) + frame_content_offset() + offset);
+ return reinterpret_cast<intptr_t*>(reinterpret_cast<Address>(this) +
+ frame_content_offset() + offset);
}
};
-
class DeoptimizerData {
public:
explicit DeoptimizerData(Heap* heap);
@@ -907,8 +897,7 @@ class Translation {
public:
#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item,
enum Opcode {
- TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM)
- LAST = LITERAL
+ TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM) LAST = LITERAL
};
#undef DECLARE_TRANSLATION_OPCODE_ENUM
@@ -973,11 +962,9 @@ class Translation {
Zone* zone_;
};
-
class MaterializedObjectStore {
public:
- explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {
- }
+ explicit MaterializedObjectStore(Isolate* isolate) : isolate_(isolate) {}
Handle<FixedArray> Get(Address fp);
void Set(Address fp, Handle<FixedArray> materialized_objects);
@@ -994,7 +981,6 @@ class MaterializedObjectStore {
std::vector<Address> frame_fps_;
};
-
// Class used to represent an unoptimized frame when the debugger
// needs to inspect a frame that is part of an optimized frame. The
// internally used FrameDescription objects are not GC safe so for use
@@ -1030,9 +1016,7 @@ class DeoptimizedFrameInfo : public Malloced {
return expression_stack_[index];
}
- int GetSourcePosition() {
- return source_position_;
- }
+ int GetSourcePosition() { return source_position_; }
private:
// Set an incoming argument.
@@ -1059,4 +1043,4 @@ class DeoptimizedFrameInfo : public Malloced {
} // namespace internal
} // namespace v8
-#endif // V8_DEOPTIMIZER_H_
+#endif // V8_DEOPTIMIZER_DEOPTIMIZER_H_
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
index 34427c95ed..6b01449ba7 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/deoptimizer/ia32/deoptimizer-ia32.cc
@@ -4,11 +4,11 @@
#if V8_TARGET_ARCH_IA32
-#include "src/deoptimizer.h"
-#include "src/frame-constants.h"
-#include "src/macro-assembler.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -24,7 +24,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
- __ sub(esp, Immediate(kDoubleRegsSize));
+ __ AllocateStackSpace(kDoubleRegsSize);
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
@@ -35,7 +35,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
STATIC_ASSERT(kFloatSize == kSystemPointerSize);
const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
- __ sub(esp, Immediate(kFloatRegsSize));
+ __ AllocateStackSpace(kFloatRegsSize);
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i);
XMMRegister xmm_reg = XMMRegister::from_code(code);
@@ -151,8 +151,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one
// past the last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
@@ -210,21 +209,17 @@ void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
-
#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
index 33e517a21d..a56501660b 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/deoptimizer/mips/deoptimizer-mips.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
@@ -172,8 +172,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: t0 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
@@ -211,7 +210,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
__ push(t2);
-
// Technically restoring 'at' should work unless zero_reg is also restored
// but it's safer to check for this.
DCHECK(!(at.bit() & restored_regs));
@@ -237,27 +235,22 @@ const int Deoptimizer::table_entry_size_ = 2 * kInstrSize;
const int Deoptimizer::table_entry_size_ = 3 * kInstrSize;
#endif
-
bool Deoptimizer::PadTopOfStackRegister() { return false; }
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
-
#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
index 69bb895e58..6869199f1b 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/deoptimizer/mips64/deoptimizer-mips64.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
@@ -173,8 +173,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: a4 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
@@ -243,20 +242,16 @@ void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
-
#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
index 9fe8cbefbd..268660c2ef 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/deoptimizer/ppc/deoptimizer-ppc.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
@@ -232,18 +232,15 @@ void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
DCHECK(FLAG_enable_embedded_constant_pool);
SetFrameSlot(offset, value);
}
-
#undef __
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
index 6e090227b8..db2330a8e8 100644
--- a/deps/v8/src/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/deoptimizer/s390/deoptimizer-s390.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
index f49a131a26..7654dc965f 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/deoptimizer/x64/deoptimizer-x64.cc
@@ -4,11 +4,11 @@
#if V8_TARGET_ARCH_X64
-#include "src/deoptimizer.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -24,7 +24,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
- __ subq(rsp, Immediate(kDoubleRegsSize));
+ __ AllocateStackSpace(kDoubleRegsSize);
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
@@ -35,7 +35,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
const int kFloatRegsSize = kFloatSize * XMMRegister::kNumRegisters;
- __ subq(rsp, Immediate(kFloatRegsSize));
+ __ AllocateStackSpace(kFloatRegsSize);
for (int i = 0; i < config->num_allocatable_float_registers(); ++i) {
int code = config->GetAllocatableFloatCode(i);
@@ -106,7 +106,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
// Fill in the input registers.
- for (int i = kNumberOfRegisters -1; i >= 0; i--) {
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
__ PopQuad(Operand(rbx, offset));
@@ -165,8 +165,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
__ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop,
- outer_loop_header, inner_loop_header;
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
@@ -208,7 +207,7 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
// Restore the registers from the stack.
- for (int i = kNumberOfRegisters - 1; i >= 0 ; i--) {
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
Register r = Register::from_code(i);
// Do not restore rsp, simply pop the value into the next register
// and overwrite this afterwards.
@@ -233,7 +232,6 @@ void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
if (kFPOnStackSize == 2 * kSystemPointerSize) {
// Zero out the high-32 bit of FP for x32 port.
@@ -242,16 +240,13 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
-
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
-
#undef __
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/diagnostics/OWNERS b/deps/v8/src/diagnostics/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/diagnostics/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/diagnostics/arm/disasm-arm.cc
index 35ff085e32..51b6594e70 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/diagnostics/arm/disasm-arm.cc
@@ -22,20 +22,20 @@
// of code into a FILE*, meaning that the above functionality could also be
// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
-
-#include <assert.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <string.h>
+#include <cassert>
+#include <cinttypes>
+#include <cstdarg>
+#include <cstdio>
+#include <cstring>
#if V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm.h"
-#include "src/arm/constants-arm.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
-#include "src/disasm.h"
-#include "src/vector.h"
+#include "src/codegen/arm/assembler-arm.h"
+#include "src/codegen/arm/constants-arm.h"
+#include "src/diagnostics/disasm.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -47,11 +47,8 @@ namespace internal {
// more informative description.
class Decoder {
public:
- Decoder(const disasm::NameConverter& converter,
- Vector<char> out_buffer)
- : converter_(converter),
- out_buffer_(out_buffer),
- out_buffer_pos_(0) {
+ Decoder(const disasm::NameConverter& converter, Vector<char> out_buffer)
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
out_buffer_[out_buffer_pos_] = '\0';
}
@@ -123,17 +120,12 @@ class Decoder {
DISALLOW_COPY_AND_ASSIGN(Decoder);
};
-
// Support for assertions in the Decoder formatting functions.
#define STRING_STARTS_WITH(string, compare_string) \
(strncmp(string, compare_string, strlen(compare_string)) == 0)
-
// Append the ch to the output buffer.
-void Decoder::PrintChar(const char ch) {
- out_buffer_[out_buffer_pos_++] = ch;
-}
-
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
// Append the str to the output buffer.
void Decoder::Print(const char* str) {
@@ -145,45 +137,33 @@ void Decoder::Print(const char* str) {
out_buffer_[out_buffer_pos_] = 0;
}
-
// These condition names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
static const char* const cond_names[kNumberOfConditions] = {
- "eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
- "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
};
-
// Print the condition guarding the instruction.
void Decoder::PrintCondition(Instruction* instr) {
Print(cond_names[instr->ConditionValue()]);
}
-
// Print the register name according to the active name converter.
void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
-
// Print the VFP S register name according to the active name converter.
-void Decoder::PrintSRegister(int reg) {
- Print(VFPRegisters::Name(reg, false));
-}
-
+void Decoder::PrintSRegister(int reg) { Print(VFPRegisters::Name(reg, false)); }
// Print the VFP D register name according to the active name converter.
-void Decoder::PrintDRegister(int reg) {
- Print(VFPRegisters::Name(reg, true));
-}
-
+void Decoder::PrintDRegister(int reg) { Print(VFPRegisters::Name(reg, true)); }
// These shift names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
-static const char* const shift_names[kNumberOfShifts] = {
- "lsl", "lsr", "asr", "ror"
-};
-
+static const char* const shift_names[kNumberOfShifts] = {"lsl", "lsr", "asr",
+ "ror"};
// Print the register shift operands for the instruction. Generally used for
// data processing instructions.
@@ -207,20 +187,17 @@ void Decoder::PrintShiftRm(Instruction* instr) {
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[shift_index],
- shift_amount);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", %s #%d",
+ shift_names[shift_index], shift_amount);
} else {
// by register
int rs = instr->RsValue();
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s ", shift_names[shift_index]);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", %s ",
+ shift_names[shift_index]);
PrintRegister(rs);
}
}
-
// Print the immediate operand for the instruction. Generally used for data
// processing instructions.
void Decoder::PrintShiftImm(Instruction* instr) {
@@ -230,19 +207,16 @@ void Decoder::PrintShiftImm(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%d", imm);
}
-
// Print the optional shift and immediate used by saturating instructions.
void Decoder::PrintShiftSat(Instruction* instr) {
int shift = instr->Bits(11, 7);
if (shift > 0) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- ", %s #%d",
- shift_names[instr->Bit(6) * 2],
- instr->Bits(11, 7));
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, ", %s #%d",
+ shift_names[instr->Bit(6) * 2], instr->Bits(11, 7));
}
}
-
// Print PU formatting to reduce complexity of FormatOption.
void Decoder::PrintPU(Instruction* instr) {
switch (instr->PUField()) {
@@ -264,12 +238,10 @@ void Decoder::PrintPU(Instruction* instr) {
}
default: {
UNREACHABLE();
- break;
}
}
}
-
// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
// the FormatOption method.
void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
@@ -282,20 +254,15 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
return;
default:
if (svc >= kStopCode) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d - 0x%x",
- svc & kStopCodeMask,
- svc & kStopCodeMask);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d - 0x%x",
+ svc & kStopCodeMask, svc & kStopCodeMask);
} else {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- svc);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", svc);
}
return;
}
}
-
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
@@ -343,7 +310,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
UNREACHABLE();
}
-
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
@@ -359,10 +325,8 @@ int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
} else if (format[1] == 'm') {
reg = instr->VFPMRegValue(precision);
} else if (format[1] == 'd') {
- if ((instr->TypeValue() == 7) &&
- (instr->Bit(24) == 0x0) &&
- (instr->Bits(11, 9) == 0x5) &&
- (instr->Bit(4) == 0x1)) {
+ if ((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) && (instr->Bit(4) == 0x1)) {
// vmov.32 has Vd in a different place.
reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
} else {
@@ -388,36 +352,33 @@ int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
return retval;
}
-
int Decoder::FormatVFPinstruction(Instruction* instr, const char* format) {
- Print(format);
- return 0;
+ Print(format);
+ return 0;
}
-
void Decoder::FormatNeonList(int Vd, int type) {
if (type == nlt_1) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "{d%d}", Vd);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "{d%d}", Vd);
} else if (type == nlt_2) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "{d%d, d%d}", Vd, Vd + 1);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "{d%d, d%d}", Vd, Vd + 1);
} else if (type == nlt_3) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"{d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2);
} else if (type == nlt_4) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "{d%d, d%d, d%d, d%d}", Vd, Vd + 1, Vd + 2, Vd + 3);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "{d%d, d%d, d%d, d%d}", Vd,
+ Vd + 1, Vd + 2, Vd + 3);
}
}
-
void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "[%s",
converter_.NameOfCPURegister(Rn));
if (align != 0) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- ":%d", (1 << align) << 6);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, ":%d", (1 << align) << 6);
}
if (Rm == 15) {
Print("]");
@@ -429,7 +390,6 @@ void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
}
}
-
// Print the movw or movt instruction.
void Decoder::PrintMovwMovt(Instruction* instr) {
int imm = instr->ImmedMovwMovtValue();
@@ -438,7 +398,6 @@ void Decoder::PrintMovwMovt(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", imm);
}
-
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
@@ -480,8 +439,8 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
DCHECK_GT(width, 0);
}
DCHECK_LE(width + lsbit, 32);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "#%d, #%d", lsbit, width);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "#%d, #%d", lsbit, width);
return 1;
}
case 'h': { // 'h: halfword operation for extra loads and stores
@@ -495,14 +454,13 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
case 'i': { // 'i: immediate value from adjacent bits.
// Expects tokens in the form imm%02d@%02d, i.e. imm05@07, imm10@16
int width = (format[3] - '0') * 10 + (format[4] - '0');
- int lsb = (format[6] - '0') * 10 + (format[7] - '0');
+ int lsb = (format[6] - '0') * 10 + (format[7] - '0');
DCHECK((width >= 1) && (width <= 32));
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK_LE(width + lsb, 32);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d",
instr->Bits(width + lsb - 1, lsb));
return 8;
}
@@ -540,24 +498,23 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
DCHECK(STRING_STARTS_WITH(format, "msg"));
byte* str =
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0FFFFFFF);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%s", converter_.NameInCode(str));
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameInCode(str));
return 3;
}
case 'o': {
if ((format[3] == '1') && (format[4] == '2')) {
// 'off12: 12-bit offset for load and store instructions
DCHECK(STRING_STARTS_WITH(format, "off12"));
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Offset12Value());
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d",
+ instr->Offset12Value());
return 5;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
DCHECK(STRING_STARTS_WITH(format, "off0to3and8to19"));
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d",
- (instr->Bits(19, 8) << 4) +
- instr->Bits(3, 0));
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d",
+ (instr->Bits(19, 8) << 4) + instr->Bits(3, 0));
return 15;
}
// 'off8: 8-bit offset for extra load and store instructions
@@ -575,7 +532,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return FormatRegister(instr, format);
}
case 's': {
- if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
+ if (format[1] == 'h') { // 'shift_op or 'shift_rm or 'shift_sat.
if (format[6] == 'o') { // 'shift_op
DCHECK(STRING_STARTS_WITH(format, "shift_op"));
if (instr->TypeValue() == 0) {
@@ -634,11 +591,9 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
case 't': { // 'target: target of branch instructions
DCHECK(STRING_STARTS_WITH(format, "target"));
int off = (instr->SImmed24Value() << 2) + 8;
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%+d -> %s",
- off,
- converter_.NameOfAddress(
- reinterpret_cast<byte*>(instr) + off));
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
return 6;
}
case 'u': { // 'u: signed or unsigned multiplies
@@ -700,13 +655,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
default: {
UNREACHABLE();
- break;
}
}
UNREACHABLE();
}
-
// Format takes a formatting string for a whole instruction and prints it into
// the output buffer. All escaped options are handed to FormatOption to be
// parsed further.
@@ -720,25 +673,20 @@ void Decoder::Format(Instruction* instr, const char* format) {
}
cur = *format++;
}
- out_buffer_[out_buffer_pos_] = '\0';
+ out_buffer_[out_buffer_pos_] = '\0';
}
-
// The disassembler may end up decoding data inlined in the code. We do not want
// it to crash if the data does not resemble any known instruction.
#define VERIFY(condition) \
-if(!(condition)) { \
- Unknown(instr); \
- return; \
-}
-
+ if (!(condition)) { \
+ Unknown(instr); \
+ return; \
+ }
// For currently unimplemented decodings the disassembler calls Unknown(instr)
// which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instruction* instr) {
- Format(instr, "unknown");
-}
-
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
void Decoder::DecodeType01(Instruction* instr) {
int type = instr->TypeValue();
@@ -1045,7 +993,6 @@ void Decoder::DecodeType01(Instruction* instr) {
}
}
-
void Decoder::DecodeType2(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
@@ -1083,12 +1030,10 @@ void Decoder::DecodeType2(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
}
-
void Decoder::DecodeType3(Instruction* instr) {
switch (instr->PUField()) {
case da_x: {
@@ -1373,12 +1318,10 @@ void Decoder::DecodeType3(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
}
-
void Decoder::DecodeType4(Instruction* instr) {
if (instr->Bit(22) != 0) {
// Privileged mode currently not supported.
@@ -1392,17 +1335,14 @@ void Decoder::DecodeType4(Instruction* instr) {
}
}
-
void Decoder::DecodeType5(Instruction* instr) {
Format(instr, "b'l'cond 'target");
}
-
void Decoder::DecodeType6(Instruction* instr) {
DecodeType6CoprocessorIns(instr);
}
-
int Decoder::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
if (instr->SvcValue() >= kStopCode) {
@@ -1427,7 +1367,6 @@ int Decoder::DecodeType7(Instruction* instr) {
return kInstrSize;
}
-
// void Decoder::DecodeTypeVFP(Instruction* instr)
// vmov: Sn = Rt
// vmov: Rt = Sn
@@ -1460,7 +1399,7 @@ int Decoder::DecodeType7(Instruction* instr) {
// vmov.size: Dd[i] = Rt
// vmov.sign.size: Rt = Dn[i]
void Decoder::DecodeTypeVFP(Instruction* instr) {
- VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
+ VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
VERIFY(instr->Bits(11, 9) == 0x5);
if (instr->Bit(4) == 0) {
@@ -1496,8 +1435,8 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
// vcvt.f64.s32 Dd, Dd, #<fbits>
int fraction_bits = 32 - ((instr->Bits(3, 0) << 1) | instr->Bit(5));
Format(instr, "vcvt'cond.f64.s32 'Dd, 'Dd");
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- ", #%d", fraction_bits);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, ", #%d", fraction_bits);
} else if (((instr->Opc2Value() >> 1) == 0x6) &&
(instr->Opc3Value() & 0x1)) {
DecodeVCVTBetweenFloatingPointAndInteger(instr);
@@ -1568,8 +1507,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
Unknown(instr); // Not used by V8.
}
} else {
- if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0)) {
+ if ((instr->VCValue() == 0x0) && (instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
const char* rt_name = converter_.NameOfCPURegister(instr->RtValue());
@@ -1638,8 +1576,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
Unknown(instr);
}
}
- } else if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
+ } else if ((instr->VCValue() == 0x0) && (instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
if (instr->VLValue() == 0) {
if (instr->Bits(15, 12) == 0xF) {
@@ -1703,7 +1640,6 @@ void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
}
}
-
void Decoder::DecodeVCMP(Instruction* instr) {
VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
@@ -1734,7 +1670,6 @@ void Decoder::DecodeVCMP(Instruction* instr) {
}
}
-
void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
@@ -1748,7 +1683,6 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
}
}
-
void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
@@ -1791,7 +1725,6 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
}
}
-
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
// <Rt, Rt2> = vmov(Dm)
@@ -1885,13 +1818,11 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
}
-
static const char* const barrier_option_names[] = {
"invalid", "oshld", "oshst", "osh", "invalid", "nshld", "nshst", "nsh",
"invalid", "ishld", "ishst", "ish", "invalid", "ld", "st", "sy",
};
-
void Decoder::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
case 4: {
@@ -2580,14 +2511,13 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
}
-#undef VERIFIY
+#undef VERIFY
bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
}
-
int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
if (IsConstantPoolAt(instr_ptr)) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
@@ -2597,13 +2527,11 @@ int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
}
}
-
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(reinterpret_cast<Address>(instr_ptr));
// Print raw instruction bytes.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
DecodeSpecialCondition(instr);
@@ -2648,57 +2576,47 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
default: {
// The type field is 3-bits in the ARM encoding.
UNREACHABLE();
- break;
}
}
return kInstrSize;
}
-
} // namespace internal
} // namespace v8
-
//------------------------------------------------------------------------------
namespace disasm {
-
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
- return tmp_buffer_.start();
+ return tmp_buffer_.begin();
}
-
const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
-
const char* NameConverter::NameOfCPURegister(int reg) const {
return RegisterName(i::Register::from_code(reg));
}
-
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM does not have the concept of a byte register
return "nobytereg";
}
-
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM does not have any XMM registers
return "noxmmreg";
}
-
const char* NameConverter::NameInCode(byte* addr) const {
// The default name converter is called for unknown code. So we will not try
// to access any memory.
return "";
}
-
//------------------------------------------------------------------------------
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
@@ -2707,7 +2625,6 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return d.InstructionDecode(instruction);
}
-
int Disassembler::ConstantPoolSizeAt(byte* instruction) {
return v8::internal::Decoder::ConstantPoolSizeAt(instruction);
}
@@ -2722,10 +2639,12 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
- *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
}
}
+#undef STRING_STARTS_WITH
+
} // namespace disasm
#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/arm/eh-frame-arm.cc b/deps/v8/src/diagnostics/arm/eh-frame-arm.cc
index f0902691bc..7d0dc49155 100644
--- a/deps/v8/src/arm/eh-frame-arm.cc
+++ b/deps/v8/src/diagnostics/arm/eh-frame-arm.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/eh-frame.h"
+#include "src/diagnostics/eh-frame.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/disasm-arm64.cc b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
index eec2cbf138..e51986ee4c 100644
--- a/deps/v8/src/arm64/disasm-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -9,16 +9,15 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/decoder-arm64-inl.h"
-#include "src/arm64/disasm-arm64.h"
-#include "src/arm64/utils-arm64.h"
#include "src/base/platform/platform.h"
-#include "src/disasm.h"
+#include "src/codegen/arm64/decoder-arm64-inl.h"
+#include "src/codegen/arm64/utils-arm64.h"
+#include "src/diagnostics/arm64/disasm-arm64.h"
+#include "src/diagnostics/disasm.h"
namespace v8 {
namespace internal {
-
DisassemblingDecoder::DisassemblingDecoder() {
buffer_size_ = 256;
buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
@@ -26,7 +25,6 @@ DisassemblingDecoder::DisassemblingDecoder() {
own_buffer_ = true;
}
-
DisassemblingDecoder::DisassemblingDecoder(char* text_buffer, int buffer_size) {
buffer_size_ = buffer_size;
buffer_ = text_buffer;
@@ -34,25 +32,23 @@ DisassemblingDecoder::DisassemblingDecoder(char* text_buffer, int buffer_size) {
own_buffer_ = false;
}
-
DisassemblingDecoder::~DisassemblingDecoder() {
if (own_buffer_) {
free(buffer_);
}
}
-
char* DisassemblingDecoder::GetOutput() { return buffer_; }
-
void DisassemblingDecoder::VisitAddSubImmediate(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
- bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
- (instr->ImmAddSub() == 0) ? true : false;
- const char *mnemonic = "";
- const char *form = "'Rds, 'Rns, 'IAddSub";
- const char *form_cmp = "'Rns, 'IAddSub";
- const char *form_mov = "'Rds, 'Rns";
+ bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) && (instr->ImmAddSub() == 0)
+ ? true
+ : false;
+ const char* mnemonic = "";
+ const char* form = "'Rds, 'Rns, 'IAddSub";
+ const char* form_cmp = "'Rns, 'IAddSub";
+ const char* form_mov = "'Rds, 'Rns";
switch (instr->Mask(AddSubImmediateMask)) {
case ADD_w_imm:
@@ -74,7 +70,9 @@ void DisassemblingDecoder::VisitAddSubImmediate(Instruction* instr) {
break;
}
case SUB_w_imm:
- case SUB_x_imm: mnemonic = "sub"; break;
+ case SUB_x_imm:
+ mnemonic = "sub";
+ break;
case SUBS_w_imm:
case SUBS_x_imm: {
mnemonic = "subs";
@@ -84,23 +82,25 @@ void DisassemblingDecoder::VisitAddSubImmediate(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitAddSubShifted(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
- const char *mnemonic = "";
+ const char* mnemonic = "";
const char* form = "'Rd, 'Rn, 'Rm'NDP";
const char* form_cmp = "'Rn, 'Rm'NDP";
const char* form_neg = "'Rd, 'Rm'NDP";
switch (instr->Mask(AddSubShiftedMask)) {
case ADD_w_shift:
- case ADD_x_shift: mnemonic = "add"; break;
+ case ADD_x_shift:
+ mnemonic = "add";
+ break;
case ADDS_w_shift:
case ADDS_x_shift: {
mnemonic = "adds";
@@ -131,24 +131,26 @@ void DisassemblingDecoder::VisitAddSubShifted(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitAddSubExtended(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
- const char *mnemonic = "";
+ const char* mnemonic = "";
Extend mode = static_cast<Extend>(instr->ExtendMode());
- const char *form = ((mode == UXTX) || (mode == SXTX)) ?
- "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
- const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
- "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+ const char* form = ((mode == UXTX) || (mode == SXTX)) ? "'Rds, 'Rns, 'Xm'Ext"
+ : "'Rds, 'Rns, 'Wm'Ext";
+ const char* form_cmp =
+ ((mode == UXTX) || (mode == SXTX)) ? "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
switch (instr->Mask(AddSubExtendedMask)) {
case ADD_w_ext:
- case ADD_x_ext: mnemonic = "add"; break;
+ case ADD_x_ext:
+ mnemonic = "add";
+ break;
case ADDS_w_ext:
case ADDS_x_ext: {
mnemonic = "adds";
@@ -159,7 +161,9 @@ void DisassemblingDecoder::VisitAddSubExtended(Instruction* instr) {
break;
}
case SUB_w_ext:
- case SUB_x_ext: mnemonic = "sub"; break;
+ case SUB_x_ext:
+ mnemonic = "sub";
+ break;
case SUBS_w_ext:
case SUBS_x_ext: {
mnemonic = "subs";
@@ -169,23 +173,27 @@ void DisassemblingDecoder::VisitAddSubExtended(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitAddSubWithCarry(Instruction* instr) {
bool rn_is_zr = RnIsZROrSP(instr);
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm";
- const char *form_neg = "'Rd, 'Rm";
+ const char* mnemonic = "";
+ const char* form = "'Rd, 'Rn, 'Rm";
+ const char* form_neg = "'Rd, 'Rm";
switch (instr->Mask(AddSubWithCarryMask)) {
case ADC_w:
- case ADC_x: mnemonic = "adc"; break;
+ case ADC_x:
+ mnemonic = "adc";
+ break;
case ADCS_w:
- case ADCS_x: mnemonic = "adcs"; break;
+ case ADCS_x:
+ mnemonic = "adcs";
+ break;
case SBC_w:
case SBC_x: {
mnemonic = "sbc";
@@ -204,17 +212,17 @@ void DisassemblingDecoder::VisitAddSubWithCarry(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitLogicalImmediate(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
- const char *mnemonic = "";
- const char *form = "'Rds, 'Rn, 'ITri";
+ const char* mnemonic = "";
+ const char* form = "'Rds, 'Rn, 'ITri";
if (instr->ImmLogical() == 0) {
// The immediate encoded in the instruction is not in the expected format.
@@ -224,12 +232,14 @@ void DisassemblingDecoder::VisitLogicalImmediate(Instruction* instr) {
switch (instr->Mask(LogicalImmediateMask)) {
case AND_w_imm:
- case AND_x_imm: mnemonic = "and"; break;
+ case AND_x_imm:
+ mnemonic = "and";
+ break;
case ORR_w_imm:
case ORR_x_imm: {
mnemonic = "orr";
- unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
- : kWRegSizeInBits;
+ unsigned reg_size =
+ (instr->SixtyFourBits() == 1) ? kXRegSizeInBits : kWRegSizeInBits;
if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
mnemonic = "mov";
form = "'Rds, 'ITri";
@@ -237,7 +247,9 @@ void DisassemblingDecoder::VisitLogicalImmediate(Instruction* instr) {
break;
}
case EOR_w_imm:
- case EOR_x_imm: mnemonic = "eor"; break;
+ case EOR_x_imm:
+ mnemonic = "eor";
+ break;
case ANDS_w_imm:
case ANDS_x_imm: {
mnemonic = "ands";
@@ -247,12 +259,12 @@ void DisassemblingDecoder::VisitLogicalImmediate(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
bool DisassemblingDecoder::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
DCHECK((reg_size == kXRegSizeInBits) ||
((reg_size == kWRegSizeInBits) && (value <= 0xFFFFFFFF)));
@@ -280,24 +292,33 @@ bool DisassemblingDecoder::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
return false;
}
-
void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) {
bool rd_is_zr = RdIsZROrSP(instr);
bool rn_is_zr = RnIsZROrSP(instr);
- const char *mnemonic = "";
+ const char* mnemonic = "";
const char* form = "'Rd, 'Rn, 'Rm'NLo";
switch (instr->Mask(LogicalShiftedMask)) {
case AND_w:
- case AND_x: mnemonic = "and"; break;
+ case AND_x:
+ mnemonic = "and";
+ break;
case BIC_w:
- case BIC_x: mnemonic = "bic"; break;
+ case BIC_x:
+ mnemonic = "bic";
+ break;
case EOR_w:
- case EOR_x: mnemonic = "eor"; break;
+ case EOR_x:
+ mnemonic = "eor";
+ break;
case EON_w:
- case EON_x: mnemonic = "eon"; break;
+ case EON_x:
+ mnemonic = "eon";
+ break;
case BICS_w:
- case BICS_x: mnemonic = "bics"; break;
+ case BICS_x:
+ mnemonic = "bics";
+ break;
case ANDS_w:
case ANDS_x: {
mnemonic = "ands";
@@ -325,58 +346,68 @@ void DisassemblingDecoder::VisitLogicalShifted(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitConditionalCompareRegister(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+ const char* mnemonic = "";
+ const char* form = "'Rn, 'Rm, 'INzcv, 'Cond";
switch (instr->Mask(ConditionalCompareRegisterMask)) {
case CCMN_w:
- case CCMN_x: mnemonic = "ccmn"; break;
+ case CCMN_x:
+ mnemonic = "ccmn";
+ break;
case CCMP_w:
- case CCMP_x: mnemonic = "ccmp"; break;
- default: UNREACHABLE();
+ case CCMP_x:
+ mnemonic = "ccmp";
+ break;
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitConditionalCompareImmediate(
Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+ const char* mnemonic = "";
+ const char* form = "'Rn, 'IP, 'INzcv, 'Cond";
switch (instr->Mask(ConditionalCompareImmediateMask)) {
case CCMN_w_imm:
- case CCMN_x_imm: mnemonic = "ccmn"; break;
+ case CCMN_x_imm:
+ mnemonic = "ccmn";
+ break;
case CCMP_w_imm:
- case CCMP_x_imm: mnemonic = "ccmp"; break;
- default: UNREACHABLE();
+ case CCMP_x_imm:
+ mnemonic = "ccmp";
+ break;
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitConditionalSelect(Instruction* instr) {
bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
bool rn_is_rm = (instr->Rn() == instr->Rm());
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
- const char *form_test = "'Rd, 'CInv";
- const char *form_update = "'Rd, 'Rn, 'CInv";
+ const char* mnemonic = "";
+ const char* form = "'Rd, 'Rn, 'Rm, 'Cond";
+ const char* form_test = "'Rd, 'CInv";
+ const char* form_update = "'Rd, 'Rn, 'CInv";
Condition cond = static_cast<Condition>(instr->Condition());
bool invertible_cond = (cond != al) && (cond != nv);
switch (instr->Mask(ConditionalSelectMask)) {
case CSEL_w:
- case CSEL_x: mnemonic = "csel"; break;
+ case CSEL_x:
+ mnemonic = "csel";
+ break;
case CSINC_w:
case CSINC_x: {
mnemonic = "csinc";
@@ -410,24 +441,24 @@ void DisassemblingDecoder::VisitConditionalSelect(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitBitfield(Instruction* instr) {
unsigned s = instr->ImmS();
unsigned r = instr->ImmR();
unsigned rd_size_minus_1 =
- ((instr->SixtyFourBits() == 1) ? kXRegSizeInBits : kWRegSizeInBits) - 1;
- const char *mnemonic = "";
- const char *form = "";
- const char *form_shift_right = "'Rd, 'Rn, 'IBr";
- const char *form_extend = "'Rd, 'Wn";
- const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
- const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
- const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+ ((instr->SixtyFourBits() == 1) ? kXRegSizeInBits : kWRegSizeInBits) - 1;
+ const char* mnemonic = "";
+ const char* form = "";
+ const char* form_shift_right = "'Rd, 'Rn, 'IBr";
+ const char* form_extend = "'Rd, 'Wn";
+ const char* form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+ const char* form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+ const char* form_lsl = "'Rd, 'Rn, 'IBZ-r";
switch (instr->Mask(BitfieldMask)) {
case SBFM_w:
@@ -493,10 +524,9 @@ void DisassemblingDecoder::VisitBitfield(Instruction* instr) {
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitExtract(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+ const char* mnemonic = "";
+ const char* form = "'Rd, 'Rn, 'Rm, 'IExtract";
switch (instr->Mask(ExtractMask)) {
case EXTR_w:
@@ -509,39 +539,45 @@ void DisassemblingDecoder::VisitExtract(Instruction* instr) {
}
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitPCRelAddressing(Instruction* instr) {
switch (instr->Mask(PCRelAddressingMask)) {
- case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+ case ADR:
+ Format(instr, "adr", "'Xd, 'AddrPCRelByte");
+ break;
// ADRP is not implemented.
- default: Format(instr, "unimplemented", "(PCRelAddressing)");
+ default:
+ Format(instr, "unimplemented", "(PCRelAddressing)");
}
}
-
void DisassemblingDecoder::VisitConditionalBranch(Instruction* instr) {
switch (instr->Mask(ConditionalBranchMask)) {
case B_cond:
Format(instr, "b.'CBrn", "'TImmCond");
break;
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
}
-
void DisassemblingDecoder::VisitUnconditionalBranchToRegister(
Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Xn";
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Xn";
switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
- case BR: mnemonic = "br"; break;
- case BLR: mnemonic = "blr"; break;
+ case BR:
+ mnemonic = "br";
+ break;
+ case BLR:
+ mnemonic = "blr";
+ break;
case RET: {
mnemonic = "ret";
if (instr->Rn() == kLinkRegCode) {
@@ -549,75 +585,85 @@ void DisassemblingDecoder::VisitUnconditionalBranchToRegister(
}
break;
}
- default: form = "(UnconditionalBranchToRegister)";
+ default:
+ form = "(UnconditionalBranchToRegister)";
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitUnconditionalBranch(Instruction* instr) {
- const char *mnemonic = "";
+ const char* mnemonic = "";
const char* form = "'TImmUncn";
switch (instr->Mask(UnconditionalBranchMask)) {
- case B: mnemonic = "b"; break;
- case BL: mnemonic = "bl"; break;
- default: UNREACHABLE();
+ case B:
+ mnemonic = "b";
+ break;
+ case BL:
+ mnemonic = "bl";
+ break;
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitDataProcessing1Source(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rd, 'Rn";
+ const char* mnemonic = "";
+ const char* form = "'Rd, 'Rn";
switch (instr->Mask(DataProcessing1SourceMask)) {
- #define FORMAT(A, B) \
- case A##_w: \
- case A##_x: mnemonic = B; break;
+#define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: \
+ mnemonic = B; \
+ break;
FORMAT(RBIT, "rbit");
FORMAT(REV16, "rev16");
FORMAT(REV, "rev");
FORMAT(CLZ, "clz");
FORMAT(CLS, "cls");
- #undef FORMAT
- case REV32_x: mnemonic = "rev32"; break;
- default: UNREACHABLE();
+#undef FORMAT
+ case REV32_x:
+ mnemonic = "rev32";
+ break;
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitDataProcessing2Source(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Rd, 'Rn, 'Rm";
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Rd, 'Rn, 'Rm";
switch (instr->Mask(DataProcessing2SourceMask)) {
- #define FORMAT(A, B) \
- case A##_w: \
- case A##_x: mnemonic = B; break;
+#define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: \
+ mnemonic = B; \
+ break;
FORMAT(UDIV, "udiv");
FORMAT(SDIV, "sdiv");
FORMAT(LSLV, "lsl");
FORMAT(LSRV, "lsr");
FORMAT(ASRV, "asr");
FORMAT(RORV, "ror");
- #undef FORMAT
- default: form = "(DataProcessing2Source)";
+#undef FORMAT
+ default:
+ form = "(DataProcessing2Source)";
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitDataProcessing3Source(Instruction* instr) {
bool ra_is_zr = RaIsZROrSP(instr);
- const char *mnemonic = "";
- const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
- const char *form_rrr = "'Rd, 'Rn, 'Rm";
- const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
- const char *form_xww = "'Xd, 'Wn, 'Wm";
- const char *form_xxx = "'Xd, 'Xn, 'Xm";
+ const char* mnemonic = "";
+ const char* form = "'Xd, 'Wn, 'Wm, 'Xa";
+ const char* form_rrr = "'Rd, 'Rn, 'Rm";
+ const char* form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+ const char* form_xww = "'Xd, 'Wn, 'Wm";
+ const char* form_xxx = "'Xd, 'Xn, 'Xm";
switch (instr->Mask(DataProcessing3SourceMask)) {
case MADD_w:
@@ -682,29 +728,33 @@ void DisassemblingDecoder::VisitDataProcessing3Source(Instruction* instr) {
form = form_xxx;
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitCompareBranch(Instruction* instr) {
- const char *mnemonic = "";
+ const char* mnemonic = "";
const char* form = "'Rt, 'TImmCmpa";
switch (instr->Mask(CompareBranchMask)) {
case CBZ_w:
- case CBZ_x: mnemonic = "cbz"; break;
+ case CBZ_x:
+ mnemonic = "cbz";
+ break;
case CBNZ_w:
- case CBNZ_x: mnemonic = "cbnz"; break;
- default: UNREACHABLE();
+ case CBNZ_x:
+ mnemonic = "cbnz";
+ break;
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitTestBranch(Instruction* instr) {
- const char *mnemonic = "";
+ const char* mnemonic = "";
// If the top bit of the immediate is clear, the tested register is
// disassembled as Wt, otherwise Xt. As the top bit of the immediate is
// encoded in bit 31 of the instruction, we can reuse the Rt form, which
@@ -712,29 +762,41 @@ void DisassemblingDecoder::VisitTestBranch(Instruction* instr) {
const char* form = "'Rt, 'IS, 'TImmTest";
switch (instr->Mask(TestBranchMask)) {
- case TBZ: mnemonic = "tbz"; break;
- case TBNZ: mnemonic = "tbnz"; break;
- default: UNREACHABLE();
+ case TBZ:
+ mnemonic = "tbz";
+ break;
+ case TBNZ:
+ mnemonic = "tbnz";
+ break;
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitMoveWideImmediate(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rd, 'IMoveImm";
+ const char* mnemonic = "";
+ const char* form = "'Rd, 'IMoveImm";
// Print the shift separately for movk, to make it clear which half word will
// be overwritten. Movn and movz print the computed immediate, which includes
// shift calculation.
switch (instr->Mask(MoveWideImmediateMask)) {
case MOVN_w:
- case MOVN_x: mnemonic = "movn"; break;
+ case MOVN_x:
+ mnemonic = "movn";
+ break;
case MOVZ_w:
- case MOVZ_x: mnemonic = "movz"; break;
+ case MOVZ_x:
+ mnemonic = "movz";
+ break;
case MOVK_w:
- case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
- default: UNREACHABLE();
+ case MOVK_x:
+ mnemonic = "movk";
+ form = "'Rd, 'IMoveLSL";
+ break;
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
@@ -765,62 +827,77 @@ void DisassemblingDecoder::VisitMoveWideImmediate(Instruction* instr) {
V(LDR_q, "ldr", "'Qt")
void DisassemblingDecoder::VisitLoadStorePreIndex(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePreIndex)";
+ const char* mnemonic = "unimplemented";
+ const char* form = "(LoadStorePreIndex)";
switch (instr->Mask(LoadStorePreIndexMask)) {
- #define LS_PREINDEX(A, B, C) \
- case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+#define LS_PREINDEX(A, B, C) \
+ case A##_pre: \
+ mnemonic = B; \
+ form = C ", ['Xns'ILS]!"; \
+ break;
LOAD_STORE_LIST(LS_PREINDEX)
- #undef LS_PREINDEX
+#undef LS_PREINDEX
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitLoadStorePostIndex(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePostIndex)";
+ const char* mnemonic = "unimplemented";
+ const char* form = "(LoadStorePostIndex)";
switch (instr->Mask(LoadStorePostIndexMask)) {
- #define LS_POSTINDEX(A, B, C) \
- case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+#define LS_POSTINDEX(A, B, C) \
+ case A##_post: \
+ mnemonic = B; \
+ form = C ", ['Xns]'ILS"; \
+ break;
LOAD_STORE_LIST(LS_POSTINDEX)
- #undef LS_POSTINDEX
+#undef LS_POSTINDEX
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitLoadStoreUnsignedOffset(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStoreUnsignedOffset)";
+ const char* mnemonic = "unimplemented";
+ const char* form = "(LoadStoreUnsignedOffset)";
switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
- #define LS_UNSIGNEDOFFSET(A, B, C) \
- case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+#define LS_UNSIGNEDOFFSET(A, B, C) \
+ case A##_unsigned: \
+ mnemonic = B; \
+ form = C ", ['Xns'ILU]"; \
+ break;
LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
- #undef LS_UNSIGNEDOFFSET
- case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
+#undef LS_UNSIGNEDOFFSET
+ case PRFM_unsigned:
+ mnemonic = "prfm";
+ form = "'PrefOp, ['Xn'ILU]";
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitLoadStoreRegisterOffset(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStoreRegisterOffset)";
+ const char* mnemonic = "unimplemented";
+ const char* form = "(LoadStoreRegisterOffset)";
switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
- #define LS_REGISTEROFFSET(A, B, C) \
- case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+#define LS_REGISTEROFFSET(A, B, C) \
+ case A##_reg: \
+ mnemonic = B; \
+ form = C ", ['Xns, 'Offsetreg]"; \
+ break;
LOAD_STORE_LIST(LS_REGISTEROFFSET)
- #undef LS_REGISTEROFFSET
- case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+#undef LS_REGISTEROFFSET
+ case PRFM_reg:
+ mnemonic = "prfm";
+ form = "'PrefOp, ['Xns, 'Offsetreg]";
}
Format(instr, mnemonic, form);
}
+#undef LOAD_STORE_LIST
+
#define LOAD_STORE_UNSCALED_LIST(V) \
V(STURB_w, "sturb", "'Wt") \
V(STURH_w, "sturh", "'Wt") \
@@ -862,16 +939,27 @@ void DisassemblingDecoder::VisitLoadStoreUnscaledOffset(Instruction* instr) {
Format(instr, mnemonic, form);
}
+#undef LOAD_STORE_UNSCALED_LIST
+
void DisassemblingDecoder::VisitLoadLiteral(Instruction* instr) {
- const char *mnemonic = "ldr";
- const char *form = "(LoadLiteral)";
+ const char* mnemonic = "ldr";
+ const char* form = "(LoadLiteral)";
switch (instr->Mask(LoadLiteralMask)) {
- case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
- case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
- case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
- case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
- default: mnemonic = "unimplemented";
+ case LDR_w_lit:
+ form = "'Wt, 'ILLiteral 'LValue";
+ break;
+ case LDR_x_lit:
+ form = "'Xt, 'ILLiteral 'LValue";
+ break;
+ case LDR_s_lit:
+ form = "'St, 'ILLiteral 'LValue";
+ break;
+ case LDR_d_lit:
+ form = "'Dt, 'ILLiteral 'LValue";
+ break;
+ default:
+ mnemonic = "unimplemented";
}
Format(instr, mnemonic, form);
}
@@ -890,70 +978,118 @@ void DisassemblingDecoder::VisitLoadLiteral(Instruction* instr) {
V(STP_q, "stp", "'Qt, 'Qt2", "4")
void DisassemblingDecoder::VisitLoadStorePairPostIndex(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePairPostIndex)";
+ const char* mnemonic = "unimplemented";
+ const char* form = "(LoadStorePairPostIndex)";
switch (instr->Mask(LoadStorePairPostIndexMask)) {
- #define LSP_POSTINDEX(A, B, C, D) \
- case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+#define LSP_POSTINDEX(A, B, C, D) \
+ case A##_post: \
+ mnemonic = B; \
+ form = C ", ['Xns]'ILP" D; \
+ break;
LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
- #undef LSP_POSTINDEX
+#undef LSP_POSTINDEX
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitLoadStorePairPreIndex(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePairPreIndex)";
+ const char* mnemonic = "unimplemented";
+ const char* form = "(LoadStorePairPreIndex)";
switch (instr->Mask(LoadStorePairPreIndexMask)) {
- #define LSP_PREINDEX(A, B, C, D) \
- case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+#define LSP_PREINDEX(A, B, C, D) \
+ case A##_pre: \
+ mnemonic = B; \
+ form = C ", ['Xns'ILP" D "]!"; \
+ break;
LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
- #undef LSP_PREINDEX
+#undef LSP_PREINDEX
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitLoadStorePairOffset(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(LoadStorePairOffset)";
+ const char* mnemonic = "unimplemented";
+ const char* form = "(LoadStorePairOffset)";
switch (instr->Mask(LoadStorePairOffsetMask)) {
- #define LSP_OFFSET(A, B, C, D) \
- case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+#define LSP_OFFSET(A, B, C, D) \
+ case A##_off: \
+ mnemonic = B; \
+ form = C ", ['Xns'ILP" D "]"; \
+ break;
LOAD_STORE_PAIR_LIST(LSP_OFFSET)
- #undef LSP_OFFSET
+#undef LSP_OFFSET
}
Format(instr, mnemonic, form);
}
-void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
- const char *mnemonic = "unimplemented";
+#undef LOAD_STORE_PAIR_LIST
+
+void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction* instr) {
+ const char* mnemonic = "unimplemented";
const char* form = "'Wt, ['Xns]";
const char* form_x = "'Xt, ['Xns]";
const char* form_stlx = "'Ws, 'Wt, ['Xns]";
const char* form_stlx_x = "'Ws, 'Xt, ['Xns]";
switch (instr->Mask(LoadStoreAcquireReleaseMask)) {
- case LDAXR_b: mnemonic = "ldaxrb"; break;
- case STLR_b: mnemonic = "stlrb"; break;
- case LDAR_b: mnemonic = "ldarb"; break;
- case LDAXR_h: mnemonic = "ldaxrh"; break;
- case STLR_h: mnemonic = "stlrh"; break;
- case LDAR_h: mnemonic = "ldarh"; break;
- case LDAXR_w: mnemonic = "ldaxr"; break;
- case STLR_w: mnemonic = "stlr"; break;
- case LDAR_w: mnemonic = "ldar"; break;
- case LDAXR_x: mnemonic = "ldaxr"; form = form_x; break;
- case STLR_x: mnemonic = "stlr"; form = form_x; break;
- case LDAR_x: mnemonic = "ldar"; form = form_x; break;
- case STLXR_h: mnemonic = "stlxrh"; form = form_stlx; break;
- case STLXR_b: mnemonic = "stlxrb"; form = form_stlx; break;
- case STLXR_w: mnemonic = "stlxr"; form = form_stlx; break;
- case STLXR_x: mnemonic = "stlxr"; form = form_stlx_x; break;
+ case LDAXR_b:
+ mnemonic = "ldaxrb";
+ break;
+ case STLR_b:
+ mnemonic = "stlrb";
+ break;
+ case LDAR_b:
+ mnemonic = "ldarb";
+ break;
+ case LDAXR_h:
+ mnemonic = "ldaxrh";
+ break;
+ case STLR_h:
+ mnemonic = "stlrh";
+ break;
+ case LDAR_h:
+ mnemonic = "ldarh";
+ break;
+ case LDAXR_w:
+ mnemonic = "ldaxr";
+ break;
+ case STLR_w:
+ mnemonic = "stlr";
+ break;
+ case LDAR_w:
+ mnemonic = "ldar";
+ break;
+ case LDAXR_x:
+ mnemonic = "ldaxr";
+ form = form_x;
+ break;
+ case STLR_x:
+ mnemonic = "stlr";
+ form = form_x;
+ break;
+ case LDAR_x:
+ mnemonic = "ldar";
+ form = form_x;
+ break;
+ case STLXR_h:
+ mnemonic = "stlxrh";
+ form = form_stlx;
+ break;
+ case STLXR_b:
+ mnemonic = "stlxrb";
+ form = form_stlx;
+ break;
+ case STLXR_w:
+ mnemonic = "stlxr";
+ form = form_stlx;
+ break;
+ case STLXR_x:
+ mnemonic = "stlxr";
+ form = form_stlx_x;
+ break;
default:
form = "(LoadStoreAcquireRelease)";
}
@@ -961,57 +1097,69 @@ void DisassemblingDecoder::VisitLoadStoreAcquireRelease(Instruction *instr) {
}
void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Fn, 'Fm";
- const char *form_zero = "'Fn, #0.0";
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Fn, 'Fm";
+ const char* form_zero = "'Fn, #0.0";
switch (instr->Mask(FPCompareMask)) {
case FCMP_s_zero:
- case FCMP_d_zero: form = form_zero; V8_FALLTHROUGH;
+ case FCMP_d_zero:
+ form = form_zero;
+ V8_FALLTHROUGH;
case FCMP_s:
- case FCMP_d: mnemonic = "fcmp"; break;
- default: form = "(FPCompare)";
+ case FCMP_d:
+ mnemonic = "fcmp";
+ break;
+ default:
+ form = "(FPCompare)";
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitFPConditionalCompare(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Fn, 'Fm, 'INzcv, 'Cond";
switch (instr->Mask(FPConditionalCompareMask)) {
case FCCMP_s:
- case FCCMP_d: mnemonic = "fccmp"; break;
+ case FCCMP_d:
+ mnemonic = "fccmp";
+ break;
case FCCMPE_s:
- case FCCMPE_d: mnemonic = "fccmpe"; break;
- default: form = "(FPConditionalCompare)";
+ case FCCMPE_d:
+ mnemonic = "fccmpe";
+ break;
+ default:
+ form = "(FPConditionalCompare)";
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitFPConditionalSelect(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+ const char* mnemonic = "";
+ const char* form = "'Fd, 'Fn, 'Fm, 'Cond";
switch (instr->Mask(FPConditionalSelectMask)) {
case FCSEL_s:
- case FCSEL_d: mnemonic = "fcsel"; break;
- default: UNREACHABLE();
+ case FCSEL_d:
+ mnemonic = "fcsel";
+ break;
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitFPDataProcessing1Source(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'Fd, 'Fn";
+ const char* mnemonic = "unimplemented";
+ const char* form = "'Fd, 'Fn";
switch (instr->Mask(FPDataProcessing1SourceMask)) {
- #define FORMAT(A, B) \
- case A##_s: \
- case A##_d: mnemonic = B; break;
+#define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: \
+ mnemonic = B; \
+ break;
FORMAT(FMOV, "fmov");
FORMAT(FABS, "fabs");
FORMAT(FNEG, "fneg");
@@ -1023,9 +1171,15 @@ void DisassemblingDecoder::VisitFPDataProcessing1Source(Instruction* instr) {
FORMAT(FRINTA, "frinta");
FORMAT(FRINTX, "frintx");
FORMAT(FRINTI, "frinti");
- #undef FORMAT
- case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
- case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+#undef FORMAT
+ case FCVT_ds:
+ mnemonic = "fcvt";
+ form = "'Dd, 'Sn";
+ break;
+ case FCVT_sd:
+ mnemonic = "fcvt";
+ form = "'Sd, 'Dn";
+ break;
case FCVT_hs:
mnemonic = "fcvt";
form = "'Hd, 'Sn";
@@ -1042,20 +1196,22 @@ void DisassemblingDecoder::VisitFPDataProcessing1Source(Instruction* instr) {
mnemonic = "fcvt";
form = "'Hd, 'Dn";
break;
- default: form = "(FPDataProcessing1Source)";
+ default:
+ form = "(FPDataProcessing1Source)";
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitFPDataProcessing2Source(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Fd, 'Fn, 'Fm";
+ const char* mnemonic = "";
+ const char* form = "'Fd, 'Fn, 'Fm";
switch (instr->Mask(FPDataProcessing2SourceMask)) {
- #define FORMAT(A, B) \
- case A##_s: \
- case A##_d: mnemonic = B; break;
+#define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: \
+ mnemonic = B; \
+ break;
FORMAT(FMUL, "fmul");
FORMAT(FDIV, "fdiv");
FORMAT(FADD, "fadd");
@@ -1065,56 +1221,70 @@ void DisassemblingDecoder::VisitFPDataProcessing2Source(Instruction* instr) {
FORMAT(FMAXNM, "fmaxnm");
FORMAT(FMINNM, "fminnm");
FORMAT(FNMUL, "fnmul");
- #undef FORMAT
- default: UNREACHABLE();
+#undef FORMAT
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitFPDataProcessing3Source(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+ const char* mnemonic = "";
+ const char* form = "'Fd, 'Fn, 'Fm, 'Fa";
switch (instr->Mask(FPDataProcessing3SourceMask)) {
- #define FORMAT(A, B) \
- case A##_s: \
- case A##_d: mnemonic = B; break;
+#define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: \
+ mnemonic = B; \
+ break;
FORMAT(FMADD, "fmadd");
FORMAT(FMSUB, "fmsub");
FORMAT(FNMADD, "fnmadd");
FORMAT(FNMSUB, "fnmsub");
- #undef FORMAT
- default: UNREACHABLE();
+#undef FORMAT
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitFPImmediate(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "(FPImmediate)";
+ const char* mnemonic = "";
+ const char* form = "(FPImmediate)";
switch (instr->Mask(FPImmediateMask)) {
- case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
- case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
- default: UNREACHABLE();
+ case FMOV_s_imm:
+ mnemonic = "fmov";
+ form = "'Sd, 'IFPSingle";
+ break;
+ case FMOV_d_imm:
+ mnemonic = "fmov";
+ form = "'Dd, 'IFPDouble";
+ break;
+ default:
+ UNREACHABLE();
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "(FPIntegerConvert)";
- const char *form_rf = "'Rd, 'Fn";
- const char *form_fr = "'Fd, 'Rn";
+ const char* mnemonic = "unimplemented";
+ const char* form = "(FPIntegerConvert)";
+ const char* form_rf = "'Rd, 'Fn";
+ const char* form_fr = "'Fd, 'Rn";
switch (instr->Mask(FPIntegerConvertMask)) {
case FMOV_ws:
- case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+ case FMOV_xd:
+ mnemonic = "fmov";
+ form = form_rf;
+ break;
case FMOV_sw:
- case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FMOV_dx:
+ mnemonic = "fmov";
+ form = form_fr;
+ break;
case FMOV_d1_x:
mnemonic = "fmov";
form = "'Vd.D[1], 'Rn";
@@ -1126,35 +1296,59 @@ void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) {
case FCVTAS_ws:
case FCVTAS_xs:
case FCVTAS_wd:
- case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+ case FCVTAS_xd:
+ mnemonic = "fcvtas";
+ form = form_rf;
+ break;
case FCVTAU_ws:
case FCVTAU_xs:
case FCVTAU_wd:
- case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+ case FCVTAU_xd:
+ mnemonic = "fcvtau";
+ form = form_rf;
+ break;
case FCVTMS_ws:
case FCVTMS_xs:
case FCVTMS_wd:
- case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+ case FCVTMS_xd:
+ mnemonic = "fcvtms";
+ form = form_rf;
+ break;
case FCVTMU_ws:
case FCVTMU_xs:
case FCVTMU_wd:
- case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+ case FCVTMU_xd:
+ mnemonic = "fcvtmu";
+ form = form_rf;
+ break;
case FCVTNS_ws:
case FCVTNS_xs:
case FCVTNS_wd:
- case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+ case FCVTNS_xd:
+ mnemonic = "fcvtns";
+ form = form_rf;
+ break;
case FCVTNU_ws:
case FCVTNU_xs:
case FCVTNU_wd:
- case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+ case FCVTNU_xd:
+ mnemonic = "fcvtnu";
+ form = form_rf;
+ break;
case FCVTZU_xd:
case FCVTZU_ws:
case FCVTZU_wd:
- case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+ case FCVTZU_xs:
+ mnemonic = "fcvtzu";
+ form = form_rf;
+ break;
case FCVTZS_xd:
case FCVTZS_wd:
case FCVTZS_xs:
- case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case FCVTZS_ws:
+ mnemonic = "fcvtzs";
+ form = form_rf;
+ break;
case FCVTPU_xd:
case FCVTPU_ws:
case FCVTPU_wd:
@@ -1172,67 +1366,93 @@ void DisassemblingDecoder::VisitFPIntegerConvert(Instruction* instr) {
case SCVTF_sw:
case SCVTF_sx:
case SCVTF_dw:
- case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+ case SCVTF_dx:
+ mnemonic = "scvtf";
+ form = form_fr;
+ break;
case UCVTF_sw:
case UCVTF_sx:
case UCVTF_dw:
- case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+ case UCVTF_dx:
+ mnemonic = "ucvtf";
+ form = form_fr;
+ break;
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitFPFixedPointConvert(Instruction* instr) {
- const char *mnemonic = "";
- const char *form = "'Rd, 'Fn, 'IFPFBits";
- const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+ const char* mnemonic = "";
+ const char* form = "'Rd, 'Fn, 'IFPFBits";
+ const char* form_fr = "'Fd, 'Rn, 'IFPFBits";
switch (instr->Mask(FPFixedPointConvertMask)) {
case FCVTZS_ws_fixed:
case FCVTZS_xs_fixed:
case FCVTZS_wd_fixed:
- case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+ case FCVTZS_xd_fixed:
+ mnemonic = "fcvtzs";
+ break;
case FCVTZU_ws_fixed:
case FCVTZU_xs_fixed:
case FCVTZU_wd_fixed:
- case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+ case FCVTZU_xd_fixed:
+ mnemonic = "fcvtzu";
+ break;
case SCVTF_sw_fixed:
case SCVTF_sx_fixed:
case SCVTF_dw_fixed:
- case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+ case SCVTF_dx_fixed:
+ mnemonic = "scvtf";
+ form = form_fr;
+ break;
case UCVTF_sw_fixed:
case UCVTF_sx_fixed:
case UCVTF_dw_fixed:
- case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+ case UCVTF_dx_fixed:
+ mnemonic = "ucvtf";
+ form = form_fr;
+ break;
}
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitSystem(Instruction* instr) {
// Some system instructions hijack their Op and Cp fields to represent a
// range of immediates instead of indicating a different instruction. This
// makes the decoding tricky.
- const char *mnemonic = "unimplemented";
- const char *form = "(System)";
+ const char* mnemonic = "unimplemented";
+ const char* form = "(System)";
if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
switch (instr->Mask(SystemSysRegMask)) {
case MRS: {
mnemonic = "mrs";
switch (instr->ImmSystemRegister()) {
- case NZCV: form = "'Xt, nzcv"; break;
- case FPCR: form = "'Xt, fpcr"; break;
- default: form = "'Xt, (unknown)"; break;
+ case NZCV:
+ form = "'Xt, nzcv";
+ break;
+ case FPCR:
+ form = "'Xt, fpcr";
+ break;
+ default:
+ form = "'Xt, (unknown)";
+ break;
}
break;
}
case MSR: {
mnemonic = "msr";
switch (instr->ImmSystemRegister()) {
- case NZCV: form = "nzcv, 'Xt"; break;
- case FPCR: form = "fpcr, 'Xt"; break;
- default: form = "(unknown), 'Xt"; break;
+ case NZCV:
+ form = "nzcv, 'Xt";
+ break;
+ case FPCR:
+ form = "fpcr, 'Xt";
+ break;
+ default:
+ form = "(unknown), 'Xt";
+ break;
}
break;
}
@@ -1274,21 +1494,40 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
Format(instr, mnemonic, form);
}
-
void DisassemblingDecoder::VisitException(Instruction* instr) {
- const char *mnemonic = "unimplemented";
- const char *form = "'IDebug";
+ const char* mnemonic = "unimplemented";
+ const char* form = "'IDebug";
switch (instr->Mask(ExceptionMask)) {
- case HLT: mnemonic = "hlt"; break;
- case BRK: mnemonic = "brk"; break;
- case SVC: mnemonic = "svc"; break;
- case HVC: mnemonic = "hvc"; break;
- case SMC: mnemonic = "smc"; break;
- case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
- case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
- case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
- default: form = "(Exception)";
+ case HLT:
+ mnemonic = "hlt";
+ break;
+ case BRK:
+ mnemonic = "brk";
+ break;
+ case SVC:
+ mnemonic = "svc";
+ break;
+ case HVC:
+ mnemonic = "hvc";
+ break;
+ case SMC:
+ mnemonic = "smc";
+ break;
+ case DCPS1:
+ mnemonic = "dcps1";
+ form = "{'IDebug}";
+ break;
+ case DCPS2:
+ mnemonic = "dcps2";
+ form = "{'IDebug}";
+ break;
+ case DCPS3:
+ mnemonic = "dcps3";
+ form = "{'IDebug}";
+ break;
+ default:
+ form = "(Exception)";
}
Format(instr, mnemonic, form);
}
@@ -3621,7 +3860,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
return 7;
}
- case 'F': { // IFPSingle, IFPDouble or IFPFBits.
+ case 'F': { // IFPSingle, IFPDouble or IFPFBits.
if (format[3] == 'F') { // IFPFBits.
AppendToOutput("#%d", 64 - instr->FPScale());
return 8;
@@ -3638,9 +3877,9 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
case 'N': { // INzcv.
int nzcv = (instr->Nzcv() << Flags_offset);
AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
- ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
- ((nzcv & CFlag) == 0) ? 'c' : 'C',
- ((nzcv & VFlag) == 0) ? 'v' : 'V');
+ ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+ ((nzcv & CFlag) == 0) ? 'c' : 'C',
+ ((nzcv & VFlag) == 0) ? 'v' : 'V');
return 5;
}
case 'P': { // IP - Conditional compare.
@@ -3656,7 +3895,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
}
case 'S': { // IS - Test and branch bit.
AppendToOutput("#%d", (instr->ImmTestBranchBit5() << 5) |
- instr->ImmTestBranchBit40());
+ instr->ImmTestBranchBit40());
return 2;
}
case 's': { // Is - Shift (immediate).
@@ -3779,7 +4018,6 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
}
}
-
int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
const char* format) {
DCHECK((format[0] == 'I') && (format[1] == 'B'));
@@ -3803,8 +4041,8 @@ int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
}
case 'Z': { // IBZ-r.
DCHECK((format[3] == '-') && (format[4] == 'r'));
- unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
- : kWRegSizeInBits;
+ unsigned reg_size =
+ (instr->SixtyFourBits() == 1) ? kXRegSizeInBits : kWRegSizeInBits;
AppendToOutput("#%d", reg_size - r);
return 5;
}
@@ -3814,7 +4052,6 @@ int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
}
}
-
int DisassemblingDecoder::SubstituteLiteralField(Instruction* instr,
const char* format) {
DCHECK_EQ(strncmp(format, "LValue", 6), 0);
@@ -3827,13 +4064,13 @@ int DisassemblingDecoder::SubstituteLiteralField(Instruction* instr,
case LDR_d_lit:
AppendToOutput("(addr 0x%016" PRIxPTR ")", instr->LiteralAddress());
break;
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
return 6;
}
-
int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
const char* format) {
DCHECK_EQ(format[0], 'N');
@@ -3857,28 +4094,28 @@ int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
}
}
-
int DisassemblingDecoder::SubstituteConditionField(Instruction* instr,
const char* format) {
DCHECK_EQ(format[0], 'C');
- const char* condition_code[] = { "eq", "ne", "hs", "lo",
- "mi", "pl", "vs", "vc",
- "hi", "ls", "ge", "lt",
- "gt", "le", "al", "nv" };
+ const char* condition_code[] = {"eq", "ne", "hs", "lo", "mi", "pl",
+ "vs", "vc", "hi", "ls", "ge", "lt",
+ "gt", "le", "al", "nv"};
int cond;
switch (format[1]) {
- case 'B': cond = instr->ConditionBranch(); break;
+ case 'B':
+ cond = instr->ConditionBranch();
+ break;
case 'I': {
cond = NegateCondition(static_cast<Condition>(instr->Condition()));
break;
}
- default: cond = instr->Condition();
+ default:
+ cond = instr->Condition();
}
AppendToOutput("%s", condition_code[cond]);
return 4;
}
-
int DisassemblingDecoder::SubstitutePCRelAddressField(Instruction* instr,
const char* format) {
USE(format);
@@ -3898,7 +4135,6 @@ int DisassemblingDecoder::SubstitutePCRelAddressField(Instruction* instr,
return 13;
}
-
int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
const char* format) {
DCHECK_EQ(strncmp(format, "TImm", 4), 0);
@@ -3906,14 +4142,23 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
int64_t offset = 0;
switch (format[5]) {
// TImmUncn - unconditional branch immediate.
- case 'n': offset = instr->ImmUncondBranch(); break;
+ case 'n':
+ offset = instr->ImmUncondBranch();
+ break;
// TImmCond - conditional branch immediate.
- case 'o': offset = instr->ImmCondBranch(); break;
+ case 'o':
+ offset = instr->ImmCondBranch();
+ break;
// TImmCmpa - compare and branch immediate.
- case 'm': offset = instr->ImmCmpBranch(); break;
+ case 'm':
+ offset = instr->ImmCmpBranch();
+ break;
// TImmTest - test and branch immediate.
- case 'e': offset = instr->ImmTestBranch(); break;
- default: UNREACHABLE();
+ case 'e':
+ offset = instr->ImmTestBranch();
+ break;
+ default:
+ UNREACHABLE();
}
offset <<= kInstrSizeLog2;
char sign = '+';
@@ -3925,15 +4170,14 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
return 8;
}
-
int DisassemblingDecoder::SubstituteExtendField(Instruction* instr,
const char* format) {
DCHECK_EQ(strncmp(format, "Ext", 3), 0);
DCHECK_LE(instr->ExtendMode(), 7);
USE(format);
- const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
- "sxtb", "sxth", "sxtw", "sxtx" };
+ const char* extend_mode[] = {"uxtb", "uxth", "uxtw", "uxtx",
+ "sxtb", "sxth", "sxtw", "sxtx"};
// If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit
// registers becomes lsl.
@@ -3952,12 +4196,11 @@ int DisassemblingDecoder::SubstituteExtendField(Instruction* instr,
return 3;
}
-
int DisassemblingDecoder::SubstituteLSRegOffsetField(Instruction* instr,
const char* format) {
DCHECK_EQ(strncmp(format, "Offsetreg", 9), 0);
- const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
- "undefined", "undefined", "sxtw", "sxtx" };
+ const char* extend_mode[] = {"undefined", "undefined", "uxtw", "lsl",
+ "undefined", "undefined", "sxtw", "sxtx"};
USE(format);
unsigned shift = instr->ImmShiftLS();
@@ -3981,7 +4224,6 @@ int DisassemblingDecoder::SubstituteLSRegOffsetField(Instruction* instr,
return 9;
}
-
int DisassemblingDecoder::SubstitutePrefetchField(Instruction* instr,
const char* format) {
DCHECK_EQ(format[0], 'P');
@@ -4003,11 +4245,10 @@ int DisassemblingDecoder::SubstituteBarrierField(Instruction* instr,
USE(format);
static const char* const options[4][4] = {
- { "sy (0b0000)", "oshld", "oshst", "osh" },
- { "sy (0b0100)", "nshld", "nshst", "nsh" },
- { "sy (0b1000)", "ishld", "ishst", "ish" },
- { "sy (0b1100)", "ld", "st", "sy" }
- };
+ {"sy (0b0000)", "oshld", "oshst", "osh"},
+ {"sy (0b0100)", "nshld", "nshst", "nsh"},
+ {"sy (0b1000)", "ishld", "ishst", "ish"},
+ {"sy (0b1100)", "ld", "st", "sy"}};
int domain = instr->ImmBarrierDomain();
int type = instr->ImmBarrierType();
@@ -4015,13 +4256,11 @@ int DisassemblingDecoder::SubstituteBarrierField(Instruction* instr,
return 1;
}
-
void DisassemblingDecoder::ResetOutput() {
buffer_pos_ = 0;
buffer_[buffer_pos_] = 0;
}
-
void DisassemblingDecoder::AppendToOutput(const char* format, ...) {
va_list args;
va_start(args, format);
@@ -4029,7 +4268,6 @@ void DisassemblingDecoder::AppendToOutput(const char* format, ...) {
va_end(args);
}
-
void PrintDisassembler::ProcessOutput(Instruction* instr) {
fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
reinterpret_cast<uint64_t>(instr), instr->InstructionBits(),
@@ -4039,21 +4277,17 @@ void PrintDisassembler::ProcessOutput(Instruction* instr) {
} // namespace internal
} // namespace v8
-
namespace disasm {
-
const char* NameConverter::NameOfAddress(byte* addr) const {
- v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void *>(addr));
- return tmp_buffer_.start();
+ v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
+ return tmp_buffer_.begin();
}
-
const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
-
const char* NameConverter::NameOfCPURegister(int reg) const {
unsigned ureg = reg; // Avoid warnings about signed/unsigned comparisons.
if (ureg >= v8::internal::kNumberOfRegisters) {
@@ -4063,37 +4297,33 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
return "xzr";
}
v8::internal::SNPrintF(tmp_buffer_, "x%u", ureg);
- return tmp_buffer_.start();
+ return tmp_buffer_.begin();
}
-
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM64 does not have the concept of a byte register
return "nobytereg";
}
-
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM64 does not have any XMM registers
return "noxmmreg";
}
-
const char* NameConverter::NameInCode(byte* addr) const {
// The default name converter is called for unknown code, so we will not try
// to access any memory.
return "";
}
-
//------------------------------------------------------------------------------
class BufferDisassembler : public v8::internal::DisassemblingDecoder {
public:
explicit BufferDisassembler(v8::internal::Vector<char> out_buffer)
- : out_buffer_(out_buffer) { }
+ : out_buffer_(out_buffer) {}
- ~BufferDisassembler() { }
+ ~BufferDisassembler() {}
virtual void ProcessOutput(v8::internal::Instruction* instr) {
v8::internal::SNPrintF(out_buffer_, "%08" PRIx32 " %s",
@@ -4115,7 +4345,6 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return v8::internal::kInstrSize;
}
-
int Disassembler::ConstantPoolSizeAt(byte* instr) {
return v8::internal::Assembler::ConstantPoolSizeAt(
reinterpret_cast<v8::internal::Instruction*>(instr));
diff --git a/deps/v8/src/arm64/disasm-arm64.h b/deps/v8/src/diagnostics/arm64/disasm-arm64.h
index 8e218b5cf8..68d7fddd9d 100644
--- a/deps/v8/src/arm64/disasm-arm64.h
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_DISASM_ARM64_H_
-#define V8_ARM64_DISASM_ARM64_H_
+#ifndef V8_DIAGNOSTICS_ARM64_DISASM_ARM64_H_
+#define V8_DIAGNOSTICS_ARM64_DISASM_ARM64_H_
-#include "src/arm64/assembler-arm64.h"
-#include "src/arm64/decoder-arm64.h"
-#include "src/arm64/instructions-arm64.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/codegen/arm64/assembler-arm64.h"
+#include "src/codegen/arm64/decoder-arm64.h"
+#include "src/codegen/arm64/instructions-arm64.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -21,10 +21,10 @@ class V8_EXPORT_PRIVATE DisassemblingDecoder : public DecoderVisitor {
virtual ~DisassemblingDecoder();
char* GetOutput();
- // Declare all Visitor functions.
- #define DECLARE(A) void Visit##A(Instruction* instr);
+// Declare all Visitor functions.
+#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
- #undef DECLARE
+#undef DECLARE
protected:
virtual void ProcessOutput(Instruction* instr);
@@ -81,16 +81,16 @@ class V8_EXPORT_PRIVATE DisassemblingDecoder : public DecoderVisitor {
class V8_EXPORT_PRIVATE PrintDisassembler : public DisassemblingDecoder {
public:
- explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
- ~PrintDisassembler() { }
+ explicit PrintDisassembler(FILE* stream) : stream_(stream) {}
+ ~PrintDisassembler() {}
virtual void ProcessOutput(Instruction* instr);
private:
- FILE *stream_;
+ FILE* stream_;
};
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_DISASM_ARM64_H_
+#endif // V8_DIAGNOSTICS_ARM64_DISASM_ARM64_H_
diff --git a/deps/v8/src/arm64/eh-frame-arm64.cc b/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc
index 79d8510f9b..115d0cc300 100644
--- a/deps/v8/src/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/eh-frame.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/diagnostics/eh-frame.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/basic-block-profiler.cc b/deps/v8/src/diagnostics/basic-block-profiler.cc
index 444c2bb397..262a5364b5 100644
--- a/deps/v8/src/basic-block-profiler.cc
+++ b/deps/v8/src/diagnostics/basic-block-profiler.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/basic-block-profiler.h"
+#include "src/diagnostics/basic-block-profiler.h"
#include <algorithm>
#include <numeric>
@@ -51,7 +51,6 @@ intptr_t BasicBlockProfiler::Data::GetCounterAddress(size_t offset) {
return reinterpret_cast<intptr_t>(&(counts_[offset]));
}
-
void BasicBlockProfiler::Data::ResetCounts() {
for (size_t i = 0; i < n_blocks_; ++i) {
counts_[i] = 0;
@@ -65,24 +64,21 @@ BasicBlockProfiler::Data* BasicBlockProfiler::NewData(size_t n_blocks) {
return data;
}
-
BasicBlockProfiler::~BasicBlockProfiler() {
for (DataList::iterator i = data_list_.begin(); i != data_list_.end(); ++i) {
delete (*i);
}
}
-
void BasicBlockProfiler::ResetCounts() {
for (DataList::iterator i = data_list_.begin(); i != data_list_.end(); ++i) {
(*i)->ResetCounts();
}
}
-
std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler& p) {
os << "---- Start Profiling Data ----" << std::endl;
- typedef BasicBlockProfiler::DataList::const_iterator iterator;
+ using iterator = BasicBlockProfiler::DataList::const_iterator;
for (iterator i = p.data_list_.begin(); i != p.data_list_.end(); ++i) {
os << **i;
}
@@ -90,7 +86,6 @@ std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler& p) {
return os;
}
-
std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& d) {
int block_count_sum = std::accumulate(d.counts_.begin(), d.counts_.end(), 0);
if (block_count_sum == 0) return os;
@@ -112,8 +107,7 @@ std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& d) {
std::sort(pairs.begin(), pairs.end(),
[=](std::pair<int32_t, uint32_t> left,
std::pair<int32_t, uint32_t> right) {
- if (right.second == left.second)
- return left.first < right.first;
+ if (right.second == left.second) return left.first < right.first;
return right.second < left.second;
});
for (auto it : pairs) {
diff --git a/deps/v8/src/basic-block-profiler.h b/deps/v8/src/diagnostics/basic-block-profiler.h
index 86295253c7..960b4b43e1 100644
--- a/deps/v8/src/basic-block-profiler.h
+++ b/deps/v8/src/diagnostics/basic-block-profiler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BASIC_BLOCK_PROFILER_H_
-#define V8_BASIC_BLOCK_PROFILER_H_
+#ifndef V8_DIAGNOSTICS_BASIC_BLOCK_PROFILER_H_
+#define V8_DIAGNOSTICS_BASIC_BLOCK_PROFILER_H_
#include <iosfwd>
#include <list>
@@ -12,7 +12,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -49,7 +49,7 @@ class BasicBlockProfiler {
DISALLOW_COPY_AND_ASSIGN(Data);
};
- typedef std::list<Data*> DataList;
+ using DataList = std::list<Data*>;
BasicBlockProfiler() = default;
~BasicBlockProfiler();
@@ -77,4 +77,4 @@ std::ostream& operator<<(std::ostream& os, const BasicBlockProfiler::Data& s);
} // namespace internal
} // namespace v8
-#endif // V8_BASIC_BLOCK_PROFILER_H_
+#endif // V8_DIAGNOSTICS_BASIC_BLOCK_PROFILER_H_
diff --git a/deps/v8/src/code-tracer.h b/deps/v8/src/diagnostics/code-tracer.h
index efa3dee8f0..6dd8d3e086 100644
--- a/deps/v8/src/code-tracer.h
+++ b/deps/v8/src/diagnostics/code-tracer.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CODE_TRACER_H_
-#define V8_CODE_TRACER_H_
+#ifndef V8_DIAGNOSTICS_CODE_TRACER_H_
+#define V8_DIAGNOSTICS_CODE_TRACER_H_
-#include "src/allocation.h"
-#include "src/flags.h"
-#include "src/globals.h"
-#include "src/utils.h"
-#include "src/vector.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -31,7 +31,7 @@ class CodeTracer final : public Malloced {
SNPrintF(filename_, "code-%d.asm", base::OS::GetCurrentProcessId());
}
- WriteChars(filename_.start(), "", 0, false);
+ WriteChars(filename_.begin(), "", 0, false);
}
class Scope {
@@ -51,7 +51,7 @@ class CodeTracer final : public Malloced {
}
if (file_ == nullptr) {
- file_ = base::OS::FOpen(filename_.start(), "ab");
+ file_ = base::OS::FOpen(filename_.begin(), "ab");
}
scope_depth_++;
@@ -81,4 +81,4 @@ class CodeTracer final : public Malloced {
} // namespace internal
} // namespace v8
-#endif // V8_CODE_TRACER_H_
+#endif // V8_DIAGNOSTICS_CODE_TRACER_H_
diff --git a/deps/v8/src/compilation-statistics.cc b/deps/v8/src/diagnostics/compilation-statistics.cc
index d809eab82a..adedc3e283 100644
--- a/deps/v8/src/compilation-statistics.cc
+++ b/deps/v8/src/diagnostics/compilation-statistics.cc
@@ -6,7 +6,7 @@
#include <vector>
#include "src/base/platform/platform.h"
-#include "src/compilation-statistics.h"
+#include "src/diagnostics/compilation-statistics.h"
namespace v8 {
namespace internal {
@@ -25,7 +25,6 @@ void CompilationStatistics::RecordPhaseStats(const char* phase_kind_name,
it->second.Accumulate(stats);
}
-
void CompilationStatistics::RecordPhaseKindStats(const char* phase_kind_name,
const BasicStats& stats) {
base::MutexGuard guard(&record_mutex_);
@@ -34,13 +33,13 @@ void CompilationStatistics::RecordPhaseKindStats(const char* phase_kind_name,
auto it = phase_kind_map_.find(phase_kind_name_str);
if (it == phase_kind_map_.end()) {
PhaseKindStats phase_kind_stats(phase_kind_map_.size());
- it = phase_kind_map_.insert(std::make_pair(phase_kind_name_str,
- phase_kind_stats)).first;
+ it = phase_kind_map_
+ .insert(std::make_pair(phase_kind_name_str, phase_kind_stats))
+ .first;
}
it->second.Accumulate(stats);
}
-
void CompilationStatistics::RecordTotalStats(size_t source_size,
const BasicStats& stats) {
base::MutexGuard guard(&record_mutex_);
@@ -49,7 +48,6 @@ void CompilationStatistics::RecordTotalStats(size_t source_size,
total_stats_.Accumulate(stats);
}
-
void CompilationStatistics::BasicStats::Accumulate(const BasicStats& stats) {
delta_ += stats.delta_;
total_allocated_bytes_ += stats.total_allocated_bytes_;
@@ -73,31 +71,29 @@ static void WriteLine(std::ostream& os, bool machine_format, const char* name,
static_cast<double>(total_stats.total_allocated_bytes_);
if (machine_format) {
base::OS::SNPrintF(buffer, kBufferSize,
- "\"%s_time\"=%.3f\n\"%s_space\"=%" PRIuS, name, ms, name,
+ "\"%s_time\"=%.3f\n\"%s_space\"=%zu", name, ms, name,
stats.total_allocated_bytes_);
os << buffer;
} else {
- base::OS::SNPrintF(
- buffer, kBufferSize,
- "%34s %10.3f (%5.1f%%) %10" PRIuS " (%5.1f%%) %10" PRIuS " %10" PRIuS,
- name, ms, percent, stats.total_allocated_bytes_, size_percent,
- stats.max_allocated_bytes_, stats.absolute_max_allocated_bytes_);
+ base::OS::SNPrintF(buffer, kBufferSize,
+ "%34s %10.3f (%5.1f%%) %10zu (%5.1f%%) %10zu %10zu",
+ name, ms, percent, stats.total_allocated_bytes_,
+ size_percent, stats.max_allocated_bytes_,
+ stats.absolute_max_allocated_bytes_);
os << buffer;
- if (stats.function_name_.size() > 0) {
+ if (!stats.function_name_.empty()) {
os << " " << stats.function_name_.c_str();
}
os << std::endl;
}
}
-
static void WriteFullLine(std::ostream& os) {
os << "-----------------------------------------------------------"
"-----------------------------------------------------------\n";
}
-
static void WriteHeader(std::ostream& os) {
WriteFullLine(os);
os << " Turbofan phase Time (ms) "
@@ -107,7 +103,6 @@ static void WriteHeader(std::ostream& os) {
WriteFullLine(os);
}
-
static void WritePhaseKindBreak(std::ostream& os) {
os << " ------------------------"
"-----------------------------------------------------------\n";
@@ -118,16 +113,16 @@ std::ostream& operator<<(std::ostream& os, const AsPrintableStatistics& ps) {
// pointers into them.
const CompilationStatistics& s = ps.s;
- typedef std::vector<CompilationStatistics::PhaseKindMap::const_iterator>
- SortedPhaseKinds;
+ using SortedPhaseKinds =
+ std::vector<CompilationStatistics::PhaseKindMap::const_iterator>;
SortedPhaseKinds sorted_phase_kinds(s.phase_kind_map_.size());
for (auto it = s.phase_kind_map_.begin(); it != s.phase_kind_map_.end();
++it) {
sorted_phase_kinds[it->second.insert_order_] = it;
}
- typedef std::vector<CompilationStatistics::PhaseMap::const_iterator>
- SortedPhases;
+ using SortedPhases =
+ std::vector<CompilationStatistics::PhaseMap::const_iterator>;
SortedPhases sorted_phases(s.phase_map_.size());
for (auto it = s.phase_map_.begin(); it != s.phase_map_.end(); ++it) {
sorted_phases[it->second.insert_order_] = it;
diff --git a/deps/v8/src/compilation-statistics.h b/deps/v8/src/diagnostics/compilation-statistics.h
index bfd9a5c66a..50bc88af92 100644
--- a/deps/v8/src/compilation-statistics.h
+++ b/deps/v8/src/diagnostics/compilation-statistics.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILATION_STATISTICS_H_
-#define V8_COMPILATION_STATISTICS_H_
+#ifndef V8_DIAGNOSTICS_COMPILATION_STATISTICS_H_
+#define V8_DIAGNOSTICS_COMPILATION_STATISTICS_H_
#include <map>
#include <string>
-#include "src/allocation.h"
#include "src/base/platform/time.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -73,9 +73,9 @@ class CompilationStatistics final : public Malloced {
friend std::ostream& operator<<(std::ostream& os,
const AsPrintableStatistics& s);
- typedef OrderedStats PhaseKindStats;
- typedef std::map<std::string, PhaseKindStats> PhaseKindMap;
- typedef std::map<std::string, PhaseStats> PhaseMap;
+ using PhaseKindStats = OrderedStats;
+ using PhaseKindMap = std::map<std::string, PhaseKindStats>;
+ using PhaseMap = std::map<std::string, PhaseStats>;
TotalStats total_stats_;
PhaseKindMap phase_kind_map_;
@@ -90,4 +90,4 @@ std::ostream& operator<<(std::ostream& os, const AsPrintableStatistics& s);
} // namespace internal
} // namespace v8
-#endif // V8_COMPILATION_STATISTICS_H_
+#endif // V8_DIAGNOSTICS_COMPILATION_STATISTICS_H_
diff --git a/deps/v8/src/disasm.h b/deps/v8/src/diagnostics/disasm.h
index f543af2609..036691877c 100644
--- a/deps/v8/src/disasm.h
+++ b/deps/v8/src/diagnostics/disasm.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DISASM_H_
-#define V8_DISASM_H_
+#ifndef V8_DIAGNOSTICS_DISASM_H_
+#define V8_DIAGNOSTICS_DISASM_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace disasm {
-typedef unsigned char byte;
+using byte = unsigned char;
// Interface and default implementation for converting addresses and
// register-numbers to text. The default implementation is machine
@@ -78,4 +78,4 @@ class Disassembler {
} // namespace disasm
-#endif // V8_DISASM_H_
+#endif // V8_DIAGNOSTICS_DISASM_H_
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/diagnostics/disassembler.cc
index c22a257e1c..8307ef3ff4 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/diagnostics/disassembler.cc
@@ -2,26 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/disassembler.h"
+#include "src/diagnostics/disassembler.h"
#include <memory>
#include <unordered_map>
#include <vector>
-#include "src/assembler-inl.h"
-#include "src/code-comments.h"
-#include "src/code-reference.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/code-comments.h"
+#include "src/codegen/code-reference.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/disasm.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/diagnostics/disasm.h"
+#include "src/execution/isolate-data.h"
#include "src/ic/ic.h"
-#include "src/isolate-data.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/objects/objects-inl.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/serializer-common.h"
-#include "src/string-stream.h"
-#include "src/vector.h"
+#include "src/strings/string-stream.h"
+#include "src/utils/vector.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
@@ -30,7 +30,7 @@ namespace internal {
#ifdef ENABLE_DISASSEMBLER
-class V8NameConverter: public disasm::NameConverter {
+class V8NameConverter : public disasm::NameConverter {
public:
explicit V8NameConverter(Isolate* isolate, CodeReference code = {})
: isolate_(isolate), code_(code) {}
@@ -82,7 +82,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
if (name != nullptr) {
SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc), name);
- return v8_buffer_.start();
+ return v8_buffer_.begin();
}
int offs = static_cast<int>(reinterpret_cast<Address>(pc) -
@@ -90,7 +90,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
// print as code offset, if it seems reasonable
if (0 <= offs && offs < code_.instruction_size()) {
SNPrintF(v8_buffer_, "%p <+0x%x>", static_cast<void*>(pc), offs);
- return v8_buffer_.start();
+ return v8_buffer_.begin();
}
wasm::WasmCodeRefScope wasm_code_ref_scope;
@@ -101,14 +101,13 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const {
if (wasm_code != nullptr) {
SNPrintF(v8_buffer_, "%p (%s)", static_cast<void*>(pc),
wasm::GetWasmCodeKindAsString(wasm_code->kind()));
- return v8_buffer_.start();
+ return v8_buffer_.begin();
}
}
return disasm::NameConverter::NameOfAddress(pc);
}
-
const char* V8NameConverter::NameInCode(byte* addr) const {
// The V8NameConverter is used for well known code, so we can "safely"
// dereference pointers in generated code.
@@ -136,7 +135,7 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
static_cast<RootIndex>(offset_in_roots_table / kSystemPointerSize);
SNPrintF(v8_buffer_, "root (%s)", RootsTable::name(root_index));
- return v8_buffer_.start();
+ return v8_buffer_.begin();
} else if (static_cast<unsigned>(offset - kExtRefsTableStart) <
kExtRefsTableSize) {
@@ -155,7 +154,7 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
SNPrintF(v8_buffer_, "external reference (%s)",
isolate_->external_reference_table()->NameFromOffset(
offset_in_extref_table));
- return v8_buffer_.start();
+ return v8_buffer_.begin();
} else if (static_cast<unsigned>(offset - kBuiltinsTableStart) <
kBuiltinsTableSize) {
@@ -166,7 +165,7 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
const char* name = Builtins::name(builtin_id);
SNPrintF(v8_buffer_, "builtin (%s)", name);
- return v8_buffer_.start();
+ return v8_buffer_.begin();
} else {
// It must be a direct access to one of the external values.
@@ -177,9 +176,9 @@ const char* V8NameConverter::RootRelativeName(int offset) const {
auto iter = directly_accessed_external_refs_.find(offset);
if (iter != directly_accessed_external_refs_.end()) {
SNPrintF(v8_buffer_, "external value (%s)", iter->second);
- return v8_buffer_.start();
+ return v8_buffer_.begin();
}
- return "WAAT??? What are we accessing here???";
+ return nullptr;
}
}
@@ -188,7 +187,6 @@ static void DumpBuffer(std::ostream* os, StringBuilder* out) {
out->Reset();
}
-
static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
@@ -220,12 +218,14 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
} else if (rmode == RelocInfo::DEOPT_ID) {
out->AddFormatted(" ;; debug: deopt index %d",
static_cast<int>(relocinfo->data()));
- } else if (rmode == RelocInfo::EMBEDDED_OBJECT) {
+ } else if (RelocInfo::IsEmbeddedObjectMode(rmode)) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- relocinfo->target_object()->ShortPrint(&accumulator);
+ relocinfo->target_object().ShortPrint(&accumulator);
std::unique_ptr<char[]> obj_name = accumulator.ToCString();
- out->AddFormatted(" ;; object: %s", obj_name.get());
+ const bool is_compressed = RelocInfo::IsCompressedEmbeddedObject(rmode);
+ out->AddFormatted(" ;; %sobject: %s",
+ is_compressed ? "(compressed) " : "", obj_name.get());
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
const char* reference_name =
ref_encoder ? ref_encoder->NameOfAddress(
@@ -236,9 +236,9 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
out->AddFormatted(" ;; code:");
Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(
relocinfo->target_address());
- Code::Kind kind = code->kind();
- if (code->is_builtin()) {
- out->AddFormatted(" Builtin::%s", Builtins::name(code->builtin_index()));
+ Code::Kind kind = code.kind();
+ if (code.is_builtin()) {
+ out->AddFormatted(" Builtin::%s", Builtins::name(code.builtin_index()));
} else {
out->AddFormatted(" %s", Code::Kind2String(kind));
}
@@ -271,7 +271,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
CHECK(!code.is_null());
v8::internal::EmbeddedVector<char, 128> decode_buffer;
v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
- StringBuilder out(out_buffer.start(), out_buffer.length());
+ StringBuilder out(out_buffer.begin(), out_buffer.length());
byte* pc = begin;
disasm::Disassembler d(converter,
disasm::Disassembler::kContinueOnUnimplementedOpcode);
@@ -291,8 +291,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// First decode instruction so that we know its length.
byte* prev_pc = pc;
if (constants > 0) {
- SNPrintF(decode_buffer,
- "%08x constant",
+ SNPrintF(decode_buffer, "%08x constant",
*reinterpret_cast<int32_t*>(pc));
constants--;
pc += 4;
@@ -309,9 +308,9 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE) {
// raw pointer embedded in code stream, e.g., jump table
byte* ptr = *reinterpret_cast<byte**>(pc);
- SNPrintF(
- decode_buffer, "%08" V8PRIxPTR " jump table entry %4" PRIuS,
- reinterpret_cast<intptr_t>(ptr), static_cast<size_t>(ptr - begin));
+ SNPrintF(decode_buffer, "%08" V8PRIxPTR " jump table entry %4zu",
+ reinterpret_cast<intptr_t>(ptr),
+ static_cast<size_t>(ptr - begin));
pc += sizeof(ptr);
} else {
decode_buffer[0] = '\0';
@@ -354,7 +353,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
prev_pc - begin);
// Instruction.
- out.AddFormatted("%s", decode_buffer.start());
+ out.AddFormatted("%s", decode_buffer.begin());
// Print all the reloc info for this instruction which are not comments.
for (size_t i = 0; i < pcs.size(); i++) {
@@ -362,7 +361,13 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
const CodeReference& host = code;
Address constant_pool =
host.is_null() ? kNullAddress : host.constant_pool();
- RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], Code(), constant_pool);
+ Code code_pointer;
+ if (!host.is_null() && host.is_js()) {
+ code_pointer = *host.as_js_code();
+ }
+
+ RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], code_pointer,
+ constant_pool);
bool first_reloc_info = (i == 0);
PrintRelocInfo(&out, isolate, ref_encoder, os, code, &relocinfo,
@@ -373,8 +378,7 @@ static int DecodeIt(Isolate* isolate, ExternalReferenceEncoder* ref_encoder,
// already, check if we can find some RelocInfo for the target address in
// the constant pool.
if (pcs.empty() && !code.is_null()) {
- RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc),
- RelocInfo::NONE,
+ RelocInfo dummy_rinfo(reinterpret_cast<Address>(prev_pc), RelocInfo::NONE,
0, Code());
if (dummy_rinfo.IsInConstantPool()) {
Address constant_pool_entry_address =
diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/diagnostics/disassembler.h
index d6bb84cd27..303252cb4e 100644
--- a/deps/v8/src/disassembler.h
+++ b/deps/v8/src/diagnostics/disassembler.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DISASSEMBLER_H_
-#define V8_DISASSEMBLER_H_
+#ifndef V8_DIAGNOSTICS_DISASSEMBLER_H_
+#define V8_DIAGNOSTICS_DISASSEMBLER_H_
-#include "src/allocation.h"
-#include "src/code-reference.h"
+#include "src/codegen/code-reference.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -29,4 +29,4 @@ class Disassembler : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_DISASSEMBLER_H_
+#endif // V8_DIAGNOSTICS_DISASSEMBLER_H_
diff --git a/deps/v8/src/eh-frame.cc b/deps/v8/src/diagnostics/eh-frame.cc
index 37a176557d..e19e09f332 100644
--- a/deps/v8/src/eh-frame.cc
+++ b/deps/v8/src/diagnostics/eh-frame.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/eh-frame.h"
+#include "src/diagnostics/eh-frame.h"
#include <iomanip>
#include <ostream>
-#include "src/code-desc.h"
+#include "src/codegen/code-desc.h"
#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM) && \
!defined(V8_TARGET_ARCH_ARM64)
@@ -493,8 +493,9 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
if (((bytecode >> EhFrameConstants::kSavedRegisterMaskSize) & 0xFF) ==
EhFrameConstants::kSavedRegisterTag) {
int32_t decoded_offset = eh_frame_iterator.GetNextULeb128();
- stream << "| " << DwarfRegisterCodeToString(
- bytecode & EhFrameConstants::kLocationMask)
+ stream << "| "
+ << DwarfRegisterCodeToString(bytecode &
+ EhFrameConstants::kLocationMask)
<< " saved at base" << std::showpos
<< decoded_offset * EhFrameConstants::kDataAlignmentFactor
<< std::noshowpos << '\n';
@@ -503,8 +504,9 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
if (((bytecode >> EhFrameConstants::kFollowInitialRuleMaskSize) & 0xFF) ==
EhFrameConstants::kFollowInitialRuleTag) {
- stream << "| " << DwarfRegisterCodeToString(
- bytecode & EhFrameConstants::kLocationMask)
+ stream << "| "
+ << DwarfRegisterCodeToString(bytecode &
+ EhFrameConstants::kLocationMask)
<< " follows rule in CIE\n";
continue;
}
diff --git a/deps/v8/src/eh-frame.h b/deps/v8/src/diagnostics/eh-frame.h
index 1f1fb8ea3d..8b78b04b16 100644
--- a/deps/v8/src/eh-frame.h
+++ b/deps/v8/src/diagnostics/eh-frame.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_EH_FRAME_H_
-#define V8_EH_FRAME_H_
+#ifndef V8_DIAGNOSTICS_EH_FRAME_H_
+#define V8_DIAGNOSTICS_EH_FRAME_H_
#include "src/base/compiler-specific.h"
-#include "src/globals.h"
-#include "src/register-arch.h"
-#include "src/v8memory.h"
+#include "src/codegen/register-arch.h"
+#include "src/common/globals.h"
+#include "src/common/v8memory.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -305,4 +305,4 @@ class EhFrameDisassembler final {
} // namespace internal
} // namespace v8
-#endif // V8_EH_FRAME_H_
+#endif // V8_DIAGNOSTICS_EH_FRAME_H_
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/diagnostics/gdb-jit.cc
index db47bb3022..70fd9fb06d 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/diagnostics/gdb-jit.cc
@@ -2,23 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/gdb-jit.h"
+#include "src/diagnostics/gdb-jit.h"
#include <memory>
#include <vector>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
-#include "src/bootstrapper.h"
-#include "src/frames-inl.h"
-#include "src/frames.h"
-#include "src/global-handles.h"
-#include "src/objects.h"
-#include "src/ostreams.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/frames.h"
+#include "src/handles/global-handles.h"
+#include "src/init/bootstrapper.h"
+#include "src/objects/objects.h"
#include "src/snapshot/natives.h"
-#include "src/splay-tree-inl.h"
-#include "src/vector.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/splay-tree-inl.h"
+#include "src/utils/vector.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -31,14 +31,14 @@ namespace GDBJITInterface {
#define __MACH_O
class MachO;
class MachOSection;
-typedef MachO DebugObject;
-typedef MachOSection DebugSection;
+using DebugObject = MachO;
+using DebugSection = MachOSection;
#else
#define __ELF
class ELF;
class ELFSection;
-typedef ELF DebugObject;
-typedef ELFSection DebugSection;
+using DebugObject = ELF;
+using DebugSection = ELFSection;
#endif
class Writer {
@@ -47,58 +47,47 @@ class Writer {
: debug_object_(debug_object),
position_(0),
capacity_(1024),
- buffer_(reinterpret_cast<byte*>(malloc(capacity_))) {
- }
+ buffer_(reinterpret_cast<byte*>(malloc(capacity_))) {}
- ~Writer() {
- free(buffer_);
- }
+ ~Writer() { free(buffer_); }
- uintptr_t position() const {
- return position_;
- }
+ uintptr_t position() const { return position_; }
- template<typename T>
+ template <typename T>
class Slot {
public:
- Slot(Writer* w, uintptr_t offset) : w_(w), offset_(offset) { }
+ Slot(Writer* w, uintptr_t offset) : w_(w), offset_(offset) {}
- T* operator-> () {
- return w_->RawSlotAt<T>(offset_);
- }
+ T* operator->() { return w_->RawSlotAt<T>(offset_); }
- void set(const T& value) {
- *w_->RawSlotAt<T>(offset_) = value;
- }
+ void set(const T& value) { *w_->RawSlotAt<T>(offset_) = value; }
- Slot<T> at(int i) {
- return Slot<T>(w_, offset_ + sizeof(T) * i);
- }
+ Slot<T> at(int i) { return Slot<T>(w_, offset_ + sizeof(T) * i); }
private:
Writer* w_;
uintptr_t offset_;
};
- template<typename T>
+ template <typename T>
void Write(const T& val) {
Ensure(position_ + sizeof(T));
*RawSlotAt<T>(position_) = val;
position_ += sizeof(T);
}
- template<typename T>
+ template <typename T>
Slot<T> SlotAt(uintptr_t offset) {
Ensure(offset + sizeof(T));
return Slot<T>(this, offset);
}
- template<typename T>
+ template <typename T>
Slot<T> CreateSlotHere() {
return CreateSlotsHere<T>(1);
}
- template<typename T>
+ template <typename T>
Slot<T> CreateSlotsHere(uint32_t count) {
uintptr_t slot_position = position_;
position_ += sizeof(T) * count;
@@ -158,9 +147,10 @@ class Writer {
}
private:
- template<typename T> friend class Slot;
+ template <typename T>
+ friend class Slot;
- template<typename T>
+ template <typename T>
T* RawSlotAt(uintptr_t offset) {
DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_);
return reinterpret_cast<T*>(&buffer_[offset]);
@@ -174,7 +164,7 @@ class Writer {
class ELFStringTable;
-template<typename THeader>
+template <typename THeader>
class DebugSectionBase : public ZoneObject {
public:
virtual ~DebugSectionBase() = default;
@@ -191,14 +181,11 @@ class DebugSectionBase : public ZoneObject {
}
}
- virtual bool WriteBodyInternal(Writer* writer) {
- return false;
- }
+ virtual bool WriteBodyInternal(Writer* writer) { return false; }
- typedef THeader Header;
+ using Header = THeader;
};
-
struct MachOSectionHeader {
char sectname[16];
char segname[16];
@@ -218,7 +205,6 @@ struct MachOSectionHeader {
uint32_t reserved2;
};
-
class MachOSection : public DebugSectionBase<MachOSectionHeader> {
public:
enum Type {
@@ -265,7 +251,6 @@ class MachOSection : public DebugSectionBase<MachOSectionHeader> {
uint32_t flags_;
};
-
struct ELFSectionHeader {
uint32_t name;
uint32_t type;
@@ -279,7 +264,6 @@ struct ELFSectionHeader {
uintptr_t entry_size;
};
-
#if defined(__ELF)
class ELFSection : public DebugSectionBase<ELFSectionHeader> {
public:
@@ -303,16 +287,12 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
TYPE_HIUSER = 0xFFFFFFFF
};
- enum Flags {
- FLAG_WRITE = 1,
- FLAG_ALLOC = 2,
- FLAG_EXEC = 4
- };
+ enum Flags { FLAG_WRITE = 1, FLAG_ALLOC = 2, FLAG_EXEC = 4 };
enum SpecialIndexes { INDEX_ABSOLUTE = 0xFFF1 };
ELFSection(const char* name, Type type, uintptr_t align)
- : name_(name), type_(type), align_(align) { }
+ : name_(name), type_(type), align_(align) {}
~ELFSection() override = default;
@@ -351,7 +331,6 @@ class ELFSection : public DebugSectionBase<ELFSectionHeader> {
};
#endif // defined(__ELF)
-
#if defined(__MACH_O)
class MachOTextSection : public MachOSection {
public:
@@ -376,22 +355,17 @@ class MachOTextSection : public MachOSection {
};
#endif // defined(__MACH_O)
-
#if defined(__ELF)
class FullHeaderELFSection : public ELFSection {
public:
- FullHeaderELFSection(const char* name,
- Type type,
- uintptr_t align,
- uintptr_t addr,
- uintptr_t offset,
- uintptr_t size,
+ FullHeaderELFSection(const char* name, Type type, uintptr_t align,
+ uintptr_t addr, uintptr_t offset, uintptr_t size,
uintptr_t flags)
: ELFSection(name, type, align),
addr_(addr),
offset_(offset),
size_(size),
- flags_(flags) { }
+ flags_(flags) {}
protected:
void PopulateHeader(Writer::Slot<Header> header) override {
@@ -409,7 +383,6 @@ class FullHeaderELFSection : public ELFSection {
uintptr_t flags_;
};
-
class ELFStringTable : public ELFSection {
public:
explicit ELFStringTable(const char* name)
@@ -458,7 +431,6 @@ class ELFStringTable : public ELFSection {
uintptr_t size_;
};
-
void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
ELFStringTable* strtab) {
header->name = static_cast<uint32_t>(strtab->Add(name_));
@@ -468,7 +440,6 @@ void ELFSection::PopulateHeader(Writer::Slot<ELFSection::Header> header,
}
#endif // defined(__ELF)
-
#if defined(__MACH_O)
class MachO {
public:
@@ -482,9 +453,8 @@ class MachO {
void Write(Writer* w, uintptr_t code_start, uintptr_t code_size) {
Writer::Slot<MachOHeader> header = WriteHeader(w);
uintptr_t load_command_start = w->position();
- Writer::Slot<MachOSegmentCommand> cmd = WriteSegmentCommand(w,
- code_start,
- code_size);
+ Writer::Slot<MachOSegmentCommand> cmd =
+ WriteSegmentCommand(w, code_start, code_size);
WriteSections(w, cmd, header, load_command_start);
}
@@ -528,18 +498,17 @@ class MachO {
LC_SEGMENT_64 = 0x00000019u
};
-
Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
DCHECK_EQ(w->position(), 0);
Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
#if V8_TARGET_ARCH_IA32
header->magic = 0xFEEDFACEu;
- header->cputype = 7; // i386
+ header->cputype = 7; // i386
header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
#elif V8_TARGET_ARCH_X64
header->magic = 0xFEEDFACFu;
header->cputype = 7 | 0x01000000; // i386 | 64-bit ABI
- header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
+ header->cpusubtype = 3; // CPU_SUBTYPE_I386_ALL
header->reserved = 0;
#else
#error Unsupported target architecture.
@@ -551,7 +520,6 @@ class MachO {
return header;
}
-
Writer::Slot<MachOSegmentCommand> WriteSegmentCommand(Writer* w,
uintptr_t code_start,
uintptr_t code_size) {
@@ -571,14 +539,12 @@ class MachO {
cmd->flags = 0;
cmd->nsects = static_cast<uint32_t>(sections_.size());
memset(cmd->segname, 0, 16);
- cmd->cmdsize = sizeof(MachOSegmentCommand) + sizeof(MachOSection::Header) *
- cmd->nsects;
+ cmd->cmdsize = sizeof(MachOSegmentCommand) +
+ sizeof(MachOSection::Header) * cmd->nsects;
return cmd;
}
-
- void WriteSections(Writer* w,
- Writer::Slot<MachOSegmentCommand> cmd,
+ void WriteSections(Writer* w, Writer::Slot<MachOSegmentCommand> cmd,
Writer::Slot<MachOHeader> header,
uintptr_t load_command_start) {
Writer::Slot<MachOSection::Header> headers =
@@ -600,7 +566,6 @@ class MachO {
};
#endif // defined(__MACH_O)
-
#if defined(__ELF)
class ELF {
public:
@@ -641,15 +606,14 @@ class ELF {
uint16_t sht_strtab_index;
};
-
void WriteHeader(Writer* w) {
DCHECK_EQ(w->position(), 0);
Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM)
const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 1, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0};
-#elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
- (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT || \
+ V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
const uint8_t ident[16] = {0x7F, 'E', 'L', 'F', 2, 1, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0};
#elif V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN && V8_OS_LINUX
@@ -763,39 +727,27 @@ class ELFSymbol {
BIND_HIPROC = 15
};
- ELFSymbol(const char* name,
- uintptr_t value,
- uintptr_t size,
- Binding binding,
- Type type,
- uint16_t section)
+ ELFSymbol(const char* name, uintptr_t value, uintptr_t size, Binding binding,
+ Type type, uint16_t section)
: name(name),
value(value),
size(size),
info((binding << 4) | type),
other(0),
- section(section) {
- }
+ section(section) {}
- Binding binding() const {
- return static_cast<Binding>(info >> 4);
- }
-#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
+ Binding binding() const { return static_cast<Binding>(info >> 4); }
+#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
(V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT))
struct SerializedLayout {
- SerializedLayout(uint32_t name,
- uintptr_t value,
- uintptr_t size,
- Binding binding,
- Type type,
- uint16_t section)
+ SerializedLayout(uint32_t name, uintptr_t value, uintptr_t size,
+ Binding binding, Type type, uint16_t section)
: name(name),
value(value),
size(size),
info((binding << 4) | type),
other(0),
- section(section) {
- }
+ section(section) {}
uint32_t name;
uintptr_t value;
@@ -804,22 +756,17 @@ class ELFSymbol {
uint8_t other;
uint16_t section;
};
-#elif(V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT) || \
- (V8_TARGET_ARCH_PPC64 && V8_OS_LINUX) || V8_TARGET_ARCH_S390X
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT || \
+ V8_TARGET_ARCH_PPC64 && V8_OS_LINUX || V8_TARGET_ARCH_S390X
struct SerializedLayout {
- SerializedLayout(uint32_t name,
- uintptr_t value,
- uintptr_t size,
- Binding binding,
- Type type,
- uint16_t section)
+ SerializedLayout(uint32_t name, uintptr_t value, uintptr_t size,
+ Binding binding, Type type, uint16_t section)
: name(name),
info((binding << 4) | type),
other(0),
section(section),
value(value),
- size(size) {
- }
+ size(size) {}
uint32_t name;
uint8_t info;
@@ -849,7 +796,6 @@ class ELFSymbol {
uint16_t section;
};
-
class ELFSymbolTable : public ELFSection {
public:
ELFSymbolTable(const char* name, Zone* zone)
@@ -872,12 +818,8 @@ class ELFSymbolTable : public ELFSection {
ELFStringTable* strtab =
static_cast<ELFStringTable*>(w->debug_object()->SectionAt(index() + 1));
strtab->AttachWriter(w);
- symbols.at(0).set(ELFSymbol::SerializedLayout(0,
- 0,
- 0,
- ELFSymbol::BIND_LOCAL,
- ELFSymbol::TYPE_NOTYPE,
- 0));
+ symbols.at(0).set(ELFSymbol::SerializedLayout(
+ 0, 0, 0, ELFSymbol::BIND_LOCAL, ELFSymbol::TYPE_NOTYPE, 0));
WriteSymbolsList(&locals_, symbols.at(1), strtab);
WriteSymbolsList(&globals_,
symbols.at(static_cast<uint32_t>(locals_.size() + 1)),
@@ -917,7 +859,6 @@ class ELFSymbolTable : public ELFSection {
};
#endif // defined(__ELF)
-
class LineInfo : public Malloced {
public:
void SetPosition(intptr_t pc, int pos, bool is_statement) {
@@ -956,14 +897,12 @@ class CodeDescription {
LineInfo* lineinfo)
: name_(name), code_(code), shared_info_(shared), lineinfo_(lineinfo) {}
- const char* name() const {
- return name_;
- }
+ const char* name() const { return name_; }
LineInfo* lineinfo() const { return lineinfo_; }
bool is_function() const {
- Code::Kind kind = code_->kind();
+ Code::Kind kind = code_.kind();
return kind == Code::OPTIMIZED_FUNCTION;
}
@@ -971,26 +910,24 @@ class CodeDescription {
ScopeInfo scope_info() const {
DCHECK(has_scope_info());
- return shared_info_->scope_info();
+ return shared_info_.scope_info();
}
uintptr_t CodeStart() const {
- return static_cast<uintptr_t>(code_->InstructionStart());
+ return static_cast<uintptr_t>(code_.InstructionStart());
}
uintptr_t CodeEnd() const {
- return static_cast<uintptr_t>(code_->InstructionEnd());
+ return static_cast<uintptr_t>(code_.InstructionEnd());
}
- uintptr_t CodeSize() const {
- return CodeEnd() - CodeStart();
- }
+ uintptr_t CodeSize() const { return CodeEnd() - CodeStart(); }
bool has_script() {
- return !shared_info_.is_null() && shared_info_->script()->IsScript();
+ return !shared_info_.is_null() && shared_info_.script().IsScript();
}
- Script script() { return Script::cast(shared_info_->script()); }
+ Script script() { return Script::cast(shared_info_.script()); }
bool IsLineInfoAvailable() { return lineinfo_ != nullptr; }
@@ -1008,7 +945,7 @@ class CodeDescription {
std::unique_ptr<char[]> GetFilename() {
if (!shared_info_.is_null()) {
- return String::cast(script()->name())->ToCString();
+ return String::cast(script().name()).ToCString();
} else {
std::unique_ptr<char[]> result(new char[1]);
result[0] = 0;
@@ -1018,7 +955,7 @@ class CodeDescription {
int GetScriptLineNumber(int pos) {
if (!shared_info_.is_null()) {
- return script()->GetLineNumber(pos) + 1;
+ return script().GetLineNumber(pos) + 1;
} else {
return 0;
}
@@ -1037,8 +974,8 @@ class CodeDescription {
#if defined(__ELF)
static void CreateSymbolsTable(CodeDescription* desc, Zone* zone, ELF* elf,
size_t text_section_index) {
- ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
- ELFStringTable* strtab = new(zone) ELFStringTable(".strtab");
+ ELFSymbolTable* symtab = new (zone) ELFSymbolTable(".symtab", zone);
+ ELFStringTable* strtab = new (zone) ELFStringTable(".strtab");
// Symbol table should be followed by the linked string table.
elf->AddSection(symtab);
@@ -1053,19 +990,17 @@ static void CreateSymbolsTable(CodeDescription* desc, Zone* zone, ELF* elf,
}
#endif // defined(__ELF)
-
class DebugInfoSection : public DebugSection {
public:
explicit DebugInfoSection(CodeDescription* desc)
#if defined(__ELF)
: ELFSection(".debug_info", TYPE_PROGBITS, 1),
#else
- : MachOSection("__debug_info",
- "__DWARF",
- 1,
+ : MachOSection("__debug_info", "__DWARF", 1,
MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
#endif
- desc_(desc) { }
+ desc_(desc) {
+ }
// DWARF2 standard
enum DWARF2LocationOp {
@@ -1104,10 +1039,7 @@ class DebugInfoSection : public DebugSection {
DW_OP_fbreg = 0x91 // 1 param: SLEB128 offset
};
- enum DWARF2Encoding {
- DW_ATE_ADDRESS = 0x1,
- DW_ATE_SIGNED = 0x5
- };
+ enum DWARF2Encoding { DW_ATE_ADDRESS = 0x1, DW_ATE_SIGNED = 0x5 };
bool WriteBodyInternal(Writer* w) override {
uintptr_t cu_start = w->position();
@@ -1155,14 +1087,14 @@ class DebugInfoSection : public DebugSection {
#endif
fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));
- int params = scope->ParameterCount();
- int context_slots = scope->ContextLocalCount();
+ int params = scope.ParameterCount();
+ int context_slots = scope.ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
int current_abbreviation = 4;
EmbeddedVector<char, 256> buffer;
- StringBuilder builder(buffer.start(), buffer.length());
+ StringBuilder builder(buffer.begin(), buffer.length());
for (int param = 0; param < params; ++param) {
w->WriteULEB128(current_abbreviation++);
@@ -1193,9 +1125,7 @@ class DebugInfoSection : public DebugSection {
w->WriteULEB128(current_abbreviation++);
w->WriteString(".native_context");
- for (int context_slot = 0;
- context_slot < context_slots;
- ++context_slot) {
+ for (int context_slot = 0; context_slot < context_slots; ++context_slot) {
w->WriteULEB128(current_abbreviation++);
builder.Reset();
builder.AddFormatted("context_slot%d", context_slot + internal_slots);
@@ -1236,19 +1166,17 @@ class DebugInfoSection : public DebugSection {
CodeDescription* desc_;
};
-
class DebugAbbrevSection : public DebugSection {
public:
explicit DebugAbbrevSection(CodeDescription* desc)
#ifdef __ELF
: ELFSection(".debug_abbrev", TYPE_PROGBITS, 1),
#else
- : MachOSection("__debug_abbrev",
- "__DWARF",
- 1,
+ : MachOSection("__debug_abbrev", "__DWARF", 1,
MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
#endif
- desc_(desc) { }
+ desc_(desc) {
+ }
// DWARF2 standard, figure 14.
enum DWARF2Tags {
@@ -1262,10 +1190,7 @@ class DebugAbbrevSection : public DebugSection {
};
// DWARF2 standard, figure 16.
- enum DWARF2ChildrenDetermination {
- DW_CHILDREN_NO = 0,
- DW_CHILDREN_YES = 1
- };
+ enum DWARF2ChildrenDetermination { DW_CHILDREN_NO = 0, DW_CHILDREN_YES = 1 };
// DWARF standard, figure 17.
enum DWARF2Attribute {
@@ -1292,10 +1217,8 @@ class DebugAbbrevSection : public DebugSection {
DW_FORM_REF4 = 0x13
};
- void WriteVariableAbbreviation(Writer* w,
- int abbreviation_code,
- bool has_value,
- bool is_parameter) {
+ void WriteVariableAbbreviation(Writer* w, int abbreviation_code,
+ bool has_value, bool is_parameter) {
w->WriteULEB128(abbreviation_code);
w->WriteULEB128(is_parameter ? DW_TAG_FORMAL_PARAMETER : DW_TAG_VARIABLE);
w->Write<uint8_t>(DW_CHILDREN_NO);
@@ -1331,8 +1254,8 @@ class DebugAbbrevSection : public DebugSection {
if (extra_info) {
ScopeInfo scope = desc_->scope_info();
- int params = scope->ParameterCount();
- int context_slots = scope->ContextLocalCount();
+ int params = scope.ParameterCount();
+ int context_slots = scope.ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
// Total children is params + context_slots + internal_slots + 2
@@ -1368,15 +1291,12 @@ class DebugAbbrevSection : public DebugSection {
WriteVariableAbbreviation(w, current_abbreviation++, true, true);
}
- for (int internal_slot = 0;
- internal_slot < internal_slots;
+ for (int internal_slot = 0; internal_slot < internal_slots;
++internal_slot) {
WriteVariableAbbreviation(w, current_abbreviation++, false, false);
}
- for (int context_slot = 0;
- context_slot < context_slots;
- ++context_slot) {
+ for (int context_slot = 0; context_slot < context_slots; ++context_slot) {
WriteVariableAbbreviation(w, current_abbreviation++, false, false);
}
@@ -1397,19 +1317,17 @@ class DebugAbbrevSection : public DebugSection {
CodeDescription* desc_;
};
-
class DebugLineSection : public DebugSection {
public:
explicit DebugLineSection(CodeDescription* desc)
#ifdef __ELF
: ELFSection(".debug_line", TYPE_PROGBITS, 1),
#else
- : MachOSection("__debug_line",
- "__DWARF",
- 1,
+ : MachOSection("__debug_line", "__DWARF", 1,
MachOSection::S_REGULAR | MachOSection::S_ATTR_DEBUG),
#endif
- desc_(desc) { }
+ desc_(desc) {
+ }
// DWARF2 standard, figure 34.
enum DWARF2Opcodes {
@@ -1442,22 +1360,22 @@ class DebugLineSection : public DebugSection {
w->Write<uint16_t>(2); // Field version.
Writer::Slot<uint32_t> prologue_length = w->CreateSlotHere<uint32_t>();
uintptr_t prologue_start = w->position();
- w->Write<uint8_t>(1); // Field minimum_instruction_length.
- w->Write<uint8_t>(1); // Field default_is_stmt.
- w->Write<int8_t>(line_base); // Field line_base.
- w->Write<uint8_t>(line_range); // Field line_range.
+ w->Write<uint8_t>(1); // Field minimum_instruction_length.
+ w->Write<uint8_t>(1); // Field default_is_stmt.
+ w->Write<int8_t>(line_base); // Field line_base.
+ w->Write<uint8_t>(line_range); // Field line_range.
w->Write<uint8_t>(opcode_base); // Field opcode_base.
- w->Write<uint8_t>(0); // DW_LNS_COPY operands count.
- w->Write<uint8_t>(1); // DW_LNS_ADVANCE_PC operands count.
- w->Write<uint8_t>(1); // DW_LNS_ADVANCE_LINE operands count.
- w->Write<uint8_t>(1); // DW_LNS_SET_FILE operands count.
- w->Write<uint8_t>(1); // DW_LNS_SET_COLUMN operands count.
- w->Write<uint8_t>(0); // DW_LNS_NEGATE_STMT operands count.
- w->Write<uint8_t>(0); // Empty include_directories sequence.
+ w->Write<uint8_t>(0); // DW_LNS_COPY operands count.
+ w->Write<uint8_t>(1); // DW_LNS_ADVANCE_PC operands count.
+ w->Write<uint8_t>(1); // DW_LNS_ADVANCE_LINE operands count.
+ w->Write<uint8_t>(1); // DW_LNS_SET_FILE operands count.
+ w->Write<uint8_t>(1); // DW_LNS_SET_COLUMN operands count.
+ w->Write<uint8_t>(0); // DW_LNS_NEGATE_STMT operands count.
+ w->Write<uint8_t>(0); // Empty include_directories sequence.
w->WriteString(desc_->GetFilename().get()); // File name.
- w->WriteULEB128(0); // Current directory.
- w->WriteULEB128(0); // Unknown modification time.
- w->WriteULEB128(0); // Unknown file size.
+ w->WriteULEB128(0); // Current directory.
+ w->WriteULEB128(0); // Unknown modification time.
+ w->WriteULEB128(0); // Unknown file size.
w->Write<uint8_t>(0);
prologue_length.set(static_cast<uint32_t>(w->position() - prologue_start));
@@ -1478,7 +1396,7 @@ class DebugLineSection : public DebugSection {
// Reduce bloating in the debug line table by removing duplicate line
// entries (per DWARF2 standard).
- intptr_t new_line = desc_->GetScriptLineNumber(info->pos_);
+ intptr_t new_line = desc_->GetScriptLineNumber(info->pos_);
if (new_line == line) {
continue;
}
@@ -1503,8 +1421,8 @@ class DebugLineSection : public DebugSection {
intptr_t line_diff = new_line - line;
// Compute special opcode (see DWARF 2.0 standard)
- intptr_t special_opcode = (line_diff - line_base) +
- (line_range * pc_diff) + opcode_base;
+ intptr_t special_opcode =
+ (line_diff - line_base) + (line_range * pc_diff) + opcode_base;
// If special_opcode is less than or equal to 255, it can be used as a
// special opcode. If line_diff is larger than the max line increment
@@ -1536,8 +1454,7 @@ class DebugLineSection : public DebugSection {
}
private:
- void WriteExtendedOpcode(Writer* w,
- DWARF2ExtendedOpcode op,
+ void WriteExtendedOpcode(Writer* w, DWARF2ExtendedOpcode op,
size_t operands_size) {
w->Write<uint8_t>(0);
w->WriteULEB128(operands_size + 1);
@@ -1558,7 +1475,6 @@ class DebugLineSection : public DebugSection {
CodeDescription* desc_;
};
-
#if V8_TARGET_ARCH_X64
class UnwindInfoSection : public DebugSection {
@@ -1574,8 +1490,7 @@ class UnwindInfoSection : public DebugSection {
void WriteFDEStateAfterRBPSet(Writer* w);
void WriteFDEStateAfterRBPPop(Writer* w);
- void WriteLength(Writer* w,
- Writer::Slot<uint32_t>* length_slot,
+ void WriteLength(Writer* w, Writer::Slot<uint32_t>* length_slot,
int initial_position);
private:
@@ -1629,7 +1544,6 @@ class UnwindInfoSection : public DebugSection {
};
};
-
void UnwindInfoSection::WriteLength(Writer* w,
Writer::Slot<uint32_t>* length_slot,
int initial_position) {
@@ -1645,7 +1559,6 @@ void UnwindInfoSection::WriteLength(Writer* w,
length_slot->set(static_cast<uint32_t>(w->position() - initial_position));
}
-
UnwindInfoSection::UnwindInfoSection(CodeDescription* desc)
#ifdef __ELF
: ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
@@ -1653,7 +1566,8 @@ UnwindInfoSection::UnwindInfoSection(CodeDescription* desc)
: MachOSection("__eh_frame", "__TEXT", sizeof(uintptr_t),
MachOSection::S_REGULAR),
#endif
- desc_(desc) { }
+ desc_(desc) {
+}
int UnwindInfoSection::WriteCIE(Writer* w) {
Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
@@ -1674,7 +1588,6 @@ int UnwindInfoSection::WriteCIE(Writer* w) {
return cie_position;
}
-
void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) {
// The only FDE for this function. The CFA is the current RBP.
Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
@@ -1692,7 +1605,6 @@ void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) {
WriteLength(w, &fde_length_slot, fde_position);
}
-
void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
// The first state, just after the control has been transferred to the the
// function.
@@ -1719,7 +1631,6 @@ void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_PUSH));
}
-
void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) {
// The second state, just after RBP has been pushed.
@@ -1740,7 +1651,6 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) {
desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_SET));
}
-
void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) {
// The third state, after the RBP has been set.
@@ -1755,7 +1665,6 @@ void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) {
desc_->GetStackStateStartAddress(CodeDescription::POST_RBP_POP));
}
-
void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
// The fourth (final) state. The RBP has been popped (just before issuing a
// return).
@@ -1775,80 +1684,69 @@ void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
w->Write<uint64_t>(desc_->CodeEnd());
}
-
bool UnwindInfoSection::WriteBodyInternal(Writer* w) {
uint32_t cie_position = WriteCIE(w);
WriteFDE(w, cie_position);
return true;
}
-
#endif // V8_TARGET_ARCH_X64
-static void CreateDWARFSections(CodeDescription* desc,
- Zone* zone,
+static void CreateDWARFSections(CodeDescription* desc, Zone* zone,
DebugObject* obj) {
if (desc->IsLineInfoAvailable()) {
- obj->AddSection(new(zone) DebugInfoSection(desc));
- obj->AddSection(new(zone) DebugAbbrevSection(desc));
- obj->AddSection(new(zone) DebugLineSection(desc));
+ obj->AddSection(new (zone) DebugInfoSection(desc));
+ obj->AddSection(new (zone) DebugAbbrevSection(desc));
+ obj->AddSection(new (zone) DebugLineSection(desc));
}
#if V8_TARGET_ARCH_X64
- obj->AddSection(new(zone) UnwindInfoSection(desc));
+ obj->AddSection(new (zone) UnwindInfoSection(desc));
#endif
}
-
// -------------------------------------------------------------------
// Binary GDB JIT Interface as described in
// http://sourceware.org/gdb/onlinedocs/gdb/Declarations.html
extern "C" {
- typedef enum {
- JIT_NOACTION = 0,
- JIT_REGISTER_FN,
- JIT_UNREGISTER_FN
- } JITAction;
-
- struct JITCodeEntry {
- JITCodeEntry* next_;
- JITCodeEntry* prev_;
- Address symfile_addr_;
- uint64_t symfile_size_;
- };
+enum JITAction { JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN };
- struct JITDescriptor {
- uint32_t version_;
- uint32_t action_flag_;
- JITCodeEntry* relevant_entry_;
- JITCodeEntry* first_entry_;
- };
+struct JITCodeEntry {
+ JITCodeEntry* next_;
+ JITCodeEntry* prev_;
+ Address symfile_addr_;
+ uint64_t symfile_size_;
+};
- // GDB will place breakpoint into this function.
- // To prevent GCC from inlining or removing it we place noinline attribute
- // and inline assembler statement inside.
- void __attribute__((noinline)) __jit_debug_register_code() {
- __asm__("");
- }
+struct JITDescriptor {
+ uint32_t version_;
+ uint32_t action_flag_;
+ JITCodeEntry* relevant_entry_;
+ JITCodeEntry* first_entry_;
+};
- // GDB will inspect contents of this descriptor.
- // Static initialization is necessary to prevent GDB from seeing
- // uninitialized descriptor.
- JITDescriptor __jit_debug_descriptor = {1, 0, nullptr, nullptr};
+// GDB will place breakpoint into this function.
+// To prevent GCC from inlining or removing it we place noinline attribute
+// and inline assembler statement inside.
+void __attribute__((noinline)) __jit_debug_register_code() { __asm__(""); }
+
+// GDB will inspect contents of this descriptor.
+// Static initialization is necessary to prevent GDB from seeing
+// uninitialized descriptor.
+JITDescriptor __jit_debug_descriptor = {1, 0, nullptr, nullptr};
#ifdef OBJECT_PRINT
- void __gdb_print_v8_object(Object object) {
- StdoutStream os;
- object->Print(os);
- os << std::flush;
- }
+void __gdb_print_v8_object(Object object) {
+ StdoutStream os;
+ object.Print(os);
+ os << std::flush;
+}
#endif
}
-
static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
uintptr_t symfile_size) {
- JITCodeEntry* entry = static_cast<JITCodeEntry*>(
- malloc(sizeof(JITCodeEntry) + symfile_size));
+ JITCodeEntry* entry =
+ static_cast<JITCodeEntry*>(malloc(sizeof(JITCodeEntry) + symfile_size));
entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1);
entry->symfile_size_ = symfile_size;
@@ -1860,23 +1758,18 @@ static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
return entry;
}
-
-static void DestroyCodeEntry(JITCodeEntry* entry) {
- free(entry);
-}
-
+static void DestroyCodeEntry(JITCodeEntry* entry) { free(entry); }
static void RegisterCodeEntry(JITCodeEntry* entry) {
entry->next_ = __jit_debug_descriptor.first_entry_;
if (entry->next_ != nullptr) entry->next_->prev_ = entry;
- __jit_debug_descriptor.first_entry_ =
- __jit_debug_descriptor.relevant_entry_ = entry;
+ __jit_debug_descriptor.first_entry_ = __jit_debug_descriptor.relevant_entry_ =
+ entry;
__jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
__jit_debug_register_code();
}
-
static void UnregisterCodeEntry(JITCodeEntry* entry) {
if (entry->prev_ != nullptr) {
entry->prev_->next_ = entry->next_;
@@ -1893,16 +1786,14 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
__jit_debug_register_code();
}
-
static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
#ifdef __MACH_O
Zone zone(isolate->allocator(), ZONE_NAME);
MachO mach_o(&zone);
Writer w(&mach_o);
- mach_o.AddSection(new(&zone) MachOTextSection(kCodeAlignment,
- desc->CodeStart(),
- desc->CodeSize()));
+ mach_o.AddSection(new (&zone) MachOTextSection(
+ kCodeAlignment, desc->CodeStart(), desc->CodeSize()));
CreateDWARFSections(desc, &zone, &mach_o);
@@ -1926,15 +1817,14 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
return CreateCodeEntry(reinterpret_cast<Address>(w.buffer()), w.position());
}
-
struct AddressRange {
Address start;
Address end;
};
struct SplayTreeConfig {
- typedef AddressRange Key;
- typedef JITCodeEntry* Value;
+ using Key = AddressRange;
+ using Value = JITCodeEntry*;
static const AddressRange kNoKey;
static Value NoValue() { return nullptr; }
static int Compare(const AddressRange& a, const AddressRange& b) {
@@ -1946,7 +1836,7 @@ struct SplayTreeConfig {
};
const AddressRange SplayTreeConfig::kNoKey = {0, 0};
-typedef SplayTree<SplayTreeConfig> CodeMap;
+using CodeMap = SplayTree<SplayTreeConfig>;
static CodeMap* GetCodeMap() {
static CodeMap* code_map = nullptr;
@@ -1954,7 +1844,6 @@ static CodeMap* GetCodeMap() {
return code_map;
}
-
static uint32_t HashCodeAddress(Address addr) {
static const uintptr_t kGoldenRatio = 2654435761u;
return static_cast<uint32_t>((addr >> kCodeAlignmentBits) * kGoldenRatio);
@@ -1968,7 +1857,6 @@ static base::HashMap* GetLineMap() {
return line_map;
}
-
static void PutLineInfo(Address addr, LineInfo* info) {
base::HashMap* line_map = GetLineMap();
base::HashMap::Entry* e = line_map->LookupOrInsert(
@@ -1977,14 +1865,12 @@ static void PutLineInfo(Address addr, LineInfo* info) {
e->value = info;
}
-
static LineInfo* GetLineInfo(Address addr) {
void* value = GetLineMap()->Remove(reinterpret_cast<void*>(addr),
HashCodeAddress(addr));
return static_cast<LineInfo*>(value);
}
-
static void AddUnwindInfo(CodeDescription* desc) {
#if V8_TARGET_ARCH_X64
if (desc->is_function()) {
@@ -2021,10 +1907,8 @@ static void AddUnwindInfo(CodeDescription* desc) {
#endif // V8_TARGET_ARCH_X64
}
-
static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
-
// Remove entries from the splay tree that intersect the given address range,
// and deregister them from GDB.
static void RemoveJITCodeEntries(CodeMap* map, const AddressRange& range) {
@@ -2055,7 +1939,6 @@ static void RemoveJITCodeEntries(CodeMap* map, const AddressRange& range) {
}
}
-
// Insert the entry into the splay tree and register it with GDB.
static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
JITCodeEntry* entry, bool dump_if_enabled,
@@ -2086,8 +1969,8 @@ static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
CodeMap* code_map = GetCodeMap();
AddressRange range;
- range.start = code->address();
- range.end = code->address() + code->CodeSize();
+ range.start = code.address();
+ range.end = code.address() + code.CodeSize();
RemoveJITCodeEntries(code_map, range);
CodeDescription code_desc(name, code, shared, lineinfo);
@@ -2098,7 +1981,7 @@ static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
}
AddUnwindInfo(&code_desc);
- Isolate* isolate = code->GetIsolate();
+ Isolate* isolate = code.GetIsolate();
JITCodeEntry* entry = CreateELFObject(&code_desc, isolate);
delete lineinfo;
@@ -2128,7 +2011,7 @@ void EventHandler(const v8::JitCodeEvent* event) {
Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(addr);
LineInfo* lineinfo = GetLineInfo(addr);
EmbeddedVector<char, 256> buffer;
- StringBuilder builder(buffer.start(), buffer.length());
+ StringBuilder builder(buffer.begin(), buffer.length());
builder.AddSubstring(event->name.str, static_cast<int>(event->name.len));
// It's called UnboundScript in the API but it's a SharedFunctionInfo.
SharedFunctionInfo shared = event->script.IsEmpty()
@@ -2140,7 +2023,6 @@ void EventHandler(const v8::JitCodeEvent* event) {
case v8::JitCodeEvent::CODE_MOVED:
// Enabling the GDB JIT interface should disable code compaction.
UNREACHABLE();
- break;
case v8::JitCodeEvent::CODE_REMOVED:
// Do nothing. Instead, adding code causes eviction of any entry whose
// address range intersects the address range of the added code.
@@ -2169,3 +2051,6 @@ void EventHandler(const v8::JitCodeEvent* event) {
} // namespace GDBJITInterface
} // namespace internal
} // namespace v8
+
+#undef __MACH_O
+#undef __ELF
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/diagnostics/gdb-jit.h
index 7ffc6459fb..e1bc852f0a 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/diagnostics/gdb-jit.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_GDB_JIT_H_
-#define V8_GDB_JIT_H_
+#ifndef V8_DIAGNOSTICS_GDB_JIT_H_
+#define V8_DIAGNOSTICS_GDB_JIT_H_
#include "include/v8.h"
@@ -36,4 +36,4 @@ void EventHandler(const v8::JitCodeEvent* event);
} // namespace internal
} // namespace v8
-#endif // V8_GDB_JIT_H_
+#endif // V8_DIAGNOSTICS_GDB_JIT_H_
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index b4c2db191e..534898fdf5 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -9,18 +9,13 @@
#if V8_TARGET_ARCH_IA32
#include "src/base/compiler-specific.h"
-#include "src/disasm.h"
-#include "src/ia32/sse-instr.h"
-#include "src/utils.h"
+#include "src/codegen/ia32/sse-instr.h"
+#include "src/diagnostics/disasm.h"
+#include "src/utils/utils.h"
namespace disasm {
-enum OperandOrder {
- UNSET_OP_ORDER = 0,
- REG_OPER_OP_ORDER,
- OPER_REG_OP_ORDER
-};
-
+enum OperandOrder { UNSET_OP_ORDER = 0, REG_OPER_OP_ORDER, OPER_REG_OP_ORDER };
//------------------------------------------------------------------
// Tables
@@ -47,42 +42,24 @@ static const ByteMnemonic two_operands_instr[] = {
{-1, "", UNSET_OP_ORDER}};
static const ByteMnemonic zero_operands_instr[] = {
- {0xC3, "ret", UNSET_OP_ORDER},
- {0xC9, "leave", UNSET_OP_ORDER},
- {0x90, "nop", UNSET_OP_ORDER},
- {0xF4, "hlt", UNSET_OP_ORDER},
- {0xCC, "int3", UNSET_OP_ORDER},
- {0x60, "pushad", UNSET_OP_ORDER},
- {0x61, "popad", UNSET_OP_ORDER},
- {0x9C, "pushfd", UNSET_OP_ORDER},
- {0x9D, "popfd", UNSET_OP_ORDER},
- {0x9E, "sahf", UNSET_OP_ORDER},
- {0x99, "cdq", UNSET_OP_ORDER},
- {0x9B, "fwait", UNSET_OP_ORDER},
- {0xFC, "cld", UNSET_OP_ORDER},
- {0xAB, "stos", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
-
-static const ByteMnemonic call_jump_instr[] = {
- {0xE8, "call", UNSET_OP_ORDER},
- {0xE9, "jmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
+ {0xC3, "ret", UNSET_OP_ORDER}, {0xC9, "leave", UNSET_OP_ORDER},
+ {0x90, "nop", UNSET_OP_ORDER}, {0xF4, "hlt", UNSET_OP_ORDER},
+ {0xCC, "int3", UNSET_OP_ORDER}, {0x60, "pushad", UNSET_OP_ORDER},
+ {0x61, "popad", UNSET_OP_ORDER}, {0x9C, "pushfd", UNSET_OP_ORDER},
+ {0x9D, "popfd", UNSET_OP_ORDER}, {0x9E, "sahf", UNSET_OP_ORDER},
+ {0x99, "cdq", UNSET_OP_ORDER}, {0x9B, "fwait", UNSET_OP_ORDER},
+ {0xFC, "cld", UNSET_OP_ORDER}, {0xAB, "stos", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}};
+static const ByteMnemonic call_jump_instr[] = {{0xE8, "call", UNSET_OP_ORDER},
+ {0xE9, "jmp", UNSET_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}};
static const ByteMnemonic short_immediate_instr[] = {
- {0x05, "add", UNSET_OP_ORDER},
- {0x0D, "or", UNSET_OP_ORDER},
- {0x15, "adc", UNSET_OP_ORDER},
- {0x25, "and", UNSET_OP_ORDER},
- {0x2D, "sub", UNSET_OP_ORDER},
- {0x35, "xor", UNSET_OP_ORDER},
- {0x3D, "cmp", UNSET_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
+ {0x05, "add", UNSET_OP_ORDER}, {0x0D, "or", UNSET_OP_ORDER},
+ {0x15, "adc", UNSET_OP_ORDER}, {0x25, "and", UNSET_OP_ORDER},
+ {0x2D, "sub", UNSET_OP_ORDER}, {0x35, "xor", UNSET_OP_ORDER},
+ {0x3D, "cmp", UNSET_OP_ORDER}, {-1, "", UNSET_OP_ORDER}};
// Generally we don't want to generate these because they are subject to partial
// register stalls. They are included for completeness and because the cmp
@@ -95,28 +72,22 @@ static ByteMnemonic byte_immediate_instr[] = {{0x0C, "or", UNSET_OP_ORDER},
{-1, "", UNSET_OP_ORDER}};
static const char* const jump_conditional_mnem[] = {
- /*0*/ "jo", "jno", "jc", "jnc",
- /*4*/ "jz", "jnz", "jna", "ja",
- /*8*/ "js", "jns", "jpe", "jpo",
- /*12*/ "jl", "jnl", "jng", "jg"
-};
-
+ /*0*/ "jo", "jno", "jc", "jnc",
+ /*4*/ "jz", "jnz", "jna", "ja",
+ /*8*/ "js", "jns", "jpe", "jpo",
+ /*12*/ "jl", "jnl", "jng", "jg"};
static const char* const set_conditional_mnem[] = {
- /*0*/ "seto", "setno", "setc", "setnc",
- /*4*/ "setz", "setnz", "setna", "seta",
- /*8*/ "sets", "setns", "setpe", "setpo",
- /*12*/ "setl", "setnl", "setng", "setg"
-};
-
+ /*0*/ "seto", "setno", "setc", "setnc",
+ /*4*/ "setz", "setnz", "setna", "seta",
+ /*8*/ "sets", "setns", "setpe", "setpo",
+ /*12*/ "setl", "setnl", "setng", "setg"};
static const char* const conditional_move_mnem[] = {
- /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
- /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
- /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
- /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
-};
-
+ /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
+ /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
+ /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
+ /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"};
enum InstructionType {
NO_INSTR,
@@ -130,14 +101,12 @@ enum InstructionType {
BYTE_IMMEDIATE_INSTR
};
-
struct InstructionDesc {
const char* mnem;
InstructionType type;
OperandOrder op_order_;
};
-
class InstructionTable {
public:
InstructionTable();
@@ -152,20 +121,16 @@ class InstructionTable {
void Clear();
void Init();
void CopyTable(const ByteMnemonic bm[], InstructionType type);
- void SetTableRange(InstructionType type,
- byte start,
- byte end,
+ void SetTableRange(InstructionType type, byte start, byte end,
const char* mnem);
void AddJumpConditionalShort();
};
-
InstructionTable::InstructionTable() {
Clear();
Init();
}
-
void InstructionTable::Clear() {
for (int i = 0; i < 256; i++) {
instructions_[i].mnem = "";
@@ -174,7 +139,6 @@ void InstructionTable::Clear() {
}
}
-
void InstructionTable::Init() {
CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
@@ -190,7 +154,6 @@ void InstructionTable::Init() {
SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
}
-
void InstructionTable::CopyTable(const ByteMnemonic bm[],
InstructionType type) {
for (int i = 0; bm[i].b >= 0; i++) {
@@ -202,10 +165,7 @@ void InstructionTable::CopyTable(const ByteMnemonic bm[],
}
}
-
-void InstructionTable::SetTableRange(InstructionType type,
- byte start,
- byte end,
+void InstructionTable::SetTableRange(InstructionType type, byte start, byte end,
const char* mnem) {
for (byte b = start; b <= end; b++) {
InstructionDesc* id = &instructions_[b];
@@ -215,7 +175,6 @@ void InstructionTable::SetTableRange(InstructionType type,
}
}
-
void InstructionTable::AddJumpConditionalShort() {
for (byte b = 0x70; b <= 0x7F; b++) {
InstructionDesc* id = &instructions_[b];
@@ -225,7 +184,6 @@ void InstructionTable::AddJumpConditionalShort() {
}
}
-
// The IA32 disassembler implementation.
class DisassemblerIA32 {
public:
@@ -269,7 +227,6 @@ class DisassemblerIA32 {
edi = 7
};
-
enum ShiftOpcodeExtension {
kROL = 0,
kROR = 1,
@@ -342,22 +299,18 @@ class DisassemblerIA32 {
return converter_.NameOfCPURegister(reg);
}
-
const char* NameOfByteCPURegister(int reg) const {
return converter_.NameOfByteCPURegister(reg);
}
-
const char* NameOfXMMRegister(int reg) const {
return converter_.NameOfXMMRegister(reg);
}
-
const char* NameOfAddress(byte* addr) const {
return converter_.NameOfAddress(addr);
}
-
// Disassembler helper functions.
static void get_modrm(byte data, int* mod, int* regop, int* rm) {
*mod = (data >> 6) & 3;
@@ -365,14 +318,13 @@ class DisassemblerIA32 {
*rm = data & 7;
}
-
static void get_sib(byte data, int* scale, int* index, int* base) {
*scale = (data >> 6) & 3;
*index = (data >> 3) & 7;
*base = data & 7;
}
- typedef const char* (DisassemblerIA32::*RegisterNameMapping)(int reg) const;
+ using RegisterNameMapping = const char* (DisassemblerIA32::*)(int reg) const;
int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
@@ -403,7 +355,6 @@ class DisassemblerIA32 {
}
};
-
void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
va_list args;
@@ -414,16 +365,15 @@ void DisassemblerIA32::AppendToBuffer(const char* format, ...) {
}
int DisassemblerIA32::PrintRightOperandHelper(
- byte* modrmp,
- RegisterNameMapping direct_register_name) {
+ byte* modrmp, RegisterNameMapping direct_register_name) {
int mod, regop, rm;
get_modrm(*modrmp, &mod, &regop, &rm);
- RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
- &DisassemblerIA32::NameOfCPURegister;
+ RegisterNameMapping register_name =
+ (mod == 3) ? direct_register_name : &DisassemblerIA32::NameOfCPURegister;
switch (mod) {
case 0:
if (rm == ebp) {
- int32_t disp = *reinterpret_cast<int32_t*>(modrmp+1);
+ int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
AppendToBuffer("[0x%x]", disp);
return 5;
} else if (rm == esp) {
@@ -435,18 +385,14 @@ int DisassemblerIA32::PrintRightOperandHelper(
return 2;
} else if (base == ebp) {
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d%s0x%x]",
- (this->*register_name)(index),
- 1 << scale,
- disp < 0 ? "-" : "+",
+ AppendToBuffer("[%s*%d%s0x%x]", (this->*register_name)(index),
+ 1 << scale, disp < 0 ? "-" : "+",
disp < 0 ? -disp : disp);
return 6;
} else if (index != esp && base != ebp) {
// [base+index*scale]
- AppendToBuffer("[%s+%s*%d]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale);
+ AppendToBuffer("[%s+%s*%d]", (this->*register_name)(base),
+ (this->*register_name)(index), 1 << scale);
return 2;
} else {
UnimplementedInstruction();
@@ -466,27 +412,20 @@ int DisassemblerIA32::PrintRightOperandHelper(
int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 2)
: *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == base && index == rm /*esp*/ && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s%s0x%x]",
- (this->*register_name)(rm),
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
+ AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
} else {
- AppendToBuffer("[%s+%s*%d%s0x%x]",
- (this->*register_name)(base),
- (this->*register_name)(index),
- 1 << scale,
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
+ AppendToBuffer("[%s+%s*%d%s0x%x]", (this->*register_name)(base),
+ (this->*register_name)(index), 1 << scale,
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
int disp = mod == 2 ? *reinterpret_cast<int32_t*>(modrmp + 1)
: *reinterpret_cast<int8_t*>(modrmp + 1);
- AppendToBuffer("[%s%s0x%x]",
- (this->*register_name)(rm),
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
+ AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
return mod == 2 ? 5 : 2;
}
break;
@@ -500,28 +439,22 @@ int DisassemblerIA32::PrintRightOperandHelper(
UNREACHABLE();
}
-
int DisassemblerIA32::PrintRightOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfCPURegister);
}
-
int DisassemblerIA32::PrintRightByteOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp,
&DisassemblerIA32::NameOfByteCPURegister);
}
-
int DisassemblerIA32::PrintRightXMMOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerIA32::NameOfXMMRegister);
+ return PrintRightOperandHelper(modrmp, &DisassemblerIA32::NameOfXMMRegister);
}
-
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
-int DisassemblerIA32::PrintOperands(const char* mnem,
- OperandOrder op_order,
+int DisassemblerIA32::PrintOperands(const char* mnem, OperandOrder op_order,
byte* data) {
byte modrm = *data;
int mod, regop, rm;
@@ -541,32 +474,45 @@ int DisassemblerIA32::PrintOperands(const char* mnem,
}
default:
UNREACHABLE();
- break;
}
return advance;
}
-
// Returns number of bytes used by machine instruction, including *data byte.
// Writes immediate instructions to 'tmp_buffer_'.
int DisassemblerIA32::PrintImmediateOp(byte* data) {
bool sign_extension_bit = (*data & 0x02) != 0;
- byte modrm = *(data+1);
+ byte modrm = *(data + 1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
const char* mnem = "Imm???";
switch (regop) {
- case 0: mnem = "add"; break;
- case 1: mnem = "or"; break;
- case 2: mnem = "adc"; break;
- case 4: mnem = "and"; break;
- case 5: mnem = "sub"; break;
- case 6: mnem = "xor"; break;
- case 7: mnem = "cmp"; break;
- default: UnimplementedInstruction();
+ case 0:
+ mnem = "add";
+ break;
+ case 1:
+ mnem = "or";
+ break;
+ case 2:
+ mnem = "adc";
+ break;
+ case 4:
+ mnem = "and";
+ break;
+ case 5:
+ mnem = "sub";
+ break;
+ case 6:
+ mnem = "xor";
+ break;
+ case 7:
+ mnem = "cmp";
+ break;
+ default:
+ UnimplementedInstruction();
}
AppendToBuffer("%s ", mnem);
- int count = PrintRightOperand(data+1);
+ int count = PrintRightOperand(data + 1);
if (sign_extension_bit) {
AppendToBuffer(",0x%x", *(data + 1 + count));
return 1 + count + 1 /*int8*/;
@@ -576,7 +522,6 @@ int DisassemblerIA32::PrintImmediateOp(byte* data) {
}
}
-
// Returns number of bytes used, including *data.
int DisassemblerIA32::F7Instruction(byte* data) {
DCHECK_EQ(0xF7, *data);
@@ -618,7 +563,6 @@ int DisassemblerIA32::F7Instruction(byte* data) {
return 1 + count;
}
-
int DisassemblerIA32::D1D3C1Instruction(byte* data) {
byte op = *data;
DCHECK(op == 0xD1 || op == 0xD3 || op == 0xC1);
@@ -670,22 +614,20 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) {
return 1 + count;
}
-
// Returns number of bytes used, including *data.
int DisassemblerIA32::JumpShort(byte* data) {
DCHECK_EQ(0xEB, *data);
- byte b = *(data+1);
+ byte b = *(data + 1);
byte* dest = data + static_cast<int8_t>(b) + 2;
AppendToBuffer("jmp %s", NameOfAddress(dest));
return 2;
}
-
// Returns number of bytes used, including *data.
int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
DCHECK_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
- byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
+ byte cond = *(data + 1) & 0x0F;
+ byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
const char* mnem = jump_conditional_mnem[cond];
AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
if (comment != nullptr) {
@@ -694,11 +636,10 @@ int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
return 6; // includes 0x0F
}
-
// Returns number of bytes used, including *data.
int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
byte cond = *data & 0x0F;
- byte b = *(data+1);
+ byte b = *(data + 1);
byte* dest = data + static_cast<int8_t>(b) + 2;
const char* mnem = jump_conditional_mnem[cond];
AppendToBuffer("%s %s", mnem, NameOfAddress(dest));
@@ -708,18 +649,16 @@ int DisassemblerIA32::JumpConditionalShort(byte* data, const char* comment) {
return 2;
}
-
// Returns number of bytes used, including *data.
int DisassemblerIA32::SetCC(byte* data) {
DCHECK_EQ(0x0F, *data);
- byte cond = *(data+1) & 0x0F;
+ byte cond = *(data + 1) & 0x0F;
const char* mnem = set_conditional_mnem[cond];
AppendToBuffer("%s ", mnem);
- PrintRightByteOperand(data+2);
+ PrintRightByteOperand(data + 2);
return 3; // Includes 0x0F.
}
-
// Returns number of bytes used, including *data.
int DisassemblerIA32::CMov(byte* data) {
DCHECK_EQ(0x0F, *data);
@@ -1281,61 +1220,96 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
return static_cast<int>(current - data);
}
-
// Returns number of bytes used, including *data.
int DisassemblerIA32::FPUInstruction(byte* data) {
byte escape_opcode = *data;
DCHECK_EQ(0xD8, escape_opcode & 0xF8);
- byte modrm_byte = *(data+1);
+ byte modrm_byte = *(data + 1);
if (modrm_byte >= 0xC0) {
return RegisterFPUInstruction(escape_opcode, modrm_byte);
} else {
- return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
+ return MemoryFPUInstruction(escape_opcode, modrm_byte, data + 1);
}
}
-int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
- int modrm_byte,
+int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode, int modrm_byte,
byte* modrm_start) {
const char* mnem = "?";
int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
switch (escape_opcode) {
- case 0xD9: switch (regop) {
- case 0: mnem = "fld_s"; break;
- case 2: mnem = "fst_s"; break;
- case 3: mnem = "fstp_s"; break;
- case 7: mnem = "fstcw"; break;
- default: UnimplementedInstruction();
+ case 0xD9:
+ switch (regop) {
+ case 0:
+ mnem = "fld_s";
+ break;
+ case 2:
+ mnem = "fst_s";
+ break;
+ case 3:
+ mnem = "fstp_s";
+ break;
+ case 7:
+ mnem = "fstcw";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
- case 0xDB: switch (regop) {
- case 0: mnem = "fild_s"; break;
- case 1: mnem = "fisttp_s"; break;
- case 2: mnem = "fist_s"; break;
- case 3: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
+ case 0xDB:
+ switch (regop) {
+ case 0:
+ mnem = "fild_s";
+ break;
+ case 1:
+ mnem = "fisttp_s";
+ break;
+ case 2:
+ mnem = "fist_s";
+ break;
+ case 3:
+ mnem = "fistp_s";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
- case 0xDD: switch (regop) {
- case 0: mnem = "fld_d"; break;
- case 1: mnem = "fisttp_d"; break;
- case 2: mnem = "fst_d"; break;
- case 3: mnem = "fstp_d"; break;
- default: UnimplementedInstruction();
+ case 0xDD:
+ switch (regop) {
+ case 0:
+ mnem = "fld_d";
+ break;
+ case 1:
+ mnem = "fisttp_d";
+ break;
+ case 2:
+ mnem = "fst_d";
+ break;
+ case 3:
+ mnem = "fstp_d";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
- case 0xDF: switch (regop) {
- case 5: mnem = "fild_d"; break;
- case 7: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
+ case 0xDF:
+ switch (regop) {
+ case 5:
+ mnem = "fild_d";
+ break;
+ case 7:
+ mnem = "fistp_d";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
- default: UnimplementedInstruction();
+ default:
+ UnimplementedInstruction();
}
AppendToBuffer("%s ", mnem);
int count = PrintRightOperand(modrm_start);
@@ -1351,11 +1325,20 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xD8:
has_register = true;
switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd_i"; break;
- case 0xE0: mnem = "fsub_i"; break;
- case 0xC8: mnem = "fmul_i"; break;
- case 0xF0: mnem = "fdiv_i"; break;
- default: UnimplementedInstruction();
+ case 0xC0:
+ mnem = "fadd_i";
+ break;
+ case 0xE0:
+ mnem = "fsub_i";
+ break;
+ case 0xC8:
+ mnem = "fmul_i";
+ break;
+ case 0xF0:
+ mnem = "fdiv_i";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
@@ -1371,24 +1354,59 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
break;
default:
switch (modrm_byte) {
- case 0xE0: mnem = "fchs"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE4: mnem = "ftst"; break;
- case 0xE8: mnem = "fld1"; break;
- case 0xEB: mnem = "fldpi"; break;
- case 0xED: mnem = "fldln2"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xF0: mnem = "f2xm1"; break;
- case 0xF1: mnem = "fyl2x"; break;
- case 0xF4: mnem = "fxtract"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xFC: mnem = "frndint"; break;
- case 0xFD: mnem = "fscale"; break;
- case 0xFE: mnem = "fsin"; break;
- case 0xFF: mnem = "fcos"; break;
- default: UnimplementedInstruction();
+ case 0xE0:
+ mnem = "fchs";
+ break;
+ case 0xE1:
+ mnem = "fabs";
+ break;
+ case 0xE4:
+ mnem = "ftst";
+ break;
+ case 0xE8:
+ mnem = "fld1";
+ break;
+ case 0xEB:
+ mnem = "fldpi";
+ break;
+ case 0xED:
+ mnem = "fldln2";
+ break;
+ case 0xEE:
+ mnem = "fldz";
+ break;
+ case 0xF0:
+ mnem = "f2xm1";
+ break;
+ case 0xF1:
+ mnem = "fyl2x";
+ break;
+ case 0xF4:
+ mnem = "fxtract";
+ break;
+ case 0xF5:
+ mnem = "fprem1";
+ break;
+ case 0xF7:
+ mnem = "fincstp";
+ break;
+ case 0xF8:
+ mnem = "fprem";
+ break;
+ case 0xFC:
+ mnem = "frndint";
+ break;
+ case 0xFD:
+ mnem = "fscale";
+ break;
+ case 0xFE:
+ mnem = "fsin";
+ break;
+ case 0xFF:
+ mnem = "fcos";
+ break;
+ default:
+ UnimplementedInstruction();
}
}
break;
@@ -1405,7 +1423,7 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
if ((modrm_byte & 0xF8) == 0xE8) {
mnem = "fucomi";
has_register = true;
- } else if (modrm_byte == 0xE2) {
+ } else if (modrm_byte == 0xE2) {
mnem = "fclex";
} else if (modrm_byte == 0xE3) {
mnem = "fninit";
@@ -1417,35 +1435,60 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xDC:
has_register = true;
switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
+ case 0xC0:
+ mnem = "fadd";
+ break;
+ case 0xE8:
+ mnem = "fsub";
+ break;
+ case 0xC8:
+ mnem = "fmul";
+ break;
+ case 0xF8:
+ mnem = "fdiv";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
case 0xDD:
has_register = true;
switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "ffree"; break;
- case 0xD0: mnem = "fst"; break;
- case 0xD8: mnem = "fstp"; break;
- default: UnimplementedInstruction();
+ case 0xC0:
+ mnem = "ffree";
+ break;
+ case 0xD0:
+ mnem = "fst";
+ break;
+ case 0xD8:
+ mnem = "fstp";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
case 0xDE:
- if (modrm_byte == 0xD9) {
+ if (modrm_byte == 0xD9) {
mnem = "fcompp";
} else {
has_register = true;
switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "faddp"; break;
- case 0xE8: mnem = "fsubp"; break;
- case 0xC8: mnem = "fmulp"; break;
- case 0xF8: mnem = "fdivp"; break;
- default: UnimplementedInstruction();
+ case 0xC0:
+ mnem = "faddp";
+ break;
+ case 0xE8:
+ mnem = "fsubp";
+ break;
+ case 0xC8:
+ mnem = "fmulp";
+ break;
+ case 0xF8:
+ mnem = "fdivp";
+ break;
+ default:
+ UnimplementedInstruction();
}
}
break;
@@ -1459,7 +1502,8 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
}
break;
- default: UnimplementedInstruction();
+ default:
+ UnimplementedInstruction();
}
if (has_register) {
@@ -1515,7 +1559,6 @@ static const char* F0Mnem(byte f0byte) {
}
}
-
// Disassembled instruction '*instr' and writes it into 'out_buffer'.
int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
byte* instr) {
@@ -1613,7 +1656,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
if (!processed) {
switch (*data) {
case 0xC2:
- AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data+1));
+ AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
data += 3;
break;
@@ -1629,293 +1672,299 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data));
data += 4;
- }
- break;
+ } break;
- case 0xF6:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("test_b ");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- UnimplementedInstruction();
- }
+ case 0xF6: {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (regop == eax) {
+ AppendToBuffer("test_b ");
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } else {
+ UnimplementedInstruction();
}
- break;
+ } break;
case 0x81: // fall through
case 0x83: // 0x81 with sign extension bit set
data += PrintImmediateOp(data);
break;
- case 0x0F:
- { byte f0byte = data[1];
- const char* f0mnem = F0Mnem(f0byte);
- if (f0byte == 0x18) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* suffix[] = {"nta", "1", "2", "3"};
- AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
- data += PrintRightOperand(data);
- } else if (f0byte == 0x1F && data[2] == 0) {
- AppendToBuffer("nop"); // 3 byte nop.
- data += 3;
- } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
- AppendToBuffer("nop"); // 4 byte nop.
- data += 4;
- } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
- data[4] == 0) {
- AppendToBuffer("nop"); // 5 byte nop.
- data += 5;
- } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
- data[4] == 0 && data[5] == 0 && data[6] == 0) {
- AppendToBuffer("nop"); // 7 byte nop.
- data += 7;
- } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
- data[4] == 0 && data[5] == 0 && data[6] == 0 &&
- data[7] == 0) {
- AppendToBuffer("nop"); // 8 byte nop.
- data += 8;
- } else if (f0byte == 0x0B || f0byte == 0xA2 || f0byte == 0x31) {
- AppendToBuffer("%s", f0mnem);
- data += 2;
- } else if (f0byte == 0x28) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movaps %s,%s",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (f0byte == 0x10 || f0byte == 0x11) {
- data += 2;
- // movups xmm, xmm/m128
- // movups xmm/m128, xmm
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movups ");
- if (f0byte == 0x11) {
- data += PrintRightXMMOperand(data);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
- } else {
- AppendToBuffer("%s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- }
- } else if (f0byte == 0x2E) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (f0byte >= 0x52 && f0byte <= 0x5F) {
- const char* const pseudo_op[] = {
- "rsqrtps", "rcpps", "andps", "andnps", "orps",
- "xorps", "addps", "mulps", "cvtps2pd", "cvtdq2ps",
- "subps", "minps", "divps", "maxps",
- };
-
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", pseudo_op[f0byte - 0x52],
- NameOfXMMRegister(regop));
- data += PrintRightXMMOperand(data);
- } else if (f0byte == 0x50) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s,%s",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
- } else if (f0byte == 0xC2) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
- AppendToBuffer("cmpps %s, ", NameOfXMMRegister(regop));
+ case 0x0F: {
+ byte f0byte = data[1];
+ const char* f0mnem = F0Mnem(f0byte);
+ if (f0byte == 0x18) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* suffix[] = {"nta", "1", "2", "3"};
+ AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
+ data += PrintRightOperand(data);
+ } else if (f0byte == 0x1F && data[2] == 0) {
+ AppendToBuffer("nop"); // 3 byte nop.
+ data += 3;
+ } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
+ AppendToBuffer("nop"); // 4 byte nop.
+ data += 4;
+ } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
+ data[4] == 0) {
+ AppendToBuffer("nop"); // 5 byte nop.
+ data += 5;
+ } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0) {
+ AppendToBuffer("nop"); // 7 byte nop.
+ data += 7;
+ } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0 &&
+ data[7] == 0) {
+ AppendToBuffer("nop"); // 8 byte nop.
+ data += 8;
+ } else if (f0byte == 0x0B || f0byte == 0xA2 || f0byte == 0x31) {
+ AppendToBuffer("%s", f0mnem);
+ data += 2;
+ } else if (f0byte == 0x28) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movaps %s,%s", NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (f0byte == 0x10 || f0byte == 0x11) {
+ data += 2;
+ // movups xmm, xmm/m128
+ // movups xmm/m128, xmm
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movups ");
+ if (f0byte == 0x11) {
data += PrintRightXMMOperand(data);
- AppendToBuffer(", (%s)", pseudo_op[*data]);
- data++;
- } else if (f0byte== 0xC6) {
- // shufps xmm, xmm/m128, imm8
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("shufps %s,%s,%d",
- NameOfXMMRegister(rm),
- NameOfXMMRegister(regop),
- static_cast<int>(imm8));
- data += 2;
- } else if (f0byte >= 0xC8 && f0byte <= 0xCF) {
- // bswap
- data += 2;
- int reg = f0byte - 0xC8;
- AppendToBuffer("bswap %s", NameOfCPURegister(reg));
- } else if ((f0byte & 0xF0) == 0x80) {
- data += JumpConditional(data, branch_hint);
- } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
- f0byte == 0xB7 || f0byte == 0xAF) {
- data += 2;
- data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
- } else if ((f0byte & 0xF0) == 0x90) {
- data += SetCC(data);
- } else if ((f0byte & 0xF0) == 0x40) {
- data += CMov(data);
- } else if (f0byte == 0xA4 || f0byte == 0xAC) {
- // shld, shrd
- data += 2;
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- int8_t imm8 = static_cast<int8_t>(data[1]);
- data += 2;
- AppendToBuffer("%s,%s,%d", NameOfCPURegister(rm),
- NameOfCPURegister(regop), static_cast<int>(imm8));
- } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
- // shrd_cl, shld_cl, bts
- data += 2;
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- if (f0byte == 0xAB) {
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- } else {
- AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
- }
- } else if (f0byte == 0xB0) {
- // cmpxchg_b
- data += 2;
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else if (f0byte == 0xB1) {
- // cmpxchg
- data += 2;
- data += PrintOperands(f0mnem, OPER_REG_OP_ORDER, data);
- } else if (f0byte == 0xBC) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
- data += PrintRightOperand(data);
- } else if (f0byte == 0xBD) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
- data += PrintRightOperand(data);
- } else if (f0byte == 0xC7) {
- // cmpxchg8b
- data += 2;
- AppendToBuffer("%s ", f0mnem);
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- data += PrintRightOperand(data);
- } else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xE8) {
- AppendToBuffer("lfence");
- data += 3;
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
- UnimplementedInstruction();
+ AppendToBuffer("%s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
}
- }
- break;
-
- case 0x8F:
- { data++;
+ } else if (f0byte == 0x2E) {
+ data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- if (regop == eax) {
- AppendToBuffer("pop ");
- data += PrintRightOperand(data);
- }
- }
- break;
+ AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (f0byte >= 0x52 && f0byte <= 0x5F) {
+ const char* const pseudo_op[] = {
+ "rsqrtps", "rcpps", "andps", "andnps", "orps",
+ "xorps", "addps", "mulps", "cvtps2pd", "cvtdq2ps",
+ "subps", "minps", "divps", "maxps",
+ };
- case 0xFF:
- { data++;
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", pseudo_op[f0byte - 0x52],
+ NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ } else if (f0byte == 0x50) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movmskps %s,%s", NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (f0byte == 0xC2) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
+ "neq", "nlt", "nle", "ord"};
+ AppendToBuffer("cmpps %s, ", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(", (%s)", pseudo_op[*data]);
+ data++;
+ } else if (f0byte == 0xC6) {
+ // shufps xmm, xmm/m128, imm8
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("shufps %s,%s,%d", NameOfXMMRegister(rm),
+ NameOfXMMRegister(regop), static_cast<int>(imm8));
+ data += 2;
+ } else if (f0byte >= 0xC8 && f0byte <= 0xCF) {
+ // bswap
+ data += 2;
+ int reg = f0byte - 0xC8;
+ AppendToBuffer("bswap %s", NameOfCPURegister(reg));
+ } else if ((f0byte & 0xF0) == 0x80) {
+ data += JumpConditional(data, branch_hint);
+ } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
+ f0byte == 0xB7 || f0byte == 0xAF) {
+ data += 2;
+ data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
+ } else if ((f0byte & 0xF0) == 0x90) {
+ data += SetCC(data);
+ } else if ((f0byte & 0xF0) == 0x40) {
+ data += CMov(data);
+ } else if (f0byte == 0xA4 || f0byte == 0xAC) {
+ // shld, shrd
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = nullptr;
- switch (regop) {
- case esi: mnem = "push"; break;
- case eax: mnem = "inc"; break;
- case ecx: mnem = "dec"; break;
- case edx: mnem = "call"; break;
- case esp: mnem = "jmp"; break;
- default: mnem = "???";
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ data += 2;
+ AppendToBuffer("%s,%s,%d", NameOfCPURegister(rm),
+ NameOfCPURegister(regop), static_cast<int>(imm8));
+ } else if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+ // shrd_cl, shld_cl, bts
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ if (f0byte == 0xAB) {
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else {
+ AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
- AppendToBuffer("%s ", mnem);
+ } else if (f0byte == 0xB0) {
+ // cmpxchg_b
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else if (f0byte == 0xB1) {
+ // cmpxchg
+ data += 2;
+ data += PrintOperands(f0mnem, OPER_REG_OP_ORDER, data);
+ } else if (f0byte == 0xBC) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
+ } else if (f0byte == 0xBD) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("%s %s,", f0mnem, NameOfCPURegister(regop));
+ data += PrintRightOperand(data);
+ } else if (f0byte == 0xC7) {
+ // cmpxchg8b
+ data += 2;
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
data += PrintRightOperand(data);
+ } else if (f0byte == 0xAE && (data[2] & 0xF8) == 0xE8) {
+ AppendToBuffer("lfence");
+ data += 3;
+ } else {
+ UnimplementedInstruction();
}
- break;
+ } break;
- case 0xC7: // imm32, fall through
- case 0xC6: // imm8
- { bool is_byte = *data == 0xC6;
- data++;
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- int32_t imm = *data;
- AppendToBuffer(",0x%x", imm);
- data++;
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- int32_t imm = *reinterpret_cast<int32_t*>(data);
- AppendToBuffer(",0x%x", imm);
- data += 4;
- }
+ case 0x8F: {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (regop == eax) {
+ AppendToBuffer("pop ");
+ data += PrintRightOperand(data);
}
- break;
+ } break;
- case 0x80:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- const char* mnem = nullptr;
- switch (regop) {
- case 5: mnem = "subb"; break;
- case 7: mnem = "cmpb"; break;
- default: UnimplementedInstruction();
- }
- AppendToBuffer("%s ", mnem);
+ case 0xFF: {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* mnem = nullptr;
+ switch (regop) {
+ case esi:
+ mnem = "push";
+ break;
+ case eax:
+ mnem = "inc";
+ break;
+ case ecx:
+ mnem = "dec";
+ break;
+ case edx:
+ mnem = "call";
+ break;
+ case esp:
+ mnem = "jmp";
+ break;
+ default:
+ mnem = "???";
+ }
+ AppendToBuffer("%s ", mnem);
+ data += PrintRightOperand(data);
+ } break;
+
+ case 0xC7: // imm32, fall through
+ case 0xC6: // imm8
+ {
+ bool is_byte = *data == 0xC6;
+ data++;
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
data += PrintRightByteOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
data++;
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ int32_t imm = *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += 4;
}
- break;
+ } break;
+
+ case 0x80: {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* mnem = nullptr;
+ switch (regop) {
+ case 5:
+ mnem = "subb";
+ break;
+ case 7:
+ mnem = "cmpb";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ data += PrintRightByteOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ } break;
case 0x88: // 8bit, fall through
case 0x89: // 32bit
- { bool is_byte = *data == 0x88;
- int mod, regop, rm;
- data++;
- get_modrm(*data, &mod, &regop, &rm);
- if (is_byte) {
- AppendToBuffer("%s ", "mov_b");
- data += PrintRightByteOperand(data);
- AppendToBuffer(",%s", NameOfByteCPURegister(regop));
- } else {
- AppendToBuffer("%s ", "mov");
- data += PrintRightOperand(data);
- AppendToBuffer(",%s", NameOfCPURegister(regop));
- }
+ {
+ bool is_byte = *data == 0x88;
+ int mod, regop, rm;
+ data++;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (is_byte) {
+ AppendToBuffer("%s ", "mov_b");
+ data += PrintRightByteOperand(data);
+ AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+ } else {
+ AppendToBuffer("%s ", "mov");
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
}
- break;
+ } break;
case 0x66: // prefix
while (*data == 0x66) data++;
@@ -1982,9 +2031,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break; \
}
- SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
- SSE4_RM_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSSE3_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_INSTRUCTION_LIST(SSE34_DIS_CASE)
+ SSE4_RM_INSTRUCTION_LIST(SSE34_DIS_CASE)
#undef SSE34_DIS_CASE
default:
UnimplementedInstruction();
@@ -2004,10 +2053,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("roundsd %s,%s,%d",
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm),
- static_cast<int>(imm8));
+ AppendToBuffer("roundsd %s,%s,%d", NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm), static_cast<int>(imm8));
data += 2;
} else if (*data == 0x0E) {
data++;
@@ -2057,10 +2104,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
int8_t imm8 = static_cast<int8_t>(data[1]);
- AppendToBuffer("extractps %s,%s,%d",
- NameOfCPURegister(rm),
- NameOfXMMRegister(regop),
- static_cast<int>(imm8));
+ AppendToBuffer("extractps %s,%s,%d", NameOfCPURegister(rm),
+ NameOfXMMRegister(regop), static_cast<int>(imm8));
data += 2;
} else if (*data == 0x20) {
data++;
@@ -2095,8 +2140,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
if (mod == 0x3) {
- AppendToBuffer("%s %s,%s", mnem,
- NameOfXMMRegister(regop),
+ AppendToBuffer("%s %s,%s", mnem, NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else {
@@ -2107,32 +2151,28 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskpd %s,%s",
- NameOfCPURegister(regop),
+ AppendToBuffer("movmskpd %s,%s", NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x54) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("andpd %s,%s",
- NameOfXMMRegister(regop),
+ AppendToBuffer("andpd %s,%s", NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x56) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("orpd %s,%s",
- NameOfXMMRegister(regop),
+ AppendToBuffer("orpd %s,%s", NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x57) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("xorpd %s,%s",
- NameOfXMMRegister(regop),
+ AppendToBuffer("xorpd %s,%s", NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x6E) {
@@ -2162,8 +2202,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psllq %s,%s",
- NameOfXMMRegister(regop),
+ AppendToBuffer("psllq %s,%s", NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x71) {
@@ -2195,8 +2234,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("psrlq %s,%s",
- NameOfXMMRegister(regop),
+ AppendToBuffer("psrlq %s,%s", NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
} else if (*data == 0x7F) {
@@ -2258,21 +2296,20 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
break;
- case 0xFE:
- { data++;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- if (regop == ecx) {
- AppendToBuffer("dec_b ");
- data += PrintRightOperand(data);
- } else {
- UnimplementedInstruction();
- }
+ case 0xFE: {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (regop == ecx) {
+ AppendToBuffer("dec_b ");
+ data += PrintRightOperand(data);
+ } else {
+ UnimplementedInstruction();
}
- break;
+ } break;
case 0x68:
- AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data+1));
+ AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
data += 5;
break;
@@ -2282,12 +2319,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xA8:
- AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+ AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
data += 2;
break;
case 0xA9:
- AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
+ AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data + 1));
data += 5;
break;
@@ -2313,8 +2350,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xF2:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
+ if (*(data + 1) == 0x0F) {
+ byte b2 = *(data + 2);
if (b2 == 0x11) {
AppendToBuffer("movsd ");
data += 3;
@@ -2328,7 +2365,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- } else if (b2 == 0x5A) {
+ } else if (b2 == 0x5A) {
data += 3;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
@@ -2391,19 +2428,10 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
const char* const pseudo_op[] = {
- "cmpeqsd",
- "cmpltsd",
- "cmplesd",
- "cmpunordsd",
- "cmpneqsd",
- "cmpnltsd",
- "cmpnlesd",
- "cmpordsd"
- };
- AppendToBuffer("%s %s,%s",
- pseudo_op[data[1]],
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
+ "cmpeqsd", "cmpltsd", "cmplesd", "cmpunordsd",
+ "cmpneqsd", "cmpnltsd", "cmpnlesd", "cmpordsd"};
+ AppendToBuffer("%s %s,%s", pseudo_op[data[1]],
+ NameOfXMMRegister(regop), NameOfXMMRegister(rm));
data += 2;
} else {
AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
@@ -2416,8 +2444,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xF3:
- if (*(data+1) == 0x0F) {
- byte b2 = *(data+2);
+ if (*(data + 1) == 0x0F) {
+ byte b2 = *(data + 2);
if (b2 == 0x11) {
AppendToBuffer("movss ");
data += 3;
@@ -2538,10 +2566,10 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
}
}
- } else if (*(data+1) == 0xA5) {
+ } else if (*(data + 1) == 0xA5) {
data += 2;
AppendToBuffer("rep_movs");
- } else if (*(data+1) == 0xAB) {
+ } else if (*(data + 1) == 0xAB) {
data += 2;
AppendToBuffer("rep_stos");
} else if (*(data + 1) == 0x90) {
@@ -2574,74 +2602,56 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
int outp = 0;
// Instruction bytes.
for (byte* bp = instr; bp < data; bp++) {
- outp += v8::internal::SNPrintF(out_buffer + outp,
- "%02x",
- *bp);
+ outp += v8::internal::SNPrintF(out_buffer + outp, "%02x", *bp);
}
for (int i = 6 - instr_len; i >= 0; i--) {
outp += v8::internal::SNPrintF(out_buffer + outp, " ");
}
- outp += v8::internal::SNPrintF(out_buffer + outp,
- " %s",
- tmp_buffer_.start());
+ outp += v8::internal::SNPrintF(out_buffer + outp, " %s", tmp_buffer_.begin());
return instr_len;
} // NOLINT (function is too long)
-
//------------------------------------------------------------------------------
+static const char* const cpu_regs[8] = {"eax", "ecx", "edx", "ebx",
+ "esp", "ebp", "esi", "edi"};
-static const char* const cpu_regs[8] = {
- "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi"
-};
-
-
-static const char* const byte_cpu_regs[8] = {
- "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh"
-};
-
-
-static const char* const xmm_regs[8] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
-};
+static const char* const byte_cpu_regs[8] = {"al", "cl", "dl", "bl",
+ "ah", "ch", "dh", "bh"};
+static const char* const xmm_regs[8] = {"xmm0", "xmm1", "xmm2", "xmm3",
+ "xmm4", "xmm5", "xmm6", "xmm7"};
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
- return tmp_buffer_.start();
+ return tmp_buffer_.begin();
}
-
const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
-
const char* NameConverter::NameOfCPURegister(int reg) const {
if (0 <= reg && reg < 8) return cpu_regs[reg];
return "noreg";
}
-
const char* NameConverter::NameOfByteCPURegister(int reg) const {
if (0 <= reg && reg < 8) return byte_cpu_regs[reg];
return "noreg";
}
-
const char* NameConverter::NameOfXMMRegister(int reg) const {
if (0 <= reg && reg < 8) return xmm_regs[reg];
return "noxmmreg";
}
-
const char* NameConverter::NameInCode(byte* addr) const {
// IA32 does not embed debug strings at the moment.
UNREACHABLE();
}
-
//------------------------------------------------------------------------------
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
@@ -2667,16 +2677,15 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
fprintf(f, " ");
for (byte* bp = prev_pc; bp < pc; bp++) {
- fprintf(f, "%02x", *bp);
+ fprintf(f, "%02x", *bp);
}
for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
fprintf(f, " ");
}
- fprintf(f, " %s\n", buffer.start());
+ fprintf(f, " %s\n", buffer.begin());
}
}
-
} // namespace disasm
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/diagnostics/mips/disasm-mips.cc
index e83d56aa5b..ac26d65db2 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/diagnostics/mips/disasm-mips.cc
@@ -30,9 +30,9 @@
#if V8_TARGET_ARCH_MIPS
#include "src/base/platform/platform.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/mips/constants-mips.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips/constants-mips.h"
+#include "src/diagnostics/disasm.h"
namespace v8 {
namespace internal {
@@ -46,9 +46,7 @@ class Decoder {
public:
Decoder(const disasm::NameConverter& converter,
v8::internal::Vector<char> out_buffer)
- : converter_(converter),
- out_buffer_(out_buffer),
- out_buffer_pos_(0) {
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
out_buffer_[out_buffer_pos_] = '\0';
}
@@ -102,7 +100,7 @@ class Decoder {
void PrintSImm26(Instruction* instr);
void PrintPCImm26(Instruction* instr, int delta_pc, int n_bits);
void PrintPCImm26(Instruction* instr);
- void PrintCode(Instruction* instr); // For break and trap instructions.
+ void PrintCode(Instruction* instr); // For break and trap instructions.
void PrintFormat(Instruction* instr); // For floating format postfix.
void PrintMsaDataFormat(Instruction* instr);
void PrintMsaXImm8(Instruction* instr);
@@ -124,7 +122,6 @@ class Decoder {
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
-
// Each of these functions decodes one particular instruction type.
bool DecodeTypeRegisterRsType(Instruction* instr);
void DecodeTypeRegisterSRsType(Instruction* instr);
@@ -157,17 +154,12 @@ class Decoder {
DISALLOW_COPY_AND_ASSIGN(Decoder);
};
-
// Support for assertions in the Decoder formatting functions.
#define STRING_STARTS_WITH(string, compare_string) \
(strncmp(string, compare_string, strlen(compare_string)) == 0)
-
// Append the ch to the output buffer.
-void Decoder::PrintChar(const char ch) {
- out_buffer_[out_buffer_pos_++] = ch;
-}
-
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
// Append the str to the output buffer.
void Decoder::Print(const char* str) {
@@ -179,31 +171,26 @@ void Decoder::Print(const char* str) {
out_buffer_[out_buffer_pos_] = 0;
}
-
// Print the register name according to the active name converter.
void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
-
void Decoder::PrintRs(Instruction* instr) {
int reg = instr->RsValue();
PrintRegister(reg);
}
-
void Decoder::PrintRt(Instruction* instr) {
int reg = instr->RtValue();
PrintRegister(reg);
}
-
void Decoder::PrintRd(Instruction* instr) {
int reg = instr->RdValue();
PrintRegister(reg);
}
-
// Print the FPUregister name according to the active name converter.
void Decoder::PrintFPURegister(int freg) {
Print(converter_.NameOfXMMRegister(freg));
@@ -239,47 +226,40 @@ void Decoder::PrintFs(Instruction* instr) {
PrintFPURegister(freg);
}
-
void Decoder::PrintFt(Instruction* instr) {
int freg = instr->RtValue();
PrintFPURegister(freg);
}
-
void Decoder::PrintFd(Instruction* instr) {
int freg = instr->RdValue();
PrintFPURegister(freg);
}
-
// Print the integer value of the sa field.
void Decoder::PrintSa(Instruction* instr) {
int sa = instr->SaValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
}
-
// Print the integer value of the sa field of a lsa instruction.
void Decoder::PrintLsaSa(Instruction* instr) {
int sa = instr->LsaSaValue() + 1;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
}
-
// Print the integer value of the rd field, when it is not used as reg.
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
}
-
// Print the integer value of the rd field, when used as 'ext' size.
void Decoder::PrintSs1(Instruction* instr) {
int ss = instr->RdValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
}
-
// Print the integer value of the rd field, when used as 'ins' size.
void Decoder::PrintSs2(Instruction* instr) {
int ss = instr->RdValue();
@@ -288,21 +268,18 @@ void Decoder::PrintSs2(Instruction* instr) {
SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
}
-
// Print the integer value of the cc field for the bc1t/f instructions.
void Decoder::PrintBc(Instruction* instr) {
int cc = instr->FBccValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
}
-
// Print the integer value of the cc field for the FP compare instructions.
void Decoder::PrintCc(Instruction* instr) {
int cc = instr->FCccValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
}
-
void Decoder::PrintBp2(Instruction* instr) {
int bp2 = instr->Bp2Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", bp2);
@@ -326,21 +303,18 @@ void Decoder::PrintUImm16(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
}
-
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
-
// Print 16-bit hexa immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
int32_t imm = instr->Imm16Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
-
// Print absoulte address for 16-bit offset or immediate value.
// The absolute address is calculated according following expression:
// PC + delta_pc + (offset << n_bits)
@@ -352,7 +326,6 @@ void Decoder::PrintPCImm16(Instruction* instr, int delta_pc, int n_bits) {
delta_pc + (offset << n_bits)));
}
-
// Print 18-bit signed immediate value.
void Decoder::PrintSImm18(Instruction* instr) {
int32_t imm =
@@ -360,21 +333,18 @@ void Decoder::PrintSImm18(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
-
// Print 18-bit hexa immediate value.
void Decoder::PrintXImm18(Instruction* instr) {
int32_t imm = instr->Imm18Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
-
// Print 19-bit hexa immediate value.
void Decoder::PrintXImm19(Instruction* instr) {
int32_t imm = instr->Imm19Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
-
// Print 19-bit signed immediate value.
void Decoder::PrintSImm19(Instruction* instr) {
int32_t imm19 = instr->Imm19Value();
@@ -384,14 +354,12 @@ void Decoder::PrintSImm19(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm19);
}
-
// Print 21-bit immediate value.
void Decoder::PrintXImm21(Instruction* instr) {
uint32_t imm = instr->Imm21Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
-
// Print 21-bit signed immediate value.
void Decoder::PrintSImm21(Instruction* instr) {
int32_t imm21 = instr->Imm21Value();
@@ -401,7 +369,6 @@ void Decoder::PrintSImm21(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm21);
}
-
// Print absoulte address for 21-bit offset or immediate value.
// The absolute address is calculated according following expression:
// PC + delta_pc + (offset << n_bits)
@@ -416,7 +383,6 @@ void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
delta_pc + (imm21 << n_bits)));
}
-
// Print 26-bit hex immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
uint32_t target = static_cast<uint32_t>(instr->Imm26Value())
@@ -425,7 +391,6 @@ void Decoder::PrintXImm26(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", target);
}
-
// Print 26-bit signed immediate value.
void Decoder::PrintSImm26(Instruction* instr) {
int32_t imm26 = instr->Imm26Value();
@@ -435,7 +400,6 @@ void Decoder::PrintSImm26(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm26);
}
-
// Print absoulte address for 26-bit offset or immediate value.
// The absolute address is calculated according following expression:
// PC + delta_pc + (offset << n_bits)
@@ -450,7 +414,6 @@ void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) {
delta_pc + (imm26 << n_bits)));
}
-
// Print absoulte address for 26-bit offset or immediate value.
// The absolute address is calculated according following expression:
// PC[GPRLEN-1 .. 28] || instr_index26 || 00
@@ -463,7 +426,6 @@ void Decoder::PrintPCImm26(Instruction* instr) {
converter_.NameOfAddress((reinterpret_cast<byte*>(pc))));
}
-
// Print 26-bit immediate value.
void Decoder::PrintCode(Instruction* instr) {
if (instr->OpcodeFieldRaw() != SPECIAL)
@@ -471,10 +433,10 @@ void Decoder::PrintCode(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case BREAK: {
int32_t code = instr->Bits(25, 6);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "0x%05x (%d)", code, code);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x (%d)", code, code);
break;
- }
+ }
case TGE:
case TGEU:
case TLT:
@@ -487,7 +449,7 @@ void Decoder::PrintCode(Instruction* instr) {
break;
}
default: // Not a break or trap instruction.
- break;
+ break;
}
}
@@ -556,7 +518,6 @@ void Decoder::PrintFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
PrintChar(formatLetter);
}
@@ -641,9 +602,7 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
}
// Printing of instruction name.
-void Decoder::PrintInstructionName(Instruction* instr) {
-}
-
+void Decoder::PrintInstructionName(Instruction* instr) {}
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
@@ -665,7 +624,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
UNREACHABLE();
}
-
// Handle all FPUregister based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
@@ -738,12 +696,12 @@ int Decoder::FormatMSARegister(Instruction* instr, const char* format) {
// characters that were consumed from the formatting string.
int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
- case 'c': { // 'code for break or trap instructions.
+ case 'c': { // 'code for break or trap instructions.
DCHECK(STRING_STARTS_WITH(format, "code"));
PrintCode(instr);
return 4;
}
- case 'i': { // 'imm16u or 'imm26.
+ case 'i': { // 'imm16u or 'imm26.
if (format[3] == '1') {
if (format[4] == '6') {
DCHECK(STRING_STARTS_WITH(format, "imm16"));
@@ -919,16 +877,16 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
UNREACHABLE();
}
- case 'r': { // 'r: registers.
+ case 'r': { // 'r: registers.
return FormatRegister(instr, format);
}
- case 'f': { // 'f: FPUregisters.
+ case 'f': { // 'f: FPUregisters.
return FormatFPURegister(instr, format);
}
case 'w': { // 'w: MSA Register
return FormatMSARegister(instr, format);
}
- case 's': { // 'sa.
+ case 's': { // 'sa.
switch (format[1]) {
case 'a':
if (format[2] == '2') {
@@ -948,13 +906,13 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 's': {
if (format[2] == '1') {
- DCHECK(STRING_STARTS_WITH(format, "ss1")); /* ext size */
- PrintSs1(instr);
- return 3;
+ DCHECK(STRING_STARTS_WITH(format, "ss1")); /* ext size */
+ PrintSs1(instr);
+ return 3;
} else {
- DCHECK(STRING_STARTS_WITH(format, "ss2")); /* ins size */
- PrintSs2(instr);
- return 3;
+ DCHECK(STRING_STARTS_WITH(format, "ss2")); /* ins size */
+ PrintSs2(instr);
+ return 3;
}
}
}
@@ -977,7 +935,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
}
}
- case 'C': { // 'Cc - Special for c.xx.d cc field.
+ case 'C': { // 'Cc - Special for c.xx.d cc field.
DCHECK(STRING_STARTS_WITH(format, "Cc"));
PrintCc(instr);
return 2;
@@ -993,7 +951,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
UNREACHABLE();
}
-
// Format takes a formatting string for a whole instruction and prints it into
// the output buffer. All escaped options are handed to FormatOption to be
// parsed further.
@@ -1007,16 +964,12 @@ void Decoder::Format(Instruction* instr, const char* format) {
}
cur = *format++;
}
- out_buffer_[out_buffer_pos_] = '\0';
+ out_buffer_[out_buffer_pos_] = '\0';
}
-
// For currently unimplemented decodings the disassembler calls Unknown(instr)
// which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instruction* instr) {
- Format(instr, "unknown");
-}
-
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
@@ -1153,7 +1106,6 @@ bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
return true;
}
-
void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) {
if (!DecodeTypeRegisterRsType(instr)) {
switch (instr->FunctionFieldRaw()) {
@@ -1173,7 +1125,6 @@ void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) {
if (!DecodeTypeRegisterRsType(instr)) {
switch (instr->FunctionFieldRaw()) {
@@ -1190,7 +1141,6 @@ void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterLRsType(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case CVT_D_L:
@@ -1237,7 +1187,6 @@ void Decoder::DecodeTypeRegisterLRsType(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterWRsType(Instruction* instr) {
switch (instr->FunctionValue()) {
case CVT_S_W: // Convert word to float (single).
@@ -1284,7 +1233,6 @@ void Decoder::DecodeTypeRegisterWRsType(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case JR:
@@ -1476,7 +1424,6 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case MUL:
@@ -1492,7 +1439,6 @@ void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case INS: {
@@ -1580,12 +1526,11 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
- case COP1: // Coprocessor instructions.
+ case COP1: // Coprocessor instructions.
switch (instr->RsFieldRaw()) {
- case BC1: // bc1 handled in DecodeTypeImmediate.
+ case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
break;
case MFC1:
@@ -1778,7 +1723,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
default:
UNREACHABLE();
}
- break; // Case REGIMM.
+ break; // Case REGIMM.
// ------------- Branch instructions.
case BEQ:
Format(instr, "beq 'rs, 'rt, 'imm16u -> 'imm16p4s2");
@@ -2052,11 +1997,9 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
default:
printf("a 0x%x \n", instr->OpcodeFieldRaw());
UNREACHABLE();
- break;
}
}
-
void Decoder::DecodeTypeJump(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case J:
@@ -2705,9 +2648,8 @@ void Decoder::DecodeTypeMsa2RF(Instruction* instr) {
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
- instr->InstructionBits());
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
+ instr->InstructionBits());
switch (instr->InstructionType()) {
case Instruction::kRegisterType: {
DecodeTypeRegister(instr);
@@ -2729,49 +2671,41 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
return kInstrSize;
}
-
} // namespace internal
} // namespace v8
-
//------------------------------------------------------------------------------
namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
- return tmp_buffer_.start();
+ return tmp_buffer_.begin();
}
-
const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
-
const char* NameConverter::NameOfCPURegister(int reg) const {
return v8::internal::Registers::Name(reg);
}
-
const char* NameConverter::NameOfXMMRegister(int reg) const {
return v8::internal::FPURegisters::Name(reg);
}
-
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // MIPS does not have the concept of a byte register.
return "nobytereg";
}
-
const char* NameConverter::NameInCode(byte* addr) const {
// The default name converter is called for unknown code. So we will not try
// to access any memory.
return "";
}
-
//------------------------------------------------------------------------------
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
@@ -2780,11 +2714,8 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return d.InstructionDecode(instruction);
}
-
// The MIPS assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return -1;
-}
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
UnimplementedOpcodeAction unimplemented_action) {
@@ -2796,12 +2727,11 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
- *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
}
}
-
-#undef UNSUPPORTED
+#undef STRING_STARTS_WITH
} // namespace disasm
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/diagnostics/mips64/disasm-mips64.cc
index 1279d25f42..5edb7836b5 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/diagnostics/mips64/disasm-mips64.cc
@@ -22,7 +22,6 @@
// of code into a FILE*, meaning that the above functionality could also be
// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
-
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
@@ -31,9 +30,9 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/base/platform/platform.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/mips64/constants-mips64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips64/constants-mips64.h"
+#include "src/diagnostics/disasm.h"
namespace v8 {
namespace internal {
@@ -47,9 +46,7 @@ class Decoder {
public:
Decoder(const disasm::NameConverter& converter,
v8::internal::Vector<char> out_buffer)
- : converter_(converter),
- out_buffer_(out_buffer),
- out_buffer_pos_(0) {
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
out_buffer_[out_buffer_pos_] = '\0';
}
@@ -105,7 +102,7 @@ class Decoder {
void PrintSImm26(Instruction* instr);
void PrintPCImm26(Instruction* instr, int delta_pc, int n_bits);
void PrintPCImm26(Instruction* instr);
- void PrintCode(Instruction* instr); // For break and trap instructions.
+ void PrintCode(Instruction* instr); // For break and trap instructions.
void PrintFormat(Instruction* instr); // For floating format postfix.
void PrintBp2(Instruction* instr);
void PrintBp3(Instruction* instr);
@@ -169,17 +166,12 @@ class Decoder {
DISALLOW_COPY_AND_ASSIGN(Decoder);
};
-
// Support for assertions in the Decoder formatting functions.
#define STRING_STARTS_WITH(string, compare_string) \
(strncmp(string, compare_string, strlen(compare_string)) == 0)
-
// Append the ch to the output buffer.
-void Decoder::PrintChar(const char ch) {
- out_buffer_[out_buffer_pos_++] = ch;
-}
-
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
// Append the str to the output buffer.
void Decoder::Print(const char* str) {
@@ -191,31 +183,26 @@ void Decoder::Print(const char* str) {
out_buffer_[out_buffer_pos_] = 0;
}
-
// Print the register name according to the active name converter.
void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
-
void Decoder::PrintRs(Instruction* instr) {
int reg = instr->RsValue();
PrintRegister(reg);
}
-
void Decoder::PrintRt(Instruction* instr) {
int reg = instr->RtValue();
PrintRegister(reg);
}
-
void Decoder::PrintRd(Instruction* instr) {
int reg = instr->RdValue();
PrintRegister(reg);
}
-
// Print the FPUregister name according to the active name converter.
void Decoder::PrintFPURegister(int freg) {
Print(converter_.NameOfXMMRegister(freg));
@@ -251,33 +238,28 @@ void Decoder::PrintFs(Instruction* instr) {
PrintFPURegister(freg);
}
-
void Decoder::PrintFt(Instruction* instr) {
int freg = instr->RtValue();
PrintFPURegister(freg);
}
-
void Decoder::PrintFd(Instruction* instr) {
int freg = instr->RdValue();
PrintFPURegister(freg);
}
-
// Print the integer value of the sa field.
void Decoder::PrintSa(Instruction* instr) {
int sa = instr->SaValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
}
-
// Print the integer value of the sa field of a lsa instruction.
void Decoder::PrintLsaSa(Instruction* instr) {
int sa = instr->LsaSaValue() + 1;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
}
-
// Print the integer value of the rd field, when it is not used as reg.
void Decoder::PrintSd(Instruction* instr) {
int sd = instr->RdValue();
@@ -321,14 +303,12 @@ void Decoder::PrintSs5(Instruction* instr) {
SNPrintF(out_buffer_ + out_buffer_pos_, "%d", lsbminus32 + 32);
}
-
// Print the integer value of the cc field for the bc1t/f instructions.
void Decoder::PrintBc(Instruction* instr) {
int cc = instr->FBccValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
}
-
// Print the integer value of the cc field for the FP compare instructions.
void Decoder::PrintCc(Instruction* instr) {
int cc = instr->FCccValue();
@@ -353,7 +333,6 @@ void Decoder::PrintUImm16(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
}
-
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
int32_t imm =
@@ -361,14 +340,12 @@ void Decoder::PrintSImm16(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
-
// Print 16-bit hexa immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
int32_t imm = instr->Imm16Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
-
// Print absoulte address for 16-bit offset or immediate value.
// The absolute address is calculated according following expression:
// PC + delta_pc + (offset << n_bits)
@@ -380,7 +357,6 @@ void Decoder::PrintPCImm16(Instruction* instr, int delta_pc, int n_bits) {
delta_pc + (offset << n_bits)));
}
-
// Print 18-bit signed immediate value.
void Decoder::PrintSImm18(Instruction* instr) {
int32_t imm =
@@ -388,21 +364,18 @@ void Decoder::PrintSImm18(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
-
// Print 18-bit hexa immediate value.
void Decoder::PrintXImm18(Instruction* instr) {
int32_t imm = instr->Imm18Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
-
// Print 19-bit hexa immediate value.
void Decoder::PrintXImm19(Instruction* instr) {
int32_t imm = instr->Imm19Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
-
// Print 19-bit signed immediate value.
void Decoder::PrintSImm19(Instruction* instr) {
int32_t imm19 = instr->Imm19Value();
@@ -412,14 +385,12 @@ void Decoder::PrintSImm19(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm19);
}
-
// Print 21-bit immediate value.
void Decoder::PrintXImm21(Instruction* instr) {
uint32_t imm = instr->Imm21Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
-
// Print 21-bit signed immediate value.
void Decoder::PrintSImm21(Instruction* instr) {
int32_t imm21 = instr->Imm21Value();
@@ -429,7 +400,6 @@ void Decoder::PrintSImm21(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm21);
}
-
// Print absoulte address for 21-bit offset or immediate value.
// The absolute address is calculated according following expression:
// PC + delta_pc + (offset << n_bits)
@@ -444,7 +414,6 @@ void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) {
delta_pc + (imm21 << n_bits)));
}
-
// Print 26-bit hex immediate value.
void Decoder::PrintXImm26(Instruction* instr) {
uint64_t target = static_cast<uint64_t>(instr->Imm26Value())
@@ -454,7 +423,6 @@ void Decoder::PrintXImm26(Instruction* instr) {
SNPrintF(out_buffer_ + out_buffer_pos_, "0x%" PRIx64, target);
}
-
// Print 26-bit signed immediate value.
void Decoder::PrintSImm26(Instruction* instr) {
int32_t imm26 = instr->Imm26Value();
@@ -464,7 +432,6 @@ void Decoder::PrintSImm26(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm26);
}
-
// Print absoulte address for 26-bit offset or immediate value.
// The absolute address is calculated according following expression:
// PC + delta_pc + (offset << n_bits)
@@ -479,7 +446,6 @@ void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) {
delta_pc + (imm26 << n_bits)));
}
-
// Print absoulte address for 26-bit offset or immediate value.
// The absolute address is calculated according following expression:
// PC[GPRLEN-1 .. 28] || instr_index26 || 00
@@ -492,19 +458,16 @@ void Decoder::PrintPCImm26(Instruction* instr) {
converter_.NameOfAddress((reinterpret_cast<byte*>(pc))));
}
-
void Decoder::PrintBp2(Instruction* instr) {
int bp2 = instr->Bp2Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", bp2);
}
-
void Decoder::PrintBp3(Instruction* instr) {
int bp3 = instr->Bp3Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", bp3);
}
-
// Print 26-bit immediate value.
void Decoder::PrintCode(Instruction* instr) {
if (instr->OpcodeFieldRaw() != SPECIAL)
@@ -512,10 +475,10 @@ void Decoder::PrintCode(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case BREAK: {
int32_t code = instr->Bits(25, 6);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "0x%05x (%d)", code, code);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x (%d)", code, code);
break;
- }
+ }
case TGE:
case TGEU:
case TLT:
@@ -528,7 +491,7 @@ void Decoder::PrintCode(Instruction* instr) {
break;
}
default: // Not a break or trap instruction.
- break;
+ break;
}
}
@@ -597,7 +560,6 @@ void Decoder::PrintFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
PrintChar(formatLetter);
}
@@ -682,9 +644,7 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
}
// Printing of instruction name.
-void Decoder::PrintInstructionName(Instruction* instr) {
-}
-
+void Decoder::PrintInstructionName(Instruction* instr) {}
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
@@ -706,7 +666,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
UNREACHABLE();
}
-
// Handle all FPUregister based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
@@ -779,12 +738,12 @@ int Decoder::FormatMSARegister(Instruction* instr, const char* format) {
// characters that were consumed from the formatting string.
int Decoder::FormatOption(Instruction* instr, const char* format) {
switch (format[0]) {
- case 'c': { // 'code for break or trap instructions.
+ case 'c': { // 'code for break or trap instructions.
DCHECK(STRING_STARTS_WITH(format, "code"));
PrintCode(instr);
return 4;
}
- case 'i': { // 'imm16u or 'imm26.
+ case 'i': { // 'imm16u or 'imm26.
if (format[3] == '1') {
if (format[4] == '6') {
DCHECK(STRING_STARTS_WITH(format, "imm16"));
@@ -960,16 +919,16 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
UNREACHABLE();
}
- case 'r': { // 'r: registers.
+ case 'r': { // 'r: registers.
return FormatRegister(instr, format);
}
- case 'f': { // 'f: FPUregisters.
+ case 'f': { // 'f: FPUregisters.
return FormatFPURegister(instr, format);
}
case 'w': { // 'w: MSA Register
return FormatMSARegister(instr, format);
}
- case 's': { // 'sa.
+ case 's': { // 'sa.
switch (format[1]) {
case 'a':
if (format[2] == '2') {
@@ -1031,7 +990,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
}
}
- case 'C': { // 'Cc - Special for c.xx.d cc field.
+ case 'C': { // 'Cc - Special for c.xx.d cc field.
DCHECK(STRING_STARTS_WITH(format, "Cc"));
PrintCc(instr);
return 2;
@@ -1047,7 +1006,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
UNREACHABLE();
}
-
// Format takes a formatting string for a whole instruction and prints it into
// the output buffer. All escaped options are handed to FormatOption to be
// parsed further.
@@ -1061,16 +1019,12 @@ void Decoder::Format(Instruction* instr, const char* format) {
}
cur = *format++;
}
- out_buffer_[out_buffer_pos_] = '\0';
+ out_buffer_[out_buffer_pos_] = '\0';
}
-
// For currently unimplemented decodings the disassembler calls Unknown(instr)
// which will just print "unknown" of the instruction bits.
-void Decoder::Unknown(Instruction* instr) {
- Format(instr, "unknown");
-}
-
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
int Decoder::DecodeBreakInstr(Instruction* instr) {
// This is already known to be BREAK instr, just extract the code.
@@ -1090,7 +1044,6 @@ int Decoder::DecodeBreakInstr(Instruction* instr) {
}
}
-
bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case RINT:
@@ -1226,7 +1179,6 @@ bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) {
return true;
}
-
void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) {
if (!DecodeTypeRegisterRsType(instr)) {
switch (instr->FunctionFieldRaw()) {
@@ -1246,7 +1198,6 @@ void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) {
if (!DecodeTypeRegisterRsType(instr)) {
switch (instr->FunctionFieldRaw()) {
@@ -1263,7 +1214,6 @@ void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterLRsType(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case CVT_D_L:
@@ -1310,7 +1260,6 @@ void Decoder::DecodeTypeRegisterLRsType(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterWRsType(Instruction* instr) {
switch (instr->FunctionValue()) {
case CVT_S_W: // Convert word to float (single).
@@ -1357,7 +1306,6 @@ void Decoder::DecodeTypeRegisterWRsType(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterCOP1(Instruction* instr) {
switch (instr->RsFieldRaw()) {
case MFC1:
@@ -1402,7 +1350,6 @@ void Decoder::DecodeTypeRegisterCOP1(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterCOP1X(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case MADD_S:
@@ -1422,7 +1369,6 @@ void Decoder::DecodeTypeRegisterCOP1X(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case JR:
@@ -1711,7 +1657,6 @@ void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case MUL:
@@ -1732,7 +1677,6 @@ void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
case EXT: {
@@ -1845,7 +1789,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
}
-
int Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case COP1: // Coprocessor instructions.
@@ -1933,7 +1876,6 @@ void Decoder::DecodeTypeImmediateCOP1(Instruction* instr) {
}
}
-
void Decoder::DecodeTypeImmediateREGIMM(Instruction* instr) {
switch (instr->RtFieldRaw()) {
case BLTZ:
@@ -2344,11 +2286,9 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
default:
printf("a 0x%x \n", instr->OpcodeFieldRaw());
UNREACHABLE();
- break;
}
}
-
void Decoder::DecodeTypeJump(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
case J:
@@ -2993,7 +2933,6 @@ void Decoder::DecodeTypeMsa2RF(Instruction* instr) {
}
}
-
// Disassemble the instruction at *instr_ptr into the output buffer.
// All instructions are one word long, except for the simulator
// pseudo-instruction stop(msg). For that one special case, we return
@@ -3001,8 +2940,7 @@ void Decoder::DecodeTypeMsa2RF(Instruction* instr) {
int Decoder::InstructionDecode(byte* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ",
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
instr->InstructionBits());
switch (instr->InstructionType()) {
case Instruction::kRegisterType: {
@@ -3027,45 +2965,38 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
} // namespace internal
} // namespace v8
-
//------------------------------------------------------------------------------
namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
- return tmp_buffer_.start();
+ return tmp_buffer_.begin();
}
-
const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
-
const char* NameConverter::NameOfCPURegister(int reg) const {
return v8::internal::Registers::Name(reg);
}
-
const char* NameConverter::NameOfXMMRegister(int reg) const {
return v8::internal::FPURegisters::Name(reg);
}
-
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // MIPS does not have the concept of a byte register.
return "nobytereg";
}
-
const char* NameConverter::NameInCode(byte* addr) const {
// The default name converter is called for unknown code. So we will not try
// to access any memory.
return "";
}
-
//------------------------------------------------------------------------------
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
@@ -3074,11 +3005,8 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return d.InstructionDecode(instruction);
}
-
// The MIPS assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return -1;
-}
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
UnimplementedOpcodeAction unimplemented_action) {
@@ -3090,12 +3018,11 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
- *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
}
}
-
-#undef UNSUPPORTED
+#undef STRING_STARTS_WITH
} // namespace disasm
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index 23ef03bc00..c5219970cb 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -2,33 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects.h"
-
-#include "src/assembler-inl.h"
-#include "src/bootstrapper.h"
-#include "src/counters.h"
-#include "src/date.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/elements.h"
-#include "src/field-type.h"
+#include "src/objects/objects.h"
+
+#include "src/codegen/assembler-inl.h"
+#include "src/date/date.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/heap/combined-heap.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/ic/handler-configuration-inl.h"
-#include "src/layout-descriptor.h"
-#include "src/objects-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
#include "src/objects/cell-inl.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/elements.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/embedder-data-slot-inl.h"
#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/field-type.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/layout-descriptor.h"
+#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator-inl.h"
#include "src/objects/js-collator-inl.h"
@@ -60,10 +61,12 @@
#include "src/objects/promise-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
-#include "src/ostreams.h"
+#include "src/objects/template-objects-inl.h"
+#include "src/objects/transitions-inl.h"
#include "src/regexp/jsregexp.h"
-#include "src/transitions-inl.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "torque-generated/class-verifiers-tq.h"
namespace v8 {
namespace internal {
@@ -92,21 +95,26 @@ namespace internal {
#ifdef VERIFY_HEAP
+#define USE_TORQUE_VERIFIER(Class) \
+ void Class::Class##Verify(Isolate* isolate) { \
+ TorqueGeneratedClassVerifiers::Class##Verify(*this, isolate); \
+ }
+
void Object::ObjectVerify(Isolate* isolate) {
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kObjectVerify);
if (IsSmi()) {
- Smi::cast(*this)->SmiVerify(isolate);
+ Smi::cast(*this).SmiVerify(isolate);
} else {
- HeapObject::cast(*this)->HeapObjectVerify(isolate);
+ HeapObject::cast(*this).HeapObjectVerify(isolate);
}
CHECK(!IsConstructor() || IsCallable());
}
void Object::VerifyPointer(Isolate* isolate, Object p) {
- if (p->IsHeapObject()) {
+ if (p.IsHeapObject()) {
HeapObject::VerifyHeapPointer(isolate, p);
} else {
- CHECK(p->IsSmi());
+ CHECK(p.IsSmi());
}
}
@@ -121,8 +129,8 @@ void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p) {
namespace {
void VerifyForeignPointer(Isolate* isolate, HeapObject host, Object foreign) {
- host->VerifyPointer(isolate, foreign);
- CHECK(foreign->IsUndefined(isolate) || Foreign::IsNormalized(foreign));
+ host.VerifyPointer(isolate, foreign);
+ CHECK(foreign.IsUndefined(isolate) || Foreign::IsNormalized(foreign));
}
} // namespace
@@ -133,20 +141,27 @@ void Smi::SmiVerify(Isolate* isolate) {
}
void HeapObject::HeapObjectVerify(Isolate* isolate) {
- VerifyHeapPointer(isolate, map());
- CHECK(map()->IsMap());
+ TorqueGeneratedClassVerifiers::HeapObjectVerify(*this, isolate);
- switch (map()->instance_type()) {
+ switch (map().instance_type()) {
#define STRING_TYPE_CASE(TYPE, size, name, CamelName) case TYPE:
STRING_TYPE_LIST(STRING_TYPE_CASE)
#undef STRING_TYPE_CASE
- String::cast(*this)->StringVerify(isolate);
+ if (IsConsString()) {
+ ConsString::cast(*this).ConsStringVerify(isolate);
+ } else if (IsSlicedString()) {
+ SlicedString::cast(*this).SlicedStringVerify(isolate);
+ } else if (IsThinString()) {
+ ThinString::cast(*this).ThinStringVerify(isolate);
+ } else {
+ String::cast(*this).StringVerify(isolate);
+ }
break;
case SYMBOL_TYPE:
- Symbol::cast(*this)->SymbolVerify(isolate);
+ Symbol::cast(*this).SymbolVerify(isolate);
break;
case MAP_TYPE:
- Map::cast(*this)->MapVerify(isolate);
+ Map::cast(*this).MapVerify(isolate);
break;
case HEAP_NUMBER_TYPE:
CHECK(IsHeapNumber());
@@ -155,17 +170,17 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
CHECK(IsMutableHeapNumber());
break;
case BIGINT_TYPE:
- BigInt::cast(*this)->BigIntVerify(isolate);
+ BigInt::cast(*this).BigIntVerify(isolate);
break;
case CALL_HANDLER_INFO_TYPE:
- CallHandlerInfo::cast(*this)->CallHandlerInfoVerify(isolate);
+ CallHandlerInfo::cast(*this).CallHandlerInfoVerify(isolate);
break;
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
ObjectBoilerplateDescription::cast(*this)
- ->ObjectBoilerplateDescriptionVerify(isolate);
+ .ObjectBoilerplateDescriptionVerify(isolate);
break;
case EMBEDDER_DATA_ARRAY_TYPE:
- EmbedderDataArray::cast(*this)->EmbedderDataArrayVerify(isolate);
+ EmbedderDataArray::cast(*this).EmbedderDataArrayVerify(isolate);
break;
// FixedArray types
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
@@ -182,7 +197,7 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case FIXED_ARRAY_TYPE:
case SCOPE_INFO_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
- FixedArray::cast(*this)->FixedArrayVerify(isolate);
+ FixedArray::cast(*this).FixedArrayVerify(isolate);
break;
case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
@@ -193,301 +208,290 @@ void HeapObject::HeapObjectVerify(Isolate* isolate) {
case MODULE_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
- Context::cast(*this)->ContextVerify(isolate);
+ Context::cast(*this).ContextVerify(isolate);
break;
case NATIVE_CONTEXT_TYPE:
- NativeContext::cast(*this)->NativeContextVerify(isolate);
+ NativeContext::cast(*this).NativeContextVerify(isolate);
break;
case WEAK_FIXED_ARRAY_TYPE:
- WeakFixedArray::cast(*this)->WeakFixedArrayVerify(isolate);
+ WeakFixedArray::cast(*this).WeakFixedArrayVerify(isolate);
break;
case WEAK_ARRAY_LIST_TYPE:
- WeakArrayList::cast(*this)->WeakArrayListVerify(isolate);
+ WeakArrayList::cast(*this).WeakArrayListVerify(isolate);
break;
case FIXED_DOUBLE_ARRAY_TYPE:
- FixedDoubleArray::cast(*this)->FixedDoubleArrayVerify(isolate);
+ FixedDoubleArray::cast(*this).FixedDoubleArrayVerify(isolate);
break;
case FEEDBACK_METADATA_TYPE:
- FeedbackMetadata::cast(*this)->FeedbackMetadataVerify(isolate);
+ FeedbackMetadata::cast(*this).FeedbackMetadataVerify(isolate);
break;
case BYTE_ARRAY_TYPE:
- ByteArray::cast(*this)->ByteArrayVerify(isolate);
+ ByteArray::cast(*this).ByteArrayVerify(isolate);
break;
case BYTECODE_ARRAY_TYPE:
- BytecodeArray::cast(*this)->BytecodeArrayVerify(isolate);
+ BytecodeArray::cast(*this).BytecodeArrayVerify(isolate);
break;
case DESCRIPTOR_ARRAY_TYPE:
- DescriptorArray::cast(*this)->DescriptorArrayVerify(isolate);
+ DescriptorArray::cast(*this).DescriptorArrayVerify(isolate);
break;
case TRANSITION_ARRAY_TYPE:
- TransitionArray::cast(*this)->TransitionArrayVerify(isolate);
+ TransitionArray::cast(*this).TransitionArrayVerify(isolate);
break;
case PROPERTY_ARRAY_TYPE:
- PropertyArray::cast(*this)->PropertyArrayVerify(isolate);
+ PropertyArray::cast(*this).PropertyArrayVerify(isolate);
break;
case FREE_SPACE_TYPE:
- FreeSpace::cast(*this)->FreeSpaceVerify(isolate);
+ FreeSpace::cast(*this).FreeSpaceVerify(isolate);
break;
case FEEDBACK_CELL_TYPE:
- FeedbackCell::cast(*this)->FeedbackCellVerify(isolate);
+ FeedbackCell::cast(*this).FeedbackCellVerify(isolate);
break;
case FEEDBACK_VECTOR_TYPE:
- FeedbackVector::cast(*this)->FeedbackVectorVerify(isolate);
+ FeedbackVector::cast(*this).FeedbackVectorVerify(isolate);
break;
-#define VERIFY_TYPED_ARRAY(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- Fixed##Type##Array::cast(*this)->FixedTypedArrayVerify(isolate); \
- break;
-
- TYPED_ARRAYS(VERIFY_TYPED_ARRAY)
-#undef VERIFY_TYPED_ARRAY
-
case CODE_TYPE:
- Code::cast(*this)->CodeVerify(isolate);
+ Code::cast(*this).CodeVerify(isolate);
break;
case ODDBALL_TYPE:
- Oddball::cast(*this)->OddballVerify(isolate);
+ Oddball::cast(*this).OddballVerify(isolate);
break;
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- JSObject::cast(*this)->JSObjectVerify(isolate);
+ JSObject::cast(*this).JSObjectVerify(isolate);
break;
case WASM_MODULE_TYPE:
- WasmModuleObject::cast(*this)->WasmModuleObjectVerify(isolate);
+ WasmModuleObject::cast(*this).WasmModuleObjectVerify(isolate);
break;
case WASM_TABLE_TYPE:
- WasmTableObject::cast(*this)->WasmTableObjectVerify(isolate);
+ WasmTableObject::cast(*this).WasmTableObjectVerify(isolate);
break;
case WASM_MEMORY_TYPE:
- WasmMemoryObject::cast(*this)->WasmMemoryObjectVerify(isolate);
+ WasmMemoryObject::cast(*this).WasmMemoryObjectVerify(isolate);
break;
case WASM_GLOBAL_TYPE:
- WasmGlobalObject::cast(*this)->WasmGlobalObjectVerify(isolate);
+ WasmGlobalObject::cast(*this).WasmGlobalObjectVerify(isolate);
break;
case WASM_EXCEPTION_TYPE:
- WasmExceptionObject::cast(*this)->WasmExceptionObjectVerify(isolate);
+ WasmExceptionObject::cast(*this).WasmExceptionObjectVerify(isolate);
break;
case WASM_INSTANCE_TYPE:
- WasmInstanceObject::cast(*this)->WasmInstanceObjectVerify(isolate);
+ WasmInstanceObject::cast(*this).WasmInstanceObjectVerify(isolate);
break;
case JS_ARGUMENTS_TYPE:
- JSArgumentsObject::cast(*this)->JSArgumentsObjectVerify(isolate);
+ JSArgumentsObject::cast(*this).JSArgumentsObjectVerify(isolate);
break;
case JS_GENERATOR_OBJECT_TYPE:
- JSGeneratorObject::cast(*this)->JSGeneratorObjectVerify(isolate);
+ JSGeneratorObject::cast(*this).JSGeneratorObjectVerify(isolate);
break;
case JS_ASYNC_FUNCTION_OBJECT_TYPE:
- JSAsyncFunctionObject::cast(*this)->JSAsyncFunctionObjectVerify(isolate);
+ JSAsyncFunctionObject::cast(*this).JSAsyncFunctionObjectVerify(isolate);
break;
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
- JSAsyncGeneratorObject::cast(*this)->JSAsyncGeneratorObjectVerify(
- isolate);
+ JSAsyncGeneratorObject::cast(*this).JSAsyncGeneratorObjectVerify(isolate);
break;
case JS_VALUE_TYPE:
- JSValue::cast(*this)->JSValueVerify(isolate);
+ JSValue::cast(*this).JSValueVerify(isolate);
break;
case JS_DATE_TYPE:
- JSDate::cast(*this)->JSDateVerify(isolate);
+ JSDate::cast(*this).JSDateVerify(isolate);
break;
case JS_BOUND_FUNCTION_TYPE:
- JSBoundFunction::cast(*this)->JSBoundFunctionVerify(isolate);
+ JSBoundFunction::cast(*this).JSBoundFunctionVerify(isolate);
break;
case JS_FUNCTION_TYPE:
- JSFunction::cast(*this)->JSFunctionVerify(isolate);
+ JSFunction::cast(*this).JSFunctionVerify(isolate);
break;
case JS_GLOBAL_PROXY_TYPE:
- JSGlobalProxy::cast(*this)->JSGlobalProxyVerify(isolate);
+ JSGlobalProxy::cast(*this).JSGlobalProxyVerify(isolate);
break;
case JS_GLOBAL_OBJECT_TYPE:
- JSGlobalObject::cast(*this)->JSGlobalObjectVerify(isolate);
+ JSGlobalObject::cast(*this).JSGlobalObjectVerify(isolate);
break;
case CELL_TYPE:
- Cell::cast(*this)->CellVerify(isolate);
+ Cell::cast(*this).CellVerify(isolate);
break;
case PROPERTY_CELL_TYPE:
- PropertyCell::cast(*this)->PropertyCellVerify(isolate);
+ PropertyCell::cast(*this).PropertyCellVerify(isolate);
break;
case JS_ARRAY_TYPE:
- JSArray::cast(*this)->JSArrayVerify(isolate);
+ JSArray::cast(*this).JSArrayVerify(isolate);
break;
case JS_MODULE_NAMESPACE_TYPE:
- JSModuleNamespace::cast(*this)->JSModuleNamespaceVerify(isolate);
+ JSModuleNamespace::cast(*this).JSModuleNamespaceVerify(isolate);
break;
case JS_SET_TYPE:
- JSSet::cast(*this)->JSSetVerify(isolate);
+ JSSet::cast(*this).JSSetVerify(isolate);
break;
case JS_MAP_TYPE:
- JSMap::cast(*this)->JSMapVerify(isolate);
+ JSMap::cast(*this).JSMapVerify(isolate);
break;
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
- JSSetIterator::cast(*this)->JSSetIteratorVerify(isolate);
+ JSSetIterator::cast(*this).JSSetIteratorVerify(isolate);
break;
case JS_MAP_KEY_ITERATOR_TYPE:
case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
case JS_MAP_VALUE_ITERATOR_TYPE:
- JSMapIterator::cast(*this)->JSMapIteratorVerify(isolate);
+ JSMapIterator::cast(*this).JSMapIteratorVerify(isolate);
break;
case JS_ARRAY_ITERATOR_TYPE:
- JSArrayIterator::cast(*this)->JSArrayIteratorVerify(isolate);
+ JSArrayIterator::cast(*this).JSArrayIteratorVerify(isolate);
break;
case JS_STRING_ITERATOR_TYPE:
- JSStringIterator::cast(*this)->JSStringIteratorVerify(isolate);
+ JSStringIterator::cast(*this).JSStringIteratorVerify(isolate);
break;
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
- JSAsyncFromSyncIterator::cast(*this)->JSAsyncFromSyncIteratorVerify(
+ JSAsyncFromSyncIterator::cast(*this).JSAsyncFromSyncIteratorVerify(
isolate);
break;
case WEAK_CELL_TYPE:
- WeakCell::cast(*this)->WeakCellVerify(isolate);
+ WeakCell::cast(*this).WeakCellVerify(isolate);
break;
case JS_WEAK_REF_TYPE:
- JSWeakRef::cast(*this)->JSWeakRefVerify(isolate);
+ JSWeakRef::cast(*this).JSWeakRefVerify(isolate);
break;
case JS_FINALIZATION_GROUP_TYPE:
- JSFinalizationGroup::cast(*this)->JSFinalizationGroupVerify(isolate);
+ JSFinalizationGroup::cast(*this).JSFinalizationGroupVerify(isolate);
break;
case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
JSFinalizationGroupCleanupIterator::cast(*this)
- ->JSFinalizationGroupCleanupIteratorVerify(isolate);
+ .JSFinalizationGroupCleanupIteratorVerify(isolate);
break;
case JS_WEAK_MAP_TYPE:
- JSWeakMap::cast(*this)->JSWeakMapVerify(isolate);
+ JSWeakMap::cast(*this).JSWeakMapVerify(isolate);
break;
case JS_WEAK_SET_TYPE:
- JSWeakSet::cast(*this)->JSWeakSetVerify(isolate);
+ JSWeakSet::cast(*this).JSWeakSetVerify(isolate);
break;
case JS_PROMISE_TYPE:
- JSPromise::cast(*this)->JSPromiseVerify(isolate);
+ JSPromise::cast(*this).JSPromiseVerify(isolate);
break;
case JS_REGEXP_TYPE:
- JSRegExp::cast(*this)->JSRegExpVerify(isolate);
+ JSRegExp::cast(*this).JSRegExpVerify(isolate);
break;
case JS_REGEXP_STRING_ITERATOR_TYPE:
- JSRegExpStringIterator::cast(*this)->JSRegExpStringIteratorVerify(
- isolate);
+ JSRegExpStringIterator::cast(*this).JSRegExpStringIteratorVerify(isolate);
break;
case FILLER_TYPE:
break;
case JS_PROXY_TYPE:
- JSProxy::cast(*this)->JSProxyVerify(isolate);
+ JSProxy::cast(*this).JSProxyVerify(isolate);
break;
case FOREIGN_TYPE:
- Foreign::cast(*this)->ForeignVerify(isolate);
+ Foreign::cast(*this).ForeignVerify(isolate);
break;
case PREPARSE_DATA_TYPE:
- PreparseData::cast(*this)->PreparseDataVerify(isolate);
+ PreparseData::cast(*this).PreparseDataVerify(isolate);
break;
case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
UncompiledDataWithoutPreparseData::cast(*this)
- ->UncompiledDataWithoutPreparseDataVerify(isolate);
+ .UncompiledDataWithoutPreparseDataVerify(isolate);
break;
case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
UncompiledDataWithPreparseData::cast(*this)
- ->UncompiledDataWithPreparseDataVerify(isolate);
+ .UncompiledDataWithPreparseDataVerify(isolate);
break;
case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::cast(*this)->SharedFunctionInfoVerify(isolate);
+ SharedFunctionInfo::cast(*this).SharedFunctionInfoVerify(isolate);
break;
case JS_MESSAGE_OBJECT_TYPE:
- JSMessageObject::cast(*this)->JSMessageObjectVerify(isolate);
+ JSMessageObject::cast(*this).JSMessageObjectVerify(isolate);
break;
case JS_ARRAY_BUFFER_TYPE:
- JSArrayBuffer::cast(*this)->JSArrayBufferVerify(isolate);
+ JSArrayBuffer::cast(*this).JSArrayBufferVerify(isolate);
break;
case JS_TYPED_ARRAY_TYPE:
- JSTypedArray::cast(*this)->JSTypedArrayVerify(isolate);
+ JSTypedArray::cast(*this).JSTypedArrayVerify(isolate);
break;
case JS_DATA_VIEW_TYPE:
- JSDataView::cast(*this)->JSDataViewVerify(isolate);
+ JSDataView::cast(*this).JSDataViewVerify(isolate);
break;
case SMALL_ORDERED_HASH_SET_TYPE:
- SmallOrderedHashSet::cast(*this)->SmallOrderedHashSetVerify(isolate);
+ SmallOrderedHashSet::cast(*this).SmallOrderedHashSetVerify(isolate);
break;
case SMALL_ORDERED_HASH_MAP_TYPE:
- SmallOrderedHashMap::cast(*this)->SmallOrderedHashMapVerify(isolate);
+ SmallOrderedHashMap::cast(*this).SmallOrderedHashMapVerify(isolate);
break;
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
- SmallOrderedNameDictionary::cast(*this)->SmallOrderedNameDictionaryVerify(
+ SmallOrderedNameDictionary::cast(*this).SmallOrderedNameDictionaryVerify(
isolate);
break;
case CODE_DATA_CONTAINER_TYPE:
- CodeDataContainer::cast(*this)->CodeDataContainerVerify(isolate);
+ CodeDataContainer::cast(*this).CodeDataContainerVerify(isolate);
break;
#ifdef V8_INTL_SUPPORT
case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- JSV8BreakIterator::cast(*this)->JSV8BreakIteratorVerify(isolate);
+ JSV8BreakIterator::cast(*this).JSV8BreakIteratorVerify(isolate);
break;
case JS_INTL_COLLATOR_TYPE:
- JSCollator::cast(*this)->JSCollatorVerify(isolate);
+ JSCollator::cast(*this).JSCollatorVerify(isolate);
break;
case JS_INTL_DATE_TIME_FORMAT_TYPE:
- JSDateTimeFormat::cast(*this)->JSDateTimeFormatVerify(isolate);
+ JSDateTimeFormat::cast(*this).JSDateTimeFormatVerify(isolate);
break;
case JS_INTL_LIST_FORMAT_TYPE:
- JSListFormat::cast(*this)->JSListFormatVerify(isolate);
+ JSListFormat::cast(*this).JSListFormatVerify(isolate);
break;
case JS_INTL_LOCALE_TYPE:
- JSLocale::cast(*this)->JSLocaleVerify(isolate);
+ JSLocale::cast(*this).JSLocaleVerify(isolate);
break;
case JS_INTL_NUMBER_FORMAT_TYPE:
- JSNumberFormat::cast(*this)->JSNumberFormatVerify(isolate);
+ JSNumberFormat::cast(*this).JSNumberFormatVerify(isolate);
break;
case JS_INTL_PLURAL_RULES_TYPE:
- JSPluralRules::cast(*this)->JSPluralRulesVerify(isolate);
+ JSPluralRules::cast(*this).JSPluralRulesVerify(isolate);
break;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- JSRelativeTimeFormat::cast(*this)->JSRelativeTimeFormatVerify(isolate);
+ JSRelativeTimeFormat::cast(*this).JSRelativeTimeFormatVerify(isolate);
break;
case JS_INTL_SEGMENT_ITERATOR_TYPE:
- JSSegmentIterator::cast(*this)->JSSegmentIteratorVerify(isolate);
+ JSSegmentIterator::cast(*this).JSSegmentIteratorVerify(isolate);
break;
case JS_INTL_SEGMENTER_TYPE:
- JSSegmenter::cast(*this)->JSSegmenterVerify(isolate);
+ JSSegmenter::cast(*this).JSSegmenterVerify(isolate);
break;
#endif // V8_INTL_SUPPORT
-#define MAKE_STRUCT_CASE(TYPE, Name, name) \
- case TYPE: \
- Name::cast(*this)->Name##Verify(isolate); \
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
+ Name::cast(*this).Name##Verify(isolate); \
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
case ALLOCATION_SITE_TYPE:
- AllocationSite::cast(*this)->AllocationSiteVerify(isolate);
+ AllocationSite::cast(*this).AllocationSiteVerify(isolate);
break;
case LOAD_HANDLER_TYPE:
- LoadHandler::cast(*this)->LoadHandlerVerify(isolate);
+ LoadHandler::cast(*this).LoadHandlerVerify(isolate);
break;
case STORE_HANDLER_TYPE:
- StoreHandler::cast(*this)->StoreHandlerVerify(isolate);
+ StoreHandler::cast(*this).StoreHandlerVerify(isolate);
break;
}
}
// static
void HeapObject::VerifyHeapPointer(Isolate* isolate, Object p) {
- CHECK(p->IsHeapObject());
- HeapObject ho = HeapObject::cast(p);
- CHECK(isolate->heap()->Contains(ho));
+ CHECK(p.IsHeapObject());
+ CHECK(IsValidHeapObject(isolate->heap(), HeapObject::cast(p)));
}
void Symbol::SymbolVerify(Isolate* isolate) {
- CHECK(IsSymbol());
+ TorqueGeneratedClassVerifiers::SymbolVerify(*this, isolate);
CHECK(HasHashCode());
CHECK_GT(Hash(), 0);
- CHECK(name()->IsUndefined(isolate) || name()->IsString());
+ CHECK(name().IsUndefined(isolate) || name().IsString());
CHECK_IMPLIES(IsPrivateName(), IsPrivate());
}
-void ByteArray::ByteArrayVerify(Isolate* isolate) { CHECK(IsByteArray()); }
+USE_TORQUE_VERIFIER(ByteArray)
void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
// TODO(oth): Walk bytecodes and immediate values to validate sanity.
@@ -496,42 +500,21 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
// - No Illegal bytecodes.
// - No consecutive sequences of prefix Wide / ExtraWide.
CHECK(IsBytecodeArray());
- CHECK(constant_pool()->IsFixedArray());
+ CHECK(constant_pool().IsFixedArray());
VerifyHeapPointer(isolate, constant_pool());
}
-void FreeSpace::FreeSpaceVerify(Isolate* isolate) {
- CHECK(IsFreeSpace());
- VerifySmiField(kSizeOffset);
-}
+USE_TORQUE_VERIFIER(FreeSpace)
-void FeedbackCell::FeedbackCellVerify(Isolate* isolate) {
- CHECK(IsFeedbackCell());
-
- VerifyHeapPointer(isolate, value());
- CHECK(value()->IsUndefined(isolate) || value()->IsFeedbackVector() ||
- value()->IsFixedArray());
-}
+USE_TORQUE_VERIFIER(FeedbackCell)
void FeedbackVector::FeedbackVectorVerify(Isolate* isolate) {
- CHECK(IsFeedbackVector());
- CHECK(closure_feedback_cell_array()->IsFixedArray());
+ TorqueGeneratedClassVerifiers::FeedbackVectorVerify(*this, isolate);
MaybeObject code = optimized_code_weak_or_smi();
MaybeObject::VerifyMaybeObjectPointer(isolate, code);
CHECK(code->IsSmi() || code->IsWeakOrCleared());
}
-template <class Traits>
-void FixedTypedArray<Traits>::FixedTypedArrayVerify(Isolate* isolate) {
- CHECK(IsHeapObject() && map()->instance_type() == Traits::kInstanceType);
- if (base_pointer()->ptr() == ptr()) {
- CHECK_EQ(reinterpret_cast<Address>(external_pointer()),
- FixedTypedArrayBase::kDataOffset - kHeapObjectTag);
- } else {
- CHECK_EQ(base_pointer(), Smi::kZero);
- }
-}
-
bool JSObject::ElementsAreSafeToExamine() const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
@@ -539,68 +522,67 @@ bool JSObject::ElementsAreSafeToExamine() const {
}
namespace {
+
void VerifyJSObjectElements(Isolate* isolate, JSObject object) {
// Only TypedArrays can have these specialized elements.
- if (object->IsJSTypedArray()) {
- // TODO(cbruni): Fix CreateTypedArray to either not instantiate the object
- // or propertly initialize it on errors during construction.
- /* CHECK(object->HasFixedTypedArrayElements()); */
- /* CHECK(object->elements()->IsFixedTypedArrayBase()); */
+ if (object.IsJSTypedArray()) {
+ // TODO(bmeurer,v8:4153): Fix CreateTypedArray to either not instantiate
+    // the object or properly initialize it on errors during construction.
+ /* CHECK(object->HasTypedArrayElements()); */
return;
}
- CHECK(!object->HasFixedTypedArrayElements());
- CHECK(!object->elements()->IsFixedTypedArrayBase());
+ CHECK(!object.elements().IsByteArray());
- if (object->HasDoubleElements()) {
- if (object->elements()->length() > 0) {
- CHECK(object->elements()->IsFixedDoubleArray());
+ if (object.HasDoubleElements()) {
+ if (object.elements().length() > 0) {
+ CHECK(object.elements().IsFixedDoubleArray());
}
return;
}
- FixedArray elements = FixedArray::cast(object->elements());
- if (object->HasSmiElements()) {
+ FixedArray elements = FixedArray::cast(object.elements());
+ if (object.HasSmiElements()) {
// We might have a partially initialized backing store, in which case we
// allow the hole + smi values.
- for (int i = 0; i < elements->length(); i++) {
- Object value = elements->get(i);
- CHECK(value->IsSmi() || value->IsTheHole(isolate));
+ for (int i = 0; i < elements.length(); i++) {
+ Object value = elements.get(i);
+ CHECK(value.IsSmi() || value.IsTheHole(isolate));
}
- } else if (object->HasObjectElements()) {
- for (int i = 0; i < elements->length(); i++) {
- Object element = elements->get(i);
- CHECK_IMPLIES(!element->IsSmi(), !HasWeakHeapObjectTag(element));
+ } else if (object.HasObjectElements()) {
+ for (int i = 0; i < elements.length(); i++) {
+ Object element = elements.get(i);
+ CHECK(!HasWeakHeapObjectTag(element));
}
}
}
} // namespace
void JSObject::JSObjectVerify(Isolate* isolate) {
- VerifyPointer(isolate, raw_properties_or_hash());
+ TorqueGeneratedClassVerifiers::JSObjectVerify(*this, isolate);
VerifyHeapPointer(isolate, elements());
CHECK_IMPLIES(HasSloppyArgumentsElements(), IsJSArgumentsObject());
if (HasFastProperties()) {
- int actual_unused_property_fields = map()->GetInObjectProperties() +
- property_array()->length() -
- map()->NextFreePropertyIndex();
- if (map()->UnusedPropertyFields() != actual_unused_property_fields) {
+ int actual_unused_property_fields = map().GetInObjectProperties() +
+ property_array().length() -
+ map().NextFreePropertyIndex();
+ if (map().UnusedPropertyFields() != actual_unused_property_fields) {
// There are two reasons why this can happen:
// - in the middle of StoreTransitionStub when the new extended backing
// store is already set into the object and the allocation of the
// MutableHeapNumber triggers GC while the map isn't updated yet.
// - deletion of the last property can leave additional backing store
// capacity behind.
- CHECK_GT(actual_unused_property_fields, map()->UnusedPropertyFields());
- int delta = actual_unused_property_fields - map()->UnusedPropertyFields();
+ CHECK_GT(actual_unused_property_fields, map().UnusedPropertyFields());
+ int delta = actual_unused_property_fields - map().UnusedPropertyFields();
CHECK_EQ(0, delta % JSObject::kFieldsAdded);
}
- DescriptorArray descriptors = map()->instance_descriptors();
+ DescriptorArray descriptors = map().instance_descriptors();
bool is_transitionable_fast_elements_kind =
- IsTransitionableFastElementsKind(map()->elements_kind());
+ IsTransitionableFastElementsKind(map().elements_kind());
- for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ for (int i = 0; i < map().NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
Representation r = details.representation();
@@ -613,73 +595,72 @@ void JSObject::JSObjectVerify(Isolate* isolate) {
VerifyObjectField(isolate, index.offset());
}
Object value = RawFastPropertyAt(index);
- if (r.IsDouble()) DCHECK(value->IsMutableHeapNumber());
- if (value->IsUninitialized(isolate)) continue;
- if (r.IsSmi()) DCHECK(value->IsSmi());
- if (r.IsHeapObject()) DCHECK(value->IsHeapObject());
- FieldType field_type = descriptors->GetFieldType(i);
- bool type_is_none = field_type->IsNone();
- bool type_is_any = field_type->IsAny();
+ if (r.IsDouble()) DCHECK(value.IsMutableHeapNumber());
+ if (value.IsUninitialized(isolate)) continue;
+ if (r.IsSmi()) DCHECK(value.IsSmi());
+ if (r.IsHeapObject()) DCHECK(value.IsHeapObject());
+ FieldType field_type = descriptors.GetFieldType(i);
+ bool type_is_none = field_type.IsNone();
+ bool type_is_any = field_type.IsAny();
if (r.IsNone()) {
CHECK(type_is_none);
} else if (!type_is_any && !(type_is_none && r.IsHeapObject())) {
- CHECK(!field_type->NowStable() || field_type->NowContains(value));
+ CHECK(!field_type.NowStable() || field_type.NowContains(value));
}
CHECK_IMPLIES(is_transitionable_fast_elements_kind,
Map::IsMostGeneralFieldType(r, field_type));
}
}
- if (map()->EnumLength() != kInvalidEnumCacheSentinel) {
- EnumCache enum_cache = descriptors->enum_cache();
- FixedArray keys = enum_cache->keys();
- FixedArray indices = enum_cache->indices();
- CHECK_LE(map()->EnumLength(), keys->length());
+ if (map().EnumLength() != kInvalidEnumCacheSentinel) {
+ EnumCache enum_cache = descriptors.enum_cache();
+ FixedArray keys = enum_cache.keys();
+ FixedArray indices = enum_cache.indices();
+ CHECK_LE(map().EnumLength(), keys.length());
CHECK_IMPLIES(indices != ReadOnlyRoots(isolate).empty_fixed_array(),
- keys->length() == indices->length());
+ keys.length() == indices.length());
}
}
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
- CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
- map()->has_frozen_or_sealed_elements() ||
+ CHECK_EQ((map().has_fast_smi_or_object_elements() ||
+ map().has_frozen_or_sealed_elements() ||
(elements() == GetReadOnlyRoots().empty_fixed_array()) ||
HasFastStringWrapperElements()),
- (elements()->map() == GetReadOnlyRoots().fixed_array_map() ||
- elements()->map() == GetReadOnlyRoots().fixed_cow_array_map()));
- CHECK_EQ(map()->has_fast_object_elements(), HasObjectElements());
+ (elements().map() == GetReadOnlyRoots().fixed_array_map() ||
+ elements().map() == GetReadOnlyRoots().fixed_cow_array_map()));
+ CHECK_EQ(map().has_fast_object_elements(), HasObjectElements());
VerifyJSObjectElements(isolate, *this);
}
}
void Map::MapVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::MapVerify(*this, isolate);
Heap* heap = isolate->heap();
CHECK(!ObjectInYoungGeneration(*this));
CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
CHECK(instance_size() == kVariableSizeSentinel ||
(kTaggedSize <= instance_size() &&
static_cast<size_t>(instance_size()) < heap->Capacity()));
- CHECK(GetBackPointer()->IsUndefined(isolate) ||
- !Map::cast(GetBackPointer())->is_stable());
- HeapObject::VerifyHeapPointer(isolate, prototype());
- HeapObject::VerifyHeapPointer(isolate, instance_descriptors());
- SLOW_DCHECK(instance_descriptors()->IsSortedNoDuplicates());
+ CHECK(GetBackPointer().IsUndefined(isolate) ||
+ !Map::cast(GetBackPointer()).is_stable());
+ SLOW_DCHECK(instance_descriptors().IsSortedNoDuplicates());
DisallowHeapAllocation no_gc;
SLOW_DCHECK(
TransitionsAccessor(isolate, *this, &no_gc).IsSortedNoDuplicates());
SLOW_DCHECK(TransitionsAccessor(isolate, *this, &no_gc)
.IsConsistentWithBackPointers());
SLOW_DCHECK(!FLAG_unbox_double_fields ||
- layout_descriptor()->IsConsistentWithMap(*this));
+ layout_descriptor().IsConsistentWithMap(*this));
if (!may_have_interesting_symbols()) {
CHECK(!has_named_interceptor());
CHECK(!is_dictionary_map());
CHECK(!is_access_check_needed());
DescriptorArray const descriptors = instance_descriptors();
for (int i = 0; i < NumberOfOwnDescriptors(); ++i) {
- CHECK(!descriptors->GetKey(i)->IsInterestingSymbol());
+ CHECK(!descriptors.GetKey(i).IsInterestingSymbol());
}
}
CHECK_IMPLIES(has_named_interceptor(), may_have_interesting_symbols());
@@ -687,14 +668,13 @@ void Map::MapVerify(Isolate* isolate) {
CHECK_IMPLIES(is_access_check_needed(), may_have_interesting_symbols());
CHECK_IMPLIES(IsJSObjectMap() && !CanHaveFastTransitionableElementsKind(),
IsDictionaryElementsKind(elements_kind()) ||
- IsTerminalElementsKind(elements_kind()));
+ IsTerminalElementsKind(elements_kind()) ||
+ IsHoleyFrozenOrSealedElementsKind(elements_kind()));
CHECK_IMPLIES(is_deprecated(), !is_stable());
if (is_prototype_map()) {
DCHECK(prototype_info() == Smi::kZero ||
- prototype_info()->IsPrototypeInfo());
+ prototype_info().IsPrototypeInfo());
}
- CHECK(prototype_validity_cell()->IsSmi() ||
- prototype_validity_cell()->IsCell());
}
void Map::DictionaryMapVerify(Isolate* isolate) {
@@ -707,29 +687,22 @@ void Map::DictionaryMapVerify(Isolate* isolate) {
CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
-void AliasedArgumentsEntry::AliasedArgumentsEntryVerify(Isolate* isolate) {
- VerifySmiField(kAliasedContextSlotOffset);
-}
+USE_TORQUE_VERIFIER(AliasedArgumentsEntry)
void EmbedderDataArray::EmbedderDataArrayVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::EmbedderDataArrayVerify(*this, isolate);
EmbedderDataSlot start(*this, 0);
EmbedderDataSlot end(*this, length());
for (EmbedderDataSlot slot = start; slot < end; ++slot) {
Object e = slot.load_tagged();
Object::VerifyPointer(isolate, e);
}
- VerifySmiField(kLengthOffset);
}
-void FixedArray::FixedArrayVerify(Isolate* isolate) {
- for (int i = 0; i < length(); i++) {
- Object e = get(i);
- VerifyPointer(isolate, e);
- }
-}
+USE_TORQUE_VERIFIER(FixedArray)
void WeakFixedArray::WeakFixedArrayVerify(Isolate* isolate) {
- VerifySmiField(kLengthOffset);
+ TorqueGeneratedClassVerifiers::WeakFixedArrayVerify(*this, isolate);
for (int i = 0; i < length(); i++) {
MaybeObject::VerifyMaybeObjectPointer(isolate, Get(i));
}
@@ -742,6 +715,7 @@ void WeakArrayList::WeakArrayListVerify(Isolate* isolate) {
}
void PropertyArray::PropertyArrayVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::PropertyArrayVerify(*this, isolate);
if (length() == 0) {
CHECK_EQ(*this, ReadOnlyRoots(isolate).empty_property_array());
return;
@@ -752,10 +726,10 @@ void PropertyArray::PropertyArrayVerify(Isolate* isolate) {
Object e = get(i);
Object::VerifyPointer(isolate, e);
}
- VerifySmiField(kLengthAndHashOffset);
}
void FixedDoubleArray::FixedDoubleArrayVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::FixedDoubleArrayVerify(*this, isolate);
for (int i = 0; i < length(); i++) {
if (!is_the_hole(i)) {
uint64_t value = get_representation(i);
@@ -771,11 +745,7 @@ void FixedDoubleArray::FixedDoubleArrayVerify(Isolate* isolate) {
}
void Context::ContextVerify(Isolate* isolate) {
- VerifySmiField(kLengthOffset);
- VerifyObjectField(isolate, kScopeInfoOffset);
- VerifyObjectField(isolate, kPreviousOffset);
- VerifyObjectField(isolate, kExtensionOffset);
- VerifyObjectField(isolate, kNativeContextOffset);
+ TorqueGeneratedClassVerifiers::ContextVerify(*this, isolate);
for (int i = 0; i < length(); i++) {
VerifyObjectField(isolate, OffsetOfElementAt(i));
}
@@ -784,7 +754,7 @@ void Context::ContextVerify(Isolate* isolate) {
void NativeContext::NativeContextVerify(Isolate* isolate) {
ContextVerify(isolate);
CHECK_EQ(length(), NativeContext::NATIVE_CONTEXT_SLOTS);
- CHECK_EQ(kSize, map()->instance_size());
+ CHECK_EQ(kSize, map().instance_size());
}
void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
@@ -802,6 +772,7 @@ void FeedbackMetadata::FeedbackMetadataVerify(Isolate* isolate) {
}
void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::DescriptorArrayVerify(*this, isolate);
for (int i = 0; i < number_of_all_descriptors(); i++) {
MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToKeyIndex(i)));
MaybeObject::VerifyMaybeObjectPointer(isolate, get(ToDetailsIndex(i)));
@@ -823,9 +794,9 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
Object key = get(ToKeyIndex(descriptor))->cast<Object>();
// number_of_descriptors() may be out of sync with the actual descriptors
// written during descriptor array construction.
- if (key->IsUndefined(isolate)) continue;
+ if (key.IsUndefined(isolate)) continue;
PropertyDetails details = GetDetails(descriptor);
- if (Name::cast(key)->IsPrivate()) {
+ if (Name::cast(key).IsPrivate()) {
CHECK_NE(details.attributes() & DONT_ENUM, 0);
}
MaybeObject value = get(ToValueIndex(descriptor));
@@ -835,10 +806,10 @@ void DescriptorArray::DescriptorArrayVerify(Isolate* isolate) {
value == MaybeObject::FromObject(FieldType::None()) ||
value == MaybeObject::FromObject(FieldType::Any()) ||
value->IsCleared() ||
- (value->GetHeapObjectIfWeak(&heap_object) && heap_object->IsMap()));
+ (value->GetHeapObjectIfWeak(&heap_object) && heap_object.IsMap()));
} else {
CHECK(!value->IsWeakOrCleared());
- CHECK(!value->cast<Object>()->IsMap());
+ CHECK(!value->cast<Object>().IsMap());
}
}
}
@@ -850,9 +821,10 @@ void TransitionArray::TransitionArrayVerify(Isolate* isolate) {
}
void JSArgumentsObject::JSArgumentsObjectVerify(Isolate* isolate) {
+ TorqueGeneratedClassVerifiers::JSArgumentsObjectVerify(*this, isolate);
if (IsSloppyArgumentsElementsKind(GetElementsKind())) {
SloppyArgumentsElements::cast(elements())
- ->SloppyArgumentsElementsVerify(isolate, *this);
+ .SloppyArgumentsElementsVerify(isolate, *this);
}
if (isolate->IsInAnyContext(map(), Context::SLOPPY_ARGUMENTS_MAP_INDEX) ||
isolate->IsInAnyContext(map(),
@@ -865,7 +837,6 @@ void JSArgumentsObject::JSArgumentsObjectVerify(Isolate* isolate) {
Context::STRICT_ARGUMENTS_MAP_INDEX)) {
VerifyObjectField(isolate, JSStrictArgumentsObject::kLengthOffset);
}
- JSObjectVerify(isolate);
}
void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
@@ -873,16 +844,16 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
FixedArrayVerify(isolate);
// Abort verification if only partially initialized (can't use arguments()
// getter because it does FixedArray::cast()).
- if (get(kArgumentsIndex)->IsUndefined(isolate)) return;
+ if (get(kArgumentsIndex).IsUndefined(isolate)) return;
- ElementsKind kind = holder->GetElementsKind();
+ ElementsKind kind = holder.GetElementsKind();
bool is_fast = kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
CHECK(IsFixedArray());
CHECK_GE(length(), 2);
CHECK_EQ(map(), ReadOnlyRoots(isolate).sloppy_arguments_elements_map());
Context context_object = context();
FixedArray arg_elements = FixedArray::cast(arguments());
- if (arg_elements->length() == 0) {
+ if (arg_elements.length() == 0) {
CHECK(arg_elements == ReadOnlyRoots(isolate).empty_fixed_array());
return;
}
@@ -898,7 +869,7 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
// Verify that each context-mapped argument is either the hole or a valid
// Smi within context length range.
Object mapped = get_mapped_entry(i);
- if (mapped->IsTheHole(isolate)) {
+ if (mapped.IsTheHole(isolate)) {
// Slow sloppy arguments can be holey.
if (!is_fast) continue;
// Fast sloppy arguments elements are never holey. Either the element is
@@ -910,104 +881,67 @@ void SloppyArgumentsElements::SloppyArgumentsElementsVerify(Isolate* isolate,
nofMappedParameters++;
CHECK_LE(maxMappedIndex, mappedIndex);
maxMappedIndex = mappedIndex;
- Object value = context_object->get(mappedIndex);
- CHECK(value->IsObject());
+ Object value = context_object.get(mappedIndex);
+ CHECK(value.IsObject());
// None of the context-mapped entries should exist in the arguments
// elements.
CHECK(!accessor->HasElement(holder, i, arg_elements));
}
- CHECK_LE(nofMappedParameters, context_object->length());
- CHECK_LE(nofMappedParameters, arg_elements->length());
- CHECK_LE(maxMappedIndex, context_object->length());
- CHECK_LE(maxMappedIndex, arg_elements->length());
+ CHECK_LE(nofMappedParameters, context_object.length());
+ CHECK_LE(nofMappedParameters, arg_elements.length());
+ CHECK_LE(maxMappedIndex, context_object.length());
+ CHECK_LE(maxMappedIndex, arg_elements.length());
}
-void JSGeneratorObject::JSGeneratorObjectVerify(Isolate* isolate) {
- // In an expression like "new g()", there can be a point where a generator
- // object is allocated but its fields are all undefined, as it hasn't yet been
- // initialized by the generator. Hence these weak checks.
- VerifyObjectField(isolate, kFunctionOffset);
- VerifyObjectField(isolate, kContextOffset);
- VerifyObjectField(isolate, kReceiverOffset);
- VerifyObjectField(isolate, kParametersAndRegistersOffset);
- VerifyObjectField(isolate, kContinuationOffset);
-}
+USE_TORQUE_VERIFIER(JSGeneratorObject)
void JSAsyncFunctionObject::JSAsyncFunctionObjectVerify(Isolate* isolate) {
- // Check inherited fields
- JSGeneratorObjectVerify(isolate);
- VerifyObjectField(isolate, kPromiseOffset);
- promise()->HeapObjectVerify(isolate);
+ TorqueGeneratedClassVerifiers::JSAsyncFunctionObjectVerify(*this, isolate);
+ promise().HeapObjectVerify(isolate);
}
void JSAsyncGeneratorObject::JSAsyncGeneratorObjectVerify(Isolate* isolate) {
- // Check inherited fields
- JSGeneratorObjectVerify(isolate);
- VerifyObjectField(isolate, kQueueOffset);
- queue()->HeapObjectVerify(isolate);
+ TorqueGeneratedClassVerifiers::JSAsyncGeneratorObjectVerify(*this, isolate);
+ queue().HeapObjectVerify(isolate);
}
-void JSValue::JSValueVerify(Isolate* isolate) {
- Object v = value();
- if (v->IsHeapObject()) {
- VerifyHeapPointer(isolate, v);
- }
-}
+USE_TORQUE_VERIFIER(JSValue)
void JSDate::JSDateVerify(Isolate* isolate) {
- if (value()->IsHeapObject()) {
- VerifyHeapPointer(isolate, value());
- }
- CHECK(value()->IsUndefined(isolate) || value()->IsSmi() ||
- value()->IsHeapNumber());
- CHECK(year()->IsUndefined(isolate) || year()->IsSmi() || year()->IsNaN());
- CHECK(month()->IsUndefined(isolate) || month()->IsSmi() || month()->IsNaN());
- CHECK(day()->IsUndefined(isolate) || day()->IsSmi() || day()->IsNaN());
- CHECK(weekday()->IsUndefined(isolate) || weekday()->IsSmi() ||
- weekday()->IsNaN());
- CHECK(hour()->IsUndefined(isolate) || hour()->IsSmi() || hour()->IsNaN());
- CHECK(min()->IsUndefined(isolate) || min()->IsSmi() || min()->IsNaN());
- CHECK(sec()->IsUndefined(isolate) || sec()->IsSmi() || sec()->IsNaN());
- CHECK(cache_stamp()->IsUndefined(isolate) || cache_stamp()->IsSmi() ||
- cache_stamp()->IsNaN());
-
- if (month()->IsSmi()) {
+ TorqueGeneratedClassVerifiers::JSDateVerify(*this, isolate);
+
+ if (month().IsSmi()) {
int month = Smi::ToInt(this->month());
CHECK(0 <= month && month <= 11);
}
- if (day()->IsSmi()) {
+ if (day().IsSmi()) {
int day = Smi::ToInt(this->day());
CHECK(1 <= day && day <= 31);
}
- if (hour()->IsSmi()) {
+ if (hour().IsSmi()) {
int hour = Smi::ToInt(this->hour());
CHECK(0 <= hour && hour <= 23);
}
- if (min()->IsSmi()) {
+ if (min().IsSmi()) {
int min = Smi::ToInt(this->min());
CHECK(0 <= min && min <= 59);
}
- if (sec()->IsSmi()) {
+ if (sec().IsSmi()) {
int sec = Smi::ToInt(this->sec());
CHECK(0 <= sec && sec <= 59);
}
- if (weekday()->IsSmi()) {
+ if (weekday().IsSmi()) {
int weekday = Smi::ToInt(this->weekday());
CHECK(0 <= weekday && weekday <= 6);
}
- if (cache_stamp()->IsSmi()) {
+ if (cache_stamp().IsSmi()) {
CHECK(Smi::ToInt(cache_stamp()) <=
Smi::ToInt(isolate->date_cache()->stamp()));
}
}
void JSMessageObject::JSMessageObjectVerify(Isolate* isolate) {
- CHECK(IsJSMessageObject());
- VerifyObjectField(isolate, kStartPositionOffset);
- VerifyObjectField(isolate, kEndPositionOffset);
- VerifyObjectField(isolate, kArgumentsOffset);
- VerifyObjectField(isolate, kScriptOffset);
- VerifyObjectField(isolate, kStackFramesOffset);
+ TorqueGeneratedClassVerifiers::JSMessageObjectVerify(*this, isolate);
VerifySmiField(kMessageTypeOffset);
VerifySmiField(kStartPositionOffset);
VerifySmiField(kEndPositionOffset);
@@ -1015,67 +949,54 @@ void JSMessageObject::JSMessageObjectVerify(Isolate* isolate) {
}
void String::StringVerify(Isolate* isolate) {
- CHECK(IsString());
+ TorqueGeneratedClassVerifiers::StringVerify(*this, isolate);
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
CHECK_IMPLIES(length() == 0, *this == ReadOnlyRoots(isolate).empty_string());
if (IsInternalizedString()) {
CHECK(!ObjectInYoungGeneration(*this));
}
- if (IsConsString()) {
- ConsString::cast(*this)->ConsStringVerify(isolate);
- } else if (IsSlicedString()) {
- SlicedString::cast(*this)->SlicedStringVerify(isolate);
- } else if (IsThinString()) {
- ThinString::cast(*this)->ThinStringVerify(isolate);
- }
}
void ConsString::ConsStringVerify(Isolate* isolate) {
- CHECK(this->first()->IsString());
- CHECK(this->second() == ReadOnlyRoots(isolate).empty_string() ||
- this->second()->IsString());
+ TorqueGeneratedClassVerifiers::ConsStringVerify(*this, isolate);
CHECK_GE(this->length(), ConsString::kMinLength);
- CHECK(this->length() == this->first()->length() + this->second()->length());
+ CHECK(this->length() == this->first().length() + this->second().length());
if (this->IsFlat()) {
// A flat cons can only be created by String::SlowFlatten.
// Afterwards, the first part may be externalized or internalized.
- CHECK(this->first()->IsSeqString() || this->first()->IsExternalString() ||
- this->first()->IsThinString());
+ CHECK(this->first().IsSeqString() || this->first().IsExternalString() ||
+ this->first().IsThinString());
}
}
void ThinString::ThinStringVerify(Isolate* isolate) {
- CHECK(this->actual()->IsInternalizedString());
- CHECK(this->actual()->IsSeqString() || this->actual()->IsExternalString());
+ TorqueGeneratedClassVerifiers::ThinStringVerify(*this, isolate);
+ CHECK(this->actual().IsInternalizedString());
+ CHECK(this->actual().IsSeqString() || this->actual().IsExternalString());
}
void SlicedString::SlicedStringVerify(Isolate* isolate) {
- CHECK(!this->parent()->IsConsString());
- CHECK(!this->parent()->IsSlicedString());
+ TorqueGeneratedClassVerifiers::SlicedStringVerify(*this, isolate);
+ CHECK(!this->parent().IsConsString());
+ CHECK(!this->parent().IsSlicedString());
CHECK_GE(this->length(), SlicedString::kMinLength);
}
void JSBoundFunction::JSBoundFunctionVerify(Isolate* isolate) {
- CHECK(IsJSBoundFunction());
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kBoundThisOffset);
- VerifyObjectField(isolate, kBoundTargetFunctionOffset);
- VerifyObjectField(isolate, kBoundArgumentsOffset);
+ TorqueGeneratedClassVerifiers::JSBoundFunctionVerify(*this, isolate);
CHECK(IsCallable());
- if (!raw_bound_target_function()->IsUndefined(isolate)) {
- CHECK(bound_target_function()->IsCallable());
- CHECK_EQ(IsConstructor(), bound_target_function()->IsConstructor());
+ if (!raw_bound_target_function().IsUndefined(isolate)) {
+ CHECK(bound_target_function().IsCallable());
+ CHECK_EQ(IsConstructor(), bound_target_function().IsConstructor());
}
}
void JSFunction::JSFunctionVerify(Isolate* isolate) {
- CHECK(IsJSFunction());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, raw_feedback_cell());
- CHECK(raw_feedback_cell()->IsFeedbackCell());
- CHECK(code()->IsCode());
- CHECK(map()->is_callable());
+ TorqueGeneratedClassVerifiers::JSFunctionVerify(*this, isolate);
+ CHECK(raw_feedback_cell().IsFeedbackCell());
+ CHECK(code().IsCode());
+ CHECK(map().is_callable());
Handle<JSFunction> function(*this, isolate);
LookupIterator it(isolate, function, isolate->factory()->prototype_string(),
LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -1094,45 +1015,39 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
}
void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
- CHECK(IsSharedFunctionInfo());
-
- VerifyObjectField(isolate, kFunctionDataOffset);
- VerifyObjectField(isolate, kOuterScopeInfoOrFeedbackMetadataOffset);
- VerifyObjectField(isolate, kScriptOrDebugInfoOffset);
- VerifyObjectField(isolate, kNameOrScopeInfoOffset);
+ TorqueGeneratedClassVerifiers::SharedFunctionInfoVerify(*this, isolate);
Object value = name_or_scope_info();
- CHECK(value == kNoSharedNameSentinel || value->IsString() ||
- value->IsScopeInfo());
- if (value->IsScopeInfo()) {
- CHECK_LT(0, ScopeInfo::cast(value)->length());
+ if (value.IsScopeInfo()) {
+ CHECK_LT(0, ScopeInfo::cast(value).length());
CHECK_NE(value, ReadOnlyRoots(isolate).empty_scope_info());
}
CHECK(HasWasmExportedFunctionData() || IsApiFunction() ||
HasBytecodeArray() || HasAsmWasmData() || HasBuiltinId() ||
HasUncompiledDataWithPreparseData() ||
- HasUncompiledDataWithoutPreparseData());
+ HasUncompiledDataWithoutPreparseData() || HasWasmJSFunctionData() ||
+ HasWasmCapiFunctionData());
- CHECK(script_or_debug_info()->IsUndefined(isolate) ||
- script_or_debug_info()->IsScript() || HasDebugInfo());
+ CHECK(script_or_debug_info().IsUndefined(isolate) ||
+ script_or_debug_info().IsScript() || HasDebugInfo());
if (!is_compiled()) {
CHECK(!HasFeedbackMetadata());
- CHECK(outer_scope_info()->IsScopeInfo() ||
- outer_scope_info()->IsTheHole(isolate));
+ CHECK(outer_scope_info().IsScopeInfo() ||
+ outer_scope_info().IsTheHole(isolate));
} else if (HasBytecodeArray() && HasFeedbackMetadata()) {
- CHECK(feedback_metadata()->IsFeedbackMetadata());
+ CHECK(feedback_metadata().IsFeedbackMetadata());
}
int expected_map_index = Context::FunctionMapIndex(
language_mode(), kind(), HasSharedName(), needs_home_object());
CHECK_EQ(expected_map_index, function_map_index());
- if (scope_info()->length() > 0) {
+ if (scope_info().length() > 0) {
ScopeInfo info = scope_info();
- CHECK(kind() == info->function_kind());
- CHECK_EQ(kind() == kModule, info->scope_type() == MODULE_SCOPE);
+ CHECK(kind() == info.function_kind());
+ CHECK_EQ(kind() == kModule, info.scope_type() == MODULE_SCOPE);
}
if (IsApiFunction()) {
@@ -1155,34 +1070,30 @@ void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
}
void JSGlobalProxy::JSGlobalProxyVerify(Isolate* isolate) {
- CHECK(IsJSGlobalProxy());
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, JSGlobalProxy::kNativeContextOffset);
- CHECK(map()->is_access_check_needed());
+ TorqueGeneratedClassVerifiers::JSGlobalProxyVerify(*this, isolate);
+ CHECK(map().is_access_check_needed());
// Make sure that this object has no properties, elements.
- CHECK_EQ(0, FixedArray::cast(elements())->length());
+ CHECK_EQ(0, FixedArray::cast(elements()).length());
}
void JSGlobalObject::JSGlobalObjectVerify(Isolate* isolate) {
CHECK(IsJSGlobalObject());
// Do not check the dummy global object for the builtins.
- if (global_dictionary()->NumberOfElements() == 0 &&
- elements()->length() == 0) {
+ if (global_dictionary().NumberOfElements() == 0 && elements().length() == 0) {
return;
}
JSObjectVerify(isolate);
}
void Oddball::OddballVerify(Isolate* isolate) {
- CHECK(IsOddball());
+ TorqueGeneratedOddball::OddballVerify(isolate);
Heap* heap = isolate->heap();
- VerifyHeapPointer(isolate, to_string());
Object number = to_number();
- if (number->IsHeapObject()) {
+ if (number.IsHeapObject()) {
CHECK(number == ReadOnlyRoots(heap).nan_value() ||
number == ReadOnlyRoots(heap).hole_nan_value());
} else {
- CHECK(number->IsSmi());
+ CHECK(number.IsSmi());
int value = Smi::ToInt(number);
// Hidden oddballs have negative smis.
const int kLeastHiddenOddballNumber = -7;
@@ -1217,29 +1128,16 @@ void Oddball::OddballVerify(Isolate* isolate) {
} else {
UNREACHABLE();
}
- CHECK(to_string()->IsString());
- CHECK(type_of()->IsString());
}
-void Cell::CellVerify(Isolate* isolate) {
- CHECK(IsCell());
- VerifyObjectField(isolate, kValueOffset);
-}
+USE_TORQUE_VERIFIER(Cell)
-void PropertyCell::PropertyCellVerify(Isolate* isolate) {
- CHECK(IsPropertyCell());
- VerifyObjectField(isolate, kNameOffset);
- CHECK(name()->IsName());
- VerifySmiField(kPropertyDetailsRawOffset);
- VerifyObjectField(isolate, kValueOffset);
- VerifyObjectField(isolate, kDependentCodeOffset);
- CHECK(dependent_code()->IsDependentCode());
-}
+USE_TORQUE_VERIFIER(PropertyCell)
void CodeDataContainer::CodeDataContainerVerify(Isolate* isolate) {
CHECK(IsCodeDataContainer());
VerifyObjectField(isolate, kNextCodeLinkOffset);
- CHECK(next_code_link()->IsCode() || next_code_link()->IsUndefined(isolate));
+ CHECK(next_code_link().IsCode() || next_code_link().IsUndefined(isolate));
}
void Code::CodeVerify(Isolate* isolate) {
@@ -1251,7 +1149,7 @@ void Code::CodeVerify(Isolate* isolate) {
CHECK_LE(constant_pool_offset(), code_comments_offset());
CHECK_LE(code_comments_offset(), InstructionSize());
CHECK(IsAligned(raw_instruction_start(), kCodeAlignment));
- relocation_info()->ObjectVerify(isolate);
+ relocation_info().ObjectVerify(isolate);
CHECK(Code::SizeFor(body_size()) <= kMaxRegularHeapObjectSize ||
isolate->heap()->InSpace(*this, CODE_LO_SPACE));
Address last_gc_pc = kNullAddress;
@@ -1267,42 +1165,41 @@ void Code::CodeVerify(Isolate* isolate) {
}
void JSArray::JSArrayVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- CHECK(length()->IsNumber() || length()->IsUndefined(isolate));
+ TorqueGeneratedClassVerifiers::JSArrayVerify(*this, isolate);
// If a GC was caused while constructing this array, the elements
// pointer may point to a one pointer filler map.
if (!ElementsAreSafeToExamine()) return;
- if (elements()->IsUndefined(isolate)) return;
- CHECK(elements()->IsFixedArray() || elements()->IsFixedDoubleArray());
- if (elements()->length() == 0) {
+ if (elements().IsUndefined(isolate)) return;
+ CHECK(elements().IsFixedArray() || elements().IsFixedDoubleArray());
+ if (elements().length() == 0) {
CHECK_EQ(elements(), ReadOnlyRoots(isolate).empty_fixed_array());
}
- if (!length()->IsNumber()) return;
+ if (!length().IsNumber()) return;
// Verify that the length and the elements backing store are in sync.
- if (length()->IsSmi() && (HasFastElements() || HasFrozenOrSealedElements())) {
- if (elements()->length() > 0) {
- CHECK_IMPLIES(HasDoubleElements(), elements()->IsFixedDoubleArray());
+ if (length().IsSmi() && (HasFastElements() || HasFrozenOrSealedElements())) {
+ if (elements().length() > 0) {
+ CHECK_IMPLIES(HasDoubleElements(), elements().IsFixedDoubleArray());
CHECK_IMPLIES(HasSmiOrObjectElements() || HasFrozenOrSealedElements(),
- elements()->IsFixedArray());
+ elements().IsFixedArray());
}
int size = Smi::ToInt(length());
// Holey / Packed backing stores might have slack or might have not been
// properly initialized yet.
- CHECK(size <= elements()->length() ||
+ CHECK(size <= elements().length() ||
elements() == ReadOnlyRoots(isolate).empty_fixed_array());
} else {
CHECK(HasDictionaryElements());
uint32_t array_length;
- CHECK(length()->ToArrayLength(&array_length));
+ CHECK(length().ToArrayLength(&array_length));
if (array_length == 0xFFFFFFFF) {
- CHECK(length()->ToArrayLength(&array_length));
+ CHECK(length().ToArrayLength(&array_length));
}
if (array_length != 0) {
NumberDictionary dict = NumberDictionary::cast(elements());
// The dictionary can never have more elements than the array length + 1.
// If the backing store grows the verification might be triggered with
// the old length in place.
- uint32_t nof_elements = static_cast<uint32_t>(dict->NumberOfElements());
+ uint32_t nof_elements = static_cast<uint32_t>(dict.NumberOfElements());
if (nof_elements != 0) nof_elements--;
CHECK_LE(nof_elements, array_length);
}
@@ -1310,18 +1207,16 @@ void JSArray::JSArrayVerify(Isolate* isolate) {
}
void JSSet::JSSetVerify(Isolate* isolate) {
- CHECK(IsJSSet());
- JSObjectVerify(isolate);
+ TorqueGeneratedClassVerifiers::JSSetVerify(*this, isolate);
VerifyHeapPointer(isolate, table());
- CHECK(table()->IsOrderedHashSet() || table()->IsUndefined(isolate));
+ CHECK(table().IsOrderedHashSet() || table().IsUndefined(isolate));
// TODO(arv): Verify OrderedHashTable too.
}
void JSMap::JSMapVerify(Isolate* isolate) {
- CHECK(IsJSMap());
- JSObjectVerify(isolate);
+ TorqueGeneratedClassVerifiers::JSMapVerify(*this, isolate);
VerifyHeapPointer(isolate, table());
- CHECK(table()->IsOrderedHashMap() || table()->IsUndefined(isolate));
+ CHECK(table().IsOrderedHashMap() || table().IsUndefined(isolate));
// TODO(arv): Verify OrderedHashTable too.
}
@@ -1329,69 +1224,69 @@ void JSSetIterator::JSSetIteratorVerify(Isolate* isolate) {
CHECK(IsJSSetIterator());
JSObjectVerify(isolate);
VerifyHeapPointer(isolate, table());
- CHECK(table()->IsOrderedHashSet());
- CHECK(index()->IsSmi());
+ CHECK(table().IsOrderedHashSet());
+ CHECK(index().IsSmi());
}
void JSMapIterator::JSMapIteratorVerify(Isolate* isolate) {
CHECK(IsJSMapIterator());
JSObjectVerify(isolate);
VerifyHeapPointer(isolate, table());
- CHECK(table()->IsOrderedHashMap());
- CHECK(index()->IsSmi());
+ CHECK(table().IsOrderedHashMap());
+ CHECK(index().IsSmi());
}
void WeakCell::WeakCellVerify(Isolate* isolate) {
CHECK(IsWeakCell());
- CHECK(target()->IsJSReceiver() || target()->IsUndefined(isolate));
+ CHECK(target().IsJSReceiver() || target().IsUndefined(isolate));
- CHECK(prev()->IsWeakCell() || prev()->IsUndefined(isolate));
- if (prev()->IsWeakCell()) {
- CHECK_EQ(WeakCell::cast(prev())->next(), *this);
+ CHECK(prev().IsWeakCell() || prev().IsUndefined(isolate));
+ if (prev().IsWeakCell()) {
+ CHECK_EQ(WeakCell::cast(prev()).next(), *this);
}
- CHECK(next()->IsWeakCell() || next()->IsUndefined(isolate));
- if (next()->IsWeakCell()) {
- CHECK_EQ(WeakCell::cast(next())->prev(), *this);
+ CHECK(next().IsWeakCell() || next().IsUndefined(isolate));
+ if (next().IsWeakCell()) {
+ CHECK_EQ(WeakCell::cast(next()).prev(), *this);
}
- CHECK_IMPLIES(key()->IsUndefined(isolate),
- key_list_prev()->IsUndefined(isolate));
- CHECK_IMPLIES(key()->IsUndefined(isolate),
- key_list_next()->IsUndefined(isolate));
+ CHECK_IMPLIES(key().IsUndefined(isolate),
+ key_list_prev().IsUndefined(isolate));
+ CHECK_IMPLIES(key().IsUndefined(isolate),
+ key_list_next().IsUndefined(isolate));
- CHECK(key_list_prev()->IsWeakCell() || key_list_prev()->IsUndefined(isolate));
- if (key_list_prev()->IsWeakCell()) {
- CHECK_EQ(WeakCell::cast(key_list_prev())->key_list_next(), *this);
+ CHECK(key_list_prev().IsWeakCell() || key_list_prev().IsUndefined(isolate));
+ if (key_list_prev().IsWeakCell()) {
+ CHECK_EQ(WeakCell::cast(key_list_prev()).key_list_next(), *this);
}
- CHECK(key_list_next()->IsWeakCell() || key_list_next()->IsUndefined(isolate));
- if (key_list_next()->IsWeakCell()) {
- CHECK_EQ(WeakCell::cast(key_list_next())->key_list_prev(), *this);
+ CHECK(key_list_next().IsWeakCell() || key_list_next().IsUndefined(isolate));
+ if (key_list_next().IsWeakCell()) {
+ CHECK_EQ(WeakCell::cast(key_list_next()).key_list_prev(), *this);
}
- CHECK(finalization_group()->IsUndefined(isolate) ||
- finalization_group()->IsJSFinalizationGroup());
+ CHECK(finalization_group().IsUndefined(isolate) ||
+ finalization_group().IsJSFinalizationGroup());
}
void JSWeakRef::JSWeakRefVerify(Isolate* isolate) {
CHECK(IsJSWeakRef());
JSObjectVerify(isolate);
- CHECK(target()->IsUndefined(isolate) || target()->IsJSReceiver());
+ CHECK(target().IsUndefined(isolate) || target().IsJSReceiver());
}
void JSFinalizationGroup::JSFinalizationGroupVerify(Isolate* isolate) {
CHECK(IsJSFinalizationGroup());
JSObjectVerify(isolate);
VerifyHeapPointer(isolate, cleanup());
- CHECK(active_cells()->IsUndefined(isolate) || active_cells()->IsWeakCell());
- if (active_cells()->IsWeakCell()) {
- CHECK(WeakCell::cast(active_cells())->prev()->IsUndefined(isolate));
+ CHECK(active_cells().IsUndefined(isolate) || active_cells().IsWeakCell());
+ if (active_cells().IsWeakCell()) {
+ CHECK(WeakCell::cast(active_cells()).prev().IsUndefined(isolate));
}
- CHECK(cleared_cells()->IsUndefined(isolate) || cleared_cells()->IsWeakCell());
- if (cleared_cells()->IsWeakCell()) {
- CHECK(WeakCell::cast(cleared_cells())->prev()->IsUndefined(isolate));
+ CHECK(cleared_cells().IsUndefined(isolate) || cleared_cells().IsWeakCell());
+ if (cleared_cells().IsWeakCell()) {
+ CHECK(WeakCell::cast(cleared_cells()).prev().IsUndefined(isolate));
}
}
@@ -1405,148 +1300,78 @@ void JSFinalizationGroupCleanupIterator::
void FinalizationGroupCleanupJobTask::FinalizationGroupCleanupJobTaskVerify(
Isolate* isolate) {
CHECK(IsFinalizationGroupCleanupJobTask());
- CHECK(finalization_group()->IsJSFinalizationGroup());
+ CHECK(finalization_group().IsJSFinalizationGroup());
}
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
- CHECK(IsJSWeakMap());
- JSObjectVerify(isolate);
+ TorqueGeneratedClassVerifiers::JSWeakMapVerify(*this, isolate);
VerifyHeapPointer(isolate, table());
- CHECK(table()->IsEphemeronHashTable() || table()->IsUndefined(isolate));
+ CHECK(table().IsEphemeronHashTable() || table().IsUndefined(isolate));
}
void JSArrayIterator::JSArrayIteratorVerify(Isolate* isolate) {
- CHECK(IsJSArrayIterator());
- JSObjectVerify(isolate);
- CHECK(iterated_object()->IsJSReceiver());
+ TorqueGeneratedClassVerifiers::JSArrayIteratorVerify(*this, isolate);
+ CHECK(iterated_object().IsJSReceiver());
- CHECK_GE(next_index()->Number(), 0);
- CHECK_LE(next_index()->Number(), kMaxSafeInteger);
+ CHECK_GE(next_index().Number(), 0);
+ CHECK_LE(next_index().Number(), kMaxSafeInteger);
- if (iterated_object()->IsJSTypedArray()) {
+ if (iterated_object().IsJSTypedArray()) {
// JSTypedArray::length is limited to Smi range.
- CHECK(next_index()->IsSmi());
- CHECK_LE(next_index()->Number(), Smi::kMaxValue);
- } else if (iterated_object()->IsJSArray()) {
+ CHECK(next_index().IsSmi());
+ CHECK_LE(next_index().Number(), Smi::kMaxValue);
+ } else if (iterated_object().IsJSArray()) {
// JSArray::length is limited to Uint32 range.
- CHECK_LE(next_index()->Number(), kMaxUInt32);
+ CHECK_LE(next_index().Number(), kMaxUInt32);
}
}
void JSStringIterator::JSStringIteratorVerify(Isolate* isolate) {
- CHECK(IsJSStringIterator());
- JSObjectVerify(isolate);
- CHECK(string()->IsString());
+ TorqueGeneratedClassVerifiers::JSStringIteratorVerify(*this, isolate);
+ CHECK(string().IsString());
CHECK_GE(index(), 0);
CHECK_LE(index(), String::kMaxLength);
}
-void JSAsyncFromSyncIterator::JSAsyncFromSyncIteratorVerify(Isolate* isolate) {
- CHECK(IsJSAsyncFromSyncIterator());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, sync_iterator());
-}
+USE_TORQUE_VERIFIER(JSAsyncFromSyncIterator)
void JSWeakSet::JSWeakSetVerify(Isolate* isolate) {
- CHECK(IsJSWeakSet());
- JSObjectVerify(isolate);
+ TorqueGeneratedClassVerifiers::JSWeakSetVerify(*this, isolate);
VerifyHeapPointer(isolate, table());
- CHECK(table()->IsEphemeronHashTable() || table()->IsUndefined(isolate));
+ CHECK(table().IsEphemeronHashTable() || table().IsUndefined(isolate));
}
-void Microtask::MicrotaskVerify(Isolate* isolate) { CHECK(IsMicrotask()); }
+USE_TORQUE_VERIFIER(Microtask)
void CallableTask::CallableTaskVerify(Isolate* isolate) {
- CHECK(IsCallableTask());
- MicrotaskVerify(isolate);
- VerifyHeapPointer(isolate, callable());
- CHECK(callable()->IsCallable());
- VerifyHeapPointer(isolate, context());
- CHECK(context()->IsContext());
+ TorqueGeneratedClassVerifiers::CallableTaskVerify(*this, isolate);
+ CHECK(callable().IsCallable());
}
-void CallbackTask::CallbackTaskVerify(Isolate* isolate) {
- CHECK(IsCallbackTask());
- MicrotaskVerify(isolate);
- VerifyHeapPointer(isolate, callback());
- VerifyHeapPointer(isolate, data());
-}
+USE_TORQUE_VERIFIER(CallbackTask)
void PromiseReactionJobTask::PromiseReactionJobTaskVerify(Isolate* isolate) {
- CHECK(IsPromiseReactionJobTask());
- MicrotaskVerify(isolate);
- VerifyPointer(isolate, argument());
- VerifyHeapPointer(isolate, context());
- CHECK(context()->IsContext());
+ TorqueGeneratedClassVerifiers::PromiseReactionJobTaskVerify(*this, isolate);
VerifyHeapPointer(isolate, handler());
- CHECK(handler()->IsUndefined(isolate) || handler()->IsCallable());
- VerifyHeapPointer(isolate, promise_or_capability());
- CHECK(promise_or_capability()->IsJSPromise() ||
- promise_or_capability()->IsPromiseCapability() ||
- promise_or_capability()->IsUndefined(isolate));
+ CHECK(handler().IsUndefined(isolate) || handler().IsCallable());
}
-void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskVerify(
- Isolate* isolate) {
- CHECK(IsPromiseFulfillReactionJobTask());
- PromiseReactionJobTaskVerify(isolate);
-}
+USE_TORQUE_VERIFIER(PromiseFulfillReactionJobTask)
-void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskVerify(
- Isolate* isolate) {
- CHECK(IsPromiseRejectReactionJobTask());
- PromiseReactionJobTaskVerify(isolate);
-}
+USE_TORQUE_VERIFIER(PromiseRejectReactionJobTask)
-void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskVerify(
- Isolate* isolate) {
- CHECK(IsPromiseResolveThenableJobTask());
- MicrotaskVerify(isolate);
- VerifyHeapPointer(isolate, context());
- CHECK(context()->IsContext());
- VerifyHeapPointer(isolate, promise_to_resolve());
- CHECK(promise_to_resolve()->IsJSPromise());
- VerifyHeapPointer(isolate, then());
- CHECK(then()->IsCallable());
- CHECK(then()->IsJSReceiver());
- VerifyHeapPointer(isolate, thenable());
- CHECK(thenable()->IsJSReceiver());
-}
-
-void PromiseCapability::PromiseCapabilityVerify(Isolate* isolate) {
- CHECK(IsPromiseCapability());
-
- VerifyHeapPointer(isolate, promise());
- CHECK(promise()->IsJSReceiver() || promise()->IsUndefined(isolate));
- VerifyPointer(isolate, resolve());
- VerifyPointer(isolate, reject());
-}
-
-void PromiseReaction::PromiseReactionVerify(Isolate* isolate) {
- CHECK(IsPromiseReaction());
-
- VerifyPointer(isolate, next());
- CHECK(next()->IsSmi() || next()->IsPromiseReaction());
- VerifyHeapPointer(isolate, reject_handler());
- CHECK(reject_handler()->IsUndefined(isolate) ||
- reject_handler()->IsCallable());
- VerifyHeapPointer(isolate, fulfill_handler());
- CHECK(fulfill_handler()->IsUndefined(isolate) ||
- fulfill_handler()->IsCallable());
- VerifyHeapPointer(isolate, promise_or_capability());
- CHECK(promise_or_capability()->IsJSPromise() ||
- promise_or_capability()->IsPromiseCapability() ||
- promise_or_capability()->IsUndefined(isolate));
-}
+USE_TORQUE_VERIFIER(PromiseResolveThenableJobTask)
+
+USE_TORQUE_VERIFIER(PromiseCapability)
+
+USE_TORQUE_VERIFIER(PromiseReaction)
void JSPromise::JSPromiseVerify(Isolate* isolate) {
- CHECK(IsJSPromise());
- JSObjectVerify(isolate);
- VerifyPointer(isolate, reactions_or_result());
+ TorqueGeneratedClassVerifiers::JSPromiseVerify(*this, isolate);
VerifySmiField(kFlagsOffset);
if (status() == Promise::kPending) {
- CHECK(reactions()->IsSmi() || reactions()->IsPromiseReaction());
+ CHECK(reactions().IsSmi() || reactions().IsPromiseReaction());
}
}
@@ -1584,7 +1409,7 @@ void SmallOrderedHashTable<Derived>::SmallOrderedHashTableVerify(
entry < Capacity(); entry++) {
for (int offset = 0; offset < Derived::kEntrySize; offset++) {
Object val = GetDataEntry(entry, offset);
- CHECK(val->IsTheHole(isolate));
+ CHECK(val.IsTheHole(isolate));
}
}
}
@@ -1595,7 +1420,7 @@ void SmallOrderedHashMap::SmallOrderedHashMapVerify(Isolate* isolate) {
entry++) {
for (int offset = 0; offset < kEntrySize; offset++) {
Object val = GetDataEntry(entry, offset);
- CHECK(val->IsTheHole(isolate));
+ CHECK(val.IsTheHole(isolate));
}
}
}
@@ -1607,7 +1432,7 @@ void SmallOrderedHashSet::SmallOrderedHashSetVerify(Isolate* isolate) {
entry++) {
for (int offset = 0; offset < kEntrySize; offset++) {
Object val = GetDataEntry(entry, offset);
- CHECK(val->IsTheHole(isolate));
+ CHECK(val.IsTheHole(isolate));
}
}
}
@@ -1620,116 +1445,101 @@ void SmallOrderedNameDictionary::SmallOrderedNameDictionaryVerify(
entry++) {
for (int offset = 0; offset < kEntrySize; offset++) {
Object val = GetDataEntry(entry, offset);
- CHECK(val->IsTheHole(isolate) ||
+ CHECK(val.IsTheHole(isolate) ||
(PropertyDetails::Empty().AsSmi() == Smi::cast(val)));
}
}
}
void JSRegExp::JSRegExpVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- CHECK(data()->IsUndefined(isolate) || data()->IsFixedArray());
- CHECK(source()->IsUndefined(isolate) || source()->IsString());
- CHECK(flags()->IsUndefined() || flags()->IsSmi());
+ TorqueGeneratedClassVerifiers::JSRegExpVerify(*this, isolate);
switch (TypeTag()) {
case JSRegExp::ATOM: {
FixedArray arr = FixedArray::cast(data());
- CHECK(arr->get(JSRegExp::kAtomPatternIndex)->IsString());
+ CHECK(arr.get(JSRegExp::kAtomPatternIndex).IsString());
break;
}
case JSRegExp::IRREGEXP: {
bool is_native = RegExpImpl::UsesNativeRegExp();
FixedArray arr = FixedArray::cast(data());
- Object one_byte_data = arr->get(JSRegExp::kIrregexpLatin1CodeIndex);
+ Object one_byte_data = arr.get(JSRegExp::kIrregexpLatin1CodeIndex);
// Smi : Not compiled yet (-1).
// Code/ByteArray: Compiled code.
- CHECK(
- (one_byte_data->IsSmi() &&
- Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
- (is_native ? one_byte_data->IsCode() : one_byte_data->IsByteArray()));
- Object uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
- CHECK((uc16_data->IsSmi() &&
+ CHECK((one_byte_data.IsSmi() &&
+ Smi::ToInt(one_byte_data) == JSRegExp::kUninitializedValue) ||
+ (is_native ? one_byte_data.IsCode() : one_byte_data.IsByteArray()));
+ Object uc16_data = arr.get(JSRegExp::kIrregexpUC16CodeIndex);
+ CHECK((uc16_data.IsSmi() &&
Smi::ToInt(uc16_data) == JSRegExp::kUninitializedValue) ||
- (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
+ (is_native ? uc16_data.IsCode() : uc16_data.IsByteArray()));
- CHECK(arr->get(JSRegExp::kIrregexpCaptureCountIndex)->IsSmi());
- CHECK(arr->get(JSRegExp::kIrregexpMaxRegisterCountIndex)->IsSmi());
+ CHECK(arr.get(JSRegExp::kIrregexpCaptureCountIndex).IsSmi());
+ CHECK(arr.get(JSRegExp::kIrregexpMaxRegisterCountIndex).IsSmi());
break;
}
default:
CHECK_EQ(JSRegExp::NOT_COMPILED, TypeTag());
- CHECK(data()->IsUndefined(isolate));
+ CHECK(data().IsUndefined(isolate));
break;
}
}
void JSRegExpStringIterator::JSRegExpStringIteratorVerify(Isolate* isolate) {
- CHECK(IsJSRegExpStringIterator());
- JSObjectVerify(isolate);
- CHECK(iterating_string()->IsString());
- CHECK(iterating_regexp()->IsObject());
+ TorqueGeneratedClassVerifiers::JSRegExpStringIteratorVerify(*this, isolate);
+ CHECK(iterating_string().IsString());
VerifySmiField(kFlagsOffset);
}
void JSProxy::JSProxyVerify(Isolate* isolate) {
- CHECK(IsJSProxy());
- CHECK(map()->GetConstructor()->IsJSFunction());
- VerifyPointer(isolate, target());
- VerifyPointer(isolate, handler());
+ TorqueGeneratedClassVerifiers::JSProxyVerify(*this, isolate);
+ CHECK(map().GetConstructor().IsJSFunction());
if (!IsRevoked()) {
- CHECK_EQ(target()->IsCallable(), map()->is_callable());
- CHECK_EQ(target()->IsConstructor(), map()->is_constructor());
+ CHECK_EQ(target().IsCallable(), map().is_callable());
+ CHECK_EQ(target().IsConstructor(), map().is_constructor());
}
- CHECK(map()->prototype()->IsNull(isolate));
+ CHECK(map().prototype().IsNull(isolate));
// There should be no properties on a Proxy.
- CHECK_EQ(0, map()->NumberOfOwnDescriptors());
+ CHECK_EQ(0, map().NumberOfOwnDescriptors());
}
void JSArrayBuffer::JSArrayBufferVerify(Isolate* isolate) {
- CHECK(IsJSArrayBuffer());
+ TorqueGeneratedClassVerifiers::JSArrayBufferVerify(*this, isolate);
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
CHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
CHECK_EQ(0,
*reinterpret_cast<uint32_t*>(address() + kOptionalPaddingOffset));
}
- JSObjectVerify(isolate);
}
void JSArrayBufferView::JSArrayBufferViewVerify(Isolate* isolate) {
- CHECK(IsJSArrayBufferView());
- JSObjectVerify(isolate);
- VerifyPointer(isolate, buffer());
- CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined(isolate) ||
- buffer() == Smi::kZero);
+ TorqueGeneratedClassVerifiers::JSArrayBufferViewVerify(*this, isolate);
CHECK_LE(byte_length(), JSArrayBuffer::kMaxByteLength);
CHECK_LE(byte_offset(), JSArrayBuffer::kMaxByteLength);
}
void JSTypedArray::JSTypedArrayVerify(Isolate* isolate) {
- CHECK(IsJSTypedArray());
- JSArrayBufferViewVerify(isolate);
- VerifyPointer(isolate, raw_length());
- CHECK(raw_length()->IsSmi() || raw_length()->IsUndefined(isolate));
- VerifyPointer(isolate, elements());
+ TorqueGeneratedClassVerifiers::JSTypedArrayVerify(*this, isolate);
+ CHECK_LE(length(), JSTypedArray::kMaxLength);
}
void JSDataView::JSDataViewVerify(Isolate* isolate) {
- CHECK(IsJSDataView());
- JSArrayBufferViewVerify(isolate);
+ TorqueGeneratedClassVerifiers::JSDataViewVerify(*this, isolate);
+ if (!WasDetached()) {
+ CHECK_EQ(reinterpret_cast<uint8_t*>(
+ JSArrayBuffer::cast(buffer()).backing_store()) +
+ byte_offset(),
+ data_pointer());
+ }
}
-void Foreign::ForeignVerify(Isolate* isolate) { CHECK(IsForeign()); }
+USE_TORQUE_VERIFIER(Foreign)
void AsyncGeneratorRequest::AsyncGeneratorRequestVerify(Isolate* isolate) {
- CHECK(IsAsyncGeneratorRequest());
- VerifySmiField(kResumeModeOffset);
+ TorqueGeneratedClassVerifiers::AsyncGeneratorRequestVerify(*this, isolate);
CHECK_GE(resume_mode(), JSGeneratorObject::kNext);
CHECK_LE(resume_mode(), JSGeneratorObject::kThrow);
- CHECK(promise()->IsJSPromise());
- VerifyPointer(isolate, value());
- VerifyPointer(isolate, next());
- next()->ObjectVerify(isolate);
+ next().ObjectVerify(isolate);
}
void BigInt::BigIntVerify(Isolate* isolate) {
@@ -1744,20 +1554,10 @@ void JSModuleNamespace::JSModuleNamespaceVerify(Isolate* isolate) {
}
void ModuleInfoEntry::ModuleInfoEntryVerify(Isolate* isolate) {
- CHECK(IsModuleInfoEntry());
-
- CHECK(export_name()->IsUndefined(isolate) || export_name()->IsString());
- CHECK(local_name()->IsUndefined(isolate) || local_name()->IsString());
- CHECK(import_name()->IsUndefined(isolate) || import_name()->IsString());
-
- VerifySmiField(kModuleRequestOffset);
- VerifySmiField(kCellIndexOffset);
- VerifySmiField(kBegPosOffset);
- VerifySmiField(kEndPosOffset);
-
- CHECK_IMPLIES(import_name()->IsString(), module_request() >= 0);
- CHECK_IMPLIES(export_name()->IsString() && import_name()->IsString(),
- local_name()->IsUndefined(isolate));
+ TorqueGeneratedClassVerifiers::ModuleInfoEntryVerify(*this, isolate);
+ CHECK_IMPLIES(import_name().IsString(), module_request() >= 0);
+ CHECK_IMPLIES(export_name().IsString() && import_name().IsString(),
+ local_name().IsUndefined(isolate));
}
void Module::ModuleVerify(Isolate* isolate) {
@@ -1773,40 +1573,38 @@ void Module::ModuleVerify(Isolate* isolate) {
VerifySmiField(kHashOffset);
VerifySmiField(kStatusOffset);
- CHECK((status() >= kEvaluating && code()->IsModuleInfo()) ||
- (status() == kInstantiated && code()->IsJSGeneratorObject()) ||
- (status() == kInstantiating && code()->IsJSFunction()) ||
- (code()->IsSharedFunctionInfo()));
+ CHECK((status() >= kEvaluating && code().IsModuleInfo()) ||
+ (status() == kInstantiated && code().IsJSGeneratorObject()) ||
+ (status() == kInstantiating && code().IsJSFunction()) ||
+ (code().IsSharedFunctionInfo()));
- CHECK_EQ(status() == kErrored, !exception()->IsTheHole(isolate));
+ CHECK_EQ(status() == kErrored, !exception().IsTheHole(isolate));
- CHECK(module_namespace()->IsUndefined(isolate) ||
- module_namespace()->IsJSModuleNamespace());
- if (module_namespace()->IsJSModuleNamespace()) {
+ CHECK(module_namespace().IsUndefined(isolate) ||
+ module_namespace().IsJSModuleNamespace());
+ if (module_namespace().IsJSModuleNamespace()) {
CHECK_LE(kInstantiating, status());
- CHECK_EQ(JSModuleNamespace::cast(module_namespace())->module(), *this);
+ CHECK_EQ(JSModuleNamespace::cast(module_namespace()).module(), *this);
}
- CHECK_EQ(requested_modules()->length(), info()->module_requests()->length());
+ CHECK_EQ(requested_modules().length(), info().module_requests().length());
- CHECK(import_meta()->IsTheHole(isolate) || import_meta()->IsJSObject());
+ CHECK(import_meta().IsTheHole(isolate) || import_meta().IsJSObject());
CHECK_NE(hash(), 0);
}
void PrototypeInfo::PrototypeInfoVerify(Isolate* isolate) {
- CHECK(IsPrototypeInfo());
- Object module_ns = module_namespace();
- CHECK(module_ns->IsJSModuleNamespace() || module_ns->IsUndefined(isolate));
- if (prototype_users()->IsWeakArrayList()) {
+ TorqueGeneratedClassVerifiers::PrototypeInfoVerify(*this, isolate);
+ if (prototype_users().IsWeakArrayList()) {
PrototypeUsers::Verify(WeakArrayList::cast(prototype_users()));
} else {
- CHECK(prototype_users()->IsSmi());
+ CHECK(prototype_users().IsSmi());
}
}
void PrototypeUsers::Verify(WeakArrayList array) {
- if (array->length() == 0) {
+ if (array.length() == 0) {
// Allow empty & uninitialized lists.
return;
}
@@ -1815,18 +1613,18 @@ void PrototypeUsers::Verify(WeakArrayList array) {
int empty_slots_count = 0;
while (empty_slot != kNoEmptySlotsMarker) {
CHECK_GT(empty_slot, 0);
- CHECK_LT(empty_slot, array->length());
- empty_slot = array->Get(empty_slot).ToSmi().value();
+ CHECK_LT(empty_slot, array.length());
+ empty_slot = array.Get(empty_slot).ToSmi().value();
++empty_slots_count;
}
// Verify that all elements are either weak pointers or SMIs marking empty
// slots.
int weak_maps_count = 0;
- for (int i = kFirstIndex; i < array->length(); ++i) {
+ for (int i = kFirstIndex; i < array.length(); ++i) {
HeapObject heap_object;
- MaybeObject object = array->Get(i);
- if ((object->GetHeapObjectIfWeak(&heap_object) && heap_object->IsMap()) ||
+ MaybeObject object = array.Get(i);
+ if ((object->GetHeapObjectIfWeak(&heap_object) && heap_object.IsMap()) ||
object->IsCleared()) {
++weak_maps_count;
} else {
@@ -1834,41 +1632,23 @@ void PrototypeUsers::Verify(WeakArrayList array) {
}
}
- CHECK_EQ(weak_maps_count + empty_slots_count + 1, array->length());
+ CHECK_EQ(weak_maps_count + empty_slots_count + 1, array.length());
}
-void Tuple2::Tuple2Verify(Isolate* isolate) {
- CHECK(IsTuple2());
- VerifyObjectField(isolate, kValue1Offset);
- VerifyObjectField(isolate, kValue2Offset);
-}
+USE_TORQUE_VERIFIER(TemplateObjectDescription)
void EnumCache::EnumCacheVerify(Isolate* isolate) {
- CHECK(IsEnumCache());
+ TorqueGeneratedClassVerifiers::EnumCacheVerify(*this, isolate);
Heap* heap = isolate->heap();
if (*this == ReadOnlyRoots(heap).empty_enum_cache()) {
CHECK_EQ(ReadOnlyRoots(heap).empty_fixed_array(), keys());
CHECK_EQ(ReadOnlyRoots(heap).empty_fixed_array(), indices());
- } else {
- VerifyObjectField(isolate, kKeysOffset);
- VerifyObjectField(isolate, kIndicesOffset);
- CHECK(keys()->IsFixedArray());
- CHECK(indices()->IsFixedArray());
}
}
-void Tuple3::Tuple3Verify(Isolate* isolate) {
- CHECK(IsTuple3());
- VerifyObjectField(isolate, kValue1Offset);
- VerifyObjectField(isolate, kValue2Offset);
- VerifyObjectField(isolate, kValue3Offset);
-}
+USE_TORQUE_VERIFIER(SourcePositionTableWithFrameCache)
-void ClassPositions::ClassPositionsVerify(Isolate* isolate) {
- CHECK(IsClassPositions());
- VerifySmiField(kStartOffset);
- VerifySmiField(kEndOffset);
-}
+USE_TORQUE_VERIFIER(ClassPositions)
void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
Isolate* isolate) {
@@ -1878,43 +1658,13 @@ void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionVerify(
this->FixedArrayVerify(isolate);
}
-void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionVerify(
- Isolate* isolate) {
- CHECK(IsArrayBoilerplateDescription());
- CHECK(constant_elements()->IsFixedArrayBase());
- VerifyObjectField(isolate, kConstantElementsOffset);
-}
-
-void AsmWasmData::AsmWasmDataVerify(Isolate* isolate) {
- CHECK(IsAsmWasmData());
- CHECK(managed_native_module()->IsForeign());
- VerifyObjectField(isolate, kManagedNativeModuleOffset);
- CHECK(export_wrappers()->IsFixedArray());
- VerifyObjectField(isolate, kExportWrappersOffset);
- CHECK(asm_js_offset_table()->IsByteArray());
- VerifyObjectField(isolate, kAsmJsOffsetTableOffset);
- CHECK(uses_bitset()->IsHeapNumber());
- VerifyObjectField(isolate, kUsesBitsetOffset);
-}
-
-void WasmDebugInfo::WasmDebugInfoVerify(Isolate* isolate) {
- CHECK(IsWasmDebugInfo());
- VerifyObjectField(isolate, kInstanceOffset);
- CHECK(wasm_instance()->IsWasmInstanceObject());
- VerifyObjectField(isolate, kInterpreterHandleOffset);
- CHECK(interpreter_handle()->IsUndefined(isolate) ||
- interpreter_handle()->IsForeign());
- VerifyObjectField(isolate, kInterpretedFunctionsOffset);
- CHECK(interpreted_functions()->IsFixedArray());
- VerifyObjectField(isolate, kLocalsNamesOffset);
- VerifyObjectField(isolate, kCWasmEntriesOffset);
- VerifyObjectField(isolate, kCWasmEntryMapOffset);
-}
-
-void WasmExceptionTag::WasmExceptionTagVerify(Isolate* isolate) {
- CHECK(IsWasmExceptionTag());
- VerifySmiField(kIndexOffset);
-}
+USE_TORQUE_VERIFIER(ArrayBoilerplateDescription)
+
+USE_TORQUE_VERIFIER(AsmWasmData)
+
+USE_TORQUE_VERIFIER(WasmDebugInfo)
+
+USE_TORQUE_VERIFIER(WasmExceptionTag)
void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
JSObjectVerify(isolate);
@@ -1923,7 +1673,7 @@ void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
// Just generically check all tagged fields. Don't check the untagged fields,
// as some of them might still contain the "undefined" value if the
// WasmInstanceObject is not fully set up yet.
- for (int offset = kHeaderSize; offset < kEndOfTaggedFieldsOffset;
+ for (int offset = kHeaderSize; offset < kEndOfStrongFieldsOffset;
offset += kTaggedSize) {
VerifyObjectField(isolate, offset);
}
@@ -1931,69 +1681,41 @@ void WasmInstanceObject::WasmInstanceObjectVerify(Isolate* isolate) {
void WasmExportedFunctionData::WasmExportedFunctionDataVerify(
Isolate* isolate) {
- CHECK(IsWasmExportedFunctionData());
- VerifyObjectField(isolate, kWrapperCodeOffset);
- CHECK(wrapper_code()->kind() == Code::JS_TO_WASM_FUNCTION ||
- wrapper_code()->kind() == Code::C_WASM_ENTRY);
- VerifyObjectField(isolate, kInstanceOffset);
- CHECK(instance()->IsWasmInstanceObject());
- VerifySmiField(kJumpTableOffsetOffset);
- VerifySmiField(kFunctionIndexOffset);
+ TorqueGeneratedClassVerifiers::WasmExportedFunctionDataVerify(*this, isolate);
+ CHECK(wrapper_code().kind() == Code::JS_TO_WASM_FUNCTION ||
+ wrapper_code().kind() == Code::C_WASM_ENTRY);
}
void WasmModuleObject::WasmModuleObjectVerify(Isolate* isolate) {
- CHECK(IsWasmModuleObject());
- VerifyObjectField(isolate, kNativeModuleOffset);
- CHECK(managed_native_module()->IsForeign());
- VerifyObjectField(isolate, kExportWrappersOffset);
- CHECK(export_wrappers()->IsFixedArray());
- VerifyObjectField(isolate, kScriptOffset);
- CHECK(script()->IsScript());
- VerifyObjectField(isolate, kWeakInstanceListOffset);
- VerifyObjectField(isolate, kAsmJsOffsetTableOffset);
- VerifyObjectField(isolate, kBreakPointInfosOffset);
+ TorqueGeneratedClassVerifiers::WasmModuleObjectVerify(*this, isolate);
+ CHECK(managed_native_module().IsForeign());
+ CHECK(export_wrappers().IsFixedArray());
+ CHECK(script().IsScript());
}
void WasmTableObject::WasmTableObjectVerify(Isolate* isolate) {
- CHECK(IsWasmTableObject());
- VerifyObjectField(isolate, kElementsOffset);
- CHECK(elements()->IsFixedArray());
- VerifyObjectField(isolate, kMaximumLengthOffset);
- CHECK(maximum_length()->IsSmi() || maximum_length()->IsHeapNumber() ||
- maximum_length()->IsUndefined(isolate));
- VerifyObjectField(isolate, kDispatchTablesOffset);
+ TorqueGeneratedClassVerifiers::WasmTableObjectVerify(*this, isolate);
+ CHECK(elements().IsFixedArray());
VerifySmiField(kRawTypeOffset);
}
void WasmMemoryObject::WasmMemoryObjectVerify(Isolate* isolate) {
- CHECK(IsWasmMemoryObject());
- VerifyObjectField(isolate, kArrayBufferOffset);
- CHECK(array_buffer()->IsJSArrayBuffer());
+ TorqueGeneratedClassVerifiers::WasmMemoryObjectVerify(*this, isolate);
+ CHECK(array_buffer().IsJSArrayBuffer());
VerifySmiField(kMaximumPagesOffset);
- VerifyObjectField(isolate, kInstancesOffset);
}
-void WasmGlobalObject::WasmGlobalObjectVerify(Isolate* isolate) {
- CHECK(IsWasmGlobalObject());
- VerifyObjectField(isolate, kUntaggedBufferOffset);
- VerifyObjectField(isolate, kTaggedBufferOffset);
- VerifyObjectField(isolate, kOffsetOffset);
- VerifyObjectField(isolate, kFlagsOffset);
-}
+USE_TORQUE_VERIFIER(WasmGlobalObject)
void WasmExceptionObject::WasmExceptionObjectVerify(Isolate* isolate) {
- CHECK(IsWasmExceptionObject());
- VerifyObjectField(isolate, kSerializedSignatureOffset);
- CHECK(serialized_signature()->IsByteArray());
- VerifyObjectField(isolate, kExceptionTagOffset);
- CHECK(exception_tag()->IsHeapObject());
+ TorqueGeneratedClassVerifiers::WasmExceptionObjectVerify(*this, isolate);
+ CHECK(serialized_signature().IsByteArray());
}
void DataHandler::DataHandlerVerify(Isolate* isolate) {
- CHECK(IsDataHandler());
- CHECK_IMPLIES(!smi_handler()->IsSmi(),
- smi_handler()->IsCode() && IsStoreHandler());
- CHECK(validity_cell()->IsSmi() || validity_cell()->IsCell());
+ TorqueGeneratedClassVerifiers::DataHandlerVerify(*this, isolate);
+ CHECK_IMPLIES(!smi_handler().IsSmi(),
+ smi_handler().IsCode() && IsStoreHandler());
int data_count = data_field_count();
if (data_count >= 1) {
VerifyMaybeObjectField(isolate, kData1Offset);
@@ -2017,28 +1739,15 @@ void StoreHandler::StoreHandlerVerify(Isolate* isolate) {
}
void AccessorInfo::AccessorInfoVerify(Isolate* isolate) {
- CHECK(IsAccessorInfo());
- VerifyPointer(isolate, name());
- VerifyPointer(isolate, expected_receiver_type());
+ TorqueGeneratedClassVerifiers::AccessorInfoVerify(*this, isolate);
VerifyForeignPointer(isolate, *this, getter());
VerifyForeignPointer(isolate, *this, setter());
VerifyForeignPointer(isolate, *this, js_getter());
- VerifyPointer(isolate, data());
}
-void AccessorPair::AccessorPairVerify(Isolate* isolate) {
- CHECK(IsAccessorPair());
- VerifyPointer(isolate, getter());
- VerifyPointer(isolate, setter());
-}
+USE_TORQUE_VERIFIER(AccessorPair)
-void AccessCheckInfo::AccessCheckInfoVerify(Isolate* isolate) {
- CHECK(IsAccessCheckInfo());
- VerifyPointer(isolate, callback());
- VerifyPointer(isolate, named_interceptor());
- VerifyPointer(isolate, indexed_interceptor());
- VerifyPointer(isolate, data());
-}
+USE_TORQUE_VERIFIER(AccessCheckInfo)
void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
CHECK(IsCallHandlerInfo());
@@ -2053,127 +1762,83 @@ void CallHandlerInfo::CallHandlerInfoVerify(Isolate* isolate) {
}
void InterceptorInfo::InterceptorInfoVerify(Isolate* isolate) {
- CHECK(IsInterceptorInfo());
+ TorqueGeneratedClassVerifiers::InterceptorInfoVerify(*this, isolate);
VerifyForeignPointer(isolate, *this, getter());
VerifyForeignPointer(isolate, *this, setter());
VerifyForeignPointer(isolate, *this, query());
+ VerifyForeignPointer(isolate, *this, descriptor());
VerifyForeignPointer(isolate, *this, deleter());
VerifyForeignPointer(isolate, *this, enumerator());
- VerifyPointer(isolate, data());
- VerifySmiField(kFlagsOffset);
+ VerifyForeignPointer(isolate, *this, definer());
}
-void TemplateInfo::TemplateInfoVerify(Isolate* isolate) {
- VerifyPointer(isolate, tag());
- VerifyPointer(isolate, property_list());
- VerifyPointer(isolate, property_accessors());
-}
+USE_TORQUE_VERIFIER(TemplateInfo)
-void FunctionTemplateInfo::FunctionTemplateInfoVerify(Isolate* isolate) {
- CHECK(IsFunctionTemplateInfo());
- TemplateInfoVerify(isolate);
- VerifyPointer(isolate, serial_number());
- VerifyPointer(isolate, call_code());
- VerifyPointer(isolate, signature());
- VerifyPointer(isolate, cached_property_name());
- VerifyPointer(isolate, rare_data());
-}
+USE_TORQUE_VERIFIER(FunctionTemplateInfo)
-void FunctionTemplateRareData::FunctionTemplateRareDataVerify(
- Isolate* isolate) {
- CHECK(IsFunctionTemplateRareData());
- VerifyPointer(isolate, prototype_template());
- VerifyPointer(isolate, parent_template());
- VerifyPointer(isolate, named_property_handler());
- VerifyPointer(isolate, indexed_property_handler());
- VerifyPointer(isolate, instance_template());
- VerifyPointer(isolate, access_check_info());
-}
-
-void ObjectTemplateInfo::ObjectTemplateInfoVerify(Isolate* isolate) {
- CHECK(IsObjectTemplateInfo());
- TemplateInfoVerify(isolate);
- VerifyPointer(isolate, constructor());
- VerifyPointer(isolate, data());
-}
+USE_TORQUE_VERIFIER(FunctionTemplateRareData)
+
+USE_TORQUE_VERIFIER(WasmCapiFunctionData)
+
+USE_TORQUE_VERIFIER(WasmJSFunctionData)
+
+USE_TORQUE_VERIFIER(ObjectTemplateInfo)
void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
CHECK(IsAllocationSite());
+ CHECK(dependent_code().IsDependentCode());
+ CHECK(transition_info_or_boilerplate().IsSmi() ||
+ transition_info_or_boilerplate().IsJSObject());
+ CHECK(nested_site().IsAllocationSite() || nested_site() == Smi::kZero);
}
void AllocationMemento::AllocationMementoVerify(Isolate* isolate) {
- CHECK(IsAllocationMemento());
+ TorqueGeneratedClassVerifiers::AllocationMementoVerify(*this, isolate);
VerifyHeapPointer(isolate, allocation_site());
- CHECK(!IsValid() || GetAllocationSite()->IsAllocationSite());
+ CHECK(!IsValid() || GetAllocationSite().IsAllocationSite());
}
void Script::ScriptVerify(Isolate* isolate) {
- CHECK(IsScript());
- VerifyPointer(isolate, source());
- VerifyPointer(isolate, name());
- VerifyPointer(isolate, line_ends());
- for (int i = 0; i < shared_function_infos()->length(); ++i) {
- MaybeObject maybe_object = shared_function_infos()->Get(i);
+ TorqueGeneratedClassVerifiers::ScriptVerify(*this, isolate);
+ for (int i = 0; i < shared_function_infos().length(); ++i) {
+ MaybeObject maybe_object = shared_function_infos().Get(i);
HeapObject heap_object;
CHECK(maybe_object->IsWeak() || maybe_object->IsCleared() ||
(maybe_object->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsUndefined(isolate)));
+ heap_object.IsUndefined(isolate)));
}
- VerifySmiField(kIdOffset);
- VerifySmiField(kLineOffsetOffset);
- VerifySmiField(kColumnOffsetOffset);
- VerifySmiField(kScriptTypeOffset);
- VerifySmiField(kEvalFromPositionOffset);
- VerifySmiField(kFlagsOffset);
}
void NormalizedMapCache::NormalizedMapCacheVerify(Isolate* isolate) {
- WeakFixedArray::cast(*this)->WeakFixedArrayVerify(isolate);
+ WeakFixedArray::cast(*this).WeakFixedArrayVerify(isolate);
if (FLAG_enable_slow_asserts) {
for (int i = 0; i < length(); i++) {
MaybeObject e = WeakFixedArray::Get(i);
HeapObject heap_object;
if (e->GetHeapObjectIfWeak(&heap_object)) {
- Map::cast(heap_object)->DictionaryMapVerify(isolate);
+ Map::cast(heap_object).DictionaryMapVerify(isolate);
} else {
CHECK(e->IsCleared() || (e->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsUndefined(isolate)));
+ heap_object.IsUndefined(isolate)));
}
}
}
}
-void DebugInfo::DebugInfoVerify(Isolate* isolate) {
- CHECK(IsDebugInfo());
- VerifyPointer(isolate, shared());
- VerifyPointer(isolate, script());
- VerifyPointer(isolate, original_bytecode_array());
- VerifyPointer(isolate, break_points());
-}
+USE_TORQUE_VERIFIER(DebugInfo)
-void StackTraceFrame::StackTraceFrameVerify(Isolate* isolate) {
- CHECK(IsStackTraceFrame());
- VerifySmiField(kFrameIndexOffset);
- VerifySmiField(kIdOffset);
- VerifyPointer(isolate, frame_array());
- VerifyPointer(isolate, frame_info());
-}
+USE_TORQUE_VERIFIER(StackTraceFrame)
-void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
- CHECK(IsStackFrameInfo());
- VerifyPointer(isolate, script_name());
- VerifyPointer(isolate, script_name_or_source_url());
- VerifyPointer(isolate, function_name());
-}
+USE_TORQUE_VERIFIER(StackFrameInfo)
void PreparseData::PreparseDataVerify(Isolate* isolate) {
- CHECK(IsPreparseData());
+ TorqueGeneratedClassVerifiers::PreparseDataVerify(*this, isolate);
CHECK_LE(0, data_length());
CHECK_LE(0, children_length());
for (int i = 0; i < children_length(); ++i) {
Object child = get_child_raw(i);
- CHECK(child->IsNull() || child->IsPreparseData());
+ CHECK(child.IsNull() || child.IsPreparseData());
VerifyPointer(isolate, child);
}
}
@@ -2191,13 +1856,7 @@ void UncompiledDataWithoutPreparseData::UncompiledDataWithoutPreparseDataVerify(
VerifyPointer(isolate, inferred_name());
}
-void InterpreterData::InterpreterDataVerify(Isolate* isolate) {
- CHECK(IsInterpreterData());
- VerifyObjectField(isolate, kBytecodeArrayOffset);
- CHECK(bytecode_array()->IsBytecodeArray());
- VerifyObjectField(isolate, kInterpreterTrampolineOffset);
- CHECK(interpreter_trampoline()->IsCode());
-}
+USE_TORQUE_VERIFIER(InterpreterData)
#ifdef V8_INTL_SUPPORT
void JSV8BreakIterator::JSV8BreakIteratorVerify(Isolate* isolate) {
@@ -2221,64 +1880,42 @@ void JSCollator::JSCollatorVerify(Isolate* isolate) {
}
void JSDateTimeFormat::JSDateTimeFormatVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kICULocaleOffset);
- VerifyObjectField(isolate, kICUSimpleDateFormatOffset);
- VerifyObjectField(isolate, kICUDateIntervalFormatOffset);
- VerifyObjectField(isolate, kBoundFormatOffset);
- VerifyObjectField(isolate, kFlagsOffset);
+ TorqueGeneratedClassVerifiers::JSDateTimeFormatVerify(*this, isolate);
+ VerifySmiField(kFlagsOffset);
}
void JSListFormat::JSListFormatVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kICUFormatterOffset);
- VerifyObjectField(isolate, kFlagsOffset);
+ TorqueGeneratedClassVerifiers::JSListFormatVerify(*this, isolate);
+ VerifySmiField(kFlagsOffset);
}
-void JSLocale::JSLocaleVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kICULocaleOffset);
-}
+USE_TORQUE_VERIFIER(JSLocale)
void JSNumberFormat::JSNumberFormatVerify(Isolate* isolate) {
- CHECK(IsJSNumberFormat());
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kICUNumberFormatOffset);
- VerifyObjectField(isolate, kBoundFormatOffset);
- VerifyObjectField(isolate, kFlagsOffset);
+ TorqueGeneratedClassVerifiers::JSNumberFormatVerify(*this, isolate);
+ VerifySmiField(kFlagsOffset);
}
void JSPluralRules::JSPluralRulesVerify(Isolate* isolate) {
- CHECK(IsJSPluralRules());
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kFlagsOffset);
- VerifyObjectField(isolate, kICUPluralRulesOffset);
- VerifyObjectField(isolate, kICUDecimalFormatOffset);
+ TorqueGeneratedClassVerifiers::JSPluralRulesVerify(*this, isolate);
+ VerifySmiField(kFlagsOffset);
}
void JSRelativeTimeFormat::JSRelativeTimeFormatVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kICUFormatterOffset);
- VerifyObjectField(isolate, kFlagsOffset);
+ TorqueGeneratedClassVerifiers::JSRelativeTimeFormatVerify(*this, isolate);
+ VerifySmiField(kFlagsOffset);
}
void JSSegmentIterator::JSSegmentIteratorVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kICUBreakIteratorOffset);
- VerifyObjectField(isolate, kUnicodeStringOffset);
- VerifyObjectField(isolate, kFlagsOffset);
+ TorqueGeneratedClassVerifiers::JSSegmentIteratorVerify(*this, isolate);
+ VerifySmiField(kFlagsOffset);
}
void JSSegmenter::JSSegmenterVerify(Isolate* isolate) {
- JSObjectVerify(isolate);
- VerifyObjectField(isolate, kLocaleOffset);
- VerifyObjectField(isolate, kICUBreakIteratorOffset);
- VerifyObjectField(isolate, kFlagsOffset);
+ TorqueGeneratedClassVerifiers::JSSegmenterVerify(*this, isolate);
+ VerifySmiField(kFlagsOffset);
}
+
#endif // V8_INTL_SUPPORT
#endif // VERIFY_HEAP
@@ -2291,18 +1928,18 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
// Named properties
if (HasFastProperties()) {
info->number_of_objects_with_fast_properties_++;
- info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex();
- info->number_of_fast_unused_fields_ += map()->UnusedPropertyFields();
+ info->number_of_fast_used_fields_ += map().NextFreePropertyIndex();
+ info->number_of_fast_unused_fields_ += map().UnusedPropertyFields();
} else if (IsJSGlobalObject()) {
- GlobalDictionary dict = JSGlobalObject::cast(*this)->global_dictionary();
- info->number_of_slow_used_properties_ += dict->NumberOfElements();
+ GlobalDictionary dict = JSGlobalObject::cast(*this).global_dictionary();
+ info->number_of_slow_used_properties_ += dict.NumberOfElements();
info->number_of_slow_unused_properties_ +=
- dict->Capacity() - dict->NumberOfElements();
+ dict.Capacity() - dict.NumberOfElements();
} else {
NameDictionary dict = property_dictionary();
- info->number_of_slow_used_properties_ += dict->NumberOfElements();
+ info->number_of_slow_used_properties_ += dict.NumberOfElements();
info->number_of_slow_unused_properties_ +=
- dict->Capacity() - dict->NumberOfElements();
+ dict.Capacity() - dict.NumberOfElements();
}
// Indexed properties
switch (GetElementsKind()) {
@@ -2311,6 +1948,8 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
case HOLEY_DOUBLE_ELEMENTS:
case PACKED_DOUBLE_ELEMENTS:
case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
@@ -2318,11 +1957,11 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
info->number_of_objects_with_fast_elements_++;
int holes = 0;
FixedArray e = FixedArray::cast(elements());
- int len = e->length();
+ int len = e.length();
for (int i = 0; i < len; i++) {
- if (e->get(i)->IsTheHole(isolate)) holes++;
+ if (e.get(i).IsTheHole(isolate)) holes++;
}
- info->number_of_fast_used_elements_ += len - holes;
+ info->number_of_fast_used_elements_ += len - holes;
info->number_of_fast_unused_elements_ += holes;
break;
}
@@ -2334,15 +1973,15 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
{
info->number_of_objects_with_fast_elements_++;
FixedArrayBase e = FixedArrayBase::cast(elements());
- info->number_of_fast_used_elements_ += e->length();
+ info->number_of_fast_used_elements_ += e.length();
break;
}
case DICTIONARY_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS: {
NumberDictionary dict = element_dictionary();
- info->number_of_slow_used_elements_ += dict->NumberOfElements();
+ info->number_of_slow_used_elements_ += dict.NumberOfElements();
info->number_of_slow_unused_elements_ +=
- dict->Capacity() - dict->NumberOfElements();
+ dict.Capacity() - dict.NumberOfElements();
break;
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -2352,7 +1991,6 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
}
}
-
void JSObject::SpillInformation::Clear() {
number_of_objects_ = 0;
number_of_objects_with_fast_properties_ = 0;
@@ -2367,21 +2005,20 @@ void JSObject::SpillInformation::Clear() {
number_of_slow_unused_elements_ = 0;
}
-
void JSObject::SpillInformation::Print() {
PrintF("\n JSObject Spill Statistics (#%d):\n", number_of_objects_);
PrintF(" - fast properties (#%d): %d (used) %d (unused)\n",
- number_of_objects_with_fast_properties_,
- number_of_fast_used_fields_, number_of_fast_unused_fields_);
+ number_of_objects_with_fast_properties_, number_of_fast_used_fields_,
+ number_of_fast_unused_fields_);
PrintF(" - slow properties (#%d): %d (used) %d (unused)\n",
number_of_objects_ - number_of_objects_with_fast_properties_,
number_of_slow_used_properties_, number_of_slow_unused_properties_);
PrintF(" - fast elements (#%d): %d (used) %d (unused)\n",
- number_of_objects_with_fast_elements_,
- number_of_fast_used_elements_, number_of_fast_unused_elements_);
+ number_of_objects_with_fast_elements_, number_of_fast_used_elements_,
+ number_of_fast_unused_elements_);
PrintF(" - slow elements (#%d): %d (used) %d (unused)\n",
number_of_objects_ - number_of_objects_with_fast_elements_,
@@ -2401,7 +2038,7 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
return false;
}
current_key = key;
- uint32_t hash = GetSortedKey(i)->Hash();
+ uint32_t hash = GetSortedKey(i).Hash();
if (hash < current) {
Print();
return false;
@@ -2420,10 +2057,10 @@ bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
for (int i = 0; i < number_of_transitions(); i++) {
Name key = GetSortedKey(i);
- uint32_t hash = key->Hash();
+ uint32_t hash = key.Hash();
PropertyKind kind = kData;
PropertyAttributes attributes = NONE;
- if (!TransitionsAccessor::IsSpecialTransition(key->GetReadOnlyRoots(),
+ if (!TransitionsAccessor::IsSpecialTransition(key.GetReadOnlyRoots(),
key)) {
Map target = GetTarget(i);
PropertyDetails details =
@@ -2452,11 +2089,11 @@ bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
bool TransitionsAccessor::IsSortedNoDuplicates() {
// Simple and non-existent transitions are always sorted.
if (encoding() != kFullTransitionArray) return true;
- return transitions()->IsSortedNoDuplicates();
+ return transitions().IsSortedNoDuplicates();
}
static bool CheckOneBackPointer(Map current_map, Object target) {
- return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
+ return !target.IsMap() || Map::cast(target).GetBackPointer() == current_map;
}
bool TransitionsAccessor::IsConsistentWithBackPointers() {
@@ -2468,6 +2105,8 @@ bool TransitionsAccessor::IsConsistentWithBackPointers() {
return true;
}
+#undef USE_TORQUE_VERIFIER
+
#endif // DEBUG
} // namespace internal
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 5257144c38..e65c0af190 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -2,18 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include <iomanip>
#include <memory>
-#include "src/bootstrapper.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
#include "src/heap/heap-inl.h" // For InOldSpace.
#include "src/heap/heap-write-barrier-inl.h" // For GetIsolateFromWritableObj.
+#include "src/init/bootstrapper.h"
#include "src/interpreter/bytecodes.h"
-#include "src/objects-inl.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/data-handler-inl.h"
@@ -27,7 +26,8 @@
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/objects/objects-inl.h"
+#include "src/snapshot/embedded/embedded-data.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator-inl.h"
#include "src/objects/js-collator-inl.h"
@@ -58,9 +58,10 @@
#include "src/objects/promise-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
-#include "src/ostreams.h"
+#include "src/objects/template-objects-inl.h"
+#include "src/objects/transitions-inl.h"
#include "src/regexp/jsregexp.h"
-#include "src/transitions-inl.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -86,7 +87,7 @@ void Object::Print(std::ostream& os) const { // NOLINT
os << "Smi: " << std::hex << "0x" << Smi::ToInt(*this);
os << std::dec << " (" << Smi::ToInt(*this) << ")\n";
} else {
- HeapObject::cast(*this)->HeapObjectPrint(os);
+ HeapObject::cast(*this).HeapObjectPrint(os);
}
}
@@ -95,52 +96,54 @@ void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
if (id != nullptr) {
os << id;
} else {
- os << map()->instance_type();
+ os << map().instance_type();
}
os << "]";
- if (GetHeapFromWritableObject(*this)->InOldSpace(*this)) {
+ if (ReadOnlyHeap::Contains(*this)) {
+ os << " in ReadOnlySpace";
+ } else if (GetHeapFromWritableObject(*this)->InOldSpace(*this)) {
os << " in OldSpace";
}
if (!IsMap()) os << "\n - map: " << Brief(map());
}
void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
- InstanceType instance_type = map()->instance_type();
+ InstanceType instance_type = map().instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
- String::cast(*this)->StringPrint(os);
+ String::cast(*this).StringPrint(os);
os << "\n";
return;
}
switch (instance_type) {
case SYMBOL_TYPE:
- Symbol::cast(*this)->SymbolPrint(os);
+ Symbol::cast(*this).SymbolPrint(os);
break;
case MAP_TYPE:
- Map::cast(*this)->MapPrint(os);
+ Map::cast(*this).MapPrint(os);
break;
case HEAP_NUMBER_TYPE:
- HeapNumber::cast(*this)->HeapNumberPrint(os);
+ HeapNumber::cast(*this).HeapNumberPrint(os);
os << "\n";
break;
case MUTABLE_HEAP_NUMBER_TYPE:
os << "<mutable ";
- MutableHeapNumber::cast(*this)->MutableHeapNumberPrint(os);
+ MutableHeapNumber::cast(*this).MutableHeapNumberPrint(os);
os << ">\n";
break;
case BIGINT_TYPE:
- BigInt::cast(*this)->BigIntPrint(os);
+ BigInt::cast(*this).BigIntPrint(os);
os << "\n";
break;
case EMBEDDER_DATA_ARRAY_TYPE:
- EmbedderDataArray::cast(*this)->EmbedderDataArrayPrint(os);
+ EmbedderDataArray::cast(*this).EmbedderDataArrayPrint(os);
break;
case FIXED_DOUBLE_ARRAY_TYPE:
- FixedDoubleArray::cast(*this)->FixedDoubleArrayPrint(os);
+ FixedDoubleArray::cast(*this).FixedDoubleArrayPrint(os);
break;
case FIXED_ARRAY_TYPE:
- FixedArray::cast(*this)->FixedArrayPrint(os);
+ FixedArray::cast(*this).FixedArrayPrint(os);
break;
case AWAIT_CONTEXT_TYPE:
case BLOCK_CONTEXT_TYPE:
@@ -152,10 +155,10 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case SCRIPT_CONTEXT_TYPE:
case WITH_CONTEXT_TYPE:
case SCRIPT_CONTEXT_TABLE_TYPE:
- Context::cast(*this)->ContextPrint(os);
+ Context::cast(*this).ContextPrint(os);
break;
case NATIVE_CONTEXT_TYPE:
- NativeContext::cast(*this)->NativeContextPrint(os);
+ NativeContext::cast(*this).NativeContextPrint(os);
break;
case HASH_TABLE_TYPE:
case ORDERED_HASH_MAP_TYPE:
@@ -164,57 +167,49 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case SIMPLE_NUMBER_DICTIONARY_TYPE:
- FixedArray::cast(*this)->FixedArrayPrint(os);
+ FixedArray::cast(*this).FixedArrayPrint(os);
break;
case STRING_TABLE_TYPE:
- ObjectHashTable::cast(*this)->ObjectHashTablePrint(os);
+ ObjectHashTable::cast(*this).ObjectHashTablePrint(os);
break;
case NUMBER_DICTIONARY_TYPE:
- NumberDictionary::cast(*this)->NumberDictionaryPrint(os);
+ NumberDictionary::cast(*this).NumberDictionaryPrint(os);
break;
case EPHEMERON_HASH_TABLE_TYPE:
- EphemeronHashTable::cast(*this)->EphemeronHashTablePrint(os);
+ EphemeronHashTable::cast(*this).EphemeronHashTablePrint(os);
break;
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
ObjectBoilerplateDescription::cast(*this)
- ->ObjectBoilerplateDescriptionPrint(os);
+ .ObjectBoilerplateDescriptionPrint(os);
break;
case PROPERTY_ARRAY_TYPE:
- PropertyArray::cast(*this)->PropertyArrayPrint(os);
+ PropertyArray::cast(*this).PropertyArrayPrint(os);
break;
case BYTE_ARRAY_TYPE:
- ByteArray::cast(*this)->ByteArrayPrint(os);
+ ByteArray::cast(*this).ByteArrayPrint(os);
break;
case BYTECODE_ARRAY_TYPE:
- BytecodeArray::cast(*this)->BytecodeArrayPrint(os);
+ BytecodeArray::cast(*this).BytecodeArrayPrint(os);
break;
case DESCRIPTOR_ARRAY_TYPE:
- DescriptorArray::cast(*this)->DescriptorArrayPrint(os);
+ DescriptorArray::cast(*this).DescriptorArrayPrint(os);
break;
case TRANSITION_ARRAY_TYPE:
- TransitionArray::cast(*this)->TransitionArrayPrint(os);
+ TransitionArray::cast(*this).TransitionArrayPrint(os);
break;
case FEEDBACK_CELL_TYPE:
- FeedbackCell::cast(*this)->FeedbackCellPrint(os);
+ FeedbackCell::cast(*this).FeedbackCellPrint(os);
break;
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
- ClosureFeedbackCellArray::cast(*this)->ClosureFeedbackCellArrayPrint(os);
+ ClosureFeedbackCellArray::cast(*this).ClosureFeedbackCellArrayPrint(os);
break;
case FEEDBACK_VECTOR_TYPE:
- FeedbackVector::cast(*this)->FeedbackVectorPrint(os);
+ FeedbackVector::cast(*this).FeedbackVectorPrint(os);
break;
case FREE_SPACE_TYPE:
- FreeSpace::cast(*this)->FreeSpacePrint(os);
+ FreeSpace::cast(*this).FreeSpacePrint(os);
break;
-#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
- case Fixed##Type##Array::kInstanceType: \
- Fixed##Type##Array::cast(*this)->FixedTypedArrayPrint(os); \
- break;
-
- TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
-#undef PRINT_FIXED_TYPED_ARRAY
-
case FILLER_TYPE:
os << "filler";
break;
@@ -228,205 +223,205 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_ERROR_TYPE:
// TODO(titzer): debug printing for more wasm objects
case WASM_EXCEPTION_TYPE:
- JSObject::cast(*this)->JSObjectPrint(os);
+ JSObject::cast(*this).JSObjectPrint(os);
break;
case WASM_MODULE_TYPE:
- WasmModuleObject::cast(*this)->WasmModuleObjectPrint(os);
+ WasmModuleObject::cast(*this).WasmModuleObjectPrint(os);
break;
case WASM_MEMORY_TYPE:
- WasmMemoryObject::cast(*this)->WasmMemoryObjectPrint(os);
+ WasmMemoryObject::cast(*this).WasmMemoryObjectPrint(os);
break;
case WASM_TABLE_TYPE:
- WasmTableObject::cast(*this)->WasmTableObjectPrint(os);
+ WasmTableObject::cast(*this).WasmTableObjectPrint(os);
break;
case WASM_GLOBAL_TYPE:
- WasmGlobalObject::cast(*this)->WasmGlobalObjectPrint(os);
+ WasmGlobalObject::cast(*this).WasmGlobalObjectPrint(os);
break;
case WASM_INSTANCE_TYPE:
- WasmInstanceObject::cast(*this)->WasmInstanceObjectPrint(os);
+ WasmInstanceObject::cast(*this).WasmInstanceObjectPrint(os);
break;
case JS_GENERATOR_OBJECT_TYPE:
- JSGeneratorObject::cast(*this)->JSGeneratorObjectPrint(os);
+ JSGeneratorObject::cast(*this).JSGeneratorObjectPrint(os);
break;
case JS_PROMISE_TYPE:
- JSPromise::cast(*this)->JSPromisePrint(os);
+ JSPromise::cast(*this).JSPromisePrint(os);
break;
case JS_ARRAY_TYPE:
- JSArray::cast(*this)->JSArrayPrint(os);
+ JSArray::cast(*this).JSArrayPrint(os);
break;
case JS_REGEXP_TYPE:
- JSRegExp::cast(*this)->JSRegExpPrint(os);
+ JSRegExp::cast(*this).JSRegExpPrint(os);
break;
case JS_REGEXP_STRING_ITERATOR_TYPE:
- JSRegExpStringIterator::cast(*this)->JSRegExpStringIteratorPrint(os);
+ JSRegExpStringIterator::cast(*this).JSRegExpStringIteratorPrint(os);
break;
case ODDBALL_TYPE:
- Oddball::cast(*this)->to_string()->Print(os);
+ Oddball::cast(*this).to_string().Print(os);
break;
case JS_BOUND_FUNCTION_TYPE:
- JSBoundFunction::cast(*this)->JSBoundFunctionPrint(os);
+ JSBoundFunction::cast(*this).JSBoundFunctionPrint(os);
break;
case JS_FUNCTION_TYPE:
- JSFunction::cast(*this)->JSFunctionPrint(os);
+ JSFunction::cast(*this).JSFunctionPrint(os);
break;
case JS_GLOBAL_PROXY_TYPE:
- JSGlobalProxy::cast(*this)->JSGlobalProxyPrint(os);
+ JSGlobalProxy::cast(*this).JSGlobalProxyPrint(os);
break;
case JS_GLOBAL_OBJECT_TYPE:
- JSGlobalObject::cast(*this)->JSGlobalObjectPrint(os);
+ JSGlobalObject::cast(*this).JSGlobalObjectPrint(os);
break;
case JS_VALUE_TYPE:
- JSValue::cast(*this)->JSValuePrint(os);
+ JSValue::cast(*this).JSValuePrint(os);
break;
case JS_DATE_TYPE:
- JSDate::cast(*this)->JSDatePrint(os);
+ JSDate::cast(*this).JSDatePrint(os);
break;
case CODE_TYPE:
- Code::cast(*this)->CodePrint(os);
+ Code::cast(*this).CodePrint(os);
break;
case CODE_DATA_CONTAINER_TYPE:
- CodeDataContainer::cast(*this)->CodeDataContainerPrint(os);
+ CodeDataContainer::cast(*this).CodeDataContainerPrint(os);
break;
case JS_PROXY_TYPE:
- JSProxy::cast(*this)->JSProxyPrint(os);
+ JSProxy::cast(*this).JSProxyPrint(os);
break;
case JS_SET_TYPE:
- JSSet::cast(*this)->JSSetPrint(os);
+ JSSet::cast(*this).JSSetPrint(os);
break;
case JS_MAP_TYPE:
- JSMap::cast(*this)->JSMapPrint(os);
+ JSMap::cast(*this).JSMapPrint(os);
break;
case JS_SET_KEY_VALUE_ITERATOR_TYPE:
case JS_SET_VALUE_ITERATOR_TYPE:
- JSSetIterator::cast(*this)->JSSetIteratorPrint(os);
+ JSSetIterator::cast(*this).JSSetIteratorPrint(os);
break;
case JS_MAP_KEY_ITERATOR_TYPE:
case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
case JS_MAP_VALUE_ITERATOR_TYPE:
- JSMapIterator::cast(*this)->JSMapIteratorPrint(os);
+ JSMapIterator::cast(*this).JSMapIteratorPrint(os);
break;
case WEAK_CELL_TYPE:
- WeakCell::cast(*this)->WeakCellPrint(os);
+ WeakCell::cast(*this).WeakCellPrint(os);
break;
case JS_WEAK_REF_TYPE:
- JSWeakRef::cast(*this)->JSWeakRefPrint(os);
+ JSWeakRef::cast(*this).JSWeakRefPrint(os);
break;
case JS_FINALIZATION_GROUP_TYPE:
- JSFinalizationGroup::cast(*this)->JSFinalizationGroupPrint(os);
+ JSFinalizationGroup::cast(*this).JSFinalizationGroupPrint(os);
break;
case JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE:
JSFinalizationGroupCleanupIterator::cast(*this)
- ->JSFinalizationGroupCleanupIteratorPrint(os);
+ .JSFinalizationGroupCleanupIteratorPrint(os);
break;
case JS_WEAK_MAP_TYPE:
- JSWeakMap::cast(*this)->JSWeakMapPrint(os);
+ JSWeakMap::cast(*this).JSWeakMapPrint(os);
break;
case JS_WEAK_SET_TYPE:
- JSWeakSet::cast(*this)->JSWeakSetPrint(os);
+ JSWeakSet::cast(*this).JSWeakSetPrint(os);
break;
case JS_MODULE_NAMESPACE_TYPE:
- JSModuleNamespace::cast(*this)->JSModuleNamespacePrint(os);
+ JSModuleNamespace::cast(*this).JSModuleNamespacePrint(os);
break;
case FOREIGN_TYPE:
- Foreign::cast(*this)->ForeignPrint(os);
+ Foreign::cast(*this).ForeignPrint(os);
break;
case CALL_HANDLER_INFO_TYPE:
- CallHandlerInfo::cast(*this)->CallHandlerInfoPrint(os);
+ CallHandlerInfo::cast(*this).CallHandlerInfoPrint(os);
break;
case PREPARSE_DATA_TYPE:
- PreparseData::cast(*this)->PreparseDataPrint(os);
+ PreparseData::cast(*this).PreparseDataPrint(os);
break;
case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
UncompiledDataWithoutPreparseData::cast(*this)
- ->UncompiledDataWithoutPreparseDataPrint(os);
+ .UncompiledDataWithoutPreparseDataPrint(os);
break;
case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE:
UncompiledDataWithPreparseData::cast(*this)
- ->UncompiledDataWithPreparseDataPrint(os);
+ .UncompiledDataWithPreparseDataPrint(os);
break;
case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::cast(*this)->SharedFunctionInfoPrint(os);
+ SharedFunctionInfo::cast(*this).SharedFunctionInfoPrint(os);
break;
case JS_MESSAGE_OBJECT_TYPE:
- JSMessageObject::cast(*this)->JSMessageObjectPrint(os);
+ JSMessageObject::cast(*this).JSMessageObjectPrint(os);
break;
case CELL_TYPE:
- Cell::cast(*this)->CellPrint(os);
+ Cell::cast(*this).CellPrint(os);
break;
case PROPERTY_CELL_TYPE:
- PropertyCell::cast(*this)->PropertyCellPrint(os);
+ PropertyCell::cast(*this).PropertyCellPrint(os);
break;
case JS_ARRAY_BUFFER_TYPE:
- JSArrayBuffer::cast(*this)->JSArrayBufferPrint(os);
+ JSArrayBuffer::cast(*this).JSArrayBufferPrint(os);
break;
case JS_ARRAY_ITERATOR_TYPE:
- JSArrayIterator::cast(*this)->JSArrayIteratorPrint(os);
+ JSArrayIterator::cast(*this).JSArrayIteratorPrint(os);
break;
case JS_TYPED_ARRAY_TYPE:
- JSTypedArray::cast(*this)->JSTypedArrayPrint(os);
+ JSTypedArray::cast(*this).JSTypedArrayPrint(os);
break;
case JS_DATA_VIEW_TYPE:
- JSDataView::cast(*this)->JSDataViewPrint(os);
+ JSDataView::cast(*this).JSDataViewPrint(os);
break;
#ifdef V8_INTL_SUPPORT
case JS_INTL_V8_BREAK_ITERATOR_TYPE:
- JSV8BreakIterator::cast(*this)->JSV8BreakIteratorPrint(os);
+ JSV8BreakIterator::cast(*this).JSV8BreakIteratorPrint(os);
break;
case JS_INTL_COLLATOR_TYPE:
- JSCollator::cast(*this)->JSCollatorPrint(os);
+ JSCollator::cast(*this).JSCollatorPrint(os);
break;
case JS_INTL_DATE_TIME_FORMAT_TYPE:
- JSDateTimeFormat::cast(*this)->JSDateTimeFormatPrint(os);
+ JSDateTimeFormat::cast(*this).JSDateTimeFormatPrint(os);
break;
case JS_INTL_LIST_FORMAT_TYPE:
- JSListFormat::cast(*this)->JSListFormatPrint(os);
+ JSListFormat::cast(*this).JSListFormatPrint(os);
break;
case JS_INTL_LOCALE_TYPE:
- JSLocale::cast(*this)->JSLocalePrint(os);
+ JSLocale::cast(*this).JSLocalePrint(os);
break;
case JS_INTL_NUMBER_FORMAT_TYPE:
- JSNumberFormat::cast(*this)->JSNumberFormatPrint(os);
+ JSNumberFormat::cast(*this).JSNumberFormatPrint(os);
break;
case JS_INTL_PLURAL_RULES_TYPE:
- JSPluralRules::cast(*this)->JSPluralRulesPrint(os);
+ JSPluralRules::cast(*this).JSPluralRulesPrint(os);
break;
case JS_INTL_RELATIVE_TIME_FORMAT_TYPE:
- JSRelativeTimeFormat::cast(*this)->JSRelativeTimeFormatPrint(os);
+ JSRelativeTimeFormat::cast(*this).JSRelativeTimeFormatPrint(os);
break;
case JS_INTL_SEGMENT_ITERATOR_TYPE:
- JSSegmentIterator::cast(*this)->JSSegmentIteratorPrint(os);
+ JSSegmentIterator::cast(*this).JSSegmentIteratorPrint(os);
break;
case JS_INTL_SEGMENTER_TYPE:
- JSSegmenter::cast(*this)->JSSegmenterPrint(os);
+ JSSegmenter::cast(*this).JSSegmenterPrint(os);
break;
#endif // V8_INTL_SUPPORT
#define MAKE_STRUCT_CASE(TYPE, Name, name) \
case TYPE: \
- Name::cast(*this)->Name##Print(os); \
+ Name::cast(*this).Name##Print(os); \
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
case ALLOCATION_SITE_TYPE:
- AllocationSite::cast(*this)->AllocationSitePrint(os);
+ AllocationSite::cast(*this).AllocationSitePrint(os);
break;
case LOAD_HANDLER_TYPE:
- LoadHandler::cast(*this)->LoadHandlerPrint(os);
+ LoadHandler::cast(*this).LoadHandlerPrint(os);
break;
case STORE_HANDLER_TYPE:
- StoreHandler::cast(*this)->StoreHandlerPrint(os);
+ StoreHandler::cast(*this).StoreHandlerPrint(os);
break;
case SCOPE_INFO_TYPE:
- ScopeInfo::cast(*this)->ScopeInfoPrint(os);
+ ScopeInfo::cast(*this).ScopeInfoPrint(os);
break;
case FEEDBACK_METADATA_TYPE:
- FeedbackMetadata::cast(*this)->FeedbackMetadataPrint(os);
+ FeedbackMetadata::cast(*this).FeedbackMetadataPrint(os);
break;
case WEAK_FIXED_ARRAY_TYPE:
- WeakFixedArray::cast(*this)->WeakFixedArrayPrint(os);
+ WeakFixedArray::cast(*this).WeakFixedArrayPrint(os);
break;
case WEAK_ARRAY_LIST_TYPE:
- WeakArrayList::cast(*this)->WeakArrayListPrint(os);
+ WeakArrayList::cast(*this).WeakArrayListPrint(os);
break;
case INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_TYPE:
@@ -452,9 +447,8 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
// TODO(all): Handle these types too.
- os << "UNKNOWN TYPE " << map()->instance_type();
+ os << "UNKNOWN TYPE " << map().instance_type();
UNREACHABLE();
- break;
}
}
@@ -471,35 +465,20 @@ void BytecodeArray::BytecodeArrayPrint(std::ostream& os) { // NOLINT
Disassemble(os);
}
-
void FreeSpace::FreeSpacePrint(std::ostream& os) { // NOLINT
os << "free space, size " << Size() << "\n";
}
-
-template <class Traits>
-void FixedTypedArray<Traits>::FixedTypedArrayPrint(
- std::ostream& os) { // NOLINT
- PrintHeader(os, Traits::ArrayTypeName());
- os << "\n - length: " << length() << "\n - base_pointer: ";
- if (base_pointer().ptr() == kNullAddress) {
- os << "<nullptr>";
- } else {
- os << Brief(base_pointer());
- }
- os << "\n - external_pointer: " << external_pointer() << "\n";
-}
-
bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
- DescriptorArray descs = map()->instance_descriptors();
- int nof_inobject_properties = map()->GetInObjectProperties();
+ DescriptorArray descs = map().instance_descriptors();
+ int nof_inobject_properties = map().GetInObjectProperties();
int i = 0;
- for (; i < map()->NumberOfOwnDescriptors(); i++) {
+ for (; i < map().NumberOfOwnDescriptors(); i++) {
os << "\n ";
- descs->GetKey(i)->NamePrint(os);
+ descs.GetKey(i).NamePrint(os);
os << ": ";
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
case kField: {
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
@@ -511,7 +490,7 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
break;
}
case kDescriptor:
- os << Brief(descs->GetStrongValue(i));
+ os << Brief(descs.GetStrongValue(i));
break;
}
os << " ";
@@ -525,9 +504,9 @@ bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
}
return i > 0;
} else if (IsJSGlobalObject()) {
- JSGlobalObject::cast(*this)->global_dictionary()->Print(os);
+ JSGlobalObject::cast(*this).global_dictionary().Print(os);
} else {
- property_dictionary()->Print(os);
+ property_dictionary().Print(os);
}
return true;
}
@@ -541,7 +520,7 @@ bool IsTheHoleAt(T array, int index) {
template <>
bool IsTheHoleAt(FixedDoubleArray array, int index) {
- return array->is_the_hole(index);
+ return array.is_the_hole(index);
}
template <class T>
@@ -549,22 +528,22 @@ double GetScalarElement(T array, int index) {
if (IsTheHoleAt(array, index)) {
return std::numeric_limits<double>::quiet_NaN();
}
- return array->get_scalar(index);
+ return array.get_scalar(index);
}
template <class T>
-void DoPrintElements(std::ostream& os, Object object) { // NOLINT
+void DoPrintElements(std::ostream& os, Object object, int length) { // NOLINT
const bool print_the_hole = std::is_same<T, FixedDoubleArray>::value;
T array = T::cast(object);
- if (array->length() == 0) return;
+ if (length == 0) return;
int previous_index = 0;
double previous_value = GetScalarElement(array, 0);
double value = 0.0;
int i;
- for (i = 1; i <= array->length(); i++) {
- if (i < array->length()) value = GetScalarElement(array, i);
+ for (i = 1; i <= length; i++) {
+ if (i < length) value = GetScalarElement(array, i);
bool values_are_nan = std::isnan(previous_value) && std::isnan(value);
- if (i != array->length() && (previous_value == value || values_are_nan) &&
+ if (i != length && (previous_value == value || values_are_nan) &&
IsTheHoleAt(array, i - 1) == IsTheHoleAt(array, i)) {
continue;
}
@@ -585,16 +564,40 @@ void DoPrintElements(std::ostream& os, Object object) { // NOLINT
}
}
+template <typename ElementType>
+void PrintTypedArrayElements(std::ostream& os, const ElementType* data_ptr,
+ size_t length) {
+ if (length == 0) return;
+ size_t previous_index = 0;
+ ElementType previous_value = data_ptr[0];
+ ElementType value = 0;
+ for (size_t i = 1; i <= length; i++) {
+ if (i < length) value = data_ptr[i];
+ if (i != length && previous_value == value) {
+ continue;
+ }
+ os << "\n";
+ std::stringstream ss;
+ ss << previous_index;
+ if (previous_index != i - 1) {
+ ss << '-' << (i - 1);
+ }
+ os << std::setw(12) << ss.str() << ": " << previous_value;
+ previous_index = i;
+ previous_value = value;
+ }
+}
+
template <typename T>
void PrintFixedArrayElements(std::ostream& os, T array) {
// Print in array notation for non-sparse arrays.
- Object previous_value = array->length() > 0 ? array->get(0) : Object();
+ Object previous_value = array.length() > 0 ? array.get(0) : Object();
Object value;
int previous_index = 0;
int i;
- for (i = 1; i <= array->length(); i++) {
- if (i < array->length()) value = array->get(i);
- if (previous_value == value && i != array->length()) {
+ for (i = 1; i <= array.length(); i++) {
+ if (i < array.length()) value = array.get(i);
+ if (previous_value == value && i != array.length()) {
continue;
}
os << "\n";
@@ -612,35 +615,35 @@ void PrintFixedArrayElements(std::ostream& os, T array) {
void PrintDictionaryElements(std::ostream& os, FixedArrayBase elements) {
// Print some internal fields
NumberDictionary dict = NumberDictionary::cast(elements);
- if (dict->requires_slow_elements()) {
+ if (dict.requires_slow_elements()) {
os << "\n - requires_slow_elements";
} else {
- os << "\n - max_number_key: " << dict->max_number_key();
+ os << "\n - max_number_key: " << dict.max_number_key();
}
- dict->Print(os);
+ dict.Print(os);
}
void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
SloppyArgumentsElements elements) {
- FixedArray arguments_store = elements->arguments();
- os << "\n 0: context: " << Brief(elements->context())
+ FixedArray arguments_store = elements.arguments();
+ os << "\n 0: context: " << Brief(elements.context())
<< "\n 1: arguments_store: " << Brief(arguments_store)
<< "\n parameter to context slot map:";
- for (uint32_t i = 0; i < elements->parameter_map_length(); i++) {
+ for (uint32_t i = 0; i < elements.parameter_map_length(); i++) {
uint32_t raw_index = i + SloppyArgumentsElements::kParameterMapStart;
- Object mapped_entry = elements->get_mapped_entry(i);
+ Object mapped_entry = elements.get_mapped_entry(i);
os << "\n " << raw_index << ": param(" << i
<< "): " << Brief(mapped_entry);
- if (mapped_entry->IsTheHole()) {
+ if (mapped_entry.IsTheHole()) {
os << " in the arguments_store[" << i << "]";
} else {
os << " in the context";
}
}
- if (arguments_store->length() == 0) return;
+ if (arguments_store.length() == 0) return;
os << "\n }"
<< "\n - arguments_store: " << Brief(arguments_store) << " "
- << ElementsKindToString(arguments_store->map()->elements_kind()) << " {";
+ << ElementsKindToString(arguments_store.map().elements_kind()) << " {";
if (kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
PrintFixedArrayElements(os, arguments_store);
} else {
@@ -665,14 +668,12 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
os << " - elements: " << Brief(elements()) << " {";
- if (elements()->length() == 0) {
- os << " }\n";
- return;
- }
- switch (map()->elements_kind()) {
+ switch (map().elements_kind()) {
case HOLEY_SMI_ELEMENTS:
case PACKED_SMI_ELEMENTS:
case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
@@ -682,14 +683,17 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
}
case HOLEY_DOUBLE_ELEMENTS:
case PACKED_DOUBLE_ELEMENTS: {
- DoPrintElements<FixedDoubleArray>(os, elements());
+ DoPrintElements<FixedDoubleArray>(os, elements(), elements().length());
break;
}
-#define PRINT_ELEMENTS(Type, type, TYPE, elementType) \
- case TYPE##_ELEMENTS: { \
- DoPrintElements<Fixed##Type##Array>(os, elements()); \
- break; \
+#define PRINT_ELEMENTS(Type, type, TYPE, elementType) \
+ case TYPE##_ELEMENTS: { \
+ size_t length = JSTypedArray::cast(*this).length(); \
+ const elementType* data_ptr = \
+ static_cast<const elementType*>(JSTypedArray::cast(*this).DataPtr()); \
+ PrintTypedArrayElements<elementType>(os, data_ptr, length); \
+ break; \
}
TYPED_ARRAYS(PRINT_ELEMENTS)
#undef PRINT_ELEMENTS
@@ -700,7 +704,7 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- PrintSloppyArgumentElements(os, map()->elements_kind(),
+ PrintSloppyArgumentElements(os, map().elements_kind(),
SloppyArgumentsElements::cast(elements()));
break;
case NO_ELEMENTS:
@@ -711,28 +715,28 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
static void JSObjectPrintHeader(std::ostream& os, JSObject obj,
const char* id) { // NOLINT
- Isolate* isolate = obj->GetIsolate();
- obj->PrintHeader(os, id);
+ Isolate* isolate = obj.GetIsolate();
+ obj.PrintHeader(os, id);
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
os << " [";
- if (obj->HasFastProperties()) {
+ if (obj.HasFastProperties()) {
os << "FastProperties";
} else {
os << "DictionaryProperties";
}
PrototypeIterator iter(isolate, obj);
os << "]\n - prototype: " << Brief(iter.GetCurrent());
- os << "\n - elements: " << Brief(obj->elements()) << " ["
- << ElementsKindToString(obj->map()->elements_kind());
- if (obj->elements()->IsCowArray()) os << " (COW)";
+ os << "\n - elements: " << Brief(obj.elements()) << " ["
+ << ElementsKindToString(obj.map().elements_kind());
+ if (obj.elements().IsCowArray()) os << " (COW)";
os << "]";
- Object hash = obj->GetHash();
- if (hash->IsSmi()) {
+ Object hash = obj.GetHash();
+ if (hash.IsSmi()) {
os << "\n - hash: " << Brief(hash);
}
- if (obj->GetEmbedderFieldCount() > 0) {
- os << "\n - embedder fields: " << obj->GetEmbedderFieldCount();
+ if (obj.GetEmbedderFieldCount() > 0) {
+ os << "\n - embedder fields: " << obj.GetEmbedderFieldCount();
}
}
@@ -740,17 +744,19 @@ static void JSObjectPrintBody(std::ostream& os,
JSObject obj, // NOLINT
bool print_elements = true) {
os << "\n - properties: ";
- Object properties_or_hash = obj->raw_properties_or_hash();
- if (!properties_or_hash->IsSmi()) {
+ Object properties_or_hash = obj.raw_properties_or_hash();
+ if (!properties_or_hash.IsSmi()) {
os << Brief(properties_or_hash);
}
os << " {";
- if (obj->PrintProperties(os)) os << "\n ";
+ if (obj.PrintProperties(os)) os << "\n ";
os << "}\n";
- if (print_elements && obj->elements()->length() > 0) {
- obj->PrintElements(os);
+ if (print_elements) {
+ size_t length = obj.IsJSTypedArray() ? JSTypedArray::cast(obj).length()
+ : obj.elements().length();
+ if (length > 0) obj.PrintElements(os);
}
- int embedder_fields = obj->GetEmbedderFieldCount();
+ int embedder_fields = obj.GetEmbedderFieldCount();
if (embedder_fields > 0) {
os << " - embedder fields = {";
for (int i = 0; i < embedder_fields; i++) {
@@ -796,23 +802,23 @@ void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
if (is_suspended()) os << " (suspended)";
if (is_suspended()) {
DisallowHeapAllocation no_gc;
- SharedFunctionInfo fun_info = function()->shared();
- if (fun_info->HasSourceCode()) {
- Script script = Script::cast(fun_info->script());
- String script_name = script->name()->IsString()
- ? String::cast(script->name())
+ SharedFunctionInfo fun_info = function().shared();
+ if (fun_info.HasSourceCode()) {
+ Script script = Script::cast(fun_info.script());
+ String script_name = script.name().IsString()
+ ? String::cast(script.name())
: GetReadOnlyRoots().empty_string();
os << "\n - source position: ";
// Can't collect source positions here if not available as that would
// allocate memory.
- if (fun_info->HasBytecodeArray() &&
- fun_info->GetBytecodeArray()->HasSourcePositionTable()) {
+ if (fun_info.HasBytecodeArray() &&
+ fun_info.GetBytecodeArray().HasSourcePositionTable()) {
os << source_position();
os << " (";
- script_name->PrintUC16(os);
- int lin = script->GetLineNumber(source_position()) + 1;
- int col = script->GetColumnNumber(source_position()) + 1;
+ script_name.PrintUC16(os);
+ int lin = script.GetLineNumber(source_position()) + 1;
+ int col = script.GetColumnNumber(source_position()) + 1;
os << ", lin " << lin;
os << ", col " << col;
} else {
@@ -865,22 +871,21 @@ void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "Symbol");
os << "\n - hash: " << Hash();
os << "\n - name: " << Brief(name());
- if (name()->IsUndefined()) {
+ if (name().IsUndefined()) {
os << " (" << PrivateSymbolToName() << ")";
}
os << "\n - private: " << is_private();
}
-
void DescriptorArray::DescriptorArrayPrint(std::ostream& os) {
PrintHeader(os, "DescriptorArray");
os << "\n - enum_cache: ";
- if (enum_cache()->keys()->length() == 0) {
+ if (enum_cache().keys().length() == 0) {
os << "empty";
} else {
- os << enum_cache()->keys()->length();
- os << "\n - keys: " << Brief(enum_cache()->keys());
- os << "\n - indices: " << Brief(enum_cache()->indices());
+ os << enum_cache().keys().length();
+ os << "\n - keys: " << Brief(enum_cache().keys());
+ os << "\n - indices: " << Brief(enum_cache().indices());
}
os << "\n - nof slack descriptors: " << number_of_slack_descriptors();
os << "\n - nof descriptors: " << number_of_descriptors();
@@ -900,25 +905,25 @@ void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(
namespace {
void PrintFixedArrayWithHeader(std::ostream& os, FixedArray array,
const char* type) {
- array->PrintHeader(os, type);
- os << "\n - length: " << array->length();
+ array.PrintHeader(os, type);
+ os << "\n - length: " << array.length();
PrintFixedArrayElements(os, array);
os << "\n";
}
template <typename T>
void PrintHashTableWithHeader(std::ostream& os, T table, const char* type) {
- table->PrintHeader(os, type);
- os << "\n - length: " << table->length();
- os << "\n - elements: " << table->NumberOfElements();
- os << "\n - deleted: " << table->NumberOfDeletedElements();
- os << "\n - capacity: " << table->Capacity();
+ table.PrintHeader(os, type);
+ os << "\n - length: " << table.length();
+ os << "\n - elements: " << table.NumberOfElements();
+ os << "\n - deleted: " << table.NumberOfDeletedElements();
+ os << "\n - capacity: " << table.Capacity();
os << "\n - elements: {";
- for (int i = 0; i < table->Capacity(); i++) {
+ for (int i = 0; i < table.Capacity(); i++) {
os << '\n'
- << std::setw(12) << i << ": " << Brief(table->KeyAt(i)) << " -> "
- << Brief(table->ValueAt(i));
+ << std::setw(12) << i << ": " << Brief(table.KeyAt(i)) << " -> "
+ << Brief(table.ValueAt(i));
}
os << "\n }\n";
}
@@ -969,12 +974,12 @@ void FixedArray::FixedArrayPrint(std::ostream& os) {
namespace {
void PrintContextWithHeader(std::ostream& os, Context context,
const char* type) {
- context->PrintHeader(os, type);
- os << "\n - length: " << context->length();
- os << "\n - scope_info: " << Brief(context->scope_info());
- os << "\n - previous: " << Brief(context->unchecked_previous());
- os << "\n - extension: " << Brief(context->extension());
- os << "\n - native_context: " << Brief(context->native_context());
+ context.PrintHeader(os, type);
+ os << "\n - length: " << context.length();
+ os << "\n - scope_info: " << Brief(context.scope_info());
+ os << "\n - previous: " << Brief(context.unchecked_previous());
+ os << "\n - extension: " << Brief(context.extension());
+ os << "\n - native_context: " << Brief(context.native_context());
PrintFixedArrayElements(os, context);
os << "\n";
}
@@ -1017,7 +1022,7 @@ void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "FixedDoubleArray");
os << "\n - length: " << length();
- DoPrintElements<FixedDoubleArray>(os, *this);
+ DoPrintElements<FixedDoubleArray>(os, *this, length());
os << "\n";
}
@@ -1183,7 +1188,6 @@ void FeedbackNexus::Print(std::ostream& os) { // NOLINT
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
- break;
}
}
@@ -1204,7 +1208,6 @@ void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-
void String::StringPrint(std::ostream& os) { // NOLINT
if (!IsOneByteRepresentation()) {
os << "u";
@@ -1236,48 +1239,44 @@ void String::StringPrint(std::ostream& os) { // NOLINT
if (!StringShape(*this).IsInternalized()) os << "\"";
}
-
void Name::NamePrint(std::ostream& os) { // NOLINT
if (IsString()) {
- String::cast(*this)->StringPrint(os);
+ String::cast(*this).StringPrint(os);
} else {
os << Brief(*this);
}
}
-
-static const char* const weekdays[] = {
- "???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
-};
+static const char* const weekdays[] = {"???", "Sun", "Mon", "Tue",
+ "Wed", "Thu", "Fri", "Sat"};
void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, *this, "JSDate");
os << "\n - value: " << Brief(value());
- if (!year()->IsSmi()) {
+ if (!year().IsSmi()) {
os << "\n - time = NaN\n";
} else {
// TODO(svenpanne) Add some basic formatting to our streams.
ScopedVector<char> buf(100);
SNPrintF(buf, "\n - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
- weekdays[weekday()->IsSmi() ? Smi::ToInt(weekday()) + 1 : 0],
- year()->IsSmi() ? Smi::ToInt(year()) : -1,
- month()->IsSmi() ? Smi::ToInt(month()) : -1,
- day()->IsSmi() ? Smi::ToInt(day()) : -1,
- hour()->IsSmi() ? Smi::ToInt(hour()) : -1,
- min()->IsSmi() ? Smi::ToInt(min()) : -1,
- sec()->IsSmi() ? Smi::ToInt(sec()) : -1);
- os << buf.start();
+ weekdays[weekday().IsSmi() ? Smi::ToInt(weekday()) + 1 : 0],
+ year().IsSmi() ? Smi::ToInt(year()) : -1,
+ month().IsSmi() ? Smi::ToInt(month()) : -1,
+ day().IsSmi() ? Smi::ToInt(day()) : -1,
+ hour().IsSmi() ? Smi::ToInt(hour()) : -1,
+ min().IsSmi() ? Smi::ToInt(min()) : -1,
+ sec().IsSmi() ? Smi::ToInt(sec()) : -1);
+ os << buf.begin();
}
JSObjectPrintBody(os, *this);
}
-
void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "JSProxy");
os << "\n - target: ";
- target()->ShortPrint(os);
+ target().ShortPrint(os);
os << "\n - handler: ";
- handler()->ShortPrint(os);
+ handler().ShortPrint(os);
os << "\n";
}
@@ -1379,8 +1378,8 @@ void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
os << "\n - buffer: " << Brief(buffer());
os << "\n - byte_offset: " << byte_offset();
os << "\n - byte_length: " << byte_length();
- os << "\n - length: " << Brief(length());
- if (!buffer()->IsJSArrayBuffer()) {
+ os << "\n - length: " << length();
+ if (!buffer().IsJSArrayBuffer()) {
os << "\n <invalid buffer>\n";
return;
}
@@ -1401,7 +1400,7 @@ void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
os << "\n - buffer =" << Brief(buffer());
os << "\n - byte_offset: " << byte_offset();
os << "\n - byte_length: " << byte_length();
- if (!buffer()->IsJSArrayBuffer()) {
+ if (!buffer().IsJSArrayBuffer()) {
os << "\n <invalid buffer>";
return;
}
@@ -1424,7 +1423,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
if (has_prototype_slot()) {
if (has_prototype()) {
os << Brief(prototype());
- if (map()->has_non_instance_prototype()) {
+ if (map().has_non_instance_prototype()) {
os << " (non-instance prototype)";
}
}
@@ -1434,41 +1433,41 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
os << "<no-prototype-slot>";
}
os << "\n - shared_info: " << Brief(shared());
- os << "\n - name: " << Brief(shared()->Name());
+ os << "\n - name: " << Brief(shared().Name());
// Print Builtin name for builtin functions
- int builtin_index = code()->builtin_index();
+ int builtin_index = code().builtin_index();
if (Builtins::IsBuiltinId(builtin_index) && !IsInterpreted()) {
os << "\n - builtin: " << isolate->builtins()->name(builtin_index);
}
os << "\n - formal_parameter_count: "
- << shared()->internal_formal_parameter_count();
- if (shared()->is_safe_to_skip_arguments_adaptor()) {
+ << shared().internal_formal_parameter_count();
+ if (shared().is_safe_to_skip_arguments_adaptor()) {
os << "\n - safe_to_skip_arguments_adaptor";
}
- os << "\n - kind: " << shared()->kind();
+ os << "\n - kind: " << shared().kind();
os << "\n - context: " << Brief(context());
os << "\n - code: " << Brief(code());
if (IsInterpreted()) {
os << "\n - interpreted";
- if (shared()->HasBytecodeArray()) {
- os << "\n - bytecode: " << shared()->GetBytecodeArray();
+ if (shared().HasBytecodeArray()) {
+ os << "\n - bytecode: " << shared().GetBytecodeArray();
}
}
if (WasmExportedFunction::IsWasmExportedFunction(*this)) {
WasmExportedFunction function = WasmExportedFunction::cast(*this);
os << "\n - WASM instance "
- << reinterpret_cast<void*>(function->instance()->ptr());
- os << "\n - WASM function index " << function->function_index();
+ << reinterpret_cast<void*>(function.instance().ptr());
+ os << "\n - WASM function index " << function.function_index();
}
- shared()->PrintSourceCode(os);
+ shared().PrintSourceCode(os);
JSObjectPrintBody(os, *this);
os << "\n - feedback vector: ";
- if (!shared()->HasFeedbackMetadata()) {
+ if (!shared().HasFeedbackMetadata()) {
os << "feedback metadata is not available in SFI\n";
} else if (has_feedback_vector()) {
- feedback_vector()->FeedbackVectorPrint(os);
+ feedback_vector().FeedbackVectorPrint(os);
} else {
os << "not available\n";
}
@@ -1477,10 +1476,10 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
if (HasSourceCode()) {
os << "\n - source code: ";
- String source = String::cast(Script::cast(script())->source());
+ String source = String::cast(Script::cast(script()).source());
int start = StartPosition();
int length = EndPosition() - start;
- std::unique_ptr<char[]> source_string = source->ToCString(
+ std::unique_ptr<char[]> source_string = source.ToCString(
DISALLOW_NULLS, FAST_STRING_TRAVERSAL, start, length, nullptr);
os << source_string.get();
}
@@ -1536,7 +1535,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - length: " << length();
os << "\n - feedback_metadata: ";
if (HasFeedbackMetadata()) {
- feedback_metadata()->FeedbackMetadataPrint(os);
+ feedback_metadata().FeedbackMetadataPrint(os);
} else {
os << "<none>";
}
@@ -1569,13 +1568,13 @@ void Cell::CellPrint(std::ostream& os) { // NOLINT
void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "PropertyCell");
os << "\n - name: ";
- name()->NamePrint(os);
+ name().NamePrint(os);
os << "\n - value: " << Brief(value());
os << "\n - details: ";
property_details().PrintAsSlowTo(os);
PropertyCellType cell_type = property_details().cell_type();
os << "\n - cell_type: ";
- if (value()->IsTheHole()) {
+ if (value().IsTheHole()) {
switch (cell_type) {
case PropertyCellType::kUninitialized:
os << "Uninitialized";
@@ -1638,7 +1637,6 @@ void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-
void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "AccessorInfo");
os << "\n - name: " << Brief(name());
@@ -1746,7 +1744,7 @@ void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
void Module::ModulePrint(std::ostream& os) { // NOLINT
PrintHeader(os, "Module");
- os << "\n - origin: " << Brief(script()->GetNameOrSourceURL());
+ os << "\n - origin: " << Brief(script().GetNameOrSourceURL());
os << "\n - code: " << Brief(code());
os << "\n - exports: " << Brief(exports());
os << "\n - requested_modules: " << Brief(requested_modules());
@@ -1793,7 +1791,7 @@ void AsmWasmData::AsmWasmDataPrint(std::ostream& os) { // NOLINT
os << "\n - native module: " << Brief(managed_native_module());
os << "\n - export_wrappers: " << Brief(export_wrappers());
os << "\n - offset table: " << Brief(asm_js_offset_table());
- os << "\n - uses bitset: " << uses_bitset()->value();
+ os << "\n - uses bitset: " << uses_bitset().value();
os << "\n";
}
@@ -1830,8 +1828,8 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT
if (has_debug_info()) {
os << "\n - debug_info: " << Brief(debug_info());
}
- for (int i = 0; i < tables()->length(); i++) {
- os << "\n - table " << i << ": " << Brief(tables()->get(i));
+ for (int i = 0; i < tables().length(); i++) {
+ os << "\n - table " << i << ": " << Brief(tables().get(i));
}
os << "\n - imported_function_refs: " << Brief(imported_function_refs());
if (has_indirect_function_table_refs()) {
@@ -1869,6 +1867,12 @@ void WasmExportedFunctionData::WasmExportedFunctionDataPrint(
os << "\n";
}
+void WasmJSFunctionData::WasmJSFunctionDataPrint(std::ostream& os) { // NOLINT
+ PrintHeader(os, "WasmJSFunctionData");
+ os << "\n - wrapper_code: " << Brief(wrapper_code());
+ os << "\n";
+}
+
void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "WasmModuleObject");
os << "\n - module: " << module();
@@ -1963,7 +1967,6 @@ void AccessorPair::AccessorPairPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-
void AccessCheckInfo::AccessCheckInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "AccessCheckInfo");
os << "\n - callback: " << Brief(callback());
@@ -1994,7 +1997,6 @@ void InterceptorInfo::InterceptorInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-
void FunctionTemplateInfo::FunctionTemplateInfoPrint(
std::ostream& os) { // NOLINT
PrintHeader(os, "FunctionTemplateInfo");
@@ -2006,7 +2008,6 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n - property_accessors: " << Brief(property_accessors());
os << "\n - signature: " << Brief(signature());
os << "\n - cached_property_name: " << Brief(cached_property_name());
- os << "\n - hidden_prototype: " << (hidden_prototype() ? "true" : "false");
os << "\n - undetectable: " << (undetectable() ? "true" : "false");
os << "\n - need_access_check: " << (needs_access_check() ? "true" : "false");
os << "\n - instantiated: " << (instantiated() ? "true" : "false");
@@ -2029,6 +2030,16 @@ void FunctionTemplateRareData::FunctionTemplateRareDataPrint(
os << "\n";
}
+void WasmCapiFunctionData::WasmCapiFunctionDataPrint(
+ std::ostream& os) { // NOLINT
+ PrintHeader(os, "WasmCapiFunctionData");
+ os << "\n - call_target: " << call_target();
+ os << "\n - embedder_data: " << embedder_data();
+ os << "\n - wrapper_code: " << Brief(wrapper_code());
+ os << "\n - serialized_signature: " << Brief(serialized_signature());
+ os << "\n";
+}
+
void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "ObjectTemplateInfo");
os << "\n - tag: " << Brief(tag());
@@ -2041,7 +2052,6 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-
void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
PrintHeader(os, "AllocationSite");
if (this->HasWeakNext()) os << "\n - weak_next: " << Brief(weak_next());
@@ -2057,7 +2067,7 @@ void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
if (!PointsToLiteral()) {
ElementsKind kind = GetElementsKind();
os << "Array allocation with ElementsKind " << ElementsKindToString(kind);
- } else if (boilerplate()->IsJSArray()) {
+ } else if (boilerplate().IsJSArray()) {
os << "Array literal with boilerplate " << Brief(boilerplate());
} else {
os << "Object literal with boilerplate " << Brief(boilerplate());
@@ -2065,18 +2075,16 @@ void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
os << "\n";
}
-
void AllocationMemento::AllocationMementoPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "AllocationMemento");
os << "\n - allocation site: ";
if (IsValid()) {
- GetAllocationSite()->AllocationSitePrint(os);
+ GetAllocationSite().AllocationSitePrint(os);
} else {
os << "<invalid>\n";
}
}
-
void Script::ScriptPrint(std::ostream& os) { // NOLINT
PrintHeader(os, "Script");
os << "\n - source: " << Brief(source());
@@ -2149,10 +2157,8 @@ void JSLocale::JSLocalePrint(std::ostream& os) { // NOLINT
void JSNumberFormat::JSNumberFormatPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, *this, "JSNumberFormat");
os << "\n - locale: " << Brief(locale());
- os << "\n - icu_number_format: " << Brief(icu_number_format());
+ os << "\n - icu_number_formatter: " << Brief(icu_number_formatter());
os << "\n - bound_format: " << Brief(bound_format());
- os << "\n - style: " << StyleAsString();
- os << "\n - currency_display: " << CurrencyDisplayAsString();
JSObjectPrintBody(os, *this);
}
@@ -2205,7 +2211,7 @@ void PrintScopeInfoList(ScopeInfo scope_info, std::ostream& os,
os << " {\n";
for (int i = nof_internal_slots; start < end; ++i, ++start) {
os << " - " << i << ": ";
- String::cast(scope_info->get(start))->ShortPrint(os);
+ String::cast(scope_info.get(start)).ShortPrint(os);
os << "\n";
}
os << " }";
@@ -2230,11 +2236,12 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
if (HasReceiver()) {
os << "\n - receiver: " << ReceiverVariableField::decode(flags);
}
+ if (HasClassBrand()) os << "\n - has class brand";
if (HasNewTarget()) os << "\n - needs new target";
if (HasFunctionName()) {
os << "\n - function name(" << FunctionVariableField::decode(flags)
<< "): ";
- FunctionName()->ShortPrint(os);
+ FunctionName().ShortPrint(os);
}
if (IsAsmModule()) os << "\n - asm module";
if (HasSimpleParameters()) os << "\n - simple parameters";
@@ -2271,7 +2278,7 @@ void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
os << "\n - original bytecode array: " << Brief(original_bytecode_array());
os << "\n - debug bytecode array: " << Brief(debug_bytecode_array());
os << "\n - break_points: ";
- break_points()->FixedArrayPrint(os);
+ break_points().FixedArrayPrint(os);
os << "\n - coverage_info: " << Brief(coverage_info());
}
@@ -2379,24 +2386,26 @@ void InterpreterData::InterpreterDataPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void MaybeObject::Print() {
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::Print() {
StdoutStream os;
this->Print(os);
os << std::flush;
}
-void MaybeObject::Print(std::ostream& os) {
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::Print(std::ostream& os) {
Smi smi;
HeapObject heap_object;
if (ToSmi(&smi)) {
- smi->SmiPrint(os);
+ smi.SmiPrint(os);
} else if (IsCleared()) {
os << "[cleared]";
} else if (GetHeapObjectIfWeak(&heap_object)) {
os << "[weak] ";
- heap_object->HeapObjectPrint(os);
+ heap_object.HeapObjectPrint(os);
} else if (GetHeapObjectIfStrong(&heap_object)) {
- heap_object->HeapObjectPrint(os);
+ heap_object.HeapObjectPrint(os);
} else {
UNREACHABLE();
}
@@ -2413,14 +2422,14 @@ void MutableHeapNumber::MutableHeapNumberPrint(std::ostream& os) {
// TODO(cbruni): remove once the new maptracer is in place.
void Name::NameShortPrint() {
if (this->IsString()) {
- PrintF("%s", String::cast(*this)->ToCString().get());
+ PrintF("%s", String::cast(*this).ToCString().get());
} else {
DCHECK(this->IsSymbol());
Symbol s = Symbol::cast(*this);
- if (s->name()->IsUndefined()) {
- PrintF("#<%s>", s->PrivateSymbolToName());
+ if (s.name().IsUndefined()) {
+ PrintF("#<%s>", s.PrivateSymbolToName());
} else {
- PrintF("<%s>", String::cast(s->name())->ToCString().get());
+ PrintF("<%s>", String::cast(s.name()).ToCString().get());
}
}
}
@@ -2428,14 +2437,14 @@ void Name::NameShortPrint() {
// TODO(cbruni): remove once the new maptracer is in place.
int Name::NameShortPrint(Vector<char> str) {
if (this->IsString()) {
- return SNPrintF(str, "%s", String::cast(*this)->ToCString().get());
+ return SNPrintF(str, "%s", String::cast(*this).ToCString().get());
} else {
DCHECK(this->IsSymbol());
Symbol s = Symbol::cast(*this);
- if (s->name()->IsUndefined()) {
- return SNPrintF(str, "#<%s>", s->PrivateSymbolToName());
+ if (s.name().IsUndefined()) {
+ return SNPrintF(str, "#<%s>", s.PrivateSymbolToName());
} else {
- return SNPrintF(str, "<%s>", String::cast(s->name())->ToCString().get());
+ return SNPrintF(str, "<%s>", String::cast(s.name()).ToCString().get());
}
}
}
@@ -2443,7 +2452,7 @@ int Name::NameShortPrint(Vector<char> str) {
void Map::PrintMapDetails(std::ostream& os) {
DisallowHeapAllocation no_gc;
this->MapPrint(os);
- instance_descriptors()->PrintDescriptors(os);
+ instance_descriptors().PrintDescriptors(os);
}
void Map::MapPrint(std::ostream& os) { // NOLINT
@@ -2499,7 +2508,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
<< Brief(instance_descriptors());
if (FLAG_unbox_double_fields) {
os << "\n - layout descriptor: ";
- layout_descriptor()->ShortPrint(os);
+ layout_descriptor().ShortPrint(os);
}
Isolate* isolate;
@@ -2535,9 +2544,9 @@ void DescriptorArray::PrintDescriptors(std::ostream& os) {
Name key = GetKey(i);
os << "\n [" << i << "]: ";
#ifdef OBJECT_PRINT
- key->NamePrint(os);
+ key.NamePrint(os);
#else
- key->ShortPrint(os);
+ key.ShortPrint(os);
#endif
os << " ";
PrintDescriptorDetails(os, i, PropertyDetails::kPrintFull);
@@ -2553,16 +2562,16 @@ void DescriptorArray::PrintDescriptorDetails(std::ostream& os, int descriptor,
switch (details.location()) {
case kField: {
FieldType field_type = GetFieldType(descriptor);
- field_type->PrintTo(os);
+ field_type.PrintTo(os);
break;
}
case kDescriptor:
Object value = GetStrongValue(descriptor);
os << Brief(value);
- if (value->IsAccessorPair()) {
+ if (value.IsAccessorPair()) {
AccessorPair pair = AccessorPair::cast(value);
- os << "(get: " << Brief(pair->getter())
- << ", set: " << Brief(pair->setter()) << ")";
+ os << "(get: " << Brief(pair.getter())
+ << ", set: " << Brief(pair.setter()) << ")";
}
break;
}
@@ -2588,12 +2597,12 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name key,
Map target) {
os << "\n ";
#ifdef OBJECT_PRINT
- key->NamePrint(os);
+ key.NamePrint(os);
#else
- key->ShortPrint(os);
+ key.ShortPrint(os);
#endif
os << ": ";
- ReadOnlyRoots roots = key->GetReadOnlyRoots();
+ ReadOnlyRoots roots = key.GetReadOnlyRoots();
if (key == roots.nonextensible_symbol()) {
os << "(transition to non-extensible)";
} else if (key == roots.sealed_symbol()) {
@@ -2601,17 +2610,17 @@ void TransitionsAccessor::PrintOneTransition(std::ostream& os, Name key,
} else if (key == roots.frozen_symbol()) {
os << "(transition to frozen)";
} else if (key == roots.elements_transition_symbol()) {
- os << "(transition to " << ElementsKindToString(target->elements_kind())
+ os << "(transition to " << ElementsKindToString(target.elements_kind())
<< ")";
} else if (key == roots.strict_function_transition_symbol()) {
os << " (transition to strict function)";
} else {
DCHECK(!IsSpecialTransition(roots, key));
os << "(transition to ";
- int descriptor = target->LastAdded();
- DescriptorArray descriptors = target->instance_descriptors();
- descriptors->PrintDescriptorDetails(os, descriptor,
- PropertyDetails::kForTransitions);
+ int descriptor = target.LastAdded();
+ DescriptorArray descriptors = target.instance_descriptors();
+ descriptors.PrintDescriptorDetails(os, descriptor,
+ PropertyDetails::kForTransitions);
os << ")";
}
os << " -> " << Brief(target);
@@ -2641,7 +2650,7 @@ void TransitionsAccessor::PrintTransitions(std::ostream& os) { // NOLINT
break;
}
case kFullTransitionArray:
- return transitions()->PrintInternal(os);
+ return transitions().PrintInternal(os);
}
}
@@ -2674,22 +2683,22 @@ void TransitionsAccessor::PrintTransitionTree(std::ostream& os, int level,
} else if (key == roots.frozen_symbol()) {
os << "to frozen";
} else if (key == roots.elements_transition_symbol()) {
- os << "to " << ElementsKindToString(target->elements_kind());
+ os << "to " << ElementsKindToString(target.elements_kind());
} else if (key == roots.strict_function_transition_symbol()) {
os << "to strict function";
} else {
#ifdef OBJECT_PRINT
- key->NamePrint(os);
+ key.NamePrint(os);
#else
- key->ShortPrint(os);
+ key.ShortPrint(os);
#endif
os << " ";
DCHECK(!IsSpecialTransition(ReadOnlyRoots(isolate_), key));
os << "to ";
- int descriptor = target->LastAdded();
- DescriptorArray descriptors = target->instance_descriptors();
- descriptors->PrintDescriptorDetails(os, descriptor,
- PropertyDetails::kForTransitions);
+ int descriptor = target.LastAdded();
+ DescriptorArray descriptors = target.instance_descriptors();
+ descriptors.PrintDescriptorDetails(os, descriptor,
+ PropertyDetails::kForTransitions);
}
TransitionsAccessor transitions(isolate_, target, no_gc);
transitions.PrintTransitionTree(os, level + 1, no_gc);
@@ -2734,19 +2743,22 @@ V8_EXPORT_PRIVATE extern i::Object _v8_internal_Get_Object(void* object) {
}
V8_EXPORT_PRIVATE extern void _v8_internal_Print_Object(void* object) {
- GetObjectFromRaw(object)->Print();
+ GetObjectFromRaw(object).Print();
}
V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
i::Address address = reinterpret_cast<i::Address>(object);
i::Isolate* isolate = i::Isolate::Current();
- i::wasm::WasmCode* wasm_code =
- isolate->wasm_engine()->code_manager()->LookupCode(address);
- if (wasm_code) {
- i::StdoutStream os;
- wasm_code->Disassemble(nullptr, os, address);
- return;
+ {
+ i::wasm::WasmCodeRefScope scope;
+ i::wasm::WasmCode* wasm_code =
+ isolate->wasm_engine()->code_manager()->LookupCode(address);
+ if (wasm_code) {
+ i::StdoutStream os;
+ wasm_code->Disassemble(nullptr, os, address);
+ return;
+ }
}
if (!isolate->heap()->InSpaceSlow(address, i::CODE_SPACE) &&
@@ -2760,25 +2772,25 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
}
i::Code code = isolate->FindCodeObject(address);
- if (!code->IsCode()) {
+ if (!code.IsCode()) {
i::PrintF("No code object found containing %p\n", object);
return;
}
#ifdef ENABLE_DISASSEMBLER
i::StdoutStream os;
- code->Disassemble(nullptr, os, address);
+ code.Disassemble(nullptr, os, address);
#else // ENABLE_DISASSEMBLER
- code->Print();
+ code.Print();
#endif // ENABLE_DISASSEMBLER
}
V8_EXPORT_PRIVATE extern void _v8_internal_Print_LayoutDescriptor(
void* object) {
i::Object o(GetObjectFromRaw(object));
- if (!o->IsLayoutDescriptor()) {
+ if (!o.IsLayoutDescriptor()) {
printf("Please provide a layout descriptor\n");
} else {
- i::LayoutDescriptor::cast(o)->Print();
+ i::LayoutDescriptor::cast(o).Print();
}
}
@@ -2789,7 +2801,7 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_StackTrace() {
V8_EXPORT_PRIVATE extern void _v8_internal_Print_TransitionTree(void* object) {
i::Object o(GetObjectFromRaw(object));
- if (!o->IsMap()) {
+ if (!o.IsMap()) {
printf("Please provide a valid Map\n");
} else {
#if defined(DEBUG) || defined(OBJECT_PRINT)
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/diagnostics/perf-jit.cc
index c30047abb3..57133964b0 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/diagnostics/perf-jit.cc
@@ -25,22 +25,23 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/perf-jit.h"
+#include "src/diagnostics/perf-jit.h"
#include <memory>
-#include "src/assembler.h"
-#include "src/eh-frame.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/source-position-table.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/source-position-table.h"
+#include "src/diagnostics/eh-frame.h"
+#include "src/objects/objects-inl.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/wasm-code-manager.h"
#if V8_OS_LINUX
#include <fcntl.h>
#include <sys/mman.h>
-#undef MAP_TYPE // jumbo: conflicts with v8::internal::InstanceType::MAP_TYPE
+// jumbo: conflicts with v8::internal::InstanceType::MAP_TYPE
+#undef MAP_TYPE // NOLINT
#include <unistd.h>
#endif // V8_OS_LINUX
@@ -128,7 +129,7 @@ void PerfJitLogger::OpenJitDumpFile() {
base::OS::GetCurrentProcessId());
CHECK_NE(size, -1);
- int fd = open(perf_dump_name.start(), O_CREAT | O_TRUNC | O_RDWR, 0666);
+ int fd = open(perf_dump_name.begin(), O_CREAT | O_TRUNC | O_RDWR, 0666);
if (fd == -1) return;
marker_address_ = OpenMarkerFile(fd);
@@ -200,8 +201,8 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode abstract_code,
SharedFunctionInfo shared,
const char* name, int length) {
if (FLAG_perf_basic_prof_only_functions &&
- (abstract_code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
- abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
+ (abstract_code.kind() != AbstractCode::INTERPRETED_FUNCTION &&
+ abstract_code.kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
return;
}
@@ -210,25 +211,25 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode abstract_code,
if (perf_output_handle_ == nullptr) return;
// We only support non-interpreted functions.
- if (!abstract_code->IsCode()) return;
- Code code = abstract_code->GetCode();
- DCHECK(code->raw_instruction_start() == code->address() + Code::kHeaderSize);
+ if (!abstract_code.IsCode()) return;
+ Code code = abstract_code.GetCode();
+ DCHECK(code.raw_instruction_start() == code.address() + Code::kHeaderSize);
// Debug info has to be emitted first.
if (FLAG_perf_prof && !shared.is_null()) {
// TODO(herhut): This currently breaks for js2wasm/wasm2js functions.
- if (code->kind() != Code::JS_TO_WASM_FUNCTION &&
- code->kind() != Code::WASM_TO_JS_FUNCTION) {
+ if (code.kind() != Code::JS_TO_WASM_FUNCTION &&
+ code.kind() != Code::WASM_TO_JS_FUNCTION) {
LogWriteDebugInfo(code, shared);
}
}
const char* code_name = name;
- uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->InstructionStart());
+ uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code.InstructionStart());
// Code generated by Turbofan will have the safepoint table directly after
// instructions. There is no need to record the safepoint table itself.
- uint32_t code_size = code->ExecutableInstructionSize();
+ uint32_t code_size = code.ExecutableInstructionSize();
// Unwinding info comes right after debug info.
if (FLAG_perf_prof_unwinding_info) LogWriteUnwindingInfo(code);
@@ -242,7 +243,7 @@ void PerfJitLogger::LogRecordedBuffer(const wasm::WasmCode* code,
if (perf_output_handle_ == nullptr) return;
- WriteJitCodeLoadEntry(code->instructions().start(),
+ WriteJitCodeLoadEntry(code->instructions().begin(),
code->instructions().length(), name, length);
}
@@ -280,11 +281,11 @@ constexpr size_t kUnknownScriptNameStringLen =
size_t GetScriptNameLength(const SourcePositionInfo& info) {
if (!info.script.is_null()) {
Object name_or_url = info.script->GetNameOrSourceURL();
- if (name_or_url->IsString()) {
+ if (name_or_url.IsString()) {
String str = String::cast(name_or_url);
- if (str->IsOneByteRepresentation()) return str->length();
+ if (str.IsOneByteRepresentation()) return str.length();
int length;
- str->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
+ str.ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
return static_cast<size_t>(length);
}
}
@@ -296,15 +297,14 @@ Vector<const char> GetScriptName(const SourcePositionInfo& info,
const DisallowHeapAllocation& no_gc) {
if (!info.script.is_null()) {
Object name_or_url = info.script->GetNameOrSourceURL();
- if (name_or_url->IsSeqOneByteString()) {
+ if (name_or_url.IsSeqOneByteString()) {
SeqOneByteString str = SeqOneByteString::cast(name_or_url);
- return {reinterpret_cast<char*>(str->GetChars(no_gc)),
- static_cast<size_t>(str->length())};
- } else if (name_or_url->IsString()) {
+ return {reinterpret_cast<char*>(str.GetChars(no_gc)),
+ static_cast<size_t>(str.length())};
+ } else if (name_or_url.IsString()) {
int length;
- *storage =
- String::cast(name_or_url)
- ->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
+ *storage = String::cast(name_or_url)
+ .ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
return {storage->get(), static_cast<size_t>(length)};
}
}
@@ -327,21 +327,21 @@ SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
void PerfJitLogger::LogWriteDebugInfo(Code code, SharedFunctionInfo shared) {
// Compute the entry count and get the name of the script.
uint32_t entry_count = 0;
- for (SourcePositionTableIterator iterator(code->SourcePositionTable());
+ for (SourcePositionTableIterator iterator(code.SourcePositionTable());
!iterator.done(); iterator.Advance()) {
entry_count++;
}
if (entry_count == 0) return;
// The WasmToJS wrapper stubs have source position entries.
- if (!shared->HasSourceCode()) return;
- Isolate* isolate = shared->GetIsolate();
- Handle<Script> script(Script::cast(shared->script()), isolate);
+ if (!shared.HasSourceCode()) return;
+ Isolate* isolate = shared.GetIsolate();
+ Handle<Script> script(Script::cast(shared.script()), isolate);
PerfJitCodeDebugInfo debug_info;
debug_info.event_ = PerfJitCodeLoad::kDebugInfo;
debug_info.time_stamp_ = GetTimestamp();
- debug_info.address_ = code->InstructionStart();
+ debug_info.address_ = code.InstructionStart();
debug_info.entry_count_ = entry_count;
uint32_t size = sizeof(debug_info);
@@ -351,7 +351,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code code, SharedFunctionInfo shared) {
Handle<Code> code_handle(code, isolate);
Handle<SharedFunctionInfo> function_handle(shared, isolate);
- for (SourcePositionTableIterator iterator(code->SourcePositionTable());
+ for (SourcePositionTableIterator iterator(code.SourcePositionTable());
!iterator.done(); iterator.Advance()) {
SourcePositionInfo info(GetSourcePositionInfo(code_handle, function_handle,
iterator.source_position()));
@@ -362,9 +362,9 @@ void PerfJitLogger::LogWriteDebugInfo(Code code, SharedFunctionInfo shared) {
debug_info.size_ = size + padding;
LogWriteBytes(reinterpret_cast<const char*>(&debug_info), sizeof(debug_info));
- Address code_start = code->InstructionStart();
+ Address code_start = code.InstructionStart();
- for (SourcePositionTableIterator iterator(code->SourcePositionTable());
+ for (SourcePositionTableIterator iterator(code.SourcePositionTable());
!iterator.done(); iterator.Advance()) {
SourcePositionInfo info(GetSourcePositionInfo(code_handle, function_handle,
iterator.source_position()));
@@ -380,7 +380,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code code, SharedFunctionInfo shared) {
DisallowHeapAllocation no_gc;
std::unique_ptr<char[]> name_storage;
Vector<const char> name_string = GetScriptName(info, &name_storage, no_gc);
- LogWriteBytes(name_string.start(),
+ LogWriteBytes(name_string.begin(),
static_cast<uint32_t>(name_string.size()) + 1);
}
char padding_bytes[8] = {0};
@@ -393,8 +393,8 @@ void PerfJitLogger::LogWriteUnwindingInfo(Code code) {
unwinding_info_header.time_stamp_ = GetTimestamp();
unwinding_info_header.eh_frame_hdr_size_ = EhFrameConstants::kEhFrameHdrSize;
- if (code->has_unwinding_info()) {
- unwinding_info_header.unwinding_size_ = code->unwinding_info_size();
+ if (code.has_unwinding_info()) {
+ unwinding_info_header.unwinding_size_ = code.unwinding_info_size();
unwinding_info_header.mapped_size_ = unwinding_info_header.unwinding_size_;
} else {
unwinding_info_header.unwinding_size_ = EhFrameConstants::kEhFrameHdrSize;
@@ -409,9 +409,9 @@ void PerfJitLogger::LogWriteUnwindingInfo(Code code) {
LogWriteBytes(reinterpret_cast<const char*>(&unwinding_info_header),
sizeof(unwinding_info_header));
- if (code->has_unwinding_info()) {
- LogWriteBytes(reinterpret_cast<const char*>(code->unwinding_info_start()),
- code->unwinding_info_size());
+ if (code.has_unwinding_info()) {
+ LogWriteBytes(reinterpret_cast<const char*>(code.unwinding_info_start()),
+ code.unwinding_info_size());
} else {
OFStream perf_output_stream(perf_output_handle_);
EhFrameWriter::WriteEmptyEhFrame(perf_output_stream);
@@ -425,7 +425,7 @@ void PerfJitLogger::LogWriteUnwindingInfo(Code code) {
void PerfJitLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
// We may receive a CodeMove event if a BytecodeArray object moves. Otherwise
// code relocation is not supported.
- CHECK(from->IsBytecodeArray());
+ CHECK(from.IsBytecodeArray());
}
void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
diff --git a/deps/v8/src/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
index ac058c1660..492a550976 100644
--- a/deps/v8/src/perf-jit.h
+++ b/deps/v8/src/diagnostics/perf-jit.h
@@ -25,10 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_PERF_JIT_H_
-#define V8_PERF_JIT_H_
+#ifndef V8_DIAGNOSTICS_PERF_JIT_H_
+#define V8_DIAGNOSTICS_PERF_JIT_H_
-#include "src/log.h"
+#include "src/logging/log.h"
namespace v8 {
namespace internal {
@@ -144,4 +144,4 @@ class PerfJitLogger : public CodeEventLogger {
} // namespace internal
} // namespace v8
-#endif // V8_PERF_JIT_H_
+#endif // V8_DIAGNOSTICS_PERF_JIT_H_
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
index f736f804c0..e7d26858e5 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -22,7 +22,6 @@
// of code into a FILE*, meaning that the above functionality could also be
// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
-
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
@@ -31,10 +30,10 @@
#if V8_TARGET_ARCH_PPC
#include "src/base/platform/platform.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/ppc/constants-ppc.h"
-#include "src/register-configuration.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/ppc/constants-ppc.h"
+#include "src/codegen/register-configuration.h"
+#include "src/diagnostics/disasm.h"
namespace v8 {
namespace internal {
@@ -89,16 +88,13 @@ class Decoder {
DISALLOW_COPY_AND_ASSIGN(Decoder);
};
-
// Support for assertions in the Decoder formatting functions.
#define STRING_STARTS_WITH(string, compare_string) \
(strncmp(string, compare_string, strlen(compare_string)) == 0)
-
// Append the ch to the output buffer.
void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
-
// Append the str to the output buffer.
void Decoder::Print(const char* str) {
char cur = *str++;
@@ -109,19 +105,16 @@ void Decoder::Print(const char* str) {
out_buffer_[out_buffer_pos_] = 0;
}
-
// Print the register name according to the active name converter.
void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
-
// Print the double FP register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(RegisterName(DoubleRegister::from_code(reg)));
}
-
// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
// the FormatOption method.
void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
@@ -143,7 +136,6 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
}
}
-
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
@@ -166,7 +158,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
UNREACHABLE();
}
-
// Handle all FP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatFPRegister(Instruction* instr, const char* format) {
@@ -191,7 +182,6 @@ int Decoder::FormatFPRegister(Instruction* instr, const char* format) {
return retval;
}
-
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
@@ -318,14 +308,12 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
#endif
default: {
UNREACHABLE();
- break;
}
}
UNREACHABLE();
}
-
// Format takes a formatting string for a whole instruction and prints it into
// the output buffer. All escaped options are handed to FormatOption to be
// parsed further.
@@ -342,7 +330,6 @@ void Decoder::Format(Instruction* instr, const char* format) {
out_buffer_[out_buffer_pos_] = '\0';
}
-
// The disassembler may end up decoding data inlined in the code. We do not want
// it to crash if the data does not resemble any known instruction.
#define VERIFY(condition) \
@@ -351,12 +338,10 @@ void Decoder::Format(Instruction* instr, const char* format) {
return; \
}
-
// For currently unimplemented decodings the disassembler calls Unknown(instr)
// which will just print "unknown" of the instruction bits.
void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
-
// For currently unimplemented decodings the disassembler calls
// UnknownFormat(instr) which will just print opcode name of the
// instruction bits.
@@ -366,7 +351,6 @@ void Decoder::UnknownFormat(Instruction* instr, const char* name) {
Format(instr, buffer);
}
-
void Decoder::DecodeExt1(Instruction* instr) {
switch (EXT1 | (instr->BitField(10, 1))) {
case MCRF: {
@@ -485,7 +469,9 @@ void Decoder::DecodeExt1(Instruction* instr) {
}
break;
}
- default: { UNREACHABLE(); }
+ default: {
+ UNREACHABLE();
+ }
}
break;
}
@@ -535,7 +521,6 @@ void Decoder::DecodeExt1(Instruction* instr) {
}
}
-
void Decoder::DecodeExt2(Instruction* instr) {
// Some encodings are 10-1 bits, handle those first
switch (EXT2 | (instr->BitField(10, 1))) {
@@ -946,7 +931,6 @@ void Decoder::DecodeExt2(Instruction* instr) {
}
}
-
void Decoder::DecodeExt3(Instruction* instr) {
switch (EXT3 | (instr->BitField(10, 1))) {
case FCFID: {
@@ -963,7 +947,6 @@ void Decoder::DecodeExt3(Instruction* instr) {
}
}
-
void Decoder::DecodeExt4(Instruction* instr) {
switch (EXT4 | (instr->BitField(5, 1))) {
case FDIV: {
@@ -1099,7 +1082,6 @@ void Decoder::DecodeExt4(Instruction* instr) {
}
}
-
void Decoder::DecodeExt5(Instruction* instr) {
switch (EXT5 | (instr->BitField(4, 2))) {
case RLDICL: {
@@ -1132,7 +1114,7 @@ void Decoder::DecodeExt6(Instruction* instr) {
switch (EXT6 | (instr->BitField(10, 3))) {
#define DECODE_XX3_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: { \
- Format(instr, #name" 'Dt, 'Da, 'Db"); \
+ Format(instr, #name " 'Dt, 'Da, 'Db"); \
return; \
}
PPC_XX3_OPCODE_LIST(DECODE_XX3_INSTRUCTIONS)
@@ -1141,16 +1123,16 @@ void Decoder::DecodeExt6(Instruction* instr) {
switch (EXT6 | (instr->BitField(10, 2))) {
#define DECODE_XX2_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: { \
- Format(instr, #name" 'Dt, 'Db"); \
+ Format(instr, #name " 'Dt, 'Db"); \
return; \
}
PPC_XX2_OPCODE_LIST(DECODE_XX2_INSTRUCTIONS)
}
-#undef DECODE_XX3_INSTRUCTIONS
+#undef DECODE_XX2_INSTRUCTIONS
Unknown(instr); // not used by V8
}
-#undef VERIFIY
+#undef VERIFY
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
@@ -1477,23 +1459,19 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
} // namespace internal
} // namespace v8
-
//------------------------------------------------------------------------------
namespace disasm {
-
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
- return tmp_buffer_.start();
+ return tmp_buffer_.begin();
}
-
const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
-
const char* NameConverter::NameOfCPURegister(int reg) const {
return RegisterName(i::Register::from_code(reg));
}
@@ -1502,7 +1480,6 @@ const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // PPC does not have the concept of a byte register
}
-
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // PPC does not have any XMM registers
}
@@ -1513,7 +1490,6 @@ const char* NameConverter::NameInCode(byte* addr) const {
return "";
}
-
//------------------------------------------------------------------------------
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
@@ -1522,7 +1498,6 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return d.InstructionDecode(instruction);
}
-
// The PPC assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
@@ -1536,10 +1511,11 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
- *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
}
}
+#undef STRING_STARTS_WITH
} // namespace disasm
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/diagnostics/s390/disasm-s390.cc
index f11f441c8a..9b10e236ce 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/diagnostics/s390/disasm-s390.cc
@@ -30,10 +30,10 @@
#if V8_TARGET_ARCH_S390
#include "src/base/platform/platform.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/register-configuration.h"
-#include "src/s390/constants-s390.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/s390/constants-s390.h"
+#include "src/diagnostics/disasm.h"
namespace v8 {
namespace internal {
@@ -210,6 +210,11 @@ int Decoder::FormatFloatingRegister(Instruction* instr, const char* format) {
int reg = rreinstr->R2Value();
PrintDRegister(reg);
return 2;
+ } else if (format[1] == '4') {
+ VRR_E_Instruction* vrreinstr = reinterpret_cast<VRR_E_Instruction*>(instr);
+ int reg = vrreinstr->R4Value();
+ PrintDRegister(reg);
+ return 2;
}
UNREACHABLE();
}
@@ -289,7 +294,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
default: {
UNREACHABLE();
- break;
}
}
@@ -311,8 +315,19 @@ int Decoder::FormatMask(Instruction* instr, const char* format) {
value = reinterpret_cast<RRFInstruction*>(instr)->M4Value();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
return 2;
+ } else if (format[1] == '4') { // mask format in bits 32-35
+ value = reinterpret_cast<VRR_C_Instruction*>(instr)->M4Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
+ return 2;
+ } else if (format[1] == '5') { // mask format in bits 28-31
+ value = reinterpret_cast<VRR_C_Instruction*>(instr)->M5Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
+ return 2;
+ } else if (format[1] == '6') { // mask format in bits 24-27
+ value = reinterpret_cast<VRR_C_Instruction*>(instr)->M6Value();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", value);
+ return 2;
}
-
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
return 2;
}
@@ -683,210 +698,273 @@ bool Decoder::DecodeSpecial(Instruction* instr) {
bool Decoder::DecodeGeneric(Instruction* instr) {
Opcode opcode = instr->S390OpcodeValue();
switch (opcode) {
- /* 2 bytes */
+ /* 2 bytes */
#define DECODE_RR_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'r1,'r2"); \
break;
- S390_RR_OPCODE_LIST(DECODE_RR_INSTRUCTIONS)
+ S390_RR_OPCODE_LIST(DECODE_RR_INSTRUCTIONS)
#undef DECODE_RR_INSTRUCTIONS
- /* 4 bytes */
-#define DECODE_RS_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'r1,'r2,'d1('r3)"); \
- break;
- S390_RS_A_OPCODE_LIST(DECODE_RS_A_INSTRUCTIONS)
+ /* 4 bytes */
+#define DECODE_RS_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'r2,'d1('r3)"); \
+ break;
+ S390_RS_A_OPCODE_LIST(DECODE_RS_A_INSTRUCTIONS)
#undef DECODE_RS_A_INSTRUCTIONS
-#define DECODE_RSI_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'r1,'r2,'i4"); \
- break;
- S390_RSI_OPCODE_LIST(DECODE_RSI_INSTRUCTIONS)
+#define DECODE_RSI_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'r2,'i4"); \
+ break;
+ S390_RSI_OPCODE_LIST(DECODE_RSI_INSTRUCTIONS)
#undef DECODE_RSI_INSTRUCTIONS
-#define DECODE_RI_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'r1,'i1"); \
- break;
- S390_RI_A_OPCODE_LIST(DECODE_RI_A_INSTRUCTIONS)
+#define DECODE_RI_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'i1"); \
+ break;
+ S390_RI_A_OPCODE_LIST(DECODE_RI_A_INSTRUCTIONS)
#undef DECODE_RI_A_INSTRUCTIONS
-#define DECODE_RI_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'r1,'i4"); \
- break;
- S390_RI_B_OPCODE_LIST(DECODE_RI_B_INSTRUCTIONS)
+#define DECODE_RI_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'i4"); \
+ break;
+ S390_RI_B_OPCODE_LIST(DECODE_RI_B_INSTRUCTIONS)
#undef DECODE_RI_B_INSTRUCTIONS
-#define DECODE_RI_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'm1,'i4"); \
- break;
- S390_RI_C_OPCODE_LIST(DECODE_RI_C_INSTRUCTIONS)
+#define DECODE_RI_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'm1,'i4"); \
+ break;
+ S390_RI_C_OPCODE_LIST(DECODE_RI_C_INSTRUCTIONS)
#undef DECODE_RI_C_INSTRUCTIONS
-#define DECODE_RRE_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'r5,'r6"); \
- break;
- S390_RRE_OPCODE_LIST(DECODE_RRE_INSTRUCTIONS)
+#define DECODE_RRE_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r5,'r6"); \
+ break;
+ S390_RRE_OPCODE_LIST(DECODE_RRE_INSTRUCTIONS)
#undef DECODE_RRE_INSTRUCTIONS
-#define DECODE_RRF_A_INSTRUCTIONS(name, opcode_name, opcode_val) \
- case opcode_name: \
- Format(instr, #name "\t'r5,'r6,'r3"); \
- break;
- S390_RRF_A_OPCODE_LIST(DECODE_RRF_A_INSTRUCTIONS)
+#define DECODE_RRF_A_INSTRUCTIONS(name, opcode_name, opcode_val) \
+ case opcode_name: \
+ Format(instr, #name "\t'r5,'r6,'r3"); \
+ break;
+ S390_RRF_A_OPCODE_LIST(DECODE_RRF_A_INSTRUCTIONS)
#undef DECODE_RRF_A_INSTRUCTIONS
-#define DECODE_RRF_C_INSTRUCTIONS(name, opcode_name, opcode_val) \
- case opcode_name: \
- Format(instr, #name "\t'r5,'r6,'m2"); \
- break;
- S390_RRF_C_OPCODE_LIST(DECODE_RRF_C_INSTRUCTIONS)
+#define DECODE_RRF_C_INSTRUCTIONS(name, opcode_name, opcode_val) \
+ case opcode_name: \
+ Format(instr, #name "\t'r5,'r6,'m2"); \
+ break;
+ S390_RRF_C_OPCODE_LIST(DECODE_RRF_C_INSTRUCTIONS)
#undef DECODE_RRF_C_INSTRUCTIONS
-#define DECODE_RRF_E_INSTRUCTIONS(name, opcode_name, opcode_val) \
- case opcode_name: \
- Format(instr, #name "\t'r5,'m2,'f6"); \
- break;
- S390_RRF_E_OPCODE_LIST(DECODE_RRF_E_INSTRUCTIONS)
+#define DECODE_RRF_E_INSTRUCTIONS(name, opcode_name, opcode_val) \
+ case opcode_name: \
+ Format(instr, #name "\t'r5,'m2,'f6"); \
+ break;
+ S390_RRF_E_OPCODE_LIST(DECODE_RRF_E_INSTRUCTIONS)
#undef DECODE_RRF_E_INSTRUCTIONS
-#define DECODE_RX_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'r1,'d1('r2d,'r3)"); \
- break;
- S390_RX_A_OPCODE_LIST(DECODE_RX_A_INSTRUCTIONS)
+#define DECODE_RX_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'d1('r2d,'r3)"); \
+ break;
+ S390_RX_A_OPCODE_LIST(DECODE_RX_A_INSTRUCTIONS)
#undef DECODE_RX_A_INSTRUCTIONS
-#define DECODE_RX_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'm1,'d1('r2d,'r3)"); \
- break;
- S390_RX_B_OPCODE_LIST(DECODE_RX_B_INSTRUCTIONS)
+#define DECODE_RX_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'm1,'d1('r2d,'r3)"); \
+ break;
+ S390_RX_B_OPCODE_LIST(DECODE_RX_B_INSTRUCTIONS)
#undef DECODE_RX_B_INSTRUCTIONS
-#define DECODE_RRD_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'f3,'f5,'f6"); \
- break;
- S390_RRD_OPCODE_LIST(DECODE_RRD_INSTRUCTIONS)
+#define DECODE_RRD_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f3,'f5,'f6"); \
+ break;
+ S390_RRD_OPCODE_LIST(DECODE_RRD_INSTRUCTIONS)
#undef DECODE_RRD_INSTRUCTIONS
-#define DECODE_SI_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'd1('r3),'i8"); \
- break;
- S390_SI_OPCODE_LIST(DECODE_SI_INSTRUCTIONS)
+#define DECODE_SI_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'd1('r3),'i8"); \
+ break;
+ S390_SI_OPCODE_LIST(DECODE_SI_INSTRUCTIONS)
#undef DECODE_SI_INSTRUCTIONS
- /* 6 bytes */
+ /* 6 bytes */
+#define DECODE_VRR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'f2,'m4,'m5,'m6"); \
+ break;
+ S390_VRR_A_OPCODE_LIST(DECODE_VRR_A_INSTRUCTIONS)
+#undef DECODE_VRR_A_INSTRUCTIONS
+
+#define DECODE_VRR_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'f2,'f3,'m4,'m6"); \
+ break;
+ S390_VRR_B_OPCODE_LIST(DECODE_VRR_B_INSTRUCTIONS)
+#undef DECODE_VRR_B_INSTRUCTIONS
+
#define DECODE_VRR_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
- Format(instr, #name "\t'f1,'f2,'f3"); \
+ Format(instr, #name "\t'f1,'f2,'f3,'m4"); \
break;
- S390_VRR_C_OPCODE_LIST(DECODE_VRR_C_INSTRUCTIONS)
+ S390_VRR_C_OPCODE_LIST(DECODE_VRR_C_INSTRUCTIONS)
#undef DECODE_VRR_C_INSTRUCTIONS
+#define DECODE_VRR_E_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'f2,'f3,'f4,'m5,'m3"); \
+ break;
+ S390_VRR_E_OPCODE_LIST(DECODE_VRR_E_INSTRUCTIONS)
+#undef DECODE_VRR_E_INSTRUCTIONS
+
+#define DECODE_VRX_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'d1('r2d,'r3),'m4"); \
+ break;
+ S390_VRX_OPCODE_LIST(DECODE_VRX_INSTRUCTIONS)
+#undef DECODE_VRX_INSTRUCTIONS
+
+#define DECODE_VRS_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'f2,'d1('r3),'m4"); \
+ break;
+ S390_VRS_A_OPCODE_LIST(DECODE_VRS_A_INSTRUCTIONS)
+#undef DECODE_VRS_A_INSTRUCTIONS
+
+#define DECODE_VRS_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'r2,'d1('r3),'m4"); \
+ break;
+ S390_VRS_B_OPCODE_LIST(DECODE_VRS_B_INSTRUCTIONS)
+#undef DECODE_VRS_B_INSTRUCTIONS
+
+#define DECODE_VRS_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'r1,'f2,'d1('r3),'m4"); \
+ break;
+ S390_VRS_C_OPCODE_LIST(DECODE_VRS_C_INSTRUCTIONS)
+#undef DECODE_VRS_C_INSTRUCTIONS
+
+#define DECODE_VRI_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'i1,'m4"); \
+ break;
+ S390_VRI_A_OPCODE_LIST(DECODE_VRI_A_INSTRUCTIONS)
+#undef DECODE_VRI_A_INSTRUCTIONS
+
+#define DECODE_VRI_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'f2,'i1,'m4"); \
+ break;
+ S390_VRI_C_OPCODE_LIST(DECODE_VRI_C_INSTRUCTIONS)
+#undef DECODE_VRI_C_INSTRUCTIONS
+
#define DECODE_RIL_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'r1,'i7"); \
break;
- S390_RIL_A_OPCODE_LIST(DECODE_RIL_A_INSTRUCTIONS)
+ S390_RIL_A_OPCODE_LIST(DECODE_RIL_A_INSTRUCTIONS)
#undef DECODE_RIL_A_INSTRUCTIONS
#define DECODE_RIL_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'r1,'ie"); \
break;
- S390_RIL_B_OPCODE_LIST(DECODE_RIL_B_INSTRUCTIONS)
+ S390_RIL_B_OPCODE_LIST(DECODE_RIL_B_INSTRUCTIONS)
#undef DECODE_RIL_B_INSTRUCTIONS
#define DECODE_RIL_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'm1,'ie"); \
break;
- S390_RIL_C_OPCODE_LIST(DECODE_RIL_C_INSTRUCTIONS)
+ S390_RIL_C_OPCODE_LIST(DECODE_RIL_C_INSTRUCTIONS)
#undef DECODE_RIL_C_INSTRUCTIONS
-#define DECODE_SIY_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'd2('r3),'i8"); \
+#define DECODE_SIY_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'd2('r3),'i8"); \
break;
- S390_SIY_OPCODE_LIST(DECODE_SIY_INSTRUCTIONS)
+ S390_SIY_OPCODE_LIST(DECODE_SIY_INSTRUCTIONS)
#undef DECODE_SIY_INSTRUCTIONS
#define DECODE_RIE_D_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'r1,'r2,'i1"); \
break;
- S390_RIE_D_OPCODE_LIST(DECODE_RIE_D_INSTRUCTIONS)
+ S390_RIE_D_OPCODE_LIST(DECODE_RIE_D_INSTRUCTIONS)
#undef DECODE_RIE_D_INSTRUCTIONS
#define DECODE_RIE_E_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'r1,'r2,'i4"); \
break;
- S390_RIE_E_OPCODE_LIST(DECODE_RIE_E_INSTRUCTIONS)
+ S390_RIE_E_OPCODE_LIST(DECODE_RIE_E_INSTRUCTIONS)
#undef DECODE_RIE_E_INSTRUCTIONS
#define DECODE_RIE_F_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'r1,'r2,'i9,'ia,'ib"); \
break;
- S390_RIE_F_OPCODE_LIST(DECODE_RIE_F_INSTRUCTIONS)
+ S390_RIE_F_OPCODE_LIST(DECODE_RIE_F_INSTRUCTIONS)
#undef DECODE_RIE_F_INSTRUCTIONS
#define DECODE_RSY_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'r1,'r2,'d2('r3)"); \
break;
- S390_RSY_A_OPCODE_LIST(DECODE_RSY_A_INSTRUCTIONS)
+ S390_RSY_A_OPCODE_LIST(DECODE_RSY_A_INSTRUCTIONS)
#undef DECODE_RSY_A_INSTRUCTIONS
#define DECODE_RSY_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'm2,'r1,'d2('r3)"); \
break;
- S390_RSY_B_OPCODE_LIST(DECODE_RSY_B_INSTRUCTIONS)
+ S390_RSY_B_OPCODE_LIST(DECODE_RSY_B_INSTRUCTIONS)
#undef DECODE_RSY_B_INSTRUCTIONS
#define DECODE_RXY_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'r1,'d2('r2d,'r3)"); \
break;
- S390_RXY_A_OPCODE_LIST(DECODE_RXY_A_INSTRUCTIONS)
+ S390_RXY_A_OPCODE_LIST(DECODE_RXY_A_INSTRUCTIONS)
#undef DECODE_RXY_A_INSTRUCTIONS
#define DECODE_RXY_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: \
Format(instr, #name "\t'm1,'d2('r2d,'r3)"); \
break;
- S390_RXY_B_OPCODE_LIST(DECODE_RXY_B_INSTRUCTIONS)
+ S390_RXY_B_OPCODE_LIST(DECODE_RXY_B_INSTRUCTIONS)
#undef DECODE_RXY_B_INSTRUCTIONS
-#define DECODE_RXE_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'f1,'d1('r2d, 'r3)"); \
+#define DECODE_RXE_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'d1('r2d, 'r3)"); \
break;
- S390_RXE_OPCODE_LIST(DECODE_RXE_INSTRUCTIONS)
+ S390_RXE_OPCODE_LIST(DECODE_RXE_INSTRUCTIONS)
#undef DECODE_RXE_INSTRUCTIONS
-#define DECODE_SIL_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'd3('r3),'id"); \
+#define DECODE_SIL_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'd3('r3),'id"); \
break;
- S390_SIL_OPCODE_LIST(DECODE_SIL_INSTRUCTIONS)
+ S390_SIL_OPCODE_LIST(DECODE_SIL_INSTRUCTIONS)
#undef DECODE_SIL_INSTRUCTIONS
-#define DECODE_SS_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: \
- Format(instr, #name "\t'd3('i8,'r3),'d4('r7)"); \
+#define DECODE_SS_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'd3('i8,'r3),'d4('r7)"); \
break;
- S390_SS_A_OPCODE_LIST(DECODE_SS_A_INSTRUCTIONS)
+ S390_SS_A_OPCODE_LIST(DECODE_SS_A_INSTRUCTIONS)
#undef DECODE_SS_A_INSTRUCTIONS
default:
@@ -902,21 +980,21 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
// Print the Instruction bits.
if (instrLength == 2) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%04x ", instr->InstructionBits<TwoByteInstr>());
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%04x ",
+ instr->InstructionBits<TwoByteInstr>());
} else if (instrLength == 4) {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%08x ", instr->InstructionBits<FourByteInstr>());
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
+ instr->InstructionBits<FourByteInstr>());
} else {
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "%012" PRIx64 " ", instr->InstructionBits<SixByteInstr>());
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%012" PRIx64 " ",
+ instr->InstructionBits<SixByteInstr>());
}
bool decoded = DecodeSpecial(instr);
- if (!decoded)
- decoded = DecodeGeneric(instr);
- if (!decoded)
- Unknown(instr);
+ if (!decoded) decoded = DecodeGeneric(instr);
+ if (!decoded) Unknown(instr);
return instrLength;
}
@@ -929,7 +1007,7 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
- return tmp_buffer_.start();
+ return tmp_buffer_.begin();
}
const char* NameConverter::NameOfConstant(byte* addr) const {
@@ -977,7 +1055,7 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
- *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
}
}
diff --git a/deps/v8/src/unwinder.cc b/deps/v8/src/diagnostics/unwinder.cc
index c3fcd1a6ae..84097c288f 100644
--- a/deps/v8/src/unwinder.cc
+++ b/deps/v8/src/diagnostics/unwinder.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "include/v8.h"
-#include "src/frame-constants.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
@@ -88,6 +88,9 @@ bool Unwinder::TryUnwindV8Frames(const UnwindState& unwind_state,
register_state->fp = final_fp;
register_state->pc = next_pc;
+
+ // Link register no longer valid after unwinding.
+ register_state->lr = nullptr;
return true;
}
return false;
diff --git a/deps/v8/src/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 2bec0a06f7..8fb01dba9a 100644
--- a/deps/v8/src/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -2,44 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/unwinding-info-win64.h"
+#include "src/diagnostics/unwinding-info-win64.h"
#if defined(V8_OS_WIN_X64)
-#include "src/allocation.h"
-#include "src/macro-assembler.h"
-#include "src/x64/assembler-x64.h"
-
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/x64/assembler-x64.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -62,23 +31,23 @@ bool RegisterUnwindInfoForExceptionHandlingOnly() {
* From Windows SDK ehdata.h, which does not compile with Clang.
* See https://msdn.microsoft.com/en-us/library/ddssxxy8.aspx.
*/
-typedef union _UNWIND_CODE {
+union UNWIND_CODE {
struct {
unsigned char CodeOffset;
unsigned char UnwindOp : 4;
unsigned char OpInfo : 4;
};
uint16_t FrameOffset;
-} UNWIND_CODE, *PUNWIND_CODE;
+};
-typedef struct _UNWIND_INFO {
+struct UNWIND_INFO {
unsigned char Version : 3;
unsigned char Flags : 5;
unsigned char SizeOfProlog;
unsigned char CountOfCodes;
unsigned char FrameRegister : 4;
unsigned char FrameOffset : 4;
-} UNWIND_INFO, *PUNWIND_INFO;
+};
struct V8UnwindData {
UNWIND_INFO unwind_info;
@@ -131,7 +100,7 @@ void SetUnhandledExceptionCallback(
// part of the registration of unwinding info. It is referenced by
// RegisterNonABICompliantCodeRange(), below, and by the unwinding info for
// builtins declared in the embedded blob.
-extern "C" int CRASH_HANDLER_FUNCTION_NAME(
+extern "C" __declspec(dllexport) int CRASH_HANDLER_FUNCTION_NAME(
PEXCEPTION_RECORD ExceptionRecord, ULONG64 EstablisherFrame,
PCONTEXT ContextRecord, PDISPATCHER_CONTEXT DispatcherContext) {
if (unhandled_exception_callback_g != nullptr) {
@@ -158,36 +127,33 @@ struct ExceptionHandlerRecord {
uint8_t exception_thunk[kMaxExceptionThunkSize];
};
+namespace {
+
+V8_DECLARE_ONCE(load_ntdll_unwinding_functions_once);
static decltype(
&::RtlAddGrowableFunctionTable) add_growable_function_table_func = nullptr;
static decltype(
&::RtlDeleteGrowableFunctionTable) delete_growable_function_table_func =
nullptr;
-namespace {
-
void LoadNtdllUnwindingFunctions() {
- static bool loaded = false;
- if (loaded) {
- return;
- }
- loaded = true;
-
- // Load functions from the ntdll.dll module.
- HMODULE ntdll_module =
- LoadLibraryEx(L"ntdll.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
- DCHECK_NOT_NULL(ntdll_module);
-
- // This fails on Windows 7.
- add_growable_function_table_func =
- reinterpret_cast<decltype(&::RtlAddGrowableFunctionTable)>(
- ::GetProcAddress(ntdll_module, "RtlAddGrowableFunctionTable"));
- DCHECK_IMPLIES(IsWindows8OrGreater(), add_growable_function_table_func);
-
- delete_growable_function_table_func =
- reinterpret_cast<decltype(&::RtlDeleteGrowableFunctionTable)>(
- ::GetProcAddress(ntdll_module, "RtlDeleteGrowableFunctionTable"));
- DCHECK_IMPLIES(IsWindows8OrGreater(), delete_growable_function_table_func);
+ base::CallOnce(&load_ntdll_unwinding_functions_once, []() {
+ // Load functions from the ntdll.dll module.
+ HMODULE ntdll_module =
+ LoadLibraryEx(L"ntdll.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
+ DCHECK_NOT_NULL(ntdll_module);
+
+ // This fails on Windows 7.
+ add_growable_function_table_func =
+ reinterpret_cast<decltype(&::RtlAddGrowableFunctionTable)>(
+ ::GetProcAddress(ntdll_module, "RtlAddGrowableFunctionTable"));
+ DCHECK_IMPLIES(IsWindows8OrGreater(), add_growable_function_table_func);
+
+ delete_growable_function_table_func =
+ reinterpret_cast<decltype(&::RtlDeleteGrowableFunctionTable)>(
+ ::GetProcAddress(ntdll_module, "RtlDeleteGrowableFunctionTable"));
+ DCHECK_IMPLIES(IsWindows8OrGreater(), delete_growable_function_table_func);
+ });
}
bool AddGrowableFunctionTable(PVOID* DynamicTable,
@@ -239,11 +205,15 @@ void InitUnwindingRecord(Record* record, size_t code_size_in_bytes) {
record->exception_handler = offsetof(Record, exception_thunk);
// Hardcoded thunk.
- MacroAssembler masm(AssemblerOptions{}, NewAssemblerBuffer(64));
+ AssemblerOptions options;
+ options.record_reloc_info_for_serialization = false;
+ MacroAssembler masm(nullptr, options, CodeObjectRequired::kNo,
+ NewAssemblerBuffer(64));
masm.movq(rax, reinterpret_cast<uint64_t>(&CRASH_HANDLER_FUNCTION_NAME));
masm.jmp(rax);
- DCHECK_GE(masm.buffer_size(), sizeof(record->exception_thunk));
- memcpy(&record->exception_thunk[0], masm.buffer_start(), masm.buffer_size());
+ DCHECK_LE(masm.instruction_size(), sizeof(record->exception_thunk));
+ memcpy(&record->exception_thunk[0], masm.buffer_start(),
+ masm.instruction_size());
}
void RegisterNonABICompliantCodeRange(void* start, size_t size_in_bytes) {
diff --git a/deps/v8/src/unwinding-info-win64.h b/deps/v8/src/diagnostics/unwinding-info-win64.h
index af29c4673d..f6611e7e2e 100644
--- a/deps/v8/src/unwinding-info-win64.h
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UNWINDING_INFO_WIN64_H_
-#define V8_UNWINDING_INFO_WIN64_H_
+#ifndef V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
+#define V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
#include "include/v8.h"
#include "include/v8config.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#if defined(V8_OS_WIN_X64)
#include "src/base/win32-headers.h"
@@ -98,4 +98,4 @@ class XdataEncoder {
#endif // defined(V8_OS_WIN_X64)
-#endif // V8_UNWINDING_INFO_WIN64_H_
+#endif // V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index ce992d09fa..ab8ba34d90 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -2,19 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <assert.h>
-#include <stdarg.h>
-#include <stdio.h>
+#include <cassert>
+#include <cinttypes>
+#include <cstdarg>
+#include <cstdio>
#if V8_TARGET_ARCH_X64
#include "src/base/compiler-specific.h"
#include "src/base/lazy-instance.h"
#include "src/base/v8-fallthrough.h"
-#include "src/disasm.h"
-#include "src/utils.h"
-#include "src/x64/register-x64.h"
-#include "src/x64/sse-instr.h"
+#include "src/codegen/x64/register-x64.h"
+#include "src/codegen/x64/sse-instr.h"
+#include "src/diagnostics/disasm.h"
+#include "src/utils/utils.h"
namespace disasm {
@@ -29,7 +30,6 @@ enum OperandType {
BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
};
-
//------------------------------------------------------------------
// Tables
//------------------------------------------------------------------
@@ -39,100 +39,76 @@ struct ByteMnemonic {
const char* mnem;
};
-
static const ByteMnemonic two_operands_instr[] = {
- { 0x00, BYTE_OPER_REG_OP_ORDER, "add" },
- { 0x01, OPER_REG_OP_ORDER, "add" },
- { 0x02, BYTE_REG_OPER_OP_ORDER, "add" },
- { 0x03, REG_OPER_OP_ORDER, "add" },
- { 0x08, BYTE_OPER_REG_OP_ORDER, "or" },
- { 0x09, OPER_REG_OP_ORDER, "or" },
- { 0x0A, BYTE_REG_OPER_OP_ORDER, "or" },
- { 0x0B, REG_OPER_OP_ORDER, "or" },
- { 0x10, BYTE_OPER_REG_OP_ORDER, "adc" },
- { 0x11, OPER_REG_OP_ORDER, "adc" },
- { 0x12, BYTE_REG_OPER_OP_ORDER, "adc" },
- { 0x13, REG_OPER_OP_ORDER, "adc" },
- { 0x18, BYTE_OPER_REG_OP_ORDER, "sbb" },
- { 0x19, OPER_REG_OP_ORDER, "sbb" },
- { 0x1A, BYTE_REG_OPER_OP_ORDER, "sbb" },
- { 0x1B, REG_OPER_OP_ORDER, "sbb" },
- { 0x20, BYTE_OPER_REG_OP_ORDER, "and" },
- { 0x21, OPER_REG_OP_ORDER, "and" },
- { 0x22, BYTE_REG_OPER_OP_ORDER, "and" },
- { 0x23, REG_OPER_OP_ORDER, "and" },
- { 0x28, BYTE_OPER_REG_OP_ORDER, "sub" },
- { 0x29, OPER_REG_OP_ORDER, "sub" },
- { 0x2A, BYTE_REG_OPER_OP_ORDER, "sub" },
- { 0x2B, REG_OPER_OP_ORDER, "sub" },
- { 0x30, BYTE_OPER_REG_OP_ORDER, "xor" },
- { 0x31, OPER_REG_OP_ORDER, "xor" },
- { 0x32, BYTE_REG_OPER_OP_ORDER, "xor" },
- { 0x33, REG_OPER_OP_ORDER, "xor" },
- { 0x38, BYTE_OPER_REG_OP_ORDER, "cmp" },
- { 0x39, OPER_REG_OP_ORDER, "cmp" },
- { 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
- { 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x63, REG_OPER_OP_ORDER, "movsxl" },
- { 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
- { 0x85, REG_OPER_OP_ORDER, "test" },
- { 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
- { 0x87, REG_OPER_OP_ORDER, "xchg" },
- { 0x88, BYTE_OPER_REG_OP_ORDER, "mov" },
- { 0x89, OPER_REG_OP_ORDER, "mov" },
- { 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
- { 0x8B, REG_OPER_OP_ORDER, "mov" },
- { 0x8D, REG_OPER_OP_ORDER, "lea" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
+ {0x00, BYTE_OPER_REG_OP_ORDER, "add"},
+ {0x01, OPER_REG_OP_ORDER, "add"},
+ {0x02, BYTE_REG_OPER_OP_ORDER, "add"},
+ {0x03, REG_OPER_OP_ORDER, "add"},
+ {0x08, BYTE_OPER_REG_OP_ORDER, "or"},
+ {0x09, OPER_REG_OP_ORDER, "or"},
+ {0x0A, BYTE_REG_OPER_OP_ORDER, "or"},
+ {0x0B, REG_OPER_OP_ORDER, "or"},
+ {0x10, BYTE_OPER_REG_OP_ORDER, "adc"},
+ {0x11, OPER_REG_OP_ORDER, "adc"},
+ {0x12, BYTE_REG_OPER_OP_ORDER, "adc"},
+ {0x13, REG_OPER_OP_ORDER, "adc"},
+ {0x18, BYTE_OPER_REG_OP_ORDER, "sbb"},
+ {0x19, OPER_REG_OP_ORDER, "sbb"},
+ {0x1A, BYTE_REG_OPER_OP_ORDER, "sbb"},
+ {0x1B, REG_OPER_OP_ORDER, "sbb"},
+ {0x20, BYTE_OPER_REG_OP_ORDER, "and"},
+ {0x21, OPER_REG_OP_ORDER, "and"},
+ {0x22, BYTE_REG_OPER_OP_ORDER, "and"},
+ {0x23, REG_OPER_OP_ORDER, "and"},
+ {0x28, BYTE_OPER_REG_OP_ORDER, "sub"},
+ {0x29, OPER_REG_OP_ORDER, "sub"},
+ {0x2A, BYTE_REG_OPER_OP_ORDER, "sub"},
+ {0x2B, REG_OPER_OP_ORDER, "sub"},
+ {0x30, BYTE_OPER_REG_OP_ORDER, "xor"},
+ {0x31, OPER_REG_OP_ORDER, "xor"},
+ {0x32, BYTE_REG_OPER_OP_ORDER, "xor"},
+ {0x33, REG_OPER_OP_ORDER, "xor"},
+ {0x38, BYTE_OPER_REG_OP_ORDER, "cmp"},
+ {0x39, OPER_REG_OP_ORDER, "cmp"},
+ {0x3A, BYTE_REG_OPER_OP_ORDER, "cmp"},
+ {0x3B, REG_OPER_OP_ORDER, "cmp"},
+ {0x63, REG_OPER_OP_ORDER, "movsxl"},
+ {0x84, BYTE_REG_OPER_OP_ORDER, "test"},
+ {0x85, REG_OPER_OP_ORDER, "test"},
+ {0x86, BYTE_REG_OPER_OP_ORDER, "xchg"},
+ {0x87, REG_OPER_OP_ORDER, "xchg"},
+ {0x88, BYTE_OPER_REG_OP_ORDER, "mov"},
+ {0x89, OPER_REG_OP_ORDER, "mov"},
+ {0x8A, BYTE_REG_OPER_OP_ORDER, "mov"},
+ {0x8B, REG_OPER_OP_ORDER, "mov"},
+ {0x8D, REG_OPER_OP_ORDER, "lea"},
+ {-1, UNSET_OP_ORDER, ""}};
static const ByteMnemonic zero_operands_instr[] = {
- { 0xC3, UNSET_OP_ORDER, "ret" },
- { 0xC9, UNSET_OP_ORDER, "leave" },
- { 0xF4, UNSET_OP_ORDER, "hlt" },
- { 0xFC, UNSET_OP_ORDER, "cld" },
- { 0xCC, UNSET_OP_ORDER, "int3" },
- { 0x60, UNSET_OP_ORDER, "pushad" },
- { 0x61, UNSET_OP_ORDER, "popad" },
- { 0x9C, UNSET_OP_ORDER, "pushfd" },
- { 0x9D, UNSET_OP_ORDER, "popfd" },
- { 0x9E, UNSET_OP_ORDER, "sahf" },
- { 0x99, UNSET_OP_ORDER, "cdq" },
- { 0x9B, UNSET_OP_ORDER, "fwait" },
- { 0xA4, UNSET_OP_ORDER, "movs" },
- { 0xA5, UNSET_OP_ORDER, "movs" },
- { 0xA6, UNSET_OP_ORDER, "cmps" },
- { 0xA7, UNSET_OP_ORDER, "cmps" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
-
-static const ByteMnemonic call_jump_instr[] = {
- { 0xE8, UNSET_OP_ORDER, "call" },
- { 0xE9, UNSET_OP_ORDER, "jmp" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
+ {0xC3, UNSET_OP_ORDER, "ret"}, {0xC9, UNSET_OP_ORDER, "leave"},
+ {0xF4, UNSET_OP_ORDER, "hlt"}, {0xFC, UNSET_OP_ORDER, "cld"},
+ {0xCC, UNSET_OP_ORDER, "int3"}, {0x60, UNSET_OP_ORDER, "pushad"},
+ {0x61, UNSET_OP_ORDER, "popad"}, {0x9C, UNSET_OP_ORDER, "pushfd"},
+ {0x9D, UNSET_OP_ORDER, "popfd"}, {0x9E, UNSET_OP_ORDER, "sahf"},
+ {0x99, UNSET_OP_ORDER, "cdq"}, {0x9B, UNSET_OP_ORDER, "fwait"},
+ {0xA4, UNSET_OP_ORDER, "movs"}, {0xA5, UNSET_OP_ORDER, "movs"},
+ {0xA6, UNSET_OP_ORDER, "cmps"}, {0xA7, UNSET_OP_ORDER, "cmps"},
+ {-1, UNSET_OP_ORDER, ""}};
+
+static const ByteMnemonic call_jump_instr[] = {{0xE8, UNSET_OP_ORDER, "call"},
+ {0xE9, UNSET_OP_ORDER, "jmp"},
+ {-1, UNSET_OP_ORDER, ""}};
static const ByteMnemonic short_immediate_instr[] = {
- { 0x05, UNSET_OP_ORDER, "add" },
- { 0x0D, UNSET_OP_ORDER, "or" },
- { 0x15, UNSET_OP_ORDER, "adc" },
- { 0x1D, UNSET_OP_ORDER, "sbb" },
- { 0x25, UNSET_OP_ORDER, "and" },
- { 0x2D, UNSET_OP_ORDER, "sub" },
- { 0x35, UNSET_OP_ORDER, "xor" },
- { 0x3D, UNSET_OP_ORDER, "cmp" },
- { -1, UNSET_OP_ORDER, "" }
-};
-
+ {0x05, UNSET_OP_ORDER, "add"}, {0x0D, UNSET_OP_ORDER, "or"},
+ {0x15, UNSET_OP_ORDER, "adc"}, {0x1D, UNSET_OP_ORDER, "sbb"},
+ {0x25, UNSET_OP_ORDER, "and"}, {0x2D, UNSET_OP_ORDER, "sub"},
+ {0x35, UNSET_OP_ORDER, "xor"}, {0x3D, UNSET_OP_ORDER, "cmp"},
+ {-1, UNSET_OP_ORDER, ""}};
static const char* const conditional_code_suffix[] = {
- "o", "no", "c", "nc", "z", "nz", "na", "a",
- "s", "ns", "pe", "po", "l", "ge", "le", "g"
-};
-
+ "o", "no", "c", "nc", "z", "nz", "na", "a",
+ "s", "ns", "pe", "po", "l", "ge", "le", "g"};
enum InstructionType {
NO_INSTR,
@@ -165,13 +141,10 @@ struct InstructionDesc {
bool byte_size_operation; // Fixed 8-bit operation.
};
-
class InstructionTable {
public:
InstructionTable();
- const InstructionDesc& Get(byte x) const {
- return instructions_[x];
- }
+ const InstructionDesc& Get(byte x) const { return instructions_[x]; }
private:
InstructionDesc instructions_[256];
@@ -183,13 +156,11 @@ class InstructionTable {
void AddJumpConditionalShort();
};
-
InstructionTable::InstructionTable() {
Clear();
Init();
}
-
void InstructionTable::Clear() {
for (int i = 0; i < 256; i++) {
instructions_[i].mnem = "(bad)";
@@ -199,7 +170,6 @@ void InstructionTable::Clear() {
}
}
-
void InstructionTable::Init() {
CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
@@ -211,7 +181,6 @@ void InstructionTable::Init() {
SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, false, "mov");
}
-
void InstructionTable::CopyTable(const ByteMnemonic bm[],
InstructionType type) {
for (int i = 0; bm[i].b >= 0; i++) {
@@ -226,12 +195,8 @@ void InstructionTable::CopyTable(const ByteMnemonic bm[],
}
}
-
-void InstructionTable::SetTableRange(InstructionType type,
- byte start,
- byte end,
- bool byte_size,
- const char* mnem) {
+void InstructionTable::SetTableRange(InstructionType type, byte start, byte end,
+ bool byte_size, const char* mnem) {
for (byte b = start; b <= end; b++) {
InstructionDesc* id = &instructions_[b];
DCHECK_EQ(NO_INSTR, id->type); // Information not already entered
@@ -241,7 +206,6 @@ void InstructionTable::SetTableRange(InstructionType type,
}
}
-
void InstructionTable::AddJumpConditionalShort() {
for (byte b = 0x70; b <= 0x7F; b++) {
InstructionDesc* id = &instructions_[b];
@@ -256,29 +220,26 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(InstructionTable, GetInstructionTable)
}
static const InstructionDesc cmov_instructions[16] = {
- {"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovnc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovnz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovna", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmova", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovs", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovns", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovpe", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovpo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovl", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovge", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
- {"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
-};
-
+ {"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovnc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovnz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovna", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmova", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovs", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovns", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovpe", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovpo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovl", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovge", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}};
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
-
// A new DisassemblerX64 object is created to disassemble each instruction.
// The object can only disassemble a single instruction.
class DisassemblerX64 {
@@ -318,7 +279,7 @@ class DisassemblerX64 {
bool abort_on_unimplemented_;
// Prefixes parsed
byte rex_;
- byte operand_size_; // 0x66 or (if no group 3 prefix is present) 0x0.
+ byte operand_size_; // 0x66 or (if no group 3 prefix is present) 0x0.
byte group_1_prefix_; // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
byte vex_byte0_; // 0xC4 or 0xC5
byte vex_byte1_;
@@ -408,9 +369,7 @@ class DisassemblerX64 {
return OPERAND_DOUBLEWORD_SIZE;
}
- char operand_size_code() {
- return "bwlq"[operand_size()];
- }
+ char operand_size_code() { return "bwlq"[operand_size()]; }
char float_size_code() { return "sd"[rex_w()]; }
@@ -431,35 +390,26 @@ class DisassemblerX64 {
}
// Disassembler helper functions.
- void get_modrm(byte data,
- int* mod,
- int* regop,
- int* rm) {
+ void get_modrm(byte data, int* mod, int* regop, int* rm) {
*mod = (data >> 6) & 3;
*regop = ((data & 0x38) >> 3) | (rex_r() ? 8 : 0);
*rm = (data & 7) | (rex_b() ? 8 : 0);
}
- void get_sib(byte data,
- int* scale,
- int* index,
- int* base) {
+ void get_sib(byte data, int* scale, int* index, int* base) {
*scale = (data >> 6) & 3;
*index = ((data >> 3) & 7) | (rex_x() ? 8 : 0);
*base = (data & 7) | (rex_b() ? 8 : 0);
}
- typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const;
+ using RegisterNameMapping = const char* (DisassemblerX64::*)(int reg) const;
void TryAppendRootRelativeName(int offset);
- int PrintRightOperandHelper(byte* modrmp,
- RegisterNameMapping register_name);
+ int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
int PrintRightXMMOperand(byte* modrmp);
- int PrintOperands(const char* mnem,
- OperandType op_order,
- byte* data);
+ int PrintOperands(const char* mnem, OperandType op_order, byte* data);
int PrintImmediate(byte* data, OperandSize size);
int PrintImmediateOp(byte* data);
const char* TwoByteMnemonic(byte opcode);
@@ -485,7 +435,6 @@ class DisassemblerX64 {
}
};
-
void DisassemblerX64::AppendToBuffer(const char* format, ...) {
v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
va_list args;
@@ -501,12 +450,11 @@ void DisassemblerX64::TryAppendRootRelativeName(int offset) {
}
int DisassemblerX64::PrintRightOperandHelper(
- byte* modrmp,
- RegisterNameMapping direct_register_name) {
+ byte* modrmp, RegisterNameMapping direct_register_name) {
int mod, regop, rm;
get_modrm(*modrmp, &mod, &regop, &rm);
- RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
- &DisassemblerX64::NameOfCPURegister;
+ RegisterNameMapping register_name =
+ (mod == 3) ? direct_register_name : &DisassemblerX64::NameOfCPURegister;
switch (mod) {
case 0:
if ((rm & 7) == 5) {
@@ -526,18 +474,13 @@ int DisassemblerX64::PrintRightOperandHelper(
} else if (base == 5) {
// base == rbp means no base register (when mod == 0).
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d%s0x%x]",
- NameOfCPURegister(index),
- 1 << scale,
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
+ AppendToBuffer("[%s*%d%s0x%x]", NameOfCPURegister(index), 1 << scale,
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
return 6;
} else if (index != 4 && base != 5) {
// [base+index*scale]
- AppendToBuffer("[%s+%s*%d]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale);
+ AppendToBuffer("[%s+%s*%d]", NameOfCPURegister(base),
+ NameOfCPURegister(index), 1 << scale);
return 2;
} else {
UnimplementedInstruction();
@@ -557,27 +500,20 @@ int DisassemblerX64::PrintRightOperandHelper(
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
: *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- AppendToBuffer("[%s%s0x%x]",
- NameOfCPURegister(base),
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
+ AppendToBuffer("[%s%s0x%x]", NameOfCPURegister(base),
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
} else {
- AppendToBuffer("[%s+%s*%d%s0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
+ AppendToBuffer("[%s+%s*%d%s0x%x]", NameOfCPURegister(base),
+ NameOfCPURegister(index), 1 << scale,
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
: *reinterpret_cast<int8_t*>(modrmp + 1);
- AppendToBuffer("[%s%s0x%x]",
- NameOfCPURegister(rm),
- disp < 0 ? "-" : "+",
- disp < 0 ? -disp : disp);
+ AppendToBuffer("[%s%s0x%x]", NameOfCPURegister(rm),
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
if (rm == i::kRootRegister.code()) {
// For root-relative accesses, try to append a description.
TryAppendRootRelativeName(disp);
@@ -595,7 +531,6 @@ int DisassemblerX64::PrintRightOperandHelper(
UNREACHABLE();
}
-
int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
int64_t value;
int count;
@@ -623,43 +558,32 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
return count;
}
-
int DisassemblerX64::PrintRightOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfCPURegister);
+ return PrintRightOperandHelper(modrmp, &DisassemblerX64::NameOfCPURegister);
}
-
int DisassemblerX64::PrintRightByteOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp,
&DisassemblerX64::NameOfByteCPURegister);
}
-
int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
- return PrintRightOperandHelper(modrmp,
- &DisassemblerX64::NameOfXMMRegister);
+ return PrintRightOperandHelper(modrmp, &DisassemblerX64::NameOfXMMRegister);
}
-
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
-int DisassemblerX64::PrintOperands(const char* mnem,
- OperandType op_order,
+int DisassemblerX64::PrintOperands(const char* mnem, OperandType op_order,
byte* data) {
byte modrm = *data;
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
int advance = 0;
- const char* register_name =
- byte_size_operand_ ? NameOfByteCPURegister(regop)
- : NameOfCPURegister(regop);
+ const char* register_name = byte_size_operand_ ? NameOfByteCPURegister(regop)
+ : NameOfCPURegister(regop);
switch (op_order) {
case REG_OPER_OP_ORDER: {
- AppendToBuffer("%s%c %s,",
- mnem,
- operand_size_code(),
- register_name);
+ AppendToBuffer("%s%c %s,", mnem, operand_size_code(), register_name);
advance = byte_size_operand_ ? PrintRightByteOperand(data)
: PrintRightOperand(data);
break;
@@ -673,12 +597,10 @@ int DisassemblerX64::PrintOperands(const char* mnem,
}
default:
UNREACHABLE();
- break;
}
return advance;
}
-
// Returns number of bytes used by machine instruction, including *data byte.
// Writes immediate instructions to 'tmp_buffer_'.
int DisassemblerX64::PrintImmediateOp(byte* data) {
@@ -724,7 +646,6 @@ int DisassemblerX64::PrintImmediateOp(byte* data) {
return 1 + count;
}
-
// Returns number of bytes used, including *data.
int DisassemblerX64::F6F7Instruction(byte* data) {
DCHECK(*data == 0xF7 || *data == 0xF6);
@@ -755,10 +676,7 @@ int DisassemblerX64::F6F7Instruction(byte* data) {
default:
UnimplementedInstruction();
}
- AppendToBuffer("%s%c %s",
- mnem,
- operand_size_code(),
- NameOfCPURegister(rm));
+ AppendToBuffer("%s%c %s", mnem, operand_size_code(), NameOfCPURegister(rm));
return 2;
} else if (regop == 0) {
AppendToBuffer("test%c ", operand_size_code());
@@ -772,7 +690,6 @@ int DisassemblerX64::F6F7Instruction(byte* data) {
}
}
-
int DisassemblerX64::ShiftInstruction(byte* data) {
byte op = *data & (~1);
int count = 1;
@@ -833,7 +750,6 @@ int DisassemblerX64::ShiftInstruction(byte* data) {
return count;
}
-
// Returns number of bytes used, including *data.
int DisassemblerX64::JumpShort(byte* data) {
DCHECK_EQ(0xEB, *data);
@@ -843,7 +759,6 @@ int DisassemblerX64::JumpShort(byte* data) {
return 2;
}
-
// Returns number of bytes used, including *data.
int DisassemblerX64::JumpConditional(byte* data) {
DCHECK_EQ(0x0F, *data);
@@ -854,7 +769,6 @@ int DisassemblerX64::JumpConditional(byte* data) {
return 6; // includes 0x0F
}
-
// Returns number of bytes used, including *data.
int DisassemblerX64::JumpConditionalShort(byte* data) {
byte cond = *data & 0x0F;
@@ -865,7 +779,6 @@ int DisassemblerX64::JumpConditionalShort(byte* data) {
return 2;
}
-
// Returns number of bytes used, including *data.
int DisassemblerX64::SetCC(byte* data) {
DCHECK_EQ(0x0F, *data);
@@ -1460,53 +1373,83 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int DisassemblerX64::FPUInstruction(byte* data) {
byte escape_opcode = *data;
DCHECK_EQ(0xD8, escape_opcode & 0xF8);
- byte modrm_byte = *(data+1);
+ byte modrm_byte = *(data + 1);
if (modrm_byte >= 0xC0) {
return RegisterFPUInstruction(escape_opcode, modrm_byte);
} else {
- return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
+ return MemoryFPUInstruction(escape_opcode, modrm_byte, data + 1);
}
}
-int DisassemblerX64::MemoryFPUInstruction(int escape_opcode,
- int modrm_byte,
- byte* modrm_start) {
+int DisassemblerX64::MemoryFPUInstruction(int escape_opcode, int modrm_byte,
+ byte* modrm_start) {
const char* mnem = "?";
int regop = (modrm_byte >> 3) & 0x7; // reg/op field of modrm byte.
switch (escape_opcode) {
- case 0xD9: switch (regop) {
- case 0: mnem = "fld_s"; break;
- case 3: mnem = "fstp_s"; break;
- case 7: mnem = "fstcw"; break;
- default: UnimplementedInstruction();
+ case 0xD9:
+ switch (regop) {
+ case 0:
+ mnem = "fld_s";
+ break;
+ case 3:
+ mnem = "fstp_s";
+ break;
+ case 7:
+ mnem = "fstcw";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
- case 0xDB: switch (regop) {
- case 0: mnem = "fild_s"; break;
- case 1: mnem = "fisttp_s"; break;
- case 2: mnem = "fist_s"; break;
- case 3: mnem = "fistp_s"; break;
- default: UnimplementedInstruction();
+ case 0xDB:
+ switch (regop) {
+ case 0:
+ mnem = "fild_s";
+ break;
+ case 1:
+ mnem = "fisttp_s";
+ break;
+ case 2:
+ mnem = "fist_s";
+ break;
+ case 3:
+ mnem = "fistp_s";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
- case 0xDD: switch (regop) {
- case 0: mnem = "fld_d"; break;
- case 3: mnem = "fstp_d"; break;
- default: UnimplementedInstruction();
+ case 0xDD:
+ switch (regop) {
+ case 0:
+ mnem = "fld_d";
+ break;
+ case 3:
+ mnem = "fstp_d";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
- case 0xDF: switch (regop) {
- case 5: mnem = "fild_d"; break;
- case 7: mnem = "fistp_d"; break;
- default: UnimplementedInstruction();
+ case 0xDF:
+ switch (regop) {
+ case 5:
+ mnem = "fild_d";
+ break;
+ case 7:
+ mnem = "fistp_d";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
- default: UnimplementedInstruction();
+ default:
+ UnimplementedInstruction();
}
AppendToBuffer("%s ", mnem);
int count = PrintRightOperand(modrm_start);
@@ -1514,7 +1457,7 @@ int DisassemblerX64::MemoryFPUInstruction(int escape_opcode,
}
int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
- byte modrm_byte) {
+ byte modrm_byte) {
bool has_register = false; // Is the FPU register encoded in modrm_byte?
const char* mnem = "?";
@@ -1535,25 +1478,62 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
break;
default:
switch (modrm_byte) {
- case 0xE0: mnem = "fchs"; break;
- case 0xE1: mnem = "fabs"; break;
- case 0xE3: mnem = "fninit"; break;
- case 0xE4: mnem = "ftst"; break;
- case 0xE8: mnem = "fld1"; break;
- case 0xEB: mnem = "fldpi"; break;
- case 0xED: mnem = "fldln2"; break;
- case 0xEE: mnem = "fldz"; break;
- case 0xF0: mnem = "f2xm1"; break;
- case 0xF1: mnem = "fyl2x"; break;
- case 0xF2: mnem = "fptan"; break;
- case 0xF5: mnem = "fprem1"; break;
- case 0xF7: mnem = "fincstp"; break;
- case 0xF8: mnem = "fprem"; break;
- case 0xFC: mnem = "frndint"; break;
- case 0xFD: mnem = "fscale"; break;
- case 0xFE: mnem = "fsin"; break;
- case 0xFF: mnem = "fcos"; break;
- default: UnimplementedInstruction();
+ case 0xE0:
+ mnem = "fchs";
+ break;
+ case 0xE1:
+ mnem = "fabs";
+ break;
+ case 0xE3:
+ mnem = "fninit";
+ break;
+ case 0xE4:
+ mnem = "ftst";
+ break;
+ case 0xE8:
+ mnem = "fld1";
+ break;
+ case 0xEB:
+ mnem = "fldpi";
+ break;
+ case 0xED:
+ mnem = "fldln2";
+ break;
+ case 0xEE:
+ mnem = "fldz";
+ break;
+ case 0xF0:
+ mnem = "f2xm1";
+ break;
+ case 0xF1:
+ mnem = "fyl2x";
+ break;
+ case 0xF2:
+ mnem = "fptan";
+ break;
+ case 0xF5:
+ mnem = "fprem1";
+ break;
+ case 0xF7:
+ mnem = "fincstp";
+ break;
+ case 0xF8:
+ mnem = "fprem";
+ break;
+ case 0xFC:
+ mnem = "frndint";
+ break;
+ case 0xFD:
+ mnem = "fscale";
+ break;
+ case 0xFE:
+ mnem = "fsin";
+ break;
+ case 0xFF:
+ mnem = "fcos";
+ break;
+ default:
+ UnimplementedInstruction();
}
}
break;
@@ -1570,7 +1550,7 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
if ((modrm_byte & 0xF8) == 0xE8) {
mnem = "fucomi";
has_register = true;
- } else if (modrm_byte == 0xE2) {
+ } else if (modrm_byte == 0xE2) {
mnem = "fclex";
} else if (modrm_byte == 0xE3) {
mnem = "fninit";
@@ -1582,34 +1562,57 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
case 0xDC:
has_register = true;
switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "fadd"; break;
- case 0xE8: mnem = "fsub"; break;
- case 0xC8: mnem = "fmul"; break;
- case 0xF8: mnem = "fdiv"; break;
- default: UnimplementedInstruction();
+ case 0xC0:
+ mnem = "fadd";
+ break;
+ case 0xE8:
+ mnem = "fsub";
+ break;
+ case 0xC8:
+ mnem = "fmul";
+ break;
+ case 0xF8:
+ mnem = "fdiv";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
case 0xDD:
has_register = true;
switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "ffree"; break;
- case 0xD8: mnem = "fstp"; break;
- default: UnimplementedInstruction();
+ case 0xC0:
+ mnem = "ffree";
+ break;
+ case 0xD8:
+ mnem = "fstp";
+ break;
+ default:
+ UnimplementedInstruction();
}
break;
case 0xDE:
- if (modrm_byte == 0xD9) {
+ if (modrm_byte == 0xD9) {
mnem = "fcompp";
} else {
has_register = true;
switch (modrm_byte & 0xF8) {
- case 0xC0: mnem = "faddp"; break;
- case 0xE8: mnem = "fsubp"; break;
- case 0xC8: mnem = "fmulp"; break;
- case 0xF8: mnem = "fdivp"; break;
- default: UnimplementedInstruction();
+ case 0xC0:
+ mnem = "faddp";
+ break;
+ case 0xE8:
+ mnem = "fsubp";
+ break;
+ case 0xC8:
+ mnem = "fmulp";
+ break;
+ case 0xF8:
+ mnem = "fdivp";
+ break;
+ default:
+ UnimplementedInstruction();
}
}
break;
@@ -1623,7 +1626,8 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
}
break;
- default: UnimplementedInstruction();
+ default:
+ UnimplementedInstruction();
}
if (has_register) {
@@ -1634,8 +1638,6 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
return 2;
}
-
-
// Handle all two-byte opcodes, which start with 0x0F.
// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
@@ -1682,7 +1684,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else if (third_byte == 0x0B) {
get_modrm(*current, &mod, &regop, &rm);
- // roundsd xmm, xmm/m64, imm8
+ // roundsd xmm, xmm/m64, imm8
AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
AppendToBuffer(",0x%x", (*current) & 3);
@@ -1769,17 +1771,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x6E) {
- AppendToBuffer("mov%c %s,",
- rex_w() ? 'q' : 'd',
+ AppendToBuffer("mov%c %s,", rex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
current += PrintRightOperand(current);
} else if (opcode == 0x6F) {
- AppendToBuffer("movdqa %s,",
- NameOfXMMRegister(regop));
+ AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x7E) {
- AppendToBuffer("mov%c ",
- rex_w() ? 'q' : 'd');
+ AppendToBuffer("mov%c ", rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else if (opcode == 0x7F) {
@@ -1824,9 +1823,9 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
const char* mnemonic;
if (opcode == 0x54) {
mnemonic = "andpd";
- } else if (opcode == 0x56) {
+ } else if (opcode == 0x56) {
mnemonic = "orpd";
- } else if (opcode == 0x57) {
+ } else if (opcode == 0x57) {
mnemonic = "xorpd";
} else if (opcode == 0x5B) {
mnemonic = "cvtps2dq";
@@ -1969,15 +1968,15 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// Convert with truncation scalar double-precision FP to integer.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvttsd2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
+ AppendToBuffer("cvttsd2si%c %s,", operand_size_code(),
+ NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x2D) {
// CVTSD2SI: Convert scalar double-precision FP to integer.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvtsd2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
+ AppendToBuffer("cvtsd2si%c %s,", operand_size_code(),
+ NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x5B) {
// CVTTPS2DQ: Convert packed single-precision FP values to packed signed
@@ -2004,20 +2003,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// Intel manual 2A, Table 3-18.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- const char* const pseudo_op[] = {
- "cmpeqsd",
- "cmpltsd",
- "cmplesd",
- "cmpunordsd",
- "cmpneqsd",
- "cmpnltsd",
- "cmpnlesd",
- "cmpordsd"
- };
- AppendToBuffer("%s %s,%s",
- pseudo_op[current[1]],
- NameOfXMMRegister(regop),
- NameOfXMMRegister(rm));
+ const char* const pseudo_op[] = {"cmpeqsd", "cmpltsd", "cmplesd",
+ "cmpunordsd", "cmpneqsd", "cmpnltsd",
+ "cmpnlesd", "cmpordsd"};
+ AppendToBuffer("%s %s,%s", pseudo_op[current[1]],
+ NameOfXMMRegister(regop), NameOfXMMRegister(rm));
current += 2;
} else if (opcode == 0xF0) {
int mod, regop, rm;
@@ -2057,8 +2047,8 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// Convert with truncation scalar single-precision FP to dword integer.
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("cvttss2si%c %s,",
- operand_size_code(), NameOfCPURegister(regop));
+ AppendToBuffer("cvttss2si%c %s,", operand_size_code(),
+ NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x70) {
int mod, regop, rm;
@@ -2339,7 +2329,6 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
}
}
-
// Disassembles the instruction at instr, and writes it into out_buffer.
int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
byte* instr) {
@@ -2518,8 +2507,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("pop ");
data += PrintRightOperand(data);
}
- }
- break;
+ } break;
case 0xFF: {
data++;
@@ -2551,8 +2539,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
AppendToBuffer("%s ", mnem);
}
data += PrintRightOperand(data);
- }
- break;
+ } break;
case 0xC7: // imm32, fall through
case 0xC6: // imm8
@@ -2578,8 +2565,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 4;
}
}
- }
- break;
+ } break;
case 0x80: {
data++;
@@ -2588,8 +2574,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
data++;
- }
- break;
+ } break;
case 0x88: // 8bit, fall through
case 0x89: // 32bit
@@ -2607,8 +2592,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += PrintRightOperand(data);
AppendToBuffer(",%s", NameOfCPURegister(regop));
}
- }
- break;
+ } break;
case 0x90:
case 0x91:
@@ -2624,13 +2608,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
} else if (reg == 0) {
AppendToBuffer("nop"); // Common name for xchg rax,rax.
} else {
- AppendToBuffer("xchg%c rax,%s",
- operand_size_code(),
+ AppendToBuffer("xchg%c rax,%s", operand_size_code(),
NameOfCPURegister(reg));
}
data++;
- }
- break;
+ } break;
case 0xB0:
case 0xB1:
case 0xB2:
@@ -2653,13 +2635,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
bool is_32bit = (opcode >= 0xB8);
int reg = (opcode & 0x7) | (rex_b() ? 8 : 0);
if (is_32bit) {
- AppendToBuffer("mov%c %s,",
- operand_size_code(),
+ AppendToBuffer("mov%c %s,", operand_size_code(),
NameOfCPURegister(reg));
data += PrintImmediate(data, OPERAND_DOUBLEWORD_SIZE);
} else {
- AppendToBuffer("movb %s,",
- NameOfByteCPURegister(reg));
+ AppendToBuffer("movb %s,", NameOfByteCPURegister(reg));
data += PrintImmediate(data, OPERAND_BYTE_SIZE);
}
break;
@@ -2691,8 +2671,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
switch (operand_size()) {
case OPERAND_DOUBLEWORD_SIZE: {
const char* memory_location = NameOfAddress(
- reinterpret_cast<byte*>(
- *reinterpret_cast<int32_t*>(data + 1)));
+ reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1)));
if (*data == 0xA1) { // Opcode 0xA1
AppendToBuffer("movzxlq rax,(%s)", memory_location);
} else { // Opcode 0xA3
@@ -2703,8 +2682,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
case OPERAND_QUADWORD_SIZE: {
// New x64 instruction mov rax,(imm_64).
- const char* memory_location = NameOfAddress(
- *reinterpret_cast<byte**>(data + 1));
+ const char* memory_location =
+ NameOfAddress(*reinterpret_cast<byte**>(data + 1));
if (*data == 0xA1) { // Opcode 0xA1
AppendToBuffer("movq rax,(%s)", memory_location);
} else { // Opcode 0xA3
@@ -2780,7 +2759,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x3C:
AppendToBuffer("cmp al,0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data +=2;
+ data += 2;
break;
default:
@@ -2805,71 +2784,53 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
outp += v8::internal::SNPrintF(out_buffer + outp, " ");
}
- outp += v8::internal::SNPrintF(out_buffer + outp, " %s",
- tmp_buffer_.start());
+ outp += v8::internal::SNPrintF(out_buffer + outp, " %s", tmp_buffer_.begin());
return instr_len;
}
-
//------------------------------------------------------------------------------
-
static const char* const cpu_regs[16] = {
- "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-};
-
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"};
static const char* const byte_cpu_regs[16] = {
- "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
- "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"
-};
-
+ "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
+ "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"};
static const char* const xmm_regs[16] = {
- "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
- "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
-};
-
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"};
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
- return tmp_buffer_.start();
+ return tmp_buffer_.begin();
}
-
const char* NameConverter::NameOfConstant(byte* addr) const {
return NameOfAddress(addr);
}
-
const char* NameConverter::NameOfCPURegister(int reg) const {
- if (0 <= reg && reg < 16)
- return cpu_regs[reg];
+ if (0 <= reg && reg < 16) return cpu_regs[reg];
return "noreg";
}
-
const char* NameConverter::NameOfByteCPURegister(int reg) const {
- if (0 <= reg && reg < 16)
- return byte_cpu_regs[reg];
+ if (0 <= reg && reg < 16) return byte_cpu_regs[reg];
return "noreg";
}
-
const char* NameConverter::NameOfXMMRegister(int reg) const {
- if (0 <= reg && reg < 16)
- return xmm_regs[reg];
+ if (0 <= reg && reg < 16) return xmm_regs[reg];
return "noxmmreg";
}
-
const char* NameConverter::NameInCode(byte* addr) const {
// X64 does not embed debug strings at the moment.
UNREACHABLE();
}
-
//------------------------------------------------------------------------------
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
@@ -2879,9 +2840,7 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
}
// The X64 assembler does not use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) {
- return -1;
-}
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
UnimplementedOpcodeAction unimplemented_action) {
@@ -2901,7 +2860,7 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
for (int i = 6 - static_cast<int>(pc - prev_pc); i >= 0; i--) {
fprintf(f, " ");
}
- fprintf(f, " %s\n", buffer.start());
+ fprintf(f, " %s\n", buffer.begin());
}
}
diff --git a/deps/v8/src/x64/eh-frame-x64.cc b/deps/v8/src/diagnostics/x64/eh-frame-x64.cc
index 45f758a774..f8c5447126 100644
--- a/deps/v8/src/x64/eh-frame-x64.cc
+++ b/deps/v8/src/diagnostics/x64/eh-frame-x64.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/eh-frame.h"
+#include "src/diagnostics/eh-frame.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
new file mode 100644
index 0000000000..a62d530e1a
--- /dev/null
+++ b/deps/v8/src/execution/OWNERS
@@ -0,0 +1,10 @@
+binji@chromium.org
+bmeurer@chromium.org
+ishell@chromium.org
+jgruber@chromium.org
+jkummerow@chromium.org
+mstarzinger@chromium.org
+petermarshall@chromium.org
+szuend@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/arguments-inl.h b/deps/v8/src/execution/arguments-inl.h
index e596d44117..ecdc4ef359 100644
--- a/deps/v8/src/arguments-inl.h
+++ b/deps/v8/src/execution/arguments-inl.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARGUMENTS_INL_H_
-#define V8_ARGUMENTS_INL_H_
+#ifndef V8_EXECUTION_ARGUMENTS_INL_H_
+#define V8_EXECUTION_ARGUMENTS_INL_H_
-#include "src/arguments.h"
+#include "src/execution/arguments.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h" // TODO(jkummerow): Just smi-inl.h.
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h" // TODO(jkummerow): Just smi-inl.h.
namespace v8 {
namespace internal {
@@ -22,9 +22,9 @@ int Arguments::smi_at(int index) {
return Smi::ToInt(Object(*address_of_arg_at(index)));
}
-double Arguments::number_at(int index) { return (*this)[index]->Number(); }
+double Arguments::number_at(int index) { return (*this)[index].Number(); }
} // namespace internal
} // namespace v8
-#endif // V8_ARGUMENTS_INL_H_
+#endif // V8_EXECUTION_ARGUMENTS_INL_H_
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/execution/arguments.cc
index 815f5de577..f24447f137 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/execution/arguments.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments.h"
+#include "src/execution/arguments.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/execution/arguments.h
index 003457a8cc..8f07dd9db3 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/execution/arguments.h
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARGUMENTS_H_
-#define V8_ARGUMENTS_H_
+#ifndef V8_EXECUTION_ARGUMENTS_H_
+#define V8_EXECUTION_ARGUMENTS_H_
-#include "src/allocation.h"
-#include "src/handles.h"
-#include "src/objects.h"
+#include "src/handles/handles.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects.h"
#include "src/objects/slots.h"
#include "src/tracing/trace-event.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -46,7 +47,7 @@ class Arguments {
inline double number_at(int index);
inline void set_at(int index, Object value) {
- *address_of_arg_at(index) = value->ptr();
+ *address_of_arg_at(index) = value.ptr();
}
inline FullObjectSlot slot_at(int index) {
@@ -100,7 +101,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
} \
\
Type Name(int args_length, Address* args_object, Isolate* isolate) { \
- DCHECK(isolate->context().is_null() || isolate->context()->IsContext()); \
+ DCHECK(isolate->context().is_null() || isolate->context().IsContext()); \
CLOBBER_DOUBLE_REGISTERS(); \
if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) { \
return Stats_##Name(args_length, args_object, isolate); \
@@ -111,7 +112,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
\
static InternalType __RT_impl_##Name(Arguments args, Isolate* isolate)
-#define CONVERT_OBJECT(x) (x)->ptr()
+#define CONVERT_OBJECT(x) (x).ptr()
#define CONVERT_OBJECTPAIR(x) (x)
#define RUNTIME_FUNCTION(Name) \
@@ -124,4 +125,4 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
} // namespace internal
} // namespace v8
-#endif // V8_ARGUMENTS_H_
+#endif // V8_EXECUTION_ARGUMENTS_H_
diff --git a/deps/v8/src/arm/frame-constants-arm.cc b/deps/v8/src/execution/arm/frame-constants-arm.cc
index f1cb8211b8..af04813301 100644
--- a/deps/v8/src/arm/frame-constants-arm.cc
+++ b/deps/v8/src/execution/arm/frame-constants-arm.cc
@@ -4,11 +4,11 @@
#if V8_TARGET_ARCH_ARM
-#include "src/arm/frame-constants-arm.h"
+#include "src/execution/arm/frame-constants-arm.h"
-#include "src/assembler-inl.h"
-#include "src/frame-constants.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm/frame-constants-arm.h b/deps/v8/src/execution/arm/frame-constants-arm.h
index 3072caf9af..10b4aa28fc 100644
--- a/deps/v8/src/arm/frame-constants-arm.h
+++ b/deps/v8/src/execution/arm/frame-constants-arm.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM_FRAME_CONSTANTS_ARM_H_
-#define V8_ARM_FRAME_CONSTANTS_ARM_H_
+#ifndef V8_EXECUTION_ARM_FRAME_CONSTANTS_ARM_H_
+#define V8_EXECUTION_ARM_FRAME_CONSTANTS_ARM_H_
#include "src/base/macros.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -68,4 +68,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_ARM_FRAME_CONSTANTS_ARM_H_
+#endif // V8_EXECUTION_ARM_FRAME_CONSTANTS_ARM_H_
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index a5581a51d3..0b3ebcf879 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/simulator-arm.h"
+#include "src/execution/arm/simulator-arm.h"
#if defined(USE_SIMULATOR)
@@ -10,17 +10,18 @@
#include <stdlib.h>
#include <cmath>
-#include "src/arm/constants-arm.h"
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/codegen/arm/constants-arm.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/heap/combined-heap.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
-#include "src/utils.h"
-#include "src/vector.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
// Only build the simulator if not compiling for real ARM hardware.
namespace v8 {
@@ -39,15 +40,15 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
// code.
class ArmDebugger {
public:
- explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
+ explicit ArmDebugger(Simulator* sim) : sim_(sim) {}
void Stop(Instruction* instr);
void Debug();
private:
static const Instr kBreakpointInstr =
- (al | (7*B25) | (1*B24) | kBreakpoint);
- static const Instr kNopInstr = (al | (13*B21));
+ (al | (7 * B25) | (1 * B24) | kBreakpoint);
+ static const Instr kNopInstr = (al | (13 * B21));
Simulator* sim_;
@@ -92,12 +93,10 @@ double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
return sim_->get_double_from_register_pair(regnum);
}
-
double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
return sim_->get_double_from_d_register(regnum).get_scalar();
}
-
bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
int regnum = Registers::Number(desc);
if (regnum != kNoRegister) {
@@ -113,7 +112,6 @@ bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
return false;
}
-
bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
@@ -124,7 +122,6 @@ bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
return false;
}
-
bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
@@ -135,7 +132,6 @@ bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
return false;
}
-
bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != nullptr) {
@@ -150,7 +146,6 @@ bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
return true;
}
-
bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
@@ -161,21 +156,18 @@ bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
return true;
}
-
void ArmDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-
void ArmDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
-
void ArmDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@@ -189,7 +181,7 @@ void ArmDebugger::Debug() {
char cmd[COMMAND_SIZE + 1];
char arg1[ARG_SIZE + 1];
char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
+ char* argv[3] = {cmd, arg1, arg2};
// make sure to have a proper terminating character if reaching the limit
cmd[COMMAND_SIZE] = 0;
@@ -206,9 +198,8 @@ void ArmDebugger::Debug() {
disasm::Disassembler dasm(converter);
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.begin());
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
@@ -246,8 +237,7 @@ void ArmDebugger::Debug() {
value = GetRegisterValue(i);
PrintF("%3s: 0x%08x %10d", RegisterName(Register::from_code(i)),
value, value);
- if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
- i < 8 &&
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
PrintF(" (%f)\n", dvalue);
@@ -280,8 +270,8 @@ void ArmDebugger::Debug() {
} else {
PrintF("print <register>\n");
}
- } else if ((strcmp(cmd, "po") == 0)
- || (strcmp(cmd, "printobject") == 0)) {
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
int32_t value;
StdoutStream os;
@@ -289,7 +279,7 @@ void ArmDebugger::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
@@ -332,12 +322,13 @@ void ArmDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
PrintF(" (");
if (obj.IsSmi()) {
PrintF("smi %d", Smi::ToInt(obj));
} else {
- obj->ShortPrint();
+ obj.ShortPrint();
}
PrintF(")");
}
@@ -389,7 +380,7 @@ void ArmDebugger::Debug() {
prev = cur;
cur += dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
- buffer.start());
+ buffer.begin());
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
@@ -550,12 +541,10 @@ bool Simulator::ICacheMatch(void* one, void* two) {
return one == two;
}
-
static uint32_t ICacheHash(void* key) {
return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
}
-
static bool AllOnOnePage(uintptr_t start, int size) {
intptr_t start_page = (start & ~CachePage::kPageMask);
intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
@@ -602,7 +591,6 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
return reinterpret_cast<CachePage*>(entry->value);
}
-
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, int size) {
@@ -638,11 +626,10 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
}
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
- size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
+ size_t stack_size = 1 * 1024 * 1024; // allocate 1MB for stack
stack_ = reinterpret_cast<char*>(malloc(stack_size));
pc_modified_ = false;
icount_ = 0;
@@ -696,7 +683,6 @@ Simulator::~Simulator() {
free(stack_);
}
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
@@ -712,7 +698,6 @@ Simulator* Simulator::current(Isolate* isolate) {
return sim;
}
-
// Sets the register in the architecture state. It will also deal with updating
// Simulator internal state for special registers such as PC.
void Simulator::set_register(int reg, int32_t value) {
@@ -723,7 +708,6 @@ void Simulator::set_register(int reg, int32_t value) {
registers_[reg] = value;
}
-
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register.
int32_t Simulator::get_register(int reg) const {
@@ -735,7 +719,6 @@ int32_t Simulator::get_register(int reg) const {
return registers_[reg] + ((reg == pc) ? Instruction::kPcLoadDelta : 0);
}
-
double Simulator::get_double_from_register_pair(int reg) {
DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
@@ -745,41 +728,35 @@ double Simulator::get_double_from_register_pair(int reg) {
char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
- return(dm_val);
+ return (dm_val);
}
-
void Simulator::set_register_pair_from_double(int reg, double* value) {
DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
memcpy(registers_ + reg, value, sizeof(*value));
}
-
void Simulator::set_dw_register(int dreg, const int* dbl) {
DCHECK((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
registers_[dreg + 1] = dbl[1];
}
-
void Simulator::get_d_register(int dreg, uint64_t* value) {
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
}
-
void Simulator::set_d_register(int dreg, const uint64_t* value) {
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
}
-
void Simulator::get_d_register(int dreg, uint32_t* value) {
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
}
-
void Simulator::set_d_register(int dreg, const uint32_t* value) {
DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
@@ -807,17 +784,12 @@ void Simulator::set_pc(int32_t value) {
registers_[pc] = value;
}
-
bool Simulator::has_bad_pc() const {
return ((registers_[pc] == bad_lr) || (registers_[pc] == end_sim_pc));
}
-
// Raw access to the PC register without the special adjustment when reading.
-int32_t Simulator::get_pc() const {
- return registers_[pc];
-}
-
+int32_t Simulator::get_pc() const { return registers_[pc]; }
// Getting from and setting into VFP registers.
void Simulator::set_s_register(int sreg, unsigned int value) {
@@ -825,14 +797,12 @@ void Simulator::set_s_register(int sreg, unsigned int value) {
vfp_registers_[sreg] = value;
}
-
unsigned int Simulator::get_s_register(int sreg) const {
DCHECK((sreg >= 0) && (sreg < num_s_registers));
return vfp_registers_[sreg];
}
-
-template<class InputType, int register_size>
+template <class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
unsigned bytes = register_size * sizeof(vfp_registers_[0]);
DCHECK_EQ(sizeof(InputType), bytes);
@@ -843,8 +813,7 @@ void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
memcpy(&vfp_registers_[reg_index * register_size], &value, bytes);
}
-
-template<class ReturnType, int register_size>
+template <class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
unsigned bytes = register_size * sizeof(vfp_registers_[0]);
DCHECK_EQ(sizeof(ReturnType), bytes);
@@ -903,7 +872,6 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
}
}
-
// The return value is either in r0/r1 or d0.
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
@@ -919,7 +887,6 @@ void Simulator::SetFpResult(const double& result) {
}
}
-
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
registers_[2] = 0x50BAD4U;
@@ -1132,7 +1099,6 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
-
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
@@ -1140,50 +1106,57 @@ void Simulator::Format(Instruction* instr, const char* format) {
UNIMPLEMENTED();
}
-
// Checks if the current instruction should be executed based on its
// condition bits.
bool Simulator::ConditionallyExecute(Instruction* instr) {
switch (instr->ConditionField()) {
- case eq: return z_flag_;
- case ne: return !z_flag_;
- case cs: return c_flag_;
- case cc: return !c_flag_;
- case mi: return n_flag_;
- case pl: return !n_flag_;
- case vs: return v_flag_;
- case vc: return !v_flag_;
- case hi: return c_flag_ && !z_flag_;
- case ls: return !c_flag_ || z_flag_;
- case ge: return n_flag_ == v_flag_;
- case lt: return n_flag_ != v_flag_;
- case gt: return !z_flag_ && (n_flag_ == v_flag_);
- case le: return z_flag_ || (n_flag_ != v_flag_);
- case al: return true;
- default: UNREACHABLE();
+ case eq:
+ return z_flag_;
+ case ne:
+ return !z_flag_;
+ case cs:
+ return c_flag_;
+ case cc:
+ return !c_flag_;
+ case mi:
+ return n_flag_;
+ case pl:
+ return !n_flag_;
+ case vs:
+ return v_flag_;
+ case vc:
+ return !v_flag_;
+ case hi:
+ return c_flag_ && !z_flag_;
+ case ls:
+ return !c_flag_ || z_flag_;
+ case ge:
+ return n_flag_ == v_flag_;
+ case lt:
+ return n_flag_ != v_flag_;
+ case gt:
+ return !z_flag_ && (n_flag_ == v_flag_);
+ case le:
+ return z_flag_ || (n_flag_ != v_flag_);
+ case al:
+ return true;
+ default:
+ UNREACHABLE();
}
return false;
}
-
// Calculate and set the Negative and Zero flags.
void Simulator::SetNZFlags(int32_t val) {
n_flag_ = (val < 0);
z_flag_ = (val == 0);
}
-
// Set the Carry flag.
-void Simulator::SetCFlag(bool val) {
- c_flag_ = val;
-}
-
+void Simulator::SetCFlag(bool val) { c_flag_ = val; }
// Set the oVerflow flag.
-void Simulator::SetVFlag(bool val) {
- v_flag_ = val;
-}
-
+void Simulator::SetVFlag(bool val) { v_flag_ = val; }
// Calculate C flag value for additions.
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
@@ -1195,7 +1168,6 @@ bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
}
-
// Calculate C flag value for subtractions.
bool Simulator::BorrowFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
@@ -1205,18 +1177,17 @@ bool Simulator::BorrowFrom(int32_t left, int32_t right, int32_t carry) {
(!carry && (((uright + 1) > uleft) || (uright > (uleft - 1))));
}
-
// Calculate V flag value for additions and subtractions.
-bool Simulator::OverflowFrom(int32_t alu_out,
- int32_t left, int32_t right, bool addition) {
+bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
+ bool addition) {
bool overflow;
if (addition) {
- // operands have the same sign
+ // operands have the same sign
overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
// and operands and result have different sign
&& ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
} else {
- // operands have different signs
+ // operands have different signs
overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
// and first operand and result have different signs
&& ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
@@ -1224,7 +1195,6 @@ bool Simulator::OverflowFrom(int32_t alu_out,
return overflow;
}
-
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(float val1, float val2) {
if (std::isnan(val1) || std::isnan(val2)) {
@@ -1252,14 +1222,13 @@ void Simulator::Compute_FPSCR_Flags(float val1, float val2) {
}
}
-
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
if (std::isnan(val1) || std::isnan(val2)) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
c_flag_FPSCR_ = true;
v_flag_FPSCR_ = true;
- // All non-NaN cases.
+ // All non-NaN cases.
} else if (val1 == val2) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = true;
@@ -1279,7 +1248,6 @@ void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
}
}
-
void Simulator::Copy_FPSCR_to_APSR() {
n_flag_ = n_flag_FPSCR_;
z_flag_ = z_flag_FPSCR_;
@@ -1287,7 +1255,6 @@ void Simulator::Copy_FPSCR_to_APSR() {
v_flag_ = v_flag_FPSCR_;
}
-
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with register.
int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
@@ -1445,7 +1412,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
return result;
}
-
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with immediate.
int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
@@ -1456,7 +1422,6 @@ int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
return imm;
}
-
static int count_bits(int bit_vector) {
int count = 0;
while (bit_vector != 0) {
@@ -1468,12 +1433,8 @@ static int count_bits(int bit_vector) {
return count;
}
-
-int32_t Simulator::ProcessPU(Instruction* instr,
- int num_regs,
- int reg_size,
- intptr_t* start_address,
- intptr_t* end_address) {
+int32_t Simulator::ProcessPU(Instruction* instr, int num_regs, int reg_size,
+ intptr_t* start_address, intptr_t* end_address) {
int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
switch (instr->PUField()) {
@@ -1501,13 +1462,11 @@ int32_t Simulator::ProcessPU(Instruction* instr,
}
default: {
UNREACHABLE();
- break;
}
}
return rn_val;
}
-
// Addressing Mode 4 - Load and Store Multiple
void Simulator::HandleRList(Instruction* instr, bool load) {
int rlist = instr->RlistValue();
@@ -1540,7 +1499,6 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
}
}
-
// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
void Simulator::HandleVList(Instruction* instr) {
VFPRegPrecision precision =
@@ -1594,34 +1552,33 @@ void Simulator::HandleVList(Instruction* instr) {
}
}
-
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
// 64-bit value. With the code below we assume that all runtime calls return
// 64 bits of result. If they don't, the r1 result register contains a bogus
// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg1,
- int32_t arg2, int32_t arg3,
- int32_t arg4, int32_t arg5,
- int32_t arg6, int32_t arg7,
- int32_t arg8);
+using SimulatorRuntimeCall = int64_t (*)(int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5,
+ int32_t arg6, int32_t arg7,
+ int32_t arg8);
// These prototypes handle the four types of FP calls.
-typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPCall)(double darg0);
-typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
+using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
+using SimulatorRuntimeFPCall = double (*)(double darg0);
+using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, void* arg1);
+using SimulatorRuntimeDirectApiCall = void (*)(int32_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(int32_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
-typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
-typedef void (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, void* arg2);
+using SimulatorRuntimeDirectGetterCall = void (*)(int32_t arg0, int32_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(int32_t arg0, int32_t arg1,
+ void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1632,8 +1589,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// Check if stack is aligned. Error if not aligned is reported below to
// include information on the function called.
bool stack_aligned =
- (get_register(sp)
- & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
+ (get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
+ 0;
Redirection* redirection = Redirection::FromInstruction(instr);
int32_t arg0 = get_register(r0);
int32_t arg1 = get_register(r1);
@@ -1648,10 +1605,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
STATIC_ASSERT(kMaxCParameters == 9);
bool fp_call =
- (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
@@ -1665,27 +1622,27 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
GetFpArgs(&dval0, &dval1, &ival);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
SimulatorRuntimeCall generic_target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
+ reinterpret_cast<SimulatorRuntimeCall>(external);
switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- PrintF("Call to host function at %p with args %f, %f",
- reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
- dval0, dval1);
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- PrintF("Call to host function at %p with arg %f",
- reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
- dval0);
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- PrintF("Call to host function at %p with args %f, %d",
- reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
- dval0, ival);
- break;
- default:
- UNREACHABLE();
- break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Call to host function at %p with args %f, %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ PrintF("Call to host function at %p with arg %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Call to host function at %p with args %f, %d",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
@@ -1694,58 +1651,58 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
switch (redirection->type()) {
- case ExternalReference::BUILTIN_COMPARE_CALL: {
- SimulatorRuntimeCompareCall target =
- reinterpret_cast<SimulatorRuntimeCompareCall>(external);
- iresult = target(dval0, dval1);
- set_register(r0, static_cast<int32_t>(iresult));
- set_register(r1, static_cast<int32_t>(iresult >> 32));
- break;
- }
- case ExternalReference::BUILTIN_FP_FP_CALL: {
- SimulatorRuntimeFPFPCall target =
- reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
- dresult = target(dval0, dval1);
- SetFpResult(dresult);
- break;
- }
- case ExternalReference::BUILTIN_FP_CALL: {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- dresult = target(dval0);
- SetFpResult(dresult);
- break;
- }
- case ExternalReference::BUILTIN_FP_INT_CALL: {
- SimulatorRuntimeFPIntCall target =
- reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
- dresult = target(dval0, ival);
- SetFpResult(dresult);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_COMPARE_CALL:
- PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(r0, static_cast<int32_t>(iresult));
+ set_register(r1, static_cast<int32_t>(iresult >> 32));
break;
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_FP_CALL:
- case ExternalReference::BUILTIN_FP_INT_CALL:
- PrintF("Returned %f\n", dresult);
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
default:
UNREACHABLE();
break;
+ }
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x",
- reinterpret_cast<void*>(external), arg0);
+ reinterpret_cast<void*>(external), arg0);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -1755,11 +1712,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
target(arg0);
- } else if (
- redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
- reinterpret_cast<void*>(external), arg0, arg1);
+ reinterpret_cast<void*>(external), arg0, arg1);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -1769,11 +1725,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
target(arg0, Redirection::ReverseRedirection(arg1));
- } else if (
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x",
- reinterpret_cast<void*>(external), arg0, arg1);
+ reinterpret_cast<void*>(external), arg0, arg1);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -1783,11 +1738,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
target(arg0, arg1);
- } else if (
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ } else if (redirection->type() ==
+ ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF("Call to host function at %p args %08x %08x %08x",
- reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
@@ -1795,8 +1750,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
CHECK(stack_aligned);
SimulatorRuntimeProfilingGetterCall target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
- external);
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
} else {
// builtin call.
@@ -1857,7 +1811,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
}
-
float Simulator::canonicalizeNaN(float value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
@@ -1898,21 +1851,18 @@ bool Simulator::isStopInstruction(Instruction* instr) {
return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
}
-
bool Simulator::isWatchedStop(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
return code < kNumOfWatchedStops;
}
-
bool Simulator::isEnabledStop(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
- !(watched_stops_[code].count & kStopDisabledBit);
+ !(watched_stops_[code].count & kStopDisabledBit);
}
-
void Simulator::EnableStop(uint32_t code) {
DCHECK(isWatchedStop(code));
if (!isEnabledStop(code)) {
@@ -1920,7 +1870,6 @@ void Simulator::EnableStop(uint32_t code) {
}
}
-
void Simulator::DisableStop(uint32_t code) {
DCHECK(isWatchedStop(code));
if (isEnabledStop(code)) {
@@ -1928,13 +1877,14 @@ void Simulator::DisableStop(uint32_t code) {
}
}
-
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
- PrintF("Stop counter for code %i has overflowed.\n"
- "Enabling this code and reseting the counter to 0.\n", code);
+ PrintF(
+ "Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
watched_stops_[code].count = 0;
EnableStop(code);
} else {
@@ -1942,7 +1892,6 @@ void Simulator::IncreaseStopCounter(uint32_t code) {
}
}
-
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
@@ -1954,17 +1903,16 @@ void Simulator::PrintStopInfo(uint32_t code) {
// Don't print the state of unused breakpoints.
if (count != 0) {
if (watched_stops_[code].desc) {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
- code, code, state, count, watched_stops_[code].desc);
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code,
+ state, count, watched_stops_[code].desc);
} else {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
- code, code, state, count);
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
}
}
}
}
-
// Handle execution based on instruction types.
// Instruction types 0 and 1 are both rolled into one function because they
@@ -2029,14 +1977,14 @@ void Simulator::DecodeType01(Instruction* instr) {
int32_t hi_res = 0;
int32_t lo_res = 0;
if (instr->Bit(22) == 1) {
- int64_t left_op = static_cast<int32_t>(rm_val);
+ int64_t left_op = static_cast<int32_t>(rm_val);
int64_t right_op = static_cast<int32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
lo_res = static_cast<int32_t>(result & 0xFFFFFFFF);
} else {
// unsigned multiply
- uint64_t left_op = static_cast<uint32_t>(rm_val);
+ uint64_t left_op = static_cast<uint32_t>(rm_val);
uint64_t right_op = static_cast<uint32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
@@ -2232,7 +2180,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (instr->HasH()) {
// The strd instruction.
int32_t value1 = get_register(rd);
- int32_t value2 = get_register(rd+1);
+ int32_t value2 = get_register(rd + 1);
WriteDW(addr, value1, value2);
} else {
// The ldrd instruction.
@@ -2566,7 +2514,6 @@ void Simulator::DecodeType01(Instruction* instr) {
}
}
-
void Simulator::DecodeType2(Instruction* instr) {
int rd = instr->RdValue();
int rn = instr->RnValue();
@@ -2610,7 +2557,6 @@ void Simulator::DecodeType2(Instruction* instr) {
}
default: {
UNREACHABLE();
- break;
}
}
if (instr->HasB()) {
@@ -2630,7 +2576,6 @@ void Simulator::DecodeType2(Instruction* instr) {
}
}
-
void Simulator::DecodeType3(Instruction* instr) {
int rd = instr->RdValue();
int rn = instr->RnValue();
@@ -3032,7 +2977,6 @@ void Simulator::DecodeType3(Instruction* instr) {
}
default: {
UNREACHABLE();
- break;
}
}
if (instr->HasB()) {
@@ -3052,7 +2996,6 @@ void Simulator::DecodeType3(Instruction* instr) {
}
}
-
void Simulator::DecodeType4(Instruction* instr) {
DCHECK_EQ(instr->Bit(22), 0); // only allowed to be set in privileged mode
if (instr->HasL()) {
@@ -3064,7 +3007,6 @@ void Simulator::DecodeType4(Instruction* instr) {
}
}
-
void Simulator::DecodeType5(Instruction* instr) {
// Format(instr, "b'l'cond 'target");
int off = (instr->SImmed24Value() << 2);
@@ -3076,12 +3018,10 @@ void Simulator::DecodeType5(Instruction* instr) {
set_pc(pc_reg + off);
}
-
void Simulator::DecodeType6(Instruction* instr) {
DecodeType6CoprocessorIns(instr);
}
-
void Simulator::DecodeType7(Instruction* instr) {
if (instr->Bit(24) == 1) {
SoftwareInterrupt(instr);
@@ -3100,7 +3040,6 @@ void Simulator::DecodeType7(Instruction* instr) {
}
}
-
// void Simulator::DecodeTypeVFP(Instruction* instr)
// The Following ARMv7 VFPv instructions are currently supported.
// vmov :Sn = Rt
@@ -3127,7 +3066,7 @@ void Simulator::DecodeType7(Instruction* instr) {
// vmrs
// vdup.size Qd, Rt.
void Simulator::DecodeTypeVFP(Instruction* instr) {
- DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
+ DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
DCHECK_EQ(instr->Bits(11, 9), 0x5);
// Obtain single precision register codes.
int m = instr->VFPMRegValue(kSinglePrecision);
@@ -3335,8 +3274,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
UNIMPLEMENTED(); // Not used by V8.
}
} else {
- if ((instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x0)) {
+ if ((instr->VCValue() == 0x0) && (instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
if (instr->Bit(23) == 0) {
@@ -3449,33 +3387,24 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
UNREACHABLE(); // Not used by V8.
}
}
- } else if ((instr->VLValue() == 0x1) &&
- (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
+ } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) && (instr->Bits(19, 16) == 0x1)) {
// vmrs
uint32_t rt = instr->RtValue();
if (rt == 0xF) {
Copy_FPSCR_to_APSR();
} else {
// Emulate FPSCR from the Simulator flags.
- uint32_t fpscr = (n_flag_FPSCR_ << 31) |
- (z_flag_FPSCR_ << 30) |
- (c_flag_FPSCR_ << 29) |
- (v_flag_FPSCR_ << 28) |
+ uint32_t fpscr = (n_flag_FPSCR_ << 31) | (z_flag_FPSCR_ << 30) |
+ (c_flag_FPSCR_ << 29) | (v_flag_FPSCR_ << 28) |
(FPSCR_default_NaN_mode_ << 25) |
- (inexact_vfp_flag_ << 4) |
- (underflow_vfp_flag_ << 3) |
- (overflow_vfp_flag_ << 2) |
- (div_zero_vfp_flag_ << 1) |
- (inv_op_vfp_flag_ << 0) |
- (FPSCR_rounding_mode_);
+ (inexact_vfp_flag_ << 4) | (underflow_vfp_flag_ << 3) |
+ (overflow_vfp_flag_ << 2) | (div_zero_vfp_flag_ << 1) |
+ (inv_op_vfp_flag_ << 0) | (FPSCR_rounding_mode_);
set_register(rt, fpscr);
}
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x0) &&
- (instr->VAValue() == 0x7) &&
- (instr->Bits(19, 16) == 0x1)) {
+ } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x0) &&
+ (instr->VAValue() == 0x7) && (instr->Bits(19, 16) == 0x1)) {
// vmsr
uint32_t rt = instr->RtValue();
if (rt == pc) {
@@ -3493,7 +3422,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
div_zero_vfp_flag_ = (rt_value >> 1) & 1;
inv_op_vfp_flag_ = (rt_value >> 0) & 1;
FPSCR_rounding_mode_ =
- static_cast<VFPRoundingMode>((rt_value) & kVFPRoundingModeMask);
+ static_cast<VFPRoundingMode>((rt_value)&kVFPRoundingModeMask);
}
} else {
UNIMPLEMENTED(); // Not used by V8.
@@ -3545,7 +3474,6 @@ void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
}
}
-
void Simulator::DecodeVCMP(Instruction* instr) {
DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
DCHECK(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
@@ -3596,7 +3524,6 @@ void Simulator::DecodeVCMP(Instruction* instr) {
}
}
-
void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
DCHECK((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
@@ -3620,9 +3547,7 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
}
}
-bool get_inv_op_vfp_flag(VFPRoundingMode mode,
- double val,
- bool unsigned_) {
+bool get_inv_op_vfp_flag(VFPRoundingMode mode, double val, bool unsigned_) {
DCHECK((mode == RN) || (mode == RM) || (mode == RZ));
double max_uint = static_cast<double>(0xFFFFFFFFu);
double max_int = static_cast<double>(kMaxInt);
@@ -3637,28 +3562,21 @@ bool get_inv_op_vfp_flag(VFPRoundingMode mode,
// exactly represented by ieee-754 64bit floating-point values.
switch (mode) {
case RN:
- return unsigned_ ? (val >= (max_uint + 0.5)) ||
- (val < -0.5)
- : (val >= (max_int + 0.5)) ||
- (val < (min_int - 0.5));
+ return unsigned_ ? (val >= (max_uint + 0.5)) || (val < -0.5)
+ : (val >= (max_int + 0.5)) || (val < (min_int - 0.5));
case RM:
- return unsigned_ ? (val >= (max_uint + 1.0)) ||
- (val < 0)
- : (val >= (max_int + 1.0)) ||
- (val < min_int);
+ return unsigned_ ? (val >= (max_uint + 1.0)) || (val < 0)
+ : (val >= (max_int + 1.0)) || (val < min_int);
case RZ:
- return unsigned_ ? (val >= (max_uint + 1.0)) ||
- (val <= -1)
- : (val >= (max_int + 1.0)) ||
- (val <= (min_int - 1.0));
+ return unsigned_ ? (val >= (max_uint + 1.0)) || (val <= -1)
+ : (val >= (max_int + 1.0)) || (val <= (min_int - 1.0));
default:
UNREACHABLE();
}
}
-
// We call this function only if we had a vfp invalid exception.
// It returns the correct saturated value.
int VFPConversionSaturate(double val, bool unsigned_res) {
@@ -3727,8 +3645,8 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
// Conversion between floating-point and integer.
bool to_integer = (instr->Bit(18) == 1);
- VFPRegPrecision src_precision = (instr->SzValue() == 1) ? kDoublePrecision
- : kSinglePrecision;
+ VFPRegPrecision src_precision =
+ (instr->SzValue() == 1) ? kDoublePrecision : kSinglePrecision;
if (to_integer) {
// We are playing with code close to the C++ standard's limits below,
@@ -3743,8 +3661,7 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
// Bit 7 in vcvt instructions indicates if we should use the FPSCR rounding
// mode or the default Round to Zero mode.
- VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
- : RZ;
+ VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_ : RZ;
DCHECK((mode == RM) || (mode == RZ) || (mode == RN));
bool unsigned_integer = (instr->Bit(16) == 0);
@@ -3784,7 +3701,6 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
}
}
-
// void Simulator::DecodeType6CoprocessorIns(Instruction* instr)
// Decode Type 6 coprocessor instructions.
// Dm = vmov(Rt, Rt2)
@@ -3848,7 +3764,7 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
set_register(rt, data[0]);
set_register(rn, data[1]);
} else {
- int32_t data[] = { get_register(rt), get_register(rn) };
+ int32_t data[] = {get_register(rt), get_register(rn)};
set_d_register(vm, reinterpret_cast<uint32_t*>(data));
}
}
@@ -5662,7 +5578,6 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
}
-
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -5674,10 +5589,9 @@ void Simulator::InstructionDecode(Instruction* instr) {
disasm::Disassembler dasm(converter);
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(instr));
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(instr),
- buffer.start());
+ buffer.begin());
}
if (instr->ConditionField() == kSpecialCondition) {
DecodeSpecialCondition(instr);
@@ -5857,7 +5771,6 @@ intptr_t Simulator::CallFPImpl(Address entry, double d0, double d1) {
return get_register(r0);
}
-
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
@@ -5866,7 +5779,6 @@ uintptr_t Simulator::PushAddress(uintptr_t address) {
return new_sp;
}
-
uintptr_t Simulator::PopAddress() {
int current_sp = get_register(sp);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
@@ -6074,6 +5986,8 @@ void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
processor->next_ = nullptr;
}
+#undef SScanF
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/execution/arm/simulator-arm.h
index 273281416a..0a21b30ac3 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/execution/arm/simulator-arm.h
@@ -9,22 +9,22 @@
// which will start execution in the Simulator or forwards to the real entry
// on a ARM HW platform.
-#ifndef V8_ARM_SIMULATOR_ARM_H_
-#define V8_ARM_SIMULATOR_ARM_H_
+#ifndef V8_EXECUTION_ARM_SIMULATOR_ARM_H_
+#define V8_EXECUTION_ARM_SIMULATOR_ARM_H_
// globals.h defines USE_SIMULATOR.
-#include "src/globals.h"
+#include "src/common/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
-#include "src/allocation.h"
-#include "src/arm/constants-arm.h"
#include "src/base/hashmap.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/mutex.h"
-#include "src/boxed-float.h"
-#include "src/simulator-base.h"
+#include "src/codegen/arm/constants-arm.h"
+#include "src/execution/simulator-base.h"
+#include "src/utils/allocation.h"
+#include "src/utils/boxed-float.h"
namespace v8 {
namespace internal {
@@ -41,20 +41,16 @@ class CachePage {
static const int kLineLength = 1 << kLineShift;
static const int kLineMask = kLineLength - 1;
- CachePage() {
- memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
- }
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
char* ValidityByte(int offset) {
return &validity_map_[offset >> kLineShift];
}
- char* CachedData(int offset) {
- return &data_[offset];
- }
+ char* CachedData(int offset) { return &data_[offset]; }
private:
- char data_[kPageSize]; // The cached data.
+ char data_[kPageSize]; // The cached data.
static const int kValidityMapSize = kPageSize >> kLineShift;
char validity_map_[kValidityMapSize]; // One byte per line.
};
@@ -64,24 +60,108 @@ class Simulator : public SimulatorBase {
friend class ArmDebugger;
enum Register {
no_reg = -1,
- r0 = 0, r1, r2, r3, r4, r5, r6, r7,
- r8, r9, r10, r11, r12, r13, r14, r15,
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
num_registers,
sp = 13,
lr = 14,
pc = 15,
- s0 = 0, s1, s2, s3, s4, s5, s6, s7,
- s8, s9, s10, s11, s12, s13, s14, s15,
- s16, s17, s18, s19, s20, s21, s22, s23,
- s24, s25, s26, s27, s28, s29, s30, s31,
+ s0 = 0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ s9,
+ s10,
+ s11,
+ s12,
+ s13,
+ s14,
+ s15,
+ s16,
+ s17,
+ s18,
+ s19,
+ s20,
+ s21,
+ s22,
+ s23,
+ s24,
+ s25,
+ s26,
+ s27,
+ s28,
+ s29,
+ s30,
+ s31,
num_s_registers = 32,
- d0 = 0, d1, d2, d3, d4, d5, d6, d7,
- d8, d9, d10, d11, d12, d13, d14, d15,
- d16, d17, d18, d19, d20, d21, d22, d23,
- d24, d25, d26, d27, d28, d29, d30, d31,
+ d0 = 0,
+ d1,
+ d2,
+ d3,
+ d4,
+ d5,
+ d6,
+ d7,
+ d8,
+ d9,
+ d10,
+ d11,
+ d12,
+ d13,
+ d14,
+ d15,
+ d16,
+ d17,
+ d18,
+ d19,
+ d20,
+ d21,
+ d22,
+ d23,
+ d24,
+ d25,
+ d26,
+ d27,
+ d28,
+ d29,
+ d30,
+ d31,
num_d_registers = 32,
- q0 = 0, q1, q2, q3, q4, q5, q6, q7,
- q8, q9, q10, q11, q12, q13, q14, q15,
+ q0 = 0,
+ q1,
+ q2,
+ q3,
+ q4,
+ q5,
+ q6,
+ q7,
+ q8,
+ q9,
+ q10,
+ q11,
+ q12,
+ q13,
+ q14,
+ q15,
num_q_registers = 16
};
@@ -96,7 +176,7 @@ class Simulator : public SimulatorBase {
// architecture specification and is off by a 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
- int32_t get_register(int reg) const;
+ V8_EXPORT_PRIVATE int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
void set_register_pair_from_double(int reg, double* value);
void set_dw_register(int dreg, const int* dbl);
@@ -147,7 +227,7 @@ class Simulator : public SimulatorBase {
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
- int32_t get_pc() const;
+ V8_EXPORT_PRIVATE int32_t get_pc() const;
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
@@ -228,14 +308,10 @@ class Simulator : public SimulatorBase {
void SetVFlag(bool val);
bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
bool BorrowFrom(int32_t left, int32_t right, int32_t carry = 1);
- bool OverflowFrom(int32_t alu_out,
- int32_t left,
- int32_t right,
+ bool OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
bool addition);
- inline int GetCarry() {
- return c_flag_ ? 1 : 0;
- }
+ inline int GetCarry() { return c_flag_ ? 1 : 0; }
// Support for VFP.
void Compute_FPSCR_Flags(float val1, float val2);
@@ -249,11 +325,8 @@ class Simulator : public SimulatorBase {
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
- int32_t ProcessPU(Instruction* instr,
- int num_regs,
- int operand_size,
- intptr_t* start_address,
- intptr_t* end_address);
+ int32_t ProcessPU(Instruction* instr, int num_regs, int operand_size,
+ intptr_t* start_address, intptr_t* end_address);
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
@@ -336,11 +409,11 @@ class Simulator : public SimulatorBase {
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
- template<class ReturnType, int register_size>
- ReturnType GetFromVFPRegister(int reg_index);
+ template <class ReturnType, int register_size>
+ ReturnType GetFromVFPRegister(int reg_index);
- template<class InputType, int register_size>
- void SetVFPRegister(int reg_index, const InputType& value);
+ template <class InputType, int register_size>
+ void SetVFPRegister(int reg_index, const InputType& value);
void SetSpecialRegister(SRegisterFieldMask reg_and_mask, uint32_t value);
uint32_t GetFromSpecialRegister(SRegister reg);
@@ -503,4 +576,4 @@ class Simulator : public SimulatorBase {
} // namespace v8
#endif // defined(USE_SIMULATOR)
-#endif // V8_ARM_SIMULATOR_ARM_H_
+#endif // V8_EXECUTION_ARM_SIMULATOR_ARM_H_
diff --git a/deps/v8/src/arm64/frame-constants-arm64.cc b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
index a37b665d28..89a5259e2b 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.cc
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/assembler-arm64.h"
-#include "src/assembler.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/assembler-arm64.h"
+#include "src/codegen/assembler.h"
-#include "src/arm64/frame-constants-arm64.h"
+#include "src/execution/arm64/frame-constants-arm64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/arm64/frame-constants-arm64.h b/deps/v8/src/execution/arm64/frame-constants-arm64.h
index 81f89b2961..d35ed2e75a 100644
--- a/deps/v8/src/arm64/frame-constants-arm64.h
+++ b/deps/v8/src/execution/arm64/frame-constants-arm64.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_FRAME_CONSTANTS_ARM64_H_
-#define V8_ARM64_FRAME_CONSTANTS_ARM64_H_
+#ifndef V8_EXECUTION_ARM64_FRAME_CONSTANTS_ARM64_H_
+#define V8_EXECUTION_ARM64_FRAME_CONSTANTS_ARM64_H_
#include "src/base/macros.h"
-#include "src/frame-constants.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -81,4 +81,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_ARM64_FRAME_CONSTANTS_ARM64_H_
+#endif // V8_EXECUTION_ARM64_FRAME_CONSTANTS_ARM64_H_
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index be2c6cdec6..8618dd8551 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/simulator-arm64.h"
+#include "src/execution/arm64/simulator-arm64.h"
#if defined(USE_SIMULATOR)
@@ -11,51 +11,50 @@
#include <cstdarg>
#include <type_traits>
-#include "src/arm64/decoder-arm64-inl.h"
-#include "src/assembler-inl.h"
#include "src/base/lazy-instance.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/codegen/arm64/decoder-arm64-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/heap/combined-heap.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
-
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
// Windows C Run-Time Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
-
// Helpers for colors.
-#define COLOUR(colour_code) "\033[0;" colour_code "m"
-#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
-#define NORMAL ""
-#define GREY "30"
-#define RED "31"
-#define GREEN "32"
-#define YELLOW "33"
-#define BLUE "34"
+#define COLOUR(colour_code) "\033[0;" colour_code "m"
+#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
+#define NORMAL ""
+#define GREY "30"
+#define RED "31"
+#define GREEN "32"
+#define YELLOW "33"
+#define BLUE "34"
#define MAGENTA "35"
-#define CYAN "36"
-#define WHITE "37"
-
-typedef char const * const TEXT_COLOUR;
-TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
-TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : "";
-TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : "";
-TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : "";
-TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : "";
+#define CYAN "36"
+#define WHITE "37"
+
+using TEXT_COLOUR = char const* const;
+TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
+TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : "";
+TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : "";
+TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : "";
+TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : "";
TEXT_COLOUR clr_vreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : "";
TEXT_COLOUR clr_vreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : "";
-TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : "";
-TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
-TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
-TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
+TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : "";
+TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
+TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
+TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
Simulator::GlobalMonitor::Get)
@@ -83,7 +82,6 @@ void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
value_ = (value_ & ~mask) | (bits & mask);
}
-
SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
switch (id) {
case NZCV:
@@ -95,7 +93,6 @@ SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
}
}
-
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
Isolate::PerIsolateThreadData* isolate_data =
@@ -134,12 +131,12 @@ void Simulator::CallImpl(Address entry, CallArgument* args) {
// Process stack arguments, and make sure the stack is suitably aligned.
uintptr_t original_stack = sp();
- uintptr_t entry_stack = original_stack -
- stack_args.size() * sizeof(stack_args[0]);
+ uintptr_t entry_stack =
+ original_stack - stack_args.size() * sizeof(stack_args[0]);
if (base::OS::ActivationFrameAlignment() != 0) {
entry_stack &= -base::OS::ActivationFrameAlignment();
}
- char * stack = reinterpret_cast<char*>(entry_stack);
+ char* stack = reinterpret_cast<char*>(entry_stack);
std::vector<int64_t>::const_iterator it;
for (it = stack_args.begin(); it != stack_args.end(); it++) {
memcpy(stack, &(*it), sizeof(*it));
@@ -177,8 +174,7 @@ void Simulator::CheckPCSComplianceAndRun() {
saved_registers[i] = xreg(register_list.PopLowestIndex().code());
}
for (int i = 0; i < kNumberOfCalleeSavedVRegisters; i++) {
- saved_fpregisters[i] =
- dreg_bits(fpregister_list.PopLowestIndex().code());
+ saved_fpregisters[i] = dreg_bits(fpregister_list.PopLowestIndex().code());
}
int64_t original_stack = sp();
#endif
@@ -215,7 +211,6 @@ void Simulator::CheckPCSComplianceAndRun() {
#endif
}
-
#ifdef DEBUG
// The least significant byte of the curruption value holds the corresponding
// register's code.
@@ -234,7 +229,6 @@ void Simulator::CorruptRegisters(CPURegList* list, uint64_t value) {
}
}
-
void Simulator::CorruptAllCallerSavedCPURegisters() {
// Corrupt alters its parameter so copy them first.
CPURegList register_list = kCallerSaved;
@@ -245,13 +239,11 @@ void Simulator::CorruptAllCallerSavedCPURegisters() {
}
#endif
-
// Extending the stack by 2 * 64 bits is required for stack alignment purposes.
uintptr_t Simulator::PushAddress(uintptr_t address) {
DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
intptr_t new_sp = sp() - 2 * kXRegSize;
- uintptr_t* alignment_slot =
- reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
+ uintptr_t* alignment_slot = reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
memcpy(alignment_slot, &kSlotsZapValue, kSystemPointerSize);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
memcpy(stack_slot, &address, kSystemPointerSize);
@@ -259,7 +251,6 @@ uintptr_t Simulator::PushAddress(uintptr_t address) {
return new_sp;
}
-
uintptr_t Simulator::PopAddress() {
intptr_t current_sp = sp();
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
@@ -269,7 +260,6 @@ uintptr_t Simulator::PopAddress() {
return address;
}
-
// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
// The simulator uses a separate JS stack. If we have exhausted the C stack,
@@ -305,8 +295,8 @@ Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
}
if (FLAG_log_instruction_stats) {
- instrument_ = new Instrument(FLAG_log_instruction_file,
- FLAG_log_instruction_period);
+ instrument_ =
+ new Instrument(FLAG_log_instruction_file, FLAG_log_instruction_period);
decoder_->AppendVisitor(instrument_);
}
}
@@ -320,7 +310,6 @@ Simulator::Simulator()
CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats);
}
-
void Simulator::Init(FILE* stream) {
ResetState();
@@ -341,7 +330,6 @@ void Simulator::Init(FILE* stream) {
disassembler_decoder_->AppendVisitor(print_disasm_);
}
-
void Simulator::ResetState() {
// Reset the system registers.
nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
@@ -364,7 +352,6 @@ void Simulator::ResetState() {
break_on_next_ = false;
}
-
Simulator::~Simulator() {
GlobalMonitor::Get()->RemoveProcessor(&global_monitor_processor_);
delete[] reinterpret_cast<byte*>(stack_);
@@ -377,7 +364,6 @@ Simulator::~Simulator() {
delete decoder_;
}
-
void Simulator::Run() {
// Flush any written registers before executing anything, so that
// manually-set registers are logged _before_ the first instruction.
@@ -389,46 +375,44 @@ void Simulator::Run() {
}
}
-
void Simulator::RunFrom(Instruction* start) {
set_pc(start);
Run();
}
-
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair structure.
// The simulator assumes all runtime calls return two 64-bits values. If they
// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
#if defined(V8_OS_WIN)
-typedef int64_t (*SimulatorRuntimeCall_ReturnPtr)(int64_t arg0, int64_t arg1,
- int64_t arg2, int64_t arg3,
- int64_t arg4, int64_t arg5,
- int64_t arg6, int64_t arg7,
- int64_t arg8);
+using SimulatorRuntimeCall_ReturnPtr = int64_t (*)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8);
#endif
-typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
- int64_t arg2, int64_t arg3,
- int64_t arg4, int64_t arg5,
- int64_t arg6, int64_t arg7,
- int64_t arg8);
+using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8);
-typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
-typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
-typedef double (*SimulatorRuntimeFPCall)(double arg1);
-typedef double (*SimulatorRuntimeFPIntCall)(double arg1, int32_t arg2);
+using SimulatorRuntimeCompareCall = int64_t (*)(double arg1, double arg2);
+using SimulatorRuntimeFPFPCall = double (*)(double arg1, double arg2);
+using SimulatorRuntimeFPCall = double (*)(double arg1);
+using SimulatorRuntimeFPIntCall = double (*)(double arg1, int32_t arg2);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
+using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
-typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
-typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
- void* arg2);
+using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
+ void* arg2);
void Simulator::DoRuntimeCall(Instruction* instr) {
Redirection* redirection = Redirection::FromInstruction(instr);
@@ -467,7 +451,6 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
default:
TraceSim("Type: Unknown.\n");
UNREACHABLE();
- break;
case ExternalReference::BUILTIN_CALL:
#if defined(V8_OS_WIN)
@@ -549,7 +532,7 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// void f(v8::FunctionCallbackInfo&)
TraceSim("Type: DIRECT_API_CALL\n");
SimulatorRuntimeDirectApiCall target =
- reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
TraceSim("Arguments: 0x%016" PRIx64 "\n", xreg(0));
target(xreg(0));
TraceSim("No return value.");
@@ -563,7 +546,7 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// int f(double, double)
TraceSim("Type: BUILTIN_COMPARE_CALL\n");
SimulatorRuntimeCompareCall target =
- reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
int64_t result = target(dreg(0), dreg(1));
TraceSim("Returned: %" PRId64 "\n", result);
@@ -578,7 +561,7 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// double f(double)
TraceSim("Type: BUILTIN_FP_CALL\n");
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
TraceSim("Argument: %f\n", dreg(0));
double result = target(dreg(0));
TraceSim("Returned: %f\n", result);
@@ -593,7 +576,7 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// double f(double, double)
TraceSim("Type: BUILTIN_FP_FP_CALL\n");
SimulatorRuntimeFPFPCall target =
- reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
TraceSim("Arguments: %f, %f\n", dreg(0), dreg(1));
double result = target(dreg(0), dreg(1));
TraceSim("Returned: %f\n", result);
@@ -608,7 +591,7 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// double f(double, int)
TraceSim("Type: BUILTIN_FP_INT_CALL\n");
SimulatorRuntimeFPIntCall target =
- reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
TraceSim("Arguments: %f, %d\n", dreg(0), wreg(0));
double result = target(dreg(0), wreg(0));
TraceSim("Returned: %f\n", result);
@@ -623,9 +606,9 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// void f(Local<String> property, PropertyCallbackInfo& info)
TraceSim("Type: DIRECT_GETTER_CALL\n");
SimulatorRuntimeDirectGetterCall target =
- reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
- TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n",
- xreg(0), xreg(1));
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 "\n", xreg(0),
+ xreg(1));
target(xreg(0), xreg(1));
TraceSim("No return value.");
#ifdef DEBUG
@@ -638,7 +621,7 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// void f(v8::FunctionCallbackInfo&, v8::FunctionCallback)
TraceSim("Type: PROFILING_API_CALL\n");
SimulatorRuntimeProfilingApiCall target =
- reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
void* arg1 = Redirection::ReverseRedirection(xreg(1));
TraceSim("Arguments: 0x%016" PRIx64 ", %p\n", xreg(0), arg1);
target(xreg(0), arg1);
@@ -654,11 +637,10 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// AccessorNameGetterCallback callback)
TraceSim("Type: PROFILING_GETTER_CALL\n");
SimulatorRuntimeProfilingGetterCall target =
- reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
- external);
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
void* arg2 = Redirection::ReverseRedirection(xreg(2));
- TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n",
- xreg(0), xreg(1), arg2);
+ TraceSim("Arguments: 0x%016" PRIx64 ", 0x%016" PRIx64 ", %p\n", xreg(0),
+ xreg(1), arg2);
target(xreg(0), xreg(1), arg2);
TraceSim("No return value.");
#ifdef DEBUG
@@ -684,23 +666,19 @@ const char* Simulator::wreg_names[] = {
"wcp", "w28", "wfp", "wlr", "wzr", "wsp"};
const char* Simulator::sreg_names[] = {
-"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
-"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
-"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
-"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10",
+ "s11", "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21",
+ "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
const char* Simulator::dreg_names[] = {
-"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
-"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
-"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
-"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
+ "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
+ "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
const char* Simulator::vreg_names[] = {
-"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
-"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
-"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
-"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
-
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
+ "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
+ "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
static_assert(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1),
@@ -717,7 +695,6 @@ const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
return wreg_names[code];
}
-
const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
static_assert(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1),
"Array must be large enough to hold all register names.");
@@ -731,7 +708,6 @@ const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
return xreg_names[code];
}
-
const char* Simulator::SRegNameForCode(unsigned code) {
static_assert(arraysize(Simulator::sreg_names) == kNumberOfVRegisters,
"Array must be large enough to hold all register names.");
@@ -739,7 +715,6 @@ const char* Simulator::SRegNameForCode(unsigned code) {
return sreg_names[code % kNumberOfVRegisters];
}
-
const char* Simulator::DRegNameForCode(unsigned code) {
static_assert(arraysize(Simulator::dreg_names) == kNumberOfVRegisters,
"Array must be large enough to hold all register names.");
@@ -747,7 +722,6 @@ const char* Simulator::DRegNameForCode(unsigned code) {
return dreg_names[code % kNumberOfVRegisters];
}
-
const char* Simulator::VRegNameForCode(unsigned code) {
static_assert(arraysize(Simulator::vreg_names) == kNumberOfVRegisters,
"Array must be large enough to hold all register names.");
@@ -772,7 +746,6 @@ void LogicVRegister::ReadUintFromMem(VectorFormat vform, int index,
break;
default:
UNREACHABLE();
- return;
}
}
@@ -795,11 +768,9 @@ void LogicVRegister::WriteUintToMem(VectorFormat vform, int index,
break;
default:
UNREACHABLE();
- return;
}
}
-
int Simulator::CodeFromName(const char* name) {
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
if ((strcmp(xreg_names[i], name) == 0) ||
@@ -820,7 +791,6 @@ int Simulator::CodeFromName(const char* name) {
return -1;
}
-
// Helpers ---------------------------------------------------------------------
template <typename T>
T Simulator::AddWithCarry(bool set_flags, T left, T right, int carry_in) {
@@ -853,8 +823,7 @@ T Simulator::AddWithCarry(bool set_flags, T left, T right, int carry_in) {
return result;
}
-
-template<typename T>
+template <typename T>
void Simulator::AddSubWithCarry(Instruction* instr) {
// Use unsigned types to avoid implementation-defined overflow behaviour.
static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
@@ -866,9 +835,7 @@ void Simulator::AddSubWithCarry(Instruction* instr) {
op2 = ~op2;
}
- new_val = AddWithCarry<T>(instr->FlagsUpdate(),
- reg<T>(instr->Rn()),
- op2,
+ new_val = AddWithCarry<T>(instr->FlagsUpdate(), reg<T>(instr->Rn()), op2,
nzcv().C());
set_reg<T>(instr->Rd(), new_val);
@@ -876,7 +843,7 @@ void Simulator::AddSubWithCarry(Instruction* instr) {
template <typename T>
T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
- typedef typename std::make_unsigned<T>::type unsignedT;
+ using unsignedT = typename std::make_unsigned<T>::type;
if (amount == 0) {
return value;
@@ -900,7 +867,6 @@ T Simulator::ShiftOperand(T value, Shift shift_type, unsigned amount) {
}
}
-
template <typename T>
T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
const unsigned kSignExtendBShift = (sizeof(T) - 1) * 8;
@@ -935,7 +901,6 @@ T Simulator::ExtendValue(T value, Extend extend_type, unsigned left_shift) {
return value << left_shift;
}
-
template <typename T>
void Simulator::Extract(Instruction* instr) {
unsigned lsb = instr->ImmS();
@@ -949,7 +914,6 @@ void Simulator::Extract(Instruction* instr) {
set_reg<T>(instr->Rd(), result);
}
-
void Simulator::FPCompare(double val0, double val1) {
AssertSupportedFPCR();
@@ -1075,8 +1039,7 @@ Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatFP(
void Simulator::SetBreakpoint(Instruction* location) {
for (unsigned i = 0; i < breakpoints_.size(); i++) {
if (breakpoints_.at(i).location == location) {
- PrintF(stream_,
- "Existing breakpoint at %p was %s\n",
+ PrintF(stream_, "Existing breakpoint at %p was %s\n",
reinterpret_cast<void*>(location),
breakpoints_.at(i).enabled ? "disabled" : "enabled");
breakpoints_.at(i).enabled = !breakpoints_.at(i).enabled;
@@ -1085,11 +1048,10 @@ void Simulator::SetBreakpoint(Instruction* location) {
}
Breakpoint new_breakpoint = {location, true};
breakpoints_.push_back(new_breakpoint);
- PrintF(stream_,
- "Set a breakpoint at %p\n", reinterpret_cast<void*>(location));
+ PrintF(stream_, "Set a breakpoint at %p\n",
+ reinterpret_cast<void*>(location));
}
-
void Simulator::ListBreakpoints() {
PrintF(stream_, "Breakpoints:\n");
for (unsigned i = 0; i < breakpoints_.size(); i++) {
@@ -1099,12 +1061,10 @@ void Simulator::ListBreakpoints() {
}
}
-
void Simulator::CheckBreakpoints() {
bool hit_a_breakpoint = false;
for (unsigned i = 0; i < breakpoints_.size(); i++) {
- if ((breakpoints_.at(i).location == pc_) &&
- breakpoints_.at(i).enabled) {
+ if ((breakpoints_.at(i).location == pc_) && breakpoints_.at(i).enabled) {
hit_a_breakpoint = true;
// Disable this breakpoint.
breakpoints_.at(i).enabled = false;
@@ -1117,7 +1077,6 @@ void Simulator::CheckBreakpoints() {
}
}
-
void Simulator::CheckBreakNext() {
// If the current instruction is a BL, insert a breakpoint just after it.
if (break_on_next_ && pc_->IsBranchAndLinkToRegister()) {
@@ -1126,7 +1085,6 @@ void Simulator::CheckBreakNext() {
}
}
-
void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
Instruction* end = start->InstructionAtOffset(count * kInstrSize);
for (Instruction* pc = start; pc < end; pc = pc->following()) {
@@ -1152,7 +1110,6 @@ void Simulator::PrintSystemRegisters() {
PrintSystemRegister(FPCR);
}
-
void Simulator::PrintRegisters() {
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
PrintRegister(i);
@@ -1166,7 +1123,6 @@ void Simulator::PrintVRegisters() {
}
}
-
void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
registers_[code].NotifyRegisterLogged();
@@ -1372,28 +1328,21 @@ void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) {
fprintf(stream_, "\n");
}
-
void Simulator::PrintSystemRegister(SystemRegister id) {
switch (id) {
case NZCV:
- fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
- clr_flag_name, clr_flag_value,
- nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+ fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n", clr_flag_name,
+ clr_flag_value, nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
clr_normal);
break;
case FPCR: {
- static const char * rmode[] = {
- "0b00 (Round to Nearest)",
- "0b01 (Round towards Plus Infinity)",
- "0b10 (Round towards Minus Infinity)",
- "0b11 (Round towards Zero)"
- };
+ static const char* rmode[] = {
+ "0b00 (Round to Nearest)", "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)", "0b11 (Round towards Zero)"};
DCHECK(fpcr().RMode() < arraysize(rmode));
- fprintf(stream_,
- "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
- clr_flag_name, clr_flag_value,
- fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
- clr_normal);
+ fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name, clr_flag_value, fpcr().AHP(), fpcr().DN(),
+ fpcr().FZ(), rmode[fpcr().RMode()], clr_normal);
break;
}
default:
@@ -1409,8 +1358,8 @@ void Simulator::PrintRead(uintptr_t address, unsigned reg_code,
// The template is "# {reg}: 0x{value} <- {address}".
PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister);
- fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
- clr_memory_address, address, clr_normal);
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", clr_memory_address, address,
+ clr_normal);
}
void Simulator::PrintVRead(uintptr_t address, unsigned reg_code,
@@ -1423,8 +1372,8 @@ void Simulator::PrintVRead(uintptr_t address, unsigned reg_code,
PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format),
GetPrintRegLaneCount(format), lane);
}
- fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
- clr_memory_address, address, clr_normal);
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n", clr_memory_address, address,
+ clr_normal);
}
void Simulator::PrintWrite(uintptr_t address, unsigned reg_code,
@@ -1435,8 +1384,8 @@ void Simulator::PrintWrite(uintptr_t address, unsigned reg_code,
// and readable, the value is aligned with the values in the register trace.
PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister,
GetPrintRegSizeInBytes(format));
- fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
- clr_memory_address, address, clr_normal);
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", clr_memory_address, address,
+ clr_normal);
}
void Simulator::PrintVWrite(uintptr_t address, unsigned reg_code,
@@ -1456,11 +1405,10 @@ void Simulator::PrintVWrite(uintptr_t address, unsigned reg_code,
if (format & kPrintRegAsFP) {
PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane);
}
- fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
- clr_memory_address, address, clr_normal);
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n", clr_memory_address, address,
+ clr_normal);
}
-
// Visitors---------------------------------------------------------------------
void Simulator::VisitUnimplemented(Instruction* instr) {
@@ -1469,14 +1417,12 @@ void Simulator::VisitUnimplemented(Instruction* instr) {
UNIMPLEMENTED();
}
-
void Simulator::VisitUnallocated(Instruction* instr) {
fprintf(stream_, "Unallocated instruction at %p: 0x%08" PRIx32 "\n",
reinterpret_cast<void*>(instr), instr->InstructionBits());
UNIMPLEMENTED();
}
-
void Simulator::VisitPCRelAddressing(Instruction* instr) {
switch (instr->Mask(PCRelAddressingMask)) {
case ADR:
@@ -1487,11 +1433,9 @@ void Simulator::VisitPCRelAddressing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
-
void Simulator::VisitUnconditionalBranch(Instruction* instr) {
switch (instr->Mask(UnconditionalBranchMask)) {
case BL:
@@ -1505,7 +1449,6 @@ void Simulator::VisitUnconditionalBranch(Instruction* instr) {
}
}
-
void Simulator::VisitConditionalBranch(Instruction* instr) {
DCHECK(instr->Mask(ConditionalBranchMask) == B_cond);
if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
@@ -1513,7 +1456,6 @@ void Simulator::VisitConditionalBranch(Instruction* instr) {
}
}
-
void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
Instruction* target = reg<Instruction*>(instr->Rn());
switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
@@ -1527,44 +1469,57 @@ void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
V8_FALLTHROUGH;
}
case BR:
- case RET: set_pc(target); break;
- default: UNIMPLEMENTED();
+ case RET:
+ set_pc(target);
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
-
void Simulator::VisitTestBranch(Instruction* instr) {
- unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
- instr->ImmTestBranchBit40();
+ unsigned bit_pos =
+ (instr->ImmTestBranchBit5() << 5) | instr->ImmTestBranchBit40();
bool take_branch = ((xreg(instr->Rt()) & (1ULL << bit_pos)) == 0);
switch (instr->Mask(TestBranchMask)) {
- case TBZ: break;
- case TBNZ: take_branch = !take_branch; break;
- default: UNIMPLEMENTED();
+ case TBZ:
+ break;
+ case TBNZ:
+ take_branch = !take_branch;
+ break;
+ default:
+ UNIMPLEMENTED();
}
if (take_branch) {
set_pc(instr->ImmPCOffsetTarget());
}
}
-
void Simulator::VisitCompareBranch(Instruction* instr) {
unsigned rt = instr->Rt();
bool take_branch = false;
switch (instr->Mask(CompareBranchMask)) {
- case CBZ_w: take_branch = (wreg(rt) == 0); break;
- case CBZ_x: take_branch = (xreg(rt) == 0); break;
- case CBNZ_w: take_branch = (wreg(rt) != 0); break;
- case CBNZ_x: take_branch = (xreg(rt) != 0); break;
- default: UNIMPLEMENTED();
+ case CBZ_w:
+ take_branch = (wreg(rt) == 0);
+ break;
+ case CBZ_x:
+ take_branch = (xreg(rt) == 0);
+ break;
+ case CBNZ_w:
+ take_branch = (wreg(rt) != 0);
+ break;
+ case CBNZ_x:
+ take_branch = (xreg(rt) != 0);
+ break;
+ default:
+ UNIMPLEMENTED();
}
if (take_branch) {
set_pc(instr->ImmPCOffsetTarget());
}
}
-
-template<typename T>
+template <typename T>
void Simulator::AddSubHelper(Instruction* instr, T op2) {
// Use unsigned types to avoid implementation-defined overflow behaviour.
static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
@@ -1576,26 +1531,23 @@ void Simulator::AddSubHelper(Instruction* instr, T op2) {
switch (operation) {
case ADD:
case ADDS: {
- new_val = AddWithCarry<T>(set_flags,
- reg<T>(instr->Rn(), instr->RnMode()),
- op2);
+ new_val =
+ AddWithCarry<T>(set_flags, reg<T>(instr->Rn(), instr->RnMode()), op2);
break;
}
case SUB:
case SUBS: {
- new_val = AddWithCarry<T>(set_flags,
- reg<T>(instr->Rn(), instr->RnMode()),
- ~op2,
- 1);
+ new_val = AddWithCarry<T>(set_flags, reg<T>(instr->Rn(), instr->RnMode()),
+ ~op2, 1);
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
set_reg<T>(instr->Rd(), new_val, instr->RdMode());
}
-
void Simulator::VisitAddSubShifted(Instruction* instr) {
Shift shift_type = static_cast<Shift>(instr->ShiftDP());
unsigned shift_amount = instr->ImmDPShift();
@@ -1609,7 +1561,6 @@ void Simulator::VisitAddSubShifted(Instruction* instr) {
}
}
-
void Simulator::VisitAddSubImmediate(Instruction* instr) {
int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
if (instr->SixtyFourBits()) {
@@ -1619,7 +1570,6 @@ void Simulator::VisitAddSubImmediate(Instruction* instr) {
}
}
-
void Simulator::VisitAddSubExtended(Instruction* instr) {
Extend ext = static_cast<Extend>(instr->ExtendMode());
unsigned left_shift = instr->ImmExtendShift();
@@ -1632,7 +1582,6 @@ void Simulator::VisitAddSubExtended(Instruction* instr) {
}
}
-
void Simulator::VisitAddSubWithCarry(Instruction* instr) {
if (instr->SixtyFourBits()) {
AddSubWithCarry<uint64_t>(instr);
@@ -1641,7 +1590,6 @@ void Simulator::VisitAddSubWithCarry(Instruction* instr) {
}
}
-
void Simulator::VisitLogicalShifted(Instruction* instr) {
Shift shift_type = static_cast<Shift>(instr->ShiftDP());
unsigned shift_amount = instr->ImmDPShift();
@@ -1657,7 +1605,6 @@ void Simulator::VisitLogicalShifted(Instruction* instr) {
}
}
-
void Simulator::VisitLogicalImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
LogicalHelper(instr, static_cast<uint64_t>(instr->ImmLogical()));
@@ -1666,8 +1613,7 @@ void Simulator::VisitLogicalImmediate(Instruction* instr) {
}
}
-
-template<typename T>
+template <typename T>
void Simulator::LogicalHelper(Instruction* instr, T op2) {
T op1 = reg<T>(instr->Rn());
T result = 0;
@@ -1676,10 +1622,18 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
// Switch on the logical operation, stripping out the NOT bit, as it has a
// different meaning for logical immediate instructions.
switch (instr->Mask(LogicalOpMask & ~NOT)) {
- case ANDS: update_flags = true; V8_FALLTHROUGH;
- case AND: result = op1 & op2; break;
- case ORR: result = op1 | op2; break;
- case EOR: result = op1 ^ op2; break;
+ case ANDS:
+ update_flags = true;
+ V8_FALLTHROUGH;
+ case AND:
+ result = op1 & op2;
+ break;
+ case ORR:
+ result = op1 | op2;
+ break;
+ case EOR:
+ result = op1 ^ op2;
+ break;
default:
UNIMPLEMENTED();
}
@@ -1695,7 +1649,6 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
set_reg<T>(instr->Rd(), result, instr->RdMode());
}
-
void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
if (instr->SixtyFourBits()) {
ConditionalCompareHelper(instr, static_cast<uint64_t>(xreg(instr->Rm())));
@@ -1704,7 +1657,6 @@ void Simulator::VisitConditionalCompareRegister(Instruction* instr) {
}
}
-
void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
if (instr->SixtyFourBits()) {
ConditionalCompareHelper(instr, static_cast<uint64_t>(instr->ImmCondCmp()));
@@ -1713,8 +1665,7 @@ void Simulator::VisitConditionalCompareImmediate(Instruction* instr) {
}
}
-
-template<typename T>
+template <typename T>
void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
// Use unsigned types to avoid implementation-defined overflow behaviour.
static_assert(std::is_unsigned<T>::value, "operands must be unsigned");
@@ -1737,28 +1688,23 @@ void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
}
}
-
void Simulator::VisitLoadStoreUnsignedOffset(Instruction* instr) {
int offset = instr->ImmLSUnsigned() << instr->SizeLS();
LoadStoreHelper(instr, offset, Offset);
}
-
void Simulator::VisitLoadStoreUnscaledOffset(Instruction* instr) {
LoadStoreHelper(instr, instr->ImmLS(), Offset);
}
-
void Simulator::VisitLoadStorePreIndex(Instruction* instr) {
LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
}
-
void Simulator::VisitLoadStorePostIndex(Instruction* instr) {
LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
}
-
void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
Extend ext = static_cast<Extend>(instr->ExtendMode());
DCHECK((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
@@ -1768,9 +1714,7 @@ void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
LoadStoreHelper(instr, offset, Offset);
}
-
-void Simulator::LoadStoreHelper(Instruction* instr,
- int64_t offset,
+void Simulator::LoadStoreHelper(Instruction* instr, int64_t offset,
AddrMode addrmode) {
unsigned srcdst = instr->Rt();
unsigned addr_reg = instr->Rn();
@@ -1806,44 +1750,79 @@ void Simulator::LoadStoreHelper(Instruction* instr,
switch (op) {
// Use _no_log variants to suppress the register trace (LOG_REGS,
// LOG_VREGS). We will print a more detailed log.
- case LDRB_w: set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address)); break;
- case LDRH_w: set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address)); break;
- case LDR_w: set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address)); break;
- case LDR_x: set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address)); break;
- case LDRSB_w: set_wreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
- case LDRSH_w: set_wreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
- case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
- case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
- case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead<int32_t>(address)); break;
+ case LDRB_w:
+ set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address));
+ break;
+ case LDRH_w:
+ set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address));
+ break;
+ case LDR_w:
+ set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address));
+ break;
+ case LDR_x:
+ set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address));
+ break;
+ case LDRSB_w:
+ set_wreg_no_log(srcdst, MemoryRead<int8_t>(address));
+ break;
+ case LDRSH_w:
+ set_wreg_no_log(srcdst, MemoryRead<int16_t>(address));
+ break;
+ case LDRSB_x:
+ set_xreg_no_log(srcdst, MemoryRead<int8_t>(address));
+ break;
+ case LDRSH_x:
+ set_xreg_no_log(srcdst, MemoryRead<int16_t>(address));
+ break;
+ case LDRSW_x:
+ set_xreg_no_log(srcdst, MemoryRead<int32_t>(address));
+ break;
case LDR_b:
set_breg_no_log(srcdst, MemoryRead<uint8_t>(address));
break;
case LDR_h:
set_hreg_no_log(srcdst, MemoryRead<uint16_t>(address));
break;
- case LDR_s: set_sreg_no_log(srcdst, MemoryRead<float>(address)); break;
- case LDR_d: set_dreg_no_log(srcdst, MemoryRead<double>(address)); break;
+ case LDR_s:
+ set_sreg_no_log(srcdst, MemoryRead<float>(address));
+ break;
+ case LDR_d:
+ set_dreg_no_log(srcdst, MemoryRead<double>(address));
+ break;
case LDR_q:
set_qreg_no_log(srcdst, MemoryRead<qreg_t>(address));
break;
- case STRB_w: MemoryWrite<uint8_t>(address, wreg(srcdst)); break;
- case STRH_w: MemoryWrite<uint16_t>(address, wreg(srcdst)); break;
- case STR_w: MemoryWrite<uint32_t>(address, wreg(srcdst)); break;
- case STR_x: MemoryWrite<uint64_t>(address, xreg(srcdst)); break;
+ case STRB_w:
+ MemoryWrite<uint8_t>(address, wreg(srcdst));
+ break;
+ case STRH_w:
+ MemoryWrite<uint16_t>(address, wreg(srcdst));
+ break;
+ case STR_w:
+ MemoryWrite<uint32_t>(address, wreg(srcdst));
+ break;
+ case STR_x:
+ MemoryWrite<uint64_t>(address, xreg(srcdst));
+ break;
case STR_b:
MemoryWrite<uint8_t>(address, breg(srcdst));
break;
case STR_h:
MemoryWrite<uint16_t>(address, hreg(srcdst));
break;
- case STR_s: MemoryWrite<float>(address, sreg(srcdst)); break;
- case STR_d: MemoryWrite<double>(address, dreg(srcdst)); break;
+ case STR_s:
+ MemoryWrite<float>(address, sreg(srcdst));
+ break;
+ case STR_d:
+ MemoryWrite<double>(address, dreg(srcdst));
+ break;
case STR_q:
MemoryWrite<qreg_t>(address, qreg(srcdst));
break;
- default: UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED();
}
// Print a detailed trace (including the memory address) instead of the basic
@@ -1884,24 +1863,19 @@ void Simulator::LoadStoreHelper(Instruction* instr,
CheckMemoryAccess(address, stack);
}
-
void Simulator::VisitLoadStorePairOffset(Instruction* instr) {
LoadStorePairHelper(instr, Offset);
}
-
void Simulator::VisitLoadStorePairPreIndex(Instruction* instr) {
LoadStorePairHelper(instr, PreIndex);
}
-
void Simulator::VisitLoadStorePairPostIndex(Instruction* instr) {
LoadStorePairHelper(instr, PostIndex);
}
-
-void Simulator::LoadStorePairHelper(Instruction* instr,
- AddrMode addrmode) {
+void Simulator::LoadStorePairHelper(Instruction* instr, AddrMode addrmode) {
unsigned rt = instr->Rt();
unsigned rt2 = instr->Rt2();
unsigned addr_reg = instr->Rn();
@@ -1937,7 +1911,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
}
LoadStorePairOp op =
- static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
+ static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
// 'rt' and 'rt2' can only be aliased for stores.
DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));
@@ -2011,7 +1985,8 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
MemoryWrite<qreg_t>(address2, qreg(rt2));
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
// Print a detailed trace (including the memory address) instead of the basic
@@ -2057,7 +2032,6 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
CheckMemoryAccess(address, stack);
}
-
void Simulator::VisitLoadLiteral(Instruction* instr) {
uintptr_t address = instr->LiteralAddress();
unsigned rt = instr->Rt();
@@ -2086,11 +2060,11 @@ void Simulator::VisitLoadLiteral(Instruction* instr) {
set_dreg_no_log(rt, MemoryRead<double>(address));
LogVRead(address, rt, kPrintDReg);
break;
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
}
-
uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset,
AddrMode addrmode) {
const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
@@ -2109,9 +2083,7 @@ uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset,
return address;
}
-
-void Simulator::LoadStoreWriteBack(unsigned addr_reg,
- int64_t offset,
+void Simulator::LoadStoreWriteBack(unsigned addr_reg, int64_t offset,
AddrMode addrmode) {
if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
DCHECK_NE(offset, 0);
@@ -2251,10 +2223,9 @@ void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
}
}
-
void Simulator::VisitMoveWideImmediate(Instruction* instr) {
MoveWideImmediateOp mov_op =
- static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
+ static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
int64_t new_xn_val = 0;
bool is_64_bits = instr->SixtyFourBits() == 1;
@@ -2269,22 +2240,20 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
switch (mov_op) {
case MOVN_w:
case MOVN_x: {
- new_xn_val = ~shifted_imm16;
- if (!is_64_bits) new_xn_val &= kWRegMask;
+ new_xn_val = ~shifted_imm16;
+ if (!is_64_bits) new_xn_val &= kWRegMask;
break;
}
case MOVK_w:
case MOVK_x: {
- unsigned reg_code = instr->Rd();
- int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
- : wreg(reg_code);
- new_xn_val =
- (prev_xn_val & ~(INT64_C(0xFFFF) << shift)) | shifted_imm16;
- break;
+ unsigned reg_code = instr->Rd();
+ int64_t prev_xn_val = is_64_bits ? xreg(reg_code) : wreg(reg_code);
+ new_xn_val = (prev_xn_val & ~(INT64_C(0xFFFF) << shift)) | shifted_imm16;
+ break;
}
case MOVZ_w:
case MOVZ_x: {
- new_xn_val = shifted_imm16;
+ new_xn_val = shifted_imm16;
break;
}
default:
@@ -2295,7 +2264,6 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
set_xreg(instr->Rd(), new_xn_val);
}
-
void Simulator::VisitConditionalSelect(Instruction* instr) {
uint64_t new_val = xreg(instr->Rn());
if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
@@ -2316,7 +2284,8 @@ void Simulator::VisitConditionalSelect(Instruction* instr) {
case CSNEG_x:
new_val = (uint64_t)(-(int64_t)new_val);
break;
- default: UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED();
}
}
if (instr->SixtyFourBits()) {
@@ -2326,7 +2295,6 @@ void Simulator::VisitConditionalSelect(Instruction* instr) {
}
}
-
void Simulator::VisitDataProcessing1Source(Instruction* instr) {
unsigned dst = instr->Rd();
unsigned src = instr->Rn();
@@ -2353,10 +2321,12 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
case REV_x:
set_xreg(dst, ReverseBytes(xreg(src), 3));
break;
- case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
- break;
- case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
- break;
+ case CLZ_w:
+ set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits));
+ break;
+ case CLZ_x:
+ set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits));
+ break;
case CLS_w: {
set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSizeInBits));
break;
@@ -2365,11 +2335,11 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits));
break;
}
- default: UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED();
}
}
-
template <typename T>
void Simulator::DataProcessing2Source(Instruction* instr) {
Shift shift_op = NO_SHIFT;
@@ -2391,7 +2361,7 @@ void Simulator::DataProcessing2Source(Instruction* instr) {
}
case UDIV_w:
case UDIV_x: {
- typedef typename std::make_unsigned<T>::type unsignedT;
+ using unsignedT = typename std::make_unsigned<T>::type;
unsignedT rn = static_cast<unsignedT>(reg<T>(instr->Rn()));
unsignedT rm = static_cast<unsignedT>(reg<T>(instr->Rm()));
if (rm == 0) {
@@ -2403,14 +2373,23 @@ void Simulator::DataProcessing2Source(Instruction* instr) {
break;
}
case LSLV_w:
- case LSLV_x: shift_op = LSL; break;
+ case LSLV_x:
+ shift_op = LSL;
+ break;
case LSRV_w:
- case LSRV_x: shift_op = LSR; break;
+ case LSRV_x:
+ shift_op = LSR;
+ break;
case ASRV_w:
- case ASRV_x: shift_op = ASR; break;
+ case ASRV_x:
+ shift_op = ASR;
+ break;
case RORV_w:
- case RORV_x: shift_op = ROR; break;
- default: UNIMPLEMENTED();
+ case RORV_x:
+ shift_op = ROR;
+ break;
+ default:
+ UNIMPLEMENTED();
}
if (shift_op != NO_SHIFT) {
@@ -2427,7 +2406,6 @@ void Simulator::DataProcessing2Source(Instruction* instr) {
set_reg<T>(instr->Rd(), result);
}
-
void Simulator::VisitDataProcessing2Source(Instruction* instr) {
if (instr->SixtyFourBits()) {
DataProcessing2Source<int64_t>(instr);
@@ -2436,7 +2414,6 @@ void Simulator::VisitDataProcessing2Source(Instruction* instr) {
}
}
-
// The algorithm used is described in section 8.2 of
// Hacker's Delight, by Henry S. Warren, Jr.
// It assumes that a right shift on a signed integer is an arithmetic shift.
@@ -2458,7 +2435,6 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
return u1 * v1 + w2 + (w1 >> 32);
}
-
void Simulator::VisitDataProcessing3Source(Instruction* instr) {
int64_t result = 0;
// Extract and sign- or zero-extend 32-bit arguments for widening operations.
@@ -2475,15 +2451,24 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
case MSUB_x:
result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
break;
- case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
- case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
- case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
- case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
+ case SMADDL_x:
+ result = xreg(instr->Ra()) + (rn_s32 * rm_s32);
+ break;
+ case SMSUBL_x:
+ result = xreg(instr->Ra()) - (rn_s32 * rm_s32);
+ break;
+ case UMADDL_x:
+ result = xreg(instr->Ra()) + (rn_u32 * rm_u32);
+ break;
+ case UMSUBL_x:
+ result = xreg(instr->Ra()) - (rn_u32 * rm_u32);
+ break;
case SMULH_x:
DCHECK_EQ(instr->Ra(), kZeroRegCode);
result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
break;
- default: UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED();
}
if (instr->SixtyFourBits()) {
@@ -2493,10 +2478,9 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
}
}
-
template <typename T>
void Simulator::BitfieldHelper(Instruction* instr) {
- typedef typename std::make_unsigned<T>::type unsignedT;
+ using unsignedT = typename std::make_unsigned<T>::type;
T reg_size = sizeof(T) * 8;
T R = instr->ImmR();
T S = instr->ImmS();
@@ -2549,7 +2533,6 @@ void Simulator::BitfieldHelper(Instruction* instr) {
set_reg<T>(instr->Rd(), result);
}
-
void Simulator::VisitBitfield(Instruction* instr) {
if (instr->SixtyFourBits()) {
BitfieldHelper<int64_t>(instr);
@@ -2558,7 +2541,6 @@ void Simulator::VisitBitfield(Instruction* instr) {
}
}
-
void Simulator::VisitExtract(Instruction* instr) {
if (instr->SixtyFourBits()) {
Extract<uint64_t>(instr);
@@ -2567,19 +2549,22 @@ void Simulator::VisitExtract(Instruction* instr) {
}
}
-
void Simulator::VisitFPImmediate(Instruction* instr) {
AssertSupportedFPCR();
unsigned dest = instr->Rd();
switch (instr->Mask(FPImmediateMask)) {
- case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
- case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
- default: UNREACHABLE();
+ case FMOV_s_imm:
+ set_sreg(dest, instr->ImmFP32());
+ break;
+ case FMOV_d_imm:
+ set_dreg(dest, instr->ImmFP64());
+ break;
+ default:
+ UNREACHABLE();
}
}
-
void Simulator::VisitFPIntegerConvert(Instruction* instr) {
AssertSupportedFPCR();
@@ -2589,14 +2574,30 @@ void Simulator::VisitFPIntegerConvert(Instruction* instr) {
FPRounding round = fpcr().RMode();
switch (instr->Mask(FPIntegerConvertMask)) {
- case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
- case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
- case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
- case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
- case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
- case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
- case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
- case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
+ case FCVTAS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPTieAway));
+ break;
+ case FCVTAS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPTieAway));
+ break;
+ case FCVTAS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPTieAway));
+ break;
+ case FCVTAS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPTieAway));
+ break;
+ case FCVTAU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPTieAway));
+ break;
+ case FCVTAU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPTieAway));
+ break;
+ case FCVTAU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPTieAway));
+ break;
+ case FCVTAU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPTieAway));
+ break;
case FCVTMS_ws:
set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
break;
@@ -2621,49 +2622,101 @@ void Simulator::VisitFPIntegerConvert(Instruction* instr) {
case FCVTMU_xd:
set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
break;
- case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
- case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
- case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
- case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
- case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
- case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
- case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
- case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
- case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
- case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
- case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
- case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
- case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
- case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
- case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
- case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
- case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
- case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
- case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
- case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
+ case FCVTNS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPTieEven));
+ break;
+ case FCVTNS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPTieEven));
+ break;
+ case FCVTNS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPTieEven));
+ break;
+ case FCVTNS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPTieEven));
+ break;
+ case FCVTNU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPTieEven));
+ break;
+ case FCVTNU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPTieEven));
+ break;
+ case FCVTNU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPTieEven));
+ break;
+ case FCVTNU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPTieEven));
+ break;
+ case FCVTZS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPZero));
+ break;
+ case FCVTZS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPZero));
+ break;
+ case FCVTZS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPZero));
+ break;
+ case FCVTZS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPZero));
+ break;
+ case FCVTZU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPZero));
+ break;
+ case FCVTZU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPZero));
+ break;
+ case FCVTZU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPZero));
+ break;
+ case FCVTZU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPZero));
+ break;
+ case FMOV_ws:
+ set_wreg(dst, sreg_bits(src));
+ break;
+ case FMOV_xd:
+ set_xreg(dst, dreg_bits(src));
+ break;
+ case FMOV_sw:
+ set_sreg_bits(dst, wreg(src));
+ break;
+ case FMOV_dx:
+ set_dreg_bits(dst, xreg(src));
+ break;
// A 32-bit input can be handled in the same way as a 64-bit input, since
// the sign- or zero-extension will not affect the conversion.
- case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
- case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
- case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
+ case SCVTF_dx:
+ set_dreg(dst, FixedToDouble(xreg(src), 0, round));
+ break;
+ case SCVTF_dw:
+ set_dreg(dst, FixedToDouble(wreg(src), 0, round));
+ break;
+ case UCVTF_dx:
+ set_dreg(dst, UFixedToDouble(xreg(src), 0, round));
+ break;
case UCVTF_dw: {
set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), 0, round));
break;
}
- case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
- case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
- case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
+ case SCVTF_sx:
+ set_sreg(dst, FixedToFloat(xreg(src), 0, round));
+ break;
+ case SCVTF_sw:
+ set_sreg(dst, FixedToFloat(wreg(src), 0, round));
+ break;
+ case UCVTF_sx:
+ set_sreg(dst, UFixedToFloat(xreg(src), 0, round));
+ break;
case UCVTF_sw: {
set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), 0, round));
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
}
-
void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
AssertSupportedFPCR();
@@ -2686,8 +2739,7 @@ void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
break;
case UCVTF_dw_fixed: {
- set_dreg(dst,
- UFixedToDouble(reg<uint32_t>(src), fbits, round));
+ set_dreg(dst, UFixedToDouble(reg<uint32_t>(src), fbits, round));
break;
}
case SCVTF_sx_fixed:
@@ -2700,15 +2752,14 @@ void Simulator::VisitFPFixedPointConvert(Instruction* instr) {
set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
break;
case UCVTF_sw_fixed: {
- set_sreg(dst,
- UFixedToFloat(reg<uint32_t>(src), fbits, round));
+ set_sreg(dst, UFixedToFloat(reg<uint32_t>(src), fbits, round));
break;
}
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
}
-
void Simulator::VisitFPCompare(Instruction* instr) {
AssertSupportedFPCR();
@@ -2725,11 +2776,11 @@ void Simulator::VisitFPCompare(Instruction* instr) {
case FCMP_d_zero:
FPCompare(dreg(instr->Rn()), 0.0);
break;
- default: UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED();
}
}
-
void Simulator::VisitFPConditionalCompare(Instruction* instr) {
AssertSupportedFPCR();
@@ -2752,11 +2803,11 @@ void Simulator::VisitFPConditionalCompare(Instruction* instr) {
}
break;
}
- default: UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED();
}
}
-
void Simulator::VisitFPConditionalSelect(Instruction* instr) {
AssertSupportedFPCR();
@@ -2768,13 +2819,17 @@ void Simulator::VisitFPConditionalSelect(Instruction* instr) {
}
switch (instr->Mask(FPConditionalSelectMask)) {
- case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
- case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
- default: UNIMPLEMENTED();
+ case FCSEL_s:
+ set_sreg(instr->Rd(), sreg(selected));
+ break;
+ case FCSEL_d:
+ set_dreg(instr->Rd(), dreg(selected));
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
-
void Simulator::VisitFPDataProcessing1Source(Instruction* instr) {
AssertSupportedFPCR();
@@ -2982,7 +3037,6 @@ bool Simulator::FPProcessNaNs(Instruction* instr) {
return done;
}
-
void Simulator::VisitSystem(Instruction* instr) {
// Some system instructions hijack their Op and Cp fields to represent a
// range of immediates instead of indicating a different instruction. This
@@ -2991,9 +3045,14 @@ void Simulator::VisitSystem(Instruction* instr) {
switch (instr->Mask(SystemSysRegMask)) {
case MRS: {
switch (instr->ImmSystemRegister()) {
- case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
- case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
- default: UNIMPLEMENTED();
+ case NZCV:
+ set_xreg(instr->Rt(), nzcv().RawValue());
+ break;
+ case FPCR:
+ set_xreg(instr->Rt(), fpcr().RawValue());
+ break;
+ default:
+ UNIMPLEMENTED();
}
break;
}
@@ -3007,7 +3066,8 @@ void Simulator::VisitSystem(Instruction* instr) {
fpcr().SetRawValue(wreg(instr->Rt()));
LogSystemRegister(FPCR);
break;
- default: UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED();
}
break;
}
@@ -3018,7 +3078,8 @@ void Simulator::VisitSystem(Instruction* instr) {
case NOP:
case CSDB:
break;
- default: UNIMPLEMENTED();
+ default:
+ UNIMPLEMENTED();
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
#if defined(V8_OS_WIN)
@@ -3031,7 +3092,6 @@ void Simulator::VisitSystem(Instruction* instr) {
}
}
-
bool Simulator::GetValue(const char* desc, int64_t* value) {
int regnum = CodeFromName(desc);
if (regnum >= 0) {
@@ -3051,15 +3111,13 @@ bool Simulator::GetValue(const char* desc, int64_t* value) {
}
return true;
} else if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%" SCNx64,
- reinterpret_cast<uint64_t*>(value)) == 1;
+ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast<uint64_t*>(value)) ==
+ 1;
} else {
- return SScanF(desc, "%" SCNu64,
- reinterpret_cast<uint64_t*>(value)) == 1;
+ return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
}
-
bool Simulator::PrintValue(const char* desc) {
if (strcmp(desc, "sp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
@@ -3094,19 +3152,18 @@ bool Simulator::PrintValue(const char* desc) {
clr_vreg_value, sreg(i), clr_normal);
return true;
} else if (desc[0] == 'w') {
- PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n",
- clr_reg_name, WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
+ PrintF(stream_, "%s %s:%s 0x%08" PRIx32 "%s\n", clr_reg_name,
+ WRegNameForCode(i), clr_reg_value, wreg(i), clr_normal);
return true;
} else {
// X register names have a wide variety of starting characters, but anything
// else will be an X register.
- PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s\n",
- clr_reg_name, XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
+ PrintF(stream_, "%s %s:%s 0x%016" PRIx64 "%s\n", clr_reg_name,
+ XRegNameForCode(i), clr_reg_value, xreg(i), clr_normal);
return true;
}
}
-
void Simulator::Debug() {
#define COMMAND_SIZE 63
#define ARG_SIZE 255
@@ -3117,7 +3174,7 @@ void Simulator::Debug() {
char cmd[COMMAND_SIZE + 1];
char arg1[ARG_SIZE + 1];
char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
+ char* argv[3] = {cmd, arg1, arg2};
// Make sure to have a proper terminating character if reaching the limit.
cmd[COMMAND_SIZE] = 0;
@@ -3180,25 +3237,26 @@ void Simulator::Debug() {
// again. It will be cleared when exiting.
pc_modified_ = true;
- // next / n --------------------------------------------------------------
+ // next / n
+ // --------------------------------------------------------------
} else if ((strcmp(cmd, "next") == 0) || (strcmp(cmd, "n") == 0)) {
// Tell the simulator to break after the next executed BL.
break_on_next_ = true;
// Continue.
done = true;
- // continue / cont / c ---------------------------------------------------
- } else if ((strcmp(cmd, "continue") == 0) ||
- (strcmp(cmd, "cont") == 0) ||
+ // continue / cont / c
+ // ---------------------------------------------------
+ } else if ((strcmp(cmd, "continue") == 0) || (strcmp(cmd, "cont") == 0) ||
(strcmp(cmd, "c") == 0)) {
// Leave the debugger shell.
done = true;
- // disassemble / disasm / di ---------------------------------------------
+ // disassemble / disasm / di
+ // ---------------------------------------------
} else if (strcmp(cmd, "disassemble") == 0 ||
- strcmp(cmd, "disasm") == 0 ||
- strcmp(cmd, "di") == 0) {
- int64_t n_of_instrs_to_disasm = 10; // default value.
+ strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
+ int64_t n_of_instrs_to_disasm = 10; // default value.
int64_t address = reinterpret_cast<int64_t>(pc_); // default value.
if (argc >= 2) { // disasm <n of instrs>
GetValue(arg1, &n_of_instrs_to_disasm);
@@ -3212,7 +3270,8 @@ void Simulator::Debug() {
n_of_instrs_to_disasm);
PrintF("\n");
- // print / p -------------------------------------------------------------
+ // print / p
+ // -------------------------------------------------------------
} else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
if (argc == 2) {
if (strcmp(arg1, "all") == 0) {
@@ -3225,13 +3284,14 @@ void Simulator::Debug() {
}
} else {
PrintF(
- "print <register>\n"
- " Print the content of a register. (alias 'p')\n"
- " 'print all' will print all registers.\n"
- " Use 'printobject' to get more details about the value.\n");
+ "print <register>\n"
+ " Print the content of a register. (alias 'p')\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n");
}
- // printobject / po ------------------------------------------------------
+ // printobject / po
+ // ------------------------------------------------------
} else if ((strcmp(cmd, "printobject") == 0) ||
(strcmp(cmd, "po") == 0)) {
if (argc == 2) {
@@ -3241,7 +3301,7 @@ void Simulator::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
@@ -3250,12 +3310,14 @@ void Simulator::Debug() {
os << arg1 << " unrecognized\n";
}
} else {
- PrintF("printobject <value>\n"
- "printobject <register>\n"
- " Print details about the value. (alias 'po')\n");
+ PrintF(
+ "printobject <value>\n"
+ "printobject <register>\n"
+ " Print details about the value. (alias 'po')\n");
}
- // stack / mem ----------------------------------------------------------
+ // stack / mem
+ // ----------------------------------------------------------
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
int64_t* cur = nullptr;
int64_t* end = nullptr;
@@ -3293,12 +3355,13 @@ void Simulator::Debug() {
reinterpret_cast<uint64_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = isolate_->heap();
- if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
PrintF(" (");
if (obj.IsSmi()) {
PrintF("smi %" PRId32, Smi::ToInt(obj));
} else {
- obj->ShortPrint();
+ obj.ShortPrint();
}
PrintF(")");
}
@@ -3306,7 +3369,8 @@ void Simulator::Debug() {
cur++;
}
- // trace / t -------------------------------------------------------------
+ // trace / t
+ // -------------------------------------------------------------
} else if (strcmp(cmd, "trace") == 0 || strcmp(cmd, "t") == 0) {
if ((log_parameters() & (LOG_DISASM | LOG_REGS)) !=
(LOG_DISASM | LOG_REGS)) {
@@ -3317,7 +3381,8 @@ void Simulator::Debug() {
set_log_parameters(log_parameters() & ~(LOG_DISASM | LOG_REGS));
}
- // break / b -------------------------------------------------------------
+ // break / b
+ // -------------------------------------------------------------
} else if (strcmp(cmd, "break") == 0 || strcmp(cmd, "b") == 0) {
if (argc == 2) {
int64_t value;
@@ -3331,57 +3396,60 @@ void Simulator::Debug() {
PrintF("Use `break <address>` to set or disable a breakpoint\n");
}
- // gdb -------------------------------------------------------------------
+ // gdb
+ // -------------------------------------------------------------------
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("Relinquishing control to gdb.\n");
base::OS::DebugBreak();
PrintF("Regaining control from gdb.\n");
- // sysregs ---------------------------------------------------------------
+ // sysregs
+ // ---------------------------------------------------------------
} else if (strcmp(cmd, "sysregs") == 0) {
PrintSystemRegisters();
- // help / h --------------------------------------------------------------
+ // help / h
+ // --------------------------------------------------------------
} else if (strcmp(cmd, "help") == 0 || strcmp(cmd, "h") == 0) {
PrintF(
- "stepi / si\n"
- " stepi <n>\n"
- " Step <n> instructions.\n"
- "next / n\n"
- " Continue execution until a BL instruction is reached.\n"
- " At this point a breakpoint is set just after this BL.\n"
- " Then execution is resumed. It will probably later hit the\n"
- " breakpoint just set.\n"
- "continue / cont / c\n"
- " Continue execution from here.\n"
- "disassemble / disasm / di\n"
- " disassemble <n> <address>\n"
- " Disassemble <n> instructions from current <address>.\n"
- " By default <n> is 20 and <address> is the current pc.\n"
- "print / p\n"
- " print <register>\n"
- " Print the content of a register.\n"
- " 'print all' will print all registers.\n"
- " Use 'printobject' to get more details about the value.\n"
- "printobject / po\n"
- " printobject <value>\n"
- " printobject <register>\n"
- " Print details about the value.\n"
- "stack\n"
- " stack [<words>]\n"
- " Dump stack content, default dump 10 words\n"
- "mem\n"
- " mem <address> [<words>]\n"
- " Dump memory content, default dump 10 words\n"
- "trace / t\n"
- " Toggle disassembly and register tracing\n"
- "break / b\n"
- " break : list all breakpoints\n"
- " break <address> : set / enable / disable a breakpoint.\n"
- "gdb\n"
- " Enter gdb.\n"
- "sysregs\n"
- " Print all system registers (including NZCV).\n");
+ "stepi / si\n"
+ " stepi <n>\n"
+ " Step <n> instructions.\n"
+ "next / n\n"
+ " Continue execution until a BL instruction is reached.\n"
+ " At this point a breakpoint is set just after this BL.\n"
+ " Then execution is resumed. It will probably later hit the\n"
+ " breakpoint just set.\n"
+ "continue / cont / c\n"
+ " Continue execution from here.\n"
+ "disassemble / disasm / di\n"
+ " disassemble <n> <address>\n"
+ " Disassemble <n> instructions from current <address>.\n"
+ " By default <n> is 20 and <address> is the current pc.\n"
+ "print / p\n"
+ " print <register>\n"
+ " Print the content of a register.\n"
+ " 'print all' will print all registers.\n"
+ " Use 'printobject' to get more details about the value.\n"
+ "printobject / po\n"
+ " printobject <value>\n"
+ " printobject <register>\n"
+ " Print details about the value.\n"
+ "stack\n"
+ " stack [<words>]\n"
+ " Dump stack content, default dump 10 words\n"
+ "mem\n"
+ " mem <address> [<words>]\n"
+ " Dump memory content, default dump 10 words\n"
+ "trace / t\n"
+ " Toggle disassembly and register tracing\n"
+ "break / b\n"
+ " break : list all breakpoints\n"
+ " break <address> : set / enable / disable a breakpoint.\n"
+ "gdb\n"
+ " Enter gdb.\n"
+ "sysregs\n"
+ " Print all system registers (including NZCV).\n");
} else {
PrintF("Unknown command: %s\n", cmd);
PrintF("Use 'help' for more information.\n");
@@ -3393,7 +3461,6 @@ void Simulator::Debug() {
}
}
-
void Simulator::VisitException(Instruction* instr) {
switch (instr->Mask(ExceptionMask)) {
case HLT: {
@@ -3402,33 +3469,21 @@ void Simulator::VisitException(Instruction* instr) {
uint32_t code;
uint32_t parameters;
- memcpy(&code,
- pc_->InstructionAtOffset(kDebugCodeOffset),
- sizeof(code));
- memcpy(&parameters,
- pc_->InstructionAtOffset(kDebugParamsOffset),
+ memcpy(&code, pc_->InstructionAtOffset(kDebugCodeOffset), sizeof(code));
+ memcpy(&parameters, pc_->InstructionAtOffset(kDebugParamsOffset),
sizeof(parameters));
- char const *message =
- reinterpret_cast<char const*>(
- pc_->InstructionAtOffset(kDebugMessageOffset));
+ char const* message = reinterpret_cast<char const*>(
+ pc_->InstructionAtOffset(kDebugMessageOffset));
// Always print something when we hit a debug point that breaks.
// We are going to break, so printing something is not an issue in
// terms of speed.
if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
if (message != nullptr) {
- PrintF(stream_,
- "# %sDebugger hit %d: %s%s%s\n",
- clr_debug_number,
- code,
- clr_debug_message,
- message,
- clr_normal);
+ PrintF(stream_, "# %sDebugger hit %d: %s%s%s\n", clr_debug_number,
+ code, clr_debug_message, message, clr_normal);
} else {
- PrintF(stream_,
- "# %sDebugger hit %d.%s\n",
- clr_debug_number,
- code,
+ PrintF(stream_, "# %sDebugger hit %d.%s\n", clr_debug_number, code,
clr_normal);
}
}
@@ -3437,8 +3492,12 @@ void Simulator::VisitException(Instruction* instr) {
switch (parameters & kDebuggerTracingDirectivesMask) {
case TRACE_ENABLE:
set_log_parameters(log_parameters() | parameters);
- if (parameters & LOG_SYS_REGS) { PrintSystemRegisters(); }
- if (parameters & LOG_REGS) { PrintRegisters(); }
+ if (parameters & LOG_SYS_REGS) {
+ PrintSystemRegisters();
+ }
+ if (parameters & LOG_REGS) {
+ PrintRegisters();
+ }
if (parameters & LOG_VREGS) {
PrintVRegisters();
}
@@ -5625,17 +5684,14 @@ void Simulator::VisitNEONPerm(Instruction* instr) {
void Simulator::DoPrintf(Instruction* instr) {
DCHECK((instr->Mask(ExceptionMask) == HLT) &&
- (instr->ImmException() == kImmExceptionIsPrintf));
+ (instr->ImmException() == kImmExceptionIsPrintf));
// Read the arguments encoded inline in the instruction stream.
uint32_t arg_count;
uint32_t arg_pattern_list;
STATIC_ASSERT(sizeof(*instr) == 1);
- memcpy(&arg_count,
- instr + kPrintfArgCountOffset,
- sizeof(arg_count));
- memcpy(&arg_pattern_list,
- instr + kPrintfArgPatternListOffset,
+ memcpy(&arg_count, instr + kPrintfArgCountOffset, sizeof(arg_count));
+ memcpy(&arg_pattern_list, instr + kPrintfArgPatternListOffset,
sizeof(arg_pattern_list));
DCHECK_LE(arg_count, kPrintfMaxArgCount);
@@ -5650,17 +5706,17 @@ void Simulator::DoPrintf(Instruction* instr) {
// Allocate space for the format string. We take a copy, so we can modify it.
// Leave enough space for one extra character per expected argument (plus the
// '\0' termination).
- const char * format_base = reg<const char *>(0);
+ const char* format_base = reg<const char*>(0);
DCHECK_NOT_NULL(format_base);
size_t length = strlen(format_base) + 1;
- char * const format = new char[length + arg_count];
+ char* const format = new char[length + arg_count];
// A list of chunks, each with exactly one format placeholder.
- const char * chunks[kPrintfMaxArgCount];
+ const char* chunks[kPrintfMaxArgCount];
// Copy the format string and search for format placeholders.
uint32_t placeholder_count = 0;
- char * format_scratch = format;
+ char* format_scratch = format;
for (size_t i = 0; i < length; i++) {
if (format_base[i] != '%') {
*format_scratch++ = format_base[i];
@@ -5699,8 +5755,8 @@ void Simulator::DoPrintf(Instruction* instr) {
// Because '\0' is inserted before each placeholder, the first string in
// 'format' contains no format placeholders and should be printed literally.
int result = fprintf(stream_, "%s", format);
- int pcs_r = 1; // Start at x1. x0 holds the format string.
- int pcs_f = 0; // Start at d0.
+ int pcs_r = 1; // Start at x1. x0 holds the format string.
+ int pcs_f = 0; // Start at d0.
if (result >= 0) {
for (uint32_t i = 0; i < placeholder_count; i++) {
int part_result = -1;
@@ -5717,7 +5773,8 @@ void Simulator::DoPrintf(Instruction* instr) {
case kPrintfArgD:
part_result = fprintf(stream_, chunks[i], dreg(pcs_f++));
break;
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
}
if (part_result < 0) {
@@ -5926,6 +5983,23 @@ void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
processor->next_ = nullptr;
}
+#undef SScanF
+#undef COLOUR
+#undef COLOUR_BOLD
+#undef NORMAL
+#undef GREY
+#undef RED
+#undef GREEN
+#undef YELLOW
+#undef BLUE
+#undef MAGENTA
+#undef CYAN
+#undef WHITE
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+#undef STR
+#undef XSTR
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/execution/arm64/simulator-arm64.h
index 12f9dcba8c..ca1cef61ae 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/execution/arm64/simulator-arm64.h
@@ -2,26 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ARM64_SIMULATOR_ARM64_H_
-#define V8_ARM64_SIMULATOR_ARM64_H_
+#ifndef V8_EXECUTION_ARM64_SIMULATOR_ARM64_H_
+#define V8_EXECUTION_ARM64_SIMULATOR_ARM64_H_
// globals.h defines USE_SIMULATOR.
-#include "src/globals.h"
+#include "src/common/globals.h"
#if defined(USE_SIMULATOR)
#include <stdarg.h>
#include <vector>
-#include "src/allocation.h"
-#include "src/arm64/assembler-arm64.h"
-#include "src/arm64/decoder-arm64.h"
-#include "src/arm64/disasm-arm64.h"
-#include "src/arm64/instrument-arm64.h"
-#include "src/assembler.h"
#include "src/base/compiler-specific.h"
-#include "src/simulator-base.h"
-#include "src/utils.h"
+#include "src/codegen/arm64/assembler-arm64.h"
+#include "src/codegen/arm64/decoder-arm64.h"
+#include "src/codegen/arm64/instrument-arm64.h"
+#include "src/codegen/assembler.h"
+#include "src/diagnostics/arm64/disasm-arm64.h"
+#include "src/execution/simulator-base.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -271,11 +271,9 @@ class SimSystemRegister {
public:
// The default constructor represents a register which has no writable bits.
// It is not possible to set its value to anything other than 0.
- SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
+ SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) {}
- uint32_t RawValue() const {
- return value_;
- }
+ uint32_t RawValue() const { return value_; }
void SetRawValue(uint32_t new_value) {
value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
@@ -294,12 +292,12 @@ class SimSystemRegister {
// Default system register values.
static SimSystemRegister DefaultValueFor(SystemRegister id);
-#define DEFINE_GETTER(Name, HighBit, LowBit, Func, Type) \
- Type Name() const { return static_cast<Type>(Func(HighBit, LowBit)); } \
- void Set##Name(Type bits) { \
- SetBits(HighBit, LowBit, static_cast<Type>(bits)); \
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func, Type) \
+ Type Name() const { return static_cast<Type>(Func(HighBit, LowBit)); } \
+ void Set##Name(Type bits) { \
+ SetBits(HighBit, LowBit, static_cast<Type>(bits)); \
}
-#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
#undef DEFINE_ZERO_BITS
@@ -310,18 +308,17 @@ class SimSystemRegister {
// bits are "read-as-zero, write-ignored". The write_ignore_mask argument
// describes the bits which are not modifiable.
SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
- : value_(value), write_ignore_mask_(write_ignore_mask) { }
+ : value_(value), write_ignore_mask_(write_ignore_mask) {}
uint32_t value_;
uint32_t write_ignore_mask_;
};
-
// Represent a register (r0-r31, v0-v31).
template <int kSizeInBytes>
class SimRegisterBase {
public:
- template<typename T>
+ template <typename T>
void Set(T new_value) {
static_assert(sizeof(new_value) <= kSizeInBytes,
"Size of new_value must be <= size of template type.");
@@ -372,8 +369,8 @@ class SimRegisterBase {
void NotifyRegisterWrite() { written_since_last_log_ = true; }
};
-typedef SimRegisterBase<kXRegSize> SimRegister; // r0-r31
-typedef SimRegisterBase<kQRegSize> SimVRegister; // v0-v31
+using SimRegister = SimRegisterBase<kXRegSize>; // r0-r31
+using SimVRegister = SimRegisterBase<kQRegSize>; // v0-v31
// Representation of a vector register, with typed getters and setters for lanes
// and additional information to represent lane state.
@@ -672,7 +669,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
// Only arguments up to 64 bits in size are supported.
class CallArgument {
public:
- template<typename T>
+ template <typename T>
explicit CallArgument(T argument) {
bits_ = 0;
DCHECK(sizeof(argument) <= sizeof(bits_));
@@ -772,9 +769,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
pc_modified_ = false;
}
- virtual void Decode(Instruction* instr) {
- decoder_->Decode(instr);
- }
+ virtual void Decode(Instruction* instr) { decoder_->Decode(instr); }
void ExecuteInstruction() {
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstrSize));
@@ -785,10 +780,10 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
CheckBreakpoints();
}
- // Declare all Visitor functions.
- #define DECLARE(A) void Visit##A(Instruction* instr);
+// Declare all Visitor functions.
+#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
- #undef DECLARE
+#undef DECLARE
bool IsZeroRegister(unsigned code, Reg31Mode r31mode) const {
return ((code == 31) && (r31mode == Reg31IsZeroRegister));
@@ -798,7 +793,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
// Return 'size' bits of the value of an integer register, as the specified
// type. The value is zero-extended to fill the result.
//
- template<typename T>
+ template <typename T>
T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
if (IsZeroRegister(code, r31mode)) {
@@ -820,7 +815,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
// Write 'value' into an integer register. The value is zero-extended. This
// behaviour matches AArch64 register writes.
- template<typename T>
+ template <typename T>
void set_reg(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg_no_log(code, value, r31mode);
@@ -859,13 +854,13 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
}
// Commonly-used special cases.
- template<typename T>
+ template <typename T>
void set_lr(T value) {
DCHECK_EQ(sizeof(T), static_cast<unsigned>(kSystemPointerSize));
set_reg(kLinkRegCode, value);
}
- template<typename T>
+ template <typename T>
void set_sp(T value) {
DCHECK_EQ(sizeof(T), static_cast<unsigned>(kSystemPointerSize));
set_reg(31, value, Reg31IsStackPointer);
@@ -895,9 +890,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
inline SimVRegister& vreg(unsigned code) { return vregisters_[code]; }
int64_t sp() { return xreg(31, Reg31IsStackPointer); }
- int64_t fp() {
- return xreg(kFramePointerRegCode, Reg31IsStackPointer);
- }
+ int64_t fp() { return xreg(kFramePointerRegCode, Reg31IsStackPointer); }
Instruction* lr() { return reg<Instruction*>(kLinkRegCode); }
Address get_sp() const { return reg<Address>(31, Reg31IsStackPointer); }
@@ -1271,10 +1264,10 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
void PrintVRegisterFPHelper(unsigned code, unsigned lane_size_in_bytes,
int lane_count = 1, int rightmost_lane = 0);
- static inline const char* WRegNameForCode(unsigned code,
- Reg31Mode mode = Reg31IsZeroRegister);
- static inline const char* XRegNameForCode(unsigned code,
- Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* WRegNameForCode(
+ unsigned code, Reg31Mode mode = Reg31IsZeroRegister);
+ static inline const char* XRegNameForCode(
+ unsigned code, Reg31Mode mode = Reg31IsZeroRegister);
static inline const char* SRegNameForCode(unsigned code);
static inline const char* DRegNameForCode(unsigned code);
static inline const char* VRegNameForCode(unsigned code);
@@ -1321,29 +1314,23 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
}
}
- bool ConditionFailed(Condition cond) {
- return !ConditionPassed(cond);
- }
+ bool ConditionFailed(Condition cond) { return !ConditionPassed(cond); }
- template<typename T>
+ template <typename T>
void AddSubHelper(Instruction* instr, T op2);
template <typename T>
T AddWithCarry(bool set_flags, T left, T right, int carry_in = 0);
- template<typename T>
+ template <typename T>
void AddSubWithCarry(Instruction* instr);
- template<typename T>
+ template <typename T>
void LogicalHelper(Instruction* instr, T op2);
- template<typename T>
+ template <typename T>
void ConditionalCompareHelper(Instruction* instr, T op2);
- void LoadStoreHelper(Instruction* instr,
- int64_t offset,
- AddrMode addrmode);
+ void LoadStoreHelper(Instruction* instr, int64_t offset, AddrMode addrmode);
void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
uintptr_t LoadStoreAddress(unsigned addr_reg, int64_t offset,
AddrMode addrmode);
- void LoadStoreWriteBack(unsigned addr_reg,
- int64_t offset,
- AddrMode addrmode);
+ void LoadStoreWriteBack(unsigned addr_reg, int64_t offset, AddrMode addrmode);
void NEONLoadStoreMultiStructHelper(const Instruction* instr,
AddrMode addr_mode);
void NEONLoadStoreSingleStructHelper(const Instruction* instr,
@@ -1371,13 +1358,9 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
}
template <typename T>
- T ShiftOperand(T value,
- Shift shift_type,
- unsigned amount);
+ T ShiftOperand(T value, Shift shift_type, unsigned amount);
template <typename T>
- T ExtendValue(T value,
- Extend extend_type,
- unsigned left_shift = 0);
+ T ExtendValue(T value, Extend extend_type, unsigned left_shift = 0);
template <typename T>
void Extract(Instruction* instr);
template <typename T>
@@ -1450,11 +1433,11 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
LogicVRegister pmul(VectorFormat vform, LogicVRegister dst,
const LogicVRegister& src1, const LogicVRegister& src2);
- typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform,
- LogicVRegister dst,
- const LogicVRegister& src1,
- const LogicVRegister& src2,
- int index);
+ using ByElementOp = LogicVRegister (Simulator::*)(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
LogicVRegister fmul(VectorFormat vform, LogicVRegister dst,
const LogicVRegister& src1, const LogicVRegister& src2,
int index);
@@ -1998,7 +1981,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
LogicVRegister urecpe(VectorFormat vform, LogicVRegister dst,
const LogicVRegister& src);
- typedef float (Simulator::*FPMinMaxOp)(float a, float b);
+ using FPMinMaxOp = float (Simulator::*)(float a, float b);
LogicVRegister FMinMaxV(VectorFormat vform, LogicVRegister dst,
const LogicVRegister& src, FPMinMaxOp Op);
@@ -2153,9 +2136,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
return (result >> (sizeof(T) * 8 - 1)) & 1;
}
- static int CalcZFlag(uint64_t result) {
- return result == 0;
- }
+ static int CalcZFlag(uint64_t result) { return result == 0; }
static const uint32_t kConditionFlagsMask = 0xf0000000;
@@ -2348,7 +2329,7 @@ class Simulator : public DecoderVisitor, public SimulatorBase {
}
}
- int log_parameters_;
+ int log_parameters_;
Isolate* isolate_;
};
@@ -2366,4 +2347,4 @@ inline float Simulator::FPDefaultNaN<float>() {
} // namespace v8
#endif // defined(USE_SIMULATOR)
-#endif // V8_ARM64_SIMULATOR_ARM64_H_
+#endif // V8_EXECUTION_ARM64_SIMULATOR_ARM64_H_
diff --git a/deps/v8/src/arm64/simulator-logic-arm64.cc b/deps/v8/src/execution/arm64/simulator-logic-arm64.cc
index c074f99fac..d855c8b708 100644
--- a/deps/v8/src/arm64/simulator-logic-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-logic-arm64.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/simulator-arm64.h"
+#include "src/execution/arm64/simulator-arm64.h"
#if defined(USE_SIMULATOR)
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution/execution.cc
index 187ec46479..285b4b2134 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/execution.h"
+#include "src/execution/execution.h"
-#include "src/api-inl.h"
-#include "src/bootstrapper.h"
+#include "src/api/api-inl.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/debug/debug.h"
-#include "src/isolate-inl.h"
-#include "src/runtime-profiler.h"
-#include "src/vm-state-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/wasm/wasm-engine.h"
namespace v8 {
@@ -194,10 +195,10 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
if (params.target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(params.target);
if ((!params.is_construct || function->IsConstructor()) &&
- function->shared()->IsApiFunction() &&
- !function->shared()->BreakAtEntry()) {
+ function->shared().IsApiFunction() &&
+ !function->shared().BreakAtEntry()) {
SaveAndSwitchContext save(isolate, function->context());
- DCHECK(function->context()->global_object()->IsJSGlobalObject());
+ DCHECK(function->context().global_object().IsJSGlobalObject());
Handle<Object> receiver = params.is_construct
? isolate->factory()->the_hole_value()
@@ -285,12 +286,12 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- value->ObjectVerify(isolate);
+ value.ObjectVerify(isolate);
}
#endif
// Update the pending exception flag and return the value.
- bool has_exception = value->IsException(isolate);
+ bool has_exception = value.IsException(isolate);
DCHECK(has_exception == isolate->has_pending_exception());
if (has_exception) {
if (params.message_handling == Execution::MessageHandling::kReport) {
@@ -359,6 +360,16 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
argc, argv));
}
+MaybeHandle<Object> Execution::CallBuiltin(Isolate* isolate,
+ Handle<JSFunction> builtin,
+ Handle<Object> receiver, int argc,
+ Handle<Object> argv[]) {
+ DCHECK(builtin->code().is_builtin());
+ DisableBreak no_break(isolate->debug());
+ return Invoke(isolate, InvokeParams::SetUpForCall(isolate, builtin, receiver,
+ argc, argv));
+}
+
// static
MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
int argc, Handle<Object> argv[]) {
@@ -410,7 +421,6 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
thread_local_.real_jslimit_ = jslimit;
}
-
void StackGuard::AdjustStackLimitForSimulator() {
ExecutionAccess access(isolate_);
uintptr_t climit = thread_local_.real_climit_;
@@ -423,7 +433,6 @@ void StackGuard::AdjustStackLimitForSimulator() {
}
}
-
void StackGuard::EnableInterrupts() {
ExecutionAccess access(isolate_);
if (has_pending_interrupts(access)) {
@@ -431,7 +440,6 @@ void StackGuard::EnableInterrupts() {
}
}
-
void StackGuard::DisableInterrupts() {
ExecutionAccess access(isolate_);
reset_limits(access);
@@ -489,13 +497,11 @@ void StackGuard::PopInterruptsScope() {
thread_local_.interrupt_scopes_ = top->prev_;
}
-
bool StackGuard::CheckInterrupt(InterruptFlag flag) {
ExecutionAccess access(isolate_);
return thread_local_.interrupt_flags_ & flag;
}
-
void StackGuard::RequestInterrupt(InterruptFlag flag) {
ExecutionAccess access(isolate_);
// Check the chain of InterruptsScope for interception.
@@ -512,7 +518,6 @@ void StackGuard::RequestInterrupt(InterruptFlag flag) {
isolate_->futex_wait_list_node()->NotifyWake();
}
-
void StackGuard::ClearInterrupt(InterruptFlag flag) {
ExecutionAccess access(isolate_);
// Clear the interrupt flag from the chain of InterruptsScope.
@@ -526,7 +531,6 @@ void StackGuard::ClearInterrupt(InterruptFlag flag) {
if (!has_pending_interrupts(access)) reset_limits(access);
}
-
bool StackGuard::CheckAndClearInterrupt(InterruptFlag flag) {
ExecutionAccess access(isolate_);
bool result = (thread_local_.interrupt_flags_ & flag);
@@ -535,7 +539,6 @@ bool StackGuard::CheckAndClearInterrupt(InterruptFlag flag) {
return result;
}
-
char* StackGuard::ArchiveStackGuard(char* to) {
ExecutionAccess access(isolate_);
MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
@@ -552,7 +555,6 @@ char* StackGuard::ArchiveStackGuard(char* to) {
return to + sizeof(ThreadLocal);
}
-
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access(isolate_);
MemCopy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
@@ -560,14 +562,12 @@ char* StackGuard::RestoreStackGuard(char* from) {
return from + sizeof(ThreadLocal);
}
-
void StackGuard::FreeThreadResources() {
Isolate::PerIsolateThreadData* per_thread =
isolate_->FindOrAllocatePerThreadDataForThisThread();
per_thread->set_stack_limit(thread_local_.real_climit_);
}
-
void StackGuard::ThreadLocal::Clear() {
real_jslimit_ = kIllegalLimit;
set_jslimit(kIllegalLimit);
@@ -577,7 +577,6 @@ void StackGuard::ThreadLocal::Clear() {
interrupt_flags_ = 0;
}
-
bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
bool should_set_stack_limits = false;
if (real_climit_ == kIllegalLimit) {
@@ -595,13 +594,11 @@ bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
return should_set_stack_limits;
}
-
void StackGuard::ClearThread(const ExecutionAccess& lock) {
thread_local_.Clear();
isolate_->heap()->SetStackLimits();
}
-
void StackGuard::InitThread(const ExecutionAccess& lock) {
if (thread_local_.Initialize(isolate_)) isolate_->heap()->SetStackLimits();
Isolate::PerIsolateThreadData* per_thread =
@@ -613,7 +610,6 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
}
}
-
// --- C a l l s t o n a t i v e s ---
Object StackGuard::HandleInterrupts() {
@@ -661,10 +657,15 @@ Object StackGuard::HandleInterrupts() {
}
if (CheckAndClearInterrupt(LOG_WASM_CODE)) {
- TRACE_EVENT0("v8.wasm", "LogCode");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "LogCode");
isolate_->wasm_engine()->LogOutstandingCodesForIsolate(isolate_);
}
+ if (CheckAndClearInterrupt(WASM_CODE_GC)) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "WasmCodeGC");
+ isolate_->wasm_engine()->ReportLiveCodeFromStackForGC(isolate_);
+ }
+
isolate_->counters()->stack_interrupts()->Increment();
isolate_->counters()->runtime_profiler_ticks()->Increment();
isolate_->runtime_profiler()->MarkCandidatesForOptimization();
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution/execution.h
index 5f7bc5e788..48a8d64424 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution/execution.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_EXECUTION_H_
-#define V8_EXECUTION_H_
+#ifndef V8_EXECUTION_EXECUTION_H_
+#define V8_EXECUTION_EXECUTION_H_
#include "src/base/atomicops.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -32,6 +32,10 @@ class Execution final : public AllStatic {
Isolate* isolate, Handle<Object> callable, Handle<Object> receiver,
int argc, Handle<Object> argv[]);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> CallBuiltin(
+ Isolate* isolate, Handle<JSFunction> builtin, Handle<Object> receiver,
+ int argc, Handle<Object> argv[]);
+
// Construct object from function, the caller supplies an array of
// arguments.
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> New(
@@ -58,7 +62,6 @@ class Execution final : public AllStatic {
MaybeHandle<Object>* exception_out);
};
-
class ExecutionAccess;
class InterruptsScope;
@@ -97,7 +100,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
V(API_INTERRUPT, ApiInterrupt, 3) \
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 4) \
V(GROW_SHARED_MEMORY, GrowSharedMemory, 5) \
- V(LOG_WASM_CODE, LogWasmCode, 6)
+ V(LOG_WASM_CODE, LogWasmCode, 6) \
+ V(WASM_CODE_GC, WasmCodeGC, 7)
#define V(NAME, Name, id) \
inline bool Check##Name() { return CheckInterrupt(NAME); } \
@@ -109,12 +113,12 @@ class V8_EXPORT_PRIVATE StackGuard final {
// Flag used to set the interrupt causes.
enum InterruptFlag {
- #define V(NAME, Name, id) NAME = (1 << id),
+#define V(NAME, Name, id) NAME = (1 << id),
INTERRUPT_LIST(V)
- #undef V
- #define V(NAME, Name, id) NAME |
- ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
- #undef V
+#undef V
+#define V(NAME, Name, id) NAME |
+ ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
+#undef V
};
uintptr_t climit() { return thread_local_.climit(); }
@@ -122,12 +126,8 @@ class V8_EXPORT_PRIVATE StackGuard final {
// This provides an asynchronous read of the stack limits for the current
// thread. There are no locks protecting this, but it is assumed that you
// have the global V8 lock if you are using multiple V8 threads.
- uintptr_t real_climit() {
- return thread_local_.real_climit_;
- }
- uintptr_t real_jslimit() {
- return thread_local_.real_jslimit_;
- }
+ uintptr_t real_climit() { return thread_local_.real_climit_; }
+ uintptr_t real_jslimit() { return thread_local_.real_jslimit_; }
Address address_of_jslimit() {
return reinterpret_cast<Address>(&thread_local_.jslimit_);
}
@@ -192,7 +192,7 @@ class V8_EXPORT_PRIVATE StackGuard final {
// fail. Both the generated code and the runtime system check against the
// one without the real_ prefix.
uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM.
- uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
+ uintptr_t real_climit_; // Actual C++ stack limit set for the VM.
// jslimit_ and climit_ can be read without any lock.
// Writing requires the ExecutionAccess lock.
@@ -233,4 +233,4 @@ class V8_EXPORT_PRIVATE StackGuard final {
} // namespace internal
} // namespace v8
-#endif // V8_EXECUTION_H_
+#endif // V8_EXECUTION_EXECUTION_H_
diff --git a/deps/v8/src/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index e91339d6be..7ddee5689e 100644
--- a/deps/v8/src/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FRAME_CONSTANTS_H_
-#define V8_FRAME_CONSTANTS_H_
+#ifndef V8_EXECUTION_FRAME_CONSTANTS_H_
+#define V8_EXECUTION_FRAME_CONSTANTS_H_
-#include "src/flags.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
@@ -210,13 +210,13 @@ class TypedFrameConstants : public CommonFrameConstants {
(TypedFrameConstants::kFirstPushedFrameValueOffset - (x)*kSystemPointerSize)
#define TYPED_FRAME_SIZE(count) \
(TypedFrameConstants::kFixedFrameSize + (count)*kSystemPointerSize)
-#define TYPED_FRAME_SIZE_FROM_SP(count) \
+#define TYPED_FRAME_SIZE_FROM_FP(count) \
(TypedFrameConstants::kFixedFrameSizeFromFp + (count)*kSystemPointerSize)
#define DEFINE_TYPED_FRAME_SIZES(count) \
static constexpr int kFixedFrameSize = TYPED_FRAME_SIZE(count); \
static constexpr int kFixedSlotCount = kFixedFrameSize / kSystemPointerSize; \
static constexpr int kFixedFrameSizeFromFp = \
- TYPED_FRAME_SIZE_FROM_SP(count); \
+ TYPED_FRAME_SIZE_FROM_FP(count); \
static constexpr int kFixedSlotCountFromFp = \
kFixedFrameSizeFromFp / kSystemPointerSize
@@ -256,6 +256,13 @@ class WasmCompiledFrameConstants : public TypedFrameConstants {
DEFINE_TYPED_FRAME_SIZES(1);
};
+class WasmExitFrameConstants : public WasmCompiledFrameConstants {
+ public:
+ // FP-relative.
+ static const int kCallingPCOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ DEFINE_TYPED_FRAME_SIZES(2);
+};
+
class BuiltinContinuationFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
@@ -344,23 +351,23 @@ inline static int FrameSlotToFPOffset(int slot) {
} // namespace v8
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/frame-constants-ia32.h" // NOLINT
+#include "src/execution/ia32/frame-constants-ia32.h" // NOLINT
#elif V8_TARGET_ARCH_X64
-#include "src/x64/frame-constants-x64.h" // NOLINT
+#include "src/execution/x64/frame-constants-x64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/frame-constants-arm64.h" // NOLINT
+#include "src/execution/arm64/frame-constants-arm64.h" // NOLINT
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/frame-constants-arm.h" // NOLINT
+#include "src/execution/arm/frame-constants-arm.h" // NOLINT
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/frame-constants-ppc.h" // NOLINT
+#include "src/execution/ppc/frame-constants-ppc.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/frame-constants-mips.h" // NOLINT
+#include "src/execution/mips/frame-constants-mips.h" // NOLINT
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/frame-constants-mips64.h" // NOLINT
+#include "src/execution/mips64/frame-constants-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
-#include "src/s390/frame-constants-s390.h" // NOLINT
+#include "src/execution/s390/frame-constants-s390.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
-#endif // V8_FRAME_CONSTANTS_H_
+#endif // V8_EXECUTION_FRAME_CONSTANTS_H_
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/execution/frames-inl.h
index f3f6805aa7..aeb43fe0a6 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/execution/frames-inl.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FRAMES_INL_H_
-#define V8_FRAMES_INL_H_
+#ifndef V8_EXECUTION_FRAMES_INL_H_
+#define V8_EXECUTION_FRAMES_INL_H_
-#include "src/frame-constants.h"
-#include "src/frames.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/v8memory.h"
+#include "src/common/v8memory.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -107,7 +107,7 @@ inline Object BuiltinExitFrame::receiver_slot_object() const {
// fp[4]: argc.
// fp[2 + argc - 1]: receiver.
Object argc_slot = argc_slot_object();
- DCHECK(argc_slot->IsSmi());
+ DCHECK(argc_slot.IsSmi());
int argc = Smi::ToInt(argc_slot);
const int receiverOffset = BuiltinExitFrameConstants::kNewTargetOffset +
@@ -138,7 +138,7 @@ inline Object StandardFrame::GetExpression(int index) const {
}
inline void StandardFrame::SetExpression(int index, Object value) {
- Memory<Address>(GetExpressionAddress(index)) = value->ptr();
+ Memory<Address>(GetExpressionAddress(index)) = value.ptr();
}
inline Address StandardFrame::caller_fp() const {
@@ -187,7 +187,7 @@ Address JavaScriptFrame::GetParameterSlot(int index) const {
}
inline void JavaScriptFrame::set_receiver(Object value) {
- Memory<Address>(GetParameterSlot(-1)) = value->ptr();
+ Memory<Address>(GetParameterSlot(-1)) = value.ptr();
}
inline bool JavaScriptFrame::has_adapted_arguments() const {
@@ -223,6 +223,9 @@ inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
inline WasmCompiledFrame::WasmCompiledFrame(StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {}
+inline WasmExitFrame::WasmExitFrame(StackFrameIteratorBase* iterator)
+ : WasmCompiledFrame(iterator) {}
+
inline WasmInterpreterEntryFrame::WasmInterpreterEntryFrame(
StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {}
@@ -311,4 +314,4 @@ inline StackFrame* SafeStackFrameIterator::frame() const {
} // namespace internal
} // namespace v8
-#endif // V8_FRAMES_INL_H_
+#endif // V8_EXECUTION_FRAMES_INL_H_
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/execution/frames.cc
index 0692d215c0..af660a338e 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -2,25 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/frames.h"
+#include "src/execution/frames.h"
#include <memory>
#include <sstream>
#include "src/base/bits.h"
-#include "src/deoptimizer.h"
-#include "src/frames-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/vm-state-inl.h"
#include "src/ic/ic-stats.h"
-#include "src/macro-assembler.h"
+#include "src/logging/counters.h"
#include "src/objects/code.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
-#include "src/register-configuration.h"
-#include "src/safepoint-table.h"
+#include "src/objects/visitors.h"
#include "src/snapshot/snapshot.h"
-#include "src/string-stream.h"
-#include "src/visitors.h"
-#include "src/vm-state-inl.h"
+#include "src/strings/string-stream.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -55,10 +56,8 @@ class StackHandlerIterator {
StackHandler* handler_;
};
-
// -------------------------------------------------------------------------
-
#define INITIALIZE_SINGLETON(type, field) field##_(this),
StackFrameIteratorBase::StackFrameIteratorBase(Isolate* isolate,
bool can_access_heap_objects)
@@ -98,25 +97,22 @@ void StackFrameIterator::Advance() {
DCHECK(!done() || handler_ == nullptr);
}
-
void StackFrameIterator::Reset(ThreadLocalTop* top) {
StackFrame::State state;
- StackFrame::Type type = ExitFrame::GetStateForFramePointer(
- Isolate::c_entry_fp(top), &state);
+ StackFrame::Type type =
+ ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
handler_ = StackHandler::FromAddress(Isolate::handler(top));
frame_ = SingletonFor(type, &state);
}
-
StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
- StackFrame::State* state) {
+ StackFrame::State* state) {
StackFrame* result = SingletonFor(type);
DCHECK((!result) == (type == StackFrame::NONE));
if (result) result->state_ = *state;
return result;
}
-
StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
#define FRAME_TYPE_CASE(type, field) \
case StackFrame::type: \
@@ -126,7 +122,8 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
case StackFrame::NONE:
return nullptr;
STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
- default: break;
+ default:
+ break;
}
return nullptr;
@@ -162,11 +159,11 @@ void StackTraceFrameIterator::Advance() {
bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
if (frame->is_java_script()) {
- JavaScriptFrame* jsFrame = static_cast<JavaScriptFrame*>(frame);
- if (!jsFrame->function()->IsJSFunction()) return false;
- return jsFrame->function()->shared()->IsSubjectToDebugging();
+ JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
+ if (!js_frame->function().IsJSFunction()) return false;
+ return js_frame->function().shared().IsSubjectToDebugging();
}
- // apart from javascript, only wasm is valid
+ // Apart from JavaScript frames, only Wasm frames are valid.
return frame->is_wasm();
}
@@ -183,9 +180,9 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
Code interpreter_bytecode_dispatch =
isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- if (interpreter_entry_trampoline->contains(pc) ||
- interpreter_bytecode_advance->contains(pc) ||
- interpreter_bytecode_dispatch->contains(pc)) {
+ if (interpreter_entry_trampoline.contains(pc) ||
+ interpreter_bytecode_advance.contains(pc) ||
+ interpreter_bytecode_dispatch.contains(pc)) {
return true;
} else if (FLAG_interpreted_frames_native_stack) {
intptr_t marker = Memory<intptr_t>(
@@ -197,33 +194,61 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
Memory<Address>(state->fp + StandardFrameConstants::kFunctionOffset));
// There's no need to run a full ContainsSlow if we know the frame can't be
// an InterpretedFrame, so we do these fast checks first
- if (StackFrame::IsTypeMarker(marker) || maybe_function->IsSmi()) {
+ if (StackFrame::IsTypeMarker(marker) || maybe_function.IsSmi()) {
return false;
} else if (!isolate->heap()->InSpaceSlow(pc, CODE_SPACE)) {
return false;
}
interpreter_entry_trampoline =
isolate->heap()->GcSafeFindCodeForInnerPointer(pc);
- return interpreter_entry_trampoline->is_interpreter_trampoline_builtin();
+ return interpreter_entry_trampoline.is_interpreter_trampoline_builtin();
} else {
return false;
}
}
-DISABLE_ASAN Address ReadMemoryAt(Address address) {
- return Memory<Address>(address);
-}
-
} // namespace
-SafeStackFrameIterator::SafeStackFrameIterator(
- Isolate* isolate,
- Address fp, Address sp, Address js_entry_sp)
+bool SafeStackFrameIterator::IsNoFrameBytecodeHandlerPc(Isolate* isolate,
+ Address pc,
+ Address fp) const {
+ // Return false for builds with non-embedded bytecode handlers.
+ if (Isolate::CurrentEmbeddedBlob() == nullptr) return false;
+
+ EmbeddedData d = EmbeddedData::FromBlob();
+ if (pc < d.InstructionStartOfBytecodeHandlers() ||
+ pc >= d.InstructionEndOfBytecodeHandlers()) {
+ // Not a bytecode handler pc address.
+ return false;
+ }
+
+ if (!IsValidStackAddress(fp +
+ CommonFrameConstants::kContextOrFrameTypeOffset)) {
+ return false;
+ }
+
+ // Check if top stack frame is a bytecode handler stub frame.
+ MSAN_MEMORY_IS_INITIALIZED(
+ fp + CommonFrameConstants::kContextOrFrameTypeOffset, kSystemPointerSize);
+ intptr_t marker =
+ Memory<intptr_t>(fp + CommonFrameConstants::kContextOrFrameTypeOffset);
+ if (StackFrame::IsTypeMarker(marker) &&
+ StackFrame::MarkerToType(marker) == StackFrame::STUB) {
+ // Bytecode handler built a frame.
+ return false;
+ }
+ return true;
+}
+
+SafeStackFrameIterator::SafeStackFrameIterator(Isolate* isolate, Address pc,
+ Address fp, Address sp,
+ Address lr, Address js_entry_sp)
: StackFrameIteratorBase(isolate, false),
low_bound_(sp),
high_bound_(js_entry_sp),
top_frame_type_(StackFrame::NONE),
- external_callback_scope_(isolate->external_callback_scope()) {
+ external_callback_scope_(isolate->external_callback_scope()),
+ top_link_register_(lr) {
StackFrame::State state;
StackFrame::Type type;
ThreadLocalTop* top = isolate->thread_local_top();
@@ -255,14 +280,22 @@ SafeStackFrameIterator::SafeStackFrameIterator(
state.pc_address = StackFrame::ResolveReturnAddressLocation(
reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp)));
- // If the top of stack is a return address to the interpreter trampoline,
- // then we are likely in a bytecode handler with elided frame. In that
- // case, set the PC properly and make sure we do not drop the frame.
- if (IsValidStackAddress(sp)) {
- MSAN_MEMORY_IS_INITIALIZED(sp, kSystemPointerSize);
- Address tos = ReadMemoryAt(sp);
- if (IsInterpreterFramePc(isolate, tos, &state)) {
- state.pc_address = reinterpret_cast<Address*>(sp);
+ // If the current PC is in a bytecode handler, the top stack frame isn't
+ // the bytecode handler's frame and the top of stack or link register is a
+ // return address into the interpreter entry trampoline, then we are likely
+ // in a bytecode handler with elided frame. In that case, set the PC
+ // properly and make sure we do not drop the frame.
+ if (IsNoFrameBytecodeHandlerPc(isolate, pc, fp)) {
+ Address* tos_location = nullptr;
+ if (top_link_register_) {
+ tos_location = &top_link_register_;
+ } else if (IsValidStackAddress(sp)) {
+ MSAN_MEMORY_IS_INITIALIZED(sp, kSystemPointerSize);
+ tos_location = reinterpret_cast<Address*>(sp);
+ }
+
+ if (IsInterpreterFramePc(isolate, *tos_location, &state)) {
+ state.pc_address = tos_location;
advance_frame = false;
}
}
@@ -300,7 +333,6 @@ SafeStackFrameIterator::SafeStackFrameIterator(
if (advance_frame && frame_) Advance();
}
-
bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
Address c_entry_fp = Isolate::c_entry_fp(top);
if (!IsValidExitFrame(c_entry_fp)) return false;
@@ -311,7 +343,6 @@ bool SafeStackFrameIterator::IsValidTop(ThreadLocalTop* top) const {
return c_entry_fp < handler;
}
-
void SafeStackFrameIterator::AdvanceOneFrame() {
DCHECK(!done());
StackFrame* last_frame = frame_;
@@ -334,12 +365,10 @@ void SafeStackFrameIterator::AdvanceOneFrame() {
}
}
-
bool SafeStackFrameIterator::IsValidFrame(StackFrame* frame) const {
return IsValidStackAddress(frame->sp()) && IsValidStackAddress(frame->fp());
}
-
bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
StackFrame::State state;
if (frame->is_entry() || frame->is_construct_entry()) {
@@ -355,7 +384,7 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
// that it really an Smi.
Object number_of_args =
reinterpret_cast<ArgumentsAdaptorFrame*>(frame)->GetExpression(0);
- if (!number_of_args->IsSmi()) {
+ if (!number_of_args.IsSmi()) {
return false;
}
}
@@ -364,7 +393,6 @@ bool SafeStackFrameIterator::IsValidCaller(StackFrame* frame) {
SingletonFor(frame->GetCallerState(&state)) != nullptr;
}
-
bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
if (!IsValidStackAddress(fp)) return false;
Address sp = ExitFrame::ComputeStackPointer(fp);
@@ -375,7 +403,6 @@ bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
return *state.pc_address != kNullAddress;
}
-
void SafeStackFrameIterator::Advance() {
while (true) {
AdvanceOneFrame();
@@ -407,7 +434,6 @@ void SafeStackFrameIterator::Advance() {
}
}
-
// -------------------------------------------------------------------------
namespace {
@@ -418,28 +444,27 @@ Code GetContainingCode(Isolate* isolate, Address pc) {
Code StackFrame::LookupCode() const {
Code result = GetContainingCode(isolate(), pc());
- DCHECK_GE(pc(), result->InstructionStart());
- DCHECK_LT(pc(), result->InstructionEnd());
+ DCHECK_GE(pc(), result.InstructionStart());
+ DCHECK_LT(pc(), result.InstructionEnd());
return result;
}
void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
Address* constant_pool_address, Code holder) {
Address pc = *pc_address;
- DCHECK(holder->GetHeap()->GcSafeCodeContains(holder, pc));
- unsigned pc_offset = static_cast<unsigned>(pc - holder->InstructionStart());
+ DCHECK(holder.GetHeap()->GcSafeCodeContains(holder, pc));
+ unsigned pc_offset = static_cast<unsigned>(pc - holder.InstructionStart());
Object code = holder;
v->VisitRootPointer(Root::kTop, nullptr, FullObjectSlot(&code));
if (code == holder) return;
holder = Code::unchecked_cast(code);
- pc = holder->InstructionStart() + pc_offset;
+ pc = holder.InstructionStart() + pc_offset;
*pc_address = pc;
if (FLAG_enable_embedded_constant_pool && constant_pool_address) {
- *constant_pool_address = holder->constant_pool();
+ *constant_pool_address = holder.constant_pool();
}
}
-
void StackFrame::SetReturnAddressLocationResolver(
ReturnAddressLocationResolver resolver) {
DCHECK_NULL(return_address_location_resolver_);
@@ -467,7 +492,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
Object maybe_function = Object(
Memory<Address>(state->fp + StandardFrameConstants::kFunctionOffset));
if (!StackFrame::IsTypeMarker(marker)) {
- if (maybe_function->IsSmi()) {
+ if (maybe_function.IsSmi()) {
return NATIVE;
} else if (IsInterpreterFramePc(iterator->isolate(), *(state->pc_address),
state)) {
@@ -487,6 +512,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
switch (wasm_code->kind()) {
case wasm::WasmCode::kFunction:
return WASM_COMPILED;
+ case wasm::WasmCode::kWasmToCapiWrapper:
+ return WASM_EXIT;
case wasm::WasmCode::kWasmToJsWrapper:
return WASM_TO_JS;
case wasm::WasmCode::kRuntimeStub:
@@ -506,13 +533,13 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// Look up the code object to figure out the type of the stack frame.
Code code_obj = GetContainingCode(iterator->isolate(), pc);
if (!code_obj.is_null()) {
- switch (code_obj->kind()) {
+ switch (code_obj.kind()) {
case Code::BUILTIN:
if (StackFrame::IsTypeMarker(marker)) break;
- if (code_obj->is_interpreter_trampoline_builtin()) {
+ if (code_obj.is_interpreter_trampoline_builtin()) {
return INTERPRETED;
}
- if (code_obj->is_turbofanned()) {
+ if (code_obj.is_turbofanned()) {
// TODO(bmeurer): We treat frames for BUILTIN Code objects as
// OptimizedFrame for now (all the builtins with JavaScript
// linkage are actually generated with TurboFan currently, so
@@ -522,16 +549,16 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return BUILTIN;
case Code::OPTIMIZED_FUNCTION:
return OPTIMIZED;
- case Code::WASM_FUNCTION:
- return WASM_COMPILED;
- case Code::WASM_TO_JS_FUNCTION:
- return WASM_TO_JS;
case Code::JS_TO_WASM_FUNCTION:
return JS_TO_WASM;
- case Code::WASM_INTERPRETER_ENTRY:
- return WASM_INTERPRETER_ENTRY;
case Code::C_WASM_ENTRY:
return C_WASM_ENTRY;
+ case Code::WASM_FUNCTION:
+ case Code::WASM_TO_CAPI_FUNCTION:
+ case Code::WASM_TO_JS_FUNCTION:
+ case Code::WASM_INTERPRETER_ENTRY:
+ // Never appear as on-heap {Code} objects.
+ UNREACHABLE();
default:
// All other types should have an explicit marker
break;
@@ -558,6 +585,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case WASM_TO_JS:
case WASM_COMPILED:
case WASM_COMPILE_LAZY:
+ case WASM_EXIT:
return candidate;
case JS_TO_WASM:
case OPTIMIZED:
@@ -571,23 +599,18 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
}
-
#ifdef DEBUG
bool StackFrame::can_access_heap_objects() const {
return iterator_->can_access_heap_objects_;
}
#endif
-
StackFrame::Type StackFrame::GetCallerState(State* state) const {
ComputeCallerState(state);
return ComputeType(iterator_, state);
}
-
-Address StackFrame::UnpaddedFP() const {
- return fp();
-}
+Address StackFrame::UnpaddedFP() const { return fp(); }
Code NativeFrame::unchecked_code() const { return Code(); }
@@ -604,12 +627,10 @@ Code EntryFrame::unchecked_code() const {
return isolate()->heap()->builtin(Builtins::kJSEntry);
}
-
void EntryFrame::ComputeCallerState(State* state) const {
GetCallerState(state);
}
-
StackFrame::Type EntryFrame::GetCallerState(State* state) const {
const int offset = EntryFrameConstants::kCallerFPOffset;
Address fp = Memory<Address>(this->fp() + offset);
@@ -635,26 +656,24 @@ void ExitFrame::ComputeCallerState(State* state) const {
}
}
-
void ExitFrame::Iterate(RootVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
-
Address ExitFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
- Address sp = ComputeStackPointer(fp);
+ StackFrame::Type type = ComputeFrameType(fp);
+ Address sp = (type == WASM_EXIT) ? WasmExitFrame::ComputeStackPointer(fp)
+ : ExitFrame::ComputeStackPointer(fp);
FillState(fp, sp, state);
DCHECK_NE(*state->pc_address, kNullAddress);
-
- return ComputeFrameType(fp);
+ return type;
}
StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
@@ -663,14 +682,15 @@ StackFrame::Type ExitFrame::ComputeFrameType(Address fp) {
const int offset = ExitFrameConstants::kFrameTypeOffset;
Object marker(Memory<Address>(fp + offset));
- if (!marker->IsSmi()) {
+ if (!marker.IsSmi()) {
return EXIT;
}
intptr_t marker_int = bit_cast<intptr_t>(marker);
StackFrame::Type frame_type = static_cast<StackFrame::Type>(marker_int >> 1);
- if (frame_type == EXIT || frame_type == BUILTIN_EXIT) {
+ if (frame_type == EXIT || frame_type == BUILTIN_EXIT ||
+ frame_type == WASM_EXIT) {
return frame_type;
}
@@ -683,6 +703,15 @@ Address ExitFrame::ComputeStackPointer(Address fp) {
return Memory<Address>(fp + ExitFrameConstants::kSPOffset);
}
+Address WasmExitFrame::ComputeStackPointer(Address fp) {
+ // For WASM_EXIT frames, {sp} is only needed for finding the PC slot,
+ // everything else is handled via safepoint information.
+ Address sp = fp + WasmExitFrameConstants::kWasmInstanceOffset;
+ DCHECK_EQ(sp - 1 * kPCOnStackSize,
+ fp + WasmExitFrameConstants::kCallingPCOffset);
+ return sp;
+}
+
void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->sp = sp;
state->fp = fp;
@@ -703,7 +732,7 @@ JSFunction BuiltinExitFrame::function() const {
Object BuiltinExitFrame::receiver() const { return receiver_slot_object(); }
bool BuiltinExitFrame::IsConstructor() const {
- return !new_target_slot_object()->IsUndefined(isolate());
+ return !new_target_slot_object().IsUndefined(isolate());
}
Object BuiltinExitFrame::GetParameter(int i) const {
@@ -715,7 +744,7 @@ Object BuiltinExitFrame::GetParameter(int i) const {
int BuiltinExitFrame::ComputeParametersCount() const {
Object argc_slot = argc_slot_object();
- DCHECK(argc_slot->IsSmi());
+ DCHECK(argc_slot.IsSmi());
// Argc also counts the receiver, target, new target, and argc itself as args,
// therefore the real argument count is argc - 4.
int argc = Smi::ToInt(argc_slot) - 4;
@@ -800,8 +829,8 @@ Object StandardFrame::context() const {
int StandardFrame::position() const {
AbstractCode code = AbstractCode::cast(LookupCode());
- int code_offset = static_cast<int>(pc() - code->InstructionStart());
- return code->SourcePosition(code_offset);
+ int code_offset = static_cast<int>(pc() - code.InstructionStart());
+ return code.SourcePosition(code_offset);
}
int StandardFrame::ComputeExpressionsCount() const {
@@ -829,7 +858,6 @@ void StandardFrame::ComputeCallerState(State* state) const {
reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
}
-
bool StandardFrame::IsConstructor() const { return false; }
void StandardFrame::Summarize(std::vector<FrameSummary>* functions) const {
@@ -857,23 +885,24 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
wasm_code->stack_slots());
safepoint_entry = table.FindEntry(inner_pointer);
stack_slots = wasm_code->stack_slots();
- has_tagged_params = wasm_code->kind() != wasm::WasmCode::kFunction;
+ has_tagged_params = wasm_code->kind() != wasm::WasmCode::kFunction &&
+ wasm_code->kind() != wasm::WasmCode::kWasmToCapiWrapper;
tagged_parameter_slots = wasm_code->tagged_parameter_slots();
} else {
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
isolate()->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
if (!entry->safepoint_entry.is_valid()) {
- entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
+ entry->safepoint_entry = entry->code.GetSafepointEntry(inner_pointer);
DCHECK(entry->safepoint_entry.is_valid());
} else {
DCHECK(entry->safepoint_entry.Equals(
- entry->code->GetSafepointEntry(inner_pointer)));
+ entry->code.GetSafepointEntry(inner_pointer)));
}
code = entry->code;
safepoint_entry = entry->safepoint_entry;
- stack_slots = code->stack_slots();
- has_tagged_params = code->has_tagged_params();
+ stack_slots = code.stack_slots();
+ has_tagged_params = code.has_tagged_params();
}
uint32_t slot_space = stack_slots * kSystemPointerSize;
@@ -905,6 +934,14 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
case WASM_COMPILE_LAZY:
frame_header_size = WasmCompiledFrameConstants::kFixedFrameSizeFromFp;
break;
+ case WASM_EXIT:
+ // The last value in the frame header is the calling PC, which should
+ // not be visited.
+ static_assert(WasmExitFrameConstants::kFixedSlotCountFromFp ==
+ WasmCompiledFrameConstants::kFixedSlotCountFromFp + 1,
+ "WasmExitFrame has one slot more than WasmCompiledFrame");
+ frame_header_size = WasmCompiledFrameConstants::kFixedFrameSizeFromFp;
+ break;
case OPTIMIZED:
case INTERPRETED:
case BUILTIN:
@@ -929,42 +966,15 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
FullObjectSlot parameters_base(&Memory<Address>(sp()));
FullObjectSlot parameters_limit(frame_header_base.address() - slot_space);
- // Skip saved double registers.
- if (safepoint_entry.has_doubles()) {
- // Number of doubles not known at snapshot time.
- DCHECK(!isolate()->serializer_enabled());
- parameters_base +=
- RegisterConfiguration::Default()->num_allocatable_double_registers() *
- kDoubleSize / kSystemPointerSize;
- }
-
- // Visit the registers that contain pointers if any.
- if (safepoint_entry.HasRegisters()) {
- for (int i = kNumSafepointRegisters - 1; i >=0; i--) {
- if (safepoint_entry.HasRegisterAt(i)) {
- int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
- v->VisitRootPointer(Root::kTop, nullptr,
- parameters_base + reg_stack_index);
- }
- }
- // Skip the words containing the register values.
- parameters_base += kNumSafepointRegisters;
- }
-
- // We're done dealing with the register bits.
- uint8_t* safepoint_bits = safepoint_entry.bits();
- safepoint_bits += kNumSafepointRegisters >> kBitsPerByteLog2;
-
// Visit the rest of the parameters if they are tagged.
if (has_tagged_params) {
v->VisitRootPointers(Root::kTop, nullptr, parameters_base,
parameters_limit);
}
-#ifdef V8_COMPRESS_POINTERS
- Address isolate_root = isolate()->isolate_root();
-#endif
+ DEFINE_ROOT_VALUE(isolate());
// Visit pointer spill slots and locals.
+ uint8_t* safepoint_bits = safepoint_entry.bits();
for (unsigned index = 0; index < stack_slots; index++) {
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
@@ -982,8 +992,7 @@ void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
if (!HAS_SMI_TAG(compressed_value)) {
// We don't need to update smi values.
*spill_slot.location() =
- DecompressTaggedPointer<OnHeapAddressKind::kIsolateRoot>(
- isolate_root, compressed_value);
+ DecompressTaggedPointer(ROOT_VALUE, compressed_value);
}
#endif
v->VisitRootPointer(Root::kTop, nullptr, spill_slot);
@@ -1022,25 +1031,24 @@ Code StubFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
}
-
Address StubFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
Code code = LookupCode();
- DCHECK(code->is_turbofanned());
- DCHECK_EQ(code->kind(), Code::BUILTIN);
+ DCHECK(code.is_turbofanned());
+ DCHECK_EQ(code.kind(), Code::BUILTIN);
HandlerTable table(code);
- int pc_offset = static_cast<int>(pc() - code->InstructionStart());
- *stack_slots = code->stack_slots();
+ int pc_offset = static_cast<int>(pc() - code.InstructionStart());
+ *stack_slots = code.stack_slots();
return table.LookupReturn(pc_offset);
}
void OptimizedFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
void JavaScriptFrame::SetParameterValue(int index, Object value) const {
- Memory<Address>(GetParameterSlot(index)) = value->ptr();
+ Memory<Address>(GetParameterSlot(index)) = value.ptr();
}
bool JavaScriptFrame::IsConstructor() const {
@@ -1052,18 +1060,17 @@ bool JavaScriptFrame::IsConstructor() const {
return IsConstructFrame(fp);
}
-
bool JavaScriptFrame::HasInlinedFrames() const {
std::vector<SharedFunctionInfo> functions;
GetFunctions(&functions);
return functions.size() > 1;
}
-Code JavaScriptFrame::unchecked_code() const { return function()->code(); }
+Code JavaScriptFrame::unchecked_code() const { return function().code(); }
int OptimizedFrame::ComputeParametersCount() const {
Code code = LookupCode();
- if (code->kind() == Code::BUILTIN) {
+ if (code.kind() == Code::BUILTIN) {
return static_cast<int>(
Memory<intptr_t>(fp() + OptimizedBuiltinFrameConstants::kArgCOffset));
} else {
@@ -1078,7 +1085,7 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
void JavaScriptFrame::GetFunctions(
std::vector<SharedFunctionInfo>* functions) const {
DCHECK(functions->empty());
- functions->push_back(function()->shared());
+ functions->push_back(function().shared());
}
void JavaScriptFrame::GetFunctions(
@@ -1088,14 +1095,14 @@ void JavaScriptFrame::GetFunctions(
GetFunctions(&raw_functions);
for (const auto& raw_function : raw_functions) {
functions->push_back(
- Handle<SharedFunctionInfo>(raw_function, function()->GetIsolate()));
+ Handle<SharedFunctionInfo>(raw_function, function().GetIsolate()));
}
}
void JavaScriptFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
Code code = LookupCode();
- int offset = static_cast<int>(pc() - code->InstructionStart());
+ int offset = static_cast<int>(pc() - code.InstructionStart());
AbstractCode abstract_code = AbstractCode::cast(code);
Handle<FixedArray> params = GetParameters();
FrameSummary::JavaScriptFrameSummary summary(
@@ -1112,7 +1119,7 @@ Object JavaScriptFrame::unchecked_function() const {
// During deoptimization of an optimized function, we may have yet to
// materialize some closures on the stack. The arguments marker object
// marks this case.
- DCHECK(function_slot_object()->IsJSFunction() ||
+ DCHECK(function_slot_object().IsJSFunction() ||
ReadOnlyRoots(isolate()).arguments_marker() == function_slot_object());
return function_slot_object();
}
@@ -1122,18 +1129,18 @@ Object JavaScriptFrame::receiver() const { return GetParameter(-1); }
Object JavaScriptFrame::context() const {
const int offset = StandardFrameConstants::kContextOffset;
Object maybe_result(Memory<Address>(fp() + offset));
- DCHECK(!maybe_result->IsSmi());
+ DCHECK(!maybe_result.IsSmi());
return maybe_result;
}
Script JavaScriptFrame::script() const {
- return Script::cast(function()->shared()->script());
+ return Script::cast(function().shared().script());
}
int JavaScriptFrame::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
- DCHECK(!LookupCode()->has_handler_table());
- DCHECK(!LookupCode()->is_optimized_code());
+ DCHECK(!LookupCode().has_handler_table());
+ DCHECK(!LookupCode().is_optimized_code());
return -1;
}
@@ -1141,21 +1148,21 @@ void JavaScriptFrame::PrintFunctionAndOffset(JSFunction function,
AbstractCode code, int code_offset,
FILE* file,
bool print_line_number) {
- PrintF(file, "%s", function->IsOptimized() ? "*" : "~");
- function->PrintName(file);
+ PrintF(file, "%s", function.IsOptimized() ? "*" : "~");
+ function.PrintName(file);
PrintF(file, "+%d", code_offset);
if (print_line_number) {
- SharedFunctionInfo shared = function->shared();
- int source_pos = code->SourcePosition(code_offset);
- Object maybe_script = shared->script();
- if (maybe_script->IsScript()) {
+ SharedFunctionInfo shared = function.shared();
+ int source_pos = code.SourcePosition(code_offset);
+ Object maybe_script = shared.script();
+ if (maybe_script.IsScript()) {
Script script = Script::cast(maybe_script);
- int line = script->GetLineNumber(source_pos) + 1;
- Object script_name_raw = script->name();
- if (script_name_raw->IsString()) {
- String script_name = String::cast(script->name());
+ int line = script.GetLineNumber(source_pos) + 1;
+ Object script_name_raw = script.name();
+ if (script_name_raw.IsString()) {
+ String script_name = String::cast(script.name());
std::unique_ptr<char[]> c_script_name =
- script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ script_name.ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
PrintF(file, " at %s:%d", c_script_name.get(), line);
} else {
PrintF(file, " at <unknown>:%d", line);
@@ -1182,20 +1189,20 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
code_offset = iframe->GetBytecodeOffset();
} else {
Code code = frame->unchecked_code();
- code_offset = static_cast<int>(frame->pc() - code->InstructionStart());
+ code_offset = static_cast<int>(frame->pc() - code.InstructionStart());
}
- PrintFunctionAndOffset(function, function->abstract_code(), code_offset,
+ PrintFunctionAndOffset(function, function.abstract_code(), code_offset,
file, print_line_number);
if (print_args) {
// function arguments
// (we are intentionally only printing the actually
// supplied parameters, not all parameters required)
PrintF(file, "(this=");
- frame->receiver()->ShortPrint(file);
+ frame->receiver().ShortPrint(file);
const int length = frame->ComputeParametersCount();
for (int i = 0; i < length; i++) {
PrintF(file, ", ");
- frame->GetParameter(i)->ShortPrint(file);
+ frame->GetParameter(i).ShortPrint(file);
}
PrintF(file, ")");
}
@@ -1210,16 +1217,16 @@ void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction function,
int code_offset) {
auto ic_stats = ICStats::instance();
ICInfo& ic_info = ic_stats->Current();
- SharedFunctionInfo shared = function->shared();
+ SharedFunctionInfo shared = function.shared();
ic_info.function_name = ic_stats->GetOrCacheFunctionName(function);
ic_info.script_offset = code_offset;
- int source_pos = code->SourcePosition(code_offset);
- Object maybe_script = shared->script();
- if (maybe_script->IsScript()) {
+ int source_pos = code.SourcePosition(code_offset);
+ Object maybe_script = shared.script();
+ if (maybe_script.IsScript()) {
Script script = Script::cast(maybe_script);
- ic_info.line_num = script->GetLineNumber(source_pos) + 1;
+ ic_info.line_num = script.GetLineNumber(source_pos) + 1;
ic_info.script_name = ic_stats->GetOrCacheScriptName(script);
}
}
@@ -1231,7 +1238,7 @@ Object JavaScriptFrame::GetParameter(int index) const {
int JavaScriptFrame::ComputeParametersCount() const {
DCHECK(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
- return function()->shared()->internal_formal_parameter_count();
+ return function().shared().internal_formal_parameter_count();
}
Handle<FixedArray> JavaScriptFrame::GetParameters() const {
@@ -1279,7 +1286,7 @@ void JavaScriptBuiltinContinuationWithCatchFrame::SetException(
// Only allow setting exception if previous value was the hole.
CHECK_EQ(ReadOnlyRoots(isolate()).the_hole_value(),
Object(Memory<Address>(exception_argument_slot)));
- Memory<Address>(exception_argument_slot) = exception->ptr();
+ Memory<Address>(exception_argument_slot) = exception.ptr();
}
FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
@@ -1293,8 +1300,8 @@ FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
code_offset_(code_offset),
is_constructor_(is_constructor),
parameters_(parameters, isolate) {
- DCHECK(abstract_code->IsBytecodeArray() ||
- Code::cast(abstract_code)->kind() != Code::OPTIMIZED_FUNCTION);
+ DCHECK(abstract_code.IsBytecodeArray() ||
+ Code::cast(abstract_code).kind() != Code::OPTIMIZED_FUNCTION);
}
void FrameSummary::EnsureSourcePositionsAvailable() {
@@ -1303,13 +1310,25 @@ void FrameSummary::EnsureSourcePositionsAvailable() {
}
}
+bool FrameSummary::AreSourcePositionsAvailable() const {
+ if (IsJavaScript()) {
+ return java_script_summary_.AreSourcePositionsAvailable();
+ }
+ return true;
+}
+
void FrameSummary::JavaScriptFrameSummary::EnsureSourcePositionsAvailable() {
Handle<SharedFunctionInfo> shared(function()->shared(), isolate());
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(), shared);
}
+bool FrameSummary::JavaScriptFrameSummary::AreSourcePositionsAvailable() const {
+ return !FLAG_enable_lazy_source_positions ||
+ function()->shared().GetBytecodeArray().HasSourcePositionTable();
+}
+
bool FrameSummary::JavaScriptFrameSummary::is_subject_to_debugging() const {
- return function()->shared()->IsSubjectToDebugging();
+ return function()->shared().IsSubjectToDebugging();
}
int FrameSummary::JavaScriptFrameSummary::SourcePosition() const {
@@ -1321,7 +1340,7 @@ int FrameSummary::JavaScriptFrameSummary::SourceStatementPosition() const {
}
Handle<Object> FrameSummary::JavaScriptFrameSummary::script() const {
- return handle(function_->shared()->script(), isolate());
+ return handle(function_->shared().script(), isolate());
}
Handle<String> FrameSummary::JavaScriptFrameSummary::FunctionName() const {
@@ -1329,7 +1348,7 @@ Handle<String> FrameSummary::JavaScriptFrameSummary::FunctionName() const {
}
Handle<Context> FrameSummary::JavaScriptFrameSummary::native_context() const {
- return handle(function_->context()->native_context(), isolate());
+ return handle(function_->context().native_context(), isolate());
}
FrameSummary::WasmFrameSummary::WasmFrameSummary(
@@ -1366,7 +1385,7 @@ int FrameSummary::WasmFrameSummary::SourcePosition() const {
}
Handle<Script> FrameSummary::WasmFrameSummary::script() const {
- return handle(wasm_instance()->module_object()->script(),
+ return handle(wasm_instance()->module_object().script(),
wasm_instance()->GetIsolate());
}
@@ -1490,7 +1509,7 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
Code code = LookupCode();
- if (code->kind() == Code::BUILTIN) {
+ if (code.kind() == Code::BUILTIN) {
return JavaScriptFrame::Summarize(frames);
}
@@ -1563,7 +1582,6 @@ void OptimizedFrame::Summarize(std::vector<FrameSummary>* frames) const {
}
}
-
int OptimizedFrame::LookupExceptionHandlerInTable(
int* stack_slots, HandlerTable::CatchPrediction* prediction) {
// We cannot perform exception prediction on optimized code. Instead, we need
@@ -1572,14 +1590,14 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
DCHECK_NULL(prediction);
Code code = LookupCode();
HandlerTable table(code);
- int pc_offset = static_cast<int>(pc() - code->InstructionStart());
- if (stack_slots) *stack_slots = code->stack_slots();
+ int pc_offset = static_cast<int>(pc() - code.InstructionStart());
+ if (stack_slots) *stack_slots = code.stack_slots();
// When the return pc has been replaced by a trampoline there won't be
// a handler for this trampoline. Thus we need to use the return pc that
// _used to be_ on the stack to get the right ExceptionHandler.
- if (code->kind() == Code::OPTIMIZED_FUNCTION &&
- code->marked_for_deoptimization()) {
+ if (code.kind() == Code::OPTIMIZED_FUNCTION &&
+ code.marked_for_deoptimization()) {
SafepointTable safepoints(code);
pc_offset = safepoints.find_return_pc(pc_offset);
}
@@ -1591,21 +1609,21 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
DCHECK(is_optimized());
JSFunction opt_function = function();
- Code code = opt_function->code();
+ Code code = opt_function.code();
// The code object may have been replaced by lazy deoptimization. Fall
// back to a slow search in this case to find the original optimized
// code object.
- if (!code->contains(pc())) {
+ if (!code.contains(pc())) {
code = isolate()->heap()->GcSafeFindCodeForInnerPointer(pc());
}
DCHECK(!code.is_null());
- DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
- SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
+ SafepointEntry safepoint_entry = code.GetSafepointEntry(pc());
if (safepoint_entry.has_deoptimization_index()) {
*deopt_index = safepoint_entry.deoptimization_index();
- return DeoptimizationData::cast(code->deoptimization_data());
+ return DeoptimizationData::cast(code.deoptimization_data());
}
*deopt_index = Safepoint::kNoDeoptimizationIndex;
return DeoptimizationData();
@@ -1613,7 +1631,7 @@ DeoptimizationData OptimizedFrame::GetDeoptimizationData(
Object OptimizedFrame::receiver() const {
Code code = LookupCode();
- if (code->kind() == Code::BUILTIN) {
+ if (code.kind() == Code::BUILTIN) {
Address argc_ptr = fp() + OptimizedBuiltinFrameConstants::kArgCOffset;
intptr_t argc = *reinterpret_cast<intptr_t*>(argc_ptr);
intptr_t args_size =
@@ -1634,7 +1652,7 @@ void OptimizedFrame::GetFunctions(
// Delegate to JS frame in absence of turbofan deoptimization.
// TODO(turbofan): Revisit once we support deoptimization across the board.
Code code = LookupCode();
- if (code->kind() == Code::BUILTIN) {
+ if (code.kind() == Code::BUILTIN) {
return JavaScriptFrame::GetFunctions(functions);
}
@@ -1643,10 +1661,10 @@ void OptimizedFrame::GetFunctions(
DeoptimizationData const data = GetDeoptimizationData(&deopt_index);
DCHECK(!data.is_null());
DCHECK_NE(Safepoint::kNoDeoptimizationIndex, deopt_index);
- FixedArray const literal_array = data->LiteralArray();
+ FixedArray const literal_array = data.LiteralArray();
- TranslationIterator it(data->TranslationByteArray(),
- data->TranslationIndex(deopt_index)->value());
+ TranslationIterator it(data.TranslationByteArray(),
+ data.TranslationIndex(deopt_index).value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
DCHECK_EQ(Translation::BEGIN, opcode);
it.Next(); // Skip frame count.
@@ -1665,7 +1683,7 @@ void OptimizedFrame::GetFunctions(
jsframe_count--;
// The second operand of the frame points to the function.
- Object shared = literal_array->get(it.Next());
+ Object shared = literal_array.get(it.Next());
functions->push_back(SharedFunctionInfo::cast(shared));
// Skip over remaining operands to advance to the next opcode.
@@ -1689,7 +1707,7 @@ Object OptimizedFrame::StackSlotAt(int index) const {
int InterpretedFrame::position() const {
AbstractCode code = AbstractCode::cast(GetBytecodeArray());
int code_offset = GetBytecodeOffset();
- return code->SourcePosition(code_offset);
+ return code.SourcePosition(code_offset);
}
int InterpretedFrame::LookupExceptionHandlerInTable(
@@ -1775,8 +1793,7 @@ int ArgumentsAdaptorFrame::ComputeParametersCount() const {
}
Code ArgumentsAdaptorFrame::unchecked_code() const {
- return isolate()->builtins()->builtin(
- Builtins::kArgumentsAdaptorTrampoline);
+ return isolate()->builtins()->builtin(Builtins::kArgumentsAdaptorTrampoline);
}
int BuiltinFrame::ComputeParametersCount() const {
@@ -1799,21 +1816,21 @@ void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
PrintIndex(accumulator, mode, index);
accumulator->Add("WASM [");
- accumulator->PrintName(script()->name());
+ accumulator->PrintName(script().name());
Address instruction_start = isolate()
->wasm_engine()
->code_manager()
->LookupCode(pc())
->instruction_start();
Vector<const uint8_t> raw_func_name =
- module_object()->GetRawFunctionName(function_index());
+ module_object().GetRawFunctionName(function_index());
const int kMaxPrintedFunctionName = 64;
char func_name[kMaxPrintedFunctionName + 1];
int func_name_len = std::min(kMaxPrintedFunctionName, raw_func_name.length());
- memcpy(func_name, raw_func_name.start(), func_name_len);
+ memcpy(func_name, raw_func_name.begin(), func_name_len);
func_name[func_name_len] = '\0';
int pos = position();
- const wasm::WasmModule* module = wasm_instance()->module_object()->module();
+ const wasm::WasmModule* module = wasm_instance().module_object().module();
int func_index = function_index();
int func_code_offset = module->functions[func_index].code.offset();
accumulator->Add("], function #%u ('%s'), pc=%p (+0x%x), pos=%d (+%d)\n",
@@ -1846,14 +1863,14 @@ WasmInstanceObject WasmCompiledFrame::wasm_instance() const {
}
WasmModuleObject WasmCompiledFrame::module_object() const {
- return wasm_instance()->module_object();
+ return wasm_instance().module_object();
}
uint32_t WasmCompiledFrame::function_index() const {
return FrameSummary::GetSingle(this).AsWasmCompiled().function_index();
}
-Script WasmCompiledFrame::script() const { return module_object()->script(); }
+Script WasmCompiledFrame::script() const { return module_object().script(); }
int WasmCompiledFrame::position() const {
return FrameSummary::GetSingle(this).SourcePosition();
@@ -1892,8 +1909,8 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
DCHECK_NOT_NULL(stack_slots);
wasm::WasmCode* code =
isolate()->wasm_engine()->code_manager()->LookupCode(pc());
- if (!code->IsAnonymous() && code->handler_table_offset() > 0) {
- HandlerTable table(code->instruction_start(), code->handler_table_offset());
+ if (!code->IsAnonymous() && code->handler_table_size() > 0) {
+ HandlerTable table(code->handler_table(), code->handler_table_size());
int pc_offset = static_cast<int>(pc() - code->instruction_start());
*stack_slots = static_cast<int>(code->stack_slots());
return table.LookupReturn(pc_offset);
@@ -1910,7 +1927,7 @@ void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
PrintIndex(accumulator, mode, index);
accumulator->Add("WASM INTERPRETER ENTRY [");
Script script = this->script();
- accumulator->PrintName(script->name());
+ accumulator->PrintName(script.name());
accumulator->Add("]");
if (mode != OVERVIEW) accumulator->Add("\n");
}
@@ -1919,7 +1936,7 @@ void WasmInterpreterEntryFrame::Summarize(
std::vector<FrameSummary>* functions) const {
Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
std::vector<std::pair<uint32_t, int>> interpreted_stack =
- instance->debug_info()->GetInterpretedStack(fp());
+ instance->debug_info().GetInterpretedStack(fp());
for (auto& e : interpreted_stack) {
FrameSummary::WasmInterpretedFrameSummary summary(isolate(), instance,
@@ -1937,15 +1954,15 @@ WasmInstanceObject WasmInterpreterEntryFrame::wasm_instance() const {
}
WasmDebugInfo WasmInterpreterEntryFrame::debug_info() const {
- return wasm_instance()->debug_info();
+ return wasm_instance().debug_info();
}
WasmModuleObject WasmInterpreterEntryFrame::module_object() const {
- return wasm_instance()->module_object();
+ return wasm_instance().module_object();
}
Script WasmInterpreterEntryFrame::script() const {
- return module_object()->script();
+ return module_object().script();
}
int WasmInterpreterEntryFrame::position() const {
@@ -1953,7 +1970,7 @@ int WasmInterpreterEntryFrame::position() const {
}
Object WasmInterpreterEntryFrame::context() const {
- return wasm_instance()->native_context();
+ return wasm_instance().native_context();
}
Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
@@ -1998,11 +2015,9 @@ void PrintFunctionSource(StringStream* accumulator, SharedFunctionInfo shared,
} // namespace
-
-void JavaScriptFrame::Print(StringStream* accumulator,
- PrintMode mode,
+void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
- Handle<SharedFunctionInfo> shared = handle(function()->shared(), isolate());
+ Handle<SharedFunctionInfo> shared = handle(function().shared(), isolate());
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate(), shared);
DisallowHeapAllocation no_gc;
@@ -2023,23 +2038,23 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// or context slots.
ScopeInfo scope_info = shared->scope_info();
Object script_obj = shared->script();
- if (script_obj->IsScript()) {
+ if (script_obj.IsScript()) {
Script script = Script::cast(script_obj);
accumulator->Add(" [");
- accumulator->PrintName(script->name());
+ accumulator->PrintName(script.name());
if (is_interpreted()) {
const InterpretedFrame* iframe =
reinterpret_cast<const InterpretedFrame*>(this);
BytecodeArray bytecodes = iframe->GetBytecodeArray();
int offset = iframe->GetBytecodeOffset();
- int source_pos = AbstractCode::cast(bytecodes)->SourcePosition(offset);
- int line = script->GetLineNumber(source_pos) + 1;
+ int source_pos = AbstractCode::cast(bytecodes).SourcePosition(offset);
+ int line = script.GetLineNumber(source_pos) + 1;
accumulator->Add(":%d] [bytecode=%p offset=%d]", line,
reinterpret_cast<void*>(bytecodes.ptr()), offset);
} else {
int function_start_pos = shared->StartPosition();
- int line = script->GetLineNumber(function_start_pos) + 1;
+ int line = script.GetLineNumber(function_start_pos) + 1;
accumulator->Add(":~%d] [pc=%p]", line, reinterpret_cast<void*>(pc()));
}
}
@@ -2067,15 +2082,15 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add(" {\n");
// Compute the number of locals and expression stack elements.
- int heap_locals_count = scope_info->ContextLocalCount();
+ int heap_locals_count = scope_info.ContextLocalCount();
int expressions_count = ComputeExpressionsCount();
// Try to get hold of the context of this frame.
Context context;
- if (this->context()->IsContext()) {
+ if (this->context().IsContext()) {
context = Context::cast(this->context());
- while (context->IsWithContext()) {
- context = context->previous();
+ while (context.IsWithContext()) {
+ context = context.previous();
DCHECK(!context.is_null());
}
}
@@ -2086,12 +2101,12 @@ void JavaScriptFrame::Print(StringStream* accumulator,
}
for (int i = 0; i < heap_locals_count; i++) {
accumulator->Add(" var ");
- accumulator->PrintName(scope_info->ContextLocalName(i));
+ accumulator->PrintName(scope_info.ContextLocalName(i));
accumulator->Add(" = ");
if (!context.is_null()) {
int index = Context::MIN_CONTEXT_SLOTS + i;
- if (index < context->length()) {
- accumulator->Add("%o", context->get(index));
+ if (index < context.length()) {
+ accumulator->Add("%o", context.get(index));
} else {
accumulator->Add(
"// warning: missing context slot - inconsistent frame?");
@@ -2115,14 +2130,12 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add("}\n\n");
}
-
-void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
- PrintMode mode,
+void ArgumentsAdaptorFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
int actual = ComputeParametersCount();
int expected = -1;
JSFunction function = this->function();
- expected = function->shared()->internal_formal_parameter_count();
+ expected = function.shared().internal_formal_parameter_count();
PrintIndex(accumulator, mode, index);
accumulator->Add("arguments adaptor frame: %d->%d", actual, expected);
@@ -2170,7 +2183,7 @@ void InternalFrame::Iterate(RootVisitor* v) const {
// the full stack frame contains only tagged pointers or only raw values.
// This is used for the WasmCompileLazy builtin, where we actually pass
// untagged arguments and also store untagged values on the stack.
- if (code->has_tagged_params()) IterateExpressions(v);
+ if (code.has_tagged_params()) IterateExpressions(v);
}
// -------------------------------------------------------------------------
@@ -2188,7 +2201,7 @@ uint32_t PcAddressForHashing(Isolate* isolate, Address address) {
} // namespace
InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
- InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
+InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
isolate_->counters()->pc_to_code()->Increment();
DCHECK(base::bits::IsPowerOfTwo(kInnerPointerToCodeCacheSize));
uint32_t hash =
diff --git a/deps/v8/src/frames.h b/deps/v8/src/execution/frames.h
index 5e6f5d2a61..982716db93 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FRAMES_H_
-#define V8_FRAMES_H_
+#ifndef V8_EXECUTION_FRAMES_H_
+#define V8_EXECUTION_FRAMES_H_
-#include "src/handles.h"
-#include "src/objects.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/handles/handles.h"
#include "src/objects/code.h"
-#include "src/safepoint-table.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -23,6 +23,7 @@ class ExternalCallbackScope;
class InnerPointerToCodeCache;
class Isolate;
class ObjectVisitor;
+class Register;
class RootVisitor;
class StackFrameIteratorBase;
class StringStream;
@@ -69,6 +70,7 @@ class StackHandler {
V(JS_TO_WASM, JsToWasmFrame) \
V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame) \
V(C_WASM_ENTRY, CWasmEntryFrame) \
+ V(WASM_EXIT, WasmExitFrame) \
V(WASM_COMPILE_LAZY, WasmCompileLazyFrame) \
V(INTERPRETED, InterpretedFrame) \
V(STUB, StubFrame) \
@@ -89,8 +91,7 @@ class StackFrame {
#define DECLARE_TYPE(type, ignore) type,
enum Type {
NONE = 0,
- STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
- NUMBER_OF_TYPES,
+ STACK_FRAME_TYPE_LIST(DECLARE_TYPE) NUMBER_OF_TYPES,
// Used by FrameScope to indicate that the stack frame is constructed
// manually and the FrameScope does not need to emit code.
MANUAL
@@ -101,11 +102,7 @@ class StackFrame {
// by the debugger.
// ID_MIN_VALUE and ID_MAX_VALUE are specified to ensure that enumeration type
// has correct value range (see Issue 830 for more details).
- enum Id {
- ID_MIN_VALUE = kMinInt,
- ID_MAX_VALUE = kMaxInt,
- NO_ID = 0
- };
+ enum Id { ID_MIN_VALUE = kMinInt, ID_MAX_VALUE = kMaxInt, NO_ID = 0 };
// Used to mark the outermost JS entry frame.
//
@@ -175,6 +172,7 @@ class StackFrame {
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_interpreted() const { return type() == INTERPRETED; }
bool is_wasm_compiled() const { return type() == WASM_COMPILED; }
+ bool is_wasm_exit() const { return type() == WASM_EXIT; }
bool is_wasm_compile_lazy() const { return type() == WASM_COMPILE_LAZY; }
bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
@@ -329,7 +327,7 @@ class NativeFrame : public StackFrame {
};
// Entry frames are used to enter JavaScript execution from C.
-class EntryFrame: public StackFrame {
+class EntryFrame : public StackFrame {
public:
Type type() const override { return ENTRY; }
@@ -376,9 +374,8 @@ class ConstructEntryFrame : public EntryFrame {
friend class StackFrameIteratorBase;
};
-
// Exit frames are used to exit JavaScript execution and go to C.
-class ExitFrame: public StackFrame {
+class ExitFrame : public StackFrame {
public:
Type type() const override { return EXIT; }
@@ -483,6 +480,7 @@ class V8_EXPORT_PRIVATE FrameSummary {
FixedArray parameters);
void EnsureSourcePositionsAvailable();
+ bool AreSourcePositionsAvailable() const;
Handle<Object> receiver() const { return receiver_; }
Handle<JSFunction> function() const { return function_; }
@@ -572,6 +570,7 @@ class V8_EXPORT_PRIVATE FrameSummary {
static FrameSummary Get(const StandardFrame* frame, int index);
void EnsureSourcePositionsAvailable();
+ bool AreSourcePositionsAvailable() const;
// Dispatched accessors.
Handle<Object> receiver() const;
@@ -773,7 +772,6 @@ class JavaScriptFrame : public StandardFrame {
friend class StackFrameIteratorBase;
};
-
class StubFrame : public StandardFrame {
public:
Type type() const override { return STUB; }
@@ -798,7 +796,6 @@ class StubFrame : public StandardFrame {
friend class StackFrameIteratorBase;
};
-
class OptimizedFrame : public JavaScriptFrame {
public:
Type type() const override { return OPTIMIZED; }
@@ -827,14 +824,12 @@ class OptimizedFrame : public JavaScriptFrame {
protected:
inline explicit OptimizedFrame(StackFrameIteratorBase* iterator);
-
private:
friend class StackFrameIteratorBase;
Object StackSlotAt(int index) const;
};
-
class InterpretedFrame : public JavaScriptFrame {
public:
Type type() const override { return INTERPRETED; }
@@ -883,11 +878,10 @@ class InterpretedFrame : public JavaScriptFrame {
friend class StackFrameIteratorBase;
};
-
// Arguments adaptor frames are automatically inserted below
// JavaScript frames when the actual number of parameters does not
// match the formal number of parameters.
-class ArgumentsAdaptorFrame: public JavaScriptFrame {
+class ArgumentsAdaptorFrame : public JavaScriptFrame {
public:
Type type() const override { return ARGUMENTS_ADAPTOR; }
@@ -908,7 +902,6 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
protected:
inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
-
private:
friend class StackFrameIteratorBase;
};
@@ -934,7 +927,7 @@ class BuiltinFrame final : public JavaScriptFrame {
friend class StackFrameIteratorBase;
};
-class WasmCompiledFrame final : public StandardFrame {
+class WasmCompiledFrame : public StandardFrame {
public:
Type type() const override { return WASM_COMPILED; }
@@ -977,6 +970,18 @@ class WasmCompiledFrame final : public StandardFrame {
WasmModuleObject module_object() const;
};
+class WasmExitFrame : public WasmCompiledFrame {
+ public:
+ Type type() const override { return WASM_EXIT; }
+ static Address ComputeStackPointer(Address fp);
+
+ protected:
+ inline explicit WasmExitFrame(StackFrameIteratorBase* iterator);
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
class WasmInterpreterEntryFrame final : public StandardFrame {
public:
Type type() const override { return WASM_INTERPRETER_ENTRY; }
@@ -1074,7 +1079,7 @@ class WasmCompileLazyFrame : public StandardFrame {
friend class StackFrameIteratorBase;
};
-class InternalFrame: public StandardFrame {
+class InternalFrame : public StandardFrame {
public:
Type type() const override { return INTERNAL; }
@@ -1098,10 +1103,9 @@ class InternalFrame: public StandardFrame {
friend class StackFrameIteratorBase;
};
-
// Construct frames are special trampoline frames introduced to handle
// function invocations through 'new'.
-class ConstructFrame: public InternalFrame {
+class ConstructFrame : public InternalFrame {
public:
Type type() const override { return CONSTRUCT; }
@@ -1212,8 +1216,7 @@ class StackFrameIteratorBase {
DISALLOW_COPY_AND_ASSIGN(StackFrameIteratorBase);
};
-
-class StackFrameIterator: public StackFrameIteratorBase {
+class StackFrameIterator : public StackFrameIteratorBase {
public:
// An iterator that iterates over the isolate's current thread's stack,
V8_EXPORT_PRIVATE explicit StackFrameIterator(Isolate* isolate);
@@ -1272,11 +1275,10 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
bool IsValidFrame(StackFrame* frame) const;
};
-class SafeStackFrameIterator: public StackFrameIteratorBase {
+class SafeStackFrameIterator : public StackFrameIteratorBase {
public:
- SafeStackFrameIterator(Isolate* isolate,
- Address fp, Address sp,
- Address js_entry_sp);
+ SafeStackFrameIterator(Isolate* isolate, Address pc, Address fp, Address sp,
+ Address lr, Address js_entry_sp);
inline StackFrame* frame() const;
void Advance();
@@ -1294,12 +1296,21 @@ class SafeStackFrameIterator: public StackFrameIteratorBase {
bool IsValidExitFrame(Address fp) const;
bool IsValidTop(ThreadLocalTop* top) const;
+ // Returns true if the pc points to a bytecode handler and the frame pointer
+ // doesn't seem to be a bytecode handler's frame, which implies that the
+ // bytecode handler has an elided frame. This is not precise and might give
+ // false negatives since it relies on checks to the frame's type marker,
+ // which might be uninitialized.
+ bool IsNoFrameBytecodeHandlerPc(Isolate* isolate, Address pc,
+ Address fp) const;
+
const Address low_bound_;
const Address high_bound_;
StackFrame::Type top_frame_type_;
ExternalCallbackScope* external_callback_scope_;
+ Address top_link_register_;
};
} // namespace internal
} // namespace v8
-#endif // V8_FRAMES_H_
+#endif // V8_EXECUTION_FRAMES_H_
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/execution/futex-emulation.cc
index 1cd856541b..7482807921 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/execution/futex-emulation.cc
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/futex-emulation.h"
+#include "src/execution/futex-emulation.h"
#include <limits>
#include "src/base/macros.h"
#include "src/base/platform/time.h"
-#include "src/conversions.h"
-#include "src/handles-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/numbers/conversions.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -23,7 +23,6 @@ base::LazyMutex FutexEmulation::mutex_ = LAZY_MUTEX_INITIALIZER;
base::LazyInstance<FutexWaitList>::type FutexEmulation::wait_list_ =
LAZY_INSTANCE_INITIALIZER;
-
void FutexWaitListNode::NotifyWake() {
// Lock the FutexEmulation mutex before notifying. We know that the mutex
// will have been unlocked if we are currently waiting on the condition
@@ -36,10 +35,8 @@ void FutexWaitListNode::NotifyWake() {
interrupted_ = true;
}
-
FutexWaitList::FutexWaitList() : head_(nullptr), tail_(nullptr) {}
-
void FutexWaitList::AddNode(FutexWaitListNode* node) {
DCHECK(node->prev_ == nullptr && node->next_ == nullptr);
if (tail_) {
@@ -53,7 +50,6 @@ void FutexWaitList::AddNode(FutexWaitListNode* node) {
tail_ = node;
}
-
void FutexWaitList::RemoveNode(FutexWaitListNode* node) {
if (node->prev_) {
node->prev_->next_ = node->next_;
@@ -88,7 +84,7 @@ Object FutexEmulation::WaitJs(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
Object res = Wait32(isolate, array_buffer, addr, value, rel_timeout_ms);
- if (res->IsSmi()) {
+ if (res.IsSmi()) {
int val = Smi::ToInt(res);
switch (val) {
case WaitReturnValue::kOk:
@@ -205,7 +201,7 @@ Object FutexEmulation::Wait(Isolate* isolate,
// be false, so we'll loop and then check interrupts.
if (interrupted) {
Object interrupt_object = isolate->stack_guard()->HandleInterrupts();
- if (interrupt_object->IsException(isolate)) {
+ if (interrupt_object.IsException(isolate)) {
result = interrupt_object;
callback_result = AtomicsWaitEvent::kTerminatedExecution;
mutex_.Pointer()->Lock();
diff --git a/deps/v8/src/futex-emulation.h b/deps/v8/src/execution/futex-emulation.h
index 1cdcac7248..c6fee5c3f7 100644
--- a/deps/v8/src/futex-emulation.h
+++ b/deps/v8/src/execution/futex-emulation.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FUTEX_EMULATION_H_
-#define V8_FUTEX_EMULATION_H_
+#ifndef V8_EXECUTION_FUTEX_EMULATION_H_
+#define V8_EXECUTION_FUTEX_EMULATION_H_
#include <stdint.h>
-#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
+#include "src/utils/allocation.h"
// Support for emulating futexes, a low-level synchronization primitive. They
// are natively supported by Linux, but must be emulated for other platforms.
@@ -26,7 +26,7 @@ namespace v8 {
namespace base {
class TimeDelta;
-} // base
+} // namespace base
namespace internal {
@@ -79,7 +79,6 @@ class FutexWaitListNode {
DISALLOW_COPY_AND_ASSIGN(FutexWaitListNode);
};
-
class FutexWaitList {
public:
FutexWaitList();
@@ -162,4 +161,4 @@ class FutexEmulation : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_FUTEX_EMULATION_H_
+#endif // V8_EXECUTION_FUTEX_EMULATION_H_
diff --git a/deps/v8/src/ia32/frame-constants-ia32.cc b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
index 32c2caf139..e5e3855c79 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.cc
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.cc
@@ -4,12 +4,12 @@
#if V8_TARGET_ARCH_IA32
-#include "src/assembler.h"
-#include "src/frame-constants.h"
-#include "src/ia32/assembler-ia32-inl.h"
-#include "src/ia32/assembler-ia32.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/ia32/assembler-ia32-inl.h"
+#include "src/codegen/ia32/assembler-ia32.h"
+#include "src/execution/frame-constants.h"
-#include "src/ia32/frame-constants-ia32.h"
+#include "src/execution/ia32/frame-constants-ia32.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ia32/frame-constants-ia32.h b/deps/v8/src/execution/ia32/frame-constants-ia32.h
index faaefbbad7..6d2125e83e 100644
--- a/deps/v8/src/ia32/frame-constants-ia32.h
+++ b/deps/v8/src/execution/ia32/frame-constants-ia32.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IA32_FRAME_CONSTANTS_IA32_H_
-#define V8_IA32_FRAME_CONSTANTS_IA32_H_
+#ifndef V8_EXECUTION_IA32_FRAME_CONSTANTS_IA32_H_
+#define V8_EXECUTION_IA32_FRAME_CONSTANTS_IA32_H_
#include "src/base/macros.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -77,4 +77,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_IA32_FRAME_CONSTANTS_IA32_H_
+#endif // V8_EXECUTION_IA32_FRAME_CONSTANTS_IA32_H_
diff --git a/deps/v8/src/isolate-data.h b/deps/v8/src/execution/isolate-data.h
index c6941975ac..d83ae708ec 100644
--- a/deps/v8/src/isolate-data.h
+++ b/deps/v8/src/execution/isolate-data.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ISOLATE_DATA_H_
-#define V8_ISOLATE_DATA_H_
+#ifndef V8_EXECUTION_ISOLATE_DATA_H_
+#define V8_EXECUTION_ISOLATE_DATA_H_
#include "src/builtins/builtins.h"
-#include "src/constants-arch.h"
-#include "src/external-reference-table.h"
-#include "src/roots.h"
-#include "src/thread-local-top.h"
-#include "src/utils.h"
+#include "src/codegen/constants-arch.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/execution/thread-local-top.h"
+#include "src/roots/roots.h"
+#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
namespace v8 {
@@ -225,4 +225,4 @@ void IsolateData::AssertPredictableLayout() {
} // namespace internal
} // namespace v8
-#endif // V8_ISOLATE_DATA_H_
+#endif // V8_EXECUTION_ISOLATE_DATA_H_
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/execution/isolate-inl.h
index 253fb9f8a0..fcbbed139c 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/execution/isolate-inl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ISOLATE_INL_H_
-#define V8_ISOLATE_INL_H_
+#ifndef V8_EXECUTION_ISOLATE_INL_H_
+#define V8_EXECUTION_ISOLATE_INL_H_
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
#include "src/objects/cell-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell.h"
#include "src/objects/regexp-match-info.h"
@@ -21,41 +21,39 @@ IsolateAllocationMode Isolate::isolate_allocation_mode() {
}
void Isolate::set_context(Context context) {
- DCHECK(context.is_null() || context->IsContext());
+ DCHECK(context.is_null() || context.IsContext());
thread_local_top()->context_ = context;
}
Handle<NativeContext> Isolate::native_context() {
- return handle(context()->native_context(), this);
+ return handle(context().native_context(), this);
}
NativeContext Isolate::raw_native_context() {
- return context()->native_context();
+ return context().native_context();
}
Object Isolate::pending_exception() {
DCHECK(has_pending_exception());
- DCHECK(!thread_local_top()->pending_exception_->IsException(this));
+ DCHECK(!thread_local_top()->pending_exception_.IsException(this));
return thread_local_top()->pending_exception_;
}
void Isolate::set_pending_exception(Object exception_obj) {
- DCHECK(!exception_obj->IsException(this));
+ DCHECK(!exception_obj.IsException(this));
thread_local_top()->pending_exception_ = exception_obj;
}
void Isolate::clear_pending_exception() {
- DCHECK(!thread_local_top()->pending_exception_->IsException(this));
+ DCHECK(!thread_local_top()->pending_exception_.IsException(this));
thread_local_top()->pending_exception_ = ReadOnlyRoots(this).the_hole_value();
}
-
bool Isolate::has_pending_exception() {
- DCHECK(!thread_local_top()->pending_exception_->IsException(this));
- return !thread_local_top()->pending_exception_->IsTheHole(this);
+ DCHECK(!thread_local_top()->pending_exception_.IsException(this));
+ return !thread_local_top()->pending_exception_.IsTheHole(this);
}
-
void Isolate::clear_pending_message() {
thread_local_top()->pending_message_obj_ =
ReadOnlyRoots(this).the_hole_value();
@@ -63,19 +61,18 @@ void Isolate::clear_pending_message() {
Object Isolate::scheduled_exception() {
DCHECK(has_scheduled_exception());
- DCHECK(!thread_local_top()->scheduled_exception_->IsException(this));
+ DCHECK(!thread_local_top()->scheduled_exception_.IsException(this));
return thread_local_top()->scheduled_exception_;
}
bool Isolate::has_scheduled_exception() {
- DCHECK(!thread_local_top()->scheduled_exception_->IsException(this));
+ DCHECK(!thread_local_top()->scheduled_exception_.IsException(this));
return thread_local_top()->scheduled_exception_ !=
ReadOnlyRoots(this).the_hole_value();
}
-
void Isolate::clear_scheduled_exception() {
- DCHECK(!thread_local_top()->scheduled_exception_->IsException(this));
+ DCHECK(!thread_local_top()->scheduled_exception_.IsException(this));
thread_local_top()->scheduled_exception_ =
ReadOnlyRoots(this).the_hole_value();
}
@@ -91,28 +88,27 @@ void Isolate::FireBeforeCallEnteredCallback() {
}
Handle<JSGlobalObject> Isolate::global_object() {
- return handle(context()->global_object(), this);
+ return handle(context().global_object(), this);
}
Handle<JSGlobalProxy> Isolate::global_proxy() {
- return handle(context()->global_proxy(), this);
+ return handle(context().global_proxy(), this);
}
Isolate::ExceptionScope::ExceptionScope(Isolate* isolate)
: isolate_(isolate),
pending_exception_(isolate_->pending_exception(), isolate_) {}
-
Isolate::ExceptionScope::~ExceptionScope() {
isolate_->set_pending_exception(*pending_exception_);
}
-#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- Handle<type> Isolate::name() { \
- return Handle<type>(raw_native_context()->name(), this); \
- } \
- bool Isolate::is_##name(type value) { \
- return raw_native_context()->is_##name(value); \
+#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ Handle<type> Isolate::name() { \
+ return Handle<type>(raw_native_context().name(), this); \
+ } \
+ bool Isolate::is_##name(type value) { \
+ return raw_native_context().is_##name(value); \
}
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
@@ -120,7 +116,7 @@ NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
bool Isolate::IsArrayConstructorIntact() {
Cell array_constructor_cell =
Cell::cast(root(RootIndex::kArrayConstructorProtector));
- return array_constructor_cell->value() == Smi::FromInt(kProtectorValid);
+ return array_constructor_cell.value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsArraySpeciesLookupChainIntact() {
@@ -138,67 +134,67 @@ bool Isolate::IsArraySpeciesLookupChainIntact() {
PropertyCell species_cell =
PropertyCell::cast(root(RootIndex::kArraySpeciesProtector));
- return species_cell->value()->IsSmi() &&
- Smi::ToInt(species_cell->value()) == kProtectorValid;
+ return species_cell.value().IsSmi() &&
+ Smi::ToInt(species_cell.value()) == kProtectorValid;
}
bool Isolate::IsTypedArraySpeciesLookupChainIntact() {
PropertyCell species_cell =
PropertyCell::cast(root(RootIndex::kTypedArraySpeciesProtector));
- return species_cell->value()->IsSmi() &&
- Smi::ToInt(species_cell->value()) == kProtectorValid;
+ return species_cell.value().IsSmi() &&
+ Smi::ToInt(species_cell.value()) == kProtectorValid;
}
bool Isolate::IsRegExpSpeciesLookupChainIntact() {
PropertyCell species_cell =
PropertyCell::cast(root(RootIndex::kRegExpSpeciesProtector));
- return species_cell->value()->IsSmi() &&
- Smi::ToInt(species_cell->value()) == kProtectorValid;
+ return species_cell.value().IsSmi() &&
+ Smi::ToInt(species_cell.value()) == kProtectorValid;
}
bool Isolate::IsPromiseSpeciesLookupChainIntact() {
PropertyCell species_cell =
PropertyCell::cast(root(RootIndex::kPromiseSpeciesProtector));
- return species_cell->value()->IsSmi() &&
- Smi::ToInt(species_cell->value()) == kProtectorValid;
+ return species_cell.value().IsSmi() &&
+ Smi::ToInt(species_cell.value()) == kProtectorValid;
}
bool Isolate::IsStringLengthOverflowIntact() {
Cell string_length_cell = Cell::cast(root(RootIndex::kStringLengthProtector));
- return string_length_cell->value() == Smi::FromInt(kProtectorValid);
+ return string_length_cell.value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsArrayBufferDetachingIntact() {
PropertyCell buffer_detaching =
PropertyCell::cast(root(RootIndex::kArrayBufferDetachingProtector));
- return buffer_detaching->value() == Smi::FromInt(kProtectorValid);
+ return buffer_detaching.value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsArrayIteratorLookupChainIntact() {
PropertyCell array_iterator_cell =
PropertyCell::cast(root(RootIndex::kArrayIteratorProtector));
- return array_iterator_cell->value() == Smi::FromInt(kProtectorValid);
+ return array_iterator_cell.value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsMapIteratorLookupChainIntact() {
PropertyCell map_iterator_cell =
PropertyCell::cast(root(RootIndex::kMapIteratorProtector));
- return map_iterator_cell->value() == Smi::FromInt(kProtectorValid);
+ return map_iterator_cell.value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsSetIteratorLookupChainIntact() {
PropertyCell set_iterator_cell =
PropertyCell::cast(root(RootIndex::kSetIteratorProtector));
- return set_iterator_cell->value() == Smi::FromInt(kProtectorValid);
+ return set_iterator_cell.value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsStringIteratorLookupChainIntact() {
PropertyCell string_iterator_cell =
PropertyCell::cast(root(RootIndex::kStringIteratorProtector));
- return string_iterator_cell->value() == Smi::FromInt(kProtectorValid);
+ return string_iterator_cell.value() == Smi::FromInt(kProtectorValid);
}
} // namespace internal
} // namespace v8
-#endif // V8_ISOLATE_INL_H_
+#endif // V8_EXECUTION_ISOLATE_INL_H_
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/execution/isolate.cc
index 8e15e78109..8a8db12ca3 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include <stdlib.h>
@@ -12,8 +12,7 @@
#include <sstream>
#include <unordered_map>
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/base/adapters.h"
@@ -21,62 +20,63 @@
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/bootstrapper.h"
#include "src/builtins/builtins-promise.h"
#include "src/builtins/constants-table-builder.h"
-#include "src/cancelable-task.h"
-#include "src/compilation-cache.h"
-#include "src/compilation-statistics.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/common/ptr-compr.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
-#include "src/date.h"
+#include "src/date/date.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/elements.h"
-#include "src/frames-inl.h"
-#include "src/hash-seed-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/diagnostics/compilation-statistics.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/messages.h"
+#include "src/execution/microtask-queue.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/simulator.h"
+#include "src/execution/v8threads.h"
+#include "src/execution/vm-state-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/ic/stub-cache.h"
+#include "src/init/bootstrapper.h"
+#include "src/init/setup-isolate.h"
+#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate-inl.h"
#include "src/libsampler/sampler.h"
-#include "src/log.h"
-#include "src/messages.h"
-#include "src/microtask-queue.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/elements.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/prototype.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/stack-frame-info-inl.h"
-#include "src/ostreams.h"
+#include "src/objects/visitors.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/tracing-cpu-profiler.h"
-#include "src/prototype.h"
-#include "src/ptr-compr.h"
#include "src/regexp/regexp-stack.h"
-#include "src/runtime-profiler.h"
-#include "src/setup-isolate.h"
-#include "src/simulator.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/snapshot/embedded-file-writer.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/embedded-file-writer.h"
#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/startup-deserializer.h"
-#include "src/string-builder-inl.h"
-#include "src/string-stream.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-stream.h"
+#include "src/tasks/cancelable-task.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/unicode-cache.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
-#include "src/version.h"
-#include "src/visitors.h"
-#include "src/vm-state-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/version.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
@@ -86,7 +86,7 @@
#endif // V8_INTL_SUPPORT
#if defined(V8_OS_WIN_X64)
-#include "src/unwinding-info-win64.h"
+#include "src/diagnostics/unwinding-info-win64.h"
#endif
extern "C" const uint8_t* v8_Default_embedded_blob_;
@@ -295,12 +295,11 @@ size_t Isolate::HashIsolateForEmbeddedBlob() {
// The builtins constants table is also tightly tied to embedded builtins.
hash = base::hash_combine(
- hash, static_cast<size_t>(heap_.builtins_constants_table()->length()));
+ hash, static_cast<size_t>(heap_.builtins_constants_table().length()));
return hash;
}
-
base::Thread::LocalStorageKey Isolate::isolate_key_;
base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
#if DEBUG
@@ -313,7 +312,7 @@ std::atomic<int> isolate_counter{0};
} // namespace
Isolate::PerIsolateThreadData*
- Isolate::FindOrAllocatePerThreadDataForThisThread() {
+Isolate::FindOrAllocatePerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = nullptr;
{
@@ -329,7 +328,6 @@ Isolate::PerIsolateThreadData*
return per_thread;
}
-
void Isolate::DiscardPerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::TryGetCurrent();
if (thread_id.IsValid()) {
@@ -344,13 +342,11 @@ void Isolate::DiscardPerThreadDataForThisThread() {
}
}
-
Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
return FindPerThreadDataForThread(thread_id);
}
-
Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
ThreadId thread_id) {
PerIsolateThreadData* per_thread = nullptr;
@@ -361,7 +357,6 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
return per_thread;
}
-
void Isolate::InitializeOncePerProcess() {
isolate_key_ = base::Thread::CreateThreadLocalKey();
#if DEBUG
@@ -382,7 +377,6 @@ char* Isolate::Iterate(RootVisitor* v, char* thread_storage) {
return thread_storage + sizeof(ThreadLocalTop);
}
-
void Isolate::IterateThread(ThreadVisitor* v, char* t) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
v->VisitThread(this, thread);
@@ -428,7 +422,6 @@ void Isolate::IterateDeferredHandles(RootVisitor* visitor) {
}
}
-
#ifdef DEBUG
bool Isolate::IsDeferredHandle(Address* handle) {
// Comparing unrelated pointers (not from the same array) is undefined
@@ -456,18 +449,15 @@ bool Isolate::IsDeferredHandle(Address* handle) {
}
#endif // DEBUG
-
void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
thread_local_top()->try_catch_handler_ = that;
}
-
void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
DCHECK(thread_local_top()->try_catch_handler_ == that);
thread_local_top()->try_catch_handler_ = that->next_;
}
-
Handle<String> Isolate::StackTraceString() {
if (stack_trace_nesting_level_ == 0) {
stack_trace_nesting_level_++;
@@ -483,9 +473,9 @@ Handle<String> Isolate::StackTraceString() {
} else if (stack_trace_nesting_level_ == 1) {
stack_trace_nesting_level_++;
base::OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
+ "\n\nAttempt to print stack while printing stack (double fault)\n");
base::OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
+ "If you are lucky you may find a partial stack dump on stdout.\n\n");
incomplete_message_->OutputToStdOut();
return factory()->empty_string();
} else {
@@ -612,7 +602,7 @@ class FrameArrayBuilder {
Handle<Object> receiver(generator_object->receiver(), isolate_);
Handle<AbstractCode> code(
- AbstractCode::cast(function->shared()->GetBytecodeArray()), isolate_);
+ AbstractCode::cast(function->shared().GetBytecodeArray()), isolate_);
int offset = Smi::ToInt(generator_object->input_or_debug_pos());
// The stored bytecode offset is relative to a different base than what
// is used in the source position table, hence the subtraction.
@@ -620,11 +610,10 @@ class FrameArrayBuilder {
Handle<FixedArray> parameters = isolate_->factory()->empty_fixed_array();
if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
- int param_count = function->shared()->internal_formal_parameter_count();
+ int param_count = function->shared().internal_formal_parameter_count();
parameters = isolate_->factory()->NewFixedArray(param_count);
for (int i = 0; i < param_count; i++) {
- parameters->set(i,
- generator_object->parameters_and_registers()->get(i));
+ parameters->set(i, generator_object->parameters_and_registers().get(i));
}
}
@@ -679,7 +668,7 @@ class FrameArrayBuilder {
if (summary.code()->kind() != wasm::WasmCode::kFunction) return;
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = 0;
- if (instance->module_object()->is_asm_js()) {
+ if (instance->module_object().is_asm_js()) {
flags |= FrameArray::kIsAsmJsWasmFrame;
if (summary.at_to_number_conversion()) {
flags |= FrameArray::kAsmJsAtNumberConversion;
@@ -697,7 +686,7 @@ class FrameArrayBuilder {
FrameSummary::WasmInterpretedFrameSummary const& summary) {
Handle<WasmInstanceObject> instance = summary.wasm_instance();
int flags = FrameArray::kIsWasmInterpretedFrame;
- DCHECK(!instance->module_object()->is_asm_js());
+ DCHECK(!instance->module_object().is_asm_js());
elements_ = FrameArray::AppendWasmFrame(elements_, instance,
summary.function_index(), {},
summary.byte_offset(), flags);
@@ -709,6 +698,13 @@ class FrameArrayBuilder {
// Filter out internal frames that we do not want to show.
if (!IsVisibleInStackTrace(function)) return;
+ // TODO(szuend): Remove this check once the flag is enabled
+ // by default.
+ if (!FLAG_experimental_stack_trace_frames &&
+ function->shared().IsApiFunction()) {
+ return;
+ }
+
Handle<Object> receiver(exit_frame->receiver(), isolate_);
Handle<Code> code(exit_frame->LookupCode(), isolate_);
const int offset =
@@ -781,7 +777,7 @@ class FrameArrayBuilder {
bool IsStrictFrame(Handle<JSFunction> function) {
if (!encountered_strict_function_) {
encountered_strict_function_ =
- is_strict(function->shared()->language_mode());
+ is_strict(function->shared().language_mode());
}
return encountered_strict_function_;
}
@@ -821,15 +817,15 @@ class FrameArrayBuilder {
// The --builtins-in-stack-traces command line flag allows including
// internal call sites in the stack trace for debugging purposes.
if (!FLAG_builtins_in_stack_traces &&
- !function->shared()->IsUserJavaScript()) {
- return function->shared()->native();
+ !function->shared().IsUserJavaScript()) {
+ return function->shared().native() || function->shared().IsApiFunction();
}
return true;
}
bool IsInSameSecurityContext(Handle<JSFunction> function) {
if (!check_security_context_) return true;
- return isolate_->context()->HasSameSecurityTokenAs(function->context());
+ return isolate_->context().HasSameSecurityTokenAs(function->context());
}
// TODO(jgruber): Fix all cases in which frames give us a hole value (e.g. the
@@ -871,9 +867,9 @@ bool NoExtension(const v8::FunctionCallbackInfo<v8::Value>&) { return false; }
bool IsBuiltinFunction(Isolate* isolate, HeapObject object,
Builtins::Name builtin_index) {
- if (!object->IsJSFunction()) return false;
+ if (!object.IsJSFunction()) return false;
JSFunction const function = JSFunction::cast(object);
- return function->code() == isolate->builtins()->builtin(builtin_index);
+ return function.code() == isolate->builtins()->builtin(builtin_index);
}
void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
@@ -883,10 +879,10 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
if (promise->status() != Promise::kPending) return;
// Check that we have exactly one PromiseReaction on the {promise}.
- if (!promise->reactions()->IsPromiseReaction()) return;
+ if (!promise->reactions().IsPromiseReaction()) return;
Handle<PromiseReaction> reaction(
PromiseReaction::cast(promise->reactions()), isolate);
- if (!reaction->next()->IsSmi()) return;
+ if (!reaction->next().IsSmi()) return;
// Check if the {reaction} has one of the known async function or
// async generator continuations as its fulfill handler.
@@ -899,7 +895,7 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
// Now peak into the handlers' AwaitContext to get to
// the JSGeneratorObject for the async function.
Handle<Context> context(
- JSFunction::cast(reaction->fulfill_handler())->context(), isolate);
+ JSFunction::cast(reaction->fulfill_handler()).context(), isolate);
Handle<JSGeneratorObject> generator_object(
JSGeneratorObject::cast(context->extension()), isolate);
CHECK(generator_object->is_suspended());
@@ -915,7 +911,7 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
} else {
Handle<JSAsyncGeneratorObject> async_generator_object =
Handle<JSAsyncGeneratorObject>::cast(generator_object);
- if (async_generator_object->queue()->IsUndefined(isolate)) return;
+ if (async_generator_object->queue().IsUndefined(isolate)) return;
Handle<AsyncGeneratorRequest> async_generator_request(
AsyncGeneratorRequest::cast(async_generator_object->queue()),
isolate);
@@ -940,7 +936,7 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
PromiseBuiltins::kPromiseAllResolveElementCapabilitySlot;
Handle<PromiseCapability> capability(
PromiseCapability::cast(context->get(index)), isolate);
- if (!capability->promise()->IsJSPromise()) return;
+ if (!capability->promise().IsJSPromise()) return;
promise = handle(JSPromise::cast(capability->promise()), isolate);
} else {
// We have some generic promise chain here, so try to
@@ -953,7 +949,7 @@ void CaptureAsyncStackTrace(Isolate* isolate, Handle<JSPromise> promise,
} else if (promise_or_capability->IsPromiseCapability()) {
Handle<PromiseCapability> capability =
Handle<PromiseCapability>::cast(promise_or_capability);
- if (!capability->promise()->IsJSPromise()) return;
+ if (!capability->promise().IsJSPromise()) return;
promise = handle(JSPromise::cast(capability->promise()), isolate);
} else {
// Otherwise the {promise_or_capability} must be undefined here.
@@ -1070,7 +1066,7 @@ Handle<Object> CaptureStackTrace(Isolate* isolate, Handle<Object> caller,
// Now peak into the handlers' AwaitContext to get to
// the JSGeneratorObject for the async function.
Handle<Context> context(
- JSFunction::cast(promise_reaction_job_task->handler())->context(),
+ JSFunction::cast(promise_reaction_job_task->handler()).context(),
isolate);
Handle<JSGeneratorObject> generator_object(
JSGeneratorObject::cast(context->extension()), isolate);
@@ -1188,12 +1184,12 @@ Address Isolate::GetAbstractPC(int* line, int* column) {
JavaScriptFrame* frame = it.frame();
DCHECK(!frame->is_builtin());
- Handle<SharedFunctionInfo> shared = handle(frame->function()->shared(), this);
+ Handle<SharedFunctionInfo> shared = handle(frame->function().shared(), this);
SharedFunctionInfo::EnsureSourcePositionsAvailable(this, shared);
int position = frame->position();
- Object maybe_script = frame->function()->shared()->script();
- if (maybe_script->IsScript()) {
+ Object maybe_script = frame->function().shared().script();
+ if (maybe_script.IsScript()) {
Handle<Script> script(Script::cast(maybe_script), this);
Script::PositionInfo info;
Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
@@ -1207,7 +1203,7 @@ Address Isolate::GetAbstractPC(int* line, int* column) {
if (frame->is_interpreted()) {
InterpretedFrame* iframe = static_cast<InterpretedFrame*>(frame);
Address bytecode_start =
- iframe->GetBytecodeArray()->GetFirstBytecodeAddress();
+ iframe->GetBytecodeArray().GetFirstBytecodeAddress();
return bytecode_start + iframe->GetBytecodeOffset();
}
@@ -1248,16 +1244,14 @@ void Isolate::PrintStack(FILE* out, PrintStackMode mode) {
} else if (stack_trace_nesting_level_ == 1) {
stack_trace_nesting_level_++;
base::OS::PrintError(
- "\n\nAttempt to print stack while printing stack (double fault)\n");
+ "\n\nAttempt to print stack while printing stack (double fault)\n");
base::OS::PrintError(
- "If you are lucky you may find a partial stack dump on stdout.\n\n");
+ "If you are lucky you may find a partial stack dump on stdout.\n\n");
incomplete_message_->OutputToFile(out);
}
}
-
-static void PrintFrames(Isolate* isolate,
- StringStream* accumulator,
+static void PrintFrames(Isolate* isolate, StringStream* accumulator,
StackFrame::PrintMode mode) {
StackFrameIterator it(isolate);
for (int i = 0; !it.done(); it.Advance()) {
@@ -1285,13 +1279,11 @@ void Isolate::PrintStack(StringStream* accumulator, PrintStackMode mode) {
accumulator->Add("=====================\n\n");
}
-
void Isolate::SetFailedAccessCheckCallback(
v8::FailedAccessCheckCallback callback) {
thread_local_top()->failed_access_check_callback_ = callback;
}
-
void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
if (!thread_local_top()->failed_access_check_callback_) {
return ScheduleThrow(*factory()->NewTypeError(MessageTemplate::kNoAccess));
@@ -1303,14 +1295,15 @@ void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
// Get the data object from access check info.
HandleScope scope(this);
Handle<Object> data;
- { DisallowHeapAllocation no_gc;
+ {
+ DisallowHeapAllocation no_gc;
AccessCheckInfo access_check_info = AccessCheckInfo::Get(this, receiver);
if (access_check_info.is_null()) {
AllowHeapAllocation doesnt_matter_anymore;
return ScheduleThrow(
*factory()->NewTypeError(MessageTemplate::kNoAccess));
}
- data = handle(access_check_info->data(), this);
+ data = handle(access_check_info.data(), this);
}
// Leaving JavaScript.
@@ -1319,7 +1312,6 @@ void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
v8::Utils::ToLocal(receiver), v8::ACCESS_HAS, v8::Utils::ToLocal(data));
}
-
bool Isolate::MayAccess(Handle<Context> accessing_context,
Handle<JSObject> receiver) {
DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
@@ -1333,18 +1325,17 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
DisallowHeapAllocation no_gc;
if (receiver->IsJSGlobalProxy()) {
- Object receiver_context =
- JSGlobalProxy::cast(*receiver)->native_context();
- if (!receiver_context->IsContext()) return false;
+ Object receiver_context = JSGlobalProxy::cast(*receiver).native_context();
+ if (!receiver_context.IsContext()) return false;
// Get the native context of current top context.
// avoid using Isolate::native_context() because it uses Handle.
Context native_context =
- accessing_context->global_object()->native_context();
+ accessing_context->global_object().native_context();
if (receiver_context == native_context) return true;
- if (Context::cast(receiver_context)->security_token() ==
- native_context->security_token())
+ if (Context::cast(receiver_context).security_token() ==
+ native_context.security_token())
return true;
}
}
@@ -1352,12 +1343,13 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
HandleScope scope(this);
Handle<Object> data;
v8::AccessCheckCallback callback = nullptr;
- { DisallowHeapAllocation no_gc;
+ {
+ DisallowHeapAllocation no_gc;
AccessCheckInfo access_check_info = AccessCheckInfo::Get(this, receiver);
if (access_check_info.is_null()) return false;
- Object fun_obj = access_check_info->callback();
+ Object fun_obj = access_check_info.callback();
callback = v8::ToCData<v8::AccessCheckCallback>(fun_obj);
- data = handle(access_check_info->data(), this);
+ data = handle(access_check_info.data(), this);
}
LOG(this, ApiSecurityCheck());
@@ -1371,7 +1363,7 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
}
Object Isolate::StackOverflow() {
- if (FLAG_abort_on_stack_or_string_length_overflow) {
+ if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on stack overflow");
}
@@ -1419,14 +1411,12 @@ void Isolate::CancelTerminateExecution() {
}
}
-
void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
ExecutionAccess access(this);
api_interrupts_queue_.push(InterruptEntry(callback, data));
stack_guard()->RequestApiInterrupt();
}
-
void Isolate::InvokeApiInterruptCallbacks() {
RuntimeCallTimerScope runtimeTimer(
this, RuntimeCallCounterId::kInvokeApiInterruptCallbacks);
@@ -1445,7 +1435,6 @@ void Isolate::InvokeApiInterruptCallbacks() {
}
}
-
void ReportBootstrappingException(Handle<Object> exception,
MessageLocation* location) {
base::OS::PrintError("Exception thrown during bootstrapping\n");
@@ -1457,27 +1446,27 @@ void ReportBootstrappingException(Handle<Object> exception,
// to the console for easier debugging.
int line_number =
location->script()->GetLineNumber(location->start_pos()) + 1;
- if (exception->IsString() && location->script()->name()->IsString()) {
+ if (exception->IsString() && location->script()->name().IsString()) {
base::OS::PrintError(
"Extension or internal compilation error: %s in %s at line %d.\n",
- String::cast(*exception)->ToCString().get(),
- String::cast(location->script()->name())->ToCString().get(),
+ String::cast(*exception).ToCString().get(),
+ String::cast(location->script()->name()).ToCString().get(),
line_number);
- } else if (location->script()->name()->IsString()) {
+ } else if (location->script()->name().IsString()) {
base::OS::PrintError(
"Extension or internal compilation error in %s at line %d.\n",
- String::cast(location->script()->name())->ToCString().get(),
+ String::cast(location->script()->name()).ToCString().get(),
line_number);
} else if (exception->IsString()) {
base::OS::PrintError("Extension or internal compilation error: %s.\n",
- String::cast(*exception)->ToCString().get());
+ String::cast(*exception).ToCString().get());
} else {
base::OS::PrintError("Extension or internal compilation error.\n");
}
#ifdef OBJECT_PRINT
// Since comments and empty lines have been stripped from the source of
// builtins, print the actual source here so that line numbers match.
- if (location->script()->source()->IsString()) {
+ if (location->script()->source().IsString()) {
Handle<String> src(String::cast(location->script()->source()),
location->script()->GetIsolate());
PrintF("Failing script:");
@@ -1514,8 +1503,8 @@ Object Isolate::Throw(Object raw_exception, MessageLocation* location) {
Handle<Script> script = location->script();
Handle<Object> name(script->GetNameOrSourceURL(), this);
printf("at ");
- if (name->IsString() && String::cast(*name)->length() > 0)
- String::cast(*name)->PrintOn(stdout);
+ if (name->IsString() && String::cast(*name).length() > 0)
+ String::cast(*name).PrintOn(stdout);
else
printf("<anonymous>");
// Script::GetLineNumber and Script::GetColumnNumber can allocate on the heap to
@@ -1536,7 +1525,7 @@ Object Isolate::Throw(Object raw_exception, MessageLocation* location) {
printf(", line %d\n", script->GetLineNumber(location->start_pos()) + 1);
}
}
- raw_exception->Print();
+ raw_exception.Print();
printf("Stack Trace:\n");
PrintStack(stdout);
printf("=========================================================\n");
@@ -1661,8 +1650,8 @@ Object Isolate::UnwindAndFindHandler() {
// Gather information from the handler.
Code code = frame->LookupCode();
HandlerTable table(code);
- return FoundHandler(Context(), code->InstructionStart(),
- table.LookupReturn(0), code->constant_pool(),
+ return FoundHandler(Context(), code.InstructionStart(),
+ table.LookupReturn(0), code.constant_pool(),
handler->address() + StackHandlerConstants::kSize,
0);
}
@@ -1699,6 +1688,16 @@ Object Isolate::UnwindAndFindHandler() {
wasm_code->constant_pool(), return_sp, frame->fp());
}
+ case StackFrame::WASM_COMPILE_LAZY: {
+ // Can only fail directly on invocation. This happens if an invalid
+ // function was validated lazily.
+ DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
+ trap_handler::IsThreadInWasm());
+ DCHECK(FLAG_wasm_lazy_validation);
+ trap_handler::ClearThreadInWasm();
+ break;
+ }
+
case StackFrame::OPTIMIZED: {
// For optimized frames we perform a lookup in the handler table.
if (!catchable_by_js) break;
@@ -1718,17 +1717,17 @@ Object Isolate::UnwindAndFindHandler() {
// TODO(bmeurer): Turbofanned BUILTIN frames appear as OPTIMIZED,
// but do not have a code kind of OPTIMIZED_FUNCTION.
- if (code->kind() == Code::OPTIMIZED_FUNCTION &&
- code->marked_for_deoptimization()) {
+ if (code.kind() == Code::OPTIMIZED_FUNCTION &&
+ code.marked_for_deoptimization()) {
// If the target code is lazy deoptimized, we jump to the original
// return address, but we make a note that we are throwing, so
// that the deoptimizer can do the right thing.
- offset = static_cast<int>(frame->pc() - code->entry());
+ offset = static_cast<int>(frame->pc() - code.entry());
set_deoptimizer_lazy_throw(true);
}
- return FoundHandler(Context(), code->InstructionStart(), offset,
- code->constant_pool(), return_sp, frame->fp());
+ return FoundHandler(Context(), code.InstructionStart(), offset,
+ code.constant_pool(), return_sp, frame->fp());
}
case StackFrame::STUB: {
@@ -1742,12 +1741,12 @@ Object Isolate::UnwindAndFindHandler() {
// It is safe to skip Wasm runtime stubs as none of them contain local
// exception handlers.
CHECK_EQ(wasm::WasmCode::kRuntimeStub, wasm_code->kind());
- CHECK_EQ(0, wasm_code->handler_table_offset());
+ CHECK_EQ(0, wasm_code->handler_table_size());
break;
}
Code code = stub_frame->LookupCode();
- if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->has_handler_table() || !code->is_turbofanned()) {
+ if (!code.IsCode() || code.kind() != Code::BUILTIN ||
+ !code.has_handler_table() || !code.is_turbofanned()) {
break;
}
@@ -1761,8 +1760,8 @@ Object Isolate::UnwindAndFindHandler() {
StandardFrameConstants::kFixedFrameSizeAboveFp -
stack_slots * kSystemPointerSize;
- return FoundHandler(Context(), code->InstructionStart(), offset,
- code->constant_pool(), return_sp, frame->fp());
+ return FoundHandler(Context(), code.InstructionStart(), offset,
+ code.constant_pool(), return_sp, frame->fp());
}
case StackFrame::INTERPRETED: {
@@ -1770,7 +1769,7 @@ Object Isolate::UnwindAndFindHandler() {
if (!catchable_by_js) break;
InterpretedFrame* js_frame = static_cast<InterpretedFrame*>(frame);
int register_slots = InterpreterFrameConstants::RegisterStackSlotCount(
- js_frame->GetBytecodeArray()->register_count());
+ js_frame->GetBytecodeArray().register_count());
int context_reg = 0; // Will contain register index holding context.
int offset =
js_frame->LookupExceptionHandlerInTable(&context_reg, nullptr);
@@ -1794,8 +1793,8 @@ Object Isolate::UnwindAndFindHandler() {
Code code =
builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- return FoundHandler(context, code->InstructionStart(), 0,
- code->constant_pool(), return_sp, frame->fp());
+ return FoundHandler(context, code.InstructionStart(), 0,
+ code.constant_pool(), return_sp, frame->fp());
}
case StackFrame::BUILTIN:
@@ -1823,8 +1822,8 @@ Object Isolate::UnwindAndFindHandler() {
// Reconstruct the stack pointer from the frame pointer.
Address return_sp = js_frame->fp() - js_frame->GetSPToFPDelta();
Code code = js_frame->LookupCode();
- return FoundHandler(Context(), code->InstructionStart(), 0,
- code->constant_pool(), return_sp, frame->fp());
+ return FoundHandler(Context(), code.InstructionStart(), 0,
+ code.constant_pool(), return_sp, frame->fp());
} break;
default:
@@ -1838,7 +1837,7 @@ Object Isolate::UnwindAndFindHandler() {
USE(removed);
// If there were any materialized objects, the code should be
// marked for deopt.
- DCHECK_IMPLIES(removed, frame->LookupCode()->marked_for_deoptimization());
+ DCHECK_IMPLIES(removed, frame->LookupCode().marked_for_deoptimization());
}
}
@@ -1859,7 +1858,7 @@ HandlerTable::CatchPrediction PredictException(JavaScriptFrame* frame) {
const FrameSummary& summary = summaries[i - 1];
Handle<AbstractCode> code = summary.AsJavaScript().abstract_code();
if (code->IsCode() && code->kind() == AbstractCode::BUILTIN) {
- prediction = code->GetCode()->GetBuiltinCatchPrediction();
+ prediction = code->GetCode().GetBuiltinCatchPrediction();
if (prediction == HandlerTable::UNCAUGHT) continue;
return prediction;
}
@@ -1981,11 +1980,10 @@ void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
DCHECK(handler->rethrow_);
DCHECK(handler->capture_message_);
Object message(reinterpret_cast<Address>(handler->message_obj_));
- DCHECK(message->IsJSMessageObject() || message->IsTheHole(this));
+ DCHECK(message.IsJSMessageObject() || message.IsTheHole(this));
thread_local_top()->pending_message_obj_ = message;
}
-
void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
DCHECK(has_scheduled_exception());
if (reinterpret_cast<void*>(scheduled_exception().ptr()) ==
@@ -2060,19 +2058,24 @@ bool Isolate::ComputeLocation(MessageLocation* target) {
wasm::WasmCodeRefScope code_ref_scope;
frame->Summarize(&frames);
FrameSummary& summary = frames.back();
- summary.EnsureSourcePositionsAvailable();
- int pos = summary.SourcePosition();
Handle<SharedFunctionInfo> shared;
Handle<Object> script = summary.script();
if (!script->IsScript() ||
- (Script::cast(*script)->source()->IsUndefined(this))) {
+ (Script::cast(*script).source().IsUndefined(this))) {
return false;
}
if (summary.IsJavaScript()) {
shared = handle(summary.AsJavaScript().function()->shared(), this);
}
- *target = MessageLocation(Handle<Script>::cast(script), pos, pos + 1, shared);
+ if (summary.AreSourcePositionsAvailable()) {
+ int pos = summary.SourcePosition();
+ *target =
+ MessageLocation(Handle<Script>::cast(script), pos, pos + 1, shared);
+ } else {
+ *target = MessageLocation(Handle<Script>::cast(script), shared,
+ summary.code_offset());
+ }
return true;
}
@@ -2102,7 +2105,6 @@ bool Isolate::ComputeLocationFromException(MessageLocation* target,
return true;
}
-
bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
Handle<Object> exception) {
if (!exception->IsJSObject()) return false;
@@ -2118,15 +2120,15 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
Handle<WasmInstanceObject> instance(elements->WasmInstance(i), this);
uint32_t func_index =
- static_cast<uint32_t>(elements->WasmFunctionIndex(i)->value());
- int code_offset = elements->Offset(i)->value();
+ static_cast<uint32_t>(elements->WasmFunctionIndex(i).value());
+ int code_offset = elements->Offset(i).value();
bool is_at_number_conversion =
elements->IsAsmJsWasmFrame(i) &&
- elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
+ elements->Flags(i).value() & FrameArray::kAsmJsAtNumberConversion;
// WasmCode* held alive by the {GlobalWasmCodeRef}.
wasm::WasmCode* code =
Managed<wasm::GlobalWasmCodeRef>::cast(elements->WasmCodeObject(i))
- ->get()
+ .get()
->code();
int byte_offset =
FrameSummary::WasmCompiledFrameSummary::GetWasmSourcePosition(
@@ -2134,33 +2136,37 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
int pos = WasmModuleObject::GetSourcePosition(
handle(instance->module_object(), this), func_index, byte_offset,
is_at_number_conversion);
- Handle<Script> script(instance->module_object()->script(), this);
+ Handle<Script> script(instance->module_object().script(), this);
*target = MessageLocation(script, pos, pos + 1);
return true;
}
Handle<JSFunction> fun = handle(elements->Function(i), this);
- if (!fun->shared()->IsSubjectToDebugging()) continue;
+ if (!fun->shared().IsSubjectToDebugging()) continue;
- Object script = fun->shared()->script();
- if (script->IsScript() &&
- !(Script::cast(script)->source()->IsUndefined(this))) {
+ Object script = fun->shared().script();
+ if (script.IsScript() &&
+ !(Script::cast(script).source().IsUndefined(this))) {
Handle<SharedFunctionInfo> shared = handle(fun->shared(), this);
- SharedFunctionInfo::EnsureSourcePositionsAvailable(this, shared);
- AbstractCode abstract_code = elements->Code(i);
- const int code_offset = elements->Offset(i)->value();
- const int pos = abstract_code->SourcePosition(code_offset);
+ AbstractCode abstract_code = elements->Code(i);
+ const int code_offset = elements->Offset(i).value();
Handle<Script> casted_script(Script::cast(script), this);
- *target = MessageLocation(casted_script, pos, pos + 1);
+ if (shared->HasBytecodeArray() &&
+ shared->GetBytecodeArray().HasSourcePositionTable()) {
+ int pos = abstract_code.SourcePosition(code_offset);
+ *target = MessageLocation(casted_script, pos, pos + 1, shared);
+ } else {
+ *target = MessageLocation(casted_script, shared, code_offset);
+ }
+
return true;
}
}
return false;
}
-
Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
MessageLocation* location) {
Handle<FixedArray> stack_trace_object;
@@ -2265,12 +2271,17 @@ void Isolate::ReportPendingMessagesImpl(bool report_externally) {
}
// Actually report the pending message to all message handlers.
- if (!message_obj->IsTheHole(this) && should_report_exception) {
+ if (!message_obj.IsTheHole(this) && should_report_exception) {
HandleScope scope(this);
Handle<JSMessageObject> message(JSMessageObject::cast(message_obj), this);
Handle<Script> script(message->script(), this);
- int start_pos = message->start_position();
- int end_pos = message->end_position();
+ // Clear the exception and restore it afterwards, otherwise
+ // CollectSourcePositions will abort.
+ clear_pending_exception();
+ JSMessageObject::EnsureSourcePositionsAvailable(this, message);
+ set_pending_exception(exception);
+ int start_pos = message->GetStartPosition();
+ int end_pos = message->GetEndPosition();
MessageLocation location(script, start_pos, end_pos);
MessageHandler::ReportMessage(this, &location, message);
}
@@ -2336,13 +2347,13 @@ void Isolate::ReportPendingMessagesFromJavaScript() {
thread_local_top()->external_caught_exception_ = true;
v8::TryCatch* handler = try_catch_handler();
- DCHECK(thread_local_top()->pending_message_obj_->IsJSMessageObject() ||
- thread_local_top()->pending_message_obj_->IsTheHole(this));
+ DCHECK(thread_local_top()->pending_message_obj_.IsJSMessageObject() ||
+ thread_local_top()->pending_message_obj_.IsTheHole(this));
handler->can_continue_ = true;
handler->has_terminated_ = false;
handler->exception_ = reinterpret_cast<void*>(pending_exception().ptr());
// Propagate to the external try-catch only if we got an actual message.
- if (thread_local_top()->pending_message_obj_->IsTheHole(this)) return true;
+ if (thread_local_top()->pending_message_obj_.IsTheHole(this)) return true;
handler->message_obj_ =
reinterpret_cast<void*>(thread_local_top()->pending_message_obj_.ptr());
@@ -2401,7 +2412,6 @@ void Isolate::PushPromise(Handle<JSObject> promise) {
tltop->promise_on_stack_ = new PromiseOnStack(global_promise, prev);
}
-
void Isolate::PopPromise() {
ThreadLocalTop* tltop = thread_local_top();
if (tltop->promise_on_stack_ == nullptr) return;
@@ -2471,7 +2481,7 @@ bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
: handle(Handle<PromiseCapability>::cast(promise_or_capability)
->promise(),
isolate));
- if (reaction->reject_handler()->IsUndefined(isolate)) {
+ if (reaction->reject_handler().IsUndefined(isolate)) {
if (InternalPromiseHasUserDefinedRejectHandler(isolate, promise)) {
return true;
}
@@ -2516,11 +2526,11 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
catch_prediction = PredictException(JavaScriptFrame::cast(frame));
} else if (frame->type() == StackFrame::STUB) {
Code code = frame->LookupCode();
- if (!code->IsCode() || code->kind() != Code::BUILTIN ||
- !code->has_handler_table() || !code->is_turbofanned()) {
+ if (!code.IsCode() || code.kind() != Code::BUILTIN ||
+ !code.has_handler_table() || !code.is_turbofanned()) {
continue;
}
- catch_prediction = code->GetBuiltinCatchPrediction();
+ catch_prediction = code.GetBuiltinCatchPrediction();
} else {
continue;
}
@@ -2564,17 +2574,13 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
return retval;
}
-
void Isolate::SetCaptureStackTraceForUncaughtExceptions(
- bool capture,
- int frame_limit,
- StackTrace::StackTraceOptions options) {
+ bool capture, int frame_limit, StackTrace::StackTraceOptions options) {
capture_stack_trace_for_uncaught_exceptions_ = capture;
stack_trace_for_uncaught_exceptions_frame_limit_ = frame_limit;
stack_trace_for_uncaught_exceptions_options_ = options;
}
-
void Isolate::SetAbortOnUncaughtExceptionCallback(
v8::Isolate::AbortOnUncaughtExceptionCallback callback) {
abort_on_uncaught_exception_callback_ = callback;
@@ -2602,7 +2608,7 @@ Handle<Context> Isolate::GetIncumbentContext() {
if (!it.done() &&
(!top_backup_incumbent || it.frame()->sp() < top_backup_incumbent)) {
Context context = Context::cast(it.frame()->context());
- return Handle<Context>(context->native_context(), this);
+ return Handle<Context>(context.native_context(), this);
}
// 2nd candidate: the last Context::Scope's incumbent context if any.
@@ -2630,7 +2636,6 @@ char* Isolate::ArchiveThread(char* to) {
return to + sizeof(ThreadLocalTop);
}
-
char* Isolate::RestoreThread(char* from) {
MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
sizeof(ThreadLocalTop));
@@ -2639,7 +2644,7 @@ char* Isolate::RestoreThread(char* from) {
#ifdef USE_SIMULATOR
thread_local_top()->simulator_ = Simulator::current(this);
#endif
- DCHECK(context().is_null() || context()->IsContext());
+ DCHECK(context().is_null() || context().IsContext());
return from + sizeof(ThreadLocalTop);
}
@@ -2701,13 +2706,11 @@ Isolate::PerIsolateThreadData* Isolate::ThreadDataTable::Lookup(
return t->second;
}
-
void Isolate::ThreadDataTable::Insert(Isolate::PerIsolateThreadData* data) {
bool inserted = table_.insert(std::make_pair(data->thread_id_, data)).second;
CHECK(inserted);
}
-
void Isolate::ThreadDataTable::Remove(PerIsolateThreadData* data) {
table_.erase(data->thread_id_);
delete data;
@@ -2766,8 +2769,7 @@ class VerboseAccountingAllocator : public AccountingAllocator {
"\"time\": %f, "
"\"ptr\": \"%p\", "
"\"name\": \"%s\", "
- "\"size\": %" PRIuS
- ","
+ "\"size\": %zu,"
"\"nesting\": %zu}\n",
type, reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(),
@@ -2784,7 +2786,7 @@ class VerboseAccountingAllocator : public AccountingAllocator {
"\"type\": \"zone\", "
"\"isolate\": \"%p\", "
"\"time\": %f, "
- "\"allocated\": %" PRIuS "}\n",
+ "\"allocated\": %zu}\n",
reinterpret_cast<void*>(heap_->isolate()), time, malloced);
}
@@ -2875,12 +2877,12 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
handle_scope_data_.Initialize();
-#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
+#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
name##_ = (initial_value);
ISOLATE_INIT_LIST(ISOLATE_INIT_EXECUTE)
#undef ISOLATE_INIT_EXECUTE
-#define ISOLATE_INIT_ARRAY_EXECUTE(type, name, length) \
+#define ISOLATE_INIT_ARRAY_EXECUTE(type, name, length) \
memset(name##_, 0, sizeof(type) * length);
ISOLATE_INIT_ARRAY_LIST(ISOLATE_INIT_ARRAY_EXECUTE)
#undef ISOLATE_INIT_ARRAY_EXECUTE
@@ -3030,14 +3032,12 @@ void Isolate::Deinit() {
}
}
-
void Isolate::SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data) {
base::Thread::SetThreadLocal(isolate_key_, isolate);
base::Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
}
-
Isolate::~Isolate() {
TRACE_ISOLATE(destructor);
@@ -3047,9 +3047,6 @@ Isolate::~Isolate() {
delete entry_stack_;
entry_stack_ = nullptr;
- delete unicode_cache_;
- unicode_cache_ = nullptr;
-
delete date_cache_;
date_cache_ = nullptr;
@@ -3144,13 +3141,13 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
SetTerminationOnExternalTryCatch();
} else {
v8::TryCatch* handler = try_catch_handler();
- DCHECK(thread_local_top()->pending_message_obj_->IsJSMessageObject() ||
- thread_local_top()->pending_message_obj_->IsTheHole(this));
+ DCHECK(thread_local_top()->pending_message_obj_.IsJSMessageObject() ||
+ thread_local_top()->pending_message_obj_.IsTheHole(this));
handler->can_continue_ = true;
handler->has_terminated_ = false;
handler->exception_ = reinterpret_cast<void*>(pending_exception().ptr());
// Propagate to the external try-catch only if we got an actual message.
- if (thread_local_top()->pending_message_obj_->IsTheHole(this)) return true;
+ if (thread_local_top()->pending_message_obj_.IsTheHole(this)) return true;
handler->message_obj_ =
reinterpret_cast<void*>(thread_local_top()->pending_message_obj_.ptr());
@@ -3192,11 +3189,6 @@ void CreateOffHeapTrampolines(Isolate* isolate) {
// From this point onwards, the old builtin code object is unreachable and
// will be collected by the next GC.
builtins->set_builtin(i, *trampoline);
-
- if (isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling()) {
- isolate->logger()->LogCodeObject(*trampoline);
- }
}
}
@@ -3321,7 +3313,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
compilation_cache_ = new CompilationCache(this);
descriptor_lookup_cache_ = new DescriptorLookupCache();
- unicode_cache_ = new UnicodeCache();
inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
global_handles_ = new GlobalHandles(this);
eternal_handles_ = new EternalHandles();
@@ -3342,7 +3333,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
// Enable logging before setting up the heap
logger_->SetUp(this);
- { // NOLINT
+ { // NOLINT
// Ensure that the thread has a valid stack guard. The v8::Locker object
// will ensure this too, but we don't have to use lockers if we are only
// using one thread.
@@ -3370,6 +3361,8 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
setup_delegate_ = new SetupIsolateDelegate(create_heap_objects);
}
+ if (!FLAG_inline_new) heap_.DisableInlineAllocation();
+
if (!setup_delegate_->SetupHeap(&heap_)) {
V8::FatalProcessOutOfMemory(this, "heap object creation");
return false;
@@ -3437,21 +3430,29 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
AlwaysAllocateScope always_allocate(this);
CodeSpaceMemoryModificationScope modification_scope(&heap_);
- if (!create_heap_objects) {
- startup_deserializer->DeserializeInto(this);
+ if (create_heap_objects) {
+ heap_.read_only_space()->ClearStringPaddingIfNeeded();
+ heap_.read_only_heap()->OnCreateHeapObjectsComplete(this);
} else {
- heap_.read_only_heap()->OnCreateHeapObjectsComplete();
+ startup_deserializer->DeserializeInto(this);
}
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
interpreter_->Initialize();
heap_.NotifyDeserializationComplete();
}
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ heap_.VerifyReadOnlyHeap();
+ }
+#endif
+
delete setup_delegate_;
setup_delegate_ = nullptr;
- // Initialize the builtin entry table.
Builtins::UpdateBuiltinEntryTable(this);
+ Builtins::EmitCodeCreateEvents(this);
#ifdef DEBUG
// Verify that the current heap state (usually deserialized from the snapshot)
@@ -3471,7 +3472,7 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
#ifndef V8_TARGET_ARCH_ARM
// The IET for profiling should always be a full on-heap Code object.
DCHECK(!Code::cast(heap_.interpreter_entry_trampoline_for_profiling())
- ->is_off_heap_trampoline());
+ .is_off_heap_trampoline());
#endif // V8_TARGET_ARCH_ARM
if (FLAG_print_builtin_code) builtins()->PrintBuiltinCode();
@@ -3502,8 +3503,6 @@ bool Isolate::Init(ReadOnlyDeserializer* read_only_deserializer,
initialized_from_snapshot_ = !create_heap_objects;
- if (!FLAG_inline_new) heap_.DisableInlineAllocation();
-
if (FLAG_stress_sampling_allocation_profiler > 0) {
uint64_t sample_interval = FLAG_stress_sampling_allocation_profiler;
int stack_depth = 128;
@@ -3553,9 +3552,8 @@ void Isolate::Enter() {
DCHECK_NOT_NULL(data);
DCHECK(data->isolate_ == this);
- EntryStackItem* item = new EntryStackItem(current_data,
- current_isolate,
- entry_stack_);
+ EntryStackItem* item =
+ new EntryStackItem(current_data, current_isolate, entry_stack_);
entry_stack_ = item;
SetIsolateThreadLocals(this, data);
@@ -3564,7 +3562,6 @@ void Isolate::Enter() {
set_thread_id(data->thread_id());
}
-
void Isolate::Exit() {
DCHECK_NOT_NULL(entry_stack_);
DCHECK(entry_stack_->previous_thread_data == nullptr ||
@@ -3589,7 +3586,6 @@ void Isolate::Exit() {
SetIsolateThreadLocals(previous_isolate, previous_thread_data);
}
-
void Isolate::LinkDeferredHandles(DeferredHandles* deferred) {
deferred->next_ = deferred_handles_head_;
if (deferred_handles_head_ != nullptr) {
@@ -3598,7 +3594,6 @@ void Isolate::LinkDeferredHandles(DeferredHandles* deferred) {
deferred_handles_head_ = deferred;
}
-
void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
#ifdef DEBUG
// In debug mode assert that the linked list is well-formed.
@@ -3661,7 +3656,6 @@ CompilationStatistics* Isolate::GetTurboStatistics() {
return turbo_statistics();
}
-
CodeTracer* Isolate::GetCodeTracer() {
if (code_tracer() == nullptr) set_code_tracer(new CodeTracer(id()));
return code_tracer();
@@ -3669,7 +3663,7 @@ CodeTracer* Isolate::GetCodeTracer() {
bool Isolate::use_optimizer() {
return FLAG_opt && !serializer_enabled_ && CpuFeatures::SupportsOptimizer() &&
- !is_precise_count_code_coverage() && !is_block_count_code_coverage();
+ !is_precise_count_code_coverage();
}
bool Isolate::NeedsDetailedOptimizedCodeLineInfo() const {
@@ -3684,14 +3678,14 @@ bool Isolate::NeedsSourcePositionsForProfiling() const {
}
void Isolate::SetFeedbackVectorsForProfilingTools(Object value) {
- DCHECK(value->IsUndefined(this) || value->IsArrayList());
+ DCHECK(value.IsUndefined(this) || value.IsArrayList());
heap()->set_feedback_vectors_for_profiling_tools(value);
}
void Isolate::MaybeInitializeVectorListFromHeap() {
- if (!heap()->feedback_vectors_for_profiling_tools()->IsUndefined(this)) {
+ if (!heap()->feedback_vectors_for_profiling_tools().IsUndefined(this)) {
// Already initialized, return early.
- DCHECK(heap()->feedback_vectors_for_profiling_tools()->IsArrayList());
+ DCHECK(heap()->feedback_vectors_for_profiling_tools().IsArrayList());
return;
}
@@ -3702,13 +3696,13 @@ void Isolate::MaybeInitializeVectorListFromHeap() {
HeapIterator heap_iterator(heap());
for (HeapObject current_obj = heap_iterator.next(); !current_obj.is_null();
current_obj = heap_iterator.next()) {
- if (!current_obj->IsFeedbackVector()) continue;
+ if (!current_obj.IsFeedbackVector()) continue;
FeedbackVector vector = FeedbackVector::cast(current_obj);
- SharedFunctionInfo shared = vector->shared_function_info();
+ SharedFunctionInfo shared = vector.shared_function_info();
// No need to preserve the feedback vector for non-user-visible functions.
- if (!shared->IsSubjectToDebugging()) continue;
+ if (!shared.IsSubjectToDebugging()) continue;
vectors.emplace_back(vector, this);
}
@@ -3730,14 +3724,14 @@ void Isolate::set_date_cache(DateCache* date_cache) {
bool Isolate::IsArrayOrObjectOrStringPrototype(Object object) {
Object context = heap()->native_contexts_list();
- while (!context->IsUndefined(this)) {
+ while (!context.IsUndefined(this)) {
Context current_context = Context::cast(context);
- if (current_context->initial_object_prototype() == object ||
- current_context->initial_array_prototype() == object ||
- current_context->initial_string_prototype() == object) {
+ if (current_context.initial_object_prototype() == object ||
+ current_context.initial_array_prototype() == object ||
+ current_context.initial_string_prototype() == object) {
return true;
}
- context = current_context->next_context_link();
+ context = current_context.next_context_link();
}
return false;
}
@@ -3745,12 +3739,12 @@ bool Isolate::IsArrayOrObjectOrStringPrototype(Object object) {
bool Isolate::IsInAnyContext(Object object, uint32_t index) {
DisallowHeapAllocation no_gc;
Object context = heap()->native_contexts_list();
- while (!context->IsUndefined(this)) {
+ while (!context.IsUndefined(this)) {
Context current_context = Context::cast(context);
- if (current_context->get(index) == object) {
+ if (current_context.get(index) == object) {
return true;
}
- context = current_context->next_context_link();
+ context = current_context.next_context_link();
}
return false;
}
@@ -3758,20 +3752,20 @@ bool Isolate::IsInAnyContext(Object object, uint32_t index) {
bool Isolate::IsNoElementsProtectorIntact(Context context) {
PropertyCell no_elements_cell = heap()->no_elements_protector();
bool cell_reports_intact =
- no_elements_cell->value()->IsSmi() &&
- Smi::ToInt(no_elements_cell->value()) == kProtectorValid;
+ no_elements_cell.value().IsSmi() &&
+ Smi::ToInt(no_elements_cell.value()) == kProtectorValid;
#ifdef DEBUG
- Context native_context = context->native_context();
+ Context native_context = context.native_context();
Map root_array_map =
- native_context->GetInitialJSArrayMap(GetInitialFastElementsKind());
+ native_context.GetInitialJSArrayMap(GetInitialFastElementsKind());
JSObject initial_array_proto = JSObject::cast(
- native_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ native_context.get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
JSObject initial_object_proto = JSObject::cast(
- native_context->get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX));
+ native_context.get(Context::INITIAL_OBJECT_PROTOTYPE_INDEX));
JSObject initial_string_proto = JSObject::cast(
- native_context->get(Context::INITIAL_STRING_PROTOTYPE_INDEX));
+ native_context.get(Context::INITIAL_STRING_PROTOTYPE_INDEX));
if (root_array_map.is_null() || initial_array_proto == initial_object_proto) {
// We are in the bootstrapping process, and the entire check sequence
@@ -3780,12 +3774,12 @@ bool Isolate::IsNoElementsProtectorIntact(Context context) {
}
// Check that the array prototype hasn't been altered WRT empty elements.
- if (root_array_map->prototype() != initial_array_proto) {
+ if (root_array_map.prototype() != initial_array_proto) {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
- FixedArrayBase elements = initial_array_proto->elements();
+ FixedArrayBase elements = initial_array_proto.elements();
ReadOnlyRoots roots(heap());
if (elements != roots.empty_fixed_array() &&
elements != roots.empty_slow_element_dictionary()) {
@@ -3794,7 +3788,7 @@ bool Isolate::IsNoElementsProtectorIntact(Context context) {
}
// Check that the Object.prototype hasn't been altered WRT empty elements.
- elements = initial_object_proto->elements();
+ elements = initial_object_proto.elements();
if (elements != roots.empty_fixed_array() &&
elements != roots.empty_slow_element_dictionary()) {
DCHECK_EQ(false, cell_reports_intact);
@@ -3818,7 +3812,7 @@ bool Isolate::IsNoElementsProtectorIntact(Context context) {
DCHECK(!has_pending_exception());
// Check that the String.prototype hasn't been altered WRT empty elements.
- elements = initial_string_proto->elements();
+ elements = initial_string_proto.elements();
if (elements != roots.empty_fixed_array() &&
elements != roots.empty_slow_element_dictionary()) {
DCHECK_EQ(false, cell_reports_intact);
@@ -3827,7 +3821,7 @@ bool Isolate::IsNoElementsProtectorIntact(Context context) {
// Check that the String.prototype has the Object.prototype
// as its [[Prototype]] still.
- if (initial_string_proto->map()->prototype() != initial_object_proto) {
+ if (initial_string_proto.map().prototype() != initial_object_proto) {
DCHECK_EQ(false, cell_reports_intact);
return cell_reports_intact;
}
@@ -3843,10 +3837,10 @@ bool Isolate::IsNoElementsProtectorIntact() {
bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
Cell is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
bool is_is_concat_spreadable_set =
- Smi::ToInt(is_concat_spreadable_cell->value()) == kProtectorInvalid;
+ Smi::ToInt(is_concat_spreadable_cell.value()) == kProtectorInvalid;
#ifdef DEBUG
Map root_array_map =
- raw_native_context()->GetInitialJSArrayMap(GetInitialFastElementsKind());
+ raw_native_context().GetInitialJSArrayMap(GetInitialFastElementsKind());
if (root_array_map.is_null()) {
// Ignore the value of is_concat_spreadable during bootstrap.
return !is_is_concat_spreadable_set;
@@ -3869,13 +3863,13 @@ bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
bool Isolate::IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver) {
if (!IsIsConcatSpreadableLookupChainIntact()) return false;
- return !receiver->HasProxyInPrototype(this);
+ return !receiver.HasProxyInPrototype(this);
}
bool Isolate::IsPromiseHookProtectorIntact() {
PropertyCell promise_hook_cell = heap()->promise_hook_protector();
bool is_promise_hook_protector_intact =
- Smi::ToInt(promise_hook_cell->value()) == kProtectorValid;
+ Smi::ToInt(promise_hook_cell.value()) == kProtectorValid;
DCHECK_IMPLIES(is_promise_hook_protector_intact,
!promise_hook_or_async_event_delegate_);
DCHECK_IMPLIES(is_promise_hook_protector_intact,
@@ -3886,21 +3880,21 @@ bool Isolate::IsPromiseHookProtectorIntact() {
bool Isolate::IsPromiseResolveLookupChainIntact() {
Cell promise_resolve_cell = heap()->promise_resolve_protector();
bool is_promise_resolve_protector_intact =
- Smi::ToInt(promise_resolve_cell->value()) == kProtectorValid;
+ Smi::ToInt(promise_resolve_cell.value()) == kProtectorValid;
return is_promise_resolve_protector_intact;
}
bool Isolate::IsPromiseThenLookupChainIntact() {
PropertyCell promise_then_cell = heap()->promise_then_protector();
bool is_promise_then_protector_intact =
- Smi::ToInt(promise_then_cell->value()) == kProtectorValid;
+ Smi::ToInt(promise_then_cell.value()) == kProtectorValid;
return is_promise_then_protector_intact;
}
bool Isolate::IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver) {
DisallowHeapAllocation no_gc;
if (!receiver->IsJSPromise()) return false;
- if (!IsInAnyContext(receiver->map()->prototype(),
+ if (!IsInAnyContext(receiver->map().prototype(),
Context::PROMISE_PROTOTYPE_INDEX)) {
return false;
}
@@ -3909,7 +3903,7 @@ bool Isolate::IsPromiseThenLookupChainIntact(Handle<JSReceiver> receiver) {
void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
- if (!object->map()->is_prototype_map()) return;
+ if (!object->map().is_prototype_map()) return;
if (!IsNoElementsProtectorIntact()) return;
if (!IsArrayOrObjectOrStringPrototype(*object)) return;
PropertyCell::SetValueWithInvalidation(
@@ -3918,7 +3912,7 @@ void Isolate::UpdateNoElementsProtectorOnSetElement(Handle<JSObject> object) {
}
void Isolate::InvalidateIsConcatSpreadableProtector() {
- DCHECK(factory()->is_concat_spreadable_protector()->value()->IsSmi());
+ DCHECK(factory()->is_concat_spreadable_protector()->value().IsSmi());
DCHECK(IsIsConcatSpreadableLookupChainIntact());
factory()->is_concat_spreadable_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
@@ -3926,7 +3920,7 @@ void Isolate::InvalidateIsConcatSpreadableProtector() {
}
void Isolate::InvalidateArrayConstructorProtector() {
- DCHECK(factory()->array_constructor_protector()->value()->IsSmi());
+ DCHECK(factory()->array_constructor_protector()->value().IsSmi());
DCHECK(IsArrayConstructorIntact());
factory()->array_constructor_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
@@ -3934,7 +3928,7 @@ void Isolate::InvalidateArrayConstructorProtector() {
}
void Isolate::InvalidateArraySpeciesProtector() {
- DCHECK(factory()->array_species_protector()->value()->IsSmi());
+ DCHECK(factory()->array_species_protector()->value().IsSmi());
DCHECK(IsArraySpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->array_species_protector(),
@@ -3943,7 +3937,7 @@ void Isolate::InvalidateArraySpeciesProtector() {
}
void Isolate::InvalidateTypedArraySpeciesProtector() {
- DCHECK(factory()->typed_array_species_protector()->value()->IsSmi());
+ DCHECK(factory()->typed_array_species_protector()->value().IsSmi());
DCHECK(IsTypedArraySpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->typed_array_species_protector(),
@@ -3952,7 +3946,7 @@ void Isolate::InvalidateTypedArraySpeciesProtector() {
}
void Isolate::InvalidateRegExpSpeciesProtector() {
- DCHECK(factory()->regexp_species_protector()->value()->IsSmi());
+ DCHECK(factory()->regexp_species_protector()->value().IsSmi());
DCHECK(IsRegExpSpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->regexp_species_protector(),
@@ -3961,7 +3955,7 @@ void Isolate::InvalidateRegExpSpeciesProtector() {
}
void Isolate::InvalidatePromiseSpeciesProtector() {
- DCHECK(factory()->promise_species_protector()->value()->IsSmi());
+ DCHECK(factory()->promise_species_protector()->value().IsSmi());
DCHECK(IsPromiseSpeciesLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->promise_species_protector(),
@@ -3970,7 +3964,7 @@ void Isolate::InvalidatePromiseSpeciesProtector() {
}
void Isolate::InvalidateStringLengthOverflowProtector() {
- DCHECK(factory()->string_length_protector()->value()->IsSmi());
+ DCHECK(factory()->string_length_protector()->value().IsSmi());
DCHECK(IsStringLengthOverflowIntact());
factory()->string_length_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
@@ -3978,7 +3972,7 @@ void Isolate::InvalidateStringLengthOverflowProtector() {
}
void Isolate::InvalidateArrayIteratorProtector() {
- DCHECK(factory()->array_iterator_protector()->value()->IsSmi());
+ DCHECK(factory()->array_iterator_protector()->value().IsSmi());
DCHECK(IsArrayIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->array_iterator_protector(),
@@ -3987,7 +3981,7 @@ void Isolate::InvalidateArrayIteratorProtector() {
}
void Isolate::InvalidateMapIteratorProtector() {
- DCHECK(factory()->map_iterator_protector()->value()->IsSmi());
+ DCHECK(factory()->map_iterator_protector()->value().IsSmi());
DCHECK(IsMapIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->map_iterator_protector(),
@@ -3996,7 +3990,7 @@ void Isolate::InvalidateMapIteratorProtector() {
}
void Isolate::InvalidateSetIteratorProtector() {
- DCHECK(factory()->set_iterator_protector()->value()->IsSmi());
+ DCHECK(factory()->set_iterator_protector()->value().IsSmi());
DCHECK(IsSetIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->set_iterator_protector(),
@@ -4005,7 +3999,7 @@ void Isolate::InvalidateSetIteratorProtector() {
}
void Isolate::InvalidateStringIteratorProtector() {
- DCHECK(factory()->string_iterator_protector()->value()->IsSmi());
+ DCHECK(factory()->string_iterator_protector()->value().IsSmi());
DCHECK(IsStringIteratorLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->string_iterator_protector(),
@@ -4014,7 +4008,7 @@ void Isolate::InvalidateStringIteratorProtector() {
}
void Isolate::InvalidateArrayBufferDetachingProtector() {
- DCHECK(factory()->array_buffer_detaching_protector()->value()->IsSmi());
+ DCHECK(factory()->array_buffer_detaching_protector()->value().IsSmi());
DCHECK(IsArrayBufferDetachingIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->array_buffer_detaching_protector(),
@@ -4023,7 +4017,7 @@ void Isolate::InvalidateArrayBufferDetachingProtector() {
}
void Isolate::InvalidatePromiseHookProtector() {
- DCHECK(factory()->promise_hook_protector()->value()->IsSmi());
+ DCHECK(factory()->promise_hook_protector()->value().IsSmi());
DCHECK(IsPromiseHookProtectorIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->promise_hook_protector(),
@@ -4032,7 +4026,7 @@ void Isolate::InvalidatePromiseHookProtector() {
}
void Isolate::InvalidatePromiseResolveProtector() {
- DCHECK(factory()->promise_resolve_protector()->value()->IsSmi());
+ DCHECK(factory()->promise_resolve_protector()->value().IsSmi());
DCHECK(IsPromiseResolveLookupChainIntact());
factory()->promise_resolve_protector()->set_value(
Smi::FromInt(kProtectorInvalid));
@@ -4040,7 +4034,7 @@ void Isolate::InvalidatePromiseResolveProtector() {
}
void Isolate::InvalidatePromiseThenProtector() {
- DCHECK(factory()->promise_then_protector()->value()->IsSmi());
+ DCHECK(factory()->promise_then_protector()->value().IsSmi());
DCHECK(IsPromiseThenLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
this, factory()->promise_then_protector(),
@@ -4097,10 +4091,9 @@ Code Isolate::FindCodeObject(Address a) {
return heap()->GcSafeFindCodeForInnerPointer(a);
}
-
#ifdef DEBUG
-#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
-const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
+#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
+ const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
@@ -4464,7 +4457,6 @@ void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
use_counter_callback_ = callback;
}
-
void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
// The counter callback may cause the embedder to call into V8, which is not
// generally possible during GC.
@@ -4507,7 +4499,6 @@ void Isolate::AddDetachedContext(Handle<Context> context) {
heap()->set_detached_contexts(*detached_contexts);
}
-
void Isolate::CheckDetachedContextsAfterGC() {
HandleScope scope(this);
Handle<WeakArrayList> detached_contexts = factory()->detached_contexts();
@@ -4598,12 +4589,12 @@ void Isolate::SetIdle(bool is_idle) {
}
#ifdef V8_INTL_SUPPORT
-icu::UObject* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type) {
+icu::UMemory* Isolate::get_cached_icu_object(ICUObjectCacheType cache_type) {
return icu_object_cache_[cache_type].get();
}
void Isolate::set_icu_object_in_cache(ICUObjectCacheType cache_type,
- std::shared_ptr<icu::UObject> obj) {
+ std::shared_ptr<icu::UMemory> obj) {
icu_object_cache_[cache_type] = obj;
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/execution/isolate.h
index 2b1612a957..4b4bf9cd7c 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ISOLATE_H_
-#define V8_ISOLATE_H_
+#ifndef V8_EXECUTION_ISOLATE_H_
+#define V8_EXECUTION_ISOLATE_H_
#include <cstddef>
#include <functional>
@@ -15,29 +15,30 @@
#include "include/v8-inspector.h"
#include "include/v8-internal.h"
#include "include/v8.h"
-#include "src/allocation.h"
#include "src/base/macros.h"
#include "src/builtins/builtins.h"
-#include "src/contexts.h"
+#include "src/common/globals.h"
#include "src/debug/interface-types.h"
-#include "src/execution.h"
-#include "src/futex-emulation.h"
-#include "src/globals.h"
-#include "src/handles.h"
+#include "src/execution/execution.h"
+#include "src/execution/futex-emulation.h"
+#include "src/execution/isolate-data.h"
+#include "src/execution/messages.h"
+#include "src/handles/handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
-#include "src/isolate-allocator.h"
-#include "src/isolate-data.h"
-#include "src/messages.h"
+#include "src/heap/read-only-heap.h"
+#include "src/init/isolate-allocator.h"
#include "src/objects/code.h"
+#include "src/objects/contexts.h"
#include "src/objects/debug-objects.h"
#include "src/runtime/runtime.h"
-#include "src/unicode.h"
+#include "src/strings/unicode.h"
+#include "src/utils/allocation.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uversion.h" // Define U_ICU_NAMESPACE.
namespace U_ICU_NAMESPACE {
-class UObject;
+class UMemory;
} // namespace U_ICU_NAMESPACE
#endif // V8_INTL_SUPPORT
@@ -50,7 +51,7 @@ class RandomNumberGenerator;
namespace debug {
class ConsoleDelegate;
class AsyncEventDelegate;
-}
+} // namespace debug
namespace internal {
@@ -100,7 +101,8 @@ class TracingCpuProfilerImpl;
class UnicodeCache;
struct ManagedPtrDestructor;
-template <StateTag Tag> class VMState;
+template <StateTag Tag>
+class VMState;
namespace interpreter {
class Interpreter;
@@ -192,12 +194,12 @@ class BuiltinUnwindInfo;
return *__result__; \
} while (false)
-#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
- do { \
- if (!(call).ToHandle(&dst)) { \
- DCHECK((isolate)->has_pending_exception()); \
- return value; \
- } \
+#define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value) \
+ do { \
+ if (!(call).ToHandle(&dst)) { \
+ DCHECK((isolate)->has_pending_exception()); \
+ return value; \
+ } \
} while (false)
#define ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
@@ -207,7 +209,7 @@ class BuiltinUnwindInfo;
ReadOnlyRoots(__isolate__).exception()); \
} while (false)
-#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
+#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
#define THROW_NEW_ERROR(isolate, call, T) \
@@ -259,12 +261,12 @@ class BuiltinUnwindInfo;
* If inside a function with return type Object, use
* RETURN_FAILURE_ON_EXCEPTION instead.
*/
-#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
- do { \
- if ((call).is_null()) { \
- DCHECK((isolate)->has_pending_exception()); \
- return value; \
- } \
+#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
+ do { \
+ if ((call).is_null()) { \
+ DCHECK((isolate)->has_pending_exception()); \
+ return value; \
+ } \
} while (false)
/**
@@ -314,9 +316,34 @@ class BuiltinUnwindInfo;
* If inside a function with return type
* Maybe<X> or Handle<X>, use RETURN_ON_EXCEPTION_VALUE instead.
*/
-#define RETURN_ON_EXCEPTION(isolate, call, T) \
+#define RETURN_ON_EXCEPTION(isolate, call, T) \
RETURN_ON_EXCEPTION_VALUE(isolate, call, MaybeHandle<T>())
+#define RETURN_FAILURE(isolate, should_throw, call) \
+ do { \
+ if ((should_throw) == kDontThrow) { \
+ return Just(false); \
+ } else { \
+ isolate->Throw(*isolate->factory()->call); \
+ return Nothing<bool>(); \
+ } \
+ } while (false)
+
+#define MAYBE_RETURN(call, value) \
+ do { \
+ if ((call).IsNothing()) return value; \
+ } while (false)
+
+#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
+
+#define MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
+ do { \
+ Isolate* __isolate__ = (isolate); \
+ if (!(call).To(&dst)) { \
+ DCHECK(__isolate__->has_pending_exception()); \
+ return ReadOnlyRoots(__isolate__).exception(); \
+ } \
+ } while (false)
#define FOR_WITH_HANDLE_SCOPE(isolate, loop_var_type, init, loop_var, \
limit_check, increment, body) \
@@ -333,8 +360,8 @@ class BuiltinUnwindInfo;
} \
} while (false)
-#define FIELD_ACCESSOR(type, name) \
- inline void set_##name(type v) { name##_ = v; } \
+#define FIELD_ACCESSOR(type, name) \
+ inline void set_##name(type v) { name##_ = v; } \
inline type name() const { return name##_; }
// Controls for manual embedded blob lifecycle management, used by tests and
@@ -393,6 +420,8 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(int, external_script_source_size, 0) \
/* true if being profiled. Causes collection of extra compile info. */ \
V(bool, is_profiling, false) \
+ /* Number of CPU profilers running on the isolate. */ \
+ V(size_t, num_cpu_profilers, 0) \
/* true if a trace is being formatted through Error.prepareStackTrace. */ \
V(bool, formatting_stack_trace, false) \
/* Perform side effect checks on function call and API callbacks. */ \
@@ -423,6 +452,7 @@ class Isolate final : private HiddenFactory {
// PerIsolateThreadData work on some older versions of gcc.
class ThreadDataTable;
class EntryStackItem;
+
public:
// A thread has a PerIsolateThreadData instance for each isolate that it has
// entered. That instance is allocated when the isolate is initially entered
@@ -433,12 +463,12 @@ class Isolate final : private HiddenFactory {
: isolate_(isolate),
thread_id_(thread_id),
stack_limit_(0),
- thread_state_(nullptr),
+ thread_state_(nullptr)
#if USE_SIMULATOR
- simulator_(nullptr),
+ ,
+ simulator_(nullptr)
#endif
- next_(nullptr),
- prev_(nullptr) {
+ {
}
~PerIsolateThreadData();
Isolate* isolate() const { return isolate_; }
@@ -465,9 +495,6 @@ class Isolate final : private HiddenFactory {
Simulator* simulator_;
#endif
- PerIsolateThreadData* next_;
- PerIsolateThreadData* prev_;
-
friend class Isolate;
friend class ThreadDataTable;
friend class EntryStackItem;
@@ -628,6 +655,11 @@ class Isolate final : private HiddenFactory {
inline Address* c_entry_fp_address() {
return &thread_local_top()->c_entry_fp_;
}
+ static uint32_t c_entry_fp_offset() {
+ return static_cast<uint32_t>(
+ OFFSET_OF(Isolate, thread_local_top()->c_entry_fp_) -
+ isolate_root_bias());
+ }
inline Address* handler_address() { return &thread_local_top()->handler_; }
inline Address* c_function_address() {
return &thread_local_top()->c_function_;
@@ -823,22 +855,22 @@ class Isolate final : private HiddenFactory {
static const int kBMMaxShift = 250; // See StringSearchBase.
// Accessors.
-#define GLOBAL_ACCESSOR(type, name, initialvalue) \
- inline type name() const { \
- DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
- return name##_; \
- } \
- inline void set_##name(type value) { \
- DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
- name##_ = value; \
+#define GLOBAL_ACCESSOR(type, name, initialvalue) \
+ inline type name() const { \
+ DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ return name##_; \
+ } \
+ inline void set_##name(type value) { \
+ DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ name##_ = value; \
}
ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR
-#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
- inline type* name() { \
- DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
- return &(name##_)[0]; \
+#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
+ inline type* name() { \
+ DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
+ return &(name##_)[0]; \
}
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR
@@ -952,9 +984,7 @@ class Isolate final : private HiddenFactory {
return handle_scope_implementer_;
}
- UnicodeCache* unicode_cache() {
- return unicode_cache_;
- }
+ UnicodeCache* unicode_cache() { return unicode_cache_; }
InnerPointerToCodeCache* inner_pointer_to_code_cache() {
return inner_pointer_to_code_cache_;
@@ -976,7 +1006,7 @@ class Isolate final : private HiddenFactory {
}
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
- regexp_macro_assembler_canonicalize() {
+ regexp_macro_assembler_canonicalize() {
return &regexp_macro_assembler_canonicalize_;
}
#endif // !V8_INTL_SUPPORT
@@ -1071,6 +1101,14 @@ class Isolate final : private HiddenFactory {
return is_block_count_code_coverage() || is_block_binary_code_coverage();
}
+ bool is_binary_code_coverage() const {
+ return is_precise_binary_code_coverage() || is_block_binary_code_coverage();
+ }
+
+ bool is_count_code_coverage() const {
+ return is_precise_count_code_coverage() || is_block_count_code_coverage();
+ }
+
bool is_collecting_type_profile() const {
return type_profile_mode() == debug::TypeProfileMode::kCollect;
}
@@ -1088,9 +1126,7 @@ class Isolate final : private HiddenFactory {
return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
}
- DateCache* date_cache() {
- return date_cache_;
- }
+ DateCache* date_cache() { return date_cache_; }
V8_EXPORT_PRIVATE void set_date_cache(DateCache* date_cache);
@@ -1110,9 +1146,9 @@ class Isolate final : private HiddenFactory {
kDefaultCollator, kDefaultNumberFormat, kDefaultSimpleDateFormat,
kDefaultSimpleDateFormatForTime, kDefaultSimpleDateFormatForDate};
- icu::UObject* get_cached_icu_object(ICUObjectCacheType cache_type);
+ icu::UMemory* get_cached_icu_object(ICUObjectCacheType cache_type);
void set_icu_object_in_cache(ICUObjectCacheType cache_type,
- std::shared_ptr<icu::UObject> obj);
+ std::shared_ptr<icu::UMemory> obj);
void clear_cached_icu_object(ICUObjectCacheType cache_type);
#endif // V8_INTL_SUPPORT
@@ -1133,7 +1169,13 @@ class Isolate final : private HiddenFactory {
inline bool IsArraySpeciesLookupChainIntact();
inline bool IsTypedArraySpeciesLookupChainIntact();
inline bool IsRegExpSpeciesLookupChainIntact();
+
+ // Check that the @@species protector is intact, which guards the lookup of
+ // "constructor" on JSPromise instances, whose [[Prototype]] is the initial
+ // %PromisePrototype%, and the Symbol.species lookup on the
+ // %PromisePrototype%.
inline bool IsPromiseSpeciesLookupChainIntact();
+
bool IsIsConcatSpreadableLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact(JSReceiver receiver);
inline bool IsStringLengthOverflowIntact();
@@ -1179,7 +1221,7 @@ class Isolate final : private HiddenFactory {
inline bool IsArrayBufferDetachingIntact();
// Disable promise optimizations if promise (debug) hooks have ever been
- // active.
+ // active, because those can observe promises.
bool IsPromiseHookProtectorIntact();
// Make sure a lookup of "resolve" on the %Promise% intrinsic object
@@ -1540,12 +1582,11 @@ class Isolate final : private HiddenFactory {
class EntryStackItem {
public:
EntryStackItem(PerIsolateThreadData* previous_thread_data,
- Isolate* previous_isolate,
- EntryStackItem* previous_item)
+ Isolate* previous_isolate, EntryStackItem* previous_item)
: entry_count(1),
previous_thread_data(previous_thread_data),
previous_isolate(previous_isolate),
- previous_item(previous_item) { }
+ previous_item(previous_item) {}
int entry_count;
PerIsolateThreadData* previous_thread_data;
@@ -1670,7 +1711,7 @@ class Isolate final : private HiddenFactory {
return static_cast<std::size_t>(a);
}
};
- std::unordered_map<ICUObjectCacheType, std::shared_ptr<icu::UObject>,
+ std::unordered_map<ICUObjectCacheType, std::shared_ptr<icu::UMemory>,
ICUObjectCacheTypeHash>
icu_object_cache_;
@@ -1719,16 +1760,14 @@ class Isolate final : private HiddenFactory {
CompilerDispatcher* compiler_dispatcher_ = nullptr;
- typedef std::pair<InterruptCallback, void*> InterruptEntry;
+ using InterruptEntry = std::pair<InterruptCallback, void*>;
std::queue<InterruptEntry> api_interrupts_queue_;
-#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
- type name##_;
+#define GLOBAL_BACKING_STORE(type, name, initialvalue) type name##_;
ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE
-#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
- type name##_[length];
+#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) type name##_[length];
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE
@@ -1881,16 +1920,14 @@ class AssertNoContextChange {
#ifdef DEBUG
public:
explicit AssertNoContextChange(Isolate* isolate);
- ~AssertNoContextChange() {
- DCHECK(isolate_->context() == *context_);
- }
+ ~AssertNoContextChange() { DCHECK(isolate_->context() == *context_); }
private:
Isolate* isolate_;
Handle<Context> context_;
#else
public:
- explicit AssertNoContextChange(Isolate* isolate) { }
+ explicit AssertNoContextChange(Isolate* isolate) {}
#endif
};
@@ -1912,11 +1949,10 @@ class ExecutionAccess {
Isolate* isolate_;
};
-
// Support for checking for stack-overflows.
class StackLimitCheck {
public:
- explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
+ explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) {}
// Use this to check for stack-overflows in C++ code.
bool HasOverflowed() const {
@@ -2031,4 +2067,4 @@ class StackTraceFailureMessage {
} // namespace internal
} // namespace v8
-#endif // V8_ISOLATE_H_
+#endif // V8_EXECUTION_ISOLATE_H_
diff --git a/deps/v8/src/message-template.h b/deps/v8/src/execution/message-template.h
index b1aca2c905..ae88aa4411 100644
--- a/deps/v8/src/message-template.h
+++ b/deps/v8/src/execution/message-template.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MESSAGE_TEMPLATE_H_
-#define V8_MESSAGE_TEMPLATE_H_
+#ifndef V8_EXECUTION_MESSAGE_TEMPLATE_H_
+#define V8_EXECUTION_MESSAGE_TEMPLATE_H_
#include "src/base/logging.h"
@@ -171,6 +171,11 @@ namespace internal {
"'defineProperty' on proxy: trap returned truish for defining " \
"non-configurable property '%' which is either non-existent or " \
"configurable in the proxy target") \
+ T(ProxyDefinePropertyNonConfigurableWritable, \
+ "'defineProperty' on proxy: trap returned truish for defining " \
+ "non-configurable property '%' which cannot be non-writable, unless " \
+ "there exists a corresponding non-configurable, non-writable own " \
+ "property of the target object.") \
T(ProxyDefinePropertyNonExtensible, \
"'defineProperty' on proxy: trap returned truish for adding property '%' " \
" to the non-extensible proxy target") \
@@ -180,6 +185,9 @@ namespace internal {
T(ProxyDeletePropertyNonConfigurable, \
"'deleteProperty' on proxy: trap returned truish for property '%' which " \
"is non-configurable in the proxy target") \
+ T(ProxyDeletePropertyNonExtensible, \
+ "'deleteProperty' on proxy: trap returned truish for property '%' but " \
+ "the proxy target is non-extensible") \
T(ProxyGetNonConfigurableData, \
"'get' on proxy: property '%' is a read-only and " \
"non-configurable data property on the proxy target but the proxy " \
@@ -199,6 +207,10 @@ namespace internal {
"'getOwnPropertyDescriptor' on proxy: trap reported non-configurability " \
"for property '%' which is either non-existent or configurable in the " \
"proxy target") \
+ T(ProxyGetOwnPropertyDescriptorNonConfigurableWritable, \
+ "'getOwnPropertyDescriptor' on proxy: trap reported non-configurable " \
+ "and writable for property '%' which is non-configurable, non-writable " \
+ "in the proxy target") \
T(ProxyGetOwnPropertyDescriptorNonExtensible, \
"'getOwnPropertyDescriptor' on proxy: trap returned undefined for " \
"property '%' which exists in the non-extensible proxy target") \
@@ -304,11 +316,11 @@ namespace internal {
"a location, got %") \
T(InvalidArrayBufferLength, "Invalid array buffer length") \
T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
+ T(Invalid, "Invalid %s : %") \
T(InvalidArrayLength, "Invalid array length") \
T(InvalidAtomicAccessIndex, "Invalid atomic access index") \
T(InvalidCodePoint, "Invalid code point %") \
T(InvalidCountValue, "Invalid count value") \
- T(InvalidCurrencyCode, "Invalid currency code: %") \
T(InvalidDataViewAccessorOffset, \
"Offset is outside the bounds of the DataView") \
T(InvalidDataViewLength, "Invalid DataView length %") \
@@ -362,6 +374,7 @@ namespace internal {
"Duplicate __proto__ fields are not allowed in object literals") \
T(ForInOfLoopInitializer, \
"% loop variable declaration may not have an initializer.") \
+ T(ForOfLet, "The left-hand side of a for-of loop may not start with 'let'.") \
T(ForInOfLoopMultiBindings, \
"Invalid left-hand side in % loop: Must have a single binding.") \
T(GeneratorInSingleStatementContext, \
@@ -397,7 +410,7 @@ namespace internal {
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
T(InvalidPrivateFieldResolution, \
- "Undefined private field %: must be declared in an enclosing class") \
+ "Private field '%' must be declared in an enclosing class") \
T(InvalidPrivateFieldRead, \
"Read of private field % from an object which did not contain the field") \
T(InvalidPrivateFieldWrite, \
@@ -575,4 +588,4 @@ inline MessageTemplate MessageTemplateFromInt(int message_id) {
} // namespace internal
} // namespace v8
-#endif // V8_MESSAGE_TEMPLATE_H_
+#endif // V8_EXECUTION_MESSAGE_TEMPLATE_H_
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/execution/messages.cc
index 0860fb24b7..c76f546d62 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -2,20 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/messages.h"
+#include "src/execution/messages.h"
#include <memory>
-#include "src/api-inl.h"
-#include "src/counters.h"
-#include "src/execution.h"
-#include "src/isolate-inl.h"
-#include "src/keys.h"
+#include "src/api/api-inl.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/keys.h"
#include "src/objects/struct-inl.h"
-#include "src/string-builder-inl.h"
+#include "src/strings/string-builder-inl.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects.h"
@@ -24,14 +24,30 @@ namespace internal {
MessageLocation::MessageLocation(Handle<Script> script, int start_pos,
int end_pos)
- : script_(script), start_pos_(start_pos), end_pos_(end_pos) {}
+ : script_(script),
+ start_pos_(start_pos),
+ end_pos_(end_pos),
+ bytecode_offset_(-1) {}
+
MessageLocation::MessageLocation(Handle<Script> script, int start_pos,
int end_pos, Handle<SharedFunctionInfo> shared)
: script_(script),
start_pos_(start_pos),
end_pos_(end_pos),
+ bytecode_offset_(-1),
+ shared_(shared) {}
+
+MessageLocation::MessageLocation(Handle<Script> script,
+ Handle<SharedFunctionInfo> shared,
+ int bytecode_offset)
+ : script_(script),
+ start_pos_(-1),
+ end_pos_(-1),
+ bytecode_offset_(bytecode_offset),
shared_(shared) {}
-MessageLocation::MessageLocation() : start_pos_(-1), end_pos_(-1) {}
+
+MessageLocation::MessageLocation()
+ : start_pos_(-1), end_pos_(-1), bytecode_offset_(-1) {}
// If no message listeners have been registered this one is called
// by default.
@@ -59,19 +75,24 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
int start = -1;
int end = -1;
+ int bytecode_offset = -1;
Handle<Script> script_handle = isolate->factory()->empty_script();
+ Handle<SharedFunctionInfo> shared_info;
if (location != nullptr) {
start = location->start_pos();
end = location->end_pos();
script_handle = location->script();
+ bytecode_offset = location->bytecode_offset();
+ shared_info = location->shared();
}
- Handle<Object> stack_frames_handle = stack_frames.is_null()
- ? Handle<Object>::cast(factory->undefined_value())
- : Handle<Object>::cast(stack_frames);
+ Handle<Object> stack_frames_handle =
+ stack_frames.is_null() ? Handle<Object>::cast(factory->undefined_value())
+ : Handle<Object>::cast(stack_frames);
Handle<JSMessageObject> message_obj = factory->NewJSMessageObject(
- message, argument, start, end, script_handle, stack_frames_handle);
+ message, argument, start, end, shared_info, bytecode_offset,
+ script_handle, stack_frames_handle);
return message_obj;
}
@@ -97,7 +118,7 @@ void MessageHandler::ReportMessage(Isolate* isolate, const MessageLocation* loc,
isolate->set_external_caught_exception(false);
// Turn the exception on the message into a string if it is an object.
- if (message->argument()->IsJSObject()) {
+ if (message->argument().IsJSObject()) {
HandleScope scope(isolate);
Handle<Object> argument(message->argument(), isolate);
@@ -148,17 +169,17 @@ void MessageHandler::ReportMessageNoExceptions(
} else {
for (int i = 0; i < global_length; i++) {
HandleScope scope(isolate);
- if (global_listeners->get(i)->IsUndefined(isolate)) continue;
+ if (global_listeners->get(i).IsUndefined(isolate)) continue;
FixedArray listener = FixedArray::cast(global_listeners->get(i));
- Foreign callback_obj = Foreign::cast(listener->get(0));
+ Foreign callback_obj = Foreign::cast(listener.get(0));
int32_t message_levels =
- static_cast<int32_t>(Smi::ToInt(listener->get(2)));
+ static_cast<int32_t>(Smi::ToInt(listener.get(2)));
if (!(message_levels & error_level)) {
continue;
}
v8::MessageCallback callback =
- FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
- Handle<Object> callback_data(listener->get(1), isolate);
+ FUNCTION_CAST<v8::MessageCallback>(callback_obj.foreign_address());
+ Handle<Object> callback_data(listener.get(1), isolate);
{
RuntimeCallTimerScope timer(
isolate, RuntimeCallCounterId::kMessageListenerCallback);
@@ -175,7 +196,6 @@ void MessageHandler::ReportMessageNoExceptions(
}
}
-
Handle<String> MessageHandler::GetMessage(Isolate* isolate,
Handle<Object> data) {
Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
@@ -198,7 +218,7 @@ Object EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
Handle<SharedFunctionInfo> shared(script->eval_from_shared(), isolate);
// Find the name of the function calling eval.
- if (shared->Name()->BooleanValue(isolate)) {
+ if (shared->Name().BooleanValue(isolate)) {
return shared->Name();
}
@@ -230,7 +250,7 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
if (script->has_eval_from_shared()) {
Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared(),
isolate);
- if (eval_from_shared->script()->IsScript()) {
+ if (eval_from_shared->script().IsScript()) {
Handle<Script> eval_from_script =
handle(Script::cast(eval_from_shared->script()), isolate);
builder.AppendCString(" (");
@@ -246,7 +266,7 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
Script::COMPILATION_TYPE_EVAL);
// eval script originated from "real" source.
Handle<Object> name_obj = handle(eval_from_script->name(), isolate);
- if (eval_from_script->name()->IsString()) {
+ if (eval_from_script->name().IsString()) {
builder.AppendString(Handle<String>::cast(name_obj));
Script::PositionInfo info;
@@ -267,7 +287,7 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
builder.AppendString(str);
}
} else {
- DCHECK(!eval_from_script->name()->IsString());
+ DCHECK(!eval_from_script->name().IsString());
builder.AppendCString("unknown source");
}
}
@@ -287,6 +307,10 @@ Handle<Object> StackFrameBase::GetEvalOrigin() {
return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
}
+Handle<Object> StackFrameBase::GetWasmModuleName() {
+ return isolate_->factory()->undefined_value();
+}
+
int StackFrameBase::GetScriptId() const {
if (!HasScript()) return kNone;
return GetScript()->id();
@@ -310,9 +334,9 @@ void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
receiver_ = handle(array->Receiver(frame_ix), isolate);
function_ = handle(array->Function(frame_ix), isolate);
code_ = handle(array->Code(frame_ix), isolate);
- offset_ = array->Offset(frame_ix)->value();
+ offset_ = array->Offset(frame_ix).value();
- const int flags = array->Flags(frame_ix)->value();
+ const int flags = array->Flags(frame_ix).value();
is_constructor_ = (flags & FrameArray::kIsConstructor) != 0;
is_strict_ = (flags & FrameArray::kIsStrict) != 0;
is_async_ = (flags & FrameArray::kIsAsync) != 0;
@@ -372,7 +396,7 @@ bool CheckMethodName(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<Object> ScriptNameOrSourceUrl(Handle<Script> script, Isolate* isolate) {
Object name_or_url = script->source_url();
- if (!name_or_url->IsString()) name_or_url = script->name();
+ if (!name_or_url.IsString()) name_or_url = script->name();
return handle(name_or_url, isolate);
}
@@ -396,18 +420,19 @@ Handle<Object> JSStackFrame::GetMethodName() {
return isolate_->factory()->null_value();
}
- Handle<String> name(function_->shared()->Name(), isolate_);
+ Handle<String> name(function_->shared().Name(), isolate_);
+ name = String::Flatten(isolate_, name);
// The static initializer function is not a method, so don't add a
// class name, just return the function name.
- if (name->IsUtf8EqualTo(CStrVector("<static_fields_initializer>"), true)) {
+ if (name->HasOneBytePrefix(CStrVector("<static_fields_initializer>"))) {
return name;
}
// ES2015 gives getters and setters name prefixes which must
// be stripped to find the property name.
- if (name->IsUtf8EqualTo(CStrVector("get "), true) ||
- name->IsUtf8EqualTo(CStrVector("set "), true)) {
+ if (name->HasOneBytePrefix(CStrVector("get ")) ||
+ name->HasOneBytePrefix(CStrVector("set "))) {
name = isolate_->factory()->NewProperSubString(name, 4, name->length());
}
if (CheckMethodName(isolate_, receiver, name, function_,
@@ -427,7 +452,7 @@ Handle<Object> JSStackFrame::GetMethodName() {
KeyAccumulator::GetOwnEnumPropertyKeys(isolate_, current_obj);
for (int i = 0; i < keys->length(); i++) {
HandleScope inner_scope(isolate_);
- if (!keys->get(i)->IsName()) continue;
+ if (!keys->get(i).IsName()) continue;
Handle<Name> name_key(Name::cast(keys->get(i)), isolate_);
if (!CheckMethodName(isolate_, current_obj, name_key, function_,
LookupIterator::OWN_SKIP_INTERCEPTOR))
@@ -492,7 +517,7 @@ bool JSStackFrame::IsToplevel() {
namespace {
bool IsNonEmptyString(Handle<Object> object) {
- return (object->IsString() && String::cast(*object)->length() > 0);
+ return (object->IsString() && String::cast(*object).length() > 0);
}
void AppendFileLocation(Isolate* isolate, StackFrameBase* call_site,
@@ -667,11 +692,11 @@ int JSStackFrame::GetPosition() const {
}
bool JSStackFrame::HasScript() const {
- return function_->shared()->script()->IsScript();
+ return function_->shared().script().IsScript();
}
Handle<Script> JSStackFrame::GetScript() const {
- return handle(Script::cast(function_->shared()->script()), isolate_);
+ return handle(Script::cast(function_->shared().script()), isolate_);
}
void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
@@ -683,16 +708,16 @@ void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
array->IsAsmJsWasmFrame(frame_ix));
isolate_ = isolate;
wasm_instance_ = handle(array->WasmInstance(frame_ix), isolate);
- wasm_func_index_ = array->WasmFunctionIndex(frame_ix)->value();
+ wasm_func_index_ = array->WasmFunctionIndex(frame_ix).value();
if (array->IsWasmInterpretedFrame(frame_ix)) {
code_ = nullptr;
} else {
// The {WasmCode*} is held alive by the {GlobalWasmCodeRef}.
auto global_wasm_code_ref =
Managed<wasm::GlobalWasmCodeRef>::cast(array->WasmCodeObject(frame_ix));
- code_ = global_wasm_code_ref->get()->code();
+ code_ = global_wasm_code_ref.get()->code();
}
- offset_ = array->Offset(frame_ix)->value();
+ offset_ = array->Offset(frame_ix).value();
}
Handle<Object> WasmStackFrame::GetReceiver() const { return wasm_instance_; }
@@ -713,6 +738,17 @@ Handle<Object> WasmStackFrame::GetFunctionName() {
return name;
}
+Handle<Object> WasmStackFrame::GetWasmModuleName() {
+ Handle<Object> module_name;
+ Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
+ isolate_);
+ if (!WasmModuleObject::GetModuleNameOrNull(isolate_, module_object)
+ .ToHandle(&module_name)) {
+ module_name = isolate_->factory()->null_value();
+ }
+ return module_name;
+}
+
void WasmStackFrame::ToString(IncrementalStringBuilder& builder) {
Handle<WasmModuleObject> module_object(wasm_instance_->module_object(),
isolate_);
@@ -762,7 +798,7 @@ Handle<Object> WasmStackFrame::Null() const {
bool WasmStackFrame::HasScript() const { return true; }
Handle<Script> WasmStackFrame::GetScript() const {
- return handle(wasm_instance_->module_object()->script(), isolate_);
+ return handle(wasm_instance_->module_object().script(), isolate_);
}
void AsmJsWasmStackFrame::FromFrameArray(Isolate* isolate,
@@ -771,7 +807,7 @@ void AsmJsWasmStackFrame::FromFrameArray(Isolate* isolate,
DCHECK(array->IsAsmJsWasmFrame(frame_ix));
WasmStackFrame::FromFrameArray(isolate, array, frame_ix);
is_at_number_conversion_ =
- array->Flags(frame_ix)->value() & FrameArray::kAsmJsAtNumberConversion;
+ array->Flags(frame_ix).value() & FrameArray::kAsmJsAtNumberConversion;
}
Handle<Object> AsmJsWasmStackFrame::GetReceiver() const {
@@ -784,13 +820,13 @@ Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
}
Handle<Object> AsmJsWasmStackFrame::GetFileName() {
- Handle<Script> script(wasm_instance_->module_object()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
DCHECK(script->IsUserJavaScript());
return handle(script->name(), isolate_);
}
Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
- Handle<Script> script(wasm_instance_->module_object()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
DCHECK_EQ(Script::TYPE_NORMAL, script->type());
return ScriptNameOrSourceUrl(script, isolate_);
}
@@ -810,14 +846,14 @@ int AsmJsWasmStackFrame::GetPosition() const {
int AsmJsWasmStackFrame::GetLineNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetLineNumber(script, GetPosition()) + 1;
}
int AsmJsWasmStackFrame::GetColumnNumber() {
DCHECK_LE(0, GetPosition());
- Handle<Script> script(wasm_instance_->module_object()->script(), isolate_);
+ Handle<Script> script(wasm_instance_->module_object().script(), isolate_);
DCHECK(script->IsUserJavaScript());
return Script::GetColumnNumber(script, GetPosition()) + 1;
}
@@ -852,7 +888,7 @@ void FrameArrayIterator::Advance() { frame_ix_++; }
StackFrameBase* FrameArrayIterator::Frame() {
DCHECK(HasFrame());
- const int flags = array_->Flags(frame_ix_)->value();
+ const int flags = array_->Flags(frame_ix_).value();
int flag_mask = FrameArray::kIsWasmFrame |
FrameArray::kIsWasmInterpretedFrame |
FrameArray::kIsAsmJsWasmFrame;
@@ -889,15 +925,17 @@ MaybeHandle<Object> ConstructCallSite(Isolate* isolate,
JSObject::New(target, target, Handle<AllocationSite>::null()), Object);
Handle<Symbol> key = isolate->factory()->call_site_frame_array_symbol();
- RETURN_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- obj, key, frame_array, DONT_ENUM),
+ RETURN_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ obj, key, frame_array, DONT_ENUM),
Object);
key = isolate->factory()->call_site_frame_index_symbol();
Handle<Object> value(Smi::FromInt(frame_index), isolate);
- RETURN_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- obj, key, value, DONT_ENUM),
- Object);
+ RETURN_ON_EXCEPTION(
+ isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(obj, key, value, DONT_ENUM),
+ Object);
return obj;
}
@@ -1025,7 +1063,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
Execution::Call(isolate, prepare_stack_trace, global_error, argc,
- argv.start()),
+ argv.begin()),
Object);
return result;
@@ -1085,7 +1123,7 @@ Handle<String> MessageFormatter::Format(Isolate* isolate, MessageTemplate index,
if (!maybe_result_string.ToHandle(&result_string)) {
DCHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
- return factory->InternalizeOneByteString(StaticCharVector("<error>"));
+ return factory->InternalizeString(StaticCharVector("<error>"));
}
// A string that has been obtained from JS code in this way is
// likely to be a complicated ConsString of some sort. We flatten it
@@ -1172,10 +1210,11 @@ MaybeHandle<Object> ErrorUtils::Construct(
Handle<String> msg_string;
ASSIGN_RETURN_ON_EXCEPTION(isolate, msg_string,
Object::ToString(isolate, message), Object);
- RETURN_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- err, isolate->factory()->message_string(),
- msg_string, DONT_ENUM),
- Object);
+ RETURN_ON_EXCEPTION(
+ isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ err, isolate->factory()->message_string(), msg_string, DONT_ENUM),
+ Object);
}
// Optionally capture a more detailed stack trace for the message.
@@ -1303,11 +1342,19 @@ MaybeHandle<Object> ErrorUtils::MakeGenericError(
// pending exceptions would be cleared. Preserve this behavior.
isolate->clear_pending_exception();
}
+ Handle<String> msg;
+ if (FLAG_correctness_fuzzer_suppressions) {
+ // Ignore error messages in correctness fuzzing, because the spec leaves
+ // room for undefined behavior.
+ msg = isolate->factory()->InternalizeUtf8String(
+ "Message suppressed for fuzzers (--correctness-fuzzer-suppressions)");
+ } else {
+ msg = DoFormatMessage(isolate, index, arg0, arg1, arg2);
+ }
DCHECK(mode != SKIP_UNTIL_SEEN);
Handle<Object> no_caller;
- Handle<String> msg = DoFormatMessage(isolate, index, arg0, arg1, arg2);
return ErrorUtils::Construct(isolate, constructor, constructor, msg, mode,
no_caller, false);
}
diff --git a/deps/v8/src/messages.h b/deps/v8/src/execution/messages.h
index af45b16173..0fc3692f64 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -7,13 +7,13 @@
// Note: there's a big unresolved issue about ownership of the data
// structures used by this framework.
-#ifndef V8_MESSAGES_H_
-#define V8_MESSAGES_H_
+#ifndef V8_EXECUTION_MESSAGES_H_
+#define V8_EXECUTION_MESSAGES_H_
#include <memory>
-#include "src/handles.h"
-#include "src/message-template.h"
+#include "src/execution/message-template.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
@@ -33,20 +33,29 @@ class WasmInstanceObject;
class V8_EXPORT_PRIVATE MessageLocation {
public:
+ // Constructors for when source positions are already known.
+ // TODO(delphick): Collapse to a single constructor with a default parameter
+ // when we stop using the GCC that requires this separation.
MessageLocation(Handle<Script> script, int start_pos, int end_pos);
MessageLocation(Handle<Script> script, int start_pos, int end_pos,
Handle<SharedFunctionInfo> shared);
+ // Constructor for when source positions were not collected but which can be
+ // reconstructed from the SharedFuncitonInfo and bytecode offset.
+ MessageLocation(Handle<Script> script, Handle<SharedFunctionInfo> shared,
+ int bytecode_offset);
MessageLocation();
Handle<Script> script() const { return script_; }
int start_pos() const { return start_pos_; }
int end_pos() const { return end_pos_; }
+ int bytecode_offset() const { return bytecode_offset_; }
Handle<SharedFunctionInfo> shared() const { return shared_; }
private:
Handle<Script> script_;
int start_pos_;
int end_pos_;
+ int bytecode_offset_;
Handle<SharedFunctionInfo> shared_;
};
@@ -63,6 +72,7 @@ class StackFrameBase {
virtual Handle<Object> GetMethodName() = 0;
virtual Handle<Object> GetTypeName() = 0;
virtual Handle<Object> GetEvalOrigin();
+ virtual Handle<Object> GetWasmModuleName();
// Returns the script ID if one is attached, -1 otherwise.
int GetScriptId() const;
@@ -163,6 +173,7 @@ class WasmStackFrame : public StackFrameBase {
Handle<Object> GetScriptNameOrSourceUrl() override { return Null(); }
Handle<Object> GetMethodName() override { return Null(); }
Handle<Object> GetTypeName() override { return Null(); }
+ Handle<Object> GetWasmModuleName() override;
int GetPosition() const override;
int GetLineNumber() override { return wasm_func_index_; }
@@ -289,7 +300,6 @@ class MessageFormatter {
Handle<Object> arg);
};
-
// A message handler is a convenience interface for accessing the list
// of message listeners registered in an environment
class MessageHandler {
@@ -317,8 +327,7 @@ class MessageHandler {
v8::Local<v8::Value> api_exception_obj);
};
-
} // namespace internal
} // namespace v8
-#endif // V8_MESSAGES_H_
+#endif // V8_EXECUTION_MESSAGES_H_
diff --git a/deps/v8/src/microtask-queue.cc b/deps/v8/src/execution/microtask-queue.cc
index a19d9dab8e..8088935154 100644
--- a/deps/v8/src/microtask-queue.cc
+++ b/deps/v8/src/execution/microtask-queue.cc
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/microtask-queue.h"
+#include "src/execution/microtask-queue.h"
#include <stddef.h>
#include <algorithm>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/logging.h"
-#include "src/handles-inl.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/microtask-inl.h"
-#include "src/roots-inl.h"
+#include "src/objects/visitors.h"
+#include "src/roots/roots-inl.h"
#include "src/tracing/trace-event.h"
-#include "src/visitors.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/microtask-queue.h b/deps/v8/src/execution/microtask-queue.h
index 98dc56bb18..4ce1498279 100644
--- a/deps/v8/src/microtask-queue.h
+++ b/deps/v8/src/execution/microtask-queue.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MICROTASK_QUEUE_H_
-#define V8_MICROTASK_QUEUE_H_
+#ifndef V8_EXECUTION_MICROTASK_QUEUE_H_
+#define V8_EXECUTION_MICROTASK_QUEUE_H_
#include <stdint.h>
#include <memory>
@@ -145,4 +145,4 @@ class V8_EXPORT_PRIVATE MicrotaskQueue final : public v8::MicrotaskQueue {
} // namespace internal
} // namespace v8
-#endif // V8_MICROTASK_QUEUE_H_
+#endif // V8_EXECUTION_MICROTASK_QUEUE_H_
diff --git a/deps/v8/src/mips/frame-constants-mips.cc b/deps/v8/src/execution/mips/frame-constants-mips.cc
index fde4306f62..95d6eb951c 100644
--- a/deps/v8/src/mips/frame-constants-mips.cc
+++ b/deps/v8/src/execution/mips/frame-constants-mips.cc
@@ -4,12 +4,12 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/assembler.h"
-#include "src/frame-constants.h"
-#include "src/mips/assembler-mips-inl.h"
-#include "src/mips/assembler-mips.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/mips/assembler-mips-inl.h"
+#include "src/codegen/mips/assembler-mips.h"
+#include "src/execution/frame-constants.h"
-#include "src/mips/frame-constants-mips.h"
+#include "src/execution/mips/frame-constants-mips.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips/frame-constants-mips.h b/deps/v8/src/execution/mips/frame-constants-mips.h
index 0675cd0aa7..2043d12e89 100644
--- a/deps/v8/src/mips/frame-constants-mips.h
+++ b/deps/v8/src/execution/mips/frame-constants-mips.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS_FRAME_CONSTANTS_MIPS_H_
-#define V8_MIPS_FRAME_CONSTANTS_MIPS_H_
+#ifndef V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_
+#define V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_
#include "src/base/macros.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -72,4 +72,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_MIPS_FRAME_CONSTANTS_MIPS_H_
+#endif // V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index 061a96c38a..e0448f232a 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips/simulator-mips.h"
+#include "src/execution/mips/simulator-mips.h"
// Only build the simulator if not compiling for real MIPS hardware.
#if defined(USE_SIMULATOR)
@@ -12,15 +12,16 @@
#include <stdlib.h>
#include <cmath>
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/mips/constants-mips.h"
-#include "src/ostreams.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips/constants-mips.h"
+#include "src/diagnostics/disasm.h"
+#include "src/heap/combined-heap.h"
#include "src/runtime/runtime-utils.h"
-#include "src/vector.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -29,10 +30,7 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
Simulator::GlobalMonitor::Get)
// Utils functions.
-bool HaveSameSign(int32_t a, int32_t b) {
- return ((a ^ b) >= 0);
-}
-
+bool HaveSameSign(int32_t a, int32_t b) { return ((a ^ b) >= 0); }
uint32_t get_fcsr_condition_bit(uint32_t cc) {
if (cc == 0) {
@@ -42,7 +40,6 @@ uint32_t get_fcsr_condition_bit(uint32_t cc) {
}
}
-
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent was through
// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
@@ -53,7 +50,7 @@ uint32_t get_fcsr_condition_bit(uint32_t cc) {
// code.
class MipsDebugger {
public:
- explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) {}
void Stop(Instruction* instr);
void Debug();
@@ -64,7 +61,7 @@ class MipsDebugger {
private:
// We set the breakpoint code to 0xFFFFF to easily recognize it.
static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xFFFFF << 6;
- static const Instr kNopInstr = 0x0;
+ static const Instr kNopInstr = 0x0;
Simulator* sim_;
@@ -86,10 +83,8 @@ class MipsDebugger {
void RedoBreakpoints();
};
-
#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
-
void MipsDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->Bits(25, 6);
@@ -97,7 +92,6 @@ void MipsDebugger::Stop(Instruction* instr) {
Debug();
}
-
int32_t MipsDebugger::GetRegisterValue(int regnum) {
if (regnum == kNumSimuRegisters) {
return sim_->get_pc();
@@ -106,7 +100,6 @@ int32_t MipsDebugger::GetRegisterValue(int regnum) {
}
}
-
int32_t MipsDebugger::GetFPURegisterValue32(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
@@ -115,7 +108,6 @@ int32_t MipsDebugger::GetFPURegisterValue32(int regnum) {
}
}
-
int64_t MipsDebugger::GetFPURegisterValue64(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
@@ -124,7 +116,6 @@ int64_t MipsDebugger::GetFPURegisterValue64(int regnum) {
}
}
-
float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
@@ -133,7 +124,6 @@ float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
}
}
-
double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
@@ -142,7 +132,6 @@ double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
}
}
-
bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
int regnum = Registers::Number(desc);
int fpuregnum = FPURegisters::Number(desc);
@@ -161,7 +150,6 @@ bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
return false;
}
-
bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
int regnum = Registers::Number(desc);
int fpuregnum = FPURegisters::Number(desc);
@@ -173,15 +161,14 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
*value = GetFPURegisterValue64(fpuregnum);
return true;
} else if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%" SCNx64,
- reinterpret_cast<uint64_t*>(value)) == 1;
+ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast<uint64_t*>(value)) ==
+ 1;
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
return false;
}
-
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != nullptr) {
@@ -196,7 +183,6 @@ bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
return true;
}
-
bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
@@ -207,31 +193,28 @@ bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
return true;
}
-
void MipsDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-
void MipsDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
-
void MipsDebugger::PrintAllRegs() {
#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
PrintF("\n");
// at, v0, a0.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(1), REG_INFO(2), REG_INFO(4));
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(1),
+ REG_INFO(2), REG_INFO(4));
// v1, a1.
- PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- "", REG_INFO(3), REG_INFO(5));
+ PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", "", REG_INFO(3),
+ REG_INFO(5));
// a2.
PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6));
// a3.
@@ -239,37 +222,33 @@ void MipsDebugger::PrintAllRegs() {
PrintF("\n");
// t0-t7, s0-s7
for (int i = 0; i < 8; i++) {
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(8+i), REG_INFO(16+i));
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(8 + i),
+ REG_INFO(16 + i));
}
PrintF("\n");
// t8, k0, LO.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(24), REG_INFO(26), REG_INFO(32));
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(24),
+ REG_INFO(26), REG_INFO(32));
// t9, k1, HI.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(25), REG_INFO(27), REG_INFO(33));
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(25),
+ REG_INFO(27), REG_INFO(33));
// sp, fp, gp.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(29), REG_INFO(30), REG_INFO(28));
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(29),
+ REG_INFO(30), REG_INFO(28));
// pc.
- PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
- REG_INFO(31), REG_INFO(34));
+ PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(31), REG_INFO(34));
#undef REG_INFO
-#undef FPU_REG_INFO
}
-
void MipsDebugger::PrintAllRegsIncludingFPU() {
-#define FPU_REG_INFO32(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
- GetFPURegisterValue32(n+1), \
- GetFPURegisterValue32(n), \
- GetFPURegisterValueDouble(n)
+#define FPU_REG_INFO32(n) \
+ FPURegisters::Name(n), FPURegisters::Name(n + 1), \
+ GetFPURegisterValue32(n + 1), GetFPURegisterValue32(n), \
+ GetFPURegisterValueDouble(n)
-#define FPU_REG_INFO64(n) FPURegisters::Name(n), \
- GetFPURegisterValue64(n), \
- GetFPURegisterValueDouble(n)
+#define FPU_REG_INFO64(n) \
+ FPURegisters::Name(n), GetFPURegisterValue64(n), GetFPURegisterValueDouble(n)
PrintAllRegs();
@@ -278,16 +257,16 @@ void MipsDebugger::PrintAllRegsIncludingFPU() {
// This must be a compile-time switch,
// compiler will throw out warnings otherwise.
if (kFpuMode == kFP64) {
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(0) );
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(1) );
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(2) );
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(3) );
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(4) );
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(5) );
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(6) );
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(7) );
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(8) );
- PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(9) );
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(0));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(1));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(2));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(3));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(4));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(5));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(6));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(7));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(8));
+ PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(9));
PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(10));
PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(11));
PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(12));
@@ -311,11 +290,11 @@ void MipsDebugger::PrintAllRegsIncludingFPU() {
PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(30));
PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(31));
} else {
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(0) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(2) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(4) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(6) );
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(8) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(0));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(2));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(4));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(6));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(8));
PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(10));
PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(12));
PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(14));
@@ -329,12 +308,10 @@ void MipsDebugger::PrintAllRegsIncludingFPU() {
PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(30));
}
-#undef REG_INFO
#undef FPU_REG_INFO32
#undef FPU_REG_INFO64
}
-
void MipsDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@@ -348,7 +325,7 @@ void MipsDebugger::Debug() {
char cmd[COMMAND_SIZE + 1];
char arg1[ARG_SIZE + 1];
char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
+ char* argv[3] = {cmd, arg1, arg2};
// Make sure to have a proper terminating character if reaching the limit.
cmd[COMMAND_SIZE] = 0;
@@ -365,9 +342,8 @@ void MipsDebugger::Debug() {
disasm::Disassembler dasm(converter);
// Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.start());
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.begin());
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
@@ -424,8 +400,8 @@ void MipsDebugger::Debug() {
double dvalue;
value = GetFPURegisterValue64(fpuregnum);
dvalue = GetFPURegisterValueDouble(fpuregnum);
- PrintF("%3s: 0x%016llx %16.4e\n",
- FPURegisters::Name(fpuregnum), value, dvalue);
+ PrintF("%3s: 0x%016llx %16.4e\n", FPURegisters::Name(fpuregnum),
+ value, dvalue);
} else {
if (fpuregnum % 2 == 1) {
int32_t value;
@@ -439,10 +415,8 @@ void MipsDebugger::Debug() {
int32_t lvalue2 = GetFPURegisterValue32(fpuregnum + 1);
dfvalue = GetFPURegisterValueDouble(fpuregnum);
PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
- FPURegisters::Name(fpuregnum+1),
- FPURegisters::Name(fpuregnum),
- lvalue1,
- lvalue2,
+ FPURegisters::Name(fpuregnum + 1),
+ FPURegisters::Name(fpuregnum), lvalue1, lvalue2,
dfvalue);
}
}
@@ -471,8 +445,8 @@ void MipsDebugger::Debug() {
PrintF("print <register> or print <fpu register> single\n");
}
}
- } else if ((strcmp(cmd, "po") == 0)
- || (strcmp(cmd, "printobject") == 0)) {
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
int32_t value;
StdoutStream os;
@@ -480,7 +454,7 @@ void MipsDebugger::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
@@ -536,12 +510,13 @@ void MipsDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
PrintF(" (");
if (obj.IsSmi()) {
PrintF("smi %d", Smi::ToInt(obj));
} else {
- obj->ShortPrint();
+ obj.ShortPrint();
}
PrintF(")");
}
@@ -549,8 +524,7 @@ void MipsDebugger::Debug() {
cur++;
}
- } else if ((strcmp(cmd, "disasm") == 0) ||
- (strcmp(cmd, "dpc") == 0) ||
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
(strcmp(cmd, "di") == 0)) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@@ -594,7 +568,7 @@ void MipsDebugger::Debug() {
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
- buffer.start());
+ buffer.begin());
cur += kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
@@ -639,8 +613,7 @@ void MipsDebugger::Debug() {
if (strcmp(arg1, "info") == 0) {
if (strcmp(arg2, "all") == 0) {
PrintF("Stop information:\n");
- for (uint32_t i = kMaxWatchpointCode + 1;
- i <= kMaxStopCode;
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
i++) {
sim_->PrintStopInfo(i);
}
@@ -652,8 +625,7 @@ void MipsDebugger::Debug() {
} else if (strcmp(arg1, "enable") == 0) {
// Enable all/the specified breakpoint(s).
if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = kMaxWatchpointCode + 1;
- i <= kMaxStopCode;
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
i++) {
sim_->EnableStop(i);
}
@@ -665,8 +637,7 @@ void MipsDebugger::Debug() {
} else if (strcmp(arg1, "disable") == 0) {
// Disable all/the specified breakpoint(s).
if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = kMaxWatchpointCode + 1;
- i <= kMaxStopCode;
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
i++) {
sim_->DisableStop(i);
}
@@ -714,7 +685,7 @@ void MipsDebugger::Debug() {
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
- buffer.start());
+ buffer.begin());
cur += kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
@@ -787,19 +758,16 @@ bool Simulator::ICacheMatch(void* one, void* two) {
return one == two;
}
-
static uint32_t ICacheHash(void* key) {
return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
}
-
static bool AllOnOnePage(uintptr_t start, int size) {
intptr_t start_page = (start & ~CachePage::kPageMask);
intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
return start_page == end_page;
}
-
void Simulator::set_last_debugger_input(char* input) {
DeleteArray(last_debugger_input_);
last_debugger_input_ = input;
@@ -841,7 +809,6 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
return reinterpret_cast<CachePage*>(entry->value);
}
-
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, int size) {
@@ -877,7 +844,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
}
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
@@ -924,7 +890,7 @@ Simulator::~Simulator() {
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
- isolate->FindOrAllocatePerThreadDataForThisThread();
+ isolate->FindOrAllocatePerThreadDataForThisThread();
DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
@@ -936,7 +902,6 @@ Simulator* Simulator::current(Isolate* isolate) {
return sim;
}
-
// Sets the register in the architecture state. It will also deal with updating
// Simulator internal state for special registers such as PC.
void Simulator::set_register(int reg, int32_t value) {
@@ -949,21 +914,18 @@ void Simulator::set_register(int reg, int32_t value) {
registers_[reg] = (reg == 0) ? 0 : value;
}
-
void Simulator::set_dw_register(int reg, const int* dbl) {
DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
registers_[reg] = dbl[0];
registers_[reg + 1] = dbl[1];
}
-
void Simulator::set_fpu_register(int fpureg, int64_t value) {
DCHECK(IsFp64Mode());
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg * 2] = value;
}
-
void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
// Set ONLY lower 32-bits, leaving upper bits untouched.
// TODO(plind): big endian issue.
@@ -972,7 +934,6 @@ void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
*pword = value;
}
-
void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
// Set ONLY upper 32-bits, leaving lower bits untouched.
// TODO(plind): big endian issue.
@@ -982,13 +943,11 @@ void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
*phiword = value;
}
-
void Simulator::set_fpu_register_float(int fpureg, float value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
*bit_cast<float*>(&FPUregisters_[fpureg * 2]) = value;
}
-
void Simulator::set_fpu_register_double(int fpureg, double value) {
if (IsFp64Mode()) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
@@ -1001,7 +960,6 @@ void Simulator::set_fpu_register_double(int fpureg, double value) {
}
}
-
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register.
int32_t Simulator::get_register(int reg) const {
@@ -1012,7 +970,6 @@ int32_t Simulator::get_register(int reg) const {
return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
}
-
double Simulator::get_double_from_register_pair(int reg) {
// TODO(plind): bad ABI stuff, refactor or remove.
DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
@@ -1023,10 +980,9 @@ double Simulator::get_double_from_register_pair(int reg) {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
- return(dm_val);
+ return (dm_val);
}
-
int64_t Simulator::get_fpu_register(int fpureg) const {
if (IsFp64Mode()) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
@@ -1040,31 +996,26 @@ int64_t Simulator::get_fpu_register(int fpureg) const {
}
}
-
int32_t Simulator::get_fpu_register_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
-
int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
-
int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xFFFFFFFF);
}
-
float Simulator::get_fpu_register_float(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg * 2]));
}
-
double Simulator::get_fpu_register_double(int fpureg) const {
if (IsFp64Mode()) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
@@ -1119,7 +1070,6 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
}
}
-
// The return value is either in v0/v1 or f0.
void Simulator::SetFpResult(const double& result) {
if (!IsMipsSoftFloatABI) {
@@ -1134,7 +1084,6 @@ void Simulator::SetFpResult(const double& result) {
}
}
-
// Helper functions for setting and testing the FCSR register's bits.
void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
if (value) {
@@ -1144,11 +1093,7 @@ void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
}
}
-
-bool Simulator::test_fcsr_bit(uint32_t cc) {
- return FCSR_ & (1 << cc);
-}
-
+bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); }
void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
FCSR_ |= mode & kFPURoundingModeMask;
@@ -1185,7 +1130,6 @@ void Simulator::set_fpu_register_word_invalid_result(float original,
}
}
-
void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
double max_int32 = std::numeric_limits<int32_t>::max();
@@ -1204,7 +1148,6 @@ void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
}
}
-
void Simulator::set_fpu_register_invalid_result64(float original,
float rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
@@ -1226,7 +1169,6 @@ void Simulator::set_fpu_register_invalid_result64(float original,
}
}
-
void Simulator::set_fpu_register_word_invalid_result(double original,
double rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
@@ -1246,7 +1188,6 @@ void Simulator::set_fpu_register_word_invalid_result(double original,
}
}
-
void Simulator::set_fpu_register_invalid_result(double original,
double rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
@@ -1266,7 +1207,6 @@ void Simulator::set_fpu_register_invalid_result(double original,
}
}
-
void Simulator::set_fpu_register_invalid_result64(double original,
double rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
@@ -1288,7 +1228,6 @@ void Simulator::set_fpu_register_invalid_result64(double original,
}
}
-
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(double original, double rounded) {
@@ -1320,7 +1259,6 @@ bool Simulator::set_fcsr_round_error(double original, double rounded) {
return ret;
}
-
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(double original, double rounded) {
@@ -1354,7 +1292,6 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) {
return ret;
}
-
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(float original, float rounded) {
@@ -1386,7 +1323,6 @@ bool Simulator::set_fcsr_round_error(float original, float rounded) {
return ret;
}
-
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(float original, float rounded) {
@@ -1420,7 +1356,6 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
return ret;
}
-
void Simulator::round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
@@ -1462,7 +1397,6 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
}
}
-
void Simulator::round_according_to_fcsr(float toRound, float& rounded,
int32_t& rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
@@ -1587,7 +1521,6 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
}
}
-
void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
int64_t& rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
@@ -1629,24 +1562,18 @@ void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
}
}
-
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
registers_[pc] = value;
}
-
bool Simulator::has_bad_pc() const {
return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
}
-
// Raw access to the PC register without the special adjustment when reading.
-int32_t Simulator::get_pc() const {
- return registers_[pc];
-}
-
+int32_t Simulator::get_pc() const { return registers_[pc]; }
// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
// interrupt is caused. On others it does a funky rotation thing. For now we
@@ -1666,8 +1593,9 @@ void Simulator::TraceRegWr(int32_t value, TraceType t) {
switch (t) {
case WORD:
- SNPrintF(trace_buf_, "%08" PRIx32 " (%" PRIu64 ") int32:%" PRId32
- " uint32:%" PRIu32,
+ SNPrintF(trace_buf_,
+ "%08" PRIx32 " (%" PRIu64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
value, icount_, value, value);
break;
case FLOAT:
@@ -1690,8 +1618,9 @@ void Simulator::TraceRegWr(int64_t value, TraceType t) {
switch (t) {
case DWORD:
- SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRIu64 ") int64:%" PRId64
- " uint64:%" PRIu64,
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRIu64 ") int64:%" PRId64
+ " uint64:%" PRIu64,
value, icount_, value, value);
break;
case DOUBLE:
@@ -1805,8 +1734,9 @@ void Simulator::TraceMemRd(int32_t addr, int32_t value, TraceType t) {
switch (t) {
case WORD:
- SNPrintF(trace_buf_, "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64
- ") int32:%" PRId32 " uint32:%" PRIu32,
+ SNPrintF(trace_buf_,
+ "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
value, addr, icount_, value, value);
break;
case FLOAT:
@@ -1820,7 +1750,6 @@ void Simulator::TraceMemRd(int32_t addr, int32_t value, TraceType t) {
}
}
-
void Simulator::TraceMemWr(int32_t addr, int32_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
switch (t) {
@@ -1925,18 +1854,21 @@ void Simulator::TraceMemRd(int32_t addr, int64_t value, TraceType t) {
switch (t) {
case DWORD:
- SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%08" PRIx32 "] (%" PRIu64
- ") int64:%" PRId64 " uint64:%" PRIu64,
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%08" PRIx32 "] (%" PRIu64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
v.fmt_int64, addr, icount_, v.fmt_int64, v.fmt_int64);
break;
case DOUBLE:
- SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%08" PRIx32 "] (%" PRIu64
- ") dbl:%e",
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%08" PRIx32 "] (%" PRIu64
+ ") dbl:%e",
v.fmt_int64, addr, icount_, v.fmt_double);
break;
case FLOAT_DOUBLE:
- SNPrintF(trace_buf_, "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64
- ") flt:%e dbl:%e",
+ SNPrintF(trace_buf_,
+ "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64
+ ") flt:%e dbl:%e",
v.fmt_int32[1], addr, icount_, v.fmt_float[1], v.fmt_double);
break;
default:
@@ -1960,7 +1892,7 @@ void Simulator::TraceMemWr(int32_t addr, int64_t value, TraceType t) {
}
int Simulator::ReadW(int32_t addr, Instruction* instr, TraceType t) {
- if (addr >=0 && addr < 0x400) {
+ if (addr >= 0 && addr < 0x400) {
// This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
@@ -1982,8 +1914,7 @@ int Simulator::ReadW(int32_t addr, Instruction* instr, TraceType t) {
}
return *ptr;
}
- PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
MipsDebugger dbg(this);
dbg.Debug();
@@ -2007,8 +1938,7 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
MipsDebugger dbg(this);
dbg.Debug();
@@ -2051,14 +1981,12 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
- PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
return 0;
}
-
void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
local_monitor_.NotifyStore();
@@ -2068,13 +1996,11 @@ void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
}
-
uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
local_monitor_.NotifyLoad();
@@ -2083,13 +2009,11 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
return *ptr;
}
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
+ addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
return 0;
}
-
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
local_monitor_.NotifyLoad();
@@ -2098,13 +2022,11 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
return *ptr;
}
PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
+ addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
return 0;
}
-
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
local_monitor_.NotifyStore();
@@ -2116,12 +2038,10 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
return;
}
PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
- reinterpret_cast<intptr_t>(instr));
+ addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
}
-
void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
local_monitor_.NotifyStore();
@@ -2132,13 +2052,11 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
- addr,
+ PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
}
-
uint32_t Simulator::ReadBU(int32_t addr) {
local_monitor_.NotifyLoad();
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
@@ -2146,7 +2064,6 @@ uint32_t Simulator::ReadBU(int32_t addr) {
return *ptr & 0xFF;
}
-
int32_t Simulator::ReadB(int32_t addr) {
local_monitor_.NotifyLoad();
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
@@ -2154,7 +2071,6 @@ int32_t Simulator::ReadB(int32_t addr) {
return *ptr;
}
-
void Simulator::WriteB(int32_t addr, uint8_t value) {
local_monitor_.NotifyStore();
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -2164,7 +2080,6 @@ void Simulator::WriteB(int32_t addr, uint8_t value) {
*ptr = value;
}
-
void Simulator::WriteB(int32_t addr, int8_t value) {
local_monitor_.NotifyStore();
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -2220,7 +2135,6 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
-
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR ": %s\n",
@@ -2228,34 +2142,33 @@ void Simulator::Format(Instruction* instr, const char* format) {
UNIMPLEMENTED_MIPS();
}
-
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
// 64-bit value. With the code below we assume that all runtime calls return
// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg1,
- int32_t arg2, int32_t arg3,
- int32_t arg4, int32_t arg5,
- int32_t arg6, int32_t arg7,
- int32_t arg8);
+using SimulatorRuntimeCall = int64_t (*)(int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5,
+ int32_t arg6, int32_t arg7,
+ int32_t arg8);
// These prototypes handle the four types of FP calls.
-typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPCall)(double darg0);
-typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
+using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
+using SimulatorRuntimeFPCall = double (*)(double darg0);
+using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef void (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int32_t arg0, void* arg1);
+using SimulatorRuntimeDirectApiCall = void (*)(int32_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(int32_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
-typedef void (*SimulatorRuntimeDirectGetterCall)(int32_t arg0, int32_t arg1);
-typedef void (*SimulatorRuntimeProfilingGetterCall)(
- int32_t arg0, int32_t arg1, void* arg2);
+using SimulatorRuntimeDirectGetterCall = void (*)(int32_t arg0, int32_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(int32_t arg0, int32_t arg1,
+ void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
@@ -2284,10 +2197,10 @@ void Simulator::SoftwareInterrupt() {
STATIC_ASSERT(kMaxCParameters == 9);
bool fp_call =
- (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
if (!IsMipsSoftFloatABI) {
// With the hard floating point calling convention, double
@@ -2295,41 +2208,41 @@ void Simulator::SoftwareInterrupt() {
// from there and call the builtin using soft floating point
// convention.
switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- if (IsFp64Mode()) {
- arg0 = get_fpu_register_word(f12);
- arg1 = get_fpu_register_hi_word(f12);
- arg2 = get_fpu_register_word(f14);
- arg3 = get_fpu_register_hi_word(f14);
- } else {
- arg0 = get_fpu_register_word(f12);
- arg1 = get_fpu_register_word(f13);
- arg2 = get_fpu_register_word(f14);
- arg3 = get_fpu_register_word(f15);
- }
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- if (IsFp64Mode()) {
- arg0 = get_fpu_register_word(f12);
- arg1 = get_fpu_register_hi_word(f12);
- } else {
- arg0 = get_fpu_register_word(f12);
- arg1 = get_fpu_register_word(f13);
- }
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- if (IsFp64Mode()) {
- arg0 = get_fpu_register_word(f12);
- arg1 = get_fpu_register_hi_word(f12);
- } else {
- arg0 = get_fpu_register_word(f12);
- arg1 = get_fpu_register_word(f13);
- }
- arg2 = get_register(a2);
- break;
- default:
- break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ if (IsFp64Mode()) {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_hi_word(f12);
+ arg2 = get_fpu_register_word(f14);
+ arg3 = get_fpu_register_hi_word(f14);
+ } else {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_word(f13);
+ arg2 = get_fpu_register_word(f14);
+ arg3 = get_fpu_register_word(f15);
+ }
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ if (IsFp64Mode()) {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_hi_word(f12);
+ } else {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_word(f13);
+ }
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ if (IsFp64Mode()) {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_hi_word(f12);
+ } else {
+ arg0 = get_fpu_register_word(f12);
+ arg1 = get_fpu_register_word(f13);
+ }
+ arg2 = get_register(a2);
+ break;
+ default:
+ break;
}
}
@@ -2338,7 +2251,7 @@ void Simulator::SoftwareInterrupt() {
int32_t saved_ra = get_register(ra);
intptr_t external =
- reinterpret_cast<intptr_t>(redirection->external_function());
+ reinterpret_cast<intptr_t>(redirection->external_function());
// Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
// FPU, or gcc soft-float routines. Hardware FPU is simulated in this
@@ -2376,85 +2289,83 @@ void Simulator::SoftwareInterrupt() {
}
}
switch (redirection->type()) {
- case ExternalReference::BUILTIN_COMPARE_CALL: {
- SimulatorRuntimeCompareCall target =
- reinterpret_cast<SimulatorRuntimeCompareCall>(external);
- iresult = target(dval0, dval1);
- set_register(v0, static_cast<int32_t>(iresult));
- set_register(v1, static_cast<int32_t>(iresult >> 32));
- break;
- }
- case ExternalReference::BUILTIN_FP_FP_CALL: {
- SimulatorRuntimeFPFPCall target =
- reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
- dresult = target(dval0, dval1);
- SetFpResult(dresult);
- break;
- }
- case ExternalReference::BUILTIN_FP_CALL: {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- dresult = target(dval0);
- SetFpResult(dresult);
- break;
- }
- case ExternalReference::BUILTIN_FP_INT_CALL: {
- SimulatorRuntimeFPIntCall target =
- reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
- dresult = target(dval0, ival);
- SetFpResult(dresult);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (::v8::internal::FLAG_trace_sim) {
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_COMPARE_CALL:
- PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(v0, static_cast<int32_t>(iresult));
+ set_register(v1, static_cast<int32_t>(iresult >> 32));
break;
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_FP_CALL:
- case ExternalReference::BUILTIN_FP_INT_CALL:
- PrintF("Returned %f\n", dresult);
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
default:
UNREACHABLE();
break;
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08x\n",
- reinterpret_cast<void*>(external), arg0);
+ reinterpret_cast<void*>(external), arg0);
}
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
target(arg0);
- } else if (
- redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg0, arg1);
+ reinterpret_cast<void*>(external), arg0, arg1);
}
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
target(arg0, Redirection::ReverseRedirection(arg1));
- } else if (
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08x %08x\n",
- reinterpret_cast<void*>(external), arg0, arg1);
+ reinterpret_cast<void*>(external), arg0, arg1);
}
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
target(arg0, arg1);
- } else if (
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ } else if (redirection->type() ==
+ ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08x %08x %08x\n",
- reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
}
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
@@ -2463,7 +2374,7 @@ void Simulator::SoftwareInterrupt() {
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
+ reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
PrintF(
"Call to host function at %p "
@@ -2496,13 +2407,11 @@ void Simulator::SoftwareInterrupt() {
}
}
-
// Stop helper functions.
bool Simulator::IsWatchpoint(uint32_t code) {
return (code <= kMaxWatchpointCode);
}
-
void Simulator::PrintWatchpoint(uint32_t code) {
MipsDebugger dbg(this);
++break_count_;
@@ -2513,7 +2422,6 @@ void Simulator::PrintWatchpoint(uint32_t code) {
dbg.PrintAllRegs(); // Print registers and continue running.
}
-
void Simulator::HandleStop(uint32_t code, Instruction* instr) {
// Stop if it is enabled, otherwise go on jumping over the stop
// and the message address.
@@ -2523,40 +2431,37 @@ void Simulator::HandleStop(uint32_t code, Instruction* instr) {
}
}
-
bool Simulator::IsStopInstruction(Instruction* instr) {
int32_t func = instr->FunctionFieldRaw();
uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
}
-
bool Simulator::IsEnabledStop(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK_GT(code, kMaxWatchpointCode);
return !(watched_stops_[code].count & kStopDisabledBit);
}
-
void Simulator::EnableStop(uint32_t code) {
if (!IsEnabledStop(code)) {
watched_stops_[code].count &= ~kStopDisabledBit;
}
}
-
void Simulator::DisableStop(uint32_t code) {
if (IsEnabledStop(code)) {
watched_stops_[code].count |= kStopDisabledBit;
}
}
-
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
- PrintF("Stop counter for code %i has overflowed.\n"
- "Enabling this code and reseting the counter to 0.\n", code);
+ PrintF(
+ "Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
watched_stops_[code].count = 0;
EnableStop(code);
} else {
@@ -2564,7 +2469,6 @@ void Simulator::IncreaseStopCounter(uint32_t code) {
}
}
-
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
if (code <= kMaxWatchpointCode) {
@@ -2579,16 +2483,15 @@ void Simulator::PrintStopInfo(uint32_t code) {
// Don't print the state of unused breakpoints.
if (count != 0) {
if (watched_stops_[code].desc) {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n",
- code, code, state, count, watched_stops_[code].desc);
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
+ count, watched_stops_[code].desc);
} else {
- PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n",
- code, code, state, count);
+ PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
}
}
}
-
void Simulator::SignalException(Exception e) {
FATAL("Error: Exception %i raised.", static_cast<int>(e));
}
@@ -3121,7 +3024,6 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
}
-
void Simulator::DecodeTypeRegisterWRsType() {
float fs = get_fpu_register_float(fs_reg());
float ft = get_fpu_register_float(ft_reg());
@@ -3213,7 +3115,6 @@ void Simulator::DecodeTypeRegisterWRsType() {
}
}
-
void Simulator::DecodeTypeRegisterSRsType() {
float fs, ft, fd;
fs = get_fpu_register_float(fs_reg());
@@ -3428,15 +3329,15 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
case SELEQZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- SetFPUFloatResult(
- fd_reg(),
- (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg()) : 0.0);
+ SetFPUFloatResult(fd_reg(), (ft_int & 0x1) == 0
+ ? get_fpu_register_float(fs_reg())
+ : 0.0);
break;
case SELNEZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- SetFPUFloatResult(
- fd_reg(),
- (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg()) : 0.0);
+ SetFPUFloatResult(fd_reg(), (ft_int & 0x1) != 0
+ ? get_fpu_register_float(fs_reg())
+ : 0.0);
break;
case MOVZ_C: {
DCHECK(IsMipsArchVariant(kMips32r2));
@@ -3615,7 +3516,6 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
}
-
void Simulator::DecodeTypeRegisterLRsType() {
double fs = get_fpu_register_double(fs_reg());
double ft = get_fpu_register_double(ft_reg());
@@ -3719,7 +3619,6 @@ void Simulator::DecodeTypeRegisterLRsType() {
}
}
-
void Simulator::DecodeTypeRegisterCOP1() {
switch (instr_.RsFieldRaw()) {
case CFC1:
@@ -3790,7 +3689,6 @@ void Simulator::DecodeTypeRegisterCOP1() {
}
}
-
void Simulator::DecodeTypeRegisterCOP1X() {
switch (instr_.FunctionFieldRaw()) {
case MADD_S: {
@@ -3834,7 +3732,6 @@ void Simulator::DecodeTypeRegisterCOP1X() {
}
}
-
void Simulator::DecodeTypeRegisterSPECIAL() {
int64_t alu_out = 0x12345678;
int64_t i64hilo = 0;
@@ -4144,7 +4041,6 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
}
}
-
void Simulator::DecodeTypeRegisterSPECIAL2() {
int32_t alu_out;
switch (instr_.FunctionFieldRaw()) {
@@ -4167,7 +4063,6 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
SetResult(rd_reg(), alu_out);
}
-
void Simulator::DecodeTypeRegisterSPECIAL3() {
int32_t alu_out;
switch (instr_.FunctionFieldRaw()) {
@@ -4752,7 +4647,7 @@ void Simulator::DecodeTypeMsaELM() {
template <typename T>
T Simulator::MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m) {
- typedef typename std::make_unsigned<T>::type uT;
+ using uT = typename std::make_unsigned<T>::type;
T res;
switch (opcode) {
case SLLI:
@@ -4945,7 +4840,7 @@ void Simulator::DecodeTypeMsaMI10() {
template <typename T>
T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) {
- typedef typename std::make_unsigned<T>::type uT;
+ using uT = typename std::make_unsigned<T>::type;
T res;
T wt_modulo = wt % (sizeof(T) * 8);
switch (opcode) {
@@ -5226,8 +5121,8 @@ template <typename T_int, typename T_smaller_int, typename T_reg>
void Msa3RInstrHelper_horizontal(const uint32_t opcode, T_reg ws, T_reg wt,
T_reg wd, const int i,
const int num_of_lanes) {
- typedef typename std::make_unsigned<T_int>::type T_uint;
- typedef typename std::make_unsigned<T_smaller_int>::type T_smaller_uint;
+ using T_uint = typename std::make_unsigned<T_int>::type;
+ using T_smaller_uint = typename std::make_unsigned<T_smaller_int>::type;
T_int* wd_p;
T_smaller_int *ws_p, *wt_p;
ws_p = reinterpret_cast<T_smaller_int*>(ws);
@@ -5520,8 +5415,8 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
template <typename T_int, typename T_int_dbl, typename T_reg>
void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
- // typedef typename std::make_unsigned<T_int>::type T_uint;
- typedef typename std::make_unsigned<T_int_dbl>::type T_uint_dbl;
+ // using T_uint = typename std::make_unsigned<T_int>::type;
+ using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
const int shift = kBitsPerByte * sizeof(T_int) - 1;
@@ -5966,7 +5861,7 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
template <typename T_int, typename T_fp, typename T_src, typename T_dst>
T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
Simulator* sim) {
- typedef typename std::make_unsigned<T_int>::type T_uint;
+ using T_uint = typename std::make_unsigned<T_int>::type;
switch (opcode) {
case FCLASS: {
#define SNAN_BIT BIT(0)
@@ -6151,7 +6046,7 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
dst = bit_cast<T_int>(static_cast<T_fp>(src));
break;
case FFINT_U:
- typedef typename std::make_unsigned<T_src>::type uT_src;
+ using uT_src = typename std::make_unsigned<T_src>::type;
dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
@@ -6325,7 +6220,6 @@ void Simulator::DecodeTypeRegister() {
}
}
-
// Type 2: instructions using a 16, 21 or 26 bits immediate. (e.g. beq, beqc).
void Simulator::DecodeTypeImmediate() {
// Instruction fields.
@@ -6752,7 +6646,7 @@ void Simulator::DecodeTypeImmediate() {
uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
alu_out = ReadW(addr, instr_.instr());
- alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
+ alu_out = static_cast<uint32_t>(alu_out) >> al_offset * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
break;
@@ -6939,7 +6833,6 @@ void Simulator::DecodeTypeImmediate() {
}
}
-
// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
void Simulator::DecodeTypeJump() {
SimInstruction simInstr = instr_;
@@ -6967,7 +6860,6 @@ void Simulator::DecodeTypeJump() {
pc_modified_ = true;
}
-
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -6998,8 +6890,8 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (::v8::internal::FLAG_trace_sim) {
PrintF(" 0x%08" PRIxPTR " %-44s %s\n",
- reinterpret_cast<intptr_t>(instr), buffer.start(),
- trace_buf_.start());
+ reinterpret_cast<intptr_t>(instr), buffer.begin(),
+ trace_buf_.begin());
}
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int32_t>(instr) + kInstrSize);
@@ -7119,8 +7011,8 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
// Remaining arguments passed on stack.
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kCArgsSlotsSize);
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t) -
+ kCArgsSlotsSize);
if (base::OS::ActivationFrameAlignment() != 0) {
entry_stack &= -base::OS::ActivationFrameAlignment();
}
@@ -7159,7 +7051,6 @@ double Simulator::CallFP(Address entry, double d0, double d1) {
}
}
-
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
@@ -7168,7 +7059,6 @@ uintptr_t Simulator::PushAddress(uintptr_t address) {
return new_sp;
}
-
uintptr_t Simulator::PopAddress() {
int current_sp = get_register(sp);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
@@ -7350,6 +7240,7 @@ void Simulator::GlobalMonitor::RemoveLinkedAddress(
}
#undef UNSUPPORTED
+#undef SScanF
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/execution/mips/simulator-mips.h
index 88e45605f2..b5712d1a82 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/execution/mips/simulator-mips.h
@@ -9,20 +9,20 @@
// which will start execution in the Simulator or forwards to the real entry
// on a MIPS HW platform.
-#ifndef V8_MIPS_SIMULATOR_MIPS_H_
-#define V8_MIPS_SIMULATOR_MIPS_H_
+#ifndef V8_EXECUTION_MIPS_SIMULATOR_MIPS_H_
+#define V8_EXECUTION_MIPS_SIMULATOR_MIPS_H_
// globals.h defines USE_SIMULATOR.
-#include "src/globals.h"
+#include "src/common/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
-#include "src/allocation.h"
-#include "src/assembler.h"
#include "src/base/hashmap.h"
-#include "src/mips/constants-mips.h"
-#include "src/simulator-base.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/mips/constants-mips.h"
+#include "src/execution/simulator-base.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -42,20 +42,16 @@ class CachePage {
static const int kLineLength = 1 << kLineShift;
static const int kLineMask = kLineLength - 1;
- CachePage() {
- memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
- }
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
char* ValidityByte(int offset) {
return &validity_map_[offset >> kLineShift];
}
- char* CachedData(int offset) {
- return &data_[offset];
- }
+ char* CachedData(int offset) { return &data_[offset]; }
private:
- char data_[kPageSize]; // The cached data.
+ char data_[kPageSize]; // The cached data.
static const int kValidityMapSize = kPageSize >> kLineShift;
char validity_map_[kValidityMapSize]; // One byte per line.
};
@@ -102,12 +98,32 @@ class Simulator : public SimulatorBase {
no_reg = -1,
zero_reg = 0,
at,
- v0, v1,
- a0, a1, a2, a3,
- t0, t1, t2, t3, t4, t5, t6, t7,
- s0, s1, s2, s3, s4, s5, s6, s7,
- t8, t9,
- k0, k1,
+ v0,
+ v1,
+ a0,
+ a1,
+ a2,
+ a3,
+ t0,
+ t1,
+ t2,
+ t3,
+ t4,
+ t5,
+ t6,
+ t7,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ t8,
+ t9,
+ k0,
+ k1,
gp,
sp,
s8,
@@ -115,7 +131,7 @@ class Simulator : public SimulatorBase {
// LO, HI, and pc.
LO,
HI,
- pc, // pc must be the last register.
+ pc, // pc must be the last register.
kNumSimuRegisters,
// aliases
fp = s8
@@ -124,10 +140,38 @@ class Simulator : public SimulatorBase {
// Coprocessor registers.
// Generated code will always use doubles. So we will only use even registers.
enum FPURegister {
- f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
- f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
- f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
- f26, f27, f28, f29, f30, f31,
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15, // f12 and f14 are arguments FPURegisters.
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
kNumFPURegisters
};
@@ -291,7 +335,7 @@ class Simulator : public SimulatorBase {
// MSA Data Format
enum MSADataFormat { MSA_VECT = 0, MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD };
- typedef union {
+ union msa_reg_t {
int8_t b[kMSALanesByte];
uint8_t ub[kMSALanesByte];
int16_t h[kMSALanesHalf];
@@ -300,7 +344,7 @@ class Simulator : public SimulatorBase {
uint32_t uw[kMSALanesWord];
int64_t d[kMSALanesDword];
uint64_t ud[kMSALanesDword];
- } msa_reg_t;
+ };
// Read and write memory.
inline uint32_t ReadBU(int32_t addr);
@@ -471,7 +515,6 @@ class Simulator : public SimulatorBase {
void IncreaseStopCounter(uint32_t code);
void PrintStopInfo(uint32_t code);
-
// Executes one instruction.
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
@@ -531,7 +574,7 @@ class Simulator : public SimulatorBase {
// Simulator support.
// Allocate 1MB for stack.
- static const size_t stack_size_ = 1 * 1024*1024;
+ static const size_t stack_size_ = 1 * 1024 * 1024;
char* stack_;
bool pc_modified_;
uint64_t icount_;
@@ -655,4 +698,4 @@ class Simulator : public SimulatorBase {
} // namespace v8
#endif // defined(USE_SIMULATOR)
-#endif // V8_MIPS_SIMULATOR_MIPS_H_
+#endif // V8_EXECUTION_MIPS_SIMULATOR_MIPS_H_
diff --git a/deps/v8/src/mips64/frame-constants-mips64.cc b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
index 5a19a0c364..68398605ba 100644
--- a/deps/v8/src/mips64/frame-constants-mips64.cc
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.cc
@@ -4,12 +4,12 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/assembler.h"
-#include "src/frame-constants.h"
-#include "src/mips64/assembler-mips64-inl.h"
-#include "src/mips64/assembler-mips64.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/mips64/assembler-mips64-inl.h"
+#include "src/codegen/mips64/assembler-mips64.h"
+#include "src/execution/frame-constants.h"
-#include "src/mips64/frame-constants-mips64.h"
+#include "src/execution/mips64/frame-constants-mips64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips64/frame-constants-mips64.h b/deps/v8/src/execution/mips64/frame-constants-mips64.h
index 99afed0ffa..c7791f6f7c 100644
--- a/deps/v8/src/mips64/frame-constants-mips64.h
+++ b/deps/v8/src/execution/mips64/frame-constants-mips64.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
-#define V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
+#ifndef V8_EXECUTION_MIPS64_FRAME_CONSTANTS_MIPS64_H_
+#define V8_EXECUTION_MIPS64_FRAME_CONSTANTS_MIPS64_H_
#include "src/base/macros.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -68,4 +68,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_MIPS64_FRAME_CONSTANTS_MIPS64_H_
+#endif // V8_EXECUTION_MIPS64_FRAME_CONSTANTS_MIPS64_H_
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index 33a573b6cc..7c45e7f82d 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/mips64/simulator-mips64.h"
+#include "src/execution/mips64/simulator-mips64.h"
// Only build the simulator if not compiling for real MIPS hardware.
#if defined(USE_SIMULATOR)
@@ -12,14 +12,15 @@
#include <stdlib.h>
#include <cmath>
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/mips64/constants-mips64.h"
-#include "src/ostreams.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips64/constants-mips64.h"
+#include "src/diagnostics/disasm.h"
+#include "src/heap/combined-heap.h"
#include "src/runtime/runtime-utils.h"
-#include "src/vector.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -38,7 +39,6 @@ uint32_t get_fcsr_condition_bit(uint32_t cc) {
}
}
-
static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
uint64_t u0, v0, w0;
int64_t u1, v1, w1, w2, t;
@@ -57,7 +57,6 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
return u1 * v1 + w2 + (w1 >> 32);
}
-
// This macro provides a platform independent use of sscanf. The reason for
// SScanF not being implemented in a platform independent was through
// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
@@ -68,7 +67,7 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
// code.
class MipsDebugger {
public:
- explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) {}
void Stop(Instruction* instr);
void Debug();
@@ -79,7 +78,7 @@ class MipsDebugger {
private:
// We set the breakpoint code to 0xFFFFF to easily recognize it.
static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xFFFFF << 6;
- static const Instr kNopInstr = 0x0;
+ static const Instr kNopInstr = 0x0;
Simulator* sim_;
@@ -116,7 +115,6 @@ int64_t MipsDebugger::GetRegisterValue(int regnum) {
}
}
-
int64_t MipsDebugger::GetFPURegisterValue(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
@@ -125,7 +123,6 @@ int64_t MipsDebugger::GetFPURegisterValue(int regnum) {
}
}
-
float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
@@ -134,7 +131,6 @@ float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
}
}
-
double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
if (regnum == kNumFPURegisters) {
return sim_->get_pc();
@@ -143,7 +139,6 @@ double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
}
}
-
bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
int regnum = Registers::Number(desc);
int fpuregnum = FPURegisters::Number(desc);
@@ -155,15 +150,14 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
*value = GetFPURegisterValue(fpuregnum);
return true;
} else if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%" SCNx64,
- reinterpret_cast<uint64_t*>(value)) == 1;
+ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast<uint64_t*>(value)) ==
+ 1;
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
return false;
}
-
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != nullptr) {
@@ -178,7 +172,6 @@ bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
return true;
}
-
bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
@@ -189,21 +182,18 @@ bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
return true;
}
-
void MipsDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-
void MipsDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
-
void MipsDebugger::PrintAllRegs() {
#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
@@ -250,11 +240,9 @@ void MipsDebugger::PrintAllRegs() {
#undef REG_INFO
}
-
void MipsDebugger::PrintAllRegsIncludingFPU() {
-#define FPU_REG_INFO(n) FPURegisters::Name(n), \
- GetFPURegisterValue(n), \
- GetFPURegisterValueDouble(n)
+#define FPU_REG_INFO(n) \
+ FPURegisters::Name(n), GetFPURegisterValue(n), GetFPURegisterValueDouble(n)
PrintAllRegs();
@@ -297,7 +285,6 @@ void MipsDebugger::PrintAllRegsIncludingFPU() {
#undef FPU_REG_INFO
}
-
void MipsDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@@ -311,7 +298,7 @@ void MipsDebugger::Debug() {
char cmd[COMMAND_SIZE + 1];
char arg1[ARG_SIZE + 1];
char arg2[ARG_SIZE + 1];
- char* argv[3] = { cmd, arg1, arg2 };
+ char* argv[3] = {cmd, arg1, arg2};
// Make sure to have a proper terminating character if reaching the limit.
cmd[COMMAND_SIZE] = 0;
@@ -328,9 +315,8 @@ void MipsDebugger::Debug() {
disasm::Disassembler dasm(converter);
// Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.start());
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.begin());
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
@@ -414,8 +400,8 @@ void MipsDebugger::Debug() {
PrintF("print <register> or print <fpu register> single\n");
}
}
- } else if ((strcmp(cmd, "po") == 0)
- || (strcmp(cmd, "printobject") == 0)) {
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
int64_t value;
StdoutStream os;
@@ -423,7 +409,7 @@ void MipsDebugger::Debug() {
Object obj(value);
os << arg1 << ": \n";
#ifdef DEBUG
- obj->Print(os);
+ obj.Print(os);
os << "\n";
#else
os << Brief(obj) << "\n";
@@ -466,12 +452,13 @@ void MipsDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
PrintF(" (");
if (obj.IsSmi()) {
PrintF("smi %d", Smi::ToInt(obj));
} else {
- obj->ShortPrint();
+ obj.ShortPrint();
}
PrintF(")");
}
@@ -479,8 +466,7 @@ void MipsDebugger::Debug() {
cur++;
}
- } else if ((strcmp(cmd, "disasm") == 0) ||
- (strcmp(cmd, "dpc") == 0) ||
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
(strcmp(cmd, "di") == 0)) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@@ -524,7 +510,7 @@ void MipsDebugger::Debug() {
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
- buffer.start());
+ buffer.begin());
cur += kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
@@ -569,8 +555,7 @@ void MipsDebugger::Debug() {
if (strcmp(arg1, "info") == 0) {
if (strcmp(arg2, "all") == 0) {
PrintF("Stop information:\n");
- for (uint32_t i = kMaxWatchpointCode + 1;
- i <= kMaxStopCode;
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
i++) {
sim_->PrintStopInfo(i);
}
@@ -582,8 +567,7 @@ void MipsDebugger::Debug() {
} else if (strcmp(arg1, "enable") == 0) {
// Enable all/the specified breakpoint(s).
if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = kMaxWatchpointCode + 1;
- i <= kMaxStopCode;
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
i++) {
sim_->EnableStop(i);
}
@@ -595,8 +579,7 @@ void MipsDebugger::Debug() {
} else if (strcmp(arg1, "disable") == 0) {
// Disable all/the specified breakpoint(s).
if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = kMaxWatchpointCode + 1;
- i <= kMaxStopCode;
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
i++) {
sim_->DisableStop(i);
}
@@ -644,7 +627,7 @@ void MipsDebugger::Debug() {
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
- buffer.start());
+ buffer.begin());
cur += kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
@@ -717,19 +700,16 @@ bool Simulator::ICacheMatch(void* one, void* two) {
return one == two;
}
-
static uint32_t ICacheHash(void* key) {
return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
}
-
static bool AllOnOnePage(uintptr_t start, size_t size) {
intptr_t start_page = (start & ~CachePage::kPageMask);
intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
return start_page == end_page;
}
-
void Simulator::set_last_debugger_input(char* input) {
DeleteArray(last_debugger_input_);
last_debugger_input_ = input;
@@ -770,7 +750,6 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
return reinterpret_cast<CachePage*>(entry->value);
}
-
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, size_t size) {
@@ -806,7 +785,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
}
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
@@ -855,7 +833,7 @@ Simulator::~Simulator() {
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
- isolate->FindOrAllocatePerThreadDataForThisThread();
+ isolate->FindOrAllocatePerThreadDataForThisThread();
DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
@@ -867,7 +845,6 @@ Simulator* Simulator::current(Isolate* isolate) {
return sim;
}
-
// Sets the register in the architecture state. It will also deal with updating
// Simulator internal state for special registers such as PC.
void Simulator::set_register(int reg, int64_t value) {
@@ -880,7 +857,6 @@ void Simulator::set_register(int reg, int64_t value) {
registers_[reg] = (reg == 0) ? 0 : value;
}
-
void Simulator::set_dw_register(int reg, const int* dbl) {
DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
registers_[reg] = dbl[1];
@@ -888,13 +864,11 @@ void Simulator::set_dw_register(int reg, const int* dbl) {
registers_[reg] += dbl[0];
}
-
void Simulator::set_fpu_register(int fpureg, int64_t value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg * 2] = value;
}
-
void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
// Set ONLY lower 32-bits, leaving upper bits untouched.
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
@@ -907,7 +881,6 @@ void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
*pword = value;
}
-
void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
// Set ONLY upper 32-bits, leaving lower bits untouched.
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
@@ -920,19 +893,16 @@ void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
*phiword = value;
}
-
void Simulator::set_fpu_register_float(int fpureg, float value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
*bit_cast<float*>(&FPUregisters_[fpureg * 2]) = value;
}
-
void Simulator::set_fpu_register_double(int fpureg, double value) {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
*bit_cast<double*>(&FPUregisters_[fpureg * 2]) = value;
}
-
// Get the register from the architecture state. This function does handle
// the special case of accessing the PC register.
int64_t Simulator::get_register(int reg) const {
@@ -943,7 +913,6 @@ int64_t Simulator::get_register(int reg) const {
return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
}
-
double Simulator::get_double_from_register_pair(int reg) {
// TODO(plind): bad ABI stuff, refactor or remove.
DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
@@ -954,40 +923,34 @@ double Simulator::get_double_from_register_pair(int reg) {
char buffer[sizeof(registers_[0])];
memcpy(buffer, &registers_[reg], sizeof(registers_[0]));
memcpy(&dm_val, buffer, sizeof(registers_[0]));
- return(dm_val);
+ return (dm_val);
}
-
int64_t Simulator::get_fpu_register(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg * 2];
}
-
int32_t Simulator::get_fpu_register_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
-
int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
}
-
int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xFFFFFFFF);
}
-
float Simulator::get_fpu_register_float(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg * 2]));
}
-
double Simulator::get_fpu_register_double(int fpureg) const {
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
return *bit_cast<double*>(&FPUregisters_[fpureg * 2]);
@@ -1015,7 +978,7 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
*y = get_fpu_register_double(fparg2);
*z = static_cast<int32_t>(get_register(a2));
} else {
- // TODO(plind): bad ABI stuff, refactor or remove.
+ // TODO(plind): bad ABI stuff, refactor or remove.
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
@@ -1035,7 +998,6 @@ void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
}
}
-
// The return value is either in v0/v1 or f0.
void Simulator::SetFpResult(const double& result) {
if (!IsMipsSoftFloatABI) {
@@ -1050,7 +1012,6 @@ void Simulator::SetFpResult(const double& result) {
}
}
-
// Helper functions for setting and testing the FCSR register's bits.
void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
if (value) {
@@ -1060,11 +1021,7 @@ void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
}
}
-
-bool Simulator::test_fcsr_bit(uint32_t cc) {
- return FCSR_ & (1 << cc);
-}
-
+bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); }
void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
FCSR_ |= mode & kFPURoundingModeMask;
@@ -1113,7 +1070,6 @@ bool Simulator::set_fcsr_round_error(double original, double rounded) {
return ret;
}
-
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(double original, double rounded) {
@@ -1147,7 +1103,6 @@ bool Simulator::set_fcsr_round64_error(double original, double rounded) {
return ret;
}
-
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round_error(float original, float rounded) {
@@ -1198,7 +1153,6 @@ void Simulator::set_fpu_register_word_invalid_result(float original,
}
}
-
void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
double max_int32 = std::numeric_limits<int32_t>::max();
@@ -1217,7 +1171,6 @@ void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
}
}
-
void Simulator::set_fpu_register_invalid_result64(float original,
float rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
@@ -1239,7 +1192,6 @@ void Simulator::set_fpu_register_invalid_result64(float original,
}
}
-
void Simulator::set_fpu_register_word_invalid_result(double original,
double rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
@@ -1259,7 +1211,6 @@ void Simulator::set_fpu_register_word_invalid_result(double original,
}
}
-
void Simulator::set_fpu_register_invalid_result(double original,
double rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
@@ -1279,7 +1230,6 @@ void Simulator::set_fpu_register_invalid_result(double original,
}
}
-
void Simulator::set_fpu_register_invalid_result64(double original,
double rounded) {
if (FCSR_ & kFCSRNaN2008FlagMask) {
@@ -1301,7 +1251,6 @@ void Simulator::set_fpu_register_invalid_result64(double original,
}
}
-
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::set_fcsr_round64_error(float original, float rounded) {
@@ -1335,7 +1284,6 @@ bool Simulator::set_fcsr_round64_error(float original, float rounded) {
return ret;
}
-
// For cvt instructions only
void Simulator::round_according_to_fcsr(double toRound, double& rounded,
int32_t& rounded_int, double fs) {
@@ -1378,7 +1326,6 @@ void Simulator::round_according_to_fcsr(double toRound, double& rounded,
}
}
-
void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
int64_t& rounded_int, double fs) {
// 0 RN (round to nearest): Round a result to the nearest
@@ -1420,7 +1367,6 @@ void Simulator::round64_according_to_fcsr(double toRound, double& rounded,
}
}
-
// for cvt instructions only
void Simulator::round_according_to_fcsr(float toRound, float& rounded,
int32_t& rounded_int, float fs) {
@@ -1463,7 +1409,6 @@ void Simulator::round_according_to_fcsr(float toRound, float& rounded,
}
}
-
void Simulator::round64_according_to_fcsr(float toRound, float& rounded,
int64_t& rounded_int, float fs) {
// 0 RN (round to nearest): Round a result to the nearest
@@ -1553,17 +1498,12 @@ void Simulator::set_pc(int64_t value) {
registers_[pc] = value;
}
-
bool Simulator::has_bad_pc() const {
return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
}
-
// Raw access to the PC register without the special adjustment when reading.
-int64_t Simulator::get_pc() const {
- return registers_[pc];
-}
-
+int64_t Simulator::get_pc() const { return registers_[pc]; }
// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
// interrupt is caused. On others it does a funky rotation thing. For now we
@@ -1595,13 +1535,15 @@ void Simulator::TraceRegWr(int64_t value, TraceType t) {
switch (t) {
case WORD:
- SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
- " uint32:%" PRIu32,
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
break;
case DWORD:
- SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64
- " uint64:%" PRIu64,
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64
+ " uint64:%" PRIu64,
value, icount_, value, value);
break;
case FLOAT:
@@ -1732,28 +1674,33 @@ void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) {
switch (t) {
case WORD:
- SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
- ") int32:%" PRId32 " uint32:%" PRIu32,
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
v.fmt_int64, addr, icount_, v.fmt_int32[0], v.fmt_int32[0]);
break;
case DWORD:
- SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
- ") int64:%" PRId64 " uint64:%" PRIu64,
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
value, addr, icount_, value, value);
break;
case FLOAT:
- SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
- ") flt:%e",
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e",
v.fmt_int64, addr, icount_, v.fmt_float[0]);
break;
case DOUBLE:
- SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
- ") dbl:%e",
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") dbl:%e",
v.fmt_int64, addr, icount_, v.fmt_double);
break;
case FLOAT_DOUBLE:
- SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
- ") flt:%e dbl:%e",
+ SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e dbl:%e",
v.fmt_int64, addr, icount_, v.fmt_float[0], v.fmt_double);
break;
default:
@@ -1762,18 +1709,19 @@ void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) {
}
}
-
void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
if (::v8::internal::FLAG_trace_sim) {
switch (t) {
case BYTE:
- SNPrintF(trace_buf_, " %02" PRIx8 " --> [%016" PRIx64
- "] (%" PRId64 ")",
+ SNPrintF(trace_buf_,
+ " %02" PRIx8 " --> [%016" PRIx64 "] (%" PRId64
+ ")",
static_cast<uint8_t>(value), addr, icount_);
break;
case HALF:
- SNPrintF(trace_buf_, " %04" PRIx16 " --> [%016" PRIx64
- "] (%" PRId64 ")",
+ SNPrintF(trace_buf_,
+ " %04" PRIx16 " --> [%016" PRIx64 "] (%" PRId64
+ ")",
static_cast<uint16_t>(value), addr, icount_);
break;
case WORD:
@@ -1863,7 +1811,7 @@ void Simulator::TraceMemWr(int64_t addr, T value) {
// TODO(plind): sign-extend and zero-extend not implmented properly
// on all the ReadXX functions, I don't think re-interpret cast does it.
int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
- if (addr >=0 && addr < 0x400) {
+ if (addr >= 0 && addr < 0x400) {
// This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
" \n",
@@ -1882,9 +1830,8 @@ int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
return 0;
}
-
uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
- if (addr >=0 && addr < 0x400) {
+ if (addr >= 0 && addr < 0x400) {
// This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
" \n",
@@ -1903,7 +1850,6 @@ uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
return 0;
}
-
void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
// This has to be a nullptr-dereference, drop into debugger.
@@ -1957,7 +1903,7 @@ void Simulator::WriteConditionalW(int64_t addr, int32_t value,
}
int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
- if (addr >=0 && addr < 0x400) {
+ if (addr >= 0 && addr < 0x400) {
// This has to be a nullptr-dereference, drop into debugger.
PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
" \n",
@@ -1976,7 +1922,6 @@ int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
return 0;
}
-
void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
if (addr >= 0 && addr < 0x400) {
// This has to be a nullptr-dereference, drop into debugger.
@@ -2042,7 +1987,6 @@ double Simulator::ReadD(int64_t addr, Instruction* instr) {
return 0;
}
-
void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
if ((addr & kDoubleAlignmentMask) == 0 || kArchVariant == kMips64r6) {
local_monitor_.NotifyStore();
@@ -2058,7 +2002,6 @@ void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
DieOrDebug();
}
-
uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
local_monitor_.NotifyLoad();
@@ -2073,7 +2016,6 @@ uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
return 0;
}
-
int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
local_monitor_.NotifyLoad();
@@ -2088,7 +2030,6 @@ int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
return 0;
}
-
void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
local_monitor_.NotifyStore();
@@ -2105,7 +2046,6 @@ void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
DieOrDebug();
}
-
void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
if ((addr & 1) == 0 || kArchVariant == kMips64r6) {
local_monitor_.NotifyStore();
@@ -2122,7 +2062,6 @@ void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
DieOrDebug();
}
-
uint32_t Simulator::ReadBU(int64_t addr) {
local_monitor_.NotifyLoad();
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
@@ -2130,7 +2069,6 @@ uint32_t Simulator::ReadBU(int64_t addr) {
return *ptr & 0xFF;
}
-
int32_t Simulator::ReadB(int64_t addr) {
local_monitor_.NotifyLoad();
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
@@ -2138,7 +2076,6 @@ int32_t Simulator::ReadB(int64_t addr) {
return *ptr;
}
-
void Simulator::WriteB(int64_t addr, uint8_t value) {
local_monitor_.NotifyStore();
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -2148,7 +2085,6 @@ void Simulator::WriteB(int64_t addr, uint8_t value) {
*ptr = value;
}
-
void Simulator::WriteB(int64_t addr, int8_t value) {
local_monitor_.NotifyStore();
base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
@@ -2205,7 +2141,6 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return reinterpret_cast<uintptr_t>(stack_) + 1024;
}
-
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n",
@@ -2213,7 +2148,6 @@ void Simulator::Format(Instruction* instr, const char* format) {
UNIMPLEMENTED_MIPS();
}
-
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
@@ -2221,27 +2155,27 @@ void Simulator::Format(Instruction* instr, const char* format) {
// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
-typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
- int64_t arg2, int64_t arg3,
- int64_t arg4, int64_t arg5,
- int64_t arg6, int64_t arg7,
- int64_t arg8);
+using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8);
// These prototypes handle the four types of FP calls.
-typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPCall)(double darg0);
-typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
+using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
+using SimulatorRuntimeFPCall = double (*)(double darg0);
+using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
+using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
-typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
-typedef void (*SimulatorRuntimeProfilingGetterCall)(
- int64_t arg0, int64_t arg1, void* arg2);
+using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
+ void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. They are also used for debugging with simulator.
@@ -2269,10 +2203,10 @@ void Simulator::SoftwareInterrupt() {
STATIC_ASSERT(kMaxCParameters == 9);
bool fp_call =
- (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
- (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
if (!IsMipsSoftFloatABI) {
// With the hard floating point calling convention, double
@@ -2280,24 +2214,24 @@ void Simulator::SoftwareInterrupt() {
// from there and call the builtin using soft floating point
// convention.
switch (redirection->type()) {
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
- arg2 = get_fpu_register(f14);
- arg3 = get_fpu_register(f15);
- break;
- case ExternalReference::BUILTIN_FP_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
- break;
- case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = get_fpu_register(f12);
- arg1 = get_fpu_register(f13);
- arg2 = get_register(a2);
- break;
- default:
- break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ arg2 = get_fpu_register(f14);
+ arg3 = get_fpu_register(f15);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ arg0 = get_fpu_register(f12);
+ arg1 = get_fpu_register(f13);
+ arg2 = get_register(a2);
+ break;
+ default:
+ break;
}
}
@@ -2306,7 +2240,7 @@ void Simulator::SoftwareInterrupt() {
int64_t saved_ra = get_register(ra);
intptr_t external =
- reinterpret_cast<intptr_t>(redirection->external_function());
+ reinterpret_cast<intptr_t>(redirection->external_function());
// Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
// FPU, or gcc soft-float routines. Hardware FPU is simulated in this
@@ -2344,52 +2278,52 @@ void Simulator::SoftwareInterrupt() {
}
}
switch (redirection->type()) {
- case ExternalReference::BUILTIN_COMPARE_CALL: {
- SimulatorRuntimeCompareCall target =
- reinterpret_cast<SimulatorRuntimeCompareCall>(external);
- iresult = target(dval0, dval1);
- set_register(v0, static_cast<int64_t>(iresult));
- // set_register(v1, static_cast<int64_t>(iresult >> 32));
- break;
- }
- case ExternalReference::BUILTIN_FP_FP_CALL: {
- SimulatorRuntimeFPFPCall target =
- reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
- dresult = target(dval0, dval1);
- SetFpResult(dresult);
- break;
- }
- case ExternalReference::BUILTIN_FP_CALL: {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- dresult = target(dval0);
- SetFpResult(dresult);
- break;
- }
- case ExternalReference::BUILTIN_FP_INT_CALL: {
- SimulatorRuntimeFPIntCall target =
- reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
- dresult = target(dval0, ival);
- SetFpResult(dresult);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- if (::v8::internal::FLAG_trace_sim) {
- switch (redirection->type()) {
- case ExternalReference::BUILTIN_COMPARE_CALL:
- PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(v0, static_cast<int64_t>(iresult));
+ // set_register(v1, static_cast<int64_t>(iresult >> 32));
break;
- case ExternalReference::BUILTIN_FP_FP_CALL:
- case ExternalReference::BUILTIN_FP_CALL:
- case ExternalReference::BUILTIN_FP_INT_CALL:
- PrintF("Returned %f\n", dresult);
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
default:
UNREACHABLE();
break;
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2400,8 +2334,7 @@ void Simulator::SoftwareInterrupt() {
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
target(arg0);
- } else if (
- redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
" \n",
@@ -2410,8 +2343,7 @@ void Simulator::SoftwareInterrupt() {
SimulatorRuntimeProfilingApiCall target =
reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
target(arg0, Redirection::ReverseRedirection(arg1));
- } else if (
- redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
" \n",
@@ -2420,8 +2352,8 @@ void Simulator::SoftwareInterrupt() {
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
target(arg0, arg1);
- } else if (
- redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+ } else if (redirection->type() ==
+ ExternalReference::PROFILING_GETTER_CALL) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
" %08" PRIx64 " \n",
@@ -2434,7 +2366,7 @@ void Simulator::SoftwareInterrupt() {
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
+ reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
PrintF(
"Call to host function at %p "
@@ -2449,9 +2381,9 @@ void Simulator::SoftwareInterrupt() {
set_register(v0, (int64_t)(result.x));
set_register(v1, (int64_t)(result.y));
}
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(v1),
- get_register(v0));
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(v1),
+ get_register(v0));
}
set_register(ra, saved_ra);
set_pc(get_register(ra));
@@ -2470,13 +2402,11 @@ void Simulator::SoftwareInterrupt() {
}
}
-
// Stop helper functions.
bool Simulator::IsWatchpoint(uint64_t code) {
return (code <= kMaxWatchpointCode);
}
-
void Simulator::PrintWatchpoint(uint64_t code) {
MipsDebugger dbg(this);
++break_count_;
@@ -2487,7 +2417,6 @@ void Simulator::PrintWatchpoint(uint64_t code) {
dbg.PrintAllRegs(); // Print registers and continue running.
}
-
void Simulator::HandleStop(uint64_t code, Instruction* instr) {
// Stop if it is enabled, otherwise go on jumping over the stop
// and the message address.
@@ -2497,35 +2426,30 @@ void Simulator::HandleStop(uint64_t code, Instruction* instr) {
}
}
-
bool Simulator::IsStopInstruction(Instruction* instr) {
int32_t func = instr->FunctionFieldRaw();
uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
}
-
bool Simulator::IsEnabledStop(uint64_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK_GT(code, kMaxWatchpointCode);
return !(watched_stops_[code].count & kStopDisabledBit);
}
-
void Simulator::EnableStop(uint64_t code) {
if (!IsEnabledStop(code)) {
watched_stops_[code].count &= ~kStopDisabledBit;
}
}
-
void Simulator::DisableStop(uint64_t code) {
if (IsEnabledStop(code)) {
watched_stops_[code].count |= kStopDisabledBit;
}
}
-
void Simulator::IncreaseStopCounter(uint64_t code) {
DCHECK_LE(code, kMaxStopCode);
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
@@ -2540,7 +2464,6 @@ void Simulator::IncreaseStopCounter(uint64_t code) {
}
}
-
// Print a stop status.
void Simulator::PrintStopInfo(uint64_t code) {
if (code <= kMaxWatchpointCode) {
@@ -2564,7 +2487,6 @@ void Simulator::PrintStopInfo(uint64_t code) {
}
}
-
void Simulator::SignalException(Exception e) {
FATAL("Error: Exception %i raised.", static_cast<int>(e));
}
@@ -3028,15 +2950,15 @@ void Simulator::DecodeTypeRegisterSRsType() {
break;
case SELEQZ_C:
DCHECK_EQ(kArchVariant, kMips64r6);
- SetFPUFloatResult(
- fd_reg(),
- (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg()) : 0.0);
+ SetFPUFloatResult(fd_reg(), (ft_int & 0x1) == 0
+ ? get_fpu_register_float(fs_reg())
+ : 0.0);
break;
case SELNEZ_C:
DCHECK_EQ(kArchVariant, kMips64r6);
- SetFPUFloatResult(
- fd_reg(),
- (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg()) : 0.0);
+ SetFPUFloatResult(fd_reg(), (ft_int & 0x1) != 0
+ ? get_fpu_register_float(fs_reg())
+ : 0.0);
break;
case MOVZ_C: {
DCHECK_EQ(kArchVariant, kMips64r2);
@@ -3073,7 +2995,6 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
}
-
void Simulator::DecodeTypeRegisterDRsType() {
double ft, fs, fd;
uint32_t cc, fcsr_cc;
@@ -3445,7 +3366,6 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
}
-
void Simulator::DecodeTypeRegisterWRsType() {
float fs = get_fpu_register_float(fs_reg());
float ft = get_fpu_register_float(ft_reg());
@@ -3537,7 +3457,6 @@ void Simulator::DecodeTypeRegisterWRsType() {
}
}
-
void Simulator::DecodeTypeRegisterLRsType() {
double fs = get_fpu_register_double(fs_reg());
double ft = get_fpu_register_double(ft_reg());
@@ -3629,14 +3548,12 @@ void Simulator::DecodeTypeRegisterLRsType() {
}
}
-
void Simulator::DecodeTypeRegisterCOP1() {
switch (instr_.RsFieldRaw()) {
case BC1: // Branch on coprocessor condition.
case BC1EQZ:
case BC1NEZ:
UNREACHABLE();
- break;
case CFC1:
// At the moment only FCSR is supported.
DCHECK_EQ(fs_reg(), kFCSRRegister);
@@ -3696,7 +3613,6 @@ void Simulator::DecodeTypeRegisterCOP1() {
}
}
-
void Simulator::DecodeTypeRegisterCOP1X() {
switch (instr_.FunctionFieldRaw()) {
case MADD_S: {
@@ -3740,7 +3656,6 @@ void Simulator::DecodeTypeRegisterCOP1X() {
}
}
-
void Simulator::DecodeTypeRegisterSPECIAL() {
int64_t i64hilo;
uint64_t u64hilo;
@@ -4212,7 +4127,6 @@ void Simulator::DecodeTypeRegisterSPECIAL() {
}
}
-
void Simulator::DecodeTypeRegisterSPECIAL2() {
int64_t alu_out;
switch (instr_.FunctionFieldRaw()) {
@@ -4241,7 +4155,6 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
}
}
-
void Simulator::DecodeTypeRegisterSPECIAL3() {
int64_t alu_out;
switch (instr_.FunctionFieldRaw()) {
@@ -5008,7 +4921,7 @@ void Simulator::DecodeTypeMsaELM() {
template <typename T>
T Simulator::MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m) {
- typedef typename std::make_unsigned<T>::type uT;
+ using uT = typename std::make_unsigned<T>::type;
T res;
switch (opcode) {
case SLLI:
@@ -5201,7 +5114,7 @@ void Simulator::DecodeTypeMsaMI10() {
template <typename T>
T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) {
- typedef typename std::make_unsigned<T>::type uT;
+ using uT = typename std::make_unsigned<T>::type;
T res;
int wt_modulo = wt % (sizeof(T) * 8);
switch (opcode) {
@@ -5481,8 +5394,8 @@ template <typename T_int, typename T_smaller_int, typename T_reg>
void Msa3RInstrHelper_horizontal(const uint32_t opcode, T_reg ws, T_reg wt,
T_reg wd, const int i,
const int num_of_lanes) {
- typedef typename std::make_unsigned<T_int>::type T_uint;
- typedef typename std::make_unsigned<T_smaller_int>::type T_smaller_uint;
+ using T_uint = typename std::make_unsigned<T_int>::type;
+ using T_smaller_uint = typename std::make_unsigned<T_smaller_int>::type;
T_int* wd_p;
T_smaller_int *ws_p, *wt_p;
ws_p = reinterpret_cast<T_smaller_int*>(ws);
@@ -5775,8 +5688,8 @@ void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
template <typename T_int, typename T_int_dbl, typename T_reg>
void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg& wd) {
- // typedef typename std::make_unsigned<T_int>::type T_uint;
- typedef typename std::make_unsigned<T_int_dbl>::type T_uint_dbl;
+ // using T_uint = typename std::make_unsigned<T_int>::type;
+ using T_uint_dbl = typename std::make_unsigned<T_int_dbl>::type;
const T_int max_int = std::numeric_limits<T_int>::max();
const T_int min_int = std::numeric_limits<T_int>::min();
const int shift = kBitsPerByte * sizeof(T_int) - 1;
@@ -6228,7 +6141,7 @@ static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); }
template <typename T_int, typename T_fp, typename T_src, typename T_dst>
T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
Simulator* sim) {
- typedef typename std::make_unsigned<T_int>::type T_uint;
+ using T_uint = typename std::make_unsigned<T_int>::type;
switch (opcode) {
case FCLASS: {
#define SNAN_BIT BIT(0)
@@ -6413,7 +6326,7 @@ T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst& dst,
dst = bit_cast<T_int>(static_cast<T_fp>(src));
break;
case FFINT_U:
- typedef typename std::make_unsigned<T_src>::type uT_src;
+ using uT_src = typename std::make_unsigned<T_src>::type;
dst = bit_cast<T_int>(static_cast<T_fp>(bit_cast<uT_src>(src)));
break;
default:
@@ -6590,7 +6503,6 @@ void Simulator::DecodeTypeRegister() {
}
}
-
// Type 2: instructions using a 16, 21 or 26 bits immediate. (e.g. beq, beqc).
void Simulator::DecodeTypeImmediate() {
// Instruction fields.
@@ -7038,7 +6950,7 @@ void Simulator::DecodeTypeImmediate() {
uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
alu_out = ReadW(addr, instr_.instr());
- alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
+ alu_out = static_cast<uint32_t>(alu_out) >> al_offset * 8;
alu_out |= rt & mask;
set_register(rt_reg, alu_out);
break;
@@ -7327,7 +7239,6 @@ void Simulator::DecodeTypeImmediate() {
}
}
-
// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
void Simulator::DecodeTypeJump() {
SimInstruction simInstr = instr_;
@@ -7354,7 +7265,6 @@ void Simulator::DecodeTypeJump() {
pc_modified_ = true;
}
-
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -7389,8 +7299,8 @@ void Simulator::InstructionDecode(Instruction* instr) {
if (::v8::internal::FLAG_trace_sim) {
PrintF(" 0x%08" PRIxPTR " %-44s %s\n",
- reinterpret_cast<intptr_t>(instr), buffer.start(),
- trace_buf_.start());
+ reinterpret_cast<intptr_t>(instr), buffer.begin(),
+ trace_buf_.begin());
}
if (!pc_modified_) {
@@ -7398,8 +7308,6 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
}
-
-
void Simulator::Execute() {
// Get the PC to simulate. Cannot use the accessor here as we need the
// raw PC value and not the one used as input to arithmetic instructions.
@@ -7564,7 +7472,6 @@ double Simulator::CallFP(Address entry, double d0, double d1) {
}
}
-
uintptr_t Simulator::PushAddress(uintptr_t address) {
int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
@@ -7573,7 +7480,6 @@ uintptr_t Simulator::PushAddress(uintptr_t address) {
return new_sp;
}
-
uintptr_t Simulator::PopAddress() {
int64_t current_sp = get_register(sp);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/execution/mips64/simulator-mips64.h
index 9691bedd75..d1251f5f0e 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/execution/mips64/simulator-mips64.h
@@ -9,20 +9,20 @@
// which will start execution in the Simulator or forwards to the real entry
// on a MIPS HW platform.
-#ifndef V8_MIPS64_SIMULATOR_MIPS64_H_
-#define V8_MIPS64_SIMULATOR_MIPS64_H_
+#ifndef V8_EXECUTION_MIPS64_SIMULATOR_MIPS64_H_
+#define V8_EXECUTION_MIPS64_SIMULATOR_MIPS64_H_
// globals.h defines USE_SIMULATOR.
-#include "src/globals.h"
+#include "src/common/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
-#include "src/allocation.h"
-#include "src/assembler.h"
#include "src/base/hashmap.h"
-#include "src/mips64/constants-mips64.h"
-#include "src/simulator-base.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/mips64/constants-mips64.h"
+#include "src/execution/simulator-base.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -42,20 +42,16 @@ class CachePage {
static const int kLineLength = 1 << kLineShift;
static const int kLineMask = kLineLength - 1;
- CachePage() {
- memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
- }
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
char* ValidityByte(int offset) {
return &validity_map_[offset >> kLineShift];
}
- char* CachedData(int offset) {
- return &data_[offset];
- }
+ char* CachedData(int offset) { return &data_[offset]; }
private:
- char data_[kPageSize]; // The cached data.
+ char data_[kPageSize]; // The cached data.
static const int kValidityMapSize = kPageSize >> kLineShift;
char validity_map_[kValidityMapSize]; // One byte per line.
};
@@ -102,12 +98,32 @@ class Simulator : public SimulatorBase {
no_reg = -1,
zero_reg = 0,
at,
- v0, v1,
- a0, a1, a2, a3, a4, a5, a6, a7,
- t0, t1, t2, t3,
- s0, s1, s2, s3, s4, s5, s6, s7,
- t8, t9,
- k0, k1,
+ v0,
+ v1,
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ t0,
+ t1,
+ t2,
+ t3,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ t8,
+ t9,
+ k0,
+ k1,
gp,
sp,
s8,
@@ -115,7 +131,7 @@ class Simulator : public SimulatorBase {
// LO, HI, and pc.
LO,
HI,
- pc, // pc must be the last register.
+ pc, // pc must be the last register.
kNumSimuRegisters,
// aliases
fp = s8
@@ -124,10 +140,38 @@ class Simulator : public SimulatorBase {
// Coprocessor registers.
// Generated code will always use doubles. So we will only use even registers.
enum FPURegister {
- f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
- f12, f13, f14, f15, // f12 and f14 are arguments FPURegisters.
- f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
- f26, f27, f28, f29, f30, f31,
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15, // f12 and f14 are arguments FPURegisters.
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
kNumFPURegisters
};
@@ -302,7 +346,7 @@ class Simulator : public SimulatorBase {
// MSA Data Format
enum MSADataFormat { MSA_VECT = 0, MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD };
- typedef union {
+ union msa_reg_t {
int8_t b[kMSALanesByte];
uint8_t ub[kMSALanesByte];
int16_t h[kMSALanesHalf];
@@ -311,7 +355,7 @@ class Simulator : public SimulatorBase {
uint32_t uw[kMSALanesWord];
int64_t d[kMSALanesDword];
uint64_t ud[kMSALanesDword];
- } msa_reg_t;
+ };
// Read and write memory.
inline uint32_t ReadBU(int64_t addr);
@@ -375,7 +419,6 @@ class Simulator : public SimulatorBase {
void DecodeTypeRegisterSPECIAL();
-
void DecodeTypeRegisterSPECIAL2();
void DecodeTypeRegisterSPECIAL3();
@@ -495,7 +538,6 @@ class Simulator : public SimulatorBase {
void IncreaseStopCounter(uint64_t code);
void PrintStopInfo(uint64_t code);
-
// Executes one instruction.
void InstructionDecode(Instruction* instr);
// Execute one instruction placed in a branch delay slot.
@@ -679,4 +721,4 @@ class Simulator : public SimulatorBase {
} // namespace v8
#endif // defined(USE_SIMULATOR)
-#endif // V8_MIPS64_SIMULATOR_MIPS64_H_
+#endif // V8_EXECUTION_MIPS64_SIMULATOR_MIPS64_H_
diff --git a/deps/v8/src/ppc/frame-constants-ppc.cc b/deps/v8/src/execution/ppc/frame-constants-ppc.cc
index 546d495df8..05cde9c8ee 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.cc
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.cc
@@ -4,12 +4,11 @@
#if V8_TARGET_ARCH_PPC
-#include "src/ppc/frame-constants-ppc.h"
-
-#include "src/assembler-inl.h"
-#include "src/frame-constants.h"
-#include "src/macro-assembler.h"
+#include "src/execution/ppc/frame-constants-ppc.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ppc/frame-constants-ppc.h b/deps/v8/src/execution/ppc/frame-constants-ppc.h
index 0d92d853f8..07cc2715b1 100644
--- a/deps/v8/src/ppc/frame-constants-ppc.h
+++ b/deps/v8/src/execution/ppc/frame-constants-ppc.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PPC_FRAME_CONSTANTS_PPC_H_
-#define V8_PPC_FRAME_CONSTANTS_PPC_H_
+#ifndef V8_EXECUTION_PPC_FRAME_CONSTANTS_PPC_H_
+#define V8_EXECUTION_PPC_FRAME_CONSTANTS_PPC_H_
#include "src/base/macros.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -62,4 +62,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_PPC_FRAME_CONSTANTS_PPC_H_
+#endif // V8_EXECUTION_PPC_FRAME_CONSTANTS_PPC_H_
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index a031814f5e..6cd4daa33c 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ppc/simulator-ppc.h"
+#include "src/execution/ppc/simulator-ppc.h"
#if defined(USE_SIMULATOR)
@@ -10,17 +10,18 @@
#include <stdlib.h>
#include <cmath>
-#include "src/assembler.h"
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-#include "src/ppc/constants-ppc.h"
-#include "src/ppc/frame-constants-ppc.h"
-#include "src/register-configuration.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/ppc/constants-ppc.h"
+#include "src/codegen/register-configuration.h"
+#include "src/diagnostics/disasm.h"
+#include "src/execution/ppc/frame-constants-ppc.h"
+#include "src/heap/combined-heap.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
+#include "src/utils/ostreams.h"
// Only build the simulator if not compiling for real PPC hardware.
namespace v8 {
@@ -90,17 +91,14 @@ intptr_t PPCDebugger::GetRegisterValue(int regnum) {
return sim_->get_register(regnum);
}
-
double PPCDebugger::GetRegisterPairDoubleValue(int regnum) {
return sim_->get_double_from_register_pair(regnum);
}
-
double PPCDebugger::GetFPDoubleRegisterValue(int regnum) {
return sim_->get_double_from_d_register(regnum);
}
-
bool PPCDebugger::GetValue(const char* desc, intptr_t* value) {
int regnum = Registers::Number(desc);
if (regnum != kNoRegister) {
@@ -118,7 +116,6 @@ bool PPCDebugger::GetValue(const char* desc, intptr_t* value) {
return false;
}
-
bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
int regnum = DoubleRegisters::Number(desc);
if (regnum != kNoRegister) {
@@ -128,7 +125,6 @@ bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
return false;
}
-
bool PPCDebugger::SetBreakpoint(Instruction* break_pc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != nullptr) {
@@ -143,7 +139,6 @@ bool PPCDebugger::SetBreakpoint(Instruction* break_pc) {
return true;
}
-
bool PPCDebugger::DeleteBreakpoint(Instruction* break_pc) {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
@@ -154,21 +149,18 @@ bool PPCDebugger::DeleteBreakpoint(Instruction* break_pc) {
return true;
}
-
void PPCDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-
void PPCDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
-
void PPCDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@@ -203,7 +195,7 @@ void PPCDebugger::Debug() {
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(), buffer.start());
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(), buffer.begin());
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
@@ -245,7 +237,7 @@ void PPCDebugger::Debug() {
dasm.InstructionDecode(buffer,
reinterpret_cast<byte*>(sim_->get_pc()));
PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(),
- buffer.start());
+ buffer.begin());
sim_->ExecuteInstruction(
reinterpret_cast<Instruction*>(sim_->get_pc()));
}
@@ -401,12 +393,13 @@ void PPCDebugger::Debug() {
reinterpret_cast<intptr_t>(cur), *cur, *cur);
Object obj(*cur);
Heap* current_heap = sim_->isolate_->heap();
- if (obj.IsSmi() || current_heap->Contains(HeapObject::cast(obj))) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
PrintF(" (");
if (obj.IsSmi()) {
PrintF("smi %d", Smi::ToInt(obj));
} else {
- obj->ShortPrint();
+ obj.ShortPrint();
}
PrintF(")");
}
@@ -458,7 +451,7 @@ void PPCDebugger::Debug() {
prev = cur;
cur += dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
- buffer.start());
+ buffer.begin());
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
@@ -639,19 +632,16 @@ bool Simulator::ICacheMatch(void* one, void* two) {
return one == two;
}
-
static uint32_t ICacheHash(void* key) {
return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
}
-
static bool AllOnOnePage(uintptr_t start, int size) {
intptr_t start_page = (start & ~CachePage::kPageMask);
intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
return start_page == end_page;
}
-
void Simulator::set_last_debugger_input(char* input) {
DeleteArray(last_debugger_input_);
last_debugger_input_ = input;
@@ -692,7 +682,6 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
return reinterpret_cast<CachePage*>(entry->value);
}
-
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, int size) {
@@ -728,7 +717,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
}
-
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
@@ -769,10 +757,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
last_debugger_input_ = nullptr;
}
-Simulator::~Simulator() {
- free(stack_);
-}
-
+Simulator::~Simulator() { free(stack_); }
// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
@@ -789,14 +774,12 @@ Simulator* Simulator::current(Isolate* isolate) {
return sim;
}
-
// Sets the register in the architecture state.
void Simulator::set_register(int reg, intptr_t value) {
DCHECK((reg >= 0) && (reg < kNumGPRs));
registers_[reg] = value;
}
-
// Get the register from the architecture state.
intptr_t Simulator::get_register(int reg) const {
DCHECK((reg >= 0) && (reg < kNumGPRs));
@@ -807,7 +790,6 @@ intptr_t Simulator::get_register(int reg) const {
return registers_[reg];
}
-
double Simulator::get_double_from_register_pair(int reg) {
DCHECK((reg >= 0) && (reg < kNumGPRs) && ((reg % 2) == 0));
@@ -822,22 +804,21 @@ double Simulator::get_double_from_register_pair(int reg) {
return (dm_val);
}
-
// Raw access to the PC register.
void Simulator::set_pc(intptr_t value) {
pc_modified_ = true;
special_reg_pc_ = value;
}
-
bool Simulator::has_bad_pc() const {
return ((special_reg_pc_ == bad_lr) || (special_reg_pc_ == end_sim_pc));
}
-
// Raw access to the PC register without the special adjustment when reading.
intptr_t Simulator::get_pc() const { return special_reg_pc_; }
+// Accessor to the internal Link Register
+intptr_t Simulator::get_lr() const { return special_reg_lr_; }
// Runtime FP routines take:
// - two double arguments
@@ -849,13 +830,11 @@ void Simulator::GetFpArgs(double* x, double* y, intptr_t* z) {
*z = get_register(3);
}
-
// The return value is in d1.
void Simulator::SetFpResult(const double& result) {
set_d_register_from_double(1, result);
}
-
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
#if 0 // A good idea to trash volatile registers, needs to be done
@@ -899,7 +878,6 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return reinterpret_cast<uintptr_t>(stack_) + stack_protection_size_;
}
-
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
@@ -907,7 +885,6 @@ void Simulator::Format(Instruction* instr, const char* format) {
UNIMPLEMENTED();
}
-
// Calculate C flag value for additions.
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
@@ -918,7 +895,6 @@ bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
}
-
// Calculate C flag value for subtractions.
bool Simulator::BorrowFrom(int32_t left, int32_t right) {
uint32_t uleft = static_cast<uint32_t>(left);
@@ -927,7 +903,6 @@ bool Simulator::BorrowFrom(int32_t left, int32_t right) {
return (uright > uleft);
}
-
// Calculate V flag value for additions and subtractions.
bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
bool addition) {
@@ -936,14 +911,12 @@ bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
// operands have the same sign
overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
// and operands and result have different sign
- &&
- ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
} else {
// operands have different signs
overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
// and first operand and result have different signs
- &&
- ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
}
return overflow;
}
@@ -954,30 +927,30 @@ static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
}
// Calls into the V8 runtime.
-typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5,
- intptr_t arg6, intptr_t arg7,
- intptr_t arg8);
-typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+using SimulatorRuntimeCall = intptr_t (*)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5,
+ intptr_t arg6, intptr_t arg7,
+ intptr_t arg8);
+using SimulatorRuntimePairCall = ObjectPair (*)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
// These prototypes handle the four types of FP calls.
-typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPCall)(double darg0);
-typedef double (*SimulatorRuntimeFPIntCall)(double darg0, intptr_t arg0);
+using SimulatorRuntimeCompareCall = int (*)(double darg0, double darg1);
+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
+using SimulatorRuntimeFPCall = double (*)(double darg0);
+using SimulatorRuntimeFPIntCall = double (*)(double darg0, intptr_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef void (*SimulatorRuntimeDirectApiCall)(intptr_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(intptr_t arg0, void* arg1);
+using SimulatorRuntimeDirectApiCall = void (*)(intptr_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(intptr_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
-typedef void (*SimulatorRuntimeDirectGetterCall)(intptr_t arg0, intptr_t arg1);
-typedef void (*SimulatorRuntimeProfilingGetterCall)(intptr_t arg0,
- intptr_t arg1, void* arg2);
+using SimulatorRuntimeDirectGetterCall = void (*)(intptr_t arg0, intptr_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(intptr_t arg0,
+ intptr_t arg1, void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -1261,19 +1234,16 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
}
-
// Stop helper functions.
bool Simulator::isStopInstruction(Instruction* instr) {
return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
}
-
bool Simulator::isWatchedStop(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
return code < kNumOfWatchedStops;
}
-
bool Simulator::isEnabledStop(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
// Unwatched stops are always enabled.
@@ -1281,7 +1251,6 @@ bool Simulator::isEnabledStop(uint32_t code) {
!(watched_stops_[code].count & kStopDisabledBit);
}
-
void Simulator::EnableStop(uint32_t code) {
DCHECK(isWatchedStop(code));
if (!isEnabledStop(code)) {
@@ -1289,7 +1258,6 @@ void Simulator::EnableStop(uint32_t code) {
}
}
-
void Simulator::DisableStop(uint32_t code) {
DCHECK(isWatchedStop(code));
if (isEnabledStop(code)) {
@@ -1297,7 +1265,6 @@ void Simulator::DisableStop(uint32_t code) {
}
}
-
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
@@ -1313,7 +1280,6 @@ void Simulator::IncreaseStopCounter(uint32_t code) {
}
}
-
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
@@ -1335,7 +1301,6 @@ void Simulator::PrintStopInfo(uint32_t code) {
}
}
-
void Simulator::SetCR0(intptr_t result, bool setSO) {
int bf = 0;
if (result < 0) {
@@ -1353,7 +1318,6 @@ void Simulator::SetCR0(intptr_t result, bool setSO) {
condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
}
-
void Simulator::ExecuteBranchConditional(Instruction* instr, BCType type) {
int bo = instr->Bits(25, 21) << 21;
int condition_bit = instr->Bits(20, 16);
@@ -1378,7 +1342,7 @@ void Simulator::ExecuteBranchConditional(Instruction* instr, BCType type) {
special_reg_ctr_ -= 1;
if ((special_reg_ctr_ == 0) != (bo == DCBEZ)) return;
break;
- case BA: { // Branch always
+ case BA: { // Branch always
break;
}
default:
@@ -1936,7 +1900,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
- case STFSUX: V8_FALLTHROUGH;
+ case STFSUX:
+ V8_FALLTHROUGH;
case STFSX: {
int frs = instr->RSValue();
int ra = instr->RAValue();
@@ -1966,7 +1931,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
- case STFDUX: V8_FALLTHROUGH;
+ case STFDUX:
+ V8_FALLTHROUGH;
case STFDX: {
int frs = instr->RSValue();
int ra = instr->RAValue();
@@ -3015,7 +2981,8 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
- case STFSU: V8_FALLTHROUGH;
+ case STFSU:
+ V8_FALLTHROUGH;
case STFS: {
int frs = instr->RSValue();
int ra = instr->RAValue();
@@ -3493,7 +3460,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
return;
}
-
#if V8_TARGET_ARCH_PPC64
case RLDICL: {
int ra = instr->RAValue();
@@ -3694,7 +3660,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
} // NOLINT
-
void Simulator::Trace(Instruction* instr) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@@ -3702,10 +3667,9 @@ void Simulator::Trace(Instruction* instr) {
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
PrintF("%05d %08" V8PRIxPTR " %s\n", icount_,
- reinterpret_cast<intptr_t>(instr), buffer.start());
+ reinterpret_cast<intptr_t>(instr), buffer.begin());
}
-
// Executes the current instruction.
void Simulator::ExecuteInstruction(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -3932,7 +3896,6 @@ double Simulator::CallFPReturnsDouble(Address entry, double d0, double d1) {
return get_double_from_d_register(1);
}
-
uintptr_t Simulator::PushAddress(uintptr_t address) {
uintptr_t new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
@@ -3941,7 +3904,6 @@ uintptr_t Simulator::PushAddress(uintptr_t address) {
return new_sp;
}
-
uintptr_t Simulator::PopAddress() {
uintptr_t current_sp = get_register(sp);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/execution/ppc/simulator-ppc.h
index 02d1b5a350..34a39f608b 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/execution/ppc/simulator-ppc.h
@@ -9,23 +9,23 @@
// which will start execution in the Simulator or forwards to the real entry
// on a PPC HW platform.
-#ifndef V8_PPC_SIMULATOR_PPC_H_
-#define V8_PPC_SIMULATOR_PPC_H_
+#ifndef V8_EXECUTION_PPC_SIMULATOR_PPC_H_
+#define V8_EXECUTION_PPC_SIMULATOR_PPC_H_
// globals.h defines USE_SIMULATOR.
-#include "src/globals.h"
+#include "src/common/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
-#include "src/allocation.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/mutex.h"
+#include "src/utils/allocation.h"
-#include "src/assembler.h"
#include "src/base/hashmap.h"
-#include "src/ppc/constants-ppc.h"
-#include "src/simulator-base.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/ppc/constants-ppc.h"
+#include "src/execution/simulator-base.h"
namespace v8 {
namespace internal {
@@ -163,6 +163,9 @@ class Simulator : public SimulatorBase {
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
+ // Accessor to the internal Link Register
+ intptr_t get_lr() const;
+
// Accessor to the internal simulator stack area.
uintptr_t StackLimit(uintptr_t c_limit) const;
@@ -415,4 +418,4 @@ class Simulator : public SimulatorBase {
} // namespace v8
#endif // defined(USE_SIMULATOR)
-#endif // V8_PPC_SIMULATOR_PPC_H_
+#endif // V8_EXECUTION_PPC_SIMULATOR_PPC_H_
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index ab94c25989..0ed36cbe10 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/runtime-profiler.h"
+#include "src/execution/runtime-profiler.h"
-#include "src/assembler.h"
#include "src/base/platform/platform.h"
-#include "src/bootstrapper.h"
-#include "src/compilation-cache.h"
-#include "src/compiler.h"
-#include "src/execution.h"
-#include "src/frames-inl.h"
-#include "src/global-handles.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/codegen/compiler.h"
+#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
+#include "src/handles/global-handles.h"
+#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
#include "src/tracing/trace-event.h"
@@ -36,9 +36,9 @@ static const int kOSRBytecodeSizeAllowancePerTick = 48;
// the very first time it is seen on the stack.
static const int kMaxBytecodeSizeForEarlyOpt = 90;
-#define OPTIMIZATION_REASON_LIST(V) \
- V(DoNotOptimize, "do not optimize") \
- V(HotAndStable, "hot and stable") \
+#define OPTIMIZATION_REASON_LIST(V) \
+ V(DoNotOptimize, "do not optimize") \
+ V(HotAndStable, "hot and stable") \
V(SmallFunction, "small function")
enum class OptimizationReason : uint8_t {
@@ -58,20 +58,20 @@ char const* OptimizationReasonToString(OptimizationReason reason) {
return reasons[index];
}
+#undef OPTIMIZATION_REASON_LIST
+
std::ostream& operator<<(std::ostream& os, OptimizationReason reason) {
return os << OptimizationReasonToString(reason);
}
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
- : isolate_(isolate),
- any_ic_changed_(false) {
-}
+ : isolate_(isolate), any_ic_changed_(false) {}
static void TraceRecompile(JSFunction function, const char* reason,
const char* type) {
if (FLAG_trace_opt) {
PrintF("[marking ");
- function->ShortPrint();
+ function.ShortPrint();
PrintF(" for %s recompilation, reason: %s", type, reason);
PrintF("]\n");
}
@@ -80,41 +80,41 @@ static void TraceRecompile(JSFunction function, const char* reason,
void RuntimeProfiler::Optimize(JSFunction function, OptimizationReason reason) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
TraceRecompile(function, OptimizationReasonToString(reason), "optimized");
- function->MarkForOptimization(ConcurrencyMode::kConcurrent);
+ function.MarkForOptimization(ConcurrencyMode::kConcurrent);
}
void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
int loop_nesting_levels) {
JSFunction function = frame->function();
- SharedFunctionInfo shared = function->shared();
- if (!FLAG_use_osr || !shared->IsUserJavaScript()) {
+ SharedFunctionInfo shared = function.shared();
+ if (!FLAG_use_osr || !shared.IsUserJavaScript()) {
return;
}
// If the code is not optimizable, don't try OSR.
- if (shared->optimization_disabled()) return;
+ if (shared.optimization_disabled()) return;
// We're using on-stack replacement: Store new loop nesting level in
// BytecodeArray header so that certain back edges in any interpreter frame
// for this bytecode will trigger on-stack replacement for that frame.
if (FLAG_trace_osr) {
PrintF("[OSR - arming back edges in ");
- function->PrintName();
+ function.PrintName();
PrintF("]\n");
}
DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
- int level = frame->GetBytecodeArray()->osr_loop_nesting_level();
- frame->GetBytecodeArray()->set_osr_loop_nesting_level(
+ int level = frame->GetBytecodeArray().osr_loop_nesting_level();
+ frame->GetBytecodeArray().set_osr_loop_nesting_level(
Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}
void RuntimeProfiler::MaybeOptimize(JSFunction function,
InterpretedFrame* frame) {
- if (function->IsInOptimizationQueue()) {
+ if (function.IsInOptimizationQueue()) {
if (FLAG_trace_opt_verbose) {
PrintF("[function ");
- function->PrintName();
+ function.PrintName();
PrintF(" is already in optimization queue]\n");
}
return;
@@ -127,10 +127,10 @@ void RuntimeProfiler::MaybeOptimize(JSFunction function,
return;
}
- if (function->shared()->optimization_disabled()) return;
+ if (function.shared().optimization_disabled()) return;
OptimizationReason reason =
- ShouldOptimize(function, function->shared()->GetBytecodeArray());
+ ShouldOptimize(function, function.shared().GetBytecodeArray());
if (reason != OptimizationReason::kDoNotOptimize) {
Optimize(function, reason);
@@ -138,19 +138,19 @@ void RuntimeProfiler::MaybeOptimize(JSFunction function,
}
bool RuntimeProfiler::MaybeOSR(JSFunction function, InterpretedFrame* frame) {
- int ticks = function->feedback_vector()->profiler_ticks();
+ int ticks = function.feedback_vector().profiler_ticks();
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize.
- if (function->IsMarkedForOptimization() ||
- function->IsMarkedForConcurrentOptimization() ||
- function->HasOptimizedCode()) {
+ if (function.IsMarkedForOptimization() ||
+ function.IsMarkedForConcurrentOptimization() ||
+ function.HasOptimizedCode()) {
// Attempt OSR if we are still running interpreted code even though the
// the function has long been marked or even already been optimized.
int64_t allowance =
kOSRBytecodeSizeAllowanceBase +
static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
- if (function->shared()->GetBytecodeArray()->length() <= allowance) {
+ if (function.shared().GetBytecodeArray().length() <= allowance) {
AttemptOnStackReplacement(frame);
}
return true;
@@ -160,27 +160,27 @@ bool RuntimeProfiler::MaybeOSR(JSFunction function, InterpretedFrame* frame) {
OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
BytecodeArray bytecode) {
- int ticks = function->feedback_vector()->profiler_ticks();
+ int ticks = function.feedback_vector().profiler_ticks();
int ticks_for_optimization =
kProfilerTicksBeforeOptimization +
- (bytecode->length() / kBytecodeSizeAllowancePerTick);
+ (bytecode.length() / kBytecodeSizeAllowancePerTick);
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
} else if (!any_ic_changed_ &&
- bytecode->length() < kMaxBytecodeSizeForEarlyOpt) {
+ bytecode.length() < kMaxBytecodeSizeForEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
return OptimizationReason::kSmallFunction;
} else if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
- function->PrintName();
+ function.PrintName();
PrintF(", not enough ticks: %d/%d and ", ticks,
kProfilerTicksBeforeOptimization);
if (any_ic_changed_) {
PrintF("ICs changed]\n");
} else {
PrintF(" too large for small function optimization: %d/%d]\n",
- bytecode->length(), kMaxBytecodeSizeForEarlyOpt);
+ bytecode.length(), kMaxBytecodeSizeForEarlyOpt);
}
}
return OptimizationReason::kDoNotOptimize;
@@ -201,24 +201,23 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
int frame_count = 0;
int frame_count_limit = FLAG_frame_count;
for (JavaScriptFrameIterator it(isolate_);
- frame_count++ < frame_count_limit && !it.done();
- it.Advance()) {
+ frame_count++ < frame_count_limit && !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (!frame->is_interpreted()) continue;
JSFunction function = frame->function();
- DCHECK(function->shared()->is_compiled());
- if (!function->shared()->IsInterpreted()) continue;
+ DCHECK(function.shared().is_compiled());
+ if (!function.shared().IsInterpreted()) continue;
- if (!function->has_feedback_vector()) continue;
+ if (!function.has_feedback_vector()) continue;
MaybeOptimize(function, InterpretedFrame::cast(frame));
// TODO(leszeks): Move this increment to before the maybe optimize checks,
// and update the tests to assume the increment has already happened.
- int ticks = function->feedback_vector()->profiler_ticks();
+ int ticks = function.feedback_vector().profiler_ticks();
if (ticks < Smi::kMaxValue) {
- function->feedback_vector()->set_profiler_ticks(ticks + 1);
+ function.feedback_vector().set_profiler_ticks(ticks + 1);
}
}
any_ic_changed_ = false;
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/execution/runtime-profiler.h
index 7e29d57bdc..728ea19266 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/execution/runtime-profiler.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_RUNTIME_PROFILER_H_
-#define V8_RUNTIME_PROFILER_H_
+#ifndef V8_EXECUTION_RUNTIME_PROFILER_H_
+#define V8_EXECUTION_RUNTIME_PROFILER_H_
-#include "src/allocation.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -44,4 +44,4 @@ class RuntimeProfiler {
} // namespace internal
} // namespace v8
-#endif // V8_RUNTIME_PROFILER_H_
+#endif // V8_EXECUTION_RUNTIME_PROFILER_H_
diff --git a/deps/v8/src/s390/frame-constants-s390.cc b/deps/v8/src/execution/s390/frame-constants-s390.cc
index c91a826ccb..ea36f6b370 100644
--- a/deps/v8/src/s390/frame-constants-s390.cc
+++ b/deps/v8/src/execution/s390/frame-constants-s390.cc
@@ -4,12 +4,11 @@
#if V8_TARGET_ARCH_S390
-#include "src/s390/frame-constants-s390.h"
-
-#include "src/assembler-inl.h"
-#include "src/frame-constants.h"
-#include "src/macro-assembler.h"
+#include "src/execution/s390/frame-constants-s390.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/s390/frame-constants-s390.h b/deps/v8/src/execution/s390/frame-constants-s390.h
index 617c09380e..a48a78fd42 100644
--- a/deps/v8/src/s390/frame-constants-s390.h
+++ b/deps/v8/src/execution/s390/frame-constants-s390.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_S390_FRAME_CONSTANTS_S390_H_
-#define V8_S390_FRAME_CONSTANTS_S390_H_
+#ifndef V8_EXECUTION_S390_FRAME_CONSTANTS_S390_H_
+#define V8_EXECUTION_S390_FRAME_CONSTANTS_S390_H_
#include "src/base/macros.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -68,4 +68,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_S390_FRAME_CONSTANTS_S390_H_
+#endif // V8_EXECUTION_S390_FRAME_CONSTANTS_S390_H_
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 588d1b6e40..8093497168 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/s390/simulator-s390.h"
+#include "src/execution/s390/simulator-s390.h"
// Only build the simulator if not compiling for real s390 hardware.
#if defined(USE_SIMULATOR)
@@ -11,16 +11,17 @@
#include <stdlib.h>
#include <cmath>
-#include "src/assembler.h"
#include "src/base/bits.h"
#include "src/base/once.h"
-#include "src/disasm.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-#include "src/register-configuration.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/codegen/s390/constants-s390.h"
+#include "src/diagnostics/disasm.h"
+#include "src/heap/combined-heap.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
-#include "src/s390/constants-s390.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -31,6 +32,8 @@ namespace internal {
// Windows C Run-Time Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
+const Simulator::fpr_t Simulator::fp_zero;
+
// The S390Debugger class is used by the simulator while debugging simulated
// z/Architecture code.
class S390Debugger {
@@ -200,7 +203,7 @@ void S390Debugger::Debug() {
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(), buffer.start());
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(), buffer.begin());
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
@@ -242,30 +245,30 @@ void S390Debugger::Debug() {
if (GetValue(arg1, &value)) {
// Interpret a numeric argument as the number of instructions to
// step past.
- for (int i = 1; (!sim_->has_bad_pc()) && i < value; i++) {
+ for (int i = 1; (!sim_->has_bad_pc()) && i < value; i++) {
dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
+ reinterpret_cast<byte*>(sim_->get_pc()));
PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(),
- buffer.start());
+ buffer.begin());
sim_->ExecuteInstruction(
- reinterpret_cast<Instruction*>(sim_->get_pc()));
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
}
} else {
// Otherwise treat it as the mnemonic of the opcode to stop at.
char mnemonic[256];
while (!sim_->has_bad_pc()) {
dasm.InstructionDecode(buffer,
- reinterpret_cast<byte*>(sim_->get_pc()));
- char* mnemonicStart = buffer.start();
+ reinterpret_cast<byte*>(sim_->get_pc()));
+ char* mnemonicStart = buffer.begin();
while (*mnemonicStart != 0 && *mnemonicStart != ' ')
mnemonicStart++;
SScanF(mnemonicStart, "%s", mnemonic);
if (!strcmp(arg1, mnemonic)) break;
PrintF(" 0x%08" V8PRIxPTR " %s\n", sim_->get_pc(),
- buffer.start());
+ buffer.begin());
sim_->ExecuteInstruction(
- reinterpret_cast<Instruction*>(sim_->get_pc()));
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
}
}
}
@@ -422,9 +425,9 @@ void S390Debugger::Debug() {
Heap* current_heap = sim_->isolate_->heap();
if (obj.IsSmi()) {
PrintF(" (smi %d)", Smi::ToInt(obj));
- } else if (current_heap->Contains(HeapObject::cast(obj))) {
+ } else if (IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
PrintF(" (");
- obj->ShortPrint();
+ obj.ShortPrint();
PrintF(")");
}
PrintF("\n");
@@ -474,7 +477,7 @@ void S390Debugger::Debug() {
prev = cur;
cur += dasm.InstructionDecode(buffer, cur);
PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
- buffer.start());
+ buffer.begin());
numInstructions--;
}
} else if (strcmp(cmd, "gdb") == 0) {
@@ -741,11 +744,59 @@ void Simulator::EvalTableInit() {
EvalTable[i] = &Simulator::Evaluate_Unknown;
}
-#define S390_SUPPORTED_VECTOR_OPCODE_LIST(V) \
- V(vfs, VFS, 0xE7E2) /* type = VRR_C VECTOR FP SUBTRACT */ \
- V(vfa, VFA, 0xE7E3) /* type = VRR_C VECTOR FP ADD */ \
- V(vfd, VFD, 0xE7E5) /* type = VRR_C VECTOR FP DIVIDE */ \
- V(vfm, VFM, 0xE7E7) /* type = VRR_C VECTOR FP MULTIPLY */
+#define S390_SUPPORTED_VECTOR_OPCODE_LIST(V) \
+ V(vst, VST, 0xE70E) /* type = VRX VECTOR STORE */ \
+ V(vl, VL, 0xE706) /* type = VRX VECTOR LOAD */ \
+ V(vlgv, VLGV, 0xE721) /* type = VRS_C VECTOR LOAD GR FROM VR ELEMENT */ \
+ V(vlvg, VLVG, 0xE722) /* type = VRS_B VECTOR LOAD VR ELEMENT FROM GR */ \
+ V(vrep, VREP, 0xE74D) /* type = VRI_C VECTOR REPLICATE */ \
+ V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
+ V(vrepi, VREPI, 0xE745) /* type = VRI_A VECTOR REPLICATE IMMEDIATE */ \
+ V(vlr, VLR, 0xE756) /* type = VRR_A VECTOR LOAD */ \
+ V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
+ V(vlef, VLEF, 0xE703) /* type = VRX VECTOR LOAD ELEMENT (32) */ \
+ V(va, VA, 0xE7F3) /* type = VRR_C VECTOR ADD */ \
+ V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
+ V(vml, VML, 0xE7A2) /* type = VRR_C VECTOR MULTIPLY LOW */ \
+ V(vsum, VSUM, 0xE764) /* type = VRR_C VECTOR SUM ACROSS WORD */ \
+ V(vsumg, VSUMG, 0xE765) /* type = VRR_C VECTOR SUM ACROSS DOUBLEWORD */ \
+ V(vpk, VPK, 0xE794) /* type = VRR_C VECTOR PACK */ \
+ V(vpks, VPKS, 0xE797) /* type = VRR_B VECTOR PACK SATURATE */ \
+ V(vpkls, VPKLS, 0xE795) /* type = VRR_B VECTOR PACK LOGICAL SATURATE */ \
+ V(vupll, VUPLL, 0xE7D4) /* type = VRR_A VECTOR UNPACK LOGICAL LOW */ \
+ V(vuplh, VUPLH, 0xE7D5) /* type = VRR_A VECTOR UNPACK LOGICAL HIGH */ \
+ V(vupl, VUPL, 0xE7D6) /* type = VRR_A VECTOR UNPACK LOW */ \
+ V(vuph, VUPH, 0xE7D7) /* type = VRR_A VECTOR UNPACK HIGH */ \
+ V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
+ V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
+ V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
+ V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */ \
+ V(vceq, VCEQ, 0xE7F8) /* type = VRR_B VECTOR COMPARE EQUAL */ \
+ V(vx, VX, 0xE76D) /* type = VRR_C VECTOR EXCLUSIVE OR */ \
+ V(vchl, VCHL, 0xE7F9) /* type = VRR_B VECTOR COMPARE HIGH LOGICAL */ \
+ V(vch, VCH, 0xE7FB) /* type = VRR_B VECTOR COMPARE HIGH */ \
+ V(vo, VO, 0xE76A) /* type = VRR_C VECTOR OR */ \
+ V(vn, VN, 0xE768) /* type = VRR_C VECTOR AND */ \
+ V(vlc, VLC, 0xE7DE) /* type = VRR_A VECTOR LOAD COMPLEMENT */ \
+ V(vsel, VSEL, 0xE78D) /* type = VRR_E VECTOR SELECT */ \
+ V(vtm, VTM, 0xE7D8) /* type = VRR_A VECTOR TEST UNDER MASK */ \
+ V(vesl, VESL, 0xE730) /* type = VRS_A VECTOR ELEMENT SHIFT LEFT */ \
+ V(vesrl, VESRL, \
+ 0xE738) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT LOGICAL */ \
+ V(vesra, VESRA, \
+ 0xE73A) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ \
+ V(vfsq, VFSQ, 0xE7CE) /* type = VRR_A VECTOR FP SQUARE ROOT */ \
+ V(vfmax, VFMAX, 0xE7EF) /* type = VRR_C VECTOR FP MAXIMUM */ \
+ V(vfmin, VFMIN, 0xE7EE) /* type = VRR_C VECTOR FP MINIMUM */ \
+ V(vfce, VFCE, 0xE7E8) /* type = VRR_C VECTOR FP COMPARE EQUAL */ \
+ V(vfpso, VFPSO, 0xE7CC) /* type = VRR_A VECTOR FP PERFORM SIGN OPERATION */ \
+ V(vfche, VFCHE, 0xE7EA) /* type = VRR_C VECTOR FP COMPARE HIGH OR EQUAL */ \
+ V(vfch, VFCH, 0xE7EB) /* type = VRR_C VECTOR FP COMPARE HIGH */ \
+ V(vfi, VFI, 0xE7C7) /* type = VRR_A VECTOR LOAD FP INTEGER */ \
+ V(vfs, VFS, 0xE7E2) /* type = VRR_C VECTOR FP SUBTRACT */ \
+ V(vfa, VFA, 0xE7E3) /* type = VRR_C VECTOR FP ADD */ \
+ V(vfd, VFD, 0xE7E5) /* type = VRR_C VECTOR FP DIVIDE */ \
+ V(vfm, VFM, 0xE7E7) /* type = VRR_C VECTOR FP MULTIPLY */
#define CREATE_EVALUATE_TABLE(name, op_name, op_value) \
EvalTable[op_name] = &Simulator::Evaluate_##op_name;
@@ -1515,7 +1566,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// Initializing FP registers.
for (int i = 0; i < kNumFPRs; i++) {
- fp_registers_[i] = 0.0;
+ set_simd_register_by_lane<double>(i, 0, 0.0);
+ set_simd_register_by_lane<double>(i, 1, 0.0);
}
// The sp is initialized to point to the bottom (high address) of the
@@ -1551,12 +1603,13 @@ void Simulator::set_register(int reg, uint64_t value) {
}
// Get the register from the architecture state.
-uint64_t Simulator::get_register(int reg) const {
+const uint64_t& Simulator::get_register(int reg) const {
+ DCHECK((reg >= 0) && (reg < kNumGPRs));
+ return registers_[reg];
+}
+
+uint64_t& Simulator::get_register(int reg) {
DCHECK((reg >= 0) && (reg < kNumGPRs));
- // Stupid code added to avoid bug in GCC.
- // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
- if (reg >= kNumGPRs) return 0;
- // End stupid code.
return registers_[reg];
}
@@ -1801,30 +1854,30 @@ static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
}
// Calls into the V8 runtime.
-typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5,
- intptr_t arg6, intptr_t arg7,
- intptr_t arg8);
-typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+using SimulatorRuntimeCall = intptr_t (*)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5,
+ intptr_t arg6, intptr_t arg7,
+ intptr_t arg8);
+using SimulatorRuntimePairCall = ObjectPair (*)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
// These prototypes handle the four types of FP calls.
-typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
-typedef double (*SimulatorRuntimeFPCall)(double darg0);
-typedef double (*SimulatorRuntimeFPIntCall)(double darg0, intptr_t arg0);
+using SimulatorRuntimeCompareCall = int (*)(double darg0, double darg1);
+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
+using SimulatorRuntimeFPCall = double (*)(double darg0);
+using SimulatorRuntimeFPIntCall = double (*)(double darg0, intptr_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
-typedef void (*SimulatorRuntimeDirectApiCall)(intptr_t arg0);
-typedef void (*SimulatorRuntimeProfilingApiCall)(intptr_t arg0, void* arg1);
+using SimulatorRuntimeDirectApiCall = void (*)(intptr_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(intptr_t arg0, void* arg1);
// This signature supports direct call to accessor getter callback.
-typedef void (*SimulatorRuntimeDirectGetterCall)(intptr_t arg0, intptr_t arg1);
-typedef void (*SimulatorRuntimeProfilingGetterCall)(intptr_t arg0,
- intptr_t arg1, void* arg2);
+using SimulatorRuntimeDirectGetterCall = void (*)(intptr_t arg0, intptr_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(intptr_t arg0,
+ intptr_t arg1, void* arg2);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
@@ -2323,7 +2376,7 @@ void Simulator::ExecuteInstruction(Instruction* instr, bool auto_incr_pc) {
v8::internal::EmbeddedVector<char, 256> buffer;
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
PrintF("%05" PRId64 " %08" V8PRIxPTR " %s\n", icount_,
- reinterpret_cast<intptr_t>(instr), buffer.start());
+ reinterpret_cast<intptr_t>(instr), buffer.begin());
// Flush stdout to prevent incomplete file output during abnormal exits
// This is caused by the output being buffered before being written to file
@@ -2568,7 +2621,7 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
set_register(r11, r11_val);
set_register(r12, r12_val);
set_register(r13, r13_val);
-// Pop stack passed arguments.
+ // Pop stack passed arguments.
#ifndef V8_TARGET_ARCH_S390X
DCHECK_EQ(entry_stack, get_low_register<uint32_t>(sp));
@@ -2614,8 +2667,7 @@ uintptr_t Simulator::PopAddress() {
return address;
}
-#define EVALUATE(name) \
- int Simulator::Evaluate_##name(Instruction* instr)
+#define EVALUATE(name) int Simulator::Evaluate_##name(Instruction* instr)
#define DCHECK_OPCODE(op) DCHECK(instr->S390OpcodeValue() == op)
@@ -2663,10 +2715,10 @@ uintptr_t Simulator::PopAddress() {
int d2 = AS(RSInstruction)->D2Value(); \
int length = 4;
-#define DECODE_RSI_INSTRUCTION(r1, r3, i2) \
- int r1 = AS(RSIInstruction)->R1Value(); \
- int r3 = AS(RSIInstruction)->R3Value(); \
- int32_t i2 = AS(RSIInstruction)->I2Value(); \
+#define DECODE_RSI_INSTRUCTION(r1, r3, i2) \
+ int r1 = AS(RSIInstruction)->R1Value(); \
+ int r3 = AS(RSIInstruction)->R3Value(); \
+ int32_t i2 = AS(RSIInstruction)->I2Value(); \
int length = 4;
#define DECODE_SI_INSTRUCTION_I_UINT8(b1, d1_val, imm_val) \
@@ -2781,6 +2833,22 @@ uintptr_t Simulator::PopAddress() {
int d2 = AS(RXEInstruction)->D2Value(); \
int length = 6;
+#define DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3) \
+ int r1 = AS(VRR_A_Instruction)->R1Value(); \
+ int r2 = AS(VRR_A_Instruction)->R2Value(); \
+ int m5 = AS(VRR_A_Instruction)->M5Value(); \
+ int m4 = AS(VRR_A_Instruction)->M4Value(); \
+ int m3 = AS(VRR_A_Instruction)->M3Value(); \
+ int length = 6;
+
+#define DECODE_VRR_B_INSTRUCTION(r1, r2, r3, m5, m4) \
+ int r1 = AS(VRR_B_Instruction)->R1Value(); \
+ int r2 = AS(VRR_B_Instruction)->R2Value(); \
+ int r3 = AS(VRR_B_Instruction)->R3Value(); \
+ int m5 = AS(VRR_B_Instruction)->M5Value(); \
+ int m4 = AS(VRR_B_Instruction)->M4Value(); \
+ int length = 6;
+
#define DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4) \
int r1 = AS(VRR_C_Instruction)->R1Value(); \
int r2 = AS(VRR_C_Instruction)->R2Value(); \
@@ -2790,56 +2858,822 @@ uintptr_t Simulator::PopAddress() {
int m4 = AS(VRR_C_Instruction)->M4Value(); \
int length = 6;
+#define DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5) \
+ int r1 = AS(VRR_E_Instruction)->R1Value(); \
+ int r2 = AS(VRR_E_Instruction)->R2Value(); \
+ int r3 = AS(VRR_E_Instruction)->R3Value(); \
+ int r4 = AS(VRR_E_Instruction)->R4Value(); \
+ int m6 = AS(VRR_E_Instruction)->M6Value(); \
+ int m5 = AS(VRR_E_Instruction)->M5Value(); \
+ int length = 6;
+
+#define DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3) \
+ int r1 = AS(VRX_Instruction)->R1Value(); \
+ int x2 = AS(VRX_Instruction)->X2Value(); \
+ int b2 = AS(VRX_Instruction)->B2Value(); \
+ int d2 = AS(VRX_Instruction)->D2Value(); \
+ int m3 = AS(VRX_Instruction)->M3Value(); \
+ int length = 6;
+
+#define DECODE_VRS_INSTRUCTION(r1, r3, b2, d2, m4) \
+ int r1 = AS(VRS_Instruction)->R1Value(); \
+ int r3 = AS(VRS_Instruction)->R3Value(); \
+ int b2 = AS(VRS_Instruction)->B2Value(); \
+ int d2 = AS(VRS_Instruction)->D2Value(); \
+ int m4 = AS(VRS_Instruction)->M4Value(); \
+ int length = 6;
+
+#define DECODE_VRI_A_INSTRUCTION(r1, i2, m3) \
+ int r1 = AS(VRI_A_Instruction)->R1Value(); \
+ int16_t i2 = AS(VRI_A_Instruction)->I2Value(); \
+ int m3 = AS(VRI_A_Instruction)->M3Value(); \
+ int length = 6;
+
+#define DECODE_VRI_C_INSTRUCTION(r1, r3, i2, m4) \
+ int r1 = AS(VRI_C_Instruction)->R1Value(); \
+ int r3 = AS(VRI_C_Instruction)->R3Value(); \
+ uint16_t i2 = AS(VRI_C_Instruction)->I2Value(); \
+ int m4 = AS(VRI_C_Instruction)->M4Value(); \
+ int length = 6;
+
#define GET_ADDRESS(index_reg, base_reg, offset) \
(((index_reg) == 0) ? 0 : get_register(index_reg)) + \
(((base_reg) == 0) ? 0 : get_register(base_reg)) + offset
-int Simulator::Evaluate_Unknown(Instruction* instr) {
- UNREACHABLE();
+int Simulator::Evaluate_Unknown(Instruction* instr) { UNREACHABLE(); }
+
+EVALUATE(VST) {
+ DCHECK_OPCODE(VST);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ USE(m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ fpr_t* ptr = reinterpret_cast<fpr_t*>(addr);
+ *ptr = get_simd_register(r1);
+ return length;
}
-EVALUATE(VFA) {
- DCHECK_OPCODE(VFA);
+EVALUATE(VL) {
+ DCHECK_OPCODE(VL);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ USE(m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ fpr_t* ptr = reinterpret_cast<fpr_t*>(addr);
+ DCHECK(m3 != 3 || (0x7 & addr) == 0);
+ DCHECK(m3 != 4 || (0xf & addr) == 0);
+ set_simd_register(r1, *ptr);
+ return length;
+}
+
+EVALUATE(VLGV) {
+ DCHECK_OPCODE(VLGV);
+ DECODE_VRS_INSTRUCTION(r1, r3, b2, d2, m4);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t index = b2_val + d2;
+ const int size_by_byte = 1 << m4;
+ int8_t* src = get_simd_register(r3).int8 + index * size_by_byte;
+ set_register(r1, 0);
+ memcpy(&get_register(r1), src, size_by_byte);
+ return length;
+}
+
+EVALUATE(VLVG) {
+ DCHECK_OPCODE(VLVG);
+ DECODE_VRS_INSTRUCTION(r1, r3, b2, d2, m4);
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+ int64_t index = b2_val + d2;
+ const int size_by_byte = 1 << m4;
+ int8_t* dst = get_simd_register(r1).int8 + index * size_by_byte;
+ memcpy(dst, &get_register(r3), size_by_byte);
+ return length;
+}
+
+EVALUATE(VREP) {
+ DCHECK_OPCODE(VREP);
+ DECODE_VRI_C_INSTRUCTION(r1, r3, i2, m4);
+ const int size_by_byte = 1 << m4;
+ DCHECK(i2 >= 0 && i2 < kSimd128Size / size_by_byte);
+ int8_t* src = get_simd_register(r3).int8;
+ int8_t* dst = get_simd_register(r1).int8;
+ for (int i = 0; i < kSimd128Size; i += size_by_byte) {
+ memcpy(dst + i, src + i2 * size_by_byte, size_by_byte);
+ }
+ return length;
+}
+
+EVALUATE(VLREP) {
+ DCHECK_OPCODE(VLREP);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ const int size_by_byte = 1 << m3;
+ int8_t* dst = get_simd_register(r1).int8;
+ int8_t* src = reinterpret_cast<int8_t*>(addr);
+ set_simd_register(r1, fp_zero);
+ for (int i = 0; i < kSimd128Size; i += size_by_byte) {
+ memcpy(dst + i, src, size_by_byte);
+ }
+ return length;
+}
+
+EVALUATE(VREPI) {
+ DCHECK_OPCODE(VREPI);
+ DECODE_VRI_A_INSTRUCTION(r1, i2, m3);
+ const int size_by_byte = 1 << m3;
+ int8_t* dst = get_simd_register(r1).int8;
+ uint64_t immediate = static_cast<uint64_t>(i2);
+ set_simd_register(r1, fp_zero);
+ for (int i = 0; i < kSimd128Size; i += size_by_byte) {
+ memcpy(dst + i, &immediate, size_by_byte);
+ }
+ return length;
+}
+
+EVALUATE(VLR) {
+ DCHECK_OPCODE(VLR);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m5);
+ USE(m4);
+ USE(m3);
+ set_simd_register(r1, get_simd_register(r2));
+ return length;
+}
+
+EVALUATE(VSTEF) {
+ DCHECK_OPCODE(VSTEF);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int32_t value = get_simd_register_by_lane<int32_t>(r1, m3);
+ WriteW(addr, value, instr);
+ return length;
+}
+
+EVALUATE(VLEF) {
+ DCHECK_OPCODE(VLEF);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int32_t value = ReadW(addr, instr);
+ set_simd_register_by_lane<int32_t>(r1, m3, value);
+ return length;
+}
+
+template <class T, class Operation>
+inline static void VectorBinaryOp(void* dst, void* src1, void* src2,
+ Operation op) {
+ int8_t* src1_ptr = reinterpret_cast<int8_t*>(src1);
+ int8_t* src2_ptr = reinterpret_cast<int8_t*>(src2);
+ int8_t* dst_ptr = reinterpret_cast<int8_t*>(dst);
+ for (int i = 0; i < kSimd128Size; i += sizeof(T)) {
+ T& dst_val = *reinterpret_cast<T*>(dst_ptr + i);
+ T& src1_val = *reinterpret_cast<T*>(src1_ptr + i);
+ T& src2_val = *reinterpret_cast<T*>(src2_ptr + i);
+ dst_val = op(src1_val, src2_val);
+ memcpy(dst_ptr + i, &dst_val, sizeof(T));
+ }
+}
+
+#define VECTOR_BINARY_OP_FOR_TYPE(type, op) \
+ VectorBinaryOp<type>(&get_simd_register(r1), &get_simd_register(r2), \
+ &get_simd_register(r3), \
+ [](type a, type b) { return a op b; });
+
+#define VECTOR_BINARY_OP(op) \
+ switch (m4) { \
+ case 0: \
+ VECTOR_BINARY_OP_FOR_TYPE(int8_t, op) \
+ break; \
+ case 1: \
+ VECTOR_BINARY_OP_FOR_TYPE(int16_t, op) \
+ break; \
+ case 2: \
+ VECTOR_BINARY_OP_FOR_TYPE(int32_t, op) \
+ break; \
+ case 3: \
+ VECTOR_BINARY_OP_FOR_TYPE(int64_t, op) \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ }
+
+EVALUATE(VA) {
+ DCHECK_OPCODE(VA);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m5);
+ USE(m6);
+ VECTOR_BINARY_OP(+)
+ return length;
+}
+
+EVALUATE(VS) {
+ DCHECK_OPCODE(VS);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m5);
+ USE(m6);
+ VECTOR_BINARY_OP(-)
+ return length;
+}
+
+EVALUATE(VML) {
+ DCHECK_OPCODE(VML);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m5);
+ USE(m6);
+ VECTOR_BINARY_OP(*)
+ return length;
+}
+
+template <class S, class D>
+void VectorSum(void* dst, void* src1, void* src2) {
+ D value = 0;
+ for (size_t i = 0; i < kSimd128Size / sizeof(S); i++) {
+ value += *(reinterpret_cast<S*>(src1) + i);
+ if ((i + 1) % (sizeof(D) / sizeof(S)) == 0) {
+ value += *(reinterpret_cast<S*>(src2) + i);
+ memcpy(reinterpret_cast<D*>(dst) + i / (sizeof(D) / sizeof(S)), &value,
+ sizeof(D));
+ value = 0;
+ }
+ }
+}
+
+EVALUATE(VSUM) {
+ DCHECK_OPCODE(VSUM);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ fpr_t src1 = get_simd_register(r2);
+ fpr_t src2 = get_simd_register(r3);
+ switch (m4) {
+ case 0:
+ VectorSum<int8_t, int32_t>(&get_simd_register(r1), &src1, &src2);
+ break;
+ case 1:
+ VectorSum<int16_t, int32_t>(&get_simd_register(r1), &src1, &src2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+EVALUATE(VSUMG) {
+ DCHECK_OPCODE(VSUMG);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ fpr_t src1 = get_simd_register(r2);
+ fpr_t src2 = get_simd_register(r3);
+ switch (m4) {
+ case 1:
+ VectorSum<int16_t, int64_t>(&get_simd_register(r1), &src1, &src2);
+ break;
+ case 2:
+ VectorSum<int32_t, int64_t>(&get_simd_register(r1), &src1, &src2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+template <class S, class D>
+void VectorPack(void* dst, void* src1, void* src2, bool saturate,
+ const D& max = 0, const D& min = 0) {
+ S* src = reinterpret_cast<S*>(src1);
+ int count = 0;
+ S value = 0;
+ for (size_t i = 0; i < kSimd128Size / sizeof(D); i++, count++) {
+ if (count == kSimd128Size / sizeof(S)) {
+ src = reinterpret_cast<S*>(src2);
+ count = 0;
+ }
+ memcpy(&value, src + count, sizeof(S));
+ if (saturate) {
+ if (value > max)
+ value = max;
+ else if (value < min)
+ value = min;
+ }
+ memcpy(reinterpret_cast<D*>(dst) + i, &value, sizeof(D));
+ }
+}
+
+EVALUATE(VPK) {
+ DCHECK_OPCODE(VPK);
DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
USE(m6);
USE(m5);
+ fpr_t src1 = get_simd_register(r2);
+ fpr_t src2 = get_simd_register(r3);
+ switch (m4) {
+ case 1:
+ VectorPack<int16_t, int8_t>(&get_simd_register(r1), &src1, &src2, false);
+ break;
+ case 2:
+ VectorPack<int32_t, int16_t>(&get_simd_register(r1), &src1, &src2, false);
+ break;
+ case 3:
+ VectorPack<int64_t, int32_t>(&get_simd_register(r1), &src1, &src2, false);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+EVALUATE(VPKS) {
+ DCHECK_OPCODE(VPKS);
+ DECODE_VRR_B_INSTRUCTION(r1, r2, r3, m5, m4);
+ USE(m5);
USE(m4);
- DCHECK_EQ(m5, 8);
- DCHECK_EQ(m4, 3);
- double r2_val = get_double_from_d_register(r2);
- double r3_val = get_double_from_d_register(r3);
- double r1_val = r2_val + r3_val;
- set_d_register_from_double(r1, r1_val);
+ fpr_t src1 = get_simd_register(r2);
+ fpr_t src2 = get_simd_register(r3);
+ switch (m4) {
+ case 1:
+ VectorPack<int16_t, int8_t>(&get_simd_register(r1), &src1, &src2, true,
+ INT8_MAX, INT8_MIN);
+ break;
+ case 2:
+ VectorPack<int32_t, int16_t>(&get_simd_register(r1), &src1, &src2, true,
+ INT16_MAX, INT16_MIN);
+ break;
+ case 3:
+ VectorPack<int64_t, int32_t>(&get_simd_register(r1), &src1, &src2, true,
+ INT32_MAX, INT32_MIN);
+ break;
+ default:
+ UNREACHABLE();
+ }
return length;
}
-EVALUATE(VFS) {
- DCHECK_OPCODE(VFS);
+EVALUATE(VPKLS) {
+ DCHECK_OPCODE(VPKLS);
+ DECODE_VRR_B_INSTRUCTION(r1, r2, r3, m5, m4);
+ USE(m5);
+ USE(m4);
+ fpr_t src1 = get_simd_register(r2);
+ fpr_t src2 = get_simd_register(r3);
+ switch (m4) {
+ case 1:
+ VectorPack<uint16_t, uint8_t>(&get_simd_register(r1), &src1, &src2, true,
+ UINT8_MAX, 0);
+ break;
+ case 2:
+ VectorPack<uint32_t, uint16_t>(&get_simd_register(r1), &src1, &src2, true,
+ UINT16_MAX, 0);
+ break;
+ case 3:
+ VectorPack<uint64_t, uint32_t>(&get_simd_register(r1), &src1, &src2, true,
+ UINT32_MAX, 0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+template <class S, class D>
+void VectorUnpackHigh(void* dst, void* src) {
+ D value = 0;
+ for (size_t i = 0; i < kSimd128Size / sizeof(D); i++) {
+ value = *(reinterpret_cast<S*>(src) + i + (sizeof(S) / 2));
+ memcpy(reinterpret_cast<D*>(dst) + i, &value, sizeof(D));
+ }
+}
+
+EVALUATE(VUPH) {
+ DCHECK_OPCODE(VUPH);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m5);
+ USE(m4);
+ switch (m3) {
+ case 0:
+ VectorUnpackHigh<int8_t, int16_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 1:
+ VectorUnpackHigh<int16_t, int32_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 2:
+ VectorUnpackHigh<int32_t, int64_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+EVALUATE(VUPLH) {
+ DCHECK_OPCODE(VUPLH);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m5);
+ USE(m4);
+ switch (m3) {
+ case 0:
+ VectorUnpackHigh<uint8_t, uint16_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 1:
+ VectorUnpackHigh<uint16_t, uint32_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 2:
+ VectorUnpackHigh<uint32_t, uint64_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+template <class S, class D>
+void VectorUnpackLow(void* dst, void* src) {
+ D value = 0;
+ for (size_t i = kSimd128Size / sizeof(D); i > 0; i--) {
+ value = *(reinterpret_cast<S*>(src) + i - 1);
+ memcpy(reinterpret_cast<D*>(dst) + i - 1, &value, sizeof(D));
+ }
+}
+
+EVALUATE(VUPL) {
+ DCHECK_OPCODE(VUPL);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m5);
+ USE(m4);
+ switch (m3) {
+ case 0:
+ VectorUnpackLow<int8_t, int16_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 1:
+ VectorUnpackLow<int16_t, int32_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 2:
+ VectorUnpackLow<int32_t, int64_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+EVALUATE(VUPLL) {
+ DCHECK_OPCODE(VUPLL);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m5);
+ USE(m4);
+ switch (m3) {
+ case 0:
+ VectorUnpackLow<uint8_t, uint16_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 1:
+ VectorUnpackLow<uint16_t, uint32_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 2:
+ VectorUnpackLow<uint32_t, uint64_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+#define VECTOR_MAX_MIN_FOR_TYPE(type, op) \
+ VectorBinaryOp<type>(&get_simd_register(r1), &get_simd_register(r2), \
+ &get_simd_register(r3), \
+ [](type a, type b) { return (a op b) ? a : b; });
+
+#define VECTOR_MAX_MIN(op, sign) \
+ switch (m4) { \
+ case 0: \
+ VECTOR_MAX_MIN_FOR_TYPE(sign##int8_t, op) \
+ break; \
+ case 1: \
+ VECTOR_MAX_MIN_FOR_TYPE(sign##int16_t, op) \
+ break; \
+ case 2: \
+ VECTOR_MAX_MIN_FOR_TYPE(sign##int32_t, op) \
+ break; \
+ case 3: \
+ VECTOR_MAX_MIN_FOR_TYPE(sign##int64_t, op) \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ }
+
+EVALUATE(VMX) {
+ DCHECK_OPCODE(VMX);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m5);
+ USE(m6);
+ VECTOR_MAX_MIN(>, )
+ return length;
+}
+
+EVALUATE(VMXL) {
+ DCHECK_OPCODE(VMXL);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m5);
+ USE(m6);
+ VECTOR_MAX_MIN(>, u)
+ return length;
+}
+
+EVALUATE(VMN) {
+ DCHECK_OPCODE(VMN);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m5);
+ USE(m6);
+ VECTOR_MAX_MIN(<, )
+ return length;
+}
+
+EVALUATE(VMNL) {
+ DCHECK_OPCODE(VMNL);
DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m5);
USE(m6);
+ VECTOR_MAX_MIN(<, u);
+ return length;
+}
+
+#define VECTOR_COMPARE_FOR_TYPE(type, op) \
+ VectorBinaryOp<type>(&get_simd_register(r1), &get_simd_register(r2), \
+ &get_simd_register(r3), \
+ [](type a, type b) { return (a op b) ? -1 : 0; });
+
+#define VECTOR_COMPARE(op, sign) \
+ switch (m4) { \
+ case 0: \
+ VECTOR_COMPARE_FOR_TYPE(sign##int8_t, op) \
+ break; \
+ case 1: \
+ VECTOR_COMPARE_FOR_TYPE(sign##int16_t, op) \
+ break; \
+ case 2: \
+ VECTOR_COMPARE_FOR_TYPE(sign##int32_t, op) \
+ break; \
+ case 3: \
+ VECTOR_COMPARE_FOR_TYPE(sign##int64_t, op) \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ }
+
+EVALUATE(VCEQ) {
+ DCHECK_OPCODE(VCEQ);
+ DECODE_VRR_B_INSTRUCTION(r1, r2, r3, m5, m4);
+ USE(m5);
+ DCHECK_EQ(m5, 0);
+ VECTOR_COMPARE(==, )
+ return length;
+}
+
+EVALUATE(VCH) {
+ DCHECK_OPCODE(VCH);
+ DECODE_VRR_B_INSTRUCTION(r1, r2, r3, m5, m4);
+ USE(m5);
+ DCHECK_EQ(m5, 0);
+ VECTOR_COMPARE(>, )
+ return length;
+}
+
+EVALUATE(VCHL) {
+ DCHECK_OPCODE(VCHL);
+ DECODE_VRR_B_INSTRUCTION(r1, r2, r3, m5, m4);
+ USE(m5);
+ DCHECK_EQ(m5, 0);
+ VECTOR_COMPARE(>, u)
+ return length;
+}
+
+EVALUATE(VO) {
+ DCHECK_OPCODE(VO);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
USE(m5);
+ USE(m6);
USE(m4);
- DCHECK_EQ(m5, 8);
- DCHECK_EQ(m4, 3);
- double r2_val = get_double_from_d_register(r2);
- double r3_val = get_double_from_d_register(r3);
- double r1_val = r2_val - r3_val;
- set_d_register_from_double(r1, r1_val);
+ VECTOR_BINARY_OP_FOR_TYPE(int64_t, |)
return length;
}
-EVALUATE(VFM) {
- DCHECK_OPCODE(VFM);
+EVALUATE(VN) {
+ DCHECK_OPCODE(VN);
DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m5);
USE(m6);
+ USE(m4);
+ VECTOR_BINARY_OP_FOR_TYPE(int64_t, &)
+ return length;
+}
+
+EVALUATE(VX) {
+ DCHECK_OPCODE(VX);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m4);
+ USE(m5);
+ USE(m6);
+ VECTOR_BINARY_OP_FOR_TYPE(int64_t, ^)
+ return length;
+}
+
+template <class T>
+void VectorLoadComplement(void* dst, void* src) {
+ int8_t* src_ptr = reinterpret_cast<int8_t*>(src);
+ int8_t* dst_ptr = reinterpret_cast<int8_t*>(dst);
+ for (int i = 0; i < kSimd128Size; i += sizeof(T)) {
+ T& src_val = *reinterpret_cast<T*>(src_ptr + i);
+ T& dst_val = *reinterpret_cast<T*>(dst_ptr + i);
+ dst_val = -(uint64_t)src_val;
+ memcpy(dst_ptr + i, &dst_val, sizeof(T));
+ }
+}
+
+EVALUATE(VLC) {
+ DCHECK_OPCODE(VLC);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
USE(m5);
USE(m4);
- DCHECK_EQ(m5, 8);
- DCHECK_EQ(m4, 3);
- double r2_val = get_double_from_d_register(r2);
- double r3_val = get_double_from_d_register(r3);
- double r1_val = r2_val * r3_val;
- set_d_register_from_double(r1, r1_val);
+ switch (m3) {
+ case 0:
+ VectorLoadComplement<int8_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 1:
+ VectorLoadComplement<int16_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 2:
+ VectorLoadComplement<int32_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ case 3:
+ VectorLoadComplement<int64_t>(&get_simd_register(r1),
+ &get_simd_register(r2));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+EVALUATE(VSEL) {
+ DCHECK_OPCODE(VSEL);
+ DECODE_VRR_E_INSTRUCTION(r1, r2, r3, r4, m6, m5);
+ USE(m5);
+ USE(m6);
+ fpr_t scratch = get_simd_register(r2);
+ fpr_t mask = get_simd_register(r4);
+ scratch.int64[0] ^= get_simd_register_by_lane<int64_t>(r3, 0);
+ scratch.int64[1] ^= get_simd_register_by_lane<int64_t>(r3, 1);
+ mask.int64[0] &= scratch.int64[0];
+ mask.int64[1] &= scratch.int64[1];
+ mask.int64[0] ^= get_simd_register_by_lane<int64_t>(r3, 0);
+ mask.int64[1] ^= get_simd_register_by_lane<int64_t>(r3, 1);
+ set_simd_register(r1, mask);
+ return length;
+}
+
+template <class T, class Operation>
+void VectorShift(void* dst, void* src, unsigned int shift, Operation op) {
+ int8_t* src_ptr = reinterpret_cast<int8_t*>(src);
+ int8_t* dst_ptr = reinterpret_cast<int8_t*>(dst);
+ for (int i = 0; i < kSimd128Size; i += sizeof(T)) {
+ T& dst_val = *reinterpret_cast<T*>(dst_ptr + i);
+ T& src_val = *reinterpret_cast<T*>(src_ptr + i);
+ dst_val = op(src_val, shift);
+ memcpy(dst_ptr + i, &dst_val, sizeof(T));
+ }
+}
+
+#define VECTOR_SHIFT_FOR_TYPE(type, op, shift) \
+ VectorShift<type>(&get_simd_register(r1), &get_simd_register(r3), shift, \
+ [](type a, unsigned int shift) { return a op shift; });
+
+#define VECTOR_SHIFT(op, sign) \
+ switch (m4) { \
+ case 0: \
+ VECTOR_SHIFT_FOR_TYPE(sign##int8_t, op, shift) \
+ break; \
+ case 1: \
+ VECTOR_SHIFT_FOR_TYPE(sign##int16_t, op, shift) \
+ break; \
+ case 2: \
+ VECTOR_SHIFT_FOR_TYPE(sign##int32_t, op, shift) \
+ break; \
+ case 3: \
+ VECTOR_SHIFT_FOR_TYPE(sign##int64_t, op, shift) \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ }
+
+EVALUATE(VESL) {
+ DCHECK_OPCODE(VESL);
+ DECODE_VRS_INSTRUCTION(r1, r3, b2, d2, m4);
+ unsigned int shift = get_register(b2) + d2;
+ VECTOR_SHIFT(<<, )
+ return length;
+}
+
+EVALUATE(VESRA) {
+ DCHECK_OPCODE(VESRA);
+ DECODE_VRS_INSTRUCTION(r1, r3, b2, d2, m4);
+ unsigned int shift = get_register(b2) + d2;
+ VECTOR_SHIFT(>>, )
+ return length;
+}
+
+EVALUATE(VESRL) {
+ DCHECK_OPCODE(VESRL);
+ DECODE_VRS_INSTRUCTION(r1, r3, b2, d2, m4);
+ unsigned int shift = get_register(b2) + d2;
+ VECTOR_SHIFT(>>, u)
+ return length;
+}
+
+EVALUATE(VTM) {
+ DCHECK_OPCODE(VTM);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m5);
+ USE(m4);
+ USE(m3);
+ int64_t src1 = get_simd_register_by_lane<int64_t>(r1, 0);
+ int64_t src2 = get_simd_register_by_lane<int64_t>(r1, 1);
+ int64_t mask1 = get_simd_register_by_lane<int64_t>(r2, 0);
+ int64_t mask2 = get_simd_register_by_lane<int64_t>(r2, 1);
+ if ((src1 & mask1) == 0 && (src2 & mask2) == 0) {
+ condition_reg_ = 0x8;
+ return length;
+ }
+ if ((src1 & mask1) == mask1 && (src2 & mask2) == mask2) {
+ condition_reg_ = 0x1;
+ return length;
+ }
+ condition_reg_ = 0x4;
+ return length;
+}
+
+#define VECTOR_FP_BINARY_OP(op) \
+ switch (m4) { \
+ case 2: \
+ DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)); \
+ if (m5 == 8) { \
+ float src1 = get_simd_register_by_lane<float>(r2, 0); \
+ float src2 = get_simd_register_by_lane<float>(r3, 0); \
+ set_simd_register_by_lane<float>(r1, 0, src1 op src2); \
+ } else { \
+ DCHECK_EQ(m5, 0); \
+ VECTOR_BINARY_OP_FOR_TYPE(float, op) \
+ } \
+ break; \
+ case 3: \
+ if (m5 == 8) { \
+ double src1 = get_simd_register_by_lane<double>(r2, 0); \
+ double src2 = get_simd_register_by_lane<double>(r3, 0); \
+ set_simd_register_by_lane<double>(r1, 0, src1 op src2); \
+ } else { \
+ DCHECK_EQ(m5, 0); \
+ VECTOR_BINARY_OP_FOR_TYPE(double, op) \
+ } \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ }
+
+EVALUATE(VFA) {
+ DCHECK_OPCODE(VFA);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ VECTOR_FP_BINARY_OP(+)
+ return length;
+}
+
+EVALUATE(VFS) {
+ DCHECK_OPCODE(VFS);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ VECTOR_FP_BINARY_OP(-)
+ return length;
+}
+
+EVALUATE(VFM) {
+ DCHECK_OPCODE(VFM);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ VECTOR_FP_BINARY_OP(*)
return length;
}
@@ -2847,14 +3681,276 @@ EVALUATE(VFD) {
DCHECK_OPCODE(VFD);
DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
USE(m6);
+ VECTOR_FP_BINARY_OP(/)
+ return length;
+}
+
+template <class T, class Operation>
+void VectorFPMaxMin(void* dst, void* src1, void* src2, Operation op) {
+ T* dst_ptr = reinterpret_cast<T*>(dst);
+ T* src1_ptr = reinterpret_cast<T*>(src1);
+ T* src2_ptr = reinterpret_cast<T*>(src2);
+ for (size_t i = 0; i < kSimd128Size / sizeof(T); i++) {
+ T src1_val = *(src1_ptr + i);
+ T src2_val = *(src2_ptr + i);
+ T value = op(src1_val, src2_val);
+ // using Java's Max Min functions
+ if (isnan(src1_val) || isnan(src2_val)) {
+ value = NAN;
+ }
+ memcpy(dst_ptr + i, &value, sizeof(T));
+ }
+}
+
+#define VECTOR_FP_MAX_MIN_FOR_TYPE(type, op) \
+ VectorFPMaxMin<type>(&get_simd_register(r1), &get_simd_register(r2), \
+ &get_simd_register(r3), \
+ [](type a, type b) { return (a op b) ? a : b; });
+
+#define VECTOR_FP_MAX_MIN(op) \
+ switch (m4) { \
+ case 2: \
+ if (m5 == 8) { \
+ float src1 = get_simd_register_by_lane<float>(r2, 0); \
+ float src2 = get_simd_register_by_lane<float>(r3, 0); \
+ set_simd_register_by_lane<float>(r1, 0, (src1 op src2) ? src1 : src2); \
+ } else { \
+ DCHECK_EQ(m5, 0); \
+ DCHECK_EQ(m6, 1); \
+ VECTOR_FP_MAX_MIN_FOR_TYPE(float, op) \
+ } \
+ break; \
+ case 3: \
+ if (m5 == 8) { \
+ double src1 = get_simd_register_by_lane<double>(r2, 0); \
+ double src2 = get_simd_register_by_lane<double>(r3, 0); \
+ set_simd_register_by_lane<double>(r1, 0, \
+ (src1 op src2) ? src1 : src2); \
+ } else { \
+ DCHECK_EQ(m5, 0); \
+ DCHECK_EQ(m6, 1); \
+ VECTOR_FP_MAX_MIN_FOR_TYPE(double, op) \
+ } \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ }
+
+EVALUATE(VFMIN) {
+ DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
+ DCHECK_OPCODE(VFMIN);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ VECTOR_FP_MAX_MIN(<) // NOLINT
+ return length;
+}
+
+EVALUATE(VFMAX) {
+ DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
+ DCHECK_OPCODE(VFMAX);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ VECTOR_FP_MAX_MIN(>) // NOLINT
+ return length;
+}
+
+template <class S, class D, class Operation>
+void VectorFPCompare(void* dst, void* src1, void* src2, Operation op) {
+ D* dst_ptr = reinterpret_cast<D*>(dst);
+ S* src1_ptr = reinterpret_cast<S*>(src1);
+ S* src2_ptr = reinterpret_cast<S*>(src2);
+ for (size_t i = 0; i < kSimd128Size / sizeof(D); i++) {
+ S src1_val = *(src1_ptr + i);
+ S src2_val = *(src2_ptr + i);
+ D value = op(src1_val, src2_val);
+ memcpy(dst_ptr + i, &value, sizeof(D));
+ }
+}
+
+#define VECTOR_FP_COMPARE_FOR_TYPE(S, D, op) \
+ VectorFPCompare<S, D>(&get_simd_register(r1), &get_simd_register(r2), \
+ &get_simd_register(r3), \
+ [](S a, S b) { return (a op b) ? -1 : 0; });
+
+#define VECTOR_FP_COMPARE(op) \
+ switch (m4) { \
+ case 2: \
+ DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)); \
+ if (m5 == 8) { \
+ float src1 = get_simd_register_by_lane<float>(r2, 0); \
+ float src2 = get_simd_register_by_lane<float>(r3, 0); \
+ set_simd_register_by_lane<int32_t>(r1, 0, (src1 op src2) ? -1 : 0); \
+ } else { \
+ DCHECK_EQ(m5, 0); \
+ VECTOR_FP_COMPARE_FOR_TYPE(float, int32_t, op) \
+ } \
+ break; \
+ case 3: \
+ if (m5 == 8) { \
+ double src1 = get_simd_register_by_lane<double>(r2, 0); \
+ double src2 = get_simd_register_by_lane<double>(r3, 0); \
+ set_simd_register_by_lane<int64_t>(r1, 0, (src1 op src2) ? -1 : 0); \
+ } else { \
+ DCHECK_EQ(m5, 0); \
+ VECTOR_FP_COMPARE_FOR_TYPE(double, int64_t, op) \
+ } \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ break; \
+ }
+
+EVALUATE(VFCE) {
+ DCHECK_OPCODE(VFCE);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ VECTOR_FP_COMPARE(==)
+ return length;
+}
+
+EVALUATE(VFCHE) {
+ DCHECK_OPCODE(VFCHE);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ VECTOR_FP_COMPARE(>=)
+ return length;
+}
+
+EVALUATE(VFCH) {
+ DCHECK_OPCODE(VFCH);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ VECTOR_FP_COMPARE(>) // NOLINT
+ return length;
+}
+
+template <class T>
+void VectorSignOp(void* dst, void* src, int m4, int m5) {
+ T* src_ptr = reinterpret_cast<T*>(src);
+ T* dst_ptr = reinterpret_cast<T*>(dst);
+ switch (m5) {
+ case 0:
+ if (m4 == 8) {
+ T value = -(*src_ptr);
+ memcpy(dst_ptr, &value, sizeof(T));
+ } else {
+ for (size_t i = 0; i < kSimd128Size / sizeof(T); i++) {
+ T value = -(*(src_ptr + i));
+ memcpy(dst_ptr + i, &value, sizeof(T));
+ }
+ }
+ break;
+ case 1:
+ if (m4 == 8) {
+ T value = -abs(*src_ptr);
+ memcpy(dst_ptr, &value, sizeof(T));
+ } else {
+ for (size_t i = 0; i < kSimd128Size / sizeof(T); i++) {
+ T value = -abs(*(src_ptr + i));
+ memcpy(dst_ptr + i, &value, sizeof(T));
+ }
+ }
+ break;
+ case 2:
+ if (m4 == 8) {
+ T value = abs(*src_ptr);
+ memcpy(dst_ptr, &value, sizeof(T));
+ } else {
+ for (size_t i = 0; i < kSimd128Size / sizeof(T); i++) {
+ T value = abs(*(src_ptr + i));
+ memcpy(dst_ptr + i, &value, sizeof(T));
+ }
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+EVALUATE(VFPSO) {
+ DCHECK_OPCODE(VFPSO);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
USE(m5);
USE(m4);
- DCHECK_EQ(m5, 8);
- DCHECK_EQ(m4, 3);
- double r2_val = get_double_from_d_register(r2);
- double r3_val = get_double_from_d_register(r3);
- double r1_val = r2_val / r3_val;
- set_d_register_from_double(r1, r1_val);
+ USE(m3);
+ switch (m3) {
+ case 2:
+ DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
+ VectorSignOp<float>(&get_simd_register(r1), &get_simd_register(r2), m4,
+ m5);
+ break;
+ case 3:
+ VectorSignOp<double>(&get_simd_register(r1), &get_simd_register(r2), m4,
+ m5);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+template <class T>
+void VectorFPSqrt(void* dst, void* src) {
+ T* dst_ptr = reinterpret_cast<T*>(dst);
+ T* src_ptr = reinterpret_cast<T*>(src);
+ for (size_t i = 0; i < kSimd128Size / sizeof(T); i++) {
+ T value = sqrt(*(src_ptr + i));
+ memcpy(dst_ptr + i, &value, sizeof(T));
+ }
+}
+
+EVALUATE(VFSQ) {
+ DCHECK_OPCODE(VFSQ);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m5);
+ switch (m3) {
+ case 2:
+ DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
+ if (m4 == 8) {
+ float src = get_simd_register_by_lane<float>(r2, 0);
+ set_simd_register_by_lane<float>(r1, 0, sqrt(src));
+ } else {
+ VectorFPSqrt<float>(&get_simd_register(r1), &get_simd_register(r2));
+ }
+ break;
+ case 3:
+ if (m4 == 8) {
+ double src = get_simd_register_by_lane<double>(r2, 0);
+ set_simd_register_by_lane<double>(r1, 0, sqrt(src));
+ } else {
+ VectorFPSqrt<double>(&get_simd_register(r1), &get_simd_register(r2));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+EVALUATE(VFI) {
+ DCHECK_OPCODE(VFI);
+ DECODE_VRR_A_INSTRUCTION(r1, r2, m5, m4, m3);
+ USE(m4);
+ USE(m5);
+ DCHECK_EQ(m5, 5);
+ switch (m3) {
+ case 2:
+ DCHECK(CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1));
+ for (int i = 0; i < 4; i++) {
+ float value = get_simd_register_by_lane<float>(r2, i);
+ set_simd_register_by_lane<float>(r1, i, trunc(value));
+ }
+ break;
+ case 3:
+ for (int i = 0; i < 2; i++) {
+ double value = get_simd_register_by_lane<double>(r2, i);
+ set_simd_register_by_lane<double>(r1, i, trunc(value));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
return length;
}
@@ -3867,8 +4963,8 @@ EVALUATE(BRXH) {
int32_t r3_val = (r3 == 0) ? 0 : get_low_register<int32_t>(r3);
intptr_t branch_address = get_pc() + (2 * i2);
r1_val += r3_val;
- int32_t compare_val = r3 % 2 == 0 ?
- get_low_register<int32_t>(r3 + 1) : r3_val;
+ int32_t compare_val =
+ r3 % 2 == 0 ? get_low_register<int32_t>(r3 + 1) : r3_val;
if (r1_val > compare_val) {
set_pc(branch_address);
}
@@ -9211,14 +10307,14 @@ EVALUATE(STOC) {
return 0;
}
-#define ATOMIC_LOAD_AND_UPDATE_WORD32(op) \
- DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2); \
- int64_t b2_val = (b2 == 0) ? 0 : get_register(b2); \
- intptr_t addr = static_cast<intptr_t>(b2_val) + d2; \
- int32_t r3_val = get_low_register<int32_t>(r3); \
- DCHECK_EQ(addr & 0x3, 0); \
- int32_t r1_val = op(reinterpret_cast<int32_t*>(addr), \
- r3_val, __ATOMIC_SEQ_CST); \
+#define ATOMIC_LOAD_AND_UPDATE_WORD32(op) \
+ DECODE_RSY_A_INSTRUCTION(r1, r3, b2, d2); \
+ int64_t b2_val = (b2 == 0) ? 0 : get_register(b2); \
+ intptr_t addr = static_cast<intptr_t>(b2_val) + d2; \
+ int32_t r3_val = get_low_register<int32_t>(r3); \
+ DCHECK_EQ(addr & 0x3, 0); \
+ int32_t r1_val = \
+ op(reinterpret_cast<int32_t*>(addr), r3_val, __ATOMIC_SEQ_CST); \
set_low_register(r1, r1_val);
EVALUATE(LAN) {
@@ -9754,6 +10850,66 @@ EVALUATE(CXZT) {
}
#undef EVALUATE
+#undef SScanF
+#undef S390_SUPPORTED_VECTOR_OPCODE_LIST
+#undef CheckOverflowForIntAdd
+#undef CheckOverflowForIntSub
+#undef CheckOverflowForUIntAdd
+#undef CheckOverflowForUIntSub
+#undef CheckOverflowForMul
+#undef CheckOverflowForShiftRight
+#undef CheckOverflowForShiftLeft
+#undef DCHECK_OPCODE
+#undef AS
+#undef DECODE_RIL_A_INSTRUCTION
+#undef DECODE_RIL_B_INSTRUCTION
+#undef DECODE_RIL_C_INSTRUCTION
+#undef DECODE_RXY_A_INSTRUCTION
+#undef DECODE_RX_A_INSTRUCTION
+#undef DECODE_RS_A_INSTRUCTION
+#undef DECODE_RS_A_INSTRUCTION_NO_R3
+#undef DECODE_RSI_INSTRUCTION
+#undef DECODE_SI_INSTRUCTION_I_UINT8
+#undef DECODE_SIL_INSTRUCTION
+#undef DECODE_SIY_INSTRUCTION
+#undef DECODE_RRE_INSTRUCTION
+#undef DECODE_RRE_INSTRUCTION_M3
+#undef DECODE_RRE_INSTRUCTION_NO_R2
+#undef DECODE_RRD_INSTRUCTION
+#undef DECODE_RRF_E_INSTRUCTION
+#undef DECODE_RRF_A_INSTRUCTION
+#undef DECODE_RRF_C_INSTRUCTION
+#undef DECODE_RR_INSTRUCTION
+#undef DECODE_RIE_D_INSTRUCTION
+#undef DECODE_RIE_E_INSTRUCTION
+#undef DECODE_RIE_F_INSTRUCTION
+#undef DECODE_RSY_A_INSTRUCTION
+#undef DECODE_RI_A_INSTRUCTION
+#undef DECODE_RI_B_INSTRUCTION
+#undef DECODE_RI_C_INSTRUCTION
+#undef DECODE_RXE_INSTRUCTION
+#undef DECODE_VRR_A_INSTRUCTION
+#undef DECODE_VRR_B_INSTRUCTION
+#undef DECODE_VRR_C_INSTRUCTION
+#undef DECODE_VRR_E_INSTRUCTION
+#undef DECODE_VRX_INSTRUCTION
+#undef DECODE_VRS_INSTRUCTION
+#undef DECODE_VRI_A_INSTRUCTION
+#undef DECODE_VRI_C_INSTRUCTION
+#undef GET_ADDRESS
+#undef VECTOR_BINARY_OP_FOR_TYPE
+#undef VECTOR_BINARY_OP
+#undef VECTOR_MAX_MIN_FOR_TYPE
+#undef VECTOR_MAX_MIN
+#undef VECTOR_COMPARE_FOR_TYPE
+#undef VECTOR_COMPARE
+#undef VECTOR_SHIFT_FOR_TYPE
+#undef VECTOR_SHIFT
+#undef VECTOR_FP_BINARY_OP
+#undef VECTOR_FP_MAX_MIN_FOR_TYPE
+#undef VECTOR_FP_MAX_MIN
+#undef VECTOR_FP_COMPARE_FOR_TYPE
+#undef VECTOR_FP_COMPARE
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/execution/s390/simulator-s390.h
index 6b6a91e2a7..0921ac839e 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/execution/s390/simulator-s390.h
@@ -9,20 +9,20 @@
// which will start execution in the Simulator or forwards to the real entry
// on a S390 hardware platform.
-#ifndef V8_S390_SIMULATOR_S390_H_
-#define V8_S390_SIMULATOR_S390_H_
+#ifndef V8_EXECUTION_S390_SIMULATOR_S390_H_
+#define V8_EXECUTION_S390_SIMULATOR_S390_H_
// globals.h defines USE_SIMULATOR.
-#include "src/globals.h"
+#include "src/common/globals.h"
#if defined(USE_SIMULATOR)
// Running with a simulator.
-#include "src/allocation.h"
-#include "src/assembler.h"
#include "src/base/hashmap.h"
-#include "src/s390/constants-s390.h"
-#include "src/simulator-base.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/s390/constants-s390.h"
+#include "src/execution/simulator-base.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -108,7 +108,8 @@ class Simulator : public SimulatorBase {
// Accessors for register state.
void set_register(int reg, uint64_t value);
- uint64_t get_register(int reg) const;
+ const uint64_t& get_register(int reg) const;
+ uint64_t& get_register(int reg);
template <typename T>
T get_low_register(int reg) const;
template <typename T>
@@ -119,20 +120,22 @@ class Simulator : public SimulatorBase {
double get_double_from_register_pair(int reg);
void set_d_register_from_double(int dreg, const double dbl) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
- *bit_cast<double*>(&fp_registers_[dreg]) = dbl;
+ set_simd_register_by_lane<double>(dreg, 0, dbl);
}
double get_double_from_d_register(int dreg) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
- return *bit_cast<double*>(&fp_registers_[dreg]);
+ return get_simd_register_by_lane<double>(dreg, 0);
}
+
void set_d_register(int dreg, int64_t value) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
- fp_registers_[dreg] = value;
+ set_simd_register_by_lane<int64_t>(dreg, 0, value);
}
+
int64_t get_d_register(int dreg) {
DCHECK(dreg >= 0 && dreg < kNumFPRs);
- return fp_registers_[dreg];
+ return get_simd_register_by_lane<int64_t>(dreg, 0);
}
void set_d_register_from_float32(int dreg, const float f) {
@@ -411,7 +414,41 @@ class Simulator : public SimulatorBase {
// On z9 and higher and supported Linux on z Systems platforms, all registers
// are 64-bit, even in 31-bit mode.
uint64_t registers_[kNumGPRs];
- int64_t fp_registers_[kNumFPRs];
+ union fpr_t {
+ int8_t int8[16];
+ uint8_t uint8[16];
+ int16_t int16[8];
+ uint16_t uint16[8];
+ int32_t int32[4];
+ uint32_t uint32[4];
+ int64_t int64[2];
+ uint64_t uint64[2];
+ float f32[4];
+ double f64[2];
+ };
+ fpr_t fp_registers_[kNumFPRs];
+
+ static constexpr fpr_t fp_zero = {{0}};
+
+ fpr_t& get_simd_register(int reg) { return fp_registers_[reg]; }
+
+ void set_simd_register(int reg, const fpr_t& v) {
+ get_simd_register(reg) = v;
+ }
+
+ template <class T>
+ T& get_simd_register_by_lane(int reg, int lane) {
+ DCHECK_LE(lane, kSimd128Size / sizeof(T));
+ DCHECK_LT(reg, kNumFPRs);
+ DCHECK_GE(lane, 0);
+ DCHECK_GE(reg, 0);
+ return (reinterpret_cast<T*>(&get_simd_register(reg)))[lane];
+ }
+
+ template <class T>
+ void set_simd_register_by_lane(int reg, int lane, const T& value) {
+ get_simd_register_by_lane<T>(reg, lane) = value;
+ }
// Condition Code register. In S390, the last 4 bits are used.
int32_t condition_reg_;
@@ -455,16 +492,24 @@ class Simulator : public SimulatorBase {
int DecodeInstruction(Instruction* instr);
int Evaluate_Unknown(Instruction* instr);
#define MAX_NUM_OPCODES (1 << 16)
- typedef int (Simulator::*EvaluateFuncType)(Instruction*);
+ using EvaluateFuncType = int (Simulator::*)(Instruction*);
static EvaluateFuncType EvalTable[MAX_NUM_OPCODES];
static void EvalTableInit();
#define EVALUATE(name) int Evaluate_##name(Instruction* instr)
-#define EVALUATE_VRR_INSTRUCTIONS(name, op_name, op_value) EVALUATE(op_name);
- S390_VRR_C_OPCODE_LIST(EVALUATE_VRR_INSTRUCTIONS)
- S390_VRR_A_OPCODE_LIST(EVALUATE_VRR_INSTRUCTIONS)
-#undef EVALUATE_VRR_INSTRUCTIONS
+#define EVALUATE_VR_INSTRUCTIONS(name, op_name, op_value) EVALUATE(op_name);
+ S390_VRR_A_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRR_C_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRR_E_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRX_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRS_A_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRS_B_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRS_C_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRR_B_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRI_A_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+ S390_VRI_C_OPCODE_LIST(EVALUATE_VR_INSTRUCTIONS)
+#undef EVALUATE_VR_INSTRUCTIONS
EVALUATE(DUMY);
EVALUATE(BKPT);
@@ -1204,4 +1249,4 @@ class Simulator : public SimulatorBase {
} // namespace v8
#endif // defined(USE_SIMULATOR)
-#endif // V8_S390_SIMULATOR_S390_H_
+#endif // V8_EXECUTION_S390_SIMULATOR_S390_H_
diff --git a/deps/v8/src/simulator-base.cc b/deps/v8/src/execution/simulator-base.cc
index 25e21cdbdf..b26c775917 100644
--- a/deps/v8/src/simulator-base.cc
+++ b/deps/v8/src/execution/simulator-base.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/simulator-base.h"
+#include "src/execution/simulator-base.h"
-#include "src/isolate.h"
-#include "src/simulator.h"
+#include "src/execution/isolate.h"
+#include "src/execution/simulator.h"
#if defined(USE_SIMULATOR)
diff --git a/deps/v8/src/simulator-base.h b/deps/v8/src/execution/simulator-base.h
index 09270ff5ae..6eca3f2b47 100644
--- a/deps/v8/src/simulator-base.h
+++ b/deps/v8/src/execution/simulator-base.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SIMULATOR_BASE_H_
-#define V8_SIMULATOR_BASE_H_
+#ifndef V8_EXECUTION_SIMULATOR_BASE_H_
+#define V8_EXECUTION_SIMULATOR_BASE_H_
#include <type_traits>
-#include "src/globals.h"
-#include "src/isolate.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#if defined(USE_SIMULATOR)
@@ -177,4 +177,4 @@ class Redirection {
} // namespace v8
#endif // defined(USE_SIMULATOR)
-#endif // V8_SIMULATOR_BASE_H_
+#endif // V8_EXECUTION_SIMULATOR_BASE_H_
diff --git a/deps/v8/src/simulator.h b/deps/v8/src/execution/simulator.h
index 37e8fd4f12..9f98f2039b 100644
--- a/deps/v8/src/simulator.h
+++ b/deps/v8/src/execution/simulator.h
@@ -2,30 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SIMULATOR_H_
-#define V8_SIMULATOR_H_
+#ifndef V8_EXECUTION_SIMULATOR_H_
+#define V8_EXECUTION_SIMULATOR_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/code.h"
#if !defined(USE_SIMULATOR)
-#include "src/utils.h"
+#include "src/utils/utils.h"
#endif
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// No simulator for ia32 or x64.
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/simulator-arm64.h"
+#include "src/execution/arm64/simulator-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/simulator-arm.h"
+#include "src/execution/arm/simulator-arm.h"
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/simulator-ppc.h"
+#include "src/execution/ppc/simulator-ppc.h"
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/simulator-mips.h"
+#include "src/execution/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/simulator-mips64.h"
+#include "src/execution/mips64/simulator-mips64.h"
#elif V8_TARGET_ARCH_S390
-#include "src/s390/simulator-s390.h"
+#include "src/execution/s390/simulator-s390.h"
#else
#error Unsupported target architecture.
#endif
@@ -109,7 +109,7 @@ class GeneratedCode {
}
static GeneratedCode FromCode(Code code) {
- return FromAddress(code->GetIsolate(), code->entry());
+ return FromAddress(code.GetIsolate(), code.entry());
}
#ifdef USE_SIMULATOR
@@ -167,4 +167,4 @@ class GeneratedCode<Return(Args...)> : public GeneratedCode<Return, Args...> {
} // namespace internal
} // namespace v8
-#endif // V8_SIMULATOR_H_
+#endif // V8_EXECUTION_SIMULATOR_H_
diff --git a/deps/v8/src/thread-id.cc b/deps/v8/src/execution/thread-id.cc
index a0585b3a41..a32d15e22f 100644
--- a/deps/v8/src/thread-id.cc
+++ b/deps/v8/src/execution/thread-id.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/thread-id.h"
+#include "src/execution/thread-id.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
diff --git a/deps/v8/src/thread-id.h b/deps/v8/src/execution/thread-id.h
index 68693bdaa6..27f05843f3 100644
--- a/deps/v8/src/thread-id.h
+++ b/deps/v8/src/execution/thread-id.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_THREAD_ID_H_
-#define V8_THREAD_ID_H_
+#ifndef V8_EXECUTION_THREAD_ID_H_
+#define V8_EXECUTION_THREAD_ID_H_
#include "src/base/macros.h"
@@ -51,4 +51,4 @@ class ThreadId {
} // namespace internal
} // namespace v8
-#endif // V8_THREAD_ID_H_
+#endif // V8_EXECUTION_THREAD_ID_H_
diff --git a/deps/v8/src/thread-local-top.cc b/deps/v8/src/execution/thread-local-top.cc
index 10cd6ea3a9..569333f276 100644
--- a/deps/v8/src/thread-local-top.cc
+++ b/deps/v8/src/execution/thread-local-top.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/thread-local-top.h"
-#include "src/isolate.h"
-#include "src/simulator.h"
+#include "src/execution/thread-local-top.h"
+#include "src/execution/isolate.h"
+#include "src/execution/simulator.h"
#include "src/trap-handler/trap-handler.h"
namespace v8 {
diff --git a/deps/v8/src/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index dd99221537..625fcc41dd 100644
--- a/deps/v8/src/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_THREAD_LOCAL_TOP_H_
-#define V8_THREAD_LOCAL_TOP_H_
+#ifndef V8_EXECUTION_THREAD_LOCAL_TOP_H_
+#define V8_EXECUTION_THREAD_LOCAL_TOP_H_
-#include "src/contexts.h"
-#include "src/globals.h"
-#include "src/thread-id.h"
+#include "src/common/globals.h"
+#include "src/execution/thread-id.h"
+#include "src/objects/contexts.h"
namespace v8 {
@@ -119,4 +119,4 @@ class ThreadLocalTop {
} // namespace internal
} // namespace v8
-#endif // V8_THREAD_LOCAL_TOP_H_
+#endif // V8_EXECUTION_THREAD_LOCAL_TOP_H_
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/execution/v8threads.cc
index f3ec82d1f3..6b99b81ef7 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/execution/v8threads.cc
@@ -2,19 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8threads.h"
+#include "src/execution/v8threads.h"
-#include "src/api.h"
-#include "src/bootstrapper.h"
+#include "src/api/api.h"
#include "src/debug/debug.h"
-#include "src/execution.h"
-#include "src/isolate-inl.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/objects/visitors.h"
#include "src/regexp/regexp-stack.h"
-#include "src/visitors.h"
namespace v8 {
-
namespace {
// Track whether this V8 instance has ever called v8::Locker. This allows the
@@ -23,7 +22,6 @@ base::Atomic32 g_locker_was_ever_used_ = 0;
} // namespace
-
// Once the Locker is initialized, the current thread will be guaranteed to have
// the lock for a given isolate.
void Locker::Initialize(v8::Isolate* isolate) {
@@ -51,19 +49,16 @@ void Locker::Initialize(v8::Isolate* isolate) {
DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread());
}
-
bool Locker::IsLocked(v8::Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}
-
bool Locker::IsActive() {
return !!base::Relaxed_Load(&g_locker_was_ever_used_);
}
-
Locker::~Locker() {
DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread());
if (has_lock_) {
@@ -76,7 +71,6 @@ Locker::~Locker() {
}
}
-
void Unlocker::Initialize(v8::Isolate* isolate) {
DCHECK_NOT_NULL(isolate);
isolate_ = reinterpret_cast<i::Isolate*>(isolate);
@@ -85,14 +79,12 @@ void Unlocker::Initialize(v8::Isolate* isolate) {
isolate_->thread_manager()->Unlock();
}
-
Unlocker::~Unlocker() {
DCHECK(!isolate_->thread_manager()->IsLockedByCurrentThread());
isolate_->thread_manager()->Lock();
isolate_->thread_manager()->RestoreThread();
}
-
namespace internal {
void ThreadManager::InitThread(const ExecutionAccess& lock) {
@@ -144,75 +136,60 @@ bool ThreadManager::RestoreThread() {
from = isolate_->regexp_stack()->RestoreStack(from);
from = isolate_->bootstrapper()->RestoreState(from);
per_thread->set_thread_state(nullptr);
- if (state->terminate_on_restore()) {
- isolate_->stack_guard()->RequestTerminateExecution();
- state->set_terminate_on_restore(false);
- }
state->set_id(ThreadId::Invalid());
state->Unlink();
state->LinkInto(ThreadState::FREE_LIST);
return true;
}
-
void ThreadManager::Lock() {
mutex_.Lock();
mutex_owner_.store(ThreadId::Current(), std::memory_order_relaxed);
DCHECK(IsLockedByCurrentThread());
}
-
void ThreadManager::Unlock() {
mutex_owner_.store(ThreadId::Invalid(), std::memory_order_relaxed);
mutex_.Unlock();
}
-
static int ArchiveSpacePerThread() {
return HandleScopeImplementer::ArchiveSpacePerThread() +
- Isolate::ArchiveSpacePerThread() +
- Debug::ArchiveSpacePerThread() +
- StackGuard::ArchiveSpacePerThread() +
- RegExpStack::ArchiveSpacePerThread() +
- Bootstrapper::ArchiveSpacePerThread() +
- Relocatable::ArchiveSpacePerThread();
+ Isolate::ArchiveSpacePerThread() + Debug::ArchiveSpacePerThread() +
+ StackGuard::ArchiveSpacePerThread() +
+ RegExpStack::ArchiveSpacePerThread() +
+ Bootstrapper::ArchiveSpacePerThread() +
+ Relocatable::ArchiveSpacePerThread();
}
ThreadState::ThreadState(ThreadManager* thread_manager)
: id_(ThreadId::Invalid()),
- terminate_on_restore_(false),
data_(nullptr),
next_(this),
previous_(this),
thread_manager_(thread_manager) {}
-ThreadState::~ThreadState() {
- DeleteArray<char>(data_);
-}
-
+ThreadState::~ThreadState() { DeleteArray<char>(data_); }
void ThreadState::AllocateSpace() {
data_ = NewArray<char>(ArchiveSpacePerThread());
}
-
void ThreadState::Unlink() {
next_->previous_ = previous_;
previous_->next_ = next_;
}
-
void ThreadState::LinkInto(List list) {
- ThreadState* flying_anchor =
- list == FREE_LIST ? thread_manager_->free_anchor_
- : thread_manager_->in_use_anchor_;
+ ThreadState* flying_anchor = list == FREE_LIST
+ ? thread_manager_->free_anchor_
+ : thread_manager_->in_use_anchor_;
next_ = flying_anchor->next_;
previous_ = flying_anchor;
flying_anchor->next_ = this;
next_->previous_ = this;
}
-
ThreadState* ThreadManager::GetFreeThreadState() {
ThreadState* gotten = free_anchor_->next_;
if (gotten == free_anchor_) {
@@ -223,13 +200,11 @@ ThreadState* ThreadManager::GetFreeThreadState() {
return gotten;
}
-
// Gets the first in the list of archived threads.
ThreadState* ThreadManager::FirstThreadStateInUse() {
return in_use_anchor_->Next();
}
-
ThreadState* ThreadState::Next() {
if (next_ == thread_manager_->in_use_anchor_) return nullptr;
return next_;
@@ -249,13 +224,11 @@ ThreadManager::ThreadManager(Isolate* isolate)
in_use_anchor_ = new ThreadState(this);
}
-
ThreadManager::~ThreadManager() {
DeleteThreadStateList(free_anchor_);
DeleteThreadStateList(in_use_anchor_);
}
-
void ThreadManager::DeleteThreadStateList(ThreadState* anchor) {
// The list starts and ends with the anchor.
for (ThreadState* current = anchor->next_; current != anchor;) {
@@ -266,7 +239,6 @@ void ThreadManager::DeleteThreadStateList(ThreadState* anchor) {
delete anchor;
}
-
void ThreadManager::ArchiveThread() {
DCHECK_EQ(lazily_archived_thread_, ThreadId::Invalid());
DCHECK(!IsArchived());
@@ -283,7 +255,6 @@ void ThreadManager::ArchiveThread() {
DCHECK_NE(state->id(), ThreadId::Invalid());
}
-
void ThreadManager::EagerlyArchiveThread() {
DCHECK(IsLockedByCurrentThread());
ThreadState* state = lazily_archived_thread_state_;
@@ -302,7 +273,6 @@ void ThreadManager::EagerlyArchiveThread() {
lazily_archived_thread_state_ = nullptr;
}
-
void ThreadManager::FreeThreadResources() {
DCHECK(!isolate_->has_pending_exception());
DCHECK(!isolate_->external_caught_exception());
@@ -315,7 +285,6 @@ void ThreadManager::FreeThreadResources() {
isolate_->bootstrapper()->FreeThreadResources();
}
-
bool ThreadManager::IsArchived() {
Isolate::PerIsolateThreadData* data =
isolate_->FindPerThreadDataForThisThread();
@@ -333,7 +302,6 @@ void ThreadManager::Iterate(RootVisitor* v) {
}
}
-
void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
for (ThreadState* state = FirstThreadStateInUse(); state != nullptr;
state = state->Next()) {
@@ -343,21 +311,7 @@ void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
}
}
-
-ThreadId ThreadManager::CurrentId() {
- return ThreadId::Current();
-}
-
-
-void ThreadManager::TerminateExecution(ThreadId thread_id) {
- for (ThreadState* state = FirstThreadStateInUse(); state != nullptr;
- state = state->Next()) {
- if (thread_id == state->id()) {
- state->set_terminate_on_restore(true);
- }
- }
-}
-
+ThreadId ThreadManager::CurrentId() { return ThreadId::Current(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/execution/v8threads.h
index 1c87fb6cf6..c59b5ccee3 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/execution/v8threads.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_V8THREADS_H_
-#define V8_V8THREADS_H_
+#ifndef V8_EXECUTION_V8THREADS_H_
+#define V8_EXECUTION_V8THREADS_H_
#include <atomic>
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
@@ -20,7 +20,7 @@ class ThreadState {
// Returns nullptr after the last one.
ThreadState* Next();
- enum List {FREE_LIST, IN_USE_LIST};
+ enum List { FREE_LIST, IN_USE_LIST };
void LinkInto(List list);
void Unlink();
@@ -29,12 +29,6 @@ class ThreadState {
void set_id(ThreadId id) { id_ = id; }
ThreadId id() { return id_; }
- // Should the thread be terminated when it is restored?
- bool terminate_on_restore() { return terminate_on_restore_; }
- void set_terminate_on_restore(bool terminate_on_restore) {
- terminate_on_restore_ = terminate_on_restore;
- }
-
// Get data area for archiving a thread.
char* data() { return data_; }
@@ -45,7 +39,6 @@ class ThreadState {
void AllocateSpace();
ThreadId id_;
- bool terminate_on_restore_;
char* data_;
ThreadState* next_;
ThreadState* previous_;
@@ -83,8 +76,6 @@ class ThreadManager {
ThreadId CurrentId();
- void TerminateExecution(ThreadId thread_id);
-
// Iterate over in-use states.
ThreadState* FirstThreadStateInUse();
ThreadState* GetFreeThreadState();
@@ -118,8 +109,7 @@ class ThreadManager {
friend class ThreadState;
};
-
} // namespace internal
} // namespace v8
-#endif // V8_V8THREADS_H_
+#endif // V8_EXECUTION_V8THREADS_H_
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/execution/vm-state-inl.h
index d22e1abd69..87dc185f2d 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/execution/vm-state-inl.h
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_VM_STATE_INL_H_
-#define V8_VM_STATE_INL_H_
+#ifndef V8_EXECUTION_VM_STATE_INL_H_
+#define V8_EXECUTION_VM_STATE_INL_H_
-#include "src/vm-state.h"
-#include "src/log.h"
-#include "src/simulator.h"
+#include "src/execution/isolate.h"
+#include "src/execution/simulator.h"
+#include "src/execution/vm-state.h"
+#include "src/logging/log.h"
#include "src/tracing/trace-event.h"
namespace v8 {
@@ -39,22 +40,14 @@ inline const char* StateToString(StateTag state) {
}
}
-
template <StateTag Tag>
VMState<Tag>::VMState(Isolate* isolate)
: isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
- if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
- LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
- }
isolate_->set_current_vm_state(Tag);
}
-
template <StateTag Tag>
VMState<Tag>::~VMState() {
- if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
- LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
- }
isolate_->set_current_vm_state(previous_tag_);
}
@@ -84,8 +77,7 @@ Address ExternalCallbackScope::scope_address() {
#endif
}
-
} // namespace internal
} // namespace v8
-#endif // V8_VM_STATE_INL_H_
+#endif // V8_EXECUTION_VM_STATE_INL_H_
diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/execution/vm-state.h
index a9bd08b6cd..38b70f5a95 100644
--- a/deps/v8/src/vm-state.h
+++ b/deps/v8/src/execution/vm-state.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_VM_STATE_H_
-#define V8_VM_STATE_H_
+#ifndef V8_EXECUTION_VM_STATE_H_
+#define V8_EXECUTION_VM_STATE_H_
-#include "src/allocation.h"
-#include "src/counters.h"
+#include "include/v8.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -55,5 +55,4 @@ class ExternalCallbackScope {
} // namespace internal
} // namespace v8
-
-#endif // V8_VM_STATE_H_
+#endif // V8_EXECUTION_VM_STATE_H_
diff --git a/deps/v8/src/x64/frame-constants-x64.cc b/deps/v8/src/execution/x64/frame-constants-x64.cc
index 9780bb2d7a..2a55fea9c9 100644
--- a/deps/v8/src/x64/frame-constants-x64.cc
+++ b/deps/v8/src/execution/x64/frame-constants-x64.cc
@@ -4,11 +4,10 @@
#if V8_TARGET_ARCH_X64
-#include "src/x64/frame-constants-x64.h"
-
-#include "src/frame-constants.h"
-#include "src/x64/assembler-x64-inl.h"
+#include "src/execution/x64/frame-constants-x64.h"
+#include "src/codegen/x64/assembler-x64-inl.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/frame-constants-x64.h b/deps/v8/src/execution/x64/frame-constants-x64.h
index 15c6ebfebb..5af35b1b3b 100644
--- a/deps/v8/src/x64/frame-constants-x64.h
+++ b/deps/v8/src/execution/x64/frame-constants-x64.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_X64_FRAME_CONSTANTS_X64_H_
-#define V8_X64_FRAME_CONSTANTS_X64_H_
+#ifndef V8_EXECUTION_X64_FRAME_CONSTANTS_X64_H_
+#define V8_EXECUTION_X64_FRAME_CONSTANTS_X64_H_
#include "src/base/macros.h"
-#include "src/frame-constants.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
@@ -86,4 +86,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_X64_FRAME_CONSTANTS_X64_H_
+#endif // V8_EXECUTION_X64_FRAME_CONSTANTS_X64_H_
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index b19128a941..00a946b6ff 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -4,10 +4,10 @@
#include "src/extensions/externalize-string-extension.h"
-#include "src/api-inl.h"
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -31,11 +31,10 @@ class SimpleStringResource : public Base {
const size_t length_;
};
-
-typedef SimpleStringResource<char, v8::String::ExternalOneByteStringResource>
- SimpleOneByteStringResource;
-typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
- SimpleTwoByteStringResource;
+using SimpleOneByteStringResource =
+ SimpleStringResource<char, v8::String::ExternalOneByteStringResource>;
+using SimpleTwoByteStringResource =
+ SimpleStringResource<uc16, v8::String::ExternalStringResource>;
const char* const ExternalizeStringExtension::kSource =
"native function externalizeString();"
diff --git a/deps/v8/src/extensions/free-buffer-extension.cc b/deps/v8/src/extensions/free-buffer-extension.cc
index 5bd56312a5..975e9543c8 100644
--- a/deps/v8/src/extensions/free-buffer-extension.cc
+++ b/deps/v8/src/extensions/free-buffer-extension.cc
@@ -5,7 +5,7 @@
#include "src/extensions/free-buffer-extension.h"
#include "src/base/platform/platform.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index 4fd35d4233..7b517637f7 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -6,7 +6,7 @@
#define V8_EXTENSIONS_GC_EXTENSION_H_
#include "include/v8.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/ignition-statistics-extension.cc b/deps/v8/src/extensions/ignition-statistics-extension.cc
index c5e7eb9f14..869dcd59b8 100644
--- a/deps/v8/src/extensions/ignition-statistics-extension.cc
+++ b/deps/v8/src/extensions/ignition-statistics-extension.cc
@@ -5,9 +5,9 @@
#include "src/extensions/ignition-statistics-extension.h"
#include "src/base/logging.h"
+#include "src/execution/isolate.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index a13a4ce351..458aec38f3 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -4,9 +4,9 @@
#include "src/extensions/statistics-extension.h"
-#include "src/counters.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
-#include "src/isolate.h"
+#include "src/logging/counters.h"
namespace v8 {
namespace internal {
@@ -129,16 +129,16 @@ void StatisticsExtension::GetCounters(
int source_position_table_total = 0;
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (obj->IsCode()) {
+ if (obj.IsCode()) {
Code code = Code::cast(obj);
- reloc_info_total += code->relocation_info()->Size();
- ByteArray source_position_table = code->SourcePositionTable();
- if (source_position_table->length() > 0) {
- source_position_table_total += code->SourcePositionTable()->Size();
+ reloc_info_total += code.relocation_info().Size();
+ ByteArray source_position_table = code.SourcePositionTable();
+ if (source_position_table.length() > 0) {
+ source_position_table_total += code.SourcePositionTable().Size();
}
- } else if (obj->IsBytecodeArray()) {
+ } else if (obj.IsBytecodeArray()) {
source_position_table_total +=
- BytecodeArray::cast(obj)->SourcePositionTable()->Size();
+ BytecodeArray::cast(obj).SourcePositionTable().Size();
}
}
diff --git a/deps/v8/src/extensions/trigger-failure-extension.cc b/deps/v8/src/extensions/trigger-failure-extension.cc
index 1f7f72e100..44c07fbc00 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.cc
+++ b/deps/v8/src/extensions/trigger-failure-extension.cc
@@ -5,7 +5,7 @@
#include "src/extensions/trigger-failure-extension.h"
#include "src/base/logging.h"
-#include "src/checks.h"
+#include "src/common/checks.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index da33cd170d..0ef23def1e 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -12,10 +12,10 @@
//
// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
-#define DEFINE_IMPLICATION(whenflag, thenflag) \
+#define DEFINE_IMPLICATION(whenflag, thenflag) \
DEFINE_VALUE_IMPLICATION(whenflag, thenflag, true)
-#define DEFINE_NEG_IMPLICATION(whenflag, thenflag) \
+#define DEFINE_NEG_IMPLICATION(whenflag, thenflag) \
DEFINE_VALUE_IMPLICATION(whenflag, thenflag, false)
#define DEFINE_NEG_NEG_IMPLICATION(whenflag, thenflag) \
@@ -51,15 +51,11 @@
// We want to write entries into our meta data table, for internal parsing and
// printing / etc in the flag parser code. We only do this for writable flags.
#elif defined(FLAG_MODE_META)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
- { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false } \
- ,
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
+ {Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false},
#define FLAG_ALIAS(ftype, ctype, alias, nam) \
- { \
- Flag::TYPE_##ftype, #alias, &FLAG_##nam, &FLAGDEFAULT_##nam, \
- "alias for --" #nam, false \
- } \
- ,
+ {Flag::TYPE_##ftype, #alias, &FLAG_##nam, &FLAGDEFAULT_##nam, \
+ "alias for --" #nam, false},
// We produce the code to set flags when it is implied by another flag.
#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
@@ -159,9 +155,9 @@ struct MaybeBoolFlag {
#endif
#ifdef V8_OS_WIN
-# define ENABLE_LOG_COLOUR false
+#define ENABLE_LOG_COLOUR false
#else
-# define ENABLE_LOG_COLOUR true
+#define ENABLE_LOG_COLOUR true
#endif
#define DEFINE_BOOL(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
@@ -204,65 +200,53 @@ DEFINE_IMPLICATION(es_staging, harmony)
// Enabling import.meta requires to also enable import()
DEFINE_IMPLICATION(harmony_import_meta, harmony_dynamic_import)
-DEFINE_IMPLICATION(harmony_class_fields, harmony_public_fields)
-DEFINE_IMPLICATION(harmony_class_fields, harmony_static_fields)
-DEFINE_IMPLICATION(harmony_class_fields, harmony_private_fields)
-
-DEFINE_IMPLICATION(harmony_private_methods, harmony_private_fields)
-
// Update bootstrapper.cc whenever adding a new feature flag.
// Features that are still work in progress (behind individual flags).
#define HARMONY_INPROGRESS_BASE(V) \
- V(harmony_class_fields, "harmony fields in class literals") \
V(harmony_private_methods, "harmony private methods in class literals") \
V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
V(harmony_weak_refs, "harmony weak references")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
- V(harmony_intl_date_format_range, "DateTimeFormat formatRange")
+#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
#endif
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V) \
- V(harmony_promise_all_settled, "harmony Promise.allSettled")
+#define HARMONY_STAGED_BASE(V)
#ifdef V8_INTL_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
- V(harmony_intl_bigint, "BigInt.prototype.toLocaleString") \
- V(harmony_intl_datetime_style, "dateStyle timeStyle for DateTimeFormat") \
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(harmony_intl_add_calendar_numbering_system, \
+ "Add calendar and numberingSystem to DateTimeFormat") \
+ V(harmony_intl_numberformat_unified, "Unified Intl.NumberFormat Features") \
V(harmony_intl_segmenter, "Intl.Segmenter")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
#endif
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_namespace_exports, \
- "harmony namespace exports (export * as foo from 'bar')") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_import_meta, "harmony import.meta property") \
- V(harmony_dynamic_import, "harmony dynamic import") \
- V(harmony_global, "harmony global") \
- V(harmony_json_stringify, "well-formed JSON.stringify") \
- V(harmony_public_fields, "harmony public instance fields in class literals") \
- V(harmony_static_fields, "harmony static fields in class literals") \
- V(harmony_string_matchall, "harmony String.prototype.matchAll") \
- V(harmony_object_from_entries, "harmony Object.fromEntries()") \
- V(harmony_await_optimization, "harmony await taking 1 tick") \
- V(harmony_private_fields, "harmony private fields in class literals") \
- V(harmony_hashbang, "harmony hashbang syntax") \
- V(harmony_numeric_separator, "harmony numeric separator between digits")
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_namespace_exports, \
+ "harmony namespace exports (export * as foo from 'bar')") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_import_meta, "harmony import.meta property") \
+ V(harmony_dynamic_import, "harmony dynamic import") \
+ V(harmony_global, "harmony global") \
+ V(harmony_object_from_entries, "harmony Object.fromEntries()") \
+ V(harmony_hashbang, "harmony hashbang syntax") \
+ V(harmony_numeric_separator, "harmony numeric separator between digits") \
+ V(harmony_promise_all_settled, "harmony Promise.allSettled")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_locale, "Intl.Locale")
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_bigint, "BigInt.prototype.toLocaleString") \
+ V(harmony_intl_date_format_range, "DateTimeFormat formatRange") \
+ V(harmony_intl_datetime_style, "dateStyle timeStyle for DateTimeFormat")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -272,13 +256,12 @@ DEFINE_IMPLICATION(harmony_private_methods, harmony_private_fields)
// and associated tests are moved from the harmony directory to the appropriate
// esN directory.
-
#define FLAG_INPROGRESS_FEATURES(id, description) \
DEFINE_BOOL(id, false, "enable " #description " (in progress)")
HARMONY_INPROGRESS(FLAG_INPROGRESS_FEATURES)
#undef FLAG_INPROGRESS_FEATURES
-#define FLAG_STAGED_FEATURES(id, description) \
+#define FLAG_STAGED_FEATURES(id, description) \
DEFINE_BOOL(id, false, "enable " #description) \
DEFINE_IMPLICATION(harmony, id)
HARMONY_STAGED(FLAG_STAGED_FEATURES)
@@ -306,6 +289,15 @@ DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
#define V8_LITE_BOOL false
#endif
+DEFINE_BOOL(lite_mode, V8_LITE_BOOL,
+ "enables trade-off of performance for memory savings")
+
+// Lite mode implies other flags to trade-off performance for memory.
+DEFINE_IMPLICATION(lite_mode, jitless)
+DEFINE_IMPLICATION(lite_mode, lazy_feedback_allocation)
+DEFINE_IMPLICATION(lite_mode, enable_lazy_source_positions)
+DEFINE_IMPLICATION(lite_mode, optimize_for_size)
+
#ifdef V8_ENABLE_FUTURE
#define FUTURE_BOOL true
#else
@@ -355,8 +347,8 @@ DEFINE_BOOL(enable_one_shot_optimization, true,
"only be executed once")
// Flag for sealed, frozen elements kind instead of dictionary elements kind
-DEFINE_BOOL(enable_sealed_frozen_elements_kind, true,
- "Enable sealed, frozen elements kind")
+DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, true,
+ "Enable sealed, frozen elements kind")
// Flags for data representation optimizations
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -365,10 +357,33 @@ DEFINE_BOOL_READONLY(string_slices, true, "use string slices")
DEFINE_INT(interrupt_budget, 144 * KB,
"interrupt budget which should be used for the profiler counter")
+// Flags for jitless
+DEFINE_BOOL(jitless, V8_LITE_BOOL,
+ "Disable runtime allocation of executable memory.")
+
+// Jitless V8 has a few implications:
+DEFINE_NEG_IMPLICATION(jitless, opt)
+// Field representation tracking is only used by TurboFan.
+DEFINE_NEG_IMPLICATION(jitless, track_field_types)
+DEFINE_NEG_IMPLICATION(jitless, track_heap_object_fields)
+// Regexps are interpreted.
+DEFINE_IMPLICATION(jitless, regexp_interpret_all)
+// asm.js validation is disabled since it triggers wasm code generation.
+DEFINE_NEG_IMPLICATION(jitless, validate_asm)
+// Wasm is put into interpreter-only mode. We repeat flag implications down
+// here to ensure they're applied correctly by setting the --jitless flag.
+DEFINE_IMPLICATION(jitless, wasm_interpret_all)
+DEFINE_NEG_IMPLICATION(jitless, asm_wasm_lazy_compilation)
+DEFINE_NEG_IMPLICATION(jitless, wasm_lazy_compilation)
+// --jitless also implies --no-expose-wasm, see InitializeOncePerProcessImpl.
+
+// Flags for inline caching and feedback vectors.
+DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_INT(budget_for_feedback_vector_allocation, 1 * KB,
"The budget in amount of bytecode executed by a function before we "
"decide to allocate feedback vectors")
DEFINE_BOOL(lazy_feedback_allocation, false, "Allocate feedback vectors lazily")
+DEFINE_IMPLICATION(future, lazy_feedback_allocation)
// Flags for Ignition.
DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
@@ -381,7 +396,7 @@ DEFINE_BOOL(ignition_share_named_property_feedback, true,
"the same object")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
-DEFINE_BOOL(enable_lazy_source_positions, V8_LITE_BOOL,
+DEFINE_BOOL(enable_lazy_source_positions, false,
"skip generating source positions during initial compile but "
"regenerate when actually required")
DEFINE_STRING(print_bytecode_filter, "*",
@@ -437,14 +452,11 @@ DEFINE_INT(deopt_every_n_times, 0,
DEFINE_BOOL(print_deopt_stress, false, "print number of possible deopt points")
// Flags for TurboFan.
+DEFINE_BOOL(opt, true, "use adaptive optimizations")
DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
-DEFINE_BOOL(turbo_preprocess_ranges, true,
- "run pre-register allocation heuristics")
DEFINE_BOOL(turbo_control_flow_aware_allocation, false,
"consider control flow while allocating registers")
-DEFINE_NEG_IMPLICATION(turbo_control_flow_aware_allocation,
- turbo_preprocess_ranges)
DEFINE_STRING(turbo_filter, "*", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
@@ -521,7 +533,6 @@ DEFINE_VALUE_IMPLICATION(stress_inline, min_inlining_frequency, 0)
DEFINE_VALUE_IMPLICATION(stress_inline, polymorphic_inlining, true)
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
-DEFINE_BOOL(inline_into_try, true, "inline into try blocks")
DEFINE_BOOL(turbo_inline_array_builtins, true,
"inline array builtins in TurboFan code")
DEFINE_BOOL(use_osr, true, "use on-stack replacement")
@@ -559,6 +570,12 @@ DEFINE_BOOL(
stress_gc_during_compilation, false,
"simulate GC/compiler thread race related to https://crbug.com/v8/8520")
+// Favor memory over execution speed.
+DEFINE_BOOL(optimize_for_size, false,
+ "Enables optimizations which favor memory size over execution "
+ "speed")
+DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
+
#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
#else
@@ -606,8 +623,6 @@ DEFINE_IMPLICATION(future, wasm_tier_up)
#endif
DEFINE_IMPLICATION(wasm_tier_up, liftoff)
DEFINE_DEBUG_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
-DEFINE_DEBUG_BOOL(trace_wasm_decode_time, false,
- "trace decoding time of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
DEFINE_DEBUG_BOOL(trace_wasm_interpreter, false,
"trace interpretation of wasm code")
@@ -673,12 +688,12 @@ DEFINE_BOOL(wasm_trap_handler, true,
"use signal handlers to catch out of bounds memory access in wasm"
" (currently Linux x86_64 only)")
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
- "Generate a test case when running a wasm fuzzer")
+ "generate a test case when running a wasm fuzzer")
DEFINE_IMPLICATION(wasm_fuzzer_gen_test, single_threaded)
DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
DEFINE_BOOL(print_wasm_stub_code, false, "Print WebAssembly stub code")
DEFINE_BOOL(wasm_interpret_all, false,
- "Execute all wasm code in the wasm interpreter")
+ "execute all wasm code in the wasm interpreter")
DEFINE_BOOL(asm_wasm_lazy_compilation, false,
"enable lazy compilation for asm-wasm modules")
DEFINE_IMPLICATION(validate_asm, asm_wasm_lazy_compilation)
@@ -687,13 +702,18 @@ DEFINE_BOOL(wasm_lazy_compilation, false,
DEFINE_DEBUG_BOOL(trace_wasm_lazy_compilation, false,
"trace lazy compilation of wasm functions")
DEFINE_BOOL(wasm_grow_shared_memory, false,
- "Allow growing shared WebAssembly memory objects")
+ "allow growing shared WebAssembly memory objects")
+DEFINE_BOOL(wasm_lazy_validation, false,
+ "enable lazy validation for lazily compiled wasm functions")
// wasm-interpret-all resets {asm-,}wasm-lazy-compilation.
DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_tier_up)
DEFINE_BOOL(wasm_code_gc, false, "enable garbage collection of wasm code")
DEFINE_IMPLICATION(future, wasm_code_gc)
+DEFINE_BOOL(trace_wasm_code_gc, false, "trace garbage collection of wasm code")
+DEFINE_BOOL(stress_wasm_code_gc, false,
+ "stress test garbage collection of wasm code")
// Profiler flags.
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
@@ -713,7 +733,12 @@ DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
"Grow the new space based on the percentage of survivors instead "
"of their absolute value.")
DEFINE_SIZE_T(max_old_space_size, 0, "max size of the old space (in Mbytes)")
+DEFINE_BOOL(huge_max_old_generation_size, false,
+ "Increase max size of the old space to 4 GB for x64 systems with"
+ "the physical memory bigger than 16 GB")
DEFINE_SIZE_T(initial_old_space_size, 0, "initial old space size (in Mbytes)")
+DEFINE_BOOL(global_gc_scheduling, false,
+ "enable GC scheduling based on global memory")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
DEFINE_INT(random_gc_interval, 0,
"Collect garbage after random(0, X) allocations. It overrides "
@@ -853,6 +878,12 @@ DEFINE_INT(stress_scavenge, 0,
DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_marking)
DEFINE_IMPLICATION(fuzzer_gc_analysis, stress_scavenge)
+// These flags will be removed after experiments. Do not rely on them.
+DEFINE_BOOL(gc_experiment_background_schedule, false,
+ "new background GC schedule heuristics")
+DEFINE_BOOL(gc_experiment_less_compaction, false,
+ "less compaction in non-memory reducing mode")
+
DEFINE_BOOL(disable_abortjs, false, "disables AbortJS runtime function")
DEFINE_BOOL(manual_evacuation_candidates_selection, false,
@@ -931,6 +962,8 @@ DEFINE_BOOL(expose_trigger_failure, false, "expose trigger-failure extension")
DEFINE_INT(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_BOOL(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
+DEFINE_BOOL(experimental_stack_trace_frames, false,
+ "enable experimental frames (API/Builtins) and stack trace layout")
DEFINE_BOOL(disallow_code_generation_from_strings, false,
"disallow eval and friends")
DEFINE_BOOL(expose_async_hooks, false, "expose async_hooks object")
@@ -943,8 +976,8 @@ DEFINE_BOOL(test_small_max_function_context_stub_size, false,
"enable testing the function context size overflow path "
"by making the maximum size smaller")
-// builtins-ia32.cc
DEFINE_BOOL(inline_new, true, "use fast inline allocation")
+DEFINE_NEG_NEG_IMPLICATION(inline_new, turbo_allocation_folding)
// codegen-ia32.cc / codegen-arm.cc
DEFINE_BOOL(trace, false, "trace function calls")
@@ -986,16 +1019,6 @@ DEFINE_BOOL(trace_compiler_dispatcher, false,
DEFINE_INT(cpu_profiler_sampling_interval, 1000,
"CPU profiler sampling interval in microseconds")
-// Array abuse tracing
-DEFINE_BOOL(trace_js_array_abuse, false,
- "trace out-of-bounds accesses to JS arrays")
-DEFINE_BOOL(trace_external_array_abuse, false,
- "trace out-of-bounds-accesses to external arrays")
-DEFINE_BOOL(trace_array_abuse, false,
- "trace out-of-bounds accesses to all arrays")
-DEFINE_IMPLICATION(trace_array_abuse, trace_js_array_abuse)
-DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse)
-
// debugger
DEFINE_BOOL(
trace_side_effect_free_debug_evaluate, false,
@@ -1044,11 +1067,9 @@ DEFINE_IMPLICATION(trace_ic, log_code)
DEFINE_GENERIC_IMPLICATION(
trace_ic, TracingFlags::ic_stats.store(
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE))
-DEFINE_BOOL_READONLY(track_constant_fields, true,
- "enable constant field tracking")
DEFINE_BOOL_READONLY(fast_map_update, false,
"enable fast map update by caching the migration target")
-DEFINE_BOOL(modify_field_representation_inplace, false,
+DEFINE_BOOL(modify_field_representation_inplace, true,
"enable in-place field representation updates")
DEFINE_INT(max_polymorphic_map_count, 4,
"maximum number of maps to track in POLYMORPHIC state")
@@ -1098,17 +1119,15 @@ DEFINE_BOOL(trace_sim_messages, false,
// isolate.cc
DEFINE_BOOL(async_stack_traces, true,
"include async stack traces in Error.stack")
-DEFINE_IMPLICATION(async_stack_traces, harmony_await_optimization)
DEFINE_BOOL(stack_trace_on_illegal, false,
"print stack trace when an illegal exception is thrown")
DEFINE_BOOL(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
-// TODO(jgruber,machenbach): Rename to --correctness-fuzzer-suppressions.
-DEFINE_BOOL(abort_on_stack_or_string_length_overflow, false,
- "Abort program when the stack overflows or a string exceeds "
- "maximum length (as opposed to throwing RangeError). This is "
- "useful for fuzzing where the spec behaviour would introduce "
- "nondeterminism.")
+DEFINE_BOOL(correctness_fuzzer_suppressions, false,
+ "Suppress certain unspecified behaviors to ease correctness "
+ "fuzzing: Abort program when the stack overflows or a string "
+ "exceeds maximum length (as opposed to throwing RangeError). "
+ "Use a fixed suppression string for error messages.")
DEFINE_BOOL(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
@@ -1155,7 +1174,7 @@ DEFINE_UINT(serialization_chunk_size, 4096,
// Regexp
DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
DEFINE_BOOL(regexp_mode_modifiers, false, "enable inline flags in regexp.")
-DEFINE_BOOL(regexp_interpret_all, false, "interpret all regexp code")
+DEFINE_BOOL(regexp_interpret_all, false, "interpret all regexp code")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag")
@@ -1175,6 +1194,8 @@ DEFINE_STRING(startup_src, nullptr,
"Write V8 startup as C++ src. (mksnapshot only)")
DEFINE_STRING(startup_blob, nullptr,
"Write V8 startup blob file. (mksnapshot only)")
+DEFINE_STRING(target_arch, nullptr,
+ "The mksnapshot target arch. (mksnapshot only)")
DEFINE_STRING(target_os, nullptr, "The mksnapshot target os. (mksnapshot only)")
//
@@ -1206,50 +1227,6 @@ DEFINE_SIZE_T(mock_arraybuffer_allocator_limit, 0,
"OOM for testing.")
//
-// Flags only available in non-Lite modes.
-//
-#undef FLAG
-#ifdef V8_LITE_MODE
-#define FLAG FLAG_READONLY
-#else
-#define FLAG FLAG_FULL
-#endif
-
-DEFINE_BOOL(jitless, V8_LITE_BOOL,
- "Disable runtime allocation of executable memory.")
-
-// Jitless V8 has a few implications:
-#ifndef V8_LITE_MODE
-// Optimizations (i.e. jitting) are disabled.
-DEFINE_NEG_IMPLICATION(jitless, opt)
-#endif
-// Field representation tracking is only used by TurboFan.
-DEFINE_NEG_IMPLICATION(jitless, track_field_types)
-DEFINE_NEG_IMPLICATION(jitless, track_heap_object_fields)
-// Regexps are interpreted.
-DEFINE_IMPLICATION(jitless, regexp_interpret_all)
-// asm.js validation is disabled since it triggers wasm code generation.
-DEFINE_NEG_IMPLICATION(jitless, validate_asm)
-// Wasm is put into interpreter-only mode. We repeat flag implications down
-// here to ensure they're applied correctly by setting the --jitless flag.
-DEFINE_IMPLICATION(jitless, wasm_interpret_all)
-DEFINE_NEG_IMPLICATION(jitless, asm_wasm_lazy_compilation)
-DEFINE_NEG_IMPLICATION(jitless, wasm_lazy_compilation)
-// --jitless also implies --no-expose-wasm, see InitializeOncePerProcessImpl.
-
-// Enable recompilation of function with optimized code.
-DEFINE_BOOL(opt, !V8_LITE_BOOL, "use adaptive optimizations")
-
-// Enable use of inline caches to optimize object access operations.
-DEFINE_BOOL(use_ic, true, "use inline caching")
-
-// Favor memory over execution speed.
-DEFINE_BOOL(optimize_for_size, V8_LITE_BOOL,
- "Enables optimizations which favor memory size over execution "
- "speed")
-DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
-
-//
// GDB JIT integration flags.
//
#undef FLAG
@@ -1399,9 +1376,6 @@ DEFINE_IMPLICATION(perf_prof, perf_prof_unwinding_info)
DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
-DEFINE_BOOL(log_timer_events, false,
- "Time events including external callbacks.")
-DEFINE_IMPLICATION(log_timer_events, log_internal_timer_events)
DEFINE_IMPLICATION(log_internal_timer_events, prof)
DEFINE_BOOL(log_instruction_stats, false, "Log AArch64 instruction statistics.")
DEFINE_STRING(log_instruction_file, "arm64_inst.csv",
@@ -1418,8 +1392,7 @@ DEFINE_STRING(redirect_code_traces_to, nullptr,
DEFINE_BOOL(print_opt_source, false,
"print source code of optimized and inlined functions")
-DEFINE_BOOL(win64_unwinding_info, false,
- "Enable unwinding info for Windows/x64 (experimental).")
+DEFINE_BOOL(win64_unwinding_info, true, "Enable unwinding info for Windows/x64")
#ifdef V8_TARGET_ARCH_ARM
// Unsupported on arm. See https://crbug.com/v8/8713.
@@ -1553,11 +1526,6 @@ DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
DEFINE_BOOL(raw_heap_snapshots, V8_ENABLE_RAW_HEAP_SNAPSHOTS_BOOL,
"enable raw heap snapshots contain garbage collection internals")
-DEFINE_BOOL(lite_mode, V8_LITE_BOOL,
- "enables trade-off of performance for memory savings "
- "(Lite mode only)")
-DEFINE_IMPLICATION(lite_mode, lazy_feedback_allocation)
-
// Cleanup...
#undef FLAG_FULL
#undef FLAG_READONLY
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags/flags.cc
index d16c547c52..147e8b20ce 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags/flags.cc
@@ -2,21 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/flags.h"
+#include "src/flags/flags.h"
#include <cctype>
#include <cerrno>
+#include <cinttypes>
#include <cstdlib>
#include <sstream>
-#include "src/allocation.h"
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
-#include "src/counters.h"
-#include "src/cpu-features.h"
-#include "src/memcopy.h"
-#include "src/ostreams.h"
-#include "src/utils.h"
+#include "src/codegen/cpu-features.h"
+#include "src/logging/counters.h"
+#include "src/utils/allocation.h"
+#include "src/utils/memcopy.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/utils.h"
#include "src/wasm/wasm-limits.h"
namespace v8 {
@@ -24,11 +25,11 @@ namespace internal {
// Define all of our flags.
#define FLAG_MODE_DEFINE
-#include "src/flag-definitions.h" // NOLINT(build/include)
+#include "src/flags/flag-definitions.h" // NOLINT(build/include)
// Define all of our flags default values.
#define FLAG_MODE_DEFINE_DEFAULTS
-#include "src/flag-definitions.h" // NOLINT(build/include)
+#include "src/flags/flag-definitions.h" // NOLINT(build/include)
namespace {
@@ -47,12 +48,12 @@ struct Flag {
TYPE_STRING,
};
- FlagType type_; // What type of flag, bool, int, or string.
- const char* name_; // Name of the flag, ex "my_flag".
- void* valptr_; // Pointer to the global flag variable.
- const void* defptr_; // Pointer to the default value.
- const char* cmt_; // A comment about the flags purpose.
- bool owns_ptr_; // Does the flag own its string value?
+ FlagType type_; // What type of flag, bool, int, or string.
+ const char* name_; // Name of the flag, ex "my_flag".
+ void* valptr_; // Pointer to the global flag variable.
+ const void* defptr_; // Pointer to the default value.
+ const char* cmt_; // A comment about the flags purpose.
+ bool owns_ptr_; // Does the flag own its string value?
FlagType type() const { return type_; }
@@ -140,7 +141,7 @@ struct Flag {
const char* string_default() const {
DCHECK(type_ == TYPE_STRING);
- return *reinterpret_cast<const char* const *>(defptr_);
+ return *reinterpret_cast<const char* const*>(defptr_);
}
// Compare this flag's current value against the default.
@@ -204,32 +205,35 @@ struct Flag {
Flag flags[] = {
#define FLAG_MODE_META
-#include "src/flag-definitions.h" // NOLINT(build/include)
+#include "src/flags/flag-definitions.h" // NOLINT(build/include)
};
const size_t num_flags = sizeof(flags) / sizeof(*flags);
} // namespace
-
static const char* Type2String(Flag::FlagType type) {
switch (type) {
- case Flag::TYPE_BOOL: return "bool";
- case Flag::TYPE_MAYBE_BOOL: return "maybe_bool";
- case Flag::TYPE_INT: return "int";
+ case Flag::TYPE_BOOL:
+ return "bool";
+ case Flag::TYPE_MAYBE_BOOL:
+ return "maybe_bool";
+ case Flag::TYPE_INT:
+ return "int";
case Flag::TYPE_UINT:
return "uint";
case Flag::TYPE_UINT64:
return "uint64";
- case Flag::TYPE_FLOAT: return "float";
+ case Flag::TYPE_FLOAT:
+ return "float";
case Flag::TYPE_SIZE_T:
return "size_t";
- case Flag::TYPE_STRING: return "string";
+ case Flag::TYPE_STRING:
+ return "string";
}
UNREACHABLE();
}
-
std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
switch (flag.type()) {
case Flag::TYPE_BOOL:
@@ -264,7 +268,6 @@ std::ostream& operator<<(std::ostream& os, const Flag& flag) { // NOLINT
return os;
}
-
// static
std::vector<const char*>* FlagList::argv() {
std::vector<const char*>* args = new std::vector<const char*>(8);
@@ -287,10 +290,7 @@ std::vector<const char*>* FlagList::argv() {
return args;
}
-
-inline char NormalizeChar(char ch) {
- return ch == '_' ? '-' : ch;
-}
+inline char NormalizeChar(char ch) { return ch == '_' ? '-' : ch; }
// Helper function to parse flags: Takes an argument arg and splits it into
// a flag name and flag value (or nullptr if they are missing). negated is set
@@ -307,19 +307,18 @@ static void SplitArgument(const char* arg, char* buffer, int buffer_size,
// find the begin of the flag name
arg++; // remove 1st '-'
if (*arg == '-') {
- arg++; // remove 2nd '-'
+ arg++; // remove 2nd '-'
DCHECK_NE('\0', arg[0]); // '--' arguments are handled in the caller.
}
if (arg[0] == 'n' && arg[1] == 'o') {
- arg += 2; // remove "no"
+ arg += 2; // remove "no"
if (NormalizeChar(arg[0]) == '-') arg++; // remove dash after "no".
*negated = true;
}
*name = arg;
// find the end of the flag name
- while (*arg != '\0' && *arg != '=')
- arg++;
+ while (*arg != '\0' && *arg != '=') arg++;
// get the value if any
if (*arg == '=') {
@@ -335,7 +334,6 @@ static void SplitArgument(const char* arg, char* buffer, int buffer_size,
}
}
-
static bool EqualNames(const char* a, const char* b) {
for (int i = 0; NormalizeChar(a[i]) == NormalizeChar(b[i]); i++) {
if (a[i] == '\0') {
@@ -345,11 +343,9 @@ static bool EqualNames(const char* a, const char* b) {
return false;
}
-
static Flag* FindFlag(const char* name) {
for (size_t i = 0; i < num_flags; ++i) {
- if (EqualNames(name, flags[i].name()))
- return &flags[i];
+ if (EqualNames(name, flags[i].name())) return &flags[i];
}
return nullptr;
}
@@ -375,8 +371,7 @@ bool TryParseUnsigned(Flag* flag, const char* arg, const char* value,
}
// static
-int FlagList::SetFlagsFromCommandLine(int* argc,
- char** argv,
+int FlagList::SetFlagsFromCommandLine(int* argc, char** argv,
bool remove_flags) {
int return_code = 0;
// parse arguments
@@ -385,7 +380,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
const char* arg = argv[i++];
// split arg into flag components
- char buffer[1*KB];
+ char buffer[1 * KB];
const char* name;
const char* value;
bool negated;
@@ -462,7 +457,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// handle errors
bool is_bool_type = flag->type() == Flag::TYPE_BOOL ||
- flag->type() == Flag::TYPE_MAYBE_BOOL;
+ flag->type() == Flag::TYPE_MAYBE_BOOL;
if ((is_bool_type && value != nullptr) || (!is_bool_type && negated) ||
*endp != '\0') {
// TODO(neis): TryParseUnsigned may return with {*endp == '\0'} even in
@@ -512,28 +507,25 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
return return_code;
}
-
static char* SkipWhiteSpace(char* p) {
while (*p != '\0' && isspace(*p) != 0) p++;
return p;
}
-
static char* SkipBlackSpace(char* p) {
while (*p != '\0' && isspace(*p) == 0) p++;
return p;
}
-
// static
-int FlagList::SetFlagsFromString(const char* str, int len) {
+int FlagList::SetFlagsFromString(const char* str, size_t len) {
// make a 0-terminated copy of str
- ScopedVector<char> copy0(len + 1);
- MemCopy(copy0.start(), str, len);
+ std::unique_ptr<char[]> copy0{NewArray<char>(len + 1)};
+ MemCopy(copy0.get(), str, len);
copy0[len] = '\0';
// strip leading white space
- char* copy = SkipWhiteSpace(copy0.start());
+ char* copy = SkipWhiteSpace(copy0.get());
// count the number of 'arguments'
int argc = 1; // be compatible with SetFlagsFromCommandLine()
@@ -554,10 +546,9 @@ int FlagList::SetFlagsFromString(const char* str, int len) {
p = SkipWhiteSpace(p);
}
- return SetFlagsFromCommandLine(&argc, argv.start(), false);
+ return SetFlagsFromCommandLine(&argc, argv.begin(), false);
}
-
// static
void FlagList::ResetAllFlags() {
for (size_t i = 0; i < num_flags; ++i) {
@@ -565,7 +556,6 @@ void FlagList::ResetAllFlags() {
}
}
-
// static
void FlagList::PrintHelp() {
CpuFeatures::Probe(false);
@@ -600,10 +590,8 @@ void FlagList::PrintHelp() {
}
}
-
static uint32_t flag_hash = 0;
-
void ComputeFlagListHash() {
std::ostringstream modified_args_as_string;
#ifdef DEBUG
@@ -630,16 +618,19 @@ void ComputeFlagListHash() {
base::hash_range(args.c_str(), args.c_str() + args.length()));
}
-
// static
void FlagList::EnforceFlagImplications() {
#define FLAG_MODE_DEFINE_IMPLICATIONS
-#include "src/flag-definitions.h" // NOLINT(build/include)
+#include "src/flags/flag-definitions.h" // NOLINT(build/include)
#undef FLAG_MODE_DEFINE_IMPLICATIONS
ComputeFlagListHash();
}
-
uint32_t FlagList::Hash() { return flag_hash; }
+
+#undef FLAG_MODE_DEFINE
+#undef FLAG_MODE_DEFINE_DEFAULTS
+#undef FLAG_MODE_META
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags/flags.h
index d82c5b40d1..5f0a6d05a8 100644
--- a/deps/v8/src/flags.h
+++ b/deps/v8/src/flags/flags.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FLAGS_H_
-#define V8_FLAGS_H_
+#ifndef V8_FLAGS_FLAGS_H_
+#define V8_FLAGS_FLAGS_H_
#include <vector>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/wasm/wasm-limits.h"
namespace v8 {
@@ -15,7 +15,7 @@ namespace internal {
// Declare all of our flags.
#define FLAG_MODE_DECLARE
-#include "src/flag-definitions.h" // NOLINT
+#include "src/flags/flag-definitions.h" // NOLINT
// The global list of all flags.
class V8_EXPORT_PRIVATE FlagList {
@@ -44,14 +44,12 @@ class V8_EXPORT_PRIVATE FlagList {
// --flag=value (non-bool flags only, no spaces around '=')
// --flag value (non-bool flags only)
// -- (capture all remaining args in JavaScript)
- static int SetFlagsFromCommandLine(int* argc,
- char** argv,
- bool remove_flags);
+ static int SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags);
// Set the flag values by parsing the string str. Splits string into argc
// substrings argv[], each of which consisting of non-white-space chars,
// and then calls SetFlagsFromCommandLine() and returns its result.
- static int SetFlagsFromString(const char* str, int len);
+ static int SetFlagsFromString(const char* str, size_t len);
// Reset all flags to their default value.
static void ResetAllFlags();
@@ -70,4 +68,4 @@ class V8_EXPORT_PRIVATE FlagList {
} // namespace internal
} // namespace v8
-#endif // V8_FLAGS_H_
+#endif // V8_FLAGS_FLAGS_H_
diff --git a/deps/v8/src/handles/OWNERS b/deps/v8/src/handles/OWNERS
new file mode 100644
index 0000000000..57fcdd4fac
--- /dev/null
+++ b/deps/v8/src/handles/OWNERS
@@ -0,0 +1,4 @@
+ishell@chromium.org
+jkummerow@chromium.org
+mlippautz@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index 350380b23c..db4f806e58 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -2,19 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/global-handles.h"
+#include "src/handles/global-handles.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/compiler-specific.h"
-#include "src/cancelable-task.h"
+#include "src/execution/vm-state-inl.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/init/v8.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
-#include "src/task-utils.h"
-#include "src/v8.h"
-#include "src/visitors.h"
-#include "src/vm-state-inl.h"
+#include "src/objects/visitors.h"
+#include "src/tasks/cancelable-task.h"
+#include "src/tasks/task-utils.h"
namespace v8 {
namespace internal {
@@ -349,7 +350,7 @@ class NodeBase {
namespace {
void ExtractInternalFields(JSObject jsobject, void** embedder_fields, int len) {
- int field_count = jsobject->GetEmbedderFieldCount();
+ int field_count = jsobject.GetEmbedderFieldCount();
for (int i = 0; i < len; ++i) {
if (field_count == i) break;
void* pointer;
@@ -379,10 +380,6 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
Internals::kNodeStateMask);
STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue);
STATIC_ASSERT(PENDING == Internals::kNodeStateIsPendingValue);
- STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) ==
- Internals::kNodeIsIndependentShift);
- STATIC_ASSERT(static_cast<int>(IsActive::kShift) ==
- Internals::kNodeIsActiveShift);
set_in_young_list(false);
}
@@ -399,22 +396,8 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
// State and flag accessors.
- State state() const {
- return NodeState::decode(flags_);
- }
- void set_state(State state) {
- flags_ = NodeState::update(flags_, state);
- }
-
- bool is_independent() { return IsIndependent::decode(flags_); }
- void set_independent(bool v) { flags_ = IsIndependent::update(flags_, v); }
-
- bool is_active() {
- return IsActive::decode(flags_);
- }
- void set_active(bool v) {
- flags_ = IsActive::update(flags_, v);
- }
+ State state() const { return NodeState::decode(flags_); }
+ void set_state(State state) { flags_ = NodeState::update(flags_, state); }
bool is_in_young_list() const { return IsInYoungList::decode(flags_); }
void set_in_young_list(bool v) { flags_ = IsInYoungList::update(flags_, v); }
@@ -534,7 +517,7 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
void* embedder_fields[v8::kEmbedderFieldsInWeakCallback] = {nullptr,
nullptr};
- if (weakness_type() != PHANTOM_WEAK && object()->IsJSObject()) {
+ if (weakness_type() != PHANTOM_WEAK && object().IsJSObject()) {
ExtractInternalFields(JSObject::cast(object()), embedder_fields,
v8::kEmbedderFieldsInWeakCallback);
}
@@ -562,14 +545,13 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
// This method invokes a finalizer. Updating the method name would require
// adjusting CFI blacklist as weak_callback_ is invoked on the wrong type.
CHECK(IsPendingFinalizer());
- CHECK(!is_active());
set_state(NEAR_DEATH);
// Check that we are not passing a finalized external string to
// the callback.
- DCHECK(!object()->IsExternalOneByteString() ||
- ExternalOneByteString::cast(object())->resource() != nullptr);
- DCHECK(!object()->IsExternalTwoByteString() ||
- ExternalTwoByteString::cast(object())->resource() != nullptr);
+ DCHECK(!object().IsExternalOneByteString() ||
+ ExternalOneByteString::cast(object()).resource() != nullptr);
+ DCHECK(!object().IsExternalTwoByteString() ||
+ ExternalTwoByteString::cast(object()).resource() != nullptr);
// Leaving V8.
VMState<EXTERNAL> vmstate(isolate);
HandleScope handle_scope(isolate);
@@ -592,25 +574,14 @@ class GlobalHandles::Node final : public NodeBase<GlobalHandles::Node> {
private:
// Fields that are not used for managing node memory.
- void ClearImplFields() {
- set_independent(false);
- set_active(false);
- weak_callback_ = nullptr;
- }
+ void ClearImplFields() { weak_callback_ = nullptr; }
- void CheckImplFieldsAreCleared() {
- DCHECK(!is_independent());
- DCHECK(!is_active());
- DCHECK_EQ(nullptr, weak_callback_);
- }
+ void CheckImplFieldsAreCleared() { DCHECK_EQ(nullptr, weak_callback_); }
// This stores three flags (independent, partially_dependent and
// in_young_list) and a State.
class NodeState : public BitField8<State, 0, 3> {};
- class IsIndependent : public BitField8<bool, NodeState::kNext, 1> {};
- // The following two fields are mutually exclusive
- class IsActive : public BitField8<bool, IsIndependent::kNext, 1> {};
- class IsInYoungList : public BitField8<bool, IsActive::kNext, 1> {};
+ class IsInYoungList : public BitField8<bool, NodeState::kNext, 1> {};
class NodeWeaknessType
: public BitField8<WeaknessType, IsInYoungList::kNext, 2> {};
@@ -740,7 +711,7 @@ Handle<Object> GlobalHandles::CopyGlobal(Address* location) {
Node::FromLocation(location)->global_handles();
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
- Object(*location)->ObjectVerify(global_handles->isolate());
+ Object(*location).ObjectVerify(global_handles->isolate());
}
#endif // VERIFY_HEAP
return global_handles->Create(*location);
@@ -792,7 +763,7 @@ void GlobalHandles::SetFinalizationCallbackForTraced(
callback);
}
-typedef v8::WeakCallbackInfo<void>::Callback GenericCallback;
+using GenericCallback = v8::WeakCallbackInfo<void>::Callback;
void GlobalHandles::MakeWeak(Address* location, void* parameter,
GenericCallback phantom_callback,
@@ -873,12 +844,6 @@ void GlobalHandles::IterateWeakRootsIdentifyFinalizers(
void GlobalHandles::IdentifyWeakUnmodifiedObjects(
WeakSlotCallback is_unmodified) {
- for (Node* node : young_nodes_) {
- if (node->IsWeak() && !is_unmodified(node->location())) {
- node->set_active(true);
- }
- }
-
LocalEmbedderHeapTracer* const tracer =
isolate()->heap()->local_embedder_heap_tracer();
for (TracedNode* node : traced_young_nodes_) {
@@ -895,9 +860,7 @@ void GlobalHandles::IdentifyWeakUnmodifiedObjects(
void GlobalHandles::IterateYoungStrongAndDependentRoots(RootVisitor* v) {
for (Node* node : young_nodes_) {
- if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- node->is_active())) {
+ if (node->IsStrongRetainer()) {
v->VisitRootPointer(Root::kGlobalHandles, node->label(),
node->location());
}
@@ -913,8 +876,7 @@ void GlobalHandles::MarkYoungWeakUnmodifiedObjectsPending(
WeakSlotCallbackWithHeap is_dead) {
for (Node* node : young_nodes_) {
DCHECK(node->is_in_young_list());
- if ((node->is_independent() || !node->is_active()) && node->IsWeak() &&
- is_dead(isolate_->heap(), node->location())) {
+ if (node->IsWeak() && is_dead(isolate_->heap(), node->location())) {
if (!node->IsPhantomCallback() && !node->IsPhantomResetHandle()) {
node->MarkPending();
}
@@ -926,8 +888,7 @@ void GlobalHandles::IterateYoungWeakUnmodifiedRootsForFinalizers(
RootVisitor* v) {
for (Node* node : young_nodes_) {
DCHECK(node->is_in_young_list());
- if ((node->is_independent() || !node->is_active()) &&
- node->IsWeakRetainer() && (node->state() == Node::PENDING)) {
+ if (node->IsWeakRetainer() && (node->state() == Node::PENDING)) {
DCHECK(!node->IsPhantomCallback());
DCHECK(!node->IsPhantomResetHandle());
// Finalizers need to survive.
@@ -941,8 +902,7 @@ void GlobalHandles::IterateYoungWeakUnmodifiedRootsForPhantomHandles(
RootVisitor* v, WeakSlotCallbackWithHeap should_reset_handle) {
for (Node* node : young_nodes_) {
DCHECK(node->is_in_young_list());
- if ((node->is_independent() || !node->is_active()) &&
- node->IsWeakRetainer() && (node->state() != Node::PENDING)) {
+ if (node->IsWeakRetainer() && (node->state() != Node::PENDING)) {
if (should_reset_handle(isolate_->heap(), node->location())) {
DCHECK(node->IsPhantomResetHandle() || node->IsPhantomCallback());
if (node->IsPhantomResetHandle()) {
@@ -995,11 +955,20 @@ void GlobalHandles::InvokeSecondPassPhantomCallbacksFromTask() {
}
void GlobalHandles::InvokeSecondPassPhantomCallbacks() {
+ // The callbacks may execute JS, which in turn may lead to another GC run.
+ // If we are already processing the callbacks, we do not want to start over
+ // from within the inner GC. Newly added callbacks will always be run by the
+ // outermost GC run only.
+ if (running_second_pass_callbacks_) return;
+ running_second_pass_callbacks_ = true;
+
+ AllowJavascriptExecution allow_js(isolate());
while (!second_pass_callbacks_.empty()) {
auto callback = second_pass_callbacks_.back();
second_pass_callbacks_.pop_back();
callback.Invoke(isolate(), PendingPhantomCallback::kSecondPass);
}
+ running_second_pass_callbacks_ = false;
}
size_t GlobalHandles::PostScavengeProcessing(unsigned post_processing_count) {
@@ -1008,9 +977,6 @@ size_t GlobalHandles::PostScavengeProcessing(unsigned post_processing_count) {
// Filter free nodes.
if (!node->IsRetainer()) continue;
- // Reset active state for all affected nodes.
- node->set_active(false);
-
if (node->IsPending()) {
DCHECK(node->has_callback());
DCHECK(node->IsPendingFinalizer());
@@ -1029,9 +995,6 @@ size_t GlobalHandles::PostMarkSweepProcessing(unsigned post_processing_count) {
// Filter free nodes.
if (!node->IsRetainer()) continue;
- // Reset active state for all affected nodes.
- node->set_active(false);
-
if (node->IsPending()) {
DCHECK(node->has_callback());
DCHECK(node->IsPendingFinalizer());
@@ -1308,7 +1271,7 @@ void GlobalHandles::PrintStats() {
}
PrintF("Global Handle Statistics:\n");
- PrintF(" allocated memory = %" PRIuS "B\n", total * sizeof(Node));
+ PrintF(" allocated memory = %zuB\n", total * sizeof(Node));
PrintF(" # weak = %d\n", weak);
PrintF(" # pending = %d\n", pending);
PrintF(" # near_death = %d\n", near_death);
@@ -1316,12 +1279,11 @@ void GlobalHandles::PrintStats() {
PrintF(" # total = %d\n", total);
}
-
void GlobalHandles::Print() {
PrintF("Global handles:\n");
for (Node* node : *regular_nodes_) {
PrintF(" handle %p to %p%s\n", node->location().ToVoidPtr(),
- reinterpret_cast<void*>(node->object()->ptr()),
+ reinterpret_cast<void*>(node->object().ptr()),
node->IsWeak() ? " (weak)" : "");
}
}
@@ -1374,8 +1336,8 @@ void EternalHandles::Create(Isolate* isolate, Object object, int* index) {
MemsetPointer(FullObjectSlot(next_block), the_hole, kSize);
blocks_.push_back(next_block);
}
- DCHECK_EQ(the_hole->ptr(), blocks_[block][offset]);
- blocks_[block][offset] = object->ptr();
+ DCHECK_EQ(the_hole.ptr(), blocks_[block][offset]);
+ blocks_[block][offset] = object.ptr();
if (ObjectInYoungGeneration(object)) {
young_node_indices_.push_back(size_);
}
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/handles/global-handles.h
index 6b8ca0c93f..a08bc1fd13 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_GLOBAL_HANDLES_H_
-#define V8_GLOBAL_HANDLES_H_
+#ifndef V8_HANDLES_GLOBAL_HANDLES_H_
+#define V8_HANDLES_GLOBAL_HANDLES_H_
#include <type_traits>
#include <utility>
@@ -12,9 +12,9 @@
#include "include/v8.h"
#include "include/v8-profiler.h"
-#include "src/handles.h"
-#include "src/objects.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -231,6 +231,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
traced_pending_phantom_callbacks_;
std::vector<PendingPhantomCallback> second_pass_callbacks_;
bool second_pass_callbacks_task_posted_ = false;
+ bool running_second_pass_callbacks_ = false;
// Counter for recursive garbage collections during callback processing.
unsigned post_gc_processing_count_ = 0;
@@ -240,7 +241,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final {
class GlobalHandles::PendingPhantomCallback final {
public:
- typedef v8::WeakCallbackInfo<void> Data;
+ using Data = v8::WeakCallbackInfo<void>;
enum InvocationType { kFirstPass, kSecondPass };
@@ -308,4 +309,4 @@ class EternalHandles final {
} // namespace internal
} // namespace v8
-#endif // V8_GLOBAL_HANDLES_H_
+#endif // V8_HANDLES_GLOBAL_HANDLES_H_
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles/handles-inl.h
index 941c839d9c..5adb5fcdc6 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles/handles-inl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HANDLES_INL_H_
-#define V8_HANDLES_INL_H_
+#ifndef V8_HANDLES_HANDLES_INL_H_
+#define V8_HANDLES_HANDLES_INL_H_
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/msan.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
+#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
@@ -29,14 +29,6 @@ const Handle<T> Handle<T>::cast(Handle<S> that) {
return Handle<T>(that.location_);
}
-HandleScope::HandleScope(Isolate* isolate) {
- HandleScopeData* data = isolate->handle_scope_data();
- isolate_ = isolate;
- prev_next_ = data->next;
- prev_limit_ = data->limit;
- data->level++;
-}
-
template <typename T>
Handle<T>::Handle(T object, Isolate* isolate)
: HandleBase(object.ptr(), isolate) {}
@@ -51,24 +43,45 @@ inline std::ostream& operator<<(std::ostream& os, Handle<T> handle) {
return os << Brief(*handle);
}
+HandleScope::HandleScope(Isolate* isolate) {
+ HandleScopeData* data = isolate->handle_scope_data();
+ isolate_ = isolate;
+ prev_next_ = data->next;
+ prev_limit_ = data->limit;
+ data->level++;
+}
+
+HandleScope::HandleScope(HandleScope&& other) V8_NOEXCEPT
+ : isolate_(other.isolate_),
+ prev_next_(other.prev_next_),
+ prev_limit_(other.prev_limit_) {
+ other.isolate_ = nullptr;
+}
+
HandleScope::~HandleScope() {
-#ifdef DEBUG
- if (FLAG_check_handle_count) {
- int before = NumberOfHandles(isolate_);
- CloseScope(isolate_, prev_next_, prev_limit_);
- int after = NumberOfHandles(isolate_);
- DCHECK_LT(after - before, kCheckHandleThreshold);
- DCHECK_LT(before, kCheckHandleThreshold);
+ if (isolate_ == nullptr) return;
+ CloseScope(isolate_, prev_next_, prev_limit_);
+}
+
+HandleScope& HandleScope::operator=(HandleScope&& other) V8_NOEXCEPT {
+ if (isolate_ == nullptr) {
+ isolate_ = other.isolate_;
} else {
-#endif // DEBUG
+ DCHECK_EQ(isolate_, other.isolate_);
CloseScope(isolate_, prev_next_, prev_limit_);
-#ifdef DEBUG
}
-#endif // DEBUG
+ prev_next_ = other.prev_next_;
+ prev_limit_ = other.prev_limit_;
+ other.isolate_ = nullptr;
+ return *this;
}
void HandleScope::CloseScope(Isolate* isolate, Address* prev_next,
Address* prev_limit) {
+#ifdef DEBUG
+ int before = FLAG_check_handle_count ? NumberOfHandles(isolate) : 0;
+#endif
+ DCHECK_NOT_NULL(isolate);
HandleScopeData* current = isolate->handle_scope_data();
std::swap(current->next, prev_next);
@@ -86,6 +99,11 @@ void HandleScope::CloseScope(Isolate* isolate, Address* prev_next,
current->next,
static_cast<size_t>(reinterpret_cast<Address>(limit) -
reinterpret_cast<Address>(current->next)));
+#ifdef DEBUG
+ int after = FLAG_check_handle_count ? NumberOfHandles(isolate) : 0;
+ DCHECK_LT(after - before, kCheckHandleThreshold);
+ DCHECK_LT(before, kCheckHandleThreshold);
+#endif
}
template <typename T>
@@ -129,7 +147,6 @@ Address* HandleScope::GetHandle(Isolate* isolate, Address value) {
return canonical ? canonical->Lookup(value) : CreateHandle(isolate, value);
}
-
#ifdef DEBUG
inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
// Make sure the current thread is allowed to create handles to begin with.
@@ -143,7 +160,6 @@ inline SealHandleScope::SealHandleScope(Isolate* isolate) : isolate_(isolate) {
current->sealed_level = current->level;
}
-
inline SealHandleScope::~SealHandleScope() {
// Restore state in current handle scope to re-enable handle
// allocations.
@@ -159,4 +175,4 @@ inline SealHandleScope::~SealHandleScope() {
} // namespace internal
} // namespace v8
-#endif // V8_HANDLES_INL_H_
+#endif // V8_HANDLES_HANDLES_INL_H_
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles/handles.cc
index 21d21be9fd..e0a1f23b7b 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles/handles.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/handles.h"
+#include "src/handles/handles.h"
-#include "src/address-map.h"
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/base/logging.h"
-#include "src/identity-map.h"
-#include "src/maybe-handles.h"
-#include "src/objects-inl.h"
-#include "src/roots-inl.h"
+#include "src/handles/maybe-handles.h"
+#include "src/objects/objects-inl.h"
+#include "src/roots/roots-inl.h"
+#include "src/utils/address-map.h"
+#include "src/utils/identity-map.h"
#ifdef DEBUG
// For GetIsolateFromWritableHeapObject.
@@ -31,7 +31,7 @@ ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>);
bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
DCHECK_NOT_NULL(location_);
Object object(*location_);
- if (object->IsSmi()) return true;
+ if (object.IsSmi()) return true;
HeapObject heap_object = HeapObject::cast(object);
Isolate* isolate;
if (!GetIsolateFromWritableObject(heap_object, &isolate)) return true;
@@ -44,16 +44,15 @@ bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
if (mode == INCLUDE_DEFERRED_CHECK &&
!AllowDeferredHandleDereference::IsAllowed()) {
// Accessing cells, maps and internalized strings is safe.
- if (heap_object->IsCell()) return true;
- if (heap_object->IsMap()) return true;
- if (heap_object->IsInternalizedString()) return true;
+ if (heap_object.IsCell()) return true;
+ if (heap_object.IsMap()) return true;
+ if (heap_object.IsInternalizedString()) return true;
return !isolate->IsDeferredHandle(location_);
}
return true;
}
#endif
-
int HandleScope::NumberOfHandles(Isolate* isolate) {
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
int n = static_cast<int>(impl->blocks()->size());
@@ -101,13 +100,11 @@ Address* HandleScope::Extend(Isolate* isolate) {
return result;
}
-
void HandleScope::DeleteExtensions(Isolate* isolate) {
HandleScopeData* current = isolate->handle_scope_data();
isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}
-
#ifdef ENABLE_HANDLE_ZAPPING
void HandleScope::ZapRange(Address* start, Address* end) {
DCHECK_LE(end - start, kHandleBlockSize);
@@ -117,17 +114,14 @@ void HandleScope::ZapRange(Address* start, Address* end) {
}
#endif
-
Address HandleScope::current_level_address(Isolate* isolate) {
return reinterpret_cast<Address>(&isolate->handle_scope_data()->level);
}
-
Address HandleScope::current_next_address(Isolate* isolate) {
return reinterpret_cast<Address>(&isolate->handle_scope_data()->next);
}
-
Address HandleScope::current_limit_address(Isolate* isolate) {
return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}
@@ -143,7 +137,6 @@ CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
canonical_level_ = handle_scope_data->level;
}
-
CanonicalHandleScope::~CanonicalHandleScope() {
delete root_index_map_;
delete identity_map_;
@@ -171,7 +164,6 @@ Address* CanonicalHandleScope::Lookup(Address object) {
return *entry;
}
-
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
impl_->BeginDeferredScope();
@@ -195,14 +187,12 @@ DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
data->limit = new_limit;
}
-
DeferredHandleScope::~DeferredHandleScope() {
impl_->isolate()->handle_scope_data()->level--;
DCHECK(handles_detached_);
DCHECK(impl_->isolate()->handle_scope_data()->level == prev_level_);
}
-
DeferredHandles* DeferredHandleScope::Detach() {
DeferredHandles* deferred = impl_->Detach(prev_limit_);
HandleScopeData* data = impl_->isolate()->handle_scope_data();
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles/handles.h
index 2115f4a878..5f9b170d4b 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HANDLES_H_
-#define V8_HANDLES_H_
+#ifndef V8_HANDLES_HANDLES_H_
+#define V8_HANDLES_HANDLES_H_
#include <type_traits>
#include "include/v8.h"
#include "src/base/functional.h"
#include "src/base/macros.h"
-#include "src/checks.h"
-#include "src/globals.h"
+#include "src/common/checks.h"
+#include "src/common/globals.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -57,13 +57,6 @@ class HandleBase {
// used for hashing handles; do not ever try to dereference it.
V8_INLINE Address address() const { return bit_cast<Address>(location_); }
- protected:
- // Provides the C++ dereference operator.
- V8_INLINE Address operator*() const {
- SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
- return *location_;
- }
-
// Returns the address to where the raw pointer is stored.
V8_INLINE Address* location() const {
SLOW_DCHECK(location_ == nullptr ||
@@ -71,6 +64,7 @@ class HandleBase {
return location_;
}
+ protected:
enum DereferenceCheckMode { INCLUDE_DEFERRED_CHECK, NO_DEFERRED_CHECK };
#ifdef DEBUG
bool V8_EXPORT_PRIVATE IsDereferenceAllowed(DereferenceCheckMode mode) const;
@@ -87,7 +81,6 @@ class HandleBase {
Address* location_;
};
-
// ----------------------------------------------------------------------------
// A Handle provides a reference to an object that survives relocation by
// the garbage collector.
@@ -102,9 +95,30 @@ class HandleBase {
template <typename T>
class Handle final : public HandleBase {
public:
- V8_INLINE explicit Handle(Address* location = nullptr)
- : HandleBase(location) {
- // Type check:
+ // {ObjectRef} is returned by {Handle::operator->}. It should never be stored
+ // anywhere or used in any other code; no one should ever have to spell out
+ // {ObjectRef} in code. Its only purpose is to be dereferenced immediately by
+ // "operator-> chaining". Returning the address of the field is valid because
+ // this objects lifetime only ends at the end of the full statement.
+ class ObjectRef {
+ public:
+ T* operator->() { return &object_; }
+
+ private:
+ friend class Handle;
+ explicit ObjectRef(T object) : object_(object) {}
+
+ T object_;
+ };
+
+ V8_INLINE explicit Handle() : HandleBase(nullptr) {
+ // Skip static type check in order to allow Handle<XXX>::null() as default
+ // parameter values in non-inl header files without requiring full
+ // definition of type XXX.
+ }
+
+ V8_INLINE explicit Handle(Address* location) : HandleBase(location) {
+ // This static type check also fails for forward class declarations.
static_assert(std::is_convertible<T*, Object*>::value,
"static type violation");
// TODO(jkummerow): Runtime type check here as a SLOW_DCHECK?
@@ -121,20 +135,15 @@ class Handle final : public HandleBase {
std::is_convertible<S*, T*>::value>::type>
V8_INLINE Handle(Handle<S> handle) : HandleBase(handle) {}
- V8_INLINE T operator->() const {
- return operator*();
- }
+ V8_INLINE ObjectRef operator->() const { return ObjectRef{**this}; }
- // Provides the C++ dereference operator.
V8_INLINE T operator*() const {
// unchecked_cast because we rather trust Handle<T> to contain a T than
// include all the respective -inl.h headers for SLOW_DCHECKs.
- return T::unchecked_cast(Object(HandleBase::operator*()));
+ SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
+ return T::unchecked_cast(Object(*location()));
}
- // Returns the address to where the raw pointer is stored.
- V8_INLINE Address* location() const { return HandleBase::location(); }
-
template <typename S>
inline static const Handle<T> cast(Handle<S> that);
@@ -187,9 +196,12 @@ inline std::ostream& operator<<(std::ostream& os, Handle<T> handle);
class HandleScope {
public:
explicit inline HandleScope(Isolate* isolate);
+ inline HandleScope(HandleScope&& other) V8_NOEXCEPT;
inline ~HandleScope();
+ inline HandleScope& operator=(HandleScope&& other) V8_NOEXCEPT;
+
// Counts the number of allocated handles.
V8_EXPORT_PRIVATE static int NumberOfHandles(Isolate* isolate);
@@ -250,13 +262,11 @@ class HandleScope {
DISALLOW_COPY_AND_ASSIGN(HandleScope);
};
-
// Forward declarations for CanonicalHandleScope.
template <typename V, class AllocationPolicy>
class IdentityMap;
class RootIndexMap;
-
// A CanonicalHandleScope does not open a new HandleScope. It changes the
// existing HandleScope so that Handles created within are canonicalized.
// This does not apply to nested inner HandleScopes unless a nested
@@ -324,7 +334,6 @@ class V8_EXPORT_PRIVATE DeferredHandleScope final {
friend class HandleScopeImplementer;
};
-
// Seal off the current HandleScope so that new handles can only be created
// if a new HandleScope is entered.
class SealHandleScope final {
@@ -335,6 +344,7 @@ class SealHandleScope final {
#else
explicit inline SealHandleScope(Isolate* isolate);
inline ~SealHandleScope();
+
private:
Isolate* isolate_;
Address* prev_limit_;
@@ -342,7 +352,6 @@ class SealHandleScope final {
#endif
};
-
struct HandleScopeData final {
Address* next;
Address* limit;
@@ -360,4 +369,4 @@ struct HandleScopeData final {
} // namespace internal
} // namespace v8
-#endif // V8_HANDLES_H_
+#endif // V8_HANDLES_HANDLES_H_
diff --git a/deps/v8/src/maybe-handles-inl.h b/deps/v8/src/handles/maybe-handles-inl.h
index 8e0e7e3a38..d4989d9456 100644
--- a/deps/v8/src/maybe-handles-inl.h
+++ b/deps/v8/src/handles/maybe-handles-inl.h
@@ -2,24 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MAYBE_HANDLES_INL_H_
-#define V8_MAYBE_HANDLES_INL_H_
+#ifndef V8_HANDLES_MAYBE_HANDLES_INL_H_
+#define V8_HANDLES_MAYBE_HANDLES_INL_H_
-#include "src/maybe-handles.h"
+#include "src/handles/maybe-handles.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/maybe-object-inl.h"
namespace v8 {
namespace internal {
+
template <typename T>
MaybeHandle<T>::MaybeHandle(T object, Isolate* isolate)
: MaybeHandle(handle(object, isolate)) {}
-MaybeObjectHandle::MaybeObjectHandle()
- : reference_type_(HeapObjectReferenceType::STRONG),
- handle_(Handle<Object>::null()) {}
-
MaybeObjectHandle::MaybeObjectHandle(MaybeObject object, Isolate* isolate) {
HeapObject heap_object;
DCHECK(!object->IsCleared());
@@ -83,4 +80,4 @@ inline MaybeObjectHandle handle(MaybeObject object, Isolate* isolate) {
} // namespace internal
} // namespace v8
-#endif // V8_MAYBE_HANDLES_INL_H_
+#endif // V8_HANDLES_MAYBE_HANDLES_INL_H_
diff --git a/deps/v8/src/maybe-handles.h b/deps/v8/src/handles/maybe-handles.h
index 8a68c85f48..0b93bf82ea 100644
--- a/deps/v8/src/maybe-handles.h
+++ b/deps/v8/src/handles/maybe-handles.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MAYBE_HANDLES_H_
-#define V8_MAYBE_HANDLES_H_
+#ifndef V8_HANDLES_MAYBE_HANDLES_H_
+#define V8_HANDLES_MAYBE_HANDLES_H_
#include <type_traits>
-#include "src/handles.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
@@ -81,7 +81,8 @@ class MaybeHandle final {
// while the MaybeObjectHandle is alive.
class MaybeObjectHandle {
public:
- inline MaybeObjectHandle();
+ inline MaybeObjectHandle()
+ : reference_type_(HeapObjectReferenceType::STRONG) {}
inline MaybeObjectHandle(MaybeObject object, Isolate* isolate);
inline MaybeObjectHandle(Object object, Isolate* isolate);
inline explicit MaybeObjectHandle(Handle<Object> object);
@@ -118,4 +119,4 @@ class MaybeObjectHandle {
} // namespace internal
} // namespace v8
-#endif // V8_MAYBE_HANDLES_H_
+#endif // V8_HANDLES_MAYBE_HANDLES_H_
diff --git a/deps/v8/src/heap/array-buffer-collector.cc b/deps/v8/src/heap/array-buffer-collector.cc
index 6d4e1bb3c3..b6d7df8191 100644
--- a/deps/v8/src/heap/array-buffer-collector.cc
+++ b/deps/v8/src/heap/array-buffer-collector.cc
@@ -5,11 +5,11 @@
#include "src/heap/array-buffer-collector.h"
#include "src/base/template-utils.h"
-#include "src/cancelable-task.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
-#include "src/task-utils.h"
+#include "src/tasks/cancelable-task.h"
+#include "src/tasks/task-utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/array-buffer-tracker-inl.h b/deps/v8/src/heap/array-buffer-tracker-inl.h
index 04c14b15fd..61b5ba1f8c 100644
--- a/deps/v8/src/heap/array-buffer-tracker-inl.h
+++ b/deps/v8/src/heap/array-buffer-tracker-inl.h
@@ -5,23 +5,23 @@
#ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_INL_H_
#define V8_HEAP_ARRAY_BUFFER_TRACKER_INL_H_
-#include "src/conversions-inl.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/objects.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
- if (buffer->backing_store() == nullptr) return;
+ if (buffer.backing_store() == nullptr) return;
// ArrayBuffer tracking works only for small objects.
DCHECK(!heap->IsLargeObject(buffer));
- const size_t length = buffer->byte_length();
+ const size_t length = buffer.byte_length();
Page* page = Page::FromHeapObject(buffer);
{
base::MutexGuard guard(page->mutex());
@@ -42,10 +42,10 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
}
void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) {
- if (buffer->backing_store() == nullptr) return;
+ if (buffer.backing_store() == nullptr) return;
Page* page = Page::FromHeapObject(buffer);
- const size_t length = buffer->byte_length();
+ const size_t length = buffer.byte_length();
{
base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
@@ -110,8 +110,8 @@ void LocalArrayBufferTracker::Add(JSArrayBuffer buffer, size_t length) {
void LocalArrayBufferTracker::AddInternal(JSArrayBuffer buffer, size_t length) {
auto ret = array_buffers_.insert(
{buffer,
- {buffer->backing_store(), length, buffer->backing_store(),
- buffer->is_wasm_memory()}});
+ {buffer.backing_store(), length, buffer.backing_store(),
+ buffer.is_wasm_memory()}});
USE(ret);
// Check that we indeed inserted a new value and did not overwrite an existing
// one (which would be a bug).
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 58cd4f9e43..0c04d7b6ae 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -47,7 +47,7 @@ void LocalArrayBufferTracker::Process(Callback callback) {
const size_t length = it->second.length;
// We should decrement before adding to avoid potential overflows in
// the external memory counters.
- DCHECK_EQ(it->first->is_wasm_memory(), it->second.is_wasm_memory);
+ DCHECK_EQ(it->first.is_wasm_memory(), it->second.is_wasm_memory);
tracker->AddInternal(new_buffer, length);
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer,
@@ -104,7 +104,7 @@ bool ArrayBufferTracker::ProcessBuffers(Page* page, ProcessingMode mode) {
DCHECK(page->SweepingDone());
tracker->Process([mode](JSArrayBuffer old_buffer, JSArrayBuffer* new_buffer) {
- MapWord map_word = old_buffer->map_word();
+ MapWord map_word = old_buffer.map_word();
if (map_word.IsForwardingAddress()) {
*new_buffer = JSArrayBuffer::cast(map_word.ToForwardingAddress());
return LocalArrayBufferTracker::kUpdateEntry;
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index dc29b95f36..e8ca57b543 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -7,10 +7,10 @@
#include <unordered_map>
-#include "src/allocation.h"
#include "src/base/platform/mutex.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/js-array-buffer.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/code-stats.cc b/deps/v8/src/heap/code-stats.cc
index 710ebd4fa1..cb34d732a4 100644
--- a/deps/v8/src/heap/code-stats.cc
+++ b/deps/v8/src/heap/code-stats.cc
@@ -4,10 +4,10 @@
#include "src/heap/code-stats.h"
-#include "src/code-comments.h"
+#include "src/codegen/code-comments.h"
+#include "src/codegen/reloc-info.h"
#include "src/heap/spaces-inl.h" // For HeapObjectIterator.
-#include "src/objects-inl.h"
-#include "src/reloc-info.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -15,21 +15,21 @@ namespace internal {
// Record code statisitcs.
void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
Isolate* isolate) {
- if (object->IsScript()) {
+ if (object.IsScript()) {
Script script = Script::cast(object);
// Log the size of external source code.
- Object source = script->source();
- if (source->IsExternalString()) {
+ Object source = script.source();
+ if (source.IsExternalString()) {
ExternalString external_source_string = ExternalString::cast(source);
int size = isolate->external_script_source_size();
- size += external_source_string->ExternalPayloadSize();
+ size += external_source_string.ExternalPayloadSize();
isolate->set_external_script_source_size(size);
}
- } else if (object->IsAbstractCode()) {
+ } else if (object.IsAbstractCode()) {
// Record code+metadata statisitcs.
AbstractCode abstract_code = AbstractCode::cast(object);
- int size = abstract_code->SizeIncludingMetadata();
- if (abstract_code->IsCode()) {
+ int size = abstract_code.SizeIncludingMetadata();
+ if (abstract_code.IsCode()) {
size += isolate->code_and_metadata_size();
isolate->set_code_and_metadata_size(size);
} else {
@@ -39,8 +39,8 @@ void CodeStatistics::RecordCodeAndMetadataStatistics(HeapObject object,
#ifdef DEBUG
// Record code kind and code comment statistics.
- isolate->code_kind_statistics()[abstract_code->kind()] +=
- abstract_code->Size();
+ isolate->code_kind_statistics()[abstract_code.kind()] +=
+ abstract_code.Size();
CodeStatistics::CollectCodeCommentStatistics(object, isolate);
#endif
}
@@ -197,12 +197,12 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
Isolate* isolate) {
// Bytecode objects do not contain RelocInfo. Only process code objects
// for code comment statistics.
- if (!obj->IsCode()) {
+ if (!obj.IsCode()) {
return;
}
Code code = Code::cast(obj);
- CodeCommentsIterator cit(code->code_comments(), code->code_comments_size());
+ CodeCommentsIterator cit(code.code_comments(), code.code_comments_size());
int delta = 0;
int prev_pc_offset = 0;
while (cit.HasCurrent()) {
@@ -212,8 +212,8 @@ void CodeStatistics::CollectCodeCommentStatistics(HeapObject obj,
cit.Next();
}
- DCHECK(0 <= prev_pc_offset && prev_pc_offset <= code->raw_instruction_size());
- delta += static_cast<int>(code->raw_instruction_size() - prev_pc_offset);
+ DCHECK(0 <= prev_pc_offset && prev_pc_offset <= code.raw_instruction_size());
+ delta += static_cast<int>(code.raw_instruction_size() - prev_pc_offset);
EnterComment(isolate, "NoComment", delta);
}
#endif
diff --git a/deps/v8/src/heap/combined-heap.cc b/deps/v8/src/heap/combined-heap.cc
new file mode 100644
index 0000000000..ed60b438cb
--- /dev/null
+++ b/deps/v8/src/heap/combined-heap.cc
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/combined-heap.h"
+
+namespace v8 {
+namespace internal {
+
+HeapObject CombinedHeapIterator::Next() {
+ HeapObject object = ro_heap_iterator_.Next();
+ if (!object.is_null()) {
+ return object;
+ }
+ return heap_iterator_.next();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/combined-heap.h b/deps/v8/src/heap/combined-heap.h
new file mode 100644
index 0000000000..c331d95c3d
--- /dev/null
+++ b/deps/v8/src/heap/combined-heap.h
@@ -0,0 +1,41 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_COMBINED_HEAP_H_
+#define V8_HEAP_COMBINED_HEAP_H_
+
+#include "src/heap/heap.h"
+#include "src/heap/read-only-heap.h"
+#include "src/objects/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// This class allows iteration over the entire heap (Heap and ReadOnlyHeap). It
+// uses the HeapIterator to iterate over non-read-only objects and accepts the
+// same filtering option. (Interrupting iteration while filtering unreachable
+// objects is still forbidden)
+class V8_EXPORT_PRIVATE CombinedHeapIterator final {
+ public:
+ CombinedHeapIterator(Heap* heap,
+ HeapIterator::HeapObjectsFiltering filtering =
+ HeapIterator::HeapObjectsFiltering::kNoFiltering)
+ : heap_iterator_(heap, filtering),
+ ro_heap_iterator_(heap->read_only_heap()) {}
+ HeapObject Next();
+
+ private:
+ HeapIterator heap_iterator_;
+ ReadOnlyHeapIterator ro_heap_iterator_;
+};
+
+V8_WARN_UNUSED_RESULT inline bool IsValidHeapObject(Heap* heap,
+ HeapObject object) {
+ return ReadOnlyHeap::Contains(object) || heap->Contains(object);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_COMBINED_HEAP_H_
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index 42f0f9f562..8ce96428e1 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -9,6 +9,7 @@
#include "include/v8config.h"
#include "src/base/template-utils.h"
+#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
@@ -18,15 +19,14 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
-#include "src/isolate.h"
+#include "src/init/v8.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
-#include "src/transitions-inl.h"
-#include "src/utils-inl.h"
-#include "src/utils.h"
-#include "src/v8.h"
+#include "src/objects/transitions-inl.h"
+#include "src/utils/utils-inl.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -124,7 +124,7 @@ class ConcurrentMarkingVisitor final
// Perform a dummy acquire load to tell TSAN that there is no data race
// in mark-bit initialization. See MemoryChunk::Initialize for the
// corresponding release store.
- MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
+ MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
if (marking_state_.IsBlackOrGrey(heap_object)) {
@@ -174,11 +174,11 @@ class ConcurrentMarkingVisitor final
ObjectSlot end) final {}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object = rinfo->target_object();
RecordRelocSlot(host, rinfo, object);
if (!marking_state_.IsBlackOrGrey(object)) {
- if (host->IsWeakObject(object)) {
+ if (host.IsWeakObject(object)) {
weak_objects_->weak_objects_in_code.Push(task_id_,
std::make_pair(object, host));
} else {
@@ -199,7 +199,7 @@ class ConcurrentMarkingVisitor final
ObjectSlot slot = snapshot.slot(i);
Object object = snapshot.value(i);
DCHECK(!HasWeakHeapObjectTag(object));
- if (!object->IsHeapObject()) continue;
+ if (!object.IsHeapObject()) continue;
HeapObject heap_object = HeapObject::cast(object);
MarkObject(heap_object);
MarkCompactCollector::RecordSlot(host, slot, heap_object);
@@ -227,8 +227,8 @@ class ConcurrentMarkingVisitor final
if (size == 0) {
return 0;
}
- if (weak_ref->target()->IsHeapObject()) {
- HeapObject target = HeapObject::cast(weak_ref->target());
+ if (weak_ref.target().IsHeapObject()) {
+ HeapObject target = HeapObject::cast(weak_ref.target());
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the JSWeakRef, since the
// VisitJSObjectSubclass above didn't visit it.
@@ -247,10 +247,10 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(weak_cell)) return 0;
int size = WeakCell::BodyDescriptor::SizeOf(map, weak_cell);
- VisitMapPointer(weak_cell, weak_cell->map_slot());
+ VisitMapPointer(weak_cell, weak_cell.map_slot());
WeakCell::BodyDescriptor::IterateBody(map, weak_cell, size, this);
- if (weak_cell->target()->IsHeapObject()) {
- HeapObject target = HeapObject::cast(weak_cell->target());
+ if (weak_cell.target().IsHeapObject()) {
+ HeapObject target = HeapObject::cast(weak_cell.target());
if (marking_state_.IsBlackOrGrey(target)) {
// Record the slot inside the WeakCell, since the IterateBody above
// didn't visit it.
@@ -306,14 +306,14 @@ class ConcurrentMarkingVisitor final
int VisitSeqOneByteString(Map map, SeqOneByteString object) {
if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object->map_slot());
- return SeqOneByteString::SizeFor(object->synchronized_length());
+ VisitMapPointer(object, object.map_slot());
+ return SeqOneByteString::SizeFor(object.synchronized_length());
}
int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
if (!ShouldVisit(object)) return 0;
- VisitMapPointer(object, object->map_slot());
- return SeqTwoByteString::SizeFor(object->synchronized_length());
+ VisitMapPointer(object, object.map_slot());
+ return SeqTwoByteString::SizeFor(object.synchronized_length());
}
// ===========================================================================
@@ -330,24 +330,14 @@ class ConcurrentMarkingVisitor final
marking_state_.GreyToBlack(object);
int size = FixedArray::BodyDescriptor::SizeOf(map, object);
size_t current_progress_bar = chunk->ProgressBar();
- if (current_progress_bar == 0) {
- // Try to move the progress bar forward to start offset. This solves the
- // problem of not being able to observe a progress bar reset when
- // processing the first kProgressBarScanningChunk.
- if (!chunk->TrySetProgressBar(0,
- FixedArray::BodyDescriptor::kStartOffset))
- return 0;
- current_progress_bar = FixedArray::BodyDescriptor::kStartOffset;
- }
int start = static_cast<int>(current_progress_bar);
+ if (start == 0) start = FixedArray::BodyDescriptor::kStartOffset;
int end = Min(size, start + kProgressBarScanningChunk);
if (start < end) {
VisitPointers(object, object.RawField(start), object.RawField(end));
- // Setting the progress bar can fail if the object that is currently
- // scanned is also revisited. In this case, there may be two tasks racing
- // on the progress counter. The looser can bail out because the progress
- // bar is reset before the tasks race on the object.
- if (chunk->TrySetProgressBar(current_progress_bar, end) && (end < size)) {
+ bool success = chunk->TrySetProgressBar(current_progress_bar, end);
+ CHECK(success);
+ if (end < size) {
// The object can be pushed back onto the marking worklist only after
// progress bar was updated.
shared_.Push(object);
@@ -377,16 +367,16 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(shared_info)) return 0;
int size = SharedFunctionInfo::BodyDescriptor::SizeOf(map, shared_info);
- VisitMapPointer(shared_info, shared_info->map_slot());
+ VisitMapPointer(shared_info, shared_info.map_slot());
SharedFunctionInfo::BodyDescriptor::IterateBody(map, shared_info, size,
this);
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
- if (shared_info->ShouldFlushBytecode(bytecode_flush_mode_)) {
+ if (shared_info.ShouldFlushBytecode(bytecode_flush_mode_)) {
weak_objects_->bytecode_flushing_candidates.Push(task_id_, shared_info);
} else {
- VisitPointer(shared_info, shared_info->RawField(
+ VisitPointer(shared_info, shared_info.RawField(
SharedFunctionInfo::kFunctionDataOffset));
}
return size;
@@ -395,10 +385,10 @@ class ConcurrentMarkingVisitor final
int VisitBytecodeArray(Map map, BytecodeArray object) {
if (!ShouldVisit(object)) return 0;
int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
- VisitMapPointer(object, object->map_slot());
+ VisitMapPointer(object, object.map_slot());
BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
if (!is_forced_gc_) {
- object->MakeOlder();
+ object.MakeOlder();
}
return size;
}
@@ -408,7 +398,7 @@ class ConcurrentMarkingVisitor final
// Check if the JSFunction needs reset due to bytecode being flushed.
if (bytecode_flush_mode_ != BytecodeFlushMode::kDoNotFlushBytecode &&
- object->NeedsResetDueToFlushedBytecode()) {
+ object.NeedsResetDueToFlushedBytecode()) {
weak_objects_->flushed_js_functions.Push(task_id_, object);
}
@@ -418,7 +408,7 @@ class ConcurrentMarkingVisitor final
int VisitMap(Map meta_map, Map map) {
if (!ShouldVisit(map)) return 0;
int size = Map::BodyDescriptor::SizeOf(meta_map, map);
- if (map->CanTransition()) {
+ if (map.CanTransition()) {
// Maps that can transition share their descriptor arrays and require
// special visiting logic to avoid memory leaks.
// Since descriptor arrays are potentially shared, ensure that only the
@@ -426,9 +416,9 @@ class ConcurrentMarkingVisitor final
// non-empty descriptor array is marked, its header is also visited. The
// slot holding the descriptor array will be implicitly recorded when the
// pointer fields of this map are visited.
- DescriptorArray descriptors = map->synchronized_instance_descriptors();
+ DescriptorArray descriptors = map.synchronized_instance_descriptors();
MarkDescriptorArrayBlack(descriptors);
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors) {
// It is possible that the concurrent marker observes the
// number_of_own_descriptors out of sync with the descriptors. In that
@@ -438,7 +428,7 @@ class ConcurrentMarkingVisitor final
// std::min<int>() below.
VisitDescriptors(descriptors,
std::min<int>(number_of_own_descriptors,
- descriptors->number_of_descriptors()));
+ descriptors.number_of_descriptors()));
}
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
@@ -451,29 +441,29 @@ class ConcurrentMarkingVisitor final
void VisitDescriptors(DescriptorArray descriptor_array,
int number_of_own_descriptors) {
int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
- int16_t old_marked = descriptor_array->UpdateNumberOfMarkedDescriptors(
+ int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
mark_compact_epoch_, new_marked);
if (old_marked < new_marked) {
VisitPointers(
descriptor_array,
- MaybeObjectSlot(descriptor_array->GetDescriptorSlot(old_marked)),
- MaybeObjectSlot(descriptor_array->GetDescriptorSlot(new_marked)));
+ MaybeObjectSlot(descriptor_array.GetDescriptorSlot(old_marked)),
+ MaybeObjectSlot(descriptor_array.GetDescriptorSlot(new_marked)));
}
}
int VisitDescriptorArray(Map map, DescriptorArray array) {
if (!ShouldVisit(array)) return 0;
- VisitMapPointer(array, array->map_slot());
+ VisitMapPointer(array, array.map_slot());
int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
- VisitPointers(array, array->GetFirstPointerSlot(),
- array->GetDescriptorSlot(0));
- VisitDescriptors(array, array->number_of_descriptors());
+ VisitPointers(array, array.GetFirstPointerSlot(),
+ array.GetDescriptorSlot(0));
+ VisitDescriptors(array, array.number_of_descriptors());
return size;
}
int VisitTransitionArray(Map map, TransitionArray array) {
if (!ShouldVisit(array)) return 0;
- VisitMapPointer(array, array->map_slot());
+ VisitMapPointer(array, array.map_slot());
int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
weak_objects_->transition_arrays.Push(task_id_, array);
@@ -488,22 +478,22 @@ class ConcurrentMarkingVisitor final
if (!ShouldVisit(table)) return 0;
weak_objects_->ephemeron_hash_tables.Push(task_id_, table);
- for (int i = 0; i < table->Capacity(); i++) {
+ for (int i = 0; i < table.Capacity(); i++) {
ObjectSlot key_slot =
- table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
- HeapObject key = HeapObject::cast(table->KeyAt(i));
+ table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
+ HeapObject key = HeapObject::cast(table.KeyAt(i));
MarkCompactCollector::RecordSlot(table, key_slot, key);
ObjectSlot value_slot =
- table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
+ table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (marking_state_.IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
} else {
- Object value_obj = table->ValueAt(i);
+ Object value_obj = table.ValueAt(i);
- if (value_obj->IsHeapObject()) {
+ if (value_obj.IsHeapObject()) {
HeapObject value = HeapObject::cast(value_obj);
MarkCompactCollector::RecordSlot(table, value_slot, value);
@@ -517,7 +507,7 @@ class ConcurrentMarkingVisitor final
}
}
- return table->SizeFromMap(map);
+ return table.SizeFromMap(map);
}
// Implements ephemeron semantics: Marks value if key is already reachable.
@@ -541,7 +531,7 @@ class ConcurrentMarkingVisitor final
// Perform a dummy acquire load to tell TSAN that there is no data race
// in mark-bit initialization. See MemoryChunk::Initialize for the
// corresponding release store.
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object.address());
CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
if (marking_state_.WhiteToGrey(object)) {
@@ -552,8 +542,8 @@ class ConcurrentMarkingVisitor final
void MarkDescriptorArrayBlack(DescriptorArray descriptors) {
marking_state_.WhiteToGrey(descriptors);
if (marking_state_.GreyToBlack(descriptors)) {
- VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
- descriptors->GetDescriptorSlot(0));
+ VisitPointers(descriptors, descriptors.GetFirstPointerSlot(),
+ descriptors.GetDescriptorSlot(0));
}
}
@@ -595,7 +585,7 @@ class ConcurrentMarkingVisitor final
void VisitCustomWeakPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
- DCHECK(host->IsWeakCell() || host->IsJSWeakRef());
+ DCHECK(host.IsWeakCell() || host.IsJSWeakRef());
}
private:
@@ -604,7 +594,7 @@ class ConcurrentMarkingVisitor final
template <typename T>
int VisitJSObjectSubclassFast(Map map, T object) {
- DCHECK_IMPLIES(FLAG_unbox_double_fields, map->HasFastPointerLayout());
+ DCHECK_IMPLIES(FLAG_unbox_double_fields, map.HasFastPointerLayout());
using TBodyDescriptor = typename T::FastBodyDescriptor;
return VisitJSObjectSubclass<T, TBodyDescriptor>(map, object);
}
@@ -612,7 +602,7 @@ class ConcurrentMarkingVisitor final
template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
int VisitJSObjectSubclass(Map map, T object) {
int size = TBodyDescriptor::SizeOf(map, object);
- int used_size = map->UsedInstanceSize();
+ int used_size = map.UsedInstanceSize();
DCHECK_LE(used_size, size);
DCHECK_GE(used_size, T::kHeaderSize);
return VisitPartiallyWithSnapshot<T, TBodyDescriptor>(map, object,
@@ -621,7 +611,7 @@ class ConcurrentMarkingVisitor final
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object) {
- DCHECK(object->IsApiWrapper());
+ DCHECK(object.IsApiWrapper());
int size = VisitJSObjectSubclass(map, object);
if (size && embedder_tracing_enabled_) {
// Success: The object needs to be processed for embedder references on
@@ -635,13 +625,13 @@ class ConcurrentMarkingVisitor final
int VisitLeftTrimmableArray(Map map, T object) {
// The synchronized_length() function checks that the length is a Smi.
// This is not necessarily the case if the array is being left-trimmed.
- Object length = object->unchecked_synchronized_length();
+ Object length = object.unchecked_synchronized_length();
if (!ShouldVisit(object)) return 0;
// The cached length must be the actual length as the array is not black.
// Left trimming marks the array black before over-writing the length.
- DCHECK(length->IsSmi());
+ DCHECK(length.IsSmi());
int size = T::SizeFor(Smi::ToInt(length));
- VisitMapPointer(object, object->map_slot());
+ VisitMapPointer(object, object.map_slot());
T::BodyDescriptor::IterateBody(map, object, size, this);
return size;
}
@@ -666,7 +656,7 @@ class ConcurrentMarkingVisitor final
template <typename T, typename TBodyDescriptor>
const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
SlotSnapshottingVisitor visitor(&slot_snapshot_);
- visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
+ visitor.VisitPointer(object, ObjectSlot(object.map_slot().address()));
TBodyDescriptor::IterateBody(map, object, size, &visitor);
return slot_snapshot_;
}
@@ -813,12 +803,12 @@ void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
Address new_space_top = heap_->new_space()->original_top_acquire();
Address new_space_limit = heap_->new_space()->original_limit_relaxed();
Address new_large_object = heap_->new_lo_space()->pending_object();
- Address addr = object->address();
+ Address addr = object.address();
if ((new_space_top <= addr && addr < new_space_limit) ||
addr == new_large_object) {
on_hold_->Push(task_id, object);
} else {
- Map map = object->synchronized_map();
+ Map map = object.synchronized_map();
current_marked_bytes += visitor.Visit(map, object);
}
}
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 7fbc445061..be2fc03d46 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -6,16 +6,16 @@
#define V8_HEAP_CONCURRENT_MARKING_H_
#include "include/v8-platform.h"
-#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
-#include "src/cancelable-task.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/heap/worklist.h"
-#include "src/utils.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/tasks/cancelable-task.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
index 0ba84d8798..c032f384b3 100644
--- a/deps/v8/src/heap/embedder-tracing.cc
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -5,6 +5,7 @@
#include "src/heap/embedder-tracing.h"
#include "src/base/logging.h"
+#include "src/heap/gc-tracer.h"
#include "src/objects/embedder-data-slot.h"
#include "src/objects/js-objects-inl.h"
@@ -19,18 +20,29 @@ void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
}
-void LocalEmbedderHeapTracer::TracePrologue() {
+void LocalEmbedderHeapTracer::TracePrologue(
+ EmbedderHeapTracer::TraceFlags flags) {
if (!InUse()) return;
num_v8_marking_worklist_was_empty_ = 0;
embedder_worklist_empty_ = false;
- remote_tracer_->TracePrologue();
+ remote_tracer_->TracePrologue(flags);
}
void LocalEmbedderHeapTracer::TraceEpilogue() {
if (!InUse()) return;
- remote_tracer_->TraceEpilogue();
+ EmbedderHeapTracer::TraceSummary summary;
+ remote_tracer_->TraceEpilogue(&summary);
+ remote_stats_.allocated_size = summary.allocated_size;
+ // Force a check next time increased memory is reported. This allows for
+ // setting limits close to actual heap sizes.
+ remote_stats_.allocated_size_limit_for_check = 0;
+ constexpr double kMinReportingTimeMs = 0.5;
+ if (summary.time > kMinReportingTimeMs) {
+ isolate_->heap()->tracer()->RecordEmbedderSpeed(summary.allocated_size,
+ summary.time);
+ }
}
void LocalEmbedderHeapTracer::EnterFinalPause() {
@@ -73,8 +85,8 @@ LocalEmbedderHeapTracer::ProcessingScope::~ProcessingScope() {
void LocalEmbedderHeapTracer::ProcessingScope::TracePossibleWrapper(
JSObject js_object) {
- DCHECK(js_object->IsApiWrapper());
- if (js_object->GetEmbedderFieldCount() < 2) return;
+ DCHECK(js_object.IsApiWrapper());
+ if (js_object.GetEmbedderFieldCount() < 2) return;
void* pointer0;
void* pointer1;
@@ -99,5 +111,14 @@ void LocalEmbedderHeapTracer::ProcessingScope::AddWrapperInfoForTesting(
FlushWrapperCacheIfFull();
}
+void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() {
+ if (!FLAG_global_gc_scheduling) return;
+
+ Heap* heap = isolate_->heap();
+ heap->StartIncrementalMarkingIfAllocationLimitIsReached(
+ heap->GCFlagsForIncrementalMarking(),
+ kGCCallbackScheduleIdleGarbageCollection);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index 1b30552660..4309fb722a 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -6,8 +6,8 @@
#define V8_HEAP_EMBEDDER_TRACING_H_
#include "include/v8.h"
-#include "src/flags.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
@@ -48,7 +48,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
EmbedderHeapTracer* remote_tracer() const { return remote_tracer_; }
void SetRemoteTracer(EmbedderHeapTracer* tracer);
- void TracePrologue();
+ void TracePrologue(EmbedderHeapTracer::TraceFlags flags);
void TraceEpilogue();
void EnterFinalPause();
bool Trace(double deadline);
@@ -76,7 +76,27 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
embedder_worklist_empty_ = is_empty;
}
+ void IncreaseAllocatedSize(size_t bytes) {
+ remote_stats_.allocated_size += bytes;
+ remote_stats_.accumulated_allocated_size += bytes;
+ if (remote_stats_.allocated_size >
+ remote_stats_.allocated_size_limit_for_check) {
+ StartIncrementalMarkingIfNeeded();
+ remote_stats_.allocated_size_limit_for_check =
+ remote_stats_.allocated_size + kEmbedderAllocatedThreshold;
+ }
+ }
+
+ void StartIncrementalMarkingIfNeeded();
+
+ size_t allocated_size() const { return remote_stats_.allocated_size; }
+ size_t accumulated_allocated_size() const {
+ return remote_stats_.accumulated_allocated_size;
+ }
+
private:
+ static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
+
Isolate* const isolate_;
EmbedderHeapTracer* remote_tracer_ = nullptr;
@@ -88,6 +108,19 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
// segments of potential embedder fields to move to the main thread.
bool embedder_worklist_empty_ = false;
+ struct RemoteStatistics {
+ // Allocated size of objects in bytes reported by the embedder. Updated via
+ // TraceSummary at the end of tracing and incrementally when the GC is not
+ // in progress.
+ size_t allocated_size = 0;
+ // Limit for |allocated_size_| in bytes to avoid checking for starting a GC
+ // on each increment.
+ size_t allocated_size_limit_for_check = 0;
+ // Totally accumulated bytes allocated by the embedder. Monotonically
+ // increasing value. Used to approximate allocation rate.
+ size_t accumulated_allocated_size = 0;
+ } remote_stats_;
+
friend class EmbedderStackStateScope;
};
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index dce99498da..32237da877 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -9,14 +9,14 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
-#include "src/handles-inl.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/string-inl.h"
-#include "src/string-hasher.h"
+#include "src/strings/string-hasher.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 72737bdaf7..03896f7827 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -4,26 +4,26 @@
#include "src/heap/factory.h"
-#include "src/accessors.h"
-#include "src/allocation-site-scopes.h"
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/base/bits.h"
-#include "src/bootstrapper.h"
+#include "src/builtins/accessors.h"
#include "src/builtins/constants-table-builder.h"
-#include "src/compiler.h"
-#include "src/conversions.h"
-#include "src/counters.h"
-#include "src/hash-seed-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/ic/handler-configuration-inl.h"
+#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate-inl.h"
-#include "src/log.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/numbers/conversions.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/objects/allocation-site-inl.h"
+#include "src/objects/allocation-site-scopes.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
@@ -48,9 +48,8 @@
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/struct-inl.h"
#include "src/objects/template-objects-inl.h"
-#include "src/transitions-inl.h"
-#include "src/unicode-cache.h"
-#include "src/unicode-inl.h"
+#include "src/objects/transitions-inl.h"
+#include "src/strings/unicode-inl.h"
namespace v8 {
namespace internal {
@@ -72,63 +71,139 @@ int ComputeCodeObjectSize(const CodeDesc& desc) {
return object_size;
}
-void InitializeCode(Heap* heap, Handle<Code> code, int object_size,
- const CodeDesc& desc, Code::Kind kind,
- Handle<Object> self_ref, int32_t builtin_index,
- Handle<ByteArray> source_position_table,
- Handle<DeoptimizationData> deopt_data,
- Handle<ByteArray> reloc_info,
- Handle<CodeDataContainer> data_container,
- bool is_turbofanned, int stack_slots) {
- DCHECK(IsAligned(code->address(), kCodeAlignment));
- DCHECK_IMPLIES(
- !heap->memory_allocator()->code_range().is_empty(),
- heap->memory_allocator()->code_range().contains(code->address()));
-
- constexpr bool kIsNotOffHeapTrampoline = false;
- const bool has_unwinding_info = desc.unwinding_info != nullptr;
-
- code->set_raw_instruction_size(desc.instr_size);
- code->set_relocation_info(*reloc_info);
- code->initialize_flags(kind, has_unwinding_info, is_turbofanned, stack_slots,
- kIsNotOffHeapTrampoline);
- code->set_builtin_index(builtin_index);
- code->set_code_data_container(*data_container);
- code->set_deoptimization_data(*deopt_data);
- code->set_source_position_table(*source_position_table);
- code->set_safepoint_table_offset(desc.safepoint_table_offset);
- code->set_handler_table_offset(desc.handler_table_offset);
- code->set_constant_pool_offset(desc.constant_pool_offset);
- code->set_code_comments_offset(desc.code_comments_offset);
-
- // Allow self references to created code object by patching the handle to
- // point to the newly allocated Code object.
- if (!self_ref.is_null()) {
- DCHECK(self_ref->IsOddball());
- DCHECK(Oddball::cast(*self_ref)->kind() == Oddball::kSelfReferenceMarker);
- if (FLAG_embedded_builtins) {
- auto builder = heap->isolate()->builtins_constants_table_builder();
- if (builder != nullptr) builder->PatchSelfReference(self_ref, code);
- }
- *(self_ref.location()) = code->ptr();
+} // namespace
+
+Factory::CodeBuilder::CodeBuilder(Isolate* isolate, const CodeDesc& desc,
+ Code::Kind kind)
+ : isolate_(isolate),
+ code_desc_(desc),
+ kind_(kind),
+ source_position_table_(isolate_->factory()->empty_byte_array()) {}
+
+MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
+ bool retry_allocation_or_fail) {
+ const auto factory = isolate_->factory();
+ // Allocate objects needed for code initialization.
+ Handle<ByteArray> reloc_info =
+ factory->NewByteArray(code_desc_.reloc_size, AllocationType::kOld);
+ Handle<CodeDataContainer> data_container;
+
+ // Use a canonical off-heap trampoline CodeDataContainer if possible.
+ const int32_t promise_rejection_flag =
+ Code::IsPromiseRejectionField::encode(true);
+ if (read_only_data_container_ &&
+ (kind_specific_flags_ == 0 ||
+ kind_specific_flags_ == promise_rejection_flag)) {
+ const ReadOnlyRoots roots(isolate_);
+ const auto canonical_code_data_container =
+ kind_specific_flags_ == 0
+ ? roots.trampoline_trivial_code_data_container_handle()
+ : roots.trampoline_promise_rejection_code_data_container_handle();
+ DCHECK_EQ(canonical_code_data_container->kind_specific_flags(),
+ kind_specific_flags_);
+ data_container = canonical_code_data_container;
+ } else {
+ data_container = factory->NewCodeDataContainer(
+ 0, read_only_data_container_ ? AllocationType::kReadOnly
+ : AllocationType::kOld);
+ data_container->set_kind_specific_flags(kind_specific_flags_);
}
- // Migrate generated code.
- // The generated code can contain embedded objects (typically from handles)
- // in a pointer-to-tagged-value format (i.e. with indirection like a handle)
- // that are dereferenced during the copy to point directly to the actual heap
- // objects. These pointers can include references to the code object itself,
- // through the self_reference parameter.
- code->CopyFromNoFlush(heap, desc);
+ Handle<Code> code;
+ {
+ int object_size = ComputeCodeObjectSize(code_desc_);
+ Heap* heap = isolate_->heap();
- code->clear_padding();
+ CodePageCollectionMemoryModificationScope code_allocation(heap);
+ HeapObject result;
+ if (retry_allocation_or_fail) {
+ result =
+ heap->AllocateRawWithRetryOrFail(object_size, AllocationType::kCode);
+ } else {
+ result =
+ heap->AllocateRawWithLightRetry(object_size, AllocationType::kCode);
+ // Return an empty handle if we cannot allocate the code object.
+ if (result.is_null()) return MaybeHandle<Code>();
+ }
+
+ if (!is_movable_) {
+ result = heap->EnsureImmovableCode(result, object_size);
+ }
+
+ // The code object has not been fully initialized yet. We rely on the
+ // fact that no allocation will happen from this point on.
+ DisallowHeapAllocation no_gc;
+
+ result.set_map_after_allocation(*factory->code_map(), SKIP_WRITE_BARRIER);
+ code = handle(Code::cast(result), isolate_);
+ DCHECK(IsAligned(code->address(), kCodeAlignment));
+ DCHECK_IMPLIES(
+ !heap->memory_allocator()->code_range().is_empty(),
+ heap->memory_allocator()->code_range().contains(code->address()));
+
+ constexpr bool kIsNotOffHeapTrampoline = false;
+ const bool has_unwinding_info = code_desc_.unwinding_info != nullptr;
+
+ code->set_raw_instruction_size(code_desc_.instr_size);
+ code->set_relocation_info(*reloc_info);
+ code->initialize_flags(kind_, has_unwinding_info, is_turbofanned_,
+ stack_slots_, kIsNotOffHeapTrampoline);
+ code->set_builtin_index(builtin_index_);
+ code->set_code_data_container(*data_container);
+ code->set_deoptimization_data(*deoptimization_data_);
+ code->set_source_position_table(*source_position_table_);
+ code->set_safepoint_table_offset(code_desc_.safepoint_table_offset);
+ code->set_handler_table_offset(code_desc_.handler_table_offset);
+ code->set_constant_pool_offset(code_desc_.constant_pool_offset);
+ code->set_code_comments_offset(code_desc_.code_comments_offset);
+
+ // Allow self references to created code object by patching the handle to
+ // point to the newly allocated Code object.
+ Handle<Object> self_reference;
+ if (self_reference_.ToHandle(&self_reference)) {
+ DCHECK(self_reference->IsOddball());
+ DCHECK(Oddball::cast(*self_reference).kind() ==
+ Oddball::kSelfReferenceMarker);
+ if (FLAG_embedded_builtins) {
+ auto builder = isolate_->builtins_constants_table_builder();
+ if (builder != nullptr)
+ builder->PatchSelfReference(self_reference, code);
+ }
+ *(self_reference.location()) = code->ptr();
+ }
+
+ // Migrate generated code.
+ // The generated code can contain embedded objects (typically from handles)
+ // in a pointer-to-tagged-value format (i.e. with indirection like a handle)
+ // that are dereferenced during the copy to point directly to the actual
+ // heap objects. These pointers can include references to the code object
+ // itself, through the self_reference parameter.
+ code->CopyFromNoFlush(heap, code_desc_);
+
+ code->clear_padding();
#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) code->ObjectVerify(heap->isolate());
+ if (FLAG_verify_heap) code->ObjectVerify(isolate_);
#endif
+
+ // Flush the instruction cache before changing the permissions.
+ // Note: we do this before setting permissions to ReadExecute because on
+ // some older ARM kernels there is a bug which causes an access error on
+ // cache flush instructions to trigger access error on non-writable memory.
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
+ code->FlushICache();
+ }
+
+ return code;
}
-} // namespace
+MaybeHandle<Code> Factory::CodeBuilder::TryBuild() {
+ return BuildInternal(false);
+}
+
+Handle<Code> Factory::CodeBuilder::Build() {
+ return BuildInternal(true).ToHandleChecked();
+}
HeapObject Factory::AllocateRawWithImmortalMap(int size,
AllocationType allocation,
@@ -136,7 +211,7 @@ HeapObject Factory::AllocateRawWithImmortalMap(int size,
AllocationAlignment alignment) {
HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
size, allocation, alignment);
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
}
@@ -151,10 +226,10 @@ HeapObject Factory::AllocateRawWithAllocationSite(
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
- result->set_map_after_allocation(*map, write_barrier_mode);
+ result.set_map_after_allocation(*map, write_barrier_mode);
if (!allocation_site.is_null()) {
AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
- Object(result->ptr() + map->instance_size()));
+ Object(result.ptr() + map->instance_size()));
InitializeAllocationMemento(alloc_memento, *allocation_site);
}
return result;
@@ -162,11 +237,11 @@ HeapObject Factory::AllocateRawWithAllocationSite(
void Factory::InitializeAllocationMemento(AllocationMemento memento,
AllocationSite allocation_site) {
- memento->set_map_after_allocation(*allocation_memento_map(),
- SKIP_WRITE_BARRIER);
- memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+ memento.set_map_after_allocation(*allocation_memento_map(),
+ SKIP_WRITE_BARRIER);
+ memento.set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
if (FLAG_allocation_site_pretenuring) {
- allocation_site->IncrementMementoCreateCount();
+ allocation_site.IncrementMementoCreateCount();
}
}
@@ -205,7 +280,7 @@ HeapObject Factory::New(Handle<Map> map, AllocationType allocation) {
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
- result->set_map_after_allocation(*map, write_barrier_mode);
+ result.set_map_after_allocation(*map, write_barrier_mode);
return result;
}
@@ -215,7 +290,7 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
Heap* heap = isolate()->heap();
HeapObject result =
heap->AllocateRawWithRetryOrFail(size, allocation, alignment);
- heap->CreateFillerObjectAt(result->address(), size, ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(result.address(), size, ClearRecordedSlots::kNo);
return Handle<HeapObject>(result, isolate());
}
@@ -274,7 +349,7 @@ Handle<TemplateObjectDescription> Factory::NewTemplateObjectDescription(
DCHECK_LT(0, raw_strings->length());
Handle<TemplateObjectDescription> result =
Handle<TemplateObjectDescription>::cast(
- NewStruct(TUPLE2_TYPE, AllocationType::kOld));
+ NewStruct(TEMPLATE_OBJECT_DESCRIPTION_TYPE, AllocationType::kOld));
result->set_raw_strings(*raw_strings);
result->set_cooked_strings(*cooked_strings);
return result;
@@ -300,7 +375,7 @@ Handle<PropertyArray> Factory::NewPropertyArray(int length,
DCHECK_LE(0, length);
if (length == 0) return empty_property_array();
HeapObject result = AllocateRawFixedArray(length, allocation);
- result->set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
+ result.set_map_after_allocation(*property_array_map(), SKIP_WRITE_BARRIER);
Handle<PropertyArray> array(PropertyArray::cast(result), isolate());
array->initialize_length(length);
MemsetTagged(array->data_start(), *undefined_value(), length);
@@ -313,7 +388,7 @@ Handle<FixedArray> Factory::NewFixedArrayWithFiller(RootIndex map_root_index,
HeapObject result = AllocateRawFixedArray(length, allocation);
DCHECK(RootsTable::IsImmortalImmovable(map_root_index));
Map map = Map::cast(isolate()->root(map_root_index));
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
Handle<FixedArray> array(FixedArray::cast(result), isolate());
array->set_length(length);
MemsetTagged(array->data_start(), filler, length);
@@ -345,7 +420,7 @@ Handle<T> Factory::NewWeakFixedArrayWithMap(RootIndex map_root_index,
HeapObject result =
AllocateRawArray(WeakFixedArray::SizeFor(length), allocation);
Map map = Map::cast(isolate()->root(map_root_index));
- result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
+ result.set_map_after_allocation(map, SKIP_WRITE_BARRIER);
Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
array->set_length(length);
@@ -372,7 +447,7 @@ Handle<WeakFixedArray> Factory::NewWeakFixedArray(int length,
HeapObject result =
AllocateRawArray(WeakFixedArray::SizeFor(length), allocation);
DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kWeakFixedArrayMap));
- result->set_map_after_allocation(*weak_fixed_array_map(), SKIP_WRITE_BARRIER);
+ result.set_map_after_allocation(*weak_fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<WeakFixedArray> array(WeakFixedArray::cast(result), isolate());
array->set_length(length);
MemsetTagged(ObjectSlot(array->data_start()), *undefined_value(), length);
@@ -393,7 +468,7 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
MemoryChunk* chunk = MemoryChunk::FromHeapObject(result);
chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
}
- result->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
+ result.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<FixedArray> array(FixedArray::cast(result), isolate());
array->set_length(length);
MemsetTagged(array->data_start(), ReadOnlyRoots(heap).undefined_value(),
@@ -436,7 +511,7 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
Handle<SharedFunctionInfo> shared,
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array,
AllocationType allocation) {
- int length = shared->feedback_metadata()->slot_count();
+ int length = shared->feedback_metadata().slot_count();
DCHECK_LE(0, length);
int size = FeedbackVector::SizeFor(length);
@@ -450,7 +525,7 @@ Handle<FeedbackVector> Factory::NewFeedbackVector(
vector->set_length(length);
vector->set_invocation_count(0);
vector->set_profiler_ticks(0);
- vector->set_deopt_count(0);
+ vector->clear_padding();
vector->set_closure_feedback_cell_array(*closure_feedback_cell_array);
// TODO(leszeks): Initialize based on the feedback metadata.
@@ -631,34 +706,59 @@ Handle<AccessorPair> Factory::NewAccessorPair() {
}
// Internalized strings are created in the old generation (data space).
-Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
- Utf8StringKey key(string, HashSeed(isolate()));
+Handle<String> Factory::InternalizeUtf8String(
+ const Vector<const char>& string) {
+ Vector<const uint8_t> utf8_data = Vector<const uint8_t>::cast(string);
+ Utf8Decoder decoder(utf8_data);
+ if (decoder.is_ascii()) return InternalizeString(utf8_data);
+ if (decoder.is_one_byte()) {
+ std::unique_ptr<uint8_t[]> buffer(new uint8_t[decoder.utf16_length()]);
+ decoder.Decode(buffer.get(), utf8_data);
+ return InternalizeString(
+ Vector<const uint8_t>(buffer.get(), decoder.utf16_length()));
+ }
+ std::unique_ptr<uint16_t[]> buffer(new uint16_t[decoder.utf16_length()]);
+ decoder.Decode(buffer.get(), utf8_data);
+ return InternalizeString(
+ Vector<const uc16>(buffer.get(), decoder.utf16_length()));
+}
+
+template <typename Char>
+Handle<String> Factory::InternalizeString(const Vector<const Char>& string,
+ bool convert_encoding) {
+ SequentialStringKey<Char> key(string, HashSeed(isolate()), convert_encoding);
return InternalizeStringWithKey(&key);
}
-Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
- OneByteStringKey key(string, HashSeed(isolate()));
- return InternalizeStringWithKey(&key);
-}
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<String> Factory::InternalizeString(
+ const Vector<const uint8_t>& string, bool convert_encoding);
+template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Handle<String> Factory::InternalizeString(
+ const Vector<const uint16_t>& string, bool convert_encoding);
-Handle<String> Factory::InternalizeOneByteString(
- Handle<SeqOneByteString> string, int from, int length) {
- SeqOneByteSubStringKey key(isolate(), string, from, length);
+template <typename SeqString>
+Handle<String> Factory::InternalizeString(Handle<SeqString> string, int from,
+ int length, bool convert_encoding) {
+ SeqSubStringKey<SeqString> key(isolate(), string, from, length,
+ convert_encoding);
return InternalizeStringWithKey(&key);
}
-Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
- TwoByteStringKey key(string, HashSeed(isolate()));
- return InternalizeStringWithKey(&key);
-}
+template Handle<String> Factory::InternalizeString(
+ Handle<SeqOneByteString> string, int from, int length,
+ bool convert_encoding);
+template Handle<String> Factory::InternalizeString(
+ Handle<SeqTwoByteString> string, int from, int length,
+ bool convert_encoding);
template <class StringTableKey>
Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
return StringTable::LookupKey(isolate(), key);
}
-MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
- AllocationType allocation) {
+MaybeHandle<String> Factory::NewStringFromOneByte(
+ const Vector<const uint8_t>& string, AllocationType allocation) {
DCHECK_NE(allocation, AllocationType::kReadOnly);
int length = string.length();
if (length == 0) return empty_string();
@@ -670,127 +770,91 @@ MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
DisallowHeapAllocation no_gc;
// Copy the characters into the new object.
- CopyChars(SeqOneByteString::cast(*result)->GetChars(no_gc), string.start(),
+ CopyChars(SeqOneByteString::cast(*result).GetChars(no_gc), string.begin(),
length);
return result;
}
-MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
+MaybeHandle<String> Factory::NewStringFromUtf8(const Vector<const char>& string,
AllocationType allocation) {
- DCHECK_NE(allocation, AllocationType::kReadOnly);
- // Check for ASCII first since this is the common case.
- const char* ascii_data = string.start();
- int length = string.length();
- int non_ascii_start = String::NonAsciiStart(ascii_data, length);
- if (non_ascii_start >= length) {
- // If the string is ASCII, we do not need to convert the characters
- // since UTF8 is backwards compatible with ASCII.
- return NewStringFromOneByte(Vector<const uint8_t>::cast(string),
- allocation);
- }
-
- std::unique_ptr<uint16_t[]> buffer(new uint16_t[length - non_ascii_start]);
-
- const uint8_t* cursor =
- reinterpret_cast<const uint8_t*>(&string[non_ascii_start]);
- const uint8_t* end = reinterpret_cast<const uint8_t*>(string.end());
-
- uint16_t* output_cursor = buffer.get();
+ Vector<const uint8_t> utf8_data = Vector<const uint8_t>::cast(string);
+ Utf8Decoder decoder(utf8_data);
- uint32_t incomplete_char = 0;
- unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
+ if (decoder.utf16_length() == 0) return empty_string();
- while (cursor < end) {
- unibrow::uchar t =
- unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
-
- if (V8_LIKELY(t <= unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- *(output_cursor++) = static_cast<uc16>(t); // The most frequent case.
- } else if (t == unibrow::Utf8::kIncomplete) {
- continue;
- } else {
- *(output_cursor++) = unibrow::Utf16::LeadSurrogate(t);
- *(output_cursor++) = unibrow::Utf16::TrailSurrogate(t);
- }
- }
+ if (decoder.is_one_byte()) {
+ // Allocate string.
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ NewRawOneByteString(decoder.utf16_length(), allocation), String);
- unibrow::uchar t = unibrow::Utf8::ValueOfIncrementalFinish(&state);
- if (t != unibrow::Utf8::kBufferEmpty) {
- *(output_cursor++) = static_cast<uc16>(t);
+ DisallowHeapAllocation no_gc;
+ decoder.Decode(result->GetChars(no_gc), utf8_data);
+ return result;
}
- DCHECK_LE(output_cursor, buffer.get() + length - non_ascii_start);
- int utf16_length = static_cast<int>(output_cursor - buffer.get());
- DCHECK_GT(utf16_length, 0);
-
// Allocate string.
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- NewRawTwoByteString(non_ascii_start + utf16_length, allocation), String);
-
- DCHECK_LE(non_ascii_start + utf16_length, length);
+ NewRawTwoByteString(decoder.utf16_length(), allocation), String);
DisallowHeapAllocation no_gc;
- uint16_t* data = result->GetChars(no_gc);
- CopyChars(data, ascii_data, non_ascii_start);
- CopyChars(data + non_ascii_start, buffer.get(), utf16_length);
-
+ decoder.Decode(result->GetChars(no_gc), utf8_data);
return result;
}
MaybeHandle<String> Factory::NewStringFromUtf8SubString(
Handle<SeqOneByteString> str, int begin, int length,
AllocationType allocation) {
- Access<UnicodeCache::Utf8Decoder> decoder(
- isolate()->unicode_cache()->utf8_decoder());
- int non_ascii_start;
- int utf16_length = 0;
+ Vector<const uint8_t> utf8_data;
{
DisallowHeapAllocation no_gc;
- const char* ascii_data =
- reinterpret_cast<const char*>(str->GetChars(no_gc) + begin);
- non_ascii_start = String::NonAsciiStart(ascii_data, length);
- if (non_ascii_start < length) {
- // Non-ASCII and we need to decode.
- auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
- length - non_ascii_start);
- decoder->Reset(non_ascii);
-
- utf16_length = static_cast<int>(decoder->Utf16Length());
- }
+ utf8_data = Vector<const uint8_t>(str->GetChars(no_gc) + begin, length);
+ }
+ Utf8Decoder decoder(utf8_data);
+
+ if (length == 1) {
+ uint16_t t;
+ // Decode even in the case of length 1 since it can be a bad character.
+ decoder.Decode(&t, utf8_data);
+ return LookupSingleCharacterStringFromCode(t);
}
- if (non_ascii_start >= length) {
+ if (decoder.is_ascii()) {
// If the string is ASCII, we can just make a substring.
// TODO(v8): the allocation flag is ignored in this case.
return NewSubString(str, begin, begin + length);
}
- DCHECK_GT(utf16_length, 0);
+ DCHECK_GT(decoder.utf16_length(), 0);
+
+ if (decoder.is_one_byte()) {
+ // Allocate string.
+ Handle<SeqOneByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ NewRawOneByteString(decoder.utf16_length(), allocation), String);
+ DisallowHeapAllocation no_gc;
+ // Update pointer references, since the original string may have moved after
+ // allocation.
+ utf8_data = Vector<const uint8_t>(str->GetChars(no_gc) + begin, length);
+ decoder.Decode(result->GetChars(no_gc), utf8_data);
+ return result;
+ }
// Allocate string.
Handle<SeqTwoByteString> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), result,
- NewRawTwoByteString(non_ascii_start + utf16_length, allocation), String);
+ NewRawTwoByteString(decoder.utf16_length(), allocation), String);
+ DisallowHeapAllocation no_gc;
// Update pointer references, since the original string may have moved after
// allocation.
- DisallowHeapAllocation no_gc;
- const char* ascii_data =
- reinterpret_cast<const char*>(str->GetChars(no_gc) + begin);
- auto non_ascii = Vector<const char>(ascii_data + non_ascii_start,
- length - non_ascii_start);
-
- // Copy ASCII portion.
- uint16_t* data = result->GetChars(no_gc);
- for (int i = 0; i < non_ascii_start; i++) {
- *data++ = *ascii_data++;
- }
-
- // Now write the remainder.
- decoder->WriteUtf16(data, utf16_length, non_ascii);
+ utf8_data = Vector<const uint8_t>(str->GetChars(no_gc) + begin, length);
+ decoder.Decode(result->GetChars(no_gc), utf8_data);
return result;
}
@@ -817,9 +881,9 @@ MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
}
}
-MaybeHandle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
- AllocationType allocation) {
- return NewStringFromTwoByte(string.start(), string.length(), allocation);
+MaybeHandle<String> Factory::NewStringFromTwoByte(
+ const Vector<const uc16>& string, AllocationType allocation) {
+ return NewStringFromTwoByte(string.begin(), string.length(), allocation);
}
MaybeHandle<String> Factory::NewStringFromTwoByte(
@@ -830,37 +894,10 @@ MaybeHandle<String> Factory::NewStringFromTwoByte(
namespace {
-bool inline IsOneByte(Vector<const char> str, int chars) {
- // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
- return chars == str.length();
-}
-
bool inline IsOneByte(Handle<String> str) {
return str->IsOneByteRepresentation();
}
-inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
- int len) {
- // Only works for one byte strings.
- DCHECK(vector.length() == len);
- MemCopy(chars, vector.start(), len);
-}
-
-inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
- int len) {
- unibrow::Utf8Iterator it = unibrow::Utf8Iterator(vector);
- while (!it.Done()) {
- DCHECK_GT(len, 0);
- len -= 1;
-
- uint16_t c = *it;
- ++it;
- DCHECK_NE(unibrow::Utf8::kBadChar, c);
- *chars++ = c;
- }
- DCHECK_EQ(len, 0);
-}
-
inline void WriteOneByteData(Handle<String> s, uint8_t* chars, int len) {
DCHECK(s->length() == len);
String::WriteToFlat(*s, chars, 0, len);
@@ -897,23 +934,30 @@ Handle<SeqOneByteString> Factory::AllocateRawOneByteInternalizedString(
}
Handle<String> Factory::AllocateTwoByteInternalizedString(
- Vector<const uc16> str, uint32_t hash_field) {
- CHECK_GE(String::kMaxLength, str.length());
- DCHECK_NE(0, str.length()); // Use Heap::empty_string() instead.
+ const Vector<const uc16>& str, uint32_t hash_field) {
+ Handle<SeqTwoByteString> result =
+ AllocateRawTwoByteInternalizedString(str.length(), hash_field);
+ DisallowHeapAllocation no_gc;
+
+ // Fill in the characters.
+ MemCopy(result->GetChars(no_gc), str.begin(), str.length() * kUC16Size);
+
+ return result;
+}
+
+Handle<SeqTwoByteString> Factory::AllocateRawTwoByteInternalizedString(
+ int length, uint32_t hash_field) {
+ CHECK_GE(String::kMaxLength, length);
+ DCHECK_NE(0, length); // Use Heap::empty_string() instead.
Map map = *internalized_string_map();
- int size = SeqTwoByteString::SizeFor(str.length());
+ int size = SeqTwoByteString::SizeFor(length);
HeapObject result =
AllocateRawWithImmortalMap(size, AllocationType::kOld, map);
Handle<SeqTwoByteString> answer(SeqTwoByteString::cast(result), isolate());
- answer->set_length(str.length());
+ answer->set_length(length);
answer->set_hash_field(hash_field);
- DCHECK_EQ(size, answer->Size());
- DisallowHeapAllocation no_gc;
-
- // Fill in the characters.
- MemCopy(answer->GetChars(no_gc), str.start(), str.length() * kUC16Size);
-
+ DCHECK_EQ(size, result.Size());
return answer;
}
@@ -947,50 +991,24 @@ Handle<String> Factory::AllocateInternalizedStringImpl(T t, int chars,
DisallowHeapAllocation no_gc;
if (is_one_byte) {
- WriteOneByteData(t, SeqOneByteString::cast(*answer)->GetChars(no_gc),
- chars);
+ WriteOneByteData(t, SeqOneByteString::cast(*answer).GetChars(no_gc), chars);
} else {
- WriteTwoByteData(t, SeqTwoByteString::cast(*answer)->GetChars(no_gc),
- chars);
+ WriteTwoByteData(t, SeqTwoByteString::cast(*answer).GetChars(no_gc), chars);
}
return answer;
}
-Handle<String> Factory::NewInternalizedStringFromUtf8(Vector<const char> str,
- int chars,
- uint32_t hash_field) {
- if (IsOneByte(str, chars)) {
- Handle<SeqOneByteString> result =
- AllocateRawOneByteInternalizedString(str.length(), hash_field);
- DisallowHeapAllocation no_allocation;
- MemCopy(result->GetChars(no_allocation), str.start(), str.length());
- return result;
- }
- return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
-}
-
-Handle<String> Factory::NewOneByteInternalizedString(Vector<const uint8_t> str,
- uint32_t hash_field) {
+Handle<String> Factory::NewOneByteInternalizedString(
+ const Vector<const uint8_t>& str, uint32_t hash_field) {
Handle<SeqOneByteString> result =
AllocateRawOneByteInternalizedString(str.length(), hash_field);
DisallowHeapAllocation no_allocation;
- MemCopy(result->GetChars(no_allocation), str.start(), str.length());
- return result;
-}
-
-Handle<String> Factory::NewOneByteInternalizedSubString(
- Handle<SeqOneByteString> string, int offset, int length,
- uint32_t hash_field) {
- Handle<SeqOneByteString> result =
- AllocateRawOneByteInternalizedString(length, hash_field);
- DisallowHeapAllocation no_allocation;
- MemCopy(result->GetChars(no_allocation),
- string->GetChars(no_allocation) + offset, length);
+ MemCopy(result->GetChars(no_allocation), str.begin(), str.length());
return result;
}
-Handle<String> Factory::NewTwoByteInternalizedString(Vector<const uc16> str,
- uint32_t hash_field) {
+Handle<String> Factory::NewTwoByteInternalizedString(
+ const Vector<const uc16>& str, uint32_t hash_field) {
return AllocateTwoByteInternalizedString(str, hash_field);
}
@@ -1006,7 +1024,7 @@ Handle<String> Factory::NewInternalizedStringImpl(Handle<String> string,
namespace {
MaybeHandle<Map> GetInternalizedStringMap(Factory* f, Handle<String> string) {
- switch (string->map()->instance_type()) {
+ switch (string->map().instance_type()) {
case STRING_TYPE:
return f->internalized_string_map();
case ONE_BYTE_STRING_TYPE:
@@ -1089,8 +1107,8 @@ MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
return string;
}
-Handle<String> Factory::LookupSingleCharacterStringFromCode(uint32_t code) {
- if (code <= String::kMaxOneByteCharCodeU) {
+Handle<String> Factory::LookupSingleCharacterStringFromCode(uint16_t code) {
+ if (code <= unibrow::Latin1::kMaxChar) {
{
DisallowHeapAllocation no_allocation;
Object value = single_character_string_cache()->get(code);
@@ -1098,61 +1116,26 @@ Handle<String> Factory::LookupSingleCharacterStringFromCode(uint32_t code) {
return handle(String::cast(value), isolate());
}
}
- uint8_t buffer[1];
- buffer[0] = static_cast<uint8_t>(code);
- Handle<String> result =
- InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
+ uint8_t buffer[] = {static_cast<uint8_t>(code)};
+ Handle<String> result = InternalizeString(Vector<const uint8_t>(buffer, 1));
single_character_string_cache()->set(code, *result);
return result;
}
- DCHECK_LE(code, String::kMaxUtf16CodeUnitU);
-
- Handle<SeqTwoByteString> result = NewRawTwoByteString(1).ToHandleChecked();
- result->SeqTwoByteStringSet(0, static_cast<uint16_t>(code));
- return result;
-}
-
-// Returns true for a character in a range. Both limits are inclusive.
-static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
- // This makes uses of the the unsigned wraparound.
- return character - from <= to - from;
+ uint16_t buffer[] = {code};
+ return InternalizeString(Vector<const uint16_t>(buffer, 1));
}
static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
uint16_t c1,
uint16_t c2) {
- // Numeric strings have a different hash algorithm not known by
- // LookupTwoCharsStringIfExists, so we skip this step for such strings.
- if (!Between(c1, '0', '9') || !Between(c2, '0', '9')) {
- Handle<String> result;
- if (StringTable::LookupTwoCharsStringIfExists(isolate, c1, c2)
- .ToHandle(&result)) {
- return result;
- }
- }
-
- // Now we know the length is 2, we might as well make use of that fact
- // when building the new string.
- if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
- // We can do this.
- DCHECK(base::bits::IsPowerOfTwo(String::kMaxOneByteCharCodeU +
- 1)); // because of this.
- Handle<SeqOneByteString> str =
- isolate->factory()->NewRawOneByteString(2).ToHandleChecked();
- DisallowHeapAllocation no_allocation;
- uint8_t* dest = str->GetChars(no_allocation);
- dest[0] = static_cast<uint8_t>(c1);
- dest[1] = static_cast<uint8_t>(c2);
- return str;
- } else {
- Handle<SeqTwoByteString> str =
- isolate->factory()->NewRawTwoByteString(2).ToHandleChecked();
- DisallowHeapAllocation no_allocation;
- uc16* dest = str->GetChars(no_allocation);
- dest[0] = c1;
- dest[1] = c2;
- return str;
+ if ((c1 | c2) <= unibrow::Latin1::kMaxChar) {
+ uint8_t buffer[] = {static_cast<uint8_t>(c1), static_cast<uint8_t>(c2)};
+ return isolate->factory()->InternalizeString(
+ Vector<const uint8_t>(buffer, 2));
}
+ uint16_t buffer[] = {c1, c2};
+ return isolate->factory()->InternalizeString(
+ Vector<const uint16_t>(buffer, 2));
}
template <typename SinkChar, typename StringType>
@@ -1639,7 +1622,7 @@ Handle<Struct> Factory::NewStruct(InstanceType type,
default:
UNREACHABLE();
}
- int size = map->instance_size();
+ int size = map.instance_size();
HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
Handle<Struct> str(Struct::cast(result), isolate());
str->InitializeBody(size);
@@ -1779,7 +1762,7 @@ Handle<Foreign> Factory::NewForeign(Address addr, AllocationType allocation) {
STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
Map map = *foreign_map();
HeapObject result =
- AllocateRawWithImmortalMap(map->instance_size(), allocation, map);
+ AllocateRawWithImmortalMap(map.instance_size(), allocation, map);
Handle<Foreign> foreign(Foreign::cast(result), isolate());
foreign->set_foreign_address(addr);
return foreign;
@@ -1831,47 +1814,6 @@ Handle<BytecodeArray> Factory::NewBytecodeArray(
return instance;
}
-Handle<FixedTypedArrayBase> Factory::NewFixedTypedArrayWithExternalPointer(
- int length, ExternalArrayType array_type, void* external_pointer,
- AllocationType allocation) {
- // TODO(7881): Smi length check
- DCHECK(0 <= length && length <= Smi::kMaxValue);
- int size = FixedTypedArrayBase::kHeaderSize;
- HeapObject result = AllocateRawWithImmortalMap(
- size, allocation,
- ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type));
- Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(result),
- isolate());
- elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
- elements->set_external_pointer(external_pointer);
- elements->set_length(length);
- return elements;
-}
-
-Handle<FixedTypedArrayBase> Factory::NewFixedTypedArray(
- size_t length, size_t byte_length, ExternalArrayType array_type,
- bool initialize, AllocationType allocation) {
- // TODO(7881): Smi length check
- DCHECK(0 <= length && length <= Smi::kMaxValue);
- CHECK(byte_length <= kMaxInt - FixedTypedArrayBase::kDataOffset);
- size_t size =
- OBJECT_POINTER_ALIGN(byte_length + FixedTypedArrayBase::kDataOffset);
- Map map = ReadOnlyRoots(isolate()).MapForFixedTypedArray(array_type);
- AllocationAlignment alignment =
- array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned;
- HeapObject object = AllocateRawWithImmortalMap(static_cast<int>(size),
- allocation, map, alignment);
-
- Handle<FixedTypedArrayBase> elements(FixedTypedArrayBase::cast(object),
- isolate());
- elements->set_base_pointer(*elements, SKIP_WRITE_BARRIER);
- elements->set_external_pointer(
- FixedTypedArrayBase::ExternalPointerPtrForOnHeapArray());
- elements->set_length(static_cast<int>(length));
- if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
- return elements;
-}
-
Handle<Cell> Factory::NewCell(Handle<Object> value) {
AllowDeferredHandleDereference convert_to_cell;
STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
@@ -1884,8 +1826,8 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- HeapObject result = AllocateRawWithImmortalMap(
- FeedbackCell::kSize, AllocationType::kOld, *no_closures_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
+ AllocationType::kOld, *no_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
@@ -1895,8 +1837,8 @@ Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- HeapObject result = AllocateRawWithImmortalMap(
- FeedbackCell::kSize, AllocationType::kOld, *one_closure_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
+ AllocationType::kOld, *one_closure_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
@@ -1906,8 +1848,8 @@ Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
- HeapObject result = AllocateRawWithImmortalMap(
- FeedbackCell::kSize, AllocationType::kOld, *many_closures_cell_map());
+ HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
+ AllocationType::kOld, *many_closures_cell_map());
Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
cell->set_value(*value);
cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
@@ -1940,10 +1882,10 @@ Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors,
int size = DescriptorArray::SizeFor(number_of_all_descriptors);
HeapObject obj =
isolate()->heap()->AllocateRawWithRetryOrFail(size, allocation);
- obj->set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
+ obj.set_map_after_allocation(*descriptor_array_map(), SKIP_WRITE_BARRIER);
DescriptorArray array = DescriptorArray::cast(obj);
- array->Initialize(*empty_enum_cache(), *undefined_value(),
- number_of_descriptors, slack);
+ array.Initialize(*empty_enum_cache(), *undefined_value(),
+ number_of_descriptors, slack);
return Handle<DescriptorArray>(array, isolate());
}
@@ -1992,7 +1934,7 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
IsTerminalElementsKind(elements_kind));
HeapObject result = isolate()->heap()->AllocateRawWithRetryOrFail(
Map::kSize, AllocationType::kMap);
- result->set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
+ result.set_map_after_allocation(*meta_map(), SKIP_WRITE_BARRIER);
return handle(InitializeMap(Map::cast(result), type, instance_size,
elements_kind, inobject_properties),
isolate());
@@ -2001,42 +1943,42 @@ Handle<Map> Factory::NewMap(InstanceType type, int instance_size,
Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
ElementsKind elements_kind,
int inobject_properties) {
- map->set_instance_type(type);
- map->set_prototype(*null_value(), SKIP_WRITE_BARRIER);
- map->set_constructor_or_backpointer(*null_value(), SKIP_WRITE_BARRIER);
- map->set_instance_size(instance_size);
- if (map->IsJSObjectMap()) {
+ map.set_instance_type(type);
+ map.set_prototype(*null_value(), SKIP_WRITE_BARRIER);
+ map.set_constructor_or_backpointer(*null_value(), SKIP_WRITE_BARRIER);
+ map.set_instance_size(instance_size);
+ if (map.IsJSObjectMap()) {
DCHECK(!ReadOnlyHeap::Contains(map));
- map->SetInObjectPropertiesStartInWords(instance_size / kTaggedSize -
- inobject_properties);
- DCHECK_EQ(map->GetInObjectProperties(), inobject_properties);
- map->set_prototype_validity_cell(*invalid_prototype_validity_cell());
+ map.SetInObjectPropertiesStartInWords(instance_size / kTaggedSize -
+ inobject_properties);
+ DCHECK_EQ(map.GetInObjectProperties(), inobject_properties);
+ map.set_prototype_validity_cell(*invalid_prototype_validity_cell());
} else {
DCHECK_EQ(inobject_properties, 0);
- map->set_inobject_properties_start_or_constructor_function_index(0);
- map->set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
- }
- map->set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
- SKIP_WRITE_BARRIER);
- map->set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
- map->SetInObjectUnusedPropertyFields(inobject_properties);
- map->SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
+ map.set_inobject_properties_start_or_constructor_function_index(0);
+ map.set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
+ }
+ map.set_dependent_code(DependentCode::cast(*empty_weak_fixed_array()),
+ SKIP_WRITE_BARRIER);
+ map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
+ map.SetInObjectUnusedPropertyFields(inobject_properties);
+ map.SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
if (FLAG_unbox_double_fields) {
- map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
// Must be called only after |instance_type|, |instance_size| and
// |layout_descriptor| are set.
- map->set_visitor_id(Map::GetVisitorId(map));
- map->set_bit_field(0);
- map->set_bit_field2(Map::IsExtensibleBit::kMask);
- DCHECK(!map->is_in_retained_map_list());
+ map.set_visitor_id(Map::GetVisitorId(map));
+ map.set_bit_field(0);
+ map.set_bit_field2(Map::IsExtensibleBit::kMask);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptorsBit::encode(true) |
Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
- map->set_bit_field3(bit_field3);
- map->clear_padding();
- map->set_elements_kind(elements_kind);
- map->set_new_target_is_base(true);
+ map.set_bit_field3(bit_field3);
+ DCHECK(!map.is_in_retained_map_list());
+ map.clear_padding();
+ map.set_elements_kind(elements_kind);
+ map.set_new_target_is_base(true);
isolate()->counters()->maps_created()->Increment();
if (FLAG_trace_maps) LOG(isolate(), MapCreate(map));
return map;
@@ -2074,21 +2016,21 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
DCHECK(Heap::InYoungGeneration(raw_clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
- Heap::CopyBlock(raw_clone->address(), source->address(), object_size);
+ Heap::CopyBlock(raw_clone.address(), source->address(), object_size);
Handle<JSObject> clone(JSObject::cast(raw_clone), isolate());
if (!site.is_null()) {
AllocationMemento alloc_memento = AllocationMemento::unchecked_cast(
- Object(raw_clone->ptr() + object_size));
+ Object(raw_clone.ptr() + object_size));
InitializeAllocationMemento(alloc_memento, *site);
}
SLOW_DCHECK(clone->GetElementsKind() == source->GetElementsKind());
FixedArrayBase elements = source->elements();
// Update elements if necessary.
- if (elements->length() > 0) {
+ if (elements.length() > 0) {
FixedArrayBase elem;
- if (elements->map() == *fixed_cow_array_map()) {
+ if (elements.map() == *fixed_cow_array_map()) {
elem = elements;
} else if (source->HasDoubleElements()) {
elem = *CopyFixedDoubleArray(
@@ -2102,10 +2044,10 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
// Update properties if necessary.
if (source->HasFastProperties()) {
PropertyArray properties = source->property_array();
- if (properties->length() > 0) {
+ if (properties.length() > 0) {
// TODO(gsathya): Do not copy hash code.
Handle<PropertyArray> prop = CopyArrayWithMap(
- handle(properties, isolate()), handle(properties->map(), isolate()));
+ handle(properties, isolate()), handle(properties.map(), isolate()));
clone->set_raw_properties_or_hash(*prop);
}
} else {
@@ -2134,21 +2076,14 @@ template <typename T>
Handle<T> Factory::CopyArrayWithMap(Handle<T> src, Handle<Map> map) {
int len = src->length();
HeapObject obj = AllocateRawFixedArray(len, AllocationType::kYoung);
- obj->set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
+ obj.set_map_after_allocation(*map, SKIP_WRITE_BARRIER);
Handle<T> result(T::cast(obj), isolate());
+ initialize_length(result, len);
+
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-
- if (mode == SKIP_WRITE_BARRIER) {
- // Eliminate the write barrier if possible.
- Heap::CopyBlock(obj->address() + kTaggedSize, src->address() + kTaggedSize,
- T::SizeFor(len) - kTaggedSize);
- } else {
- // Slow case: Just copy the content one-by-one.
- initialize_length(result, len);
- for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
- }
+ result->CopyElements(isolate(), 0, *src, 0, len, mode);
return result;
}
@@ -2160,16 +2095,17 @@ Handle<T> Factory::CopyArrayAndGrow(Handle<T> src, int grow_by,
int old_len = src->length();
int new_len = old_len + grow_by;
HeapObject obj = AllocateRawFixedArray(new_len, allocation);
- obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
+ obj.set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
Handle<T> result(T::cast(obj), isolate());
initialize_length(result, new_len);
// Copy the content.
DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
- MemsetTagged(result->data_start() + old_len, *undefined_value(), grow_by);
+ WriteBarrierMode mode = obj.GetWriteBarrierMode(no_gc);
+ result->CopyElements(isolate(), 0, *src, 0, old_len, mode);
+ MemsetTagged(ObjectSlot(result->data_start() + old_len),
+ ReadOnlyRoots(isolate()).undefined_value(), grow_by);
return result;
}
@@ -2186,25 +2122,8 @@ Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
Handle<WeakFixedArray> Factory::CopyWeakFixedArrayAndGrow(
Handle<WeakFixedArray> src, int grow_by, AllocationType allocation) {
- DCHECK(
- !src->IsTransitionArray()); // Compacted by GC, this code doesn't work.
- int old_len = src->length();
- int new_len = old_len + grow_by;
- DCHECK_GE(new_len, old_len);
- HeapObject obj = AllocateRawFixedArray(new_len, allocation);
- DCHECK_EQ(old_len, src->length());
- obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
-
- WeakFixedArray result = WeakFixedArray::cast(obj);
- result->set_length(new_len);
-
- // Copy the content.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < old_len; i++) result->Set(i, src->Get(i), mode);
- MemsetTagged(ObjectSlot(result->RawFieldOfElementAt(old_len)),
- ReadOnlyRoots(isolate()).undefined_value(), grow_by);
- return Handle<WeakFixedArray>(result, isolate());
+ DCHECK(!src->IsTransitionArray()); // Compacted by GC, this code doesn't work
+ return CopyArrayAndGrow(src, grow_by, allocation);
}
Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
@@ -2213,18 +2132,20 @@ Handle<WeakArrayList> Factory::CopyWeakArrayListAndGrow(
int new_capacity = old_capacity + grow_by;
DCHECK_GE(new_capacity, old_capacity);
HeapObject obj = AllocateRawWeakArrayList(new_capacity, allocation);
- obj->set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
+ obj.set_map_after_allocation(src->map(), SKIP_WRITE_BARRIER);
WeakArrayList result = WeakArrayList::cast(obj);
- result->set_length(src->length());
- result->set_capacity(new_capacity);
+ int old_len = src->length();
+ result.set_length(old_len);
+ result.set_capacity(new_capacity);
// Copy the content.
DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < old_capacity; i++) result->Set(i, src->Get(i), mode);
- MemsetTagged(ObjectSlot(result->data_start() + old_capacity),
- ReadOnlyRoots(isolate()).undefined_value(), grow_by);
+ WriteBarrierMode mode = obj.GetWriteBarrierMode(no_gc);
+ result.CopyElements(isolate(), 0, *src, 0, old_len, mode);
+ MemsetTagged(ObjectSlot(result.data_start() + old_len),
+ ReadOnlyRoots(isolate()).undefined_value(),
+ new_capacity - old_len);
return Handle<WeakArrayList>(result, isolate());
}
@@ -2241,14 +2162,14 @@ Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
if (new_len == 0) return empty_fixed_array();
HeapObject obj = AllocateRawFixedArray(new_len, allocation);
- obj->set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
+ obj.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
Handle<FixedArray> result(FixedArray::cast(obj), isolate());
result->set_length(new_len);
// Copy the content.
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < new_len; i++) result->set(i, array->get(i), mode);
+ result->CopyElements(isolate(), 0, *array, 0, new_len, mode);
return result;
}
@@ -2283,34 +2204,6 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
return result;
}
-Handle<FeedbackVector> Factory::CopyFeedbackVector(
- Handle<FeedbackVector> array) {
- int len = array->length();
- HeapObject obj = AllocateRawWithImmortalMap(FeedbackVector::SizeFor(len),
- AllocationType::kYoung,
- *feedback_vector_map());
- Handle<FeedbackVector> result(FeedbackVector::cast(obj), isolate());
-
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-
- // Eliminate the write barrier if possible.
- if (mode == SKIP_WRITE_BARRIER) {
- Heap::CopyBlock(result->address() + kTaggedSize,
- result->address() + kTaggedSize,
- FeedbackVector::SizeFor(len) - kTaggedSize);
- } else {
- // Slow case: Just copy the content one-by-one.
- result->set_shared_function_info(array->shared_function_info());
- result->set_optimized_code_weak_or_smi(array->optimized_code_weak_or_smi());
- result->set_invocation_count(array->invocation_count());
- result->set_profiler_ticks(array->profiler_ticks());
- result->set_deopt_count(array->deopt_count());
- for (int i = 0; i < len; i++) result->set(i, array->get(i), mode);
- }
- return result;
-}
-
Handle<Object> Factory::NewNumber(double value, AllocationType allocation) {
// Materialize as a SMI if possible.
int32_t int_value;
@@ -2361,7 +2254,7 @@ Handle<FreshlyAllocatedBigInt> Factory::NewBigInt(int length,
HeapObject result = AllocateRawWithImmortalMap(BigInt::SizeFor(length),
allocation, *bigint_map());
FreshlyAllocatedBigInt bigint = FreshlyAllocatedBigInt::cast(result);
- bigint->clear_padding();
+ bigint.clear_padding();
return handle(bigint, isolate());
}
@@ -2413,7 +2306,7 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
}
Handle<Object> Factory::NewInvalidStringLengthError() {
- if (FLAG_abort_on_stack_or_string_length_overflow) {
+ if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on invalid string length");
}
// Invalidate the "string length" protector.
@@ -2469,7 +2362,7 @@ Handle<JSFunction> Factory::NewFunctionForTest(Handle<String> name) {
NewFunctionArgs args = NewFunctionArgs::ForFunctionWithoutCode(
name, isolate()->sloppy_function_map(), LanguageMode::kSloppy);
Handle<JSFunction> result = NewFunction(args);
- DCHECK(is_sloppy(result->shared()->language_mode()));
+ DCHECK(is_sloppy(result->shared().language_mode()));
return result;
}
@@ -2480,7 +2373,7 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
Handle<NativeContext> context(isolate()->native_context());
Handle<Map> map = args.GetMap(isolate());
Handle<SharedFunctionInfo> info =
- NewSharedFunctionInfo(args.name_, args.maybe_exported_function_data_,
+ NewSharedFunctionInfo(args.name_, args.maybe_wasm_function_data_,
args.maybe_builtin_id_, kNormalFunction);
// Proper language mode in shared function info will be set later.
@@ -2509,7 +2402,7 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
(*map == *isolate()->sloppy_function_with_readonly_prototype_map()) ||
(*map == *isolate()->strict_function_map()) ||
(*map == *isolate()->strict_function_without_prototype_map()) ||
- (*map == *isolate()->native_function_map()));
+ (*map == *isolate()->wasm_exported_function_map()));
}
#endif
@@ -2521,7 +2414,7 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
}
if (args.should_set_language_mode_) {
- result->shared()->set_language_mode(args.language_mode_);
+ result->shared().set_language_mode(args.language_mode_);
}
if (args.should_create_and_set_initial_map_) {
@@ -2539,12 +2432,12 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
}
Handle<Map> initial_map = NewMap(args.type_, args.instance_size_,
elements_kind, args.inobject_properties_);
- result->shared()->set_expected_nof_properties(args.inobject_properties_);
+ result->shared().set_expected_nof_properties(args.inobject_properties_);
// TODO(littledan): Why do we have this is_generator test when
// NewFunctionPrototype already handles finding an appropriately
// shared prototype?
Handle<HeapObject> prototype = args.maybe_prototype_.ToHandleChecked();
- if (!IsResumableFunction(result->shared()->kind())) {
+ if (!IsResumableFunction(result->shared().kind())) {
if (prototype->IsTheHole(isolate())) {
prototype = NewFunctionPrototype(result);
}
@@ -2558,13 +2451,13 @@ Handle<JSFunction> Factory::NewFunction(const NewFunctionArgs& args) {
Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
// Make sure to use globals from the function's context, since the function
// can be from a different context.
- Handle<NativeContext> native_context(function->context()->native_context(),
+ Handle<NativeContext> native_context(function->context().native_context(),
isolate());
Handle<Map> new_map;
- if (V8_UNLIKELY(IsAsyncGeneratorFunction(function->shared()->kind()))) {
+ if (V8_UNLIKELY(IsAsyncGeneratorFunction(function->shared().kind()))) {
new_map = handle(native_context->async_generator_object_prototype_map(),
isolate());
- } else if (IsResumableFunction(function->shared()->kind())) {
+ } else if (IsResumableFunction(function->shared().kind())) {
// Generator and async function prototypes can share maps since they
// don't have "constructor" properties.
new_map =
@@ -2581,7 +2474,7 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
DCHECK(!new_map->is_prototype_map());
Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
- if (!IsResumableFunction(function->shared()->kind())) {
+ if (!IsResumableFunction(function->shared().kind())) {
JSObject::AddProperty(isolate(), prototype, constructor_string(), function,
DONT_ENUM);
}
@@ -2604,7 +2497,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
AllocationType allocation) {
Handle<Map> initial_map(
- Map::cast(context->native_context()->get(info->function_map_index())),
+ Map::cast(context->native_context().get(info->function_map_index())),
isolate());
return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
allocation);
@@ -2614,7 +2507,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> info, Handle<Context> context,
Handle<FeedbackCell> feedback_cell, AllocationType allocation) {
Handle<Map> initial_map(
- Map::cast(context->native_context()->get(info->function_map_index())),
+ Map::cast(context->native_context().get(info->function_map_index())),
isolate());
return NewFunctionFromSharedFunctionInfo(initial_map, info, context,
feedback_cell, allocation);
@@ -2652,9 +2545,9 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
// Check that the optimized code in the feedback cell wasn't marked for
// deoptimization while not pointed to by any live JSFunction.
- if (feedback_cell->value()->IsFeedbackVector()) {
+ if (feedback_cell->value().IsFeedbackVector()) {
FeedbackVector::cast(feedback_cell->value())
- ->EvictOptimizedCodeMarkedForDeoptimization(
+ .EvictOptimizedCodeMarkedForDeoptimization(
*info, "new function from shared function info");
}
result->set_raw_feedback_cell(*feedback_cell);
@@ -2729,10 +2622,10 @@ Handle<JSObject> Factory::NewExternal(void* value) {
return external;
}
-Handle<CodeDataContainer> Factory::NewCodeDataContainer(int flags) {
+Handle<CodeDataContainer> Factory::NewCodeDataContainer(
+ int flags, AllocationType allocation) {
Handle<CodeDataContainer> data_container(
- CodeDataContainer::cast(
- New(code_data_container_map(), AllocationType::kOld)),
+ CodeDataContainer::cast(New(code_data_container_map(), allocation)),
isolate());
data_container->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
data_container->set_kind_specific_flags(flags);
@@ -2740,127 +2633,20 @@ Handle<CodeDataContainer> Factory::NewCodeDataContainer(int flags) {
return data_container;
}
-MaybeHandle<Code> Factory::TryNewCode(
- const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, MaybeHandle<ByteArray> maybe_source_position_table,
- MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
- bool is_turbofanned, int stack_slots) {
- // Allocate objects needed for code initialization.
- Handle<ByteArray> reloc_info =
- NewByteArray(desc.reloc_size, Builtins::IsBuiltinId(builtin_index)
- ? AllocationType::kReadOnly
- : AllocationType::kOld);
- Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
- Handle<ByteArray> source_position_table =
- maybe_source_position_table.is_null()
- ? empty_byte_array()
- : maybe_source_position_table.ToHandleChecked();
- Handle<DeoptimizationData> deopt_data =
- maybe_deopt_data.is_null() ? DeoptimizationData::Empty(isolate())
- : maybe_deopt_data.ToHandleChecked();
- Handle<Code> code;
- {
- int object_size = ComputeCodeObjectSize(desc);
-
- Heap* heap = isolate()->heap();
- CodePageCollectionMemoryModificationScope code_allocation(heap);
- HeapObject result =
- heap->AllocateRawWithLightRetry(object_size, AllocationType::kCode);
-
- // Return an empty handle if we cannot allocate the code object.
- if (result.is_null()) return MaybeHandle<Code>();
-
- if (movability == kImmovable) {
- result = heap->EnsureImmovableCode(result, object_size);
- }
-
- // The code object has not been fully initialized yet. We rely on the
- // fact that no allocation will happen from this point on.
- DisallowHeapAllocation no_gc;
-
- result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
- code = handle(Code::cast(result), isolate());
-
- InitializeCode(heap, code, object_size, desc, kind, self_ref, builtin_index,
- source_position_table, deopt_data, reloc_info,
- data_container, is_turbofanned, stack_slots);
-
- // Flush the instruction cache before changing the permissions.
- // Note: we do this before setting permissions to ReadExecute because on
- // some older ARM kernels there is a bug which causes an access error on
- // cache flush instructions to trigger access error on non-writable memory.
- // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
- code->FlushICache();
- }
-
- return code;
-}
-
-Handle<Code> Factory::NewCode(
- const CodeDesc& desc, Code::Kind kind, Handle<Object> self_ref,
- int32_t builtin_index, MaybeHandle<ByteArray> maybe_source_position_table,
- MaybeHandle<DeoptimizationData> maybe_deopt_data, Movability movability,
- bool is_turbofanned, int stack_slots) {
- // Allocate objects needed for code initialization.
- Handle<ByteArray> reloc_info =
- NewByteArray(desc.reloc_size, Builtins::IsBuiltinId(builtin_index)
- ? AllocationType::kReadOnly
- : AllocationType::kOld);
- Handle<CodeDataContainer> data_container = NewCodeDataContainer(0);
- Handle<ByteArray> source_position_table =
- maybe_source_position_table.is_null()
- ? empty_byte_array()
- : maybe_source_position_table.ToHandleChecked();
- Handle<DeoptimizationData> deopt_data =
- maybe_deopt_data.is_null() ? DeoptimizationData::Empty(isolate())
- : maybe_deopt_data.ToHandleChecked();
-
- Handle<Code> code;
- {
- int object_size = ComputeCodeObjectSize(desc);
-
- Heap* heap = isolate()->heap();
- CodePageCollectionMemoryModificationScope code_allocation(heap);
- HeapObject result =
- heap->AllocateRawWithRetryOrFail(object_size, AllocationType::kCode);
- if (movability == kImmovable) {
- result = heap->EnsureImmovableCode(result, object_size);
- }
-
- // The code object has not been fully initialized yet. We rely on the
- // fact that no allocation will happen from this point on.
- DisallowHeapAllocation no_gc;
-
- result->set_map_after_allocation(*code_map(), SKIP_WRITE_BARRIER);
- code = handle(Code::cast(result), isolate());
-
- InitializeCode(heap, code, object_size, desc, kind, self_ref, builtin_index,
- source_position_table, deopt_data, reloc_info,
- data_container, is_turbofanned, stack_slots);
-
- // Flush the instruction cache before changing the permissions.
- // Note: we do this before setting permissions to ReadExecute because on
- // some older ARM kernels there is a bug which causes an access error on
- // cache flush instructions to trigger access error on non-writable memory.
- // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
- code->FlushICache();
- }
-
- return code;
-}
-
Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Address off_heap_entry) {
CHECK_NOT_NULL(isolate()->embedded_blob());
CHECK_NE(0, isolate()->embedded_blob_size());
CHECK(Builtins::IsIsolateIndependentBuiltin(*code));
- Handle<Code> result =
- Builtins::GenerateOffHeapTrampolineFor(isolate(), off_heap_entry);
+ Handle<Code> result = Builtins::GenerateOffHeapTrampolineFor(
+ isolate(), off_heap_entry,
+ code->code_data_container().kind_specific_flags());
+ // The CodeDataContainer should not be modified beyond this point since it's
+ // now possibly canonicalized.
// The trampoline code object must inherit specific flags from the original
// builtin (e.g. the safepoint-table offset). We set them manually here.
-
{
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*result);
CodePageMemoryModificationScope code_allocation(chunk);
@@ -2868,8 +2654,6 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
const bool set_is_off_heap_trampoline = true;
const int stack_slots =
code->has_safepoint_info() ? code->stack_slots() : 0;
- result->code_data_container()->set_kind_specific_flags(
- code->code_data_container()->kind_specific_flags());
result->initialize_flags(code->kind(), code->has_unwinding_info(),
code->is_turbofanned(), stack_slots,
set_is_off_heap_trampoline);
@@ -2887,9 +2671,9 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
#ifdef DEBUG
// Verify that the contents are the same.
ByteArray reloc_info = result->relocation_info();
- DCHECK_EQ(reloc_info->length(), canonical_reloc_info->length());
- for (int i = 0; i < reloc_info->length(); ++i) {
- DCHECK_EQ(reloc_info->get(i), canonical_reloc_info->get(i));
+ DCHECK_EQ(reloc_info.length(), canonical_reloc_info.length());
+ for (int i = 0; i < reloc_info.length(); ++i) {
+ DCHECK_EQ(reloc_info.get(i), canonical_reloc_info.get(i));
}
#endif
result->set_relocation_info(canonical_reloc_info);
@@ -2899,8 +2683,8 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
}
Handle<Code> Factory::CopyCode(Handle<Code> code) {
- Handle<CodeDataContainer> data_container =
- NewCodeDataContainer(code->code_data_container()->kind_specific_flags());
+ Handle<CodeDataContainer> data_container = NewCodeDataContainer(
+ code->code_data_container().kind_specific_flags(), AllocationType::kOld);
Heap* heap = isolate()->heap();
Handle<Code> new_code;
@@ -2912,7 +2696,7 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
// Copy code object.
Address old_addr = code->address();
- Address new_addr = result->address();
+ Address new_addr = result.address();
Heap::CopyBlock(new_addr, old_addr, obj_size);
new_code = handle(Code::cast(result), isolate());
@@ -3075,7 +2859,7 @@ void Factory::InitializeJSObjectBody(Handle<JSObject> obj, Handle<Map> map,
}
obj->InitializeBody(*map, start_offset, *undefined_value(), filler);
if (in_progress) {
- map->FindRootMap(isolate())->InobjectSlackTrackingStep(isolate());
+ map->FindRootMap(isolate()).InobjectSlackTrackingStep(isolate());
}
}
@@ -3096,9 +2880,9 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
InitializeJSObjectFromMap(js_obj, empty_fixed_array(), map);
- DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
+ DCHECK(js_obj->HasFastElements() || js_obj->HasTypedArrayElements() ||
js_obj->HasFastStringWrapperElements() ||
- js_obj->HasFastArgumentsElements());
+ js_obj->HasFastArgumentsElements() || js_obj->HasDictionaryElements());
return js_obj;
}
@@ -3135,10 +2919,10 @@ Handle<JSObject> Factory::NewSlowJSObjectWithPropertiesAndElements(
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
AllocationType allocation) {
NativeContext native_context = isolate()->raw_native_context();
- Map map = native_context->GetInitialJSArrayMap(elements_kind);
+ Map map = native_context.GetInitialJSArrayMap(elements_kind);
if (map.is_null()) {
- JSFunction array_function = native_context->array_function();
- map = array_function->initial_map();
+ JSFunction array_function = native_context.array_function();
+ map = array_function.initial_map();
}
return Handle<JSArray>::cast(
NewJSObjectFromMap(handle(map, isolate()), allocation));
@@ -3202,7 +2986,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array, int length, int capacity,
Handle<JSWeakMap> Factory::NewJSWeakMap() {
NativeContext native_context = isolate()->raw_native_context();
- Handle<Map> map(native_context->js_weak_map_fun()->initial_map(), isolate());
+ Handle<Map> map(native_context.js_weak_map_fun().initial_map(), isolate());
Handle<JSWeakMap> weakmap(JSWeakMap::cast(*NewJSObjectFromMap(map)),
isolate());
{
@@ -3226,7 +3010,7 @@ Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
Handle<JSFunction> function) {
- DCHECK(IsResumableFunction(function->shared()->kind()));
+ DCHECK(IsResumableFunction(function->shared().kind()));
JSFunction::EnsureHasInitialMap(function);
Handle<Map> map(function->initial_map(), isolate());
@@ -3237,15 +3021,15 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
}
Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
- Handle<ModuleInfo> module_info(code->scope_info()->ModuleDescriptorInfo(),
+ Handle<ModuleInfo> module_info(code->scope_info().ModuleDescriptorInfo(),
isolate());
Handle<ObjectHashTable> exports =
ObjectHashTable::New(isolate(), module_info->RegularExportCount());
Handle<FixedArray> regular_exports =
NewFixedArray(module_info->RegularExportCount());
Handle<FixedArray> regular_imports =
- NewFixedArray(module_info->regular_imports()->length());
- int requested_modules_length = module_info->module_requests()->length();
+ NewFixedArray(module_info->regular_imports().length());
+ int requested_modules_length = module_info->module_requests().length();
Handle<FixedArray> requested_modules =
requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
: empty_fixed_array();
@@ -3336,9 +3120,8 @@ void Factory::TypeAndSizeForElementsKind(ElementsKind kind,
namespace {
-static void ForFixedTypedArray(ExternalArrayType array_type,
- size_t* element_size,
- ElementsKind* element_kind) {
+void ForFixedTypedArray(ExternalArrayType array_type, size_t* element_size,
+ ElementsKind* element_kind) {
switch (array_type) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case kExternal##Type##Array: \
@@ -3352,138 +3135,77 @@ static void ForFixedTypedArray(ExternalArrayType array_type,
UNREACHABLE();
}
-JSFunction GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
- NativeContext native_context = isolate->context()->native_context();
- switch (type) {
-#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
- case kExternal##Type##Array: \
- return native_context->type##_array_fun();
-
- TYPED_ARRAYS(TYPED_ARRAY_FUN)
-#undef TYPED_ARRAY_FUN
- }
- UNREACHABLE();
-}
-
-JSFunction GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
- NativeContext native_context = isolate->context()->native_context();
- switch (elements_kind) {
-#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- return native_context->type##_array_fun();
-
- TYPED_ARRAYS(TYPED_ARRAY_FUN)
-#undef TYPED_ARRAY_FUN
-
- default:
- UNREACHABLE();
- }
-}
+} // namespace
-void SetupArrayBufferView(i::Isolate* isolate,
- i::Handle<i::JSArrayBufferView> obj,
- i::Handle<i::JSArrayBuffer> buffer,
- size_t byte_offset, size_t byte_length) {
- DCHECK_LE(byte_offset + byte_length, buffer->byte_length());
- DCHECK_EQ(obj->GetEmbedderFieldCount(),
- v8::ArrayBufferView::kEmbedderFieldCount);
+Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
+ Handle<Map> map, Handle<FixedArrayBase> elements,
+ Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
+ AllocationType allocation) {
+ CHECK_LE(byte_length, buffer->byte_length());
+ CHECK_LE(byte_offset, buffer->byte_length());
+ CHECK_LE(byte_offset + byte_length, buffer->byte_length());
+ Handle<JSArrayBufferView> array_buffer_view =
+ Handle<JSArrayBufferView>::cast(NewJSObjectFromMap(map, allocation));
+ array_buffer_view->set_elements(*elements);
+ array_buffer_view->set_buffer(*buffer);
+ array_buffer_view->set_byte_offset(byte_offset);
+ array_buffer_view->set_byte_length(byte_length);
for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) {
- obj->SetEmbedderField(i, Smi::kZero);
+ array_buffer_view->SetEmbedderField(i, Smi::kZero);
}
- obj->set_buffer(*buffer);
- obj->set_byte_offset(byte_offset);
- obj->set_byte_length(byte_length);
-}
-
-} // namespace
-
-Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
- AllocationType allocation) {
- Handle<JSFunction> typed_array_fun(GetTypedArrayFun(type, isolate()),
- isolate());
- Handle<Map> map(typed_array_fun->initial_map(), isolate());
- return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, allocation));
-}
-
-Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
- AllocationType allocation) {
- Handle<JSFunction> typed_array_fun(GetTypedArrayFun(elements_kind, isolate()),
- isolate());
- Handle<Map> map(typed_array_fun->initial_map(), isolate());
- return Handle<JSTypedArray>::cast(NewJSObjectFromMap(map, allocation));
+ DCHECK_EQ(array_buffer_view->GetEmbedderFieldCount(),
+ v8::ArrayBufferView::kEmbedderFieldCount);
+ return array_buffer_view;
}
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSArrayBuffer> buffer,
size_t byte_offset, size_t length,
AllocationType allocation) {
- Handle<JSTypedArray> obj = NewJSTypedArray(type, allocation);
-
size_t element_size;
ElementsKind elements_kind;
ForFixedTypedArray(type, &element_size, &elements_kind);
-
- CHECK_EQ(byte_offset % element_size, 0);
-
- CHECK(length <= (std::numeric_limits<size_t>::max() / element_size));
- // TODO(7881): Smi length check
- CHECK(length <= static_cast<size_t>(Smi::kMaxValue));
size_t byte_length = length * element_size;
- SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length);
- Handle<Object> length_object = NewNumberFromSize(length, allocation);
- obj->set_length(*length_object);
+ CHECK_LE(length, JSTypedArray::kMaxLength);
+ CHECK_EQ(length, byte_length / element_size);
+ CHECK_EQ(0, byte_offset % ElementsKindToByteSize(elements_kind));
- Handle<FixedTypedArrayBase> elements = NewFixedTypedArrayWithExternalPointer(
- static_cast<int>(length), type,
- static_cast<uint8_t*>(buffer->backing_store()) + byte_offset, allocation);
- Handle<Map> map = JSObject::GetElementsTransitionMap(obj, elements_kind);
- JSObject::SetMapAndElements(obj, map, elements);
- return obj;
-}
+ Handle<Map> map;
+ switch (elements_kind) {
+#define TYPED_ARRAY_FUN(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ map = \
+ handle(isolate()->native_context()->type##_array_fun().initial_map(), \
+ isolate()); \
+ break;
-Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
- size_t number_of_elements,
- AllocationType allocation) {
- Handle<JSTypedArray> obj = NewJSTypedArray(elements_kind, allocation);
- DCHECK_EQ(obj->GetEmbedderFieldCount(),
- v8::ArrayBufferView::kEmbedderFieldCount);
- for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) {
- obj->SetEmbedderField(i, Smi::kZero);
- }
+ TYPED_ARRAYS(TYPED_ARRAY_FUN)
+#undef TYPED_ARRAY_FUN
- size_t element_size;
- ExternalArrayType array_type;
- TypeAndSizeForElementsKind(elements_kind, &array_type, &element_size);
-
- CHECK(number_of_elements <=
- (std::numeric_limits<size_t>::max() / element_size));
- // TODO(7881): Smi length check
- CHECK(number_of_elements <= static_cast<size_t>(Smi::kMaxValue));
- size_t byte_length = number_of_elements * element_size;
-
- obj->set_byte_offset(0);
- obj->set_byte_length(byte_length);
- obj->set_length(Smi::FromIntptr(static_cast<intptr_t>(number_of_elements)));
-
- Handle<JSArrayBuffer> buffer =
- NewJSArrayBuffer(SharedFlag::kNotShared, allocation);
- JSArrayBuffer::Setup(buffer, isolate(), true, nullptr, byte_length,
- SharedFlag::kNotShared);
- obj->set_buffer(*buffer);
- Handle<FixedTypedArrayBase> elements = NewFixedTypedArray(
- number_of_elements, byte_length, array_type, true, allocation);
- obj->set_elements(*elements);
- return obj;
+ default:
+ UNREACHABLE();
+ }
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(
+ NewJSArrayBufferView(map, empty_byte_array(), buffer, byte_offset,
+ byte_length, allocation));
+ typed_array->set_length(length);
+ typed_array->set_external_pointer(
+ reinterpret_cast<byte*>(buffer->backing_store()) + byte_offset);
+ typed_array->set_base_pointer(Smi::kZero);
+ return typed_array;
}
Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
size_t byte_offset,
- size_t byte_length) {
- Handle<Map> map(isolate()->native_context()->data_view_fun()->initial_map(),
+ size_t byte_length,
+ AllocationType allocation) {
+ Handle<Map> map(isolate()->native_context()->data_view_fun().initial_map(),
isolate());
- Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSObjectFromMap(map));
- SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length);
+ Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
+ map, empty_fixed_array(), buffer, byte_offset, byte_length, allocation));
+ obj->set_data_pointer(static_cast<uint8_t*>(buffer->backing_store()) +
+ byte_offset);
return obj;
}
@@ -3549,7 +3271,7 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
} else {
map = Handle<Map>(isolate()->proxy_map());
}
- DCHECK(map->prototype()->IsNull(isolate()));
+ DCHECK(map->prototype().IsNull(isolate()));
Handle<JSProxy> result(JSProxy::cast(New(map, AllocationType::kYoung)),
isolate());
result->initialize_properties();
@@ -3617,13 +3339,14 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "SharedFunctionInfo",
TRACE_ID_WITH_SCOPE(SharedFunctionInfo::kTraceScope, shared->TraceID()),
- shared->ToTracedValue());
+ shared->ToTracedValue(literal));
return shared;
}
Handle<JSMessageObject> Factory::NewJSMessageObject(
MessageTemplate message, Handle<Object> argument, int start_position,
- int end_position, Handle<Script> script, Handle<Object> stack_frames) {
+ int end_position, Handle<SharedFunctionInfo> shared_info,
+ int bytecode_offset, Handle<Script> script, Handle<Object> stack_frames) {
Handle<Map> map = message_object_map();
Handle<JSMessageObject> message_obj(
JSMessageObject::cast(New(map, AllocationType::kYoung)), isolate());
@@ -3636,6 +3359,23 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
message_obj->set_start_position(start_position);
message_obj->set_end_position(end_position);
message_obj->set_script(*script);
+ if (start_position >= 0) {
+ // If there's a start_position, then there's no need to store the
+ // SharedFunctionInfo as it will never be necessary to regenerate the
+ // position.
+ message_obj->set_shared_info(*undefined_value());
+ message_obj->set_bytecode_offset(Smi::FromInt(0));
+ } else {
+ message_obj->set_bytecode_offset(Smi::FromInt(bytecode_offset));
+ if (shared_info.is_null()) {
+ message_obj->set_shared_info(*undefined_value());
+ DCHECK_EQ(bytecode_offset, -1);
+ } else {
+ message_obj->set_shared_info(*shared_info);
+ DCHECK_GE(bytecode_offset, 0);
+ }
+ }
+
message_obj->set_stack_frames(*stack_frames);
message_obj->set_error_level(v8::Isolate::kMessageError);
return message_obj;
@@ -3649,6 +3389,12 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForApiFunction(
return shared;
}
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForWasmCapiFunction(
+ Handle<WasmCapiFunctionData> data) {
+ return NewSharedFunctionInfo(MaybeHandle<String>(), data,
+ Builtins::kNoBuiltinId, kConciseMethod);
+}
+
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForBuiltin(
MaybeHandle<String> maybe_name, int builtin_index, FunctionKind kind) {
Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
@@ -3683,7 +3429,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
// the function_data should not be code with a builtin.
DCHECK(!Builtins::IsBuiltinId(maybe_builtin_index));
DCHECK_IMPLIES(function_data->IsCode(),
- !Code::cast(*function_data)->is_builtin());
+ !Code::cast(*function_data).is_builtin());
share->set_function_data(*function_data);
} else if (Builtins::IsBuiltinId(maybe_builtin_index)) {
share->set_builtin_id(maybe_builtin_index);
@@ -3702,8 +3448,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
}
share->set_script_or_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
#if V8_SFI_HAS_UNIQUE_ID
- Handle<SharedFunctionInfoWithID>::cast(share)->set_unique_id(
- isolate()->GetNextUniqueSharedFunctionInfoId());
+ share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
// Set integer fields (smi or int, depending on the architecture).
@@ -3713,12 +3458,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_raw_function_token_offset(0);
// All flags default to false or 0.
share->set_flags(0);
- // For lite mode disable optimization.
- if (FLAG_lite_mode) {
- share->set_flags(
- SharedFunctionInfo::DisabledOptimizationReasonBits::encode(
- BailoutReason::kNeverOptimize));
- }
share->CalculateConstructAsBuiltin();
share->set_kind(kind);
@@ -3739,7 +3478,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
namespace {
inline int NumberToStringCacheHash(Handle<FixedArray> cache, Smi number) {
int mask = (cache->length() >> 1) - 1;
- return number->value() & mask;
+ return number.value() & mask;
}
inline int NumberToStringCacheHash(Handle<FixedArray> cache, double number) {
int mask = (cache->length() >> 1) - 1;
@@ -3757,7 +3496,7 @@ Handle<String> Factory::NumberToStringCacheSet(Handle<Object> number, int hash,
string, check_cache ? AllocationType::kOld : AllocationType::kYoung);
if (!check_cache) return js_string;
- if (!number_string_cache()->get(hash * 2)->IsUndefined(isolate())) {
+ if (!number_string_cache()->get(hash * 2).IsUndefined(isolate())) {
int full_size = isolate()->heap()->MaxNumberToStringCacheSize();
if (number_string_cache()->length() != full_size) {
Handle<FixedArray> new_cache =
@@ -3774,8 +3513,8 @@ Handle<String> Factory::NumberToStringCacheSet(Handle<Object> number, int hash,
Handle<Object> Factory::NumberToStringCacheGet(Object number, int hash) {
DisallowHeapAllocation no_gc;
Object key = number_string_cache()->get(hash * 2);
- if (key == number || (key->IsHeapNumber() && number->IsHeapNumber() &&
- key->Number() == number->Number())) {
+ if (key == number || (key.IsHeapNumber() && number.IsHeapNumber() &&
+ key.Number() == number.Number())) {
return Handle<String>(
String::cast(number_string_cache()->get(hash * 2 + 1)), isolate());
}
@@ -3817,7 +3556,7 @@ Handle<String> Factory::NumberToString(Smi number, bool check_cache) {
char arr[100];
Vector<char> buffer(arr, arraysize(arr));
- const char* string = IntToCString(number->value(), buffer);
+ const char* string = IntToCString(number.value(), buffer);
return NumberToStringCacheSet(handle(number, isolate()), hash, string,
check_cache);
@@ -3906,9 +3645,10 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
stack_frame_info->set_line_number(0);
stack_frame_info->set_column_number(0);
stack_frame_info->set_script_id(0);
- stack_frame_info->set_script_name(Smi::kZero);
- stack_frame_info->set_script_name_or_source_url(Smi::kZero);
- stack_frame_info->set_function_name(Smi::kZero);
+ stack_frame_info->set_promise_all_index(-1);
+ stack_frame_info->set_script_name(*null_value());
+ stack_frame_info->set_script_name_or_source_url(*null_value());
+ stack_frame_info->set_function_name(*null_value());
stack_frame_info->set_flag(0);
return stack_frame_info;
}
@@ -3918,8 +3658,7 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
FrameArrayIterator it(isolate(), frame_array, index);
DCHECK(it.HasFrame());
- Handle<StackFrameInfo> info = Handle<StackFrameInfo>::cast(
- NewStruct(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
+ Handle<StackFrameInfo> info = NewStackFrameInfo();
info->set_flag(0);
const bool is_wasm = frame_array->IsAnyWasmFrame(index);
@@ -3947,13 +3686,21 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
if (!is_wasm) {
Handle<Object> function = it.Frame()->GetFunction();
if (function->IsJSFunction()) {
- function_name =
- JSFunction::GetDebugName(Handle<JSFunction>::cast(function));
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(function);
+ function_name = JSFunction::GetDebugName(fun);
+
+ const bool is_user_java_script = fun->shared().IsUserJavaScript();
+ info->set_is_user_java_script(is_user_java_script);
}
}
info->set_function_name(*function_name);
+ info->set_wasm_module_name(*it.Frame()->GetWasmModuleName());
info->set_is_eval(it.Frame()->IsEval());
info->set_is_constructor(it.Frame()->IsConstructor());
+ info->set_is_toplevel(it.Frame()->IsToplevel());
+ info->set_is_async(it.Frame()->IsAsync());
+ info->set_is_promise_all(it.Frame()->IsPromiseAll());
+ info->set_promise_all_index(it.Frame()->GetPromiseIndex());
return info;
}
@@ -3965,7 +3712,8 @@ Factory::NewSourcePositionTableWithFrameCache(
Handle<SourcePositionTableWithFrameCache>
source_position_table_with_frame_cache =
Handle<SourcePositionTableWithFrameCache>::cast(
- NewStruct(TUPLE2_TYPE, AllocationType::kOld));
+ NewStruct(SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE,
+ AllocationType::kOld));
source_position_table_with_frame_cache->set_source_position_table(
*source_position_table);
source_position_table_with_frame_cache->set_stack_frame_cache(
@@ -3975,8 +3723,8 @@ Factory::NewSourcePositionTableWithFrameCache(
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
- bool strict_mode_callee = is_strict(callee->shared()->language_mode()) ||
- !callee->shared()->has_simple_parameters();
+ bool strict_mode_callee = is_strict(callee->shared().language_mode()) ||
+ !callee->shared().has_simple_parameters();
Handle<Map> map = strict_mode_callee ? isolate()->strict_arguments_map()
: isolate()->sloppy_arguments_map();
AllocationSiteUsageContext context(isolate(), Handle<AllocationSite>(),
@@ -4002,12 +3750,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
if (number_of_properties == 0) {
// Reuse the initial map of the Object function if the literal has no
// predeclared properties.
- return handle(context->object_function()->initial_map(), isolate());
- }
-
- // We do not cache maps for too many properties or when running builtin code.
- if (isolate()->bootstrapper()->IsActive()) {
- return Map::Create(isolate(), number_of_properties);
+ return handle(context->object_function().initial_map(), isolate());
}
// Use initial slow object proto map for too many properties.
@@ -4029,7 +3772,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
HeapObject heap_object;
if (result->GetHeapObjectIfWeak(&heap_object)) {
Map map = Map::cast(heap_object);
- DCHECK(!map->is_dictionary_map());
+ DCHECK(!map.is_dictionary_map());
return handle(map, isolate());
}
}
@@ -4056,7 +3799,6 @@ Handle<LoadHandler> Factory::NewLoadHandler(int data_count) {
break;
default:
UNREACHABLE();
- break;
}
return handle(LoadHandler::cast(New(map, AllocationType::kOld)), isolate());
}
@@ -4078,7 +3820,6 @@ Handle<StoreHandler> Factory::NewStoreHandler(int data_count) {
break;
default:
UNREACHABLE();
- break;
}
return handle(StoreHandler::cast(New(map, AllocationType::kOld)), isolate());
}
@@ -4381,7 +4122,21 @@ NewFunctionArgs NewFunctionArgs::ForWasm(
NewFunctionArgs args;
args.name_ = name;
args.maybe_map_ = map;
- args.maybe_exported_function_data_ = exported_function_data;
+ args.maybe_wasm_function_data_ = exported_function_data;
+ args.language_mode_ = LanguageMode::kSloppy;
+ args.prototype_mutability_ = MUTABLE;
+
+ return args;
+}
+
+// static
+NewFunctionArgs NewFunctionArgs::ForWasm(
+ Handle<String> name, Handle<WasmJSFunctionData> js_function_data,
+ Handle<Map> map) {
+ NewFunctionArgs args;
+ args.name_ = name;
+ args.maybe_map_ = map;
+ args.maybe_wasm_function_data_ = js_function_data;
args.language_mode_ = LanguageMode::kSloppy;
args.prototype_mutability_ = MUTABLE;
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 94646517a0..5af2529021 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -8,14 +8,14 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
#include "src/builtins/builtins.h"
-#include "src/function-kind.h"
-#include "src/globals.h"
-#include "src/handles.h"
+#include "src/common/globals.h"
+#include "src/execution/messages.h"
+#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
#include "src/heap/heap.h"
-#include "src/maybe-handles.h"
-#include "src/messages.h"
#include "src/objects/code.h"
#include "src/objects/dictionary.h"
+#include "src/objects/function-kind.h"
#include "src/objects/js-array.h"
#include "src/objects/js-regexp.h"
#include "src/objects/string.h"
@@ -40,6 +40,7 @@ class EnumCache;
class FinalizationGroupCleanupJobTask;
class FreshlyAllocatedBigInt;
class Isolate;
+class JSArrayBufferView;
class JSDataView;
class JSGeneratorObject;
class JSMap;
@@ -65,7 +66,9 @@ class StoreHandler;
class TemplateObjectDescription;
class UncompiledDataWithoutPreparseData;
class UncompiledDataWithPreparseData;
+class WasmCapiFunctionData;
class WasmExportedFunctionData;
+class WasmJSFunctionData;
class WeakCell;
struct SourceRange;
template <typename T>
@@ -236,16 +239,19 @@ class V8_EXPORT_PRIVATE Factory {
// Finds the internalized copy for string in the string table.
// If not found, a new string is added to the table and returned.
- Handle<String> InternalizeUtf8String(Vector<const char> str);
+ Handle<String> InternalizeUtf8String(const Vector<const char>& str);
Handle<String> InternalizeUtf8String(const char* str) {
return InternalizeUtf8String(CStrVector(str));
}
- Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
- Handle<String> InternalizeOneByteString(Handle<SeqOneByteString>, int from,
- int length);
+ template <typename Char>
+ EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Handle<String> InternalizeString(const Vector<const Char>& str,
+ bool convert_encoding = false);
- Handle<String> InternalizeTwoByteString(Vector<const uc16> str);
+ template <typename SeqString>
+ Handle<String> InternalizeString(Handle<SeqString>, int from, int length,
+ bool convert_encoding = false);
template <class StringTableKey>
Handle<String> InternalizeStringWithKey(StringTableKey* key);
@@ -276,14 +282,14 @@ class V8_EXPORT_PRIVATE Factory {
//
// One-byte strings are pretenured when used as keys in the SourceCodeCache.
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromOneByte(
- Vector<const uint8_t> str,
+ const Vector<const uint8_t>& str,
AllocationType allocation = AllocationType::kYoung);
template <size_t N>
inline Handle<String> NewStringFromStaticChars(
const char (&str)[N],
AllocationType allocation = AllocationType::kYoung) {
- DCHECK(N == StrLength(str) + 1);
+ DCHECK_EQ(N, strlen(str) + 1);
return NewStringFromOneByte(StaticCharVector(str), allocation)
.ToHandleChecked();
}
@@ -297,7 +303,7 @@ class V8_EXPORT_PRIVATE Factory {
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromUtf8(
- Vector<const char> str,
+ const Vector<const char>& str,
AllocationType allocation = AllocationType::kYoung);
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromUtf8SubString(
@@ -305,7 +311,7 @@ class V8_EXPORT_PRIVATE Factory {
AllocationType allocation = AllocationType::kYoung);
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromTwoByte(
- Vector<const uc16> str,
+ const Vector<const uc16>& str,
AllocationType allocation = AllocationType::kYoung);
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewStringFromTwoByte(
@@ -314,21 +320,18 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSStringIterator> NewJSStringIterator(Handle<String> string);
- // Allocates an internalized string in old space based on the character
- // stream.
- Handle<String> NewInternalizedStringFromUtf8(Vector<const char> str,
- int chars, uint32_t hash_field);
-
- Handle<String> NewOneByteInternalizedString(Vector<const uint8_t> str,
+ Handle<String> NewOneByteInternalizedString(const Vector<const uint8_t>& str,
uint32_t hash_field);
- Handle<String> NewOneByteInternalizedSubString(
- Handle<SeqOneByteString> string, int offset, int length,
- uint32_t hash_field);
+ Handle<SeqOneByteString> AllocateRawOneByteInternalizedString(
+ int length, uint32_t hash_field);
- Handle<String> NewTwoByteInternalizedString(Vector<const uc16> str,
+ Handle<String> NewTwoByteInternalizedString(const Vector<const uc16>& str,
uint32_t hash_field);
+ Handle<SeqTwoByteString> AllocateRawTwoByteInternalizedString(
+ int length, uint32_t hash_field);
+
Handle<String> NewInternalizedStringImpl(Handle<String> string, int chars,
uint32_t hash_field);
@@ -352,7 +355,7 @@ class V8_EXPORT_PRIVATE Factory {
// Creates a single character string where the character has given code.
// A cache is used for Latin1 codes.
- Handle<String> LookupSingleCharacterStringFromCode(uint32_t code);
+ Handle<String> LookupSingleCharacterStringFromCode(uint16_t code);
// Create a new cons string object which consists of a pair of strings.
V8_WARN_UNUSED_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
@@ -488,14 +491,6 @@ class V8_EXPORT_PRIVATE Factory {
int frame_size, int parameter_count,
Handle<FixedArray> constant_pool);
- Handle<FixedTypedArrayBase> NewFixedTypedArrayWithExternalPointer(
- int length, ExternalArrayType array_type, void* external_pointer,
- AllocationType allocation = AllocationType::kYoung);
-
- Handle<FixedTypedArrayBase> NewFixedTypedArray(
- size_t length, size_t byte_length, ExternalArrayType array_type,
- bool initialize, AllocationType allocation = AllocationType::kYoung);
-
Handle<Cell> NewCell(Handle<Object> value);
Handle<PropertyCell> NewPropertyCell(
@@ -571,8 +566,6 @@ class V8_EXPORT_PRIVATE Factory {
Handle<FixedDoubleArray> CopyFixedDoubleArray(Handle<FixedDoubleArray> array);
- Handle<FeedbackVector> CopyFeedbackVector(Handle<FeedbackVector> array);
-
// Numbers (e.g. literals) are pretenured by the parser.
// The return value may be a smi or a heap number.
Handle<Object> NewNumber(double value,
@@ -696,27 +689,15 @@ class V8_EXPORT_PRIVATE Factory {
ExternalArrayType* array_type,
size_t* element_size);
- Handle<JSTypedArray> NewJSTypedArray(
- ExternalArrayType type,
- AllocationType allocation = AllocationType::kYoung);
-
- Handle<JSTypedArray> NewJSTypedArray(
- ElementsKind elements_kind,
- AllocationType allocation = AllocationType::kYoung);
-
// Creates a new JSTypedArray with the specified buffer.
Handle<JSTypedArray> NewJSTypedArray(
ExternalArrayType type, Handle<JSArrayBuffer> buffer, size_t byte_offset,
size_t length, AllocationType allocation = AllocationType::kYoung);
- // Creates a new on-heap JSTypedArray.
- Handle<JSTypedArray> NewJSTypedArray(
- ElementsKind elements_kind, size_t number_of_elements,
+ Handle<JSDataView> NewJSDataView(
+ Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
AllocationType allocation = AllocationType::kYoung);
- Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
- size_t byte_offset, size_t byte_length);
-
Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator(
Handle<JSReceiver> sync_iterator, Handle<Object> next);
@@ -797,35 +778,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<JSObject> NewExternal(void* value);
// Creates a new CodeDataContainer for a Code object.
- Handle<CodeDataContainer> NewCodeDataContainer(int flags);
-
- // Allocates a new code object (fully initialized). All header fields of the
- // returned object are immutable and the code object is write protected.
- // The reference to the Code object is stored in self_reference.
- // This allows generated code to reference its own Code object
- // by containing this handle.
- Handle<Code> NewCode(const CodeDesc& desc, Code::Kind kind,
- Handle<Object> self_reference,
- int32_t builtin_index = Builtins::kNoBuiltinId,
- MaybeHandle<ByteArray> maybe_source_position_table =
- MaybeHandle<ByteArray>(),
- MaybeHandle<DeoptimizationData> maybe_deopt_data =
- MaybeHandle<DeoptimizationData>(),
- Movability movability = kMovable,
- bool is_turbofanned = false, int stack_slots = 0);
-
- // Like NewCode, this function allocates a new code object (fully
- // initialized). It may return an empty handle if the allocation does not
- // succeed.
- V8_WARN_UNUSED_RESULT MaybeHandle<Code> TryNewCode(
- const CodeDesc& desc, Code::Kind kind, Handle<Object> self_reference,
- int32_t builtin_index = Builtins::kNoBuiltinId,
- MaybeHandle<ByteArray> maybe_source_position_table =
- MaybeHandle<ByteArray>(),
- MaybeHandle<DeoptimizationData> maybe_deopt_data =
- MaybeHandle<DeoptimizationData>(),
- Movability movability = kMovable, bool is_turbofanned = false,
- int stack_slots = 0);
+ Handle<CodeDataContainer> NewCodeDataContainer(int flags,
+ AllocationType allocation);
// Allocates a new code object and initializes it as the trampoline to the
// given off-heap entry point.
@@ -880,6 +834,9 @@ class V8_EXPORT_PRIVATE Factory {
MaybeHandle<String> maybe_name,
Handle<FunctionTemplateInfo> function_template_info, FunctionKind kind);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmCapiFunction(
+ Handle<WasmCapiFunctionData> data);
+
Handle<SharedFunctionInfo> NewSharedFunctionInfoForBuiltin(
MaybeHandle<String> name, int builtin_index,
FunctionKind kind = kNormalFunction);
@@ -914,7 +871,8 @@ class V8_EXPORT_PRIVATE Factory {
// Allocates a new JSMessageObject object.
Handle<JSMessageObject> NewJSMessageObject(
MessageTemplate message, Handle<Object> argument, int start_position,
- int end_position, Handle<Script> script, Handle<Object> stack_frames);
+ int end_position, Handle<SharedFunctionInfo> shared_info,
+ int bytecode_offset, Handle<Script> script, Handle<Object> stack_frames);
Handle<ClassPositions> NewClassPositions(int start, int end);
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
@@ -965,6 +923,89 @@ class V8_EXPORT_PRIVATE Factory {
return New(map, allocation);
}
+ // Allows creation of Code objects. It provides two build methods, one of
+ // which tries to gracefully handle allocation failure.
+ class V8_EXPORT_PRIVATE CodeBuilder final {
+ public:
+ CodeBuilder(Isolate* isolate, const CodeDesc& desc, Code::Kind kind);
+
+ // Builds a new code object (fully initialized). All header fields of the
+ // returned object are immutable and the code object is write protected.
+ V8_WARN_UNUSED_RESULT Handle<Code> Build();
+ // Like Build, builds a new code object. May return an empty handle if the
+ // allocation fails.
+ V8_WARN_UNUSED_RESULT MaybeHandle<Code> TryBuild();
+
+ // Sets the self-reference object in which a reference to the code object is
+ // stored. This allows generated code to reference its own Code object by
+ // using this handle.
+ CodeBuilder& set_self_reference(Handle<Object> self_reference) {
+ DCHECK(!self_reference.is_null());
+ self_reference_ = self_reference;
+ return *this;
+ }
+
+ CodeBuilder& set_builtin_index(int32_t builtin_index) {
+ builtin_index_ = builtin_index;
+ return *this;
+ }
+
+ CodeBuilder& set_source_position_table(Handle<ByteArray> table) {
+ DCHECK(!table.is_null());
+ source_position_table_ = table;
+ return *this;
+ }
+
+ CodeBuilder& set_deoptimization_data(
+ Handle<DeoptimizationData> deopt_data) {
+ DCHECK(!deopt_data.is_null());
+ deoptimization_data_ = deopt_data;
+ return *this;
+ }
+
+ CodeBuilder& set_immovable() {
+ is_movable_ = false;
+ return *this;
+ }
+
+ CodeBuilder& set_is_turbofanned() {
+ is_turbofanned_ = true;
+ return *this;
+ }
+
+ // Indicates the CodeDataContainer should be allocated in read-only space.
+ // As an optimization, if the kind-specific flags match that of a canonical
+ // container, it will be used instead.
+ CodeBuilder& set_read_only_data_container(int32_t flags) {
+ read_only_data_container_ = true;
+ kind_specific_flags_ = flags;
+ return *this;
+ }
+
+ CodeBuilder& set_stack_slots(int stack_slots) {
+ stack_slots_ = stack_slots;
+ return *this;
+ }
+
+ private:
+ MaybeHandle<Code> BuildInternal(bool retry_allocation_or_fail);
+
+ Isolate* const isolate_;
+ const CodeDesc& code_desc_;
+ const Code::Kind kind_;
+
+ MaybeHandle<Object> self_reference_;
+ int32_t builtin_index_ = Builtins::kNoBuiltinId;
+ int32_t kind_specific_flags_ = 0;
+ Handle<ByteArray> source_position_table_;
+ Handle<DeoptimizationData> deoptimization_data_ =
+ DeoptimizationData::Empty(isolate_);
+ bool read_only_data_container_ = false;
+ bool is_movable_ = true;
+ bool is_turbofanned_ = false;
+ int stack_slots_ = 0;
+ };
+
private:
Isolate* isolate() {
// Downcast to the privately inherited sub-class using c-style casts to
@@ -981,6 +1022,11 @@ class V8_EXPORT_PRIVATE Factory {
Handle<Map> map, AllocationType allocation,
Handle<AllocationSite> allocation_site);
+ Handle<JSArrayBufferView> NewJSArrayBufferView(
+ Handle<Map> map, Handle<FixedArrayBase> elements,
+ Handle<JSArrayBuffer> buffer, size_t byte_offset, size_t byte_length,
+ AllocationType allocation);
+
// Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
HeapObject AllocateRawArray(int size, AllocationType allocation);
HeapObject AllocateRawFixedArray(int length, AllocationType allocation);
@@ -1014,11 +1060,8 @@ class V8_EXPORT_PRIVATE Factory {
Handle<String> AllocateInternalizedStringImpl(T t, int chars,
uint32_t hash_field);
- Handle<SeqOneByteString> AllocateRawOneByteInternalizedString(
- int length, uint32_t hash_field);
-
- Handle<String> AllocateTwoByteInternalizedString(Vector<const uc16> str,
- uint32_t hash_field);
+ Handle<String> AllocateTwoByteInternalizedString(
+ const Vector<const uc16>& str, uint32_t hash_field);
MaybeHandle<String> NewStringFromTwoByte(const uc16* string, int length,
AllocationType allocation);
@@ -1057,6 +1100,9 @@ class NewFunctionArgs final {
static NewFunctionArgs ForWasm(
Handle<String> name,
Handle<WasmExportedFunctionData> exported_function_data, Handle<Map> map);
+ static NewFunctionArgs ForWasm(Handle<String> name,
+ Handle<WasmJSFunctionData> js_function_data,
+ Handle<Map> map);
V8_EXPORT_PRIVATE static NewFunctionArgs ForBuiltin(Handle<String> name,
Handle<Map> map,
int builtin_id);
@@ -1085,7 +1131,7 @@ class NewFunctionArgs final {
Handle<String> name_;
MaybeHandle<Map> maybe_map_;
- MaybeHandle<WasmExportedFunctionData> maybe_exported_function_data_;
+ MaybeHandle<Struct> maybe_wasm_function_data_;
bool should_create_and_set_initial_map_ = false;
InstanceType type_;
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index a3bf1aa167..7b1defb935 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -4,9 +4,9 @@
#include "src/heap/gc-idle-time-handler.h"
-#include "src/flags.h"
+#include "src/flags/flags.h"
#include "src/heap/gc-tracer.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -20,7 +20,7 @@ const size_t GCIdleTimeHandler::kMinTimeForOverApproximatingWeakClosureInMs = 1;
void GCIdleTimeHeapState::Print() {
PrintF("contexts_disposed=%d ", contexts_disposed);
PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
- PrintF("size_of_objects=%" PRIuS " ", size_of_objects);
+ PrintF("size_of_objects=%zu ", size_of_objects);
PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
}
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 5781f44d87..a7ce5dafc7 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_GC_IDLE_TIME_HANDLER_H_
#define V8_HEAP_GC_IDLE_TIME_HANDLER_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 4afc012c62..fab663d767 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -7,11 +7,11 @@
#include <cstdarg>
#include "src/base/atomic-utils.h"
-#include "src/counters-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
-#include "src/isolate.h"
+#include "src/logging/counters-inl.h"
namespace v8 {
namespace internal {
@@ -191,6 +191,7 @@ void GCTracer::ResetForTesting() {
recorded_incremental_mark_compacts_.Reset();
recorded_new_generation_allocations_.Reset();
recorded_old_generation_allocations_.Reset();
+ recorded_embedder_generation_allocations_.Reset();
recorded_context_disposal_times_.Reset();
recorded_survival_ratios_.Reset();
start_counter_ = 0;
@@ -221,7 +222,8 @@ void GCTracer::Start(GarbageCollector collector,
previous_ = current_;
double start_time = heap_->MonotonicallyIncreasingTimeInMs();
SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
- heap_->OldGenerationAllocationCounter());
+ heap_->OldGenerationAllocationCounter(),
+ heap_->EmbedderAllocationCounter());
switch (collector) {
case SCAVENGER:
@@ -363,17 +365,28 @@ void GCTracer::Stop(GarbageCollector collector) {
if (FLAG_trace_gc) {
heap_->PrintShortHeapStatistics();
}
-}
+ if (V8_UNLIKELY(TracingFlags::gc.load(std::memory_order_relaxed) &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ std::stringstream heap_stats;
+ heap_->DumpJSONHeapStatistics(heap_stats);
+
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "V8.GC_Heap_Stats",
+ TRACE_EVENT_SCOPE_THREAD, "stats",
+ TRACE_STR_COPY(heap_stats.str().c_str()));
+ }
+}
void GCTracer::SampleAllocation(double current_ms,
size_t new_space_counter_bytes,
- size_t old_generation_counter_bytes) {
+ size_t old_generation_counter_bytes,
+ size_t embedder_counter_bytes) {
if (allocation_time_ms_ == 0) {
// It is the first sample.
allocation_time_ms_ = current_ms;
new_space_allocation_counter_bytes_ = new_space_counter_bytes;
old_generation_allocation_counter_bytes_ = old_generation_counter_bytes;
+ embedder_allocation_counter_bytes_ = embedder_counter_bytes;
return;
}
// This assumes that counters are unsigned integers so that the subtraction
@@ -382,17 +395,20 @@ void GCTracer::SampleAllocation(double current_ms,
new_space_counter_bytes - new_space_allocation_counter_bytes_;
size_t old_generation_allocated_bytes =
old_generation_counter_bytes - old_generation_allocation_counter_bytes_;
+ size_t embedder_allocated_bytes =
+ embedder_counter_bytes - embedder_allocation_counter_bytes_;
double duration = current_ms - allocation_time_ms_;
allocation_time_ms_ = current_ms;
new_space_allocation_counter_bytes_ = new_space_counter_bytes;
old_generation_allocation_counter_bytes_ = old_generation_counter_bytes;
+ embedder_allocation_counter_bytes_ = embedder_counter_bytes;
allocation_duration_since_gc_ += duration;
new_space_allocation_in_bytes_since_gc_ += new_space_allocated_bytes;
old_generation_allocation_in_bytes_since_gc_ +=
old_generation_allocated_bytes;
+ embedder_allocation_in_bytes_since_gc_ += embedder_allocated_bytes;
}
-
void GCTracer::AddAllocation(double current_ms) {
allocation_time_ms_ = current_ms;
if (allocation_duration_since_gc_ > 0) {
@@ -402,10 +418,13 @@ void GCTracer::AddAllocation(double current_ms) {
recorded_old_generation_allocations_.Push(
MakeBytesAndDuration(old_generation_allocation_in_bytes_since_gc_,
allocation_duration_since_gc_));
+ recorded_embedder_generation_allocations_.Push(MakeBytesAndDuration(
+ embedder_allocation_in_bytes_since_gc_, allocation_duration_since_gc_));
}
allocation_duration_since_gc_ = 0;
new_space_allocation_in_bytes_since_gc_ = 0;
old_generation_allocation_in_bytes_since_gc_ = 0;
+ embedder_allocation_in_bytes_since_gc_ = 0;
}
@@ -447,7 +466,7 @@ void GCTracer::Output(const char* format, ...) const {
VSNPrintF(buffer, format, arguments2);
va_end(arguments2);
- heap_->AddToRingBuffer(buffer.start());
+ heap_->AddToRingBuffer(buffer.begin());
}
void GCTracer::Print() const {
@@ -530,20 +549,13 @@ void GCTracer::PrintNVP() const {
"incremental.steps_count=%d "
"incremental.steps_took=%.1f "
"scavenge_throughput=%.f "
- "total_size_before=%" PRIuS
- " "
- "total_size_after=%" PRIuS
- " "
- "holes_size_before=%" PRIuS
- " "
- "holes_size_after=%" PRIuS
- " "
- "allocated=%" PRIuS
- " "
- "promoted=%" PRIuS
- " "
- "semi_space_copied=%" PRIuS
- " "
+ "total_size_before=%zu "
+ "total_size_after=%zu "
+ "holes_size_before=%zu "
+ "holes_size_after=%zu "
+ "allocated=%zu "
+ "promoted=%zu "
+ "semi_space_copied=%zu "
"nodes_died_in_new=%d "
"nodes_copied_in_new=%d "
"nodes_promoted=%d "
@@ -726,20 +738,13 @@ void GCTracer::PrintNVP() const {
"background.array_buffer_free=%.2f "
"background.store_buffer=%.2f "
"background.unmapper=%.1f "
- "total_size_before=%" PRIuS
- " "
- "total_size_after=%" PRIuS
- " "
- "holes_size_before=%" PRIuS
- " "
- "holes_size_after=%" PRIuS
- " "
- "allocated=%" PRIuS
- " "
- "promoted=%" PRIuS
- " "
- "semi_space_copied=%" PRIuS
- " "
+ "total_size_before=%zu "
+ "total_size_after=%zu "
+ "holes_size_before=%zu "
+ "holes_size_after=%zu "
+ "allocated=%zu "
+ "promoted=%zu "
+ "semi_space_copied=%zu "
"nodes_died_in_new=%d "
"nodes_copied_in_new=%d "
"nodes_promoted=%d "
@@ -885,6 +890,16 @@ void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
}
}
+void GCTracer::RecordEmbedderSpeed(size_t bytes, double duration) {
+ if (duration == 0 || bytes == 0) return;
+ double current_speed = bytes / duration;
+ if (recorded_embedder_speed_ == 0.0) {
+ recorded_embedder_speed_ = current_speed;
+ } else {
+ recorded_embedder_speed_ = (recorded_embedder_speed_ + current_speed) / 2;
+ }
+}
+
void GCTracer::RecordMutatorUtilization(double mark_compact_end_time,
double mark_compact_duration) {
if (previous_mark_compact_end_time_ == 0) {
@@ -923,7 +938,6 @@ double GCTracer::CurrentMarkCompactMutatorUtilization() const {
}
double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
- const int kConservativeSpeedInBytesPerMillisecond = 128 * KB;
if (recorded_incremental_marking_speed_ != 0) {
return recorded_incremental_marking_speed_;
}
@@ -933,6 +947,13 @@ double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
return kConservativeSpeedInBytesPerMillisecond;
}
+double GCTracer::EmbedderSpeedInBytesPerMillisecond() const {
+ if (recorded_embedder_speed_ != 0.0) {
+ return recorded_embedder_speed_;
+ }
+ return kConservativeSpeedInBytesPerMillisecond;
+}
+
double GCTracer::ScavengeSpeedInBytesPerMillisecond(
ScavengeSpeedMode mode) const {
if (mode == kForAllObjects) {
@@ -979,6 +1000,15 @@ double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
return combined_mark_compact_speed_cache_;
}
+double GCTracer::CombineSpeedsInBytesPerMillisecond(double default_speed,
+ double optional_speed) {
+ constexpr double kMinimumSpeed = 0.5;
+ if (optional_speed < kMinimumSpeed) {
+ return default_speed;
+ }
+ return default_speed * optional_speed / (default_speed + optional_speed);
+}
+
double GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
double time_ms) const {
size_t bytes = new_space_allocation_in_bytes_since_gc_;
@@ -995,6 +1025,14 @@ double GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
MakeBytesAndDuration(bytes, durations), time_ms);
}
+double GCTracer::EmbedderAllocationThroughputInBytesPerMillisecond(
+ double time_ms) const {
+ size_t bytes = embedder_allocation_in_bytes_since_gc_;
+ double durations = allocation_duration_since_gc_;
+ return AverageSpeed(recorded_embedder_generation_allocations_,
+ MakeBytesAndDuration(bytes, durations), time_ms);
+}
+
double GCTracer::AllocationThroughputInBytesPerMillisecond(
double time_ms) const {
return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
@@ -1011,6 +1049,12 @@ double GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
kThroughputTimeFrameMs);
}
+double GCTracer::CurrentEmbedderAllocationThroughputInBytesPerMillisecond()
+ const {
+ return EmbedderAllocationThroughputInBytesPerMillisecond(
+ kThroughputTimeFrameMs);
+}
+
double GCTracer::ContextDisposalRateInMilliseconds() const {
if (recorded_context_disposal_times_.Count() <
recorded_context_disposal_times_.kSize)
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index b9604bdff0..4ddd0ef1c2 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -8,10 +8,10 @@
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
#include "src/base/ring-buffer.h"
-#include "src/counters.h"
-#include "src/globals.h"
-#include "src/heap-symbols.h"
+#include "src/common/globals.h"
#include "src/heap/heap.h"
+#include "src/init/heap-symbols.h"
+#include "src/logging/counters.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
@@ -200,6 +200,10 @@ class V8_EXPORT_PRIVATE GCTracer {
};
static const int kThroughputTimeFrameMs = 5000;
+ static constexpr double kConservativeSpeedInBytesPerMillisecond = 128 * KB;
+
+ static double CombineSpeedsInBytesPerMillisecond(double default_speed,
+ double optional_speed);
static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
@@ -217,7 +221,8 @@ class V8_EXPORT_PRIVATE GCTracer {
// Sample and accumulate bytes allocated since the last GC.
void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
- size_t old_generation_counter_bytes);
+ size_t old_generation_counter_bytes,
+ size_t embedder_counter_bytes);
// Log the accumulated new space allocation bytes.
void AddAllocation(double current_ms);
@@ -232,9 +237,13 @@ class V8_EXPORT_PRIVATE GCTracer {
void AddIncrementalMarkingStep(double duration, size_t bytes);
// Compute the average incremental marking speed in bytes/millisecond.
- // Returns 0 if no events have been recorded.
+ // Returns a conservative value if no events have been recorded.
double IncrementalMarkingSpeedInBytesPerMillisecond() const;
+ // Compute the average embedder speed in bytes/millisecond.
+ // Returns a conservative value if no events have been recorded.
+ double EmbedderSpeedInBytesPerMillisecond() const;
+
// Compute the average scavenge speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
double ScavengeSpeedInBytesPerMillisecond(
@@ -268,6 +277,12 @@ class V8_EXPORT_PRIVATE GCTracer {
double OldGenerationAllocationThroughputInBytesPerMillisecond(
double time_ms = 0) const;
+ // Allocation throughput in the embedder in bytes/millisecond in the
+ // last time_ms milliseconds. Reported through v8::EmbedderHeapTracer.
+ // Returns 0 if no allocation events have been recorded.
+ double EmbedderAllocationThroughputInBytesPerMillisecond(
+ double time_ms = 0) const;
+
// Allocation throughput in heap in bytes/millisecond in the last time_ms
// milliseconds.
// Returns 0 if no allocation events have been recorded.
@@ -283,6 +298,11 @@ class V8_EXPORT_PRIVATE GCTracer {
// Returns 0 if no allocation events have been recorded.
double CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;
+ // Allocation throughput in the embedder in bytes/milliseconds in the last
+ // kThroughputTimeFrameMs seconds. Reported through v8::EmbedderHeapTracer.
+ // Returns 0 if no allocation events have been recorded.
+ double CurrentEmbedderAllocationThroughputInBytesPerMillisecond() const;
+
// Computes the context disposal rate in milliseconds. It takes the time
// frame of the first recorded context disposal to the current time and
// divides it by the number of recorded events.
@@ -323,16 +343,19 @@ class V8_EXPORT_PRIVATE GCTracer {
void RecordGCPhasesHistograms(TimedHistogram* gc_timer);
+ void RecordEmbedderSpeed(size_t bytes, double duration);
+
private:
FRIEND_TEST(GCTracer, AverageSpeed);
FRIEND_TEST(GCTracerTest, AllocationThroughput);
FRIEND_TEST(GCTracerTest, BackgroundScavengerScope);
FRIEND_TEST(GCTracerTest, BackgroundMinorMCScope);
FRIEND_TEST(GCTracerTest, BackgroundMajorMCScope);
+ FRIEND_TEST(GCTracerTest, EmbedderAllocationThroughput);
FRIEND_TEST(GCTracerTest, MultithreadedBackgroundScope);
FRIEND_TEST(GCTracerTest, NewSpaceAllocationThroughput);
- FRIEND_TEST(GCTracerTest, NewSpaceAllocationThroughputWithProvidedTime);
- FRIEND_TEST(GCTracerTest, OldGenerationAllocationThroughputWithProvidedTime);
+ FRIEND_TEST(GCTracerTest, PerGenerationAllocationThroughput);
+ FRIEND_TEST(GCTracerTest, PerGenerationAllocationThroughputWithProvidedTime);
FRIEND_TEST(GCTracerTest, RegularScope);
FRIEND_TEST(GCTracerTest, IncrementalMarkingDetails);
FRIEND_TEST(GCTracerTest, IncrementalScope);
@@ -414,6 +437,8 @@ class V8_EXPORT_PRIVATE GCTracer {
double recorded_incremental_marking_speed_;
+ double recorded_embedder_speed_ = 0.0;
+
// Incremental scopes carry more information than just the duration. The infos
// here are merged back upon starting/stopping the GC tracer.
IncrementalMarkingInfos
@@ -424,11 +449,13 @@ class V8_EXPORT_PRIVATE GCTracer {
double allocation_time_ms_;
size_t new_space_allocation_counter_bytes_;
size_t old_generation_allocation_counter_bytes_;
+ size_t embedder_allocation_counter_bytes_;
// Accumulated duration and allocated bytes since the last GC.
double allocation_duration_since_gc_;
size_t new_space_allocation_in_bytes_since_gc_;
size_t old_generation_allocation_in_bytes_since_gc_;
+ size_t embedder_allocation_in_bytes_since_gc_;
double combined_mark_compact_speed_cache_;
@@ -448,6 +475,7 @@ class V8_EXPORT_PRIVATE GCTracer {
base::RingBuffer<BytesAndDuration> recorded_mark_compacts_;
base::RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
base::RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
+ base::RingBuffer<BytesAndDuration> recorded_embedder_generation_allocations_;
base::RingBuffer<double> recorded_context_disposal_times_;
base::RingBuffer<double> recorded_survival_ratios_;
diff --git a/deps/v8/src/heap/heap-controller.cc b/deps/v8/src/heap/heap-controller.cc
index 41ffa7b1a3..77e4870913 100644
--- a/deps/v8/src/heap/heap-controller.cc
+++ b/deps/v8/src/heap/heap-controller.cc
@@ -4,12 +4,55 @@
#include "src/heap/heap-controller.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/spaces.h"
-#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
+template <typename Trait>
+double MemoryController<Trait>::GrowingFactor(Heap* heap, size_t max_heap_size,
+ double gc_speed,
+ double mutator_speed) {
+ const double max_factor = MaxGrowingFactor(max_heap_size);
+ const double factor =
+ DynamicGrowingFactor(gc_speed, mutator_speed, max_factor);
+ if (FLAG_trace_gc_verbose) {
+ Isolate::FromHeap(heap)->PrintWithTimestamp(
+ "[%s] factor %.1f based on mu=%.3f, speed_ratio=%.f "
+ "(gc=%.f, mutator=%.f)\n",
+ Trait::kName, factor, Trait::kTargetMutatorUtilization,
+ gc_speed / mutator_speed, gc_speed, mutator_speed);
+ }
+ return factor;
+}
+
+template <typename Trait>
+double MemoryController<Trait>::MaxGrowingFactor(size_t max_heap_size) {
+ constexpr double kMinSmallFactor = 1.3;
+ constexpr double kMaxSmallFactor = 2.0;
+ constexpr double kHighFactor = 4.0;
+
+ size_t max_size_in_mb = max_heap_size / MB;
+ max_size_in_mb = Max(max_size_in_mb, Trait::kMinSize);
+
+ // If we are on a device with lots of memory, we allow a high heap
+ // growing factor.
+ if (max_size_in_mb >= Trait::kMaxSize) {
+ return kHighFactor;
+ }
+
+ DCHECK_GE(max_size_in_mb, Trait::kMinSize);
+ DCHECK_LT(max_size_in_mb, Trait::kMaxSize);
+
+ // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
+ double factor = (max_size_in_mb - Trait::kMinSize) *
+ (kMaxSmallFactor - kMinSmallFactor) /
+ (Trait::kMaxSize - Trait::kMinSize) +
+ kMinSmallFactor;
+ return factor;
+}
+
// Given GC speed in bytes per ms, the allocation throughput in bytes per ms
// (mutator speed), this function returns the heap growing factor that will
// achieve the target_mutator_utilization_ if the GC speed and the mutator speed
@@ -49,46 +92,56 @@ namespace internal {
// F * (1 - MU / (R * (1 - MU))) = 1
// F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
// F = R * (1 - MU) / (R * (1 - MU) - MU)
-double MemoryController::GrowingFactor(double gc_speed, double mutator_speed,
- double max_factor) {
- DCHECK_LE(min_growing_factor_, max_factor);
- DCHECK_GE(max_growing_factor_, max_factor);
+template <typename Trait>
+double MemoryController<Trait>::DynamicGrowingFactor(double gc_speed,
+ double mutator_speed,
+ double max_factor) {
+ DCHECK_LE(Trait::kMinGrowingFactor, max_factor);
+ DCHECK_GE(Trait::kMaxGrowingFactor, max_factor);
if (gc_speed == 0 || mutator_speed == 0) return max_factor;
const double speed_ratio = gc_speed / mutator_speed;
- const double a = speed_ratio * (1 - target_mutator_utilization_);
- const double b = speed_ratio * (1 - target_mutator_utilization_) -
- target_mutator_utilization_;
+ const double a = speed_ratio * (1 - Trait::kTargetMutatorUtilization);
+ const double b = speed_ratio * (1 - Trait::kTargetMutatorUtilization) -
+ Trait::kTargetMutatorUtilization;
// The factor is a / b, but we need to check for small b first.
double factor = (a < b * max_factor) ? a / b : max_factor;
factor = Min(factor, max_factor);
- factor = Max(factor, min_growing_factor_);
+ factor = Max(factor, Trait::kMinGrowingFactor);
return factor;
}
-size_t MemoryController::CalculateAllocationLimit(
- size_t curr_size, size_t max_size, double max_factor, double gc_speed,
- double mutator_speed, size_t new_space_capacity,
+template <typename Trait>
+size_t MemoryController<Trait>::MinimumAllocationLimitGrowingStep(
Heap::HeapGrowingMode growing_mode) {
- double factor = GrowingFactor(gc_speed, mutator_speed, max_factor);
-
- if (FLAG_trace_gc_verbose) {
- Isolate::FromHeap(heap_)->PrintWithTimestamp(
- "%s factor %.1f based on mu=%.3f, speed_ratio=%.f "
- "(gc=%.f, mutator=%.f)\n",
- ControllerName(), factor, target_mutator_utilization_,
- gc_speed / mutator_speed, gc_speed, mutator_speed);
- }
+ const size_t kRegularAllocationLimitGrowingStep = 8;
+ const size_t kLowMemoryAllocationLimitGrowingStep = 2;
+ size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+ return limit * (growing_mode == Heap::HeapGrowingMode::kConservative
+ ? kLowMemoryAllocationLimitGrowingStep
+ : kRegularAllocationLimitGrowingStep);
+}
- if (growing_mode == Heap::HeapGrowingMode::kConservative ||
- growing_mode == Heap::HeapGrowingMode::kSlow) {
- factor = Min(factor, conservative_growing_factor_);
+template <typename Trait>
+size_t MemoryController<Trait>::CalculateAllocationLimit(
+ Heap* heap, size_t current_size, size_t max_size, size_t new_space_capacity,
+ double factor, Heap::HeapGrowingMode growing_mode) {
+ switch (growing_mode) {
+ case Heap::HeapGrowingMode::kConservative:
+ case Heap::HeapGrowingMode::kSlow:
+ factor = Min(factor, Trait::kConservativeGrowingFactor);
+ break;
+ case Heap::HeapGrowingMode::kMinimal:
+ factor = Trait::kMinGrowingFactor;
+ break;
+ case Heap::HeapGrowingMode::kDefault:
+ break;
}
- if (growing_mode == Heap::HeapGrowingMode::kMinimal) {
- factor = min_growing_factor_;
+ if (FLAG_heap_growing_percent > 0) {
+ factor = 1.0 + FLAG_heap_growing_percent / 100.0;
}
if (FLAG_heap_growing_percent > 0) {
@@ -96,58 +149,28 @@ size_t MemoryController::CalculateAllocationLimit(
}
CHECK_LT(1.0, factor);
- CHECK_LT(0, curr_size);
- uint64_t limit = static_cast<uint64_t>(curr_size * factor);
- limit = Max(limit, static_cast<uint64_t>(curr_size) +
- MinimumAllocationLimitGrowingStep(growing_mode));
- limit += new_space_capacity;
- uint64_t halfway_to_the_max =
- (static_cast<uint64_t>(curr_size) + max_size) / 2;
- size_t result = static_cast<size_t>(Min(limit, halfway_to_the_max));
-
+ CHECK_LT(0, current_size);
+ const uint64_t limit =
+ Max(static_cast<uint64_t>(current_size * factor),
+ static_cast<uint64_t>(current_size) +
+ MinimumAllocationLimitGrowingStep(growing_mode)) +
+ new_space_capacity;
+ const uint64_t halfway_to_the_max =
+ (static_cast<uint64_t>(current_size) + max_size) / 2;
+ const size_t result = static_cast<size_t>(Min(limit, halfway_to_the_max));
if (FLAG_trace_gc_verbose) {
- Isolate::FromHeap(heap_)->PrintWithTimestamp(
- "%s Limit: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
- ControllerName(), curr_size / KB, result / KB, factor);
+ Isolate::FromHeap(heap)->PrintWithTimestamp(
+ "[%s] Limit: old size: %zu KB, new limit: %zu KB (%.1f)\n",
+ Trait::kName, current_size / KB, result / KB, factor);
}
-
return result;
}
-size_t MemoryController::MinimumAllocationLimitGrowingStep(
- Heap::HeapGrowingMode growing_mode) {
- const size_t kRegularAllocationLimitGrowingStep = 8;
- const size_t kLowMemoryAllocationLimitGrowingStep = 2;
- size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
- return limit * (growing_mode == Heap::HeapGrowingMode::kConservative
- ? kLowMemoryAllocationLimitGrowingStep
- : kRegularAllocationLimitGrowingStep);
-}
-
-double HeapController::MaxGrowingFactor(size_t curr_max_size) {
- const double min_small_factor = 1.3;
- const double max_small_factor = 2.0;
- const double high_factor = 4.0;
+template class V8_EXPORT_PRIVATE MemoryController<V8HeapTrait>;
+template class V8_EXPORT_PRIVATE MemoryController<GlobalMemoryTrait>;
- size_t max_size_in_mb = curr_max_size / MB;
- max_size_in_mb = Max(max_size_in_mb, kMinSize);
-
- // If we are on a device with lots of memory, we allow a high heap
- // growing factor.
- if (max_size_in_mb >= kMaxSize) {
- return high_factor;
- }
-
- DCHECK_GE(max_size_in_mb, kMinSize);
- DCHECK_LT(max_size_in_mb, kMaxSize);
-
- // On smaller devices we linearly scale the factor: (X-A)/(B-A)*(D-C)+C
- double factor = (max_size_in_mb - kMinSize) *
- (max_small_factor - min_small_factor) /
- (kMaxSize - kMinSize) +
- min_small_factor;
- return factor;
-}
+const char* V8HeapTrait::kName = "HeapController";
+const char* GlobalMemoryTrait::kName = "GlobalMemoryController";
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/heap-controller.h b/deps/v8/src/heap/heap-controller.h
index f8625ee963..bba1588669 100644
--- a/deps/v8/src/heap/heap-controller.h
+++ b/deps/v8/src/heap/heap-controller.h
@@ -6,65 +6,55 @@
#define V8_HEAP_HEAP_CONTROLLER_H_
#include <cstddef>
-#include "src/allocation.h"
#include "src/heap/heap.h"
+#include "src/utils/allocation.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
namespace internal {
-class V8_EXPORT_PRIVATE MemoryController {
- public:
- MemoryController(Heap* heap, double min_growing_factor,
- double max_growing_factor,
- double conservative_growing_factor,
- double target_mutator_utilization)
- : heap_(heap),
- min_growing_factor_(min_growing_factor),
- max_growing_factor_(max_growing_factor),
- conservative_growing_factor_(conservative_growing_factor),
- target_mutator_utilization_(target_mutator_utilization) {}
- virtual ~MemoryController() = default;
-
- // Computes the allocation limit to trigger the next garbage collection.
- size_t CalculateAllocationLimit(size_t curr_size, size_t max_size,
- double max_factor, double gc_speed,
- double mutator_speed,
- size_t new_space_capacity,
- Heap::HeapGrowingMode growing_mode);
-
- // Computes the growing step when the limit increases.
- size_t MinimumAllocationLimitGrowingStep(Heap::HeapGrowingMode growing_mode);
+struct BaseControllerTrait {
+ // Sizes are in MB.
+ static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
+ static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
- protected:
- double GrowingFactor(double gc_speed, double mutator_speed,
- double max_factor);
- virtual const char* ControllerName() = 0;
+ static constexpr double kMinGrowingFactor = 1.1;
+ static constexpr double kMaxGrowingFactor = 4.0;
+ static constexpr double kConservativeGrowingFactor = 1.3;
+ static constexpr double kTargetMutatorUtilization = 0.97;
+};
- Heap* const heap_;
- const double min_growing_factor_;
- const double max_growing_factor_;
- const double conservative_growing_factor_;
- const double target_mutator_utilization_;
+struct V8HeapTrait : public BaseControllerTrait {
+ static const char* kName;
+};
- FRIEND_TEST(HeapControllerTest, HeapGrowingFactor);
- FRIEND_TEST(HeapControllerTest, MaxHeapGrowingFactor);
- FRIEND_TEST(HeapControllerTest, MaxOldGenerationSize);
- FRIEND_TEST(HeapControllerTest, OldGenerationAllocationLimit);
+struct GlobalMemoryTrait : public BaseControllerTrait {
+ static const char* kName;
};
-class V8_EXPORT_PRIVATE HeapController : public MemoryController {
+template <typename Trait>
+class V8_EXPORT_PRIVATE MemoryController : public AllStatic {
public:
- // Sizes are in MB.
- static constexpr size_t kMinSize = 128 * Heap::kPointerMultiplier;
- static constexpr size_t kMaxSize = 1024 * Heap::kPointerMultiplier;
+ // Computes the growing step when the limit increases.
+ static size_t MinimumAllocationLimitGrowingStep(
+ Heap::HeapGrowingMode growing_mode);
+
+ static double GrowingFactor(Heap* heap, size_t max_heap_size, double gc_speed,
+ double mutator_speed);
+
+ static size_t CalculateAllocationLimit(Heap* heap, size_t current_size,
+ size_t max_size,
+ size_t new_space_capacity,
+ double factor,
+ Heap::HeapGrowingMode growing_mode);
- explicit HeapController(Heap* heap)
- : MemoryController(heap, 1.1, 4.0, 1.3, 0.97) {}
- double MaxGrowingFactor(size_t curr_max_size);
+ private:
+ static double MaxGrowingFactor(size_t max_heap_size);
+ static double DynamicGrowingFactor(double gc_speed, double mutator_speed,
+ double max_factor);
- protected:
- const char* ControllerName() override { return "HeapController"; }
+ FRIEND_TEST(MemoryControllerTest, HeapGrowingFactor);
+ FRIEND_TEST(MemoryControllerTest, MaxHeapGrowingFactor);
};
} // namespace internal
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index d129346295..4ce35bd961 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -15,21 +15,20 @@
#include "src/base/atomic-utils.h"
#include "src/base/platform/platform.h"
-#include "src/feedback-vector.h"
+#include "src/objects/feedback-vector.h"
// TODO(mstarzinger): There is one more include to remove in order to no longer
// leak heap internals to users of this interface!
+#include "src/execution/isolate-data.h"
+#include "src/execution/isolate.h"
#include "src/heap/spaces-inl.h"
-#include "src/isolate-data.h"
-#include "src/isolate.h"
-#include "src/msan.h"
-#include "src/objects-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/feedback-cell-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell.h"
#include "src/objects/scope-info.h"
@@ -37,7 +36,8 @@
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
-#include "src/string-hasher.h"
+#include "src/sanitizer/msan.h"
+#include "src/strings/string-hasher.h"
#include "src/zone/zone-list-inl.h"
namespace v8 {
@@ -94,34 +94,34 @@ MUTABLE_ROOT_LIST(ROOT_ACCESSOR)
!RootsTable::IsImmortalImmovable(RootIndex::k##CamelName)); \
DCHECK_IMPLIES(RootsTable::IsImmortalImmovable(RootIndex::k##CamelName), \
IsImmovable(HeapObject::cast(value))); \
- roots_table()[RootIndex::k##CamelName] = value->ptr(); \
+ roots_table()[RootIndex::k##CamelName] = value.ptr(); \
}
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
void Heap::SetRootMaterializedObjects(FixedArray objects) {
- roots_table()[RootIndex::kMaterializedObjects] = objects->ptr();
+ roots_table()[RootIndex::kMaterializedObjects] = objects.ptr();
}
void Heap::SetRootScriptList(Object value) {
- roots_table()[RootIndex::kScriptList] = value->ptr();
+ roots_table()[RootIndex::kScriptList] = value.ptr();
}
void Heap::SetRootStringTable(StringTable value) {
- roots_table()[RootIndex::kStringTable] = value->ptr();
+ roots_table()[RootIndex::kStringTable] = value.ptr();
}
void Heap::SetRootNoScriptSharedFunctionInfos(Object value) {
- roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value->ptr();
+ roots_table()[RootIndex::kNoScriptSharedFunctionInfos] = value.ptr();
}
void Heap::SetMessageListeners(TemplateList value) {
- roots_table()[RootIndex::kMessageListeners] = value->ptr();
+ roots_table()[RootIndex::kMessageListeners] = value.ptr();
}
void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) {
- DCHECK(hash_table->IsObjectHashTable() || hash_table->IsUndefined(isolate()));
- roots_table()[RootIndex::kPendingOptimizeForTestBytecode] = hash_table->ptr();
+ DCHECK(hash_table.IsObjectHashTable() || hash_table.IsUndefined(isolate()));
+ roots_table()[RootIndex::kPendingOptimizeForTestBytecode] = hash_table.ptr();
}
PagedSpace* Heap::paged_space(int idx) {
@@ -223,7 +223,12 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
// Unprotect the memory chunk of the object if it was not unprotected
// already.
UnprotectAndRegisterMemoryChunk(object);
- ZapCodeObject(object->address(), size_in_bytes);
+ ZapCodeObject(object.address(), size_in_bytes);
+ if (!large_object) {
+ MemoryChunk::FromHeapObject(object)
+ ->GetCodeObjectRegistry()
+ ->RegisterNewlyAllocatedCodeObject(object.address());
+ }
}
OnAllocationEvent(object, size_in_bytes);
}
@@ -233,7 +238,7 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
void Heap::OnAllocationEvent(HeapObject object, int size_in_bytes) {
for (auto& tracker : allocation_trackers_) {
- tracker->AllocationEvent(object->address(), size_in_bytes);
+ tracker->AllocationEvent(object.address(), size_in_bytes);
}
if (FLAG_verify_predictable) {
@@ -264,7 +269,7 @@ bool Heap::CanAllocateInReadOnlySpace() {
}
void Heap::UpdateAllocationsHash(HeapObject object) {
- Address object_address = object->address();
+ Address object_address = object.address();
MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
AllocationSpace allocation_space = memory_chunk->owner()->identity();
@@ -286,28 +291,28 @@ void Heap::UpdateAllocationsHash(uint32_t value) {
}
void Heap::RegisterExternalString(String string) {
- DCHECK(string->IsExternalString());
- DCHECK(!string->IsThinString());
+ DCHECK(string.IsExternalString());
+ DCHECK(!string.IsThinString());
external_string_table_.AddString(string);
}
void Heap::FinalizeExternalString(String string) {
- DCHECK(string->IsExternalString());
+ DCHECK(string.IsExternalString());
Page* page = Page::FromHeapObject(string);
ExternalString ext_string = ExternalString::cast(string);
page->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
- ext_string->ExternalPayloadSize());
+ ext_string.ExternalPayloadSize());
- ext_string->DisposeResource();
+ ext_string.DisposeResource();
}
Address Heap::NewSpaceTop() { return new_space_->top(); }
bool Heap::InYoungGeneration(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
- return object->IsHeapObject() && InYoungGeneration(HeapObject::cast(object));
+ return object.IsHeapObject() && InYoungGeneration(HeapObject::cast(object));
}
// static
@@ -335,7 +340,7 @@ bool Heap::InYoungGeneration(HeapObject heap_object) {
// static
bool Heap::InFromPage(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
- return object->IsHeapObject() && InFromPage(HeapObject::cast(object));
+ return object.IsHeapObject() && InFromPage(HeapObject::cast(object));
}
// static
@@ -352,7 +357,7 @@ bool Heap::InFromPage(HeapObject heap_object) {
// static
bool Heap::InToPage(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
- return object->IsHeapObject() && InToPage(HeapObject::cast(object));
+ return object.IsHeapObject() && InToPage(HeapObject::cast(object));
}
// static
@@ -395,15 +400,15 @@ void Heap::CopyBlock(Address dst, Address src, int byte_size) {
template <Heap::FindMementoMode mode>
AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
- Address object_address = object->address();
- Address memento_address = object_address + object->SizeFromMap(map);
+ Address object_address = object.address();
+ Address memento_address = object_address + object.SizeFromMap(map);
Address last_memento_word_address = memento_address + kTaggedSize;
// If the memento would be on another page, bail out immediately.
if (!Page::OnSamePage(object_address, last_memento_word_address)) {
return AllocationMemento();
}
HeapObject candidate = HeapObject::FromAddress(memento_address);
- MapWordSlot candidate_map_slot = candidate->map_slot();
+ MapWordSlot candidate_map_slot = candidate.map_slot();
// This fast check may peek at an uninitialized word. However, the slow check
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
@@ -445,7 +450,7 @@ AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
DCHECK(memento_address == top ||
memento_address + HeapObject::kHeaderSize <= top ||
!Page::OnSamePage(memento_address, top - 1));
- if ((memento_address != top) && memento_candidate->IsValid()) {
+ if ((memento_address != top) && memento_candidate.IsValid()) {
return memento_candidate;
}
return AllocationMemento();
@@ -466,7 +471,7 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION));
#endif
if (!FLAG_allocation_site_pretenuring ||
- !AllocationSite::CanTrack(map->instance_type())) {
+ !AllocationSite::CanTrack(map.instance_type())) {
return;
}
AllocationMemento memento_candidate =
@@ -476,12 +481,12 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
// Entering cached feedback is used in the parallel case. We are not allowed
// to dereference the allocation site and rather have to postpone all checks
// till actually merging the data.
- Address key = memento_candidate->GetAllocationSiteUnchecked();
+ Address key = memento_candidate.GetAllocationSiteUnchecked();
(*pretenuring_feedback)[AllocationSite::unchecked_cast(Object(key))]++;
}
void Heap::ExternalStringTable::AddString(String string) {
- DCHECK(string->IsExternalString());
+ DCHECK(string.IsExternalString());
DCHECK(!Contains(string));
if (InYoungGeneration(string)) {
@@ -497,7 +502,7 @@ Oddball Heap::ToBoolean(bool condition) {
}
int Heap::NextScriptId() {
- int last_id = last_script_id()->value();
+ int last_id = last_script_id().value();
if (last_id == Smi::kMaxValue) last_id = v8::UnboundScript::kNoScriptId;
last_id++;
set_last_script_id(Smi::FromInt(last_id));
@@ -505,7 +510,7 @@ int Heap::NextScriptId() {
}
int Heap::NextDebuggingId() {
- int last_id = last_debugging_id()->value();
+ int last_id = last_debugging_id().value();
if (last_id == DebugInfo::DebuggingIdBits::kMax) {
last_id = DebugInfo::kNoDebuggingId;
}
@@ -515,7 +520,7 @@ int Heap::NextDebuggingId() {
}
int Heap::GetNextTemplateSerialNumber() {
- int next_serial_number = next_template_serial_number()->value() + 1;
+ int next_serial_number = next_template_serial_number().value() + 1;
set_next_template_serial_number(Smi::FromInt(next_serial_number));
return next_serial_number;
}
diff --git a/deps/v8/src/heap/heap-write-barrier-inl.h b/deps/v8/src/heap/heap-write-barrier-inl.h
index b33fd5d4c1..6c5f20ac72 100644
--- a/deps/v8/src/heap/heap-write-barrier-inl.h
+++ b/deps/v8/src/heap/heap-write-barrier-inl.h
@@ -10,16 +10,16 @@
#include "src/heap/heap-write-barrier.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
// TODO(jkummerow): Get rid of this by moving GetIsolateFromWritableObject
// elsewhere.
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "src/objects/code.h"
#include "src/objects/compressed-slots-inl.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
#include "src/objects/maybe-object-inl.h"
-#include "src/objects/slots.h"
+#include "src/objects/slots-inl.h"
namespace v8 {
namespace internal {
@@ -38,12 +38,6 @@ V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForCodeSlow(Code host,
V8_EXPORT_PRIVATE void Heap_MarkingBarrierForCodeSlow(Code host,
RelocInfo* rinfo,
HeapObject object);
-V8_EXPORT_PRIVATE void Heap_GenerationalBarrierForElementsSlow(Heap* heap,
- FixedArray array,
- int offset,
- int length);
-V8_EXPORT_PRIVATE void Heap_MarkingBarrierForElementsSlow(Heap* heap,
- HeapObject object);
V8_EXPORT_PRIVATE void Heap_MarkingBarrierForDescriptorArraySlow(
Heap* heap, HeapObject host, HeapObject descriptor_array,
int number_of_own_descriptors);
@@ -72,7 +66,7 @@ struct MemoryChunk {
V8_INLINE static heap_internals::MemoryChunk* FromHeapObject(
HeapObject object) {
- return reinterpret_cast<MemoryChunk*>(object->ptr() & ~kPageAlignmentMask);
+ return reinterpret_cast<MemoryChunk*>(object.ptr() & ~kPageAlignmentMask);
}
V8_INLINE bool IsMarking() const { return GetFlags() & kMarkingBit; }
@@ -147,7 +141,7 @@ inline void MarkingBarrierInternal(HeapObject object, Address slot,
inline void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object value) {
DCHECK(!HasWeakHeapObjectTag(value));
- if (!value->IsHeapObject()) return;
+ if (!value.IsHeapObject()) return;
HeapObject object = HeapObject::cast(value);
GenerationalBarrierForCode(host, rinfo, object);
MarkingBarrierForCode(host, rinfo, object);
@@ -161,7 +155,7 @@ inline void GenerationalBarrier(HeapObject object, ObjectSlot slot,
Object value) {
DCHECK(!HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
- if (!value->IsHeapObject()) return;
+ if (!value.IsHeapObject()) return;
heap_internals::GenerationalBarrierInternal(object, slot.address(),
HeapObject::cast(value));
}
@@ -170,7 +164,7 @@ inline void GenerationalEphemeronKeyBarrier(EphemeronHashTable table,
ObjectSlot slot, Object value) {
DCHECK(!HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
- DCHECK(value->IsHeapObject());
+ DCHECK(value.IsHeapObject());
heap_internals::GenerationalEphemeronKeyBarrierInternal(
table, slot.address(), HeapObject::cast(value));
}
@@ -183,15 +177,6 @@ inline void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
value_heap_object);
}
-inline void GenerationalBarrierForElements(Heap* heap, FixedArray array,
- int offset, int length) {
- heap_internals::MemoryChunk* array_chunk =
- heap_internals::MemoryChunk::FromHeapObject(array);
- if (array_chunk->InYoungGeneration()) return;
-
- Heap_GenerationalBarrierForElementsSlow(heap, array, offset, length);
-}
-
inline void GenerationalBarrierForCode(Code host, RelocInfo* rinfo,
HeapObject object) {
heap_internals::MemoryChunk* object_chunk =
@@ -203,7 +188,7 @@ inline void GenerationalBarrierForCode(Code host, RelocInfo* rinfo,
inline void MarkingBarrier(HeapObject object, ObjectSlot slot, Object value) {
DCHECK_IMPLIES(slot.address() != kNullAddress, !HasWeakHeapObjectTag(*slot));
DCHECK(!HasWeakHeapObjectTag(value));
- if (!value->IsHeapObject()) return;
+ if (!value.IsHeapObject()) return;
heap_internals::MarkingBarrierInternal(object, slot.address(),
HeapObject::cast(value));
}
@@ -216,17 +201,9 @@ inline void MarkingBarrier(HeapObject object, MaybeObjectSlot slot,
value_heap_object);
}
-inline void MarkingBarrierForElements(Heap* heap, HeapObject object) {
- heap_internals::MemoryChunk* object_chunk =
- heap_internals::MemoryChunk::FromHeapObject(object);
- if (!object_chunk->IsMarking()) return;
-
- Heap_MarkingBarrierForElementsSlow(heap, object);
-}
-
inline void MarkingBarrierForCode(Code host, RelocInfo* rinfo,
HeapObject object) {
- DCHECK(!HasWeakHeapObjectTag(object.ptr()));
+ DCHECK(!HasWeakHeapObjectTag(object));
heap_internals::MemoryChunk* object_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
if (!object_chunk->IsMarking()) return;
diff --git a/deps/v8/src/heap/heap-write-barrier.h b/deps/v8/src/heap/heap-write-barrier.h
index 803f022fcd..ead17f9396 100644
--- a/deps/v8/src/heap/heap-write-barrier.h
+++ b/deps/v8/src/heap/heap-write-barrier.h
@@ -6,7 +6,7 @@
#define V8_HEAP_HEAP_WRITE_BARRIER_H_
#include "include/v8-internal.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -14,22 +14,12 @@ namespace internal {
class Code;
class FixedArray;
class Heap;
-class HeapObject;
-class MaybeObject;
-class Object;
class RelocInfo;
class EphemeronHashTable;
// Note: In general it is preferred to use the macros defined in
// object-macros.h.
-// Write barrier for FixedArray elements.
-#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
- do { \
- GenerationalBarrierForElements(heap, array, start, length); \
- MarkingBarrierForElements(heap, array); \
- } while (false)
-
// Combined write barriers.
void WriteBarrierForCode(Code host, RelocInfo* rinfo, Object value);
void WriteBarrierForCode(Code host);
@@ -40,14 +30,11 @@ void GenerationalBarrier(HeapObject object, MaybeObjectSlot slot,
MaybeObject value);
void GenerationalEphemeronKeyBarrier(EphemeronHashTable table, ObjectSlot slot,
Object value);
-void GenerationalBarrierForElements(Heap* heap, FixedArray array, int offset,
- int length);
void GenerationalBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
// Marking write barrier.
void MarkingBarrier(HeapObject object, ObjectSlot slot, Object value);
void MarkingBarrier(HeapObject object, MaybeObjectSlot slot, MaybeObject value);
-void MarkingBarrierForElements(Heap* heap, HeapObject object);
void MarkingBarrierForCode(Code host, RelocInfo* rinfo, HeapObject object);
void MarkingBarrierForDescriptorArray(Heap* heap, HeapObject host,
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index a6b3f5dd1d..52387b5bc1 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -4,32 +4,37 @@
#include "src/heap/heap.h"
+#include <cinttypes>
#include <unordered_map>
#include <unordered_set>
-#include "src/accessors.h"
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/bits.h"
+#include "src/base/flags.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/bootstrapper.h"
-#include "src/compilation-cache.h"
-#include "src/conversions.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compilation-cache.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/feedback-vector.h"
-#include "src/global-handles.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/microtask-queue.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/v8threads.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/barrier.h"
#include "src/heap/code-stats.h"
+#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/heap/incremental-marking-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
@@ -45,30 +50,30 @@
#include "src/heap/stress-marking-observer.h"
#include "src/heap/stress-scavenge-observer.h"
#include "src/heap/sweeper.h"
+#include "src/init/bootstrapper.h"
+#include "src/init/v8.h"
#include "src/interpreter/interpreter.h"
-#include "src/log.h"
-#include "src/microtask-queue.h"
+#include "src/logging/log.h"
+#include "src/numbers/conversions.h"
#include "src/objects/data-handler.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots-inl.h"
#include "src/regexp/jsregexp.h"
-#include "src/runtime-profiler.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
-#include "src/string-stream.h"
+#include "src/strings/string-stream.h"
+#include "src/strings/unicode-decoder.h"
+#include "src/strings/unicode-inl.h"
#include "src/tracing/trace-event.h"
-#include "src/unicode-decoder.h"
-#include "src/unicode-inl.h"
-#include "src/utils-inl.h"
-#include "src/utils.h"
-#include "src/v8.h"
-#include "src/v8threads.h"
-#include "src/vm-state-inl.h"
+#include "src/utils/utils-inl.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -106,15 +111,6 @@ void Heap_MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
Heap::MarkingBarrierForCodeSlow(host, rinfo, object);
}
-void Heap_GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
- int offset, int length) {
- Heap::GenerationalBarrierForElementsSlow(heap, array, offset, length);
-}
-
-void Heap_MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
- Heap::MarkingBarrierForElementsSlow(heap, object);
-}
-
void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
HeapObject descriptor_array,
int number_of_own_descriptors) {
@@ -183,10 +179,11 @@ Heap::Heap()
: isolate_(isolate()),
initial_max_old_generation_size_(max_old_generation_size_),
initial_max_old_generation_size_threshold_(0),
- initial_old_generation_size_(max_old_generation_size_ /
- kInitalOldGenerationLimitFactor),
+ initial_old_generation_size_(
+ Min(max_old_generation_size_, kMaxInitialOldGenerationSize)),
memory_pressure_level_(MemoryPressureLevel::kNone),
old_generation_allocation_limit_(initial_old_generation_size_),
+ global_allocation_limit_(initial_old_generation_size_),
global_pretenuring_feedback_(kInitialFeedbackCapacity),
current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
is_current_gc_forced_(false),
@@ -215,8 +212,18 @@ size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
old_space_physical_memory_factor *
kPointerMultiplier);
- return Max(Min(computed_size, HeapController::kMaxSize),
- HeapController::kMinSize);
+ size_t max_size_in_mb = V8HeapTrait::kMaxSize;
+
+ // Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit
+ // systems with physical memory bigger than 16GB.
+ constexpr bool x64_bit = Heap::kPointerMultiplier >= 2;
+ if (FLAG_huge_max_old_generation_size && x64_bit &&
+ physical_memory / GB > 16) {
+ DCHECK_LE(max_size_in_mb, 4096);
+ max_size_in_mb = 4096; // 4GB
+ }
+
+ return Max(Min(computed_size, max_size_in_mb), V8HeapTrait::kMinSize);
}
size_t Heap::Capacity() {
@@ -227,7 +234,7 @@ size_t Heap::Capacity() {
size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
- PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
+ PagedSpaces spaces(this);
size_t total = 0;
for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
@@ -239,7 +246,7 @@ size_t Heap::OldGenerationCapacity() {
size_t Heap::CommittedOldGenerationMemory() {
if (!HasBeenSetUp()) return 0;
- PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
+ PagedSpaces spaces(this);
size_t total = 0;
for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
@@ -359,93 +366,76 @@ void Heap::SetGCState(HeapState state) {
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintIsolate(isolate_,
- "Memory allocator, used: %6" PRIuS
- " KB,"
- " available: %6" PRIuS " KB\n",
+ "Memory allocator, used: %6zu KB,"
+ " available: %6zu KB\n",
memory_allocator()->Size() / KB,
memory_allocator()->Available() / KB);
PrintIsolate(isolate_,
- "Read-only space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ "Read-only space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
read_only_space_->Size() / KB,
read_only_space_->Available() / KB,
read_only_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
- "New space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ "New space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
new_space_->Size() / KB, new_space_->Available() / KB,
new_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
- "New large object space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ "New large object space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
new_lo_space_->SizeOfObjects() / KB,
new_lo_space_->Available() / KB,
new_lo_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
- "Old space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ "Old space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
old_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
- "Code space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS "KB\n",
+ "Code space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
- "Map space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ "Map space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
- "Large object space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ "Large object space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
- "Code large object space, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS " KB\n",
+ "Code large object space, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
code_lo_space_->SizeOfObjects() / KB,
code_lo_space_->Available() / KB,
code_lo_space_->CommittedMemory() / KB);
+ ReadOnlySpace* const ro_space = read_only_space_;
PrintIsolate(isolate_,
- "All spaces, used: %6" PRIuS
- " KB"
- ", available: %6" PRIuS
- " KB"
- ", committed: %6" PRIuS "KB\n",
- this->SizeOfObjects() / KB, this->Available() / KB,
- this->CommittedMemory() / KB);
+ "All spaces, used: %6zu KB"
+ ", available: %6zu KB"
+ ", committed: %6zu KB\n",
+ (this->SizeOfObjects() + ro_space->SizeOfObjects()) / KB,
+ (this->Available() + ro_space->Available()) / KB,
+ (this->CommittedMemory() + ro_space->CommittedMemory()) / KB);
PrintIsolate(isolate_,
- "Unmapper buffering %zu chunks of committed: %6" PRIuS " KB\n",
+ "Unmapper buffering %zu chunks of committed: %6zu KB\n",
memory_allocator()->unmapper()->NumberOfCommittedChunks(),
CommittedMemoryOfUnmapper() / KB);
PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
isolate()->isolate_data()->external_memory_ / KB);
- PrintIsolate(isolate_, "Backing store memory: %6" PRIuS " KB\n",
+ PrintIsolate(isolate_, "Backing store memory: %6zu KB\n",
backing_store_bytes_ / KB);
PrintIsolate(isolate_, "External memory global %zu KB\n",
external_memory_callback_() / KB);
@@ -453,6 +443,63 @@ void Heap::PrintShortHeapStatistics() {
total_gc_time_ms_);
}
+void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
+ HeapStatistics stats;
+ reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats);
+
+// clang-format off
+#define DICT(s) "{" << s << "}"
+#define LIST(s) "[" << s << "]"
+#define ESCAPE(s) "\"" << s << "\""
+#define MEMBER(s) ESCAPE(s) << ":"
+
+ auto SpaceStatistics = [this](int space_index) {
+ HeapSpaceStatistics space_stats;
+ reinterpret_cast<v8::Isolate*>(isolate())->GetHeapSpaceStatistics(
+ &space_stats, space_index);
+ std::stringstream stream;
+ stream << DICT(
+ MEMBER("name")
+ << ESCAPE(GetSpaceName(static_cast<AllocationSpace>(space_index)))
+ << ","
+ MEMBER("size") << space_stats.space_size() << ","
+ MEMBER("used_size") << space_stats.space_used_size() << ","
+ MEMBER("available_size") << space_stats.space_available_size() << ","
+ MEMBER("physical_size") << space_stats.physical_space_size());
+ return stream.str();
+ };
+
+ stream << DICT(
+ MEMBER("isolate") << ESCAPE(reinterpret_cast<void*>(isolate())) << ","
+ MEMBER("id") << gc_count() << ","
+ MEMBER("time_ms") << isolate()->time_millis_since_init() << ","
+ MEMBER("total_heap_size") << stats.total_heap_size() << ","
+ MEMBER("total_heap_size_executable")
+ << stats.total_heap_size_executable() << ","
+ MEMBER("total_physical_size") << stats.total_physical_size() << ","
+ MEMBER("total_available_size") << stats.total_available_size() << ","
+ MEMBER("used_heap_size") << stats.used_heap_size() << ","
+ MEMBER("heap_size_limit") << stats.heap_size_limit() << ","
+ MEMBER("malloced_memory") << stats.malloced_memory() << ","
+ MEMBER("external_memory") << stats.external_memory() << ","
+ MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << ","
+ MEMBER("pages") << LIST(
+ SpaceStatistics(RO_SPACE) << "," <<
+ SpaceStatistics(NEW_SPACE) << "," <<
+ SpaceStatistics(OLD_SPACE) << "," <<
+ SpaceStatistics(CODE_SPACE) << "," <<
+ SpaceStatistics(MAP_SPACE) << "," <<
+ SpaceStatistics(LO_SPACE) << "," <<
+ SpaceStatistics(CODE_LO_SPACE) << "," <<
+ SpaceStatistics(NEW_LO_SPACE)));
+
+#undef DICT
+#undef LIST
+#undef ESCAPE
+#undef MEMBER
+ // clang-format on
+}
+
void Heap::ReportStatisticsAfterGC() {
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
++i) {
@@ -497,10 +544,10 @@ void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
bool Heap::IsRetainingPathTarget(HeapObject object,
RetainingPathOption* option) {
WeakArrayList targets = retaining_path_targets();
- int length = targets->length();
+ int length = targets.length();
MaybeObject object_to_check = HeapObjectReference::Weak(object);
for (int i = 0; i < length; i++) {
- MaybeObject target = targets->Get(i);
+ MaybeObject target = targets.Get(i);
DCHECK(target->IsWeakOrCleared());
if (target == object_to_check) {
DCHECK(retaining_path_target_option_.count(i));
@@ -514,7 +561,7 @@ bool Heap::IsRetainingPathTarget(HeapObject object,
void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
PrintF("\n\n\n");
PrintF("#################################################\n");
- PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target->ptr()));
+ PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target.ptr()));
HeapObject object = target;
std::vector<std::pair<HeapObject, bool>> retaining_path;
Root root = Root::kUnknown;
@@ -543,10 +590,10 @@ void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
PrintF("Distance from root %d%s: ", distance,
ephemeron ? " (ephemeron)" : "");
- object->ShortPrint();
+ object.ShortPrint();
PrintF("\n");
#ifdef OBJECT_PRINT
- object->Print();
+ object.Print();
PrintF("\n");
#endif
--distance;
@@ -680,7 +727,7 @@ void Heap::MergeAllocationSitePretenuringFeedback(
AllocationSite site;
for (auto& site_and_count : local_pretenuring_feedback) {
site = site_and_count.first;
- MapWord map_word = site_and_count.first->map_word();
+ MapWord map_word = site_and_count.first.map_word();
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
@@ -688,11 +735,11 @@ void Heap::MergeAllocationSitePretenuringFeedback(
// We have not validated the allocation site yet, since we have not
// dereferenced the site during collecting information.
// This is an inlined check of AllocationMemento::IsValid.
- if (!site->IsAllocationSite() || site->IsZombie()) continue;
+ if (!site.IsAllocationSite() || site.IsZombie()) continue;
const int value = static_cast<int>(site_and_count.second);
DCHECK_LT(0, value);
- if (site->IncrementMementoFoundCount(value)) {
+ if (site.IncrementMementoFoundCount(value)) {
// For sites in the global map the count is accessed through the site.
global_pretenuring_feedback_.insert(std::make_pair(site, 0));
}
@@ -756,15 +803,15 @@ inline bool MakePretenureDecision(
// We just transition into tenure state when the semi-space was at
// maximum capacity.
if (maximum_size_scavenge) {
- site->set_deopt_dependent_code(true);
- site->set_pretenure_decision(AllocationSite::kTenure);
+ site.set_deopt_dependent_code(true);
+ site.set_pretenure_decision(AllocationSite::kTenure);
// Currently we just need to deopt when we make a state transition to
// tenure.
return true;
}
- site->set_pretenure_decision(AllocationSite::kMaybeTenure);
+ site.set_pretenure_decision(AllocationSite::kMaybeTenure);
} else {
- site->set_pretenure_decision(AllocationSite::kDontTenure);
+ site.set_pretenure_decision(AllocationSite::kDontTenure);
}
}
return false;
@@ -773,15 +820,15 @@ inline bool MakePretenureDecision(
inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
bool maximum_size_scavenge) {
bool deopt = false;
- int create_count = site->memento_create_count();
- int found_count = site->memento_found_count();
+ int create_count = site.memento_create_count();
+ int found_count = site.memento_found_count();
bool minimum_mementos_created =
create_count >= AllocationSite::kPretenureMinimumCreated;
double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
? static_cast<double>(found_count) / create_count
: 0.0;
AllocationSite::PretenureDecision current_decision =
- site->pretenure_decision();
+ site.pretenure_decision();
if (minimum_mementos_created) {
deopt = MakePretenureDecision(site, current_decision, ratio,
@@ -793,13 +840,13 @@ inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
"pretenuring: AllocationSite(%p): (created, found, ratio) "
"(%d, %d, %f) %s => %s\n",
reinterpret_cast<void*>(site.ptr()), create_count, found_count,
- ratio, site->PretenureDecisionName(current_decision),
- site->PretenureDecisionName(site->pretenure_decision()));
+ ratio, site.PretenureDecisionName(current_decision),
+ site.PretenureDecisionName(site.pretenure_decision()));
}
// Clear feedback calculation fields until the next gc.
- site->set_memento_found_count(0);
- site->set_memento_create_count(0);
+ site.set_memento_found_count(0);
+ site.set_memento_create_count(0);
return deopt;
}
} // namespace
@@ -830,18 +877,18 @@ void Heap::ProcessPretenuringFeedback() {
site = site_and_count.first;
// Count is always access through the site.
DCHECK_EQ(0, site_and_count.second);
- int found_count = site->memento_found_count();
+ int found_count = site.memento_found_count();
// An entry in the storage does not imply that the count is > 0 because
// allocation sites might have been reset due to too many objects dying
// in old space.
if (found_count > 0) {
- DCHECK(site->IsAllocationSite());
+ DCHECK(site.IsAllocationSite());
active_allocation_sites++;
allocation_mementos_found += found_count;
if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
trigger_deoptimization = true;
}
- if (site->GetAllocationType() == AllocationType::kOld) {
+ if (site.GetAllocationType() == AllocationType::kOld) {
tenure_decisions++;
} else {
dont_tenure_decisions++;
@@ -855,10 +902,10 @@ void Heap::ProcessPretenuringFeedback() {
ForeachAllocationSite(
allocation_sites_list(),
[&allocation_sites, &trigger_deoptimization](AllocationSite site) {
- DCHECK(site->IsAllocationSite());
+ DCHECK(site.IsAllocationSite());
allocation_sites++;
- if (site->IsMaybeTenure()) {
- site->set_deopt_dependent_code(true);
+ if (site.IsMaybeTenure()) {
+ site.set_deopt_dependent_code(true);
trigger_deoptimization = true;
}
});
@@ -888,7 +935,7 @@ void Heap::ProcessPretenuringFeedback() {
void Heap::InvalidateCodeDeoptimizationData(Code code) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(code);
CodePageMemoryModificationScope modification_scope(chunk);
- code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
+ code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
}
void Heap::DeoptMarkedAllocationSites() {
@@ -896,10 +943,10 @@ void Heap::DeoptMarkedAllocationSites() {
// performance issue, use a cache data structure in heap instead.
ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite site) {
- if (site->deopt_dependent_code()) {
- site->dependent_code()->MarkCodeForDeoptimization(
+ if (site.deopt_dependent_code()) {
+ site.dependent_code().MarkCodeForDeoptimization(
isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
- site->set_deopt_dependent_code(false);
+ site.set_deopt_dependent_code(false);
}
});
@@ -934,10 +981,9 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
- isolate_->counters()->string_table_capacity()->Set(
- string_table()->Capacity());
+ isolate_->counters()->string_table_capacity()->Set(string_table().Capacity());
isolate_->counters()->number_of_symbols()->Set(
- string_table()->NumberOfElements());
+ string_table().NumberOfElements());
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
@@ -1002,7 +1048,7 @@ void Heap::GarbageCollectionEpilogue() {
// earlier invocation of the cleanup function didn't iterate through
// them). See https://github.com/tc39/proposal-weakrefs/issues/34
HandleScope handle_scope(isolate());
- while (!isolate()->heap()->dirty_js_finalization_groups()->IsUndefined(
+ while (!isolate()->heap()->dirty_js_finalization_groups().IsUndefined(
isolate())) {
// Enqueue one microtask per JSFinalizationGroup.
Handle<JSFinalizationGroup> finalization_group(
@@ -1026,7 +1072,8 @@ void Heap::GarbageCollectionEpilogue() {
Handle<FinalizationGroupCleanupJobTask> task =
isolate()->factory()->NewFinalizationGroupCleanupJobTask(
finalization_group);
- context->microtask_queue()->EnqueueMicrotask(*task);
+ MicrotaskQueue* microtask_queue = context->microtask_queue();
+ if (microtask_queue) microtask_queue->EnqueueMicrotask(*task);
}
}
}
@@ -1133,10 +1180,10 @@ namespace {
intptr_t CompareWords(int size, HeapObject a, HeapObject b) {
int slots = size / kTaggedSize;
- DCHECK_EQ(a->Size(), size);
- DCHECK_EQ(b->Size(), size);
- Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a->address());
- Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b->address());
+ DCHECK_EQ(a.Size(), size);
+ DCHECK_EQ(b.Size(), size);
+ Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a.address());
+ Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b.address());
for (int i = 0; i < slots; i++) {
if (*slot_a != *slot_b) {
return *slot_a - *slot_b;
@@ -1183,7 +1230,7 @@ void ReportDuplicates(int size, std::vector<HeapObject>& objects) {
PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
duplicate_bytes / KB);
PrintF("Sample object: ");
- it->second->Print();
+ it->second.Print();
PrintF("============================\n");
}
}
@@ -1238,13 +1285,13 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
space = spaces.next()) {
HeapObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- objects_by_size[obj->Size()].push_back(obj);
+ objects_by_size[obj.Size()].push_back(obj);
}
}
{
LargeObjectIterator it(lo_space());
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- objects_by_size[obj->Size()].push_back(obj);
+ objects_by_size[obj.Size()].push_back(obj);
}
}
for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
@@ -1414,7 +1461,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
event.next_gc_likely_to_collect_more =
(committed_memory_before > committed_memory_after + MB) ||
HasHighFragmentation(used_memory_after, committed_memory_after) ||
- (detached_contexts()->length() > 0);
+ (detached_contexts().length() > 0);
event.committed_memory = committed_memory_after;
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
@@ -1458,7 +1505,7 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
}
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
- number_of_disposed_maps_ = retained_maps()->length();
+ number_of_disposed_maps_ = retained_maps().length();
tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
return ++contexts_disposed_;
}
@@ -1479,9 +1526,12 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
} else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
- StartIncrementalMarking(gc_flags,
- GarbageCollectionReason::kAllocationLimit,
- gc_callback_flags);
+ StartIncrementalMarking(
+ gc_flags,
+ OldGenerationSpaceAvailable() <= new_space_->Capacity()
+ ? GarbageCollectionReason::kAllocationLimit
+ : GarbageCollectionReason::kGlobalAllocationLimit,
+ gc_callback_flags);
}
}
}
@@ -1493,70 +1543,83 @@ void Heap::StartIdleIncrementalMarking(
gc_callback_flags);
}
-void Heap::MoveElements(FixedArray array, int dst_index, int src_index, int len,
- WriteBarrierMode mode) {
- if (len == 0) return;
+void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot,
+ const ObjectSlot src_slot, int len,
+ WriteBarrierMode mode) {
+ DCHECK_NE(len, 0);
+ DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
+ const ObjectSlot dst_end(dst_slot + len);
+ // Ensure no range overflow.
+ DCHECK(dst_slot < dst_end);
+ DCHECK(src_slot < src_slot + len);
- DCHECK_NE(array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
- ObjectSlot dst = array->RawFieldOfElementAt(dst_index);
- ObjectSlot src = array->RawFieldOfElementAt(src_index);
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
- if (dst < src) {
- for (int i = 0; i < len; i++) {
- dst.Relaxed_Store(src.Relaxed_Load());
+ if (dst_slot < src_slot) {
+ // Copy tagged values forward using relaxed load/stores that do not
+ // involve value decompression.
+ const AtomicSlot atomic_dst_end(dst_end);
+ AtomicSlot dst(dst_slot);
+ AtomicSlot src(src_slot);
+ while (dst < atomic_dst_end) {
+ *dst = *src;
++dst;
++src;
}
} else {
- // Copy backwards.
- dst += len - 1;
- src += len - 1;
- for (int i = 0; i < len; i++) {
- dst.Relaxed_Store(src.Relaxed_Load());
+ // Copy tagged values backwards using relaxed load/stores that do not
+ // involve value decompression.
+ const AtomicSlot atomic_dst_begin(dst_slot);
+ AtomicSlot dst(dst_slot + len - 1);
+ AtomicSlot src(src_slot + len - 1);
+ while (dst >= atomic_dst_begin) {
+ *dst = *src;
--dst;
--src;
}
}
} else {
- MemMove(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
+ MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
}
if (mode == SKIP_WRITE_BARRIER) return;
- FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
+ WriteBarrierForRange(dst_object, dst_slot, dst_end);
}
-void Heap::CopyElements(FixedArray dst_array, FixedArray src_array,
- int dst_index, int src_index, int len,
- WriteBarrierMode mode) {
- DCHECK_NE(dst_array, src_array);
- if (len == 0) return;
+// Instantiate Heap::CopyRange() for ObjectSlot and MaybeObjectSlot.
+template void Heap::CopyRange<ObjectSlot>(HeapObject dst_object,
+ ObjectSlot dst_slot,
+ ObjectSlot src_slot, int len,
+ WriteBarrierMode mode);
+template void Heap::CopyRange<MaybeObjectSlot>(HeapObject dst_object,
+ MaybeObjectSlot dst_slot,
+ MaybeObjectSlot src_slot,
+ int len, WriteBarrierMode mode);
+
+template <typename TSlot>
+void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot,
+ const TSlot src_slot, int len, WriteBarrierMode mode) {
+ DCHECK_NE(len, 0);
- DCHECK_NE(dst_array->map(), ReadOnlyRoots(this).fixed_cow_array_map());
- ObjectSlot dst = dst_array->RawFieldOfElementAt(dst_index);
- ObjectSlot src = src_array->RawFieldOfElementAt(src_index);
+ DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map());
+ const TSlot dst_end(dst_slot + len);
// Ensure ranges do not overlap.
- DCHECK(dst + len <= src || src + len <= dst);
+ DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot);
+
if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
- if (dst < src) {
- for (int i = 0; i < len; i++) {
- dst.Relaxed_Store(src.Relaxed_Load());
- ++dst;
- ++src;
- }
- } else {
- // Copy backwards.
- dst += len - 1;
- src += len - 1;
- for (int i = 0; i < len; i++) {
- dst.Relaxed_Store(src.Relaxed_Load());
- --dst;
- --src;
- }
+ // Copy tagged values using relaxed load/stores that do not involve value
+ // decompression.
+ const AtomicSlot atomic_dst_end(dst_end);
+ AtomicSlot dst(dst_slot);
+ AtomicSlot src(src_slot);
+ while (dst < atomic_dst_end) {
+ *dst = *src;
+ ++dst;
+ ++src;
}
} else {
- MemCopy(dst.ToVoidPtr(), src.ToVoidPtr(), len * kTaggedSize);
+ MemCopy(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize);
}
if (mode == SKIP_WRITE_BARRIER) return;
- FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, dst_array, dst_index, len);
+ WriteBarrierForRange(dst_object, dst_slot, dst_end);
}
#ifdef VERIFY_HEAP
@@ -1570,11 +1633,11 @@ class StringTableVerifier : public ObjectVisitor {
// Visit all HeapObject pointers in [start, end).
for (ObjectSlot p = start; p < end; ++p) {
DCHECK(!HasWeakHeapObjectTag(*p));
- if ((*p)->IsHeapObject()) {
+ if ((*p).IsHeapObject()) {
HeapObject object = HeapObject::cast(*p);
// Check that the string is actually internalized.
- CHECK(object->IsTheHole(isolate_) || object->IsUndefined(isolate_) ||
- object->IsInternalizedString());
+ CHECK(object.IsTheHole(isolate_) || object.IsUndefined(isolate_) ||
+ object.IsInternalizedString());
}
}
}
@@ -1595,7 +1658,7 @@ class StringTableVerifier : public ObjectVisitor {
static void VerifyStringTable(Isolate* isolate) {
StringTableVerifier verifier(isolate);
- isolate->heap()->string_table()->IterateElements(&verifier);
+ isolate->heap()->string_table().IterateElements(&verifier);
}
#endif // VERIFY_HEAP
@@ -1623,14 +1686,13 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
DCHECK_EQ(0, reserved_size % Map::kSize);
int num_maps = reserved_size / Map::kSize;
for (int i = 0; i < num_maps; i++) {
- // The deserializer will update the skip list.
- AllocationResult allocation = map_space()->AllocateRawUnaligned(
- Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
+ AllocationResult allocation =
+ map_space()->AllocateRawUnaligned(Map::kSize);
HeapObject free_space;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
- Address free_space_address = free_space->address();
+ Address free_space_address = free_space.address();
CreateFillerObjectAt(free_space_address, Map::kSize,
ClearRecordedSlots::kNo);
maps->push_back(free_space_address);
@@ -1656,14 +1718,13 @@ bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
allocation = new_space()->AllocateRawUnaligned(size);
} else {
// The deserializer will update the skip list.
- allocation = paged_space(space)->AllocateRawUnaligned(
- size, PagedSpace::IGNORE_SKIP_LIST);
+ allocation = paged_space(space)->AllocateRawUnaligned(size);
}
HeapObject free_space;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
- Address free_space_address = free_space->address();
+ Address free_space_address = free_space.address();
CreateFillerObjectAt(free_space_address, size,
ClearRecordedSlots::kNo);
DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
@@ -1867,38 +1928,7 @@ bool Heap::PerformGarbageCollection(
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing(isolate_);
- double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
- double mutator_speed =
- tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
- size_t old_gen_size = OldGenerationSizeOfObjects();
- if (collector == MARK_COMPACTOR) {
- // Register the amount of external allocated memory.
- isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
- isolate()->isolate_data()->external_memory_;
- isolate()->isolate_data()->external_memory_limit_ =
- isolate()->isolate_data()->external_memory_ +
- kExternalAllocationSoftLimit;
-
- double max_factor =
- heap_controller()->MaxGrowingFactor(max_old_generation_size_);
- size_t new_limit = heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size_, max_factor, gc_speed,
- mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
- old_generation_allocation_limit_ = new_limit;
-
- CheckIneffectiveMarkCompact(
- old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
- } else if (HasLowYoungGenerationAllocationRate() &&
- old_generation_size_configured_) {
- double max_factor =
- heap_controller()->MaxGrowingFactor(max_old_generation_size_);
- size_t new_limit = heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size_, max_factor, gc_speed,
- mutator_speed, new_space()->Capacity(), CurrentHeapGrowingMode());
- if (new_limit < old_generation_allocation_limit_) {
- old_generation_allocation_limit_ = new_limit;
- }
- }
+ RecomputeLimits(collector);
{
GCCallbacksScope scope(this);
@@ -1921,6 +1951,80 @@ bool Heap::PerformGarbageCollection(
return freed_global_handles > 0;
}
+void Heap::RecomputeLimits(GarbageCollector collector) {
+ if (!((collector == MARK_COMPACTOR) ||
+ (HasLowYoungGenerationAllocationRate() &&
+ old_generation_size_configured_))) {
+ return;
+ }
+
+ double v8_gc_speed =
+ tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
+ double v8_mutator_speed =
+ tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
+ double v8_growing_factor = MemoryController<V8HeapTrait>::GrowingFactor(
+ this, max_old_generation_size_, v8_gc_speed, v8_mutator_speed);
+ double global_growing_factor = 0;
+ if (UseGlobalMemoryScheduling()) {
+ DCHECK_NOT_NULL(local_embedder_heap_tracer());
+ double embedder_gc_speed = tracer()->EmbedderSpeedInBytesPerMillisecond();
+ double embedder_speed =
+ tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond();
+ double embedder_growing_factor =
+ (embedder_gc_speed > 0 && embedder_speed > 0)
+ ? MemoryController<GlobalMemoryTrait>::GrowingFactor(
+ this, max_global_memory_size_, embedder_gc_speed,
+ embedder_speed)
+ : 0;
+ global_growing_factor = Max(v8_growing_factor, embedder_growing_factor);
+ }
+
+ size_t old_gen_size = OldGenerationSizeOfObjects();
+ size_t new_space_capacity = new_space()->Capacity();
+ HeapGrowingMode mode = CurrentHeapGrowingMode();
+
+ if (collector == MARK_COMPACTOR) {
+ // Register the amount of external allocated memory.
+ isolate()->isolate_data()->external_memory_at_last_mark_compact_ =
+ isolate()->isolate_data()->external_memory_;
+ isolate()->isolate_data()->external_memory_limit_ =
+ isolate()->isolate_data()->external_memory_ +
+ kExternalAllocationSoftLimit;
+
+ old_generation_allocation_limit_ =
+ MemoryController<V8HeapTrait>::CalculateAllocationLimit(
+ this, old_gen_size, max_old_generation_size_, new_space_capacity,
+ v8_growing_factor, mode);
+ if (UseGlobalMemoryScheduling()) {
+ DCHECK_GT(global_growing_factor, 0);
+ global_allocation_limit_ =
+ MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
+ this, GlobalSizeOfObjects(), max_global_memory_size_,
+ new_space_capacity, global_growing_factor, mode);
+ }
+ CheckIneffectiveMarkCompact(
+ old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
+ } else if (HasLowYoungGenerationAllocationRate() &&
+ old_generation_size_configured_) {
+ size_t new_old_generation_limit =
+ MemoryController<V8HeapTrait>::CalculateAllocationLimit(
+ this, old_gen_size, max_old_generation_size_, new_space_capacity,
+ v8_growing_factor, mode);
+ if (new_old_generation_limit < old_generation_allocation_limit_) {
+ old_generation_allocation_limit_ = new_old_generation_limit;
+ }
+ if (UseGlobalMemoryScheduling()) {
+ DCHECK_GT(global_growing_factor, 0);
+ size_t new_global_limit =
+ MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit(
+ this, GlobalSizeOfObjects(), max_global_memory_size_,
+ new_space_capacity, global_growing_factor, mode);
+ if (new_global_limit < global_allocation_limit_) {
+ global_allocation_limit_ = new_global_limit;
+ }
+ }
+ }
+}
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
RuntimeCallTimerScope runtime_timer(
@@ -2140,9 +2244,9 @@ void Heap::ComputeFastPromotionMode() {
!ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
- PrintIsolate(
- isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
- fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
+ PrintIsolate(isolate(), "Fast promotion mode: %s survival rate: %zu%%\n",
+ fast_promotion_mode_ ? "true" : "false",
+ survived_in_new_space);
}
}
@@ -2185,7 +2289,7 @@ bool Heap::ExternalStringTable::Contains(String string) {
void Heap::UpdateExternalString(String string, size_t old_payload,
size_t new_payload) {
- DCHECK(string->IsExternalString());
+ DCHECK(string.IsExternalString());
Page* page = Page::FromHeapObject(string);
if (old_payload > new_payload) {
@@ -2200,7 +2304,7 @@ void Heap::UpdateExternalString(String string, size_t old_payload,
String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
HeapObject obj = HeapObject::cast(*p);
- MapWord first_word = obj->map_word();
+ MapWord first_word = obj.map_word();
String new_string;
@@ -2208,9 +2312,9 @@ String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
String string = String::cast(obj);
- if (!string->IsExternalString()) {
+ if (!string.IsExternalString()) {
// Original external string has been internalized.
- DCHECK(string->IsThinString());
+ DCHECK(string.IsThinString());
return String();
}
heap->FinalizeExternalString(string);
@@ -2222,19 +2326,19 @@ String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
}
// String is still reachable.
- if (new_string->IsThinString()) {
+ if (new_string.IsThinString()) {
// Filtering Thin strings out of the external string table.
return String();
- } else if (new_string->IsExternalString()) {
+ } else if (new_string.IsExternalString()) {
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
- ExternalString::cast(new_string)->ExternalPayloadSize());
+ ExternalString::cast(new_string).ExternalPayloadSize());
return new_string;
}
// Internalization can replace external strings with non-external strings.
- return new_string->IsExternalString() ? new_string : String();
+ return new_string.IsExternalString() ? new_string : String();
}
void Heap::ExternalStringTable::VerifyYoung() {
@@ -2247,12 +2351,12 @@ void Heap::ExternalStringTable::VerifyYoung() {
MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
DCHECK(mc->InYoungGeneration());
DCHECK(heap_->InYoungGeneration(obj));
- DCHECK(!obj->IsTheHole(heap_->isolate()));
- DCHECK(obj->IsExternalString());
+ DCHECK(!obj.IsTheHole(heap_->isolate()));
+ DCHECK(obj.IsExternalString());
// Note: we can have repeated elements in the table.
DCHECK_EQ(0, visited_map.count(obj));
visited_map.insert(obj);
- size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
+ size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize();
}
for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
it != size_map.end(); it++)
@@ -2271,12 +2375,12 @@ void Heap::ExternalStringTable::Verify() {
MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
DCHECK(!mc->InYoungGeneration());
DCHECK(!heap_->InYoungGeneration(obj));
- DCHECK(!obj->IsTheHole(heap_->isolate()));
- DCHECK(obj->IsExternalString());
+ DCHECK(!obj.IsTheHole(heap_->isolate()));
+ DCHECK(obj.IsExternalString());
// Note: we can have repeated elements in the table.
DCHECK_EQ(0, visited_map.count(obj));
visited_map.insert(obj);
- size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
+ size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize();
}
for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
it != size_map.end(); it++)
@@ -2297,7 +2401,7 @@ void Heap::ExternalStringTable::UpdateYoungReferences(
if (target.is_null()) continue;
- DCHECK(target->IsExternalString());
+ DCHECK(target.IsExternalString());
if (InYoungGeneration(target)) {
// String is still in new space. Update the table entry.
@@ -2400,16 +2504,16 @@ void Heap::ForeachAllocationSite(
Object list, const std::function<void(AllocationSite)>& visitor) {
DisallowHeapAllocation disallow_heap_allocation;
Object current = list;
- while (current->IsAllocationSite()) {
+ while (current.IsAllocationSite()) {
AllocationSite site = AllocationSite::cast(current);
visitor(site);
- Object current_nested = site->nested_site();
- while (current_nested->IsAllocationSite()) {
+ Object current_nested = site.nested_site();
+ while (current_nested.IsAllocationSite()) {
AllocationSite nested_site = AllocationSite::cast(current_nested);
visitor(nested_site);
- current_nested = nested_site->nested_site();
+ current_nested = nested_site.nested_site();
}
- current = site->weak_next();
+ current = site.weak_next();
}
}
@@ -2419,9 +2523,9 @@ void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) {
ForeachAllocationSite(allocation_sites_list(),
[&marked, allocation, this](AllocationSite site) {
- if (site->GetAllocationType() == allocation) {
- site->ResetPretenureDecision();
- site->set_deopt_dependent_code(true);
+ if (site.GetAllocationType() == allocation) {
+ site.ResetPretenureDecision();
+ site.set_deopt_dependent_code(true);
marked = true;
RemoveAllocationSitePretenuringFeedback(site);
return;
@@ -2465,7 +2569,7 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
- DCHECK((*p)->IsExternalString());
+ DCHECK((*p).IsExternalString());
visitor_->VisitExternalString(
Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
}
@@ -2486,9 +2590,9 @@ STATIC_ASSERT(IsAligned(FixedDoubleArray::kHeaderSize, kDoubleAlignment));
// is only kTaggedSize aligned but we can keep using unaligned access since
// both x64 and arm64 architectures (where pointer compression supported)
// allow unaligned access to doubles.
-STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kTaggedSize));
+STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kTaggedSize));
#else
-STATIC_ASSERT(IsAligned(FixedTypedArrayBase::kDataOffset, kDoubleAlignment));
+STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment));
#endif
#ifdef V8_HOST_ARCH_32_BIT
@@ -2523,8 +2627,8 @@ size_t Heap::GetCodeRangeReservedAreaSize() {
}
HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) {
- CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
- return HeapObject::FromAddress(object->address() + filler_size);
+ CreateFillerObjectAt(object.address(), filler_size, ClearRecordedSlots::kNo);
+ return HeapObject::FromAddress(object.address() + filler_size);
}
HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
@@ -2532,13 +2636,13 @@ HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
AllocationAlignment alignment) {
int filler_size = allocation_size - object_size;
DCHECK_LT(0, filler_size);
- int pre_filler = GetFillToAlign(object->address(), alignment);
+ int pre_filler = GetFillToAlign(object.address(), alignment);
if (pre_filler) {
object = PrecedeWithFiller(object, pre_filler);
filler_size -= pre_filler;
}
if (filler_size) {
- CreateFillerObjectAt(object->address() + object_size, filler_size,
+ CreateFillerObjectAt(object.address() + object_size, filler_size,
ClearRecordedSlots::kNo);
}
return object;
@@ -2554,26 +2658,37 @@ void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {
void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
- const size_t new_limit =
- Max(OldGenerationSizeOfObjects() +
- heap_controller()->MinimumAllocationLimitGrowingStep(
- CurrentHeapGrowingMode()),
+ const size_t minimum_growing_step =
+ MemoryController<V8HeapTrait>::MinimumAllocationLimitGrowingStep(
+ CurrentHeapGrowingMode());
+ const size_t new_old_generation_allocation_limit =
+ Max(OldGenerationSizeOfObjects() + minimum_growing_step,
static_cast<size_t>(
static_cast<double>(old_generation_allocation_limit_) *
(tracer()->AverageSurvivalRatio() / 100)));
- if (new_limit < old_generation_allocation_limit_) {
- old_generation_allocation_limit_ = new_limit;
+ if (new_old_generation_allocation_limit <
+ old_generation_allocation_limit_) {
+ old_generation_allocation_limit_ = new_old_generation_allocation_limit;
} else {
old_generation_size_configured_ = true;
}
+ if (UseGlobalMemoryScheduling()) {
+ const size_t new_global_memory_limit = Max(
+ GlobalSizeOfObjects() + minimum_growing_step,
+ static_cast<size_t>(static_cast<double>(global_allocation_limit_) *
+ (tracer()->AverageSurvivalRatio() / 100)));
+ if (new_global_memory_limit < global_allocation_limit_) {
+ global_allocation_limit_ = new_global_memory_limit;
+ }
+ }
}
}
void Heap::FlushNumberStringCache() {
// Flush the number to string cache.
- int len = number_string_cache()->length();
+ int len = number_string_cache().length();
for (int i = 0; i < len; i++) {
- number_string_cache()->set_undefined(i);
+ number_string_cache().set_undefined(i);
}
}
@@ -2583,11 +2698,11 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
if (size == 0) return HeapObject();
HeapObject filler = HeapObject::FromAddress(addr);
if (size == kTaggedSize) {
- filler->set_map_after_allocation(
+ filler.set_map_after_allocation(
Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)),
SKIP_WRITE_BARRIER);
} else if (size == 2 * kTaggedSize) {
- filler->set_map_after_allocation(
+ filler.set_map_after_allocation(
Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)),
SKIP_WRITE_BARRIER);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
@@ -2596,10 +2711,10 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
}
} else {
DCHECK_GT(size, 2 * kTaggedSize);
- filler->set_map_after_allocation(
+ filler.set_map_after_allocation(
Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)),
SKIP_WRITE_BARRIER);
- FreeSpace::cast(filler)->relaxed_write_size(size);
+ FreeSpace::cast(filler).relaxed_write_size(size);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
(size / kTaggedSize) - 2);
@@ -2611,9 +2726,9 @@ HeapObject Heap::CreateFillerObjectAt(Address addr, int size,
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are nullptr.
- DCHECK((filler->map_slot().contains_value(kNullAddress) &&
+ DCHECK((filler.map_slot().contains_value(kNullAddress) &&
!deserialization_complete_) ||
- filler->map()->IsMap());
+ filler.map().IsMap());
return filler;
}
@@ -2666,7 +2781,7 @@ bool MayContainRecordedSlots(HeapObject object) {
// New space object do not have recorded slots.
if (MemoryChunk::FromHeapObject(object)->InYoungGeneration()) return false;
// Whitelist objects that definitely do not have pointers.
- if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
+ if (object.IsByteArray() || object.IsFixedDoubleArray()) return false;
// Conservatively return true for other objects.
return true;
}
@@ -2676,15 +2791,15 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
int size_in_bytes) {
HeapProfiler* heap_profiler = isolate_->heap_profiler();
if (heap_profiler->is_tracking_object_moves()) {
- heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+ heap_profiler->ObjectMoveEvent(source.address(), target.address(),
size_in_bytes);
}
for (auto& tracker : allocation_trackers_) {
- tracker->MoveEvent(source->address(), target->address(), size_in_bytes);
+ tracker->MoveEvent(source.address(), target.address(), size_in_bytes);
}
- if (target->IsSharedFunctionInfo()) {
- LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
- target->address()));
+ if (target.IsSharedFunctionInfo()) {
+ LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
+ target.address()));
}
if (FLAG_verify_predictable) {
@@ -2714,26 +2829,26 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
DCHECK(CanMoveObjectStart(object));
// Add custom visitor to concurrent marker if new left-trimmable type
// is added.
- DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
- const int element_size = object->IsFixedArray() ? kTaggedSize : kDoubleSize;
+ DCHECK(object.IsFixedArray() || object.IsFixedDoubleArray());
+ const int element_size = object.IsFixedArray() ? kTaggedSize : kDoubleSize;
const int bytes_to_trim = elements_to_trim * element_size;
- Map map = object->map();
+ Map map = object.map();
// For now this trick is only applied to fixed arrays which may be in new
// space or old space. In a large object space the object's start must
// coincide with chunk and thus the trick is just not applicable.
DCHECK(!IsLargeObject(object));
- DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
+ DCHECK(object.map() != ReadOnlyRoots(this).fixed_cow_array_map());
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == kTaggedSize);
STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
- const int len = object->length();
+ const int len = object.length();
DCHECK(elements_to_trim <= len);
// Calculate location of new array start.
- Address old_start = object->address();
+ Address old_start = object.address();
Address new_start = old_start + bytes_to_trim;
if (incremental_marking()->IsMarking()) {
@@ -2773,14 +2888,14 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
// Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt because
// we need pointer granularity writes to avoid race with the concurrent
// marking.
- if (filler->Size() > FreeSpace::kSize) {
+ if (filler.Size() > FreeSpace::kSize) {
MemsetTagged(filler.RawField(FreeSpace::kSize),
ReadOnlyRoots(this).undefined_value(),
- (filler->Size() - FreeSpace::kSize) / kTaggedSize);
+ (filler.Size() - FreeSpace::kSize) / kTaggedSize);
}
}
// Notify the heap profiler of change in object layout.
- OnMoveEvent(new_object, object, new_object->Size());
+ OnMoveEvent(new_object, object, new_object.Size());
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
@@ -2796,21 +2911,20 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
}
void Heap::RightTrimFixedArray(FixedArrayBase object, int elements_to_trim) {
- const int len = object->length();
+ const int len = object.length();
DCHECK_LE(elements_to_trim, len);
DCHECK_GE(elements_to_trim, 0);
int bytes_to_trim;
- DCHECK(!object->IsFixedTypedArrayBase());
- if (object->IsByteArray()) {
+ if (object.IsByteArray()) {
int new_size = ByteArray::SizeFor(len - elements_to_trim);
bytes_to_trim = ByteArray::SizeFor(len) - new_size;
DCHECK_GE(bytes_to_trim, 0);
- } else if (object->IsFixedArray()) {
+ } else if (object.IsFixedArray()) {
CHECK_NE(elements_to_trim, len);
bytes_to_trim = elements_to_trim * kTaggedSize;
} else {
- DCHECK(object->IsFixedDoubleArray());
+ DCHECK(object.IsFixedDoubleArray());
CHECK_NE(elements_to_trim, len);
bytes_to_trim = elements_to_trim * kDoubleSize;
}
@@ -2831,11 +2945,11 @@ void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
template <typename T>
void Heap::CreateFillerForArray(T object, int elements_to_trim,
int bytes_to_trim) {
- DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
- object->IsWeakFixedArray());
+ DCHECK(object.IsFixedArrayBase() || object.IsByteArray() ||
+ object.IsWeakFixedArray());
// For now this trick is only applied to objects in new and paged space.
- DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
+ DCHECK(object.map() != ReadOnlyRoots(this).fixed_cow_array_map());
if (bytes_to_trim == 0) {
DCHECK_EQ(elements_to_trim, 0);
@@ -2844,8 +2958,8 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
}
// Calculate location of new array end.
- int old_size = object->Size();
- Address old_end = object->address() + old_size;
+ int old_size = object.Size();
+ Address old_end = object.address() + old_size;
Address new_end = old_end - bytes_to_trim;
// Register the array as an object with invalidated old-to-old slots. We
@@ -2883,12 +2997,12 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
// Initialize header of the trimmed array. We are storing the new length
// using release store after creating a filler for the left-over space to
// avoid races with the sweeper thread.
- object->synchronized_set_length(object->length() - elements_to_trim);
+ object.synchronized_set_length(object.length() - elements_to_trim);
// Notify the heap object allocation tracker of change in object layout. The
// array may not be moved during GC, and size has to be adjusted nevertheless.
for (auto& tracker : allocation_trackers_) {
- tracker->UpdateObjectSizeEvent(object->address(), object->Size());
+ tracker->UpdateObjectSizeEvent(object.address(), object.Size());
}
}
@@ -2896,10 +3010,11 @@ void Heap::MakeHeapIterable() {
mark_compact_collector()->EnsureSweepingCompleted();
}
+namespace {
-static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
- const double kMinMutatorUtilization = 0.0;
- const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
+double ComputeMutatorUtilizationImpl(double mutator_speed, double gc_speed) {
+ constexpr double kMinMutatorUtilization = 0.0;
+ constexpr double kConservativeGcSpeedInBytesPerMillisecond = 200000;
if (mutator_speed == 0) return kMinMutatorUtilization;
if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
// Derivation:
@@ -2912,54 +3027,53 @@ static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
return gc_speed / (mutator_speed + gc_speed);
}
+} // namespace
-double Heap::YoungGenerationMutatorUtilization() {
- double mutator_speed = static_cast<double>(
- tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
- double gc_speed =
- tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
- double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
- if (FLAG_trace_mutator_utilization) {
- isolate()->PrintWithTimestamp(
- "Young generation mutator utilization = %.3f ("
- "mutator_speed=%.f, gc_speed=%.f)\n",
- result, mutator_speed, gc_speed);
- }
- return result;
-}
-
-
-double Heap::OldGenerationMutatorUtilization() {
- double mutator_speed = static_cast<double>(
- tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
- double gc_speed = static_cast<double>(
- tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
- double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
+double Heap::ComputeMutatorUtilization(const char* tag, double mutator_speed,
+ double gc_speed) {
+ double result = ComputeMutatorUtilizationImpl(mutator_speed, gc_speed);
if (FLAG_trace_mutator_utilization) {
isolate()->PrintWithTimestamp(
- "Old generation mutator utilization = %.3f ("
+ "%s mutator utilization = %.3f ("
"mutator_speed=%.f, gc_speed=%.f)\n",
- result, mutator_speed, gc_speed);
+ tag, result, mutator_speed, gc_speed);
}
return result;
}
-
bool Heap::HasLowYoungGenerationAllocationRate() {
- const double high_mutator_utilization = 0.993;
- return YoungGenerationMutatorUtilization() > high_mutator_utilization;
+ double mu = ComputeMutatorUtilization(
+ "Young generation",
+ tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond(),
+ tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
+ constexpr double kHighMutatorUtilization = 0.993;
+ return mu > kHighMutatorUtilization;
}
-
bool Heap::HasLowOldGenerationAllocationRate() {
- const double high_mutator_utilization = 0.993;
- return OldGenerationMutatorUtilization() > high_mutator_utilization;
+ double mu = ComputeMutatorUtilization(
+ "Old generation",
+ tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond(),
+ tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
+ const double kHighMutatorUtilization = 0.993;
+ return mu > kHighMutatorUtilization;
}
+bool Heap::HasLowEmbedderAllocationRate() {
+ if (!UseGlobalMemoryScheduling()) return true;
+
+ DCHECK_NOT_NULL(local_embedder_heap_tracer());
+ double mu = ComputeMutatorUtilization(
+ "Embedder",
+ tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond(),
+ tracer()->EmbedderSpeedInBytesPerMillisecond());
+ const double kHighMutatorUtilization = 0.993;
+ return mu > kHighMutatorUtilization;
+}
bool Heap::HasLowAllocationRate() {
return HasLowYoungGenerationAllocationRate() &&
- HasLowOldGenerationAllocationRate();
+ HasLowOldGenerationAllocationRate() && HasLowEmbedderAllocationRate();
}
bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
@@ -3126,7 +3240,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
if (marking_state->IsBlack(obj)) {
incremental_marking()->ProcessBlackAllocatedObject(obj);
}
- addr += obj->Size();
+ addr += obj.Size();
}
}
}
@@ -3198,19 +3312,19 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
// If you see this check triggering for a freshly allocated object,
// use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_.is_null()) {
- if (object->IsJSObject()) {
- DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
+ if (object.IsJSObject()) {
+ DCHECK(!object.map().TransitionRequiresSynchronizationWithGC(new_map));
} else {
// Check that the set of slots before and after the transition match.
SlotCollectingVisitor old_visitor;
- object->IterateFast(&old_visitor);
- MapWord old_map_word = object->map_word();
+ object.IterateFast(&old_visitor);
+ MapWord old_map_word = object.map_word();
// Temporarily set the new map to iterate new slots.
- object->set_map_word(MapWord::FromMap(new_map));
+ object.set_map_word(MapWord::FromMap(new_map));
SlotCollectingVisitor new_visitor;
- object->IterateFast(&new_visitor);
+ object.IterateFast(&new_visitor);
// Restore the old map.
- object->set_map_word(old_map_word);
+ object.set_map_word(old_map_word);
DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
for (int i = 0; i < new_visitor.number_of_slots(); i++) {
DCHECK(new_visitor.slot(i) == old_visitor.slot(i));
@@ -3327,7 +3441,8 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
double idle_time_in_ms = deadline_in_ms - start_ms;
tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
- OldGenerationAllocationCounter());
+ OldGenerationAllocationCounter(),
+ EmbedderAllocationCounter());
GCIdleTimeHeapState heap_state = ComputeHeapState();
@@ -3580,6 +3695,8 @@ const char* Heap::GarbageCollectionReasonToString(
return "testing";
case GarbageCollectionReason::kExternalFinalize:
return "external finalize";
+ case GarbageCollectionReason::kGlobalAllocationLimit:
+ return "global allocation limit";
case GarbageCollectionReason::kUnknown:
return "unknown";
}
@@ -3587,14 +3704,7 @@ const char* Heap::GarbageCollectionReasonToString(
}
bool Heap::Contains(HeapObject value) {
- // Check RO_SPACE first because IsOutsideAllocatedSpace cannot account for a
- // shared RO_SPACE.
- // TODO(goszczycki): Exclude read-only space. Use ReadOnlyHeap::Contains where
- // appropriate.
- if (read_only_space_ != nullptr && read_only_space_->Contains(value)) {
- return true;
- }
- if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+ if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
return HasBeenSetUp() &&
@@ -3605,7 +3715,7 @@ bool Heap::Contains(HeapObject value) {
}
bool Heap::InSpace(HeapObject value, AllocationSpace space) {
- if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+ if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
if (!HasBeenSetUp()) return false;
@@ -3684,7 +3794,7 @@ class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
void VerifyPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
if (!host.is_null()) {
- CHECK(ReadOnlyHeap::Contains(host->map()));
+ CHECK(ReadOnlyHeap::Contains(host.map()));
}
VerifyPointersVisitor::VerifyPointers(host, start, end);
@@ -3710,7 +3820,7 @@ void Heap::Verify() {
if (!isolate()->context().is_null() &&
!isolate()->normalized_map_cache()->IsUndefined(isolate())) {
NormalizedMapCache::cast(*isolate()->normalized_map_cache())
- ->NormalizedMapCacheVerify(isolate());
+ .NormalizedMapCacheVerify(isolate());
}
VerifySmisVisitor smis_visitor;
@@ -3727,9 +3837,18 @@ void Heap::Verify() {
lo_space_->Verify(isolate());
code_lo_space_->Verify(isolate());
new_lo_space_->Verify(isolate());
+}
+void Heap::VerifyReadOnlyHeap() {
+ CHECK(!read_only_space_->writable());
+ // TODO(v8:7464): Always verify read-only space once PagedSpace::Verify
+ // supports verifying shared read-only space. Currently HeapObjectIterator is
+ // explicitly disabled for read-only space when sharing is enabled, because it
+ // relies on PagedSpace::heap_ being non-null.
+#ifndef V8_SHARED_RO_HEAP
VerifyReadOnlyPointersVisitor read_only_visitor(this);
read_only_space_->Verify(isolate(), &read_only_visitor);
+#endif
}
class SlotVerifyingVisitor : public ObjectVisitor {
@@ -3772,7 +3891,8 @@ class SlotVerifyingVisitor : public ObjectVisitor {
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
Object target = rinfo->target_object();
if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
- CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
+ CHECK(InTypedSet(FULL_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
+ InTypedSet(COMPRESSED_EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
(rinfo->IsInConstantPool() &&
InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
}
@@ -3859,8 +3979,8 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
// In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
chunk->mutex());
- Address start = object->address();
- Address end = start + object->Size();
+ Address start = object.address();
+ Address end = start + object.Size();
std::set<Address> old_to_new;
std::set<std::pair<SlotType, Address> > typed_old_to_new;
if (!InYoungGeneration(object)) {
@@ -3868,7 +3988,7 @@ void Heap::VerifyRememberedSetFor(HeapObject object) {
CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new,
&this->ephemeron_remembered_set_);
- object->IterateBody(&visitor);
+ object.IterateBody(&visitor);
}
// TODO(ulan): Add old to old slot set verification once all weak objects
// have their own instance types and slots are recorded for all weal fields.
@@ -3983,25 +4103,25 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
private:
inline void FixHandle(FullObjectSlot p) {
- if (!(*p)->IsHeapObject()) return;
+ if (!(*p).IsHeapObject()) return;
HeapObject current = HeapObject::cast(*p);
- const MapWord map_word = current->map_word();
- if (!map_word.IsForwardingAddress() && current->IsFiller()) {
+ const MapWord map_word = current.map_word();
+ if (!map_word.IsForwardingAddress() && current.IsFiller()) {
#ifdef DEBUG
// We need to find a FixedArrayBase map after walking the fillers.
- while (current->IsFiller()) {
- Address next = current->ptr();
- if (current->map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
+ while (current.IsFiller()) {
+ Address next = current.ptr();
+ if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
next += kTaggedSize;
- } else if (current->map() ==
+ } else if (current.map() ==
ReadOnlyRoots(heap_).two_pointer_filler_map()) {
next += 2 * kTaggedSize;
} else {
- next += current->Size();
+ next += current.Size();
}
current = HeapObject::cast(Object(next));
}
- DCHECK(current->IsFixedArrayBase());
+ DCHECK(current.IsFixedArrayBase());
#endif // DEBUG
p.store(Smi::kZero);
}
@@ -4193,7 +4313,7 @@ void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
if (FLAG_trace_gc) {
PrintIsolate(isolate_,
"Min semi-space size cannot be more than the maximum "
- "semi-space size of %" PRIuS " MB\n",
+ "semi-space size of %zu MB\n",
max_semi_space_size_ / MB);
}
} else {
@@ -4219,7 +4339,7 @@ void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
} else {
initial_old_generation_size_ =
- max_old_generation_size_ / kInitalOldGenerationLimitFactor;
+ Min(max_old_generation_size_, kMaxInitialOldGenerationSize);
}
old_generation_allocation_limit_ = initial_old_generation_size_;
@@ -4286,10 +4406,10 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
HeapIterator iterator(this);
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- InstanceType type = obj->map()->instance_type();
+ InstanceType type = obj.map().instance_type();
DCHECK(0 <= type && type <= LAST_TYPE);
stats->objects_per_type[type]++;
- stats->size_per_type[type] += obj->Size();
+ stats->size_per_type[type] += obj.Size();
}
}
if (stats->last_few_messages != nullptr)
@@ -4306,7 +4426,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
size_t Heap::OldGenerationSizeOfObjects() {
- PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
+ PagedSpaces spaces(this);
size_t total = 0;
for (PagedSpace* space = spaces.next(); space != nullptr;
space = spaces.next()) {
@@ -4315,6 +4435,15 @@ size_t Heap::OldGenerationSizeOfObjects() {
return total + lo_space_->SizeOfObjects();
}
+size_t Heap::GlobalSizeOfObjects() {
+ const size_t on_heap_size = OldGenerationSizeOfObjects();
+ const size_t embedder_size =
+ local_embedder_heap_tracer()
+ ? local_embedder_heap_tracer()->allocated_size()
+ : 0;
+ return on_heap_size + embedder_size;
+}
+
uint64_t Heap::PromotedExternalMemorySize() {
IsolateData* isolate_data = isolate()->isolate_data();
if (isolate_data->external_memory_ <=
@@ -4374,6 +4503,14 @@ Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
return Heap::HeapGrowingMode::kDefault;
}
+size_t Heap::GlobalMemoryAvailable() {
+ return UseGlobalMemoryScheduling()
+ ? GlobalSizeOfObjects() < global_allocation_limit_
+ ? global_allocation_limit_ - GlobalSizeOfObjects()
+ : 0
+ : 1;
+}
+
// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
// The kNoLimit means that either incremental marking is disabled or it is too
// early to start incremental marking.
@@ -4434,8 +4571,10 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
}
size_t old_generation_space_available = OldGenerationSpaceAvailable();
+ const size_t global_memory_available = GlobalMemoryAvailable();
- if (old_generation_space_available > new_space_->Capacity()) {
+ if (old_generation_space_available > new_space_->Capacity() &&
+ (global_memory_available > 0)) {
return IncrementalMarkingLimit::kNoLimit;
}
if (ShouldOptimizeForMemoryUsage()) {
@@ -4447,6 +4586,9 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (old_generation_space_available == 0) {
return IncrementalMarkingLimit::kHardLimit;
}
+ if (global_memory_available == 0) {
+ return IncrementalMarkingLimit::kHardLimit;
+ }
return IncrementalMarkingLimit::kSoftLimit;
}
@@ -4484,16 +4626,16 @@ HeapObject Heap::EnsureImmovableCode(HeapObject heap_object, int object_size) {
DCHECK_GE(object_size, 0);
if (!Heap::IsImmovable(heap_object)) {
if (isolate()->serializer_enabled() ||
- code_space_->first_page()->Contains(heap_object->address())) {
+ code_space_->first_page()->Contains(heap_object.address())) {
MemoryChunk::FromHeapObject(heap_object)->MarkNeverEvacuate();
} else {
// Discard the first code allocation, which was on a page where it could
// be moved.
- CreateFillerObjectAt(heap_object->address(), object_size,
+ CreateFillerObjectAt(heap_object.address(), object_size,
ClearRecordedSlots::kNo);
heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
UnprotectAndRegisterMemoryChunk(heap_object);
- ZapCodeObject(heap_object->address(), object_size);
+ ZapCodeObject(heap_object.address(), object_size);
OnAllocationEvent(heap_object, object_size);
}
}
@@ -4599,8 +4741,6 @@ void Heap::SetUp() {
store_buffer_.reset(new StoreBuffer(this));
- heap_controller_.reset(new HeapController(this));
-
mark_compact_collector_.reset(new MarkCompactCollector(this));
scavenger_collector_.reset(new ScavengerCollector(this));
@@ -4712,7 +4852,7 @@ void Heap::InitializeHashSeed() {
} else {
new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
}
- ReadOnlyRoots(this).hash_seed()->copy_in(
+ ReadOnlyRoots(this).hash_seed().copy_in(
0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
}
@@ -4812,13 +4952,19 @@ EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
return local_embedder_heap_tracer()->remote_tracer();
}
+EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
+ if (ShouldReduceMemory())
+ return EmbedderHeapTracer::TraceFlags::kReduceMemory;
+ return EmbedderHeapTracer::TraceFlags::kNoFlags;
+}
+
void Heap::RegisterExternallyReferencedObject(Address* location) {
// The embedder is not aware of whether numbers are materialized as heap
// objects are just passed around as Smis.
Object object(*location);
- if (!object->IsHeapObject()) return;
+ if (!object.IsHeapObject()) return;
HeapObject heap_object = HeapObject::cast(object);
- DCHECK(Contains(heap_object));
+ DCHECK(IsValidHeapObject(this, heap_object));
if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
incremental_marking()->WhiteToGreyAndPush(heap_object);
} else {
@@ -4870,8 +5016,6 @@ void Heap::TearDown() {
stress_scavenge_observer_ = nullptr;
}
- heap_controller_.reset();
-
if (mark_compact_collector_) {
mark_compact_collector_->TearDown();
mark_compact_collector_.reset();
@@ -5016,9 +5160,9 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
{
HeapIterator iterator(this);
for (HeapObject o = iterator.next(); !o.is_null(); o = iterator.next()) {
- if (o->IsPrototypeInfo()) {
+ if (o.IsPrototypeInfo()) {
PrototypeInfo prototype_info = PrototypeInfo::cast(o);
- if (prototype_info->prototype_users()->IsWeakArrayList()) {
+ if (prototype_info.prototype_users().IsWeakArrayList()) {
prototype_infos.emplace_back(handle(prototype_info, isolate()));
}
}
@@ -5070,23 +5214,23 @@ void Heap::AddRetainedMap(Handle<Map> map) {
void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
DCHECK_EQ(retained_maps, this->retained_maps());
- int length = retained_maps->length();
+ int length = retained_maps.length();
int new_length = 0;
int new_number_of_disposed_maps = 0;
// This loop compacts the array by removing cleared weak cells.
for (int i = 0; i < length; i += 2) {
- MaybeObject maybe_object = retained_maps->Get(i);
+ MaybeObject maybe_object = retained_maps.Get(i);
if (maybe_object->IsCleared()) {
continue;
}
DCHECK(maybe_object->IsWeak());
- MaybeObject age = retained_maps->Get(i + 1);
+ MaybeObject age = retained_maps.Get(i + 1);
DCHECK(age->IsSmi());
if (i != new_length) {
- retained_maps->Set(new_length, maybe_object);
- retained_maps->Set(new_length + 1, age);
+ retained_maps.Set(new_length, maybe_object);
+ retained_maps.Set(new_length + 1, age);
}
if (i < number_of_disposed_maps_) {
new_number_of_disposed_maps += 2;
@@ -5096,9 +5240,9 @@ void Heap::CompactRetainedMaps(WeakArrayList retained_maps) {
number_of_disposed_maps_ = new_number_of_disposed_maps;
HeapObject undefined = ReadOnlyRoots(this).undefined_value();
for (int i = new_length; i < length; i++) {
- retained_maps->Set(i, HeapObjectReference::Strong(undefined));
+ retained_maps.Set(i, HeapObjectReference::Strong(undefined));
}
- if (new_length != length) retained_maps->set_length(new_length);
+ if (new_length != length) retained_maps.set_length(new_length);
}
void Heap::FatalProcessOutOfMemory(const char* location) {
@@ -5211,7 +5355,7 @@ PagedSpace* PagedSpaces::next() {
}
SpaceIterator::SpaceIterator(Heap* heap)
- : heap_(heap), current_space_(FIRST_SPACE - 1) {}
+ : heap_(heap), current_space_(FIRST_MUTABLE_SPACE - 1) {}
SpaceIterator::~SpaceIterator() = default;
@@ -5247,7 +5391,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
bool SkipObject(HeapObject object) override {
- if (object->IsFiller()) return true;
+ if (object.IsFiller()) return true;
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
if (reachable_.count(chunk) == 0) return true;
return reachable_[chunk]->count(object) == 0;
@@ -5296,7 +5440,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
while (!marking_stack_.empty()) {
HeapObject obj = marking_stack_.back();
marking_stack_.pop_back();
- obj->Iterate(this);
+ obj.Iterate(this);
}
}
@@ -5417,13 +5561,13 @@ void Heap::ExternalStringTable::CleanUpYoung() {
Isolate* isolate = heap_->isolate();
for (size_t i = 0; i < young_strings_.size(); ++i) {
Object o = young_strings_[i];
- if (o->IsTheHole(isolate)) {
+ if (o.IsTheHole(isolate)) {
continue;
}
// The real external string is already in one of these vectors and was or
// will be processed. Re-processing it will add a duplicate to the vector.
- if (o->IsThinString()) continue;
- DCHECK(o->IsExternalString());
+ if (o.IsThinString()) continue;
+ DCHECK(o.IsExternalString());
if (InYoungGeneration(o)) {
young_strings_[last++] = o;
} else {
@@ -5439,13 +5583,13 @@ void Heap::ExternalStringTable::CleanUpAll() {
Isolate* isolate = heap_->isolate();
for (size_t i = 0; i < old_strings_.size(); ++i) {
Object o = old_strings_[i];
- if (o->IsTheHole(isolate)) {
+ if (o.IsTheHole(isolate)) {
continue;
}
// The real external string is already in one of these vectors and was or
// will be processed. Re-processing it will add a duplicate to the vector.
- if (o->IsThinString()) continue;
- DCHECK(o->IsExternalString());
+ if (o.IsThinString()) continue;
+ DCHECK(o.IsExternalString());
DCHECK(!InYoungGeneration(o));
old_strings_[last++] = o;
}
@@ -5461,14 +5605,14 @@ void Heap::ExternalStringTable::TearDown() {
for (size_t i = 0; i < young_strings_.size(); ++i) {
Object o = young_strings_[i];
// Dont finalize thin strings.
- if (o->IsThinString()) continue;
+ if (o.IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
}
young_strings_.clear();
for (size_t i = 0; i < old_strings_.size(); ++i) {
Object o = old_strings_[i];
// Dont finalize thin strings.
- if (o->IsThinString()) continue;
+ if (o.IsThinString()) continue;
heap_->FinalizeExternalString(ExternalString::cast(o));
}
old_strings_.clear();
@@ -5519,7 +5663,7 @@ void Heap::SetBuiltinsConstantsTable(FixedArray cache) {
}
void Heap::SetInterpreterEntryTrampolineForProfiling(Code code) {
- DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code->builtin_index());
+ DCHECK_EQ(Builtins::kInterpreterEntryTrampoline, code.builtin_index());
set_interpreter_entry_trampoline_for_profiling(code);
}
@@ -5527,12 +5671,12 @@ void Heap::AddDirtyJSFinalizationGroup(
JSFinalizationGroup finalization_group,
std::function<void(HeapObject object, ObjectSlot slot, Object target)>
gc_notify_updated_slot) {
- DCHECK(dirty_js_finalization_groups()->IsUndefined(isolate()) ||
- dirty_js_finalization_groups()->IsJSFinalizationGroup());
- DCHECK(finalization_group->next()->IsUndefined(isolate()));
- DCHECK(!finalization_group->scheduled_for_cleanup());
- finalization_group->set_scheduled_for_cleanup(true);
- finalization_group->set_next(dirty_js_finalization_groups());
+ DCHECK(dirty_js_finalization_groups().IsUndefined(isolate()) ||
+ dirty_js_finalization_groups().IsJSFinalizationGroup());
+ DCHECK(finalization_group.next().IsUndefined(isolate()));
+ DCHECK(!finalization_group.scheduled_for_cleanup());
+ finalization_group.set_scheduled_for_cleanup(true);
+ finalization_group.set_next(dirty_js_finalization_groups());
gc_notify_updated_slot(
finalization_group,
finalization_group.RawField(JSFinalizationGroup::kNextOffset),
@@ -5544,10 +5688,10 @@ void Heap::AddDirtyJSFinalizationGroup(
void Heap::AddKeepDuringJobTarget(Handle<JSReceiver> target) {
DCHECK(FLAG_harmony_weak_refs);
- DCHECK(weak_refs_keep_during_job()->IsUndefined() ||
- weak_refs_keep_during_job()->IsOrderedHashSet());
+ DCHECK(weak_refs_keep_during_job().IsUndefined() ||
+ weak_refs_keep_during_job().IsOrderedHashSet());
Handle<OrderedHashSet> table;
- if (weak_refs_keep_during_job()->IsUndefined(isolate())) {
+ if (weak_refs_keep_during_job().IsUndefined(isolate())) {
table = isolate()->factory()->NewOrderedHashSet();
} else {
table =
@@ -5607,17 +5751,17 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
size_t Heap::NumberOfNativeContexts() {
int result = 0;
Object context = native_contexts_list();
- while (!context->IsUndefined(isolate())) {
+ while (!context.IsUndefined(isolate())) {
++result;
Context native_context = Context::cast(context);
- context = native_context->next_context_link();
+ context = native_context.next_context_link();
}
return result;
}
size_t Heap::NumberOfDetachedContexts() {
// The detached_contexts() array has two entries per detached context.
- return detached_contexts()->length() / 2;
+ return detached_contexts().length() / 2;
}
void VerifyPointersVisitor::VisitPointers(HeapObject host, ObjectSlot start,
@@ -5639,8 +5783,8 @@ void VerifyPointersVisitor::VisitRootPointers(Root root,
}
void VerifyPointersVisitor::VerifyHeapObjectImpl(HeapObject heap_object) {
- CHECK(heap_->Contains(heap_object));
- CHECK(heap_object->map()->IsMap());
+ CHECK(IsValidHeapObject(heap_, heap_object));
+ CHECK(heap_object.map().IsMap());
}
template <typename TSlot>
@@ -5651,7 +5795,7 @@ void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
if (object.GetHeapObject(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
} else {
- CHECK(object->IsSmi() || object->IsCleared());
+ CHECK(object.IsSmi() || object.IsCleared());
}
}
}
@@ -5663,7 +5807,7 @@ void VerifyPointersVisitor::VerifyPointers(HeapObject host,
// to one of objects in DATA_ONLY_VISITOR_ID_LIST. You can fix
// this by moving that object to POINTER_VISITOR_ID_LIST.
DCHECK_EQ(ObjectFields::kMaybePointers,
- Map::ObjectFieldsFrom(host->map()->visitor_id()));
+ Map::ObjectFieldsFrom(host.map().visitor_id()));
VerifyPointersImpl(start, end);
}
@@ -5680,11 +5824,11 @@ void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
FullObjectSlot start,
FullObjectSlot end) {
for (FullObjectSlot current = start; current < end; ++current) {
- CHECK((*current)->IsSmi());
+ CHECK((*current).IsSmi());
}
}
-bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
+bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
// Object migration is governed by the following rules:
//
// 1) Objects in new-space can be migrated to the old space
@@ -5697,8 +5841,8 @@ bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
//
// Since this function is used for debugging only, we do not place
// asserts here, but check everything explicitly.
- if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
- InstanceType type = obj->map()->instance_type();
+ if (map == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
+ InstanceType type = map.instance_type();
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
AllocationSpace src = chunk->owner()->identity();
switch (src) {
@@ -5718,6 +5862,12 @@ bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
UNREACHABLE();
}
+size_t Heap::EmbedderAllocationCounter() const {
+ return local_embedder_heap_tracer()
+ ? local_embedder_heap_tracer()->accumulated_allocated_size()
+ : 0;
+}
+
void Heap::CreateObjectStats() {
if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
if (!live_object_stats_) {
@@ -5740,33 +5890,25 @@ void AllocationObserver::AllocationStep(int bytes_allocated,
DCHECK_GE(bytes_to_next_step_, 0);
}
-namespace {
-
-Map GcSafeMapOfCodeSpaceObject(HeapObject object) {
- MapWord map_word = object->map_word();
- return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
+Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
+ MapWord map_word = object.map_word();
+ return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress().map()
: map_word.ToMap();
}
-int GcSafeSizeOfCodeSpaceObject(HeapObject object) {
- return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
-}
-
-Code GcSafeCastToCode(Heap* heap, HeapObject object, Address inner_pointer) {
+Code Heap::GcSafeCastToCode(HeapObject object, Address inner_pointer) {
Code code = Code::unchecked_cast(object);
DCHECK(!code.is_null());
- DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
+ DCHECK(GcSafeCodeContains(code, inner_pointer));
return code;
}
-} // namespace
-
bool Heap::GcSafeCodeContains(Code code, Address addr) {
Map map = GcSafeMapOfCodeSpaceObject(code);
DCHECK(map == ReadOnlyRoots(this).code_map());
if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
- Address start = code->address();
- Address end = code->address() + code->SizeFromMap(map);
+ Address start = code.address();
+ Address end = code.address() + code.SizeFromMap(map);
return start <= addr && addr < end;
}
@@ -5777,7 +5919,7 @@ Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = code_lo_space()->FindPage(inner_pointer);
if (large_page != nullptr) {
- return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
+ return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
}
DCHECK(code_space()->Contains(inner_pointer));
@@ -5785,32 +5927,16 @@ Code Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
// Iterate through the page until we reach the end or find an object starting
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
- DCHECK_EQ(page->owner(), code_space());
- mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);
-
- Address addr = page->skip_list()->StartFor(inner_pointer);
- Address top = code_space()->top();
- Address limit = code_space()->limit();
-
- while (true) {
- if (addr == top && addr != limit) {
- addr = limit;
- continue;
- }
- HeapObject obj = HeapObject::FromAddress(addr);
- int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
- Address next_addr = addr + obj_size;
- if (next_addr > inner_pointer) {
- return GcSafeCastToCode(this, obj, inner_pointer);
- }
- addr = next_addr;
- }
+ Address start =
+ page->GetCodeObjectRegistry()->GetCodeObjectStartFromInnerAddress(
+ inner_pointer);
+ return GcSafeCastToCode(HeapObject::FromAddress(start), inner_pointer);
}
void Heap::WriteBarrierForCodeSlow(Code code) {
- for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
- !it.done(); it.next()) {
+ for (RelocIterator it(code, RelocInfo::EmbeddedObjectModeMask()); !it.done();
+ it.next()) {
GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
MarkingBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
}
@@ -5842,16 +5968,104 @@ void Heap::EphemeronKeyWriteBarrierFromCode(Address raw_object,
if (!ObjectInYoungGeneration(table) && ObjectInYoungGeneration(key)) {
isolate->heap()->RecordEphemeronKeyWrite(table, key_slot_address);
}
- isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(table, key_slot,
- maybe_key);
+ isolate->heap()->incremental_marking()->RecordWrite(table, key_slot,
+ maybe_key);
}
-void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray array,
- int offset, int length) {
- for (int i = 0; i < length; i++) {
- if (!InYoungGeneration(array->get(offset + i))) continue;
- heap->store_buffer()->InsertEntry(
- array->RawFieldOfElementAt(offset + i).address());
+enum RangeWriteBarrierMode {
+ kDoGenerational = 1 << 0,
+ kDoMarking = 1 << 1,
+ kDoEvacuationSlotRecording = 1 << 2,
+};
+
+template <int kModeMask, typename TSlot>
+void Heap::WriteBarrierForRangeImpl(MemoryChunk* source_page, HeapObject object,
+ TSlot start_slot, TSlot end_slot) {
+ // At least one of generational or marking write barrier should be requested.
+ STATIC_ASSERT(kModeMask & (kDoGenerational | kDoMarking));
+ // kDoEvacuationSlotRecording implies kDoMarking.
+ STATIC_ASSERT(!(kModeMask & kDoEvacuationSlotRecording) ||
+ (kModeMask & kDoMarking));
+
+ StoreBuffer* store_buffer = this->store_buffer();
+ IncrementalMarking* incremental_marking = this->incremental_marking();
+ MarkCompactCollector* collector = this->mark_compact_collector();
+
+ for (TSlot slot = start_slot; slot < end_slot; ++slot) {
+ typename TSlot::TObject value = *slot;
+ HeapObject value_heap_object;
+ if (!value.GetHeapObject(&value_heap_object)) continue;
+
+ if ((kModeMask & kDoGenerational) &&
+ Heap::InYoungGeneration(value_heap_object)) {
+ store_buffer->InsertEntry(slot.address());
+ }
+
+ if ((kModeMask & kDoMarking) &&
+ incremental_marking->BaseRecordWrite(object, value_heap_object)) {
+ if (kModeMask & kDoEvacuationSlotRecording) {
+ collector->RecordSlot(source_page, HeapObjectSlot(slot),
+ value_heap_object);
+ }
+ }
+ }
+}
+
+// Instantiate Heap::WriteBarrierForRange() for ObjectSlot and MaybeObjectSlot.
+template void Heap::WriteBarrierForRange<ObjectSlot>(HeapObject object,
+ ObjectSlot start_slot,
+ ObjectSlot end_slot);
+template void Heap::WriteBarrierForRange<MaybeObjectSlot>(
+ HeapObject object, MaybeObjectSlot start_slot, MaybeObjectSlot end_slot);
+
+template <typename TSlot>
+void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
+ TSlot end_slot) {
+ MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
+ base::Flags<RangeWriteBarrierMode> mode;
+
+ if (!source_page->InYoungGeneration()) {
+ mode |= kDoGenerational;
+ }
+
+ if (incremental_marking()->IsMarking()) {
+ mode |= kDoMarking;
+ if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
+ mode |= kDoEvacuationSlotRecording;
+ }
+ }
+
+ switch (mode) {
+ // Nothing to be done.
+ case 0:
+ return;
+
+ // Generational only.
+ case kDoGenerational:
+ return WriteBarrierForRangeImpl<kDoGenerational>(source_page, object,
+ start_slot, end_slot);
+ // Marking, no evacuation slot recording.
+ case kDoMarking:
+ return WriteBarrierForRangeImpl<kDoMarking>(source_page, object,
+ start_slot, end_slot);
+ // Marking with evacuation slot recording.
+ case kDoMarking | kDoEvacuationSlotRecording:
+ return WriteBarrierForRangeImpl<kDoMarking | kDoEvacuationSlotRecording>(
+ source_page, object, start_slot, end_slot);
+
+ // Generational and marking, no evacuation slot recording.
+ case kDoGenerational | kDoMarking:
+ return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking>(
+ source_page, object, start_slot, end_slot);
+
+ // Generational and marking with evacuation slot recording.
+ case kDoGenerational | kDoMarking | kDoEvacuationSlotRecording:
+ return WriteBarrierForRangeImpl<kDoGenerational | kDoMarking |
+ kDoEvacuationSlotRecording>(
+ source_page, object, start_slot, end_slot);
+
+ default:
+ UNREACHABLE();
}
}
@@ -5867,7 +6081,10 @@ void Heap::GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo,
if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
} else {
- DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+ // Constant pools don't currently support compressed objects, as
+ // their values are all pointer sized (though this could change
+ // therefore we have a DCHECK).
+ DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
slot_type = OBJECT_SLOT;
}
}
@@ -5884,16 +6101,6 @@ void Heap::MarkingBarrierSlow(HeapObject object, Address slot,
value);
}
-void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject object) {
- IncrementalMarking::MarkingState* marking_state =
- heap->incremental_marking()->marking_state();
- if (!marking_state->IsBlack(object)) {
- marking_state->WhiteToGrey(object);
- marking_state->GreyToBlack(object);
- }
- heap->incremental_marking()->RevisitObject(object);
-}
-
void Heap::MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo,
HeapObject object) {
Heap* heap = Heap::FromWritableHeapObject(host);
@@ -5907,7 +6114,7 @@ void Heap::MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
DCHECK(heap->incremental_marking()->IsMarking());
DescriptorArray descriptor_array =
DescriptorArray::cast(raw_descriptor_array);
- int16_t raw_marked = descriptor_array->raw_number_of_marked_descriptors();
+ int16_t raw_marked = descriptor_array.raw_number_of_marked_descriptors();
if (NumberOfMarkedDescriptors::decode(heap->mark_compact_collector()->epoch(),
raw_marked) <
number_of_own_descriptors) {
@@ -5917,20 +6124,35 @@ void Heap::MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host,
}
bool Heap::PageFlagsAreConsistent(HeapObject object) {
- Heap* heap = Heap::FromWritableHeapObject(object);
MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
heap_internals::MemoryChunk* slim_chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
- const bool generation_consistency =
- chunk->owner()->identity() != NEW_SPACE ||
- (chunk->InYoungGeneration() && slim_chunk->InYoungGeneration());
- const bool marking_consistency =
- !heap->incremental_marking()->IsMarking() ||
- (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
- slim_chunk->IsMarking());
-
- return generation_consistency && marking_consistency;
+ // Slim chunk flags consistency.
+ CHECK_EQ(chunk->InYoungGeneration(), slim_chunk->InYoungGeneration());
+ CHECK_EQ(chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING),
+ slim_chunk->IsMarking());
+
+ Space* chunk_owner = chunk->owner();
+ AllocationSpace identity = chunk_owner->identity();
+
+ // Generation consistency.
+ CHECK_EQ(identity == NEW_SPACE || identity == NEW_LO_SPACE,
+ slim_chunk->InYoungGeneration());
+
+ // Marking consistency.
+ if (identity != RO_SPACE ||
+ static_cast<ReadOnlySpace*>(chunk->owner())->writable()) {
+ // RO_SPACE can be shared between heaps, so we can't use RO_SPACE objects to
+ // find a heap. The exception is when the ReadOnlySpace is writeable, during
+ // bootstrapping, so explicitly allow this case.
+ Heap* heap = Heap::FromWritableHeapObject(object);
+ CHECK_EQ(slim_chunk->IsMarking(), heap->incremental_marking()->IsMarking());
+ } else {
+ // Non-writable RO_SPACE must never have marking flag set.
+ CHECK(!slim_chunk->IsMarking());
+ }
+ return true;
}
static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 1725a9ad87..a242bd80d1 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -15,20 +15,20 @@
// Do not include anything from src/heap here!
#include "include/v8-internal.h"
#include "include/v8.h"
-#include "src/accessors.h"
-#include "src/allocation.h"
-#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
-#include "src/globals.h"
-#include "src/heap-symbols.h"
-#include "src/objects.h"
+#include "src/builtins/accessors.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
+#include "src/init/heap-symbols.h"
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
#include "src/objects/smi.h"
#include "src/objects/string-table.h"
-#include "src/roots.h"
-#include "src/visitors.h"
+#include "src/objects/visitors.h"
+#include "src/roots/roots.h"
+#include "src/utils/allocation.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
namespace v8 {
@@ -129,7 +129,8 @@ enum class GarbageCollectionReason {
kSamplingProfiler = 19,
kSnapshotCreator = 20,
kTesting = 21,
- kExternalFinalize = 22
+ kExternalFinalize = 22,
+ kGlobalAllocationLimit = 23,
// If you add new items here, then update the incremental_marking_reason,
// mark_compact_reason, and scavenge_reason counters in counters.h.
// Also update src/tools/metrics/histograms/histograms.xml in chromium.
@@ -159,12 +160,12 @@ class AllocationResult {
: object_(object) {
// AllocationResults can't return Smis, which are used to represent
// failure and the space to retry in.
- CHECK(!object->IsSmi());
+ CHECK(!object.IsSmi());
}
AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
- inline bool IsRetry() { return object_->IsSmi(); }
+ inline bool IsRetry() { return object_.IsSmi(); }
inline HeapObject ToObjectChecked();
inline AllocationSpace RetrySpace();
@@ -211,6 +212,8 @@ class Heap {
EphemeronRememberedSet ephemeron_remembered_set_;
enum FindMementoMode { kForRuntime, kForGC };
+ enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };
+
enum HeapState {
NOT_IN_GC,
SCAVENGE,
@@ -235,8 +238,6 @@ class Heap {
};
using Reservation = std::vector<Chunk>;
- static const int kInitalOldGenerationLimitFactor = 2;
-
#if V8_OS_ANDROID
// Don't apply pointer multiplier on Android since it has no swap space and
// should instead adapt it's heap size based on available physical memory.
@@ -246,6 +247,9 @@ class Heap {
static const int kPointerMultiplier = i::kSystemPointerSize / 4;
#endif
+ static const size_t kMaxInitialOldGenerationSize =
+ 256 * MB * kPointerMultiplier;
+
// Semi-space size needs to be a multiple of page size.
static const size_t kMinSemiSpaceSizeInKB = 512 * kPointerMultiplier;
static const size_t kMaxSemiSpaceSizeInKB = 8192 * kPointerMultiplier;
@@ -351,6 +355,12 @@ class Heap {
// by pointer size.
static inline void CopyBlock(Address dst, Address src, int byte_size);
+ // Executes generational and/or marking write barrier for a [start, end) range
+ // of non-weak slots inside |object|.
+ template <typename TSlot>
+ V8_EXPORT_PRIVATE void WriteBarrierForRange(HeapObject object, TSlot start,
+ TSlot end);
+
V8_EXPORT_PRIVATE static void WriteBarrierForCodeSlow(Code host);
V8_EXPORT_PRIVATE static void GenerationalBarrierSlow(HeapObject object,
Address slot,
@@ -359,15 +369,11 @@ class Heap {
Address key_slot);
V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(
Address raw_object, Address address, Isolate* isolate);
- V8_EXPORT_PRIVATE static void GenerationalBarrierForElementsSlow(
- Heap* heap, FixedArray array, int offset, int length);
V8_EXPORT_PRIVATE static void GenerationalBarrierForCodeSlow(
Code host, RelocInfo* rinfo, HeapObject value);
V8_EXPORT_PRIVATE static void MarkingBarrierSlow(HeapObject object,
Address slot,
HeapObject value);
- V8_EXPORT_PRIVATE static void MarkingBarrierForElementsSlow(
- Heap* heap, HeapObject object);
V8_EXPORT_PRIVATE static void MarkingBarrierForCodeSlow(Code host,
RelocInfo* rinfo,
HeapObject value);
@@ -389,14 +395,16 @@ class Heap {
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
- // Move len elements within a given array from src_index index to dst_index
- // index.
- void MoveElements(FixedArray array, int dst_index, int src_index, int len,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ // Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
+ // The source and destination memory ranges can overlap.
+ void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
+ ObjectSlot src_slot, int len, WriteBarrierMode mode);
- // Copy len elements from src_index of src array to dst_index of dst array.
- void CopyElements(FixedArray dst, FixedArray src, int dst_index,
- int src_index, int len, WriteBarrierMode mode);
+ // Copy len non-weak tagged elements from src_slot to dst_slot of dst_object.
+ // The source and destination memory ranges must not overlap.
+ template <typename TSlot>
+ void CopyRange(HeapObject dst_object, TSlot dst_slot, TSlot src_slot, int len,
+ WriteBarrierMode mode);
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
@@ -459,7 +467,7 @@ class Heap {
// Checks whether the given object is allowed to be migrated from it's
// current space into the given destination space. Used for debugging.
- bool AllowedToBeMigrated(HeapObject object, AllocationSpace dest);
+ bool AllowedToBeMigrated(Map map, HeapObject object, AllocationSpace dest);
void CheckHandleCount();
@@ -469,6 +477,9 @@ class Heap {
// Print short heap statistics.
void PrintShortHeapStatistics();
+ // Dump heap statistics in JSON format.
+ void DumpJSONHeapStatistics(std::stringstream& stream);
+
bool write_protect_code_memory() const { return write_protect_code_memory_; }
uintptr_t code_space_memory_modification_scope_depth() {
@@ -936,6 +947,8 @@ class Heap {
void SetEmbedderStackStateForNextFinalizaton(
EmbedderHeapTracer::EmbedderStackState stack_state);
+ EmbedderHeapTracer::TraceFlags flags_for_embedder_tracer() const;
+
// ===========================================================================
// External string table API. ================================================
// ===========================================================================
@@ -1137,6 +1150,8 @@ class Heap {
PromotedSinceLastGC();
}
+ size_t EmbedderAllocationCounter() const;
+
// This should be used only for testing.
void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
old_generation_allocation_counter_at_last_gc_ = new_value;
@@ -1168,6 +1183,8 @@ class Heap {
// Excludes external memory held by those objects.
V8_EXPORT_PRIVATE size_t OldGenerationSizeOfObjects();
+ V8_EXPORT_PRIVATE size_t GlobalSizeOfObjects();
+
// ===========================================================================
// Prologue/epilogue callback methods.========================================
// ===========================================================================
@@ -1275,10 +1292,21 @@ class Heap {
// Mostly useful for debugging.
bool GcSafeCodeContains(Code code, Address addr);
+ // Casts a heap object to a code object and checks if the inner_pointer is
+ // within the object.
+ Code GcSafeCastToCode(HeapObject object, Address inner_pointer);
+
+ // Returns the map of an object. Can be used during garbage collection, i.e.
+ // it supports a forwarded map. Fails if the map is not the code map.
+ Map GcSafeMapOfCodeSpaceObject(HeapObject object);
+
// =============================================================================
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
V8_EXPORT_PRIVATE void Verify();
+ // Verify the read-only heap after all read-only heap objects have been
+ // created.
+ void VerifyReadOnlyHeap();
void VerifyRememberedSetFor(HeapObject object);
#endif
@@ -1488,6 +1516,12 @@ class Heap {
V8_EXPORT_PRIVATE void ZapCodeObject(Address start_address,
int size_in_bytes);
+ // Range write barrier implementation.
+ template <int kModeMask, typename TSlot>
+ V8_INLINE void WriteBarrierForRangeImpl(MemoryChunk* source_page,
+ HeapObject object, TSlot start_slot,
+ TSlot end_slot);
+
// Deopts all code that contains allocation instruction which are tenured or
// not tenured. Moreover it clears the pretenuring allocation site statistics.
void ResetAllAllocationSitesDependentCode(AllocationType allocation);
@@ -1505,10 +1539,11 @@ class Heap {
void ConfigureInitialOldGenerationSize();
+ double ComputeMutatorUtilization(const char* tag, double mutator_speed,
+ double gc_speed);
bool HasLowYoungGenerationAllocationRate();
bool HasLowOldGenerationAllocationRate();
- double YoungGenerationMutatorUtilization();
- double OldGenerationMutatorUtilization();
+ bool HasLowEmbedderAllocationRate();
void ReduceNewSpaceSize();
@@ -1659,7 +1694,6 @@ class Heap {
// Growing strategy. =========================================================
// ===========================================================================
- HeapController* heap_controller() { return heap_controller_.get(); }
MemoryReducer* memory_reducer() { return memory_reducer_.get(); }
// For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
@@ -1680,13 +1714,19 @@ class Heap {
bool ShouldExpandOldGenerationOnSlowAllocation();
- enum class HeapGrowingMode { kSlow, kConservative, kMinimal, kDefault };
-
HeapGrowingMode CurrentHeapGrowingMode();
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
IncrementalMarkingLimit IncrementalMarkingLimitReached();
+ bool UseGlobalMemoryScheduling() const {
+ return FLAG_global_gc_scheduling && local_embedder_heap_tracer();
+ }
+
+ size_t GlobalMemoryAvailable();
+
+ void RecomputeLimits(GarbageCollector collector);
+
// ===========================================================================
// Idle notification. ========================================================
// ===========================================================================
@@ -1780,6 +1820,11 @@ class Heap {
size_t max_semi_space_size_ = 8 * (kSystemPointerSize / 4) * MB;
size_t initial_semispace_size_ = kMinSemiSpaceSizeInKB * KB;
size_t max_old_generation_size_ = 700ul * (kSystemPointerSize / 4) * MB;
+ // TODO(mlippautz): Clarify whether this should be take some embedder
+ // configurable limit into account.
+ size_t max_global_memory_size_ =
+ Min(static_cast<uint64_t>(std::numeric_limits<size_t>::max()),
+ static_cast<uint64_t>(max_old_generation_size_) * 2);
size_t initial_max_old_generation_size_;
size_t initial_max_old_generation_size_threshold_;
size_t initial_old_generation_size_;
@@ -1888,6 +1933,7 @@ class Heap {
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
size_t old_generation_allocation_limit_;
+ size_t global_allocation_limit_;
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
@@ -1937,7 +1983,6 @@ class Heap {
std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
std::unique_ptr<MemoryAllocator> memory_allocator_;
std::unique_ptr<StoreBuffer> store_buffer_;
- std::unique_ptr<HeapController> heap_controller_;
std::unique_ptr<IncrementalMarking> incremental_marking_;
std::unique_ptr<ConcurrentMarking> concurrent_marking_;
std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
@@ -2037,7 +2082,6 @@ class Heap {
friend class ConcurrentMarking;
friend class GCCallbacksScope;
friend class GCTracer;
- friend class MemoryController;
friend class HeapIterator;
friend class IdleScavengeObserver;
friend class IncrementalMarking;
@@ -2205,13 +2249,7 @@ class VerifySmisVisitor : public RootVisitor {
// space in turn, and null when it is done.
class V8_EXPORT_PRIVATE PagedSpaces {
public:
- enum class SpacesSpecifier { kSweepablePagedSpaces, kAllPagedSpaces };
-
- explicit PagedSpaces(Heap* heap, SpacesSpecifier specifier =
- SpacesSpecifier::kSweepablePagedSpaces)
- : heap_(heap),
- counter_(specifier == SpacesSpecifier::kAllPagedSpaces ? RO_SPACE
- : OLD_SPACE) {}
+ explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_SPACE) {}
PagedSpace* next();
private:
@@ -2233,19 +2271,21 @@ class SpaceIterator : public Malloced {
int current_space_; // from enum AllocationSpace.
};
-
-// A HeapIterator provides iteration over the whole heap. It
-// aggregates the specific iterators for the different spaces as
-// these can only iterate over one space only.
+// A HeapIterator provides iteration over the entire non-read-only heap. It
+// aggregates the specific iterators for the different spaces as these can only
+// iterate over one space only.
+//
+// HeapIterator ensures there is no allocation during its lifetime (using an
+// embedded DisallowHeapAllocation instance).
//
-// HeapIterator ensures there is no allocation during its lifetime
-// (using an embedded DisallowHeapAllocation instance).
+// HeapIterator can skip free list nodes (that is, de-allocated heap objects
+// that still remain in the heap). As implementation of free nodes filtering
+// uses GC marks, it can't be used during MS/MC GC phases. Also, it is forbidden
+// to interrupt iteration in this mode, as this will leave heap objects marked
+// (and thus, unusable).
//
-// HeapIterator can skip free list nodes (that is, de-allocated heap
-// objects that still remain in the heap). As implementation of free
-// nodes filtering uses GC marks, it can't be used during MS/MC GC
-// phases. Also, it is forbidden to interrupt iteration in this mode,
-// as this will leave heap objects marked (and thus, unusable).
+// See ReadOnlyHeapIterator if you need to iterate over read-only space objects,
+// or CombinedHeapIterator if you need to iterate over both heaps.
class V8_EXPORT_PRIVATE HeapIterator {
public:
enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
@@ -2337,7 +2377,7 @@ class HeapObjectAllocationTracker {
template <typename T>
T ForwardingAddress(T heap_obj) {
- MapWord map_word = heap_obj->map_word();
+ MapWord map_word = heap_obj.map_word();
if (map_word.IsForwardingAddress()) {
return T::cast(map_word.ToForwardingAddress());
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index 7df67d3d27..325fb07182 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -7,10 +7,10 @@
#include "src/heap/incremental-marking.h"
+#include "src/execution/isolate.h"
#include "src/heap/mark-compact-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -33,24 +33,43 @@ void IncrementalMarking::TransferColor(HeapObject from, HeapObject to) {
}
}
-void IncrementalMarking::RecordWrite(HeapObject obj, ObjectSlot slot,
- Object value) {
- DCHECK_IMPLIES(slot.address() != kNullAddress, !HasWeakHeapObjectTag(*slot));
- DCHECK(!HasWeakHeapObjectTag(value));
- if (IsMarking() && value->IsHeapObject()) {
- RecordWriteSlow(obj, HeapObjectSlot(slot), HeapObject::cast(value));
+bool IncrementalMarking::BaseRecordWrite(HeapObject obj, HeapObject value) {
+ DCHECK(!marking_state()->IsImpossible(value));
+ DCHECK(!marking_state()->IsImpossible(obj));
+ // The write barrier stub generated with V8_CONCURRENT_MARKING does not
+ // check the color of the source object.
+ const bool need_recording =
+ V8_CONCURRENT_MARKING_BOOL || marking_state()->IsBlack(obj);
+
+ if (need_recording && WhiteToGreyAndPush(value)) {
+ RestartIfNotMarking();
}
+ return is_compacting_ && need_recording;
}
-void IncrementalMarking::RecordMaybeWeakWrite(HeapObject obj,
- MaybeObjectSlot slot,
- MaybeObject value) {
+template <typename TSlot>
+void IncrementalMarking::RecordWrite(HeapObject obj, TSlot slot,
+ typename TSlot::TObject value) {
+ static_assert(std::is_same<TSlot, ObjectSlot>::value ||
+ std::is_same<TSlot, MaybeObjectSlot>::value,
+ "Only ObjectSlot and MaybeObjectSlot are expected here");
+ DCHECK_NE(slot.address(), kNullAddress);
+ DCHECK_IMPLIES(!TSlot::kCanBeWeak, !HAS_WEAK_HEAP_OBJECT_TAG((*slot).ptr()));
+ DCHECK_IMPLIES(!TSlot::kCanBeWeak, !HAS_WEAK_HEAP_OBJECT_TAG(value.ptr()));
// When writing a weak reference, treat it as strong for the purposes of the
// marking barrier.
- HeapObject heap_object;
- if (IsMarking() && value->GetHeapObject(&heap_object)) {
- RecordWriteSlow(obj, HeapObjectSlot(slot), heap_object);
+ HeapObject value_heap_object;
+ if (IsMarking() && value.GetHeapObject(&value_heap_object)) {
+ RecordWriteSlow(obj, HeapObjectSlot(slot), value_heap_object);
+ }
+}
+
+bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
+ if (marking_state()->WhiteToGrey(obj)) {
+ marking_worklist()->Push(obj);
+ return true;
}
+ return false;
}
void IncrementalMarking::RestartIfNotMarking() {
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index fe90dafcfa..c6e607c3ea 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -5,13 +5,13 @@
#include "src/heap/incremental-marking-job.h"
#include "src/base/platform/time.h"
+#include "src/execution/isolate.h"
+#include "src/execution/vm-state-inl.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
-#include "src/isolate.h"
-#include "src/v8.h"
-#include "src/vm-state-inl.h"
+#include "src/init/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
index 277a3955c0..145f1dca64 100644
--- a/deps/v8/src/heap/incremental-marking-job.h
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_INCREMENTAL_MARKING_JOB_H_
#define V8_HEAP_INCREMENTAL_MARKING_JOB_H_
-#include "src/cancelable-task.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 9005fc3e57..4a901dc17a 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -4,8 +4,8 @@
#include "src/heap/incremental-marking.h"
-#include "src/compilation-cache.h"
-#include "src/conversions.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/vm-state-inl.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
@@ -17,15 +17,15 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/sweeper.h"
+#include "src/init/v8.h"
+#include "src/numbers/conversions.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
+#include "src/objects/transitions-inl.h"
+#include "src/objects/visitors.h"
#include "src/tracing/trace-event.h"
-#include "src/transitions-inl.h"
-#include "src/v8.h"
-#include "src/visitors.h"
-#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -70,30 +70,11 @@ IncrementalMarking::IncrementalMarking(
SetState(STOPPED);
}
-bool IncrementalMarking::BaseRecordWrite(HeapObject obj, Object value) {
- HeapObject value_heap_obj = HeapObject::cast(value);
- DCHECK(!marking_state()->IsImpossible(value_heap_obj));
- DCHECK(!marking_state()->IsImpossible(obj));
-#ifdef V8_CONCURRENT_MARKING
- // The write barrier stub generated with V8_CONCURRENT_MARKING does not
- // check the color of the source object.
- const bool need_recording = true;
-#else
- const bool need_recording = marking_state()->IsBlack(obj);
-#endif
-
- if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
- RestartIfNotMarking();
- }
- return is_compacting_ && need_recording;
-}
-
void IncrementalMarking::RecordWriteSlow(HeapObject obj, HeapObjectSlot slot,
- Object value) {
+ HeapObject value) {
if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
// Object is not going to be rescanned we need to record the slot.
- heap_->mark_compact_collector()->RecordSlot(obj, slot,
- HeapObject::cast(value));
+ heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
}
}
@@ -102,8 +83,7 @@ int IncrementalMarking::RecordWriteFromCode(Address raw_obj,
Isolate* isolate) {
HeapObject obj = HeapObject::cast(Object(raw_obj));
MaybeObjectSlot slot(slot_address);
- isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(obj, slot,
- *slot);
+ isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
// Called by RecordWriteCodeStubAssembler, which doesnt accept void type
return 0;
}
@@ -117,14 +97,6 @@ void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
}
}
-bool IncrementalMarking::WhiteToGreyAndPush(HeapObject obj) {
- if (marking_state()->WhiteToGrey(obj)) {
- marking_worklist()->Push(obj);
- return true;
- }
- return false;
-}
-
void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
HeapObject obj) {
TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
@@ -150,7 +122,7 @@ void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
MarkBlackAndVisitObjectDueToLayoutChange(from);
DCHECK(marking_state()->IsBlack(from));
// Mark the new address as black.
- if (from->address() + kTaggedSize == to->address()) {
+ if (from.address() + kTaggedSize == to.address()) {
// The old and the new markbits overlap. The |to| object has the
// grey color. To make it black, we need to set the second bit.
DCHECK(new_mark_bit.Get<kAtomicity>());
@@ -182,7 +154,7 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
private:
void MarkObjectByPointer(FullObjectSlot p) {
Object obj = *p;
- if (!obj->IsHeapObject()) return;
+ if (!obj.IsHeapObject()) return;
heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
}
@@ -385,7 +357,8 @@ void IncrementalMarking::StartMarking() {
// marking (including write barriers) is fully set up.
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
- heap_->local_embedder_heap_tracer()->TracePrologue();
+ heap_->local_embedder_heap_tracer()->TracePrologue(
+ heap_->flags_for_embedder_tracer());
}
}
@@ -451,8 +424,8 @@ bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
// The map has aged. Do not retain this map.
return false;
}
- Object constructor = map->GetConstructor();
- if (!constructor->IsHeapObject() ||
+ Object constructor = map.GetConstructor();
+ if (!constructor.IsHeapObject() ||
marking_state()->IsWhite(HeapObject::cast(constructor))) {
// The constructor is dead, no new objects with this map can
// be created. Do not retain this map.
@@ -469,18 +442,18 @@ void IncrementalMarking::RetainMaps() {
bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
FLAG_retain_maps_for_n_gc == 0;
WeakArrayList retained_maps = heap()->retained_maps();
- int length = retained_maps->length();
+ int length = retained_maps.length();
// The number_of_disposed_maps separates maps in the retained_maps
// array that were created before and after context disposal.
// We do not age and retain disposed maps to avoid memory leaks.
int number_of_disposed_maps = heap()->number_of_disposed_maps_;
for (int i = 0; i < length; i += 2) {
- MaybeObject value = retained_maps->Get(i);
+ MaybeObject value = retained_maps.Get(i);
HeapObject map_heap_object;
if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
continue;
}
- int age = retained_maps->Get(i + 1).ToSmi().value();
+ int age = retained_maps.Get(i + 1).ToSmi().value();
int new_age;
Map map = Map::cast(map_heap_object);
if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
@@ -488,8 +461,8 @@ void IncrementalMarking::RetainMaps() {
if (ShouldRetainMap(map, age)) {
WhiteToGreyAndPush(map);
}
- Object prototype = map->prototype();
- if (age > 0 && prototype->IsHeapObject() &&
+ Object prototype = map.prototype();
+ if (age > 0 && prototype.IsHeapObject() &&
marking_state()->IsWhite(HeapObject::cast(prototype))) {
// The prototype is not marked, age the map.
new_age = age - 1;
@@ -503,7 +476,7 @@ void IncrementalMarking::RetainMaps() {
}
// Compact the array and update the age.
if (new_age != age) {
- retained_maps->Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
+ retained_maps.Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
}
}
}
@@ -554,10 +527,10 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
#endif
filler_map, minor_marking_state](
HeapObject obj, HeapObject* out) -> bool {
- DCHECK(obj->IsHeapObject());
+ DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
- MapWord map_word = obj->map_word();
+ MapWord map_word = obj.map_word();
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist anymore,
// e.g. left trimmed objects or objects from the root set (frames).
@@ -567,7 +540,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
return false;
}
HeapObject dest = map_word.ToForwardingAddress();
- DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
+ DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFiller());
*out = dest;
return true;
} else if (Heap::InToPage(obj)) {
@@ -595,10 +568,10 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
*out = obj;
return true;
}
- DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
+ DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFiller());
// Skip one word filler objects that appear on the
// stack when we perform in place array shift.
- if (obj->map() != filler_map) {
+ if (obj.map() != filler_map) {
*out = obj;
return true;
}
@@ -696,12 +669,6 @@ void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
bytes_marked_ -= Min(bytes_marked_, dead_bytes_in_new_space);
}
-bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject obj) {
- if (!obj->IsFixedArray()) return false;
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
- return chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR);
-}
-
int IncrementalMarking::VisitObject(Map map, HeapObject obj) {
DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
if (!marking_state()->GreyToBlack(obj)) {
@@ -714,9 +681,9 @@ int IncrementalMarking::VisitObject(Map map, HeapObject obj) {
// 4. The object is materizalized by the deoptimizer.
// 5. The object is a descriptor array marked black by
// the descriptor array marking barrier.
- DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
- obj->IsFixedArray() || obj->IsContext() || obj->IsJSObject() ||
- obj->IsString() || obj->IsDescriptorArray());
+ DCHECK(obj.IsHashTable() || obj.IsPropertyArray() || obj.IsFixedArray() ||
+ obj.IsContext() || obj.IsJSObject() || obj.IsString() ||
+ obj.IsDescriptorArray());
}
DCHECK(marking_state()->IsBlack(obj));
WhiteToGreyAndPush(map);
@@ -734,12 +701,10 @@ void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
void IncrementalMarking::RevisitObject(HeapObject obj) {
DCHECK(IsMarking());
DCHECK(marking_state()->IsBlack(obj));
- Page* page = Page::FromHeapObject(obj);
- if (page->owner()->identity() == LO_SPACE ||
- page->owner()->identity() == NEW_LO_SPACE) {
- page->ResetProgressBar();
- }
- Map map = obj->map();
+ DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->IsFlagSet(
+ MemoryChunk::HAS_PROGRESS_BAR),
+ 0u == MemoryChunk::FromHeapObject(obj)->ProgressBar());
+ Map map = obj.map();
WhiteToGreyAndPush(map);
IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
marking_state());
@@ -766,20 +731,20 @@ intptr_t IncrementalMarking::ProcessMarkingWorklist(
if (obj.is_null()) break;
// Left trimming may result in grey or black filler objects on the marking
// worklist. Ignore these objects.
- if (obj->IsFiller()) {
+ if (obj.IsFiller()) {
// Due to copying mark bits and the fact that grey and black have their
// first bit set, one word fillers are always black.
DCHECK_IMPLIES(
- obj->map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
+ obj.map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
marking_state()->IsBlack(obj));
// Other fillers may be black or grey depending on the color of the object
// that was trimmed.
DCHECK_IMPLIES(
- obj->map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
+ obj.map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
marking_state()->IsBlackOrGrey(obj));
continue;
}
- bytes_processed += VisitObject(obj->map(), obj);
+ bytes_processed += VisitObject(obj.map(), obj);
}
return bytes_processed;
}
@@ -961,8 +926,8 @@ void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Scheduled %" PRIuS
- "KB to mark based on time delta %.1fms\n",
+ "[IncrementalMarking] Scheduled %zuKB to mark based on time delta "
+ "%.1fms\n",
bytes_to_mark / KB, delta_ms);
}
}
@@ -1063,9 +1028,8 @@ void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Scheduled %" PRIuS
- "KB to mark based on allocation (progress="
- "%" PRIuS "KB, allocation=%" PRIuS "KB)\n",
+ "[IncrementalMarking] Scheduled %zuKB to mark based on allocation "
+ "(progress=%zuKB, allocation=%zuKB)\n",
bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
}
}
@@ -1083,7 +1047,7 @@ void IncrementalMarking::FetchBytesMarkedConcurrently() {
}
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Marked %" PRIuS "KB on background threads\n",
+ "[IncrementalMarking] Marked %zuKB on background threads\n",
heap_->concurrent_marking()->TotalMarkedBytes() / KB);
}
}
@@ -1094,11 +1058,11 @@ size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
if (FLAG_trace_incremental_marking) {
if (scheduled_bytes_to_mark_ > bytes_marked_) {
heap_->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Marker is %" PRIuS "KB behind schedule\n",
+ "[IncrementalMarking] Marker is %zuKB behind schedule\n",
(scheduled_bytes_to_mark_ - bytes_marked_) / KB);
} else {
heap_->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Marker is %" PRIuS "KB ahead of schedule\n",
+ "[IncrementalMarking] Marker is %zuKB ahead of schedule\n",
(bytes_marked_ - scheduled_bytes_to_mark_) / KB);
}
}
@@ -1206,7 +1170,7 @@ StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
- "[IncrementalMarking] Step %s %" PRIuS "KB (%" PRIuS "KB) in %.1f\n",
+ "[IncrementalMarking] Step %s %zuKB (%zuKB) in %.1f\n",
step_origin == StepOrigin::kV8 ? "in v8" : "in task",
bytes_processed / KB, bytes_to_process / KB, duration);
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index f3f0703bd1..7284034191 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -5,10 +5,10 @@
#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_
-#include "src/cancelable-task.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace internal {
@@ -184,7 +184,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool ShouldDoEmbedderStep();
StepResult EmbedderStep(double duration);
- inline void RestartIfNotMarking();
+ V8_INLINE void RestartIfNotMarking();
// {raw_obj} and {slot_address} are raw Address values instead of a
// HeapObject and a MaybeObjectSlot because this is called from
@@ -198,22 +198,22 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// No slots in white objects should be recorded, as some slots are typed and
// cannot be interpreted correctly if the underlying object does not survive
// the incremental cycle (stays white).
- V8_INLINE bool BaseRecordWrite(HeapObject obj, Object value);
- V8_INLINE void RecordWrite(HeapObject obj, ObjectSlot slot, Object value);
- V8_INLINE void RecordMaybeWeakWrite(HeapObject obj, MaybeObjectSlot slot,
- MaybeObject value);
+ V8_INLINE bool BaseRecordWrite(HeapObject obj, HeapObject value);
+ template <typename TSlot>
+ V8_INLINE void RecordWrite(HeapObject obj, TSlot slot,
+ typename TSlot::TObject value);
void RevisitObject(HeapObject obj);
// Ensures that all descriptors int range [0, number_of_own_descripts)
// are visited.
void VisitDescriptors(HeapObject host, DescriptorArray array,
int number_of_own_descriptors);
- void RecordWriteSlow(HeapObject obj, HeapObjectSlot slot, Object value);
+ void RecordWriteSlow(HeapObject obj, HeapObjectSlot slot, HeapObject value);
void RecordWriteIntoCode(Code host, RelocInfo* rinfo, HeapObject value);
// Returns true if the function succeeds in transitioning the object
// from white to grey.
- bool WhiteToGreyAndPush(HeapObject obj);
+ V8_INLINE bool WhiteToGreyAndPush(HeapObject obj);
// This function is used to color the object black before it undergoes an
// unsafe layout change. This is a part of synchronization protocol with
@@ -285,8 +285,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
intptr_t bytes_to_process,
ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION);
- V8_INLINE bool IsFixedArrayWithProgressBar(HeapObject object);
-
// Visits the object and returns its size.
V8_INLINE int VisitObject(Map map, HeapObject obj);
diff --git a/deps/v8/src/heap/invalidated-slots-inl.h b/deps/v8/src/heap/invalidated-slots-inl.h
index 583d443eda..58f6ac9bc8 100644
--- a/deps/v8/src/heap/invalidated-slots-inl.h
+++ b/deps/v8/src/heap/invalidated-slots-inl.h
@@ -7,12 +7,12 @@
#include <map>
-#include "src/allocation.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
-#include "src/objects-body-descriptors-inl.h"
-#include "src/objects-body-descriptors.h"
-#include "src/objects.h"
+#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/objects/objects-body-descriptors.h"
+#include "src/objects/objects.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -28,8 +28,8 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
++iterator_;
if (iterator_ != iterator_end_) {
// Invalidated ranges must not overlap.
- DCHECK_LE(invalidated_end_, iterator_->first->address());
- invalidated_start_ = iterator_->first->address();
+ DCHECK_LE(invalidated_end_, iterator_->first.address());
+ invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
invalidated_object_ = HeapObject();
invalidated_object_size_ = 0;
@@ -47,9 +47,9 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
// Ask the object if the slot is valid.
if (invalidated_object_.is_null()) {
invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
- DCHECK(!invalidated_object_->IsFiller());
+ DCHECK(!invalidated_object_.IsFiller());
invalidated_object_size_ =
- invalidated_object_->SizeFromMap(invalidated_object_->map());
+ invalidated_object_.SizeFromMap(invalidated_object_.map());
}
int offset = static_cast<int>(slot - invalidated_start_);
DCHECK_GT(offset, 0);
@@ -59,7 +59,7 @@ bool InvalidatedSlotsFilter::IsValid(Address slot) {
if (offset >= invalidated_object_size_) {
return slots_in_free_space_are_valid_;
}
- return invalidated_object_->IsValidSlot(invalidated_object_->map(), offset);
+ return invalidated_object_.IsValidSlot(invalidated_object_.map(), offset);
}
} // namespace internal
diff --git a/deps/v8/src/heap/invalidated-slots.cc b/deps/v8/src/heap/invalidated-slots.cc
index a5b835441b..368d189c55 100644
--- a/deps/v8/src/heap/invalidated-slots.cc
+++ b/deps/v8/src/heap/invalidated-slots.cc
@@ -24,7 +24,7 @@ InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
if (iterator_ != iterator_end_) {
- invalidated_start_ = iterator_->first->address();
+ invalidated_start_ = iterator_->first.address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
diff --git a/deps/v8/src/heap/invalidated-slots.h b/deps/v8/src/heap/invalidated-slots.h
index 0480086a3a..4098595fe4 100644
--- a/deps/v8/src/heap/invalidated-slots.h
+++ b/deps/v8/src/heap/invalidated-slots.h
@@ -8,10 +8,10 @@
#include <map>
#include <stack>
-#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/objects/heap-object.h"
-#include "src/utils.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/item-parallel-job.cc b/deps/v8/src/heap/item-parallel-job.cc
index 17a9f04e79..1945e3275a 100644
--- a/deps/v8/src/heap/item-parallel-job.cc
+++ b/deps/v8/src/heap/item-parallel-job.cc
@@ -5,8 +5,8 @@
#include "src/heap/item-parallel-job.h"
#include "src/base/platform/semaphore.h"
-#include "src/counters.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/logging/counters.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
index 6639ea1ef5..54f09b87b5 100644
--- a/deps/v8/src/heap/item-parallel-job.h
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -11,8 +11,8 @@
#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/cancelable-task.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
diff --git a/deps/v8/src/heap/local-allocator-inl.h b/deps/v8/src/heap/local-allocator-inl.h
index b8f0bdc5b5..71dcd98778 100644
--- a/deps/v8/src/heap/local-allocator-inl.h
+++ b/deps/v8/src/heap/local-allocator-inl.h
@@ -26,7 +26,6 @@ AllocationResult LocalAllocator::Allocate(AllocationSpace space,
->AllocateRaw(object_size, alignment);
default:
UNREACHABLE();
- break;
}
}
@@ -42,14 +41,13 @@ void LocalAllocator::FreeLast(AllocationSpace space, HeapObject object,
default:
// Only new and old space supported.
UNREACHABLE();
- break;
}
}
void LocalAllocator::FreeLastInNewSpace(HeapObject object, int object_size) {
if (!new_space_lab_.TryFreeLast(object, object_size)) {
// We couldn't free the last object so we have to write a proper filler.
- heap_->CreateFillerObjectAt(object->address(), object_size,
+ heap_->CreateFillerObjectAt(object.address(), object_size,
ClearRecordedSlots::kNo);
}
}
@@ -57,7 +55,7 @@ void LocalAllocator::FreeLastInNewSpace(HeapObject object, int object_size) {
void LocalAllocator::FreeLastInOldSpace(HeapObject object, int object_size) {
if (!compaction_spaces_.Get(OLD_SPACE)->TryFreeLast(object, object_size)) {
// We couldn't free the last object so we have to write a proper filler.
- heap_->CreateFillerObjectAt(object->address(), object_size,
+ heap_->CreateFillerObjectAt(object.address(), object_size,
ClearRecordedSlots::kNo);
}
}
diff --git a/deps/v8/src/heap/local-allocator.h b/deps/v8/src/heap/local-allocator.h
index ad99f07fac..7019a79f21 100644
--- a/deps/v8/src/heap/local-allocator.h
+++ b/deps/v8/src/heap/local-allocator.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_LOCAL_ALLOCATOR_H_
#define V8_HEAP_LOCAL_ALLOCATOR_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 0b12330b5b..cf6d96cef8 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -7,8 +7,8 @@
#include "src/heap/mark-compact.h"
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
+#include "src/codegen/assembler-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/objects-visiting-inl.h"
@@ -16,7 +16,7 @@
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/slots-inl.h"
-#include "src/transitions.h"
+#include "src/objects/transitions.h"
namespace v8 {
namespace internal {
@@ -24,9 +24,9 @@ namespace internal {
template <typename ConcreteState, AccessMode access_mode>
bool MarkingStateBase<ConcreteState, access_mode>::GreyToBlack(HeapObject obj) {
MemoryChunk* p = MemoryChunk::FromHeapObject(obj);
- MarkBit markbit = MarkBitFrom(p, obj->address());
+ MarkBit markbit = MarkBitFrom(p, obj.address());
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
- static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj->Size());
+ static_cast<ConcreteState*>(this)->IncrementLiveBytes(p, obj.Size());
return true;
}
@@ -60,7 +60,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
BytecodeArray::BodyDescriptor::IterateBody(map, array, size, this);
if (!heap_->is_current_gc_forced()) {
- array->MakeOlder();
+ array.MakeOlder();
}
return size;
}
@@ -71,9 +71,8 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitDescriptorArray(Map map,
DescriptorArray array) {
int size = DescriptorArray::BodyDescriptor::SizeOf(map, array);
- VisitPointers(array, array->GetFirstPointerSlot(),
- array->GetDescriptorSlot(0));
- VisitDescriptors(array, array->number_of_descriptors());
+ VisitPointers(array, array.GetFirstPointerSlot(), array.GetDescriptorSlot(0));
+ VisitDescriptors(array, array.number_of_descriptors());
return size;
}
@@ -86,7 +85,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
// If the SharedFunctionInfo has old bytecode, mark it as flushable,
// otherwise visit the function data field strongly.
- if (shared_info->ShouldFlushBytecode(Heap::GetBytecodeFlushMode())) {
+ if (shared_info.ShouldFlushBytecode(Heap::GetBytecodeFlushMode())) {
collector_->AddBytecodeFlushingCandidate(shared_info);
} else {
VisitPointer(shared_info,
@@ -102,7 +101,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
int size = Parent::VisitJSFunction(map, object);
// Check if the JSFunction needs reset due to bytecode being flushed.
- if (FLAG_flush_bytecode && object->NeedsResetDueToFlushedBytecode()) {
+ if (FLAG_flush_bytecode && object.NeedsResetDueToFlushedBytecode()) {
collector_->AddFlushedJSFunction(object);
}
@@ -168,14 +167,14 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
collector_->AddEphemeronHashTable(table);
- for (int i = 0; i < table->Capacity(); i++) {
+ for (int i = 0; i < table.Capacity(); i++) {
ObjectSlot key_slot =
- table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
- HeapObject key = HeapObject::cast(table->KeyAt(i));
+ table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
+ HeapObject key = HeapObject::cast(table.KeyAt(i));
collector_->RecordSlot(table, key_slot, key);
ObjectSlot value_slot =
- table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
+ table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
if (marking_state()->IsBlackOrGrey(key)) {
VisitPointer(table, value_slot);
@@ -183,7 +182,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
} else {
Object value_obj = *value_slot;
- if (value_obj->IsHeapObject()) {
+ if (value_obj.IsHeapObject()) {
HeapObject value = HeapObject::cast(value_obj);
collector_->RecordSlot(table, value_slot, value);
@@ -196,7 +195,7 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
}
}
- return table->SizeFromMap(map);
+ return table.SizeFromMap(map);
}
template <FixedArrayVisitationMode fixed_array_mode,
@@ -204,7 +203,7 @@ template <FixedArrayVisitationMode fixed_array_mode,
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitMap(Map meta_map, Map map) {
int size = Map::BodyDescriptor::SizeOf(meta_map, map);
- if (map->CanTransition()) {
+ if (map.CanTransition()) {
// Maps that can transition share their descriptor arrays and require
// special visiting logic to avoid memory leaks.
// Since descriptor arrays are potentially shared, ensure that only the
@@ -212,12 +211,11 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode,
// non-empty descriptor array is marked, its header is also visited. The
// slot holding the descriptor array will be implicitly recorded when the
// pointer fields of this map are visited.
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors();
MarkDescriptorArrayBlack(map, descriptors);
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors) {
- DCHECK_LE(number_of_own_descriptors,
- descriptors->number_of_descriptors());
+ DCHECK_LE(number_of_own_descriptors, descriptors.number_of_descriptors());
VisitDescriptors(descriptors, number_of_own_descriptors);
}
// Mark the pointer fields of the Map. Since the transitions array has
@@ -243,8 +241,8 @@ template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitJSWeakRef(Map map, JSWeakRef weak_ref) {
- if (weak_ref->target()->IsHeapObject()) {
- HeapObject target = HeapObject::cast(weak_ref->target());
+ if (weak_ref.target().IsHeapObject()) {
+ HeapObject target = HeapObject::cast(weak_ref.target());
if (marking_state()->IsBlackOrGrey(target)) {
// Record the slot inside the JSWeakRef, since the IterateBody below
// won't visit it.
@@ -265,8 +263,8 @@ template <FixedArrayVisitationMode fixed_array_mode,
TraceRetainingPathMode retaining_path_mode, typename MarkingState>
int MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitWeakCell(Map map, WeakCell weak_cell) {
- if (weak_cell->target()->IsHeapObject()) {
- HeapObject target = HeapObject::cast(weak_cell->target());
+ if (weak_cell.target().IsHeapObject()) {
+ HeapObject target = HeapObject::cast(weak_cell.target());
if (marking_state()->IsBlackOrGrey(target)) {
// Record the slot inside the WeakCell, since the IterateBody below
// won't visit it.
@@ -331,11 +329,11 @@ template <FixedArrayVisitationMode fixed_array_mode,
void MarkingVisitor<fixed_array_mode, retaining_path_mode,
MarkingState>::VisitEmbeddedPointer(Code host,
RelocInfo* rinfo) {
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object = HeapObject::cast(rinfo->target_object());
collector_->RecordRelocSlot(host, rinfo, object);
if (!marking_state()->IsBlackOrGrey(object)) {
- if (host->IsWeakObject(object)) {
+ if (host.IsWeakObject(object)) {
collector_->AddWeakObjectInCode(object, host);
} else {
MarkObject(host, object);
@@ -368,8 +366,8 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
}
}
if (marking_state()->GreyToBlack(descriptors)) {
- VisitPointers(descriptors, descriptors->GetFirstPointerSlot(),
- descriptors->GetDescriptorSlot(0));
+ VisitPointers(descriptors, descriptors.GetFirstPointerSlot(),
+ descriptors.GetDescriptorSlot(0));
}
DCHECK(marking_state()->IsBlack(descriptors));
}
@@ -398,24 +396,14 @@ int MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
DCHECK(FLAG_use_marking_progress_bar);
DCHECK(heap_->IsLargeObject(object));
size_t current_progress_bar = chunk->ProgressBar();
- if (current_progress_bar == 0) {
- // Try to move the progress bar forward to start offset. This solves the
- // problem of not being able to observe a progress bar reset when
- // processing the first kProgressBarScanningChunk.
- if (!chunk->TrySetProgressBar(0,
- FixedArray::BodyDescriptor::kStartOffset))
- return 0;
- current_progress_bar = FixedArray::BodyDescriptor::kStartOffset;
- }
int start = static_cast<int>(current_progress_bar);
+ if (start == 0) start = FixedArray::BodyDescriptor::kStartOffset;
int end = Min(size, start + kProgressBarScanningChunk);
if (start < end) {
VisitPointers(object, object.RawField(start), object.RawField(end));
- // Setting the progress bar can fail if the object that is currently
- // scanned is also revisited. In this case, there may be two tasks racing
- // on the progress counter. The looser can bail out because the progress
- // bar is reset before the tasks race on the object.
- if (chunk->TrySetProgressBar(current_progress_bar, end) && (end < size)) {
+ bool success = chunk->TrySetProgressBar(current_progress_bar, end);
+ CHECK(success);
+ if (end < size) {
DCHECK(marking_state()->IsBlack(object));
// The object can be pushed back onto the marking worklist only after
// progress bar was updated.
@@ -439,12 +427,12 @@ void MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>::
// descriptor arrays.
DCHECK(marking_state()->IsBlack(descriptors));
int16_t new_marked = static_cast<int16_t>(number_of_own_descriptors);
- int16_t old_marked = descriptors->UpdateNumberOfMarkedDescriptors(
+ int16_t old_marked = descriptors.UpdateNumberOfMarkedDescriptors(
mark_compact_epoch_, new_marked);
if (old_marked < new_marked) {
VisitPointers(descriptors,
- MaybeObjectSlot(descriptors->GetDescriptorSlot(old_marked)),
- MaybeObjectSlot(descriptors->GetDescriptorSlot(new_marked)));
+ MaybeObjectSlot(descriptors.GetDescriptorSlot(old_marked)),
+ MaybeObjectSlot(descriptors.GetDescriptorSlot(new_marked)));
}
}
@@ -493,14 +481,22 @@ void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target) {
- Page* target_page = Page::FromHeapObject(target);
- Page* source_page = Page::FromHeapObject(object);
+ MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
+ MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>() &&
!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address());
}
}
+void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
+ HeapObjectSlot slot, HeapObject target) {
+ MemoryChunk* target_page = MemoryChunk::FromHeapObject(target);
+ if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
+ RememberedSet<OLD_TO_OLD>::Insert(source_page, slot.address());
+ }
+}
+
void MarkCompactCollector::AddTransitionArray(TransitionArray array) {
weak_objects_.transition_arrays.Push(kMainThread, array);
}
@@ -569,7 +565,7 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// last word is a one word filler, we are not allowed to advance. In
// that case we can return immediately.
if (!it_.Advance()) {
- DCHECK(HeapObject::FromAddress(addr)->map() == one_word_filler_map_);
+ DCHECK(HeapObject::FromAddress(addr).map() == one_word_filler_map_);
current_object_ = HeapObject();
return;
}
@@ -586,7 +582,7 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
map = Map::cast(ObjectSlot(addr).Acquire_Load());
- size = black_object->SizeFromMap(map);
+ size = black_object.SizeFromMap(map);
Address end = addr + size - kTaggedSize;
// One word filler objects do not borrow the second mark bit. We have
// to jump over the advancing and clearing part.
@@ -614,7 +610,7 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
} else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
map = Map::cast(ObjectSlot(addr).Acquire_Load());
object = HeapObject::FromAddress(addr);
- size = object->SizeFromMap(map);
+ size = object.SizeFromMap(map);
}
// We found a live object.
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 2c119d9593..03be1100b1 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -7,12 +7,12 @@
#include <unordered_map>
#include "src/base/utils/random-number-generator.h"
-#include "src/cancelable-task.h"
-#include "src/compilation-cache.h"
-#include "src/deoptimizer.h"
-#include "src/execution.h"
-#include "src/frames-inl.h"
-#include "src/global-handles.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
@@ -23,20 +23,21 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/read-only-heap.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
+#include "src/init/v8.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/slots-inl.h"
-#include "src/transitions-inl.h"
-#include "src/utils-inl.h"
-#include "src/v8.h"
-#include "src/vm-state-inl.h"
+#include "src/objects/transitions-inl.h"
+#include "src/tasks/cancelable-task.h"
+#include "src/utils/utils-inl.h"
namespace v8 {
namespace internal {
@@ -111,11 +112,11 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
object = HeapObject::FromAddress(current);
// One word fillers at the end of a black area can be grey.
if (IsBlackOrGrey(object) &&
- object->map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
+ object.map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
CHECK(IsMarked(object));
CHECK(current >= next_object_must_be_here_or_later);
- object->Iterate(this);
- next_object_must_be_here_or_later = current + object->Size();
+ object.Iterate(this);
+ next_object_must_be_here_or_later = current + object.Size();
// The object is either part of a black area of black allocation or a
// regular black object
CHECK(
@@ -158,7 +159,7 @@ void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
LargeObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
- obj->Iterate(this);
+ obj.Iterate(this);
}
}
}
@@ -213,8 +214,8 @@ class FullMarkingVerifier : public MarkingVerifier {
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- if (!host->IsWeakObject(rinfo->target_object())) {
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
+ if (!host.IsWeakObject(rinfo->target_object())) {
HeapObject object = rinfo->target_object();
VerifyHeapObjectImpl(object);
}
@@ -283,8 +284,8 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
Address current = start;
while (current < end) {
HeapObject object = HeapObject::FromAddress(current);
- if (!object->IsFiller()) object->Iterate(this);
- current += object->Size();
+ if (!object.IsFiller()) object.Iterate(this);
+ current += object.Size();
}
}
@@ -511,8 +512,8 @@ void MarkCompactCollector::CollectGarbage() {
}
#ifdef VERIFY_HEAP
-void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
- HeapObjectIterator iterator(space);
+void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
+ ReadOnlyHeapIterator iterator(space);
for (HeapObject object = iterator.Next(); !object.is_null();
object = iterator.Next()) {
CHECK(non_atomic_marking_state()->IsBlack(object));
@@ -731,8 +732,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
if (FLAG_trace_fragmentation_verbose) {
PrintIsolate(isolate(),
"compaction-selection-page: space=%s free_bytes_page=%zu "
- "fragmentation_limit_kb=%" PRIuS
- " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
+ "fragmentation_limit_kb=%zu "
+ "fragmentation_limit_percent=%d sum_compaction_kb=%zu "
"compaction_limit_kb=%zu\n",
space->name(), free_bytes / KB, free_bytes_threshold / KB,
target_fragmentation_percent, total_live_bytes / KB,
@@ -796,7 +797,8 @@ void MarkCompactCollector::Prepare() {
if (!was_marked_incrementally_) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
- heap_->local_embedder_heap_tracer()->TracePrologue();
+ heap_->local_embedder_heap_tracer()->TracePrologue(
+ heap_->flags_for_embedder_tracer());
}
// Don't start compaction if we are in the middle of incremental
@@ -851,8 +853,6 @@ void MarkCompactCollector::VerifyMarking() {
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
- epoch_++;
-
#ifdef DEBUG
heap()->VerifyCountersBeforeConcurrentSweeping();
#endif
@@ -905,7 +905,7 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
private:
V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
- if (!(*p)->IsHeapObject()) return;
+ if (!(*p).IsHeapObject()) return;
collector_->MarkRootObject(root, HeapObject::cast(*p));
}
@@ -956,7 +956,7 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
private:
V8_INLINE void MarkObject(HeapObject host, Object object) {
- if (!object->IsHeapObject()) return;
+ if (!object.IsHeapObject()) return;
collector_->MarkObject(host, HeapObject::cast(object));
}
@@ -976,7 +976,7 @@ class InternalizedStringTableCleaner : public ObjectVisitor {
heap_->mark_compact_collector()->non_atomic_marking_state();
for (ObjectSlot p = start; p < end; ++p) {
Object o = *p;
- if (o->IsHeapObject()) {
+ if (o.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
pointers_removed_++;
@@ -1024,14 +1024,14 @@ class ExternalStringTableCleaner : public RootVisitor {
Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
for (FullObjectSlot p = start; p < end; ++p) {
Object o = *p;
- if (o->IsHeapObject()) {
+ if (o.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(o);
if (marking_state->IsWhite(heap_object)) {
- if (o->IsExternalString()) {
+ if (o.IsExternalString()) {
heap_->FinalizeExternalString(String::cast(o));
} else {
// The original external string may have been internalized.
- DCHECK(o->IsThinString());
+ DCHECK(o.IsThinString());
}
// Set the entry to the_hole_value (as deleted).
p.store(the_hole);
@@ -1057,18 +1057,18 @@ class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
DCHECK(!marking_state_->IsGrey(heap_object));
if (marking_state_->IsBlack(heap_object)) {
return object;
- } else if (object->IsAllocationSite() &&
- !(AllocationSite::cast(object)->IsZombie())) {
+ } else if (object.IsAllocationSite() &&
+ !(AllocationSite::cast(object).IsZombie())) {
// "dead" AllocationSites need to live long enough for a traversal of new
// space. These sites get a one-time reprieve.
Object nested = object;
- while (nested->IsAllocationSite()) {
+ while (nested.IsAllocationSite()) {
AllocationSite current_site = AllocationSite::cast(nested);
// MarkZombie will override the nested_site, read it first before
// marking
- nested = current_site->nested_site();
- current_site->MarkZombie();
+ nested = current_site.nested_site();
+ current_site.MarkZombie();
marking_state_->WhiteToBlack(current_site);
}
@@ -1117,7 +1117,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
ObjectSlot value) override {
- DCHECK(host->IsEphemeronHashTable());
+ DCHECK(host.IsEphemeronHashTable());
DCHECK(!Heap::InYoungGeneration(host));
VisitPointer(host, value);
@@ -1144,7 +1144,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
DCHECK_EQ(host, rinfo->host());
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
HeapObject object = HeapObject::cast(rinfo->target_object());
GenerationalBarrierForCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
@@ -1196,7 +1196,7 @@ class ProfilingMigrationObserver final : public MigrationObserver {
inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) final {
- if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
+ if (dest == CODE_SPACE || (dest == OLD_SPACE && dst.IsBytecodeArray())) {
PROFILE(heap_->isolate(),
CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
}
@@ -1227,9 +1227,9 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
template <MigrationMode mode>
static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
HeapObject src, int size, AllocationSpace dest) {
- Address dst_addr = dst->address();
- Address src_addr = src->address();
- DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
+ Address dst_addr = dst.address();
+ Address src_addr = src.address();
+ DCHECK(base->heap_->AllowedToBeMigrated(src.map(), src, dest));
DCHECK_NE(dest, LO_SPACE);
DCHECK_NE(dest, CODE_LO_SPACE);
if (dest == OLD_SPACE) {
@@ -1238,14 +1238,14 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
base->heap_->CopyBlock(dst_addr, src_addr, size);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
+ dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
base->heap_->CopyBlock(dst_addr, src_addr, size);
- Code::cast(dst)->Relocate(dst_addr - src_addr);
+ Code::cast(dst).Relocate(dst_addr - src_addr);
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
- dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
+ dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1253,7 +1253,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
- src->set_map_word(MapWord::FromForwardingAddress(dst));
+ src.set_map_word(MapWord::FromForwardingAddress(dst));
}
EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
@@ -1269,12 +1269,15 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
#ifdef VERIFY_HEAP
if (AbortCompactionForTesting(object)) return false;
#endif // VERIFY_HEAP
- AllocationAlignment alignment =
- HeapObject::RequiredAlignment(object->map());
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
AllocationResult allocation =
local_allocator_->Allocate(target_space, size, alignment);
if (allocation.To(target_object)) {
MigrateObject(*target_object, object, size, target_space);
+ if (target_space == CODE_SPACE)
+ MemoryChunk::FromHeapObject(*target_object)
+ ->GetCodeObjectRegistry()
+ ->RegisterNewlyAllocatedCodeObject((*target_object).address());
return true;
}
return false;
@@ -1297,7 +1300,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (FLAG_stress_compaction) {
const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
kPageAlignmentMask & ~kObjectAlignmentMask;
- if ((object->ptr() & kPageAlignmentMask) == mask) {
+ if ((object.ptr() & kPageAlignmentMask) == mask) {
Page* page = Page::FromHeapObject(object);
if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
@@ -1334,12 +1337,12 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
inline bool Visit(HeapObject object, int size) override {
if (TryEvacuateWithoutCopy(object)) return true;
HeapObject target_object;
- if (heap_->ShouldBePromoted(object->address()) &&
+ if (heap_->ShouldBePromoted(object.address()) &&
TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
promoted_size_ += size;
return true;
}
- heap_->UpdateAllocationSite(object->map(), object,
+ heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
HeapObject target;
AllocationSpace space = AllocateTargetObject(object, size, &target);
@@ -1355,13 +1358,13 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
inline bool TryEvacuateWithoutCopy(HeapObject object) {
if (is_incremental_marking_) return false;
- Map map = object->map();
+ Map map = object.map();
// Some objects can be evacuated without creating a copy.
- if (map->visitor_id() == kVisitThinString) {
- HeapObject actual = ThinString::cast(object)->unchecked_actual();
+ if (map.visitor_id() == kVisitThinString) {
+ HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- object->map_slot().Relaxed_Store(
+ object.map_slot().Relaxed_Store(
MapWord::FromForwardingAddress(actual).ToMap());
return true;
}
@@ -1373,7 +1376,7 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
HeapObject* target_object) {
AllocationAlignment alignment =
- HeapObject::RequiredAlignment(old_object->map());
+ HeapObject::RequiredAlignment(old_object.map());
AllocationSpace space_allocated_in = NEW_SPACE;
AllocationResult allocation =
local_allocator_->Allocate(NEW_SPACE, size, alignment);
@@ -1434,10 +1437,10 @@ class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
inline bool Visit(HeapObject object, int size) override {
if (mode == NEW_TO_NEW) {
- heap_->UpdateAllocationSite(object->map(), object,
+ heap_->UpdateAllocationSite(object.map(), object,
local_pretenuring_feedback_);
} else if (mode == NEW_TO_OLD) {
- object->IterateBodyFast(record_visitor_);
+ object.IterateBodyFast(record_visitor_);
}
return true;
}
@@ -1462,7 +1465,7 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
HeapObject target_object;
if (TryEvacuateObject(Page::FromHeapObject(object)->owner()->identity(),
object, size, &target_object)) {
- DCHECK(object->map_word().IsForwardingAddress());
+ DCHECK(object.map_word().IsForwardingAddress());
return true;
}
return false;
@@ -1476,7 +1479,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
inline bool Visit(HeapObject object, int size) override {
RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
&heap_->ephemeron_remembered_set_);
- object->IterateBodyFast(&visitor);
+ object.IterateBodyFast(&visitor);
return true;
}
@@ -1486,7 +1489,7 @@ class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
Object o = *p;
- if (!o->IsHeapObject()) return false;
+ if (!o.IsHeapObject()) return false;
HeapObject heap_object = HeapObject::cast(o);
return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
heap_object);
@@ -1498,7 +1501,7 @@ void MarkCompactCollector::MarkStringTable(
// Mark the string table itself.
if (marking_state()->WhiteToBlack(string_table)) {
// Explicitly mark the prefix.
- string_table->IteratePrefix(custom_root_body_visitor);
+ string_table.IteratePrefix(custom_root_body_visitor);
}
}
@@ -1704,20 +1707,20 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
while (!(object = marking_worklist()->Pop()).is_null()) {
// Left trimming may result in grey or black filler objects on the marking
// worklist. Ignore these objects.
- if (object->IsFiller()) {
+ if (object.IsFiller()) {
// Due to copying mark bits and the fact that grey and black have their
// first bit set, one word fillers are always black.
DCHECK_IMPLIES(
- object->map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
+ object.map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
marking_state()->IsBlack(object));
// Other fillers may be black or grey depending on the color of the object
// that was trimmed.
DCHECK_IMPLIES(
- object->map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
+ object.map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
marking_state()->IsBlackOrGrey(object));
continue;
}
- DCHECK(object->IsHeapObject());
+ DCHECK(object.IsHeapObject());
DCHECK(heap()->Contains(object));
DCHECK(!(marking_state()->IsWhite(object)));
marking_state()->GreyToBlack(object);
@@ -1725,7 +1728,7 @@ void MarkCompactCollector::ProcessMarkingWorklistInternal() {
kTrackNewlyDiscoveredObjects) {
AddNewlyDiscovered(object);
}
- Map map = object->map();
+ Map map = object.map();
MarkObject(object, map);
visitor.Visit(map, object);
}
@@ -1766,8 +1769,8 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code code = it.frame()->LookupCode();
- if (!code->CanDeoptAt(it.frame()->pc())) {
- Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
+ if (!code.CanDeoptAt(it.frame()->pc())) {
+ Code::BodyDescriptor::IterateBody(code.map(), code, visitor);
}
return;
}
@@ -1918,6 +1921,8 @@ void MarkCompactCollector::MarkLiveObjects() {
if (was_marked_incrementally_) {
heap()->incremental_marking()->Deactivate();
}
+
+ epoch_++;
}
void MarkCompactCollector::ClearNonLiveReferences() {
@@ -1931,8 +1936,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
// table is marked.
StringTable string_table = heap()->string_table();
InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
- string_table->IterateElements(&internalized_visitor);
- string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
+ string_table.IterateElements(&internalized_visitor);
+ string_table.ElementsRemoved(internalized_visitor.PointersRemoved());
ExternalStringTableCleaner external_visitor(heap());
heap()->external_string_table_.IterateAll(&external_visitor);
@@ -1988,21 +1993,21 @@ void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
HeapObject object = weak_object_in_code.first;
Code code = weak_object_in_code.second;
if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
- !code->embedded_objects_cleared()) {
- if (!code->marked_for_deoptimization()) {
- code->SetMarkedForDeoptimization("weak objects");
+ !code.embedded_objects_cleared()) {
+ if (!code.marked_for_deoptimization()) {
+ code.SetMarkedForDeoptimization("weak objects");
have_code_to_deoptimize_ = true;
}
- code->ClearEmbeddedObjects(heap_);
- DCHECK(code->embedded_objects_cleared());
+ code.ClearEmbeddedObjects(heap_);
+ DCHECK(code.embedded_objects_cleared());
}
}
}
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
- Object potential_parent = dead_target->constructor_or_backpointer();
- if (potential_parent->IsMap()) {
+ Object potential_parent = dead_target.constructor_or_backpointer();
+ if (potential_parent.IsMap()) {
Map parent = Map::cast(potential_parent);
DisallowHeapAllocation no_gc_obviously;
if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
@@ -2015,29 +2020,29 @@ void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
Map dead_target) {
- DCHECK(!map->is_prototype_map());
- DCHECK(!dead_target->is_prototype_map());
- DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
+ DCHECK(!map.is_prototype_map());
+ DCHECK(!dead_target.is_prototype_map());
+ DCHECK_EQ(map.raw_transitions(), HeapObjectReference::Weak(dead_target));
// Take ownership of the descriptor array.
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
- DescriptorArray descriptors = map->instance_descriptors();
- if (descriptors == dead_target->instance_descriptors() &&
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
+ DescriptorArray descriptors = map.instance_descriptors();
+ if (descriptors == dead_target.instance_descriptors() &&
number_of_own_descriptors > 0) {
TrimDescriptorArray(map, descriptors);
- DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
}
}
void MarkCompactCollector::FlushBytecodeFromSFI(
SharedFunctionInfo shared_info) {
- DCHECK(shared_info->HasBytecodeArray());
+ DCHECK(shared_info.HasBytecodeArray());
// Retain objects required for uncompiled data.
- String inferred_name = shared_info->inferred_name();
- int start_position = shared_info->StartPosition();
- int end_position = shared_info->EndPosition();
+ String inferred_name = shared_info.inferred_name();
+ int start_position = shared_info.StartPosition();
+ int end_position = shared_info.EndPosition();
- shared_info->DiscardCompiledMetadata(
+ shared_info.DiscardCompiledMetadata(
isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
RecordSlot(object, slot, target);
});
@@ -2048,9 +2053,9 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
UncompiledDataWithoutPreparseData::kSize);
// Replace bytecode array with an uncompiled data array.
- HeapObject compiled_data = shared_info->GetBytecodeArray();
- Address compiled_data_start = compiled_data->address();
- int compiled_data_size = compiled_data->Size();
+ HeapObject compiled_data = shared_info.GetBytecodeArray();
+ Address compiled_data_start = compiled_data.address();
+ int compiled_data_size = compiled_data.Size();
MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
// Clear any recorded slots for the compiled data as being invalid.
@@ -2063,14 +2068,14 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// Swap the map, using set_map_after_allocation to avoid verify heap checks
// which are not necessary since we are doing this during the GC atomic pause.
- compiled_data->set_map_after_allocation(
+ compiled_data.set_map_after_allocation(
ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
SKIP_WRITE_BARRIER);
// Create a filler object for any left over space in the bytecode array.
if (!heap()->IsLargeObject(compiled_data)) {
heap()->CreateFillerObjectAt(
- compiled_data->address() + UncompiledDataWithoutPreparseData::kSize,
+ compiled_data.address() + UncompiledDataWithoutPreparseData::kSize,
compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
ClearRecordedSlots::kNo);
}
@@ -2091,8 +2096,8 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
// Use the raw function data setter to avoid validity checks, since we're
// performing the unusual task of decompiling.
- shared_info->set_function_data(uncompiled_data);
- DCHECK(!shared_info->is_compiled());
+ shared_info.set_function_data(uncompiled_data);
+ DCHECK(!shared_info.is_compiled());
}
void MarkCompactCollector::ClearOldBytecodeCandidates() {
@@ -2104,7 +2109,7 @@ void MarkCompactCollector::ClearOldBytecodeCandidates() {
// If the BytecodeArray is dead, flush it, which will replace the field with
// an uncompiled data object.
if (!non_atomic_marking_state()->IsBlackOrGrey(
- flushing_candidate->GetBytecodeArray())) {
+ flushing_candidate.GetBytecodeArray())) {
FlushBytecodeFromSFI(flushing_candidate);
}
@@ -2121,26 +2126,25 @@ void MarkCompactCollector::ClearFlushedJsFunctions() {
JSFunction flushed_js_function;
while (weak_objects_.flushed_js_functions.Pop(kMainThread,
&flushed_js_function)) {
- flushed_js_function->ResetIfBytecodeFlushed();
+ flushed_js_function.ResetIfBytecodeFlushed();
}
}
void MarkCompactCollector::ClearFullMapTransitions() {
TransitionArray array;
while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
- int num_transitions = array->number_of_entries();
+ int num_transitions = array.number_of_entries();
if (num_transitions > 0) {
Map map;
// The array might contain "undefined" elements because it's not yet
// filled. Allow it.
- if (array->GetTargetIfExists(0, isolate(), &map)) {
+ if (array.GetTargetIfExists(0, isolate(), &map)) {
DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
- Map parent = Map::cast(map->constructor_or_backpointer());
+ Map parent = Map::cast(map.constructor_or_backpointer());
bool parent_is_alive =
non_atomic_marking_state()->IsBlackOrGrey(parent);
- DescriptorArray descriptors = parent_is_alive
- ? parent->instance_descriptors()
- : DescriptorArray();
+ DescriptorArray descriptors =
+ parent_is_alive ? parent.instance_descriptors() : DescriptorArray();
bool descriptors_owner_died =
CompactTransitionArray(parent, array, descriptors);
if (descriptors_owner_died) {
@@ -2154,30 +2158,30 @@ void MarkCompactCollector::ClearFullMapTransitions() {
bool MarkCompactCollector::CompactTransitionArray(Map map,
TransitionArray transitions,
DescriptorArray descriptors) {
- DCHECK(!map->is_prototype_map());
- int num_transitions = transitions->number_of_entries();
+ DCHECK(!map.is_prototype_map());
+ int num_transitions = transitions.number_of_entries();
bool descriptors_owner_died = false;
int transition_index = 0;
// Compact all live transitions to the left.
for (int i = 0; i < num_transitions; ++i) {
- Map target = transitions->GetTarget(i);
- DCHECK_EQ(target->constructor_or_backpointer(), map);
+ Map target = transitions.GetTarget(i);
+ DCHECK_EQ(target.constructor_or_backpointer(), map);
if (non_atomic_marking_state()->IsWhite(target)) {
if (!descriptors.is_null() &&
- target->instance_descriptors() == descriptors) {
- DCHECK(!target->is_prototype_map());
+ target.instance_descriptors() == descriptors) {
+ DCHECK(!target.is_prototype_map());
descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
- Name key = transitions->GetKey(i);
- transitions->SetKey(transition_index, key);
- HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
+ Name key = transitions.GetKey(i);
+ transitions.SetKey(transition_index, key);
+ HeapObjectSlot key_slot = transitions.GetKeySlot(transition_index);
RecordSlot(transitions, key_slot, key);
- MaybeObject raw_target = transitions->GetRawTarget(i);
- transitions->SetRawTarget(transition_index, raw_target);
+ MaybeObject raw_target = transitions.GetRawTarget(i);
+ transitions.SetRawTarget(transition_index, raw_target);
HeapObjectSlot target_slot =
- transitions->GetTargetSlot(transition_index);
+ transitions.GetTargetSlot(transition_index);
RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
}
transition_index++;
@@ -2192,23 +2196,23 @@ bool MarkCompactCollector::CompactTransitionArray(Map map,
// such that number_of_transitions() == 0. If this assumption changes,
// TransitionArray::Insert() will need to deal with the case that a transition
// array disappeared during GC.
- int trim = transitions->Capacity() - transition_index;
+ int trim = transitions.Capacity() - transition_index;
if (trim > 0) {
heap_->RightTrimWeakFixedArray(transitions,
trim * TransitionArray::kEntrySize);
- transitions->SetNumberOfTransitions(transition_index);
+ transitions.SetNumberOfTransitions(transition_index);
}
return descriptors_owner_died;
}
void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
int descriptors_to_trim) {
- int old_nof_all_descriptors = array->number_of_all_descriptors();
+ int old_nof_all_descriptors = array.number_of_all_descriptors();
int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
DCHECK_LT(0, descriptors_to_trim);
DCHECK_LE(0, new_nof_all_descriptors);
- Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
- Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
+ Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
+ Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
start, end,
SlotSet::PREFREE_EMPTY_BUCKETS);
@@ -2217,52 +2221,52 @@ void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
SlotSet::PREFREE_EMPTY_BUCKETS);
heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
ClearRecordedSlots::kNo);
- array->set_number_of_all_descriptors(new_nof_all_descriptors);
+ array.set_number_of_all_descriptors(new_nof_all_descriptors);
}
void MarkCompactCollector::TrimDescriptorArray(Map map,
DescriptorArray descriptors) {
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) {
DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
return;
}
// TODO(ulan): Trim only if slack is greater than some percentage threshold.
int to_trim =
- descriptors->number_of_all_descriptors() - number_of_own_descriptors;
+ descriptors.number_of_all_descriptors() - number_of_own_descriptors;
if (to_trim > 0) {
- descriptors->set_number_of_descriptors(number_of_own_descriptors);
+ descriptors.set_number_of_descriptors(number_of_own_descriptors);
RightTrimDescriptorArray(descriptors, to_trim);
TrimEnumCache(map, descriptors);
- descriptors->Sort();
+ descriptors.Sort();
if (FLAG_unbox_double_fields) {
- LayoutDescriptor layout_descriptor = map->layout_descriptor();
- layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
- number_of_own_descriptors);
- SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
+ LayoutDescriptor layout_descriptor = map.layout_descriptor();
+ layout_descriptor = layout_descriptor.Trim(heap_, map, descriptors,
+ number_of_own_descriptors);
+ SLOW_DCHECK(layout_descriptor.IsConsistentWithMap(map, true));
}
}
- DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
- map->set_owns_descriptors(true);
+ DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
+ map.set_owns_descriptors(true);
}
void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
- int live_enum = map->EnumLength();
+ int live_enum = map.EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
- live_enum = map->NumberOfEnumerableProperties();
+ live_enum = map.NumberOfEnumerableProperties();
}
- if (live_enum == 0) return descriptors->ClearEnumCache();
- EnumCache enum_cache = descriptors->enum_cache();
+ if (live_enum == 0) return descriptors.ClearEnumCache();
+ EnumCache enum_cache = descriptors.enum_cache();
- FixedArray keys = enum_cache->keys();
- int to_trim = keys->length() - live_enum;
+ FixedArray keys = enum_cache.keys();
+ int to_trim = keys.length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(keys, to_trim);
- FixedArray indices = enum_cache->indices();
- to_trim = indices->length() - live_enum;
+ FixedArray indices = enum_cache.indices();
+ to_trim = indices.length() - live_enum;
if (to_trim <= 0) return;
heap_->RightTrimFixedArray(indices, to_trim);
}
@@ -2272,19 +2276,19 @@ void MarkCompactCollector::ClearWeakCollections() {
EphemeronHashTable table;
while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
- for (int i = 0; i < table->Capacity(); i++) {
- HeapObject key = HeapObject::cast(table->KeyAt(i));
+ for (int i = 0; i < table.Capacity(); i++) {
+ HeapObject key = HeapObject::cast(table.KeyAt(i));
#ifdef VERIFY_HEAP
- Object value = table->ValueAt(i);
+ Object value = table.ValueAt(i);
- if (value->IsHeapObject()) {
+ if (value.IsHeapObject()) {
CHECK_IMPLIES(
non_atomic_marking_state()->IsBlackOrGrey(key),
non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value)));
}
#endif
if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
- table->RemoveEntry(i);
+ table.RemoveEntry(i);
}
}
}
@@ -2309,12 +2313,12 @@ void MarkCompactCollector::ClearWeakReferences() {
// as MaybeObjectSlot.
MaybeObjectSlot location(slot.second);
if ((*location)->GetHeapObjectIfWeak(&value)) {
- DCHECK(!value->IsCell());
+ DCHECK(!value.IsCell());
if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
// The value of the weak reference is alive.
RecordSlot(slot.first, HeapObjectSlot(location), value);
} else {
- if (value->IsMap()) {
+ if (value.IsMap()) {
// The map is non-live.
ClearPotentialSimpleMapTransition(Map::cast(value));
}
@@ -2330,9 +2334,9 @@ void MarkCompactCollector::ClearJSWeakRefs() {
}
JSWeakRef weak_ref;
while (weak_objects_.js_weak_refs.Pop(kMainThread, &weak_ref)) {
- HeapObject target = HeapObject::cast(weak_ref->target());
+ HeapObject target = HeapObject::cast(weak_ref.target());
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
- weak_ref->set_target(ReadOnlyRoots(isolate()).undefined_value());
+ weak_ref.set_target(ReadOnlyRoots(isolate()).undefined_value());
} else {
// The value of the JSWeakRef is alive.
ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
@@ -2341,17 +2345,17 @@ void MarkCompactCollector::ClearJSWeakRefs() {
}
WeakCell weak_cell;
while (weak_objects_.weak_cells.Pop(kMainThread, &weak_cell)) {
- HeapObject target = HeapObject::cast(weak_cell->target());
+ HeapObject target = HeapObject::cast(weak_cell.target());
if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
- DCHECK(!target->IsUndefined());
+ DCHECK(!target.IsUndefined());
// The value of the WeakCell is dead.
JSFinalizationGroup finalization_group =
- JSFinalizationGroup::cast(weak_cell->finalization_group());
- if (!finalization_group->scheduled_for_cleanup()) {
+ JSFinalizationGroup::cast(weak_cell.finalization_group());
+ if (!finalization_group.scheduled_for_cleanup()) {
heap()->AddDirtyJSFinalizationGroup(
finalization_group,
[](HeapObject object, ObjectSlot slot, Object target) {
- if (target->IsHeapObject()) {
+ if (target.IsHeapObject()) {
RecordSlot(object, slot, HeapObject::cast(target));
}
});
@@ -2359,14 +2363,14 @@ void MarkCompactCollector::ClearJSWeakRefs() {
// We're modifying the pointers in WeakCell and JSFinalizationGroup during
// GC; thus we need to record the slots it writes. The normal write
// barrier is not enough, since it's disabled before GC.
- weak_cell->Nullify(isolate(),
- [](HeapObject object, ObjectSlot slot, Object target) {
- if (target->IsHeapObject()) {
- RecordSlot(object, slot, HeapObject::cast(target));
- }
- });
- DCHECK(finalization_group->NeedsCleanup());
- DCHECK(finalization_group->scheduled_for_cleanup());
+ weak_cell.Nullify(isolate(),
+ [](HeapObject object, ObjectSlot slot, Object target) {
+ if (target.IsHeapObject()) {
+ RecordSlot(object, slot, HeapObject::cast(target));
+ }
+ });
+ DCHECK(finalization_group.NeedsCleanup());
+ DCHECK(finalization_group.scheduled_for_cleanup());
} else {
// The value of the WeakCell is alive.
ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
@@ -2411,7 +2415,9 @@ MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
if (RelocInfo::IsCodeTargetMode(rmode)) {
slot_type = CODE_ENTRY_SLOT;
} else {
- DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+ // Constant pools don't support compressed values at this time
+ // (this may change, therefore use a DCHECK).
+ DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
slot_type = OBJECT_SLOT;
}
}
@@ -2488,7 +2494,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
std::is_same<TSlot, FullMaybeObjectSlot>::value ||
std::is_same<TSlot, MaybeObjectSlot>::value,
"Only [Full]ObjectSlot and [Full]MaybeObjectSlot are expected here");
- MapWord map_word = heap_obj->map_word();
+ MapWord map_word = heap_obj.map_word();
if (map_word.IsForwardingAddress()) {
DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
@@ -2504,7 +2510,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
DCHECK(!Heap::InFromPage(target));
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
} else {
- DCHECK(heap_obj->map()->IsMap());
+ DCHECK(heap_obj.map().IsMap());
}
// OLD_TO_OLD slots are always removed after updating.
return REMOVE_SLOT;
@@ -2525,8 +2531,8 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot) {
template <AccessMode access_mode, typename TSlot>
static inline SlotCallbackResult UpdateStrongSlot(TSlot slot) {
- DCHECK(!HasWeakHeapObjectTag((*slot).ptr()));
typename TSlot::TObject obj = slot.Relaxed_Load();
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
HeapObject heap_obj;
if (obj.GetHeapObject(&heap_obj)) {
return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
@@ -2608,16 +2614,16 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
- MapWord map_word = HeapObject::cast(*p)->map_word();
+ MapWord map_word = HeapObject::cast(*p).map_word();
if (map_word.IsForwardingAddress()) {
String new_string = String::cast(map_word.ToForwardingAddress());
- if (new_string->IsExternalString()) {
+ if (new_string.IsExternalString()) {
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kExternalString,
Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
- ExternalString::cast(new_string)->ExternalPayloadSize());
+ ExternalString::cast(new_string).ExternalPayloadSize());
}
return new_string;
}
@@ -3024,9 +3030,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
Object RetainAs(Object object) override {
- if (object->IsHeapObject()) {
+ if (object.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(object);
- MapWord map_word = heap_object->map_word();
+ MapWord map_word = heap_object.map_word();
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
@@ -3057,7 +3063,7 @@ bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
if (iteration_mode == kClearMarkbits) {
marking_state->bitmap(chunk)->ClearRange(
chunk->AddressToMarkbitIndex(chunk->area_start()),
- chunk->AddressToMarkbitIndex(object->address()));
+ chunk->AddressToMarkbitIndex(object.address()));
*failed_object = object;
}
return false;
@@ -3079,7 +3085,7 @@ void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
if (chunk->IsLargePage()) {
HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
if (marking_state->IsBlack(object)) {
- const bool success = visitor->Visit(object, object->Size());
+ const bool success = visitor->Visit(object, object.Size());
USE(success);
DCHECK(success);
}
@@ -3108,7 +3114,7 @@ void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
if (chunk->IsLargePage()) {
HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
if (marking_state->IsGrey(object)) {
- const bool success = visitor->Visit(object, object->Size());
+ const bool success = visitor->Visit(object, object.Size());
USE(success);
DCHECK(success);
}
@@ -3184,11 +3190,6 @@ void MarkCompactCollector::Evacuate() {
new_space_evacuation_pages_.clear();
for (Page* p : old_space_evacuation_pages_) {
- // Important: skip list should be cleared only after roots were updated
- // because root iteration traverses the stack and might have to find
- // code objects from non-updated pc pointing into evacuation candidate.
- SkipList* list = p->skip_list();
- if (list != nullptr) list->Clear();
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
@@ -3265,9 +3266,9 @@ class ToSpaceUpdatingItem : public UpdatingItem {
PointersUpdatingVisitor visitor;
for (Address cur = start_; cur < end_;) {
HeapObject object = HeapObject::FromAddress(cur);
- Map map = object->map();
- int size = object->SizeFromMap(map);
- object->IterateBodyFast(map, size, &visitor);
+ Map map = object.map();
+ int size = object.SizeFromMap(map);
+ object.IterateBodyFast(map, size, &visitor);
cur += size;
}
}
@@ -3280,7 +3281,7 @@ class ToSpaceUpdatingItem : public UpdatingItem {
PointersUpdatingVisitor visitor;
for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
chunk_, marking_state_->bitmap(chunk_))) {
- object_and_size.first->IterateBodyFast(&visitor);
+ object_and_size.first.IterateBodyFast(&visitor);
}
}
@@ -3324,7 +3325,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return REMOVE_SLOT;
}
if (Heap::InFromPage(heap_object)) {
- MapWord map_word = heap_object->map_word();
+ MapWord map_word = heap_object.map_word();
if (map_word.IsForwardingAddress()) {
HeapObjectReference::Update(THeapObjectSlot(slot),
map_word.ToForwardingAddress());
@@ -3389,7 +3390,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
for (auto object_size : *chunk_->invalidated_slots()) {
HeapObject object = object_size.first;
int size = object_size.second;
- DCHECK_LE(object->SizeFromMap(object->map()), size);
+ DCHECK_LE(object.SizeFromMap(object.map()), size);
}
#endif
// The invalidated slots are not needed after old-to-old slots were
@@ -3587,9 +3588,9 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
for (auto iti = indices.begin(); iti != indices.end();) {
// EphemeronHashTable keys must be heap objects.
HeapObjectSlot key_slot(
- table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(*iti)));
+ table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(*iti)));
HeapObject key = key_slot.ToHeapObject();
- MapWord map_word = key->map_word();
+ MapWord map_word = key.map_word();
if (map_word.IsForwardingAddress()) {
key = map_word.ToForwardingAddress();
key_slot.StoreHeapObject(key);
@@ -3721,10 +3722,10 @@ void MarkCompactCollector::PostProcessEvacuationCandidates() {
// Remove outdated slots.
RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
- failed_object->address(),
+ failed_object.address(),
SlotSet::PREFREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
- failed_object->address());
+ failed_object.address());
// Recompute live bytes.
LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
// Re-record slots.
@@ -3769,7 +3770,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
}
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
- space->ClearStats();
+ space->ClearAllocatorState();
int will_be_swept = 0;
bool unused_page_present = false;
@@ -3842,7 +3843,7 @@ void MarkCompactCollector::MarkingWorklist::PrintWorklist(
int total_count = 0;
worklist->IterateGlobalPool([&count, &total_count](HeapObject obj) {
++total_count;
- count[obj->map()->instance_type()]++;
+ count[obj.map().instance_type()]++;
});
std::vector<std::pair<int, InstanceType>> rank;
rank.reserve(count.size());
@@ -4241,7 +4242,7 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
private:
V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
- if (!(*p)->IsHeapObject()) return;
+ if (!(*p).IsHeapObject()) return;
collector_->MarkRootObject(HeapObject::cast(*p));
}
MinorMarkCompactCollector* const collector_;
@@ -4319,7 +4320,7 @@ void MinorMarkCompactCollector::MakeIterable(
LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
HeapObject const object = object_and_size.first;
DCHECK(non_atomic_marking_state()->IsGrey(object));
- Address free_end = object->address();
+ Address free_end = object.address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
@@ -4332,8 +4333,8 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
- Map map = object->synchronized_map();
- int size = object->SizeFromMap(map);
+ Map map = object.synchronized_map();
+ int size = object.SizeFromMap(map);
free_start = free_end + size;
}
@@ -4373,14 +4374,14 @@ class YoungGenerationExternalStringTableCleaner : public RootVisitor {
// Visit all HeapObject pointers in [start, end).
for (FullObjectSlot p = start; p < end; ++p) {
Object o = *p;
- if (o->IsHeapObject()) {
+ if (o.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(o);
if (marking_state_->IsWhite(heap_object)) {
- if (o->IsExternalString()) {
+ if (o.IsExternalString()) {
heap_->FinalizeExternalString(String::cast(*p));
} else {
// The original external string may have been internalized.
- DCHECK(o->IsThinString());
+ DCHECK(o.IsThinString());
}
// Set the entry to the_hole_value (as deleted).
p.store(ReadOnlyRoots(heap_).the_hole_value());
@@ -4695,8 +4696,8 @@ void MinorMarkCompactCollector::ProcessMarkingWorklist() {
MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
HeapObject object;
while (marking_worklist.Pop(&object)) {
- DCHECK(!object->IsFiller());
- DCHECK(object->IsHeapObject());
+ DCHECK(!object.IsFiller());
+ DCHECK(object.IsHeapObject());
DCHECK(heap()->Contains(object));
DCHECK(non_atomic_marking_state()->IsGrey(object));
main_marking_visitor()->Visit(object);
@@ -4827,7 +4828,6 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
break;
case kObjectsOldToOld:
UNREACHABLE();
- break;
}
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 566a7a53c4..8c6a2b4cc6 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -33,7 +33,7 @@ template <typename ConcreteState, AccessMode access_mode>
class MarkingStateBase {
public:
V8_INLINE MarkBit MarkBitFrom(HeapObject obj) {
- return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj->ptr());
+ return MarkBitFrom(MemoryChunk::FromHeapObject(obj), obj.ptr());
}
// {addr} may be tagged or aligned.
@@ -601,7 +601,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void AbortCompaction();
static inline bool IsOnEvacuationCandidate(Object obj) {
- return Page::FromAddress(obj->ptr())->IsEvacuationCandidate();
+ return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
}
static bool IsOnEvacuationCandidate(MaybeObject obj);
@@ -619,6 +619,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
HeapObject target);
V8_INLINE static void RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target);
+ V8_INLINE static void RecordSlot(MemoryChunk* source_page,
+ HeapObjectSlot slot, HeapObject target);
void RecordLiveSlotsOnPage(Page* page);
void UpdateSlots(SlotsBuffer* buffer);
@@ -701,7 +703,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
#ifdef VERIFY_HEAP
void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
- void VerifyMarkbitsAreDirty(PagedSpace* space);
+ void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
void VerifyMarkbitsAreClean(PagedSpace* space);
void VerifyMarkbitsAreClean(NewSpace* space);
void VerifyMarkbitsAreClean(LargeObjectSpace* space);
@@ -907,9 +909,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
- // Counts the number of mark-compact collections. This is used for marking
- // descriptor arrays. See NumberOfMarkedDescriptors. Only lower two bits are
- // used, so it is okay if this counter overflows and wraps around.
+ // Counts the number of major mark-compact collections. The counter is
+ // incremented right after marking. This is used for:
+ // - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
+ // two bits are used, so it is okay if this counter overflows and wraps
+ // around.
unsigned epoch_ = 0;
friend class FullEvacuator;
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index 612cc78601..6d73b0c4b4 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -6,7 +6,7 @@
#define V8_HEAP_MARKING_H_
#include "src/base/atomic-utils.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 6d0dfe5012..704e656796 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -4,12 +4,12 @@
#include "src/heap/memory-reducer.h"
-#include "src/flags.h"
+#include "src/flags/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
-#include "src/utils.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -39,7 +39,8 @@ void MemoryReducer::TimerTask::RunInternal() {
Event event;
double time_ms = heap->MonotonicallyIncreasingTimeInMs();
heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
- heap->OldGenerationAllocationCounter());
+ heap->OldGenerationAllocationCounter(),
+ heap->EmbedderAllocationCounter());
bool low_allocation_rate = heap->HasLowAllocationRate();
bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
if (FLAG_trace_gc_verbose) {
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 2b0a8b81bb..ae5ff321d3 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -7,8 +7,8 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
-#include "src/cancelable-task.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index d920b25bd3..033f4fc6e9 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -7,15 +7,15 @@
#include <unordered_set>
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/compilation-cache.h"
-#include "src/counters.h"
-#include "src/globals.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
-#include "src/isolate.h"
-#include "src/memcopy.h"
+#include "src/logging/counters.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-array-inl.h"
@@ -23,7 +23,8 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/templates.h"
-#include "src/ostreams.h"
+#include "src/utils/memcopy.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -43,17 +44,17 @@ class FieldStatsCollector : public ObjectVisitor {
void RecordStats(HeapObject host) {
size_t old_pointer_fields_count = *tagged_fields_count_;
- host->Iterate(this);
+ host.Iterate(this);
size_t tagged_fields_count_in_object =
*tagged_fields_count_ - old_pointer_fields_count;
- int object_size_in_words = host->Size() / kTaggedSize;
+ int object_size_in_words = host.Size() / kTaggedSize;
DCHECK_LE(tagged_fields_count_in_object, object_size_in_words);
size_t raw_fields_count_in_object =
object_size_in_words - tagged_fields_count_in_object;
- if (host->IsJSObject()) {
- JSObjectFieldStats field_stats = GetInobjectFieldStats(host->map());
+ if (host.IsJSObject()) {
+ JSObjectFieldStats field_stats = GetInobjectFieldStats(host.map());
// Embedder fields are already included into pointer words.
DCHECK_LE(field_stats.embedded_fields_count_,
tagged_fields_count_in_object);
@@ -116,17 +117,17 @@ FieldStatsCollector::GetInobjectFieldStats(Map map) {
// Iterate descriptor array and calculate stats.
JSObjectFieldStats stats;
stats.embedded_fields_count_ = JSObject::GetEmbedderFieldCount(map);
- if (!map->is_dictionary_map()) {
- int nof = map->NumberOfOwnDescriptors();
- DescriptorArray descriptors = map->instance_descriptors();
+ if (!map.is_dictionary_map()) {
+ int nof = map.NumberOfOwnDescriptors();
+ DescriptorArray descriptors = map.instance_descriptors();
for (int descriptor = 0; descriptor < nof; descriptor++) {
- PropertyDetails details = descriptors->GetDetails(descriptor);
+ PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == kField) {
FieldIndex index = FieldIndex::ForDescriptor(map, descriptor);
// Stop on first out-of-object field.
if (!index.is_inobject()) break;
if (details.representation().IsDouble() &&
- map->IsUnboxedDoubleField(index)) {
+ map.IsUnboxedDoubleField(index)) {
++stats.unboxed_double_fields_count_;
}
}
@@ -430,7 +431,7 @@ ObjectStatsCollectorImpl::ObjectStatsCollectorImpl(Heap* heap,
bool ObjectStatsCollectorImpl::ShouldRecordObject(HeapObject obj,
CowMode check_cow_array) {
- if (obj->IsFixedArrayExact()) {
+ if (obj.IsFixedArrayExact()) {
FixedArray fixed_array = FixedArray::cast(obj);
bool cow_check = check_cow_array == kIgnoreCow || !IsCowArray(fixed_array);
return CanRecordFixedArray(fixed_array) && cow_check;
@@ -444,16 +445,16 @@ void ObjectStatsCollectorImpl::RecordHashTableVirtualObjectStats(
HeapObject parent, HashTable<Derived, Shape> hash_table,
ObjectStats::VirtualInstanceType type) {
size_t over_allocated =
- (hash_table->Capacity() - (hash_table->NumberOfElements() +
- hash_table->NumberOfDeletedElements())) *
+ (hash_table.Capacity() -
+ (hash_table.NumberOfElements() + hash_table.NumberOfDeletedElements())) *
HashTable<Derived, Shape>::kEntrySize * kTaggedSize;
- RecordVirtualObjectStats(parent, hash_table, type, hash_table->Size(),
+ RecordVirtualObjectStats(parent, hash_table, type, hash_table.Size(),
over_allocated);
}
bool ObjectStatsCollectorImpl::RecordSimpleVirtualObjectStats(
HeapObject parent, HeapObject obj, ObjectStats::VirtualInstanceType type) {
- return RecordVirtualObjectStats(parent, obj, type, obj->Size(),
+ return RecordVirtualObjectStats(parent, obj, type, obj.Size(),
ObjectStats::kNoOverAllocation, kCheckCow);
}
@@ -483,29 +484,29 @@ void ObjectStatsCollectorImpl::RecordExternalResourceStats(
void ObjectStatsCollectorImpl::RecordVirtualAllocationSiteDetails(
AllocationSite site) {
- if (!site->PointsToLiteral()) return;
- JSObject boilerplate = site->boilerplate();
- if (boilerplate->IsJSArray()) {
+ if (!site.PointsToLiteral()) return;
+ JSObject boilerplate = site.boilerplate();
+ if (boilerplate.IsJSArray()) {
RecordSimpleVirtualObjectStats(site, boilerplate,
ObjectStats::JS_ARRAY_BOILERPLATE_TYPE);
// Array boilerplates cannot have properties.
} else {
RecordVirtualObjectStats(
site, boilerplate, ObjectStats::JS_OBJECT_BOILERPLATE_TYPE,
- boilerplate->Size(), ObjectStats::kNoOverAllocation);
- if (boilerplate->HasFastProperties()) {
+ boilerplate.Size(), ObjectStats::kNoOverAllocation);
+ if (boilerplate.HasFastProperties()) {
// We'll mis-classify the empty_property_array here. Given that there is a
// single instance, this is negligible.
- PropertyArray properties = boilerplate->property_array();
+ PropertyArray properties = boilerplate.property_array();
RecordSimpleVirtualObjectStats(
site, properties, ObjectStats::BOILERPLATE_PROPERTY_ARRAY_TYPE);
} else {
- NameDictionary properties = boilerplate->property_dictionary();
+ NameDictionary properties = boilerplate.property_dictionary();
RecordSimpleVirtualObjectStats(
site, properties, ObjectStats::BOILERPLATE_PROPERTY_DICTIONARY_TYPE);
}
}
- FixedArrayBase elements = boilerplate->elements();
+ FixedArrayBase elements = boilerplate.elements();
RecordSimpleVirtualObjectStats(site, elements,
ObjectStats::BOILERPLATE_ELEMENTS_TYPE);
}
@@ -514,14 +515,14 @@ void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
FunctionTemplateInfo fti) {
// named_property_handler and indexed_property_handler are recorded as
// INTERCEPTOR_INFO_TYPE.
- if (!fti->call_code()->IsUndefined(isolate())) {
+ if (!fti.call_code().IsUndefined(isolate())) {
RecordSimpleVirtualObjectStats(
- fti, CallHandlerInfo::cast(fti->call_code()),
+ fti, CallHandlerInfo::cast(fti.call_code()),
ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
- if (!fti->GetInstanceCallHandler()->IsUndefined(isolate())) {
+ if (!fti.GetInstanceCallHandler().IsUndefined(isolate())) {
RecordSimpleVirtualObjectStats(
- fti, CallHandlerInfo::cast(fti->GetInstanceCallHandler()),
+ fti, CallHandlerInfo::cast(fti.GetInstanceCallHandler()),
ObjectStats::FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE);
}
}
@@ -529,62 +530,61 @@ void ObjectStatsCollectorImpl::RecordVirtualFunctionTemplateInfoDetails(
void ObjectStatsCollectorImpl::RecordVirtualJSGlobalObjectDetails(
JSGlobalObject object) {
// Properties.
- GlobalDictionary properties = object->global_dictionary();
+ GlobalDictionary properties = object.global_dictionary();
RecordHashTableVirtualObjectStats(object, properties,
ObjectStats::GLOBAL_PROPERTIES_TYPE);
// Elements.
- FixedArrayBase elements = object->elements();
+ FixedArrayBase elements = object.elements();
RecordSimpleVirtualObjectStats(object, elements,
ObjectStats::GLOBAL_ELEMENTS_TYPE);
}
void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject object) {
// JSGlobalObject is recorded separately.
- if (object->IsJSGlobalObject()) return;
+ if (object.IsJSGlobalObject()) return;
// Uncompiled JSFunction has a separate type.
- if (object->IsJSFunction() && !JSFunction::cast(object)->is_compiled()) {
+ if (object.IsJSFunction() && !JSFunction::cast(object).is_compiled()) {
RecordSimpleVirtualObjectStats(HeapObject(), object,
ObjectStats::JS_UNCOMPILED_FUNCTION_TYPE);
}
// Properties.
- if (object->HasFastProperties()) {
- PropertyArray properties = object->property_array();
+ if (object.HasFastProperties()) {
+ PropertyArray properties = object.property_array();
if (properties != ReadOnlyRoots(heap_).empty_property_array()) {
- size_t over_allocated =
- object->map()->UnusedPropertyFields() * kTaggedSize;
+ size_t over_allocated = object.map().UnusedPropertyFields() * kTaggedSize;
RecordVirtualObjectStats(object, properties,
- object->map()->is_prototype_map()
+ object.map().is_prototype_map()
? ObjectStats::PROTOTYPE_PROPERTY_ARRAY_TYPE
: ObjectStats::OBJECT_PROPERTY_ARRAY_TYPE,
- properties->Size(), over_allocated);
+ properties.Size(), over_allocated);
}
} else {
- NameDictionary properties = object->property_dictionary();
+ NameDictionary properties = object.property_dictionary();
RecordHashTableVirtualObjectStats(
object, properties,
- object->map()->is_prototype_map()
+ object.map().is_prototype_map()
? ObjectStats::PROTOTYPE_PROPERTY_DICTIONARY_TYPE
: ObjectStats::OBJECT_PROPERTY_DICTIONARY_TYPE);
}
// Elements.
- FixedArrayBase elements = object->elements();
- if (object->HasDictionaryElements()) {
+ FixedArrayBase elements = object.elements();
+ if (object.HasDictionaryElements()) {
RecordHashTableVirtualObjectStats(
object, NumberDictionary::cast(elements),
- object->IsJSArray() ? ObjectStats::ARRAY_DICTIONARY_ELEMENTS_TYPE
- : ObjectStats::OBJECT_DICTIONARY_ELEMENTS_TYPE);
- } else if (object->IsJSArray()) {
+ object.IsJSArray() ? ObjectStats::ARRAY_DICTIONARY_ELEMENTS_TYPE
+ : ObjectStats::OBJECT_DICTIONARY_ELEMENTS_TYPE);
+ } else if (object.IsJSArray()) {
if (elements != ReadOnlyRoots(heap_).empty_fixed_array()) {
size_t element_size =
- (elements->Size() - FixedArrayBase::kHeaderSize) / elements->length();
- uint32_t length = JSArray::cast(object)->length()->Number();
- size_t over_allocated = (elements->length() - length) * element_size;
+ (elements.Size() - FixedArrayBase::kHeaderSize) / elements.length();
+ uint32_t length = JSArray::cast(object).length().Number();
+ size_t over_allocated = (elements.length() - length) * element_size;
RecordVirtualObjectStats(object, elements,
ObjectStats::ARRAY_ELEMENTS_TYPE,
- elements->Size(), over_allocated);
+ elements.Size(), over_allocated);
}
} else {
RecordSimpleVirtualObjectStats(object, elements,
@@ -592,10 +592,10 @@ void ObjectStatsCollectorImpl::RecordVirtualJSObjectDetails(JSObject object) {
}
// JSCollections.
- if (object->IsJSCollection()) {
+ if (object.IsJSCollection()) {
// TODO(bmeurer): Properly compute over-allocation here.
RecordSimpleVirtualObjectStats(
- object, FixedArray::cast(JSCollection::cast(object)->table()),
+ object, FixedArray::cast(JSCollection::cast(object).table()),
ObjectStats::JS_COLLECTION_TABLE_TYPE);
}
}
@@ -656,30 +656,30 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
size_t calculated_size = 0;
// Log the feedback vector's header (fixed fields).
- size_t header_size = vector->slots_start().address() - vector->address();
+ size_t header_size = vector.slots_start().address() - vector.address();
stats_->RecordVirtualObjectStats(ObjectStats::FEEDBACK_VECTOR_HEADER_TYPE,
header_size, ObjectStats::kNoOverAllocation);
calculated_size += header_size;
// Iterate over the feedback slots and log each one.
- if (!vector->shared_function_info()->HasFeedbackMetadata()) return;
+ if (!vector.shared_function_info().HasFeedbackMetadata()) return;
- FeedbackMetadataIterator it(vector->metadata());
+ FeedbackMetadataIterator it(vector.metadata());
while (it.HasNext()) {
FeedbackSlot slot = it.Next();
// Log the entry (or entries) taken up by this slot.
size_t slot_size = it.entry_size() * kTaggedSize;
stats_->RecordVirtualObjectStats(
- GetFeedbackSlotType(vector->Get(slot), it.kind(), heap_->isolate()),
+ GetFeedbackSlotType(vector.Get(slot), it.kind(), heap_->isolate()),
slot_size, ObjectStats::kNoOverAllocation);
calculated_size += slot_size;
// Log the monomorphic/polymorphic helper objects that this slot owns.
for (int i = 0; i < it.entry_size(); i++) {
- MaybeObject raw_object = vector->get(slot.ToInt() + i);
+ MaybeObject raw_object = vector.get(slot.ToInt() + i);
HeapObject object;
if (raw_object->GetHeapObject(&object)) {
- if (object->IsCell() || object->IsWeakFixedArray()) {
+ if (object.IsCell() || object.IsWeakFixedArray()) {
RecordSimpleVirtualObjectStats(
vector, object, ObjectStats::FEEDBACK_VECTOR_ENTRY_TYPE);
}
@@ -687,65 +687,65 @@ void ObjectStatsCollectorImpl::RecordVirtualFeedbackVectorDetails(
}
}
- CHECK_EQ(calculated_size, vector->Size());
+ CHECK_EQ(calculated_size, vector.Size());
}
void ObjectStatsCollectorImpl::RecordVirtualFixedArrayDetails(
FixedArray array) {
if (IsCowArray(array)) {
RecordVirtualObjectStats(HeapObject(), array, ObjectStats::COW_ARRAY_TYPE,
- array->Size(), ObjectStats::kNoOverAllocation,
+ array.Size(), ObjectStats::kNoOverAllocation,
kIgnoreCow);
}
}
void ObjectStatsCollectorImpl::CollectStatistics(
HeapObject obj, Phase phase, CollectFieldStats collect_field_stats) {
- Map map = obj->map();
+ Map map = obj.map();
switch (phase) {
case kPhase1:
- if (obj->IsFeedbackVector()) {
+ if (obj.IsFeedbackVector()) {
RecordVirtualFeedbackVectorDetails(FeedbackVector::cast(obj));
- } else if (obj->IsMap()) {
+ } else if (obj.IsMap()) {
RecordVirtualMapDetails(Map::cast(obj));
- } else if (obj->IsBytecodeArray()) {
+ } else if (obj.IsBytecodeArray()) {
RecordVirtualBytecodeArrayDetails(BytecodeArray::cast(obj));
- } else if (obj->IsCode()) {
+ } else if (obj.IsCode()) {
RecordVirtualCodeDetails(Code::cast(obj));
- } else if (obj->IsFunctionTemplateInfo()) {
+ } else if (obj.IsFunctionTemplateInfo()) {
RecordVirtualFunctionTemplateInfoDetails(
FunctionTemplateInfo::cast(obj));
- } else if (obj->IsJSGlobalObject()) {
+ } else if (obj.IsJSGlobalObject()) {
RecordVirtualJSGlobalObjectDetails(JSGlobalObject::cast(obj));
- } else if (obj->IsJSObject()) {
+ } else if (obj.IsJSObject()) {
// This phase needs to come after RecordVirtualAllocationSiteDetails
// to properly split among boilerplates.
RecordVirtualJSObjectDetails(JSObject::cast(obj));
- } else if (obj->IsSharedFunctionInfo()) {
+ } else if (obj.IsSharedFunctionInfo()) {
RecordVirtualSharedFunctionInfoDetails(SharedFunctionInfo::cast(obj));
- } else if (obj->IsContext()) {
+ } else if (obj.IsContext()) {
RecordVirtualContext(Context::cast(obj));
- } else if (obj->IsScript()) {
+ } else if (obj.IsScript()) {
RecordVirtualScriptDetails(Script::cast(obj));
- } else if (obj->IsArrayBoilerplateDescription()) {
+ } else if (obj.IsArrayBoilerplateDescription()) {
RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription::cast(obj));
- } else if (obj->IsFixedArrayExact()) {
+ } else if (obj.IsFixedArrayExact()) {
// Has to go last as it triggers too eagerly.
RecordVirtualFixedArrayDetails(FixedArray::cast(obj));
}
break;
case kPhase2:
- if (obj->IsExternalString()) {
+ if (obj.IsExternalString()) {
// This has to be in Phase2 to avoid conflicting with recording Script
// sources. We still want to run RecordObjectStats after though.
RecordVirtualExternalStringDetails(ExternalString::cast(obj));
}
size_t over_allocated = ObjectStats::kNoOverAllocation;
- if (obj->IsJSObject()) {
- over_allocated = map->instance_size() - map->UsedInstanceSize();
+ if (obj.IsJSObject()) {
+ over_allocated = map.instance_size() - map.UsedInstanceSize();
}
- RecordObjectStats(obj, map->instance_type(), obj->Size(), over_allocated);
+ RecordObjectStats(obj, map.instance_type(), obj.Size(), over_allocated);
if (collect_field_stats == CollectFieldStats::kYes) {
field_stats_collector_.RecordStats(obj);
}
@@ -756,10 +756,10 @@ void ObjectStatsCollectorImpl::CollectStatistics(
void ObjectStatsCollectorImpl::CollectGlobalStatistics() {
// Iterate boilerplates first to disambiguate them from regular JS objects.
Object list = heap_->allocation_sites_list();
- while (list->IsAllocationSite()) {
+ while (list.IsAllocationSite()) {
AllocationSite site = AllocationSite::cast(list);
RecordVirtualAllocationSiteDetails(site);
- list = site->weak_next();
+ list = site.weak_next();
}
// FixedArray.
@@ -804,7 +804,7 @@ bool ObjectStatsCollectorImpl::CanRecordFixedArray(FixedArrayBase array) {
}
bool ObjectStatsCollectorImpl::IsCowArray(FixedArrayBase array) {
- return array->map() == ReadOnlyRoots(heap_).fixed_cow_array_map();
+ return array.map() == ReadOnlyRoots(heap_).fixed_cow_array_map();
}
bool ObjectStatsCollectorImpl::SameLiveness(HeapObject obj1, HeapObject obj2) {
@@ -819,57 +819,57 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
// to get a better picture of what's going on in MapSpace. This
// method computes the virtual instance type to use for a given map,
// using MAP_TYPE for regular maps that aren't special in any way.
- if (map->is_prototype_map()) {
- if (map->is_dictionary_map()) {
+ if (map.is_prototype_map()) {
+ if (map.is_dictionary_map()) {
RecordSimpleVirtualObjectStats(
HeapObject(), map, ObjectStats::MAP_PROTOTYPE_DICTIONARY_TYPE);
- } else if (map->is_abandoned_prototype_map()) {
+ } else if (map.is_abandoned_prototype_map()) {
RecordSimpleVirtualObjectStats(HeapObject(), map,
ObjectStats::MAP_ABANDONED_PROTOTYPE_TYPE);
} else {
RecordSimpleVirtualObjectStats(HeapObject(), map,
ObjectStats::MAP_PROTOTYPE_TYPE);
}
- } else if (map->is_deprecated()) {
+ } else if (map.is_deprecated()) {
RecordSimpleVirtualObjectStats(HeapObject(), map,
ObjectStats::MAP_DEPRECATED_TYPE);
- } else if (map->is_dictionary_map()) {
+ } else if (map.is_dictionary_map()) {
RecordSimpleVirtualObjectStats(HeapObject(), map,
ObjectStats::MAP_DICTIONARY_TYPE);
- } else if (map->is_stable()) {
+ } else if (map.is_stable()) {
RecordSimpleVirtualObjectStats(HeapObject(), map,
ObjectStats::MAP_STABLE_TYPE);
} else {
// This will be logged as MAP_TYPE in Phase2.
}
- DescriptorArray array = map->instance_descriptors();
- if (map->owns_descriptors() &&
+ DescriptorArray array = map.instance_descriptors();
+ if (map.owns_descriptors() &&
array != ReadOnlyRoots(heap_).empty_descriptor_array()) {
// Generally DescriptorArrays have their own instance type already
// (DESCRIPTOR_ARRAY_TYPE), but we'd like to be able to tell which
// of those are for (abandoned) prototypes, and which of those are
// owned by deprecated maps.
- if (map->is_prototype_map()) {
+ if (map.is_prototype_map()) {
RecordSimpleVirtualObjectStats(
map, array, ObjectStats::PROTOTYPE_DESCRIPTOR_ARRAY_TYPE);
- } else if (map->is_deprecated()) {
+ } else if (map.is_deprecated()) {
RecordSimpleVirtualObjectStats(
map, array, ObjectStats::DEPRECATED_DESCRIPTOR_ARRAY_TYPE);
}
- EnumCache enum_cache = array->enum_cache();
- RecordSimpleVirtualObjectStats(array, enum_cache->keys(),
+ EnumCache enum_cache = array.enum_cache();
+ RecordSimpleVirtualObjectStats(array, enum_cache.keys(),
ObjectStats::ENUM_KEYS_CACHE_TYPE);
- RecordSimpleVirtualObjectStats(array, enum_cache->indices(),
+ RecordSimpleVirtualObjectStats(array, enum_cache.indices(),
ObjectStats::ENUM_INDICES_CACHE_TYPE);
}
- if (map->is_prototype_map()) {
- if (map->prototype_info()->IsPrototypeInfo()) {
- PrototypeInfo info = PrototypeInfo::cast(map->prototype_info());
- Object users = info->prototype_users();
- if (users->IsWeakFixedArray()) {
+ if (map.is_prototype_map()) {
+ if (map.prototype_info().IsPrototypeInfo()) {
+ PrototypeInfo info = PrototypeInfo::cast(map.prototype_info());
+ Object users = info.prototype_users();
+ if (users.IsWeakFixedArray()) {
RecordSimpleVirtualObjectStats(map, WeakArrayList::cast(users),
ObjectStats::PROTOTYPE_USERS_TYPE);
}
@@ -879,29 +879,29 @@ void ObjectStatsCollectorImpl::RecordVirtualMapDetails(Map map) {
void ObjectStatsCollectorImpl::RecordVirtualScriptDetails(Script script) {
RecordSimpleVirtualObjectStats(
- script, script->shared_function_infos(),
+ script, script.shared_function_infos(),
ObjectStats::SCRIPT_SHARED_FUNCTION_INFOS_TYPE);
// Log the size of external source code.
- Object raw_source = script->source();
- if (raw_source->IsExternalString()) {
+ Object raw_source = script.source();
+ if (raw_source.IsExternalString()) {
// The contents of external strings aren't on the heap, so we have to record
// them manually. The on-heap String object is recorded independently in
// the normal pass.
ExternalString string = ExternalString::cast(raw_source);
- Address resource = string->resource_as_address();
- size_t off_heap_size = string->ExternalPayloadSize();
+ Address resource = string.resource_as_address();
+ size_t off_heap_size = string.ExternalPayloadSize();
RecordExternalResourceStats(
resource,
- string->IsOneByteRepresentation()
+ string.IsOneByteRepresentation()
? ObjectStats::SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE
: ObjectStats::SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE,
off_heap_size);
- } else if (raw_source->IsString()) {
+ } else if (raw_source.IsString()) {
String source = String::cast(raw_source);
RecordSimpleVirtualObjectStats(
script, source,
- source->IsOneByteRepresentation()
+ source.IsOneByteRepresentation()
? ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_ONE_BYTE_TYPE
: ObjectStats::SCRIPT_SOURCE_NON_EXTERNAL_TWO_BYTE_TYPE);
}
@@ -911,11 +911,11 @@ void ObjectStatsCollectorImpl::RecordVirtualExternalStringDetails(
ExternalString string) {
// Track the external string resource size in a separate category.
- Address resource = string->resource_as_address();
- size_t off_heap_size = string->ExternalPayloadSize();
+ Address resource = string.resource_as_address();
+ size_t off_heap_size = string.ExternalPayloadSize();
RecordExternalResourceStats(
resource,
- string->IsOneByteRepresentation()
+ string.IsOneByteRepresentation()
? ObjectStats::STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE
: ObjectStats::STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE,
off_heap_size);
@@ -924,7 +924,7 @@ void ObjectStatsCollectorImpl::RecordVirtualExternalStringDetails(
void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
SharedFunctionInfo info) {
// Uncompiled SharedFunctionInfo gets its own category.
- if (!info->is_compiled()) {
+ if (!info.is_compiled()) {
RecordSimpleVirtualObjectStats(
HeapObject(), info, ObjectStats::UNCOMPILED_SHARED_FUNCTION_INFO_TYPE);
}
@@ -933,7 +933,7 @@ void ObjectStatsCollectorImpl::RecordVirtualSharedFunctionInfoDetails(
void ObjectStatsCollectorImpl::RecordVirtualArrayBoilerplateDescription(
ArrayBoilerplateDescription description) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- description, description->constant_elements(),
+ description, description.constant_elements(),
ObjectStats::ARRAY_BOILERPLATE_DESCRIPTION_ELEMENTS_TYPE);
}
@@ -942,11 +942,11 @@ void ObjectStatsCollectorImpl::
HeapObject parent, HeapObject object,
ObjectStats::VirtualInstanceType type) {
if (!RecordSimpleVirtualObjectStats(parent, object, type)) return;
- if (object->IsFixedArrayExact()) {
+ if (object.IsFixedArrayExact()) {
FixedArray array = FixedArray::cast(object);
- for (int i = 0; i < array->length(); i++) {
- Object entry = array->get(i);
- if (!entry->IsHeapObject()) continue;
+ for (int i = 0; i < array.length(); i++) {
+ Object entry = array.get(i);
+ if (!entry.IsHeapObject()) continue;
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
array, HeapObject::cast(entry), type);
}
@@ -956,24 +956,24 @@ void ObjectStatsCollectorImpl::
void ObjectStatsCollectorImpl::RecordVirtualBytecodeArrayDetails(
BytecodeArray bytecode) {
RecordSimpleVirtualObjectStats(
- bytecode, bytecode->constant_pool(),
+ bytecode, bytecode.constant_pool(),
ObjectStats::BYTECODE_ARRAY_CONSTANT_POOL_TYPE);
// FixedArrays on constant pool are used for holding descriptor information.
// They are shared with optimized code.
- FixedArray constant_pool = FixedArray::cast(bytecode->constant_pool());
- for (int i = 0; i < constant_pool->length(); i++) {
- Object entry = constant_pool->get(i);
- if (entry->IsFixedArrayExact()) {
+ FixedArray constant_pool = FixedArray::cast(bytecode.constant_pool());
+ for (int i = 0; i < constant_pool.length(); i++) {
+ Object entry = constant_pool.get(i);
+ if (entry.IsFixedArrayExact()) {
RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
constant_pool, HeapObject::cast(entry),
ObjectStats::EMBEDDED_OBJECT_TYPE);
}
}
RecordSimpleVirtualObjectStats(
- bytecode, bytecode->handler_table(),
+ bytecode, bytecode.handler_table(),
ObjectStats::BYTECODE_ARRAY_HANDLER_TABLE_TYPE);
- if (bytecode->HasSourcePositionTable()) {
- RecordSimpleVirtualObjectStats(bytecode, bytecode->SourcePositionTable(),
+ if (bytecode.HasSourcePositionTable()) {
+ RecordSimpleVirtualObjectStats(bytecode, bytecode.SourcePositionTable(),
ObjectStats::SOURCE_POSITION_TABLE_TYPE);
}
}
@@ -998,50 +998,48 @@ ObjectStats::VirtualInstanceType CodeKindToVirtualInstanceType(
void ObjectStatsCollectorImpl::RecordVirtualCodeDetails(Code code) {
RecordSimpleVirtualObjectStats(HeapObject(), code,
- CodeKindToVirtualInstanceType(code->kind()));
- RecordSimpleVirtualObjectStats(code, code->deoptimization_data(),
+ CodeKindToVirtualInstanceType(code.kind()));
+ RecordSimpleVirtualObjectStats(code, code.deoptimization_data(),
ObjectStats::DEOPTIMIZATION_DATA_TYPE);
- RecordSimpleVirtualObjectStats(code, code->relocation_info(),
+ RecordSimpleVirtualObjectStats(code, code.relocation_info(),
ObjectStats::RELOC_INFO_TYPE);
- Object source_position_table = code->source_position_table();
- if (source_position_table->IsSourcePositionTableWithFrameCache()) {
+ Object source_position_table = code.source_position_table();
+ if (source_position_table.IsSourcePositionTableWithFrameCache()) {
RecordSimpleVirtualObjectStats(
code,
SourcePositionTableWithFrameCache::cast(source_position_table)
- ->source_position_table(),
+ .source_position_table(),
ObjectStats::SOURCE_POSITION_TABLE_TYPE);
- } else if (source_position_table->IsHeapObject()) {
+ } else if (source_position_table.IsHeapObject()) {
RecordSimpleVirtualObjectStats(code,
HeapObject::cast(source_position_table),
ObjectStats::SOURCE_POSITION_TABLE_TYPE);
}
- if (code->kind() == Code::Kind::OPTIMIZED_FUNCTION) {
+ if (code.kind() == Code::Kind::OPTIMIZED_FUNCTION) {
DeoptimizationData input_data =
- DeoptimizationData::cast(code->deoptimization_data());
- if (input_data->length() > 0) {
- RecordSimpleVirtualObjectStats(code->deoptimization_data(),
- input_data->LiteralArray(),
+ DeoptimizationData::cast(code.deoptimization_data());
+ if (input_data.length() > 0) {
+ RecordSimpleVirtualObjectStats(code.deoptimization_data(),
+ input_data.LiteralArray(),
ObjectStats::OPTIMIZED_CODE_LITERALS_TYPE);
}
}
- int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ int const mode_mask = RelocInfo::EmbeddedObjectModeMask();
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- Object target = it.rinfo()->target_object();
- if (target->IsFixedArrayExact()) {
- RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
- code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
- }
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
+ Object target = it.rinfo()->target_object();
+ if (target.IsFixedArrayExact()) {
+ RecordVirtualObjectsForConstantPoolOrEmbeddedObjects(
+ code, HeapObject::cast(target), ObjectStats::EMBEDDED_OBJECT_TYPE);
}
}
}
void ObjectStatsCollectorImpl::RecordVirtualContext(Context context) {
- if (context->IsNativeContext()) {
- RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context->Size());
- } else if (context->IsFunctionContext()) {
- RecordObjectStats(context, FUNCTION_CONTEXT_TYPE, context->Size());
+ if (context.IsNativeContext()) {
+ RecordObjectStats(context, NATIVE_CONTEXT_TYPE, context.Size());
+ } else if (context.IsFunctionContext()) {
+ RecordObjectStats(context, FUNCTION_CONTEXT_TYPE, context.Size());
} else {
RecordSimpleVirtualObjectStats(HeapObject(), context,
ObjectStats::OTHER_CONTEXT_TYPE);
@@ -1081,14 +1079,10 @@ class ObjectStatsVisitor {
namespace {
void IterateHeap(Heap* heap, ObjectStatsVisitor* visitor) {
- SpaceIterator space_it(heap);
- HeapObject obj;
- while (space_it.has_next()) {
- std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
- ObjectIterator* obj_it = it.get();
- for (obj = obj_it->Next(); !obj.is_null(); obj = obj_it->Next()) {
- visitor->Visit(obj, obj->Size());
- }
+ CombinedHeapIterator iterator(heap);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ visitor->Visit(obj, obj.Size());
}
}
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index 72865a47f7..0bd2a1e3e4 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -5,8 +5,8 @@
#ifndef V8_HEAP_OBJECT_STATS_H_
#define V8_HEAP_OBJECT_STATS_H_
-#include "src/objects.h"
#include "src/objects/code.h"
+#include "src/objects/objects.h"
// These instance types do not exist for actual use but are merely introduced
// for object stats tracing. In contrast to Code and FixedArray sub types
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 132bd5b6dc..d96cded09a 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -10,10 +10,10 @@
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/mark-compact.h"
-#include "src/objects-body-descriptors-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
#include "src/wasm/wasm-objects.h"
@@ -29,14 +29,14 @@ T HeapVisitor<ResultType, ConcreteVisitor>::Cast(HeapObject object) {
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject object) {
- return Visit(object->map(), object);
+ return Visit(object.map(), object);
}
template <typename ResultType, typename ConcreteVisitor>
ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
HeapObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
- switch (map->visitor_id()) {
+ switch (map.visitor_id()) {
#define CASE(TypeName, Type) \
case kVisit##TypeName: \
return visitor->Visit##TypeName( \
@@ -72,7 +72,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
template <typename ResultType, typename ConcreteVisitor>
void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
HeapObject host, MapWordSlot map_slot) {
- DCHECK(!host->map_word().IsForwardingAddress());
+ DCHECK(!host.map_word().IsForwardingAddress());
static_cast<ConcreteVisitor*>(this)->VisitPointer(host, ObjectSlot(map_slot));
}
@@ -83,13 +83,13 @@ void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
if (!visitor->ShouldVisit(object)) return ResultType(); \
if (!visitor->AllowDefaultJSObjectVisit()) { \
- DCHECK_WITH_MSG(!map->IsJSObjectMap(), \
+ DCHECK_WITH_MSG(!map.IsJSObjectMap(), \
"Implement custom visitor for new JSObject subclass in " \
"concurrent marker"); \
} \
int size = TypeName::BodyDescriptor::SizeOf(map, object); \
if (visitor->ShouldVisitMapPointer()) \
- visitor->VisitMapPointer(object, object->map_slot()); \
+ visitor->VisitMapPointer(object, object.map_slot()); \
TypeName::BodyDescriptor::IterateBody(map, object, size, visitor); \
return static_cast<ResultType>(size); \
}
@@ -107,9 +107,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
Map map, HeapObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
- int size = map->instance_size();
+ int size = map.instance_size();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object->map_slot());
+ visitor->VisitMapPointer(object, object.map_slot());
}
return static_cast<ResultType>(size);
}
@@ -121,7 +121,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object->map_slot());
+ visitor->VisitMapPointer(object, object.map_slot());
JSObject::FastBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -133,7 +133,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = JSObject::BodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer())
- visitor->VisitMapPointer(object, object->map_slot());
+ visitor->VisitMapPointer(object, object.map_slot());
JSObject::BodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
}
@@ -143,9 +143,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
Map map, HeapObject object) {
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
- int size = map->instance_size();
+ int size = map.instance_size();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object->map_slot());
+ visitor->VisitMapPointer(object, object.map_slot());
}
StructBodyDescriptor::IterateBody(map, object, size, visitor);
return static_cast<ResultType>(size);
@@ -157,9 +157,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
if (!visitor->ShouldVisit(object)) return ResultType();
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object->map_slot());
+ visitor->VisitMapPointer(object, object.map_slot());
}
- return static_cast<ResultType>(object->size());
+ return static_cast<ResultType>(object.size());
}
template <typename ResultType, typename ConcreteVisitor>
@@ -169,7 +169,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitWeakArray(
if (!visitor->ShouldVisit(object)) return ResultType();
int size = WeakArrayBodyDescriptor::SizeOf(map, object);
if (visitor->ShouldVisitMapPointer()) {
- visitor->VisitMapPointer(object, object->map_slot());
+ visitor->VisitMapPointer(object, object.map_slot());
}
WeakArrayBodyDescriptor::IterateBody(map, object, size, visitor);
return size;
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index d56dd91da2..ec494715ba 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -57,7 +57,7 @@ Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
}
}
// Retained object is new tail.
- DCHECK(!retained->IsUndefined(heap->isolate()));
+ DCHECK(!retained.IsUndefined(heap->isolate()));
candidate = T::cast(retained);
tail = candidate;
@@ -87,16 +87,16 @@ static void ClearWeakList(Heap* heap, Object list) {
template <>
struct WeakListVisitor<Code> {
static void SetWeakNext(Code code, Object next) {
- code->code_data_container()->set_next_code_link(next,
- UPDATE_WEAK_WRITE_BARRIER);
+ code.code_data_container().set_next_code_link(next,
+ UPDATE_WEAK_WRITE_BARRIER);
}
static Object WeakNext(Code code) {
- return code->code_data_container()->next_code_link();
+ return code.code_data_container().next_code_link();
}
static HeapObject WeakNextHolder(Code code) {
- return code->code_data_container();
+ return code.code_data_container();
}
static int WeakNextOffset() { return CodeDataContainer::kNextCodeLinkOffset; }
@@ -114,11 +114,11 @@ struct WeakListVisitor<Code> {
template <>
struct WeakListVisitor<Context> {
static void SetWeakNext(Context context, Object next) {
- context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WEAK_WRITE_BARRIER);
+ context.set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WEAK_WRITE_BARRIER);
}
static Object WeakNext(Context context) {
- return context->next_context_link();
+ return context.next_context_link();
}
static HeapObject WeakNextHolder(Context context) { return context; }
@@ -133,7 +133,7 @@ struct WeakListVisitor<Context> {
// Record the slots of the weak entries in the native context.
for (int idx = Context::FIRST_WEAK_SLOT;
idx < Context::NATIVE_CONTEXT_SLOTS; ++idx) {
- ObjectSlot slot = context->RawField(Context::OffsetOfElementAt(idx));
+ ObjectSlot slot = context.RawField(Context::OffsetOfElementAt(idx));
MarkCompactCollector::RecordSlot(context, slot,
HeapObject::cast(*slot));
}
@@ -148,22 +148,22 @@ struct WeakListVisitor<Context> {
static void DoWeakList(Heap* heap, Context context,
WeakObjectRetainer* retainer, int index) {
// Visit the weak list, removing dead intermediate elements.
- Object list_head = VisitWeakList<T>(heap, context->get(index), retainer);
+ Object list_head = VisitWeakList<T>(heap, context.get(index), retainer);
// Update the list head.
- context->set(index, list_head, UPDATE_WRITE_BARRIER);
+ context.set(index, list_head, UPDATE_WRITE_BARRIER);
if (MustRecordSlots(heap)) {
// Record the updated slot if necessary.
- ObjectSlot head_slot = context->RawField(FixedArray::SizeFor(index));
+ ObjectSlot head_slot = context.RawField(FixedArray::SizeFor(index));
heap->mark_compact_collector()->RecordSlot(context, head_slot,
HeapObject::cast(list_head));
}
}
static void VisitPhantomObject(Heap* heap, Context context) {
- ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
- ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
+ ClearWeakList<Code>(heap, context.get(Context::OPTIMIZED_CODE_LIST));
+ ClearWeakList<Code>(heap, context.get(Context::DEOPTIMIZED_CODE_LIST));
}
};
@@ -171,10 +171,10 @@ struct WeakListVisitor<Context> {
template <>
struct WeakListVisitor<AllocationSite> {
static void SetWeakNext(AllocationSite obj, Object next) {
- obj->set_weak_next(next, UPDATE_WEAK_WRITE_BARRIER);
+ obj.set_weak_next(next, UPDATE_WEAK_WRITE_BARRIER);
}
- static Object WeakNext(AllocationSite obj) { return obj->weak_next(); }
+ static Object WeakNext(AllocationSite obj) { return obj.weak_next(); }
static HeapObject WeakNextHolder(AllocationSite obj) { return obj; }
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 52a9e94b49..9ebd94427e 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -5,15 +5,16 @@
#ifndef V8_HEAP_OBJECTS_VISITING_H_
#define V8_HEAP_OBJECTS_VISITING_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
#include "src/objects/map.h"
-#include "src/visitors.h"
+#include "src/objects/objects.h"
+#include "src/objects/visitors.h"
namespace v8 {
namespace internal {
-#define TYPED_VISITOR_ID_LIST_CLASSES(V) \
+// TODO(jkummerow): Drop the duplication: V(x, x) -> V(x).
+#define TYPED_VISITOR_ID_LIST(V) \
V(AllocationSite, AllocationSite) \
V(BigInt, BigInt) \
V(ByteArray, ByteArray) \
@@ -31,7 +32,6 @@ namespace internal {
V(FeedbackVector, FeedbackVector) \
V(FixedArray, FixedArray) \
V(FixedDoubleArray, FixedDoubleArray) \
- V(FixedTypedArrayBase, FixedTypedArrayBase) \
V(JSArrayBuffer, JSArrayBuffer) \
V(JSDataView, JSDataView) \
V(JSFunction, JSFunction) \
@@ -59,19 +59,13 @@ namespace internal {
V(TransitionArray, TransitionArray) \
V(UncompiledDataWithoutPreparseData, UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData, UncompiledDataWithPreparseData) \
+ V(WasmCapiFunctionData, WasmCapiFunctionData) \
V(WasmInstanceObject, WasmInstanceObject)
#define FORWARD_DECLARE(TypeName, Type) class Type;
-TYPED_VISITOR_ID_LIST_CLASSES(FORWARD_DECLARE)
+TYPED_VISITOR_ID_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE
-#define TYPED_VISITOR_ID_LIST_TYPEDEFS(V) \
- V(FixedFloat64Array, FixedFloat64Array)
-
-#define TYPED_VISITOR_ID_LIST(V) \
- TYPED_VISITOR_ID_LIST_CLASSES(V) \
- TYPED_VISITOR_ID_LIST_TYPEDEFS(V)
-
// The base class for visitors that need to dispatch on object type. The default
// behavior of all visit functions is to iterate body of the given object using
// the BodyDescriptor of the object.
diff --git a/deps/v8/src/heap/read-only-heap.cc b/deps/v8/src/heap/read-only-heap.cc
index 1a5345de9b..1021bc147f 100644
--- a/deps/v8/src/heap/read-only-heap.cc
+++ b/deps/v8/src/heap/read-only-heap.cc
@@ -8,7 +8,11 @@
#include "src/base/once.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/spaces.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
#include "src/snapshot/read-only-deserializer.h"
namespace v8 {
@@ -21,48 +25,63 @@ ReadOnlyHeap* shared_ro_heap = nullptr;
// static
void ReadOnlyHeap::SetUp(Isolate* isolate, ReadOnlyDeserializer* des) {
+ DCHECK_NOT_NULL(isolate);
#ifdef V8_SHARED_RO_HEAP
- void* isolate_ro_roots = reinterpret_cast<void*>(
- isolate->roots_table().read_only_roots_begin().address());
- base::CallOnce(&setup_ro_heap_once, [isolate, des, isolate_ro_roots]() {
- shared_ro_heap = Init(isolate, des);
- if (des != nullptr) {
- std::memcpy(shared_ro_heap->read_only_roots_, isolate_ro_roots,
- kEntriesCount * sizeof(Address));
- }
+ // Make sure we are only sharing read-only space when deserializing. Otherwise
+ // we would be trying to create heap objects inside an already initialized
+ // read-only space. Use ClearSharedHeapForTest if you need a new read-only
+ // space.
+ DCHECK_IMPLIES(shared_ro_heap != nullptr, des != nullptr);
+
+ base::CallOnce(&setup_ro_heap_once, [isolate, des]() {
+ shared_ro_heap = CreateAndAttachToIsolate(isolate);
+ if (des != nullptr) shared_ro_heap->DeseralizeIntoIsolate(isolate, des);
});
isolate->heap()->SetUpFromReadOnlyHeap(shared_ro_heap);
if (des != nullptr) {
+ void* const isolate_ro_roots = reinterpret_cast<void*>(
+ isolate->roots_table().read_only_roots_begin().address());
std::memcpy(isolate_ro_roots, shared_ro_heap->read_only_roots_,
kEntriesCount * sizeof(Address));
}
#else
- Init(isolate, des);
+ auto* ro_heap = CreateAndAttachToIsolate(isolate);
+ if (des != nullptr) ro_heap->DeseralizeIntoIsolate(isolate, des);
#endif // V8_SHARED_RO_HEAP
}
-void ReadOnlyHeap::OnCreateHeapObjectsComplete() {
- DCHECK(!deserializing_);
-#ifdef V8_SHARED_RO_HEAP
- read_only_space_->Forget();
-#endif
- read_only_space_->MarkAsReadOnly();
+void ReadOnlyHeap::DeseralizeIntoIsolate(Isolate* isolate,
+ ReadOnlyDeserializer* des) {
+ DCHECK_NOT_NULL(des);
+ des->DeserializeInto(isolate);
+ InitFromIsolate(isolate);
+}
+
+void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) {
+ DCHECK_NOT_NULL(isolate);
+ InitFromIsolate(isolate);
}
// static
-ReadOnlyHeap* ReadOnlyHeap::Init(Isolate* isolate, ReadOnlyDeserializer* des) {
+ReadOnlyHeap* ReadOnlyHeap::CreateAndAttachToIsolate(Isolate* isolate) {
auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap()));
isolate->heap()->SetUpFromReadOnlyHeap(ro_heap);
- if (des != nullptr) {
- des->DeserializeInto(isolate);
- ro_heap->deserializing_ = true;
+ return ro_heap;
+}
+
+void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) {
+ DCHECK(!init_complete_);
#ifdef V8_SHARED_RO_HEAP
- ro_heap->read_only_space_->Forget();
+ void* const isolate_ro_roots = reinterpret_cast<void*>(
+ isolate->roots_table().read_only_roots_begin().address());
+ std::memcpy(read_only_roots_, isolate_ro_roots,
+ kEntriesCount * sizeof(Address));
+ read_only_space_->Seal(ReadOnlySpace::SealMode::kDetachFromHeapAndForget);
+#else
+ read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap);
#endif
- ro_heap->read_only_space_->MarkAsReadOnly();
- }
- return ro_heap;
+ init_complete_ = true;
}
void ReadOnlyHeap::OnHeapTearDown() {
@@ -73,9 +92,89 @@ void ReadOnlyHeap::OnHeapTearDown() {
}
// static
+void ReadOnlyHeap::ClearSharedHeapForTest() {
+#ifdef V8_SHARED_RO_HEAP
+ DCHECK_NOT_NULL(shared_ro_heap);
+ // TODO(v8:7464): Just leak read-only space for now. The paged-space heap
+ // is null so there isn't a nice way to do this.
+ delete shared_ro_heap;
+ shared_ro_heap = nullptr;
+ setup_ro_heap_once = 0;
+#endif
+}
+
+// static
bool ReadOnlyHeap::Contains(HeapObject object) {
return Page::FromAddress(object.ptr())->owner()->identity() == RO_SPACE;
}
+// static
+ReadOnlyRoots ReadOnlyHeap::GetReadOnlyRoots(HeapObject object) {
+#ifdef V8_SHARED_RO_HEAP
+ // This fails if we are creating heap objects and the roots haven't yet been
+ // copied into the read-only heap or it has been cleared for testing.
+ if (shared_ro_heap != nullptr && shared_ro_heap->init_complete_) {
+ return ReadOnlyRoots(shared_ro_heap->read_only_roots_);
+ }
+#endif
+ return ReadOnlyRoots(GetHeapFromWritableObject(object));
+}
+
+Object* ReadOnlyHeap::ExtendReadOnlyObjectCache() {
+ read_only_object_cache_.push_back(Smi::kZero);
+ return &read_only_object_cache_.back();
+}
+
+Object ReadOnlyHeap::cached_read_only_object(size_t i) const {
+ DCHECK_LE(i, read_only_object_cache_.size());
+ return read_only_object_cache_[i];
+}
+
+bool ReadOnlyHeap::read_only_object_cache_is_initialized() const {
+ return read_only_object_cache_.size() > 0;
+}
+
+ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap)
+ : ReadOnlyHeapIterator(ro_heap->read_only_space()) {}
+
+ReadOnlyHeapIterator::ReadOnlyHeapIterator(ReadOnlySpace* ro_space)
+ : ro_space_(ro_space),
+ current_page_(ro_space->first_page()),
+ current_addr_(current_page_->area_start()) {}
+
+HeapObject ReadOnlyHeapIterator::Next() {
+ if (current_page_ == nullptr) {
+ return HeapObject();
+ }
+
+ for (;;) {
+ DCHECK_LE(current_addr_, current_page_->area_end());
+ if (current_addr_ == current_page_->area_end()) {
+ // Progress to the next page.
+ current_page_ = current_page_->next_page();
+ if (current_page_ == nullptr) {
+ return HeapObject();
+ }
+ current_addr_ = current_page_->area_start();
+ }
+
+ if (current_addr_ == ro_space_->top() &&
+ current_addr_ != ro_space_->limit()) {
+ current_addr_ = ro_space_->limit();
+ continue;
+ }
+ HeapObject object = HeapObject::FromAddress(current_addr_);
+ const int object_size = object.Size();
+ current_addr_ += object_size;
+
+ if (object.IsFiller()) {
+ continue;
+ }
+
+ DCHECK_OBJECT_SIZE(object_size);
+ return object;
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/read-only-heap.h b/deps/v8/src/heap/read-only-heap.h
index 9e7a50e4f9..697c9e26ef 100644
--- a/deps/v8/src/heap/read-only-heap.h
+++ b/deps/v8/src/heap/read-only-heap.h
@@ -6,15 +6,17 @@
#define V8_HEAP_READ_ONLY_HEAP_H_
#include "src/base/macros.h"
-#include "src/heap/heap.h"
-#include "src/objects.h"
-#include "src/roots.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
-class ReadOnlySpace;
+class Isolate;
+class Page;
class ReadOnlyDeserializer;
+class ReadOnlySpace;
// This class transparently manages read-only space, roots and cache creation
// and destruction.
@@ -23,30 +25,53 @@ class ReadOnlyHeap final {
static constexpr size_t kEntriesCount =
static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
- // If necessary create read-only heap and initialize its artifacts (if the
- // deserializer is provided).
- // TODO(goszczycki): Ideally we'd create this without needing a heap.
+ // If necessary creates read-only heap and initializes its artifacts (if
+ // the deserializer is provided). Then attaches the read-only heap to the
+ // isolate.
+ // TODO(v8:7464): Ideally we'd create this without needing a heap.
static void SetUp(Isolate* isolate, ReadOnlyDeserializer* des);
- // Indicate that all read-only space objects have been created and will not
- // be written to. This is not thread safe, and should really only be used as
- // part of mksnapshot or when read-only heap sharing is disabled.
- void OnCreateHeapObjectsComplete();
- // Indicate that the current isolate no longer requires the read-only heap and
- // it may be safely disposed of.
+ // Indicates that the isolate has been set up and all read-only space objects
+ // have been created and will not be written to. This is not thread safe, and
+ // should really only be used during snapshot creation or when read-only heap
+ // sharing is disabled.
+ void OnCreateHeapObjectsComplete(Isolate* isolate);
+ // Indicates that the current isolate no longer requires the read-only heap
+ // and it may be safely disposed of.
void OnHeapTearDown();
// Returns whether the object resides in the read-only space.
V8_EXPORT_PRIVATE static bool Contains(HeapObject object);
+ // Gets read-only roots from an appropriate root list: shared read-only root
+ // list if the shared read-only heap has been initialized or the isolate
+ // specific roots table.
+ V8_EXPORT_PRIVATE static ReadOnlyRoots GetReadOnlyRoots(HeapObject object);
+
+ // Clears any shared read-only heap artifacts for testing, forcing read-only
+ // heap to be re-created on next set up.
+ V8_EXPORT_PRIVATE static void ClearSharedHeapForTest();
+
+ // Extends the read-only object cache with a new zero smi and returns a
+ // reference to it.
+ Object* ExtendReadOnlyObjectCache();
+ // Returns a read-only cache entry at a particular index.
+ Object cached_read_only_object(size_t i) const;
+ bool read_only_object_cache_is_initialized() const;
- std::vector<Object>* read_only_object_cache() {
- return &read_only_object_cache_;
- }
ReadOnlySpace* read_only_space() const { return read_only_space_; }
private:
- static ReadOnlyHeap* Init(Isolate* isolate, ReadOnlyDeserializer* des);
+ // Creates a new read-only heap and attaches it to the provided isolate.
+ static ReadOnlyHeap* CreateAndAttachToIsolate(Isolate* isolate);
+ // Runs the read-only deserializer and calls InitFromIsolate to complete
+ // read-only heap initialization.
+ void DeseralizeIntoIsolate(Isolate* isolate, ReadOnlyDeserializer* des);
+ // Initializes read-only heap from an already set-up isolate, copying
+ // read-only roots from the isolate. This then seals the space off from
+ // further writes, marks it as read-only and detaches it from the heap (unless
+ // sharing is disabled).
+ void InitFromIsolate(Isolate* isolate);
- bool deserializing_ = false;
+ bool init_complete_ = false;
ReadOnlySpace* read_only_space_ = nullptr;
std::vector<Object> read_only_object_cache_;
@@ -58,6 +83,20 @@ class ReadOnlyHeap final {
DISALLOW_COPY_AND_ASSIGN(ReadOnlyHeap);
};
+// This class enables iterating over all read-only heap objects.
+class V8_EXPORT_PRIVATE ReadOnlyHeapIterator {
+ public:
+ explicit ReadOnlyHeapIterator(ReadOnlyHeap* ro_heap);
+ explicit ReadOnlyHeapIterator(ReadOnlySpace* ro_space);
+
+ HeapObject Next();
+
+ private:
+ ReadOnlySpace* const ro_space_;
+ Page* current_page_;
+ Address current_addr_;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index b890350207..cd2344b349 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -5,11 +5,11 @@
#ifndef V8_HEAP_REMEMBERED_SET_H_
#define V8_HEAP_REMEMBERED_SET_H_
+#include "src/codegen/reloc-info.h"
+#include "src/common/v8memory.h"
#include "src/heap/heap.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
-#include "src/reloc-info.h"
-#include "src/v8memory.h"
namespace v8 {
namespace internal {
@@ -281,8 +281,12 @@ class UpdateTypedSlotHelper {
case CODE_ENTRY_SLOT: {
return UpdateCodeEntry(addr, callback);
}
- case EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, Code());
+ case COMPRESSED_EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::COMPRESSED_EMBEDDED_OBJECT, 0, Code());
+ return UpdateEmbeddedPointer(heap, &rinfo, callback);
+ }
+ case FULL_EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::FULL_EMBEDDED_OBJECT, 0, Code());
return UpdateEmbeddedPointer(heap, &rinfo, callback);
}
case OBJECT_SLOT: {
@@ -303,9 +307,9 @@ class UpdateTypedSlotHelper {
Code code = Code::GetObjectFromEntryAddress(entry_address);
Code old_code = code;
SlotCallbackResult result = callback(FullMaybeObjectSlot(&code));
- DCHECK(!HasWeakHeapObjectTag(code.ptr()));
+ DCHECK(!HasWeakHeapObjectTag(code));
if (code != old_code) {
- Memory<Address>(entry_address) = code->entry();
+ Memory<Address>(entry_address) = code.entry();
}
return result;
}
@@ -319,10 +323,9 @@ class UpdateTypedSlotHelper {
Code old_target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Code new_target = old_target;
SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target.ptr()));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
- rinfo->set_target_address(
- Code::cast(new_target)->raw_instruction_start());
+ rinfo->set_target_address(Code::cast(new_target).raw_instruction_start());
}
return result;
}
@@ -332,11 +335,11 @@ class UpdateTypedSlotHelper {
template <typename Callback>
static SlotCallbackResult UpdateEmbeddedPointer(Heap* heap, RelocInfo* rinfo,
Callback callback) {
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- HeapObject old_target = rinfo->target_object();
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
+ HeapObject old_target = rinfo->target_object_no_host(heap->isolate());
HeapObject new_target = old_target;
SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target));
- DCHECK(!HasWeakHeapObjectTag(new_target->ptr()));
+ DCHECK(!HasWeakHeapObjectTag(new_target));
if (new_target != old_target) {
rinfo->set_target_object(heap, HeapObject::cast(new_target));
}
@@ -347,8 +350,10 @@ class UpdateTypedSlotHelper {
inline SlotType SlotTypeForRelocInfoMode(RelocInfo::Mode rmode) {
if (RelocInfo::IsCodeTargetMode(rmode)) {
return CODE_TARGET_SLOT;
- } else if (RelocInfo::IsEmbeddedObject(rmode)) {
- return EMBEDDED_OBJECT_SLOT;
+ } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
+ return FULL_EMBEDDED_OBJECT_SLOT;
+ } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
+ return COMPRESSED_EMBEDDED_OBJECT_SLOT;
}
UNREACHABLE();
}
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
index 14e7d000ca..273866d5e4 100644
--- a/deps/v8/src/heap/scavenge-job.cc
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -5,13 +5,13 @@
#include "src/heap/scavenge-job.h"
#include "src/base/platform/time.h"
+#include "src/execution/isolate.h"
+#include "src/execution/vm-state-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
-#include "src/isolate.h"
-#include "src/v8.h"
-#include "src/vm-state-inl.h"
+#include "src/init/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index 34f7bfafc3..2b35ccbb18 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -5,8 +5,8 @@
#ifndef V8_HEAP_SCAVENGE_JOB_H_
#define V8_HEAP_SCAVENGE_JOB_H_
-#include "src/cancelable-task.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 4736519099..50dc5f25c9 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -9,8 +9,8 @@
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/local-allocator-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/map.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots-inl.h"
namespace v8 {
@@ -72,7 +72,7 @@ bool Scavenger::PromotionList::Pop(int task_id,
if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
entry->heap_object = regular_object.first;
entry->size = regular_object.second;
- entry->map = entry->heap_object->map();
+ entry->map = entry->heap_object.map();
return true;
}
return large_object_promotion_list_.Pop(task_id, entry);
@@ -97,7 +97,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
// with page initialization.
HeapObject heap_object;
if (object->GetHeapObject(&heap_object)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
+ MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object.address());
CHECK_NOT_NULL(chunk->synchronized_heap());
}
#endif
@@ -106,11 +106,11 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
int size) {
// Copy the content of source to target.
- target->set_map_word(MapWord::FromMap(map));
- heap()->CopyBlock(target->address() + kTaggedSize,
- source->address() + kTaggedSize, size - kTaggedSize);
+ target.set_map_word(MapWord::FromMap(map));
+ heap()->CopyBlock(target.address() + kTaggedSize,
+ source.address() + kTaggedSize, size - kTaggedSize);
- Object old = source->map_slot().Release_CompareAndSwap(
+ Object old = source.map_slot().Release_CompareAndSwap(
map, MapWord::FromForwardingAddress(target).ToMap());
if (old != map) {
// Other task migrated the object.
@@ -135,7 +135,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
- DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
+ DCHECK(heap()->AllowedToBeMigrated(map, object, NEW_SPACE));
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation =
allocator_.Allocate(NEW_SPACE, object_size, alignment);
@@ -147,7 +147,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
- MapWord map_word = object->synchronized_map_word();
+ MapWord map_word = object.synchronized_map_word();
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
DCHECK(!Heap::InFromPage(*slot));
return Heap::InToPage(*slot)
@@ -183,7 +183,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
- MapWord map_word = object->synchronized_map_word();
+ MapWord map_word = object.synchronized_map_word();
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
DCHECK(!Heap::InFromPage(*slot));
return Heap::InToPage(*slot)
@@ -216,7 +216,7 @@ bool Scavenger::HandleLargeObject(Map map, HeapObject object, int object_size,
MemoryChunk::FromHeapObject(object)->InNewLargeObjectSpace())) {
DCHECK_EQ(NEW_LO_SPACE,
MemoryChunk::FromHeapObject(object)->owner()->identity());
- if (object->map_slot().Release_CompareAndSwap(
+ if (object.map_slot().Release_CompareAndSwap(
map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
surviving_new_large_objects_.insert({object, map});
promoted_size_ += object_size;
@@ -236,7 +236,7 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
- SLOW_DCHECK(object->SizeFromMap(map) == object_size);
+ SLOW_DCHECK(object.SizeFromMap(map) == object_size);
CopyAndForwardResult result;
if (HandleLargeObject(map, object, object_size, object_fields)) {
@@ -246,7 +246,7 @@ SlotCallbackResult Scavenger::EvacuateObjectDefault(
SLOW_DCHECK(static_cast<size_t>(object_size) <=
MemoryChunkLayout::AllocatableMemoryInDataPage());
- if (!heap()->ShouldBePromoted(object->address())) {
+ if (!heap()->ShouldBePromoted(object.address())) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
result = SemiSpaceCopyObject(map, slot, object, object_size, object_fields);
@@ -284,7 +284,7 @@ SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot,
// The ThinString should die after Scavenge, so avoid writing the proper
// forwarding pointer and instead just signal the actual object as forwarded
// reference.
- String actual = object->actual();
+ String actual = object.actual();
// ThinStrings always refer to internalized strings, which are always in old
// space.
DCHECK(!Heap::InYoungGeneration(actual));
@@ -293,7 +293,7 @@ SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot,
}
DCHECK_EQ(ObjectFields::kMaybePointers,
- Map::ObjectFieldsFrom(map->visitor_id()));
+ Map::ObjectFieldsFrom(map.visitor_id()));
return EvacuateObjectDefault(map, slot, object, object_size,
ObjectFields::kMaybePointers);
}
@@ -306,38 +306,38 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
- DCHECK(IsShortcutCandidate(map->instance_type()));
+ DCHECK(IsShortcutCandidate(map.instance_type()));
if (!is_incremental_marking_ &&
- object->unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
- HeapObject first = HeapObject::cast(object->unchecked_first());
+ object.unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
+ HeapObject first = HeapObject::cast(object.unchecked_first());
HeapObjectReference::Update(slot, first);
if (!Heap::InYoungGeneration(first)) {
- object->map_slot().Release_Store(
+ object.map_slot().Release_Store(
MapWord::FromForwardingAddress(first).ToMap());
return REMOVE_SLOT;
}
- MapWord first_word = first->synchronized_map_word();
+ MapWord first_word = first.synchronized_map_word();
if (first_word.IsForwardingAddress()) {
HeapObject target = first_word.ToForwardingAddress();
HeapObjectReference::Update(slot, target);
- object->map_slot().Release_Store(
+ object.map_slot().Release_Store(
MapWord::FromForwardingAddress(target).ToMap());
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
SlotCallbackResult result =
- EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map),
- Map::ObjectFieldsFrom(map->visitor_id()));
- object->map_slot().Release_Store(
+ EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
+ Map::ObjectFieldsFrom(map.visitor_id()));
+ object.map_slot().Release_Store(
MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
return result;
}
DCHECK_EQ(ObjectFields::kMaybePointers,
- Map::ObjectFieldsFrom(map->visitor_id()));
+ Map::ObjectFieldsFrom(map.visitor_id()));
return EvacuateObjectDefault(map, slot, object, object_size,
ObjectFields::kMaybePointers);
}
@@ -350,10 +350,10 @@ SlotCallbackResult Scavenger::EvacuateObject(THeapObjectSlot slot, Map map,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
SLOW_DCHECK(Heap::InFromPage(source));
SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
- int size = source->SizeFromMap(map);
+ int size = source.SizeFromMap(map);
// Cannot use ::cast() below because that would add checks in debug mode
// that require re-reading the map.
- VisitorId visitor_id = map->visitor_id();
+ VisitorId visitor_id = map.visitor_id();
switch (visitor_id) {
case kVisitThinString:
// At the moment we don't allow weak pointers to thin strings.
@@ -380,7 +380,7 @@ SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
DCHECK(Heap::InFromPage(object));
// Synchronized load that consumes the publishing CAS of MigrateObject.
- MapWord first_word = object->synchronized_map_word();
+ MapWord first_word = object.synchronized_map_word();
// If the first word is a forwarding address, the object has already been
// copied.
@@ -486,13 +486,13 @@ int ScavengeVisitor::VisitEphemeronHashTable(Map map,
// later. This allows to only iterate the tables' values, which are treated
// as strong independetly of whether the key is live.
scavenger_->AddEphemeronHashTable(table);
- for (int i = 0; i < table->Capacity(); i++) {
+ for (int i = 0; i < table.Capacity(); i++) {
ObjectSlot value_slot =
- table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
+ table.RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));
VisitPointer(table, value_slot);
}
- return table->SizeFromMap(map);
+ return table.SizeFromMap(map);
}
} // namespace internal
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 217affa84b..c7666b7da7 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -13,11 +13,11 @@
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
-#include "src/objects-body-descriptors-inl.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
-#include "src/transitions-inl.h"
-#include "src/utils-inl.h"
+#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/objects/transitions-inl.h"
+#include "src/utils/utils-inl.h"
namespace v8 {
namespace internal {
@@ -100,7 +100,7 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
inline void VisitEphemeron(HeapObject obj, int entry, ObjectSlot key,
ObjectSlot value) override {
- DCHECK(Heap::IsLargeObject(obj) || obj->IsEphemeronHashTable());
+ DCHECK(Heap::IsLargeObject(obj) || obj.IsEphemeronHashTable());
VisitPointer(obj, value);
if (ObjectInYoungGeneration(*key)) {
@@ -143,7 +143,7 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
DCHECK(success);
if (result == KEEP_SLOT) {
- SLOW_DCHECK(target->IsHeapObject());
+ SLOW_DCHECK(target.IsHeapObject());
RememberedSet<OLD_TO_NEW>::Insert(MemoryChunk::FromHeapObject(host),
slot.address());
}
@@ -169,13 +169,13 @@ namespace {
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Object object) {
return Heap::InFromPage(object) &&
- !HeapObject::cast(object)->map_word().IsForwardingAddress();
+ !HeapObject::cast(object).map_word().IsForwardingAddress();
}
// Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, HeapObject heap_object) {
return Heap::InFromPage(heap_object) &&
- !heap_object->map_word().IsForwardingAddress();
+ !heap_object.map_word().IsForwardingAddress();
}
bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
@@ -191,7 +191,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
return object;
}
- MapWord map_word = HeapObject::cast(object)->map_word();
+ MapWord map_word = HeapObject::cast(object).map_word();
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
@@ -344,7 +344,7 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
Map map = update_info.second;
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
- object->set_map_word(MapWord::FromMap(map));
+ object.set_map_word(MapWord::FromMap(map));
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
@@ -403,7 +403,7 @@ void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map,
is_compacting_ &&
heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
- target->IterateBodyFast(map, size, &visitor);
+ target.IterateBodyFast(map, size, &visitor);
}
void Scavenger::RememberPromotedEphemeron(EphemeronHashTable table, int entry) {
@@ -463,7 +463,6 @@ void Scavenger::Process(OneshotBarrier* barrier) {
struct PromotionListEntry entry;
while (promotion_list_.Pop(&entry)) {
HeapObject target = entry.heap_object;
- DCHECK(!target->IsMap());
IterateAndScavengePromotedObject(target, entry.map, entry.size);
done = false;
if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
@@ -488,13 +487,13 @@ void ScavengerCollector::ProcessWeakReferences(
void ScavengerCollector::ClearYoungEphemerons(
EphemeronTableList* ephemeron_table_list) {
ephemeron_table_list->Iterate([this](EphemeronHashTable table) {
- for (int i = 0; i < table->Capacity(); i++) {
+ for (int i = 0; i < table.Capacity(); i++) {
// Keys in EphemeronHashTables must be heap objects.
HeapObjectSlot key_slot(
- table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)));
+ table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i)));
HeapObject key = key_slot.ToHeapObject();
if (IsUnscavengedHeapObject(heap_, key)) {
- table->RemoveEntry(i);
+ table.RemoveEntry(i);
} else {
HeapObject forwarded = ForwardingAddress(key);
key_slot.StoreHeapObject(forwarded);
@@ -514,10 +513,10 @@ void ScavengerCollector::ClearOldEphemerons() {
for (auto iti = indices.begin(); iti != indices.end();) {
// Keys in EphemeronHashTables must be heap objects.
HeapObjectSlot key_slot(
- table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(*iti)));
+ table.RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(*iti)));
HeapObject key = key_slot.ToHeapObject();
if (IsUnscavengedHeapObject(heap_, key)) {
- table->RemoveEntry(*iti);
+ table.RemoveEntry(*iti);
iti = indices.erase(iti);
} else {
HeapObject forwarded = ForwardingAddress(key);
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index ac00b77d71..458fd819ae 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -2,22 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/setup-isolate.h"
+#include "src/init/setup-isolate.h"
-#include "src/accessors.h"
-#include "src/compilation-cache.h"
-#include "src/contexts.h"
-#include "src/heap-symbols.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-configuration.h"
+#include "src/init/heap-symbols.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
-#include "src/layout-descriptor.h"
-#include "src/lookup-cache.h"
-#include "src/objects-inl.h"
#include "src/objects/arguments.h"
#include "src/objects/cell-inl.h"
+#include "src/objects/contexts.h"
#include "src/objects/data-handler.h"
#include "src/objects/debug-objects.h"
#include "src/objects/descriptor-array.h"
@@ -27,10 +24,13 @@
#include "src/objects/instance-type-inl.h"
#include "src/objects/js-generator.h"
#include "src/objects/js-weak-refs.h"
+#include "src/objects/layout-descriptor.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/lookup-cache.h"
#include "src/objects/map.h"
#include "src/objects/microtask.h"
#include "src/objects/module.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/promise.h"
@@ -39,6 +39,7 @@
#include "src/objects/smi.h"
#include "src/objects/stack-frame-info.h"
#include "src/objects/string.h"
+#include "src/objects/template-objects-inl.h"
#include "src/regexp/jsregexp.h"
#include "src/wasm/wasm-objects.h"
@@ -115,8 +116,8 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
: AllocationType::kReadOnly);
if (!allocation.To(&result)) return allocation;
- result->set_map_after_allocation(ReadOnlyRoots(this).meta_map(),
- SKIP_WRITE_BARRIER);
+ result.set_map_after_allocation(ReadOnlyRoots(this).meta_map(),
+ SKIP_WRITE_BARRIER);
Map map = isolate()->factory()->InitializeMap(
Map::cast(result), instance_type, instance_size, elements_kind,
inobject_properties);
@@ -132,48 +133,48 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
Map map = Map::unchecked_cast(result);
- map->set_map_after_allocation(
+ map.set_map_after_allocation(
Map::unchecked_cast(isolate()->root(RootIndex::kMetaMap)),
SKIP_WRITE_BARRIER);
- map->set_instance_type(instance_type);
- map->set_instance_size(instance_size);
+ map.set_instance_type(instance_type);
+ map.set_instance_size(instance_size);
// Initialize to only containing tagged fields.
if (FLAG_unbox_double_fields) {
- map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
// GetVisitorId requires a properly initialized LayoutDescriptor.
- map->set_visitor_id(Map::GetVisitorId(map));
- map->set_inobject_properties_start_or_constructor_function_index(0);
- DCHECK(!map->IsJSObjectMap());
- map->set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
- map->SetInObjectUnusedPropertyFields(0);
- map->set_bit_field(0);
- map->set_bit_field2(0);
- DCHECK(!map->is_in_retained_map_list());
+ map.set_visitor_id(Map::GetVisitorId(map));
+ map.set_inobject_properties_start_or_constructor_function_index(0);
+ DCHECK(!map.IsJSObjectMap());
+ map.set_prototype_validity_cell(Smi::FromInt(Map::kPrototypeChainValid));
+ map.SetInObjectUnusedPropertyFields(0);
+ map.set_bit_field(0);
+ map.set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptorsBit::encode(true) |
Map::ConstructionCounterBits::encode(Map::kNoSlackTracking);
- map->set_bit_field3(bit_field3);
- map->clear_padding();
- map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
+ map.set_bit_field3(bit_field3);
+ DCHECK(!map.is_in_retained_map_list());
+ map.clear_padding();
+ map.set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
return map;
}
void Heap::FinalizePartialMap(Map map) {
ReadOnlyRoots roots(this);
- map->set_dependent_code(DependentCode::cast(roots.empty_weak_fixed_array()));
- map->set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
- map->SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0);
+ map.set_dependent_code(DependentCode::cast(roots.empty_weak_fixed_array()));
+ map.set_raw_transitions(MaybeObject::FromSmi(Smi::zero()));
+ map.SetInstanceDescriptors(isolate(), roots.empty_descriptor_array(), 0);
if (FLAG_unbox_double_fields) {
- map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+ map.set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
- map->set_prototype(roots.null_value());
- map->set_constructor_or_backpointer(roots.null_value());
+ map.set_prototype(roots.null_value());
+ map.set_constructor_or_backpointer(roots.null_value());
}
AllocationResult Heap::Allocate(Map map, AllocationType allocation_type) {
- DCHECK(map->instance_type() != MAP_TYPE);
- int size = map->instance_size();
+ DCHECK(map.instance_type() != MAP_TYPE);
+ int size = map.instance_size();
HeapObject result;
AllocationResult allocation = AllocateRaw(size, allocation_type);
if (!allocation.To(&result)) return allocation;
@@ -181,31 +182,10 @@ AllocationResult Heap::Allocate(Map map, AllocationType allocation_type) {
WriteBarrierMode write_barrier_mode =
allocation_type == AllocationType::kYoung ? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
- result->set_map_after_allocation(map, write_barrier_mode);
+ result.set_map_after_allocation(map, write_barrier_mode);
return result;
}
-AllocationResult Heap::AllocateEmptyFixedTypedArray(
- ExternalArrayType array_type) {
- int size = OBJECT_POINTER_ALIGN(FixedTypedArrayBase::kDataOffset);
-
- HeapObject object;
- AllocationResult allocation = AllocateRaw(
- size, AllocationType::kReadOnly,
- array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
- if (!allocation.To(&object)) return allocation;
-
- object->set_map_after_allocation(
- ReadOnlyRoots(this).MapForFixedTypedArray(array_type),
- SKIP_WRITE_BARRIER);
- FixedTypedArrayBase elements = FixedTypedArrayBase::cast(object);
- elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
- elements->set_external_pointer(
- FixedTypedArrayBase::ExternalPointerPtrForOnHeapArray());
- elements->set_length(0);
- return elements;
-}
-
bool Heap::CreateInitialMaps() {
HeapObject obj;
{
@@ -215,7 +195,7 @@ bool Heap::CreateInitialMaps() {
// Map::cast cannot be used due to uninitialized map field.
Map new_meta_map = Map::unchecked_cast(obj);
set_meta_map(new_meta_map);
- new_meta_map->set_map_after_allocation(new_meta_map);
+ new_meta_map.set_map_after_allocation(new_meta_map);
ReadOnlyRoots roots(this);
{ // Partial map allocation
@@ -250,8 +230,8 @@ bool Heap::CreateInitialMaps() {
AllocationResult alloc =
AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(roots.fixed_array_map(), SKIP_WRITE_BARRIER);
- FixedArray::cast(obj)->set_length(0);
+ obj.set_map_after_allocation(roots.fixed_array_map(), SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj).set_length(0);
}
set_empty_fixed_array(FixedArray::cast(obj));
@@ -259,9 +239,9 @@ bool Heap::CreateInitialMaps() {
AllocationResult alloc =
AllocateRaw(WeakFixedArray::SizeFor(0), AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(roots.weak_fixed_array_map(),
- SKIP_WRITE_BARRIER);
- WeakFixedArray::cast(obj)->set_length(0);
+ obj.set_map_after_allocation(roots.weak_fixed_array_map(),
+ SKIP_WRITE_BARRIER);
+ WeakFixedArray::cast(obj).set_length(0);
}
set_empty_weak_fixed_array(WeakFixedArray::cast(obj));
@@ -269,10 +249,10 @@ bool Heap::CreateInitialMaps() {
AllocationResult allocation = AllocateRaw(WeakArrayList::SizeForCapacity(0),
AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
- obj->set_map_after_allocation(roots.weak_array_list_map(),
- SKIP_WRITE_BARRIER);
- WeakArrayList::cast(obj)->set_capacity(0);
- WeakArrayList::cast(obj)->set_length(0);
+ obj.set_map_after_allocation(roots.weak_array_list_map(),
+ SKIP_WRITE_BARRIER);
+ WeakArrayList::cast(obj).set_capacity(0);
+ WeakArrayList::cast(obj).set_length(0);
}
set_empty_weak_array_list(WeakArrayList::cast(obj));
@@ -282,7 +262,7 @@ bool Heap::CreateInitialMaps() {
if (!allocation.To(&obj)) return false;
}
set_null_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kNull);
+ Oddball::cast(obj).set_kind(Oddball::kNull);
{
AllocationResult allocation =
@@ -290,7 +270,7 @@ bool Heap::CreateInitialMaps() {
if (!allocation.To(&obj)) return false;
}
set_undefined_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+ Oddball::cast(obj).set_kind(Oddball::kUndefined);
DCHECK(!InYoungGeneration(roots.undefined_value()));
{
AllocationResult allocation =
@@ -298,7 +278,7 @@ bool Heap::CreateInitialMaps() {
if (!allocation.To(&obj)) return false;
}
set_the_hole_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kTheHole);
+ Oddball::cast(obj).set_kind(Oddball::kTheHole);
// Set preliminary exception sentinel value before actually initializing it.
set_exception(roots.null_value());
@@ -308,7 +288,7 @@ bool Heap::CreateInitialMaps() {
const StructTable& entry = struct_table[i];
Map map;
if (!AllocatePartialMap(entry.type, entry.size).To(&map)) return false;
- roots_table()[entry.index] = map->ptr();
+ roots_table()[entry.index] = map.ptr();
}
// Allocate the empty enum cache.
@@ -318,17 +298,17 @@ bool Heap::CreateInitialMaps() {
if (!allocation.To(&obj)) return false;
}
set_empty_enum_cache(EnumCache::cast(obj));
- EnumCache::cast(obj)->set_keys(roots.empty_fixed_array());
- EnumCache::cast(obj)->set_indices(roots.empty_fixed_array());
+ EnumCache::cast(obj).set_keys(roots.empty_fixed_array());
+ EnumCache::cast(obj).set_indices(roots.empty_fixed_array());
// Allocate the empty descriptor array.
{
int size = DescriptorArray::SizeFor(0);
if (!AllocateRaw(size, AllocationType::kReadOnly).To(&obj)) return false;
- obj->set_map_after_allocation(roots.descriptor_array_map(),
- SKIP_WRITE_BARRIER);
+ obj.set_map_after_allocation(roots.descriptor_array_map(),
+ SKIP_WRITE_BARRIER);
DescriptorArray array = DescriptorArray::cast(obj);
- array->Initialize(roots.empty_enum_cache(), roots.undefined_value(), 0, 0);
+ array.Initialize(roots.empty_enum_cache(), roots.undefined_value(), 0, 0);
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
@@ -340,9 +320,9 @@ bool Heap::CreateInitialMaps() {
FinalizePartialMap(roots.fixed_cow_array_map());
FinalizePartialMap(roots.descriptor_array_map());
FinalizePartialMap(roots.undefined_map());
- roots.undefined_map()->set_is_undetectable(true);
+ roots.undefined_map().set_is_undetectable(true);
FinalizePartialMap(roots.null_map());
- roots.null_map()->set_is_undetectable(true);
+ roots.null_map().set_is_undetectable(true);
FinalizePartialMap(roots.the_hole_map());
for (unsigned i = 0; i < arraysize(struct_table); ++i) {
const StructTable& entry = struct_table[i];
@@ -364,7 +344,7 @@ bool Heap::CreateInitialMaps() {
constructor_function_index) \
{ \
ALLOCATE_MAP((instance_type), (size), field_name); \
- roots.field_name##_map()->SetConstructorFunctionIndex( \
+ roots.field_name##_map().SetConstructorFunctionIndex( \
(constructor_function_index)); \
}
@@ -396,11 +376,11 @@ bool Heap::CreateInitialMaps() {
const StringTypeTable& entry = string_type_table[i];
Map map;
if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
- map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
+ map.SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
// Mark cons string maps as unstable, because their objects can change
// maps during GC.
- if (StringShape(entry.type).IsCons()) map->mark_unstable();
- roots_table()[entry.index] = map->ptr();
+ if (StringShape(entry.type).IsCons()) map.mark_unstable();
+ roots_table()[entry.index] = map.ptr();
}
{ // Create a separate external one byte string map for native sources.
@@ -409,12 +389,12 @@ bool Heap::CreateInitialMaps() {
AllocateMap(UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE,
ExternalOneByteString::kUncachedSize);
if (!allocation.To(&map)) return false;
- map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
+ map.SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
set_native_source_string_map(map);
}
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
- roots.fixed_double_array_map()->set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
+ roots.fixed_double_array_map().set_elements_kind(HOLEY_DOUBLE_ELEMENTS);
ALLOCATE_VARSIZE_MAP(FEEDBACK_METADATA_TYPE, feedback_metadata)
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
@@ -425,12 +405,6 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(SMALL_ORDERED_NAME_DICTIONARY_TYPE,
small_ordered_name_dictionary)
-#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype) \
- ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
-
- TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
-#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
-
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
@@ -441,8 +415,8 @@ bool Heap::CreateInitialMaps() {
Smi value = Smi::FromInt(Map::kPrototypeChainInvalid);
AllocationResult alloc = AllocateRaw(Cell::kSize, AllocationType::kOld);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(roots.cell_map(), SKIP_WRITE_BARRIER);
- Cell::cast(obj)->set_value(value);
+ obj.set_map_after_allocation(roots.cell_map(), SKIP_WRITE_BARRIER);
+ Cell::cast(obj).set_value(value);
set_invalid_prototype_validity_cell(Cell::cast(obj));
}
@@ -452,11 +426,14 @@ bool Heap::CreateInitialMaps() {
// The "no closures" and "one closure" FeedbackCell maps need
// to be marked unstable because their objects can change maps.
- ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_closures_cell)
- roots.no_closures_cell_map()->mark_unstable();
- ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
- roots.one_closure_cell_map()->mark_unstable();
- ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
+ ALLOCATE_MAP(
+ FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize, no_closures_cell)
+ roots.no_closures_cell_map().mark_unstable();
+ ALLOCATE_MAP(
+ FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize, one_closure_cell)
+ roots.one_closure_cell_map().mark_unstable();
+ ALLOCATE_MAP(
+ FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize, many_closures_cell)
ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
@@ -506,13 +483,8 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
UncompiledDataWithPreparseData::kSize,
uncompiled_data_with_preparse_data)
-#if V8_SFI_HAS_UNIQUE_ID
- ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE,
- SharedFunctionInfoWithID::kAlignedSize, shared_function_info)
-#else
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
-#endif
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
@@ -522,7 +494,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kEmbedderDataSlotSize,
external)
- external_map()->set_is_extensible(false);
+ external_map().set_is_extensible(false);
#undef ALLOCATE_PRIMITIVE_MAP
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
@@ -532,8 +504,8 @@ bool Heap::CreateInitialMaps() {
AllocationResult alloc =
AllocateRaw(FixedArray::SizeFor(0), AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(roots.scope_info_map(), SKIP_WRITE_BARRIER);
- FixedArray::cast(obj)->set_length(0);
+ obj.set_map_after_allocation(roots.scope_info_map(), SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj).set_length(0);
}
set_empty_scope_info(ScopeInfo::cast(obj));
@@ -542,12 +514,12 @@ bool Heap::CreateInitialMaps() {
AllocationResult alloc =
AllocateRaw(FixedArray::SizeFor(1), AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
- obj->set_map_after_allocation(roots.object_boilerplate_description_map(),
- SKIP_WRITE_BARRIER);
+ obj.set_map_after_allocation(roots.object_boilerplate_description_map(),
+ SKIP_WRITE_BARRIER);
- FixedArray::cast(obj)->set_length(1);
- FixedArray::cast(obj)->set(ObjectBoilerplateDescription::kLiteralTypeOffset,
- Smi::kZero);
+ FixedArray::cast(obj).set_length(1);
+ FixedArray::cast(obj).set(ObjectBoilerplateDescription::kLiteralTypeOffset,
+ Smi::kZero);
}
set_empty_object_boilerplate_description(
ObjectBoilerplateDescription::cast(obj));
@@ -558,9 +530,9 @@ bool Heap::CreateInitialMaps() {
AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
- ArrayBoilerplateDescription::cast(obj)->set_constant_elements(
+ ArrayBoilerplateDescription::cast(obj).set_constant_elements(
roots.empty_fixed_array());
- ArrayBoilerplateDescription::cast(obj)->set_elements_kind(
+ ArrayBoilerplateDescription::cast(obj).set_elements_kind(
ElementsKind::PACKED_SMI_ELEMENTS);
}
set_empty_array_boilerplate_description(
@@ -572,7 +544,7 @@ bool Heap::CreateInitialMaps() {
if (!allocation.To(&obj)) return false;
}
set_true_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kTrue);
+ Oddball::cast(obj).set_kind(Oddball::kTrue);
{
AllocationResult allocation =
@@ -580,14 +552,14 @@ bool Heap::CreateInitialMaps() {
if (!allocation.To(&obj)) return false;
}
set_false_value(Oddball::cast(obj));
- Oddball::cast(obj)->set_kind(Oddball::kFalse);
+ Oddball::cast(obj).set_kind(Oddball::kFalse);
// Empty arrays.
{
if (!AllocateRaw(ByteArray::SizeFor(0), AllocationType::kReadOnly).To(&obj))
return false;
- obj->set_map_after_allocation(roots.byte_array_map(), SKIP_WRITE_BARRIER);
- ByteArray::cast(obj)->set_length(0);
+ obj.set_map_after_allocation(roots.byte_array_map(), SKIP_WRITE_BARRIER);
+ ByteArray::cast(obj).set_length(0);
set_empty_byte_array(ByteArray::cast(obj));
}
@@ -596,9 +568,9 @@ bool Heap::CreateInitialMaps() {
.To(&obj)) {
return false;
}
- obj->set_map_after_allocation(roots.property_array_map(),
- SKIP_WRITE_BARRIER);
- PropertyArray::cast(obj)->initialize_length(0);
+ obj.set_map_after_allocation(roots.property_array_map(),
+ SKIP_WRITE_BARRIER);
+ PropertyArray::cast(obj).initialize_length(0);
set_empty_property_array(PropertyArray::cast(obj));
}
@@ -607,27 +579,15 @@ bool Heap::CreateInitialMaps() {
.To(&obj)) {
return false;
}
- obj->set_map_after_allocation(roots.closure_feedback_cell_array_map(),
- SKIP_WRITE_BARRIER);
- FixedArray::cast(obj)->set_length(0);
+ obj.set_map_after_allocation(roots.closure_feedback_cell_array_map(),
+ SKIP_WRITE_BARRIER);
+ FixedArray::cast(obj).set_length(0);
set_empty_closure_feedback_cell_array(ClosureFeedbackCellArray::cast(obj));
}
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype) \
- { \
- FixedTypedArrayBase obj; \
- if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) { \
- return false; \
- } \
- set_empty_fixed_##type##_array(obj); \
- }
-
- TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
-#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
-
DCHECK(!InYoungGeneration(roots.empty_fixed_array()));
- roots.bigint_map()->SetConstructorFunctionIndex(
+ roots.bigint_map().SetConstructorFunctionIndex(
Context::BIGINT_FUNCTION_INDEX);
return true;
@@ -654,7 +614,7 @@ void Heap::CreateInitialObjects() {
// The -0 value must be set before NewNumber works.
set_minus_zero_value(
*factory->NewHeapNumber(-0.0, AllocationType::kReadOnly));
- DCHECK(std::signbit(roots.minus_zero_value()->Number()));
+ DCHECK(std::signbit(roots.minus_zero_value().Number()));
set_nan_value(*factory->NewHeapNumber(
std::numeric_limits<double>::quiet_NaN(), AllocationType::kReadOnly));
@@ -941,9 +901,19 @@ void Heap::CreateInitialObjects() {
set_noscript_shared_function_infos(roots.empty_weak_array_list());
+ /* Canonical off-heap trampoline data */
set_off_heap_trampoline_relocation_info(
*Builtins::GenerateOffHeapTrampolineRelocInfo(isolate_));
+ set_trampoline_trivial_code_data_container(
+ *isolate()->factory()->NewCodeDataContainer(0,
+ AllocationType::kReadOnly));
+
+ set_trampoline_promise_rejection_code_data_container(
+ *isolate()->factory()->NewCodeDataContainer(
+ Code::IsPromiseRejectionField::encode(true),
+ AllocationType::kReadOnly));
+
// Evaluate the hash values which will then be cached in the strings.
isolate()->factory()->zero_string()->Hash();
isolate()->factory()->one_string()->Hash();
@@ -973,10 +943,10 @@ void Heap::CreateInternalAccessorInfoObjects() {
SetterType) \
AccessorInfo::cast( \
Object(roots_table()[RootIndex::k##AccessorName##Accessor])) \
- ->set_getter_side_effect_type(SideEffectType::GetterType); \
+ .set_getter_side_effect_type(SideEffectType::GetterType); \
AccessorInfo::cast( \
Object(roots_table()[RootIndex::k##AccessorName##Accessor])) \
- ->set_setter_side_effect_type(SideEffectType::SetterType);
+ .set_setter_side_effect_type(SideEffectType::SetterType);
ACCESSOR_INFO_LIST_GENERATOR(INIT_SIDE_EFFECT_FLAG, /* not used */)
#undef INIT_SIDE_EFFECT_FLAG
}
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 894563bacd..f7efc64247 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -8,12 +8,12 @@
#include <map>
#include <stack>
-#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/slots.h"
-#include "src/utils.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -387,7 +387,8 @@ class SlotSet : public Malloced {
};
enum SlotType {
- EMBEDDED_OBJECT_SLOT,
+ FULL_EMBEDDED_OBJECT_SLOT,
+ COMPRESSED_EMBEDDED_OBJECT_SLOT,
OBJECT_SLOT,
CODE_TARGET_SLOT,
CODE_ENTRY_SLOT,
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 091ab6503f..308d4f51b1 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -12,8 +12,8 @@
#include "src/base/v8-fallthrough.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
-#include "src/msan.h"
#include "src/objects/code-inl.h"
+#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
@@ -54,8 +54,8 @@ HeapObject SemiSpaceIterator::Next() {
if (current_ == limit_) return HeapObject();
}
HeapObject object = HeapObject::FromAddress(current_);
- current_ += object->Size();
- if (!object->IsFiller()) {
+ current_ += object.Size();
+ if (!object.IsFiller()) {
return object;
}
}
@@ -80,11 +80,11 @@ HeapObject HeapObjectIterator::FromCurrentPage() {
continue;
}
HeapObject obj = HeapObject::FromAddress(cur_addr_);
- const int obj_size = obj->Size();
+ const int obj_size = obj.Size();
cur_addr_ += obj_size;
DCHECK_LE(cur_addr_, cur_end_);
- if (!obj->IsFiller()) {
- if (obj->IsCode()) {
+ if (!obj.IsFiller()) {
+ if (obj.IsCode()) {
DCHECK_EQ(space_, space_->heap()->code_space());
DCHECK_CODEOBJECT_SIZE(obj_size, space_);
} else {
@@ -128,7 +128,7 @@ bool SemiSpace::Contains(HeapObject o) {
}
bool SemiSpace::Contains(Object o) {
- return o->IsHeapObject() && Contains(HeapObject::cast(o));
+ return o.IsHeapObject() && Contains(HeapObject::cast(o));
}
bool SemiSpace::ContainsSlow(Address a) {
@@ -142,7 +142,7 @@ bool SemiSpace::ContainsSlow(Address a) {
// NewSpace
bool NewSpace::Contains(Object o) {
- return o->IsHeapObject() && Contains(HeapObject::cast(o));
+ return o.IsHeapObject() && Contains(HeapObject::cast(o));
}
bool NewSpace::Contains(HeapObject o) {
@@ -195,7 +195,7 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
bool PagedSpace::TryFreeLast(HeapObject object, int object_size) {
if (allocation_info_.top() != kNullAddress) {
- const Address object_address = object->address();
+ const Address object_address = object.address();
if ((allocation_info_.top() - object_size) == object_address) {
allocation_info_.set_top(object_address);
return true;
@@ -375,22 +375,17 @@ HeapObject PagedSpace::TryAllocateLinearlyAligned(
return HeapObject::FromAddress(current_top);
}
-AllocationResult PagedSpace::AllocateRawUnaligned(
- int size_in_bytes, UpdateSkipList update_skip_list) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
DCHECK_IMPLIES(identity() == RO_SPACE, heap()->CanAllocateInReadOnlySpace());
if (!EnsureLinearAllocationArea(size_in_bytes)) {
return AllocationResult::Retry(identity());
}
HeapObject object = AllocateLinearly(size_in_bytes);
DCHECK(!object.is_null());
- if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
- SkipList::Update(object->address(), size_in_bytes);
- }
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
return object;
}
-
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE || identity() == RO_SPACE);
@@ -409,7 +404,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
object = TryAllocateLinearlyAligned(&allocation_size, alignment);
DCHECK(!object.is_null());
}
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object.address(), size_in_bytes);
return object;
}
@@ -439,7 +434,7 @@ AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
HeapObject heap_obj;
if (!result.IsRetry() && result.To(&heap_obj) && !is_local()) {
AllocationStep(static_cast<int>(size_in_bytes + bytes_since_last),
- heap_obj->address(), size_in_bytes);
+ heap_obj.address(), size_in_bytes);
StartNextInlineAllocationStep();
DCHECK_IMPLIES(
heap()->incremental_marking()->black_allocation(),
@@ -479,7 +474,7 @@ AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
obj = heap()->PrecedeWithFiller(obj, filler_size);
}
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
return obj;
}
@@ -500,7 +495,7 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
allocation_info_.set_top(top + size_in_bytes);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj.address(), size_in_bytes);
return obj;
}
@@ -543,7 +538,7 @@ LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
bool ok = result.To(&obj);
USE(ok);
DCHECK(ok);
- Address top = HeapObject::cast(obj)->address();
+ Address top = HeapObject::cast(obj).address();
return LocalAllocationBuffer(heap, LinearAllocationArea(top, top + size));
}
@@ -559,7 +554,7 @@ bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
bool LocalAllocationBuffer::TryFreeLast(HeapObject object, int object_size) {
if (IsValid()) {
- const Address object_address = object->address();
+ const Address object_address = object.address();
if ((allocation_info_.top() - object_size) == object_address) {
allocation_info_.set_top(object_address);
return true;
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index ada9777215..2c8cbdfc32 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -4,31 +4,33 @@
#include "src/heap/spaces.h"
+#include <cinttypes>
#include <utility>
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
-#include "src/counters.h"
+#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
-#include "src/msan.h"
-#include "src/objects-inl.h"
+#include "src/init/v8.h"
+#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
#include "src/objects/js-array-buffer-inl.h"
-#include "src/objects/js-array-inl.h"
-#include "src/ostreams.h"
+#include "src/objects/objects-inl.h"
+#include "src/sanitizer/msan.h"
#include "src/snapshot/snapshot.h"
-#include "src/v8.h"
-#include "src/vm-state-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -49,7 +51,11 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
cur_end_(kNullAddress),
space_(space),
page_range_(space->first_page(), nullptr),
- current_page_(page_range_.begin()) {}
+ current_page_(page_range_.begin()) {
+#ifdef V8_SHARED_RO_HEAP
+ DCHECK_NE(space->identity(), RO_SPACE);
+#endif
+}
HeapObjectIterator::HeapObjectIterator(Page* page)
: cur_addr_(kNullAddress),
@@ -59,10 +65,14 @@ HeapObjectIterator::HeapObjectIterator(Page* page)
current_page_(page_range_.begin()) {
#ifdef DEBUG
Space* owner = page->owner();
- DCHECK(owner == page->heap()->old_space() ||
- owner == page->heap()->map_space() ||
- owner == page->heap()->code_space() ||
- owner == page->heap()->read_only_space());
+ // TODO(v8:7464): Always enforce this once PagedSpace::Verify is no longer
+ // used to verify read-only space for non-shared builds.
+#ifdef V8_SHARED_RO_HEAP
+ DCHECK_NE(owner->identity(), RO_SPACE);
+#endif
+ // Do not access the heap of the read-only space.
+ DCHECK(owner->identity() == RO_SPACE || owner->identity() == OLD_SPACE ||
+ owner->identity() == MAP_SPACE || owner->identity() == CODE_SPACE);
#endif // DEBUG
}
@@ -72,17 +82,19 @@ bool HeapObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
- Heap* heap = space_->heap();
- heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
#ifdef ENABLE_MINOR_MC
- if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
+ Heap* heap = space_->heap();
+ heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
+ if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
heap->minor_mark_compact_collector()->MakeIterable(
cur_page, MarkingTreatmentMode::CLEAR,
FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
+ }
#else
DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
#endif // ENABLE_MINOR_MC
+
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
@@ -206,7 +218,7 @@ void MemoryAllocator::InitializeCodePageAllocator(
NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
requested));
- heap_reservation_.TakeControl(&reservation);
+ heap_reservation_ = std::move(reservation);
code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
page_allocator, aligned_base, size,
static_cast<size_t>(MemoryChunk::kAlignment));
@@ -456,7 +468,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
return kNullAddress;
}
- controller->TakeControl(&reservation);
+ *controller = std::move(reservation);
return base;
}
@@ -602,6 +614,66 @@ void MemoryChunk::SetReadAndWritable() {
}
}
+void CodeObjectRegistry::RegisterNewlyAllocatedCodeObject(Address code) {
+ auto result = code_object_registry_newly_allocated_.insert(code);
+ USE(result);
+ DCHECK(result.second);
+}
+
+void CodeObjectRegistry::RegisterAlreadyExistingCodeObject(Address code) {
+ code_object_registry_already_existing_.push_back(code);
+}
+
+void CodeObjectRegistry::Clear() {
+ code_object_registry_already_existing_.clear();
+ code_object_registry_newly_allocated_.clear();
+}
+
+void CodeObjectRegistry::Finalize() {
+ code_object_registry_already_existing_.shrink_to_fit();
+}
+
+bool CodeObjectRegistry::Contains(Address object) const {
+ return (code_object_registry_newly_allocated_.find(object) !=
+ code_object_registry_newly_allocated_.end()) ||
+ (std::binary_search(code_object_registry_already_existing_.begin(),
+ code_object_registry_already_existing_.end(),
+ object));
+}
+
+Address CodeObjectRegistry::GetCodeObjectStartFromInnerAddress(
+ Address address) const {
+ // Let's first find the object which comes right before address in the vector
+ // of already existing code objects.
+ Address already_existing_set_ = 0;
+ Address newly_allocated_set_ = 0;
+ if (!code_object_registry_already_existing_.empty()) {
+ auto it =
+ std::upper_bound(code_object_registry_already_existing_.begin(),
+ code_object_registry_already_existing_.end(), address);
+ if (it != code_object_registry_already_existing_.begin()) {
+ already_existing_set_ = *(--it);
+ }
+ }
+
+ // Next, let's find the object which comes right before address in the set
+ // of newly allocated code objects.
+ if (!code_object_registry_newly_allocated_.empty()) {
+ auto it = code_object_registry_newly_allocated_.upper_bound(address);
+ if (it != code_object_registry_newly_allocated_.begin()) {
+ newly_allocated_set_ = *(--it);
+ }
+ }
+
+ // The code objects which contains address has to be in one of the two
+ // data structures.
+ DCHECK(already_existing_set_ != 0 || newly_allocated_set_ != 0);
+
+ // The address which is closest to the given address is the code object.
+ return already_existing_set_ > newly_allocated_set_ ? already_existing_set_
+ : newly_allocated_set_;
+}
+
namespace {
PageAllocator::Permission DefaultWritableCodePermissions() {
@@ -635,7 +707,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
chunk->invalidated_slots_ = nullptr;
- chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
chunk->set_concurrent_sweeping_state(kSweepingDone);
@@ -688,6 +759,12 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->reservation_ = std::move(reservation);
+ if (owner->identity() == CODE_SPACE) {
+ chunk->code_object_registry_ = new CodeObjectRegistry();
+ } else {
+ chunk->code_object_registry_ = nullptr;
+ }
+
return chunk;
}
@@ -697,7 +774,7 @@ Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
page->owner()->identity()),
page->area_size());
// Make sure that categories are initialized before freeing the area.
- page->ResetAllocatedBytes();
+ page->ResetAllocationStatistics();
page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->AllocateFreeListCategories();
page->InitializeFreeListCategories();
@@ -905,7 +982,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
// linear allocation area.
if ((base + chunk_size) == 0u) {
CHECK(!last_chunk_.IsReserved());
- last_chunk_.TakeControl(&reservation);
+ last_chunk_ = std::move(reservation);
UncommitMemory(&last_chunk_);
size_ -= chunk_size;
if (executable == EXECUTABLE) {
@@ -947,7 +1024,10 @@ void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
}
}
-void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }
+void Page::ResetAllocationStatistics() {
+ allocated_bytes_ = area_size();
+ wasted_memory_ = 0;
+}
void Page::AllocateLocalTracker() {
DCHECK_NULL(local_tracker_);
@@ -958,10 +1038,6 @@ bool Page::contains_array_buffers() {
return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
}
-void Page::ResetFreeListStatistics() {
- wasted_memory_ = 0;
-}
-
size_t Page::AvailableInFreeList() {
size_t sum = 0;
ForAllFreeListCategories([&sum](FreeListCategory* category) {
@@ -975,11 +1051,11 @@ namespace {
// Skips filler starting from the given filler until the end address.
// Returns the first address after the skipped fillers.
Address SkipFillers(HeapObject filler, Address end) {
- Address addr = filler->address();
+ Address addr = filler.address();
while (addr < end) {
filler = HeapObject::FromAddress(addr);
- CHECK(filler->IsFiller());
- addr = filler->address() + filler->Size();
+ CHECK(filler.IsFiller());
+ addr = filler.address() + filler.Size();
}
return addr;
}
@@ -995,14 +1071,14 @@ size_t Page::ShrinkToHighWaterMark() {
// Shrink pages to high water mark. The water mark points either to a filler
// or the area_end.
HeapObject filler = HeapObject::FromAddress(HighWaterMark());
- if (filler->address() == area_end()) return 0;
- CHECK(filler->IsFiller());
+ if (filler.address() == area_end()) return 0;
+ CHECK(filler.IsFiller());
// Ensure that no objects were allocated in [filler, area_end) region.
DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
// Ensure that no objects will be allocated on this page.
DCHECK_EQ(0u, AvailableInFreeList());
- size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
+ size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()),
MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
@@ -1013,14 +1089,14 @@ size_t Page::ShrinkToHighWaterMark() {
reinterpret_cast<void*>(area_end() - unused));
}
heap()->CreateFillerObjectAt(
- filler->address(),
- static_cast<int>(area_end() - filler->address() - unused),
+ filler.address(),
+ static_cast<int>(area_end() - filler.address() - unused),
ClearRecordedSlots::kNo);
heap()->memory_allocator()->PartialFreeMemory(
this, address() + size() - unused, unused, area_end() - unused);
- if (filler->address() != area_end()) {
- CHECK(filler->IsFiller());
- CHECK_EQ(filler->address() + filler->Size(), area_end());
+ if (filler.address() != area_end()) {
+ CHECK(filler.IsFiller());
+ CHECK_EQ(filler.address() + filler.Size(), area_end());
}
}
return unused;
@@ -1076,13 +1152,8 @@ void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
static_cast<int>(released_bytes));
}
-void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
- DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
- LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
-
- isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
- chunk->IsEvacuationCandidate());
-
+void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
@@ -1094,13 +1165,21 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
size_executable_ -= size;
}
- chunk->SetFlag(MemoryChunk::PRE_FREED);
-
if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
+ chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
+void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+ LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+ UnregisterMemory(chunk);
+ isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
+ chunk->IsEvacuationCandidate());
+ chunk->SetFlag(MemoryChunk::PRE_FREED);
+}
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
+ DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllocatedMemory();
@@ -1290,10 +1369,6 @@ bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
// MemoryChunk implementation
void MemoryChunk::ReleaseAllocatedMemory() {
- if (skip_list_ != nullptr) {
- delete skip_list_;
- skip_list_ = nullptr;
- }
if (mutex_ != nullptr) {
delete mutex_;
mutex_ = nullptr;
@@ -1310,6 +1385,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();
+ if (code_object_registry_ != nullptr) delete code_object_registry_;
if (!IsLargePage()) {
Page* page = static_cast<Page*>(this);
@@ -1427,7 +1503,7 @@ void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject old_start,
auto it = invalidated_slots()->find(old_start);
if (it != invalidated_slots()->end()) {
int old_size = it->second;
- int delta = static_cast<int>(new_start->address() - old_start->address());
+ int delta = static_cast<int>(new_start.address() - old_start.address());
invalidated_slots()->erase(it);
(*invalidated_slots())[new_start] = old_size - delta;
}
@@ -1740,13 +1816,6 @@ int PagedSpace::CountTotalPages() {
return count;
}
-
-void PagedSpace::ResetFreeListStatistics() {
- for (Page* page : *this) {
- page->ResetFreeListStatistics();
- }
-}
-
void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != kNullAddress && top != limit &&
@@ -1944,8 +2013,8 @@ bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
Page* page = Page::FromHeapObject(new_node);
IncreaseAllocatedBytes(new_node_size, page);
- Address start = new_node->address();
- Address end = new_node->address() + new_node_size;
+ Address start = new_node.address();
+ Address end = new_node.address() + new_node_size;
Address limit = ComputeLimit(start, end, size_in_bytes);
DCHECK_LE(limit, end);
DCHECK_LE(size_in_bytes, limit - start);
@@ -1991,39 +2060,39 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
Address top = page->area_end();
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- CHECK(end_of_previous_object <= object->address());
+ CHECK(end_of_previous_object <= object.address());
// The first word should be a map, and we expect all map pointers to
// be in map space.
- Map map = object->map();
- CHECK(map->IsMap());
- CHECK(heap()->map_space()->Contains(map) ||
- heap()->read_only_space()->Contains(map));
+ Map map = object.map();
+ CHECK(map.IsMap());
+ CHECK(isolate->heap()->map_space()->Contains(map) ||
+ ReadOnlyHeap::Contains(map));
// Perform space-specific object verification.
VerifyObject(object);
// The object itself should look OK.
- object->ObjectVerify(isolate);
+ object.ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
- heap()->VerifyRememberedSetFor(object);
+ isolate->heap()->VerifyRememberedSetFor(object);
}
// All the interior pointers should be contained in the heap.
- int size = object->Size();
- object->IterateBody(map, size, visitor);
- CHECK(object->address() + size <= top);
- end_of_previous_object = object->address() + size;
+ int size = object.Size();
+ object.IterateBody(map, size, visitor);
+ CHECK(object.address() + size <= top);
+ end_of_previous_object = object.address() + size;
- if (object->IsExternalString()) {
+ if (object.IsExternalString()) {
ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string->ExternalPayloadSize();
+ size_t size = external_string.ExternalPayloadSize();
external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
- } else if (object->IsJSArrayBuffer()) {
+ } else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = array_buffer->byte_length();
+ size_t size = array_buffer.byte_length();
external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -2054,7 +2123,7 @@ void PagedSpace::VerifyLiveBytes() {
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
// All the interior pointers should be contained in the heap.
if (marking_state->IsBlack(object)) {
- black_size += object->Size();
+ black_size += object.Size();
}
}
CHECK_LE(black_size, marking_state->live_bytes(page));
@@ -2072,8 +2141,8 @@ void PagedSpace::VerifyCountersAfterSweeping() {
HeapObjectIterator it(page);
size_t real_allocated = 0;
for (HeapObject object = it.Next(); !object.is_null(); object = it.Next()) {
- if (!object->IsFiller()) {
- real_allocated += object->Size();
+ if (!object.IsFiller()) {
+ real_allocated += object.Size();
}
}
total_allocated += page->allocated_bytes();
@@ -2489,31 +2558,31 @@ void NewSpace::Verify(Isolate* isolate) {
// The first word should be a map, and we expect all map pointers to
// be in map space or read-only space.
- Map map = object->map();
- CHECK(map->IsMap());
+ Map map = object.map();
+ CHECK(map.IsMap());
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
// The object should not be code or a map.
- CHECK(!object->IsMap());
- CHECK(!object->IsAbstractCode());
+ CHECK(!object.IsMap());
+ CHECK(!object.IsAbstractCode());
// The object itself should look OK.
- object->ObjectVerify(isolate);
+ object.ObjectVerify(isolate);
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor(heap());
- int size = object->Size();
- object->IterateBody(map, size, &visitor);
+ int size = object.Size();
+ object.IterateBody(map, size, &visitor);
- if (object->IsExternalString()) {
+ if (object.IsExternalString()) {
ExternalString external_string = ExternalString::cast(object);
- size_t size = external_string->ExternalPayloadSize();
+ size_t size = external_string.ExternalPayloadSize();
external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
- } else if (object->IsJSArrayBuffer()) {
+ } else if (object.IsJSArrayBuffer()) {
JSArrayBuffer array_buffer = JSArrayBuffer::cast(object);
if (ArrayBufferTracker::IsTracked(array_buffer)) {
- size_t size = array_buffer->byte_length();
+ size_t size = array_buffer.byte_length();
external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
}
}
@@ -2862,12 +2931,12 @@ FreeSpace FreeListCategory::PickNodeFromList(size_t minimum_size,
size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace node = top();
- if (node.is_null() || static_cast<size_t>(node->Size()) < minimum_size) {
+ if (node.is_null() || static_cast<size_t>(node.Size()) < minimum_size) {
*node_size = 0;
return FreeSpace();
}
- set_top(node->next());
- *node_size = node->Size();
+ set_top(node.next());
+ *node_size = node.Size();
available_ -= *node_size;
return node;
}
@@ -2877,20 +2946,20 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
DCHECK(page()->CanAllocate());
FreeSpace prev_non_evac_node;
for (FreeSpace cur_node = top(); !cur_node.is_null();
- cur_node = cur_node->next()) {
- size_t size = cur_node->size();
+ cur_node = cur_node.next()) {
+ size_t size = cur_node.size();
if (size >= minimum_size) {
DCHECK_GE(available_, size);
available_ -= size;
if (cur_node == top()) {
- set_top(cur_node->next());
+ set_top(cur_node.next());
}
if (!prev_non_evac_node.is_null()) {
MemoryChunk* chunk = MemoryChunk::FromHeapObject(prev_non_evac_node);
if (chunk->owner()->identity() == CODE_SPACE) {
chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
}
- prev_non_evac_node->set_next(cur_node->next());
+ prev_non_evac_node.set_next(cur_node.next());
}
*node_size = size;
return cur_node;
@@ -2904,7 +2973,7 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
void FreeListCategory::Free(Address start, size_t size_in_bytes,
FreeMode mode) {
FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
- free_space->set_next(top());
+ free_space.set_next(top());
set_top(free_space);
available_ += size_in_bytes;
if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
@@ -2926,7 +2995,7 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
DCHECK(map_location.contains_value(
ReadOnlyRoots(heap).free_space_map().ptr()));
}
- n = n->next();
+ n = n.next();
}
}
@@ -2949,7 +3018,7 @@ void FreeList::Reset() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i] = nullptr;
}
- ResetStats();
+ wasted_bytes_ = 0;
}
size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
@@ -3132,10 +3201,10 @@ size_t FreeListCategory::SumFreeList() {
while (!cur.is_null()) {
// We can't use "cur->map()" here because both cur's map and the
// root can be null during bootstrapping.
- DCHECK(cur->map_slot().contains_value(
+ DCHECK(cur.map_slot().contains_value(
page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap).ptr()));
- sum += cur->relaxed_read_size();
- cur = cur->next();
+ sum += cur.relaxed_read_size();
+ cur = cur.next();
}
return sum;
}
@@ -3145,7 +3214,7 @@ int FreeListCategory::FreeListLength() {
FreeSpace cur = top();
while (!cur.is_null()) {
length++;
- cur = cur->next();
+ cur = cur.next();
if (length == kVeryLongFreeList) return length;
}
return length;
@@ -3293,7 +3362,7 @@ bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
// MapSpace implementation
#ifdef VERIFY_HEAP
-void MapSpace::VerifyObject(HeapObject object) { CHECK(object->IsMap()); }
+void MapSpace::VerifyObject(HeapObject object) { CHECK(object.IsMap()); }
#endif
ReadOnlySpace::ReadOnlySpace(Heap* heap)
@@ -3303,34 +3372,22 @@ ReadOnlySpace::ReadOnlySpace(Heap* heap)
void ReadOnlyPage::MakeHeaderRelocatable() {
if (mutex_ != nullptr) {
- // TODO(v8:7464): heap_ and owner_ need to be cleared as well.
delete mutex_;
+ heap_ = nullptr;
mutex_ = nullptr;
local_tracker_ = nullptr;
reservation_.Reset();
}
}
-void ReadOnlySpace::Forget() {
- for (Page* p : *this) {
- heap()->memory_allocator()->PreFreeMemory(p);
- }
-}
-
-void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
- MemoryAllocator* memory_allocator = heap()->memory_allocator();
+void ReadOnlySpace::SetPermissionsForPages(MemoryAllocator* memory_allocator,
+ PageAllocator::Permission access) {
for (Page* p : *this) {
- ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
- if (access == PageAllocator::kRead) {
- page->MakeHeaderRelocatable();
- }
-
// Read only pages don't have valid reservation object so we get proper
// page allocator manually.
v8::PageAllocator* page_allocator =
- memory_allocator->page_allocator(page->executable());
- CHECK(
- SetPermissions(page_allocator, page->address(), page->size(), access));
+ memory_allocator->page_allocator(p->executable());
+ CHECK(SetPermissions(page_allocator, p->address(), p->size(), access));
}
}
@@ -3354,8 +3411,8 @@ void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
if (start < end - size) {
// A region at the high watermark is already in free list.
HeapObject filler = HeapObject::FromAddress(start);
- CHECK(filler->IsFiller());
- start += filler->Size();
+ CHECK(filler.IsFiller());
+ start += filler.Size();
}
CHECK_EQ(size, static_cast<int>(end - start));
heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
@@ -3365,30 +3422,38 @@ void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
void ReadOnlySpace::ClearStringPaddingIfNeeded() {
if (is_string_padding_cleared_) return;
- WritableScope writable_scope(this);
- for (Page* page : *this) {
- HeapObjectIterator iterator(page);
- for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
- if (o->IsSeqOneByteString()) {
- SeqOneByteString::cast(o)->clear_padding();
- } else if (o->IsSeqTwoByteString()) {
- SeqTwoByteString::cast(o)->clear_padding();
- }
+ ReadOnlyHeapIterator iterator(this);
+ for (HeapObject o = iterator.Next(); !o.is_null(); o = iterator.Next()) {
+ if (o.IsSeqOneByteString()) {
+ SeqOneByteString::cast(o).clear_padding();
+ } else if (o.IsSeqTwoByteString()) {
+ SeqTwoByteString::cast(o).clear_padding();
}
}
is_string_padding_cleared_ = true;
}
-void ReadOnlySpace::MarkAsReadOnly() {
+void ReadOnlySpace::Seal(SealMode ro_mode) {
DCHECK(!is_marked_read_only_);
+
FreeLinearAllocationArea();
is_marked_read_only_ = true;
- SetPermissionsForPages(PageAllocator::kRead);
+ auto* memory_allocator = heap()->memory_allocator();
+
+ if (ro_mode == SealMode::kDetachFromHeapAndForget) {
+ DetachFromHeap();
+ for (Page* p : *this) {
+ memory_allocator->UnregisterMemory(p);
+ static_cast<ReadOnlyPage*>(p)->MakeHeaderRelocatable();
+ }
+ }
+
+ SetPermissionsForPages(memory_allocator, PageAllocator::kRead);
}
-void ReadOnlySpace::MarkAsReadWrite() {
+void ReadOnlySpace::Unseal() {
DCHECK(is_marked_read_only_);
- SetPermissionsForPages(PageAllocator::kReadWrite);
+ SetPermissionsForPages(heap()->memory_allocator(), PageAllocator::kReadWrite);
is_marked_read_only_ = false;
}
@@ -3477,7 +3542,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->incremental_marking()->marking_state()->IsBlack(object));
page->InitializationMemoryFence();
heap()->NotifyOldGenerationExpansion();
- AllocationStep(object_size, object->address(), object_size);
+ AllocationStep(object_size, object.address(), object_size);
return object;
}
@@ -3492,7 +3557,7 @@ LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
HeapObject object = page->GetObject();
- heap()->CreateFillerObjectAt(object->address(), object_size,
+ heap()->CreateFillerObjectAt(object.address(), object_size,
ClearRecordedSlots::kNo);
return page;
}
@@ -3553,7 +3618,7 @@ void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
DCHECK(page->IsLargePage());
DCHECK(page->IsFlagSet(MemoryChunk::FROM_PAGE));
DCHECK(!page->IsFlagSet(MemoryChunk::TO_PAGE));
- size_t object_size = static_cast<size_t>(page->GetObject()->Size());
+ size_t object_size = static_cast<size_t>(page->GetObject().Size());
static_cast<LargeObjectSpace*>(page->owner())->RemovePage(page, object_size);
AddPage(page, object_size);
page->ClearFlag(MemoryChunk::FROM_PAGE);
@@ -3588,11 +3653,11 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* next_current = current->next_page();
HeapObject object = current->GetObject();
DCHECK(!marking_state->IsGrey(object));
- size_t size = static_cast<size_t>(object->Size());
+ size_t size = static_cast<size_t>(object.Size());
if (marking_state->IsBlack(object)) {
Address free_start;
surviving_object_size += size;
- if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
+ if ((free_start = current->GetAddressToShrink(object.address(), size)) !=
0) {
DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
current->ClearOutOfLiveRangeSlots(free_start);
@@ -3600,7 +3665,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
current->size() - (free_start - current->address());
heap()->memory_allocator()->PartialFreeMemory(
current, free_start, bytes_to_free,
- current->area_start() + object->Size());
+ current->area_start() + object.Size());
size_ -= bytes_to_free;
AccountUncommitted(bytes_to_free);
}
@@ -3619,7 +3684,7 @@ bool LargeObjectSpace::Contains(HeapObject object) {
bool owned = (chunk->owner() == this);
- SLOW_DCHECK(!owned || ContainsSlow(object->address()));
+ SLOW_DCHECK(!owned || ContainsSlow(object.address()));
return owned;
}
@@ -3651,60 +3716,59 @@ void LargeObjectSpace::Verify(Isolate* isolate) {
// object area start.
HeapObject object = chunk->GetObject();
Page* page = Page::FromHeapObject(object);
- CHECK(object->address() == page->area_start());
+ CHECK(object.address() == page->area_start());
// The first word should be a map, and we expect all map pointers to be
// in map space or read-only space.
- Map map = object->map();
- CHECK(map->IsMap());
+ Map map = object.map();
+ CHECK(map.IsMap());
CHECK(heap()->map_space()->Contains(map) ||
heap()->read_only_space()->Contains(map));
// We have only the following types in the large object space:
- if (!(object->IsAbstractCode() || object->IsSeqString() ||
- object->IsExternalString() || object->IsThinString() ||
- object->IsFixedArray() || object->IsFixedDoubleArray() ||
- object->IsWeakFixedArray() || object->IsWeakArrayList() ||
- object->IsPropertyArray() || object->IsByteArray() ||
- object->IsFeedbackVector() || object->IsBigInt() ||
- object->IsFreeSpace() || object->IsFeedbackMetadata() ||
- object->IsContext() ||
- object->IsUncompiledDataWithoutPreparseData() ||
- object->IsPreparseData()) &&
+ if (!(object.IsAbstractCode() || object.IsSeqString() ||
+ object.IsExternalString() || object.IsThinString() ||
+ object.IsFixedArray() || object.IsFixedDoubleArray() ||
+ object.IsWeakFixedArray() || object.IsWeakArrayList() ||
+ object.IsPropertyArray() || object.IsByteArray() ||
+ object.IsFeedbackVector() || object.IsBigInt() ||
+ object.IsFreeSpace() || object.IsFeedbackMetadata() ||
+ object.IsContext() || object.IsUncompiledDataWithoutPreparseData() ||
+ object.IsPreparseData()) &&
!FLAG_young_generation_large_objects) {
FATAL("Found invalid Object (instance_type=%i) in large object space.",
- object->map()->instance_type());
+ object.map().instance_type());
}
// The object itself should look OK.
- object->ObjectVerify(isolate);
+ object.ObjectVerify(isolate);
if (!FLAG_verify_heap_skip_remembered_set) {
heap()->VerifyRememberedSetFor(object);
}
// Byte arrays and strings don't have interior pointers.
- if (object->IsAbstractCode()) {
+ if (object.IsAbstractCode()) {
VerifyPointersVisitor code_visitor(heap());
- object->IterateBody(map, object->Size(), &code_visitor);
- } else if (object->IsFixedArray()) {
+ object.IterateBody(map, object.Size(), &code_visitor);
+ } else if (object.IsFixedArray()) {
FixedArray array = FixedArray::cast(object);
- for (int j = 0; j < array->length(); j++) {
- Object element = array->get(j);
- if (element->IsHeapObject()) {
+ for (int j = 0; j < array.length(); j++) {
+ Object element = array.get(j);
+ if (element.IsHeapObject()) {
HeapObject element_object = HeapObject::cast(element);
- CHECK(heap()->Contains(element_object));
- CHECK(element_object->map()->IsMap());
+ CHECK(IsValidHeapObject(heap(), element_object));
+ CHECK(element_object.map().IsMap());
}
}
- } else if (object->IsPropertyArray()) {
+ } else if (object.IsPropertyArray()) {
PropertyArray array = PropertyArray::cast(object);
- for (int j = 0; j < array->length(); j++) {
- Object property = array->get(j);
- if (property->IsHeapObject()) {
+ for (int j = 0; j < array.length(); j++) {
+ Object property = array.get(j);
+ if (property.IsHeapObject()) {
HeapObject property_object = HeapObject::cast(property);
CHECK(heap()->Contains(property_object));
- CHECK(property_object->map()->IsMap());
+ CHECK(property_object.map().IsMap());
}
}
}
@@ -3725,7 +3789,7 @@ void LargeObjectSpace::Print() {
StdoutStream os;
LargeObjectIterator it(this);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
- obj->Print(os);
+ obj.Print(os);
}
}
@@ -3742,9 +3806,9 @@ void Page::Print() {
heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
if (is_marked) {
- mark_size += object->Size();
+ mark_size += object.Size();
}
- object->ShortPrint();
+ object.ShortPrint();
PrintF("\n");
}
printf(" --------------------------------------\n");
@@ -3780,7 +3844,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
HeapObject result = page->GetObject();
page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
page->SetFlag(MemoryChunk::TO_PAGE);
- pending_object_.store(result->address(), std::memory_order_relaxed);
+ pending_object_.store(result.address(), std::memory_order_relaxed);
#ifdef ENABLE_MINOR_MC
if (FLAG_minor_mc) {
page->AllocateYoungGenerationBitmap();
@@ -3793,7 +3857,7 @@ AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
page->InitializationMemoryFence();
DCHECK(page->IsLargePage());
DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
- AllocationStep(object_size, result->address(), object_size);
+ AllocationStep(object_size, result.address(), object_size);
return result;
}
@@ -3816,7 +3880,7 @@ void NewLargeObjectSpace::FreeDeadObjects(
LargePage* page = *it;
it++;
HeapObject object = page->GetObject();
- size_t size = static_cast<size_t>(object->Size());
+ size_t size = static_cast<size_t>(object.Size());
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index ce286ef390..7522cac9cb 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -12,24 +12,24 @@
#include <unordered_set>
#include <vector>
-#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/list.h"
#include "src/base/platform/mutex.h"
-#include "src/cancelable-task.h"
-#include "src/flags.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
-#include "src/objects.h"
#include "src/objects/free-space.h"
#include "src/objects/heap-object.h"
#include "src/objects/map.h"
-#include "src/utils.h"
+#include "src/objects/objects.h"
+#include "src/tasks/cancelable-task.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -52,7 +52,6 @@ class MemoryChunkLayout;
class Page;
class PagedSpace;
class SemiSpace;
-class SkipList;
class SlotsBuffer;
class SlotSet;
class TypedSlotSet;
@@ -164,8 +163,6 @@ class FreeListCategory {
void Reset();
- void ResetStats() { Reset(); }
-
void RepairFreeList(Heap* heap);
// Relinks the category into the currently owning free list. Requires that the
@@ -233,6 +230,24 @@ class FreeListCategory {
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
};
+// The CodeObjectRegistry holds all start addresses of code objects of a given
+// MemoryChunk. Each MemoryChunk owns a separate CodeObjectRegistry. The
+// CodeObjectRegistry allows fast lookup from an inner pointer of a code object
+// to the actual code object.
+class V8_EXPORT_PRIVATE CodeObjectRegistry {
+ public:
+ void RegisterNewlyAllocatedCodeObject(Address code);
+ void RegisterAlreadyExistingCodeObject(Address code);
+ void Clear();
+ void Finalize();
+ bool Contains(Address code) const;
+ Address GetCodeObjectStartFromInnerAddress(Address address) const;
+
+ private:
+ std::vector<Address> code_object_registry_already_existing_;
+ std::set<Address> code_object_registry_newly_allocated_;
+};
+
class V8_EXPORT_PRIVATE MemoryChunkLayout {
public:
static size_t CodePageGuardStartOffset();
@@ -320,7 +335,11 @@ class MemoryChunk {
// |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
// enabled.
INCREMENTAL_MARKING = 1u << 18,
- NEW_SPACE_BELOW_AGE_MARK = 1u << 19
+ NEW_SPACE_BELOW_AGE_MARK = 1u << 19,
+
+ // The memory chunk freeing bookkeeping has been performed but the chunk has
+ // not yet been freed.
+ UNREGISTERED = 1u << 20
};
using Flags = uintptr_t;
@@ -385,7 +404,6 @@ class MemoryChunk {
+ kSystemPointerSize *
NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kSystemPointerSize // InvalidatedSlots* invalidated_slots_
- + kSystemPointerSize // SkipList* skip_list_
+ kSystemPointerSize // std::atomic<intptr_t> high_water_mark_
+ kSystemPointerSize // base::Mutex* mutex_
+ kSystemPointerSize // std::atomic<ConcurrentSweepingState>
@@ -401,7 +419,8 @@ class MemoryChunk {
// FreeListCategory categories_[kNumberOfCategories]
+ kSystemPointerSize // LocalArrayBufferTracker* local_tracker_
+ kIntptrSize // std::atomic<intptr_t> young_generation_live_byte_count_
- + kSystemPointerSize; // Bitmap* young_generation_bitmap_
+ + kSystemPointerSize // Bitmap* young_generation_bitmap_
+ + kSystemPointerSize; // CodeObjectRegistry* code_object_registry_
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -475,14 +494,13 @@ class MemoryChunk {
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
- inline Heap* heap() const { return heap_; }
+ inline Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
Heap* synchronized_heap();
- inline SkipList* skip_list() { return skip_list_; }
-
- inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
-
template <RememberedSetType type>
bool ContainsSlots() {
return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
@@ -673,6 +691,8 @@ class MemoryChunk {
base::ListNode<MemoryChunk>& list_node() { return list_node_; }
+ CodeObjectRegistry* GetCodeObjectRegistry() { return code_object_registry_; }
+
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -736,8 +756,6 @@ class MemoryChunk {
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_;
- SkipList* skip_list_;
-
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
std::atomic<intptr_t> high_water_mark_;
@@ -781,6 +799,8 @@ class MemoryChunk {
std::atomic<intptr_t> young_generation_live_byte_count_;
Bitmap* young_generation_bitmap_;
+ CodeObjectRegistry* code_object_registry_;
+
private:
void InitializeReservedMemory() { reservation_.Reset(); }
@@ -796,8 +816,7 @@ class MemoryChunk {
friend class PagedSpace;
};
-static_assert(sizeof(std::atomic<intptr_t>) == kSystemPointerSize,
- "sizeof(std::atomic<intptr_t>) == kSystemPointerSize");
+STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// -----------------------------------------------------------------------------
// A page is a memory chunk of a size 512K. Large object pages may be larger.
@@ -883,8 +902,6 @@ class Page : public MemoryChunk {
inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
bool contains_array_buffers();
- void ResetFreeListStatistics();
-
size_t AvailableInFreeList();
size_t AvailableInFreeListFromAllocatedBytes() {
@@ -909,7 +926,7 @@ class Page : public MemoryChunk {
allocated_bytes_ -= bytes;
}
- void ResetAllocatedBytes();
+ void ResetAllocationStatistics();
size_t ShrinkToHighWaterMark();
@@ -925,8 +942,6 @@ class Page : public MemoryChunk {
#endif // DEBUG
private:
- enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
-
friend class MemoryAllocator;
};
@@ -1000,7 +1015,10 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
external_backing_store_bytes_ = nullptr;
}
- Heap* heap() const { return heap_; }
+ Heap* heap() const {
+ DCHECK_NOT_NULL(heap_);
+ return heap_;
+ }
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
@@ -1090,6 +1108,8 @@ class V8_EXPORT_PRIVATE Space : public Malloced {
return !allocation_observers_paused_ && !allocation_observers_.empty();
}
+ void DetachFromHeap() { heap_ = nullptr; }
+
std::vector<AllocationObserver*> allocation_observers_;
// The List manages the pages that belong to the given space.
@@ -1144,59 +1164,6 @@ class CodeRangeAddressHint {
std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};
-class SkipList {
- public:
- SkipList() { Clear(); }
-
- void Clear() {
- for (int idx = 0; idx < kSize; idx++) {
- starts_[idx] = static_cast<Address>(-1);
- }
- }
-
- Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
-
- void AddObject(Address addr, int size) {
- int start_region = RegionNumber(addr);
- int end_region = RegionNumber(addr + size - kTaggedSize);
- for (int idx = start_region; idx <= end_region; idx++) {
- if (starts_[idx] > addr) {
- starts_[idx] = addr;
- } else {
- // In the first region, there may already be an object closer to the
- // start of the region. Do not change the start in that case. If this
- // is not the first region, you probably added overlapping objects.
- DCHECK_EQ(start_region, idx);
- }
- }
- }
-
- static inline int RegionNumber(Address addr) {
- return (addr & kPageAlignmentMask) >> kRegionSizeLog2;
- }
-
- static void Update(Address addr, int size) {
- Page* page = Page::FromAddress(addr);
- SkipList* list = page->skip_list();
- if (list == nullptr) {
- list = new SkipList();
- page->set_skip_list(list);
- }
-
- list->AddObject(addr, size);
- }
-
- private:
- static const int kRegionSizeLog2 = 13;
- static const int kRegionSize = 1 << kRegionSizeLog2;
- static const int kSize = Page::kPageSize / kRegionSize;
-
- STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
-
- Address starts_[kSize];
-};
-
-
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
@@ -1435,15 +1402,20 @@ class MemoryAllocator {
Unmapper* unmapper() { return &unmapper_; }
- // PreFree logically frees the object, i.e., it takes care of the size
- // bookkeeping and calls the allocation callback.
- void PreFreeMemory(MemoryChunk* chunk);
+ // Performs all necessary bookkeeping to free the memory, but does not free
+ // it.
+ void UnregisterMemory(MemoryChunk* chunk);
private:
void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
size_t requested);
- // FreeMemory can be called concurrently when PreFree was executed before.
+ // PreFreeMemory logically frees the object, i.e., it unregisters the memory,
+ // logs a delete event and adds the chunk to remembered unmapped pages.
+ void PreFreeMemory(MemoryChunk* chunk);
+
+ // PerformFreeMemory can be called concurrently when PreFree was executed
+ // before.
void PerformFreeMemory(MemoryChunk* chunk);
// See AllocatePage for public interface. Note that currently we only support
@@ -1866,12 +1838,6 @@ class FreeList {
// Clear the free list.
void Reset();
- void ResetStats() {
- wasted_bytes_ = 0;
- ForAllFreeListCategories(
- [](FreeListCategory* category) { category->ResetStats(); });
- }
-
// Return the number of bytes available on the free list.
size_t Available() {
size_t available = 0;
@@ -2146,17 +2112,14 @@ class V8_EXPORT_PRIVATE PagedSpace
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
- void ResetFreeListStatistics();
-
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
// discovered during the sweeping they are subtracted from the size and added
- // to the available and wasted totals.
- void ClearStats() {
+ // to the available and wasted totals. The free list is cleared as well.
+ void ClearAllocatorState() {
accounting_stats_.ClearSize();
- free_list_.ResetStats();
- ResetFreeListStatistics();
+ free_list_.Reset();
}
// Available bytes without growing. These are the bytes on the free list.
@@ -2179,13 +2142,10 @@ class V8_EXPORT_PRIVATE PagedSpace
// due to being too small to use for allocation.
virtual size_t Waste() { return free_list_.wasted_bytes(); }
- enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
-
// Allocate the requested number of bytes in the space if possible, return a
- // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
- // to be manually updated later.
+ // failure object if not.
V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
- int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
+ int size_in_bytes);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
@@ -2978,41 +2938,36 @@ class MapSpace : public PagedSpace {
class ReadOnlySpace : public PagedSpace {
public:
- class WritableScope {
- public:
- explicit WritableScope(ReadOnlySpace* space) : space_(space) {
- space_->MarkAsReadWrite();
- }
-
- ~WritableScope() { space_->MarkAsReadOnly(); }
-
- private:
- ReadOnlySpace* space_;
- };
-
explicit ReadOnlySpace(Heap* heap);
- // TODO(v8:7464): Remove this once PagedSpace::TearDown no longer writes to
+ // TODO(v8:7464): Remove this once PagedSpace::Unseal no longer writes to
// memory_chunk_list_.
- ~ReadOnlySpace() override { MarkAsReadWrite(); }
+ ~ReadOnlySpace() override { Unseal(); }
bool writable() const { return !is_marked_read_only_; }
V8_EXPORT_PRIVATE void ClearStringPaddingIfNeeded();
- void MarkAsReadOnly();
- // Make the heap forget the space for memory bookkeeping purposes
- // (e.g. prevent space's memory from registering as leaked).
- void Forget();
+
+ enum class SealMode { kDetachFromHeapAndForget, kDoNotDetachFromHeap };
+
+ // Seal the space by marking it read-only, optionally detaching it
+ // from the heap and forgetting it for memory bookkeeping purposes (e.g.
+ // prevent space's memory from registering as leaked).
+ void Seal(SealMode ro_mode);
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
void RepairFreeListsAfterDeserialization();
private:
- void MarkAsReadWrite();
- void SetPermissionsForPages(PageAllocator::Permission access);
+  // Unseal the space after it has been sealed, by making it writable.
+ // TODO(v8:7464): Only possible if the space hasn't been detached.
+ void Unseal();
+ void SetPermissionsForPages(MemoryAllocator* memory_allocator,
+ PageAllocator::Permission access);
bool is_marked_read_only_ = false;
+
//
// String padding must be cleared just before serialization and therefore the
// string padding in the space will already have been cleared if the space was
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 637b8062d4..e59e72d3a6 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -9,12 +9,12 @@
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/template-utils.h"
-#include "src/counters.h"
+#include "src/execution/isolate.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/store-buffer-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -78,7 +78,7 @@ void StoreBuffer::SetUp() {
}
current_ = 0;
top_ = start_[current_];
- virtual_memory_.TakeControl(&reservation);
+ virtual_memory_ = std::move(reservation);
}
void StoreBuffer::TearDown() {
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index d1be45f3e5..62b10b9071 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -5,14 +5,14 @@
#ifndef V8_HEAP_STORE_BUFFER_H_
#define V8_HEAP_STORE_BUFFER_H_
-#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
-#include "src/cancelable-task.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
+#include "src/tasks/cancelable-task.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/stress-scavenge-observer.cc b/deps/v8/src/heap/stress-scavenge-observer.cc
index c9f169ae45..b91825c38b 100644
--- a/deps/v8/src/heap/stress-scavenge-observer.cc
+++ b/deps/v8/src/heap/stress-scavenge-observer.cc
@@ -5,9 +5,9 @@
#include "src/heap/stress-scavenge-observer.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces.h"
-#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/sweeper.cc b/deps/v8/src/heap/sweeper.cc
index 3b1d9a7727..8f7b55bf2b 100644
--- a/deps/v8/src/heap/sweeper.cc
+++ b/deps/v8/src/heap/sweeper.cc
@@ -5,12 +5,12 @@
#include "src/heap/sweeper.h"
#include "src/base/template-utils.h"
+#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/remembered-set.h"
-#include "src/objects-inl.h"
-#include "src/vm-state-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -155,12 +155,11 @@ void Sweeper::StartSweeping() {
heap_->mark_compact_collector()->non_atomic_marking_state();
ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
int space_index = GetSweepSpaceIndex(space);
- std::sort(sweeping_list_[space_index].begin(),
- sweeping_list_[space_index].end(),
- [marking_state](Page* a, Page* b) {
- return marking_state->live_bytes(a) <
- marking_state->live_bytes(b);
- });
+ std::sort(
+ sweeping_list_[space_index].begin(), sweeping_list_[space_index].end(),
+ [marking_state](Page* a, Page* b) {
+ return marking_state->live_bytes(a) > marking_state->live_bytes(b);
+ });
});
}
@@ -250,6 +249,8 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
+ CodeObjectRegistry* code_object_registry = p->GetCodeObjectRegistry();
+
// TODO(ulan): we don't have to clear type old-to-old slots in code space
// because the concurrent marker doesn't mark code objects. This requires
// the write barrier for code objects to check the color of the code object.
@@ -265,30 +266,24 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
Address free_start = p->area_start();
- // If we use the skip list for code space pages, we have to lock the skip
- // list because it could be accessed concurrently by the runtime or the
- // deoptimizer.
- const bool rebuild_skip_list =
- space->identity() == CODE_SPACE && p->skip_list() != nullptr;
- SkipList* skip_list = p->skip_list();
- if (rebuild_skip_list) {
- skip_list->Clear();
- }
-
intptr_t live_bytes = 0;
intptr_t freed_bytes = 0;
intptr_t max_freed_bytes = 0;
- int curr_region = -1;
- // Set the allocated_bytes counter to area_size. The free operations below
- // will decrease the counter to actual live bytes.
- p->ResetAllocatedBytes();
+ // Set the allocated_bytes_ counter to area_size and clear the wasted_memory_
+ // counter. The free operations below will decrease allocated_bytes_ to actual
+ // live bytes and keep track of wasted_memory_.
+ p->ResetAllocationStatistics();
+
+ if (code_object_registry) code_object_registry->Clear();
for (auto object_and_size :
LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
HeapObject const object = object_and_size.first;
+ if (code_object_registry)
+ code_object_registry->RegisterAlreadyExistingCodeObject(object.address());
DCHECK(marking_state_->IsBlack(object));
- Address free_end = object->address();
+ Address free_end = object.address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
size_t size = static_cast<size_t>(free_end - free_start);
@@ -315,18 +310,9 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
static_cast<uint32_t>(free_end - p->address())));
}
}
- Map map = object->synchronized_map();
- int size = object->SizeFromMap(map);
+ Map map = object.synchronized_map();
+ int size = object.SizeFromMap(map);
live_bytes += size;
- if (rebuild_skip_list) {
- int new_region_start = SkipList::RegionNumber(free_end);
- int new_region_end =
- SkipList::RegionNumber(free_end + size - kTaggedSize);
- if (new_region_start != curr_region || new_region_end != curr_region) {
- skip_list->AddObject(free_end, size);
- curr_region = new_region_end;
- }
- }
free_start = free_end + size;
}
@@ -382,6 +368,7 @@ int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
DCHECK_EQ(live_bytes, p->allocated_bytes());
}
p->set_concurrent_sweeping_state(Page::kSweepingDone);
+ if (code_object_registry) code_object_registry->Finalize();
if (free_list_mode == IGNORE_FREE_LIST) return 0;
return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
@@ -408,7 +395,10 @@ int Sweeper::ParallelSweepSpace(AllocationSpace identity,
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
int freed = ParallelSweepPage(page, identity);
- pages_freed += 1;
+ if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+ // Free list of a never-allocate page will be dropped later on.
+ continue;
+ }
DCHECK_GE(freed, 0);
max_freed = Max(max_freed, freed);
if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
@@ -503,8 +493,8 @@ Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
int space_index = GetSweepSpaceIndex(space);
Page* page = nullptr;
if (!sweeping_list_[space_index].empty()) {
- page = sweeping_list_[space_index].front();
- sweeping_list_[space_index].pop_front();
+ page = sweeping_list_[space_index].back();
+ sweeping_list_[space_index].pop_back();
}
return page;
}
diff --git a/deps/v8/src/heap/sweeper.h b/deps/v8/src/heap/sweeper.h
index cd45932348..97de7a028d 100644
--- a/deps/v8/src/heap/sweeper.h
+++ b/deps/v8/src/heap/sweeper.h
@@ -9,8 +9,8 @@
#include <vector>
#include "src/base/platform/semaphore.h"
-#include "src/cancelable-task.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace internal {
@@ -24,7 +24,7 @@ enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
class Sweeper {
public:
using IterabilityList = std::vector<Page*>;
- using SweepingList = std::deque<Page*>;
+ using SweepingList = std::vector<Page*>;
using SweptList = std::vector<Page*>;
// Pauses the sweeper tasks or completes sweeping.
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index 63639a7792..98c86c2263 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -5,17 +5,17 @@
#include "src/ic/accessor-assembler.h"
#include "src/ast/ast.h"
-#include "src/code-factory.h"
-#include "src/counters.h"
+#include "src/codegen/code-factory.h"
#include "src/ic/handler-configuration.h"
#include "src/ic/ic.h"
#include "src/ic/keyed-store-generic.h"
#include "src/ic/stub-cache.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/module.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -57,7 +57,6 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField(
break;
default:
UNREACHABLE();
- break;
}
USE(minimum_size);
CSA_ASSERT(this, UintPtrGreaterThanOrEqual(
@@ -335,17 +334,16 @@ void AccessorAssembler::HandleLoadICSmiHandlerCase(
BIND(&if_element);
Comment("element_load");
Node* intptr_index = TryToIntptr(p->name, miss);
- Node* elements = LoadElements(holder);
Node* is_jsarray_condition =
IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
Node* elements_kind =
DecodeWord32FromWord<LoadHandler::ElementsKindBits>(handler_word);
Label if_hole(this), unimplemented_elements_kind(this),
if_oob(this, Label::kDeferred);
- EmitElementLoad(holder, elements, elements_kind, intptr_index,
- is_jsarray_condition, &if_hole, &rebox_double,
- &var_double_value, &unimplemented_elements_kind, &if_oob,
- miss, exit_point, access_mode);
+ EmitElementLoad(holder, elements_kind, intptr_index, is_jsarray_condition,
+ &if_hole, &rebox_double, &var_double_value,
+ &unimplemented_elements_kind, &if_oob, miss, exit_point,
+ access_mode);
BIND(&unimplemented_elements_kind);
{
@@ -1213,7 +1211,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
if (FLAG_unbox_double_fields) {
if (do_transitioning_store) {
StoreMap(object, object_map);
- } else if (FLAG_track_constant_fields) {
+ } else {
Label if_mutable(this);
GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
TNode<Float64T> current_value =
@@ -1231,14 +1229,12 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
StoreObjectField(object, field_offset, mutable_heap_number);
} else {
Node* mutable_heap_number = LoadObjectField(object, field_offset);
- if (FLAG_track_constant_fields) {
- Label if_mutable(this);
- GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
- TNode<Float64T> current_value =
- LoadHeapNumberValue(mutable_heap_number);
- BranchIfSameNumberValue(current_value, double_value, &done, slow);
- BIND(&if_mutable);
- }
+ Label if_mutable(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ TNode<Float64T> current_value =
+ LoadHeapNumberValue(mutable_heap_number);
+ BranchIfSameNumberValue(current_value, double_value, &done, slow);
+ BIND(&if_mutable);
StoreHeapNumberValue(mutable_heap_number, double_value);
}
}
@@ -1249,7 +1245,7 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
{
if (do_transitioning_store) {
StoreMap(object, object_map);
- } else if (FLAG_track_constant_fields) {
+ } else {
Label if_mutable(this);
GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
TNode<Object> current_value =
@@ -1306,28 +1302,27 @@ void AccessorAssembler::OverwriteExistingFastDataProperty(
Node* mutable_heap_number =
LoadPropertyArrayElement(properties, backing_store_index);
TNode<Float64T> double_value = ChangeNumberToFloat64(value);
- if (FLAG_track_constant_fields) {
- Label if_mutable(this);
- GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
- TNode<Float64T> current_value =
- LoadHeapNumberValue(mutable_heap_number);
- BranchIfSameNumberValue(current_value, double_value, &done, slow);
- BIND(&if_mutable);
- }
+
+ Label if_mutable(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ TNode<Float64T> current_value =
+ LoadHeapNumberValue(mutable_heap_number);
+ BranchIfSameNumberValue(current_value, double_value, &done, slow);
+
+ BIND(&if_mutable);
StoreHeapNumberValue(mutable_heap_number, double_value);
Goto(&done);
}
BIND(&tagged_rep);
{
- if (FLAG_track_constant_fields) {
- Label if_mutable(this);
- GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
- TNode<Object> current_value =
- LoadPropertyArrayElement(properties, backing_store_index);
- BranchIfSameValue(current_value, value, &done, slow,
- SameValueMode::kNumbersOnly);
- BIND(&if_mutable);
- }
+ Label if_mutable(this);
+ GotoIfNot(IsPropertyDetailsConst(details), &if_mutable);
+ TNode<Object> current_value =
+ LoadPropertyArrayElement(properties, backing_store_index);
+ BranchIfSameValue(current_value, value, &done, slow,
+ SameValueMode::kNumbersOnly);
+
+ BIND(&if_mutable);
StorePropertyArrayElement(properties, backing_store_index, value);
Goto(&done);
}
@@ -1586,16 +1581,11 @@ void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
Comment("field store");
#ifdef DEBUG
Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
- if (FLAG_track_constant_fields) {
- CSA_ASSERT(
- this,
- Word32Or(WordEqual(handler_kind, IntPtrConstant(StoreHandler::kField)),
- WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kConstField))));
- } else {
- CSA_ASSERT(this,
- WordEqual(handler_kind, IntPtrConstant(StoreHandler::kField)));
- }
+ CSA_ASSERT(
+ this,
+ Word32Or(
+ WordEqual(handler_kind, IntPtrConstant(StoreHandler::kField)),
+ WordEqual(handler_kind, IntPtrConstant(StoreHandler::kConstField))));
#endif
Node* field_representation =
@@ -1680,13 +1670,11 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
GotoIf(TaggedIsSmi(value), bailout);
Label done(this);
- if (FLAG_track_constant_fields) {
- // Skip field type check in favor of constant value check when storing
- // to constant field.
- GotoIf(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
- IntPtrConstant(StoreHandler::kConstField)),
- &done);
- }
+ // Skip field type check in favor of constant value check when storing
+ // to constant field.
+ GotoIf(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
+ IntPtrConstant(StoreHandler::kConstField)),
+ &done);
TNode<IntPtrT> descriptor =
Signed(DecodeWord<StoreHandler::DescriptorBits>(handler_word));
TNode<MaybeObject> maybe_field_type =
@@ -1828,31 +1816,30 @@ void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
}
// Do constant value check if necessary.
- if (FLAG_track_constant_fields) {
- Label done(this);
- GotoIfNot(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
- IntPtrConstant(StoreHandler::kConstField)),
- &done);
- {
- if (store_value_as_double) {
- TNode<Float64T> current_value =
- LoadObjectField<Float64T>(CAST(property_storage), offset);
- BranchIfSameNumberValue(current_value, UncheckedCast<Float64T>(value),
- &done, bailout);
- } else {
- Node* current_value = LoadObjectField(property_storage, offset);
- Branch(WordEqual(current_value, value), &done, bailout);
- }
+ Label const_checked(this);
+ GotoIfNot(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
+ IntPtrConstant(StoreHandler::kConstField)),
+ &const_checked);
+ {
+ if (store_value_as_double) {
+ TNode<Float64T> current_value =
+ LoadObjectField<Float64T>(CAST(property_storage), offset);
+ BranchIfSameNumberValue(current_value, UncheckedCast<Float64T>(value),
+ &const_checked, bailout);
+ } else {
+ Node* current_value = LoadObjectField(property_storage, offset);
+ Branch(WordEqual(current_value, value), &const_checked, bailout);
}
- BIND(&done);
}
+ BIND(&const_checked);
// Do the store.
if (store_value_as_double) {
StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
MachineRepresentation::kFloat64);
} else if (representation.IsSmi()) {
- StoreObjectFieldNoWriteBarrier(property_storage, offset, value);
+ TNode<Smi> value_smi = CAST(value);
+ StoreObjectFieldNoWriteBarrier(property_storage, offset, value_smi);
} else {
StoreObjectField(property_storage, offset, value);
}
@@ -1881,82 +1868,87 @@ void AccessorAssembler::EmitFastElementsBoundsCheck(Node* object,
}
void AccessorAssembler::EmitElementLoad(
- Node* object, Node* elements, Node* elements_kind,
- SloppyTNode<IntPtrT> intptr_index, Node* is_jsarray_condition,
- Label* if_hole, Label* rebox_double, Variable* var_double_value,
- Label* unimplemented_elements_kind, Label* out_of_bounds, Label* miss,
- ExitPoint* exit_point, LoadAccessMode access_mode) {
- Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
- if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
- if_dictionary(this);
- GotoIf(
+ Node* object, Node* elements_kind, SloppyTNode<IntPtrT> intptr_index,
+ Node* is_jsarray_condition, Label* if_hole, Label* rebox_double,
+ Variable* var_double_value, Label* unimplemented_elements_kind,
+ Label* out_of_bounds, Label* miss, ExitPoint* exit_point,
+ LoadAccessMode access_mode) {
+ Label if_typed_array(this), if_fast(this), if_fast_packed(this),
+ if_fast_holey(this), if_fast_double(this), if_fast_holey_double(this),
+ if_nonfast(this), if_dictionary(this);
+ Branch(
Int32GreaterThan(elements_kind, Int32Constant(LAST_FROZEN_ELEMENTS_KIND)),
- &if_nonfast);
-
- EmitFastElementsBoundsCheck(object, elements, intptr_index,
- is_jsarray_condition, out_of_bounds);
- int32_t kinds[] = {// Handled by if_fast_packed.
- PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
- PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
- // Handled by if_fast_holey.
- HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
- // Handled by if_fast_double.
- PACKED_DOUBLE_ELEMENTS,
- // Handled by if_fast_holey_double.
- HOLEY_DOUBLE_ELEMENTS};
- Label* labels[] = {
- // FAST_{SMI,}_ELEMENTS
- &if_fast_packed, &if_fast_packed, &if_fast_packed, &if_fast_packed,
- // FAST_HOLEY_{SMI,}_ELEMENTS
- &if_fast_holey, &if_fast_holey,
- // PACKED_DOUBLE_ELEMENTS
- &if_fast_double,
- // HOLEY_DOUBLE_ELEMENTS
- &if_fast_holey_double};
- Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
- arraysize(kinds));
-
- BIND(&if_fast_packed);
- {
- Comment("fast packed elements");
- exit_point->Return(
- access_mode == LoadAccessMode::kHas
- ? TrueConstant()
- : UnsafeLoadFixedArrayElement(CAST(elements), intptr_index));
- }
+ &if_nonfast, &if_fast);
+
+ BIND(&if_fast);
+ {
+ TNode<FixedArrayBase> elements = LoadJSObjectElements(CAST(object));
+ EmitFastElementsBoundsCheck(object, elements, intptr_index,
+ is_jsarray_condition, out_of_bounds);
+ int32_t kinds[] = {// Handled by if_fast_packed.
+ PACKED_SMI_ELEMENTS, PACKED_ELEMENTS,
+ PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS,
+ // Handled by if_fast_holey.
+ HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS,
+ HOLEY_FROZEN_ELEMENTS, HOLEY_SEALED_ELEMENTS,
+ // Handled by if_fast_double.
+ PACKED_DOUBLE_ELEMENTS,
+ // Handled by if_fast_holey_double.
+ HOLEY_DOUBLE_ELEMENTS};
+ Label* labels[] = {
+ // FAST_{SMI,}_ELEMENTS
+ &if_fast_packed, &if_fast_packed, &if_fast_packed, &if_fast_packed,
+ // FAST_HOLEY_{SMI,}_ELEMENTS
+ &if_fast_holey, &if_fast_holey, &if_fast_holey, &if_fast_holey,
+ // PACKED_DOUBLE_ELEMENTS
+ &if_fast_double,
+ // HOLEY_DOUBLE_ELEMENTS
+ &if_fast_holey_double};
+ Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
+ arraysize(kinds));
+
+ BIND(&if_fast_packed);
+ {
+ Comment("fast packed elements");
+ exit_point->Return(
+ access_mode == LoadAccessMode::kHas
+ ? TrueConstant()
+ : UnsafeLoadFixedArrayElement(CAST(elements), intptr_index));
+ }
- BIND(&if_fast_holey);
- {
- Comment("fast holey elements");
- Node* element = UnsafeLoadFixedArrayElement(CAST(elements), intptr_index);
- GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
- exit_point->Return(access_mode == LoadAccessMode::kHas ? TrueConstant()
- : element);
- }
+ BIND(&if_fast_holey);
+ {
+ Comment("fast holey elements");
+ Node* element = UnsafeLoadFixedArrayElement(CAST(elements), intptr_index);
+ GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
+ exit_point->Return(access_mode == LoadAccessMode::kHas ? TrueConstant()
+ : element);
+ }
- BIND(&if_fast_double);
- {
- Comment("packed double elements");
- if (access_mode == LoadAccessMode::kHas) {
- exit_point->Return(TrueConstant());
- } else {
- var_double_value->Bind(LoadFixedDoubleArrayElement(
- elements, intptr_index, MachineType::Float64()));
- Goto(rebox_double);
+ BIND(&if_fast_double);
+ {
+ Comment("packed double elements");
+ if (access_mode == LoadAccessMode::kHas) {
+ exit_point->Return(TrueConstant());
+ } else {
+ var_double_value->Bind(LoadFixedDoubleArrayElement(
+ CAST(elements), intptr_index, MachineType::Float64()));
+ Goto(rebox_double);
+ }
}
- }
- BIND(&if_fast_holey_double);
- {
- Comment("holey double elements");
- Node* value = LoadFixedDoubleArrayElement(elements, intptr_index,
- MachineType::Float64(), 0,
- INTPTR_PARAMETERS, if_hole);
- if (access_mode == LoadAccessMode::kHas) {
- exit_point->Return(TrueConstant());
- } else {
- var_double_value->Bind(value);
- Goto(rebox_double);
+ BIND(&if_fast_holey_double);
+ {
+ Comment("holey double elements");
+ Node* value = LoadFixedDoubleArrayElement(CAST(elements), intptr_index,
+ MachineType::Float64(), 0,
+ INTPTR_PARAMETERS, if_hole);
+ if (access_mode == LoadAccessMode::kHas) {
+ exit_point->Return(TrueConstant());
+ } else {
+ var_double_value->Bind(value);
+ Goto(rebox_double);
+ }
}
}
@@ -1970,123 +1962,127 @@ void AccessorAssembler::EmitElementLoad(
GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
&if_dictionary);
Goto(unimplemented_elements_kind);
- }
-
- BIND(&if_dictionary);
- {
- Comment("dictionary elements");
- GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
-
- TNode<Object> value = BasicLoadNumberDictionaryElement(
- CAST(elements), intptr_index, miss, if_hole);
- exit_point->Return(access_mode == LoadAccessMode::kHas ? TrueConstant()
- : value);
- }
- BIND(&if_typed_array);
- {
- Comment("typed elements");
- // Check if buffer has been detached.
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- GotoIf(IsDetachedBuffer(buffer), miss);
+ BIND(&if_dictionary);
+ {
+ Comment("dictionary elements");
+ GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
+
+ TNode<FixedArrayBase> elements = LoadJSObjectElements(CAST(object));
+ TNode<Object> value = BasicLoadNumberDictionaryElement(
+ CAST(elements), intptr_index, miss, if_hole);
+ exit_point->Return(access_mode == LoadAccessMode::kHas ? TrueConstant()
+ : value);
+ }
- // Bounds check.
- Node* length = SmiUntag(LoadJSTypedArrayLength(CAST(object)));
- GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
- if (access_mode == LoadAccessMode::kHas) {
- exit_point->Return(TrueConstant());
- } else {
- Node* backing_store = LoadFixedTypedArrayBackingStore(CAST(elements));
-
- Label uint8_elements(this), int8_elements(this), uint16_elements(this),
- int16_elements(this), uint32_elements(this), int32_elements(this),
- float32_elements(this), float64_elements(this),
- bigint64_elements(this), biguint64_elements(this);
- Label* elements_kind_labels[] = {
- &uint8_elements, &uint8_elements, &int8_elements,
- &uint16_elements, &int16_elements, &uint32_elements,
- &int32_elements, &float32_elements, &float64_elements,
- &bigint64_elements, &biguint64_elements};
- int32_t elements_kinds[] = {
- UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
- UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
- INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS,
- BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS};
- const size_t kTypedElementsKindCount =
- LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
- Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
- BIND(&uint8_elements);
- {
- Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
- Node* element = Load(MachineType::Uint8(), backing_store, intptr_index);
- exit_point->Return(SmiFromInt32(element));
- }
- BIND(&int8_elements);
- {
- Comment("INT8_ELEMENTS");
- Node* element = Load(MachineType::Int8(), backing_store, intptr_index);
- exit_point->Return(SmiFromInt32(element));
- }
- BIND(&uint16_elements);
- {
- Comment("UINT16_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(1));
- Node* element = Load(MachineType::Uint16(), backing_store, index);
- exit_point->Return(SmiFromInt32(element));
- }
- BIND(&int16_elements);
- {
- Comment("INT16_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(1));
- Node* element = Load(MachineType::Int16(), backing_store, index);
- exit_point->Return(SmiFromInt32(element));
- }
- BIND(&uint32_elements);
- {
- Comment("UINT32_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Uint32(), backing_store, index);
- exit_point->Return(ChangeUint32ToTagged(element));
- }
- BIND(&int32_elements);
- {
- Comment("INT32_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Int32(), backing_store, index);
- exit_point->Return(ChangeInt32ToTagged(element));
- }
- BIND(&float32_elements);
- {
- Comment("FLOAT32_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Float32(), backing_store, index);
- var_double_value->Bind(ChangeFloat32ToFloat64(element));
- Goto(rebox_double);
- }
- BIND(&float64_elements);
- {
- Comment("FLOAT64_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(3));
- Node* element = Load(MachineType::Float64(), backing_store, index);
- var_double_value->Bind(element);
- Goto(rebox_double);
- }
- BIND(&bigint64_elements);
- {
- Comment("BIGINT64_ELEMENTS");
- exit_point->Return(LoadFixedTypedArrayElementAsTagged(
- backing_store, intptr_index, BIGINT64_ELEMENTS, INTPTR_PARAMETERS));
- }
- BIND(&biguint64_elements);
- {
- Comment("BIGUINT64_ELEMENTS");
- exit_point->Return(LoadFixedTypedArrayElementAsTagged(
- backing_store, intptr_index, BIGUINT64_ELEMENTS,
- INTPTR_PARAMETERS));
+ BIND(&if_typed_array);
+ {
+ Comment("typed elements");
+ // Check if buffer has been detached.
+ Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), miss);
+
+ // Bounds check.
+ TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
+ GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
+ if (access_mode == LoadAccessMode::kHas) {
+ exit_point->Return(TrueConstant());
+ } else {
+ Node* backing_store = LoadJSTypedArrayBackingStore(CAST(object));
+
+ Label uint8_elements(this), int8_elements(this), uint16_elements(this),
+ int16_elements(this), uint32_elements(this), int32_elements(this),
+ float32_elements(this), float64_elements(this),
+ bigint64_elements(this), biguint64_elements(this);
+ Label* elements_kind_labels[] = {
+ &uint8_elements, &uint8_elements, &int8_elements,
+ &uint16_elements, &int16_elements, &uint32_elements,
+ &int32_elements, &float32_elements, &float64_elements,
+ &bigint64_elements, &biguint64_elements};
+ int32_t elements_kinds[] = {
+ UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
+ UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
+ INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS,
+ BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS};
+ const size_t kTypedElementsKindCount =
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+ Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
+ kTypedElementsKindCount);
+ BIND(&uint8_elements);
+ {
+ Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
+ Node* element =
+ Load(MachineType::Uint8(), backing_store, intptr_index);
+ exit_point->Return(SmiFromInt32(element));
+ }
+ BIND(&int8_elements);
+ {
+ Comment("INT8_ELEMENTS");
+ Node* element =
+ Load(MachineType::Int8(), backing_store, intptr_index);
+ exit_point->Return(SmiFromInt32(element));
+ }
+ BIND(&uint16_elements);
+ {
+ Comment("UINT16_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(1));
+ Node* element = Load(MachineType::Uint16(), backing_store, index);
+ exit_point->Return(SmiFromInt32(element));
+ }
+ BIND(&int16_elements);
+ {
+ Comment("INT16_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(1));
+ Node* element = Load(MachineType::Int16(), backing_store, index);
+ exit_point->Return(SmiFromInt32(element));
+ }
+ BIND(&uint32_elements);
+ {
+ Comment("UINT32_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
+ Node* element = Load(MachineType::Uint32(), backing_store, index);
+ exit_point->Return(ChangeUint32ToTagged(element));
+ }
+ BIND(&int32_elements);
+ {
+ Comment("INT32_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
+ Node* element = Load(MachineType::Int32(), backing_store, index);
+ exit_point->Return(ChangeInt32ToTagged(element));
+ }
+ BIND(&float32_elements);
+ {
+ Comment("FLOAT32_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
+ Node* element = Load(MachineType::Float32(), backing_store, index);
+ var_double_value->Bind(ChangeFloat32ToFloat64(element));
+ Goto(rebox_double);
+ }
+ BIND(&float64_elements);
+ {
+ Comment("FLOAT64_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(3));
+ Node* element = Load(MachineType::Float64(), backing_store, index);
+ var_double_value->Bind(element);
+ Goto(rebox_double);
+ }
+ BIND(&bigint64_elements);
+ {
+ Comment("BIGINT64_ELEMENTS");
+ exit_point->Return(LoadFixedTypedArrayElementAsTagged(
+ backing_store, intptr_index, BIGINT64_ELEMENTS,
+ INTPTR_PARAMETERS));
+ }
+ BIND(&biguint64_elements);
+ {
+ Comment("BIGUINT64_ELEMENTS");
+ exit_point->Return(LoadFixedTypedArrayElementAsTagged(
+ backing_store, intptr_index, BIGUINT64_ELEMENTS,
+ INTPTR_PARAMETERS));
+ }
}
}
}
@@ -2142,7 +2138,6 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
// Receivers requiring non-standard element accesses (interceptors, access
// checks, strings and string wrappers, proxies) are handled in the runtime.
GotoIf(IsCustomElementsReceiverInstanceType(instance_type), &if_custom);
- Node* elements = LoadElements(receiver);
Node* elements_kind = LoadMapElementsKind(receiver_map);
Node* is_jsarray_condition = InstanceTypeEqual(instance_type, JS_ARRAY_TYPE);
VARIABLE(var_double_value, MachineRepresentation::kFloat64);
@@ -2151,10 +2146,9 @@ void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
// Unimplemented elements kinds fall back to a runtime call.
Label* unimplemented_elements_kind = slow;
IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
- EmitElementLoad(receiver, elements, elements_kind, index,
- is_jsarray_condition, &if_element_hole, &rebox_double,
- &var_double_value, unimplemented_elements_kind, &if_oob, slow,
- &direct_exit);
+ EmitElementLoad(receiver, elements_kind, index, is_jsarray_condition,
+ &if_element_hole, &rebox_double, &var_double_value,
+ unimplemented_elements_kind, &if_oob, slow, &direct_exit);
BIND(&rebox_double);
Return(AllocateHeapNumberWithValue(var_double_value.value()));
@@ -2616,46 +2610,15 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
}
}
-// TODO(8860): This check is only required so we can make prototypes fast on
-// the first load. This is not really useful when there is no feedback vector
-// and may not be important when lazily allocating feedback vectors. Once lazy
-// allocation of feedback vectors has landed try to eliminate this check.
-void AccessorAssembler::BranchIfPrototypeShouldbeFast(Node* receiver_map,
- Label* prototype_not_fast,
- Label* prototype_fast) {
- VARIABLE(var_map, MachineRepresentation::kTagged);
- var_map.Bind(receiver_map);
- Label loop_body(this, &var_map);
- Goto(&loop_body);
-
- BIND(&loop_body);
- {
- Node* map = var_map.value();
- Node* prototype = LoadMapPrototype(map);
- GotoIf(IsNull(prototype), prototype_fast);
- TNode<PrototypeInfo> proto_info =
- LoadMapPrototypeInfo(receiver_map, prototype_not_fast);
- GotoIf(IsNull(prototype), prototype_not_fast);
- TNode<Uint32T> flags =
- LoadObjectField<Uint32T>(proto_info, PrototypeInfo::kBitFieldOffset);
- GotoIf(Word32Equal(flags, Uint32Constant(0)), prototype_not_fast);
-
- Node* prototype_map = LoadMap(prototype);
- var_map.Bind(prototype_map);
- Goto(&loop_body);
- }
-}
-
void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
Label miss(this, Label::kDeferred),
- check_if_fast_prototype(this, Label::kDeferred),
check_function_prototype(this);
Node* receiver = p->receiver;
GotoIf(TaggedIsSmi(receiver), &miss);
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
- GotoIf(IsUndefined(p->vector), &check_if_fast_prototype);
+ GotoIf(IsUndefined(p->vector), &check_function_prototype);
// Optimistically write the state transition to the vector.
StoreFeedbackVectorSlot(p->vector, p->slot,
LoadRoot(RootIndex::kpremonomorphic_symbol),
@@ -2664,12 +2627,6 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
kTaggedSize, SMI_PARAMETERS);
Goto(&check_function_prototype);
- BIND(&check_if_fast_prototype);
- {
- BranchIfPrototypeShouldbeFast(receiver_map, &miss,
- &check_function_prototype);
- }
-
BIND(&check_function_prototype);
{
// Special case for Function.prototype load, because it's very common
@@ -2922,6 +2879,7 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
Node* receiver = p->receiver;
GotoIf(TaggedIsSmi(receiver), &if_runtime);
+ GotoIf(IsNullOrUndefined(receiver), &if_runtime);
TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
&if_other, &if_notunique);
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index 180d9fc43a..b0d6291094 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -5,7 +5,7 @@
#ifndef V8_IC_ACCESSOR_ASSEMBLER_H_
#define V8_IC_ACCESSOR_ASSEMBLER_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -296,10 +296,6 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
Representation representation, Node* value,
Label* bailout);
- void BranchIfPrototypeShouldbeFast(Node* receiver_map,
- Label* prototype_not_fast,
- Label* prototype_fast);
-
// Extends properties backing store by JSObject::kFieldsAdded elements,
// returns updated properties backing store.
Node* ExtendPropertiesBackingStore(Node* object, Node* index);
@@ -311,7 +307,7 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
void EmitFastElementsBoundsCheck(Node* object, Node* elements,
Node* intptr_index,
Node* is_jsarray_condition, Label* miss);
- void EmitElementLoad(Node* object, Node* elements, Node* elements_kind,
+ void EmitElementLoad(Node* object, Node* elements_kind,
SloppyTNode<IntPtrT> key, Node* is_jsarray_condition,
Label* if_hole, Label* rebox_double,
Variable* var_double_value,
diff --git a/deps/v8/src/ic/binary-op-assembler.cc b/deps/v8/src/ic/binary-op-assembler.cc
index ebe64437c6..a7a5b988f6 100644
--- a/deps/v8/src/ic/binary-op-assembler.cc
+++ b/deps/v8/src/ic/binary-op-assembler.cc
@@ -4,7 +4,7 @@
#include "src/ic/binary-op-assembler.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ic/binary-op-assembler.h b/deps/v8/src/ic/binary-op-assembler.h
index a6add269dc..26324660c8 100644
--- a/deps/v8/src/ic/binary-op-assembler.h
+++ b/deps/v8/src/ic/binary-op-assembler.h
@@ -6,7 +6,7 @@
#define V8_IC_BINARY_OP_ASSEMBLER_H_
#include <functional>
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index f1acbe112b..439d342f1e 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/ic/call-optimization.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -22,15 +22,15 @@ CallOptimization::CallOptimization(Isolate* isolate, Handle<Object> function) {
Context CallOptimization::GetAccessorContext(Map holder_map) const {
if (is_constant_call()) {
- return constant_function_->context()->native_context();
+ return constant_function_->context().native_context();
}
- JSFunction constructor = JSFunction::cast(holder_map->GetConstructor());
- return constructor->context()->native_context();
+ JSFunction constructor = JSFunction::cast(holder_map.GetConstructor());
+ return constructor.context().native_context();
}
bool CallOptimization::IsCrossContextLazyAccessorPair(Context native_context,
Map holder_map) const {
- DCHECK(native_context->IsNativeContext());
+ DCHECK(native_context.IsNativeContext());
if (is_constant_call()) return false;
return native_context != GetAccessorContext(holder_map);
}
@@ -49,7 +49,7 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
}
if (object_map->has_hidden_prototype()) {
JSObject raw_prototype = JSObject::cast(object_map->prototype());
- Handle<JSObject> prototype(raw_prototype, raw_prototype->GetIsolate());
+ Handle<JSObject> prototype(raw_prototype, raw_prototype.GetIsolate());
object_map = handle(prototype->map(), prototype->GetIsolate());
if (expected_receiver_type_->IsTemplateFor(*object_map)) {
*holder_lookup = kHolderFound;
@@ -65,7 +65,7 @@ bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
Handle<JSObject> holder) const {
DCHECK(is_simple_api_call());
if (!receiver->IsHeapObject()) return false;
- Handle<Map> map(HeapObject::cast(*receiver)->map(), holder->GetIsolate());
+ Handle<Map> map(HeapObject::cast(*receiver).map(), holder->GetIsolate());
return IsCompatibleReceiverMap(map, holder);
}
@@ -85,8 +85,8 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
{
JSObject object = *api_holder;
while (true) {
- Object prototype = object->map()->prototype();
- if (!prototype->IsJSObject()) return false;
+ Object prototype = object.map().prototype();
+ if (!prototype.IsJSObject()) return false;
if (prototype == *holder) return true;
object = JSObject::cast(prototype);
}
@@ -98,11 +98,11 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
void CallOptimization::Initialize(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
- if (function_template_info->call_code()->IsUndefined(isolate)) return;
+ if (function_template_info->call_code().IsUndefined(isolate)) return;
api_call_info_ = handle(
CallHandlerInfo::cast(function_template_info->call_code()), isolate);
- if (!function_template_info->signature()->IsUndefined(isolate)) {
+ if (!function_template_info->signature().IsUndefined(isolate)) {
expected_receiver_type_ =
handle(FunctionTemplateInfo::cast(function_template_info->signature()),
isolate);
@@ -120,15 +120,15 @@ void CallOptimization::Initialize(Isolate* isolate,
void CallOptimization::AnalyzePossibleApiFunction(Isolate* isolate,
Handle<JSFunction> function) {
- if (!function->shared()->IsApiFunction()) return;
- Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data(),
+ if (!function->shared().IsApiFunction()) return;
+ Handle<FunctionTemplateInfo> info(function->shared().get_api_func_data(),
isolate);
// Require a C++ callback.
- if (info->call_code()->IsUndefined(isolate)) return;
+ if (info->call_code().IsUndefined(isolate)) return;
api_call_info_ = handle(CallHandlerInfo::cast(info->call_code()), isolate);
- if (!info->signature()->IsUndefined(isolate)) {
+ if (!info->signature().IsUndefined(isolate)) {
expected_receiver_type_ =
handle(FunctionTemplateInfo::cast(info->signature()), isolate);
}
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index 62317dc659..c8c7f25d5a 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -5,8 +5,8 @@
#ifndef V8_IC_CALL_OPTIMIZATION_H_
#define V8_IC_CALL_OPTIMIZATION_H_
-#include "src/api-arguments.h"
-#include "src/objects.h"
+#include "src/api/api-arguments.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 85dabc9954..f5cd0c1de7 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -7,10 +7,10 @@
#include "src/ic/handler-configuration.h"
-#include "src/field-index-inl.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/data-handler-inl.h"
+#include "src/objects/field-index-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
@@ -25,7 +25,7 @@ CAST_ACCESSOR(LoadHandler)
// Decodes kind from Smi-handler.
LoadHandler::Kind LoadHandler::GetHandlerKind(Smi smi_handler) {
- return KindBits::decode(smi_handler->value());
+ return KindBits::decode(smi_handler.value());
}
Handle<Smi> LoadHandler::LoadNormal(Isolate* isolate) {
@@ -153,7 +153,7 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, Kind kind,
UNREACHABLE();
}
- DCHECK(kind == kField || (kind == kConstField && FLAG_track_constant_fields));
+ DCHECK(kind == kField || kind == kConstField);
int config = KindBits::encode(kind) |
IsInobjectBits::encode(field_index.is_inobject()) |
@@ -167,8 +167,6 @@ Handle<Smi> StoreHandler::StoreField(Isolate* isolate, int descriptor,
FieldIndex field_index,
PropertyConstness constness,
Representation representation) {
- DCHECK_IMPLIES(!FLAG_track_constant_fields,
- constness == PropertyConstness::kMutable);
Kind kind = constness == PropertyConstness::kMutable ? kField : kConstField;
return StoreField(isolate, kind, descriptor, field_index, representation);
}
diff --git a/deps/v8/src/ic/handler-configuration.cc b/deps/v8/src/ic/handler-configuration.cc
index a6acbb058d..0b8ebd2bbe 100644
--- a/deps/v8/src/ic/handler-configuration.cc
+++ b/deps/v8/src/ic/handler-configuration.cc
@@ -4,11 +4,11 @@
#include "src/ic/handler-configuration.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/maybe-object.h"
-#include "src/transitions.h"
+#include "src/objects/transitions.h"
namespace v8 {
namespace internal {
@@ -202,7 +202,7 @@ MaybeObjectHandle StoreHandler::StoreTransition(Isolate* isolate,
Handle<DescriptorArray> descriptors(transition_map->instance_descriptors(),
isolate);
PropertyDetails details = descriptors->GetDetails(descriptor);
- if (descriptors->GetKey(descriptor)->IsPrivate()) {
+ if (descriptors->GetKey(descriptor).IsPrivate()) {
DCHECK_EQ(DONT_ENUM, details.attributes());
} else {
DCHECK_EQ(NONE, details.attributes());
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index 19ca5a9c6d..b8888868ec 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -5,13 +5,13 @@
#ifndef V8_IC_HANDLER_CONFIGURATION_H_
#define V8_IC_HANDLER_CONFIGURATION_H_
-#include "src/elements-kind.h"
-#include "src/field-index.h"
-#include "src/globals.h"
-#include "src/maybe-handles.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
+#include "src/handles/maybe-handles.h"
#include "src/objects/data-handler.h"
-#include "src/utils.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/field-index.h"
+#include "src/objects/objects.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 0616340a62..38b15618ac 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -7,22 +7,15 @@
#include "src/ic/ic.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/debug/debug.h"
-#include "src/frames-inl.h"
-#include "src/handles-inl.h"
-#include "src/prototype.h"
+#include "src/execution/frames-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/prototype.h"
namespace v8 {
namespace internal {
-
-Address IC::address() const {
- // Get the address of the call.
- return Assembler::target_address_from_return_address(pc());
-}
-
-
Address IC::constant_pool() const {
if (FLAG_enable_embedded_constant_pool) {
return raw_constant_pool();
@@ -44,7 +37,7 @@ void IC::update_receiver_map(Handle<Object> receiver) {
if (receiver->IsSmi()) {
receiver_map_ = isolate_->factory()->heap_number_map();
} else {
- receiver_map_ = handle(HeapObject::cast(*receiver)->map(), isolate_);
+ receiver_map_ = handle(HeapObject::cast(*receiver).map(), isolate_);
}
}
@@ -52,21 +45,16 @@ bool IC::IsHandler(MaybeObject object) {
HeapObject heap_object;
return (object->IsSmi() && (object.ptr() != kNullAddress)) ||
(object->GetHeapObjectIfWeak(&heap_object) &&
- (heap_object->IsMap() || heap_object->IsPropertyCell())) ||
+ (heap_object.IsMap() || heap_object.IsPropertyCell())) ||
(object->GetHeapObjectIfStrong(&heap_object) &&
- (heap_object->IsDataHandler() || heap_object->IsCode()));
-}
-
-bool IC::AddressIsDeoptimizedCode() const {
- return AddressIsDeoptimizedCode(isolate(), address());
+ (heap_object.IsDataHandler() || heap_object.IsCode()));
}
-// static
-bool IC::AddressIsDeoptimizedCode(Isolate* isolate, Address address) {
+bool IC::HostIsDeoptimizedCode() const {
Code host =
- isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
- return (host->kind() == Code::OPTIMIZED_FUNCTION &&
- host->marked_for_deoptimization());
+ isolate()->inner_pointer_to_code_cache()->GetCacheEntry(pc())->code;
+ return (host.kind() == Code::OPTIMIZED_FUNCTION &&
+ host.marked_for_deoptimization());
}
bool IC::vector_needs_update() {
diff --git a/deps/v8/src/ic/ic-stats.cc b/deps/v8/src/ic/ic-stats.cc
index 69c7150bd2..f387239aee 100644
--- a/deps/v8/src/ic/ic-stats.cc
+++ b/deps/v8/src/ic/ic-stats.cc
@@ -4,11 +4,11 @@
#include "src/ic/ic-stats.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/init/v8.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
-#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -59,11 +59,11 @@ const char* ICStats::GetOrCacheScriptName(Script script) {
if (script_name_map_.find(script_ptr) != script_name_map_.end()) {
return script_name_map_[script_ptr].get();
}
- Object script_name_raw = script->name();
- if (script_name_raw->IsString()) {
+ Object script_name_raw = script.name();
+ if (script_name_raw.IsString()) {
String script_name = String::cast(script_name_raw);
char* c_script_name =
- script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
+ script_name.ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
.release();
script_name_map_.insert(
std::make_pair(script_ptr, std::unique_ptr<char[]>(c_script_name)));
@@ -81,9 +81,9 @@ const char* ICStats::GetOrCacheFunctionName(JSFunction function) {
if (function_name_map_.find(function_ptr) != function_name_map_.end()) {
return function_name_map_[function_ptr].get();
}
- SharedFunctionInfo shared = function->shared();
- ic_infos_[pos_].is_optimized = function->IsOptimized();
- char* function_name = shared->DebugName()->ToCString().release();
+ SharedFunctionInfo shared = function.shared();
+ ic_infos_[pos_].is_optimized = function.IsOptimized();
+ char* function_name = shared.DebugName().ToCString().release();
function_name_map_.insert(
std::make_pair(function_ptr, std::unique_ptr<char[]>(function_name)));
return function_name;
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 914e62d8c3..64a9f315bb 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -4,36 +4,36 @@
#include "src/ic/ic.h"
-#include "src/accessors.h"
-#include "src/api-arguments-inl.h"
-#include "src/api.h"
-#include "src/arguments-inl.h"
+#include "src/api/api-arguments-inl.h"
+#include "src/api/api.h"
#include "src/ast/ast.h"
#include "src/base/bits.h"
-#include "src/code-factory.h"
-#include "src/conversions.h"
-#include "src/execution.h"
-#include "src/field-type.h"
-#include "src/frames-inl.h"
-#include "src/handles-inl.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/code-factory.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/ic/call-optimization.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
#include "src/ic/ic-stats.h"
#include "src/ic/stub-cache.h"
-#include "src/isolate-inl.h"
+#include "src/numbers/conversions.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/data-handler-inl.h"
+#include "src/objects/field-type.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/struct-inl.h"
#ifdef V8_TRACE_FEEDBACK_UPDATES
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
#endif // V8_TRACE_FEEDBACK_UPDATES
-#include "src/prototype.h"
-#include "src/runtime-profiler.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/objects/prototype.h"
#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
#include "src/tracing/trace-event.h"
@@ -45,7 +45,7 @@ namespace internal {
char IC::TransitionMarkFromState(IC::State state) {
switch (state) {
case NO_FEEDBACK:
- UNREACHABLE();
+ return 'X';
case UNINITIALIZED:
return '0';
case PREMONOMORPHIC:
@@ -73,25 +73,25 @@ const char* GetModifier(KeyedAccessLoadMode mode) {
const char* GetModifier(KeyedAccessStoreMode mode) {
switch (mode) {
- case STORE_NO_TRANSITION_HANDLE_COW:
+ case STORE_HANDLE_COW:
return ".COW";
- case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
+ case STORE_AND_GROW_HANDLE_COW:
return ".STORE+COW";
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+ case STORE_IGNORE_OUT_OF_BOUNDS:
return ".IGNORE_OOB";
- default:
- break;
+ case STANDARD_STORE:
+ return "";
}
- DCHECK(!IsCOWHandlingStoreMode(mode));
- return IsGrowStoreMode(mode) ? ".GROW" : "";
+ UNREACHABLE();
}
} // namespace
void IC::TraceIC(const char* type, Handle<Object> name) {
if (V8_LIKELY(!TracingFlags::is_ic_stats_enabled())) return;
- if (AddressIsDeoptimizedCode()) return;
- State new_state = nexus()->ic_state();
+ if (HostIsDeoptimizedCode()) return;
+ State new_state =
+ (state() == NO_FEEDBACK) ? NO_FEEDBACK : nexus()->ic_state();
TraceIC(type, name, state(), new_state);
}
@@ -105,7 +105,9 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
}
const char* modifier = "";
- if (IsKeyedLoadIC()) {
+ if (state() == NO_FEEDBACK) {
+ modifier = "";
+ } else if (IsKeyedLoadIC()) {
KeyedAccessLoadMode mode = nexus()->GetKeyedAccessLoadMode();
modifier = GetModifier(mode);
} else if (IsKeyedStoreIC() || IsStoreInArrayLiteralICKind(kind())) {
@@ -131,16 +133,16 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
Object maybe_function =
Object(Memory<Address>(fp_ + JavaScriptFrameConstants::kFunctionOffset));
- DCHECK(maybe_function->IsJSFunction());
+ DCHECK(maybe_function.IsJSFunction());
JSFunction function = JSFunction::cast(maybe_function);
int code_offset = 0;
- if (function->IsInterpreted()) {
+ if (function.IsInterpreted()) {
code_offset = InterpretedFrame::GetBytecodeOffset(fp());
} else {
- code_offset = static_cast<int>(pc() - function->code()->InstructionStart());
+ code_offset = static_cast<int>(pc() - function.code().InstructionStart());
}
JavaScriptFrame::CollectFunctionAndOffsetForICStats(
- function, function->abstract_code(), code_offset);
+ function, function.abstract_code(), code_offset);
// Reserve enough space for IC transition state, the longest length is 17.
ic_info.state.reserve(17);
@@ -152,9 +154,9 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ic_info.state += ")";
ic_info.map = reinterpret_cast<void*>(map.ptr());
if (!map.is_null()) {
- ic_info.is_dictionary_map = map->is_dictionary_map();
- ic_info.number_of_own_descriptors = map->NumberOfOwnDescriptors();
- ic_info.instance_type = std::to_string(map->instance_type());
+ ic_info.is_dictionary_map = map.is_dictionary_map();
+ ic_info.number_of_own_descriptors = map.NumberOfOwnDescriptors();
+ ic_info.instance_type = std::to_string(map.instance_type());
}
// TODO(lpy) Add name as key field in ICStats.
ICStats::instance()->End();
@@ -229,13 +231,12 @@ static void LookupForRead(LookupIterator* it, bool is_has_property) {
case LookupIterator::INTERCEPTOR: {
// If there is a getter, return; otherwise loop to perform the lookup.
Handle<JSObject> holder = it->GetHolder<JSObject>();
- if (!holder->GetNamedInterceptor()->getter()->IsUndefined(
+ if (!holder->GetNamedInterceptor().getter().IsUndefined(
it->isolate())) {
return;
}
if (is_has_property &&
- !holder->GetNamedInterceptor()->query()->IsUndefined(
- it->isolate())) {
+ !holder->GetNamedInterceptor().query().IsUndefined(it->isolate())) {
return;
}
break;
@@ -335,18 +336,18 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
// TODO(leszeks): The host function is only needed for this print, we could
// remove it as a parameter if we're of with removing this trace (or only
// tracing the feedback vector, not the function name).
- if (vector->profiler_ticks() != 0) {
+ if (vector.profiler_ticks() != 0) {
PrintF("[resetting ticks for ");
- host_function->ShortPrint();
- PrintF(" due from %d due to IC change: %s]\n", vector->profiler_ticks(),
+ host_function.ShortPrint();
+ PrintF(" due from %d due to IC change: %s]\n", vector.profiler_ticks(),
reason);
}
}
- vector->set_profiler_ticks(0);
+ vector.set_profiler_ticks(0);
#ifdef V8_TRACE_FEEDBACK_UPDATES
if (FLAG_trace_feedback_updates) {
- int slot_count = vector->metadata()->slot_count();
+ int slot_count = vector.metadata().slot_count();
StdoutStream os;
if (slot.IsInvalid()) {
@@ -354,12 +355,12 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
} else {
os << "[Feedback slot " << slot.ToInt() << "/" << slot_count << " in ";
}
- vector->shared_function_info()->ShortPrint(os);
+ vector.shared_function_info().ShortPrint(os);
if (slot.IsInvalid()) {
os << " updated - ";
} else {
os << " updated to ";
- vector->FeedbackSlotPrint(os, slot);
+ vector.FeedbackSlotPrint(os, slot);
os << " - ";
}
os << reason << "]" << std::endl;
@@ -375,7 +376,7 @@ void IC::OnFeedbackChanged(Isolate* isolate, FeedbackVector vector,
static bool MigrateDeprecated(Handle<Object> object) {
if (!object->IsJSObject()) return false;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (!receiver->map()->is_deprecated()) return false;
+ if (!receiver->map().is_deprecated()) return false;
JSObject::MigrateInstance(Handle<JSObject>::cast(object));
return true;
}
@@ -467,7 +468,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
if (name->IsPrivate()) {
if (name->IsPrivateName() && !it.IsFound()) {
- Handle<String> name_string(String::cast(Symbol::cast(*name)->name()),
+ Handle<String> name_string(String::cast(Symbol::cast(*name).name()),
isolate());
return TypeError(MessageTemplate::kInvalidPrivateFieldRead, object,
name_string);
@@ -514,7 +515,7 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table(), isolate());
+ global->native_context().script_context_table(), isolate());
ScriptContextTable::LookupResult lookup_result;
if (ScriptContextTable::Lookup(isolate(), *script_contexts, *str_name,
@@ -651,16 +652,16 @@ void IC::CopyICToMegamorphicCache(Handle<Name> name) {
bool IC::IsTransitionOfMonomorphicTarget(Map source_map, Map target_map) {
if (source_map.is_null()) return true;
if (target_map.is_null()) return false;
- if (source_map->is_abandoned_prototype_map()) return false;
- ElementsKind target_elements_kind = target_map->elements_kind();
+ if (source_map.is_abandoned_prototype_map()) return false;
+ ElementsKind target_elements_kind = target_map.elements_kind();
bool more_general_transition = IsMoreGeneralElementsKindTransition(
- source_map->elements_kind(), target_elements_kind);
+ source_map.elements_kind(), target_elements_kind);
Map transitioned_map;
if (more_general_transition) {
MapHandles map_list;
map_list.push_back(handle(target_map, isolate_));
transitioned_map =
- source_map->FindElementsKindTransitionedMap(isolate(), map_list);
+ source_map.FindElementsKindTransitionedMap(isolate(), map_list);
}
return transitioned_map == target_map;
}
@@ -701,12 +702,12 @@ void IC::PatchCache(Handle<Name> name, const MaybeObjectHandle& handler) {
break;
case GENERIC:
UNREACHABLE();
- break;
}
}
void LoadIC::UpdateCaches(LookupIterator* lookup) {
- if (state() == UNINITIALIZED && !IsLoadGlobalIC()) {
+ if (!FLAG_lazy_feedback_allocation && state() == UNINITIALIZED &&
+ !IsLoadGlobalIC()) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
@@ -795,7 +796,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
// Use specialized code for getting prototype of functions.
if (receiver->IsJSFunction() &&
*lookup->name() == roots.prototype_string() &&
- !JSFunction::cast(*receiver)->PrototypeRequiresRuntimeLookup()) {
+ !JSFunction::cast(*receiver).PrototypeRequiresRuntimeLookup()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
return BUILTIN_CODE(isolate(), LoadIC_FunctionPrototype);
}
@@ -813,7 +814,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
case LookupIterator::INTERCEPTOR: {
Handle<Smi> smi_handler = LoadHandler::LoadInterceptor(isolate());
- if (holder->GetNamedInterceptor()->non_masking()) {
+ if (holder->GetNamedInterceptor().non_masking()) {
MaybeObjectHandle holder_ref(isolate()->factory()->null_value());
if (!receiver_is_holder || IsLoadGlobalIC()) {
holder_ref = MaybeObjectHandle::Weak(holder);
@@ -846,7 +847,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
}
if (holder->IsJSModuleNamespace()) {
Handle<ObjectHashTable> exports(
- Handle<JSModuleNamespace>::cast(holder)->module()->exports(),
+ Handle<JSModuleNamespace>::cast(holder)->module().exports(),
isolate());
int entry = exports->FindEntry(roots, lookup->name(),
Smi::ToInt(lookup->name()->GetHash()));
@@ -863,7 +864,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
return ComputeHandler(lookup);
}
- Handle<Object> getter(AccessorPair::cast(*accessors)->getter(),
+ Handle<Object> getter(AccessorPair::cast(*accessors).getter(),
isolate());
if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
@@ -871,9 +872,9 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
}
if ((getter->IsFunctionTemplateInfo() &&
- FunctionTemplateInfo::cast(*getter)->BreakAtEntry()) ||
+ FunctionTemplateInfo::cast(*getter).BreakAtEntry()) ||
(getter->IsJSFunction() &&
- JSFunction::cast(*getter)->shared()->BreakAtEntry())) {
+ JSFunction::cast(*getter).shared().BreakAtEntry())) {
// Do not install an IC if the api function has a breakpoint.
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
@@ -969,19 +970,13 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
if (receiver_is_holder) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNormalFromPrototypeDH);
- } else if (lookup->property_details().location() == kField) {
+ } else {
+ DCHECK_EQ(kField, lookup->property_details().location());
FieldIndex field = lookup->GetFieldIndex();
smi_handler = LoadHandler::LoadField(isolate(), field);
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
if (receiver_is_holder) return smi_handler;
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
- } else {
- DCHECK_EQ(kDescriptor, lookup->property_details().location());
- smi_handler =
- LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
- if (receiver_is_holder) return smi_handler;
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
}
return LoadHandler::LoadFromPrototype(isolate(), map, holder,
smi_handler);
@@ -1150,12 +1145,11 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
KeyedAccessLoadMode load_mode) {
// Has a getter interceptor, or is any has and has a query interceptor.
if (receiver_map->has_indexed_interceptor() &&
- (!receiver_map->GetIndexedInterceptor()->getter()->IsUndefined(
- isolate()) ||
+ (!receiver_map->GetIndexedInterceptor().getter().IsUndefined(isolate()) ||
(IsAnyHas() &&
- !receiver_map->GetIndexedInterceptor()->query()->IsUndefined(
+ !receiver_map->GetIndexedInterceptor().query().IsUndefined(
isolate()))) &&
- !receiver_map->GetIndexedInterceptor()->non_masking()) {
+ !receiver_map->GetIndexedInterceptor().non_masking()) {
// TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), KeyedLoadIC_LoadIndexedInterceptorStub);
return IsAnyHas() ? BUILTIN_CODE(isolate(), HasIndexedInterceptorIC)
@@ -1191,8 +1185,8 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
is_js_array, load_mode);
}
DCHECK(IsFastElementsKind(elements_kind) ||
- IsPackedFrozenOrSealedElementsKind(elements_kind) ||
- IsFixedTypedArrayElementsKind(elements_kind));
+ IsFrozenOrSealedElementsKind(elements_kind) ||
+ IsTypedArrayElementsKind(elements_kind));
bool convert_hole_to_undefined =
(elements_kind == HOLEY_SMI_ELEMENTS ||
elements_kind == HOLEY_ELEMENTS) &&
@@ -1256,13 +1250,15 @@ bool ConvertKeyToIndex(Handle<Object> receiver, Handle<Object> key,
}
bool IsOutOfBoundsAccess(Handle<Object> receiver, uint32_t index) {
- uint32_t length = 0;
+ size_t length;
if (receiver->IsJSArray()) {
- JSArray::cast(*receiver)->length()->ToArrayLength(&length);
- } else if (receiver->IsString()) {
- length = String::cast(*receiver)->length();
+ length = JSArray::cast(*receiver).length().Number();
+ } else if (receiver->IsJSTypedArray()) {
+ length = JSTypedArray::cast(*receiver).length();
} else if (receiver->IsJSObject()) {
- length = JSObject::cast(*receiver)->elements()->length();
+ length = JSObject::cast(*receiver).elements().length();
+ } else if (receiver->IsString()) {
+ length = String::cast(*receiver).length();
} else {
return false;
}
@@ -1315,7 +1311,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
uint32_t index;
if ((key->IsInternalizedString() &&
- !String::cast(*key)->AsArrayIndex(&index)) ||
+ !String::cast(*key).AsArrayIndex(&index)) ||
key->IsSymbol()) {
ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle,
LoadIC::Load(object, Handle<Name>::cast(key)),
@@ -1345,7 +1341,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
if (object->IsJSProxy()) return true;
if (!object->IsJSObject()) return false;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- DCHECK(!receiver->map()->is_deprecated());
+ DCHECK(!receiver->map().is_deprecated());
if (it->state() != LookupIterator::TRANSITION) {
for (; it->IsFound(); it->Next()) {
@@ -1359,10 +1355,10 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
Handle<JSObject> holder = it->GetHolder<JSObject>();
InterceptorInfo info = holder->GetNamedInterceptor();
if (it->HolderIsReceiverOrHiddenPrototype()) {
- return !info->non_masking() && receiver.is_identical_to(holder) &&
- !info->setter()->IsUndefined(isolate());
- } else if (!info->getter()->IsUndefined(isolate()) ||
- !info->query()->IsUndefined(isolate())) {
+ return !info.non_masking() && receiver.is_identical_to(holder) &&
+ !info.setter().IsUndefined(isolate());
+ } else if (!info.getter().IsUndefined(isolate()) ||
+ !info.query().IsUndefined(isolate())) {
return false;
}
break;
@@ -1417,7 +1413,7 @@ MaybeHandle<Object> StoreGlobalIC::Store(Handle<Name> name,
Handle<String> str_name = Handle<String>::cast(name);
Handle<JSGlobalObject> global = isolate()->global_object();
Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table(), isolate());
+ global->native_context().script_context_table(), isolate());
ScriptContextTable::LookupResult lookup_result;
if (ScriptContextTable::Lookup(isolate(), *script_contexts, *str_name,
@@ -1492,7 +1488,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
if (name->IsPrivate()) {
if (name->IsPrivateName() && !it.IsFound()) {
- Handle<String> name_string(String::cast(Symbol::cast(*name)->name()),
+ Handle<String> name_string(String::cast(Symbol::cast(*name).name()),
isolate());
return TypeError(MessageTemplate::kInvalidPrivateFieldWrite, object,
name_string);
@@ -1540,7 +1536,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
InterceptorInfo info =
lookup->GetHolder<JSObject>()->GetNamedInterceptor();
if (!lookup->HolderIsReceiverOrHiddenPrototype() &&
- !info->getter()->IsUndefined(isolate())) {
+ !info.getter().IsUndefined(isolate())) {
// Utilize premonomorphic state for global store ics that run into
// an interceptor because the property doesn't exist yet.
// After we actually set the property, we'll have more information.
@@ -1598,7 +1594,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
USE(holder);
- DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined(isolate()));
+ DCHECK(!holder->GetNamedInterceptor().setter().IsUndefined(isolate()));
// TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), StoreIC_StoreInterceptorStub);
return MaybeObjectHandle(BUILTIN_CODE(isolate(), StoreInterceptorIC));
@@ -1623,7 +1619,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return MaybeObjectHandle(slow_stub());
}
- if (AccessorInfo::cast(*accessors)->is_special_data_property() &&
+ if (AccessorInfo::cast(*accessors).is_special_data_property() &&
!lookup->HolderIsReceiverOrHiddenPrototype()) {
set_slow_stub_reason("special data property in prototype chain");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
@@ -1656,9 +1652,9 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
}
if ((setter->IsFunctionTemplateInfo() &&
- FunctionTemplateInfo::cast(*setter)->BreakAtEntry()) ||
+ FunctionTemplateInfo::cast(*setter).BreakAtEntry()) ||
(setter->IsJSFunction() &&
- JSFunction::cast(*setter)->shared()->BreakAtEntry())) {
+ JSFunction::cast(*setter).shared().BreakAtEntry())) {
// Do not install an IC if the api function has a breakpoint.
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return MaybeObjectHandle(slow_stub());
@@ -1768,13 +1764,16 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode,
- bool receiver_was_cow) {
+ Handle<Map> new_receiver_map) {
MapHandles target_receiver_maps;
TargetMaps(&target_receiver_maps);
if (target_receiver_maps.empty()) {
- Handle<Map> monomorphic_map =
- ComputeTransitionedMap(receiver_map, store_mode);
- store_mode = GetNonTransitioningStoreMode(store_mode, receiver_was_cow);
+ Handle<Map> monomorphic_map = receiver_map;
+ // If we transitioned to a map that is a more general map than incoming
+ // then use the new map.
+ if (IsTransitionOfMonomorphicTarget(*receiver_map, *new_receiver_map)) {
+ monomorphic_map = new_receiver_map;
+ }
Handle<Object> handler = StoreElementHandler(monomorphic_map, store_mode);
return ConfigureVectorState(Handle<Name>(), monomorphic_map, handler);
}
@@ -1788,36 +1787,28 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
// There are several special cases where an IC that is MONOMORPHIC can still
- // transition to a different GetNonTransitioningStoreMode IC that handles a
- // superset of the original IC. Handle those here if the receiver map hasn't
- // changed or it has transitioned to a more general kind.
- KeyedAccessStoreMode old_store_mode;
- old_store_mode = GetKeyedAccessStoreMode();
+ // transition to a different IC that handles a superset of the original IC.
+ // Handle those here if the receiver map hasn't changed or it has transitioned
+ // to a more general kind.
+ KeyedAccessStoreMode old_store_mode = GetKeyedAccessStoreMode();
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
if (state() == MONOMORPHIC) {
- Handle<Map> transitioned_receiver_map = receiver_map;
- if (IsTransitionStoreMode(store_mode)) {
- transitioned_receiver_map =
- ComputeTransitionedMap(receiver_map, store_mode);
- }
- if ((receiver_map.is_identical_to(previous_receiver_map) &&
- IsTransitionStoreMode(store_mode)) ||
- IsTransitionOfMonomorphicTarget(*previous_receiver_map,
+ Handle<Map> transitioned_receiver_map = new_receiver_map;
+ if (IsTransitionOfMonomorphicTarget(*previous_receiver_map,
*transitioned_receiver_map)) {
// If the "old" and "new" maps are in the same elements map family, or
// if they at least come from the same origin for a transitioning store,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
- store_mode = GetNonTransitioningStoreMode(store_mode, receiver_was_cow);
Handle<Object> handler =
StoreElementHandler(transitioned_receiver_map, store_mode);
ConfigureVectorState(Handle<Name>(), transitioned_receiver_map, handler);
return;
}
+ // If there is no transition and if we have seen the same map earlier and
+ // there is only a change in the store_mode we can still stay monomorphic.
if (receiver_map.is_identical_to(previous_receiver_map) &&
- old_store_mode == STANDARD_STORE &&
- (store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
+ new_receiver_map.is_identical_to(receiver_map) &&
+ old_store_mode == STANDARD_STORE && store_mode != STANDARD_STORE) {
// A "normal" IC that handles stores can switch to a version that can
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
@@ -1831,11 +1822,9 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
- if (IsTransitionStoreMode(store_mode)) {
- Handle<Map> transitioned_receiver_map =
- ComputeTransitionedMap(receiver_map, store_mode);
- map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps,
- transitioned_receiver_map);
+ if (IsTransitionOfMonomorphicTarget(*receiver_map, *new_receiver_map)) {
+ map_added |=
+ AddOneReceiverMapIfMissing(&target_receiver_maps, new_receiver_map);
}
if (!map_added) {
@@ -1851,7 +1840,6 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// Make sure all polymorphic handlers have the same store mode, otherwise the
// megamorphic stub must be used.
- store_mode = GetNonTransitioningStoreMode(store_mode, receiver_was_cow);
if (old_store_mode != STANDARD_STORE) {
if (store_mode == STANDARD_STORE) {
store_mode = old_store_mode;
@@ -1867,7 +1855,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
if (store_mode != STANDARD_STORE) {
size_t external_arrays = 0;
for (Handle<Map> map : target_receiver_maps) {
- if (map->has_fixed_typed_array_elements()) {
+ if (map->has_typed_array_elements()) {
DCHECK(!IsStoreInArrayLiteralICKind(kind()));
external_arrays++;
}
@@ -1896,40 +1884,8 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
}
-Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
- Handle<Map> map, KeyedAccessStoreMode store_mode) {
- switch (store_mode) {
- case STORE_TRANSITION_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_TO_OBJECT: {
- ElementsKind kind = IsHoleyElementsKind(map->elements_kind())
- ? HOLEY_ELEMENTS
- : PACKED_ELEMENTS;
- return Map::TransitionElementsTo(isolate(), map, kind);
- }
- case STORE_TRANSITION_TO_DOUBLE:
- case STORE_AND_GROW_TRANSITION_TO_DOUBLE: {
- ElementsKind kind = IsHoleyElementsKind(map->elements_kind())
- ? HOLEY_DOUBLE_ELEMENTS
- : PACKED_DOUBLE_ELEMENTS;
- return Map::TransitionElementsTo(isolate(), map, kind);
- }
- case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
- DCHECK(map->has_fixed_typed_array_elements());
- V8_FALLTHROUGH;
- case STORE_NO_TRANSITION_HANDLE_COW:
- case STANDARD_STORE:
- case STORE_AND_GROW_NO_TRANSITION_HANDLE_COW:
- return map;
- }
- UNREACHABLE();
-}
-
Handle<Object> KeyedStoreIC::StoreElementHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
DCHECK_IMPLIES(
receiver_map->DictionaryElementsInPrototypeChainOnly(isolate()),
IsStoreInArrayLiteralICKind(kind()));
@@ -1947,10 +1903,10 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
CodeFactory::KeyedStoreIC_SloppyArguments(isolate(), store_mode).code();
} else if (receiver_map->has_fast_elements() ||
receiver_map->has_sealed_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
+ receiver_map->has_typed_array_elements()) {
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
code = CodeFactory::StoreFastElementIC(isolate(), store_mode).code();
- if (receiver_map->has_fixed_typed_array_elements()) return code;
+ if (receiver_map->has_typed_array_elements()) return code;
} else if (IsStoreInArrayLiteralICKind(kind())) {
// TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), StoreInArrayLiteralIC_SlowStub);
@@ -1981,11 +1937,6 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
void KeyedStoreIC::StoreElementPolymorphicHandlers(
MapHandles* receiver_maps, MaybeObjectHandles* handlers,
KeyedAccessStoreMode store_mode) {
- DCHECK(store_mode == STANDARD_STORE ||
- store_mode == STORE_AND_GROW_NO_TRANSITION_HANDLE_COW ||
- store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
// Filter out deprecated maps to ensure their instances get migrated.
receiver_maps->erase(
std::remove_if(
@@ -2035,50 +1986,39 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
}
}
-static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
- uint32_t index, Handle<Object> value) {
+namespace {
+
+bool MayHaveTypedArrayInPrototypeChain(Handle<JSObject> object) {
+ for (PrototypeIterator iter(object->GetIsolate(), *object); !iter.IsAtEnd();
+ iter.Advance()) {
+ // Be conservative, don't walk into proxies.
+ if (iter.GetCurrent().IsJSProxy()) return true;
+ if (iter.GetCurrent().IsJSTypedArray()) return true;
+ }
+ return false;
+}
+
+KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver, uint32_t index) {
bool oob_access = IsOutOfBoundsAccess(receiver, index);
// Don't consider this a growing store if the store would send the receiver to
- // dictionary mode.
+ // dictionary mode. Also make sure we don't consider this a growing store if
+ // there's any JSTypedArray in the {receiver}'s prototype chain, since that
+ // prototype is going to swallow all stores that are out-of-bounds for said
+ // prototype, and we just let the runtime deal with the complexity of this.
bool allow_growth = receiver->IsJSArray() && oob_access &&
- !receiver->WouldConvertToSlowElements(index);
+ !receiver->WouldConvertToSlowElements(index) &&
+ !MayHaveTypedArrayInPrototypeChain(receiver);
if (allow_growth) {
- // Handle growing array in stub if necessary.
- if (receiver->HasSmiElements()) {
- if (value->IsHeapNumber()) {
- return STORE_AND_GROW_TRANSITION_TO_DOUBLE;
- }
- if (value->IsHeapObject()) {
- return STORE_AND_GROW_TRANSITION_TO_OBJECT;
- }
- } else if (receiver->HasDoubleElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber()) {
- return STORE_AND_GROW_TRANSITION_TO_OBJECT;
- }
- }
- return STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
- } else {
- // Handle only in-bounds elements accesses.
- if (receiver->HasSmiElements()) {
- if (value->IsHeapNumber()) {
- return STORE_TRANSITION_TO_DOUBLE;
- } else if (value->IsHeapObject()) {
- return STORE_TRANSITION_TO_OBJECT;
- }
- } else if (receiver->HasDoubleElements()) {
- if (!value->IsSmi() && !value->IsHeapNumber()) {
- return STORE_TRANSITION_TO_OBJECT;
- }
- }
- if (!FLAG_trace_external_array_abuse &&
- receiver->map()->has_fixed_typed_array_elements() && oob_access) {
- return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
- }
- return receiver->elements()->IsCowArray() ? STORE_NO_TRANSITION_HANDLE_COW
- : STANDARD_STORE;
+ return STORE_AND_GROW_HANDLE_COW;
+ }
+ if (receiver->map().has_typed_array_elements() && oob_access) {
+ return STORE_IGNORE_OUT_OF_BOUNDS;
}
+ return receiver->elements().IsCowArray() ? STORE_HANDLE_COW : STANDARD_STORE;
}
+} // namespace
+
MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
Handle<Object> value) {
@@ -2102,7 +2042,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
uint32_t index;
if ((key->IsInternalizedString() &&
- !String::cast(*key)->AsArrayIndex(&index)) ||
+ !String::cast(*key).AsArrayIndex(&index)) ||
key->IsSymbol()) {
ASSIGN_RETURN_ON_EXCEPTION(isolate(), store_handle,
StoreIC::Store(object, Handle<Name>::cast(key),
@@ -2127,7 +2067,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
// expect to be able to trap element sets to objects with those maps in
// the runtime to enable optimization of element hole access.
Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
- if (heap_object->map()->IsMapInArrayPrototypeChain(isolate())) {
+ if (heap_object->map().IsMapInArrayPrototypeChain(isolate())) {
set_slow_stub_reason("map in array prototype");
use_ic = false;
}
@@ -2153,15 +2093,12 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (key_is_valid_index) {
uint32_t index = static_cast<uint32_t>(Smi::ToInt(*key));
Handle<JSObject> receiver_object = Handle<JSObject>::cast(object);
- store_mode = GetStoreMode(receiver_object, index, value);
+ store_mode = GetStoreMode(receiver_object, index);
}
}
}
DCHECK(store_handle.is_null());
- bool receiver_was_cow =
- object->IsJSArray() &&
- Handle<JSArray>::cast(object)->elements()->IsCowArray();
ASSIGN_RETURN_ON_EXCEPTION(
isolate(), store_handle,
Runtime::SetObjectProperty(isolate(), object, key, value,
@@ -2177,23 +2114,13 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
set_slow_stub_reason("receiver with prototype map");
} else if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly(
isolate())) {
- // If the SetObjectProperty call did not transition, avoid adding
- // a transition just for the ICs. We want to avoid making
- // the receiver map unnecessarily non-stable (crbug.com/950328).
- //
- // TODO(jarin) We should make this more robust so that the IC system
- // does not duplicate the logic implemented in runtime
- // (Runtime::SetObjectProperty).
- if (old_receiver_map->elements_kind() ==
- Handle<HeapObject>::cast(object)->map()->elements_kind()) {
- store_mode =
- GetNonTransitioningStoreMode(store_mode, receiver_was_cow);
- }
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
// from fast path keyed stores.
- UpdateStoreElement(old_receiver_map, store_mode, receiver_was_cow);
+ Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
+ UpdateStoreElement(old_receiver_map, store_mode,
+ handle(receiver->map(), isolate()));
} else {
set_slow_stub_reason("dictionary or proxy prototype");
}
@@ -2230,7 +2157,7 @@ void StoreOwnElement(Isolate* isolate, Handle<JSArray> array,
void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
Handle<Object> value) {
- DCHECK(!array->map()->IsMapInArrayPrototypeChain(isolate()));
+ DCHECK(!array->map().IsMapInArrayPrototypeChain(isolate()));
DCHECK(index->IsNumber());
if (!FLAG_use_ic || state() == NO_FEEDBACK || MigrateDeprecated(array)) {
@@ -2245,16 +2172,16 @@ void StoreInArrayLiteralIC::Store(Handle<JSArray> array, Handle<Object> index,
if (index->IsSmi()) {
DCHECK_GE(Smi::ToInt(*index), 0);
uint32_t index32 = static_cast<uint32_t>(Smi::ToInt(*index));
- store_mode = GetStoreMode(array, index32, value);
+ store_mode = GetStoreMode(array, index32);
}
Handle<Map> old_array_map(array->map(), isolate());
- bool array_was_cow = array->elements()->IsCowArray();
StoreOwnElement(isolate(), array, index, value);
if (index->IsSmi()) {
DCHECK(!old_array_map->is_abandoned_prototype_map());
- UpdateStoreElement(old_array_map, store_mode, array_was_cow);
+ UpdateStoreElement(old_array_map, store_mode,
+ handle(array->map(), isolate()));
} else {
set_slow_stub_reason("index out of Smi range");
}
@@ -2647,10 +2574,10 @@ static bool CanFastCloneObject(Handle<Map> map) {
DescriptorArray descriptors = map->instance_descriptors();
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- Name key = descriptors->GetKey(i);
+ PropertyDetails details = descriptors.GetDetails(i);
+ Name key = descriptors.GetKey(i);
if (details.kind() != kData || !details.IsEnumerable() ||
- key->IsPrivateName()) {
+ key.IsPrivateName()) {
return false;
}
}
@@ -2817,7 +2744,7 @@ RUNTIME_FUNCTION(Runtime_LoadAccessorProperty) {
Object holder = *receiver;
if (handler_kind == LoadHandler::kApiGetterHolderIsPrototype) {
- holder = receiver->map()->prototype();
+ holder = receiver->map().prototype();
} else {
DCHECK_EQ(handler_kind, LoadHandler::kApiGetter);
}
@@ -2988,7 +2915,7 @@ RUNTIME_FUNCTION(Runtime_HasElementWithInterceptor) {
PropertyCallbackArguments arguments(isolate, interceptor->data(), *receiver,
*receiver, Just(kDontThrow));
- if (!interceptor->query()->IsUndefined(isolate)) {
+ if (!interceptor->query().IsUndefined(isolate)) {
Handle<Object> result = arguments.CallIndexedQuery(interceptor, index);
if (!result.is_null()) {
int32_t value;
@@ -2996,7 +2923,7 @@ RUNTIME_FUNCTION(Runtime_HasElementWithInterceptor) {
return value == ABSENT ? ReadOnlyRoots(isolate).false_value()
: ReadOnlyRoots(isolate).true_value();
}
- } else if (!interceptor->getter()->IsUndefined(isolate)) {
+ } else if (!interceptor->getter().IsUndefined(isolate)) {
Handle<Object> result = arguments.CallIndexedGetter(interceptor, index);
if (!result.is_null()) {
return ReadOnlyRoots(isolate).true_value();
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 590acdaa46..39462d55e5 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -7,11 +7,11 @@
#include <vector>
-#include "src/feedback-vector.h"
+#include "src/execution/isolate.h"
+#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/ic/stub-cache.h"
-#include "src/isolate.h"
-#include "src/message-template.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
#include "src/objects/smi.h"
@@ -38,7 +38,6 @@ class IC {
virtual ~IC() = default;
State state() const { return state_; }
- inline Address address() const;
// Compute the current IC state based on the target stub, receiver and name.
void UpdateState(Handle<Object> receiver, Handle<Object> name);
@@ -80,9 +79,7 @@ class IC {
// Get the caller function object.
JSFunction GetHostFunction() const;
- inline bool AddressIsDeoptimizedCode() const;
- inline static bool AddressIsDeoptimizedCode(Isolate* isolate,
- Address address);
+ inline bool HostIsDeoptimizedCode() const;
bool is_vector_set() { return vector_set_; }
inline bool vector_needs_update();
@@ -337,6 +334,12 @@ enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };
+enum class TransitionMode {
+ kNoTransition,
+ kTransitionToDouble,
+ kTransitionToObject
+};
+
class KeyedStoreIC : public StoreIC {
public:
KeyedAccessStoreMode GetKeyedAccessStoreMode() {
@@ -354,7 +357,7 @@ class KeyedStoreIC : public StoreIC {
protected:
void UpdateStoreElement(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode,
- bool receiver_was_cow);
+ Handle<Map> new_receiver_map);
Handle<Code> slow_stub() const override {
return BUILTIN_CODE(isolate(), KeyedStoreIC_Slow);
@@ -362,7 +365,7 @@ class KeyedStoreIC : public StoreIC {
private:
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
- KeyedAccessStoreMode store_mode);
+ TransitionMode transition_mode);
Handle<Object> StoreElementHandler(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 03f9d91d4f..f7e79ee6d7 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -4,14 +4,14 @@
#include "src/ic/keyed-store-generic.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
-#include "src/contexts.h"
-#include "src/feedback-vector.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/isolate.h"
#include "src/ic/accessor-assembler.h"
-#include "src/interface-descriptors.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/contexts.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -352,8 +352,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
PACKED_SMI_ELEMENTS, PACKED_ELEMENTS, slow);
}
- StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
- value);
+ StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, elements,
+ offset, value);
MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
BIND(&non_smi_value);
@@ -851,7 +851,9 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
var_accessor_holder.Bind(receiver);
Goto(&accessor);
} else {
- Goto(&overwrite);
+ // We must reconfigure an accessor property to a data property
+ // here, let the runtime take care of that.
+ Goto(slow);
}
BIND(&overwrite);
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index 9ab8db7864..322bb63321 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -5,8 +5,8 @@
#ifndef V8_IC_KEYED_STORE_GENERIC_H_
#define V8_IC_KEYED_STORE_GENERIC_H_
+#include "src/common/globals.h"
#include "src/compiler/code-assembler.h"
-#include "src/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 89a34ef80c..bdac1ce334 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -6,9 +6,9 @@
#include "src/ast/ast.h"
#include "src/base/bits.h"
-#include "src/counters.h"
#include "src/heap/heap-inl.h" // For InYoungGeneration().
#include "src/ic/ic-inl.h"
+#include "src/logging/counters.h"
namespace v8 {
namespace internal {
@@ -31,8 +31,8 @@ void StubCache::Initialize() {
int StubCache::PrimaryOffset(Name name, Map map) {
STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
- DCHECK(name->HasHashCode());
- uint32_t field = name->hash_field();
+ DCHECK(name.HasHashCode());
+ uint32_t field = name.hash_field();
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
// 4Gb (and not at all if it isn't).
@@ -70,8 +70,8 @@ bool CommonStubCacheChecks(StubCache* stub_cache, Name name, Map map,
// can use identity checks instead of structural equality checks.
DCHECK(!Heap::InYoungGeneration(name));
DCHECK(!Heap::InYoungGeneration(handler));
- DCHECK(name->IsUniqueName());
- DCHECK(name->HasHashCode());
+ DCHECK(name.IsUniqueName());
+ DCHECK(name.HasHashCode());
if (handler->ptr() != kNullAddress) DCHECK(IC::IsHandler(handler));
return true;
}
diff --git a/deps/v8/src/init/OWNERS b/deps/v8/src/init/OWNERS
new file mode 100644
index 0000000000..c5a41de1fd
--- /dev/null
+++ b/deps/v8/src/init/OWNERS
@@ -0,0 +1,5 @@
+ahaas@chromium.org
+bmeurer@chromium.org
+jkummerow@chromium.org
+jgruber@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 7a35e37c94..a080f8cdf0 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -2,28 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/bootstrapper.h"
+#include "src/init/bootstrapper.h"
-#include "src/accessors.h"
-#include "src/api-inl.h"
-#include "src/api-natives.h"
+#include "src/api/api-inl.h"
+#include "src/api/api-natives.h"
#include "src/base/ieee754.h"
-#include "src/compiler.h"
-#include "src/counters.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/compiler.h"
#include "src/debug/debug.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/microtask-queue.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
#include "src/extensions/gc-extension.h"
#include "src/extensions/ignition-statistics-extension.h"
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
-#include "src/function-kind.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate-inl.h"
-#include "src/math-random.h"
-#include "src/microtask-queue.h"
+#include "src/logging/counters.h"
+#include "src/numbers/math-random.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments.h"
+#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
@@ -68,11 +68,11 @@ void SourceCodeCache::Iterate(RootVisitor* v) {
bool SourceCodeCache::Lookup(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo>* handle) {
- for (int i = 0; i < cache_->length(); i += 2) {
- SeqOneByteString str = SeqOneByteString::cast(cache_->get(i));
- if (str->IsUtf8EqualTo(name)) {
+ for (int i = 0; i < cache_.length(); i += 2) {
+ SeqOneByteString str = SeqOneByteString::cast(cache_.get(i));
+ if (str.IsOneByteEqualTo(Vector<const uint8_t>::cast(name))) {
*handle = Handle<SharedFunctionInfo>(
- SharedFunctionInfo::cast(cache_->get(i + 1)), isolate);
+ SharedFunctionInfo::cast(cache_.get(i + 1)), isolate);
return true;
}
}
@@ -83,10 +83,10 @@ void SourceCodeCache::Add(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo> shared) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- int length = cache_->length();
+ int length = cache_.length();
Handle<FixedArray> new_array =
factory->NewFixedArray(length + 2, AllocationType::kOld);
- cache_->CopyTo(0, *new_array, 0, cache_->length());
+ cache_.CopyTo(0, *new_array, 0, cache_.length());
cache_ = *new_array;
Handle<String> str =
factory
@@ -94,9 +94,9 @@ void SourceCodeCache::Add(Isolate* isolate, Vector<const char> name,
AllocationType::kOld)
.ToHandleChecked();
DCHECK(!str.is_null());
- cache_->set(length, *str);
- cache_->set(length + 1, *shared);
- Script::cast(shared->script())->set_type(type_);
+ cache_.set(length, *str);
+ cache_.set(length + 1, *shared);
+ Script::cast(shared->script()).set_type(type_);
}
Bootstrapper::Bootstrapper(Isolate* isolate)
@@ -117,7 +117,6 @@ void Bootstrapper::Initialize(bool create_heap_objects) {
extensions_cache_.Initialize(isolate_, create_heap_objects);
}
-
static const char* GCFunctionName() {
bool flag_given =
FLAG_expose_gc_as != nullptr && strlen(FLAG_expose_gc_as) != 0;
@@ -209,8 +208,7 @@ class Genesis {
// Depending on the situation, expose and/or get rid of the utils object.
void ConfigureUtilsObject();
-#define DECLARE_FEATURE_INITIALIZATION(id, descr) \
- void InitializeGlobal_##id();
+#define DECLARE_FEATURE_INITIALIZATION(id, descr) void InitializeGlobal_##id();
HARMONY_INPROGRESS(DECLARE_FEATURE_INITIALIZATION)
HARMONY_STAGED(DECLARE_FEATURE_INITIALIZATION)
@@ -233,9 +231,7 @@ class Genesis {
bool InstallExtraNatives();
void InitializeNormalizedMapCaches();
- enum ExtensionTraversalState {
- UNVISITED, VISITED, INSTALLED
- };
+ enum ExtensionTraversalState { UNVISITED, VISITED, INSTALLED };
class ExtensionStates {
public:
@@ -243,6 +239,7 @@ class Genesis {
ExtensionTraversalState get_state(RegisteredExtension* extension);
void set_state(RegisteredExtension* extension,
ExtensionTraversalState state);
+
private:
base::HashMap map_;
DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
@@ -258,8 +255,7 @@ class Genesis {
static bool InstallRequestedExtensions(Isolate* isolate,
v8::ExtensionConfiguration* extensions,
ExtensionStates* extension_states);
- static bool InstallExtension(Isolate* isolate,
- const char* name,
+ static bool InstallExtension(Isolate* isolate, const char* name,
ExtensionStates* extension_states);
static bool InstallExtension(Isolate* isolate,
v8::RegisteredExtension* current,
@@ -353,12 +349,12 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
Handle<JSGlobalProxy> global_proxy(env->global_proxy(), isolate_);
global_proxy->set_native_context(roots.null_value());
JSObject::ForceSetPrototype(global_proxy, isolate_->factory()->null_value());
- global_proxy->map()->SetConstructor(roots.null_value());
+ global_proxy->map().SetConstructor(roots.null_value());
if (FLAG_track_detached_contexts) {
isolate_->AddDetachedContext(env);
}
- env->native_context()->set_microtask_queue(nullptr);
+ env->native_context().set_microtask_queue(nullptr);
}
namespace {
@@ -401,7 +397,7 @@ V8_NOINLINE Handle<JSFunction> CreateFunction(
// Make the resulting JSFunction object fast.
JSObject::MakePrototypesFast(result, kStartAtReceiver, isolate);
- result->shared()->set_native(true);
+ result->shared().set_native(true);
return result;
}
@@ -442,14 +438,14 @@ V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
Handle<JSFunction> fun = isolate->factory()->NewFunction(args);
// Make the resulting JSFunction object fast.
JSObject::MakePrototypesFast(fun, kStartAtReceiver, isolate);
- fun->shared()->set_native(true);
+ fun->shared().set_native(true);
if (adapt) {
- fun->shared()->set_internal_formal_parameter_count(len);
+ fun->shared().set_internal_formal_parameter_count(len);
} else {
- fun->shared()->DontAdaptArguments();
+ fun->shared().DontAdaptArguments();
}
- fun->shared()->set_length(len);
+ fun->shared().set_length(len);
return fun;
}
@@ -606,8 +602,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
script->set_type(Script::TYPE_NATIVE);
Handle<WeakFixedArray> infos = factory()->NewWeakFixedArray(2);
script->set_shared_function_infos(*infos);
- empty_function->shared()->set_scope_info(*scope_info);
- empty_function->shared()->DontAdaptArguments();
+ empty_function->shared().set_scope_info(*scope_info);
+ empty_function->shared().DontAdaptArguments();
SharedFunctionInfo::SetScript(handle(empty_function->shared(), isolate()),
script, 1);
@@ -651,7 +647,7 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic() {
NewFunctionArgs args = NewFunctionArgs::ForBuiltinWithoutPrototype(
name, Builtins::kStrictPoisonPillThrower, i::LanguageMode::kStrict);
Handle<JSFunction> function = factory()->NewFunction(args);
- function->shared()->DontAdaptArguments();
+ function->shared().DontAdaptArguments();
// %ThrowTypeError% must not have a name property.
if (JSReceiver::DeleteProperty(function, factory()->name_string())
@@ -741,14 +737,14 @@ void Genesis::CreateObjectFunction(Handle<JSFunction> empty_function) {
Handle<JSFunction> object_fun = CreateFunction(
isolate_, factory->Object_string(), JS_OBJECT_TYPE, instance_size,
inobject_properties, factory->null_value(), Builtins::kObjectConstructor);
- object_fun->shared()->set_length(1);
- object_fun->shared()->DontAdaptArguments();
+ object_fun->shared().set_length(1);
+ object_fun->shared().DontAdaptArguments();
native_context()->set_object_function(*object_fun);
{
// Finish setting up Object function's initial map.
Map initial_map = object_fun->initial_map();
- initial_map->set_elements_kind(HOLEY_ELEMENTS);
+ initial_map.set_elements_kind(HOLEY_ELEMENTS);
}
// Allocate a new prototype for the object function.
@@ -856,7 +852,7 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
Handle<JSFunction> generator_next_internal =
SimpleCreateFunction(isolate(), factory()->next_string(),
Builtins::kGeneratorPrototypeNext, 1, false);
- generator_next_internal->shared()->set_native(false);
+ generator_next_internal->shared().set_native(false);
native_context()->set_generator_next_internal(*generator_next_internal);
// Create maps for generator functions and their prototypes. Store those
@@ -1090,9 +1086,9 @@ void ReplaceAccessors(Isolate* isolate, Handle<Map> map, Handle<String> name,
PropertyAttributes attributes,
Handle<AccessorPair> accessor_pair) {
DescriptorArray descriptors = map->instance_descriptors();
- int idx = descriptors->SearchWithCache(isolate, *name, *map);
+ int idx = descriptors.SearchWithCache(isolate, *name, *map);
Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
- descriptors->Replace(idx, &d);
+ descriptors.Replace(idx, &d);
}
} // namespace
@@ -1111,25 +1107,24 @@ void Genesis::AddRestrictedFunctionProperties(Handle<JSFunction> empty) {
}
static void AddToWeakNativeContextList(Isolate* isolate, Context context) {
- DCHECK(context->IsNativeContext());
+ DCHECK(context.IsNativeContext());
Heap* heap = isolate->heap();
#ifdef DEBUG
- { // NOLINT
- DCHECK(context->next_context_link()->IsUndefined(isolate));
+ { // NOLINT
+ DCHECK(context.next_context_link().IsUndefined(isolate));
// Check that context is not in the list yet.
for (Object current = heap->native_contexts_list();
- !current->IsUndefined(isolate);
- current = Context::cast(current)->next_context_link()) {
+ !current.IsUndefined(isolate);
+ current = Context::cast(current).next_context_link()) {
DCHECK(current != context);
}
}
#endif
- context->set(Context::NEXT_CONTEXT_LINK, heap->native_contexts_list(),
- UPDATE_WEAK_WRITE_BARRIER);
+ context.set(Context::NEXT_CONTEXT_LINK, heap->native_contexts_list(),
+ UPDATE_WEAK_WRITE_BARRIER);
heap->set_native_contexts_list(context);
}
-
void Genesis::CreateRoots() {
// Allocate the native context FixedArray first and then patch the
// closure and extension object later (we need the empty function
@@ -1146,7 +1141,6 @@ void Genesis::CreateRoots() {
}
}
-
void Genesis::InstallGlobalThisBinding() {
Handle<ScriptContextTable> script_contexts(
native_context()->script_context_table(), isolate());
@@ -1164,7 +1158,6 @@ void Genesis::InstallGlobalThisBinding() {
native_context()->set_script_context_table(*new_script_contexts);
}
-
Handle<JSGlobalObject> Genesis::CreateNewGlobals(
v8::Local<v8::ObjectTemplate> global_proxy_template,
Handle<JSGlobalProxy> global_proxy) {
@@ -1223,9 +1216,9 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
JS_GLOBAL_OBJECT_TYPE);
}
- js_global_object_function->initial_map()->set_is_prototype_map(true);
- js_global_object_function->initial_map()->set_is_dictionary_map(true);
- js_global_object_function->initial_map()->set_may_have_interesting_symbols(
+ js_global_object_function->initial_map().set_is_prototype_map(true);
+ js_global_object_function->initial_map().set_is_dictionary_map(true);
+ js_global_object_function->initial_map().set_may_have_interesting_symbols(
true);
Handle<JSGlobalObject> global_object =
factory()->NewJSGlobalObject(js_global_object_function);
@@ -1248,9 +1241,9 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
isolate(), global_constructor, factory()->the_hole_value(),
JS_GLOBAL_PROXY_TYPE);
}
- global_proxy_function->initial_map()->set_is_access_check_needed(true);
- global_proxy_function->initial_map()->set_has_hidden_prototype(true);
- global_proxy_function->initial_map()->set_may_have_interesting_symbols(true);
+ global_proxy_function->initial_map().set_is_access_check_needed(true);
+ global_proxy_function->initial_map().set_has_hidden_prototype(true);
+ global_proxy_function->initial_map().set_may_have_interesting_symbols(true);
native_context()->set_global_proxy_function(*global_proxy_function);
// Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
@@ -1268,7 +1261,7 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
// deserializer. Otherwise it's undefined.
DCHECK(native_context()
->get(Context::GLOBAL_PROXY_INDEX)
- ->IsUndefined(isolate()) ||
+ .IsUndefined(isolate()) ||
native_context()->global_proxy() == *global_proxy);
native_context()->set_global_proxy(*global_proxy);
@@ -1321,8 +1314,8 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
InstallFunction(isolate, global, name, JS_ERROR_TYPE, kErrorObjectSize,
kInObjectPropertiesCount, factory->the_hole_value(),
Builtins::kErrorConstructor);
- error_fun->shared()->DontAdaptArguments();
- error_fun->shared()->set_length(1);
+ error_fun->shared().DontAdaptArguments();
+ error_fun->shared().set_length(1);
if (context_index == Context::ERROR_FUNCTION_INDEX) {
SimpleInstallFunction(isolate, error_fun, "captureStackTrace",
@@ -1348,7 +1341,7 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
isolate->native_context()->set_error_to_string(*to_string_fun);
isolate->native_context()->set_initial_error_prototype(*prototype);
} else {
- DCHECK(isolate->native_context()->error_to_string()->IsJSFunction());
+ DCHECK(isolate->native_context()->error_to_string().IsJSFunction());
JSObject::AddProperty(isolate, prototype, factory->toString_string(),
isolate->error_to_string(), DONT_ENUM);
@@ -1383,7 +1376,7 @@ void InstallMakeError(Isolate* isolate, int builtin_id, int context_index) {
JS_OBJECT_TYPE, JSObject::kHeaderSize, 0, builtin_id, MUTABLE);
Handle<JSFunction> function = isolate->factory()->NewFunction(args);
- function->shared()->DontAdaptArguments();
+ function->shared().DontAdaptArguments();
isolate->native_context()->set(context_index, *function);
}
@@ -1395,7 +1388,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> empty_function) {
// --- N a t i v e C o n t e x t ---
// Use the empty scope info.
- native_context()->set_scope_info(empty_function->shared()->scope_info());
+ native_context()->set_scope_info(empty_function->shared().scope_info());
native_context()->set_previous(Context());
// Set extension and global object.
native_context()->set_extension(*global_object);
@@ -1526,8 +1519,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Function instances are sloppy by default.
function_fun->set_prototype_or_initial_map(
*isolate_->sloppy_function_map());
- function_fun->shared()->DontAdaptArguments();
- function_fun->shared()->set_length(1);
+ function_fun->shared().DontAdaptArguments();
+ function_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, function_fun,
Context::FUNCTION_FUNCTION_INDEX);
@@ -1616,11 +1609,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> array_function = InstallFunction(
isolate_, global, "Array", JS_ARRAY_TYPE, JSArray::kSize, 0,
isolate_->initial_object_prototype(), Builtins::kArrayConstructor);
- array_function->shared()->DontAdaptArguments();
+ array_function->shared().DontAdaptArguments();
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
- array_function->shared()->set_length(1);
+ array_function->shared().set_length(1);
Handle<Map> initial_map(array_function->initial_map(), isolate());
@@ -1629,8 +1622,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
DCHECK(initial_map->elements_kind() == GetInitialFastElementsKind());
Map::EnsureDescriptorSlack(isolate_, initial_map, 1);
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
STATIC_ASSERT(JSArray::kLengthDescriptorIndex == 0);
{ // Add length.
@@ -1649,7 +1642,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Set up %ArrayPrototype%.
// The %ArrayPrototype% has TERMINAL_FAST_ELEMENTS_KIND in order to ensure
// that constant functions stay constant after turning prototype to setup
- // mode and back when constant field tracking is enabled.
+ // mode and back.
Handle<JSArray> proto = factory->NewJSArray(0, TERMINAL_FAST_ELEMENTS_KIND,
AllocationType::kOld);
JSFunction::SetPrototype(array_function, proto);
@@ -1783,7 +1776,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
CreateFunction(isolate_, factory->ArrayIterator_string(),
JS_ARRAY_ITERATOR_TYPE, JSArrayIterator::kSize, 0,
array_iterator_prototype, Builtins::kIllegal);
- array_iterator_function->shared()->set_native(false);
+ array_iterator_function->shared().set_native(false);
native_context()->set_initial_array_iterator_map(
array_iterator_function->initial_map());
@@ -1795,8 +1788,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> number_fun = InstallFunction(
isolate_, global, "Number", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate_->initial_object_prototype(), Builtins::kNumberConstructor);
- number_fun->shared()->DontAdaptArguments();
- number_fun->shared()->set_length(1);
+ number_fun->shared().DontAdaptArguments();
+ number_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, number_fun,
Context::NUMBER_FUNCTION_INDEX);
@@ -1878,8 +1871,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> boolean_fun = InstallFunction(
isolate_, global, "Boolean", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate_->initial_object_prototype(), Builtins::kBooleanConstructor);
- boolean_fun->shared()->DontAdaptArguments();
- boolean_fun->shared()->set_length(1);
+ boolean_fun->shared().DontAdaptArguments();
+ boolean_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, boolean_fun,
Context::BOOLEAN_FUNCTION_INDEX);
@@ -1904,18 +1897,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> string_fun = InstallFunction(
isolate_, global, "String", JS_VALUE_TYPE, JSValue::kSize, 0,
isolate_->initial_object_prototype(), Builtins::kStringConstructor);
- string_fun->shared()->DontAdaptArguments();
- string_fun->shared()->set_length(1);
+ string_fun->shared().DontAdaptArguments();
+ string_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, string_fun,
Context::STRING_FUNCTION_INDEX);
Handle<Map> string_map = Handle<Map>(
- native_context()->string_function()->initial_map(), isolate());
+ native_context()->string_function().initial_map(), isolate());
string_map->set_elements_kind(FAST_STRING_WRAPPER_ELEMENTS);
Map::EnsureDescriptorSlack(isolate_, string_map, 1);
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
{ // Add length.
Descriptor d = Descriptor::AccessorConstant(
@@ -1990,6 +1983,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
#endif // V8_INTL_SUPPORT
SimpleInstallFunction(isolate_, prototype, "match",
Builtins::kStringPrototypeMatch, 1, true);
+ SimpleInstallFunction(isolate_, prototype, "matchAll",
+ Builtins::kStringPrototypeMatchAll, 1, true);
#ifdef V8_INTL_SUPPORT
SimpleInstallFunction(isolate_, prototype, "normalize",
Builtins::kStringPrototypeNormalizeIntl, 0, false);
@@ -2087,7 +2082,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, factory->InternalizeUtf8String("StringIterator"),
JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize, 0,
string_iterator_prototype, Builtins::kIllegal);
- string_iterator_function->shared()->set_native(false);
+ string_iterator_function->shared().set_native(false);
native_context()->set_initial_string_iterator_map(
string_iterator_function->initial_map());
native_context()->set_initial_string_iterator_prototype(
@@ -2098,8 +2093,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> symbol_fun = InstallFunction(
isolate_, global, "Symbol", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kSymbolConstructor);
- symbol_fun->shared()->set_length(0);
- symbol_fun->shared()->DontAdaptArguments();
+ symbol_fun->shared().set_length(0);
+ symbol_fun->shared().DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
// Install the Symbol.for and Symbol.keyFor functions.
@@ -2118,6 +2113,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallConstant(isolate_, symbol_fun, "iterator",
factory->iterator_symbol());
InstallConstant(isolate_, symbol_fun, "match", factory->match_symbol());
+ InstallConstant(isolate_, symbol_fun, "matchAll",
+ factory->match_all_symbol());
InstallConstant(isolate_, symbol_fun, "replace", factory->replace_symbol());
InstallConstant(isolate_, symbol_fun, "search", factory->search_symbol());
InstallConstant(isolate_, symbol_fun, "species", factory->species_symbol());
@@ -2159,8 +2156,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
factory->the_hole_value(), Builtins::kDateConstructor);
InstallWithIntrinsicDefaultProto(isolate_, date_fun,
Context::DATE_FUNCTION_INDEX);
- date_fun->shared()->set_length(7);
- date_fun->shared()->DontAdaptArguments();
+ date_fun->shared().set_length(7);
+ date_fun->shared().DontAdaptArguments();
// Install the Date.now, Date.parse and Date.UTC functions.
SimpleInstallFunction(isolate_, date_fun, "now", Builtins::kDateNow, 0,
@@ -2451,7 +2448,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kRegExpPrototypeExec, 1, true);
// Check that index of "exec" function in JSRegExp is correct.
DCHECK_EQ(JSRegExp::kExecFunctionDescriptorIndex,
- prototype->map()->LastAdded());
+ prototype->map().LastAdded());
native_context()->set_regexp_exec_function(*fun);
}
@@ -2484,25 +2481,31 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
"[Symbol.match]", Builtins::kRegExpPrototypeMatch,
1, true);
DCHECK_EQ(JSRegExp::kSymbolMatchFunctionDescriptorIndex,
- prototype->map()->LastAdded());
+ prototype->map().LastAdded());
+
+ InstallFunctionAtSymbol(isolate_, prototype, factory->match_all_symbol(),
+ "[Symbol.matchAll]",
+ Builtins::kRegExpPrototypeMatchAll, 1, true);
+ DCHECK_EQ(JSRegExp::kSymbolMatchAllFunctionDescriptorIndex,
+ prototype->map().LastAdded());
InstallFunctionAtSymbol(isolate_, prototype, factory->replace_symbol(),
"[Symbol.replace]",
Builtins::kRegExpPrototypeReplace, 2, false);
DCHECK_EQ(JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
- prototype->map()->LastAdded());
+ prototype->map().LastAdded());
InstallFunctionAtSymbol(isolate_, prototype, factory->search_symbol(),
"[Symbol.search]",
Builtins::kRegExpPrototypeSearch, 1, true);
DCHECK_EQ(JSRegExp::kSymbolSearchFunctionDescriptorIndex,
- prototype->map()->LastAdded());
+ prototype->map().LastAdded());
InstallFunctionAtSymbol(isolate_, prototype, factory->split_symbol(),
"[Symbol.split]", Builtins::kRegExpPrototypeSplit,
2, false);
DCHECK_EQ(JSRegExp::kSymbolSplitFunctionDescriptorIndex,
- prototype->map()->LastAdded());
+ prototype->map().LastAdded());
Handle<Map> prototype_map(prototype->map(), isolate());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate_);
@@ -2599,6 +2602,31 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSObject::MigrateSlowToFast(regexp_fun, 0, "Bootstrapping");
}
+ { // --- R e g E x p S t r i n g I t e r a t o r ---
+ Handle<JSObject> iterator_prototype(
+ native_context()->initial_iterator_prototype(), isolate());
+
+ Handle<JSObject> regexp_string_iterator_prototype = factory->NewJSObject(
+ isolate()->object_function(), AllocationType::kOld);
+ JSObject::ForceSetPrototype(regexp_string_iterator_prototype,
+ iterator_prototype);
+
+ InstallToStringTag(isolate(), regexp_string_iterator_prototype,
+ "RegExp String Iterator");
+
+ SimpleInstallFunction(isolate(), regexp_string_iterator_prototype, "next",
+ Builtins::kRegExpStringIteratorPrototypeNext, 0,
+ true);
+
+ Handle<JSFunction> regexp_string_iterator_function = CreateFunction(
+ isolate(), "RegExpStringIterator", JS_REGEXP_STRING_ITERATOR_TYPE,
+ JSRegExpStringIterator::kSize, 0, regexp_string_iterator_prototype,
+ Builtins::kIllegal);
+ regexp_string_iterator_function->shared().set_native(false);
+ native_context()->set_initial_regexp_string_iterator_prototype_map(
+ regexp_string_iterator_function->initial_map());
+ }
+
{ // -- E r r o r
InstallError(isolate_, global, factory->Error_string(),
Context::ERROR_FUNCTION_INDEX);
@@ -2764,49 +2792,49 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
DCHECK(console->IsJSObject());
JSObject::AddProperty(isolate_, global, name, console, DONT_ENUM);
SimpleInstallFunction(isolate_, console, "debug", Builtins::kConsoleDebug,
- 1, false, NONE);
+ 0, false, NONE);
SimpleInstallFunction(isolate_, console, "error", Builtins::kConsoleError,
- 1, false, NONE);
- SimpleInstallFunction(isolate_, console, "info", Builtins::kConsoleInfo, 1,
+ 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "info", Builtins::kConsoleInfo, 0,
false, NONE);
- SimpleInstallFunction(isolate_, console, "log", Builtins::kConsoleLog, 1,
+ SimpleInstallFunction(isolate_, console, "log", Builtins::kConsoleLog, 0,
false, NONE);
- SimpleInstallFunction(isolate_, console, "warn", Builtins::kConsoleWarn, 1,
+ SimpleInstallFunction(isolate_, console, "warn", Builtins::kConsoleWarn, 0,
false, NONE);
SimpleInstallFunction(isolate_, console, "dir", Builtins::kConsoleDir, 1,
false, NONE);
SimpleInstallFunction(isolate_, console, "dirxml", Builtins::kConsoleDirXml,
- 1, false, NONE);
+ 0, false, NONE);
SimpleInstallFunction(isolate_, console, "table", Builtins::kConsoleTable,
1, false, NONE);
SimpleInstallFunction(isolate_, console, "trace", Builtins::kConsoleTrace,
- 1, false, NONE);
+ 0, false, NONE);
SimpleInstallFunction(isolate_, console, "group", Builtins::kConsoleGroup,
- 1, false, NONE);
+ 0, false, NONE);
SimpleInstallFunction(isolate_, console, "groupCollapsed",
- Builtins::kConsoleGroupCollapsed, 1, false, NONE);
+ Builtins::kConsoleGroupCollapsed, 0, false, NONE);
SimpleInstallFunction(isolate_, console, "groupEnd",
- Builtins::kConsoleGroupEnd, 1, false, NONE);
+ Builtins::kConsoleGroupEnd, 0, false, NONE);
SimpleInstallFunction(isolate_, console, "clear", Builtins::kConsoleClear,
- 1, false, NONE);
+ 0, false, NONE);
SimpleInstallFunction(isolate_, console, "count", Builtins::kConsoleCount,
- 1, false, NONE);
+ 0, false, NONE);
SimpleInstallFunction(isolate_, console, "countReset",
- Builtins::kConsoleCountReset, 1, false, NONE);
+ Builtins::kConsoleCountReset, 0, false, NONE);
SimpleInstallFunction(isolate_, console, "assert",
- Builtins::kFastConsoleAssert, 1, false, NONE);
+ Builtins::kFastConsoleAssert, 0, false, NONE);
SimpleInstallFunction(isolate_, console, "profile",
- Builtins::kConsoleProfile, 1, false, NONE);
+ Builtins::kConsoleProfile, 0, false, NONE);
SimpleInstallFunction(isolate_, console, "profileEnd",
- Builtins::kConsoleProfileEnd, 1, false, NONE);
- SimpleInstallFunction(isolate_, console, "time", Builtins::kConsoleTime, 1,
+ Builtins::kConsoleProfileEnd, 0, false, NONE);
+ SimpleInstallFunction(isolate_, console, "time", Builtins::kConsoleTime, 0,
false, NONE);
SimpleInstallFunction(isolate_, console, "timeLog",
- Builtins::kConsoleTimeLog, 1, false, NONE);
+ Builtins::kConsoleTimeLog, 0, false, NONE);
SimpleInstallFunction(isolate_, console, "timeEnd",
- Builtins::kConsoleTimeEnd, 1, false, NONE);
+ Builtins::kConsoleTimeEnd, 0, false, NONE);
SimpleInstallFunction(isolate_, console, "timeStamp",
- Builtins::kConsoleTimeStamp, 1, false, NONE);
+ Builtins::kConsoleTimeStamp, 0, false, NONE);
SimpleInstallFunction(isolate_, console, "context",
Builtins::kConsoleContext, 1, true, NONE);
InstallToStringTag(isolate_, console, "Object");
@@ -2826,8 +2854,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, intl, "DateTimeFormat", JS_INTL_DATE_TIME_FORMAT_TYPE,
JSDateTimeFormat::kSize, 0, factory->the_hole_value(),
Builtins::kDateTimeFormatConstructor);
- date_time_format_constructor->shared()->set_length(0);
- date_time_format_constructor->shared()->DontAdaptArguments();
+ date_time_format_constructor->shared().set_length(0);
+ date_time_format_constructor->shared().DontAdaptArguments();
InstallWithIntrinsicDefaultProto(
isolate_, date_time_format_constructor,
Context::INTL_DATE_TIME_FORMAT_FUNCTION_INDEX);
@@ -2858,8 +2886,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, intl, "NumberFormat", JS_INTL_NUMBER_FORMAT_TYPE,
JSNumberFormat::kSize, 0, factory->the_hole_value(),
Builtins::kNumberFormatConstructor);
- number_format_constructor->shared()->set_length(0);
- number_format_constructor->shared()->DontAdaptArguments();
+ number_format_constructor->shared().set_length(0);
+ number_format_constructor->shared().DontAdaptArguments();
InstallWithIntrinsicDefaultProto(
isolate_, number_format_constructor,
Context::INTL_NUMBER_FORMAT_FUNCTION_INDEX);
@@ -2888,7 +2916,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> collator_constructor = InstallFunction(
isolate_, intl, "Collator", JS_INTL_COLLATOR_TYPE, JSCollator::kSize,
0, factory->the_hole_value(), Builtins::kCollatorConstructor);
- collator_constructor->shared()->DontAdaptArguments();
+ collator_constructor->shared().DontAdaptArguments();
InstallWithIntrinsicDefaultProto(isolate_, collator_constructor,
Context::INTL_COLLATOR_FUNCTION_INDEX);
@@ -2914,7 +2942,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, intl, "v8BreakIterator", JS_INTL_V8_BREAK_ITERATOR_TYPE,
JSV8BreakIterator::kSize, 0, factory->the_hole_value(),
Builtins::kV8BreakIteratorConstructor);
- v8_break_iterator_constructor->shared()->DontAdaptArguments();
+ v8_break_iterator_constructor->shared().DontAdaptArguments();
SimpleInstallFunction(
isolate_, v8_break_iterator_constructor, "supportedLocalesOf",
@@ -2950,7 +2978,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, intl, "PluralRules", JS_INTL_PLURAL_RULES_TYPE,
JSPluralRules::kSize, 0, factory->the_hole_value(),
Builtins::kPluralRulesConstructor);
- plural_rules_constructor->shared()->DontAdaptArguments();
+ plural_rules_constructor->shared().DontAdaptArguments();
SimpleInstallFunction(isolate(), plural_rules_constructor,
"supportedLocalesOf",
@@ -2974,8 +3002,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate(), intl, "RelativeTimeFormat",
JS_INTL_RELATIVE_TIME_FORMAT_TYPE, JSRelativeTimeFormat::kSize, 0,
factory->the_hole_value(), Builtins::kRelativeTimeFormatConstructor);
- relative_time_format_fun->shared()->set_length(0);
- relative_time_format_fun->shared()->DontAdaptArguments();
+ relative_time_format_fun->shared().set_length(0);
+ relative_time_format_fun->shared().DontAdaptArguments();
SimpleInstallFunction(
isolate(), relative_time_format_fun, "supportedLocalesOf",
@@ -3004,8 +3032,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate(), intl, "ListFormat", JS_INTL_LIST_FORMAT_TYPE,
JSListFormat::kSize, 0, factory->the_hole_value(),
Builtins::kListFormatConstructor);
- list_format_fun->shared()->set_length(0);
- list_format_fun->shared()->DontAdaptArguments();
+ list_format_fun->shared().set_length(0);
+ list_format_fun->shared().DontAdaptArguments();
SimpleInstallFunction(isolate(), list_format_fun, "supportedLocalesOf",
Builtins::kListFormatSupportedLocalesOf, 1, false);
@@ -3025,6 +3053,52 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kListFormatPrototypeFormatToParts, 1,
false);
}
+
+ { // -- L o c a l e
+ Handle<JSFunction> locale_fun = InstallFunction(
+ isolate(), intl, "Locale", JS_INTL_LOCALE_TYPE, JSLocale::kSize, 0,
+ factory->the_hole_value(), Builtins::kLocaleConstructor);
+ InstallWithIntrinsicDefaultProto(isolate(), locale_fun,
+ Context::INTL_LOCALE_FUNCTION_INDEX);
+ locale_fun->shared().set_length(1);
+ locale_fun->shared().DontAdaptArguments();
+
+ // Setup %LocalePrototype%.
+ Handle<JSObject> prototype(
+ JSObject::cast(locale_fun->instance_prototype()), isolate());
+
+ InstallToStringTag(isolate(), prototype, "Intl.Locale");
+
+ SimpleInstallFunction(isolate(), prototype, "toString",
+ Builtins::kLocalePrototypeToString, 0, false);
+ SimpleInstallFunction(isolate(), prototype, "maximize",
+ Builtins::kLocalePrototypeMaximize, 0, false);
+ SimpleInstallFunction(isolate(), prototype, "minimize",
+ Builtins::kLocalePrototypeMinimize, 0, false);
+ // Base locale getters.
+ SimpleInstallGetter(isolate(), prototype, factory->language_string(),
+ Builtins::kLocalePrototypeLanguage, true);
+ SimpleInstallGetter(isolate(), prototype, factory->script_string(),
+ Builtins::kLocalePrototypeScript, true);
+ SimpleInstallGetter(isolate(), prototype, factory->region_string(),
+ Builtins::kLocalePrototypeRegion, true);
+ SimpleInstallGetter(isolate(), prototype, factory->baseName_string(),
+ Builtins::kLocalePrototypeBaseName, true);
+ // Unicode extension getters.
+ SimpleInstallGetter(isolate(), prototype, factory->calendar_string(),
+ Builtins::kLocalePrototypeCalendar, true);
+ SimpleInstallGetter(isolate(), prototype, factory->caseFirst_string(),
+ Builtins::kLocalePrototypeCaseFirst, true);
+ SimpleInstallGetter(isolate(), prototype, factory->collation_string(),
+ Builtins::kLocalePrototypeCollation, true);
+ SimpleInstallGetter(isolate(), prototype, factory->hourCycle_string(),
+ Builtins::kLocalePrototypeHourCycle, true);
+ SimpleInstallGetter(isolate(), prototype, factory->numeric_string(),
+ Builtins::kLocalePrototypeNumeric, true);
+ SimpleInstallGetter(isolate(), prototype,
+ factory->numberingSystem_string(),
+ Builtins::kLocalePrototypeNumberingSystem, true);
+ }
}
#endif // V8_INTL_SUPPORT
@@ -3091,8 +3165,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, factory->InternalizeUtf8String("TypedArray"),
JS_TYPED_ARRAY_TYPE, JSTypedArray::kHeaderSize, 0,
factory->the_hole_value(), Builtins::kTypedArrayBaseConstructor);
- typed_array_fun->shared()->set_native(false);
- typed_array_fun->shared()->set_length(0);
+ typed_array_fun->shared().set_native(false);
+ typed_array_fun->shared().set_length(0);
InstallSpeciesGetter(isolate_, typed_array_fun);
native_context()->set_typed_array_function(*typed_array_fun);
@@ -3181,7 +3255,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
array_prototype_to_string_fun, DONT_ENUM);
}
- { // -- T y p e d A r r a y s
+ {// -- T y p e d A r r a y s
#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype) \
{ \
Handle<JSFunction> fun = \
@@ -3189,7 +3263,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallWithIntrinsicDefaultProto(isolate_, fun, \
Context::TYPE##_ARRAY_FUN_INDEX); \
}
- TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
+ TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
}
@@ -3200,8 +3274,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kDataViewConstructor);
InstallWithIntrinsicDefaultProto(isolate_, data_view_fun,
Context::DATA_VIEW_FUN_INDEX);
- data_view_fun->shared()->set_length(1);
- data_view_fun->shared()->DontAdaptArguments();
+ data_view_fun->shared().set_length(1);
+ data_view_fun->shared().DontAdaptArguments();
// Setup %DataViewPrototype%.
Handle<JSObject> prototype(
@@ -3285,7 +3359,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, "set", Builtins::kMapPrototypeSet, 2, true);
// Check that index of "set" function in JSCollection is correct.
DCHECK_EQ(JSCollection::kAddFunctionDescriptorIndex,
- prototype->map()->LastAdded());
+ prototype->map().LastAdded());
native_context()->set_map_set(*map_set);
Handle<JSFunction> map_has = SimpleInstallFunction(
@@ -3322,8 +3396,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> bigint_fun = InstallFunction(
isolate_, global, "BigInt", JS_VALUE_TYPE, JSValue::kSize, 0,
factory->the_hole_value(), Builtins::kBigIntConstructor);
- bigint_fun->shared()->DontAdaptArguments();
- bigint_fun->shared()->set_length(1);
+ bigint_fun->shared().DontAdaptArguments();
+ bigint_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, bigint_fun,
Context::BIGINT_FUNCTION_INDEX);
@@ -3380,7 +3454,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, "add", Builtins::kSetPrototypeAdd, 1, true);
// Check that index of "add" function in JSCollection is correct.
DCHECK_EQ(JSCollection::kAddFunctionDescriptorIndex,
- prototype->map()->LastAdded());
+ prototype->map().LastAdded());
native_context()->set_set_add(*set_add);
Handle<JSFunction> set_delete = SimpleInstallFunction(
@@ -3429,26 +3503,32 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{ // -- I t e r a t o r R e s u l t
- Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSIteratorResult::kSize,
- TERMINAL_FAST_ELEMENTS_KIND, 2);
- Map::SetPrototype(isolate(), map, isolate_->initial_object_prototype());
- Map::EnsureDescriptorSlack(isolate_, map, 2);
-
- { // value
- Descriptor d = Descriptor::DataField(isolate(), factory->value_string(),
- JSIteratorResult::kValueIndex, NONE,
- Representation::Tagged());
- map->AppendDescriptor(isolate(), &d);
- }
-
- { // done
- Descriptor d = Descriptor::DataField(isolate(), factory->done_string(),
- JSIteratorResult::kDoneIndex, NONE,
- Representation::Tagged());
- map->AppendDescriptor(isolate(), &d);
- }
+ // Setup the map for IterResultObjects created from builtins in such a
+ // way that it's exactly the same map as the one produced by object
+ // literals in the form `{value, done}`. This way we have better sharing
+ // of maps (i.e. less polymorphism) and also make it possible to hit the
+ // fast-paths in various builtins (i.e. promises and collections) with
+ // user defined iterators.
+ Handle<Map> map = factory->ObjectLiteralMapFromCache(native_context(), 2);
+
+ // value
+ map = Map::CopyWithField(isolate(), map, factory->value_string(),
+ FieldType::Any(isolate()), NONE,
+ PropertyConstness::kConst,
+ Representation::Tagged(), INSERT_TRANSITION)
+ .ToHandleChecked();
+
+ // done
+ // TODO(bmeurer): Once FLAG_modify_field_representation_inplace is always
+ // on, we can say Representation::HeapObject() here and have the inplace
+ // update logic take care of the case where someone ever stores a Smi into
+ // the done field.
+ map = Map::CopyWithField(isolate(), map, factory->done_string(),
+ FieldType::Any(isolate()), NONE,
+ PropertyConstness::kConst,
+ Representation::Tagged(), INSERT_TRANSITION)
+ .ToHandleChecked();
- map->SetConstructor(native_context()->object_function());
native_context()->set_iterator_result_map(*map);
}
@@ -3477,7 +3557,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, "set", Builtins::kWeakMapPrototypeSet, 2, true);
// Check that index of "set" function in JSWeakCollection is correct.
DCHECK_EQ(JSWeakCollection::kAddFunctionDescriptorIndex,
- prototype->map()->LastAdded());
+ prototype->map().LastAdded());
native_context()->set_weakmap_set(*weakmap_set);
SimpleInstallFunction(isolate_, prototype, "has",
@@ -3512,7 +3592,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_, prototype, "add", Builtins::kWeakSetPrototypeAdd, 1, true);
// Check that index of "add" function in JSWeakCollection is correct.
DCHECK_EQ(JSWeakCollection::kAddFunctionDescriptorIndex,
- prototype->map()->LastAdded());
+ prototype->map().LastAdded());
native_context()->set_weakset_add(*weakset_add);
@@ -3538,8 +3618,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_->proxy_map()->SetConstructor(*proxy_function);
- proxy_function->shared()->set_internal_formal_parameter_count(2);
- proxy_function->shared()->set_length(2);
+ proxy_function->shared().set_internal_formal_parameter_count(2);
+ proxy_function->shared().set_length(2);
native_context()->set_proxy_function(*proxy_function);
JSObject::AddProperty(isolate_, global, name, proxy_function, DONT_ENUM);
@@ -3681,7 +3761,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // --- strict mode arguments map
const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
// Create the ThrowTypeError function.
Handle<AccessorPair> callee = factory->NewAccessorPair();
@@ -3712,13 +3792,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
// @@iterator method is added later.
- DCHECK_EQ(native_context()->object_function()->prototype(),
+ DCHECK_EQ(native_context()->object_function().prototype(),
*isolate_->initial_object_prototype());
Map::SetPrototype(isolate(), map, isolate_->initial_object_prototype());
// Copy constructor from the sloppy arguments boilerplate.
map->SetConstructor(
- native_context()->sloppy_arguments_map()->GetConstructor());
+ native_context()->sloppy_arguments_map().GetConstructor());
native_context()->set_strict_arguments_map(*map);
@@ -3764,10 +3844,10 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
isolate(), global, name, JS_TYPED_ARRAY_TYPE,
JSTypedArray::kSizeWithEmbedderFields, 0, factory()->the_hole_value(),
Builtins::kTypedArrayConstructor);
- result->initial_map()->set_elements_kind(elements_kind);
+ result->initial_map().set_elements_kind(elements_kind);
- result->shared()->DontAdaptArguments();
- result->shared()->set_length(3);
+ result->shared().DontAdaptArguments();
+ result->shared().set_length(3);
CHECK(JSObject::SetPrototype(result, typed_array_function, false, kDontThrow)
.FromJust());
@@ -3778,7 +3858,7 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
InstallConstant(isolate(), result, "BYTES_PER_ELEMENT", bytes_per_element);
// Setup prototype object.
- DCHECK(result->prototype()->IsJSObject());
+ DCHECK(result->prototype().IsJSObject());
Handle<JSObject> prototype(JSObject::cast(result->prototype()), isolate());
CHECK(JSObject::SetPrototype(prototype, typed_array_prototype, false,
@@ -3789,7 +3869,6 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
return result;
}
-
void Genesis::InitializeExperimentalGlobal() {
#define FEATURE_INITIALIZE_GLOBAL(id, descr) InitializeGlobal_##id();
@@ -3812,7 +3891,6 @@ bool Bootstrapper::CompileExtraBuiltin(Isolate* isolate, int index) {
arraysize(args), args, EXTENSION_CODE);
}
-
bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<String> source, int argc,
Handle<Object> argv[],
@@ -3852,7 +3930,6 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
.is_null();
}
-
bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
@@ -3925,8 +4002,8 @@ void Genesis::InitializeIteratorFunctions() {
Builtins::kGeneratorFunctionConstructor);
generator_function_function->set_prototype_or_initial_map(
native_context->generator_function_map());
- generator_function_function->shared()->DontAdaptArguments();
- generator_function_function->shared()->set_length(1);
+ generator_function_function->shared().DontAdaptArguments();
+ generator_function_function->shared().set_length(1);
InstallWithIntrinsicDefaultProto(
isolate, generator_function_function,
Context::GENERATOR_FUNCTION_FUNCTION_INDEX);
@@ -3938,7 +4015,7 @@ void Genesis::InitializeIteratorFunctions() {
generator_function_function,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- native_context->generator_function_map()->SetConstructor(
+ native_context->generator_function_map().SetConstructor(
*generator_function_function);
}
@@ -3954,8 +4031,8 @@ void Genesis::InitializeIteratorFunctions() {
Builtins::kAsyncGeneratorFunctionConstructor);
async_generator_function_function->set_prototype_or_initial_map(
native_context->async_generator_function_map());
- async_generator_function_function->shared()->DontAdaptArguments();
- async_generator_function_function->shared()->set_length(1);
+ async_generator_function_function->shared().DontAdaptArguments();
+ async_generator_function_function->shared().set_length(1);
InstallWithIntrinsicDefaultProto(
isolate, async_generator_function_function,
Context::ASYNC_GENERATOR_FUNCTION_FUNCTION_INDEX);
@@ -3968,7 +4045,7 @@ void Genesis::InitializeIteratorFunctions() {
factory->constructor_string(), async_generator_function_function,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- native_context->async_generator_function_map()->SetConstructor(
+ native_context->async_generator_function_map().SetConstructor(
*async_generator_function_function);
}
@@ -3989,7 +4066,7 @@ void Genesis::InitializeIteratorFunctions() {
Handle<JSFunction> set_iterator_function =
CreateFunction(isolate, "SetIterator", JS_SET_VALUE_ITERATOR_TYPE,
JSSetIterator::kSize, 0, prototype, Builtins::kIllegal);
- set_iterator_function->shared()->set_native(false);
+ set_iterator_function->shared().set_native(false);
Handle<Map> set_value_iterator_map(set_iterator_function->initial_map(),
isolate);
@@ -4019,7 +4096,7 @@ void Genesis::InitializeIteratorFunctions() {
Handle<JSFunction> map_iterator_function =
CreateFunction(isolate, "MapIterator", JS_MAP_KEY_ITERATOR_TYPE,
JSMapIterator::kSize, 0, prototype, Builtins::kIllegal);
- map_iterator_function->shared()->set_native(false);
+ map_iterator_function->shared().set_native(false);
Handle<Map> map_key_iterator_map(map_iterator_function->initial_map(),
isolate);
@@ -4049,8 +4126,8 @@ void Genesis::InitializeIteratorFunctions() {
Builtins::kAsyncFunctionConstructor);
async_function_constructor->set_prototype_or_initial_map(
native_context->async_function_map());
- async_function_constructor->shared()->DontAdaptArguments();
- async_function_constructor->shared()->set_length(1);
+ async_function_constructor->shared().DontAdaptArguments();
+ async_function_constructor->shared().set_length(1);
native_context->set_async_function_constructor(*async_function_constructor);
JSObject::ForceSetPrototype(async_function_constructor,
isolate->function_function());
@@ -4102,7 +4179,7 @@ void Genesis::InitializeCallSiteBuiltins() {
Handle<JSFunction> callsite_fun = CreateFunction(
isolate(), "CallSite", JS_OBJECT_TYPE, JSObject::kHeaderSize, 0,
factory->the_hole_value(), Builtins::kUnsupportedThrower);
- callsite_fun->shared()->DontAdaptArguments();
+ callsite_fun->shared().DontAdaptArguments();
isolate()->native_context()->set_callsite_function(*callsite_fun);
// Setup CallSite.prototype.
@@ -4150,22 +4227,18 @@ void Genesis::InitializeCallSiteBuiltins() {
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_namespace_exports)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_public_fields)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_methods)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_static_fields)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_meta)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_separator)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_json_stringify)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_sequence)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_await_optimization)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_hashbang)
#ifdef V8_INTL_SUPPORT
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_add_calendar_numbering_system)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_bigint)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_datetime_style)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_numberformat_unified)
#endif // V8_INTL_SUPPORT
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
@@ -4194,68 +4267,6 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
InstallToStringTag(isolate_, isolate()->atomics_object(), "Atomics");
}
-void Genesis::InitializeGlobal_harmony_string_matchall() {
- if (!FLAG_harmony_string_matchall) return;
-
- { // String.prototype.matchAll
- Handle<JSFunction> string_fun(native_context()->string_function(),
- isolate());
- Handle<JSObject> string_prototype(
- JSObject::cast(string_fun->instance_prototype()), isolate());
-
- SimpleInstallFunction(isolate(), string_prototype, "matchAll",
- Builtins::kStringPrototypeMatchAll, 1, true);
- }
-
- { // RegExp.prototype[@@matchAll]
- Handle<JSFunction> regexp_fun(native_context()->regexp_function(),
- isolate());
- Handle<JSObject> regexp_prototype(
- JSObject::cast(regexp_fun->instance_prototype()), isolate());
- InstallFunctionAtSymbol(isolate(), regexp_prototype,
- factory()->match_all_symbol(), "[Symbol.matchAll]",
- Builtins::kRegExpPrototypeMatchAll, 1, true);
- DCHECK_EQ(JSRegExp::kSymbolMatchAllFunctionDescriptorIndex,
- regexp_prototype->map()->LastAdded());
-
- Handle<Map> regexp_prototype_map(regexp_prototype->map(), isolate());
- Map::SetShouldBeFastPrototypeMap(regexp_prototype_map, true, isolate());
- native_context()->set_regexp_prototype_map(*regexp_prototype_map);
- }
-
- { // --- R e g E x p S t r i n g I t e r a t o r ---
- Handle<JSObject> iterator_prototype(
- native_context()->initial_iterator_prototype(), isolate());
-
- Handle<JSObject> regexp_string_iterator_prototype = factory()->NewJSObject(
- isolate()->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(regexp_string_iterator_prototype,
- iterator_prototype);
-
- InstallToStringTag(isolate(), regexp_string_iterator_prototype,
- "RegExp String Iterator");
-
- SimpleInstallFunction(isolate(), regexp_string_iterator_prototype, "next",
- Builtins::kRegExpStringIteratorPrototypeNext, 0,
- true);
-
- Handle<JSFunction> regexp_string_iterator_function = CreateFunction(
- isolate(), "RegExpStringIterator", JS_REGEXP_STRING_ITERATOR_TYPE,
- JSRegExpStringIterator::kSize, 0, regexp_string_iterator_prototype,
- Builtins::kIllegal);
- regexp_string_iterator_function->shared()->set_native(false);
- native_context()->set_initial_regexp_string_iterator_prototype_map(
- regexp_string_iterator_function->initial_map());
- }
-
- { // @@matchAll Symbol
- Handle<JSFunction> symbol_fun(native_context()->symbol_function(),
- isolate());
- InstallConstant(isolate(), symbol_fun, "matchAll",
- factory()->match_all_symbol());
- }
-}
-
void Genesis::InitializeGlobal_harmony_weak_refs() {
if (!FLAG_harmony_weak_refs) return;
@@ -4275,8 +4286,8 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
JSFinalizationGroup::kSize, 0, finalization_group_prototype,
Builtins::kFinalizationGroupConstructor);
- finalization_group_fun->shared()->DontAdaptArguments();
- finalization_group_fun->shared()->set_length(1);
+ finalization_group_fun->shared().DontAdaptArguments();
+ finalization_group_fun->shared().set_length(1);
// Install the "constructor" property on the prototype.
JSObject::AddProperty(isolate(), finalization_group_prototype,
@@ -4322,8 +4333,8 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
isolate(), weak_ref_name, JS_WEAK_REF_TYPE, JSWeakRef::kSize, 0,
weak_ref_prototype, Builtins::kWeakRefConstructor);
- weak_ref_fun->shared()->DontAdaptArguments();
- weak_ref_fun->shared()->set_length(1);
+ weak_ref_fun->shared().DontAdaptArguments();
+ weak_ref_fun->shared().set_length(1);
// Install the "constructor" property on the prototype.
JSObject::AddProperty(isolate(), weak_ref_prototype,
@@ -4362,7 +4373,7 @@ void Genesis::InitializeGlobal_harmony_weak_refs() {
void Genesis::InitializeGlobal_harmony_promise_all_settled() {
if (!FLAG_harmony_promise_all_settled) return;
SimpleInstallFunction(isolate(), isolate()->promise_function(), "allSettled",
- Builtins::kPromiseAllSettled, 1, false);
+ Builtins::kPromiseAllSettled, 1, true);
Factory* factory = isolate()->factory();
{
Handle<SharedFunctionInfo> info = SimpleCreateSharedFunctionInfo(
@@ -4391,15 +4402,11 @@ void Genesis::InitializeGlobal_harmony_intl_date_format_range() {
factory()->InternalizeUtf8String("Intl"))
.ToHandleChecked());
- Handle<JSObject> date_time_format_object = Handle<JSObject>::cast(
+ Handle<JSFunction> date_time_format_constructor = Handle<JSFunction>::cast(
JSReceiver::GetProperty(
- isolate(), Handle<JSReceiver>(JSReceiver::cast(*intl), isolate()),
- factory()->InternalizeUtf8String("DateTimeFormat"))
+ isolate(), intl, factory()->InternalizeUtf8String("DateTimeFormat"))
.ToHandleChecked());
- Handle<JSFunction> date_time_format_constructor =
- Handle<JSFunction>(JSFunction::cast(*date_time_format_object), isolate());
-
Handle<JSObject> prototype(
JSObject::cast(date_time_format_constructor->prototype()), isolate_);
@@ -4411,60 +4418,6 @@ void Genesis::InitializeGlobal_harmony_intl_date_format_range() {
false);
}
-void Genesis::InitializeGlobal_harmony_locale() {
- if (!FLAG_harmony_locale) return;
-
- Handle<JSObject> intl = Handle<JSObject>::cast(
- JSReceiver::GetProperty(
- isolate(),
- Handle<JSReceiver>(native_context()->global_object(), isolate()),
- factory()->InternalizeUtf8String("Intl"))
- .ToHandleChecked());
-
- Handle<JSFunction> locale_fun = InstallFunction(
- isolate(), intl, "Locale", JS_INTL_LOCALE_TYPE, JSLocale::kSize, 0,
- factory()->the_hole_value(), Builtins::kLocaleConstructor);
- InstallWithIntrinsicDefaultProto(isolate(), locale_fun,
- Context::INTL_LOCALE_FUNCTION_INDEX);
- locale_fun->shared()->set_length(1);
- locale_fun->shared()->DontAdaptArguments();
-
- // Setup %LocalePrototype%.
- Handle<JSObject> prototype(JSObject::cast(locale_fun->instance_prototype()),
- isolate());
-
- InstallToStringTag(isolate(), prototype, "Intl.Locale");
-
- SimpleInstallFunction(isolate(), prototype, "toString",
- Builtins::kLocalePrototypeToString, 0, false);
- SimpleInstallFunction(isolate(), prototype, "maximize",
- Builtins::kLocalePrototypeMaximize, 0, false);
- SimpleInstallFunction(isolate(), prototype, "minimize",
- Builtins::kLocalePrototypeMinimize, 0, false);
- // Base locale getters.
- SimpleInstallGetter(isolate(), prototype, factory()->language_string(),
- Builtins::kLocalePrototypeLanguage, true);
- SimpleInstallGetter(isolate(), prototype, factory()->script_string(),
- Builtins::kLocalePrototypeScript, true);
- SimpleInstallGetter(isolate(), prototype, factory()->region_string(),
- Builtins::kLocalePrototypeRegion, true);
- SimpleInstallGetter(isolate(), prototype, factory()->baseName_string(),
- Builtins::kLocalePrototypeBaseName, true);
- // Unicode extension getters.
- SimpleInstallGetter(isolate(), prototype, factory()->calendar_string(),
- Builtins::kLocalePrototypeCalendar, true);
- SimpleInstallGetter(isolate(), prototype, factory()->caseFirst_string(),
- Builtins::kLocalePrototypeCaseFirst, true);
- SimpleInstallGetter(isolate(), prototype, factory()->collation_string(),
- Builtins::kLocalePrototypeCollation, true);
- SimpleInstallGetter(isolate(), prototype, factory()->hourCycle_string(),
- Builtins::kLocalePrototypeHourCycle, true);
- SimpleInstallGetter(isolate(), prototype, factory()->numeric_string(),
- Builtins::kLocalePrototypeNumeric, true);
- SimpleInstallGetter(isolate(), prototype, factory()->numberingSystem_string(),
- Builtins::kLocalePrototypeNumberingSystem, true);
-}
-
void Genesis::InitializeGlobal_harmony_intl_segmenter() {
if (!FLAG_harmony_intl_segmenter) return;
Handle<JSObject> intl = Handle<JSObject>::cast(
@@ -4477,8 +4430,8 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() {
Handle<JSFunction> segmenter_fun = InstallFunction(
isolate(), intl, "Segmenter", JS_INTL_SEGMENTER_TYPE, JSSegmenter::kSize,
0, factory()->the_hole_value(), Builtins::kSegmenterConstructor);
- segmenter_fun->shared()->set_length(0);
- segmenter_fun->shared()->DontAdaptArguments();
+ segmenter_fun->shared().set_length(0);
+ segmenter_fun->shared().DontAdaptArguments();
SimpleInstallFunction(isolate(), segmenter_fun, "supportedLocalesOf",
Builtins::kSegmenterSupportedLocalesOf, 1, false);
@@ -4535,7 +4488,7 @@ void Genesis::InitializeGlobal_harmony_intl_segmenter() {
Handle<JSFunction> segment_iterator_fun = CreateFunction(
isolate(), name_string, JS_INTL_SEGMENT_ITERATOR_TYPE,
JSSegmentIterator::kSize, 0, prototype, Builtins::kIllegal);
- segment_iterator_fun->shared()->set_native(false);
+ segment_iterator_fun->shared().set_native(false);
Handle<Map> segment_iterator_map(segment_iterator_fun->initial_map(),
isolate());
@@ -4564,8 +4517,8 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
CreateFunction(isolate(), name, JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSizeWithEmbedderFields, 0, prototype,
Builtins::kArrayBufferConstructor);
- array_buffer_fun->shared()->DontAdaptArguments();
- array_buffer_fun->shared()->set_length(1);
+ array_buffer_fun->shared().DontAdaptArguments();
+ array_buffer_fun->shared().set_length(1);
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(isolate(), prototype, factory()->constructor_string(),
@@ -4624,7 +4577,7 @@ void Genesis::InstallInternalPackedArray(Handle<JSObject> target,
InstallFunction(isolate(), target, name, JS_ARRAY_TYPE, JSArray::kSize, 0,
prototype, Builtins::kInternalArrayConstructor);
- array_function->shared()->DontAdaptArguments();
+ array_function->shared().DontAdaptArguments();
Handle<Map> original_map(array_function->initial_map(), isolate());
Handle<Map> initial_map = Map::Copy(isolate(), original_map, "InternalArray");
@@ -4634,8 +4587,8 @@ void Genesis::InstallInternalPackedArray(Handle<JSObject> target,
// Make "length" magic on instances.
Map::EnsureDescriptorSlack(isolate(), initial_map, 1);
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
{ // Add length.
Descriptor d = Descriptor::AccessorConstant(
@@ -4701,7 +4654,7 @@ bool Genesis::InstallNatives() {
Handle<JSFunction> promise_internal_constructor =
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kPromiseInternalConstructor, 1, true);
- promise_internal_constructor->shared()->set_native(false);
+ promise_internal_constructor->shared().set_native(false);
JSObject::AddProperty(isolate(), extras_utils, "createPromise",
promise_internal_constructor, DONT_ENUM);
@@ -4709,7 +4662,7 @@ bool Genesis::InstallNatives() {
Handle<JSFunction> promise_internal_reject =
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kPromiseInternalReject, 2, true);
- promise_internal_reject->shared()->set_native(false);
+ promise_internal_reject->shared().set_native(false);
JSObject::AddProperty(isolate(), extras_utils, "rejectPromise",
promise_internal_reject, DONT_ENUM);
@@ -4717,7 +4670,7 @@ bool Genesis::InstallNatives() {
Handle<JSFunction> promise_internal_resolve =
SimpleCreateFunction(isolate(), factory()->empty_string(),
Builtins::kPromiseInternalResolve, 2, true);
- promise_internal_resolve->shared()->set_native(false);
+ promise_internal_resolve->shared().set_native(false);
JSObject::AddProperty(isolate(), extras_utils, "resolvePromise",
promise_internal_resolve, DONT_ENUM);
@@ -4754,10 +4707,10 @@ bool Genesis::InstallNatives() {
{
Handle<JSFunction> object_function(native_context()->object_function(),
isolate());
- DCHECK(JSObject::cast(object_function->initial_map()->prototype())
- ->HasFastProperties());
+ DCHECK(JSObject::cast(object_function->initial_map().prototype())
+ .HasFastProperties());
native_context()->set_object_function_prototype_map(
- HeapObject::cast(object_function->initial_map()->prototype())->map());
+ HeapObject::cast(object_function->initial_map().prototype()).map());
}
// Store the map for the %StringPrototype% after the natives has been compiled
@@ -4765,10 +4718,10 @@ bool Genesis::InstallNatives() {
Handle<JSFunction> string_function(native_context()->string_function(),
isolate());
JSObject string_function_prototype =
- JSObject::cast(string_function->initial_map()->prototype());
- DCHECK(string_function_prototype->HasFastProperties());
+ JSObject::cast(string_function->initial_map().prototype());
+ DCHECK(string_function_prototype.HasFastProperties());
native_context()->set_string_function_prototype_map(
- string_function_prototype->map());
+ string_function_prototype.map());
Handle<JSGlobalObject> global_object =
handle(native_context()->global_object(), isolate());
@@ -4821,7 +4774,7 @@ bool Genesis::InstallNatives() {
// Verification of important array prototype properties.
Object length = proto->length();
- CHECK(length->IsSmi());
+ CHECK(length.IsSmi());
CHECK_EQ(Smi::ToInt(length), 0);
CHECK(proto->HasSmiOrObjectElements());
// This is necessary to enable fast checks for absence of elements
@@ -4950,10 +4903,10 @@ bool Genesis::InstallNatives() {
{
JSFunction array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
- array_function->initial_map()->instance_descriptors(), isolate());
+ array_function.initial_map().instance_descriptors(), isolate());
Handle<String> length = factory()->length_string();
int old = array_descriptors->SearchWithCache(
- isolate(), *length, array_function->initial_map());
+ isolate(), *length, array_function.initial_map());
DCHECK_NE(old, DescriptorArray::kNotFound);
Descriptor d = Descriptor::AccessorConstant(
length, handle(array_descriptors->GetStrongValue(old), isolate()),
@@ -5055,7 +5008,6 @@ void Genesis::InitializeNormalizedMapCaches() {
native_context()->set_normalized_map_cache(*cache);
}
-
bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
// Don't install extensions into the snapshot.
@@ -5088,7 +5040,6 @@ bool Genesis::InstallSpecialObjects(Isolate* isolate,
return true;
}
-
static uint32_t Hash(RegisteredExtension* extension) {
return v8::internal::ComputePointerHash(extension);
}
@@ -5132,7 +5083,6 @@ bool Genesis::InstallExtensions(Isolate* isolate,
InstallRequestedExtensions(isolate, extensions, &extension_states);
}
-
bool Genesis::InstallAutoExtensions(Isolate* isolate,
ExtensionStates* extension_states) {
for (v8::RegisteredExtension* it = v8::RegisteredExtension::first_extension();
@@ -5145,7 +5095,6 @@ bool Genesis::InstallAutoExtensions(Isolate* isolate,
return true;
}
-
bool Genesis::InstallRequestedExtensions(Isolate* isolate,
v8::ExtensionConfiguration* extensions,
ExtensionStates* extension_states) {
@@ -5155,11 +5104,9 @@ bool Genesis::InstallRequestedExtensions(Isolate* isolate,
return true;
}
-
// Installs a named extension. This methods is unoptimized and does
// not scale well if we want to support a large number of extensions.
-bool Genesis::InstallExtension(Isolate* isolate,
- const char* name,
+bool Genesis::InstallExtension(Isolate* isolate, const char* name,
ExtensionStates* extension_states) {
for (v8::RegisteredExtension* it = v8::RegisteredExtension::first_extension();
it != nullptr; it = it->next()) {
@@ -5167,12 +5114,10 @@ bool Genesis::InstallExtension(Isolate* isolate,
return InstallExtension(isolate, it, extension_states);
}
}
- return Utils::ApiCheck(false,
- "v8::Context::New()",
+ return Utils::ApiCheck(false, "v8::Context::New()",
"Cannot find required extension");
}
-
bool Genesis::InstallExtension(Isolate* isolate,
v8::RegisteredExtension* current,
ExtensionStates* extension_states) {
@@ -5182,8 +5127,7 @@ bool Genesis::InstallExtension(Isolate* isolate,
// The current node has already been visited so there must be a
// cycle in the dependency graph; fail.
if (!Utils::ApiCheck(extension_states->get_state(current) != VISITED,
- "v8::Context::New()",
- "Circular extension dependency")) {
+ "v8::Context::New()", "Circular extension dependency")) {
return false;
}
DCHECK(extension_states->get_state(current) == UNVISITED);
@@ -5191,8 +5135,7 @@ bool Genesis::InstallExtension(Isolate* isolate,
v8::Extension* extension = current->extension();
// Install the extension's dependencies
for (int i = 0; i < extension->dependency_count(); i++) {
- if (!InstallExtension(isolate,
- extension->dependencies()[i],
+ if (!InstallExtension(isolate, extension->dependencies()[i],
extension_states)) {
return false;
}
@@ -5213,7 +5156,6 @@ bool Genesis::InstallExtension(Isolate* isolate,
return result;
}
-
bool Genesis::ConfigureGlobalObjects(
v8::Local<v8::ObjectTemplate> global_proxy_template) {
Handle<JSObject> global_proxy(native_context()->global_proxy(), isolate());
@@ -5229,7 +5171,7 @@ bool Genesis::ConfigureGlobalObjects(
Handle<FunctionTemplateInfo> proxy_constructor(
FunctionTemplateInfo::cast(global_proxy_data->constructor()),
isolate());
- if (!proxy_constructor->GetPrototypeTemplate()->IsUndefined(isolate())) {
+ if (!proxy_constructor->GetPrototypeTemplate().IsUndefined(isolate())) {
Handle<ObjectTemplateInfo> global_object_data(
ObjectTemplateInfo::cast(proxy_constructor->GetPrototypeTemplate()),
isolate());
@@ -5240,7 +5182,7 @@ bool Genesis::ConfigureGlobalObjects(
JSObject::ForceSetPrototype(global_proxy, global_object);
native_context()->set_array_buffer_map(
- native_context()->array_buffer_fun()->initial_map());
+ native_context()->array_buffer_fun().initial_map());
Handle<JSFunction> js_map_fun(native_context()->js_map_fun(), isolate());
Handle<JSFunction> js_set_fun(native_context()->js_set_fun(), isolate());
@@ -5261,12 +5203,11 @@ bool Genesis::ConfigureGlobalObjects(
return true;
}
-
bool Genesis::ConfigureApiObject(Handle<JSObject> object,
Handle<ObjectTemplateInfo> object_template) {
DCHECK(!object_template.is_null());
DCHECK(FunctionTemplateInfo::cast(object_template->constructor())
- ->IsTemplateFor(object->map()));;
+ .IsTemplateFor(object->map()));
MaybeHandle<JSObject> maybe_obj =
ApiNatives::InstantiateObject(object->GetIsolate(), object_template);
@@ -5296,8 +5237,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
// in the snapshotted global object.
if (from->HasFastProperties()) {
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(from->map()->instance_descriptors(), isolate());
- for (int i = 0; i < from->map()->NumberOfOwnDescriptors(); i++) {
+ Handle<DescriptorArray>(from->map().instance_descriptors(), isolate());
+ for (int i = 0; i < from->map().NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
if (details.location() == kField) {
if (details.kind() == kData) {
@@ -5317,34 +5258,23 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
} else {
DCHECK_EQ(kDescriptor, details.location());
- if (details.kind() == kData) {
- DCHECK(!FLAG_track_constant_fields);
- HandleScope inner(isolate());
- Handle<Name> key = Handle<Name>(descs->GetKey(i), isolate());
- // If the property is already there we skip it.
- if (PropertyAlreadyExists(isolate(), to, key)) continue;
- Handle<Object> value(descs->GetStrongValue(i), isolate());
- JSObject::AddProperty(isolate(), to, key, value,
- details.attributes());
- } else {
- DCHECK_EQ(kAccessor, details.kind());
- Handle<Name> key(descs->GetKey(i), isolate());
- // If the property is already there we skip it.
- if (PropertyAlreadyExists(isolate(), to, key)) continue;
- HandleScope inner(isolate());
- DCHECK(!to->HasFastProperties());
- // Add to dictionary.
- Handle<Object> value(descs->GetStrongValue(i), isolate());
- PropertyDetails d(kAccessor, details.attributes(),
- PropertyCellType::kMutable);
- JSObject::SetNormalizedProperty(to, key, value, d);
- }
+ DCHECK_EQ(kAccessor, details.kind());
+ Handle<Name> key(descs->GetKey(i), isolate());
+ // If the property is already there we skip it.
+ if (PropertyAlreadyExists(isolate(), to, key)) continue;
+ HandleScope inner(isolate());
+ DCHECK(!to->HasFastProperties());
+ // Add to dictionary.
+ Handle<Object> value(descs->GetStrongValue(i), isolate());
+ PropertyDetails d(kAccessor, details.attributes(),
+ PropertyCellType::kMutable);
+ JSObject::SetNormalizedProperty(to, key, value, d);
}
}
} else if (from->IsJSGlobalObject()) {
// Copy all keys and values in enumeration order.
Handle<GlobalDictionary> properties(
- JSGlobalObject::cast(*from)->global_dictionary(), isolate());
+ JSGlobalObject::cast(*from).global_dictionary(), isolate());
Handle<FixedArray> indices =
GlobalDictionary::IterationIndices(isolate(), properties);
for (int i = 0; i < indices->length(); i++) {
@@ -5371,7 +5301,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
int key_index = Smi::ToInt(key_indices->get(i));
Object raw_key = properties->KeyAt(key_index);
DCHECK(properties->IsKey(roots, raw_key));
- DCHECK(raw_key->IsName());
+ DCHECK(raw_key.IsName());
Handle<Name> key(Name::cast(raw_key), isolate());
// If the property is already there we skip it.
if (PropertyAlreadyExists(isolate(), to, key)) continue;
@@ -5387,7 +5317,6 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
}
}
-
void Genesis::TransferIndexedProperties(Handle<JSObject> from,
Handle<JSObject> to) {
// Cloning the elements array is sufficient.
@@ -5397,7 +5326,6 @@ void Genesis::TransferIndexedProperties(Handle<JSObject> from,
to->set_elements(*to_elements);
}
-
void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
HandleScope outer(isolate());
@@ -5408,7 +5336,7 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
TransferIndexedProperties(from, to);
// Transfer the prototype (new map is needed).
- Handle<HeapObject> proto(from->map()->prototype(), isolate());
+ Handle<HeapObject> proto(from->map().prototype(), isolate());
JSObject::ForceSetPrototype(to, proto);
}
@@ -5437,7 +5365,7 @@ Genesis::Genesis(
// The global proxy function to reinitialize this global proxy is in the
// context that is yet to be deserialized. We need to prepare a global
// proxy of the correct size.
- Object size = isolate->heap()->serialized_global_proxy_sizes()->get(
+ Object size = isolate->heap()->serialized_global_proxy_sizes().get(
static_cast<int>(context_snapshot_index) - 1);
instance_size = Smi::ToInt(size);
} else {
@@ -5520,19 +5448,19 @@ Genesis::Genesis(
// Install experimental natives. Do not include them into the
// snapshot as we should be able to turn them off at runtime. Re-installing
// them after they have already been deserialized would also fail.
- if (!isolate->serializer_enabled()) {
- InitializeExperimentalGlobal();
-
- // Store String.prototype's map again in case it has been changed by
- // experimental natives.
- Handle<JSFunction> string_function(native_context()->string_function(),
- isolate);
- JSObject string_function_prototype =
- JSObject::cast(string_function->initial_map()->prototype());
- DCHECK(string_function_prototype->HasFastProperties());
- native_context()->set_string_function_prototype_map(
- string_function_prototype->map());
- }
+ if (!isolate->serializer_enabled()) {
+ InitializeExperimentalGlobal();
+
+ // Store String.prototype's map again in case it has been changed by
+ // experimental natives.
+ Handle<JSFunction> string_function(native_context()->string_function(),
+ isolate);
+ JSObject string_function_prototype =
+ JSObject::cast(string_function->initial_map().prototype());
+ DCHECK(string_function_prototype.HasFastProperties());
+ native_context()->set_string_function_prototype_map(
+ string_function_prototype.map());
+ }
if (FLAG_disallow_code_generation_from_strings) {
native_context()->set_allow_code_gen_from_strings(
@@ -5579,8 +5507,8 @@ Genesis::Genesis(Isolate* isolate,
ObjectTemplateInfo::cast(global_constructor->GetPrototypeTemplate()),
isolate);
Handle<JSObject> global_object =
- ApiNatives::InstantiateRemoteObject(
- global_object_template).ToHandleChecked();
+ ApiNatives::InstantiateRemoteObject(global_object_template)
+ .ToHandleChecked();
// (Re)initialize the global proxy object.
DCHECK_EQ(global_proxy_data->embedder_field_count(),
@@ -5596,10 +5524,10 @@ Genesis::Genesis(Isolate* isolate,
// Configure the hidden prototype chain of the global proxy.
JSObject::ForceSetPrototype(global_proxy, global_object);
- global_proxy->map()->SetConstructor(*global_constructor);
+ global_proxy->map().SetConstructor(*global_constructor);
// TODO(dcheng): This is a hack. Why does this need to be manually called
// here? Line 4812 should have taken care of it?
- global_proxy->map()->set_has_hidden_prototype(true);
+ global_proxy->map().set_has_hidden_prototype(true);
global_proxy_ = global_proxy;
}
@@ -5607,10 +5535,7 @@ Genesis::Genesis(Isolate* isolate,
// Support for thread preemption.
// Reserve space for statics needing saving and restoring.
-int Bootstrapper::ArchiveSpacePerThread() {
- return sizeof(NestingCounterType);
-}
-
+int Bootstrapper::ArchiveSpacePerThread() { return sizeof(NestingCounterType); }
// Archive statics that are thread-local.
char* Bootstrapper::ArchiveState(char* to) {
@@ -5619,18 +5544,14 @@ char* Bootstrapper::ArchiveState(char* to) {
return to + sizeof(NestingCounterType);
}
-
// Restore statics that are thread-local.
char* Bootstrapper::RestoreState(char* from) {
nesting_ = *reinterpret_cast<NestingCounterType*>(from);
return from + sizeof(NestingCounterType);
}
-
// Called when the top-level V8 mutex is destroyed.
-void Bootstrapper::FreeThreadResources() {
- DCHECK(!IsActive());
-}
+void Bootstrapper::FreeThreadResources() { DCHECK(!IsActive()); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/init/bootstrapper.h
index 329bf57c50..35295c3e88 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/init/bootstrapper.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BOOTSTRAPPER_H_
-#define V8_BOOTSTRAPPER_H_
+#ifndef V8_INIT_BOOTSTRAPPER_H_
+#define V8_INIT_BOOTSTRAPPER_H_
#include "src/heap/factory.h"
#include "src/objects/fixed-array.h"
#include "src/objects/shared-function-info.h"
+#include "src/objects/visitors.h"
#include "src/snapshot/natives.h"
-#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -39,7 +39,6 @@ class SourceCodeCache final {
DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
};
-
// The Boostrapper is the public interface for creating a JavaScript global
// context.
class Bootstrapper final {
@@ -98,7 +97,7 @@ class Bootstrapper final {
void LogAllMaps();
Isolate* isolate_;
- typedef int NestingCounterType;
+ using NestingCounterType = int;
NestingCounterType nesting_;
SourceCodeCache extensions_cache_;
@@ -118,9 +117,7 @@ class BootstrapperActive final {
++bootstrapper_->nesting_;
}
- ~BootstrapperActive() {
- --bootstrapper_->nesting_;
- }
+ ~BootstrapperActive() { --bootstrapper_->nesting_; }
private:
Bootstrapper* bootstrapper_;
@@ -131,4 +128,4 @@ class BootstrapperActive final {
} // namespace internal
} // namespace v8
-#endif // V8_BOOTSTRAPPER_H_
+#endif // V8_INIT_BOOTSTRAPPER_H_
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index ccb3ac8e64..2293dc67d7 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -2,26 +2,39 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_SYMBOLS_H_
-#define V8_HEAP_SYMBOLS_H_
+#ifndef V8_INIT_HEAP_SYMBOLS_H_
+#define V8_INIT_HEAP_SYMBOLS_H_
#ifdef V8_INTL_SUPPORT
#define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
V(_, adoptText_string, "adoptText") \
V(_, baseName_string, "baseName") \
+ V(_, accounting_string, "accounting") \
V(_, breakType_string, "breakType") \
V(_, calendar_string, "calendar") \
V(_, cardinal_string, "cardinal") \
V(_, caseFirst_string, "caseFirst") \
V(_, compare_string, "compare") \
V(_, current_string, "current") \
+ V(_, collation_string, "collation") \
+ V(_, compact_string, "compact") \
+ V(_, compactDisplay_string, "compactDisplay") \
+ V(_, currency_string, "currency") \
+ V(_, currencyDisplay_string, "currencyDisplay") \
+ V(_, currencySign_string, "currencySign") \
V(_, dateStyle_string, "dateStyle") \
V(_, day_string, "day") \
V(_, dayPeriod_string, "dayPeriod") \
V(_, decimal_string, "decimal") \
+ V(_, endRange_string, "endRange") \
+ V(_, engineering_string, "engineering") \
V(_, era_string, "era") \
V(_, first_string, "first") \
V(_, format_string, "format") \
+ V(_, except_zero_string, "except-zero") \
+ V(_, exponentInteger_string, "exponentInteger") \
+ V(_, exponentMinusSign_string, "exponentMinusSign") \
+ V(_, exponentSeparator_string, "exponentSeparator") \
V(_, fraction_string, "fraction") \
V(_, full_string, "full") \
V(_, granularity_string, "granularity") \
@@ -34,9 +47,6 @@
V(_, hour_string, "hour") \
V(_, hour12_string, "hour12") \
V(_, hourCycle_string, "hourCycle") \
- V(_, collation_string, "collation") \
- V(_, currency_string, "currency") \
- V(_, currencyDisplay_string, "currencyDisplay") \
V(_, ideo_string, "ideo") \
V(_, ignorePunctuation_string, "ignorePunctuation") \
V(_, Invalid_Date_string, "Invalid Date") \
@@ -58,7 +68,10 @@
V(_, minute_string, "minute") \
V(_, month_string, "month") \
V(_, nan_string, "nan") \
+ V(_, narrow_symbol_string, "narrow-symbol") \
+ V(_, never_string, "never") \
V(_, none_string, "none") \
+ V(_, notation_string, "notation") \
V(_, normal_string, "normal") \
V(_, numberingSystem_string, "numberingSystem") \
V(_, numeric_string, "numeric") \
@@ -67,11 +80,16 @@
V(_, plusSign_string, "plusSign") \
V(_, quarter_string, "quarter") \
V(_, region_string, "region") \
+ V(_, scientific_string, "scientific") \
V(_, second_string, "second") \
V(_, segment_string, "segment") \
V(_, SegmentIterator_string, "Segment Iterator") \
V(_, sensitivity_string, "sensitivity") \
V(_, sep_string, "sep") \
+ V(_, shared_string, "shared") \
+ V(_, signDisplay_string, "signDisplay") \
+ V(_, standard_string, "standard") \
+ V(_, startRange_string, "startRange") \
V(_, strict_string, "strict") \
V(_, style_string, "style") \
V(_, term_string, "term") \
@@ -85,223 +103,217 @@
V(_, useGrouping_string, "useGrouping") \
V(_, UTC_string, "UTC") \
V(_, unit_string, "unit") \
+ V(_, unitDisplay_string, "unitDisplay") \
V(_, weekday_string, "weekday") \
V(_, year_string, "year")
#else // V8_INTL_SUPPORT
#define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _)
#endif // V8_INTL_SUPPORT
-#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
- INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
- V(_, add_string, "add") \
- V(_, always_string, "always") \
- V(_, anonymous_function_string, "(anonymous function)") \
- V(_, anonymous_string, "anonymous") \
- V(_, apply_string, "apply") \
- V(_, Arguments_string, "Arguments") \
- V(_, arguments_string, "arguments") \
- V(_, arguments_to_string, "[object Arguments]") \
- V(_, Array_string, "Array") \
- V(_, array_to_string, "[object Array]") \
- V(_, ArrayBuffer_string, "ArrayBuffer") \
- V(_, ArrayIterator_string, "Array Iterator") \
- V(_, as_string, "as") \
- V(_, assign_string, "assign") \
- V(_, async_string, "async") \
- V(_, auto_string, "auto") \
- V(_, await_string, "await") \
- V(_, BigInt_string, "BigInt") \
- V(_, bigint_string, "bigint") \
- V(_, BigInt64Array_string, "BigInt64Array") \
- V(_, BigUint64Array_string, "BigUint64Array") \
- V(_, bind_string, "bind") \
- V(_, Boolean_string, "Boolean") \
- V(_, boolean_string, "boolean") \
- V(_, boolean_to_string, "[object Boolean]") \
- V(_, bound__string, "bound ") \
- V(_, buffer_string, "buffer") \
- V(_, byte_length_string, "byteLength") \
- V(_, byte_offset_string, "byteOffset") \
- V(_, CompileError_string, "CompileError") \
- V(_, call_string, "call") \
- V(_, callee_string, "callee") \
- V(_, caller_string, "caller") \
- V(_, character_string, "character") \
- V(_, closure_string, "(closure)") \
- V(_, code_string, "code") \
- V(_, column_string, "column") \
- V(_, computed_string, "<computed>") \
- V(_, configurable_string, "configurable") \
- V(_, conjunction_string, "conjunction") \
- V(_, construct_string, "construct") \
- V(_, constructor_string, "constructor") \
- V(_, create_string, "create") \
- V(_, Date_string, "Date") \
- V(_, date_to_string, "[object Date]") \
- V(_, default_string, "default") \
- V(_, defineProperty_string, "defineProperty") \
- V(_, deleteProperty_string, "deleteProperty") \
- V(_, disjunction_string, "disjunction") \
- V(_, display_name_string, "displayName") \
- V(_, done_string, "done") \
- V(_, dot_catch_string, ".catch") \
- V(_, dot_default_string, ".default") \
- V(_, dot_for_string, ".for") \
- V(_, dot_generator_object_string, ".generator_object") \
- V(_, dot_iterator_string, ".iterator") \
- V(_, dot_promise_string, ".promise") \
- V(_, dot_result_string, ".result") \
- V(_, dot_string, ".") \
- V(_, dot_switch_tag_string, ".switch_tag") \
- V(_, dotAll_string, "dotAll") \
- V(_, entries_string, "entries") \
- V(_, enumerable_string, "enumerable") \
- V(_, element_string, "element") \
- V(_, Error_string, "Error") \
- V(_, error_to_string, "[object Error]") \
- V(_, eval_string, "eval") \
- V(_, EvalError_string, "EvalError") \
- V(_, exec_string, "exec") \
- V(_, false_string, "false") \
- V(_, flags_string, "flags") \
- V(_, Float32Array_string, "Float32Array") \
- V(_, Float64Array_string, "Float64Array") \
- V(_, from_string, "from") \
- V(_, Function_string, "Function") \
- V(_, function_native_code_string, "function () { [native code] }") \
- V(_, function_string, "function") \
- V(_, function_to_string, "[object Function]") \
- V(_, Generator_string, "Generator") \
- V(_, get_space_string, "get ") \
- V(_, get_string, "get") \
- V(_, getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
- V(_, getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
- V(_, getPrototypeOf_string, "getPrototypeOf") \
- V(_, global_string, "global") \
- V(_, globalThis_string, "globalThis") \
- V(_, groups_string, "groups") \
- V(_, has_string, "has") \
- V(_, ignoreCase_string, "ignoreCase") \
- V(_, illegal_access_string, "illegal access") \
- V(_, illegal_argument_string, "illegal argument") \
- V(_, index_string, "index") \
- V(_, Infinity_string, "Infinity") \
- V(_, infinity_string, "infinity") \
- V(_, input_string, "input") \
- V(_, Int16Array_string, "Int16Array") \
- V(_, Int32Array_string, "Int32Array") \
- V(_, Int8Array_string, "Int8Array") \
- V(_, isExtensible_string, "isExtensible") \
- V(_, isView_string, "isView") \
- V(_, keys_string, "keys") \
- V(_, lastIndex_string, "lastIndex") \
- V(_, length_string, "length") \
- V(_, let_string, "let") \
- V(_, line_string, "line") \
- V(_, LinkError_string, "LinkError") \
- V(_, long_string, "long") \
- V(_, Map_string, "Map") \
- V(_, MapIterator_string, "Map Iterator") \
- V(_, medium_string, "medium") \
- V(_, message_string, "message") \
- V(_, meta_string, "meta") \
- V(_, minus_Infinity_string, "-Infinity") \
- V(_, Module_string, "Module") \
- V(_, multiline_string, "multiline") \
- V(_, name_string, "name") \
- V(_, NaN_string, "NaN") \
- V(_, narrow_string, "narrow") \
- V(_, native_string, "native") \
- V(_, new_target_string, ".new.target") \
- V(_, next_string, "next") \
- V(_, NFC_string, "NFC") \
- V(_, NFD_string, "NFD") \
- V(_, NFKC_string, "NFKC") \
- V(_, NFKD_string, "NFKD") \
- V(_, not_equal, "not-equal") \
- V(_, null_string, "null") \
- V(_, null_to_string, "[object Null]") \
- V(_, Number_string, "Number") \
- V(_, number_string, "number") \
- V(_, number_to_string, "[object Number]") \
- V(_, Object_string, "Object") \
- V(_, object_string, "object") \
- V(_, object_to_string, "[object Object]") \
- V(_, of_string, "of") \
- V(_, ok, "ok") \
- V(_, one_string, "1") \
- V(_, ownKeys_string, "ownKeys") \
- V(_, percent_string, "percent") \
- V(_, position_string, "position") \
- V(_, preventExtensions_string, "preventExtensions") \
- V(_, private_constructor_string, "#constructor") \
- V(_, Promise_string, "Promise") \
- V(_, promise_string, "promise") \
- V(_, proto_string, "__proto__") \
- V(_, prototype_string, "prototype") \
- V(_, proxy_string, "proxy") \
- V(_, Proxy_string, "Proxy") \
- V(_, query_colon_string, "(?:)") \
- V(_, RangeError_string, "RangeError") \
- V(_, raw_string, "raw") \
- V(_, ReferenceError_string, "ReferenceError") \
- V(_, ReflectGet_string, "Reflect.get") \
- V(_, ReflectHas_string, "Reflect.has") \
- V(_, RegExp_string, "RegExp") \
- V(_, regexp_to_string, "[object RegExp]") \
- V(_, resolve_string, "resolve") \
- V(_, return_string, "return") \
- V(_, revoke_string, "revoke") \
- V(_, RuntimeError_string, "RuntimeError") \
- V(_, Script_string, "Script") \
- V(_, script_string, "script") \
- V(_, short_string, "short") \
- V(_, Set_string, "Set") \
- V(_, sentence_string, "sentence") \
- V(_, set_space_string, "set ") \
- V(_, set_string, "set") \
- V(_, SetIterator_string, "Set Iterator") \
- V(_, setPrototypeOf_string, "setPrototypeOf") \
- V(_, SharedArrayBuffer_string, "SharedArrayBuffer") \
- V(_, source_string, "source") \
- V(_, sourceText_string, "sourceText") \
- V(_, stack_string, "stack") \
- V(_, stackTraceLimit_string, "stackTraceLimit") \
- V(_, sticky_string, "sticky") \
- V(_, String_string, "String") \
- V(_, string_string, "string") \
- V(_, string_to_string, "[object String]") \
- V(_, symbol_species_string, "[Symbol.species]") \
- V(_, Symbol_string, "Symbol") \
- V(_, symbol_string, "symbol") \
- V(_, SyntaxError_string, "SyntaxError") \
- V(_, target_string, "target") \
- V(_, then_string, "then") \
- V(_, this_function_string, ".this_function") \
- V(_, this_string, "this") \
- V(_, throw_string, "throw") \
- V(_, timed_out, "timed-out") \
- V(_, toJSON_string, "toJSON") \
- V(_, toString_string, "toString") \
- V(_, true_string, "true") \
- V(_, TypeError_string, "TypeError") \
- V(_, Uint16Array_string, "Uint16Array") \
- V(_, Uint32Array_string, "Uint32Array") \
- V(_, Uint8Array_string, "Uint8Array") \
- V(_, Uint8ClampedArray_string, "Uint8ClampedArray") \
- V(_, undefined_string, "undefined") \
- V(_, undefined_to_string, "[object Undefined]") \
- V(_, unicode_string, "unicode") \
- V(_, URIError_string, "URIError") \
- V(_, value_string, "value") \
- V(_, valueOf_string, "valueOf") \
- V(_, values_string, "values") \
- V(_, WeakMap_string, "WeakMap") \
- V(_, WeakRef_string, "WeakRef") \
- V(_, WeakSet_string, "WeakSet") \
- V(_, week_string, "week") \
- V(_, word_string, "word") \
- V(_, writable_string, "writable") \
+#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+ INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
+ V(_, add_string, "add") \
+ V(_, always_string, "always") \
+ V(_, anonymous_function_string, "(anonymous function)") \
+ V(_, anonymous_string, "anonymous") \
+ V(_, apply_string, "apply") \
+ V(_, Arguments_string, "Arguments") \
+ V(_, arguments_string, "arguments") \
+ V(_, arguments_to_string, "[object Arguments]") \
+ V(_, Array_string, "Array") \
+ V(_, array_to_string, "[object Array]") \
+ V(_, ArrayBuffer_string, "ArrayBuffer") \
+ V(_, ArrayIterator_string, "Array Iterator") \
+ V(_, as_string, "as") \
+ V(_, async_string, "async") \
+ V(_, auto_string, "auto") \
+ V(_, await_string, "await") \
+ V(_, BigInt_string, "BigInt") \
+ V(_, bigint_string, "bigint") \
+ V(_, BigInt64Array_string, "BigInt64Array") \
+ V(_, BigUint64Array_string, "BigUint64Array") \
+ V(_, bind_string, "bind") \
+ V(_, Boolean_string, "Boolean") \
+ V(_, boolean_string, "boolean") \
+ V(_, boolean_to_string, "[object Boolean]") \
+ V(_, bound__string, "bound ") \
+ V(_, buffer_string, "buffer") \
+ V(_, byte_length_string, "byteLength") \
+ V(_, byte_offset_string, "byteOffset") \
+ V(_, CompileError_string, "CompileError") \
+ V(_, callee_string, "callee") \
+ V(_, caller_string, "caller") \
+ V(_, character_string, "character") \
+ V(_, closure_string, "(closure)") \
+ V(_, code_string, "code") \
+ V(_, column_string, "column") \
+ V(_, computed_string, "<computed>") \
+ V(_, configurable_string, "configurable") \
+ V(_, conjunction_string, "conjunction") \
+ V(_, construct_string, "construct") \
+ V(_, constructor_string, "constructor") \
+ V(_, Date_string, "Date") \
+ V(_, date_to_string, "[object Date]") \
+ V(_, default_string, "default") \
+ V(_, defineProperty_string, "defineProperty") \
+ V(_, deleteProperty_string, "deleteProperty") \
+ V(_, disjunction_string, "disjunction") \
+ V(_, display_name_string, "displayName") \
+ V(_, done_string, "done") \
+ V(_, dot_brand_string, ".brand") \
+ V(_, dot_catch_string, ".catch") \
+ V(_, dot_default_string, ".default") \
+ V(_, dot_for_string, ".for") \
+ V(_, dot_generator_object_string, ".generator_object") \
+ V(_, dot_iterator_string, ".iterator") \
+ V(_, dot_promise_string, ".promise") \
+ V(_, dot_result_string, ".result") \
+ V(_, dot_string, ".") \
+ V(_, dot_switch_tag_string, ".switch_tag") \
+ V(_, dotAll_string, "dotAll") \
+ V(_, enumerable_string, "enumerable") \
+ V(_, element_string, "element") \
+ V(_, Error_string, "Error") \
+ V(_, error_to_string, "[object Error]") \
+ V(_, eval_string, "eval") \
+ V(_, EvalError_string, "EvalError") \
+ V(_, exec_string, "exec") \
+ V(_, false_string, "false") \
+ V(_, flags_string, "flags") \
+ V(_, Float32Array_string, "Float32Array") \
+ V(_, Float64Array_string, "Float64Array") \
+ V(_, from_string, "from") \
+ V(_, Function_string, "Function") \
+ V(_, function_native_code_string, "function () { [native code] }") \
+ V(_, function_string, "function") \
+ V(_, function_to_string, "[object Function]") \
+ V(_, Generator_string, "Generator") \
+ V(_, get_space_string, "get ") \
+ V(_, get_string, "get") \
+ V(_, getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(_, getPrototypeOf_string, "getPrototypeOf") \
+ V(_, global_string, "global") \
+ V(_, globalThis_string, "globalThis") \
+ V(_, groups_string, "groups") \
+ V(_, has_string, "has") \
+ V(_, ignoreCase_string, "ignoreCase") \
+ V(_, illegal_access_string, "illegal access") \
+ V(_, illegal_argument_string, "illegal argument") \
+ V(_, index_string, "index") \
+ V(_, Infinity_string, "Infinity") \
+ V(_, infinity_string, "infinity") \
+ V(_, input_string, "input") \
+ V(_, Int16Array_string, "Int16Array") \
+ V(_, Int32Array_string, "Int32Array") \
+ V(_, Int8Array_string, "Int8Array") \
+ V(_, isExtensible_string, "isExtensible") \
+ V(_, keys_string, "keys") \
+ V(_, lastIndex_string, "lastIndex") \
+ V(_, length_string, "length") \
+ V(_, let_string, "let") \
+ V(_, line_string, "line") \
+ V(_, LinkError_string, "LinkError") \
+ V(_, long_string, "long") \
+ V(_, Map_string, "Map") \
+ V(_, MapIterator_string, "Map Iterator") \
+ V(_, medium_string, "medium") \
+ V(_, message_string, "message") \
+ V(_, meta_string, "meta") \
+ V(_, minus_Infinity_string, "-Infinity") \
+ V(_, Module_string, "Module") \
+ V(_, multiline_string, "multiline") \
+ V(_, name_string, "name") \
+ V(_, NaN_string, "NaN") \
+ V(_, narrow_string, "narrow") \
+ V(_, native_string, "native") \
+ V(_, new_target_string, ".new.target") \
+ V(_, next_string, "next") \
+ V(_, NFC_string, "NFC") \
+ V(_, NFD_string, "NFD") \
+ V(_, NFKC_string, "NFKC") \
+ V(_, NFKD_string, "NFKD") \
+ V(_, not_equal, "not-equal") \
+ V(_, null_string, "null") \
+ V(_, null_to_string, "[object Null]") \
+ V(_, Number_string, "Number") \
+ V(_, number_string, "number") \
+ V(_, number_to_string, "[object Number]") \
+ V(_, Object_string, "Object") \
+ V(_, object_string, "object") \
+ V(_, object_to_string, "[object Object]") \
+ V(_, of_string, "of") \
+ V(_, ok, "ok") \
+ V(_, one_string, "1") \
+ V(_, ownKeys_string, "ownKeys") \
+ V(_, percent_string, "percent") \
+ V(_, position_string, "position") \
+ V(_, preventExtensions_string, "preventExtensions") \
+ V(_, private_constructor_string, "#constructor") \
+ V(_, Promise_string, "Promise") \
+ V(_, proto_string, "__proto__") \
+ V(_, prototype_string, "prototype") \
+ V(_, proxy_string, "proxy") \
+ V(_, Proxy_string, "Proxy") \
+ V(_, query_colon_string, "(?:)") \
+ V(_, RangeError_string, "RangeError") \
+ V(_, raw_string, "raw") \
+ V(_, ReferenceError_string, "ReferenceError") \
+ V(_, ReflectGet_string, "Reflect.get") \
+ V(_, ReflectHas_string, "Reflect.has") \
+ V(_, RegExp_string, "RegExp") \
+ V(_, regexp_to_string, "[object RegExp]") \
+ V(_, resolve_string, "resolve") \
+ V(_, return_string, "return") \
+ V(_, revoke_string, "revoke") \
+ V(_, RuntimeError_string, "RuntimeError") \
+ V(_, Script_string, "Script") \
+ V(_, script_string, "script") \
+ V(_, short_string, "short") \
+ V(_, Set_string, "Set") \
+ V(_, sentence_string, "sentence") \
+ V(_, set_space_string, "set ") \
+ V(_, set_string, "set") \
+ V(_, SetIterator_string, "Set Iterator") \
+ V(_, setPrototypeOf_string, "setPrototypeOf") \
+ V(_, SharedArrayBuffer_string, "SharedArrayBuffer") \
+ V(_, source_string, "source") \
+ V(_, sourceText_string, "sourceText") \
+ V(_, stack_string, "stack") \
+ V(_, stackTraceLimit_string, "stackTraceLimit") \
+ V(_, sticky_string, "sticky") \
+ V(_, String_string, "String") \
+ V(_, string_string, "string") \
+ V(_, string_to_string, "[object String]") \
+ V(_, symbol_species_string, "[Symbol.species]") \
+ V(_, Symbol_string, "Symbol") \
+ V(_, symbol_string, "symbol") \
+ V(_, SyntaxError_string, "SyntaxError") \
+ V(_, target_string, "target") \
+ V(_, then_string, "then") \
+ V(_, this_function_string, ".this_function") \
+ V(_, this_string, "this") \
+ V(_, throw_string, "throw") \
+ V(_, timed_out, "timed-out") \
+ V(_, toJSON_string, "toJSON") \
+ V(_, toString_string, "toString") \
+ V(_, true_string, "true") \
+ V(_, TypeError_string, "TypeError") \
+ V(_, Uint16Array_string, "Uint16Array") \
+ V(_, Uint32Array_string, "Uint32Array") \
+ V(_, Uint8Array_string, "Uint8Array") \
+ V(_, Uint8ClampedArray_string, "Uint8ClampedArray") \
+ V(_, undefined_string, "undefined") \
+ V(_, undefined_to_string, "[object Undefined]") \
+ V(_, unicode_string, "unicode") \
+ V(_, URIError_string, "URIError") \
+ V(_, value_string, "value") \
+ V(_, valueOf_string, "valueOf") \
+ V(_, WeakMap_string, "WeakMap") \
+ V(_, WeakRef_string, "WeakRef") \
+ V(_, WeakSet_string, "WeakSet") \
+ V(_, week_string, "week") \
+ V(_, word_string, "word") \
+ V(_, writable_string, "writable") \
V(_, zero_string, "0")
#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
@@ -472,4 +484,4 @@
F(MINOR_MC_BACKGROUND_MARKING) \
F(SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL)
-#endif // V8_HEAP_SYMBOLS_H_
+#endif // V8_INIT_HEAP_SYMBOLS_H_
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/init/icu_util.cc
index 0a9168af66..81c66e6a20 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/init/icu_util.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/icu_util.h"
+#include "src/init/icu_util.h"
#if defined(_WIN32)
#include "src/base/win32-headers.h"
@@ -18,7 +18,7 @@
#include "src/base/build_config.h"
#include "src/base/file-utils.h"
-#define ICU_UTIL_DATA_FILE 0
+#define ICU_UTIL_DATA_FILE 0
#define ICU_UTIL_DATA_STATIC 1
#endif
@@ -31,9 +31,7 @@ namespace internal {
namespace {
char* g_icu_data_ptr = nullptr;
-void free_icu_data_ptr() {
- delete[] g_icu_data_ptr;
-}
+void free_icu_data_ptr() { delete[] g_icu_data_ptr; }
} // namespace
#endif
diff --git a/deps/v8/src/icu_util.h b/deps/v8/src/init/icu_util.h
index af7f994231..e127e75f10 100644
--- a/deps/v8/src/icu_util.h
+++ b/deps/v8/src/init/icu_util.h
@@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#ifndef V8_ICU_UTIL_H_
-#define V8_ICU_UTIL_H_
+#ifndef V8_INIT_ICU_UTIL_H_
+#define V8_INIT_ICU_UTIL_H_
namespace v8 {
@@ -22,4 +21,4 @@ bool InitializeICUDefaultLocation(const char* exec_path,
} // namespace internal
} // namespace v8
-#endif // V8_ICU_UTIL_H_
+#endif // V8_INIT_ICU_UTIL_H_
diff --git a/deps/v8/src/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index 2fc87a9141..98d5715411 100644
--- a/deps/v8/src/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/isolate-allocator.h"
+#include "src/init/isolate-allocator.h"
#include "src/base/bounded-page-allocator.h"
-#include "src/isolate.h"
-#include "src/ptr-compr.h"
-#include "src/utils.h"
+#include "src/common/ptr-compr.h"
+#include "src/execution/isolate.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/isolate-allocator.h b/deps/v8/src/init/isolate-allocator.h
index c525d8b82c..cd0e102d40 100644
--- a/deps/v8/src/isolate-allocator.h
+++ b/deps/v8/src/init/isolate-allocator.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ISOLATE_ALLOCATOR_H_
-#define V8_ISOLATE_ALLOCATOR_H_
+#ifndef V8_INIT_ISOLATE_ALLOCATOR_H_
+#define V8_INIT_ISOLATE_ALLOCATOR_H_
-#include "src/allocation.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/page-allocator.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/utils/allocation.h"
namespace v8 {
@@ -60,4 +60,4 @@ class V8_EXPORT_PRIVATE IsolateAllocator final {
} // namespace internal
} // namespace v8
-#endif // V8_ISOLATE_ALLOCATOR_H_
+#endif // V8_INIT_ISOLATE_ALLOCATOR_H_
diff --git a/deps/v8/src/setup-isolate-deserialize.cc b/deps/v8/src/init/setup-isolate-deserialize.cc
index d68fee95f6..ff0268d3c8 100644
--- a/deps/v8/src/setup-isolate-deserialize.cc
+++ b/deps/v8/src/init/setup-isolate-deserialize.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/setup-isolate.h"
+#include "src/init/setup-isolate.h"
#include "src/base/logging.h"
+#include "src/execution/isolate.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
-#include "src/ostreams.h"
-#include "src/objects-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/setup-isolate-full.cc b/deps/v8/src/init/setup-isolate-full.cc
index 494322ef06..44b9f47efe 100644
--- a/deps/v8/src/setup-isolate-full.cc
+++ b/deps/v8/src/init/setup-isolate-full.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/setup-isolate.h"
+#include "src/init/setup-isolate.h"
#include "src/base/logging.h"
#include "src/debug/debug-evaluate.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/setup-isolate.h b/deps/v8/src/init/setup-isolate.h
index 70a1a5aaea..1ae9b8d236 100644
--- a/deps/v8/src/setup-isolate.h
+++ b/deps/v8/src/init/setup-isolate.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SETUP_ISOLATE_H_
-#define V8_SETUP_ISOLATE_H_
+#ifndef V8_INIT_SETUP_ISOLATE_H_
+#define V8_INIT_SETUP_ISOLATE_H_
#include "src/base/macros.h"
@@ -56,4 +56,4 @@ class V8_EXPORT_PRIVATE SetupIsolateDelegate {
} // namespace internal
} // namespace v8
-#endif // V8_SETUP_ISOLATE_H_
+#endif // V8_INIT_SETUP_ISOLATE_H_
diff --git a/deps/v8/src/startup-data-util.cc b/deps/v8/src/init/startup-data-util.cc
index e9b6f6ef53..54d697c591 100644
--- a/deps/v8/src/startup-data-util.cc
+++ b/deps/v8/src/init/startup-data-util.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/startup-data-util.h"
+#include "src/init/startup-data-util.h"
#include <stdlib.h>
#include <string.h>
@@ -10,9 +10,8 @@
#include "src/base/file-utils.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
-#include "src/flags.h"
-#include "src/utils.h"
-
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -24,25 +23,21 @@ namespace {
v8::StartupData g_natives;
v8::StartupData g_snapshot;
-
void ClearStartupData(v8::StartupData* data) {
data->data = nullptr;
data->raw_size = 0;
}
-
void DeleteStartupData(v8::StartupData* data) {
delete[] data->data;
ClearStartupData(data);
}
-
void FreeStartupData() {
DeleteStartupData(&g_natives);
DeleteStartupData(&g_snapshot);
}
-
void Load(const char* blob_file, v8::StartupData* startup_data,
void (*setter_fn)(v8::StartupData*)) {
ClearStartupData(startup_data);
@@ -71,7 +66,6 @@ void Load(const char* blob_file, v8::StartupData* startup_data,
}
}
-
void LoadFromFiles(const char* natives_blob, const char* snapshot_blob) {
Load(natives_blob, &g_natives, v8::V8::SetNativesDataBlob);
Load(snapshot_blob, &g_snapshot, v8::V8::SetSnapshotDataBlob);
@@ -82,7 +76,6 @@ void LoadFromFiles(const char* natives_blob, const char* snapshot_blob) {
} // namespace
#endif // V8_USE_EXTERNAL_STARTUP_DATA
-
void InitializeExternalStartupData(const char* directory_path) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
char* natives;
@@ -101,7 +94,6 @@ void InitializeExternalStartupData(const char* directory_path) {
#endif // V8_USE_EXTERNAL_STARTUP_DATA
}
-
void InitializeExternalStartupData(const char* natives_blob,
const char* snapshot_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
diff --git a/deps/v8/src/startup-data-util.h b/deps/v8/src/init/startup-data-util.h
index 7cb51e362a..dfa26510ab 100644
--- a/deps/v8/src/startup-data-util.h
+++ b/deps/v8/src/init/startup-data-util.h
@@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-#ifndef V8_STARTUP_DATA_UTIL_H_
-#define V8_STARTUP_DATA_UTIL_H_
+#ifndef V8_INIT_STARTUP_DATA_UTIL_H_
+#define V8_INIT_STARTUP_DATA_UTIL_H_
#include "include/v8.h"
@@ -26,4 +25,4 @@ void InitializeExternalStartupData(const char* natives_blob,
} // namespace internal
} // namespace v8
-#endif // V8_STARTUP_DATA_UTIL_H_
+#endif // V8_INIT_STARTUP_DATA_UTIL_H_
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/init/v8.cc
index 318e846d61..19ad57038f 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -2,27 +2,27 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include <fstream>
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/base/atomicops.h"
#include "src/base/once.h"
#include "src/base/platform/platform.h"
-#include "src/bootstrapper.h"
-#include "src/cpu-features.h"
+#include "src/codegen/cpu-features.h"
+#include "src/codegen/interface-descriptors.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/elements.h"
-#include "src/frames.h"
-#include "src/interface-descriptors.h"
-#include "src/isolate.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/simulator.h"
+#include "src/init/bootstrapper.h"
#include "src/libsampler/sampler.h"
-#include "src/objects-inl.h"
+#include "src/objects/elements.h"
+#include "src/objects/objects-inl.h"
#include "src/profiler/heap-profiler.h"
-#include "src/runtime-profiler.h"
-#include "src/simulator.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/tracing-category-observer.h"
@@ -45,7 +45,6 @@ bool V8::Initialize() {
return true;
}
-
void V8::TearDown() {
wasm::WasmEngine::GlobalTearDown();
#if defined(USE_SIMULATOR)
@@ -57,7 +56,6 @@ void V8::TearDown() {
FlagList::ResetAllFlags(); // Frees memory held by string arguments.
}
-
void V8::InitializeOncePerProcessImpl() {
FlagList::EnforceFlagImplications();
@@ -88,7 +86,7 @@ void V8::InitializeOncePerProcessImpl() {
// continue exposing wasm on correctness fuzzers even in jitless mode.
// TODO(jgruber): Remove this once / if wasm can run without executable
// memory.
- if (FLAG_jitless && !FLAG_abort_on_stack_or_string_length_overflow) {
+ if (FLAG_jitless && !FLAG_correctness_fuzzer_suppressions) {
FLAG_expose_wasm = false;
}
@@ -108,12 +106,10 @@ void V8::InitializeOncePerProcessImpl() {
wasm::WasmEngine::InitializeOncePerProcess();
}
-
void V8::InitializeOncePerProcess() {
base::CallOnce(&init_once, &InitializeOncePerProcessImpl);
}
-
void V8::InitializePlatform(v8::Platform* platform) {
CHECK(!platform_);
CHECK(platform);
@@ -122,7 +118,6 @@ void V8::InitializePlatform(v8::Platform* platform) {
v8::tracing::TracingCategoryObserver::SetUp();
}
-
void V8::ShutdownPlatform() {
CHECK(platform_);
v8::tracing::TracingCategoryObserver::TearDown();
@@ -130,7 +125,6 @@ void V8::ShutdownPlatform() {
platform_ = nullptr;
}
-
v8::Platform* V8::GetCurrentPlatform() {
v8::Platform* platform = reinterpret_cast<v8::Platform*>(
base::Relaxed_Load(reinterpret_cast<base::AtomicWord*>(&platform_)));
@@ -151,7 +145,6 @@ void V8::SetNativesBlob(StartupData* natives_blob) {
#endif
}
-
void V8::SetSnapshotBlob(StartupData* snapshot_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
base::CallOnce(&init_snapshot_once, &SetSnapshotFromFile, snapshot_blob);
diff --git a/deps/v8/src/v8.h b/deps/v8/src/init/v8.h
index 76d2fda658..bd8331a907 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/init/v8.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_V8_H_
-#define V8_V8_H_
+#ifndef V8_INIT_V8_H_
+#define V8_INIT_V8_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
@@ -50,4 +50,4 @@ class V8 : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_V8_H_
+#endif // V8_INIT_V8_H_
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index b3e328a69d..863940ef4b 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -80,9 +80,22 @@ v8_header_set("inspector_test_headers") {
]
}
+v8_source_set("inspector_string_conversions") {
+ sources = [
+ "v8-string-conversions.cc",
+ "v8-string-conversions.h",
+ ]
+ configs = [ "../..:internal_config_base" ]
+ deps = [
+ "../..:v8_libbase",
+ ]
+}
+
v8_source_set("inspector") {
deps = [
+ ":inspector_string_conversions",
"../..:v8_version",
+ "../../third_party/inspector_protocol:encoding",
]
public_deps = [
@@ -130,6 +143,8 @@ v8_source_set("inspector") {
"v8-heap-profiler-agent-impl.h",
"v8-inspector-impl.cc",
"v8-inspector-impl.h",
+ "v8-inspector-protocol-encoding.cc",
+ "v8-inspector-protocol-encoding.h",
"v8-inspector-session-impl.cc",
"v8-inspector-session-impl.h",
"v8-profiler-agent-impl.cc",
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 330371a82c..5122d5d997 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -9,10 +9,15 @@ include_rules = [
"+src/base/platform/mutex.h",
"+src/base/safe_conversions.h",
"+src/base/v8-fallthrough.h",
- "+src/conversions.h",
- "+src/v8memory.h",
+ "+src/common/v8memory.h",
+ "+src/numbers/conversions.h",
"+src/inspector",
"+src/tracing",
"+src/debug/debug-interface.h",
"+src/debug/interface-types.h",
+ "+src/utils/vector.h",
+ "+third_party/inspector_protocol/encoding/encoding.h",
+ "+third_party/inspector_protocol/encoding/encoding.cc",
+ "+../../third_party/inspector_protocol/encoding/encoding.h",
+ "+../../third_party/inspector_protocol/encoding/encoding.cc",
]
diff --git a/deps/v8/src/inspector/OWNERS b/deps/v8/src/inspector/OWNERS
index a42adce782..55f8ac7875 100644
--- a/deps/v8/src/inspector/OWNERS
+++ b/deps/v8/src/inspector/OWNERS
@@ -13,7 +13,6 @@ per-file js_protocol.pdl=set noparent
per-file js_protocol.pdl=dgozman@chromium.org
per-file js_protocol.pdl=pfeldman@chromium.org
-per-file PRESUBMIT.py=machenbach@chromium.org
-per-file PRESUBMIT.py=sergiyb@chromium.org
+per-file PRESUBMIT.py=file://INFRA_OWNERS
# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/inspector/js_protocol.pdl b/deps/v8/src/inspector/js_protocol.pdl
index e4715f47ef..c4ff51b060 100644
--- a/deps/v8/src/inspector/js_protocol.pdl
+++ b/deps/v8/src/inspector/js_protocol.pdl
@@ -317,6 +317,17 @@ domain Debugger
# Location this breakpoint resolved into.
Location actualLocation
+ # Sets instrumentation breakpoint.
+ command setInstrumentationBreakpoint
+ parameters
+ # Instrumentation name.
+ enum instrumentation
+ beforeScriptExecution
+ beforeScriptWithSourceMapExecution
+ returns
+ # Id of the created breakpoint for further reference.
+ BreakpointId breakpointId
+
# Sets JavaScript breakpoint at given location specified either by URL or URL regex. Once this
# command is issued, all existing parsed scripts will have breakpoints resolved and returned in
# `locations` property. Further matching script parsing will result in subsequent
@@ -449,16 +460,17 @@ domain Debugger
array of CallFrame callFrames
# Pause reason.
enum reason
- XHR
+ ambiguous
+ assert
+ debugCommand
DOM
EventListener
exception
- assert
- debugCommand
- promiseRejection
+ instrumentation
OOM
other
- ambiguous
+ promiseRejection
+ XHR
# Object containing break-specific auxiliary properties.
optional object data
# Hit breakpoints IDs
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 303987dede..acf0159f27 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -6,14 +6,15 @@
#include <algorithm>
#include <cctype>
+#include <cinttypes>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <string>
#include "src/base/platform/platform.h"
-#include "src/base/v8-fallthrough.h"
-#include "src/conversions.h"
+#include "src/inspector/v8-string-conversions.h"
+#include "src/numbers/conversions.h"
namespace v8_inspector {
@@ -44,331 +45,6 @@ int64_t charactersToInteger(const UChar* characters, size_t length,
if (ok) *ok = !(*endptr);
return result;
}
-
-const UChar replacementCharacter = 0xFFFD;
-using UChar32 = uint32_t;
-
-inline int inlineUTF8SequenceLengthNonASCII(char b0) {
- if ((b0 & 0xC0) != 0xC0) return 0;
- if ((b0 & 0xE0) == 0xC0) return 2;
- if ((b0 & 0xF0) == 0xE0) return 3;
- if ((b0 & 0xF8) == 0xF0) return 4;
- return 0;
-}
-
-inline int inlineUTF8SequenceLength(char b0) {
- return isASCII(b0) ? 1 : inlineUTF8SequenceLengthNonASCII(b0);
-}
-
-// Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
-// into the first byte, depending on how many bytes follow. There are
-// as many entries in this table as there are UTF-8 sequence types.
-// (I.e., one byte sequence, two byte... etc.). Remember that sequences
-// for *legal* UTF-8 will be 4 or fewer bytes total.
-static const unsigned char firstByteMark[7] = {0x00, 0x00, 0xC0, 0xE0,
- 0xF0, 0xF8, 0xFC};
-
-typedef enum {
- conversionOK, // conversion successful
- sourceExhausted, // partial character in source, but hit end
- targetExhausted, // insuff. room in target for conversion
- sourceIllegal // source sequence is illegal/malformed
-} ConversionResult;
-
-ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
- const UChar* sourceEnd, char** targetStart,
- char* targetEnd, bool strict) {
- ConversionResult result = conversionOK;
- const UChar* source = *sourceStart;
- char* target = *targetStart;
- while (source < sourceEnd) {
- UChar32 ch;
- uint32_t bytesToWrite = 0;
- const UChar32 byteMask = 0xBF;
- const UChar32 byteMark = 0x80;
- const UChar* oldSource =
- source; // In case we have to back up because of target overflow.
- ch = static_cast<uint16_t>(*source++);
- // If we have a surrogate pair, convert to UChar32 first.
- if (ch >= 0xD800 && ch <= 0xDBFF) {
- // If the 16 bits following the high surrogate are in the source buffer...
- if (source < sourceEnd) {
- UChar32 ch2 = static_cast<uint16_t>(*source);
- // If it's a low surrogate, convert to UChar32.
- if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {
- ch = ((ch - 0xD800) << 10) + (ch2 - 0xDC00) + 0x0010000;
- ++source;
- } else if (strict) { // it's an unpaired high surrogate
- --source; // return to the illegal value itself
- result = sourceIllegal;
- break;
- }
- } else { // We don't have the 16 bits following the high surrogate.
- --source; // return to the high surrogate
- result = sourceExhausted;
- break;
- }
- } else if (strict) {
- // UTF-16 surrogate values are illegal in UTF-32
- if (ch >= 0xDC00 && ch <= 0xDFFF) {
- --source; // return to the illegal value itself
- result = sourceIllegal;
- break;
- }
- }
- // Figure out how many bytes the result will require
- if (ch < static_cast<UChar32>(0x80)) {
- bytesToWrite = 1;
- } else if (ch < static_cast<UChar32>(0x800)) {
- bytesToWrite = 2;
- } else if (ch < static_cast<UChar32>(0x10000)) {
- bytesToWrite = 3;
- } else if (ch < static_cast<UChar32>(0x110000)) {
- bytesToWrite = 4;
- } else {
- bytesToWrite = 3;
- ch = replacementCharacter;
- }
-
- target += bytesToWrite;
- if (target > targetEnd) {
- source = oldSource; // Back up source pointer!
- target -= bytesToWrite;
- result = targetExhausted;
- break;
- }
- switch (bytesToWrite) {
- case 4:
- *--target = static_cast<char>((ch | byteMark) & byteMask);
- ch >>= 6;
- V8_FALLTHROUGH;
- case 3:
- *--target = static_cast<char>((ch | byteMark) & byteMask);
- ch >>= 6;
- V8_FALLTHROUGH;
- case 2:
- *--target = static_cast<char>((ch | byteMark) & byteMask);
- ch >>= 6;
- V8_FALLTHROUGH;
- case 1:
- *--target = static_cast<char>(ch | firstByteMark[bytesToWrite]);
- }
- target += bytesToWrite;
- }
- *sourceStart = source;
- *targetStart = target;
- return result;
-}
-
-/**
- * Is this code point a BMP code point (U+0000..U+ffff)?
- * @param c 32-bit code point
- * @return TRUE or FALSE
- * @stable ICU 2.8
- */
-#define U_IS_BMP(c) ((uint32_t)(c) <= 0xFFFF)
-
-/**
- * Is this code point a supplementary code point (U+010000..U+10FFFF)?
- * @param c 32-bit code point
- * @return TRUE or FALSE
- * @stable ICU 2.8
- */
-#define U_IS_SUPPLEMENTARY(c) ((uint32_t)((c)-0x010000) <= 0xFFFFF)
-
-/**
- * Is this code point a surrogate (U+d800..U+dfff)?
- * @param c 32-bit code point
- * @return TRUE or FALSE
- * @stable ICU 2.4
- */
-#define U_IS_SURROGATE(c) (((c)&0xFFFFF800) == 0xD800)
-
-/**
- * Get the lead surrogate (0xD800..0xDBFF) for a
- * supplementary code point (0x010000..0x10FFFF).
- * @param supplementary 32-bit code point (U+010000..U+10FFFF)
- * @return lead surrogate (U+D800..U+DBFF) for supplementary
- * @stable ICU 2.4
- */
-#define U16_LEAD(supplementary) (UChar)(((supplementary) >> 10) + 0xD7C0)
-
-/**
- * Get the trail surrogate (0xDC00..0xDFFF) for a
- * supplementary code point (0x010000..0x10FFFF).
- * @param supplementary 32-bit code point (U+010000..U+10FFFF)
- * @return trail surrogate (U+DC00..U+DFFF) for supplementary
- * @stable ICU 2.4
- */
-#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3FF) | 0xDC00)
-
-// This must be called with the length pre-determined by the first byte.
-// If presented with a length > 4, this returns false. The Unicode
-// definition of UTF-8 goes up to 4-byte sequences.
-static bool isLegalUTF8(const unsigned char* source, int length) {
- unsigned char a;
- const unsigned char* srcptr = source + length;
- switch (length) {
- default:
- return false;
- // Everything else falls through when "true"...
- case 4:
- if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
- V8_FALLTHROUGH;
- case 3:
- if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
- V8_FALLTHROUGH;
- case 2:
- if ((a = (*--srcptr)) > 0xBF) return false;
-
- // no fall-through in this inner switch
- switch (*source) {
- case 0xE0:
- if (a < 0xA0) return false;
- break;
- case 0xED:
- if (a > 0x9F) return false;
- break;
- case 0xF0:
- if (a < 0x90) return false;
- break;
- case 0xF4:
- if (a > 0x8F) return false;
- break;
- default:
- if (a < 0x80) return false;
- }
- V8_FALLTHROUGH;
-
- case 1:
- if (*source >= 0x80 && *source < 0xC2) return false;
- }
- if (*source > 0xF4) return false;
- return true;
-}
-
-// Magic values subtracted from a buffer value during UTF8 conversion.
-// This table contains as many values as there might be trailing bytes
-// in a UTF-8 sequence.
-static const UChar32 offsetsFromUTF8[6] = {0x00000000UL,
- 0x00003080UL,
- 0x000E2080UL,
- 0x03C82080UL,
- static_cast<UChar32>(0xFA082080UL),
- static_cast<UChar32>(0x82082080UL)};
-
-static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
- UChar32 character = 0;
-
- // The cases all fall through.
- switch (length) {
- case 6:
- character += static_cast<unsigned char>(*sequence++);
- character <<= 6;
- V8_FALLTHROUGH;
- case 5:
- character += static_cast<unsigned char>(*sequence++);
- character <<= 6;
- V8_FALLTHROUGH;
- case 4:
- character += static_cast<unsigned char>(*sequence++);
- character <<= 6;
- V8_FALLTHROUGH;
- case 3:
- character += static_cast<unsigned char>(*sequence++);
- character <<= 6;
- V8_FALLTHROUGH;
- case 2:
- character += static_cast<unsigned char>(*sequence++);
- character <<= 6;
- V8_FALLTHROUGH;
- case 1:
- character += static_cast<unsigned char>(*sequence++);
- }
-
- return character - offsetsFromUTF8[length - 1];
-}
-
-ConversionResult convertUTF8ToUTF16(const char** sourceStart,
- const char* sourceEnd, UChar** targetStart,
- UChar* targetEnd, bool* sourceAllASCII,
- bool strict) {
- ConversionResult result = conversionOK;
- const char* source = *sourceStart;
- UChar* target = *targetStart;
- UChar orAllData = 0;
- while (source < sourceEnd) {
- int utf8SequenceLength = inlineUTF8SequenceLength(*source);
- if (sourceEnd - source < utf8SequenceLength) {
- result = sourceExhausted;
- break;
- }
- // Do this check whether lenient or strict
- if (!isLegalUTF8(reinterpret_cast<const unsigned char*>(source),
- utf8SequenceLength)) {
- result = sourceIllegal;
- break;
- }
-
- UChar32 character = readUTF8Sequence(source, utf8SequenceLength);
-
- if (target >= targetEnd) {
- source -= utf8SequenceLength; // Back up source pointer!
- result = targetExhausted;
- break;
- }
-
- if (U_IS_BMP(character)) {
- // UTF-16 surrogate values are illegal in UTF-32
- if (U_IS_SURROGATE(character)) {
- if (strict) {
- source -= utf8SequenceLength; // return to the illegal value itself
- result = sourceIllegal;
- break;
- }
- *target++ = replacementCharacter;
- orAllData |= replacementCharacter;
- } else {
- *target++ = static_cast<UChar>(character); // normal case
- orAllData |= character;
- }
- } else if (U_IS_SUPPLEMENTARY(character)) {
- // target is a character in range 0xFFFF - 0x10FFFF
- if (target + 1 >= targetEnd) {
- source -= utf8SequenceLength; // Back up source pointer!
- result = targetExhausted;
- break;
- }
- *target++ = U16_LEAD(character);
- *target++ = U16_TRAIL(character);
- orAllData = 0xFFFF;
- } else {
- if (strict) {
- source -= utf8SequenceLength; // return to the start
- result = sourceIllegal;
- break; // Bail out; shouldn't continue
- } else {
- *target++ = replacementCharacter;
- orAllData |= replacementCharacter;
- }
- }
- }
- *sourceStart = source;
- *targetStart = target;
-
- if (sourceAllASCII) *sourceAllASCII = !(orAllData & ~0x7F);
-
- return result;
-}
-
-// Helper to write a three-byte UTF-8 code point to the buffer, caller must
-// check room is available.
-static inline void putUTF8Triple(char*& buffer, UChar ch) {
- *buffer++ = static_cast<char>(((ch >> 12) & 0x0F) | 0xE0);
- *buffer++ = static_cast<char>(((ch >> 6) & 0x3F) | 0x80);
- *buffer++ = static_cast<char>((ch & 0x3F) | 0x80);
-}
-
} // namespace
String16::String16(const UChar* characters, size_t size)
@@ -386,6 +62,8 @@ String16::String16(const char* characters, size_t size) {
String16::String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
+String16::String16(std::basic_string<UChar>&& impl) : m_impl(impl) {}
+
// static
String16 String16::fromInteger(int number) {
char arr[50];
@@ -519,65 +197,11 @@ void String16Builder::reserveCapacity(size_t capacity) {
}
String16 String16::fromUTF8(const char* stringStart, size_t length) {
- if (!stringStart || !length) return String16();
-
- std::vector<UChar> buffer(length);
- UChar* bufferStart = buffer.data();
-
- UChar* bufferCurrent = bufferStart;
- const char* stringCurrent = stringStart;
- if (convertUTF8ToUTF16(&stringCurrent, stringStart + length, &bufferCurrent,
- bufferCurrent + buffer.size(), nullptr,
- true) != conversionOK)
- return String16();
-
- size_t utf16Length = bufferCurrent - bufferStart;
- return String16(bufferStart, utf16Length);
+ return String16(UTF8ToUTF16(stringStart, length));
}
std::string String16::utf8() const {
- size_t length = this->length();
-
- if (!length) return std::string("");
-
- // Allocate a buffer big enough to hold all the characters
- // (an individual UTF-16 UChar can only expand to 3 UTF-8 bytes).
- // Optimization ideas, if we find this function is hot:
- // * We could speculatively create a CStringBuffer to contain 'length'
- // characters, and resize if necessary (i.e. if the buffer contains
- // non-ascii characters). (Alternatively, scan the buffer first for
- // ascii characters, so we know this will be sufficient).
- // * We could allocate a CStringBuffer with an appropriate size to
- // have a good chance of being able to write the string into the
- // buffer without reallocing (say, 1.5 x length).
- if (length > std::numeric_limits<unsigned>::max() / 3) return std::string();
-
- std::string output(length * 3, '\0');
- const UChar* characters = m_impl.data();
- const UChar* characters_end = characters + length;
- char* buffer = &*output.begin();
- char* buffer_end = &*output.end();
- while (characters < characters_end) {
- // Use strict conversion to detect unpaired surrogates.
- ConversionResult result = convertUTF16ToUTF8(
- &characters, characters_end, &buffer, buffer_end, /* strict= */ true);
- DCHECK_NE(result, targetExhausted);
- // Conversion fails when there is an unpaired surrogate. Put
- // replacement character (U+FFFD) instead of the unpaired
- // surrogate.
- if (result != conversionOK) {
- DCHECK_LE(0xD800, *characters);
- DCHECK_LE(*characters, 0xDFFF);
- // There should be room left, since one UChar hasn't been
- // converted.
- DCHECK_LE(buffer + 3, buffer_end);
- putUTF8Triple(buffer, replacementCharacter);
- ++characters;
- }
- }
-
- output.resize(buffer - output.data());
- return output;
+ return UTF16ToUTF8(m_impl.data(), m_impl.size());
}
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 56df993332..1b475a10a6 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -30,6 +30,7 @@ class String16 {
String16(const char* characters); // NOLINT(runtime/explicit)
String16(const char* characters, size_t size);
explicit String16(const std::basic_string<UChar>& impl);
+ explicit String16(std::basic_string<UChar>&& impl);
String16& operator=(const String16&) V8_NOEXCEPT = default;
String16& operator=(String16&&) V8_NOEXCEPT = default;
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index 4dfe8ad352..e81c04d66f 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -4,11 +4,12 @@
#include "src/inspector/string-util.h"
+#include <cinttypes>
#include <cmath>
#include "src/base/platform/platform.h"
-#include "src/conversions.h"
#include "src/inspector/protocol/Protocol.h"
+#include "src/numbers/conversions.h"
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 8aa2102dd7..37b1d5c7a9 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -124,7 +124,6 @@ class ConsoleHelper {
return defaultValue;
}
v8::Local<v8::String> titleValue;
- v8::TryCatch tryCatch(m_context->GetIsolate());
if (!m_info[0]->ToString(m_context).ToLocal(&titleValue))
return defaultValue;
return toProtocolString(m_context->GetIsolate(), titleValue);
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 9fd9e47086..3301838587 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -31,10 +31,13 @@ using protocol::Array;
using protocol::Maybe;
using protocol::Debugger::BreakpointId;
using protocol::Debugger::CallFrame;
+using protocol::Debugger::Scope;
using protocol::Runtime::ExceptionDetails;
-using protocol::Runtime::ScriptId;
using protocol::Runtime::RemoteObject;
-using protocol::Debugger::Scope;
+using protocol::Runtime::ScriptId;
+
+namespace InstrumentationEnum =
+ protocol::Debugger::SetInstrumentationBreakpoint::InstrumentationEnum;
namespace DebuggerAgentState {
static const char pauseOnExceptionsState[] = "pauseOnExceptionsState";
@@ -47,6 +50,7 @@ static const char breakpointsByRegex[] = "breakpointsByRegex";
static const char breakpointsByUrl[] = "breakpointsByUrl";
static const char breakpointsByScriptHash[] = "breakpointsByScriptHash";
static const char breakpointHints[] = "breakpointHints";
+static const char instrumentationBreakpoints[] = "instrumentationBreakpoints";
} // namespace DebuggerAgentState
@@ -80,7 +84,8 @@ enum class BreakpointType {
kByScriptId,
kDebugCommand,
kMonitorCommand,
- kBreakpointAtEntry
+ kBreakpointAtEntry,
+ kInstrumentationBreakpoint
};
String16 generateBreakpointId(BreakpointType type,
@@ -106,6 +111,15 @@ String16 generateBreakpointId(BreakpointType type,
return builder.toString();
}
+String16 generateInstrumentationBreakpointId(const String16& instrumentation) {
+ String16Builder builder;
+ builder.appendNumber(
+ static_cast<int>(BreakpointType::kInstrumentationBreakpoint));
+ builder.append(':');
+ builder.append(instrumentation);
+ return builder.toString();
+}
+
bool parseBreakpointId(const String16& breakpointId, BreakpointType* type,
String16* scriptSelector = nullptr,
int* lineNumber = nullptr, int* columnNumber = nullptr) {
@@ -114,14 +128,15 @@ bool parseBreakpointId(const String16& breakpointId, BreakpointType* type,
int rawType = breakpointId.substring(0, typeLineSeparator).toInteger();
if (rawType < static_cast<int>(BreakpointType::kByUrl) ||
- rawType > static_cast<int>(BreakpointType::kBreakpointAtEntry)) {
+ rawType > static_cast<int>(BreakpointType::kInstrumentationBreakpoint)) {
return false;
}
if (type) *type = static_cast<BreakpointType>(rawType);
if (rawType == static_cast<int>(BreakpointType::kDebugCommand) ||
rawType == static_cast<int>(BreakpointType::kMonitorCommand) ||
- rawType == static_cast<int>(BreakpointType::kBreakpointAtEntry)) {
- // The script and source position is not encoded in this case.
+ rawType == static_cast<int>(BreakpointType::kBreakpointAtEntry) ||
+ rawType == static_cast<int>(BreakpointType::kInstrumentationBreakpoint)) {
+ // The script and source position are not encoded in this case.
return true;
}
@@ -356,6 +371,7 @@ Response V8DebuggerAgentImpl::disable() {
m_state->remove(DebuggerAgentState::breakpointsByUrl);
m_state->remove(DebuggerAgentState::breakpointsByScriptHash);
m_state->remove(DebuggerAgentState::breakpointHints);
+ m_state->remove(DebuggerAgentState::instrumentationBreakpoints);
m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState,
v8::debug::NoBreakOnException);
@@ -506,7 +522,6 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
break;
default:
UNREACHABLE();
- break;
}
if (breakpoints->get(breakpointId)) {
return Response::Error("Breakpoint at specified location already exists.");
@@ -580,6 +595,20 @@ Response V8DebuggerAgentImpl::setBreakpointOnFunctionCall(
return Response::OK();
}
+Response V8DebuggerAgentImpl::setInstrumentationBreakpoint(
+ const String16& instrumentation, String16* outBreakpointId) {
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ String16 breakpointId = generateInstrumentationBreakpointId(instrumentation);
+ protocol::DictionaryValue* breakpoints = getOrCreateObject(
+ m_state, DebuggerAgentState::instrumentationBreakpoints);
+ if (breakpoints->get(breakpointId)) {
+ return Response::Error("Instrumentation breakpoint is already enabled.");
+ }
+ breakpoints->setBoolean(breakpointId, true);
+ *outBreakpointId = breakpointId;
+ return Response::OK();
+}
+
Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
BreakpointType type;
@@ -606,6 +635,10 @@ Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
case BreakpointType::kByUrlRegex:
breakpoints = m_state->getObject(DebuggerAgentState::breakpointsByRegex);
break;
+ case BreakpointType::kInstrumentationBreakpoint:
+ breakpoints =
+ m_state->getObject(DebuggerAgentState::instrumentationBreakpoints);
+ break;
default:
break;
}
@@ -1496,6 +1529,40 @@ void V8DebuggerAgentImpl::didParseSource(
m_frontend.breakpointResolved(breakpointId, std::move(location));
}
}
+ setScriptInstrumentationBreakpointIfNeeded(scriptRef);
+}
+
+void V8DebuggerAgentImpl::setScriptInstrumentationBreakpointIfNeeded(
+ V8DebuggerScript* scriptRef) {
+ protocol::DictionaryValue* breakpoints =
+ m_state->getObject(DebuggerAgentState::instrumentationBreakpoints);
+ if (!breakpoints) return;
+ bool isBlackboxed = isFunctionBlackboxed(
+ scriptRef->scriptId(), v8::debug::Location(0, 0),
+ v8::debug::Location(scriptRef->endLine(), scriptRef->endColumn()));
+ if (isBlackboxed) return;
+
+ String16 sourceMapURL = scriptRef->sourceMappingURL();
+ String16 breakpointId = generateInstrumentationBreakpointId(
+ InstrumentationEnum::BeforeScriptExecution);
+ if (!breakpoints->get(breakpointId)) {
+ if (sourceMapURL.isEmpty()) return;
+ breakpointId = generateInstrumentationBreakpointId(
+ InstrumentationEnum::BeforeScriptWithSourceMapExecution);
+ if (!breakpoints->get(breakpointId)) return;
+ }
+ v8::debug::BreakpointId debuggerBreakpointId;
+ if (!scriptRef->setBreakpointOnRun(&debuggerBreakpointId)) return;
+ std::unique_ptr<protocol::DictionaryValue> data =
+ protocol::DictionaryValue::create();
+ data->setString("url", scriptRef->sourceURL());
+ data->setString("scriptId", scriptRef->scriptId());
+ if (!sourceMapURL.isEmpty()) data->setString("sourceMapURL", sourceMapURL);
+
+ m_breakpointsOnScriptRun[debuggerBreakpointId] = std::move(data);
+ m_debuggerBreakpointIdToBreakpointId[debuggerBreakpointId] = breakpointId;
+ m_breakpointIdToDebuggerBreakpointIds[breakpointId].push_back(
+ debuggerBreakpointId);
}
void V8DebuggerAgentImpl::didPause(
@@ -1539,6 +1606,14 @@ void V8DebuggerAgentImpl::didPause(
std::unique_ptr<Array<String16>> hitBreakpointIds = Array<String16>::create();
for (const auto& id : hitBreakpoints) {
+ auto it = m_breakpointsOnScriptRun.find(id);
+ if (it != m_breakpointsOnScriptRun.end()) {
+ hitReasons.push_back(std::make_pair(
+ protocol::Debugger::Paused::ReasonEnum::Instrumentation,
+ std::move(it->second)));
+ m_breakpointsOnScriptRun.erase(it);
+ continue;
+ }
auto breakpointIterator = m_debuggerBreakpointIdToBreakpointId.find(id);
if (breakpointIterator == m_debuggerBreakpointIdToBreakpointId.end()) {
continue;
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index bd781c7017..0a5a169907 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -60,6 +60,8 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Response setBreakpointOnFunctionCall(const String16& functionObjectId,
Maybe<String16> optionalCondition,
String16* outBreakpointId) override;
+ Response setInstrumentationBreakpoint(const String16& instrumentation,
+ String16* outBreakpointId) override;
Response removeBreakpoint(const String16& breakpointId) override;
Response continueToLocation(std::unique_ptr<protocol::Debugger::Location>,
Maybe<String16> targetCallFrames) override;
@@ -184,6 +186,8 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
bool isPaused() const;
+ void setScriptInstrumentationBreakpointIfNeeded(V8DebuggerScript* script);
+
using ScriptsMap =
std::unordered_map<String16, std::unique_ptr<V8DebuggerScript>>;
using BreakpointIdToDebuggerBreakpointIdsMap =
@@ -201,6 +205,9 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
ScriptsMap m_scripts;
BreakpointIdToDebuggerBreakpointIdsMap m_breakpointIdToDebuggerBreakpointIds;
DebuggerBreakpointIdToBreakpointIdMap m_debuggerBreakpointIdToBreakpointId;
+ std::unordered_map<v8::debug::BreakpointId,
+ std::unique_ptr<protocol::DictionaryValue>>
+ m_breakpointsOnScriptRun;
size_t m_maxScriptCacheSize = 0;
size_t m_cachedScriptSize = 0;
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index 6eaee6e8bc..fe7d570942 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -4,12 +4,12 @@
#include "src/inspector/v8-debugger-script.h"
+#include "src/common/v8memory.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger-agent-impl.h"
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/wasm-translation.h"
-#include "src/v8memory.h"
namespace v8_inspector {
@@ -235,6 +235,11 @@ class ActualScript : public V8DebuggerScript {
id);
}
+ bool setBreakpointOnRun(int* id) const override {
+ v8::HandleScope scope(m_isolate);
+ return script()->SetBreakpointOnScriptEntry(id);
+ }
+
const String16& hash() const override {
if (!m_hash.isEmpty()) return m_hash;
v8::HandleScope scope(m_isolate);
@@ -424,6 +429,8 @@ class WasmVirtualScript : public V8DebuggerScript {
return true;
}
+ bool setBreakpointOnRun(int*) const override { return false; }
+
const String16& hash() const override {
if (m_hash.isEmpty()) {
m_hash = m_wasmTranslation->GetHash(m_id, m_functionIndex);
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index a6e77b6699..547bb0a2cc 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -90,6 +90,7 @@ class V8DebuggerScript {
virtual bool setBreakpoint(const String16& condition,
v8::debug::Location* location, int* id) const = 0;
virtual void MakeWeak() = 0;
+ virtual bool setBreakpointOnRun(int* id) const = 0;
protected:
V8DebuggerScript(v8::Isolate*, String16 id, String16 url);
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 7c8eb21299..bc0c9d8cf6 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -42,7 +42,8 @@ class MatchPrototypePredicate : public v8::debug::QueryObjectPredicate {
: m_inspector(inspector), m_context(context), m_prototype(prototype) {}
bool Filter(v8::Local<v8::Object> object) override {
- v8::Local<v8::Context> objectContext = object->CreationContext();
+ v8::Local<v8::Context> objectContext =
+ v8::debug::GetCreationContext(object);
if (objectContext != m_context) return false;
if (!m_inspector->client()->isInspectableHeapObject(object)) return false;
// Get prototype chain for current object until first visited prototype.
diff --git a/deps/v8/src/inspector/v8-inspector-protocol-encoding.cc b/deps/v8/src/inspector/v8-inspector-protocol-encoding.cc
new file mode 100644
index 0000000000..45702e4b33
--- /dev/null
+++ b/deps/v8/src/inspector/v8-inspector-protocol-encoding.cc
@@ -0,0 +1,51 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-inspector-protocol-encoding.h"
+
+#include <cmath>
+#include "../../third_party/inspector_protocol/encoding/encoding.h"
+#include "src/numbers/conversions.h"
+#include "src/utils/vector.h"
+
+namespace v8_inspector {
+namespace {
+using IPEStatus = ::v8_inspector_protocol_encoding::Status;
+using ::v8_inspector_protocol_encoding::span;
+
+class Platform : public ::v8_inspector_protocol_encoding::json::Platform {
+ public:
+ bool StrToD(const char* str, double* result) const override {
+ *result = v8::internal::StringToDouble(str, v8::internal::NO_FLAGS);
+ return !std::isnan(*result);
+ }
+ std::unique_ptr<char[]> DToStr(double value) const override {
+ v8::internal::ScopedVector<char> buffer(
+ v8::internal::kDoubleToCStringMinBufferSize);
+ const char* str = v8::internal::DoubleToCString(value, buffer);
+ if (str == nullptr) return nullptr;
+ std::unique_ptr<char[]> result(new char[strlen(str) + 1]);
+ memcpy(result.get(), str, strlen(str) + 1);
+ DCHECK_EQ(0, result[strlen(str)]);
+ return result;
+ }
+};
+} // namespace
+
+IPEStatus ConvertCBORToJSON(span<uint8_t> cbor, std::vector<uint8_t>* json) {
+ Platform platform;
+ return ConvertCBORToJSON(platform, cbor, json);
+}
+
+IPEStatus ConvertJSONToCBOR(span<uint8_t> json, std::vector<uint8_t>* cbor) {
+ Platform platform;
+ return ConvertJSONToCBOR(platform, json, cbor);
+}
+
+IPEStatus ConvertJSONToCBOR(span<uint16_t> json, std::vector<uint8_t>* cbor) {
+ Platform platform;
+ return ConvertJSONToCBOR(platform, json, cbor);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-inspector-protocol-encoding.h b/deps/v8/src/inspector/v8-inspector-protocol-encoding.h
new file mode 100644
index 0000000000..6dcc7e8401
--- /dev/null
+++ b/deps/v8/src/inspector/v8-inspector-protocol-encoding.h
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8_INSPECTOR_PROTOCOL_ENCODING_H_
+#define V8_INSPECTOR_V8_INSPECTOR_PROTOCOL_ENCODING_H_
+
+#include "../../third_party/inspector_protocol/encoding/encoding.h"
+
+namespace v8_inspector {
+
+::v8_inspector_protocol_encoding::Status ConvertCBORToJSON(
+ ::v8_inspector_protocol_encoding::span<uint8_t> cbor,
+ std::vector<uint8_t>* json);
+
+::v8_inspector_protocol_encoding::Status ConvertJSONToCBOR(
+ ::v8_inspector_protocol_encoding::span<uint8_t> json,
+ std::vector<uint8_t>* cbor);
+
+::v8_inspector_protocol_encoding::Status ConvertJSONToCBOR(
+ ::v8_inspector_protocol_encoding::span<uint16_t> json,
+ std::vector<uint8_t>* cbor);
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8_INSPECTOR_PROTOCOL_ENCODING_H_
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index c4c4cc14a1..4242abb64a 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -4,6 +4,8 @@
#include "src/inspector/v8-inspector-session-impl.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
@@ -15,11 +17,45 @@
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-heap-profiler-agent-impl.h"
#include "src/inspector/v8-inspector-impl.h"
+#include "src/inspector/v8-inspector-protocol-encoding.h"
#include "src/inspector/v8-profiler-agent-impl.h"
#include "src/inspector/v8-runtime-agent-impl.h"
#include "src/inspector/v8-schema-agent-impl.h"
namespace v8_inspector {
+namespace {
+using ::v8_inspector_protocol_encoding::span;
+using ::v8_inspector_protocol_encoding::SpanFrom;
+using IPEStatus = ::v8_inspector_protocol_encoding::Status;
+
+bool IsCBORMessage(const StringView& msg) {
+ return msg.is8Bit() && msg.length() >= 2 && msg.characters8()[0] == 0xd8 &&
+ msg.characters8()[1] == 0x5a;
+}
+
+IPEStatus ConvertToCBOR(const StringView& state, std::vector<uint8_t>* cbor) {
+ return state.is8Bit()
+ ? ConvertJSONToCBOR(
+ span<uint8_t>(state.characters8(), state.length()), cbor)
+ : ConvertJSONToCBOR(
+ span<uint16_t>(state.characters16(), state.length()), cbor);
+}
+
+std::unique_ptr<protocol::DictionaryValue> ParseState(const StringView& state) {
+ std::vector<uint8_t> converted;
+ span<uint8_t> cbor;
+ if (IsCBORMessage(state))
+ cbor = span<uint8_t>(state.characters8(), state.length());
+ else if (ConvertToCBOR(state, &converted).ok())
+ cbor = SpanFrom(converted);
+ if (!cbor.empty()) {
+ std::unique_ptr<protocol::Value> value =
+ protocol::Value::parseBinary(cbor.data(), cbor.size());
+ if (value) return protocol::DictionaryValue::cast(std::move(value));
+ }
+ return protocol::DictionaryValue::create();
+}
+} // namespace
// static
bool V8InspectorSession::canDispatchMethod(const StringView& method) {
@@ -60,22 +96,13 @@ V8InspectorSessionImpl::V8InspectorSessionImpl(V8InspectorImpl* inspector,
m_channel(channel),
m_customObjectFormatterEnabled(false),
m_dispatcher(this),
- m_state(nullptr),
+ m_state(ParseState(savedState)),
m_runtimeAgent(nullptr),
m_debuggerAgent(nullptr),
m_heapProfilerAgent(nullptr),
m_profilerAgent(nullptr),
m_consoleAgent(nullptr),
m_schemaAgent(nullptr) {
- if (savedState.length()) {
- std::unique_ptr<protocol::Value> state =
- protocol::StringUtil::parseJSON(toString16(savedState));
- if (state) m_state = protocol::DictionaryValue::cast(std::move(state));
- if (!m_state) m_state = protocol::DictionaryValue::create();
- } else {
- m_state = protocol::DictionaryValue::create();
- }
-
m_state->getBoolean("use_binary_protocol", &use_binary_protocol_);
m_runtimeAgent.reset(new V8RuntimeAgentImpl(
@@ -330,8 +357,7 @@ void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
void V8InspectorSessionImpl::dispatchProtocolMessage(
const StringView& message) {
- bool binary_protocol =
- message.is8Bit() && message.length() && message.characters8()[0] == 0xD8;
+ bool binary_protocol = IsCBORMessage(message);
if (binary_protocol) {
use_binary_protocol_ = true;
m_state->setBoolean("use_binary_protocol", true);
@@ -355,8 +381,17 @@ void V8InspectorSessionImpl::dispatchProtocolMessage(
}
std::unique_ptr<StringBuffer> V8InspectorSessionImpl::stateJSON() {
- String16 json = m_state->toJSONString();
- return StringBufferImpl::adopt(json);
+ std::vector<uint8_t> json;
+ IPEStatus status = ConvertCBORToJSON(SpanFrom(state()), &json);
+ DCHECK(status.ok());
+ USE(status);
+ return v8::base::make_unique<BinaryStringBuffer>(std::move(json));
+}
+
+std::vector<uint8_t> V8InspectorSessionImpl::state() {
+ std::vector<uint8_t> out;
+ m_state->writeBinary(&out);
+ return out;
}
std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index 8834b56f5d..ea1d29773c 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -65,6 +65,7 @@ class V8InspectorSessionImpl : public V8InspectorSession,
// V8InspectorSession implementation.
void dispatchProtocolMessage(const StringView& message) override;
std::unique_ptr<StringBuffer> stateJSON() override;
+ std::vector<uint8_t> state() override;
std::vector<std::unique_ptr<protocol::Schema::API::Domain>> supportedDomains()
override;
void addInspectedObject(
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index b825397b4d..15f93e39d7 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -303,8 +303,8 @@ Response V8ProfilerAgentImpl::startPreciseCoverage(Maybe<bool> callCount,
// coverage data if it exists (at the time of writing, that's the case for
// each function recompiled after the BlockCount mode has been set); and
// function-granularity coverage data otherwise.
- typedef v8::debug::Coverage C;
- typedef v8::debug::CoverageMode Mode;
+ using C = v8::debug::Coverage;
+ using Mode = v8::debug::CoverageMode;
Mode mode = callCountValue
? (detailedValue ? Mode::kBlockCount : Mode::kPreciseCount)
: (detailedValue ? Mode::kBlockBinary : Mode::kPreciseBinary);
diff --git a/deps/v8/src/inspector/v8-string-conversions.cc b/deps/v8/src/inspector/v8-string-conversions.cc
new file mode 100644
index 0000000000..0c75e66b97
--- /dev/null
+++ b/deps/v8/src/inspector/v8-string-conversions.cc
@@ -0,0 +1,403 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/v8-string-conversions.h"
+
+#include <limits>
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/base/v8-fallthrough.h"
+
+namespace v8_inspector {
+namespace {
+using UChar = uint16_t;
+using UChar32 = uint32_t;
+
+bool isASCII(UChar c) { return !(c & ~0x7F); }
+
+const UChar replacementCharacter = 0xFFFD;
+
+inline int inlineUTF8SequenceLengthNonASCII(char b0) {
+ if ((b0 & 0xC0) != 0xC0) return 0;
+ if ((b0 & 0xE0) == 0xC0) return 2;
+ if ((b0 & 0xF0) == 0xE0) return 3;
+ if ((b0 & 0xF8) == 0xF0) return 4;
+ return 0;
+}
+
+inline int inlineUTF8SequenceLength(char b0) {
+ return isASCII(b0) ? 1 : inlineUTF8SequenceLengthNonASCII(b0);
+}
+
+// Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
+// into the first byte, depending on how many bytes follow. There are
+// as many entries in this table as there are UTF-8 sequence types.
+// (I.e., one byte sequence, two byte... etc.). Remember that sequences
+// for *legal* UTF-8 will be 4 or fewer bytes total.
+static const unsigned char firstByteMark[7] = {0x00, 0x00, 0xC0, 0xE0,
+ 0xF0, 0xF8, 0xFC};
+
+enum ConversionResult {
+ conversionOK, // conversion successful
+ sourceExhausted, // partial character in source, but hit end
+ targetExhausted, // insuff. room in target for conversion
+ sourceIllegal // source sequence is illegal/malformed
+};
+
+ConversionResult convertUTF16ToUTF8(const UChar** sourceStart,
+ const UChar* sourceEnd, char** targetStart,
+ char* targetEnd, bool strict) {
+ ConversionResult result = conversionOK;
+ const UChar* source = *sourceStart;
+ char* target = *targetStart;
+ while (source < sourceEnd) {
+ UChar32 ch;
+ uint32_t bytesToWrite = 0;
+ const UChar32 byteMask = 0xBF;
+ const UChar32 byteMark = 0x80;
+ const UChar* oldSource =
+ source; // In case we have to back up because of target overflow.
+ ch = static_cast<uint16_t>(*source++);
+ // If we have a surrogate pair, convert to UChar32 first.
+ if (ch >= 0xD800 && ch <= 0xDBFF) {
+ // If the 16 bits following the high surrogate are in the source buffer...
+ if (source < sourceEnd) {
+ UChar32 ch2 = static_cast<uint16_t>(*source);
+ // If it's a low surrogate, convert to UChar32.
+ if (ch2 >= 0xDC00 && ch2 <= 0xDFFF) {
+ ch = ((ch - 0xD800) << 10) + (ch2 - 0xDC00) + 0x0010000;
+ ++source;
+ } else if (strict) { // it's an unpaired high surrogate
+ --source; // return to the illegal value itself
+ result = sourceIllegal;
+ break;
+ }
+ } else { // We don't have the 16 bits following the high surrogate.
+ --source; // return to the high surrogate
+ result = sourceExhausted;
+ break;
+ }
+ } else if (strict) {
+ // UTF-16 surrogate values are illegal in UTF-32
+ if (ch >= 0xDC00 && ch <= 0xDFFF) {
+ --source; // return to the illegal value itself
+ result = sourceIllegal;
+ break;
+ }
+ }
+ // Figure out how many bytes the result will require
+ if (ch < static_cast<UChar32>(0x80)) {
+ bytesToWrite = 1;
+ } else if (ch < static_cast<UChar32>(0x800)) {
+ bytesToWrite = 2;
+ } else if (ch < static_cast<UChar32>(0x10000)) {
+ bytesToWrite = 3;
+ } else if (ch < static_cast<UChar32>(0x110000)) {
+ bytesToWrite = 4;
+ } else {
+ bytesToWrite = 3;
+ ch = replacementCharacter;
+ }
+
+ target += bytesToWrite;
+ if (target > targetEnd) {
+ source = oldSource; // Back up source pointer!
+ target -= bytesToWrite;
+ result = targetExhausted;
+ break;
+ }
+ switch (bytesToWrite) {
+ case 4:
+ *--target = static_cast<char>((ch | byteMark) & byteMask);
+ ch >>= 6;
+ V8_FALLTHROUGH;
+ case 3:
+ *--target = static_cast<char>((ch | byteMark) & byteMask);
+ ch >>= 6;
+ V8_FALLTHROUGH;
+ case 2:
+ *--target = static_cast<char>((ch | byteMark) & byteMask);
+ ch >>= 6;
+ V8_FALLTHROUGH;
+ case 1:
+ *--target = static_cast<char>(ch | firstByteMark[bytesToWrite]);
+ }
+ target += bytesToWrite;
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+/**
+ * Is this code point a BMP code point (U+0000..U+ffff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.8
+ */
+#define U_IS_BMP(c) ((uint32_t)(c) <= 0xFFFF)
+
+/**
+ * Is this code point a supplementary code point (U+010000..U+10FFFF)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.8
+ */
+#define U_IS_SUPPLEMENTARY(c) ((uint32_t)((c)-0x010000) <= 0xFFFFF)
+
+/**
+ * Is this code point a surrogate (U+d800..U+dfff)?
+ * @param c 32-bit code point
+ * @return TRUE or FALSE
+ * @stable ICU 2.4
+ */
+#define U_IS_SURROGATE(c) (((c)&0xFFFFF800) == 0xD800)
+
+/**
+ * Get the lead surrogate (0xD800..0xDBFF) for a
+ * supplementary code point (0x010000..0x10FFFF).
+ * @param supplementary 32-bit code point (U+010000..U+10FFFF)
+ * @return lead surrogate (U+D800..U+DBFF) for supplementary
+ * @stable ICU 2.4
+ */
+#define U16_LEAD(supplementary) (UChar)(((supplementary) >> 10) + 0xD7C0)
+
+/**
+ * Get the trail surrogate (0xDC00..0xDFFF) for a
+ * supplementary code point (0x010000..0x10FFFF).
+ * @param supplementary 32-bit code point (U+010000..U+10FFFF)
+ * @return trail surrogate (U+DC00..U+DFFF) for supplementary
+ * @stable ICU 2.4
+ */
+#define U16_TRAIL(supplementary) (UChar)(((supplementary)&0x3FF) | 0xDC00)
+
+// This must be called with the length pre-determined by the first byte.
+// If presented with a length > 4, this returns false. The Unicode
+// definition of UTF-8 goes up to 4-byte sequences.
+static bool isLegalUTF8(const unsigned char* source, int length) {
+ unsigned char a;
+ const unsigned char* srcptr = source + length;
+ switch (length) {
+ default:
+ return false;
+ // Everything else falls through when "true"...
+ case 4:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ V8_FALLTHROUGH;
+ case 3:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ V8_FALLTHROUGH;
+ case 2:
+ if ((a = (*--srcptr)) > 0xBF) return false;
+
+ // no fall-through in this inner switch
+ switch (*source) {
+ case 0xE0:
+ if (a < 0xA0) return false;
+ break;
+ case 0xED:
+ if (a > 0x9F) return false;
+ break;
+ case 0xF0:
+ if (a < 0x90) return false;
+ break;
+ case 0xF4:
+ if (a > 0x8F) return false;
+ break;
+ default:
+ if (a < 0x80) return false;
+ }
+ V8_FALLTHROUGH;
+
+ case 1:
+ if (*source >= 0x80 && *source < 0xC2) return false;
+ }
+ if (*source > 0xF4) return false;
+ return true;
+}
+
+// Magic values subtracted from a buffer value during UTF8 conversion.
+// This table contains as many values as there might be trailing bytes
+// in a UTF-8 sequence.
+static const UChar32 offsetsFromUTF8[6] = {0x00000000UL,
+ 0x00003080UL,
+ 0x000E2080UL,
+ 0x03C82080UL,
+ static_cast<UChar32>(0xFA082080UL),
+ static_cast<UChar32>(0x82082080UL)};
+
+static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
+ UChar32 character = 0;
+
+ // The cases all fall through.
+ switch (length) {
+ case 6:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ V8_FALLTHROUGH;
+ case 5:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ V8_FALLTHROUGH;
+ case 4:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ V8_FALLTHROUGH;
+ case 3:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ V8_FALLTHROUGH;
+ case 2:
+ character += static_cast<unsigned char>(*sequence++);
+ character <<= 6;
+ V8_FALLTHROUGH;
+ case 1:
+ character += static_cast<unsigned char>(*sequence++);
+ }
+
+ return character - offsetsFromUTF8[length - 1];
+}
+
+ConversionResult convertUTF8ToUTF16(const char** sourceStart,
+ const char* sourceEnd, UChar** targetStart,
+ UChar* targetEnd, bool* sourceAllASCII,
+ bool strict) {
+ ConversionResult result = conversionOK;
+ const char* source = *sourceStart;
+ UChar* target = *targetStart;
+ UChar orAllData = 0;
+ while (source < sourceEnd) {
+ int utf8SequenceLength = inlineUTF8SequenceLength(*source);
+ if (sourceEnd - source < utf8SequenceLength) {
+ result = sourceExhausted;
+ break;
+ }
+ // Do this check whether lenient or strict
+ if (!isLegalUTF8(reinterpret_cast<const unsigned char*>(source),
+ utf8SequenceLength)) {
+ result = sourceIllegal;
+ break;
+ }
+
+ UChar32 character = readUTF8Sequence(source, utf8SequenceLength);
+
+ if (target >= targetEnd) {
+ source -= utf8SequenceLength; // Back up source pointer!
+ result = targetExhausted;
+ break;
+ }
+
+ if (U_IS_BMP(character)) {
+ // UTF-16 surrogate values are illegal in UTF-32
+ if (U_IS_SURROGATE(character)) {
+ if (strict) {
+ source -= utf8SequenceLength; // return to the illegal value itself
+ result = sourceIllegal;
+ break;
+ }
+ *target++ = replacementCharacter;
+ orAllData |= replacementCharacter;
+ } else {
+ *target++ = static_cast<UChar>(character); // normal case
+ orAllData |= character;
+ }
+ } else if (U_IS_SUPPLEMENTARY(character)) {
+ // target is a character in range 0xFFFF - 0x10FFFF
+ if (target + 1 >= targetEnd) {
+ source -= utf8SequenceLength; // Back up source pointer!
+ result = targetExhausted;
+ break;
+ }
+ *target++ = U16_LEAD(character);
+ *target++ = U16_TRAIL(character);
+ orAllData = 0xFFFF;
+ } else {
+ if (strict) {
+ source -= utf8SequenceLength; // return to the start
+ result = sourceIllegal;
+ break; // Bail out; shouldn't continue
+ } else {
+ *target++ = replacementCharacter;
+ orAllData |= replacementCharacter;
+ }
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+
+ if (sourceAllASCII) *sourceAllASCII = !(orAllData & ~0x7F);
+
+ return result;
+}
+
+// Helper to write a three-byte UTF-8 code point to the buffer, caller must
+// check room is available.
+static inline void putUTF8Triple(char*& buffer, UChar ch) {
+ *buffer++ = static_cast<char>(((ch >> 12) & 0x0F) | 0xE0);
+ *buffer++ = static_cast<char>(((ch >> 6) & 0x3F) | 0x80);
+ *buffer++ = static_cast<char>((ch & 0x3F) | 0x80);
+}
+} // namespace
+
+std::string UTF16ToUTF8(const UChar* stringStart, size_t length) {
+ if (!stringStart || !length) return std::string();
+
+ // Allocate a buffer big enough to hold all the characters
+ // (an individual UTF-16 UChar can only expand to 3 UTF-8 bytes).
+ // Optimization ideas, if we find this function is hot:
+ // * We could speculatively create a CStringBuffer to contain 'length'
+ // characters, and resize if necessary (i.e. if the buffer contains
+ // non-ascii characters). (Alternatively, scan the buffer first for
+ // ascii characters, so we know this will be sufficient).
+ // * We could allocate a CStringBuffer with an appropriate size to
+ // have a good chance of being able to write the string into the
+ // buffer without reallocing (say, 1.5 x length).
+ if (length > std::numeric_limits<unsigned>::max() / 3) return std::string();
+
+ std::string output(length * 3, '\0');
+ const UChar* characters = stringStart;
+ const UChar* characters_end = characters + length;
+ char* buffer = &*output.begin();
+ char* buffer_end = &*output.end();
+ while (characters < characters_end) {
+ // Use strict conversion to detect unpaired surrogates.
+ ConversionResult result = convertUTF16ToUTF8(
+ &characters, characters_end, &buffer, buffer_end, /* strict= */ true);
+ DCHECK_NE(result, targetExhausted);
+ // Conversion fails when there is an unpaired surrogate. Put
+ // replacement character (U+FFFD) instead of the unpaired
+ // surrogate.
+ if (result != conversionOK) {
+ DCHECK_LE(0xD800, *characters);
+ DCHECK_LE(*characters, 0xDFFF);
+ // There should be room left, since one UChar hasn't been
+ // converted.
+ DCHECK_LE(buffer + 3, buffer_end);
+ putUTF8Triple(buffer, replacementCharacter);
+ ++characters;
+ }
+ }
+
+ output.resize(buffer - output.data());
+ return output;
+}
+
+std::basic_string<UChar> UTF8ToUTF16(const char* stringStart, size_t length) {
+ if (!stringStart || !length) return std::basic_string<UChar>();
+ std::vector<uint16_t> buffer(length);
+ UChar* bufferStart = buffer.data();
+
+ UChar* bufferCurrent = bufferStart;
+ const char* stringCurrent = reinterpret_cast<const char*>(stringStart);
+ if (convertUTF8ToUTF16(&stringCurrent,
+ reinterpret_cast<const char*>(stringStart + length),
+ &bufferCurrent, bufferCurrent + buffer.size(), nullptr,
+ true) != conversionOK)
+ return std::basic_string<uint16_t>();
+ size_t utf16Length = bufferCurrent - bufferStart;
+ return std::basic_string<UChar>(bufferStart, bufferStart + utf16Length);
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-string-conversions.h b/deps/v8/src/inspector/v8-string-conversions.h
new file mode 100644
index 0000000000..c1d69c18f0
--- /dev/null
+++ b/deps/v8/src/inspector/v8-string-conversions.h
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_V8_STRING_CONVERSIONS_H_
+#define V8_INSPECTOR_V8_STRING_CONVERSIONS_H_
+
+#include <string>
+
+// Conversion routines between UT8 and UTF16, used by string-16.{h,cc}. You may
+// want to use string-16.h directly rather than these.
+namespace v8_inspector {
+std::basic_string<uint16_t> UTF8ToUTF16(const char* stringStart, size_t length);
+std::string UTF16ToUTF8(const uint16_t* stringStart, size_t length);
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_V8_STRING_CONVERSIONS_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index e455cfd065..d7232fcd4c 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -4,11 +4,11 @@
#include "src/interpreter/bytecode-array-accessor.h"
-#include "src/feedback-vector.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter-intrinsics.h"
-#include "src/objects-inl.h"
#include "src/objects/code-inl.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -198,7 +198,7 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
}
Object BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
- return bytecode_array()->constant_pool()->get(index);
+ return bytecode_array()->constant_pool().get(index);
}
Object BytecodeArrayAccessor::GetConstantForIndexOperand(
@@ -216,7 +216,7 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
return GetAbsoluteOffset(relative_offset);
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
Smi smi = Smi::cast(GetConstantForIndexOperand(0));
- return GetAbsoluteOffset(smi->value());
+ return GetAbsoluteOffset(smi.value());
} else {
UNREACHABLE();
}
@@ -318,15 +318,15 @@ void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
if (table_offset_ >= table_end_) return;
Object current = accessor_->GetConstantAtIndex(table_offset_);
- while (!current->IsSmi()) {
- DCHECK(current->IsTheHole());
+ while (!current.IsSmi()) {
+ DCHECK(current.IsTheHole());
++table_offset_;
++index_;
if (table_offset_ >= table_end_) break;
current = accessor_->GetConstantAtIndex(table_offset_);
}
// Make sure we haven't reached the end of the table with a hole in current.
- if (current->IsSmi()) {
+ if (current.IsSmi()) {
current_ = Smi::cast(current);
}
}
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index db33b6f6ac..91b6886204 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -5,11 +5,11 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
-#include "src/globals.h"
-#include "src/handles.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index d0a30349ca..b58fbd3309 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -4,7 +4,7 @@
#include "src/interpreter/bytecode-array-builder.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
@@ -12,7 +12,7 @@
#include "src/interpreter/bytecode-register-optimizer.h"
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/interpreter-intrinsics.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -406,40 +406,40 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperationSmiLiteral(
Token::Value op, Smi literal, int feedback_slot) {
switch (op) {
case Token::Value::ADD:
- OutputAddSmi(literal->value(), feedback_slot);
+ OutputAddSmi(literal.value(), feedback_slot);
break;
case Token::Value::SUB:
- OutputSubSmi(literal->value(), feedback_slot);
+ OutputSubSmi(literal.value(), feedback_slot);
break;
case Token::Value::MUL:
- OutputMulSmi(literal->value(), feedback_slot);
+ OutputMulSmi(literal.value(), feedback_slot);
break;
case Token::Value::DIV:
- OutputDivSmi(literal->value(), feedback_slot);
+ OutputDivSmi(literal.value(), feedback_slot);
break;
case Token::Value::MOD:
- OutputModSmi(literal->value(), feedback_slot);
+ OutputModSmi(literal.value(), feedback_slot);
break;
case Token::Value::EXP:
- OutputExpSmi(literal->value(), feedback_slot);
+ OutputExpSmi(literal.value(), feedback_slot);
break;
case Token::Value::BIT_OR:
- OutputBitwiseOrSmi(literal->value(), feedback_slot);
+ OutputBitwiseOrSmi(literal.value(), feedback_slot);
break;
case Token::Value::BIT_XOR:
- OutputBitwiseXorSmi(literal->value(), feedback_slot);
+ OutputBitwiseXorSmi(literal.value(), feedback_slot);
break;
case Token::Value::BIT_AND:
- OutputBitwiseAndSmi(literal->value(), feedback_slot);
+ OutputBitwiseAndSmi(literal.value(), feedback_slot);
break;
case Token::Value::SHL:
- OutputShiftLeftSmi(literal->value(), feedback_slot);
+ OutputShiftLeftSmi(literal.value(), feedback_slot);
break;
case Token::Value::SAR:
- OutputShiftRightSmi(literal->value(), feedback_slot);
+ OutputShiftRightSmi(literal.value(), feedback_slot);
break;
case Token::Value::SHR:
- OutputShiftRightLogicalSmi(literal->value(), feedback_slot);
+ OutputShiftRightLogicalSmi(literal.value(), feedback_slot);
break;
default:
UNREACHABLE();
@@ -573,7 +573,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadConstantPoolEntry(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Smi smi) {
- int32_t raw_smi = smi->value();
+ int32_t raw_smi = smi.value();
if (raw_smi == 0) {
OutputLdaZero();
} else {
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 93d108f7be..c5fd3111c0 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -7,7 +7,7 @@
#include "src/ast/ast.h"
#include "src/base/compiler-specific.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register-allocator.h"
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index 963cd077bf..b582311007 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
#include "src/objects/code-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
index 9d206e2231..4ed5ce5e7d 100644
--- a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "src/interpreter/bytecode-array-random-iterator.h"
-#include "src/objects-inl.h"
#include "src/objects/code-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 4d9378d6e6..3769eefda1 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -4,7 +4,7 @@
#include "src/interpreter/bytecode-array-writer.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-node.h"
@@ -12,8 +12,8 @@
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/handler-table-builder.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -445,7 +445,6 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
switch (reserved_operand_size) {
case OperandSize::kNone:
UNREACHABLE();
- break;
case OperandSize::kByte:
node->update_operand0(k8BitJumpPlaceholder);
break;
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index d18c62a90f..5dac1b41c3 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -6,9 +6,9 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
#include "src/base/compiler-specific.h"
-#include "src/globals.h"
+#include "src/codegen/source-position-table.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecodes.h"
-#include "src/source-position-table.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-decoder.cc b/deps/v8/src/interpreter/bytecode-decoder.cc
index fa0ff9e4a8..6f2f9dda0d 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.cc
+++ b/deps/v8/src/interpreter/bytecode-decoder.cc
@@ -6,9 +6,9 @@
#include <iomanip>
-#include "src/contexts.h"
#include "src/interpreter/interpreter-intrinsics.h"
-#include "src/objects-inl.h"
+#include "src/objects/contexts.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-decoder.h b/deps/v8/src/interpreter/bytecode-decoder.h
index 49ede542d2..5be682b1f5 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.h
+++ b/deps/v8/src/interpreter/bytecode-decoder.h
@@ -7,7 +7,7 @@
#include <iosfwd>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 645a586960..613b0178c1 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -7,7 +7,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/builtins/builtins-constructor.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index 6f05770192..0f87c5bdfb 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -5,7 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_FLAGS_H_
#define V8_INTERPRETER_BYTECODE_FLAGS_H_
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 3f44282e7f..706580ac14 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -4,24 +4,24 @@
#include "src/interpreter/bytecode-generator.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/ast-source-ranges.h"
#include "src/ast/scopes.h"
#include "src/builtins/builtins-constructor.h"
-#include "src/compiler.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/unoptimized-compilation-info.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
-#include "src/objects-inl.h"
#include "src/objects/debug-objects.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/objects/template-objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
-#include "src/unoptimized-compilation-info.h"
namespace v8 {
namespace internal {
@@ -1128,7 +1128,8 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Create a generator object if necessary and initialize the
// {.generator_object} variable.
- if (IsResumableFunction(info()->literal()->kind())) {
+ FunctionLiteral* literal = info()->literal();
+ if (IsResumableFunction(literal->kind())) {
BuildGeneratorObjectVariableInitialization();
}
@@ -1146,6 +1147,9 @@ void BytecodeGenerator::GenerateBytecodeBody() {
}
}
+ // Increment the function-scope block coverage counter.
+ BuildIncrementBlockCoverageCounterIfEnabled(literal, SourceRangeKind::kBody);
+
// Visit declarations within the function scope.
VisitDeclarations(closure_scope()->declarations());
@@ -1153,17 +1157,22 @@ void BytecodeGenerator::GenerateBytecodeBody() {
VisitModuleNamespaceImports();
// Perform a stack-check before the body.
- builder()->StackCheck(info()->literal()->start_position());
+ builder()->StackCheck(literal->start_position());
// The derived constructor case is handled in VisitCallSuper.
- if (IsBaseConstructor(function_kind()) &&
- info()->literal()->requires_instance_members_initializer()) {
- BuildInstanceMemberInitialization(Register::function_closure(),
- builder()->Receiver());
+ if (IsBaseConstructor(function_kind())) {
+ if (literal->requires_brand_initialization()) {
+ BuildPrivateBrandInitialization(builder()->Receiver());
+ }
+
+ if (literal->requires_instance_members_initializer()) {
+ BuildInstanceMemberInitialization(Register::function_closure(),
+ builder()->Receiver());
+ }
}
// Visit statements in the function body.
- VisitStatements(info()->literal()->body());
+ VisitStatements(literal->body());
// Emit an implicit return instruction in case control flow can fall off the
// end of the function without an explicit return being present on all paths.
@@ -1929,6 +1938,39 @@ bool BytecodeGenerator::ShouldOptimizeAsOneShot() const {
info()->literal()->is_oneshot_iife();
}
+void BytecodeGenerator::BuildPrivateClassMemberNameAssignment(
+ ClassLiteral::Property* property) {
+ DCHECK(property->is_private());
+ switch (property->kind()) {
+ case ClassLiteral::Property::FIELD: {
+ // Create the private name symbols for fields during class
+ // evaluation and store them on the context. These will be
+ // used as keys later during instance or static initialization.
+ RegisterAllocationScope private_name_register_scope(this);
+ Register private_name = register_allocator()->NewRegister();
+ VisitForRegisterValue(property->key(), private_name);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
+ .StoreAccumulatorInRegister(private_name)
+ .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
+ DCHECK_NOT_NULL(property->private_name_var());
+ BuildVariableAssignment(property->private_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ break;
+ }
+ case ClassLiteral::Property::METHOD: {
+ // Create the closures for private methods.
+ VisitForAccumulatorValue(property->value());
+ BuildVariableAssignment(property->private_name_var(), Token::INIT,
+ HoleCheckMode::kElided);
+ break;
+ }
+ default:
+ // TODO(joyee): Private accessors are not yet supported.
+ UNREACHABLE();
+ }
+}
+
void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
size_t class_boilerplate_entry =
builder()->AllocateDeferredConstantPoolEntry();
@@ -1993,19 +2035,15 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
}
}
+ if (property->is_private()) {
+ BuildPrivateClassMemberNameAssignment(property);
+ // The private fields are initialized in the initializer function and
+ // the private brand for the private methods are initialized in the
+ // constructor instead.
+ continue;
+ }
+
if (property->kind() == ClassLiteral::Property::FIELD) {
- if (property->is_private()) {
- RegisterAllocationScope private_name_register_scope(this);
- Register private_name = register_allocator()->NewRegister();
- VisitForRegisterValue(property->key(), private_name);
- builder()
- ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
- .StoreAccumulatorInRegister(private_name)
- .CallRuntime(Runtime::kCreatePrivateNameSymbol, private_name);
- DCHECK_NOT_NULL(property->private_name_var());
- BuildVariableAssignment(property->private_name_var(), Token::INIT,
- HoleCheckMode::kElided);
- }
// We don't compute field's value here, but instead do it in the
// initializer function.
continue;
@@ -2029,6 +2067,23 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
HoleCheckMode::kElided);
}
+ // Create the class brand symbol and store it on the context
+ // during class evaluation. This will be stored in the
+ // receiver later in the constructor.
+ if (expr->scope()->brand() != nullptr) {
+ Register brand = register_allocator()->NewRegister();
+ const AstRawString* class_name =
+ expr->class_variable() != nullptr
+ ? expr->class_variable()->raw_name()
+ : ast_string_constants()->empty_string();
+ builder()
+ ->LoadLiteral(class_name)
+ .StoreAccumulatorInRegister(brand)
+ .CallRuntime(Runtime::kCreatePrivateNameSymbol, brand);
+ BuildVariableAssignment(expr->scope()->brand(), Token::INIT,
+ HoleCheckMode::kElided);
+ }
+
if (expr->instance_members_initializer_function() != nullptr) {
Register initializer =
VisitForRegisterValue(expr->instance_members_initializer_function());
@@ -2110,6 +2165,10 @@ void BytecodeGenerator::VisitInitializeClassMembersStatement(
for (int i = 0; i < stmt->fields()->length(); i++) {
ClassLiteral::Property* property = stmt->fields()->at(i);
+ // Private methods are not initialized in the
+ // InitializeClassMembersStatement.
+ DCHECK_IMPLIES(property->is_private(),
+ property->kind() == ClassLiteral::Property::FIELD);
if (property->is_computed_name()) {
DCHECK_EQ(property->kind(), ClassLiteral::Property::FIELD);
@@ -2120,8 +2179,7 @@ void BytecodeGenerator::VisitInitializeClassMembersStatement(
// variable at class definition time.
BuildVariableLoad(var, HoleCheckMode::kElided);
builder()->StoreAccumulatorInRegister(key);
- } else if (property->kind() == ClassLiteral::Property::FIELD &&
- property->is_private()) {
+ } else if (property->is_private()) {
Variable* private_name_var = property->private_name_var();
DCHECK_NOT_NULL(private_name_var);
BuildVariableLoad(private_name_var, HoleCheckMode::kElided);
@@ -2143,6 +2201,17 @@ void BytecodeGenerator::VisitInitializeClassMembersStatement(
}
}
+void BytecodeGenerator::BuildPrivateBrandInitialization(Register receiver) {
+ RegisterList brand_args = register_allocator()->NewRegisterList(2);
+ Variable* brand = info()->scope()->outer_scope()->AsClassScope()->brand();
+ DCHECK_NOT_NULL(brand);
+ BuildVariableLoad(brand, HoleCheckMode::kElided);
+ builder()
+ ->StoreAccumulatorInRegister(brand_args[1])
+ .MoveRegister(receiver, brand_args[0])
+ .CallRuntime(Runtime::kAddPrivateBrand, brand_args);
+}
+
void BytecodeGenerator::BuildInstanceMemberInitialization(Register constructor,
Register instance) {
RegisterList args = register_allocator()->NewRegisterList(1);
@@ -2504,7 +2573,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->MoveRegister(literal, args[0]);
builder()->SetExpressionPosition(property->value());
VisitForRegisterValue(property->value(), args[1]);
- builder()->CallRuntime(Runtime::kCopyDataProperties, args);
+ builder()->CallRuntime(Runtime::kInlineCopyDataProperties, args);
break;
}
case ObjectLiteral::Property::PROTOTYPE:
@@ -3857,7 +3926,10 @@ void BytecodeGenerator::VisitYield(Yield* expr) {
// break;
// case kReturn:
// let iteratorReturn = iterator.return;
-// if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
+// if (IS_NULL_OR_UNDEFINED(iteratorReturn)) {
+// if (IS_ASYNC_GENERATOR) input = await input;
+// return input;
+// }
// output = iteratorReturn.[[Call]](iterator, «input»);
// break;
// case kThrow:
@@ -3965,6 +4037,8 @@ void BytecodeGenerator::VisitYieldStar(YieldStar* expr) {
no_return_method.Bind(builder());
builder()->LoadAccumulatorWithRegister(input);
if (iterator_type == IteratorType::kAsync) {
+ // Await input.
+ BuildAwait(expr->position());
execution_control()->AsyncReturnAccumulator();
} else {
execution_control()->ReturnAccumulator();
@@ -4345,7 +4419,6 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
case Call::SUPER_CALL:
UNREACHABLE();
- break;
}
// Evaluate all arguments to the function call and store in sequential args
@@ -4475,6 +4548,13 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
BuildVariableAssignment(var, Token::INIT, HoleCheckMode::kRequired);
}
+ Register instance = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(instance);
+
+ if (info()->literal()->requires_brand_initialization()) {
+ BuildPrivateBrandInitialization(instance);
+ }
+
// The derived constructor has the correct bit set always, so we
// don't emit code to load and call the initializer if not
// required.
@@ -4487,11 +4567,10 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// if required.
if (info()->literal()->requires_instance_members_initializer() ||
!IsDerivedConstructor(info()->literal()->kind())) {
- Register instance = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(instance);
BuildInstanceMemberInitialization(this_function, instance);
- builder()->LoadAccumulatorWithRegister(instance);
}
+
+ builder()->LoadAccumulatorWithRegister(instance);
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index adad940a70..dda8b15c80 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -6,12 +6,12 @@
#define V8_INTERPRETER_BYTECODE_GENERATOR_H_
#include "src/ast/ast.h"
-#include "src/feedback-vector.h"
-#include "src/function-kind.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/function-kind.h"
namespace v8 {
namespace internal {
@@ -291,10 +291,12 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
+ void BuildPrivateClassMemberNameAssignment(ClassLiteral::Property* property);
void BuildClassLiteral(ClassLiteral* expr, Register name);
void VisitClassLiteral(ClassLiteral* expr, Register name);
void VisitNewTargetVariable(Variable* variable);
void VisitThisFunctionVariable(Variable* variable);
+ void BuildPrivateBrandInitialization(Register receiver);
void BuildInstanceMemberInitialization(Register constructor,
Register instance);
void BuildGeneratorObjectVariableInitialization();
diff --git a/deps/v8/src/interpreter/bytecode-jump-table.h b/deps/v8/src/interpreter/bytecode-jump-table.h
index b0a36cadbb..a77ae0ea12 100644
--- a/deps/v8/src/interpreter/bytecode-jump-table.h
+++ b/deps/v8/src/interpreter/bytecode-jump-table.h
@@ -5,7 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_JUMP_TABLE_H_
#define V8_INTERPRETER_BYTECODE_JUMP_TABLE_H_
-#include "src/bit-vector.h"
+#include "src/utils/bit-vector.h"
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/interpreter/bytecode-label.cc b/deps/v8/src/interpreter/bytecode-label.cc
index df49b03bd4..704843798d 100644
--- a/deps/v8/src/interpreter/bytecode-label.cc
+++ b/deps/v8/src/interpreter/bytecode-label.cc
@@ -5,7 +5,7 @@
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-array-builder.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-node.cc b/deps/v8/src/interpreter/bytecode-node.cc
index 2bcea0a16a..aa980ad713 100644
--- a/deps/v8/src/interpreter/bytecode-node.cc
+++ b/deps/v8/src/interpreter/bytecode-node.cc
@@ -5,7 +5,7 @@
#include "src/interpreter/bytecode-node.h"
#include <iomanip>
-#include "src/source-position-table.h"
+#include "src/codegen/source-position-table.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-node.h b/deps/v8/src/interpreter/bytecode-node.h
index 48d8961632..ebf0e91f50 100644
--- a/deps/v8/src/interpreter/bytecode-node.h
+++ b/deps/v8/src/interpreter/bytecode-node.h
@@ -7,7 +7,7 @@
#include <algorithm>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/bytecodes.h"
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index c73f31592c..19acc4bcfd 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -5,8 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_OPERANDS_H_
#define V8_INTERPRETER_BYTECODE_OPERANDS_H_
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index b120741872..674a4e3ac5 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -6,7 +6,7 @@
#define V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
#include "src/base/compiler-specific.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecode-register-allocator.h"
namespace v8 {
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index ca76fcfec4..034ac0bb76 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -9,8 +9,8 @@
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/frame-constants.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/execution/frame-constants.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-source-info.cc b/deps/v8/src/interpreter/bytecode-source-info.cc
index ed05b3e2e7..bb95c1259a 100644
--- a/deps/v8/src/interpreter/bytecode-source-info.cc
+++ b/deps/v8/src/interpreter/bytecode-source-info.cc
@@ -5,7 +5,7 @@
#include "src/interpreter/bytecode-source-info.h"
#include <iomanip>
-#include "src/source-position-table.h"
+#include "src/codegen/source-position-table.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-source-info.h b/deps/v8/src/interpreter/bytecode-source-info.h
index 790a6b2aa2..40dd49460f 100644
--- a/deps/v8/src/interpreter/bytecode-source-info.h
+++ b/deps/v8/src/interpreter/bytecode-source-info.h
@@ -5,7 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_SOURCE_INFO_H_
#define V8_INTERPRETER_BYTECODE_SOURCE_INFO_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index f9713ef79b..591dfbe2b7 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -10,7 +10,7 @@
#include <string>
#include <vector>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecode-operands.h"
// This interface and it's implementation are independent of the
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 8e37f0d48a..167b0ee7e2 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -12,8 +12,8 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/functional.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -284,7 +284,6 @@ ConstantArrayBuilder::OperandSizeToSlice(OperandSize operand_size) const {
switch (operand_size) {
case OperandSize::kNone:
UNREACHABLE();
- break;
case OperandSize::kByte:
slice = idx_slice_[0];
break;
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 05c43a80bd..968a0cadd5 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -6,10 +6,10 @@
#define V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
#include "src/ast/ast-value-factory.h"
-#include "src/globals.h"
-#include "src/identity-map.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects/smi.h"
+#include "src/utils/identity-map.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 8eb44069f6..3fa1274f82 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/interpreter/control-flow-builders.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
index a0c4735f54..831d31d09f 100644
--- a/deps/v8/src/interpreter/handler-table-builder.cc
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -4,10 +4,10 @@
#include "src/interpreter/handler-table-builder.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/interpreter/bytecode-register.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 029c8dd1a6..db7ed750dd 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -5,7 +5,7 @@
#ifndef V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
#define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
-#include "src/frames.h"
+#include "src/execution/frames.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/zone/zone-containers.h"
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 7ffa69807b..0af58b674f 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -7,13 +7,13 @@
#include <limits>
#include <ostream>
-#include "src/code-factory.h"
-#include "src/frames.h"
-#include "src/interface-descriptors.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
+#include "src/execution/frames.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
-#include "src/machine-type.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -389,7 +389,6 @@ Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
break;
default:
UNREACHABLE();
- break;
}
MachineType msb_type =
result_type.IsSigned() ? MachineType::Int8() : MachineType::Uint8();
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index ddea960ace..db4523b744 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -5,13 +5,13 @@
#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
-#include "src/allocation.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
-#include "src/globals.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/common/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index f111b4b7c6..852aae4482 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -10,8 +10,7 @@
#include "src/builtins/builtins-arguments-gen.h"
#include "src/builtins/builtins-constructor-gen.h"
#include "src/builtins/builtins-iterator-gen.h"
-#include "src/code-events.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/debug/debug.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/binary-op-assembler.h"
@@ -20,12 +19,12 @@
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics-generator.h"
-#include "src/objects-inl.h"
#include "src/objects/cell.h"
#include "src/objects/js-generator.h"
#include "src/objects/module.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -371,7 +370,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
}
};
-// LdaLookupSlot <name_index>
+// LdaLookupContextSlot <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
@@ -379,7 +378,7 @@ IGNITION_HANDLER(LdaLookupContextSlot, InterpreterLookupContextSlotAssembler) {
LookupContextSlot(Runtime::kLoadLookupSlot);
}
-// LdaLookupSlotInsideTypeof <name_index>
+// LdaLookupContextSlotInsideTypeof <name_index>
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a NoReferenceError.
@@ -444,7 +443,7 @@ IGNITION_HANDLER(LdaLookupGlobalSlotInsideTypeof,
LookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof);
}
-// StaLookupSlotSloppy <name_index> <flags>
+// StaLookupSlot <name_index> <flags>
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index|.
@@ -2961,7 +2960,8 @@ IGNITION_HANDLER(IncBlockCounter, InterpreterAssembler) {
Node* coverage_array_slot = BytecodeOperandIdxSmi(0);
Node* context = GetContext();
- CallRuntime(Runtime::kIncBlockCounter, context, closure, coverage_array_slot);
+ CallBuiltin(Builtins::kIncBlockCounter, context, closure,
+ coverage_array_slot);
Dispatch();
}
@@ -3291,10 +3291,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
}
Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state, options);
- PROFILE(isolate, CodeCreateEvent(
- CodeEventListener::BYTECODE_HANDLER_TAG,
- AbstractCode::cast(*code),
- Bytecodes::ToString(bytecode, operand_scale).c_str()));
+
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
StdoutStream os;
@@ -3302,6 +3299,7 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, Bytecode bytecode,
os << std::flush;
}
#endif // ENABLE_DISASSEMBLER
+
return code;
}
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index cbb41a7af0..19d17baa52 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -4,17 +4,17 @@
#include "src/interpreter/interpreter-intrinsics-generator.h"
-#include "src/allocation.h"
#include "src/builtins/builtins.h"
-#include "src/code-factory.h"
-#include "src/frames.h"
+#include "src/codegen/code-factory.h"
+#include "src/execution/frames.h"
#include "src/heap/factory-inl.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-assembler.h"
#include "src/interpreter/interpreter-intrinsics.h"
-#include "src/objects-inl.h"
#include "src/objects/js-generator.h"
#include "src/objects/module.h"
+#include "src/utils/allocation.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -159,12 +159,6 @@ Node* IntrinsicsGenerator::IsArray(
return IsInstanceType(input, JS_ARRAY_TYPE);
}
-Node* IntrinsicsGenerator::IsTypedArray(
- const InterpreterAssembler::RegListNodePair& args, Node* context) {
- Node* input = __ LoadRegisterFromRegisterList(args, 0);
- return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
-}
-
Node* IntrinsicsGenerator::IsSmi(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
Node* input = __ LoadRegisterFromRegisterList(args, 0);
@@ -194,6 +188,13 @@ Node* IntrinsicsGenerator::IntrinsicAsBuiltinCall(
return IntrinsicAsStubCall(args, context, callable);
}
+Node* IntrinsicsGenerator::CopyDataProperties(
+ const InterpreterAssembler::RegListNodePair& args, Node* context) {
+ return IntrinsicAsStubCall(
+ args, context,
+ Builtins::CallableFor(isolate(), Builtins::kCopyDataProperties));
+}
+
Node* IntrinsicsGenerator::CreateIterResultObject(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
@@ -207,7 +208,7 @@ Node* IntrinsicsGenerator::HasProperty(
args, context, Builtins::CallableFor(isolate(), Builtins::kHasProperty));
}
-Node* IntrinsicsGenerator::ToString(
+Node* IntrinsicsGenerator::ToStringRT(
const InterpreterAssembler::RegListNodePair& args, Node* context) {
return IntrinsicAsStubCall(
args, context, Builtins::CallableFor(isolate(), Builtins::kToString));
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 08fd428910..6cb2483533 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -29,14 +29,14 @@ namespace interpreter {
V(GeneratorClose, generator_close, 1) \
V(GetImportMetaObject, get_import_meta_object, 0) \
V(Call, call, -1) \
+ V(CopyDataProperties, copy_data_properties, 2) \
V(CreateIterResultObject, create_iter_result_object, 2) \
V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
V(HasProperty, has_property, 2) \
V(IsArray, is_array, 1) \
V(IsJSReceiver, is_js_receiver, 1) \
V(IsSmi, is_smi, 1) \
- V(IsTypedArray, is_typed_array, 1) \
- V(ToString, to_string, 1) \
+ V(ToStringRT, to_string, 1) \
V(ToLength, to_length, 1) \
V(ToObject, to_object, 1)
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 4298003ce2..9e06d95fde 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -9,20 +9,20 @@
#include "builtins-generated/bytecodes-builtins-list.h"
#include "src/ast/prettyprinter.h"
-#include "src/bootstrapper.h"
-#include "src/compiler.h"
-#include "src/counters-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/init/bootstrapper.h"
+#include "src/init/setup-isolate.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots.h"
-#include "src/ostreams.h"
+#include "src/objects/visitors.h"
#include "src/parsing/parse-info.h"
-#include "src/setup-isolate.h"
#include "src/snapshot/snapshot.h"
-#include "src/unoptimized-compilation-info.h"
-#include "src/visitors.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -86,9 +86,9 @@ Code Interpreter::GetBytecodeHandler(Bytecode bytecode,
void Interpreter::SetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale, Code handler) {
- DCHECK(handler->kind() == Code::BYTECODE_HANDLER);
+ DCHECK(handler.kind() == Code::BYTECODE_HANDLER);
size_t index = GetDispatchTableIndex(bytecode, operand_scale);
- dispatch_table_[index] = handler->InstructionStart();
+ dispatch_table_[index] = handler.InstructionStart();
}
// static
@@ -130,7 +130,7 @@ void Interpreter::IterateDispatchTable(RootVisitor* v) {
Code old_code = code;
v->VisitRootPointer(Root::kDispatchTable, nullptr, FullObjectSlot(&code));
if (code != old_code) {
- dispatch_table_[i] = code->entry();
+ dispatch_table_[i] = code.entry();
}
}
}
@@ -291,10 +291,9 @@ bool Interpreter::IsDispatchTableInitialized() const {
const char* Interpreter::LookupNameOfBytecodeHandler(const Code code) {
#ifdef ENABLE_DISASSEMBLER
-#define RETURN_NAME(Name, ...) \
- if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
- code->entry()) { \
- return #Name; \
+#define RETURN_NAME(Name, ...) \
+ if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == code.entry()) { \
+ return #Name; \
}
BYTECODE_LIST(RETURN_NAME)
#undef RETURN_NAME
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
deleted file mode 100644
index a8a272ca4f..0000000000
--- a/deps/v8/src/json-parser.cc
+++ /dev/null
@@ -1,959 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/json-parser.h"
-
-#include "src/char-predicates-inl.h"
-#include "src/conversions.h"
-#include "src/debug/debug.h"
-#include "src/field-type.h"
-#include "src/hash-seed-inl.h"
-#include "src/heap/heap-inl.h" // For string_table().
-#include "src/message-template.h"
-#include "src/objects-inl.h"
-#include "src/objects/hash-table-inl.h"
-#include "src/property-descriptor.h"
-#include "src/string-hasher.h"
-#include "src/transitions.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// A vector-like data structure that uses a larger vector for allocation, and
-// provides limited utility access. The original vector must not be used for the
-// duration, and it may even be reallocated. This allows vector storage to be
-// reused for the properties of sibling objects.
-template <typename Container>
-class VectorSegment {
- public:
- using value_type = typename Container::value_type;
-
- explicit VectorSegment(Container* container)
- : container_(*container), begin_(container->size()) {}
- ~VectorSegment() { container_.resize(begin_); }
-
- Vector<const value_type> GetVector() const {
- return VectorOf(container_) + begin_;
- }
-
- template <typename T>
- void push_back(T&& value) {
- container_.push_back(std::forward<T>(value));
- }
-
- private:
- Container& container_;
- const typename Container::size_type begin_;
-};
-
-} // namespace
-
-MaybeHandle<Object> JsonParseInternalizer::Internalize(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> reviver) {
- DCHECK(reviver->IsCallable());
- JsonParseInternalizer internalizer(isolate,
- Handle<JSReceiver>::cast(reviver));
- Handle<JSObject> holder =
- isolate->factory()->NewJSObject(isolate->object_function());
- Handle<String> name = isolate->factory()->empty_string();
- JSObject::AddProperty(isolate, holder, name, object, NONE);
- return internalizer.InternalizeJsonProperty(holder, name);
-}
-
-MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
- Handle<JSReceiver> holder, Handle<String> name) {
- HandleScope outer_scope(isolate_);
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate_, value, Object::GetPropertyOrElement(isolate_, holder, name),
- Object);
- if (value->IsJSReceiver()) {
- Handle<JSReceiver> object = Handle<JSReceiver>::cast(value);
- Maybe<bool> is_array = Object::IsArray(object);
- if (is_array.IsNothing()) return MaybeHandle<Object>();
- if (is_array.FromJust()) {
- Handle<Object> length_object;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate_, length_object,
- Object::GetLengthFromArrayLike(isolate_, object), Object);
- double length = length_object->Number();
- for (double i = 0; i < length; i++) {
- HandleScope inner_scope(isolate_);
- Handle<Object> index = isolate_->factory()->NewNumber(i);
- Handle<String> name = isolate_->factory()->NumberToString(index);
- if (!RecurseAndApply(object, name)) return MaybeHandle<Object>();
- }
- } else {
- Handle<FixedArray> contents;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate_, contents,
- KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
- ENUMERABLE_STRINGS,
- GetKeysConversion::kConvertToString),
- Object);
- for (int i = 0; i < contents->length(); i++) {
- HandleScope inner_scope(isolate_);
- Handle<String> name(String::cast(contents->get(i)), isolate_);
- if (!RecurseAndApply(object, name)) return MaybeHandle<Object>();
- }
- }
- }
- Handle<Object> argv[] = {name, value};
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate_, result, Execution::Call(isolate_, reviver_, holder, 2, argv),
- Object);
- return outer_scope.CloseAndEscape(result);
-}
-
-bool JsonParseInternalizer::RecurseAndApply(Handle<JSReceiver> holder,
- Handle<String> name) {
- STACK_CHECK(isolate_, false);
-
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, result, InternalizeJsonProperty(holder, name), false);
- Maybe<bool> change_result = Nothing<bool>();
- if (result->IsUndefined(isolate_)) {
- change_result = JSReceiver::DeletePropertyOrElement(holder, name,
- LanguageMode::kSloppy);
- } else {
- PropertyDescriptor desc;
- desc.set_value(result);
- desc.set_configurable(true);
- desc.set_enumerable(true);
- desc.set_writable(true);
- change_result = JSReceiver::DefineOwnProperty(isolate_, holder, name, &desc,
- Just(kDontThrow));
- }
- MAYBE_RETURN(change_result, false);
- return true;
-}
-
-template <bool seq_one_byte>
-JsonParser<seq_one_byte>::JsonParser(Isolate* isolate, Handle<String> source)
- : source_(source),
- source_length_(source->length()),
- isolate_(isolate),
- zone_(isolate_->allocator(), ZONE_NAME),
- object_constructor_(isolate_->native_context()->object_function(),
- isolate_),
- position_(-1),
- properties_(&zone_) {
- source_ = String::Flatten(isolate, source_);
- allocation_ = (source_length_ >= kPretenureTreshold) ? AllocationType::kOld
- : AllocationType::kYoung;
-
- // Optimized fast case where we only have Latin1 characters.
- if (seq_one_byte) {
- seq_source_ = Handle<SeqOneByteString>::cast(source_);
- }
-}
-
-template <bool seq_one_byte>
-MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
- // Advance to the first character (possibly EOS)
- AdvanceSkipWhitespace();
- Handle<Object> result = ParseJsonValue();
- if (result.is_null() || c0_ != kEndOfString) {
- // Some exception (for example stack overflow) is already pending.
- if (isolate_->has_pending_exception()) return Handle<Object>::null();
-
- // Parse failed. Current character is the unexpected token.
- Factory* factory = this->factory();
- MessageTemplate message;
- Handle<Object> arg1 = Handle<Smi>(Smi::FromInt(position_), isolate());
- Handle<Object> arg2;
-
- switch (c0_) {
- case kEndOfString:
- message = MessageTemplate::kJsonParseUnexpectedEOS;
- break;
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9':
- message = MessageTemplate::kJsonParseUnexpectedTokenNumber;
- break;
- case '"':
- message = MessageTemplate::kJsonParseUnexpectedTokenString;
- break;
- default:
- message = MessageTemplate::kJsonParseUnexpectedToken;
- arg2 = arg1;
- arg1 = factory->LookupSingleCharacterStringFromCode(c0_);
- break;
- }
-
- Handle<Script> script(factory->NewScript(source_));
- if (isolate()->NeedsSourcePositionsForProfiling()) {
- Script::InitLineEnds(script);
- }
- // We should sent compile error event because we compile JSON object in
- // separated source file.
- isolate()->debug()->OnCompileError(script);
- MessageLocation location(script, position_, position_ + 1);
- Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
- return isolate()->template Throw<Object>(error, &location);
- }
- return result;
-}
-
-MaybeHandle<Object> InternalizeJsonProperty(Handle<JSObject> holder,
- Handle<String> key);
-
-template <bool seq_one_byte>
-void JsonParser<seq_one_byte>::Advance() {
- position_++;
- if (position_ >= source_length_) {
- c0_ = kEndOfString;
- } else if (seq_one_byte) {
- c0_ = seq_source_->SeqOneByteStringGet(position_);
- } else {
- c0_ = source_->Get(position_);
- }
-}
-
-template <bool seq_one_byte>
-void JsonParser<seq_one_byte>::AdvanceSkipWhitespace() {
- do {
- Advance();
- } while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r');
-}
-
-template <bool seq_one_byte>
-void JsonParser<seq_one_byte>::SkipWhitespace() {
- while (c0_ == ' ' || c0_ == '\t' || c0_ == '\n' || c0_ == '\r') {
- Advance();
- }
-}
-
-template <bool seq_one_byte>
-uc32 JsonParser<seq_one_byte>::AdvanceGetChar() {
- Advance();
- return c0_;
-}
-
-template <bool seq_one_byte>
-bool JsonParser<seq_one_byte>::MatchSkipWhiteSpace(uc32 c) {
- if (c0_ == c) {
- AdvanceSkipWhitespace();
- return true;
- }
- return false;
-}
-
-template <bool seq_one_byte>
-bool JsonParser<seq_one_byte>::ParseJsonString(Handle<String> expected) {
- int length = expected->length();
- if (source_->length() - position_ - 1 > length) {
- DisallowHeapAllocation no_gc;
- String::FlatContent content = expected->GetFlatContent(no_gc);
- if (content.IsOneByte()) {
- DCHECK_EQ('"', c0_);
- const uint8_t* input_chars = seq_source_->GetChars(no_gc) + position_ + 1;
- const uint8_t* expected_chars = content.ToOneByteVector().start();
- for (int i = 0; i < length; i++) {
- uint8_t c0 = input_chars[i];
- if (c0 != expected_chars[i] || c0 == '"' || c0 < 0x20 || c0 == '\\') {
- return false;
- }
- }
- if (input_chars[length] == '"') {
- position_ = position_ + length + 1;
- AdvanceSkipWhitespace();
- return true;
- }
- }
- }
- return false;
-}
-
-// Parse any JSON value.
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonValue() {
- StackLimitCheck stack_check(isolate_);
- if (stack_check.HasOverflowed()) {
- isolate_->StackOverflow();
- return Handle<Object>::null();
- }
-
- if (stack_check.InterruptRequested() &&
- isolate_->stack_guard()->HandleInterrupts()->IsException(isolate_)) {
- return Handle<Object>::null();
- }
-
- if (c0_ == '"') return ParseJsonString();
- if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
- if (c0_ == '{') return ParseJsonObject();
- if (c0_ == '[') return ParseJsonArray();
- if (c0_ == 'f') {
- if (AdvanceGetChar() == 'a' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 's' && AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return factory()->false_value();
- }
- return ReportUnexpectedCharacter();
- }
- if (c0_ == 't') {
- if (AdvanceGetChar() == 'r' && AdvanceGetChar() == 'u' &&
- AdvanceGetChar() == 'e') {
- AdvanceSkipWhitespace();
- return factory()->true_value();
- }
- return ReportUnexpectedCharacter();
- }
- if (c0_ == 'n') {
- if (AdvanceGetChar() == 'u' && AdvanceGetChar() == 'l' &&
- AdvanceGetChar() == 'l') {
- AdvanceSkipWhitespace();
- return factory()->null_value();
- }
- return ReportUnexpectedCharacter();
- }
- return ReportUnexpectedCharacter();
-}
-
-template <bool seq_one_byte>
-ParseElementResult JsonParser<seq_one_byte>::ParseElement(
- Handle<JSObject> json_object) {
- uint32_t index = 0;
- // Maybe an array index, try to parse it.
- if (c0_ == '0') {
- // With a leading zero, the string has to be "0" only to be an index.
- Advance();
- } else {
- do {
- int d = c0_ - '0';
- if (index > 429496729U - ((d + 3) >> 3)) break;
- index = (index * 10) + d;
- Advance();
- } while (IsDecimalDigit(c0_));
- }
-
- if (c0_ == '"') {
- // Successfully parsed index, parse and store element.
- AdvanceSkipWhitespace();
-
- if (c0_ == ':') {
- AdvanceSkipWhitespace();
- Handle<Object> value = ParseJsonValue();
- if (!value.is_null()) {
- JSObject::SetOwnElementIgnoreAttributes(json_object, index, value, NONE)
- .Assert();
- return kElementFound;
- } else {
- return kNullHandle;
- }
- }
- }
- return kElementNotFound;
-}
-
-// Parse a JSON object. Position must be right at '{'.
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
- HandleScope scope(isolate());
- Handle<JSObject> json_object =
- factory()->NewJSObject(object_constructor(), allocation_);
- Handle<Map> map(json_object->map(), isolate());
- int descriptor = 0;
- VectorSegment<ZoneVector<Handle<Object>>> properties(&properties_);
- DCHECK_EQ(c0_, '{');
-
- bool transitioning = true;
-
- AdvanceSkipWhitespace();
- if (c0_ != '}') {
- do {
- if (c0_ != '"') return ReportUnexpectedCharacter();
-
- int start_position = position_;
- Advance();
-
- if (IsDecimalDigit(c0_)) {
- ParseElementResult element_result = ParseElement(json_object);
- if (element_result == kNullHandle) return Handle<Object>::null();
- if (element_result == kElementFound) continue;
- }
- // Not an index, fallback to the slow path.
-
- position_ = start_position;
-#ifdef DEBUG
- c0_ = '"';
-#endif
-
- Handle<String> key;
- Handle<Object> value;
-
- // Try to follow existing transitions as long as possible. Once we stop
- // transitioning, no transition can be found anymore.
- DCHECK(transitioning);
- // First check whether there is a single expected transition. If so, try
- // to parse it first.
- bool follow_expected = false;
- Handle<Map> target;
- if (seq_one_byte) {
- DisallowHeapAllocation no_gc;
- TransitionsAccessor transitions(isolate(), *map, &no_gc);
- key = transitions.ExpectedTransitionKey();
- follow_expected = !key.is_null() && ParseJsonString(key);
- // If the expected transition hits, follow it.
- if (follow_expected) {
- target = transitions.ExpectedTransitionTarget();
- }
- }
- if (!follow_expected) {
- // If the expected transition failed, parse an internalized string and
- // try to find a matching transition.
- key = ParseJsonString();
- if (key.is_null()) return ReportUnexpectedCharacter();
-
- // If a transition was found, follow it and continue.
- transitioning = TransitionsAccessor(isolate(), map)
- .FindTransitionToField(key)
- .ToHandle(&target);
- }
- if (c0_ != ':') return ReportUnexpectedCharacter();
-
- AdvanceSkipWhitespace();
- value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
-
- if (transitioning) {
- PropertyDetails details =
- target->instance_descriptors()->GetDetails(descriptor);
- Representation expected_representation = details.representation();
-
- if (value->FitsRepresentation(expected_representation)) {
- if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors()
- ->GetFieldType(descriptor)
- ->NowContains(value)) {
- Handle<FieldType> value_type(
- value->OptimalType(isolate(), expected_representation));
- Map::GeneralizeField(isolate(), target, descriptor,
- details.constness(), expected_representation,
- value_type);
- }
- DCHECK(target->instance_descriptors()
- ->GetFieldType(descriptor)
- ->NowContains(value));
- properties.push_back(value);
- map = target;
- descriptor++;
- continue;
- } else {
- transitioning = false;
- }
- }
-
- DCHECK(!transitioning);
-
- // Commit the intermediate state to the object and stop transitioning.
- CommitStateToJsonObject(json_object, map, properties.GetVector());
-
- JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key, value)
- .Check();
- } while (transitioning && MatchSkipWhiteSpace(','));
-
- // If we transitioned until the very end, transition the map now.
- if (transitioning) {
- CommitStateToJsonObject(json_object, map, properties.GetVector());
- } else {
- while (MatchSkipWhiteSpace(',')) {
- HandleScope local_scope(isolate());
- if (c0_ != '"') return ReportUnexpectedCharacter();
-
- int start_position = position_;
- Advance();
-
- if (IsDecimalDigit(c0_)) {
- ParseElementResult element_result = ParseElement(json_object);
- if (element_result == kNullHandle) return Handle<Object>::null();
- if (element_result == kElementFound) continue;
- }
- // Not an index, fallback to the slow path.
-
- position_ = start_position;
-#ifdef DEBUG
- c0_ = '"';
-#endif
-
- Handle<String> key;
- Handle<Object> value;
-
- key = ParseJsonString();
- if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
-
- AdvanceSkipWhitespace();
- value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
-
- JSObject::DefinePropertyOrElementIgnoreAttributes(json_object, key,
- value)
- .Check();
- }
- }
-
- if (c0_ != '}') {
- return ReportUnexpectedCharacter();
- }
- }
- AdvanceSkipWhitespace();
- return scope.CloseAndEscape(json_object);
-}
-
-template <bool seq_one_byte>
-void JsonParser<seq_one_byte>::CommitStateToJsonObject(
- Handle<JSObject> json_object, Handle<Map> map,
- Vector<const Handle<Object>> properties) {
- JSObject::AllocateStorageForMap(json_object, map);
- DCHECK(!json_object->map()->is_dictionary_map());
-
- DisallowHeapAllocation no_gc;
- DescriptorArray descriptors = json_object->map()->instance_descriptors();
- for (int i = 0; i < properties.length(); i++) {
- Handle<Object> value = properties[i];
- // Initializing store.
- json_object->WriteToField(i, descriptors->GetDetails(i), *value);
- }
-}
-
-class ElementKindLattice {
- private:
- enum {
- SMI_ELEMENTS,
- NUMBER_ELEMENTS,
- OBJECT_ELEMENTS,
- };
-
- public:
- ElementKindLattice() : value_(SMI_ELEMENTS) {}
-
- void Update(Handle<Object> o) {
- if (o->IsSmi()) {
- return;
- } else if (o->IsHeapNumber()) {
- if (value_ < NUMBER_ELEMENTS) value_ = NUMBER_ELEMENTS;
- } else {
- DCHECK(!o->IsNumber());
- value_ = OBJECT_ELEMENTS;
- }
- }
-
- ElementsKind GetElementsKind() const {
- switch (value_) {
- case SMI_ELEMENTS:
- return PACKED_SMI_ELEMENTS;
- case NUMBER_ELEMENTS:
- return PACKED_DOUBLE_ELEMENTS;
- case OBJECT_ELEMENTS:
- return PACKED_ELEMENTS;
- default:
- UNREACHABLE();
- return PACKED_ELEMENTS;
- }
- }
-
- private:
- int value_;
-};
-
-// Parse a JSON array. Position must be right at '['.
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
- HandleScope scope(isolate());
- ZoneVector<Handle<Object>> elements(zone());
- DCHECK_EQ(c0_, '[');
-
- ElementKindLattice lattice;
-
- AdvanceSkipWhitespace();
- if (c0_ != ']') {
- do {
- Handle<Object> element = ParseJsonValue();
- if (element.is_null()) return ReportUnexpectedCharacter();
- elements.push_back(element);
- lattice.Update(element);
- } while (MatchSkipWhiteSpace(','));
- if (c0_ != ']') {
- return ReportUnexpectedCharacter();
- }
- }
- AdvanceSkipWhitespace();
-
- // Allocate a fixed array with all the elements.
-
- Handle<Object> json_array;
- const ElementsKind kind = lattice.GetElementsKind();
- int elements_size = static_cast<int>(elements.size());
-
- switch (kind) {
- case PACKED_ELEMENTS:
- case PACKED_SMI_ELEMENTS: {
- Handle<FixedArray> elems =
- factory()->NewFixedArray(elements_size, allocation_);
- for (int i = 0; i < elements_size; i++) elems->set(i, *elements[i]);
- json_array = factory()->NewJSArrayWithElements(elems, kind, allocation_);
- break;
- }
- case PACKED_DOUBLE_ELEMENTS: {
- Handle<FixedDoubleArray> elems = Handle<FixedDoubleArray>::cast(
- factory()->NewFixedDoubleArray(elements_size, allocation_));
- for (int i = 0; i < elements_size; i++) {
- elems->set(i, elements[i]->Number());
- }
- json_array = factory()->NewJSArrayWithElements(elems, kind, allocation_);
- break;
- }
- default:
- UNREACHABLE();
- }
-
- return scope.CloseAndEscape(json_array);
-}
-
-template <bool seq_one_byte>
-Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
- bool negative = false;
- int beg_pos = position_;
- if (c0_ == '-') {
- Advance();
- negative = true;
- }
- if (c0_ == '0') {
- Advance();
- // Prefix zero is only allowed if it's the only digit before
- // a decimal point or exponent.
- if (IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
- } else {
- uint32_t i = 0;
- int digits = 0;
- if (c0_ < '1' || c0_ > '9') return ReportUnexpectedCharacter();
- do {
- // This can overflow. That's OK, the "digits < 10" check below
- // will discard overflown results.
- i = i * 10 + c0_ - '0';
- digits++;
- Advance();
- } while (IsDecimalDigit(c0_));
- if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
- SkipWhitespace();
- return Handle<Smi>(Smi::FromInt((negative ? -static_cast<int>(i) : i)),
- isolate());
- }
- }
- if (c0_ == '.') {
- Advance();
- if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
- do {
- Advance();
- } while (IsDecimalDigit(c0_));
- }
- if (AsciiAlphaToLower(c0_) == 'e') {
- Advance();
- if (c0_ == '-' || c0_ == '+') Advance();
- if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
- do {
- Advance();
- } while (IsDecimalDigit(c0_));
- }
- int length = position_ - beg_pos;
- double number;
- if (seq_one_byte) {
- DisallowHeapAllocation no_gc;
- Vector<const uint8_t> chars(seq_source_->GetChars(no_gc) + beg_pos, length);
- number = StringToDouble(chars,
- NO_FLAGS, // Hex, octal or trailing junk.
- std::numeric_limits<double>::quiet_NaN());
- } else {
- Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
- String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
- Vector<const uint8_t> result =
- Vector<const uint8_t>(buffer.start(), length);
- number = StringToDouble(result,
- NO_FLAGS, // Hex, octal or trailing junk.
- 0.0);
- buffer.Dispose();
- }
- SkipWhitespace();
- return factory()->NewNumber(number, allocation_);
-}
-
-template <typename StringType>
-inline void SeqStringSet(Handle<StringType> seq_str, int i, uc32 c);
-
-template <>
-inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
- seq_str->SeqTwoByteStringSet(i, c);
-}
-
-template <>
-inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
- seq_str->SeqOneByteStringSet(i, c);
-}
-
-template <typename StringType>
-inline Handle<StringType> NewRawString(Factory* factory, int length,
- AllocationType allocation);
-
-template <>
-inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length,
- AllocationType allocation) {
- return factory->NewRawTwoByteString(length, allocation).ToHandleChecked();
-}
-
-template <>
-inline Handle<SeqOneByteString> NewRawString(Factory* factory, int length,
- AllocationType allocation) {
- return factory->NewRawOneByteString(length, allocation).ToHandleChecked();
-}
-
-// Scans the rest of a JSON string starting from position_ and writes
-// prefix[start..end] along with the scanned characters into a
-// sequential string of type StringType.
-template <bool seq_one_byte>
-template <typename StringType, typename SinkChar>
-Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
- Handle<String> prefix, int start, int end) {
- int count = end - start;
- int max_length = count + source_length_ - position_;
- int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
- Handle<StringType> seq_string =
- NewRawString<StringType>(factory(), length, allocation_);
-
- {
- DisallowHeapAllocation no_gc;
- // Copy prefix into seq_str.
- SinkChar* dest = seq_string->GetChars(no_gc);
- String::WriteToFlat(*prefix, dest, start, end);
- }
-
- while (c0_ != '"') {
- // Check for control character (0x00-0x1F) or unterminated string (<0).
- if (c0_ < 0x20) return Handle<String>::null();
- if (count >= length) {
- // We need to create a longer sequential string for the result.
- return SlowScanJsonString<StringType, SinkChar>(seq_string, 0, count);
- }
- if (c0_ != '\\') {
- // If the sink can contain UC16 characters, or source_ contains only
- // Latin1 characters, there's no need to test whether we can store the
- // character. Otherwise check whether the UC16 source character can fit
- // in the Latin1 sink.
- if (sizeof(SinkChar) == kUC16Size || seq_one_byte ||
- c0_ <= String::kMaxOneByteCharCode) {
- SeqStringSet(seq_string, count++, c0_);
- Advance();
- } else {
- // StringType is SeqOneByteString and we just read a non-Latin1 char.
- return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0, count);
- }
- } else {
- Advance(); // Advance past the \.
- switch (c0_) {
- case '"':
- case '\\':
- case '/':
- SeqStringSet(seq_string, count++, c0_);
- break;
- case 'b':
- SeqStringSet(seq_string, count++, '\x08');
- break;
- case 'f':
- SeqStringSet(seq_string, count++, '\x0C');
- break;
- case 'n':
- SeqStringSet(seq_string, count++, '\x0A');
- break;
- case 'r':
- SeqStringSet(seq_string, count++, '\x0D');
- break;
- case 't':
- SeqStringSet(seq_string, count++, '\x09');
- break;
- case 'u': {
- uc32 value = 0;
- for (int i = 0; i < 4; i++) {
- Advance();
- int digit = HexValue(c0_);
- if (digit < 0) {
- return Handle<String>::null();
- }
- value = value * 16 + digit;
- }
- if (sizeof(SinkChar) == kUC16Size ||
- value <= String::kMaxOneByteCharCode) {
- SeqStringSet(seq_string, count++, value);
- break;
- } else {
- // StringType is SeqOneByteString and we just read a non-Latin1
- // char.
- position_ -= 6; // Rewind position_ to \ in \uxxxx.
- Advance();
- return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0,
- count);
- }
- }
- default:
- return Handle<String>::null();
- }
- Advance();
- }
- }
-
- DCHECK_EQ('"', c0_);
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
-
- // Shrink seq_string length to count and return.
- return SeqString::Truncate(seq_string, count);
-}
-
-template <bool seq_one_byte>
-Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
- DCHECK_EQ('"', c0_);
- Advance();
- if (c0_ == '"') {
- AdvanceSkipWhitespace();
- return factory()->empty_string();
- }
-
- if (seq_one_byte) {
- // Fast path for existing internalized strings. If the the string being
- // parsed is not a known internalized string, contains backslashes or
- // unexpectedly reaches the end of string, return with an empty handle.
-
- // We intentionally use local variables instead of fields, compute hash
- // while we are iterating a string and manually inline StringTable lookup
- // here.
-
- int position = position_;
- uc32 c0 = c0_;
- uint32_t running_hash = static_cast<uint32_t>(HashSeed(isolate()));
- uint32_t index = 0;
- bool is_array_index = true;
-
- do {
- if (c0 == '\\') {
- c0_ = c0;
- int beg_pos = position_;
- position_ = position;
- return SlowScanJsonString<SeqOneByteString, uint8_t>(source_, beg_pos,
- position_);
- }
- if (c0 < 0x20) {
- c0_ = c0;
- position_ = position;
- return Handle<String>::null();
- }
- if (is_array_index) {
- // With leading zero, the string has to be "0" to be a valid index.
- if (!IsDecimalDigit(c0) || (position > position_ && index == 0)) {
- is_array_index = false;
- } else {
- int d = c0 - '0';
- is_array_index = index <= 429496729U - ((d + 3) >> 3);
- index = (index * 10) + d;
- }
- }
- running_hash = StringHasher::AddCharacterCore(running_hash,
- static_cast<uint16_t>(c0));
- position++;
- if (position >= source_length_) {
- c0_ = kEndOfString;
- position_ = position;
- return Handle<String>::null();
- }
- c0 = seq_source_->SeqOneByteStringGet(position);
- } while (c0 != '"');
- int length = position - position_;
- uint32_t hash;
- if (is_array_index) {
- hash =
- StringHasher::MakeArrayIndexHash(index, length) >> String::kHashShift;
- } else if (length <= String::kMaxHashCalcLength) {
- hash = StringHasher::GetHashCore(running_hash);
- } else {
- hash = static_cast<uint32_t>(length);
- }
- StringTable string_table = isolate()->heap()->string_table();
- uint32_t capacity = string_table->Capacity();
- uint32_t entry = StringTable::FirstProbe(hash, capacity);
- uint32_t count = 1;
- Handle<String> result;
- while (true) {
- Object element = string_table->KeyAt(entry);
- if (element->IsUndefined(isolate())) {
- // Lookup failure.
- result =
- factory()->InternalizeOneByteString(seq_source_, position_, length);
- break;
- }
- if (!element->IsTheHole(isolate())) {
- DisallowHeapAllocation no_gc;
- Vector<const uint8_t> string_vector(
- seq_source_->GetChars(no_gc) + position_, length);
- if (String::cast(element)->IsOneByteEqualTo(string_vector)) {
- result = Handle<String>(String::cast(element), isolate());
- DCHECK_EQ(result->Hash(),
- (hash << String::kHashShift) >> String::kHashShift);
- break;
- }
- }
- entry = StringTable::NextProbe(entry, count++, capacity);
- }
- position_ = position;
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
- return result;
- }
-
- int beg_pos = position_;
- // Fast case for Latin1 only without escape characters.
- do {
- // Check for control character (0x00-0x1F) or unterminated string (<0).
- if (c0_ < 0x20) return Handle<String>::null();
- if (c0_ != '\\') {
- if (seq_one_byte || c0_ <= String::kMaxOneByteCharCode) {
- Advance();
- } else {
- return SlowScanJsonString<SeqTwoByteString, uc16>(source_, beg_pos,
- position_);
- }
- } else {
- return SlowScanJsonString<SeqOneByteString, uint8_t>(source_, beg_pos,
- position_);
- }
- } while (c0_ != '"');
- int length = position_ - beg_pos;
- Handle<String> result =
- factory()->NewRawOneByteString(length, allocation_).ToHandleChecked();
- DisallowHeapAllocation no_gc;
- uint8_t* dest = SeqOneByteString::cast(*result)->GetChars(no_gc);
- String::WriteToFlat(*source_, dest, beg_pos, position_);
-
- DCHECK_EQ('"', c0_);
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
- return result;
-}
-
-// Explicit instantiation.
-template class JsonParser<true>;
-template class JsonParser<false>;
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
deleted file mode 100644
index 7e6df8b59b..0000000000
--- a/deps/v8/src/json-parser.h
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_JSON_PARSER_H_
-#define V8_JSON_PARSER_H_
-
-#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-enum ParseElementResult { kElementFound, kElementNotFound, kNullHandle };
-
-class JsonParseInternalizer {
- public:
- static MaybeHandle<Object> Internalize(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> reviver);
-
- private:
- JsonParseInternalizer(Isolate* isolate, Handle<JSReceiver> reviver)
- : isolate_(isolate), reviver_(reviver) {}
-
- MaybeHandle<Object> InternalizeJsonProperty(Handle<JSReceiver> holder,
- Handle<String> key);
-
- bool RecurseAndApply(Handle<JSReceiver> holder, Handle<String> name);
-
- Isolate* isolate_;
- Handle<JSReceiver> reviver_;
-};
-
-// A simple json parser.
-template <bool seq_one_byte>
-class JsonParser {
- public:
- V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Parse(
- Isolate* isolate, Handle<String> source, Handle<Object> reviver) {
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- JsonParser(isolate, source).ParseJson(), Object);
- if (reviver->IsCallable()) {
- return JsonParseInternalizer::Internalize(isolate, result, reviver);
- }
- return result;
- }
-
- static const int kEndOfString = -1;
-
- private:
- JsonParser(Isolate* isolate, Handle<String> source);
-
- // Parse a string containing a single JSON value.
- MaybeHandle<Object> ParseJson();
-
- V8_INLINE void Advance();
-
- // The JSON lexical grammar is specified in the ECMAScript 5 standard,
- // section 15.12.1.1. The only allowed whitespace characters between tokens
- // are tab, carriage-return, newline and space.
-
- V8_INLINE void AdvanceSkipWhitespace();
- V8_INLINE void SkipWhitespace();
- V8_INLINE uc32 AdvanceGetChar();
-
- // Checks that current charater is c.
- // If so, then consume c and skip whitespace.
- V8_INLINE bool MatchSkipWhiteSpace(uc32 c);
-
- // A JSON string (production JSONString) is subset of valid JavaScript string
- // literals. The string must only be double-quoted (not single-quoted), and
- // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
- // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
- bool ParseJsonString(Handle<String> expected);
-
- Handle<String> ParseJsonString() {
- Handle<String> result = ScanJsonString();
- if (result.is_null()) return result;
- return factory()->InternalizeString(result);
- }
-
- Handle<String> ScanJsonString();
- // Creates a new string and copies prefix[start..end] into the beginning
- // of it. Then scans the rest of the string, adding characters after the
- // prefix. Called by ScanJsonString when reaching a '\' or non-Latin1 char.
- template <typename StringType, typename SinkChar>
- Handle<String> SlowScanJsonString(Handle<String> prefix, int start, int end);
-
- // A JSON number (production JSONNumber) is a subset of the valid JavaScript
- // decimal number literals.
- // It includes an optional minus sign, must have at least one
- // digit before and after a decimal point, may not have prefixed zeros (unless
- // the integer part is zero), and may include an exponent part (e.g., "e-10").
- // Hexadecimal and octal numbers are not allowed.
- Handle<Object> ParseJsonNumber();
-
- // Parse a single JSON value from input (grammar production JSONValue).
- // A JSON value is either a (double-quoted) string literal, a number literal,
- // one of "true", "false", or "null", or an object or array literal.
- Handle<Object> ParseJsonValue();
-
- // Parse a JSON object literal (grammar production JSONObject).
- // An object literal is a squiggly-braced and comma separated sequence
- // (possibly empty) of key/value pairs, where the key is a JSON string
- // literal, the value is a JSON value, and the two are separated by a colon.
- // A JSON array doesn't allow numbers and identifiers as keys, like a
- // JavaScript array.
- Handle<Object> ParseJsonObject();
-
- // Helper for ParseJsonObject. Parses the form "123": obj, which is recorded
- // as an element, not a property.
- ParseElementResult ParseElement(Handle<JSObject> json_object);
-
- // Parses a JSON array literal (grammar production JSONArray). An array
- // literal is a square-bracketed and comma separated sequence (possibly empty)
- // of JSON values.
- // A JSON array doesn't allow leaving out values from the sequence, nor does
- // it allow a terminal comma, like a JavaScript array does.
- Handle<Object> ParseJsonArray();
-
-
- // Mark that a parsing error has happened at the current token, and
- // return a null handle. Primarily for readability.
- inline Handle<Object> ReportUnexpectedCharacter() {
- return Handle<Object>::null();
- }
-
- inline Isolate* isolate() { return isolate_; }
- inline Factory* factory() { return isolate_->factory(); }
- inline Handle<JSFunction> object_constructor() { return object_constructor_; }
-
- static const int kInitialSpecialStringLength = 32;
- static const int kPretenureTreshold = 100 * 1024;
-
- private:
- Zone* zone() { return &zone_; }
-
- void CommitStateToJsonObject(Handle<JSObject> json_object, Handle<Map> map,
- Vector<const Handle<Object>> properties);
-
- Handle<String> source_;
- int source_length_;
- Handle<SeqOneByteString> seq_source_;
-
- AllocationType allocation_;
- Isolate* isolate_;
- Zone zone_;
- Handle<JSFunction> object_constructor_;
- uc32 c0_;
- int position_;
-
- // Property handles are stored here inside ParseJsonObject.
- ZoneVector<Handle<Object>> properties_;
-};
-
-// Explicit instantiation declarations.
-extern template class JsonParser<true>;
-extern template class JsonParser<false>;
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_JSON_PARSER_H_
diff --git a/deps/v8/src/json/OWNERS b/deps/v8/src/json/OWNERS
new file mode 100644
index 0000000000..9a078e6d10
--- /dev/null
+++ b/deps/v8/src/json/OWNERS
@@ -0,0 +1,3 @@
+ishell@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/json/json-parser.cc b/deps/v8/src/json/json-parser.cc
new file mode 100644
index 0000000000..83bacc81a6
--- /dev/null
+++ b/deps/v8/src/json/json-parser.cc
@@ -0,0 +1,1194 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/json/json-parser.h"
+
+#include "src/debug/debug.h"
+#include "src/execution/message-template.h"
+#include "src/numbers/conversions.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/field-type.h"
+#include "src/objects/hash-table-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/strings/string-hasher.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+constexpr JsonToken GetOneCharJsonToken(uint8_t c) {
+ // clang-format off
+ return
+ c == '"' ? JsonToken::STRING :
+ IsDecimalDigit(c) ? JsonToken::NUMBER :
+ c == '-' ? JsonToken::NUMBER :
+ c == '[' ? JsonToken::LBRACK :
+ c == '{' ? JsonToken::LBRACE :
+ c == ']' ? JsonToken::RBRACK :
+ c == '}' ? JsonToken::RBRACE :
+ c == 't' ? JsonToken::TRUE_LITERAL :
+ c == 'f' ? JsonToken::FALSE_LITERAL :
+ c == 'n' ? JsonToken::NULL_LITERAL :
+ c == ' ' ? JsonToken::WHITESPACE :
+ c == '\t' ? JsonToken::WHITESPACE :
+ c == '\r' ? JsonToken::WHITESPACE :
+ c == '\n' ? JsonToken::WHITESPACE :
+ c == ':' ? JsonToken::COLON :
+ c == ',' ? JsonToken::COMMA :
+ JsonToken::ILLEGAL;
+ // clang-format on
+}
+
+// Table of one-character tokens, by character (0x00..0xFF only).
+static const constexpr JsonToken one_char_json_tokens[256] = {
+#define CALL_GET_SCAN_FLAGS(N) GetOneCharJsonToken(N),
+ INT_0_TO_127_LIST(CALL_GET_SCAN_FLAGS)
+#undef CALL_GET_SCAN_FLAGS
+#define CALL_GET_SCAN_FLAGS(N) GetOneCharJsonToken(128 + N),
+ INT_0_TO_127_LIST(CALL_GET_SCAN_FLAGS)
+#undef CALL_GET_SCAN_FLAGS
+};
+
+enum class EscapeKind : uint8_t {
+ kIllegal,
+ kSelf,
+ kBackspace,
+ kTab,
+ kNewLine,
+ kFormFeed,
+ kCarriageReturn,
+ kUnicode
+};
+
+using EscapeKindField = BitField8<EscapeKind, 0, 3>;
+using MayTerminateStringField = BitField8<bool, EscapeKindField::kNext, 1>;
+using NumberPartField = BitField8<bool, MayTerminateStringField::kNext, 1>;
+
+constexpr bool MayTerminateJsonString(uint8_t flags) {
+ return MayTerminateStringField::decode(flags);
+}
+
+constexpr EscapeKind GetEscapeKind(uint8_t flags) {
+ return EscapeKindField::decode(flags);
+}
+
+constexpr bool IsNumberPart(uint8_t flags) {
+ return NumberPartField::decode(flags);
+}
+
+constexpr uint8_t GetJsonScanFlags(uint8_t c) {
+ // clang-format off
+ return (c == 'b' ? EscapeKindField::encode(EscapeKind::kBackspace)
+ : c == 't' ? EscapeKindField::encode(EscapeKind::kTab)
+ : c == 'n' ? EscapeKindField::encode(EscapeKind::kNewLine)
+ : c == 'f' ? EscapeKindField::encode(EscapeKind::kFormFeed)
+ : c == 'r' ? EscapeKindField::encode(EscapeKind::kCarriageReturn)
+ : c == 'u' ? EscapeKindField::encode(EscapeKind::kUnicode)
+ : c == '"' ? EscapeKindField::encode(EscapeKind::kSelf)
+ : c == '\\' ? EscapeKindField::encode(EscapeKind::kSelf)
+ : c == '/' ? EscapeKindField::encode(EscapeKind::kSelf)
+ : EscapeKindField::encode(EscapeKind::kIllegal)) |
+ (c < 0x20 ? MayTerminateStringField::encode(true)
+ : c == '"' ? MayTerminateStringField::encode(true)
+ : c == '\\' ? MayTerminateStringField::encode(true)
+ : MayTerminateStringField::encode(false)) |
+ NumberPartField::encode(c == '.' ||
+ c == 'e' ||
+ c == 'E' ||
+ IsDecimalDigit(c) ||
+ c == '-' ||
+ c == '+');
+ // clang-format on
+}
+
+// Table of one-character scan flags, by character (0x00..0xFF only).
+static const constexpr uint8_t character_json_scan_flags[256] = {
+#define CALL_GET_SCAN_FLAGS(N) GetJsonScanFlags(N),
+ INT_0_TO_127_LIST(CALL_GET_SCAN_FLAGS)
+#undef CALL_GET_SCAN_FLAGS
+#define CALL_GET_SCAN_FLAGS(N) GetJsonScanFlags(128 + N),
+ INT_0_TO_127_LIST(CALL_GET_SCAN_FLAGS)
+#undef CALL_GET_SCAN_FLAGS
+};
+
+} // namespace
+
+MaybeHandle<Object> JsonParseInternalizer::Internalize(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> reviver) {
+ DCHECK(reviver->IsCallable());
+ JsonParseInternalizer internalizer(isolate,
+ Handle<JSReceiver>::cast(reviver));
+ Handle<JSObject> holder =
+ isolate->factory()->NewJSObject(isolate->object_function());
+ Handle<String> name = isolate->factory()->empty_string();
+ JSObject::AddProperty(isolate, holder, name, object, NONE);
+ return internalizer.InternalizeJsonProperty(holder, name);
+}
+
+MaybeHandle<Object> JsonParseInternalizer::InternalizeJsonProperty(
+ Handle<JSReceiver> holder, Handle<String> name) {
+ HandleScope outer_scope(isolate_);
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, value, Object::GetPropertyOrElement(isolate_, holder, name),
+ Object);
+ if (value->IsJSReceiver()) {
+ Handle<JSReceiver> object = Handle<JSReceiver>::cast(value);
+ Maybe<bool> is_array = Object::IsArray(object);
+ if (is_array.IsNothing()) return MaybeHandle<Object>();
+ if (is_array.FromJust()) {
+ Handle<Object> length_object;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, length_object,
+ Object::GetLengthFromArrayLike(isolate_, object), Object);
+ double length = length_object->Number();
+ for (double i = 0; i < length; i++) {
+ HandleScope inner_scope(isolate_);
+ Handle<Object> index = isolate_->factory()->NewNumber(i);
+ Handle<String> name = isolate_->factory()->NumberToString(index);
+ if (!RecurseAndApply(object, name)) return MaybeHandle<Object>();
+ }
+ } else {
+ Handle<FixedArray> contents;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, contents,
+ KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString),
+ Object);
+ for (int i = 0; i < contents->length(); i++) {
+ HandleScope inner_scope(isolate_);
+ Handle<String> name(String::cast(contents->get(i)), isolate_);
+ if (!RecurseAndApply(object, name)) return MaybeHandle<Object>();
+ }
+ }
+ }
+ Handle<Object> argv[] = {name, value};
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate_, result, Execution::Call(isolate_, reviver_, holder, 2, argv),
+ Object);
+ return outer_scope.CloseAndEscape(result);
+}
+
+bool JsonParseInternalizer::RecurseAndApply(Handle<JSReceiver> holder,
+ Handle<String> name) {
+ STACK_CHECK(isolate_, false);
+
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, result, InternalizeJsonProperty(holder, name), false);
+ Maybe<bool> change_result = Nothing<bool>();
+ if (result->IsUndefined(isolate_)) {
+ change_result = JSReceiver::DeletePropertyOrElement(holder, name,
+ LanguageMode::kSloppy);
+ } else {
+ PropertyDescriptor desc;
+ desc.set_value(result);
+ desc.set_configurable(true);
+ desc.set_enumerable(true);
+ desc.set_writable(true);
+ change_result = JSReceiver::DefineOwnProperty(isolate_, holder, name, &desc,
+ Just(kDontThrow));
+ }
+ MAYBE_RETURN(change_result, false);
+ return true;
+}
+
+template <typename Char>
+JsonParser<Char>::JsonParser(Isolate* isolate, Handle<String> source)
+ : isolate_(isolate),
+ hash_seed_(HashSeed(isolate)),
+ object_constructor_(isolate_->object_function()),
+ original_source_(source) {
+ size_t start = 0;
+ size_t length = source->length();
+ if (source->IsSlicedString()) {
+ SlicedString string = SlicedString::cast(*source);
+ start = string.offset();
+ String parent = string.parent();
+ if (parent.IsThinString()) parent = ThinString::cast(parent).actual();
+ source_ = handle(parent, isolate);
+ } else {
+ source_ = String::Flatten(isolate, source);
+ }
+
+ if (StringShape(*source_).IsExternal()) {
+ chars_ =
+ static_cast<const Char*>(SeqExternalString::cast(*source_).GetChars());
+ chars_may_relocate_ = false;
+ } else {
+ DisallowHeapAllocation no_gc;
+ isolate->heap()->AddGCEpilogueCallback(UpdatePointersCallback,
+ v8::kGCTypeAll, this);
+ chars_ = SeqString::cast(*source_).GetChars(no_gc);
+ chars_may_relocate_ = true;
+ }
+ cursor_ = chars_ + start;
+ end_ = cursor_ + length;
+}
+
+template <typename Char>
+void JsonParser<Char>::ReportUnexpectedToken(JsonToken token) {
+ // Some exception (for example stack overflow) is already pending.
+ if (isolate_->has_pending_exception()) return;
+
+ // Parse failed. Current character is the unexpected token.
+ Factory* factory = this->factory();
+ MessageTemplate message;
+ int offset = original_source_->IsSlicedString()
+ ? SlicedString::cast(*original_source_).offset()
+ : 0;
+ int pos = position() - offset;
+ Handle<Object> arg1 = Handle<Smi>(Smi::FromInt(pos), isolate());
+ Handle<Object> arg2;
+
+ switch (token) {
+ case JsonToken::EOS:
+ message = MessageTemplate::kJsonParseUnexpectedEOS;
+ break;
+ case JsonToken::NUMBER:
+ message = MessageTemplate::kJsonParseUnexpectedTokenNumber;
+ break;
+ case JsonToken::STRING:
+ message = MessageTemplate::kJsonParseUnexpectedTokenString;
+ break;
+ default:
+ message = MessageTemplate::kJsonParseUnexpectedToken;
+ arg2 = arg1;
+ arg1 = factory->LookupSingleCharacterStringFromCode(*cursor_);
+ break;
+ }
+
+ Handle<Script> script(factory->NewScript(original_source_));
+ if (isolate()->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
+  // We should send a compile error event because we compile the JSON object
+  // in a separate source file.
+ isolate()->debug()->OnCompileError(script);
+ MessageLocation location(script, pos, pos + 1);
+ Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
+ isolate()->Throw(*error, &location);
+
+ // Move the cursor to the end so we won't be able to proceed parsing.
+ cursor_ = end_;
+}
+
+template <typename Char>
+void JsonParser<Char>::ReportUnexpectedCharacter(uc32 c) {
+ JsonToken token = JsonToken::ILLEGAL;
+ if (c == kEndOfString) {
+ token = JsonToken::EOS;
+ } else if (c <= unibrow::Latin1::kMaxChar) {
+ token = one_char_json_tokens[c];
+ }
+ return ReportUnexpectedToken(token);
+}
+
+template <typename Char>
+JsonParser<Char>::~JsonParser() {
+ if (StringShape(*source_).IsExternal()) {
+ // Check that the string shape hasn't changed. Otherwise our GC hooks are
+ // broken.
+ SeqExternalString::cast(*source_);
+ } else {
+ // Check that the string shape hasn't changed. Otherwise our GC hooks are
+ // broken.
+ SeqString::cast(*source_);
+ isolate()->heap()->RemoveGCEpilogueCallback(UpdatePointersCallback, this);
+ }
+}
+
+template <typename Char>
+MaybeHandle<Object> JsonParser<Char>::ParseJson() {
+ MaybeHandle<Object> result = ParseJsonValue();
+ if (!Check(JsonToken::EOS)) ReportUnexpectedToken(peek());
+ if (isolate_->has_pending_exception()) return MaybeHandle<Object>();
+ return result;
+}
+
+MaybeHandle<Object> InternalizeJsonProperty(Handle<JSObject> holder,
+ Handle<String> key);
+
+template <typename Char>
+void JsonParser<Char>::SkipWhitespace() {
+ next_ = JsonToken::EOS;
+
+ cursor_ = std::find_if(cursor_, end_, [this](Char c) {
+ JsonToken current = V8_LIKELY(c <= unibrow::Latin1::kMaxChar)
+ ? one_char_json_tokens[c]
+ : JsonToken::ILLEGAL;
+ bool result = current != JsonToken::WHITESPACE;
+ if (result) next_ = current;
+ return result;
+ });
+}
+
+template <typename Char>
+uc32 JsonParser<Char>::ScanUnicodeCharacter() {
+ uc32 value = 0;
+ for (int i = 0; i < 4; i++) {
+ int digit = HexValue(NextCharacter());
+ if (V8_UNLIKELY(digit < 0)) return -1;
+ value = value * 16 + digit;
+ }
+ return value;
+}
+
+// Scan a JSON property key, recognizing array-index keys.
+template <typename Char>
+JsonString JsonParser<Char>::ScanJsonPropertyKey(JsonContinuation* cont) {
+ {
+ DisallowHeapAllocation no_gc;
+ const Char* start = cursor_;
+ uc32 first = CurrentCharacter();
+ if (first == '\\' && NextCharacter() == 'u') first = ScanUnicodeCharacter();
+ if (IsDecimalDigit(first)) {
+ if (first == '0') {
+ if (NextCharacter() == '"') {
+ advance();
+ // Record element information.
+ cont->elements++;
+ DCHECK_LE(0, cont->max_index);
+ return JsonString(0);
+ }
+ } else {
+ uint32_t index = first - '0';
+ while (true) {
+ cursor_ = std::find_if(cursor_ + 1, end_, [&index](Char c) {
+ return !TryAddIndexChar(&index, c);
+ });
+
+ if (CurrentCharacter() == '"') {
+ advance();
+ // Record element information.
+ cont->elements++;
+ cont->max_index = Max(cont->max_index, index);
+ return JsonString(index);
+ }
+
+ if (CurrentCharacter() == '\\' && NextCharacter() == 'u') {
+ if (TryAddIndexChar(&index, ScanUnicodeCharacter())) continue;
+ }
+
+ break;
+ }
+ }
+ }
+ // Reset cursor_ to start if the key is not an index.
+ cursor_ = start;
+ }
+ return ScanJsonString(true);
+}
+
+namespace {
+Handle<Map> ParentOfDescriptorOwner(Isolate* isolate, Handle<Map> maybe_root,
+ Handle<Map> source, int descriptor) {
+ if (descriptor == 0) {
+ DCHECK_EQ(0, maybe_root->NumberOfOwnDescriptors());
+ return maybe_root;
+ }
+ return handle(source->FindFieldOwner(isolate, descriptor - 1), isolate);
+}
+} // namespace
+
+template <typename Char>
+Handle<Object> JsonParser<Char>::BuildJsonObject(
+ const JsonContinuation& cont,
+ const std::vector<JsonProperty>& property_stack, Handle<Map> feedback) {
+ size_t start = cont.index;
+ int length = static_cast<int>(property_stack.size() - start);
+ int named_length = length - cont.elements;
+
+ Handle<Map> initial_map = factory()->ObjectLiteralMapFromCache(
+ isolate_->native_context(), named_length);
+
+ Handle<Map> map = initial_map;
+
+ Handle<FixedArrayBase> elements = factory()->empty_fixed_array();
+
+ // First store the elements.
+ if (cont.elements > 0) {
+ // Store as dictionary elements if that would use less memory.
+ if (ShouldConvertToSlowElements(cont.elements, cont.max_index + 1)) {
+ Handle<NumberDictionary> elms =
+ NumberDictionary::New(isolate_, cont.elements);
+ for (int i = 0; i < length; i++) {
+ const JsonProperty& property = property_stack[start + i];
+ if (!property.string.is_index()) continue;
+ uint32_t index = property.string.index();
+ Handle<Object> value = property.value;
+ elms = NumberDictionary::Set(isolate_, elms, index, value);
+ }
+ map = Map::AsElementsKind(isolate_, map, DICTIONARY_ELEMENTS);
+ elements = elms;
+ } else {
+ Handle<FixedArray> elms =
+ factory()->NewFixedArrayWithHoles(cont.max_index + 1);
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ DCHECK_EQ(HOLEY_ELEMENTS, map->elements_kind());
+
+ for (int i = 0; i < length; i++) {
+ const JsonProperty& property = property_stack[start + i];
+ if (!property.string.is_index()) continue;
+ uint32_t index = property.string.index();
+ Handle<Object> value = property.value;
+ elms->set(static_cast<int>(index), *value, mode);
+ }
+ elements = elms;
+ }
+ }
+
+ int feedback_descriptors =
+ (feedback.is_null() ||
+ feedback->elements_kind() != map->elements_kind() ||
+ feedback->instance_size() != map->instance_size())
+ ? 0
+ : feedback->NumberOfOwnDescriptors();
+
+ int i;
+ int descriptor = 0;
+ int new_mutable_double = 0;
+ for (i = 0; i < length; i++) {
+ const JsonProperty& property = property_stack[start + i];
+ if (property.string.is_index()) continue;
+ Handle<String> expected;
+ Handle<Map> target;
+ if (descriptor < feedback_descriptors) {
+ expected = handle(
+ String::cast(feedback->instance_descriptors().GetKey(descriptor)),
+ isolate_);
+ } else {
+ DisallowHeapAllocation no_gc;
+ TransitionsAccessor transitions(isolate(), *map, &no_gc);
+ expected = transitions.ExpectedTransitionKey();
+ if (!expected.is_null()) {
+ // Directly read out the target while reading out the key, otherwise it
+ // might die while building the string below.
+ target = TransitionsAccessor(isolate(), *map, &no_gc)
+ .ExpectedTransitionTarget();
+ }
+ }
+
+ Handle<String> key = MakeString(property.string, expected);
+ if (key.is_identical_to(expected)) {
+ if (descriptor < feedback_descriptors) target = feedback;
+ } else {
+ if (descriptor < feedback_descriptors) {
+ map = ParentOfDescriptorOwner(isolate_, map, feedback, descriptor);
+ feedback_descriptors = 0;
+ }
+ if (!TransitionsAccessor(isolate(), map)
+ .FindTransitionToField(key)
+ .ToHandle(&target)) {
+ break;
+ }
+ }
+
+ Handle<Object> value = property.value;
+
+ PropertyDetails details =
+ target->instance_descriptors().GetDetails(descriptor);
+ Representation expected_representation = details.representation();
+
+ if (!value->FitsRepresentation(expected_representation)) {
+ Representation representation = value->OptimalRepresentation();
+ representation = representation.generalize(expected_representation);
+ if (!expected_representation.CanBeInPlaceChangedTo(representation)) {
+ map = ParentOfDescriptorOwner(isolate_, map, target, descriptor);
+ break;
+ }
+ Handle<FieldType> value_type =
+ value->OptimalType(isolate(), representation);
+ Map::GeneralizeField(isolate(), target, descriptor, details.constness(),
+ representation, value_type);
+ } else if (expected_representation.IsHeapObject() &&
+ !target->instance_descriptors()
+ .GetFieldType(descriptor)
+ .NowContains(value)) {
+ Handle<FieldType> value_type =
+ value->OptimalType(isolate(), expected_representation);
+ Map::GeneralizeField(isolate(), target, descriptor, details.constness(),
+ expected_representation, value_type);
+ } else if (!FLAG_unbox_double_fields &&
+ expected_representation.IsDouble() && value->IsSmi()) {
+ new_mutable_double++;
+ }
+
+ DCHECK(target->instance_descriptors()
+ .GetFieldType(descriptor)
+ .NowContains(value));
+ map = target;
+ descriptor++;
+ }
+
+ // Fast path: Write all transitioned named properties.
+ if (i == length && descriptor < feedback_descriptors) {
+ map = ParentOfDescriptorOwner(isolate_, map, map, descriptor);
+ }
+
+ // Preallocate all mutable heap numbers so we don't need to allocate while
+ // setting up the object. Otherwise verification of that object may fail.
+ Handle<ByteArray> mutable_double_buffer;
+ // Allocate enough space so we can double-align the payload.
+ const int kMutableDoubleSize = sizeof(double) * 2;
+ STATIC_ASSERT(MutableHeapNumber::kSize <= kMutableDoubleSize);
+ if (new_mutable_double > 0) {
+ mutable_double_buffer =
+ factory()->NewByteArray(kMutableDoubleSize * new_mutable_double);
+ }
+
+ Handle<JSObject> object = initial_map->is_dictionary_map()
+ ? factory()->NewSlowJSObjectFromMap(map)
+ : factory()->NewJSObjectFromMap(map);
+ object->set_elements(*elements);
+
+ {
+ descriptor = 0;
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = object->GetWriteBarrierMode(no_gc);
+ Address mutable_double_address =
+ mutable_double_buffer.is_null()
+ ? 0
+ : reinterpret_cast<Address>(
+ mutable_double_buffer->GetDataStartAddress());
+ Address filler_address = mutable_double_address;
+ if (IsAligned(mutable_double_address, kDoubleAlignment)) {
+ mutable_double_address += kTaggedSize;
+ } else {
+ filler_address += MutableHeapNumber::kSize;
+ }
+ for (int j = 0; j < i; j++) {
+ const JsonProperty& property = property_stack[start + j];
+ if (property.string.is_index()) continue;
+ PropertyDetails details =
+ map->instance_descriptors().GetDetails(descriptor);
+ Object value = *property.value;
+ FieldIndex index = FieldIndex::ForDescriptor(*map, descriptor);
+ descriptor++;
+
+ if (details.representation().IsDouble()) {
+ if (object->IsUnboxedDoubleField(index)) {
+ uint64_t bits;
+ if (value.IsSmi()) {
+ bits = bit_cast<uint64_t>(static_cast<double>(Smi::ToInt(value)));
+ } else {
+ DCHECK(value.IsHeapNumber());
+ bits = HeapNumber::cast(value).value_as_bits();
+ }
+ object->RawFastDoublePropertyAsBitsAtPut(index, bits);
+ continue;
+ }
+
+ if (value.IsSmi()) {
+ if (kTaggedSize != kDoubleSize) {
+ // Write alignment filler.
+ HeapObject filler = HeapObject::FromAddress(filler_address);
+ filler.set_map_after_allocation(
+ *factory()->one_pointer_filler_map());
+ filler_address += kMutableDoubleSize;
+ }
+
+ uint64_t bits =
+ bit_cast<uint64_t>(static_cast<double>(Smi::ToInt(value)));
+ // Allocate simple heapnumber with immortal map, with non-pointer
+ // payload, so we can skip notifying object layout change.
+
+ HeapObject hn = HeapObject::FromAddress(mutable_double_address);
+ hn.set_map_after_allocation(*factory()->mutable_heap_number_map());
+ MutableHeapNumber::cast(hn).set_value_as_bits(bits);
+ value = hn;
+ mutable_double_address += kMutableDoubleSize;
+ } else {
+ DCHECK(value.IsHeapNumber());
+ HeapObject::cast(value).synchronized_set_map(
+ *factory()->mutable_heap_number_map());
+ }
+ }
+ object->RawFastInobjectPropertyAtPut(index, value, mode);
+ }
+ // Make all MutableHeapNumbers alive.
+ if (!mutable_double_buffer.is_null()) {
+#ifdef DEBUG
+ Address end =
+ reinterpret_cast<Address>(mutable_double_buffer->GetDataEndAddress());
+ DCHECK_EQ(Min(filler_address, mutable_double_address), end);
+ DCHECK_GE(filler_address, end);
+ DCHECK_GE(mutable_double_address, end);
+#endif
+ mutable_double_buffer->set_length(0);
+ }
+ }
+
+ // Slow path: define remaining named properties.
+ for (; i < length; i++) {
+ HandleScope scope(isolate_);
+ const JsonProperty& property = property_stack[start + i];
+ if (property.string.is_index()) continue;
+ Handle<String> key = MakeString(property.string);
+#ifdef DEBUG
+ uint32_t index;
+ DCHECK(!key->AsArrayIndex(&index));
+#endif
+ Handle<Object> value = property.value;
+ LookupIterator it(isolate_, object, key, object, LookupIterator::OWN);
+ JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, NONE).Check();
+ }
+
+ return object;
+}
+
+template <typename Char>
+Handle<Object> JsonParser<Char>::BuildJsonArray(
+ const JsonContinuation& cont,
+ const std::vector<Handle<Object>>& element_stack) {
+ size_t start = cont.index;
+ int length = static_cast<int>(element_stack.size() - start);
+
+ ElementsKind kind = PACKED_SMI_ELEMENTS;
+ for (size_t i = start; i < element_stack.size(); i++) {
+ Object value = *element_stack[i];
+ if (value.IsHeapObject()) {
+ if (HeapObject::cast(value).IsHeapNumber()) {
+ kind = PACKED_DOUBLE_ELEMENTS;
+ } else {
+ kind = PACKED_ELEMENTS;
+ break;
+ }
+ }
+ }
+
+ Handle<JSArray> array = factory()->NewJSArray(kind, length, length);
+ if (kind == PACKED_DOUBLE_ELEMENTS) {
+ DisallowHeapAllocation no_gc;
+ FixedDoubleArray elements = FixedDoubleArray::cast(array->elements());
+ for (int i = 0; i < length; i++) {
+ elements.set(i, element_stack[start + i]->Number());
+ }
+ } else {
+ DisallowHeapAllocation no_gc;
+ FixedArray elements = FixedArray::cast(array->elements());
+ WriteBarrierMode mode = kind == PACKED_SMI_ELEMENTS
+ ? SKIP_WRITE_BARRIER
+ : elements.GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < length; i++) {
+ elements.set(i, *element_stack[start + i], mode);
+ }
+ }
+ return array;
+}
+
+// Parse any JSON value.
+template <typename Char>
+MaybeHandle<Object> JsonParser<Char>::ParseJsonValue() {
+ std::vector<JsonContinuation> cont_stack;
+ std::vector<JsonProperty> property_stack;
+ std::vector<Handle<Object>> element_stack;
+
+ cont_stack.reserve(16);
+ property_stack.reserve(16);
+ element_stack.reserve(16);
+
+ JsonContinuation cont(isolate_, JsonContinuation::kReturn, 0);
+
+ Handle<Object> value;
+ while (true) {
+ // Produce a json value.
+ //
+ // Iterate until a value is produced. Starting but not immediately finishing
+ // objects and arrays will cause the loop to continue until a first member
+ // is completed.
+ while (true) {
+ SkipWhitespace();
+ // The switch is immediately followed by 'break' so we can use 'break' to
+ // break out of the loop, and 'continue' to continue the loop.
+ switch (peek()) {
+ case JsonToken::STRING:
+ Consume(JsonToken::STRING);
+ value = MakeString(ScanJsonString(false));
+ break;
+
+ case JsonToken::NUMBER:
+ value = ParseJsonNumber();
+ break;
+
+ case JsonToken::LBRACE: {
+ Consume(JsonToken::LBRACE);
+ if (Check(JsonToken::RBRACE)) {
+ // TODO(verwaest): Directly use the map instead.
+ value = factory()->NewJSObject(object_constructor_);
+ break;
+ }
+
+ // Start parsing an object with properties.
+ cont_stack.emplace_back(std::move(cont));
+ cont = JsonContinuation(isolate_, JsonContinuation::kObjectProperty,
+ property_stack.size());
+
+ // Parse the property key.
+ ExpectNext(JsonToken::STRING);
+ property_stack.emplace_back(ScanJsonPropertyKey(&cont));
+
+ ExpectNext(JsonToken::COLON);
+
+ // Continue to start producing the first property value.
+ continue;
+ }
+
+ case JsonToken::LBRACK:
+ Consume(JsonToken::LBRACK);
+ if (Check(JsonToken::RBRACK)) {
+ value = factory()->NewJSArray(0, PACKED_SMI_ELEMENTS);
+ break;
+ }
+
+ // Start parsing an array with elements.
+ cont_stack.emplace_back(std::move(cont));
+ cont = JsonContinuation(isolate_, JsonContinuation::kArrayElement,
+ element_stack.size());
+
+ // Continue to start producing the first array element.
+ continue;
+
+ case JsonToken::TRUE_LITERAL:
+ ScanLiteral("true");
+ value = factory()->true_value();
+ break;
+
+ case JsonToken::FALSE_LITERAL:
+ ScanLiteral("false");
+ value = factory()->false_value();
+ break;
+
+ case JsonToken::NULL_LITERAL:
+ ScanLiteral("null");
+ value = factory()->null_value();
+ break;
+
+ case JsonToken::COLON:
+ case JsonToken::COMMA:
+ case JsonToken::ILLEGAL:
+ case JsonToken::RBRACE:
+ case JsonToken::RBRACK:
+ case JsonToken::EOS:
+ ReportUnexpectedCharacter(CurrentCharacter());
+ // Pop the continuation stack to correctly tear down handle scopes.
+ while (!cont_stack.empty()) {
+ cont = std::move(cont_stack.back());
+ cont_stack.pop_back();
+ }
+ return MaybeHandle<Object>();
+
+ case JsonToken::WHITESPACE:
+ UNREACHABLE();
+ }
+
+ // Done producing a value, consume it.
+ break;
+ }
+
+ // Consume a produced json value.
+ //
+ // Iterate as long as values are produced (arrays or object literals are
+ // finished).
+ while (true) {
+ // The switch is immediately followed by 'break' so we can use 'break' to
+ // break out of the loop, and 'continue' to continue the loop.
+ switch (cont.type()) {
+ case JsonContinuation::kReturn:
+ return cont.scope.CloseAndEscape(value);
+
+ case JsonContinuation::kObjectProperty: {
+ // Store the previous property value into its property info.
+ property_stack.back().value = value;
+
+ if (V8_LIKELY(Check(JsonToken::COMMA))) {
+ // Parse the property key.
+ ExpectNext(JsonToken::STRING);
+
+ property_stack.emplace_back(ScanJsonPropertyKey(&cont));
+ ExpectNext(JsonToken::COLON);
+
+ // Break to start producing the subsequent property value.
+ break;
+ }
+
+ Handle<Map> feedback;
+ if (cont_stack.size() > 0 &&
+ cont_stack.back().type() == JsonContinuation::kArrayElement &&
+ cont_stack.back().index < element_stack.size() &&
+ element_stack.back()->IsJSObject()) {
+ feedback =
+ handle(JSObject::cast(*element_stack.back()).map(), isolate_);
+ }
+ value = BuildJsonObject(cont, property_stack, feedback);
+ property_stack.resize(cont.index);
+ Expect(JsonToken::RBRACE);
+
+ // Return the object.
+ value = cont.scope.CloseAndEscape(value);
+ // Pop the continuation.
+ cont = std::move(cont_stack.back());
+ cont_stack.pop_back();
+          // Consume the produced object.
+ continue;
+ }
+
+ case JsonContinuation::kArrayElement: {
+ // Store the previous element on the stack.
+ element_stack.emplace_back(value);
+ // Break to start producing the subsequent element value.
+ if (V8_LIKELY(Check(JsonToken::COMMA))) break;
+
+ value = BuildJsonArray(cont, element_stack);
+ element_stack.resize(cont.index);
+ Expect(JsonToken::RBRACK);
+
+ // Return the array.
+ value = cont.scope.CloseAndEscape(value);
+ // Pop the continuation.
+ cont = std::move(cont_stack.back());
+ cont_stack.pop_back();
+ // Consume the produced array.
+ continue;
+ }
+ }
+
+ // Done consuming a value. Produce next value.
+ break;
+ }
+ }
+}
+
+template <typename Char>
+void JsonParser<Char>::AdvanceToNonDecimal() {
+ cursor_ =
+ std::find_if(cursor_, end_, [](Char c) { return !IsDecimalDigit(c); });
+}
+
+template <typename Char>
+Handle<Object> JsonParser<Char>::ParseJsonNumber() {
+ double number;
+ int sign = 1;
+
+ {
+ const Char* start = cursor_;
+ DisallowHeapAllocation no_gc;
+
+ uc32 c = *cursor_;
+ if (c == '-') {
+ sign = -1;
+ c = NextCharacter();
+ }
+
+ if (c == '0') {
+ // Prefix zero is only allowed if it's the only digit before
+ // a decimal point or exponent.
+ c = NextCharacter();
+ if (IsInRange(c, 0, static_cast<int32_t>(unibrow::Latin1::kMaxChar)) &&
+ IsNumberPart(character_json_scan_flags[c])) {
+ if (V8_UNLIKELY(IsDecimalDigit(c))) {
+ AllowHeapAllocation allow_before_exception;
+ ReportUnexpectedToken(JsonToken::NUMBER);
+ return handle(Smi::FromInt(0), isolate_);
+ }
+ } else if (sign > 0) {
+ return handle(Smi::FromInt(0), isolate_);
+ }
+ } else {
+ const Char* smi_start = cursor_;
+ AdvanceToNonDecimal();
+ if (V8_UNLIKELY(smi_start == cursor_)) {
+ AllowHeapAllocation allow_before_exception;
+ ReportUnexpectedCharacter(CurrentCharacter());
+ return handle(Smi::FromInt(0), isolate_);
+ }
+ uc32 c = CurrentCharacter();
+ STATIC_ASSERT(Smi::IsValid(-999999999));
+ STATIC_ASSERT(Smi::IsValid(999999999));
+ const int kMaxSmiLength = 9;
+ if ((cursor_ - smi_start) <= kMaxSmiLength &&
+ (!IsInRange(c, 0, static_cast<int32_t>(unibrow::Latin1::kMaxChar)) ||
+ !IsNumberPart(character_json_scan_flags[c]))) {
+ // Smi.
+ int32_t i = 0;
+ for (; smi_start != cursor_; smi_start++) {
+ DCHECK(IsDecimalDigit(*smi_start));
+ i = (i * 10) + ((*smi_start) - '0');
+ }
+ // TODO(verwaest): Cache?
+ return handle(Smi::FromInt(i * sign), isolate_);
+ }
+ }
+
+ if (CurrentCharacter() == '.') {
+ uc32 c = NextCharacter();
+ if (!IsDecimalDigit(c)) {
+ AllowHeapAllocation allow_before_exception;
+ ReportUnexpectedCharacter(c);
+ return handle(Smi::FromInt(0), isolate_);
+ }
+ AdvanceToNonDecimal();
+ }
+
+ if (AsciiAlphaToLower(CurrentCharacter()) == 'e') {
+ uc32 c = NextCharacter();
+ if (c == '-' || c == '+') c = NextCharacter();
+ if (!IsDecimalDigit(c)) {
+ AllowHeapAllocation allow_before_exception;
+ ReportUnexpectedCharacter(c);
+ return handle(Smi::FromInt(0), isolate_);
+ }
+ AdvanceToNonDecimal();
+ }
+
+ Vector<const Char> chars(start, cursor_ - start);
+ number = StringToDouble(chars,
+ NO_FLAGS, // Hex, octal or trailing junk.
+ std::numeric_limits<double>::quiet_NaN());
+
+ DCHECK(!std::isnan(number));
+ }
+
+ return factory()->NewNumber(number);
+}
+
+namespace {
+
+template <typename Char>
+bool Matches(const Vector<const Char>& chars, Handle<String> string) {
+ DCHECK(!string.is_null());
+
+ if (chars.length() != string->length()) return false;
+
+ DisallowHeapAllocation no_gc;
+ if (string->IsOneByteRepresentation()) {
+ const uint8_t* string_data = string->GetChars<uint8_t>(no_gc);
+ return CompareChars(chars.begin(), string_data, chars.length()) == 0;
+ }
+ const uint16_t* string_data = string->GetChars<uint16_t>(no_gc);
+ return CompareChars(chars.begin(), string_data, chars.length()) == 0;
+}
+
+} // namespace
+
+template <typename Char>
+template <typename SinkSeqString>
+Handle<String> JsonParser<Char>::DecodeString(
+ const JsonString& string, Handle<SinkSeqString> intermediate,
+ Handle<String> hint) {
+ using SinkChar = typename SinkSeqString::Char;
+ {
+ DisallowHeapAllocation no_gc;
+ SinkChar* dest = intermediate->GetChars(no_gc);
+ if (!string.has_escape()) {
+ DCHECK(!string.internalize());
+ CopyChars(dest, chars_ + string.start(), string.length());
+ return intermediate;
+ }
+ DecodeString(dest, string.start(), string.length());
+
+ if (!string.internalize()) return intermediate;
+
+ Vector<const SinkChar> data(dest, string.length());
+ if (!hint.is_null() && Matches(data, hint)) return hint;
+ }
+
+ return factory()->InternalizeString(intermediate, 0, string.length());
+}
+
+template <typename Char>
+Handle<String> JsonParser<Char>::MakeString(const JsonString& string,
+ Handle<String> hint) {
+ if (string.length() == 0) return factory()->empty_string();
+
+ if (string.internalize() && !string.has_escape()) {
+ if (!hint.is_null()) {
+ Vector<const Char> data(chars_ + string.start(), string.length());
+ if (Matches(data, hint)) return hint;
+ }
+ if (chars_may_relocate_) {
+ return factory()->InternalizeString(Handle<SeqString>::cast(source_),
+ string.start(), string.length(),
+ string.needs_conversion());
+ }
+ Vector<const Char> chars(chars_ + string.start(), string.length());
+ return factory()->InternalizeString(chars, string.needs_conversion());
+ }
+
+ if (sizeof(Char) == 1 ? V8_LIKELY(!string.needs_conversion())
+ : string.needs_conversion()) {
+ Handle<SeqOneByteString> intermediate =
+ factory()->NewRawOneByteString(string.length()).ToHandleChecked();
+ return DecodeString(string, intermediate, hint);
+ }
+
+ Handle<SeqTwoByteString> intermediate =
+ factory()->NewRawTwoByteString(string.length()).ToHandleChecked();
+ return DecodeString(string, intermediate, hint);
+}
+
+template <typename Char>
+template <typename SinkChar>
+void JsonParser<Char>::DecodeString(SinkChar* sink, int start, int length) {
+ SinkChar* sink_start = sink;
+ const Char* cursor = chars_ + start;
+ while (true) {
+ const Char* end = cursor + length - (sink - sink_start);
+ cursor = std::find_if(cursor, end, [&sink](Char c) {
+ if (c == '\\') return true;
+ *sink++ = c;
+ return false;
+ });
+
+ if (cursor == end) return;
+
+ cursor++;
+
+ switch (GetEscapeKind(character_json_scan_flags[*cursor])) {
+ case EscapeKind::kSelf:
+ *sink++ = *cursor;
+ break;
+
+ case EscapeKind::kBackspace:
+ *sink++ = '\x08';
+ break;
+
+ case EscapeKind::kTab:
+ *sink++ = '\x09';
+ break;
+
+ case EscapeKind::kNewLine:
+ *sink++ = '\x0A';
+ break;
+
+ case EscapeKind::kFormFeed:
+ *sink++ = '\x0C';
+ break;
+
+ case EscapeKind::kCarriageReturn:
+ *sink++ = '\x0D';
+ break;
+
+ case EscapeKind::kUnicode: {
+ uc32 value = 0;
+ for (int i = 0; i < 4; i++) {
+ value = value * 16 + HexValue(*++cursor);
+ }
+ if (value <=
+ static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+ *sink++ = value;
+ } else {
+ *sink++ = unibrow::Utf16::LeadSurrogate(value);
+ *sink++ = unibrow::Utf16::TrailSurrogate(value);
+ }
+ break;
+ }
+
+ case EscapeKind::kIllegal:
+ UNREACHABLE();
+ }
+ cursor++;
+ }
+}
+
+template <typename Char>
+JsonString JsonParser<Char>::ScanJsonString(bool needs_internalization) {
+ DisallowHeapAllocation no_gc;
+ int start = position();
+ int offset = start;
+ bool has_escape = false;
+ uc32 bits = 0;
+
+ while (true) {
+ cursor_ = std::find_if(cursor_, end_, [&bits](Char c) {
+ if (sizeof(Char) == 2 && V8_UNLIKELY(c > unibrow::Latin1::kMaxChar)) {
+ bits |= c;
+ return false;
+ }
+ return MayTerminateJsonString(character_json_scan_flags[c]);
+ });
+
+ if (V8_UNLIKELY(is_at_end())) {
+ AllowHeapAllocation allow_before_exception;
+ ReportUnexpectedCharacter(kEndOfString);
+ break;
+ }
+
+ if (*cursor_ == '"') {
+ int end = position();
+ advance();
+ int length = end - offset;
+ bool convert = sizeof(Char) == 1 ? bits > unibrow::Latin1::kMaxChar
+ : bits <= unibrow::Latin1::kMaxChar;
+ return JsonString(start, length, convert, needs_internalization,
+ has_escape);
+ }
+
+ if (*cursor_ == '\\') {
+ has_escape = true;
+ uc32 c = NextCharacter();
+ if (V8_UNLIKELY(!IsInRange(
+ c, 0, static_cast<int32_t>(unibrow::Latin1::kMaxChar)))) {
+ AllowHeapAllocation allow_before_exception;
+ ReportUnexpectedCharacter(c);
+ break;
+ }
+
+ switch (GetEscapeKind(character_json_scan_flags[c])) {
+ case EscapeKind::kSelf:
+ case EscapeKind::kBackspace:
+ case EscapeKind::kTab:
+ case EscapeKind::kNewLine:
+ case EscapeKind::kFormFeed:
+ case EscapeKind::kCarriageReturn:
+ offset += 1;
+ break;
+
+ case EscapeKind::kUnicode: {
+ uc32 value = ScanUnicodeCharacter();
+ if (value == -1) {
+ AllowHeapAllocation allow_before_exception;
+ ReportUnexpectedCharacter(CurrentCharacter());
+ return JsonString();
+ }
+ bits |= value;
+ // \uXXXX results in either 1 or 2 Utf16 characters, depending on
+ // whether the decoded value requires a surrogate pair.
+ offset += 5 - (value > static_cast<uc32>(
+ unibrow::Utf16::kMaxNonSurrogateCharCode));
+ break;
+ }
+
+ case EscapeKind::kIllegal:
+ AllowHeapAllocation allow_before_exception;
+ ReportUnexpectedCharacter(c);
+ return JsonString();
+ }
+
+ advance();
+ continue;
+ }
+
+ DCHECK_LT(*cursor_, 0x20);
+ AllowHeapAllocation allow_before_exception;
+ ReportUnexpectedCharacter(*cursor_);
+ break;
+ }
+
+ return JsonString();
+}
+
+// Explicit instantiation.
+template class JsonParser<uint8_t>;
+template class JsonParser<uint16_t>;
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/json/json-parser.h b/deps/v8/src/json/json-parser.h
new file mode 100644
index 0000000000..5ee1499b36
--- /dev/null
+++ b/deps/v8/src/json/json-parser.h
@@ -0,0 +1,358 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_JSON_JSON_PARSER_H_
+#define V8_JSON_JSON_PARSER_H_
+
+#include "src/execution/isolate.h"
+#include "src/heap/factory.h"
+#include "src/objects/objects.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+enum ParseElementResult { kElementFound, kElementNotFound };
+
+class JsonString final {
+ public:
+ JsonString()
+ : start_(0),
+ length_(0),
+ needs_conversion_(false),
+ internalize_(false),
+ has_escape_(false),
+ is_index_(false) {}
+
+ explicit JsonString(uint32_t index)
+ : index_(index),
+ length_(0),
+ needs_conversion_(false),
+ internalize_(false),
+ has_escape_(false),
+ is_index_(true) {}
+
+ JsonString(int start, int length, bool needs_conversion,
+ bool needs_internalization, bool has_escape)
+ : start_(start),
+ length_(length),
+ needs_conversion_(needs_conversion),
+ internalize_(needs_internalization ||
+ length_ <= kMaxInternalizedStringValueLength),
+ has_escape_(has_escape),
+ is_index_(false) {}
+
+ bool internalize() const {
+ DCHECK(!is_index_);
+ return internalize_;
+ }
+
+ bool needs_conversion() const {
+ DCHECK(!is_index_);
+ return needs_conversion_;
+ }
+
+ bool has_escape() const {
+ DCHECK(!is_index_);
+ return has_escape_;
+ }
+
+ int start() const {
+ DCHECK(!is_index_);
+ return start_;
+ }
+
+ int length() const {
+ DCHECK(!is_index_);
+ return length_;
+ }
+
+ uint32_t index() const {
+ DCHECK(is_index_);
+ return index_;
+ }
+
+ bool is_index() const { return is_index_; }
+
+ private:
+ static const int kMaxInternalizedStringValueLength = 10;
+
+ union {
+ const int start_;
+ const uint32_t index_;
+ };
+ const int length_;
+ const bool needs_conversion_ : 1;
+ const bool internalize_ : 1;
+ const bool has_escape_ : 1;
+ const bool is_index_ : 1;
+};
+
+struct JsonProperty {
+ JsonProperty() { UNREACHABLE(); }
+ explicit JsonProperty(const JsonString& string) : string(string) {}
+
+ JsonString string;
+ Handle<Object> value;
+};
+
+class JsonParseInternalizer {
+ public:
+ static MaybeHandle<Object> Internalize(Isolate* isolate,
+ Handle<Object> object,
+ Handle<Object> reviver);
+
+ private:
+ JsonParseInternalizer(Isolate* isolate, Handle<JSReceiver> reviver)
+ : isolate_(isolate), reviver_(reviver) {}
+
+ MaybeHandle<Object> InternalizeJsonProperty(Handle<JSReceiver> holder,
+ Handle<String> key);
+
+ bool RecurseAndApply(Handle<JSReceiver> holder, Handle<String> name);
+
+ Isolate* isolate_;
+ Handle<JSReceiver> reviver_;
+};
+
+enum class JsonToken : uint8_t {
+ NUMBER,
+ STRING,
+ LBRACE,
+ RBRACE,
+ LBRACK,
+ RBRACK,
+ TRUE_LITERAL,
+ FALSE_LITERAL,
+ NULL_LITERAL,
+ WHITESPACE,
+ COLON,
+ COMMA,
+ ILLEGAL,
+ EOS
+};
+
+// A simple json parser.
+template <typename Char>
+class JsonParser final {
+ public:
+ using SeqString = typename CharTraits<Char>::String;
+ using SeqExternalString = typename CharTraits<Char>::ExternalString;
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Parse(
+ Isolate* isolate, Handle<String> source, Handle<Object> reviver) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+ JsonParser(isolate, source).ParseJson(), Object);
+ if (reviver->IsCallable()) {
+ return JsonParseInternalizer::Internalize(isolate, result, reviver);
+ }
+ return result;
+ }
+
+ static const int kEndOfString = -1;
+
+ private:
+ struct JsonContinuation {
+ enum Type : uint8_t { kReturn, kObjectProperty, kArrayElement };
+ JsonContinuation(Isolate* isolate, Type type, size_t index)
+ : scope(isolate),
+ type_(type),
+ index(static_cast<uint32_t>(index)),
+ max_index(0),
+ elements(0) {}
+
+ Type type() const { return static_cast<Type>(type_); }
+ void set_type(Type type) { type_ = static_cast<uint8_t>(type); }
+
+ HandleScope scope;
+ // Unfortunately GCC doesn't like packing Type in two bits.
+ uint32_t type_ : 2;
+ uint32_t index : 30;
+ uint32_t max_index;
+ uint32_t elements;
+ };
+
+ JsonParser(Isolate* isolate, Handle<String> source);
+ ~JsonParser();
+
+ // Parse a string containing a single JSON value.
+ MaybeHandle<Object> ParseJson();
+
+ void advance() { ++cursor_; }
+
+ uc32 CurrentCharacter() {
+ if (V8_UNLIKELY(is_at_end())) return kEndOfString;
+ return *cursor_;
+ }
+
+ uc32 NextCharacter() {
+ advance();
+ return CurrentCharacter();
+ }
+
+ void AdvanceToNonDecimal();
+
+ V8_INLINE JsonToken peek() const { return next_; }
+
+ void Consume(JsonToken token) {
+ DCHECK_EQ(peek(), token);
+ advance();
+ }
+
+ void Expect(JsonToken token) {
+ if (V8_LIKELY(peek() == token)) {
+ advance();
+ } else {
+ ReportUnexpectedToken(peek());
+ }
+ }
+
+ void ExpectNext(JsonToken token) {
+ SkipWhitespace();
+ Expect(token);
+ }
+
+ bool Check(JsonToken token) {
+ SkipWhitespace();
+ if (next_ != token) return false;
+ advance();
+ return true;
+ }
+
+ template <size_t N>
+ void ScanLiteral(const char (&s)[N]) {
+ DCHECK(!is_at_end());
+ // There's at least 1 character, we always consume a character and compare
+ // the next character. The first character was compared before we jumped
+ // to ScanLiteral.
+ STATIC_ASSERT(N > 2);
+ size_t remaining = static_cast<size_t>(end_ - cursor_);
+ if (V8_LIKELY(remaining >= N - 1 &&
+ CompareChars(s + 1, cursor_ + 1, N - 2) == 0)) {
+ cursor_ += N - 1;
+ return;
+ }
+
+ cursor_++;
+ for (size_t i = 0; i < Min(N - 2, remaining - 1); i++) {
+ if (*(s + 1 + i) != *cursor_) {
+ ReportUnexpectedCharacter(*cursor_);
+ return;
+ }
+ cursor_++;
+ }
+
+ DCHECK(is_at_end());
+ ReportUnexpectedToken(JsonToken::EOS);
+ }
+
+ // The JSON lexical grammar is specified in the ECMAScript 5 standard,
+ // section 15.12.1.1. The only allowed whitespace characters between tokens
+ // are tab, carriage-return, newline and space.
+ void SkipWhitespace();
+
+ // A JSON string (production JSONString) is subset of valid JavaScript string
+ // literals. The string must only be double-quoted (not single-quoted), and
+ // the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
+ // four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
+ JsonString ScanJsonString(bool needs_internalization);
+ JsonString ScanJsonPropertyKey(JsonContinuation* cont);
+ uc32 ScanUnicodeCharacter();
+ Handle<String> MakeString(const JsonString& string,
+ Handle<String> hint = Handle<String>());
+
+ template <typename SinkChar>
+ void DecodeString(SinkChar* sink, int start, int length);
+
+ template <typename SinkSeqString>
+ Handle<String> DecodeString(const JsonString& string,
+ Handle<SinkSeqString> intermediate,
+ Handle<String> hint);
+
+ // A JSON number (production JSONNumber) is a subset of the valid JavaScript
+ // decimal number literals.
+ // It includes an optional minus sign, must have at least one
+ // digit before and after a decimal point, may not have prefixed zeros (unless
+ // the integer part is zero), and may include an exponent part (e.g., "e-10").
+ // Hexadecimal and octal numbers are not allowed.
+ Handle<Object> ParseJsonNumber();
+
+ // Parse a single JSON value from input (grammar production JSONValue).
+ // A JSON value is either a (double-quoted) string literal, a number literal,
+ // one of "true", "false", or "null", or an object or array literal.
+ MaybeHandle<Object> ParseJsonValue();
+
+ Handle<Object> BuildJsonObject(
+ const JsonContinuation& cont,
+ const std::vector<JsonProperty>& property_stack, Handle<Map> feedback);
+ Handle<Object> BuildJsonArray(
+ const JsonContinuation& cont,
+ const std::vector<Handle<Object>>& element_stack);
+
+ // Mark that a parsing error has happened at the current character.
+ void ReportUnexpectedCharacter(uc32 c);
+ // Mark that a parsing error has happened at the current token.
+ void ReportUnexpectedToken(JsonToken token);
+
+ inline Isolate* isolate() { return isolate_; }
+ inline Factory* factory() { return isolate_->factory(); }
+ inline Handle<JSFunction> object_constructor() { return object_constructor_; }
+
+ static const int kInitialSpecialStringLength = 32;
+
+ static void UpdatePointersCallback(v8::Isolate* v8_isolate, v8::GCType type,
+ v8::GCCallbackFlags flags, void* parser) {
+ reinterpret_cast<JsonParser<Char>*>(parser)->UpdatePointers();
+ }
+
+ void UpdatePointers() {
+ DisallowHeapAllocation no_gc;
+ const Char* chars = Handle<SeqString>::cast(source_)->GetChars(no_gc);
+ if (chars_ != chars) {
+ size_t position = cursor_ - chars_;
+ size_t length = end_ - chars_;
+ chars_ = chars;
+ cursor_ = chars_ + position;
+ end_ = chars_ + length;
+ }
+ }
+
+ private:
+ static const bool kIsOneByte = sizeof(Char) == 1;
+
+ bool is_at_end() const {
+ DCHECK_LE(cursor_, end_);
+ return cursor_ == end_;
+ }
+
+ int position() const { return static_cast<int>(cursor_ - chars_); }
+
+ Isolate* isolate_;
+ const uint64_t hash_seed_;
+ JsonToken next_;
+ // Indicates whether the bytes underneath source_ can relocate during GC.
+ bool chars_may_relocate_;
+ Handle<JSFunction> object_constructor_;
+ const Handle<String> original_source_;
+ Handle<String> source_;
+
+ // Cached pointer to the raw chars in source. In case source is on-heap, we
+ // register an UpdatePointers callback. For this reason, chars_, cursor_ and
+ // end_ should never be locally cached across a possible allocation. The scope
+ // in which we cache chars has to be guarded by a DisallowHeapAllocation
+ // scope.
+ const Char* cursor_;
+ const Char* end_;
+ const Char* chars_;
+};
+
+// Explicit instantiation declarations.
+extern template class JsonParser<uint8_t>;
+extern template class JsonParser<uint16_t>;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_JSON_JSON_PARSER_H_
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json/json-stringifier.cc
index b06de2e55b..2280292332 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json/json-stringifier.cc
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/json-stringifier.h"
+#include "src/json/json-stringifier.h"
-#include "src/conversions.h"
-#include "src/lookup.h"
-#include "src/message-template.h"
-#include "src/objects-inl.h"
+#include "src/execution/message-template.h"
+#include "src/numbers/conversions.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/smi.h"
-#include "src/string-builder-inl.h"
-#include "src/utils.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -322,21 +322,13 @@ MaybeHandle<Object> JsonStringifier::ApplyToJsonFunction(Handle<Object> object,
Handle<Object> key) {
HandleScope scope(isolate_);
- Handle<Object> object_for_lookup = object;
- if (object->IsBigInt()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate_, object_for_lookup,
- Object::ToObject(isolate_, object), Object);
- }
- DCHECK(object_for_lookup->IsJSReceiver());
-
- // Retrieve toJSON function.
+ // Retrieve toJSON function. The LookupIterator automatically handles
+ // the ToObject() equivalent ("GetRoot") if {object} is a BigInt.
Handle<Object> fun;
- {
- LookupIterator it(isolate_, object_for_lookup, tojson_string_,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
- if (!fun->IsCallable()) return object;
- }
+ LookupIterator it(isolate_, object, tojson_string_,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
+ if (!fun->IsCallable()) return object;
// Call toJSON function.
if (key->IsSmi()) key = factory()->NumberToString(key);
@@ -464,7 +456,7 @@ class CircularStructureMessageBuilder {
static const int kBufferSize = 100;
char chars[kBufferSize];
Vector<char> buffer(chars, kBufferSize);
- builder_.AppendCString(IntToCString(smi->value(), buffer));
+ builder_.AppendCString(IntToCString(smi.value(), buffer));
}
IncrementalStringBuilder builder_;
@@ -519,7 +511,7 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
StackLimitCheck interrupt_check(isolate_);
Handle<Object> initial_value = object;
if (interrupt_check.InterruptRequested() &&
- isolate_->stack_guard()->HandleInterrupts()->IsException(isolate_)) {
+ isolate_->stack_guard()->HandleInterrupts().IsException(isolate_)) {
return EXCEPTION;
}
if (object->IsJSReceiver() || object->IsBigInt()) {
@@ -537,7 +529,7 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
return SerializeSmi(Smi::cast(*object));
}
- switch (HeapObject::cast(*object)->map()->instance_type()) {
+ switch (HeapObject::cast(*object).map().instance_type()) {
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
@@ -547,7 +539,7 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
*factory()->NewTypeError(MessageTemplate::kBigIntSerializeJSON));
return EXCEPTION;
case ODDBALL_TYPE:
- switch (Oddball::cast(*object)->kind()) {
+ switch (Oddball::cast(*object).kind()) {
case Oddball::kFalse:
if (deferred_string_key) SerializeDeferredKey(comma, key);
builder_.AppendCString("false");
@@ -594,23 +586,23 @@ JsonStringifier::Result JsonStringifier::Serialize_(Handle<Object> object,
JsonStringifier::Result JsonStringifier::SerializeJSValue(
Handle<JSValue> object, Handle<Object> key) {
Object raw = object->value();
- if (raw->IsString()) {
+ if (raw.IsString()) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, value, Object::ToString(isolate_, object), EXCEPTION);
SerializeString(Handle<String>::cast(value));
- } else if (raw->IsNumber()) {
+ } else if (raw.IsNumber()) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, value, Object::ToNumber(isolate_, object), EXCEPTION);
if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
SerializeHeapNumber(Handle<HeapNumber>::cast(value));
- } else if (raw->IsBigInt()) {
+ } else if (raw.IsBigInt()) {
isolate_->Throw(
*factory()->NewTypeError(MessageTemplate::kBigIntSerializeJSON));
return EXCEPTION;
- } else if (raw->IsBoolean()) {
- builder_.AppendCString(raw->IsTrue(isolate_) ? "true" : "false");
+ } else if (raw.IsBoolean()) {
+ builder_.AppendCString(raw.IsTrue(isolate_) ? "true" : "false");
} else {
// ES6 24.3.2.1 step 10.c, serialize as an ordinary JSObject.
return SerializeJSObject(object, key);
@@ -622,7 +614,7 @@ JsonStringifier::Result JsonStringifier::SerializeSmi(Smi object) {
static const int kBufferSize = 100;
char chars[kBufferSize];
Vector<char> buffer(chars, kBufferSize);
- builder_.AppendCString(IntToCString(object->value(), buffer));
+ builder_.AppendCString(IntToCString(object.value(), buffer));
return SUCCESS;
}
@@ -644,7 +636,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSArray(
Result stack_push = StackPush(object, key);
if (stack_push != SUCCESS) return stack_push;
uint32_t length = 0;
- CHECK(object->length()->ToArrayLength(&length));
+ CHECK(object->length().ToArrayLength(&length));
DCHECK(!object->IsAccessCheckNeeded());
builder_.AppendCharacter('[');
Indent();
@@ -657,7 +649,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSArray(
StackLimitCheck interrupt_check(isolate_);
while (i < length) {
if (interrupt_check.InterruptRequested() &&
- isolate_->stack_guard()->HandleInterrupts()->IsException(
+ isolate_->stack_guard()->HandleInterrupts().IsException(
isolate_)) {
return EXCEPTION;
}
@@ -675,7 +667,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSArray(
StackLimitCheck interrupt_check(isolate_);
while (i < length) {
if (interrupt_check.InterruptRequested() &&
- isolate_->stack_guard()->HandleInterrupts()->IsException(
+ isolate_->stack_guard()->HandleInterrupts().IsException(
isolate_)) {
return EXCEPTION;
}
@@ -696,7 +688,7 @@ JsonStringifier::Result JsonStringifier::SerializeJSArray(
Separator(i == 0);
Result result = SerializeElement(
isolate_,
- Handle<Object>(FixedArray::cast(object->elements())->get(i),
+ Handle<Object>(FixedArray::cast(object->elements()).get(i),
isolate_),
i);
if (result == UNCHANGED) {
@@ -760,8 +752,11 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
if (stack_push != SUCCESS) return stack_push;
if (property_list_.is_null() &&
- !object->map()->IsCustomElementsReceiverMap() &&
- object->HasFastProperties() && object->elements()->length() == 0) {
+ !object->map().IsCustomElementsReceiverMap() &&
+ object->HasFastProperties() &&
+ (object->elements() == ReadOnlyRoots(isolate_).empty_fixed_array() ||
+ object->elements() ==
+ ReadOnlyRoots(isolate_).empty_slow_element_dictionary())) {
DCHECK(!object->IsJSGlobalProxy());
DCHECK(!object->HasIndexedInterceptor());
DCHECK(!object->HasNamedInterceptor());
@@ -770,11 +765,11 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
Indent();
bool comma = false;
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- Handle<Name> name(map->instance_descriptors()->GetKey(i), isolate_);
+ Handle<Name> name(map->instance_descriptors().GetKey(i), isolate_);
// TODO(rossberg): Should this throw?
if (!name->IsString()) continue;
Handle<String> key = Handle<String>::cast(name);
- PropertyDetails details = map->instance_descriptors()->GetDetails(i);
+ PropertyDetails details = map->instance_descriptors().GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> property;
if (details.location() == kField && *map == object->map()) {
@@ -880,7 +875,7 @@ void JsonStringifier::SerializeStringUnchecked_(
SrcChar c = src[i];
if (DoNotEscape(c)) {
dest->Append(c);
- } else if (FLAG_harmony_json_stringify && c >= 0xD800 && c <= 0xDFFF) {
+ } else if (c >= 0xD800 && c <= 0xDFFF) {
// The current character is a surrogate.
if (c <= 0xDBFF) {
// The current character is a leading surrogate.
@@ -943,7 +938,7 @@ void JsonStringifier::SerializeString_(Handle<String> string) {
SrcChar c = reader.Get<SrcChar>(i);
if (DoNotEscape(c)) {
builder_.Append<SrcChar, DestChar>(c);
- } else if (FLAG_harmony_json_stringify && c >= 0xD800 && c <= 0xDFFF) {
+ } else if (c >= 0xD800 && c <= 0xDFFF) {
// The current character is a surrogate.
if (c <= 0xDBFF) {
// The current character is a leading surrogate.
@@ -999,8 +994,7 @@ bool JsonStringifier::DoNotEscape(uint8_t c) {
template <>
bool JsonStringifier::DoNotEscape(uint16_t c) {
// https://tc39.github.io/ecma262/#table-json-single-character-escapes
- return c >= 0x23 && c != 0x5C && c != 0x7F &&
- (!FLAG_harmony_json_stringify || (c < 0xD800 || c > 0xDFFF));
+ return c >= 0x23 && c != 0x5C && c != 0x7F && (c < 0xD800 || c > 0xDFFF);
}
void JsonStringifier::NewLine() {
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json/json-stringifier.h
index 7532255c1a..0420cced35 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json/json-stringifier.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_JSON_STRINGIFIER_H_
-#define V8_JSON_STRINGIFIER_H_
+#ifndef V8_JSON_JSON_STRINGIFIER_H_
+#define V8_JSON_JSON_STRINGIFIER_H_
-#include "src/objects.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -17,4 +17,4 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> JsonStringify(Isolate* isolate,
} // namespace internal
} // namespace v8
-#endif // V8_JSON_STRINGIFIER_H_
+#endif // V8_JSON_JSON_STRINGIFIER_H_
diff --git a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
index 34c64dcc5d..b625fdb57c 100644
--- a/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.cc
@@ -11,7 +11,9 @@ namespace platform {
DefaultWorkerThreadsTaskRunner::DefaultWorkerThreadsTaskRunner(
uint32_t thread_pool_size, TimeFunction time_function)
- : queue_(time_function), time_function_(time_function) {
+ : queue_(time_function),
+ time_function_(time_function),
+ thread_pool_size_(thread_pool_size) {
for (uint32_t i = 0; i < thread_pool_size; ++i) {
thread_pool_.push_back(base::make_unique<WorkerThread>(this));
}
@@ -23,12 +25,20 @@ double DefaultWorkerThreadsTaskRunner::MonotonicallyIncreasingTime() {
return time_function_();
}
+bool DefaultWorkerThreadsTaskRunner::RunsTasksOnCurrentThread() const {
+ USE(thread_pool_size_);
+ DCHECK_EQ(thread_pool_size_, 1);
+ return single_worker_thread_id_.load(std::memory_order_relaxed) ==
+ base::OS::GetCurrentThreadId();
+}
+
void DefaultWorkerThreadsTaskRunner::Terminate() {
base::MutexGuard guard(&lock_);
terminated_ = true;
queue_.Terminate();
// Clearing the thread pool lets all worker threads join.
thread_pool_.clear();
+ single_worker_thread_id_.store(0, std::memory_order_relaxed);
}
void DefaultWorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
@@ -69,6 +79,8 @@ DefaultWorkerThreadsTaskRunner::WorkerThread::WorkerThread(
DefaultWorkerThreadsTaskRunner::WorkerThread::~WorkerThread() { Join(); }
void DefaultWorkerThreadsTaskRunner::WorkerThread::Run() {
+ runner_->single_worker_thread_id_.store(base::OS::GetCurrentThreadId(),
+ std::memory_order_relaxed);
while (std::unique_ptr<Task> task = runner_->GetNext()) {
task->Run();
}
diff --git a/deps/v8/src/libplatform/default-worker-threads-task-runner.h b/deps/v8/src/libplatform/default-worker-threads-task-runner.h
index d0938a7a9b..31b6c0e817 100644
--- a/deps/v8/src/libplatform/default-worker-threads-task-runner.h
+++ b/deps/v8/src/libplatform/default-worker-threads-task-runner.h
@@ -30,6 +30,10 @@ class V8_PLATFORM_EXPORT DefaultWorkerThreadsTaskRunner
double MonotonicallyIncreasingTime();
+ // It is only valid to call this method on a task runner with a single worker
+ // thread. True if the current thread is the worker thread.
+ bool RunsTasksOnCurrentThread() const;
+
// v8::TaskRunner implementation.
void PostTask(std::unique_ptr<Task> task) override;
@@ -64,6 +68,8 @@ class V8_PLATFORM_EXPORT DefaultWorkerThreadsTaskRunner
DelayedTaskQueue queue_;
std::vector<std::unique_ptr<WorkerThread>> thread_pool_;
TimeFunction time_function_;
+ std::atomic_int single_worker_thread_id_{0};
+ uint32_t thread_pool_size_;
};
} // namespace platform
diff --git a/deps/v8/src/libplatform/tracing/DEPS b/deps/v8/src/libplatform/tracing/DEPS
new file mode 100644
index 0000000000..582200e094
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/DEPS
@@ -0,0 +1,4 @@
+include_rules = [
+ "+perfetto",
+ "+third_party/perfetto/include/perfetto/base",
+] \ No newline at end of file
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc b/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
new file mode 100644
index 0000000000..99db86a7d1
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/json-trace-event-listener.cc
@@ -0,0 +1,166 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/json-trace-event-listener.h"
+
+#include <cmath>
+
+#include "base/trace_event/common/trace_event_common.h"
+#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+JSONTraceEventListener::JSONTraceEventListener(std::ostream* stream)
+ : stream_(stream) {
+ *stream_ << "{\"traceEvents\":[";
+}
+
+JSONTraceEventListener::~JSONTraceEventListener() { *stream_ << "]}"; }
+
+// TODO(petermarshall): Clean up this code which was copied from trace-writer.cc
+// once we've removed that file.
+
+// Writes the given string, taking care to escape characters when necessary.
+void JSONTraceEventListener::AppendJSONString(const char* str) {
+ size_t len = strlen(str);
+ *stream_ << "\"";
+ for (size_t i = 0; i < len; ++i) {
+ // All of the permitted escape sequences in JSON strings, as per
+ // https://mathiasbynens.be/notes/javascript-escapes
+ switch (str[i]) {
+ case '\b':
+ *stream_ << "\\b";
+ break;
+ case '\f':
+ *stream_ << "\\f";
+ break;
+ case '\n':
+ *stream_ << "\\n";
+ break;
+ case '\r':
+ *stream_ << "\\r";
+ break;
+ case '\t':
+ *stream_ << "\\t";
+ break;
+ case '\"':
+ *stream_ << "\\\"";
+ break;
+ case '\\':
+ *stream_ << "\\\\";
+ break;
+ // Note that because we use double quotes for JSON strings,
+ // we don't need to escape single quotes.
+ default:
+ *stream_ << str[i];
+ break;
+ }
+ }
+ *stream_ << "\"";
+}
+
+void JSONTraceEventListener::AppendArgValue(
+ const ::perfetto::protos::ChromeTraceEvent_Arg& arg) {
+ if (arg.has_bool_value()) {
+ *stream_ << (arg.bool_value() ? "true" : "false");
+ } else if (arg.has_uint_value()) {
+ *stream_ << arg.uint_value();
+ } else if (arg.has_int_value()) {
+ *stream_ << arg.int_value();
+ } else if (arg.has_double_value()) {
+ std::string real;
+ double val = arg.double_value();
+ if (std::isfinite(val)) {
+ std::ostringstream convert_stream;
+ convert_stream << val;
+ real = convert_stream.str();
+ // Ensure that the number has a .0 if there's no decimal or 'e'. This
+ // makes sure that when we read the JSON back, it's interpreted as a
+ // real rather than an int.
+ if (real.find('.') == std::string::npos &&
+ real.find('e') == std::string::npos &&
+ real.find('E') == std::string::npos) {
+ real += ".0";
+ }
+ } else if (std::isnan(val)) {
+ // The JSON spec doesn't allow NaN and Infinity (since these are
+ // objects in EcmaScript). Use strings instead.
+ real = "\"NaN\"";
+ } else if (val < 0) {
+ real = "\"-Infinity\"";
+ } else {
+ real = "\"Infinity\"";
+ }
+ *stream_ << real;
+ } else if (arg.has_string_value()) {
+ AppendJSONString(arg.string_value().c_str());
+ } else if (arg.has_pointer_value()) {
+ // JSON only supports double and int numbers.
+ // So as not to lose bits from a 64-bit pointer, output as a hex string.
+ *stream_ << "\"0x" << std::hex << arg.pointer_value() << std::dec << "\"";
+ } else if (arg.has_json_value()) {
+ *stream_ << arg.json_value();
+ }
+ // V8 does not emit proto arguments currently.
+ CHECK(!arg.has_traced_value());
+}
+
+void JSONTraceEventListener::ProcessPacket(
+ const ::perfetto::protos::ChromeTracePacket& packet) {
+ for (const ::perfetto::protos::ChromeTraceEvent& event :
+ packet.chrome_events().trace_events()) {
+ if (append_comma_) *stream_ << ",";
+ append_comma_ = true;
+
+ // TODO(petermarshall): Handle int64 fields differently?
+ // clang-format off
+ *stream_ << "{\"pid\":" << event.process_id()
+ << ",\"tid\":" << event.thread_id()
+ << ",\"ts\":" << event.timestamp()
+ << ",\"tts\":" << event.thread_timestamp()
+ << ",\"ph\":\"" << static_cast<char>(event.phase())
+ << "\",\"cat\":\"" << event.category_group_name()
+ << "\",\"name\":\"" << event.name()
+ << "\",\"dur\":" << event.duration()
+ << ",\"tdur\":" << event.thread_duration();
+ // clang-format on
+
+ if (event.flags() &
+ (TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT)) {
+ *stream_ << ",\"bind_id\":\"0x" << std::hex << event.bind_id() << "\""
+ << std::dec;
+ if (event.flags() & TRACE_EVENT_FLAG_FLOW_IN) {
+ *stream_ << ",\"flow_in\":true";
+ }
+ if (event.flags() & TRACE_EVENT_FLAG_FLOW_OUT) {
+ *stream_ << ",\"flow_out\":true";
+ }
+ }
+ if (event.flags() & TRACE_EVENT_FLAG_HAS_ID) {
+ if (event.has_scope()) {
+ *stream_ << ",\"scope\":\"" << event.scope() << "\"";
+ }
+ // So as not to lose bits from a 64-bit integer, output as a hex string.
+ *stream_ << ",\"id\":\"0x" << std::hex << event.id() << "\"" << std::dec;
+ }
+
+ *stream_ << ",\"args\":{";
+
+ int i = 0;
+ for (const ::perfetto::protos::ChromeTraceEvent_Arg& arg : event.args()) {
+ if (i++ > 0) *stream_ << ",";
+ *stream_ << "\"" << arg.name() << "\":";
+ AppendArgValue(arg);
+ }
+ *stream_ << "}}";
+ }
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/json-trace-event-listener.h b/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
new file mode 100644
index 0000000000..fc4979f14c
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/json-trace-event-listener.h
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_TRACING_JSON_TRACE_EVENT_LISTENER_H_
+#define V8_LIBPLATFORM_TRACING_JSON_TRACE_EVENT_LISTENER_H_
+
+#include <ostream>
+
+#include "src/libplatform/tracing/trace-event-listener.h"
+
+namespace perfetto {
+namespace protos {
+class ChromeTraceEvent_Arg;
+} // namespace protos
+} // namespace perfetto
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+// A listener that converts the proto trace data to JSON and writes it to a
+// file.
+class JSONTraceEventListener final : public TraceEventListener {
+ public:
+ explicit JSONTraceEventListener(std::ostream* stream);
+ ~JSONTraceEventListener() override;
+
+ private:
+ void ProcessPacket(
+ const ::perfetto::protos::ChromeTracePacket& packet) override;
+
+ // Internal implementation
+ void AppendJSONString(const char* str);
+ void AppendArgValue(const ::perfetto::protos::ChromeTraceEvent_Arg& arg);
+
+ std::ostream* stream_;
+ bool append_comma_ = false;
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // V8_LIBPLATFORM_TRACING_JSON_TRACE_EVENT_LISTENER_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-consumer.cc b/deps/v8/src/libplatform/tracing/perfetto-consumer.cc
new file mode 100644
index 0000000000..8071fe52d5
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-consumer.cc
@@ -0,0 +1,44 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/perfetto-consumer.h"
+
+#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
+#include "perfetto/tracing/core/trace_packet.h"
+#include "src/base/macros.h"
+#include "src/base/platform/semaphore.h"
+#include "src/libplatform/tracing/trace-event-listener.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+PerfettoConsumer::PerfettoConsumer(base::Semaphore* finished)
+ : finished_semaphore_(finished) {}
+
+void PerfettoConsumer::OnTraceData(std::vector<::perfetto::TracePacket> packets,
+ bool has_more) {
+ for (const ::perfetto::TracePacket& packet : packets) {
+ perfetto::protos::ChromeTracePacket proto_packet;
+ bool success = packet.Decode(&proto_packet);
+ USE(success);
+ DCHECK(success);
+
+ for (TraceEventListener* listener : listeners_) {
+ listener->ProcessPacket(proto_packet);
+ }
+ }
+  // PerfettoTracingController::StopTracing() waits on this semaphore. This is
+ // so that we can ensure that this consumer has finished consuming all of the
+ // trace events from the buffer before the buffer is destroyed.
+ if (!has_more) finished_semaphore_->Signal();
+}
+
+void PerfettoConsumer::AddTraceEventListener(TraceEventListener* listener) {
+ listeners_.push_back(listener);
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-consumer.h b/deps/v8/src/libplatform/tracing/perfetto-consumer.h
new file mode 100644
index 0000000000..83d0c48c1b
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-consumer.h
@@ -0,0 +1,80 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_CONSUMER_H_
+#define V8_LIBPLATFORM_TRACING_PERFETTO_CONSUMER_H_
+
+#include <memory>
+
+#include "perfetto/tracing/core/consumer.h"
+#include "perfetto/tracing/core/tracing_service.h"
+#include "src/base/logging.h"
+
+namespace perfetto {
+namespace protos {
+class ChromeTracePacket;
+} // namespace protos
+} // namespace perfetto
+
+namespace v8 {
+
+namespace base {
+class Semaphore;
+}
+
+namespace platform {
+namespace tracing {
+
+class TraceEventListener;
+
+// A Perfetto Consumer gets streamed trace events from the Service via
+// OnTraceData(). A Consumer can be configured (via
+// service_endpoint()->EnableTracing()) to listen to various different types of
+// trace events. The Consumer is responsible for producing whatever tracing
+// output the system should have.
+
+// Implements the V8-specific logic for interacting with the tracing controller
+// and directs trace events to the added TraceEventListeners.
+class PerfettoConsumer final : public ::perfetto::Consumer {
+ public:
+ explicit PerfettoConsumer(base::Semaphore* finished);
+
+ using ServiceEndpoint = ::perfetto::TracingService::ConsumerEndpoint;
+
+ // Register a trace event listener that will receive trace events from this
+ // consumer. This can be called multiple times to register multiple listeners,
+ // but must be called before starting tracing.
+ void AddTraceEventListener(TraceEventListener* listener);
+
+ ServiceEndpoint* service_endpoint() const { return service_endpoint_.get(); }
+ void set_service_endpoint(std::unique_ptr<ServiceEndpoint> endpoint) {
+ service_endpoint_ = std::move(endpoint);
+ }
+
+ private:
+ // ::perfetto::Consumer implementation
+ void OnConnect() override {}
+ void OnDisconnect() override {}
+ void OnTracingDisabled() override {}
+ void OnTraceData(std::vector<::perfetto::TracePacket> packets,
+ bool has_more) override;
+ void OnDetach(bool success) override {}
+ void OnAttach(bool success, const ::perfetto::TraceConfig&) override {}
+ void OnTraceStats(bool success, const ::perfetto::TraceStats&) override {
+ UNREACHABLE();
+ }
+ void OnObservableEvents(const ::perfetto::ObservableEvents&) override {
+ UNREACHABLE();
+ }
+
+ std::unique_ptr<ServiceEndpoint> service_endpoint_;
+ base::Semaphore* finished_semaphore_;
+ std::vector<TraceEventListener*> listeners_;
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // V8_LIBPLATFORM_TRACING_PERFETTO_CONSUMER_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-producer.cc b/deps/v8/src/libplatform/tracing/perfetto-producer.cc
new file mode 100644
index 0000000000..814dca6b59
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-producer.cc
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/perfetto-producer.h"
+
+#include "perfetto/tracing/core/data_source_config.h"
+#include "perfetto/tracing/core/data_source_descriptor.h"
+#include "perfetto/tracing/core/trace_writer.h"
+#include "src/libplatform/tracing/perfetto-tasks.h"
+#include "src/libplatform/tracing/perfetto-tracing-controller.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+void PerfettoProducer::OnConnect() {
+ ::perfetto::DataSourceDescriptor ds_desc;
+ ds_desc.set_name("v8.trace_events");
+ service_endpoint_->RegisterDataSource(ds_desc);
+}
+
+void PerfettoProducer::StartDataSource(
+ ::perfetto::DataSourceInstanceID, const ::perfetto::DataSourceConfig& cfg) {
+ target_buffer_ = cfg.target_buffer();
+ tracing_controller_->OnProducerReady();
+}
+
+void PerfettoProducer::StopDataSource(::perfetto::DataSourceInstanceID) {
+ target_buffer_ = 0;
+}
+
+std::unique_ptr<::perfetto::TraceWriter> PerfettoProducer::CreateTraceWriter()
+ const {
+ CHECK_NE(0, target_buffer_);
+ return service_endpoint_->CreateTraceWriter(target_buffer_);
+}
+
+PerfettoProducer::PerfettoProducer(
+ PerfettoTracingController* tracing_controller)
+ : tracing_controller_(tracing_controller) {}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-producer.h b/deps/v8/src/libplatform/tracing/perfetto-producer.h
new file mode 100644
index 0000000000..2a363e8bf8
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-producer.h
@@ -0,0 +1,70 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_PRODUCER_H_
+#define V8_LIBPLATFORM_TRACING_PERFETTO_PRODUCER_H_
+
+#include <atomic>
+#include <memory>
+
+#include "perfetto/tracing/core/producer.h"
+#include "perfetto/tracing/core/tracing_service.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+class PerfettoTracingController;
+
+class PerfettoProducer final : public ::perfetto::Producer {
+ public:
+ using ServiceEndpoint = ::perfetto::TracingService::ProducerEndpoint;
+
+ explicit PerfettoProducer(PerfettoTracingController* tracing_controller);
+
+ ServiceEndpoint* service_endpoint() const { return service_endpoint_.get(); }
+ void set_service_endpoint(std::unique_ptr<ServiceEndpoint> endpoint) {
+ service_endpoint_ = std::move(endpoint);
+ }
+
+ // Create a TraceWriter for the calling thread. The TraceWriter is a
+ // thread-local object that writes data into a buffer which is shared between
+ // all TraceWriters for a given PerfettoProducer instance. Can only be called
+ // after the StartDataSource() callback has been received from the service, as
+ // this provides the buffer.
+ std::unique_ptr<::perfetto::TraceWriter> CreateTraceWriter() const;
+
+ private:
+ // ::perfetto::Producer implementation
+ void OnConnect() override;
+ void OnDisconnect() override {}
+ void OnTracingSetup() override {}
+ void SetupDataSource(::perfetto::DataSourceInstanceID,
+ const ::perfetto::DataSourceConfig&) override {}
+ void StartDataSource(::perfetto::DataSourceInstanceID,
+ const ::perfetto::DataSourceConfig& cfg) override;
+ void StopDataSource(::perfetto::DataSourceInstanceID) override;
+ // TODO(petermarshall): Implement Flush(). A final flush happens when the
+ // TraceWriter object for each thread is destroyed, but this will be more
+ // efficient.
+ void Flush(::perfetto::FlushRequestID,
+ const ::perfetto::DataSourceInstanceID*, size_t) override {}
+
+ void ClearIncrementalState(
+ const ::perfetto::DataSourceInstanceID* data_source_ids,
+ size_t num_data_sources) override {
+ UNREACHABLE();
+ }
+
+ std::unique_ptr<ServiceEndpoint> service_endpoint_;
+ uint32_t target_buffer_ = 0;
+ PerfettoTracingController* tracing_controller_;
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // V8_LIBPLATFORM_TRACING_PERFETTO_PRODUCER_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc b/deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc
new file mode 100644
index 0000000000..6c31c05070
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-shared-memory.cc
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/perfetto-shared-memory.h"
+
+#include "src/base/platform/platform.h"
+#include "src/base/template-utils.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+PerfettoSharedMemory::PerfettoSharedMemory(size_t size)
+ : size_(size),
+ paged_memory_(::perfetto::base::PagedMemory::Allocate(size)) {
+ // TODO(956543): Find a cross-platform solution.
+ // TODO(petermarshall): Don't assume that size is page-aligned.
+}
+
+std::unique_ptr<::perfetto::SharedMemory>
+PerfettoSharedMemoryFactory::CreateSharedMemory(size_t size) {
+ return base::make_unique<PerfettoSharedMemory>(size);
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-shared-memory.h b/deps/v8/src/libplatform/tracing/perfetto-shared-memory.h
new file mode 100644
index 0000000000..7a987cc7f0
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-shared-memory.h
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_SHARED_MEMORY_H_
+#define V8_LIBPLATFORM_TRACING_PERFETTO_SHARED_MEMORY_H_
+
+#include "perfetto/tracing/core/shared_memory.h"
+
+#include "third_party/perfetto/include/perfetto/base/paged_memory.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+// Perfetto requires a shared memory implementation for multi-process embedders
+// but V8 is single process. We implement it here using PagedMemory from
+// perfetto.
+class PerfettoSharedMemory : public ::perfetto::SharedMemory {
+ public:
+ explicit PerfettoSharedMemory(size_t size);
+
+ // The PagedMemory destructor will free the underlying memory when this object
+ // is destroyed.
+
+ void* start() const override { return paged_memory_.Get(); }
+ size_t size() const override { return size_; }
+
+ private:
+ size_t size_;
+ ::perfetto::base::PagedMemory paged_memory_;
+};
+
+class PerfettoSharedMemoryFactory : public ::perfetto::SharedMemory::Factory {
+ public:
+ ~PerfettoSharedMemoryFactory() override = default;
+ std::unique_ptr<::perfetto::SharedMemory> CreateSharedMemory(
+ size_t size) override;
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // V8_LIBPLATFORM_TRACING_PERFETTO_SHARED_MEMORY_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tasks.cc b/deps/v8/src/libplatform/tracing/perfetto-tasks.cc
new file mode 100644
index 0000000000..70d00ed626
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-tasks.cc
@@ -0,0 +1,52 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/perfetto-tasks.h"
+
+#include "src/base/platform/semaphore.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+PerfettoTaskRunner::PerfettoTaskRunner() : runner_(1, DefaultTimeFunction) {}
+
+PerfettoTaskRunner::~PerfettoTaskRunner() { runner_.Terminate(); }
+
+// static
+double PerfettoTaskRunner::DefaultTimeFunction() {
+ return (base::TimeTicks::HighResolutionNow() - base::TimeTicks())
+ .InSecondsF();
+}
+
+void PerfettoTaskRunner::PostTask(std::function<void()> f) {
+ runner_.PostTask(base::make_unique<TracingTask>(std::move(f)));
+}
+
+void PerfettoTaskRunner::PostDelayedTask(std::function<void()> f,
+ uint32_t delay_ms) {
+ double delay_in_seconds =
+ delay_ms / static_cast<double>(base::Time::kMillisecondsPerSecond);
+ runner_.PostDelayedTask(base::make_unique<TracingTask>(std::move(f)),
+ delay_in_seconds);
+}
+
+bool PerfettoTaskRunner::RunsTasksOnCurrentThread() const {
+ return runner_.RunsTasksOnCurrentThread();
+}
+
+void PerfettoTaskRunner::FinishImmediateTasks() {
+ DCHECK(!RunsTasksOnCurrentThread());
+ base::Semaphore semaphore(0);
+ // PostTask has guaranteed ordering so this will be the last task executed.
+ runner_.PostTask(
+ base::make_unique<TracingTask>([&semaphore] { semaphore.Signal(); }));
+
+ semaphore.Wait();
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tasks.h b/deps/v8/src/libplatform/tracing/perfetto-tasks.h
new file mode 100644
index 0000000000..054a9e157a
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-tasks.h
@@ -0,0 +1,55 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_TASKS_H_
+#define V8_LIBPLATFORM_TRACING_PERFETTO_TASKS_H_
+
+#include <functional>
+
+#include "include/v8-platform.h"
+#include "perfetto/base/task_runner.h"
+#include "src/libplatform/default-worker-threads-task-runner.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+class TracingTask : public Task {
+ public:
+ explicit TracingTask(std::function<void()> f) : f_(std::move(f)) {}
+
+ void Run() override { f_(); }
+
+ private:
+ std::function<void()> f_;
+};
+
+class PerfettoTaskRunner : public ::perfetto::base::TaskRunner {
+ public:
+ PerfettoTaskRunner();
+ ~PerfettoTaskRunner() override;
+
+ // ::perfetto::base::TaskRunner implementation
+ void PostTask(std::function<void()> f) override;
+ void PostDelayedTask(std::function<void()> f, uint32_t delay_ms) override;
+ void AddFileDescriptorWatch(int fd, std::function<void()>) override {
+ UNREACHABLE();
+ }
+ void RemoveFileDescriptorWatch(int fd) override { UNREACHABLE(); }
+ bool RunsTasksOnCurrentThread() const override;
+
+ // PerfettoTaskRunner implementation
+ void FinishImmediateTasks();
+
+ private:
+ static double DefaultTimeFunction();
+
+ DefaultWorkerThreadsTaskRunner runner_;
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // V8_LIBPLATFORM_TRACING_PERFETTO_TASKS_H_
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc b/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc
new file mode 100644
index 0000000000..9b62c2ae78
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.cc
@@ -0,0 +1,130 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/tracing/perfetto-tracing-controller.h"
+
+#include "perfetto/tracing/core/trace_config.h"
+#include "perfetto/tracing/core/trace_writer.h"
+#include "perfetto/tracing/core/tracing_service.h"
+#include "src/libplatform/tracing/perfetto-consumer.h"
+#include "src/libplatform/tracing/perfetto-producer.h"
+#include "src/libplatform/tracing/perfetto-shared-memory.h"
+#include "src/libplatform/tracing/perfetto-tasks.h"
+#include "src/libplatform/tracing/trace-event-listener.h"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+PerfettoTracingController::PerfettoTracingController()
+ : writer_key_(base::Thread::CreateThreadLocalKey()),
+ producer_ready_semaphore_(0),
+ consumer_finished_semaphore_(0) {}
+
+void PerfettoTracingController::StartTracing(
+ const ::perfetto::TraceConfig& trace_config) {
+ DCHECK(!task_runner_);
+ task_runner_ = base::make_unique<PerfettoTaskRunner>();
+ // The Perfetto service expects calls on the task runner thread which is why
+ // the setup below occurs in posted tasks.
+ task_runner_->PostTask([&trace_config, this] {
+ std::unique_ptr<::perfetto::SharedMemory::Factory> shmem_factory =
+ base::make_unique<PerfettoSharedMemoryFactory>();
+
+ service_ = ::perfetto::TracingService::CreateInstance(
+ std::move(shmem_factory), task_runner_.get());
+ // This allows Perfetto to recover trace events that were written by
+ // TraceWriters which have not yet been deleted. This allows us to keep
+ // TraceWriters alive past the end of tracing, rather than having to delete
+ // them all when tracing stops which would require synchronization on every
+ // trace event. Eventually we will delete TraceWriters when threads die, but
+ // for now we just leak all TraceWriters.
+ service_->SetSMBScrapingEnabled(true);
+ producer_ = base::make_unique<PerfettoProducer>(this);
+ consumer_ =
+ base::make_unique<PerfettoConsumer>(&consumer_finished_semaphore_);
+
+ for (TraceEventListener* listener : listeners_) {
+ consumer_->AddTraceEventListener(listener);
+ }
+
+ producer_->set_service_endpoint(service_->ConnectProducer(
+ producer_.get(), 0, "v8.perfetto-producer", 0, true));
+
+ consumer_->set_service_endpoint(
+ service_->ConnectConsumer(consumer_.get(), 0));
+
+    // We need to wait for the OnConnect() callbacks of the producer and
+ // consumer to be called.
+ consumer_->service_endpoint()->EnableTracing(trace_config);
+ });
+
+ producer_ready_semaphore_.Wait();
+}
+
+void PerfettoTracingController::StopTracing() {
+ // Finish all of the tasks such as existing AddTraceEvent calls. These
+ // require the data structures below to work properly, so keep them alive
+ // until the tasks are done.
+ task_runner_->FinishImmediateTasks();
+
+ task_runner_->PostTask([this] {
+ // Trigger shared memory buffer scraping which will get all pending trace
+ // events that have been written by still-living TraceWriters.
+ consumer_->service_endpoint()->DisableTracing();
+ // Trigger the consumer to finish. This can trigger multiple calls to
+ // PerfettoConsumer::OnTraceData(), with the final call passing has_more
+ // as false.
+ consumer_->service_endpoint()->ReadBuffers();
+ });
+
+ // Wait until the final OnTraceData() call with has_more=false has completed.
+ consumer_finished_semaphore_.Wait();
+
+ task_runner_->PostTask([this] {
+ consumer_.reset();
+ producer_.reset();
+ service_.reset();
+ });
+
+ // Finish the above task, and any callbacks that were triggered.
+ task_runner_->FinishImmediateTasks();
+ task_runner_.reset();
+}
+
+void PerfettoTracingController::AddTraceEventListener(
+ TraceEventListener* listener) {
+ listeners_.push_back(listener);
+}
+
+PerfettoTracingController::~PerfettoTracingController() {
+ base::Thread::DeleteThreadLocalKey(writer_key_);
+}
+
+::perfetto::TraceWriter*
+PerfettoTracingController::GetOrCreateThreadLocalWriter() {
+ // TODO(petermarshall): Use some form of thread-local destructor so that
+ // repeatedly created threads don't cause excessive leaking of TraceWriters.
+ if (base::Thread::HasThreadLocal(writer_key_)) {
+ return static_cast<::perfetto::TraceWriter*>(
+ base::Thread::GetExistingThreadLocal(writer_key_));
+ }
+
+ // We leak the TraceWriter objects created for each thread. Perfetto has a
+ // way of getting events from leaked TraceWriters and we can avoid needing a
+ // lock on every trace event this way.
+ std::unique_ptr<::perfetto::TraceWriter> tw = producer_->CreateTraceWriter();
+ ::perfetto::TraceWriter* writer = tw.release();
+
+ base::Thread::SetThreadLocal(writer_key_, writer);
+ return writer;
+}
+
+void PerfettoTracingController::OnProducerReady() {
+ producer_ready_semaphore_.Signal();
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
diff --git a/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h b/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h
new file mode 100644
index 0000000000..67a3c26cef
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/perfetto-tracing-controller.h
@@ -0,0 +1,86 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_TRACING_PERFETTO_TRACING_CONTROLLER_H_
+#define V8_LIBPLATFORM_TRACING_PERFETTO_TRACING_CONTROLLER_H_
+
+#include <atomic>
+#include <fstream>
+#include <memory>
+#include <vector>
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
+
+namespace perfetto {
+class TraceConfig;
+class TraceWriter;
+class TracingService;
+} // namespace perfetto
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+class PerfettoConsumer;
+class PerfettoProducer;
+class PerfettoTaskRunner;
+class TraceEventListener;
+
+// This is the top-level interface for performing tracing with perfetto. The
+// user of this class should call StartTracing() to start tracing, and
+// StopTracing() to stop it. To write trace events, the user can obtain a
+// thread-local TraceWriter object using GetOrCreateThreadLocalWriter().
+class PerfettoTracingController {
+ public:
+ PerfettoTracingController();
+
+ // Blocks and sets up all required data structures for tracing. It is safe to
+ // call GetOrCreateThreadLocalWriter() to obtain thread-local TraceWriters for
+ // writing trace events once this call returns. Tracing output will be sent to
+ // the TraceEventListeners registered via AddTraceEventListener().
+ void StartTracing(const ::perfetto::TraceConfig& trace_config);
+
+ // Blocks and finishes all existing AddTraceEvent tasks. Stops the tracing
+ // thread.
+ void StopTracing();
+
+ // Register a trace event listener that will receive trace events. This can be
+ // called multiple times to register multiple listeners, but must be called
+ // before starting tracing.
+ void AddTraceEventListener(TraceEventListener* listener);
+
+ ~PerfettoTracingController();
+
+ // Each thread that wants to trace should call this to get their TraceWriter.
+ // PerfettoTracingController creates and owns the TraceWriter.
+ ::perfetto::TraceWriter* GetOrCreateThreadLocalWriter();
+
+ private:
+ // Signals the producer_ready_semaphore_.
+ void OnProducerReady();
+
+ // PerfettoProducer is the only class allowed to call OnProducerReady().
+ friend class PerfettoProducer;
+
+ std::unique_ptr<::perfetto::TracingService> service_;
+ std::unique_ptr<PerfettoProducer> producer_;
+ std::unique_ptr<PerfettoConsumer> consumer_;
+ std::unique_ptr<PerfettoTaskRunner> task_runner_;
+ std::vector<TraceEventListener*> listeners_;
+ base::Thread::LocalStorageKey writer_key_;
+  // A semaphore signalled via OnProducerReady() once tracing starts. StartTracing
+ // waits on this semaphore to be notified when the tracing service is ready to
+ // receive trace events.
+ base::Semaphore producer_ready_semaphore_;
+ base::Semaphore consumer_finished_semaphore_;
+
+ DISALLOW_COPY_AND_ASSIGN(PerfettoTracingController);
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // V8_LIBPLATFORM_TRACING_PERFETTO_TRACING_CONTROLLER_H_
diff --git a/deps/v8/src/libplatform/tracing/trace-event-listener.h b/deps/v8/src/libplatform/tracing/trace-event-listener.h
new file mode 100644
index 0000000000..4acdb2935b
--- /dev/null
+++ b/deps/v8/src/libplatform/tracing/trace-event-listener.h
@@ -0,0 +1,34 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_TRACING_TRACE_EVENT_LISTENER_H_
+#define V8_LIBPLATFORM_TRACING_TRACE_EVENT_LISTENER_H_
+
+namespace perfetto {
+namespace protos {
+class ChromeTracePacket;
+} // namespace protos
+} // namespace perfetto
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+// A TraceEventListener is a simple interface that allows subclasses to listen
+// to trace events. This interface is to hide the more complex interactions that
+// the PerfettoConsumer class has to perform. Clients override ProcessPacket()
+// to respond to trace events, e.g. to write them to a file as JSON or for
+// testing purposes.
+class TraceEventListener {
+ public:
+ virtual ~TraceEventListener() = default;
+ virtual void ProcessPacket(
+ const ::perfetto::protos::ChromeTracePacket& packet) = 0;
+};
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#endif // V8_LIBPLATFORM_TRACING_TRACE_EVENT_LISTENER_H_
diff --git a/deps/v8/src/libplatform/tracing/trace-writer.cc b/deps/v8/src/libplatform/tracing/trace-writer.cc
index a22eb83d21..23dfd0cf46 100644
--- a/deps/v8/src/libplatform/tracing/trace-writer.cc
+++ b/deps/v8/src/libplatform/tracing/trace-writer.cc
@@ -109,7 +109,6 @@ void JSONTraceWriter::AppendArgValue(uint8_t type,
break;
default:
UNREACHABLE();
- break;
}
}
diff --git a/deps/v8/src/libplatform/tracing/tracing-controller.cc b/deps/v8/src/libplatform/tracing/tracing-controller.cc
index 8d25787495..91d042ba1e 100644
--- a/deps/v8/src/libplatform/tracing/tracing-controller.cc
+++ b/deps/v8/src/libplatform/tracing/tracing-controller.cc
@@ -12,6 +12,18 @@
#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
+#ifdef V8_USE_PERFETTO
+#include "base/trace_event/common/trace_event_common.h"
+#include "perfetto/trace/chrome/chrome_trace_event.pbzero.h"
+#include "perfetto/trace/trace_packet.pbzero.h"
+#include "perfetto/tracing/core/data_source_config.h"
+#include "perfetto/tracing/core/trace_config.h"
+#include "perfetto/tracing/core/trace_packet.h"
+#include "perfetto/tracing/core/trace_writer.h"
+#include "src/libplatform/tracing/json-trace-event-listener.h"
+#include "src/libplatform/tracing/perfetto-tracing-controller.h"
+#endif // V8_USE_PERFETTO
+
namespace v8 {
namespace platform {
namespace tracing {
@@ -62,6 +74,19 @@ void TracingController::Initialize(TraceBuffer* trace_buffer) {
mutex_.reset(new base::Mutex());
}
+#ifdef V8_USE_PERFETTO
+void TracingController::InitializeForPerfetto(std::ostream* output_stream) {
+ output_stream_ = output_stream;
+ DCHECK_NOT_NULL(output_stream);
+ DCHECK(output_stream->good());
+}
+
+void TracingController::SetTraceEventListenerForTesting(
+ TraceEventListener* listener) {
+ listener_for_testing_ = listener;
+}
+#endif
+
int64_t TracingController::CurrentTimestampMicroseconds() {
return base::TimeTicks::HighResolutionNow().ToInternalValue();
}
@@ -70,6 +95,58 @@ int64_t TracingController::CurrentCpuTimestampMicroseconds() {
return base::ThreadTicks::Now().ToInternalValue();
}
+namespace {
+
+#ifdef V8_USE_PERFETTO
+void AddArgsToTraceProto(
+ ::perfetto::protos::pbzero::ChromeTraceEvent* event, int num_args,
+ const char** arg_names, const uint8_t* arg_types,
+ const uint64_t* arg_values,
+ std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables) {
+ for (int i = 0; i < num_args; i++) {
+ ::perfetto::protos::pbzero::ChromeTraceEvent_Arg* arg = event->add_args();
+ // TODO(petermarshall): Set name_index instead if need be.
+ arg->set_name(arg_names[i]);
+
+ TraceObject::ArgValue arg_value;
+ arg_value.as_uint = arg_values[i];
+ switch (arg_types[i]) {
+ case TRACE_VALUE_TYPE_CONVERTABLE: {
+ // TODO(petermarshall): Support AppendToProto for Convertables.
+ std::string json_value;
+ arg_convertables[i]->AppendAsTraceFormat(&json_value);
+ arg->set_json_value(json_value.c_str());
+ break;
+ }
+ case TRACE_VALUE_TYPE_BOOL:
+ arg->set_bool_value(arg_value.as_bool);
+ break;
+ case TRACE_VALUE_TYPE_UINT:
+ arg->set_uint_value(arg_value.as_uint);
+ break;
+ case TRACE_VALUE_TYPE_INT:
+ arg->set_int_value(arg_value.as_int);
+ break;
+ case TRACE_VALUE_TYPE_DOUBLE:
+ arg->set_double_value(arg_value.as_double);
+ break;
+ case TRACE_VALUE_TYPE_POINTER:
+ arg->set_pointer_value(arg_value.as_uint);
+ break;
+ // TODO(petermarshall): Treat copy strings specially.
+ case TRACE_VALUE_TYPE_COPY_STRING:
+ case TRACE_VALUE_TYPE_STRING:
+ arg->set_string_value(arg_value.as_string);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+#endif // V8_USE_PERFETTO
+
+} // namespace
+
uint64_t TracingController::AddTraceEvent(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int num_args,
@@ -77,20 +154,11 @@ uint64_t TracingController::AddTraceEvent(
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags) {
- uint64_t handle = 0;
- if (recording_.load(std::memory_order_acquire)) {
- TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
- if (trace_object) {
- {
- base::MutexGuard lock(mutex_.get());
- trace_object->Initialize(
- phase, category_enabled_flag, name, scope, id, bind_id, num_args,
- arg_names, arg_types, arg_values, arg_convertables, flags,
- CurrentTimestampMicroseconds(), CurrentCpuTimestampMicroseconds());
- }
- }
- }
- return handle;
+ int64_t now_us = CurrentTimestampMicroseconds();
+
+ return AddTraceEventWithTimestamp(
+ phase, category_enabled_flag, name, scope, id, bind_id, num_args,
+ arg_names, arg_types, arg_values, arg_convertables, flags, now_us);
}
uint64_t TracingController::AddTraceEventWithTimestamp(
@@ -100,6 +168,48 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags, int64_t timestamp) {
+ int64_t cpu_now_us = CurrentCpuTimestampMicroseconds();
+
+#ifdef V8_USE_PERFETTO
+ if (perfetto_recording_.load()) {
+ // Don't use COMPLETE events with perfetto - instead transform them into
+ // BEGIN/END pairs. This avoids the need for a thread-local stack of pending
+ // trace events as perfetto does not support handles into the trace buffer.
+ if (phase == TRACE_EVENT_PHASE_COMPLETE) phase = TRACE_EVENT_PHASE_BEGIN;
+ ::perfetto::TraceWriter* writer =
+ perfetto_tracing_controller_->GetOrCreateThreadLocalWriter();
+ // TODO(petermarshall): We shouldn't start one packet for each event.
+ // We should try to bundle them together in one bundle.
+ auto packet = writer->NewTracePacket();
+ auto* trace_event_bundle = packet->set_chrome_events();
+ auto* trace_event = trace_event_bundle->add_trace_events();
+
+ trace_event->set_name(name);
+ trace_event->set_timestamp(timestamp);
+ trace_event->set_phase(phase);
+ trace_event->set_thread_id(base::OS::GetCurrentThreadId());
+ trace_event->set_duration(0);
+ trace_event->set_thread_duration(0);
+ if (scope) trace_event->set_scope(scope);
+ trace_event->set_id(id);
+ trace_event->set_flags(flags);
+ if (category_enabled_flag) {
+ const char* category_group_name =
+ GetCategoryGroupName(category_enabled_flag);
+ DCHECK_NOT_NULL(category_group_name);
+ trace_event->set_category_group_name(category_group_name);
+ }
+ trace_event->set_process_id(base::OS::GetCurrentProcessId());
+ trace_event->set_thread_timestamp(cpu_now_us);
+ trace_event->set_bind_id(bind_id);
+
+ AddArgsToTraceProto(trace_event, num_args, arg_names, arg_types, arg_values,
+ arg_convertables);
+
+ packet->Finalize();
+ }
+#endif // V8_USE_PERFETTO
+
uint64_t handle = 0;
if (recording_.load(std::memory_order_acquire)) {
TraceObject* trace_object = trace_buffer_->AddTraceEvent(&handle);
@@ -109,7 +219,7 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
trace_object->Initialize(phase, category_enabled_flag, name, scope, id,
bind_id, num_args, arg_names, arg_types,
arg_values, arg_convertables, flags, timestamp,
- CurrentCpuTimestampMicroseconds());
+ cpu_now_us);
}
}
}
@@ -118,10 +228,35 @@ uint64_t TracingController::AddTraceEventWithTimestamp(
void TracingController::UpdateTraceEventDuration(
const uint8_t* category_enabled_flag, const char* name, uint64_t handle) {
+ int64_t now_us = CurrentTimestampMicroseconds();
+ int64_t cpu_now_us = CurrentCpuTimestampMicroseconds();
+
+#ifdef V8_USE_PERFETTO
+ // TODO(petermarshall): Should we still record the end of unfinished events
+ // when tracing has stopped?
+ if (perfetto_recording_.load()) {
+ // TODO(petermarshall): We shouldn't start one packet for each event. We
+ // should try to bundle them together in one bundle.
+ ::perfetto::TraceWriter* writer =
+ perfetto_tracing_controller_->GetOrCreateThreadLocalWriter();
+
+ auto packet = writer->NewTracePacket();
+ auto* trace_event_bundle = packet->set_chrome_events();
+ auto* trace_event = trace_event_bundle->add_trace_events();
+
+ trace_event->set_phase(TRACE_EVENT_PHASE_END);
+ trace_event->set_thread_id(base::OS::GetCurrentThreadId());
+ trace_event->set_timestamp(now_us);
+ trace_event->set_process_id(base::OS::GetCurrentProcessId());
+ trace_event->set_thread_timestamp(cpu_now_us);
+
+ packet->Finalize();
+ }
+#endif // V8_USE_PERFETTO
+
TraceObject* trace_object = trace_buffer_->GetEventByHandle(handle);
if (!trace_object) return;
- trace_object->UpdateDuration(CurrentTimestampMicroseconds(),
- CurrentCpuTimestampMicroseconds());
+ trace_object->UpdateDuration(now_us, cpu_now_us);
}
const char* TracingController::GetCategoryGroupName(
@@ -141,6 +276,27 @@ const char* TracingController::GetCategoryGroupName(
}
void TracingController::StartTracing(TraceConfig* trace_config) {
+#ifdef V8_USE_PERFETTO
+ perfetto_tracing_controller_ = base::make_unique<PerfettoTracingController>();
+
+ if (listener_for_testing_) {
+ perfetto_tracing_controller_->AddTraceEventListener(listener_for_testing_);
+ }
+ DCHECK_NOT_NULL(output_stream_);
+ DCHECK(output_stream_->good());
+ json_listener_ = base::make_unique<JSONTraceEventListener>(output_stream_);
+ perfetto_tracing_controller_->AddTraceEventListener(json_listener_.get());
+ ::perfetto::TraceConfig perfetto_trace_config;
+
+ perfetto_trace_config.add_buffers()->set_size_kb(4096);
+ auto* ds_config = perfetto_trace_config.add_data_sources()->mutable_config();
+ ds_config->set_name("v8.trace_events");
+
+ // TODO(petermarshall): Set all the params from |perfetto_trace_config|.
+ perfetto_tracing_controller_->StartTracing(perfetto_trace_config);
+ perfetto_recording_.store(true);
+#endif // V8_USE_PERFETTO
+
trace_config_.reset(trace_config);
std::unordered_set<v8::TracingController::TraceStateObserver*> observers_copy;
{
@@ -169,6 +325,14 @@ void TracingController::StopTracing() {
for (auto o : observers_copy) {
o->OnTraceDisabled();
}
+
+#ifdef V8_USE_PERFETTO
+ perfetto_recording_.store(false);
+ perfetto_tracing_controller_->StopTracing();
+ perfetto_tracing_controller_.reset();
+ json_listener_.reset();
+#endif // V8_USE_PERFETTO
+
{
base::MutexGuard lock(mutex_.get());
trace_buffer_->Flush();
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index b6ae3de9f4..e445dfc65a 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -46,9 +46,9 @@ zx_status_t zx_thread_read_state(zx_handle_t h, uint32_t k, void* b, size_t l) {
&dummy_out_len);
}
#if defined(__x86_64__)
-typedef zx_x86_64_general_regs_t zx_thread_state_general_regs_t;
+using zx_thread_state_general_regs_t = zx_x86_64_general_regs_t;
#else
-typedef zx_arm64_general_regs_t zx_thread_state_general_regs_t;
+using zx_thread_state_general_regs_t = zx_arm64_general_regs_t;
#endif
#endif // !defined(ZX_THREAD_STATE_GENERAL_REGS)
@@ -71,31 +71,31 @@ typedef zx_arm64_general_regs_t zx_thread_state_general_regs_t;
#if defined(__arm__)
-typedef struct sigcontext mcontext_t;
+using mcontext_t = struct sigcontext;
-typedef struct ucontext {
+struct ucontext_t {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
-} ucontext_t;
+};
#elif defined(__aarch64__)
-typedef struct sigcontext mcontext_t;
+using mcontext_t = struct sigcontext;
-typedef struct ucontext {
+struct ucontext_t {
uint64_t uc_flags;
struct ucontext *uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
-} ucontext_t;
+};
#elif defined(__mips__)
// MIPS version of sigcontext, for Android bionic.
-typedef struct {
+struct mcontext_t {
uint32_t regmask;
uint32_t status;
uint64_t pc;
@@ -114,50 +114,50 @@ typedef struct {
uint32_t lo2;
uint32_t hi3;
uint32_t lo3;
-} mcontext_t;
+};
-typedef struct ucontext {
+struct ucontext_t {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
-} ucontext_t;
+};
#elif defined(__i386__)
// x86 version for Android.
-typedef struct {
+struct mcontext_t {
uint32_t gregs[19];
void* fpregs;
uint32_t oldmask;
uint32_t cr2;
-} mcontext_t;
+};
-typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
-typedef struct ucontext {
+using kernel_sigset_t = uint32_t[2]; // x86 kernel uses 64-bit signal masks
+struct ucontext_t {
uint32_t uc_flags;
struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
-} ucontext_t;
+};
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#elif defined(__x86_64__)
// x64 version for Android.
-typedef struct {
+struct mcontext_t {
uint64_t gregs[23];
void* fpregs;
uint64_t __reserved1[8];
-} mcontext_t;
+};
-typedef struct ucontext {
+struct ucontext_t {
uint64_t uc_flags;
struct ucontext *uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
// Other fields are not used by V8, don't define them here.
-} ucontext_t;
+};
enum { REG_RBP = 10, REG_RSP = 15, REG_RIP = 16 };
#endif
@@ -391,16 +391,20 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
state->pc = reinterpret_cast<void*>(mcontext.gregs[R15]);
state->sp = reinterpret_cast<void*>(mcontext.gregs[R13]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[R11]);
+ state->lr = reinterpret_cast<void*>(mcontext.gregs[R14]);
#else
state->pc = reinterpret_cast<void*>(mcontext.arm_pc);
state->sp = reinterpret_cast<void*>(mcontext.arm_sp);
state->fp = reinterpret_cast<void*>(mcontext.arm_fp);
+ state->lr = reinterpret_cast<void*>(mcontext.arm_lr);
#endif // V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
#elif V8_HOST_ARCH_ARM64
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.sp);
// FP is an alias for x29.
state->fp = reinterpret_cast<void*>(mcontext.regs[29]);
+ // LR is an alias for x30.
+ state->lr = reinterpret_cast<void*>(mcontext.regs[30]);
#elif V8_HOST_ARCH_MIPS
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
@@ -416,11 +420,13 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
state->fp =
reinterpret_cast<void*>(ucontext->uc_mcontext.regs->gpr[PT_R31]);
+ state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->link);
#else
// Some C libraries, notably Musl, define the regs member as a void pointer
state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[32]);
state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[1]);
state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[31]);
+ state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.gp_regs[36]);
#endif
#elif V8_HOST_ARCH_S390
#if V8_TARGET_ARCH_32_BIT
@@ -433,6 +439,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
#endif // V8_TARGET_ARCH_32_BIT
state->sp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[15]);
state->fp = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[11]);
+ state->lr = reinterpret_cast<void*>(ucontext->uc_mcontext.gregs[14]);
#endif // V8_HOST_ARCH_*
#elif V8_OS_IOS
@@ -512,6 +519,7 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
state->pc = reinterpret_cast<void*>(mcontext.jmp_context.iar);
state->sp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[1]);
state->fp = reinterpret_cast<void*>(mcontext.jmp_context.gpr[31]);
+ state->lr = reinterpret_cast<void*>(mcontext.jmp_context.lr);
#endif // V8_OS_AIX
}
diff --git a/deps/v8/src/libsampler/sampler.h b/deps/v8/src/libsampler/sampler.h
index 2b4f6e8883..997b127686 100644
--- a/deps/v8/src/libsampler/sampler.h
+++ b/deps/v8/src/libsampler/sampler.h
@@ -92,7 +92,7 @@ class V8_EXPORT_PRIVATE Sampler {
#ifdef USE_SIGNALS
-typedef std::atomic_bool AtomicMutex;
+using AtomicMutex = std::atomic_bool;
// A helper that uses an std::atomic_bool to create a lock that is obtained on
// construction and released on destruction.
@@ -120,7 +120,7 @@ class V8_EXPORT_PRIVATE AtomicGuard {
// take a sample for every Sampler on the current thread.
class V8_EXPORT_PRIVATE SamplerManager {
public:
- typedef std::vector<Sampler*> SamplerList;
+ using SamplerList = std::vector<Sampler*>;
// Add |sampler| to the map if it is not already present.
void AddSampler(Sampler* sampler);
diff --git a/deps/v8/src/logging/OWNERS b/deps/v8/src/logging/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/logging/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/logging/code-events.h
index 8a818cab7e..262ddf7df3 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/logging/code-events.h
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CODE_EVENTS_H_
-#define V8_CODE_EVENTS_H_
+#ifndef V8_LOGGING_CODE_EVENTS_H_
+#define V8_LOGGING_CODE_EVENTS_H_
#include <unordered_set>
#include "src/base/platform/mutex.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/code.h"
#include "src/objects/name.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -184,4 +184,4 @@ class CodeEventDispatcher {
} // namespace internal
} // namespace v8
-#endif // V8_CODE_EVENTS_H_
+#endif // V8_LOGGING_CODE_EVENTS_H_
diff --git a/deps/v8/src/counters-definitions.h b/deps/v8/src/logging/counters-definitions.h
index 3f7e668ff3..298d8d4446 100644
--- a/deps/v8/src/counters-definitions.h
+++ b/deps/v8/src/logging/counters-definitions.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COUNTERS_DEFINITIONS_H_
-#define V8_COUNTERS_DEFINITIONS_H_
+#ifndef V8_LOGGING_COUNTERS_DEFINITIONS_H_
+#define V8_LOGGING_COUNTERS_DEFINITIONS_H_
namespace v8 {
namespace internal {
@@ -17,9 +17,9 @@ namespace internal {
HR(code_cache_reject_reason, V8.CodeCacheRejectReason, 1, 6, 6) \
HR(errors_thrown_per_context, V8.ErrorsThrownPerContext, 0, 200, 20) \
HR(debug_feature_usage, V8.DebugFeatureUsage, 1, 7, 7) \
- HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22) \
+ HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 22, 23) \
HR(incremental_marking_sum, V8.GCIncrementalMarkingSum, 0, 10000, 101) \
- HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22) \
+ HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 22, 23) \
HR(gc_finalize_clear, V8.GCFinalizeMC.Clear, 0, 10000, 101) \
HR(gc_finalize_epilogue, V8.GCFinalizeMC.Epilogue, 0, 10000, 101) \
HR(gc_finalize_evacuate, V8.GCFinalizeMC.Evacuate, 0, 10000, 101) \
@@ -34,7 +34,7 @@ namespace internal {
/* Range and bucket matches BlinkGC.MainThreadMarkingThroughput. */ \
HR(gc_main_thread_marking_throughput, V8.GCMainThreadMarkingThroughput, 0, \
100000, 50) \
- HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22) \
+ HR(scavenge_reason, V8.GCScavengeReason, 0, 22, 23) \
HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3) \
/* Asm/Wasm. */ \
HR(wasm_functions_per_asm_module, V8.WasmFunctionsPerModule.asm, 1, 1000000, \
@@ -74,14 +74,22 @@ namespace internal {
HR(wasm_memory_allocation_result, V8.WasmMemoryAllocationResult, 0, 3, 4) \
HR(wasm_address_space_usage_mb, V8.WasmAddressSpaceUsageMiB, 0, 1 << 20, \
128) \
- /* code size of live modules, collected on GC */ \
+ /* committed code size per module, collected on GC */ \
HR(wasm_module_code_size_mb, V8.WasmModuleCodeSizeMiB, 0, 1024, 64) \
- /* code size of modules after baseline compilation */ \
+ /* code size per module after baseline compilation */ \
HR(wasm_module_code_size_mb_after_baseline, \
V8.WasmModuleCodeSizeBaselineMiB, 0, 1024, 64) \
- /* code size of modules after top-tier compilation */ \
+ /* code size per module after top-tier compilation */ \
HR(wasm_module_code_size_mb_after_top_tier, V8.WasmModuleCodeSizeTopTierMiB, \
- 0, 1024, 64)
+ 0, 1024, 64) \
+ /* freed code size per module, collected on GC */ \
+ HR(wasm_module_freed_code_size_mb, V8.WasmModuleCodeSizeFreed, 0, 1024, 64) \
+ /* percent of freed code size per module, collected on GC */ \
+ HR(wasm_module_freed_code_size_percent, V8.WasmModuleCodeSizePercentFreed, \
+ 0, 100, 32) \
+ /* number of code GCs triggered per native module, collected on code GC */ \
+ HR(wasm_module_num_triggered_code_gcs, \
+ V8.WasmModuleNumberOfCodeGCsTriggered, 1, 128, 20)
#define HISTOGRAM_TIMER_LIST(HT) \
/* Timer histograms, not thread safe: HT(name, caption, max, unit) */ \
@@ -146,6 +154,10 @@ namespace internal {
10000000, MICROSECOND) \
HT(turbofan_optimize_total_time, V8.TurboFanOptimizeTotalTime, 10000000, \
MICROSECOND) \
+ HT(turbofan_optimize_non_concurrent_total_time, \
+ V8.TurboFanOptimizeNonConcurrentTotalTime, 10000000, MICROSECOND) \
+ HT(turbofan_optimize_concurrent_total_time, \
+ V8.TurboFanOptimizeConcurrentTotalTime, 10000000, MICROSECOND) \
HT(turbofan_osr_prepare, V8.TurboFanOptimizeForOnStackReplacementPrepare, \
1000000, MICROSECOND) \
HT(turbofan_osr_execute, V8.TurboFanOptimizeForOnStackReplacementExecute, \
@@ -167,6 +179,12 @@ namespace internal {
10000000, MICROSECOND) \
HT(wasm_compile_wasm_module_time, V8.WasmCompileModuleMicroSeconds.wasm, \
10000000, MICROSECOND) \
+ HT(wasm_async_compile_wasm_module_time, \
+ V8.WasmCompileModuleAsyncMicroSeconds, 100000000, MICROSECOND) \
+ HT(wasm_streaming_compile_wasm_module_time, \
+ V8.WasmCompileModuleStreamingMicroSeconds, 100000000, MICROSECOND) \
+ HT(wasm_tier_up_module_time, V8.WasmTierUpModuleMicroSeconds, 100000000, \
+ MICROSECOND) \
HT(wasm_compile_asm_function_time, V8.WasmCompileFunctionMicroSeconds.asm, \
1000000, MICROSECOND) \
HT(wasm_compile_wasm_function_time, V8.WasmCompileFunctionMicroSeconds.wasm, \
@@ -177,6 +195,7 @@ namespace internal {
V8.WasmInstantiateModuleMicroSeconds.wasm, 10000000, MICROSECOND) \
HT(wasm_instantiate_asm_module_time, \
V8.WasmInstantiateModuleMicroSeconds.asm, 10000000, MICROSECOND) \
+ HT(wasm_code_gc_time, V8.WasmCodeGCTime, 1000000, MICROSECOND) \
/* Total compilation time incl. caching/parsing for various cache states. */ \
HT(compile_script_with_produce_cache, \
V8.CompileScriptMicroSeconds.ProduceCache, 1000000, MICROSECOND) \
@@ -225,42 +244,42 @@ namespace internal {
// lines) rather than one macro (of length about 80 lines) to work around
// this problem. Please avoid using recursive macros of this length when
// possible.
-#define STATS_COUNTER_LIST_1(SC) \
- /* Global Handle Count*/ \
- SC(global_handles, V8.GlobalHandles) \
- /* OS Memory allocated */ \
- SC(memory_allocated, V8.OsMemoryAllocated) \
- SC(maps_normalized, V8.MapsNormalized) \
- SC(maps_created, V8.MapsCreated) \
- SC(elements_transitions, V8.ObjectElementsTransitions) \
- SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
- SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
- SC(alive_after_last_gc, V8.AliveAfterLastGC) \
- SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
- SC(objs_since_last_full, V8.ObjsSinceLastFull) \
- SC(string_table_capacity, V8.StringTableCapacity) \
- SC(number_of_symbols, V8.NumberOfSymbols) \
- SC(inlined_copied_elements, V8.InlinedCopiedElements) \
- SC(compilation_cache_hits, V8.CompilationCacheHits) \
- SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- /* Amount of evaled source code. */ \
- SC(total_eval_size, V8.TotalEvalSize) \
- /* Amount of loaded source code. */ \
- SC(total_load_size, V8.TotalLoadSize) \
- /* Amount of parsed source code. */ \
- SC(total_parse_size, V8.TotalParseSize) \
- /* Amount of source code skipped over using preparsing. */ \
- SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
- /* Amount of compiled source code. */ \
- SC(total_compile_size, V8.TotalCompileSize) \
- /* Number of contexts created from scratch. */ \
- SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
- /* Number of contexts created by partial snapshot. */ \
- SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
- /* Number of code objects found from pc. */ \
- SC(pc_to_code, V8.PcToCode) \
- SC(pc_to_code_cached, V8.PcToCodeCached) \
- /* The store-buffer implementation of the write barrier. */ \
+#define STATS_COUNTER_LIST_1(SC) \
+ /* Global Handle Count*/ \
+ SC(global_handles, V8.GlobalHandles) \
+ /* OS Memory allocated */ \
+ SC(memory_allocated, V8.OsMemoryAllocated) \
+ SC(maps_normalized, V8.MapsNormalized) \
+ SC(maps_created, V8.MapsCreated) \
+ SC(elements_transitions, V8.ObjectElementsTransitions) \
+ SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
+ SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
+ SC(alive_after_last_gc, V8.AliveAfterLastGC) \
+ SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
+ SC(objs_since_last_full, V8.ObjsSinceLastFull) \
+ SC(string_table_capacity, V8.StringTableCapacity) \
+ SC(number_of_symbols, V8.NumberOfSymbols) \
+ SC(inlined_copied_elements, V8.InlinedCopiedElements) \
+ SC(compilation_cache_hits, V8.CompilationCacheHits) \
+ SC(compilation_cache_misses, V8.CompilationCacheMisses) \
+ /* Amount of evaled source code. */ \
+ SC(total_eval_size, V8.TotalEvalSize) \
+ /* Amount of loaded source code. */ \
+ SC(total_load_size, V8.TotalLoadSize) \
+ /* Amount of parsed source code. */ \
+ SC(total_parse_size, V8.TotalParseSize) \
+ /* Amount of source code skipped over using preparsing. */ \
+ SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
+ /* Amount of compiled source code. */ \
+ SC(total_compile_size, V8.TotalCompileSize) \
+ /* Number of contexts created from scratch. */ \
+ SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
+ /* Number of contexts created by partial snapshot. */ \
+ SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
+ /* Number of code objects found from pc. */ \
+ SC(pc_to_code, V8.PcToCode) \
+ SC(pc_to_code_cached, V8.PcToCodeCached) \
+ /* The store-buffer implementation of the write barrier. */ \
SC(store_buffer_overflows, V8.StoreBufferOverflows)
#define STATS_COUNTER_LIST_2(SC) \
@@ -329,4 +348,4 @@ namespace internal {
} // namespace internal
} // namespace v8
-#endif // V8_COUNTERS_DEFINITIONS_H_
+#endif // V8_LOGGING_COUNTERS_DEFINITIONS_H_
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/logging/counters-inl.h
index ed16fcf0b7..11a9d92eb2 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/logging/counters-inl.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COUNTERS_INL_H_
-#define V8_COUNTERS_INL_H_
+#ifndef V8_LOGGING_COUNTERS_INL_H_
+#define V8_LOGGING_COUNTERS_INL_H_
-#include "src/counters.h"
+#include "src/logging/counters.h"
namespace v8 {
namespace internal {
@@ -65,4 +65,4 @@ RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
} // namespace internal
} // namespace v8
-#endif // V8_COUNTERS_INL_H_
+#endif // V8_LOGGING_COUNTERS_INL_H_
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/logging/counters.cc
index 888e8dfe66..ce2b1fe9c0 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/logging/counters.cc
@@ -2,22 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/counters.h"
+#include "src/logging/counters.h"
#include <iomanip>
#include "src/base/platform/platform.h"
#include "src/builtins/builtins-definitions.h"
-#include "src/counters-inl.h"
-#include "src/isolate.h"
-#include "src/log-inl.h"
-#include "src/log.h"
-#include "src/ostreams.h"
+#include "src/execution/isolate.h"
+#include "src/logging/counters-inl.h"
+#include "src/logging/log-inl.h"
+#include "src/logging/log.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
std::atomic_uint TracingFlags::runtime_stats{0};
+std::atomic_uint TracingFlags::gc{0};
std::atomic_uint TracingFlags::gc_stats{0};
std::atomic_uint TracingFlags::ic_stats{0};
@@ -295,23 +296,23 @@ void Counters::ResetCreateHistogramFunction(CreateHistogramCallback f) {
#undef HR
#define HT(name, caption, max, res) name##_.Reset();
- HISTOGRAM_TIMER_LIST(HT)
+ HISTOGRAM_TIMER_LIST(HT)
#undef HT
#define HT(name, caption, max, res) name##_.Reset();
- TIMED_HISTOGRAM_LIST(HT)
+ TIMED_HISTOGRAM_LIST(HT)
#undef HT
#define AHT(name, caption) name##_.Reset();
- AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
+ AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
#undef AHT
#define HP(name, caption) name##_.Reset();
- HISTOGRAM_PERCENTAGE_LIST(HP)
+ HISTOGRAM_PERCENTAGE_LIST(HP)
#undef HP
#define HM(name, caption) name##_.Reset();
- HISTOGRAM_LEGACY_MEMORY_LIST(HM)
+ HISTOGRAM_LEGACY_MEMORY_LIST(HM)
#undef HM
}
diff --git a/deps/v8/src/counters.h b/deps/v8/src/logging/counters.h
index 3bcc8503a3..bfe52f45ac 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -2,24 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COUNTERS_H_
-#define V8_COUNTERS_H_
+#ifndef V8_LOGGING_COUNTERS_H_
+#define V8_LOGGING_COUNTERS_H_
#include "include/v8.h"
-#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/time.h"
-#include "src/counters-definitions.h"
-#include "src/globals.h"
-#include "src/heap-symbols.h"
-#include "src/isolate.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/init/heap-symbols.h"
+#include "src/logging/counters-definitions.h"
+#include "src/objects/objects.h"
#include "src/runtime/runtime.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
#include "src/tracing/tracing-category-observer.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -30,6 +30,7 @@ namespace internal {
struct TracingFlags {
static V8_EXPORT_PRIVATE std::atomic_uint runtime_stats;
+ static V8_EXPORT_PRIVATE std::atomic_uint gc;
static V8_EXPORT_PRIVATE std::atomic_uint gc_stats;
static V8_EXPORT_PRIVATE std::atomic_uint ic_stats;
@@ -37,6 +38,10 @@ struct TracingFlags {
return runtime_stats.load(std::memory_order_relaxed) != 0;
}
+ static bool is_gc_enabled() {
+ return gc.load(std::memory_order_relaxed) != 0;
+ }
+
static bool is_gc_stats_enabled() {
return gc_stats.load(std::memory_order_relaxed) != 0;
}
@@ -88,10 +93,7 @@ class StatsTable {
// function. min and max define the expected minimum and maximum
// sample values. buckets is the maximum number of buckets
// that the samples will be grouped into.
- void* CreateHistogram(const char* name,
- int min,
- int max,
- size_t buckets) {
+ void* CreateHistogram(const char* name, int min, int max, size_t buckets) {
if (!create_histogram_function_) return nullptr;
return create_histogram_function_(name, min, max, buckets);
}
@@ -424,9 +426,7 @@ class HistogramTimer : public TimedHistogram {
inline void Stop();
// Returns true if the timer is running.
- bool Running() {
- return Enabled() && timer_.IsStarted();
- }
+ bool Running() { return Enabled() && timer_.IsStarted(); }
// TODO(bmeurer): Remove this when HistogramTimerScope is fixed.
#ifdef DEBUG
@@ -549,7 +549,6 @@ class AggregatedHistogramTimerScope {
AggregatableHistogramTimer* histogram_;
};
-
// AggretatedMemoryHistogram collects (time, value) sample pairs and turns
// them into time-uniform samples for the backing historgram, such that the
// backing histogram receives one sample every T ms, where the T is controlled
@@ -599,7 +598,6 @@ class AggregatedMemoryHistogram {
Histogram* backing_histogram_;
};
-
template <typename Histogram>
void AggregatedMemoryHistogram<Histogram>::AddSample(double current_ms,
double current_value) {
@@ -657,7 +655,6 @@ void AggregatedMemoryHistogram<Histogram>::AddSample(double current_ms,
}
}
-
template <typename Histogram>
double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
double current_value) {
@@ -1339,23 +1336,19 @@ class Counters : public std::enable_shared_from_this<Counters> {
TIMED_HISTOGRAM_LIST(HT)
#undef HT
-#define AHT(name, caption) \
- AggregatableHistogramTimer name##_;
+#define AHT(name, caption) AggregatableHistogramTimer name##_;
AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT)
#undef AHT
-#define HP(name, caption) \
- Histogram name##_;
+#define HP(name, caption) Histogram name##_;
HISTOGRAM_PERCENTAGE_LIST(HP)
#undef HP
-#define HM(name, caption) \
- Histogram name##_;
+#define HM(name, caption) Histogram name##_;
HISTOGRAM_LEGACY_MEMORY_LIST(HM)
#undef HM
-#define SC(name, caption) \
- StatsCounter name##_;
+#define SC(name, caption) StatsCounter name##_;
STATS_COUNTER_LIST_1(SC)
STATS_COUNTER_LIST_2(SC)
STATS_COUNTER_NATIVE_CODE_LIST(SC)
@@ -1365,19 +1358,19 @@ class Counters : public std::enable_shared_from_this<Counters> {
STATS_COUNTER_TS_LIST(SC)
#undef SC
-#define SC(name) \
+#define SC(name) \
StatsCounter size_of_##name##_; \
StatsCounter count_of_##name##_;
INSTANCE_TYPE_LIST(SC)
#undef SC
-#define SC(name) \
+#define SC(name) \
StatsCounter size_of_CODE_TYPE_##name##_; \
StatsCounter count_of_CODE_TYPE_##name##_;
CODE_KIND_LIST(SC)
#undef SC
-#define SC(name) \
+#define SC(name) \
StatsCounter size_of_FIXED_ARRAY_##name##_; \
StatsCounter count_of_FIXED_ARRAY_##name##_;
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
@@ -1407,4 +1400,4 @@ RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
} // namespace internal
} // namespace v8
-#endif // V8_COUNTERS_H_
+#endif // V8_LOGGING_COUNTERS_H_
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/logging/log-inl.h
index 92659c2a9c..83677f5f64 100644
--- a/deps/v8/src/log-inl.h
+++ b/deps/v8/src/logging/log-inl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOG_INL_H_
-#define V8_LOG_INL_H_
+#ifndef V8_LOGGING_LOG_INL_H_
+#define V8_LOGGING_LOG_INL_H_
-#include "src/log.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/tracing/trace-event.h"
namespace v8 {
@@ -15,7 +15,7 @@ namespace internal {
CodeEventListener::LogEventsAndTags Logger::ToNativeByScript(
CodeEventListener::LogEventsAndTags tag, Script script) {
- if (script->type() != Script::TYPE_NATIVE) return tag;
+ if (script.type() != Script::TYPE_NATIVE) return tag;
switch (tag) {
case CodeEventListener::FUNCTION_TAG:
return CodeEventListener::NATIVE_FUNCTION_TAG;
@@ -48,4 +48,4 @@ void TimerEventScope<TimerEvent>::LogTimerEvent(Logger::StartEnd se) {
} // namespace internal
} // namespace v8
-#endif // V8_LOG_INL_H_
+#endif // V8_LOGGING_LOG_INL_H_
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/logging/log-utils.cc
index b017b50a3f..3980882402 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/logging/log-utils.cc
@@ -2,20 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/log-utils.h"
+#include "src/logging/log-utils.h"
-#include "src/assert-scope.h"
#include "src/base/platform/platform.h"
-#include "src/objects-inl.h"
-#include "src/string-stream.h"
-#include "src/utils.h"
-#include "src/vector.h"
-#include "src/version.h"
+#include "src/common/assert-scope.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/string-stream.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
+#include "src/utils/version.h"
namespace v8 {
namespace internal {
-
const char* const Log::kLogToTemporaryFile = "&";
const char* const Log::kLogToConsole = "-";
@@ -93,10 +92,10 @@ void Log::MessageBuilder::AppendString(String str,
if (str.is_null()) return;
DisallowHeapAllocation no_gc; // Ensure string stays valid.
- int length = str->length();
+ int length = str.length();
if (length_limit) length = std::min(length, *length_limit);
for (int i = 0; i < length; i++) {
- uint16_t c = str->Get(i);
+ uint16_t c = str.Get(i);
if (c <= 0xFF) {
AppendCharacter(static_cast<char>(c));
} else {
@@ -159,12 +158,12 @@ void Log::MessageBuilder::AppendSymbolName(Symbol symbol) {
DCHECK(!symbol.is_null());
OFStream& os = log_->os_;
os << "symbol(";
- if (!symbol->name()->IsUndefined()) {
+ if (!symbol.name().IsUndefined()) {
os << "\"";
- AppendSymbolNameDetails(String::cast(symbol->name()), false);
+ AppendSymbolNameDetails(String::cast(symbol.name()), false);
os << "\" ";
}
- os << "hash " << std::hex << symbol->Hash() << std::dec << ")";
+ os << "hash " << std::hex << symbol.Hash() << std::dec << ")";
}
void Log::MessageBuilder::AppendSymbolNameDetails(String str,
@@ -173,13 +172,13 @@ void Log::MessageBuilder::AppendSymbolNameDetails(String str,
DisallowHeapAllocation no_gc; // Ensure string stays valid.
OFStream& os = log_->os_;
- int limit = str->length();
+ int limit = str.length();
if (limit > 0x1000) limit = 0x1000;
if (show_impl_info) {
- os << (str->IsOneByteRepresentation() ? 'a' : '2');
+ os << (str.IsOneByteRepresentation() ? 'a' : '2');
if (StringShape(str).IsExternal()) os << 'e';
if (StringShape(str).IsInternalized()) os << '#';
- os << ':' << str->length() << ':';
+ os << ':' << str.length() << ':';
}
AppendString(str, limit);
}
@@ -246,7 +245,7 @@ Log::MessageBuilder& Log::MessageBuilder::operator<<<Symbol>(Symbol symbol) {
template <>
Log::MessageBuilder& Log::MessageBuilder::operator<<<Name>(Name name) {
- if (name->IsString()) {
+ if (name.IsString()) {
this->AppendString(String::cast(name));
} else {
this->AppendSymbolName(Symbol::cast(name));
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/logging/log-utils.h
index ab3cea653c..bc5b09d438 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/logging/log-utils.h
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOG_UTILS_H_
-#define V8_LOG_UTILS_H_
+#ifndef V8_LOGGING_LOG_UTILS_H_
+#define V8_LOGGING_LOG_UTILS_H_
#include <stdio.h>
#include <cstdarg>
-#include "src/allocation.h"
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
-#include "src/flags.h"
-#include "src/ostreams.h"
+#include "src/flags/flags.h"
+#include "src/utils/allocation.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -152,4 +152,4 @@ Log::MessageBuilder& Log::MessageBuilder::operator<<<Name>(Name name);
} // namespace internal
} // namespace v8
-#endif // V8_LOG_UTILS_H_
+#endif // V8_LOGGING_LOG_UTILS_H_
diff --git a/deps/v8/src/log.cc b/deps/v8/src/logging/log.cc
index 3f2892c4bd..9f8cf82d36 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -2,41 +2,41 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/log.h"
+#include "src/logging/log.h"
#include <cstdarg>
#include <memory>
#include <sstream>
-#include "src/api-inl.h"
-#include "src/bailout-reason.h"
+#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
-#include "src/bootstrapper.h"
-#include "src/counters.h"
-#include "src/deoptimizer.h"
-#include "src/global-handles.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/source-position-table.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/diagnostics/perf-jit.h"
+#include "src/execution/isolate.h"
+#include "src/execution/runtime-profiler.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/handles/global-handles.h"
+#include "src/init/bootstrapper.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
#include "src/libsampler/sampler.h"
-#include "src/log-inl.h"
-#include "src/log-utils.h"
-#include "src/macro-assembler.h"
-#include "src/memcopy.h"
+#include "src/logging/counters.h"
+#include "src/logging/log-inl.h"
+#include "src/logging/log-utils.h"
#include "src/objects/api-callbacks.h"
-#include "src/perf-jit.h"
#include "src/profiler/tick-sample.h"
-#include "src/runtime-profiler.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/source-position-table.h"
-#include "src/string-stream.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/string-stream.h"
+#include "src/strings/unicode-inl.h"
#include "src/tracing/tracing-category-observer.h"
-#include "src/unicode-inl.h"
-#include "src/vm-state-inl.h"
+#include "src/utils/memcopy.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/version.h"
+#include "src/utils/version.h"
namespace v8 {
namespace internal {
@@ -73,9 +73,9 @@ static v8::CodeEventType GetCodeEventTypeForTag(
}
static const char* ComputeMarker(SharedFunctionInfo shared, AbstractCode code) {
- switch (code->kind()) {
+ switch (code.kind()) {
case AbstractCode::INTERPRETED_FUNCTION:
- return shared->optimization_disabled() ? "" : "~";
+ return shared.optimization_disabled() ? "" : "~";
case AbstractCode::OPTIMIZED_FUNCTION:
return "*";
default:
@@ -98,9 +98,7 @@ class CodeEventLogger::NameBuffer {
public:
NameBuffer() { Reset(); }
- void Reset() {
- utf8_pos_ = 0;
- }
+ void Reset() { utf8_pos_ = 0; }
void Init(CodeEventListener::LogEventsAndTags tag) {
Reset();
@@ -109,18 +107,18 @@ class CodeEventLogger::NameBuffer {
}
void AppendName(Name name) {
- if (name->IsString()) {
+ if (name.IsString()) {
AppendString(String::cast(name));
} else {
Symbol symbol = Symbol::cast(name);
AppendBytes("symbol(");
- if (!symbol->name()->IsUndefined()) {
+ if (!symbol.name().IsUndefined()) {
AppendBytes("\"");
- AppendString(String::cast(symbol->name()));
+ AppendString(String::cast(symbol.name()));
AppendBytes("\" ");
}
AppendBytes("hash ");
- AppendHex(symbol->Hash());
+ AppendHex(symbol.Hash());
AppendByte(')');
}
}
@@ -129,7 +127,7 @@ class CodeEventLogger::NameBuffer {
if (str.is_null()) return;
int length = 0;
std::unique_ptr<char[]> c_str =
- str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, &length);
+ str.ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, &length);
AppendBytes(c_str.get(), length);
}
@@ -140,7 +138,9 @@ class CodeEventLogger::NameBuffer {
}
void AppendBytes(const char* bytes) {
- AppendBytes(bytes, StrLength(bytes));
+ size_t len = strlen(bytes);
+ DCHECK_GE(kMaxInt, len);
+ AppendBytes(bytes, static_cast<int>(len));
}
void AppendByte(char c) {
@@ -216,13 +216,13 @@ void CodeEventLogger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
int line, int column) {
name_buffer_->Init(tag);
name_buffer_->AppendBytes(ComputeMarker(shared, code));
- name_buffer_->AppendString(shared->DebugName());
+ name_buffer_->AppendString(shared.DebugName());
name_buffer_->AppendByte(' ');
- if (source->IsString()) {
+ if (source.IsString()) {
name_buffer_->AppendString(String::cast(source));
} else {
name_buffer_->AppendBytes("symbol(hash ");
- name_buffer_->AppendHex(Name::cast(source)->Hash());
+ name_buffer_->AppendHex(Name::cast(source).Hash());
name_buffer_->AppendByte(')');
}
name_buffer_->AppendByte(':');
@@ -237,7 +237,7 @@ void CodeEventLogger::CodeCreateEvent(LogEventsAndTags tag,
if (name.empty()) {
name_buffer_->AppendBytes("<wasm-unknown>");
} else {
- name_buffer_->AppendBytes(name.start(), name.length());
+ name_buffer_->AppendBytes(name.begin(), name.length());
}
name_buffer_->AppendByte('-');
if (code->IsAnonymous()) {
@@ -289,18 +289,15 @@ PerfBasicLogger::PerfBasicLogger(Isolate* isolate)
// Open the perf JIT dump file.
int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
ScopedVector<char> perf_dump_name(bufferSize);
- int size = SNPrintF(
- perf_dump_name,
- kFilenameFormatString,
- base::OS::GetCurrentProcessId());
+ int size = SNPrintF(perf_dump_name, kFilenameFormatString,
+ base::OS::GetCurrentProcessId());
CHECK_NE(size, -1);
perf_output_handle_ =
- base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
+ base::OS::FOpen(perf_dump_name.begin(), base::OS::LogFileOpenMode);
CHECK_NOT_NULL(perf_output_handle_);
setvbuf(perf_output_handle_, nullptr, _IOLBF, 0);
}
-
PerfBasicLogger::~PerfBasicLogger() {
fclose(perf_output_handle_);
perf_output_handle_ = nullptr;
@@ -322,14 +319,14 @@ void PerfBasicLogger::WriteLogRecordedBuffer(uintptr_t address, int size,
void PerfBasicLogger::LogRecordedBuffer(AbstractCode code, SharedFunctionInfo,
const char* name, int length) {
if (FLAG_perf_basic_prof_only_functions &&
- (code->kind() != AbstractCode::INTERPRETED_FUNCTION &&
- code->kind() != AbstractCode::BUILTIN &&
- code->kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
+ (code.kind() != AbstractCode::INTERPRETED_FUNCTION &&
+ code.kind() != AbstractCode::BUILTIN &&
+ code.kind() != AbstractCode::OPTIMIZED_FUNCTION)) {
return;
}
- WriteLogRecordedBuffer(static_cast<uintptr_t>(code->InstructionStart()),
- code->InstructionSize(), name, length);
+ WriteLogRecordedBuffer(static_cast<uintptr_t>(code.InstructionStart()),
+ code.InstructionSize(), name, length);
}
void PerfBasicLogger::LogRecordedBuffer(const wasm::WasmCode* code,
@@ -381,8 +378,8 @@ void ExternalCodeEventListener::CodeCreateEvent(
const char* comment) {
CodeEvent code_event;
code_event.code_start_address =
- static_cast<uintptr_t>(code->InstructionStart());
- code_event.code_size = static_cast<size_t>(code->InstructionSize());
+ static_cast<uintptr_t>(code.InstructionStart());
+ code_event.code_size = static_cast<size_t>(code.InstructionSize());
code_event.function_name = isolate_->factory()->empty_string();
code_event.script_name = isolate_->factory()->empty_string();
code_event.script_line = 0;
@@ -401,8 +398,8 @@ void ExternalCodeEventListener::CodeCreateEvent(
CodeEvent code_event;
code_event.code_start_address =
- static_cast<uintptr_t>(code->InstructionStart());
- code_event.code_size = static_cast<size_t>(code->InstructionSize());
+ static_cast<uintptr_t>(code.InstructionStart());
+ code_event.code_size = static_cast<size_t>(code.InstructionSize());
code_event.function_name = name_string;
code_event.script_name = isolate_->factory()->empty_string();
code_event.script_line = 0;
@@ -422,8 +419,8 @@ void ExternalCodeEventListener::CodeCreateEvent(
CodeEvent code_event;
code_event.code_start_address =
- static_cast<uintptr_t>(code->InstructionStart());
- code_event.code_size = static_cast<size_t>(code->InstructionSize());
+ static_cast<uintptr_t>(code.InstructionStart());
+ code_event.code_size = static_cast<size_t>(code.InstructionSize());
code_event.function_name = name_string;
code_event.script_name = isolate_->factory()->empty_string();
code_event.script_line = 0;
@@ -438,7 +435,7 @@ void ExternalCodeEventListener::CodeCreateEvent(
CodeEventListener::LogEventsAndTags tag, AbstractCode code,
SharedFunctionInfo shared, Name source, int line, int column) {
Handle<String> name_string =
- Name::ToFunctionName(isolate_, Handle<Name>(shared->Name(), isolate_))
+ Name::ToFunctionName(isolate_, Handle<Name>(shared.Name(), isolate_))
.ToHandleChecked();
Handle<String> source_string =
Name::ToFunctionName(isolate_, Handle<Name>(source, isolate_))
@@ -446,8 +443,8 @@ void ExternalCodeEventListener::CodeCreateEvent(
CodeEvent code_event;
code_event.code_start_address =
- static_cast<uintptr_t>(code->InstructionStart());
- code_event.code_size = static_cast<size_t>(code->InstructionSize());
+ static_cast<uintptr_t>(code.InstructionStart());
+ code_event.code_size = static_cast<size_t>(code.InstructionSize());
code_event.function_name = name_string;
code_event.script_name = source_string;
code_event.script_line = line;
@@ -468,8 +465,8 @@ void ExternalCodeEventListener::RegExpCodeCreateEvent(AbstractCode code,
String source) {
CodeEvent code_event;
code_event.code_start_address =
- static_cast<uintptr_t>(code->InstructionStart());
- code_event.code_size = static_cast<size_t>(code->InstructionSize());
+ static_cast<uintptr_t>(code.InstructionStart());
+ code_event.code_size = static_cast<size_t>(code.InstructionSize());
code_event.function_name = Handle<String>(source, isolate_);
code_event.script_name = isolate_->factory()->empty_string();
code_event.script_line = 0;
@@ -507,7 +504,6 @@ class LowLevelLogger : public CodeEventLogger {
int32_t code_size;
};
-
struct CodeMoveStruct {
static const char kTag = 'M';
@@ -515,10 +511,8 @@ class LowLevelLogger : public CodeEventLogger {
Address to_address;
};
-
static const char kCodeMovingGCTag = 'G';
-
// Extension added to V8 log file name to get the low-level log name.
static const char kLogExt[];
@@ -542,22 +536,20 @@ LowLevelLogger::LowLevelLogger(Isolate* isolate, const char* name)
// Open the low-level log file.
size_t len = strlen(name);
ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLogExt)));
- MemCopy(ll_name.start(), name, len);
- MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt));
+ MemCopy(ll_name.begin(), name, len);
+ MemCopy(ll_name.begin() + len, kLogExt, sizeof(kLogExt));
ll_output_handle_ =
- base::OS::FOpen(ll_name.start(), base::OS::LogFileOpenMode);
+ base::OS::FOpen(ll_name.begin(), base::OS::LogFileOpenMode);
setvbuf(ll_output_handle_, nullptr, _IOLBF, 0);
LogCodeInfo();
}
-
LowLevelLogger::~LowLevelLogger() {
fclose(ll_output_handle_);
ll_output_handle_ = nullptr;
}
-
void LowLevelLogger::LogCodeInfo() {
#if V8_TARGET_ARCH_IA32
const char arch[] = "ia32";
@@ -583,12 +575,12 @@ void LowLevelLogger::LogRecordedBuffer(AbstractCode code, SharedFunctionInfo,
const char* name, int length) {
CodeCreateStruct event;
event.name_size = length;
- event.code_address = code->InstructionStart();
- event.code_size = code->InstructionSize();
+ event.code_address = code.InstructionStart();
+ event.code_size = code.InstructionSize();
LogWriteStruct(event);
LogWriteBytes(name, length);
- LogWriteBytes(reinterpret_cast<const char*>(code->InstructionStart()),
- code->InstructionSize());
+ LogWriteBytes(reinterpret_cast<const char*>(code.InstructionStart()),
+ code.InstructionSize());
}
void LowLevelLogger::LogRecordedBuffer(const wasm::WasmCode* code,
@@ -605,8 +597,8 @@ void LowLevelLogger::LogRecordedBuffer(const wasm::WasmCode* code,
void LowLevelLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
CodeMoveStruct event;
- event.from_address = from->InstructionStart();
- event.to_address = to->InstructionStart();
+ event.from_address = from.InstructionStart();
+ event.to_address = to.InstructionStart();
LogWriteStruct(event);
}
@@ -616,7 +608,6 @@ void LowLevelLogger::LogWriteBytes(const char* bytes, int size) {
USE(rv);
}
-
void LowLevelLogger::CodeMovingGCEvent() {
const char tag = kCodeMovingGCTag;
@@ -655,14 +646,14 @@ void JitLogger::LogRecordedBuffer(AbstractCode code, SharedFunctionInfo shared,
JitCodeEvent event;
memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
- event.code_start = reinterpret_cast<void*>(code->InstructionStart());
+ event.code_start = reinterpret_cast<void*>(code.InstructionStart());
event.code_type =
- code->IsCode() ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
- event.code_len = code->InstructionSize();
+ code.IsCode() ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
+ event.code_len = code.InstructionSize();
Handle<SharedFunctionInfo> shared_function_handle;
- if (!shared.is_null() && shared->script()->IsScript()) {
+ if (!shared.is_null() && shared.script().IsScript()) {
shared_function_handle =
- Handle<SharedFunctionInfo>(shared, shared->GetIsolate());
+ Handle<SharedFunctionInfo>(shared, shared.GetIsolate());
}
event.script = ToApiHandle<v8::UnboundScript>(shared_function_handle);
event.name.str = name;
@@ -677,7 +668,7 @@ void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
memset(static_cast<void*>(&event), 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
event.code_type = JitCodeEvent::JIT_CODE;
- event.code_start = code->instructions().start();
+ event.code_start = code->instructions().begin();
event.code_len = code->instructions().length();
event.name.str = name;
event.name.len = length;
@@ -691,19 +682,17 @@ void JitLogger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
JitCodeEvent event;
event.type = JitCodeEvent::CODE_MOVED;
event.code_type =
- from->IsCode() ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
- event.code_start = reinterpret_cast<void*>(from->InstructionStart());
- event.code_len = from->InstructionSize();
- event.new_code_start = reinterpret_cast<void*>(to->InstructionStart());
+ from.IsCode() ? JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE;
+ event.code_start = reinterpret_cast<void*>(from.InstructionStart());
+ event.code_len = from.InstructionSize();
+ event.new_code_start = reinterpret_cast<void*>(to.InstructionStart());
event.isolate = reinterpret_cast<v8::Isolate*>(isolate_);
code_event_handler_(&event);
}
void JitLogger::AddCodeLinePosInfoEvent(
- void* jit_handler_data,
- int pc_offset,
- int position,
+ void* jit_handler_data, int pc_offset, int position,
JitCodeEvent::PositionType position_type) {
JitCodeEvent event;
memset(static_cast<void*>(&event), 0, sizeof(event));
@@ -717,7 +706,6 @@ void JitLogger::AddCodeLinePosInfoEvent(
code_event_handler_(&event);
}
-
void* JitLogger::StartCodePosInfoEvent() {
JitCodeEvent event;
memset(static_cast<void*>(&event), 0, sizeof(event));
@@ -740,7 +728,6 @@ void JitLogger::EndCodePosInfoEvent(Address start_address,
code_event_handler_(&event);
}
-
// TODO(lpy): Keeping sampling thread inside V8 is a workaround currently,
// the reason is to reduce code duplication during migration to sampler library,
// sampling thread, as well as the sampler, will be moved to D8 eventually.
@@ -766,13 +753,12 @@ class SamplingThread : public base::Thread {
const int interval_microseconds_;
};
-
// The Profiler samples pc and sp values for the main thread.
// Each sample is appended to a circular buffer.
// An independent thread removes data and writes it to the log.
// This design minimizes the time spent in the sampler.
//
-class Profiler: public base::Thread {
+class Profiler : public base::Thread {
public:
explicit Profiler(Isolate* isolate);
void Engage();
@@ -811,8 +797,8 @@ class Profiler: public base::Thread {
// between the signal handler and the worker thread.
static const int kBufferSize = 128;
v8::TickSample buffer_[kBufferSize]; // Buffer storage.
- int head_; // Index to the buffer head.
- base::Atomic32 tail_; // Index to the buffer tail.
+ int head_; // Index to the buffer head.
+ base::Atomic32 tail_; // Index to the buffer tail.
bool overflow_; // Tell whether a buffer overflow has occurred.
// Semaphore used for buffer synchronization.
base::Semaphore buffer_semaphore_;
@@ -821,12 +807,11 @@ class Profiler: public base::Thread {
base::Atomic32 running_;
};
-
//
// Ticker used to provide ticks to the profiler and the sliding state
// window.
//
-class Ticker: public sampler::Sampler {
+class Ticker : public sampler::Sampler {
public:
Ticker(Isolate* isolate, int interval_microseconds)
: sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
@@ -895,7 +880,6 @@ void Profiler::Engage() {
logger->ProfilerBeginEvent();
}
-
void Profiler::Disengage() {
// Stop receiving ticks.
isolate_->logger()->ticker_->ClearProfiler();
@@ -911,7 +895,6 @@ void Profiler::Disengage() {
LOG(isolate_, UncheckedStringEvent("profiler", "end"));
}
-
void Profiler::Run() {
v8::TickSample sample;
bool overflow = Remove(&sample);
@@ -921,7 +904,6 @@ void Profiler::Run() {
}
}
-
//
// Logger class implementation.
//
@@ -934,9 +916,7 @@ Logger::Logger(Isolate* isolate)
is_initialized_(false),
existing_code_logger_(isolate) {}
-Logger::~Logger() {
- delete log_;
-}
+Logger::~Logger() { delete log_; }
const LogSeparator Logger::kNext = LogSeparator::kSeparator;
@@ -956,12 +936,10 @@ void Logger::ProfilerBeginEvent() {
msg.WriteToLogFile();
}
-
void Logger::StringEvent(const char* name, const char* value) {
if (FLAG_log) UncheckedStringEvent(name, value);
}
-
void Logger::UncheckedStringEvent(const char* name, const char* value) {
if (!log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -969,12 +947,10 @@ void Logger::UncheckedStringEvent(const char* name, const char* value) {
msg.WriteToLogFile();
}
-
void Logger::IntPtrTEvent(const char* name, intptr_t value) {
if (FLAG_log) UncheckedIntPtrTEvent(name, value);
}
-
void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
if (!log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -990,7 +966,6 @@ void Logger::HandleEvent(const char* name, Address* location) {
msg.WriteToLogFile();
}
-
void Logger::ApiSecurityCheck() {
if (!log_->IsEnabled() || !FLAG_log_api) return;
Log::MessageBuilder msg(log_);
@@ -1015,8 +990,8 @@ void Logger::CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
Log::MessageBuilder msg(log_);
msg << "code-deopt" << kNext << timer_.Elapsed().InMicroseconds() << kNext
- << code->CodeSize() << kNext
- << reinterpret_cast<void*>(code->InstructionStart());
+ << code.CodeSize() << kNext
+ << reinterpret_cast<void*>(code.InstructionStart());
// Deoptimization position.
std::ostringstream deopt_location;
@@ -1036,7 +1011,6 @@ void Logger::CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
msg.WriteToLogFile();
}
-
void Logger::CurrentTimeEvent() {
if (!log_->IsEnabled()) return;
DCHECK(FLAG_log_internal_timer_events);
@@ -1045,7 +1019,6 @@ void Logger::CurrentTimeEvent() {
msg.WriteToLogFile();
}
-
void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
if (!log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
@@ -1063,20 +1036,10 @@ void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
msg.WriteToLogFile();
}
-// static
-void Logger::EnterExternal(Isolate* isolate) {
- DCHECK(FLAG_log_internal_timer_events);
- LOG(isolate, TimerEvent(START, TimerEventExternal::name()));
- DCHECK(isolate->current_vm_state() == JS);
- isolate->set_current_vm_state(EXTERNAL);
-}
-
-// static
-void Logger::LeaveExternal(Isolate* isolate) {
- DCHECK(FLAG_log_internal_timer_events);
- LOG(isolate, TimerEvent(END, TimerEventExternal::name()));
- DCHECK(isolate->current_vm_state() == EXTERNAL);
- isolate->set_current_vm_state(JS);
+bool Logger::is_logging() {
+ // Disable logging while the CPU profiler is running.
+ if (isolate_->is_profiling()) return false;
+ return is_logging_;
}
// Instantiate template methods.
@@ -1088,10 +1051,10 @@ TIMER_EVENTS_LIST(V)
void Logger::ApiNamedPropertyAccess(const char* tag, JSObject holder,
Object property_name) {
- DCHECK(property_name->IsName());
+ DCHECK(property_name.IsName());
if (!log_->IsEnabled() || !FLAG_log_api) return;
Log::MessageBuilder msg(log_);
- msg << "api" << kNext << tag << kNext << holder->class_name() << kNext
+ msg << "api" << kNext << tag << kNext << holder.class_name() << kNext
<< Name::cast(property_name);
msg.WriteToLogFile();
}
@@ -1100,7 +1063,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag, JSObject holder,
uint32_t index) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
Log::MessageBuilder msg(log_);
- msg << "api" << kNext << tag << kNext << holder->class_name() << kNext
+ msg << "api" << kNext << tag << kNext << holder.class_name() << kNext
<< index;
msg.WriteToLogFile();
}
@@ -1108,7 +1071,7 @@ void Logger::ApiIndexedPropertyAccess(const char* tag, JSObject holder,
void Logger::ApiObjectAccess(const char* tag, JSObject object) {
if (!log_->IsEnabled() || !FLAG_log_api) return;
Log::MessageBuilder msg(log_);
- msg << "api" << kNext << tag << kNext << object->class_name();
+ msg << "api" << kNext << tag << kNext << object.class_name();
msg.WriteToLogFile();
}
@@ -1119,7 +1082,6 @@ void Logger::ApiEntryCall(const char* name) {
msg.WriteToLogFile();
}
-
void Logger::NewEvent(const char* name, void* object, size_t size) {
if (!log_->IsEnabled() || !FLAG_log) return;
Log::MessageBuilder msg(log_);
@@ -1128,7 +1090,6 @@ void Logger::NewEvent(const char* name, void* object, size_t size) {
msg.WriteToLogFile();
}
-
void Logger::DeleteEvent(const char* name, void* object) {
if (!log_->IsEnabled() || !FLAG_log) return;
Log::MessageBuilder msg(log_);
@@ -1176,9 +1137,9 @@ void AppendCodeCreateHeader(Log::MessageBuilder& msg,
void AppendCodeCreateHeader(Log::MessageBuilder& msg,
CodeEventListener::LogEventsAndTags tag,
AbstractCode code, base::ElapsedTimer* timer) {
- AppendCodeCreateHeader(msg, tag, code->kind(),
- reinterpret_cast<uint8_t*>(code->InstructionStart()),
- code->InstructionSize(), timer);
+ AppendCodeCreateHeader(msg, tag, code.kind(),
+ reinterpret_cast<uint8_t*>(code.InstructionStart()),
+ code.InstructionSize(), timer);
}
} // namespace
@@ -1215,7 +1176,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(msg, tag, code, &timer_);
- msg << name << kNext << reinterpret_cast<void*>(shared->address()) << kNext
+ msg << name << kNext << reinterpret_cast<void*>(shared.address()) << kNext
<< ComputeMarker(shared, code);
msg.WriteToLogFile();
}
@@ -1226,7 +1187,7 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(msg, tag, AbstractCode::Kind::WASM_FUNCTION,
- code->instructions().start(),
+ code->instructions().begin(),
code->instructions().length(), &timer_);
if (name.empty()) {
msg << "<unknown wasm>";
@@ -1255,15 +1216,15 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
{
Log::MessageBuilder msg(log_);
AppendCodeCreateHeader(msg, tag, code, &timer_);
- msg << shared->DebugName() << " " << source << ":" << line << ":" << column
- << kNext << reinterpret_cast<void*>(shared->address()) << kNext
+ msg << shared.DebugName() << " " << source << ":" << line << ":" << column
+ << kNext << reinterpret_cast<void*>(shared.address()) << kNext
<< ComputeMarker(shared, code);
msg.WriteToLogFile();
}
if (!FLAG_log_source_code) return;
- Object script_object = shared->script();
- if (!script_object->IsScript()) return;
+ Object script_object = shared.script();
+ if (!script_object.IsScript()) return;
Script script = Script::cast(script_object);
if (!EnsureLogScriptSource(script)) return;
@@ -1290,11 +1251,11 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
// S<shared-function-info-address>
Log::MessageBuilder msg(log_);
msg << "code-source-info" << kNext
- << reinterpret_cast<void*>(code->InstructionStart()) << kNext
- << script->id() << kNext << shared->StartPosition() << kNext
- << shared->EndPosition() << kNext;
+ << reinterpret_cast<void*>(code.InstructionStart()) << kNext
+ << script.id() << kNext << shared.StartPosition() << kNext
+ << shared.EndPosition() << kNext;
- SourcePositionTableIterator iterator(code->source_position_table());
+ SourcePositionTableIterator iterator(code.source_position_table());
bool hasInlined = false;
for (; !iterator.done(); iterator.Advance()) {
SourcePosition pos = iterator.source_position();
@@ -1308,10 +1269,10 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
int maxInlinedId = -1;
if (hasInlined) {
PodArray<InliningPosition> inlining_positions =
- DeoptimizationData::cast(Code::cast(code)->deoptimization_data())
- ->InliningPositions();
- for (int i = 0; i < inlining_positions->length(); i++) {
- InliningPosition inlining_pos = inlining_positions->get(i);
+ DeoptimizationData::cast(Code::cast(code).deoptimization_data())
+ .InliningPositions();
+ for (int i = 0; i < inlining_positions.length(); i++) {
+ InliningPosition inlining_pos = inlining_positions.get(i);
msg << "F";
if (inlining_pos.inlined_function_id != -1) {
msg << inlining_pos.inlined_function_id;
@@ -1329,13 +1290,13 @@ void Logger::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
msg << kNext;
if (hasInlined) {
DeoptimizationData deopt_data =
- DeoptimizationData::cast(Code::cast(code)->deoptimization_data());
+ DeoptimizationData::cast(Code::cast(code).deoptimization_data());
msg << std::hex;
for (int i = 0; i <= maxInlinedId; i++) {
msg << "S"
<< reinterpret_cast<void*>(
- deopt_data->GetInlinedFunction(i)->address());
+ deopt_data.GetInlinedFunction(i).address());
}
msg << std::dec;
}
@@ -1347,8 +1308,8 @@ void Logger::CodeDisableOptEvent(AbstractCode code, SharedFunctionInfo shared) {
if (!FLAG_log_code || !log_->IsEnabled()) return;
Log::MessageBuilder msg(log_);
msg << kLogEventsNames[CodeEventListener::CODE_DISABLE_OPT_EVENT] << kNext
- << shared->DebugName() << kNext
- << GetBailoutReason(shared->disable_optimization_reason());
+ << shared.DebugName() << kNext
+ << GetBailoutReason(shared.disable_optimization_reason());
msg.WriteToLogFile();
}
@@ -1369,8 +1330,8 @@ void Logger::RegExpCodeCreateEvent(AbstractCode code, String source) {
void Logger::CodeMoveEvent(AbstractCode from, AbstractCode to) {
if (!is_listening_to_code_events()) return;
- MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from->address(),
- to->address());
+ MoveEventInternal(CodeEventListener::CODE_MOVE_EVENT, from.address(),
+ to.address());
}
namespace {
@@ -1416,7 +1377,6 @@ void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
msg.WriteToLogFile();
}
-
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
if (!is_listening_to_code_events()) return;
MoveEventInternal(CodeEventListener::SHARED_FUNC_MOVE_EVENT, from, to);
@@ -1431,7 +1391,6 @@ void Logger::MoveEventInternal(CodeEventListener::LogEventsAndTags event,
msg.WriteToLogFile();
}
-
void Logger::ResourceEvent(const char* name, const char* tag) {
if (!log_->IsEnabled() || !FLAG_log) return;
Log::MessageBuilder msg(log_);
@@ -1449,9 +1408,8 @@ void Logger::ResourceEvent(const char* name, const char* tag) {
void Logger::SuspectReadEvent(Name name, Object obj) {
if (!log_->IsEnabled() || !FLAG_log_suspect) return;
Log::MessageBuilder msg(log_);
- String class_name = obj->IsJSObject()
- ? JSObject::cast(obj)->class_name()
- : ReadOnlyRoots(isolate_).empty_string();
+ String class_name = obj.IsJSObject() ? JSObject::cast(obj).class_name()
+ : ReadOnlyRoots(isolate_).empty_string();
msg << "suspect-read" << kNext << class_name << kNext << name;
msg.WriteToLogFile();
}
@@ -1497,12 +1455,12 @@ void Logger::CompilationCacheEvent(const char* action, const char* cache_type,
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
Log::MessageBuilder msg(log_);
int script_id = -1;
- if (sfi->script()->IsScript()) {
- script_id = Script::cast(sfi->script())->id();
+ if (sfi.script().IsScript()) {
+ script_id = Script::cast(sfi.script()).id();
}
msg << "compilation-cache" << Logger::kNext << action << Logger::kNext
<< cache_type << Logger::kNext << script_id << Logger::kNext
- << sfi->StartPosition() << Logger::kNext << sfi->EndPosition()
+ << sfi.StartPosition() << Logger::kNext << sfi.EndPosition()
<< Logger::kNext << timer_.Elapsed().InMicroseconds();
msg.WriteToLogFile();
}
@@ -1537,14 +1495,14 @@ void Logger::ScriptDetails(Script script) {
if (!log_->IsEnabled() || !FLAG_log_function_events) return;
{
Log::MessageBuilder msg(log_);
- msg << "script-details" << Logger::kNext << script->id() << Logger::kNext;
- if (script->name()->IsString()) {
- msg << String::cast(script->name());
+ msg << "script-details" << Logger::kNext << script.id() << Logger::kNext;
+ if (script.name().IsString()) {
+ msg << String::cast(script.name());
}
- msg << Logger::kNext << script->line_offset() << Logger::kNext
- << script->column_offset() << Logger::kNext;
- if (script->source_mapping_url()->IsString()) {
- msg << String::cast(script->source_mapping_url());
+ msg << Logger::kNext << script.line_offset() << Logger::kNext
+ << script.column_offset() << Logger::kNext;
+ if (script.source_mapping_url().IsString()) {
+ msg << String::cast(script.source_mapping_url());
}
msg.WriteToLogFile();
}
@@ -1555,20 +1513,20 @@ bool Logger::EnsureLogScriptSource(Script script) {
if (!log_->IsEnabled()) return false;
Log::MessageBuilder msg(log_);
// Make sure the script is written to the log file.
- int script_id = script->id();
+ int script_id = script.id();
if (logged_source_code_.find(script_id) != logged_source_code_.end()) {
return true;
}
// This script has not been logged yet.
logged_source_code_.insert(script_id);
- Object source_object = script->source();
- if (!source_object->IsString()) return false;
+ Object source_object = script.source();
+ if (!source_object.IsString()) return false;
String source_code = String::cast(source_object);
msg << "script-source" << kNext << script_id << kNext;
// Log the script name.
- if (script->name()->IsString()) {
- msg << String::cast(script->name()) << kNext;
+ if (script.name().IsString()) {
+ msg << String::cast(script.name()) << kNext;
} else {
msg << "<unknown>" << kNext;
}
@@ -1624,11 +1582,11 @@ void Logger::ICEvent(const char* type, bool keyed, Map map, Object key,
msg << type << kNext << reinterpret_cast<void*>(pc) << kNext << line << kNext
<< column << kNext << old_state << kNext << new_state << kNext
<< AsHex::Address(map.ptr()) << kNext;
- if (key->IsSmi()) {
+ if (key.IsSmi()) {
msg << Smi::ToInt(key);
- } else if (key->IsNumber()) {
- msg << key->Number();
- } else if (key->IsName()) {
+ } else if (key.IsNumber()) {
+ msg << key.Number();
+ } else if (key.IsName()) {
msg << Name::cast(key);
}
msg << kNext << modifier << kNext;
@@ -1657,13 +1615,13 @@ void Logger::MapEvent(const char* type, Map from, Map to, const char* reason,
<< line << kNext << column << kNext << reason << kNext;
if (!name_or_sfi.is_null()) {
- if (name_or_sfi->IsName()) {
+ if (name_or_sfi.IsName()) {
msg << Name::cast(name_or_sfi);
- } else if (name_or_sfi->IsSharedFunctionInfo()) {
+ } else if (name_or_sfi.IsSharedFunctionInfo()) {
SharedFunctionInfo sfi = SharedFunctionInfo::cast(name_or_sfi);
- msg << sfi->DebugName();
+ msg << sfi.DebugName();
#if V8_SFI_HAS_UNIQUE_ID
- msg << " " << SharedFunctionInfoWithID::cast(sfi)->unique_id();
+ msg << " " << sfi.unique_id();
#endif // V8_SFI_HAS_UNIQUE_ID
}
}
@@ -1687,7 +1645,7 @@ void Logger::MapDetails(Map map) {
<< AsHex::Address(map.ptr()) << kNext;
if (FLAG_trace_maps_details) {
std::ostringstream buffer;
- map->PrintMapDetails(buffer);
+ map.PrintMapDetails(buffer);
msg << buffer.str().c_str();
}
msg.WriteToLogFile();
@@ -1697,10 +1655,10 @@ static void AddFunctionAndCode(SharedFunctionInfo sfi, AbstractCode code_object,
Handle<SharedFunctionInfo>* sfis,
Handle<AbstractCode>* code_objects, int offset) {
if (sfis != nullptr) {
- sfis[offset] = Handle<SharedFunctionInfo>(sfi, sfi->GetIsolate());
+ sfis[offset] = Handle<SharedFunctionInfo>(sfi, sfi.GetIsolate());
}
if (code_objects != nullptr) {
- code_objects[offset] = Handle<AbstractCode>(code_object, sfi->GetIsolate());
+ code_objects[offset] = Handle<AbstractCode>(code_object, sfi.GetIsolate());
}
}
@@ -1715,30 +1673,29 @@ static int EnumerateCompiledFunctions(Heap* heap,
// the unoptimized code for them.
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (obj->IsSharedFunctionInfo()) {
+ if (obj.IsSharedFunctionInfo()) {
SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
- if (sfi->is_compiled() &&
- (!sfi->script()->IsScript() ||
- Script::cast(sfi->script())->HasValidSource())) {
- AddFunctionAndCode(sfi, AbstractCode::cast(sfi->abstract_code()), sfis,
+ if (sfi.is_compiled() && (!sfi.script().IsScript() ||
+ Script::cast(sfi.script()).HasValidSource())) {
+ AddFunctionAndCode(sfi, AbstractCode::cast(sfi.abstract_code()), sfis,
code_objects, compiled_funcs_count);
++compiled_funcs_count;
}
- } else if (obj->IsJSFunction()) {
+ } else if (obj.IsJSFunction()) {
// Given that we no longer iterate over all optimized JSFunctions, we need
// to take care of this here.
JSFunction function = JSFunction::cast(obj);
- SharedFunctionInfo sfi = SharedFunctionInfo::cast(function->shared());
- Object maybe_script = sfi->script();
- if (maybe_script->IsScript() &&
- !Script::cast(maybe_script)->HasValidSource()) {
+ SharedFunctionInfo sfi = SharedFunctionInfo::cast(function.shared());
+ Object maybe_script = sfi.script();
+ if (maybe_script.IsScript() &&
+ !Script::cast(maybe_script).HasValidSource()) {
continue;
}
// TODO(jarin) This leaves out deoptimized code that might still be on the
// stack. Also note that we will not log optimized code objects that are
// only on a type feedback vector. We should make this mroe precise.
- if (function->IsOptimized()) {
- AddFunctionAndCode(sfi, AbstractCode::cast(function->code()), sfis,
+ if (function.IsOptimized()) {
+ AddFunctionAndCode(sfi, AbstractCode::cast(function.code()), sfis,
code_objects, compiled_funcs_count);
++compiled_funcs_count;
}
@@ -1755,7 +1712,7 @@ static int EnumerateWasmModuleObjects(
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (obj->IsWasmModuleObject()) {
+ if (obj.IsWasmModuleObject()) {
WasmModuleObject module = WasmModuleObject::cast(obj);
if (module_objects != nullptr) {
module_objects[module_objects_count] =
@@ -1788,18 +1745,18 @@ void Logger::LogAccessorCallbacks() {
DisallowHeapAllocation no_gc;
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (!obj->IsAccessorInfo()) continue;
+ if (!obj.IsAccessorInfo()) continue;
AccessorInfo ai = AccessorInfo::cast(obj);
- if (!ai->name()->IsName()) continue;
- Address getter_entry = v8::ToCData<Address>(ai->getter());
- Name name = Name::cast(ai->name());
+ if (!ai.name().IsName()) continue;
+ Address getter_entry = v8::ToCData<Address>(ai.getter());
+ Name name = Name::cast(ai.name());
if (getter_entry != 0) {
#if USES_FUNCTION_DESCRIPTORS
getter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(getter_entry);
#endif
PROFILE(isolate_, GetterCallbackEvent(name, getter_entry));
}
- Address setter_entry = v8::ToCData<Address>(ai->setter());
+ Address setter_entry = v8::ToCData<Address>(ai.setter());
if (setter_entry != 0) {
#if USES_FUNCTION_DESCRIPTORS
setter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(setter_entry);
@@ -1815,7 +1772,7 @@ void Logger::LogAllMaps() {
HeapIterator iterator(heap);
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (!obj->IsMap()) continue;
+ if (!obj.IsMap()) continue;
Map map = Map::cast(obj);
MapCreate(map);
MapDetails(map);
@@ -1874,7 +1831,6 @@ static void PrepareLogFileName(std::ostream& os, // NOLINT
}
}
-
bool Logger::SetUp(Isolate* isolate) {
// Tests and EnsureInitialize() can call this twice in a row. It's harmless.
if (is_initialized_) return true;
@@ -1921,7 +1877,6 @@ bool Logger::SetUp(Isolate* isolate) {
return true;
}
-
void Logger::SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler) {
if (jit_logger_) {
@@ -1988,7 +1943,7 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
AbstractCode abstract_code = AbstractCode::cast(object);
CodeEventListener::LogEventsAndTags tag = CodeEventListener::STUB_TAG;
const char* description = "Unknown code from before profiling";
- switch (abstract_code->kind()) {
+ switch (abstract_code.kind()) {
case AbstractCode::INTERPRETED_FUNCTION:
case AbstractCode::OPTIMIZED_FUNCTION:
return; // We log this later using LogCompiledFunctions.
@@ -2003,13 +1958,13 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
tag = CodeEventListener::REG_EXP_TAG;
break;
case AbstractCode::BUILTIN:
- if (Code::cast(object)->is_interpreter_trampoline_builtin() &&
+ if (Code::cast(object).is_interpreter_trampoline_builtin() &&
Code::cast(object) !=
*BUILTIN_CODE(isolate_, InterpreterEntryTrampoline)) {
return;
}
description =
- isolate_->builtins()->name(abstract_code->GetCode()->builtin_index());
+ isolate_->builtins()->name(abstract_code.GetCode().builtin_index());
tag = CodeEventListener::BUILTIN_TAG;
break;
case AbstractCode::WASM_FUNCTION:
@@ -2020,6 +1975,10 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
description = "A JavaScript to Wasm adapter";
tag = CodeEventListener::STUB_TAG;
break;
+ case AbstractCode::WASM_TO_CAPI_FUNCTION:
+ description = "A Wasm to C-API adapter";
+ tag = CodeEventListener::STUB_TAG;
+ break;
case AbstractCode::WASM_TO_JS_FUNCTION:
description = "A Wasm to JavaScript adapter";
tag = CodeEventListener::STUB_TAG;
@@ -2044,8 +2003,8 @@ void ExistingCodeLogger::LogCodeObjects() {
DisallowHeapAllocation no_gc;
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (obj->IsCode()) LogCodeObject(obj);
- if (obj->IsBytecodeArray()) LogCodeObject(obj);
+ if (obj.IsCode()) LogCodeObject(obj);
+ if (obj.IsBytecodeArray()) LogCodeObject(obj);
}
}
@@ -2056,13 +2015,13 @@ void ExistingCodeLogger::LogCompiledFunctions() {
EnumerateCompiledFunctions(heap, nullptr, nullptr);
ScopedVector<Handle<SharedFunctionInfo>> sfis(compiled_funcs_count);
ScopedVector<Handle<AbstractCode>> code_objects(compiled_funcs_count);
- EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
+ EnumerateCompiledFunctions(heap, sfis.begin(), code_objects.begin());
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, sfis[i]);
- if (sfis[i]->function_data()->IsInterpreterData()) {
+ if (sfis[i]->function_data().IsInterpreterData()) {
LogExistingFunction(
sfis[i],
Handle<AbstractCode>(
@@ -2087,12 +2046,12 @@ void ExistingCodeLogger::LogCompiledFunctions() {
void ExistingCodeLogger::LogExistingFunction(
Handle<SharedFunctionInfo> shared, Handle<AbstractCode> code,
CodeEventListener::LogEventsAndTags tag) {
- if (shared->script()->IsScript()) {
+ if (shared->script().IsScript()) {
Handle<Script> script(Script::cast(shared->script()), isolate_);
int line_num = Script::GetLineNumber(script, shared->StartPosition()) + 1;
int column_num =
Script::GetColumnNumber(script, shared->StartPosition()) + 1;
- if (script->name()->IsString()) {
+ if (script->name().IsString()) {
Handle<String> script_name(String::cast(script->name()), isolate_);
if (line_num > 0) {
CALL_CODE_EVENT_HANDLER(
@@ -2112,10 +2071,10 @@ void ExistingCodeLogger::LogExistingFunction(
} else if (shared->IsApiFunction()) {
// API function.
FunctionTemplateInfo fun_data = shared->get_api_func_data();
- Object raw_call_data = fun_data->call_code();
- if (!raw_call_data->IsUndefined(isolate_)) {
+ Object raw_call_data = fun_data.call_code();
+ if (!raw_call_data.IsUndefined(isolate_)) {
CallHandlerInfo call_data = CallHandlerInfo::cast(raw_call_data);
- Object callback_obj = call_data->callback();
+ Object callback_obj = call_data.callback();
Address entry_point = v8::ToCData<Address>(callback_obj);
#if USES_FUNCTION_DESCRIPTORS
entry_point = *FUNCTION_ENTRYPOINT_ADDRESS(entry_point);
diff --git a/deps/v8/src/log.h b/deps/v8/src/logging/log.h
index a0ecc9e888..e46409a66e 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/logging/log.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOG_H_
-#define V8_LOG_H_
+#ifndef V8_LOGGING_LOG_H_
+#define V8_LOGGING_LOG_H_
#include <set>
#include <string>
#include "include/v8-profiler.h"
#include "src/base/platform/elapsed-timer.h"
-#include "src/code-events.h"
-#include "src/objects.h"
+#include "src/logging/code-events.h"
+#include "src/objects/objects.h"
namespace v8 {
@@ -122,8 +122,7 @@ class Logger : public CodeEventListener {
bool SetUp(Isolate* isolate);
// Sets the current code event handler.
- void SetCodeEventHandler(uint32_t options,
- JitCodeEventHandler event_handler);
+ void SetCodeEventHandler(uint32_t options, JitCodeEventHandler event_handler);
sampler::Sampler* sampler();
@@ -247,12 +246,7 @@ class Logger : public CodeEventListener {
V8_INLINE static void CallEventLogger(Isolate* isolate, const char* name,
StartEnd se, bool expose_to_api);
- bool is_logging() {
- return is_logging_;
- }
-
- // Used by CpuProfiler. TODO(petermarshall): Untangle
- void set_is_logging(bool new_value) { is_logging_ = new_value; }
+ V8_EXPORT_PRIVATE bool is_logging();
bool is_listening_to_code_events() override {
return is_logging() || jit_logger_ != nullptr;
@@ -327,7 +321,8 @@ class Logger : public CodeEventListener {
friend class Isolate;
friend class TimeLog;
friend class Profiler;
- template <StateTag Tag> friend class VMState;
+ template <StateTag Tag>
+ friend class VMState;
friend class LoggerTestHelper;
bool is_logging_;
@@ -357,8 +352,7 @@ class Logger : public CodeEventListener {
V(CompileCode, true) \
V(CompileCodeBackground, true) \
V(DeoptimizeCode, true) \
- V(Execute, true) \
- V(External, true)
+ V(Execute, true)
#define V(TimerName, expose) \
class TimerEvent##TimerName : public AllStatic { \
@@ -371,7 +365,6 @@ class Logger : public CodeEventListener {
TIMER_EVENTS_LIST(V)
#undef V
-
template <class TimerEvent>
class TimerEventScope {
public:
@@ -483,5 +476,4 @@ class ExternalCodeEventListener : public CodeEventListener {
} // namespace internal
} // namespace v8
-
-#endif // V8_LOG_H_
+#endif // V8_LOGGING_LOG_H_
diff --git a/deps/v8/src/macro-assembler-inl.h b/deps/v8/src/macro-assembler-inl.h
deleted file mode 100644
index a9e9ee7a9f..0000000000
--- a/deps/v8/src/macro-assembler-inl.h
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_MACRO_ASSEMBLER_INL_H_
-#define V8_MACRO_ASSEMBLER_INL_H_
-
-#include "src/assembler-inl.h"
-#include "src/macro-assembler.h"
-
-#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#endif
-
-#endif // V8_MACRO_ASSEMBLER_INL_H_
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/mips/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/mips64/OWNERS b/deps/v8/src/mips64/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/mips64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/numbers/OWNERS b/deps/v8/src/numbers/OWNERS
new file mode 100644
index 0000000000..097b008121
--- /dev/null
+++ b/deps/v8/src/numbers/OWNERS
@@ -0,0 +1,5 @@
+ahaas@chromium.org
+bmeurer@chromium.org
+clemensh@chromium.org
+jkummerow@chromium.org
+sigurds@chromium.org
diff --git a/deps/v8/src/bignum-dtoa.cc b/deps/v8/src/numbers/bignum-dtoa.cc
index ccfe690a3a..75d747501a 100644
--- a/deps/v8/src/bignum-dtoa.cc
+++ b/deps/v8/src/numbers/bignum-dtoa.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/bignum-dtoa.h"
+#include "src/numbers/bignum-dtoa.h"
#include <cmath>
#include "src/base/logging.h"
-#include "src/bignum.h"
-#include "src/double.h"
-#include "src/utils.h"
+#include "src/numbers/bignum.h"
+#include "src/numbers/double.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -23,34 +23,30 @@ static int NormalizedExponent(uint64_t significand, int exponent) {
return exponent;
}
-
// Forward declarations:
// Returns an estimation of k such that 10^(k-1) <= v < 10^k.
static int EstimatePower(int exponent);
// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
// and denominator.
-static void InitialScaledStartValues(double v,
- int estimated_power,
+static void InitialScaledStartValues(double v, int estimated_power,
bool need_boundary_deltas,
- Bignum* numerator,
- Bignum* denominator,
- Bignum* delta_minus,
- Bignum* delta_plus);
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus);
// Multiplies numerator/denominator so that its values lies in the range 1-10.
// Returns decimal_point s.t.
// v = numerator'/denominator' * 10^(decimal_point-1)
// where numerator' and denominator' are the values of numerator and
// denominator after the call to this function.
static void FixupMultiply10(int estimated_power, bool is_even,
- int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus);
+ int* decimal_point, Bignum* numerator,
+ Bignum* denominator, Bignum* delta_minus,
+ Bignum* delta_plus);
// Generates digits from the left to the right and stops when the generated
// digits yield the shortest decimal representation of v.
static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus,
- bool is_even,
- Vector<char> buffer, int* length);
+ bool is_even, Vector<char> buffer,
+ int* length);
// Generates 'requested_digits' after the decimal point.
static void BignumToFixed(int requested_digits, int* decimal_point,
Bignum* numerator, Bignum* denominator,
@@ -63,7 +59,6 @@ static void GenerateCountedDigits(int count, int* decimal_point,
Bignum* numerator, Bignum* denominator,
Vector<char>(buffer), int* length);
-
void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
Vector<char> buffer, int* length, int* decimal_point) {
DCHECK_GT(v, 0);
@@ -99,30 +94,25 @@ void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
// 308*4 binary digits.
DCHECK_GE(Bignum::kMaxSignificantBits, 324 * 4);
bool need_boundary_deltas = (mode == BIGNUM_DTOA_SHORTEST);
- InitialScaledStartValues(v, estimated_power, need_boundary_deltas,
- &numerator, &denominator,
- &delta_minus, &delta_plus);
+ InitialScaledStartValues(v, estimated_power, need_boundary_deltas, &numerator,
+ &denominator, &delta_minus, &delta_plus);
// We now have v = (numerator / denominator) * 10^estimated_power.
- FixupMultiply10(estimated_power, is_even, decimal_point,
- &numerator, &denominator,
- &delta_minus, &delta_plus);
+ FixupMultiply10(estimated_power, is_even, decimal_point, &numerator,
+ &denominator, &delta_minus, &delta_plus);
// We now have v = (numerator / denominator) * 10^(decimal_point-1), and
// 1 <= (numerator + delta_plus) / denominator < 10
switch (mode) {
case BIGNUM_DTOA_SHORTEST:
- GenerateShortestDigits(&numerator, &denominator,
- &delta_minus, &delta_plus,
- is_even, buffer, length);
+ GenerateShortestDigits(&numerator, &denominator, &delta_minus,
+ &delta_plus, is_even, buffer, length);
break;
case BIGNUM_DTOA_FIXED:
- BignumToFixed(requested_digits, decimal_point,
- &numerator, &denominator,
+ BignumToFixed(requested_digits, decimal_point, &numerator, &denominator,
buffer, length);
break;
case BIGNUM_DTOA_PRECISION:
- GenerateCountedDigits(requested_digits, decimal_point,
- &numerator, &denominator,
- buffer, length);
+ GenerateCountedDigits(requested_digits, decimal_point, &numerator,
+ &denominator, buffer, length);
break;
default:
UNREACHABLE();
@@ -130,7 +120,6 @@ void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
buffer[*length] = '\0';
}
-
// The procedure starts generating digits from the left to the right and stops
// when the generated digits yield the shortest decimal representation of v. A
// decimal representation of v is a number lying closer to v than to any other
@@ -146,8 +135,8 @@ void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
// will be produced. This should be the standard precondition.
static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
Bignum* delta_minus, Bignum* delta_plus,
- bool is_even,
- Vector<char> buffer, int* length) {
+ bool is_even, Vector<char> buffer,
+ int* length) {
// Small optimization: if delta_minus and delta_plus are the same just reuse
// one of the two bignums.
if (Bignum::Equal(*delta_minus, *delta_plus)) {
@@ -235,7 +224,6 @@ static void GenerateShortestDigits(Bignum* numerator, Bignum* denominator,
}
}
-
// Let v = numerator / denominator < 10.
// Then we generate 'count' digits of d = x.xxxxx... (without the decimal point)
// from left to right. Once 'count' digits have been produced we decide wether
@@ -278,7 +266,6 @@ static void GenerateCountedDigits(int count, int* decimal_point,
*length = count;
}
-
// Generates 'requested_digits' after the decimal point. It might omit
// trailing '0's. If the input number is too small then no digits at all are
// generated (ex.: 2 fixed digits for 0.00001).
@@ -321,13 +308,11 @@ static void BignumToFixed(int requested_digits, int* decimal_point,
// The requested digits correspond to the digits after the point.
// The variable 'needed_digits' includes the digits before the point.
int needed_digits = (*decimal_point) + requested_digits;
- GenerateCountedDigits(needed_digits, decimal_point,
- numerator, denominator,
+ GenerateCountedDigits(needed_digits, decimal_point, numerator, denominator,
buffer, length);
}
}
-
// Returns an estimation of k such that 10^(k-1) <= v < 10^k where
// v = f * 2^exponent and 2^52 <= f < 2^53.
// v is hence a normalized double with the given exponent. The output is an
@@ -374,12 +359,10 @@ static int EstimatePower(int exponent) {
return static_cast<int>(estimate);
}
-
// See comments for InitialScaledStartValues.
static void InitialScaledStartValuesPositiveExponent(
- double v, int estimated_power, bool need_boundary_deltas,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
+ double v, int estimated_power, bool need_boundary_deltas, Bignum* numerator,
+ Bignum* denominator, Bignum* delta_minus, Bignum* delta_plus) {
// A positive exponent implies a positive power.
DCHECK_GE(estimated_power, 0);
// Since the estimated_power is positive we simply multiply the denominator
@@ -420,12 +403,10 @@ static void InitialScaledStartValuesPositiveExponent(
}
}
-
// See comments for InitialScaledStartValues
static void InitialScaledStartValuesNegativeExponentPositivePower(
- double v, int estimated_power, bool need_boundary_deltas,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
+ double v, int estimated_power, bool need_boundary_deltas, Bignum* numerator,
+ Bignum* denominator, Bignum* delta_minus, Bignum* delta_plus) {
uint64_t significand = Double(v).Significand();
int exponent = Double(v).Exponent();
// v = f * 2^e with e < 0, and with estimated_power >= 0.
@@ -470,12 +451,10 @@ static void InitialScaledStartValuesNegativeExponentPositivePower(
}
}
-
// See comments for InitialScaledStartValues
static void InitialScaledStartValuesNegativeExponentNegativePower(
- double v, int estimated_power, bool need_boundary_deltas,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
+ double v, int estimated_power, bool need_boundary_deltas, Bignum* numerator,
+ Bignum* denominator, Bignum* delta_minus, Bignum* delta_plus) {
const uint64_t kMinimalNormalizedExponent =
V8_2PART_UINT64_C(0x00100000, 00000000);
uint64_t significand = Double(v).Significand();
@@ -531,7 +510,6 @@ static void InitialScaledStartValuesNegativeExponentNegativePower(
}
}
-
// Let v = significand * 2^exponent.
// Computes v / 10^estimated_power exactly, as a ratio of two bignums, numerator
// and denominator. The functions GenerateShortestDigits and
@@ -568,29 +546,25 @@ static void InitialScaledStartValuesNegativeExponentNegativePower(
// It is then easy to kickstart the digit-generation routine.
//
// The boundary-deltas are only filled if need_boundary_deltas is set.
-static void InitialScaledStartValues(double v,
- int estimated_power,
+static void InitialScaledStartValues(double v, int estimated_power,
bool need_boundary_deltas,
- Bignum* numerator,
- Bignum* denominator,
- Bignum* delta_minus,
- Bignum* delta_plus) {
+ Bignum* numerator, Bignum* denominator,
+ Bignum* delta_minus, Bignum* delta_plus) {
if (Double(v).Exponent() >= 0) {
InitialScaledStartValuesPositiveExponent(
- v, estimated_power, need_boundary_deltas,
- numerator, denominator, delta_minus, delta_plus);
+ v, estimated_power, need_boundary_deltas, numerator, denominator,
+ delta_minus, delta_plus);
} else if (estimated_power >= 0) {
InitialScaledStartValuesNegativeExponentPositivePower(
- v, estimated_power, need_boundary_deltas,
- numerator, denominator, delta_minus, delta_plus);
+ v, estimated_power, need_boundary_deltas, numerator, denominator,
+ delta_minus, delta_plus);
} else {
InitialScaledStartValuesNegativeExponentNegativePower(
- v, estimated_power, need_boundary_deltas,
- numerator, denominator, delta_minus, delta_plus);
+ v, estimated_power, need_boundary_deltas, numerator, denominator,
+ delta_minus, delta_plus);
}
}
-
// This routine multiplies numerator/denominator so that its values lies in the
// range 1-10. That is after a call to this function we have:
// 1 <= (numerator + delta_plus) /denominator < 10.
@@ -603,9 +577,9 @@ static void InitialScaledStartValues(double v,
// estimated_power) but do not touch the numerator or denominator.
// Otherwise the routine multiplies the numerator and the deltas by 10.
static void FixupMultiply10(int estimated_power, bool is_even,
- int* decimal_point,
- Bignum* numerator, Bignum* denominator,
- Bignum* delta_minus, Bignum* delta_plus) {
+ int* decimal_point, Bignum* numerator,
+ Bignum* denominator, Bignum* delta_minus,
+ Bignum* delta_plus) {
bool in_range;
if (is_even) {
// For IEEE doubles half-way cases (in decimal system numbers ending with 5)
diff --git a/deps/v8/src/bignum-dtoa.h b/deps/v8/src/numbers/bignum-dtoa.h
index ac94e651bf..c42d0b865a 100644
--- a/deps/v8/src/bignum-dtoa.h
+++ b/deps/v8/src/numbers/bignum-dtoa.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BIGNUM_DTOA_H_
-#define V8_BIGNUM_DTOA_H_
+#ifndef V8_NUMBERS_BIGNUM_DTOA_H_
+#define V8_NUMBERS_BIGNUM_DTOA_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -59,4 +59,4 @@ V8_EXPORT_PRIVATE void BignumDtoa(double v, BignumDtoaMode mode,
} // namespace internal
} // namespace v8
-#endif // V8_BIGNUM_DTOA_H_
+#endif // V8_NUMBERS_BIGNUM_DTOA_H_
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/numbers/bignum.cc
index a0a398b7aa..beaed6b30a 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/numbers/bignum.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/bignum.h"
-#include "src/utils.h"
+#include "src/numbers/bignum.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -15,13 +15,11 @@ Bignum::Bignum()
}
}
-
-template<typename S>
+template <typename S>
static int BitSize(S value) {
return 8 * sizeof(value);
}
-
// Guaranteed to lie in one Bigit.
void Bignum::AssignUInt16(uint16_t value) {
DCHECK_GE(kBigitSize, BitSize(value));
@@ -33,7 +31,6 @@ void Bignum::AssignUInt16(uint16_t value) {
used_digits_ = 1;
}
-
void Bignum::AssignUInt64(uint64_t value) {
const int kUInt64Size = 64;
@@ -50,7 +47,6 @@ void Bignum::AssignUInt64(uint64_t value) {
Clamp();
}
-
void Bignum::AssignBignum(const Bignum& other) {
exponent_ = other.exponent_;
for (int i = 0; i < other.used_digits_; ++i) {
@@ -63,9 +59,7 @@ void Bignum::AssignBignum(const Bignum& other) {
used_digits_ = other.used_digits_;
}
-
-static uint64_t ReadUInt64(Vector<const char> buffer,
- int from,
+static uint64_t ReadUInt64(Vector<const char> buffer, int from,
int digits_to_read) {
uint64_t result = 0;
int to = from + digits_to_read;
@@ -78,7 +72,6 @@ static uint64_t ReadUInt64(Vector<const char> buffer,
return result;
}
-
void Bignum::AssignDecimalString(Vector<const char> value) {
// 2^64 = 18446744073709551616 > 10^19
const int kMaxUint64DecimalDigits = 19;
@@ -99,7 +92,6 @@ void Bignum::AssignDecimalString(Vector<const char> value) {
Clamp();
}
-
static int HexCharValue(char c) {
if ('0' <= c && c <= '9') return c - '0';
if ('a' <= c && c <= 'f') return 10 + c - 'a';
@@ -107,7 +99,6 @@ static int HexCharValue(char c) {
UNREACHABLE();
}
-
void Bignum::AssignHexString(Vector<const char> value) {
Zero();
int length = value.length();
@@ -137,7 +128,6 @@ void Bignum::AssignHexString(Vector<const char> value) {
Clamp();
}
-
void Bignum::AddUInt64(uint64_t operand) {
if (operand == 0) return;
Bignum other;
@@ -145,7 +135,6 @@ void Bignum::AddUInt64(uint64_t operand) {
AddBignum(other);
}
-
void Bignum::AddBignum(const Bignum& other) {
DCHECK(IsClamped());
DCHECK(other.IsClamped());
@@ -187,7 +176,6 @@ void Bignum::AddBignum(const Bignum& other) {
DCHECK(IsClamped());
}
-
void Bignum::SubtractBignum(const Bignum& other) {
DCHECK(IsClamped());
DCHECK(other.IsClamped());
@@ -214,7 +202,6 @@ void Bignum::SubtractBignum(const Bignum& other) {
Clamp();
}
-
void Bignum::ShiftLeft(int shift_amount) {
if (used_digits_ == 0) return;
exponent_ += shift_amount / kBigitSize;
@@ -223,7 +210,6 @@ void Bignum::ShiftLeft(int shift_amount) {
BigitsShiftLeft(local_shift);
}
-
void Bignum::MultiplyByUInt32(uint32_t factor) {
if (factor == 1) return;
if (factor == 0) {
@@ -249,7 +235,6 @@ void Bignum::MultiplyByUInt32(uint32_t factor) {
}
}
-
void Bignum::MultiplyByUInt64(uint64_t factor) {
if (factor == 1) return;
if (factor == 0) {
@@ -266,7 +251,7 @@ void Bignum::MultiplyByUInt64(uint64_t factor) {
uint64_t tmp = (carry & kBigitMask) + product_low;
bigits_[i] = static_cast<Chunk>(tmp & kBigitMask);
carry = (carry >> kBigitSize) + (tmp >> kBigitSize) +
- (product_high << (32 - kBigitSize));
+ (product_high << (32 - kBigitSize));
}
while (carry != 0) {
EnsureCapacity(used_digits_ + 1);
@@ -276,7 +261,6 @@ void Bignum::MultiplyByUInt64(uint64_t factor) {
}
}
-
void Bignum::MultiplyByPowerOfTen(int exponent) {
const uint64_t kFive27 = V8_2PART_UINT64_C(0x6765C793, fa10079d);
const uint16_t kFive1 = 5;
@@ -292,9 +276,9 @@ void Bignum::MultiplyByPowerOfTen(int exponent) {
const uint32_t kFive11 = kFive10 * 5;
const uint32_t kFive12 = kFive11 * 5;
const uint32_t kFive13 = kFive12 * 5;
- const uint32_t kFive1_to_12[] =
- { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
- kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
+ const uint32_t kFive1_to_12[] = {kFive1, kFive2, kFive3, kFive4,
+ kFive5, kFive6, kFive7, kFive8,
+ kFive9, kFive10, kFive11, kFive12};
DCHECK_GE(exponent, 0);
if (exponent == 0) return;
@@ -316,7 +300,6 @@ void Bignum::MultiplyByPowerOfTen(int exponent) {
ShiftLeft(exponent);
}
-
void Bignum::Square() {
DCHECK(IsClamped());
int product_length = 2 * used_digits_;
@@ -388,7 +371,6 @@ void Bignum::Square() {
Clamp();
}
-
void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
DCHECK_NE(base, 0);
DCHECK_GE(power_exponent, 0);
@@ -461,7 +443,6 @@ void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
ShiftLeft(shifts * power_exponent);
}
-
// Precondition: this/other < 16bit.
uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
DCHECK(IsClamped());
@@ -525,8 +506,7 @@ uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
return result;
}
-
-template<typename S>
+template <typename S>
static int SizeInHexChars(S number) {
DCHECK_GT(number, 0);
int result = 0;
@@ -537,7 +517,6 @@ static int SizeInHexChars(S number) {
return result;
}
-
bool Bignum::ToHexString(char* buffer, int buffer_size) const {
DCHECK(IsClamped());
// Each bigit must be printable as separate hex-character.
@@ -552,7 +531,7 @@ bool Bignum::ToHexString(char* buffer, int buffer_size) const {
}
// We add 1 for the terminating '\0' character.
int needed_chars = (BigitLength() - 1) * kHexCharsPerBigit +
- SizeInHexChars(bigits_[used_digits_ - 1]) + 1;
+ SizeInHexChars(bigits_[used_digits_ - 1]) + 1;
if (needed_chars > buffer_size) return false;
int string_index = needed_chars - 1;
buffer[string_index--] = '\0';
@@ -577,14 +556,12 @@ bool Bignum::ToHexString(char* buffer, int buffer_size) const {
return true;
}
-
Bignum::Chunk Bignum::BigitAt(int index) const {
if (index >= BigitLength()) return 0;
if (index < exponent_) return 0;
return bigits_[index - exponent_];
}
-
int Bignum::Compare(const Bignum& a, const Bignum& b) {
DCHECK(a.IsClamped());
DCHECK(b.IsClamped());
@@ -602,7 +579,6 @@ int Bignum::Compare(const Bignum& a, const Bignum& b) {
return 0;
}
-
int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
DCHECK(a.IsClamped());
DCHECK(b.IsClamped());
@@ -639,7 +615,6 @@ int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
return -1;
}
-
void Bignum::Clamp() {
while (used_digits_ > 0 && bigits_[used_digits_ - 1] == 0) {
used_digits_--;
@@ -650,12 +625,10 @@ void Bignum::Clamp() {
}
}
-
bool Bignum::IsClamped() const {
return used_digits_ == 0 || bigits_[used_digits_ - 1] != 0;
}
-
void Bignum::Zero() {
for (int i = 0; i < used_digits_; ++i) {
bigits_[i] = 0;
@@ -664,7 +637,6 @@ void Bignum::Zero() {
exponent_ = 0;
}
-
void Bignum::Align(const Bignum& other) {
if (exponent_ > other.exponent_) {
// If "X" represents a "hidden" digit (by the exponent) then we are in the
@@ -688,7 +660,6 @@ void Bignum::Align(const Bignum& other) {
}
}
-
void Bignum::BigitsShiftLeft(int shift_amount) {
DCHECK_LT(shift_amount, kBigitSize);
DCHECK_GE(shift_amount, 0);
@@ -704,7 +675,6 @@ void Bignum::BigitsShiftLeft(int shift_amount) {
}
}
-
void Bignum::SubtractTimes(const Bignum& other, int factor) {
#ifdef DEBUG
Bignum a, b;
@@ -741,6 +711,5 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) {
DCHECK(Bignum::Equal(a, *this));
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/bignum.h b/deps/v8/src/numbers/bignum.h
index db82e4d233..fdcd176c4e 100644
--- a/deps/v8/src/bignum.h
+++ b/deps/v8/src/numbers/bignum.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BIGNUM_H_
-#define V8_BIGNUM_H_
+#ifndef V8_NUMBERS_BIGNUM_H_
+#define V8_NUMBERS_BIGNUM_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -73,8 +73,8 @@ class V8_EXPORT_PRIVATE Bignum {
}
private:
- typedef uint32_t Chunk;
- typedef uint64_t DoubleChunk;
+ using Chunk = uint32_t;
+ using DoubleChunk = uint64_t;
static const int kChunkSize = sizeof(Chunk) * 8;
static const int kDoubleChunkSize = sizeof(DoubleChunk) * 8;
@@ -118,4 +118,4 @@ class V8_EXPORT_PRIVATE Bignum {
} // namespace internal
} // namespace v8
-#endif // V8_BIGNUM_H_
+#endif // V8_NUMBERS_BIGNUM_H_
diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/numbers/cached-powers.cc
index 08c9781414..205151d359 100644
--- a/deps/v8/src/cached-powers.cc
+++ b/deps/v8/src/numbers/cached-powers.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/cached-powers.h"
+#include "src/numbers/cached-powers.h"
#include <limits.h>
#include <stdarg.h>
@@ -10,7 +10,7 @@
#include <cmath>
#include "src/base/logging.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -123,17 +123,13 @@ const int PowersOfTenCache::kMinDecimalExponent = -348;
const int PowersOfTenCache::kMaxDecimalExponent = 340;
void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
- int min_exponent,
- int max_exponent,
- DiyFp* power,
- int* decimal_exponent) {
+ int min_exponent, int max_exponent, DiyFp* power, int* decimal_exponent) {
int kQ = DiyFp::kSignificandSize;
// Some platforms return incorrect sign on 0 result. We can ignore that here,
// which means we can avoid depending on platform.h.
double k = std::ceil((min_exponent + kQ - 1) * kD_1_LOG2_10);
int foo = kCachedPowersOffset;
- int index =
- (foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
+ int index = (foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
DCHECK(0 <= index && index < kCachedPowersLength);
CachedPower cached_power = kCachedPowers[index];
DCHECK(min_exponent <= cached_power.binary_exponent);
@@ -142,7 +138,6 @@ void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
*power = DiyFp(cached_power.significand, cached_power.binary_exponent);
}
-
void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
DiyFp* power,
int* found_exponent) {
diff --git a/deps/v8/src/cached-powers.h b/deps/v8/src/numbers/cached-powers.h
index fade5c9fca..99ffe5068f 100644
--- a/deps/v8/src/cached-powers.h
+++ b/deps/v8/src/numbers/cached-powers.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CACHED_POWERS_H_
-#define V8_CACHED_POWERS_H_
+#ifndef V8_NUMBERS_CACHED_POWERS_H_
+#define V8_NUMBERS_CACHED_POWERS_H_
#include "src/base/logging.h"
-#include "src/diy-fp.h"
+#include "src/numbers/diy-fp.h"
namespace v8 {
namespace internal {
@@ -40,4 +40,4 @@ class PowersOfTenCache {
} // namespace internal
} // namespace v8
-#endif // V8_CACHED_POWERS_H_
+#endif // V8_NUMBERS_CACHED_POWERS_H_
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/numbers/conversions-inl.h
index 34d9069aee..f9d5346ef3 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/numbers/conversions-inl.h
@@ -2,24 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CONVERSIONS_INL_H_
-#define V8_CONVERSIONS_INL_H_
+#ifndef V8_NUMBERS_CONVERSIONS_INL_H_
+#define V8_NUMBERS_CONVERSIONS_INL_H_
-#include <float.h> // Required for DBL_MAX and on Win32 for finite()
-#include <limits.h> // Required for INT_MAX etc.
+#include <float.h> // Required for DBL_MAX and on Win32 for finite()
+#include <limits.h> // Required for INT_MAX etc.
#include <stdarg.h>
#include <cmath>
-#include "src/globals.h" // Required for V8_INFINITY
+#include "src/common/globals.h" // Required for V8_INFINITY
// ----------------------------------------------------------------------------
// Extra POSIX/ANSI functions for Win32/MSVC.
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
-#include "src/conversions.h"
-#include "src/double.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
+#include "src/numbers/double.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -57,7 +57,6 @@ inline unsigned int FastD2UI(double x) {
return 0x80000000u; // Return integer indefinite.
}
-
inline float DoubleToFloat32(double x) {
using limits = std::numeric_limits<float>;
if (x > limits::max()) {
@@ -80,7 +79,6 @@ inline float DoubleToFloat32(double x) {
return static_cast<float>(x);
}
-
inline double DoubleToInteger(double x) {
if (std::isnan(x)) return 0;
if (!std::isfinite(x) || x == 0) return x;
@@ -121,13 +119,11 @@ bool IsSmiDouble(double value) {
!IsMinusZero(value) && value == FastI2D(FastD2I(value));
}
-
bool IsInt32Double(double value) {
return value >= kMinInt && value <= kMaxInt && !IsMinusZero(value) &&
value == FastI2D(FastD2I(value));
}
-
bool IsUint32Double(double value) {
return !IsMinusZero(value) && value >= 0 && value <= kMaxUInt32 &&
value == FastUI2D(FastD2UI(value));
@@ -164,23 +160,23 @@ bool DoubleToUint32IfEqualToSelf(double value, uint32_t* uint32_value) {
}
int32_t NumberToInt32(Object number) {
- if (number->IsSmi()) return Smi::ToInt(number);
- return DoubleToInt32(number->Number());
+ if (number.IsSmi()) return Smi::ToInt(number);
+ return DoubleToInt32(number.Number());
}
uint32_t NumberToUint32(Object number) {
- if (number->IsSmi()) return Smi::ToInt(number);
- return DoubleToUint32(number->Number());
+ if (number.IsSmi()) return Smi::ToInt(number);
+ return DoubleToUint32(number.Number());
}
uint32_t PositiveNumberToUint32(Object number) {
- if (number->IsSmi()) {
+ if (number.IsSmi()) {
int value = Smi::ToInt(number);
if (value <= 0) return 0;
return value;
}
- DCHECK(number->IsHeapNumber());
- double value = number->Number();
+ DCHECK(number.IsHeapNumber());
+ double value = number.Number();
// Catch all values smaller than 1 and use the double-negation trick for NANs.
if (!(value >= 1)) return 0;
uint32_t max = std::numeric_limits<uint32_t>::max();
@@ -189,8 +185,8 @@ uint32_t PositiveNumberToUint32(Object number) {
}
int64_t NumberToInt64(Object number) {
- if (number->IsSmi()) return Smi::ToInt(number);
- double d = number->Number();
+ if (number.IsSmi()) return Smi::ToInt(number);
+ double d = number.Number();
if (std::isnan(d)) return 0;
if (d >= static_cast<double>(std::numeric_limits<int64_t>::max())) {
return std::numeric_limits<int64_t>::max();
@@ -202,13 +198,13 @@ int64_t NumberToInt64(Object number) {
}
uint64_t PositiveNumberToUint64(Object number) {
- if (number->IsSmi()) {
+ if (number.IsSmi()) {
int value = Smi::ToInt(number);
if (value <= 0) return 0;
return value;
}
- DCHECK(number->IsHeapNumber());
- double value = number->Number();
+ DCHECK(number.IsHeapNumber());
+ double value = number.Number();
// Catch all values smaller than 1 and use the double-negation trick for NANs.
if (!(value >= 1)) return 0;
uint64_t max = std::numeric_limits<uint64_t>::max();
@@ -219,7 +215,7 @@ uint64_t PositiveNumberToUint64(Object number) {
bool TryNumberToSize(Object number, size_t* result) {
// Do not create handles in this function! Don't use SealHandleScope because
// the function can be used concurrently.
- if (number->IsSmi()) {
+ if (number.IsSmi()) {
int value = Smi::ToInt(number);
DCHECK(static_cast<unsigned>(Smi::kMaxValue) <=
std::numeric_limits<size_t>::max());
@@ -229,8 +225,8 @@ bool TryNumberToSize(Object number, size_t* result) {
}
return false;
} else {
- DCHECK(number->IsHeapNumber());
- double value = HeapNumber::cast(number)->value();
+ DCHECK(number.IsHeapNumber());
+ double value = HeapNumber::cast(number).value();
// If value is compared directly to the limit, the limit will be
// casted to a double and could end up as limit + 1,
// because a double might not have enough mantissa bits for it.
@@ -259,4 +255,4 @@ uint32_t DoubleToUint32(double x) {
} // namespace internal
} // namespace v8
-#endif // V8_CONVERSIONS_INL_H_
+#endif // V8_NUMBERS_CONVERSIONS_INL_H_
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/numbers/conversions.cc
index f3df399f23..cb424a1ded 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/numbers/conversions.cc
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/conversions.h"
+#include "src/numbers/conversions.h"
#include <limits.h>
#include <stdarg.h>
#include <cmath>
-#include "src/allocation.h"
-#include "src/assert-scope.h"
-#include "src/char-predicates-inl.h"
-#include "src/dtoa.h"
-#include "src/handles.h"
+#include "src/common/assert-scope.h"
+#include "src/handles/handles.h"
#include "src/heap/factory.h"
-#include "src/objects-inl.h"
+#include "src/numbers/dtoa.h"
+#include "src/numbers/strtod.h"
#include "src/objects/bigint.h"
-#include "src/strtod.h"
-#include "src/utils.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
#if defined(_STLP_VENDOR_CSTD)
// STLPort doesn't import fpclassify into the std namespace.
@@ -263,10 +263,10 @@ void StringToIntHelper::ParseInt() {
DisallowHeapAllocation no_gc;
if (IsOneByte()) {
Vector<const uint8_t> vector = GetOneByteVector();
- DetectRadixInternal(vector.start(), vector.length());
+ DetectRadixInternal(vector.begin(), vector.length());
} else {
Vector<const uc16> vector = GetTwoByteVector();
- DetectRadixInternal(vector.start(), vector.length());
+ DetectRadixInternal(vector.begin(), vector.length());
}
}
if (state_ != kRunning) return;
@@ -278,11 +278,11 @@ void StringToIntHelper::ParseInt() {
if (IsOneByte()) {
Vector<const uint8_t> vector = GetOneByteVector();
DCHECK_EQ(length_, vector.length());
- ParseInternal(vector.start());
+ ParseInternal(vector.begin());
} else {
Vector<const uc16> vector = GetTwoByteVector();
DCHECK_EQ(length_, vector.length());
- ParseInternal(vector.start());
+ ParseInternal(vector.begin());
}
}
DCHECK_NE(state_, kRunning);
@@ -468,13 +468,13 @@ class NumberParseIntHelper : public StringToIntHelper {
if (IsOneByte()) {
Vector<const uint8_t> vector = GetOneByteVector();
DCHECK_EQ(length(), vector.length());
- result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.start())
- : HandleBaseTenCase(vector.start());
+ result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.begin())
+ : HandleBaseTenCase(vector.begin());
} else {
Vector<const uc16> vector = GetTwoByteVector();
DCHECK_EQ(length(), vector.length());
- result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.start())
- : HandleBaseTenCase(vector.start());
+ result_ = is_power_of_two ? HandlePowerOfTwoCase(vector.begin())
+ : HandleBaseTenCase(vector.begin());
}
set_state(kDone);
}
@@ -803,26 +803,21 @@ parsing_done:
}
double StringToDouble(const char* str, int flags, double empty_string_val) {
- // We cast to const uint8_t* here to avoid instantiating the
- // InternalStringToDouble() template for const char* as well.
- const uint8_t* start = reinterpret_cast<const uint8_t*>(str);
- const uint8_t* end = start + StrLength(str);
- return InternalStringToDouble(start, end, flags, empty_string_val);
+ // We use {OneByteVector} instead of {CStrVector} to avoid instantiating the
+ // InternalStringToDouble() template for {const char*} as well.
+ return StringToDouble(OneByteVector(str), flags, empty_string_val);
}
double StringToDouble(Vector<const uint8_t> str, int flags,
double empty_string_val) {
- // We cast to const uint8_t* here to avoid instantiating the
- // InternalStringToDouble() template for const char* as well.
- const uint8_t* start = reinterpret_cast<const uint8_t*>(str.start());
- const uint8_t* end = start + str.length();
- return InternalStringToDouble(start, end, flags, empty_string_val);
+ return InternalStringToDouble(str.begin(), str.end(), flags,
+ empty_string_val);
}
double StringToDouble(Vector<const uc16> str, int flags,
double empty_string_val) {
- const uc16* end = str.start() + str.length();
- return InternalStringToDouble(str.start(), end, flags, empty_string_val);
+ const uc16* end = str.begin() + str.length();
+ return InternalStringToDouble(str.begin(), end, flags, empty_string_val);
}
double StringToInt(Isolate* isolate, Handle<String> string, int radix) {
@@ -933,16 +928,19 @@ MaybeHandle<BigInt> BigIntLiteral(Isolate* isolate, const char* string) {
const char* DoubleToCString(double v, Vector<char> buffer) {
switch (FPCLASSIFY_NAMESPACE::fpclassify(v)) {
- case FP_NAN: return "NaN";
- case FP_INFINITE: return (v < 0.0 ? "-Infinity" : "Infinity");
- case FP_ZERO: return "0";
+ case FP_NAN:
+ return "NaN";
+ case FP_INFINITE:
+ return (v < 0.0 ? "-Infinity" : "Infinity");
+ case FP_ZERO:
+ return "0";
default: {
if (IsInt32Double(v)) {
// This will trigger if v is -0 and -0.0 is stringified to "0".
// (see ES section 7.1.12.1 #sec-tostring-applied-to-the-number-type)
return IntToCString(FastD2I(v), buffer);
}
- SimpleStringBuilder builder(buffer.start(), buffer.length());
+ SimpleStringBuilder builder(buffer.begin(), buffer.length());
int decimal_point;
int sign;
const int kV8DtoaBufferCapacity = kBase10MaximalLength + 1;
@@ -950,8 +948,8 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
int length;
DoubleToAscii(v, DTOA_SHORTEST, 0,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &length, &decimal_point);
+ Vector<char>(decimal_rep, kV8DtoaBufferCapacity), &sign,
+ &length, &decimal_point);
if (sign) builder.AddCharacter('-');
@@ -990,7 +988,6 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
}
}
-
const char* IntToCString(int n, Vector<char> buffer) {
bool negative = true;
if (n >= 0) {
@@ -1006,10 +1003,9 @@ const char* IntToCString(int n, Vector<char> buffer) {
n /= 10;
} while (n);
if (negative) buffer[--i] = '-';
- return buffer.start() + i;
+ return buffer.begin() + i;
}
-
char* DoubleToFixedCString(double value, int f) {
const int kMaxDigitsBeforePoint = 21;
const double kFirstNonFixed = 1e21;
@@ -1040,8 +1036,8 @@ char* DoubleToFixedCString(double value, int f) {
char decimal_rep[kDecimalRepCapacity];
int decimal_rep_length;
DoubleToAscii(value, DTOA_FIXED, f,
- Vector<char>(decimal_rep, kDecimalRepCapacity),
- &sign, &decimal_rep_length, &decimal_point);
+ Vector<char>(decimal_rep, kDecimalRepCapacity), &sign,
+ &decimal_rep_length, &decimal_point);
// Create a representation that is padded with zeros if needed.
int zero_prefix_length = 0;
@@ -1053,8 +1049,8 @@ char* DoubleToFixedCString(double value, int f) {
}
if (zero_prefix_length + decimal_rep_length < decimal_point + f) {
- zero_postfix_length = decimal_point + f - decimal_rep_length -
- zero_prefix_length;
+ zero_postfix_length =
+ decimal_point + f - decimal_rep_length - zero_prefix_length;
}
unsigned rep_length =
@@ -1079,9 +1075,7 @@ char* DoubleToFixedCString(double value, int f) {
return builder.Finalize();
}
-
-static char* CreateExponentialRepresentation(char* decimal_rep,
- int exponent,
+static char* CreateExponentialRepresentation(char* decimal_rep, int exponent,
bool negative,
int significant_digits) {
bool negative_exponent = false;
@@ -1101,8 +1095,9 @@ static char* CreateExponentialRepresentation(char* decimal_rep,
if (significant_digits != 1) {
builder.AddCharacter('.');
builder.AddString(decimal_rep + 1);
- int rep_length = StrLength(decimal_rep);
- builder.AddPadding('0', significant_digits - rep_length);
+ size_t rep_length = strlen(decimal_rep);
+ DCHECK_GE(significant_digits, rep_length);
+ builder.AddPadding('0', significant_digits - static_cast<int>(rep_length));
}
builder.AddCharacter('e');
@@ -1111,7 +1106,6 @@ static char* CreateExponentialRepresentation(char* decimal_rep,
return builder.Finalize();
}
-
char* DoubleToExponentialCString(double value, int f) {
// f might be -1 to signal that f was undefined in JavaScript.
DCHECK(f >= -1 && f <= kMaxFractionDigits);
@@ -1137,25 +1131,24 @@ char* DoubleToExponentialCString(double value, int f) {
if (f == -1) {
DoubleToAscii(value, DTOA_SHORTEST, 0,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point);
+ Vector<char>(decimal_rep, kV8DtoaBufferCapacity), &sign,
+ &decimal_rep_length, &decimal_point);
f = decimal_rep_length - 1;
} else {
DoubleToAscii(value, DTOA_PRECISION, f + 1,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point);
+ Vector<char>(decimal_rep, kV8DtoaBufferCapacity), &sign,
+ &decimal_rep_length, &decimal_point);
}
DCHECK_GT(decimal_rep_length, 0);
DCHECK(decimal_rep_length <= f + 1);
int exponent = decimal_point - 1;
char* result =
- CreateExponentialRepresentation(decimal_rep, exponent, negative, f+1);
+ CreateExponentialRepresentation(decimal_rep, exponent, negative, f + 1);
return result;
}
-
char* DoubleToPrecisionCString(double value, int p) {
const int kMinimalDigits = 1;
DCHECK(p >= kMinimalDigits && p <= kMaxFractionDigits);
@@ -1176,8 +1169,8 @@ char* DoubleToPrecisionCString(double value, int p) {
int decimal_rep_length;
DoubleToAscii(value, DTOA_PRECISION, p,
- Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
- &sign, &decimal_rep_length, &decimal_point);
+ Vector<char>(decimal_rep, kV8DtoaBufferCapacity), &sign,
+ &decimal_rep_length, &decimal_point);
DCHECK(decimal_rep_length <= p);
int exponent = decimal_point - 1;
@@ -1193,9 +1186,8 @@ char* DoubleToPrecisionCString(double value, int p) {
// Leave room in the result for appending a minus, a period and in
// the case where decimal_point is not positive for a zero in
// front of the period.
- unsigned result_size = (decimal_point <= 0)
- ? -decimal_point + p + 3
- : p + 2;
+ unsigned result_size =
+ (decimal_point <= 0) ? -decimal_point + p + 3 : p + 2;
SimpleStringBuilder builder(result_size + 1);
if (negative) builder.AddCharacter('-');
if (decimal_point <= 0) {
@@ -1211,8 +1203,10 @@ char* DoubleToPrecisionCString(double value, int p) {
builder.AddCharacter('.');
const int extra = negative ? 2 : 1;
if (decimal_rep_length > decimal_point) {
- const int len = StrLength(decimal_rep + decimal_point);
- const int n = Min(len, p - (builder.position() - extra));
+ const size_t len = strlen(decimal_rep + decimal_point);
+ DCHECK_GE(kMaxInt, len);
+ const int n =
+ Min(static_cast<int>(len), p - (builder.position() - extra));
builder.AddSubstring(decimal_rep + decimal_point, n);
}
builder.AddPadding('0', extra + (p - builder.position()));
@@ -1311,7 +1305,6 @@ char* DoubleToRadixCString(double value, int radix) {
return result;
}
-
// ES6 18.2.4 parseFloat(string)
double StringToDouble(Isolate* isolate, Handle<String> string, int flags,
double empty_string_val) {
@@ -1331,7 +1324,7 @@ double StringToDouble(Isolate* isolate, Handle<String> string, int flags,
bool IsSpecialIndex(String string) {
// Max length of canonical double: -X.XXXXXXXXXXXXXXXXX-eXXX
const int kBufferSize = 24;
- const int length = string->length();
+ const int length = string.length();
if (length == 0 || length > kBufferSize) return false;
uint16_t buffer[kBufferSize];
String::WriteToFlat(string, buffer, 0, length);
@@ -1387,3 +1380,5 @@ bool IsSpecialIndex(String string) {
}
} // namespace internal
} // namespace v8
+
+#undef FPCLASSIFY_NAMESPACE
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/numbers/conversions.h
index 7dde36f970..e71f7ce5d0 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/numbers/conversions.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CONVERSIONS_H_
-#define V8_CONVERSIONS_H_
+#ifndef V8_NUMBERS_CONVERSIONS_H_
+#define V8_NUMBERS_CONVERSIONS_H_
#include "src/base/logging.h"
-#include "src/globals.h"
-#include "src/vector.h"
+#include "src/common/globals.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -42,7 +42,6 @@ inline int FastD2I(double x) {
inline unsigned int FastD2UI(double x);
-
inline double FastI2D(int x) {
// There is no rounding involved in converting an integer to a
// double, so this code should compile to a few instructions without
@@ -50,7 +49,6 @@ inline double FastI2D(int x) {
return static_cast<double>(x);
}
-
inline double FastUI2D(unsigned x) {
// There is no rounding involved in converting an unsigned integer to a
// double, so this code should compile to a few instructions without
@@ -58,23 +56,18 @@ inline double FastUI2D(unsigned x) {
return static_cast<double>(x);
}
-
// This function should match the exact semantics of ECMA-262 20.2.2.17.
inline float DoubleToFloat32(double x);
-
// This function should match the exact semantics of ECMA-262 9.4.
inline double DoubleToInteger(double x);
-
// This function should match the exact semantics of ECMA-262 9.5.
inline int32_t DoubleToInt32(double x);
-
// This function should match the exact semantics of ECMA-262 9.6.
inline uint32_t DoubleToUint32(double x);
-
// Enumeration for allowing octals and ignoring junk when converting
// strings to numbers.
enum ConversionFlags {
@@ -86,7 +79,6 @@ enum ConversionFlags {
ALLOW_TRAILING_JUNK = 16
};
-
// Converts a string into a double value according to ECMA-262 9.3.1
double StringToDouble(Vector<const uint8_t> str, int flags,
double empty_string_val = 0);
@@ -177,4 +169,4 @@ V8_EXPORT_PRIVATE bool IsSpecialIndex(String string);
} // namespace internal
} // namespace v8
-#endif // V8_CONVERSIONS_H_
+#endif // V8_NUMBERS_CONVERSIONS_H_
diff --git a/deps/v8/src/diy-fp.cc b/deps/v8/src/numbers/diy-fp.cc
index 44a9bb122e..600670ede3 100644
--- a/deps/v8/src/diy-fp.cc
+++ b/deps/v8/src/numbers/diy-fp.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/diy-fp.h"
+#include "src/numbers/diy-fp.h"
#include <stdint.h>
diff --git a/deps/v8/src/diy-fp.h b/deps/v8/src/numbers/diy-fp.h
index 680a27ee3b..a7f54427f2 100644
--- a/deps/v8/src/diy-fp.h
+++ b/deps/v8/src/numbers/diy-fp.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DIY_FP_H_
-#define V8_DIY_FP_H_
+#ifndef V8_NUMBERS_DIY_FP_H_
+#define V8_NUMBERS_DIY_FP_H_
#include <stdint.h>
@@ -44,7 +44,6 @@ class DiyFp {
return result;
}
-
// this = this * other.
V8_EXPORT_PRIVATE void Multiply(const DiyFp& other);
@@ -97,4 +96,4 @@ class DiyFp {
} // namespace internal
} // namespace v8
-#endif // V8_DIY_FP_H_
+#endif // V8_NUMBERS_DIY_FP_H_
diff --git a/deps/v8/src/double.h b/deps/v8/src/numbers/double.h
index 65964045e2..56883ad067 100644
--- a/deps/v8/src/double.h
+++ b/deps/v8/src/numbers/double.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DOUBLE_H_
-#define V8_DOUBLE_H_
+#ifndef V8_NUMBERS_DOUBLE_H_
+#define V8_NUMBERS_DOUBLE_H_
#include "src/base/macros.h"
-#include "src/diy-fp.h"
+#include "src/numbers/diy-fp.h"
namespace v8 {
namespace internal {
@@ -32,8 +32,7 @@ class Double {
Double() : d64_(0) {}
explicit Double(double d) : d64_(double_to_uint64(d)) {}
explicit Double(uint64_t d64) : d64_(d64) {}
- explicit Double(DiyFp diy_fp)
- : d64_(DiyFpToUint64(diy_fp)) {}
+ explicit Double(DiyFp diy_fp) : d64_(DiyFpToUint64(diy_fp)) {}
// The value encoded by this Double must be greater or equal to +0.0.
// It must not be special (infinity, or NaN).
@@ -61,9 +60,7 @@ class Double {
}
// Returns the double's bit as uint64.
- uint64_t AsUint64() const {
- return d64_;
- }
+ uint64_t AsUint64() const { return d64_; }
// Returns the next greater double. Returns +infinity on input +infinity.
double NextDouble() const {
@@ -114,12 +111,12 @@ class Double {
bool IsInfinite() const {
uint64_t d64 = AsUint64();
return ((d64 & kExponentMask) == kExponentMask) &&
- ((d64 & kSignificandMask) == 0);
+ ((d64 & kSignificandMask) == 0);
}
int Sign() const {
uint64_t d64 = AsUint64();
- return (d64 & kSignMask) == 0? 1: -1;
+ return (d64 & kSignMask) == 0 ? 1 : -1;
}
// Precondition: the value encoded by this Double must be greater or equal
@@ -206,11 +203,11 @@ class Double {
biased_exponent = static_cast<uint64_t>(exponent + kExponentBias);
}
return (significand & kSignificandMask) |
- (biased_exponent << kPhysicalSignificandSize);
+ (biased_exponent << kPhysicalSignificandSize);
}
};
} // namespace internal
} // namespace v8
-#endif // V8_DOUBLE_H_
+#endif // V8_NUMBERS_DOUBLE_H_
diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/numbers/dtoa.cc
index 6d12994e55..34ca651d10 100644
--- a/deps/v8/src/dtoa.cc
+++ b/deps/v8/src/numbers/dtoa.cc
@@ -5,29 +5,31 @@
#include <cmath>
#include "src/base/logging.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
-#include "src/dtoa.h"
+#include "src/numbers/dtoa.h"
-#include "src/bignum-dtoa.h"
-#include "src/double.h"
-#include "src/fast-dtoa.h"
-#include "src/fixed-dtoa.h"
+#include "src/numbers/bignum-dtoa.h"
+#include "src/numbers/double.h"
+#include "src/numbers/fast-dtoa.h"
+#include "src/numbers/fixed-dtoa.h"
namespace v8 {
namespace internal {
static BignumDtoaMode DtoaToBignumDtoaMode(DtoaMode dtoa_mode) {
switch (dtoa_mode) {
- case DTOA_SHORTEST: return BIGNUM_DTOA_SHORTEST;
- case DTOA_FIXED: return BIGNUM_DTOA_FIXED;
- case DTOA_PRECISION: return BIGNUM_DTOA_PRECISION;
+ case DTOA_SHORTEST:
+ return BIGNUM_DTOA_SHORTEST;
+ case DTOA_FIXED:
+ return BIGNUM_DTOA_FIXED;
+ case DTOA_PRECISION:
+ return BIGNUM_DTOA_PRECISION;
default:
UNREACHABLE();
}
}
-
void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
Vector<char> buffer, int* sign, int* length, int* point) {
DCHECK(!Double(v).IsSpecial());
@@ -63,8 +65,8 @@ void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
fast_worked = FastFixedDtoa(v, requested_digits, buffer, length, point);
break;
case DTOA_PRECISION:
- fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits,
- buffer, length, point);
+ fast_worked = FastDtoa(v, FAST_DTOA_PRECISION, requested_digits, buffer,
+ length, point);
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/dtoa.h b/deps/v8/src/numbers/dtoa.h
index 53968561fa..da67c180e8 100644
--- a/deps/v8/src/dtoa.h
+++ b/deps/v8/src/numbers/dtoa.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DTOA_H_
-#define V8_DTOA_H_
+#ifndef V8_NUMBERS_DTOA_H_
+#define V8_NUMBERS_DTOA_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -63,4 +63,4 @@ V8_EXPORT_PRIVATE void DoubleToAscii(double v, DtoaMode mode,
} // namespace internal
} // namespace v8
-#endif // V8_DTOA_H_
+#endif // V8_NUMBERS_DTOA_H_
diff --git a/deps/v8/src/fast-dtoa.cc b/deps/v8/src/numbers/fast-dtoa.cc
index 8c061dae7f..15ff0d476a 100644
--- a/deps/v8/src/fast-dtoa.cc
+++ b/deps/v8/src/numbers/fast-dtoa.cc
@@ -4,13 +4,13 @@
#include <stdint.h>
#include "src/base/logging.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
-#include "src/fast-dtoa.h"
+#include "src/numbers/fast-dtoa.h"
-#include "src/cached-powers.h"
-#include "src/diy-fp.h"
-#include "src/double.h"
+#include "src/numbers/cached-powers.h"
+#include "src/numbers/diy-fp.h"
+#include "src/numbers/double.h"
namespace v8 {
namespace internal {
@@ -24,7 +24,6 @@ namespace internal {
static const int kMinimalTargetExponent = -60;
static const int kMaximalTargetExponent = -32;
-
// Adjusts the last digit of the generated number, and screens out generated
// solutions that may be inaccurate. A solution may be inaccurate if it is
// outside the safe interval, or if we ctannot prove that it is closer to the
@@ -40,13 +39,9 @@ static const int kMaximalTargetExponent = -32;
// Output: returns true if the buffer is guaranteed to contain the closest
// representable number to the input.
// Modifies the generated digits in the buffer to approach (round towards) w.
-static bool RoundWeed(Vector<char> buffer,
- int length,
- uint64_t distance_too_high_w,
- uint64_t unsafe_interval,
- uint64_t rest,
- uint64_t ten_kappa,
- uint64_t unit) {
+static bool RoundWeed(Vector<char> buffer, int length,
+ uint64_t distance_too_high_w, uint64_t unsafe_interval,
+ uint64_t rest, uint64_t ten_kappa, uint64_t unit) {
uint64_t small_distance = distance_too_high_w - unit;
uint64_t big_distance = distance_too_high_w + unit;
// Let w_low = too_high - big_distance, and
@@ -121,9 +116,9 @@ static bool RoundWeed(Vector<char> buffer,
// We need to do the following tests in this order to avoid over- and
// underflows.
DCHECK(rest <= unsafe_interval);
- while (rest < small_distance && // Negated condition 1
+ while (rest < small_distance && // Negated condition 1
unsafe_interval - rest >= ten_kappa && // Negated condition 2
- (rest + ten_kappa < small_distance || // buffer{-1} > w_high
+ (rest + ten_kappa < small_distance || // buffer{-1} > w_high
small_distance - rest >= rest + ten_kappa - small_distance)) {
buffer[length - 1]--;
rest += ten_kappa;
@@ -132,8 +127,7 @@ static bool RoundWeed(Vector<char> buffer,
// We have approached w+ as much as possible. We now test if approaching w-
// would require changing the buffer. If yes, then we have two possible
// representations close to w, but we cannot decide which one is closer.
- if (rest < big_distance &&
- unsafe_interval - rest >= ten_kappa &&
+ if (rest < big_distance && unsafe_interval - rest >= ten_kappa &&
(rest + ten_kappa < big_distance ||
big_distance - rest > rest + ten_kappa - big_distance)) {
return false;
@@ -147,7 +141,6 @@ static bool RoundWeed(Vector<char> buffer,
return (2 * unit <= rest) && (rest <= unsafe_interval - 4 * unit);
}
-
// Rounds the buffer upwards if the result is closer to v by possibly adding
// 1 to the buffer. If the precision of the calculation is not sufficient to
// round correctly, return false.
@@ -160,12 +153,8 @@ static bool RoundWeed(Vector<char> buffer,
// unambiguously determined.
//
// Precondition: rest < ten_kappa.
-static bool RoundWeedCounted(Vector<char> buffer,
- int length,
- uint64_t rest,
- uint64_t ten_kappa,
- uint64_t unit,
- int* kappa) {
+static bool RoundWeedCounted(Vector<char> buffer, int length, uint64_t rest,
+ uint64_t ten_kappa, uint64_t unit, int* kappa) {
DCHECK(rest < ten_kappa);
// The following tests are done in a specific order to avoid overflows. They
// will work correctly with any uint64 values of rest < ten_kappa and unit.
@@ -204,7 +193,6 @@ static bool RoundWeedCounted(Vector<char> buffer,
return false;
}
-
static const uint32_t kTen4 = 10000;
static const uint32_t kTen5 = 100000;
static const uint32_t kTen6 = 1000000;
@@ -217,9 +205,7 @@ static const uint32_t kTen9 = 1000000000;
// If number_bits == 0 then 0^-1 is returned
// The number of bits must be <= 32.
// Precondition: number < (1 << (number_bits + 1)).
-static void BiggestPowerTen(uint32_t number,
- int number_bits,
- uint32_t* power,
+static void BiggestPowerTen(uint32_t number, int number_bits, uint32_t* power,
int* exponent) {
switch (number_bits) {
case 32:
@@ -368,12 +354,8 @@ static void BiggestPowerTen(uint32_t number,
// represent 'w' we can stop. Everything inside the interval low - high
// represents w. However we have to pay attention to low, high and w's
// imprecision.
-static bool DigitGen(DiyFp low,
- DiyFp w,
- DiyFp high,
- Vector<char> buffer,
- int* length,
- int* kappa) {
+static bool DigitGen(DiyFp low, DiyFp w, DiyFp high, Vector<char> buffer,
+ int* length, int* kappa) {
DCHECK(low.e() == w.e() && w.e() == high.e());
DCHECK(low.f() + 1 <= high.f() - 1);
DCHECK(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
@@ -408,8 +390,8 @@ static bool DigitGen(DiyFp low,
uint64_t fractionals = too_high.f() & (one.f() - 1);
uint32_t divisor;
int divisor_exponent;
- BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
- &divisor, &divisor_exponent);
+ BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()), &divisor,
+ &divisor_exponent);
*kappa = divisor_exponent + 1;
*length = 0;
// Loop invariant: buffer = too_high / 10^kappa (integer division)
@@ -464,8 +446,6 @@ static bool DigitGen(DiyFp low,
}
}
-
-
// Generates (at most) requested_digits of input number w.
// w is a floating-point number (DiyFp), consisting of a significand and an
// exponent. Its exponent is bounded by kMinimalTargetExponent and
@@ -494,11 +474,8 @@ static bool DigitGen(DiyFp low,
// numbers. If the precision is not enough to guarantee all the postconditions
// then false is returned. This usually happens rarely, but the failure-rate
// increases with higher requested_digits.
-static bool DigitGenCounted(DiyFp w,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* kappa) {
+static bool DigitGenCounted(DiyFp w, int requested_digits, Vector<char> buffer,
+ int* length, int* kappa) {
DCHECK(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
DCHECK_GE(kMinimalTargetExponent, -60);
DCHECK_LE(kMaximalTargetExponent, -32);
@@ -516,8 +493,8 @@ static bool DigitGenCounted(DiyFp w,
uint64_t fractionals = w.f() & (one.f() - 1);
uint32_t divisor;
int divisor_exponent;
- BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()),
- &divisor, &divisor_exponent);
+ BiggestPowerTen(integrals, DiyFp::kSignificandSize - (-one.e()), &divisor,
+ &divisor_exponent);
*kappa = divisor_exponent + 1;
*length = 0;
@@ -571,7 +548,6 @@ static bool DigitGenCounted(DiyFp w,
kappa);
}
-
// Provides a decimal representation of v.
// Returns true if it succeeds, otherwise the result cannot be trusted.
// There will be *length digits inside the buffer (not null-terminated).
@@ -583,9 +559,7 @@ static bool DigitGenCounted(DiyFp w,
// The last digit will be closest to the actual v. That is, even if several
// digits might correctly yield 'v' when read again, the closest will be
// computed.
-static bool Grisu3(double v,
- Vector<char> buffer,
- int* length,
+static bool Grisu3(double v, Vector<char> buffer, int* length,
int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
// boundary_minus and boundary_plus are the boundaries between v and its
@@ -598,17 +572,16 @@ static bool Grisu3(double v,
DiyFp ten_mk; // Cached power of ten: 10^-k
int mk; // -k
int ten_mk_minimal_binary_exponent =
- kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
int ten_mk_maximal_binary_exponent =
- kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
- ten_mk_minimal_binary_exponent,
- ten_mk_maximal_binary_exponent,
- &ten_mk, &mk);
- DCHECK((kMinimalTargetExponent <= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize) &&
- (kMaximalTargetExponent >= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize));
+ ten_mk_minimal_binary_exponent, ten_mk_maximal_binary_exponent, &ten_mk,
+ &mk);
+ DCHECK(
+ (kMinimalTargetExponent <=
+ w.e() + ten_mk.e() + DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() + DiyFp::kSignificandSize));
// Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
// 64 bit significand and ten_mk is thus only precise up to 64 bits.
@@ -627,7 +600,7 @@ static bool Grisu3(double v,
// scaled_w. However the code becomes much less readable and the speed
// enhancements are not terriffic.
DiyFp scaled_boundary_minus = DiyFp::Times(boundary_minus, ten_mk);
- DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
+ DiyFp scaled_boundary_plus = DiyFp::Times(boundary_plus, ten_mk);
// DigitGen will generate the digits of scaled_w. Therefore we have
// v == (double) (scaled_w * 10^-mk).
@@ -642,32 +615,27 @@ static bool Grisu3(double v,
return result;
}
-
// The "counted" version of grisu3 (see above) only generates requested_digits
// number of digits. This version does not generate the shortest representation,
// and with enough requested digits 0.1 will at some point print as 0.9999999...
// Grisu3 is too imprecise for real halfway cases (1.5 will not work) and
// therefore the rounding strategy for halfway cases is irrelevant.
-static bool Grisu3Counted(double v,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* decimal_exponent) {
+static bool Grisu3Counted(double v, int requested_digits, Vector<char> buffer,
+ int* length, int* decimal_exponent) {
DiyFp w = Double(v).AsNormalizedDiyFp();
DiyFp ten_mk; // Cached power of ten: 10^-k
int mk; // -k
int ten_mk_minimal_binary_exponent =
- kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ kMinimalTargetExponent - (w.e() + DiyFp::kSignificandSize);
int ten_mk_maximal_binary_exponent =
- kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
+ kMaximalTargetExponent - (w.e() + DiyFp::kSignificandSize);
PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
- ten_mk_minimal_binary_exponent,
- ten_mk_maximal_binary_exponent,
- &ten_mk, &mk);
- DCHECK((kMinimalTargetExponent <= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize) &&
- (kMaximalTargetExponent >= w.e() + ten_mk.e() +
- DiyFp::kSignificandSize));
+ ten_mk_minimal_binary_exponent, ten_mk_maximal_binary_exponent, &ten_mk,
+ &mk);
+ DCHECK(
+ (kMinimalTargetExponent <=
+ w.e() + ten_mk.e() + DiyFp::kSignificandSize) &&
+ (kMaximalTargetExponent >= w.e() + ten_mk.e() + DiyFp::kSignificandSize));
// Note that ten_mk is only an approximation of 10^-k. A DiyFp only contains a
// 64 bit significand and ten_mk is thus only precise up to 64 bits.
@@ -685,19 +653,14 @@ static bool Grisu3Counted(double v,
// will not always be exactly the same since DigitGenCounted only produces a
// limited number of digits.)
int kappa;
- bool result = DigitGenCounted(scaled_w, requested_digits,
- buffer, length, &kappa);
+ bool result =
+ DigitGenCounted(scaled_w, requested_digits, buffer, length, &kappa);
*decimal_exponent = -mk + kappa;
return result;
}
-
-bool FastDtoa(double v,
- FastDtoaMode mode,
- int requested_digits,
- Vector<char> buffer,
- int* length,
- int* decimal_point) {
+bool FastDtoa(double v, FastDtoaMode mode, int requested_digits,
+ Vector<char> buffer, int* length, int* decimal_point) {
DCHECK_GT(v, 0);
DCHECK(!Double(v).IsSpecial());
@@ -708,8 +671,8 @@ bool FastDtoa(double v,
result = Grisu3(v, buffer, length, &decimal_exponent);
break;
case FAST_DTOA_PRECISION:
- result = Grisu3Counted(v, requested_digits,
- buffer, length, &decimal_exponent);
+ result =
+ Grisu3Counted(v, requested_digits, buffer, length, &decimal_exponent);
break;
default:
UNREACHABLE();
diff --git a/deps/v8/src/fast-dtoa.h b/deps/v8/src/numbers/fast-dtoa.h
index 7a02505ed3..4b4fb2fcc2 100644
--- a/deps/v8/src/fast-dtoa.h
+++ b/deps/v8/src/numbers/fast-dtoa.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FAST_DTOA_H_
-#define V8_FAST_DTOA_H_
+#ifndef V8_NUMBERS_FAST_DTOA_H_
+#define V8_NUMBERS_FAST_DTOA_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -57,4 +57,4 @@ V8_EXPORT_PRIVATE bool FastDtoa(double d, FastDtoaMode mode,
} // namespace internal
} // namespace v8
-#endif // V8_FAST_DTOA_H_
+#endif // V8_NUMBERS_FAST_DTOA_H_
diff --git a/deps/v8/src/fixed-dtoa.cc b/deps/v8/src/numbers/fixed-dtoa.cc
index b360ffb85c..29ec68190d 100644
--- a/deps/v8/src/fixed-dtoa.cc
+++ b/deps/v8/src/numbers/fixed-dtoa.cc
@@ -7,10 +7,10 @@
#include <cmath>
#include "src/base/logging.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
-#include "src/double.h"
-#include "src/fixed-dtoa.h"
+#include "src/numbers/double.h"
+#include "src/numbers/fixed-dtoa.h"
namespace v8 {
namespace internal {
@@ -19,8 +19,8 @@ namespace internal {
// platforms that support 128bit integers.
class UInt128 {
public:
- UInt128() : high_bits_(0), low_bits_(0) { }
- UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) { }
+ UInt128() : high_bits_(0), low_bits_(0) {}
+ UInt128(uint64_t high, uint64_t low) : high_bits_(high), low_bits_(low) {}
void Multiply(uint32_t multiplicand) {
uint64_t accumulator;
@@ -77,9 +77,7 @@ class UInt128 {
}
}
- bool IsZero() const {
- return high_bits_ == 0 && low_bits_ == 0;
- }
+ bool IsZero() const { return high_bits_ == 0 && low_bits_ == 0; }
int BitAt(int position) {
if (position >= 64) {
@@ -96,10 +94,8 @@ class UInt128 {
uint64_t low_bits_;
};
-
static const int kDoubleSignificandSize = 53; // Includes the hidden bit.
-
static void FillDigits32FixedLength(uint32_t number, int requested_length,
Vector<char> buffer, int* length) {
for (int i = requested_length - 1; i >= 0; --i) {
@@ -109,7 +105,6 @@ static void FillDigits32FixedLength(uint32_t number, int requested_length,
*length += requested_length;
}
-
static void FillDigits32(uint32_t number, Vector<char> buffer, int* length) {
int number_length = 0;
// We fill the digits in reverse order and exchange them afterwards.
@@ -132,7 +127,6 @@ static void FillDigits32(uint32_t number, Vector<char> buffer, int* length) {
*length += number_length;
}
-
static void FillDigits64FixedLength(uint64_t number, int requested_length,
Vector<char> buffer, int* length) {
const uint32_t kTen7 = 10000000;
@@ -147,7 +141,6 @@ static void FillDigits64FixedLength(uint64_t number, int requested_length,
FillDigits32FixedLength(part2, 7, buffer, length);
}
-
static void FillDigits64(uint64_t number, Vector<char> buffer, int* length) {
const uint32_t kTen7 = 10000000;
// For efficiency cut the number into 3 uint32_t parts, and print those.
@@ -197,7 +190,6 @@ static void DtoaRoundUp(Vector<char> buffer, int* length, int* decimal_point) {
}
}
-
// The given fractionals number represents a fixed-point number with binary
// point at bit (-exponent).
// Preconditions:
@@ -265,7 +257,6 @@ static void FillFractionals(uint64_t fractionals, int exponent,
}
}
-
// Removes leading and trailing zeros.
// If leading zeros are removed then the decimal point position is adjusted.
static void TrimZeros(Vector<char> buffer, int* length, int* decimal_point) {
@@ -285,12 +276,8 @@ static void TrimZeros(Vector<char> buffer, int* length, int* decimal_point) {
}
}
-
-bool FastFixedDtoa(double v,
- int fractional_count,
- Vector<char> buffer,
- int* length,
- int* decimal_point) {
+bool FastFixedDtoa(double v, int fractional_count, Vector<char> buffer,
+ int* length, int* decimal_point) {
const uint32_t kMaxUInt32 = 0xFFFFFFFF;
uint64_t significand = Double(v).Significand();
int exponent = Double(v).Exponent();
@@ -357,8 +344,8 @@ bool FastFixedDtoa(double v,
FillDigits32(static_cast<uint32_t>(integrals), buffer, length);
}
*decimal_point = *length;
- FillFractionals(fractionals, exponent, fractional_count,
- buffer, length, decimal_point);
+ FillFractionals(fractionals, exponent, fractional_count, buffer, length,
+ decimal_point);
} else if (exponent < -128) {
// This configuration (with at most 20 digits) means that all digits must be
// 0.
@@ -368,8 +355,8 @@ bool FastFixedDtoa(double v,
*decimal_point = -fractional_count;
} else {
*decimal_point = 0;
- FillFractionals(significand, exponent, fractional_count,
- buffer, length, decimal_point);
+ FillFractionals(significand, exponent, fractional_count, buffer, length,
+ decimal_point);
}
TrimZeros(buffer, length, decimal_point);
buffer[*length] = '\0';
diff --git a/deps/v8/src/fixed-dtoa.h b/deps/v8/src/numbers/fixed-dtoa.h
index 44915d2cce..d38dbe13be 100644
--- a/deps/v8/src/fixed-dtoa.h
+++ b/deps/v8/src/numbers/fixed-dtoa.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FIXED_DTOA_H_
-#define V8_FIXED_DTOA_H_
+#ifndef V8_NUMBERS_FIXED_DTOA_H_
+#define V8_NUMBERS_FIXED_DTOA_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -33,4 +33,4 @@ V8_EXPORT_PRIVATE bool FastFixedDtoa(double v, int fractional_count,
} // namespace internal
} // namespace v8
-#endif // V8_FIXED_DTOA_H_
+#endif // V8_NUMBERS_FIXED_DTOA_H_
diff --git a/deps/v8/src/hash-seed-inl.h b/deps/v8/src/numbers/hash-seed-inl.h
index 575da0c9fd..dad8db01c7 100644
--- a/deps/v8/src/hash-seed-inl.h
+++ b/deps/v8/src/numbers/hash-seed-inl.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HASH_SEED_INL_H_
-#define V8_HASH_SEED_INL_H_
+#ifndef V8_NUMBERS_HASH_SEED_INL_H_
+#define V8_NUMBERS_HASH_SEED_INL_H_
#include <stdint.h>
@@ -28,7 +28,7 @@ inline uint64_t HashSeed(ReadOnlyRoots roots);
// See comment above for why this isn't at the top of the file.
#include "src/objects/fixed-array-inl.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
@@ -39,7 +39,7 @@ inline uint64_t HashSeed(Isolate* isolate) {
inline uint64_t HashSeed(ReadOnlyRoots roots) {
uint64_t seed;
- roots.hash_seed()->copy_out(0, reinterpret_cast<byte*>(&seed), kInt64Size);
+ roots.hash_seed().copy_out(0, reinterpret_cast<byte*>(&seed), kInt64Size);
DCHECK(FLAG_randomize_hashes || seed == 0);
return seed;
}
@@ -47,4 +47,4 @@ inline uint64_t HashSeed(ReadOnlyRoots roots) {
} // namespace internal
} // namespace v8
-#endif // V8_HASH_SEED_INL_H_
+#endif // V8_NUMBERS_HASH_SEED_INL_H_
diff --git a/deps/v8/src/math-random.cc b/deps/v8/src/numbers/math-random.cc
index d27d88e560..dee18788a7 100644
--- a/deps/v8/src/math-random.cc
+++ b/deps/v8/src/numbers/math-random.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/math-random.h"
+#include "src/numbers/math-random.h"
-#include "src/assert-scope.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/contexts-inl.h"
-#include "src/isolate.h"
+#include "src/common/assert-scope.h"
+#include "src/execution/isolate.h"
+#include "src/objects/contexts-inl.h"
#include "src/objects/fixed-array.h"
#include "src/objects/smi.h"
@@ -28,17 +28,17 @@ void MathRandom::InitializeContext(Isolate* isolate,
}
void MathRandom::ResetContext(Context native_context) {
- native_context->set_math_random_index(Smi::zero());
+ native_context.set_math_random_index(Smi::zero());
State state = {0, 0};
- PodArray<State>::cast(native_context->math_random_state())->set(0, state);
+ PodArray<State>::cast(native_context.math_random_state()).set(0, state);
}
Address MathRandom::RefillCache(Isolate* isolate, Address raw_native_context) {
Context native_context = Context::cast(Object(raw_native_context));
DisallowHeapAllocation no_gc;
PodArray<State> pod =
- PodArray<State>::cast(native_context->math_random_state());
- State state = pod->get(0);
+ PodArray<State>::cast(native_context.math_random_state());
+ State state = pod.get(0);
// Initialize state if not yet initialized. If a fixed random seed was
// requested, use it to reset our state the first time a script asks for
// random numbers in this context. This ensures the script sees a consistent
@@ -56,17 +56,17 @@ Address MathRandom::RefillCache(Isolate* isolate, Address raw_native_context) {
}
FixedDoubleArray cache =
- FixedDoubleArray::cast(native_context->math_random_cache());
+ FixedDoubleArray::cast(native_context.math_random_cache());
// Create random numbers.
for (int i = 0; i < kCacheSize; i++) {
// Generate random numbers using xorshift128+.
base::RandomNumberGenerator::XorShift128(&state.s0, &state.s1);
- cache->set(i, base::RandomNumberGenerator::ToDouble(state.s0));
+ cache.set(i, base::RandomNumberGenerator::ToDouble(state.s0));
}
- pod->set(0, state);
+ pod.set(0, state);
Smi new_index = Smi::FromInt(kCacheSize);
- native_context->set_math_random_index(new_index);
+ native_context.set_math_random_index(new_index);
return new_index.ptr();
}
diff --git a/deps/v8/src/math-random.h b/deps/v8/src/numbers/math-random.h
index 481a245750..c321b82ba2 100644
--- a/deps/v8/src/math-random.h
+++ b/deps/v8/src/numbers/math-random.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MATH_RANDOM_H_
-#define V8_MATH_RANDOM_H_
+#ifndef V8_NUMBERS_MATH_RANDOM_H_
+#define V8_NUMBERS_MATH_RANDOM_H_
-#include "src/contexts.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/objects/contexts.h"
namespace v8 {
namespace internal {
@@ -32,4 +32,4 @@ class MathRandom : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_MATH_RANDOM_H_
+#endif // V8_NUMBERS_MATH_RANDOM_H_
diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/numbers/strtod.cc
index 8d42b4c202..dfc518cc7b 100644
--- a/deps/v8/src/strtod.cc
+++ b/deps/v8/src/numbers/strtod.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/strtod.h"
+#include "src/numbers/strtod.h"
#include <stdarg.h>
#include <cmath>
-#include "src/bignum.h"
-#include "src/cached-powers.h"
-#include "src/double.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/common/globals.h"
+#include "src/numbers/bignum.h"
+#include "src/numbers/cached-powers.h"
+#include "src/numbers/double.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -76,22 +76,19 @@ static Vector<const char> TrimLeadingZeros(Vector<const char> buffer) {
return buffer.SubVector(i, buffer.length());
}
}
- return Vector<const char>(buffer.start(), 0);
+ return Vector<const char>(buffer.begin(), 0);
}
-
static Vector<const char> TrimTrailingZeros(Vector<const char> buffer) {
for (int i = buffer.length() - 1; i >= 0; --i) {
if (buffer[i] != '0') {
return buffer.SubVector(0, i + 1);
}
}
- return Vector<const char>(buffer.start(), 0);
+ return Vector<const char>(buffer.begin(), 0);
}
-
-static void TrimToMaxSignificantDigits(Vector<const char> buffer,
- int exponent,
+static void TrimToMaxSignificantDigits(Vector<const char> buffer, int exponent,
char* significant_buffer,
int* significant_exponent) {
for (int i = 0; i < kMaxSignificantDecimalDigits - 1; ++i) {
@@ -107,7 +104,6 @@ static void TrimToMaxSignificantDigits(Vector<const char> buffer,
exponent + (buffer.length() - kMaxSignificantDecimalDigits);
}
-
// Reads digits from the buffer and converts them to a uint64.
// Reads in as many digits as fit into a uint64.
// When the string starts with "1844674407370955161" no further digit is read.
@@ -126,13 +122,11 @@ static uint64_t ReadUint64(Vector<const char> buffer,
return result;
}
-
// Reads a DiyFp from the buffer.
// The returned DiyFp is not necessarily normalized.
// If remaining_decimals is zero then the returned DiyFp is accurate.
// Otherwise it has been rounded and has error of at most 1/2 ulp.
-static void ReadDiyFp(Vector<const char> buffer,
- DiyFp* result,
+static void ReadDiyFp(Vector<const char> buffer, DiyFp* result,
int* remaining_decimals) {
int read_digits;
uint64_t significand = ReadUint64(buffer, &read_digits);
@@ -151,9 +145,7 @@ static void ReadDiyFp(Vector<const char> buffer,
}
}
-
-static bool DoubleStrtod(Vector<const char> trimmed,
- int exponent,
+static bool DoubleStrtod(Vector<const char> trimmed, int exponent,
double* result) {
#if (V8_TARGET_ARCH_IA32 || defined(USE_SIMULATOR)) && !defined(_MSC_VER)
// On x86 the floating-point stack can be 64 or 80 bits wide. If it is
@@ -208,7 +200,6 @@ static bool DoubleStrtod(Vector<const char> trimmed,
#endif
}
-
// Returns 10^exponent as an exact DiyFp.
// The given exponent must be in the range [1; kDecimalExponentDistance[.
static DiyFp AdjustmentPowerOfTen(int exponent) {
@@ -237,12 +228,10 @@ static DiyFp AdjustmentPowerOfTen(int exponent) {
}
}
-
// If the function returns true then the result is the correct double.
// Otherwise it is either the correct double or the double that is just below
// the correct double.
-static bool DiyFpStrtod(Vector<const char> buffer,
- int exponent,
+static bool DiyFpStrtod(Vector<const char> buffer, int exponent,
double* result) {
DiyFp input;
int remaining_decimals;
@@ -269,8 +258,7 @@ static bool DiyFpStrtod(Vector<const char> buffer,
}
DiyFp cached_power;
int cached_decimal_exponent;
- PowersOfTenCache::GetCachedPowerForDecimalExponent(exponent,
- &cached_power,
+ PowersOfTenCache::GetCachedPowerForDecimalExponent(exponent, &cached_power,
&cached_decimal_exponent);
if (cached_decimal_exponent != exponent) {
@@ -313,7 +301,7 @@ static bool DiyFpStrtod(Vector<const char> buffer,
// half-way multiplied by the denominator exceeds the range of an uint64.
// Simply shift everything to the right.
int shift_amount = (precision_digits_count + kDenominatorLog) -
- DiyFp::kSignificandSize + 1;
+ DiyFp::kSignificandSize + 1;
input.set_f(input.f() >> shift_amount);
input.set_e(input.e() + shift_amount);
// We add 1 for the lost precision of error, and kDenominator for
@@ -350,7 +338,6 @@ static bool DiyFpStrtod(Vector<const char> buffer,
}
}
-
// Returns the correct double for the buffer*10^exponent.
// The variable guess should be a close guess that is either the correct double
// or its lower neighbor (the nearest double less than the correct one).
@@ -358,8 +345,7 @@ static bool DiyFpStrtod(Vector<const char> buffer,
// buffer.length() + exponent <= kMaxDecimalPower + 1
// buffer.length() + exponent > kMinDecimalPower
// buffer.length() <= kMaxDecimalSignificantDigits
-static double BignumStrtod(Vector<const char> buffer,
- int exponent,
+static double BignumStrtod(Vector<const char> buffer, int exponent,
double guess) {
if (guess == V8_INFINITY) {
return guess;
@@ -402,7 +388,6 @@ static double BignumStrtod(Vector<const char> buffer,
}
}
-
double Strtod(Vector<const char> buffer, int exponent) {
Vector<const char> left_trimmed = TrimLeadingZeros(buffer);
Vector<const char> trimmed = TrimTrailingZeros(left_trimmed);
@@ -411,11 +396,11 @@ double Strtod(Vector<const char> buffer, int exponent) {
if (trimmed.length() > kMaxSignificantDecimalDigits) {
char significant_buffer[kMaxSignificantDecimalDigits];
int significant_exponent;
- TrimToMaxSignificantDigits(trimmed, exponent,
- significant_buffer, &significant_exponent);
- return Strtod(Vector<const char>(significant_buffer,
- kMaxSignificantDecimalDigits),
- significant_exponent);
+ TrimToMaxSignificantDigits(trimmed, exponent, significant_buffer,
+ &significant_exponent);
+ return Strtod(
+ Vector<const char>(significant_buffer, kMaxSignificantDecimalDigits),
+ significant_exponent);
}
if (exponent + trimmed.length() - 1 >= kMaxDecimalPower) return V8_INFINITY;
if (exponent + trimmed.length() <= kMinDecimalPower) return 0.0;
diff --git a/deps/v8/src/strtod.h b/deps/v8/src/numbers/strtod.h
index 09b6d06c41..f0978159ba 100644
--- a/deps/v8/src/strtod.h
+++ b/deps/v8/src/numbers/strtod.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STRTOD_H_
-#define V8_STRTOD_H_
+#ifndef V8_NUMBERS_STRTOD_H_
+#define V8_NUMBERS_STRTOD_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -17,4 +17,4 @@ V8_EXPORT_PRIVATE double Strtod(Vector<const char> buffer, int exponent);
} // namespace internal
} // namespace v8
-#endif // V8_STRTOD_H_
+#endif // V8_NUMBERS_STRTOD_H_
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index ac0a16c944..aaf0105e51 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -46,7 +46,7 @@ void AllocationSite::set_boilerplate(JSObject object, WriteBarrierMode mode) {
int AllocationSite::transition_info() const {
DCHECK(!PointsToLiteral());
- return Smi::cast(transition_info_or_boilerplate())->value();
+ return Smi::cast(transition_info_or_boilerplate()).value();
}
void AllocationSite::set_transition_info(int value) {
@@ -105,9 +105,9 @@ void AllocationSite::SetDoNotInlineCall() {
bool AllocationSite::PointsToLiteral() const {
Object raw_value = transition_info_or_boilerplate();
- DCHECK_EQ(!raw_value->IsSmi(),
- raw_value->IsJSArray() || raw_value->IsJSObject());
- return !raw_value->IsSmi();
+ DCHECK_EQ(!raw_value.IsSmi(),
+ raw_value.IsJSArray() || raw_value.IsJSObject());
+ return !raw_value.IsSmi();
}
// Heuristic: We only need to create allocation site info if the boilerplate
@@ -181,8 +181,8 @@ inline void AllocationSite::IncrementMementoCreateCount() {
}
bool AllocationMemento::IsValid() const {
- return allocation_site()->IsAllocationSite() &&
- !AllocationSite::cast(allocation_site())->IsZombie();
+ return allocation_site().IsAllocationSite() &&
+ !AllocationSite::cast(allocation_site()).IsZombie();
}
AllocationSite AllocationMemento::GetAllocationSite() const {
@@ -191,7 +191,7 @@ AllocationSite AllocationMemento::GetAllocationSite() const {
}
Address AllocationMemento::GetAllocationSiteUnchecked() const {
- return allocation_site()->ptr();
+ return allocation_site().ptr();
}
template <AllocationSiteUpdateMode update_or_check>
@@ -200,7 +200,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
Isolate* isolate = site->GetIsolate();
bool result = false;
- if (site->PointsToLiteral() && site->boilerplate()->IsJSArray()) {
+ if (site->PointsToLiteral() && site->boilerplate().IsJSArray()) {
Handle<JSArray> boilerplate(JSArray::cast(site->boilerplate()), isolate);
ElementsKind kind = boilerplate->GetElementsKind();
// if kind is holey ensure that to_kind is as well.
@@ -211,7 +211,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
// If the array is huge, it's not likely to be defined in a local
// function, so we shouldn't make new instances of it very often.
uint32_t length = 0;
- CHECK(boilerplate->length()->ToArrayLength(&length));
+ CHECK(boilerplate->length().ToArrayLength(&length));
if (length <= kMaximumArrayBytesToPretransition) {
if (update_or_check == AllocationSiteUpdateMode::kCheckOnly) {
return true;
@@ -224,7 +224,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
ElementsKindToString(to_kind));
}
JSObject::TransitionElementsKind(boilerplate, to_kind);
- site->dependent_code()->DeoptimizeDependentCodeGroup(
+ site->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
result = true;
}
@@ -244,7 +244,7 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
ElementsKindToString(to_kind));
}
site->SetElementsKind(to_kind);
- site->dependent_code()->DeoptimizeDependentCodeGroup(
+ site->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kAllocationSiteTransitionChangedGroup);
result = true;
}
diff --git a/deps/v8/src/allocation-site-scopes-inl.h b/deps/v8/src/objects/allocation-site-scopes-inl.h
index 6500e9efdc..350b243e46 100644
--- a/deps/v8/src/allocation-site-scopes-inl.h
+++ b/deps/v8/src/objects/allocation-site-scopes-inl.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ALLOCATION_SITE_SCOPES_INL_H_
-#define V8_ALLOCATION_SITE_SCOPES_INL_H_
+#ifndef V8_OBJECTS_ALLOCATION_SITE_SCOPES_INL_H_
+#define V8_OBJECTS_ALLOCATION_SITE_SCOPES_INL_H_
-#include "src/allocation-site-scopes.h"
+#include "src/objects/allocation-site-scopes.h"
#include "src/objects/allocation-site-inl.h"
@@ -39,7 +39,7 @@ void AllocationSiteUsageContext::ExitScope(Handle<AllocationSite> scope_site,
}
bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
- if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
+ if (activated_ && AllocationSite::CanTrack(object->map().instance_type())) {
if (FLAG_allocation_site_pretenuring ||
AllocationSite::ShouldTrack(object->GetElementsKind())) {
if (FLAG_trace_creation_allocation_sites) {
@@ -56,4 +56,4 @@ bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
} // namespace internal
} // namespace v8
-#endif // V8_ALLOCATION_SITE_SCOPES_INL_H_
+#endif // V8_OBJECTS_ALLOCATION_SITE_SCOPES_INL_H_
diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/objects/allocation-site-scopes.h
index b6bc6448fb..8f5fb42986 100644
--- a/deps/v8/src/allocation-site-scopes.h
+++ b/deps/v8/src/objects/allocation-site-scopes.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ALLOCATION_SITE_SCOPES_H_
-#define V8_ALLOCATION_SITE_SCOPES_H_
+#ifndef V8_OBJECTS_ALLOCATION_SITE_SCOPES_H_
+#define V8_OBJECTS_ALLOCATION_SITE_SCOPES_H_
-#include "src/handles.h"
-#include "src/objects.h"
+#include "src/handles/handles.h"
#include "src/objects/allocation-site.h"
#include "src/objects/map.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -17,9 +17,7 @@ namespace internal {
// boilerplate with AllocationSite and AllocationMemento support.
class AllocationSiteContext {
public:
- explicit AllocationSiteContext(Isolate* isolate) {
- isolate_ = isolate;
- }
+ explicit AllocationSiteContext(Isolate* isolate) { isolate_ = isolate; }
Handle<AllocationSite> top() { return top_; }
Handle<AllocationSite> current() { return current_; }
@@ -30,7 +28,7 @@ class AllocationSiteContext {
protected:
void update_current_site(AllocationSite site) {
- *(current_.location()) = site->ptr();
+ *(current_.location()) = site.ptr();
}
inline void InitializeTraversal(Handle<AllocationSite> site);
@@ -41,7 +39,6 @@ class AllocationSiteContext {
Handle<AllocationSite> current_;
};
-
// AllocationSiteUsageContext aids in the creation of AllocationMementos placed
// behind some/all components of a copied object literal.
class AllocationSiteUsageContext : public AllocationSiteContext {
@@ -50,7 +47,7 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
bool activated)
: AllocationSiteContext(isolate),
top_site_(site),
- activated_(activated) { }
+ activated_(activated) {}
inline Handle<AllocationSite> EnterNewScope();
@@ -66,8 +63,7 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
bool activated_;
};
-
} // namespace internal
} // namespace v8
-#endif // V8_ALLOCATION_SITE_SCOPES_H_
+#endif // V8_OBJECTS_ALLOCATION_SITE_SCOPES_H_
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index b221bd02dd..9289a83f70 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_ALLOCATION_SITE_H_
#define V8_OBJECTS_ALLOCATION_SITE_H_
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
@@ -134,25 +134,25 @@ class AllocationSite : public Struct {
static bool ShouldTrack(ElementsKind from, ElementsKind to);
static inline bool CanTrack(InstanceType type);
-// Layout description.
-// AllocationSite has to start with TransitionInfoOrboilerPlateOffset
-// and end with WeakNext field.
-#define ALLOCATION_SITE_FIELDS(V) \
- V(kStartOffset, 0) \
- V(kTransitionInfoOrBoilerplateOffset, kTaggedSize) \
- V(kNestedSiteOffset, kTaggedSize) \
- V(kDependentCodeOffset, kTaggedSize) \
- V(kCommonPointerFieldEndOffset, 0) \
- V(kPretenureDataOffset, kInt32Size) \
- V(kPretenureCreateCountOffset, kInt32Size) \
- /* Size of AllocationSite without WeakNext field */ \
- V(kSizeWithoutWeakNext, 0) \
- V(kWeakNextOffset, kTaggedSize) \
- /* Size of AllocationSite with WeakNext field */ \
- V(kSizeWithWeakNext, 0)
+ // Layout description.
+ // AllocationSite has to start with TransitionInfoOrboilerPlateOffset
+ // and end with WeakNext field.
+ #define ALLOCATION_SITE_FIELDS(V) \
+ V(kStartOffset, 0) \
+ V(kTransitionInfoOrBoilerplateOffset, kTaggedSize) \
+ V(kNestedSiteOffset, kTaggedSize) \
+ V(kDependentCodeOffset, kTaggedSize) \
+ V(kCommonPointerFieldEndOffset, 0) \
+ V(kPretenureDataOffset, kInt32Size) \
+ V(kPretenureCreateCountOffset, kInt32Size) \
+ /* Size of AllocationSite without WeakNext field */ \
+ V(kSizeWithoutWeakNext, 0) \
+ V(kWeakNextOffset, kTaggedSize) \
+ /* Size of AllocationSite with WeakNext field */ \
+ V(kSizeWithWeakNext, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ALLOCATION_SITE_FIELDS)
-#undef ALLOCATION_SITE_FIELDS
+ #undef ALLOCATION_SITE_FIELDS
class BodyDescriptor;
@@ -164,14 +164,9 @@ class AllocationSite : public Struct {
class AllocationMemento : public Struct {
public:
-// Layout description.
-#define ALLOCATION_MEMENTO_FIELDS(V) \
- V(kAllocationSiteOffset, kTaggedSize) \
- V(kSize, 0)
-
+ // Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- ALLOCATION_MEMENTO_FIELDS)
-#undef ALLOCATION_MEMENTO_FIELDS
+ TORQUE_GENERATED_ALLOCATION_MEMENTO_FIELDS)
DECL_ACCESSORS(allocation_site, Object)
diff --git a/deps/v8/src/objects/api-callbacks-inl.h b/deps/v8/src/objects/api-callbacks-inl.h
index 7cd08f7052..041247637a 100644
--- a/deps/v8/src/objects/api-callbacks-inl.h
+++ b/deps/v8/src/objects/api-callbacks-inl.h
@@ -46,7 +46,7 @@ bool AccessorInfo::has_getter() {
bool result = getter() != Smi::kZero;
DCHECK_EQ(result,
getter() != Smi::kZero &&
- Foreign::cast(getter())->foreign_address() != kNullAddress);
+ Foreign::cast(getter()).foreign_address() != kNullAddress);
return result;
}
@@ -54,7 +54,7 @@ bool AccessorInfo::has_setter() {
bool result = setter() != Smi::kZero;
DCHECK_EQ(result,
setter() != Smi::kZero &&
- Foreign::cast(setter())->foreign_address() != kNullAddress);
+ Foreign::cast(setter()).foreign_address() != kNullAddress);
return result;
}
@@ -88,13 +88,13 @@ BIT_FIELD_ACCESSORS(AccessorInfo, flags, initial_property_attributes,
bool AccessorInfo::IsCompatibleReceiver(Object receiver) {
if (!HasExpectedReceiverType()) return true;
- if (!receiver->IsJSObject()) return false;
+ if (!receiver.IsJSObject()) return false;
return FunctionTemplateInfo::cast(expected_receiver_type())
- ->IsTemplateFor(JSObject::cast(receiver)->map());
+ .IsTemplateFor(JSObject::cast(receiver).map());
}
bool AccessorInfo::HasExpectedReceiverType() {
- return expected_receiver_type()->IsFunctionTemplateInfo();
+ return expected_receiver_type().IsFunctionTemplateInfo();
}
ACCESSORS(AccessCheckInfo, callback, Object, kCallbackOffset)
diff --git a/deps/v8/src/objects/arguments-inl.h b/deps/v8/src/objects/arguments-inl.h
index 4132aec04d..c2ef59a896 100644
--- a/deps/v8/src/objects/arguments-inl.h
+++ b/deps/v8/src/objects/arguments-inl.h
@@ -7,10 +7,10 @@
#include "src/objects/arguments.h"
-#include "src/contexts-inl.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/objects/contexts-inl.h"
#include "src/objects/fixed-array-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -62,23 +62,23 @@ bool JSSloppyArgumentsObject::GetSloppyArgumentsLength(Isolate* isolate,
int* out) {
Context context = *isolate->native_context();
Map map = object->map();
- if (map != context->sloppy_arguments_map() &&
- map != context->strict_arguments_map() &&
- map != context->fast_aliased_arguments_map()) {
+ if (map != context.sloppy_arguments_map() &&
+ map != context.strict_arguments_map() &&
+ map != context.fast_aliased_arguments_map()) {
return false;
}
DCHECK(object->HasFastElements() || object->HasFastArgumentsElements());
Object len_obj =
object->InObjectPropertyAt(JSArgumentsObjectWithLength::kLengthIndex);
- if (!len_obj->IsSmi()) return false;
+ if (!len_obj.IsSmi()) return false;
*out = Max(0, Smi::ToInt(len_obj));
FixedArray parameters = FixedArray::cast(object->elements());
if (object->HasSloppyArgumentsElements()) {
- FixedArray arguments = FixedArray::cast(parameters->get(1));
- return *out <= arguments->length();
+ FixedArray arguments = FixedArray::cast(parameters.get(1));
+ return *out <= arguments.length();
}
- return *out <= parameters->length();
+ return *out <= parameters.length();
}
} // namespace internal
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 720820268c..a1d39f1f36 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -8,7 +8,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/struct.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 7b67aa3ffb..92b78f8821 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -19,14 +19,14 @@
#include "src/objects/bigint.h"
-#include "src/conversions.h"
-#include "src/double.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
+#include "src/numbers/double.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/instance-type-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
namespace v8 {
@@ -202,11 +202,11 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
}
inline void initialize_bitfield(bool sign, int length) {
int32_t bitfield = LengthBits::encode(length) | SignBits::encode(sign);
- WRITE_INT32_FIELD(*this, kBitfieldOffset, bitfield);
+ WriteField<int32_t>(kBitfieldOffset, bitfield);
}
inline void set_digit(int n, digit_t value) {
SLOW_DCHECK(0 <= n && n < length());
- WRITE_UINTPTR_FIELD(*this, kDigitsOffset + n * kDigitSize, value);
+ WriteField<digit_t>(kDigitsOffset + n * kDigitSize, value);
}
void set_64_bits(uint64_t bits);
@@ -498,7 +498,7 @@ MaybeHandle<BigInt> BigInt::Multiply(Isolate* isolate, Handle<BigInt> x,
work_estimate = 0;
StackLimitCheck interrupt_check(isolate);
if (interrupt_check.InterruptRequested() &&
- isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
+ isolate->stack_guard()->HandleInterrupts().IsException(isolate)) {
return MaybeHandle<BigInt>();
}
}
@@ -655,10 +655,10 @@ ComparisonResult BigInt::CompareToBigInt(Handle<BigInt> x, Handle<BigInt> y) {
}
bool BigInt::EqualToBigInt(BigInt x, BigInt y) {
- if (x->sign() != y->sign()) return false;
- if (x->length() != y->length()) return false;
- for (int i = 0; i < x->length(); i++) {
- if (x->digit(i) != y->digit(i)) return false;
+ if (x.sign() != y.sign()) return false;
+ if (x.length() != y.length()) return false;
+ for (int i = 0; i < x.length(); i++) {
+ if (x.digit(i) != y.digit(i)) return false;
}
return true;
}
@@ -979,7 +979,7 @@ MaybeHandle<BigInt> BigInt::FromNumber(Isolate* isolate,
if (number->IsSmi()) {
return MutableBigInt::NewFromInt(isolate, Smi::ToInt(*number));
}
- double value = HeapNumber::cast(*number)->value();
+ double value = HeapNumber::cast(*number).value();
if (!std::isfinite(value) || (DoubleToInteger(value) != value)) {
THROW_NEW_ERROR(isolate,
NewRangeError(MessageTemplate::kBigIntFromNumber, number),
@@ -1311,8 +1311,8 @@ inline Handle<MutableBigInt> MutableBigInt::AbsoluteBitwiseOp(
if (result_storage.is_null()) {
result = New(isolate, result_length).ToHandleChecked();
} else {
- DCHECK(result_storage->length() >= result_length);
- result_length = result_storage->length();
+ DCHECK(result_storage.length() >= result_length);
+ result_length = result_storage.length();
}
int i = 0;
for (; i < num_pairs; i++) {
@@ -1428,12 +1428,12 @@ void MutableBigInt::MultiplyAccumulate(Handle<BigIntBase> multiplicand,
void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor,
digit_t summand, int n,
MutableBigInt result) {
- DCHECK(source->length() >= n);
- DCHECK(result->length() >= n);
+ DCHECK(source.length() >= n);
+ DCHECK(result.length() >= n);
digit_t carry = summand;
digit_t high = 0;
for (int i = 0; i < n; i++) {
- digit_t current = source->digit(i);
+ digit_t current = source.digit(i);
digit_t new_carry = 0;
// Compute this round's multiplication.
digit_t new_high = 0;
@@ -1442,15 +1442,15 @@ void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor,
current = digit_add(current, high, &new_carry);
current = digit_add(current, carry, &new_carry);
// Store result and prepare for next round.
- result->set_digit(i, current);
+ result.set_digit(i, current);
carry = new_carry;
high = new_high;
}
- if (result->length() > n) {
- result->set_digit(n++, carry + high);
+ if (result.length() > n) {
+ result.set_digit(n++, carry + high);
// Current callers don't pass in such large results, but let's be robust.
- while (n < result->length()) {
- result->set_digit(n++, 0);
+ while (n < result.length()) {
+ result.set_digit(n++, 0);
}
} else {
CHECK_EQ(carry + high, 0);
@@ -1601,7 +1601,7 @@ bool MutableBigInt::AbsoluteDivLarge(Isolate* isolate,
work_estimate = 0;
StackLimitCheck interrupt_check(isolate);
if (interrupt_check.InterruptRequested() &&
- isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
+ isolate->stack_guard()->HandleInterrupts().IsException(isolate)) {
return false;
}
}
@@ -1949,14 +1949,14 @@ MaybeHandle<BigInt> BigInt::FromSerializedDigits(
void* digits =
reinterpret_cast<void*>(result->ptr() + kDigitsOffset - kHeapObjectTag);
#if defined(V8_TARGET_LITTLE_ENDIAN)
- memcpy(digits, digits_storage.start(), bytelength);
+ memcpy(digits, digits_storage.begin(), bytelength);
void* padding_start =
reinterpret_cast<void*>(reinterpret_cast<Address>(digits) + bytelength);
memset(padding_start, 0, length * kDigitSize - bytelength);
#elif defined(V8_TARGET_BIG_ENDIAN)
digit_t* digit = reinterpret_cast<digit_t*>(digits);
const digit_t* digit_storage =
- reinterpret_cast<const digit_t*>(digits_storage.start());
+ reinterpret_cast<const digit_t*>(digits_storage.begin());
for (int i = 0; i < bytelength / kDigitSize; i++) {
*digit = ByteReverse(*digit_storage);
digit_storage++;
@@ -2146,7 +2146,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
if (interrupt_check.InterruptRequested()) {
{
AllowHeapAllocation might_throw;
- if (isolate->stack_guard()->HandleInterrupts()->IsException(
+ if (isolate->stack_guard()->HandleInterrupts().IsException(
isolate)) {
return MaybeHandle<String>();
}
@@ -2156,7 +2156,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
chars = result->GetChars(no_gc);
}
if (interrupt_check.InterruptRequested() &&
- isolate->stack_guard()->HandleInterrupts()->IsException(isolate)) {
+ isolate->stack_guard()->HandleInterrupts().IsException(isolate)) {
return MaybeHandle<String>();
}
}
@@ -2457,16 +2457,16 @@ void BigInt::ToWordsArray64(int* sign_bit, int* words64_count,
uint64_t MutableBigInt::GetRawBits(BigIntBase x, bool* lossless) {
if (lossless != nullptr) *lossless = true;
- if (x->is_zero()) return 0;
- int len = x->length();
+ if (x.is_zero()) return 0;
+ int len = x.length();
STATIC_ASSERT(kDigitBits == 64 || kDigitBits == 32);
if (lossless != nullptr && len > 64 / kDigitBits) *lossless = false;
- uint64_t raw = static_cast<uint64_t>(x->digit(0));
+ uint64_t raw = static_cast<uint64_t>(x.digit(0));
if (kDigitBits == 32 && len > 1) {
- raw |= static_cast<uint64_t>(x->digit(1)) << 32;
+ raw |= static_cast<uint64_t>(x.digit(1)) << 32;
}
// Simulate two's complement. MSVC dislikes "-raw".
- return x->sign() ? ((~raw) + 1u) : raw;
+ return x.sign() ? ((~raw) + 1u) : raw;
}
int64_t BigInt::AsInt64(bool* lossless) {
diff --git a/deps/v8/src/objects/bigint.h b/deps/v8/src/objects/bigint.h
index e59c7d6982..3f5d35878b 100644
--- a/deps/v8/src/objects/bigint.h
+++ b/deps/v8/src/objects/bigint.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_BIGINT_H_
#define V8_OBJECTS_BIGINT_H_
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
#include "src/objects/heap-object.h"
-#include "src/utils.h"
+#include "src/objects/objects.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -87,7 +87,7 @@ class BigIntBase : public HeapObject {
inline digit_t digit(int n) const {
SLOW_DCHECK(0 <= n && n < length());
- return READ_UINTPTR_FIELD(*this, kDigitsOffset + n * kDigitSize);
+ return ReadField<digit_t>(kDigitsOffset + n * kDigitSize);
}
bool is_zero() const { return length() == 0; }
diff --git a/deps/v8/src/objects/cell-inl.h b/deps/v8/src/objects/cell-inl.h
index c48a82fd31..90266b7599 100644
--- a/deps/v8/src/objects/cell-inl.h
+++ b/deps/v8/src/objects/cell-inl.h
@@ -8,7 +8,7 @@
#include "src/objects/cell.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
index c15b31a61c..9c77f5d332 100644
--- a/deps/v8/src/objects/cell.h
+++ b/deps/v8/src/objects/cell.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_CELL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index a0dc3b3ae1..0877746d11 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -7,16 +7,16 @@
#include "src/objects/code.h"
-#include "src/code-desc.h"
+#include "src/codegen/code-desc.h"
+#include "src/common/v8memory.h"
+#include "src/execution/isolate.h"
#include "src/interpreter/bytecode-register.h"
-#include "src/isolate.h"
#include "src/objects/dictionary.h"
#include "src/objects/instance-type-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/maybe-object-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/smi-inl.h"
-#include "src/v8memory.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -29,7 +29,7 @@ OBJECT_CONSTRUCTORS_IMPL(BytecodeArray, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(AbstractCode, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(DependentCode, WeakFixedArray)
OBJECT_CONSTRUCTORS_IMPL(CodeDataContainer, HeapObject)
-OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache, Tuple2)
+OBJECT_CONSTRUCTORS_IMPL(SourcePositionTableWithFrameCache, Struct)
NEVER_READ_ONLY_SPACE_IMPL(AbstractCode)
@@ -42,94 +42,94 @@ CAST_ACCESSOR(DeoptimizationData)
CAST_ACCESSOR(SourcePositionTableWithFrameCache)
ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
- kSourcePositionTableIndex)
+ kSourcePositionTableOffset)
ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
- SimpleNumberDictionary, kStackFrameCacheIndex)
+ SimpleNumberDictionary, kStackFrameCacheOffset)
int AbstractCode::raw_instruction_size() {
if (IsCode()) {
- return GetCode()->raw_instruction_size();
+ return GetCode().raw_instruction_size();
} else {
- return GetBytecodeArray()->length();
+ return GetBytecodeArray().length();
}
}
int AbstractCode::InstructionSize() {
if (IsCode()) {
- return GetCode()->InstructionSize();
+ return GetCode().InstructionSize();
} else {
- return GetBytecodeArray()->length();
+ return GetBytecodeArray().length();
}
}
ByteArray AbstractCode::source_position_table() {
if (IsCode()) {
- return GetCode()->SourcePositionTable();
+ return GetCode().SourcePositionTable();
} else {
- return GetBytecodeArray()->SourcePositionTable();
+ return GetBytecodeArray().SourcePositionTable();
}
}
Object AbstractCode::stack_frame_cache() {
Object maybe_table;
if (IsCode()) {
- maybe_table = GetCode()->source_position_table();
+ maybe_table = GetCode().source_position_table();
} else {
- maybe_table = GetBytecodeArray()->source_position_table();
+ maybe_table = GetBytecodeArray().source_position_table();
}
- if (maybe_table->IsSourcePositionTableWithFrameCache()) {
+ if (maybe_table.IsSourcePositionTableWithFrameCache()) {
return SourcePositionTableWithFrameCache::cast(maybe_table)
- ->stack_frame_cache();
+ .stack_frame_cache();
}
return Smi::kZero;
}
int AbstractCode::SizeIncludingMetadata() {
if (IsCode()) {
- return GetCode()->SizeIncludingMetadata();
+ return GetCode().SizeIncludingMetadata();
} else {
- return GetBytecodeArray()->SizeIncludingMetadata();
+ return GetBytecodeArray().SizeIncludingMetadata();
}
}
int AbstractCode::ExecutableSize() {
if (IsCode()) {
- return GetCode()->ExecutableSize();
+ return GetCode().ExecutableSize();
} else {
- return GetBytecodeArray()->BytecodeArraySize();
+ return GetBytecodeArray().BytecodeArraySize();
}
}
Address AbstractCode::raw_instruction_start() {
if (IsCode()) {
- return GetCode()->raw_instruction_start();
+ return GetCode().raw_instruction_start();
} else {
- return GetBytecodeArray()->GetFirstBytecodeAddress();
+ return GetBytecodeArray().GetFirstBytecodeAddress();
}
}
Address AbstractCode::InstructionStart() {
if (IsCode()) {
- return GetCode()->InstructionStart();
+ return GetCode().InstructionStart();
} else {
- return GetBytecodeArray()->GetFirstBytecodeAddress();
+ return GetBytecodeArray().GetFirstBytecodeAddress();
}
}
Address AbstractCode::raw_instruction_end() {
if (IsCode()) {
- return GetCode()->raw_instruction_end();
+ return GetCode().raw_instruction_end();
} else {
- return GetBytecodeArray()->GetFirstBytecodeAddress() +
- GetBytecodeArray()->length();
+ return GetBytecodeArray().GetFirstBytecodeAddress() +
+ GetBytecodeArray().length();
}
}
Address AbstractCode::InstructionEnd() {
if (IsCode()) {
- return GetCode()->InstructionEnd();
+ return GetCode().InstructionEnd();
} else {
- return GetBytecodeArray()->GetFirstBytecodeAddress() +
- GetBytecodeArray()->length();
+ return GetBytecodeArray().GetFirstBytecodeAddress() +
+ GetBytecodeArray().length();
}
}
@@ -139,7 +139,7 @@ bool AbstractCode::contains(Address inner_pointer) {
AbstractCode::Kind AbstractCode::kind() {
if (IsCode()) {
- return static_cast<AbstractCode::Kind>(GetCode()->kind());
+ return static_cast<AbstractCode::Kind>(GetCode().kind());
} else {
return INTERPRETED_FUNCTION;
}
@@ -236,26 +236,26 @@ void Code::clear_padding() {
ByteArray Code::SourcePositionTableIfCollected() const {
ReadOnlyRoots roots = GetReadOnlyRoots();
Object maybe_table = source_position_table();
- if (maybe_table->IsUndefined(roots) || maybe_table->IsException(roots))
+ if (maybe_table.IsUndefined(roots) || maybe_table.IsException(roots))
return roots.empty_byte_array();
return SourcePositionTable();
}
ByteArray Code::SourcePositionTable() const {
Object maybe_table = source_position_table();
- DCHECK(!maybe_table->IsUndefined() && !maybe_table->IsException());
- if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ DCHECK(!maybe_table.IsUndefined() && !maybe_table.IsException());
+ if (maybe_table.IsByteArray()) return ByteArray::cast(maybe_table);
+ DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
return SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table();
+ .source_position_table();
}
Object Code::next_code_link() const {
- return code_data_container()->next_code_link();
+ return code_data_container().next_code_link();
}
void Code::set_next_code_link(Object value) {
- code_data_container()->set_next_code_link(value);
+ code_data_container().set_next_code_link(value);
}
int Code::InstructionSize() const {
@@ -297,13 +297,12 @@ int Code::GetUnwindingInfoSizeOffset() const {
int Code::unwinding_info_size() const {
DCHECK(has_unwinding_info());
- return static_cast<int>(
- READ_UINT64_FIELD(*this, GetUnwindingInfoSizeOffset()));
+ return static_cast<int>(ReadField<uint64_t>(GetUnwindingInfoSizeOffset()));
}
void Code::set_unwinding_info_size(int value) {
DCHECK(has_unwinding_info());
- WRITE_UINT64_FIELD(*this, GetUnwindingInfoSizeOffset(), value);
+ WriteField<uint64_t>(GetUnwindingInfoSizeOffset(), value);
}
Address Code::unwinding_info_start() const {
@@ -326,8 +325,8 @@ int Code::body_size() const {
int Code::SizeIncludingMetadata() const {
int size = CodeSize();
- size += relocation_info()->Size();
- size += deoptimization_data()->Size();
+ size += relocation_info().Size();
+ size += deoptimization_data().Size();
return size;
}
@@ -336,15 +335,15 @@ ByteArray Code::unchecked_relocation_info() const {
}
byte* Code::relocation_start() const {
- return unchecked_relocation_info()->GetDataStartAddress();
+ return unchecked_relocation_info().GetDataStartAddress();
}
byte* Code::relocation_end() const {
- return unchecked_relocation_info()->GetDataEndAddress();
+ return unchecked_relocation_info().GetDataEndAddress();
}
int Code::relocation_size() const {
- return unchecked_relocation_info()->length();
+ return unchecked_relocation_info().length();
}
Address Code::entry() const { return raw_instruction_start(); }
@@ -369,8 +368,8 @@ int Code::ExecutableSize() const {
// static
void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
- DCHECK_EQ(dest->length(), desc.reloc_size);
- CopyBytes(dest->GetDataStartAddress(),
+ DCHECK_EQ(dest.length(), desc.reloc_size);
+ CopyBytes(dest.GetDataStartAddress(),
desc.buffer + desc.buffer_size - desc.reloc_size,
static_cast<size_t>(desc.reloc_size));
}
@@ -378,7 +377,7 @@ void Code::CopyRelocInfoToByteArray(ByteArray dest, const CodeDesc& desc) {
int Code::CodeSize() const { return SizeFor(body_size()); }
Code::Kind Code::kind() const {
- return KindField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return KindField::decode(ReadField<uint32_t>(kFlagsOffset));
}
void Code::initialize_flags(Kind kind, bool has_unwinding_info,
@@ -391,7 +390,7 @@ void Code::initialize_flags(Kind kind, bool has_unwinding_info,
IsTurbofannedField::encode(is_turbofanned) |
StackSlotsField::encode(stack_slots) |
IsOffHeapTrampoline::encode(is_off_heap_trampoline);
- WRITE_UINT32_FIELD(*this, kFlagsOffset, flags);
+ WriteField<uint32_t>(kFlagsOffset, flags);
DCHECK_IMPLIES(stack_slots != 0, has_safepoint_info());
}
@@ -417,54 +416,54 @@ inline bool Code::has_tagged_params() const {
}
inline bool Code::has_unwinding_info() const {
- return HasUnwindingInfoField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return HasUnwindingInfoField::decode(ReadField<uint32_t>(kFlagsOffset));
}
inline bool Code::is_turbofanned() const {
- return IsTurbofannedField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return IsTurbofannedField::decode(ReadField<uint32_t>(kFlagsOffset));
}
inline bool Code::can_have_weak_objects() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return CanHaveWeakObjectsField::decode(flags);
}
inline void Code::set_can_have_weak_objects(bool value) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = CanHaveWeakObjectsField::update(previous, value);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
inline bool Code::is_promise_rejection() const {
DCHECK(kind() == BUILTIN);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return IsPromiseRejectionField::decode(flags);
}
inline void Code::set_is_promise_rejection(bool value) {
DCHECK(kind() == BUILTIN);
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = IsPromiseRejectionField::update(previous, value);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
inline bool Code::is_exception_caught() const {
DCHECK(kind() == BUILTIN);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return IsExceptionCaughtField::decode(flags);
}
inline void Code::set_is_exception_caught(bool value) {
DCHECK(kind() == BUILTIN);
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = IsExceptionCaughtField::update(previous, value);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
inline bool Code::is_off_heap_trampoline() const {
- return IsOffHeapTrampoline::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return IsOffHeapTrampoline::decode(ReadField<uint32_t>(kFlagsOffset));
}
inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
@@ -474,14 +473,14 @@ inline HandlerTable::CatchPrediction Code::GetBuiltinCatchPrediction() {
}
int Code::builtin_index() const {
- int index = READ_INT_FIELD(*this, kBuiltinIndexOffset);
+ int index = ReadField<int>(kBuiltinIndexOffset);
DCHECK(index == -1 || Builtins::IsBuiltinId(index));
return index;
}
void Code::set_builtin_index(int index) {
DCHECK(index == -1 || Builtins::IsBuiltinId(index));
- WRITE_INT_FIELD(*this, kBuiltinIndexOffset, index);
+ WriteField<int>(kBuiltinIndexOffset, index);
}
bool Code::is_builtin() const { return builtin_index() != -1; }
@@ -492,49 +491,49 @@ bool Code::has_safepoint_info() const {
int Code::stack_slots() const {
DCHECK(has_safepoint_info());
- return StackSlotsField::decode(READ_UINT32_FIELD(*this, kFlagsOffset));
+ return StackSlotsField::decode(ReadField<uint32_t>(kFlagsOffset));
}
bool Code::marked_for_deoptimization() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return MarkedForDeoptimizationField::decode(flags);
}
void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = MarkedForDeoptimizationField::update(previous, flag);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
bool Code::embedded_objects_cleared() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return EmbeddedObjectsClearedField::decode(flags);
}
void Code::set_embedded_objects_cleared(bool flag) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
DCHECK_IMPLIES(flag, marked_for_deoptimization());
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = EmbeddedObjectsClearedField::update(previous, flag);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
bool Code::deopt_already_counted() const {
DCHECK(kind() == OPTIMIZED_FUNCTION);
- int32_t flags = code_data_container()->kind_specific_flags();
+ int32_t flags = code_data_container().kind_specific_flags();
return DeoptAlreadyCountedField::decode(flags);
}
void Code::set_deopt_already_counted(bool flag) {
DCHECK(kind() == OPTIMIZED_FUNCTION);
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
- int32_t previous = code_data_container()->kind_specific_flags();
+ int32_t previous = code_data_container().kind_specific_flags();
int32_t updated = DeoptAlreadyCountedField::update(previous, flag);
- code_data_container()->set_kind_specific_flags(updated);
+ code_data_container().set_kind_specific_flags(updated);
}
bool Code::is_optimized_code() const { return kind() == OPTIMIZED_FUNCTION; }
@@ -542,13 +541,13 @@ bool Code::is_wasm_code() const { return kind() == WASM_FUNCTION; }
int Code::constant_pool_offset() const {
if (!FLAG_enable_embedded_constant_pool) return code_comments_offset();
- return READ_INT_FIELD(*this, kConstantPoolOffsetOffset);
+ return ReadField<int>(kConstantPoolOffsetOffset);
}
void Code::set_constant_pool_offset(int value) {
if (!FLAG_enable_embedded_constant_pool) return;
DCHECK_LE(value, InstructionSize());
- WRITE_INT_FIELD(*this, kConstantPoolOffsetOffset, value);
+ WriteField<int>(kConstantPoolOffsetOffset, value);
}
Address Code::constant_pool() const {
@@ -592,10 +591,10 @@ bool Code::IsWeakObject(HeapObject object) {
}
bool Code::IsWeakObjectInOptimizedCode(HeapObject object) {
- Map map = object->synchronized_map();
- InstanceType instance_type = map->instance_type();
+ Map map = object.synchronized_map();
+ InstanceType instance_type = map.instance_type();
if (InstanceTypeChecker::IsMap(instance_type)) {
- return Map::cast(object)->CanTransition();
+ return Map::cast(object).CanTransition();
}
return InstanceTypeChecker::IsPropertyCell(instance_type) ||
InstanceTypeChecker::IsJSReceiver(instance_type) ||
@@ -615,22 +614,22 @@ void CodeDataContainer::clear_padding() {
byte BytecodeArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize);
+ return ReadField<byte>(kHeaderSize + index * kCharSize);
}
void BytecodeArray::set(int index, byte value) {
DCHECK(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize, value);
+ WriteField<byte>(kHeaderSize + index * kCharSize, value);
}
void BytecodeArray::set_frame_size(int frame_size) {
DCHECK_GE(frame_size, 0);
DCHECK(IsAligned(frame_size, kSystemPointerSize));
- WRITE_INT_FIELD(*this, kFrameSizeOffset, frame_size);
+ WriteField<int>(kFrameSizeOffset, frame_size);
}
int BytecodeArray::frame_size() const {
- return READ_INT_FIELD(*this, kFrameSizeOffset);
+ return ReadField<int>(kFrameSizeOffset);
}
int BytecodeArray::register_count() const {
@@ -641,14 +640,14 @@ void BytecodeArray::set_parameter_count(int number_of_parameters) {
DCHECK_GE(number_of_parameters, 0);
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- WRITE_INT_FIELD(*this, kParameterSizeOffset,
+ WriteField<int>(kParameterSizeOffset,
(number_of_parameters << kSystemPointerSizeLog2));
}
interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
const {
int register_operand =
- READ_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset);
+ ReadField<int>(kIncomingNewTargetOrGeneratorRegisterOffset);
if (register_operand == 0) {
return interpreter::Register::invalid_value();
} else {
@@ -659,24 +658,24 @@ interpreter::Register BytecodeArray::incoming_new_target_or_generator_register()
void BytecodeArray::set_incoming_new_target_or_generator_register(
interpreter::Register incoming_new_target_or_generator_register) {
if (!incoming_new_target_or_generator_register.is_valid()) {
- WRITE_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset, 0);
+ WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset, 0);
} else {
DCHECK(incoming_new_target_or_generator_register.index() <
register_count());
DCHECK_NE(0, incoming_new_target_or_generator_register.ToOperand());
- WRITE_INT_FIELD(*this, kIncomingNewTargetOrGeneratorRegisterOffset,
+ WriteField<int>(kIncomingNewTargetOrGeneratorRegisterOffset,
incoming_new_target_or_generator_register.ToOperand());
}
}
int BytecodeArray::osr_loop_nesting_level() const {
- return READ_INT8_FIELD(*this, kOSRNestingLevelOffset);
+ return ReadField<int8_t>(kOSRNestingLevelOffset);
}
void BytecodeArray::set_osr_loop_nesting_level(int depth) {
DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
STATIC_ASSERT(AbstractCode::kMaxLoopNestingMarker < kMaxInt8);
- WRITE_INT8_FIELD(*this, kOSRNestingLevelOffset, depth);
+ WriteField<int8_t>(kOSRNestingLevelOffset, depth);
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {
@@ -695,7 +694,7 @@ void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
int BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
- return READ_INT_FIELD(*this, kParameterSizeOffset) >> kSystemPointerSizeLog2;
+ return ReadField<int>(kParameterSizeOffset) >> kSystemPointerSizeLog2;
}
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
@@ -715,11 +714,11 @@ Address BytecodeArray::GetFirstBytecodeAddress() {
bool BytecodeArray::HasSourcePositionTable() const {
Object maybe_table = source_position_table();
- return !(maybe_table->IsUndefined() || DidSourcePositionGenerationFail());
+ return !(maybe_table.IsUndefined() || DidSourcePositionGenerationFail());
}
bool BytecodeArray::DidSourcePositionGenerationFail() const {
- return source_position_table()->IsException();
+ return source_position_table().IsException();
}
void BytecodeArray::SetSourcePositionsFailedToCollect() {
@@ -728,14 +727,14 @@ void BytecodeArray::SetSourcePositionsFailedToCollect() {
ByteArray BytecodeArray::SourcePositionTable() const {
Object maybe_table = source_position_table();
- if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
+ if (maybe_table.IsByteArray()) return ByteArray::cast(maybe_table);
ReadOnlyRoots roots = GetReadOnlyRoots();
- if (maybe_table->IsException(roots)) return roots.empty_byte_array();
+ if (maybe_table.IsException(roots)) return roots.empty_byte_array();
- DCHECK(!maybe_table->IsUndefined(roots));
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ DCHECK(!maybe_table.IsUndefined(roots));
+ DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
return SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table();
+ .source_position_table();
}
ByteArray BytecodeArray::SourcePositionTableIfCollected() const {
@@ -746,20 +745,20 @@ ByteArray BytecodeArray::SourcePositionTableIfCollected() const {
void BytecodeArray::ClearFrameCacheFromSourcePositionTable() {
Object maybe_table = source_position_table();
- if (maybe_table->IsUndefined() || maybe_table->IsByteArray()) return;
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ if (maybe_table.IsUndefined() || maybe_table.IsByteArray()) return;
+ DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
set_source_position_table(SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table());
+ .source_position_table());
}
int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
int BytecodeArray::SizeIncludingMetadata() {
int size = BytecodeArraySize();
- size += constant_pool()->Size();
- size += handler_table()->Size();
+ size += constant_pool().Size();
+ size += handler_table().Size();
if (HasSourcePositionTable()) {
- size += SourcePositionTable()->Size();
+ size += SourcePositionTable().Size();
}
return size;
}
@@ -777,7 +776,7 @@ DEFINE_DEOPT_ENTRY_ACCESSORS(TranslationIndex, Smi)
DEFINE_DEOPT_ENTRY_ACCESSORS(Pc, Smi)
BailoutId DeoptimizationData::BytecodeOffset(int i) {
- return BailoutId(BytecodeOffsetRaw(i)->value());
+ return BailoutId(BytecodeOffsetRaw(i).value());
}
void DeoptimizationData::SetBytecodeOffset(int i, BailoutId value) {
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index ab929db8a7..89180693a5 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -6,24 +6,24 @@
#include "src/objects/code.h"
-#include "src/assembler-inl.h"
-#include "src/cpu-features.h"
-#include "src/deoptimizer.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/cpu-features.h"
+#include "src/codegen/reloc-info.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/allocation-site-inl.h"
-#include "src/ostreams.h"
-#include "src/reloc-info.h"
-#include "src/roots-inl.h"
-#include "src/safepoint-table.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/roots/roots-inl.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/utils/ostreams.h"
#ifdef ENABLE_DISASSEMBLER
-#include "src/code-comments.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/eh-frame.h"
+#include "src/codegen/code-comments.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/diagnostics/eh-frame.h"
#endif
namespace v8 {
@@ -63,12 +63,10 @@ int Code::ExecutableInstructionSize() const { return safepoint_table_offset(); }
void Code::ClearEmbeddedObjects(Heap* heap) {
HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ int mode_mask = RelocInfo::EmbeddedObjectModeMask();
for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
- it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
- }
+ DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));
+ it.rinfo()->set_target_object(heap, undefined, SKIP_WRITE_BARRIER);
}
set_embedded_objects_cleared(true);
}
@@ -107,7 +105,7 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
const int mode_mask = RelocInfo::PostCodegenRelocationMask();
for (RelocIterator it(*this, mode_mask); !it.done(); it.next()) {
RelocInfo::Mode mode = it.rinfo()->rmode();
- if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ if (RelocInfo::IsEmbeddedObjectMode(mode)) {
Handle<HeapObject> p = it.rinfo()->target_object_handle(origin);
it.rinfo()->set_target_object(heap, *p, UPDATE_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
@@ -116,7 +114,7 @@ void Code::CopyFromNoFlush(Heap* heap, const CodeDesc& desc) {
// code object.
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code code = Code::cast(*p);
- it.rinfo()->set_target_address(code->raw_instruction_start(),
+ it.rinfo()->set_target_address(code.raw_instruction_start(),
UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
Address p = it.rinfo()->target_runtime_entry(origin);
@@ -194,12 +192,12 @@ void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
namespace {
template <typename Code>
void DropStackFrameCacheCommon(Code code) {
- i::Object maybe_table = code->source_position_table();
- if (maybe_table->IsUndefined() || maybe_table->IsByteArray()) return;
- DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
- code->set_source_position_table(
+ i::Object maybe_table = code.source_position_table();
+ if (maybe_table.IsUndefined() || maybe_table.IsByteArray()) return;
+ DCHECK(maybe_table.IsSourcePositionTableWithFrameCache());
+ code.set_source_position_table(
i::SourcePositionTableWithFrameCache::cast(maybe_table)
- ->source_position_table());
+ .source_position_table());
}
} // namespace
@@ -213,7 +211,7 @@ void AbstractCode::DropStackFrameCache() {
int AbstractCode::SourcePosition(int offset) {
Object maybe_table = source_position_table();
- if (maybe_table->IsException()) return kNoSourcePosition;
+ if (maybe_table.IsException()) return kNoSourcePosition;
ByteArray source_position_table = ByteArray::cast(maybe_table);
int position = 0;
@@ -259,10 +257,10 @@ bool Code::CanDeoptAt(Address pc) {
DeoptimizationData deopt_data =
DeoptimizationData::cast(deoptimization_data());
Address code_start_address = InstructionStart();
- for (int i = 0; i < deopt_data->DeoptCount(); i++) {
- if (deopt_data->Pc(i)->value() == -1) continue;
- Address address = code_start_address + deopt_data->Pc(i)->value();
- if (address == pc && deopt_data->BytecodeOffset(i) != BailoutId::None()) {
+ for (int i = 0; i < deopt_data.DeoptCount(); i++) {
+ if (deopt_data.Pc(i).value() == -1) continue;
+ Address address = code_start_address + deopt_data.Pc(i).value();
+ if (address == pc && deopt_data.BytecodeOffset(i) != BailoutId::None()) {
return true;
}
}
@@ -302,7 +300,8 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
STATIC_ASSERT(mode_mask ==
(RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
@@ -324,7 +323,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
Code target = Code::GetCodeFromTargetAddress(target_address);
- CHECK(target->IsCode());
+ CHECK(target.IsCode());
if (Builtins::IsIsolateIndependentBuiltin(target)) continue;
}
#endif
@@ -340,12 +339,12 @@ bool Code::Inlines(SharedFunctionInfo sfi) {
DisallowHeapAllocation no_gc;
DeoptimizationData const data =
DeoptimizationData::cast(deoptimization_data());
- if (data->length() == 0) return false;
- if (data->SharedFunctionInfo() == sfi) return true;
- FixedArray const literals = data->LiteralArray();
- int const inlined_count = data->InlinedFunctionCount()->value();
+ if (data.length() == 0) return false;
+ if (data.SharedFunctionInfo() == sfi) return true;
+ FixedArray const literals = data.LiteralArray();
+ int const inlined_count = data.InlinedFunctionCount().value();
for (int i = 0; i < inlined_count; ++i) {
- if (SharedFunctionInfo::cast(literals->get(i)) == sfi) return true;
+ if (SharedFunctionInfo::cast(literals.get(i)) == sfi) return true;
}
return false;
}
@@ -353,7 +352,7 @@ bool Code::Inlines(SharedFunctionInfo sfi) {
Code::OptimizedCodeIterator::OptimizedCodeIterator(Isolate* isolate) {
isolate_ = isolate;
Object list = isolate->heap()->native_contexts_list();
- next_context_ = list->IsUndefined(isolate_) ? Context() : Context::cast(list);
+ next_context_ = list.IsUndefined(isolate_) ? Context() : Context::cast(list);
}
Code Code::OptimizedCodeIterator::Next() {
@@ -361,21 +360,21 @@ Code Code::OptimizedCodeIterator::Next() {
Object next;
if (!current_code_.is_null()) {
// Get next code in the linked list.
- next = current_code_->next_code_link();
+ next = current_code_.next_code_link();
} else if (!next_context_.is_null()) {
// Linked list of code exhausted. Get list of next context.
- next = next_context_->OptimizedCodeListHead();
- Object next_context = next_context_->next_context_link();
- next_context_ = next_context->IsUndefined(isolate_)
+ next = next_context_.OptimizedCodeListHead();
+ Object next_context = next_context_.next_context_link();
+ next_context_ = next_context.IsUndefined(isolate_)
? Context()
: Context::cast(next_context);
} else {
// Exhausted contexts.
return Code();
}
- current_code_ = next->IsUndefined(isolate_) ? Code() : Code::cast(next);
+ current_code_ = next.IsUndefined(isolate_) ? Code() : Code::cast(next);
} while (current_code_.is_null());
- DCHECK_EQ(Code::OPTIMIZED_FUNCTION, current_code_->kind());
+ DCHECK_EQ(Code::OPTIMIZED_FUNCTION, current_code_.kind());
return current_code_;
}
@@ -395,7 +394,7 @@ SharedFunctionInfo DeoptimizationData::GetInlinedFunction(int index) {
if (index == -1) {
return SharedFunctionInfo::cast(SharedFunctionInfo());
} else {
- return SharedFunctionInfo::cast(LiteralArray()->get(index));
+ return SharedFunctionInfo::cast(LiteralArray().get(index));
}
}
@@ -428,10 +427,10 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
disasm::NameConverter converter;
- int const inlined_function_count = InlinedFunctionCount()->value();
+ int const inlined_function_count = InlinedFunctionCount().value();
os << "Inlined functions (count = " << inlined_function_count << ")\n";
for (int id = 0; id < inlined_function_count; ++id) {
- Object info = LiteralArray()->get(id);
+ Object info = LiteralArray().get(id);
os << " " << Brief(SharedFunctionInfo::cast(info)) << "\n";
}
os << "\n";
@@ -445,7 +444,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
for (int i = 0; i < deopt_count; i++) {
os << std::setw(6) << i << " " << std::setw(15)
<< BytecodeOffset(i).ToInt() << " " << std::setw(4);
- print_pc(os, Pc(i)->value());
+ print_pc(os, Pc(i).value());
os << std::setw(2);
if (!FLAG_print_code_verbose) {
@@ -454,7 +453,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
}
// Print details of the frame translation.
- int translation_index = TranslationIndex(i)->value();
+ int translation_index = TranslationIndex(i).value();
TranslationIterator iterator(TranslationByteArray(), translation_index);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
@@ -483,9 +482,9 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
unsigned height = iterator.Next();
int return_value_offset = iterator.Next();
int return_value_count = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray().get(shared_info_id);
os << "{bytecode_offset=" << bytecode_offset << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << Brief(SharedFunctionInfo::cast(shared_info).DebugName())
<< ", height=" << height << ", retval=@" << return_value_offset
<< "(#" << return_value_count << ")}";
break;
@@ -494,10 +493,10 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::CONSTRUCT_STUB_FRAME: {
int bailout_id = iterator.Next();
int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray().get(shared_info_id);
unsigned height = iterator.Next();
os << "{bailout_id=" << bailout_id << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << Brief(SharedFunctionInfo::cast(shared_info).DebugName())
<< ", height=" << height << "}";
break;
}
@@ -507,20 +506,20 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH_FRAME: {
int bailout_id = iterator.Next();
int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray().get(shared_info_id);
unsigned height = iterator.Next();
os << "{bailout_id=" << bailout_id << ", function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << Brief(SharedFunctionInfo::cast(shared_info).DebugName())
<< ", height=" << height << "}";
break;
}
case Translation::ARGUMENTS_ADAPTOR_FRAME: {
int shared_info_id = iterator.Next();
- Object shared_info = LiteralArray()->get(shared_info_id);
+ Object shared_info = LiteralArray().get(shared_info_id);
unsigned height = iterator.Next();
os << "{function="
- << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+ << Brief(SharedFunctionInfo::cast(shared_info).DebugName())
<< ", height=" << height << "}";
break;
}
@@ -610,7 +609,7 @@ void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
case Translation::LITERAL: {
int literal_index = iterator.Next();
- Object literal_value = LiteralArray()->get(literal_index);
+ Object literal_value = LiteralArray().get(literal_index);
os << "{literal_id=" << literal_index << " (" << Brief(literal_value)
<< ")}";
break;
@@ -705,7 +704,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
constant_pool_offset());
for (int i = 0; i < pool_size; i += kSystemPointerSize, ptr++) {
SNPrintF(buf, "%4d %08" V8PRIxPTR, i, *ptr);
- os << static_cast<const void*>(ptr) << " " << buf.start() << "\n";
+ os << static_cast<const void*>(ptr) << " " << buf.begin() << "\n";
}
}
}
@@ -744,7 +743,7 @@ void Code::Disassemble(const char* name, std::ostream& os, Address current_pc) {
if (kind() == OPTIMIZED_FUNCTION) {
DeoptimizationData data =
DeoptimizationData::cast(this->deoptimization_data());
- data->DeoptimizationDataPrint(os);
+ data.DeoptimizationDataPrint(os);
}
os << "\n";
@@ -854,16 +853,16 @@ void BytecodeArray::Disassemble(std::ostream& os) {
iterator.Advance();
}
- os << "Constant pool (size = " << constant_pool()->length() << ")\n";
+ os << "Constant pool (size = " << constant_pool().length() << ")\n";
#ifdef OBJECT_PRINT
- if (constant_pool()->length() > 0) {
- constant_pool()->Print();
+ if (constant_pool().length() > 0) {
+ constant_pool().Print();
}
#endif
- os << "Handler Table (size = " << handler_table()->length() << ")\n";
+ os << "Handler Table (size = " << handler_table().length() << ")\n";
#ifdef ENABLE_DISASSEMBLER
- if (handler_table()->length() > 0) {
+ if (handler_table().length() > 0) {
HandlerTable table(*this);
table.HandlerTableRangePrint(os);
}
@@ -872,10 +871,10 @@ void BytecodeArray::Disassemble(std::ostream& os) {
void BytecodeArray::CopyBytecodesTo(BytecodeArray to) {
BytecodeArray from = *this;
- DCHECK_EQ(from->length(), to->length());
- CopyBytes(reinterpret_cast<byte*>(to->GetFirstBytecodeAddress()),
- reinterpret_cast<byte*>(from->GetFirstBytecodeAddress()),
- from->length());
+ DCHECK_EQ(from.length(), to.length());
+ CopyBytes(reinterpret_cast<byte*>(to.GetFirstBytecodeAddress()),
+ reinterpret_cast<byte*>(from.GetFirstBytecodeAddress()),
+ from.length());
}
void BytecodeArray::MakeOlder() {
@@ -1018,7 +1017,7 @@ bool DependentCode::MarkCodeForDeoptimization(
}
if (this->group() < group) {
// The group comes later in the list.
- return next_link()->MarkCodeForDeoptimization(isolate, group);
+ return next_link().MarkCodeForDeoptimization(isolate, group);
}
DCHECK_EQ(group, this->group());
DisallowHeapAllocation no_allocation_scope;
@@ -1029,8 +1028,8 @@ bool DependentCode::MarkCodeForDeoptimization(
MaybeObject obj = object_at(i);
if (obj->IsCleared()) continue;
Code code = Code::cast(obj->GetHeapObjectAssumeWeak());
- if (!code->marked_for_deoptimization()) {
- code->SetMarkedForDeoptimization(DependencyGroupName(group));
+ if (!code.marked_for_deoptimization()) {
+ code.SetMarkedForDeoptimization(DependencyGroupName(group));
marked = true;
}
}
@@ -1061,7 +1060,7 @@ void Code::SetMarkedForDeoptimization(const char* reason) {
PrintF(scope.file(),
"[marking dependent code " V8PRIxPTR_FMT
" (opt #%d) for deoptimization, reason: %s]\n",
- ptr(), deopt_data->OptimizationId()->value(), reason);
+ ptr(), deopt_data.OptimizationId().value(), reason);
}
}
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 647cfebe69..a950261103 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -5,11 +5,11 @@
#ifndef V8_OBJECTS_CODE_H_
#define V8_OBJECTS_CODE_H_
-#include "src/contexts.h"
-#include "src/handler-table.h"
-#include "src/objects.h"
+#include "src/codegen/handler-table.h"
+#include "src/objects/contexts.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
@@ -22,7 +22,6 @@ class ByteArray;
class BytecodeArray;
class CodeDataContainer;
class CodeDesc;
-class MaybeObject;
namespace interpreter {
class Register;
@@ -43,6 +42,7 @@ class Code : public HeapObject {
V(BUILTIN) \
V(REGEXP) \
V(WASM_FUNCTION) \
+ V(WASM_TO_CAPI_FUNCTION) \
V(WASM_TO_JS_FUNCTION) \
V(JS_TO_WASM_FUNCTION) \
V(WASM_INTERPRETER_ENTRY) \
@@ -948,25 +948,22 @@ class DeoptimizationData : public FixedArray {
OBJECT_CONSTRUCTORS(DeoptimizationData, FixedArray);
};
-class SourcePositionTableWithFrameCache : public Tuple2 {
+class SourcePositionTableWithFrameCache : public Struct {
public:
DECL_ACCESSORS(source_position_table, ByteArray)
DECL_ACCESSORS(stack_frame_cache, SimpleNumberDictionary)
DECL_CAST(SourcePositionTableWithFrameCache)
-// Layout description.
-#define SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS(V) \
- V(kSourcePositionTableIndex, kTaggedSize) \
- V(kStackFrameCacheIndex, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
+ DECL_PRINTER(SourcePositionTableWithFrameCache)
+ DECL_VERIFIER(SourcePositionTableWithFrameCache)
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
- SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS)
-#undef SOURCE_POSITION_TABLE_WITH_FRAME_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ Struct::kHeaderSize,
+ TORQUE_GENERATED_SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_FIELDS)
- OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Tuple2);
+ OBJECT_CONSTRUCTORS(SourcePositionTableWithFrameCache, Struct);
};
} // namespace internal
diff --git a/deps/v8/src/objects/compilation-cache-inl.h b/deps/v8/src/objects/compilation-cache-inl.h
index 18491118ad..81b953a589 100644
--- a/deps/v8/src/objects/compilation-cache-inl.h
+++ b/deps/v8/src/objects/compilation-cache-inl.h
@@ -28,22 +28,22 @@ NEVER_READ_ONLY_SPACE_IMPL(CompilationCacheTable)
CAST_ACCESSOR(CompilationCacheTable)
uint32_t CompilationCacheShape::RegExpHash(String string, Smi flags) {
- return string->Hash() + flags->value();
+ return string.Hash() + flags.value();
}
uint32_t CompilationCacheShape::StringSharedHash(String source,
SharedFunctionInfo shared,
LanguageMode language_mode,
int position) {
- uint32_t hash = source->Hash();
- if (shared->HasSourceCode()) {
+ uint32_t hash = source.Hash();
+ if (shared.HasSourceCode()) {
// Instead of using the SharedFunctionInfo pointer in the hash
// code computation, we use a combination of the hash of the
// script source code and the start position of the calling scope.
// We do this to ensure that the cache entries can survive garbage
// collection.
- Script script(Script::cast(shared->script()));
- hash ^= String::cast(script->source())->Hash();
+ Script script(Script::cast(shared.script()));
+ hash ^= String::cast(script.source()).Hash();
STATIC_ASSERT(LanguageModeSize == 2);
if (is_strict(language_mode)) hash ^= 0x8000;
hash += position;
@@ -53,27 +53,27 @@ uint32_t CompilationCacheShape::StringSharedHash(String source,
uint32_t CompilationCacheShape::HashForObject(ReadOnlyRoots roots,
Object object) {
- if (object->IsNumber()) return static_cast<uint32_t>(object->Number());
+ if (object.IsNumber()) return static_cast<uint32_t>(object.Number());
FixedArray val = FixedArray::cast(object);
- if (val->map() == roots.fixed_cow_array_map()) {
- DCHECK_EQ(4, val->length());
- SharedFunctionInfo shared = SharedFunctionInfo::cast(val->get(0));
- String source = String::cast(val->get(1));
- int language_unchecked = Smi::ToInt(val->get(2));
+ if (val.map() == roots.fixed_cow_array_map()) {
+ DCHECK_EQ(4, val.length());
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(val.get(0));
+ String source = String::cast(val.get(1));
+ int language_unchecked = Smi::ToInt(val.get(2));
DCHECK(is_valid_language_mode(language_unchecked));
LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
- int position = Smi::ToInt(val->get(3));
+ int position = Smi::ToInt(val.get(3));
return StringSharedHash(source, shared, language_mode, position);
}
- DCHECK_LT(2, val->length());
- return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
- Smi::cast(val->get(JSRegExp::kFlagsIndex)));
+ DCHECK_LT(2, val.length());
+ return RegExpHash(String::cast(val.get(JSRegExp::kSourceIndex)),
+ Smi::cast(val.get(JSRegExp::kFlagsIndex)));
}
InfoCellPair::InfoCellPair(SharedFunctionInfo shared,
FeedbackCell feedback_cell)
- : is_compiled_scope_(!shared.is_null() ? shared->is_compiled_scope()
+ : is_compiled_scope_(!shared.is_null() ? shared.is_compiled_scope()
: IsCompiledScope()),
shared_(shared),
feedback_cell_(feedback_cell) {}
diff --git a/deps/v8/src/objects/compilation-cache.h b/deps/v8/src/objects/compilation-cache.h
index d3feb1b233..2072339c5e 100644
--- a/deps/v8/src/objects/compilation-cache.h
+++ b/deps/v8/src/objects/compilation-cache.h
@@ -9,7 +9,7 @@
#include "src/objects/hash-table.h"
#include "src/objects/js-regexp.h"
#include "src/objects/shared-function-info.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/compressed-slots-inl.h b/deps/v8/src/objects/compressed-slots-inl.h
index 9c55de9ae6..b08bc938e5 100644
--- a/deps/v8/src/objects/compressed-slots-inl.h
+++ b/deps/v8/src/objects/compressed-slots-inl.h
@@ -7,10 +7,10 @@
#ifdef V8_COMPRESS_POINTERS
+#include "src/common/ptr-compr-inl.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/maybe-object-inl.h"
-#include "src/ptr-compr-inl.h"
namespace v8 {
namespace internal {
@@ -28,7 +28,7 @@ Object CompressedObjectSlot::operator*() const {
}
void CompressedObjectSlot::store(Object value) const {
- *location() = CompressTagged(value->ptr());
+ *location() = CompressTagged(value.ptr());
}
Object CompressedObjectSlot::Acquire_Load() const {
@@ -42,19 +42,19 @@ Object CompressedObjectSlot::Relaxed_Load() const {
}
void CompressedObjectSlot::Relaxed_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value->ptr());
+ Tagged_t ptr = CompressTagged(value.ptr());
AsAtomicTagged::Relaxed_Store(location(), ptr);
}
void CompressedObjectSlot::Release_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value->ptr());
+ Tagged_t ptr = CompressTagged(value.ptr());
AsAtomicTagged::Release_Store(location(), ptr);
}
Object CompressedObjectSlot::Release_CompareAndSwap(Object old,
Object target) const {
- Tagged_t old_ptr = CompressTagged(old->ptr());
- Tagged_t target_ptr = CompressTagged(target->ptr());
+ Tagged_t old_ptr = CompressTagged(old.ptr());
+ Tagged_t target_ptr = CompressTagged(target.ptr());
Tagged_t result =
AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
return Object(DecompressTaggedAny(address(), result));
@@ -95,14 +95,14 @@ Object CompressedMapWordSlot::Acquire_Load() const {
}
void CompressedMapWordSlot::Release_Store(Object value) const {
- Tagged_t ptr = CompressTagged(value->ptr());
+ Tagged_t ptr = CompressTagged(value.ptr());
AsAtomicTagged::Release_Store(location(), ptr);
}
Object CompressedMapWordSlot::Release_CompareAndSwap(Object old,
Object target) const {
- Tagged_t old_ptr = CompressTagged(old->ptr());
- Tagged_t target_ptr = CompressTagged(target->ptr());
+ Tagged_t old_ptr = CompressTagged(old.ptr());
+ Tagged_t target_ptr = CompressTagged(target.ptr());
Tagged_t result =
AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
return Object(DecompressTaggedPointer(address(), result));
@@ -118,7 +118,7 @@ MaybeObject CompressedMaybeObjectSlot::operator*() const {
}
void CompressedMaybeObjectSlot::store(MaybeObject value) const {
- *location() = CompressTagged(value->ptr());
+ *location() = CompressTagged(value.ptr());
}
MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
@@ -127,14 +127,14 @@ MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const {
}
void CompressedMaybeObjectSlot::Relaxed_Store(MaybeObject value) const {
- Tagged_t ptr = CompressTagged(value->ptr());
+ Tagged_t ptr = CompressTagged(value.ptr());
AsAtomicTagged::Relaxed_Store(location(), ptr);
}
void CompressedMaybeObjectSlot::Release_CompareAndSwap(
MaybeObject old, MaybeObject target) const {
- Tagged_t old_ptr = CompressTagged(old->ptr());
- Tagged_t target_ptr = CompressTagged(target->ptr());
+ Tagged_t old_ptr = CompressTagged(old.ptr());
+ Tagged_t target_ptr = CompressTagged(target.ptr());
AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr);
}
@@ -158,7 +158,7 @@ HeapObject CompressedHeapObjectSlot::ToHeapObject() const {
}
void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const {
- *location() = CompressTagged(value->ptr());
+ *location() = CompressTagged(value.ptr());
}
} // namespace internal
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/objects/contexts-inl.h
index faca53c13e..bb861a1d1e 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/objects/contexts-inl.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CONTEXTS_INL_H_
-#define V8_CONTEXTS_INL_H_
+#ifndef V8_OBJECTS_CONTEXTS_INL_H_
+#define V8_OBJECTS_CONTEXTS_INL_H_
-#include "src/contexts.h"
+#include "src/objects/contexts.h"
#include "src/heap/heap-write-barrier.h"
-#include "src/objects-inl.h"
#include "src/objects/dictionary-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/map-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
#include "src/objects/shared-function-info.h"
@@ -88,7 +88,7 @@ void Context::set_previous(Context context) { set(PREVIOUS_INDEX, context); }
Object Context::next_context_link() { return get(Context::NEXT_CONTEXT_LINK); }
-bool Context::has_extension() { return !extension()->IsTheHole(); }
+bool Context::has_extension() { return !extension().IsTheHole(); }
HeapObject Context::extension() {
return HeapObject::cast(get(EXTENSION_INDEX));
}
@@ -105,44 +105,44 @@ void Context::set_native_context(NativeContext context) {
}
bool Context::IsFunctionContext() const {
- return map()->instance_type() == FUNCTION_CONTEXT_TYPE;
+ return map().instance_type() == FUNCTION_CONTEXT_TYPE;
}
bool Context::IsCatchContext() const {
- return map()->instance_type() == CATCH_CONTEXT_TYPE;
+ return map().instance_type() == CATCH_CONTEXT_TYPE;
}
bool Context::IsWithContext() const {
- return map()->instance_type() == WITH_CONTEXT_TYPE;
+ return map().instance_type() == WITH_CONTEXT_TYPE;
}
bool Context::IsDebugEvaluateContext() const {
- return map()->instance_type() == DEBUG_EVALUATE_CONTEXT_TYPE;
+ return map().instance_type() == DEBUG_EVALUATE_CONTEXT_TYPE;
}
bool Context::IsAwaitContext() const {
- return map()->instance_type() == AWAIT_CONTEXT_TYPE;
+ return map().instance_type() == AWAIT_CONTEXT_TYPE;
}
bool Context::IsBlockContext() const {
- return map()->instance_type() == BLOCK_CONTEXT_TYPE;
+ return map().instance_type() == BLOCK_CONTEXT_TYPE;
}
bool Context::IsModuleContext() const {
- return map()->instance_type() == MODULE_CONTEXT_TYPE;
+ return map().instance_type() == MODULE_CONTEXT_TYPE;
}
bool Context::IsEvalContext() const {
- return map()->instance_type() == EVAL_CONTEXT_TYPE;
+ return map().instance_type() == EVAL_CONTEXT_TYPE;
}
bool Context::IsScriptContext() const {
- return map()->instance_type() == SCRIPT_CONTEXT_TYPE;
+ return map().instance_type() == SCRIPT_CONTEXT_TYPE;
}
bool Context::HasSameSecurityTokenAs(Context that) const {
- return this->native_context()->security_token() ==
- that->native_context()->security_token();
+ return this->native_context().security_token() ==
+ that.native_context().security_token();
}
#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
@@ -232,18 +232,18 @@ Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
if (!IsFastElementsKind(kind)) return Map();
DisallowHeapAllocation no_gc;
Object const initial_js_array_map = get(Context::ArrayMapIndex(kind));
- DCHECK(!initial_js_array_map->IsUndefined());
+ DCHECK(!initial_js_array_map.IsUndefined());
return Map::cast(initial_js_array_map);
}
MicrotaskQueue* NativeContext::microtask_queue() const {
return reinterpret_cast<MicrotaskQueue*>(
- READ_INTPTR_FIELD(*this, kMicrotaskQueueOffset));
+ ReadField<Address>(kMicrotaskQueueOffset));
}
void NativeContext::set_microtask_queue(MicrotaskQueue* microtask_queue) {
- WRITE_INTPTR_FIELD(*this, kMicrotaskQueueOffset,
- reinterpret_cast<intptr_t>(microtask_queue));
+ WriteField<Address>(kMicrotaskQueueOffset,
+ reinterpret_cast<Address>(microtask_queue));
}
OBJECT_CONSTRUCTORS_IMPL(NativeContext, Context)
@@ -253,4 +253,4 @@ OBJECT_CONSTRUCTORS_IMPL(NativeContext, Context)
#include "src/objects/object-macros-undef.h"
-#endif // V8_CONTEXTS_INL_H_
+#endif // V8_OBJECTS_CONTEXTS_INL_H_
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/objects/contexts.cc
index cd7e68a434..cddbcb98c0 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/objects/contexts.cc
@@ -2,18 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/contexts.h"
+#include "src/objects/contexts.h"
#include "src/ast/modules.h"
-#include "src/bootstrapper.h"
#include "src/debug/debug.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/init/bootstrapper.h"
#include "src/objects/module-inl.h"
namespace v8 {
namespace internal {
-
Handle<ScriptContextTable> ScriptContextTable::Extend(
Handle<ScriptContextTable> table, Handle<Context> script_context) {
Handle<ScriptContextTable> result;
@@ -40,11 +39,11 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
String name, LookupResult* result) {
DisallowHeapAllocation no_gc;
- for (int i = 0; i < table->used(); i++) {
- Context context = table->get_context(i);
- DCHECK(context->IsScriptContext());
+ for (int i = 0; i < table.used(); i++) {
+ Context context = table.get_context(i);
+ DCHECK(context.IsScriptContext());
int slot_index = ScopeInfo::ContextSlotIndex(
- context->scope_info(), name, &result->mode, &result->init_flag,
+ context.scope_info(), name, &result->mode, &result->init_flag,
&result->maybe_assigned_flag);
if (slot_index >= 0) {
@@ -62,26 +61,26 @@ bool Context::is_declaration_context() {
return true;
}
if (IsEvalContext()) {
- return scope_info()->language_mode() == LanguageMode::kStrict;
+ return scope_info().language_mode() == LanguageMode::kStrict;
}
if (!IsBlockContext()) return false;
- return scope_info()->is_declaration_scope();
+ return scope_info().is_declaration_scope();
}
Context Context::declaration_context() {
Context current = *this;
- while (!current->is_declaration_context()) {
- current = current->previous();
+ while (!current.is_declaration_context()) {
+ current = current.previous();
}
return current;
}
Context Context::closure_context() {
Context current = *this;
- while (!current->IsFunctionContext() && !current->IsScriptContext() &&
- !current->IsModuleContext() && !current->IsNativeContext() &&
- !current->IsEvalContext()) {
- current = current->previous();
+ while (!current.IsFunctionContext() && !current.IsScriptContext() &&
+ !current.IsModuleContext() && !current.IsNativeContext() &&
+ !current.IsEvalContext()) {
+ current = current.previous();
}
return current;
}
@@ -90,9 +89,9 @@ JSObject Context::extension_object() {
DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext() ||
IsEvalContext() || IsCatchContext());
HeapObject object = extension();
- if (object->IsTheHole()) return JSObject();
- DCHECK(object->IsJSContextExtensionObject() ||
- (IsNativeContext() && object->IsJSGlobalObject()));
+ if (object.IsTheHole()) return JSObject();
+ DCHECK(object.IsJSContextExtensionObject() ||
+ (IsNativeContext() && object.IsJSGlobalObject()));
return JSObject::cast(object);
}
@@ -108,30 +107,30 @@ ScopeInfo Context::scope_info() {
Module Context::module() {
Context current = *this;
- while (!current->IsModuleContext()) {
- current = current->previous();
+ while (!current.IsModuleContext()) {
+ current = current.previous();
}
- return Module::cast(current->extension());
+ return Module::cast(current.extension());
}
JSGlobalObject Context::global_object() {
- return JSGlobalObject::cast(native_context()->extension());
+ return JSGlobalObject::cast(native_context().extension());
}
Context Context::script_context() {
Context current = *this;
- while (!current->IsScriptContext()) {
- current = current->previous();
+ while (!current.IsScriptContext()) {
+ current = current.previous();
}
return current;
}
JSGlobalProxy Context::global_proxy() {
- return native_context()->global_proxy_object();
+ return native_context().global_proxy_object();
}
void Context::set_global_proxy(JSGlobalProxy object) {
- native_context()->set_global_proxy_object(object);
+ native_context().set_global_proxy_object(object);
}
/**
@@ -202,7 +201,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
// 1. Check global objects, subjects of with, and extension objects.
DCHECK_IMPLIES(context->IsEvalContext(),
- context->extension()->IsTheHole(isolate));
+ context->extension().IsTheHole(isolate));
if ((context->IsNativeContext() || context->IsWithContext() ||
context->IsFunctionContext() || context->IsBlockContext()) &&
!context->extension_receiver().is_null()) {
@@ -215,13 +214,13 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
}
// Try other script contexts.
ScriptContextTable script_contexts =
- context->global_object()->native_context()->script_context_table();
+ context->global_object().native_context().script_context_table();
ScriptContextTable::LookupResult r;
if (ScriptContextTable::Lookup(isolate, script_contexts, *name, &r)) {
- Context context = script_contexts->get_context(r.context_index);
+ Context context = script_contexts.get_context(r.context_index);
if (FLAG_trace_contexts) {
PrintF("=> found property in script context %d: %p\n",
- r.context_index, reinterpret_cast<void*>(context->ptr()));
+ r.context_index, reinterpret_cast<void*>(context.ptr()));
}
*index = r.slot_index;
*variable_mode = r.mode;
@@ -307,7 +306,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
// only the function name variable. It's conceptually (and spec-wise)
// in an outer scope of the function's declaration scope.
if (follow_context_chain && context->IsFunctionContext()) {
- int function_index = scope_info->FunctionContextSlotIndex(*name);
+ int function_index = scope_info.FunctionContextSlotIndex(*name);
if (function_index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
@@ -318,7 +317,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
*init_flag = kCreatedInitialized;
*variable_mode = VariableMode::kConst;
if (is_sloppy_function_name != nullptr &&
- is_sloppy(scope_info->language_mode())) {
+ is_sloppy(scope_info.language_mode())) {
*is_sloppy_function_name = true;
}
return context;
@@ -331,7 +330,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
int cell_index =
- scope_info->ModuleIndex(*name, &mode, &flag, &maybe_assigned_flag);
+ scope_info.ModuleIndex(*name, &mode, &flag, &maybe_assigned_flag);
if (cell_index != 0) {
if (FLAG_trace_contexts) {
PrintF("=> found in module imports or exports\n");
@@ -349,7 +348,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
} else if (context->IsDebugEvaluateContext()) {
// Check materialized locals.
Object ext = context->get(EXTENSION_INDEX);
- if (ext->IsJSReceiver()) {
+ if (ext.IsJSReceiver()) {
Handle<JSReceiver> extension(JSReceiver::cast(ext), isolate);
LookupIterator it(extension, name, extension);
Maybe<bool> found = JSReceiver::HasProperty(&it);
@@ -360,7 +359,7 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
}
// Check the original context, but do not follow its context chain.
Object obj = context->get(WRAPPED_CONTEXT_INDEX);
- if (obj->IsContext()) {
+ if (obj.IsContext()) {
Handle<Context> context(Context::cast(obj), isolate);
Handle<Object> result =
Context::Lookup(context, name, DONT_FOLLOW_CHAINS, index,
@@ -370,9 +369,9 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
// Check whitelist. Names that do not pass whitelist shall only resolve
// to with, script or native contexts up the context chain.
obj = context->get(WHITE_LIST_INDEX);
- if (obj->IsStringSet()) {
+ if (obj.IsStringSet()) {
failed_whitelist =
- failed_whitelist || !StringSet::cast(obj)->Has(isolate, name);
+ failed_whitelist || !StringSet::cast(obj).Has(isolate, name);
}
}
@@ -397,9 +396,9 @@ Handle<Object> Context::Lookup(Handle<Context> context, Handle<String> name,
void Context::AddOptimizedCode(Code code) {
DCHECK(IsNativeContext());
- DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- DCHECK(code->next_code_link()->IsUndefined());
- code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
+ DCHECK(code.kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(code.next_code_link().IsUndefined());
+ code.set_next_code_link(get(OPTIMIZED_CODE_LIST));
set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
}
@@ -459,18 +458,18 @@ bool Context::IsBootstrappingOrNativeContext(Isolate* isolate, Object object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
- isolate->bootstrapper()->IsActive() || object->IsNativeContext();
+ isolate->bootstrapper()->IsActive() || object.IsNativeContext();
}
bool Context::IsBootstrappingOrValidParentContext(Object object,
Context child) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
- if (child->GetIsolate()->bootstrapper()->IsActive()) return true;
- if (!object->IsContext()) return false;
+ if (child.GetIsolate()->bootstrapper()->IsActive()) return true;
+ if (!object.IsContext()) return false;
Context context = Context::cast(object);
- return context->IsNativeContext() || context->IsScriptContext() ||
- context->IsModuleContext() || !child->IsModuleContext();
+ return context.IsNativeContext() || context.IsScriptContext() ||
+ context.IsModuleContext() || !child.IsModuleContext();
}
#endif
@@ -483,11 +482,11 @@ void Context::ResetErrorsThrown() {
void Context::IncrementErrorsThrown() {
DCHECK(IsNativeContext());
- int previous_value = errors_thrown()->value();
+ int previous_value = errors_thrown().value();
set_errors_thrown(Smi::FromInt(previous_value + 1));
}
-int Context::GetErrorsThrown() { return errors_thrown()->value(); }
+int Context::GetErrorsThrown() { return errors_thrown().value(); }
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
STATIC_ASSERT(NativeContext::kScopeInfoOffset ==
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/objects/contexts.h
index d534441188..d83e351550 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CONTEXTS_H_
-#define V8_CONTEXTS_H_
+#ifndef V8_OBJECTS_CONTEXTS_H_
+#define V8_OBJECTS_CONTEXTS_H_
-#include "src/function-kind.h"
#include "src/objects/fixed-array.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "src/objects/function-kind.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -300,8 +300,9 @@ enum ContextLookupFlags {
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
- V(NATIVE_FUNCTION_MAP_INDEX, Map, native_function_map) \
+ V(WASM_EXPORTED_FUNCTION_MAP_INDEX, Map, wasm_exported_function_map) \
V(WASM_EXCEPTION_CONSTRUCTOR_INDEX, JSFunction, wasm_exception_constructor) \
+ V(WASM_FUNCTION_CONSTRUCTOR_INDEX, JSFunction, wasm_function_constructor) \
V(WASM_GLOBAL_CONSTRUCTOR_INDEX, JSFunction, wasm_global_constructor) \
V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
@@ -655,7 +656,7 @@ class Context : public HeapObject {
DECL_PRINTER(Context)
DECL_VERIFIER(Context)
- typedef FlexibleBodyDescriptor<kStartOfTaggedFieldsOffset> BodyDescriptor;
+ using BodyDescriptor = FlexibleBodyDescriptor<kStartOfTaggedFieldsOffset>;
private:
#ifdef DEBUG
@@ -709,11 +710,11 @@ class NativeContext : public Context {
OBJECT_CONSTRUCTORS(NativeContext, Context);
};
-typedef Context::Field ContextField;
+using ContextField = Context::Field;
} // namespace internal
} // namespace v8
#include "src/objects/object-macros-undef.h"
-#endif // V8_CONTEXTS_H_
+#endif // V8_OBJECTS_CONTEXTS_H_
diff --git a/deps/v8/src/objects/data-handler-inl.h b/deps/v8/src/objects/data-handler-inl.h
index 1be71ce8fa..f9496cc342 100644
--- a/deps/v8/src/objects/data-handler-inl.h
+++ b/deps/v8/src/objects/data-handler-inl.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_DATA_HANDLER_INL_H_
#define V8_OBJECTS_DATA_HANDLER_INL_H_
-#include "src/objects-inl.h" // Needed for write barriers
#include "src/objects/data-handler.h"
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,15 +22,15 @@ ACCESSORS(DataHandler, smi_handler, Object, kSmiHandlerOffset)
ACCESSORS(DataHandler, validity_cell, Object, kValidityCellOffset)
int DataHandler::data_field_count() const {
- return (map()->instance_size() - kSizeWithData0) / kTaggedSize;
+ return (map().instance_size() - kSizeWithData0) / kTaggedSize;
}
WEAK_ACCESSORS_CHECKED(DataHandler, data1, kData1Offset,
- map()->instance_size() >= kSizeWithData1)
+ map().instance_size() >= kSizeWithData1)
WEAK_ACCESSORS_CHECKED(DataHandler, data2, kData2Offset,
- map()->instance_size() >= kSizeWithData2)
+ map().instance_size() >= kSizeWithData2)
WEAK_ACCESSORS_CHECKED(DataHandler, data3, kData3Offset,
- map()->instance_size() >= kSizeWithData3)
+ map().instance_size() >= kSizeWithData3)
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/debug-objects-inl.h b/deps/v8/src/objects/debug-objects-inl.h
index d445174cbc..273f710c3b 100644
--- a/deps/v8/src/objects/debug-objects-inl.h
+++ b/deps/v8/src/objects/debug-objects-inl.h
@@ -8,8 +8,8 @@
#include "src/objects/debug-objects.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/code-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
// Has to be the last include (doesn't have include guards):
@@ -37,7 +37,7 @@ ACCESSORS(DebugInfo, script, Object, kScriptOffset)
ACCESSORS(DebugInfo, original_bytecode_array, Object,
kOriginalBytecodeArrayOffset)
ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayOffset)
-ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateOffset)
+ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsOffset)
ACCESSORS(DebugInfo, coverage_info, Object, kCoverageInfoOffset)
BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, side_effect_state,
@@ -56,9 +56,9 @@ SMI_ACCESSORS(BreakPoint, id, kIdOffset)
ACCESSORS(BreakPoint, condition, String, kConditionOffset)
bool DebugInfo::HasInstrumentedBytecodeArray() {
- DCHECK_EQ(debug_bytecode_array()->IsBytecodeArray(),
- original_bytecode_array()->IsBytecodeArray());
- return debug_bytecode_array()->IsBytecodeArray();
+ DCHECK_EQ(debug_bytecode_array().IsBytecodeArray(),
+ original_bytecode_array().IsBytecodeArray());
+ return debug_bytecode_array().IsBytecodeArray();
}
BytecodeArray DebugInfo::OriginalBytecodeArray() {
@@ -68,7 +68,7 @@ BytecodeArray DebugInfo::OriginalBytecodeArray() {
BytecodeArray DebugInfo::DebugBytecodeArray() {
DCHECK(HasInstrumentedBytecodeArray());
- DCHECK_EQ(shared()->GetDebugBytecodeArray(), debug_bytecode_array());
+ DCHECK_EQ(shared().GetDebugBytecodeArray(), debug_bytecode_array());
return BytecodeArray::cast(debug_bytecode_array());
}
diff --git a/deps/v8/src/objects/debug-objects.cc b/deps/v8/src/objects/debug-objects.cc
index 760edbfbcf..5087918e75 100644
--- a/deps/v8/src/objects/debug-objects.cc
+++ b/deps/v8/src/objects/debug-objects.cc
@@ -5,9 +5,9 @@
#include "src/objects/debug-objects.h"
#include "src/debug/debug-evaluate.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/debug-objects-inl.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -31,7 +31,7 @@ void DebugInfo::ClearBreakInfo(Isolate* isolate) {
if (HasInstrumentedBytecodeArray()) {
// Reset function's bytecode array field to point to the original bytecode
// array.
- shared()->SetDebugBytecodeArray(OriginalBytecodeArray());
+ shared().SetDebugBytecodeArray(OriginalBytecodeArray());
// If the function is currently running on the stack, we need to update the
// bytecode pointers on the stack so they point to the original
@@ -80,19 +80,18 @@ bool DebugInfo::HasBreakPoint(Isolate* isolate, int source_position) {
// If there is no break point info object or no break points in the break
// point info object there is no break point at this code offset.
- if (break_point_info->IsUndefined(isolate)) return false;
- return BreakPointInfo::cast(break_point_info)->GetBreakPointCount(isolate) >
- 0;
+ if (break_point_info.IsUndefined(isolate)) return false;
+ return BreakPointInfo::cast(break_point_info).GetBreakPointCount(isolate) > 0;
}
// Get the break point info object for this source position.
Object DebugInfo::GetBreakPointInfo(Isolate* isolate, int source_position) {
DCHECK(HasBreakInfo());
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined(isolate)) {
+ for (int i = 0; i < break_points().length(); i++) {
+ if (!break_points().get(i).IsUndefined(isolate)) {
BreakPointInfo break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->source_position() == source_position) {
+ BreakPointInfo::cast(break_points().get(i));
+ if (break_point_info.source_position() == source_position) {
return break_point_info;
}
}
@@ -103,10 +102,10 @@ Object DebugInfo::GetBreakPointInfo(Isolate* isolate, int source_position) {
bool DebugInfo::ClearBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (debug_info->break_points()->get(i)->IsUndefined(isolate)) continue;
+ for (int i = 0; i < debug_info->break_points().length(); i++) {
+ if (debug_info->break_points().get(i).IsUndefined(isolate)) continue;
Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
- BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ BreakPointInfo::cast(debug_info->break_points().get(i)), isolate);
if (BreakPointInfo::HasBreakPoint(isolate, break_point_info, break_point)) {
BreakPointInfo::ClearBreakPoint(isolate, break_point_info, break_point);
return true;
@@ -131,8 +130,8 @@ void DebugInfo::SetBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
// break points before. Try to find a free slot.
static const int kNoBreakPointInfo = -1;
int index = kNoBreakPointInfo;
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (debug_info->break_points()->get(i)->IsUndefined(isolate)) {
+ for (int i = 0; i < debug_info->break_points().length(); i++) {
+ if (debug_info->break_points().get(i).IsUndefined(isolate)) {
index = i;
break;
}
@@ -157,7 +156,7 @@ void DebugInfo::SetBreakPoint(Isolate* isolate, Handle<DebugInfo> debug_info,
Handle<BreakPointInfo> new_break_point_info =
isolate->factory()->NewBreakPointInfo(source_position);
BreakPointInfo::SetBreakPoint(isolate, new_break_point_info, break_point);
- debug_info->break_points()->set(index, *new_break_point_info);
+ debug_info->break_points().set(index, *new_break_point_info);
}
// Get the break point objects for a source position.
@@ -165,10 +164,10 @@ Handle<Object> DebugInfo::GetBreakPoints(Isolate* isolate,
int source_position) {
DCHECK(HasBreakInfo());
Object break_point_info = GetBreakPointInfo(isolate, source_position);
- if (break_point_info->IsUndefined(isolate)) {
+ if (break_point_info.IsUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
- return Handle<Object>(BreakPointInfo::cast(break_point_info)->break_points(),
+ return Handle<Object>(BreakPointInfo::cast(break_point_info).break_points(),
isolate);
}
@@ -176,11 +175,11 @@ Handle<Object> DebugInfo::GetBreakPoints(Isolate* isolate,
int DebugInfo::GetBreakPointCount(Isolate* isolate) {
DCHECK(HasBreakInfo());
int count = 0;
- for (int i = 0; i < break_points()->length(); i++) {
- if (!break_points()->get(i)->IsUndefined(isolate)) {
+ for (int i = 0; i < break_points().length(); i++) {
+ if (!break_points().get(i).IsUndefined(isolate)) {
BreakPointInfo break_point_info =
- BreakPointInfo::cast(break_points()->get(i));
- count += break_point_info->GetBreakPointCount(isolate);
+ BreakPointInfo::cast(break_points().get(i));
+ count += break_point_info.GetBreakPointCount(isolate);
}
}
return count;
@@ -190,10 +189,10 @@ Handle<Object> DebugInfo::FindBreakPointInfo(Isolate* isolate,
Handle<DebugInfo> debug_info,
Handle<BreakPoint> break_point) {
DCHECK(debug_info->HasBreakInfo());
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined(isolate)) {
+ for (int i = 0; i < debug_info->break_points().length(); i++) {
+ if (!debug_info->break_points().get(i).IsUndefined(isolate)) {
Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
- BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ BreakPointInfo::cast(debug_info->break_points().get(i)), isolate);
if (BreakPointInfo::HasBreakPoint(isolate, break_point_info,
break_point)) {
return break_point_info;
@@ -228,7 +227,7 @@ DebugInfo::SideEffectState DebugInfo::GetSideEffectState(Isolate* isolate) {
namespace {
bool IsEqual(BreakPoint break_point1, BreakPoint break_point2) {
- return break_point1->id() == break_point2->id();
+ return break_point1.id() == break_point2.id();
}
} // namespace
@@ -237,9 +236,9 @@ void BreakPointInfo::ClearBreakPoint(Isolate* isolate,
Handle<BreakPointInfo> break_point_info,
Handle<BreakPoint> break_point) {
// If there are no break points just ignore.
- if (break_point_info->break_points()->IsUndefined(isolate)) return;
+ if (break_point_info->break_points().IsUndefined(isolate)) return;
// If there is a single break point clear it if it is the same.
- if (!break_point_info->break_points()->IsFixedArray()) {
+ if (!break_point_info->break_points().IsFixedArray()) {
if (IsEqual(BreakPoint::cast(break_point_info->break_points()),
*break_point)) {
break_point_info->set_break_points(
@@ -248,7 +247,7 @@ void BreakPointInfo::ClearBreakPoint(Isolate* isolate,
return;
}
// If there are multiple break points shrink the array
- DCHECK(break_point_info->break_points()->IsFixedArray());
+ DCHECK(break_point_info->break_points().IsFixedArray());
Handle<FixedArray> old_array = Handle<FixedArray>(
FixedArray::cast(break_point_info->break_points()), isolate);
Handle<FixedArray> new_array =
@@ -271,14 +270,14 @@ void BreakPointInfo::SetBreakPoint(Isolate* isolate,
Handle<BreakPointInfo> break_point_info,
Handle<BreakPoint> break_point) {
// If there was no break point objects before just set it.
- if (break_point_info->break_points()->IsUndefined(isolate)) {
+ if (break_point_info->break_points().IsUndefined(isolate)) {
break_point_info->set_break_points(*break_point);
return;
}
// If the break point object is the same as before just ignore.
if (break_point_info->break_points() == *break_point) return;
// If there was one break point object before replace with array.
- if (!break_point_info->break_points()->IsFixedArray()) {
+ if (!break_point_info->break_points().IsFixedArray()) {
Handle<FixedArray> array = isolate->factory()->NewFixedArray(2);
array->set(0, break_point_info->break_points());
array->set(1, *break_point);
@@ -304,18 +303,18 @@ bool BreakPointInfo::HasBreakPoint(Isolate* isolate,
Handle<BreakPointInfo> break_point_info,
Handle<BreakPoint> break_point) {
// No break point.
- if (break_point_info->break_points()->IsUndefined(isolate)) {
+ if (break_point_info->break_points().IsUndefined(isolate)) {
return false;
}
// Single break point.
- if (!break_point_info->break_points()->IsFixedArray()) {
+ if (!break_point_info->break_points().IsFixedArray()) {
return IsEqual(BreakPoint::cast(break_point_info->break_points()),
*break_point);
}
// Multiple break points.
FixedArray array = FixedArray::cast(break_point_info->break_points());
- for (int i = 0; i < array->length(); i++) {
- if (IsEqual(BreakPoint::cast(array->get(i)), *break_point)) {
+ for (int i = 0; i < array.length(); i++) {
+ if (IsEqual(BreakPoint::cast(array.get(i)), *break_point)) {
return true;
}
}
@@ -325,11 +324,11 @@ bool BreakPointInfo::HasBreakPoint(Isolate* isolate,
// Get the number of break points.
int BreakPointInfo::GetBreakPointCount(Isolate* isolate) {
// No break point.
- if (break_points()->IsUndefined(isolate)) return 0;
+ if (break_points().IsUndefined(isolate)) return 0;
// Single break point.
- if (!break_points()->IsFixedArray()) return 1;
+ if (!break_points().IsFixedArray()) return 1;
// Multiple break points.
- return FixedArray::cast(break_points())->length();
+ return FixedArray::cast(break_points()).length();
}
int CoverageInfo::SlotCount() const {
diff --git a/deps/v8/src/objects/debug-objects.h b/deps/v8/src/objects/debug-objects.h
index 9839f405f6..243caaa526 100644
--- a/deps/v8/src/objects/debug-objects.h
+++ b/deps/v8/src/objects/debug-objects.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_DEBUG_OBJECTS_H_
#define V8_OBJECTS_DEBUG_OBJECTS_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
@@ -168,21 +168,9 @@ class DebugInfo : public Struct {
DECL_PRINTER(DebugInfo)
DECL_VERIFIER(DebugInfo)
-// Layout description.
-#define DEBUG_INFO_FIELDS(V) \
- V(kSharedFunctionInfoOffset, kTaggedSize) \
- V(kDebuggerHintsOffset, kTaggedSize) \
- V(kScriptOffset, kTaggedSize) \
- V(kOriginalBytecodeArrayOffset, kTaggedSize) \
- V(kDebugBytecodeArrayOffset, kTaggedSize) \
- V(kBreakPointsStateOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- V(kCoverageInfoOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize, DEBUG_INFO_FIELDS)
-#undef DEBUG_INFO_FIELDS
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(Struct::kHeaderSize,
+ TORQUE_GENERATED_DEBUG_INFO_FIELDS)
static const int kEstimatedNofBreakPointsInFunction = 4;
@@ -247,11 +235,6 @@ class CoverageInfo : public FixedArray {
// Print debug info.
void Print(std::unique_ptr<char[]> function_name);
- private:
- static int FirstIndexForSlot(int slot_index) {
- return kFirstSlotIndex + slot_index * kSlotIndexCount;
- }
-
static const int kFirstSlotIndex = 0;
// Each slot is assigned a group of indices starting at kFirstSlotIndex.
@@ -259,7 +242,17 @@ class CoverageInfo : public FixedArray {
static const int kSlotStartSourcePositionIndex = 0;
static const int kSlotEndSourcePositionIndex = 1;
static const int kSlotBlockCountIndex = 2;
- static const int kSlotIndexCount = 3;
+ static const int kSlotPaddingIndex = 3; // Padding to make the index count 4.
+ static const int kSlotIndexCount = 4;
+
+ static const int kSlotIndexCountLog2 = 2;
+ static const int kSlotIndexCountMask = (kSlotIndexCount - 1);
+ STATIC_ASSERT(1 << kSlotIndexCountLog2 == kSlotIndexCount);
+
+ private:
+ static int FirstIndexForSlot(int slot_index) {
+ return kFirstSlotIndex + slot_index * kSlotIndexCount;
+ }
OBJECT_CONSTRUCTORS(CoverageInfo, FixedArray);
};
diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h
index a59d4e5a75..1cd64c1bf1 100644
--- a/deps/v8/src/objects/descriptor-array-inl.h
+++ b/deps/v8/src/objects/descriptor-array-inl.h
@@ -7,16 +7,16 @@
#include "src/objects/descriptor-array.h"
-#include "src/field-type.h"
+#include "src/execution/isolate.h"
+#include "src/handles/maybe-handles-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/lookup-cache-inl.h"
-#include "src/maybe-handles-inl.h"
+#include "src/objects/field-type.h"
#include "src/objects/heap-object-inl.h"
-#include "src/objects/maybe-object.h"
+#include "src/objects/lookup-cache-inl.h"
+#include "src/objects/maybe-object-inl.h"
+#include "src/objects/property.h"
#include "src/objects/struct-inl.h"
-#include "src/property.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -59,25 +59,25 @@ inline int16_t DescriptorArray::CompareAndSwapRawNumberOfMarkedDescriptors(
}
void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) {
- set_enum_cache(array->enum_cache());
+ set_enum_cache(array.enum_cache());
}
int DescriptorArray::Search(Name name, int valid_descriptors) {
- DCHECK(name->IsUniqueName());
+ DCHECK(name.IsUniqueName());
return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors,
nullptr);
}
int DescriptorArray::Search(Name name, Map map) {
- DCHECK(name->IsUniqueName());
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ DCHECK(name.IsUniqueName());
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) return kNotFound;
return Search(name, number_of_own_descriptors);
}
int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) {
- DCHECK(name->IsUniqueName());
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ DCHECK(name.IsUniqueName());
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) return kNotFound;
DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
@@ -92,7 +92,11 @@ int DescriptorArray::SearchWithCache(Isolate* isolate, Name name, Map map) {
}
ObjectSlot DescriptorArray::GetFirstPointerSlot() {
- return RawField(DescriptorArray::kPointersStartOffset);
+ static_assert(kEndOfStrongFieldsOffset == kStartOfWeakFieldsOffset,
+ "Weak and strong fields are continuous.");
+ static_assert(kEndOfWeakFieldsOffset == kHeaderSize,
+ "Weak fields extend up to the end of the header.");
+ return RawField(DescriptorArray::kStartOfStrongFieldsOffset);
}
ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
@@ -105,7 +109,7 @@ ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
ObjectSlot DescriptorArray::GetKeySlot(int descriptor) {
DCHECK_LE(descriptor, number_of_all_descriptors());
ObjectSlot slot = GetDescriptorSlot(descriptor) + kEntryKeyIndex;
- DCHECK((*slot)->IsObject());
+ DCHECK((*slot).IsObject());
return slot;
}
@@ -194,7 +198,7 @@ void DescriptorArray::Append(Descriptor* desc) {
for (insertion = descriptor_number; insertion > 0; --insertion) {
Name key = GetSortedKey(insertion - 1);
- if (key->Hash() <= hash) break;
+ if (key.Hash() <= hash) break;
SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
}
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 89350514b7..3c1fa98a37 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_DESCRIPTOR_ARRAY_H_
#define V8_OBJECTS_DESCRIPTOR_ARRAY_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -139,20 +139,9 @@ class DescriptorArray : public HeapObject {
static const int kNotFound = -1;
// Layout description.
-#define DESCRIPTOR_ARRAY_FIELDS(V) \
- V(kNumberOfAllDescriptorsOffset, kUInt16Size) \
- V(kNumberOfDescriptorsOffset, kUInt16Size) \
- V(kRawNumberOfMarkedDescriptorsOffset, kUInt16Size) \
- V(kFiller16BitsOffset, kUInt16Size) \
- V(kPointersStartOffset, 0) \
- V(kEnumCacheOffset, kTaggedSize) \
- V(kHeaderSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- DESCRIPTOR_ARRAY_FIELDS)
-#undef DESCRIPTOR_ARRAY_FIELDS
-
- STATIC_ASSERT(IsAligned(kPointersStartOffset, kTaggedSize));
+ TORQUE_GENERATED_DESCRIPTOR_ARRAY_FIELDS)
+ STATIC_ASSERT(IsAligned(kStartOfWeakFieldsOffset, kTaggedSize));
STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
// Garbage collection support.
@@ -174,7 +163,13 @@ class DescriptorArray : public HeapObject {
inline ObjectSlot GetKeySlot(int descriptor);
inline MaybeObjectSlot GetValueSlot(int descriptor);
- using BodyDescriptor = FlexibleWeakBodyDescriptor<kPointersStartOffset>;
+ static_assert(kEndOfStrongFieldsOffset == kStartOfWeakFieldsOffset,
+ "Weak fields follow strong fields.");
+ static_assert(kEndOfWeakFieldsOffset == kHeaderSize,
+ "Weak fields extend up to the end of the header.");
+ // We use this visitor to also visitor to also visit the enum_cache, which is
+ // the only tagged field in the header, and placed at the end of the header.
+ using BodyDescriptor = FlexibleWeakBodyDescriptor<kStartOfStrongFieldsOffset>;
// Layout of descriptor.
// Naming is consistent with Dictionary classes for easy templating.
diff --git a/deps/v8/src/objects/dictionary-inl.h b/deps/v8/src/objects/dictionary-inl.h
index caacde21fa..a1692978f3 100644
--- a/deps/v8/src/objects/dictionary-inl.h
+++ b/deps/v8/src/objects/dictionary-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/dictionary.h"
-#include "src/hash-seed-inl.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell-inl.h"
@@ -53,14 +53,14 @@ SimpleNumberDictionary::SimpleNumberDictionary(Address ptr)
bool NumberDictionary::requires_slow_elements() {
Object max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return false;
+ if (!max_index_object.IsSmi()) return false;
return 0 != (Smi::ToInt(max_index_object) & kRequiresSlowElementsMask);
}
uint32_t NumberDictionary::max_number_key() {
DCHECK(!requires_slow_elements());
Object max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi()) return 0;
+ if (!max_index_object.IsSmi()) return 0;
uint32_t value = static_cast<uint32_t>(Smi::ToInt(max_index_object));
return value >> kRequiresSlowElementsTagSize;
}
@@ -73,7 +73,7 @@ template <typename Derived, typename Shape>
void Dictionary<Derived, Shape>::ClearEntry(Isolate* isolate, int entry) {
Object the_hole = this->GetReadOnlyRoots().the_hole_value();
PropertyDetails details = PropertyDetails::Empty();
- Derived::cast(*this)->SetEntry(isolate, entry, the_hole, the_hole, details);
+ Derived::cast(*this).SetEntry(isolate, entry, the_hole, the_hole, details);
}
template <typename Derived, typename Shape>
@@ -81,7 +81,7 @@ void Dictionary<Derived, Shape>::SetEntry(Isolate* isolate, int entry,
Object key, Object value,
PropertyDetails details) {
DCHECK(Dictionary::kEntrySize == 2 || Dictionary::kEntrySize == 3);
- DCHECK(!key->IsName() || details.dictionary_index() > 0);
+ DCHECK(!key.IsName() || details.dictionary_index() > 0);
int index = DerivedHashTable::EntryToIndex(entry);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = this->GetWriteBarrierMode(no_gc);
@@ -91,7 +91,7 @@ void Dictionary<Derived, Shape>::SetEntry(Isolate* isolate, int entry,
}
Object GlobalDictionaryShape::Unwrap(Object object) {
- return PropertyCell::cast(object)->name();
+ return PropertyCell::cast(object).name();
}
RootIndex GlobalDictionaryShape::GetMapRootIndex() {
@@ -105,7 +105,7 @@ RootIndex NameDictionaryShape::GetMapRootIndex() {
}
PropertyCell GlobalDictionary::CellAt(int entry) {
- DCHECK(KeyAt(entry)->IsPropertyCell());
+ DCHECK(KeyAt(entry).IsPropertyCell());
return PropertyCell::cast(KeyAt(entry));
}
@@ -115,15 +115,15 @@ bool GlobalDictionaryShape::IsLive(ReadOnlyRoots roots, Object k) {
}
bool GlobalDictionaryShape::IsKey(ReadOnlyRoots roots, Object k) {
- return IsLive(roots, k) && !PropertyCell::cast(k)->value()->IsTheHole(roots);
+ return IsLive(roots, k) && !PropertyCell::cast(k).value().IsTheHole(roots);
}
-Name GlobalDictionary::NameAt(int entry) { return CellAt(entry)->name(); }
-Object GlobalDictionary::ValueAt(int entry) { return CellAt(entry)->value(); }
+Name GlobalDictionary::NameAt(int entry) { return CellAt(entry).name(); }
+Object GlobalDictionary::ValueAt(int entry) { return CellAt(entry).value(); }
void GlobalDictionary::SetEntry(Isolate* isolate, int entry, Object key,
Object value, PropertyDetails details) {
- DCHECK_EQ(key, PropertyCell::cast(value)->name());
+ DCHECK_EQ(key, PropertyCell::cast(value).name());
set(EntryToIndex(entry) + kEntryKeyIndex, value);
DetailsAtPut(isolate, entry, details);
}
@@ -133,8 +133,8 @@ void GlobalDictionary::ValueAtPut(int entry, Object value) {
}
bool NumberDictionaryBaseShape::IsMatch(uint32_t key, Object other) {
- DCHECK(other->IsNumber());
- return key == static_cast<uint32_t>(other->Number());
+ DCHECK(other.IsNumber());
+ return key == static_cast<uint32_t>(other.Number());
}
uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
@@ -143,8 +143,8 @@ uint32_t NumberDictionaryBaseShape::Hash(Isolate* isolate, uint32_t key) {
uint32_t NumberDictionaryBaseShape::HashForObject(ReadOnlyRoots roots,
Object other) {
- DCHECK(other->IsNumber());
- return ComputeSeededHash(static_cast<uint32_t>(other->Number()),
+ DCHECK(other.IsNumber());
+ return ComputeSeededHash(static_cast<uint32_t>(other.Number()),
HashSeed(roots));
}
@@ -162,7 +162,7 @@ RootIndex SimpleNumberDictionaryShape::GetMapRootIndex() {
}
bool NameDictionaryShape::IsMatch(Handle<Name> key, Object other) {
- DCHECK(other->IsTheHole() || Name::cast(other)->IsUniqueName());
+ DCHECK(other.IsTheHole() || Name::cast(other).IsUniqueName());
DCHECK(key->IsUniqueName());
return *key == other;
}
@@ -172,17 +172,17 @@ uint32_t NameDictionaryShape::Hash(Isolate* isolate, Handle<Name> key) {
}
uint32_t NameDictionaryShape::HashForObject(ReadOnlyRoots roots, Object other) {
- return Name::cast(other)->Hash();
+ return Name::cast(other).Hash();
}
bool GlobalDictionaryShape::IsMatch(Handle<Name> key, Object other) {
- DCHECK(PropertyCell::cast(other)->name()->IsUniqueName());
- return *key == PropertyCell::cast(other)->name();
+ DCHECK(PropertyCell::cast(other).name().IsUniqueName());
+ return *key == PropertyCell::cast(other).name();
}
uint32_t GlobalDictionaryShape::HashForObject(ReadOnlyRoots roots,
Object other) {
- return PropertyCell::cast(other)->name()->Hash();
+ return PropertyCell::cast(other).name().Hash();
}
Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
@@ -194,19 +194,19 @@ Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
template <typename Dictionary>
PropertyDetails GlobalDictionaryShape::DetailsAt(Dictionary dict, int entry) {
DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
- return dict->CellAt(entry)->property_details();
+ return dict.CellAt(entry).property_details();
}
template <typename Dictionary>
void GlobalDictionaryShape::DetailsAtPut(Isolate* isolate, Dictionary dict,
int entry, PropertyDetails value) {
DCHECK_LE(0, entry); // Not found is -1, which is not caught by get().
- PropertyCell cell = dict->CellAt(entry);
- if (cell->property_details().IsReadOnly() != value.IsReadOnly()) {
- cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ PropertyCell cell = dict.CellAt(entry);
+ if (cell.property_details().IsReadOnly() != value.IsReadOnly()) {
+ cell.dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
}
- cell->set_property_details(value);
+ cell.set_property_details(value);
}
} // namespace internal
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 0bce08393f..ca709f34d8 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -6,11 +6,11 @@
#define V8_OBJECTS_DICTIONARY_H_
#include "src/base/export-template.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/hash-table.h"
#include "src/objects/property-array.h"
#include "src/objects/smi.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -99,16 +99,16 @@ class BaseDictionaryShape : public BaseShape<Key> {
static inline PropertyDetails DetailsAt(Dictionary dict, int entry) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
DCHECK_GE(entry, 0); // Not found is -1, which is not caught by get().
- return PropertyDetails(Smi::cast(dict->get(
- Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex)));
+ return PropertyDetails(Smi::cast(dict.get(Dictionary::EntryToIndex(entry) +
+ Dictionary::kEntryDetailsIndex)));
}
template <typename Dictionary>
static inline void DetailsAtPut(Isolate* isolate, Dictionary dict, int entry,
PropertyDetails value) {
STATIC_ASSERT(Dictionary::kEntrySize == 3);
- dict->set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex,
- value.AsSmi());
+ dict.set(Dictionary::EntryToIndex(entry) + Dictionary::kEntryDetailsIndex,
+ value.AsSmi());
}
};
@@ -341,10 +341,6 @@ class NumberDictionary
static const int kMaxNumberKeyIndex = kPrefixStartIndex;
void UpdateMaxNumberKey(uint32_t key, Handle<JSObject> dictionary_holder);
- // Returns true if the dictionary contains any elements that are non-writable,
- // non-configurable, non-enumerable, or have getters/setters.
- bool HasComplexElements();
-
// Sorting support
void CopyValuesTo(FixedArray elements);
diff --git a/deps/v8/src/elements-inl.h b/deps/v8/src/objects/elements-inl.h
index a52ccf66ba..c4f2e2bf78 100644
--- a/deps/v8/src/elements-inl.h
+++ b/deps/v8/src/objects/elements-inl.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ELEMENTS_INL_H_
-#define V8_ELEMENTS_INL_H_
+#ifndef V8_OBJECTS_ELEMENTS_INL_H_
+#define V8_OBJECTS_ELEMENTS_INL_H_
-#include "src/elements.h"
+#include "src/objects/elements.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -29,10 +29,10 @@ inline MaybeHandle<FixedArray> ElementsAccessor::PrependElementIndices(
inline bool ElementsAccessor::HasElement(JSObject holder, uint32_t index,
PropertyFilter filter) {
- return HasElement(holder, index, holder->elements(), filter);
+ return HasElement(holder, index, holder.elements(), filter);
}
} // namespace internal
} // namespace v8
-#endif // V8_ELEMENTS_INL_H_
+#endif // V8_OBJECTS_ELEMENTS_INL_H_
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/objects/elements-kind.cc
index 40e97b59ad..a819caf459 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/objects/elements-kind.cc
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/elements-kind.h"
+#include "src/objects/elements-kind.h"
#include "src/base/lazy-instance.h"
-#include "src/elements.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/objects/elements.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
-
int ElementsKindToShiftSize(ElementsKind elements_kind) {
switch (elements_kind) {
case UINT8_ELEMENTS:
@@ -38,6 +37,8 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case PACKED_SEALED_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -57,17 +58,55 @@ int ElementsKindToByteSize(ElementsKind elements_kind) {
int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
+ if (IsTypedArrayElementsKind(elements_kind)) {
return 0;
} else {
return FixedArray::kHeaderSize - kHeapObjectTag;
}
}
-
const char* ElementsKindToString(ElementsKind kind) {
- ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
- return accessor->name();
+ switch (kind) {
+ case PACKED_SMI_ELEMENTS:
+ return "PACKED_SMI_ELEMENTS";
+ case HOLEY_SMI_ELEMENTS:
+ return "HOLEY_SMI_ELEMENTS";
+ case PACKED_ELEMENTS:
+ return "PACKED_ELEMENTS";
+ case HOLEY_ELEMENTS:
+ return "HOLEY_ELEMENTS";
+ case PACKED_DOUBLE_ELEMENTS:
+ return "PACKED_DOUBLE_ELEMENTS";
+ case HOLEY_DOUBLE_ELEMENTS:
+ return "HOLEY_DOUBLE_ELEMENTS";
+ case PACKED_SEALED_ELEMENTS:
+ return "PACKED_SEALED_ELEMENTS";
+ case HOLEY_SEALED_ELEMENTS:
+ return "HOLEY_SEALED_ELEMENTS";
+ case PACKED_FROZEN_ELEMENTS:
+ return "PACKED_FROZEN_ELEMENTS";
+ case HOLEY_FROZEN_ELEMENTS:
+ return "HOLEY_FROZEN_ELEMENTS";
+ case DICTIONARY_ELEMENTS:
+ return "DICTIONARY_ELEMENTS";
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ return "FAST_SLOPPY_ARGUMENTS_ELEMENTS";
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ return "SLOW_SLOPPY_ARGUMENTS_ELEMENTS";
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ return "FAST_STRING_WRAPPER_ELEMENTS";
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ return "SLOW_STRING_WRAPPER_ELEMENTS";
+
+#define PRINT_NAME(Type, type, TYPE, _) \
+ case TYPE##_ELEMENTS: \
+ return #TYPE "ELEMENTS";
+
+ TYPED_ARRAYS(PRINT_NAME);
+#undef PRINT_NAME
+ case NO_ELEMENTS:
+ return "NO_ELEMENTS";
+ }
}
ElementsKind kFastElementsKindSequence[kFastElementsKindCount] = {
@@ -88,8 +127,7 @@ STATIC_ASSERT(PACKED_ELEMENTS + kFastElementsKindPackedToHoley ==
HOLEY_ELEMENTS);
ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
- DCHECK(sequence_number >= 0 &&
- sequence_number < kFastElementsKindCount);
+ DCHECK(sequence_number >= 0 && sequence_number < kFastElementsKindCount);
return kFastElementsKindSequence[sequence_number];
}
@@ -102,24 +140,22 @@ int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind) {
UNREACHABLE();
}
-
ElementsKind GetNextTransitionElementsKind(ElementsKind kind) {
int index = GetSequenceIndexFromFastElementsKind(kind);
return GetFastElementsKindFromSequenceIndex(index + 1);
}
-
static inline bool IsFastTransitionTarget(ElementsKind elements_kind) {
return IsFastElementsKind(elements_kind) ||
- elements_kind == DICTIONARY_ELEMENTS;
+ elements_kind == DICTIONARY_ELEMENTS;
}
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind) {
if (!IsFastElementsKind(from_kind)) return false;
if (!IsFastTransitionTarget(to_kind)) return false;
- DCHECK(!IsFixedTypedArrayElementsKind(from_kind));
- DCHECK(!IsFixedTypedArrayElementsKind(to_kind));
+ DCHECK(!IsTypedArrayElementsKind(from_kind));
+ DCHECK(!IsTypedArrayElementsKind(to_kind));
switch (from_kind) {
case PACKED_SMI_ELEMENTS:
return to_kind != PACKED_SMI_ELEMENTS;
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/objects/elements-kind.h
index 511bb0b0f4..3ed6ea66ec 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/objects/elements-kind.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ELEMENTS_KIND_H_
-#define V8_ELEMENTS_KIND_H_
+#ifndef V8_OBJECTS_ELEMENTS_KIND_H_
+#define V8_OBJECTS_ELEMENTS_KIND_H_
#include "src/base/macros.h"
-#include "src/checks.h"
-#include "src/flags.h"
-#include "src/utils.h"
+#include "src/common/checks.h"
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -43,9 +43,13 @@ enum ElementsKind : uint8_t {
PACKED_DOUBLE_ELEMENTS,
HOLEY_DOUBLE_ELEMENTS,
- // The sealed, frozen kind for packed elements.
+ // The sealed kind for elements.
PACKED_SEALED_ELEMENTS,
+ HOLEY_SEALED_ELEMENTS,
+
+ // The frozen kind for elements.
PACKED_FROZEN_ELEMENTS,
+ HOLEY_FROZEN_ELEMENTS,
// The "slow" kind.
DICTIONARY_ELEMENTS,
@@ -75,7 +79,7 @@ enum ElementsKind : uint8_t {
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = BIGINT64_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS,
- LAST_FROZEN_ELEMENTS_KIND = PACKED_FROZEN_ELEMENTS,
+ LAST_FROZEN_ELEMENTS_KIND = HOLEY_FROZEN_ELEMENTS,
// Alias for kSystemPointerSize-sized elements
#ifdef V8_COMPRESS_POINTERS
@@ -119,14 +123,13 @@ inline bool IsStringWrapperElementsKind(ElementsKind kind) {
SLOW_STRING_WRAPPER_ELEMENTS);
}
-inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
+inline bool IsTypedArrayElementsKind(ElementsKind kind) {
return IsInRange(kind, FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
}
inline bool IsTerminalElementsKind(ElementsKind kind) {
- return kind == TERMINAL_FAST_ELEMENTS_KIND ||
- IsFixedTypedArrayElementsKind(kind);
+ return kind == TERMINAL_FAST_ELEMENTS_KIND || IsTypedArrayElementsKind(kind);
}
inline bool IsFastElementsKind(ElementsKind kind) {
@@ -135,7 +138,7 @@ inline bool IsFastElementsKind(ElementsKind kind) {
}
inline bool IsTransitionElementsKind(ElementsKind kind) {
- return IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind) ||
+ return IsFastElementsKind(kind) || IsTypedArrayElementsKind(kind) ||
kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS ||
kind == FAST_STRING_WRAPPER_ELEMENTS;
}
@@ -144,38 +147,39 @@ inline bool IsDoubleElementsKind(ElementsKind kind) {
return IsInRange(kind, PACKED_DOUBLE_ELEMENTS, HOLEY_DOUBLE_ELEMENTS);
}
-
inline bool IsFixedFloatElementsKind(ElementsKind kind) {
return kind == FLOAT32_ELEMENTS || kind == FLOAT64_ELEMENTS;
}
-
inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
return IsDoubleElementsKind(kind) || IsFixedFloatElementsKind(kind);
}
-inline bool IsPackedFrozenOrSealedElementsKind(ElementsKind kind) {
- DCHECK_IMPLIES(
- IsInRange(kind, PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS),
- FLAG_enable_sealed_frozen_elements_kind);
- return IsInRange(kind, PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS);
+// This predicate is used for disabling respective functionality in builtins.
+inline bool IsFrozenOrSealedElementsKindUnchecked(ElementsKind kind) {
+ return IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_FROZEN_ELEMENTS);
+}
+
+inline bool IsFrozenOrSealedElementsKind(ElementsKind kind) {
+ DCHECK_IMPLIES(IsFrozenOrSealedElementsKindUnchecked(kind),
+ FLAG_enable_sealed_frozen_elements_kind);
+ return IsFrozenOrSealedElementsKindUnchecked(kind);
}
inline bool IsSealedElementsKind(ElementsKind kind) {
- DCHECK_IMPLIES(kind == PACKED_SEALED_ELEMENTS,
+ DCHECK_IMPLIES(IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS),
FLAG_enable_sealed_frozen_elements_kind);
- return kind == PACKED_SEALED_ELEMENTS;
+ return IsInRange(kind, PACKED_SEALED_ELEMENTS, HOLEY_SEALED_ELEMENTS);
}
inline bool IsFrozenElementsKind(ElementsKind kind) {
- DCHECK_IMPLIES(kind == PACKED_FROZEN_ELEMENTS,
+ DCHECK_IMPLIES(IsInRange(kind, PACKED_FROZEN_ELEMENTS, HOLEY_FROZEN_ELEMENTS),
FLAG_enable_sealed_frozen_elements_kind);
- return kind == PACKED_FROZEN_ELEMENTS;
+ return IsInRange(kind, PACKED_FROZEN_ELEMENTS, HOLEY_FROZEN_ELEMENTS);
}
inline bool IsSmiOrObjectElementsKind(ElementsKind kind) {
- return kind == PACKED_SMI_ELEMENTS || kind == HOLEY_SMI_ELEMENTS ||
- kind == PACKED_ELEMENTS || kind == HOLEY_ELEMENTS;
+ return IsInRange(kind, PACKED_SMI_ELEMENTS, HOLEY_ELEMENTS);
}
inline bool IsSmiElementsKind(ElementsKind kind) {
@@ -190,22 +194,28 @@ inline bool IsObjectElementsKind(ElementsKind kind) {
return IsInRange(kind, PACKED_ELEMENTS, HOLEY_ELEMENTS);
}
+inline bool IsHoleyFrozenOrSealedElementsKind(ElementsKind kind) {
+ DCHECK_IMPLIES(kind == HOLEY_SEALED_ELEMENTS || kind == HOLEY_FROZEN_ELEMENTS,
+ FLAG_enable_sealed_frozen_elements_kind);
+ return kind == HOLEY_SEALED_ELEMENTS || kind == HOLEY_FROZEN_ELEMENTS;
+}
+
inline bool IsHoleyElementsKind(ElementsKind kind) {
- return kind == HOLEY_SMI_ELEMENTS || kind == HOLEY_DOUBLE_ELEMENTS ||
- kind == HOLEY_ELEMENTS;
+ return kind % 2 == 1 && kind <= HOLEY_DOUBLE_ELEMENTS;
}
-inline bool IsHoleyOrDictionaryElementsKind(ElementsKind kind) {
- return IsHoleyElementsKind(kind) || kind == DICTIONARY_ELEMENTS;
+inline bool IsHoleyElementsKindForRead(ElementsKind kind) {
+ return kind % 2 == 1 && kind <= HOLEY_FROZEN_ELEMENTS;
}
+inline bool IsHoleyOrDictionaryElementsKind(ElementsKind kind) {
+ return IsHoleyElementsKindForRead(kind) || kind == DICTIONARY_ELEMENTS;
+}
inline bool IsFastPackedElementsKind(ElementsKind kind) {
- return kind == PACKED_SMI_ELEMENTS || kind == PACKED_DOUBLE_ELEMENTS ||
- kind == PACKED_ELEMENTS;
+ return kind % 2 == 0 && kind <= PACKED_DOUBLE_ELEMENTS;
}
-
inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
if (holey_kind == HOLEY_SMI_ELEMENTS) {
return PACKED_SMI_ELEMENTS;
@@ -219,7 +229,6 @@ inline ElementsKind GetPackedElementsKind(ElementsKind holey_kind) {
return holey_kind;
}
-
inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
if (packed_kind == PACKED_SMI_ELEMENTS) {
return HOLEY_SMI_ELEMENTS;
@@ -278,18 +287,15 @@ inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
return (from_kind == PACKED_SMI_ELEMENTS) ? PACKED_ELEMENTS : HOLEY_ELEMENTS;
}
-
inline bool IsSimpleMapChangeTransition(ElementsKind from_kind,
ElementsKind to_kind) {
return (GetHoleyElementsKind(from_kind) == to_kind) ||
(IsSmiElementsKind(from_kind) && IsObjectElementsKind(to_kind));
}
-
bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind);
-
inline ElementsKind GetMoreGeneralElementsKind(ElementsKind from_kind,
ElementsKind to_kind) {
if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
@@ -298,7 +304,6 @@ inline ElementsKind GetMoreGeneralElementsKind(ElementsKind from_kind,
return from_kind;
}
-
inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
return IsFastElementsKind(from_kind) &&
from_kind != TERMINAL_FAST_ELEMENTS_KIND;
@@ -309,4 +314,4 @@ inline bool ElementsKindEqual(ElementsKind a, ElementsKind b) { return a == b; }
} // namespace internal
} // namespace v8
-#endif // V8_ELEMENTS_KIND_H_
+#endif // V8_OBJECTS_ELEMENTS_KIND_H_
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/objects/elements.cc
index 5262cb7a60..e1232a0d5b 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/objects/elements.cc
@@ -2,25 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/elements.h"
+#include "src/objects/elements.h"
-#include "src/arguments.h"
-#include "src/conversions.h"
-#include "src/frames.h"
+#include "src/execution/arguments.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For MaxNumberToStringCacheSize.
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/isolate-inl.h"
-#include "src/keys.h"
-#include "src/message-template.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/keys.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/slots.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
// several abstract ElementsAccessor classes are used to allow sharing
@@ -33,24 +33,28 @@
// - FastPackedSmiElementsAccessor
// - FastHoleySmiElementsAccessor
// - FastPackedObjectElementsAccessor
-// - FastPackedFrozenObjectElementsAccessor
-// - FastPackedSealedObjectElementsAccessor
+// - FastSealedObjectElementsAccessor: template
+// - FastPackedSealedObjectElementsAccessor
+// - FastHoleySealedObjectElementsAccessor
+// - FastFrozenObjectElementsAccessor: template
+// - FastPackedFrozenObjectElementsAccessor
+// - FastHoleyFrozenObjectElementsAccessor
// - FastHoleyObjectElementsAccessor
// - FastDoubleElementsAccessor
// - FastPackedDoubleElementsAccessor
// - FastHoleyDoubleElementsAccessor
// - TypedElementsAccessor: template, with instantiations:
-// - FixedUint8ElementsAccessor
-// - FixedInt8ElementsAccessor
-// - FixedUint16ElementsAccessor
-// - FixedInt16ElementsAccessor
-// - FixedUint32ElementsAccessor
-// - FixedInt32ElementsAccessor
-// - FixedFloat32ElementsAccessor
-// - FixedFloat64ElementsAccessor
-// - FixedUint8ClampedElementsAccessor
-// - FixedBigUint64ElementsAccessor
-// - FixedBigInt64ElementsAccessor
+// - Uint8ElementsAccessor
+// - Int8ElementsAccessor
+// - Uint16ElementsAccessor
+// - Int16ElementsAccessor
+// - Uint32ElementsAccessor
+// - Int32ElementsAccessor
+// - Float32ElementsAccessor
+// - Float64ElementsAccessor
+// - Uint8ClampedElementsAccessor
+// - BigUint64ElementsAccessor
+// - BigInt64ElementsAccessor
// - DictionaryElementsAccessor
// - SloppyArgumentsElementsAccessor
// - FastSloppyArgumentsElementsAccessor
@@ -62,20 +66,12 @@
namespace v8 {
namespace internal {
-// Explicit instantiation declarations.
-extern template void JSObject::ApplyAttributesToDictionary(
- Isolate* isolate, ReadOnlyRoots roots, Handle<NumberDictionary> dictionary,
- const PropertyAttributes attributes);
-
-
namespace {
-
static const int kPackedSizeNotKnown = -1;
enum Where { AT_START, AT_END };
-
// First argument in list is the accessor class, the second argument is the
// accessor ElementsKind, and the third is the backing store class. Use the
// fast element handler for smi-only arrays. The implementation is currently
@@ -91,8 +87,10 @@ enum Where { AT_START, AT_END };
V(FastHoleyDoubleElementsAccessor, HOLEY_DOUBLE_ELEMENTS, FixedDoubleArray) \
V(FastPackedSealedObjectElementsAccessor, PACKED_SEALED_ELEMENTS, \
FixedArray) \
+ V(FastHoleySealedObjectElementsAccessor, HOLEY_SEALED_ELEMENTS, FixedArray) \
V(FastPackedFrozenObjectElementsAccessor, PACKED_FROZEN_ELEMENTS, \
FixedArray) \
+ V(FastHoleyFrozenObjectElementsAccessor, HOLEY_FROZEN_ELEMENTS, FixedArray) \
V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, NumberDictionary) \
V(FastSloppyArgumentsElementsAccessor, FAST_SLOPPY_ARGUMENTS_ELEMENTS, \
FixedArray) \
@@ -102,22 +100,22 @@ enum Where { AT_START, AT_END };
FixedArray) \
V(SlowStringWrapperElementsAccessor, SLOW_STRING_WRAPPER_ELEMENTS, \
FixedArray) \
- V(FixedUint8ElementsAccessor, UINT8_ELEMENTS, FixedUint8Array) \
- V(FixedInt8ElementsAccessor, INT8_ELEMENTS, FixedInt8Array) \
- V(FixedUint16ElementsAccessor, UINT16_ELEMENTS, FixedUint16Array) \
- V(FixedInt16ElementsAccessor, INT16_ELEMENTS, FixedInt16Array) \
- V(FixedUint32ElementsAccessor, UINT32_ELEMENTS, FixedUint32Array) \
- V(FixedInt32ElementsAccessor, INT32_ELEMENTS, FixedInt32Array) \
- V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
- V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
- V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
- FixedUint8ClampedArray) \
- V(FixedBigUint64ElementsAccessor, BIGUINT64_ELEMENTS, FixedBigUint64Array) \
- V(FixedBigInt64ElementsAccessor, BIGINT64_ELEMENTS, FixedBigInt64Array)
-
-template<ElementsKind Kind> class ElementsKindTraits {
+ V(Uint8ElementsAccessor, UINT8_ELEMENTS, ByteArray) \
+ V(Int8ElementsAccessor, INT8_ELEMENTS, ByteArray) \
+ V(Uint16ElementsAccessor, UINT16_ELEMENTS, ByteArray) \
+ V(Int16ElementsAccessor, INT16_ELEMENTS, ByteArray) \
+ V(Uint32ElementsAccessor, UINT32_ELEMENTS, ByteArray) \
+ V(Int32ElementsAccessor, INT32_ELEMENTS, ByteArray) \
+ V(Float32ElementsAccessor, FLOAT32_ELEMENTS, ByteArray) \
+ V(Float64ElementsAccessor, FLOAT64_ELEMENTS, ByteArray) \
+ V(Uint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, ByteArray) \
+ V(BigUint64ElementsAccessor, BIGUINT64_ELEMENTS, ByteArray) \
+ V(BigInt64ElementsAccessor, BIGINT64_ELEMENTS, ByteArray)
+
+template <ElementsKind Kind>
+class ElementsKindTraits {
public:
- typedef FixedArrayBase BackingStore;
+ using BackingStore = FixedArrayBase;
};
#define ELEMENTS_TRAITS(Class, KindParam, Store) \
@@ -125,7 +123,7 @@ template<ElementsKind Kind> class ElementsKindTraits {
class ElementsKindTraits<KindParam> { \
public: /* NOLINT */ \
static constexpr ElementsKind Kind = KindParam; \
- typedef Store BackingStore; \
+ using BackingStore = Store; \
}; \
constexpr ElementsKind ElementsKindTraits<KindParam>::Kind;
ELEMENTS_LIST(ELEMENTS_TRAITS)
@@ -148,25 +146,25 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
FixedArrayBase to_base, ElementsKind to_kind,
uint32_t to_start, int raw_copy_size) {
ReadOnlyRoots roots(isolate);
- DCHECK(to_base->map() != roots.fixed_cow_array_map());
+ DCHECK(to_base.map() != roots.fixed_cow_array_map());
DisallowHeapAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from_base->length() - from_start,
- to_base->length() - to_start);
+ copy_size =
+ Min(from_base.length() - from_start, to_base.length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
int start = to_start + copy_size;
- int length = to_base->length() - start;
+ int length = to_base.length() - start;
if (length > 0) {
- MemsetTagged(FixedArray::cast(to_base)->RawFieldOfElementAt(start),
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
roots.the_hole_value(), length);
}
}
}
- DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
if (copy_size == 0) return;
FixedArray from = FixedArray::cast(from_base);
FixedArray to = FixedArray::cast(to_base);
@@ -177,8 +175,8 @@ void CopyObjectToObjectElements(Isolate* isolate, FixedArrayBase from_base,
(IsObjectElementsKind(from_kind) && IsObjectElementsKind(to_kind))
? UPDATE_WRITE_BARRIER
: SKIP_WRITE_BARRIER;
- to->CopyElements(isolate->heap(), to_start, from, from_start, copy_size,
- write_barrier_mode);
+ to.CopyElements(isolate, to_start, from, from_start, copy_size,
+ write_barrier_mode);
}
static void CopyDictionaryToObjectElements(
@@ -191,12 +189,12 @@ static void CopyDictionaryToObjectElements(
if (raw_copy_size < 0) {
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->max_number_key() + 1 - from_start;
+ copy_size = from.max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
int start = to_start + copy_size;
- int length = to_base->length() - start;
+ int length = to_base.length() - start;
if (length > 0) {
- MemsetTagged(FixedArray::cast(to_base)->RawFieldOfElementAt(start),
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
@@ -205,19 +203,19 @@ static void CopyDictionaryToObjectElements(
DCHECK(IsSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
FixedArray to = FixedArray::cast(to_base);
- uint32_t to_length = to->length();
+ uint32_t to_length = to.length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
}
WriteBarrierMode write_barrier_mode = GetWriteBarrierMode(to_kind);
for (int i = 0; i < copy_size; i++) {
- int entry = from->FindEntry(isolate, i + from_start);
+ int entry = from.FindEntry(isolate, i + from_start);
if (entry != NumberDictionary::kNotFound) {
- Object value = from->ValueAt(entry);
- DCHECK(!value->IsTheHole(isolate));
- to->set(i + to_start, value, write_barrier_mode);
+ Object value = from.ValueAt(entry);
+ DCHECK(!value.IsTheHole(isolate));
+ to.set(i + to_start, value, write_barrier_mode);
} else {
- to->set_the_hole(isolate, i + to_start);
+ to.set_the_hole(isolate, i + to_start);
}
}
}
@@ -235,23 +233,23 @@ static void CopyDoubleToObjectElements(Isolate* isolate,
DisallowHeapAllocation no_allocation;
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from_base->length() - from_start,
- to_base->length() - to_start);
+ copy_size =
+ Min(from_base.length() - from_start, to_base.length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
// Also initialize the area that will be copied over since HeapNumber
// allocation below can cause an incremental marking step, requiring all
// existing heap objects to be propertly initialized.
int start = to_start;
- int length = to_base->length() - start;
+ int length = to_base.length() - start;
if (length > 0) {
- MemsetTagged(FixedArray::cast(to_base)->RawFieldOfElementAt(start),
+ MemsetTagged(FixedArray::cast(to_base).RawFieldOfElementAt(start),
ReadOnlyRoots(isolate).the_hole_value(), length);
}
}
}
- DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
if (copy_size == 0) return;
// From here on, the code below could actually allocate. Therefore the raw
@@ -283,21 +281,21 @@ static void CopyDoubleToDoubleElements(FixedArrayBase from_base,
if (raw_copy_size < 0) {
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from_base->length() - from_start,
- to_base->length() - to_start);
+ copy_size =
+ Min(from_base.length() - from_start, to_base.length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
}
- DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
if (copy_size == 0) return;
FixedDoubleArray from = FixedDoubleArray::cast(from_base);
FixedDoubleArray to = FixedDoubleArray::cast(to_base);
- Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
- Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
+ Address to_address = to.address() + FixedDoubleArray::kHeaderSize;
+ Address from_address = from.address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
from_address += kDoubleSize * from_start;
#ifdef V8_COMPRESS_POINTERS
@@ -322,26 +320,26 @@ static void CopySmiToDoubleElements(FixedArrayBase from_base,
if (raw_copy_size < 0) {
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from_base->length() - from_start;
+ copy_size = from_base.length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
}
- DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
if (copy_size == 0) return;
FixedArray from = FixedArray::cast(from_base);
FixedDoubleArray to = FixedDoubleArray::cast(to_base);
- Object the_hole = from->GetReadOnlyRoots().the_hole_value();
+ Object the_hole = from.GetReadOnlyRoots().the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
- Object hole_or_smi = from->get(from_start);
+ Object hole_or_smi = from.get(from_start);
if (hole_or_smi == the_hole) {
- to->set_the_hole(to_start);
+ to.set_the_hole(to_start);
} else {
- to->set(to_start, Smi::ToInt(hole_or_smi));
+ to.set(to_start, Smi::ToInt(hole_or_smi));
}
}
}
@@ -359,9 +357,9 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = packed_size - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- to_end = to_base->length();
+ to_end = to_base.length();
for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
@@ -369,18 +367,18 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase from_base,
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
- DCHECK(static_cast<int>(to_end) <= to_base->length());
+ DCHECK(static_cast<int>(to_end) <= to_base.length());
DCHECK(packed_size >= 0 && packed_size <= copy_size);
- DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
if (copy_size == 0) return;
FixedArray from = FixedArray::cast(from_base);
FixedDoubleArray to = FixedDoubleArray::cast(to_base);
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
- Object smi = from->get(from_start);
- DCHECK(!smi->IsTheHole());
- to->set(to_start, Smi::ToInt(smi));
+ Object smi = from.get(from_start);
+ DCHECK(!smi.IsTheHole());
+ to.set(to_start, Smi::ToInt(smi));
}
}
@@ -393,26 +391,26 @@ static void CopyObjectToDoubleElements(FixedArrayBase from_base,
if (raw_copy_size < 0) {
DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from_base->length() - from_start;
+ copy_size = from_base.length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
}
- DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ DCHECK((copy_size + static_cast<int>(to_start)) <= to_base.length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base.length());
if (copy_size == 0) return;
FixedArray from = FixedArray::cast(from_base);
FixedDoubleArray to = FixedDoubleArray::cast(to_base);
- Object the_hole = from->GetReadOnlyRoots().the_hole_value();
- for (uint32_t from_end = from_start + copy_size;
- from_start < from_end; from_start++, to_start++) {
- Object hole_or_object = from->get(from_start);
+ Object the_hole = from.GetReadOnlyRoots().the_hole_value();
+ for (uint32_t from_end = from_start + copy_size; from_start < from_end;
+ from_start++, to_start++) {
+ Object hole_or_object = from.get(from_start);
if (hole_or_object == the_hole) {
- to->set_the_hole(to_start);
+ to.set_the_hole(to_start);
} else {
- to->set(to_start, hole_or_object->Number());
+ to.set(to_start, hole_or_object.Number());
}
}
}
@@ -426,75 +424,54 @@ static void CopyDictionaryToDoubleElements(
if (copy_size < 0) {
DCHECK(copy_size == ElementsAccessor::kCopyToEnd ||
copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->max_number_key() + 1 - from_start;
+ copy_size = from.max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base.length(); ++i) {
+ FixedDoubleArray::cast(to_base).set_the_hole(i);
}
}
}
if (copy_size == 0) return;
FixedDoubleArray to = FixedDoubleArray::cast(to_base);
- uint32_t to_length = to->length();
+ uint32_t to_length = to.length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
}
for (int i = 0; i < copy_size; i++) {
- int entry = from->FindEntry(isolate, i + from_start);
+ int entry = from.FindEntry(isolate, i + from_start);
if (entry != NumberDictionary::kNotFound) {
- to->set(i + to_start, from->ValueAt(entry)->Number());
+ to.set(i + to_start, from.ValueAt(entry).Number());
} else {
- to->set_the_hole(i + to_start);
- }
- }
-}
-
-static void TraceTopFrame(Isolate* isolate) {
- StackFrameIterator it(isolate);
- if (it.done()) {
- PrintF("unknown location (no JavaScript frames present)");
- return;
- }
- StackFrame* raw_frame = it.frame();
- if (raw_frame->is_internal()) {
- Code current_code_object =
- isolate->heap()->GcSafeFindCodeForInnerPointer(raw_frame->pc());
- if (current_code_object->builtin_index() ==
- Builtins::kFunctionPrototypeApply) {
- PrintF("apply from ");
- it.Advance();
- raw_frame = it.frame();
+ to.set_the_hole(i + to_start);
}
}
- JavaScriptFrame::PrintTop(isolate, stdout, false, true);
}
-static void SortIndices(
- Isolate* isolate, Handle<FixedArray> indices, uint32_t sort_size,
- WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) {
+static void SortIndices(Isolate* isolate, Handle<FixedArray> indices,
+ uint32_t sort_size) {
// Use AtomicSlot wrapper to ensure that std::sort uses atomic load and
// store operations that are safe for concurrent marking.
AtomicSlot start(indices->GetFirstElementAddress());
- std::sort(start, start + sort_size,
- [isolate](Tagged_t elementA, Tagged_t elementB) {
+ AtomicSlot end(start + sort_size);
+ std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) {
#ifdef V8_COMPRESS_POINTERS
- Object a(DecompressTaggedAny(isolate->isolate_root(), elementA));
- Object b(DecompressTaggedAny(isolate->isolate_root(), elementB));
+ DEFINE_ROOT_VALUE(isolate);
+ Object a(DecompressTaggedAny(ROOT_VALUE, elementA));
+ Object b(DecompressTaggedAny(ROOT_VALUE, elementB));
#else
- Object a(elementA);
- Object b(elementB);
+ Object a(elementA);
+ Object b(elementB);
#endif
- if (a->IsSmi() || !a->IsUndefined(isolate)) {
- if (!b->IsSmi() && b->IsUndefined(isolate)) {
- return true;
- }
- return a->Number() < b->Number();
- }
- return !b->IsSmi() && b->IsUndefined(isolate);
- });
- if (write_barrier_mode != SKIP_WRITE_BARRIER) {
- FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(isolate->heap(), *indices, 0, sort_size);
- }
+ if (a.IsSmi() || !a.IsUndefined(isolate)) {
+ if (!b.IsSmi() && b.IsUndefined(isolate)) {
+ return true;
+ }
+ return a.Number() < b.Number();
+ }
+ return !b.IsSmi() && b.IsUndefined(isolate);
+ });
+ isolate->heap()->WriteBarrierForRange(*indices, ObjectSlot(start),
+ ObjectSlot(end));
}
static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
@@ -543,9 +520,6 @@ static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
// that take an entry (instead of an index) as an argument.
class InternalElementsAccessor : public ElementsAccessor {
public:
- explicit InternalElementsAccessor(const char* name)
- : ElementsAccessor(name) {}
-
uint32_t GetEntryForIndex(Isolate* isolate, JSObject holder,
FixedArrayBase backing_store,
uint32_t index) override = 0;
@@ -573,29 +547,31 @@ class InternalElementsAccessor : public ElementsAccessor {
template <typename Subclass, typename ElementsTraitsParam>
class ElementsAccessorBase : public InternalElementsAccessor {
public:
- explicit ElementsAccessorBase(const char* name)
- : InternalElementsAccessor(name) {}
+ ElementsAccessorBase() = default;
- typedef ElementsTraitsParam ElementsTraits;
- typedef typename ElementsTraitsParam::BackingStore BackingStore;
+ using ElementsTraits = ElementsTraitsParam;
+ using BackingStore = typename ElementsTraitsParam::BackingStore;
static ElementsKind kind() { return ElementsTraits::Kind; }
static void ValidateContents(JSObject holder, int length) {}
static void ValidateImpl(JSObject holder) {
- FixedArrayBase fixed_array_base = holder->elements();
- if (!fixed_array_base->IsHeapObject()) return;
+ FixedArrayBase fixed_array_base = holder.elements();
+ if (!fixed_array_base.IsHeapObject()) return;
// Arrays that have been shifted in place can't be verified.
- if (fixed_array_base->IsFiller()) return;
+ if (fixed_array_base.IsFiller()) return;
int length = 0;
- if (holder->IsJSArray()) {
- Object length_obj = JSArray::cast(holder)->length();
- if (length_obj->IsSmi()) {
+ if (holder.IsJSArray()) {
+ Object length_obj = JSArray::cast(holder).length();
+ if (length_obj.IsSmi()) {
length = Smi::ToInt(length_obj);
}
+ } else if (holder.IsJSTypedArray()) {
+ // TODO(bmeurer, v8:4153): Change this to size_t later.
+ length = static_cast<int>(JSTypedArray::cast(holder).length());
} else {
- length = fixed_array_base->length();
+ length = fixed_array_base.length();
}
Subclass::ValidateContents(holder, length);
}
@@ -609,7 +585,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
uint32_t start, uint32_t end) {
DisallowHeapAllocation no_gc;
if (IsFastPackedElementsKind(kind())) return true;
- Isolate* isolate = holder->GetIsolate();
+ Isolate* isolate = holder.GetIsolate();
for (uint32_t i = start; i < end; i++) {
if (!Subclass::HasElementImpl(isolate, holder, i, backing_store,
ALL_PROPERTIES)) {
@@ -638,7 +614,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
bool HasElement(JSObject holder, uint32_t index, FixedArrayBase backing_store,
PropertyFilter filter) final {
- return Subclass::HasElementImpl(holder->GetIsolate(), holder, index,
+ return Subclass::HasElementImpl(holder.GetIsolate(), holder, index,
backing_store, filter);
}
@@ -650,7 +626,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
bool HasEntry(JSObject holder, uint32_t entry) final {
- return Subclass::HasEntryImpl(holder->GetIsolate(), holder->elements(),
+ return Subclass::HasEntryImpl(holder.GetIsolate(), holder.elements(),
entry);
}
@@ -660,7 +636,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
bool HasAccessors(JSObject holder) final {
- return Subclass::HasAccessorsImpl(holder, holder->elements());
+ return Subclass::HasAccessorsImpl(holder, holder.elements());
}
static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
@@ -679,7 +655,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
uint32_t index = GetIndexForEntryImpl(backing_store, entry);
- return handle(BackingStore::cast(backing_store)->get(index), isolate);
+ return handle(BackingStore::cast(backing_store).get(index), isolate);
}
void Set(Handle<JSObject> holder, uint32_t entry, Object value) final {
@@ -730,31 +706,17 @@ class ElementsAccessorBase : public InternalElementsAccessor {
UNREACHABLE();
}
- Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
- uint32_t end) final {
- return Subclass::SliceImpl(receiver, start, end);
- }
-
- static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
- uint32_t end) {
- UNREACHABLE();
- }
-
Handle<Object> Pop(Handle<JSArray> receiver) final {
return Subclass::PopImpl(receiver);
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver) {
- UNREACHABLE();
- }
+ static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
Handle<Object> Shift(Handle<JSArray> receiver) final {
return Subclass::ShiftImpl(receiver);
}
- static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
- UNREACHABLE();
- }
+ static Handle<Object> ShiftImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
void SetLength(Handle<JSArray> array, uint32_t length) final {
Subclass::SetLengthImpl(array->GetIsolate(), array, length,
@@ -767,7 +729,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
DCHECK(!array->SetLengthWouldNormalize(length));
DCHECK(IsFastElementsKind(array->GetElementsKind()));
uint32_t old_length = 0;
- CHECK(array->length()->ToArrayIndex(&old_length));
+ CHECK(array->length().ToArrayIndex(&old_length));
if (old_length < length) {
ElementsKind kind = array->GetElementsKind();
@@ -800,11 +762,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
isolate->heap()->RightTrimFixedArray(*backing_store, elements_to_trim);
// Fill the non-trimmed elements with holes.
BackingStore::cast(*backing_store)
- ->FillWithHoles(length,
- std::min(old_length, capacity - elements_to_trim));
+ .FillWithHoles(length,
+ std::min(old_length, capacity - elements_to_trim));
} else {
// Otherwise, fill the unused tail with holes.
- BackingStore::cast(*backing_store)->FillWithHoles(length, old_length);
+ BackingStore::cast(*backing_store).FillWithHoles(length, old_length);
}
} else {
// Check whether the backing store should be expanded.
@@ -817,7 +779,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
uint32_t NumberOfElements(JSObject receiver) final {
- return Subclass::NumberOfElementsImpl(receiver, receiver->elements());
+ return Subclass::NumberOfElementsImpl(receiver, receiver.elements());
}
static uint32_t NumberOfElementsImpl(JSObject receiver,
@@ -826,10 +788,10 @@ class ElementsAccessorBase : public InternalElementsAccessor {
}
static uint32_t GetMaxIndex(JSObject receiver, FixedArrayBase elements) {
- if (receiver->IsJSArray()) {
- DCHECK(JSArray::cast(receiver)->length()->IsSmi());
+ if (receiver.IsJSArray()) {
+ DCHECK(JSArray::cast(receiver).length().IsSmi());
return static_cast<uint32_t>(
- Smi::ToInt(JSArray::cast(receiver)->length()));
+ Smi::ToInt(JSArray::cast(receiver).length()));
}
return Subclass::GetCapacityImpl(receiver, elements);
}
@@ -868,7 +830,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
int packed_size = kPackedSizeNotKnown;
if (IsFastPackedElementsKind(from_kind) && object->IsJSArray()) {
- packed_size = Smi::ToInt(JSArray::cast(*object)->length());
+ packed_size = Smi::ToInt(JSArray::cast(*object).length());
}
Subclass::CopyElementsImpl(isolate, *old_elements, src_index, *new_elements,
@@ -903,7 +865,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
DCHECK(
(IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
(IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
- uint32_t capacity = static_cast<uint32_t>(object->elements()->length());
+ uint32_t capacity = static_cast<uint32_t>(object->elements().length());
Handle<FixedArrayBase> elements = ConvertElementsWithCapacity(
object, from_elements, from_kind, capacity);
JSObject::SetMapAndElements(object, to_map, elements);
@@ -969,7 +931,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
bool GrowCapacity(Handle<JSObject> object, uint32_t index) final {
// This function is intended to be called from optimized code. We don't
// want to trigger lazy deopts there, so refuse to handle cases that would.
- if (object->map()->is_prototype_map() ||
+ if (object->map().is_prototype_map() ||
object->WouldConvertToSlowElements(index)) {
return false;
}
@@ -1006,15 +968,15 @@ class ElementsAccessorBase : public InternalElementsAccessor {
ElementsKind from_kind, Handle<FixedArrayBase> to,
uint32_t to_start, int copy_size) final {
int packed_size = kPackedSizeNotKnown;
- bool is_packed = IsFastPackedElementsKind(from_kind) &&
- from_holder->IsJSArray();
+ bool is_packed =
+ IsFastPackedElementsKind(from_kind) && from_holder.IsJSArray();
if (is_packed) {
- packed_size = Smi::ToInt(JSArray::cast(from_holder)->length());
+ packed_size = Smi::ToInt(JSArray::cast(from_holder).length());
if (copy_size >= 0 && packed_size > copy_size) {
packed_size = copy_size;
}
}
- FixedArrayBase from = from_holder->elements();
+ FixedArrayBase from = from_holder.elements();
// NOTE: the Subclass::CopyElementsImpl() methods
// violate the handlified function signature convention:
// raw pointer parameters in the function that allocates. This is done
@@ -1024,7 +986,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
// copying from object with fast double elements to object with object
// elements. In all the other cases there are no allocations performed and
// handle creation causes noticeable performance degradation of the builtin.
- Subclass::CopyElementsImpl(from_holder->GetIsolate(), from, from_start, *to,
+ Subclass::CopyElementsImpl(from_holder.GetIsolate(), from, from_start, *to,
from_kind, to_start, packed_size, copy_size);
}
@@ -1105,7 +1067,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
Handle<Object> value;
if (details.kind() == kData) {
- value = Subclass::GetImpl(isolate, object->elements(), entry);
+ value = Subclass::GetInternalImpl(object, entry);
} else {
// This might modify the elements and/or change the elements kind.
LookupIterator it(isolate, object, index, LookupIterator::OWN);
@@ -1257,7 +1219,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
if (convert == GetKeysConversion::kConvertToString) {
for (uint32_t i = 0; i < nof_indices; i++) {
Handle<Object> index_string = isolate->factory()->Uint32ToString(
- combined_keys->get(i)->Number());
+ combined_keys->get(i).Number());
combined_keys->set(i, *index_string);
}
}
@@ -1289,7 +1251,7 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static uint32_t GetCapacityImpl(JSObject holder,
FixedArrayBase backing_store) {
- return backing_store->length();
+ return backing_store.length();
}
uint32_t GetCapacity(JSObject holder, FixedArrayBase backing_store) final {
@@ -1358,13 +1320,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
FixedArrayBase backing_store,
uint32_t index, PropertyFilter filter) {
- DCHECK(IsFastElementsKind(kind()) ||
- IsPackedFrozenOrSealedElementsKind(kind()));
+ DCHECK(IsFastElementsKind(kind()) || IsFrozenOrSealedElementsKind(kind()));
uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
- if (IsHoleyElementsKind(kind())) {
- return index < length &&
- !BackingStore::cast(backing_store)
- ->is_the_hole(isolate, index)
+ if (IsHoleyElementsKindForRead(kind())) {
+ return index < length && !BackingStore::cast(backing_store)
+ .is_the_hole(isolate, index)
? index
: kMaxUInt32;
} else {
@@ -1408,15 +1368,10 @@ class ElementsAccessorBase : public InternalElementsAccessor {
DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase);
};
-
class DictionaryElementsAccessor
: public ElementsAccessorBase<DictionaryElementsAccessor,
- ElementsKindTraits<DICTIONARY_ELEMENTS> > {
+ ElementsKindTraits<DICTIONARY_ELEMENTS>> {
public:
- explicit DictionaryElementsAccessor(const char* name)
- : ElementsAccessorBase<DictionaryElementsAccessor,
- ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
-
static uint32_t GetMaxIndex(JSObject receiver, FixedArrayBase elements) {
// We cannot properly estimate this for dictionaries.
UNREACHABLE();
@@ -1430,7 +1385,7 @@ class DictionaryElementsAccessor
static uint32_t NumberOfElementsImpl(JSObject receiver,
FixedArrayBase backing_store) {
NumberDictionary dict = NumberDictionary::cast(backing_store);
- return dict->NumberOfElements();
+ return dict.NumberOfElements();
}
static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
@@ -1440,7 +1395,7 @@ class DictionaryElementsAccessor
Handle<NumberDictionary>::cast(backing_store);
int capacity = dict->Capacity();
uint32_t old_length = 0;
- CHECK(array->length()->ToArrayLength(&old_length));
+ CHECK(array->length().ToArrayLength(&old_length));
{
DisallowHeapAllocation no_gc;
ReadOnlyRoots roots(isolate);
@@ -1451,7 +1406,7 @@ class DictionaryElementsAccessor
for (int entry = 0; entry < capacity; entry++) {
Object index = dict->KeyAt(entry);
if (dict->IsKey(roots, index)) {
- uint32_t number = static_cast<uint32_t>(index->Number());
+ uint32_t number = static_cast<uint32_t>(index.Number());
if (length <= number && number < old_length) {
PropertyDetails details = dict->DetailsAt(entry);
if (!details.IsConfigurable()) length = number + 1;
@@ -1469,7 +1424,7 @@ class DictionaryElementsAccessor
for (int entry = 0; entry < capacity; entry++) {
Object index = dict->KeyAt(entry);
if (dict->IsKey(roots, index)) {
- uint32_t number = static_cast<uint32_t>(index->Number());
+ uint32_t number = static_cast<uint32_t>(index.Number());
if (length <= number && number < old_length) {
dict->ClearEntry(isolate, entry);
removed_entries++;
@@ -1496,38 +1451,6 @@ class DictionaryElementsAccessor
UNREACHABLE();
}
- static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
- uint32_t end) {
- Isolate* isolate = receiver->GetIsolate();
- uint32_t result_length = end < start ? 0u : end - start;
-
- // Result must also be a dictionary.
- Handle<JSArray> result_array =
- isolate->factory()->NewJSArray(0, HOLEY_ELEMENTS);
- JSObject::NormalizeElements(result_array);
- result_array->set_length(Smi::FromInt(result_length));
- Handle<NumberDictionary> source_dict(
- NumberDictionary::cast(receiver->elements()), isolate);
- int entry_count = source_dict->Capacity();
- ReadOnlyRoots roots(isolate);
- for (int i = 0; i < entry_count; i++) {
- Object key = source_dict->KeyAt(i);
- if (!source_dict->ToKey(roots, i, &key)) continue;
- uint64_t key_value = NumberToInt64(key);
- if (key_value >= start && key_value < end) {
- Handle<NumberDictionary> dest_dict(
- NumberDictionary::cast(result_array->elements()), isolate);
- Handle<Object> value(source_dict->ValueAt(i), isolate);
- PropertyDetails details = source_dict->DetailsAt(i);
- PropertyAttributes attr = details.attributes();
- AddImpl(result_array, static_cast<uint32_t>(key_value) - start, value,
- attr, 0);
- }
- }
-
- return result_array;
- }
-
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
Handle<NumberDictionary> dict(NumberDictionary::cast(obj->elements()),
obj->GetIsolate());
@@ -1538,13 +1461,13 @@ class DictionaryElementsAccessor
static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
DisallowHeapAllocation no_gc;
NumberDictionary dict = NumberDictionary::cast(backing_store);
- if (!dict->requires_slow_elements()) return false;
- int capacity = dict->Capacity();
- ReadOnlyRoots roots = holder->GetReadOnlyRoots();
+ if (!dict.requires_slow_elements()) return false;
+ int capacity = dict.Capacity();
+ ReadOnlyRoots roots = holder.GetReadOnlyRoots();
for (int i = 0; i < capacity; i++) {
- Object key = dict->KeyAt(i);
- if (!dict->IsKey(roots, key)) continue;
- PropertyDetails details = dict->DetailsAt(i);
+ Object key = dict.KeyAt(i);
+ if (!dict.IsKey(roots, key)) continue;
+ PropertyDetails details = dict.DetailsAt(i);
if (details.kind() == kAccessor) return true;
}
return false;
@@ -1552,7 +1475,7 @@ class DictionaryElementsAccessor
static Object GetRaw(FixedArrayBase store, uint32_t entry) {
NumberDictionary backing_store = NumberDictionary::cast(store);
- return backing_store->ValueAt(entry);
+ return backing_store.ValueAt(entry);
}
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
@@ -1567,7 +1490,7 @@ class DictionaryElementsAccessor
static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
Object value) {
- NumberDictionary::cast(backing_store)->ValueAtPut(entry, value);
+ NumberDictionary::cast(backing_store).ValueAtPut(entry, value);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -1576,12 +1499,12 @@ class DictionaryElementsAccessor
PropertyAttributes attributes) {
NumberDictionary dictionary = NumberDictionary::cast(*store);
if (attributes != NONE) object->RequireSlowElements(dictionary);
- dictionary->ValueAtPut(entry, *value);
- PropertyDetails details = dictionary->DetailsAt(entry);
+ dictionary.ValueAtPut(entry, *value);
+ PropertyDetails details = dictionary.DetailsAt(entry);
details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell,
details.dictionary_index());
- dictionary->DetailsAtPut(object->GetIsolate(), entry, details);
+ dictionary.DetailsAtPut(object->GetIsolate(), entry, details);
}
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -1605,15 +1528,15 @@ class DictionaryElementsAccessor
uint32_t entry) {
DisallowHeapAllocation no_gc;
NumberDictionary dict = NumberDictionary::cast(store);
- Object index = dict->KeyAt(entry);
- return !index->IsTheHole(isolate);
+ Object index = dict.KeyAt(entry);
+ return !index.IsTheHole(isolate);
}
static uint32_t GetIndexForEntryImpl(FixedArrayBase store, uint32_t entry) {
DisallowHeapAllocation no_gc;
NumberDictionary dict = NumberDictionary::cast(store);
uint32_t result = 0;
- CHECK(dict->KeyAt(entry)->ToArrayIndex(&result));
+ CHECK(dict.KeyAt(entry).ToArrayIndex(&result));
return result;
}
@@ -1622,10 +1545,10 @@ class DictionaryElementsAccessor
PropertyFilter filter) {
DisallowHeapAllocation no_gc;
NumberDictionary dictionary = NumberDictionary::cast(store);
- int entry = dictionary->FindEntry(isolate, index);
+ int entry = dictionary.FindEntry(isolate, index);
if (entry == NumberDictionary::kNotFound) return kMaxUInt32;
if (filter != ALL_PROPERTIES) {
- PropertyDetails details = dictionary->DetailsAt(entry);
+ PropertyDetails details = dictionary.DetailsAt(entry);
PropertyAttributes attr = details.attributes();
if ((attr & filter) != 0) return kMaxUInt32;
}
@@ -1633,22 +1556,22 @@ class DictionaryElementsAccessor
}
static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
- return GetDetailsImpl(holder->elements(), entry);
+ return GetDetailsImpl(holder.elements(), entry);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase backing_store,
uint32_t entry) {
- return NumberDictionary::cast(backing_store)->DetailsAt(entry);
+ return NumberDictionary::cast(backing_store).DetailsAt(entry);
}
static uint32_t FilterKey(Handle<NumberDictionary> dictionary, int entry,
Object raw_key, PropertyFilter filter) {
- DCHECK(raw_key->IsNumber());
- DCHECK_LE(raw_key->Number(), kMaxUInt32);
+ DCHECK(raw_key.IsNumber());
+ DCHECK_LE(raw_key.Number(), kMaxUInt32);
PropertyDetails details = dictionary->DetailsAt(entry);
PropertyAttributes attr = details.attributes();
if ((attr & filter) != 0) return kMaxUInt32;
- return static_cast<uint32_t>(raw_key->Number());
+ return static_cast<uint32_t>(raw_key.Number());
}
static uint32_t GetKeyForEntryImpl(Isolate* isolate,
@@ -1724,9 +1647,9 @@ class DictionaryElementsAccessor
Object k = dictionary->KeyAt(i);
if (!dictionary->IsKey(roots, k)) continue;
Object value = dictionary->ValueAt(i);
- DCHECK(!value->IsTheHole(isolate));
- DCHECK(!value->IsAccessorPair());
- DCHECK(!value->IsAccessorInfo());
+ DCHECK(!value.IsTheHole(isolate));
+ DCHECK(!value.IsAccessorPair());
+ DCHECK(!value.IsAccessorInfo());
accumulator->AddKey(value, convert);
}
}
@@ -1736,7 +1659,7 @@ class DictionaryElementsAccessor
uint32_t length, Maybe<bool>* result) {
DisallowHeapAllocation no_gc;
NumberDictionary dictionary = NumberDictionary::cast(receiver->elements());
- int capacity = dictionary->Capacity();
+ int capacity = dictionary.Capacity();
Object the_hole = ReadOnlyRoots(isolate).the_hole_value();
Object undefined = ReadOnlyRoots(isolate).undefined_value();
@@ -1744,21 +1667,21 @@ class DictionaryElementsAccessor
// must be accessed in order via the slow path.
bool found = false;
for (int i = 0; i < capacity; ++i) {
- Object k = dictionary->KeyAt(i);
+ Object k = dictionary.KeyAt(i);
if (k == the_hole) continue;
if (k == undefined) continue;
uint32_t index;
- if (!k->ToArrayIndex(&index) || index < start_from || index >= length) {
+ if (!k.ToArrayIndex(&index) || index < start_from || index >= length) {
continue;
}
- if (dictionary->DetailsAt(i).kind() == kAccessor) {
+ if (dictionary.DetailsAt(i).kind() == kAccessor) {
// Restart from beginning in slow path, otherwise we may observably
// access getters out of order
return false;
} else if (!found) {
- Object element_k = dictionary->ValueAt(i);
+ Object element_k = dictionary.ValueAt(i);
if (value->SameValueZero(element_k)) found = true;
}
}
@@ -1828,7 +1751,7 @@ class DictionaryElementsAccessor
// If switched to initial elements, return true if searching for
// undefined, and false otherwise.
- if (receiver->map()->GetInitialElements() == receiver->elements()) {
+ if (receiver->map().GetInitialElements() == receiver->elements()) {
return Just(search_for_hole);
}
@@ -1913,42 +1836,38 @@ class DictionaryElementsAccessor
static void ValidateContents(JSObject holder, int length) {
DisallowHeapAllocation no_gc;
#if DEBUG
- DCHECK_EQ(holder->map()->elements_kind(), DICTIONARY_ELEMENTS);
+ DCHECK_EQ(holder.map().elements_kind(), DICTIONARY_ELEMENTS);
if (!FLAG_enable_slow_asserts) return;
- ReadOnlyRoots roots = holder->GetReadOnlyRoots();
- NumberDictionary dictionary = NumberDictionary::cast(holder->elements());
+ ReadOnlyRoots roots = holder.GetReadOnlyRoots();
+ NumberDictionary dictionary = NumberDictionary::cast(holder.elements());
// Validate the requires_slow_elements and max_number_key values.
- int capacity = dictionary->Capacity();
+ int capacity = dictionary.Capacity();
bool requires_slow_elements = false;
int max_key = 0;
for (int i = 0; i < capacity; ++i) {
Object k;
- if (!dictionary->ToKey(roots, i, &k)) continue;
- DCHECK_LE(0.0, k->Number());
- if (k->Number() > NumberDictionary::kRequiresSlowElementsLimit) {
+ if (!dictionary.ToKey(roots, i, &k)) continue;
+ DCHECK_LE(0.0, k.Number());
+ if (k.Number() > NumberDictionary::kRequiresSlowElementsLimit) {
requires_slow_elements = true;
} else {
max_key = Max(max_key, Smi::ToInt(k));
}
}
if (requires_slow_elements) {
- DCHECK(dictionary->requires_slow_elements());
- } else if (!dictionary->requires_slow_elements()) {
- DCHECK_LE(max_key, dictionary->max_number_key());
+ DCHECK(dictionary.requires_slow_elements());
+ } else if (!dictionary.requires_slow_elements()) {
+ DCHECK_LE(max_key, dictionary.max_number_key());
}
#endif
}
};
-
// Super class for all fast element arrays.
template <typename Subclass, typename KindTraits>
class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
public:
- explicit FastElementsAccessor(const char* name)
- : ElementsAccessorBase<Subclass, KindTraits>(name) {}
-
- typedef typename KindTraits::BackingStore BackingStore;
+ using BackingStore = typename KindTraits::BackingStore;
static Handle<NumberDictionary> NormalizeImpl(Handle<JSObject> object,
Handle<FixedArrayBase> store) {
@@ -1970,8 +1889,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
int j = 0;
int max_number_key = -1;
for (int i = 0; j < capacity; i++) {
- if (IsHoleyElementsKind(kind)) {
- if (BackingStore::cast(*store)->is_the_hole(isolate, i)) continue;
+ if (IsHoleyElementsKindForRead(kind)) {
+ if (BackingStore::cast(*store).is_the_hole(isolate, i)) continue;
}
max_number_key = i;
Handle<Object> value = Subclass::GetImpl(isolate, *store, i);
@@ -1999,7 +1918,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// Dynamically ask for the elements kind here since we manually redirect
// the operations for argument backing stores.
if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
- SloppyArgumentsElements::cast(obj->elements())->set_arguments(empty);
+ SloppyArgumentsElements::cast(obj->elements()).set_arguments(empty);
} else {
obj->set_elements(empty);
}
@@ -2033,7 +1952,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (ObjectInYoungGeneration(*backing_store)) return;
uint32_t length = 0;
if (obj->IsJSArray()) {
- JSArray::cast(*obj)->length()->ToArrayLength(&length);
+ JSArray::cast(*obj).length().ToArrayLength(&length);
} else {
length = static_cast<uint32_t>(store->length());
}
@@ -2127,14 +2046,14 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
static bool HasEntryImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
- return !BackingStore::cast(backing_store)->is_the_hole(isolate, entry);
+ return !BackingStore::cast(backing_store).is_the_hole(isolate, entry);
}
static uint32_t NumberOfElementsImpl(JSObject receiver,
FixedArrayBase backing_store) {
uint32_t max_index = Subclass::GetMaxIndex(receiver, backing_store);
if (IsFastPackedElementsKind(Subclass::kind())) return max_index;
- Isolate* isolate = receiver->GetIsolate();
+ Isolate* isolate = receiver.GetIsolate();
uint32_t count = 0;
for (uint32_t i = 0; i < max_index; i++) {
if (Subclass::HasEntryImpl(isolate, backing_store, i)) count++;
@@ -2158,10 +2077,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
static void ValidateContents(JSObject holder, int length) {
#if DEBUG
- Isolate* isolate = holder->GetIsolate();
+ Isolate* isolate = holder.GetIsolate();
Heap* heap = isolate->heap();
- FixedArrayBase elements = holder->elements();
- Map map = elements->map();
+ FixedArrayBase elements = holder.elements();
+ Map map = elements.map();
if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
DCHECK_NE(map, ReadOnlyRoots(heap).fixed_double_array_map());
} else if (IsDoubleElementsKind(KindTraits::Kind)) {
@@ -2179,12 +2098,12 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
for (int i = 0; i < length; i++) {
DCHECK(BackingStore::get(backing_store, i, isolate)->IsSmi() ||
(IsHoleyElementsKind(KindTraits::Kind) &&
- backing_store->is_the_hole(isolate, i)));
+ backing_store.is_the_hole(isolate, i)));
}
} else if (KindTraits::Kind == PACKED_ELEMENTS ||
KindTraits::Kind == PACKED_DOUBLE_ELEMENTS) {
for (int i = 0; i < length; i++) {
- DCHECK(!backing_store->is_the_hole(isolate, i));
+ DCHECK(!backing_store.is_the_hole(isolate, i));
}
} else {
DCHECK(IsHoleyElementsKind(KindTraits::Kind));
@@ -2201,49 +2120,34 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return Subclass::RemoveElement(receiver, AT_START);
}
- static uint32_t PushImpl(Handle<JSArray> receiver,
- Arguments* args, uint32_t push_size) {
+ static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
+ uint32_t push_size) {
Handle<FixedArrayBase> backing_store(receiver->elements(),
receiver->GetIsolate());
return Subclass::AddArguments(receiver, backing_store, args, push_size,
AT_END);
}
- static uint32_t UnshiftImpl(Handle<JSArray> receiver,
- Arguments* args, uint32_t unshift_size) {
+ static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
+ uint32_t unshift_size) {
Handle<FixedArrayBase> backing_store(receiver->elements(),
receiver->GetIsolate());
return Subclass::AddArguments(receiver, backing_store, args, unshift_size,
AT_START);
}
- static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
- uint32_t end) {
- Isolate* isolate = receiver->GetIsolate();
- Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
- int result_len = end < start ? 0u : end - start;
- Handle<JSArray> result_array = isolate->factory()->NewJSArray(
- KindTraits::Kind, result_len, result_len);
- DisallowHeapAllocation no_gc;
- Subclass::CopyElementsImpl(isolate, *backing_store, start,
- result_array->elements(), KindTraits::Kind, 0,
- kPackedSizeNotKnown, result_len);
- Subclass::TryTransitionResultArrayToPacked(result_array);
- return result_array;
- }
-
static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store, int dst_index,
int src_index, int len, int hole_start,
int hole_end) {
- Heap* heap = isolate->heap();
Handle<BackingStore> dst_elms = Handle<BackingStore>::cast(backing_store);
if (len > JSArray::kMaxCopyElements && dst_index == 0 &&
- heap->CanMoveObjectStart(*dst_elms)) {
+ isolate->heap()->CanMoveObjectStart(*dst_elms)) {
// Update all the copies of this backing_store handle.
*dst_elms.location() =
- BackingStore::cast(heap->LeftTrimFixedArray(*dst_elms, src_index))
- ->ptr();
+ BackingStore::cast(
+ isolate->heap()->LeftTrimFixedArray(*dst_elms, src_index))
+ .ptr();
receiver->set_elements(*dst_elms);
// Adjust the hole offset as the array has been shrunk.
hole_end -= src_index;
@@ -2251,7 +2155,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
DCHECK_LE(hole_end, backing_store->length());
} else if (len != 0) {
WriteBarrierMode mode = GetWriteBarrierMode(KindTraits::Kind);
- dst_elms->MoveElements(heap, dst_index, src_index, len, mode);
+ dst_elms->MoveElements(isolate, dst_index, src_index, len, mode);
}
if (hole_start != hole_end) {
dst_elms->FillWithHoles(hole_start, hole_end);
@@ -2298,7 +2202,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (start_from >= length) return Just(false);
// Elements beyond the capacity of the backing store treated as undefined.
- uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
+ uint32_t elements_length = static_cast<uint32_t>(elements_base.length());
if (value == undefined && elements_length < length) return Just(true);
if (elements_length == 0) {
DCHECK_NE(value, undefined);
@@ -2307,18 +2211,18 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
length = std::min(elements_length, length);
- if (!value->IsNumber()) {
+ if (!value.IsNumber()) {
if (value == undefined) {
// Search for `undefined` or The Hole. Even in the case of
// PACKED_DOUBLE_ELEMENTS or PACKED_SMI_ELEMENTS, we might encounter The
// Hole here, since the {length} used here can be larger than
// JSArray::length.
if (IsSmiOrObjectElementsKind(Subclass::kind()) ||
- IsPackedFrozenOrSealedElementsKind(Subclass::kind())) {
+ IsFrozenOrSealedElementsKind(Subclass::kind())) {
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- Object element_k = elements->get(k);
+ Object element_k = elements.get(k);
if (element_k == the_hole || element_k == undefined) {
return Just(true);
@@ -2332,14 +2236,14 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
auto elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (elements->is_the_hole(k)) {
+ if (elements.is_the_hole(k)) {
return Just(true);
}
}
return Just(false);
}
} else if (!IsObjectElementsKind(Subclass::kind()) &&
- !IsPackedFrozenOrSealedElementsKind(Subclass::kind())) {
+ !IsFrozenOrSealedElementsKind(Subclass::kind())) {
// Search for non-number, non-Undefined value, with either
// PACKED_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS, HOLEY_SMI_ELEMENTS or
// HOLEY_DOUBLE_ELEMENTS. Guaranteed to return false, since these
@@ -2349,22 +2253,22 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// Search for non-number, non-Undefined value with either
// PACKED_ELEMENTS or HOLEY_ELEMENTS.
DCHECK(IsObjectElementsKind(Subclass::kind()) ||
- IsPackedFrozenOrSealedElementsKind(Subclass::kind()));
+ IsFrozenOrSealedElementsKind(Subclass::kind()));
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- Object element_k = elements->get(k);
+ Object element_k = elements.get(k);
if (element_k == the_hole) {
continue;
}
- if (value->SameValueZero(element_k)) return Just(true);
+ if (value.SameValueZero(element_k)) return Just(true);
}
return Just(false);
}
} else {
- if (!value->IsNaN()) {
- double search_value = value->Number();
+ if (!value.IsNaN()) {
+ double search_value = value.Number();
if (IsDoubleElementsKind(Subclass::kind())) {
// Search for non-NaN Number in PACKED_DOUBLE_ELEMENTS or
// HOLEY_DOUBLE_ELEMENTS --- Skip TheHole, and trust UCOMISD or
@@ -2372,10 +2276,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
auto elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (elements->is_the_hole(k)) {
+ if (elements.is_the_hole(k)) {
continue;
}
- if (elements->get_scalar(k) == search_value) return Just(true);
+ if (elements.get_scalar(k) == search_value) return Just(true);
}
return Just(false);
} else {
@@ -2385,8 +2289,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- Object element_k = elements->get(k);
- if (element_k->IsNumber() && element_k->Number() == search_value) {
+ Object element_k = elements.get(k);
+ if (element_k.IsNumber() && element_k.Number() == search_value) {
return Just(true);
}
}
@@ -2404,10 +2308,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
auto elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (elements->is_the_hole(k)) {
+ if (elements.is_the_hole(k)) {
continue;
}
- if (std::isnan(elements->get_scalar(k))) return Just(true);
+ if (std::isnan(elements.get_scalar(k))) return Just(true);
}
return Just(false);
} else {
@@ -2415,11 +2319,11 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS. Return true if
// elementK->IsHeapNumber() && std::isnan(elementK->Number())
DCHECK(IsSmiOrObjectElementsKind(Subclass::kind()) ||
- IsPackedFrozenOrSealedElementsKind(Subclass::kind()));
+ IsFrozenOrSealedElementsKind(Subclass::kind()));
auto elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (elements->get(k)->IsNaN()) return Just(true);
+ if (elements.get(k).IsNaN()) return Just(true);
}
return Just(false);
}
@@ -2514,10 +2418,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// Add the provided values.
DisallowHeapAllocation no_gc;
FixedArrayBase raw_backing_store = *dst_store;
- WriteBarrierMode mode = raw_backing_store->GetWriteBarrierMode(no_gc);
+ WriteBarrierMode mode = raw_backing_store.GetWriteBarrierMode(no_gc);
for (uint32_t i = 0; i < copy_size; i++) {
Object argument = (*args)[src_index + i];
- DCHECK(!argument->IsTheHole());
+ DCHECK(!argument.IsTheHole());
Subclass::SetImpl(raw_backing_store, dst_index + i, argument, mode);
}
}
@@ -2527,9 +2431,6 @@ template <typename Subclass, typename KindTraits>
class FastSmiOrObjectElementsAccessor
: public FastElementsAccessor<Subclass, KindTraits> {
public:
- explicit FastSmiOrObjectElementsAccessor(const char* name)
- : FastElementsAccessor<Subclass, KindTraits>(name) {}
-
static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
Object value) {
SetImpl(holder->elements(), entry, value);
@@ -2537,17 +2438,17 @@ class FastSmiOrObjectElementsAccessor
static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
Object value) {
- FixedArray::cast(backing_store)->set(entry, value);
+ FixedArray::cast(backing_store).set(entry, value);
}
static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
Object value, WriteBarrierMode mode) {
- FixedArray::cast(backing_store)->set(entry, value, mode);
+ FixedArray::cast(backing_store).set(entry, value, mode);
}
static Object GetRaw(FixedArray backing_store, uint32_t entry) {
uint32_t index = Subclass::GetIndexForEntryImpl(backing_store, entry);
- return backing_store->get(index);
+ return backing_store.get(index);
}
// NOTE: this method violates the handlified function signature convention:
@@ -2568,6 +2469,8 @@ class FastSmiOrObjectElementsAccessor
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
CopyObjectToObjectElements(isolate, from, from_kind, from_start, to,
to_kind, to_start, copy_size);
break;
@@ -2590,10 +2493,9 @@ class FastSmiOrObjectElementsAccessor
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
- // This function is currently only used for JSArrays with non-zero
- // length.
- UNREACHABLE();
- break;
+ // This function is currently only used for JSArrays with non-zero
+ // length.
+ UNREACHABLE();
case NO_ELEMENTS:
break; // Nothing to do.
}
@@ -2619,7 +2521,7 @@ class FastSmiOrObjectElementsAccessor
// No allocations here, so we can avoid handlification overhead.
DisallowHeapAllocation no_gc;
FixedArray elements = FixedArray::cast(object->elements());
- uint32_t length = elements->length();
+ uint32_t length = elements.length();
for (uint32_t index = 0; index < length; ++index) {
if (!Subclass::HasEntryImpl(isolate, elements, index)) continue;
Object value = GetRaw(elements, index);
@@ -2641,15 +2543,15 @@ class FastSmiOrObjectElementsAccessor
if (start_from >= length) return Just<int64_t>(-1);
- length = std::min(static_cast<uint32_t>(elements_base->length()), length);
+ length = std::min(static_cast<uint32_t>(elements_base.length()), length);
// Only FAST_{,HOLEY_}ELEMENTS can store non-numbers.
- if (!value->IsNumber() && !IsObjectElementsKind(Subclass::kind()) &&
- !IsPackedFrozenOrSealedElementsKind(Subclass::kind())) {
+ if (!value.IsNumber() && !IsObjectElementsKind(Subclass::kind()) &&
+ !IsFrozenOrSealedElementsKind(Subclass::kind())) {
return Just<int64_t>(-1);
}
// NaN can never be found by strict equality.
- if (value->IsNaN()) return Just<int64_t>(-1);
+ if (value.IsNaN()) return Just<int64_t>(-1);
// k can be greater than receiver->length() below, but it is bounded by
// elements_base->length() so we never read out of bounds. This means that
@@ -2657,7 +2559,7 @@ class FastSmiOrObjectElementsAccessor
// always fail.
FixedArray elements = FixedArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (value->StrictEquals(elements->get(k))) return Just<int64_t>(k);
+ if (value.StrictEquals(elements.get(k))) return Just<int64_t>(k);
}
return Just<int64_t>(-1);
}
@@ -2666,60 +2568,23 @@ class FastSmiOrObjectElementsAccessor
class FastPackedSmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedSmiElementsAccessor,
- ElementsKindTraits<PACKED_SMI_ELEMENTS>> {
- public:
- explicit FastPackedSmiElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastPackedSmiElementsAccessor,
- ElementsKindTraits<PACKED_SMI_ELEMENTS>>(name) {}
-};
+ ElementsKindTraits<PACKED_SMI_ELEMENTS>> {};
class FastHoleySmiElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastHoleySmiElementsAccessor,
- ElementsKindTraits<HOLEY_SMI_ELEMENTS>> {
- public:
- explicit FastHoleySmiElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<FastHoleySmiElementsAccessor,
- ElementsKindTraits<HOLEY_SMI_ELEMENTS>>(
- name) {}
-};
+ ElementsKindTraits<HOLEY_SMI_ELEMENTS>> {};
class FastPackedObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastPackedObjectElementsAccessor,
- ElementsKindTraits<PACKED_ELEMENTS>> {
- public:
- explicit FastPackedObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<FastPackedObjectElementsAccessor,
- ElementsKindTraits<PACKED_ELEMENTS>>(
- name) {}
-};
+ ElementsKindTraits<PACKED_ELEMENTS>> {};
-class FastPackedFrozenObjectElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
- FastPackedFrozenObjectElementsAccessor,
- ElementsKindTraits<PACKED_FROZEN_ELEMENTS>> {
+template <typename Subclass, typename KindTraits>
+class FastSealedObjectElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<Subclass, KindTraits> {
public:
- explicit FastPackedFrozenObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastPackedFrozenObjectElementsAccessor,
- ElementsKindTraits<PACKED_FROZEN_ELEMENTS>>(name) {}
-
- static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
- Object value) {
- UNREACHABLE();
- }
-
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
- Object value) {
- UNREACHABLE();
- }
-
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
- Object value, WriteBarrierMode mode) {
- UNREACHABLE();
- }
+ using BackingStore = typename KindTraits::BackingStore;
static Handle<Object> RemoveElement(Handle<JSArray> receiver,
Where remove_position) {
@@ -2756,19 +2621,79 @@ class FastPackedFrozenObjectElementsAccessor
static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
uint32_t length,
Handle<FixedArrayBase> backing_store) {
- UNREACHABLE();
+ uint32_t old_length = 0;
+ CHECK(array->length().ToArrayIndex(&old_length));
+ if (length == old_length) {
+ // Do nothing.
+ return;
+ }
+
+ // Transition to DICTIONARY_ELEMENTS.
+ // Convert to dictionary mode
+ Handle<NumberDictionary> new_element_dictionary =
+ old_length == 0 ? isolate->factory()->empty_slow_element_dictionary()
+ : array->GetElementsAccessor()->Normalize(array);
+
+ // Migrate map.
+ Handle<Map> new_map = Map::Copy(isolate, handle(array->map(), isolate),
+ "SlowCopyForSetLengthImpl");
+ new_map->set_is_extensible(false);
+ new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ JSObject::MigrateToMap(array, new_map);
+
+ if (!new_element_dictionary.is_null()) {
+ array->set_elements(*new_element_dictionary);
+ }
+
+ if (array->elements() !=
+ ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
+ Handle<NumberDictionary> dictionary(array->element_dictionary(), isolate);
+ // Make sure we never go back to the fast case
+ array->RequireSlowElements(*dictionary);
+ JSObject::ApplyAttributesToDictionary(isolate, ReadOnlyRoots(isolate),
+ dictionary,
+ PropertyAttributes::SEALED);
+ }
+
+ // Set length
+ Handle<FixedArrayBase> new_backing_store(array->elements(), isolate);
+ DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
+ new_backing_store);
}
};
class FastPackedSealedObjectElementsAccessor
- : public FastSmiOrObjectElementsAccessor<
+ : public FastSealedObjectElementsAccessor<
FastPackedSealedObjectElementsAccessor,
ElementsKindTraits<PACKED_SEALED_ELEMENTS>> {
+};
+
+class FastHoleySealedObjectElementsAccessor
+ : public FastSealedObjectElementsAccessor<
+ FastHoleySealedObjectElementsAccessor,
+ ElementsKindTraits<HOLEY_SEALED_ELEMENTS>> {
+};
+
+template <typename Subclass, typename KindTraits>
+class FastFrozenObjectElementsAccessor
+ : public FastSmiOrObjectElementsAccessor<Subclass, KindTraits> {
public:
- explicit FastPackedSealedObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<
- FastPackedSealedObjectElementsAccessor,
- ElementsKindTraits<PACKED_SEALED_ELEMENTS>>(name) {}
+ using BackingStore = typename KindTraits::BackingStore;
+
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ Object value) {
+ UNREACHABLE();
+ }
+
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value) {
+ UNREACHABLE();
+ }
+
+ static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
+ Object value, WriteBarrierMode mode) {
+ UNREACHABLE();
+ }
static Handle<Object> RemoveElement(Handle<JSArray> receiver,
Where remove_position) {
@@ -2805,63 +2730,38 @@ class FastPackedSealedObjectElementsAccessor
static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
uint32_t length,
Handle<FixedArrayBase> backing_store) {
- uint32_t old_length = 0;
- CHECK(array->length()->ToArrayIndex(&old_length));
- if (length <= old_length) {
- // Cannot delete entries so do nothing.
- return;
- }
-
- // Transition to DICTIONARY_ELEMENTS.
- // Convert to dictionary mode
- Handle<NumberDictionary> new_element_dictionary =
- old_length == 0 ? isolate->factory()->empty_slow_element_dictionary()
- : array->GetElementsAccessor()->Normalize(array);
-
- // Migrate map.
- Handle<Map> new_map = Map::Copy(isolate, handle(array->map(), isolate),
- "SlowCopyForSetLengthImpl");
- new_map->set_is_extensible(false);
- new_map->set_elements_kind(DICTIONARY_ELEMENTS);
- JSObject::MigrateToMap(array, new_map);
+ UNREACHABLE();
+ }
- if (!new_element_dictionary.is_null()) {
- array->set_elements(*new_element_dictionary);
- }
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ UNREACHABLE();
+ }
+};
- if (array->elements() !=
- ReadOnlyRoots(isolate).empty_slow_element_dictionary()) {
- Handle<NumberDictionary> dictionary(array->element_dictionary(), isolate);
- // Make sure we never go back to the fast case
- array->RequireSlowElements(*dictionary);
- JSObject::ApplyAttributesToDictionary(isolate, ReadOnlyRoots(isolate),
- dictionary,
- PropertyAttributes::SEALED);
- }
+class FastPackedFrozenObjectElementsAccessor
+ : public FastFrozenObjectElementsAccessor<
+ FastPackedFrozenObjectElementsAccessor,
+ ElementsKindTraits<PACKED_FROZEN_ELEMENTS>> {
+};
- // Set length
- Handle<Object> length_obj = isolate->factory()->NewNumberFromUint(length);
- array->set_length(*length_obj);
- }
+class FastHoleyFrozenObjectElementsAccessor
+ : public FastFrozenObjectElementsAccessor<
+ FastHoleyFrozenObjectElementsAccessor,
+ ElementsKindTraits<HOLEY_FROZEN_ELEMENTS>> {
};
class FastHoleyObjectElementsAccessor
: public FastSmiOrObjectElementsAccessor<
FastHoleyObjectElementsAccessor, ElementsKindTraits<HOLEY_ELEMENTS>> {
- public:
- explicit FastHoleyObjectElementsAccessor(const char* name)
- : FastSmiOrObjectElementsAccessor<FastHoleyObjectElementsAccessor,
- ElementsKindTraits<HOLEY_ELEMENTS>>(
- name) {}
};
template <typename Subclass, typename KindTraits>
class FastDoubleElementsAccessor
: public FastElementsAccessor<Subclass, KindTraits> {
public:
- explicit FastDoubleElementsAccessor(const char* name)
- : FastElementsAccessor<Subclass, KindTraits>(name) {}
-
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry,
@@ -2875,12 +2775,12 @@ class FastDoubleElementsAccessor
static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
Object value) {
- FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
+ FixedDoubleArray::cast(backing_store).set(entry, value.Number());
}
static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
Object value, WriteBarrierMode mode) {
- FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
+ FixedDoubleArray::cast(backing_store).set(entry, value.Number());
}
static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
@@ -2904,6 +2804,8 @@ class FastDoubleElementsAccessor
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
break;
case DICTIONARY_ELEMENTS:
@@ -2918,10 +2820,9 @@ class FastDoubleElementsAccessor
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
- // This function is currently only used for JSArrays with non-zero
- // length.
- UNREACHABLE();
- break;
+ // This function is currently only used for JSArrays with non-zero
+ // length.
+ UNREACHABLE();
}
}
@@ -2954,24 +2855,24 @@ class FastDoubleElementsAccessor
FixedArrayBase elements_base = receiver->elements();
Object value = *search_value;
- length = std::min(static_cast<uint32_t>(elements_base->length()), length);
+ length = std::min(static_cast<uint32_t>(elements_base.length()), length);
if (start_from >= length) return Just<int64_t>(-1);
- if (!value->IsNumber()) {
+ if (!value.IsNumber()) {
return Just<int64_t>(-1);
}
- if (value->IsNaN()) {
+ if (value.IsNaN()) {
return Just<int64_t>(-1);
}
- double numeric_search_value = value->Number();
+ double numeric_search_value = value.Number();
FixedDoubleArray elements = FixedDoubleArray::cast(receiver->elements());
for (uint32_t k = start_from; k < length; ++k) {
- if (elements->is_the_hole(k)) {
+ if (elements.is_the_hole(k)) {
continue;
}
- if (elements->get_scalar(k) == numeric_search_value) {
+ if (elements.get_scalar(k) == numeric_search_value) {
return Just<int64_t>(k);
}
}
@@ -2982,57 +2883,125 @@ class FastDoubleElementsAccessor
class FastPackedDoubleElementsAccessor
: public FastDoubleElementsAccessor<
FastPackedDoubleElementsAccessor,
- ElementsKindTraits<PACKED_DOUBLE_ELEMENTS>> {
- public:
- explicit FastPackedDoubleElementsAccessor(const char* name)
- : FastDoubleElementsAccessor<FastPackedDoubleElementsAccessor,
- ElementsKindTraits<PACKED_DOUBLE_ELEMENTS>>(
- name) {}
-};
+ ElementsKindTraits<PACKED_DOUBLE_ELEMENTS>> {};
class FastHoleyDoubleElementsAccessor
: public FastDoubleElementsAccessor<
FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<HOLEY_DOUBLE_ELEMENTS>> {
- public:
- explicit FastHoleyDoubleElementsAccessor(const char* name)
- : FastDoubleElementsAccessor<FastHoleyDoubleElementsAccessor,
- ElementsKindTraits<HOLEY_DOUBLE_ELEMENTS>>(
- name) {}
-};
-
+ ElementsKindTraits<HOLEY_DOUBLE_ELEMENTS>> {};
// Super class for all external element arrays.
-template <ElementsKind Kind, typename ctype>
+template <ElementsKind Kind, typename ElementType>
class TypedElementsAccessor
- : public ElementsAccessorBase<TypedElementsAccessor<Kind, ctype>,
+ : public ElementsAccessorBase<TypedElementsAccessor<Kind, ElementType>,
ElementsKindTraits<Kind>> {
public:
- explicit TypedElementsAccessor(const char* name)
- : ElementsAccessorBase<AccessorClass,
- ElementsKindTraits<Kind> >(name) {}
+ using BackingStore = typename ElementsKindTraits<Kind>::BackingStore;
+ using AccessorClass = TypedElementsAccessor<Kind, ElementType>;
- typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
- typedef TypedElementsAccessor<Kind, ctype> AccessorClass;
+ // Conversions from (other) scalar values.
+ static ElementType FromScalar(int value) {
+ return static_cast<ElementType>(value);
+ }
+ static ElementType FromScalar(uint32_t value) {
+ return static_cast<ElementType>(value);
+ }
+ static ElementType FromScalar(double value) {
+ return FromScalar(DoubleToInt32(value));
+ }
+ static ElementType FromScalar(int64_t value) { UNREACHABLE(); }
+ static ElementType FromScalar(uint64_t value) { UNREACHABLE(); }
- static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
- Object value) {
- SetImpl(holder->elements(), entry, value);
+ // Conversions from objects / handles.
+ static ElementType FromObject(Object value, bool* lossless = nullptr) {
+ if (value.IsSmi()) {
+ return FromScalar(Smi::ToInt(value));
+ } else if (value.IsHeapNumber()) {
+ return FromScalar(HeapNumber::cast(value).value());
+ } else {
+ // Clamp undefined here as well. All other types have been
+ // converted to a number type further up in the call chain.
+ DCHECK(value.IsUndefined());
+ return FromScalar(Oddball::cast(value).to_number_raw());
+ }
+ }
+ static ElementType FromHandle(Handle<Object> value,
+ bool* lossless = nullptr) {
+ return FromObject(*value, lossless);
}
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
- Object value) {
- BackingStore::cast(backing_store)->SetValue(entry, value);
+ // Conversion of scalar value to handlified object.
+ static Handle<Object> ToHandle(Isolate* isolate, ElementType value);
+
+ static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object value) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
+ DCHECK_LE(entry, typed_array->length());
+ SetImpl(static_cast<ElementType*>(typed_array->DataPtr()), entry,
+ FromObject(value));
}
- static inline void SetImpl(FixedArrayBase backing_store, uint32_t entry,
- Object value, WriteBarrierMode mode) {
- BackingStore::cast(backing_store)->SetValue(entry, value);
+ static void SetImpl(ElementType* data_ptr, size_t entry, ElementType value) {
+ // The JavaScript memory model allows for racy reads and writes to a
+ // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
+ // racy accesses and warn about them, so we disable TSAN for these reads
+ // and writes using annotations.
+ //
+ // We don't use relaxed atomics here, as it is not a requirement of the
+ // JavaScript memory model to have tear-free reads of overlapping accesses,
+ // and using relaxed atomics may introduce overhead.
+ TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
+ // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
+ // fields (external pointers, doubles and BigInt data) are only
+ // kTaggedSize aligned so we have to use unaligned pointer friendly way of
+ // accessing them in order to avoid undefined behavior in C++ code.
+ WriteUnalignedValue<ElementType>(
+ reinterpret_cast<Address>(data_ptr + entry), value);
+ } else {
+ data_ptr[entry] = value;
+ }
+ TSAN_ANNOTATE_IGNORE_WRITES_END;
+ }
+
+ static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
+ uint32_t entry) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
+ Isolate* isolate = typed_array->GetIsolate();
+ DCHECK_LE(entry, typed_array->length());
+ DCHECK(!typed_array->WasDetached());
+ ElementType elem =
+ GetImpl(static_cast<ElementType*>(typed_array->DataPtr()), entry);
+ return ToHandle(isolate, elem);
}
static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase backing_store,
uint32_t entry) {
- return BackingStore::get(isolate, BackingStore::cast(backing_store), entry);
+ UNREACHABLE();
+ }
+
+ static ElementType GetImpl(ElementType* data_ptr, size_t entry) {
+ // The JavaScript memory model allows for racy reads and writes to a
+ // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
+ // racy accesses and warn about them, so we disable TSAN for these reads
+ // and writes using annotations.
+ //
+ // We don't use relaxed atomics here, as it is not a requirement of the
+ // JavaScript memory model to have tear-free reads of overlapping accesses,
+ // and using relaxed atomics may introduce overhead.
+ TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+ ElementType result;
+ if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
+ // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
+ // fields (external pointers, doubles and BigInt data) are only
+ // kTaggedSize aligned so we have to use unaligned pointer friendly way of
+ // accessing them in order to avoid undefined behavior in C++ code.
+ result = ReadUnalignedValue<ElementType>(
+ reinterpret_cast<Address>(data_ptr + entry));
+ } else {
+ result = data_ptr[entry];
+ }
+ TSAN_ANNOTATE_IGNORE_READS_END;
+ return result;
}
static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
@@ -3078,15 +3047,12 @@ class TypedElementsAccessor
: kMaxUInt32;
}
- static bool WasDetached(JSObject holder) {
- JSArrayBufferView view = JSArrayBufferView::cast(holder);
- return view->WasDetached();
- }
-
static uint32_t GetCapacityImpl(JSObject holder,
FixedArrayBase backing_store) {
- if (WasDetached(holder)) return 0;
- return backing_store->length();
+ JSTypedArray typed_array = JSTypedArray::cast(holder);
+ if (typed_array.WasDetached()) return 0;
+ // TODO(bmeurer, v8:4153): We need to support arbitrary size_t here.
+ return static_cast<uint32_t>(typed_array.length());
}
static uint32_t NumberOfElementsImpl(JSObject receiver,
@@ -3101,7 +3067,7 @@ class TypedElementsAccessor
Handle<FixedArrayBase> elements(receiver->elements(), isolate);
uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
for (uint32_t i = 0; i < length; i++) {
- Handle<Object> value = AccessorClass::GetImpl(isolate, *elements, i);
+ Handle<Object> value = AccessorClass::GetInternalImpl(receiver, i);
accumulator->AddKey(value, convert);
}
}
@@ -3115,8 +3081,7 @@ class TypedElementsAccessor
Handle<FixedArrayBase> elements(object->elements(), isolate);
uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements);
for (uint32_t index = 0; index < length; ++index) {
- Handle<Object> value =
- AccessorClass::GetImpl(isolate, *elements, index);
+ Handle<Object> value = AccessorClass::GetInternalImpl(object, index);
if (get_entries) {
value = MakeEntryPair(isolate, index, value);
}
@@ -3127,30 +3092,24 @@ class TypedElementsAccessor
return Just(true);
}
- static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
+ static Object FillImpl(Handle<JSObject> receiver, Handle<Object> value,
uint32_t start, uint32_t end) {
- Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
- DCHECK(!array->WasDetached());
- DCHECK(obj_value->IsNumeric());
-
- ctype value = BackingStore::FromHandle(obj_value);
-
- // Ensure indexes are within array bounds
- CHECK_LE(0, start);
- CHECK_LE(start, end);
- CHECK_LE(end, array->length_value());
-
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
+ DCHECK(!typed_array->WasDetached());
+ DCHECK_LE(0, start);
+ DCHECK_LE(start, end);
+ DCHECK_LE(end, typed_array->length());
DisallowHeapAllocation no_gc;
- BackingStore elements = BackingStore::cast(receiver->elements());
- ctype* data = static_cast<ctype*>(elements->DataPtr());
- if (COMPRESS_POINTERS_BOOL && alignof(ctype) > kTaggedSize) {
+ ElementType scalar = FromHandle(value);
+ ElementType* data = static_cast<ElementType*>(typed_array->DataPtr());
+ if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
// TODO(ishell, v8:8875): See UnalignedSlot<T> for details.
- std::fill(UnalignedSlot<ctype>(data + start),
- UnalignedSlot<ctype>(data + end), value);
+ std::fill(UnalignedSlot<ElementType>(data + start),
+ UnalignedSlot<ElementType>(data + end), scalar);
} else {
- std::fill(data + start, data + end, value);
+ std::fill(data + start, data + end, scalar);
}
- return *array;
+ return *typed_array;
}
static Maybe<bool> IncludesValueImpl(Isolate* isolate,
@@ -3158,29 +3117,32 @@ class TypedElementsAccessor
Handle<Object> value,
uint32_t start_from, uint32_t length) {
DisallowHeapAllocation no_gc;
+ JSTypedArray typed_array = JSTypedArray::cast(*receiver);
// TODO(caitp): return Just(false) here when implementing strict throwing on
// detached views.
- if (WasDetached(*receiver)) {
+ if (typed_array.WasDetached()) {
return Just(value->IsUndefined(isolate) && length > start_from);
}
- BackingStore elements = BackingStore::cast(receiver->elements());
- if (value->IsUndefined(isolate) &&
- length > static_cast<uint32_t>(elements->length())) {
+ if (value->IsUndefined(isolate) && length > typed_array.length()) {
return Just(true);
}
- ctype typed_search_value;
+
// Prototype has no elements, and not searching for the hole --- limit
// search to backing store length.
- if (static_cast<uint32_t>(elements->length()) < length) {
- length = elements->length();
+ if (typed_array.length() < length) {
+ // TODO(bmeurer, v8:4153): Don't cast to uint32_t here.
+ length = static_cast<uint32_t>(typed_array.length());
}
+ ElementType typed_search_value;
+ ElementType* data_ptr =
+ reinterpret_cast<ElementType*>(typed_array.DataPtr());
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
if (!value->IsBigInt()) return Just(false);
bool lossless;
- typed_search_value = BackingStore::FromHandle(value, &lossless);
+ typed_search_value = FromHandle(value, &lossless);
if (!lossless) return Just(false);
} else {
if (!value->IsNumber()) return Just(false);
@@ -3192,25 +3154,26 @@ class TypedElementsAccessor
}
if (std::isnan(search_value)) {
for (uint32_t k = start_from; k < length; ++k) {
- double element_k = elements->get_scalar(k);
- if (std::isnan(element_k)) return Just(true);
+ double elem_k =
+ static_cast<double>(AccessorClass::GetImpl(data_ptr, k));
+ if (std::isnan(elem_k)) return Just(true);
}
return Just(false);
}
- } else if (search_value < std::numeric_limits<ctype>::lowest() ||
- search_value > std::numeric_limits<ctype>::max()) {
+ } else if (search_value < std::numeric_limits<ElementType>::lowest() ||
+ search_value > std::numeric_limits<ElementType>::max()) {
// Return false if value can't be represented in this space.
return Just(false);
}
- typed_search_value = static_cast<ctype>(search_value);
+ typed_search_value = static_cast<ElementType>(search_value);
if (static_cast<double>(typed_search_value) != search_value) {
return Just(false); // Loss of precision.
}
}
for (uint32_t k = start_from; k < length; ++k) {
- ctype element_k = elements->get_scalar(k);
- if (element_k == typed_search_value) return Just(true);
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ if (elem_k == typed_search_value) return Just(true);
}
return Just(false);
}
@@ -3220,16 +3183,18 @@ class TypedElementsAccessor
Handle<Object> value,
uint32_t start_from, uint32_t length) {
DisallowHeapAllocation no_gc;
+ JSTypedArray typed_array = JSTypedArray::cast(*receiver);
- if (WasDetached(*receiver)) return Just<int64_t>(-1);
+ if (typed_array.WasDetached()) return Just<int64_t>(-1);
- BackingStore elements = BackingStore::cast(receiver->elements());
- ctype typed_search_value;
+ ElementType typed_search_value;
+ ElementType* data_ptr =
+ reinterpret_cast<ElementType*>(typed_array.DataPtr());
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
if (!value->IsBigInt()) return Just<int64_t>(-1);
bool lossless;
- typed_search_value = BackingStore::FromHandle(value, &lossless);
+ typed_search_value = FromHandle(value, &lossless);
if (!lossless) return Just<int64_t>(-1);
} else {
if (!value->IsNumber()) return Just<int64_t>(-1);
@@ -3242,12 +3207,12 @@ class TypedElementsAccessor
if (std::isnan(search_value)) {
return Just<int64_t>(-1);
}
- } else if (search_value < std::numeric_limits<ctype>::lowest() ||
- search_value > std::numeric_limits<ctype>::max()) {
+ } else if (search_value < std::numeric_limits<ElementType>::lowest() ||
+ search_value > std::numeric_limits<ElementType>::max()) {
// Return false if value can't be represented in this ElementsKind.
return Just<int64_t>(-1);
}
- typed_search_value = static_cast<ctype>(search_value);
+ typed_search_value = static_cast<ElementType>(search_value);
if (static_cast<double>(typed_search_value) != search_value) {
return Just<int64_t>(-1); // Loss of precision.
}
@@ -3255,13 +3220,14 @@ class TypedElementsAccessor
// Prototype has no elements, and not searching for the hole --- limit
// search to backing store length.
- if (static_cast<uint32_t>(elements->length()) < length) {
- length = elements->length();
+ if (typed_array.length() < length) {
+ // TODO(bmeurer, v8:4153): Don't cast to uint32_t here.
+ length = static_cast<uint32_t>(typed_array.length());
}
for (uint32_t k = start_from; k < length; ++k) {
- ctype element_k = elements->get_scalar(k);
- if (element_k == typed_search_value) return Just<int64_t>(k);
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ if (elem_k == typed_search_value) return Just<int64_t>(k);
}
return Just<int64_t>(-1);
}
@@ -3270,62 +3236,64 @@ class TypedElementsAccessor
Handle<Object> value,
uint32_t start_from) {
DisallowHeapAllocation no_gc;
- DCHECK(!WasDetached(*receiver));
+ JSTypedArray typed_array = JSTypedArray::cast(*receiver);
+
+ DCHECK(!typed_array.WasDetached());
- BackingStore elements = BackingStore::cast(receiver->elements());
- ctype typed_search_value;
+ ElementType typed_search_value;
+ ElementType* data_ptr =
+ reinterpret_cast<ElementType*>(typed_array.DataPtr());
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
if (!value->IsBigInt()) return Just<int64_t>(-1);
bool lossless;
- typed_search_value = BackingStore::FromHandle(value, &lossless);
+ typed_search_value = FromHandle(value, &lossless);
if (!lossless) return Just<int64_t>(-1);
} else {
if (!value->IsNumber()) return Just<int64_t>(-1);
double search_value = value->Number();
if (!std::isfinite(search_value)) {
- if (std::is_integral<ctype>::value) {
+ if (std::is_integral<ElementType>::value) {
// Integral types cannot represent +Inf or NaN.
return Just<int64_t>(-1);
} else if (std::isnan(search_value)) {
// Strict Equality Comparison of NaN is always false.
return Just<int64_t>(-1);
}
- } else if (search_value < std::numeric_limits<ctype>::lowest() ||
- search_value > std::numeric_limits<ctype>::max()) {
+ } else if (search_value < std::numeric_limits<ElementType>::lowest() ||
+ search_value > std::numeric_limits<ElementType>::max()) {
// Return -1 if value can't be represented in this ElementsKind.
return Just<int64_t>(-1);
}
- typed_search_value = static_cast<ctype>(search_value);
+ typed_search_value = static_cast<ElementType>(search_value);
if (static_cast<double>(typed_search_value) != search_value) {
return Just<int64_t>(-1); // Loss of precision.
}
}
- DCHECK_LT(start_from, elements->length());
-
+ DCHECK_LT(start_from, typed_array.length());
uint32_t k = start_from;
do {
- ctype element_k = elements->get_scalar(k);
- if (element_k == typed_search_value) return Just<int64_t>(k);
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ if (elem_k == typed_search_value) return Just<int64_t>(k);
} while (k-- != 0);
return Just<int64_t>(-1);
}
static void ReverseImpl(JSObject receiver) {
DisallowHeapAllocation no_gc;
- DCHECK(!WasDetached(receiver));
+ JSTypedArray typed_array = JSTypedArray::cast(receiver);
- BackingStore elements = BackingStore::cast(receiver->elements());
+ DCHECK(!typed_array.WasDetached());
- uint32_t len = elements->length();
+ size_t len = typed_array.length();
if (len == 0) return;
- ctype* data = static_cast<ctype*>(elements->DataPtr());
- if (COMPRESS_POINTERS_BOOL && alignof(ctype) > kTaggedSize) {
+ ElementType* data = static_cast<ElementType*>(typed_array.DataPtr());
+ if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
// TODO(ishell, v8:8875): See UnalignedSlot<T> for details.
- std::reverse(UnalignedSlot<ctype>(data),
- UnalignedSlot<ctype>(data + len));
+ std::reverse(UnalignedSlot<ElementType>(data),
+ UnalignedSlot<ElementType>(data + len));
} else {
std::reverse(data, data + len);
}
@@ -3334,13 +3302,10 @@ class TypedElementsAccessor
static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
Handle<JSObject> object,
uint32_t length) {
- DCHECK(!WasDetached(*object));
- DCHECK(object->IsJSTypedArray());
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object);
Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
- Handle<BackingStore> elements(BackingStore::cast(object->elements()),
- isolate);
for (uint32_t i = 0; i < length; i++) {
- Handle<Object> value = AccessorClass::GetImpl(isolate, *elements, i);
+ Handle<Object> value = AccessorClass::GetInternalImpl(typed_array, i);
result->set(i, *value);
}
return result;
@@ -3350,42 +3315,22 @@ class TypedElementsAccessor
JSTypedArray destination,
size_t start, size_t end) {
DisallowHeapAllocation no_gc;
- DCHECK_EQ(destination->GetElementsKind(), AccessorClass::kind());
- CHECK(!source->WasDetached());
- CHECK(!destination->WasDetached());
+ DCHECK_EQ(destination.GetElementsKind(), AccessorClass::kind());
+ CHECK(!source.WasDetached());
+ CHECK(!destination.WasDetached());
DCHECK_LE(start, end);
- DCHECK_LE(end, source->length_value());
-
+ DCHECK_LE(end, source.length());
size_t count = end - start;
- DCHECK_LE(count, destination->length_value());
-
- FixedTypedArrayBase src_elements =
- FixedTypedArrayBase::cast(source->elements());
- BackingStore dest_elements = BackingStore::cast(destination->elements());
-
- size_t element_size = source->element_size();
- uint8_t* source_data =
- static_cast<uint8_t*>(src_elements->DataPtr()) + start * element_size;
-
- // Fast path for the same type result array
- if (source->type() == destination->type()) {
- uint8_t* dest_data = static_cast<uint8_t*>(dest_elements->DataPtr());
-
- // The spec defines the copy-step iteratively, which means that we
- // cannot use memcpy if the buffer is shared.
- uint8_t* end_ptr = source_data + count * element_size;
- while (source_data < end_ptr) {
- *dest_data++ = *source_data++;
- }
- return;
- }
-
- switch (source->GetElementsKind()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- CopyBetweenBackingStores<Type##ArrayTraits>(source_data, dest_elements, \
- count, 0); \
- break;
+ DCHECK_LE(count, destination.length());
+ ElementType* dest_data = static_cast<ElementType*>(destination.DataPtr());
+ switch (source.GetElementsKind()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: { \
+ ctype* source_data = reinterpret_cast<ctype*>(source.DataPtr()) + start; \
+ CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>(source_data, dest_data, \
+ count); \
+ break; \
+ }
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
default:
@@ -3394,23 +3339,24 @@ class TypedElementsAccessor
}
}
- static bool HasSimpleRepresentation(InstanceType type) {
- return !(type == FIXED_FLOAT32_ARRAY_TYPE ||
- type == FIXED_FLOAT64_ARRAY_TYPE ||
- type == FIXED_UINT8_CLAMPED_ARRAY_TYPE);
+ static bool HasSimpleRepresentation(ExternalArrayType type) {
+ return !(type == kExternalFloat32Array || type == kExternalFloat64Array ||
+ type == kExternalUint8ClampedArray);
}
- template <typename SourceTraits>
- static void CopyBetweenBackingStores(void* source_data_ptr, BackingStore dest,
- size_t length, uint32_t offset) {
+ template <ElementsKind SourceKind, typename SourceElementType>
+ static void CopyBetweenBackingStores(SourceElementType* source_data_ptr,
+ ElementType* dest_data_ptr,
+ size_t length) {
DisallowHeapAllocation no_gc;
- for (uint32_t i = 0; i < length; i++) {
+ for (size_t i = 0; i < length; i++) {
// We use scalar accessors to avoid boxing/unboxing, so there are no
// allocations.
- typename SourceTraits::ElementType elem =
- FixedTypedArray<SourceTraits>::get_scalar_from_data_ptr(
+ SourceElementType source_elem =
+ TypedElementsAccessor<SourceKind, SourceElementType>::GetImpl(
source_data_ptr, i);
- dest->set(offset + i, dest->from(elem));
+ ElementType dest_elem = FromScalar(source_elem);
+ SetImpl(dest_data_ptr, i, dest_elem);
}
}
@@ -3421,39 +3367,32 @@ class TypedElementsAccessor
// side-effects, as the source elements will always be a number.
DisallowHeapAllocation no_gc;
- CHECK(!source->WasDetached());
- CHECK(!destination->WasDetached());
+ CHECK(!source.WasDetached());
+ CHECK(!destination.WasDetached());
- FixedTypedArrayBase source_elements =
- FixedTypedArrayBase::cast(source->elements());
- BackingStore destination_elements =
- BackingStore::cast(destination->elements());
+ DCHECK_LE(offset, destination.length());
+ DCHECK_LE(length, destination.length() - offset);
+ DCHECK_LE(length, source.length());
- DCHECK_LE(offset, destination->length_value());
- DCHECK_LE(length, destination->length_value() - offset);
- DCHECK(source->length()->IsSmi());
- DCHECK_LE(length, source->length_value());
-
- InstanceType source_type = source_elements->map()->instance_type();
- InstanceType destination_type =
- destination_elements->map()->instance_type();
+ ExternalArrayType source_type = source.type();
+ ExternalArrayType destination_type = destination.type();
bool same_type = source_type == destination_type;
- bool same_size = source->element_size() == destination->element_size();
+ bool same_size = source.element_size() == destination.element_size();
bool both_are_simple = HasSimpleRepresentation(source_type) &&
HasSimpleRepresentation(destination_type);
- uint8_t* source_data = static_cast<uint8_t*>(source_elements->DataPtr());
- uint8_t* dest_data = static_cast<uint8_t*>(destination_elements->DataPtr());
- size_t source_byte_length = source->byte_length();
- size_t dest_byte_length = destination->byte_length();
+ uint8_t* source_data = static_cast<uint8_t*>(source.DataPtr());
+ uint8_t* dest_data = static_cast<uint8_t*>(destination.DataPtr());
+ size_t source_byte_length = source.byte_length();
+ size_t dest_byte_length = destination.byte_length();
// We can simply copy the backing store if the types are the same, or if
// we are converting e.g. Uint8 <-> Int8, as the binary representation
// will be the same. This is not the case for floats or clamped Uint8,
// which have special conversion operations.
if (same_type || (same_size && both_are_simple)) {
- size_t element_size = source->element_size();
+ size_t element_size = source.element_size();
std::memmove(dest_data + offset * element_size, source_data,
length * element_size);
} else {
@@ -3468,11 +3407,12 @@ class TypedElementsAccessor
source_data = cloned_source_elements.get();
}
- switch (source->GetElementsKind()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- CopyBetweenBackingStores<Type##ArrayTraits>( \
- source_data, destination_elements, length, offset); \
+ switch (source.GetElementsKind()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>( \
+ reinterpret_cast<ctype*>(source_data), \
+ reinterpret_cast<ElementType*>(dest_data) + offset, length); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
@@ -3492,13 +3432,13 @@ class TypedElementsAccessor
if (isolate->force_slow_path()) return true;
#endif
- Object source_proto = source->map()->prototype();
+ Object source_proto = source.map().prototype();
// Null prototypes are OK - we don't need to do prototype chain lookups on
// them.
- if (source_proto->IsNull(isolate)) return false;
- if (source_proto->IsJSProxy()) return true;
- if (!context->native_context()->is_initial_array_prototype(
+ if (source_proto.IsNull(isolate)) return false;
+ if (source_proto.IsJSProxy()) return true;
+ if (!context.native_context().is_initial_array_prototype(
JSObject::cast(source_proto))) {
return true;
}
@@ -3510,24 +3450,23 @@ class TypedElementsAccessor
JSTypedArray destination, size_t length,
uint32_t offset) {
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) return false;
- Isolate* isolate = source->GetIsolate();
+ Isolate* isolate = source.GetIsolate();
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate);
- CHECK(!destination->WasDetached());
+ CHECK(!destination.WasDetached());
size_t current_length;
- DCHECK(source->length()->IsNumber() &&
- TryNumberToSize(source->length(), &current_length) &&
+ DCHECK(source.length().IsNumber() &&
+ TryNumberToSize(source.length(), &current_length) &&
length <= current_length);
USE(current_length);
- size_t dest_length = destination->length_value();
+ size_t dest_length = destination.length();
DCHECK(length + offset <= dest_length);
USE(dest_length);
- ElementsKind kind = source->GetElementsKind();
- BackingStore dest = BackingStore::cast(destination->elements());
+ ElementsKind kind = source.GetElementsKind();
// When we find the hole, we normally have to look up the element on the
// prototype chain, which is not handled here and we return false instead.
@@ -3536,54 +3475,50 @@ class TypedElementsAccessor
// the hole into undefined.
if (HoleyPrototypeLookupRequired(isolate, context, source)) return false;
- Object undefined = ReadOnlyRoots(isolate).undefined_value();
+ Oddball undefined = ReadOnlyRoots(isolate).undefined_value();
+ ElementType* dest_data =
+ reinterpret_cast<ElementType*>(destination.DataPtr()) + offset;
- // Fastpath for packed Smi kind.
+ // Fast-path for packed Smi kind.
if (kind == PACKED_SMI_ELEMENTS) {
- FixedArray source_store = FixedArray::cast(source->elements());
+ FixedArray source_store = FixedArray::cast(source.elements());
for (uint32_t i = 0; i < length; i++) {
- Object elem = source_store->get(i);
- DCHECK(elem->IsSmi());
- int int_value = Smi::ToInt(elem);
- dest->set(offset + i, dest->from(int_value));
+ Object elem = source_store.get(i);
+ SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
}
return true;
} else if (kind == HOLEY_SMI_ELEMENTS) {
- FixedArray source_store = FixedArray::cast(source->elements());
+ FixedArray source_store = FixedArray::cast(source.elements());
for (uint32_t i = 0; i < length; i++) {
- if (source_store->is_the_hole(isolate, i)) {
- dest->SetValue(offset + i, undefined);
+ if (source_store.is_the_hole(isolate, i)) {
+ SetImpl(dest_data, i, FromObject(undefined));
} else {
- Object elem = source_store->get(i);
- DCHECK(elem->IsSmi());
- int int_value = Smi::ToInt(elem);
- dest->set(offset + i, dest->from(int_value));
+ Object elem = source_store.get(i);
+ SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
}
}
return true;
} else if (kind == PACKED_DOUBLE_ELEMENTS) {
- // Fastpath for packed double kind. We avoid boxing and then immediately
+ // Fast-path for packed double kind. We avoid boxing and then immediately
// unboxing the double here by using get_scalar.
- FixedDoubleArray source_store =
- FixedDoubleArray::cast(source->elements());
+ FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements());
for (uint32_t i = 0; i < length; i++) {
// Use the from_double conversion for this specific TypedArray type,
// rather than relying on C++ to convert elem.
- double elem = source_store->get_scalar(i);
- dest->set(offset + i, dest->from(elem));
+ double elem = source_store.get_scalar(i);
+ SetImpl(dest_data, i, FromScalar(elem));
}
return true;
} else if (kind == HOLEY_DOUBLE_ELEMENTS) {
- FixedDoubleArray source_store =
- FixedDoubleArray::cast(source->elements());
+ FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements());
for (uint32_t i = 0; i < length; i++) {
- if (source_store->is_the_hole(i)) {
- dest->SetValue(offset + i, undefined);
+ if (source_store.is_the_hole(i)) {
+ SetImpl(dest_data, i, FromObject(undefined));
} else {
- double elem = source_store->get_scalar(i);
- dest->set(offset + i, dest->from(elem));
+ double elem = source_store.get_scalar(i);
+ SetImpl(dest_data, i, FromScalar(elem));
}
}
return true;
@@ -3595,13 +3530,21 @@ class TypedElementsAccessor
Handle<JSTypedArray> destination,
size_t length, uint32_t offset) {
Isolate* isolate = destination->GetIsolate();
- Handle<BackingStore> destination_elements(
- BackingStore::cast(destination->elements()), isolate);
- for (uint32_t i = 0; i < length; i++) {
- LookupIterator it(isolate, source, i);
+ for (size_t i = 0; i < length; i++) {
Handle<Object> elem;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
- Object::GetProperty(&it));
+ if (i <= kMaxUInt32) {
+ LookupIterator it(isolate, source, static_cast<uint32_t>(i));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ Object::GetProperty(&it));
+ } else {
+ char buffer[kDoubleToCStringMinBufferSize];
+ Vector<char> string(buffer, arraysize(buffer));
+ DoubleToCString(static_cast<double>(i), string);
+ Handle<Name> name = isolate->factory()->InternalizeUtf8String(string);
+ LookupIterator it(isolate, source, name);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ Object::GetProperty(&it));
+ }
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
BigInt::FromObject(isolate, elem));
@@ -3620,7 +3563,8 @@ class TypedElementsAccessor
}
// The spec says we store the length, then get each element, so we don't
// need to check changes to length.
- destination_elements->SetValue(offset + i, *elem);
+ // TODO(bmeurer, v8:4153): Remove this static_cast.
+ SetImpl(destination, static_cast<uint32_t>(offset + i), *elem);
}
return *isolate->factory()->undefined_value();
}
@@ -3634,7 +3578,7 @@ class TypedElementsAccessor
Isolate* isolate = destination->GetIsolate();
Handle<JSTypedArray> destination_ta =
Handle<JSTypedArray>::cast(destination);
- DCHECK_LE(offset + length, destination_ta->length_value());
+ DCHECK_LE(offset + length, destination_ta->length());
CHECK(!destination_ta->WasDetached());
if (length == 0) return *isolate->factory()->undefined_value();
@@ -3647,33 +3591,18 @@ class TypedElementsAccessor
source_kind == BIGINT64_ELEMENTS || source_kind == BIGUINT64_ELEMENTS;
bool target_is_bigint =
Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS;
- if (target_is_bigint) {
- if (V8_UNLIKELY(!source_is_bigint)) {
- Handle<Object> first =
- JSReceiver::GetElement(isolate, source_ta, 0).ToHandleChecked();
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kBigIntFromObject, first));
- }
- } else {
- if (V8_UNLIKELY(source_is_bigint)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kBigIntToNumber));
- }
- }
// If we have to copy more elements than we have in the source, we need to
// do special handling and conversion; that happens in the slow case.
- if (!source_ta->WasDetached() &&
- length + offset <= source_ta->length_value()) {
+ if (source_is_bigint == target_is_bigint && !source_ta->WasDetached() &&
+ length + offset <= source_ta->length()) {
CopyElementsFromTypedArray(*source_ta, *destination_ta, length, offset);
return *isolate->factory()->undefined_value();
}
- }
-
- // Fast cases for packed numbers kinds where we don't need to allocate.
- if (source->IsJSArray()) {
+ } else if (source->IsJSArray()) {
+ // Fast cases for packed numbers kinds where we don't need to allocate.
Handle<JSArray> source_js_array = Handle<JSArray>::cast(source);
size_t current_length;
- if (source_js_array->length()->IsNumber() &&
+ if (source_js_array->length().IsNumber() &&
TryNumberToSize(source_js_array->length(), &current_length)) {
if (length <= current_length) {
Handle<JSArray> source_array = Handle<JSArray>::cast(source);
@@ -3690,10 +3619,214 @@ class TypedElementsAccessor
}
};
-#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype) \
- typedef TypedElementsAccessor<TYPE##_ELEMENTS, ctype> \
- Fixed##Type##ElementsAccessor;
+// static
+template <>
+Handle<Object> TypedElementsAccessor<INT8_ELEMENTS, int8_t>::ToHandle(
+ Isolate* isolate, int8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<UINT8_ELEMENTS, uint8_t>::ToHandle(
+ Isolate* isolate, uint8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<INT16_ELEMENTS, int16_t>::ToHandle(
+ Isolate* isolate, int16_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+// static
+template <>
+Handle<Object> TypedElementsAccessor<UINT16_ELEMENTS, uint16_t>::ToHandle(
+ Isolate* isolate, uint16_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<INT32_ELEMENTS, int32_t>::ToHandle(
+ Isolate* isolate, int32_t value) {
+ return isolate->factory()->NewNumberFromInt(value);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<UINT32_ELEMENTS, uint32_t>::ToHandle(
+ Isolate* isolate, uint32_t value) {
+ return isolate->factory()->NewNumberFromUint(value);
+}
+
+// static
+template <>
+float TypedElementsAccessor<FLOAT32_ELEMENTS, float>::FromScalar(double value) {
+ using limits = std::numeric_limits<float>;
+ if (value > limits::max()) return limits::infinity();
+ if (value < limits::lowest()) return -limits::infinity();
+ return static_cast<float>(value);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<FLOAT32_ELEMENTS, float>::ToHandle(
+ Isolate* isolate, float value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+// static
+template <>
+double TypedElementsAccessor<FLOAT64_ELEMENTS, double>::FromScalar(
+ double value) {
+ return value;
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<FLOAT64_ELEMENTS, double>::ToHandle(
+ Isolate* isolate, double value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<UINT8_CLAMPED_ELEMENTS, uint8_t>::FromScalar(
+ int value) {
+ if (value < 0x00) return 0x00;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<UINT8_CLAMPED_ELEMENTS, uint8_t>::FromScalar(
+ uint32_t value) {
+ // We need this special case for Uint32 -> Uint8Clamped, because the highest
+ // Uint32 values will be negative as an int, clamping to 0, rather than 255.
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<UINT8_CLAMPED_ELEMENTS, uint8_t>::FromScalar(
+ double value) {
+ // Handle NaNs and less than zero values which clamp to zero.
+ if (!(value > 0)) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(lrint(value));
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<UINT8_CLAMPED_ELEMENTS, uint8_t>::ToHandle(
+ Isolate* isolate, uint8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ int value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ uint32_t value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ double value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ int64_t value) {
+ return value;
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ uint64_t value) {
+ return static_cast<int64_t>(value);
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::FromObject(
+ Object value, bool* lossless) {
+ return BigInt::cast(value).AsInt64(lossless);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<BIGINT64_ELEMENTS, int64_t>::ToHandle(
+ Isolate* isolate, int64_t value) {
+ return BigInt::FromInt64(isolate, value);
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ int value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ uint32_t value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ double value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ int64_t value) {
+ return static_cast<uint64_t>(value);
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromScalar(
+ uint64_t value) {
+ return value;
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::FromObject(
+ Object value, bool* lossless) {
+ return BigInt::cast(value).AsUint64(lossless);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::ToHandle(
+ Isolate* isolate, uint64_t value) {
+ return BigInt::FromUint64(isolate, value);
+}
+
+#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype) \
+ using Type##ElementsAccessor = TypedElementsAccessor<TYPE##_ELEMENTS, ctype>;
TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
#undef FIXED_ELEMENTS_ACCESSOR
@@ -3701,11 +3834,6 @@ template <typename Subclass, typename ArgumentsAccessor, typename KindTraits>
class SloppyArgumentsElementsAccessor
: public ElementsAccessorBase<Subclass, KindTraits> {
public:
- explicit SloppyArgumentsElementsAccessor(const char* name)
- : ElementsAccessorBase<Subclass, KindTraits>(name) {
- USE(KindTraits::Kind);
- }
-
static void ConvertArgumentsStoreResult(
Handle<SloppyArgumentsElements> elements, Handle<Object> result) {
UNREACHABLE();
@@ -3720,11 +3848,11 @@ class SloppyArgumentsElementsAccessor
// Read context mapped entry.
DisallowHeapAllocation no_gc;
Object probe = elements->get_mapped_entry(entry);
- DCHECK(!probe->IsTheHole(isolate));
+ DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
- DCHECK(!context->get(context_entry)->IsTheHole(isolate));
- return handle(context->get(context_entry), isolate);
+ DCHECK(!context.get(context_entry).IsTheHole(isolate));
+ return handle(context.get(context_entry), isolate);
} else {
// Entry is not context mapped, defer to the arguments.
Handle<Object> result = ArgumentsAccessor::GetImpl(
@@ -3751,26 +3879,26 @@ class SloppyArgumentsElementsAccessor
static inline void SetImpl(FixedArrayBase store, uint32_t entry,
Object value) {
SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements.parameter_map_length();
if (entry < length) {
// Store context mapped entry.
DisallowHeapAllocation no_gc;
- Object probe = elements->get_mapped_entry(entry);
- DCHECK(!probe->IsTheHole());
- Context context = elements->context();
+ Object probe = elements.get_mapped_entry(entry);
+ DCHECK(!probe.IsTheHole());
+ Context context = elements.context();
int context_entry = Smi::ToInt(probe);
- DCHECK(!context->get(context_entry)->IsTheHole());
- context->set(context_entry, value);
+ DCHECK(!context.get(context_entry).IsTheHole());
+ context.set(context_entry, value);
} else {
// Entry is not context mapped defer to arguments.
- FixedArray arguments = elements->arguments();
+ FixedArray arguments = elements.arguments();
Object current = ArgumentsAccessor::GetRaw(arguments, entry - length);
- if (current->IsAliasedArgumentsEntry()) {
+ if (current.IsAliasedArgumentsEntry()) {
AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(current);
- Context context = elements->context();
- int context_entry = alias->aliased_context_slot();
- DCHECK(!context->get(context_entry)->IsTheHole());
- context->set(context_entry, value);
+ Context context = elements.context();
+ int context_entry = alias.aliased_context_slot();
+ DCHECK(!context.get(context_entry).IsTheHole());
+ context.set(context_entry, value);
} else {
ArgumentsAccessor::SetImpl(arguments, entry - length, value);
}
@@ -3786,8 +3914,8 @@ class SloppyArgumentsElementsAccessor
static uint32_t GetCapacityImpl(JSObject holder, FixedArrayBase store) {
SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
- FixedArray arguments = elements->arguments();
- return elements->parameter_map_length() +
+ FixedArray arguments = elements.arguments();
+ return elements.parameter_map_length() +
ArgumentsAccessor::GetCapacityImpl(holder, arguments);
}
@@ -3795,19 +3923,19 @@ class SloppyArgumentsElementsAccessor
FixedArrayBase backing_store) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(backing_store);
- FixedArrayBase arguments = elements->arguments();
- return elements->parameter_map_length() +
+ FixedArrayBase arguments = elements.arguments();
+ return elements.parameter_map_length() +
ArgumentsAccessor::GetMaxNumberOfEntries(holder, arguments);
}
static uint32_t NumberOfElementsImpl(JSObject receiver,
FixedArrayBase backing_store) {
- Isolate* isolate = receiver->GetIsolate();
+ Isolate* isolate = receiver.GetIsolate();
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(backing_store);
- FixedArrayBase arguments = elements->arguments();
+ FixedArrayBase arguments = elements.arguments();
uint32_t nof_elements = 0;
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements.parameter_map_length();
for (uint32_t entry = 0; entry < length; entry++) {
if (HasParameterMapArg(isolate, elements, entry)) nof_elements++;
}
@@ -3832,18 +3960,18 @@ class SloppyArgumentsElementsAccessor
uint32_t entry) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements.parameter_map_length();
if (entry < length) {
return HasParameterMapArg(isolate, elements, entry);
}
- FixedArrayBase arguments = elements->arguments();
+ FixedArrayBase arguments = elements.arguments();
return ArgumentsAccessor::HasEntryImpl(isolate, arguments, entry - length);
}
static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(backing_store);
- FixedArray arguments = elements->arguments();
+ FixedArray arguments = elements.arguments();
return ArgumentsAccessor::HasAccessorsImpl(holder, arguments);
}
@@ -3851,9 +3979,9 @@ class SloppyArgumentsElementsAccessor
uint32_t entry) {
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements.parameter_map_length();
if (entry < length) return entry;
- FixedArray arguments = elements->arguments();
+ FixedArray arguments = elements.arguments();
return ArgumentsAccessor::GetIndexForEntryImpl(arguments, entry - length);
}
@@ -3863,32 +3991,32 @@ class SloppyArgumentsElementsAccessor
SloppyArgumentsElements elements =
SloppyArgumentsElements::cast(parameters);
if (HasParameterMapArg(isolate, elements, index)) return index;
- FixedArray arguments = elements->arguments();
+ FixedArray arguments = elements.arguments();
uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(
isolate, holder, arguments, index, filter);
if (entry == kMaxUInt32) return kMaxUInt32;
// Arguments entries could overlap with the dictionary entries, hence offset
// them by the number of context mapped entries.
- return elements->parameter_map_length() + entry;
+ return elements.parameter_map_length() + entry;
}
static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
SloppyArgumentsElements elements =
- SloppyArgumentsElements::cast(holder->elements());
- uint32_t length = elements->parameter_map_length();
+ SloppyArgumentsElements::cast(holder.elements());
+ uint32_t length = elements.parameter_map_length();
if (entry < length) {
return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
}
- FixedArray arguments = elements->arguments();
+ FixedArray arguments = elements.arguments();
return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
}
static bool HasParameterMapArg(Isolate* isolate,
SloppyArgumentsElements elements,
uint32_t index) {
- uint32_t length = elements->parameter_map_length();
+ uint32_t length = elements.parameter_map_length();
if (index >= length) return false;
- return !elements->get_mapped_entry(index)->IsTheHole(isolate);
+ return !elements.get_mapped_entry(index).IsTheHole(isolate);
}
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
@@ -3941,7 +4069,7 @@ class SloppyArgumentsElementsAccessor
uint32_t length = elements->parameter_map_length();
for (uint32_t i = 0; i < length; ++i) {
- if (elements->get_mapped_entry(i)->IsTheHole(isolate)) continue;
+ if (elements->get_mapped_entry(i).IsTheHole(isolate)) continue;
if (convert == GetKeysConversion::kConvertToString) {
Handle<String> index_string = isolate->factory()->Uint32ToString(i);
list->set(insertion_index, *index_string);
@@ -4040,42 +4168,13 @@ class SloppyArgumentsElementsAccessor
}
return Just<int64_t>(-1);
}
-
- static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
- uint32_t end) {
- Isolate* isolate = receiver->GetIsolate();
- uint32_t result_len = end < start ? 0u : end - start;
- Handle<JSArray> result_array =
- isolate->factory()->NewJSArray(HOLEY_ELEMENTS, result_len, result_len);
- DisallowHeapAllocation no_gc;
- FixedArray elements = FixedArray::cast(result_array->elements());
- FixedArray parameters = FixedArray::cast(receiver->elements());
- uint32_t insertion_index = 0;
- for (uint32_t i = start; i < end; i++) {
- uint32_t entry = GetEntryForIndexImpl(isolate, *receiver, parameters, i,
- ALL_PROPERTIES);
- if (entry != kMaxUInt32 && HasEntryImpl(isolate, parameters, entry)) {
- elements->set(insertion_index, *GetImpl(isolate, parameters, entry));
- } else {
- elements->set_the_hole(isolate, insertion_index);
- }
- insertion_index++;
- }
- return result_array;
- }
};
-
class SlowSloppyArgumentsElementsAccessor
: public SloppyArgumentsElementsAccessor<
SlowSloppyArgumentsElementsAccessor, DictionaryElementsAccessor,
- ElementsKindTraits<SLOW_SLOPPY_ARGUMENTS_ELEMENTS> > {
+ ElementsKindTraits<SLOW_SLOPPY_ARGUMENTS_ELEMENTS>> {
public:
- explicit SlowSloppyArgumentsElementsAccessor(const char* name)
- : SloppyArgumentsElementsAccessor<
- SlowSloppyArgumentsElementsAccessor, DictionaryElementsAccessor,
- ElementsKindTraits<SLOW_SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
-
static Handle<Object> ConvertArgumentsStoreResult(
Isolate* isolate, Handle<SloppyArgumentsElements> elements,
Handle<Object> result) {
@@ -4084,9 +4183,9 @@ class SlowSloppyArgumentsElementsAccessor
DisallowHeapAllocation no_gc;
AliasedArgumentsEntry alias = AliasedArgumentsEntry::cast(*result);
Context context = elements->context();
- int context_entry = alias->aliased_context_slot();
- DCHECK(!context->get(context_entry)->IsTheHole(isolate));
- return handle(context->get(context_entry), isolate);
+ int context_entry = alias.aliased_context_slot();
+ DCHECK(!context.get(context_entry).IsTheHole(isolate));
+ return handle(context.get(context_entry), isolate);
}
return result;
}
@@ -4133,11 +4232,11 @@ class SlowSloppyArgumentsElementsAccessor
uint32_t length = elements->parameter_map_length();
if (entry < length) {
Object probe = elements->get_mapped_entry(entry);
- DCHECK(!probe->IsTheHole(isolate));
+ DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
- DCHECK(!context->get(context_entry)->IsTheHole(isolate));
- context->set(context_entry, *value);
+ DCHECK(!context.get(context_entry).IsTheHole(isolate));
+ context.set(context_entry, *value);
// Redefining attributes of an aliased element destroys fast aliasing.
elements->set_mapped_entry(entry,
@@ -4165,18 +4264,11 @@ class SlowSloppyArgumentsElementsAccessor
}
};
-
class FastSloppyArgumentsElementsAccessor
: public SloppyArgumentsElementsAccessor<
FastSloppyArgumentsElementsAccessor, FastHoleyObjectElementsAccessor,
- ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS> > {
+ ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS>> {
public:
- explicit FastSloppyArgumentsElementsAccessor(const char* name)
- : SloppyArgumentsElementsAccessor<
- FastSloppyArgumentsElementsAccessor,
- FastHoleyObjectElementsAccessor,
- ElementsKindTraits<FAST_SLOPPY_ARGUMENTS_ELEMENTS> >(name) {}
-
static Handle<Object> ConvertArgumentsStoreResult(
Isolate* isolate, Handle<SloppyArgumentsElements> paramtere_map,
Handle<Object> result) {
@@ -4187,7 +4279,7 @@ class FastSloppyArgumentsElementsAccessor
static Handle<FixedArray> GetArguments(Isolate* isolate,
FixedArrayBase store) {
SloppyArgumentsElements elements = SloppyArgumentsElements::cast(store);
- return Handle<FixedArray>(elements->arguments(), isolate);
+ return Handle<FixedArray>(elements.arguments(), isolate);
}
static Handle<NumberDictionary> NormalizeImpl(
@@ -4258,7 +4350,7 @@ class FastSloppyArgumentsElementsAccessor
uint32_t from_start, FixedArrayBase to,
ElementsKind from_kind, uint32_t to_start,
int packed_size, int copy_size) {
- DCHECK(!to->IsDictionary());
+ DCHECK(!to.IsNumberDictionary());
if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
CopyDictionaryToObjectElements(isolate, from, from_start, to,
HOLEY_ELEMENTS, to_start, copy_size);
@@ -4295,11 +4387,6 @@ template <typename Subclass, typename BackingStoreAccessor, typename KindTraits>
class StringWrapperElementsAccessor
: public ElementsAccessorBase<Subclass, KindTraits> {
public:
- explicit StringWrapperElementsAccessor(const char* name)
- : ElementsAccessorBase<Subclass, KindTraits>(name) {
- USE(KindTraits::Kind);
- }
-
static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
uint32_t entry) {
return GetImpl(holder, entry);
@@ -4323,7 +4410,7 @@ class StringWrapperElementsAccessor
}
static PropertyDetails GetDetailsImpl(JSObject holder, uint32_t entry) {
- uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
+ uint32_t length = static_cast<uint32_t>(GetString(holder).length());
if (entry < length) {
PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
@@ -4335,7 +4422,7 @@ class StringWrapperElementsAccessor
static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject holder,
FixedArrayBase backing_store,
uint32_t index, PropertyFilter filter) {
- uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
+ uint32_t length = static_cast<uint32_t>(GetString(holder).length());
if (index < length) return index;
uint32_t backing_store_entry = BackingStoreAccessor::GetEntryForIndexImpl(
isolate, holder, backing_store, index, filter);
@@ -4345,7 +4432,7 @@ class StringWrapperElementsAccessor
}
static void DeleteImpl(Handle<JSObject> holder, uint32_t entry) {
- uint32_t length = static_cast<uint32_t>(GetString(*holder)->length());
+ uint32_t length = static_cast<uint32_t>(GetString(*holder).length());
if (entry < length) {
return; // String contents can't be deleted.
}
@@ -4353,7 +4440,7 @@ class StringWrapperElementsAccessor
}
static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object value) {
- uint32_t length = static_cast<uint32_t>(GetString(*holder)->length());
+ uint32_t length = static_cast<uint32_t>(GetString(*holder).length());
if (entry < length) {
return; // String contents are read-only.
}
@@ -4363,7 +4450,7 @@ class StringWrapperElementsAccessor
static void AddImpl(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) {
- DCHECK(index >= static_cast<uint32_t>(GetString(*object)->length()));
+ DCHECK(index >= static_cast<uint32_t>(GetString(*object).length()));
// Explicitly grow fast backing stores if needed. Dictionaries know how to
// extend their capacity themselves.
if (KindTraits::Kind == FAST_STRING_WRAPPER_ELEMENTS &&
@@ -4380,7 +4467,7 @@ class StringWrapperElementsAccessor
Handle<FixedArrayBase> store, uint32_t entry,
Handle<Object> value,
PropertyAttributes attributes) {
- uint32_t length = static_cast<uint32_t>(GetString(*object)->length());
+ uint32_t length = static_cast<uint32_t>(GetString(*object).length());
if (entry < length) {
return; // String contents can't be reconfigured.
}
@@ -4408,7 +4495,7 @@ class StringWrapperElementsAccessor
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
KeyAccumulator* keys) {
- uint32_t length = GetString(*object)->length();
+ uint32_t length = GetString(*object).length();
Factory* factory = keys->isolate()->factory();
for (uint32_t i = 0; i < length; i++) {
keys->AddKey(factory->NewNumberFromUint(i));
@@ -4442,7 +4529,7 @@ class StringWrapperElementsAccessor
uint32_t from_start, FixedArrayBase to,
ElementsKind from_kind, uint32_t to_start,
int packed_size, int copy_size) {
- DCHECK(!to->IsDictionary());
+ DCHECK(!to.IsNumberDictionary());
if (from_kind == SLOW_STRING_WRAPPER_ELEMENTS) {
CopyDictionaryToObjectElements(isolate, from, from_start, to,
HOLEY_ELEMENTS, to_start, copy_size);
@@ -4455,17 +4542,17 @@ class StringWrapperElementsAccessor
static uint32_t NumberOfElementsImpl(JSObject object,
FixedArrayBase backing_store) {
- uint32_t length = GetString(object)->length();
+ uint32_t length = GetString(object).length();
return length +
BackingStoreAccessor::NumberOfElementsImpl(object, backing_store);
}
private:
static String GetString(JSObject holder) {
- DCHECK(holder->IsJSValue());
+ DCHECK(holder.IsJSValue());
JSValue js_value = JSValue::cast(holder);
- DCHECK(js_value->value()->IsString());
- return String::cast(js_value->value());
+ DCHECK(js_value.value().IsString());
+ return String::cast(js_value.value());
}
};
@@ -4474,11 +4561,6 @@ class FastStringWrapperElementsAccessor
FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>> {
public:
- explicit FastStringWrapperElementsAccessor(const char* name)
- : StringWrapperElementsAccessor<
- FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
- ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>>(name) {}
-
static Handle<NumberDictionary> NormalizeImpl(
Handle<JSObject> object, Handle<FixedArrayBase> elements) {
return FastHoleyObjectElementsAccessor::NormalizeImpl(object, elements);
@@ -4490,11 +4572,6 @@ class SlowStringWrapperElementsAccessor
SlowStringWrapperElementsAccessor, DictionaryElementsAccessor,
ElementsKindTraits<SLOW_STRING_WRAPPER_ELEMENTS>> {
public:
- explicit SlowStringWrapperElementsAccessor(const char* name)
- : StringWrapperElementsAccessor<
- SlowStringWrapperElementsAccessor, DictionaryElementsAccessor,
- ElementsKindTraits<SLOW_STRING_WRAPPER_ELEMENTS>>(name) {}
-
static bool HasAccessorsImpl(JSObject holder, FixedArrayBase backing_store) {
return DictionaryElementsAccessor::HasAccessorsImpl(holder, backing_store);
}
@@ -4502,46 +4579,6 @@ class SlowStringWrapperElementsAccessor
} // namespace
-
-void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t index,
- bool allow_appending) {
- DisallowHeapAllocation no_allocation;
- Object raw_length;
- const char* elements_type = "array";
- if (obj->IsJSArray()) {
- JSArray array = JSArray::cast(*obj);
- raw_length = array->length();
- } else {
- raw_length = Smi::FromInt(obj->elements()->length());
- elements_type = "object";
- }
-
- if (raw_length->IsNumber()) {
- double n = raw_length->Number();
- if (FastI2D(FastD2UI(n)) == n) {
- int32_t int32_length = DoubleToInt32(n);
- uint32_t compare_length = static_cast<uint32_t>(int32_length);
- if (allow_appending) compare_length++;
- if (index >= compare_length) {
- PrintF("[OOB %s %s (%s length = %d, element accessed = %d) in ",
- elements_type, op, elements_type, static_cast<int>(int32_length),
- static_cast<int>(index));
- TraceTopFrame(obj->GetIsolate());
- PrintF("]\n");
- }
- } else {
- PrintF("[%s elements length not integer value in ", elements_type);
- TraceTopFrame(obj->GetIsolate());
- PrintF("]\n");
- }
- } else {
- PrintF("[%s elements length not a number in ", elements_type);
- TraceTopFrame(obj->GetIsolate());
- PrintF("]\n");
- }
-}
-
-
MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
Arguments* args) {
if (args->length() == 0) {
@@ -4579,8 +4616,8 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
// Set length and elements on the array.
int number_of_elements = args->length();
- JSObject::EnsureCanContainElements(
- array, args, 0, number_of_elements, ALLOW_CONVERTED_DOUBLE_ELEMENTS);
+ JSObject::EnsureCanContainElements(array, args, 0, number_of_elements,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS);
// Allocate an appropriately typed elements array.
ElementsKind elements_kind = array->GetElementsKind();
@@ -4618,13 +4655,12 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
Handle<FixedDoubleArray> double_elms =
Handle<FixedDoubleArray>::cast(elms);
for (int entry = 0; entry < number_of_elements; entry++) {
- double_elms->set(entry, (*args)[entry]->Number());
+ double_elms->set(entry, (*args)[entry].Number());
}
break;
}
default:
UNREACHABLE();
- break;
}
array->set_elements(*elms);
@@ -4641,10 +4677,10 @@ void CopyFastNumberJSArrayElementsToTypedArray(Address raw_context,
JSArray source = JSArray::cast(Object(raw_source));
JSTypedArray destination = JSTypedArray::cast(Object(raw_destination));
- switch (destination->GetElementsKind()) {
+ switch (destination.GetElementsKind()) {
#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
- CHECK(Fixed##Type##ElementsAccessor::TryCopyElementsFastNumber( \
+ CHECK(Type##ElementsAccessor::TryCopyElementsFastNumber( \
context, source, destination, length, static_cast<uint32_t>(offset))); \
break;
TYPED_ARRAYS(TYPED_ARRAYS_CASE)
@@ -4660,10 +4696,10 @@ void CopyTypedArrayElementsToTypedArray(Address raw_source,
JSTypedArray source = JSTypedArray::cast(Object(raw_source));
JSTypedArray destination = JSTypedArray::cast(Object(raw_destination));
- switch (destination->GetElementsKind()) {
+ switch (destination.GetElementsKind()) {
#define TYPED_ARRAYS_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
- Fixed##Type##ElementsAccessor::CopyElementsFromTypedArray( \
+ Type##ElementsAccessor::CopyElementsFromTypedArray( \
source, destination, length, static_cast<uint32_t>(offset)); \
break;
TYPED_ARRAYS(TYPED_ARRAYS_CASE)
@@ -4678,13 +4714,13 @@ void CopyTypedArrayElementsSlice(Address raw_source, Address raw_destination,
JSTypedArray source = JSTypedArray::cast(Object(raw_source));
JSTypedArray destination = JSTypedArray::cast(Object(raw_destination));
- destination->GetElementsAccessor()->CopyTypedArrayElementsSlice(
+ destination.GetElementsAccessor()->CopyTypedArrayElementsSlice(
source, destination, start, end);
}
void ElementsAccessor::InitializeOncePerProcess() {
static ElementsAccessor* accessor_array[] = {
-#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(#Kind),
+#define ACCESSOR_ARRAY(Class, Kind, Store) new Class(),
ELEMENTS_LIST(ACCESSOR_ARRAY)
#undef ACCESSOR_ARRAY
};
@@ -4695,7 +4731,6 @@ void ElementsAccessor::InitializeOncePerProcess() {
elements_accessors_ = accessor_array;
}
-
void ElementsAccessor::TearDown() {
if (elements_accessors_ == nullptr) return;
#define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
@@ -4714,7 +4749,7 @@ Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
bool is_holey = false;
for (uint32_t i = 0; i < concat_size; i++) {
Object arg = (*args)[i];
- ElementsKind arg_kind = JSArray::cast(arg)->GetElementsKind();
+ ElementsKind arg_kind = JSArray::cast(arg).GetElementsKind();
has_raw_doubles = has_raw_doubles || IsDoubleElementsKind(arg_kind);
is_holey = is_holey || IsHoleyElementsKind(arg_kind);
result_elements_kind =
@@ -4745,9 +4780,9 @@ Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
// performance degradation.
JSArray array = JSArray::cast((*args)[i]);
uint32_t len = 0;
- array->length()->ToArrayLength(&len);
+ array.length().ToArrayLength(&len);
if (len == 0) continue;
- ElementsKind from_kind = array->GetElementsKind();
+ ElementsKind from_kind = array.GetElementsKind();
accessor->CopyElements(array, 0, from_kind, storage, insertion_index, len);
insertion_index += len;
}
diff --git a/deps/v8/src/elements.h b/deps/v8/src/objects/elements.h
index 12f8ddc4b5..844cd2ed94 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/objects/elements.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ELEMENTS_H_
-#define V8_ELEMENTS_H_
+#ifndef V8_OBJECTS_ELEMENTS_H_
+#define V8_OBJECTS_ELEMENTS_H_
-#include "src/elements-kind.h"
-#include "src/keys.h"
-#include "src/objects.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/keys.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -18,11 +18,9 @@ class JSTypedArray;
// ElementsKinds.
class ElementsAccessor {
public:
- explicit ElementsAccessor(const char* name) : name_(name) { }
+ ElementsAccessor() = default;
virtual ~ElementsAccessor() = default;
- const char* name() const { return name_; }
-
// Returns a shared ElementsAccessor for the specified ElementsKind.
static ElementsAccessor* ForKind(ElementsKind elements_kind) {
DCHECK_LT(static_cast<int>(elements_kind), kElementsKindCount);
@@ -68,7 +66,6 @@ class ElementsAccessor {
// element that is non-deletable.
virtual void SetLength(Handle<JSArray> holder, uint32_t new_length) = 0;
-
// If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
// of elements from source after source_start to the destination array.
static const int kCopyToEnd = -1;
@@ -132,9 +129,6 @@ class ElementsAccessor {
virtual uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
uint32_t unshift_size) = 0;
- virtual Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
- uint32_t end) = 0;
-
virtual Handle<Object> Pop(Handle<JSArray> receiver) = 0;
virtual Handle<Object> Shift(Handle<JSArray> receiver) = 0;
@@ -217,14 +211,10 @@ class ElementsAccessor {
private:
static ElementsAccessor** elements_accessors_;
- const char* name_;
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
-void CheckArrayAbuse(Handle<JSObject> obj, const char* op, uint32_t index,
- bool allow_appending = false);
-
V8_WARN_UNUSED_RESULT MaybeHandle<Object> ArrayConstructInitializeElements(
Handle<JSArray> array, Arguments* args);
@@ -248,4 +238,4 @@ void CopyTypedArrayElementsSlice(Address raw_source, Address raw_destination,
} // namespace internal
} // namespace v8
-#endif // V8_ELEMENTS_H_
+#endif // V8_OBJECTS_ELEMENTS_H_
diff --git a/deps/v8/src/objects/embedder-data-array.cc b/deps/v8/src/objects/embedder-data-array.cc
index c85e0b9f31..ba3e92c33c 100644
--- a/deps/v8/src/objects/embedder-data-array.cc
+++ b/deps/v8/src/objects/embedder-data-array.cc
@@ -4,7 +4,7 @@
#include "src/objects/embedder-data-array.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "src/objects/embedder-data-array-inl.h"
namespace v8 {
diff --git a/deps/v8/src/objects/embedder-data-array.h b/deps/v8/src/objects/embedder-data-array.h
index f5ab2fa7ee..ba4fe25465 100644
--- a/deps/v8/src/objects/embedder-data-array.h
+++ b/deps/v8/src/objects/embedder-data-array.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_EMBEDDER_DATA_ARRAY_H_
#define V8_OBJECTS_EMBEDDER_DATA_ARRAY_H_
-#include "src/globals.h"
-#include "src/maybe-handles.h"
+#include "src/common/globals.h"
+#include "src/handles/maybe-handles.h"
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/embedder-data-slot-inl.h b/deps/v8/src/objects/embedder-data-slot-inl.h
index b87f31ac7d..6830a4d22e 100644
--- a/deps/v8/src/objects/embedder-data-slot-inl.h
+++ b/deps/v8/src/objects/embedder-data-slot-inl.h
@@ -7,11 +7,11 @@
#include "src/objects/embedder-data-slot.h"
+#include "src/common/v8memory.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/embedder-data-array.h"
#include "src/objects/js-objects-inl.h"
-#include "src/v8memory.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -25,7 +25,7 @@ EmbedderDataSlot::EmbedderDataSlot(EmbedderDataArray array, int entry_index)
EmbedderDataSlot::EmbedderDataSlot(JSObject object, int embedder_field_index)
: SlotBase(FIELD_ADDR(
- object, object->GetEmbedderFieldOffset(embedder_field_index))) {}
+ object, object.GetEmbedderFieldOffset(embedder_field_index))) {}
Object EmbedderDataSlot::load_tagged() const {
return ObjectSlot(address() + kTaggedPayloadOffset).Relaxed_Load();
@@ -56,7 +56,7 @@ void EmbedderDataSlot::store_tagged(EmbedderDataArray array, int entry_index,
// static
void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
Object value) {
- int slot_offset = object->GetEmbedderFieldOffset(embedder_field_index);
+ int slot_offset = object.GetEmbedderFieldOffset(embedder_field_index);
ObjectSlot(FIELD_ADDR(object, slot_offset + kTaggedPayloadOffset))
.Relaxed_Store(value);
WRITE_BARRIER(object, slot_offset + kTaggedPayloadOffset, value);
diff --git a/deps/v8/src/objects/embedder-data-slot.h b/deps/v8/src/objects/embedder-data-slot.h
index 6cebf28f2d..dee8c3ec56 100644
--- a/deps/v8/src/objects/embedder-data-slot.h
+++ b/deps/v8/src/objects/embedder-data-slot.h
@@ -7,8 +7,8 @@
#include <utility>
-#include "src/assert-scope.h"
-#include "src/globals.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
#include "src/objects/slots.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index c3902ca9aa..e06cfce7de 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -8,7 +8,7 @@
#include "src/objects/feedback-cell.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/struct-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -25,10 +25,15 @@ ACCESSORS(FeedbackCell, value, HeapObject, kValueOffset)
INT32_ACCESSORS(FeedbackCell, interrupt_budget, kInterruptBudgetOffset)
void FeedbackCell::clear_padding() {
- if (FeedbackCell::kSize == FeedbackCell::kUnalignedSize) return;
- DCHECK_GE(FeedbackCell::kSize, FeedbackCell::kUnalignedSize);
+ if (FeedbackCell::kAlignedSize == FeedbackCell::kUnalignedSize) return;
+ DCHECK_GE(FeedbackCell::kAlignedSize, FeedbackCell::kUnalignedSize);
memset(reinterpret_cast<byte*>(address() + FeedbackCell::kUnalignedSize), 0,
- FeedbackCell::kSize - FeedbackCell::kUnalignedSize);
+ FeedbackCell::kAlignedSize - FeedbackCell::kUnalignedSize);
+}
+
+void FeedbackCell::reset() {
+ set_value(GetReadOnlyRoots().undefined_value());
+ set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
}
} // namespace internal
diff --git a/deps/v8/src/objects/feedback-cell.h b/deps/v8/src/objects/feedback-cell.h
index a708f4cb92..3c085f72d9 100644
--- a/deps/v8/src/objects/feedback-cell.h
+++ b/deps/v8/src/objects/feedback-cell.h
@@ -37,23 +37,18 @@ class FeedbackCell : public Struct {
DECL_PRINTER(FeedbackCell)
DECL_VERIFIER(FeedbackCell)
-// Layout description.
-#define FEEDBACK_CELL_FIELDS(V) \
- V(kValueOffset, kTaggedSize) \
- /* Non-pointer fields */ \
- V(kInterruptBudgetOffset, kInt32Size) \
- /* Total size. */ \
- V(kUnalignedSize, 0)
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_FEEDBACK_CELL_FIELDS)
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_CELL_FIELDS)
-#undef FEEDBACK_CELL_FIELDS
-
- static const int kSize = RoundUp<kObjectAlignment>(int{kUnalignedSize});
+ static const int kUnalignedSize = kSize;
+ static const int kAlignedSize = RoundUp<kObjectAlignment>(int{kSize});
inline void clear_padding();
+ inline void reset();
using BodyDescriptor =
- FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kSize>;
+ FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kAlignedSize>;
OBJECT_CONSTRUCTORS(FeedbackCell, Struct);
};
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/objects/feedback-vector-inl.h
index 6573ceab65..6b1fdcc1e5 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/objects/feedback-vector-inl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FEEDBACK_VECTOR_INL_H_
-#define V8_FEEDBACK_VECTOR_INL_H_
+#ifndef V8_OBJECTS_FEEDBACK_VECTOR_INL_H_
+#define V8_OBJECTS_FEEDBACK_VECTOR_INL_H_
-#include "src/feedback-vector.h"
+#include "src/objects/feedback-vector.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/code-inl.h"
@@ -45,13 +45,13 @@ int32_t FeedbackMetadata::synchronized_slot_count() const {
int32_t FeedbackMetadata::get(int index) const {
DCHECK(index >= 0 && index < length());
int offset = kHeaderSize + index * kInt32Size;
- return READ_INT32_FIELD(*this, offset);
+ return ReadField<int32_t>(offset);
}
void FeedbackMetadata::set(int index, int32_t value) {
DCHECK(index >= 0 && index < length());
int offset = kHeaderSize + index * kInt32Size;
- WRITE_INT32_FIELD(*this, offset, value);
+ WriteField<int32_t>(offset, value);
}
bool FeedbackMetadata::is_empty() const { return slot_count() == 0; }
@@ -91,7 +91,6 @@ int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
- break;
}
return 1;
}
@@ -102,29 +101,29 @@ Handle<FeedbackCell> ClosureFeedbackCellArray::GetFeedbackCell(int index) {
ACCESSORS(FeedbackVector, shared_function_info, SharedFunctionInfo,
kSharedFunctionInfoOffset)
-WEAK_ACCESSORS(FeedbackVector, optimized_code_weak_or_smi, kOptimizedCodeOffset)
+WEAK_ACCESSORS(FeedbackVector, optimized_code_weak_or_smi,
+ kOptimizedCodeWeakOrSmiOffset)
ACCESSORS(FeedbackVector, closure_feedback_cell_array, ClosureFeedbackCellArray,
kClosureFeedbackCellArrayOffset)
INT32_ACCESSORS(FeedbackVector, length, kLengthOffset)
INT32_ACCESSORS(FeedbackVector, invocation_count, kInvocationCountOffset)
INT32_ACCESSORS(FeedbackVector, profiler_ticks, kProfilerTicksOffset)
-INT32_ACCESSORS(FeedbackVector, deopt_count, kDeoptCountOffset)
+
+void FeedbackVector::clear_padding() {
+ if (FIELD_SIZE(kPaddingOffset) == 0) return;
+ DCHECK_EQ(4, FIELD_SIZE(kPaddingOffset));
+ memset(reinterpret_cast<void*>(address() + kPaddingOffset), 0,
+ FIELD_SIZE(kPaddingOffset));
+}
bool FeedbackVector::is_empty() const { return length() == 0; }
FeedbackMetadata FeedbackVector::metadata() const {
- return shared_function_info()->feedback_metadata();
+ return shared_function_info().feedback_metadata();
}
void FeedbackVector::clear_invocation_count() { set_invocation_count(0); }
-void FeedbackVector::increment_deopt_count() {
- int count = deopt_count();
- if (count < std::numeric_limits<int32_t>::max()) {
- set_deopt_count(count + 1);
- }
-}
-
Code FeedbackVector::optimized_code() const {
MaybeObject slot = optimized_code_weak_or_smi();
DCHECK(slot->IsSmi() || slot->IsWeakOrCleared());
@@ -136,7 +135,7 @@ OptimizationMarker FeedbackVector::optimization_marker() const {
MaybeObject slot = optimized_code_weak_or_smi();
Smi value;
if (!slot->ToSmi(&value)) return OptimizationMarker::kNone;
- return static_cast<OptimizationMarker>(value->value());
+ return static_cast<OptimizationMarker>(value.value());
}
bool FeedbackVector::has_optimized_code() const {
@@ -170,7 +169,7 @@ Handle<FeedbackCell> FeedbackVector::GetClosureFeedbackCell(int index) const {
DCHECK_GE(index, 0);
ClosureFeedbackCellArray cell_array =
ClosureFeedbackCellArray::cast(closure_feedback_cell_array());
- return cell_array->GetFeedbackCell(index);
+ return cell_array.GetFeedbackCell(index);
}
void FeedbackVector::Set(FeedbackSlot slot, MaybeObject value,
@@ -287,13 +286,13 @@ Symbol FeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
}
bool FeedbackMetadataIterator::HasNext() const {
- return next_slot_.ToInt() < metadata()->slot_count();
+ return next_slot_.ToInt() < metadata().slot_count();
}
FeedbackSlot FeedbackMetadataIterator::Next() {
DCHECK(HasNext());
cur_slot_ = next_slot_;
- slot_kind_ = metadata()->GetKind(cur_slot_);
+ slot_kind_ = metadata().GetKind(cur_slot_);
next_slot_ = FeedbackSlot(next_slot_.ToInt() + entry_size());
return cur_slot_;
}
@@ -303,18 +302,18 @@ int FeedbackMetadataIterator::entry_size() const {
}
MaybeObject FeedbackNexus::GetFeedback() const {
- MaybeObject feedback = vector()->Get(slot());
+ MaybeObject feedback = vector().Get(slot());
FeedbackVector::AssertNoLegacyTypes(feedback);
return feedback;
}
MaybeObject FeedbackNexus::GetFeedbackExtra() const {
#ifdef DEBUG
- FeedbackSlotKind kind = vector()->GetKind(slot());
+ FeedbackSlotKind kind = vector().GetKind(slot());
DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
#endif
- int extra_index = vector()->GetIndex(slot()) + 1;
- return vector()->get(extra_index);
+ int extra_index = vector().GetIndex(slot()) + 1;
+ return vector().get(extra_index);
}
void FeedbackNexus::SetFeedback(Object feedback, WriteBarrierMode mode) {
@@ -323,18 +322,18 @@ void FeedbackNexus::SetFeedback(Object feedback, WriteBarrierMode mode) {
void FeedbackNexus::SetFeedback(MaybeObject feedback, WriteBarrierMode mode) {
FeedbackVector::AssertNoLegacyTypes(feedback);
- vector()->Set(slot(), feedback, mode);
+ vector().Set(slot(), feedback, mode);
}
void FeedbackNexus::SetFeedbackExtra(Object feedback_extra,
WriteBarrierMode mode) {
#ifdef DEBUG
- FeedbackSlotKind kind = vector()->GetKind(slot());
+ FeedbackSlotKind kind = vector().GetKind(slot());
DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
FeedbackVector::AssertNoLegacyTypes(MaybeObject::FromObject(feedback_extra));
#endif
- int index = vector()->GetIndex(slot()) + 1;
- vector()->set(index, MaybeObject::FromObject(feedback_extra), mode);
+ int index = vector().GetIndex(slot()) + 1;
+ vector().set(index, MaybeObject::FromObject(feedback_extra), mode);
}
void FeedbackNexus::SetFeedbackExtra(MaybeObject feedback_extra,
@@ -342,14 +341,14 @@ void FeedbackNexus::SetFeedbackExtra(MaybeObject feedback_extra,
#ifdef DEBUG
FeedbackVector::AssertNoLegacyTypes(feedback_extra);
#endif
- int index = vector()->GetIndex(slot()) + 1;
- vector()->set(index, feedback_extra, mode);
+ int index = vector().GetIndex(slot()) + 1;
+ vector().set(index, feedback_extra, mode);
}
-Isolate* FeedbackNexus::GetIsolate() const { return vector()->GetIsolate(); }
+Isolate* FeedbackNexus::GetIsolate() const { return vector().GetIsolate(); }
} // namespace internal
} // namespace v8
#include "src/objects/object-macros-undef.h"
-#endif // V8_FEEDBACK_VECTOR_INL_H_
+#endif // V8_OBJECTS_FEEDBACK_VECTOR_INL_H_
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/objects/feedback-vector.cc
index b94182f9aa..0393a55f69 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/objects/feedback-vector.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/feedback-vector.h"
-#include "src/feedback-vector-inl.h"
+#include "src/objects/feedback-vector.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
-#include "src/objects.h"
#include "src/objects/data-handler-inl.h"
+#include "src/objects/feedback-vector-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/object-macros.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -44,13 +44,13 @@ bool FeedbackVectorSpec::HasTypeProfileSlot() const {
static bool IsPropertyNameFeedback(MaybeObject feedback) {
HeapObject heap_object;
if (!feedback->GetHeapObjectIfStrong(&heap_object)) return false;
- if (heap_object->IsString()) {
- DCHECK(heap_object->IsInternalizedString());
+ if (heap_object.IsString()) {
+ DCHECK(heap_object.IsInternalizedString());
return true;
}
- if (!heap_object->IsSymbol()) return false;
+ if (!heap_object.IsSymbol()) return false;
Symbol symbol = Symbol::cast(heap_object);
- ReadOnlyRoots roots = symbol->GetReadOnlyRoots();
+ ReadOnlyRoots roots = symbol.GetReadOnlyRoots();
return symbol != roots.uninitialized_symbol() &&
symbol != roots.premonomorphic_symbol() &&
symbol != roots.megamorphic_symbol();
@@ -195,11 +195,11 @@ bool FeedbackMetadata::HasTypeProfileSlot() const {
FeedbackSlotKind FeedbackVector::GetKind(FeedbackSlot slot) const {
DCHECK(!is_empty());
- return metadata()->GetKind(slot);
+ return metadata().GetKind(slot);
}
FeedbackSlot FeedbackVector::GetTypeProfileSlot() const {
- DCHECK(metadata()->HasTypeProfileSlot());
+ DCHECK(metadata().HasTypeProfileSlot());
FeedbackSlot slot =
FeedbackVector::ToSlot(FeedbackVectorSpec::kTypeProfileSlotIndex);
DCHECK_EQ(FeedbackSlotKind::kTypeProfile, GetKind(slot));
@@ -212,7 +212,7 @@ Handle<ClosureFeedbackCellArray> ClosureFeedbackCellArray::New(
Factory* factory = isolate->factory();
int num_feedback_cells =
- shared->feedback_metadata()->closure_feedback_cell_count();
+ shared->feedback_metadata().closure_feedback_cell_count();
Handle<ClosureFeedbackCellArray> feedback_cell_array =
factory->NewClosureFeedbackCellArray(num_feedback_cells);
@@ -231,7 +231,7 @@ Handle<FeedbackVector> FeedbackVector::New(
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array) {
Factory* factory = isolate->factory();
- const int slot_count = shared->feedback_metadata()->slot_count();
+ const int slot_count = shared->feedback_metadata().slot_count();
Handle<FeedbackVector> vector = factory->NewFeedbackVector(
shared, closure_feedback_cell_array, AllocationType::kOld);
@@ -246,7 +246,6 @@ Handle<FeedbackVector> FeedbackVector::New(
: OptimizationMarker::kNone)));
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
- DCHECK_EQ(vector->deopt_count(), 0);
// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
@@ -254,7 +253,7 @@ Handle<FeedbackVector> FeedbackVector::New(
*uninitialized_sentinel);
for (int i = 0; i < slot_count;) {
FeedbackSlot slot(i);
- FeedbackSlotKind kind = shared->feedback_metadata()->GetKind(slot);
+ FeedbackSlotKind kind = shared->feedback_metadata().GetKind(slot);
int index = FeedbackVector::GetIndex(slot);
int entry_size = FeedbackMetadata::GetSlotSize(kind);
@@ -319,7 +318,7 @@ void FeedbackVector::AddToVectorsForProfilingTools(
Isolate* isolate, Handle<FeedbackVector> vector) {
DCHECK(!isolate->is_best_effort_code_coverage() ||
isolate->is_collecting_type_profile());
- if (!vector->shared_function_info()->IsSubjectToDebugging()) return;
+ if (!vector->shared_function_info().IsSubjectToDebugging()) return;
Handle<ArrayList> list = Handle<ArrayList>::cast(
isolate->factory()->feedback_vectors_for_profiling_tools());
list = ArrayList::Add(isolate, list, vector);
@@ -360,16 +359,15 @@ void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
}
Code code = Code::cast(slot->GetHeapObject());
- if (code->marked_for_deoptimization()) {
+ if (code.marked_for_deoptimization()) {
if (FLAG_trace_deopt) {
PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
reason);
- shared->ShortPrint();
+ shared.ShortPrint();
PrintF("]\n");
}
- if (!code->deopt_already_counted()) {
- increment_deopt_count();
- code->set_deopt_already_counted(true);
+ if (!code.deopt_already_counted()) {
+ code.set_deopt_already_counted(true);
}
ClearOptimizedCode();
}
@@ -399,7 +397,7 @@ void FeedbackVector::AssertNoLegacyTypes(MaybeObject object) {
if (object->GetHeapObject(&heap_object)) {
// Instead of FixedArray, the Feedback and the Extra should contain
// WeakFixedArrays. The only allowed FixedArray subtype is HashTable.
- DCHECK_IMPLIES(heap_object->IsFixedArray(), heap_object->IsHashTable());
+ DCHECK_IMPLIES(heap_object.IsFixedArray(), heap_object.IsHashTable());
}
#endif
}
@@ -408,8 +406,8 @@ Handle<WeakFixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
HeapObject heap_object;
if (GetFeedback()->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsWeakFixedArray() &&
- WeakFixedArray::cast(heap_object)->length() == length) {
+ heap_object.IsWeakFixedArray() &&
+ WeakFixedArray::cast(heap_object).length() == length) {
return handle(WeakFixedArray::cast(heap_object), isolate);
}
Handle<WeakFixedArray> array = isolate->factory()->NewWeakFixedArray(length);
@@ -421,8 +419,8 @@ Handle<WeakFixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
HeapObject heap_object;
if (GetFeedbackExtra()->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsWeakFixedArray() &&
- WeakFixedArray::cast(heap_object)->length() == length) {
+ heap_object.IsWeakFixedArray() &&
+ WeakFixedArray::cast(heap_object).length() == length) {
return handle(WeakFixedArray::cast(heap_object), isolate);
}
Handle<WeakFixedArray> array = isolate->factory()->NewWeakFixedArray(length);
@@ -521,7 +519,6 @@ bool FeedbackNexus::Clear() {
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
- break;
}
return feedback_updated;
}
@@ -568,7 +565,7 @@ bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
Map FeedbackNexus::GetFirstMap() const {
MapHandles maps;
ExtractMaps(&maps);
- if (maps.size() > 0) return *maps.at(0);
+ if (!maps.empty()) return *maps.at(0);
return Map();
}
@@ -631,17 +628,17 @@ InlineCacheState FeedbackNexus::ic_state() const {
}
HeapObject heap_object;
if (feedback->GetHeapObjectIfStrong(&heap_object)) {
- if (heap_object->IsWeakFixedArray()) {
+ if (heap_object.IsWeakFixedArray()) {
// Determine state purely by our structure, don't check if the maps
// are cleared.
return POLYMORPHIC;
}
- if (heap_object->IsName()) {
+ if (heap_object.IsName()) {
DCHECK(IsKeyedLoadICKind(kind()) || IsKeyedStoreICKind(kind()) ||
IsKeyedHasICKind(kind()));
Object extra = GetFeedbackExtra()->GetHeapObjectAssumeStrong();
WeakFixedArray extra_array = WeakFixedArray::cast(extra);
- return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
+ return extra_array.length() > 2 ? POLYMORPHIC : MONOMORPHIC;
}
}
UNREACHABLE();
@@ -653,7 +650,7 @@ InlineCacheState FeedbackNexus::ic_state() const {
return GENERIC;
} else if (feedback->IsWeakOrCleared() ||
(feedback->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsAllocationSite())) {
+ heap_object.IsAllocationSite())) {
return MONOMORPHIC;
}
@@ -733,14 +730,13 @@ InlineCacheState FeedbackNexus::ic_state() const {
return MONOMORPHIC;
}
- DCHECK(feedback->GetHeapObjectAssumeStrong()->IsWeakFixedArray());
+ DCHECK(feedback->GetHeapObjectAssumeStrong().IsWeakFixedArray());
return POLYMORPHIC;
}
case FeedbackSlotKind::kInvalid:
case FeedbackSlotKind::kKindsNumber:
UNREACHABLE();
- break;
}
return UNINITIALIZED;
}
@@ -798,7 +794,7 @@ void FeedbackNexus::ConfigureCloneObject(Handle<Map> source_map,
break;
case MONOMORPHIC:
if (maybe_feedback->IsCleared() || feedback.is_identical_to(source_map) ||
- Map::cast(*feedback)->is_deprecated()) {
+ Map::cast(*feedback).is_deprecated()) {
// Remain in MONOMORPHIC state if previous feedback has been collected.
SetFeedback(HeapObjectReference::Weak(*source_map));
SetFeedbackExtra(*result_map);
@@ -860,7 +856,7 @@ int FeedbackNexus::GetCallCount() {
DCHECK(IsCallICKind(kind()));
Object call_count = GetFeedbackExtra()->cast<Object>();
- CHECK(call_count->IsSmi());
+ CHECK(call_count.IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return CallCountField::decode(value);
}
@@ -869,7 +865,7 @@ void FeedbackNexus::SetSpeculationMode(SpeculationMode mode) {
DCHECK(IsCallICKind(kind()));
Object call_count = GetFeedbackExtra()->cast<Object>();
- CHECK(call_count->IsSmi());
+ CHECK(call_count.IsSmi());
uint32_t count = static_cast<uint32_t>(Smi::ToInt(call_count));
uint32_t value = CallCountField::encode(CallCountField::decode(count));
int result = static_cast<int>(value | SpeculationModeField::encode(mode));
@@ -880,7 +876,7 @@ SpeculationMode FeedbackNexus::GetSpeculationMode() {
DCHECK(IsCallICKind(kind()));
Object call_count = GetFeedbackExtra()->cast<Object>();
- CHECK(call_count->IsSmi());
+ CHECK(call_count.IsSmi());
uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
return SpeculationModeField::decode(value);
}
@@ -888,7 +884,7 @@ SpeculationMode FeedbackNexus::GetSpeculationMode() {
float FeedbackNexus::ComputeCallFrequency() {
DCHECK(IsCallICKind(kind()));
- double const invocation_count = vector()->invocation_count();
+ double const invocation_count = vector().invocation_count();
double const call_count = GetCallCount();
if (invocation_count == 0) {
// Prevent division by 0.
@@ -952,7 +948,7 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsWeakFixedArray()) ||
+ heap_object.IsWeakFixedArray()) ||
is_named_feedback) {
int found = 0;
WeakFixedArray array;
@@ -964,9 +960,9 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
}
const int increment = 2;
HeapObject heap_object;
- for (int i = 0; i < array->length(); i += increment) {
- DCHECK(array->Get(i)->IsWeakOrCleared());
- if (array->Get(i)->GetHeapObjectIfWeak(&heap_object)) {
+ for (int i = 0; i < array.length(); i += increment) {
+ DCHECK(array.Get(i)->IsWeakOrCleared());
+ if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) {
Map map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
found++;
@@ -979,7 +975,7 @@ int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
return 1;
} else if (feedback->GetHeapObjectIfStrong(&heap_object) &&
heap_object ==
- heap_object->GetReadOnlyRoots().premonomorphic_symbol()) {
+ heap_object.GetReadOnlyRoots().premonomorphic_symbol()) {
if (GetFeedbackExtra()->GetHeapObjectIfWeak(&heap_object)) {
Map map = Map::cast(heap_object);
maps->push_back(handle(map, isolate));
@@ -1001,7 +997,7 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsWeakFixedArray()) ||
+ heap_object.IsWeakFixedArray()) ||
is_named_feedback) {
WeakFixedArray array;
if (is_named_feedback) {
@@ -1012,12 +1008,12 @@ MaybeObjectHandle FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
}
const int increment = 2;
HeapObject heap_object;
- for (int i = 0; i < array->length(); i += increment) {
- DCHECK(array->Get(i)->IsWeakOrCleared());
- if (array->Get(i)->GetHeapObjectIfWeak(&heap_object)) {
+ for (int i = 0; i < array.length(); i += increment) {
+ DCHECK(array.Get(i)->IsWeakOrCleared());
+ if (array.Get(i)->GetHeapObjectIfWeak(&heap_object)) {
Map array_map = Map::cast(heap_object);
- if (array_map == *map && !array->Get(i + increment - 1)->IsCleared()) {
- MaybeObject handler = array->Get(i + increment - 1);
+ if (array_map == *map && !array.Get(i + increment - 1)->IsCleared()) {
+ MaybeObject handler = array.Get(i + increment - 1);
DCHECK(IC::IsHandler(handler));
return handle(handler, isolate);
}
@@ -1048,7 +1044,7 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
bool is_named_feedback = IsPropertyNameFeedback(feedback);
HeapObject heap_object;
if ((feedback->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsWeakFixedArray()) ||
+ heap_object.IsWeakFixedArray()) ||
is_named_feedback) {
WeakFixedArray array;
if (is_named_feedback) {
@@ -1059,12 +1055,12 @@ bool FeedbackNexus::FindHandlers(MaybeObjectHandles* code_list,
}
const int increment = 2;
HeapObject heap_object;
- for (int i = 0; i < array->length(); i += increment) {
+ for (int i = 0; i < array.length(); i += increment) {
// Be sure to skip handlers whose maps have been cleared.
- DCHECK(array->Get(i)->IsWeakOrCleared());
- if (array->Get(i)->GetHeapObjectIfWeak(&heap_object) &&
- !array->Get(i + increment - 1)->IsCleared()) {
- MaybeObject handler = array->Get(i + increment - 1);
+ DCHECK(array.Get(i)->IsWeakOrCleared());
+ if (array.Get(i)->GetHeapObjectIfWeak(&heap_object) &&
+ !array.Get(i + increment - 1)->IsCleared()) {
+ MaybeObject handler = array.Get(i + increment - 1);
DCHECK(IC::IsHandler(handler));
code_list->push_back(handle(handler, isolate));
count++;
@@ -1155,19 +1151,19 @@ KeyedAccessStoreMode KeyedAccessStoreModeForBuiltin(int builtin_index) {
case Builtins::kKeyedStoreIC_Slow_GrowNoTransitionHandleCOW:
case Builtins::kStoreFastElementIC_GrowNoTransitionHandleCOW:
case Builtins::kElementsTransitionAndStore_GrowNoTransitionHandleCOW:
- return STORE_AND_GROW_NO_TRANSITION_HANDLE_COW;
+ return STORE_AND_GROW_HANDLE_COW;
case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionIgnoreOOB:
case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionIgnoreOOB:
case Builtins::kKeyedStoreIC_Slow_NoTransitionIgnoreOOB:
case Builtins::kStoreFastElementIC_NoTransitionIgnoreOOB:
case Builtins::kElementsTransitionAndStore_NoTransitionIgnoreOOB:
- return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
+ return STORE_IGNORE_OUT_OF_BOUNDS;
case Builtins::kKeyedStoreIC_SloppyArguments_NoTransitionHandleCOW:
case Builtins::kStoreInArrayLiteralIC_Slow_NoTransitionHandleCOW:
case Builtins::kKeyedStoreIC_Slow_NoTransitionHandleCOW:
case Builtins::kStoreFastElementIC_NoTransitionHandleCOW:
case Builtins::kElementsTransitionAndStore_NoTransitionHandleCOW:
- return STORE_NO_TRANSITION_HANDLE_COW;
+ return STORE_HANDLE_COW;
default:
UNREACHABLE();
}
@@ -1192,7 +1188,7 @@ KeyedAccessStoreMode FeedbackNexus::GetKeyedAccessStoreMode() const {
Handle<StoreHandler> data_handler =
Handle<StoreHandler>::cast(maybe_code_handler.object());
handler = handle(Code::cast(data_handler->smi_handler()),
- vector()->GetIsolate());
+ vector().GetIsolate());
} else if (maybe_code_handler.object()->IsSmi()) {
// Skip proxy handlers.
DCHECK_EQ(*(maybe_code_handler.object()),
@@ -1261,7 +1257,7 @@ namespace {
bool InList(Handle<ArrayList> types, Handle<String> type) {
for (int i = 0; i < types->Length(); i++) {
Object obj = types->Get(i);
- if (String::cast(obj)->Equals(*type)) {
+ if (String::cast(obj).Equals(*type)) {
return true;
}
}
@@ -1297,7 +1293,7 @@ void FeedbackNexus::Collect(Handle<String> type, int position) {
isolate, types, position,
ArrayList::Add(isolate, position_specific_types, type));
} else {
- DCHECK(types->ValueAt(entry)->IsArrayList());
+ DCHECK(types->ValueAt(entry).IsArrayList());
position_specific_types =
handle(ArrayList::cast(types->ValueAt(entry)), isolate);
if (!InList(position_specific_types, type)) { // Add type
@@ -1329,8 +1325,8 @@ std::vector<int> FeedbackNexus::GetSourcePositions() const {
index < types->length(); index += SimpleNumberDictionary::kEntrySize) {
int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
Object key = types->get(key_index);
- if (key->IsSmi()) {
- int position = Smi::cast(key)->value();
+ if (key.IsSmi()) {
+ int position = Smi::cast(key).value();
source_positions.push_back(position);
}
}
@@ -1357,7 +1353,7 @@ std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
if (entry == SimpleNumberDictionary::kNotFound) {
return types_for_position;
}
- DCHECK(types->ValueAt(entry)->IsArrayList());
+ DCHECK(types->ValueAt(entry).IsArrayList());
Handle<ArrayList> position_specific_types =
Handle<ArrayList>(ArrayList::cast(types->ValueAt(entry)), isolate);
for (int i = 0; i < position_specific_types->Length(); i++) {
@@ -1380,7 +1376,7 @@ Handle<JSObject> ConvertToJSObject(Isolate* isolate,
index += SimpleNumberDictionary::kEntrySize) {
int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
Object key = feedback->get(key_index);
- if (key->IsSmi()) {
+ if (key.IsSmi()) {
int value_index = index + SimpleNumberDictionary::kEntryValueIndex;
Handle<ArrayList> position_specific_types(
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/objects/feedback-vector.h
index 0073335be1..89e0b9e6aa 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/objects/feedback-vector.h
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FEEDBACK_VECTOR_H_
-#define V8_FEEDBACK_VECTOR_H_
+#ifndef V8_OBJECTS_FEEDBACK_VECTOR_H_
+#define V8_OBJECTS_FEEDBACK_VECTOR_H_
#include <vector>
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/elements-kind.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/objects/elements-kind.h"
#include "src/objects/map.h"
#include "src/objects/name.h"
-#include "src/type-hints.h"
+#include "src/objects/type-hints.h"
#include "src/zone/zone-containers.h"
// Has to be the last include (doesn't have include guards):
@@ -141,7 +141,7 @@ inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) {
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
FeedbackSlotKind kind);
-typedef std::vector<MaybeObjectHandle> MaybeObjectHandles;
+using MaybeObjectHandles = std::vector<MaybeObjectHandle>;
class FeedbackMetadata;
@@ -204,15 +204,14 @@ class FeedbackVector : public HeapObject {
// [invocation_count]: The number of times this function has been invoked.
DECL_INT32_ACCESSORS(invocation_count)
- // [invocation_count]: The number of times this function has been seen by the
+ // [profiler_ticks]: The number of times this function has been seen by the
// runtime profiler.
DECL_INT32_ACCESSORS(profiler_ticks)
- // [deopt_count]: The number of times this function has deoptimized.
- DECL_INT32_ACCESSORS(deopt_count)
+ // Initialize the padding if necessary.
+ inline void clear_padding();
inline void clear_invocation_count();
- inline void increment_deopt_count();
inline Code optimized_code() const;
inline OptimizationMarker optimization_marker() const;
@@ -311,23 +310,14 @@ class FeedbackVector : public HeapObject {
// garbage collection (e.g., for patching the cache).
static inline Symbol RawUninitializedSentinel(Isolate* isolate);
-// Layout description.
-#define FEEDBACK_VECTOR_FIELDS(V) \
- /* Header fields. */ \
- V(kSharedFunctionInfoOffset, kTaggedSize) \
- V(kOptimizedCodeOffset, kTaggedSize) \
- V(kClosureFeedbackCellArrayOffset, kTaggedSize) \
- V(kLengthOffset, kInt32Size) \
- V(kInvocationCountOffset, kInt32Size) \
- V(kProfilerTicksOffset, kInt32Size) \
- V(kDeoptCountOffset, kInt32Size) \
- V(kUnalignedHeaderSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_VECTOR_FIELDS)
-#undef FEEDBACK_VECTOR_FIELDS
-
- static const int kHeaderSize =
- RoundUp<kObjectAlignment>(int{kUnalignedHeaderSize});
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_FEEDBACK_VECTOR_FIELDS)
+
+ static const int kHeaderSize = kSize;
+
+ static_assert(kSize % kObjectAlignment == 0,
+ "Header must be padded for alignment");
static const int kFeedbackSlotsOffset = kHeaderSize;
class BodyDescriptor;
@@ -562,9 +552,9 @@ class FeedbackMetadata : public HeapObject {
void SetKind(FeedbackSlot slot, FeedbackSlotKind kind);
- typedef BitSetComputer<FeedbackSlotKind, kFeedbackSlotKindBits,
- kInt32Size * kBitsPerByte, uint32_t>
- VectorICComputer;
+ using VectorICComputer =
+ BitSetComputer<FeedbackSlotKind, kFeedbackSlotKindBits,
+ kInt32Size * kBitsPerByte, uint32_t>;
OBJECT_CONSTRUCTORS(FeedbackMetadata, HeapObject);
};
@@ -628,7 +618,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
FeedbackNexus(FeedbackVector vector, FeedbackSlot slot)
: vector_(vector), slot_(slot) {
kind_ =
- (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector->GetKind(slot);
+ (vector.is_null()) ? FeedbackSlotKind::kInvalid : vector.GetKind(slot);
}
Handle<FeedbackVector> vector_handle() const {
@@ -642,7 +632,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
FeedbackSlotKind kind() const { return kind_; }
inline LanguageMode GetLanguageMode() const {
- return vector()->GetLanguageMode(slot());
+ return vector().GetLanguageMode(slot());
}
InlineCacheState ic_state() const;
@@ -707,8 +697,8 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
// count (taken from the type feedback vector).
float ComputeCallFrequency();
- typedef BitField<SpeculationMode, 0, 1> SpeculationModeField;
- typedef BitField<uint32_t, 1, 31> CallCountField;
+ using SpeculationModeField = BitField<SpeculationMode, 0, 1>;
+ using CallCountField = BitField<uint32_t, 1, 31>;
// For InstanceOf ICs.
MaybeHandle<JSObject> GetConstructorFeedback() const;
@@ -779,4 +769,4 @@ inline ForInHint ForInHintFromFeedback(int type_feedback);
#include "src/objects/object-macros-undef.h"
-#endif // V8_FEEDBACK_VECTOR_H_
+#endif // V8_OBJECTS_FEEDBACK_VECTOR_H_
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/objects/field-index-inl.h
index 42828db923..be60fb54a2 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/objects/field-index-inl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FIELD_INDEX_INL_H_
-#define V8_FIELD_INDEX_INL_H_
+#ifndef V8_OBJECTS_FIELD_INDEX_INL_H_
+#define V8_OBJECTS_FIELD_INDEX_INL_H_
-#include "src/field-index.h"
-#include "src/objects-inl.h"
#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/field-index.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -21,14 +21,14 @@ FieldIndex FieldIndex::ForInObjectOffset(int offset, Encoding encoding) {
FieldIndex FieldIndex::ForPropertyIndex(const Map map, int property_index,
Representation representation) {
- DCHECK(map->instance_type() >= FIRST_NONSTRING_TYPE);
- int inobject_properties = map->GetInObjectProperties();
+ DCHECK(map.instance_type() >= FIRST_NONSTRING_TYPE);
+ int inobject_properties = map.GetInObjectProperties();
bool is_inobject = property_index < inobject_properties;
int first_inobject_offset;
int offset;
if (is_inobject) {
- first_inobject_offset = map->GetInObjectPropertyOffset(0);
- offset = map->GetInObjectPropertyOffset(property_index);
+ first_inobject_offset = map.GetInObjectPropertyOffset(0);
+ offset = map.GetInObjectPropertyOffset(property_index);
} else {
first_inobject_offset = FixedArray::kHeaderSize;
property_index -= inobject_properties;
@@ -62,7 +62,7 @@ int FieldIndex::GetLoadByFieldIndex() const {
FieldIndex FieldIndex::ForDescriptor(const Map map, int descriptor_index) {
PropertyDetails details =
- map->instance_descriptors()->GetDetails(descriptor_index);
+ map.instance_descriptors().GetDetails(descriptor_index);
int field_index = details.field_index();
return ForPropertyIndex(map, field_index, details.representation());
}
@@ -70,4 +70,4 @@ FieldIndex FieldIndex::ForDescriptor(const Map map, int descriptor_index) {
} // namespace internal
} // namespace v8
-#endif // V8_FIELD_INDEX_INL_H_
+#endif // V8_OBJECTS_FIELD_INDEX_INL_H_
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/objects/field-index.h
index 2b5f82203e..f352ef6800 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/objects/field-index.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FIELD_INDEX_H_
-#define V8_FIELD_INDEX_H_
+#ifndef V8_OBJECTS_FIELD_INDEX_H_
+#define V8_OBJECTS_FIELD_INDEX_H_
-#include "src/property-details.h"
-#include "src/utils.h"
+#include "src/objects/property-details.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -31,11 +31,7 @@ class FieldIndex final {
inline int GetLoadByFieldIndex() const;
- bool is_inobject() const {
- return IsInObjectBits::decode(bit_field_);
- }
-
- bool is_hidden_field() const { return IsHiddenField::decode(bit_field_); }
+ bool is_inobject() const { return IsInObjectBits::decode(bit_field_); }
bool is_double() const { return EncodingBits::decode(bit_field_) == kDouble; }
@@ -55,7 +51,6 @@ class FieldIndex final {
// Zero-based from the first inobject property. Overflows to out-of-object
// properties.
int property_index() const {
- DCHECK(!is_hidden_field());
int result = index() - first_inobject_property_offset() / kTaggedSize;
if (!is_inobject()) {
result += InObjectPropertyBits::decode(bit_field_);
@@ -75,14 +70,13 @@ class FieldIndex final {
private:
FieldIndex(bool is_inobject, int offset, Encoding encoding,
- int inobject_properties, int first_inobject_property_offset,
- bool is_hidden = false) {
+ int inobject_properties, int first_inobject_property_offset) {
DCHECK(IsAligned(first_inobject_property_offset, kTaggedSize));
bit_field_ = IsInObjectBits::encode(is_inobject) |
EncodingBits::encode(encoding) |
FirstInobjectPropertyOffsetBits::encode(
first_inobject_property_offset) |
- IsHiddenField::encode(is_hidden) | OffsetBits::encode(offset) |
+ OffsetBits::encode(offset) |
InObjectPropertyBits::encode(inobject_properties);
}
@@ -104,7 +98,6 @@ class FieldIndex final {
}
int first_inobject_property_offset() const {
- DCHECK(!is_hidden_field());
return FirstInobjectPropertyOffsetBits::decode(bit_field_);
}
@@ -123,9 +116,7 @@ class FieldIndex final {
class FirstInobjectPropertyOffsetBits
: public BitField64<int, InObjectPropertyBits::kNext,
kFirstInobjectPropertyOffsetBitCount> {};
- class IsHiddenField
- : public BitField64<bool, FirstInobjectPropertyOffsetBits::kNext, 1> {};
- STATIC_ASSERT(IsHiddenField::kNext <= 64);
+ STATIC_ASSERT(FirstInobjectPropertyOffsetBits::kNext <= 64);
uint64_t bit_field_;
};
@@ -133,4 +124,4 @@ class FieldIndex final {
} // namespace internal
} // namespace v8
-#endif // V8_FIELD_INDEX_H_
+#endif // V8_OBJECTS_FIELD_INDEX_H_
diff --git a/deps/v8/src/field-type.cc b/deps/v8/src/objects/field-type.cc
index bb0869b262..5c771c4ffa 100644
--- a/deps/v8/src/field-type.cc
+++ b/deps/v8/src/objects/field-type.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/field-type.h"
+#include "src/objects/field-type.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -38,8 +38,8 @@ Handle<FieldType> FieldType::Class(Handle<Map> map, Isolate* isolate) {
// static
FieldType FieldType::cast(Object object) {
- DCHECK(object == None() || object == Any() || object->IsMap());
- return FieldType(object->ptr());
+ DCHECK(object == None() || object == Any() || object.IsMap());
+ return FieldType(object.ptr());
}
bool FieldType::IsClass() const { return this->IsMap(); }
@@ -50,16 +50,16 @@ Map FieldType::AsClass() const {
}
bool FieldType::NowStable() const {
- return !this->IsClass() || AsClass()->is_stable();
+ return !this->IsClass() || AsClass().is_stable();
}
bool FieldType::NowIs(FieldType other) const {
- if (other->IsAny()) return true;
+ if (other.IsAny()) return true;
if (IsNone()) return true;
- if (other->IsNone()) return false;
+ if (other.IsNone()) return false;
if (IsAny()) return false;
DCHECK(IsClass());
- DCHECK(other->IsClass());
+ DCHECK(other.IsClass());
return *this == other;
}
@@ -72,15 +72,15 @@ void FieldType::PrintTo(std::ostream& os) const {
os << "None";
} else {
DCHECK(IsClass());
- os << "Class(" << reinterpret_cast<void*>(AsClass()->ptr()) << ")";
+ os << "Class(" << reinterpret_cast<void*>(AsClass().ptr()) << ")";
}
}
bool FieldType::NowContains(Object value) const {
if (*this == Any()) return true;
if (*this == None()) return false;
- if (!value->IsHeapObject()) return false;
- return HeapObject::cast(value)->map() == Map::cast(*this);
+ if (!value.IsHeapObject()) return false;
+ return HeapObject::cast(value).map() == Map::cast(*this);
}
} // namespace internal
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/objects/field-type.h
index 405fa7ab14..3c22692307 100644
--- a/deps/v8/src/field-type.h
+++ b/deps/v8/src/objects/field-type.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FIELD_TYPE_H_
-#define V8_FIELD_TYPE_H_
+#ifndef V8_OBJECTS_FIELD_TYPE_H_
+#define V8_OBJECTS_FIELD_TYPE_H_
-#include "src/objects.h"
#include "src/objects/heap-object.h"
#include "src/objects/map.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -41,7 +41,7 @@ class FieldType : public Object {
bool NowIs(FieldType other) const;
bool NowIs(Handle<FieldType> other) const;
- void PrintTo(std::ostream& os) const;
+ V8_EXPORT_PRIVATE void PrintTo(std::ostream& os) const;
FieldType* operator->() { return this; }
const FieldType* operator->() const { return this; }
@@ -53,4 +53,4 @@ class FieldType : public Object {
} // namespace internal
} // namespace v8
-#endif // V8_FIELD_TYPE_H_
+#endif // V8_OBJECTS_FIELD_TYPE_H_
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index d494f8d15b..6d2b42edbf 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -7,19 +7,19 @@
#include "src/objects/fixed-array.h"
-#include "src/base/tsan.h"
-#include "src/conversions.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
#include "src/objects/bigint.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
#include "src/objects/slots.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
+#include "src/sanitizer/tsan.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -30,7 +30,6 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(FixedArrayBase, HeapObject)
OBJECT_CONSTRUCTORS_IMPL(FixedArray, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(FixedDoubleArray, FixedArrayBase)
-OBJECT_CONSTRUCTORS_IMPL(FixedTypedArrayBase, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(ArrayList, FixedArray)
OBJECT_CONSTRUCTORS_IMPL(ByteArray, FixedArrayBase)
OBJECT_CONSTRUCTORS_IMPL(TemplateList, FixedArray)
@@ -58,13 +57,13 @@ CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(TemplateList)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakArrayList)
SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+
SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
@@ -76,8 +75,6 @@ Object FixedArrayBase::unchecked_synchronized_length() const {
return ACQUIRE_READ_FIELD(*this, kLengthOffset);
}
-ACCESSORS(FixedTypedArrayBase, base_pointer, Object, kBasePointerOffset)
-
ObjectSlot FixedArray::GetFirstElementAddress() {
return RawField(OffsetOfElementAt(0));
}
@@ -87,7 +84,7 @@ bool FixedArray::ContainsOnlySmisOrHoles() {
ObjectSlot current = GetFirstElementAddress();
for (int i = 0; i < length(); ++i, ++current) {
Object candidate = *current;
- if (!candidate->IsSmi() && candidate != the_hole) return false;
+ if (!candidate.IsSmi() && candidate != the_hole) return false;
}
return true;
}
@@ -98,25 +95,11 @@ Object FixedArray::get(int index) const {
}
Handle<Object> FixedArray::get(FixedArray array, int index, Isolate* isolate) {
- return handle(array->get(index), isolate);
-}
-
-template <class T>
-MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
- Object obj = get(index);
- if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
- return Handle<T>(T::cast(obj), isolate);
-}
-
-template <class T>
-Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
- Object obj = get(index);
- CHECK(!obj->IsUndefined(isolate));
- return Handle<T>(T::cast(obj), isolate);
+ return handle(array.get(index), isolate);
}
bool FixedArray::is_the_hole(Isolate* isolate, int index) {
- return get(index)->IsTheHole(isolate);
+ return get(index).IsTheHole(isolate);
}
void FixedArray::set(int index, Smi value) {
@@ -147,9 +130,9 @@ void FixedArray::set(int index, Object value, WriteBarrierMode mode) {
}
void FixedArray::NoWriteBarrierSet(FixedArray array, int index, Object value) {
- DCHECK_NE(array->map(), array->GetReadOnlyRoots().fixed_cow_array_map());
+ DCHECK_NE(array.map(), array.GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_GE(index, 0);
- DCHECK_LT(index, array->length());
+ DCHECK_LT(index, array.length());
DCHECK(!ObjectInYoungGeneration(value));
RELAXED_WRITE_FIELD(array, kHeaderSize + index * kTaggedSize, value);
}
@@ -202,16 +185,27 @@ ObjectSlot FixedArray::RawFieldOfElementAt(int index) {
return RawField(OffsetOfElementAt(index));
}
-void FixedArray::MoveElements(Heap* heap, int dst_index, int src_index, int len,
- WriteBarrierMode mode) {
+void FixedArray::MoveElements(Isolate* isolate, int dst_index, int src_index,
+ int len, WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(dst_index + len, length());
+ DCHECK_LE(src_index + len, length());
DisallowHeapAllocation no_gc;
- heap->MoveElements(*this, dst_index, src_index, len, mode);
+ ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
+ ObjectSlot src_slot(RawFieldOfElementAt(src_index));
+ isolate->heap()->MoveRange(*this, dst_slot, src_slot, len, mode);
}
-void FixedArray::CopyElements(Heap* heap, int dst_index, FixedArray src,
+void FixedArray::CopyElements(Isolate* isolate, int dst_index, FixedArray src,
int src_index, int len, WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(dst_index + len, length());
+ DCHECK_LE(src_index + len, src.length());
DisallowHeapAllocation no_gc;
- heap->CopyElements(*this, src, dst_index, src_index, len, mode);
+
+ ObjectSlot dst_slot(RawFieldOfElementAt(dst_index));
+ ObjectSlot src_slot(src.RawFieldOfElementAt(src_index));
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
}
// Perform a binary search in a fixed array.
@@ -221,7 +215,7 @@ int BinarySearch(T* array, Name name, int valid_entries,
DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == nullptr);
int low = 0;
int high = array->number_of_entries() - 1;
- uint32_t hash = name->hash_field();
+ uint32_t hash = name.hash_field();
int limit = high;
DCHECK(low <= high);
@@ -229,7 +223,7 @@ int BinarySearch(T* array, Name name, int valid_entries,
while (low != high) {
int mid = low + (high - low) / 2;
Name mid_name = array->GetSortedKey(mid);
- uint32_t mid_hash = mid_name->hash_field();
+ uint32_t mid_hash = mid_name.hash_field();
if (mid_hash >= hash) {
high = mid;
@@ -241,7 +235,7 @@ int BinarySearch(T* array, Name name, int valid_entries,
for (; low <= limit; ++low) {
int sort_index = array->GetSortedKeyIndex(low);
Name entry = array->GetKey(sort_index);
- uint32_t current_hash = entry->hash_field();
+ uint32_t current_hash = entry.hash_field();
if (current_hash != hash) {
if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
*out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
@@ -268,12 +262,12 @@ template <SearchMode search_mode, typename T>
int LinearSearch(T* array, Name name, int valid_entries,
int* out_insertion_index) {
if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
- uint32_t hash = name->hash_field();
+ uint32_t hash = name.hash_field();
int len = array->number_of_entries();
for (int number = 0; number < len; number++) {
int sorted_index = array->GetSortedKeyIndex(number);
Name entry = array->GetKey(sorted_index);
- uint32_t current_hash = entry->hash_field();
+ uint32_t current_hash = entry.hash_field();
if (current_hash > hash) {
*out_insertion_index = sorted_index;
return T::kNotFound;
@@ -320,7 +314,7 @@ double FixedDoubleArray::get_scalar(int index) {
map() != GetReadOnlyRoots().fixed_array_map());
DCHECK(index >= 0 && index < this->length());
DCHECK(!is_the_hole(index));
- return READ_DOUBLE_FIELD(*this, kHeaderSize + index * kDoubleSize);
+ return ReadField<double>(kHeaderSize + index * kDoubleSize);
}
uint64_t FixedDoubleArray::get_representation(int index) {
@@ -328,15 +322,16 @@ uint64_t FixedDoubleArray::get_representation(int index) {
map() != GetReadOnlyRoots().fixed_array_map());
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kDoubleSize;
- return READ_UINT64_FIELD(*this, offset);
+ // Bug(v8:8875): Doubles may be unaligned.
+ return ReadUnalignedValue<uint64_t>(field_address(offset));
}
Handle<Object> FixedDoubleArray::get(FixedDoubleArray array, int index,
Isolate* isolate) {
- if (array->is_the_hole(index)) {
+ if (array.is_the_hole(index)) {
return ReadOnlyRoots(isolate).the_hole_value_handle();
} else {
- return isolate->factory()->NewNumber(array->get_scalar(index));
+ return isolate->factory()->NewNumber(array.get_scalar(index));
}
}
@@ -345,9 +340,9 @@ void FixedDoubleArray::set(int index, double value) {
map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
if (std::isnan(value)) {
- WRITE_DOUBLE_FIELD(*this, offset, std::numeric_limits<double>::quiet_NaN());
+ WriteField<double>(offset, std::numeric_limits<double>::quiet_NaN());
} else {
- WRITE_DOUBLE_FIELD(*this, offset, value);
+ WriteField<double>(offset, value);
}
DCHECK(!is_the_hole(index));
}
@@ -360,7 +355,7 @@ void FixedDoubleArray::set_the_hole(int index) {
DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() &&
map() != GetReadOnlyRoots().fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
- WRITE_UINT64_FIELD(*this, offset, kHoleNanInt64);
+ WriteUnalignedValue<uint64_t>(field_address(offset), kHoleNanInt64);
}
bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
@@ -371,8 +366,9 @@ bool FixedDoubleArray::is_the_hole(int index) {
return get_representation(index) == kHoleNanInt64;
}
-void FixedDoubleArray::MoveElements(Heap* heap, int dst_index, int src_index,
- int len, WriteBarrierMode mode) {
+void FixedDoubleArray::MoveElements(Isolate* isolate, int dst_index,
+ int src_index, int len,
+ WriteBarrierMode mode) {
DCHECK_EQ(SKIP_WRITE_BARRIER, mode);
double* data_start =
reinterpret_cast<double*>(FIELD_ADDR(*this, kHeaderSize));
@@ -414,6 +410,19 @@ MaybeObjectSlot WeakFixedArray::RawFieldOfElementAt(int index) {
return RawMaybeWeakField(OffsetOfElementAt(index));
}
+void WeakFixedArray::CopyElements(Isolate* isolate, int dst_index,
+ WeakFixedArray src, int src_index, int len,
+ WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(dst_index + len, length());
+ DCHECK_LE(src_index + len, src.length());
+ DisallowHeapAllocation no_gc;
+
+ MaybeObjectSlot dst_slot(data_start() + dst_index);
+ MaybeObjectSlot src_slot(src.data_start() + src_index);
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
+}
+
MaybeObject WeakArrayList::Get(int index) const {
DCHECK(index >= 0 && index < this->capacity());
return RELAXED_READ_WEAK_FIELD(*this, OffsetOfElementAt(index));
@@ -431,10 +440,23 @@ MaybeObjectSlot WeakArrayList::data_start() {
return RawMaybeWeakField(kHeaderSize);
}
+void WeakArrayList::CopyElements(Isolate* isolate, int dst_index,
+ WeakArrayList src, int src_index, int len,
+ WriteBarrierMode mode) {
+ if (len == 0) return;
+ DCHECK_LE(dst_index + len, capacity());
+ DCHECK_LE(src_index + len, src.capacity());
+ DisallowHeapAllocation no_gc;
+
+ MaybeObjectSlot dst_slot(data_start() + dst_index);
+ MaybeObjectSlot src_slot(src.data_start() + src_index);
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
+}
+
HeapObject WeakArrayList::Iterator::Next() {
if (!array_.is_null()) {
- while (index_ < array_->length()) {
- MaybeObject item = array_->Get(index_++);
+ while (index_ < array_.length()) {
+ MaybeObject item = array_.Get(index_++);
DCHECK(item->IsWeakOrCleared());
if (!item->IsCleared()) return item->GetHeapObjectAssumeWeak();
}
@@ -444,16 +466,16 @@ HeapObject WeakArrayList::Iterator::Next() {
}
int ArrayList::Length() const {
- if (FixedArray::cast(*this)->length() == 0) return 0;
- return Smi::ToInt(FixedArray::cast(*this)->get(kLengthIndex));
+ if (FixedArray::cast(*this).length() == 0) return 0;
+ return Smi::ToInt(FixedArray::cast(*this).get(kLengthIndex));
}
void ArrayList::SetLength(int length) {
- return FixedArray::cast(*this)->set(kLengthIndex, Smi::FromInt(length));
+ return FixedArray::cast(*this).set(kLengthIndex, Smi::FromInt(length));
}
Object ArrayList::Get(int index) const {
- return FixedArray::cast(*this)->get(kFirstIndex + index);
+ return FixedArray::cast(*this).get(kFirstIndex + index);
}
ObjectSlot ArrayList::Slot(int index) {
@@ -461,25 +483,25 @@ ObjectSlot ArrayList::Slot(int index) {
}
void ArrayList::Set(int index, Object obj, WriteBarrierMode mode) {
- FixedArray::cast(*this)->set(kFirstIndex + index, obj, mode);
+ FixedArray::cast(*this).set(kFirstIndex + index, obj, mode);
}
void ArrayList::Clear(int index, Object undefined) {
- DCHECK(undefined->IsUndefined());
- FixedArray::cast(*this)->set(kFirstIndex + index, undefined,
- SKIP_WRITE_BARRIER);
+ DCHECK(undefined.IsUndefined());
+ FixedArray::cast(*this).set(kFirstIndex + index, undefined,
+ SKIP_WRITE_BARRIER);
}
int ByteArray::Size() { return RoundUp(length() + kHeaderSize, kTaggedSize); }
byte ByteArray::get(int index) const {
DCHECK(index >= 0 && index < this->length());
- return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize);
+ return ReadField<byte>(kHeaderSize + index * kCharSize);
}
void ByteArray::set(int index, byte value) {
DCHECK(index >= 0 && index < this->length());
- WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize, value);
+ WriteField<byte>(kHeaderSize + index * kCharSize, value);
}
void ByteArray::copy_in(int index, const byte* buffer, int length) {
@@ -498,22 +520,22 @@ void ByteArray::copy_out(int index, byte* buffer, int length) {
int ByteArray::get_int(int index) const {
DCHECK(index >= 0 && index < this->length() / kIntSize);
- return READ_INT_FIELD(*this, kHeaderSize + index * kIntSize);
+ return ReadField<int>(kHeaderSize + index * kIntSize);
}
void ByteArray::set_int(int index, int value) {
DCHECK(index >= 0 && index < this->length() / kIntSize);
- WRITE_INT_FIELD(*this, kHeaderSize + index * kIntSize, value);
+ WriteField<int>(kHeaderSize + index * kIntSize, value);
}
uint32_t ByteArray::get_uint32(int index) const {
DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- return READ_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size);
+ return ReadField<uint32_t>(kHeaderSize + index * kUInt32Size);
}
void ByteArray::set_uint32(int index, uint32_t value) {
DCHECK(index >= 0 && index < this->length() / kUInt32Size);
- WRITE_UINT32_FIELD(*this, kHeaderSize + index * kUInt32Size, value);
+ WriteField<uint32_t>(kHeaderSize + index * kUInt32Size, value);
}
void ByteArray::clear_padding() {
@@ -559,385 +581,16 @@ int PodArray<T>::length() const {
return ByteArray::length() / sizeof(T);
}
-void* FixedTypedArrayBase::external_pointer() const {
- intptr_t ptr = READ_INTPTR_FIELD(*this, kExternalPointerOffset);
- return reinterpret_cast<void*>(ptr);
-}
-
-void FixedTypedArrayBase::set_external_pointer(void* value) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(*this, kExternalPointerOffset, ptr);
-}
-
-void* FixedTypedArrayBase::DataPtr() {
- return reinterpret_cast<void*>(
- base_pointer()->ptr() + reinterpret_cast<intptr_t>(external_pointer()));
-}
-
-int FixedTypedArrayBase::ElementSize(InstanceType type) {
- int element_size;
- switch (type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- element_size = sizeof(ctype); \
- break;
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- default:
- UNREACHABLE();
- }
- return element_size;
-}
-
-int FixedTypedArrayBase::DataSize(InstanceType type) const {
- if (base_pointer() == Smi::kZero) return 0;
- return length() * ElementSize(type);
-}
-
-int FixedTypedArrayBase::DataSize() const {
- return DataSize(map()->instance_type());
-}
-
-size_t FixedTypedArrayBase::ByteLength() const {
- return static_cast<size_t>(length()) *
- static_cast<size_t>(ElementSize(map()->instance_type()));
-}
-
-int FixedTypedArrayBase::size() const {
- return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
-}
-
-int FixedTypedArrayBase::TypedArraySize(InstanceType type) const {
- return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type));
-}
-
-// static
-int FixedTypedArrayBase::TypedArraySize(InstanceType type, int length) {
- return OBJECT_POINTER_ALIGN(kDataOffset + length * ElementSize(type));
-}
-
-uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
-
-uint8_t Uint8ClampedArrayTraits::defaultValue() { return 0; }
-
-int8_t Int8ArrayTraits::defaultValue() { return 0; }
-
-uint16_t Uint16ArrayTraits::defaultValue() { return 0; }
-
-int16_t Int16ArrayTraits::defaultValue() { return 0; }
-
-uint32_t Uint32ArrayTraits::defaultValue() { return 0; }
-
-int32_t Int32ArrayTraits::defaultValue() { return 0; }
-
-float Float32ArrayTraits::defaultValue() {
- return std::numeric_limits<float>::quiet_NaN();
-}
-
-double Float64ArrayTraits::defaultValue() {
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
- DCHECK((index >= 0) && (index < this->length()));
- return FixedTypedArray<Traits>::get_scalar_from_data_ptr(DataPtr(), index);
-}
-
-// static
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::get_scalar_from_data_ptr(
- void* data_ptr, int index) {
- typename Traits::ElementType* ptr = reinterpret_cast<ElementType*>(data_ptr);
- // The JavaScript memory model allows for racy reads and writes to a
- // SharedArrayBuffer's backing store, which will always be a FixedTypedArray.
- // ThreadSanitizer will catch these racy accesses and warn about them, so we
- // disable TSAN for these reads and writes using annotations.
- //
- // We don't use relaxed atomics here, as it is not a requirement of the
- // JavaScript memory model to have tear-free reads of overlapping accesses,
- // and using relaxed atomics may introduce overhead.
- TSAN_ANNOTATE_IGNORE_READS_BEGIN;
- ElementType result;
- if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only kTaggedSize
- // aligned so we have to use unaligned pointer friendly way of accessing
- // them in order to avoid undefined behavior in C++ code.
- result = ReadUnalignedValue<ElementType>(reinterpret_cast<Address>(ptr) +
- index * sizeof(ElementType));
- } else {
- result = ptr[index];
- }
- TSAN_ANNOTATE_IGNORE_READS_END;
- return result;
-}
-
-template <class Traits>
-void FixedTypedArray<Traits>::set(int index, ElementType value) {
- CHECK((index >= 0) && (index < this->length()));
- // See the comment in FixedTypedArray<Traits>::get_scalar.
- auto* ptr = reinterpret_cast<ElementType*>(DataPtr());
- TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only kTaggedSize
- // aligned so we have to use unaligned pointer friendly way of accessing
- // them in order to avoid undefined behavior in C++ code.
- WriteUnalignedValue<ElementType>(
- reinterpret_cast<Address>(ptr) + index * sizeof(ElementType), value);
- } else {
- ptr[index] = value;
- }
- TSAN_ANNOTATE_IGNORE_WRITES_END;
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(int value) {
- return static_cast<ElementType>(value);
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(int value) {
- if (value < 0) return 0;
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(value);
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(int value) {
- UNREACHABLE();
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(int value) {
- UNREACHABLE();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(uint32_t value) {
- return static_cast<ElementType>(value);
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(uint32_t value) {
- // We need this special case for Uint32 -> Uint8Clamped, because the highest
- // Uint32 values will be negative as an int, clamping to 0, rather than 255.
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(value);
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(uint32_t value) {
- UNREACHABLE();
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(uint32_t value) {
- UNREACHABLE();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(double value) {
- return static_cast<ElementType>(DoubleToInt32(value));
-}
-
-template <>
-inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(double value) {
- // Handle NaNs and less than zero values which clamp to zero.
- if (!(value > 0)) return 0;
- if (value > 0xFF) return 0xFF;
- return static_cast<uint8_t>(lrint(value));
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(double value) {
- UNREACHABLE();
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(double value) {
- UNREACHABLE();
-}
-
-template <>
-inline float FixedTypedArray<Float32ArrayTraits>::from(double value) {
- using limits = std::numeric_limits<float>;
- if (value > limits::max()) return limits::infinity();
- if (value < limits::lowest()) return -limits::infinity();
- return static_cast<float>(value);
-}
-
-template <>
-inline double FixedTypedArray<Float64ArrayTraits>::from(double value) {
- return value;
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(int64_t value) {
- UNREACHABLE();
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from(uint64_t value) {
- UNREACHABLE();
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(int64_t value) {
- return value;
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(uint64_t value) {
- return value;
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::from(int64_t value) {
- return static_cast<uint64_t>(value);
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::from(uint64_t value) {
- return static_cast<int64_t>(value);
-}
-
-template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::FromHandle(
- Handle<Object> value, bool* lossless) {
- if (value->IsSmi()) {
- return from(Smi::ToInt(*value));
- }
- DCHECK(value->IsHeapNumber());
- return from(HeapNumber::cast(*value)->value());
-}
-
-template <>
-inline int64_t FixedTypedArray<BigInt64ArrayTraits>::FromHandle(
- Handle<Object> value, bool* lossless) {
- DCHECK(value->IsBigInt());
- return BigInt::cast(*value)->AsInt64(lossless);
-}
-
-template <>
-inline uint64_t FixedTypedArray<BigUint64ArrayTraits>::FromHandle(
- Handle<Object> value, bool* lossless) {
- DCHECK(value->IsBigInt());
- return BigInt::cast(*value)->AsUint64(lossless);
-}
-
-template <class Traits>
-Handle<Object> FixedTypedArray<Traits>::get(Isolate* isolate,
- FixedTypedArray<Traits> array,
- int index) {
- return Traits::ToHandle(isolate, array->get_scalar(index));
-}
-
-template <class Traits>
-void FixedTypedArray<Traits>::SetValue(uint32_t index, Object value) {
- ElementType cast_value = Traits::defaultValue();
- if (value->IsSmi()) {
- int int_value = Smi::ToInt(value);
- cast_value = from(int_value);
- } else if (value->IsHeapNumber()) {
- double double_value = HeapNumber::cast(value)->value();
- cast_value = from(double_value);
- } else {
- // Clamp undefined to the default value. All other types have been
- // converted to a number type further up in the call chain.
- DCHECK(value->IsUndefined());
- }
- set(index, cast_value);
-}
-
-template <>
-inline void FixedTypedArray<BigInt64ArrayTraits>::SetValue(uint32_t index,
- Object value) {
- DCHECK(value->IsBigInt());
- set(index, BigInt::cast(value)->AsInt64());
-}
-
-template <>
-inline void FixedTypedArray<BigUint64ArrayTraits>::SetValue(uint32_t index,
- Object value) {
- DCHECK(value->IsBigInt());
- set(index, BigInt::cast(value)->AsUint64());
-}
-
-Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Uint8ClampedArrayTraits::ToHandle(Isolate* isolate,
- uint8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Int8ArrayTraits::ToHandle(Isolate* isolate, int8_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Uint16ArrayTraits::ToHandle(Isolate* isolate, uint16_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Int16ArrayTraits::ToHandle(Isolate* isolate, int16_t scalar) {
- return handle(Smi::FromInt(scalar), isolate);
-}
-
-Handle<Object> Uint32ArrayTraits::ToHandle(Isolate* isolate, uint32_t scalar) {
- return isolate->factory()->NewNumberFromUint(scalar);
-}
-
-Handle<Object> Int32ArrayTraits::ToHandle(Isolate* isolate, int32_t scalar) {
- return isolate->factory()->NewNumberFromInt(scalar);
-}
-
-Handle<Object> Float32ArrayTraits::ToHandle(Isolate* isolate, float scalar) {
- return isolate->factory()->NewNumber(scalar);
-}
-
-Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
- return isolate->factory()->NewNumber(scalar);
-}
-
-Handle<Object> BigInt64ArrayTraits::ToHandle(Isolate* isolate, int64_t scalar) {
- return BigInt::FromInt64(isolate, scalar);
-}
-
-Handle<Object> BigUint64ArrayTraits::ToHandle(Isolate* isolate,
- uint64_t scalar) {
- return BigInt::FromUint64(isolate, scalar);
-}
-
-// static
-template <class Traits>
-STATIC_CONST_MEMBER_DEFINITION const InstanceType
- FixedTypedArray<Traits>::kInstanceType;
-
-template <class Traits>
-FixedTypedArray<Traits>::FixedTypedArray(Address ptr)
- : FixedTypedArrayBase(ptr) {
- DCHECK(IsHeapObject() && map()->instance_type() == Traits::kInstanceType);
-}
-
-template <class Traits>
-FixedTypedArray<Traits> FixedTypedArray<Traits>::cast(Object object) {
- return FixedTypedArray<Traits>(object.ptr());
-}
-
int TemplateList::length() const {
- return Smi::ToInt(FixedArray::cast(*this)->get(kLengthIndex));
+ return Smi::ToInt(FixedArray::cast(*this).get(kLengthIndex));
}
Object TemplateList::get(int index) const {
- return FixedArray::cast(*this)->get(kFirstElementIndex + index);
+ return FixedArray::cast(*this).get(kFirstElementIndex + index);
}
void TemplateList::set(int index, Object value) {
- FixedArray::cast(*this)->set(kFirstElementIndex + index, value);
+ FixedArray::cast(*this).set(kFirstElementIndex + index, value);
}
} // namespace internal
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index e3ab45ba0e..02f26502b2 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_FIXED_ARRAY_H_
#define V8_OBJECTS_FIXED_ARRAY_H_
-#include "src/maybe-handles.h"
+#include "src/handles/maybe-handles.h"
#include "src/objects/instance-type.h"
#include "src/objects/smi.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -100,8 +100,6 @@ class FixedArrayBase : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_FIXED_ARRAY_BASE_FIELDS)
- static const int kHeaderSize = kSize;
-
protected:
// Special-purpose constructor for subclasses that have fast paths where
// their ptr() is a Smi.
@@ -117,11 +115,6 @@ class FixedArray : public FixedArrayBase {
inline Object get(int index) const;
static inline Handle<Object> get(FixedArray array, int index,
Isolate* isolate);
- template <class T>
- MaybeHandle<T> GetValue(Isolate* isolate, int index) const;
-
- template <class T>
- Handle<T> GetValueChecked(Isolate* isolate, int index) const;
// Return a grown copy if the index is bigger than the array's length.
V8_EXPORT_PRIVATE static Handle<FixedArray> SetAndGrow(
@@ -147,16 +140,14 @@ class FixedArray : public FixedArrayBase {
inline ObjectSlot GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
- // Returns true iff the elements are Numbers and sorted ascending.
- bool ContainsSortedNumbers();
// Gives access to raw memory which stores the array's data.
inline ObjectSlot data_start();
- inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
- WriteBarrierMode mode);
+ inline void MoveElements(Isolate* isolate, int dst_index, int src_index,
+ int len, WriteBarrierMode mode);
- inline void CopyElements(Heap* heap, int dst_index, FixedArray src,
+ inline void CopyElements(Isolate* isolate, int dst_index, FixedArray src,
int src_index, int len, WriteBarrierMode mode);
inline void FillWithHoles(int from, int to);
@@ -201,6 +192,8 @@ class FixedArray : public FixedArrayBase {
using BodyDescriptor = FlexibleBodyDescriptor<kHeaderSize>;
+ static constexpr int kObjectsOffset = kHeaderSize;
+
protected:
// Set operation on FixedArray without using write barriers. Can
// only be used for storing old space objects or smis.
@@ -243,8 +236,8 @@ class FixedDoubleArray : public FixedArrayBase {
return kHeaderSize + length * kDoubleSize;
}
- inline void MoveElements(Heap* heap, int dst_index, int src_index, int len,
- WriteBarrierMode mode);
+ inline void MoveElements(Isolate* isolate, int dst_index, int src_index,
+ int len, WriteBarrierMode mode);
inline void FillWithHoles(int from, int to);
@@ -296,6 +289,9 @@ class WeakFixedArray : public HeapObject {
inline MaybeObjectSlot RawFieldOfElementAt(int index);
+ inline void CopyElements(Isolate* isolate, int dst_index, WeakFixedArray src,
+ int src_index, int len, WriteBarrierMode mode);
+
DECL_PRINTER(WeakFixedArray)
DECL_VERIFIER(WeakFixedArray)
@@ -354,6 +350,9 @@ class WeakArrayList : public HeapObject {
// Gives access to raw memory which stores the array's data.
inline MaybeObjectSlot data_start();
+ inline void CopyElements(Isolate* isolate, int dst_index, WeakArrayList src,
+ int src_index, int len, WriteBarrierMode mode);
+
V8_EXPORT_PRIVATE bool IsFull();
DECL_INT_ACCESSORS(capacity)
@@ -577,128 +576,6 @@ class PodArray : public ByteArray {
OBJECT_CONSTRUCTORS(PodArray<T>, ByteArray);
};
-class FixedTypedArrayBase : public FixedArrayBase {
- public:
- // [base_pointer]: Either points to the FixedTypedArrayBase itself or nullptr.
- DECL_ACCESSORS(base_pointer, Object)
-
- // [external_pointer]: Contains the offset between base_pointer and the start
- // of the data. If the base_pointer is a nullptr, the external_pointer
- // therefore points to the actual backing store.
- DECL_PRIMITIVE_ACCESSORS(external_pointer, void*)
-
- // Dispatched behavior.
- DECL_CAST(FixedTypedArrayBase)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(FixedArrayBase::kHeaderSize,
- TORQUE_GENERATED_FIXED_TYPED_ARRAY_BASE_FIELDS)
- static const int kHeaderSize = kSize;
-
-#ifdef V8_COMPRESS_POINTERS
- // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize
- // is only kTaggedSize aligned but we can keep using unaligned access since
- // both x64 and arm64 architectures (where pointer compression supported)
- // allow unaligned access to doubles.
- STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
-#else
- STATIC_ASSERT(IsAligned(kHeaderSize, kDoubleAlignment));
-#endif
-
- static const int kDataOffset = kHeaderSize;
-
- static const int kMaxElementSize = 8;
-
-#ifdef V8_HOST_ARCH_32_BIT
- static const size_t kMaxByteLength = std::numeric_limits<size_t>::max();
-#else
- static const size_t kMaxByteLength =
- static_cast<size_t>(Smi::kMaxValue) * kMaxElementSize;
-#endif // V8_HOST_ARCH_32_BIT
-
- static const size_t kMaxLength = Smi::kMaxValue;
-
- class BodyDescriptor;
-
- inline int size() const;
-
- static inline int TypedArraySize(InstanceType type, int length);
- inline int TypedArraySize(InstanceType type) const;
-
- // Use with care: returns raw pointer into heap.
- inline void* DataPtr();
-
- inline int DataSize() const;
-
- inline size_t ByteLength() const;
-
- static inline intptr_t ExternalPointerValueForOnHeapArray() {
- return FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
- }
-
- static inline void* ExternalPointerPtrForOnHeapArray() {
- return reinterpret_cast<void*>(ExternalPointerValueForOnHeapArray());
- }
-
- private:
- static inline int ElementSize(InstanceType type);
-
- inline int DataSize(InstanceType type) const;
-
- OBJECT_CONSTRUCTORS(FixedTypedArrayBase, FixedArrayBase);
-};
-
-template <class Traits>
-class FixedTypedArray : public FixedTypedArrayBase {
- public:
- using ElementType = typename Traits::ElementType;
- static const InstanceType kInstanceType = Traits::kInstanceType;
-
- DECL_CAST(FixedTypedArray<Traits>)
-
- static inline ElementType get_scalar_from_data_ptr(void* data_ptr, int index);
- inline ElementType get_scalar(int index);
- static inline Handle<Object> get(Isolate* isolate, FixedTypedArray array,
- int index);
- inline void set(int index, ElementType value);
-
- static inline ElementType from(int value);
- static inline ElementType from(uint32_t value);
- static inline ElementType from(double value);
- static inline ElementType from(int64_t value);
- static inline ElementType from(uint64_t value);
-
- static inline ElementType FromHandle(Handle<Object> value,
- bool* lossless = nullptr);
-
- // This accessor applies the correct conversion from Smi, HeapNumber
- // and undefined.
- inline void SetValue(uint32_t index, Object value);
-
- DECL_PRINTER(FixedTypedArray)
- DECL_VERIFIER(FixedTypedArray)
-
- private:
- OBJECT_CONSTRUCTORS(FixedTypedArray, FixedTypedArrayBase);
-};
-
-#define FIXED_TYPED_ARRAY_TRAITS(Type, type, TYPE, elementType) \
- STATIC_ASSERT(sizeof(elementType) <= FixedTypedArrayBase::kMaxElementSize); \
- class Type##ArrayTraits { \
- public: /* NOLINT */ \
- using ElementType = elementType; \
- static const InstanceType kInstanceType = FIXED_##TYPE##_ARRAY_TYPE; \
- static const char* ArrayTypeName() { return "Fixed" #Type "Array"; } \
- static inline Handle<Object> ToHandle(Isolate* isolate, \
- elementType scalar); \
- static inline elementType defaultValue(); \
- }; \
- \
- using Fixed##Type##Array = FixedTypedArray<Type##ArrayTraits>;
-
-TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
-
-#undef FIXED_TYPED_ARRAY_TRAITS
-
class TemplateList : public FixedArray {
public:
static Handle<TemplateList> New(Isolate* isolate, int size);
diff --git a/deps/v8/src/objects/foreign-inl.h b/deps/v8/src/objects/foreign-inl.h
index 0ac9f652bb..fc93b66a0a 100644
--- a/deps/v8/src/objects/foreign-inl.h
+++ b/deps/v8/src/objects/foreign-inl.h
@@ -8,7 +8,7 @@
#include "src/objects/foreign.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,15 +23,15 @@ CAST_ACCESSOR(Foreign)
// static
bool Foreign::IsNormalized(Object value) {
if (value == Smi::kZero) return true;
- return Foreign::cast(value)->foreign_address() != kNullAddress;
+ return Foreign::cast(value).foreign_address() != kNullAddress;
}
Address Foreign::foreign_address() {
- return READ_UINTPTR_FIELD(*this, kForeignAddressOffset);
+ return ReadField<Address>(kForeignAddressOffset);
}
void Foreign::set_foreign_address(Address value) {
- WRITE_UINTPTR_FIELD(*this, kForeignAddressOffset, value);
+ WriteField<Address>(kForeignAddressOffset, value);
}
} // namespace internal
diff --git a/deps/v8/src/objects/foreign.h b/deps/v8/src/objects/foreign.h
index 629d549b6d..617ca0e34f 100644
--- a/deps/v8/src/objects/foreign.h
+++ b/deps/v8/src/objects/foreign.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_FOREIGN_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/frame-array-inl.h b/deps/v8/src/objects/frame-array-inl.h
index 78d08da00f..5b342c64c0 100644
--- a/deps/v8/src/objects/frame-array-inl.h
+++ b/deps/v8/src/objects/frame-array-inl.h
@@ -33,17 +33,17 @@ FRAME_ARRAY_FIELD_LIST(DEFINE_FRAME_ARRAY_ACCESSORS)
#undef DEFINE_FRAME_ARRAY_ACCESSORS
bool FrameArray::IsWasmFrame(int frame_ix) const {
- const int flags = Flags(frame_ix)->value();
+ const int flags = Flags(frame_ix).value();
return (flags & kIsWasmFrame) != 0;
}
bool FrameArray::IsWasmInterpretedFrame(int frame_ix) const {
- const int flags = Flags(frame_ix)->value();
+ const int flags = Flags(frame_ix).value();
return (flags & kIsWasmInterpretedFrame) != 0;
}
bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
- const int flags = Flags(frame_ix)->value();
+ const int flags = Flags(frame_ix).value();
return (flags & kIsAsmJsWasmFrame) != 0;
}
diff --git a/deps/v8/src/objects/frame-array.h b/deps/v8/src/objects/frame-array.h
index 438718e25f..42750cf69c 100644
--- a/deps/v8/src/objects/frame-array.h
+++ b/deps/v8/src/objects/frame-array.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_FRAME_ARRAY_H_
#define V8_OBJECTS_FRAME_ARRAY_H_
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/wasm/wasm-objects.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/free-space-inl.h b/deps/v8/src/objects/free-space-inl.h
index b36c4e154f..bea8257515 100644
--- a/deps/v8/src/objects/free-space-inl.h
+++ b/deps/v8/src/objects/free-space-inl.h
@@ -7,10 +7,10 @@
#include "src/objects/free-space.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -30,7 +30,7 @@ FreeSpace FreeSpace::next() {
Heap* heap = GetHeapFromWritableObject(*this);
Object free_space_map =
Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()),
+ DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
!heap->deserialization_complete() &&
map_slot().contains_value(kNullAddress));
#endif
@@ -43,7 +43,7 @@ void FreeSpace::set_next(FreeSpace next) {
Heap* heap = GetHeapFromWritableObject(*this);
Object free_space_map =
Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- DCHECK_IMPLIES(!map_slot().contains_value(free_space_map->ptr()),
+ DCHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
!heap->deserialization_complete() &&
map_slot().contains_value(kNullAddress));
#endif
@@ -53,7 +53,7 @@ void FreeSpace::set_next(FreeSpace next) {
FreeSpace FreeSpace::cast(HeapObject o) {
SLOW_DCHECK(!GetHeapFromWritableObject(o)->deserialization_complete() ||
- o->IsFreeSpace());
+ o.IsFreeSpace());
return bit_cast<FreeSpace>(o);
}
diff --git a/deps/v8/src/objects/free-space.h b/deps/v8/src/objects/free-space.h
index f1f7bb56c5..38f5794646 100644
--- a/deps/v8/src/objects/free-space.h
+++ b/deps/v8/src/objects/free-space.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_FREE_SPACE_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/function-kind.h b/deps/v8/src/objects/function-kind.h
index fce82b0512..4a1819813c 100644
--- a/deps/v8/src/function-kind.h
+++ b/deps/v8/src/objects/function-kind.h
@@ -3,10 +3,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FUNCTION_KIND_H_
-#define V8_FUNCTION_KIND_H_
+#ifndef V8_OBJECTS_FUNCTION_KIND_H_
+#define V8_OBJECTS_FUNCTION_KIND_H_
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -191,4 +191,4 @@ inline std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
} // namespace internal
} // namespace v8
-#endif // V8_FUNCTION_KIND_H_
+#endif // V8_OBJECTS_FUNCTION_KIND_H_
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
index d65d9de083..77453721ae 100644
--- a/deps/v8/src/objects/hash-table-inl.h
+++ b/deps/v8/src/objects/hash-table-inl.h
@@ -8,10 +8,10 @@
#include "src/objects/hash-table.h"
#include "src/heap/heap.h"
-#include "src/objects-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/heap-object-inl.h"
-#include "src/roots-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/roots/roots-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -193,7 +193,7 @@ bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key, int32_t hash) {
bool ObjectHashSet::Has(Isolate* isolate, Handle<Object> key) {
Object hash = key->GetHash();
- if (!hash->IsSmi()) return false;
+ if (!hash.IsSmi()) return false;
return FindEntry(ReadOnlyRoots(isolate), key, Smi::ToInt(hash)) != kNotFound;
}
@@ -207,7 +207,7 @@ uint32_t ObjectHashTableShape::Hash(Isolate* isolate, Handle<Object> key) {
uint32_t ObjectHashTableShape::HashForObject(ReadOnlyRoots roots,
Object other) {
- return Smi::ToInt(other->GetHash());
+ return Smi::ToInt(other.GetHash());
}
} // namespace internal
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index 0c83d01b42..610dc9d28e 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -8,10 +8,10 @@
#include "src/base/compiler-specific.h"
#include "src/base/export-template.h"
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
#include "src/objects/smi.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -150,7 +150,7 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
// Find entry for key otherwise return kNotFound.
inline int FindEntry(ReadOnlyRoots roots, Key key, int32_t hash);
- int FindEntry(Isolate* isolate, Key key);
+ inline int FindEntry(Isolate* isolate, Key key);
// Rehashes the table in-place.
void Rehash(ReadOnlyRoots roots);
diff --git a/deps/v8/src/objects/heap-number-inl.h b/deps/v8/src/objects/heap-number-inl.h
index ad82296bce..3986e9146c 100644
--- a/deps/v8/src/objects/heap-number-inl.h
+++ b/deps/v8/src/objects/heap-number-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/heap-number.h"
-#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,30 +23,28 @@ OBJECT_CONSTRUCTORS_IMPL(MutableHeapNumber, HeapNumberBase)
CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(MutableHeapNumber)
-double HeapNumberBase::value() const {
- return READ_DOUBLE_FIELD(*this, kValueOffset);
-}
+double HeapNumberBase::value() const { return ReadField<double>(kValueOffset); }
void HeapNumberBase::set_value(double value) {
- WRITE_DOUBLE_FIELD(*this, kValueOffset, value);
+ WriteField<double>(kValueOffset, value);
}
uint64_t HeapNumberBase::value_as_bits() const {
- return READ_UINT64_FIELD(*this, kValueOffset);
+ // Bug(v8:8875): HeapNumber's double may be unaligned.
+ return ReadUnalignedValue<uint64_t>(field_address(kValueOffset));
}
void HeapNumberBase::set_value_as_bits(uint64_t bits) {
- WRITE_UINT64_FIELD(*this, kValueOffset, bits);
+ WriteUnalignedValue<uint64_t>(field_address(kValueOffset), bits);
}
int HeapNumberBase::get_exponent() {
- return ((READ_INT_FIELD(*this, kExponentOffset) & kExponentMask) >>
- kExponentShift) -
+ return ((ReadField<int>(kExponentOffset) & kExponentMask) >> kExponentShift) -
kExponentBias;
}
int HeapNumberBase::get_sign() {
- return READ_INT_FIELD(*this, kExponentOffset) & kSignMask;
+ return ReadField<int>(kExponentOffset) & kSignMask;
}
} // namespace internal
diff --git a/deps/v8/src/objects/heap-object-inl.h b/deps/v8/src/objects/heap-object-inl.h
index be97f8bb79..3d5deeff63 100644
--- a/deps/v8/src/objects/heap-object-inl.h
+++ b/deps/v8/src/objects/heap-object-inl.h
@@ -9,7 +9,7 @@
#include "src/heap/heap-write-barrier-inl.h"
// TODO(jkummerow): Get rid of this by moving NROSO::GetIsolate elsewhere.
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -17,9 +17,6 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
-CAST_ACCESSOR(HeapObject)
-
HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi)
: Object(ptr) {
SLOW_DCHECK(
@@ -28,12 +25,6 @@ HeapObject::HeapObject(Address ptr, AllowInlineSmiStorage allow_smi)
}
// static
-HeapObject HeapObject::FromAddress(Address address) {
- DCHECK_TAG_ALIGNED(address);
- return HeapObject(address + kHeapObjectTag);
-}
-
-// static
Heap* NeverReadOnlySpaceObject::GetHeap(const HeapObject object) {
return GetHeapFromWritableObject(object);
}
diff --git a/deps/v8/src/objects/heap-object.h b/deps/v8/src/objects/heap-object.h
index f42dc05b81..9ca51bdda1 100644
--- a/deps/v8/src/objects/heap-object.h
+++ b/deps/v8/src/objects/heap-object.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_HEAP_OBJECT_H_
#define V8_OBJECTS_HEAP_OBJECT_H_
-#include "src/globals.h"
-#include "src/roots.h"
+#include "src/common/globals.h"
+#include "src/roots/roots.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -85,7 +85,10 @@ class HeapObject : public Object {
#undef DECL_STRUCT_PREDICATE
// Converts an address to a HeapObject pointer.
- static inline HeapObject FromAddress(Address address);
+ static inline HeapObject FromAddress(Address address) {
+ DCHECK_TAG_ALIGNED(address);
+ return HeapObject(address + kHeapObjectTag);
+ }
// Returns the address of this HeapObject.
inline Address address() const { return ptr() - kHeapObjectTag; }
@@ -197,6 +200,9 @@ class HeapObject : public Object {
OBJECT_CONSTRUCTORS(HeapObject, Object);
};
+OBJECT_CONSTRUCTORS_IMPL(HeapObject, Object)
+CAST_ACCESSOR(HeapObject)
+
// Helper class for objects that can never be in RO space.
class NeverReadOnlySpaceObject {
public:
diff --git a/deps/v8/src/objects/instance-type-inl.h b/deps/v8/src/objects/instance-type-inl.h
index 5925c6aa92..2f867411f2 100644
--- a/deps/v8/src/objects/instance-type-inl.h
+++ b/deps/v8/src/objects/instance-type-inl.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_INSTANCE_TYPE_INL_H_
#include "src/objects/map-inl.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -19,11 +19,6 @@ namespace InstanceTypeChecker {
// Define type checkers for classes with single instance type.
INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER)
-#define TYPED_ARRAY_INSTANCE_TYPE_CHECKER(Type, type, TYPE, ctype) \
- INSTANCE_TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
-TYPED_ARRAYS(TYPED_ARRAY_INSTANCE_TYPE_CHECKER)
-#undef TYPED_ARRAY_INSTANCE_TYPE_CHECKER
-
#define STRUCT_INSTANCE_TYPE_CHECKER(TYPE, Name, name) \
INSTANCE_TYPE_CHECKER(Name, TYPE)
STRUCT_LIST(STRUCT_INSTANCE_TYPE_CHECKER)
@@ -40,8 +35,7 @@ INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE)
V8_INLINE bool IsFixedArrayBase(InstanceType instance_type) {
return IsFixedArray(instance_type) || IsFixedDoubleArray(instance_type) ||
- IsFixedTypedArrayBase(instance_type) || IsByteArray(instance_type) ||
- IsBytecodeArray(instance_type);
+ IsByteArray(instance_type) || IsBytecodeArray(instance_type);
}
V8_INLINE bool IsHeapObject(InstanceType instance_type) { return true; }
@@ -69,11 +63,6 @@ V8_INLINE bool IsJSReceiver(InstanceType instance_type) {
// pointer rather than looking up the instance type.
INSTANCE_TYPE_CHECKERS(TYPE_CHECKER)
-#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype) \
- TYPE_CHECKER(Fixed##Type##Array)
-TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
-#undef TYPED_ARRAY_TYPE_CHECKER
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index edbc428a5d..559ed34784 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_INSTANCE_TYPE_H_
#define V8_OBJECTS_INSTANCE_TYPE_H_
-#include "src/elements-kind.h"
-#include "src/objects-definitions.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/objects-definitions.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -131,17 +131,6 @@ enum InstanceType : uint16_t {
BYTE_ARRAY_TYPE,
BYTECODE_ARRAY_TYPE,
FREE_SPACE_TYPE,
- FIXED_INT8_ARRAY_TYPE, // FIRST_FIXED_TYPED_ARRAY_TYPE
- FIXED_UINT8_ARRAY_TYPE,
- FIXED_INT16_ARRAY_TYPE,
- FIXED_UINT16_ARRAY_TYPE,
- FIXED_INT32_ARRAY_TYPE,
- FIXED_UINT32_ARRAY_TYPE,
- FIXED_FLOAT32_ARRAY_TYPE,
- FIXED_FLOAT64_ARRAY_TYPE,
- FIXED_UINT8_CLAMPED_ARRAY_TYPE,
- FIXED_BIGINT64_ARRAY_TYPE,
- FIXED_BIGUINT64_ARRAY_TYPE, // LAST_FIXED_TYPED_ARRAY_TYPE
FIXED_DOUBLE_ARRAY_TYPE,
FEEDBACK_METADATA_TYPE,
FILLER_TYPE, // LAST_DATA_TYPE
@@ -168,14 +157,18 @@ enum InstanceType : uint16_t {
PROMISE_REACTION_TYPE,
PROTOTYPE_INFO_TYPE,
SCRIPT_TYPE,
+ SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE,
STACK_FRAME_INFO_TYPE,
STACK_TRACE_FRAME_TYPE,
+ TEMPLATE_OBJECT_DESCRIPTION_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
+ WASM_CAPI_FUNCTION_DATA_TYPE,
WASM_DEBUG_INFO_TYPE,
WASM_EXCEPTION_TAG_TYPE,
WASM_EXPORTED_FUNCTION_DATA_TYPE,
+ WASM_JS_FUNCTION_DATA_TYPE,
CALLABLE_TASK_TYPE, // FIRST_MICROTASK_TYPE
CALLBACK_TASK_TYPE,
@@ -190,14 +183,14 @@ enum InstanceType : uint16_t {
FIXED_ARRAY_TYPE, // FIRST_FIXED_ARRAY_TYPE
OBJECT_BOILERPLATE_DESCRIPTION_TYPE,
CLOSURE_FEEDBACK_CELL_ARRAY_TYPE,
- HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE
- ORDERED_HASH_MAP_TYPE, // FIRST_DICTIONARY_TYPE
+ HASH_TABLE_TYPE, // FIRST_HASH_TABLE_TYPE
+ ORDERED_HASH_MAP_TYPE,
ORDERED_HASH_SET_TYPE,
ORDERED_NAME_DICTIONARY_TYPE,
NAME_DICTIONARY_TYPE,
GLOBAL_DICTIONARY_TYPE,
NUMBER_DICTIONARY_TYPE,
- SIMPLE_NUMBER_DICTIONARY_TYPE, // LAST_DICTIONARY_TYPE
+ SIMPLE_NUMBER_DICTIONARY_TYPE,
STRING_TABLE_TYPE,
EPHEMERON_HASH_TABLE_TYPE, // LAST_HASH_TABLE_TYPE
SCOPE_INFO_TYPE,
@@ -330,9 +323,6 @@ enum InstanceType : uint16_t {
// Boundaries for testing if given HeapObject is a subclass of HashTable
FIRST_HASH_TABLE_TYPE = HASH_TABLE_TYPE,
LAST_HASH_TABLE_TYPE = EPHEMERON_HASH_TABLE_TYPE,
- // Boundaries for testing if given HeapObject is a subclass of Dictionary
- FIRST_DICTIONARY_TYPE = ORDERED_HASH_MAP_TYPE,
- LAST_DICTIONARY_TYPE = SIMPLE_NUMBER_DICTIONARY_TYPE,
// Boundaries for testing if given HeapObject is a subclass of WeakFixedArray.
FIRST_WEAK_FIXED_ARRAY_TYPE = WEAK_FIXED_ARRAY_TYPE,
LAST_WEAK_FIXED_ARRAY_TYPE = TRANSITION_ARRAY_TYPE,
@@ -342,9 +332,6 @@ enum InstanceType : uint16_t {
// Boundaries for testing if given HeapObject is a subclass of Microtask.
FIRST_MICROTASK_TYPE = CALLABLE_TASK_TYPE,
LAST_MICROTASK_TYPE = FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE,
- // Boundaries for testing for a fixed typed array.
- FIRST_FIXED_TYPED_ARRAY_TYPE = FIXED_INT8_ARRAY_TYPE,
- LAST_FIXED_TYPED_ARRAY_TYPE = FIXED_BIGUINT64_ARRAY_TYPE,
// Boundary for promotion to old space.
LAST_DATA_TYPE = FILLER_TYPE,
// Boundary for objects represented as JSReceiver (i.e. JSObject or JSProxy).
@@ -385,6 +372,10 @@ STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
+// Make sure it doesn't matter whether we sign-extend or zero-extend these
+// values, because Torque treats InstanceType as signed.
+STATIC_ASSERT(LAST_TYPE < 1 << 15);
+
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
InstanceType instance_type);
@@ -471,11 +462,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
V(SmallOrderedHashMap, SMALL_ORDERED_HASH_MAP_TYPE) \
V(SmallOrderedHashSet, SMALL_ORDERED_HASH_SET_TYPE) \
V(SmallOrderedNameDictionary, SMALL_ORDERED_NAME_DICTIONARY_TYPE) \
- V(SourcePositionTableWithFrameCache, TUPLE2_TYPE) \
V(StoreHandler, STORE_HANDLER_TYPE) \
V(StringTable, STRING_TABLE_TYPE) \
V(Symbol, SYMBOL_TYPE) \
- V(TemplateObjectDescription, TUPLE2_TYPE) \
V(TransitionArray, TRANSITION_ARRAY_TYPE) \
V(UncompiledDataWithoutPreparseData, \
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE) \
@@ -511,10 +500,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
#define INSTANCE_TYPE_CHECKERS_RANGE(V) \
V(Context, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE) \
- V(Dictionary, FIRST_DICTIONARY_TYPE, LAST_DICTIONARY_TYPE) \
V(FixedArray, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE) \
- V(FixedTypedArrayBase, FIRST_FIXED_TYPED_ARRAY_TYPE, \
- LAST_FIXED_TYPED_ARRAY_TYPE) \
V(HashTable, FIRST_HASH_TABLE_TYPE, LAST_HASH_TABLE_TYPE) \
V(JSMapIterator, FIRST_MAP_ITERATOR_TYPE, LAST_MAP_ITERATOR_TYPE) \
V(JSSetIterator, FIRST_SET_ITERATOR_TYPE, LAST_SET_ITERATOR_TYPE) \
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index 8a43f36245..f2bc87ebac 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -13,24 +13,25 @@
#include <string>
#include <vector>
-#include "src/api-inl.h"
-#include "src/global-handles.h"
+#include "src/api/api-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/js-collator-inl.h"
#include "src/objects/js-date-time-format-inl.h"
#include "src/objects/js-locale-inl.h"
#include "src/objects/js-number-format-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/objects/string.h"
-#include "src/property-descriptor.h"
-#include "src/string-case.h"
+#include "src/strings/string-case.h"
#include "unicode/basictz.h"
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
#include "unicode/coll.h"
#include "unicode/datefmt.h"
#include "unicode/decimfmt.h"
+#include "unicode/formattedvalue.h"
#include "unicode/locid.h"
#include "unicode/normalizer2.h"
#include "unicode/numfmt.h"
@@ -153,7 +154,7 @@ void ToUpperWithSharpS(const Vector<const Char>& src,
inline int FindFirstUpperOrNonAscii(String s, int length) {
for (int index = 0; index < length; ++index) {
- uint16_t ch = s->Get(index);
+ uint16_t ch = s.Get(index);
if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
return index;
}
@@ -168,11 +169,11 @@ const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
if (flat.IsOneByte()) {
if (!*dest) {
dest->reset(NewArray<uc16>(length));
- CopyChars(dest->get(), flat.ToOneByteVector().start(), length);
+ CopyChars(dest->get(), flat.ToOneByteVector().begin(), length);
}
return reinterpret_cast<const UChar*>(dest->get());
} else {
- return reinterpret_cast<const UChar*>(flat.ToUC16Vector().start());
+ return reinterpret_cast<const UChar*>(flat.ToUC16Vector().begin());
}
}
@@ -192,15 +193,23 @@ const uint8_t* Intl::ToLatin1LowerTable() { return &kToLower[0]; }
icu::UnicodeString Intl::ToICUUnicodeString(Isolate* isolate,
Handle<String> string) {
- string = String::Flatten(isolate, string);
- {
- DisallowHeapAllocation no_gc;
- std::unique_ptr<uc16[]> sap;
- return icu::UnicodeString(
- GetUCharBufferFromFlat(string->GetFlatContent(no_gc), &sap,
- string->length()),
- string->length());
+ DCHECK(string->IsFlat());
+ DisallowHeapAllocation no_gc;
+ std::unique_ptr<uc16[]> sap;
+ // Short one-byte strings can be expanded on the stack to avoid allocating a
+ // temporary buffer.
+ constexpr int kShortStringSize = 80;
+ UChar short_string_buffer[kShortStringSize];
+ const UChar* uchar_buffer = nullptr;
+ const String::FlatContent& flat = string->GetFlatContent(no_gc);
+ int32_t length = string->length();
+ if (flat.IsOneByte() && length <= kShortStringSize) {
+ CopyChars(short_string_buffer, flat.ToOneByteVector().begin(), length);
+ uchar_buffer = short_string_buffer;
+ } else {
+ uchar_buffer = GetUCharBufferFromFlat(flat, &sap, length);
}
+ return icu::UnicodeString(uchar_buffer, length);
}
namespace {
@@ -254,19 +263,19 @@ MaybeHandle<String> LocaleConvertCase(Isolate* isolate, Handle<String> s,
// one-byte sliced string with a two-byte parent string.
// Called from TF builtins.
String Intl::ConvertOneByteToLower(String src, String dst) {
- DCHECK_EQ(src->length(), dst->length());
- DCHECK(src->IsOneByteRepresentation());
- DCHECK(src->IsFlat());
- DCHECK(dst->IsSeqOneByteString());
+ DCHECK_EQ(src.length(), dst.length());
+ DCHECK(src.IsOneByteRepresentation());
+ DCHECK(src.IsFlat());
+ DCHECK(dst.IsSeqOneByteString());
DisallowHeapAllocation no_gc;
- const int length = src->length();
- String::FlatContent src_flat = src->GetFlatContent(no_gc);
- uint8_t* dst_data = SeqOneByteString::cast(dst)->GetChars(no_gc);
+ const int length = src.length();
+ String::FlatContent src_flat = src.GetFlatContent(no_gc);
+ uint8_t* dst_data = SeqOneByteString::cast(dst).GetChars(no_gc);
if (src_flat.IsOneByte()) {
- const uint8_t* src_data = src_flat.ToOneByteVector().start();
+ const uint8_t* src_data = src_flat.ToOneByteVector().begin();
bool has_changed_character = false;
int index_to_first_unprocessed =
@@ -288,7 +297,7 @@ String Intl::ConvertOneByteToLower(String src, String dst) {
int index_to_first_unprocessed = FindFirstUpperOrNonAscii(src, length);
if (index_to_first_unprocessed == length) return src;
- const uint16_t* src_data = src_flat.ToUC16Vector().start();
+ const uint16_t* src_data = src_flat.ToUC16Vector().begin();
CopyChars(dst_data, src_data, index_to_first_unprocessed);
for (int index = index_to_first_unprocessed; index < length; ++index) {
dst_data[index] = ToLatin1Lower(static_cast<uint16_t>(src_data[index]));
@@ -347,7 +356,7 @@ MaybeHandle<String> Intl::ConvertToUpper(Isolate* isolate, Handle<String> s) {
bool has_changed_character = false;
int index_to_first_unprocessed = FastAsciiConvert<false>(
reinterpret_cast<char*>(result->GetChars(no_gc)),
- reinterpret_cast<const char*>(src.start()), length,
+ reinterpret_cast<const char*>(src.begin()), length,
&has_changed_character);
if (index_to_first_unprocessed == length) {
return has_changed_character ? result : s;
@@ -968,7 +977,7 @@ MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate,
Handle<JSFunction> constructor = Handle<JSFunction>(
JSFunction::cast(
- isolate->context()->native_context()->intl_collator_function()),
+ isolate->context().native_context().intl_collator_function()),
isolate);
Handle<JSCollator> collator;
@@ -978,10 +987,9 @@ MaybeHandle<Object> Intl::StringLocaleCompare(Isolate* isolate,
if (can_cache) {
isolate->set_icu_object_in_cache(
Isolate::ICUObjectCacheType::kDefaultCollator,
- std::static_pointer_cast<icu::UObject>(
- collator->icu_collator()->get()));
+ std::static_pointer_cast<icu::UMemory>(collator->icu_collator().get()));
}
- icu::Collator* icu_collator = collator->icu_collator()->raw();
+ icu::Collator* icu_collator = collator->icu_collator().raw();
return Intl::CompareStrings(isolate, *icu_collator, string1, string2);
}
@@ -992,6 +1000,21 @@ Handle<Object> Intl::CompareStrings(Isolate* isolate,
Handle<String> string2) {
Factory* factory = isolate->factory();
+ // Early return for identical strings.
+ if (string1.is_identical_to(string2)) {
+ return factory->NewNumberFromInt(UCollationResult::UCOL_EQUAL);
+ }
+
+ // Early return for empty strings.
+ if (string1->length() == 0) {
+ return factory->NewNumberFromInt(string2->length() == 0
+ ? UCollationResult::UCOL_EQUAL
+ : UCollationResult::UCOL_LESS);
+ }
+ if (string2->length() == 0) {
+ return factory->NewNumberFromInt(UCollationResult::UCOL_GREATER);
+ }
+
string1 = String::Flatten(isolate, string1);
string2 = String::Flatten(isolate, string2);
@@ -1025,9 +1048,10 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
bool can_cache =
locales->IsUndefined(isolate) && options->IsUndefined(isolate);
if (can_cache) {
- icu::NumberFormat* cached_number_format =
- static_cast<icu::NumberFormat*>(isolate->get_cached_icu_object(
- Isolate::ICUObjectCacheType::kDefaultNumberFormat));
+ icu::number::LocalizedNumberFormatter* cached_number_format =
+ static_cast<icu::number::LocalizedNumberFormatter*>(
+ isolate->get_cached_icu_object(
+ Isolate::ICUObjectCacheType::kDefaultNumberFormat));
// We may use the cached icu::NumberFormat for a fast path.
if (cached_number_format != nullptr) {
return JSNumberFormat::FormatNumeric(isolate, *cached_number_format,
@@ -1037,7 +1061,7 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
Handle<JSFunction> constructor = Handle<JSFunction>(
JSFunction::cast(
- isolate->context()->native_context()->intl_number_format_function()),
+ isolate->context().native_context().intl_number_format_function()),
isolate);
Handle<JSNumberFormat> number_format;
// 2. Let numberFormat be ? Construct(%NumberFormat%, « locales, options »).
@@ -1048,13 +1072,13 @@ MaybeHandle<String> Intl::NumberToLocaleString(Isolate* isolate,
if (can_cache) {
isolate->set_icu_object_in_cache(
Isolate::ICUObjectCacheType::kDefaultNumberFormat,
- std::static_pointer_cast<icu::UObject>(
- number_format->icu_number_format()->get()));
+ std::static_pointer_cast<icu::UMemory>(
+ number_format->icu_number_formatter().get()));
}
// Return FormatNumber(numberFormat, x).
- icu::NumberFormat* icu_number_format =
- number_format->icu_number_format()->raw();
+ icu::number::LocalizedNumberFormatter* icu_number_format =
+ number_format->icu_number_formatter().raw();
return JSNumberFormat::FormatNumeric(isolate, *icu_number_format,
numeric_obj);
}
@@ -1116,19 +1140,17 @@ Maybe<int> GetNumberOption(Isolate* isolate, Handle<JSReceiver> options,
} // namespace
-Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
- icu::DecimalFormat* number_format,
- Handle<JSReceiver> options,
- int mnfd_default,
- int mxfd_default) {
- CHECK_NOT_NULL(number_format);
+Maybe<Intl::NumberFormatDigitOptions> Intl::SetNumberFormatDigitOptions(
+ Isolate* isolate, Handle<JSReceiver> options, int mnfd_default,
+ int mxfd_default) {
+ Intl::NumberFormatDigitOptions digit_options;
// 5. Let mnid be ? GetNumberOption(options, "minimumIntegerDigits,", 1, 21,
// 1).
int mnid;
if (!GetNumberOption(isolate, options, "minimumIntegerDigits", 1, 21, 1)
.To(&mnid)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
// 6. Let mnfd be ? GetNumberOption(options, "minimumFractionDigits", 0, 20,
@@ -1137,7 +1159,7 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
if (!GetNumberOption(isolate, options, "minimumFractionDigits", 0, 20,
mnfd_default)
.To(&mnfd)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
// 7. Let mxfdActualDefault be max( mnfd, mxfdDefault ).
@@ -1149,7 +1171,7 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
if (!GetNumberOption(isolate, options, "maximumFractionDigits", mnfd, 20,
mxfd_actual_default)
.To(&mxfd)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
// 9. Let mnsd be ? Get(options, "minimumSignificantDigits").
@@ -1158,7 +1180,7 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
isolate->factory()->minimumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mnsd_obj, JSReceiver::GetProperty(isolate, options, mnsd_str),
- Nothing<bool>());
+ Nothing<NumberFormatDigitOptions>());
// 10. Let mxsd be ? Get(options, "maximumSignificantDigits").
Handle<Object> mxsd_obj;
@@ -1166,45 +1188,43 @@ Maybe<bool> Intl::SetNumberFormatDigitOptions(Isolate* isolate,
isolate->factory()->maximumSignificantDigits_string();
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, mxsd_obj, JSReceiver::GetProperty(isolate, options, mxsd_str),
- Nothing<bool>());
+ Nothing<NumberFormatDigitOptions>());
// 11. Set intlObj.[[MinimumIntegerDigits]] to mnid.
- number_format->setMinimumIntegerDigits(mnid);
+ digit_options.minimum_integer_digits = mnid;
// 12. Set intlObj.[[MinimumFractionDigits]] to mnfd.
- number_format->setMinimumFractionDigits(mnfd);
+ digit_options.minimum_fraction_digits = mnfd;
// 13. Set intlObj.[[MaximumFractionDigits]] to mxfd.
- number_format->setMaximumFractionDigits(mxfd);
+ digit_options.maximum_fraction_digits = mxfd;
- bool significant_digits_used = false;
// 14. If mnsd is not undefined or mxsd is not undefined, then
if (!mnsd_obj->IsUndefined(isolate) || !mxsd_obj->IsUndefined(isolate)) {
// 14. a. Let mnsd be ? DefaultNumberOption(mnsd, 1, 21, 1).
int mnsd;
if (!DefaultNumberOption(isolate, mnsd_obj, 1, 21, 1, mnsd_str).To(&mnsd)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
// 14. b. Let mxsd be ? DefaultNumberOption(mxsd, mnsd, 21, 21).
int mxsd;
if (!DefaultNumberOption(isolate, mxsd_obj, mnsd, 21, 21, mxsd_str)
.To(&mxsd)) {
- return Nothing<bool>();
+ return Nothing<NumberFormatDigitOptions>();
}
- significant_digits_used = true;
-
// 14. c. Set intlObj.[[MinimumSignificantDigits]] to mnsd.
- number_format->setMinimumSignificantDigits(mnsd);
+ digit_options.minimum_significant_digits = mnsd;
// 14. d. Set intlObj.[[MaximumSignificantDigits]] to mxsd.
- number_format->setMaximumSignificantDigits(mxsd);
+ digit_options.maximum_significant_digits = mxsd;
+ } else {
+ digit_options.minimum_significant_digits = 0;
+ digit_options.maximum_significant_digits = 0;
}
- number_format->setSignificantDigitsUsed(significant_digits_used);
- number_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
- return Just(true);
+ return Just(digit_options);
}
namespace {
@@ -1447,36 +1467,47 @@ MaybeHandle<JSObject> Intl::SupportedLocalesOf(
}
namespace {
+
template <typename T>
bool IsValidExtension(const icu::Locale& locale, const char* key,
const std::string& value) {
+ const char* legacy_type = uloc_toLegacyType(key, value.c_str());
+ if (legacy_type == nullptr) {
+ return false;
+ }
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::StringEnumeration> enumeration(
T::getKeywordValuesForLocale(key, icu::Locale(locale.getBaseName()),
false, status));
- if (U_SUCCESS(status)) {
- int32_t length;
- std::string legacy_type(uloc_toLegacyType(key, value.c_str()));
- for (const char* item = enumeration->next(&length, status); item != nullptr;
- item = enumeration->next(&length, status)) {
- if (U_SUCCESS(status) && legacy_type == item) {
- return true;
- }
+ if (U_FAILURE(status)) {
+ return false;
+ }
+ int32_t length;
+ for (const char* item = enumeration->next(&length, status);
+ U_SUCCESS(status) && item != nullptr;
+ item = enumeration->next(&length, status)) {
+ if (strcmp(legacy_type, item) == 0) {
+ return true;
}
}
return false;
}
-bool IsValidCalendar(const icu::Locale& locale, const std::string& value) {
- return IsValidExtension<icu::Calendar>(locale, "calendar", value);
-}
-
bool IsValidCollation(const icu::Locale& locale, const std::string& value) {
std::set<std::string> invalid_values = {"standard", "search"};
if (invalid_values.find(value) != invalid_values.end()) return false;
return IsValidExtension<icu::Collator>(locale, "collation", value);
}
+} // namespace
+
+bool Intl::IsValidCalendar(const icu::Locale& locale,
+ const std::string& value) {
+ return IsValidExtension<icu::Calendar>(locale, "calendar", value);
+}
+
+namespace {
+
bool IsValidNumberingSystem(const std::string& value) {
std::set<std::string> invalid_values = {"native", "traditio", "finance"};
if (invalid_values.find(value) != invalid_values.end()) return false;
@@ -1527,7 +1558,7 @@ std::map<std::string, std::string> LookupAndValidateUnicodeExtensions(
bool is_valid_value = false;
// 8.h.ii.1.a If keyLocaleData contains requestedValue, then
if (strcmp("ca", bcp47_key) == 0) {
- is_valid_value = IsValidCalendar(*icu_locale, bcp47_value);
+ is_valid_value = Intl::IsValidCalendar(*icu_locale, bcp47_value);
} else if (strcmp("co", bcp47_key) == 0) {
is_valid_value = IsValidCollation(*icu_locale, bcp47_value);
} else if (strcmp("hc", bcp47_key) == 0) {
@@ -1649,6 +1680,7 @@ Intl::ResolvedLocale Intl::ResolveLocale(
Managed<icu::UnicodeString> Intl::SetTextToBreakIterator(
Isolate* isolate, Handle<String> text, icu::BreakIterator* break_iterator) {
+ text = String::Flatten(isolate, text);
icu::UnicodeString* u_text =
(icu::UnicodeString*)(Intl::ToICUUnicodeString(isolate, text).clone());
@@ -1858,6 +1890,29 @@ Maybe<Intl::MatcherOption> Intl::GetLocaleMatcher(Isolate* isolate,
Intl::MatcherOption::kLookup);
}
+Maybe<bool> Intl::GetNumberingSystem(Isolate* isolate,
+ Handle<JSReceiver> options,
+ const char* method,
+ std::unique_ptr<char[]>* result) {
+ const std::vector<const char*> empty_values = {};
+ Maybe<bool> maybe = Intl::GetStringOption(isolate, options, "numberingSystem",
+ empty_values, method, result);
+ MAYBE_RETURN(maybe, Nothing<bool>());
+ if (maybe.FromJust() && *result != nullptr) {
+ if (!IsValidNumberingSystem(result->get())) {
+ THROW_NEW_ERROR_RETURN_VALUE(
+ isolate,
+ NewRangeError(
+ MessageTemplate::kInvalid,
+ isolate->factory()->numberingSystem_string(),
+ isolate->factory()->NewStringFromAsciiChecked(result->get())),
+ Nothing<bool>());
+ }
+ return Just(true);
+ }
+ return Just(false);
+}
+
Intl::HourCycle Intl::ToHourCycle(const std::string& hc) {
if (hc == "h11") return Intl::HourCycle::kH11;
if (hc == "h12") return Intl::HourCycle::kH12;
@@ -1928,11 +1983,27 @@ Handle<String> Intl::NumberFieldToType(Isolate* isolate,
UNREACHABLE();
return Handle<String>();
+ case UNUM_COMPACT_FIELD:
+ return isolate->factory()->compact_string();
+ case UNUM_MEASURE_UNIT_FIELD:
+ return isolate->factory()->unit_string();
+
default:
UNREACHABLE();
return Handle<String>();
}
}
+// A helper function to convert the FormattedValue for several Intl objects.
+MaybeHandle<String> Intl::FormattedToString(
+ Isolate* isolate, const icu::FormattedValue& formatted) {
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString result = formatted.toString(status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String);
+ }
+ return Intl::ToString(isolate, result);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index 5adb6fa2c8..1274fa0549 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -14,9 +14,9 @@
#include <string>
#include "src/base/timezone-cache.h"
-#include "src/contexts.h"
-#include "src/objects.h"
+#include "src/objects/contexts.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/locid.h"
#include "unicode/uversion.h"
@@ -25,10 +25,10 @@
namespace U_ICU_NAMESPACE {
class BreakIterator;
class Collator;
-class DecimalFormat;
+class FormattedValue;
class SimpleDateFormat;
class UnicodeString;
-}
+} // namespace U_ICU_NAMESPACE
namespace v8 {
namespace internal {
@@ -171,9 +171,16 @@ class Intl {
Handle<Object> options);
// ecma402/#sec-setnfdigitoptions
- V8_WARN_UNUSED_RESULT static Maybe<bool> SetNumberFormatDigitOptions(
- Isolate* isolate, icu::DecimalFormat* number_format,
- Handle<JSReceiver> options, int mnfd_default, int mxfd_default);
+ struct NumberFormatDigitOptions {
+ int minimum_integer_digits;
+ int minimum_fraction_digits;
+ int maximum_fraction_digits;
+ int minimum_significant_digits;
+ int maximum_significant_digits;
+ };
+ V8_WARN_UNUSED_RESULT static Maybe<NumberFormatDigitOptions>
+ SetNumberFormatDigitOptions(Isolate* isolate, Handle<JSReceiver> options,
+ int mnfd_default, int mxfd_default);
static icu::Locale CreateICULocale(const std::string& bcp47_locale);
@@ -186,6 +193,10 @@ class Intl {
Isolate* isolate, const icu::UnicodeString& string, int32_t begin,
int32_t end);
+ // Helper function to convert a FormattedValue to String
+ V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormattedToString(
+ Isolate* isolate, const icu::FormattedValue& formatted);
+
// Helper function to convert number field id to type string.
static Handle<String> NumberFieldToType(Isolate* isolate,
Handle<Object> numeric_obj,
@@ -244,6 +255,15 @@ class Intl {
V8_WARN_UNUSED_RESULT static Maybe<MatcherOption> GetLocaleMatcher(
Isolate* isolate, Handle<JSReceiver> options, const char* method);
+ // Shared function to read the "numberingSystem" option.
+ V8_WARN_UNUSED_RESULT static Maybe<bool> GetNumberingSystem(
+ Isolate* isolate, Handle<JSReceiver> options, const char* method,
+ std::unique_ptr<char[]>* result);
+
+ // Check the calendar is valid or not for that locale.
+ static bool IsValidCalendar(const icu::Locale& locale,
+ const std::string& value);
+
struct ResolvedLocale {
std::string locale;
icu::Locale icu_locale;
diff --git a/deps/v8/src/objects/intl-objects.tq b/deps/v8/src/objects/intl-objects.tq
new file mode 100644
index 0000000000..67d8537feb
--- /dev/null
+++ b/deps/v8/src/objects/intl-objects.tq
@@ -0,0 +1,64 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include 'src/objects/js-number-format.h'
+#include 'src/objects/js-objects.h'
+#include 'src/objects/js-plural-rules.h'
+#include 'src/objects/js-relative-time-format.h'
+#include 'src/objects/js-date-time-format.h'
+#include 'src/objects/js-list-format.h'
+#include 'src/objects/js-locale.h'
+#include 'src/objects/js-segment-iterator.h'
+#include 'src/objects/js-segmenter.h'
+
+extern class JSDateTimeFormat extends JSObject {
+ icu_locale: Foreign; // Managed<icu::Locale>
+ icu_simple_date_format: Foreign; // Managed<icu::SimpleDateFormat>
+ icu_date_interval_format: Foreign; // Managed<icu::DateIntervalFormat>
+ bound_format: JSFunction | Undefined;
+ flags: Smi;
+}
+
+extern class JSListFormat extends JSObject {
+ locale: String;
+ icu_formatter: Foreign; // Managed<icu::ListFormatter>
+ flags: Smi;
+}
+
+extern class JSNumberFormat extends JSObject {
+ locale: String;
+ icu_number_formatter:
+ Foreign; // Managed<icu::number::LocalizedNumberFormatter>
+ bound_format: JSFunction | Undefined;
+ flags: Smi;
+}
+
+extern class JSPluralRules extends JSObject {
+ locale: String;
+ flags: Smi;
+ icu_plural_rules: Foreign; // Managed<icu::PluralRules>
+ icu_decimal_format: Foreign; // Managed<icu::DecimalFormat>
+}
+
+extern class JSRelativeTimeFormat extends JSObject {
+ locale: String;
+ icu_formatter: Foreign; // Managed<icu::RelativeDateTimeFormatter>
+ flags: Smi;
+}
+
+extern class JSLocale extends JSObject {
+ icu_locale: Foreign; // Managed<icu::Locale>
+}
+
+extern class JSSegmenter extends JSObject {
+ locale: String;
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ flags: Smi;
+}
+
+extern class JSSegmentIterator extends JSObject {
+ icu_break_iterator: Foreign; // Managed<icu::BreakIterator>
+ unicode_string: Foreign; // Managed<icu::UnicodeString>
+ flags: Smi;
+}
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 39677093c2..061fec10f7 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -8,8 +8,8 @@
#include "src/objects/js-array-buffer.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/js-objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-engine.h"
// Has to be the last include (doesn't have include guards):
@@ -29,21 +29,19 @@ CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSDataView)
size_t JSArrayBuffer::byte_length() const {
- return READ_UINTPTR_FIELD(*this, kByteLengthOffset);
+ return ReadField<size_t>(kByteLengthOffset);
}
void JSArrayBuffer::set_byte_length(size_t value) {
- WRITE_UINTPTR_FIELD(*this, kByteLengthOffset, value);
+ WriteField<size_t>(kByteLengthOffset, value);
}
void* JSArrayBuffer::backing_store() const {
- intptr_t ptr = READ_INTPTR_FIELD(*this, kBackingStoreOffset);
- return reinterpret_cast<void*>(ptr);
+ return reinterpret_cast<void*>(ReadField<Address>(kBackingStoreOffset));
}
void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
- intptr_t ptr = reinterpret_cast<intptr_t>(value);
- WRITE_INTPTR_FIELD(*this, kBackingStoreOffset, ptr);
+ WriteField<Address>(kBackingStoreOffset, reinterpret_cast<Address>(value));
}
size_t JSArrayBuffer::allocation_length() const {
@@ -93,11 +91,11 @@ void JSArrayBuffer::clear_padding() {
}
void JSArrayBuffer::set_bit_field(uint32_t bits) {
- WRITE_UINT32_FIELD(*this, kBitFieldOffset, bits);
+ WriteField<uint32_t>(kBitFieldOffset, bits);
}
uint32_t JSArrayBuffer::bit_field() const {
- return READ_UINT32_FIELD(*this, kBitFieldOffset);
+ return ReadField<uint32_t>(kBitFieldOffset);
}
// |bit_field| fields.
@@ -111,49 +109,58 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
JSArrayBuffer::IsSharedBit)
size_t JSArrayBufferView::byte_offset() const {
- return READ_UINTPTR_FIELD(*this, kByteOffsetOffset);
+ return ReadField<size_t>(kByteOffsetOffset);
}
void JSArrayBufferView::set_byte_offset(size_t value) {
- WRITE_UINTPTR_FIELD(*this, kByteOffsetOffset, value);
+ WriteField<size_t>(kByteOffsetOffset, value);
}
size_t JSArrayBufferView::byte_length() const {
- return READ_UINTPTR_FIELD(*this, kByteLengthOffset);
+ return ReadField<size_t>(kByteLengthOffset);
}
void JSArrayBufferView::set_byte_length(size_t value) {
- WRITE_UINTPTR_FIELD(*this, kByteLengthOffset, value);
+ WriteField<size_t>(kByteLengthOffset, value);
}
ACCESSORS(JSArrayBufferView, buffer, Object, kBufferOffset)
bool JSArrayBufferView::WasDetached() const {
- return JSArrayBuffer::cast(buffer())->was_detached();
+ return JSArrayBuffer::cast(buffer()).was_detached();
}
-Object JSTypedArray::length() const { return READ_FIELD(*this, kLengthOffset); }
+size_t JSTypedArray::length() const { return ReadField<size_t>(kLengthOffset); }
-size_t JSTypedArray::length_value() const {
- double val = length()->Number();
- DCHECK_LE(val, kMaxSafeInteger); // 2^53-1
- DCHECK_GE(val, -kMaxSafeInteger); // -2^53+1
- DCHECK_LE(val, std::numeric_limits<size_t>::max());
- DCHECK_GE(val, std::numeric_limits<size_t>::min());
- return static_cast<size_t>(val);
+void JSTypedArray::set_length(size_t value) {
+ WriteField<size_t>(kLengthOffset, value);
}
-void JSTypedArray::set_length(Object value, WriteBarrierMode mode) {
- WRITE_FIELD(*this, kLengthOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kLengthOffset, value, mode);
+void* JSTypedArray::external_pointer() const {
+ return reinterpret_cast<void*>(ReadField<Address>(kExternalPointerOffset));
+}
+
+void JSTypedArray::set_external_pointer(void* value) {
+ WriteField<Address>(kExternalPointerOffset, reinterpret_cast<Address>(value));
+}
+
+ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
+
+void* JSTypedArray::DataPtr() {
+ return reinterpret_cast<void*>(
+ base_pointer().ptr() + reinterpret_cast<intptr_t>(external_pointer()));
}
bool JSTypedArray::is_on_heap() const {
DisallowHeapAllocation no_gc;
// Checking that buffer()->backing_store() is not nullptr is not sufficient;
// it will be nullptr when byte_length is 0 as well.
- FixedTypedArrayBase fta = FixedTypedArrayBase::cast(elements());
- return fta->base_pointer()->ptr() == fta.ptr();
+ return base_pointer().ptr() == elements().ptr();
+}
+
+// static
+void* JSTypedArray::ExternalPointerForOnHeapArray() {
+ return reinterpret_cast<void*>(ByteArray::kHeaderSize - kHeapObjectTag);
}
// static
@@ -178,9 +185,13 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
return array;
}
-#ifdef VERIFY_HEAP
-ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
-#endif
+void* JSDataView::data_pointer() const {
+ return reinterpret_cast<void*>(ReadField<Address>(kDataPointerOffset));
+}
+
+void JSDataView::set_data_pointer(void* value) {
+ WriteField<Address>(kDataPointerOffset, reinterpret_cast<Address>(value));
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index f96ae7e752..a506920f95 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -5,8 +5,8 @@
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-buffer-inl.h"
-#include "src/counters.h"
-#include "src/property-descriptor.h"
+#include "src/logging/counters.h"
+#include "src/objects/property-descriptor.h"
namespace v8 {
namespace internal {
@@ -69,7 +69,7 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
if (allocation.is_wasm_memory) {
wasm::WasmMemoryTracker* memory_tracker =
isolate->wasm_engine()->memory_tracker();
- memory_tracker->FreeMemoryIfIsWasmMemory(isolate, allocation.backing_store);
+ memory_tracker->FreeWasmMemory(isolate, allocation.backing_store);
} else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length);
@@ -150,10 +150,7 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
Isolate* isolate = typed_array->GetIsolate();
- DCHECK(IsFixedTypedArrayElementsKind(typed_array->GetElementsKind()));
-
- Handle<FixedTypedArrayBase> fixed_typed_array(
- FixedTypedArrayBase::cast(typed_array->elements()), isolate);
+ DCHECK(IsTypedArrayElementsKind(typed_array->GetElementsKind()));
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
isolate);
@@ -162,14 +159,13 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
void* backing_store =
isolate->array_buffer_allocator()->AllocateUninitialized(
- fixed_typed_array->DataSize());
+ typed_array->byte_length());
if (backing_store == nullptr) {
isolate->heap()->FatalProcessOutOfMemory(
"JSTypedArray::MaterializeArrayBuffer");
}
buffer->set_is_external(false);
- DCHECK_EQ(buffer->byte_length(),
- static_cast<uintptr_t>(fixed_typed_array->DataSize()));
+ DCHECK_EQ(buffer->byte_length(), typed_array->byte_length());
// Initialize backing store at last to avoid handling of |JSArrayBuffers| that
// are currently being constructed in the |ArrayBufferTracker|. The
// registration method below handles the case of registering a buffer that has
@@ -177,14 +173,12 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
buffer->set_backing_store(backing_store);
// RegisterNewArrayBuffer expects a valid length for adjusting counters.
isolate->heap()->RegisterNewArrayBuffer(*buffer);
- memcpy(buffer->backing_store(), fixed_typed_array->DataPtr(),
- fixed_typed_array->DataSize());
- Handle<FixedTypedArrayBase> new_elements =
- isolate->factory()->NewFixedTypedArrayWithExternalPointer(
- fixed_typed_array->length(), typed_array->type(),
- static_cast<uint8_t*>(buffer->backing_store()));
-
- typed_array->set_elements(*new_elements);
+ memcpy(buffer->backing_store(), typed_array->DataPtr(),
+ typed_array->byte_length());
+
+ typed_array->set_elements(ReadOnlyRoots(isolate).empty_byte_array());
+ typed_array->set_external_pointer(backing_store);
+ typed_array->set_base_pointer(Smi::kZero);
DCHECK(!typed_array->is_on_heap());
return buffer;
@@ -226,7 +220,7 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
}
// 3b iv. Let length be O.[[ArrayLength]].
- size_t length = o->length_value();
+ size_t length = o->length();
// 3b v. If numericIndex ≥ length, return false.
if (o->WasDetached() || index >= length) {
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
@@ -271,13 +265,13 @@ Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
}
ExternalArrayType JSTypedArray::type() {
- switch (elements()->map()->instance_type()) {
-#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
+ switch (map().elements_kind()) {
+#define ELEMENTS_KIND_TO_ARRAY_TYPE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return kExternal##Type##Array;
- TYPED_ARRAYS(INSTANCE_TYPE_TO_ARRAY_TYPE)
-#undef INSTANCE_TYPE_TO_ARRAY_TYPE
+ TYPED_ARRAYS(ELEMENTS_KIND_TO_ARRAY_TYPE)
+#undef ELEMENTS_KIND_TO_ARRAY_TYPE
default:
UNREACHABLE();
@@ -285,13 +279,13 @@ ExternalArrayType JSTypedArray::type() {
}
size_t JSTypedArray::element_size() {
- switch (elements()->map()->instance_type()) {
-#define INSTANCE_TYPE_TO_ELEMENT_SIZE(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
+ switch (map().elements_kind()) {
+#define ELEMENTS_KIND_TO_ELEMENT_SIZE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
return sizeof(ctype);
- TYPED_ARRAYS(INSTANCE_TYPE_TO_ELEMENT_SIZE)
-#undef INSTANCE_TYPE_TO_ELEMENT_SIZE
+ TYPED_ARRAYS(ELEMENTS_KIND_TO_ELEMENT_SIZE)
+#undef ELEMENTS_KIND_TO_ELEMENT_SIZE
default:
UNREACHABLE();
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index b77d1c9877..b22001f04a 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -172,16 +172,26 @@ class JSArrayBufferView : public JSObject {
JS_ARRAY_BUFFER_VIEW_FIELDS)
#undef JS_ARRAY_BUFFER_VIEW_FIELDS
- class BodyDescriptor;
+ STATIC_ASSERT(IsAligned(kByteOffsetOffset, kUIntptrSize));
+ STATIC_ASSERT(IsAligned(kByteLengthOffset, kUIntptrSize));
OBJECT_CONSTRUCTORS(JSArrayBufferView, JSObject);
};
class JSTypedArray : public JSArrayBufferView {
public:
+ // TODO(v8:4153): This should be equal to JSArrayBuffer::kMaxByteLength
+ // eventually.
+ static constexpr size_t kMaxLength = v8::TypedArray::kMaxLength;
+
// [length]: length of typed array in elements.
- DECL_ACCESSORS(length, Object)
- inline size_t length_value() const;
+ DECL_PRIMITIVE_ACCESSORS(length, size_t)
+
+ // [external_pointer]: TODO(v8:4153)
+ DECL_PRIMITIVE_ACCESSORS(external_pointer, void*)
+
+ // [base_pointer]: TODO(v8:4153)
+ DECL_ACCESSORS(base_pointer, Object)
// ES6 9.4.5.3
V8_WARN_UNUSED_RESULT static Maybe<bool> DefineOwnProperty(
@@ -195,9 +205,14 @@ class JSTypedArray : public JSArrayBufferView {
V8_EXPORT_PRIVATE Handle<JSArrayBuffer> GetBuffer();
+ // Use with care: returns raw pointer into heap.
+ inline void* DataPtr();
+
// Whether the buffer's backing store is on-heap or off-heap.
inline bool is_on_heap() const;
+ static inline void* ExternalPointerForOnHeapArray();
+
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
const char* method_name);
@@ -207,32 +222,39 @@ class JSTypedArray : public JSArrayBufferView {
DECL_VERIFIER(JSTypedArray)
// Layout description.
-#define JS_TYPED_ARRAY_FIELDS(V) \
- /* Raw data fields. */ \
- V(kLengthOffset, kTaggedSize) \
- /* Header size. */ \
+#define JS_TYPED_ARRAY_FIELDS(V) \
+ /* Raw data fields. */ \
+ V(kLengthOffset, kUIntptrSize) \
+ V(kExternalPointerOffset, kSystemPointerSize) \
+ V(kBasePointerOffset, kTaggedSize) \
+ /* Header size. */ \
V(kHeaderSize, 0)
DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize,
JS_TYPED_ARRAY_FIELDS)
#undef JS_TYPED_ARRAY_FIELDS
+ STATIC_ASSERT(IsAligned(kLengthOffset, kUIntptrSize));
+ STATIC_ASSERT(IsAligned(kExternalPointerOffset, kSystemPointerSize));
+
static const int kSizeWithEmbedderFields =
kHeaderSize +
v8::ArrayBufferView::kEmbedderFieldCount * kEmbedderDataSlotSize;
+ class BodyDescriptor;
+
private:
static Handle<JSArrayBuffer> MaterializeArrayBuffer(
Handle<JSTypedArray> typed_array);
-#ifdef VERIFY_HEAP
- DECL_ACCESSORS(raw_length, Object)
-#endif
OBJECT_CONSTRUCTORS(JSTypedArray, JSArrayBufferView);
};
class JSDataView : public JSArrayBufferView {
public:
+ // [data_pointer]: pointer to the actual data.
+ DECL_PRIMITIVE_ACCESSORS(data_pointer, void*)
+
DECL_CAST(JSDataView)
// Dispatched behavior.
@@ -240,10 +262,24 @@ class JSDataView : public JSArrayBufferView {
DECL_VERIFIER(JSDataView)
// Layout description.
+#define JS_DATA_VIEW_FIELDS(V) \
+ /* Raw data fields. */ \
+ V(kDataPointerOffset, kIntptrSize) \
+ /* Header size. */ \
+ V(kHeaderSize, 0)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSArrayBufferView::kHeaderSize,
+ JS_DATA_VIEW_FIELDS)
+#undef JS_DATA_VIEW_FIELDS
+
+ STATIC_ASSERT(IsAligned(kDataPointerOffset, kUIntptrSize));
+
static const int kSizeWithEmbedderFields =
kHeaderSize +
v8::ArrayBufferView::kEmbedderFieldCount * kEmbedderDataSlotSize;
+ class BodyDescriptor;
+
OBJECT_CONSTRUCTORS(JSDataView, JSArrayBufferView);
};
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index 31c8735f62..335fabba86 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-array.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -33,8 +33,8 @@ bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
}
bool JSArray::AllowsSetLength() {
- bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
- DCHECK(result == !HasFixedTypedArrayElements());
+ bool result = elements().IsFixedArray() || elements().IsFixedDoubleArray();
+ DCHECK(result == !HasTypedArrayElements());
return result;
}
@@ -55,7 +55,7 @@ void JSArray::SetContent(Handle<JSArray> array,
}
bool JSArray::HasArrayPrototype(Isolate* isolate) {
- return map()->prototype() == *isolate->initial_array_prototype();
+ return map().prototype() == *isolate->initial_array_prototype();
}
ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset)
@@ -63,7 +63,7 @@ ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
IterationKind JSArrayIterator::kind() const {
return static_cast<IterationKind>(
- Smi::cast(READ_FIELD(*this, kKindOffset))->value());
+ Smi::cast(READ_FIELD(*this, kKindOffset)).value());
}
void JSArrayIterator::set_kind(IterationKind kind) {
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 23d62c810e..4bc296e31e 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -8,7 +8,7 @@
#include "src/objects/allocation-site.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-break-iterator-inl.h b/deps/v8/src/objects/js-break-iterator-inl.h
index 7c22be25f6..177d9d352b 100644
--- a/deps/v8/src/objects/js-break-iterator-inl.h
+++ b/deps/v8/src/objects/js-break-iterator-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_BREAK_ITERATOR_INL_H_
#define V8_OBJECTS_JS_BREAK_ITERATOR_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-break-iterator.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-break-iterator.cc b/deps/v8/src/objects/js-break-iterator.cc
index 4516b34aac..4879fb41a4 100644
--- a/deps/v8/src/objects/js-break-iterator.cc
+++ b/deps/v8/src/objects/js-break-iterator.cc
@@ -124,7 +124,7 @@ void JSV8BreakIterator::AdoptText(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator_holder,
Handle<String> text) {
icu::BreakIterator* break_iterator =
- break_iterator_holder->break_iterator()->raw();
+ break_iterator_holder->break_iterator().raw();
CHECK_NOT_NULL(break_iterator);
Managed<icu::UnicodeString> unicode_string =
Intl::SetTextToBreakIterator(isolate, text, break_iterator);
@@ -149,24 +149,24 @@ Handle<String> JSV8BreakIterator::TypeAsString() const {
Handle<Object> JSV8BreakIterator::Current(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
return isolate->factory()->NewNumberFromInt(
- break_iterator->break_iterator()->raw()->current());
+ break_iterator->break_iterator().raw()->current());
}
Handle<Object> JSV8BreakIterator::First(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
return isolate->factory()->NewNumberFromInt(
- break_iterator->break_iterator()->raw()->first());
+ break_iterator->break_iterator().raw()->first());
}
Handle<Object> JSV8BreakIterator::Next(
Isolate* isolate, Handle<JSV8BreakIterator> break_iterator) {
return isolate->factory()->NewNumberFromInt(
- break_iterator->break_iterator()->raw()->next());
+ break_iterator->break_iterator().raw()->next());
}
String JSV8BreakIterator::BreakType(Isolate* isolate,
Handle<JSV8BreakIterator> break_iterator) {
- int32_t status = break_iterator->break_iterator()->raw()->getRuleStatus();
+ int32_t status = break_iterator->break_iterator().raw()->getRuleStatus();
// Keep return values in sync with JavaScript BreakType enum.
if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
return ReadOnlyRoots(isolate).none_string();
diff --git a/deps/v8/src/objects/js-break-iterator.h b/deps/v8/src/objects/js-break-iterator.h
index 3eff347485..fe94c177c4 100644
--- a/deps/v8/src/objects/js-break-iterator.h
+++ b/deps/v8/src/objects/js-break-iterator.h
@@ -12,9 +12,9 @@
#include <set>
#include <string>
-#include "src/objects.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-collator-inl.h b/deps/v8/src/objects/js-collator-inl.h
index b6fa239c31..e82351993d 100644
--- a/deps/v8/src/objects/js-collator-inl.h
+++ b/deps/v8/src/objects/js-collator-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_COLLATOR_INL_H_
#define V8_OBJECTS_JS_COLLATOR_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-collator.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-collator.cc b/deps/v8/src/objects/js-collator.cc
index b75468c6f3..4a1e857403 100644
--- a/deps/v8/src/objects/js-collator.cc
+++ b/deps/v8/src/objects/js-collator.cc
@@ -8,9 +8,9 @@
#include "src/objects/js-collator.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
#include "src/objects/js-collator-inl.h"
+#include "src/objects/objects-inl.h"
#include "unicode/coll.h"
#include "unicode/locid.h"
#include "unicode/strenum.h"
@@ -68,7 +68,7 @@ Handle<JSObject> JSCollator::ResolvedOptions(Isolate* isolate,
Handle<JSObject> options =
isolate->factory()->NewJSObject(isolate->object_function());
- icu::Collator* icu_collator = collator->icu_collator()->raw();
+ icu::Collator* icu_collator = collator->icu_collator().raw();
CHECK_NOT_NULL(icu_collator);
UErrorCode status = U_ZERO_ERROR;
diff --git a/deps/v8/src/objects/js-collator.h b/deps/v8/src/objects/js-collator.h
index e5d223aa24..2bedbf811a 100644
--- a/deps/v8/src/objects/js-collator.h
+++ b/deps/v8/src/objects/js-collator.h
@@ -12,12 +12,12 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-collection-inl.h b/deps/v8/src/objects/js-collection-inl.h
index 78b6cc5db3..fb0cf1652e 100644
--- a/deps/v8/src/objects/js-collection-inl.h
+++ b/deps/v8/src/objects/js-collection-inl.h
@@ -8,10 +8,10 @@
#include "src/objects/js-collection.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -63,8 +63,8 @@ CAST_ACCESSOR(JSWeakSet)
Object JSMapIterator::CurrentValue() {
OrderedHashMap table = OrderedHashMap::cast(this->table());
int index = Smi::ToInt(this->index());
- Object value = table->ValueAt(index);
- DCHECK(!value->IsTheHole());
+ Object value = table.ValueAt(index);
+ DCHECK(!value.IsTheHole());
return value;
}
diff --git a/deps/v8/src/objects/js-collection-iterator.h b/deps/v8/src/objects/js-collection-iterator.h
index f25753738b..4952f04a72 100644
--- a/deps/v8/src/objects/js-collection-iterator.h
+++ b/deps/v8/src/objects/js-collection-iterator.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_JS_COLLECTION_ITERATOR_H_
#define V8_OBJECTS_JS_COLLECTION_ITERATOR_H_
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
#include "src/objects/js-objects.h"
+#include "src/objects/objects.h"
#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/js-collection.h b/deps/v8/src/objects/js-collection.h
index 0450de8fb1..6dfde352ca 100644
--- a/deps/v8/src/objects/js-collection.h
+++ b/deps/v8/src/objects/js-collection.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_JS_COLLECTION_H_
#define V8_OBJECTS_JS_COLLECTION_H_
-#include "src/objects.h"
#include "src/objects/js-collection-iterator.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -44,6 +44,8 @@ class JSSet : public JSCollection {
// Dispatched behavior.
DECL_PRINTER(JSSet)
DECL_VERIFIER(JSSet)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSCollection::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_SET_FIELDS)
OBJECT_CONSTRUCTORS(JSSet, JSCollection);
};
@@ -72,6 +74,8 @@ class JSMap : public JSCollection {
// Dispatched behavior.
DECL_PRINTER(JSMap)
DECL_VERIFIER(JSMap)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSCollection::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_MAP_FIELDS)
OBJECT_CONSTRUCTORS(JSMap, JSCollection);
};
@@ -121,6 +125,8 @@ class JSWeakCollection : public JSObject {
// Visit the whole object.
using BodyDescriptor = BodyDescriptorImpl;
+ static const int kSizeOfAllWeakCollections = kHeaderSize;
+
OBJECT_CONSTRUCTORS(JSWeakCollection, JSObject);
};
@@ -133,6 +139,9 @@ class JSWeakMap : public JSWeakCollection {
DECL_PRINTER(JSWeakMap)
DECL_VERIFIER(JSWeakMap)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSWeakCollection::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_MAP_FIELDS)
+ STATIC_ASSERT(kSize == kSizeOfAllWeakCollections);
OBJECT_CONSTRUCTORS(JSWeakMap, JSWeakCollection);
};
@@ -144,6 +153,9 @@ class JSWeakSet : public JSWeakCollection {
// Dispatched behavior.
DECL_PRINTER(JSWeakSet)
DECL_VERIFIER(JSWeakSet)
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSWeakCollection::kHeaderSize,
+ TORQUE_GENERATED_JSWEAK_SET_FIELDS)
+ STATIC_ASSERT(kSize == kSizeOfAllWeakCollections);
OBJECT_CONSTRUCTORS(JSWeakSet, JSWeakCollection);
};
diff --git a/deps/v8/src/objects/js-date-time-format-inl.h b/deps/v8/src/objects/js-date-time-format-inl.h
index 1657241b07..8947ea7b4c 100644
--- a/deps/v8/src/objects/js-date-time-format-inl.h
+++ b/deps/v8/src/objects/js-date-time-format-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_DATE_TIME_FORMAT_INL_H_
#define V8_OBJECTS_JS_DATE_TIME_FORMAT_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-date-time-format.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -20,11 +20,11 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSDateTimeFormat, JSObject)
-ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kICULocaleOffset)
+ACCESSORS(JSDateTimeFormat, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
ACCESSORS(JSDateTimeFormat, icu_simple_date_format,
- Managed<icu::SimpleDateFormat>, kICUSimpleDateFormatOffset)
+ Managed<icu::SimpleDateFormat>, kIcuSimpleDateFormatOffset)
ACCESSORS(JSDateTimeFormat, icu_date_interval_format,
- Managed<icu::DateIntervalFormat>, kICUDateIntervalFormatOffset)
+ Managed<icu::DateIntervalFormat>, kIcuDateIntervalFormatOffset)
ACCESSORS(JSDateTimeFormat, bound_format, Object, kBoundFormatOffset)
SMI_ACCESSORS(JSDateTimeFormat, flags, kFlagsOffset)
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index eda95f8773..8730e0a39b 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -12,9 +12,9 @@
#include <string>
#include <vector>
-#include "src/date.h"
+#include "src/date/date.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-date-time-format-inl.h"
@@ -360,8 +360,8 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
Handle<Object> resolved_obj;
CHECK(!date_time_format->icu_locale().is_null());
- CHECK_NOT_NULL(date_time_format->icu_locale()->raw());
- icu::Locale* icu_locale = date_time_format->icu_locale()->raw();
+ CHECK_NOT_NULL(date_time_format->icu_locale().raw());
+ icu::Locale* icu_locale = date_time_format->icu_locale().raw();
Maybe<std::string> maybe_locale_str = Intl::ToLanguageTag(*icu_locale);
MAYBE_RETURN(maybe_locale_str, MaybeHandle<JSObject>());
std::string locale_str = maybe_locale_str.FromJust();
@@ -369,7 +369,7 @@ MaybeHandle<JSObject> JSDateTimeFormat::ResolvedOptions(
factory->NewStringFromAsciiChecked(locale_str.c_str());
icu::SimpleDateFormat* icu_simple_date_format =
- date_time_format->icu_simple_date_format()->raw();
+ date_time_format->icu_simple_date_format().raw();
// calendar
const icu::Calendar* calendar = icu_simple_date_format->getCalendar();
// getType() returns legacy calendar type name instead of LDML/BCP47 calendar
@@ -580,7 +580,7 @@ MaybeHandle<String> JSDateTimeFormat::DateTimeFormat(
}
// 5. Return FormatDateTime(dtf, x).
icu::SimpleDateFormat* format =
- date_time_format->icu_simple_date_format()->raw();
+ date_time_format->icu_simple_date_format().raw();
return FormatDateTime(isolate, *format, x);
}
@@ -612,7 +612,7 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
String);
}
- double const x = Handle<JSDate>::cast(date)->value()->Number();
+ double const x = Handle<JSDate>::cast(date)->value().Number();
// 2. If x is NaN, return "Invalid Date"
if (std::isnan(x)) {
return factory->Invalid_Date_string();
@@ -640,9 +640,8 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
// 4. Let dateFormat be ? Construct(%DateTimeFormat%, « locales, options »).
Handle<JSFunction> constructor = Handle<JSFunction>(
- JSFunction::cast(isolate->context()
- ->native_context()
- ->intl_date_time_format_function()),
+ JSFunction::cast(
+ isolate->context().native_context().intl_date_time_format_function()),
isolate);
Handle<JSObject> obj;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -658,12 +657,12 @@ MaybeHandle<String> JSDateTimeFormat::ToLocaleDateTime(
if (can_cache) {
isolate->set_icu_object_in_cache(
- cache_type, std::static_pointer_cast<icu::UObject>(
- date_time_format->icu_simple_date_format()->get()));
+ cache_type, std::static_pointer_cast<icu::UMemory>(
+ date_time_format->icu_simple_date_format().get()));
}
// 5. Return FormatDateTime(dateFormat, x).
icu::SimpleDateFormat* format =
- date_time_format->icu_simple_date_format()->raw();
+ date_time_format->icu_simple_date_format().raw();
return FormatDateTime(isolate, *format, x);
}
@@ -779,7 +778,7 @@ MaybeHandle<JSObject> JSDateTimeFormat::ToDateTimeOptions(
MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::UnwrapDateTimeFormat(
Isolate* isolate, Handle<JSReceiver> format_holder) {
Handle<Context> native_context =
- Handle<Context>(isolate->context()->native_context(), isolate);
+ Handle<Context>(isolate->context().native_context(), isolate);
Handle<JSFunction> constructor = Handle<JSFunction>(
JSFunction::cast(native_context->intl_date_time_format_function()),
isolate);
@@ -959,14 +958,41 @@ std::unique_ptr<icu::SimpleDateFormat> CreateICUDateFormatFromCache(
cache.Pointer()->Create(icu_locale, skeleton, generator));
}
-std::unique_ptr<icu::DateIntervalFormat> CreateICUDateIntervalFormat(
- const icu::Locale& icu_locale, const icu::UnicodeString& skeleton) {
+icu::UnicodeString SkeletonFromDateFormat(
+ const icu::SimpleDateFormat& icu_date_format) {
+ icu::UnicodeString pattern;
+ pattern = icu_date_format.toPattern(pattern);
+
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString skeleton =
+ icu::DateTimePatternGenerator::staticGetSkeleton(pattern, status);
+ CHECK(U_SUCCESS(status));
+ return skeleton;
+}
+
+icu::DateIntervalFormat* LazyCreateDateIntervalFormat(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format) {
+ Managed<icu::DateIntervalFormat> managed_format =
+ date_time_format->icu_date_interval_format();
+ if (managed_format.get()) {
+ return managed_format.raw();
+ }
+ icu::SimpleDateFormat* icu_simple_date_format =
+ date_time_format->icu_simple_date_format().raw();
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::DateIntervalFormat> date_interval_format(
- icu::DateIntervalFormat::createInstance(skeleton, icu_locale, status));
- if (U_FAILURE(status)) return std::unique_ptr<icu::DateIntervalFormat>();
- CHECK_NOT_NULL(date_interval_format.get());
- return date_interval_format;
+ icu::DateIntervalFormat::createInstance(
+ SkeletonFromDateFormat(*icu_simple_date_format),
+ *(date_time_format->icu_locale().raw()), status));
+ if (U_FAILURE(status)) {
+ return nullptr;
+ }
+ date_interval_format->setTimeZone(icu_simple_date_format->getTimeZone());
+ Handle<Managed<icu::DateIntervalFormat>> managed_interval_format =
+ Managed<icu::DateIntervalFormat>::FromUniquePtr(
+ isolate, 0, std::move(date_interval_format));
+ date_time_format->set_icu_date_interval_format(*managed_interval_format);
+ return (*managed_interval_format).raw();
}
Intl::HourCycle HourCycleFromPattern(const icu::UnicodeString pattern) {
@@ -1103,18 +1129,6 @@ std::unique_ptr<icu::SimpleDateFormat> DateTimeStylePattern(
generator);
}
-icu::UnicodeString SkeletonFromDateFormat(
- const icu::SimpleDateFormat& icu_date_format) {
- icu::UnicodeString pattern;
- pattern = icu_date_format.toPattern(pattern);
-
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString skeleton =
- icu::DateTimePatternGenerator::staticGetSkeleton(pattern, status);
- CHECK(U_SUCCESS(status));
- return skeleton;
-}
-
class DateTimePatternGeneratorCache {
public:
// Return a clone copy that the caller have to free.
@@ -1146,6 +1160,7 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format,
Handle<Object> locales, Handle<Object> input_options) {
date_time_format->set_flags(0);
+ Factory* factory = isolate->factory();
// 1. Let requestedLocales be ? CanonicalizeLocaleList(locales).
Maybe<std::vector<std::string>> maybe_requested_locales =
Intl::CanonicalizeLocaleList(isolate, locales);
@@ -1163,6 +1178,36 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// 4. Let matcher be ? GetOption(options, "localeMatcher", "string",
// « "lookup", "best fit" », "best fit").
// 5. Set opt.[[localeMatcher]] to matcher.
+
+ std::unique_ptr<char[]> calendar_str = nullptr;
+ std::unique_ptr<char[]> numbering_system_str = nullptr;
+ if (FLAG_harmony_intl_add_calendar_numbering_system) {
+ const std::vector<const char*> empty_values = {};
+ // 6. Let calendar be ? GetOption(options, "calendar",
+ // "string", undefined, undefined).
+ Maybe<bool> maybe_calendar =
+ Intl::GetStringOption(isolate, options, "calendar", empty_values,
+ "Intl.NumberFormat", &calendar_str);
+ MAYBE_RETURN(maybe_calendar, MaybeHandle<JSDateTimeFormat>());
+ if (maybe_calendar.FromJust() && calendar_str != nullptr) {
+ icu::Locale default_locale;
+ if (!Intl::IsValidCalendar(default_locale, calendar_str.get())) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(
+ MessageTemplate::kInvalid, factory->calendar_string(),
+ factory->NewStringFromAsciiChecked(calendar_str.get())),
+ JSDateTimeFormat);
+ }
+ }
+
+ // 8. Let numberingSystem be ? GetOption(options, "numberingSystem",
+ // "string", undefined, undefined).
+ Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
+ isolate, options, "Intl.NumberFormat", &numbering_system_str);
+ MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSDateTimeFormat>());
+ }
+
Maybe<Intl::MatcherOption> maybe_locale_matcher =
Intl::GetLocaleMatcher(isolate, options, "Intl.DateTimeFormat");
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSDateTimeFormat>());
@@ -1206,6 +1251,17 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
icu::Locale icu_locale = r.icu_locale;
DCHECK(!icu_locale.isBogus());
+ UErrorCode status = U_ZERO_ERROR;
+ if (calendar_str != nullptr) {
+ icu_locale.setUnicodeKeywordValue("ca", calendar_str.get(), status);
+ CHECK(U_SUCCESS(status));
+ }
+
+ if (numbering_system_str != nullptr) {
+ icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status);
+ CHECK(U_SUCCESS(status));
+ }
+
// 17. Let timeZone be ? Get(options, "timeZone").
const std::vector<const char*> empty_values;
std::unique_ptr<char[]> timezone = nullptr;
@@ -1216,11 +1272,11 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
std::unique_ptr<icu::TimeZone> tz = CreateTimeZone(isolate, timezone.get());
if (tz.get() == nullptr) {
- THROW_NEW_ERROR(isolate,
- NewRangeError(MessageTemplate::kInvalidTimeZone,
- isolate->factory()->NewStringFromAsciiChecked(
- timezone.get())),
- JSDateTimeFormat);
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidTimeZone,
+ factory->NewStringFromAsciiChecked(timezone.get())),
+ JSDateTimeFormat);
}
std::unique_ptr<icu::Calendar> calendar(
@@ -1229,11 +1285,11 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
// 18.b If the result of IsValidTimeZoneName(timeZone) is false, then
// i. Throw a RangeError exception.
if (calendar.get() == nullptr) {
- THROW_NEW_ERROR(isolate,
- NewRangeError(MessageTemplate::kInvalidTimeZone,
- isolate->factory()->NewStringFromAsciiChecked(
- timezone.get())),
- JSDateTimeFormat);
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidTimeZone,
+ factory->NewStringFromAsciiChecked(timezone.get())),
+ JSDateTimeFormat);
}
static base::LazyInstance<DateTimePatternGeneratorCache>::type
@@ -1243,7 +1299,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
generator_cache.Pointer()->CreateGenerator(icu_locale));
// 15.Let hcDefault be dataLocaleData.[[hourCycle]].
- UErrorCode status = U_ZERO_ERROR;
icu::UnicodeString hour_pattern = generator->getBestPattern("jjmm", status);
CHECK(U_SUCCESS(status));
Intl::HourCycle hc_default = HourCycleFromPattern(hour_pattern);
@@ -1297,7 +1352,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
DateTimeStyle date_style = DateTimeStyle::kUndefined;
DateTimeStyle time_style = DateTimeStyle::kUndefined;
std::unique_ptr<icu::SimpleDateFormat> icu_date_format;
- std::unique_ptr<icu::DateIntervalFormat> icu_date_interval_format;
if (FLAG_harmony_intl_datetime_style) {
// 28. Let dateStyle be ? GetOption(options, "dateStyle", "string", «
@@ -1340,10 +1394,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
time_style != DateTimeStyle::kUndefined) {
icu_date_format = DateTimeStylePattern(date_style, time_style, icu_locale,
hc, *generator);
- if (FLAG_harmony_intl_date_format_range) {
- icu_date_interval_format = CreateICUDateIntervalFormat(
- icu_locale, SkeletonFromDateFormat(*icu_date_format));
- }
}
}
// 33. Else,
@@ -1397,10 +1447,6 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
FATAL("Failed to create ICU date format, are ICU data files missing?");
}
}
- if (FLAG_harmony_intl_date_format_range) {
- icu_date_interval_format =
- CreateICUDateIntervalFormat(icu_locale, skeleton_ustr);
- }
// g. If dateTimeFormat.[[Hour]] is not undefined, then
if (!has_hour_option) {
@@ -1449,12 +1495,10 @@ MaybeHandle<JSDateTimeFormat> JSDateTimeFormat::Initialize(
Managed<icu::SimpleDateFormat>::FromUniquePtr(isolate, 0,
std::move(icu_date_format));
date_time_format->set_icu_simple_date_format(*managed_format);
- if (FLAG_harmony_intl_date_format_range) {
- Handle<Managed<icu::DateIntervalFormat>> managed_interval_format =
- Managed<icu::DateIntervalFormat>::FromUniquePtr(
- isolate, 0, std::move(icu_date_interval_format));
- date_time_format->set_icu_date_interval_format(*managed_interval_format);
- }
+
+ Handle<Managed<icu::DateIntervalFormat>> managed_interval_format =
+ Managed<icu::DateIntervalFormat>::FromRawPtr(isolate, 0, nullptr);
+ date_time_format->set_icu_date_interval_format(*managed_interval_format);
return date_time_format;
}
@@ -1518,7 +1562,7 @@ MaybeHandle<JSArray> JSDateTimeFormat::FormatToParts(
double date_value) {
Factory* factory = isolate->factory();
icu::SimpleDateFormat* format =
- date_time_format->icu_simple_date_format()->raw();
+ date_time_format->icu_simple_date_format().raw();
CHECK_NOT_NULL(format);
icu::UnicodeString formatted;
@@ -1591,75 +1635,176 @@ Handle<String> JSDateTimeFormat::HourCycleAsString() const {
}
}
-MaybeHandle<String> JSDateTimeFormat::FormatRange(
- Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
- double y) {
- // TODO(ftang): Merge the following with FormatRangeToParts after
- // the landing of ICU64 to make it cleaner.
+enum Source { kShared, kStartRange, kEndRange };
- // #sec-partitiondatetimerangepattern
- // 1. Let x be TimeClip(x).
- x = DateCache::TimeClip(x);
- // 2. If x is NaN, throw a RangeError exception.
- if (std::isnan(x)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
- String);
+namespace {
+
+class SourceTracker {
+ public:
+ SourceTracker() { start_[0] = start_[1] = limit_[0] = limit_[1] = 0; }
+ void Add(int32_t field, int32_t start, int32_t limit) {
+ CHECK_LT(field, 2);
+ start_[field] = start;
+ limit_[field] = limit;
}
- // 3. Let y be TimeClip(y).
- y = DateCache::TimeClip(y);
- // 4. If y is NaN, throw a RangeError exception.
- if (std::isnan(y)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
- String);
+
+ Source GetSource(int32_t start, int32_t limit) const {
+ Source source = Source::kShared;
+ if (FieldContains(0, start, limit)) {
+ source = Source::kStartRange;
+ } else if (FieldContains(1, start, limit)) {
+ source = Source::kEndRange;
+ }
+ return source;
}
- icu::DateIntervalFormat* date_interval_format =
- date_time_format->icu_date_interval_format()->raw();
- CHECK_NOT_NULL(date_interval_format);
- icu::DateInterval interval(x, y);
+ private:
+ int32_t start_[2];
+ int32_t limit_[2];
- icu::UnicodeString result;
- icu::FieldPosition fpos;
+ bool FieldContains(int32_t field, int32_t start, int32_t limit) const {
+ CHECK_LT(field, 2);
+ return (start_[field] <= start) && (start <= limit_[field]) &&
+ (start_[field] <= limit) && (limit <= limit_[field]);
+ }
+};
+
+Handle<String> SourceString(Isolate* isolate, Source source) {
+ switch (source) {
+ case Source::kShared:
+ return ReadOnlyRoots(isolate).shared_string_handle();
+ case Source::kStartRange:
+ return ReadOnlyRoots(isolate).startRange_string_handle();
+ case Source::kEndRange:
+ return ReadOnlyRoots(isolate).endRange_string_handle();
+ UNREACHABLE();
+ }
+}
+
+Maybe<bool> AddPartForFormatRange(Isolate* isolate, Handle<JSArray> array,
+ const icu::UnicodeString& string,
+ int32_t index, int32_t field, int32_t start,
+ int32_t end, const SourceTracker& tracker) {
+ Handle<String> substring;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, substring,
+ Intl::ToString(isolate, string, start, end),
+ Nothing<bool>());
+ Intl::AddElement(isolate, array, index,
+ IcuDateFieldIdToDateType(field, isolate), substring,
+ isolate->factory()->source_string(),
+ SourceString(isolate, tracker.GetSource(start, end)));
+ return Just(true);
+}
+
+// A helper function to convert the FormattedDateInterval to a
+// MaybeHandle<JSArray> for the implementation of formatRangeToParts.
+MaybeHandle<JSArray> FormattedDateIntervalToJSArray(
+ Isolate* isolate, const icu::FormattedValue& formatted) {
UErrorCode status = U_ZERO_ERROR;
- date_interval_format->format(&interval, result, fpos, status);
- CHECK(U_SUCCESS(status));
+ icu::UnicodeString result = formatted.toString(status);
- return Intl::ToString(isolate, result);
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(0);
+ icu::ConstrainedFieldPosition cfpos;
+ int index = 0;
+ int32_t previous_end_pos = 0;
+ SourceTracker tracker;
+ while (formatted.nextPosition(cfpos, status)) {
+ int32_t category = cfpos.getCategory();
+ int32_t field = cfpos.getField();
+ int32_t start = cfpos.getStart();
+ int32_t limit = cfpos.getLimit();
+
+ if (category == UFIELD_CATEGORY_DATE_INTERVAL_SPAN) {
+ CHECK_LE(field, 2);
+ tracker.Add(field, start, limit);
+ } else {
+ CHECK(category == UFIELD_CATEGORY_DATE);
+ if (start > previous_end_pos) {
+ // Add "literal" from the previous end position to the start if
+ // necessary.
+ Maybe<bool> maybe_added =
+ AddPartForFormatRange(isolate, array, result, index, -1,
+ previous_end_pos, start, tracker);
+ MAYBE_RETURN(maybe_added, Handle<JSArray>());
+ previous_end_pos = start;
+ index++;
+ }
+ Maybe<bool> maybe_added = AddPartForFormatRange(
+ isolate, array, result, index, field, start, limit, tracker);
+ MAYBE_RETURN(maybe_added, Handle<JSArray>());
+ previous_end_pos = limit;
+ ++index;
+ }
+ }
+ int32_t end = result.length();
+ // Add "literal" in the end if necessary.
+ if (end > previous_end_pos) {
+ Maybe<bool> maybe_added = AddPartForFormatRange(
+ isolate, array, result, index, -1, previous_end_pos, end, tracker);
+ MAYBE_RETURN(maybe_added, Handle<JSArray>());
+ }
+
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), JSArray);
+ }
+
+ JSObject::ValidateElements(*array);
+ return array;
}
-MaybeHandle<JSArray> JSDateTimeFormat::FormatRangeToParts(
+// The shared code between formatRange and formatRangeToParts
+template <typename T>
+MaybeHandle<T> FormatRangeCommon(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
- double y) {
- // TODO(ftang): Merge the following with FormatRangeToParts after
- // the landing of ICU64 to make it cleaner.
-
+ double y,
+ MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
// #sec-partitiondatetimerangepattern
// 1. Let x be TimeClip(x).
x = DateCache::TimeClip(x);
// 2. If x is NaN, throw a RangeError exception.
if (std::isnan(x)) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
- JSArray);
+ T);
}
// 3. Let y be TimeClip(y).
y = DateCache::TimeClip(y);
// 4. If y is NaN, throw a RangeError exception.
if (std::isnan(y)) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidTimeValue),
- JSArray);
+ T);
}
+ icu::DateInterval interval(x, y);
- icu::DateIntervalFormat* date_interval_format =
- date_time_format->icu_date_interval_format()->raw();
- CHECK_NOT_NULL(date_interval_format);
- Factory* factory = isolate->factory();
- Handle<JSArray> result = factory->NewJSArray(0);
+ icu::DateIntervalFormat* format =
+ LazyCreateDateIntervalFormat(isolate, date_time_format);
+ if (format == nullptr) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), T);
+ }
- // TODO(ftang) To be implemented after ICU64 landed that support
- // DateIntervalFormat::formatToValue() and FormattedDateInterval.
+ UErrorCode status = U_ZERO_ERROR;
+ icu::FormattedDateInterval formatted =
+ format->formatToValue(interval, status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), T);
+ }
+ return formatToResult(isolate, formatted);
+}
- JSObject::ValidateElements(*result);
- return result;
+} // namespace
+
+MaybeHandle<String> JSDateTimeFormat::FormatRange(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
+ double y) {
+ return FormatRangeCommon<String>(isolate, date_time_format, x, y,
+ Intl::FormattedToString);
+}
+
+MaybeHandle<JSArray> JSDateTimeFormat::FormatRangeToParts(
+ Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
+ double y) {
+ return FormatRangeCommon<JSArray>(isolate, date_time_format, x, y,
+ FormattedDateIntervalToJSArray);
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-date-time-format.h b/deps/v8/src/objects/js-date-time-format.h
index cf73af2aa8..664ccdcdf7 100644
--- a/deps/v8/src/objects/js-date-time-format.h
+++ b/deps/v8/src/objects/js-date-time-format.h
@@ -12,9 +12,10 @@
#include <set>
#include <string>
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
+#include "torque-generated/field-offsets-tq.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -92,18 +93,8 @@ class JSDateTimeFormat : public JSObject {
enum class DateTimeStyle { kUndefined, kFull, kLong, kMedium, kShort };
// Layout description.
-#define JS_DATE_TIME_FORMAT_FIELDS(V) \
- V(kICULocaleOffset, kTaggedSize) \
- V(kICUSimpleDateFormatOffset, kTaggedSize) \
- V(kICUDateIntervalFormatOffset, kTaggedSize) \
- V(kBoundFormatOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_DATE_TIME_FORMAT_FIELDS)
-#undef JS_DATE_TIME_FORMAT_FIELDS
+ TORQUE_GENERATED_JSDATE_TIME_FORMAT_FIELDS)
inline void set_hour_cycle(Intl::HourCycle hour_cycle);
inline Intl::HourCycle hour_cycle() const;
diff --git a/deps/v8/src/objects/js-generator-inl.h b/deps/v8/src/objects/js-generator-inl.h
index c2895e29f9..d0fe2cd90e 100644
--- a/deps/v8/src/objects/js-generator-inl.h
+++ b/deps/v8/src/objects/js-generator-inl.h
@@ -8,7 +8,7 @@
#include "src/objects/js-generator.h"
#include "src/objects/js-promise-inl.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-list-format-inl.h b/deps/v8/src/objects/js-list-format-inl.h
index dd3f4dceb9..96e61c2205 100644
--- a/deps/v8/src/objects/js-list-format-inl.h
+++ b/deps/v8/src/objects/js-list-format-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_LIST_FORMAT_INL_H_
#define V8_OBJECTS_JS_LIST_FORMAT_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-list-format.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,7 +23,7 @@ OBJECT_CONSTRUCTORS_IMPL(JSListFormat, JSObject)
// Base list format accessors.
ACCESSORS(JSListFormat, locale, String, kLocaleOffset)
ACCESSORS(JSListFormat, icu_formatter, Managed<icu::ListFormatter>,
- kICUFormatterOffset)
+ kIcuFormatterOffset)
SMI_ACCESSORS(JSListFormat, flags, kFlagsOffset)
inline void JSListFormat::set_style(Style style) {
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index c4329401a4..84691194ec 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -11,15 +11,15 @@
#include <memory>
#include <vector>
-#include "src/elements-inl.h"
-#include "src/elements.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/elements-inl.h"
+#include "src/objects/elements.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-list-format-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/objects-inl.h"
#include "unicode/fieldpos.h"
#include "unicode/fpositer.h"
#include "unicode/listformatter.h"
@@ -286,8 +286,9 @@ Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
factory->NewNumber(i), factory->String_string()),
Nothing<std::vector<icu::UnicodeString>>());
}
- result.push_back(
- Intl::ToICUUnicodeString(isolate, Handle<String>::cast(item)));
+ Handle<String> item_str = Handle<String>::cast(item);
+ if (!item_str->IsFlat()) item_str = String::Flatten(isolate, item_str);
+ result.push_back(Intl::ToICUUnicodeString(isolate, item_str));
}
DCHECK(!array->HasDictionaryElements());
return Just(result);
@@ -296,7 +297,7 @@ Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
template <typename T>
MaybeHandle<T> FormatListCommon(
Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list,
- MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedList&)) {
+ MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
DCHECK(!list->IsUndefined());
// ecma402 #sec-createpartsfromlist
// 2. If list contains any element value such that Type(value) is not String,
@@ -306,7 +307,7 @@ MaybeHandle<T> FormatListCommon(
MAYBE_RETURN(maybe_array, Handle<T>());
std::vector<icu::UnicodeString> array = maybe_array.FromJust();
- icu::ListFormatter* formatter = format->icu_formatter()->raw();
+ icu::ListFormatter* formatter = format->icu_formatter().raw();
CHECK_NOT_NULL(formatter);
UErrorCode status = U_ZERO_ERROR;
@@ -318,18 +319,6 @@ MaybeHandle<T> FormatListCommon(
return formatToResult(isolate, formatted);
}
-// A helper function to convert the FormattedList to a
-// MaybeHandle<String> for the implementation of format.
-MaybeHandle<String> FormattedToString(Isolate* isolate,
- const icu::FormattedList& formatted) {
- UErrorCode status = U_ZERO_ERROR;
- icu::UnicodeString result = formatted.toString(status);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kIcuError), String);
- }
- return Intl::ToString(isolate, result);
-}
-
Handle<String> IcuFieldIdToType(Isolate* isolate, int32_t field_id) {
switch (field_id) {
case ULISTFMT_LITERAL_FIELD:
@@ -345,8 +334,8 @@ Handle<String> IcuFieldIdToType(Isolate* isolate, int32_t field_id) {
// A helper function to convert the FormattedList to a
// MaybeHandle<JSArray> for the implementation of formatToParts.
-MaybeHandle<JSArray> FormattedToJSArray(Isolate* isolate,
- const icu::FormattedList& formatted) {
+MaybeHandle<JSArray> FormattedListToJSArray(
+ Isolate* isolate, const icu::FormattedValue& formatted) {
Handle<JSArray> array = isolate->factory()->NewJSArray(0);
icu::ConstrainedFieldPosition cfpos;
cfpos.constrainCategory(UFIELD_CATEGORY_LIST);
@@ -375,13 +364,15 @@ MaybeHandle<JSArray> FormattedToJSArray(Isolate* isolate,
MaybeHandle<String> JSListFormat::FormatList(Isolate* isolate,
Handle<JSListFormat> format,
Handle<JSArray> list) {
- return FormatListCommon<String>(isolate, format, list, FormattedToString);
+ return FormatListCommon<String>(isolate, format, list,
+ Intl::FormattedToString);
}
// ecma42 #sec-formatlisttoparts
MaybeHandle<JSArray> JSListFormat::FormatListToParts(
Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list) {
- return FormatListCommon<JSArray>(isolate, format, list, FormattedToJSArray);
+ return FormatListCommon<JSArray>(isolate, format, list,
+ FormattedListToJSArray);
}
const std::set<std::string>& JSListFormat::GetAvailableLocales() {
diff --git a/deps/v8/src/objects/js-list-format.h b/deps/v8/src/objects/js-list-format.h
index ee576b3ff2..0284d05d42 100644
--- a/deps/v8/src/objects/js-list-format.h
+++ b/deps/v8/src/objects/js-list-format.h
@@ -12,10 +12,10 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -105,15 +105,8 @@ class JSListFormat : public JSObject {
DECL_VERIFIER(JSListFormat)
// Layout description.
-#define JS_LIST_FORMAT_FIELDS(V) \
- V(kLocaleOffset, kTaggedSize) \
- V(kICUFormatterOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_LIST_FORMAT_FIELDS)
-#undef JS_LIST_FORMAT_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSLIST_FORMAT_FIELDS)
OBJECT_CONSTRUCTORS(JSListFormat, JSObject);
};
diff --git a/deps/v8/src/objects/js-locale-inl.h b/deps/v8/src/objects/js-locale-inl.h
index 44e223ef06..17859ea6ab 100644
--- a/deps/v8/src/objects/js-locale-inl.h
+++ b/deps/v8/src/objects/js-locale-inl.h
@@ -9,9 +9,9 @@
#ifndef V8_OBJECTS_JS_LOCALE_INL_H_
#define V8_OBJECTS_JS_LOCALE_INL_H_
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
#include "src/objects/js-locale.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -21,7 +21,7 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSLocale, JSObject)
-ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kICULocaleOffset)
+ACCESSORS(JSLocale, icu_locale, Managed<icu::Locale>, kIcuLocaleOffset)
CAST_ACCESSOR(JSLocale)
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 4e35c16b0f..509f9a3069 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -13,14 +13,15 @@
#include <string>
#include <vector>
-#include "src/api.h"
-#include "src/global-handles.h"
+#include "src/api/api.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-locale-inl.h"
+#include "src/objects/objects-inl.h"
#include "unicode/char16ptr.h"
+#include "unicode/localebuilder.h"
#include "unicode/locid.h"
#include "unicode/uloc.h"
#include "unicode/unistr.h"
@@ -30,21 +31,6 @@ namespace internal {
namespace {
-// Helper function to check a locale is valid. It will return false if
-// the length of the extension fields are incorrect. For example, en-u-a or
-// en-u-co-b will return false.
-bool IsValidLocale(const icu::Locale& locale) {
- // icu::Locale::toLanguageTag won't return U_STRING_NOT_TERMINATED_WARNING for
- // incorrect locale yet. So we still need the following uloc_toLanguageTag
- // TODO(ftang): Change to use icu::Locale::toLanguageTag once it indicate
- // error.
- char result[ULOC_FULLNAME_CAPACITY];
- UErrorCode status = U_ZERO_ERROR;
- uloc_toLanguageTag(locale.getName(), result, ULOC_FULLNAME_CAPACITY, true,
- &status);
- return U_SUCCESS(status) && status != U_STRING_NOT_TERMINATED_WARNING;
-}
-
struct OptionData {
const char* name;
const char* key;
@@ -55,9 +41,8 @@ struct OptionData {
// Inserts tags from options into locale string.
Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
Handle<JSReceiver> options,
- icu::Locale* icu_locale) {
+ icu::LocaleBuilder* builder) {
CHECK(isolate);
- CHECK(!icu_locale->isBogus());
const std::vector<const char*> hour_cycle_values = {"h11", "h12", "h23",
"h24"};
@@ -75,7 +60,6 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
// TODO(cira): Pass in values as per the spec to make this to be
// spec compliant.
- UErrorCode status = U_ZERO_ERROR;
for (const auto& option_to_bcp47 : kOptionToUnicodeTagMap) {
std::unique_ptr<char[]> value_str = nullptr;
bool value_bool = false;
@@ -99,32 +83,18 @@ Maybe<bool> InsertOptionsIntoLocale(Isolate* isolate,
DCHECK_NOT_NULL(value_str.get());
// Overwrite existing, or insert new key-value to the locale string.
- if (uloc_toLegacyType(uloc_toLegacyKey(option_to_bcp47.key),
- value_str.get())) {
- // Only call setUnicodeKeywordValue if that value is a valid one.
- icu_locale->setUnicodeKeywordValue(option_to_bcp47.key, value_str.get(),
- status);
- if (U_FAILURE(status)) {
- return Just(false);
- }
- } else {
+ if (!uloc_toLegacyType(uloc_toLegacyKey(option_to_bcp47.key),
+ value_str.get())) {
return Just(false);
}
+ builder->setUnicodeLocaleKeyword(option_to_bcp47.key, value_str.get());
}
-
- // Check all the unicode extension fields are in the right length.
- if (!IsValidLocale(*icu_locale)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<bool>());
- }
-
return Just(true);
}
Handle<Object> UnicodeKeywordValue(Isolate* isolate, Handle<JSLocale> locale,
const char* key) {
- icu::Locale* icu_locale = locale->icu_locale()->raw();
+ icu::Locale* icu_locale = locale->icu_locale().raw();
UErrorCode status = U_ZERO_ERROR;
std::string value =
icu_locale->getUnicodeKeywordValue<std::string>(key, status);
@@ -237,32 +207,29 @@ bool StartsWithUnicodeLanguageId(const std::string& value) {
return true;
}
-Maybe<std::string> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
- Handle<JSReceiver> options) {
+Maybe<bool> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
+ Handle<JSReceiver> options,
+ icu::LocaleBuilder* builder) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
if (tag->length() == 0) {
THROW_NEW_ERROR_RETURN_VALUE(
isolate, NewRangeError(MessageTemplate::kLocaleNotEmpty),
- Nothing<std::string>());
+ Nothing<bool>());
}
v8::String::Utf8Value bcp47_tag(v8_isolate, v8::Utils::ToLocal(tag));
+ builder->setLanguageTag({*bcp47_tag, bcp47_tag.length()});
CHECK_LT(0, bcp47_tag.length());
CHECK_NOT_NULL(*bcp47_tag);
// 2. If IsStructurallyValidLanguageTag(tag) is false, throw a RangeError
// exception.
if (!StartsWithUnicodeLanguageId(*bcp47_tag)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ return Just(false);
}
UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale =
- icu::Locale::forLanguageTag({*bcp47_tag, bcp47_tag.length()}, status);
+ builder->build(status);
if (U_FAILURE(status)) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ return Just(false);
}
// 3. Let language be ? GetOption(options, "language", "string", undefined,
@@ -272,15 +239,16 @@ Maybe<std::string> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Maybe<bool> maybe_language =
Intl::GetStringOption(isolate, options, "language", empty_values,
"ApplyOptionsToTag", &language_str);
- MAYBE_RETURN(maybe_language, Nothing<std::string>());
+ MAYBE_RETURN(maybe_language, Nothing<bool>());
// 4. If language is not undefined, then
if (maybe_language.FromJust()) {
+ builder->setLanguage(language_str.get());
+ builder->build(status);
// a. If language does not match the unicode_language_subtag production,
// throw a RangeError exception.
- if (!IsUnicodeLanguageSubtag(language_str.get())) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ if (U_FAILURE(status) || language_str[0] == '\0' ||
+ IsAlpha(language_str.get(), 4, 4)) {
+ return Just(false);
}
}
// 5. Let script be ? GetOption(options, "script", "string", undefined,
@@ -289,15 +257,15 @@ Maybe<std::string> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Maybe<bool> maybe_script =
Intl::GetStringOption(isolate, options, "script", empty_values,
"ApplyOptionsToTag", &script_str);
- MAYBE_RETURN(maybe_script, Nothing<std::string>());
+ MAYBE_RETURN(maybe_script, Nothing<bool>());
// 6. If script is not undefined, then
if (maybe_script.FromJust()) {
+ builder->setScript(script_str.get());
+ builder->build(status);
// a. If script does not match the unicode_script_subtag production, throw
// a RangeError exception.
- if (!IsUnicodeScriptSubtag(script_str.get())) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ if (U_FAILURE(status) || script_str[0] == '\0') {
+ return Just(false);
}
}
// 7. Let region be ? GetOption(options, "region", "string", undefined,
@@ -306,85 +274,41 @@ Maybe<std::string> ApplyOptionsToTag(Isolate* isolate, Handle<String> tag,
Maybe<bool> maybe_region =
Intl::GetStringOption(isolate, options, "region", empty_values,
"ApplyOptionsToTag", &region_str);
- MAYBE_RETURN(maybe_region, Nothing<std::string>());
+ MAYBE_RETURN(maybe_region, Nothing<bool>());
// 8. If region is not undefined, then
if (maybe_region.FromJust()) {
// a. If region does not match the region production, throw a RangeError
// exception.
- if (!IsUnicodeRegionSubtag(region_str.get())) {
- THROW_NEW_ERROR_RETURN_VALUE(
- isolate, NewRangeError(MessageTemplate::kLocaleBadParameters),
- Nothing<std::string>());
+ builder->setRegion(region_str.get());
+ builder->build(status);
+ if (U_FAILURE(status) || region_str[0] == '\0') {
+ return Just(false);
}
}
- // 9. Set tag to CanonicalizeLanguageTag(tag).
- Maybe<std::string> maybe_canonicalized =
- Intl::CanonicalizeLanguageTag(isolate, tag);
- MAYBE_RETURN(maybe_canonicalized, Nothing<std::string>());
-
- std::vector<std::string> tokens;
- std::string token;
- std::istringstream token_stream(maybe_canonicalized.FromJust());
- while (std::getline(token_stream, token, '-')) {
- tokens.push_back(token);
- }
+ // 9. Set tag to CanonicalizeLanguageTag(tag).
// 10. If language is not undefined,
- std::string locale_str;
- if (maybe_language.FromJust()) {
- // a. Assert: tag matches the unicode_locale_id production.
- // b. Set tag to tag with the substring corresponding to the
- // unicode_language_subtag production replaced by the string language.
- tokens[0] = language_str.get();
- }
-
+ // a. Assert: tag matches the unicode_locale_id production.
+ // b. Set tag to tag with the substring corresponding to the
+ // unicode_language_subtag production replaced by the string language.
// 11. If script is not undefined, then
- if (maybe_script.FromJust()) {
- // a. If tag does not contain a unicode_script_subtag production, then
- if (tokens.size() < 2 || !IsUnicodeScriptSubtag(tokens[1])) {
- // i. Set tag to the concatenation of the unicode_language_subtag
- // production of tag, "-", script, and the rest of tag.
- tokens.insert(tokens.begin() + 1, script_str.get());
- // b. Else,
- } else {
- // i. Set tag to tag with the substring corresponding to the
- // unicode_script_subtag production replaced by the string script.
- tokens[1] = script_str.get();
- }
- }
+ // a. If tag does not contain a unicode_script_subtag production, then
+ // i. Set tag to the concatenation of the unicode_language_subtag
+ // production of tag, "-", script, and the rest of tag.
+ // b. Else,
+ // i. Set tag to tag with the substring corresponding to the
+ // unicode_script_subtag production replaced by the string script.
// 12. If region is not undefined, then
- if (maybe_region.FromJust()) {
- // a. If tag does not contain a unicode_region_subtag production, then
- // i. Set tag to the concatenation of the unicode_language_subtag
- // production of tag, the substring corresponding to the "-"
- // unicode_script_subtag production if present, "-", region, and
- // the rest of tag.
- // b. Else,
- // i. Set tag to tag with the substring corresponding to the
- // unicode_region_subtag production replaced by the string region.
- if (tokens.size() > 1 && IsUnicodeRegionSubtag(tokens[1])) {
- tokens[1] = region_str.get();
- } else if (tokens.size() > 1 && IsUnicodeScriptSubtag(tokens[1])) {
- if (tokens.size() > 2 && IsUnicodeRegionSubtag(tokens[2])) {
- tokens[2] = region_str.get();
- } else {
- tokens.insert(tokens.begin() + 2, region_str.get());
- }
- } else {
- tokens.insert(tokens.begin() + 1, region_str.get());
- }
- }
-
- std::string replaced;
- for (auto it = tokens.begin(); it != tokens.end(); it++) {
- replaced += *it;
- if (it + 1 != tokens.end()) {
- replaced += '-';
- }
- }
-
+ // a. If tag does not contain a unicode_region_subtag production, then
+ // i. Set tag to the concatenation of the unicode_language_subtag
+ // production of tag, the substring corresponding to the "-"
+ // unicode_script_subtag production if present, "-", region, and
+ // the rest of tag.
+ // b. Else,
+ // i. Set tag to tag with the substring corresponding to the
+ // unicode_region_subtag production replaced by the string region.
// 13. Return CanonicalizeLanguageTag(tag).
- return Intl::CanonicalizeLanguageTag(isolate, replaced);
+ return Just(true);
}
} // namespace
@@ -393,21 +317,22 @@ MaybeHandle<JSLocale> JSLocale::Initialize(Isolate* isolate,
Handle<JSLocale> locale,
Handle<String> locale_str,
Handle<JSReceiver> options) {
- Maybe<std::string> maybe_locale =
- ApplyOptionsToTag(isolate, locale_str, options);
- MAYBE_RETURN(maybe_locale, MaybeHandle<JSLocale>());
- UErrorCode status = U_ZERO_ERROR;
- icu::Locale icu_locale =
- icu::Locale::forLanguageTag(maybe_locale.FromJust().c_str(), status);
- if (U_FAILURE(status)) {
+ icu::LocaleBuilder builder;
+ Maybe<bool> maybe_apply =
+ ApplyOptionsToTag(isolate, locale_str, options, &builder);
+ MAYBE_RETURN(maybe_apply, MaybeHandle<JSLocale>());
+ if (!maybe_apply.FromJust()) {
THROW_NEW_ERROR(isolate,
NewRangeError(MessageTemplate::kLocaleBadParameters),
JSLocale);
}
- Maybe<bool> error = InsertOptionsIntoLocale(isolate, options, &icu_locale);
- MAYBE_RETURN(error, MaybeHandle<JSLocale>());
- if (!error.FromJust()) {
+ Maybe<bool> maybe_insert =
+ InsertOptionsIntoLocale(isolate, options, &builder);
+ MAYBE_RETURN(maybe_insert, MaybeHandle<JSLocale>());
+ UErrorCode status = U_ZERO_ERROR;
+ icu::Locale icu_locale = builder.build(status);
+ if (!maybe_insert.FromJust() || U_FAILURE(status)) {
THROW_NEW_ERROR(isolate,
NewRangeError(MessageTemplate::kLocaleBadParameters),
JSLocale);
@@ -458,28 +383,28 @@ Handle<String> JSLocale::Minimize(Isolate* isolate, String locale) {
Handle<Object> JSLocale::Language(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
- const char* language = locale->icu_locale()->raw()->getLanguage();
+ const char* language = locale->icu_locale().raw()->getLanguage();
if (strlen(language) == 0) return factory->undefined_value();
return factory->NewStringFromAsciiChecked(language);
}
Handle<Object> JSLocale::Script(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
- const char* script = locale->icu_locale()->raw()->getScript();
+ const char* script = locale->icu_locale().raw()->getScript();
if (strlen(script) == 0) return factory->undefined_value();
return factory->NewStringFromAsciiChecked(script);
}
Handle<Object> JSLocale::Region(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
- const char* region = locale->icu_locale()->raw()->getCountry();
+ const char* region = locale->icu_locale().raw()->getCountry();
if (strlen(region) == 0) return factory->undefined_value();
return factory->NewStringFromAsciiChecked(region);
}
Handle<String> JSLocale::BaseName(Isolate* isolate, Handle<JSLocale> locale) {
icu::Locale icu_locale =
- icu::Locale::createFromName(locale->icu_locale()->raw()->getBaseName());
+ icu::Locale::createFromName(locale->icu_locale().raw()->getBaseName());
std::string base_name = Intl::ToLanguageTag(icu_locale).FromJust();
return isolate->factory()->NewStringFromAsciiChecked(base_name.c_str());
}
@@ -502,7 +427,7 @@ Handle<Object> JSLocale::HourCycle(Isolate* isolate, Handle<JSLocale> locale) {
Handle<Object> JSLocale::Numeric(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
- icu::Locale* icu_locale = locale->icu_locale()->raw();
+ icu::Locale* icu_locale = locale->icu_locale().raw();
UErrorCode status = U_ZERO_ERROR;
std::string numeric =
icu_locale->getUnicodeKeywordValue<std::string>("kn", status);
@@ -515,7 +440,7 @@ Handle<Object> JSLocale::NumberingSystem(Isolate* isolate,
}
std::string JSLocale::ToString(Handle<JSLocale> locale) {
- icu::Locale* icu_locale = locale->icu_locale()->raw();
+ icu::Locale* icu_locale = locale->icu_locale().raw();
return Intl::ToLanguageTag(*icu_locale).FromJust();
}
diff --git a/deps/v8/src/objects/js-locale.h b/deps/v8/src/objects/js-locale.h
index 120ddeb965..1a833e0e18 100644
--- a/deps/v8/src/objects/js-locale.h
+++ b/deps/v8/src/objects/js-locale.h
@@ -9,11 +9,11 @@
#ifndef V8_OBJECTS_JS_LOCALE_H_
#define V8_OBJECTS_JS_LOCALE_H_
-#include "src/global-handles.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -58,12 +58,8 @@ class JSLocale : public JSObject {
DECL_VERIFIER(JSLocale)
// Layout description.
-#define JS_LOCALE_FIELDS(V) \
- V(kICULocaleOffset, kTaggedSize) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_LOCALE_FIELDS)
-#undef JS_LOCALE_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSLOCALE_FIELDS)
OBJECT_CONSTRUCTORS(JSLocale, JSObject);
};
diff --git a/deps/v8/src/objects/js-number-format-inl.h b/deps/v8/src/objects/js-number-format-inl.h
index 3edf6f1ea3..bd76dfe556 100644
--- a/deps/v8/src/objects/js-number-format-inl.h
+++ b/deps/v8/src/objects/js-number-format-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_NUMBER_FORMAT_INL_H_
#define V8_OBJECTS_JS_NUMBER_FORMAT_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-number-format.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -21,35 +21,51 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(JSNumberFormat, JSObject)
ACCESSORS(JSNumberFormat, locale, String, kLocaleOffset)
-ACCESSORS(JSNumberFormat, icu_number_format, Managed<icu::NumberFormat>,
- kICUNumberFormatOffset)
+ACCESSORS(JSNumberFormat, icu_number_formatter,
+ Managed<icu::number::LocalizedNumberFormatter>,
+ kIcuNumberFormatterOffset)
ACCESSORS(JSNumberFormat, bound_format, Object, kBoundFormatOffset)
+
+// Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits
+// uncondictionally while the unified number proposal eventually will only
+// record either (Min|Max)imumFractionDigits or (Min|Max)imumSignaficantDigits
+// Since LocalizedNumberFormatter can only remember one set, and during
+// 2019-1-17 ECMA402 meeting that the committee decide not to take a PR to
+// address that prior to the unified number proposal, we have to add these two
+// 5 bits int into flags to remember the (Min|Max)imumFractionDigits while
+// (Min|Max)imumSignaficantDigits is present.
+// TODO(ftang) remove the following once we ship int-number-format-unified
+// * SMI_ACCESSORS of flags
+// * Four inline functions: (set_)?(min|max)imum_fraction_digits
+
SMI_ACCESSORS(JSNumberFormat, flags, kFlagsOffset)
-inline void JSNumberFormat::set_style(Style style) {
- DCHECK_LT(style, Style::COUNT);
+inline int JSNumberFormat::minimum_fraction_digits() const {
+ return MinimumFractionDigitsBits::decode(flags());
+}
+
+inline void JSNumberFormat::set_minimum_fraction_digits(int digits) {
+ DCHECK_GE(MinimumFractionDigitsBits::kMax, digits);
+ DCHECK_LE(0, digits);
+ DCHECK_GE(20, digits);
int hints = flags();
- hints = StyleBits::update(hints, style);
+ hints = MinimumFractionDigitsBits::update(hints, digits);
set_flags(hints);
}
-inline JSNumberFormat::Style JSNumberFormat::style() const {
- return StyleBits::decode(flags());
+inline int JSNumberFormat::maximum_fraction_digits() const {
+ return MaximumFractionDigitsBits::decode(flags());
}
-inline void JSNumberFormat::set_currency_display(
- CurrencyDisplay currency_display) {
- DCHECK_LT(currency_display, CurrencyDisplay::COUNT);
+inline void JSNumberFormat::set_maximum_fraction_digits(int digits) {
+ DCHECK_GE(MaximumFractionDigitsBits::kMax, digits);
+ DCHECK_LE(0, digits);
+ DCHECK_GE(20, digits);
int hints = flags();
- hints = CurrencyDisplayBits::update(hints, currency_display);
+ hints = MaximumFractionDigitsBits::update(hints, digits);
set_flags(hints);
}
-inline JSNumberFormat::CurrencyDisplay JSNumberFormat::currency_display()
- const {
- return CurrencyDisplayBits::decode(flags());
-}
-
CAST_ACCESSOR(JSNumberFormat)
} // namespace internal
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index c490eeef57..67d545e0be 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -11,34 +11,269 @@
#include <set>
#include <string>
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format-inl.h"
+#include "src/objects/objects-inl.h"
+#include "unicode/currunit.h"
#include "unicode/decimfmt.h"
#include "unicode/locid.h"
+#include "unicode/nounit.h"
+#include "unicode/numberformatter.h"
#include "unicode/numfmt.h"
+#include "unicode/ucurr.h"
#include "unicode/uloc.h"
+#include "unicode/unumberformatter.h"
+#include "unicode/uvernum.h" // for U_ICU_VERSION_MAJOR_NUM
namespace v8 {
namespace internal {
namespace {
-UNumberFormatStyle ToNumberFormatStyle(
- JSNumberFormat::CurrencyDisplay currency_display) {
+// [[Style]] is one of the values "decimal", "percent", "currency",
+// or "unit" identifying the style of the number format.
+// Note: "unit" is added in proposal-unified-intl-numberformat
+enum class Style {
+ DECIMAL,
+ PERCENT,
+ CURRENCY,
+ UNIT,
+};
+
+// [[CurrencyDisplay]] is one of the values "code", "symbol", "name",
+// or "narrow-symbol" identifying the display of the currency number format.
+// Note: "narrow-symbol" is added in proposal-unified-intl-numberformat
+enum class CurrencyDisplay {
+ CODE,
+ SYMBOL,
+ NAME,
+ NARROW_SYMBOL,
+};
+
+// [[CurrencySign]] is one of the String values "standard" or "accounting",
+// specifying whether to render negative numbers in accounting format, often
+// signified by parenthesis. It is only used when [[Style]] has the value
+// "currency" and when [[SignDisplay]] is not "never".
+enum class CurrencySign {
+ STANDARD,
+ ACCOUNTING,
+};
+
+// [[UnitDisplay]] is one of the String values "short", "narrow", or "long",
+// specifying whether to display the unit as a symbol, narrow symbol, or
+// localized long name if formatting with the "unit" or "percent" style. It is
+// only used when [[Style]] has the value "unit" or "percent".
+enum class UnitDisplay {
+ SHORT,
+ NARROW,
+ LONG,
+};
+
+// [[Notation]] is one of the String values "standard", "scientific",
+// "engineering", or "compact", specifying whether the number should be
+// displayed without scaling, scaled to the units place with the power of ten
+// in scientific notation, scaled to the nearest thousand with the power of
+// ten in scientific notation, or scaled to the nearest locale-dependent
+// compact decimal notation power of ten with the corresponding compact
+// decimal notation affix.
+
+enum class Notation {
+ STANDARD,
+ SCIENTIFIC,
+ ENGINEERING,
+ COMPACT,
+};
+
+// [[CompactDisplay]] is one of the String values "short" or "long",
+// specifying whether to display compact notation affixes in short form ("5K")
+// or long form ("5 thousand") if formatting with the "compact" notation. It
+// is only used when [[Notation]] has the value "compact".
+enum class CompactDisplay {
+ SHORT,
+ LONG,
+};
+
+// [[SignDisplay]] is one of the String values "auto", "always", "never", or
+// "except-zero", specifying whether to show the sign on negative numbers
+// only, positive and negative numbers including zero, neither positive nor
+// negative numbers, or positive and negative numbers but not zero.
+enum class SignDisplay {
+ AUTO,
+ ALWAYS,
+ NEVER,
+ EXCEPT_ZERO,
+};
+
+UNumberUnitWidth ToUNumberUnitWidth(CurrencyDisplay currency_display) {
switch (currency_display) {
- case JSNumberFormat::CurrencyDisplay::SYMBOL:
- return UNUM_CURRENCY;
- case JSNumberFormat::CurrencyDisplay::CODE:
- return UNUM_CURRENCY_ISO;
- case JSNumberFormat::CurrencyDisplay::NAME:
- return UNUM_CURRENCY_PLURAL;
- case JSNumberFormat::CurrencyDisplay::COUNT:
- UNREACHABLE();
+ case CurrencyDisplay::SYMBOL:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_SHORT;
+ case CurrencyDisplay::CODE:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_ISO_CODE;
+ case CurrencyDisplay::NAME:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_FULL_NAME;
+ case CurrencyDisplay::NARROW_SYMBOL:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_NARROW;
+ }
+}
+
+UNumberUnitWidth ToUNumberUnitWidth(UnitDisplay unit_display) {
+ switch (unit_display) {
+ case UnitDisplay::SHORT:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_SHORT;
+ case UnitDisplay::LONG:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_FULL_NAME;
+ case UnitDisplay::NARROW:
+ return UNumberUnitWidth::UNUM_UNIT_WIDTH_NARROW;
+ }
+}
+
+UNumberSignDisplay ToUNumberSignDisplay(SignDisplay sign_display,
+ CurrencySign currency_sign) {
+ switch (sign_display) {
+ case SignDisplay::AUTO:
+ if (currency_sign == CurrencySign::ACCOUNTING) {
+ return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING;
+ }
+ DCHECK(currency_sign == CurrencySign::STANDARD);
+ return UNumberSignDisplay::UNUM_SIGN_AUTO;
+ case SignDisplay::NEVER:
+ return UNumberSignDisplay::UNUM_SIGN_NEVER;
+ case SignDisplay::ALWAYS:
+ if (currency_sign == CurrencySign::ACCOUNTING) {
+ return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING_ALWAYS;
+ }
+ DCHECK(currency_sign == CurrencySign::STANDARD);
+ return UNumberSignDisplay::UNUM_SIGN_ALWAYS;
+ case SignDisplay::EXCEPT_ZERO:
+ if (currency_sign == CurrencySign::ACCOUNTING) {
+ return UNumberSignDisplay::UNUM_SIGN_ACCOUNTING_EXCEPT_ZERO;
+ }
+ DCHECK(currency_sign == CurrencySign::STANDARD);
+ return UNumberSignDisplay::UNUM_SIGN_EXCEPT_ZERO;
+ }
+}
+
+icu::number::Notation ToICUNotation(Notation notation,
+ CompactDisplay compact_display) {
+ switch (notation) {
+ case Notation::STANDARD:
+ return icu::number::Notation::simple();
+ case Notation::SCIENTIFIC:
+ return icu::number::Notation::scientific();
+ case Notation::ENGINEERING:
+ return icu::number::Notation::engineering();
+ case Notation::COMPACT:
+ if (compact_display == CompactDisplay::SHORT) {
+ return icu::number::Notation::compactShort();
+ }
+ DCHECK(compact_display == CompactDisplay::LONG);
+ return icu::number::Notation::compactLong();
}
}
+std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
+ UErrorCode status = U_ZERO_ERROR;
+ int32_t total = icu::MeasureUnit::getAvailable(nullptr, 0, status);
+ CHECK(U_FAILURE(status));
+ status = U_ZERO_ERROR;
+ // See the list in ecma402 #sec-issanctionedsimpleunitidentifier
+ std::set<std::string> sanctioned(
+ {"acre", "bit", "byte", "celsius",
+ "centimeter", "day", "degree", "fahrenheit",
+ "foot", "gigabit", "gigabyte", "gram",
+ "hectare", "hour", "inch", "kilobit",
+ "kilobyte", "kilogram", "kilometer", "megabit",
+ "megabyte", "meter", "mile", "mile-scandinavian",
+ "millimeter", "millisecond", "minute", "month",
+ "ounce", "percent", "petabyte", "pound",
+ "second", "stone", "terabit", "terabyte",
+ "week", "yard", "year"});
+ std::vector<icu::MeasureUnit> units(total);
+ total = icu::MeasureUnit::getAvailable(units.data(), total, status);
+ CHECK(U_SUCCESS(status));
+ std::map<const std::string, icu::MeasureUnit> map;
+ for (auto it = units.begin(); it != units.end(); ++it) {
+ if (sanctioned.count(it->getSubtype()) > 0) {
+ map[it->getSubtype()] = *it;
+ }
+ }
+ return map;
+}
+
+class UnitFactory {
+ public:
+ UnitFactory() : map_(CreateUnitMap()) {}
+ virtual ~UnitFactory() {}
+
+ // ecma402 #sec-issanctionedsimpleunitidentifier
+ icu::MeasureUnit create(const std::string& unitIdentifier) {
+ // 1. If unitIdentifier is in the following list, return true.
+ auto found = map_.find(unitIdentifier);
+ if (found != map_.end()) {
+ return found->second;
+ }
+ // 2. Return false.
+ return icu::NoUnit::base();
+ }
+
+ private:
+ std::map<const std::string, icu::MeasureUnit> map_;
+};
+
+// ecma402 #sec-issanctionedsimpleunitidentifier
+icu::MeasureUnit IsSanctionedUnitIdentifier(const std::string& unit) {
+ static base::LazyInstance<UnitFactory>::type factory =
+ LAZY_INSTANCE_INITIALIZER;
+ return factory.Pointer()->create(unit);
+}
+
+// ecma402 #sec-iswellformedunitidentifier
+Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> IsWellFormedUnitIdentifier(
+ Isolate* isolate, const std::string& unit) {
+ icu::MeasureUnit result = IsSanctionedUnitIdentifier(unit);
+ icu::MeasureUnit none = icu::NoUnit::base();
+ // 1. If the result of IsSanctionedUnitIdentifier(unitIdentifier) is true,
+ // then
+ if (result != none) {
+ // a. Return true.
+ std::pair<icu::MeasureUnit, icu::MeasureUnit> pair(result, none);
+ return Just(pair);
+ }
+ // 2. If the substring "-per-" does not occur exactly once in unitIdentifier,
+ // then
+ size_t first_per = unit.find("-per-");
+ if (first_per == std::string::npos ||
+ unit.find("-per-", first_per + 5) != std::string::npos) {
+ // a. Return false.
+ return Nothing<std::pair<icu::MeasureUnit, icu::MeasureUnit>>();
+ }
+ // 3. Let numerator be the substring of unitIdentifier from the beginning to
+ // just before "-per-".
+ std::string numerator = unit.substr(0, first_per);
+
+ // 4. If the result of IsSanctionedUnitIdentifier(numerator) is false, then
+ result = IsSanctionedUnitIdentifier(numerator);
+ if (result == none) {
+ // a. Return false.
+ return Nothing<std::pair<icu::MeasureUnit, icu::MeasureUnit>>();
+ }
+ // 5. Let denominator be the substring of unitIdentifier from just after
+ // "-per-" to the end.
+ std::string denominator = unit.substr(first_per + 5);
+
+ // 6. If the result of IsSanctionedUnitIdentifier(denominator) is false, then
+ icu::MeasureUnit den_result = IsSanctionedUnitIdentifier(denominator);
+ if (den_result == none) {
+ // a. Return false.
+ return Nothing<std::pair<icu::MeasureUnit, icu::MeasureUnit>>();
+ }
+ // 7. Return true.
+ std::pair<icu::MeasureUnit, icu::MeasureUnit> pair(result, den_result);
+ return Just(pair);
+}
+
// ecma-402/#sec-currencydigits
 // The currency is expected to be an all upper case string value.
int CurrencyDigits(const icu::UnicodeString& currency) {
@@ -69,23 +304,351 @@ bool IsWellFormedCurrencyCode(const std::string& currency) {
return (IsAToZ(currency[0]) && IsAToZ(currency[1]) && IsAToZ(currency[2]));
}
+// Parse the 'style' from the skeleton.
+Style StyleFromSkeleton(const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "percent precision-integer rounding-mode-half-up scale/100"
+ if (skeleton.indexOf("percent") >= 0 && skeleton.indexOf("scale/100") >= 0) {
+ return Style::PERCENT;
+ }
+ // Ex: skeleton as "currency/TWD .00 rounding-mode-half-up"
+ if (skeleton.indexOf("currency") >= 0) {
+ return Style::CURRENCY;
+ }
+ // Ex: skeleton as
+ // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow"
+ // or special case for "percent .### rounding-mode-half-up"
+ if (skeleton.indexOf("measure-unit") >= 0 ||
+ skeleton.indexOf("percent") >= 0) {
+ return Style::UNIT;
+ }
+ // Ex: skeleton as ".### rounding-mode-half-up"
+ return Style::DECIMAL;
+}
+
+// Return the style as a String.
+Handle<String> StyleAsString(Isolate* isolate, Style style) {
+ switch (style) {
+ case Style::PERCENT:
+ return ReadOnlyRoots(isolate).percent_string_handle();
+ case Style::CURRENCY:
+ return ReadOnlyRoots(isolate).currency_string_handle();
+ case Style::UNIT:
+ return ReadOnlyRoots(isolate).unit_string_handle();
+ case Style::DECIMAL:
+ return ReadOnlyRoots(isolate).decimal_string_handle();
+ }
+ UNREACHABLE();
+}
+
+// Parse the 'currencyDisplay' from the skeleton.
+Handle<String> CurrencyDisplayString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up unit-width-iso-code"
+ if (skeleton.indexOf("unit-width-iso-code") >= 0) {
+ return ReadOnlyRoots(isolate).code_string_handle();
+ }
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up unit-width-full-name;"
+ if (skeleton.indexOf("unit-width-full-name") >= 0) {
+ return ReadOnlyRoots(isolate).name_string_handle();
+ }
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up unit-width-narrow;
+ if (skeleton.indexOf("unit-width-narrow") >= 0) {
+ return ReadOnlyRoots(isolate).narrow_symbol_string_handle();
+ }
+ // Ex: skeleton as "currency/TWD .00 rounding-mode-half-up"
+ return ReadOnlyRoots(isolate).symbol_string_handle();
+}
+
+// Return true if there are no "group-off" in the skeleton.
+bool UseGroupingFromSkeleton(const icu::UnicodeString& skeleton) {
+ return skeleton.indexOf("group-off") == -1;
+}
+
+// Parse currency code from skeleton. For example, skeleton as
+// "currency/TWD .00 rounding-mode-half-up unit-width-full-name;"
+std::string CurrencyFromSkeleton(const icu::UnicodeString& skeleton) {
+ std::string str;
+ str = skeleton.toUTF8String<std::string>(str);
+ std::string search("currency/");
+ size_t index = str.find(search);
+ if (index == str.npos) return "";
+ return str.substr(index + search.size(), 3);
+}
+
+// Return CurrencySign as string based on skeleton.
+Handle<String> CurrencySignString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up sign-accounting-always" OR
+ // "currency/TWD .00 rounding-mode-half-up sign-accounting-except-zero"
+ if (skeleton.indexOf("sign-accounting") >= 0) {
+ return ReadOnlyRoots(isolate).accounting_string_handle();
+ }
+ return ReadOnlyRoots(isolate).standard_string_handle();
+}
+
+// Return UnitDisplay as string based on skeleton.
+Handle<String> UnitDisplayString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "measure-unit/length-meter .### rounding-mode-half-up unit-width-full-name"
+ if (skeleton.indexOf("unit-width-full-name") >= 0) {
+ return ReadOnlyRoots(isolate).long_string_handle();
+ }
+ // Ex: skeleton as
+ // "measure-unit/length-meter .### rounding-mode-half-up unit-width-narrow".
+ if (skeleton.indexOf("unit-width-narrow") >= 0) {
+ return ReadOnlyRoots(isolate).narrow_string_handle();
+ }
+ // Ex: skeleton as
+ // "measure-unit/length-foot .### rounding-mode-half-up"
+ return ReadOnlyRoots(isolate).short_string_handle();
+}
+
+// Parse Notation from skeleton.
+Notation NotationFromSkeleton(const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "scientific .### rounding-mode-half-up"
+ if (skeleton.indexOf("scientific") >= 0) {
+ return Notation::SCIENTIFIC;
+ }
+ // Ex: skeleton as
+ // "engineering .### rounding-mode-half-up"
+ if (skeleton.indexOf("engineering") >= 0) {
+ return Notation::ENGINEERING;
+ }
+ // Ex: skeleton as
+ // "compact-short .### rounding-mode-half-up" or
+ // "compact-long .### rounding-mode-half-up
+ if (skeleton.indexOf("compact-") >= 0) {
+ return Notation::COMPACT;
+ }
+ // Ex: skeleton as
+ // "measure-unit/length-foot .### rounding-mode-half-up"
+ return Notation::STANDARD;
+}
+
+Handle<String> NotationAsString(Isolate* isolate, Notation notation) {
+ switch (notation) {
+ case Notation::SCIENTIFIC:
+ return ReadOnlyRoots(isolate).scientific_string_handle();
+ case Notation::ENGINEERING:
+ return ReadOnlyRoots(isolate).engineering_string_handle();
+ case Notation::COMPACT:
+ return ReadOnlyRoots(isolate).compact_string_handle();
+ case Notation::STANDARD:
+ return ReadOnlyRoots(isolate).standard_string_handle();
+ }
+ UNREACHABLE();
+}
+
+// Return CompactString as string based on skeleton.
+Handle<String> CompactDisplayString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "compact-long .### rounding-mode-half-up"
+ if (skeleton.indexOf("compact-long") >= 0) {
+ return ReadOnlyRoots(isolate).long_string_handle();
+ }
+ // Ex: skeleton as
+ // "compact-short .### rounding-mode-half-up"
+ DCHECK_GE(skeleton.indexOf("compact-short"), 0);
+ return ReadOnlyRoots(isolate).short_string_handle();
+}
+
+// Return SignDisplay as string based on skeleton.
+Handle<String> SignDisplayString(Isolate* isolate,
+ const icu::UnicodeString& skeleton) {
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up sign-never"
+ if (skeleton.indexOf("sign-never") >= 0) {
+ return ReadOnlyRoots(isolate).never_string_handle();
+ }
+ // Ex: skeleton as
+ // ".### rounding-mode-half-up sign-always" or
+ // "currency/TWD .00 rounding-mode-half-up sign-accounting-always"
+ if (skeleton.indexOf("sign-always") >= 0 ||
+ skeleton.indexOf("sign-accounting-always") >= 0) {
+ return ReadOnlyRoots(isolate).always_string_handle();
+ }
+ // Ex: skeleton as
+ // "currency/TWD .00 rounding-mode-half-up sign-accounting-except-zero" or
+ // "currency/TWD .00 rounding-mode-half-up sign-except-zero"
+ if (skeleton.indexOf("sign-accounting-except-zero") >= 0 ||
+ skeleton.indexOf("sign-except-zero") >= 0) {
+ return ReadOnlyRoots(isolate).except_zero_string_handle();
+ }
+ return ReadOnlyRoots(isolate).auto_string_handle();
+}
+
+// Return the minimum integer digits by counting the number of '0' after
+// "integer-width/+" in the skeleton.
+// Ex: Return 15 for skeleton as
+// “currency/TWD .00 rounding-mode-half-up integer-width/+000000000000000”
+// 1
+// 123456789012345
+// Return default value as 1 if there are no "integer-width/+".
+int32_t MinimumIntegerDigitsFromSkeleton(const icu::UnicodeString& skeleton) {
+ // count the number of 0 after "integer-width/+"
+ icu::UnicodeString search("integer-width/+");
+ int32_t index = skeleton.indexOf(search);
+ if (index < 0) return 1; // return 1 if cannot find it.
+ index += search.length();
+ int32_t matched = 0;
+ while (index < skeleton.length() && skeleton[index] == '0') {
+ matched++;
+ index++;
+ }
+ CHECK_GT(matched, 0);
+ return matched;
+}
+
+// Return true if there are fraction digits, false if not.
+// The minimum fraction digits is the number of '0' after '.' in the skeleton
+// The maximum fraction digits is the number of '#' after the above '0's plus
+// the minimum fraction digits.
+// For example, as skeleton “.000#### rounding-mode-half-up”
+// 123
+// 4567
+// Set The minimum as 3 and maximum as 7.
+bool FractionDigitsFromSkeleton(const icu::UnicodeString& skeleton,
+ int32_t* minimum, int32_t* maximum) {
+ icu::UnicodeString search(".");
+ int32_t index = skeleton.indexOf(search);
+ if (index < 0) return false;
+ *minimum = 0;
+ index++; // skip the '.'
+ while (index < skeleton.length() && skeleton[index] == '0') {
+ (*minimum)++;
+ index++;
+ }
+ *maximum = *minimum;
+ while (index < skeleton.length() && skeleton[index] == '#') {
+ (*maximum)++;
+ index++;
+ }
+ return true;
+}
+
+// Return true if there are significant digits, false if not.
+// The minimum significant digits is the number of '@' in the skeleton
+// The maximum significant digits is the number of '#' after these '@'s plus
+// the minimum significant digits.
+// Ex: Skeleton as "@@@@@####### rounding-mode-half-up"
+// 12345
+// 6789012
+// Set The minimum as 5 and maximum as 12.
+bool SignificantDigitsFromSkeleton(const icu::UnicodeString& skeleton,
+ int32_t* minimum, int32_t* maximum) {
+ icu::UnicodeString search("@");
+ int32_t index = skeleton.indexOf(search);
+ if (index < 0) return false;
+ *minimum = 1;
+ index++; // skip the first '@'
+ while (index < skeleton.length() && skeleton[index] == '@') {
+ (*minimum)++;
+ index++;
+ }
+ *maximum = *minimum;
+ while (index < skeleton.length() && skeleton[index] == '#') {
+ (*maximum)++;
+ index++;
+ }
+ return true;
+}
+
+// Ex: percent .### rounding-mode-half-up
+// Special case for "percent"
+// Ex: "measure-unit/length-kilometer per-measure-unit/duration-hour .###
+// rounding-mode-half-up" should return "kilometer-per-hour".
+// Ex: "measure-unit/duration-year .### rounding-mode-half-up" should return
+// "year".
+std::string UnitFromSkeleton(const icu::UnicodeString& skeleton) {
+ std::string str;
+ str = skeleton.toUTF8String<std::string>(str);
+ // Special case for "percent" first.
+ if (str.find("percent") != str.npos) {
+ return "percent";
+ }
+ std::string search("measure-unit/");
+ size_t begin = str.find(search);
+ if (begin == str.npos) {
+ return "";
+ }
+ // Skip the type (ex: "length").
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // b
+ begin = str.find("-", begin + search.size());
+ if (begin == str.npos) {
+ return "";
+ }
+ begin++; // Skip the '-'.
+ // Find the end of the subtype.
+ size_t end = str.find(" ", begin);
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // b e
+ if (end == str.npos) {
+ end = str.size();
+ return str.substr(begin, end - begin);
+ }
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // b e
+ // [result ]
+ std::string result = str.substr(begin, end - begin);
+ begin = end + 1;
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ]eb
+ std::string search_per("per-measure-unit/");
+ begin = str.find(search_per, begin);
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ]e b
+ if (begin == str.npos) {
+ return result;
+ }
+ // Skip the type (ex: "duration").
+ begin = str.find("-", begin + search_per.size());
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ]e b
+ if (begin == str.npos) {
+ return result;
+ }
+ begin++; // Skip the '-'.
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ]e b
+ end = str.find(" ", begin);
+ if (end == str.npos) {
+ end = str.size();
+ }
+ // "measure-unit/length-kilometer per-measure-unit/duration-hour"
+ // [result ] b e
+ return result + "-per-" + str.substr(begin, end - begin);
+}
+
} // anonymous namespace
// static
// ecma402 #sec-intl.numberformat.prototype.resolvedoptions
Handle<JSObject> JSNumberFormat::ResolvedOptions(
- Isolate* isolate, Handle<JSNumberFormat> number_format_holder) {
+ Isolate* isolate, Handle<JSNumberFormat> number_format) {
Factory* factory = isolate->factory();
+ UErrorCode status = U_ZERO_ERROR;
+ icu::number::LocalizedNumberFormatter* icu_number_formatter =
+ number_format->icu_number_formatter().raw();
+ icu::UnicodeString skeleton = icu_number_formatter->toSkeleton(status);
+ CHECK(U_SUCCESS(status));
+
+ std::string s_str;
+ s_str = skeleton.toUTF8String<std::string>(s_str);
+
// 4. Let options be ! ObjectCreate(%ObjectPrototype%).
Handle<JSObject> options = factory->NewJSObject(isolate->object_function());
- icu::NumberFormat* number_format =
- number_format_holder->icu_number_format()->raw();
- CHECK_NOT_NULL(number_format);
-
- Handle<String> locale =
- Handle<String>(number_format_holder->locale(), isolate);
+ Handle<String> locale = Handle<String>(number_format->locale(), isolate);
std::unique_ptr<char[]> locale_str = locale->ToCString();
icu::Locale icu_locale = Intl::CreateICULocale(locale_str.get());
@@ -117,69 +680,120 @@ Handle<JSObject> JSNumberFormat::ResolvedOptions(
Just(kDontThrow))
.FromJust());
}
+ Style style = StyleFromSkeleton(skeleton);
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->style_string(),
- number_format_holder->StyleAsString(), Just(kDontThrow))
+ StyleAsString(isolate, style), Just(kDontThrow))
.FromJust());
- if (number_format_holder->style() == Style::CURRENCY) {
- icu::UnicodeString currency(number_format->getCurrency());
- DCHECK(!currency.isEmpty());
+ std::string currency = CurrencyFromSkeleton(skeleton);
+ if (!currency.empty()) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->currency_string(),
- factory
- ->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(currency.getBuffer()),
- currency.length()))
- .ToHandleChecked(),
+ factory->NewStringFromAsciiChecked(currency.c_str()),
Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->currencyDisplay_string(),
- number_format_holder->CurrencyDisplayAsString(), Just(kDontThrow))
+ CurrencyDisplayString(isolate, skeleton), Just(kDontThrow))
.FromJust());
+ if (FLAG_harmony_intl_numberformat_unified) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->currencySign_string(),
+ CurrencySignString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
}
- CHECK(JSReceiver::CreateDataProperty(
- isolate, options, factory->minimumIntegerDigits_string(),
- factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
- Just(kDontThrow))
- .FromJust());
- CHECK(
- JSReceiver::CreateDataProperty(
- isolate, options, factory->minimumFractionDigits_string(),
- factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
- Just(kDontThrow))
- .FromJust());
+
+ if (FLAG_harmony_intl_numberformat_unified) {
+ std::string unit = UnitFromSkeleton(skeleton);
+ if (!unit.empty()) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->unit_string(),
+ isolate->factory()->NewStringFromAsciiChecked(unit.c_str()),
+ Just(kDontThrow))
+ .FromJust());
+ }
+ if (style == Style::UNIT || style == Style::PERCENT) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->unitDisplay_string(),
+ UnitDisplayString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
+ }
+
CHECK(
JSReceiver::CreateDataProperty(
- isolate, options, factory->maximumFractionDigits_string(),
- factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
+ isolate, options, factory->minimumIntegerDigits_string(),
+ factory->NewNumberFromInt(MinimumIntegerDigitsFromSkeleton(skeleton)),
Just(kDontThrow))
.FromJust());
- CHECK(number_format->getDynamicClassID() ==
- icu::DecimalFormat::getStaticClassID());
- icu::DecimalFormat* decimal_format =
- static_cast<icu::DecimalFormat*>(number_format);
- CHECK_NOT_NULL(decimal_format);
- if (decimal_format->areSignificantDigitsUsed()) {
+ int32_t minimum = 0, maximum = 0;
+ bool output_fraction =
+ FractionDigitsFromSkeleton(skeleton, &minimum, &maximum);
+
+ if (!FLAG_harmony_intl_numberformat_unified && !output_fraction) {
+    // The current ECMA 402 spec mandates recording (Min|Max)imumFractionDigits
+    // unconditionally, while the unified number proposal eventually will only
+    // record either (Min|Max)imumFractionDigits or
+    // (Min|Max)imumSignificantDigits. Since LocalizedNumberFormatter can only
+    // remember one set, and during the 2019-1-17 ECMA402 meeting the committee
+    // decided not to take a PR to address that prior to the unified number
+    // proposal, we have to add these two 5-bit ints into flags to remember the
+    // (Min|Max)imumFractionDigits while (Min|Max)imumSignificantDigits is
+    // present.
+ // TODO(ftang) remove the following two lines once we ship
+ // int-number-format-unified
+ output_fraction = true;
+ minimum = number_format->minimum_fraction_digits();
+ maximum = number_format->maximum_fraction_digits();
+ }
+ if (output_fraction) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->minimumFractionDigits_string(),
+ factory->NewNumberFromInt(minimum), Just(kDontThrow))
+ .FromJust());
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->maximumFractionDigits_string(),
+ factory->NewNumberFromInt(maximum), Just(kDontThrow))
+ .FromJust());
+ }
+ minimum = 0;
+ maximum = 0;
+ if (SignificantDigitsFromSkeleton(skeleton, &minimum, &maximum)) {
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->minimumSignificantDigits_string(),
- factory->NewNumberFromInt(
- decimal_format->getMinimumSignificantDigits()),
- Just(kDontThrow))
+ factory->NewNumberFromInt(minimum), Just(kDontThrow))
.FromJust());
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->maximumSignificantDigits_string(),
- factory->NewNumberFromInt(
- decimal_format->getMaximumSignificantDigits()),
- Just(kDontThrow))
+ factory->NewNumberFromInt(maximum), Just(kDontThrow))
.FromJust());
}
+
CHECK(JSReceiver::CreateDataProperty(
isolate, options, factory->useGrouping_string(),
- factory->ToBoolean((number_format->isGroupingUsed() == TRUE)),
+ factory->ToBoolean(UseGroupingFromSkeleton(skeleton)),
Just(kDontThrow))
.FromJust());
+ if (FLAG_harmony_intl_numberformat_unified) {
+ Notation notation = NotationFromSkeleton(skeleton);
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->notation_string(),
+ NotationAsString(isolate, notation), Just(kDontThrow))
+ .FromJust());
+ // Only output compactDisplay when notation is compact.
+ if (notation == Notation::COMPACT) {
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->compactDisplay_string(),
+ CompactDisplayString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, options, factory->signDisplay_string(),
+ SignDisplayString(isolate, skeleton), Just(kDontThrow))
+ .FromJust());
+ }
return options;
}
@@ -189,7 +803,7 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat(
// old code copy from NumberFormat::Unwrap that has no spec comment and
// compiled but fail unit tests.
Handle<Context> native_context =
- Handle<Context>(isolate->context()->native_context(), isolate);
+ Handle<Context>(isolate->context().native_context(), isolate);
Handle<JSFunction> constructor = Handle<JSFunction>(
JSFunction::cast(native_context->intl_number_format_function()), isolate);
Handle<Object> object;
@@ -216,7 +830,6 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::UnwrapNumberFormat(
MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
Isolate* isolate, Handle<JSNumberFormat> number_format,
Handle<Object> locales, Handle<Object> options_obj) {
- // set the flags to 0 ASAP.
number_format->set_flags(0);
Factory* factory = isolate->factory();
@@ -252,6 +865,19 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSNumberFormat>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
+ std::unique_ptr<char[]> numbering_system_str = nullptr;
+ if (FLAG_harmony_intl_add_calendar_numbering_system) {
+ // 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`,
+ // `"string"`, *undefined*, *undefined*).
+ Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
+ isolate, options, "Intl.RelativeTimeFormat", &numbering_system_str);
+ // 8. If _numberingSystem_ is not *undefined*, then
+ // a. If _numberingSystem_ does not match the
+ // `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError*
+ // exception.
+ MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSNumberFormat>());
+ }
+
// 7. Let localeData be %NumberFormat%.[[LocaleData]].
// 8. Let r be ResolveLocale(%NumberFormat%.[[AvailableLocales]],
// requestedLocales, opt, %NumberFormat%.[[RelevantExtensionKeys]],
@@ -261,24 +887,43 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
Intl::ResolveLocale(isolate, JSNumberFormat::GetAvailableLocales(),
requested_locales, matcher, relevant_extension_keys);
+ UErrorCode status = U_ZERO_ERROR;
+ if (numbering_system_str != nullptr) {
+ r.icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(),
+ status);
+ CHECK(U_SUCCESS(status));
+ r.locale = Intl::ToLanguageTag(r.icu_locale).FromJust();
+ }
+
// 9. Set numberFormat.[[Locale]] to r.[[locale]].
Handle<String> locale_str =
isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
number_format->set_locale(*locale_str);
// 11. Let dataLocale be r.[[dataLocale]].
- //
+
+ icu::number::LocalizedNumberFormatter icu_number_formatter =
+ icu::number::NumberFormatter::withLocale(r.icu_locale)
+ .roundingMode(UNUM_ROUND_HALFUP);
+
// 12. Let style be ? GetOption(options, "style", "string", « "decimal",
// "percent", "currency" », "decimal").
const char* service = "Intl.NumberFormat";
+
+ std::vector<const char*> style_str_values({"decimal", "percent", "currency"});
+ std::vector<Style> style_enum_values(
+ {Style::DECIMAL, Style::PERCENT, Style::CURRENCY});
+ if (FLAG_harmony_intl_numberformat_unified) {
+ style_str_values.push_back("unit");
+ style_enum_values.push_back(Style::UNIT);
+ }
Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
- isolate, options, "style", service, {"decimal", "percent", "currency"},
- {Style::DECIMAL, Style::PERCENT, Style::CURRENCY}, Style::DECIMAL);
+ isolate, options, "style", service, style_str_values, style_enum_values,
+ Style::DECIMAL);
MAYBE_RETURN(maybe_style, MaybeHandle<JSNumberFormat>());
Style style = maybe_style.FromJust();
// 13. Set numberFormat.[[Style]] to style.
- number_format->set_style(style);
// 14. Let currency be ? GetOption(options, "currency", "string", undefined,
// undefined).
@@ -298,7 +943,8 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
if (!IsWellFormedCurrencyCode(currency)) {
THROW_NEW_ERROR(
isolate,
- NewRangeError(MessageTemplate::kInvalidCurrencyCode,
+ NewRangeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("currency code"),
factory->NewStringFromAsciiChecked(currency.c_str())),
JSNumberFormat);
}
@@ -324,85 +970,131 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
// 18. Let currencyDisplay be ? GetOption(options, "currencyDisplay",
// "string", « "code", "symbol", "name" », "symbol").
- Maybe<CurrencyDisplay> maybe_currencyDisplay =
+ std::vector<const char*> currency_display_str_values(
+ {"code", "symbol", "name"});
+ std::vector<CurrencyDisplay> currency_display_enum_values(
+ {CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL, CurrencyDisplay::NAME});
+ if (FLAG_harmony_intl_numberformat_unified) {
+ currency_display_str_values.push_back("narrow-symbol");
+ currency_display_enum_values.push_back(CurrencyDisplay::NARROW_SYMBOL);
+ }
+ Maybe<CurrencyDisplay> maybe_currency_display =
Intl::GetStringOption<CurrencyDisplay>(
isolate, options, "currencyDisplay", service,
- {"code", "symbol", "name"},
- {CurrencyDisplay::CODE, CurrencyDisplay::SYMBOL,
- CurrencyDisplay::NAME},
+ currency_display_str_values, currency_display_enum_values,
CurrencyDisplay::SYMBOL);
- MAYBE_RETURN(maybe_currencyDisplay, MaybeHandle<JSNumberFormat>());
- CurrencyDisplay currency_display = maybe_currencyDisplay.FromJust();
- UNumberFormatStyle format_style = ToNumberFormatStyle(currency_display);
-
- UErrorCode status = U_ZERO_ERROR;
- std::unique_ptr<icu::NumberFormat> icu_number_format;
- icu::Locale no_extension_locale(r.icu_locale.getBaseName());
- if (style == Style::DECIMAL) {
- icu_number_format.reset(
- icu::NumberFormat::createInstance(r.icu_locale, status));
- // If the subclass is not DecimalFormat, fallback to no extension
- // because other subclass has not support the format() with
- // FieldPositionIterator yet.
- if (U_FAILURE(status) || icu_number_format.get() == nullptr ||
- icu_number_format->getDynamicClassID() !=
- icu::DecimalFormat::getStaticClassID()) {
- status = U_ZERO_ERROR;
- icu_number_format.reset(
- icu::NumberFormat::createInstance(no_extension_locale, status));
- }
- } else if (style == Style::PERCENT) {
- icu_number_format.reset(
- icu::NumberFormat::createPercentInstance(r.icu_locale, status));
- // If the subclass is not DecimalFormat, fallback to no extension
- // because other subclass has not support the format() with
- // FieldPositionIterator yet.
- if (U_FAILURE(status) || icu_number_format.get() == nullptr ||
- icu_number_format->getDynamicClassID() !=
- icu::DecimalFormat::getStaticClassID()) {
- status = U_ZERO_ERROR;
- icu_number_format.reset(icu::NumberFormat::createPercentInstance(
- no_extension_locale, status));
+ MAYBE_RETURN(maybe_currency_display, MaybeHandle<JSNumberFormat>());
+ CurrencyDisplay currency_display = maybe_currency_display.FromJust();
+
+ CurrencySign currency_sign = CurrencySign::STANDARD;
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // Let currencySign be ? GetOption(options, "currencySign", "string", «
+ // "standard", "accounting" », "standard").
+ Maybe<CurrencySign> maybe_currency_sign =
+ Intl::GetStringOption<CurrencySign>(
+ isolate, options, "currencySign", service,
+ {"standard", "accounting"},
+ {CurrencySign::STANDARD, CurrencySign::ACCOUNTING},
+ CurrencySign::STANDARD);
+ MAYBE_RETURN(maybe_currency_sign, MaybeHandle<JSNumberFormat>());
+ currency_sign = maybe_currency_sign.FromJust();
+
+ // Let unit be ? GetOption(options, "unit", "string", undefined, undefined).
+ std::unique_ptr<char[]> unit_cstr;
+ Maybe<bool> found_unit = Intl::GetStringOption(
+ isolate, options, "unit", empty_values, service, &unit_cstr);
+ MAYBE_RETURN(found_unit, MaybeHandle<JSNumberFormat>());
+
+ std::string unit;
+ if (found_unit.FromJust()) {
+ DCHECK_NOT_NULL(unit_cstr.get());
+ unit = unit_cstr.get();
}
- } else {
- DCHECK_EQ(style, Style::CURRENCY);
- icu_number_format.reset(
- icu::NumberFormat::createInstance(r.icu_locale, format_style, status));
- // If the subclass is not DecimalFormat, fallback to no extension
- // because other subclass has not support the format() with
- // FieldPositionIterator yet.
- if (U_FAILURE(status) || icu_number_format.get() == nullptr ||
- icu_number_format->getDynamicClassID() !=
- icu::DecimalFormat::getStaticClassID()) {
- status = U_ZERO_ERROR;
- icu_number_format.reset(icu::NumberFormat::createInstance(
- no_extension_locale, format_style, status));
+
+ // Let unitDisplay be ? GetOption(options, "unitDisplay", "string", «
+ // "short", "narrow", "long" », "short").
+ Maybe<UnitDisplay> maybe_unit_display = Intl::GetStringOption<UnitDisplay>(
+ isolate, options, "unitDisplay", service, {"short", "narrow", "long"},
+ {UnitDisplay::SHORT, UnitDisplay::NARROW, UnitDisplay::LONG},
+ UnitDisplay::SHORT);
+ MAYBE_RETURN(maybe_unit_display, MaybeHandle<JSNumberFormat>());
+ UnitDisplay unit_display = maybe_unit_display.FromJust();
+
+ // If style is "percent", then
+ if (style == Style::PERCENT) {
+ // Let unit be "concentr-percent".
+ unit = "percent";
}
- }
+ // If style is "unit" or "percent", then
+ if (style == Style::PERCENT || style == Style::UNIT) {
+ // If unit is undefined, throw a TypeError exception.
+ if (unit == "") {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidUnit,
+ factory->NewStringFromStaticChars("Intl.NumberFormat"),
+ factory->NewStringFromStaticChars("")),
+ JSNumberFormat);
+ }
+
+ // If the result of IsWellFormedUnitIdentifier(unit) is false, throw a
+ // RangeError exception.
+ Maybe<std::pair<icu::MeasureUnit, icu::MeasureUnit>> maybe_wellformed =
+ IsWellFormedUnitIdentifier(isolate, unit);
+ if (maybe_wellformed.IsNothing()) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(
+ MessageTemplate::kInvalidUnit,
+ factory->NewStringFromStaticChars("Intl.NumberFormat"),
+ factory->NewStringFromAsciiChecked(unit.c_str())),
+ JSNumberFormat);
+ }
+ std::pair<icu::MeasureUnit, icu::MeasureUnit> unit_pair =
+ maybe_wellformed.FromJust();
- if (U_FAILURE(status) || icu_number_format.get() == nullptr) {
- status = U_ZERO_ERROR;
- // Remove extensions and try again.
- icu_number_format.reset(
- icu::NumberFormat::createInstance(no_extension_locale, status));
+ // Set intlObj.[[Unit]] to unit.
+ if (unit_pair.first != icu::NoUnit::base()) {
+ icu_number_formatter = icu_number_formatter.unit(unit_pair.first);
+ }
+ if (unit_pair.second != icu::NoUnit::base()) {
+ icu_number_formatter = icu_number_formatter.perUnit(unit_pair.second);
+ }
- if (U_FAILURE(status) || icu_number_format.get() == nullptr) {
- FATAL("Failed to create ICU number_format, are ICU data files missing?");
+ // The default unitWidth is SHORT in ICU and that mapped from
+ // Symbol so we can skip the setting for optimization.
+ if (unit_display != UnitDisplay::SHORT) {
+ icu_number_formatter =
+ icu_number_formatter.unitWidth(ToUNumberUnitWidth(unit_display));
+ }
}
}
- DCHECK(U_SUCCESS(status));
- CHECK_NOT_NULL(icu_number_format.get());
- CHECK(icu_number_format->getDynamicClassID() ==
- icu::DecimalFormat::getStaticClassID());
+
+ if (style == Style::PERCENT) {
+ icu_number_formatter = icu_number_formatter.unit(icu::NoUnit::percent())
+ .scale(icu::number::Scale::powerOfTen(2));
+ }
+
if (style == Style::CURRENCY) {
// 19. If style is "currency", set numberFormat.[[CurrencyDisplay]] to
// currencyDisplay.
- number_format->set_currency_display(currency_display);
// 17.b. Set numberFormat.[[Currency]] to currency.
if (!currency_ustr.isEmpty()) {
- status = U_ZERO_ERROR;
- icu_number_format->setCurrency(currency_ustr.getBuffer(), status);
+ Handle<String> currency_string;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, currency_string,
+ Intl::ToString(isolate, currency_ustr),
+ JSNumberFormat);
+
+ icu_number_formatter = icu_number_formatter.unit(
+ icu::CurrencyUnit(currency_ustr.getBuffer(), status));
+ CHECK(U_SUCCESS(status));
+ // The default unitWidth is SHORT in ICU and that mapped from
+ // Symbol so we can skip the setting for optimization.
+ if (currency_display != CurrencyDisplay::SYMBOL) {
+ icu_number_formatter = icu_number_formatter.unitWidth(
+ ToUNumberUnitWidth(currency_display));
+ }
CHECK(U_SUCCESS(status));
}
}
@@ -430,15 +1122,75 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
}
// 22. Perform ? SetNumberFormatDigitOptions(numberFormat, options,
// mnfdDefault, mxfdDefault).
- CHECK(icu_number_format->getDynamicClassID() ==
- icu::DecimalFormat::getStaticClassID());
- icu::DecimalFormat* icu_decimal_format =
- static_cast<icu::DecimalFormat*>(icu_number_format.get());
- Maybe<bool> maybe_set_number_for_digit_options =
- Intl::SetNumberFormatDigitOptions(isolate, icu_decimal_format, options,
- mnfd_default, mxfd_default);
- MAYBE_RETURN(maybe_set_number_for_digit_options, Handle<JSNumberFormat>());
+ Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
+ Intl::SetNumberFormatDigitOptions(isolate, options, mnfd_default,
+ mxfd_default);
+ MAYBE_RETURN(maybe_digit_options, Handle<JSNumberFormat>());
+ Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
+
+ icu::number::Precision precision =
+ (digit_options.minimum_significant_digits > 0)
+ ? icu::number::Precision::minMaxSignificantDigits(
+ digit_options.minimum_significant_digits,
+ digit_options.maximum_significant_digits)
+ : icu::number::Precision::minMaxFraction(
+ digit_options.minimum_fraction_digits,
+ digit_options.maximum_fraction_digits);
+
+ if (digit_options.minimum_significant_digits > 0) {
+ // Currenct ECMA 402 spec mandate to record (Min|Max)imumFractionDigits
+ // uncondictionally while the unified number proposal eventually will only
+ // record either (Min|Max)imumFractionDigits or
+ // (Min|Max)imumSignaficantDigits Since LocalizedNumberFormatter can only
+ // remember one set, and during 2019-1-17 ECMA402 meeting that the committee
+ // decide not to take a PR to address that prior to the unified number
+ // proposal, we have to add these two 5 bits int into flags to remember the
+ // (Min|Max)imumFractionDigits while (Min|Max)imumSignaficantDigits is
+ // present.
+ // TODO(ftang) remove the following two lines once we ship
+ // int-number-format-unified
+ number_format->set_minimum_fraction_digits(
+ digit_options.minimum_fraction_digits);
+ number_format->set_maximum_fraction_digits(
+ digit_options.maximum_fraction_digits);
+ }
+
+ icu_number_formatter = icu_number_formatter.precision(precision);
+ if (digit_options.minimum_integer_digits > 1) {
+ icu_number_formatter =
+ icu_number_formatter.integerWidth(icu::number::IntegerWidth::zeroFillTo(
+ digit_options.minimum_integer_digits));
+ }
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // Let notation be ? GetOption(options, "notation", "string", « "standard",
+ // "scientific", "engineering", "compact" », "standard").
+ Maybe<Notation> maybe_notation = Intl::GetStringOption<Notation>(
+ isolate, options, "notation", service,
+ {"standard", "scientific", "engineering", "compact"},
+ {Notation::STANDARD, Notation::SCIENTIFIC, Notation::ENGINEERING,
+ Notation::COMPACT},
+ Notation::STANDARD);
+ MAYBE_RETURN(maybe_notation, MaybeHandle<JSNumberFormat>());
+ Notation notation = maybe_notation.FromJust();
+
+ // Let compactDisplay be ? GetOption(options, "compactDisplay", "string", «
+ // "short", "long" », "short").
+ Maybe<CompactDisplay> maybe_compact_display =
+ Intl::GetStringOption<CompactDisplay>(
+ isolate, options, "compactDisplay", service, {"short", "long"},
+ {CompactDisplay::SHORT, CompactDisplay::LONG},
+ CompactDisplay::SHORT);
+ MAYBE_RETURN(maybe_compact_display, MaybeHandle<JSNumberFormat>());
+ CompactDisplay compact_display = maybe_compact_display.FromJust();
+
+ // The default notation in ICU is Simple, which mapped from STANDARD
+ // so we can skip setting it.
+ if (notation != Notation::STANDARD) {
+ icu_number_formatter = icu_number_formatter.notation(
+ ToICUNotation(notation, compact_display));
+ }
+ }
// 23. Let useGrouping be ? GetOption(options, "useGrouping", "boolean",
// undefined, true).
bool use_grouping = true;
@@ -446,7 +1198,32 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
isolate, options, "useGrouping", service, &use_grouping);
MAYBE_RETURN(found_use_grouping, MaybeHandle<JSNumberFormat>());
// 24. Set numberFormat.[[UseGrouping]] to useGrouping.
- icu_number_format->setGroupingUsed(use_grouping ? TRUE : FALSE);
+ if (!use_grouping) {
+ icu_number_formatter = icu_number_formatter.grouping(
+ UNumberGroupingStrategy::UNUM_GROUPING_OFF);
+ }
+
+ if (FLAG_harmony_intl_numberformat_unified) {
+ // Let signDisplay be ? GetOption(options, "signDisplay", "string", «
+ // "auto", "never", "always", "except-zero" », "auto").
+ Maybe<SignDisplay> maybe_sign_display = Intl::GetStringOption<SignDisplay>(
+ isolate, options, "signDisplay", service,
+ {"auto", "never", "always", "except-zero"},
+ {SignDisplay::AUTO, SignDisplay::NEVER, SignDisplay::ALWAYS,
+ SignDisplay::EXCEPT_ZERO},
+ SignDisplay::AUTO);
+ MAYBE_RETURN(maybe_sign_display, MaybeHandle<JSNumberFormat>());
+ SignDisplay sign_display = maybe_sign_display.FromJust();
+
+ // The default sign in ICU is UNUM_SIGN_AUTO which is mapped from
+ // SignDisplay::AUTO and CurrencySign::STANDARD so we can skip setting
+ // under that values for optimization.
+ if (sign_display != SignDisplay::AUTO ||
+ currency_sign != CurrencySign::STANDARD) {
+ icu_number_formatter = icu_number_formatter.sign(
+ ToUNumberSignDisplay(sign_display, currency_sign));
+ }
+ }
// 25. Let dataLocaleData be localeData.[[<dataLocale>]].
//
@@ -461,64 +1238,51 @@ MaybeHandle<JSNumberFormat> JSNumberFormat::Initialize(
//
// 30. Set numberFormat.[[NegativePattern]] to
// stylePatterns.[[negativePattern]].
-
- Handle<Managed<icu::NumberFormat>> managed_number_format =
- Managed<icu::NumberFormat>::FromUniquePtr(isolate, 0,
- std::move(icu_number_format));
- number_format->set_icu_number_format(*managed_number_format);
+ //
+ Handle<Managed<icu::number::LocalizedNumberFormatter>>
+ managed_number_formatter =
+ Managed<icu::number::LocalizedNumberFormatter>::FromRawPtr(
+ isolate, 0,
+ new icu::number::LocalizedNumberFormatter(icu_number_formatter));
+ number_format->set_icu_number_formatter(*managed_number_formatter);
number_format->set_bound_format(*factory->undefined_value());
// 31. Return numberFormat.
return number_format;
}
-Handle<String> JSNumberFormat::StyleAsString() const {
- switch (style()) {
- case Style::DECIMAL:
- return GetReadOnlyRoots().decimal_string_handle();
- case Style::PERCENT:
- return GetReadOnlyRoots().percent_string_handle();
- case Style::CURRENCY:
- return GetReadOnlyRoots().currency_string_handle();
- case Style::COUNT:
- UNREACHABLE();
- }
-}
-
-Handle<String> JSNumberFormat::CurrencyDisplayAsString() const {
- switch (currency_display()) {
- case CurrencyDisplay::CODE:
- return GetReadOnlyRoots().code_string_handle();
- case CurrencyDisplay::SYMBOL:
- return GetReadOnlyRoots().symbol_string_handle();
- case CurrencyDisplay::NAME:
- return GetReadOnlyRoots().name_string_handle();
- case CurrencyDisplay::COUNT:
- UNREACHABLE();
- }
-}
-
namespace {
Maybe<icu::UnicodeString> IcuFormatNumber(
- Isolate* isolate, const icu::NumberFormat& number_format,
+ Isolate* isolate,
+ const icu::number::LocalizedNumberFormatter& number_format,
Handle<Object> numeric_obj, icu::FieldPositionIterator* fp_iter) {
- icu::UnicodeString result;
// If it is BigInt, handle it differently.
UErrorCode status = U_ZERO_ERROR;
+ icu::number::FormattedNumber formatted;
if (numeric_obj->IsBigInt()) {
Handle<BigInt> big_int = Handle<BigInt>::cast(numeric_obj);
Handle<String> big_int_string;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, big_int_string,
BigInt::ToString(isolate, big_int),
Nothing<icu::UnicodeString>());
- number_format.format(
- {big_int_string->ToCString().get(), big_int_string->length()}, result,
- fp_iter, status);
+ formatted = number_format.formatDecimal(
+ {big_int_string->ToCString().get(), big_int_string->length()}, status);
} else {
double number = numeric_obj->Number();
- number_format.format(number, result, fp_iter, status);
+ formatted = number_format.formatDouble(number, status);
}
if (U_FAILURE(status)) {
+ // This happen because of icu data trimming trim out "unit".
+ // See https://bugs.chromium.org/p/v8/issues/detail?id=8641
+ THROW_NEW_ERROR_RETURN_VALUE(isolate,
+ NewTypeError(MessageTemplate::kIcuError),
+ Nothing<icu::UnicodeString>());
+ }
+ if (fp_iter) {
+ formatted.getAllFieldPositions(*fp_iter, status);
+ }
+ icu::UnicodeString result = formatted.toString(status);
+ if (U_FAILURE(status)) {
THROW_NEW_ERROR_RETURN_VALUE(isolate,
NewTypeError(MessageTemplate::kIcuError),
Nothing<icu::UnicodeString>());
@@ -529,17 +1293,15 @@ Maybe<icu::UnicodeString> IcuFormatNumber(
} // namespace
MaybeHandle<String> JSNumberFormat::FormatNumeric(
- Isolate* isolate, const icu::NumberFormat& number_format,
+ Isolate* isolate,
+ const icu::number::LocalizedNumberFormatter& number_format,
Handle<Object> numeric_obj) {
DCHECK(numeric_obj->IsNumeric());
Maybe<icu::UnicodeString> maybe_format =
IcuFormatNumber(isolate, number_format, numeric_obj, nullptr);
MAYBE_RETURN(maybe_format, Handle<String>());
- icu::UnicodeString result = maybe_format.FromJust();
-
- return isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length()));
+ return Intl::ToString(isolate, maybe_format.FromJust());
}
namespace {
@@ -651,19 +1413,12 @@ std::vector<NumberFormatSpan> FlattenRegionsToParts(
return out_parts;
}
-Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
- Handle<JSArray> result,
- int start_index,
- const icu::NumberFormat& number_format,
- Handle<Object> numeric_obj,
- Handle<String> unit) {
+namespace {
+Maybe<int> ConstructParts(Isolate* isolate, const icu::UnicodeString& formatted,
+ icu::FieldPositionIterator* fp_iter,
+ Handle<JSArray> result, int start_index,
+ Handle<Object> numeric_obj, Handle<String> unit) {
DCHECK(numeric_obj->IsNumeric());
- icu::FieldPositionIterator fp_iter;
- Maybe<icu::UnicodeString> maybe_format =
- IcuFormatNumber(isolate, number_format, numeric_obj, &fp_iter);
- MAYBE_RETURN(maybe_format, Nothing<int>());
- icu::UnicodeString formatted = maybe_format.FromJust();
-
int32_t length = formatted.length();
int index = start_index;
if (length == 0) return Just(index);
@@ -677,7 +1432,7 @@ Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
{
icu::FieldPosition fp;
- while (fp_iter.next(fp)) {
+ while (fp_iter->next(fp)) {
regions.push_back(NumberFormatSpan(fp.getField(), fp.getBeginIndex(),
fp.getEndIndex()));
}
@@ -708,18 +1463,26 @@ Maybe<int> JSNumberFormat::FormatToParts(Isolate* isolate,
return Just(index);
}
+} // namespace
+
MaybeHandle<JSArray> JSNumberFormat::FormatToParts(
Isolate* isolate, Handle<JSNumberFormat> number_format,
Handle<Object> numeric_obj) {
CHECK(numeric_obj->IsNumeric());
Factory* factory = isolate->factory();
- icu::NumberFormat* fmt = number_format->icu_number_format()->raw();
+ icu::number::LocalizedNumberFormatter* fmt =
+ number_format->icu_number_formatter().raw();
CHECK_NOT_NULL(fmt);
- Handle<JSArray> result = factory->NewJSArray(0);
+ icu::FieldPositionIterator fp_iter;
+ Maybe<icu::UnicodeString> maybe_format =
+ IcuFormatNumber(isolate, *fmt, numeric_obj, &fp_iter);
+ MAYBE_RETURN(maybe_format, Handle<JSArray>());
- Maybe<int> maybe_format_to_parts = JSNumberFormat::FormatToParts(
- isolate, result, 0, *fmt, numeric_obj, Handle<String>());
+ Handle<JSArray> result = factory->NewJSArray(0);
+ Maybe<int> maybe_format_to_parts =
+ ConstructParts(isolate, maybe_format.FromJust(), &fp_iter, result, 0,
+ numeric_obj, Handle<String>());
MAYBE_RETURN(maybe_format_to_parts, Handle<JSArray>());
return result;
diff --git a/deps/v8/src/objects/js-number-format.h b/deps/v8/src/objects/js-number-format.h
index 6857989c22..6c59e76f7a 100644
--- a/deps/v8/src/objects/js-number-format.h
+++ b/deps/v8/src/objects/js-number-format.h
@@ -12,17 +12,19 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
+#include "unicode/numberformatter.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace U_ICU_NAMESPACE {
class NumberFormat;
+class UnicodeString;
} // namespace U_ICU_NAMESPACE
namespace v8 {
@@ -47,86 +49,55 @@ class JSNumberFormat : public JSObject {
Isolate* isolate, Handle<JSNumberFormat> number_format,
Handle<Object> numeric_obj);
- // A utility function used by the above JSNumberFormat::FormatToParts()
- // and JSRelativeTimeFormat::FormatToParts().
- // Format the number by using the icu::NumberFormat to get the field
- // information. It add an object into the result array, starting from the
- // start_index and return the total number of elements in the result array.
- // For each object added as element, it set the substring of the field as
- // "value", the field type as "type". If the unit is not null, it also set
- // unit as "unit" to each added object.
- V8_WARN_UNUSED_RESULT static Maybe<int> FormatToParts(
- Isolate* isolate, Handle<JSArray> result, int start_index,
- const icu::NumberFormat& fmt, Handle<Object> numeric_obj,
- Handle<String> unit);
-
V8_WARN_UNUSED_RESULT static MaybeHandle<String> FormatNumeric(
- Isolate* isolate, const icu::NumberFormat& number_format,
+ Isolate* isolate,
+ const icu::number::LocalizedNumberFormatter& number_format,
Handle<Object> numeric_obj);
V8_EXPORT_PRIVATE static const std::set<std::string>& GetAvailableLocales();
- Handle<String> StyleAsString() const;
- Handle<String> CurrencyDisplayAsString() const;
-
DECL_CAST(JSNumberFormat)
DECL_PRINTER(JSNumberFormat)
DECL_VERIFIER(JSNumberFormat)
- // [[Style]] is one of the values "decimal", "percent" or "currency",
- // identifying the style of the number format.
- enum class Style {
- DECIMAL,
- PERCENT,
- CURRENCY,
-
- COUNT
- };
- inline void set_style(Style style);
- inline Style style() const;
-
- // [[CurrencyDisplay]] is one of the values "code", "symbol" or "name",
- // identifying the display of the currency number format.
- enum class CurrencyDisplay {
- CODE,
- SYMBOL,
- NAME,
-
- COUNT
- };
- inline void set_currency_display(CurrencyDisplay currency_display);
- inline CurrencyDisplay currency_display() const;
-
-// Layout description.
-#define JS_NUMBER_FORMAT_FIELDS(V) \
- V(kLocaleOffset, kTaggedSize) \
- V(kICUNumberFormatOffset, kTaggedSize) \
- V(kBoundFormatOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_NUMBER_FORMAT_FIELDS)
-#undef JS_NUMBER_FORMAT_FIELDS
+ // Current ECMA 402 spec mandates to record (Min|Max)imumFractionDigits
+ // unconditionally while the unified number proposal eventually will only
+ // record either (Min|Max)imumFractionDigits or (Min|Max)imumSignaficantDigits
+ // Since LocalizedNumberFormatter can only remember one set, and during
+ // 2019-1-17 ECMA402 meeting that the committee decide not to take a PR to
+ // address that prior to the unified number proposal, we have to add these two
+ // 5 bits int into flags to remember the (Min|Max)imumFractionDigits while
+ // (Min|Max)imumSignaficantDigits is present.
+ // TODO(ftang) remove the following once we ship int-number-format-unified
+ // * Four inline functions: (set_)?(min|max)imum_fraction_digits
+ // * kFlagsOffset
+ // * #define FLAGS_BIT_FIELDS
+ // * DECL_INT_ACCESSORS(flags)
+
+ inline int minimum_fraction_digits() const;
+ inline void set_minimum_fraction_digits(int digits);
+
+ inline int maximum_fraction_digits() const;
+ inline void set_maximum_fraction_digits(int digits);
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSNUMBER_FORMAT_FIELDS)
// Bit positions in |flags|.
-#define FLAGS_BIT_FIELDS(V, _) \
- V(StyleBits, Style, 2, _) \
- V(CurrencyDisplayBits, CurrencyDisplay, 2, _)
+#define FLAGS_BIT_FIELDS(V, _) \
+ V(MinimumFractionDigitsBits, int, 5, _) \
+ V(MaximumFractionDigitsBits, int, 5, _)
DEFINE_BIT_FIELDS(FLAGS_BIT_FIELDS)
#undef FLAGS_BIT_FIELDS
- STATIC_ASSERT(Style::DECIMAL <= StyleBits::kMax);
- STATIC_ASSERT(Style::PERCENT <= StyleBits::kMax);
- STATIC_ASSERT(Style::CURRENCY <= StyleBits::kMax);
-
- STATIC_ASSERT(CurrencyDisplay::CODE <= CurrencyDisplayBits::kMax);
- STATIC_ASSERT(CurrencyDisplay::SYMBOL <= CurrencyDisplayBits::kMax);
- STATIC_ASSERT(CurrencyDisplay::NAME <= CurrencyDisplayBits::kMax);
+ STATIC_ASSERT(20 <= MinimumFractionDigitsBits::kMax);
+ STATIC_ASSERT(20 <= MaximumFractionDigitsBits::kMax);
DECL_ACCESSORS(locale, String)
- DECL_ACCESSORS(icu_number_format, Managed<icu::NumberFormat>)
+ DECL_ACCESSORS(icu_number_formatter,
+ Managed<icu::number::LocalizedNumberFormatter>)
DECL_ACCESSORS(bound_format, Object)
DECL_INT_ACCESSORS(flags)
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index bf7076b517..6b7a7d72f0 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -7,20 +7,20 @@
#include "src/objects/js-objects.h"
-#include "src/feedback-vector.h"
-#include "src/field-index-inl.h"
#include "src/heap/heap-write-barrier.h"
-#include "src/keys.h"
-#include "src/lookup-inl.h"
#include "src/objects/embedder-data-slot-inl.h"
#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/field-index-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/keys.h"
+#include "src/objects/lookup-inl.h"
#include "src/objects/property-array-inl.h"
+#include "src/objects/prototype-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots.h"
#include "src/objects/smi-inl.h"
-#include "src/prototype-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -109,21 +109,21 @@ V8_WARN_UNUSED_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject object) {
DisallowHeapAllocation no_gc;
- HeapObject prototype = HeapObject::cast(object->map()->prototype());
+ HeapObject prototype = HeapObject::cast(object.map().prototype());
ReadOnlyRoots roots(isolate);
HeapObject null = roots.null_value();
FixedArrayBase empty_fixed_array = roots.empty_fixed_array();
FixedArrayBase empty_slow_element_dictionary =
roots.empty_slow_element_dictionary();
while (prototype != null) {
- Map map = prototype->map();
- if (map->IsCustomElementsReceiverMap()) return false;
- FixedArrayBase elements = JSObject::cast(prototype)->elements();
+ Map map = prototype.map();
+ if (map.IsCustomElementsReceiverMap()) return false;
+ FixedArrayBase elements = JSObject::cast(prototype).elements();
if (elements != empty_fixed_array &&
elements != empty_slow_element_dictionary) {
return false;
}
- prototype = HeapObject::cast(map->prototype());
+ prototype = HeapObject::cast(map.prototype());
}
return true;
}
@@ -137,7 +137,7 @@ FixedArrayBase JSObject::elements() const {
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
JSObject::ValidateElements(*object);
- ElementsKind elements_kind = object->map()->elements_kind();
+ ElementsKind elements_kind = object->map().elements_kind();
if (!IsObjectElementsKind(elements_kind)) {
if (IsHoleyElementsKind(elements_kind)) {
TransitionElementsKind(object, HOLEY_ELEMENTS);
@@ -167,8 +167,8 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object, TSlot objects,
if (current == the_hole) {
is_holey = true;
target_kind = GetHoleyElementsKind(target_kind);
- } else if (!current->IsSmi()) {
- if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
+ } else if (!current.IsSmi()) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current.IsNumber()) {
if (IsSmiElementsKind(target_kind)) {
if (is_holey) {
target_kind = HOLEY_DOUBLE_ELEMENTS;
@@ -226,13 +226,13 @@ void JSObject::EnsureCanContainElements(Handle<JSObject> object,
void JSObject::SetMapAndElements(Handle<JSObject> object, Handle<Map> new_map,
Handle<FixedArrayBase> value) {
JSObject::MigrateToMap(object, new_map);
- DCHECK((object->map()->has_fast_smi_or_object_elements() ||
+ DCHECK((object->map().has_fast_smi_or_object_elements() ||
(*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
- object->map()->has_fast_string_wrapper_elements()) ==
+ object->map().has_fast_string_wrapper_elements()) ==
(value->map() == object->GetReadOnlyRoots().fixed_array_map() ||
value->map() == object->GetReadOnlyRoots().fixed_cow_array_map()));
DCHECK((*value == object->GetReadOnlyRoots().empty_fixed_array()) ||
- (object->map()->has_fast_double_elements() ==
+ (object->map().has_fast_double_elements() ==
value->IsFixedDoubleArray()));
object->set_elements(*value);
}
@@ -243,16 +243,16 @@ void JSObject::set_elements(FixedArrayBase value, WriteBarrierMode mode) {
}
void JSObject::initialize_elements() {
- FixedArrayBase elements = map()->GetInitialElements();
+ FixedArrayBase elements = map().GetInitialElements();
WRITE_FIELD(*this, kElementsOffset, elements);
}
InterceptorInfo JSObject::GetIndexedInterceptor() {
- return map()->GetIndexedInterceptor();
+ return map().GetIndexedInterceptor();
}
InterceptorInfo JSObject::GetNamedInterceptor() {
- return map()->GetNamedInterceptor();
+ return map().GetNamedInterceptor();
}
int JSObject::GetHeaderSize() const { return GetHeaderSize(map()); }
@@ -261,10 +261,10 @@ int JSObject::GetHeaderSize(const Map map) {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
- InstanceType instance_type = map->instance_type();
+ InstanceType instance_type = map.instance_type();
return instance_type == JS_OBJECT_TYPE
? JSObject::kHeaderSize
- : GetHeaderSize(instance_type, map->has_prototype_slot());
+ : GetHeaderSize(instance_type, map.has_prototype_slot());
}
// static
@@ -279,7 +279,7 @@ int JSObject::GetEmbedderFieldsStartOffset() {
// static
int JSObject::GetEmbedderFieldCount(const Map map) {
- int instance_size = map->instance_size();
+ int instance_size = map.instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
// Embedder fields are located after the object header, whereas in-object
// properties are located at the end of the object. We don't have to round up
@@ -288,7 +288,7 @@ int JSObject::GetEmbedderFieldCount(const Map map) {
// kSystemPointerSize) anyway.
return (((instance_size - GetEmbedderFieldsStartOffset(map)) >>
kTaggedSizeLog2) -
- map->GetInObjectProperties()) /
+ map.GetInObjectProperties()) /
kEmbedderDataSlotSizeInTaggedSlots;
}
@@ -316,7 +316,7 @@ void JSObject::SetEmbedderField(int index, Smi value) {
bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
if (!FLAG_unbox_double_fields) return false;
- return map()->IsUnboxedDoubleField(index);
+ return map().IsUnboxedDoubleField(index);
}
// Access fast-case object properties at index. The use of these routines
@@ -327,27 +327,35 @@ Object JSObject::RawFastPropertyAt(FieldIndex index) {
if (index.is_inobject()) {
return READ_FIELD(*this, index.offset());
} else {
- return property_array()->get(index.outobject_array_index());
+ return property_array().get(index.outobject_array_index());
}
}
double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
DCHECK(IsUnboxedDoubleField(index));
- return READ_DOUBLE_FIELD(*this, index.offset());
+ return ReadField<double>(index.offset());
}
uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
DCHECK(IsUnboxedDoubleField(index));
- return READ_UINT64_FIELD(*this, index.offset());
+ return ReadField<uint64_t>(index.offset());
}
-void JSObject::RawFastPropertyAtPut(FieldIndex index, Object value) {
+void JSObject::RawFastInobjectPropertyAtPut(FieldIndex index, Object value,
+ WriteBarrierMode mode) {
+ DCHECK(index.is_inobject());
+ int offset = index.offset();
+ WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void JSObject::RawFastPropertyAtPut(FieldIndex index, Object value,
+ WriteBarrierMode mode) {
if (index.is_inobject()) {
- int offset = index.offset();
- WRITE_FIELD(*this, offset, value);
- WRITE_BARRIER(*this, offset, value);
+ RawFastInobjectPropertyAtPut(index, value, mode);
} else {
- property_array()->set(index.outobject_array_index(), value);
+ DCHECK_EQ(UPDATE_WRITE_BARRIER, mode);
+ property_array().set(index.outobject_array_index(), value);
}
}
@@ -363,10 +371,10 @@ void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
void JSObject::FastPropertyAtPut(FieldIndex index, Object value) {
if (IsUnboxedDoubleField(index)) {
- DCHECK(value->IsMutableHeapNumber());
+ DCHECK(value.IsMutableHeapNumber());
// Ensure that all bits of the double value are preserved.
RawFastDoublePropertyAsBitsAtPut(
- index, MutableHeapNumber::cast(value)->value_as_bits());
+ index, MutableHeapNumber::cast(value).value_as_bits());
} else {
RawFastPropertyAtPut(index, value);
}
@@ -379,26 +387,24 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
DisallowHeapAllocation no_gc;
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
if (details.representation().IsDouble()) {
- // Nothing more to be done.
- if (value->IsUninitialized()) {
- return;
- }
// Manipulating the signaling NaN used for the hole and uninitialized
// double field sentinel in C++, e.g. with bit_cast or value()/set_value(),
// will change its value on ia32 (the x87 stack is used to return values
// and stores to the stack silently clear the signalling bit).
uint64_t bits;
- if (value->IsSmi()) {
+ if (value.IsSmi()) {
bits = bit_cast<uint64_t>(static_cast<double>(Smi::ToInt(value)));
+ } else if (value.IsUninitialized()) {
+ bits = kHoleNanInt64;
} else {
- DCHECK(value->IsHeapNumber());
- bits = HeapNumber::cast(value)->value_as_bits();
+ DCHECK(value.IsHeapNumber());
+ bits = HeapNumber::cast(value).value_as_bits();
}
if (IsUnboxedDoubleField(index)) {
RawFastDoublePropertyAsBitsAtPut(index, bits);
} else {
auto box = MutableHeapNumber::cast(RawFastPropertyAt(index));
- box->set_value_as_bits(bits);
+ box.set_value_as_bits(bits);
}
} else {
RawFastPropertyAtPut(index, value);
@@ -406,7 +412,7 @@ void JSObject::WriteToField(int descriptor, PropertyDetails details,
}
int JSObject::GetInObjectPropertyOffset(int index) {
- return map()->GetInObjectPropertyOffset(index);
+ return map().GetInObjectPropertyOffset(index);
}
Object JSObject::InObjectPropertyAt(int index) {
@@ -425,15 +431,15 @@ Object JSObject::InObjectPropertyAtPut(int index, Object value,
void JSObject::InitializeBody(Map map, int start_offset,
Object pre_allocated_value, Object filler_value) {
- DCHECK_IMPLIES(filler_value->IsHeapObject(),
+ DCHECK_IMPLIES(filler_value.IsHeapObject(),
!ObjectInYoungGeneration(filler_value));
- DCHECK_IMPLIES(pre_allocated_value->IsHeapObject(),
+ DCHECK_IMPLIES(pre_allocated_value.IsHeapObject(),
!ObjectInYoungGeneration(pre_allocated_value));
- int size = map->instance_size();
+ int size = map.instance_size();
int offset = start_offset;
if (filler_value != pre_allocated_value) {
int end_of_pre_allocated_offset =
- size - (map->UnusedPropertyFields() * kTaggedSize);
+ size - (map.UnusedPropertyFields() * kTaggedSize);
DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
while (offset < end_of_pre_allocated_offset) {
WRITE_FIELD(*this, offset, pre_allocated_value);
@@ -464,12 +470,12 @@ ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
FeedbackVector JSFunction::feedback_vector() const {
DCHECK(has_feedback_vector());
- return FeedbackVector::cast(raw_feedback_cell()->value());
+ return FeedbackVector::cast(raw_feedback_cell().value());
}
ClosureFeedbackCellArray JSFunction::closure_feedback_cell_array() const {
DCHECK(has_closure_feedback_cell_array());
- return ClosureFeedbackCellArray::cast(raw_feedback_cell()->value());
+ return ClosureFeedbackCellArray::cast(raw_feedback_cell().value());
}
// Code objects that are marked for deoptimization are not considered to be
@@ -479,69 +485,69 @@ ClosureFeedbackCellArray JSFunction::closure_feedback_cell_array() const {
// TODO(jupvfranco): rename this function. Maybe RunOptimizedCode,
// or IsValidOptimizedCode.
bool JSFunction::IsOptimized() {
- return is_compiled() && code()->kind() == Code::OPTIMIZED_FUNCTION &&
- !code()->marked_for_deoptimization();
+ return is_compiled() && code().kind() == Code::OPTIMIZED_FUNCTION &&
+ !code().marked_for_deoptimization();
}
bool JSFunction::HasOptimizedCode() {
return IsOptimized() ||
- (has_feedback_vector() && feedback_vector()->has_optimized_code() &&
- !feedback_vector()->optimized_code()->marked_for_deoptimization());
+ (has_feedback_vector() && feedback_vector().has_optimized_code() &&
+ !feedback_vector().optimized_code().marked_for_deoptimization());
}
bool JSFunction::HasOptimizationMarker() {
- return has_feedback_vector() && feedback_vector()->has_optimization_marker();
+ return has_feedback_vector() && feedback_vector().has_optimization_marker();
}
void JSFunction::ClearOptimizationMarker() {
DCHECK(has_feedback_vector());
- feedback_vector()->ClearOptimizationMarker();
+ feedback_vector().ClearOptimizationMarker();
}
// Optimized code marked for deoptimization will tier back down to running
// interpreted on its next activation, and already doesn't count as IsOptimized.
bool JSFunction::IsInterpreted() {
- return is_compiled() && (code()->is_interpreter_trampoline_builtin() ||
- (code()->kind() == Code::OPTIMIZED_FUNCTION &&
- code()->marked_for_deoptimization()));
+ return is_compiled() && (code().is_interpreter_trampoline_builtin() ||
+ (code().kind() == Code::OPTIMIZED_FUNCTION &&
+ code().marked_for_deoptimization()));
}
bool JSFunction::ChecksOptimizationMarker() {
- return code()->checks_optimization_marker();
+ return code().checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
- return has_feedback_vector() && feedback_vector()->optimization_marker() ==
+ return has_feedback_vector() && feedback_vector().optimization_marker() ==
OptimizationMarker::kCompileOptimized;
}
bool JSFunction::IsMarkedForConcurrentOptimization() {
return has_feedback_vector() &&
- feedback_vector()->optimization_marker() ==
+ feedback_vector().optimization_marker() ==
OptimizationMarker::kCompileOptimizedConcurrent;
}
bool JSFunction::IsInOptimizationQueue() {
- return has_feedback_vector() && feedback_vector()->optimization_marker() ==
+ return has_feedback_vector() && feedback_vector().optimization_marker() ==
OptimizationMarker::kInOptimizationQueue;
}
void JSFunction::CompleteInobjectSlackTrackingIfActive() {
if (!has_prototype_slot()) return;
- if (has_initial_map() && initial_map()->IsInobjectSlackTrackingInProgress()) {
- initial_map()->CompleteInobjectSlackTracking(GetIsolate());
+ if (has_initial_map() && initial_map().IsInobjectSlackTrackingInProgress()) {
+ initial_map().CompleteInobjectSlackTracking(GetIsolate());
}
}
AbstractCode JSFunction::abstract_code() {
if (IsInterpreted()) {
- return AbstractCode::cast(shared()->GetBytecodeArray());
+ return AbstractCode::cast(shared().GetBytecodeArray());
} else {
return AbstractCode::cast(code());
}
}
-int JSFunction::length() { return shared()->length(); }
+int JSFunction::length() { return shared().length(); }
Code JSFunction::code() const {
return Code::cast(RELAXED_READ_FIELD(*this, kCodeOffset));
@@ -570,14 +576,14 @@ void JSFunction::set_shared(SharedFunctionInfo value, WriteBarrierMode mode) {
}
void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
- if (has_feedback_vector() && feedback_vector()->has_optimized_code()) {
+ if (has_feedback_vector() && feedback_vector().has_optimized_code()) {
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code feedback slot (%s) for ",
reason);
ShortPrint();
PrintF("]\n");
}
- feedback_vector()->ClearOptimizedCode();
+ feedback_vector().ClearOptimizedCode();
}
}
@@ -586,17 +592,17 @@ void JSFunction::SetOptimizationMarker(OptimizationMarker marker) {
DCHECK(ChecksOptimizationMarker());
DCHECK(!HasOptimizedCode());
- feedback_vector()->SetOptimizationMarker(marker);
+ feedback_vector().SetOptimizationMarker(marker);
}
bool JSFunction::has_feedback_vector() const {
- return shared()->is_compiled() &&
- raw_feedback_cell()->value()->IsFeedbackVector();
+ return shared().is_compiled() &&
+ raw_feedback_cell().value().IsFeedbackVector();
}
bool JSFunction::has_closure_feedback_cell_array() const {
- return shared()->is_compiled() &&
- raw_feedback_cell()->value()->IsClosureFeedbackCellArray();
+ return shared().is_compiled() &&
+ raw_feedback_cell().value().IsClosureFeedbackCellArray();
}
Context JSFunction::context() {
@@ -604,57 +610,57 @@ Context JSFunction::context() {
}
bool JSFunction::has_context() const {
- return READ_FIELD(*this, kContextOffset)->IsContext();
+ return READ_FIELD(*this, kContextOffset).IsContext();
}
-JSGlobalProxy JSFunction::global_proxy() { return context()->global_proxy(); }
+JSGlobalProxy JSFunction::global_proxy() { return context().global_proxy(); }
NativeContext JSFunction::native_context() {
- return context()->native_context();
+ return context().native_context();
}
void JSFunction::set_context(Object value) {
- DCHECK(value->IsUndefined() || value->IsContext());
+ DCHECK(value.IsUndefined() || value.IsContext());
WRITE_FIELD(*this, kContextOffset, value);
WRITE_BARRIER(*this, kContextOffset, value);
}
ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, Object,
- kPrototypeOrInitialMapOffset, map()->has_prototype_slot())
+ kPrototypeOrInitialMapOffset, map().has_prototype_slot())
bool JSFunction::has_prototype_slot() const {
- return map()->has_prototype_slot();
+ return map().has_prototype_slot();
}
Map JSFunction::initial_map() { return Map::cast(prototype_or_initial_map()); }
bool JSFunction::has_initial_map() {
DCHECK(has_prototype_slot());
- return prototype_or_initial_map()->IsMap();
+ return prototype_or_initial_map().IsMap();
}
bool JSFunction::has_instance_prototype() {
DCHECK(has_prototype_slot());
- return has_initial_map() || !prototype_or_initial_map()->IsTheHole();
+ return has_initial_map() || !prototype_or_initial_map().IsTheHole();
}
bool JSFunction::has_prototype() {
DCHECK(has_prototype_slot());
- return map()->has_non_instance_prototype() || has_instance_prototype();
+ return map().has_non_instance_prototype() || has_instance_prototype();
}
bool JSFunction::has_prototype_property() {
return (has_prototype_slot() && IsConstructor()) ||
- IsGeneratorFunction(shared()->kind());
+ IsGeneratorFunction(shared().kind());
}
bool JSFunction::PrototypeRequiresRuntimeLookup() {
- return !has_prototype_property() || map()->has_non_instance_prototype();
+ return !has_prototype_property() || map().has_non_instance_prototype();
}
HeapObject JSFunction::instance_prototype() {
DCHECK(has_instance_prototype());
- if (has_initial_map()) return initial_map()->prototype();
+ if (has_initial_map()) return initial_map().prototype();
// When there is no initial map and the prototype is a JSReceiver, the
// initial map field is used for the prototype field.
return HeapObject::cast(prototype_or_initial_map());
@@ -664,19 +670,19 @@ Object JSFunction::prototype() {
DCHECK(has_prototype());
// If the function's prototype property has been set to a non-JSReceiver
// value, that value is stored in the constructor field of the map.
- if (map()->has_non_instance_prototype()) {
- Object prototype = map()->GetConstructor();
+ if (map().has_non_instance_prototype()) {
+ Object prototype = map().GetConstructor();
// The map must have a prototype in that field, not a back pointer.
- DCHECK(!prototype->IsMap());
- DCHECK(!prototype->IsFunctionTemplateInfo());
+ DCHECK(!prototype.IsMap());
+ DCHECK(!prototype.IsFunctionTemplateInfo());
return prototype;
}
return instance_prototype();
}
bool JSFunction::is_compiled() const {
- return code()->builtin_index() != Builtins::kCompileLazy &&
- shared()->is_compiled();
+ return code().builtin_index() != Builtins::kCompileLazy &&
+ shared().is_compiled();
}
bool JSFunction::NeedsResetDueToFlushedBytecode() {
@@ -686,14 +692,14 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
Object maybe_shared = ACQUIRE_READ_FIELD(*this, kSharedFunctionInfoOffset);
Object maybe_code = RELAXED_READ_FIELD(*this, kCodeOffset);
- if (!maybe_shared->IsSharedFunctionInfo() || !maybe_code->IsCode()) {
+ if (!maybe_shared.IsSharedFunctionInfo() || !maybe_code.IsCode()) {
return false;
}
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
Code code = Code::cast(maybe_code);
- return !shared->is_compiled() &&
- code->builtin_index() != Builtins::kCompileLazy;
+ return !shared.is_compiled() &&
+ code.builtin_index() != Builtins::kCompileLazy;
}
void JSFunction::ResetIfBytecodeFlushed() {
@@ -701,8 +707,7 @@ void JSFunction::ResetIfBytecodeFlushed() {
// Bytecode was flushed and function is now uncompiled, reset JSFunction
// by setting code to CompileLazy and clearing the feedback vector.
set_code(GetIsolate()->builtins()->builtin(i::Builtins::kCompileLazy));
- raw_feedback_cell()->set_value(
- ReadOnlyRoots(GetIsolate()).undefined_value());
+ raw_feedback_cell().reset();
}
}
@@ -718,22 +723,40 @@ ACCESSORS(JSDate, hour, Object, kHourOffset)
ACCESSORS(JSDate, min, Object, kMinOffset)
ACCESSORS(JSDate, sec, Object, kSecOffset)
+bool JSMessageObject::DidEnsureSourcePositionsAvailable() const {
+ return shared_info().IsUndefined();
+}
+
+int JSMessageObject::GetStartPosition() const {
+ DCHECK(DidEnsureSourcePositionsAvailable());
+ return start_position();
+}
+
+int JSMessageObject::GetEndPosition() const {
+ DCHECK(DidEnsureSourcePositionsAvailable());
+ return end_position();
+}
+
MessageTemplate JSMessageObject::type() const {
Object value = READ_FIELD(*this, kMessageTypeOffset);
return MessageTemplateFromInt(Smi::ToInt(value));
}
+
void JSMessageObject::set_type(MessageTemplate value) {
WRITE_FIELD(*this, kMessageTypeOffset, Smi::FromInt(static_cast<int>(value)));
}
+
ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
+ACCESSORS(JSMessageObject, shared_info, HeapObject, kSharedInfoOffset)
+ACCESSORS(JSMessageObject, bytecode_offset, Smi, kBytecodeOffsetOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
ElementsKind JSObject::GetElementsKind() const {
- ElementsKind kind = map()->elements_kind();
+ ElementsKind kind = map().elements_kind();
#if VERIFY_HEAP && DEBUG
FixedArrayBase fixed_array =
FixedArrayBase::unchecked_cast(READ_FIELD(*this, kElementsOffset));
@@ -741,22 +764,21 @@ ElementsKind JSObject::GetElementsKind() const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
- Map map = fixed_array->map();
+ Map map = fixed_array.map();
if (IsSmiOrObjectElementsKind(kind)) {
DCHECK(map == GetReadOnlyRoots().fixed_array_map() ||
map == GetReadOnlyRoots().fixed_cow_array_map());
} else if (IsDoubleElementsKind(kind)) {
- DCHECK(fixed_array->IsFixedDoubleArray() ||
+ DCHECK(fixed_array.IsFixedDoubleArray() ||
fixed_array == GetReadOnlyRoots().empty_fixed_array());
} else if (kind == DICTIONARY_ELEMENTS) {
- DCHECK(fixed_array->IsFixedArray());
- DCHECK(fixed_array->IsDictionary());
+ DCHECK(fixed_array.IsFixedArray());
+ DCHECK(fixed_array.IsNumberDictionary());
} else {
- DCHECK(kind > DICTIONARY_ELEMENTS ||
- IsPackedFrozenOrSealedElementsKind(kind));
+ DCHECK(kind > DICTIONARY_ELEMENTS || IsFrozenOrSealedElementsKind(kind));
}
DCHECK(!IsSloppyArgumentsElementsKind(kind) ||
- (elements()->IsFixedArray() && elements()->length() >= 2));
+ (elements().IsFixedArray() && elements().length() >= 2));
}
#endif
return kind;
@@ -797,7 +819,11 @@ bool JSObject::HasPackedElements() {
}
bool JSObject::HasFrozenOrSealedElements() {
- return IsPackedFrozenOrSealedElementsKind(GetElementsKind());
+ return IsFrozenOrSealedElementsKind(GetElementsKind());
+}
+
+bool JSObject::HasSealedElements() {
+ return IsSealedElementsKind(GetElementsKind());
}
bool JSObject::HasFastArgumentsElements() {
@@ -824,25 +850,24 @@ bool JSObject::HasSlowStringWrapperElements() {
return GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS;
}
-bool JSObject::HasFixedTypedArrayElements() {
+bool JSObject::HasTypedArrayElements() {
DCHECK(!elements().is_null());
- return map()->has_fixed_typed_array_elements();
+ return map().has_typed_array_elements();
}
-#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
- bool JSObject::HasFixed##Type##Elements() { \
- FixedArrayBase array = elements(); \
- return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype) \
+ bool JSObject::HasFixed##Type##Elements() { \
+ return map().elements_kind() == TYPE##_ELEMENTS; \
}
TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
#undef FIXED_TYPED_ELEMENTS_CHECK
-bool JSObject::HasNamedInterceptor() { return map()->has_named_interceptor(); }
+bool JSObject::HasNamedInterceptor() { return map().has_named_interceptor(); }
bool JSObject::HasIndexedInterceptor() {
- return map()->has_indexed_interceptor();
+ return map().has_indexed_interceptor();
}
void JSGlobalObject::set_global_dictionary(GlobalDictionary dictionary) {
@@ -865,7 +890,7 @@ void JSReceiver::initialize_properties() {
ReadOnlyRoots roots = GetReadOnlyRoots();
DCHECK(!ObjectInYoungGeneration(roots.empty_fixed_array()));
DCHECK(!ObjectInYoungGeneration(roots.empty_property_dictionary()));
- if (map()->is_dictionary_map()) {
+ if (map().is_dictionary_map()) {
WRITE_FIELD(*this, kPropertiesOrHashOffset,
roots.empty_property_dictionary());
} else {
@@ -874,10 +899,11 @@ void JSReceiver::initialize_properties() {
}
bool JSReceiver::HasFastProperties() const {
- DCHECK(
- raw_properties_or_hash()->IsSmi() ||
- (raw_properties_or_hash()->IsDictionary() == map()->is_dictionary_map()));
- return !map()->is_dictionary_map();
+ DCHECK(raw_properties_or_hash().IsSmi() ||
+ ((raw_properties_or_hash().IsGlobalDictionary() ||
+ raw_properties_or_hash().IsNameDictionary()) ==
+ map().is_dictionary_map()));
+ return !map().is_dictionary_map();
}
NameDictionary JSReceiver::property_dictionary() const {
@@ -885,7 +911,7 @@ NameDictionary JSReceiver::property_dictionary() const {
DCHECK(!HasFastProperties());
Object prop = raw_properties_or_hash();
- if (prop->IsSmi()) {
+ if (prop.IsSmi()) {
return GetReadOnlyRoots().empty_property_dictionary();
}
@@ -898,7 +924,7 @@ PropertyArray JSReceiver::property_array() const {
DCHECK(HasFastProperties());
Object prop = raw_properties_or_hash();
- if (prop->IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
+ if (prop.IsSmi() || prop == GetReadOnlyRoots().empty_fixed_array()) {
return GetReadOnlyRoots().empty_property_array();
}
@@ -969,7 +995,7 @@ Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttributes(
}
bool JSGlobalObject::IsDetached() {
- return global_proxy()->IsDetachedFrom(*this);
+ return global_proxy().IsDetachedFrom(*this);
}
bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject global) const {
@@ -992,6 +1018,17 @@ ACCESSORS(JSAsyncFromSyncIterator, next, Object, kNextOffset)
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
+// If the fast-case backing storage takes up much more memory than a dictionary
+// backing storage would, the object should have slow elements.
+// static
+static inline bool ShouldConvertToSlowElements(uint32_t used_elements,
+ uint32_t new_capacity) {
+ uint32_t size_threshold = NumberDictionary::kPreferFastElementsSizeFactor *
+ NumberDictionary::ComputeCapacity(used_elements) *
+ NumberDictionary::kEntrySize;
+ return size_threshold <= new_capacity;
+}
+
static inline bool ShouldConvertToSlowElements(JSObject object,
uint32_t capacity,
uint32_t index,
@@ -1011,13 +1048,8 @@ static inline bool ShouldConvertToSlowElements(JSObject object,
ObjectInYoungGeneration(object))) {
return false;
}
- // If the fast-case backing storage takes up much more memory than a
- // dictionary backing storage would, the object should have slow elements.
- int used_elements = object->GetFastElementsUsage();
- uint32_t size_threshold = NumberDictionary::kPreferFastElementsSizeFactor *
- NumberDictionary::ComputeCapacity(used_elements) *
- NumberDictionary::kEntrySize;
- return size_threshold <= *new_capacity;
+ return ShouldConvertToSlowElements(object.GetFastElementsUsage(),
+ *new_capacity);
}
} // namespace internal
diff --git a/deps/v8/src/objects/js-objects.cc b/deps/v8/src/objects/js-objects.cc
index 5191f237f6..a0dc33909a 100644
--- a/deps/v8/src/objects/js-objects.cc
+++ b/deps/v8/src/objects/js-objects.cc
@@ -4,31 +4,31 @@
#include "src/objects/js-objects.h"
-#include "src/api-arguments-inl.h"
-#include "src/arguments.h"
-#include "src/bootstrapper.h"
-#include "src/compiler.h"
-#include "src/counters.h"
-#include "src/date.h"
-#include "src/elements.h"
-#include "src/field-type.h"
-#include "src/handles-inl.h"
+#include "src/api/api-arguments-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/date/date.h"
+#include "src/execution/arguments.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/maybe-handles.h"
#include "src/heap/heap-inl.h"
#include "src/ic/ic.h"
-#include "src/isolate.h"
-#include "src/layout-descriptor.h"
-#include "src/log.h"
-#include "src/lookup.h"
-#include "src/maybe-handles.h"
-#include "src/objects-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/dictionary.h"
+#include "src/objects/elements.h"
+#include "src/objects/field-type.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-array-buffer.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/layout-descriptor.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator.h"
#include "src/objects/js-collator.h"
@@ -57,15 +57,15 @@
#include "src/objects/module.h"
#include "src/objects/oddball.h"
#include "src/objects/property-cell.h"
+#include "src/objects/property-descriptor.h"
+#include "src/objects/property.h"
#include "src/objects/prototype-info.h"
+#include "src/objects/prototype.h"
#include "src/objects/shared-function-info.h"
-#include "src/ostreams.h"
-#include "src/property-descriptor.h"
-#include "src/property.h"
-#include "src/prototype.h"
-#include "src/string-builder-inl.h"
-#include "src/string-stream.h"
-#include "src/transitions.h"
+#include "src/objects/transitions.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-stream.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
@@ -191,19 +191,19 @@ V8_WARN_UNUSED_RESULT Maybe<bool> FastAssign(
// Non-empty strings are the only non-JSReceivers that need to be handled
// explicitly by Object.assign.
if (!source->IsJSReceiver()) {
- return Just(!source->IsString() || String::cast(*source)->length() == 0);
+ return Just(!source->IsString() || String::cast(*source).length() == 0);
}
// If the target is deprecated, the object will be updated on first store. If
// the source for that store equals the target, this will invalidate the
// cached representation of the source. Preventively upgrade the target.
// Do this on each iteration since any property load could cause deprecation.
- if (target->map()->is_deprecated()) {
+ if (target->map().is_deprecated()) {
JSObject::MigrateInstance(Handle<JSObject>::cast(target));
}
Isolate* isolate = target->GetIsolate();
- Handle<Map> map(JSReceiver::cast(*source)->map(), isolate);
+ Handle<Map> map(JSReceiver::cast(*source).map(), isolate);
if (!map->IsJSObjectMap()) return Just(false);
if (!map->OnlyHasSimpleProperties()) return Just(false);
@@ -348,7 +348,7 @@ String JSReceiver::class_name() {
if (IsJSArgumentsObject()) return roots.Arguments_string();
if (IsJSArray()) return roots.Array_string();
if (IsJSArrayBuffer()) {
- if (JSArrayBuffer::cast(*this)->is_shared()) {
+ if (JSArrayBuffer::cast(*this).is_shared()) {
return roots.SharedArrayBuffer_string();
}
return roots.ArrayBuffer_string();
@@ -360,45 +360,45 @@ String JSReceiver::class_name() {
if (IsJSMap()) return roots.Map_string();
if (IsJSMapIterator()) return roots.MapIterator_string();
if (IsJSProxy()) {
- return map()->is_callable() ? roots.Function_string()
- : roots.Object_string();
+ return map().is_callable() ? roots.Function_string()
+ : roots.Object_string();
}
if (IsJSRegExp()) return roots.RegExp_string();
if (IsJSSet()) return roots.Set_string();
if (IsJSSetIterator()) return roots.SetIterator_string();
if (IsJSTypedArray()) {
-#define SWITCH_KIND(Type, type, TYPE, ctype) \
- if (map()->elements_kind() == TYPE##_ELEMENTS) { \
- return roots.Type##Array_string(); \
+#define SWITCH_KIND(Type, type, TYPE, ctype) \
+ if (map().elements_kind() == TYPE##_ELEMENTS) { \
+ return roots.Type##Array_string(); \
}
TYPED_ARRAYS(SWITCH_KIND)
#undef SWITCH_KIND
}
if (IsJSValue()) {
- Object value = JSValue::cast(*this)->value();
- if (value->IsBoolean()) return roots.Boolean_string();
- if (value->IsString()) return roots.String_string();
- if (value->IsNumber()) return roots.Number_string();
- if (value->IsBigInt()) return roots.BigInt_string();
- if (value->IsSymbol()) return roots.Symbol_string();
- if (value->IsScript()) return roots.Script_string();
+ Object value = JSValue::cast(*this).value();
+ if (value.IsBoolean()) return roots.Boolean_string();
+ if (value.IsString()) return roots.String_string();
+ if (value.IsNumber()) return roots.Number_string();
+ if (value.IsBigInt()) return roots.BigInt_string();
+ if (value.IsSymbol()) return roots.Symbol_string();
+ if (value.IsScript()) return roots.Script_string();
UNREACHABLE();
}
if (IsJSWeakMap()) return roots.WeakMap_string();
if (IsJSWeakSet()) return roots.WeakSet_string();
if (IsJSGlobalProxy()) return roots.global_string();
- Object maybe_constructor = map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
+ Object maybe_constructor = map().GetConstructor();
+ if (maybe_constructor.IsJSFunction()) {
JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (constructor->shared()->IsApiFunction()) {
- maybe_constructor = constructor->shared()->get_api_func_data();
+ if (constructor.shared().IsApiFunction()) {
+ maybe_constructor = constructor.shared().get_api_func_data();
}
}
- if (maybe_constructor->IsFunctionTemplateInfo()) {
+ if (maybe_constructor.IsFunctionTemplateInfo()) {
FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
- if (info->class_name()->IsString()) return String::cast(info->class_name());
+ if (info.class_name().IsString()) return String::cast(info.class_name());
}
return roots.Object_string();
@@ -413,23 +413,22 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
// constructor on the map provides the most accurate name.
// Don't provide the info for prototypes, since their constructors are
// reclaimed and replaced by Object in OptimizeAsPrototype.
- if (!receiver->IsJSProxy() && receiver->map()->new_target_is_base() &&
- !receiver->map()->is_prototype_map()) {
- Object maybe_constructor = receiver->map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
+ if (!receiver->IsJSProxy() && receiver->map().new_target_is_base() &&
+ !receiver->map().is_prototype_map()) {
+ Object maybe_constructor = receiver->map().GetConstructor();
+ if (maybe_constructor.IsJSFunction()) {
JSFunction constructor = JSFunction::cast(maybe_constructor);
- String name = constructor->shared()->DebugName();
- if (name->length() != 0 &&
- !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
+ String name = constructor.shared().DebugName();
+ if (name.length() != 0 &&
+ !name.Equals(ReadOnlyRoots(isolate).Object_string())) {
return std::make_pair(handle(constructor, isolate),
handle(name, isolate));
}
- } else if (maybe_constructor->IsFunctionTemplateInfo()) {
+ } else if (maybe_constructor.IsFunctionTemplateInfo()) {
FunctionTemplateInfo info = FunctionTemplateInfo::cast(maybe_constructor);
- if (info->class_name()->IsString()) {
- return std::make_pair(
- MaybeHandle<JSFunction>(),
- handle(String::cast(info->class_name()), isolate));
+ if (info.class_name().IsString()) {
+ return std::make_pair(MaybeHandle<JSFunction>(),
+ handle(String::cast(info.class_name()), isolate));
}
}
}
@@ -452,10 +451,10 @@ std::pair<MaybeHandle<JSFunction>, Handle<String>> GetConstructorHelper(
Handle<Object> maybe_constructor = JSReceiver::GetDataProperty(&it);
if (maybe_constructor->IsJSFunction()) {
JSFunction constructor = JSFunction::cast(*maybe_constructor);
- String name = constructor->shared()->DebugName();
+ String name = constructor.shared().DebugName();
- if (name->length() != 0 &&
- !name->Equals(ReadOnlyRoots(isolate).Object_string())) {
+ if (name.length() != 0 &&
+ !name.Equals(ReadOnlyRoots(isolate).Object_string())) {
return std::make_pair(handle(constructor, isolate),
handle(name, isolate));
}
@@ -480,26 +479,26 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
Handle<NativeContext> JSReceiver::GetCreationContext() {
JSReceiver receiver = *this;
// Externals are JSObjects with null as a constructor.
- DCHECK(!receiver->IsExternal(GetIsolate()));
- Object constructor = receiver->map()->GetConstructor();
+ DCHECK(!receiver.IsExternal(GetIsolate()));
+ Object constructor = receiver.map().GetConstructor();
JSFunction function;
- if (constructor->IsJSFunction()) {
+ if (constructor.IsJSFunction()) {
function = JSFunction::cast(constructor);
- } else if (constructor->IsFunctionTemplateInfo()) {
+ } else if (constructor.IsFunctionTemplateInfo()) {
// Remote objects don't have a creation context.
return Handle<NativeContext>::null();
- } else if (receiver->IsJSGeneratorObject()) {
- function = JSGeneratorObject::cast(receiver)->function();
+ } else if (receiver.IsJSGeneratorObject()) {
+ function = JSGeneratorObject::cast(receiver).function();
} else {
// Functions have null as a constructor,
// but any JSFunction knows its context immediately.
- CHECK(receiver->IsJSFunction());
+ CHECK(receiver.IsJSFunction());
function = JSFunction::cast(receiver);
}
- return function->has_context()
- ? Handle<NativeContext>(function->context()->native_context(),
- receiver->GetIsolate())
+ return function.has_context()
+ ? Handle<NativeContext>(function.context().native_context(),
+ receiver.GetIsolate())
: Handle<NativeContext>::null();
}
@@ -583,50 +582,50 @@ Object SetHashAndUpdateProperties(HeapObject properties, int hash) {
DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
DCHECK(PropertyArray::HashField::is_valid(hash));
- ReadOnlyRoots roots = properties->GetReadOnlyRoots();
+ ReadOnlyRoots roots = properties.GetReadOnlyRoots();
if (properties == roots.empty_fixed_array() ||
properties == roots.empty_property_array() ||
properties == roots.empty_property_dictionary()) {
return Smi::FromInt(hash);
}
- if (properties->IsPropertyArray()) {
- PropertyArray::cast(properties)->SetHash(hash);
- DCHECK_LT(0, PropertyArray::cast(properties)->length());
+ if (properties.IsPropertyArray()) {
+ PropertyArray::cast(properties).SetHash(hash);
+ DCHECK_LT(0, PropertyArray::cast(properties).length());
return properties;
}
- if (properties->IsGlobalDictionary()) {
- GlobalDictionary::cast(properties)->SetHash(hash);
+ if (properties.IsGlobalDictionary()) {
+ GlobalDictionary::cast(properties).SetHash(hash);
return properties;
}
- DCHECK(properties->IsNameDictionary());
- NameDictionary::cast(properties)->SetHash(hash);
+ DCHECK(properties.IsNameDictionary());
+ NameDictionary::cast(properties).SetHash(hash);
return properties;
}
int GetIdentityHashHelper(JSReceiver object) {
DisallowHeapAllocation no_gc;
- Object properties = object->raw_properties_or_hash();
- if (properties->IsSmi()) {
+ Object properties = object.raw_properties_or_hash();
+ if (properties.IsSmi()) {
return Smi::ToInt(properties);
}
- if (properties->IsPropertyArray()) {
- return PropertyArray::cast(properties)->Hash();
+ if (properties.IsPropertyArray()) {
+ return PropertyArray::cast(properties).Hash();
}
- if (properties->IsNameDictionary()) {
- return NameDictionary::cast(properties)->Hash();
+ if (properties.IsNameDictionary()) {
+ return NameDictionary::cast(properties).Hash();
}
- if (properties->IsGlobalDictionary()) {
- return GlobalDictionary::cast(properties)->Hash();
+ if (properties.IsGlobalDictionary()) {
+ return GlobalDictionary::cast(properties).Hash();
}
#ifdef DEBUG
- ReadOnlyRoots roots = object->GetReadOnlyRoots();
+ ReadOnlyRoots roots = object.GetReadOnlyRoots();
DCHECK(properties == roots.empty_fixed_array() ||
properties == roots.empty_property_dictionary());
#endif
@@ -646,8 +645,8 @@ void JSReceiver::SetIdentityHash(int hash) {
}
void JSReceiver::SetProperties(HeapObject properties) {
- DCHECK_IMPLIES(properties->IsPropertyArray() &&
- PropertyArray::cast(properties)->length() == 0,
+ DCHECK_IMPLIES(properties.IsPropertyArray() &&
+ PropertyArray::cast(properties).length() == 0,
properties == GetReadOnlyRoots().empty_property_array());
DisallowHeapAllocation no_gc;
int hash = GetIdentityHashHelper(*this);
@@ -679,7 +678,7 @@ Smi JSReceiver::CreateIdentityHash(Isolate* isolate, JSReceiver key) {
int hash = isolate->GenerateIdentityHash(PropertyArray::HashField::kMax);
DCHECK_NE(PropertyArray::kNoHashSentinel, hash);
- key->SetIdentityHash(hash);
+ key.SetIdentityHash(hash);
return Smi::FromInt(hash);
}
@@ -702,7 +701,7 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
if (object->IsJSGlobalObject()) {
// If we have a global object, invalidate the cell and swap in a new one.
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*object)->global_dictionary(), isolate);
+ JSGlobalObject::cast(*object).global_dictionary(), isolate);
DCHECK_NE(GlobalDictionary::kNotFound, entry);
auto cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
@@ -716,7 +715,7 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
dictionary = NameDictionary::DeleteEntry(isolate, dictionary, entry);
object->SetProperties(*dictionary);
}
- if (object->map()->is_prototype_map()) {
+ if (object->map().is_prototype_map()) {
// Invalidate prototype validity cell as this may invalidate transitioning
// store IC handlers.
JSObject::InvalidatePrototypeChains(object->map());
@@ -985,7 +984,7 @@ MaybeHandle<Object> GetPropertyWithInterceptorInternal(
// interceptor calls.
AssertNoContextChange ncc(isolate);
- if (interceptor->getter()->IsUndefined(isolate)) {
+ if (interceptor->getter().IsUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
@@ -1031,7 +1030,7 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
}
PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
*holder, Just(kDontThrow));
- if (!interceptor->query()->IsUndefined(isolate)) {
+ if (!interceptor->query().IsUndefined(isolate)) {
Handle<Object> result;
if (it->IsElement()) {
result = args.CallIndexedQuery(interceptor, it->index());
@@ -1043,7 +1042,7 @@ Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptorInternal(
CHECK(result->ToInt32(&value));
return Just(static_cast<PropertyAttributes>(value));
}
- } else if (!interceptor->getter()->IsUndefined(isolate)) {
+ } else if (!interceptor->getter().IsUndefined(isolate)) {
// TODO(verwaest): Use GetPropertyWithInterceptor?
Handle<Object> result;
if (it->IsElement()) {
@@ -1066,7 +1065,7 @@ Maybe<bool> SetPropertyWithInterceptorInternal(
// interceptor calls.
AssertNoContextChange ncc(isolate);
- if (interceptor->setter()->IsUndefined(isolate)) return Just(false);
+ if (interceptor->setter().IsUndefined(isolate)) return Just(false);
Handle<JSObject> holder = it->GetHolder<JSObject>();
bool result;
@@ -1099,7 +1098,7 @@ Maybe<bool> DefinePropertyWithInterceptorInternal(
// interceptor calls.
AssertNoContextChange ncc(isolate);
- if (interceptor->definer()->IsUndefined(isolate)) return Just(false);
+ if (interceptor->definer().IsUndefined(isolate)) return Just(false);
Handle<JSObject> holder = it->GetHolder<JSObject>();
bool result;
@@ -1521,7 +1520,7 @@ Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
Isolate* isolate = it->isolate();
Handle<InterceptorInfo> interceptor = it->GetInterceptor();
- if (interceptor->descriptor()->IsUndefined(isolate)) return Just(false);
+ if (interceptor->descriptor().IsUndefined(isolate)) return Just(false);
Handle<Object> result;
Handle<JSObject> holder = it->GetHolder<JSObject>();
@@ -1729,7 +1728,7 @@ Maybe<bool> GenericTestIntegrityLevel(Handle<JSReceiver> receiver,
Maybe<bool> JSReceiver::TestIntegrityLevel(Handle<JSReceiver> receiver,
IntegrityLevel level) {
- if (!receiver->map()->IsCustomElementsReceiverMap()) {
+ if (!receiver->map().IsCustomElementsReceiverMap()) {
return JSObject::TestIntegrityLevel(Handle<JSObject>::cast(receiver),
level);
}
@@ -1817,7 +1816,7 @@ MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
V8_WARN_UNUSED_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
Isolate* isolate, Handle<JSReceiver> receiver, bool get_entries,
Handle<FixedArray>* result) {
- Handle<Map> map(JSReceiver::cast(*receiver)->map(), isolate);
+ Handle<Map> map(JSReceiver::cast(*receiver).map(), isolate);
if (!map->IsJSObjectMap()) return Just(false);
if (!map->OnlyHasSimpleProperties()) return Just(false);
@@ -1964,17 +1963,6 @@ MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
try_fast_path, true);
}
-Handle<FixedArray> JSReceiver::GetOwnElementIndices(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSObject> object) {
- KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
- ALL_PROPERTIES);
- accumulator.CollectOwnElementIndices(receiver, object);
- Handle<FixedArray> keys =
- accumulator.GetKeys(GetKeysConversion::kKeepNumbers);
- DCHECK(keys->ContainsSortedNumbers());
- return keys;
-}
Maybe<bool> JSReceiver::SetPrototype(Handle<JSReceiver> object,
Handle<Object> value, bool from_javascript,
ShouldThrow should_throw) {
@@ -1990,21 +1978,11 @@ bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
for (PrototypeIterator iter(isolate, *this, kStartAtReceiver,
PrototypeIterator::END_AT_NULL);
!iter.IsAtEnd(); iter.AdvanceIgnoringProxies()) {
- if (iter.GetCurrent()->IsJSProxy()) return true;
+ if (iter.GetCurrent().IsJSProxy()) return true;
}
return false;
}
-bool JSReceiver::HasComplexElements() {
- if (IsJSProxy()) return true;
- JSObject this_object = JSObject::cast(*this);
- if (this_object->HasIndexedInterceptor()) {
- return true;
- }
- if (!this_object->HasDictionaryElements()) return false;
- return this_object->element_dictionary()->HasComplexElements();
-}
-
// static
MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
Handle<JSReceiver> new_target,
@@ -2018,7 +1996,7 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
DCHECK(constructor->IsConstructor());
DCHECK(new_target->IsConstructor());
DCHECK(!constructor->has_initial_map() ||
- constructor->initial_map()->instance_type() != JS_FUNCTION_TYPE);
+ constructor->initial_map().instance_type() != JS_FUNCTION_TYPE);
Handle<Map> initial_map;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -2063,7 +2041,7 @@ void JSObject::EnsureWritableFastElements(Handle<JSObject> object) {
object->HasFrozenOrSealedElements());
FixedArray raw_elems = FixedArray::cast(object->elements());
Isolate* isolate = object->GetIsolate();
- if (raw_elems->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) return;
+ if (raw_elems.map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) return;
Handle<FixedArray> elems(raw_elems, isolate);
Handle<FixedArray> writable_elems = isolate->factory()->CopyFixedArrayWithMap(
elems, isolate->factory()->fixed_array_map());
@@ -2198,7 +2176,7 @@ bool JSObject::AllCanRead(LookupIterator* it) {
if (it->state() == LookupIterator::ACCESSOR) {
auto accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
- if (AccessorInfo::cast(*accessors)->all_can_read()) return true;
+ if (AccessorInfo::cast(*accessors).all_can_read()) return true;
}
} else if (it->state() == LookupIterator::INTERCEPTOR) {
if (it->GetInterceptor()->all_can_read()) return true;
@@ -2241,7 +2219,7 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
// Cross-Origin [[Get]] of Well-Known Symbols does not throw, and returns
// undefined.
Handle<Name> name = it->GetName();
- if (name->IsSymbol() && Symbol::cast(*name)->is_well_known_symbol()) {
+ if (name->IsSymbol() && Symbol::cast(*name).is_well_known_symbol()) {
return it->factory()->undefined_value();
}
@@ -2283,7 +2261,7 @@ bool JSObject::AllCanWrite(LookupIterator* it) {
if (it->state() == LookupIterator::ACCESSOR) {
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
- if (AccessorInfo::cast(*accessors)->all_can_write()) return true;
+ if (AccessorInfo::cast(*accessors).all_can_write()) return true;
}
}
}
@@ -2327,7 +2305,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
int entry = dictionary->FindEntry(ReadOnlyRoots(isolate), name, hash);
if (entry == GlobalDictionary::kNotFound) {
- DCHECK_IMPLIES(global_obj->map()->is_prototype_map(),
+ DCHECK_IMPLIES(global_obj->map().is_prototype_map(),
Map::IsPrototypeChainInvalidated(global_obj->map()));
auto cell = isolate->factory()->NewPropertyCell(name);
cell->set_value(*value);
@@ -2349,7 +2327,7 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
int entry = dictionary->FindEntry(isolate, name);
if (entry == NameDictionary::kNotFound) {
- DCHECK_IMPLIES(object->map()->is_prototype_map(),
+ DCHECK_IMPLIES(object->map().is_prototype_map(),
Map::IsPrototypeChainInvalidated(object->map()));
dictionary =
NameDictionary::Add(isolate, dictionary, name, value, details);
@@ -2365,11 +2343,11 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object, Handle<Name> name,
}
void JSObject::JSObjectShortPrint(StringStream* accumulator) {
- switch (map()->instance_type()) {
+ switch (map().instance_type()) {
case JS_ARRAY_TYPE: {
- double length = JSArray::cast(*this)->length()->IsUndefined()
+ double length = JSArray::cast(*this).length().IsUndefined()
? 0
- : JSArray::cast(*this)->length()->Number();
+ : JSArray::cast(*this).length().Number();
accumulator->Add("<JSArray[%u]>", static_cast<uint32_t>(length));
break;
}
@@ -2378,7 +2356,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JSBoundFunction");
accumulator->Add(" (BoundTargetFunction %p)>",
reinterpret_cast<void*>(
- bound_function->bound_target_function().ptr()));
+ bound_function.bound_target_function().ptr()));
break;
}
case JS_WEAK_MAP_TYPE: {
@@ -2392,9 +2370,9 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
case JS_REGEXP_TYPE: {
accumulator->Add("<JSRegExp");
JSRegExp regexp = JSRegExp::cast(*this);
- if (regexp->source()->IsString()) {
+ if (regexp.source().IsString()) {
accumulator->Add(" ");
- String::cast(regexp->source())->StringShortPrint(accumulator);
+ String::cast(regexp.source()).StringShortPrint(accumulator);
}
accumulator->Add(">");
@@ -2402,11 +2380,11 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
case JS_FUNCTION_TYPE: {
JSFunction function = JSFunction::cast(*this);
- Object fun_name = function->shared()->DebugName();
+ Object fun_name = function.shared().DebugName();
bool printed = false;
- if (fun_name->IsString()) {
+ if (fun_name.IsString()) {
String str = String::cast(fun_name);
- if (str->length() > 0) {
+ if (str.length() > 0) {
accumulator->Add("<JSFunction ");
accumulator->Put(str);
printed = true;
@@ -2416,10 +2394,10 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<JSFunction");
}
if (FLAG_trace_file_names) {
- Object source_name = Script::cast(function->shared()->script())->name();
- if (source_name->IsString()) {
+ Object source_name = Script::cast(function.shared().script()).name();
+ if (source_name.IsString()) {
String str = String::cast(source_name);
- if (str->length() > 0) {
+ if (str.length() > 0) {
accumulator->Add(" <");
accumulator->Put(str);
accumulator->Add(">");
@@ -2427,7 +2405,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
}
accumulator->Add(" (sfi = %p)",
- reinterpret_cast<void*>(function->shared().ptr()));
+ reinterpret_cast<void*>(function.shared().ptr()));
accumulator->Put('>');
break;
}
@@ -2449,30 +2427,29 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
default: {
Map map_of_this = map();
Heap* heap = GetHeap();
- Object constructor = map_of_this->GetConstructor();
+ Object constructor = map_of_this.GetConstructor();
bool printed = false;
- if (constructor->IsHeapObject() &&
+ if (constructor.IsHeapObject() &&
!heap->Contains(HeapObject::cast(constructor))) {
accumulator->Add("!!!INVALID CONSTRUCTOR!!!");
} else {
bool global_object = IsJSGlobalProxy();
- if (constructor->IsJSFunction()) {
- if (!heap->Contains(JSFunction::cast(constructor)->shared())) {
+ if (constructor.IsJSFunction()) {
+ if (!heap->Contains(JSFunction::cast(constructor).shared())) {
accumulator->Add("!!!INVALID SHARED ON CONSTRUCTOR!!!");
} else {
String constructor_name =
- JSFunction::cast(constructor)->shared()->Name();
- if (constructor_name->length() > 0) {
+ JSFunction::cast(constructor).shared().Name();
+ if (constructor_name.length() > 0) {
accumulator->Add(global_object ? "<GlobalObject " : "<");
accumulator->Put(constructor_name);
- accumulator->Add(
- " %smap = %p",
- map_of_this->is_deprecated() ? "deprecated-" : "",
- map_of_this);
+ accumulator->Add(" %smap = %p",
+ map_of_this.is_deprecated() ? "deprecated-" : "",
+ map_of_this);
printed = true;
}
}
- } else if (constructor->IsFunctionTemplateInfo()) {
+ } else if (constructor.IsFunctionTemplateInfo()) {
accumulator->Add(global_object ? "<RemoteObject>" : "<RemoteObject>");
printed = true;
}
@@ -2482,7 +2459,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
if (IsJSValue()) {
accumulator->Add(" value = ");
- JSValue::cast(*this)->value()->ShortPrint(accumulator);
+ JSValue::cast(*this).value().ShortPrint(accumulator);
}
accumulator->Put('>');
break;
@@ -2512,52 +2489,52 @@ void JSObject::PrintElementsTransition(FILE* file, Handle<JSObject> object,
void JSObject::PrintInstanceMigration(FILE* file, Map original_map,
Map new_map) {
- if (new_map->is_dictionary_map()) {
+ if (new_map.is_dictionary_map()) {
PrintF(file, "[migrating to slow]\n");
return;
}
PrintF(file, "[migrating]");
- DescriptorArray o = original_map->instance_descriptors();
- DescriptorArray n = new_map->instance_descriptors();
- for (int i = 0; i < original_map->NumberOfOwnDescriptors(); i++) {
- Representation o_r = o->GetDetails(i).representation();
- Representation n_r = n->GetDetails(i).representation();
+ DescriptorArray o = original_map.instance_descriptors();
+ DescriptorArray n = new_map.instance_descriptors();
+ for (int i = 0; i < original_map.NumberOfOwnDescriptors(); i++) {
+ Representation o_r = o.GetDetails(i).representation();
+ Representation n_r = n.GetDetails(i).representation();
if (!o_r.Equals(n_r)) {
- String::cast(o->GetKey(i))->PrintOn(file);
+ String::cast(o.GetKey(i)).PrintOn(file);
PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
- } else if (o->GetDetails(i).location() == kDescriptor &&
- n->GetDetails(i).location() == kField) {
- Name name = o->GetKey(i);
- if (name->IsString()) {
- String::cast(name)->PrintOn(file);
+ } else if (o.GetDetails(i).location() == kDescriptor &&
+ n.GetDetails(i).location() == kField) {
+ Name name = o.GetKey(i);
+ if (name.IsString()) {
+ String::cast(name).PrintOn(file);
} else {
PrintF(file, "{symbol %p}", reinterpret_cast<void*>(name.ptr()));
}
PrintF(file, " ");
}
}
- if (original_map->elements_kind() != new_map->elements_kind()) {
- PrintF(file, "elements_kind[%i->%i]", original_map->elements_kind(),
- new_map->elements_kind());
+ if (original_map.elements_kind() != new_map.elements_kind()) {
+ PrintF(file, "elements_kind[%i->%i]", original_map.elements_kind(),
+ new_map.elements_kind());
}
PrintF(file, "\n");
}
bool JSObject::IsUnmodifiedApiObject(FullObjectSlot o) {
Object object = *o;
- if (object->IsSmi()) return false;
+ if (object.IsSmi()) return false;
HeapObject heap_object = HeapObject::cast(object);
- if (!object->IsJSObject()) return false;
+ if (!object.IsJSObject()) return false;
JSObject js_object = JSObject::cast(object);
- if (!js_object->IsDroppableApiWrapper()) return false;
- Object maybe_constructor = js_object->map()->GetConstructor();
- if (!maybe_constructor->IsJSFunction()) return false;
+ if (!js_object.IsDroppableApiWrapper()) return false;
+ Object maybe_constructor = js_object.map().GetConstructor();
+ if (!maybe_constructor.IsJSFunction()) return false;
JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (js_object->elements()->length() != 0) return false;
+ if (js_object.elements().length() != 0) return false;
// Check that the object is not a key in a WeakMap (over-approximation).
- if (!js_object->GetIdentityHash()->IsUndefined()) return false;
+ if (!js_object.GetIdentityHash().IsUndefined()) return false;
- return constructor->initial_map() == heap_object->map();
+ return constructor.initial_map() == heap_object.map();
}
// static
@@ -2571,16 +2548,16 @@ void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
old_map->set_prototype_info(Smi::kZero);
if (FLAG_trace_prototype_users) {
PrintF("Moving prototype_info %p from map %p to map %p.\n",
- reinterpret_cast<void*>(new_map->prototype_info()->ptr()),
+ reinterpret_cast<void*>(new_map->prototype_info().ptr()),
reinterpret_cast<void*>(old_map->ptr()),
reinterpret_cast<void*>(new_map->ptr()));
}
if (was_registered) {
- if (new_map->prototype_info()->IsPrototypeInfo()) {
+ if (new_map->prototype_info().IsPrototypeInfo()) {
// The new map isn't registered with its prototype yet; reflect this fact
// in the PrototypeInfo it just inherited from the old map.
PrototypeInfo::cast(new_map->prototype_info())
- ->set_registry_slot(PrototypeInfo::UNREGISTERED);
+ .set_registry_slot(PrototypeInfo::UNREGISTERED);
}
JSObject::LazyRegisterPrototypeUser(new_map, isolate);
}
@@ -2602,6 +2579,7 @@ void JSObject::NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
}
namespace {
+
// To migrate a fast instance to a fast map:
// - First check whether the instance needs to be rewritten. If not, simply
// change the map.
@@ -2629,31 +2607,28 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
return;
}
+ // If the map adds a new kDescriptor property, simply set the map.
PropertyDetails details = new_map->GetLastDescriptorDetails();
- int target_index = details.field_index() - new_map->GetInObjectProperties();
- int property_array_length = object->property_array()->length();
- bool have_space = old_map->UnusedPropertyFields() > 0 ||
- (details.location() == kField && target_index >= 0 &&
- property_array_length > target_index);
- // Either new_map adds an kDescriptor property, or a kField property for
- // which there is still space, and which does not require a mutable double
- // box (an out-of-object double).
- if (details.location() == kDescriptor ||
- (have_space && ((FLAG_unbox_double_fields && target_index < 0) ||
- !details.representation().IsDouble()))) {
+ if (details.location() == kDescriptor) {
object->synchronized_set_map(*new_map);
return;
}
- // If there is still space in the object, we need to allocate a mutable
- // double box.
- if (have_space) {
- FieldIndex index =
- FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
- DCHECK(details.representation().IsDouble());
- DCHECK(!new_map->IsUnboxedDoubleField(index));
- auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
- object->RawFastPropertyAtPut(index, *value);
+ // Check if we still have space in the {object}, in which case we
+ // can also simply set the map (modulo a special case for mutable
+ // double boxes).
+ FieldIndex index =
+ FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
+ if (index.is_inobject() ||
+ index.outobject_array_index() < object->property_array().length()) {
+ // We still need to allocate MutableHeapNumbers for double fields
+ // if either double field unboxing is disabled or the double field
+ // is in the PropertyArray backing store (where we don't support
+ // double field unboxing).
+ if (index.is_double() && !new_map->IsUnboxedDoubleField(index)) {
+ auto value = isolate->factory()->NewMutableHeapNumberWithHoleNaN();
+ object->RawFastPropertyAtPut(index, *value);
+ }
object->synchronized_set_map(*new_map);
return;
}
@@ -2674,8 +2649,8 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
DCHECK_EQ(kField, details.location());
DCHECK_EQ(kData, details.kind());
- DCHECK_GE(target_index, 0); // Must be a backing store index.
- new_storage->set(target_index, *value);
+ DCHECK(!index.is_inobject()); // Must be a backing store index.
+ new_storage->set(index.outobject_array_index(), *value);
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
@@ -2808,10 +2783,10 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// Can't use JSObject::FastPropertyAtPut() because proper map was not set
// yet.
if (new_map->IsUnboxedDoubleField(index)) {
- DCHECK(value->IsMutableHeapNumber());
+ DCHECK(value.IsMutableHeapNumber());
// Ensure that all bits of the double value are preserved.
object->RawFastDoublePropertyAsBitsAtPut(
- index, MutableHeapNumber::cast(value)->value_as_bits());
+ index, MutableHeapNumber::cast(value).value_as_bits());
if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
// Transition from tagged to untagged slot.
heap->ClearRecordedSlot(*object, object->RawField(index.offset()));
@@ -2985,7 +2960,7 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
// Ensure that no transition was inserted for prototype migrations.
DCHECK_EQ(0, TransitionsAccessor(object->GetIsolate(), old_map)
.NumberOfTransitions());
- DCHECK(new_map->GetBackPointer()->IsUndefined());
+ DCHECK(new_map->GetBackPointer().IsUndefined());
DCHECK(object->map() != *old_map);
}
} else {
@@ -3025,15 +3000,14 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
// static
MaybeHandle<NativeContext> JSObject::GetFunctionRealm(Handle<JSObject> object) {
- DCHECK(object->map()->is_constructor());
+ DCHECK(object->map().is_constructor());
DCHECK(!object->IsJSFunction());
return object->GetCreationContext();
}
void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
- DCHECK(object->map()->GetInObjectProperties() ==
- map->GetInObjectProperties());
- ElementsKind obj_kind = object->map()->elements_kind();
+ DCHECK(object->map().GetInObjectProperties() == map->GetInObjectProperties());
+ ElementsKind obj_kind = object->map().elements_kind();
ElementsKind map_kind = map->elements_kind();
if (map_kind != obj_kind) {
ElementsKind to_kind = GetMoreGeneralElementsKind(map_kind, obj_kind);
@@ -3143,7 +3117,7 @@ void JSObject::AddProperty(Isolate* isolate, Handle<JSObject> object,
Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
DCHECK(maybe.IsJust());
DCHECK(!it.IsFound());
- DCHECK(object->map()->is_extensible() || name->IsPrivate());
+ DCHECK(object->map().is_extensible() || name->IsPrivate());
#endif
CHECK(Object::AddDataProperty(&it, value, attributes,
Just(ShouldThrow::kThrowOnError),
@@ -3242,7 +3216,7 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
// Special case: properties of typed arrays cannot be reconfigured to
// non-writable nor to non-enumerable.
- if (it->IsElement() && object->HasFixedTypedArrayElements()) {
+ if (it->IsElement() && object->HasTypedArrayElements()) {
return Object::RedefineIncompatibleProperty(
it->isolate(), it->GetName(), value, should_throw);
}
@@ -3329,14 +3303,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
PropertyKind kind = dictionary->DetailsAt(index).kind();
if (kind == kData) {
- if (FLAG_track_constant_fields) {
- number_of_fields += 1;
- } else {
- Object value = dictionary->ValueAt(index);
- if (!value->IsJSFunction()) {
- number_of_fields += 1;
- }
- }
+ number_of_fields += 1;
}
}
@@ -3395,7 +3362,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Name k = dictionary->NameAt(index);
// Dictionary keys are internalized upon insertion.
// TODO(jkummerow): Turn this into a DCHECK if it's not hit in the wild.
- CHECK(k->IsUniqueName());
+ CHECK(k.IsUniqueName());
Handle<Name> key(k, isolate);
// Properly mark the {new_map} if the {key} is an "interesting symbol".
@@ -3411,22 +3378,15 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Descriptor d;
if (details.kind() == kData) {
- if (!FLAG_track_constant_fields && value->IsJSFunction()) {
- d = Descriptor::DataConstant(key, handle(value, isolate),
- details.attributes());
- } else {
- // Ensure that we make constant field only when elements kind is not
- // transitionable.
- PropertyConstness constness =
- FLAG_track_constant_fields && !is_transitionable_elements_kind
- ? PropertyConstness::kConst
- : PropertyConstness::kMutable;
- d = Descriptor::DataField(
- key, current_offset, details.attributes(), constness,
- // TODO(verwaest): value->OptimalRepresentation();
- Representation::Tagged(),
- MaybeObjectHandle(FieldType::Any(isolate)));
- }
+ // Ensure that we make constant field only when elements kind is not
+ // transitionable.
+ PropertyConstness constness = is_transitionable_elements_kind
+ ? PropertyConstness::kMutable
+ : PropertyConstness::kConst;
+ d = Descriptor::DataField(
+ key, current_offset, details.attributes(), constness,
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged(), MaybeObjectHandle(FieldType::Any(isolate)));
} else {
DCHECK_EQ(kAccessor, details.kind());
d = Descriptor::AccessorConstant(key, handle(value, isolate),
@@ -3474,9 +3434,9 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
}
void JSObject::RequireSlowElements(NumberDictionary dictionary) {
- if (dictionary->requires_slow_elements()) return;
- dictionary->set_requires_slow_elements();
- if (map()->is_prototype_map()) {
+ if (dictionary.requires_slow_elements()) return;
+ dictionary.set_requires_slow_elements();
+ if (map().is_prototype_map()) {
// If this object is a prototype (the callee will check), invalidate any
// prototype chains involving it.
InvalidatePrototypeChains(map());
@@ -3484,7 +3444,7 @@ void JSObject::RequireSlowElements(NumberDictionary dictionary) {
}
Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
- DCHECK(!object->HasFixedTypedArrayElements());
+ DCHECK(!object->HasTypedArrayElements());
Isolate* isolate = object->GetIsolate();
bool is_sloppy_arguments = object->HasSloppyArgumentsElements();
{
@@ -3492,17 +3452,17 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
FixedArrayBase elements = object->elements();
if (is_sloppy_arguments) {
- elements = SloppyArgumentsElements::cast(elements)->arguments();
+ elements = SloppyArgumentsElements::cast(elements).arguments();
}
- if (elements->IsNumberDictionary()) {
+ if (elements.IsNumberDictionary()) {
return handle(NumberDictionary::cast(elements), isolate);
}
}
DCHECK(object->HasSmiOrObjectElements() || object->HasDoubleElements() ||
object->HasFastArgumentsElements() ||
- object->HasFastStringWrapperElements());
+ object->HasFastStringWrapperElements() || object->HasSealedElements());
Handle<NumberDictionary> dictionary =
object->GetElementsAccessor()->Normalize(object);
@@ -3519,7 +3479,7 @@ Handle<NumberDictionary> JSObject::NormalizeElements(Handle<JSObject> object) {
if (is_sloppy_arguments) {
SloppyArgumentsElements::cast(object->elements())
- ->set_arguments(*dictionary);
+ .set_arguments(*dictionary);
} else {
object->set_elements(*dictionary);
}
@@ -3549,7 +3509,7 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
Handle<InterceptorInfo> interceptor(it->GetInterceptor());
- if (interceptor->deleter()->IsUndefined(isolate)) return Nothing<bool>();
+ if (interceptor->deleter().IsUndefined(isolate)) return Nothing<bool>();
Handle<JSObject> holder = it->GetHolder<JSObject>();
Handle<Object> receiver = it->GetReceiver();
@@ -3615,12 +3575,12 @@ bool TestDictionaryPropertiesIntegrityLevel(Dictionary dict,
PropertyAttributes level) {
DCHECK(level == SEALED || level == FROZEN);
- uint32_t capacity = dict->Capacity();
+ uint32_t capacity = dict.Capacity();
for (uint32_t i = 0; i < capacity; i++) {
Object key;
- if (!dict->ToKey(roots, i, &key)) continue;
- if (key->FilterKey(ALL_PROPERTIES)) continue;
- PropertyDetails details = dict->DetailsAt(i);
+ if (!dict.ToKey(roots, i, &key)) continue;
+ if (key.FilterKey(ALL_PROPERTIES)) continue;
+ PropertyDetails details = dict.DetailsAt(i);
if (details.IsConfigurable()) return false;
if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
return false;
@@ -3631,14 +3591,14 @@ bool TestDictionaryPropertiesIntegrityLevel(Dictionary dict,
bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
DCHECK(level == SEALED || level == FROZEN);
- DCHECK(!map->IsCustomElementsReceiverMap());
- DCHECK(!map->is_dictionary_map());
+ DCHECK(!map.IsCustomElementsReceiverMap());
+ DCHECK(!map.is_dictionary_map());
- DescriptorArray descriptors = map->instance_descriptors();
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ DescriptorArray descriptors = map.instance_descriptors();
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
- if (descriptors->GetKey(i)->IsPrivate()) continue;
- PropertyDetails details = descriptors->GetDetails(i);
+ if (descriptors.GetKey(i).IsPrivate()) continue;
+ PropertyDetails details = descriptors.GetDetails(i);
if (details.IsConfigurable()) return false;
if (level == FROZEN && details.kind() == kData && !details.IsReadOnly()) {
return false;
@@ -3648,28 +3608,28 @@ bool TestFastPropertiesIntegrityLevel(Map map, PropertyAttributes level) {
}
bool TestPropertiesIntegrityLevel(JSObject object, PropertyAttributes level) {
- DCHECK(!object->map()->IsCustomElementsReceiverMap());
+ DCHECK(!object.map().IsCustomElementsReceiverMap());
- if (object->HasFastProperties()) {
- return TestFastPropertiesIntegrityLevel(object->map(), level);
+ if (object.HasFastProperties()) {
+ return TestFastPropertiesIntegrityLevel(object.map(), level);
}
return TestDictionaryPropertiesIntegrityLevel(
- object->property_dictionary(), object->GetReadOnlyRoots(), level);
+ object.property_dictionary(), object.GetReadOnlyRoots(), level);
}
bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
- DCHECK(!object->HasSloppyArgumentsElements());
+ DCHECK(!object.HasSloppyArgumentsElements());
- ElementsKind kind = object->GetElementsKind();
+ ElementsKind kind = object.GetElementsKind();
if (IsDictionaryElementsKind(kind)) {
return TestDictionaryPropertiesIntegrityLevel(
- NumberDictionary::cast(object->elements()), object->GetReadOnlyRoots(),
+ NumberDictionary::cast(object.elements()), object.GetReadOnlyRoots(),
level);
}
- if (IsFixedTypedArrayElementsKind(kind)) {
- if (level == FROZEN && JSArrayBufferView::cast(object)->byte_length() > 0)
+ if (IsTypedArrayElementsKind(kind)) {
+ if (level == FROZEN && JSArrayBufferView::cast(object).byte_length() > 0)
return false; // TypedArrays with elements can't be frozen.
return TestPropertiesIntegrityLevel(object, level);
}
@@ -3683,9 +3643,9 @@ bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) {
}
bool FastTestIntegrityLevel(JSObject object, PropertyAttributes level) {
- DCHECK(!object->map()->IsCustomElementsReceiverMap());
+ DCHECK(!object.map().IsCustomElementsReceiverMap());
- return !object->map()->is_extensible() &&
+ return !object.map().is_extensible() &&
TestElementsIntegrityLevel(object, level) &&
TestPropertiesIntegrityLevel(object, level);
}
@@ -3694,7 +3654,7 @@ bool FastTestIntegrityLevel(JSObject object, PropertyAttributes level) {
Maybe<bool> JSObject::TestIntegrityLevel(Handle<JSObject> object,
IntegrityLevel level) {
- if (!object->map()->IsCustomElementsReceiverMap() &&
+ if (!object->map().IsCustomElementsReceiverMap() &&
!object->HasSloppyArgumentsElements()) {
return Just(FastTestIntegrityLevel(*object, level));
}
@@ -3717,7 +3677,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
NewTypeError(MessageTemplate::kNoAccess));
}
- if (!object->map()->is_extensible()) return Just(true);
+ if (!object->map().is_extensible()) return Just(true);
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
@@ -3727,13 +3687,13 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
should_throw);
}
- if (object->map()->has_named_interceptor() ||
- object->map()->has_indexed_interceptor()) {
+ if (object->map().has_named_interceptor() ||
+ object->map().has_indexed_interceptor()) {
RETURN_FAILURE(isolate, should_throw,
NewTypeError(MessageTemplate::kCannotPreventExt));
}
- if (!object->HasFixedTypedArrayElements()) {
+ if (!object->HasTypedArrayElements()) {
// If there are fast elements we normalize.
Handle<NumberDictionary> dictionary = NormalizeElements(object);
DCHECK(object->HasDictionaryElements() ||
@@ -3751,7 +3711,7 @@ Maybe<bool> JSObject::PreventExtensions(Handle<JSObject> object,
new_map->set_is_extensible(false);
JSObject::MigrateToMap(object, new_map);
- DCHECK(!object->map()->is_extensible());
+ DCHECK(!object->map().is_extensible());
return Just(true);
}
@@ -3765,10 +3725,10 @@ bool JSObject::IsExtensible(Handle<JSObject> object) {
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, *object);
if (iter.IsAtEnd()) return false;
- DCHECK(iter.GetCurrent()->IsJSGlobalObject());
- return iter.GetCurrent<JSObject>()->map()->is_extensible();
+ DCHECK(iter.GetCurrent().IsJSGlobalObject());
+ return iter.GetCurrent<JSObject>().map().is_extensible();
}
- return object->map()->is_extensible();
+ return object->map().is_extensible();
}
template <typename Dictionary>
@@ -3779,13 +3739,13 @@ void JSObject::ApplyAttributesToDictionary(
for (int i = 0; i < capacity; i++) {
Object k;
if (!dictionary->ToKey(roots, i, &k)) continue;
- if (k->FilterKey(ALL_PROPERTIES)) continue;
+ if (k.FilterKey(ALL_PROPERTIES)) continue;
PropertyDetails details = dictionary->DetailsAt(i);
int attrs = attributes;
// READ_ONLY is an invalid attribute for JS setters/getters.
if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
Object v = dictionary->ValueAt(i);
- if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
+ if (v.IsAccessorPair()) attrs &= ~READ_ONLY;
}
details = details.CopyAddAttributes(static_cast<PropertyAttributes>(attrs));
dictionary->DetailsAtPut(isolate, i, details);
@@ -3811,8 +3771,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
NewTypeError(MessageTemplate::kNoAccess));
}
- if (attrs == NONE && !object->map()->is_extensible()) return Just(true);
- ElementsKind old_elements_kind = object->map()->elements_kind();
+ if (attrs == NONE && !object->map().is_extensible()) return Just(true);
+ ElementsKind old_elements_kind = object->map().elements_kind();
if (attrs != FROZEN && IsSealedElementsKind(old_elements_kind))
return Just(true);
if (old_elements_kind == PACKED_FROZEN_ELEMENTS) return Just(true);
@@ -3825,8 +3785,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
PrototypeIterator::GetCurrent<JSObject>(iter), should_throw);
}
- if (object->map()->has_named_interceptor() ||
- object->map()->has_indexed_interceptor()) {
+ if (object->map().has_named_interceptor() ||
+ object->map().has_indexed_interceptor()) {
MessageTemplate message = MessageTemplate::kNone;
switch (attrs) {
case NONE:
@@ -3845,12 +3805,11 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
Handle<NumberDictionary> new_element_dictionary;
- if (!object->HasFixedTypedArrayElements() &&
- !object->HasDictionaryElements() &&
+ if (!object->HasTypedArrayElements() && !object->HasDictionaryElements() &&
!object->HasSlowStringWrapperElements()) {
int length = object->IsJSArray()
? Smi::ToInt(Handle<JSArray>::cast(object)->length())
- : object->elements()->length();
+ : object->elements().length();
new_element_dictionary =
length == 0 ? isolate->factory()->empty_slow_element_dictionary()
: object->GetElementsAccessor()->Normalize(object);
@@ -3873,7 +3832,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
if (!transition.is_null()) {
Handle<Map> transition_map(transition, isolate);
DCHECK(transition_map->has_dictionary_elements() ||
- transition_map->has_fixed_typed_array_elements() ||
+ transition_map->has_typed_array_elements() ||
transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS ||
transition_map->has_frozen_or_sealed_elements());
DCHECK(!transition_map->is_extensible());
@@ -3907,7 +3866,7 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
ReadOnlyRoots roots(isolate);
if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*object)->global_dictionary(), isolate);
+ JSGlobalObject::cast(*object).global_dictionary(), isolate);
JSObject::ApplyAttributesToDictionary(isolate, roots, dictionary,
attrs);
} else {
@@ -3919,15 +3878,14 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
}
- if (object->map()->has_frozen_or_sealed_elements()) {
+ if (object->map().has_frozen_or_sealed_elements()) {
return Just(true);
}
// Both seal and preventExtensions always go through without modifications to
// typed array elements. Freeze works only if there are no actual elements.
- if (object->HasFixedTypedArrayElements()) {
- if (attrs == FROZEN &&
- JSArrayBufferView::cast(*object)->byte_length() > 0) {
+ if (object->HasTypedArrayElements()) {
+ if (attrs == FROZEN && JSArrayBufferView::cast(*object).byte_length() > 0) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kCannotFreezeArrayBufferView));
return Nothing<bool>();
@@ -3935,8 +3893,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
return Just(true);
}
- DCHECK(object->map()->has_dictionary_elements() ||
- object->map()->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
+ DCHECK(object->map().has_dictionary_elements() ||
+ object->map().elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
if (!new_element_dictionary.is_null()) {
object->set_elements(*new_element_dictionary);
}
@@ -3960,6 +3918,7 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
FieldIndex index) {
Isolate* isolate = object->GetIsolate();
if (object->IsUnboxedDoubleField(index)) {
+ DCHECK(representation.IsDouble());
double value = object->RawFastDoublePropertyAt(index);
return isolate->factory()->NewHeapNumber(value);
}
@@ -3971,39 +3930,41 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
bool JSObject::HasEnumerableElements() {
// TODO(cbruni): cleanup
JSObject object = *this;
- switch (object->GetElementsKind()) {
+ switch (object.GetElementsKind()) {
case PACKED_SMI_ELEMENTS:
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
case PACKED_DOUBLE_ELEMENTS: {
- int length = object->IsJSArray()
- ? Smi::ToInt(JSArray::cast(object)->length())
- : object->elements()->length();
+ int length = object.IsJSArray()
+ ? Smi::ToInt(JSArray::cast(object).length())
+ : object.elements().length();
return length > 0;
}
case HOLEY_SMI_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case HOLEY_ELEMENTS: {
- FixedArray elements = FixedArray::cast(object->elements());
- int length = object->IsJSArray()
- ? Smi::ToInt(JSArray::cast(object)->length())
- : elements->length();
+ FixedArray elements = FixedArray::cast(object.elements());
+ int length = object.IsJSArray()
+ ? Smi::ToInt(JSArray::cast(object).length())
+ : elements.length();
Isolate* isolate = GetIsolate();
for (int i = 0; i < length; i++) {
- if (!elements->is_the_hole(isolate, i)) return true;
+ if (!elements.is_the_hole(isolate, i)) return true;
}
return false;
}
case HOLEY_DOUBLE_ELEMENTS: {
- int length = object->IsJSArray()
- ? Smi::ToInt(JSArray::cast(object)->length())
- : object->elements()->length();
+ int length = object.IsJSArray()
+ ? Smi::ToInt(JSArray::cast(object).length())
+ : object.elements().length();
// Zero-length arrays would use the empty FixedArray...
if (length == 0) return false;
// ...so only cast to FixedDoubleArray otherwise.
- FixedDoubleArray elements = FixedDoubleArray::cast(object->elements());
+ FixedDoubleArray elements = FixedDoubleArray::cast(object.elements());
for (int i = 0; i < length; i++) {
- if (!elements->is_the_hole(i)) return true;
+ if (!elements.is_the_hole(i)) return true;
}
return false;
}
@@ -4012,12 +3973,12 @@ bool JSObject::HasEnumerableElements() {
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
{
- int length = object->elements()->length();
+ size_t length = JSTypedArray::cast(object).length();
return length > 0;
}
case DICTIONARY_ELEMENTS: {
- NumberDictionary elements = NumberDictionary::cast(object->elements());
- return elements->NumberOfEnumerableProperties() > 0;
+ NumberDictionary elements = NumberDictionary::cast(object.elements());
+ return elements.NumberOfEnumerableProperties() > 0;
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -4025,10 +3986,10 @@ bool JSObject::HasEnumerableElements() {
return true;
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
- if (String::cast(JSValue::cast(object)->value())->length() > 0) {
+ if (String::cast(JSValue::cast(object).value()).length() > 0) {
return true;
}
- return object->elements()->length() > 0;
+ return object.elements().length() > 0;
case NO_ELEMENTS:
return false;
}
@@ -4066,7 +4027,7 @@ MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
// Ignore accessors on typed arrays.
- if (it->IsElement() && object->HasFixedTypedArrayElements()) {
+ if (it->IsElement() && object->HasTypedArrayElements()) {
return it->factory()->undefined_value();
}
@@ -4103,7 +4064,7 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
}
// Ignore accessors on typed arrays.
- if (it.IsElement() && object->HasFixedTypedArrayElements()) {
+ if (it.IsElement() && object->HasTypedArrayElements()) {
return it.factory()->undefined_value();
}
@@ -4122,59 +4083,59 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Object JSObject::SlowReverseLookup(Object value) {
if (HasFastProperties()) {
- int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
- DescriptorArray descs = map()->instance_descriptors();
- bool value_is_number = value->IsNumber();
+ int number_of_own_descriptors = map().NumberOfOwnDescriptors();
+ DescriptorArray descs = map().instance_descriptors();
+ bool value_is_number = value.IsNumber();
for (int i = 0; i < number_of_own_descriptors; i++) {
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
if (IsUnboxedDoubleField(field_index)) {
if (value_is_number) {
double property = RawFastDoublePropertyAt(field_index);
- if (property == value->Number()) {
- return descs->GetKey(i);
+ if (property == value.Number()) {
+ return descs.GetKey(i);
}
}
} else {
Object property = RawFastPropertyAt(field_index);
if (field_index.is_double()) {
- DCHECK(property->IsMutableHeapNumber());
- if (value_is_number && property->Number() == value->Number()) {
- return descs->GetKey(i);
+ DCHECK(property.IsMutableHeapNumber());
+ if (value_is_number && property.Number() == value.Number()) {
+ return descs.GetKey(i);
}
} else if (property == value) {
- return descs->GetKey(i);
+ return descs.GetKey(i);
}
}
} else {
DCHECK_EQ(kDescriptor, details.location());
if (details.kind() == kData) {
- if (descs->GetStrongValue(i) == value) {
- return descs->GetKey(i);
+ if (descs.GetStrongValue(i) == value) {
+ return descs.GetKey(i);
}
}
}
}
return GetReadOnlyRoots().undefined_value();
} else if (IsJSGlobalObject()) {
- return JSGlobalObject::cast(*this)->global_dictionary()->SlowReverseLookup(
+ return JSGlobalObject::cast(*this).global_dictionary().SlowReverseLookup(
value);
} else {
- return property_dictionary()->SlowReverseLookup(value);
+ return property_dictionary().SlowReverseLookup(value);
}
}
void JSObject::PrototypeRegistryCompactionCallback(HeapObject value,
int old_index,
int new_index) {
- DCHECK(value->IsMap() && Map::cast(value)->is_prototype_map());
+ DCHECK(value.IsMap() && Map::cast(value).is_prototype_map());
Map map = Map::cast(value);
- DCHECK(map->prototype_info()->IsPrototypeInfo());
- PrototypeInfo proto_info = PrototypeInfo::cast(map->prototype_info());
- DCHECK_EQ(old_index, proto_info->registry_slot());
- proto_info->set_registry_slot(new_index);
+ DCHECK(map.prototype_info().IsPrototypeInfo());
+ PrototypeInfo proto_info = PrototypeInfo::cast(map.prototype_info());
+ DCHECK_EQ(old_index, proto_info.registry_slot());
+ proto_info.set_registry_slot(new_index);
}
// static
@@ -4189,10 +4150,10 @@ void JSObject::MakePrototypesFast(Handle<Object> receiver,
if (!current->IsJSObject()) return;
Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
Map current_map = current_obj->map();
- if (current_map->is_prototype_map()) {
+ if (current_map.is_prototype_map()) {
// If the map is already marked as should be fast, we're done. Its
// prototypes will have been marked already as well.
- if (current_map->should_be_fast_prototype_map()) return;
+ if (current_map.should_be_fast_prototype_map()) return;
Handle<Map> map(current_map, isolate);
Map::SetShouldBeFastPrototypeMap(map, true, isolate);
JSObject::OptimizeAsPrototype(current_obj);
@@ -4205,8 +4166,8 @@ static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
if (!object->HasFastProperties()) return false;
if (object->IsJSGlobalProxy()) return false;
if (object->GetIsolate()->bootstrapper()->IsActive()) return false;
- return !object->map()->is_prototype_map() ||
- !object->map()->should_be_fast_prototype_map();
+ return !object->map().is_prototype_map() ||
+ !object->map().should_be_fast_prototype_map();
}
// static
@@ -4218,8 +4179,8 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
"NormalizeAsPrototype");
}
- if (object->map()->is_prototype_map()) {
- if (object->map()->should_be_fast_prototype_map() &&
+ if (object->map().is_prototype_map()) {
+ if (object->map().should_be_fast_prototype_map() &&
!object->HasFastProperties()) {
JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
}
@@ -4228,18 +4189,18 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
handle(object->map(), object->GetIsolate()),
"CopyAsPrototype");
JSObject::MigrateToMap(object, new_map);
- object->map()->set_is_prototype_map(true);
+ object->map().set_is_prototype_map(true);
// Replace the pointer to the exact constructor with the Object function
// from the same context if undetectable from JS. This is to avoid keeping
// memory alive unnecessarily.
- Object maybe_constructor = object->map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
+ Object maybe_constructor = object->map().GetConstructor();
+ if (maybe_constructor.IsJSFunction()) {
JSFunction constructor = JSFunction::cast(maybe_constructor);
- if (!constructor->shared()->IsApiFunction()) {
- Context context = constructor->context()->native_context();
- JSFunction object_function = context->object_function();
- object->map()->SetConstructor(object_function);
+ if (!constructor.shared().IsApiFunction()) {
+ Context context = constructor.context().native_context();
+ JSFunction object_function = context.object_function();
+ object->map().SetConstructor(object_function);
}
}
}
@@ -4247,8 +4208,8 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
// static
void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
- if (!object->map()->is_prototype_map()) return;
- if (!object->map()->should_be_fast_prototype_map()) return;
+ if (!object->map().is_prototype_map()) return;
+ if (!object->map().should_be_fast_prototype_map()) return;
OptimizeAsPrototype(object);
}
@@ -4290,7 +4251,7 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
PrintF("Registering %p as a user of prototype %p (map=%p).\n",
reinterpret_cast<void*>(current_user->ptr()),
reinterpret_cast<void*>(proto->ptr()),
- reinterpret_cast<void*>(proto->map()->ptr()));
+ reinterpret_cast<void*>(proto->map().ptr()));
}
current_user = handle(proto->map(), isolate);
@@ -4304,23 +4265,23 @@ void JSObject::LazyRegisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
DCHECK(user->is_prototype_map());
// If it doesn't have a PrototypeInfo, it was never registered.
- if (!user->prototype_info()->IsPrototypeInfo()) return false;
+ if (!user->prototype_info().IsPrototypeInfo()) return false;
// If it had no prototype before, see if it had users that might expect
// registration.
- if (!user->prototype()->IsJSObject()) {
+ if (!user->prototype().IsJSObject()) {
Object users =
- PrototypeInfo::cast(user->prototype_info())->prototype_users();
- return users->IsWeakArrayList();
+ PrototypeInfo::cast(user->prototype_info()).prototype_users();
+ return users.IsWeakArrayList();
}
Handle<JSObject> prototype(JSObject::cast(user->prototype()), isolate);
Handle<PrototypeInfo> user_info =
Map::GetOrCreatePrototypeInfo(user, isolate);
int slot = user_info->registry_slot();
if (slot == PrototypeInfo::UNREGISTERED) return false;
- DCHECK(prototype->map()->is_prototype_map());
- Object maybe_proto_info = prototype->map()->prototype_info();
+ DCHECK(prototype->map().is_prototype_map());
+ Object maybe_proto_info = prototype->map().prototype_info();
// User knows its registry slot, prototype info and user registry must exist.
- DCHECK(maybe_proto_info->IsPrototypeInfo());
+ DCHECK(maybe_proto_info.IsPrototypeInfo());
Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(maybe_proto_info),
isolate);
Handle<WeakArrayList> prototype_users(
@@ -4341,36 +4302,35 @@ namespace {
// AccessorAssembler::InvalidateValidityCellIfPrototype() which does pre-checks
// before jumping here.
void InvalidateOnePrototypeValidityCellInternal(Map map) {
- DCHECK(map->is_prototype_map());
+ DCHECK(map.is_prototype_map());
if (FLAG_trace_prototype_users) {
PrintF("Invalidating prototype map %p 's cell\n",
reinterpret_cast<void*>(map.ptr()));
}
- Object maybe_cell = map->prototype_validity_cell();
- if (maybe_cell->IsCell()) {
+ Object maybe_cell = map.prototype_validity_cell();
+ if (maybe_cell.IsCell()) {
// Just set the value; the cell will be replaced lazily.
Cell cell = Cell::cast(maybe_cell);
- cell->set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
+ cell.set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
}
}
void InvalidatePrototypeChainsInternal(Map map) {
InvalidateOnePrototypeValidityCellInternal(map);
- Object maybe_proto_info = map->prototype_info();
- if (!maybe_proto_info->IsPrototypeInfo()) return;
+ Object maybe_proto_info = map.prototype_info();
+ if (!maybe_proto_info.IsPrototypeInfo()) return;
PrototypeInfo proto_info = PrototypeInfo::cast(maybe_proto_info);
- if (!proto_info->prototype_users()->IsWeakArrayList()) {
+ if (!proto_info.prototype_users().IsWeakArrayList()) {
return;
}
WeakArrayList prototype_users =
- WeakArrayList::cast(proto_info->prototype_users());
+ WeakArrayList::cast(proto_info.prototype_users());
// For now, only maps register themselves as users.
- for (int i = PrototypeUsers::kFirstIndex; i < prototype_users->length();
- ++i) {
+ for (int i = PrototypeUsers::kFirstIndex; i < prototype_users.length(); ++i) {
HeapObject heap_object;
- if (prototype_users->Get(i)->GetHeapObjectIfWeak(&heap_object) &&
- heap_object->IsMap()) {
+ if (prototype_users.Get(i)->GetHeapObjectIfWeak(&heap_object) &&
+ heap_object.IsMap()) {
// Walk the prototype chain (backwards, towards leaf objects) if
// necessary.
InvalidatePrototypeChainsInternal(Map::cast(heap_object));
@@ -4397,7 +4357,7 @@ Map JSObject::InvalidatePrototypeChains(Map map) {
// static
void JSObject::InvalidatePrototypeValidityCell(JSGlobalObject global) {
DisallowHeapAllocation no_gc;
- InvalidateOnePrototypeValidityCellInternal(global->map());
+ InvalidateOnePrototypeValidityCellInternal(global.map());
}
Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
@@ -4425,7 +4385,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
// SpiderMonkey behaves this way.
if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true);
- bool all_extensible = object->map()->is_extensible();
+ bool all_extensible = object->map().is_extensible();
Handle<JSObject> real_receiver = object;
if (from_javascript) {
// Find the first object in the chain whose prototype object is not
@@ -4437,7 +4397,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
// JSProxies.
real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
- all_extensible = all_extensible && real_receiver->map()->is_extensible();
+ all_extensible = all_extensible && real_receiver->map().is_extensible();
}
}
Handle<Map> map(real_receiver->map(), isolate);
@@ -4524,14 +4484,14 @@ ElementsAccessor* JSObject::GetElementsAccessor() {
void JSObject::ValidateElements(JSObject object) {
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
- object->GetElementsAccessor()->Validate(object);
+ object.GetElementsAccessor()->Validate(object);
}
#endif
}
bool JSObject::WouldConvertToSlowElements(uint32_t index) {
if (!HasFastElements()) return false;
- uint32_t capacity = static_cast<uint32_t>(elements()->length());
+ uint32_t capacity = static_cast<uint32_t>(elements().length());
uint32_t new_capacity;
return ShouldConvertToSlowElements(*this, capacity, index, &new_capacity);
}
@@ -4542,23 +4502,23 @@ static bool ShouldConvertToFastElements(JSObject object,
uint32_t* new_capacity) {
// If properties with non-standard attributes or accessors were added, we
// cannot go back to fast elements.
- if (dictionary->requires_slow_elements()) return false;
+ if (dictionary.requires_slow_elements()) return false;
// Adding a property with this index will require slow elements.
if (index >= static_cast<uint32_t>(Smi::kMaxValue)) return false;
- if (object->IsJSArray()) {
- Object length = JSArray::cast(object)->length();
- if (!length->IsSmi()) return false;
+ if (object.IsJSArray()) {
+ Object length = JSArray::cast(object).length();
+ if (!length.IsSmi()) return false;
*new_capacity = static_cast<uint32_t>(Smi::ToInt(length));
- } else if (object->IsJSSloppyArgumentsObject()) {
+ } else if (object.IsJSSloppyArgumentsObject()) {
return false;
} else {
- *new_capacity = dictionary->max_number_key() + 1;
+ *new_capacity = dictionary.max_number_key() + 1;
}
*new_capacity = Max(index + 1, *new_capacity);
- uint32_t dictionary_size = static_cast<uint32_t>(dictionary->Capacity()) *
+ uint32_t dictionary_size = static_cast<uint32_t>(dictionary.Capacity()) *
NumberDictionary::kEntrySize;
// Turn fast if the dictionary only saves 50% space.
@@ -4566,24 +4526,24 @@ static bool ShouldConvertToFastElements(JSObject object,
}
static ElementsKind BestFittingFastElementsKind(JSObject object) {
- if (!object->map()->CanHaveFastTransitionableElementsKind()) {
+ if (!object.map().CanHaveFastTransitionableElementsKind()) {
return HOLEY_ELEMENTS;
}
- if (object->HasSloppyArgumentsElements()) {
+ if (object.HasSloppyArgumentsElements()) {
return FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
- if (object->HasStringWrapperElements()) {
+ if (object.HasStringWrapperElements()) {
return FAST_STRING_WRAPPER_ELEMENTS;
}
- DCHECK(object->HasDictionaryElements());
- NumberDictionary dictionary = object->element_dictionary();
+ DCHECK(object.HasDictionaryElements());
+ NumberDictionary dictionary = object.element_dictionary();
ElementsKind kind = HOLEY_SMI_ELEMENTS;
- for (int i = 0; i < dictionary->Capacity(); i++) {
- Object key = dictionary->KeyAt(i);
- if (key->IsNumber()) {
- Object value = dictionary->ValueAt(i);
- if (!value->IsNumber()) return HOLEY_ELEMENTS;
- if (!value->IsSmi()) {
+ for (int i = 0; i < dictionary.Capacity(); i++) {
+ Object key = dictionary.KeyAt(i);
+ if (key.IsNumber()) {
+ Object value = dictionary.ValueAt(i);
+ if (!value.IsNumber()) return HOLEY_ELEMENTS;
+ if (!value.IsSmi()) {
if (!FLAG_unbox_double_arrays) return HOLEY_ELEMENTS;
kind = HOLEY_DOUBLE_ELEMENTS;
}
@@ -4596,7 +4556,7 @@ static ElementsKind BestFittingFastElementsKind(JSObject object) {
void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
Handle<Object> value,
PropertyAttributes attributes) {
- DCHECK(object->map()->is_extensible());
+ DCHECK(object->map().is_extensible());
Isolate* isolate = object->GetIsolate();
@@ -4604,14 +4564,14 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
uint32_t new_capacity = 0;
if (object->IsJSArray()) {
- CHECK(JSArray::cast(*object)->length()->ToArrayLength(&old_length));
+ CHECK(JSArray::cast(*object).length().ToArrayLength(&old_length));
}
ElementsKind kind = object->GetElementsKind();
FixedArrayBase elements = object->elements();
ElementsKind dictionary_kind = DICTIONARY_ELEMENTS;
if (IsSloppyArgumentsElementsKind(kind)) {
- elements = SloppyArgumentsElements::cast(elements)->arguments();
+ elements = SloppyArgumentsElements::cast(elements).arguments();
dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
} else if (IsStringWrapperElementsKind(kind)) {
dictionary_kind = SLOW_STRING_WRAPPER_ELEMENTS;
@@ -4619,13 +4579,13 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
if (attributes != NONE) {
kind = dictionary_kind;
- } else if (elements->IsNumberDictionary()) {
+ } else if (elements.IsNumberDictionary()) {
kind = ShouldConvertToFastElements(
*object, NumberDictionary::cast(elements), index, &new_capacity)
? BestFittingFastElementsKind(*object)
: dictionary_kind;
} else if (ShouldConvertToSlowElements(
- *object, static_cast<uint32_t>(elements->length()), index,
+ *object, static_cast<uint32_t>(elements.length()), index,
&new_capacity)) {
kind = dictionary_kind;
}
@@ -4642,7 +4602,7 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
if (object->IsJSArray() && index >= old_length) {
Handle<Object> new_length =
isolate->factory()->NewNumberFromUint(index + 1);
- JSArray::cast(*object)->set_length(*new_length);
+ JSArray::cast(*object).set_length(*new_length);
}
}
@@ -4665,7 +4625,7 @@ bool JSObject::UpdateAllocationSite(Handle<JSObject> object,
if (memento.is_null()) return false;
// Walk through to the Allocation Site
- site = handle(memento->GetAllocationSite(), heap->isolate());
+ site = handle(memento.GetAllocationSite(), heap->isolate());
}
return AllocationSite::DigestTransitionFeedback<update_or_check>(site,
to_kind);
@@ -4707,19 +4667,19 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
} else {
DCHECK((IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
(IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
- uint32_t c = static_cast<uint32_t>(object->elements()->length());
+ uint32_t c = static_cast<uint32_t>(object->elements().length());
ElementsAccessor::ForKind(to_kind)->GrowCapacityAndConvert(object, c);
}
}
template <typename BackingStore>
static int HoleyElementsUsage(JSObject object, BackingStore store) {
- Isolate* isolate = object->GetIsolate();
- int limit = object->IsJSArray() ? Smi::ToInt(JSArray::cast(object)->length())
- : store->length();
+ Isolate* isolate = object.GetIsolate();
+ int limit = object.IsJSArray() ? Smi::ToInt(JSArray::cast(object).length())
+ : store.length();
int used = 0;
for (int i = 0; i < limit; ++i) {
- if (!store->is_the_hole(isolate, i)) ++used;
+ if (!store.is_the_hole(isolate, i)) ++used;
}
return used;
}
@@ -4732,17 +4692,19 @@ int JSObject::GetFastElementsUsage() {
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
- return IsJSArray() ? Smi::ToInt(JSArray::cast(*this)->length())
- : store->length();
+ return IsJSArray() ? Smi::ToInt(JSArray::cast(*this).length())
+ : store.length();
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- store = SloppyArgumentsElements::cast(store)->arguments();
+ store = SloppyArgumentsElements::cast(store).arguments();
V8_FALLTHROUGH;
case HOLEY_SMI_ELEMENTS:
case HOLEY_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS:
return HoleyElementsUsage(*this, FixedArray::cast(store));
case HOLEY_DOUBLE_ELEMENTS:
- if (elements()->length() == 0) return 0;
+ if (elements().length() == 0) return 0;
return HoleyElementsUsage(*this, FixedDoubleArray::cast(store));
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -4792,7 +4754,7 @@ bool JSObject::IsApiWrapper() {
// These object types can carry information relevant for embedders. The
// *_API_* types are generated through templates which can have embedder
// fields. The other types have their embedder fields added at compile time.
- auto instance_type = map()->instance_type();
+ auto instance_type = map().instance_type();
return instance_type == JS_API_OBJECT_TYPE ||
instance_type == JS_ARRAY_BUFFER_TYPE ||
instance_type == JS_DATA_VIEW_TYPE ||
@@ -4801,7 +4763,7 @@ bool JSObject::IsApiWrapper() {
}
bool JSObject::IsDroppableApiWrapper() {
- auto instance_type = map()->instance_type();
+ auto instance_type = map().instance_type();
return instance_type == JS_API_OBJECT_TYPE ||
instance_type == JS_SPECIAL_API_OBJECT_TYPE;
}
@@ -4809,7 +4771,7 @@ bool JSObject::IsDroppableApiWrapper() {
// static
MaybeHandle<NativeContext> JSBoundFunction::GetFunctionRealm(
Handle<JSBoundFunction> function) {
- DCHECK(function->map()->is_constructor());
+ DCHECK(function->map().is_constructor());
return JSReceiver::GetFunctionRealm(
handle(function->bound_target_function(), function->GetIsolate()));
}
@@ -4821,14 +4783,14 @@ MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
Handle<String> target_name = prefix;
Factory* factory = isolate->factory();
// Concatenate the "bound " up to the last non-bound target.
- while (function->bound_target_function()->IsJSBoundFunction()) {
+ while (function->bound_target_function().IsJSBoundFunction()) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, target_name,
factory->NewConsString(prefix, target_name),
String);
function = handle(JSBoundFunction::cast(function->bound_target_function()),
isolate);
}
- if (function->bound_target_function()->IsJSFunction()) {
+ if (function->bound_target_function().IsJSFunction()) {
Handle<JSFunction> target(
JSFunction::cast(function->bound_target_function()), isolate);
Handle<Object> name = JSFunction::GetName(isolate, target);
@@ -4842,14 +4804,14 @@ MaybeHandle<String> JSBoundFunction::GetName(Isolate* isolate,
// static
Maybe<int> JSBoundFunction::GetLength(Isolate* isolate,
Handle<JSBoundFunction> function) {
- int nof_bound_arguments = function->bound_arguments()->length();
- while (function->bound_target_function()->IsJSBoundFunction()) {
+ int nof_bound_arguments = function->bound_arguments().length();
+ while (function->bound_target_function().IsJSBoundFunction()) {
function = handle(JSBoundFunction::cast(function->bound_target_function()),
isolate);
// Make sure we never overflow {nof_bound_arguments}, the number of
// arguments of a function is strictly limited by the max length of an
// JSAarray, Smi::kMaxValue is thus a reasonably good overestimate.
- int length = function->bound_arguments()->length();
+ int length = function->bound_arguments().length();
if (V8_LIKELY(Smi::kMaxValue - nof_bound_arguments > length)) {
nof_bound_arguments += length;
} else {
@@ -4875,17 +4837,17 @@ Handle<String> JSBoundFunction::ToString(Handle<JSBoundFunction> function) {
// static
Handle<Object> JSFunction::GetName(Isolate* isolate,
Handle<JSFunction> function) {
- if (function->shared()->name_should_print_as_anonymous()) {
+ if (function->shared().name_should_print_as_anonymous()) {
return isolate->factory()->anonymous_string();
}
- return handle(function->shared()->Name(), isolate);
+ return handle(function->shared().Name(), isolate);
}
// static
Handle<NativeContext> JSFunction::GetFunctionRealm(
Handle<JSFunction> function) {
- DCHECK(function->map()->is_constructor());
- return handle(function->context()->native_context(), function->GetIsolate());
+ DCHECK(function->map().is_constructor());
+ return handle(function->context().native_context(), function->GetIsolate());
}
void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
@@ -4896,11 +4858,11 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
}
DCHECK(!is_compiled() || IsInterpreted());
- DCHECK(shared()->IsInterpreted());
+ DCHECK(shared().IsInterpreted());
DCHECK(!IsOptimized());
DCHECK(!HasOptimizedCode());
- DCHECK(shared()->allows_lazy_compilation() ||
- !shared()->optimization_disabled());
+ DCHECK(shared().allows_lazy_compilation() ||
+ !shared().optimization_disabled());
if (mode == ConcurrencyMode::kConcurrent) {
if (IsInOptimizationQueue()) {
@@ -4926,16 +4888,16 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
// static
void JSFunction::EnsureClosureFeedbackCellArray(Handle<JSFunction> function) {
Isolate* const isolate = function->GetIsolate();
- DCHECK(function->shared()->is_compiled());
- DCHECK(function->shared()->HasFeedbackMetadata());
+ DCHECK(function->shared().is_compiled());
+ DCHECK(function->shared().HasFeedbackMetadata());
if (function->has_closure_feedback_cell_array() ||
function->has_feedback_vector()) {
return;
}
- if (function->shared()->HasAsmWasmData()) return;
+ if (function->shared().HasAsmWasmData()) return;
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- DCHECK(function->shared()->HasBytecodeArray());
+ DCHECK(function->shared().HasBytecodeArray());
Handle<HeapObject> feedback_cell_array =
ClosureFeedbackCellArray::New(isolate, shared);
// Many closure cell is used as a way to specify that there is no
@@ -4949,20 +4911,20 @@ void JSFunction::EnsureClosureFeedbackCellArray(Handle<JSFunction> function) {
isolate->factory()->NewOneClosureCell(feedback_cell_array);
function->set_raw_feedback_cell(*feedback_cell);
} else {
- function->raw_feedback_cell()->set_value(*feedback_cell_array);
+ function->raw_feedback_cell().set_value(*feedback_cell_array);
}
}
// static
void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
Isolate* const isolate = function->GetIsolate();
- DCHECK(function->shared()->is_compiled());
- DCHECK(function->shared()->HasFeedbackMetadata());
+ DCHECK(function->shared().is_compiled());
+ DCHECK(function->shared().HasFeedbackMetadata());
if (function->has_feedback_vector()) return;
- if (function->shared()->HasAsmWasmData()) return;
+ if (function->shared().HasAsmWasmData()) return;
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
- DCHECK(function->shared()->HasBytecodeArray());
+ DCHECK(function->shared().HasBytecodeArray());
EnsureClosureFeedbackCellArray(function);
Handle<ClosureFeedbackCellArray> closure_feedback_cell_array =
@@ -4974,15 +4936,24 @@ void JSFunction::EnsureFeedbackVector(Handle<JSFunction> function) {
// for more details.
DCHECK(function->raw_feedback_cell() !=
isolate->heap()->many_closures_cell());
- function->raw_feedback_cell()->set_value(*feedback_vector);
+ function->raw_feedback_cell().set_value(*feedback_vector);
}
// static
void JSFunction::InitializeFeedbackCell(Handle<JSFunction> function) {
- if (FLAG_lazy_feedback_allocation) {
- EnsureClosureFeedbackCellArray(function);
- } else {
+ Isolate* const isolate = function->GetIsolate();
+ bool needs_feedback_vector = !FLAG_lazy_feedback_allocation;
+ // We need feedback vector for certain log events, collecting type profile
+ // and more precise code coverage.
+ if (FLAG_log_function_events) needs_feedback_vector = true;
+ if (!isolate->is_best_effort_code_coverage()) needs_feedback_vector = true;
+ if (isolate->is_collecting_type_profile()) needs_feedback_vector = true;
+ if (FLAG_always_opt) needs_feedback_vector = true;
+
+ if (needs_feedback_vector) {
EnsureFeedbackVector(function);
+ } else {
+ EnsureClosureFeedbackCellArray(function);
}
}
@@ -5014,7 +4985,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
// If the function is used as the global Array function, cache the
// updated initial maps (and transitioned versions) in the native context.
- Handle<Context> native_context(function->context()->native_context(),
+ Handle<Context> native_context(function->context().native_context(),
isolate);
Handle<Object> array_function(
native_context->get(Context::ARRAY_FUNCTION_INDEX), isolate);
@@ -5025,7 +4996,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
}
// Deoptimize all code that embeds the previous initial map.
- initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
+ initial_map->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kInitialMapChangedGroup);
} else {
// Put the value in the initial map field until an initial map is
@@ -5044,7 +5015,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
void JSFunction::SetPrototype(Handle<JSFunction> function,
Handle<Object> value) {
DCHECK(function->IsConstructor() ||
- IsGeneratorFunction(function->shared()->kind()));
+ IsGeneratorFunction(function->shared().kind()));
Isolate* isolate = function->GetIsolate();
Handle<JSReceiver> construct_prototype;
@@ -5063,8 +5034,8 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
new_map->SetConstructor(*value);
new_map->set_has_non_instance_prototype(true);
- FunctionKind kind = function->shared()->kind();
- Handle<Context> native_context(function->context()->native_context(),
+ FunctionKind kind = function->shared().kind();
+ Handle<Context> native_context(function->context().native_context(),
isolate);
construct_prototype = Handle<JSReceiver>(
@@ -5076,7 +5047,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
isolate);
} else {
construct_prototype = Handle<JSReceiver>::cast(value);
- function->map()->set_has_non_instance_prototype(false);
+ function->map().set_has_non_instance_prototype(false);
}
SetInstancePrototype(isolate, function, construct_prototype);
@@ -5090,22 +5061,22 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
map->SetConstructor(*function);
if (FLAG_trace_maps) {
LOG(function->GetIsolate(), MapEvent("InitialMap", Map(), *map, "",
- function->shared()->DebugName()));
+ function->shared().DebugName()));
}
}
void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
DCHECK(function->has_prototype_slot());
DCHECK(function->IsConstructor() ||
- IsResumableFunction(function->shared()->kind()));
+ IsResumableFunction(function->shared().kind()));
if (function->has_initial_map()) return;
Isolate* isolate = function->GetIsolate();
// First create a new map with the size and number of in-object properties
// suggested by the function.
InstanceType instance_type;
- if (IsResumableFunction(function->shared()->kind())) {
- instance_type = IsAsyncGeneratorFunction(function->shared()->kind())
+ if (IsResumableFunction(function->shared().kind())) {
+ instance_type = IsAsyncGeneratorFunction(function->shared().kind())
? JS_ASYNC_GENERATOR_OBJECT_TYPE
: JS_GENERATOR_OBJECT_TYPE;
} else {
@@ -5249,8 +5220,8 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
// Check that |function|'s initial map still in sync with the |constructor|,
// otherwise we must create a new initial map for |function|.
if (new_target->has_initial_map() &&
- new_target->initial_map()->GetConstructor() == *constructor) {
- DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ new_target->initial_map().GetConstructor() == *constructor) {
+ DCHECK(new_target->instance_prototype().IsJSReceiver());
return true;
}
InstanceType instance_type = constructor_initial_map->instance_type();
@@ -5260,7 +5231,7 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
// Link initial map and constructor function if the new.target is actually a
// subclass constructor.
- if (!IsDerivedConstructor(new_target->shared()->kind())) return false;
+ if (!IsDerivedConstructor(new_target->shared().kind())) return false;
int instance_size;
int in_object_properties;
@@ -5282,7 +5253,7 @@ bool FastInitializeDerivedMap(Isolate* isolate, Handle<JSFunction> new_target,
map->set_new_target_is_base(false);
Handle<HeapObject> prototype(new_target->instance_prototype(), isolate);
JSFunction::SetInitialMap(new_target, map, prototype);
- DCHECK(new_target->instance_prototype()->IsJSReceiver());
+ DCHECK(new_target->instance_prototype().IsJSReceiver());
map->SetConstructor(*constructor);
map->set_construction_counter(Map::kNoSlackTracking);
map->StartInobjectSlackTracking();
@@ -5366,15 +5337,15 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
CHECK(has_initial_map());
- if (initial_map()->IsInobjectSlackTrackingInProgress()) {
- int slack = initial_map()->ComputeMinObjectSlack(isolate);
- return initial_map()->InstanceSizeFromSlack(slack);
+ if (initial_map().IsInobjectSlackTrackingInProgress()) {
+ int slack = initial_map().ComputeMinObjectSlack(isolate);
+ return initial_map().InstanceSizeFromSlack(slack);
}
- return initial_map()->instance_size();
+ return initial_map().instance_size();
}
void JSFunction::PrintName(FILE* out) {
- std::unique_ptr<char[]> name = shared()->DebugName()->ToCString();
+ std::unique_ptr<char[]> name = shared().DebugName().ToCString();
PrintF(out, "%s", name.get());
}
@@ -5383,7 +5354,7 @@ Handle<String> JSFunction::GetName(Handle<JSFunction> function) {
Handle<Object> name =
JSReceiver::GetDataProperty(function, isolate->factory()->name_string());
if (name->IsString()) return Handle<String>::cast(name);
- return handle(function->shared()->DebugName(), isolate);
+ return handle(function->shared().DebugName(), isolate);
}
Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
@@ -5447,10 +5418,10 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
if (maybe_class_positions->IsClassPositions()) {
ClassPositions class_positions =
ClassPositions::cast(*maybe_class_positions);
- int start_position = class_positions->start();
- int end_position = class_positions->end();
+ int start_position = class_positions.start();
+ int end_position = class_positions.end();
Handle<String> script_source(
- String::cast(Script::cast(shared_info->script())->source()), isolate);
+ String::cast(Script::cast(shared_info->script()).source()), isolate);
return isolate->factory()->NewSubString(script_source, start_position,
end_position);
}
@@ -5554,7 +5525,7 @@ void JSFunction::ClearTypeFeedbackInfo() {
if (has_feedback_vector()) {
FeedbackVector vector = feedback_vector();
Isolate* isolate = GetIsolate();
- if (vector->ClearSlots(isolate)) {
+ if (vector.ClearSlots(isolate)) {
IC::OnFeedbackChanged(isolate, vector, FeedbackSlot::Invalid(), *this,
"ClearTypeFeedbackInfo");
}
@@ -5588,7 +5559,7 @@ Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
PropertyCellType original_cell_type = cell->property_details().cell_type();
DCHECK(original_cell_type == PropertyCellType::kInvalidated ||
original_cell_type == PropertyCellType::kUninitialized);
- DCHECK(cell->value()->IsTheHole(isolate));
+ DCHECK(cell->value().IsTheHole(isolate));
if (original_cell_type == PropertyCellType::kInvalidated) {
cell = PropertyCell::InvalidateEntry(isolate, dictionary, entry);
}
@@ -5640,8 +5611,8 @@ Address JSDate::GetField(Address raw_object, Address smi_index) {
Object object(raw_object);
Smi index(smi_index);
return JSDate::cast(object)
- ->DoGetField(static_cast<FieldIndex>(index->value()))
- ->ptr();
+ .DoGetField(static_cast<FieldIndex>(index.value()))
+ .ptr();
}
Object JSDate::DoGetField(FieldIndex index) {
@@ -5651,10 +5622,10 @@ Object JSDate::DoGetField(FieldIndex index) {
if (index < kFirstUncachedField) {
Object stamp = cache_stamp();
- if (stamp != date_cache->stamp() && stamp->IsSmi()) {
+ if (stamp != date_cache->stamp() && stamp.IsSmi()) {
// Since the stamp is not NaN, the value is also not NaN.
int64_t local_time_ms =
- date_cache->ToLocal(static_cast<int64_t>(value()->Number()));
+ date_cache->ToLocal(static_cast<int64_t>(value().Number()));
SetCachedFields(local_time_ms, date_cache);
}
switch (index) {
@@ -5678,10 +5649,10 @@ Object JSDate::DoGetField(FieldIndex index) {
}
if (index >= kFirstUTCField) {
- return GetUTCField(index, value()->Number(), date_cache);
+ return GetUTCField(index, value().Number(), date_cache);
}
- double time = value()->Number();
+ double time = value().Number();
if (std::isnan(time)) return GetReadOnlyRoots().nan_value();
int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time));
@@ -5787,7 +5758,27 @@ void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
}
+// static
+void JSMessageObject::EnsureSourcePositionsAvailable(
+ Isolate* isolate, Handle<JSMessageObject> message) {
+ if (!message->DidEnsureSourcePositionsAvailable()) {
+ DCHECK_EQ(message->start_position(), -1);
+ DCHECK_GE(message->bytecode_offset().value(), 0);
+ Handle<SharedFunctionInfo> shared_info(
+ SharedFunctionInfo::cast(message->shared_info()), isolate);
+ SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared_info);
+ DCHECK(shared_info->HasBytecodeArray());
+ int position = shared_info->abstract_code().SourcePosition(
+ message->bytecode_offset().value());
+ DCHECK_GE(position, 0);
+ message->set_start_position(position);
+ message->set_end_position(position + 1);
+ message->set_shared_info(ReadOnlyRoots(isolate).undefined_value());
+ }
+}
+
int JSMessageObject::GetLineNumber() const {
+ DCHECK(DidEnsureSourcePositionsAvailable());
if (start_position() == -1) return Message::kNoLineNumberInfo;
Handle<Script> the_script(script(), GetIsolate());
@@ -5803,6 +5794,7 @@ int JSMessageObject::GetLineNumber() const {
}
int JSMessageObject::GetColumnNumber() const {
+ DCHECK(DidEnsureSourcePositionsAvailable());
if (start_position() == -1) return -1;
Handle<Script> the_script(script(), GetIsolate());
@@ -5827,6 +5819,7 @@ Handle<String> JSMessageObject::GetSourceLine() const {
Script::PositionInfo info;
const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
+ DCHECK(DidEnsureSourcePositionsAvailable());
if (!Script::GetPositionInfo(the_script, start_position(), &info,
offset_flag)) {
return isolate->factory()->empty_string();
@@ -5836,10 +5829,5 @@ Handle<String> JSMessageObject::GetSourceLine() const {
return isolate->factory()->NewSubString(src, info.line_start, info.line_end);
}
-// Explicit instantiation definitions.
-template void JSObject::ApplyAttributesToDictionary(
- Isolate* isolate, ReadOnlyRoots roots, Handle<NumberDictionary> dictionary,
- const PropertyAttributes attributes);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index c67f70c207..5ac1751c48 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -5,10 +5,10 @@
#ifndef V8_OBJECTS_JS_OBJECTS_H_
#define V8_OBJECTS_JS_OBJECTS_H_
-#include "src/objects.h"
#include "src/objects/embedder-data-slot.h"
+#include "src/objects/objects.h"
#include "src/objects/property-array.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -259,19 +259,12 @@ class JSReceiver : public HeapObject {
Handle<JSReceiver> object, PropertyFilter filter,
bool try_fast_path = true);
- V8_WARN_UNUSED_RESULT static Handle<FixedArray> GetOwnElementIndices(
- Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSObject> object);
-
static const int kHashMask = PropertyArray::HashField::kMask;
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_JSRECEIVER_FIELDS)
- static const int kHeaderSize = kSize;
-
bool HasProxyInPrototype(Isolate* isolate);
- bool HasComplexElements();
-
V8_WARN_UNUSED_RESULT static MaybeHandle<FixedArray> GetPrivateEntries(
Isolate* isolate, Handle<JSReceiver> receiver);
@@ -345,8 +338,9 @@ class JSObject : public JSReceiver {
// Returns true if an object has elements of PACKED_ELEMENTS
inline bool HasPackedElements();
inline bool HasFrozenOrSealedElements();
+ inline bool HasSealedElements();
- inline bool HasFixedTypedArrayElements();
+ inline bool HasTypedArrayElements();
inline bool HasFixedUint8ClampedElements();
inline bool HasFixedArrayElements();
@@ -641,7 +635,12 @@ class JSObject : public JSReceiver {
inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index);
inline void FastPropertyAtPut(FieldIndex index, Object value);
- inline void RawFastPropertyAtPut(FieldIndex index, Object value);
+ inline void RawFastPropertyAtPut(
+ FieldIndex index, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void RawFastInobjectPropertyAtPut(
+ FieldIndex index, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits);
inline void WriteToField(int descriptor, PropertyDetails details,
Object value);
@@ -879,15 +878,8 @@ class JSIteratorResult : public JSObject {
DECL_ACCESSORS(done, Object)
// Layout description.
-#define JS_ITERATOR_RESULT_FIELDS(V) \
- V(kValueOffset, kTaggedSize) \
- V(kDoneOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_ITERATOR_RESULT_FIELDS)
-#undef JS_ITERATOR_RESULT_FIELDS
+ TORQUE_GENERATED_JSITERATOR_RESULT_FIELDS)
// Indices of in-object properties.
static const int kValueIndex = 0;
@@ -1209,15 +1201,8 @@ class JSGlobalObject : public JSObject {
DECL_VERIFIER(JSGlobalObject)
// Layout description.
-#define JS_GLOBAL_OBJECT_FIELDS(V) \
- V(kNativeContextOffset, kTaggedSize) \
- V(kGlobalProxyOffset, kTaggedSize) \
- /* Header size. */ \
- V(kHeaderSize, 0) \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_GLOBAL_OBJECT_FIELDS)
-#undef JS_GLOBAL_OBJECT_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSGLOBAL_OBJECT_FIELDS)
OBJECT_CONSTRUCTORS(JSGlobalObject, JSObject);
};
@@ -1356,23 +1341,30 @@ class JSMessageObject : public JSObject {
// [stack_frames]: an array of stack frames for this error object.
DECL_ACCESSORS(stack_frames, Object)
- // [start_position]: the start position in the script for the error message.
- inline int start_position() const;
- inline void set_start_position(int value);
+ // Initializes the source positions in the object if possible. Does nothing if
+ // called more than once. If called when stack space is exhausted, then the
+ // source positions will be not be set and calling it again when there is more
+ // stack space will not have any effect.
+ static void EnsureSourcePositionsAvailable(Isolate* isolate,
+ Handle<JSMessageObject> message);
- // [end_position]: the end position in the script for the error message.
- inline int end_position() const;
- inline void set_end_position(int value);
+ // Gets the start and end positions for the message.
+ // EnsureSourcePositionsAvailable must have been called before calling these.
+ inline int GetStartPosition() const;
+ inline int GetEndPosition() const;
// Returns the line number for the error message (1-based), or
// Message::kNoLineNumberInfo if the line cannot be determined.
+ // EnsureSourcePositionsAvailable must have been called before calling this.
V8_EXPORT_PRIVATE int GetLineNumber() const;
// Returns the offset of the given position within the containing line.
+ // EnsureSourcePositionsAvailable must have been called before calling this.
V8_EXPORT_PRIVATE int GetColumnNumber() const;
// Returns the source code line containing the given source
// position, or the empty string if the position is invalid.
+ // EnsureSourcePositionsAvailable must have been called before calling this.
Handle<String> GetSourceLine() const;
inline int error_level() const;
@@ -1393,6 +1385,27 @@ class JSMessageObject : public JSObject {
kPointerFieldsEndOffset, kSize>;
OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
+
+ private:
+ friend class Factory;
+
+ inline bool DidEnsureSourcePositionsAvailable() const;
+
+ // [shared]: optional SharedFunctionInfo that can be used to reconstruct the
+ // source position if not available when the message was generated.
+ DECL_ACCESSORS(shared_info, HeapObject)
+
+ // [bytecode_offset]: optional offset using along with |shared| to generation
+ // source positions.
+ DECL_ACCESSORS(bytecode_offset, Smi)
+
+ // [start_position]: the start position in the script for the error message.
+ inline int start_position() const;
+ inline void set_start_position(int value);
+
+ // [end_position]: the end position in the script for the error message.
+ inline int end_position() const;
+ inline void set_end_position(int value);
};
// The [Async-from-Sync Iterator] object
diff --git a/deps/v8/src/objects/js-plural-rules-inl.h b/deps/v8/src/objects/js-plural-rules-inl.h
index f7248431e8..1924bdc4ff 100644
--- a/deps/v8/src/objects/js-plural-rules-inl.h
+++ b/deps/v8/src/objects/js-plural-rules-inl.h
@@ -9,9 +9,9 @@
#ifndef V8_OBJECTS_JS_PLURAL_RULES_INL_H_
#define V8_OBJECTS_JS_PLURAL_RULES_INL_H_
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
#include "src/objects/js-plural-rules.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -24,9 +24,9 @@ OBJECT_CONSTRUCTORS_IMPL(JSPluralRules, JSObject)
ACCESSORS(JSPluralRules, locale, String, kLocaleOffset)
SMI_ACCESSORS(JSPluralRules, flags, kFlagsOffset)
ACCESSORS(JSPluralRules, icu_plural_rules, Managed<icu::PluralRules>,
- kICUPluralRulesOffset)
+ kIcuPluralRulesOffset)
ACCESSORS(JSPluralRules, icu_decimal_format, Managed<icu::DecimalFormat>,
- kICUDecimalFormatOffset)
+ kIcuDecimalFormatOffset)
inline void JSPluralRules::set_type(Type type) {
DCHECK_LT(type, Type::COUNT);
diff --git a/deps/v8/src/objects/js-plural-rules.cc b/deps/v8/src/objects/js-plural-rules.cc
index da349dcd81..8daf5db64a 100644
--- a/deps/v8/src/objects/js-plural-rules.cc
+++ b/deps/v8/src/objects/js-plural-rules.cc
@@ -8,7 +8,7 @@
#include "src/objects/js-plural-rules.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-plural-rules-inl.h"
#include "unicode/decimfmt.h"
@@ -164,9 +164,24 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
CHECK_NOT_NULL(icu_decimal_format.get());
// 9. Perform ? SetNumberFormatDigitOptions(pluralRules, options, 0, 3).
- Maybe<bool> done = Intl::SetNumberFormatDigitOptions(
- isolate, icu_decimal_format.get(), options, 0, 3);
- MAYBE_RETURN(done, MaybeHandle<JSPluralRules>());
+ Maybe<Intl::NumberFormatDigitOptions> maybe_digit_options =
+ Intl::SetNumberFormatDigitOptions(isolate, options, 0, 3);
+ MAYBE_RETURN(maybe_digit_options, MaybeHandle<JSPluralRules>());
+ Intl::NumberFormatDigitOptions digit_options = maybe_digit_options.FromJust();
+
+ icu_decimal_format->setRoundingMode(icu::DecimalFormat::kRoundHalfUp);
+ icu_decimal_format->setMinimumIntegerDigits(
+ digit_options.minimum_integer_digits);
+ icu_decimal_format->setMinimumFractionDigits(
+ digit_options.minimum_fraction_digits);
+ icu_decimal_format->setMaximumFractionDigits(
+ digit_options.maximum_fraction_digits);
+ if (digit_options.minimum_significant_digits > 0) {
+ icu_decimal_format->setMinimumSignificantDigits(
+ digit_options.minimum_significant_digits);
+ icu_decimal_format->setMaximumSignificantDigits(
+ digit_options.maximum_significant_digits);
+ }
Handle<Managed<icu::PluralRules>> managed_plural_rules =
Managed<icu::PluralRules>::FromUniquePtr(isolate, 0,
@@ -184,11 +199,11 @@ MaybeHandle<JSPluralRules> JSPluralRules::Initialize(
MaybeHandle<String> JSPluralRules::ResolvePlural(
Isolate* isolate, Handle<JSPluralRules> plural_rules, double number) {
- icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules()->raw();
+ icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules().raw();
CHECK_NOT_NULL(icu_plural_rules);
icu::DecimalFormat* icu_decimal_format =
- plural_rules->icu_decimal_format()->raw();
+ plural_rules->icu_decimal_format().raw();
CHECK_NOT_NULL(icu_decimal_format);
// Currently, PluralRules doesn't implement all the options for rounding that
@@ -247,7 +262,7 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
"type");
icu::DecimalFormat* icu_decimal_format =
- plural_rules->icu_decimal_format()->raw();
+ plural_rules->icu_decimal_format().raw();
CHECK_NOT_NULL(icu_decimal_format);
// This is a safe upcast as icu::DecimalFormat inherits from
@@ -281,7 +296,7 @@ Handle<JSObject> JSPluralRules::ResolvedOptions(
// 6. Let pluralCategories be a List of Strings representing the
// possible results of PluralRuleSelect for the selected locale pr.
- icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules()->raw();
+ icu::PluralRules* icu_plural_rules = plural_rules->icu_plural_rules().raw();
CHECK_NOT_NULL(icu_plural_rules);
UErrorCode status = U_ZERO_ERROR;
diff --git a/deps/v8/src/objects/js-plural-rules.h b/deps/v8/src/objects/js-plural-rules.h
index 70c63a9a8f..249090bdf6 100644
--- a/deps/v8/src/objects/js-plural-rules.h
+++ b/deps/v8/src/objects/js-plural-rules.h
@@ -12,16 +12,17 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/intl-objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
namespace U_ICU_NAMESPACE {
+class DecimalFormat;
class PluralRules;
} // namespace U_ICU_NAMESPACE
@@ -69,16 +70,8 @@ class JSPluralRules : public JSObject {
STATIC_ASSERT(Type::ORDINAL <= TypeBits::kMax);
// Layout description.
-#define JS_PLURAL_RULES_FIELDS(V) \
- V(kLocaleOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- V(kICUPluralRulesOffset, kTaggedSize) \
- V(kICUDecimalFormatOffset, kTaggedSize) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_PLURAL_RULES_FIELDS)
-#undef JS_PLURAL_RULES_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSPLURAL_RULES_FIELDS)
DECL_ACCESSORS(locale, String)
DECL_INT_ACCESSORS(flags)
diff --git a/deps/v8/src/objects/js-promise-inl.h b/deps/v8/src/objects/js-promise-inl.h
index a423c0281c..ecfeb53306 100644
--- a/deps/v8/src/objects/js-promise-inl.h
+++ b/deps/v8/src/objects/js-promise-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/js-promise.h"
-#include "src/objects-inl.h" // Needed for write barriers
-#include "src/objects.h"
+#include "src/objects/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-proxy-inl.h b/deps/v8/src/objects/js-proxy-inl.h
index e0d0835f06..f33628b5c2 100644
--- a/deps/v8/src/objects/js-proxy-inl.h
+++ b/deps/v8/src/objects/js-proxy-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-proxy.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,7 +22,7 @@ CAST_ACCESSOR(JSProxy)
ACCESSORS(JSProxy, target, Object, kTargetOffset)
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
-bool JSProxy::IsRevoked() const { return !handler()->IsJSReceiver(); }
+bool JSProxy::IsRevoked() const { return !handler().IsJSReceiver(); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 68fbb333b9..c4f98927e9 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_JS_PROXY_H_
#include "src/objects/js-objects.h"
-#include "torque-generated/builtin-definitions-from-dsl.h"
+#include "torque-generated/builtin-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 8322a3c258..93e6ee008d 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-regexp.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
#include "src/objects/smi.h"
#include "src/objects/string.h"
@@ -28,9 +28,9 @@ ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
JSRegExp::Type JSRegExp::TypeTag() const {
Object data = this->data();
- if (data->IsUndefined()) return JSRegExp::NOT_COMPILED;
- Smi smi = Smi::cast(FixedArray::cast(data)->get(kTagIndex));
- return static_cast<JSRegExp::Type>(smi->value());
+ if (data.IsUndefined()) return JSRegExp::NOT_COMPILED;
+ Smi smi = Smi::cast(FixedArray::cast(data).get(kTagIndex));
+ return static_cast<JSRegExp::Type>(smi.value());
}
int JSRegExp::CaptureCount() {
@@ -45,21 +45,21 @@ int JSRegExp::CaptureCount() {
}
JSRegExp::Flags JSRegExp::GetFlags() {
- DCHECK(this->data()->IsFixedArray());
+ DCHECK(this->data().IsFixedArray());
Object data = this->data();
- Smi smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
- return Flags(smi->value());
+ Smi smi = Smi::cast(FixedArray::cast(data).get(kFlagsIndex));
+ return Flags(smi.value());
}
String JSRegExp::Pattern() {
- DCHECK(this->data()->IsFixedArray());
+ DCHECK(this->data().IsFixedArray());
Object data = this->data();
- String pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
+ String pattern = String::cast(FixedArray::cast(data).get(kSourceIndex));
return pattern;
}
Object JSRegExp::CaptureNameMap() {
- DCHECK(this->data()->IsFixedArray());
+ DCHECK(this->data().IsFixedArray());
DCHECK_EQ(TypeTag(), IRREGEXP);
Object value = DataAt(kIrregexpCaptureNameMapIndex);
DCHECK_NE(value, Smi::FromInt(JSRegExp::kUninitializedValue));
@@ -68,24 +68,24 @@ Object JSRegExp::CaptureNameMap() {
Object JSRegExp::DataAt(int index) const {
DCHECK(TypeTag() != NOT_COMPILED);
- return FixedArray::cast(data())->get(index);
+ return FixedArray::cast(data()).get(index);
}
void JSRegExp::SetDataAt(int index, Object value) {
DCHECK(TypeTag() != NOT_COMPILED);
DCHECK_GE(index,
kDataIndex); // Only implementation data can be set this way.
- FixedArray::cast(data())->set(index, value);
+ FixedArray::cast(data()).set(index, value);
}
bool JSRegExp::HasCompiledCode() const {
if (TypeTag() != IRREGEXP) return false;
#ifdef DEBUG
- DCHECK(DataAt(kIrregexpLatin1CodeIndex)->IsCode() ||
- DataAt(kIrregexpLatin1CodeIndex)->IsByteArray() ||
+ DCHECK(DataAt(kIrregexpLatin1CodeIndex).IsCode() ||
+ DataAt(kIrregexpLatin1CodeIndex).IsByteArray() ||
DataAt(kIrregexpLatin1CodeIndex) == Smi::FromInt(kUninitializedValue));
- DCHECK(DataAt(kIrregexpUC16CodeIndex)->IsCode() ||
- DataAt(kIrregexpUC16CodeIndex)->IsByteArray() ||
+ DCHECK(DataAt(kIrregexpUC16CodeIndex).IsCode() ||
+ DataAt(kIrregexpUC16CodeIndex).IsByteArray() ||
DataAt(kIrregexpUC16CodeIndex) == Smi::FromInt(kUninitializedValue));
#endif // DEBUG
Smi uninitialized = Smi::FromInt(kUninitializedValue);
diff --git a/deps/v8/src/objects/js-regexp-string-iterator-inl.h b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
index 82565f0de9..08e2f99d7e 100644
--- a/deps/v8/src/objects/js-regexp-string-iterator-inl.h
+++ b/deps/v8/src/objects/js-regexp-string-iterator-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-regexp-string-iterator.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index a48900d81b..e525c66e3e 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -167,10 +167,10 @@ class JSRegExp : public JSObject {
// Descriptor array index to important methods in the prototype.
static const int kExecFunctionDescriptorIndex = 1;
static const int kSymbolMatchFunctionDescriptorIndex = 13;
- static const int kSymbolReplaceFunctionDescriptorIndex = 14;
- static const int kSymbolSearchFunctionDescriptorIndex = 15;
- static const int kSymbolSplitFunctionDescriptorIndex = 16;
- static const int kSymbolMatchAllFunctionDescriptorIndex = 17;
+ static const int kSymbolMatchAllFunctionDescriptorIndex = 14;
+ static const int kSymbolReplaceFunctionDescriptorIndex = 15;
+ static const int kSymbolSearchFunctionDescriptorIndex = 16;
+ static const int kSymbolSplitFunctionDescriptorIndex = 17;
// The uninitialized value for a regexp code object.
static const int kUninitializedValue = -1;
diff --git a/deps/v8/src/objects/js-relative-time-format-inl.h b/deps/v8/src/objects/js-relative-time-format-inl.h
index 6b8b4550ac..1ff66b1a12 100644
--- a/deps/v8/src/objects/js-relative-time-format-inl.h
+++ b/deps/v8/src/objects/js-relative-time-format-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_INL_H_
#define V8_OBJECTS_JS_RELATIVE_TIME_FORMAT_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-relative-time-format.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,7 +23,7 @@ OBJECT_CONSTRUCTORS_IMPL(JSRelativeTimeFormat, JSObject)
// Base relative time format accessors.
ACCESSORS(JSRelativeTimeFormat, locale, String, kLocaleOffset)
ACCESSORS(JSRelativeTimeFormat, icu_formatter,
- Managed<icu::RelativeDateTimeFormatter>, kICUFormatterOffset)
+ Managed<icu::RelativeDateTimeFormatter>, kIcuFormatterOffset)
SMI_ACCESSORS(JSRelativeTimeFormat, flags, kFlagsOffset)
inline void JSRelativeTimeFormat::set_style(Style style) {
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index 5b89e83057..59a3bf7ea0 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -12,12 +12,12 @@
#include <memory>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-number-format.h"
#include "src/objects/js-relative-time-format-inl.h"
+#include "src/objects/objects-inl.h"
#include "unicode/numfmt.h"
#include "unicode/reldatefmt.h"
@@ -88,8 +88,21 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_locale_matcher, MaybeHandle<JSRelativeTimeFormat>());
Intl::MatcherOption matcher = maybe_locale_matcher.FromJust();
- // 7. Let localeData be %RelativeTimeFormat%.[[LocaleData]].
- // 8. Let r be
+ // 7. Let _numberingSystem_ be ? GetOption(_options_, `"numberingSystem"`,
+ // `"string"`, *undefined*, *undefined*).
+ std::unique_ptr<char[]> numbering_system_str = nullptr;
+ Maybe<bool> maybe_numberingSystem = Intl::GetNumberingSystem(
+ isolate, options, "Intl.RelativeTimeFormat", &numbering_system_str);
+ // 8. If _numberingSystem_ is not *undefined*, then
+ // a. If _numberingSystem_ does not match the
+ // `(3*8alphanum) *("-" (3*8alphanum))` sequence, throw a *RangeError*
+ // exception.
+ MAYBE_RETURN(maybe_numberingSystem, MaybeHandle<JSRelativeTimeFormat>());
+
+ // 9. Set _opt_.[[nu]] to _numberingSystem_.
+
+ // 10. Let localeData be %RelativeTimeFormat%.[[LocaleData]].
+ // 11. Let r be
// ResolveLocale(%RelativeTimeFormat%.[[AvailableLocales]],
// requestedLocales, opt,
// %RelativeTimeFormat%.[[RelevantExtensionKeys]], localeData).
@@ -97,14 +110,24 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
Intl::ResolveLocale(isolate, JSRelativeTimeFormat::GetAvailableLocales(),
requested_locales, matcher, {"nu"});
- // 9. Let locale be r.[[Locale]].
- // 10. Set relativeTimeFormat.[[Locale]] to locale.
- // 11. Let dataLocale be r.[[DataLocale]].
- Handle<String> locale_str =
- isolate->factory()->NewStringFromAsciiChecked(r.locale.c_str());
+ // 12. Let locale be r.[[Locale]].
+ // 13. Set relativeTimeFormat.[[Locale]] to locale.
+ // 14. Let dataLocale be r.[[DataLocale]].
+ icu::Locale icu_locale = r.icu_locale;
+ UErrorCode status = U_ZERO_ERROR;
+ if (numbering_system_str != nullptr) {
+ icu_locale.setUnicodeKeywordValue("nu", numbering_system_str.get(), status);
+ CHECK(U_SUCCESS(status));
+ }
+
+ Maybe<std::string> maybe_locale_str = Intl::ToLanguageTag(icu_locale);
+ MAYBE_RETURN(maybe_locale_str, MaybeHandle<JSRelativeTimeFormat>());
+
+ Handle<String> locale_str = isolate->factory()->NewStringFromAsciiChecked(
+ maybe_locale_str.FromJust().c_str());
relative_time_format_holder->set_locale(*locale_str);
- // 12. Let s be ? GetOption(options, "style", "string",
+ // 15. Let s be ? GetOption(options, "style", "string",
// «"long", "short", "narrow"», "long").
Maybe<Style> maybe_style = Intl::GetStringOption<Style>(
isolate, options, "style", "Intl.RelativeTimeFormat",
@@ -113,10 +136,10 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_style, MaybeHandle<JSRelativeTimeFormat>());
Style style_enum = maybe_style.FromJust();
- // 13. Set relativeTimeFormat.[[Style]] to s.
+ // 16. Set relativeTimeFormat.[[Style]] to s.
relative_time_format_holder->set_style(style_enum);
- // 14. Let numeric be ? GetOption(options, "numeric", "string",
+ // 17. Let numeric be ? GetOption(options, "numeric", "string",
// «"always", "auto"», "always").
Maybe<Numeric> maybe_numeric = Intl::GetStringOption<Numeric>(
isolate, options, "numeric", "Intl.RelativeTimeFormat",
@@ -124,12 +147,9 @@ MaybeHandle<JSRelativeTimeFormat> JSRelativeTimeFormat::Initialize(
MAYBE_RETURN(maybe_numeric, MaybeHandle<JSRelativeTimeFormat>());
Numeric numeric_enum = maybe_numeric.FromJust();
- // 15. Set relativeTimeFormat.[[Numeric]] to numeric.
+ // 18. Set relativeTimeFormat.[[Numeric]] to numeric.
relative_time_format_holder->set_numeric(numeric_enum);
- icu::Locale icu_locale = r.icu_locale;
- UErrorCode status = U_ZERO_ERROR;
-
// 19. Let relativeTimeFormat.[[NumberFormat]] be
// ? Construct(%NumberFormat%, « nfLocale, nfOptions »).
icu::NumberFormat* number_format =
@@ -177,7 +197,7 @@ Handle<JSObject> JSRelativeTimeFormat::ResolvedOptions(
format_holder->StyleAsString(), NONE);
JSObject::AddProperty(isolate, result, factory->numeric_string(),
format_holder->NumericAsString(), NONE);
- std::string locale_str(format_holder->locale()->ToCString().get());
+ std::string locale_str(format_holder->locale().ToCString().get());
icu::Locale icu_locale = Intl::CreateICULocale(locale_str);
std::string numbering_system = Intl::GetNumberingSystem(icu_locale);
JSObject::AddProperty(
@@ -293,7 +313,7 @@ MaybeHandle<T> FormatCommon(
isolate->factory()->NewStringFromAsciiChecked(func_name)),
T);
}
- icu::RelativeDateTimeFormatter* formatter = format->icu_formatter()->raw();
+ icu::RelativeDateTimeFormatter* formatter = format->icu_formatter().raw();
CHECK_NOT_NULL(formatter);
URelativeDateTimeUnit unit_enum;
if (!GetURelativeDateTimeUnit(unit, &unit_enum)) {
diff --git a/deps/v8/src/objects/js-relative-time-format.h b/deps/v8/src/objects/js-relative-time-format.h
index 8c8ef7bbce..740336c29c 100644
--- a/deps/v8/src/objects/js-relative-time-format.h
+++ b/deps/v8/src/objects/js-relative-time-format.h
@@ -12,10 +12,10 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -109,16 +109,8 @@ class JSRelativeTimeFormat : public JSObject {
DECL_VERIFIER(JSRelativeTimeFormat)
// Layout description.
-#define JS_RELATIVE_TIME_FORMAT_FIELDS(V) \
- V(kLocaleOffset, kTaggedSize) \
- V(kICUFormatterOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- JS_RELATIVE_TIME_FORMAT_FIELDS)
-#undef JS_RELATIVE_TIME_FORMAT_FIELDS
+ TORQUE_GENERATED_JSRELATIVE_TIME_FORMAT_FIELDS)
private:
static Style getStyle(const char* str);
diff --git a/deps/v8/src/objects/js-segment-iterator-inl.h b/deps/v8/src/objects/js-segment-iterator-inl.h
index 0c1a3e4eec..24a827c030 100644
--- a/deps/v8/src/objects/js-segment-iterator-inl.h
+++ b/deps/v8/src/objects/js-segment-iterator-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_SEGMENT_ITERATOR_INL_H_
#define V8_OBJECTS_JS_SEGMENT_ITERATOR_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-segment-iterator.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -22,7 +22,7 @@ OBJECT_CONSTRUCTORS_IMPL(JSSegmentIterator, JSObject)
// Base segment iterator accessors.
ACCESSORS(JSSegmentIterator, icu_break_iterator, Managed<icu::BreakIterator>,
- kICUBreakIteratorOffset)
+ kIcuBreakIteratorOffset)
ACCESSORS(JSSegmentIterator, unicode_string, Managed<icu::UnicodeString>,
kUnicodeStringOffset)
diff --git a/deps/v8/src/objects/js-segment-iterator.cc b/deps/v8/src/objects/js-segment-iterator.cc
index 570c71dd21..3d2b19ca5c 100644
--- a/deps/v8/src/objects/js-segment-iterator.cc
+++ b/deps/v8/src/objects/js-segment-iterator.cc
@@ -12,12 +12,12 @@
#include <memory>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-segment-iterator-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/objects-inl.h"
#include "unicode/brkiter.h"
namespace v8 {
@@ -26,7 +26,7 @@ namespace internal {
MaybeHandle<String> JSSegmentIterator::GetSegment(Isolate* isolate,
int32_t start,
int32_t end) const {
- return Intl::ToString(isolate, *(unicode_string()->raw()), start, end);
+ return Intl::ToString(isolate, *(unicode_string().raw()), start, end);
}
Handle<String> JSSegmentIterator::GranularityAsString() const {
@@ -80,7 +80,7 @@ Handle<Object> JSSegmentIterator::BreakType() const {
if (!is_break_type_set()) {
return GetReadOnlyRoots().undefined_value_handle();
}
- icu::BreakIterator* break_iterator = icu_break_iterator()->raw();
+ icu::BreakIterator* break_iterator = icu_break_iterator().raw();
int32_t rule_status = break_iterator->getRuleStatus();
switch (granularity()) {
case JSSegmenter::Granularity::GRAPHEME:
@@ -128,7 +128,7 @@ Handle<Object> JSSegmentIterator::BreakType() const {
Handle<Object> JSSegmentIterator::Index(
Isolate* isolate, Handle<JSSegmentIterator> segment_iterator) {
icu::BreakIterator* icu_break_iterator =
- segment_iterator->icu_break_iterator()->raw();
+ segment_iterator->icu_break_iterator().raw();
CHECK_NOT_NULL(icu_break_iterator);
return isolate->factory()->NewNumberFromInt(icu_break_iterator->current());
}
@@ -138,7 +138,7 @@ MaybeHandle<JSReceiver> JSSegmentIterator::Next(
Isolate* isolate, Handle<JSSegmentIterator> segment_iterator) {
Factory* factory = isolate->factory();
icu::BreakIterator* icu_break_iterator =
- segment_iterator->icu_break_iterator()->raw();
+ segment_iterator->icu_break_iterator().raw();
// 3. Let _previousIndex be iterator.[[SegmentIteratorIndex]].
int32_t prev = icu_break_iterator->current();
// 4. Let done be AdvanceSegmentIterator(iterator, forwards).
@@ -192,7 +192,7 @@ Maybe<bool> JSSegmentIterator::Following(
Handle<Object> from_obj) {
Factory* factory = isolate->factory();
icu::BreakIterator* icu_break_iterator =
- segment_iterator->icu_break_iterator()->raw();
+ segment_iterator->icu_break_iterator().raw();
// 3. If from is not undefined,
if (!from_obj->IsUndefined()) {
// a. Let from be ? ToIndex(from).
@@ -244,7 +244,7 @@ Maybe<bool> JSSegmentIterator::Preceding(
Handle<Object> from_obj) {
Factory* factory = isolate->factory();
icu::BreakIterator* icu_break_iterator =
- segment_iterator->icu_break_iterator()->raw();
+ segment_iterator->icu_break_iterator().raw();
// 3. If from is not undefined,
if (!from_obj->IsUndefined()) {
// a. Let from be ? ToIndex(from).
diff --git a/deps/v8/src/objects/js-segment-iterator.h b/deps/v8/src/objects/js-segment-iterator.h
index 0535704a68..cadb99e79d 100644
--- a/deps/v8/src/objects/js-segment-iterator.h
+++ b/deps/v8/src/objects/js-segment-iterator.h
@@ -9,11 +9,11 @@
#ifndef V8_OBJECTS_JS_SEGMENT_ITERATOR_H_
#define V8_OBJECTS_JS_SEGMENT_ITERATOR_H_
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/js-segmenter.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -90,16 +90,8 @@ class JSSegmentIterator : public JSObject {
DECL_INT_ACCESSORS(flags)
// Layout description.
-#define SEGMENTER_FIELDS(V) \
- /* Pointer fields. */ \
- V(kICUBreakIteratorOffset, kTaggedSize) \
- V(kUnicodeStringOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Total Size */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, SEGMENTER_FIELDS)
-#undef SEGMENTER_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSSEGMENT_ITERATOR_FIELDS)
OBJECT_CONSTRUCTORS(JSSegmentIterator, JSObject);
};
diff --git a/deps/v8/src/objects/js-segmenter-inl.h b/deps/v8/src/objects/js-segmenter-inl.h
index 05935fa905..b4adf4c8e6 100644
--- a/deps/v8/src/objects/js-segmenter-inl.h
+++ b/deps/v8/src/objects/js-segmenter-inl.h
@@ -9,8 +9,8 @@
#ifndef V8_OBJECTS_JS_SEGMENTER_INL_H_
#define V8_OBJECTS_JS_SEGMENTER_INL_H_
-#include "src/objects-inl.h"
#include "src/objects/js-segmenter.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -23,7 +23,7 @@ OBJECT_CONSTRUCTORS_IMPL(JSSegmenter, JSObject)
// Base segmenter accessors.
ACCESSORS(JSSegmenter, locale, String, kLocaleOffset)
ACCESSORS(JSSegmenter, icu_break_iterator, Managed<icu::BreakIterator>,
- kICUBreakIteratorOffset)
+ kIcuBreakIteratorOffset)
SMI_ACCESSORS(JSSegmenter, flags, kFlagsOffset)
inline void JSSegmenter::set_granularity(Granularity granularity) {
diff --git a/deps/v8/src/objects/js-segmenter.cc b/deps/v8/src/objects/js-segmenter.cc
index 7548b65f23..5321334678 100644
--- a/deps/v8/src/objects/js-segmenter.cc
+++ b/deps/v8/src/objects/js-segmenter.cc
@@ -12,12 +12,12 @@
#include <memory>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-segmenter-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/objects-inl.h"
#include "unicode/brkiter.h"
namespace v8 {
diff --git a/deps/v8/src/objects/js-segmenter.h b/deps/v8/src/objects/js-segmenter.h
index 4fd509eb0c..423dd67497 100644
--- a/deps/v8/src/objects/js-segmenter.h
+++ b/deps/v8/src/objects/js-segmenter.h
@@ -12,10 +12,10 @@
#include <set>
#include <string>
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/managed.h"
+#include "src/objects/objects.h"
#include "unicode/uversion.h"
// Has to be the last include (doesn't have include guards):
@@ -78,16 +78,8 @@ class JSSegmenter : public JSObject {
DECL_VERIFIER(JSSegmenter)
// Layout description.
-#define JS_SEGMENTER_FIELDS(V) \
- V(kJSSegmenterOffset, kTaggedSize) \
- V(kLocaleOffset, kTaggedSize) \
- V(kICUBreakIteratorOffset, kTaggedSize) \
- V(kFlagsOffset, kTaggedSize) \
- /* Header size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize, JS_SEGMENTER_FIELDS)
-#undef JS_SEGMENTER_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
+ TORQUE_GENERATED_JSSEGMENTER_FIELDS)
private:
static Granularity GetGranularity(const char* str);
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index a08cb08fcf..6632a31002 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/js-weak-refs.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/smi-inl.h"
@@ -70,14 +70,14 @@ void JSFinalizationGroup::Register(
// Add to active_cells.
weak_cell->set_next(finalization_group->active_cells());
- if (finalization_group->active_cells()->IsWeakCell()) {
- WeakCell::cast(finalization_group->active_cells())->set_prev(*weak_cell);
+ if (finalization_group->active_cells().IsWeakCell()) {
+ WeakCell::cast(finalization_group->active_cells()).set_prev(*weak_cell);
}
finalization_group->set_active_cells(*weak_cell);
if (!key->IsUndefined(isolate)) {
Handle<ObjectHashTable> key_map;
- if (finalization_group->key_map()->IsUndefined(isolate)) {
+ if (finalization_group->key_map().IsUndefined(isolate)) {
key_map = ObjectHashTable::New(isolate, 1);
} else {
key_map =
@@ -85,12 +85,12 @@ void JSFinalizationGroup::Register(
}
Object value = key_map->Lookup(key);
- if (value->IsWeakCell()) {
+ if (value.IsWeakCell()) {
WeakCell existing_weak_cell = WeakCell::cast(value);
- existing_weak_cell->set_key_list_prev(*weak_cell);
+ existing_weak_cell.set_key_list_prev(*weak_cell);
weak_cell->set_key_list_next(existing_weak_cell);
} else {
- DCHECK(value->IsTheHole(isolate));
+ DCHECK(value.IsTheHole(isolate));
}
key_map = ObjectHashTable::Put(key_map, key, weak_cell);
finalization_group->set_key_map(*key_map);
@@ -103,17 +103,17 @@ void JSFinalizationGroup::Unregister(
// Iterate through the doubly linked list of WeakCells associated with the
// key. Each WeakCell will be in the "active_cells" or "cleared_cells" list of
// its FinalizationGroup; remove it from there.
- if (!finalization_group->key_map()->IsUndefined(isolate)) {
+ if (!finalization_group->key_map().IsUndefined(isolate)) {
Handle<ObjectHashTable> key_map =
handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
Object value = key_map->Lookup(key);
Object undefined = ReadOnlyRoots(isolate).undefined_value();
- while (value->IsWeakCell()) {
+ while (value.IsWeakCell()) {
WeakCell weak_cell = WeakCell::cast(value);
- weak_cell->RemoveFromFinalizationGroupCells(isolate);
- value = weak_cell->key_list_next();
- weak_cell->set_key_list_prev(undefined);
- weak_cell->set_key_list_next(undefined);
+ weak_cell.RemoveFromFinalizationGroupCells(isolate);
+ value = weak_cell.key_list_next();
+ weak_cell.set_key_list_prev(undefined);
+ weak_cell.set_key_list_next(undefined);
}
bool was_present;
key_map = ObjectHashTable::Remove(isolate, key_map, key, &was_present);
@@ -122,7 +122,7 @@ void JSFinalizationGroup::Unregister(
}
bool JSFinalizationGroup::NeedsCleanup() const {
- return cleared_cells()->IsWeakCell();
+ return cleared_cells().IsWeakCell();
}
bool JSFinalizationGroup::scheduled_for_cleanup() const {
@@ -138,23 +138,23 @@ Object JSFinalizationGroup::PopClearedCellHoldings(
Handle<JSFinalizationGroup> finalization_group, Isolate* isolate) {
Handle<WeakCell> weak_cell =
handle(WeakCell::cast(finalization_group->cleared_cells()), isolate);
- DCHECK(weak_cell->prev()->IsUndefined(isolate));
+ DCHECK(weak_cell->prev().IsUndefined(isolate));
finalization_group->set_cleared_cells(weak_cell->next());
weak_cell->set_next(ReadOnlyRoots(isolate).undefined_value());
- if (finalization_group->cleared_cells()->IsWeakCell()) {
+ if (finalization_group->cleared_cells().IsWeakCell()) {
WeakCell cleared_cells_head =
WeakCell::cast(finalization_group->cleared_cells());
- DCHECK_EQ(cleared_cells_head->prev(), *weak_cell);
- cleared_cells_head->set_prev(ReadOnlyRoots(isolate).undefined_value());
+ DCHECK_EQ(cleared_cells_head.prev(), *weak_cell);
+ cleared_cells_head.set_prev(ReadOnlyRoots(isolate).undefined_value());
} else {
- DCHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
+ DCHECK(finalization_group->cleared_cells().IsUndefined(isolate));
}
// Also remove the WeakCell from the key_map (if it's there).
- if (!weak_cell->key()->IsUndefined(isolate)) {
- if (weak_cell->key_list_prev()->IsUndefined(isolate) &&
- weak_cell->key_list_next()->IsUndefined(isolate)) {
+ if (!weak_cell->key().IsUndefined(isolate)) {
+ if (weak_cell->key_list_prev().IsUndefined(isolate) &&
+ weak_cell->key_list_next().IsUndefined(isolate)) {
// weak_cell is the only one associated with its key; remove the key
// from the hash table.
Handle<ObjectHashTable> key_map =
@@ -164,7 +164,7 @@ Object JSFinalizationGroup::PopClearedCellHoldings(
key_map = ObjectHashTable::Remove(isolate, key_map, key, &was_present);
DCHECK(was_present);
finalization_group->set_key_map(*key_map);
- } else if (weak_cell->key_list_prev()->IsUndefined()) {
+ } else if (weak_cell->key_list_prev().IsUndefined()) {
// weak_cell is the list head for its key; we need to change the value of
// the key in the hash table.
Handle<ObjectHashTable> key_map =
@@ -180,10 +180,10 @@ Object JSFinalizationGroup::PopClearedCellHoldings(
} else {
// weak_cell is somewhere in the middle of its key list.
WeakCell prev = WeakCell::cast(weak_cell->key_list_prev());
- prev->set_key_list_next(weak_cell->key_list_next());
- if (!weak_cell->key_list_next()->IsUndefined()) {
+ prev.set_key_list_next(weak_cell->key_list_next());
+ if (!weak_cell->key_list_next().IsUndefined()) {
WeakCell next = WeakCell::cast(weak_cell->key_list_next());
- next->set_key_list_prev(weak_cell->key_list_prev());
+ next.set_key_list_prev(weak_cell->key_list_prev());
}
}
}
@@ -200,41 +200,41 @@ void WeakCell::Nullify(
// only called for WeakCells which haven't been unregistered yet, so they will
// be in the active_cells list. (The caller must guard against calling this
// for unregistered WeakCells by checking that the target is not undefined.)
- DCHECK(target()->IsJSReceiver());
+ DCHECK(target().IsJSReceiver());
set_target(ReadOnlyRoots(isolate).undefined_value());
JSFinalizationGroup fg = JSFinalizationGroup::cast(finalization_group());
- if (prev()->IsWeakCell()) {
- DCHECK_NE(fg->active_cells(), *this);
+ if (prev().IsWeakCell()) {
+ DCHECK_NE(fg.active_cells(), *this);
WeakCell prev_cell = WeakCell::cast(prev());
- prev_cell->set_next(next());
+ prev_cell.set_next(next());
gc_notify_updated_slot(prev_cell, prev_cell.RawField(WeakCell::kNextOffset),
next());
} else {
- DCHECK_EQ(fg->active_cells(), *this);
- fg->set_active_cells(next());
+ DCHECK_EQ(fg.active_cells(), *this);
+ fg.set_active_cells(next());
gc_notify_updated_slot(
fg, fg.RawField(JSFinalizationGroup::kActiveCellsOffset), next());
}
- if (next()->IsWeakCell()) {
+ if (next().IsWeakCell()) {
WeakCell next_cell = WeakCell::cast(next());
- next_cell->set_prev(prev());
+ next_cell.set_prev(prev());
gc_notify_updated_slot(next_cell, next_cell.RawField(WeakCell::kPrevOffset),
prev());
}
set_prev(ReadOnlyRoots(isolate).undefined_value());
- Object cleared_head = fg->cleared_cells();
- if (cleared_head->IsWeakCell()) {
+ Object cleared_head = fg.cleared_cells();
+ if (cleared_head.IsWeakCell()) {
WeakCell cleared_head_cell = WeakCell::cast(cleared_head);
- cleared_head_cell->set_prev(*this);
+ cleared_head_cell.set_prev(*this);
gc_notify_updated_slot(cleared_head_cell,
cleared_head_cell.RawField(WeakCell::kPrevOffset),
*this);
}
- set_next(fg->cleared_cells());
+ set_next(fg.cleared_cells());
gc_notify_updated_slot(*this, RawField(WeakCell::kNextOffset), next());
- fg->set_cleared_cells(*this);
+ fg.set_cleared_cells(*this);
gc_notify_updated_slot(
fg, fg.RawField(JSFinalizationGroup::kClearedCellsOffset), *this);
}
@@ -245,24 +245,24 @@ void WeakCell::RemoveFromFinalizationGroupCells(Isolate* isolate) {
// It's important to set_target to undefined here. This guards that we won't
// call Nullify (which assumes that the WeakCell is in active_cells).
- DCHECK(target()->IsUndefined() || target()->IsJSReceiver());
+ DCHECK(target().IsUndefined() || target().IsJSReceiver());
set_target(ReadOnlyRoots(isolate).undefined_value());
JSFinalizationGroup fg = JSFinalizationGroup::cast(finalization_group());
- if (fg->active_cells() == *this) {
- DCHECK(prev()->IsUndefined(isolate));
- fg->set_active_cells(next());
- } else if (fg->cleared_cells() == *this) {
- DCHECK(!prev()->IsWeakCell());
- fg->set_cleared_cells(next());
+ if (fg.active_cells() == *this) {
+ DCHECK(prev().IsUndefined(isolate));
+ fg.set_active_cells(next());
+ } else if (fg.cleared_cells() == *this) {
+ DCHECK(!prev().IsWeakCell());
+ fg.set_cleared_cells(next());
} else {
- DCHECK(prev()->IsWeakCell());
+ DCHECK(prev().IsWeakCell());
WeakCell prev_cell = WeakCell::cast(prev());
- prev_cell->set_next(next());
+ prev_cell.set_next(next());
}
- if (next()->IsWeakCell()) {
+ if (next().IsWeakCell()) {
WeakCell next_cell = WeakCell::cast(next());
- next_cell->set_prev(prev());
+ next_cell.set_prev(prev());
}
set_prev(ReadOnlyRoots(isolate).undefined_value());
set_next(ReadOnlyRoots(isolate).undefined_value());
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/objects/keys.cc
index 99fa2be414..d3a1f6bdc2 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/objects/keys.cc
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/keys.h"
+#include "src/objects/keys.h"
-#include "src/api-arguments-inl.h"
-#include "src/elements-inl.h"
-#include "src/field-index-inl.h"
-#include "src/handles-inl.h"
+#include "src/api/api-arguments-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
-#include "src/identity-map.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/elements-inl.h"
+#include "src/objects/field-index-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
-#include "src/property-descriptor.h"
-#include "src/prototype.h"
+#include "src/objects/property-descriptor.h"
+#include "src/objects/prototype.h"
+#include "src/utils/identity-map.h"
namespace v8 {
namespace internal {
@@ -28,7 +28,7 @@ static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
int len = array->length();
for (int i = 0; i < len; i++) {
Object e = array->get(i);
- if (!(e->IsName() || e->IsNumber())) return false;
+ if (!(e.IsName() || e.IsNumber())) return false;
}
return true;
}
@@ -71,11 +71,11 @@ void KeyAccumulator::AddKey(Object key, AddKeyConversion convert) {
void KeyAccumulator::AddKey(Handle<Object> key, AddKeyConversion convert) {
if (filter_ == PRIVATE_NAMES_ONLY) {
if (!key->IsSymbol()) return;
- if (!Symbol::cast(*key)->is_private_name()) return;
+ if (!Symbol::cast(*key).is_private_name()) return;
} else if (key->IsSymbol()) {
if (filter_ & SKIP_SYMBOLS) return;
- if (Symbol::cast(*key)->is_private()) return;
+ if (Symbol::cast(*key).is_private()) return;
} else if (filter_ & SKIP_STRINGS) {
return;
}
@@ -231,22 +231,22 @@ void KeyAccumulator::AddShadowingKey(Handle<Object> key) {
namespace {
void TrySettingEmptyEnumCache(JSReceiver object) {
- Map map = object->map();
- DCHECK_EQ(kInvalidEnumCacheSentinel, map->EnumLength());
- if (!map->OnlyHasSimpleProperties()) return;
- if (map->IsJSProxyMap()) return;
- if (map->NumberOfEnumerableProperties() > 0) return;
- DCHECK(object->IsJSObject());
- map->SetEnumLength(0);
+ Map map = object.map();
+ DCHECK_EQ(kInvalidEnumCacheSentinel, map.EnumLength());
+ if (!map.OnlyHasSimpleProperties()) return;
+ if (map.IsJSProxyMap()) return;
+ if (map.NumberOfEnumerableProperties() > 0) return;
+ DCHECK(object.IsJSObject());
+ map.SetEnumLength(0);
}
bool CheckAndInitalizeEmptyEnumCache(JSReceiver object) {
- if (object->map()->EnumLength() == kInvalidEnumCacheSentinel) {
+ if (object.map().EnumLength() == kInvalidEnumCacheSentinel) {
TrySettingEmptyEnumCache(object);
}
- if (object->map()->EnumLength() != 0) return false;
- DCHECK(object->IsJSObject());
- return !JSObject::cast(object)->HasEnumerableElements();
+ if (object.map().EnumLength() != 0) return false;
+ DCHECK(object.IsJSObject());
+ return !JSObject::cast(object).HasEnumerableElements();
}
} // namespace
@@ -268,8 +268,8 @@ void FastKeyAccumulator::Prepare() {
}
if (has_empty_prototype_) {
is_receiver_simple_enum_ =
- receiver_->map()->EnumLength() != kInvalidEnumCacheSentinel &&
- !JSObject::cast(*receiver_)->HasEnumerableElements();
+ receiver_->map().EnumLength() != kInvalidEnumCacheSentinel &&
+ !JSObject::cast(*receiver_).HasEnumerableElements();
} else if (!last_prototype.is_null()) {
last_non_empty_prototype_ = handle(last_prototype, isolate_);
}
@@ -289,7 +289,7 @@ Handle<FixedArray> ReduceFixedArrayTo(Isolate* isolate,
Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
Handle<JSObject> object) {
Handle<Map> map(object->map(), isolate);
- Handle<FixedArray> keys(map->instance_descriptors()->enum_cache()->keys(),
+ Handle<FixedArray> keys(map->instance_descriptors().enum_cache().keys(),
isolate);
// Check if the {map} has a valid enum length, which implies that it
@@ -328,7 +328,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
PropertyDetails details = descriptors->GetDetails(i);
if (details.IsDontEnum()) continue;
Object key = descriptors->GetKey(i);
- if (key->IsSymbol()) continue;
+ if (key.IsSymbol()) continue;
keys->set(index, key);
if (details.location() != kField) fields_only = false;
index++;
@@ -345,7 +345,7 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
PropertyDetails details = descriptors->GetDetails(i);
if (details.IsDontEnum()) continue;
Object key = descriptors->GetKey(i);
- if (key->IsSymbol()) continue;
+ if (key.IsSymbol()) continue;
DCHECK_EQ(kData, details.kind());
DCHECK_EQ(kField, details.location());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
@@ -410,7 +410,7 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
GetKeysConversion keys_conversion) {
bool own_only = has_empty_prototype_ || mode_ == KeyCollectionMode::kOwnOnly;
Map map = receiver_->map();
- if (!own_only || map->IsCustomElementsReceiverMap()) {
+ if (!own_only || map.IsCustomElementsReceiverMap()) {
return MaybeHandle<FixedArray>();
}
@@ -419,11 +419,11 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
// Do not try to use the enum-cache for dict-mode objects.
- if (map->is_dictionary_map()) {
+ if (map.is_dictionary_map()) {
return GetOwnKeysWithElements<false>(isolate_, object, keys_conversion,
skip_indices_);
}
- int enum_length = receiver_->map()->EnumLength();
+ int enum_length = receiver_->map().EnumLength();
if (enum_length == kInvalidEnumCacheSentinel) {
Handle<FixedArray> keys;
// Try initializing the enum cache and return own properties.
@@ -433,7 +433,7 @@ MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysFast(
keys->length());
}
is_receiver_simple_enum_ =
- object->map()->EnumLength() != kInvalidEnumCacheSentinel;
+ object->map().EnumLength() != kInvalidEnumCacheSentinel;
return keys;
}
}
@@ -448,13 +448,15 @@ FastKeyAccumulator::GetOwnKeysWithUninitializedEnumCache() {
Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
// Uninitalized enum cache
Map map = object->map();
- if (object->elements()->length() != 0) {
+ if (object->elements() != ReadOnlyRoots(isolate_).empty_fixed_array() &&
+ object->elements() !=
+ ReadOnlyRoots(isolate_).empty_slow_element_dictionary()) {
// Assume that there are elements.
return MaybeHandle<FixedArray>();
}
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ int number_of_own_descriptors = map.NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) {
- map->SetEnumLength(0);
+ map.SetEnumLength(0);
return isolate_->factory()->empty_fixed_array();
}
// We have no elements but possibly enumerable property keys, hence we can
@@ -531,7 +533,7 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
*object, Just(kDontThrow));
Handle<JSObject> result;
- if (!interceptor->enumerator()->IsUndefined(isolate)) {
+ if (!interceptor->enumerator().IsUndefined(isolate)) {
if (type == kIndexed) {
result = enum_args.CallIndexedEnumerator(interceptor);
} else {
@@ -543,7 +545,7 @@ Maybe<bool> CollectInterceptorKeysInternal(Handle<JSReceiver> receiver,
if (result.is_null()) return Just(true);
if ((accumulator->filter() & ONLY_ENUMERABLE) &&
- !interceptor->query()->IsUndefined(isolate)) {
+ !interceptor->query().IsUndefined(isolate)) {
FilterForEnumerableProperties(receiver, object, interceptor, accumulator,
result, type);
} else {
@@ -612,16 +614,16 @@ int CollectOwnPropertyNamesInternal(Handle<JSObject> object,
if (filter & ONLY_ALL_CAN_READ) {
if (details.kind() != kAccessor) continue;
Object accessors = descs->GetStrongValue(i);
- if (!accessors->IsAccessorInfo()) continue;
- if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
+ if (!accessors.IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors).all_can_read()) continue;
}
Name key = descs->GetKey(i);
- if (skip_symbols == key->IsSymbol()) {
+ if (skip_symbols == key.IsSymbol()) {
if (first_skipped == -1) first_skipped = i;
continue;
}
- if (key->FilterKey(keys->filter())) continue;
+ if (key.FilterKey(keys->filter())) continue;
if (is_shadowing_key) {
keys->AddShadowingKey(key);
@@ -658,10 +660,10 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
// If the number of properties equals the length of enumerable properties
// we do not have to filter out non-enumerable ones
Map map = object->map();
- int nof_descriptors = map->NumberOfOwnDescriptors();
+ int nof_descriptors = map.NumberOfOwnDescriptors();
if (enum_keys->length() != nof_descriptors) {
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map->instance_descriptors(), isolate_);
+ Handle<DescriptorArray>(map.instance_descriptors(), isolate_);
for (int i = 0; i < nof_descriptors; i++) {
PropertyDetails details = descs->GetDetails(i);
if (!details.IsDontEnum()) continue;
@@ -672,7 +674,7 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
} else if (object->IsJSGlobalObject()) {
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object,
- JSGlobalObject::cast(*object)->global_dictionary());
+ JSGlobalObject::cast(*object).global_dictionary());
} else {
enum_keys = GetOwnEnumPropertyDictionaryKeys(
isolate_, mode_, this, object, object->property_dictionary());
@@ -692,8 +694,8 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
AddKeys(enum_keys, DO_NOT_CONVERT);
} else {
if (object->HasFastProperties()) {
- int limit = object->map()->NumberOfOwnDescriptors();
- Handle<DescriptorArray> descs(object->map()->instance_descriptors(),
+ int limit = object->map().NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descs(object->map().instance_descriptors(),
isolate_);
// First collect the strings,
int first_symbol =
@@ -705,7 +707,7 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
}
} else if (object->IsJSGlobalObject()) {
GlobalDictionary::CollectKeysTo(
- handle(JSGlobalObject::cast(*object)->global_dictionary(), isolate_),
+ handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
this);
} else {
NameDictionary::CollectKeysTo(
@@ -719,13 +721,13 @@ Maybe<bool> KeyAccumulator::CollectOwnPropertyNames(Handle<JSReceiver> receiver,
void KeyAccumulator::CollectPrivateNames(Handle<JSReceiver> receiver,
Handle<JSObject> object) {
if (object->HasFastProperties()) {
- int limit = object->map()->NumberOfOwnDescriptors();
- Handle<DescriptorArray> descs(object->map()->instance_descriptors(),
+ int limit = object->map().NumberOfOwnDescriptors();
+ Handle<DescriptorArray> descs(object->map().instance_descriptors(),
isolate_);
CollectOwnPropertyNamesInternal<false>(object, this, descs, 0, limit);
} else if (object->IsJSGlobalObject()) {
GlobalDictionary::CollectKeysTo(
- handle(JSGlobalObject::cast(*object)->global_dictionary(), isolate_),
+ handle(JSGlobalObject::cast(*object).global_dictionary(), isolate_),
this);
} else {
NameDictionary::CollectKeysTo(
@@ -805,7 +807,7 @@ Handle<FixedArray> KeyAccumulator::GetOwnEnumPropertyKeys(
} else if (object->IsJSGlobalObject()) {
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
- JSGlobalObject::cast(*object)->global_dictionary());
+ JSGlobalObject::cast(*object).global_dictionary());
} else {
return GetOwnEnumPropertyDictionaryKeys(
isolate, KeyCollectionMode::kOwnOnly, nullptr, object,
@@ -855,8 +857,9 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
// 5. Let trap be ? GetMethod(handler, "ownKeys").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
- isolate_->factory()->ownKeys_string()),
+ isolate_, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate_->factory()->ownKeys_string()),
Nothing<bool>());
// 6. If trap is undefined, then
if (trap->IsUndefined(isolate_)) {
@@ -971,7 +974,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
// 21. Repeat, for each key that is an element of targetConfigurableKeys:
for (int i = 0; i < target_configurable_keys->length(); ++i) {
Object raw_key = target_configurable_keys->get(i);
- if (raw_key->IsSmi()) continue; // Zapped entry, was nonconfigurable.
+ if (raw_key.IsSmi()) continue; // Zapped entry, was nonconfigurable.
Handle<Name> key(Name::cast(raw_key), isolate_);
// 21a. If key is not an element of uncheckedResultKeys, throw a
// TypeError exception.
diff --git a/deps/v8/src/keys.h b/deps/v8/src/objects/keys.h
index 74876122c5..69f61a886e 100644
--- a/deps/v8/src/keys.h
+++ b/deps/v8/src/objects/keys.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_KEYS_H_
-#define V8_KEYS_H_
+#ifndef V8_OBJECTS_KEYS_H_
+#define V8_OBJECTS_KEYS_H_
-#include "src/objects.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-objects.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -172,4 +172,4 @@ class FastKeyAccumulator {
} // namespace internal
} // namespace v8
-#endif // V8_KEYS_H_
+#endif // V8_OBJECTS_KEYS_H_
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/objects/layout-descriptor-inl.h
index 9518317317..49683da267 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/objects/layout-descriptor-inl.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LAYOUT_DESCRIPTOR_INL_H_
-#define V8_LAYOUT_DESCRIPTOR_INL_H_
+#ifndef V8_OBJECTS_LAYOUT_DESCRIPTOR_INL_H_
+#define V8_OBJECTS_LAYOUT_DESCRIPTOR_INL_H_
-#include "src/layout-descriptor.h"
+#include "src/objects/layout-descriptor.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
// Has to be the last include (doesn't have include guards):
@@ -42,7 +42,6 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
return result;
}
-
bool LayoutDescriptor::InobjectUnboxedField(int inobject_properties,
PropertyDetails details) {
if (details.location() != kField || !details.representation().IsDouble()) {
@@ -122,7 +121,6 @@ bool LayoutDescriptor::IsTagged(int field_index) {
}
}
-
bool LayoutDescriptor::IsFastPointerLayout() {
return *this == FastPointerLayout();
}
@@ -133,7 +131,6 @@ bool LayoutDescriptor::IsFastPointerLayout(Object layout_descriptor) {
bool LayoutDescriptor::IsSlowLayout() { return !IsSmi(); }
-
int LayoutDescriptor::capacity() {
return IsSlowLayout() ? (length() * kBitsPerByte) : kBitsInSmiLayout;
}
@@ -156,10 +153,10 @@ int LayoutDescriptor::GetSlowModeBackingStoreLength(int length) {
int LayoutDescriptor::CalculateCapacity(Map map, DescriptorArray descriptors,
int num_descriptors) {
- int inobject_properties = map->GetInObjectProperties();
+ int inobject_properties = map.GetInObjectProperties();
if (inobject_properties == 0) return 0;
- DCHECK_LE(num_descriptors, descriptors->number_of_descriptors());
+ DCHECK_LE(num_descriptors, descriptors.number_of_descriptors());
int layout_descriptor_length;
const int kMaxWordsPerField = kDoubleSize / kTaggedSize;
@@ -173,7 +170,7 @@ int LayoutDescriptor::CalculateCapacity(Map map, DescriptorArray descriptors,
layout_descriptor_length = 0;
for (int i = 0; i < num_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails details = descriptors.GetDetails(i);
if (!InobjectUnboxedField(inobject_properties, details)) continue;
int field_index = details.field_index();
int field_width_in_words = details.field_width_in_words();
@@ -189,19 +186,19 @@ LayoutDescriptor LayoutDescriptor::Initialize(
LayoutDescriptor layout_descriptor, Map map, DescriptorArray descriptors,
int num_descriptors) {
DisallowHeapAllocation no_allocation;
- int inobject_properties = map->GetInObjectProperties();
+ int inobject_properties = map.GetInObjectProperties();
for (int i = 0; i < num_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails details = descriptors.GetDetails(i);
if (!InobjectUnboxedField(inobject_properties, details)) {
DCHECK(details.location() != kField ||
- layout_descriptor->IsTagged(details.field_index()));
+ layout_descriptor.IsTagged(details.field_index()));
continue;
}
int field_index = details.field_index();
- layout_descriptor = layout_descriptor->SetRawData(field_index);
+ layout_descriptor = layout_descriptor.SetRawData(field_index);
if (details.field_width_in_words() > 1) {
- layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
+ layout_descriptor = layout_descriptor.SetRawData(field_index + 1);
}
}
return layout_descriptor;
@@ -227,18 +224,17 @@ LayoutDescriptorHelper::LayoutDescriptorHelper(Map map)
layout_descriptor_(LayoutDescriptor::FastPointerLayout()) {
if (!FLAG_unbox_double_fields) return;
- layout_descriptor_ = map->layout_descriptor_gc_safe();
- if (layout_descriptor_->IsFastPointerLayout()) {
+ layout_descriptor_ = map.layout_descriptor_gc_safe();
+ if (layout_descriptor_.IsFastPointerLayout()) {
return;
}
- header_size_ = map->GetInObjectPropertiesStartInWords() * kTaggedSize;
+ header_size_ = map.GetInObjectPropertiesStartInWords() * kTaggedSize;
DCHECK_GE(header_size_, 0);
all_fields_tagged_ = false;
}
-
bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
DCHECK(IsAligned(offset_in_bytes, kTaggedSize));
if (all_fields_tagged_) return true;
@@ -246,7 +242,7 @@ bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
if (offset_in_bytes < header_size_) return true;
int field_index = (offset_in_bytes - header_size_) / kTaggedSize;
- return layout_descriptor_->IsTagged(field_index);
+ return layout_descriptor_.IsTagged(field_index);
}
} // namespace internal
@@ -254,4 +250,4 @@ bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
#include "src/objects/object-macros-undef.h"
-#endif // V8_LAYOUT_DESCRIPTOR_INL_H_
+#endif // V8_OBJECTS_LAYOUT_DESCRIPTOR_INL_H_
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/objects/layout-descriptor.cc
index c90d53d06c..76421aaf4f 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/objects/layout-descriptor.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/layout-descriptor.h"
+#include "src/objects/layout-descriptor.h"
#include <sstream>
#include "src/base/bits.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -54,9 +54,9 @@ Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
DisallowHeapAllocation no_allocation;
LayoutDescriptor layout_desc = *layout_descriptor;
- layout_desc = layout_desc->SetRawData(field_index);
+ layout_desc = layout_desc.SetRawData(field_index);
if (details.field_width_in_words() > 1) {
- layout_desc = layout_desc->SetRawData(field_index + 1);
+ layout_desc = layout_desc.SetRawData(field_index + 1);
}
return handle(layout_desc, isolate);
}
@@ -66,30 +66,29 @@ Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
Handle<LayoutDescriptor> full_layout_descriptor) {
DisallowHeapAllocation no_allocation;
LayoutDescriptor layout_descriptor = map->layout_descriptor();
- if (layout_descriptor->IsSlowLayout()) {
+ if (layout_descriptor.IsSlowLayout()) {
return full_layout_descriptor;
}
if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
DCHECK(details.location() != kField ||
- layout_descriptor->IsTagged(details.field_index()));
+ layout_descriptor.IsTagged(details.field_index()));
return handle(layout_descriptor, isolate);
}
int field_index = details.field_index();
int new_capacity = field_index + details.field_width_in_words();
- if (new_capacity > layout_descriptor->capacity()) {
+ if (new_capacity > layout_descriptor.capacity()) {
// Current map's layout descriptor runs out of space, so use the full
// layout descriptor.
return full_layout_descriptor;
}
- layout_descriptor = layout_descriptor->SetRawData(field_index);
+ layout_descriptor = layout_descriptor.SetRawData(field_index);
if (details.field_width_in_words() > 1) {
- layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
+ layout_descriptor = layout_descriptor.SetRawData(field_index + 1);
}
return handle(layout_descriptor, isolate);
}
-
Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
int new_capacity) {
@@ -114,7 +113,6 @@ Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
}
}
-
bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
int* out_sequence_length) {
DCHECK_GT(max_sequence_length, 0);
@@ -180,7 +178,6 @@ bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
return is_tagged;
}
-
Handle<LayoutDescriptor> LayoutDescriptor::NewForTesting(Isolate* isolate,
int length) {
return New(isolate, length);
@@ -205,8 +202,8 @@ bool LayoutDescriptorHelper::IsTagged(
int max_sequence_length = (end_offset - offset_in_bytes) / kTaggedSize;
int field_index = Max(0, (offset_in_bytes - header_size_) / kTaggedSize);
int sequence_length;
- bool tagged = layout_descriptor_->IsTagged(field_index, max_sequence_length,
- &sequence_length);
+ bool tagged = layout_descriptor_.IsTagged(field_index, max_sequence_length,
+ &sequence_length);
DCHECK_GT(sequence_length, 0);
if (offset_in_bytes < header_size_) {
// Object headers do not contain non-tagged fields. Check if the contiguous
@@ -260,11 +257,11 @@ LayoutDescriptor LayoutDescriptor::Trim(Heap* heap, Map map,
bool LayoutDescriptor::IsConsistentWithMap(Map map, bool check_tail) {
if (FLAG_unbox_double_fields) {
- DescriptorArray descriptors = map->instance_descriptors();
- int nof_descriptors = map->NumberOfOwnDescriptors();
+ DescriptorArray descriptors = map.instance_descriptors();
+ int nof_descriptors = map.NumberOfOwnDescriptors();
int last_field_index = 0;
for (int i = 0; i < nof_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails details = descriptors.GetDetails(i);
if (details.location() != kField) continue;
FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
bool tagged_expected =
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/objects/layout-descriptor.h
index 12b32f5a32..2311594ff6 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/objects/layout-descriptor.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LAYOUT_DESCRIPTOR_H_
-#define V8_LAYOUT_DESCRIPTOR_H_
+#ifndef V8_OBJECTS_LAYOUT_DESCRIPTOR_H_
+#define V8_OBJECTS_LAYOUT_DESCRIPTOR_H_
#include <iosfwd>
@@ -172,4 +172,4 @@ class LayoutDescriptorHelper {
#include "src/objects/object-macros-undef.h"
-#endif // V8_LAYOUT_DESCRIPTOR_H_
+#endif // V8_OBJECTS_LAYOUT_DESCRIPTOR_H_
diff --git a/deps/v8/src/objects/literal-objects-inl.h b/deps/v8/src/objects/literal-objects-inl.h
index fafbb17f88..1ddb333cff 100644
--- a/deps/v8/src/objects/literal-objects-inl.h
+++ b/deps/v8/src/objects/literal-objects-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/literal-objects.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -70,7 +70,7 @@ void ArrayBoilerplateDescription::set_elements_kind(ElementsKind kind) {
}
bool ArrayBoilerplateDescription::is_empty() const {
- return constant_elements()->length() == 0;
+ return constant_elements().length() == 0;
}
} // namespace internal
diff --git a/deps/v8/src/objects/literal-objects.cc b/deps/v8/src/objects/literal-objects.cc
index d699ac7345..bfdbd9317b 100644
--- a/deps/v8/src/objects/literal-objects.cc
+++ b/deps/v8/src/objects/literal-objects.cc
@@ -4,13 +4,13 @@
#include "src/objects/literal-objects.h"
-#include "src/accessors.h"
#include "src/ast/ast.h"
+#include "src/builtins/accessors.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "src/objects/struct-inl.h"
@@ -113,7 +113,7 @@ void AddToDescriptorArrayTemplate(
value_kind == ClassBoilerplate::kSetter);
Object raw_accessor = descriptor_array_template->GetStrongValue(entry);
AccessorPair pair;
- if (raw_accessor->IsAccessorPair()) {
+ if (raw_accessor.IsAccessorPair()) {
pair = AccessorPair::cast(raw_accessor);
} else {
Handle<AccessorPair> new_pair = isolate->factory()->NewAccessorPair();
@@ -122,9 +122,9 @@ void AddToDescriptorArrayTemplate(
descriptor_array_template->Set(entry, &d);
pair = *new_pair;
}
- pair->set(value_kind == ClassBoilerplate::kGetter ? ACCESSOR_GETTER
- : ACCESSOR_SETTER,
- *value);
+ pair.set(value_kind == ClassBoilerplate::kGetter ? ACCESSOR_GETTER
+ : ACCESSOR_SETTER,
+ *value);
}
}
}
@@ -165,7 +165,7 @@ constexpr int ComputeEnumerationIndex(int value_index) {
}
inline int GetExistingValueIndex(Object value) {
- return value->IsSmi() ? Smi::ToInt(value) : -1;
+ return value.IsSmi() ? Smi::ToInt(value) : -1;
}
template <typename Dictionary, typename Key>
@@ -215,13 +215,13 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
Object existing_value = dictionary->ValueAt(entry);
if (value_kind == ClassBoilerplate::kData) {
// Computed value is a normal method.
- if (existing_value->IsAccessorPair()) {
+ if (existing_value.IsAccessorPair()) {
AccessorPair current_pair = AccessorPair::cast(existing_value);
int existing_getter_index =
- GetExistingValueIndex(current_pair->getter());
+ GetExistingValueIndex(current_pair.getter());
int existing_setter_index =
- GetExistingValueIndex(current_pair->setter());
+ GetExistingValueIndex(current_pair.setter());
// At least one of the accessors must already be defined.
DCHECK(existing_getter_index >= 0 || existing_setter_index >= 0);
if (existing_getter_index < key_index &&
@@ -243,7 +243,7 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
// and then it was overwritten by the current computed method which
// in turn was later overwritten by the setter method. So we clear
// the getter.
- current_pair->set_getter(*isolate->factory()->null_value());
+ current_pair.set_getter(*isolate->factory()->null_value());
} else if (existing_setter_index < key_index) {
DCHECK_LT(key_index, existing_getter_index);
@@ -251,19 +251,18 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
// and then it was overwritten by the current computed method which
// in turn was later overwritten by the getter method. So we clear
// the setter.
- current_pair->set_setter(*isolate->factory()->null_value());
+ current_pair.set_setter(*isolate->factory()->null_value());
}
}
} else {
// Overwrite existing value if it was defined before the computed one
// (AccessorInfo "length" property is always defined before).
- DCHECK_IMPLIES(!existing_value->IsSmi(),
- existing_value->IsAccessorInfo());
- DCHECK_IMPLIES(!existing_value->IsSmi(),
- AccessorInfo::cast(existing_value)->name() ==
+ DCHECK_IMPLIES(!existing_value.IsSmi(),
+ existing_value.IsAccessorInfo());
+ DCHECK_IMPLIES(!existing_value.IsSmi(),
+ AccessorInfo::cast(existing_value).name() ==
*isolate->factory()->length_string());
- if (!existing_value->IsSmi() ||
- Smi::ToInt(existing_value) < key_index) {
+ if (!existing_value.IsSmi() || Smi::ToInt(existing_value) < key_index) {
PropertyDetails details(kData, DONT_ENUM, PropertyCellType::kNoCell,
enum_order);
dictionary->DetailsAtPut(isolate, entry, details);
@@ -274,14 +273,14 @@ void AddToDictionaryTemplate(Isolate* isolate, Handle<Dictionary> dictionary,
AccessorComponent component = value_kind == ClassBoilerplate::kGetter
? ACCESSOR_GETTER
: ACCESSOR_SETTER;
- if (existing_value->IsAccessorPair()) {
+ if (existing_value.IsAccessorPair()) {
// Update respective component of existing AccessorPair.
AccessorPair current_pair = AccessorPair::cast(existing_value);
int existing_component_index =
- GetExistingValueIndex(current_pair->get(component));
+ GetExistingValueIndex(current_pair.get(component));
if (existing_component_index < key_index) {
- current_pair->set(component, value);
+ current_pair.set(component, value);
}
} else {
@@ -380,7 +379,7 @@ class ObjectDescriptor {
AddToDictionaryTemplate(isolate, properties_dictionary_template_, name,
value_index, value_kind, value);
} else {
- *temp_handle_.location() = value->ptr();
+ *temp_handle_.location() = value.ptr();
AddToDescriptorArrayTemplate(isolate, descriptor_array_template_, name,
value_kind, temp_handle_);
}
@@ -526,6 +525,11 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
for (int i = 0; i < expr->properties()->length(); i++) {
ClassLiteral::Property* property = expr->properties()->at(i);
+ // Private members are not processed using the class boilerplate.
+ if (property->is_private()) {
+ continue;
+ }
+
ClassBoilerplate::ValueKind value_kind;
switch (property->kind()) {
case ClassLiteral::Property::METHOD:
diff --git a/deps/v8/src/lookup-cache-inl.h b/deps/v8/src/objects/lookup-cache-inl.h
index e980deb7da..0894d6c4c7 100644
--- a/deps/v8/src/lookup-cache-inl.h
+++ b/deps/v8/src/objects/lookup-cache-inl.h
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOOKUP_CACHE_INL_H_
-#define V8_LOOKUP_CACHE_INL_H_
+#ifndef V8_OBJECTS_LOOKUP_CACHE_INL_H_
+#define V8_OBJECTS_LOOKUP_CACHE_INL_H_
-#include "src/lookup-cache.h"
+#include "src/objects/lookup-cache.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
// static
int DescriptorLookupCache::Hash(Map source, Name name) {
- DCHECK(name->IsUniqueName());
+ DCHECK(name.IsUniqueName());
// Uses only lower 32 bits if pointers are larger.
uint32_t source_hash = static_cast<uint32_t>(source.ptr()) >> kTaggedSizeLog2;
- uint32_t name_hash = name->hash_field();
+ uint32_t name_hash = name.hash_field();
return (source_hash ^ name_hash) % kLength;
}
@@ -40,4 +40,4 @@ void DescriptorLookupCache::Update(Map source, Name name, int result) {
} // namespace internal
} // namespace v8
-#endif // V8_LOOKUP_CACHE_INL_H_
+#endif // V8_OBJECTS_LOOKUP_CACHE_INL_H_
diff --git a/deps/v8/src/lookup-cache.cc b/deps/v8/src/objects/lookup-cache.cc
index 60491ff535..3f251912d6 100644
--- a/deps/v8/src/lookup-cache.cc
+++ b/deps/v8/src/objects/lookup-cache.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/lookup-cache.h"
+#include "src/objects/lookup-cache.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/lookup-cache.h b/deps/v8/src/objects/lookup-cache.h
index 8904127266..a2016d23df 100644
--- a/deps/v8/src/lookup-cache.h
+++ b/deps/v8/src/objects/lookup-cache.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOOKUP_CACHE_H_
-#define V8_LOOKUP_CACHE_H_
+#ifndef V8_OBJECTS_LOOKUP_CACHE_H_
+#define V8_OBJECTS_LOOKUP_CACHE_H_
-#include "src/objects.h"
#include "src/objects/map.h"
#include "src/objects/name.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -57,4 +57,4 @@ class DescriptorLookupCache {
} // namespace internal
} // namespace v8
-#endif // V8_LOOKUP_CACHE_H_
+#endif // V8_OBJECTS_LOOKUP_CACHE_H_
diff --git a/deps/v8/src/lookup-inl.h b/deps/v8/src/objects/lookup-inl.h
index abe865f69c..5b2dbff258 100644
--- a/deps/v8/src/lookup-inl.h
+++ b/deps/v8/src/objects/lookup-inl.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOOKUP_INL_H_
-#define V8_LOOKUP_INL_H_
+#ifndef V8_OBJECTS_LOOKUP_INL_H_
+#define V8_OBJECTS_LOOKUP_INL_H_
-#include "src/lookup.h"
+#include "src/objects/lookup.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
-#include "src/objects/name-inl.h"
#include "src/objects/map-inl.h"
+#include "src/objects/name-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -53,9 +53,11 @@ LookupIterator::LookupIterator(Isolate* isolate, Handle<Object> receiver,
: LookupIterator(isolate, receiver, index,
GetRoot(isolate, receiver, index), configuration) {}
-LookupIterator LookupIterator::PropertyOrElement(
- Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Handle<JSReceiver> holder, Configuration configuration) {
+LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ Handle<JSReceiver> holder,
+ Configuration configuration) {
uint32_t index;
if (name->AsArrayIndex(&index)) {
LookupIterator it =
@@ -63,12 +65,13 @@ LookupIterator LookupIterator::PropertyOrElement(
it.name_ = name;
return it;
}
- return LookupIterator(receiver, name, holder, configuration);
+ return LookupIterator(isolate, receiver, name, holder, configuration);
}
-LookupIterator LookupIterator::PropertyOrElement(
- Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
- Configuration configuration) {
+LookupIterator LookupIterator::PropertyOrElement(Isolate* isolate,
+ Handle<Object> receiver,
+ Handle<Name> name,
+ Configuration configuration) {
uint32_t index;
if (name->AsArrayIndex(&index)) {
LookupIterator it = LookupIterator(isolate, receiver, index, configuration);
@@ -108,7 +111,7 @@ Handle<T> LookupIterator::GetHolder() const {
bool LookupIterator::ExtendingNonExtensible(Handle<JSReceiver> receiver) {
DCHECK(receiver.is_identical_to(GetStoreTarget<JSReceiver>()));
- return !receiver->map()->is_extensible() &&
+ return !receiver->map().is_extensible() &&
(IsElement() || !name_->IsPrivate());
}
@@ -117,7 +120,7 @@ bool LookupIterator::IsCacheableTransition() {
return transition_->IsPropertyCell() ||
(transition_map()->is_dictionary_map() &&
!GetStoreTarget<JSReceiver>()->HasFastProperties()) ||
- transition_map()->GetBackPointer()->IsMap();
+ transition_map()->GetBackPointer().IsMap();
}
void LookupIterator::UpdateProtector() {
@@ -163,9 +166,9 @@ template <class T>
Handle<T> LookupIterator::GetStoreTarget() const {
DCHECK(receiver_->IsJSReceiver());
if (receiver_->IsJSGlobalProxy()) {
- Map map = JSGlobalProxy::cast(*receiver_)->map();
- if (map->has_hidden_prototype()) {
- return handle(JSGlobalObject::cast(map->prototype()), isolate_);
+ Map map = JSGlobalProxy::cast(*receiver_).map();
+ if (map.has_hidden_prototype()) {
+ return handle(JSGlobalObject::cast(map.prototype()), isolate_);
}
}
return Handle<T>::cast(receiver_);
@@ -173,8 +176,8 @@ Handle<T> LookupIterator::GetStoreTarget() const {
template <bool is_element>
InterceptorInfo LookupIterator::GetInterceptor(JSObject holder) {
- return is_element ? holder->GetIndexedInterceptor()
- : holder->GetNamedInterceptor();
+ return is_element ? holder.GetIndexedInterceptor()
+ : holder.GetNamedInterceptor();
}
inline Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
@@ -188,4 +191,4 @@ inline Handle<InterceptorInfo> LookupIterator::GetInterceptor() const {
} // namespace internal
} // namespace v8
-#endif // V8_LOOKUP_INL_H_
+#endif // V8_OBJECTS_LOOKUP_INL_H_
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/objects/lookup.cc
index ddd1260ff1..744cf67482 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/objects/lookup.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/lookup.h"
-
-#include "src/bootstrapper.h"
-#include "src/counters.h"
-#include "src/deoptimizer.h"
-#include "src/elements.h"
-#include "src/field-type.h"
-#include "src/isolate-inl.h"
+#include "src/objects/lookup.h"
+
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/objects/elements.h"
+#include "src/objects/field-type.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/struct-inl.h"
@@ -118,7 +118,7 @@ LookupIterator LookupIterator::ForTransitionHandler(
PropertyConstness::kConst, value);
// Reload information; this is no-op if nothing changed.
it.property_details_ =
- new_map->instance_descriptors()->GetDetails(descriptor_number);
+ new_map->instance_descriptors().GetDetails(descriptor_number);
it.transition_ = new_map;
}
return it;
@@ -151,7 +151,7 @@ void LookupIterator::Start() {
holder_ = initial_holder_;
JSReceiver holder = *holder_;
- Map map = holder->map();
+ Map map = holder.map();
state_ = LookupInHolder<is_element>(map, holder);
if (IsFound()) return;
@@ -169,9 +169,9 @@ void LookupIterator::Next() {
has_property_ = false;
JSReceiver holder = *holder_;
- Map map = holder->map();
+ Map map = holder.map();
- if (map->IsSpecialReceiverMap()) {
+ if (map.IsSpecialReceiverMap()) {
state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
: LookupInSpecialHolder<false>(map, holder);
if (IsFound()) return;
@@ -195,7 +195,7 @@ void LookupIterator::NextInternal(Map map, JSReceiver holder) {
return;
}
holder = maybe_holder;
- map = holder->map();
+ map = holder.map();
state_ = LookupInHolder<is_element>(map, holder);
} while (!IsFound());
@@ -219,7 +219,7 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
// Strings are the only objects with properties (only elements) directly on
// the wrapper. Hence we can skip generating the wrapper for all other cases.
if (receiver->IsString() &&
- index < static_cast<uint32_t>(String::cast(*receiver)->length())) {
+ index < static_cast<uint32_t>(String::cast(*receiver).length())) {
// TODO(verwaest): Speed this up. Perhaps use a cached wrapper on the native
// context, ensuring that we don't leak it into JS?
Handle<JSFunction> constructor = isolate->string_function();
@@ -228,14 +228,13 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
return result;
}
auto root =
- handle(receiver->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
+ handle(receiver->GetPrototypeChainRootMap(isolate).prototype(), isolate);
if (root->IsNull(isolate)) {
isolate->PushStackTraceAndDie(reinterpret_cast<void*>(receiver->ptr()));
}
return Handle<JSReceiver>::cast(root);
}
-
Handle<Map> LookupIterator::GetReceiverMap() const {
if (receiver_->IsNumber()) return factory()->heap_number_map();
return handle(Handle<HeapObject>::cast(receiver_)->map(), isolate_);
@@ -266,7 +265,7 @@ bool IsTypedArrayFunctionInAnyContext(Isolate* isolate, JSReceiver holder) {
#undef TYPED_ARRAY_CONTEXT_SLOTS
};
- if (!holder->IsJSFunction()) return false;
+ if (!holder.IsJSFunction()) return false;
return std::any_of(
std::begin(context_slots), std::end(context_slots),
@@ -306,7 +305,7 @@ void LookupIterator::InternalUpdateProtector() {
isolate_->InvalidateTypedArraySpeciesProtector();
return;
}
- if (holder_->map()->is_prototype_map()) {
+ if (holder_->map().is_prototype_map()) {
DisallowHeapAllocation no_gc;
// Setting the constructor of any prototype with the @@species protector
// (of any realm) also needs to invalidate the protector.
@@ -328,7 +327,7 @@ void LookupIterator::InternalUpdateProtector() {
if (!isolate_->IsRegExpSpeciesLookupChainIntact()) return;
isolate_->InvalidateRegExpSpeciesProtector();
} else if (isolate_->IsInAnyContext(
- holder_->map()->prototype(),
+ holder_->map().prototype(),
Context::TYPED_ARRAY_PROTOTYPE_INDEX)) {
if (!isolate_->IsTypedArraySpeciesLookupChainIntact()) return;
isolate_->InvalidateTypedArraySpeciesProtector();
@@ -467,7 +466,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
if (holder_obj->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder_obj)->global_dictionary(), isolate());
+ JSGlobalObject::cast(*holder_obj).global_dictionary(), isolate());
Handle<PropertyCell> cell(dictionary->CellAt(dictionary_entry()),
isolate());
property_details_ = cell->property_details();
@@ -478,16 +477,12 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
if (!holder_obj->HasFastProperties()) return;
PropertyConstness new_constness = PropertyConstness::kConst;
- if (FLAG_track_constant_fields) {
- if (constness() == PropertyConstness::kConst) {
- DCHECK_EQ(kData, property_details_.kind());
- // Check that current value matches new value otherwise we should make
- // the property mutable.
- if (!IsConstFieldValueEqualTo(*value))
- new_constness = PropertyConstness::kMutable;
- }
- } else {
- new_constness = PropertyConstness::kMutable;
+ if (constness() == PropertyConstness::kConst) {
+ DCHECK_EQ(kData, property_details_.kind());
+ // Check that current value matches new value otherwise we should make
+ // the property mutable.
+ if (!IsConstFieldValueEqualTo(*value))
+ new_constness = PropertyConstness::kMutable;
}
Handle<Map> old_map(holder_obj->map(), isolate_);
@@ -498,7 +493,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
// Update the property details if the representation was None.
if (constness() != new_constness || representation().IsNone()) {
property_details_ =
- new_map->instance_descriptors()->GetDetails(descriptor_number());
+ new_map->instance_descriptors().GetDetails(descriptor_number());
}
return;
}
@@ -507,7 +502,6 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
ReloadPropertyInformation<false>();
}
-
void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
PropertyAttributes attributes) {
DCHECK(state_ == DATA || state_ == ACCESSOR);
@@ -523,7 +517,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
Handle<JSObject> holder_obj = Handle<JSObject>::cast(holder);
if (IsElement()) {
- DCHECK(!holder_obj->HasFixedTypedArrayElements());
+ DCHECK(!holder_obj->HasTypedArrayElements());
DCHECK(attributes != NONE || !holder_obj->HasFastElements());
Handle<FixedArrayBase> elements(holder_obj->elements(), isolate());
holder_obj->GetElementsAccessor()->Reconfigure(holder_obj, elements,
@@ -544,7 +538,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
if (!IsElement() && !holder_obj->HasFastProperties()) {
PropertyDetails details(kData, attributes, PropertyCellType::kMutable);
- if (holder_obj->map()->is_prototype_map() &&
+ if (holder_obj->map().is_prototype_map() &&
(property_details_.attributes() & READ_ONLY) == 0 &&
(attributes & READ_ONLY) != 0) {
// Invalidate prototype validity cell when a property is reconfigured
@@ -554,7 +548,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
}
if (holder_obj->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(
- JSGlobalObject::cast(*holder_obj)->global_dictionary(), isolate());
+ JSGlobalObject::cast(*holder_obj).global_dictionary(), isolate());
Handle<PropertyCell> cell = PropertyCell::PrepareForValue(
isolate(), dictionary, dictionary_entry(), value, details);
@@ -599,7 +593,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
DCHECK(state_ != LookupIterator::ACCESSOR ||
(GetAccessors()->IsAccessorInfo() &&
- AccessorInfo::cast(*GetAccessors())->is_special_data_property()));
+ AccessorInfo::cast(*GetAccessors()).is_special_data_property()));
DCHECK_NE(INTEGER_INDEXED_EXOTIC, state_);
DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
@@ -616,7 +610,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
global, name(), PropertyCellType::kUninitialized, &entry);
Handle<GlobalDictionary> dictionary(global->global_dictionary(),
isolate_);
- DCHECK(cell->value()->IsTheHole(isolate_));
+ DCHECK(cell->value().IsTheHole(isolate_));
DCHECK(!value->IsTheHole(isolate_));
transition_ = cell;
// Assign an enumeration index to the property and update
@@ -642,7 +636,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
Handle<Map> transition =
Map::TransitionToDataProperty(isolate_, map, name_, value, attributes,
- kDefaultFieldConstness, store_origin);
+ PropertyConstness::kConst, store_origin);
state_ = TRANSITION;
transition_ = transition;
@@ -688,11 +682,11 @@ void LookupIterator::ApplyTransitionToDataProperty(
number_ = static_cast<uint32_t>(number);
property_details_ = transition->GetLastDescriptorDetails();
state_ = DATA;
- } else if (receiver->map()->is_dictionary_map()) {
+ } else if (receiver->map().is_dictionary_map()) {
Handle<NameDictionary> dictionary(receiver->property_dictionary(),
isolate_);
int entry;
- if (receiver->map()->is_prototype_map() && receiver->IsJSObject()) {
+ if (receiver->map().is_prototype_map() && receiver->IsJSObject()) {
JSObject::InvalidatePrototypeChains(receiver->map());
}
dictionary = NameDictionary::Add(isolate(), dictionary, name(),
@@ -710,7 +704,6 @@ void LookupIterator::ApplyTransitionToDataProperty(
}
}
-
void LookupIterator::Delete() {
Handle<JSReceiver> holder = Handle<JSReceiver>::cast(holder_);
if (IsElement()) {
@@ -719,7 +712,7 @@ void LookupIterator::Delete() {
accessor->Delete(object, number_);
} else {
DCHECK(!name()->IsPrivateName());
- bool is_prototype_map = holder->map()->is_prototype_map();
+ bool is_prototype_map = holder->map().is_prototype_map();
RuntimeCallTimerScope stats_scope(
isolate_, is_prototype_map
? RuntimeCallCounterId::kPrototypeObject_DeleteProperty
@@ -753,7 +746,7 @@ void LookupIterator::TransitionToAccessorProperty(
attributes = static_cast<PropertyAttributes>(attributes | DONT_ENUM);
}
- if (!IsElement() && !receiver->map()->is_dictionary_map()) {
+ if (!IsElement() && !receiver->map().is_dictionary_map()) {
Handle<Map> old_map(receiver->map(), isolate_);
if (!holder_.is_identical_to(receiver)) {
@@ -809,7 +802,6 @@ void LookupIterator::TransitionToAccessorProperty(
#endif
}
-
void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
PropertyAttributes attributes) {
Handle<JSObject> receiver = GetStoreTarget<JSObject>();
@@ -828,11 +820,11 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
if (receiver->HasSlowArgumentsElements()) {
FixedArray parameter_map = FixedArray::cast(receiver->elements());
- uint32_t length = parameter_map->length() - 2;
+ uint32_t length = parameter_map.length() - 2;
if (number_ < length) {
- parameter_map->set(number_ + 2, ReadOnlyRoots(heap()).the_hole_value());
+ parameter_map.set(number_ + 2, ReadOnlyRoots(heap()).the_hole_value());
}
- FixedArray::cast(receiver->elements())->set(1, *dictionary);
+ FixedArray::cast(receiver->elements()).set(1, *dictionary);
} else {
receiver->set_elements(*dictionary);
}
@@ -840,7 +832,7 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
ReloadPropertyInformation<true>();
} else {
PropertyNormalizationMode mode = CLEAR_INOBJECT_PROPERTIES;
- if (receiver->map()->is_prototype_map()) {
+ if (receiver->map().is_prototype_map()) {
JSObject::InvalidatePrototypeChains(receiver->map());
mode = KEEP_INOBJECT_PROPERTIES;
}
@@ -872,9 +864,9 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
if (!receiver_->IsJSReceiver()) return false;
JSReceiver current = JSReceiver::cast(*receiver_);
JSReceiver object = *holder_;
- if (!current->map()->has_hidden_prototype()) return false;
+ if (!current.map().has_hidden_prototype()) return false;
// JSProxy do not occur as hidden prototypes.
- if (object->IsJSProxy()) return false;
+ if (object.IsJSProxy()) return false;
PrototypeIterator iter(isolate(), current, kStartAtPrototype,
PrototypeIterator::END_AT_NON_HIDDEN);
while (!iter.IsAtEnd()) {
@@ -884,7 +876,6 @@ bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
return false;
}
-
Handle<Object> LookupIterator::FetchValue() const {
Object result;
if (IsElement()) {
@@ -893,9 +884,9 @@ Handle<Object> LookupIterator::FetchValue() const {
return accessor->Get(holder, number_);
} else if (holder_->IsJSGlobalObject()) {
Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
- result = holder->global_dictionary()->ValueAt(number_);
+ result = holder->global_dictionary().ValueAt(number_);
} else if (!holder_->HasFastProperties()) {
- result = holder_->property_dictionary()->ValueAt(number_);
+ result = holder_->property_dictionary().ValueAt(number_);
} else if (property_details_.location() == kField) {
DCHECK_EQ(kData, property_details_.kind());
Handle<JSObject> holder = GetHolder<JSObject>();
@@ -903,7 +894,7 @@ Handle<Object> LookupIterator::FetchValue() const {
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
} else {
- result = holder_->map()->instance_descriptors()->GetStrongValue(number_);
+ result = holder_->map().instance_descriptors().GetStrongValue(number_);
}
return handle(result, isolate_);
}
@@ -916,14 +907,14 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
Handle<JSObject> holder = GetHolder<JSObject>();
FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
if (property_details_.representation().IsDouble()) {
- if (!value->IsNumber()) return false;
+ if (!value.IsNumber()) return false;
uint64_t bits;
if (holder->IsUnboxedDoubleField(field_index)) {
bits = holder->RawFastDoublePropertyAsBitsAt(field_index);
} else {
Object current_value = holder->RawFastPropertyAt(field_index);
- DCHECK(current_value->IsMutableHeapNumber());
- bits = MutableHeapNumber::cast(current_value)->value_as_bits();
+ DCHECK(current_value.IsMutableHeapNumber());
+ bits = MutableHeapNumber::cast(current_value).value_as_bits();
}
// Use bit representation of double to to check for hole double, since
// manipulating the signaling NaN used for the hole in C++, e.g. with
@@ -934,14 +925,14 @@ bool LookupIterator::IsConstFieldValueEqualTo(Object value) const {
// Uninitialized double field.
return true;
}
- return Object::SameNumberValue(bit_cast<double>(bits), value->Number());
+ return Object::SameNumberValue(bit_cast<double>(bits), value.Number());
} else {
Object current_value = holder->RawFastPropertyAt(field_index);
- if (current_value->IsUninitialized(isolate()) || current_value == value) {
+ if (current_value.IsUninitialized(isolate()) || current_value == value) {
return true;
}
- return current_value->IsNumber() && value->IsNumber() &&
- Object::SameNumberValue(current_value->Number(), value->Number());
+ return current_value.IsNumber() && value.IsNumber() &&
+ Object::SameNumberValue(current_value.Number(), value.Number());
}
}
@@ -961,24 +952,13 @@ int LookupIterator::GetAccessorIndex() const {
return descriptor_number();
}
-
-int LookupIterator::GetConstantIndex() const {
- DCHECK(has_property_);
- DCHECK(holder_->HasFastProperties());
- DCHECK_EQ(kDescriptor, property_details_.location());
- DCHECK_EQ(kData, property_details_.kind());
- DCHECK(!FLAG_track_constant_fields);
- DCHECK(!IsElement());
- return descriptor_number();
-}
-
Handle<Map> LookupIterator::GetFieldOwnerMap() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
DCHECK_EQ(kField, property_details_.location());
DCHECK(!IsElement());
Map holder_map = holder_->map();
- return handle(holder_map->FindFieldOwner(isolate(), descriptor_number()),
+ return handle(holder_map.FindFieldOwner(isolate(), descriptor_number()),
isolate_);
}
@@ -995,25 +975,22 @@ Handle<FieldType> LookupIterator::GetFieldType() const {
DCHECK(holder_->HasFastProperties());
DCHECK_EQ(kField, property_details_.location());
return handle(
- holder_->map()->instance_descriptors()->GetFieldType(descriptor_number()),
+ holder_->map().instance_descriptors().GetFieldType(descriptor_number()),
isolate_);
}
-
Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
DCHECK(!IsElement());
Handle<JSGlobalObject> holder = GetHolder<JSGlobalObject>();
- return handle(holder->global_dictionary()->CellAt(dictionary_entry()),
+ return handle(holder->global_dictionary().CellAt(dictionary_entry()),
isolate_);
}
-
Handle<Object> LookupIterator::GetAccessors() const {
DCHECK_EQ(ACCESSOR, state_);
return FetchValue();
}
-
Handle<Object> LookupIterator::GetDataValue() const {
DCHECK_EQ(DATA, state_);
Handle<Object> value = FetchValue();
@@ -1035,30 +1012,30 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
DCHECK_IMPLIES(!initializing_store && property_details_.constness() ==
PropertyConstness::kConst,
IsConstFieldValueEqualTo(*value));
- JSObject::cast(*holder)->WriteToField(descriptor_number(),
- property_details_, *value);
+ JSObject::cast(*holder).WriteToField(descriptor_number(),
+ property_details_, *value);
} else {
DCHECK_EQ(kDescriptor, property_details_.location());
DCHECK_EQ(PropertyConstness::kConst, property_details_.constness());
}
} else if (holder->IsJSGlobalObject()) {
GlobalDictionary dictionary =
- JSGlobalObject::cast(*holder)->global_dictionary();
- dictionary->CellAt(dictionary_entry())->set_value(*value);
+ JSGlobalObject::cast(*holder).global_dictionary();
+ dictionary.CellAt(dictionary_entry()).set_value(*value);
} else {
DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
NameDictionary dictionary = holder->property_dictionary();
- dictionary->ValueAtPut(dictionary_entry(), *value);
+ dictionary.ValueAtPut(dictionary_entry(), *value);
}
}
template <bool is_element>
bool LookupIterator::SkipInterceptor(JSObject holder) {
auto info = GetInterceptor<is_element>(holder);
- if (!is_element && name_->IsSymbol() && !info->can_intercept_symbols()) {
+ if (!is_element && name_->IsSymbol() && !info.can_intercept_symbols()) {
return true;
}
- if (info->non_masking()) {
+ if (info.non_masking()) {
switch (interceptor_state_) {
case InterceptorState::kUninitialized:
interceptor_state_ = InterceptorState::kSkipNonMasking;
@@ -1074,18 +1051,18 @@ bool LookupIterator::SkipInterceptor(JSObject holder) {
JSReceiver LookupIterator::NextHolder(Map map) {
DisallowHeapAllocation no_gc;
- if (map->prototype() == ReadOnlyRoots(heap()).null_value()) {
+ if (map.prototype() == ReadOnlyRoots(heap()).null_value()) {
return JSReceiver();
}
- if (!check_prototype_chain() && !map->has_hidden_prototype()) {
+ if (!check_prototype_chain() && !map.has_hidden_prototype()) {
return JSReceiver();
}
- return JSReceiver::cast(map->prototype());
+ return JSReceiver::cast(map.prototype());
}
LookupIterator::State LookupIterator::NotFound(JSReceiver const holder) const {
DCHECK(!IsElement());
- if (!holder->IsJSTypedArray() || !name_->IsString()) return NOT_FOUND;
+ if (!holder.IsJSTypedArray() || !name_->IsString()) return NOT_FOUND;
return IsSpecialIndex(String::cast(*name_)) ? INTEGER_INDEXED_EXOTIC
: NOT_FOUND;
}
@@ -1094,8 +1071,8 @@ namespace {
template <bool is_element>
bool HasInterceptor(Map map) {
- return is_element ? map->has_indexed_interceptor()
- : map->has_named_interceptor();
+ return is_element ? map.has_indexed_interceptor()
+ : map.has_named_interceptor();
}
} // namespace
@@ -1106,10 +1083,10 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
switch (state_) {
case NOT_FOUND:
- if (map->IsJSProxyMap()) {
+ if (map.IsJSProxyMap()) {
if (is_element || !name_->IsPrivate()) return JSPROXY;
}
- if (map->is_access_check_needed()) {
+ if (map.is_access_check_needed()) {
if (is_element || !name_->IsPrivate()) return ACCESS_CHECK;
}
V8_FALLTHROUGH;
@@ -1120,15 +1097,15 @@ LookupIterator::State LookupIterator::LookupInSpecialHolder(
}
V8_FALLTHROUGH;
case INTERCEPTOR:
- if (!is_element && map->IsJSGlobalObjectMap()) {
+ if (!is_element && map.IsJSGlobalObjectMap()) {
GlobalDictionary dict =
- JSGlobalObject::cast(holder)->global_dictionary();
- int number = dict->FindEntry(isolate(), name_);
+ JSGlobalObject::cast(holder).global_dictionary();
+ int number = dict.FindEntry(isolate(), name_);
if (number == GlobalDictionary::kNotFound) return NOT_FOUND;
number_ = static_cast<uint32_t>(number);
- PropertyCell cell = dict->CellAt(number_);
- if (cell->value()->IsTheHole(isolate_)) return NOT_FOUND;
- property_details_ = cell->property_details();
+ PropertyCell cell = dict.CellAt(number_);
+ if (cell.value().IsTheHole(isolate_)) return NOT_FOUND;
+ property_details_ = cell.property_details();
has_property_ = true;
switch (property_details_.kind()) {
case v8::internal::kData:
@@ -1159,31 +1136,31 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
if (is_element) {
JSObject js_object = JSObject::cast(holder);
- ElementsAccessor* accessor = js_object->GetElementsAccessor();
- FixedArrayBase backing_store = js_object->elements();
+ ElementsAccessor* accessor = js_object.GetElementsAccessor();
+ FixedArrayBase backing_store = js_object.elements();
number_ =
accessor->GetEntryForIndex(isolate_, js_object, backing_store, index_);
if (number_ == kMaxUInt32) {
- return holder->IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
+ return holder.IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
}
property_details_ = accessor->GetDetails(js_object, number_);
- if (map->has_frozen_or_sealed_elements()) {
- PropertyAttributes attrs = map->has_sealed_elements() ? SEALED : FROZEN;
+ if (map.has_frozen_or_sealed_elements()) {
+ PropertyAttributes attrs = map.has_sealed_elements() ? SEALED : FROZEN;
property_details_ = property_details_.CopyAddAttributes(attrs);
}
- } else if (!map->is_dictionary_map()) {
- DescriptorArray descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(isolate_, *name_, map);
+ } else if (!map.is_dictionary_map()) {
+ DescriptorArray descriptors = map.instance_descriptors();
+ int number = descriptors.SearchWithCache(isolate_, *name_, map);
if (number == DescriptorArray::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
- property_details_ = descriptors->GetDetails(number_);
+ property_details_ = descriptors.GetDetails(number_);
} else {
- DCHECK_IMPLIES(holder->IsJSProxy(), name()->IsPrivate());
- NameDictionary dict = holder->property_dictionary();
- int number = dict->FindEntry(isolate(), name_);
+ DCHECK_IMPLIES(holder.IsJSProxy(), name()->IsPrivate());
+ NameDictionary dict = holder.property_dictionary();
+ int number = dict.FindEntry(isolate(), name_);
if (number == NameDictionary::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
- property_details_ = dict->DetailsAt(number_);
+ property_details_ = dict.DetailsAt(number_);
}
has_property_ = true;
switch (property_details_.kind()) {
@@ -1203,8 +1180,8 @@ Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
AccessCheckInfo access_check_info =
AccessCheckInfo::Get(isolate_, Handle<JSObject>::cast(holder_));
if (!access_check_info.is_null()) {
- Object interceptor = IsElement() ? access_check_info->indexed_interceptor()
- : access_check_info->named_interceptor();
+ Object interceptor = IsElement() ? access_check_info.indexed_interceptor()
+ : access_check_info.named_interceptor();
if (interceptor != Object()) {
return handle(InterceptorInfo::cast(interceptor), isolate_);
}
@@ -1222,7 +1199,7 @@ bool LookupIterator::LookupCachedProperty() {
DCHECK(GetAccessors()->IsAccessorPair());
AccessorPair accessor_pair = AccessorPair::cast(*GetAccessors());
- Handle<Object> getter(accessor_pair->getter(), isolate());
+ Handle<Object> getter(accessor_pair.getter(), isolate());
MaybeHandle<Name> maybe_name =
FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), getter);
if (maybe_name.is_null()) return false;
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/objects/lookup.h
index 012ba83d14..820b8ef9b0 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/objects/lookup.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOOKUP_H_
-#define V8_LOOKUP_H_
+#ifndef V8_OBJECTS_LOOKUP_H_
+#define V8_OBJECTS_LOOKUP_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/map.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -180,7 +180,6 @@ class V8_EXPORT_PRIVATE LookupIterator final {
Handle<FieldType> GetFieldType() const;
int GetFieldDescriptorIndex() const;
int GetAccessorIndex() const;
- int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
Handle<Object> GetAccessors() const;
inline Handle<InterceptorInfo> GetInterceptor() const;
@@ -218,7 +217,7 @@ class V8_EXPORT_PRIVATE LookupIterator final {
void NextInternal(Map map, JSReceiver holder);
template <bool is_element>
inline State LookupInHolder(Map map, JSReceiver holder) {
- return map->IsSpecialReceiverMap()
+ return map.IsSpecialReceiverMap()
? LookupInSpecialHolder<is_element>(map, holder)
: LookupInRegularHolder<is_element>(map, holder);
}
@@ -248,8 +247,8 @@ class V8_EXPORT_PRIVATE LookupIterator final {
inline int descriptor_number() const;
inline int dictionary_entry() const;
- static inline Configuration ComputeConfiguration(
- Configuration configuration, Handle<Name> name);
+ static inline Configuration ComputeConfiguration(Configuration configuration,
+ Handle<Name> name);
static Handle<JSReceiver> GetRootForNonJSReceiver(
Isolate* isolate, Handle<Object> receiver, uint32_t index = kMaxUInt32);
@@ -276,8 +275,7 @@ class V8_EXPORT_PRIVATE LookupIterator final {
uint32_t number_;
};
-
} // namespace internal
} // namespace v8
-#endif // V8_LOOKUP_H_
+#endif // V8_OBJECTS_LOOKUP_H_
diff --git a/deps/v8/src/objects/managed.h b/deps/v8/src/objects/managed.h
index f1d42380dc..9653efa1c2 100644
--- a/deps/v8/src/objects/managed.h
+++ b/deps/v8/src/objects/managed.h
@@ -6,10 +6,10 @@
#define V8_OBJECTS_MANAGED_H_
#include <memory>
-#include "src/global-handles.h"
-#include "src/handles.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
+#include "src/handles/handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
#include "src/objects/foreign.h"
namespace v8 {
@@ -59,7 +59,7 @@ class Managed : public Foreign {
// Get a reference to the shared pointer to the C++ object.
V8_INLINE const std::shared_ptr<CppType>& get() { return *GetSharedPtrPtr(); }
- static Managed cast(Object obj) { return Managed(obj->ptr()); }
+ static Managed cast(Object obj) { return Managed(obj.ptr()); }
static Managed unchecked_cast(Object obj) { return bit_cast<Managed>(obj); }
// Allocate a new {CppType} and wrap it in a {Managed<CppType>}.
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 792e12d126..8c26196fb5 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -7,19 +7,19 @@
#include "src/objects/map.h"
-#include "src/field-type.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/layout-descriptor-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array-inl.h"
+#include "src/objects/field-type.h"
#include "src/objects/instance-type-inl.h"
+#include "src/objects/layout-descriptor-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property.h"
#include "src/objects/prototype-info-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/templates-inl.h"
-#include "src/property.h"
-#include "src/transitions.h"
+#include "src/objects/transitions-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -31,17 +31,18 @@ OBJECT_CONSTRUCTORS_IMPL(Map, HeapObject)
CAST_ACCESSOR(Map)
DescriptorArray Map::instance_descriptors() const {
- return DescriptorArray::cast(READ_FIELD(*this, kDescriptorsOffset));
+ return DescriptorArray::cast(READ_FIELD(*this, kInstanceDescriptorsOffset));
}
DescriptorArray Map::synchronized_instance_descriptors() const {
- return DescriptorArray::cast(ACQUIRE_READ_FIELD(*this, kDescriptorsOffset));
+ return DescriptorArray::cast(
+ ACQUIRE_READ_FIELD(*this, kInstanceDescriptorsOffset));
}
void Map::set_synchronized_instance_descriptors(DescriptorArray value,
WriteBarrierMode mode) {
- RELEASE_WRITE_FIELD(*this, kDescriptorsOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kDescriptorsOffset, value, mode);
+ RELEASE_WRITE_FIELD(*this, kInstanceDescriptorsOffset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, kInstanceDescriptorsOffset, value, mode);
}
// A freshly allocated layout descriptor can be set on an existing map.
@@ -75,14 +76,14 @@ BIT_FIELD_ACCESSORS(Map, relaxed_bit_field, has_prototype_slot,
// |bit_field2| fields.
BIT_FIELD_ACCESSORS(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
BIT_FIELD_ACCESSORS(Map, bit_field2, is_prototype_map, Map::IsPrototypeMapBit)
-BIT_FIELD_ACCESSORS(Map, bit_field2, is_in_retained_map_list,
- Map::IsInRetainedMapListBit)
+BIT_FIELD_ACCESSORS(Map, bit_field2, has_hidden_prototype,
+ Map::HasHiddenPrototypeBit)
// |bit_field3| fields.
BIT_FIELD_ACCESSORS(Map, bit_field3, owns_descriptors, Map::OwnsDescriptorsBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, has_hidden_prototype,
- Map::HasHiddenPrototypeBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
+BIT_FIELD_ACCESSORS(Map, bit_field3, is_in_retained_map_list,
+ Map::IsInRetainedMapListBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_migration_target,
Map::IsMigrationTargetBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, is_immutable_proto,
@@ -97,18 +98,18 @@ BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
InterceptorInfo Map::GetNamedInterceptor() {
DCHECK(has_named_interceptor());
FunctionTemplateInfo info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info->GetNamedPropertyHandler());
+ return InterceptorInfo::cast(info.GetNamedPropertyHandler());
}
InterceptorInfo Map::GetIndexedInterceptor() {
DCHECK(has_indexed_interceptor());
FunctionTemplateInfo info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info->GetIndexedPropertyHandler());
+ return InterceptorInfo::cast(info.GetIndexedPropertyHandler());
}
bool Map::IsMostGeneralFieldType(Representation representation,
FieldType field_type) {
- return !representation.IsHeapObject() || field_type->IsAny();
+ return !representation.IsHeapObject() || field_type.IsAny();
}
bool Map::CanHaveFastTransitionableElementsKind(InstanceType instance_type) {
@@ -122,26 +123,23 @@ bool Map::CanHaveFastTransitionableElementsKind() const {
// static
void Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
- Isolate* isolate, InstanceType instance_type, PropertyConstness* constness,
+ Isolate* isolate, InstanceType instance_type,
Representation* representation, Handle<FieldType>* field_type) {
if (CanHaveFastTransitionableElementsKind(instance_type)) {
// We don't support propagation of field generalization through elements
// kind transitions because they are inserted into the transition tree
// before field transitions. In order to avoid complexity of handling
// such a case we ensure that all maps with transitionable elements kinds
- // have the most general field type.
- if (representation->IsHeapObject()) {
- // The field type is either already Any or should become Any if it was
- // something else.
- *field_type = FieldType::Any(isolate);
- }
+ // have the most general field representation and type.
+ *field_type = FieldType::Any(isolate);
+ *representation = Representation::Tagged();
}
}
bool Map::IsUnboxedDoubleField(FieldIndex index) const {
if (!FLAG_unbox_double_fields) return false;
- if (index.is_hidden_field() || !index.is_inobject()) return false;
- return !layout_descriptor()->IsTagged(index.property_index());
+ if (!index.is_inobject()) return false;
+ return !layout_descriptor().IsTagged(index.property_index());
}
bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
@@ -163,7 +161,7 @@ bool Map::TooManyFastProperties(StoreOrigin store_origin) const {
}
PropertyDetails Map::GetLastDescriptorDetails() const {
- return instance_descriptors()->GetDetails(LastAdded());
+ return instance_descriptors().GetDetails(LastAdded());
}
int Map::LastAdded() const {
@@ -177,7 +175,7 @@ int Map::NumberOfOwnDescriptors() const {
}
void Map::SetNumberOfOwnDescriptors(int number) {
- DCHECK_LE(number, instance_descriptors()->number_of_descriptors());
+ DCHECK_LE(number, instance_descriptors().number_of_descriptors());
CHECK_LE(static_cast<unsigned>(number),
static_cast<unsigned>(kMaxNumberOfDescriptors));
set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
@@ -200,9 +198,8 @@ FixedArrayBase Map::GetInitialElements() const {
result = GetReadOnlyRoots().empty_fixed_array();
} else if (has_fast_sloppy_arguments_elements()) {
result = GetReadOnlyRoots().empty_sloppy_arguments_elements();
- } else if (has_fixed_typed_array_elements()) {
- result =
- GetReadOnlyRoots().EmptyFixedTypedArrayForTypedArray(elements_kind());
+ } else if (has_typed_array_elements()) {
+ result = GetReadOnlyRoots().empty_byte_array();
} else if (has_dictionary_elements()) {
result = GetReadOnlyRoots().empty_slow_element_dictionary();
} else {
@@ -293,12 +290,11 @@ Handle<Map> Map::AddMissingTransitionsForTesting(
}
InstanceType Map::instance_type() const {
- return static_cast<InstanceType>(
- READ_UINT16_FIELD(*this, kInstanceTypeOffset));
+ return static_cast<InstanceType>(ReadField<uint16_t>(kInstanceTypeOffset));
}
void Map::set_instance_type(InstanceType value) {
- WRITE_UINT16_FIELD(*this, kInstanceTypeOffset, value);
+ WriteField<uint16_t>(kInstanceTypeOffset, value);
}
int Map::UnusedPropertyFields() const {
@@ -373,19 +369,19 @@ void Map::SetOutOfObjectUnusedPropertyFields(int value) {
void Map::CopyUnusedPropertyFields(Map map) {
set_used_or_unused_instance_size_in_words(
- map->used_or_unused_instance_size_in_words());
- DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
+ map.used_or_unused_instance_size_in_words());
+ DCHECK_EQ(UnusedPropertyFields(), map.UnusedPropertyFields());
}
void Map::CopyUnusedPropertyFieldsAdjustedForInstanceSize(Map map) {
- int value = map->used_or_unused_instance_size_in_words();
+ int value = map.used_or_unused_instance_size_in_words();
if (value >= JSValue::kFieldsAdded) {
// Unused in-object fields. Adjust the offset from the object’s start
// so it matches the distance to the object’s end.
- value += instance_size_in_words() - map->instance_size_in_words();
+ value += instance_size_in_words() - map.instance_size_in_words();
}
set_used_or_unused_instance_size_in_words(value);
- DCHECK_EQ(UnusedPropertyFields(), map->UnusedPropertyFields());
+ DCHECK_EQ(UnusedPropertyFields(), map.UnusedPropertyFields());
}
void Map::AccountAddedPropertyField() {
@@ -420,10 +416,10 @@ void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
}
-byte Map::bit_field() const { return READ_BYTE_FIELD(*this, kBitFieldOffset); }
+byte Map::bit_field() const { return ReadField<byte>(kBitFieldOffset); }
void Map::set_bit_field(byte value) {
- WRITE_BYTE_FIELD(*this, kBitFieldOffset, value);
+ WriteField<byte>(kBitFieldOffset, value);
}
byte Map::relaxed_bit_field() const {
@@ -434,12 +430,10 @@ void Map::set_relaxed_bit_field(byte value) {
RELAXED_WRITE_BYTE_FIELD(*this, kBitFieldOffset, value);
}
-byte Map::bit_field2() const {
- return READ_BYTE_FIELD(*this, kBitField2Offset);
-}
+byte Map::bit_field2() const { return ReadField<byte>(kBitField2Offset); }
void Map::set_bit_field2(byte value) {
- WRITE_BYTE_FIELD(*this, kBitField2Offset, value);
+ WriteField<byte>(kBitField2Offset, value);
}
bool Map::is_abandoned_prototype_map() const {
@@ -447,8 +441,8 @@ bool Map::is_abandoned_prototype_map() const {
}
bool Map::should_be_fast_prototype_map() const {
- if (!prototype_info()->IsPrototypeInfo()) return false;
- return PrototypeInfo::cast(prototype_info())->should_be_fast_map();
+ if (!prototype_info().IsPrototypeInfo()) return false;
+ return PrototypeInfo::cast(prototype_info()).should_be_fast_map();
}
void Map::set_elements_kind(ElementsKind elements_kind) {
@@ -492,8 +486,8 @@ bool Map::has_fast_string_wrapper_elements() const {
return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
}
-bool Map::has_fixed_typed_array_elements() const {
- return IsFixedTypedArrayElementsKind(elements_kind());
+bool Map::has_typed_array_elements() const {
+ return IsTypedArrayElementsKind(elements_kind());
}
bool Map::has_dictionary_elements() const {
@@ -501,7 +495,7 @@ bool Map::has_dictionary_elements() const {
}
bool Map::has_frozen_or_sealed_elements() const {
- return IsPackedFrozenOrSealedElementsKind(elements_kind());
+ return IsFrozenOrSealedElementsKind(elements_kind());
}
bool Map::has_sealed_elements() const {
@@ -531,7 +525,7 @@ bool Map::is_stable() const { return !IsUnstableBit::decode(bit_field3()); }
bool Map::CanBeDeprecated() const {
int descriptor = LastAdded();
for (int i = 0; i <= descriptor; i++) {
- PropertyDetails details = instance_descriptors()->GetDetails(i);
+ PropertyDetails details = instance_descriptors().GetDetails(i);
if (details.representation().IsNone()) return true;
if (details.representation().IsSmi()) return true;
if (details.representation().IsDouble()) return true;
@@ -546,7 +540,7 @@ bool Map::CanBeDeprecated() const {
void Map::NotifyLeafMapLayoutChange(Isolate* isolate) {
if (is_stable()) {
mark_unstable();
- dependent_code()->DeoptimizeDependentCodeGroup(
+ dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPrototypeCheckGroup);
}
}
@@ -581,7 +575,7 @@ HeapObject Map::prototype() const {
}
void Map::set_prototype(HeapObject value, WriteBarrierMode mode) {
- DCHECK(value->IsNull() || value->IsJSReceiver());
+ DCHECK(value.IsNull() || value.IsJSReceiver());
WRITE_FIELD(*this, kPrototypeOffset, value);
CONDITIONAL_WRITE_BARRIER(*this, kPrototypeOffset, value, mode);
}
@@ -608,13 +602,13 @@ void Map::UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
int number_of_own_descriptors) {
SetInstanceDescriptors(isolate, descriptors, number_of_own_descriptors);
if (FLAG_unbox_double_fields) {
- if (layout_descriptor()->IsSlowLayout()) {
+ if (layout_descriptor().IsSlowLayout()) {
set_layout_descriptor(layout_desc);
}
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(*this));
+ CHECK(layout_descriptor().IsConsistentWithMap(*this));
CHECK_EQ(Map::GetVisitorId(*this), visitor_id());
}
#else
@@ -627,14 +621,14 @@ void Map::UpdateDescriptors(Isolate* isolate, DescriptorArray descriptors,
void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors,
LayoutDescriptor layout_desc) {
SetInstanceDescriptors(isolate, descriptors,
- descriptors->number_of_descriptors());
+ descriptors.number_of_descriptors());
if (FLAG_unbox_double_fields) {
set_layout_descriptor(layout_desc);
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(layout_descriptor()->IsConsistentWithMap(*this));
+ CHECK(layout_descriptor().IsConsistentWithMap(*this));
}
#else
SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(*this));
@@ -666,11 +660,11 @@ LayoutDescriptor Map::GetLayoutDescriptor() const {
void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
DescriptorArray descriptors = instance_descriptors();
int number_of_own_descriptors = NumberOfOwnDescriptors();
- DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
{
// The following two operations need to happen before the marking write
// barrier.
- descriptors->Append(desc);
+ descriptors.Append(desc);
SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
MarkingBarrierForDescriptorArray(isolate->heap(), *this, descriptors,
number_of_own_descriptors + 1);
@@ -694,7 +688,7 @@ void Map::AppendDescriptor(Isolate* isolate, Descriptor* desc) {
HeapObject Map::GetBackPointer() const {
Object object = constructor_or_backpointer();
- if (object->IsMap()) {
+ if (object.IsMap()) {
return Map::cast(object);
}
return GetReadOnlyRoots().undefined_value();
@@ -723,10 +717,10 @@ void Map::set_prototype_info(Object value, WriteBarrierMode mode) {
void Map::SetBackPointer(Object value, WriteBarrierMode mode) {
CHECK_GE(instance_type(), FIRST_JS_RECEIVER_TYPE);
- CHECK(value->IsMap());
- CHECK(GetBackPointer()->IsUndefined());
- CHECK_IMPLIES(value->IsMap(), Map::cast(value)->GetConstructor() ==
- constructor_or_backpointer());
+ CHECK(value.IsMap());
+ CHECK(GetBackPointer().IsUndefined());
+ CHECK_IMPLIES(value.IsMap(), Map::cast(value).GetConstructor() ==
+ constructor_or_backpointer());
set_constructor_or_backpointer(value, mode);
}
@@ -737,34 +731,34 @@ ACCESSORS(Map, constructor_or_backpointer, Object,
bool Map::IsPrototypeValidityCellValid() const {
Object validity_cell = prototype_validity_cell();
- Object value = validity_cell->IsSmi() ? Smi::cast(validity_cell)
- : Cell::cast(validity_cell)->value();
+ Object value = validity_cell.IsSmi() ? Smi::cast(validity_cell)
+ : Cell::cast(validity_cell).value();
return value == Smi::FromInt(Map::kPrototypeChainValid);
}
Object Map::GetConstructor() const {
Object maybe_constructor = constructor_or_backpointer();
// Follow any back pointers.
- while (maybe_constructor->IsMap()) {
+ while (maybe_constructor.IsMap()) {
maybe_constructor =
- Map::cast(maybe_constructor)->constructor_or_backpointer();
+ Map::cast(maybe_constructor).constructor_or_backpointer();
}
return maybe_constructor;
}
FunctionTemplateInfo Map::GetFunctionTemplateInfo() const {
Object constructor = GetConstructor();
- if (constructor->IsJSFunction()) {
- DCHECK(JSFunction::cast(constructor)->shared()->IsApiFunction());
- return JSFunction::cast(constructor)->shared()->get_api_func_data();
+ if (constructor.IsJSFunction()) {
+ DCHECK(JSFunction::cast(constructor).shared().IsApiFunction());
+ return JSFunction::cast(constructor).shared().get_api_func_data();
}
- DCHECK(constructor->IsFunctionTemplateInfo());
+ DCHECK(constructor.IsFunctionTemplateInfo());
return FunctionTemplateInfo::cast(constructor);
}
void Map::SetConstructor(Object constructor, WriteBarrierMode mode) {
// Never overwrite a back pointer with a constructor.
- CHECK(!constructor_or_backpointer()->IsMap());
+ CHECK(!constructor_or_backpointer().IsMap());
set_constructor_or_backpointer(constructor, mode);
}
@@ -780,7 +774,7 @@ bool Map::IsInobjectSlackTrackingInProgress() const {
void Map::InobjectSlackTrackingStep(Isolate* isolate) {
// Slack tracking should only be performed on an initial map.
- DCHECK(GetBackPointer()->IsUndefined());
+ DCHECK(GetBackPointer().IsUndefined());
if (!IsInobjectSlackTrackingInProgress()) return;
int counter = construction_counter();
set_construction_counter(counter - 1);
@@ -813,7 +807,7 @@ int NormalizedMapCache::GetIndex(Handle<Map> map) {
bool HeapObject::IsNormalizedMapCache() const {
if (!IsWeakFixedArray()) return false;
- if (WeakFixedArray::cast(*this)->length() != NormalizedMapCache::kEntries) {
+ if (WeakFixedArray::cast(*this).length() != NormalizedMapCache::kEntries) {
return false;
}
return true;
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/objects/map-updater.cc
index cf29e86237..855fdabdf3 100644
--- a/deps/v8/src/map-updater.cc
+++ b/deps/v8/src/objects/map-updater.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/map-updater.h"
+#include "src/objects/map-updater.h"
-#include "src/field-type.h"
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/property-details.h"
-#include "src/transitions.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
+#include "src/objects/field-type.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
+#include "src/objects/property-details.h"
+#include "src/objects/transitions.h"
namespace v8 {
namespace internal {
@@ -34,9 +34,8 @@ MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map)
is_transitionable_fast_elements_kind_(
IsTransitionableFastElementsKind(new_elements_kind_)) {
// We shouldn't try to update remote objects.
- DCHECK(!old_map->FindRootMap(isolate)
- ->GetConstructor()
- ->IsFunctionTemplateInfo());
+ DCHECK(
+ !old_map->FindRootMap(isolate).GetConstructor().IsFunctionTemplateInfo());
}
Name MapUpdater::GetKey(int descriptor) const {
@@ -89,7 +88,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
if (location == kField) {
return handle(GetFieldType(descriptor), isolate_);
} else {
- return GetValue(descriptor)->OptimalType(isolate_, representation);
+ return GetValue(descriptor).OptimalType(isolate_, representation);
}
}
@@ -102,7 +101,7 @@ Handle<FieldType> MapUpdater::GetOrComputeFieldType(
return handle(descriptors->GetFieldType(descriptor), isolate_);
} else {
return descriptors->GetStrongValue(descriptor)
- ->OptimalType(isolate_, representation);
+ .OptimalType(isolate_, representation);
}
}
@@ -147,8 +146,8 @@ Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
}
Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
- isolate_, old_map_->instance_type(), &new_constness_,
- &new_representation_, &new_field_type_);
+ isolate_, old_map_->instance_type(), &new_representation_,
+ &new_field_type_);
if (TryReconfigureToDataFieldInplace() == kEnd) return result_map_;
if (FindRootMap() == kEnd) return result_map_;
@@ -214,14 +213,7 @@ MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
// Updating deprecated maps in-place doesn't make sense.
if (old_map_->is_deprecated()) return state_;
- // If it's just a representation generalization case (i.e. property kind and
- // attributes stays unchanged) it's fine to transition from None to anything
- // but double without any modification to the object, because the default
- // uninitialized value for representation None can be overwritten by both
- // smi and tagged values. Doubles, however, would require a box allocation.
- if (new_representation_.IsNone() || new_representation_.IsDouble()) {
- return state_; // Not done yet.
- }
+ if (new_representation_.IsNone()) return state_; // Not done yet.
PropertyDetails old_details =
old_descriptors_->GetDetails(modified_descriptor_);
@@ -237,6 +229,7 @@ MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
old_map_->PrintGeneralization(
isolate_, stdout, "uninitialized field", modified_descriptor_, old_nof_,
old_nof_, false, old_representation, new_representation_,
+ old_details.constness(), new_constness_,
handle(old_descriptors_->GetFieldType(modified_descriptor_), isolate_),
MaybeHandle<Object>(), new_field_type_, MaybeHandle<Object>());
}
@@ -250,7 +243,7 @@ MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
.representation()
.Equals(new_representation_));
DCHECK(old_descriptors_->GetFieldType(modified_descriptor_)
- ->NowIs(new_field_type_));
+ .NowIs(new_field_type_));
result_map_ = old_map_;
state_ = kEnd;
@@ -310,7 +303,7 @@ MapUpdater::State MapUpdater::FindRootMap() {
if (root_map_->is_deprecated()) {
state_ = kEnd;
result_map_ = handle(
- JSFunction::cast(root_map_->GetConstructor())->initial_map(), isolate_);
+ JSFunction::cast(root_map_->GetConstructor()).initial_map(), isolate_);
result_map_ = Map::AsElementsKind(isolate_, result_map_, to_kind);
DCHECK(result_map_->is_dictionary_map());
return state_;
@@ -331,8 +324,8 @@ MapUpdater::State MapUpdater::FindRootMap() {
// the seal transitions), so change {to_kind} accordingly.
DCHECK(to_kind == DICTIONARY_ELEMENTS ||
to_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
- IsFixedTypedArrayElementsKind(to_kind) ||
- IsPackedFrozenOrSealedElementsKind(to_kind));
+ IsTypedArrayElementsKind(to_kind) ||
+ IsFrozenOrSealedElementsKind(to_kind));
to_kind = integrity_source_map_->elements_kind();
}
@@ -434,7 +427,7 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (modified_descriptor_ >= 0) {
DescriptorArray target_descriptors = target_map_->instance_descriptors();
PropertyDetails details =
- target_descriptors->GetDetails(modified_descriptor_);
+ target_descriptors.GetDetails(modified_descriptor_);
DCHECK_EQ(new_kind_, details.kind());
DCHECK_EQ(GetDetails(modified_descriptor_).attributes(),
details.attributes());
@@ -444,12 +437,12 @@ MapUpdater::State MapUpdater::FindTargetMap() {
if (new_location_ == kField) {
DCHECK_EQ(kField, details.location());
DCHECK(new_field_type_->NowIs(
- target_descriptors->GetFieldType(modified_descriptor_)));
+ target_descriptors.GetFieldType(modified_descriptor_)));
} else {
DCHECK(details.location() == kField ||
EqualImmutableValues(
*new_value_,
- target_descriptors->GetStrongValue(modified_descriptor_)));
+ target_descriptors.GetStrongValue(modified_descriptor_)));
}
}
#endif
@@ -565,9 +558,6 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
? kField
: kDescriptor;
- if (!FLAG_track_constant_fields && next_location == kField) {
- next_constness = PropertyConstness::kMutable;
- }
// Ensure that mutable values are stored in fields.
DCHECK_IMPLIES(next_constness == PropertyConstness::kMutable,
next_location == kField);
@@ -589,8 +579,7 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
target_field_type, isolate_);
Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
- isolate_, instance_type, &next_constness, &next_representation,
- &next_field_type);
+ isolate_, instance_type, &next_representation, &next_field_type);
MaybeObjectHandle wrapped_type(
Map::WrapFieldType(isolate_, next_field_type));
@@ -610,14 +599,8 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
DCHECK_EQ(PropertyConstness::kConst, next_constness);
Handle<Object> value(GetValue(i), isolate_);
- Descriptor d;
- if (next_kind == kData) {
- DCHECK(!FLAG_track_constant_fields);
- d = Descriptor::DataConstant(key, value, next_attributes);
- } else {
- DCHECK_EQ(kAccessor, next_kind);
- d = Descriptor::AccessorConstant(key, value, next_attributes);
- }
+ DCHECK_EQ(kAccessor, next_kind);
+ Descriptor d = Descriptor::AccessorConstant(key, value, next_attributes);
new_descriptors->Set(i, &d);
}
}
@@ -650,8 +633,6 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
Map::WrapFieldType(isolate_, next_field_type));
Descriptor d;
if (next_kind == kData) {
- DCHECK_IMPLIES(!FLAG_track_constant_fields,
- next_constness == PropertyConstness::kMutable);
d = Descriptor::DataField(key, current_offset, next_attributes,
next_constness, next_representation,
wrapped_type);
@@ -692,9 +673,9 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
TransitionsAccessor(isolate_, current, &no_allocation)
.SearchTransition(name, details.kind(), details.attributes());
if (next.is_null()) break;
- DescriptorArray next_descriptors = next->instance_descriptors();
+ DescriptorArray next_descriptors = next.instance_descriptors();
- PropertyDetails next_details = next_descriptors->GetDetails(i);
+ PropertyDetails next_details = next_descriptors.GetDetails(i);
DCHECK_EQ(details.kind(), next_details.kind());
DCHECK_EQ(details.attributes(), next_details.attributes());
if (details.constness() != next_details.constness()) break;
@@ -702,13 +683,13 @@ Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
if (!details.representation().Equals(next_details.representation())) break;
if (next_details.location() == kField) {
- FieldType next_field_type = next_descriptors->GetFieldType(i);
- if (!descriptors->GetFieldType(i)->NowIs(next_field_type)) {
+ FieldType next_field_type = next_descriptors.GetFieldType(i);
+ if (!descriptors->GetFieldType(i).NowIs(next_field_type)) {
break;
}
} else {
if (!EqualImmutableValues(descriptors->GetStrongValue(i),
- next_descriptors->GetStrongValue(i))) {
+ next_descriptors.GetStrongValue(i))) {
break;
}
}
@@ -735,7 +716,7 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
Map maybe_transition = transitions.SearchTransition(
GetKey(split_nof), split_details.kind(), split_details.attributes());
if (!maybe_transition.is_null()) {
- maybe_transition->DeprecateTransitionTree(isolate_);
+ maybe_transition.DeprecateTransitionTree(isolate_);
}
// If |maybe_transition| is not nullptr then the transition array already
@@ -775,7 +756,8 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
isolate_, stdout, "", modified_descriptor_, split_nof, old_nof_,
old_details.location() == kDescriptor && new_location_ == kField,
old_details.representation(), new_details.representation(),
- old_field_type, old_value, new_field_type, new_value);
+ old_details.constness(), new_details.constness(), old_field_type,
+ old_value, new_field_type, new_value);
}
Handle<LayoutDescriptor> new_layout_descriptor =
@@ -810,7 +792,10 @@ MapUpdater::State MapUpdater::ConstructNewMapWithIntegrityLevelTransition() {
result_map_ = Map::CopyForPreventExtensions(
isolate_, target_map_, integrity_level_, integrity_level_symbol_,
- "CopyForPreventExtensions");
+ "CopyForPreventExtensions",
+ old_map_->elements_kind() == DICTIONARY_ELEMENTS);
+ DCHECK_IMPLIES(old_map_->elements_kind() == DICTIONARY_ELEMENTS,
+ result_map_->elements_kind() == DICTIONARY_ELEMENTS);
state_ = kEnd;
return state_;
diff --git a/deps/v8/src/map-updater.h b/deps/v8/src/objects/map-updater.h
index 13571a3321..3ba86eacbc 100644
--- a/deps/v8/src/map-updater.h
+++ b/deps/v8/src/objects/map-updater.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MAP_UPDATER_H_
-#define V8_MAP_UPDATER_H_
+#ifndef V8_OBJECTS_MAP_UPDATER_H_
+#define V8_OBJECTS_MAP_UPDATER_H_
-#include "src/elements-kind.h"
-#include "src/field-type.h"
-#include "src/globals.h"
-#include "src/handles.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/field-type.h"
#include "src/objects/map.h"
-#include "src/property-details.h"
+#include "src/objects/property-details.h"
namespace v8 {
namespace internal {
@@ -202,4 +202,4 @@ class MapUpdater {
} // namespace internal
} // namespace v8
-#endif // V8_MAP_UPDATER_H_
+#endif // V8_OBJECTS_MAP_UPDATER_H_
diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc
index 80ea74a176..43d8c305c5 100644
--- a/deps/v8/src/objects/map.cc
+++ b/deps/v8/src/objects/map.cc
@@ -4,24 +4,24 @@
#include "src/objects/map.h"
-#include "src/bootstrapper.h"
-#include "src/counters-inl.h"
-#include "src/field-type.h"
-#include "src/frames.h"
-#include "src/handles-inl.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/maybe-handles.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/isolate.h"
-#include "src/layout-descriptor.h"
-#include "src/log.h"
-#include "src/map-updater.h"
-#include "src/maybe-handles.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters-inl.h"
+#include "src/logging/log.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/field-type.h"
#include "src/objects/js-objects.h"
+#include "src/objects/layout-descriptor.h"
+#include "src/objects/map-updater.h"
#include "src/objects/maybe-object.h"
#include "src/objects/oddball.h"
-#include "src/ostreams.h"
-#include "src/property.h"
-#include "src/transitions-inl.h"
+#include "src/objects/property.h"
+#include "src/objects/transitions-inl.h"
+#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -34,12 +34,12 @@ Map Map::GetPrototypeChainRootMap(Isolate* isolate) const {
}
int constructor_function_index = GetConstructorFunctionIndex();
if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
- Context native_context = isolate->context()->native_context();
+ Context native_context = isolate->context().native_context();
JSFunction constructor_function =
- JSFunction::cast(native_context->get(constructor_function_index));
- return constructor_function->initial_map();
+ JSFunction::cast(native_context.get(constructor_function_index));
+ return constructor_function.initial_map();
}
- return ReadOnlyRoots(isolate).null_value()->map();
+ return ReadOnlyRoots(isolate).null_value().map();
}
// static
@@ -73,9 +73,9 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
PropertyAttributes attributes) {
OFStream os(file);
os << "[reconfiguring]";
- Name name = instance_descriptors()->GetKey(modify_index);
- if (name->IsString()) {
- String::cast(name)->PrintOn(file);
+ Name name = instance_descriptors().GetKey(modify_index);
+ if (name.IsString()) {
+ String::cast(name).PrintOn(file);
} else {
os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
}
@@ -88,7 +88,7 @@ void Map::PrintReconfiguration(Isolate* isolate, FILE* file, int modify_index,
VisitorId Map::GetVisitorId(Map map) {
STATIC_ASSERT(kVisitorIdCount <= 256);
- const int instance_type = map->instance_type();
+ const int instance_type = map.instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
switch (instance_type & kStringRepresentationMask) {
@@ -301,7 +301,7 @@ VisitorId Map::GetVisitorId(Map map) {
case WASM_TABLE_TYPE:
case JS_BOUND_FUNCTION_TYPE: {
const bool has_raw_data_fields =
- (FLAG_unbox_double_fields && !map->HasFastPointerLayout()) ||
+ (FLAG_unbox_double_fields && !map.HasFastPointerLayout()) ||
(COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0);
return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast;
}
@@ -325,21 +325,6 @@ VisitorId Map::GetVisitorId(Map map) {
case BIGINT_TYPE:
return kVisitBigInt;
- case FIXED_UINT8_ARRAY_TYPE:
- case FIXED_INT8_ARRAY_TYPE:
- case FIXED_UINT16_ARRAY_TYPE:
- case FIXED_INT16_ARRAY_TYPE:
- case FIXED_UINT32_ARRAY_TYPE:
- case FIXED_INT32_ARRAY_TYPE:
- case FIXED_FLOAT32_ARRAY_TYPE:
- case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
- case FIXED_BIGUINT64_ARRAY_TYPE:
- case FIXED_BIGINT64_ARRAY_TYPE:
- return kVisitFixedTypedArrayBase;
-
- case FIXED_FLOAT64_ARRAY_TYPE:
- return kVisitFixedFloat64Array;
-
case ALLOCATION_SITE_TYPE:
return kVisitAllocationSite;
@@ -349,6 +334,9 @@ VisitorId Map::GetVisitorId(Map map) {
if (instance_type == PROTOTYPE_INFO_TYPE) {
return kVisitPrototypeInfo;
}
+ if (instance_type == WASM_CAPI_FUNCTION_DATA_TYPE) {
+ return kVisitWasmCapiFunctionData;
+ }
return kVisitStruct;
case LOAD_HANDLER_TYPE:
@@ -364,13 +352,14 @@ void Map::PrintGeneralization(
Isolate* isolate, FILE* file, const char* reason, int modify_index,
int split, int descriptors, bool descriptor_to_field,
Representation old_representation, Representation new_representation,
+ PropertyConstness old_constness, PropertyConstness new_constness,
MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) {
OFStream os(file);
os << "[generalizing]";
- Name name = instance_descriptors()->GetKey(modify_index);
- if (name->IsString()) {
- String::cast(name)->PrintOn(file);
+ Name name = instance_descriptors().GetKey(modify_index);
+ if (name.IsString()) {
+ String::cast(name).PrintOn(file);
} else {
os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
}
@@ -384,7 +373,7 @@ void Map::PrintGeneralization(
} else {
old_field_type.ToHandleChecked()->PrintTo(os);
}
- os << "}";
+ os << ";" << old_constness << "}";
}
os << "->" << new_representation.Mnemonic() << "{";
if (new_field_type.is_null()) {
@@ -392,7 +381,7 @@ void Map::PrintGeneralization(
} else {
new_field_type.ToHandleChecked()->PrintTo(os);
}
- os << "} (";
+ os << ";" << new_constness << "} (";
if (strlen(reason) > 0) {
os << reason;
} else {
@@ -429,9 +418,9 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
PropertyConstness constness,
Representation representation,
TransitionFlag flag) {
- DCHECK(DescriptorArray::kNotFound ==
- map->instance_descriptors()->Search(*name,
- map->NumberOfOwnDescriptors()));
+ DCHECK(
+ DescriptorArray::kNotFound ==
+ map->instance_descriptors().Search(*name, map->NumberOfOwnDescriptors()));
// Ensure the descriptor array does not get too big.
if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors) {
@@ -447,13 +436,11 @@ MaybeHandle<Map> Map::CopyWithField(Isolate* isolate, Handle<Map> map,
type = FieldType::Any(isolate);
} else {
Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
- isolate, map->instance_type(), &constness, &representation, &type);
+ isolate, map->instance_type(), &representation, &type);
}
MaybeObjectHandle wrapped_type = WrapFieldType(isolate, type);
- DCHECK_IMPLIES(!FLAG_track_constant_fields,
- constness == PropertyConstness::kMutable);
Descriptor d = Descriptor::DataField(name, index, attributes, constness,
representation, wrapped_type);
Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
@@ -471,23 +458,15 @@ MaybeHandle<Map> Map::CopyWithConstant(Isolate* isolate, Handle<Map> map,
return MaybeHandle<Map>();
}
- if (FLAG_track_constant_fields) {
- Representation representation = constant->OptimalRepresentation();
- Handle<FieldType> type = constant->OptimalType(isolate, representation);
- return CopyWithField(isolate, map, name, type, attributes,
- PropertyConstness::kConst, representation, flag);
- } else {
- // Allocate new instance descriptors with (name, constant) added.
- Descriptor d =
- Descriptor::DataConstant(isolate, name, 0, constant, attributes);
- Handle<Map> new_map = Map::CopyAddDescriptor(isolate, map, &d, flag);
- return new_map;
- }
+ Representation representation = constant->OptimalRepresentation();
+ Handle<FieldType> type = constant->OptimalType(isolate, representation);
+ return CopyWithField(isolate, map, name, type, attributes,
+ PropertyConstness::kConst, representation, flag);
}
bool Map::TransitionRemovesTaggedField(Map target) const {
int inobject = NumberOfFields();
- int target_inobject = target->NumberOfFields();
+ int target_inobject = target.NumberOfFields();
for (int i = target_inobject; i < inobject; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*this, i);
if (!IsUnboxedDoubleField(index)) return true;
@@ -497,11 +476,11 @@ bool Map::TransitionRemovesTaggedField(Map target) const {
bool Map::TransitionChangesTaggedFieldToUntaggedField(Map target) const {
int inobject = NumberOfFields();
- int target_inobject = target->NumberOfFields();
+ int target_inobject = target.NumberOfFields();
int limit = Min(inobject, target_inobject);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
- if (!IsUnboxedDoubleField(index) && target->IsUnboxedDoubleField(index)) {
+ if (!IsUnboxedDoubleField(index) && target.IsUnboxedDoubleField(index)) {
return true;
}
}
@@ -514,9 +493,9 @@ bool Map::TransitionRequiresSynchronizationWithGC(Map target) const {
}
bool Map::InstancesNeedRewriting(Map target) const {
- int target_number_of_fields = target->NumberOfFields();
- int target_inobject = target->GetInObjectProperties();
- int target_unused = target->UnusedPropertyFields();
+ int target_number_of_fields = target.NumberOfFields();
+ int target_inobject = target.GetInObjectProperties();
+ int target_unused = target.UnusedPropertyFields();
int old_number_of_fields;
return InstancesNeedRewriting(target, target_number_of_fields,
@@ -534,11 +513,11 @@ bool Map::InstancesNeedRewriting(Map target, int target_number_of_fields,
// If smi descriptors were replaced by double descriptors, rewrite.
DescriptorArray old_desc = instance_descriptors();
- DescriptorArray new_desc = target->instance_descriptors();
+ DescriptorArray new_desc = target.instance_descriptors();
int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
- if (new_desc->GetDetails(i).representation().IsDouble() !=
- old_desc->GetDetails(i).representation().IsDouble()) {
+ if (new_desc.GetDetails(i).representation().IsDouble() !=
+ old_desc.GetDetails(i).representation().IsDouble()) {
return true;
}
}
@@ -562,7 +541,7 @@ int Map::NumberOfFields() const {
DescriptorArray descriptors = instance_descriptors();
int result = 0;
for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
- if (descriptors->GetDetails(i).location() == kField) result++;
+ if (descriptors.GetDetails(i).location() == kField) result++;
}
return result;
}
@@ -572,7 +551,7 @@ Map::FieldCounts Map::GetFieldCounts() const {
int mutable_count = 0;
int const_count = 0;
for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
switch (details.constness()) {
case PropertyConstness::kMutable:
@@ -631,14 +610,15 @@ Handle<Map> Map::CopyGeneralizeAllFields(Isolate* isolate, Handle<Map> map,
MaybeHandle<FieldType> field_type = FieldType::None(isolate);
if (details.location() == kField) {
field_type = handle(
- map->instance_descriptors()->GetFieldType(modify_index), isolate);
+ map->instance_descriptors().GetFieldType(modify_index), isolate);
}
map->PrintGeneralization(
isolate, stdout, reason, modify_index,
new_map->NumberOfOwnDescriptors(), new_map->NumberOfOwnDescriptors(),
details.location() == kDescriptor, details.representation(),
- Representation::Tagged(), field_type, MaybeHandle<Object>(),
- FieldType::Any(isolate), MaybeHandle<Object>());
+ Representation::Tagged(), details.constness(), details.constness(),
+ field_type, MaybeHandle<Object>(), FieldType::Any(isolate),
+ MaybeHandle<Object>());
}
}
new_map->set_elements_kind(elements_kind);
@@ -651,14 +631,14 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
TransitionsAccessor transitions(isolate, *this, &no_gc);
int num_transitions = transitions.NumberOfTransitions();
for (int i = 0; i < num_transitions; ++i) {
- transitions.GetTarget(i)->DeprecateTransitionTree(isolate);
+ transitions.GetTarget(i).DeprecateTransitionTree(isolate);
}
- DCHECK(!constructor_or_backpointer()->IsFunctionTemplateInfo());
+ DCHECK(!constructor_or_backpointer().IsFunctionTemplateInfo());
set_is_deprecated(true);
if (FLAG_trace_maps) {
LOG(isolate, MapEvent("Deprecate", *this, Map()));
}
- dependent_code()->DeoptimizeDependentCodeGroup(
+ dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kTransitionGroup);
NotifyLeafMapLayoutChange(isolate);
}
@@ -668,7 +648,7 @@ void Map::DeprecateTransitionTree(Isolate* isolate) {
void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
LayoutDescriptor new_layout_descriptor) {
// Don't overwrite the empty descriptor array or initial map's descriptors.
- if (NumberOfOwnDescriptors() == 0 || GetBackPointer()->IsUndefined(isolate)) {
+ if (NumberOfOwnDescriptors() == 0 || GetBackPointer().IsUndefined(isolate)) {
return;
}
@@ -678,13 +658,13 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
// all its elements.
Map current = *this;
MarkingBarrierForDescriptorArray(isolate->heap(), current, to_replace,
- to_replace->number_of_descriptors());
- while (current->instance_descriptors() == to_replace) {
- Object next = current->GetBackPointer();
- if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
- current->SetEnumLength(kInvalidEnumCacheSentinel);
- current->UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
- current->NumberOfOwnDescriptors());
+ to_replace.number_of_descriptors());
+ while (current.instance_descriptors() == to_replace) {
+ Object next = current.GetBackPointer();
+ if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
+ current.SetEnumLength(kInvalidEnumCacheSentinel);
+ current.UpdateDescriptors(isolate, new_descriptors, new_layout_descriptor,
+ current.NumberOfOwnDescriptors());
current = Map::cast(next);
}
set_owns_descriptors(false);
@@ -693,13 +673,13 @@ void Map::ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors,
Map Map::FindRootMap(Isolate* isolate) const {
Map result = *this;
while (true) {
- Object back = result->GetBackPointer();
- if (back->IsUndefined(isolate)) {
+ Object back = result.GetBackPointer();
+ if (back.IsUndefined(isolate)) {
// Initial map always owns descriptors and doesn't have unused entries
// in the descriptor array.
- DCHECK(result->owns_descriptors());
- DCHECK_EQ(result->NumberOfOwnDescriptors(),
- result->instance_descriptors()->number_of_descriptors());
+ DCHECK(result.owns_descriptors());
+ DCHECK_EQ(result.NumberOfOwnDescriptors(),
+ result.instance_descriptors().number_of_descriptors());
return result;
}
result = Map::cast(back);
@@ -708,13 +688,13 @@ Map Map::FindRootMap(Isolate* isolate) const {
Map Map::FindFieldOwner(Isolate* isolate, int descriptor) const {
DisallowHeapAllocation no_allocation;
- DCHECK_EQ(kField, instance_descriptors()->GetDetails(descriptor).location());
+ DCHECK_EQ(kField, instance_descriptors().GetDetails(descriptor).location());
Map result = *this;
while (true) {
- Object back = result->GetBackPointer();
- if (back->IsUndefined(isolate)) break;
+ Object back = result.GetBackPointer();
+ if (back.IsUndefined(isolate)) break;
const Map parent = Map::cast(back);
- if (parent->NumberOfOwnDescriptors() <= descriptor) break;
+ if (parent.NumberOfOwnDescriptors() <= descriptor) break;
result = parent;
}
return result;
@@ -727,7 +707,7 @@ void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
// We store raw pointers in the queue, so no allocations are allowed.
DisallowHeapAllocation no_allocation;
- PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
+ PropertyDetails details = instance_descriptors().GetDetails(descriptor);
if (details.location() != kField) return;
DCHECK_EQ(kData, details.kind());
@@ -745,8 +725,8 @@ void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
Map target = transitions.GetTarget(i);
backlog.push(target);
}
- DescriptorArray descriptors = current->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
+ DescriptorArray descriptors = current.instance_descriptors();
+ PropertyDetails details = descriptors.GetDetails(descriptor);
// It is allowed to change representation here only from None
// to something or from Smi or HeapObject to Tagged.
@@ -756,19 +736,17 @@ void Map::UpdateFieldType(Isolate* isolate, int descriptor, Handle<Name> name,
// Skip if already updated the shared descriptor.
if (new_constness != details.constness() ||
!new_representation.Equals(details.representation()) ||
- descriptors->GetFieldType(descriptor) != *new_wrapped_type.object()) {
- DCHECK_IMPLIES(!FLAG_track_constant_fields,
- new_constness == PropertyConstness::kMutable);
+ descriptors.GetFieldType(descriptor) != *new_wrapped_type.object()) {
Descriptor d = Descriptor::DataField(
- name, descriptors->GetFieldIndex(descriptor), details.attributes(),
+ name, descriptors.GetFieldIndex(descriptor), details.attributes(),
new_constness, new_representation, new_wrapped_type);
- descriptors->Replace(descriptor, &d);
+ descriptors.Replace(descriptor, &d);
}
}
}
bool FieldTypeIsCleared(Representation rep, FieldType type) {
- return type->IsNone() && rep.IsHeapObject();
+ return type.IsNone() && rep.IsHeapObject();
}
// static
@@ -833,15 +811,16 @@ void Map::GeneralizeField(Isolate* isolate, Handle<Map> map, int modify_index,
MaybeObjectHandle wrapped_type(WrapFieldType(isolate, new_field_type));
field_owner->UpdateFieldType(isolate, modify_index, name, new_constness,
new_representation, wrapped_type);
- field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
+ field_owner->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kFieldOwnerGroup);
if (FLAG_trace_generalization) {
map->PrintGeneralization(
isolate, stdout, "field type generalization", modify_index,
map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
- details.representation(), details.representation(), old_field_type,
- MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
+ details.representation(), details.representation(), old_constness,
+ new_constness, old_field_type, MaybeHandle<Object>(), new_field_type,
+ MaybeHandle<Object>());
}
}
@@ -877,7 +856,7 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
do {
target = TransitionsAccessor(isolate, target, &no_allocation)
.GetMigrationTarget();
- } while (!target.is_null() && target->is_deprecated());
+ } while (!target.is_null() && target.is_deprecated());
if (target.is_null()) return Map();
// TODO(ishell): if this validation ever become a bottleneck consider adding a
@@ -888,12 +867,12 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
// types instead of old_map's types.
// Go to slow map updating if the old_map has fast properties with cleared
// field types.
- int old_nof = old_map->NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map->instance_descriptors();
+ int old_nof = old_map.NumberOfOwnDescriptors();
+ DescriptorArray old_descriptors = old_map.instance_descriptors();
for (int i = 0; i < old_nof; i++) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
+ PropertyDetails old_details = old_descriptors.GetDetails(i);
if (old_details.location() == kField && old_details.kind() == kData) {
- FieldType old_type = old_descriptors->GetFieldType(i);
+ FieldType old_type = old_descriptors.GetFieldType(i);
if (FieldTypeIsCleared(old_details.representation(), old_type)) {
return Map();
}
@@ -947,8 +926,8 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// Figure out the most restrictive integrity level transition (it should
// be the last one in the transition tree).
- DCHECK(!map->is_extensible());
- Map previous = Map::cast(map->GetBackPointer());
+ DCHECK(!map.is_extensible());
+ Map previous = Map::cast(map.GetBackPointer());
TransitionsAccessor last_transitions(isolate, previous, no_allocation);
if (!last_transitions.HasIntegrityLevelTransitionTo(
map, &(info.integrity_level_symbol), &(info.integrity_level))) {
@@ -965,8 +944,8 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
// Now walk up the back pointer chain and skip all integrity level
// transitions. If we encounter any non-integrity level transition interleaved
// with integrity level transitions, just bail out.
- while (!source_map->is_extensible()) {
- previous = Map::cast(source_map->GetBackPointer());
+ while (!source_map.is_extensible()) {
+ previous = Map::cast(source_map.GetBackPointer());
TransitionsAccessor transitions(isolate, previous, no_allocation);
if (!transitions.HasIntegrityLevelTransitionTo(source_map)) {
return info;
@@ -975,7 +954,7 @@ IntegrityLevelTransitionInfo DetectIntegrityLevelTransitions(
}
// Integrity-level transitions never change number of descriptors.
- CHECK_EQ(map->NumberOfOwnDescriptors(), source_map->NumberOfOwnDescriptors());
+ CHECK_EQ(map.NumberOfOwnDescriptors(), source_map.NumberOfOwnDescriptors());
info.has_integrity_level_transition = true;
info.integrity_level_source_map = source_map;
@@ -989,26 +968,25 @@ Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
DisallowDeoptimization no_deoptimization(isolate);
// Check the state of the root map.
- Map root_map = old_map->FindRootMap(isolate);
- if (root_map->is_deprecated()) {
- JSFunction constructor = JSFunction::cast(root_map->GetConstructor());
- DCHECK(constructor->has_initial_map());
- DCHECK(constructor->initial_map()->is_dictionary_map());
- if (constructor->initial_map()->elements_kind() !=
- old_map->elements_kind()) {
+ Map root_map = old_map.FindRootMap(isolate);
+ if (root_map.is_deprecated()) {
+ JSFunction constructor = JSFunction::cast(root_map.GetConstructor());
+ DCHECK(constructor.has_initial_map());
+ DCHECK(constructor.initial_map().is_dictionary_map());
+ if (constructor.initial_map().elements_kind() != old_map.elements_kind()) {
return Map();
}
- return constructor->initial_map();
+ return constructor.initial_map();
}
- if (!old_map->EquivalentToForTransition(root_map)) return Map();
+ if (!old_map.EquivalentToForTransition(root_map)) return Map();
- ElementsKind from_kind = root_map->elements_kind();
- ElementsKind to_kind = old_map->elements_kind();
+ ElementsKind from_kind = root_map.elements_kind();
+ ElementsKind to_kind = old_map.elements_kind();
IntegrityLevelTransitionInfo info(old_map);
- if (root_map->is_extensible() != old_map->is_extensible()) {
- DCHECK(!old_map->is_extensible());
- DCHECK(root_map->is_extensible());
+ if (root_map.is_extensible() != old_map.is_extensible()) {
+ DCHECK(!old_map.is_extensible());
+ DCHECK(root_map.is_extensible());
info = DetectIntegrityLevelTransitions(old_map, isolate, &no_allocation);
// Bail out if there were some private symbol transitions mixed up
// with the integrity level transitions.
@@ -1017,18 +995,19 @@ Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
// the integrity level transition sets the elements to dictionary mode.
DCHECK(to_kind == DICTIONARY_ELEMENTS ||
to_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
- IsFixedTypedArrayElementsKind(to_kind));
- to_kind = info.integrity_level_source_map->elements_kind();
+ IsTypedArrayElementsKind(to_kind) ||
+ IsHoleyFrozenOrSealedElementsKind(to_kind));
+ to_kind = info.integrity_level_source_map.elements_kind();
}
if (from_kind != to_kind) {
// Try to follow existing elements kind transitions.
- root_map = root_map->LookupElementsTransitionMap(isolate, to_kind);
+ root_map = root_map.LookupElementsTransitionMap(isolate, to_kind);
if (root_map.is_null()) return Map();
// From here on, use the map with correct elements kind as root map.
}
// Replay the transitions as they were before the integrity level transition.
- Map result = root_map->TryReplayPropertyTransitions(
+ Map result = root_map.TryReplayPropertyTransitions(
isolate, info.integrity_level_source_map);
if (result.is_null()) return Map();
@@ -1039,9 +1018,9 @@ Map Map::TryUpdateSlow(Isolate* isolate, Map old_map) {
}
DCHECK_IMPLIES(!result.is_null(),
- old_map->elements_kind() == result->elements_kind());
+ old_map.elements_kind() == result.elements_kind());
DCHECK_IMPLIES(!result.is_null(),
- old_map->instance_type() == result->instance_type());
+ old_map.instance_type() == result.instance_type());
return result;
}
@@ -1051,21 +1030,21 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
int root_nof = NumberOfOwnDescriptors();
- int old_nof = old_map->NumberOfOwnDescriptors();
- DescriptorArray old_descriptors = old_map->instance_descriptors();
+ int old_nof = old_map.NumberOfOwnDescriptors();
+ DescriptorArray old_descriptors = old_map.instance_descriptors();
Map new_map = *this;
for (int i = root_nof; i < old_nof; ++i) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
+ PropertyDetails old_details = old_descriptors.GetDetails(i);
Map transition =
TransitionsAccessor(isolate, new_map, &no_allocation)
- .SearchTransition(old_descriptors->GetKey(i), old_details.kind(),
+ .SearchTransition(old_descriptors.GetKey(i), old_details.kind(),
old_details.attributes());
if (transition.is_null()) return Map();
new_map = transition;
- DescriptorArray new_descriptors = new_map->instance_descriptors();
+ DescriptorArray new_descriptors = new_map.instance_descriptors();
- PropertyDetails new_details = new_descriptors->GetDetails(i);
+ PropertyDetails new_details = new_descriptors.GetDetails(i);
DCHECK_EQ(old_details.kind(), new_details.kind());
DCHECK_EQ(old_details.attributes(), new_details.attributes());
if (!IsGeneralizableTo(old_details.constness(), new_details.constness())) {
@@ -1077,46 +1056,37 @@ Map Map::TryReplayPropertyTransitions(Isolate* isolate, Map old_map) {
}
if (new_details.location() == kField) {
if (new_details.kind() == kData) {
- FieldType new_type = new_descriptors->GetFieldType(i);
+ FieldType new_type = new_descriptors.GetFieldType(i);
// Cleared field types need special treatment. They represent lost
// knowledge, so we must first generalize the new_type to "Any".
if (FieldTypeIsCleared(new_details.representation(), new_type)) {
return Map();
}
DCHECK_EQ(kData, old_details.kind());
- if (old_details.location() == kField) {
- FieldType old_type = old_descriptors->GetFieldType(i);
- if (FieldTypeIsCleared(old_details.representation(), old_type) ||
- !old_type->NowIs(new_type)) {
- return Map();
- }
- } else {
- DCHECK_EQ(kDescriptor, old_details.location());
- DCHECK(!FLAG_track_constant_fields);
- Object old_value = old_descriptors->GetStrongValue(i);
- if (!new_type->NowContains(old_value)) {
- return Map();
- }
+ DCHECK_EQ(kField, old_details.location());
+ FieldType old_type = old_descriptors.GetFieldType(i);
+ if (FieldTypeIsCleared(old_details.representation(), old_type) ||
+ !old_type.NowIs(new_type)) {
+ return Map();
}
-
} else {
DCHECK_EQ(kAccessor, new_details.kind());
#ifdef DEBUG
- FieldType new_type = new_descriptors->GetFieldType(i);
- DCHECK(new_type->IsAny());
+ FieldType new_type = new_descriptors.GetFieldType(i);
+ DCHECK(new_type.IsAny());
#endif
UNREACHABLE();
}
} else {
DCHECK_EQ(kDescriptor, new_details.location());
if (old_details.location() == kField ||
- old_descriptors->GetStrongValue(i) !=
- new_descriptors->GetStrongValue(i)) {
+ old_descriptors.GetStrongValue(i) !=
+ new_descriptors.GetStrongValue(i)) {
return Map();
}
}
}
- if (new_map->NumberOfOwnDescriptors() != old_nof) return Map();
+ if (new_map.NumberOfOwnDescriptors() != old_nof) return Map();
return new_map;
}
@@ -1168,11 +1138,11 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
descriptors->number_of_descriptors());
Map current = *map;
- while (current->instance_descriptors() == *descriptors) {
- Object next = current->GetBackPointer();
- if (next->IsUndefined(isolate)) break; // Stop overwriting at initial map.
- current->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
- current->NumberOfOwnDescriptors());
+ while (current.instance_descriptors() == *descriptors) {
+ Object next = current.GetBackPointer();
+ if (next.IsUndefined(isolate)) break; // Stop overwriting at initial map.
+ current.UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
+ current.NumberOfOwnDescriptors());
current = Map::cast(next);
}
map->UpdateDescriptors(isolate, *new_descriptors, layout_descriptor,
@@ -1182,7 +1152,7 @@ void Map::EnsureDescriptorSlack(Isolate* isolate, Handle<Map> map, int slack) {
// static
Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
Handle<HeapObject> prototype) {
- Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ Handle<Map> map(isolate->native_context()->object_function().initial_map(),
isolate);
if (map->prototype() == *prototype) return map;
if (prototype->IsNull(isolate)) {
@@ -1190,7 +1160,7 @@ Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
}
if (prototype->IsJSObject()) {
Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
- if (!js_prototype->map()->is_prototype_map()) {
+ if (!js_prototype->map().is_prototype_map()) {
JSObject::OptimizeAsPrototype(js_prototype);
}
Handle<PrototypeInfo> info =
@@ -1212,7 +1182,7 @@ Handle<Map> Map::GetObjectCreateMap(Isolate* isolate,
// static
MaybeHandle<Map> Map::TryGetObjectCreateMap(Isolate* isolate,
Handle<HeapObject> prototype) {
- Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ Handle<Map> map(isolate->native_context()->object_function().initial_map(),
isolate);
if (map->prototype() == *prototype) return map;
if (prototype->IsNull(isolate)) {
@@ -1220,7 +1190,7 @@ MaybeHandle<Map> Map::TryGetObjectCreateMap(Isolate* isolate,
}
if (!prototype->IsJSObject()) return MaybeHandle<Map>();
Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
- if (!js_prototype->map()->is_prototype_map()) return MaybeHandle<Map>();
+ if (!js_prototype->map().is_prototype_map()) return MaybeHandle<Map>();
Handle<PrototypeInfo> info =
Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
if (!info->HasObjectCreateMap()) return MaybeHandle<Map>();
@@ -1235,6 +1205,15 @@ static bool ContainsMap(MapHandles const& maps, Map map) {
return false;
}
+static bool HasElementsKind(MapHandles const& maps,
+ ElementsKind elements_kind) {
+ for (Handle<Map> current : maps) {
+ if (!current.is_null() && current->elements_kind() == elements_kind)
+ return true;
+ }
+ return false;
+}
+
Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
MapHandles const& candidates) {
DisallowHeapAllocation no_allocation;
@@ -1250,22 +1229,25 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
// Check the state of the root map.
Map root_map = FindRootMap(isolate);
if (!EquivalentToForElementsKindTransition(root_map)) return Map();
- root_map = root_map->LookupElementsTransitionMap(isolate, kind);
+ root_map = root_map.LookupElementsTransitionMap(isolate, kind);
DCHECK(!root_map.is_null());
// Starting from the next existing elements kind transition try to
// replay the property transitions that does not involve instance rewriting
// (ElementsTransitionAndStoreStub does not support that).
- for (root_map = root_map->ElementsTransitionMap();
- !root_map.is_null() && root_map->has_fast_elements();
- root_map = root_map->ElementsTransitionMap()) {
- Map current = root_map->TryReplayPropertyTransitions(isolate, *this);
+ for (root_map = root_map.ElementsTransitionMap();
+ !root_map.is_null() && root_map.has_fast_elements();
+ root_map = root_map.ElementsTransitionMap()) {
+ // If root_map's elements kind doesn't match any of the elements kind in
+ // the candidates there is no need to do any additional work.
+ if (!HasElementsKind(candidates, root_map.elements_kind())) continue;
+ Map current = root_map.TryReplayPropertyTransitions(isolate, *this);
if (current.is_null()) continue;
if (InstancesNeedRewriting(current)) continue;
if (ContainsMap(candidates, current) &&
- (packed || !IsFastPackedElementsKind(current->elements_kind()))) {
+ (packed || !IsFastPackedElementsKind(current.elements_kind()))) {
transition = current;
- packed = packed && IsFastPackedElementsKind(current->elements_kind());
+ packed = packed && IsFastPackedElementsKind(current.elements_kind());
}
}
}
@@ -1275,25 +1257,25 @@ Map Map::FindElementsKindTransitionedMap(Isolate* isolate,
static Map FindClosestElementsTransition(Isolate* isolate, Map map,
ElementsKind to_kind) {
// Ensure we are requested to search elements kind transition "near the root".
- DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
- map->NumberOfOwnDescriptors());
+ DCHECK_EQ(map.FindRootMap(isolate).NumberOfOwnDescriptors(),
+ map.NumberOfOwnDescriptors());
Map current_map = map;
- ElementsKind kind = map->elements_kind();
+ ElementsKind kind = map.elements_kind();
while (kind != to_kind) {
- Map next_map = current_map->ElementsTransitionMap();
+ Map next_map = current_map.ElementsTransitionMap();
if (next_map.is_null()) return current_map;
- kind = next_map->elements_kind();
+ kind = next_map.elements_kind();
current_map = next_map;
}
- DCHECK_EQ(to_kind, current_map->elements_kind());
+ DCHECK_EQ(to_kind, current_map.elements_kind());
return current_map;
}
Map Map::LookupElementsTransitionMap(Isolate* isolate, ElementsKind to_kind) {
Map to_map = FindClosestElementsTransition(isolate, *this, to_kind);
- if (to_map->elements_kind() == to_kind) return to_map;
+ if (to_map.elements_kind() == to_kind) return to_map;
return Map();
}
@@ -1314,24 +1296,24 @@ Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
ElementsKind from_kind = map->elements_kind();
if (from_kind == to_kind) return map;
- Context native_context = isolate->context()->native_context();
+ Context native_context = isolate->context().native_context();
if (from_kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
- if (*map == native_context->fast_aliased_arguments_map()) {
+ if (*map == native_context.fast_aliased_arguments_map()) {
DCHECK_EQ(SLOW_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
- return handle(native_context->slow_aliased_arguments_map(), isolate);
+ return handle(native_context.slow_aliased_arguments_map(), isolate);
}
} else if (from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS) {
- if (*map == native_context->slow_aliased_arguments_map()) {
+ if (*map == native_context.slow_aliased_arguments_map()) {
DCHECK_EQ(FAST_SLOPPY_ARGUMENTS_ELEMENTS, to_kind);
- return handle(native_context->fast_aliased_arguments_map(), isolate);
+ return handle(native_context.fast_aliased_arguments_map(), isolate);
}
} else if (IsFastElementsKind(from_kind) && IsFastElementsKind(to_kind)) {
// Reuse map transitions for JSArrays.
DisallowHeapAllocation no_gc;
- if (native_context->GetInitialJSArrayMap(from_kind) == *map) {
+ if (native_context.GetInitialJSArrayMap(from_kind) == *map) {
Object maybe_transitioned_map =
- native_context->get(Context::ArrayMapIndex(to_kind));
- if (maybe_transitioned_map->IsMap()) {
+ native_context.get(Context::ArrayMapIndex(to_kind));
+ if (maybe_transitioned_map.IsMap()) {
return handle(Map::cast(maybe_transitioned_map), isolate);
}
}
@@ -1341,8 +1323,8 @@ Handle<Map> Map::TransitionElementsTo(Isolate* isolate, Handle<Map> map,
// Check if we can go back in the elements kind transition chain.
if (IsHoleyElementsKind(from_kind) &&
to_kind == GetPackedElementsKind(from_kind) &&
- map->GetBackPointer()->IsMap() &&
- Map::cast(map->GetBackPointer())->elements_kind() == to_kind) {
+ map->GetBackPointer().IsMap() &&
+ Map::cast(map->GetBackPointer()).elements_kind() == to_kind) {
return handle(Map::cast(map->GetBackPointer()), isolate);
}
@@ -1410,8 +1392,8 @@ int Map::NumberOfEnumerableProperties() const {
DescriptorArray descs = instance_descriptors();
int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
- if ((descs->GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
- !descs->GetKey(i)->FilterKey(ENUMERABLE_STRINGS)) {
+ if ((descs.GetDetails(i).attributes() & ONLY_ENUMERABLE) == 0 &&
+ !descs.GetKey(i).FilterKey(ENUMERABLE_STRINGS)) {
result++;
}
}
@@ -1423,7 +1405,7 @@ int Map::NextFreePropertyIndex() const {
int number_of_own_descriptors = NumberOfOwnDescriptors();
DescriptorArray descs = instance_descriptors();
for (int i = 0; i < number_of_own_descriptors; i++) {
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
int candidate = details.field_index() + details.field_width_in_words();
if (candidate > free_index) free_index = candidate;
@@ -1448,20 +1430,20 @@ bool Map::DictionaryElementsInPrototypeChainOnly(Isolate* isolate) {
for (PrototypeIterator iter(isolate, *this); !iter.IsAtEnd();
iter.Advance()) {
// Be conservative, don't walk into proxies.
- if (iter.GetCurrent()->IsJSProxy()) return true;
+ if (iter.GetCurrent().IsJSProxy()) return true;
// String wrappers have non-configurable, non-writable elements.
- if (iter.GetCurrent()->IsStringWrapper()) return true;
+ if (iter.GetCurrent().IsStringWrapper()) return true;
JSObject current = iter.GetCurrent<JSObject>();
- if (current->HasDictionaryElements() &&
- current->element_dictionary()->requires_slow_elements()) {
+ if (current.HasDictionaryElements() &&
+ current.element_dictionary().requires_slow_elements()) {
return true;
}
- if (current->HasSlowArgumentsElements()) {
- FixedArray parameter_map = FixedArray::cast(current->elements());
- Object arguments = parameter_map->get(1);
- if (NumberDictionary::cast(arguments)->requires_slow_elements()) {
+ if (current.HasSlowArgumentsElements()) {
+ FixedArray parameter_map = FixedArray::cast(current.elements());
+ Object arguments = parameter_map.get(1);
+ if (NumberDictionary::cast(arguments).requires_slow_elements()) {
return true;
}
}
@@ -1486,6 +1468,7 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
new_bit_field3 =
EnumLengthBits::update(new_bit_field3, kInvalidEnumCacheSentinel);
new_bit_field3 = IsDeprecatedBit::update(new_bit_field3, false);
+ new_bit_field3 = IsInRetainedMapListBit::update(new_bit_field3, false);
if (!map->is_dictionary_map()) {
new_bit_field3 = IsUnstableBit::update(new_bit_field3, false);
}
@@ -1517,26 +1500,29 @@ Handle<Map> Map::Normalize(Isolate* isolate, Handle<Map> fast_map,
// applied to the shared map, dependent code and weak cell cache.
Handle<Map> fresh = Map::CopyNormalized(isolate, fast_map, mode);
+ STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
+ Map::kDependentCodeOffset + kTaggedSize);
+ DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
+ reinterpret_cast<void*>(new_map->address()),
+ Map::kBitField3Offset));
+ // The IsInRetainedMapListBit might be different if the {new_map}
+ // that we got from the {cache} was already embedded into optimized
+ // code somewhere.
+ DCHECK_EQ(fresh->bit_field3() & ~IsInRetainedMapListBit::kMask,
+ new_map->bit_field3() & ~IsInRetainedMapListBit::kMask);
+ int offset = Map::kBitField3Offset + kInt32Size;
+ DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
+ reinterpret_cast<void*>(new_map->address() + offset),
+ Map::kDependentCodeOffset - offset));
+ offset = Map::kPrototypeValidityCellOffset + kTaggedSize;
if (new_map->is_prototype_map()) {
// For prototype maps, the PrototypeInfo is not copied.
- DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
- reinterpret_cast<void*>(new_map->address()),
- kTransitionsOrPrototypeInfoOffset));
+ STATIC_ASSERT(Map::kTransitionsOrPrototypeInfoOffset ==
+ Map::kPrototypeValidityCellOffset + kTaggedSize);
+ offset = kTransitionsOrPrototypeInfoOffset + kTaggedSize;
DCHECK_EQ(fresh->raw_transitions(),
MaybeObject::FromObject(Smi::kZero));
- STATIC_ASSERT(kDescriptorsOffset ==
- kTransitionsOrPrototypeInfoOffset + kTaggedSize);
- DCHECK_EQ(0, memcmp(fresh->RawField(kDescriptorsOffset).ToVoidPtr(),
- new_map->RawField(kDescriptorsOffset).ToVoidPtr(),
- kDependentCodeOffset - kDescriptorsOffset));
- } else {
- DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address()),
- reinterpret_cast<void*>(new_map->address()),
- Map::kDependentCodeOffset));
}
- STATIC_ASSERT(Map::kPrototypeValidityCellOffset ==
- Map::kDependentCodeOffset + kTaggedSize);
- int offset = Map::kPrototypeValidityCellOffset + kTaggedSize;
DCHECK_EQ(0, memcmp(reinterpret_cast<void*>(fresh->address() + offset),
reinterpret_cast<void*>(new_map->address() + offset),
Map::kSize - offset));
@@ -1600,8 +1586,8 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
// Function's initial map is a sloppy function map. Same holds for
// GeneratorFunction / AsyncFunction and its initial map.
Object constructor = map->GetConstructor();
- DCHECK(constructor->IsJSFunction());
- DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
+ DCHECK(constructor.IsJSFunction());
+ DCHECK(*map == JSFunction::cast(constructor).initial_map() ||
*map == *isolate->strict_function_map() ||
*map == *isolate->strict_function_with_name_map() ||
*map == *isolate->generator_function_map() ||
@@ -1617,7 +1603,7 @@ void EnsureInitialMap(Isolate* isolate, Handle<Map> map) {
// does not contain descriptors that do not belong to the map.
DCHECK(map->owns_descriptors());
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors()->number_of_descriptors());
+ map->instance_descriptors().number_of_descriptors());
}
} // namespace
@@ -1673,7 +1659,7 @@ Handle<Map> Map::ShareDescriptor(Isolate* isolate, Handle<Map> map,
// array, implying that its NumberOfOwnDescriptors equals the number of
// descriptors in the descriptor array.
DCHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors()->number_of_descriptors());
+ map->instance_descriptors().number_of_descriptors());
Handle<Map> result = CopyDropDescriptors(isolate, map);
Handle<Name> name = descriptor->GetKey();
@@ -1720,24 +1706,14 @@ void Map::ConnectTransition(Isolate* isolate, Handle<Map> parent,
child->may_have_interesting_symbols());
DCHECK_IMPLIES(parent->may_have_interesting_symbols(),
child->may_have_interesting_symbols());
- // Do not track transitions during bootstrap except for element transitions.
- if (isolate->bootstrapper()->IsActive() &&
- !name.is_identical_to(isolate->factory()->elements_transition_symbol())) {
- if (FLAG_trace_maps) {
- LOG(isolate,
- MapEvent("Transition", *parent, *child,
- child->is_prototype_map() ? "prototype" : "", *name));
- }
- return;
- }
- if (!parent->GetBackPointer()->IsUndefined(isolate)) {
+ if (!parent->GetBackPointer().IsUndefined(isolate)) {
parent->set_owns_descriptors(false);
} else {
// |parent| is initial map and it must keep the ownership, there must be no
// descriptors in the descriptors array that do not belong to the map.
DCHECK(parent->owns_descriptors());
DCHECK_EQ(parent->NumberOfOwnDescriptors(),
- parent->instance_descriptors()->number_of_descriptors());
+ parent->instance_descriptors().number_of_descriptors());
}
if (parent->is_prototype_map()) {
DCHECK(child->is_prototype_map());
@@ -1864,7 +1840,7 @@ void Map::InstallDescriptors(Isolate* isolate, Handle<Map> parent,
#ifdef VERIFY_HEAP
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
- CHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
+ CHECK(child->layout_descriptor().IsConsistentWithMap(*child));
}
#else
SLOW_DCHECK(child->layout_descriptor()->IsConsistentWithMap(*child));
@@ -1891,14 +1867,14 @@ Handle<Map> Map::CopyAsElementsKind(Isolate* isolate, Handle<Map> map,
Map maybe_elements_transition_map;
if (flag == INSERT_TRANSITION) {
// Ensure we are requested to add elements kind transition "near the root".
- DCHECK_EQ(map->FindRootMap(isolate)->NumberOfOwnDescriptors(),
+ DCHECK_EQ(map->FindRootMap(isolate).NumberOfOwnDescriptors(),
map->NumberOfOwnDescriptors());
maybe_elements_transition_map = map->ElementsTransitionMap();
- DCHECK(maybe_elements_transition_map.is_null() ||
- (maybe_elements_transition_map->elements_kind() ==
- DICTIONARY_ELEMENTS &&
- kind == DICTIONARY_ELEMENTS));
+ DCHECK(
+ maybe_elements_transition_map.is_null() ||
+ (maybe_elements_transition_map.elements_kind() == DICTIONARY_ELEMENTS &&
+ kind == DICTIONARY_ELEMENTS));
DCHECK(!IsFastElementsKind(kind) ||
IsMoreGeneralElementsKindTransition(map->elements_kind(), kind));
DCHECK(kind != map->elements_kind());
@@ -2025,25 +2001,29 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
return copy;
}
-Handle<Map> Map::CopyForPreventExtensions(Isolate* isolate, Handle<Map> map,
- PropertyAttributes attrs_to_add,
- Handle<Symbol> transition_marker,
- const char* reason) {
+Handle<Map> Map::CopyForPreventExtensions(
+ Isolate* isolate, Handle<Map> map, PropertyAttributes attrs_to_add,
+ Handle<Symbol> transition_marker, const char* reason,
+ bool old_map_is_dictionary_elements_kind) {
int num_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
isolate, handle(map->instance_descriptors(), isolate), num_descriptors,
attrs_to_add);
Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
isolate);
+ // Do not track transitions during bootstrapping.
+ TransitionFlag flag =
+ isolate->bootstrapper()->IsActive() ? OMIT_TRANSITION : INSERT_TRANSITION;
Handle<Map> new_map = CopyReplaceDescriptors(
- isolate, map, new_desc, new_layout_descriptor, INSERT_TRANSITION,
- transition_marker, reason, SPECIAL_TRANSITION);
+ isolate, map, new_desc, new_layout_descriptor, flag, transition_marker,
+ reason, SPECIAL_TRANSITION);
new_map->set_is_extensible(false);
- if (!IsFixedTypedArrayElementsKind(map->elements_kind())) {
+ if (!IsTypedArrayElementsKind(map->elements_kind())) {
ElementsKind new_kind = IsStringWrapperElementsKind(map->elements_kind())
? SLOW_STRING_WRAPPER_ELEMENTS
: DICTIONARY_ELEMENTS;
- if (FLAG_enable_sealed_frozen_elements_kind) {
+ if (FLAG_enable_sealed_frozen_elements_kind &&
+ !old_map_is_dictionary_elements_kind) {
switch (map->elements_kind()) {
case PACKED_ELEMENTS:
if (attrs_to_add == SEALED) {
@@ -2057,6 +2037,18 @@ Handle<Map> Map::CopyForPreventExtensions(Isolate* isolate, Handle<Map> map,
new_kind = PACKED_FROZEN_ELEMENTS;
}
break;
+ case HOLEY_ELEMENTS:
+ if (attrs_to_add == SEALED) {
+ new_kind = HOLEY_SEALED_ELEMENTS;
+ } else if (attrs_to_add == FROZEN) {
+ new_kind = HOLEY_FROZEN_ELEMENTS;
+ }
+ break;
+ case HOLEY_SEALED_ELEMENTS:
+ if (attrs_to_add == FROZEN) {
+ new_kind = HOLEY_FROZEN_ELEMENTS;
+ }
+ break;
default:
break;
}
@@ -2070,12 +2062,12 @@ namespace {
bool CanHoldValue(DescriptorArray descriptors, int descriptor,
PropertyConstness constness, Object value) {
- PropertyDetails details = descriptors->GetDetails(descriptor);
+ PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.location() == kField) {
if (details.kind() == kData) {
return IsGeneralizableTo(constness, details.constness()) &&
- value->FitsRepresentation(details.representation()) &&
- descriptors->GetFieldType(descriptor)->NowContains(value);
+ value.FitsRepresentation(details.representation()) &&
+ descriptors.GetFieldType(descriptor).NowContains(value);
} else {
DCHECK_EQ(kAccessor, details.kind());
return false;
@@ -2084,15 +2076,8 @@ bool CanHoldValue(DescriptorArray descriptors, int descriptor,
} else {
DCHECK_EQ(kDescriptor, details.location());
DCHECK_EQ(PropertyConstness::kConst, details.constness());
- if (details.kind() == kData) {
- DCHECK(!FLAG_track_constant_fields);
- DCHECK(descriptors->GetStrongValue(descriptor) != value ||
- value->FitsRepresentation(details.representation()));
- return descriptors->GetStrongValue(descriptor) == value;
- } else {
- DCHECK_EQ(kAccessor, details.kind());
- return false;
- }
+ DCHECK_EQ(kAccessor, details.kind());
+ return false;
}
UNREACHABLE();
}
@@ -2107,7 +2092,7 @@ Handle<Map> UpdateDescriptorForValue(Isolate* isolate, Handle<Map> map,
}
PropertyAttributes attributes =
- map->instance_descriptors()->GetDetails(descriptor).attributes();
+ map->instance_descriptors().GetDetails(descriptor).attributes();
Representation representation = value->OptimalRepresentation();
Handle<FieldType> type = value->OptimalType(isolate, representation);
@@ -2154,26 +2139,23 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> transition(maybe_transition, isolate);
int descriptor = transition->LastAdded();
- DCHECK_EQ(attributes, transition->instance_descriptors()
- ->GetDetails(descriptor)
- .attributes());
+ DCHECK_EQ(
+ attributes,
+ transition->instance_descriptors().GetDetails(descriptor).attributes());
return UpdateDescriptorForValue(isolate, transition, descriptor, constness,
value);
}
- TransitionFlag flag = INSERT_TRANSITION;
+ // Do not track transitions during bootstrapping.
+ TransitionFlag flag =
+ isolate->bootstrapper()->IsActive() ? OMIT_TRANSITION : INSERT_TRANSITION;
MaybeHandle<Map> maybe_map;
if (!map->TooManyFastProperties(store_origin)) {
- if (!FLAG_track_constant_fields && value->IsJSFunction()) {
- maybe_map =
- Map::CopyWithConstant(isolate, map, name, value, attributes, flag);
- } else {
- Representation representation = value->OptimalRepresentation();
- Handle<FieldType> type = value->OptimalType(isolate, representation);
- maybe_map = Map::CopyWithField(isolate, map, name, type, attributes,
- constness, representation, flag);
- }
+ Representation representation = value->OptimalRepresentation();
+ Handle<FieldType> type = value->OptimalType(isolate, representation);
+ maybe_map = Map::CopyWithField(isolate, map, name, type, attributes,
+ constness, representation, flag);
}
Handle<Map> result;
@@ -2185,18 +2167,18 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
ScopedVector<char> name_buffer(100);
name->NameShortPrint(name_buffer);
buffer.reset(new ScopedVector<char>(128));
- SNPrintF(*buffer, "TooManyFastProperties %s", name_buffer.start());
- reason = buffer->start();
+ SNPrintF(*buffer, "TooManyFastProperties %s", name_buffer.begin());
+ reason = buffer->begin();
}
#endif
Handle<Object> maybe_constructor(map->GetConstructor(), isolate);
if (FLAG_feedback_normalization && map->new_target_is_base() &&
maybe_constructor->IsJSFunction() &&
- !JSFunction::cast(*maybe_constructor)->shared()->native()) {
+ !JSFunction::cast(*maybe_constructor).shared().native()) {
Handle<JSFunction> constructor =
Handle<JSFunction>::cast(maybe_constructor);
DCHECK_NE(*constructor,
- constructor->context()->native_context()->object_function());
+ constructor->context().native_context().object_function());
Handle<Map> initial_map(constructor->initial_map(), isolate);
result = Map::Normalize(isolate, initial_map, CLEAR_INOBJECT_PROPERTIES,
reason);
@@ -2205,7 +2187,7 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
JSFunction::SetInitialMap(constructor, result, prototype);
// Deoptimize all code that embeds the previous initial map.
- initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
+ initial_map->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kInitialMapChangedGroup);
if (!result->EquivalentToForNormalization(*map,
CLEAR_INOBJECT_PROPERTIES)) {
@@ -2226,7 +2208,7 @@ Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
// Dictionaries have to be reconfigured in-place.
DCHECK(!map->is_dictionary_map());
- if (!map->GetBackPointer()->IsMap()) {
+ if (!map->GetBackPointer().IsMap()) {
// There is no benefit from reconstructing transition tree for maps without
// back pointers.
return CopyGeneralizeAllFields(isolate, map, map->elements_kind(),
@@ -2241,7 +2223,7 @@ Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
MapUpdater mu(isolate, map);
DCHECK_EQ(kData, kind); // Only kData case is supported so far.
Handle<Map> new_map = mu.ReconfigureToDataField(
- descriptor, attributes, kDefaultFieldConstness, Representation::None(),
+ descriptor, attributes, PropertyConstness::kConst, Representation::None(),
FieldType::None(isolate));
return new_map;
}
@@ -2277,12 +2259,12 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
Handle<Map> transition(maybe_transition, isolate);
DescriptorArray descriptors = transition->instance_descriptors();
int descriptor = transition->LastAdded();
- DCHECK(descriptors->GetKey(descriptor)->Equals(*name));
+ DCHECK(descriptors.GetKey(descriptor).Equals(*name));
- DCHECK_EQ(kAccessor, descriptors->GetDetails(descriptor).kind());
- DCHECK_EQ(attributes, descriptors->GetDetails(descriptor).attributes());
+ DCHECK_EQ(kAccessor, descriptors.GetDetails(descriptor).kind());
+ DCHECK_EQ(attributes, descriptors.GetDetails(descriptor).attributes());
- Handle<Object> maybe_pair(descriptors->GetStrongValue(descriptor), isolate);
+ Handle<Object> maybe_pair(descriptors.GetStrongValue(descriptor), isolate);
if (!maybe_pair->IsAccessorPair()) {
return Map::Normalize(isolate, map, mode,
"TransitionToAccessorFromNonPair");
@@ -2303,7 +2285,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
if (descriptor != map->LastAdded()) {
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonLast");
}
- PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
+ PropertyDetails old_details = old_descriptors.GetDetails(descriptor);
if (old_details.kind() != kAccessor) {
return Map::Normalize(isolate, map, mode,
"AccessorsOverwritingNonAccessors");
@@ -2313,7 +2295,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
return Map::Normalize(isolate, map, mode, "AccessorsWithAttributes");
}
- Handle<Object> maybe_pair(old_descriptors->GetStrongValue(descriptor),
+ Handle<Object> maybe_pair(old_descriptors.GetStrongValue(descriptor),
isolate);
if (!maybe_pair->IsAccessorPair()) {
return Map::Normalize(isolate, map, mode, "AccessorsOverwritingNonPair");
@@ -2324,12 +2306,12 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
bool overwriting_accessor = false;
if (!getter->IsNull(isolate) &&
- !current_pair->get(ACCESSOR_GETTER)->IsNull(isolate) &&
+ !current_pair->get(ACCESSOR_GETTER).IsNull(isolate) &&
current_pair->get(ACCESSOR_GETTER) != *getter) {
overwriting_accessor = true;
}
if (!setter->IsNull(isolate) &&
- !current_pair->get(ACCESSOR_SETTER)->IsNull(isolate) &&
+ !current_pair->get(ACCESSOR_SETTER).IsNull(isolate) &&
current_pair->get(ACCESSOR_SETTER) != *setter) {
overwriting_accessor = true;
}
@@ -2349,7 +2331,9 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
pair->SetComponents(*getter, *setter);
- TransitionFlag flag = INSERT_TRANSITION;
+ // Do not track transitions during bootstrapping.
+ TransitionFlag flag =
+ isolate->bootstrapper()->IsActive() ? OMIT_TRANSITION : INSERT_TRANSITION;
Descriptor d = Descriptor::AccessorConstant(name, pair, attributes);
return Map::CopyInsertDescriptor(isolate, map, &d, flag);
}
@@ -2361,7 +2345,7 @@ Handle<Map> Map::CopyAddDescriptor(Isolate* isolate, Handle<Map> map,
// Share descriptors only if map owns descriptors and it not an initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
- !map->GetBackPointer()->IsUndefined(isolate) &&
+ !map->GetBackPointer().IsUndefined(isolate) &&
TransitionsAccessor(isolate, map).CanHaveMoreTransitions()) {
return ShareDescriptor(isolate, map, descriptors, descriptor);
}
@@ -2444,31 +2428,31 @@ int Map::Hash() {
namespace {
bool CheckEquivalent(const Map first, const Map second) {
- return first->GetConstructor() == second->GetConstructor() &&
- first->prototype() == second->prototype() &&
- first->instance_type() == second->instance_type() &&
- first->bit_field() == second->bit_field() &&
- first->is_extensible() == second->is_extensible() &&
- first->new_target_is_base() == second->new_target_is_base() &&
- first->has_hidden_prototype() == second->has_hidden_prototype();
+ return first.GetConstructor() == second.GetConstructor() &&
+ first.prototype() == second.prototype() &&
+ first.instance_type() == second.instance_type() &&
+ first.bit_field() == second.bit_field() &&
+ first.is_extensible() == second.is_extensible() &&
+ first.new_target_is_base() == second.new_target_is_base() &&
+ first.has_hidden_prototype() == second.has_hidden_prototype();
}
} // namespace
bool Map::EquivalentToForTransition(const Map other) const {
- CHECK_EQ(GetConstructor(), other->GetConstructor());
- CHECK_EQ(instance_type(), other->instance_type());
- CHECK_EQ(has_hidden_prototype(), other->has_hidden_prototype());
+ CHECK_EQ(GetConstructor(), other.GetConstructor());
+ CHECK_EQ(instance_type(), other.instance_type());
+ CHECK_EQ(has_hidden_prototype(), other.has_hidden_prototype());
- if (bit_field() != other->bit_field()) return false;
- if (new_target_is_base() != other->new_target_is_base()) return false;
- if (prototype() != other->prototype()) return false;
+ if (bit_field() != other.bit_field()) return false;
+ if (new_target_is_base() != other.new_target_is_base()) return false;
+ if (prototype() != other.prototype()) return false;
if (instance_type() == JS_FUNCTION_TYPE) {
// JSFunctions require more checks to ensure that sloppy function is
// not equivalent to strict function.
- int nof = Min(NumberOfOwnDescriptors(), other->NumberOfOwnDescriptors());
- return instance_descriptors()->IsEqualUpTo(other->instance_descriptors(),
- nof);
+ int nof = Min(NumberOfOwnDescriptors(), other.NumberOfOwnDescriptors());
+ return instance_descriptors().IsEqualUpTo(other.instance_descriptors(),
+ nof);
}
return true;
}
@@ -2482,10 +2466,10 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
DescriptorArray descriptors = instance_descriptors();
int nof = NumberOfOwnDescriptors();
for (int i = 0; i < nof; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
+ PropertyDetails details = descriptors.GetDetails(i);
if (details.location() == kField) {
DCHECK(IsMostGeneralFieldType(details.representation(),
- descriptors->GetFieldType(i)));
+ descriptors.GetFieldType(i)));
}
}
#endif
@@ -2495,15 +2479,15 @@ bool Map::EquivalentToForElementsKindTransition(const Map other) const {
bool Map::EquivalentToForNormalization(const Map other,
PropertyNormalizationMode mode) const {
int properties =
- mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other->GetInObjectProperties();
- return CheckEquivalent(*this, other) && bit_field2() == other->bit_field2() &&
+ mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other.GetInObjectProperties();
+ return CheckEquivalent(*this, other) && bit_field2() == other.bit_field2() &&
GetInObjectProperties() == properties &&
JSObject::GetEmbedderFieldCount(*this) ==
JSObject::GetEmbedderFieldCount(other);
}
static void GetMinInobjectSlack(Map map, void* data) {
- int slack = map->UnusedPropertyFields();
+ int slack = map.UnusedPropertyFields();
if (*reinterpret_cast<int*>(data) > slack) {
*reinterpret_cast<int*>(data) = slack;
}
@@ -2512,7 +2496,7 @@ static void GetMinInobjectSlack(Map map, void* data) {
int Map::ComputeMinObjectSlack(Isolate* isolate) {
DisallowHeapAllocation no_gc;
// Has to be an initial map.
- DCHECK(GetBackPointer()->IsUndefined(isolate));
+ DCHECK(GetBackPointer().IsUndefined(isolate));
int slack = UnusedPropertyFields();
TransitionsAccessor transitions(isolate, *this, &no_gc);
@@ -2525,22 +2509,22 @@ static void ShrinkInstanceSize(Map map, void* data) {
DCHECK_GE(slack, 0);
#ifdef DEBUG
int old_visitor_id = Map::GetVisitorId(map);
- int new_unused = map->UnusedPropertyFields() - slack;
+ int new_unused = map.UnusedPropertyFields() - slack;
#endif
- map->set_instance_size(map->InstanceSizeFromSlack(slack));
- map->set_construction_counter(Map::kNoSlackTracking);
+ map.set_instance_size(map.InstanceSizeFromSlack(slack));
+ map.set_construction_counter(Map::kNoSlackTracking);
DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
- DCHECK_EQ(new_unused, map->UnusedPropertyFields());
+ DCHECK_EQ(new_unused, map.UnusedPropertyFields());
}
static void StopSlackTracking(Map map, void* data) {
- map->set_construction_counter(Map::kNoSlackTracking);
+ map.set_construction_counter(Map::kNoSlackTracking);
}
void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
DisallowHeapAllocation no_gc;
// Has to be an initial map.
- DCHECK(GetBackPointer()->IsUndefined(isolate));
+ DCHECK(GetBackPointer().IsUndefined(isolate));
int slack = ComputeMinObjectSlack(isolate);
TransitionsAccessor transitions(isolate, *this, &no_gc);
@@ -2563,12 +2547,12 @@ void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
// static
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
Isolate* isolate) {
- Object maybe_proto_info = prototype->map()->prototype_info();
- if (maybe_proto_info->IsPrototypeInfo()) {
+ Object maybe_proto_info = prototype->map().prototype_info();
+ if (maybe_proto_info.IsPrototypeInfo()) {
return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
}
Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
- prototype->map()->set_prototype_info(*proto_info);
+ prototype->map().set_prototype_info(*proto_info);
return proto_info;
}
@@ -2576,7 +2560,7 @@ Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<JSObject> prototype,
Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
Isolate* isolate) {
Object maybe_proto_info = prototype_map->prototype_info();
- if (maybe_proto_info->IsPrototypeInfo()) {
+ if (maybe_proto_info.IsPrototypeInfo()) {
return handle(PrototypeInfo::cast(maybe_proto_info), isolate);
}
Handle<PrototypeInfo> proto_info = isolate->factory()->NewPrototypeInfo();
@@ -2587,7 +2571,7 @@ Handle<PrototypeInfo> Map::GetOrCreatePrototypeInfo(Handle<Map> prototype_map,
// static
void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
Isolate* isolate) {
- if (value == false && !map->prototype_info()->IsPrototypeInfo()) {
+ if (value == false && !map->prototype_info().IsPrototypeInfo()) {
// "False" is the implicit default value, so there's nothing to do.
return;
}
@@ -2605,7 +2589,7 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
maybe_prototype = isolate->global_object();
} else {
maybe_prototype =
- handle(map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
+ handle(map->GetPrototypeChainRootMap(isolate).prototype(), isolate);
}
if (!maybe_prototype->IsJSObject()) {
return handle(Smi::FromInt(Map::kPrototypeChainValid), isolate);
@@ -2616,9 +2600,9 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
JSObject::LazyRegisterPrototypeUser(handle(prototype->map(), isolate),
isolate);
- Object maybe_cell = prototype->map()->prototype_validity_cell();
+ Object maybe_cell = prototype->map().prototype_validity_cell();
// Return existing cell if it's still valid.
- if (maybe_cell->IsCell()) {
+ if (maybe_cell.IsCell()) {
Handle<Cell> cell(Cell::cast(maybe_cell), isolate);
if (cell->value() == Smi::FromInt(Map::kPrototypeChainValid)) {
return cell;
@@ -2627,17 +2611,17 @@ Handle<Object> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
// Otherwise create a new cell.
Handle<Cell> cell = isolate->factory()->NewCell(
handle(Smi::FromInt(Map::kPrototypeChainValid), isolate));
- prototype->map()->set_prototype_validity_cell(*cell);
+ prototype->map().set_prototype_validity_cell(*cell);
return cell;
}
// static
bool Map::IsPrototypeChainInvalidated(Map map) {
- DCHECK(map->is_prototype_map());
- Object maybe_cell = map->prototype_validity_cell();
- if (maybe_cell->IsCell()) {
+ DCHECK(map.is_prototype_map());
+ Object maybe_cell = map.prototype_validity_cell();
+ if (maybe_cell.IsCell()) {
Cell cell = Cell::cast(maybe_cell);
- return cell->value() != Smi::FromInt(Map::kPrototypeChainValid);
+ return cell.value() != Smi::FromInt(Map::kPrototypeChainValid);
}
return true;
}
@@ -2649,27 +2633,13 @@ void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
RuntimeCallTimerScope stats_scope(isolate, *map,
RuntimeCallCounterId::kMap_SetPrototype);
- bool is_hidden = false;
if (prototype->IsJSObject()) {
Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
JSObject::OptimizeAsPrototype(prototype_jsobj, enable_prototype_setup_mode);
-
- Object maybe_constructor = prototype_jsobj->map()->GetConstructor();
- if (maybe_constructor->IsJSFunction()) {
- JSFunction constructor = JSFunction::cast(maybe_constructor);
- Object data = constructor->shared()->function_data();
- is_hidden = (data->IsFunctionTemplateInfo() &&
- FunctionTemplateInfo::cast(data)->hidden_prototype()) ||
- prototype->IsJSGlobalObject();
- } else if (maybe_constructor->IsFunctionTemplateInfo()) {
- is_hidden =
- FunctionTemplateInfo::cast(maybe_constructor)->hidden_prototype() ||
- prototype->IsJSGlobalObject();
- }
} else {
DCHECK(prototype->IsNull(isolate) || prototype->IsJSProxy());
}
- map->set_has_hidden_prototype(is_hidden);
+ map->set_has_hidden_prototype(prototype->IsJSGlobalObject());
WriteBarrierMode wb_mode =
prototype->IsNull(isolate) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
@@ -2711,7 +2681,7 @@ MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map,
}
Map normalized_map = Map::cast(heap_object);
- if (!normalized_map->EquivalentToForNormalization(*fast_map, mode)) {
+ if (!normalized_map.EquivalentToForNormalization(*fast_map, mode)) {
return MaybeHandle<Map>();
}
return handle(normalized_map, GetIsolate());
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 96c09e1664..814f8ed3be 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -5,10 +5,11 @@
#ifndef V8_OBJECTS_MAP_H_
#define V8_OBJECTS_MAP_H_
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
#include "src/objects/code.h"
#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -41,8 +42,6 @@ enum InstanceType : uint16_t;
V(FeedbackCell) \
V(FeedbackVector) \
V(FixedArray) \
- V(FixedFloat64Array) \
- V(FixedTypedArrayBase) \
V(FreeSpace) \
V(JSApiObject) \
V(JSArrayBuffer) \
@@ -72,6 +71,7 @@ enum InstanceType : uint16_t;
V(TransitionArray) \
V(UncompiledDataWithoutPreparseData) \
V(UncompiledDataWithPreparseData) \
+ V(WasmCapiFunctionData) \
V(WasmInstanceObject) \
V(WeakArray) \
V(WeakCell)
@@ -140,7 +140,7 @@ using MapHandles = std::vector<Handle<Map>>;
// | Byte | [bit_field2] |
// | | - is_extensible (bit 0) |
// | | - is_prototype_map (bit 1) |
-// | | - is_in_retained_map_list (bit 2) |
+// | | - has_hidden_prototype (bit 2) |
// | | - elements_kind (bits 3..7) |
// +----+----------+---------------------------------------------+
// | Int | [bit_field3] |
@@ -148,7 +148,7 @@ using MapHandles = std::vector<Handle<Map>>;
// | | - number_of_own_descriptors (bit 10..19) |
// | | - is_dictionary_map (bit 20) |
// | | - owns_descriptors (bit 21) |
-// | | - has_hidden_prototype (bit 22) |
+// | | - is_in_retained_map_list (bit 22) |
// | | - is_deprecated (bit 23) |
// | | - is_unstable (bit 24) |
// | | - is_migration_target (bit 25) |
@@ -165,11 +165,6 @@ using MapHandles = std::vector<Handle<Map>>;
// +---------------+---------------------------------------------+
// | TaggedPointer | [constructor_or_backpointer] |
// +---------------+---------------------------------------------+
-// | TaggedPointer | If Map is a prototype map: |
-// | | [prototype_info] |
-// | | Else: |
-// | | [raw_transitions] |
-// +---------------+---------------------------------------------+
// | TaggedPointer | [instance_descriptors] |
// +*************************************************************+
// ! TaggedPointer ! [layout_descriptors] !
@@ -179,6 +174,13 @@ using MapHandles = std::vector<Handle<Map>>;
// +*************************************************************+
// | TaggedPointer | [dependent_code] |
// +---------------+---------------------------------------------+
+// | TaggedPointer | [prototype_validity_cell] |
+// +---------------+---------------------------------------------+
+// | TaggedPointer | If Map is a prototype map: |
+// | | [prototype_info] |
+// | | Else: |
+// | | [raw_transitions] |
+// +---------------+---------------------------------------------+
class Map : public HeapObject {
public:
@@ -263,10 +265,10 @@ class Map : public HeapObject {
DECL_PRIMITIVE_ACCESSORS(bit_field2, byte)
// Bit positions for |bit_field2|.
-#define MAP_BIT_FIELD2_FIELDS(V, _) \
- V(IsExtensibleBit, bool, 1, _) \
- V(IsPrototypeMapBit, bool, 1, _) \
- V(IsInRetainedMapListBit, bool, 1, _) \
+#define MAP_BIT_FIELD2_FIELDS(V, _) \
+ V(IsExtensibleBit, bool, 1, _) \
+ V(IsPrototypeMapBit, bool, 1, _) \
+ V(HasHiddenPrototypeBit, bool, 1, _) \
V(ElementsKindBits, ElementsKind, 5, _)
DEFINE_BIT_FIELDS(MAP_BIT_FIELD2_FIELDS)
@@ -287,7 +289,7 @@ class Map : public HeapObject {
V(NumberOfOwnDescriptorsBits, int, kDescriptorIndexBitCount, _) \
V(IsDictionaryMapBit, bool, 1, _) \
V(OwnsDescriptorsBit, bool, 1, _) \
- V(HasHiddenPrototypeBit, bool, 1, _) \
+ V(IsInRetainedMapListBit, bool, 1, _) \
V(IsDeprecatedBit, bool, 1, _) \
V(IsUnstableBit, bool, 1, _) \
V(IsMigrationTargetBit, bool, 1, _) \
@@ -419,7 +421,7 @@ class Map : public HeapObject {
inline bool has_sloppy_arguments_elements() const;
inline bool has_fast_sloppy_arguments_elements() const;
inline bool has_fast_string_wrapper_elements() const;
- inline bool has_fixed_typed_array_elements() const;
+ inline bool has_typed_array_elements() const;
inline bool has_dictionary_elements() const;
inline bool has_frozen_or_sealed_elements() const;
inline bool has_sealed_elements() const;
@@ -518,17 +520,16 @@ class Map : public HeapObject {
static inline bool IsMostGeneralFieldType(Representation representation,
FieldType field_type);
- // Generalizes constness, representation and field_type if objects with given
- // instance type can have fast elements that can be transitioned by stubs or
- // optimized code to more general elements kind.
+ // Generalizes representation and field_type if objects with given
+ // instance type can have fast elements that can be transitioned by
+ // stubs or optimized code to more general elements kind.
// This generalization is necessary in order to ensure that elements kind
// transitions performed by stubs / optimized code don't silently transition
- // PropertyConstness::kMutable fields back to VariableMode::kConst state or
+ // fields with representation "Tagged" back to "Smi" or "HeapObject" or
// fields with HeapObject representation and "Any" type back to "Class" type.
static inline void GeneralizeIfCanHaveTransitionableFastElementsKind(
Isolate* isolate, InstanceType instance_type,
- PropertyConstness* constness, Representation* representation,
- Handle<FieldType>* field_type);
+ Representation* representation, Handle<FieldType>* field_type);
V8_EXPORT_PRIVATE static Handle<Map> ReconfigureProperty(
Isolate* isolate, Handle<Map> map, int modify_index,
@@ -724,9 +725,8 @@ class Map : public HeapObject {
V8_EXPORT_PRIVATE static Handle<Map> CopyForPreventExtensions(
Isolate* isolate, Handle<Map> map, PropertyAttributes attrs_to_add,
- Handle<Symbol> transition_marker, const char* reason);
-
- static Handle<Map> FixProxy(Handle<Map> map, InstanceType type, int size);
+ Handle<Symbol> transition_marker, const char* reason,
+ bool old_map_is_dictionary_elements_kind = false);
// Maximal number of fast properties. Used to restrict the number of map
// transitions to avoid an explosion in the number of maps for objects used as
@@ -829,34 +829,8 @@ class Map : public HeapObject {
static const int kMaxPreAllocatedPropertyFields = 255;
- // Layout description.
-#define MAP_FIELDS(V) \
- /* Raw data fields. */ \
- V(kInstanceSizeInWordsOffset, kUInt8Size) \
- V(kInObjectPropertiesStartOrConstructorFunctionIndexOffset, kUInt8Size) \
- V(kUsedOrUnusedInstanceSizeInWordsOffset, kUInt8Size) \
- V(kVisitorIdOffset, kUInt8Size) \
- V(kInstanceTypeOffset, kUInt16Size) \
- V(kBitFieldOffset, kUInt8Size) \
- V(kBitField2Offset, kUInt8Size) \
- V(kBitField3Offset, kUInt32Size) \
- /* Adds padding to make tagged fields kTaggedSize-aligned. */ \
- V(kOptionalPaddingOffset, OBJECT_POINTER_PADDING(kOptionalPaddingOffset)) \
- /* Pointer fields. */ \
- V(kPointerFieldsBeginOffset, 0) \
- V(kPrototypeOffset, kTaggedSize) \
- V(kConstructorOrBackPointerOffset, kTaggedSize) \
- V(kTransitionsOrPrototypeInfoOffset, kTaggedSize) \
- V(kDescriptorsOffset, kTaggedSize) \
- V(kLayoutDescriptorOffset, FLAG_unbox_double_fields ? kTaggedSize : 0) \
- V(kDependentCodeOffset, kTaggedSize) \
- V(kPrototypeValidityCellOffset, kTaggedSize) \
- V(kPointerFieldsEndOffset, 0) \
- /* Total size. */ \
- V(kSize, 0)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
-#undef MAP_FIELDS
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_MAP_FIELDS)
STATIC_ASSERT(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
@@ -987,6 +961,7 @@ class Map : public HeapObject {
Isolate* isolate, FILE* file, const char* reason, int modify_index,
int split, int descriptors, bool constant_to_field,
Representation old_representation, Representation new_representation,
+ PropertyConstness old_constness, PropertyConstness new_constness,
MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value);
diff --git a/deps/v8/src/objects/maybe-object-inl.h b/deps/v8/src/objects/maybe-object-inl.h
index 72c328d29b..02f5b485ce 100644
--- a/deps/v8/src/objects/maybe-object-inl.h
+++ b/deps/v8/src/objects/maybe-object-inl.h
@@ -8,109 +8,54 @@
#include "src/objects/maybe-object.h"
#ifdef V8_COMPRESS_POINTERS
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#endif
-#include "src/objects/heap-object-inl.h"
-#include "src/objects/slots-inl.h"
#include "src/objects/smi-inl.h"
+#include "src/objects/tagged-impl-inl.h"
namespace v8 {
namespace internal {
-bool MaybeObject::ToSmi(Smi* value) {
- if (HAS_SMI_TAG(ptr_)) {
- *value = Smi::cast(Object(ptr_));
- return true;
- }
- return false;
-}
-
-Smi MaybeObject::ToSmi() const {
- DCHECK(HAS_SMI_TAG(ptr_));
- return Smi::cast(Object(ptr_));
-}
-
-bool MaybeObject::IsStrongOrWeak() const {
- if (IsSmi() || IsCleared()) {
- return false;
- }
- return true;
-}
-
-bool MaybeObject::GetHeapObject(HeapObject* result) const {
- if (IsSmi() || IsCleared()) {
- return false;
- }
- *result = GetHeapObject();
- return true;
-}
-
-bool MaybeObject::GetHeapObject(HeapObject* result,
- HeapObjectReferenceType* reference_type) const {
- if (IsSmi() || IsCleared()) {
- return false;
- }
- *reference_type = HasWeakHeapObjectTag(ptr_)
- ? HeapObjectReferenceType::WEAK
- : HeapObjectReferenceType::STRONG;
- *result = GetHeapObject();
- return true;
-}
-
-bool MaybeObject::IsStrong() const {
- return !HasWeakHeapObjectTag(ptr_) && !IsSmi();
-}
-
-bool MaybeObject::GetHeapObjectIfStrong(HeapObject* result) const {
- if (!HasWeakHeapObjectTag(ptr_) && !IsSmi()) {
- *result = HeapObject::cast(Object(ptr_));
- return true;
- }
- return false;
-}
+//
+// MaybeObject implementation.
+//
-HeapObject MaybeObject::GetHeapObjectAssumeStrong() const {
- DCHECK(IsStrong());
- return HeapObject::cast(Object(ptr_));
+// static
+MaybeObject MaybeObject::FromSmi(Smi smi) {
+ DCHECK(HAS_SMI_TAG(smi.ptr()));
+ return MaybeObject(smi.ptr());
}
-bool MaybeObject::IsWeak() const {
- return HasWeakHeapObjectTag(ptr_) && !IsCleared();
+// static
+MaybeObject MaybeObject::FromObject(Object object) {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(object.ptr()));
+ return MaybeObject(object.ptr());
}
-bool MaybeObject::IsWeakOrCleared() const { return HasWeakHeapObjectTag(ptr_); }
-
-bool MaybeObject::GetHeapObjectIfWeak(HeapObject* result) const {
- if (IsWeak()) {
- *result = GetHeapObject();
- return true;
- }
- return false;
+MaybeObject MaybeObject::MakeWeak(MaybeObject object) {
+ DCHECK(object.IsStrongOrWeak());
+ return MaybeObject(object.ptr() | kWeakHeapObjectMask);
}
-HeapObject MaybeObject::GetHeapObjectAssumeWeak() const {
- DCHECK(IsWeak());
- return GetHeapObject();
-}
+//
+// HeapObjectReference implementation.
+//
-HeapObject MaybeObject::GetHeapObject() const {
- DCHECK(!IsSmi());
- DCHECK(!IsCleared());
- return HeapObject::cast(Object(ptr_ & ~kWeakHeapObjectMask));
-}
+HeapObjectReference::HeapObjectReference(Object object)
+ : MaybeObject(object.ptr()) {}
-Object MaybeObject::GetHeapObjectOrSmi() const {
- if (IsSmi()) {
- return Object(ptr_);
- }
- return GetHeapObject();
+// static
+HeapObjectReference HeapObjectReference::Strong(Object object) {
+ DCHECK(!object.IsSmi());
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return HeapObjectReference(object);
}
-bool MaybeObject::IsObject() const { return IsSmi() || IsStrong(); }
-
-MaybeObject MaybeObject::MakeWeak(MaybeObject object) {
- DCHECK(object.IsStrongOrWeak());
- return MaybeObject(object.ptr_ | kWeakHeapObjectMask);
+// static
+HeapObjectReference HeapObjectReference::Weak(Object object) {
+ DCHECK(!object.IsSmi());
+ DCHECK(!HasWeakHeapObjectTag(object));
+ return HeapObjectReference(object.ptr() | kWeakHeapObjectMask);
}
// static
@@ -137,18 +82,18 @@ void HeapObjectReference::Update(THeapObjectSlot slot, HeapObject value) {
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
Address old_value = (*slot).ptr();
DCHECK(!HAS_SMI_TAG(old_value));
- Address new_value = value->ptr();
+ Address new_value = value.ptr();
DCHECK(Internals::HasHeapObjectTag(new_value));
#ifdef DEBUG
- bool weak_before = HasWeakHeapObjectTag(old_value);
+ bool weak_before = HAS_WEAK_HEAP_OBJECT_TAG(old_value);
#endif
slot.store(
HeapObjectReference(new_value | (old_value & kWeakHeapObjectMask)));
#ifdef DEBUG
- bool weak_after = HasWeakHeapObjectTag((*slot).ptr());
+ bool weak_after = HAS_WEAK_HEAP_OBJECT_TAG((*slot).ptr());
DCHECK_EQ(weak_before, weak_after);
#endif
}
diff --git a/deps/v8/src/objects/maybe-object.h b/deps/v8/src/objects/maybe-object.h
index e62099b2d5..a1645c0604 100644
--- a/deps/v8/src/objects/maybe-object.h
+++ b/deps/v8/src/objects/maybe-object.h
@@ -5,121 +5,31 @@
#ifndef V8_OBJECTS_MAYBE_OBJECT_H_
#define V8_OBJECTS_MAYBE_OBJECT_H_
-#include "include/v8-internal.h"
-#include "include/v8.h"
-#include "src/globals.h"
-#include "src/objects.h"
-#include "src/objects/smi.h"
+#include "src/objects/tagged-impl.h"
namespace v8 {
namespace internal {
-class HeapObject;
-class Isolate;
-class StringStream;
-
// A MaybeObject is either a SMI, a strong reference to a HeapObject, a weak
// reference to a HeapObject, or a cleared weak reference. It's used for
// implementing in-place weak references (see design doc: goo.gl/j6SdcK )
-class MaybeObject {
+class MaybeObject : public TaggedImpl<HeapObjectReferenceType::WEAK, Address> {
public:
- MaybeObject() : ptr_(kNullAddress) {}
- explicit MaybeObject(Address ptr) : ptr_(ptr) {}
-
- bool operator==(const MaybeObject& other) const { return ptr_ == other.ptr_; }
- bool operator!=(const MaybeObject& other) const { return ptr_ != other.ptr_; }
-
- Address ptr() const { return ptr_; }
-
- // Enable incremental transition of client code.
- MaybeObject* operator->() { return this; }
- const MaybeObject* operator->() const { return this; }
-
- bool IsSmi() const { return HAS_SMI_TAG(ptr_); }
- inline bool ToSmi(Smi* value);
- inline Smi ToSmi() const;
-
- bool IsCleared() const {
- return static_cast<uint32_t>(ptr_) == kClearedWeakHeapObjectLower32;
- }
-
- inline bool IsStrongOrWeak() const;
- inline bool IsStrong() const;
-
- // If this MaybeObject is a strong pointer to a HeapObject, returns true and
- // sets *result. Otherwise returns false.
- inline bool GetHeapObjectIfStrong(HeapObject* result) const;
-
- // DCHECKs that this MaybeObject is a strong pointer to a HeapObject and
- // returns the HeapObject.
- inline HeapObject GetHeapObjectAssumeStrong() const;
-
- inline bool IsWeak() const;
- inline bool IsWeakOrCleared() const;
-
- // If this MaybeObject is a weak pointer to a HeapObject, returns true and
- // sets *result. Otherwise returns false.
- inline bool GetHeapObjectIfWeak(HeapObject* result) const;
-
- // DCHECKs that this MaybeObject is a weak pointer to a HeapObject and
- // returns the HeapObject.
- inline HeapObject GetHeapObjectAssumeWeak() const;
+ constexpr MaybeObject() : TaggedImpl(kNullAddress) {}
+ constexpr explicit MaybeObject(Address ptr) : TaggedImpl(ptr) {}
- // If this MaybeObject is a strong or weak pointer to a HeapObject, returns
- // true and sets *result. Otherwise returns false.
- inline bool GetHeapObject(HeapObject* result) const;
- inline bool GetHeapObject(HeapObject* result,
- HeapObjectReferenceType* reference_type) const;
+ // These operator->() overloads are required for handlified code.
+ constexpr const MaybeObject* operator->() const { return this; }
- // DCHECKs that this MaybeObject is a strong or a weak pointer to a HeapObject
- // and returns the HeapObject.
- inline HeapObject GetHeapObject() const;
+ V8_INLINE static MaybeObject FromSmi(Smi smi);
- // DCHECKs that this MaybeObject is a strong or a weak pointer to a HeapObject
- // or a SMI and returns the HeapObject or SMI.
- inline Object GetHeapObjectOrSmi() const;
+ V8_INLINE static MaybeObject FromObject(Object object);
- inline bool IsObject() const;
- template <typename T>
- T cast() const {
- DCHECK(!HasWeakHeapObjectTag(ptr_));
- return T::cast(Object(ptr_));
- }
-
- static MaybeObject FromSmi(Smi smi) {
- DCHECK(HAS_SMI_TAG(smi->ptr()));
- return MaybeObject(smi->ptr());
- }
-
- static MaybeObject FromObject(Object object) {
- DCHECK(!HasWeakHeapObjectTag(object.ptr()));
- return MaybeObject(object.ptr());
- }
-
- static inline MaybeObject MakeWeak(MaybeObject object);
+ V8_INLINE static MaybeObject MakeWeak(MaybeObject object);
#ifdef VERIFY_HEAP
static void VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p);
#endif
-
- // Prints this object without details.
- void ShortPrint(FILE* out = stdout);
-
- // Prints this object without details to a message accumulator.
- void ShortPrint(StringStream* accumulator);
-
- void ShortPrint(std::ostream& os);
-
-#ifdef OBJECT_PRINT
- void Print();
- void Print(std::ostream& os);
-#else
- void Print() { ShortPrint(); }
- void Print(std::ostream& os) { ShortPrint(os); }
-#endif
-
- private:
- Address ptr_;
};
// A HeapObjectReference is either a strong reference to a HeapObject, a weak
@@ -127,19 +37,11 @@ class MaybeObject {
class HeapObjectReference : public MaybeObject {
public:
explicit HeapObjectReference(Address address) : MaybeObject(address) {}
- explicit HeapObjectReference(Object object) : MaybeObject(object->ptr()) {}
-
- static HeapObjectReference Strong(Object object) {
- DCHECK(!object->IsSmi());
- DCHECK(!HasWeakHeapObjectTag(object));
- return HeapObjectReference(object);
- }
-
- static HeapObjectReference Weak(Object object) {
- DCHECK(!object->IsSmi());
- DCHECK(!HasWeakHeapObjectTag(object));
- return HeapObjectReference(object->ptr() | kWeakHeapObjectMask);
- }
+ V8_INLINE explicit HeapObjectReference(Object object);
+
+ V8_INLINE static HeapObjectReference Strong(Object object);
+
+ V8_INLINE static HeapObjectReference Weak(Object object);
V8_INLINE static HeapObjectReference ClearedValue(Isolate* isolate);
diff --git a/deps/v8/src/objects/microtask-inl.h b/deps/v8/src/objects/microtask-inl.h
index 036b227056..91fa5890cb 100644
--- a/deps/v8/src/objects/microtask-inl.h
+++ b/deps/v8/src/objects/microtask-inl.h
@@ -7,8 +7,8 @@
#include "src/objects/microtask.h"
-#include "src/contexts-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/contexts-inl.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/js-objects-inl.h"
diff --git a/deps/v8/src/objects/microtask.h b/deps/v8/src/objects/microtask.h
index b3c81533a6..d631bf6903 100644
--- a/deps/v8/src/objects/microtask.h
+++ b/deps/v8/src/objects/microtask.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_MICROTASK_H_
#define V8_OBJECTS_MICROTASK_H_
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/module-inl.h b/deps/v8/src/objects/module-inl.h
index 09e19343e1..a3bc31b63a 100644
--- a/deps/v8/src/objects/module-inl.h
+++ b/deps/v8/src/objects/module-inl.h
@@ -7,7 +7,7 @@
#include "src/objects/module.h"
-#include "src/objects-inl.h" // Needed for write barriers
+#include "src/objects/objects-inl.h" // Needed for write barriers
#include "src/objects/scope-info.h"
// Has to be the last include (doesn't have include guards):
@@ -40,7 +40,7 @@ SMI_ACCESSORS(Module, hash, kHashOffset)
ModuleInfo Module::info() const {
return (status() >= kEvaluating)
? ModuleInfo::cast(code())
- : GetSharedFunctionInfo()->scope_info()->ModuleDescriptorInfo();
+ : GetSharedFunctionInfo().scope_info().ModuleDescriptorInfo();
}
CAST_ACCESSOR(JSModuleNamespace)
@@ -84,12 +84,12 @@ FixedArray ModuleInfo::module_request_positions() const {
#ifdef DEBUG
bool ModuleInfo::Equals(ModuleInfo other) const {
- return regular_exports() == other->regular_exports() &&
- regular_imports() == other->regular_imports() &&
- special_exports() == other->special_exports() &&
- namespace_imports() == other->namespace_imports() &&
- module_requests() == other->module_requests() &&
- module_request_positions() == other->module_request_positions();
+ return regular_exports() == other.regular_exports() &&
+ regular_imports() == other.regular_imports() &&
+ special_exports() == other.special_exports() &&
+ namespace_imports() == other.namespace_imports() &&
+ module_requests() == other.module_requests() &&
+ module_request_positions() == other.module_request_positions();
}
#endif
diff --git a/deps/v8/src/objects/module.cc b/deps/v8/src/objects/module.cc
index 4e2ae75b06..ea40989df1 100644
--- a/deps/v8/src/objects/module.cc
+++ b/deps/v8/src/objects/module.cc
@@ -7,15 +7,15 @@
#include "src/objects/module.h"
-#include "src/accessors.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/modules.h"
-#include "src/objects-inl.h"
+#include "src/builtins/accessors.h"
#include "src/objects/cell-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-generator-inl.h"
#include "src/objects/module-inl.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -120,7 +120,7 @@ void Module::CreateIndirectExport(Isolate* isolate, Handle<Module> module,
Handle<String> name,
Handle<ModuleInfoEntry> entry) {
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+ DCHECK(exports->Lookup(name).IsTheHole(isolate));
exports = ObjectHashTable::Put(exports, name, entry);
module->set_exports(*exports);
}
@@ -130,12 +130,12 @@ void Module::CreateExport(Isolate* isolate, Handle<Module> module,
DCHECK_LT(0, names->length());
Handle<Cell> cell =
isolate->factory()->NewCell(isolate->factory()->undefined_value());
- module->regular_exports()->set(ExportIndex(cell_index), *cell);
+ module->regular_exports().set(ExportIndex(cell_index), *cell);
Handle<ObjectHashTable> exports(module->exports(), isolate);
for (int i = 0, n = names->length(); i < n; ++i) {
Handle<String> name(String::cast(names->get(i)), isolate);
- DCHECK(exports->Lookup(name)->IsTheHole(isolate));
+ DCHECK(exports->Lookup(name).IsTheHole(isolate));
exports = ObjectHashTable::Put(exports, name, cell);
}
module->set_exports(*exports);
@@ -146,21 +146,20 @@ Cell Module::GetCell(int cell_index) {
Object cell;
switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
case ModuleDescriptor::kImport:
- cell = regular_imports()->get(ImportIndex(cell_index));
+ cell = regular_imports().get(ImportIndex(cell_index));
break;
case ModuleDescriptor::kExport:
- cell = regular_exports()->get(ExportIndex(cell_index));
+ cell = regular_exports().get(ExportIndex(cell_index));
break;
case ModuleDescriptor::kInvalid:
UNREACHABLE();
- break;
}
return Cell::cast(cell);
}
Handle<Object> Module::LoadVariable(Isolate* isolate, Handle<Module> module,
int cell_index) {
- return handle(module->GetCell(cell_index)->value(), isolate);
+ return handle(module->GetCell(cell_index).value(), isolate);
}
void Module::StoreVariable(Handle<Module> module, int cell_index,
@@ -168,7 +167,7 @@ void Module::StoreVariable(Handle<Module> module, int cell_index,
DisallowHeapAllocation no_gc;
DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
ModuleDescriptor::kExport);
- module->GetCell(cell_index)->set_value(*value);
+ module->GetCell(cell_index).set_value(*value);
}
#ifdef DEBUG
@@ -177,7 +176,7 @@ void Module::PrintStatusTransition(Status new_status) {
StdoutStream os;
os << "Changing module status from " << status() << " to " << new_status
<< " for ";
- script()->GetNameOrSourceURL()->Print(os);
+ script().GetNameOrSourceURL().Print(os);
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -216,24 +215,24 @@ void Module::Reset(Isolate* isolate, Handle<Module> module) {
DCHECK(module->status() == kPreInstantiating ||
module->status() == kInstantiating);
- DCHECK(module->exception()->IsTheHole(isolate));
- DCHECK(module->import_meta()->IsTheHole(isolate));
+ DCHECK(module->exception().IsTheHole(isolate));
+ DCHECK(module->import_meta().IsTheHole(isolate));
// The namespace object cannot exist, because it would have been created
// by RunInitializationCode, which is called only after this module's SCC
// succeeds instantiation.
- DCHECK(!module->module_namespace()->IsJSModuleNamespace());
+ DCHECK(!module->module_namespace().IsJSModuleNamespace());
Handle<ObjectHashTable> exports =
- ObjectHashTable::New(isolate, module->info()->RegularExportCount());
+ ObjectHashTable::New(isolate, module->info().RegularExportCount());
Handle<FixedArray> regular_exports =
- factory->NewFixedArray(module->regular_exports()->length());
+ factory->NewFixedArray(module->regular_exports().length());
Handle<FixedArray> regular_imports =
- factory->NewFixedArray(module->regular_imports()->length());
+ factory->NewFixedArray(module->regular_imports().length());
Handle<FixedArray> requested_modules =
- factory->NewFixedArray(module->requested_modules()->length());
+ factory->NewFixedArray(module->requested_modules().length());
if (module->status() == kInstantiating) {
- module->set_code(JSFunction::cast(module->code())->shared());
+ module->set_code(JSFunction::cast(module->code()).shared());
}
#ifdef DEBUG
module->PrintStatusTransition(kUninstantiated);
@@ -249,9 +248,9 @@ void Module::Reset(Isolate* isolate, Handle<Module> module) {
void Module::RecordError(Isolate* isolate) {
DisallowHeapAllocation no_alloc;
- DCHECK(exception()->IsTheHole(isolate));
+ DCHECK(exception().IsTheHole(isolate));
Object the_exception = isolate->pending_exception();
- DCHECK(!the_exception->IsTheHole(isolate));
+ DCHECK(!the_exception.IsTheHole(isolate));
set_code(info());
#ifdef DEBUG
@@ -264,7 +263,7 @@ void Module::RecordError(Isolate* isolate) {
Object Module::GetException() {
DisallowHeapAllocation no_alloc;
DCHECK_EQ(status(), Module::kErrored);
- DCHECK(!exception()->IsTheHole());
+ DCHECK(!exception().IsTheHole());
return exception();
}
@@ -275,14 +274,14 @@ SharedFunctionInfo Module::GetSharedFunctionInfo() const {
switch (status()) {
case kUninstantiated:
case kPreInstantiating:
- DCHECK(code()->IsSharedFunctionInfo());
+ DCHECK(code().IsSharedFunctionInfo());
return SharedFunctionInfo::cast(code());
case kInstantiating:
- DCHECK(code()->IsJSFunction());
- return JSFunction::cast(code())->shared();
+ DCHECK(code().IsJSFunction());
+ return JSFunction::cast(code()).shared();
case kInstantiated:
- DCHECK(code()->IsJSGeneratorObject());
- return JSGeneratorObject::cast(code())->function()->shared();
+ DCHECK(code().IsJSGeneratorObject());
+ return JSGeneratorObject::cast(code()).function().shared();
case kEvaluating:
case kEvaluated:
case kErrored:
@@ -297,9 +296,9 @@ MaybeHandle<Cell> Module::ResolveImport(Isolate* isolate, Handle<Module> module,
MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
Handle<Module> requested_module(
- Module::cast(module->requested_modules()->get(module_request)), isolate);
+ Module::cast(module->requested_modules().get(module_request)), isolate);
Handle<String> specifier(
- String::cast(module->info()->module_requests()->get(module_request)),
+ String::cast(module->info().module_requests().get(module_request)),
isolate);
MaybeHandle<Cell> result =
Module::ResolveExport(isolate, requested_module, specifier, name, loc,
@@ -315,7 +314,7 @@ MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
Module::ResolveSet* resolve_set) {
DCHECK_GE(module->status(), kPreInstantiating);
DCHECK_NE(module->status(), kEvaluating);
- Handle<Object> object(module->exports()->Lookup(export_name), isolate);
+ Handle<Object> object(module->exports().Lookup(export_name), isolate);
if (object->IsCell()) {
// Already resolved (e.g. because it's a local export).
return Handle<Cell>::cast(object);
@@ -363,7 +362,7 @@ MaybeHandle<Cell> Module::ResolveExport(Isolate* isolate, Handle<Module> module,
// The export table may have changed but the entry in question should be
// unchanged.
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(export_name)->IsModuleInfoEntry());
+ DCHECK(exports->Lookup(export_name).IsModuleInfoEntry());
exports = ObjectHashTable::Put(exports, export_name, cell);
module->set_exports(*exports);
@@ -384,12 +383,12 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
// Go through all star exports looking for the given name. If multiple star
// exports provide the name, make sure they all map it to the same cell.
Handle<Cell> unique_cell;
- Handle<FixedArray> special_exports(module->info()->special_exports(),
+ Handle<FixedArray> special_exports(module->info().special_exports(),
isolate);
for (int i = 0, n = special_exports->length(); i < n; ++i) {
i::Handle<i::ModuleInfoEntry> entry(
i::ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- if (!entry->export_name()->IsUndefined(isolate)) {
+ if (!entry->export_name().IsUndefined(isolate)) {
continue; // Indirect export.
}
@@ -415,7 +414,7 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
if (!unique_cell.is_null()) {
// Found a unique star export for this name.
Handle<ObjectHashTable> exports(module->exports(), isolate);
- DCHECK(exports->Lookup(export_name)->IsTheHole(isolate));
+ DCHECK(exports->Lookup(export_name).IsTheHole(isolate));
exports = ObjectHashTable::Put(exports, export_name, unique_cell);
module->set_exports(*exports);
return unique_cell;
@@ -439,7 +438,7 @@ bool Module::Instantiate(Isolate* isolate, Handle<Module> module,
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Instantiating module ";
- module->script()->GetNameOrSourceURL()->Print(os);
+ module->script().GetNameOrSourceURL().Print(os);
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -532,7 +531,7 @@ bool Module::PrepareInstantiate(Isolate* isolate, Handle<Module> module,
bool Module::RunInitializationCode(Isolate* isolate, Handle<Module> module) {
DCHECK_EQ(module->status(), kInstantiating);
Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
- DCHECK_EQ(MODULE_SCOPE, function->shared()->scope_info()->scope_type());
+ DCHECK_EQ(MODULE_SCOPE, function->shared().scope_info().scope_type());
Handle<Object> receiver = isolate->factory()->undefined_value();
Handle<Object> argv[] = {module};
MaybeHandle<Object> maybe_generator =
@@ -637,7 +636,7 @@ bool Module::FinishInstantiate(Isolate* isolate, Handle<Module> module,
.ToHandle(&cell)) {
return false;
}
- module->regular_imports()->set(ImportIndex(entry->cell_index()), *cell);
+ module->regular_imports().set(ImportIndex(entry->cell_index()), *cell);
}
// Resolve indirect exports.
@@ -664,7 +663,7 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module) {
if (FLAG_trace_module_status) {
StdoutStream os;
os << "Evaluating module ";
- module->script()->GetNameOrSourceURL()->Print(os);
+ module->script().GetNameOrSourceURL().Print(os);
#ifndef OBJECT_PRINT
os << "\n";
#endif // OBJECT_PRINT
@@ -710,7 +709,7 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
Handle<JSGeneratorObject> generator(JSGeneratorObject::cast(module->code()),
isolate);
module->set_code(
- generator->function()->shared()->scope_info()->ModuleDescriptorInfo());
+ generator->function().shared().scope_info().ModuleDescriptorInfo());
module->SetStatus(kEvaluating);
module->set_dfs_index(*dfs_index);
module->set_dfs_ancestor_index(*dfs_index);
@@ -748,10 +747,10 @@ MaybeHandle<Object> Module::Evaluate(Isolate* isolate, Handle<Module> module,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result, Execution::Call(isolate, resume, generator, 0, nullptr),
Object);
- DCHECK(JSIteratorResult::cast(*result)->done()->BooleanValue(isolate));
+ DCHECK(JSIteratorResult::cast(*result).done().BooleanValue(isolate));
CHECK(MaybeTransitionComponent(isolate, module, stack, kEvaluated));
- return handle(JSIteratorResult::cast(*result)->value(), isolate);
+ return handle(JSIteratorResult::cast(*result).value(), isolate);
}
namespace {
@@ -760,7 +759,7 @@ void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
UnorderedModuleSet* visited) {
DCHECK_GE(module->status(), Module::kInstantiating);
- if (module->module_namespace()->IsJSModuleNamespace()) return; // Shortcut.
+ if (module->module_namespace().IsJSModuleNamespace()) return; // Shortcut.
bool cycle = !visited->insert(module).second;
if (cycle) return;
@@ -771,17 +770,16 @@ void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
// Maybe split special_exports into indirect_exports and star_exports.
ReadOnlyRoots roots(isolate);
- Handle<FixedArray> special_exports(module->info()->special_exports(),
- isolate);
+ Handle<FixedArray> special_exports(module->info().special_exports(), isolate);
for (int i = 0, n = special_exports->length(); i < n; ++i) {
Handle<ModuleInfoEntry> entry(
ModuleInfoEntry::cast(special_exports->get(i)), isolate);
- if (!entry->export_name()->IsUndefined(roots)) {
+ if (!entry->export_name().IsUndefined(roots)) {
continue; // Indirect export.
}
Handle<Module> requested_module(
- Module::cast(module->requested_modules()->get(entry->module_request())),
+ Module::cast(module->requested_modules().get(entry->module_request())),
isolate);
// Recurse.
@@ -799,7 +797,7 @@ void FetchStarExports(Isolate* isolate, Handle<Module> module, Zone* zone,
Handle<String> name(String::cast(key), isolate);
if (name->Equals(roots.default_string())) continue;
- if (!exports->Lookup(name)->IsTheHole(roots)) continue;
+ if (!exports->Lookup(name).IsTheHole(roots)) continue;
Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
auto insert_result = more_exports.insert(std::make_pair(name, cell));
@@ -834,7 +832,7 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
Handle<Module> module,
int module_request) {
Handle<Module> requested_module(
- Module::cast(module->requested_modules()->get(module_request)), isolate);
+ Module::cast(module->requested_modules().get(module_request)), isolate);
return Module::GetModuleNamespace(isolate, requested_module);
}
@@ -900,7 +898,7 @@ Handle<JSModuleNamespace> Module::GetModuleNamespace(Isolate* isolate,
MaybeHandle<Object> JSModuleNamespace::GetExport(Isolate* isolate,
Handle<String> name) {
- Handle<Object> object(module()->exports()->Lookup(name), isolate);
+ Handle<Object> object(module().exports().Lookup(name), isolate);
if (object->IsTheHole(isolate)) {
return isolate->factory()->undefined_value();
}
@@ -922,7 +920,7 @@ Maybe<PropertyAttributes> JSModuleNamespace::GetPropertyAttributes(
Isolate* isolate = it->isolate();
- Handle<Object> lookup(object->module()->exports()->Lookup(name), isolate);
+ Handle<Object> lookup(object->module().exports().Lookup(name), isolate);
if (lookup->IsTheHole(isolate)) {
return Just(ABSENT);
}
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 5137d92351..a1672dce7e 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_MODULE_H_
#define V8_OBJECTS_MODULE_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h
index af1724b76d..b3e04bbd50 100644
--- a/deps/v8/src/objects/name-inl.h
+++ b/deps/v8/src/objects/name-inl.h
@@ -45,28 +45,26 @@ void Symbol::set_is_private_name() {
}
bool Name::IsUniqueName() const {
- uint32_t type = map()->instance_type();
+ uint32_t type = map().instance_type();
bool result = (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
(kStringTag | kNotInternalizedTag);
SLOW_DCHECK(result == HeapObject::IsUniqueName());
return result;
}
-uint32_t Name::hash_field() {
- return READ_UINT32_FIELD(*this, kHashFieldOffset);
-}
+uint32_t Name::hash_field() { return ReadField<uint32_t>(kHashFieldOffset); }
void Name::set_hash_field(uint32_t value) {
- WRITE_UINT32_FIELD(*this, kHashFieldOffset, value);
+ WriteField<uint32_t>(kHashFieldOffset, value);
}
bool Name::Equals(Name other) {
if (other == *this) return true;
- if ((this->IsInternalizedString() && other->IsInternalizedString()) ||
- this->IsSymbol() || other->IsSymbol()) {
+ if ((this->IsInternalizedString() && other.IsInternalizedString()) ||
+ this->IsSymbol() || other.IsSymbol()) {
return false;
}
- return String::cast(*this)->SlowEquals(String::cast(other));
+ return String::cast(*this).SlowEquals(String::cast(other));
}
bool Name::Equals(Isolate* isolate, Handle<Name> one, Handle<Name> two) {
@@ -90,26 +88,26 @@ uint32_t Name::Hash() {
uint32_t field = hash_field();
if (IsHashFieldComputed(field)) return field >> kHashShift;
// Slow case: compute hash code and set it. Has to be a string.
- return String::cast(*this)->ComputeAndSetHash();
+ return String::cast(*this).ComputeAndSetHash();
}
bool Name::IsInterestingSymbol() const {
- return IsSymbol() && Symbol::cast(*this)->is_interesting_symbol();
+ return IsSymbol() && Symbol::cast(*this).is_interesting_symbol();
}
bool Name::IsPrivate() {
- return this->IsSymbol() && Symbol::cast(*this)->is_private();
+ return this->IsSymbol() && Symbol::cast(*this).is_private();
}
bool Name::IsPrivateName() {
bool is_private_name =
- this->IsSymbol() && Symbol::cast(*this)->is_private_name();
+ this->IsSymbol() && Symbol::cast(*this).is_private_name();
DCHECK_IMPLIES(is_private_name, IsPrivate());
return is_private_name;
}
bool Name::AsArrayIndex(uint32_t* index) {
- return IsString() && String::cast(*this)->AsArrayIndex(index);
+ return IsString() && String::cast(*this).AsArrayIndex(index);
}
// static
diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h
index c17f73f775..8b2a8f0a01 100644
--- a/deps/v8/src/objects/name.h
+++ b/deps/v8/src/objects/name.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_NAME_H_
#define V8_OBJECTS_NAME_H_
-#include "src/objects.h"
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "src/objects/objects.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -71,8 +71,6 @@ class Name : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
TORQUE_GENERATED_NAME_FIELDS)
- static const int kHeaderSize = kSize;
-
// Mask constant for checking if a name has a computed hash code
// and if it is a string that is an array index. The least significant bit
// indicates whether a hash code has been computed. If the hash code has
diff --git a/deps/v8/src/objects/object-list-macros.h b/deps/v8/src/objects/object-list-macros.h
new file mode 100644
index 0000000000..78452de502
--- /dev/null
+++ b/deps/v8/src/objects/object-list-macros.h
@@ -0,0 +1,270 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_OBJECT_LIST_MACROS_H_
+#define V8_OBJECTS_OBJECT_LIST_MACROS_H_
+
+namespace v8 {
+namespace internal {
+
+class AbstractCode;
+class AccessorPair;
+class AccessCheckInfo;
+class AllocationSite;
+class ByteArray;
+class CachedTemplateObject;
+class Cell;
+class ClosureFeedbackCellArray;
+class ConsString;
+class DependentCode;
+class ElementsAccessor;
+class EnumCache;
+class FixedArrayBase;
+class FixedDoubleArray;
+class FreeSpace;
+class FunctionLiteral;
+class FunctionTemplateInfo;
+class JSAsyncGeneratorObject;
+class JSGlobalProxy;
+class JSPromise;
+class JSProxy;
+class JSProxyRevocableResult;
+class KeyAccumulator;
+class LayoutDescriptor;
+class LookupIterator;
+class FieldType;
+class Module;
+class ModuleInfoEntry;
+class MutableHeapNumber;
+class ObjectHashTable;
+class ObjectTemplateInfo;
+class ObjectVisitor;
+class PreparseData;
+class PropertyArray;
+class PropertyCell;
+class PropertyDescriptor;
+class PrototypeInfo;
+class ReadOnlyRoots;
+class RegExpMatchInfo;
+class RootVisitor;
+class SafepointEntry;
+class ScriptContextTable;
+class SharedFunctionInfo;
+class StringStream;
+class Symbol;
+class FeedbackCell;
+class FeedbackMetadata;
+class FeedbackVector;
+class UncompiledData;
+class TemplateInfo;
+class TransitionArray;
+class TemplateList;
+class WasmInstanceObject;
+class WasmMemoryObject;
+template <typename T>
+class ZoneForwardList;
+
+#define OBJECT_TYPE_LIST(V) \
+ V(LayoutDescriptor) \
+ V(Primitive) \
+ V(Number) \
+ V(Numeric)
+
+#define HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
+ V(AbstractCode) \
+ V(AccessCheckNeeded) \
+ V(AllocationSite) \
+ V(ArrayList) \
+ V(BigInt) \
+ V(BigIntWrapper) \
+ V(ObjectBoilerplateDescription) \
+ V(Boolean) \
+ V(BooleanWrapper) \
+ V(BreakPoint) \
+ V(BreakPointInfo) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(CachedTemplateObject) \
+ V(CallHandlerInfo) \
+ V(Callable) \
+ V(Cell) \
+ V(ClassBoilerplate) \
+ V(Code) \
+ V(CodeDataContainer) \
+ V(CompilationCacheTable) \
+ V(ConsString) \
+ V(Constructor) \
+ V(Context) \
+ V(CoverageInfo) \
+ V(ClosureFeedbackCellArray) \
+ V(DataHandler) \
+ V(DeoptimizationData) \
+ V(DependentCode) \
+ V(DescriptorArray) \
+ V(EmbedderDataArray) \
+ V(EphemeronHashTable) \
+ V(ExternalOneByteString) \
+ V(ExternalString) \
+ V(ExternalTwoByteString) \
+ V(FeedbackCell) \
+ V(FeedbackMetadata) \
+ V(FeedbackVector) \
+ V(Filler) \
+ V(FixedArray) \
+ V(FixedArrayBase) \
+ V(FixedArrayExact) \
+ V(FixedDoubleArray) \
+ V(Foreign) \
+ V(FrameArray) \
+ V(FreeSpace) \
+ V(Function) \
+ V(GlobalDictionary) \
+ V(HandlerTable) \
+ V(HeapNumber) \
+ V(InternalizedString) \
+ V(JSArgumentsObject) \
+ V(JSArgumentsObjectWithLength) \
+ V(JSArray) \
+ V(JSArrayBuffer) \
+ V(JSArrayBufferView) \
+ V(JSArrayIterator) \
+ V(JSAsyncFromSyncIterator) \
+ V(JSAsyncFunctionObject) \
+ V(JSAsyncGeneratorObject) \
+ V(JSBoundFunction) \
+ V(JSCollection) \
+ V(JSContextExtensionObject) \
+ V(JSDataView) \
+ V(JSDate) \
+ V(JSError) \
+ V(JSFunction) \
+ V(JSGeneratorObject) \
+ V(JSGlobalObject) \
+ V(JSGlobalProxy) \
+ V(JSMap) \
+ V(JSMapIterator) \
+ V(JSMessageObject) \
+ V(JSModuleNamespace) \
+ V(JSObject) \
+ V(JSPromise) \
+ V(JSProxy) \
+ V(JSReceiver) \
+ V(JSRegExp) \
+ V(JSRegExpResult) \
+ V(JSRegExpStringIterator) \
+ V(JSSet) \
+ V(JSSetIterator) \
+ V(JSSloppyArgumentsObject) \
+ V(JSStringIterator) \
+ V(JSTypedArray) \
+ V(JSValue) \
+ V(JSWeakRef) \
+ V(JSWeakCollection) \
+ V(JSFinalizationGroup) \
+ V(JSFinalizationGroupCleanupIterator) \
+ V(JSWeakMap) \
+ V(JSWeakSet) \
+ V(LoadHandler) \
+ V(Map) \
+ V(MapCache) \
+ V(Microtask) \
+ V(ModuleInfo) \
+ V(MutableHeapNumber) \
+ V(Name) \
+ V(NameDictionary) \
+ V(NativeContext) \
+ V(NormalizedMapCache) \
+ V(NumberDictionary) \
+ V(NumberWrapper) \
+ V(ObjectHashSet) \
+ V(ObjectHashTable) \
+ V(Oddball) \
+ V(OrderedHashMap) \
+ V(OrderedHashSet) \
+ V(OrderedNameDictionary) \
+ V(PreparseData) \
+ V(PromiseReactionJobTask) \
+ V(PropertyArray) \
+ V(PropertyCell) \
+ V(PropertyDescriptorObject) \
+ V(RegExpMatchInfo) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ V(ScriptWrapper) \
+ V(SeqOneByteString) \
+ V(SeqString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SimpleNumberDictionary) \
+ V(SlicedString) \
+ V(SloppyArgumentsElements) \
+ V(SmallOrderedHashMap) \
+ V(SmallOrderedHashSet) \
+ V(SmallOrderedNameDictionary) \
+ V(StoreHandler) \
+ V(String) \
+ V(StringSet) \
+ V(StringTable) \
+ V(StringWrapper) \
+ V(Struct) \
+ V(Symbol) \
+ V(SymbolWrapper) \
+ V(TemplateInfo) \
+ V(TemplateList) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(UncompiledData) \
+ V(UncompiledDataWithPreparseData) \
+ V(UncompiledDataWithoutPreparseData) \
+ V(Undetectable) \
+ V(UniqueName) \
+ V(WasmExceptionObject) \
+ V(WasmGlobalObject) \
+ V(WasmInstanceObject) \
+ V(WasmMemoryObject) \
+ V(WasmModuleObject) \
+ V(WasmTableObject) \
+ V(WeakFixedArray) \
+ V(WeakArrayList) \
+ V(WeakCell)
+
+#ifdef V8_INTL_SUPPORT
+#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
+ HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
+ V(JSV8BreakIterator) \
+ V(JSCollator) \
+ V(JSDateTimeFormat) \
+ V(JSListFormat) \
+ V(JSLocale) \
+ V(JSNumberFormat) \
+ V(JSPluralRules) \
+ V(JSRelativeTimeFormat) \
+ V(JSSegmentIterator) \
+ V(JSSegmenter)
+#else
+#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V)
+#endif // V8_INTL_SUPPORT
+
+#define HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) V(HashTable)
+
+#define HEAP_OBJECT_TYPE_LIST(V) \
+ HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
+ HEAP_OBJECT_TEMPLATE_TYPE_LIST(V)
+
+#define ODDBALL_LIST(V) \
+ V(Undefined, undefined_value) \
+ V(Null, null_value) \
+ V(TheHole, the_hole_value) \
+ V(Exception, exception) \
+ V(Uninitialized, uninitialized_value) \
+ V(True, true_value) \
+ V(False, false_value) \
+ V(ArgumentsMarker, arguments_marker) \
+ V(OptimizedOut, optimized_out) \
+ V(StaleRegister, stale_register)
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_OBJECT_LIST_MACROS_H_
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index b5d076564d..c8ebf57ce7 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -57,46 +57,26 @@
#undef RELAXED_WRITE_WEAK_FIELD
#undef WRITE_BARRIER
#undef WEAK_WRITE_BARRIER
+#undef EPHEMERON_KEY_WRITE_BARRIER
#undef CONDITIONAL_WRITE_BARRIER
#undef CONDITIONAL_WEAK_WRITE_BARRIER
-#undef READ_DOUBLE_FIELD
-#undef WRITE_DOUBLE_FIELD
-#undef READ_INT_FIELD
-#undef WRITE_INT_FIELD
+#undef CONDITIONAL_EPHEMERON_KEY_WRITE_BARRIER
#undef ACQUIRE_READ_INT32_FIELD
-#undef READ_UINT8_FIELD
-#undef WRITE_UINT8_FIELD
#undef RELAXED_WRITE_INT8_FIELD
-#undef READ_INT8_FIELD
#undef RELAXED_READ_INT8_FIELD
-#undef WRITE_INT8_FIELD
-#undef READ_UINT16_FIELD
-#undef WRITE_UINT16_FIELD
-#undef READ_INT16_FIELD
-#undef WRITE_INT16_FIELD
#undef RELAXED_READ_INT16_FIELD
#undef RELAXED_WRITE_INT16_FIELD
-#undef READ_UINT32_FIELD
#undef RELAXED_READ_UINT32_FIELD
-#undef WRITE_UINT32_FIELD
#undef RELAXED_WRITE_UINT32_FIELD
-#undef READ_INT32_FIELD
#undef RELAXED_READ_INT32_FIELD
-#undef WRITE_INT32_FIELD
#undef RELEASE_WRITE_INT32_FIELD
#undef RELAXED_WRITE_INT32_FIELD
-#undef READ_FLOAT_FIELD
-#undef WRITE_FLOAT_FIELD
-#undef READ_INTPTR_FIELD
-#undef WRITE_INTPTR_FIELD
-#undef READ_UINTPTR_FIELD
-#undef WRITE_UINTPTR_FIELD
-#undef READ_UINT64_FIELD
-#undef WRITE_UINT64_FIELD
-#undef READ_BYTE_FIELD
#undef RELAXED_READ_BYTE_FIELD
-#undef WRITE_BYTE_FIELD
#undef RELAXED_WRITE_BYTE_FIELD
+#undef DECL_PRINTER
#undef DECL_VERIFIER
+#undef EXPORT_DECL_VERIFIER
#undef DEFINE_DEOPT_ELEMENT_ACCESSORS
#undef DEFINE_DEOPT_ENTRY_ACCESSORS
+#undef TQ_OBJECT_CONSTRUCTORS
+#undef TQ_OBJECT_CONSTRUCTORS_IMPL
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index da5c157bbc..1f499d4fba 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -14,15 +14,13 @@
// for fields that can be written to and read from multiple threads at the same
// time. See comments in src/base/atomicops.h for the memory ordering sematics.
-#include <src/v8memory.h>
+#include "src/common/v8memory.h"
// Since this changes visibility, it should always be last in a class
// definition.
#define OBJECT_CONSTRUCTORS(Type, ...) \
public: \
constexpr Type() : __VA_ARGS__() {} \
- Type* operator->() { return this; } \
- const Type* operator->() const { return this; } \
\
protected: \
explicit inline Type(Address ptr)
@@ -80,15 +78,13 @@
#define CAST_ACCESSOR(Type) \
Type Type::cast(Object object) { return Type(object.ptr()); }
-#define INT_ACCESSORS(holder, name, offset) \
- int holder::name() const { return READ_INT_FIELD(*this, offset); } \
- void holder::set_##name(int value) { WRITE_INT_FIELD(*this, offset, value); }
+#define INT_ACCESSORS(holder, name, offset) \
+ int holder::name() const { return ReadField<int>(offset); } \
+ void holder::set_##name(int value) { WriteField<int>(offset, value); }
-#define INT32_ACCESSORS(holder, name, offset) \
- int32_t holder::name() const { return READ_INT32_FIELD(*this, offset); } \
- void holder::set_##name(int32_t value) { \
- WRITE_INT32_FIELD(*this, offset, value); \
- }
+#define INT32_ACCESSORS(holder, name, offset) \
+ int32_t holder::name() const { return ReadField<int32_t>(offset); } \
+ void holder::set_##name(int32_t value) { WriteField<int32_t>(offset, value); }
#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
int32_t holder::name() const { \
@@ -98,20 +94,20 @@
RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
}
-#define UINT16_ACCESSORS(holder, name, offset) \
- uint16_t holder::name() const { return READ_UINT16_FIELD(*this, offset); } \
- void holder::set_##name(int value) { \
- DCHECK_GE(value, 0); \
- DCHECK_LE(value, static_cast<uint16_t>(-1)); \
- WRITE_UINT16_FIELD(*this, offset, value); \
+#define UINT16_ACCESSORS(holder, name, offset) \
+ uint16_t holder::name() const { return ReadField<uint16_t>(offset); } \
+ void holder::set_##name(int value) { \
+ DCHECK_GE(value, 0); \
+ DCHECK_LE(value, static_cast<uint16_t>(-1)); \
+ WriteField<uint16_t>(offset, value); \
}
-#define UINT8_ACCESSORS(holder, name, offset) \
- uint8_t holder::name() const { return READ_UINT8_FIELD(*this, offset); } \
- void holder::set_##name(int value) { \
- DCHECK_GE(value, 0); \
- DCHECK_LE(value, static_cast<uint8_t>(-1)); \
- WRITE_UINT8_FIELD(*this, offset, value); \
+#define UINT8_ACCESSORS(holder, name, offset) \
+ uint8_t holder::name() const { return ReadField<uint8_t>(offset); } \
+ void holder::set_##name(int value) { \
+ DCHECK_GE(value, 0); \
+ DCHECK_LE(value, static_cast<uint8_t>(-1)); \
+ WriteField<uint8_t>(offset, value); \
}
#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
@@ -227,9 +223,9 @@
return instance_type == forinstancetype; \
}
-#define TYPE_CHECKER(type, ...) \
- bool HeapObject::Is##type() const { \
- return InstanceTypeChecker::Is##type(map()->instance_type()); \
+#define TYPE_CHECKER(type, ...) \
+ bool HeapObject::Is##type() const { \
+ return InstanceTypeChecker::Is##type(map().instance_type()); \
}
#define RELAXED_INT16_ACCESSORS(holder, name, offset) \
@@ -276,50 +272,50 @@
#define RELAXED_WRITE_WEAK_FIELD(p, offset, value) \
MaybeObjectSlot(FIELD_ADDR(p, offset)).Relaxed_Store(value)
-#define WRITE_BARRIER(object, offset, value) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- MarkingBarrier(object, (object)->RawField(offset), value); \
- GenerationalBarrier(object, (object)->RawField(offset), value); \
+#define WRITE_BARRIER(object, offset, value) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ MarkingBarrier(object, (object).RawField(offset), value); \
+ GenerationalBarrier(object, (object).RawField(offset), value); \
} while (false)
-#define WEAK_WRITE_BARRIER(object, offset, value) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
- GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
+#define WEAK_WRITE_BARRIER(object, offset, value) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ MarkingBarrier(object, (object).RawMaybeWeakField(offset), value); \
+ GenerationalBarrier(object, (object).RawMaybeWeakField(offset), value); \
} while (false)
-#define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- EphemeronHashTable table = EphemeronHashTable::cast(object); \
- MarkingBarrier(object, (object)->RawField(offset), value); \
- GenerationalEphemeronKeyBarrier(table, (object)->RawField(offset), value); \
+#define EPHEMERON_KEY_WRITE_BARRIER(object, offset, value) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ EphemeronHashTable table = EphemeronHashTable::cast(object); \
+ MarkingBarrier(object, (object).RawField(offset), value); \
+ GenerationalEphemeronKeyBarrier(table, (object).RawField(offset), value); \
} while (false)
-#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER); \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- MarkingBarrier(object, (object)->RawField(offset), value); \
- } \
- GenerationalBarrier(object, (object)->RawField(offset), value); \
- } \
+#define CONDITIONAL_WRITE_BARRIER(object, offset, value, mode) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER); \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ MarkingBarrier(object, (object).RawField(offset), value); \
+ } \
+ GenerationalBarrier(object, (object).RawField(offset), value); \
+ } \
} while (false)
-#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
- do { \
- DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
- DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER); \
- if (mode != SKIP_WRITE_BARRIER) { \
- if (mode == UPDATE_WRITE_BARRIER) { \
- MarkingBarrier(object, (object)->RawMaybeWeakField(offset), value); \
- } \
- GenerationalBarrier(object, (object)->RawMaybeWeakField(offset), value); \
- } \
+#define CONDITIONAL_WEAK_WRITE_BARRIER(object, offset, value, mode) \
+ do { \
+ DCHECK_NOT_NULL(GetHeapFromWritableObject(object)); \
+ DCHECK_NE(mode, UPDATE_EPHEMERON_KEY_WRITE_BARRIER); \
+ if (mode != SKIP_WRITE_BARRIER) { \
+ if (mode == UPDATE_WRITE_BARRIER) { \
+ MarkingBarrier(object, (object).RawMaybeWeakField(offset), value); \
+ } \
+ GenerationalBarrier(object, (object).RawMaybeWeakField(offset), value); \
+ } \
} while (false)
#define CONDITIONAL_EPHEMERON_KEY_WRITE_BARRIER(object, offset, value, mode) \
@@ -329,60 +325,24 @@
EphemeronHashTable table = EphemeronHashTable::cast(object); \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
- MarkingBarrier(object, (object)->RawField(offset), value); \
+ MarkingBarrier(object, (object).RawField(offset), value); \
} \
- GenerationalEphemeronKeyBarrier(table, (object)->RawField(offset), \
+ GenerationalEphemeronKeyBarrier(table, (object).RawField(offset), \
value); \
} \
} while (false)
-#define READ_DOUBLE_FIELD(p, offset) ReadDoubleValue(FIELD_ADDR(p, offset))
-
-#define WRITE_DOUBLE_FIELD(p, offset, value) \
- WriteDoubleValue(FIELD_ADDR(p, offset), value)
-
-#define READ_INT_FIELD(p, offset) \
- (*reinterpret_cast<const int*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INT_FIELD(p, offset, value) \
- (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
-
#define ACQUIRE_READ_INT32_FIELD(p, offset) \
static_cast<int32_t>(base::Acquire_Load( \
reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
-#define READ_UINT8_FIELD(p, offset) \
- (*reinterpret_cast<const uint8_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINT8_FIELD(p, offset, value) \
- (*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)
-
#define RELAXED_WRITE_INT8_FIELD(p, offset, value) \
base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
-
-#define READ_INT8_FIELD(p, offset) \
- (*reinterpret_cast<const int8_t*>(FIELD_ADDR(p, offset)))
-
#define RELAXED_READ_INT8_FIELD(p, offset) \
static_cast<int8_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
-#define WRITE_INT8_FIELD(p, offset, value) \
- (*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT16_FIELD(p, offset) \
- (*reinterpret_cast<const uint16_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINT16_FIELD(p, offset, value) \
- (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INT16_FIELD(p, offset) \
- (*reinterpret_cast<const int16_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INT16_FIELD(p, offset, value) \
- (*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)
-
#define RELAXED_READ_INT16_FIELD(p, offset) \
static_cast<int16_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic16*>(FIELD_ADDR(p, offset))))
@@ -392,31 +352,19 @@
reinterpret_cast<base::Atomic16*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic16>(value));
-#define READ_UINT32_FIELD(p, offset) \
- (*reinterpret_cast<const uint32_t*>(FIELD_ADDR(p, offset)))
-
#define RELAXED_READ_UINT32_FIELD(p, offset) \
static_cast<uint32_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
-#define WRITE_UINT32_FIELD(p, offset, value) \
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
-
#define RELAXED_WRITE_UINT32_FIELD(p, offset, value) \
base::Relaxed_Store( \
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic32>(value));
-#define READ_INT32_FIELD(p, offset) \
- (*reinterpret_cast<const int32_t*>(FIELD_ADDR(p, offset)))
-
#define RELAXED_READ_INT32_FIELD(p, offset) \
static_cast<int32_t>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic32*>(FIELD_ADDR(p, offset))))
-#define WRITE_INT32_FIELD(p, offset, value) \
- (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
-
#define RELEASE_WRITE_INT32_FIELD(p, offset, value) \
base::Release_Store( \
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
@@ -427,72 +375,20 @@
reinterpret_cast<base::Atomic32*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic32>(value));
-#define READ_FLOAT_FIELD(p, offset) \
- (*reinterpret_cast<const float*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_FLOAT_FIELD(p, offset, value) \
- (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
-
-// TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size fields
-// (external pointers, doubles and BigInt data) are only kTaggedSize aligned so
-// we have to use unaligned pointer friendly way of accessing them in order to
-// avoid undefined behavior in C++ code.
-#ifdef V8_COMPRESS_POINTERS
-
-#define READ_INTPTR_FIELD(p, offset) \
- ReadUnalignedValue<intptr_t>(FIELD_ADDR(p, offset))
-
-#define WRITE_INTPTR_FIELD(p, offset, value) \
- WriteUnalignedValue<intptr_t>(FIELD_ADDR(p, offset), value)
-
-#define READ_UINTPTR_FIELD(p, offset) \
- ReadUnalignedValue<uintptr_t>(FIELD_ADDR(p, offset))
-
-#define WRITE_UINTPTR_FIELD(p, offset, value) \
- WriteUnalignedValue<uintptr_t>(FIELD_ADDR(p, offset), value)
-
-#define READ_UINT64_FIELD(p, offset) \
- ReadUnalignedValue<uint64_t>(FIELD_ADDR(p, offset))
-
-#define WRITE_UINT64_FIELD(p, offset, value) \
- WriteUnalignedValue<uint64_t>(FIELD_ADDR(p, offset), value)
-
-#else // V8_COMPRESS_POINTERS
-
-#define READ_INTPTR_FIELD(p, offset) \
- (*reinterpret_cast<const intptr_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INTPTR_FIELD(p, offset, value) \
- (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINTPTR_FIELD(p, offset) \
- (*reinterpret_cast<const uintptr_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINTPTR_FIELD(p, offset, value) \
- (*reinterpret_cast<uintptr_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_UINT64_FIELD(p, offset) \
- (*reinterpret_cast<const uint64_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_UINT64_FIELD(p, offset, value) \
- (*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)
-
-#endif // V8_COMPRESS_POINTERS
-
-#define READ_BYTE_FIELD(p, offset) \
- (*reinterpret_cast<const byte*>(FIELD_ADDR(p, offset)))
-
#define RELAXED_READ_BYTE_FIELD(p, offset) \
static_cast<byte>(base::Relaxed_Load( \
reinterpret_cast<const base::Atomic8*>(FIELD_ADDR(p, offset))))
-#define WRITE_BYTE_FIELD(p, offset, value) \
- (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
-
#define RELAXED_WRITE_BYTE_FIELD(p, offset, value) \
base::Relaxed_Store(reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
+#ifdef OBJECT_PRINT
+#define DECL_PRINTER(Name) void Name##Print(std::ostream& os); // NOLINT
+#else
+#define DECL_PRINTER(Name)
+#endif
+
#ifdef VERIFY_HEAP
#define DECL_VERIFIER(Name) void Name##Verify(Isolate* isolate);
#define EXPORT_DECL_VERIFIER(Name) \
@@ -515,3 +411,15 @@
void DeoptimizationData::Set##name(int i, type value) { \
set(IndexForEntry(i) + k##name##Offset, value); \
}
+
+#define TQ_OBJECT_CONSTRUCTORS(Type) \
+ public: \
+ constexpr Type() = default; \
+ \
+ protected: \
+ inline explicit Type(Address ptr); \
+ friend class TorqueGenerated##Type<Type, Super>;
+
+#define TQ_OBJECT_CONSTRUCTORS_IMPL(Type) \
+ inline Type::Type(Address ptr) \
+ : TorqueGenerated##Type<Type, Type::Super>(ptr) {}
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 7c828495ff..8626165647 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -2,24 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
-#define V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
+#ifndef V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_INL_H_
+#define V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_INL_H_
-#include "src/objects-body-descriptors.h"
+#include "src/objects/objects-body-descriptors.h"
#include <algorithm>
-#include "src/feedback-vector.h"
+#include "src/codegen/reloc-info.h"
#include "src/objects/cell.h"
#include "src/objects/data-handler.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-weak-refs.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table.h"
-#include "src/reloc-info.h"
-#include "src/transitions.h"
+#include "src/objects/transitions.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
@@ -27,13 +27,13 @@ namespace internal {
template <int start_offset>
int FlexibleBodyDescriptor<start_offset>::SizeOf(Map map, HeapObject object) {
- return object->SizeFromMap(map);
+ return object.SizeFromMap(map);
}
template <int start_offset>
int FlexibleWeakBodyDescriptor<start_offset>::SizeOf(Map map,
HeapObject object) {
- return object->SizeFromMap(map);
+ return object.SizeFromMap(map);
}
bool BodyDescriptorBase::IsValidJSObjectSlotImpl(Map map, HeapObject obj,
@@ -41,7 +41,7 @@ bool BodyDescriptorBase::IsValidJSObjectSlotImpl(Map map, HeapObject obj,
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
int embedder_fields_offset = JSObject::GetEmbedderFieldsStartOffset(map);
- int inobject_fields_offset = map->GetInObjectPropertyOffset(0);
+ int inobject_fields_offset = map.GetInObjectPropertyOffset(0);
// |embedder_fields_offset| may be greater than |inobject_fields_offset| if
// the object does not have embedder fields but the check handles this
// case properly.
@@ -57,7 +57,7 @@ bool BodyDescriptorBase::IsValidJSObjectSlotImpl(Map map, HeapObject obj,
// embedder field area as tagged slots.
STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
#endif
- if (!FLAG_unbox_double_fields || map->HasFastPointerLayout()) {
+ if (!FLAG_unbox_double_fields || map.HasFastPointerLayout()) {
return true;
} else {
DCHECK(FLAG_unbox_double_fields);
@@ -77,7 +77,7 @@ void BodyDescriptorBase::IterateJSObjectBodyImpl(Map map, HeapObject obj,
#ifdef V8_COMPRESS_POINTERS
STATIC_ASSERT(kEmbedderDataSlotSize == 2 * kTaggedSize);
int header_size = JSObject::GetHeaderSize(map);
- int inobject_fields_offset = map->GetInObjectPropertyOffset(0);
+ int inobject_fields_offset = map.GetInObjectPropertyOffset(0);
// We are always requested to process header and embedder fields.
DCHECK_LE(inobject_fields_offset, end_offset);
// Embedder fields are located between header and inobject properties.
@@ -98,7 +98,7 @@ void BodyDescriptorBase::IterateJSObjectBodyImpl(Map map, HeapObject obj,
// embedder field area as tagged slots.
STATIC_ASSERT(kEmbedderDataSlotSize == kTaggedSize);
#endif
- if (!FLAG_unbox_double_fields || map->HasFastPointerLayout()) {
+ if (!FLAG_unbox_double_fields || map.HasFastPointerLayout()) {
IteratePointers(obj, start_offset, end_offset, v);
} else {
DCHECK(FLAG_unbox_double_fields);
@@ -183,7 +183,7 @@ class JSObject::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
@@ -202,7 +202,7 @@ class JSObject::FastBodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
@@ -221,7 +221,7 @@ class WeakCell::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
@@ -241,15 +241,18 @@ class JSWeakRef::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
class SharedFunctionInfo::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- return FixedBodyDescriptor<kStartOfPointerFieldsOffset,
- kEndOfTaggedFieldsOffset,
+ static_assert(kEndOfWeakFieldsOffset == kStartOfStrongFieldsOffset,
+ "Leverage that strong fields directly follow weak fields"
+ "to call FixedBodyDescriptor<...>::IsValidSlot below");
+ return FixedBodyDescriptor<kStartOfWeakFieldsOffset,
+ kEndOfStrongFieldsOffset,
kAlignedSize>::IsValidSlot(map, obj, offset);
}
@@ -258,11 +261,11 @@ class SharedFunctionInfo::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {
IterateCustomWeakPointer(obj, kFunctionDataOffset, v);
IteratePointers(obj, SharedFunctionInfo::kStartOfStrongFieldsOffset,
- SharedFunctionInfo::kEndOfTaggedFieldsOffset, v);
+ SharedFunctionInfo::kEndOfStrongFieldsOffset, v);
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
@@ -281,7 +284,7 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
return true;
}
// check for weak_next offset
- if (map->instance_size() == AllocationSite::kSizeWithWeakNext &&
+ if (map.instance_size() == AllocationSite::kSizeWithWeakNext &&
offset == AllocationSite::kWeakNextOffset) {
return true;
}
@@ -303,7 +306,7 @@ class AllocationSite::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
@@ -324,14 +327,16 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
-class JSArrayBufferView::BodyDescriptor final : public BodyDescriptorBase {
+class JSTypedArray::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
if (offset < kEndOfTaggedFieldsOffset) return true;
+ // TODO(v8:4153): Remove this.
+ if (offset == kBasePointerOffset) return true;
if (offset < kHeaderSize) return false;
return IsValidJSObjectSlotImpl(map, obj, offset);
}
@@ -339,13 +344,36 @@ class JSArrayBufferView::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- // JSArrayBufferView contains raw data that the GC does not know about.
+ // JSTypedArray contains raw data that the GC does not know about.
+ IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
+ // TODO(v8:4153): Remove this.
+ IteratePointer(obj, kBasePointerOffset, v);
+ IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) {
+ return map.instance_size();
+ }
+};
+
+class JSDataView::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ if (offset < kEndOfTaggedFieldsOffset) return true;
+ if (offset < kHeaderSize) return false;
+ return IsValidJSObjectSlotImpl(map, obj, offset);
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ // JSDataView contains raw data that the GC does not know about.
IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
@@ -357,7 +385,7 @@ class V8_EXPORT_PRIVATE SmallOrderedHashTable<Derived>::BodyDescriptor final
Derived table = Derived::cast(obj);
// Only data table part contains tagged values.
return (offset >= DataTableStartOffset()) &&
- (offset < table->GetBucketsStartOffset());
+ (offset < table.GetBucketsStartOffset());
}
template <typename ObjectVisitor>
@@ -365,13 +393,13 @@ class V8_EXPORT_PRIVATE SmallOrderedHashTable<Derived>::BodyDescriptor final
ObjectVisitor* v) {
Derived table = Derived::cast(obj);
int start_offset = DataTableStartOffset();
- int end_offset = table->GetBucketsStartOffset();
+ int end_offset = table.GetBucketsStartOffset();
IteratePointers(obj, start_offset, end_offset, v);
}
static inline int SizeOf(Map map, HeapObject obj) {
Derived table = Derived::cast(obj);
- return table->SizeFor(table->Capacity());
+ return table.SizeFor(table.Capacity());
}
};
@@ -384,7 +412,7 @@ class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {}
static inline int SizeOf(Map map, HeapObject obj) {
- return ByteArray::SizeFor(ByteArray::cast(obj)->synchronized_length());
+ return ByteArray::SizeFor(ByteArray::cast(obj).synchronized_length());
}
};
@@ -405,7 +433,7 @@ class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject obj) {
return BytecodeArray::SizeFor(
- BytecodeArray::cast(obj)->synchronized_length());
+ BytecodeArray::cast(obj).synchronized_length());
}
};
@@ -418,7 +446,7 @@ class BigInt::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {}
static inline int SizeOf(Map map, HeapObject obj) {
- return BigInt::SizeFor(BigInt::cast(obj)->synchronized_length());
+ return BigInt::SizeFor(BigInt::cast(obj).synchronized_length());
}
};
@@ -432,24 +460,7 @@ class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject obj) {
return FixedDoubleArray::SizeFor(
- FixedDoubleArray::cast(obj)->synchronized_length());
- }
-};
-
-class FixedTypedArrayBase::BodyDescriptor final : public BodyDescriptorBase {
- public:
- static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- return offset == kBasePointerOffset;
- }
-
- template <typename ObjectVisitor>
- static inline void IterateBody(Map map, HeapObject obj, int object_size,
- ObjectVisitor* v) {
- IteratePointer(obj, kBasePointerOffset, v);
- }
-
- static inline int SizeOf(Map map, HeapObject object) {
- return FixedTypedArrayBase::cast(object)->size();
+ FixedDoubleArray::cast(obj).synchronized_length());
}
};
@@ -463,7 +474,7 @@ class FeedbackMetadata::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject obj) {
return FeedbackMetadata::SizeFor(
- FeedbackMetadata::cast(obj)->synchronized_slot_count());
+ FeedbackMetadata::cast(obj).synchronized_slot_count());
}
};
@@ -471,7 +482,7 @@ class FeedbackVector::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return offset == kSharedFunctionInfoOffset ||
- offset == kOptimizedCodeOffset ||
+ offset == kOptimizedCodeWeakOrSmiOffset ||
offset == kClosureFeedbackCellArrayOffset ||
offset >= kFeedbackSlotsOffset;
}
@@ -480,34 +491,34 @@ class FeedbackVector::BodyDescriptor final : public BodyDescriptorBase {
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointer(obj, kSharedFunctionInfoOffset, v);
- IterateMaybeWeakPointer(obj, kOptimizedCodeOffset, v);
+ IterateMaybeWeakPointer(obj, kOptimizedCodeWeakOrSmiOffset, v);
IteratePointer(obj, kClosureFeedbackCellArrayOffset, v);
IterateMaybeWeakPointers(obj, kFeedbackSlotsOffset, object_size, v);
}
static inline int SizeOf(Map map, HeapObject obj) {
- return FeedbackVector::SizeFor(FeedbackVector::cast(obj)->length());
+ return FeedbackVector::SizeFor(FeedbackVector::cast(obj).length());
}
};
class PreparseData::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- return offset >= PreparseData::cast(obj)->inner_start_offset();
+ return offset >= PreparseData::cast(obj).inner_start_offset();
}
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
PreparseData data = PreparseData::cast(obj);
- int start_offset = data->inner_start_offset();
- int end_offset = start_offset + data->children_length() * kTaggedSize;
+ int start_offset = data.inner_start_offset();
+ int end_offset = start_offset + data.children_length() * kTaggedSize;
IteratePointers(obj, start_offset, end_offset, v);
}
static inline int SizeOf(Map map, HeapObject obj) {
PreparseData data = PreparseData::cast(obj);
- return PreparseData::SizeFor(data->data_length(), data->children_length());
+ return PreparseData::SizeFor(data.data_length(), data.children_length());
}
};
@@ -526,13 +537,13 @@ class PrototypeInfo::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject obj) {
- return obj->SizeFromMap(map);
+ return obj.SizeFromMap(map);
}
};
class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
public:
- STATIC_ASSERT(kTableOffset + kTaggedSize == kSize);
+ STATIC_ASSERT(kTableOffset + kTaggedSize == kSizeOfAllWeakCollections);
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
return IsValidJSObjectSlotImpl(map, obj, offset);
@@ -545,7 +556,7 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
@@ -604,7 +615,8 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
static constexpr int kRelocModeMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
@@ -627,7 +639,7 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return Code::unchecked_cast(object)->CodeSize();
+ return Code::unchecked_cast(object).CodeSize();
}
};
@@ -641,7 +653,7 @@ class SeqOneByteString::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject obj) {
SeqOneByteString string = SeqOneByteString::cast(obj);
- return string->SizeFor(string->synchronized_length());
+ return string.SizeFor(string.synchronized_length());
}
};
@@ -655,7 +667,7 @@ class SeqTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject obj) {
SeqTwoByteString string = SeqTwoByteString::cast(obj);
- return string->SizeFor(string->synchronized_length());
+ return string.SizeFor(string.synchronized_length());
}
};
@@ -685,25 +697,27 @@ class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return map->instance_size();
+ return map.instance_size();
}
};
class Map::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
- return offset >= Map::kPointerFieldsBeginOffset &&
- offset < Map::kPointerFieldsEndOffset;
+ static_assert(
+ Map::kEndOfStrongFieldsOffset == Map::kStartOfWeakFieldsOffset,
+ "Leverage that weak fields directly follow strong fields for the "
+ "check below");
+ return offset >= Map::kStartOfStrongFieldsOffset &&
+ offset < Map::kEndOfWeakFieldsOffset;
}
template <typename ObjectVisitor>
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
- IteratePointers(obj, Map::kPointerFieldsBeginOffset,
- Map::kTransitionsOrPrototypeInfoOffset, v);
+ IteratePointers(obj, Map::kStartOfStrongFieldsOffset,
+ Map::kEndOfStrongFieldsOffset, v);
IterateMaybeWeakPointer(obj, kTransitionsOrPrototypeInfoOffset, v);
- IteratePointers(obj, Map::kTransitionsOrPrototypeInfoOffset + kTaggedSize,
- Map::kPointerFieldsEndOffset, v);
}
static inline int SizeOf(Map map, HeapObject obj) { return Map::kSize; }
@@ -727,7 +741,7 @@ class DataHandler::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return object->SizeFromMap(map);
+ return object.SizeFromMap(map);
}
};
@@ -811,7 +825,7 @@ class EmbedderDataArray::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return object->SizeFromMap(map);
+ return object.SizeFromMap(map);
}
};
@@ -1013,13 +1027,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
case BIGINT_TYPE:
return ReturnType();
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- return Op::template apply<FixedTypedArrayBase::BodyDescriptor>(p1, p2, p3, \
- p4);
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
case SHARED_FUNCTION_INFO_TYPE: {
return Op::template apply<SharedFunctionInfo::BodyDescriptor>(p1, p2, p3,
p4);
@@ -1033,6 +1040,9 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
if (type == PROTOTYPE_INFO_TYPE) {
return Op::template apply<PrototypeInfo::BodyDescriptor>(p1, p2, p3,
p4);
+ } else if (type == WASM_CAPI_FUNCTION_DATA_TYPE) {
+ return Op::template apply<WasmCapiFunctionData::BodyDescriptor>(p1, p2,
+ p3, p4);
} else {
return Op::template apply<StructBodyDescriptor>(p1, p2, p3, p4);
}
@@ -1047,21 +1057,18 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
}
}
-
template <typename ObjectVisitor>
void HeapObject::IterateFast(ObjectVisitor* v) {
BodyDescriptorBase::IteratePointer(*this, kMapOffset, v);
IterateBodyFast(v);
}
-
template <typename ObjectVisitor>
void HeapObject::IterateBodyFast(ObjectVisitor* v) {
Map m = map();
IterateBodyFast(m, SizeFromMap(m), v);
}
-
struct CallIterateBody {
template <typename BodyDescriptor, typename ObjectVisitor>
static void apply(Map map, HeapObject obj, int object_size,
@@ -1072,7 +1079,7 @@ struct CallIterateBody {
template <typename ObjectVisitor>
void HeapObject::IterateBodyFast(Map map, int object_size, ObjectVisitor* v) {
- BodyDescriptorApply<CallIterateBody, void>(map->instance_type(), map, *this,
+ BodyDescriptorApply<CallIterateBody, void>(map.instance_type(), map, *this,
object_size, v);
}
@@ -1099,11 +1106,11 @@ class EphemeronHashTable::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return object->SizeFromMap(map);
+ return object.SizeFromMap(map);
}
};
} // namespace internal
} // namespace v8
-#endif // V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
+#endif // V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_INL_H_
diff --git a/deps/v8/src/objects-body-descriptors.h b/deps/v8/src/objects/objects-body-descriptors.h
index 4b0377a3fd..728708f436 100644
--- a/deps/v8/src/objects-body-descriptors.h
+++ b/deps/v8/src/objects/objects-body-descriptors.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_BODY_DESCRIPTORS_H_
-#define V8_OBJECTS_BODY_DESCRIPTORS_H_
+#ifndef V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_H_
+#define V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_H_
-#include "src/objects.h"
#include "src/objects/map.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -74,7 +74,6 @@ class BodyDescriptorBase {
ObjectVisitor* v);
};
-
// This class describes a body of an object of a fixed size
// in which all pointer fields are located in the [start_offset, end_offset)
// interval.
@@ -103,7 +102,6 @@ class FixedBodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
-
// This class describes a body of an object of a variable size
// in which all pointer fields are located in the [start_offset, object_size)
// interval.
@@ -125,7 +123,7 @@ class FlexibleBodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject object);
};
-typedef FlexibleBodyDescriptor<HeapObject::kHeaderSize> StructBodyDescriptor;
+using StructBodyDescriptor = FlexibleBodyDescriptor<HeapObject::kHeaderSize>;
template <int start_offset>
class FlexibleWeakBodyDescriptor final : public BodyDescriptorBase {
@@ -185,4 +183,4 @@ class SubclassBodyDescriptor final : public BodyDescriptorBase {
} // namespace internal
} // namespace v8
-#endif // V8_OBJECTS_BODY_DESCRIPTORS_H_
+#endif // V8_OBJECTS_OBJECTS_BODY_DESCRIPTORS_H_
diff --git a/deps/v8/src/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 97c57f8058..90824c68ef 100644
--- a/deps/v8/src/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_DEFINITIONS_H_
-#define V8_OBJECTS_DEFINITIONS_H_
+#ifndef V8_OBJECTS_OBJECTS_DEFINITIONS_H_
+#define V8_OBJECTS_OBJECTS_DEFINITIONS_H_
-#include "src/heap-symbols.h"
+#include "src/init/heap-symbols.h"
namespace v8 {
@@ -71,18 +71,6 @@ namespace internal {
V(BYTECODE_ARRAY_TYPE) \
V(FREE_SPACE_TYPE) \
\
- V(FIXED_INT8_ARRAY_TYPE) \
- V(FIXED_UINT8_ARRAY_TYPE) \
- V(FIXED_INT16_ARRAY_TYPE) \
- V(FIXED_UINT16_ARRAY_TYPE) \
- V(FIXED_INT32_ARRAY_TYPE) \
- V(FIXED_UINT32_ARRAY_TYPE) \
- V(FIXED_FLOAT32_ARRAY_TYPE) \
- V(FIXED_FLOAT64_ARRAY_TYPE) \
- V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
- V(FIXED_BIGINT64_ARRAY_TYPE) \
- V(FIXED_BIGUINT64_ARRAY_TYPE) \
- \
V(FIXED_DOUBLE_ARRAY_TYPE) \
V(FEEDBACK_METADATA_TYPE) \
V(FILLER_TYPE) \
@@ -108,14 +96,18 @@ namespace internal {
V(PROMISE_REACTION_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
V(SCRIPT_TYPE) \
+ V(SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE) \
V(STACK_FRAME_INFO_TYPE) \
V(STACK_TRACE_FRAME_TYPE) \
+ V(TEMPLATE_OBJECT_DESCRIPTION_TYPE) \
V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
V(ARRAY_BOILERPLATE_DESCRIPTION_TYPE) \
+ V(WASM_CAPI_FUNCTION_DATA_TYPE) \
V(WASM_DEBUG_INFO_TYPE) \
V(WASM_EXCEPTION_TAG_TYPE) \
V(WASM_EXPORTED_FUNCTION_DATA_TYPE) \
+ V(WASM_JS_FUNCTION_DATA_TYPE) \
\
V(CALLABLE_TASK_TYPE) \
V(CALLBACK_TASK_TYPE) \
@@ -297,51 +289,58 @@ namespace internal {
// Note that for subtle reasons related to the ordering or numerical values of
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
-#define STRUCT_LIST_GENERATOR(V, _) \
- V(_, ACCESS_CHECK_INFO_TYPE, AccessCheckInfo, access_check_info) \
- V(_, ACCESSOR_INFO_TYPE, AccessorInfo, accessor_info) \
- V(_, ACCESSOR_PAIR_TYPE, AccessorPair, accessor_pair) \
- V(_, ALIASED_ARGUMENTS_ENTRY_TYPE, AliasedArgumentsEntry, \
- aliased_arguments_entry) \
- V(_, ALLOCATION_MEMENTO_TYPE, AllocationMemento, allocation_memento) \
- V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
- V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
- async_generator_request) \
- V(_, CLASS_POSITIONS_TYPE, ClassPositions, class_positions) \
- V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
- V(_, ENUM_CACHE_TYPE, EnumCache, enum_cache) \
- V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
- function_template_info) \
- V(_, FUNCTION_TEMPLATE_RARE_DATA_TYPE, FunctionTemplateRareData, \
- function_template_rare_data) \
- V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
- V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
- V(_, MODULE_INFO_ENTRY_TYPE, ModuleInfoEntry, module_info_entry) \
- V(_, MODULE_TYPE, Module, module) \
- V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
- V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
- V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
- V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \
- V(_, SCRIPT_TYPE, Script, script) \
- V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
- V(_, STACK_TRACE_FRAME_TYPE, StackTraceFrame, stack_trace_frame) \
- V(_, TUPLE2_TYPE, Tuple2, tuple2) \
- V(_, TUPLE3_TYPE, Tuple3, tuple3) \
- V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
- array_boilerplate_description) \
- V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \
- V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
- V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
- wasm_exported_function_data) \
- V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
- V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
- V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \
- promise_fulfill_reaction_job_task) \
- V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \
- promise_reject_reaction_job_task) \
- V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
- promise_resolve_thenable_job_task) \
- V(_, FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE, \
+#define STRUCT_LIST_GENERATOR(V, _) \
+ V(_, ACCESS_CHECK_INFO_TYPE, AccessCheckInfo, access_check_info) \
+ V(_, ACCESSOR_INFO_TYPE, AccessorInfo, accessor_info) \
+ V(_, ACCESSOR_PAIR_TYPE, AccessorPair, accessor_pair) \
+ V(_, ALIASED_ARGUMENTS_ENTRY_TYPE, AliasedArgumentsEntry, \
+ aliased_arguments_entry) \
+ V(_, ALLOCATION_MEMENTO_TYPE, AllocationMemento, allocation_memento) \
+ V(_, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
+ V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
+ async_generator_request) \
+ V(_, CLASS_POSITIONS_TYPE, ClassPositions, class_positions) \
+ V(_, DEBUG_INFO_TYPE, DebugInfo, debug_info) \
+ V(_, ENUM_CACHE_TYPE, EnumCache, enum_cache) \
+ V(_, FUNCTION_TEMPLATE_INFO_TYPE, FunctionTemplateInfo, \
+ function_template_info) \
+ V(_, FUNCTION_TEMPLATE_RARE_DATA_TYPE, FunctionTemplateRareData, \
+ function_template_rare_data) \
+ V(_, INTERCEPTOR_INFO_TYPE, InterceptorInfo, interceptor_info) \
+ V(_, INTERPRETER_DATA_TYPE, InterpreterData, interpreter_data) \
+ V(_, MODULE_INFO_ENTRY_TYPE, ModuleInfoEntry, module_info_entry) \
+ V(_, MODULE_TYPE, Module, module) \
+ V(_, OBJECT_TEMPLATE_INFO_TYPE, ObjectTemplateInfo, object_template_info) \
+ V(_, PROMISE_CAPABILITY_TYPE, PromiseCapability, promise_capability) \
+ V(_, PROMISE_REACTION_TYPE, PromiseReaction, promise_reaction) \
+ V(_, PROTOTYPE_INFO_TYPE, PrototypeInfo, prototype_info) \
+ V(_, SCRIPT_TYPE, Script, script) \
+ V(_, SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE, \
+ SourcePositionTableWithFrameCache, source_position_table_with_frame_cache) \
+ V(_, STACK_FRAME_INFO_TYPE, StackFrameInfo, stack_frame_info) \
+ V(_, STACK_TRACE_FRAME_TYPE, StackTraceFrame, stack_trace_frame) \
+ V(_, TEMPLATE_OBJECT_DESCRIPTION_TYPE, TemplateObjectDescription, \
+ template_object_description) \
+ V(_, TUPLE2_TYPE, Tuple2, tuple2) \
+ V(_, TUPLE3_TYPE, Tuple3, tuple3) \
+ V(_, ARRAY_BOILERPLATE_DESCRIPTION_TYPE, ArrayBoilerplateDescription, \
+ array_boilerplate_description) \
+ V(_, WASM_CAPI_FUNCTION_DATA_TYPE, WasmCapiFunctionData, \
+ wasm_capi_function_data) \
+ V(_, WASM_DEBUG_INFO_TYPE, WasmDebugInfo, wasm_debug_info) \
+ V(_, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
+ V(_, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
+ wasm_exported_function_data) \
+ V(_, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, wasm_js_function_data) \
+ V(_, CALLABLE_TASK_TYPE, CallableTask, callable_task) \
+ V(_, CALLBACK_TASK_TYPE, CallbackTask, callback_task) \
+ V(_, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE, PromiseFulfillReactionJobTask, \
+ promise_fulfill_reaction_job_task) \
+ V(_, PROMISE_REJECT_REACTION_JOB_TASK_TYPE, PromiseRejectReactionJobTask, \
+ promise_reject_reaction_job_task) \
+ V(_, PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, PromiseResolveThenableJobTask, \
+ promise_resolve_thenable_job_task) \
+ V(_, FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE, \
FinalizationGroupCleanupJobTask, finalization_group_cleanup_job_task)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
@@ -398,4 +397,4 @@ namespace internal {
} // namespace internal
} // namespace v8
-#endif // V8_OBJECTS_DEFINITIONS_H_
+#endif // V8_OBJECTS_OBJECTS_DEFINITIONS_H_
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects/objects-inl.h
index 0112f74156..ce92d64f2f 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects/objects-inl.h
@@ -9,36 +9,38 @@
// code. gcc is not happy when attempting to inline too deep.
//
-#ifndef V8_OBJECTS_INL_H_
-#define V8_OBJECTS_INL_H_
+#ifndef V8_OBJECTS_OBJECTS_INL_H_
+#define V8_OBJECTS_OBJECTS_INL_H_
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/base/bits.h"
-#include "src/base/tsan.h"
#include "src/builtins/builtins.h"
-#include "src/conversions.h"
-#include "src/double.h"
-#include "src/handles-inl.h"
+#include "src/common/v8memory.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/keys.h"
-#include "src/lookup-inl.h" // TODO(jkummerow): Drop.
+#include "src/numbers/conversions.h"
+#include "src/numbers/double.h"
#include "src/objects/bigint.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-proxy-inl.h" // TODO(jkummerow): Drop.
+#include "src/objects/keys.h"
#include "src/objects/literal-objects.h"
+#include "src/objects/lookup-inl.h" // TODO(jkummerow): Drop.
#include "src/objects/oddball.h"
+#include "src/objects/property-details.h"
+#include "src/objects/property.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi-inl.h"
+#include "src/objects/tagged-impl-inl.h"
#include "src/objects/templates.h"
-#include "src/property-details.h"
-#include "src/property.h"
-#include "src/v8memory.h"
+#include "src/sanitizer/tsan.h"
+#include "torque-generated/class-definitions-tq-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -46,7 +48,7 @@
namespace v8 {
namespace internal {
-PropertyDetails::PropertyDetails(Smi smi) { value_ = smi->value(); }
+PropertyDetails::PropertyDetails(Smi smi) { value_ = smi.value(); }
Smi PropertyDetails::AsSmi() const {
// Ensure the upper 2 bits have the same value by sign extending it. This is
@@ -55,7 +57,6 @@ Smi PropertyDetails::AsSmi() const {
return Smi::FromInt(value >> 1);
}
-
int PropertyDetails::field_width_in_words() const {
DCHECK_EQ(location(), kField);
if (!FLAG_unbox_double_fields) return 1;
@@ -72,7 +73,7 @@ bool HeapObject::IsJSSloppyArgumentsObject() const {
}
bool HeapObject::IsJSGeneratorObject() const {
- return map()->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
+ return map().instance_type() == JS_GENERATOR_OBJECT_TYPE ||
IsJSAsyncFunctionObject() || IsJSAsyncGeneratorObject();
}
@@ -82,29 +83,29 @@ bool HeapObject::IsDataHandler() const {
bool HeapObject::IsClassBoilerplate() const { return IsFixedArrayExact(); }
-#define IS_TYPE_FUNCTION_DEF(type_) \
- bool Object::Is##type_() const { \
- return IsHeapObject() && HeapObject::cast(*this)->Is##type_(); \
+#define IS_TYPE_FUNCTION_DEF(type_) \
+ bool Object::Is##type_() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##type_(); \
}
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
-#define IS_TYPE_FUNCTION_DEF(Type, Value) \
- bool Object::Is##Type(Isolate* isolate) const { \
- return Is##Type(ReadOnlyRoots(isolate->heap())); \
- } \
- bool Object::Is##Type(ReadOnlyRoots roots) const { \
- return *this == roots.Value(); \
- } \
- bool Object::Is##Type() const { \
- return IsHeapObject() && HeapObject::cast(*this)->Is##Type(); \
- } \
- bool HeapObject::Is##Type(Isolate* isolate) const { \
- return Object::Is##Type(isolate); \
- } \
- bool HeapObject::Is##Type(ReadOnlyRoots roots) const { \
- return Object::Is##Type(roots); \
- } \
+#define IS_TYPE_FUNCTION_DEF(Type, Value) \
+ bool Object::Is##Type(Isolate* isolate) const { \
+ return Is##Type(ReadOnlyRoots(isolate->heap())); \
+ } \
+ bool Object::Is##Type(ReadOnlyRoots roots) const { \
+ return *this == roots.Value(); \
+ } \
+ bool Object::Is##Type() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Type(); \
+ } \
+ bool HeapObject::Is##Type(Isolate* isolate) const { \
+ return Object::Is##Type(isolate); \
+ } \
+ bool HeapObject::Is##Type(ReadOnlyRoots roots) const { \
+ return Object::Is##Type(roots); \
+ } \
bool HeapObject::Is##Type() const { return Is##Type(GetReadOnlyRoots()); }
ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
@@ -118,7 +119,13 @@ bool Object::IsNullOrUndefined(ReadOnlyRoots roots) const {
}
bool Object::IsNullOrUndefined() const {
- return IsHeapObject() && HeapObject::cast(*this)->IsNullOrUndefined();
+ return IsHeapObject() && HeapObject::cast(*this).IsNullOrUndefined();
+}
+
+bool Object::IsZero() const { return *this == Smi::zero(); }
+
+bool Object::IsNoSharedNameSentinel() const {
+ return *this == SharedFunctionInfo::kNoSharedNameSentinel;
}
bool HeapObject::IsNullOrUndefined(Isolate* isolate) const {
@@ -139,12 +146,12 @@ bool HeapObject::IsUniqueName() const {
bool HeapObject::IsFunction() const {
STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- return map()->instance_type() >= FIRST_FUNCTION_TYPE;
+ return map().instance_type() >= FIRST_FUNCTION_TYPE;
}
-bool HeapObject::IsCallable() const { return map()->is_callable(); }
+bool HeapObject::IsCallable() const { return map().is_callable(); }
-bool HeapObject::IsConstructor() const { return map()->is_constructor(); }
+bool HeapObject::IsConstructor() const { return map().is_constructor(); }
bool HeapObject::IsModuleInfo() const {
return map() == GetReadOnlyRoots().module_info_map();
@@ -177,13 +184,13 @@ bool HeapObject::IsSeqString() const {
bool HeapObject::IsSeqOneByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(*this)).IsSequential() &&
- String::cast(*this)->IsOneByteRepresentation();
+ String::cast(*this).IsOneByteRepresentation();
}
bool HeapObject::IsSeqTwoByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(*this)).IsSequential() &&
- String::cast(*this)->IsTwoByteRepresentation();
+ String::cast(*this).IsTwoByteRepresentation();
}
bool HeapObject::IsExternalString() const {
@@ -194,13 +201,13 @@ bool HeapObject::IsExternalString() const {
bool HeapObject::IsExternalOneByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(*this)).IsExternal() &&
- String::cast(*this)->IsOneByteRepresentation();
+ String::cast(*this).IsOneByteRepresentation();
}
bool HeapObject::IsExternalTwoByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(*this)).IsExternal() &&
- String::cast(*this)->IsTwoByteRepresentation();
+ String::cast(*this).IsTwoByteRepresentation();
}
bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
@@ -208,7 +215,7 @@ bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
bool Object::IsNumeric() const { return IsNumber() || IsBigInt(); }
bool HeapObject::IsFiller() const {
- InstanceType instance_type = map()->instance_type();
+ InstanceType instance_type = map().instance_type();
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
@@ -241,7 +248,7 @@ bool HeapObject::IsDeoptimizationData() const {
// a deoptimization data array. Since this is used for asserts we can
// check that the length is zero or else the fixed size plus a multiple of
// the entry size.
- int length = FixedArray::cast(*this)->length();
+ int length = FixedArray::cast(*this).length();
if (length == 0) return true;
length -= DeoptimizationData::kFirstDeoptEntryIndex;
@@ -259,7 +266,7 @@ bool HeapObject::IsTemplateList() const {
if (!IsFixedArrayExact()) return false;
// There's actually no way to see the difference between a fixed array and
// a template list.
- if (FixedArray::cast(*this)->length() < 1) return false;
+ if (FixedArray::cast(*this).length() < 1) return false;
return true;
}
@@ -275,27 +282,27 @@ bool HeapObject::IsAbstractCode() const {
}
bool HeapObject::IsStringWrapper() const {
- return IsJSValue() && JSValue::cast(*this)->value()->IsString();
+ return IsJSValue() && JSValue::cast(*this).value().IsString();
}
bool HeapObject::IsBooleanWrapper() const {
- return IsJSValue() && JSValue::cast(*this)->value()->IsBoolean();
+ return IsJSValue() && JSValue::cast(*this).value().IsBoolean();
}
bool HeapObject::IsScriptWrapper() const {
- return IsJSValue() && JSValue::cast(*this)->value()->IsScript();
+ return IsJSValue() && JSValue::cast(*this).value().IsScript();
}
bool HeapObject::IsNumberWrapper() const {
- return IsJSValue() && JSValue::cast(*this)->value()->IsNumber();
+ return IsJSValue() && JSValue::cast(*this).value().IsNumber();
}
bool HeapObject::IsBigIntWrapper() const {
- return IsJSValue() && JSValue::cast(*this)->value()->IsBigInt();
+ return IsJSValue() && JSValue::cast(*this).value().IsBigInt();
}
bool HeapObject::IsSymbolWrapper() const {
- return IsJSValue() && JSValue::cast(*this)->value()->IsSymbol();
+ return IsJSValue() && JSValue::cast(*this).value().IsSymbol();
}
bool HeapObject::IsJSArrayBufferView() const {
@@ -320,7 +327,7 @@ bool Object::IsSmallOrderedHashTable() const {
}
bool Object::IsPrimitive() const {
- return IsSmi() || HeapObject::cast(*this)->map()->IsPrimitiveMap();
+ return IsSmi() || HeapObject::cast(*this).map().IsPrimitiveMap();
}
// static
@@ -332,19 +339,19 @@ Maybe<bool> Object::IsArray(Handle<Object> object) {
return JSProxy::IsArray(Handle<JSProxy>::cast(object));
}
-bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }
+bool HeapObject::IsUndetectable() const { return map().is_undetectable(); }
bool HeapObject::IsAccessCheckNeeded() const {
if (IsJSGlobalProxy()) {
const JSGlobalProxy proxy = JSGlobalProxy::cast(*this);
- JSGlobalObject global = proxy->GetIsolate()->context()->global_object();
- return proxy->IsDetachedFrom(global);
+ JSGlobalObject global = proxy.GetIsolate()->context().global_object();
+ return proxy.IsDetachedFrom(global);
}
- return map()->is_access_check_needed();
+ return map().is_access_check_needed();
}
bool HeapObject::IsStruct() const {
- switch (map()->instance_type()) {
+ switch (map().instance_type()) {
#define MAKE_STRUCT_CASE(TYPE, Name, name) \
case TYPE: \
return true;
@@ -367,18 +374,18 @@ bool HeapObject::IsStruct() const {
}
}
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() const { \
- return IsHeapObject() && HeapObject::cast(*this)->Is##Name(); \
- } \
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() const { \
+ return IsHeapObject() && HeapObject::cast(*this).Is##Name(); \
+ } \
TYPE_CHECKER(Name)
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
double Object::Number() const {
DCHECK(IsNumber());
- return IsSmi() ? static_cast<double>(Smi(this->ptr())->value())
- : HeapNumber::unchecked_cast(*this)->value();
+ return IsSmi() ? static_cast<double>(Smi(this->ptr()).value())
+ : HeapNumber::unchecked_cast(*this).value();
}
// static
@@ -392,12 +399,12 @@ bool Object::SameNumberValue(double value1, double value2) {
}
bool Object::IsNaN() const {
- return this->IsHeapNumber() && std::isnan(HeapNumber::cast(*this)->value());
+ return this->IsHeapNumber() && std::isnan(HeapNumber::cast(*this).value());
}
bool Object::IsMinusZero() const {
return this->IsHeapNumber() &&
- i::IsMinusZero(HeapNumber::cast(*this)->value());
+ i::IsMinusZero(HeapNumber::cast(*this).value());
}
OBJECT_CONSTRUCTORS_IMPL(RegExpMatchInfo, FixedArray)
@@ -414,19 +421,20 @@ CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
bool Object::HasValidElements() {
- // Dictionary is covered under FixedArray.
- return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
+ // Dictionary is covered under FixedArray. ByteArray is used
+ // for the JSTypedArray backing stores.
+ return IsFixedArray() || IsFixedDoubleArray() || IsByteArray();
}
bool Object::FilterKey(PropertyFilter filter) {
DCHECK(!IsPropertyCell());
if (filter == PRIVATE_NAMES_ONLY) {
if (!IsSymbol()) return true;
- return !Symbol::cast(*this)->is_private_name();
+ return !Symbol::cast(*this).is_private_name();
} else if (IsSymbol()) {
if (filter & SKIP_SYMBOLS) return true;
- if (Symbol::cast(*this)->is_private()) return true;
+ if (Symbol::cast(*this).is_private()) return true;
} else {
if (filter & SKIP_STRINGS) return true;
}
@@ -449,14 +457,12 @@ Representation Object::OptimalRepresentation() {
}
}
-
ElementsKind Object::OptimalElementsKind() {
if (IsSmi()) return PACKED_SMI_ELEMENTS;
if (IsNumber()) return PACKED_DOUBLE_ELEMENTS;
return PACKED_ELEMENTS;
}
-
bool Object::FitsRepresentation(Representation representation) {
if (FLAG_track_fields && representation.IsSmi()) {
return IsSmi();
@@ -478,7 +484,7 @@ bool Object::ToUint32(uint32_t* value) const {
return true;
}
if (IsHeapNumber()) {
- double num = HeapNumber::cast(*this)->value();
+ double num = HeapNumber::cast(*this).value();
return DoubleToUint32IfEqualToSelf(num, value);
}
return false;
@@ -492,7 +498,6 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
return ToObjectImpl(isolate, object, method_name);
}
-
// static
MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
if (input->IsName()) return Handle<Name>::cast(input);
@@ -502,7 +507,7 @@ MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
// static
MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
Handle<Object> value) {
- if (value->IsSmi() || HeapObject::cast(*value)->IsName()) return value;
+ if (value->IsSmi() || HeapObject::cast(*value).IsName()) return value;
return ConvertToPropertyKey(isolate, value);
}
@@ -539,7 +544,7 @@ MaybeHandle<Object> Object::ToInt32(Isolate* isolate, Handle<Object> input) {
// static
MaybeHandle<Object> Object::ToUint32(Isolate* isolate, Handle<Object> input) {
- if (input->IsSmi()) return handle(Smi::cast(*input)->ToUint32Smi(), isolate);
+ if (input->IsSmi()) return handle(Smi::cast(*input).ToUint32Smi(), isolate);
return ConvertToUint32(isolate, input);
}
@@ -603,7 +608,7 @@ Map MapWord::ToMap() const { return Map::unchecked_cast(Object(value_)); }
bool MapWord::IsForwardingAddress() const { return HAS_SMI_TAG(value_); }
MapWord MapWord::FromForwardingAddress(HeapObject object) {
- return MapWord(object->ptr() - kHeapObjectTag);
+ return MapWord(object.ptr() - kHeapObjectTag);
}
HeapObject MapWord::ToForwardingAddress() {
@@ -624,16 +629,14 @@ void HeapObject::VerifyMaybeObjectField(Isolate* isolate, int offset) {
}
void HeapObject::VerifySmiField(int offset) {
- CHECK(READ_FIELD(*this, offset)->IsSmi());
+ CHECK(READ_FIELD(*this, offset).IsSmi());
STATIC_ASSERT(!COMPRESS_POINTERS_BOOL || kTaggedSize == kInt32Size);
}
#endif
ReadOnlyRoots HeapObject::GetReadOnlyRoots() const {
- // TODO(v8:7464): When RO_SPACE is embedded, this will access a global
- // variable instead.
- return ReadOnlyRoots(GetHeapFromWritableObject(*this));
+ return ReadOnlyHeap::GetReadOnlyRoots(*this);
}
Map HeapObject::map() const { return map_word().ToMap(); }
@@ -670,7 +673,6 @@ void HeapObject::synchronized_set_map(Map value) {
}
}
-
// Unsafe accessor omitting write barrier.
void HeapObject::set_map_no_write_barrier(Map value) {
if (!value.is_null()) {
@@ -703,7 +705,6 @@ void HeapObject::set_map_word(MapWord map_word) {
map_slot().Relaxed_Store(Object(map_word.value_));
}
-
MapWord HeapObject::synchronized_map_word() const {
return MapWord(map_slot().Acquire_Load().ptr());
}
@@ -745,21 +746,6 @@ bool Object::ToArrayIndex(uint32_t* index) const {
return Object::ToUint32(index) && *index != kMaxUInt32;
}
-bool Object::GetHeapObjectIfStrong(HeapObject* result) const {
- return GetHeapObject(result);
-}
-
-bool Object::GetHeapObject(HeapObject* result) const {
- if (!IsHeapObject()) return false;
- *result = HeapObject::cast(*this);
- return true;
-}
-
-HeapObject Object::GetHeapObject() const {
- DCHECK(IsHeapObject());
- return HeapObject::cast(*this);
-}
-
int RegExpMatchInfo::NumberOfCaptureRegisters() {
DCHECK_GE(length(), kLastMatchOverhead);
Object obj = get(kNumberOfCapturesIndex);
@@ -809,6 +795,9 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
// static
AllocationAlignment HeapObject::RequiredAlignment(Map map) {
+ // TODO(bmeurer, v8:4153): We should think about requiring double alignment
+ // in general for ByteArray, since they are used as backing store for typed
+ // arrays now.
#ifdef V8_COMPRESS_POINTERS
// TODO(ishell, v8:8875): Consider using aligned allocations once the
// allocation alignment inconsistency is fixed. For now we keep using
@@ -816,11 +805,8 @@ AllocationAlignment HeapObject::RequiredAlignment(Map map) {
// compression is supported) allow unaligned access to doubles and full words.
#endif // V8_COMPRESS_POINTERS
#ifdef V8_HOST_ARCH_32_BIT
- int instance_type = map->instance_type();
- if (instance_type == FIXED_FLOAT64_ARRAY_TYPE ||
- instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
- return kDoubleAligned;
- }
+ int instance_type = map.instance_type();
+ if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) return kDoubleAligned;
if (instance_type == HEAP_NUMBER_TYPE) return kDoubleUnaligned;
#endif // V8_HOST_ARCH_32_BIT
return kWordAligned;
@@ -847,7 +833,6 @@ Maybe<bool> Object::GreaterThan(Isolate* isolate, Handle<Object> x,
return Nothing<bool>();
}
-
// static
Maybe<bool> Object::GreaterThanOrEqual(Isolate* isolate, Handle<Object> x,
Handle<Object> y) {
@@ -865,7 +850,6 @@ Maybe<bool> Object::GreaterThanOrEqual(Isolate* isolate, Handle<Object> x,
return Nothing<bool>();
}
-
// static
Maybe<bool> Object::LessThan(Isolate* isolate, Handle<Object> x,
Handle<Object> y) {
@@ -883,7 +867,6 @@ Maybe<bool> Object::LessThan(Isolate* isolate, Handle<Object> x,
return Nothing<bool>();
}
-
// static
Maybe<bool> Object::LessThanOrEqual(Isolate* isolate, Handle<Object> x,
Handle<Object> y) {
@@ -928,12 +911,12 @@ MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
// static
Object Object::GetSimpleHash(Object object) {
DisallowHeapAllocation no_gc;
- if (object->IsSmi()) {
+ if (object.IsSmi()) {
uint32_t hash = ComputeUnseededHash(Smi::ToInt(object));
return Smi::FromInt(hash & Smi::kMaxValue);
}
- if (object->IsHeapNumber()) {
- double num = HeapNumber::cast(object)->value();
+ if (object.IsHeapNumber()) {
+ double num = HeapNumber::cast(object).value();
if (std::isnan(num)) return Smi::FromInt(Smi::kMaxValue);
// Use ComputeUnseededHash for all values in Signed32 range, including -0,
// which is considered equal to 0 because collections use SameValueZero.
@@ -946,34 +929,34 @@ Object Object::GetSimpleHash(Object object) {
}
return Smi::FromInt(hash & Smi::kMaxValue);
}
- if (object->IsName()) {
- uint32_t hash = Name::cast(object)->Hash();
+ if (object.IsName()) {
+ uint32_t hash = Name::cast(object).Hash();
return Smi::FromInt(hash);
}
- if (object->IsOddball()) {
- uint32_t hash = Oddball::cast(object)->to_string()->Hash();
+ if (object.IsOddball()) {
+ uint32_t hash = Oddball::cast(object).to_string().Hash();
return Smi::FromInt(hash);
}
- if (object->IsBigInt()) {
- uint32_t hash = BigInt::cast(object)->Hash();
+ if (object.IsBigInt()) {
+ uint32_t hash = BigInt::cast(object).Hash();
return Smi::FromInt(hash & Smi::kMaxValue);
}
- if (object->IsSharedFunctionInfo()) {
- uint32_t hash = SharedFunctionInfo::cast(object)->Hash();
+ if (object.IsSharedFunctionInfo()) {
+ uint32_t hash = SharedFunctionInfo::cast(object).Hash();
return Smi::FromInt(hash & Smi::kMaxValue);
}
- DCHECK(object->IsJSReceiver());
+ DCHECK(object.IsJSReceiver());
return object;
}
Object Object::GetHash() {
DisallowHeapAllocation no_gc;
Object hash = GetSimpleHash(*this);
- if (hash->IsSmi()) return hash;
+ if (hash.IsSmi()) return hash;
DCHECK(IsJSReceiver());
JSReceiver receiver = JSReceiver::cast(*this);
- return receiver->GetIdentityHash();
+ return receiver.GetIdentityHash();
}
Handle<Object> ObjectHashTableShape::AsHandle(Handle<Object> key) {
@@ -986,7 +969,6 @@ Relocatable::Relocatable(Isolate* isolate) {
isolate->set_relocatable_top(this);
}
-
Relocatable::~Relocatable() {
DCHECK_EQ(isolate_->relocatable_top(), this);
isolate_->set_relocatable_top(prev_);
@@ -1045,8 +1027,8 @@ FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS
FreshlyAllocatedBigInt FreshlyAllocatedBigInt::cast(Object object) {
- SLOW_DCHECK(object->IsBigInt());
- return FreshlyAllocatedBigInt(object->ptr());
+ SLOW_DCHECK(object.IsBigInt());
+ return FreshlyAllocatedBigInt(object.ptr());
}
} // namespace internal
@@ -1054,4 +1036,4 @@ FreshlyAllocatedBigInt FreshlyAllocatedBigInt::cast(Object object) {
#include "src/objects/object-macros-undef.h"
-#endif // V8_OBJECTS_INL_H_
+#endif // V8_OBJECTS_OBJECTS_INL_H_
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects/objects.cc
index 7786073ba7..8cc22fa0e5 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include <algorithm>
#include <cmath>
@@ -10,47 +10,38 @@
#include <sstream>
#include <vector>
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
-#include "src/accessors.h"
-#include "src/allocation-site-scopes.h"
-#include "src/api-arguments-inl.h"
-#include "src/api-natives.h"
-#include "src/api.h"
-#include "src/arguments.h"
+#include "src/api/api-arguments-inl.h"
+#include "src/api/api-natives.h"
+#include "src/api/api.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/bits.h"
+#include "src/base/debug/stack_trace.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/bootstrapper.h"
+#include "src/builtins/accessors.h"
#include "src/builtins/builtins.h"
-#include "src/compiler.h"
-#include "src/counters-inl.h"
-#include "src/counters.h"
-#include "src/date.h"
+#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
+#include "src/date/date.h"
#include "src/debug/debug.h"
-#include "src/elements.h"
-#include "src/execution.h"
-#include "src/field-index-inl.h"
-#include "src/field-index.h"
-#include "src/field-type.h"
-#include "src/frames-inl.h"
-#include "src/function-kind.h"
-#include "src/globals.h"
+#include "src/execution/arguments.h"
+#include "src/execution/execution.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
+#include "src/execution/microtask-queue.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/ic/ic.h"
-#include "src/identity-map.h"
-#include "src/isolate-inl.h"
-#include "src/keys.h"
-#include "src/log.h"
-#include "src/lookup-inl.h"
-#include "src/map-updater.h"
-#include "src/message-template.h"
-#include "src/microtask-queue.h"
-#include "src/objects-body-descriptors-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/objects/allocation-site-inl.h"
+#include "src/objects/allocation-site-scopes.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/bigint.h"
@@ -58,12 +49,22 @@
#include "src/objects/code-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/elements.h"
#include "src/objects/embedder-data-array-inl.h"
+#include "src/objects/field-index-inl.h"
+#include "src/objects/field-index.h"
+#include "src/objects/field-type.h"
#include "src/objects/foreign.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/free-space-inl.h"
+#include "src/objects/function-kind.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/keys.h"
+#include "src/objects/lookup-inl.h"
+#include "src/objects/map-updater.h"
+#include "src/objects/objects-body-descriptors-inl.h"
+#include "src/utils/identity-map.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/js-break-iterator.h"
#include "src/objects/js-collator.h"
@@ -86,6 +87,7 @@
#include "src/objects/js-segment-iterator.h"
#include "src/objects/js-segmenter.h"
#endif // V8_INTL_SUPPORT
+#include "src/codegen/source-position-table.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/map-inl.h"
@@ -93,23 +95,23 @@
#include "src/objects/microtask-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/objects/property-descriptor.h"
+#include "src/objects/prototype.h"
#include "src/objects/slots-atomic-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/string-comparator.h"
#include "src/objects/struct-inl.h"
-#include "src/ostreams.h"
+#include "src/objects/template-objects-inl.h"
+#include "src/objects/transitions-inl.h"
#include "src/parsing/preparse-data.h"
-#include "src/property-descriptor.h"
-#include "src/prototype.h"
#include "src/regexp/jsregexp.h"
-#include "src/source-position-table.h"
-#include "src/string-builder-inl.h"
-#include "src/string-search.h"
-#include "src/string-stream.h"
-#include "src/transitions-inl.h"
-#include "src/unicode-decoder.h"
-#include "src/unicode-inl.h"
-#include "src/utils-inl.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-search.h"
+#include "src/strings/string-stream.h"
+#include "src/strings/unicode-decoder.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/utils-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
@@ -120,7 +122,7 @@ namespace internal {
ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw) {
if (should_throw.IsJust()) return should_throw.FromJust();
- LanguageMode mode = isolate->context()->scope_info()->language_mode();
+ LanguageMode mode = isolate->context().scope_info().language_mode();
if (mode == LanguageMode::kStrict) return kThrowOnError;
for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
@@ -131,7 +133,7 @@ ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw) {
JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(it.frame());
std::vector<SharedFunctionInfo> functions;
js_frame->GetFunctions(&functions);
- LanguageMode closure_language_mode = functions.back()->language_mode();
+ LanguageMode closure_language_mode = functions.back().language_mode();
if (closure_language_mode > mode) {
mode = closure_language_mode;
}
@@ -176,7 +178,7 @@ Handle<FieldType> Object::OptimalType(Isolate* isolate,
if (FLAG_track_field_types) {
if (representation.IsHeapObject() && IsHeapObject()) {
// We can track only JavaScript objects with stable maps.
- Handle<Map> map(HeapObject::cast(*this)->map(), isolate);
+ Handle<Map> map(HeapObject::cast(*this).map(), isolate);
if (map->is_stable() && map->IsJSReceiverMap()) {
return FieldType::Class(map, isolate);
}
@@ -193,8 +195,7 @@ Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
result->set_value_as_bits(kHoleNanInt64);
} else if (object->IsMutableHeapNumber()) {
// Ensure that all bits of the double value are preserved.
- result->set_value_as_bits(
- MutableHeapNumber::cast(*object)->value_as_bits());
+ result->set_value_as_bits(MutableHeapNumber::cast(*object).value_as_bits());
} else {
result->set_value(object->Number());
}
@@ -209,7 +210,7 @@ Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
return object;
}
return isolate->factory()->NewHeapNumber(
- MutableHeapNumber::cast(*object)->value());
+ MutableHeapNumber::cast(*object).value());
}
MaybeHandle<JSReceiver> Object::ToObjectImpl(Isolate* isolate,
@@ -222,7 +223,7 @@ MaybeHandle<JSReceiver> Object::ToObjectImpl(Isolate* isolate,
constructor = handle(native_context->number_function(), isolate);
} else {
int constructor_function_index =
- Handle<HeapObject>::cast(object)->map()->GetConstructorFunctionIndex();
+ Handle<HeapObject>::cast(object)->map().GetConstructorFunctionIndex();
if (constructor_function_index == Map::kNoConstructorFunctionIndex) {
if (method_name != nullptr) {
THROW_NEW_ERROR(
@@ -281,8 +282,9 @@ MaybeHandle<Object> Object::ConvertToNumberOrNumeric(Isolate* isolate,
Object);
}
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
- ToPrimitiveHint::kNumber),
+ isolate, input,
+ JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
+ ToPrimitiveHint::kNumber),
Object);
}
}
@@ -313,7 +315,7 @@ MaybeHandle<Object> Object::ConvertToUint32(Isolate* isolate,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, input,
ConvertToNumberOrNumeric(isolate, input, Conversion::kToNumber), Object);
- if (input->IsSmi()) return handle(Smi::cast(*input)->ToUint32Smi(), isolate);
+ if (input->IsSmi()) return handle(Smi::cast(*input).ToUint32Smi(), isolate);
return isolate->factory()->NewNumberFromUint(DoubleToUint32(input->Number()));
}
@@ -370,8 +372,9 @@ MaybeHandle<String> Object::ConvertToString(Isolate* isolate,
return BigInt::ToString(isolate, Handle<BigInt>::cast(input));
}
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
- ToPrimitiveHint::kString),
+ isolate, input,
+ JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
+ ToPrimitiveHint::kString),
String);
// The previous isString() check happened in Object::ToString and thus we
// put it at the end of the loop in this helper.
@@ -466,7 +469,7 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
IncrementalStringBuilder builder(isolate);
builder.AppendCString("Symbol(");
- if (symbol->name()->IsString()) {
+ if (symbol->name().IsString()) {
builder.AppendString(handle(String::cast(symbol->name()), isolate));
}
builder.AppendCharacter(')');
@@ -520,7 +523,7 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
// This is the only case where Object::ToObject throws.
DCHECK(!input->IsSmi());
int constructor_function_index =
- Handle<HeapObject>::cast(input)->map()->GetConstructorFunctionIndex();
+ Handle<HeapObject>::cast(input)->map().GetConstructorFunctionIndex();
if (constructor_function_index == Map::kNoConstructorFunctionIndex) {
return isolate->factory()->NewStringFromAsciiChecked("[object Unknown]");
}
@@ -580,9 +583,9 @@ bool Object::BooleanValue(Isolate* isolate) {
if (IsBoolean()) return IsTrue(isolate);
if (IsNullOrUndefined(isolate)) return false;
if (IsUndetectable()) return false; // Undetectable object is false.
- if (IsString()) return String::cast(*this)->length() != 0;
- if (IsHeapNumber()) return DoubleToBoolean(HeapNumber::cast(*this)->value());
- if (IsBigInt()) return BigInt::cast(*this)->ToBoolean();
+ if (IsString()) return String::cast(*this).length() != 0;
+ if (IsHeapNumber()) return DoubleToBoolean(HeapNumber::cast(*this).value());
+ if (IsBigInt()) return BigInt::cast(*this).ToBoolean();
return true;
}
@@ -616,7 +619,7 @@ bool StrictNumberEquals(double x, double y) {
}
bool StrictNumberEquals(const Object x, const Object y) {
- return StrictNumberEquals(x->Number(), y->Number());
+ return StrictNumberEquals(x.Number(), y.Number());
}
bool StrictNumberEquals(Handle<Object> x, Handle<Object> y) {
@@ -676,7 +679,6 @@ Maybe<ComparisonResult> Object::Compare(Isolate* isolate, Handle<Object> x,
}
}
-
// static
Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
Handle<Object> y) {
@@ -781,13 +783,13 @@ Maybe<bool> Object::Equals(Isolate* isolate, Handle<Object> x,
bool Object::StrictEquals(Object that) {
if (this->IsNumber()) {
- if (!that->IsNumber()) return false;
+ if (!that.IsNumber()) return false;
return StrictNumberEquals(*this, that);
} else if (this->IsString()) {
- if (!that->IsString()) return false;
- return String::cast(*this)->Equals(String::cast(that));
+ if (!that.IsString()) return false;
+ return String::cast(*this).Equals(String::cast(that));
} else if (this->IsBigInt()) {
- if (!that->IsBigInt()) return false;
+ if (!that.IsBigInt()) return false;
return BigInt::EqualToBigInt(BigInt::cast(*this), BigInt::cast(that));
}
return *this == that;
@@ -797,7 +799,7 @@ bool Object::StrictEquals(Object that) {
Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
if (object->IsNumber()) return isolate->factory()->number_string();
if (object->IsOddball())
- return handle(Oddball::cast(*object)->type_of(), isolate);
+ return handle(Oddball::cast(*object).type_of(), isolate);
if (object->IsUndetectable()) {
return isolate->factory()->undefined_string();
}
@@ -808,7 +810,6 @@ Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
return isolate->factory()->object_string();
}
-
// static
MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
Handle<Object> rhs) {
@@ -835,7 +836,6 @@ MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
return isolate->factory()->NewNumber(lhs->Number() + rhs->Number());
}
-
// static
MaybeHandle<Object> Object::OrdinaryHasInstance(Isolate* isolate,
Handle<Object> callable,
@@ -929,8 +929,9 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
return isolate->factory()->undefined_value();
}
if (!func->IsCallable()) {
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kPropertyNotFunction,
- func, name, receiver),
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kPropertyNotFunction, func,
+ name, receiver),
Object);
}
return func;
@@ -945,7 +946,7 @@ MaybeHandle<FixedArray> CreateListFromArrayLikeFastPath(
Handle<JSArray> array = Handle<JSArray>::cast(object);
uint32_t length;
if (!array->HasArrayPrototype(isolate) ||
- !array->length()->ToUint32(&length) || !array->HasFastElements() ||
+ !array->length().ToUint32(&length) || !array->HasFastElements() ||
!JSObject::PrototypeHasNoElements(isolate, *array)) {
return MaybeHandle<FixedArray>();
}
@@ -953,7 +954,7 @@ MaybeHandle<FixedArray> CreateListFromArrayLikeFastPath(
isolate, array, length);
} else if (object->IsJSTypedArray()) {
Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(object);
- size_t length = array->length_value();
+ size_t length = array->length();
if (array->WasDetached() ||
length > static_cast<size_t>(FixedArray::kMaxLength)) {
return MaybeHandle<FixedArray>();
@@ -1034,7 +1035,6 @@ MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
return list;
}
-
// static
MaybeHandle<Object> Object::GetLengthFromArrayLike(Isolate* isolate,
Handle<JSReceiver> object) {
@@ -1059,7 +1059,7 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
// In case of global IC, the receiver is the global object. Replace by
// the global proxy.
if (receiver->IsJSGlobalObject()) {
- receiver = handle(JSGlobalObject::cast(*receiver)->global_proxy(),
+ receiver = handle(JSGlobalObject::cast(*receiver).global_proxy(),
it->isolate());
}
MaybeHandle<Object> result =
@@ -1097,7 +1097,6 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
return it->isolate()->factory()->undefined_value();
}
-
// static
MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
Handle<JSProxy> proxy,
@@ -1216,15 +1215,13 @@ MaybeHandle<Object> JSProxy::CheckGetSetTrapResult(Isolate* isolate,
return isolate->factory()->undefined_value();
}
-
-
bool Object::ToInt32(int32_t* value) {
if (IsSmi()) {
*value = Smi::ToInt(*this);
return true;
}
if (IsHeapNumber()) {
- double num = HeapNumber::cast(*this)->value();
+ double num = HeapNumber::cast(*this).value();
// Check range before conversion to avoid undefined behavior.
if (num >= kMinInt && num <= kMaxInt && FastI2D(FastD2I(num)) == num) {
*value = FastD2I(num);
@@ -1243,14 +1240,14 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
Isolate* isolate, Handle<FunctionTemplateInfo> info,
MaybeHandle<Name> maybe_name) {
Object current_info = info->shared_function_info();
- if (current_info->IsSharedFunctionInfo()) {
+ if (current_info.IsSharedFunctionInfo()) {
return handle(SharedFunctionInfo::cast(current_info), isolate);
}
Handle<Name> name;
Handle<String> name_string;
if (maybe_name.ToHandle(&name) && name->IsString()) {
name_string = Handle<String>::cast(name);
- } else if (info->class_name()->IsString()) {
+ } else if (info->class_name().IsString()) {
name_string = handle(String::cast(info->class_name()), isolate);
} else {
name_string = isolate->factory()->empty_string();
@@ -1275,23 +1272,23 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
bool FunctionTemplateInfo::IsTemplateFor(Map map) {
// There is a constraint on the object; check.
- if (!map->IsJSObjectMap()) return false;
+ if (!map.IsJSObjectMap()) return false;
// Fetch the constructor function of the object.
- Object cons_obj = map->GetConstructor();
+ Object cons_obj = map.GetConstructor();
Object type;
- if (cons_obj->IsJSFunction()) {
+ if (cons_obj.IsJSFunction()) {
JSFunction fun = JSFunction::cast(cons_obj);
- type = fun->shared()->function_data();
- } else if (cons_obj->IsFunctionTemplateInfo()) {
+ type = fun.shared().function_data();
+ } else if (cons_obj.IsFunctionTemplateInfo()) {
type = FunctionTemplateInfo::cast(cons_obj);
} else {
return false;
}
// Iterate through the chain of inheriting function templates to
// see if the required one occurs.
- while (type->IsFunctionTemplateInfo()) {
+ while (type.IsFunctionTemplateInfo()) {
if (type == *this) return true;
- type = FunctionTemplateInfo::cast(type)->GetParentTemplate();
+ type = FunctionTemplateInfo::cast(type).GetParentTemplate();
}
// Didn't find the required type in the inheritance chain.
return false;
@@ -1300,7 +1297,7 @@ bool FunctionTemplateInfo::IsTemplateFor(Map map) {
// static
FunctionTemplateRareData FunctionTemplateInfo::AllocateFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
- DCHECK(function_template_info->rare_data()->IsUndefined(isolate));
+ DCHECK(function_template_info->rare_data().IsUndefined(isolate));
Handle<Struct> struct_obj = isolate->factory()->NewStruct(
FUNCTION_TEMPLATE_RARE_DATA_TYPE, AllocationType::kOld);
Handle<FunctionTemplateRareData> rare_data =
@@ -1329,7 +1326,6 @@ Handle<TemplateList> TemplateList::Add(Isolate* isolate,
return Handle<TemplateList>::cast(fixed_array);
}
-
// ES6 9.5.1
// static
MaybeHandle<HeapObject> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
@@ -1399,7 +1395,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
// In case of global IC, the receiver is the global object. Replace by the
// global proxy.
if (receiver->IsJSGlobalObject()) {
- receiver = handle(JSGlobalObject::cast(*receiver)->global_proxy(), isolate);
+ receiver = handle(JSGlobalObject::cast(*receiver).global_proxy(), isolate);
}
// We should never get here to initialize a const with the hole value since a
@@ -1447,7 +1443,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
}
// Regular accessor.
- Handle<Object> getter(AccessorPair::cast(*structure)->getter(), isolate);
+ Handle<Object> getter(AccessorPair::cast(*structure).getter(), isolate);
if (getter->IsFunctionTemplateInfo()) {
SaveAndSwitchContext save(isolate, *holder->GetCreationContext());
return Builtins::InvokeApiFunction(
@@ -1488,7 +1484,7 @@ bool AccessorInfo::IsCompatibleReceiverMap(Handle<AccessorInfo> info,
if (!info->HasExpectedReceiverType()) return true;
if (!map->IsJSObjectMap()) return false;
return FunctionTemplateInfo::cast(info->expected_receiver_type())
- ->IsTemplateFor(*map);
+ .IsTemplateFor(*map);
}
Maybe<bool> Object::SetPropertyWithAccessor(
@@ -1500,7 +1496,7 @@ Maybe<bool> Object::SetPropertyWithAccessor(
// In case of global IC, the receiver is the global object. Replace by the
// global proxy.
if (receiver->IsJSGlobalObject()) {
- receiver = handle(JSGlobalObject::cast(*receiver)->global_proxy(), isolate);
+ receiver = handle(JSGlobalObject::cast(*receiver).global_proxy(), isolate);
}
// We should never get here to initialize a const with the hole value since a
@@ -1552,15 +1548,16 @@ Maybe<bool> Object::SetPropertyWithAccessor(
}
// Regular accessor.
- Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
+ Handle<Object> setter(AccessorPair::cast(*structure).setter(), isolate);
if (setter->IsFunctionTemplateInfo()) {
SaveAndSwitchContext save(isolate, *holder->GetCreationContext());
Handle<Object> argv[] = {value};
RETURN_ON_EXCEPTION_VALUE(
- isolate, Builtins::InvokeApiFunction(
- isolate, false, Handle<FunctionTemplateInfo>::cast(setter),
- receiver, arraysize(argv), argv,
- isolate->factory()->undefined_value()),
+ isolate,
+ Builtins::InvokeApiFunction(isolate, false,
+ Handle<FunctionTemplateInfo>::cast(setter),
+ receiver, arraysize(argv), argv,
+ isolate->factory()->undefined_value()),
Nothing<bool>());
return Just(true);
} else if (setter->IsCallable()) {
@@ -1575,8 +1572,7 @@ Maybe<bool> Object::SetPropertyWithAccessor(
}
MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
- Handle<Object> receiver,
- Handle<JSReceiver> getter) {
+ Handle<Object> receiver, Handle<JSReceiver> getter) {
Isolate* isolate = getter->GetIsolate();
// Platforms with simulators like arm/arm64 expose a funny issue. If the
@@ -1601,43 +1597,44 @@ Maybe<bool> Object::SetPropertyWithDefinedSetter(
Maybe<ShouldThrow> should_throw) {
Isolate* isolate = setter->GetIsolate();
- Handle<Object> argv[] = { value };
- RETURN_ON_EXCEPTION_VALUE(isolate, Execution::Call(isolate, setter, receiver,
- arraysize(argv), argv),
- Nothing<bool>());
+ Handle<Object> argv[] = {value};
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate,
+ Execution::Call(isolate, setter, receiver, arraysize(argv), argv),
+ Nothing<bool>());
return Just(true);
}
Map Object::GetPrototypeChainRootMap(Isolate* isolate) const {
DisallowHeapAllocation no_alloc;
if (IsSmi()) {
- Context native_context = isolate->context()->native_context();
- return native_context->number_function()->initial_map();
+ Context native_context = isolate->context().native_context();
+ return native_context.number_function().initial_map();
}
const HeapObject heap_object = HeapObject::cast(*this);
- return heap_object->map()->GetPrototypeChainRootMap(isolate);
+ return heap_object.map().GetPrototypeChainRootMap(isolate);
}
Smi Object::GetOrCreateHash(Isolate* isolate) {
DisallowHeapAllocation no_gc;
Object hash = Object::GetSimpleHash(*this);
- if (hash->IsSmi()) return Smi::cast(hash);
+ if (hash.IsSmi()) return Smi::cast(hash);
DCHECK(IsJSReceiver());
- return JSReceiver::cast(*this)->GetOrCreateIdentityHash(isolate);
+ return JSReceiver::cast(*this).GetOrCreateIdentityHash(isolate);
}
bool Object::SameValue(Object other) {
if (other == *this) return true;
- if (IsNumber() && other->IsNumber()) {
- return SameNumberValue(Number(), other->Number());
+ if (IsNumber() && other.IsNumber()) {
+ return SameNumberValue(Number(), other.Number());
}
- if (IsString() && other->IsString()) {
- return String::cast(*this)->Equals(String::cast(other));
+ if (IsString() && other.IsString()) {
+ return String::cast(*this).Equals(String::cast(other));
}
- if (IsBigInt() && other->IsBigInt()) {
+ if (IsBigInt() && other.IsBigInt()) {
return BigInt::EqualToBigInt(BigInt::cast(*this), BigInt::cast(other));
}
return false;
@@ -1646,17 +1643,17 @@ bool Object::SameValue(Object other) {
bool Object::SameValueZero(Object other) {
if (other == *this) return true;
- if (IsNumber() && other->IsNumber()) {
+ if (IsNumber() && other.IsNumber()) {
double this_value = Number();
- double other_value = other->Number();
+ double other_value = other.Number();
// +0 == -0 is true
return this_value == other_value ||
(std::isnan(this_value) && std::isnan(other_value));
}
- if (IsString() && other->IsString()) {
- return String::cast(*this)->Equals(String::cast(other));
+ if (IsString() && other.IsString()) {
+ return String::cast(*this).Equals(String::cast(other));
}
- if (IsBigInt() && other->IsBigInt()) {
+ if (IsBigInt() && other.IsBigInt()) {
return BigInt::EqualToBigInt(BigInt::cast(*this), BigInt::cast(other));
}
return false;
@@ -1707,8 +1704,8 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
} else {
if (!constructor->IsConstructor()) {
THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kSpeciesNotConstructor),
- Object);
+ NewTypeError(MessageTemplate::kSpeciesNotConstructor),
+ Object);
}
return constructor;
}
@@ -1756,15 +1753,15 @@ bool Object::IterationHasObservableEffects() {
// Check that this object is an array.
if (!IsJSArray()) return true;
JSArray array = JSArray::cast(*this);
- Isolate* isolate = array->GetIsolate();
+ Isolate* isolate = array.GetIsolate();
#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return true;
#endif
// Check that we have the original ArrayPrototype.
- if (!array->map()->prototype()->IsJSObject()) return true;
- JSObject array_proto = JSObject::cast(array->map()->prototype());
+ if (!array.map().prototype().IsJSObject()) return true;
+ JSObject array_proto = JSObject::cast(array.map().prototype());
if (!isolate->is_initial_array_prototype(array_proto)) return true;
// Check that the ArrayPrototype hasn't been modified in a way that would
@@ -1773,7 +1770,7 @@ bool Object::IterationHasObservableEffects() {
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
- ElementsKind array_kind = array->GetElementsKind();
+ ElementsKind array_kind = array.GetElementsKind();
if (IsFastPackedElementsKind(array_kind)) return false;
// For FastHoley kinds, an element access on a hole would cause a lookup on
@@ -1804,35 +1801,19 @@ std::ostream& operator<<(std::ostream& os, const Object& obj) {
return os;
}
-void MaybeObject::ShortPrint(FILE* out) {
- OFStream os(out);
- os << Brief(*this);
-}
-
-void MaybeObject::ShortPrint(StringStream* accumulator) {
- std::ostringstream os;
- os << Brief(*this);
- accumulator->Add(os.str().c_str());
-}
-
-void MaybeObject::ShortPrint(std::ostream& os) { os << Brief(*this); }
-
-Brief::Brief(const Object v) : value(v->ptr()) {}
-Brief::Brief(const MaybeObject v) : value(v.ptr()) {}
-
std::ostream& operator<<(std::ostream& os, const Brief& v) {
MaybeObject maybe_object(v.value);
Smi smi;
HeapObject heap_object;
if (maybe_object->ToSmi(&smi)) {
- smi->SmiPrint(os);
+ smi.SmiPrint(os);
} else if (maybe_object->IsCleared()) {
os << "[cleared]";
} else if (maybe_object->GetHeapObjectIfWeak(&heap_object)) {
os << "[weak] ";
- heap_object->HeapObjectShortPrint(os);
+ heap_object.HeapObjectShortPrint(os);
} else if (maybe_object->GetHeapObjectIfStrong(&heap_object)) {
- heap_object->HeapObjectShortPrint(os);
+ heap_object.HeapObjectShortPrint(os);
} else {
UNREACHABLE();
}
@@ -1843,33 +1824,31 @@ void Smi::SmiPrint(std::ostream& os) const { // NOLINT
os << value();
}
-
-
void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << AsHex::Address(this->ptr()) << " ";
if (IsString()) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- String::cast(*this)->StringShortPrint(&accumulator);
+ String::cast(*this).StringShortPrint(&accumulator);
os << accumulator.ToCString().get();
return;
}
if (IsJSObject()) {
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- JSObject::cast(*this)->JSObjectShortPrint(&accumulator);
+ JSObject::cast(*this).JSObjectShortPrint(&accumulator);
os << accumulator.ToCString().get();
return;
}
- switch (map()->instance_type()) {
+ switch (map().instance_type()) {
case MAP_TYPE: {
os << "<Map";
Map mapInstance = Map::cast(*this);
- if (mapInstance->IsJSObjectMap()) {
- os << "(" << ElementsKindToString(mapInstance->elements_kind()) << ")";
- } else if (mapInstance->instance_size() != kVariableSizeSentinel) {
- os << "[" << mapInstance->instance_size() << "]";
+ if (mapInstance.IsJSObjectMap()) {
+ os << "(" << ElementsKindToString(mapInstance.elements_kind()) << ")";
+ } else if (mapInstance.instance_size() != kVariableSizeSentinel) {
+ os << "[" << mapInstance.instance_size() << "]";
}
os << ">";
} break;
@@ -1877,97 +1856,97 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<AwaitContext generator= ";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- Context::cast(*this)->extension()->ShortPrint(&accumulator);
+ Context::cast(*this).extension().ShortPrint(&accumulator);
os << accumulator.ToCString().get();
os << '>';
break;
}
case BLOCK_CONTEXT_TYPE:
- os << "<BlockContext[" << Context::cast(*this)->length() << "]>";
+ os << "<BlockContext[" << Context::cast(*this).length() << "]>";
break;
case CATCH_CONTEXT_TYPE:
- os << "<CatchContext[" << Context::cast(*this)->length() << "]>";
+ os << "<CatchContext[" << Context::cast(*this).length() << "]>";
break;
case DEBUG_EVALUATE_CONTEXT_TYPE:
- os << "<DebugEvaluateContext[" << Context::cast(*this)->length() << "]>";
+ os << "<DebugEvaluateContext[" << Context::cast(*this).length() << "]>";
break;
case EVAL_CONTEXT_TYPE:
- os << "<EvalContext[" << Context::cast(*this)->length() << "]>";
+ os << "<EvalContext[" << Context::cast(*this).length() << "]>";
break;
case FUNCTION_CONTEXT_TYPE:
- os << "<FunctionContext[" << Context::cast(*this)->length() << "]>";
+ os << "<FunctionContext[" << Context::cast(*this).length() << "]>";
break;
case MODULE_CONTEXT_TYPE:
- os << "<ModuleContext[" << Context::cast(*this)->length() << "]>";
+ os << "<ModuleContext[" << Context::cast(*this).length() << "]>";
break;
case NATIVE_CONTEXT_TYPE:
- os << "<NativeContext[" << Context::cast(*this)->length() << "]>";
+ os << "<NativeContext[" << Context::cast(*this).length() << "]>";
break;
case SCRIPT_CONTEXT_TYPE:
- os << "<ScriptContext[" << Context::cast(*this)->length() << "]>";
+ os << "<ScriptContext[" << Context::cast(*this).length() << "]>";
break;
case WITH_CONTEXT_TYPE:
- os << "<WithContext[" << Context::cast(*this)->length() << "]>";
+ os << "<WithContext[" << Context::cast(*this).length() << "]>";
break;
case SCRIPT_CONTEXT_TABLE_TYPE:
- os << "<ScriptContextTable[" << FixedArray::cast(*this)->length() << "]>";
+ os << "<ScriptContextTable[" << FixedArray::cast(*this).length() << "]>";
break;
case HASH_TABLE_TYPE:
- os << "<HashTable[" << FixedArray::cast(*this)->length() << "]>";
+ os << "<HashTable[" << FixedArray::cast(*this).length() << "]>";
break;
case ORDERED_HASH_MAP_TYPE:
- os << "<OrderedHashMap[" << FixedArray::cast(*this)->length() << "]>";
+ os << "<OrderedHashMap[" << FixedArray::cast(*this).length() << "]>";
break;
case ORDERED_HASH_SET_TYPE:
- os << "<OrderedHashSet[" << FixedArray::cast(*this)->length() << "]>";
+ os << "<OrderedHashSet[" << FixedArray::cast(*this).length() << "]>";
break;
case ORDERED_NAME_DICTIONARY_TYPE:
- os << "<OrderedNameDictionary[" << FixedArray::cast(*this)->length()
+ os << "<OrderedNameDictionary[" << FixedArray::cast(*this).length()
<< "]>";
break;
case NAME_DICTIONARY_TYPE:
- os << "<NameDictionary[" << FixedArray::cast(*this)->length() << "]>";
+ os << "<NameDictionary[" << FixedArray::cast(*this).length() << "]>";
break;
case GLOBAL_DICTIONARY_TYPE:
- os << "<GlobalDictionary[" << FixedArray::cast(*this)->length() << "]>";
+ os << "<GlobalDictionary[" << FixedArray::cast(*this).length() << "]>";
break;
case NUMBER_DICTIONARY_TYPE:
- os << "<NumberDictionary[" << FixedArray::cast(*this)->length() << "]>";
+ os << "<NumberDictionary[" << FixedArray::cast(*this).length() << "]>";
break;
case SIMPLE_NUMBER_DICTIONARY_TYPE:
- os << "<SimpleNumberDictionary[" << FixedArray::cast(*this)->length()
+ os << "<SimpleNumberDictionary[" << FixedArray::cast(*this).length()
<< "]>";
break;
case STRING_TABLE_TYPE:
- os << "<StringTable[" << FixedArray::cast(*this)->length() << "]>";
+ os << "<StringTable[" << FixedArray::cast(*this).length() << "]>";
break;
case FIXED_ARRAY_TYPE:
- os << "<FixedArray[" << FixedArray::cast(*this)->length() << "]>";
+ os << "<FixedArray[" << FixedArray::cast(*this).length() << "]>";
break;
case OBJECT_BOILERPLATE_DESCRIPTION_TYPE:
- os << "<ObjectBoilerplateDescription["
- << FixedArray::cast(*this)->length() << "]>";
+ os << "<ObjectBoilerplateDescription[" << FixedArray::cast(*this).length()
+ << "]>";
break;
case FIXED_DOUBLE_ARRAY_TYPE:
- os << "<FixedDoubleArray[" << FixedDoubleArray::cast(*this)->length()
+ os << "<FixedDoubleArray[" << FixedDoubleArray::cast(*this).length()
<< "]>";
break;
case BYTE_ARRAY_TYPE:
- os << "<ByteArray[" << ByteArray::cast(*this)->length() << "]>";
+ os << "<ByteArray[" << ByteArray::cast(*this).length() << "]>";
break;
case BYTECODE_ARRAY_TYPE:
- os << "<BytecodeArray[" << BytecodeArray::cast(*this)->length() << "]>";
+ os << "<BytecodeArray[" << BytecodeArray::cast(*this).length() << "]>";
break;
case DESCRIPTOR_ARRAY_TYPE:
os << "<DescriptorArray["
- << DescriptorArray::cast(*this)->number_of_descriptors() << "]>";
+ << DescriptorArray::cast(*this).number_of_descriptors() << "]>";
break;
case TRANSITION_ARRAY_TYPE:
- os << "<TransitionArray[" << TransitionArray::cast(*this)->length()
+ os << "<TransitionArray[" << TransitionArray::cast(*this).length()
<< "]>";
break;
case PROPERTY_ARRAY_TYPE:
- os << "<PropertyArray[" << PropertyArray::cast(*this)->length() << "]>";
+ os << "<PropertyArray[" << PropertyArray::cast(*this).length() << "]>";
break;
case FEEDBACK_CELL_TYPE: {
{
@@ -1990,50 +1969,42 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
}
case CLOSURE_FEEDBACK_CELL_ARRAY_TYPE:
os << "<ClosureFeedbackCellArray["
- << ClosureFeedbackCellArray::cast(*this)->length() << "]>";
+ << ClosureFeedbackCellArray::cast(*this).length() << "]>";
break;
case FEEDBACK_VECTOR_TYPE:
- os << "<FeedbackVector[" << FeedbackVector::cast(*this)->length() << "]>";
+ os << "<FeedbackVector[" << FeedbackVector::cast(*this).length() << "]>";
break;
case FREE_SPACE_TYPE:
- os << "<FreeSpace[" << FreeSpace::cast(*this)->size() << "]>";
+ os << "<FreeSpace[" << FreeSpace::cast(*this).size() << "]>";
break;
-#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype) \
- case FIXED_##TYPE##_ARRAY_TYPE: \
- os << "<Fixed" #Type "Array[" << Fixed##Type##Array::cast(*this)->length() \
- << "]>"; \
- break;
-
- TYPED_ARRAYS(TYPED_ARRAY_SHORT_PRINT)
-#undef TYPED_ARRAY_SHORT_PRINT
case PREPARSE_DATA_TYPE: {
PreparseData data = PreparseData::cast(*this);
- os << "<PreparseData[data=" << data->data_length()
- << " children=" << data->children_length() << "]>";
+ os << "<PreparseData[data=" << data.data_length()
+ << " children=" << data.children_length() << "]>";
break;
}
case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE: {
UncompiledDataWithoutPreparseData data =
UncompiledDataWithoutPreparseData::cast(*this);
- os << "<UncompiledDataWithoutPreparseData (" << data->start_position()
- << ", " << data->end_position() << ")]>";
+ os << "<UncompiledDataWithoutPreparseData (" << data.start_position()
+ << ", " << data.end_position() << ")]>";
break;
}
case UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE: {
UncompiledDataWithPreparseData data =
UncompiledDataWithPreparseData::cast(*this);
- os << "<UncompiledDataWithPreparseData (" << data->start_position()
- << ", " << data->end_position()
- << ") preparsed=" << Brief(data->preparse_data()) << ">";
+ os << "<UncompiledDataWithPreparseData (" << data.start_position() << ", "
+ << data.end_position() << ") preparsed=" << Brief(data.preparse_data())
+ << ">";
break;
}
case SHARED_FUNCTION_INFO_TYPE: {
SharedFunctionInfo shared = SharedFunctionInfo::cast(*this);
- std::unique_ptr<char[]> debug_name = shared->DebugName()->ToCString();
+ std::unique_ptr<char[]> debug_name = shared.DebugName().ToCString();
if (debug_name[0] != 0) {
os << "<SharedFunctionInfo " << debug_name.get() << ">";
} else {
@@ -2044,32 +2015,32 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case JS_MESSAGE_OBJECT_TYPE:
os << "<JSMessageObject>";
break;
-#define MAKE_STRUCT_CASE(TYPE, Name, name) \
- case TYPE: \
- os << "<" #Name; \
- Name::cast(*this)->BriefPrintDetails(os); \
- os << ">"; \
+#define MAKE_STRUCT_CASE(TYPE, Name, name) \
+ case TYPE: \
+ os << "<" #Name; \
+ Name::cast(*this).BriefPrintDetails(os); \
+ os << ">"; \
break;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
case ALLOCATION_SITE_TYPE: {
os << "<AllocationSite";
- AllocationSite::cast(*this)->BriefPrintDetails(os);
+ AllocationSite::cast(*this).BriefPrintDetails(os);
os << ">";
break;
}
case SCOPE_INFO_TYPE: {
ScopeInfo scope = ScopeInfo::cast(*this);
os << "<ScopeInfo";
- if (scope->length()) os << " " << scope->scope_type() << " ";
- os << "[" << scope->length() << "]>";
+ if (scope.length()) os << " " << scope.scope_type() << " ";
+ os << "[" << scope.length() << "]>";
break;
}
case CODE_TYPE: {
Code code = Code::cast(*this);
- os << "<Code " << Code::Kind2String(code->kind());
- if (code->is_builtin()) {
- os << " " << Builtins::name(code->builtin_index());
+ os << "<Code " << Code::Kind2String(code.kind());
+ if (code.is_builtin()) {
+ os << " " << Builtins::name(code.builtin_index());
}
os << ">";
break;
@@ -2087,31 +2058,31 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<false>";
} else {
os << "<Odd Oddball: ";
- os << Oddball::cast(*this)->to_string()->ToCString().get();
+ os << Oddball::cast(*this).to_string().ToCString().get();
os << ">";
}
break;
}
case SYMBOL_TYPE: {
Symbol symbol = Symbol::cast(*this);
- symbol->SymbolShortPrint(os);
+ symbol.SymbolShortPrint(os);
break;
}
case HEAP_NUMBER_TYPE: {
os << "<HeapNumber ";
- HeapNumber::cast(*this)->HeapNumberPrint(os);
+ HeapNumber::cast(*this).HeapNumberPrint(os);
os << ">";
break;
}
case MUTABLE_HEAP_NUMBER_TYPE: {
os << "<MutableHeapNumber ";
- MutableHeapNumber::cast(*this)->MutableHeapNumberPrint(os);
+ MutableHeapNumber::cast(*this).MutableHeapNumberPrint(os);
os << '>';
break;
}
case BIGINT_TYPE: {
os << "<BigInt ";
- BigInt::cast(*this)->BigIntShortPrint(os);
+ BigInt::cast(*this).BigIntShortPrint(os);
os << ">";
break;
}
@@ -2125,7 +2096,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<Cell value= ";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- Cell::cast(*this)->value()->ShortPrint(&accumulator);
+ Cell::cast(*this).value().ShortPrint(&accumulator);
os << accumulator.ToCString().get();
os << '>';
break;
@@ -2133,11 +2104,11 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case PROPERTY_CELL_TYPE: {
PropertyCell cell = PropertyCell::cast(*this);
os << "<PropertyCell name=";
- cell->name()->ShortPrint(os);
+ cell.name().ShortPrint(os);
os << " value=";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- cell->value()->ShortPrint(&accumulator);
+ cell.value().ShortPrint(&accumulator);
os << accumulator.ToCString().get();
os << '>';
break;
@@ -2145,10 +2116,10 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
case CALL_HANDLER_INFO_TYPE: {
CallHandlerInfo info = CallHandlerInfo::cast(*this);
os << "<CallHandlerInfo ";
- os << "callback= " << Brief(info->callback());
- os << ", js_callback= " << Brief(info->js_callback());
- os << ", data= " << Brief(info->data());
- if (info->IsSideEffectFreeCallHandlerInfo()) {
+ os << "callback= " << Brief(info.callback());
+ os << ", js_callback= " << Brief(info.js_callback());
+ os << ", data= " << Brief(info.data());
+ if (info.IsSideEffectFreeCallHandlerInfo()) {
os << ", side_effect_free= true>";
} else {
os << ", side_effect_free= false>";
@@ -2156,7 +2127,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
break;
}
default:
- os << "<Other heap object (" << map()->instance_type() << ")>";
+ os << "<Other heap object (" << map().instance_type() << ")>";
break;
}
}
@@ -2186,7 +2157,6 @@ void CallableTask::BriefPrintDetails(std::ostream& os) {
void HeapObject::Iterate(ObjectVisitor* v) { IterateFast<ObjectVisitor>(v); }
-
void HeapObject::IterateBody(ObjectVisitor* v) {
Map m = map();
IterateBodyFast<ObjectVisitor>(m, SizeFromMap(m), v);
@@ -2196,7 +2166,6 @@ void HeapObject::IterateBody(Map map, int object_size, ObjectVisitor* v) {
IterateBodyFast<ObjectVisitor>(map, object_size, v);
}
-
struct CallIsValidSlot {
template <typename BodyDescriptor>
static bool apply(Map map, HeapObject obj, int offset, int) {
@@ -2206,120 +2175,115 @@ struct CallIsValidSlot {
bool HeapObject::IsValidSlot(Map map, int offset) {
DCHECK_NE(0, offset);
- return BodyDescriptorApply<CallIsValidSlot, bool>(map->instance_type(), map,
+ return BodyDescriptorApply<CallIsValidSlot, bool>(map.instance_type(), map,
*this, offset, 0);
}
int HeapObject::SizeFromMap(Map map) const {
- int instance_size = map->instance_size();
+ int instance_size = map.instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
// Only inline the most frequent cases.
- InstanceType instance_type = map->instance_type();
+ InstanceType instance_type = map.instance_type();
if (IsInRange(instance_type, FIRST_FIXED_ARRAY_TYPE, LAST_FIXED_ARRAY_TYPE)) {
return FixedArray::SizeFor(
- FixedArray::unchecked_cast(*this)->synchronized_length());
+ FixedArray::unchecked_cast(*this).synchronized_length());
}
if (IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)) {
// Native context has fixed size.
DCHECK_NE(instance_type, NATIVE_CONTEXT_TYPE);
- return Context::SizeFor(Context::unchecked_cast(*this)->length());
+ return Context::SizeFor(Context::unchecked_cast(*this).length());
}
if (instance_type == ONE_BYTE_STRING_TYPE ||
instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
// Strings may get concurrently truncated, hence we have to access its
// length synchronized.
return SeqOneByteString::SizeFor(
- SeqOneByteString::unchecked_cast(*this)->synchronized_length());
+ SeqOneByteString::unchecked_cast(*this).synchronized_length());
}
if (instance_type == BYTE_ARRAY_TYPE) {
return ByteArray::SizeFor(
- ByteArray::unchecked_cast(*this)->synchronized_length());
+ ByteArray::unchecked_cast(*this).synchronized_length());
}
if (instance_type == BYTECODE_ARRAY_TYPE) {
return BytecodeArray::SizeFor(
- BytecodeArray::unchecked_cast(*this)->synchronized_length());
+ BytecodeArray::unchecked_cast(*this).synchronized_length());
}
if (instance_type == FREE_SPACE_TYPE) {
- return FreeSpace::unchecked_cast(*this)->relaxed_read_size();
+ return FreeSpace::unchecked_cast(*this).relaxed_read_size();
}
if (instance_type == STRING_TYPE ||
instance_type == INTERNALIZED_STRING_TYPE) {
// Strings may get concurrently truncated, hence we have to access its
// length synchronized.
return SeqTwoByteString::SizeFor(
- SeqTwoByteString::unchecked_cast(*this)->synchronized_length());
+ SeqTwoByteString::unchecked_cast(*this).synchronized_length());
}
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
return FixedDoubleArray::SizeFor(
- FixedDoubleArray::unchecked_cast(*this)->synchronized_length());
+ FixedDoubleArray::unchecked_cast(*this).synchronized_length());
}
if (instance_type == FEEDBACK_METADATA_TYPE) {
return FeedbackMetadata::SizeFor(
- FeedbackMetadata::unchecked_cast(*this)->synchronized_slot_count());
+ FeedbackMetadata::unchecked_cast(*this).synchronized_slot_count());
}
if (instance_type == DESCRIPTOR_ARRAY_TYPE) {
return DescriptorArray::SizeFor(
- DescriptorArray::unchecked_cast(*this)->number_of_all_descriptors());
+ DescriptorArray::unchecked_cast(*this).number_of_all_descriptors());
}
if (IsInRange(instance_type, FIRST_WEAK_FIXED_ARRAY_TYPE,
LAST_WEAK_FIXED_ARRAY_TYPE)) {
return WeakFixedArray::SizeFor(
- WeakFixedArray::unchecked_cast(*this)->synchronized_length());
+ WeakFixedArray::unchecked_cast(*this).synchronized_length());
}
if (instance_type == WEAK_ARRAY_LIST_TYPE) {
return WeakArrayList::SizeForCapacity(
- WeakArrayList::unchecked_cast(*this)->synchronized_capacity());
- }
- if (IsInRange(instance_type, FIRST_FIXED_TYPED_ARRAY_TYPE,
- LAST_FIXED_TYPED_ARRAY_TYPE)) {
- return FixedTypedArrayBase::unchecked_cast(*this)->TypedArraySize(
- instance_type);
+ WeakArrayList::unchecked_cast(*this).synchronized_capacity());
}
if (instance_type == SMALL_ORDERED_HASH_SET_TYPE) {
return SmallOrderedHashSet::SizeFor(
- SmallOrderedHashSet::unchecked_cast(*this)->Capacity());
+ SmallOrderedHashSet::unchecked_cast(*this).Capacity());
}
if (instance_type == SMALL_ORDERED_HASH_MAP_TYPE) {
return SmallOrderedHashMap::SizeFor(
- SmallOrderedHashMap::unchecked_cast(*this)->Capacity());
+ SmallOrderedHashMap::unchecked_cast(*this).Capacity());
}
if (instance_type == SMALL_ORDERED_NAME_DICTIONARY_TYPE) {
return SmallOrderedNameDictionary::SizeFor(
- SmallOrderedNameDictionary::unchecked_cast(*this)->Capacity());
+ SmallOrderedNameDictionary::unchecked_cast(*this).Capacity());
}
if (instance_type == PROPERTY_ARRAY_TYPE) {
return PropertyArray::SizeFor(
- PropertyArray::cast(*this)->synchronized_length());
+ PropertyArray::cast(*this).synchronized_length());
}
if (instance_type == FEEDBACK_VECTOR_TYPE) {
return FeedbackVector::SizeFor(
- FeedbackVector::unchecked_cast(*this)->length());
+ FeedbackVector::unchecked_cast(*this).length());
}
if (instance_type == BIGINT_TYPE) {
- return BigInt::SizeFor(BigInt::unchecked_cast(*this)->length());
+ return BigInt::SizeFor(BigInt::unchecked_cast(*this).length());
}
if (instance_type == PREPARSE_DATA_TYPE) {
PreparseData data = PreparseData::unchecked_cast(*this);
- return PreparseData::SizeFor(data->data_length(), data->children_length());
+ return PreparseData::SizeFor(data.data_length(), data.children_length());
}
if (instance_type == CODE_TYPE) {
- return Code::unchecked_cast(*this)->CodeSize();
+ return Code::unchecked_cast(*this).CodeSize();
}
DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
return EmbedderDataArray::SizeFor(
- EmbedderDataArray::unchecked_cast(*this)->length());
+ EmbedderDataArray::unchecked_cast(*this).length());
}
bool HeapObject::NeedsRehashing() const {
- switch (map()->instance_type()) {
+ switch (map().instance_type()) {
case DESCRIPTOR_ARRAY_TYPE:
- return DescriptorArray::cast(*this)->number_of_descriptors() > 1;
+ return DescriptorArray::cast(*this).number_of_descriptors() > 1;
case TRANSITION_ARRAY_TYPE:
- return TransitionArray::cast(*this)->number_of_entries() > 1;
+ return TransitionArray::cast(*this).number_of_entries() > 1;
case ORDERED_HASH_MAP_TYPE:
- return OrderedHashMap::cast(*this)->NumberOfElements() > 0;
+ return OrderedHashMap::cast(*this).NumberOfElements() > 0;
case ORDERED_HASH_SET_TYPE:
- return OrderedHashSet::cast(*this)->NumberOfElements() > 0;
+ return OrderedHashSet::cast(*this).NumberOfElements() > 0;
case NAME_DICTIONARY_TYPE:
case GLOBAL_DICTIONARY_TYPE:
case NUMBER_DICTIONARY_TYPE:
@@ -2337,7 +2301,7 @@ bool HeapObject::NeedsRehashing() const {
bool HeapObject::CanBeRehashed() const {
DCHECK(NeedsRehashing());
- switch (map()->instance_type()) {
+ switch (map().instance_type()) {
case ORDERED_HASH_MAP_TYPE:
case ORDERED_HASH_SET_TYPE:
case ORDERED_NAME_DICTIONARY_TYPE:
@@ -2354,11 +2318,11 @@ bool HeapObject::CanBeRehashed() const {
case TRANSITION_ARRAY_TYPE:
return true;
case SMALL_ORDERED_HASH_MAP_TYPE:
- return SmallOrderedHashMap::cast(*this)->NumberOfElements() == 0;
+ return SmallOrderedHashMap::cast(*this).NumberOfElements() == 0;
case SMALL_ORDERED_HASH_SET_TYPE:
- return SmallOrderedHashMap::cast(*this)->NumberOfElements() == 0;
+ return SmallOrderedHashMap::cast(*this).NumberOfElements() == 0;
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
- return SmallOrderedNameDictionary::cast(*this)->NumberOfElements() == 0;
+ return SmallOrderedNameDictionary::cast(*this).NumberOfElements() == 0;
default:
return false;
}
@@ -2366,46 +2330,45 @@ bool HeapObject::CanBeRehashed() const {
}
void HeapObject::RehashBasedOnMap(ReadOnlyRoots roots) {
- switch (map()->instance_type()) {
+ switch (map().instance_type()) {
case HASH_TABLE_TYPE:
UNREACHABLE();
- break;
case NAME_DICTIONARY_TYPE:
- NameDictionary::cast(*this)->Rehash(roots);
+ NameDictionary::cast(*this).Rehash(roots);
break;
case GLOBAL_DICTIONARY_TYPE:
- GlobalDictionary::cast(*this)->Rehash(roots);
+ GlobalDictionary::cast(*this).Rehash(roots);
break;
case NUMBER_DICTIONARY_TYPE:
- NumberDictionary::cast(*this)->Rehash(roots);
+ NumberDictionary::cast(*this).Rehash(roots);
break;
case SIMPLE_NUMBER_DICTIONARY_TYPE:
- SimpleNumberDictionary::cast(*this)->Rehash(roots);
+ SimpleNumberDictionary::cast(*this).Rehash(roots);
break;
case STRING_TABLE_TYPE:
- StringTable::cast(*this)->Rehash(roots);
+ StringTable::cast(*this).Rehash(roots);
break;
case DESCRIPTOR_ARRAY_TYPE:
- DCHECK_LE(1, DescriptorArray::cast(*this)->number_of_descriptors());
- DescriptorArray::cast(*this)->Sort();
+ DCHECK_LE(1, DescriptorArray::cast(*this).number_of_descriptors());
+ DescriptorArray::cast(*this).Sort();
break;
case TRANSITION_ARRAY_TYPE:
- TransitionArray::cast(*this)->Sort();
+ TransitionArray::cast(*this).Sort();
break;
case SMALL_ORDERED_HASH_MAP_TYPE:
- DCHECK_EQ(0, SmallOrderedHashMap::cast(*this)->NumberOfElements());
+ DCHECK_EQ(0, SmallOrderedHashMap::cast(*this).NumberOfElements());
break;
case SMALL_ORDERED_HASH_SET_TYPE:
- DCHECK_EQ(0, SmallOrderedHashSet::cast(*this)->NumberOfElements());
+ DCHECK_EQ(0, SmallOrderedHashSet::cast(*this).NumberOfElements());
break;
case SMALL_ORDERED_NAME_DICTIONARY_TYPE:
- DCHECK_EQ(0, SmallOrderedNameDictionary::cast(*this)->NumberOfElements());
+ DCHECK_EQ(0, SmallOrderedNameDictionary::cast(*this).NumberOfElements());
break;
case ONE_BYTE_INTERNALIZED_STRING_TYPE:
case INTERNALIZED_STRING_TYPE:
// Rare case, rehash read-only space strings before they are sealed.
DCHECK(ReadOnlyHeap::Contains(*this));
- String::cast(*this)->Hash();
+ String::cast(*this).Hash();
break;
default:
UNREACHABLE();
@@ -2413,7 +2376,7 @@ void HeapObject::RehashBasedOnMap(ReadOnlyRoots roots) {
}
bool HeapObject::IsExternal(Isolate* isolate) const {
- return map()->FindRootMap(isolate) == isolate->heap()->external_map();
+ return map().FindRootMap(isolate) == isolate->heap()->external_map();
}
void DescriptorArray::GeneralizeAllFields() {
@@ -2467,7 +2430,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
// In case of global IC, the receiver is the global object. Replace by
// the global proxy.
if (receiver->IsJSGlobalObject()) {
- receiver = handle(JSGlobalObject::cast(*receiver)->global_proxy(),
+ receiver = handle(JSGlobalObject::cast(*receiver).global_proxy(),
it->isolate());
}
return JSProxy::SetProperty(it->GetHolder<JSProxy>(), it->GetName(),
@@ -2500,7 +2463,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo() &&
!it->HolderIsReceiverOrHiddenPrototype() &&
- AccessorInfo::cast(*accessors)->is_special_data_property()) {
+ AccessorInfo::cast(*accessors).is_special_data_property()) {
*found = false;
return Nothing<bool>();
}
@@ -2570,7 +2533,7 @@ Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
(GetShouldThrow(it->isolate(), should_throw) ==
ShouldThrow::kThrowOnError)) {
it->isolate()->Throw(*it->isolate()->factory()->NewReferenceError(
- MessageTemplate::kNotDefined, it->name()));
+ MessageTemplate::kNotDefined, it->GetName()));
return Nothing<bool>();
}
@@ -2594,7 +2557,6 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
// The property either doesn't exist on the holder or exists there as a data
// property.
-
if (!it->GetReceiver()->IsJSReceiver()) {
return WriteToReadOnlyProperty(it, value, should_throw);
}
@@ -2723,8 +2685,8 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
Handle<Object> to_assign = value;
// Convert the incoming value to a number for storing into typed arrays.
if (it->IsElement() && receiver->IsJSObject() &&
- JSObject::cast(*receiver)->HasFixedTypedArrayElements()) {
- ElementsKind elements_kind = JSObject::cast(*receiver)->GetElementsKind();
+ JSObject::cast(*receiver).HasTypedArrayElements()) {
+ ElementsKind elements_kind = JSObject::cast(*receiver).GetElementsKind();
if (elements_kind == BIGINT64_ELEMENTS ||
elements_kind == BIGUINT64_ELEMENTS) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(it->isolate(), to_assign,
@@ -2809,15 +2771,6 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
isolate->factory()->length_string(),
Object::TypeOf(isolate, array), array));
}
-
- if (FLAG_trace_external_array_abuse &&
- array->HasFixedTypedArrayElements()) {
- CheckArrayAbuse(array, "typed elements write", it->index(), true);
- }
-
- if (FLAG_trace_js_array_abuse && !array->HasFixedTypedArrayElements()) {
- CheckArrayAbuse(array, "elements write", it->index(), false);
- }
}
Handle<JSObject> receiver_obj = Handle<JSObject>::cast(receiver);
@@ -2846,7 +2799,6 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
return Just(true);
}
-
template <class T>
static int AppendUniqueCallbacks(Isolate* isolate,
Handle<TemplateList> callbacks,
@@ -2872,20 +2824,16 @@ static int AppendUniqueCallbacks(Isolate* isolate,
}
struct FixedArrayAppender {
- typedef FixedArray Array;
- static bool Contains(Handle<Name> key,
- Handle<AccessorInfo> entry,
- int valid_descriptors,
- Handle<FixedArray> array) {
+ using Array = FixedArray;
+ static bool Contains(Handle<Name> key, Handle<AccessorInfo> entry,
+ int valid_descriptors, Handle<FixedArray> array) {
for (int i = 0; i < valid_descriptors; i++) {
- if (*key == AccessorInfo::cast(array->get(i))->name()) return true;
+ if (*key == AccessorInfo::cast(array->get(i)).name()) return true;
}
return false;
}
- static void Insert(Handle<Name> key,
- Handle<AccessorInfo> entry,
- int valid_descriptors,
- Handle<FixedArray> array) {
+ static void Insert(Handle<Name> key, Handle<AccessorInfo> entry,
+ int valid_descriptors, Handle<FixedArray> array) {
DisallowHeapAllocation no_gc;
array->set(valid_descriptors, *entry);
}
@@ -2900,10 +2848,6 @@ int AccessorInfo::AppendUnique(Isolate* isolate, Handle<Object> descriptors,
valid_descriptors);
}
-
-
-
-
void JSProxy::Revoke(Handle<JSProxy> proxy) {
Isolate* isolate = proxy->GetIsolate();
// ES#sec-proxy-revocation-functions
@@ -2957,8 +2901,9 @@ Maybe<bool> JSProxy::HasProperty(Isolate* isolate, Handle<JSProxy> proxy,
// 6. Let trap be ? GetMethod(handler, "has").
Handle<Object> trap;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
- isolate->factory()->has_string()),
+ isolate, trap,
+ Object::GetMethod(Handle<JSReceiver>::cast(handler),
+ isolate->factory()->has_string()),
Nothing<bool>());
// 7. If trap is undefined, then
if (trap->IsUndefined(isolate)) {
@@ -3102,15 +3047,26 @@ Maybe<bool> JSProxy::DeletePropertyOrElement(Handle<JSProxy> proxy,
Maybe<bool> owned =
JSReceiver::GetOwnPropertyDescriptor(isolate, target, name, &target_desc);
MAYBE_RETURN(owned, Nothing<bool>());
- if (owned.FromJust() && !target_desc.configurable()) {
- isolate->Throw(*factory->NewTypeError(
- MessageTemplate::kProxyDeletePropertyNonConfigurable, name));
- return Nothing<bool>();
+ if (owned.FromJust()) {
+ if (!target_desc.configurable()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kProxyDeletePropertyNonConfigurable, name));
+ return Nothing<bool>();
+ }
+ // 13. Let extensibleTarget be ? IsExtensible(target).
+ // 14. If extensibleTarget is false, throw a TypeError exception.
+ Maybe<bool> extensible = JSReceiver::IsExtensible(target);
+ MAYBE_RETURN(extensible, Nothing<bool>());
+ if (!extensible.FromJust()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kProxyDeletePropertyNonExtensible, name));
+ return Nothing<bool>();
+ }
}
+
return Just(true);
}
-
// static
MaybeHandle<JSProxy> JSProxy::New(Isolate* isolate, Handle<Object> target,
Handle<Object> handler) {
@@ -3118,7 +3074,7 @@ MaybeHandle<JSProxy> JSProxy::New(Isolate* isolate, Handle<Object> target,
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
JSProxy);
}
- if (target->IsJSProxy() && JSProxy::cast(*target)->IsRevoked()) {
+ if (target->IsJSProxy() && JSProxy::cast(*target).IsRevoked()) {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
JSProxy);
@@ -3127,7 +3083,7 @@ MaybeHandle<JSProxy> JSProxy::New(Isolate* isolate, Handle<Object> target,
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kProxyNonObject),
JSProxy);
}
- if (handler->IsJSProxy() && JSProxy::cast(*handler)->IsRevoked()) {
+ if (handler->IsJSProxy() && JSProxy::cast(*handler).IsRevoked()) {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kProxyHandlerOrTargetRevoked),
JSProxy);
@@ -3136,10 +3092,9 @@ MaybeHandle<JSProxy> JSProxy::New(Isolate* isolate, Handle<Object> target,
Handle<JSReceiver>::cast(handler));
}
-
// static
MaybeHandle<NativeContext> JSProxy::GetFunctionRealm(Handle<JSProxy> proxy) {
- DCHECK(proxy->map()->is_constructor());
+ DCHECK(proxy->map().is_constructor());
if (proxy->IsRevoked()) {
THROW_NEW_ERROR(proxy->GetIsolate(),
NewTypeError(MessageTemplate::kProxyRevoked),
@@ -3164,7 +3119,7 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributes(LookupIterator* it) {
bool PropertyKeyToArrayLength(Handle<Object> value, uint32_t* length) {
DCHECK(value->IsNumber() || value->IsName());
if (value->ToArrayLength(length)) return true;
- if (value->IsString()) return String::cast(*value)->AsArrayIndex(length);
+ if (value->IsString()) return String::cast(*value).AsArrayIndex(length);
return false;
}
@@ -3345,7 +3300,7 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
USE(success);
}
uint32_t actual_new_len = 0;
- CHECK(a->length()->ToArrayLength(&actual_new_len));
+ CHECK(a->length().ToArrayLength(&actual_new_len));
// Steps 19d-v, 21. Return false if there were non-deletable elements.
bool result = actual_new_len == new_len;
if (!result) {
@@ -3466,6 +3421,20 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
MessageTemplate::kProxyDefinePropertyNonConfigurable, property_name));
return Nothing<bool>();
}
+ // 16c. If IsDataDescriptor(targetDesc) is true,
+ // targetDesc.[[Configurable]] is
+ // false, and targetDesc.[[Writable]] is true, then
+ if (PropertyDescriptor::IsDataDescriptor(&target_desc) &&
+ !target_desc.configurable() && target_desc.writable()) {
+ // 16c i. If Desc has a [[Writable]] field and Desc.[[Writable]] is false,
+ // throw a TypeError exception.
+ if (desc->has_writable() && !desc->writable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kProxyDefinePropertyNonConfigurableWritable,
+ property_name));
+ return Nothing<bool>();
+ }
+ }
}
// 17. Return true.
return Just(true);
@@ -3483,7 +3452,7 @@ Maybe<bool> JSProxy::SetPrivateSymbol(Isolate* isolate, Handle<JSProxy> proxy,
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kProxyPrivate));
}
- DCHECK(proxy->map()->is_dictionary_map());
+ DCHECK(proxy->map().is_dictionary_map());
Handle<Object> value =
desc->has_value()
? desc->value()
@@ -3617,6 +3586,18 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
name));
return Nothing<bool>();
}
+ // 17b. If resultDesc has a [[Writable]] field and resultDesc.[[Writable]]
+ // is false, then
+ if (desc->has_writable() && !desc->writable()) {
+ // 17b i. If targetDesc.[[Writable]] is true, throw a TypeError exception.
+ if (target_desc.writable()) {
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::
+ kProxyGetOwnPropertyDescriptorNonConfigurableWritable,
+ name));
+ return Nothing<bool>();
+ }
+ }
}
// 18. Return resultDesc.
return Just(true);
@@ -3733,13 +3714,13 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
Name key = desc->GetKey(i);
PropertyDetails details = desc->GetDetails(i);
// Bulk attribute changes never affect private properties.
- if (!key->IsPrivate()) {
+ if (!key.IsPrivate()) {
int mask = DONT_DELETE | DONT_ENUM;
// READ_ONLY is an invalid attribute for JS setters/getters.
HeapObject heap_object;
if (details.kind() != kAccessor ||
!(value_or_field_type->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsAccessorPair())) {
+ heap_object.IsAccessorPair())) {
mask |= READ_ONLY;
}
details = details.CopyAddAttributes(
@@ -3775,7 +3756,7 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
Name key = src->GetKey(i);
PropertyDetails details = src->GetDetails(i);
- DCHECK(!key->IsPrivateName());
+ DCHECK(!key.IsPrivateName());
DCHECK(details.IsEnumerable());
DCHECK_EQ(details.kind(), kData);
@@ -3811,11 +3792,11 @@ Handle<DescriptorArray> DescriptorArray::CopyForFastObjectClone(
bool DescriptorArray::IsEqualUpTo(DescriptorArray desc, int nof_descriptors) {
for (int i = 0; i < nof_descriptors; i++) {
- if (GetKey(i) != desc->GetKey(i) || GetValue(i) != desc->GetValue(i)) {
+ if (GetKey(i) != desc.GetKey(i) || GetValue(i) != desc.GetValue(i)) {
return false;
}
PropertyDetails details = GetDetails(i);
- PropertyDetails other_details = desc->GetDetails(i);
+ PropertyDetails other_details = desc.GetDetails(i);
if (details.kind() != other_details.kind() ||
details.location() != other_details.location() ||
!details.representation().Equals(other_details.representation())) {
@@ -3845,20 +3826,6 @@ Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate,
return new_array;
}
-bool FixedArray::ContainsSortedNumbers() {
- for (int i = 1; i < length(); ++i) {
- Object a_obj = get(i - 1);
- Object b_obj = get(i);
- if (!a_obj->IsNumber() || !b_obj->IsNumber()) return false;
-
- uint32_t a = NumberToUint32(a_obj);
- uint32_t b = NumberToUint32(b_obj);
-
- if (a > b) return false;
- }
- return true;
-}
-
Handle<FixedArray> FixedArray::ShrinkOrEmpty(Isolate* isolate,
Handle<FixedArray> array,
int new_length) {
@@ -3882,13 +3849,12 @@ void FixedArray::CopyTo(int pos, FixedArray dest, int dest_pos, int len) const {
// Return early if len == 0 so that we don't try to read the write barrier off
// a canonical read-only empty fixed array.
if (len == 0) return;
- WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
+ WriteBarrierMode mode = dest.GetWriteBarrierMode(no_gc);
for (int index = 0; index < len; index++) {
- dest->set(dest_pos + index, get(pos + index), mode);
+ dest.set(dest_pos + index, get(pos + index), mode);
}
}
-
// static
Handle<ArrayList> ArrayList::Add(Isolate* isolate, Handle<ArrayList> array,
Handle<Object> obj) {
@@ -4146,7 +4112,7 @@ Handle<FrameArray> FrameArray::AppendWasmFrame(
// The {code} will be {nullptr} for interpreted wasm frames.
Handle<Object> code_ref = isolate->factory()->undefined_value();
if (code) {
- auto native_module = wasm_instance->module_object()->shared_native_module();
+ auto native_module = wasm_instance->module_object().shared_native_module();
code_ref = Managed<wasm::GlobalWasmCodeRef>::Allocate(
isolate, 0, code, std::move(native_module));
}
@@ -4214,14 +4180,14 @@ void DescriptorArray::InitializeOrChangeEnumCache(
enum_cache = *isolate->factory()->NewEnumCache(keys, indices);
descriptors->set_enum_cache(enum_cache);
} else {
- enum_cache->set_keys(*keys);
- enum_cache->set_indices(*indices);
+ enum_cache.set_keys(*keys);
+ enum_cache.set_indices(*indices);
}
}
void DescriptorArray::CopyFrom(int index, DescriptorArray src) {
- PropertyDetails details = src->GetDetails(index);
- Set(index, src->GetKey(index), src->GetValue(index), details);
+ PropertyDetails details = src.GetDetails(index);
+ Set(index, src.GetKey(index), src.GetValue(index), details);
}
void DescriptorArray::Sort() {
@@ -4234,12 +4200,12 @@ void DescriptorArray::Sort() {
const int max_parent_index = (len / 2) - 1;
for (int i = max_parent_index; i >= 0; --i) {
int parent_index = i;
- const uint32_t parent_hash = GetSortedKey(i)->Hash();
+ const uint32_t parent_hash = GetSortedKey(i).Hash();
while (parent_index <= max_parent_index) {
int child_index = 2 * parent_index + 1;
- uint32_t child_hash = GetSortedKey(child_index)->Hash();
+ uint32_t child_hash = GetSortedKey(child_index).Hash();
if (child_index + 1 < len) {
- uint32_t right_child_hash = GetSortedKey(child_index + 1)->Hash();
+ uint32_t right_child_hash = GetSortedKey(child_index + 1).Hash();
if (right_child_hash > child_hash) {
child_index++;
child_hash = right_child_hash;
@@ -4258,13 +4224,13 @@ void DescriptorArray::Sort() {
SwapSortedKeys(0, i);
// Shift down the new top element.
int parent_index = 0;
- const uint32_t parent_hash = GetSortedKey(parent_index)->Hash();
+ const uint32_t parent_hash = GetSortedKey(parent_index).Hash();
const int max_parent_index = (i / 2) - 1;
while (parent_index <= max_parent_index) {
int child_index = parent_index * 2 + 1;
- uint32_t child_hash = GetSortedKey(child_index)->Hash();
+ uint32_t child_hash = GetSortedKey(child_index).Hash();
if (child_index + 1 < i) {
- uint32_t right_child_hash = GetSortedKey(child_index + 1)->Hash();
+ uint32_t right_child_hash = GetSortedKey(child_index + 1).Hash();
if (right_child_hash > child_hash) {
child_index++;
child_hash = right_child_hash;
@@ -4312,12 +4278,12 @@ Handle<Object> AccessorPair::GetComponent(Isolate* isolate,
Handle<AccessorPair> accessor_pair,
AccessorComponent component) {
Object accessor = accessor_pair->get(component);
- if (accessor->IsFunctionTemplateInfo()) {
+ if (accessor.IsFunctionTemplateInfo()) {
return ApiNatives::InstantiateFunction(
handle(FunctionTemplateInfo::cast(accessor), isolate))
.ToHandleChecked();
}
- if (accessor->IsNull(isolate)) {
+ if (accessor.IsNull(isolate)) {
return isolate->factory()->undefined_value();
}
return handle(accessor, isolate);
@@ -4325,11 +4291,11 @@ Handle<Object> AccessorPair::GetComponent(Isolate* isolate,
#ifdef DEBUG
bool DescriptorArray::IsEqualTo(DescriptorArray other) {
- if (number_of_all_descriptors() != other->number_of_all_descriptors()) {
+ if (number_of_all_descriptors() != other.number_of_all_descriptors()) {
return false;
}
for (int i = 0; i < number_of_all_descriptors(); ++i) {
- if (get(i) != other->get(i)) return false;
+ if (get(i) != other.get(i)) return false;
}
return true;
}
@@ -4363,7 +4329,6 @@ MaybeHandle<String> Name::ToFunctionName(Isolate* isolate, Handle<Name> name,
return builder.Finish();
}
-
void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
Relocatable* current = isolate->relocatable_top();
while (current != nullptr) {
@@ -4372,13 +4337,11 @@ void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
}
}
-
// Reserve space for statics needing saving and restoring.
int Relocatable::ArchiveSpacePerThread() {
return sizeof(Relocatable*); // NOLINT
}
-
// Archive statics that are thread-local.
char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
*reinterpret_cast<Relocatable**>(to) = isolate->relocatable_top();
@@ -4386,7 +4349,6 @@ char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
return to + ArchiveSpacePerThread();
}
-
// Restore statics that are thread-local.
char* Relocatable::RestoreState(Isolate* isolate, char* from) {
isolate->set_relocatable_top(*reinterpret_cast<Relocatable**>(from));
@@ -4411,10 +4373,6 @@ void Relocatable::Iterate(RootVisitor* v, Relocatable* top) {
}
}
-
-
-
-
namespace {
template <typename sinkchar>
@@ -4422,31 +4380,31 @@ void WriteFixedArrayToFlat(FixedArray fixed_array, int length, String separator,
sinkchar* sink, int sink_length) {
DisallowHeapAllocation no_allocation;
CHECK_GT(length, 0);
- CHECK_LE(length, fixed_array->length());
+ CHECK_LE(length, fixed_array.length());
#ifdef DEBUG
sinkchar* sink_end = sink + sink_length;
#endif
- const int separator_length = separator->length();
+ const int separator_length = separator.length();
const bool use_one_byte_separator_fast_path =
separator_length == 1 && sizeof(sinkchar) == 1 &&
StringShape(separator).IsSequentialOneByte();
uint8_t separator_one_char;
if (use_one_byte_separator_fast_path) {
CHECK(StringShape(separator).IsSequentialOneByte());
- CHECK_EQ(separator->length(), 1);
+ CHECK_EQ(separator.length(), 1);
separator_one_char =
- SeqOneByteString::cast(separator)->GetChars(no_allocation)[0];
+ SeqOneByteString::cast(separator).GetChars(no_allocation)[0];
}
uint32_t num_separators = 0;
for (int i = 0; i < length; i++) {
- Object element = fixed_array->get(i);
- const bool element_is_separator_sequence = element->IsSmi();
+ Object element = fixed_array.get(i);
+ const bool element_is_separator_sequence = element.IsSmi();
// If element is a Smi, it represents the number of separators to write.
if (V8_UNLIKELY(element_is_separator_sequence)) {
- CHECK(element->ToUint32(&num_separators));
+ CHECK(element.ToUint32(&num_separators));
// Verify that Smis (number of separators) only occur when necessary:
// 1) at the beginning
// 2) at the end
@@ -4478,9 +4436,9 @@ void WriteFixedArrayToFlat(FixedArray fixed_array, int length, String separator,
if (V8_UNLIKELY(element_is_separator_sequence)) {
num_separators = 0;
} else {
- DCHECK(element->IsString());
+ DCHECK(element.IsString());
String string = String::cast(element);
- const int string_length = string->length();
+ const int string_length = string.length();
DCHECK(string_length == 0 || sink < sink_end);
String::WriteToFlat(string, sink, 0, string_length);
@@ -4508,26 +4466,23 @@ Address JSArray::ArrayJoinConcatToSequentialString(Isolate* isolate,
FixedArray fixed_array = FixedArray::cast(Object(raw_fixed_array));
String separator = String::cast(Object(raw_separator));
String dest = String::cast(Object(raw_dest));
- DCHECK(fixed_array->IsFixedArray());
+ DCHECK(fixed_array.IsFixedArray());
DCHECK(StringShape(dest).IsSequentialOneByte() ||
StringShape(dest).IsSequentialTwoByte());
if (StringShape(dest).IsSequentialOneByte()) {
WriteFixedArrayToFlat(fixed_array, static_cast<int>(length), separator,
- SeqOneByteString::cast(dest)->GetChars(no_allocation),
- dest->length());
+ SeqOneByteString::cast(dest).GetChars(no_allocation),
+ dest.length());
} else {
DCHECK(StringShape(dest).IsSequentialTwoByte());
WriteFixedArrayToFlat(fixed_array, static_cast<int>(length), separator,
- SeqTwoByteString::cast(dest)->GetChars(no_allocation),
- dest->length());
+ SeqTwoByteString::cast(dest).GetChars(no_allocation),
+ dest.length());
}
- return dest->ptr();
+ return dest.ptr();
}
-
-
-
uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
// For array indexes mix the length into the hash as an array index could
// be zero.
@@ -4545,88 +4500,6 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
return value;
}
-
-uint32_t StringHasher::GetHashField() {
- if (length_ <= String::kMaxHashCalcLength) {
- if (is_array_index_) {
- return MakeArrayIndexHash(array_index_, length_);
- }
- return (GetHashCore(raw_running_hash_) << String::kHashShift) |
- String::kIsNotArrayIndexMask;
- } else {
- return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask;
- }
-}
-
-uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars, uint64_t seed,
- int* utf16_length_out) {
- int vector_length = chars.length();
- // Handle some edge cases
- if (vector_length <= 1) {
- DCHECK(vector_length == 0 ||
- static_cast<uint8_t>(chars.start()[0]) <=
- unibrow::Utf8::kMaxOneByteChar);
- *utf16_length_out = vector_length;
- return HashSequentialString(chars.start(), vector_length, seed);
- }
-
- // Start with a fake length which won't affect computation.
- // It will be updated later.
- StringHasher hasher(String::kMaxArrayIndexSize, seed);
- DCHECK(hasher.is_array_index_);
-
- unibrow::Utf8Iterator it = unibrow::Utf8Iterator(chars);
- int utf16_length = 0;
- bool is_index = true;
-
- while (utf16_length < String::kMaxHashCalcLength && !it.Done()) {
- utf16_length++;
- uint16_t c = *it;
- ++it;
- hasher.AddCharacter(c);
- if (is_index) is_index = hasher.UpdateIndex(c);
- }
-
- // Now that hashing is done, we just need to calculate utf16_length
- while (!it.Done()) {
- ++it;
- utf16_length++;
- }
-
- *utf16_length_out = utf16_length;
- // Must set length here so that hash computation is correct.
- hasher.length_ = utf16_length;
- return hasher.GetHashField();
-}
-
-void IteratingStringHasher::VisitConsString(ConsString cons_string) {
- // Run small ConsStrings through ConsStringIterator.
- if (cons_string->length() < 64) {
- ConsStringIterator iter(cons_string);
- int offset;
- for (String string = iter.Next(&offset); !string.is_null();
- string = iter.Next(&offset)) {
- DCHECK_EQ(0, offset);
- String::VisitFlat(this, string, 0);
- }
- return;
- }
- // Slow case.
- const int max_length = String::kMaxHashCalcLength;
- int length = std::min(cons_string->length(), max_length);
- if (cons_string->IsOneByteRepresentation()) {
- uint8_t* buffer = new uint8_t[length];
- String::WriteToFlat(cons_string, buffer, 0, length);
- AddCharacters(buffer, length);
- delete[] buffer;
- } else {
- uint16_t* buffer = new uint16_t[length];
- String::WriteToFlat(cons_string, buffer, 0, length);
- AddCharacters(buffer, length);
- delete[] buffer;
- }
-}
-
Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context,
Handle<Map> initial_map) {
// Replace all of the cached initial array maps in the native context with
@@ -4690,7 +4563,7 @@ int Script::GetEvalPosition(Isolate* isolate, Handle<Script> script) {
Handle<SharedFunctionInfo> shared =
handle(script->eval_from_shared(), isolate);
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared);
- position = shared->abstract_code()->SourcePosition(-position);
+ position = shared->abstract_code().SourcePosition(-position);
}
DCHECK_GE(position, 0);
script->set_eval_from_position(position);
@@ -4700,22 +4573,22 @@ int Script::GetEvalPosition(Isolate* isolate, Handle<Script> script) {
void Script::InitLineEnds(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
- if (!script->line_ends()->IsUndefined(isolate)) return;
+ if (!script->line_ends().IsUndefined(isolate)) return;
DCHECK(script->type() != Script::TYPE_WASM ||
- script->source_mapping_url()->IsString());
+ script->source_mapping_url().IsString());
Object src_obj = script->source();
- if (!src_obj->IsString()) {
- DCHECK(src_obj->IsUndefined(isolate));
+ if (!src_obj.IsString()) {
+ DCHECK(src_obj.IsUndefined(isolate));
script->set_line_ends(ReadOnlyRoots(isolate).empty_fixed_array());
} else {
- DCHECK(src_obj->IsString());
+ DCHECK(src_obj.IsString());
Handle<String> src(String::cast(src_obj), isolate);
Handle<FixedArray> array = String::CalculateLineEnds(isolate, src, true);
script->set_line_ends(*array);
}
- DCHECK(script->line_ends()->IsFixedArray());
+ DCHECK(script->line_ends().IsFixedArray());
}
bool Script::GetPositionInfo(Handle<Script> script, int position,
@@ -4733,7 +4606,7 @@ bool Script::ContainsAsmModule() {
SharedFunctionInfo::ScriptIterator iter(this->GetIsolate(), *this);
for (SharedFunctionInfo info = iter.Next(); !info.is_null();
info = iter.Next()) {
- if (info->HasAsmWasmData()) return true;
+ if (info.HasAsmWasmData()) return true;
}
return false;
}
@@ -4741,15 +4614,15 @@ bool Script::ContainsAsmModule() {
namespace {
bool GetPositionInfoSlow(const Script script, int position,
Script::PositionInfo* info) {
- if (!script->source()->IsString()) return false;
+ if (!script.source().IsString()) return false;
if (position < 0) position = 0;
- String source_string = String::cast(script->source());
+ String source_string = String::cast(script.source());
int line = 0;
int line_start = 0;
- int len = source_string->length();
+ int len = source_string.length();
for (int pos = 0; pos <= len; ++pos) {
- if (pos == len || source_string->Get(pos) == '\n') {
+ if (pos == len || source_string.Get(pos) == '\n') {
if (position <= pos) {
info->line = line;
info->column = position - line_start;
@@ -4775,29 +4648,29 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
if (type() == Script::TYPE_WASM) {
DCHECK_LE(0, position);
return WasmModuleObject::cast(wasm_module_object())
- ->GetPositionInfo(static_cast<uint32_t>(position), info);
+ .GetPositionInfo(static_cast<uint32_t>(position), info);
}
- if (line_ends()->IsUndefined()) {
+ if (line_ends().IsUndefined()) {
// Slow mode: we do not have line_ends. We have to iterate through source.
if (!GetPositionInfoSlow(*this, position, info)) return false;
} else {
- DCHECK(line_ends()->IsFixedArray());
+ DCHECK(line_ends().IsFixedArray());
FixedArray ends = FixedArray::cast(line_ends());
- const int ends_len = ends->length();
+ const int ends_len = ends.length();
if (ends_len == 0) return false;
// Return early on invalid positions. Negative positions behave as if 0 was
// passed, and positions beyond the end of the script return as failure.
if (position < 0) {
position = 0;
- } else if (position > SMI_VALUE(ends->get(ends_len - 1))) {
+ } else if (position > SMI_VALUE(ends.get(ends_len - 1))) {
return false;
}
// Determine line number by doing a binary search on the line ends array.
- if (SMI_VALUE(ends->get(0)) >= position) {
+ if (SMI_VALUE(ends.get(0)) >= position) {
info->line = 0;
info->line_start = 0;
info->column = position;
@@ -4808,28 +4681,28 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
while (right > 0) {
DCHECK_LE(left, right);
const int mid = (left + right) / 2;
- if (position > SMI_VALUE(ends->get(mid))) {
+ if (position > SMI_VALUE(ends.get(mid))) {
left = mid + 1;
- } else if (position <= SMI_VALUE(ends->get(mid - 1))) {
+ } else if (position <= SMI_VALUE(ends.get(mid - 1))) {
right = mid - 1;
} else {
info->line = mid;
break;
}
}
- DCHECK(SMI_VALUE(ends->get(info->line)) >= position &&
- SMI_VALUE(ends->get(info->line - 1)) < position);
- info->line_start = SMI_VALUE(ends->get(info->line - 1)) + 1;
+ DCHECK(SMI_VALUE(ends.get(info->line)) >= position &&
+ SMI_VALUE(ends.get(info->line - 1)) < position);
+ info->line_start = SMI_VALUE(ends.get(info->line - 1)) + 1;
info->column = position - info->line_start;
}
// Line end is position of the linebreak character.
- info->line_end = SMI_VALUE(ends->get(info->line));
+ info->line_end = SMI_VALUE(ends.get(info->line));
if (info->line_end > 0) {
- DCHECK(source()->IsString());
+ DCHECK(source().IsString());
String src = String::cast(source());
- if (src->length() >= info->line_end &&
- src->Get(info->line_end - 1) == '\r') {
+ if (src.length() >= info->line_end &&
+ src.Get(info->line_end - 1) == '\r') {
info->line_end--;
}
}
@@ -4873,7 +4746,7 @@ int Script::GetLineNumber(int code_pos) const {
Object Script::GetNameOrSourceURL() {
// Keep in sync with ScriptNameOrSourceURL in messages.js.
- if (!source_url()->IsUndefined()) return source_url();
+ if (!source_url().IsUndefined()) return source_url();
return name();
}
@@ -4884,11 +4757,11 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
// renumbering done by AstFunctionLiteralIdReindexer; in particular, that
// AstTraversalVisitor doesn't recurse properly in the construct which
// triggers the mismatch.
- CHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
- MaybeObject shared = shared_function_infos()->Get(fun->function_literal_id());
+ CHECK_LT(fun->function_literal_id(), shared_function_infos().length());
+ MaybeObject shared = shared_function_infos().Get(fun->function_literal_id());
HeapObject heap_object;
if (!shared->GetHeapObject(&heap_object) ||
- heap_object->IsUndefined(isolate)) {
+ heap_object.IsUndefined(isolate)) {
return MaybeHandle<SharedFunctionInfo>();
}
return handle(SharedFunctionInfo::cast(heap_object), isolate);
@@ -4896,17 +4769,17 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
std::unique_ptr<v8::tracing::TracedValue> Script::ToTracedValue() {
auto value = v8::tracing::TracedValue::Create();
- if (name()->IsString()) {
- value->SetString("name", String::cast(name())->ToCString());
+ if (name().IsString()) {
+ value->SetString("name", String::cast(name()).ToCString());
}
value->SetInteger("lineOffset", line_offset());
value->SetInteger("columnOffset", column_offset());
- if (source_mapping_url()->IsString()) {
+ if (source_mapping_url().IsString()) {
value->SetString("sourceMappingURL",
- String::cast(source_mapping_url())->ToCString());
+ String::cast(source_mapping_url()).ToCString());
}
- if (source()->IsString()) {
- value->SetString("source", String::cast(source())->ToCString());
+ if (source().IsString()) {
+ value->SetString("source", String::cast(source()).ToCString());
}
return value;
}
@@ -4941,17 +4814,18 @@ uint32_t SharedFunctionInfo::Hash() {
// don't use the function's literal id since getting that is slow for compiled
// funcitons.
int start_pos = StartPosition();
- int script_id = script()->IsScript() ? Script::cast(script())->id() : 0;
+ int script_id = script().IsScript() ? Script::cast(script()).id() : 0;
return static_cast<uint32_t>(base::hash_combine(start_pos, script_id));
}
-std::unique_ptr<v8::tracing::TracedValue> SharedFunctionInfo::ToTracedValue() {
+std::unique_ptr<v8::tracing::TracedValue> SharedFunctionInfo::ToTracedValue(
+ FunctionLiteral* literal) {
auto value = v8::tracing::TracedValue::Create();
if (HasSharedName()) {
- value->SetString("name", Name()->ToCString());
+ value->SetString("name", Name().ToCString());
}
if (HasInferredName()) {
- value->SetString("inferredName", inferred_name()->ToCString());
+ value->SetString("inferredName", inferred_name().ToCString());
}
if (is_toplevel()) {
value->SetBoolean("isToplevel", true);
@@ -4959,12 +4833,16 @@ std::unique_ptr<v8::tracing::TracedValue> SharedFunctionInfo::ToTracedValue() {
value->SetInteger("formalParameterCount", internal_formal_parameter_count());
value->SetString("languageMode", LanguageMode2String(language_mode()));
value->SetString("kind", FunctionKind2String(kind()));
- if (script()->IsScript()) {
- value->SetValue("script", Script::cast(script())->TraceIDRef());
+ if (script().IsScript()) {
+ value->SetValue("script", Script::cast(script()).TraceIDRef());
value->BeginDictionary("sourcePosition");
Script::PositionInfo info;
- if (Script::cast(script())->GetPositionInfo(StartPosition(), &info,
- Script::WITH_OFFSET)) {
+ // We get the start position from the {literal} here, because the
+ // SharedFunctionInfo itself might not have a way to get to the
+ // start position early on (currently that's the case when it's
+ // marked for eager compilation).
+ if (Script::cast(script()).GetPositionInfo(literal->start_position(), &info,
+ Script::WITH_OFFSET)) {
value->SetInteger("line", info.line + 1);
value->SetInteger("column", info.column + 1);
}
@@ -4983,12 +4861,12 @@ uint64_t SharedFunctionInfo::TraceID() const {
// can add significant overhead, and we should probably find a better way
// to uniquely identify SharedFunctionInfos over time.
Script script = Script::cast(this->script());
- WeakFixedArray script_functions = script->shared_function_infos();
- for (int i = 0; i < script_functions->length(); ++i) {
+ WeakFixedArray script_functions = script.shared_function_infos();
+ for (int i = 0; i < script_functions.length(); ++i) {
HeapObject script_function;
- if (script_functions->Get(i).GetHeapObjectIfWeak(&script_function) &&
- script_function->address() == address()) {
- return (static_cast<uint64_t>(script->id() + 1) << 32) |
+ if (script_functions.Get(i).GetHeapObjectIfWeak(&script_function) &&
+ script_function.address() == address()) {
+ return (static_cast<uint64_t>(script.id() + 1) << 32) |
(static_cast<uint64_t>(i));
}
}
@@ -5013,35 +4891,39 @@ Code SharedFunctionInfo::GetCode() const {
Isolate* isolate = GetIsolate();
Object data = function_data();
- if (data->IsSmi()) {
+ if (data.IsSmi()) {
// Holding a Smi means we are a builtin.
DCHECK(HasBuiltinId());
return isolate->builtins()->builtin(builtin_id());
- } else if (data->IsBytecodeArray()) {
+ } else if (data.IsBytecodeArray()) {
// Having a bytecode array means we are a compiled, interpreted function.
DCHECK(HasBytecodeArray());
return isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
- } else if (data->IsAsmWasmData()) {
+ } else if (data.IsAsmWasmData()) {
// Having AsmWasmData means we are an asm.js/wasm function.
DCHECK(HasAsmWasmData());
return isolate->builtins()->builtin(Builtins::kInstantiateAsmJs);
- } else if (data->IsUncompiledData()) {
+ } else if (data.IsUncompiledData()) {
// Having uncompiled data (with or without scope) means we need to compile.
DCHECK(HasUncompiledData());
return isolate->builtins()->builtin(Builtins::kCompileLazy);
- } else if (data->IsFunctionTemplateInfo()) {
+ } else if (data.IsFunctionTemplateInfo()) {
// Having a function template info means we are an API function.
DCHECK(IsApiFunction());
return isolate->builtins()->builtin(Builtins::kHandleApiCall);
- } else if (data->IsWasmExportedFunctionData()) {
+ } else if (data.IsWasmExportedFunctionData()) {
// Having a WasmExportedFunctionData means the code is in there.
DCHECK(HasWasmExportedFunctionData());
- return wasm_exported_function_data()->wrapper_code();
- } else if (data->IsInterpreterData()) {
+ return wasm_exported_function_data().wrapper_code();
+ } else if (data.IsInterpreterData()) {
Code code = InterpreterTrampoline();
- DCHECK(code->IsCode());
- DCHECK(code->is_interpreter_trampoline_builtin());
+ DCHECK(code.IsCode());
+ DCHECK(code.is_interpreter_trampoline_builtin());
return code;
+ } else if (data.IsWasmJSFunctionData()) {
+ return wasm_js_function_data().wrapper_code();
+ } else if (data.IsWasmCapiFunctionData()) {
+ return wasm_capi_function_data().wrapper_code();
}
UNREACHABLE();
}
@@ -5052,10 +4934,20 @@ WasmExportedFunctionData SharedFunctionInfo::wasm_exported_function_data()
return WasmExportedFunctionData::cast(function_data());
}
+WasmJSFunctionData SharedFunctionInfo::wasm_js_function_data() const {
+ DCHECK(HasWasmJSFunctionData());
+ return WasmJSFunctionData::cast(function_data());
+}
+
+WasmCapiFunctionData SharedFunctionInfo::wasm_capi_function_data() const {
+ DCHECK(HasWasmCapiFunctionData());
+ return WasmCapiFunctionData::cast(function_data());
+}
+
SharedFunctionInfo::ScriptIterator::ScriptIterator(Isolate* isolate,
Script script)
- : ScriptIterator(isolate,
- handle(script->shared_function_infos(), isolate)) {}
+ : ScriptIterator(isolate, handle(script.shared_function_infos(), isolate)) {
+}
SharedFunctionInfo::ScriptIterator::ScriptIterator(
Isolate* isolate, Handle<WeakFixedArray> shared_function_infos)
@@ -5068,7 +4960,7 @@ SharedFunctionInfo SharedFunctionInfo::ScriptIterator::Next() {
MaybeObject raw = shared_function_infos_->Get(index_++);
HeapObject heap_object;
if (!raw->GetHeapObject(&heap_object) ||
- heap_object->IsUndefined(isolate_)) {
+ heap_object.IsUndefined(isolate_)) {
continue;
}
return SharedFunctionInfo::cast(heap_object);
@@ -5077,7 +4969,7 @@ SharedFunctionInfo SharedFunctionInfo::ScriptIterator::Next() {
}
void SharedFunctionInfo::ScriptIterator::Reset(Script script) {
- shared_function_infos_ = handle(script->shared_function_infos(), isolate_);
+ shared_function_infos_ = handle(script.shared_function_infos(), isolate_);
index_ = 0;
}
@@ -5115,7 +5007,7 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
// This is okay because the gc-time processing of these lists can tolerate
// duplicates.
if (script_object->IsScript()) {
- DCHECK(!shared->script()->IsScript());
+ DCHECK(!shared->script().IsScript());
Handle<Script> script = Handle<Script>::cast(script_object);
Handle<WeakFixedArray> list =
handle(script->shared_function_infos(), isolate);
@@ -5132,9 +5024,9 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
// Remove shared function info from root array.
WeakArrayList noscript_list =
isolate->heap()->noscript_shared_function_infos();
- CHECK(noscript_list->RemoveOne(MaybeObjectHandle::Weak(shared)));
+ CHECK(noscript_list.RemoveOne(MaybeObjectHandle::Weak(shared)));
} else {
- DCHECK(shared->script()->IsScript());
+ DCHECK(shared->script().IsScript());
Handle<WeakArrayList> list =
isolate->factory()->noscript_shared_function_infos();
@@ -5158,13 +5050,13 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
// Due to liveedit, it might happen that the old_script doesn't know
// about the SharedFunctionInfo, so we have to guard against that.
- Handle<WeakFixedArray> infos(old_script->shared_function_infos(), isolate);
+ Handle<WeakFixedArray> infos(old_script.shared_function_infos(), isolate);
if (function_literal_id < infos->length()) {
MaybeObject raw =
- old_script->shared_function_infos()->Get(function_literal_id);
+ old_script.shared_function_infos().Get(function_literal_id);
HeapObject heap_object;
if (raw->GetHeapObjectIfWeak(&heap_object) && heap_object == *shared) {
- old_script->shared_function_infos()->Set(
+ old_script.shared_function_infos().Set(
function_literal_id, HeapObjectReference::Strong(
ReadOnlyRoots(isolate).undefined_value()));
}
@@ -5178,46 +5070,46 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
bool SharedFunctionInfo::HasBreakInfo() const {
if (!HasDebugInfo()) return false;
DebugInfo info = GetDebugInfo();
- bool has_break_info = info->HasBreakInfo();
+ bool has_break_info = info.HasBreakInfo();
return has_break_info;
}
bool SharedFunctionInfo::BreakAtEntry() const {
if (!HasDebugInfo()) return false;
DebugInfo info = GetDebugInfo();
- bool break_at_entry = info->BreakAtEntry();
+ bool break_at_entry = info.BreakAtEntry();
return break_at_entry;
}
bool SharedFunctionInfo::HasCoverageInfo() const {
if (!HasDebugInfo()) return false;
DebugInfo info = GetDebugInfo();
- bool has_coverage_info = info->HasCoverageInfo();
+ bool has_coverage_info = info.HasCoverageInfo();
return has_coverage_info;
}
CoverageInfo SharedFunctionInfo::GetCoverageInfo() const {
DCHECK(HasCoverageInfo());
- return CoverageInfo::cast(GetDebugInfo()->coverage_info());
+ return CoverageInfo::cast(GetDebugInfo().coverage_info());
}
String SharedFunctionInfo::DebugName() {
DisallowHeapAllocation no_gc;
String function_name = Name();
- if (function_name->length() > 0) return function_name;
+ if (function_name.length() > 0) return function_name;
return inferred_name();
}
bool SharedFunctionInfo::PassesFilter(const char* raw_filter) {
Vector<const char> filter = CStrVector(raw_filter);
- std::unique_ptr<char[]> cstrname(DebugName()->ToCString());
+ std::unique_ptr<char[]> cstrname(DebugName().ToCString());
return v8::internal::PassesFilter(CStrVector(cstrname.get()), filter);
}
bool SharedFunctionInfo::HasSourceCode() const {
Isolate* isolate = GetIsolate();
- return !script()->IsUndefined(isolate) &&
- !Script::cast(script())->source()->IsUndefined(isolate);
+ return !script().IsUndefined(isolate) &&
+ !Script::cast(script()).source().IsUndefined(isolate);
}
void SharedFunctionInfo::DiscardCompiledMetadata(
@@ -5227,8 +5119,8 @@ void SharedFunctionInfo::DiscardCompiledMetadata(
DisallowHeapAllocation no_gc;
if (is_compiled()) {
HeapObject outer_scope_info;
- if (scope_info()->HasOuterScopeInfo()) {
- outer_scope_info = scope_info()->OuterScopeInfo();
+ if (scope_info().HasOuterScopeInfo()) {
+ outer_scope_info = scope_info().OuterScopeInfo();
} else {
outer_scope_info = ReadOnlyRoots(isolate).the_hole_value();
}
@@ -5241,8 +5133,7 @@ void SharedFunctionInfo::DiscardCompiledMetadata(
RawField(SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset),
outer_scope_info);
} else {
- DCHECK(outer_scope_info()->IsScopeInfo() ||
- outer_scope_info()->IsTheHole());
+ DCHECK(outer_scope_info().IsScopeInfo() || outer_scope_info().IsTheHole());
}
// TODO(rmcilroy): Possibly discard ScopeInfo here as well.
@@ -5283,7 +5174,7 @@ Handle<Object> SharedFunctionInfo::GetSourceCode(
Handle<SharedFunctionInfo> shared) {
Isolate* isolate = shared->GetIsolate();
if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
- Handle<String> source(String::cast(Script::cast(shared->script())->source()),
+ Handle<String> source(String::cast(Script::cast(shared->script()).source()),
isolate);
return isolate->factory()->NewSubString(source, shared->StartPosition(),
shared->EndPosition());
@@ -5295,7 +5186,7 @@ Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
Isolate* isolate = shared->GetIsolate();
if (!shared->HasSourceCode()) return isolate->factory()->undefined_value();
Handle<String> script_source(
- String::cast(Script::cast(shared->script())->source()), isolate);
+ String::cast(Script::cast(shared->script()).source()), isolate);
int start_pos = shared->function_token_position();
DCHECK_NE(start_pos, kNoSourcePosition);
Handle<String> source = isolate->factory()->NewSubString(
@@ -5307,7 +5198,7 @@ Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony(
builder.AppendCString("function ");
builder.AppendString(Handle<String>(shared->Name(), isolate));
builder.AppendCString("(");
- Handle<FixedArray> args(Script::cast(shared->script())->wrapped_arguments(),
+ Handle<FixedArray> args(Script::cast(shared->script()).wrapped_arguments(),
isolate);
int argc = args->length();
for (int i = 0; i < argc; i++) {
@@ -5330,7 +5221,7 @@ void TraceInlining(SharedFunctionInfo shared, const char* msg) {
} // namespace
bool SharedFunctionInfo::IsInlineable() {
- if (!script()->IsScript()) {
+ if (!script().IsScript()) {
TraceInlining(*this, "false (no Script associated with it)");
return false;
}
@@ -5338,7 +5229,7 @@ bool SharedFunctionInfo::IsInlineable() {
if (GetIsolate()->is_precise_binary_code_coverage() &&
!has_reported_binary_coverage()) {
// We may miss invocations if this function is inlined.
- TraceInlining(*this, "false (requires precise binary coverage)");
+ TraceInlining(*this, "false (requires reported binary coverage)");
return false;
}
@@ -5366,7 +5257,7 @@ bool SharedFunctionInfo::IsInlineable() {
return false;
}
- if (GetBytecodeArray()->length() > FLAG_max_inlined_bytecode_size) {
+ if (GetBytecodeArray().length() > FLAG_max_inlined_bytecode_size) {
TraceInlining(*this, "false (length > FLAG_max_inlined_bytecode_size)");
return false;
}
@@ -5386,10 +5277,10 @@ int SharedFunctionInfo::FindIndexInScript(Isolate* isolate) const {
DisallowHeapAllocation no_gc;
Object script_obj = script();
- if (!script_obj->IsScript()) return kFunctionLiteralIdInvalid;
+ if (!script_obj.IsScript()) return kFunctionLiteralIdInvalid;
WeakFixedArray shared_info_list =
- Script::cast(script_obj)->shared_function_infos();
+ Script::cast(script_obj).shared_function_infos();
SharedFunctionInfo::ScriptIterator iterator(
isolate,
Handle<WeakFixedArray>(reinterpret_cast<Address*>(&shared_info_list)));
@@ -5404,48 +5295,46 @@ int SharedFunctionInfo::FindIndexInScript(Isolate* isolate) const {
return kFunctionLiteralIdInvalid;
}
-
// Output the source code without any allocation in the heap.
std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
const SharedFunctionInfo s = v.value;
// For some native functions there is no source.
- if (!s->HasSourceCode()) return os << "<No Source>";
+ if (!s.HasSourceCode()) return os << "<No Source>";
// Get the source for the script which this function came from.
// Don't use String::cast because we don't want more assertion errors while
// we are already creating a stack dump.
String script_source =
- String::unchecked_cast(Script::cast(s->script())->source());
+ String::unchecked_cast(Script::cast(s.script()).source());
- if (!script_source->LooksValid()) return os << "<Invalid Source>";
+ if (!script_source.LooksValid()) return os << "<Invalid Source>";
- if (!s->is_toplevel()) {
+ if (!s.is_toplevel()) {
os << "function ";
- String name = s->Name();
- if (name->length() > 0) {
- name->PrintUC16(os);
+ String name = s.Name();
+ if (name.length() > 0) {
+ name.PrintUC16(os);
}
}
- int len = s->EndPosition() - s->StartPosition();
+ int len = s.EndPosition() - s.StartPosition();
if (len <= v.max_length || v.max_length < 0) {
- script_source->PrintUC16(os, s->StartPosition(), s->EndPosition());
+ script_source.PrintUC16(os, s.StartPosition(), s.EndPosition());
return os;
} else {
- script_source->PrintUC16(os, s->StartPosition(),
- s->StartPosition() + v.max_length);
+ script_source.PrintUC16(os, s.StartPosition(),
+ s.StartPosition() + v.max_length);
return os << "...\n";
}
}
-
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
set_flags(DisabledOptimizationReasonBits::update(flags(), reason));
// Code should be the lazy compilation stub or else interpreted.
- DCHECK(abstract_code()->kind() == AbstractCode::INTERPRETED_FUNCTION ||
- abstract_code()->kind() == AbstractCode::BUILTIN);
+ DCHECK(abstract_code().kind() == AbstractCode::INTERPRETED_FUNCTION ||
+ abstract_code().kind() == AbstractCode::BUILTIN);
PROFILE(GetIsolate(), CodeDisableOptEvent(abstract_code(), *this));
if (FLAG_trace_opt) {
PrintF("[disabled optimization for ");
@@ -5465,9 +5354,9 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_internal_formal_parameter_count(lit->parameter_count());
shared_info->SetFunctionTokenPosition(lit->function_token_position(),
lit->start_position());
- if (shared_info->scope_info()->HasPositionInfo()) {
- shared_info->scope_info()->SetPositionInfo(lit->start_position(),
- lit->end_position());
+ if (shared_info->scope_info().HasPositionInfo()) {
+ shared_info->scope_info().SetPositionInfo(lit->start_position(),
+ lit->end_position());
needs_position_info = false;
}
shared_info->set_is_declaration(lit->is_declaration());
@@ -5486,7 +5375,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
lit->requires_instance_members_initializer());
shared_info->set_is_toplevel(is_toplevel);
- DCHECK(shared_info->outer_scope_info()->IsTheHole());
+ DCHECK(shared_info->outer_scope_info().IsTheHole());
if (!is_toplevel) {
Scope* outer_scope = lit->scope()->GetOuterScopeWithContext();
if (outer_scope) {
@@ -5589,14 +5478,14 @@ void SharedFunctionInfo::SetFunctionTokenPosition(int function_token_position,
int SharedFunctionInfo::StartPosition() const {
Object maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
+ if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
- if (info->HasPositionInfo()) {
- return info->StartPosition();
+ if (info.HasPositionInfo()) {
+ return info.StartPosition();
}
} else if (HasUncompiledData()) {
// Works with or without scope.
- return uncompiled_data()->start_position();
+ return uncompiled_data().start_position();
} else if (IsApiFunction() || HasBuiltinId()) {
DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
return 0;
@@ -5606,14 +5495,14 @@ int SharedFunctionInfo::StartPosition() const {
int SharedFunctionInfo::EndPosition() const {
Object maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
+ if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
- if (info->HasPositionInfo()) {
- return info->EndPosition();
+ if (info.HasPositionInfo()) {
+ return info.EndPosition();
}
} else if (HasUncompiledData()) {
// Works with or without scope.
- return uncompiled_data()->end_position();
+ return uncompiled_data().end_position();
} else if (IsApiFunction() || HasBuiltinId()) {
DCHECK_IMPLIES(HasBuiltinId(), builtin_id() != Builtins::kCompileLazy);
return 0;
@@ -5624,8 +5513,8 @@ int SharedFunctionInfo::EndPosition() const {
int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
// Fast path for the common case when the SFI is uncompiled and so the
// function literal id is already in the uncompiled data.
- if (HasUncompiledData() && uncompiled_data()->has_function_literal_id()) {
- int id = uncompiled_data()->function_literal_id();
+ if (HasUncompiledData() && uncompiled_data().has_function_literal_id()) {
+ int id = uncompiled_data().function_literal_id();
// Make sure the id is what we should have found with the slow path.
DCHECK_EQ(id, FindIndexInScript(isolate));
return id;
@@ -5638,10 +5527,10 @@ int SharedFunctionInfo::FunctionLiteralId(Isolate* isolate) const {
void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
Object maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
+ if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo info = ScopeInfo::cast(maybe_scope_info);
- if (info->HasPositionInfo()) {
- info->SetPositionInfo(start_position, end_position);
+ if (info.HasPositionInfo()) {
+ info.SetPositionInfo(start_position, end_position);
}
} else if (HasUncompiledData()) {
if (HasUncompiledDataWithPreparseData()) {
@@ -5649,27 +5538,34 @@ void SharedFunctionInfo::SetPosition(int start_position, int end_position) {
// any scope data.
ClearPreparseData();
}
- uncompiled_data()->set_start_position(start_position);
- uncompiled_data()->set_end_position(end_position);
+ uncompiled_data().set_start_position(start_position);
+ uncompiled_data().set_end_position(end_position);
} else {
UNREACHABLE();
}
}
+bool SharedFunctionInfo::AreSourcePositionsAvailable() const {
+ if (FLAG_enable_lazy_source_positions) {
+ return !HasBytecodeArray() || GetBytecodeArray().HasSourcePositionTable();
+ }
+ return true;
+}
+
// static
void SharedFunctionInfo::EnsureSourcePositionsAvailable(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info) {
if (FLAG_enable_lazy_source_positions && shared_info->HasBytecodeArray() &&
- !shared_info->GetBytecodeArray()->HasSourcePositionTable()) {
+ !shared_info->GetBytecodeArray().HasSourcePositionTable()) {
Compiler::CollectSourcePositions(isolate, shared_info);
}
}
bool BytecodeArray::IsBytecodeEqual(const BytecodeArray other) const {
- if (length() != other->length()) return false;
+ if (length() != other.length()) return false;
for (int i = 0; i < length(); ++i) {
- if (get(i) != other->get(i)) return false;
+ if (get(i) != other.get(i)) return false;
}
return true;
@@ -5762,23 +5658,17 @@ Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
return Just(true);
}
-
-
-
-
bool JSArray::SetLengthWouldNormalize(uint32_t new_length) {
if (!HasFastElements()) return false;
- uint32_t capacity = static_cast<uint32_t>(elements()->length());
+ uint32_t capacity = static_cast<uint32_t>(elements().length());
uint32_t new_capacity;
return JSArray::SetLengthWouldNormalize(GetHeap(), new_length) &&
ShouldConvertToSlowElements(*this, capacity, new_length - 1,
&new_capacity);
}
-
const double AllocationSite::kPretenureRatio = 0.85;
-
void AllocationSite::ResetPretenureDecision() {
set_pretenure_decision(kUndecided);
set_memento_found_count(0);
@@ -5793,18 +5683,17 @@ AllocationType AllocationSite::GetAllocationType() const {
bool AllocationSite::IsNested() {
DCHECK(FLAG_trace_track_allocation_sites);
- Object current = boilerplate()->GetHeap()->allocation_sites_list();
- while (current->IsAllocationSite()) {
+ Object current = boilerplate().GetHeap()->allocation_sites_list();
+ while (current.IsAllocationSite()) {
AllocationSite current_site = AllocationSite::cast(current);
- if (current_site->nested_site() == *this) {
+ if (current_site.nested_site() == *this) {
return true;
}
- current = current_site->weak_next();
+ current = current_site.weak_next();
}
return false;
}
-
bool AllocationSite::ShouldTrack(ElementsKind from, ElementsKind to) {
return IsSmiElementsKind(from) &&
IsMoreGeneralElementsKindTransition(from, to);
@@ -5812,26 +5701,30 @@ bool AllocationSite::ShouldTrack(ElementsKind from, ElementsKind to) {
const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
switch (decision) {
- case kUndecided: return "undecided";
- case kDontTenure: return "don't tenure";
- case kMaybeTenure: return "maybe tenure";
- case kTenure: return "tenure";
- case kZombie: return "zombie";
- default: UNREACHABLE();
+ case kUndecided:
+ return "undecided";
+ case kDontTenure:
+ return "don't tenure";
+ case kMaybeTenure:
+ return "maybe tenure";
+ case kTenure:
+ return "tenure";
+ case kZombie:
+ return "zombie";
+ default:
+ UNREACHABLE();
}
return nullptr;
}
-
-
bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
Map map = array->map();
// Fast path: "length" is the first fast property of arrays. Since it's not
// configurable, it's guaranteed to be the first in the descriptor array.
- if (!map->is_dictionary_map()) {
- DCHECK(map->instance_descriptors()->GetKey(0) ==
+ if (!map.is_dictionary_map()) {
+ DCHECK(map.instance_descriptors().GetKey(0) ==
array->GetReadOnlyRoots().length_string());
- return map->instance_descriptors()->GetDetails(0).IsReadOnly();
+ return map.instance_descriptors().GetDetails(0).IsReadOnly();
}
Isolate* isolate = array->GetIsolate();
@@ -5841,17 +5734,13 @@ bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
return it.IsReadOnly();
}
-
-bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
- uint32_t index) {
+bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index) {
uint32_t length = 0;
- CHECK(array->length()->ToArrayLength(&length));
+ CHECK(array->length().ToArrayLength(&length));
if (length <= index) return HasReadOnlyLength(array);
return false;
}
-
-
// Certain compilers request function template instantiation when they
// see the definition of the other template functions in the
// class. This requires us to have the template functions put
@@ -5863,18 +5752,18 @@ void Dictionary<Derived, Shape>::Print(std::ostream& os) {
DisallowHeapAllocation no_gc;
ReadOnlyRoots roots = this->GetReadOnlyRoots();
Derived dictionary = Derived::cast(*this);
- int capacity = dictionary->Capacity();
+ int capacity = dictionary.Capacity();
for (int i = 0; i < capacity; i++) {
- Object k = dictionary->KeyAt(i);
- if (!dictionary->ToKey(roots, i, &k)) continue;
+ Object k = dictionary.KeyAt(i);
+ if (!dictionary.ToKey(roots, i, &k)) continue;
os << "\n ";
- if (k->IsString()) {
- String::cast(k)->StringPrint(os);
+ if (k.IsString()) {
+ String::cast(k).StringPrint(os);
} else {
os << Brief(k);
}
- os << ": " << Brief(dictionary->ValueAt(i)) << " ";
- dictionary->DetailsAt(i).PrintAsSlowTo(os);
+ os << ": " << Brief(dictionary.ValueAt(i)) << " ";
+ dictionary.DetailsAt(i).PrintAsSlowTo(os);
}
}
template <typename Derived, typename Shape>
@@ -5885,8 +5774,6 @@ void Dictionary<Derived, Shape>::Print() {
}
#endif
-
-
int FixedArrayBase::GetMaxLengthForNewSpaceAllocation(ElementsKind kind) {
return ((kMaxRegularHeapObjectSize - FixedArrayBase::kHeaderSize) >>
ElementsKindToShiftSize(kind));
@@ -5896,7 +5783,6 @@ bool FixedArrayBase::IsCowArray() const {
return map() == GetReadOnlyRoots().fixed_cow_array_map();
}
-
const char* Symbol::PrivateSymbolToName() const {
ReadOnlyRoots roots = GetReadOnlyRoots();
#define SYMBOL_CHECK_AND_PRINT(_, name) \
@@ -5906,14 +5792,13 @@ const char* Symbol::PrivateSymbolToName() const {
return "UNKNOWN";
}
-
void Symbol::SymbolShortPrint(std::ostream& os) {
os << "<Symbol:";
- if (!name()->IsUndefined()) {
+ if (!name().IsUndefined()) {
os << " ";
HeapStringAllocator allocator;
StringStream accumulator(&allocator);
- String::cast(name())->StringShortPrint(&accumulator, false);
+ String::cast(name()).StringShortPrint(&accumulator, false);
os << accumulator.ToCString().get();
} else {
os << " (" << PrivateSymbolToName() << ")";
@@ -5921,7 +5806,6 @@ void Symbol::SymbolShortPrint(std::ostream& os) {
os << ">";
}
-
// StringSharedKeys are used as keys in the eval cache.
class StringSharedKey : public HashTableKey {
public:
@@ -5946,22 +5830,22 @@ class StringSharedKey : public HashTableKey {
bool IsMatch(Object other) override {
DisallowHeapAllocation no_allocation;
- if (!other->IsFixedArray()) {
- DCHECK(other->IsNumber());
- uint32_t other_hash = static_cast<uint32_t>(other->Number());
+ if (!other.IsFixedArray()) {
+ DCHECK(other.IsNumber());
+ uint32_t other_hash = static_cast<uint32_t>(other.Number());
return Hash() == other_hash;
}
FixedArray other_array = FixedArray::cast(other);
- SharedFunctionInfo shared = SharedFunctionInfo::cast(other_array->get(0));
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(other_array.get(0));
if (shared != *shared_) return false;
- int language_unchecked = Smi::ToInt(other_array->get(2));
+ int language_unchecked = Smi::ToInt(other_array.get(2));
DCHECK(is_valid_language_mode(language_unchecked));
LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
if (language_mode != language_mode_) return false;
- int position = Smi::ToInt(other_array->get(3));
+ int position = Smi::ToInt(other_array.get(3));
if (position != position_) return false;
- String source = String::cast(other_array->get(1));
- return source->Equals(*source_);
+ String source = String::cast(other_array.get(1));
+ return source.Equals(*source_);
}
Handle<Object> AsHandle(Isolate* isolate) {
@@ -6159,9 +6043,9 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
DisallowHeapAllocation no_gc;
Object current = *reactions;
Object reversed = Smi::kZero;
- while (!current->IsSmi()) {
- Object next = PromiseReaction::cast(current)->next();
- PromiseReaction::cast(current)->set_next(reversed);
+ while (!current.IsSmi()) {
+ Object next = PromiseReaction::cast(current).next();
+ PromiseReaction::cast(current).set_next(reversed);
reversed = current;
current = next;
}
@@ -6199,8 +6083,10 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
}
if (handler_context.is_null()) handler_context = isolate->native_context();
- STATIC_ASSERT(static_cast<int>(PromiseReaction::kSize) ==
- static_cast<int>(PromiseReactionJobTask::kSize));
+ STATIC_ASSERT(
+ static_cast<int>(PromiseReaction::kSize) ==
+ static_cast<int>(
+ PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks));
if (type == PromiseReaction::kFulfill) {
task->synchronized_set_map(
ReadOnlyRoots(isolate).promise_fulfill_reaction_job_task_map());
@@ -6292,7 +6178,7 @@ JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
DisallowHeapAllocation no_gc;
SeqOneByteString seq_flags = SeqOneByteString::cast(*flags);
for (int i = 0; i < length; i++) {
- JSRegExp::Flag flag = CharToFlag(seq_flags.SeqOneByteStringGet(i));
+ JSRegExp::Flag flag = CharToFlag(seq_flags.Get(i));
// Duplicate or invalid flag.
if (value & flag) return JSRegExp::Flags(0);
value |= flag;
@@ -6316,7 +6202,6 @@ JSRegExp::Flags RegExpFlagsFromString(Isolate* isolate, Handle<String> flags,
} // namespace
-
// static
MaybeHandle<JSRegExp> JSRegExp::New(Isolate* isolate, Handle<String> pattern,
Flags flags) {
@@ -6327,7 +6212,6 @@ MaybeHandle<JSRegExp> JSRegExp::New(Isolate* isolate, Handle<String> pattern,
return JSRegExp::Initialize(regexp, pattern, flags);
}
-
// static
Handle<JSRegExp> JSRegExp::Copy(Handle<JSRegExp> regexp) {
Isolate* const isolate = regexp->GetIsolate();
@@ -6453,7 +6337,6 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
return Initialize(regexp, source, flags);
}
-
// static
MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
Handle<String> source, Flags flags) {
@@ -6476,9 +6359,9 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
regexp->set_flags(Smi::FromInt(flags));
Map map = regexp->map();
- Object constructor = map->GetConstructor();
- if (constructor->IsJSFunction() &&
- JSFunction::cast(constructor)->initial_map() == map) {
+ Object constructor = map.GetConstructor();
+ if (constructor.IsJSFunction() &&
+ JSFunction::cast(constructor).initial_map() == map) {
// If we still have the original map, set in-object properties directly.
regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, Smi::kZero,
SKIP_WRITE_BARRIER);
@@ -6494,7 +6377,6 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
return regexp;
}
-
// RegExpKey carries the source and flags of a regular expression as key.
class RegExpKey : public HashTableKey {
public:
@@ -6510,38 +6392,19 @@ class RegExpKey : public HashTableKey {
// a key to a key.
bool IsMatch(Object obj) override {
FixedArray val = FixedArray::cast(obj);
- return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
- && (flags_ == val->get(JSRegExp::kFlagsIndex));
+ return string_->Equals(String::cast(val.get(JSRegExp::kSourceIndex))) &&
+ (flags_ == val.get(JSRegExp::kFlagsIndex));
}
Handle<String> string_;
Smi flags_;
};
-Handle<String> OneByteStringKey::AsHandle(Isolate* isolate) {
- return isolate->factory()->NewOneByteInternalizedString(string_, HashField());
-}
-
-Handle<String> TwoByteStringKey::AsHandle(Isolate* isolate) {
- return isolate->factory()->NewTwoByteInternalizedString(string_, HashField());
-}
-
-Handle<String> SeqOneByteSubStringKey::AsHandle(Isolate* isolate) {
- return isolate->factory()->NewOneByteInternalizedSubString(
- string_, from_, length_, HashField());
-}
-
-bool SeqOneByteSubStringKey::IsMatch(Object string) {
- DisallowHeapAllocation no_gc;
- Vector<const uint8_t> chars(string_->GetChars(no_gc) + from_, length_);
- return String::cast(string)->IsOneByteEqualTo(chars);
-}
-
// InternalizedStringKey carries a string/internalized-string object as key.
-class InternalizedStringKey : public StringTableKey {
+class InternalizedStringKey final : public StringTableKey {
public:
explicit InternalizedStringKey(Handle<String> string)
- : StringTableKey(0), string_(string) {
+ : StringTableKey(0, string->length()), string_(string) {
DCHECK(!string->IsInternalizedString());
DCHECK(string->IsFlat());
// Make sure hash_field is computed.
@@ -6549,9 +6412,7 @@ class InternalizedStringKey : public StringTableKey {
set_hash_field(string->hash_field());
}
- bool IsMatch(Object string) override {
- return string_->SlowEquals(String::cast(string));
- }
+ bool IsMatch(String string) override { return string_->SlowEquals(string); }
Handle<String> AsHandle(Isolate* isolate) override {
// Internalize the string if possible.
@@ -6630,13 +6491,13 @@ Handle<Derived> HashTable<Derived, Shape>::NewInternal(
template <typename Derived, typename Shape>
void HashTable<Derived, Shape>::Rehash(ReadOnlyRoots roots, Derived new_table) {
DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
+ WriteBarrierMode mode = new_table.GetWriteBarrierMode(no_gc);
- DCHECK_LT(NumberOfElements(), new_table->Capacity());
+ DCHECK_LT(NumberOfElements(), new_table.Capacity());
// Copy prefix to new array.
for (int i = kPrefixStartIndex; i < kElementsStartIndex; i++) {
- new_table->set(i, get(i), mode);
+ new_table.set(i, get(i), mode);
}
// Rehash the elements.
@@ -6646,15 +6507,14 @@ void HashTable<Derived, Shape>::Rehash(ReadOnlyRoots roots, Derived new_table) {
Object k = this->get(from_index);
if (!Shape::IsLive(roots, k)) continue;
uint32_t hash = Shape::HashForObject(roots, k);
- uint32_t insertion_index =
- EntryToIndex(new_table->FindInsertionEntry(hash));
- new_table->set_key(insertion_index, get(from_index), mode);
+ uint32_t insertion_index = EntryToIndex(new_table.FindInsertionEntry(hash));
+ new_table.set_key(insertion_index, get(from_index), mode);
for (int j = 1; j < Shape::kEntrySize; j++) {
- new_table->set(insertion_index + j, get(from_index + j), mode);
+ new_table.set(insertion_index + j, get(from_index + j), mode);
}
}
- new_table->SetNumberOfElements(NumberOfElements());
- new_table->SetNumberOfDeletedElements(0);
+ new_table.SetNumberOfElements(NumberOfElements());
+ new_table.SetNumberOfDeletedElements(0);
}
template <typename Derived, typename Shape>
@@ -6817,77 +6677,6 @@ uint32_t HashTable<Derived, Shape>::FindInsertionEntry(uint32_t hash) {
return entry;
}
-// This class is used for looking up two character strings in the string table.
-// If we don't have a hit we don't want to waste much time so we unroll the
-// string hash calculation loop here for speed. Doesn't work if the two
-// characters form a decimal integer, since such strings have a different hash
-// algorithm.
-class TwoCharHashTableKey : public StringTableKey {
- public:
- TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint64_t seed)
- : StringTableKey(ComputeHashField(c1, c2, seed)), c1_(c1), c2_(c2) {}
-
- bool IsMatch(Object o) override {
- String other = String::cast(o);
- if (other->length() != 2) return false;
- if (other->Get(0) != c1_) return false;
- return other->Get(1) == c2_;
- }
-
- Handle<String> AsHandle(Isolate* isolate) override {
- // The TwoCharHashTableKey is only used for looking in the string
- // table, not for adding to it.
- UNREACHABLE();
- }
-
- private:
- uint32_t ComputeHashField(uint16_t c1, uint16_t c2, uint64_t seed) {
- // Char 1.
- uint32_t hash = static_cast<uint32_t>(seed);
- hash += c1;
- hash += hash << 10;
- hash ^= hash >> 6;
- // Char 2.
- hash += c2;
- hash += hash << 10;
- hash ^= hash >> 6;
- // GetHash.
- hash += hash << 3;
- hash ^= hash >> 11;
- hash += hash << 15;
- if ((hash & String::kHashBitMask) == 0) hash = StringHasher::kZeroHash;
- hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
-#ifdef DEBUG
- // If this assert fails then we failed to reproduce the two-character
- // version of the string hashing algorithm above. One reason could be
- // that we were passed two digits as characters, since the hash
- // algorithm is different in that case.
- uint16_t chars[2] = {c1, c2};
- uint32_t check_hash = StringHasher::HashSequentialString(chars, 2, seed);
- DCHECK_EQ(hash, check_hash);
-#endif
- return hash;
- }
-
- uint16_t c1_;
- uint16_t c2_;
-};
-
-MaybeHandle<String> StringTable::LookupTwoCharsStringIfExists(
- Isolate* isolate,
- uint16_t c1,
- uint16_t c2) {
- TwoCharHashTableKey key(c1, c2, HashSeed(isolate));
- Handle<StringTable> string_table = isolate->factory()->string_table();
- int entry = string_table->FindEntry(isolate, &key);
- if (entry == kNotFound) return MaybeHandle<String>();
-
- Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate);
- DCHECK(StringShape(*result).IsInternalized());
- DCHECK_EQ(result->Hash(), key.Hash());
- return result;
-}
-
void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
int expected) {
Handle<StringTable> table = isolate->factory()->string_table();
@@ -6896,66 +6685,6 @@ void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
isolate->heap()->SetRootStringTable(*table);
}
-namespace {
-
-template <class StringClass>
-void MigrateExternalStringResource(Isolate* isolate, String from, String to) {
- StringClass cast_from = StringClass::cast(from);
- StringClass cast_to = StringClass::cast(to);
- const typename StringClass::Resource* to_resource = cast_to->resource();
- if (to_resource == nullptr) {
- // |to| is a just-created internalized copy of |from|. Migrate the resource.
- cast_to->SetResource(isolate, cast_from->resource());
- // Zap |from|'s resource pointer to reflect the fact that |from| has
- // relinquished ownership of its resource.
- isolate->heap()->UpdateExternalString(
- from, ExternalString::cast(from)->ExternalPayloadSize(), 0);
- cast_from->SetResource(isolate, nullptr);
- } else if (to_resource != cast_from->resource()) {
- // |to| already existed and has its own resource. Finalize |from|.
- isolate->heap()->FinalizeExternalString(from);
- }
-}
-
-void MakeStringThin(String string, String internalized, Isolate* isolate) {
- DCHECK_NE(string, internalized);
- DCHECK(internalized->IsInternalizedString());
-
- if (string->IsExternalString()) {
- if (internalized->IsExternalOneByteString()) {
- MigrateExternalStringResource<ExternalOneByteString>(isolate, string,
- internalized);
- } else if (internalized->IsExternalTwoByteString()) {
- MigrateExternalStringResource<ExternalTwoByteString>(isolate, string,
- internalized);
- } else {
- // If the external string is duped into an existing non-external
- // internalized string, free its resource (it's about to be rewritten
- // into a ThinString below).
- isolate->heap()->FinalizeExternalString(string);
- }
- }
-
- DisallowHeapAllocation no_gc;
- int old_size = string->Size();
- isolate->heap()->NotifyObjectLayoutChange(string, old_size, no_gc);
- bool one_byte = internalized->IsOneByteRepresentation();
- Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
- : isolate->factory()->thin_string_map();
- DCHECK_GE(old_size, ThinString::kSize);
- string->synchronized_set_map(*map);
- ThinString thin = ThinString::cast(string);
- thin->set_actual(internalized);
- Address thin_end = thin->address() + ThinString::kSize;
- int size_delta = old_size - ThinString::kSize;
- if (size_delta != 0) {
- Heap* heap = isolate->heap();
- heap->CreateFillerObjectAt(thin_end, size_delta, ClearRecordedSlots::kNo);
- }
-}
-
-} // namespace
-
// static
Handle<String> StringTable::LookupString(Isolate* isolate,
Handle<String> string) {
@@ -6967,7 +6696,7 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
if (FLAG_thin_strings) {
if (!string->IsInternalizedString()) {
- MakeStringThin(*string, *result, isolate);
+ string->MakeThin(isolate, *result);
}
} else { // !FLAG_thin_strings
if (string->IsConsString()) {
@@ -6992,6 +6721,7 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
}
// static
+template <typename StringTableKey>
Handle<String> StringTable::LookupKey(Isolate* isolate, StringTableKey* key) {
Handle<StringTable> table = isolate->factory()->string_table();
int entry = table->FindEntry(isolate, key);
@@ -7009,6 +6739,15 @@ Handle<String> StringTable::LookupKey(Isolate* isolate, StringTableKey* key) {
return AddKeyNoResize(isolate, key);
}
+template Handle<String> StringTable::LookupKey(Isolate* isolate,
+ OneByteStringKey* key);
+template Handle<String> StringTable::LookupKey(Isolate* isolate,
+ TwoByteStringKey* key);
+template Handle<String> StringTable::LookupKey(Isolate* isolate,
+ SeqOneByteSubStringKey* key);
+template Handle<String> StringTable::LookupKey(Isolate* isolate,
+ SeqTwoByteSubStringKey* key);
+
Handle<String> StringTable::AddKeyNoResize(Isolate* isolate,
StringTableKey* key) {
Handle<StringTable> table = isolate->factory()->string_table();
@@ -7022,7 +6761,7 @@ Handle<String> StringTable::AddKeyNoResize(Isolate* isolate,
DCHECK_EQ(table->FindEntry(isolate, key), kNotFound);
// Add the new string and return it along with the string table.
- int entry = table->FindInsertionEntry(key->Hash());
+ int entry = table->FindInsertionEntry(key->hash());
table->set(EntryToIndex(entry), *string);
table->ElementAdded();
@@ -7043,149 +6782,62 @@ Handle<StringTable> StringTable::CautiousShrink(Isolate* isolate,
namespace {
-class StringTableNoAllocateKey : public StringTableKey {
- public:
- StringTableNoAllocateKey(String string, uint64_t seed)
- : StringTableKey(0), string_(string) {
- StringShape shape(string);
- one_byte_ = shape.encoding_tag() == kOneByteStringTag;
- DCHECK(!shape.IsInternalized());
- DCHECK(!shape.IsThin());
- int length = string->length();
- if (shape.IsCons() && length <= String::kMaxHashCalcLength) {
- special_flattening_ = true;
- uint32_t hash_field = 0;
- if (one_byte_) {
- if (V8_LIKELY(length <=
- static_cast<int>(arraysize(one_byte_buffer_)))) {
- one_byte_content_ = one_byte_buffer_;
- } else {
- one_byte_content_ = new uint8_t[length];
- }
- String::WriteToFlat(string, one_byte_content_, 0, length);
- hash_field =
- StringHasher::HashSequentialString(one_byte_content_, length, seed);
- } else {
- if (V8_LIKELY(length <=
- static_cast<int>(arraysize(two_byte_buffer_)))) {
- two_byte_content_ = two_byte_buffer_;
- } else {
- two_byte_content_ = new uint16_t[length];
- }
- String::WriteToFlat(string, two_byte_content_, 0, length);
- hash_field =
- StringHasher::HashSequentialString(two_byte_content_, length, seed);
- }
- string->set_hash_field(hash_field);
- } else {
- special_flattening_ = false;
- one_byte_content_ = nullptr;
- string->Hash();
- }
+template <typename Char>
+Address LookupString(Isolate* isolate, String string, String source,
+ size_t start) {
+ DisallowHeapAllocation no_gc;
+ StringTable table = isolate->heap()->string_table();
+ uint64_t seed = HashSeed(isolate);
- DCHECK(string->HasHashCode());
- set_hash_field(string->hash_field());
- }
+ int length = string.length();
- ~StringTableNoAllocateKey() override {
- if (one_byte_) {
- if (one_byte_content_ != one_byte_buffer_) delete[] one_byte_content_;
- } else {
- if (two_byte_content_ != two_byte_buffer_) delete[] two_byte_content_;
- }
+ std::unique_ptr<Char[]> buffer;
+ const Char* chars;
+
+ if (source.IsConsString()) {
+ DCHECK(!source.IsFlat());
+ buffer.reset(new Char[length]);
+ String::WriteToFlat(source, buffer.get(), 0, length);
+ chars = buffer.get();
+ } else {
+ chars = source.GetChars<Char>(no_gc) + start;
}
+ // TODO(verwaest): Internalize to one-byte when possible.
+ SequentialStringKey<Char> key(Vector<const Char>(chars, length), seed);
- bool IsMatch(Object otherstring) override {
- String other = String::cast(otherstring);
- DCHECK(other->IsInternalizedString());
- DCHECK(other->IsFlat());
- if (Hash() != other->Hash()) return false;
- int len = string_->length();
- if (len != other->length()) return false;
+ // String could be an array index.
+ uint32_t hash_field = key.hash_field();
- DisallowHeapAllocation no_gc;
- if (!special_flattening_) {
- if (string_->Get(0) != other->Get(0)) return false;
- if (string_->IsFlat()) {
- StringShape shape1(string_);
- StringShape shape2(other);
- if (shape1.encoding_tag() == kOneByteStringTag &&
- shape2.encoding_tag() == kOneByteStringTag) {
- String::FlatContent flat1 = string_->GetFlatContent(no_gc);
- String::FlatContent flat2 = other->GetFlatContent(no_gc);
- return CompareRawStringContents(flat1.ToOneByteVector().start(),
- flat2.ToOneByteVector().start(), len);
- }
- if (shape1.encoding_tag() == kTwoByteStringTag &&
- shape2.encoding_tag() == kTwoByteStringTag) {
- String::FlatContent flat1 = string_->GetFlatContent(no_gc);
- String::FlatContent flat2 = other->GetFlatContent(no_gc);
- return CompareRawStringContents(flat1.ToUC16Vector().start(),
- flat2.ToUC16Vector().start(), len);
- }
- }
- StringComparator comparator;
- return comparator.Equals(string_, other);
- }
+ if (Name::ContainsCachedArrayIndex(hash_field)) {
+ return Smi::FromInt(String::ArrayIndexValueBits::decode(hash_field)).ptr();
+ }
- String::FlatContent flat_content = other->GetFlatContent(no_gc);
- if (one_byte_) {
- if (flat_content.IsOneByte()) {
- return CompareRawStringContents(
- one_byte_content_, flat_content.ToOneByteVector().start(), len);
- } else {
- DCHECK(flat_content.IsTwoByte());
- for (int i = 0; i < len; i++) {
- if (flat_content.Get(i) != one_byte_content_[i]) return false;
- }
- return true;
- }
- } else {
- if (flat_content.IsTwoByte()) {
- return CompareRawStringContents(
- two_byte_content_, flat_content.ToUC16Vector().start(), len);
- } else {
- DCHECK(flat_content.IsOneByte());
- for (int i = 0; i < len; i++) {
- if (flat_content.Get(i) != two_byte_content_[i]) return false;
- }
- return true;
- }
- }
+ if ((hash_field & Name::kIsNotArrayIndexMask) == 0) {
+ // It is an indexed, but it's not cached.
+ return Smi::FromInt(ResultSentinel::kUnsupported).ptr();
}
- V8_WARN_UNUSED_RESULT Handle<String> AsHandle(Isolate* isolate) override {
- UNREACHABLE();
+ int entry = table.FindEntry(ReadOnlyRoots(isolate), &key, key.hash());
+ if (entry == kNotFound) {
+ // A string that's not an array index, and not in the string table,
+ // cannot have been used as a property name before.
+ return Smi::FromInt(ResultSentinel::kNotFound).ptr();
}
- private:
- String string_;
- bool one_byte_;
- bool special_flattening_;
- union {
- uint8_t* one_byte_content_;
- uint16_t* two_byte_content_;
- };
- union {
- uint8_t one_byte_buffer_[256];
- uint16_t two_byte_buffer_[128];
- };
-};
+ String internalized = String::cast(table.KeyAt(entry));
+ if (FLAG_thin_strings) {
+ string.MakeThin(isolate, internalized);
+ }
+ return internalized.ptr();
+}
} // namespace
// static
Address StringTable::LookupStringIfExists_NoAllocate(Isolate* isolate,
Address raw_string) {
- DisallowHeapAllocation no_gc;
String string = String::cast(Object(raw_string));
- Heap* heap = isolate->heap();
- StringTable table = heap->string_table();
-
- StringTableNoAllocateKey key(string, HashSeed(isolate));
-
- // String could be an array index.
- uint32_t hash = string->hash_field();
+ DCHECK(!string.IsInternalizedString());
// Valid array indices are >= 0, so they cannot be mixed up with any of
// the result sentinels, which are negative.
@@ -7194,37 +6846,25 @@ Address StringTable::LookupStringIfExists_NoAllocate(Isolate* isolate,
STATIC_ASSERT(
!String::ArrayIndexValueBits::is_valid(ResultSentinel::kNotFound));
- if (Name::ContainsCachedArrayIndex(hash)) {
- return Smi::FromInt(String::ArrayIndexValueBits::decode(hash)).ptr();
- }
- if ((hash & Name::kIsNotArrayIndexMask) == 0) {
- // It is an indexed, but it's not cached.
- return Smi::FromInt(ResultSentinel::kUnsupported).ptr();
+ size_t start = 0;
+ String source = string;
+ if (source.IsSlicedString()) {
+ SlicedString sliced = SlicedString::cast(source);
+ start = sliced.offset();
+ source = sliced.parent();
+ } else if (source.IsConsString() && source.IsFlat()) {
+ source = ConsString::cast(source).first();
}
-
- DCHECK(!string->IsInternalizedString());
- int entry = table->FindEntry(ReadOnlyRoots(isolate), &key, key.Hash());
- if (entry != kNotFound) {
- String internalized = String::cast(table->KeyAt(entry));
- if (FLAG_thin_strings) {
- MakeStringThin(string, internalized, isolate);
+ if (source.IsThinString()) {
+ source = ThinString::cast(source).actual();
+ if (string.length() == source.length()) {
+ return source.ptr();
}
- return internalized.ptr();
}
- // A string that's not an array index, and not in the string table,
- // cannot have been used as a property name before.
- return Smi::FromInt(ResultSentinel::kNotFound).ptr();
-}
-
-String StringTable::ForwardStringIfExists(Isolate* isolate, StringTableKey* key,
- String string) {
- Handle<StringTable> table = isolate->factory()->string_table();
- int entry = table->FindEntry(isolate, key);
- if (entry == kNotFound) return String();
-
- String canonical = String::cast(table->KeyAt(entry));
- if (canonical != string) MakeStringThin(string, canonical, isolate);
- return canonical;
+ if (source.IsOneByteRepresentation()) {
+ return i::LookupString<uint8_t>(isolate, string, source, start);
+ }
+ return i::LookupString<uint16_t>(isolate, string, source, start);
}
Handle<StringSet> StringSet::New(Isolate* isolate) {
@@ -7250,7 +6890,7 @@ bool StringSet::Has(Isolate* isolate, Handle<String> name) {
Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
Handle<ObjectHashSet> set,
Handle<Object> key) {
- int32_t hash = key->GetOrCreateHash(isolate)->value();
+ int32_t hash = key->GetOrCreateHash(isolate).value();
if (!set->Has(isolate, key, hash)) {
set = EnsureCapacity(isolate, set, 1);
int entry = set->FindInsertionEntry(hash);
@@ -7270,18 +6910,18 @@ const int kLiteralLiteralsOffset = 1;
int SearchLiteralsMapEntry(CompilationCacheTable cache, int cache_entry,
Context native_context) {
DisallowHeapAllocation no_gc;
- DCHECK(native_context->IsNativeContext());
- Object obj = cache->get(cache_entry);
+ DCHECK(native_context.IsNativeContext());
+ Object obj = cache.get(cache_entry);
// Check that there's no confusion between FixedArray and WeakFixedArray (the
// object used to be a FixedArray here).
- DCHECK(!obj->IsFixedArray());
- if (obj->IsWeakFixedArray()) {
+ DCHECK(!obj.IsFixedArray());
+ if (obj.IsWeakFixedArray()) {
WeakFixedArray literals_map = WeakFixedArray::cast(obj);
- int length = literals_map->length();
+ int length = literals_map.length();
for (int i = 0; i < length; i += kLiteralEntryLength) {
- DCHECK(literals_map->Get(i + kLiteralContextOffset)->IsWeakOrCleared());
- if (literals_map->Get(i + kLiteralContextOffset) ==
+ DCHECK(literals_map.Get(i + kLiteralContextOffset)->IsWeakOrCleared());
+ if (literals_map.Get(i + kLiteralContextOffset) ==
HeapObjectReference::Weak(native_context)) {
return i;
}
@@ -7303,8 +6943,8 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
// Check that there's no confusion between FixedArray and WeakFixedArray (the
// object used to be a FixedArray here).
- DCHECK(!obj->IsFixedArray());
- if (!obj->IsWeakFixedArray() || WeakFixedArray::cast(obj)->length() == 0) {
+ DCHECK(!obj.IsFixedArray());
+ if (!obj.IsWeakFixedArray() || WeakFixedArray::cast(obj).length() == 0) {
new_literals_map = isolate->factory()->NewWeakFixedArray(
kLiteralInitialLength, AllocationType::kOld);
entry = 0;
@@ -7346,10 +6986,10 @@ void AddToFeedbackCellsMap(Handle<CompilationCacheTable> cache, int cache_entry,
for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
MaybeObject object = new_literals_map->Get(i + kLiteralContextOffset);
DCHECK(object->IsCleared() ||
- object->GetHeapObjectAssumeWeak()->IsNativeContext());
+ object->GetHeapObjectAssumeWeak().IsNativeContext());
object = new_literals_map->Get(i + kLiteralLiteralsOffset);
DCHECK(object->IsCleared() ||
- object->GetHeapObjectAssumeWeak()->IsFeedbackCell());
+ object->GetHeapObjectAssumeWeak().IsFeedbackCell());
}
#endif
@@ -7364,15 +7004,15 @@ FeedbackCell SearchLiteralsMap(CompilationCacheTable cache, int cache_entry,
FeedbackCell result;
int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
if (entry >= 0) {
- WeakFixedArray literals_map = WeakFixedArray::cast(cache->get(cache_entry));
- DCHECK_LE(entry + kLiteralEntryLength, literals_map->length());
- MaybeObject object = literals_map->Get(entry + kLiteralLiteralsOffset);
+ WeakFixedArray literals_map = WeakFixedArray::cast(cache.get(cache_entry));
+ DCHECK_LE(entry + kLiteralEntryLength, literals_map.length());
+ MaybeObject object = literals_map.Get(entry + kLiteralLiteralsOffset);
if (!object->IsCleared()) {
result = FeedbackCell::cast(object->GetHeapObjectAssumeWeak());
}
}
- DCHECK(result.is_null() || result->IsFeedbackCell());
+ DCHECK(result.is_null() || result.IsFeedbackCell());
return result;
}
@@ -7385,7 +7025,7 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
// empty_function is native context dependent, the SFI is de-duped on
// snapshot builds by the PartialSnapshotCache, and so this does not prevent
// reuse of scripts in the compilation cache across native contexts.
- Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared(),
+ Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
native_context->GetIsolate());
Isolate* isolate = native_context->GetIsolate();
src = String::Flatten(isolate, src);
@@ -7393,11 +7033,11 @@ MaybeHandle<SharedFunctionInfo> CompilationCacheTable::LookupScript(
int entry = table->FindEntry(isolate, &key);
if (entry == kNotFound) return MaybeHandle<SharedFunctionInfo>();
int index = EntryToIndex(entry);
- if (!table->get(index)->IsFixedArray()) {
+ if (!table->get(index).IsFixedArray()) {
return MaybeHandle<SharedFunctionInfo>();
}
Object obj = table->get(index + 1);
- if (obj->IsSharedFunctionInfo()) {
+ if (obj.IsSharedFunctionInfo()) {
return handle(SharedFunctionInfo::cast(obj), native_context->GetIsolate());
}
return MaybeHandle<SharedFunctionInfo>();
@@ -7414,9 +7054,9 @@ InfoCellPair CompilationCacheTable::LookupEval(
int entry = table->FindEntry(isolate, &key);
if (entry == kNotFound) return empty_result;
int index = EntryToIndex(entry);
- if (!table->get(index)->IsFixedArray()) return empty_result;
+ if (!table->get(index).IsFixedArray()) return empty_result;
Object obj = table->get(EntryToIndex(entry) + 1);
- if (obj->IsSharedFunctionInfo()) {
+ if (obj.IsSharedFunctionInfo()) {
FeedbackCell feedback_cell =
SearchLiteralsMap(*table, EntryToIndex(entry) + 2, *native_context);
return InfoCellPair(SharedFunctionInfo::cast(obj), feedback_cell);
@@ -7443,7 +7083,7 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
// empty_function is native context dependent, the SFI is de-duped on
// snapshot builds by the PartialSnapshotCache, and so this does not prevent
// reuse of scripts in the compilation cache across native contexts.
- Handle<SharedFunctionInfo> shared(native_context->empty_function()->shared(),
+ Handle<SharedFunctionInfo> shared(native_context->empty_function().shared(),
isolate);
src = String::Flatten(isolate, src);
StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
@@ -7504,7 +7144,6 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
return cache;
}
-
void CompilationCacheTable::Age() {
DisallowHeapAllocation no_allocation;
Object the_hole_value = GetReadOnlyRoots().the_hole_value();
@@ -7512,19 +7151,19 @@ void CompilationCacheTable::Age() {
int entry_index = EntryToIndex(entry);
int value_index = entry_index + 1;
- if (get(entry_index)->IsNumber()) {
+ if (get(entry_index).IsNumber()) {
Smi count = Smi::cast(get(value_index));
- count = Smi::FromInt(count->value() - 1);
- if (count->value() == 0) {
+ count = Smi::FromInt(count.value() - 1);
+ if (count.value() == 0) {
NoWriteBarrierSet(*this, entry_index, the_hole_value);
NoWriteBarrierSet(*this, value_index, the_hole_value);
ElementRemoved();
} else {
NoWriteBarrierSet(*this, value_index, count);
}
- } else if (get(entry_index)->IsFixedArray()) {
+ } else if (get(entry_index).IsFixedArray()) {
SharedFunctionInfo info = SharedFunctionInfo::cast(get(value_index));
- if (info->IsInterpreted() && info->GetBytecodeArray()->IsOld()) {
+ if (info.IsInterpreted() && info.GetBytecodeArray().IsOld()) {
for (int i = 0; i < kEntrySize; i++) {
NoWriteBarrierSet(*this, entry_index + i, the_hole_value);
}
@@ -7547,7 +7186,6 @@ void CompilationCacheTable::Remove(Object value) {
ElementRemoved();
}
}
- return;
}
template <typename Derived, typename Shape>
@@ -7667,8 +7305,8 @@ Handle<Derived> Dictionary<Derived, Shape>::Add(Isolate* isolate,
uint32_t entry = dictionary->FindInsertionEntry(hash);
dictionary->SetEntry(isolate, entry, *k, *value, details);
- DCHECK(dictionary->KeyAt(entry)->IsNumber() ||
- Shape::Unwrap(dictionary->KeyAt(entry))->IsUniqueName());
+ DCHECK(dictionary->KeyAt(entry).IsNumber() ||
+ Shape::Unwrap(dictionary->KeyAt(entry)).IsUniqueName());
dictionary->ElementAdded();
if (entry_out) *entry_out = entry;
return dictionary;
@@ -7681,21 +7319,6 @@ Handle<SimpleNumberDictionary> SimpleNumberDictionary::Set(
return AtPut(isolate, dictionary, key, value, PropertyDetails::Empty());
}
-bool NumberDictionary::HasComplexElements() {
- if (!requires_slow_elements()) return false;
- ReadOnlyRoots roots = GetReadOnlyRoots();
- int capacity = this->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object k;
- if (!this->ToKey(roots, i, &k)) continue;
- PropertyDetails details = this->DetailsAt(i);
- if (details.kind() == kAccessor) return true;
- PropertyAttributes attr = details.attributes();
- if (attr & ALL_ATTRIBUTES_MASK) return true;
- }
- return false;
-}
-
void NumberDictionary::UpdateMaxNumberKey(uint32_t key,
Handle<JSObject> dictionary_holder) {
DisallowHeapAllocation no_allocation;
@@ -7713,7 +7336,7 @@ void NumberDictionary::UpdateMaxNumberKey(uint32_t key,
}
// Update max key value.
Object max_index_object = get(kMaxNumberKeyIndex);
- if (!max_index_object->IsSmi() || max_number_key() < key) {
+ if (!max_index_object.IsSmi() || max_number_key() < key) {
FixedArray::set(kMaxNumberKeyIndex,
Smi::FromInt(key << kRequiresSlowElementsTagSize));
}
@@ -7732,14 +7355,14 @@ void NumberDictionary::CopyValuesTo(FixedArray elements) {
int pos = 0;
int capacity = this->Capacity();
DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
+ WriteBarrierMode mode = elements.GetWriteBarrierMode(no_gc);
for (int i = 0; i < capacity; i++) {
Object k;
if (this->ToKey(roots, i, &k)) {
- elements->set(pos++, this->ValueAt(i), mode);
+ elements.set(pos++, this->ValueAt(i), mode);
}
}
- DCHECK_EQ(pos, elements->length());
+ DCHECK_EQ(pos, elements.length());
}
template <typename Derived, typename Shape>
@@ -7750,7 +7373,7 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
for (int i = 0; i < capacity; i++) {
Object k;
if (!this->ToKey(roots, i, &k)) continue;
- if (k->FilterKey(ENUMERABLE_STRINGS)) continue;
+ if (k.FilterKey(ENUMERABLE_STRINGS)) continue;
PropertyDetails details = this->DetailsAt(i);
PropertyAttributes attr = details.attributes();
if ((attr & ONLY_ENUMERABLE) == 0) result++;
@@ -7758,13 +7381,12 @@ int Dictionary<Derived, Shape>::NumberOfEnumerableProperties() {
return result;
}
-
template <typename Dictionary>
struct EnumIndexComparator {
explicit EnumIndexComparator(Dictionary dict) : dict(dict) {}
bool operator()(Tagged_t a, Tagged_t b) {
- PropertyDetails da(dict->DetailsAt(Smi(static_cast<Address>(a)).value()));
- PropertyDetails db(dict->DetailsAt(Smi(static_cast<Address>(b)).value()));
+ PropertyDetails da(dict.DetailsAt(Smi(static_cast<Address>(a)).value()));
+ PropertyDetails db(dict.DetailsAt(Smi(static_cast<Address>(b)).value()));
return da.dictionary_index() < db.dictionary_index();
}
Dictionary dict;
@@ -7783,7 +7405,7 @@ void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
Object key;
if (!dictionary->ToKey(roots, i, &key)) continue;
bool is_shadowing_key = false;
- if (key->IsSymbol()) continue;
+ if (key.IsSymbol()) continue;
PropertyDetails details = dictionary->DetailsAt(i);
if (details.IsDontEnum()) {
if (mode == KeyCollectionMode::kIncludePrototypes) {
@@ -7812,8 +7434,8 @@ void BaseNameDictionary<Derived, Shape>::CopyEnumKeysTo(
AtomicSlot start(storage->GetFirstElementAddress());
std::sort(start, start + length, cmp);
for (int i = 0; i < length; i++) {
- int index = Smi::ToInt(raw_storage->get(i));
- raw_storage->set(i, raw_dictionary->NameAt(index));
+ int index = Smi::ToInt(raw_storage.get(i));
+ raw_storage.set(i, raw_dictionary.NameAt(index));
}
}
@@ -7830,7 +7452,7 @@ Handle<FixedArray> BaseNameDictionary<Derived, Shape>::IterationIndices(
Derived raw_dictionary = *dictionary;
for (int i = 0; i < capacity; i++) {
Object k;
- if (!raw_dictionary->ToKey(roots, i, &k)) continue;
+ if (!raw_dictionary.ToKey(roots, i, &k)) continue;
array->set(array_size++, Smi::FromInt(i));
}
@@ -7860,18 +7482,18 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
Derived raw_dictionary = *dictionary;
for (int i = 0; i < capacity; i++) {
Object k;
- if (!raw_dictionary->ToKey(roots, i, &k)) continue;
- if (k->FilterKey(filter)) continue;
- PropertyDetails details = raw_dictionary->DetailsAt(i);
+ if (!raw_dictionary.ToKey(roots, i, &k)) continue;
+ if (k.FilterKey(filter)) continue;
+ PropertyDetails details = raw_dictionary.DetailsAt(i);
if ((details.attributes() & filter) != 0) {
keys->AddShadowingKey(k);
continue;
}
if (filter & ONLY_ALL_CAN_READ) {
if (details.kind() != kAccessor) continue;
- Object accessors = raw_dictionary->ValueAt(i);
- if (!accessors->IsAccessorInfo()) continue;
- if (!AccessorInfo::cast(accessors)->all_can_read()) continue;
+ Object accessors = raw_dictionary.ValueAt(i);
+ if (!accessors.IsAccessorInfo()) continue;
+ if (!AccessorInfo::cast(accessors).all_can_read()) continue;
}
array->set(array_size++, Smi::FromInt(i));
}
@@ -7887,7 +7509,7 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
for (int i = 0; i < array_size; i++) {
int index = Smi::ToInt(array->get(i));
Object key = dictionary->NameAt(index);
- if (key->IsSymbol()) {
+ if (key.IsSymbol()) {
has_seen_symbol = true;
continue;
}
@@ -7897,7 +7519,7 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
for (int i = 0; i < array_size; i++) {
int index = Smi::ToInt(array->get(i));
Object key = dictionary->NameAt(index);
- if (!key->IsSymbol()) continue;
+ if (!key.IsSymbol()) continue;
keys->AddKey(key, DO_NOT_CONVERT);
}
}
@@ -7907,12 +7529,12 @@ void BaseNameDictionary<Derived, Shape>::CollectKeysTo(
template <typename Derived, typename Shape>
Object Dictionary<Derived, Shape>::SlowReverseLookup(Object value) {
Derived dictionary = Derived::cast(*this);
- ReadOnlyRoots roots = dictionary->GetReadOnlyRoots();
- int capacity = dictionary->Capacity();
+ ReadOnlyRoots roots = dictionary.GetReadOnlyRoots();
+ int capacity = dictionary.Capacity();
for (int i = 0; i < capacity; i++) {
Object k;
- if (!dictionary->ToKey(roots, i, &k)) continue;
- Object e = dictionary->ValueAt(i);
+ if (!dictionary.ToKey(roots, i, &k)) continue;
+ Object e = dictionary.ValueAt(i);
if (e == value) return k;
}
return roots.undefined_value();
@@ -7948,7 +7570,7 @@ Object ObjectHashTableBase<Derived, Shape>::Lookup(Handle<Object> key) {
// If the object does not have an identity hash, it was never used as a key.
Object hash = key->GetHash();
- if (hash->IsUndefined(roots)) {
+ if (hash.IsUndefined(roots)) {
return roots.the_hole_value();
}
return Lookup(roots, key, Smi::ToInt(hash));
@@ -7974,7 +7596,7 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Put(Handle<Derived> table,
DCHECK(!value->IsTheHole(ReadOnlyRoots(isolate)));
// Make sure the key object has an identity hash code.
- int32_t hash = key->GetOrCreateHash(isolate)->value();
+ int32_t hash = key->GetOrCreateHash(isolate).value();
return ObjectHashTableBase<Derived, Shape>::Put(isolate, table, key, value,
hash);
@@ -8030,7 +7652,7 @@ Handle<Derived> ObjectHashTableBase<Derived, Shape>::Remove(
DCHECK(table->IsKey(table->GetReadOnlyRoots(), *key));
Object hash = key->GetHash();
- if (hash->IsUndefined()) {
+ if (hash.IsUndefined()) {
*was_present = false;
return table;
}
@@ -8072,7 +7694,6 @@ void ObjectHashTableBase<Derived, Shape>::RemoveEntry(int entry) {
this->ElementRemoved();
}
-
void JSSet::Initialize(Handle<JSSet> set, Isolate* isolate) {
Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
set->set_table(*table);
@@ -8084,7 +7705,6 @@ void JSSet::Clear(Isolate* isolate, Handle<JSSet> set) {
set->set_table(*table);
}
-
void JSMap::Initialize(Handle<JSMap> map, Isolate* isolate) {
Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
map->set_table(*table);
@@ -8096,14 +7716,12 @@ void JSMap::Clear(Isolate* isolate, Handle<JSMap> map) {
map->set_table(*table);
}
-
void JSWeakCollection::Initialize(Handle<JSWeakCollection> weak_collection,
Isolate* isolate) {
Handle<EphemeronHashTable> table = EphemeronHashTable::New(isolate, 0);
weak_collection->set_table(*table);
}
-
void JSWeakCollection::Set(Handle<JSWeakCollection> weak_collection,
Handle<Object> key, Handle<Object> value,
int32_t hash) {
@@ -8121,7 +7739,6 @@ void JSWeakCollection::Set(Handle<JSWeakCollection> weak_collection,
}
}
-
bool JSWeakCollection::Delete(Handle<JSWeakCollection> weak_collection,
Handle<Object> key, int32_t hash) {
DCHECK(key->IsJSReceiver() || key->IsSymbol());
@@ -8176,7 +7793,6 @@ Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
return isolate->factory()->NewJSArrayWithElements(entries);
}
-
Handle<PropertyCell> PropertyCell::InvalidateEntry(
Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry) {
// Swap with a copy.
@@ -8185,7 +7801,7 @@ Handle<PropertyCell> PropertyCell::InvalidateEntry(
Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell(name);
new_cell->set_value(cell->value());
dictionary->ValueAtPut(entry, *new_cell);
- bool is_the_hole = cell->value()->IsTheHole(isolate);
+ bool is_the_hole = cell->value().IsTheHole(isolate);
// Cell is officially mutable henceforth.
PropertyDetails details = cell->property_details();
details = details.set_cell_type(is_the_hole ? PropertyCellType::kUninitialized
@@ -8199,27 +7815,25 @@ Handle<PropertyCell> PropertyCell::InvalidateEntry(
}
details = details.set_cell_type(PropertyCellType::kInvalidated);
cell->set_property_details(details);
- cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ cell->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
return new_cell;
}
-
PropertyCellConstantType PropertyCell::GetConstantType() {
- if (value()->IsSmi()) return PropertyCellConstantType::kSmi;
+ if (value().IsSmi()) return PropertyCellConstantType::kSmi;
return PropertyCellConstantType::kStableMap;
}
-
static bool RemainsConstantType(Handle<PropertyCell> cell,
Handle<Object> value) {
// TODO(dcarney): double->smi and smi->double transition from kConstant
- if (cell->value()->IsSmi() && value->IsSmi()) {
+ if (cell->value().IsSmi() && value->IsSmi()) {
return true;
- } else if (cell->value()->IsHeapObject() && value->IsHeapObject()) {
- return HeapObject::cast(cell->value())->map() ==
- HeapObject::cast(*value)->map() &&
- HeapObject::cast(*value)->map()->is_stable();
+ } else if (cell->value().IsHeapObject() && value->IsHeapObject()) {
+ return HeapObject::cast(cell->value()).map() ==
+ HeapObject::cast(*value).map() &&
+ HeapObject::cast(*value).map().is_stable();
}
return false;
}
@@ -8230,7 +7844,7 @@ PropertyCellType PropertyCell::UpdatedType(Isolate* isolate,
PropertyDetails details) {
PropertyCellType type = details.cell_type();
DCHECK(!value->IsTheHole(isolate));
- if (cell->value()->IsTheHole(isolate)) {
+ if (cell->value().IsTheHole(isolate)) {
switch (type) {
// Only allow a cell to transition once into constant state.
case PropertyCellType::kUninitialized:
@@ -8273,7 +7887,7 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
PropertyCellType old_type = original_details.cell_type();
// Preserve the enumeration index unless the property was deleted or never
// initialized.
- if (cell->value()->IsTheHole(isolate)) {
+ if (cell->value().IsTheHole(isolate)) {
index = dictionary->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index + 1);
} else {
@@ -8303,28 +7917,27 @@ Handle<PropertyCell> PropertyCell::PrepareForValue(
// Deopt when transitioning from a constant type.
if (!invalidate && (old_type != new_type ||
original_details.IsReadOnly() != details.IsReadOnly())) {
- cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ cell->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
}
return cell;
}
-
// static
void PropertyCell::SetValueWithInvalidation(Isolate* isolate,
Handle<PropertyCell> cell,
Handle<Object> new_value) {
if (cell->value() != *new_value) {
cell->set_value(*new_value);
- cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ cell->dependent_code().DeoptimizeDependentCodeGroup(
isolate, DependentCode::kPropertyCellChangedGroup);
}
}
int JSGeneratorObject::source_position() const {
CHECK(is_suspended());
- DCHECK(function()->shared()->HasBytecodeArray());
- DCHECK(function()->shared()->GetBytecodeArray()->HasSourcePositionTable());
+ DCHECK(function().shared().HasBytecodeArray());
+ DCHECK(function().shared().GetBytecodeArray().HasSourcePositionTable());
int code_offset = Smi::ToInt(input_or_debug_pos());
@@ -8332,43 +7945,42 @@ int JSGeneratorObject::source_position() const {
// is used in the source position table, hence the subtraction.
code_offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
AbstractCode code =
- AbstractCode::cast(function()->shared()->GetBytecodeArray());
- return code->SourcePosition(code_offset);
+ AbstractCode::cast(function().shared().GetBytecodeArray());
+ return code.SourcePosition(code_offset);
}
// static
AccessCheckInfo AccessCheckInfo::Get(Isolate* isolate,
Handle<JSObject> receiver) {
DisallowHeapAllocation no_gc;
- DCHECK(receiver->map()->is_access_check_needed());
- Object maybe_constructor = receiver->map()->GetConstructor();
- if (maybe_constructor->IsFunctionTemplateInfo()) {
+ DCHECK(receiver->map().is_access_check_needed());
+ Object maybe_constructor = receiver->map().GetConstructor();
+ if (maybe_constructor.IsFunctionTemplateInfo()) {
Object data_obj =
- FunctionTemplateInfo::cast(maybe_constructor)->GetAccessCheckInfo();
- if (data_obj->IsUndefined(isolate)) return AccessCheckInfo();
+ FunctionTemplateInfo::cast(maybe_constructor).GetAccessCheckInfo();
+ if (data_obj.IsUndefined(isolate)) return AccessCheckInfo();
return AccessCheckInfo::cast(data_obj);
}
// Might happen for a detached context.
- if (!maybe_constructor->IsJSFunction()) return AccessCheckInfo();
+ if (!maybe_constructor.IsJSFunction()) return AccessCheckInfo();
JSFunction constructor = JSFunction::cast(maybe_constructor);
// Might happen for the debug context.
- if (!constructor->shared()->IsApiFunction()) return AccessCheckInfo();
+ if (!constructor.shared().IsApiFunction()) return AccessCheckInfo();
Object data_obj =
- constructor->shared()->get_api_func_data()->GetAccessCheckInfo();
- if (data_obj->IsUndefined(isolate)) return AccessCheckInfo();
+ constructor.shared().get_api_func_data().GetAccessCheckInfo();
+ if (data_obj.IsUndefined(isolate)) return AccessCheckInfo();
return AccessCheckInfo::cast(data_obj);
}
-
MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
Isolate* isolate, Handle<Object> getter) {
if (getter->IsFunctionTemplateInfo()) {
Handle<FunctionTemplateInfo> fti =
Handle<FunctionTemplateInfo>::cast(getter);
// Check if the accessor uses a cached property.
- if (!fti->cached_property_name()->IsTheHole(isolate)) {
+ if (!fti->cached_property_name().IsTheHole(isolate)) {
return handle(Name::cast(fti->cached_property_name()), isolate);
}
}
@@ -8519,7 +8131,7 @@ void JSFinalizationGroup::Cleanup(
// It's possible that the cleared_cells list is empty, since
// FinalizationGroup.unregister() removed all its elements before this task
// ran. In that case, don't call the cleanup function.
- if (!finalization_group->cleared_cells()->IsUndefined(isolate)) {
+ if (!finalization_group->cleared_cells().IsUndefined(isolate)) {
// Construct the iterator.
Handle<JSFinalizationGroupCleanupIterator> iterator;
{
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects/objects.h
index 6a79fc6a9a..857f3ed0f6 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OBJECTS_H_
-#define V8_OBJECTS_H_
+#ifndef V8_OBJECTS_OBJECTS_H_
+#define V8_OBJECTS_OBJECTS_H_
#include <iosfwd>
#include <memory>
@@ -11,20 +11,22 @@
#include "include/v8-internal.h"
#include "include/v8.h"
#include "include/v8config.h"
-#include "src/assert-scope.h"
#include "src/base/bits.h"
#include "src/base/build_config.h"
#include "src/base/flags.h"
#include "src/base/logging.h"
-#include "src/checks.h"
-#include "src/constants-arch.h"
-#include "src/elements-kind.h"
-#include "src/field-index.h"
-#include "src/flags.h"
-#include "src/message-template.h"
-#include "src/objects-definitions.h"
-#include "src/property-details.h"
-#include "src/utils.h"
+#include "src/codegen/constants-arch.h"
+#include "src/common/assert-scope.h"
+#include "src/common/checks.h"
+#include "src/execution/message-template.h"
+#include "src/flags/flags.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/field-index.h"
+#include "src/objects/object-list-macros.h"
+#include "src/objects/objects-definitions.h"
+#include "src/objects/property-details.h"
+#include "src/objects/tagged-impl.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -186,12 +188,15 @@ namespace internal {
struct InliningPosition;
class PropertyDescriptorObject;
-// SKIP_WRITE_BARRIER skips the write barrier.
+// UNSAFE_SKIP_WRITE_BARRIER skips the write barrier.
+// SKIP_WRITE_BARRIER skips the write barrier and asserts that this is safe in
+// the MemoryOptimizer
// UPDATE_WEAK_WRITE_BARRIER skips the marking part of the write barrier and
// only performs the generational part.
// UPDATE_WRITE_BARRIER is doing the full barrier, marking and generational.
enum WriteBarrierMode {
SKIP_WRITE_BARRIER,
+ UNSAFE_SKIP_WRITE_BARRIER,
UPDATE_WEAK_WRITE_BARRIER,
UPDATE_EPHEMERON_KEY_WRITE_BARRIER,
UPDATE_WRITE_BARRIER
@@ -204,13 +209,8 @@ enum PropertyNormalizationMode {
KEEP_INOBJECT_PROPERTIES
};
-
// Indicates whether transitions can be added to a source map or not.
-enum TransitionFlag {
- INSERT_TRANSITION,
- OMIT_TRANSITION
-};
-
+enum TransitionFlag { INSERT_TRANSITION, OMIT_TRANSITION };
// Indicates whether the transition is simple: the target map of the transition
// either extends the current map with a new property, or it modifies the
@@ -223,10 +223,7 @@ enum SimpleTransitionFlag {
// Indicates whether we are only interested in the descriptors of a particular
// map, or in all descriptors in the descriptor array.
-enum DescriptorFlag {
- ALL_DESCRIPTORS,
- OWN_DESCRIPTORS
-};
+enum DescriptorFlag { ALL_DESCRIPTORS, OWN_DESCRIPTORS };
// Instance size sentinel for objects of variable size.
const int kVariableSizeSentinel = 0;
@@ -250,286 +247,6 @@ bool ComparisonResultToBool(Operation op, ComparisonResult result);
enum class OnNonExistent { kThrowReferenceError, kReturnUndefined };
-class AbstractCode;
-class AccessorPair;
-class AccessCheckInfo;
-class AllocationSite;
-class ByteArray;
-class CachedTemplateObject;
-class Cell;
-class ClosureFeedbackCellArray;
-class ConsString;
-class DependentCode;
-class ElementsAccessor;
-class EnumCache;
-class FixedArrayBase;
-class FixedDoubleArray;
-class FreeSpace;
-class FunctionLiteral;
-class FunctionTemplateInfo;
-class JSAsyncGeneratorObject;
-class JSGlobalProxy;
-class JSPromise;
-class JSProxy;
-class JSProxyRevocableResult;
-class KeyAccumulator;
-class LayoutDescriptor;
-class LookupIterator;
-class FieldType;
-class Module;
-class ModuleInfoEntry;
-class MutableHeapNumber;
-class ObjectHashTable;
-class ObjectTemplateInfo;
-class ObjectVisitor;
-class PreparseData;
-class PropertyArray;
-class PropertyCell;
-class PropertyDescriptor;
-class PrototypeInfo;
-class ReadOnlyRoots;
-class RegExpMatchInfo;
-class RootVisitor;
-class SafepointEntry;
-class ScriptContextTable;
-class SharedFunctionInfo;
-class StringStream;
-class Symbol;
-class FeedbackCell;
-class FeedbackMetadata;
-class FeedbackVector;
-class UncompiledData;
-class TemplateInfo;
-class TransitionArray;
-class TemplateList;
-class WasmInstanceObject;
-class WasmMemoryObject;
-template <typename T>
-class ZoneForwardList;
-
-#ifdef OBJECT_PRINT
-#define DECL_PRINTER(Name) void Name##Print(std::ostream& os); // NOLINT
-#else
-#define DECL_PRINTER(Name)
-#endif
-
-#define OBJECT_TYPE_LIST(V) \
- V(Smi) \
- V(LayoutDescriptor) \
- V(HeapObject) \
- V(Primitive) \
- V(Number) \
- V(Numeric)
-
-#define HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
- V(AbstractCode) \
- V(AccessCheckNeeded) \
- V(AllocationSite) \
- V(ArrayList) \
- V(BigInt) \
- V(BigIntWrapper) \
- V(ObjectBoilerplateDescription) \
- V(Boolean) \
- V(BooleanWrapper) \
- V(BreakPoint) \
- V(BreakPointInfo) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(CachedTemplateObject) \
- V(CallHandlerInfo) \
- V(Callable) \
- V(Cell) \
- V(ClassBoilerplate) \
- V(Code) \
- V(CodeDataContainer) \
- V(CompilationCacheTable) \
- V(ConsString) \
- V(Constructor) \
- V(Context) \
- V(CoverageInfo) \
- V(ClosureFeedbackCellArray) \
- V(DataHandler) \
- V(DeoptimizationData) \
- V(DependentCode) \
- V(DescriptorArray) \
- V(EmbedderDataArray) \
- V(EphemeronHashTable) \
- V(ExternalOneByteString) \
- V(ExternalString) \
- V(ExternalTwoByteString) \
- V(FeedbackCell) \
- V(FeedbackMetadata) \
- V(FeedbackVector) \
- V(Filler) \
- V(FixedArray) \
- V(FixedArrayBase) \
- V(FixedArrayExact) \
- V(FixedBigInt64Array) \
- V(FixedBigUint64Array) \
- V(FixedDoubleArray) \
- V(FixedFloat32Array) \
- V(FixedFloat64Array) \
- V(FixedInt16Array) \
- V(FixedInt32Array) \
- V(FixedInt8Array) \
- V(FixedTypedArrayBase) \
- V(FixedUint16Array) \
- V(FixedUint32Array) \
- V(FixedUint8Array) \
- V(FixedUint8ClampedArray) \
- V(Foreign) \
- V(FrameArray) \
- V(FreeSpace) \
- V(Function) \
- V(GlobalDictionary) \
- V(HandlerTable) \
- V(HeapNumber) \
- V(InternalizedString) \
- V(JSArgumentsObject) \
- V(JSArgumentsObjectWithLength) \
- V(JSArray) \
- V(JSArrayBuffer) \
- V(JSArrayBufferView) \
- V(JSArrayIterator) \
- V(JSAsyncFromSyncIterator) \
- V(JSAsyncFunctionObject) \
- V(JSAsyncGeneratorObject) \
- V(JSBoundFunction) \
- V(JSCollection) \
- V(JSContextExtensionObject) \
- V(JSDataView) \
- V(JSDate) \
- V(JSError) \
- V(JSFunction) \
- V(JSGeneratorObject) \
- V(JSGlobalObject) \
- V(JSGlobalProxy) \
- V(JSMap) \
- V(JSMapIterator) \
- V(JSMessageObject) \
- V(JSModuleNamespace) \
- V(JSObject) \
- V(JSPromise) \
- V(JSProxy) \
- V(JSReceiver) \
- V(JSRegExp) \
- V(JSRegExpResult) \
- V(JSRegExpStringIterator) \
- V(JSSet) \
- V(JSSetIterator) \
- V(JSSloppyArgumentsObject) \
- V(JSStringIterator) \
- V(JSTypedArray) \
- V(JSValue) \
- V(JSWeakRef) \
- V(JSWeakCollection) \
- V(JSFinalizationGroup) \
- V(JSFinalizationGroupCleanupIterator) \
- V(JSWeakMap) \
- V(JSWeakSet) \
- V(LoadHandler) \
- V(Map) \
- V(MapCache) \
- V(Microtask) \
- V(ModuleInfo) \
- V(MutableHeapNumber) \
- V(Name) \
- V(NameDictionary) \
- V(NativeContext) \
- V(NormalizedMapCache) \
- V(NumberDictionary) \
- V(NumberWrapper) \
- V(ObjectHashSet) \
- V(ObjectHashTable) \
- V(Oddball) \
- V(OrderedHashMap) \
- V(OrderedHashSet) \
- V(OrderedNameDictionary) \
- V(PreparseData) \
- V(PromiseReactionJobTask) \
- V(PropertyArray) \
- V(PropertyCell) \
- V(PropertyDescriptorObject) \
- V(RegExpMatchInfo) \
- V(ScopeInfo) \
- V(ScriptContextTable) \
- V(ScriptWrapper) \
- V(SeqOneByteString) \
- V(SeqString) \
- V(SeqTwoByteString) \
- V(SharedFunctionInfo) \
- V(SimpleNumberDictionary) \
- V(SlicedString) \
- V(SloppyArgumentsElements) \
- V(SmallOrderedHashMap) \
- V(SmallOrderedHashSet) \
- V(SmallOrderedNameDictionary) \
- V(SourcePositionTableWithFrameCache) \
- V(StoreHandler) \
- V(String) \
- V(StringSet) \
- V(StringTable) \
- V(StringWrapper) \
- V(Struct) \
- V(Symbol) \
- V(SymbolWrapper) \
- V(TemplateInfo) \
- V(TemplateList) \
- V(TemplateObjectDescription) \
- V(ThinString) \
- V(TransitionArray) \
- V(UncompiledData) \
- V(UncompiledDataWithPreparseData) \
- V(UncompiledDataWithoutPreparseData) \
- V(Undetectable) \
- V(UniqueName) \
- V(WasmExceptionObject) \
- V(WasmGlobalObject) \
- V(WasmInstanceObject) \
- V(WasmMemoryObject) \
- V(WasmModuleObject) \
- V(WasmTableObject) \
- V(WeakFixedArray) \
- V(WeakArrayList) \
- V(WeakCell)
-
-#ifdef V8_INTL_SUPPORT
-#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
- HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V) \
- V(JSV8BreakIterator) \
- V(JSCollator) \
- V(JSDateTimeFormat) \
- V(JSListFormat) \
- V(JSLocale) \
- V(JSNumberFormat) \
- V(JSPluralRules) \
- V(JSRelativeTimeFormat) \
- V(JSSegmentIterator) \
- V(JSSegmenter)
-#else
-#define HEAP_OBJECT_ORDINARY_TYPE_LIST(V) HEAP_OBJECT_ORDINARY_TYPE_LIST_BASE(V)
-#endif // V8_INTL_SUPPORT
-
-#define HEAP_OBJECT_TEMPLATE_TYPE_LIST(V) \
- V(Dictionary) \
- V(HashTable)
-
-#define HEAP_OBJECT_TYPE_LIST(V) \
- HEAP_OBJECT_ORDINARY_TYPE_LIST(V) \
- HEAP_OBJECT_TEMPLATE_TYPE_LIST(V)
-
-#define ODDBALL_LIST(V) \
- V(Undefined, undefined_value) \
- V(Null, null_value) \
- V(TheHole, the_hole_value) \
- V(Exception, exception) \
- V(Uninitialized, uninitialized_value) \
- V(True, true_value) \
- V(False, false_value) \
- V(ArgumentsMarker, arguments_marker) \
- V(OptimizedOut, optimized_out) \
- V(StaleRegister, stale_register)
-
// The element types selection for CreateListFromArrayLike.
enum class ElementTypes { kAll, kStringAndSymbol };
@@ -543,28 +260,10 @@ ShouldThrow GetShouldThrow(Isolate* isolate, Maybe<ShouldThrow> should_throw);
// There must only be a single data member in Object: the Address ptr,
// containing the tagged heap pointer that this Object instance refers to.
// For a design overview, see https://goo.gl/Ph4CGz.
-class Object {
+class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
public:
- constexpr Object() : ptr_(kNullAddress) {}
- explicit constexpr Object(Address ptr) : ptr_(ptr) {}
-
- // Make clang on Linux catch what MSVC complains about on Windows:
- operator bool() const = delete;
-
- bool operator==(const Object that) const { return this->ptr() == that.ptr(); }
- bool operator!=(const Object that) const { return this->ptr() != that.ptr(); }
- // Usage in std::set requires operator<.
- bool operator<(const Object that) const { return this->ptr() < that.ptr(); }
-
- // Returns the tagged "(heap) object pointer" representation of this object.
- constexpr Address ptr() const { return ptr_; }
-
- // These operator->() overloads are required for handlified code.
- Object* operator->() { return this; }
- const Object* operator->() const { return this; }
-
- // Type testing.
- bool IsObject() const { return true; }
+ constexpr Object() : TaggedImpl(kNullAddress) {}
+ explicit constexpr Object(Address ptr) : TaggedImpl(ptr) {}
#define IS_TYPE_FUNCTION_DECL(Type) V8_INLINE bool Is##Type() const;
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
@@ -584,33 +283,10 @@ class Object {
V8_INLINE bool IsNullOrUndefined(ReadOnlyRoots roots) const;
V8_INLINE bool IsNullOrUndefined() const;
- enum class Conversion { kToNumber, kToNumeric };
+ V8_INLINE bool IsZero() const;
+ V8_INLINE bool IsNoSharedNameSentinel() const;
-#define RETURN_FAILURE(isolate, should_throw, call) \
- do { \
- if ((should_throw) == kDontThrow) { \
- return Just(false); \
- } else { \
- isolate->Throw(*isolate->factory()->call); \
- return Nothing<bool>(); \
- } \
- } while (false)
-
-#define MAYBE_RETURN(call, value) \
- do { \
- if ((call).IsNothing()) return value; \
- } while (false)
-
-#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
-
-#define MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, dst, call) \
- do { \
- Isolate* __isolate__ = (isolate); \
- if (!(call).To(&dst)) { \
- DCHECK(__isolate__->has_pending_exception()); \
- return ReadOnlyRoots(__isolate__).exception(); \
- } \
- } while (false)
+ enum class Conversion { kToNumber, kToNumeric };
#define DECL_STRUCT_PREDICATE(NAME, Name, name) V8_INLINE bool Is##Name() const;
STRUCT_LIST(DECL_STRUCT_PREDICATE)
@@ -889,34 +565,6 @@ class Object {
// and length.
bool IterationHasObservableEffects();
- //
- // The following GetHeapObjectXX methods mimic corresponding functionality
- // in MaybeObject. Having them here allows us to unify code that processes
- // ObjectSlots and MaybeObjectSlots.
- //
-
- // If this Object is a strong pointer to a HeapObject, returns true and
- // sets *result. Otherwise returns false.
- inline bool GetHeapObjectIfStrong(HeapObject* result) const;
-
- // If this Object is a strong pointer to a HeapObject (weak pointers are not
- // expected), returns true and sets *result. Otherwise returns false.
- inline bool GetHeapObject(HeapObject* result) const;
-
- // DCHECKs that this Object is a strong pointer to a HeapObject and returns
- // the HeapObject.
- inline HeapObject GetHeapObject() const;
-
- // Always returns false because Object is not expected to be a weak pointer
- // to a HeapObject.
- inline bool GetHeapObjectIfWeak(HeapObject* result) const {
- DCHECK(!HasWeakHeapObjectTag(ptr()));
- return false;
- }
- // Always returns false because Object is not expected to be a weak pointer
- // to a HeapObject.
- inline bool IsCleared() const { return false; }
-
EXPORT_DECL_VERIFIER(Object)
#ifdef VERIFY_HEAP
@@ -965,6 +613,45 @@ class Object {
}
};
+ template <class T, typename std::enable_if<std::is_arithmetic<T>::value,
+ int>::type = 0>
+ inline T ReadField(size_t offset) const {
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+#ifdef V8_COMPRESS_POINTERS
+ constexpr bool v8_pointer_compression_unaligned = sizeof(T) > kTaggedSize;
+#else
+ constexpr bool v8_pointer_compression_unaligned = false;
+#endif
+ if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
+ // Bug(v8:8875) Double fields may be unaligned.
+ return ReadUnalignedValue<T>(field_address(offset));
+ } else {
+ return Memory<T>(field_address(offset));
+ }
+ }
+
+ template <class T, typename std::enable_if<std::is_arithmetic<T>::value,
+ int>::type = 0>
+ inline void WriteField(size_t offset, T value) const {
+ // Pointer compression causes types larger than kTaggedSize to be unaligned.
+#ifdef V8_COMPRESS_POINTERS
+ constexpr bool v8_pointer_compression_unaligned = sizeof(T) > kTaggedSize;
+#else
+ constexpr bool v8_pointer_compression_unaligned = false;
+#endif
+ if (std::is_same<T, double>::value || v8_pointer_compression_unaligned) {
+ // Bug(v8:8875) Double fields may be unaligned.
+ WriteUnalignedValue<T>(field_address(offset), value);
+ } else {
+ Memory<T>(field_address(offset)) = value;
+ }
+ }
+
+ protected:
+ inline Address field_address(size_t offset) const {
+ return ptr() + offset - kHeapObjectTag;
+ }
+
private:
friend class CompressedObjectSlot;
friend class FullObjectSlot;
@@ -1007,22 +694,13 @@ class Object {
Isolate* isolate, Handle<Object> input);
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ConvertToIndex(
Isolate* isolate, Handle<Object> input, MessageTemplate error_index);
-
- Address ptr_;
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Object& obj);
-// In objects.h to be usable without objects-inl.h inclusion.
-bool Object::IsSmi() const { return HAS_SMI_TAG(ptr()); }
-bool Object::IsHeapObject() const {
- DCHECK_EQ(!IsSmi(), Internals::HasHeapObjectTag(ptr()));
- return !IsSmi();
-}
-
struct Brief {
- V8_EXPORT_PRIVATE explicit Brief(const Object v);
- explicit Brief(const MaybeObject v);
+ template <typename TObject>
+ explicit Brief(TObject v) : value{v.ptr()} {}
// {value} is a tagged heap object reference (weak or strong), equivalent to
// a MaybeObject's payload. It has a plain Address type to keep #includes
// lightweight.
@@ -1034,7 +712,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Brief& v);
// Objects should never have the weak tag; this variant is for overzealous
// checking.
V8_INLINE static bool HasWeakHeapObjectTag(const Object value) {
- return ((value->ptr() & kHeapObjectTagMask) == kWeakHeapObjectTag);
+ return HAS_WEAK_HEAP_OBJECT_TAG(value.ptr());
}
// Heap objects typically have a map pointer in their first word. However,
@@ -1065,13 +743,9 @@ class MapWord {
// View this map word as a forwarding address.
inline HeapObject ToForwardingAddress();
- static inline MapWord FromRawValue(uintptr_t value) {
- return MapWord(value);
- }
+ static inline MapWord FromRawValue(uintptr_t value) { return MapWord(value); }
- inline uintptr_t ToRawValue() {
- return value_;
- }
+ inline uintptr_t ToRawValue() { return value_; }
private:
// HeapObject calls the private constructor and directly reads the value.
@@ -1100,12 +774,8 @@ enum EnsureElementsMode {
ALLOW_CONVERTED_DOUBLE_ELEMENTS
};
-
// Indicator for one component of an AccessorPair.
-enum AccessorComponent {
- ACCESSOR_GETTER,
- ACCESSOR_SETTER
-};
+enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER };
enum class GetKeysConversion {
kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
@@ -1126,7 +796,7 @@ class Relocatable {
explicit inline Relocatable(Isolate* isolate);
inline virtual ~Relocatable();
virtual void IterateInstance(RootVisitor* v) {}
- virtual void PostGarbageCollection() { }
+ virtual void PostGarbageCollection() {}
static void PostGarbageCollectionProcessing(Isolate* isolate);
static int ArchiveSpacePerThread();
@@ -1158,10 +828,9 @@ class BooleanBit : public AllStatic {
}
};
-
-} // NOLINT, false-positive due to second-order macros.
-} // NOLINT, false-positive due to second-order macros.
+} // namespace internal
+} // namespace v8
#include "src/objects/object-macros-undef.h"
-#endif // V8_OBJECTS_H_
+#endif // V8_OBJECTS_OBJECTS_H_
diff --git a/deps/v8/src/objects/oddball-inl.h b/deps/v8/src/objects/oddball-inl.h
index fbd5a1b2c6..e0d77b9043 100644
--- a/deps/v8/src/objects/oddball-inl.h
+++ b/deps/v8/src/objects/oddball-inl.h
@@ -7,9 +7,9 @@
#include "src/objects/oddball.h"
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/string-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -18,32 +18,19 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Oddball, HeapObject)
-
-CAST_ACCESSOR(Oddball)
-
-double Oddball::to_number_raw() const {
- return READ_DOUBLE_FIELD(*this, kToNumberRawOffset);
-}
-
-void Oddball::set_to_number_raw(double value) {
- WRITE_DOUBLE_FIELD(*this, kToNumberRawOffset, value);
-}
+TQ_OBJECT_CONSTRUCTORS_IMPL(Oddball)
void Oddball::set_to_number_raw_as_bits(uint64_t bits) {
- WRITE_UINT64_FIELD(*this, kToNumberRawOffset, bits);
+ // Bug(v8:8875): HeapNumber's double may be unaligned.
+ WriteUnalignedValue<uint64_t>(field_address(kToNumberRawOffset), bits);
}
-ACCESSORS(Oddball, to_string, String, kToStringOffset)
-ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
-ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
-
byte Oddball::kind() const {
- return Smi::ToInt(READ_FIELD(*this, kKindOffset));
+ return Smi::ToInt(TorqueGeneratedOddball::kind());
}
void Oddball::set_kind(byte value) {
- WRITE_FIELD(*this, kKindOffset, Smi::FromInt(value));
+ TorqueGeneratedOddball::set_kind(Smi::FromInt(value));
}
// static
@@ -53,7 +40,7 @@ Handle<Object> Oddball::ToNumber(Isolate* isolate, Handle<Oddball> input) {
bool HeapObject::IsBoolean() const {
return IsOddball() &&
- ((Oddball::cast(*this)->kind() & Oddball::kNotBooleanMask) == 0);
+ ((Oddball::cast(*this).kind() & Oddball::kNotBooleanMask) == 0);
}
} // namespace internal
diff --git a/deps/v8/src/objects/oddball.h b/deps/v8/src/objects/oddball.h
index f608a76a2f..025f9379ba 100644
--- a/deps/v8/src/objects/oddball.h
+++ b/deps/v8/src/objects/oddball.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_ODDBALL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -15,45 +15,26 @@ namespace v8 {
namespace internal {
// The Oddball describes objects null, undefined, true, and false.
-class Oddball : public HeapObject {
+class Oddball : public TorqueGeneratedOddball<Oddball, HeapObject> {
public:
// [to_number_raw]: Cached raw to_number computed at startup.
- inline double to_number_raw() const;
- inline void set_to_number_raw(double value);
inline void set_to_number_raw_as_bits(uint64_t bits);
- // [to_string]: Cached to_string computed at startup.
- DECL_ACCESSORS(to_string, String)
-
- // [to_number]: Cached to_number computed at startup.
- DECL_ACCESSORS(to_number, Object)
-
- // [typeof]: Cached type_of computed at startup.
- DECL_ACCESSORS(type_of, String)
-
inline byte kind() const;
inline void set_kind(byte kind);
+ // Oddball has a custom verifier.
+ void OddballVerify(Isolate* isolate);
+
// ES6 section 7.1.3 ToNumber for Boolean, Null, Undefined.
V8_WARN_UNUSED_RESULT static inline Handle<Object> ToNumber(
Isolate* isolate, Handle<Oddball> input);
- DECL_CAST(Oddball)
-
- // Dispatched behavior.
- DECL_VERIFIER(Oddball)
-
// Initialize the fields.
static void Initialize(Isolate* isolate, Handle<Oddball> oddball,
const char* to_string, Handle<Object> to_number,
const char* type_of, byte kind);
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_ODDBALL_FIELDS)
- // TODO(v8:8989): [torque] Support marker constants.
- static const int kTaggedFieldsStartOffset = kToStringOffset;
- static const int kTaggedFieldsEndOffset = kKindOffset;
-
static const byte kFalse = 0;
static const byte kTrue = 1;
static const byte kNotBooleanMask = static_cast<byte>(~1);
@@ -68,14 +49,16 @@ class Oddball : public HeapObject {
static const byte kStaleRegister = 10;
static const byte kSelfReferenceMarker = 10;
- using BodyDescriptor = FixedBodyDescriptor<kTaggedFieldsStartOffset,
- kTaggedFieldsEndOffset, kSize>;
+ static_assert(kStartOfWeakFieldsOffset == kEndOfWeakFieldsOffset,
+ "Ensure BodyDescriptor does not need to handle weak fields.");
+ using BodyDescriptor = FixedBodyDescriptor<kStartOfStrongFieldsOffset,
+ kEndOfStrongFieldsOffset, kSize>;
STATIC_ASSERT(kKindOffset == Internals::kOddballKindOffset);
STATIC_ASSERT(kNull == Internals::kNullOddballKind);
STATIC_ASSERT(kUndefined == Internals::kUndefinedOddballKind);
- OBJECT_CONSTRUCTORS(Oddball, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(Oddball)
};
} // namespace internal
diff --git a/deps/v8/src/objects/ordered-hash-table-inl.h b/deps/v8/src/objects/ordered-hash-table-inl.h
index 1ab26ca8ab..0eaa7567e2 100644
--- a/deps/v8/src/objects/ordered-hash-table-inl.h
+++ b/deps/v8/src/objects/ordered-hash-table-inl.h
@@ -8,10 +8,10 @@
#include "src/objects/ordered-hash-table.h"
#include "src/heap/heap.h"
-#include "src/objects-inl.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/js-collection-iterator.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
// Has to be the last include (doesn't have include guards):
@@ -185,18 +185,18 @@ template <class Derived, class TableType>
Object OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
TableType table = TableType::cast(this->table());
int index = Smi::ToInt(this->index());
- Object key = table->KeyAt(index);
- DCHECK(!key->IsTheHole());
+ Object key = table.KeyAt(index);
+ DCHECK(!key.IsTheHole());
return key;
}
inline void SmallOrderedNameDictionary::SetHash(int hash) {
DCHECK(PropertyArray::HashField::is_valid(hash));
- WRITE_INT_FIELD(*this, PrefixOffset(), hash);
+ WriteField<int>(PrefixOffset(), hash);
}
inline int SmallOrderedNameDictionary::Hash() {
- int hash = READ_INT_FIELD(*this, PrefixOffset());
+ int hash = ReadField<int>(PrefixOffset());
DCHECK(PropertyArray::HashField::is_valid(hash));
return hash;
}
diff --git a/deps/v8/src/objects/ordered-hash-table.cc b/deps/v8/src/objects/ordered-hash-table.cc
index 0b52160805..3d628cc406 100644
--- a/deps/v8/src/objects/ordered-hash-table.cc
+++ b/deps/v8/src/objects/ordered-hash-table.cc
@@ -4,10 +4,10 @@
#include "src/objects/ordered-hash-table.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
namespace v8 {
@@ -86,10 +86,10 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Clear(
template <class Derived, int entrysize>
bool OrderedHashTable<Derived, entrysize>::HasKey(Isolate* isolate,
Derived table, Object key) {
- DCHECK_IMPLIES(entrysize == 1, table->IsOrderedHashSet());
- DCHECK_IMPLIES(entrysize == 2, table->IsOrderedHashMap());
+ DCHECK_IMPLIES(entrysize == 1, table.IsOrderedHashSet());
+ DCHECK_IMPLIES(entrysize == 2, table.IsOrderedHashMap());
DisallowHeapAllocation no_gc;
- int entry = table->FindEntry(isolate, key);
+ int entry = table.FindEntry(isolate, key);
return entry != kNotFound;
}
@@ -99,21 +99,21 @@ int OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
int entry;
// This special cases for Smi, so that we avoid the HandleScope
// creation below.
- if (key->IsSmi()) {
+ if (key.IsSmi()) {
uint32_t hash = ComputeUnseededHash(Smi::ToInt(key));
entry = HashToEntry(hash & Smi::kMaxValue);
} else {
HandleScope scope(isolate);
- Object hash = key->GetHash();
+ Object hash = key.GetHash();
// If the object does not have an identity hash, it was never used as a key
- if (hash->IsUndefined(isolate)) return kNotFound;
+ if (hash.IsUndefined(isolate)) return kNotFound;
entry = HashToEntry(Smi::ToInt(hash));
}
// Walk the chain in the bucket to find the key.
while (entry != kNotFound) {
Object candidate_key = KeyAt(entry);
- if (candidate_key->SameValueZero(key)) break;
+ if (candidate_key.SameValueZero(key)) break;
entry = NextChainEntry(entry);
}
@@ -123,13 +123,13 @@ int OrderedHashTable<Derived, entrysize>::FindEntry(Isolate* isolate,
Handle<OrderedHashSet> OrderedHashSet::Add(Isolate* isolate,
Handle<OrderedHashSet> table,
Handle<Object> key) {
- int hash = key->GetOrCreateHash(isolate)->value();
+ int hash = key->GetOrCreateHash(isolate).value();
int entry = table->HashToEntry(hash);
// Walk the chain of the bucket and try finding the key.
while (entry != kNotFound) {
Object candidate_key = table->KeyAt(entry);
// Do not add if we have the key already
- if (candidate_key->SameValueZero(*key)) return table;
+ if (candidate_key.SameValueZero(*key)) return table;
entry = table->NextChainEntry(entry);
}
@@ -164,12 +164,12 @@ Handle<FixedArray> OrderedHashSet::ConvertToKeysArray(
Object key = table->get(index);
if (convert == GetKeysConversion::kConvertToString) {
uint32_t index_value;
- if (key->ToArrayIndex(&index_value)) {
+ if (key.ToArrayIndex(&index_value)) {
// Avoid trashing the Number2String cache if indices get very large.
bool use_cache = i < kMaxStringTableEntries;
key = *isolate->factory()->Uint32ToString(index_value, use_cache);
} else {
- CHECK(key->IsName());
+ CHECK(key.IsName());
}
}
result->set(i, key);
@@ -203,12 +203,12 @@ Handle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
DisallowHeapAllocation no_gc;
for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
Object key = table->KeyAt(old_entry);
- if (key->IsTheHole(isolate)) {
+ if (key.IsTheHole(isolate)) {
table->SetRemovedIndexAt(removed_holes_index++, old_entry);
continue;
}
- Object hash = key->GetHash();
+ Object hash = key.GetHash();
int bucket = Smi::ToInt(hash) & (new_buckets - 1);
Object chain_entry = new_table->get(HashTableStartIndex() + bucket);
new_table->set(HashTableStartIndex() + bucket, Smi::FromInt(new_entry));
@@ -257,20 +257,20 @@ template <class Derived, int entrysize>
bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
Derived table, Object key) {
DisallowHeapAllocation no_gc;
- int entry = table->FindEntry(isolate, key);
+ int entry = table.FindEntry(isolate, key);
if (entry == kNotFound) return false;
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
- int index = table->EntryToIndex(entry);
+ int nof = table.NumberOfElements();
+ int nod = table.NumberOfDeletedElements();
+ int index = table.EntryToIndex(entry);
Object hole = ReadOnlyRoots(isolate).the_hole_value();
for (int i = 0; i < entrysize; ++i) {
- table->set(index + i, hole);
+ table.set(index + i, hole);
}
- table->SetNumberOfElements(nof - 1);
- table->SetNumberOfDeletedElements(nod + 1);
+ table.SetNumberOfElements(nof - 1);
+ table.SetNumberOfDeletedElements(nod + 1);
return true;
}
@@ -278,11 +278,11 @@ bool OrderedHashTable<Derived, entrysize>::Delete(Isolate* isolate,
Address OrderedHashMap::GetHash(Isolate* isolate, Address raw_key) {
DisallowHeapAllocation no_gc;
Object key(raw_key);
- Object hash = key->GetHash();
+ Object hash = key.GetHash();
// If the object does not have an identity hash, it was never used as a key
- if (hash->IsUndefined(isolate)) return Smi::FromInt(-1).ptr();
- DCHECK(hash->IsSmi());
- DCHECK_GE(Smi::cast(hash)->value(), 0);
+ if (hash.IsUndefined(isolate)) return Smi::FromInt(-1).ptr();
+ DCHECK(hash.IsSmi());
+ DCHECK_GE(Smi::cast(hash).value(), 0);
return hash.ptr();
}
@@ -290,7 +290,7 @@ Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
Handle<OrderedHashMap> table,
Handle<Object> key,
Handle<Object> value) {
- int hash = key->GetOrCreateHash(isolate)->value();
+ int hash = key->GetOrCreateHash(isolate).value();
int entry = table->HashToEntry(hash);
// Walk the chain of the bucket and try finding the key.
{
@@ -299,7 +299,7 @@ Handle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
while (entry != kNotFound) {
Object candidate_key = table->KeyAt(entry);
// Do not add if we have the key already
- if (candidate_key->SameValueZero(raw_key)) return table;
+ if (candidate_key.SameValueZero(raw_key)) return table;
entry = table->NextChainEntry(entry);
}
}
@@ -326,14 +326,14 @@ V8_EXPORT_PRIVATE int OrderedHashTable<OrderedNameDictionary, 3>::FindEntry(
Isolate* isolate, Object key) {
DisallowHeapAllocation no_gc;
- DCHECK(key->IsUniqueName());
+ DCHECK(key.IsUniqueName());
Name raw_key = Name::cast(key);
- int entry = HashToEntry(raw_key->Hash());
+ int entry = HashToEntry(raw_key.Hash());
while (entry != kNotFound) {
Object candidate_key = KeyAt(entry);
- DCHECK(candidate_key->IsTheHole() ||
- Name::cast(candidate_key)->IsUniqueName());
+ DCHECK(candidate_key.IsTheHole() ||
+ Name::cast(candidate_key).IsUniqueName());
if (candidate_key == raw_key) return entry;
// TODO(gsathya): This is loading the bucket count from the hash
@@ -377,7 +377,7 @@ Handle<OrderedNameDictionary> OrderedNameDictionary::Add(
void OrderedNameDictionary::SetEntry(Isolate* isolate, int entry, Object key,
Object value, PropertyDetails details) {
DisallowHeapAllocation gc;
- DCHECK_IMPLIES(!key->IsName(), key->IsTheHole(isolate));
+ DCHECK_IMPLIES(!key.IsName(), key.IsTheHole(isolate));
DisallowHeapAllocation no_gc;
int index = EntryToIndex(entry);
this->set(index, key);
@@ -554,7 +554,7 @@ MaybeHandle<SmallOrderedHashSet> SmallOrderedHashSet::Add(
}
}
- int hash = key->GetOrCreateHash(isolate)->value();
+ int hash = key->GetOrCreateHash(isolate).value();
int nof = table->NumberOfElements();
// Read the existing bucket values.
@@ -597,7 +597,7 @@ MaybeHandle<SmallOrderedHashMap> SmallOrderedHashMap::Add(
}
}
- int hash = key->GetOrCreateHash(isolate)->value();
+ int hash = key->GetOrCreateHash(isolate).value();
int nof = table->NumberOfElements();
// Read the existing bucket values.
@@ -633,10 +633,10 @@ int V8_EXPORT_PRIVATE
SmallOrderedHashTable<SmallOrderedNameDictionary>::FindEntry(Isolate* isolate,
Object key) {
DisallowHeapAllocation no_gc;
- DCHECK(key->IsUniqueName());
+ DCHECK(key.IsUniqueName());
Name raw_key = Name::cast(key);
- int entry = HashToFirstEntry(raw_key->Hash());
+ int entry = HashToFirstEntry(raw_key.Hash());
// Walk the chain in the bucket to find the key.
while (entry != kNotFound) {
@@ -692,7 +692,7 @@ MaybeHandle<SmallOrderedNameDictionary> SmallOrderedNameDictionary::Add(
void SmallOrderedNameDictionary::SetEntry(Isolate* isolate, int entry,
Object key, Object value,
PropertyDetails details) {
- DCHECK_IMPLIES(!key->IsName(), key->IsTheHole(isolate));
+ DCHECK_IMPLIES(!key.IsName(), key.IsTheHole(isolate));
SetDataEntry(entry, SmallOrderedNameDictionary::kValueIndex, value);
SetDataEntry(entry, SmallOrderedNameDictionary::kKeyIndex, key);
@@ -713,19 +713,19 @@ template <class Derived>
bool SmallOrderedHashTable<Derived>::Delete(Isolate* isolate, Derived table,
Object key) {
DisallowHeapAllocation no_gc;
- int entry = table->FindEntry(isolate, key);
+ int entry = table.FindEntry(isolate, key);
if (entry == kNotFound) return false;
- int nof = table->NumberOfElements();
- int nod = table->NumberOfDeletedElements();
+ int nof = table.NumberOfElements();
+ int nod = table.NumberOfDeletedElements();
Object hole = ReadOnlyRoots(isolate).the_hole_value();
for (int j = 0; j < Derived::kEntrySize; j++) {
- table->SetDataEntry(entry, j, hole);
+ table.SetDataEntry(entry, j, hole);
}
- table->SetNumberOfElements(nof - 1);
- table->SetNumberOfDeletedElements(nod + 1);
+ table.SetNumberOfElements(nof - 1);
+ table.SetNumberOfDeletedElements(nod + 1);
return true;
}
@@ -765,9 +765,9 @@ Handle<Derived> SmallOrderedHashTable<Derived>::Rehash(Isolate* isolate,
DisallowHeapAllocation no_gc;
for (int old_entry = 0; old_entry < (nof + nod); ++old_entry) {
Object key = table->KeyAt(old_entry);
- if (key->IsTheHole(isolate)) continue;
+ if (key.IsTheHole(isolate)) continue;
- int hash = Smi::ToInt(key->GetHash());
+ int hash = Smi::ToInt(key.GetHash());
int bucket = new_table->HashToBucket(hash);
int chain = new_table->GetFirstEntry(bucket);
@@ -848,15 +848,15 @@ MaybeHandle<Derived> SmallOrderedHashTable<Derived>::Grow(
template <class Derived>
int SmallOrderedHashTable<Derived>::FindEntry(Isolate* isolate, Object key) {
DisallowHeapAllocation no_gc;
- Object hash = key->GetHash();
+ Object hash = key.GetHash();
- if (hash->IsUndefined(isolate)) return kNotFound;
+ if (hash.IsUndefined(isolate)) return kNotFound;
int entry = HashToFirstEntry(Smi::ToInt(hash));
// Walk the chain in the bucket to find the key.
while (entry != kNotFound) {
Object candidate_key = KeyAt(entry);
- if (candidate_key->SameValueZero(key)) return entry;
+ if (candidate_key.SameValueZero(key)) return entry;
entry = GetNextEntry(entry);
}
return kNotFound;
@@ -930,7 +930,6 @@ OrderedHashTableHandler<SmallOrderedNameDictionary,
OrderedNameDictionary>::Allocate(Isolate* isolate,
int capacity);
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
Handle<HeapObject> table, Handle<Object> key) {
@@ -943,9 +942,7 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::Delete(
// down to a smaller hash table.
return LargeTable::Delete(Handle<LargeTable>::cast(table), key);
}
-#endif
-#if !defined(V8_OS_WIN)
template <class SmallTable, class LargeTable>
bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
Isolate* isolate, Handle<HeapObject> table, Handle<Object> key) {
@@ -956,7 +953,6 @@ bool OrderedHashTableHandler<SmallTable, LargeTable>::HasKey(
DCHECK(LargeTable::Is(table));
return LargeTable::HasKey(isolate, LargeTable::cast(*table), *key);
}
-#endif
template bool
OrderedHashTableHandler<SmallOrderedHashSet, OrderedHashSet>::HasKey(
@@ -1096,114 +1092,112 @@ void OrderedNameDictionaryHandler::SetEntry(Isolate* isolate, HeapObject table,
int entry, Object key, Object value,
PropertyDetails details) {
DisallowHeapAllocation no_gc;
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->SetEntry(
- isolate, entry, key, value, details);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).SetEntry(isolate, entry, key,
+ value, details);
}
- DCHECK(table->IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table)->SetEntry(isolate, entry, key,
- value, details);
+ DCHECK(table.IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table).SetEntry(isolate, entry, key, value,
+ details);
}
int OrderedNameDictionaryHandler::FindEntry(Isolate* isolate, HeapObject table,
Name key) {
DisallowHeapAllocation no_gc;
- if (table->IsSmallOrderedNameDictionary()) {
- int entry =
- SmallOrderedNameDictionary::cast(table)->FindEntry(isolate, key);
+ if (table.IsSmallOrderedNameDictionary()) {
+ int entry = SmallOrderedNameDictionary::cast(table).FindEntry(isolate, key);
return entry == SmallOrderedNameDictionary::kNotFound
? OrderedNameDictionaryHandler::kNotFound
: entry;
}
- DCHECK(table->IsOrderedNameDictionary());
- int entry = OrderedNameDictionary::cast(table)->FindEntry(isolate, key);
+ DCHECK(table.IsOrderedNameDictionary());
+ int entry = OrderedNameDictionary::cast(table).FindEntry(isolate, key);
return entry == OrderedNameDictionary::kNotFound
? OrderedNameDictionaryHandler::kNotFound
: entry;
}
Object OrderedNameDictionaryHandler::ValueAt(HeapObject table, int entry) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->ValueAt(entry);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).ValueAt(entry);
}
- DCHECK(table->IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table)->ValueAt(entry);
+ DCHECK(table.IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table).ValueAt(entry);
}
void OrderedNameDictionaryHandler::ValueAtPut(HeapObject table, int entry,
Object value) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->ValueAtPut(entry, value);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).ValueAtPut(entry, value);
}
- DCHECK(table->IsOrderedNameDictionary());
- OrderedNameDictionary::cast(table)->ValueAtPut(entry, value);
+ DCHECK(table.IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(table).ValueAtPut(entry, value);
}
PropertyDetails OrderedNameDictionaryHandler::DetailsAt(HeapObject table,
int entry) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->DetailsAt(entry);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).DetailsAt(entry);
}
- DCHECK(table->IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table)->DetailsAt(entry);
+ DCHECK(table.IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table).DetailsAt(entry);
}
void OrderedNameDictionaryHandler::DetailsAtPut(HeapObject table, int entry,
PropertyDetails details) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->DetailsAtPut(entry,
- details);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).DetailsAtPut(entry, details);
}
- DCHECK(table->IsOrderedNameDictionary());
- OrderedNameDictionary::cast(table)->DetailsAtPut(entry, details);
+ DCHECK(table.IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(table).DetailsAtPut(entry, details);
}
int OrderedNameDictionaryHandler::Hash(HeapObject table) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->Hash();
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).Hash();
}
- DCHECK(table->IsOrderedNameDictionary());
- return OrderedNameDictionary::cast(table)->Hash();
+ DCHECK(table.IsOrderedNameDictionary());
+ return OrderedNameDictionary::cast(table).Hash();
}
void OrderedNameDictionaryHandler::SetHash(HeapObject table, int hash) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->SetHash(hash);
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).SetHash(hash);
}
- DCHECK(table->IsOrderedNameDictionary());
- OrderedNameDictionary::cast(table)->SetHash(hash);
+ DCHECK(table.IsOrderedNameDictionary());
+ OrderedNameDictionary::cast(table).SetHash(hash);
}
Name OrderedNameDictionaryHandler::KeyAt(HeapObject table, int entry) {
- if (table->IsSmallOrderedNameDictionary()) {
- return Name::cast(SmallOrderedNameDictionary::cast(table)->KeyAt(entry));
+ if (table.IsSmallOrderedNameDictionary()) {
+ return Name::cast(SmallOrderedNameDictionary::cast(table).KeyAt(entry));
}
- return Name::cast(OrderedNameDictionary::cast(table)->KeyAt(entry));
+ return Name::cast(OrderedNameDictionary::cast(table).KeyAt(entry));
}
int OrderedNameDictionaryHandler::NumberOfElements(HeapObject table) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->NumberOfElements();
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).NumberOfElements();
}
- return OrderedNameDictionary::cast(table)->NumberOfElements();
+ return OrderedNameDictionary::cast(table).NumberOfElements();
}
int OrderedNameDictionaryHandler::Capacity(HeapObject table) {
- if (table->IsSmallOrderedNameDictionary()) {
- return SmallOrderedNameDictionary::cast(table)->Capacity();
+ if (table.IsSmallOrderedNameDictionary()) {
+ return SmallOrderedNameDictionary::cast(table).Capacity();
}
- return OrderedNameDictionary::cast(table)->Capacity();
+ return OrderedNameDictionary::cast(table).Capacity();
}
Handle<HeapObject> OrderedNameDictionaryHandler::Shrink(
@@ -1237,21 +1231,21 @@ template <class Derived, class TableType>
void OrderedHashTableIterator<Derived, TableType>::Transition() {
DisallowHeapAllocation no_allocation;
TableType table = TableType::cast(this->table());
- if (!table->IsObsolete()) return;
+ if (!table.IsObsolete()) return;
int index = Smi::ToInt(this->index());
- while (table->IsObsolete()) {
- TableType next_table = table->NextTable();
+ while (table.IsObsolete()) {
+ TableType next_table = table.NextTable();
if (index > 0) {
- int nod = table->NumberOfDeletedElements();
+ int nod = table.NumberOfDeletedElements();
if (nod == TableType::kClearedTableSentinel) {
index = 0;
} else {
int old_index = index;
for (int i = 0; i < nod; ++i) {
- int removed_index = table->RemovedIndexAt(i);
+ int removed_index = table.RemovedIndexAt(i);
if (removed_index >= old_index) break;
--index;
}
@@ -1274,9 +1268,9 @@ bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
TableType table = TableType::cast(this->table());
int index = Smi::ToInt(this->index());
- int used_capacity = table->UsedCapacity();
+ int used_capacity = table.UsedCapacity();
- while (index < used_capacity && table->KeyAt(index)->IsTheHole(ro_roots)) {
+ while (index < used_capacity && table.KeyAt(index).IsTheHole(ro_roots)) {
index++;
}
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 6afbb6b662..a83109ed90 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -6,11 +6,11 @@
#define V8_OBJECTS_ORDERED_HASH_TABLE_H_
#include "src/base/export-template.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/smi.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -126,7 +126,7 @@ class OrderedHashTable : public FixedArray {
return get(EntryToIndex(entry));
}
- bool IsObsolete() { return !get(NextTableIndex())->IsSmi(); }
+ bool IsObsolete() { return !get(NextTableIndex()).IsSmi(); }
// The next newer table. This is only valid if the table is obsolete.
Derived NextTable() { return Derived::cast(get(NextTableIndex())); }
@@ -540,13 +540,13 @@ class SmallOrderedHashTable : public HeapObject {
byte getByte(Offset offset, ByteIndex index) const {
DCHECK(offset < DataTableStartOffset() ||
offset >= GetBucketsStartOffset());
- return READ_BYTE_FIELD(*this, offset + (index * kOneByteSize));
+ return ReadField<byte>(offset + (index * kOneByteSize));
}
void setByte(Offset offset, ByteIndex index, byte value) {
DCHECK(offset < DataTableStartOffset() ||
offset >= GetBucketsStartOffset());
- WRITE_BYTE_FIELD(*this, offset + (index * kOneByteSize), value);
+ WriteField<byte>(offset + (index * kOneByteSize), value);
}
Offset GetDataEntryOffset(int entry, int relative_index) const {
diff --git a/deps/v8/src/objects/promise.h b/deps/v8/src/objects/promise.h
index 0b15546d03..f7c60413d1 100644
--- a/deps/v8/src/objects/promise.h
+++ b/deps/v8/src/objects/promise.h
@@ -39,7 +39,7 @@ class PromiseReactionJobTask : public Microtask {
// Dispatched behavior.
DECL_CAST(PromiseReactionJobTask)
DECL_VERIFIER(PromiseReactionJobTask)
-
+ static const int kSizeOfAllPromiseReactionJobTasks = kHeaderSize;
OBJECT_CONSTRUCTORS(PromiseReactionJobTask, Microtask);
};
@@ -51,6 +51,11 @@ class PromiseFulfillReactionJobTask : public PromiseReactionJobTask {
DECL_PRINTER(PromiseFulfillReactionJobTask)
DECL_VERIFIER(PromiseFulfillReactionJobTask)
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ PromiseReactionJobTask::kHeaderSize,
+ TORQUE_GENERATED_PROMISE_FULFILL_REACTION_JOB_TASK_FIELDS)
+ STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
+
OBJECT_CONSTRUCTORS(PromiseFulfillReactionJobTask, PromiseReactionJobTask);
};
@@ -62,6 +67,11 @@ class PromiseRejectReactionJobTask : public PromiseReactionJobTask {
DECL_PRINTER(PromiseRejectReactionJobTask)
DECL_VERIFIER(PromiseRejectReactionJobTask)
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ PromiseReactionJobTask::kHeaderSize,
+ TORQUE_GENERATED_PROMISE_REJECT_REACTION_JOB_TASK_FIELDS)
+ STATIC_ASSERT(kSize == kSizeOfAllPromiseReactionJobTasks);
+
OBJECT_CONSTRUCTORS(PromiseRejectReactionJobTask, PromiseReactionJobTask);
};
diff --git a/deps/v8/src/objects/property-array-inl.h b/deps/v8/src/objects/property-array-inl.h
index fa3f4ccde2..f23e63e50d 100644
--- a/deps/v8/src/objects/property-array-inl.h
+++ b/deps/v8/src/objects/property-array-inl.h
@@ -8,8 +8,8 @@
#include "src/objects/property-array.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -77,6 +77,17 @@ void PropertyArray::SetHash(int hash) {
WRITE_FIELD(*this, kLengthAndHashOffset, Smi::FromInt(value));
}
+void PropertyArray::CopyElements(Isolate* isolate, int dst_index,
+ PropertyArray src, int src_index, int len,
+ WriteBarrierMode mode) {
+ if (len == 0) return;
+ DisallowHeapAllocation no_gc;
+
+ ObjectSlot dst_slot(data_start() + dst_index);
+ ObjectSlot src_slot(src.data_start() + src_index);
+ isolate->heap()->CopyRange(*this, dst_slot, src_slot, len, mode);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/property-array.h b/deps/v8/src/objects/property-array.h
index 1112de4ae6..0c8b40ece2 100644
--- a/deps/v8/src/objects/property-array.h
+++ b/deps/v8/src/objects/property-array.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_PROPERTY_ARRAY_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -35,6 +35,10 @@ class PropertyArray : public HeapObject {
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
+ // Signature must be in sync with FixedArray::CopyElements().
+ inline void CopyElements(Isolate* isolate, int dst_index, PropertyArray src,
+ int src_index, int len, WriteBarrierMode mode);
+
// Gives access to raw memory which stores the array's data.
inline ObjectSlot data_start();
diff --git a/deps/v8/src/objects/property-cell.h b/deps/v8/src/objects/property-cell.h
index 7bdcfb8e49..75a5132728 100644
--- a/deps/v8/src/objects/property-cell.h
+++ b/deps/v8/src/objects/property-cell.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_PROPERTY_CELL_H_
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/property-descriptor-object-inl.h b/deps/v8/src/objects/property-descriptor-object-inl.h
index 66ca48164f..7754de5964 100644
--- a/deps/v8/src/objects/property-descriptor-object-inl.h
+++ b/deps/v8/src/objects/property-descriptor-object-inl.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_INL_H_
#define V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_INL_H_
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/property-descriptor-object.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/property-descriptor-object.h b/deps/v8/src/objects/property-descriptor-object.h
index 7c90369be7..f4930c4a31 100644
--- a/deps/v8/src/objects/property-descriptor-object.h
+++ b/deps/v8/src/objects/property-descriptor-object.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
#define V8_OBJECTS_PROPERTY_DESCRIPTOR_OBJECT_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/objects/property-descriptor.cc
index 9feab5d4bf..b3b05deceb 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/objects/property-descriptor.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/property-descriptor.h"
+#include "src/objects/property-descriptor.h"
-#include "src/bootstrapper.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
-#include "src/lookup.h"
-#include "src/objects-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/property-descriptor-object-inl.h"
namespace v8 {
@@ -36,7 +36,6 @@ bool GetPropertyIfPresent(Handle<JSReceiver> receiver, Handle<String> name,
return true;
}
-
// Helper function for ToPropertyDescriptor. Handles the case of "simple"
// objects: nothing on the prototype chain, just own fast data properties.
// Must not have observable side effects, because the slow path will restart
@@ -45,21 +44,21 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
PropertyDescriptor* desc) {
if (!obj->IsJSObject()) return false;
Map map = Handle<JSObject>::cast(obj)->map();
- if (map->instance_type() != JS_OBJECT_TYPE) return false;
- if (map->is_access_check_needed()) return false;
- if (map->prototype() != *isolate->initial_object_prototype()) return false;
+ if (map.instance_type() != JS_OBJECT_TYPE) return false;
+ if (map.is_access_check_needed()) return false;
+ if (map.prototype() != *isolate->initial_object_prototype()) return false;
// During bootstrapping, the object_function_prototype_map hasn't been
// set up yet.
if (isolate->bootstrapper()->IsActive()) return false;
- if (JSObject::cast(map->prototype())->map() !=
+ if (JSObject::cast(map.prototype()).map() !=
isolate->native_context()->object_function_prototype_map()) {
return false;
}
// TODO(jkummerow): support dictionary properties?
- if (map->is_dictionary_map()) return false;
+ if (map.is_dictionary_map()) return false;
Handle<DescriptorArray> descs =
- Handle<DescriptorArray>(map->instance_descriptors(), isolate);
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ Handle<DescriptorArray>(map.instance_descriptors(), isolate);
+ for (int i = 0; i < map.NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
Name key = descs->GetKey(i);
Handle<Object> value;
@@ -120,7 +119,6 @@ void CreateDataProperty(Handle<JSObject> object, Handle<String> name,
} // namespace
-
// ES6 6.2.4.4 "FromPropertyDescriptor"
Handle<Object> PropertyDescriptor::ToObject(Isolate* isolate) {
DCHECK(!(PropertyDescriptor::IsAccessorDescriptor(this) &&
@@ -181,7 +179,6 @@ Handle<Object> PropertyDescriptor::ToObject(Isolate* isolate) {
return result;
}
-
// ES6 6.2.4.5
// Returns false in case of exception.
// static
@@ -298,7 +295,6 @@ bool PropertyDescriptor::ToPropertyDescriptor(Isolate* isolate,
return true;
}
-
// ES6 6.2.4.6
// static
void PropertyDescriptor::CompletePropertyDescriptor(Isolate* isolate,
diff --git a/deps/v8/src/property-descriptor.h b/deps/v8/src/objects/property-descriptor.h
index f5f5f7ad4a..22fb1d6ff8 100644
--- a/deps/v8/src/property-descriptor.h
+++ b/deps/v8/src/objects/property-descriptor.h
@@ -2,13 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROPERTY_DESCRIPTOR_H_
-#define V8_PROPERTY_DESCRIPTOR_H_
-
-
-#include "src/handles.h"
-#include "src/property-details.h"
+#ifndef V8_OBJECTS_PROPERTY_DESCRIPTOR_H_
+#define V8_OBJECTS_PROPERTY_DESCRIPTOR_H_
+#include "src/handles/handles.h"
+#include "src/objects/property-details.h"
namespace v8 {
namespace internal {
@@ -133,4 +131,4 @@ class PropertyDescriptor {
} // namespace internal
} // namespace v8
-#endif // V8_PROPERTY_DESCRIPTOR_H_
+#endif // V8_OBJECTS_PROPERTY_DESCRIPTOR_H_
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/objects/property-details.h
index d366b2b8bc..7836575edf 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROPERTY_DETAILS_H_
-#define V8_PROPERTY_DETAILS_H_
+#ifndef V8_OBJECTS_PROPERTY_DETAILS_H_
+#define V8_OBJECTS_PROPERTY_DETAILS_H_
#include "include/v8.h"
-#include "src/allocation.h"
-// TODO(ishell): remove once FLAG_track_constant_fields is removed.
-#include "src/flags.h"
-#include "src/utils.h"
+#include "src/utils/allocation.h"
+// TODO(bmeurer): Remove once FLAG_modify_field_representation_inplace is gone.
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -78,23 +78,11 @@ enum PropertyLocation { kField = 0, kDescriptor = 1 };
// Must fit in the BitField PropertyDetails::ConstnessField.
enum class PropertyConstness { kMutable = 0, kConst = 1 };
-// TODO(ishell): remove once constant field tracking is done.
-const PropertyConstness kDefaultFieldConstness =
- FLAG_track_constant_fields ? PropertyConstness::kConst
- : PropertyConstness::kMutable;
-
class Representation {
public:
- enum Kind {
- kNone,
- kSmi,
- kDouble,
- kHeapObject,
- kTagged,
- kNumRepresentations
- };
+ enum Kind { kNone, kSmi, kDouble, kHeapObject, kTagged, kNumRepresentations };
- Representation() : kind_(kNone) { }
+ Representation() : kind_(kNone) {}
static Representation None() { return Representation(kNone); }
static Representation Tagged() { return Representation(kTagged); }
@@ -117,7 +105,12 @@ class Representation {
}
bool CanBeInPlaceChangedTo(const Representation& other) const {
- if (IsNone()) return true;
+ // If it's just a representation generalization case (i.e. property kind and
+ // attributes stays unchanged) it's fine to transition from None to anything
+ // but double without any modification to the object, because the default
+ // uninitialized value for representation None can be overwritten by both
+ // smi and tagged values. Doubles, however, would require a box allocation.
+ if (IsNone()) return !other.IsDouble();
if (!FLAG_modify_field_representation_inplace) return false;
return (IsSmi() || IsHeapObject()) && other.IsTagged();
}
@@ -169,7 +162,7 @@ class Representation {
}
private:
- explicit Representation(Kind k) : kind_(k) { }
+ explicit Representation(Kind k) : kind_(k) {}
// Make sure kind fits in int8.
STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
@@ -177,7 +170,6 @@ class Representation {
int8_t kind_;
};
-
static const int kDescriptorIndexBitCount = 10;
static const int kFirstInobjectPropertyOffsetBitCount = 7;
// The maximum number of descriptors we want in a descriptor array. It should
@@ -208,7 +200,6 @@ enum class PropertyCellConstantType {
kStableMap,
};
-
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails {
@@ -378,8 +369,8 @@ class PropertyDetails {
value_ = DescriptorPointer::update(value, pointer);
}
PropertyDetails(int value, Representation representation) {
- value_ = RepresentationField::update(
- value, EncodeRepresentation(representation));
+ value_ = RepresentationField::update(value,
+ EncodeRepresentation(representation));
}
PropertyDetails(int value, PropertyConstness constness) {
value_ = ConstnessField::update(value, constness);
@@ -410,7 +401,9 @@ inline PropertyConstness GeneralizeConstness(PropertyConstness a,
V8_EXPORT_PRIVATE std::ostream& operator<<(
std::ostream& os, const PropertyAttributes& attributes);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ PropertyConstness constness);
} // namespace internal
} // namespace v8
-#endif // V8_PROPERTY_DETAILS_H_
+#endif // V8_OBJECTS_PROPERTY_DETAILS_H_
diff --git a/deps/v8/src/property.cc b/deps/v8/src/objects/property.cc
index 064f329fc0..c226c28a76 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/objects/property.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/property.h"
+#include "src/objects/property.h"
-#include "src/field-type.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/field-type.h"
#include "src/objects/name-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -24,6 +24,16 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
+std::ostream& operator<<(std::ostream& os, PropertyConstness constness) {
+ switch (constness) {
+ case PropertyConstness::kMutable:
+ return os << "mutable";
+ case PropertyConstness::kConst:
+ return os << "const";
+ }
+ UNREACHABLE();
+}
+
Descriptor::Descriptor() : details_(Smi::zero()) {}
Descriptor::Descriptor(Handle<Name> key, const MaybeObjectHandle& value,
@@ -73,16 +83,9 @@ Descriptor Descriptor::DataConstant(Handle<Name> key, Handle<Object> value,
Descriptor Descriptor::DataConstant(Isolate* isolate, Handle<Name> key,
int field_index, Handle<Object> value,
PropertyAttributes attributes) {
- if (FLAG_track_constant_fields) {
- MaybeObjectHandle any_type(FieldType::Any(), isolate);
- return DataField(key, field_index, attributes, PropertyConstness::kConst,
- Representation::Tagged(), any_type);
-
- } else {
- return Descriptor(key, MaybeObjectHandle(value), kData, attributes,
- kDescriptor, PropertyConstness::kConst,
- value->OptimalRepresentation(), field_index);
- }
+ MaybeObjectHandle any_type(FieldType::Any(), isolate);
+ return DataField(key, field_index, attributes, PropertyConstness::kConst,
+ Representation::Tagged(), any_type);
}
Descriptor Descriptor::AccessorConstant(Handle<Name> key,
diff --git a/deps/v8/src/property.h b/deps/v8/src/objects/property.h
index 4e3194e1a1..100b39e1f9 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/objects/property.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROPERTY_H_
-#define V8_PROPERTY_H_
+#ifndef V8_OBJECTS_PROPERTY_H_
+#define V8_OBJECTS_PROPERTY_H_
#include <iosfwd>
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/maybe-handles.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
#include "src/objects/name.h"
-#include "src/property-details.h"
+#include "src/objects/objects.h"
+#include "src/objects/property-details.h"
namespace v8 {
namespace internal {
@@ -72,4 +72,4 @@ class V8_EXPORT_PRIVATE Descriptor final {
} // namespace internal
} // namespace v8
-#endif // V8_PROPERTY_H_
+#endif // V8_OBJECTS_PROPERTY_H_
diff --git a/deps/v8/src/objects/prototype-info-inl.h b/deps/v8/src/objects/prototype-info-inl.h
index 56104ba4c1..b83bb1346a 100644
--- a/deps/v8/src/objects/prototype-info-inl.h
+++ b/deps/v8/src/objects/prototype-info-inl.h
@@ -8,10 +8,10 @@
#include "src/objects/prototype-info.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/struct-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -48,19 +48,19 @@ BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
void PrototypeUsers::MarkSlotEmpty(WeakArrayList array, int index) {
DCHECK_GT(index, 0);
- DCHECK_LT(index, array->length());
+ DCHECK_LT(index, array.length());
// Chain the empty slots into a linked list (each empty slot contains the
// index of the next empty slot).
- array->Set(index, MaybeObject::FromObject(empty_slot_index(array)));
+ array.Set(index, MaybeObject::FromObject(empty_slot_index(array)));
set_empty_slot_index(array, index);
}
Smi PrototypeUsers::empty_slot_index(WeakArrayList array) {
- return array->Get(kEmptySlotIndex).ToSmi();
+ return array.Get(kEmptySlotIndex).ToSmi();
}
void PrototypeUsers::set_empty_slot_index(WeakArrayList array, int index) {
- array->Set(kEmptySlotIndex, MaybeObject::FromObject(Smi::FromInt(index)));
+ array.Set(kEmptySlotIndex, MaybeObject::FromObject(Smi::FromInt(index)));
}
} // namespace internal
diff --git a/deps/v8/src/objects/prototype-info.h b/deps/v8/src/objects/prototype-info.h
index 4e6ba68cde..94d86d2e19 100644
--- a/deps/v8/src/objects/prototype-info.h
+++ b/deps/v8/src/objects/prototype-info.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_PROTOTYPE_INFO_H_
#define V8_OBJECTS_PROTOTYPE_INFO_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/prototype-inl.h b/deps/v8/src/objects/prototype-inl.h
index 52786ada78..5f7c3e23c5 100644
--- a/deps/v8/src/prototype-inl.h
+++ b/deps/v8/src/objects/prototype-inl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROTOTYPE_INL_H_
-#define V8_PROTOTYPE_INL_H_
+#ifndef V8_OBJECTS_PROTOTYPE_INL_H_
+#define V8_OBJECTS_PROTOTYPE_INL_H_
-#include "src/prototype.h"
+#include "src/objects/prototype.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/js-proxy.h"
#include "src/objects/map-inl.h"
@@ -41,29 +41,29 @@ PrototypeIterator::PrototypeIterator(Isolate* isolate, JSReceiver receiver,
PrototypeIterator::PrototypeIterator(Isolate* isolate, Map receiver_map,
WhereToEnd where_to_end)
: isolate_(isolate),
- object_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype()),
+ object_(receiver_map.GetPrototypeChainRootMap(isolate_).prototype()),
where_to_end_(where_to_end),
- is_at_end_(object_->IsNull(isolate_)),
+ is_at_end_(object_.IsNull(isolate_)),
seen_proxies_(0) {
if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
- DCHECK(object_->IsJSReceiver());
- Map map = JSReceiver::cast(object_)->map();
- is_at_end_ = !map->has_hidden_prototype();
+ DCHECK(object_.IsJSReceiver());
+ Map map = JSReceiver::cast(object_).map();
+ is_at_end_ = !map.has_hidden_prototype();
}
}
PrototypeIterator::PrototypeIterator(Isolate* isolate, Handle<Map> receiver_map,
WhereToEnd where_to_end)
: isolate_(isolate),
- handle_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype(),
+ handle_(receiver_map->GetPrototypeChainRootMap(isolate_).prototype(),
isolate_),
where_to_end_(where_to_end),
is_at_end_(handle_->IsNull(isolate_)),
seen_proxies_(0) {
if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
DCHECK(handle_->IsJSReceiver());
- Map map = JSReceiver::cast(*handle_)->map();
- is_at_end_ = !map->has_hidden_prototype();
+ Map map = JSReceiver::cast(*handle_).map();
+ is_at_end_ = !map.has_hidden_prototype();
}
}
@@ -79,7 +79,7 @@ bool PrototypeIterator::HasAccess() const {
}
void PrototypeIterator::Advance() {
- if (handle_.is_null() && object_->IsJSProxy()) {
+ if (handle_.is_null() && object_.IsJSProxy()) {
is_at_end_ = true;
object_ = ReadOnlyRoots(isolate_).null_value();
return;
@@ -93,11 +93,11 @@ void PrototypeIterator::Advance() {
void PrototypeIterator::AdvanceIgnoringProxies() {
Object object = handle_.is_null() ? object_ : *handle_;
- Map map = HeapObject::cast(object)->map();
+ Map map = HeapObject::cast(object).map();
- HeapObject prototype = map->prototype();
- is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN ? !map->has_hidden_prototype()
- : prototype->IsNull(isolate_);
+ HeapObject prototype = map.prototype();
+ is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN ? !map.has_hidden_prototype()
+ : prototype.IsNull(isolate_);
if (handle_.is_null()) {
object_ = prototype;
@@ -107,7 +107,7 @@ void PrototypeIterator::AdvanceIgnoringProxies() {
}
V8_WARN_UNUSED_RESULT bool PrototypeIterator::AdvanceFollowingProxies() {
- DCHECK(!(handle_.is_null() && object_->IsJSProxy()));
+ DCHECK(!(handle_.is_null() && object_.IsJSProxy()));
if (!HasAccess()) {
// Abort the lookup if we do not have access to the current object.
handle_ = isolate_->factory()->null_value();
@@ -141,4 +141,4 @@ PrototypeIterator::AdvanceFollowingProxiesIgnoringAccessChecks() {
} // namespace internal
} // namespace v8
-#endif // V8_PROTOTYPE_INL_H_
+#endif // V8_OBJECTS_PROTOTYPE_INL_H_
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/objects/prototype.h
index 69f63291ee..cd003837ca 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/objects/prototype.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROTOTYPE_H_
-#define V8_PROTOTYPE_H_
+#ifndef V8_OBJECTS_PROTOTYPE_H_
+#define V8_OBJECTS_PROTOTYPE_H_
-#include "src/isolate.h"
-#include "src/objects.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -82,9 +82,8 @@ class PrototypeIterator {
DISALLOW_COPY_AND_ASSIGN(PrototypeIterator);
};
-
} // namespace internal
} // namespace v8
-#endif // V8_PROTOTYPE_H_
+#endif // V8_OBJECTS_PROTOTYPE_H_
diff --git a/deps/v8/src/objects/regexp-match-info.h b/deps/v8/src/objects/regexp-match-info.h
index 32828e9591..859dc4a09a 100644
--- a/deps/v8/src/objects/regexp-match-info.h
+++ b/deps/v8/src/objects/regexp-match-info.h
@@ -6,8 +6,8 @@
#define V8_OBJECTS_REGEXP_MATCH_INFO_H_
#include "src/base/compiler-specific.h"
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
diff --git a/deps/v8/src/objects/scope-info.cc b/deps/v8/src/objects/scope-info.cc
index f3b3a15ab7..af45e86af3 100644
--- a/deps/v8/src/objects/scope-info.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -8,10 +8,10 @@
#include "src/ast/scopes.h"
#include "src/ast/variables.h"
-#include "src/bootstrapper.h"
+#include "src/init/bootstrapper.h"
-#include "src/objects-inl.h"
#include "src/objects/module-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -26,27 +26,27 @@ enum ModuleVariableEntryOffset {
#ifdef DEBUG
bool ScopeInfo::Equals(ScopeInfo other) const {
- if (length() != other->length()) return false;
+ if (length() != other.length()) return false;
for (int index = 0; index < length(); ++index) {
Object entry = get(index);
- Object other_entry = other->get(index);
- if (entry->IsSmi()) {
+ Object other_entry = other.get(index);
+ if (entry.IsSmi()) {
if (entry != other_entry) return false;
} else {
- if (HeapObject::cast(entry)->map()->instance_type() !=
- HeapObject::cast(other_entry)->map()->instance_type()) {
+ if (HeapObject::cast(entry).map().instance_type() !=
+ HeapObject::cast(other_entry).map().instance_type()) {
return false;
}
- if (entry->IsString()) {
- if (!String::cast(entry)->Equals(String::cast(other_entry))) {
+ if (entry.IsString()) {
+ if (!String::cast(entry).Equals(String::cast(other_entry))) {
return false;
}
- } else if (entry->IsScopeInfo()) {
- if (!ScopeInfo::cast(entry)->Equals(ScopeInfo::cast(other_entry))) {
+ } else if (entry.IsScopeInfo()) {
+ if (!ScopeInfo::cast(entry).Equals(ScopeInfo::cast(other_entry))) {
return false;
}
- } else if (entry->IsModuleInfo()) {
- if (!ModuleInfo::cast(entry)->Equals(ModuleInfo::cast(other_entry))) {
+ } else if (entry.IsModuleInfo()) {
+ if (!ModuleInfo::cast(entry).Equals(ModuleInfo::cast(other_entry))) {
return false;
}
} else {
@@ -134,6 +134,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
function_name_info = NONE;
}
+ const bool has_brand = scope->is_class_scope()
+ ? scope->AsClassScope()->brand() != nullptr
+ : false;
const bool has_function_name = function_name_info != NONE;
const bool has_position_info = NeedsPositionInfo(scope->scope_type());
const bool has_receiver = receiver_info == STACK || receiver_info == CONTEXT;
@@ -158,7 +161,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
{
DisallowHeapAllocation no_gc;
ScopeInfo scope_info = *scope_info_handle;
- WriteBarrierMode mode = scope_info->GetWriteBarrierMode(no_gc);
+ WriteBarrierMode mode = scope_info.GetWriteBarrierMode(no_gc);
bool has_simple_parameters = false;
bool is_asm_module = false;
@@ -181,6 +184,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
LanguageModeField::encode(scope->language_mode()) |
DeclarationScopeField::encode(scope->is_declaration_scope()) |
ReceiverVariableField::encode(receiver_info) |
+ HasClassBrandField::encode(has_brand) |
HasNewTargetField::encode(has_new_target) |
FunctionVariableField::encode(function_name_info) |
HasInferredFunctionNameField::encode(has_inferred_function_name) |
@@ -191,16 +195,16 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
IsDebugEvaluateScopeField::encode(scope->is_debug_evaluate_scope()) |
ForceContextAllocationField::encode(
scope->ForceContextForLanguageMode());
- scope_info->SetFlags(flags);
+ scope_info.SetFlags(flags);
- scope_info->SetParameterCount(parameter_count);
- scope_info->SetContextLocalCount(context_local_count);
+ scope_info.SetParameterCount(parameter_count);
+ scope_info.SetContextLocalCount(context_local_count);
// Add context locals' names and info, module variables' names and info.
// Context locals are added using their index.
int context_local_base = index;
int context_local_info_base = context_local_base + context_local_count;
- int module_var_entry = scope_info->ModuleVariablesIndex();
+ int module_var_entry = scope_info.ModuleVariablesIndex();
for (Variable* var : *scope->locals()) {
switch (var->location()) {
@@ -215,23 +219,23 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
- scope_info->set(context_local_base + local_index, *var->name(), mode);
- scope_info->set(context_local_info_base + local_index,
- Smi::FromInt(info));
+ scope_info.set(context_local_base + local_index, *var->name(), mode);
+ scope_info.set(context_local_info_base + local_index,
+ Smi::FromInt(info));
break;
}
case VariableLocation::MODULE: {
- scope_info->set(module_var_entry + kModuleVariableNameOffset,
- *var->name(), mode);
- scope_info->set(module_var_entry + kModuleVariableIndexOffset,
- Smi::FromInt(var->index()));
+ scope_info.set(module_var_entry + kModuleVariableNameOffset,
+ *var->name(), mode);
+ scope_info.set(module_var_entry + kModuleVariableIndexOffset,
+ Smi::FromInt(var->index()));
uint32_t properties =
VariableModeField::encode(var->mode()) |
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
- scope_info->set(module_var_entry + kModuleVariablePropertiesOffset,
- Smi::FromInt(properties));
+ scope_info.set(module_var_entry + kModuleVariablePropertiesOffset,
+ Smi::FromInt(properties));
module_var_entry += kModuleVariableEntryLength;
break;
}
@@ -253,9 +257,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
if (parameter->location() != VariableLocation::CONTEXT) continue;
int index = parameter->index() - Context::MIN_CONTEXT_SLOTS;
int info_index = context_local_info_base + index;
- int info = Smi::ToInt(scope_info->get(info_index));
+ int info = Smi::ToInt(scope_info.get(info_index));
info = ParameterNumberField::update(info, i);
- scope_info->set(info_index, Smi::FromInt(info));
+ scope_info.set(info_index, Smi::FromInt(info));
}
// TODO(verwaest): Remove this unnecessary entry.
@@ -268,9 +272,9 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
InitFlagField::encode(var->initialization_flag()) |
MaybeAssignedFlagField::encode(var->maybe_assigned()) |
ParameterNumberField::encode(ParameterNumberField::kMax);
- scope_info->set(context_local_base + local_index, *var->name(), mode);
- scope_info->set(context_local_info_base + local_index,
- Smi::FromInt(info));
+ scope_info.set(context_local_base + local_index, *var->name(), mode);
+ scope_info.set(context_local_info_base + local_index,
+ Smi::FromInt(info));
}
}
}
@@ -278,16 +282,16 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
index += 2 * context_local_count;
// If the receiver is allocated, add its index.
- DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
+ DCHECK_EQ(index, scope_info.ReceiverInfoIndex());
if (has_receiver) {
int var_index = scope->AsDeclarationScope()->receiver()->index();
- scope_info->set(index++, Smi::FromInt(var_index));
+ scope_info.set(index++, Smi::FromInt(var_index));
// ?? DCHECK(receiver_info != CONTEXT || var_index ==
// scope_info->ContextLength() - 1);
}
// If present, add the function variable name and its index.
- DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
+ DCHECK_EQ(index, scope_info.FunctionNameInfoIndex());
if (has_function_name) {
Variable* var = scope->AsDeclarationScope()->function_var();
int var_index = -1;
@@ -296,28 +300,28 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
var_index = var->index();
name = *var->name();
}
- scope_info->set(index++, name, mode);
- scope_info->set(index++, Smi::FromInt(var_index));
+ scope_info.set(index++, name, mode);
+ scope_info.set(index++, Smi::FromInt(var_index));
DCHECK(function_name_info != CONTEXT ||
- var_index == scope_info->ContextLength() - 1);
+ var_index == scope_info.ContextLength() - 1);
}
- DCHECK_EQ(index, scope_info->InferredFunctionNameIndex());
+ DCHECK_EQ(index, scope_info.InferredFunctionNameIndex());
if (has_inferred_function_name) {
// The inferred function name is taken from the SFI.
index++;
}
- DCHECK_EQ(index, scope_info->PositionInfoIndex());
+ DCHECK_EQ(index, scope_info.PositionInfoIndex());
if (has_position_info) {
- scope_info->set(index++, Smi::FromInt(scope->start_position()));
- scope_info->set(index++, Smi::FromInt(scope->end_position()));
+ scope_info.set(index++, Smi::FromInt(scope->start_position()));
+ scope_info.set(index++, Smi::FromInt(scope->end_position()));
}
// If present, add the outer scope info.
- DCHECK(index == scope_info->OuterScopeInfoIndex());
+ DCHECK(index == scope_info.OuterScopeInfoIndex());
if (has_outer_scope_info) {
- scope_info->set(index++, *outer_scope.ToHandleChecked(), mode);
+ scope_info.set(index++, *outer_scope.ToHandleChecked(), mode);
}
}
@@ -354,9 +358,9 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
ScopeTypeField::encode(WITH_SCOPE) | CallsSloppyEvalField::encode(false) |
LanguageModeField::encode(LanguageMode::kSloppy) |
DeclarationScopeField::encode(false) |
- ReceiverVariableField::encode(NONE) | HasNewTargetField::encode(false) |
- FunctionVariableField::encode(NONE) | IsAsmModuleField::encode(false) |
- HasSimpleParametersField::encode(true) |
+ ReceiverVariableField::encode(NONE) | HasClassBrandField::encode(false) |
+ HasNewTargetField::encode(false) | FunctionVariableField::encode(NONE) |
+ IsAsmModuleField::encode(false) | HasSimpleParametersField::encode(true) |
FunctionKindField::encode(kNormalFunction) |
HasOuterScopeInfoField::encode(has_outer_scope_info) |
IsDebugEvaluateScopeField::encode(false);
@@ -416,7 +420,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForBootstrapping(Isolate* isolate,
LanguageModeField::encode(LanguageMode::kSloppy) |
DeclarationScopeField::encode(true) |
ReceiverVariableField::encode(is_empty_function ? UNUSED : CONTEXT) |
- HasNewTargetField::encode(false) |
+ HasClassBrandField::encode(false) | HasNewTargetField::encode(false) |
FunctionVariableField::encode(is_empty_function ? UNUSED : NONE) |
HasInferredFunctionNameField::encode(has_inferred_function_name) |
IsAsmModuleField::encode(false) | HasSimpleParametersField::encode(true) |
@@ -536,6 +540,10 @@ bool ScopeInfo::HasAllocatedReceiver() const {
return allocation == STACK || allocation == CONTEXT;
}
+bool ScopeInfo::HasClassBrand() const {
+ return HasClassBrandField::decode(Flags());
+}
+
bool ScopeInfo::HasNewTarget() const {
return HasNewTargetField::decode(Flags());
}
@@ -567,7 +575,7 @@ bool ScopeInfo::HasSharedFunctionName() const {
void ScopeInfo::SetFunctionName(Object name) {
DCHECK(HasFunctionName());
- DCHECK(name->IsString() || name == SharedFunctionInfo::kNoSharedNameSentinel);
+ DCHECK(name.IsString() || name == SharedFunctionInfo::kNoSharedNameSentinel);
set(FunctionNameInfoIndex(), name);
}
@@ -609,12 +617,12 @@ Object ScopeInfo::InferredFunctionName() const {
String ScopeInfo::FunctionDebugName() const {
Object name = FunctionName();
- if (name->IsString() && String::cast(name)->length() > 0) {
+ if (name.IsString() && String::cast(name).length() > 0) {
return String::cast(name);
}
if (HasInferredFunctionName()) {
name = InferredFunctionName();
- if (name->IsString()) return String::cast(name);
+ if (name.IsString()) return String::cast(name);
}
return GetReadOnlyRoots().empty_string();
}
@@ -698,15 +706,15 @@ bool ScopeInfo::VariableIsSynthetic(String name) {
// variable is a compiler-introduced temporary. However, to avoid conflict
// with user declarations, the current temporaries like .generator_object and
// .result start with a dot, so we can use that as a flag. It's a hack!
- return name->length() == 0 || name->Get(0) == '.' ||
- name->Equals(name->GetReadOnlyRoots().this_string());
+ return name.length() == 0 || name.Get(0) == '.' ||
+ name.Equals(name.GetReadOnlyRoots().this_string());
}
int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
DisallowHeapAllocation no_gc;
- DCHECK(name->IsInternalizedString());
+ DCHECK(name.IsInternalizedString());
DCHECK_EQ(scope_type(), MODULE_SCOPE);
DCHECK_NOT_NULL(mode);
DCHECK_NOT_NULL(init_flag);
@@ -716,7 +724,7 @@ int ScopeInfo::ModuleIndex(String name, VariableMode* mode,
int entry = ModuleVariablesIndex();
for (int i = 0; i < module_vars_count; ++i) {
String var_name = String::cast(get(entry + kModuleVariableNameOffset));
- if (name->Equals(var_name)) {
+ if (name.Equals(var_name)) {
int index;
ModuleVariable(i, nullptr, &index, mode, init_flag, maybe_assigned_flag);
return index;
@@ -733,24 +741,24 @@ int ScopeInfo::ContextSlotIndex(ScopeInfo scope_info, String name,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
DisallowHeapAllocation no_gc;
- DCHECK(name->IsInternalizedString());
+ DCHECK(name.IsInternalizedString());
DCHECK_NOT_NULL(mode);
DCHECK_NOT_NULL(init_flag);
DCHECK_NOT_NULL(maybe_assigned_flag);
- if (scope_info->length() == 0) return -1;
+ if (scope_info.length() == 0) return -1;
- int start = scope_info->ContextLocalNamesIndex();
- int end = start + scope_info->ContextLocalCount();
+ int start = scope_info.ContextLocalNamesIndex();
+ int end = start + scope_info.ContextLocalCount();
for (int i = start; i < end; ++i) {
- if (name != scope_info->get(i)) continue;
+ if (name != scope_info.get(i)) continue;
int var = i - start;
- *mode = scope_info->ContextLocalMode(var);
- *init_flag = scope_info->ContextLocalInitFlag(var);
- *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
+ *mode = scope_info.ContextLocalMode(var);
+ *init_flag = scope_info.ContextLocalInitFlag(var);
+ *maybe_assigned_flag = scope_info.ContextLocalMaybeAssignedFlag(var);
int result = Context::MIN_CONTEXT_SLOTS + var;
- DCHECK_LT(result, scope_info->ContextLength());
+ DCHECK_LT(result, scope_info.ContextLength());
return result;
}
@@ -765,7 +773,7 @@ int ScopeInfo::ReceiverContextSlotIndex() const {
}
int ScopeInfo::FunctionContextSlotIndex(String name) const {
- DCHECK(name->IsInternalizedString());
+ DCHECK(name.IsInternalizedString());
if (length() > 0) {
if (FunctionVariableField::decode(Flags()) == CONTEXT &&
FunctionName() == name) {
@@ -946,22 +954,22 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
}
int ModuleInfo::RegularExportCount() const {
- DCHECK_EQ(regular_exports()->length() % kRegularExportLength, 0);
- return regular_exports()->length() / kRegularExportLength;
+ DCHECK_EQ(regular_exports().length() % kRegularExportLength, 0);
+ return regular_exports().length() / kRegularExportLength;
}
String ModuleInfo::RegularExportLocalName(int i) const {
- return String::cast(regular_exports()->get(i * kRegularExportLength +
- kRegularExportLocalNameOffset));
+ return String::cast(regular_exports().get(i * kRegularExportLength +
+ kRegularExportLocalNameOffset));
}
int ModuleInfo::RegularExportCellIndex(int i) const {
- return Smi::ToInt(regular_exports()->get(i * kRegularExportLength +
- kRegularExportCellIndexOffset));
+ return Smi::ToInt(regular_exports().get(i * kRegularExportLength +
+ kRegularExportCellIndexOffset));
}
FixedArray ModuleInfo::RegularExportExportNames(int i) const {
- return FixedArray::cast(regular_exports()->get(
+ return FixedArray::cast(regular_exports().get(
i * kRegularExportLength + kRegularExportExportNamesOffset));
}
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
index b5fb1d1a7c..8d43357631 100644
--- a/deps/v8/src/objects/scope-info.h
+++ b/deps/v8/src/objects/scope-info.h
@@ -5,11 +5,11 @@
#ifndef V8_OBJECTS_SCOPE_INFO_H_
#define V8_OBJECTS_SCOPE_INFO_H_
-#include "src/function-kind.h"
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
-#include "src/utils.h"
+#include "src/objects/function-kind.h"
+#include "src/objects/objects.h"
+#include "src/utils/utils.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -69,6 +69,9 @@ class ScopeInfo : public FixedArray {
// or context-allocated?
bool HasAllocatedReceiver() const;
+ // Does this scope has class brand (for private methods)?
+ bool HasClassBrand() const;
+
// Does this scope declare a "new.target" binding?
bool HasNewTarget() const;
@@ -228,8 +231,10 @@ class ScopeInfo : public FixedArray {
class ReceiverVariableField
: public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
2> {};
- class HasNewTargetField
+ class HasClassBrandField
: public BitField<bool, ReceiverVariableField::kNext, 1> {};
+ class HasNewTargetField
+ : public BitField<bool, HasClassBrandField::kNext, 1> {};
class FunctionVariableField
: public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
// TODO(cbruni): Combine with function variable field when only storing the
diff --git a/deps/v8/src/objects/script-inl.h b/deps/v8/src/objects/script-inl.h
index 33c794e4a5..07450c73ec 100644
--- a/deps/v8/src/objects/script-inl.h
+++ b/deps/v8/src/objects/script-inl.h
@@ -47,11 +47,11 @@ ACCESSORS_CHECKED(Script, wasm_module_object, Object,
this->type() == TYPE_WASM)
bool Script::is_wrapped() const {
- return eval_from_shared_or_wrapped_arguments()->IsFixedArray();
+ return eval_from_shared_or_wrapped_arguments().IsFixedArray();
}
bool Script::has_eval_from_shared() const {
- return eval_from_shared_or_wrapped_arguments()->IsSharedFunctionInfo();
+ return eval_from_shared_or_wrapped_arguments().IsSharedFunctionInfo();
}
void Script::set_eval_from_shared(SharedFunctionInfo shared,
@@ -104,13 +104,13 @@ void Script::set_origin_options(ScriptOriginOptions origin_options) {
bool Script::HasValidSource() {
Object src = this->source();
- if (!src->IsString()) return true;
+ if (!src.IsString()) return true;
String src_str = String::cast(src);
if (!StringShape(src_str).IsExternal()) return true;
- if (src_str->IsOneByteRepresentation()) {
- return ExternalOneByteString::cast(src)->resource() != nullptr;
- } else if (src_str->IsTwoByteRepresentation()) {
- return ExternalTwoByteString::cast(src)->resource() != nullptr;
+ if (src_str.IsOneByteRepresentation()) {
+ return ExternalOneByteString::cast(src).resource() != nullptr;
+ } else if (src_str.IsTwoByteRepresentation()) {
+ return ExternalTwoByteString::cast(src).resource() != nullptr;
}
return true;
}
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 315ab038a8..2d9e4bca78 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_SCRIPT_H_
#define V8_OBJECTS_SCRIPT_H_
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
#include "src/objects/struct.h"
// Has to be the last include (doesn't have include guards):
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 1187db2d94..f5413ce1de 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -7,10 +7,10 @@
#include "src/objects/shared-function-info.h"
-#include "src/feedback-vector-inl.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/feedback-vector-inl.h"
#include "src/objects/scope-info.h"
#include "src/objects/templates.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -47,14 +47,14 @@ byte PreparseData::get(int index) const {
DCHECK_LE(0, index);
DCHECK_LT(index, data_length());
int offset = kDataStartOffset + index * kByteSize;
- return READ_BYTE_FIELD(*this, offset);
+ return ReadField<byte>(offset);
}
void PreparseData::set(int index, byte value) {
DCHECK_LE(0, index);
DCHECK_LT(index, data_length());
int offset = kDataStartOffset + index * kByteSize;
- WRITE_BYTE_FIELD(*this, offset, value);
+ WriteField<byte>(offset, value);
}
void PreparseData::copy_in(int index, const byte* buffer, int length) {
@@ -128,6 +128,9 @@ ACCESSORS(SharedFunctionInfo, name_or_scope_info, Object,
ACCESSORS(SharedFunctionInfo, script_or_debug_info, Object,
kScriptOrDebugInfoOffset)
+#if V8_SFI_HAS_UNIQUE_ID
+INT_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
+#endif
UINT16_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
UINT16_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
kFormalParameterCountOffset)
@@ -139,8 +142,8 @@ RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
bool SharedFunctionInfo::HasSharedName() const {
Object value = name_or_scope_info();
- if (value->IsScopeInfo()) {
- return ScopeInfo::cast(value)->HasSharedFunctionName();
+ if (value.IsScopeInfo()) {
+ return ScopeInfo::cast(value).HasSharedFunctionName();
}
return value != kNoSharedNameSentinel;
}
@@ -148,9 +151,9 @@ bool SharedFunctionInfo::HasSharedName() const {
String SharedFunctionInfo::Name() const {
if (!HasSharedName()) return GetReadOnlyRoots().empty_string();
Object value = name_or_scope_info();
- if (value->IsScopeInfo()) {
- if (ScopeInfo::cast(value)->HasFunctionName()) {
- return String::cast(ScopeInfo::cast(value)->FunctionName());
+ if (value.IsScopeInfo()) {
+ if (ScopeInfo::cast(value).HasFunctionName()) {
+ return String::cast(ScopeInfo::cast(value).FunctionName());
}
return GetReadOnlyRoots().empty_string();
}
@@ -159,10 +162,10 @@ String SharedFunctionInfo::Name() const {
void SharedFunctionInfo::SetName(String name) {
Object maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
- ScopeInfo::cast(maybe_scope_info)->SetFunctionName(name);
+ if (maybe_scope_info.IsScopeInfo()) {
+ ScopeInfo::cast(maybe_scope_info).SetFunctionName(name);
} else {
- DCHECK(maybe_scope_info->IsString() ||
+ DCHECK(maybe_scope_info.IsString() ||
maybe_scope_info == kNoSharedNameSentinel);
set_name_or_scope_info(name);
}
@@ -335,7 +338,7 @@ bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
ScopeInfo SharedFunctionInfo::scope_info() const {
Object maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
+ if (maybe_scope_info.IsScopeInfo()) {
return ScopeInfo::cast(maybe_scope_info);
}
return ScopeInfo::Empty(GetIsolate());
@@ -345,14 +348,14 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo scope_info,
WriteBarrierMode mode) {
// Move the existing name onto the ScopeInfo.
Object name = name_or_scope_info();
- if (name->IsScopeInfo()) {
- name = ScopeInfo::cast(name)->FunctionName();
+ if (name.IsScopeInfo()) {
+ name = ScopeInfo::cast(name).FunctionName();
}
- DCHECK(name->IsString() || name == kNoSharedNameSentinel);
+ DCHECK(name.IsString() || name == kNoSharedNameSentinel);
// Only set the function name for function scopes.
- scope_info->SetFunctionName(name);
- if (HasInferredName() && inferred_name()->length() != 0) {
- scope_info->SetInferredFunctionName(inferred_name());
+ scope_info.SetFunctionName(name);
+ if (HasInferredName() && inferred_name().length() != 0) {
+ scope_info.SetInferredFunctionName(inferred_name());
}
WRITE_FIELD(*this, kNameOrScopeInfoOffset, scope_info);
CONDITIONAL_WRITE_BARRIER(*this, kNameOrScopeInfoOffset, scope_info, mode);
@@ -370,31 +373,31 @@ HeapObject SharedFunctionInfo::outer_scope_info() const {
bool SharedFunctionInfo::HasOuterScopeInfo() const {
ScopeInfo outer_info;
if (!is_compiled()) {
- if (!outer_scope_info()->IsScopeInfo()) return false;
+ if (!outer_scope_info().IsScopeInfo()) return false;
outer_info = ScopeInfo::cast(outer_scope_info());
} else {
- if (!scope_info()->HasOuterScopeInfo()) return false;
- outer_info = scope_info()->OuterScopeInfo();
+ if (!scope_info().HasOuterScopeInfo()) return false;
+ outer_info = scope_info().OuterScopeInfo();
}
- return outer_info->length() > 0;
+ return outer_info.length() > 0;
}
ScopeInfo SharedFunctionInfo::GetOuterScopeInfo() const {
DCHECK(HasOuterScopeInfo());
if (!is_compiled()) return ScopeInfo::cast(outer_scope_info());
- return scope_info()->OuterScopeInfo();
+ return scope_info().OuterScopeInfo();
}
void SharedFunctionInfo::set_outer_scope_info(HeapObject value,
WriteBarrierMode mode) {
DCHECK(!is_compiled());
- DCHECK(raw_outer_scope_info_or_feedback_metadata()->IsTheHole());
- DCHECK(value->IsScopeInfo() || value->IsTheHole());
+ DCHECK(raw_outer_scope_info_or_feedback_metadata().IsTheHole());
+ DCHECK(value.IsScopeInfo() || value.IsTheHole());
set_raw_outer_scope_info_or_feedback_metadata(value, mode);
}
bool SharedFunctionInfo::HasFeedbackMetadata() const {
- return raw_outer_scope_info_or_feedback_metadata()->IsFeedbackMetadata();
+ return raw_outer_scope_info_or_feedback_metadata().IsFeedbackMetadata();
}
FeedbackMetadata SharedFunctionInfo::feedback_metadata() const {
@@ -405,14 +408,14 @@ FeedbackMetadata SharedFunctionInfo::feedback_metadata() const {
void SharedFunctionInfo::set_feedback_metadata(FeedbackMetadata value,
WriteBarrierMode mode) {
DCHECK(!HasFeedbackMetadata());
- DCHECK(value->IsFeedbackMetadata());
+ DCHECK(value.IsFeedbackMetadata());
set_raw_outer_scope_info_or_feedback_metadata(value, mode);
}
bool SharedFunctionInfo::is_compiled() const {
Object data = function_data();
return data != Smi::FromEnum(Builtins::kCompileLazy) &&
- !data->IsUncompiledData();
+ !data.IsUncompiledData();
}
IsCompiledScope SharedFunctionInfo::is_compiled_scope() const {
@@ -421,19 +424,19 @@ IsCompiledScope SharedFunctionInfo::is_compiled_scope() const {
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
Isolate* isolate)
- : retain_bytecode_(shared->HasBytecodeArray()
- ? handle(shared->GetBytecodeArray(), isolate)
+ : retain_bytecode_(shared.HasBytecodeArray()
+ ? handle(shared.GetBytecodeArray(), isolate)
: MaybeHandle<BytecodeArray>()),
- is_compiled_(shared->is_compiled()) {
+ is_compiled_(shared.is_compiled()) {
DCHECK_IMPLIES(!retain_bytecode_.is_null(), is_compiled());
}
bool SharedFunctionInfo::has_simple_parameters() {
- return scope_info()->HasSimpleParameters();
+ return scope_info().HasSimpleParameters();
}
bool SharedFunctionInfo::IsApiFunction() const {
- return function_data()->IsFunctionTemplateInfo();
+ return function_data().IsFunctionTemplateInfo();
}
FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() {
@@ -442,40 +445,40 @@ FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() {
}
bool SharedFunctionInfo::HasBytecodeArray() const {
- return function_data()->IsBytecodeArray() ||
- function_data()->IsInterpreterData();
+ return function_data().IsBytecodeArray() ||
+ function_data().IsInterpreterData();
}
BytecodeArray SharedFunctionInfo::GetBytecodeArray() const {
DCHECK(HasBytecodeArray());
- if (HasDebugInfo() && GetDebugInfo()->HasInstrumentedBytecodeArray()) {
- return GetDebugInfo()->OriginalBytecodeArray();
- } else if (function_data()->IsBytecodeArray()) {
+ if (HasDebugInfo() && GetDebugInfo().HasInstrumentedBytecodeArray()) {
+ return GetDebugInfo().OriginalBytecodeArray();
+ } else if (function_data().IsBytecodeArray()) {
return BytecodeArray::cast(function_data());
} else {
- DCHECK(function_data()->IsInterpreterData());
- return InterpreterData::cast(function_data())->bytecode_array();
+ DCHECK(function_data().IsInterpreterData());
+ return InterpreterData::cast(function_data()).bytecode_array();
}
}
BytecodeArray SharedFunctionInfo::GetDebugBytecodeArray() const {
DCHECK(HasBytecodeArray());
- DCHECK(HasDebugInfo() && GetDebugInfo()->HasInstrumentedBytecodeArray());
- if (function_data()->IsBytecodeArray()) {
+ DCHECK(HasDebugInfo() && GetDebugInfo().HasInstrumentedBytecodeArray());
+ if (function_data().IsBytecodeArray()) {
return BytecodeArray::cast(function_data());
} else {
- DCHECK(function_data()->IsInterpreterData());
- return InterpreterData::cast(function_data())->bytecode_array();
+ DCHECK(function_data().IsInterpreterData());
+ return InterpreterData::cast(function_data()).bytecode_array();
}
}
void SharedFunctionInfo::SetDebugBytecodeArray(BytecodeArray bytecode) {
DCHECK(HasBytecodeArray());
- if (function_data()->IsBytecodeArray()) {
+ if (function_data().IsBytecodeArray()) {
set_function_data(bytecode);
} else {
- DCHECK(function_data()->IsInterpreterData());
- interpreter_data()->set_bytecode_array(bytecode);
+ DCHECK(function_data().IsInterpreterData());
+ interpreter_data().set_bytecode_array(bytecode);
}
}
@@ -497,22 +500,22 @@ bool SharedFunctionInfo::ShouldFlushBytecode(BytecodeFlushMode mode) {
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
Object data = function_data();
- if (!data->IsBytecodeArray()) return false;
+ if (!data.IsBytecodeArray()) return false;
if (mode == BytecodeFlushMode::kStressFlushBytecode) return true;
BytecodeArray bytecode = BytecodeArray::cast(data);
- return bytecode->IsOld();
+ return bytecode.IsOld();
}
Code SharedFunctionInfo::InterpreterTrampoline() const {
DCHECK(HasInterpreterData());
- return interpreter_data()->interpreter_trampoline();
+ return interpreter_data().interpreter_trampoline();
}
bool SharedFunctionInfo::HasInterpreterData() const {
- return function_data()->IsInterpreterData();
+ return function_data().IsInterpreterData();
}
InterpreterData SharedFunctionInfo::interpreter_data() const {
@@ -527,7 +530,7 @@ void SharedFunctionInfo::set_interpreter_data(
}
bool SharedFunctionInfo::HasAsmWasmData() const {
- return function_data()->IsAsmWasmData();
+ return function_data().IsAsmWasmData();
}
AsmWasmData SharedFunctionInfo::asm_wasm_data() const {
@@ -542,7 +545,7 @@ void SharedFunctionInfo::set_asm_wasm_data(AsmWasmData data) {
}
bool SharedFunctionInfo::HasBuiltinId() const {
- return function_data()->IsSmi();
+ return function_data().IsSmi();
}
int SharedFunctionInfo::builtin_id() const {
@@ -558,7 +561,7 @@ void SharedFunctionInfo::set_builtin_id(int builtin_id) {
}
bool SharedFunctionInfo::HasUncompiledData() const {
- return function_data()->IsUncompiledData();
+ return function_data().IsUncompiledData();
}
UncompiledData SharedFunctionInfo::uncompiled_data() const {
@@ -568,12 +571,12 @@ UncompiledData SharedFunctionInfo::uncompiled_data() const {
void SharedFunctionInfo::set_uncompiled_data(UncompiledData uncompiled_data) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
- DCHECK(uncompiled_data->IsUncompiledData());
+ DCHECK(uncompiled_data.IsUncompiledData());
set_function_data(uncompiled_data);
}
bool SharedFunctionInfo::HasUncompiledDataWithPreparseData() const {
- return function_data()->IsUncompiledDataWithPreparseData();
+ return function_data().IsUncompiledDataWithPreparseData();
}
UncompiledDataWithPreparseData
@@ -585,13 +588,12 @@ SharedFunctionInfo::uncompiled_data_with_preparse_data() const {
void SharedFunctionInfo::set_uncompiled_data_with_preparse_data(
UncompiledDataWithPreparseData uncompiled_data_with_preparse_data) {
DCHECK(function_data() == Smi::FromEnum(Builtins::kCompileLazy));
- DCHECK(
- uncompiled_data_with_preparse_data->IsUncompiledDataWithPreparseData());
+ DCHECK(uncompiled_data_with_preparse_data.IsUncompiledDataWithPreparseData());
set_function_data(uncompiled_data_with_preparse_data);
}
bool SharedFunctionInfo::HasUncompiledDataWithoutPreparseData() const {
- return function_data()->IsUncompiledDataWithoutPreparseData();
+ return function_data().IsUncompiledDataWithoutPreparseData();
}
void SharedFunctionInfo::ClearPreparseData() {
@@ -610,12 +612,12 @@ void SharedFunctionInfo::ClearPreparseData() {
UncompiledDataWithPreparseData::kSize);
STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
UncompiledData::kSize);
- data->synchronized_set_map(
+ data.synchronized_set_map(
GetReadOnlyRoots().uncompiled_data_without_preparse_data_map());
// Fill the remaining space with filler.
heap->CreateFillerObjectAt(
- data->address() + UncompiledDataWithoutPreparseData::kSize,
+ data.address() + UncompiledDataWithoutPreparseData::kSize,
UncompiledDataWithPreparseData::kSize -
UncompiledDataWithoutPreparseData::kSize,
ClearRecordedSlots::kNo);
@@ -624,23 +626,19 @@ void SharedFunctionInfo::ClearPreparseData() {
DCHECK(HasUncompiledDataWithoutPreparseData());
}
-OBJECT_CONSTRUCTORS_IMPL(SharedFunctionInfoWithID, SharedFunctionInfo)
-CAST_ACCESSOR(SharedFunctionInfoWithID)
-INT_ACCESSORS(SharedFunctionInfoWithID, unique_id, kUniqueIdOffset)
-
// static
void UncompiledData::Initialize(
UncompiledData data, String inferred_name, int start_position,
int end_position, int function_literal_id,
std::function<void(HeapObject object, ObjectSlot slot, HeapObject target)>
gc_notify_updated_slot) {
- data->set_inferred_name(inferred_name);
+ data.set_inferred_name(inferred_name);
gc_notify_updated_slot(
- data, data->RawField(UncompiledData::kInferredNameOffset), inferred_name);
- data->set_start_position(start_position);
- data->set_end_position(end_position);
- data->set_function_literal_id(function_literal_id);
- data->clear_padding();
+ data, data.RawField(UncompiledData::kInferredNameOffset), inferred_name);
+ data.set_start_position(start_position);
+ data.set_end_position(end_position);
+ data.set_function_literal_id(function_literal_id);
+ data.clear_padding();
}
void UncompiledDataWithPreparseData::Initialize(
@@ -651,9 +649,9 @@ void UncompiledDataWithPreparseData::Initialize(
gc_notify_updated_slot) {
UncompiledData::Initialize(data, inferred_name, start_position, end_position,
function_literal_id, gc_notify_updated_slot);
- data->set_preparse_data(scope_data);
+ data.set_preparse_data(scope_data);
gc_notify_updated_slot(
- data, data->RawField(UncompiledDataWithPreparseData::kPreparseDataOffset),
+ data, data.RawField(UncompiledDataWithPreparseData::kPreparseDataOffset),
scope_data);
}
@@ -662,28 +660,36 @@ bool UncompiledData::has_function_literal_id() {
}
bool SharedFunctionInfo::HasWasmExportedFunctionData() const {
- return function_data()->IsWasmExportedFunctionData();
+ return function_data().IsWasmExportedFunctionData();
+}
+
+bool SharedFunctionInfo::HasWasmJSFunctionData() const {
+ return function_data().IsWasmJSFunctionData();
+}
+
+bool SharedFunctionInfo::HasWasmCapiFunctionData() const {
+ return function_data().IsWasmCapiFunctionData();
}
Object SharedFunctionInfo::script() const {
Object maybe_script = script_or_debug_info();
- if (maybe_script->IsDebugInfo()) {
- return DebugInfo::cast(maybe_script)->script();
+ if (maybe_script.IsDebugInfo()) {
+ return DebugInfo::cast(maybe_script).script();
}
return maybe_script;
}
void SharedFunctionInfo::set_script(Object script) {
Object maybe_debug_info = script_or_debug_info();
- if (maybe_debug_info->IsDebugInfo()) {
- DebugInfo::cast(maybe_debug_info)->set_script(script);
+ if (maybe_debug_info.IsDebugInfo()) {
+ DebugInfo::cast(maybe_debug_info).set_script(script);
} else {
set_script_or_debug_info(script);
}
}
bool SharedFunctionInfo::HasDebugInfo() const {
- return script_or_debug_info()->IsDebugInfo();
+ return script_or_debug_info().IsDebugInfo();
}
DebugInfo SharedFunctionInfo::GetDebugInfo() const {
@@ -693,37 +699,37 @@ DebugInfo SharedFunctionInfo::GetDebugInfo() const {
void SharedFunctionInfo::SetDebugInfo(DebugInfo debug_info) {
DCHECK(!HasDebugInfo());
- DCHECK_EQ(debug_info->script(), script_or_debug_info());
+ DCHECK_EQ(debug_info.script(), script_or_debug_info());
set_script_or_debug_info(debug_info);
}
bool SharedFunctionInfo::HasInferredName() {
Object scope_info = name_or_scope_info();
- if (scope_info->IsScopeInfo()) {
- return ScopeInfo::cast(scope_info)->HasInferredFunctionName();
+ if (scope_info.IsScopeInfo()) {
+ return ScopeInfo::cast(scope_info).HasInferredFunctionName();
}
return HasUncompiledData();
}
String SharedFunctionInfo::inferred_name() {
Object maybe_scope_info = name_or_scope_info();
- if (maybe_scope_info->IsScopeInfo()) {
+ if (maybe_scope_info.IsScopeInfo()) {
ScopeInfo scope_info = ScopeInfo::cast(maybe_scope_info);
- if (scope_info->HasInferredFunctionName()) {
- Object name = scope_info->InferredFunctionName();
- if (name->IsString()) return String::cast(name);
+ if (scope_info.HasInferredFunctionName()) {
+ Object name = scope_info.InferredFunctionName();
+ if (name.IsString()) return String::cast(name);
}
} else if (HasUncompiledData()) {
- return uncompiled_data()->inferred_name();
+ return uncompiled_data().inferred_name();
}
return GetReadOnlyRoots().empty_string();
}
bool SharedFunctionInfo::IsUserJavaScript() {
Object script_obj = script();
- if (script_obj->IsUndefined()) return false;
+ if (script_obj.IsUndefined()) return false;
Script script = Script::cast(script_obj);
- return script->IsUserJavaScript();
+ return script.IsUserJavaScript();
}
bool SharedFunctionInfo::IsSubjectToDebugging() {
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index 5b79098fc0..a3b84ee46e 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -5,16 +5,16 @@
#ifndef V8_OBJECTS_SHARED_FUNCTION_INFO_H_
#define V8_OBJECTS_SHARED_FUNCTION_INFO_H_
-#include "src/bailout-reason.h"
-#include "src/function-kind.h"
-#include "src/objects.h"
+#include "src/codegen/bailout-reason.h"
#include "src/objects/compressed-slots.h"
+#include "src/objects/function-kind.h"
+#include "src/objects/objects.h"
#include "src/objects/script.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/struct.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "torque-generated/field-offsets-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -32,7 +32,9 @@ class BytecodeArray;
class CoverageInfo;
class DebugInfo;
class IsCompiledScope;
+class WasmCapiFunctionData;
class WasmExportedFunctionData;
+class WasmJSFunctionData;
// Data collected by the pre-parser storing information about scopes and inner
// functions.
@@ -118,9 +120,9 @@ class UncompiledData : public HeapObject {
// Layout description.
#define UNCOMPILED_DATA_FIELDS(V) \
- V(kStartOfPointerFieldsOffset, 0) \
+ V(kStartOfStrongFieldsOffset, 0) \
V(kInferredNameOffset, kTaggedSize) \
- V(kEndOfTaggedFieldsOffset, 0) \
+ V(kEndOfStrongFieldsOffset, 0) \
/* Raw data fields. */ \
V(kStartPositionOffset, kInt32Size) \
V(kEndPositionOffset, kInt32Size) \
@@ -132,8 +134,8 @@ class UncompiledData : public HeapObject {
DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, UNCOMPILED_DATA_FIELDS)
#undef UNCOMPILED_DATA_FIELDS
- using BodyDescriptor = FixedBodyDescriptor<kStartOfPointerFieldsOffset,
- kEndOfTaggedFieldsOffset, kSize>;
+ using BodyDescriptor = FixedBodyDescriptor<kStartOfStrongFieldsOffset,
+ kEndOfStrongFieldsOffset, kSize>;
// Clear uninitialized padding space.
inline void clear_padding();
@@ -179,9 +181,9 @@ class UncompiledDataWithPreparseData : public UncompiledData {
// Layout description.
#define UNCOMPILED_DATA_WITH_PREPARSE_DATA_FIELDS(V) \
- V(kStartOfPointerFieldsOffset, 0) \
+ V(kStartOfStrongFieldsOffset, 0) \
V(kPreparseDataOffset, kTaggedSize) \
- V(kEndOfTaggedFieldsOffset, 0) \
+ V(kEndOfStrongFieldsOffset, 0) \
/* Total size. */ \
V(kSize, 0)
@@ -194,7 +196,7 @@ class UncompiledDataWithPreparseData : public UncompiledData {
using BodyDescriptor = SubclassBodyDescriptor<
UncompiledData::BodyDescriptor,
- FixedBodyDescriptor<kStartOfPointerFieldsOffset, kEndOfTaggedFieldsOffset,
+ FixedBodyDescriptor<kStartOfStrongFieldsOffset, kEndOfStrongFieldsOffset,
kSize>>;
OBJECT_CONSTRUCTORS(UncompiledDataWithPreparseData, UncompiledData);
@@ -314,6 +316,12 @@ class SharedFunctionInfo : public HeapObject {
// function. The value is only reliable when the function has been compiled.
DECL_UINT16_ACCESSORS(expected_nof_properties)
+#if V8_SFI_HAS_UNIQUE_ID
+ // [unique_id] - For --trace-maps purposes, an identifier that's persistent
+ // even if the GC moves this SharedFunctionInfo.
+ DECL_INT_ACCESSORS(unique_id)
+#endif
+
// [function data]: This field holds some additional data for function.
// Currently it has one of:
// - a FunctionTemplateInfo to make benefit the API [IsApiFunction()].
@@ -361,6 +369,10 @@ class SharedFunctionInfo : public HeapObject {
inline bool HasUncompiledDataWithoutPreparseData() const;
inline bool HasWasmExportedFunctionData() const;
WasmExportedFunctionData wasm_exported_function_data() const;
+ inline bool HasWasmJSFunctionData() const;
+ WasmJSFunctionData wasm_js_function_data() const;
+ inline bool HasWasmCapiFunctionData() const;
+ WasmCapiFunctionData wasm_capi_function_data() const;
// Clear out pre-parsed scope data from UncompiledDataWithPreparseData,
// turning it into UncompiledDataWithoutPreparseData.
@@ -581,6 +593,8 @@ class SharedFunctionInfo : public HeapObject {
static void EnsureSourcePositionsAvailable(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info);
+ bool AreSourcePositionsAvailable() const;
+
// Hash based on function literal id and script id.
V8_EXPORT_PRIVATE uint32_t Hash();
@@ -601,7 +615,8 @@ class SharedFunctionInfo : public HeapObject {
#endif
// Returns the SharedFunctionInfo in a format tracing can support.
- std::unique_ptr<v8::tracing::TracedValue> ToTracedValue();
+ std::unique_ptr<v8::tracing::TracedValue> ToTracedValue(
+ FunctionLiteral* literal);
// The tracing scope for SharedFunctionInfo objects.
static const char* kTraceScope;
@@ -702,14 +717,6 @@ class SharedFunctionInfo : public HeapObject {
// This is needed to set up the [[HomeObject]] on the function instance.
inline bool needs_home_object() const;
- V8_INLINE bool IsSharedFunctionInfoWithID() const {
-#if V8_SFI_HAS_UNIQUE_ID
- return true;
-#else
- return false;
-#endif
- }
-
private:
// [name_or_scope_info]: Function name string, kNoSharedNameSentinel or
// ScopeInfo.
@@ -744,23 +751,6 @@ class SharedFunctionInfo : public HeapObject {
OBJECT_CONSTRUCTORS(SharedFunctionInfo, HeapObject);
};
-class SharedFunctionInfoWithID : public SharedFunctionInfo {
- public:
- // [unique_id] - For --trace-maps purposes, an identifier that's persistent
- // even if the GC moves this SharedFunctionInfo.
- DECL_INT_ACCESSORS(unique_id)
-
- DECL_CAST(SharedFunctionInfoWithID)
-
- DEFINE_FIELD_OFFSET_CONSTANTS(
- SharedFunctionInfo::kSize,
- TORQUE_GENERATED_SHARED_FUNCTION_INFO_WITH_ID_FIELDS)
-
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
-
- OBJECT_CONSTRUCTORS(SharedFunctionInfoWithID, SharedFunctionInfo);
-};
-
// Printing support.
struct SourceCodeOf {
explicit SourceCodeOf(SharedFunctionInfo v, int max = -1)
diff --git a/deps/v8/src/objects/slots-atomic-inl.h b/deps/v8/src/objects/slots-atomic-inl.h
index 57da18dd66..220013b7b5 100644
--- a/deps/v8/src/objects/slots-atomic-inl.h
+++ b/deps/v8/src/objects/slots-atomic-inl.h
@@ -80,6 +80,7 @@ class AtomicSlot : public SlotBase<AtomicSlot, Tagged_t> {
AtomicSlot() : SlotBase(kNullAddress) {}
explicit AtomicSlot(Address address) : SlotBase(address) {}
explicit AtomicSlot(ObjectSlot slot) : SlotBase(slot.address()) {}
+ explicit AtomicSlot(MaybeObjectSlot slot) : SlotBase(slot.address()) {}
Reference operator*() const {
return Reference(reinterpret_cast<Tagged_t*>(address()));
diff --git a/deps/v8/src/objects/slots-inl.h b/deps/v8/src/objects/slots-inl.h
index e0a42fbd91..b240729114 100644
--- a/deps/v8/src/objects/slots-inl.h
+++ b/deps/v8/src/objects/slots-inl.h
@@ -8,11 +8,12 @@
#include "src/objects/slots.h"
#include "src/base/atomic-utils.h"
-#include "src/memcopy.h"
-#include "src/objects.h"
-#include "src/objects/heap-object-inl.h"
+#include "src/common/ptr-compr-inl.h"
+#include "src/objects/compressed-slots.h"
+#include "src/objects/heap-object.h"
#include "src/objects/maybe-object.h"
-#include "src/ptr-compr-inl.h"
+#include "src/objects/objects.h"
+#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
@@ -30,7 +31,7 @@ bool FullObjectSlot::contains_value(Address raw_value) const {
const Object FullObjectSlot::operator*() const { return Object(*location()); }
-void FullObjectSlot::store(Object value) const { *location() = value->ptr(); }
+void FullObjectSlot::store(Object value) const { *location() = value.ptr(); }
Object FullObjectSlot::Acquire_Load() const {
return Object(base::AsAtomicPointer::Acquire_Load(location()));
@@ -41,16 +42,16 @@ Object FullObjectSlot::Relaxed_Load() const {
}
void FullObjectSlot::Relaxed_Store(Object value) const {
- base::AsAtomicPointer::Relaxed_Store(location(), value->ptr());
+ base::AsAtomicPointer::Relaxed_Store(location(), value.ptr());
}
void FullObjectSlot::Release_Store(Object value) const {
- base::AsAtomicPointer::Release_Store(location(), value->ptr());
+ base::AsAtomicPointer::Release_Store(location(), value.ptr());
}
Object FullObjectSlot::Release_CompareAndSwap(Object old, Object target) const {
Address result = base::AsAtomicPointer::Release_CompareAndSwap(
- location(), old->ptr(), target->ptr());
+ location(), old.ptr(), target.ptr());
return Object(result);
}
@@ -98,7 +99,7 @@ HeapObject FullHeapObjectSlot::ToHeapObject() const {
}
void FullHeapObjectSlot::StoreHeapObject(HeapObject value) const {
- *location() = value->ptr();
+ *location() = value.ptr();
}
//
diff --git a/deps/v8/src/objects/slots.h b/deps/v8/src/objects/slots.h
index 18c8152f5b..fa8b558939 100644
--- a/deps/v8/src/objects/slots.h
+++ b/deps/v8/src/objects/slots.h
@@ -5,8 +5,8 @@
#ifndef V8_OBJECTS_SLOTS_H_
#define V8_OBJECTS_SLOTS_H_
-#include "src/globals.h"
-#include "src/v8memory.h"
+#include "src/common/globals.h"
+#include "src/common/v8memory.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/objects/smi-inl.h b/deps/v8/src/objects/smi-inl.h
index 38d644fbea..b355a5b1bd 100644
--- a/deps/v8/src/objects/smi-inl.h
+++ b/deps/v8/src/objects/smi-inl.h
@@ -13,9 +13,7 @@
namespace v8 {
namespace internal {
-CAST_ACCESSOR(Smi)
-
-int Smi::ToInt(const Object object) { return Smi::cast(object)->value(); }
+// TODO(ishell): remove this file
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/smi.h b/deps/v8/src/objects/smi.h
index 938fc5504d..0f93f37458 100644
--- a/deps/v8/src/objects/smi.h
+++ b/deps/v8/src/objects/smi.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_SMI_H_
#define V8_OBJECTS_SMI_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/heap-object.h"
// Has to be the last include (doesn't have include guards):
@@ -41,7 +41,9 @@ class Smi : public Object {
}
// Convert a Smi object to an int.
- static inline int ToInt(const Object object);
+ static inline int ToInt(const Object object) {
+ return Smi::cast(object).value();
+ }
// Convert a value to a Smi object.
static inline constexpr Smi FromInt(int value) {
@@ -107,6 +109,8 @@ class Smi : public Object {
static constexpr int kMaxValue = kSmiMaxValue;
};
+CAST_ACCESSOR(Smi)
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/stack-frame-info-inl.h b/deps/v8/src/objects/stack-frame-info-inl.h
index 1007c78b18..8069e6e5c9 100644
--- a/deps/v8/src/objects/stack-frame-info-inl.h
+++ b/deps/v8/src/objects/stack-frame-info-inl.h
@@ -8,8 +8,8 @@
#include "src/objects/stack-frame-info.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/struct-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -27,14 +27,20 @@ CAST_ACCESSOR(StackFrameInfo)
SMI_ACCESSORS(StackFrameInfo, line_number, kLineNumberOffset)
SMI_ACCESSORS(StackFrameInfo, column_number, kColumnNumberOffset)
SMI_ACCESSORS(StackFrameInfo, script_id, kScriptIdOffset)
+SMI_ACCESSORS(StackFrameInfo, promise_all_index, kPromiseAllIndexOffset)
ACCESSORS(StackFrameInfo, script_name, Object, kScriptNameOffset)
ACCESSORS(StackFrameInfo, script_name_or_source_url, Object,
kScriptNameOrSourceUrlOffset)
ACCESSORS(StackFrameInfo, function_name, Object, kFunctionNameOffset)
+ACCESSORS(StackFrameInfo, wasm_module_name, Object, kWasmModuleNameOffset)
SMI_ACCESSORS(StackFrameInfo, flag, kFlagOffset)
BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, kIsWasmBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_user_java_script, kIsUserJavaScriptBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_toplevel, kIsToplevelBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_async, kIsAsyncBit)
+BOOL_ACCESSORS(StackFrameInfo, flag, is_promise_all, kIsPromiseAllBit)
OBJECT_CONSTRUCTORS_IMPL(StackTraceFrame, Struct)
NEVER_READ_ONLY_SPACE_IMPL(StackTraceFrame)
diff --git a/deps/v8/src/objects/stack-frame-info.cc b/deps/v8/src/objects/stack-frame-info.cc
index 45ab671ee5..f427d7eae2 100644
--- a/deps/v8/src/objects/stack-frame-info.cc
+++ b/deps/v8/src/objects/stack-frame-info.cc
@@ -10,59 +10,77 @@ namespace v8 {
namespace internal {
int StackTraceFrame::GetLineNumber(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
int line = GetFrameInfo(frame)->line_number();
return line != StackFrameBase::kNone ? line : Message::kNoLineNumberInfo;
}
int StackTraceFrame::GetColumnNumber(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
int column = GetFrameInfo(frame)->column_number();
return column != StackFrameBase::kNone ? column : Message::kNoColumnInfo;
}
int StackTraceFrame::GetScriptId(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
int id = GetFrameInfo(frame)->script_id();
return id != StackFrameBase::kNone ? id : Message::kNoScriptIdInfo;
}
+int StackTraceFrame::GetPromiseAllIndex(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->promise_all_index();
+}
+
Handle<Object> StackTraceFrame::GetFileName(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
auto name = GetFrameInfo(frame)->script_name();
return handle(name, frame->GetIsolate());
}
Handle<Object> StackTraceFrame::GetScriptNameOrSourceUrl(
Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
auto name = GetFrameInfo(frame)->script_name_or_source_url();
return handle(name, frame->GetIsolate());
}
Handle<Object> StackTraceFrame::GetFunctionName(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
auto name = GetFrameInfo(frame)->function_name();
return handle(name, frame->GetIsolate());
}
+Handle<Object> StackTraceFrame::GetWasmModuleName(
+ Handle<StackTraceFrame> frame) {
+ auto module = GetFrameInfo(frame)->wasm_module_name();
+ return handle(module, frame->GetIsolate());
+}
+
bool StackTraceFrame::IsEval(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
return GetFrameInfo(frame)->is_eval();
}
bool StackTraceFrame::IsConstructor(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
return GetFrameInfo(frame)->is_constructor();
}
bool StackTraceFrame::IsWasm(Handle<StackTraceFrame> frame) {
- if (frame->frame_info()->IsUndefined()) InitializeFrameInfo(frame);
return GetFrameInfo(frame)->is_wasm();
}
+bool StackTraceFrame::IsUserJavaScript(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_user_java_script();
+}
+
+bool StackTraceFrame::IsToplevel(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_toplevel();
+}
+
+bool StackTraceFrame::IsAsync(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_async();
+}
+
+bool StackTraceFrame::IsPromiseAll(Handle<StackTraceFrame> frame) {
+ return GetFrameInfo(frame)->is_promise_all();
+}
+
Handle<StackFrameInfo> StackTraceFrame::GetFrameInfo(
Handle<StackTraceFrame> frame) {
+ if (frame->frame_info().IsUndefined()) InitializeFrameInfo(frame);
return handle(StackFrameInfo::cast(frame->frame_info()), frame->GetIsolate());
}
diff --git a/deps/v8/src/objects/stack-frame-info.h b/deps/v8/src/objects/stack-frame-info.h
index cf1d4b0e2d..44826f67e6 100644
--- a/deps/v8/src/objects/stack-frame-info.h
+++ b/deps/v8/src/objects/stack-frame-info.h
@@ -21,12 +21,18 @@ class StackFrameInfo : public Struct {
DECL_INT_ACCESSORS(line_number)
DECL_INT_ACCESSORS(column_number)
DECL_INT_ACCESSORS(script_id)
+ DECL_INT_ACCESSORS(promise_all_index)
DECL_ACCESSORS(script_name, Object)
DECL_ACCESSORS(script_name_or_source_url, Object)
DECL_ACCESSORS(function_name, Object)
+ DECL_ACCESSORS(wasm_module_name, Object)
DECL_BOOLEAN_ACCESSORS(is_eval)
DECL_BOOLEAN_ACCESSORS(is_constructor)
DECL_BOOLEAN_ACCESSORS(is_wasm)
+ DECL_BOOLEAN_ACCESSORS(is_user_java_script)
+ DECL_BOOLEAN_ACCESSORS(is_toplevel)
+ DECL_BOOLEAN_ACCESSORS(is_async)
+ DECL_BOOLEAN_ACCESSORS(is_promise_all)
DECL_INT_ACCESSORS(flag)
DECL_CAST(StackFrameInfo)
@@ -43,6 +49,10 @@ class StackFrameInfo : public Struct {
static const int kIsEvalBit = 0;
static const int kIsConstructorBit = 1;
static const int kIsWasmBit = 2;
+ static const int kIsUserJavaScriptBit = 3;
+ static const int kIsToplevelBit = 4;
+ static const int kIsAsyncBit = 5;
+ static const int kIsPromiseAllBit = 6;
OBJECT_CONSTRUCTORS(StackFrameInfo, Struct);
};
@@ -72,14 +82,20 @@ class StackTraceFrame : public Struct {
static int GetLineNumber(Handle<StackTraceFrame> frame);
static int GetColumnNumber(Handle<StackTraceFrame> frame);
static int GetScriptId(Handle<StackTraceFrame> frame);
+ static int GetPromiseAllIndex(Handle<StackTraceFrame> frame);
static Handle<Object> GetFileName(Handle<StackTraceFrame> frame);
static Handle<Object> GetScriptNameOrSourceUrl(Handle<StackTraceFrame> frame);
static Handle<Object> GetFunctionName(Handle<StackTraceFrame> frame);
+ static Handle<Object> GetWasmModuleName(Handle<StackTraceFrame> frame);
static bool IsEval(Handle<StackTraceFrame> frame);
static bool IsConstructor(Handle<StackTraceFrame> frame);
static bool IsWasm(Handle<StackTraceFrame> frame);
+ static bool IsUserJavaScript(Handle<StackTraceFrame> frame);
+ static bool IsToplevel(Handle<StackTraceFrame> frame);
+ static bool IsAsync(Handle<StackTraceFrame> frame);
+ static bool IsPromiseAll(Handle<StackTraceFrame> frame);
private:
OBJECT_CONSTRUCTORS(StackTraceFrame, Struct);
diff --git a/deps/v8/src/objects/string-comparator.cc b/deps/v8/src/objects/string-comparator.cc
index b29f9c3d7b..6f517edb20 100644
--- a/deps/v8/src/objects/string-comparator.cc
+++ b/deps/v8/src/objects/string-comparator.cc
@@ -40,7 +40,7 @@ void StringComparator::State::Advance(int consumed) {
}
bool StringComparator::Equals(String string_1, String string_2) {
- int length = string_1->length();
+ int length = string_1.length();
state_1_.Init(string_1);
state_2_.Init(string_2);
while (true) {
diff --git a/deps/v8/src/objects/string-comparator.h b/deps/v8/src/objects/string-comparator.h
index 5b4354deeb..8cee98a642 100644
--- a/deps/v8/src/objects/string-comparator.h
+++ b/deps/v8/src/objects/string-comparator.h
@@ -6,9 +6,9 @@
#define V8_OBJECTS_STRING_COMPARATOR_H_
#include "src/base/logging.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/string.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/objects/string-inl.h b/deps/v8/src/objects/string-inl.h
index f3a4f5908b..0d8f83ca86 100644
--- a/deps/v8/src/objects/string-inl.h
+++ b/deps/v8/src/objects/string-inl.h
@@ -7,14 +7,14 @@
#include "src/objects/string.h"
-#include "src/conversions-inl.h"
-#include "src/handles-inl.h"
-#include "src/hash-seed-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
+#include "src/numbers/conversions-inl.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/objects/name-inl.h"
#include "src/objects/smi-inl.h"
#include "src/objects/string-table-inl.h"
-#include "src/string-hasher-inl.h"
+#include "src/strings/string-hasher-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -58,13 +58,12 @@ CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(String)
CAST_ACCESSOR(ThinString)
-StringShape::StringShape(const String str)
- : type_(str->map()->instance_type()) {
+StringShape::StringShape(const String str) : type_(str.map().instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
-StringShape::StringShape(Map map) : type_(map->instance_type()) {
+StringShape::StringShape(Map map) : type_(map.instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
@@ -149,18 +148,18 @@ STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
bool String::IsOneByteRepresentation() const {
- uint32_t type = map()->instance_type();
+ uint32_t type = map().instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
bool String::IsTwoByteRepresentation() const {
- uint32_t type = map()->instance_type();
+ uint32_t type = map().instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
bool String::IsOneByteRepresentationUnderneath(String string) {
while (true) {
- uint32_t type = string.map()->instance_type();
+ uint32_t type = string.map().instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
DCHECK(string.IsFlat());
@@ -195,30 +194,51 @@ Char FlatStringReader::Get(int index) {
}
template <typename Char>
-class SequentialStringKey : public StringTableKey {
+class SequentialStringKey final : public StringTableKey {
public:
- explicit SequentialStringKey(Vector<const Char> string, uint64_t seed)
- : StringTableKey(StringHasher::HashSequentialString<Char>(
- string.start(), string.length(), seed)),
- string_(string) {}
-
- Vector<const Char> string_;
-};
-
-class OneByteStringKey : public SequentialStringKey<uint8_t> {
- public:
- OneByteStringKey(Vector<const uint8_t> str, uint64_t seed)
- : SequentialStringKey<uint8_t>(str, seed) {}
+ SequentialStringKey(const Vector<const Char>& chars, uint64_t seed,
+ bool convert = false)
+ : SequentialStringKey(StringHasher::HashSequentialString<Char>(
+ chars.begin(), chars.length(), seed),
+ chars, convert) {}
+
+ SequentialStringKey(int hash, const Vector<const Char>& chars,
+ bool convert = false)
+ : StringTableKey(hash, chars.length()),
+ chars_(chars),
+ convert_(convert) {}
+
+ bool IsMatch(String s) override {
+ DisallowHeapAllocation no_gc;
+ if (s.IsOneByteRepresentation()) {
+ const uint8_t* chars = s.GetChars<uint8_t>(no_gc);
+ return CompareChars(chars, chars_.begin(), chars_.length()) == 0;
+ }
+ const uint16_t* chars = s.GetChars<uint16_t>(no_gc);
+ return CompareChars(chars, chars_.begin(), chars_.length()) == 0;
+ }
- bool IsMatch(Object string) override {
- return String::cast(string)->IsOneByteEqualTo(string_);
+ Handle<String> AsHandle(Isolate* isolate) override {
+ if (sizeof(Char) == 1) {
+ return isolate->factory()->NewOneByteInternalizedString(
+ Vector<const uint8_t>::cast(chars_), hash_field());
+ }
+ return isolate->factory()->NewTwoByteInternalizedString(
+ Vector<const uint16_t>::cast(chars_), hash_field());
}
- Handle<String> AsHandle(Isolate* isolate) override;
+ private:
+ Vector<const Char> chars_;
+ bool convert_;
};
-class SeqOneByteSubStringKey : public StringTableKey {
+using OneByteStringKey = SequentialStringKey<uint8_t>;
+using TwoByteStringKey = SequentialStringKey<uint16_t>;
+
+template <typename SeqString>
+class SeqSubStringKey final : public StringTableKey {
public:
+ using Char = typename SeqString::Char;
// VS 2017 on official builds gives this spurious warning:
// warning C4789: buffer 'key' of size 16 bytes will be overrun; 4 bytes will
// be written starting at offset 16
@@ -227,68 +247,69 @@ class SeqOneByteSubStringKey : public StringTableKey {
#pragma warning(push)
#pragma warning(disable : 4789)
#endif
- SeqOneByteSubStringKey(Isolate* isolate, Handle<SeqOneByteString> string,
- int from, int length)
- : StringTableKey(0), string_(string), from_(from), length_(length) {
+ SeqSubStringKey(Isolate* isolate, Handle<SeqString> string, int from, int len,
+ bool convert = false)
+ : StringTableKey(0, len),
+ string_(string),
+ from_(from),
+ convert_(convert) {
// We have to set the hash later.
DisallowHeapAllocation no_gc;
uint32_t hash = StringHasher::HashSequentialString(
- string->GetChars(no_gc) + from, length, HashSeed(isolate));
+ string->GetChars(no_gc) + from, len, HashSeed(isolate));
set_hash_field(hash);
- DCHECK_LE(0, length_);
- DCHECK_LE(from_ + length_, string_->length());
- DCHECK(string_->IsSeqOneByteString());
+ DCHECK_LE(0, length());
+ DCHECK_LE(from_ + length(), string_->length());
+ DCHECK_EQ(string_->IsSeqOneByteString(), sizeof(Char) == 1);
+ DCHECK_EQ(string_->IsSeqTwoByteString(), sizeof(Char) == 2);
}
#if defined(V8_CC_MSVC)
#pragma warning(pop)
#endif
- bool IsMatch(Object string) override;
- Handle<String> AsHandle(Isolate* isolate) override;
-
- private:
- Handle<SeqOneByteString> string_;
- int from_;
- int length_;
-};
-
-class TwoByteStringKey : public SequentialStringKey<uc16> {
- public:
- explicit TwoByteStringKey(Vector<const uc16> str, uint64_t seed)
- : SequentialStringKey<uc16>(str, seed) {}
-
- bool IsMatch(Object string) override {
- return String::cast(string)->IsTwoByteEqualTo(string_);
- }
-
- Handle<String> AsHandle(Isolate* isolate) override;
-};
-
-// Utf8StringKey carries a vector of chars as key.
-class Utf8StringKey : public StringTableKey {
- public:
- explicit Utf8StringKey(Vector<const char> string, uint64_t seed)
- : StringTableKey(StringHasher::ComputeUtf8Hash(string, seed, &chars_)),
- string_(string) {}
-
- bool IsMatch(Object string) override {
- return String::cast(string)->IsUtf8EqualTo(string_);
+ bool IsMatch(String string) override {
+ DisallowHeapAllocation no_gc;
+ if (string.IsOneByteRepresentation()) {
+ const uint8_t* data = string.GetChars<uint8_t>(no_gc);
+ return CompareChars(string_->GetChars(no_gc) + from_, data, length()) ==
+ 0;
+ }
+ const uint16_t* data = string.GetChars<uint16_t>(no_gc);
+ return CompareChars(string_->GetChars(no_gc) + from_, data, length()) == 0;
}
Handle<String> AsHandle(Isolate* isolate) override {
- return isolate->factory()->NewInternalizedStringFromUtf8(string_, chars_,
- HashField());
+ if (sizeof(Char) == 1 || (sizeof(Char) == 2 && convert_)) {
+ Handle<SeqOneByteString> result =
+ isolate->factory()->AllocateRawOneByteInternalizedString(
+ length(), hash_field());
+ DisallowHeapAllocation no_gc;
+ CopyChars(result->GetChars(no_gc), string_->GetChars(no_gc) + from_,
+ length());
+ return result;
+ }
+ Handle<SeqTwoByteString> result =
+ isolate->factory()->AllocateRawTwoByteInternalizedString(length(),
+ hash_field());
+ DisallowHeapAllocation no_gc;
+ CopyChars(result->GetChars(no_gc), string_->GetChars(no_gc) + from_,
+ length());
+ return result;
}
private:
- Vector<const char> string_;
- int chars_; // Caches the number of characters when computing the hash code.
+ Handle<typename CharTraits<Char>::String> string_;
+ int from_;
+ bool convert_;
};
+using SeqOneByteSubStringKey = SeqSubStringKey<SeqOneByteString>;
+using SeqTwoByteSubStringKey = SeqSubStringKey<SeqTwoByteString>;
+
bool String::Equals(String other) {
if (other == *this) return true;
- if (this->IsInternalizedString() && other->IsInternalizedString()) {
+ if (this->IsInternalizedString() && other.IsInternalizedString()) {
return false;
}
return SlowEquals(other);
@@ -302,6 +323,13 @@ bool String::Equals(Isolate* isolate, Handle<String> one, Handle<String> two) {
return SlowEquals(isolate, one, two);
}
+template <typename Char>
+const Char* String::GetChars(const DisallowHeapAllocation& no_gc) {
+ return StringShape(*this).IsExternal()
+ ? CharTraits<Char>::ExternalString::cast(*this).GetChars()
+ : CharTraits<Char>::String::cast(*this).GetChars(no_gc);
+}
+
Handle<String> String::Flatten(Isolate* isolate, Handle<String> string,
AllocationType allocation) {
if (string->IsConsString()) {
@@ -323,24 +351,22 @@ uint16_t String::Get(int index) {
DCHECK(index >= 0 && index < length());
switch (StringShape(*this).full_representation_tag()) {
case kSeqStringTag | kOneByteStringTag:
- return SeqOneByteString::cast(*this)->SeqOneByteStringGet(index);
+ return SeqOneByteString::cast(*this).Get(index);
case kSeqStringTag | kTwoByteStringTag:
- return SeqTwoByteString::cast(*this)->SeqTwoByteStringGet(index);
+ return SeqTwoByteString::cast(*this).Get(index);
case kConsStringTag | kOneByteStringTag:
case kConsStringTag | kTwoByteStringTag:
- return ConsString::cast(*this)->ConsStringGet(index);
+ return ConsString::cast(*this).Get(index);
case kExternalStringTag | kOneByteStringTag:
- return ExternalOneByteString::cast(*this)->ExternalOneByteStringGet(
- index);
+ return ExternalOneByteString::cast(*this).Get(index);
case kExternalStringTag | kTwoByteStringTag:
- return ExternalTwoByteString::cast(*this)->ExternalTwoByteStringGet(
- index);
+ return ExternalTwoByteString::cast(*this).Get(index);
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag:
- return SlicedString::cast(*this)->SlicedStringGet(index);
+ return SlicedString::cast(*this).Get(index);
case kThinStringTag | kOneByteStringTag:
case kThinStringTag | kTwoByteStringTag:
- return ThinString::cast(*this)->ThinStringGet(index);
+ return ThinString::cast(*this).Get(index);
default:
break;
}
@@ -353,13 +379,13 @@ void String::Set(int index, uint16_t value) {
DCHECK(StringShape(*this).IsSequential());
return this->IsOneByteRepresentation()
- ? SeqOneByteString::cast(*this)->SeqOneByteStringSet(index, value)
- : SeqTwoByteString::cast(*this)->SeqTwoByteStringSet(index, value);
+ ? SeqOneByteString::cast(*this).SeqOneByteStringSet(index, value)
+ : SeqTwoByteString::cast(*this).SeqTwoByteStringSet(index, value);
}
bool String::IsFlat() {
if (!StringShape(*this).IsCons()) return true;
- return ConsString::cast(*this)->second()->length() == 0;
+ return ConsString::cast(*this).second().length() == 0;
}
String String::GetUnderlying() {
@@ -380,40 +406,40 @@ ConsString String::VisitFlat(Visitor* visitor, String string,
const int offset) {
DisallowHeapAllocation no_gc;
int slice_offset = offset;
- const int length = string->length();
+ const int length = string.length();
DCHECK(offset <= length);
while (true) {
- int32_t type = string->map()->instance_type();
+ int32_t type = string.map().instance_type();
switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
case kSeqStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- SeqOneByteString::cast(string)->GetChars(no_gc) + slice_offset,
+ SeqOneByteString::cast(string).GetChars(no_gc) + slice_offset,
length - offset);
return ConsString();
case kSeqStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
- SeqTwoByteString::cast(string)->GetChars(no_gc) + slice_offset,
+ SeqTwoByteString::cast(string).GetChars(no_gc) + slice_offset,
length - offset);
return ConsString();
case kExternalStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- ExternalOneByteString::cast(string)->GetChars() + slice_offset,
+ ExternalOneByteString::cast(string).GetChars() + slice_offset,
length - offset);
return ConsString();
case kExternalStringTag | kTwoByteStringTag:
visitor->VisitTwoByteString(
- ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
+ ExternalTwoByteString::cast(string).GetChars() + slice_offset,
length - offset);
return ConsString();
case kSlicedStringTag | kOneByteStringTag:
case kSlicedStringTag | kTwoByteStringTag: {
SlicedString slicedString = SlicedString::cast(string);
- slice_offset += slicedString->offset();
- string = slicedString->parent();
+ slice_offset += slicedString.offset();
+ string = slicedString.parent();
continue;
}
@@ -423,7 +449,7 @@ ConsString String::VisitFlat(Visitor* visitor, String string,
case kThinStringTag | kOneByteStringTag:
case kThinStringTag | kTwoByteStringTag:
- string = ThinString::cast(string)->actual();
+ string = ThinString::cast(string).actual();
continue;
default:
@@ -455,15 +481,14 @@ uint32_t String::ToValidIndex(Object number) {
return index;
}
-uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
+uint8_t SeqOneByteString::Get(int index) {
DCHECK(index >= 0 && index < length());
- return READ_BYTE_FIELD(*this, kHeaderSize + index * kCharSize);
+ return ReadField<byte>(kHeaderSize + index * kCharSize);
}
void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
- WRITE_BYTE_FIELD(*this, kHeaderSize + index * kCharSize,
- static_cast<byte>(value));
+ WriteField<byte>(kHeaderSize + index * kCharSize, static_cast<byte>(value));
}
Address SeqOneByteString::GetCharsAddress() {
@@ -484,14 +509,14 @@ uc16* SeqTwoByteString::GetChars(const DisallowHeapAllocation& no_gc) {
return reinterpret_cast<uc16*>(FIELD_ADDR(*this, kHeaderSize));
}
-uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
+uint16_t SeqTwoByteString::Get(int index) {
DCHECK(index >= 0 && index < length());
- return READ_UINT16_FIELD(*this, kHeaderSize + index * kShortSize);
+ return ReadField<uint16_t>(kHeaderSize + index * kShortSize);
}
void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
DCHECK(index >= 0 && index < length());
- WRITE_UINT16_FIELD(*this, kHeaderSize + index * kShortSize, value);
+ WriteField<uint16_t>(kHeaderSize + index * kShortSize, value);
}
int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
@@ -508,7 +533,7 @@ String SlicedString::parent() {
void SlicedString::set_parent(Isolate* isolate, String parent,
WriteBarrierMode mode) {
- DCHECK(parent->IsSeqString() || parent->IsExternalString());
+ DCHECK(parent.IsSeqString() || parent.IsExternalString());
WRITE_FIELD(*this, kParentOffset, parent);
CONDITIONAL_WRITE_BARRIER(*this, kParentOffset, parent, mode);
}
@@ -548,53 +573,52 @@ HeapObject ThinString::unchecked_actual() const {
}
bool ExternalString::is_uncached() const {
- InstanceType type = map()->instance_type();
+ InstanceType type = map().instance_type();
return (type & kUncachedExternalStringMask) == kUncachedExternalStringTag;
}
Address ExternalString::resource_as_address() {
- return READ_UINTPTR_FIELD(*this, kResourceOffset);
+ return ReadField<Address>(kResourceOffset);
}
void ExternalString::set_address_as_resource(Address address) {
- WRITE_UINTPTR_FIELD(*this, kResourceOffset, address);
+ WriteField<Address>(kResourceOffset, address);
if (IsExternalOneByteString()) {
- ExternalOneByteString::cast(*this)->update_data_cache();
+ ExternalOneByteString::cast(*this).update_data_cache();
} else {
- ExternalTwoByteString::cast(*this)->update_data_cache();
+ ExternalTwoByteString::cast(*this).update_data_cache();
}
}
uint32_t ExternalString::resource_as_uint32() {
- return static_cast<uint32_t>(READ_UINTPTR_FIELD(*this, kResourceOffset));
+ return static_cast<uint32_t>(ReadField<Address>(kResourceOffset));
}
void ExternalString::set_uint32_as_resource(uint32_t value) {
- WRITE_UINTPTR_FIELD(*this, kResourceOffset, value);
+ WriteField<Address>(kResourceOffset, value);
if (is_uncached()) return;
- WRITE_UINTPTR_FIELD(*this, kResourceDataOffset, kNullAddress);
+ WriteField<Address>(kResourceDataOffset, kNullAddress);
}
void ExternalString::DisposeResource() {
v8::String::ExternalStringResourceBase* resource =
reinterpret_cast<v8::String::ExternalStringResourceBase*>(
- READ_UINTPTR_FIELD(*this, ExternalString::kResourceOffset));
+ ReadField<Address>(ExternalString::kResourceOffset));
// Dispose of the C++ object if it has not already been disposed.
if (resource != nullptr) {
resource->Dispose();
- WRITE_UINTPTR_FIELD(*this, ExternalString::kResourceOffset, kNullAddress);
+ WriteField<Address>(ExternalString::kResourceOffset, kNullAddress);
}
}
const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
- return reinterpret_cast<Resource*>(
- READ_UINTPTR_FIELD(*this, kResourceOffset));
+ return reinterpret_cast<Resource*>(ReadField<Address>(kResourceOffset));
}
void ExternalOneByteString::update_data_cache() {
if (is_uncached()) return;
- WRITE_UINTPTR_FIELD(*this, kResourceDataOffset,
+ WriteField<Address>(kResourceDataOffset,
reinterpret_cast<Address>(resource()->data()));
}
@@ -609,8 +633,7 @@ void ExternalOneByteString::SetResource(
void ExternalOneByteString::set_resource(
const ExternalOneByteString::Resource* resource) {
- WRITE_UINTPTR_FIELD(*this, kResourceOffset,
- reinterpret_cast<Address>(resource));
+ WriteField<Address>(kResourceOffset, reinterpret_cast<Address>(resource));
if (resource != nullptr) update_data_cache();
}
@@ -618,19 +641,18 @@ const uint8_t* ExternalOneByteString::GetChars() {
return reinterpret_cast<const uint8_t*>(resource()->data());
}
-uint16_t ExternalOneByteString::ExternalOneByteStringGet(int index) {
+uint8_t ExternalOneByteString::Get(int index) {
DCHECK(index >= 0 && index < length());
return GetChars()[index];
}
const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
- return reinterpret_cast<Resource*>(
- READ_UINTPTR_FIELD(*this, kResourceOffset));
+ return reinterpret_cast<Resource*>(ReadField<Address>(kResourceOffset));
}
void ExternalTwoByteString::update_data_cache() {
if (is_uncached()) return;
- WRITE_UINTPTR_FIELD(*this, kResourceDataOffset,
+ WriteField<Address>(kResourceDataOffset,
reinterpret_cast<Address>(resource()->data()));
}
@@ -645,14 +667,13 @@ void ExternalTwoByteString::SetResource(
void ExternalTwoByteString::set_resource(
const ExternalTwoByteString::Resource* resource) {
- WRITE_UINTPTR_FIELD(*this, kResourceOffset,
- reinterpret_cast<Address>(resource));
+ WriteField<Address>(kResourceOffset, reinterpret_cast<Address>(resource));
if (resource != nullptr) update_data_cache();
}
const uint16_t* ExternalTwoByteString::GetChars() { return resource()->data(); }
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
+uint16_t ExternalTwoByteString::Get(int index) {
DCHECK(index >= 0 && index < length());
return GetChars()[index];
}
@@ -745,7 +766,7 @@ SubStringRange::SubStringRange(String string,
int length)
: string_(string),
first_(first),
- length_(length == -1 ? string->length() : length),
+ length_(length == -1 ? string.length() : length),
no_gc_(no_gc) {}
class SubStringRange::iterator final {
@@ -775,7 +796,7 @@ class SubStringRange::iterator final {
friend class String;
friend class SubStringRange;
iterator(String from, int offset, const DisallowHeapAllocation& no_gc)
- : content_(from->GetFlatContent(no_gc)), offset_(offset) {}
+ : content_(from.GetFlatContent(no_gc)), offset_(offset) {}
String::FlatContent content_;
int offset_;
};
diff --git a/deps/v8/src/objects/string-table-inl.h b/deps/v8/src/objects/string-table-inl.h
index 199f0a0f6b..1b7b7d140c 100644
--- a/deps/v8/src/objects/string-table-inl.h
+++ b/deps/v8/src/objects/string-table-inl.h
@@ -28,24 +28,39 @@ StringSet::StringSet(Address ptr) : HashTable<StringSet, StringSetShape>(ptr) {
}
bool StringSetShape::IsMatch(String key, Object value) {
- DCHECK(value->IsString());
- return key->Equals(String::cast(value));
+ DCHECK(value.IsString());
+ return key.Equals(String::cast(value));
}
uint32_t StringSetShape::Hash(Isolate* isolate, String key) {
- return key->Hash();
+ return key.Hash();
}
uint32_t StringSetShape::HashForObject(ReadOnlyRoots roots, Object object) {
- return String::cast(object)->Hash();
+ return String::cast(object).Hash();
}
-StringTableKey::StringTableKey(uint32_t hash_field)
- : HashTableKey(hash_field >> Name::kHashShift), hash_field_(hash_field) {}
+bool StringTableShape::IsMatch(Key key, Object value) {
+ String string = String::cast(value);
+ if (string.hash_field() != key->hash_field()) return false;
+ if (string.length() != key->length()) return false;
+ return key->IsMatch(string);
+}
+
+StringTableKey::StringTableKey(uint32_t hash_field, int length)
+ : hash_field_(hash_field), length_(length) {}
void StringTableKey::set_hash_field(uint32_t hash_field) {
hash_field_ = hash_field;
- set_hash(hash_field >> Name::kHashShift);
+}
+
+uint32_t StringTableKey::hash() const {
+ return hash_field_ >> Name::kHashShift;
+}
+
+// static
+uint32_t StringTableShape::Hash(Isolate* isolate, Key key) {
+ return key->hash();
}
Handle<Object> StringTableShape::AsHandle(Isolate* isolate,
@@ -54,7 +69,7 @@ Handle<Object> StringTableShape::AsHandle(Isolate* isolate,
}
uint32_t StringTableShape::HashForObject(ReadOnlyRoots roots, Object object) {
- return String::cast(object)->Hash();
+ return String::cast(object).Hash();
}
RootIndex StringTableShape::GetMapRootIndex() {
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index e71a3a1341..6279137b1f 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -6,7 +6,7 @@
#define V8_OBJECTS_STRING_TABLE_H_
#include "src/objects/hash-table.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -14,30 +14,34 @@
namespace v8 {
namespace internal {
-class StringTableKey : public HashTableKey {
+class StringTableKey {
public:
- explicit inline StringTableKey(uint32_t hash_field);
+ virtual ~StringTableKey() {}
+ inline StringTableKey(uint32_t hash_field, int length);
virtual Handle<String> AsHandle(Isolate* isolate) = 0;
- uint32_t HashField() const {
+ uint32_t hash_field() const {
DCHECK_NE(0, hash_field_);
return hash_field_;
}
+ virtual bool IsMatch(String string) = 0;
+ inline uint32_t hash() const;
+ int length() const { return length_; }
+
protected:
inline void set_hash_field(uint32_t hash_field);
private:
uint32_t hash_field_ = 0;
+ int length_;
};
class StringTableShape : public BaseShape<StringTableKey*> {
public:
- static inline bool IsMatch(Key key, Object value) {
- return key->IsMatch(value);
- }
+ static inline bool IsMatch(Key key, Object value);
- static inline uint32_t Hash(Isolate* isolate, Key key) { return key->Hash(); }
+ static inline uint32_t Hash(Isolate* isolate, Key key);
static inline uint32_t HashForObject(ReadOnlyRoots roots, Object object);
@@ -61,20 +65,15 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
// added. The return value is the string found.
V8_EXPORT_PRIVATE static Handle<String> LookupString(Isolate* isolate,
Handle<String> key);
+ template <typename StringTableKey>
static Handle<String> LookupKey(Isolate* isolate, StringTableKey* key);
static Handle<String> AddKeyNoResize(Isolate* isolate, StringTableKey* key);
- static String ForwardStringIfExists(Isolate* isolate, StringTableKey* key,
- String string);
// Shink the StringTable if it's very empty (kMaxEmptyFactor) to avoid the
// performance overhead of re-allocating the StringTable over and over again.
static Handle<StringTable> CautiousShrink(Isolate* isolate,
Handle<StringTable> table);
- // Looks up a string that is equal to the given string and returns
- // string handle if it is found, or an empty handle otherwise.
- V8_WARN_UNUSED_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists(
- Isolate* isolate, uint16_t c1, uint16_t c2);
// {raw_string} must be a tagged String pointer.
// Returns a tagged pointer: either an internalized string, or a Smi
// sentinel.
@@ -90,7 +89,7 @@ class StringTable : public HashTable<StringTable, StringTableShape> {
static const int kMinShrinkCapacity = kMinCapacity;
private:
- template <bool seq_one_byte>
+ template <typename char_type>
friend class JsonParser;
OBJECT_CONSTRUCTORS(StringTable, HashTable<StringTable, StringTableShape>);
diff --git a/deps/v8/src/objects/string.cc b/deps/v8/src/objects/string.cc
index 22157a3500..cc513f88cb 100644
--- a/deps/v8/src/objects/string.cc
+++ b/deps/v8/src/objects/string.cc
@@ -4,34 +4,35 @@
#include "src/objects/string.h"
-#include "src/char-predicates.h"
-#include "src/conversions.h"
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h" // For LooksValid implementation.
+#include "src/heap/read-only-heap.h"
+#include "src/numbers/conversions.h"
#include "src/objects/map.h"
#include "src/objects/oddball.h"
#include "src/objects/string-comparator.h"
#include "src/objects/string-inl.h"
-#include "src/ostreams.h"
-#include "src/string-builder-inl.h"
-#include "src/string-hasher.h"
-#include "src/string-search.h"
-#include "src/string-stream.h"
-#include "src/unicode-inl.h"
+#include "src/strings/char-predicates.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-hasher.h"
+#include "src/strings/string-search.h"
+#include "src/strings/string-stream.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
AllocationType allocation) {
- DCHECK_NE(cons->second()->length(), 0);
+ DCHECK_NE(cons->second().length(), 0);
// TurboFan can create cons strings with empty first parts.
- while (cons->first()->length() == 0) {
+ while (cons->first().length() == 0) {
// We do not want to call this function recursively. Therefore we call
// String::Flatten only in those cases where String::SlowFlatten is not
// called again.
- if (cons->second()->IsConsString() && !cons->second()->IsFlat()) {
+ if (cons->second().IsConsString() && !cons->second().IsFlat()) {
cons = handle(ConsString::cast(cons->second()), isolate);
} else {
return String::Flatten(isolate, handle(cons->second(), isolate));
@@ -66,6 +67,66 @@ Handle<String> String::SlowFlatten(Isolate* isolate, Handle<ConsString> cons,
return result;
}
+namespace {
+
+template <class StringClass>
+void MigrateExternalStringResource(Isolate* isolate, String from, String to) {
+ StringClass cast_from = StringClass::cast(from);
+ StringClass cast_to = StringClass::cast(to);
+ const typename StringClass::Resource* to_resource = cast_to.resource();
+ if (to_resource == nullptr) {
+ // |to| is a just-created internalized copy of |from|. Migrate the resource.
+ cast_to.SetResource(isolate, cast_from.resource());
+ // Zap |from|'s resource pointer to reflect the fact that |from| has
+ // relinquished ownership of its resource.
+ isolate->heap()->UpdateExternalString(
+ from, ExternalString::cast(from).ExternalPayloadSize(), 0);
+ cast_from.SetResource(isolate, nullptr);
+ } else if (to_resource != cast_from.resource()) {
+ // |to| already existed and has its own resource. Finalize |from|.
+ isolate->heap()->FinalizeExternalString(from);
+ }
+}
+
+} // namespace
+
+void String::MakeThin(Isolate* isolate, String internalized) {
+ DisallowHeapAllocation no_gc;
+ DCHECK_NE(*this, internalized);
+ DCHECK(internalized.IsInternalizedString());
+
+ if (this->IsExternalString()) {
+ if (internalized.IsExternalOneByteString()) {
+ MigrateExternalStringResource<ExternalOneByteString>(isolate, *this,
+ internalized);
+ } else if (internalized.IsExternalTwoByteString()) {
+ MigrateExternalStringResource<ExternalTwoByteString>(isolate, *this,
+ internalized);
+ } else {
+ // If the external string is duped into an existing non-external
+ // internalized string, free its resource (it's about to be rewritten
+ // into a ThinString below).
+ isolate->heap()->FinalizeExternalString(*this);
+ }
+ }
+
+ int old_size = this->Size();
+ isolate->heap()->NotifyObjectLayoutChange(*this, old_size, no_gc);
+ bool one_byte = internalized.IsOneByteRepresentation();
+ Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
+ : isolate->factory()->thin_string_map();
+ DCHECK_GE(old_size, ThinString::kSize);
+ this->synchronized_set_map(*map);
+ ThinString thin = ThinString::cast(*this);
+ thin.set_actual(internalized);
+ Address thin_end = thin.address() + ThinString::kSize;
+ int size_delta = old_size - ThinString::kSize;
+ if (size_delta != 0) {
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(thin_end, size_delta, ClearRecordedSlots::kNo);
+ }
+}
+
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
DisallowHeapAllocation no_allocation;
// Externalizing twice leaks the external resource, so it's
@@ -77,8 +138,8 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Assert that the resource and the string are equivalent.
DCHECK(static_cast<size_t>(this->length()) == resource->length());
ScopedVector<uc16> smart_chars(this->length());
- String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
- DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
+ String::WriteToFlat(*this, smart_chars.begin(), 0, this->length());
+ DCHECK_EQ(0, memcmp(smart_chars.begin(), resource->data(),
resource->length() * sizeof(smart_chars[0])));
}
#endif // DEBUG
@@ -103,7 +164,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// strings in generated code, we need to bailout to runtime.
Map new_map;
ReadOnlyRoots roots(heap);
- if (size < ExternalString::kSize) {
+ if (size < ExternalString::kSizeOfAllExternalStrings) {
if (is_internalized) {
new_map = roots.uncached_external_internalized_string_map();
} else {
@@ -127,9 +188,9 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
- self->SetResource(isolate, resource);
+ self.SetResource(isolate, resource);
heap->RegisterExternalString(*this);
- if (is_internalized) self->Hash(); // Force regeneration of the hash value.
+ if (is_internalized) self.Hash(); // Force regeneration of the hash value.
return true;
}
@@ -145,12 +206,12 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
DCHECK(static_cast<size_t>(this->length()) == resource->length());
if (this->IsTwoByteRepresentation()) {
ScopedVector<uint16_t> smart_chars(this->length());
- String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
- DCHECK(String::IsOneByte(smart_chars.start(), this->length()));
+ String::WriteToFlat(*this, smart_chars.begin(), 0, this->length());
+ DCHECK(String::IsOneByte(smart_chars.begin(), this->length()));
}
ScopedVector<char> smart_chars(this->length());
- String::WriteToFlat(*this, smart_chars.start(), 0, this->length());
- DCHECK_EQ(0, memcmp(smart_chars.start(), resource->data(),
+ String::WriteToFlat(*this, smart_chars.begin(), 0, this->length());
+ DCHECK_EQ(0, memcmp(smart_chars.begin(), resource->data(),
resource->length() * sizeof(smart_chars[0])));
}
#endif // DEBUG
@@ -177,7 +238,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// strings in generated code, we need to bailout to runtime.
Map new_map;
ReadOnlyRoots roots(heap);
- if (size < ExternalString::kSize) {
+ if (size < ExternalString::kSizeOfAllExternalStrings) {
new_map = is_internalized
? roots.uncached_external_one_byte_internalized_string_map()
: roots.uncached_external_one_byte_string_map();
@@ -200,15 +261,15 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
this->synchronized_set_map(new_map);
ExternalOneByteString self = ExternalOneByteString::cast(*this);
- self->SetResource(isolate, resource);
+ self.SetResource(isolate, resource);
heap->RegisterExternalString(*this);
- if (is_internalized) self->Hash(); // Force regeneration of the hash value.
+ if (is_internalized) self.Hash(); // Force regeneration of the hash value.
return true;
}
bool String::SupportsExternalization() {
if (this->IsThinString()) {
- return i::ThinString::cast(*this)->actual()->SupportsExternalization();
+ return i::ThinString::cast(*this).actual().SupportsExternalization();
}
Isolate* isolate;
@@ -295,7 +356,6 @@ void String::StringShortPrint(StringStream* accumulator, bool show_details) {
}
if (show_details) accumulator->Put('>');
}
- return;
}
void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
@@ -337,7 +397,7 @@ bool String::LooksValid() {
// basically the same logic as the way we access the heap in the first place.
MemoryChunk* chunk = MemoryChunk::FromHeapObject(*this);
// RO_SPACE objects should always be valid.
- if (chunk->owner()->identity() == RO_SPACE) return true;
+ if (ReadOnlyHeap::Contains(*this)) return true;
if (chunk->heap() == nullptr) return false;
return chunk->heap()->Contains(*this);
}
@@ -435,22 +495,22 @@ String::FlatContent String::GetFlatContent(
int offset = 0;
if (shape.representation_tag() == kConsStringTag) {
ConsString cons = ConsString::cast(string);
- if (cons->second()->length() != 0) {
+ if (cons.second().length() != 0) {
return FlatContent();
}
- string = cons->first();
+ string = cons.first();
shape = StringShape(string);
} else if (shape.representation_tag() == kSlicedStringTag) {
SlicedString slice = SlicedString::cast(string);
- offset = slice->offset();
- string = slice->parent();
+ offset = slice.offset();
+ string = slice.parent();
shape = StringShape(string);
DCHECK(shape.representation_tag() != kConsStringTag &&
shape.representation_tag() != kSlicedStringTag);
}
if (shape.representation_tag() == kThinStringTag) {
ThinString thin = ThinString::cast(string);
- string = thin->actual();
+ string = thin.actual();
shape = StringShape(string);
DCHECK(!shape.IsCons());
DCHECK(!shape.IsSliced());
@@ -458,18 +518,18 @@ String::FlatContent String::GetFlatContent(
if (shape.encoding_tag() == kOneByteStringTag) {
const uint8_t* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqOneByteString::cast(string)->GetChars(no_gc);
+ start = SeqOneByteString::cast(string).GetChars(no_gc);
} else {
- start = ExternalOneByteString::cast(string)->GetChars();
+ start = ExternalOneByteString::cast(string).GetChars();
}
return FlatContent(start + offset, length);
} else {
DCHECK_EQ(shape.encoding_tag(), kTwoByteStringTag);
const uc16* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqTwoByteString::cast(string)->GetChars(no_gc);
+ start = SeqTwoByteString::cast(string).GetChars(no_gc);
} else {
- start = ExternalTwoByteString::cast(string)->GetChars();
+ start = ExternalTwoByteString::cast(string).GetChars();
}
return FlatContent(start + offset, length);
}
@@ -533,38 +593,40 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
int from = f;
int to = t;
while (true) {
- DCHECK(0 <= from && from <= to && to <= source->length());
+ DCHECK_LE(0, from);
+ DCHECK_LE(from, to);
+ DCHECK_LE(to, source.length());
switch (StringShape(source).full_representation_tag()) {
case kOneByteStringTag | kExternalStringTag: {
- CopyChars(sink, ExternalOneByteString::cast(source)->GetChars() + from,
+ CopyChars(sink, ExternalOneByteString::cast(source).GetChars() + from,
to - from);
return;
}
case kTwoByteStringTag | kExternalStringTag: {
- const uc16* data = ExternalTwoByteString::cast(source)->GetChars();
+ const uc16* data = ExternalTwoByteString::cast(source).GetChars();
CopyChars(sink, data + from, to - from);
return;
}
case kOneByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqOneByteString::cast(source)->GetChars(no_gc) + from,
+ CopyChars(sink, SeqOneByteString::cast(source).GetChars(no_gc) + from,
to - from);
return;
}
case kTwoByteStringTag | kSeqStringTag: {
- CopyChars(sink, SeqTwoByteString::cast(source)->GetChars(no_gc) + from,
+ CopyChars(sink, SeqTwoByteString::cast(source).GetChars(no_gc) + from,
to - from);
return;
}
case kOneByteStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString cons_string = ConsString::cast(source);
- String first = cons_string->first();
- int boundary = first->length();
+ String first = cons_string.first();
+ int boundary = first.length();
if (to - boundary >= boundary - from) {
// Right hand side is longer. Recurse over left.
if (from < boundary) {
WriteToFlat(first, sink, from, boundary);
- if (from == 0 && cons_string->second() == first) {
+ if (from == 0 && cons_string.second() == first) {
CopyChars(sink + boundary, sink, boundary);
return;
}
@@ -574,19 +636,19 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
from -= boundary;
}
to -= boundary;
- source = cons_string->second();
+ source = cons_string.second();
} else {
// Left hand side is longer. Recurse over right.
if (to > boundary) {
- String second = cons_string->second();
+ String second = cons_string.second();
// When repeatedly appending to a string, we get a cons string that
// is unbalanced to the left, a list, essentially. We inline the
// common case of sequential one-byte right child.
if (to - boundary == 1) {
- sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
- } else if (second->IsSeqOneByteString()) {
+ sink[boundary - from] = static_cast<sinkchar>(second.Get(0));
+ } else if (second.IsSeqOneByteString()) {
CopyChars(sink + boundary - from,
- SeqOneByteString::cast(second)->GetChars(no_gc),
+ SeqOneByteString::cast(second).GetChars(no_gc),
to - boundary);
} else {
WriteToFlat(second, sink + boundary - from, 0, to - boundary);
@@ -600,13 +662,13 @@ void String::WriteToFlat(String src, sinkchar* sink, int f, int t) {
case kOneByteStringTag | kSlicedStringTag:
case kTwoByteStringTag | kSlicedStringTag: {
SlicedString slice = SlicedString::cast(source);
- unsigned offset = slice->offset();
- WriteToFlat(slice->parent(), sink, from + offset, to + offset);
+ unsigned offset = slice.offset();
+ WriteToFlat(slice.parent(), sink, from + offset, to + offset);
return;
}
case kOneByteStringTag | kThinStringTag:
case kTwoByteStringTag | kThinStringTag:
- source = ThinString::cast(source)->actual();
+ source = ThinString::cast(source).actual();
break;
}
}
@@ -667,15 +729,15 @@ bool String::SlowEquals(String other) {
DisallowHeapAllocation no_gc;
// Fast check: negative check with lengths.
int len = length();
- if (len != other->length()) return false;
+ if (len != other.length()) return false;
if (len == 0) return true;
// Fast check: if at least one ThinString is involved, dereference it/them
// and restart.
- if (this->IsThinString() || other->IsThinString()) {
- if (other->IsThinString()) other = ThinString::cast(other)->actual();
+ if (this->IsThinString() || other.IsThinString()) {
+ if (other.IsThinString()) other = ThinString::cast(other).actual();
if (this->IsThinString()) {
- return ThinString::cast(*this)->actual()->Equals(other);
+ return ThinString::cast(*this).actual().Equals(other);
} else {
return this->Equals(other);
}
@@ -683,13 +745,13 @@ bool String::SlowEquals(String other) {
// Fast check: if hash code is computed for both strings
// a fast negative check can be performed.
- if (HasHashCode() && other->HasHashCode()) {
+ if (HasHashCode() && other.HasHashCode()) {
#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
- if (Hash() != other->Hash()) {
+ if (Hash() != other.Hash()) {
bool found_difference = false;
for (int i = 0; i < len; i++) {
- if (Get(i) != other->Get(i)) {
+ if (Get(i) != other.Get(i)) {
found_difference = true;
break;
}
@@ -698,16 +760,16 @@ bool String::SlowEquals(String other) {
}
}
#endif
- if (Hash() != other->Hash()) return false;
+ if (Hash() != other.Hash()) return false;
}
// We know the strings are both non-empty. Compare the first chars
// before we try to flatten the strings.
- if (this->Get(0) != other->Get(0)) return false;
+ if (this->Get(0) != other.Get(0)) return false;
- if (IsSeqOneByteString() && other->IsSeqOneByteString()) {
- const uint8_t* str1 = SeqOneByteString::cast(*this)->GetChars(no_gc);
- const uint8_t* str2 = SeqOneByteString::cast(other)->GetChars(no_gc);
+ if (IsSeqOneByteString() && other.IsSeqOneByteString()) {
+ const uint8_t* str1 = SeqOneByteString::cast(*this).GetChars(no_gc);
+ const uint8_t* str2 = SeqOneByteString::cast(other).GetChars(no_gc);
return CompareRawStringContents(str1, str2, len);
}
@@ -726,9 +788,9 @@ bool String::SlowEquals(Isolate* isolate, Handle<String> one,
// and restart.
if (one->IsThinString() || two->IsThinString()) {
if (one->IsThinString())
- one = handle(ThinString::cast(*one)->actual(), isolate);
+ one = handle(ThinString::cast(*one).actual(), isolate);
if (two->IsThinString())
- two = handle(ThinString::cast(*two)->actual(), isolate);
+ two = handle(ThinString::cast(*two).actual(), isolate);
return String::Equals(isolate, one, two);
}
@@ -764,8 +826,8 @@ bool String::SlowEquals(Isolate* isolate, Handle<String> one,
String::FlatContent flat2 = two->GetFlatContent(no_gc);
if (flat1.IsOneByte() && flat2.IsOneByte()) {
- return CompareRawStringContents(flat1.ToOneByteVector().start(),
- flat2.ToOneByteVector().start(),
+ return CompareRawStringContents(flat1.ToOneByteVector().begin(),
+ flat2.ToOneByteVector().begin(),
one_length);
} else {
for (int i = 0; i < one_length; i++) {
@@ -815,19 +877,19 @@ ComparisonResult String::Compare(Isolate* isolate, Handle<String> x,
Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
if (y_content.IsOneByte()) {
Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ r = CompareChars(x_chars.begin(), y_chars.begin(), prefix_length);
} else {
Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ r = CompareChars(x_chars.begin(), y_chars.begin(), prefix_length);
}
} else {
Vector<const uc16> x_chars = x_content.ToUC16Vector();
if (y_content.IsOneByte()) {
Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ r = CompareChars(x_chars.begin(), y_chars.begin(), prefix_length);
} else {
Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ r = CompareChars(x_chars.begin(), y_chars.begin(), prefix_length);
}
}
if (r < 0) {
@@ -1180,26 +1242,6 @@ Object String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
return Smi::FromInt(last_index);
}
-bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
- int slen = length();
- // Can't check exact length equality, but we can check bounds.
- int str_len = str.length();
- if (!allow_prefix_match &&
- (str_len < slen ||
- str_len > slen * static_cast<int>(unibrow::Utf8::kMaxEncodedSize))) {
- return false;
- }
-
- int i = 0;
- unibrow::Utf8Iterator it = unibrow::Utf8Iterator(str);
- while (i < slen && !it.Done()) {
- if (Get(i++) != *it) return false;
- ++it;
- }
-
- return (allow_prefix_match || i == slen) && it.Done();
-}
-
template <>
bool String::IsEqualTo(Vector<const uint8_t> str) {
return IsOneByteEqualTo(str);
@@ -1210,16 +1252,28 @@ bool String::IsEqualTo(Vector<const uc16> str) {
return IsTwoByteEqualTo(str);
}
+bool String::HasOneBytePrefix(Vector<const char> str) {
+ int slen = str.length();
+ if (slen > length()) return false;
+ DisallowHeapAllocation no_gc;
+ FlatContent content = GetFlatContent(no_gc);
+ if (content.IsOneByte()) {
+ return CompareChars(content.ToOneByteVector().begin(), str.begin(), slen) ==
+ 0;
+ }
+ return CompareChars(content.ToUC16Vector().begin(), str.begin(), slen) == 0;
+}
+
bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
int slen = length();
if (str.length() != slen) return false;
DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent(no_gc);
if (content.IsOneByte()) {
- return CompareChars(content.ToOneByteVector().start(), str.start(), slen) ==
+ return CompareChars(content.ToOneByteVector().begin(), str.begin(), slen) ==
0;
}
- return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
+ return CompareChars(content.ToUC16Vector().begin(), str.begin(), slen) == 0;
}
bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
@@ -1228,20 +1282,67 @@ bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
DisallowHeapAllocation no_gc;
FlatContent content = GetFlatContent(no_gc);
if (content.IsOneByte()) {
- return CompareChars(content.ToOneByteVector().start(), str.start(), slen) ==
+ return CompareChars(content.ToOneByteVector().begin(), str.begin(), slen) ==
0;
}
- return CompareChars(content.ToUC16Vector().start(), str.start(), slen) == 0;
+ return CompareChars(content.ToUC16Vector().begin(), str.begin(), slen) == 0;
+}
+
+namespace {
+
+template <typename Char>
+uint32_t HashString(String string, size_t start, int length, uint64_t seed) {
+ DisallowHeapAllocation no_gc;
+
+ if (length > String::kMaxHashCalcLength) {
+ return StringHasher::GetTrivialHash(length);
+ }
+
+ std::unique_ptr<Char[]> buffer;
+ const Char* chars;
+
+ if (string.IsConsString()) {
+ DCHECK_EQ(0, start);
+ DCHECK(!string.IsFlat());
+ buffer.reset(new Char[length]);
+ String::WriteToFlat(string, buffer.get(), 0, length);
+ chars = buffer.get();
+ } else {
+ chars = string.GetChars<Char>(no_gc) + start;
+ }
+
+ return StringHasher::HashSequentialString<Char>(chars, length, seed);
}
+} // namespace
+
uint32_t String::ComputeAndSetHash() {
DisallowHeapAllocation no_gc;
// Should only be called if hash code has not yet been computed.
DCHECK(!HasHashCode());
// Store the hash code in the object.
- uint32_t field =
- IteratingStringHasher::Hash(*this, HashSeed(GetReadOnlyRoots()));
+ uint64_t seed = HashSeed(GetReadOnlyRoots());
+ size_t start = 0;
+ String string = *this;
+ if (string.IsSlicedString()) {
+ SlicedString sliced = SlicedString::cast(string);
+ start = sliced.offset();
+ string = sliced.parent();
+ }
+ if (string.IsConsString() && string.IsFlat()) {
+ string = ConsString::cast(string).first();
+ }
+ if (string.IsThinString()) {
+ string = ThinString::cast(string).actual();
+ if (length() == string.length()) {
+ set_hash_field(string.hash_field());
+ return hash_field() >> kHashShift;
+ }
+ }
+ uint32_t field = string.IsOneByteRepresentation()
+ ? HashString<uint8_t>(string, start, length(), seed)
+ : HashString<uint16_t>(string, start, length(), seed);
set_hash_field(field);
// Check the hash code is there.
@@ -1325,13 +1426,13 @@ void SeqTwoByteString::clear_padding() {
SizeFor(length()) - data_size);
}
-uint16_t ConsString::ConsStringGet(int index) {
+uint16_t ConsString::Get(int index) {
DCHECK(index >= 0 && index < this->length());
// Check for a flattened cons string
- if (second()->length() == 0) {
+ if (second().length() == 0) {
String left = first();
- return left->Get(index);
+ return left.Get(index);
}
String string = String::cast(*this);
@@ -1339,26 +1440,24 @@ uint16_t ConsString::ConsStringGet(int index) {
while (true) {
if (StringShape(string).IsCons()) {
ConsString cons_string = ConsString::cast(string);
- String left = cons_string->first();
- if (left->length() > index) {
+ String left = cons_string.first();
+ if (left.length() > index) {
string = left;
} else {
- index -= left->length();
- string = cons_string->second();
+ index -= left.length();
+ string = cons_string.second();
}
} else {
- return string->Get(index);
+ return string.Get(index);
}
}
UNREACHABLE();
}
-uint16_t ThinString::ThinStringGet(int index) { return actual()->Get(index); }
+uint16_t ThinString::Get(int index) { return actual().Get(index); }
-uint16_t SlicedString::SlicedStringGet(int index) {
- return parent()->Get(offset() + index);
-}
+uint16_t SlicedString::Get(int index) { return parent().Get(offset() + index); }
int ExternalString::ExternalPayloadSize() const {
int length_multiplier = IsTwoByteRepresentation() ? i::kShortSize : kCharSize;
@@ -1375,7 +1474,7 @@ FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
str_(nullptr),
is_one_byte_(true),
length_(input.length()),
- start_(input.start()) {}
+ start_(input.begin()) {}
void FlatStringReader::PostGarbageCollection() {
if (str_ == nullptr) return;
@@ -1387,9 +1486,9 @@ void FlatStringReader::PostGarbageCollection() {
DCHECK(content.IsFlat());
is_one_byte_ = content.IsOneByte();
if (is_one_byte_) {
- start_ = content.ToOneByteVector().start();
+ start_ = content.ToOneByteVector().begin();
} else {
- start_ = content.ToUC16Vector().start();
+ start_ = content.ToUC16Vector().begin();
}
}
@@ -1430,13 +1529,13 @@ String ConsStringIterator::Search(int* offset_out) {
int offset = 0;
while (true) {
// Loop until the string is found which contains the target offset.
- String string = cons_string->first();
- int length = string->length();
+ String string = cons_string.first();
+ int length = string.length();
int32_t type;
if (consumed < offset + length) {
// Target offset is in the left branch.
// Keep going if we're still in a ConString.
- type = string->map()->instance_type();
+ type = string.map().instance_type();
if ((type & kStringRepresentationMask) == kConsStringTag) {
cons_string = ConsString::cast(string);
PushLeft(cons_string);
@@ -1449,15 +1548,15 @@ String ConsStringIterator::Search(int* offset_out) {
// Update progress through the string.
offset += length;
// Keep going if we're still in a ConString.
- string = cons_string->second();
- type = string->map()->instance_type();
+ string = cons_string.second();
+ type = string.map().instance_type();
if ((type & kStringRepresentationMask) == kConsStringTag) {
cons_string = ConsString::cast(string);
PushRight(cons_string);
continue;
}
// Need this to be updated for the current string.
- length = string->length();
+ length = string.length();
// Account for the possibility of an empty right leaf.
// This happens only if we have asked for an offset outside the string.
if (length == 0) {
@@ -1493,12 +1592,12 @@ String ConsStringIterator::NextLeaf(bool* blew_stack) {
}
// Go right.
ConsString cons_string = frames_[OffsetForDepth(depth_ - 1)];
- String string = cons_string->second();
- int32_t type = string->map()->instance_type();
+ String string = cons_string.second();
+ int32_t type = string.map().instance_type();
if ((type & kStringRepresentationMask) != kConsStringTag) {
// Pop stack so next iteration is in correct place.
Pop();
- int length = string->length();
+ int length = string.length();
// Could be a flattened ConsString.
if (length == 0) continue;
consumed_ += length;
@@ -1509,11 +1608,11 @@ String ConsStringIterator::NextLeaf(bool* blew_stack) {
// Need to traverse all the way left.
while (true) {
// Continue left.
- string = cons_string->first();
- type = string->map()->instance_type();
+ string = cons_string.first();
+ type = string.map().instance_type();
if ((type & kStringRepresentationMask) != kConsStringTag) {
AdjustMaximumDepth();
- int length = string->length();
+ int length = string.length();
if (length == 0) break; // Skip empty left-hand sides of ConsStrings.
consumed_ += length;
return string;
diff --git a/deps/v8/src/objects/string.h b/deps/v8/src/objects/string.h
index 7c6616a6f6..74fc8fa763 100644
--- a/deps/v8/src/objects/string.h
+++ b/deps/v8/src/objects/string.h
@@ -10,7 +10,7 @@
#include "src/objects/instance-type.h"
#include "src/objects/name.h"
#include "src/objects/smi.h"
-#include "src/unicode-decoder.h"
+#include "src/strings/unicode-decoder.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -142,10 +142,16 @@ class String : public Name {
friend class IterableSubString;
};
+ void MakeThin(Isolate* isolate, String canonical);
+
template <typename Char>
V8_INLINE Vector<const Char> GetCharVector(
const DisallowHeapAllocation& no_gc);
+ // Get chars from sequential or external strings.
+ template <typename Char>
+ inline const Char* GetChars(const DisallowHeapAllocation& no_gc);
+
// Get and set the length of the string.
inline int length() const;
inline void set_length(int value);
@@ -268,14 +274,16 @@ class String : public Name {
inline bool Equals(String other);
inline static bool Equals(Isolate* isolate, Handle<String> one,
Handle<String> two);
- V8_EXPORT_PRIVATE bool IsUtf8EqualTo(Vector<const char> str,
- bool allow_prefix_match = false);
// Dispatches to Is{One,Two}ByteEqualTo.
template <typename Char>
bool IsEqualTo(Vector<const Char> str);
+ V8_EXPORT_PRIVATE bool HasOneBytePrefix(Vector<const char> str);
V8_EXPORT_PRIVATE bool IsOneByteEqualTo(Vector<const uint8_t> str);
+ V8_EXPORT_PRIVATE bool IsOneByteEqualTo(Vector<const char> str) {
+ return IsOneByteEqualTo(Vector<const uint8_t>::cast(str));
+ }
bool IsTwoByteEqualTo(Vector<const uc16> str);
// Return a UTF8 representation of the string. The string is null
@@ -333,8 +341,6 @@ class String : public Name {
DEFINE_FIELD_OFFSET_CONSTANTS(Name::kHeaderSize,
TORQUE_GENERATED_STRING_FIELDS)
- static const int kHeaderSize = kSize;
-
// Max char codes.
static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
@@ -366,35 +372,46 @@ class String : public Name {
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
static void WriteToFlat(String source, sinkchar* sink, int from, int to);
- // The return value may point to the first aligned word containing the first
- // non-one-byte character, rather than directly to the non-one-byte character.
- // If the return value is >= the passed length, the entire string was
- // one-byte.
- static inline int NonAsciiStart(const char* chars, int length) {
- const char* start = chars;
- const char* limit = chars + length;
-
- if (length >= kIntptrSize) {
- // Check unaligned bytes.
- while (!IsAligned(reinterpret_cast<intptr_t>(chars), sizeof(uintptr_t))) {
- if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
+ static inline bool IsAscii(const char* chars, int length) {
+ return IsAscii(reinterpret_cast<const uint8_t*>(chars), length);
+ }
+
+ static inline bool IsAscii(const uint8_t* chars, int length) {
+ return NonAsciiStart(chars, length) >= length;
+ }
+
+ static inline int NonOneByteStart(const uc16* chars, int length) {
+ DCHECK(IsAligned(reinterpret_cast<Address>(chars), sizeof(uc16)));
+ const uint16_t* start = chars;
+ const uint16_t* limit = chars + length;
+
+ if (static_cast<size_t>(length) >= kUIntptrSize) {
+ // Check unaligned chars.
+ while (!IsAligned(reinterpret_cast<Address>(chars), kUIntptrSize)) {
+ if (*chars > unibrow::Latin1::kMaxChar) {
return static_cast<int>(chars - start);
}
++chars;
}
+
// Check aligned words.
- DCHECK_EQ(unibrow::Utf8::kMaxOneByteChar, 0x7F);
- const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
+ STATIC_ASSERT(unibrow::Latin1::kMaxChar == 0xFF);
+#ifdef V8_TARGET_LITTLE_ENDIAN
+ const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFFFF * 0xFF00;
+#else
+ const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFFFF * 0x00FF;
+#endif
while (chars + sizeof(uintptr_t) <= limit) {
if (*reinterpret_cast<const uintptr_t*>(chars) & non_one_byte_mask) {
- return static_cast<int>(chars - start);
+ break;
}
- chars += sizeof(uintptr_t);
+ chars += (sizeof(uintptr_t) / sizeof(uc16));
}
}
- // Check remaining unaligned bytes.
+
+ // Check remaining unaligned chars, or find non-one-byte char in word.
while (chars < limit) {
- if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
+ if (*chars > unibrow::Latin1::kMaxChar) {
return static_cast<int>(chars - start);
}
++chars;
@@ -403,25 +420,6 @@ class String : public Name {
return static_cast<int>(chars - start);
}
- static inline bool IsAscii(const char* chars, int length) {
- return NonAsciiStart(chars, length) >= length;
- }
-
- static inline bool IsAscii(const uint8_t* chars, int length) {
- return NonAsciiStart(reinterpret_cast<const char*>(chars), length) >=
- length;
- }
-
- static inline int NonOneByteStart(const uc16* chars, int length) {
- const uc16* limit = chars + length;
- const uc16* start = chars;
- while (chars < limit) {
- if (*chars > kMaxOneByteCharCodeU) return static_cast<int>(chars - start);
- ++chars;
- }
- return static_cast<int>(chars - start);
- }
-
static inline bool IsOneByte(const uc16* chars, int length) {
return NonOneByteStart(chars, length) >= length;
}
@@ -505,9 +503,10 @@ class InternalizedString : public String {
class SeqOneByteString : public SeqString {
public:
static const bool kHasOneByteEncoding = true;
+ using Char = uint8_t;
// Dispatched behavior.
- inline uint16_t SeqOneByteStringGet(int index);
+ inline uint8_t Get(int index);
inline void SeqOneByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
@@ -546,9 +545,10 @@ class SeqOneByteString : public SeqString {
class SeqTwoByteString : public SeqString {
public:
static const bool kHasOneByteEncoding = false;
+ using Char = uint16_t;
// Dispatched behavior.
- inline uint16_t SeqTwoByteStringGet(int index);
+ inline uint16_t Get(int index);
inline void SeqTwoByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
@@ -610,7 +610,7 @@ class ConsString : public String {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
- V8_EXPORT_PRIVATE uint16_t ConsStringGet(int index);
+ V8_EXPORT_PRIVATE uint16_t Get(int index);
DECL_CAST(ConsString)
@@ -642,7 +642,7 @@ class ThinString : public String {
inline void set_actual(String s,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- V8_EXPORT_PRIVATE uint16_t ThinStringGet(int index);
+ V8_EXPORT_PRIVATE uint16_t Get(int index);
DECL_CAST(ThinString)
DECL_VERIFIER(ThinString)
@@ -676,7 +676,7 @@ class SlicedString : public String {
inline void set_offset(int offset);
// Dispatched behavior.
- V8_EXPORT_PRIVATE uint16_t SlicedStringGet(int index);
+ V8_EXPORT_PRIVATE uint16_t Get(int index);
DECL_CAST(SlicedString)
@@ -728,6 +728,7 @@ class ExternalString : public String {
inline void DisposeResource();
STATIC_ASSERT(kResourceOffset == Internals::kStringResourceOffset);
+ static const int kSizeOfAllExternalStrings = kHeaderSize;
OBJECT_CONSTRUCTORS(ExternalString, String);
};
@@ -758,12 +759,18 @@ class ExternalOneByteString : public ExternalString {
inline const uint8_t* GetChars();
// Dispatched behavior.
- inline uint16_t ExternalOneByteStringGet(int index);
+ inline uint8_t Get(int index);
DECL_CAST(ExternalOneByteString)
class BodyDescriptor;
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ ExternalString::kHeaderSize,
+ TORQUE_GENERATED_EXTERNAL_ONE_BYTE_STRING_FIELDS)
+
+ STATIC_ASSERT(kSize == kSizeOfAllExternalStrings);
+
OBJECT_CONSTRUCTORS(ExternalOneByteString, ExternalString);
};
@@ -793,7 +800,7 @@ class ExternalTwoByteString : public ExternalString {
inline const uint16_t* GetChars();
// Dispatched behavior.
- inline uint16_t ExternalTwoByteStringGet(int index);
+ inline uint16_t Get(int index);
// For regexp code.
inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
@@ -802,6 +809,12 @@ class ExternalTwoByteString : public ExternalString {
class BodyDescriptor;
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ ExternalString::kHeaderSize,
+ TORQUE_GENERATED_EXTERNAL_TWO_BYTE_STRING_FIELDS)
+
+ STATIC_ASSERT(kSize == kSizeOfAllExternalStrings);
+
OBJECT_CONSTRUCTORS(ExternalTwoByteString, ExternalString);
};
@@ -895,6 +908,21 @@ class StringCharacterStream {
DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
};
+template <typename Char>
+struct CharTraits;
+
+template <>
+struct CharTraits<uint8_t> {
+ using String = SeqOneByteString;
+ using ExternalString = ExternalOneByteString;
+};
+
+template <>
+struct CharTraits<uint16_t> {
+ using String = SeqTwoByteString;
+ using ExternalString = ExternalTwoByteString;
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/struct-inl.h b/deps/v8/src/objects/struct-inl.h
index 9502698058..47d55a876f 100644
--- a/deps/v8/src/objects/struct-inl.h
+++ b/deps/v8/src/objects/struct-inl.h
@@ -8,9 +8,10 @@
#include "src/objects/struct.h"
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
+#include "torque-generated/class-definitions-tq-inl.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,20 +19,14 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(Struct, HeapObject)
-// TODO(jkummerow): Fix IsTuple2() and IsTuple3() to be subclassing-aware,
-// or rethink this more generally (see crbug.com/v8/8516).
-Tuple2::Tuple2(Address ptr) : Struct(ptr) {}
-Tuple3::Tuple3(Address ptr) : Tuple2(ptr) {}
+TQ_OBJECT_CONSTRUCTORS_IMPL(Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple2)
+TQ_OBJECT_CONSTRUCTORS_IMPL(Tuple3)
OBJECT_CONSTRUCTORS_IMPL(AccessorPair, Struct)
OBJECT_CONSTRUCTORS_IMPL(ClassPositions, Struct)
CAST_ACCESSOR(AccessorPair)
-CAST_ACCESSOR(Struct)
-CAST_ACCESSOR(Tuple2)
-CAST_ACCESSOR(Tuple3)
-
CAST_ACCESSOR(ClassPositions)
void Struct::InitializeBody(int object_size) {
@@ -41,10 +36,6 @@ void Struct::InitializeBody(int object_size) {
}
}
-ACCESSORS(Tuple2, value1, Object, kValue1Offset)
-ACCESSORS(Tuple2, value2, Object, kValue2Offset)
-ACCESSORS(Tuple3, value3, Object, kValue3Offset)
-
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -64,8 +55,8 @@ void AccessorPair::set(AccessorComponent component, Object value) {
}
void AccessorPair::SetComponents(Object getter, Object setter) {
- if (!getter->IsNull()) set_getter(getter);
- if (!setter->IsNull()) set_setter(setter);
+ if (!getter.IsNull()) set_getter(getter);
+ if (!setter.IsNull()) set_setter(setter);
}
bool AccessorPair::Equals(Object getter_value, Object setter_value) {
diff --git a/deps/v8/src/objects/struct.h b/deps/v8/src/objects/struct.h
index cab41665bd..b01a33561b 100644
--- a/deps/v8/src/objects/struct.h
+++ b/deps/v8/src/objects/struct.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_STRUCT_H_
#define V8_OBJECTS_STRUCT_H_
-#include "src/objects.h"
#include "src/objects/heap-object.h"
-#include "torque-generated/class-definitions-from-dsl.h"
+#include "src/objects/objects.h"
+#include "torque-generated/class-definitions-tq.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -18,47 +18,26 @@ namespace internal {
// An abstract superclass, a marker class really, for simple structure classes.
// It doesn't carry much functionality but allows struct classes to be
// identified in the type system.
-class Struct : public HeapObject {
+class Struct : public TorqueGeneratedStruct<Struct, HeapObject> {
public:
inline void InitializeBody(int object_size);
- DECL_CAST(Struct)
void BriefPrintDetails(std::ostream& os);
- OBJECT_CONSTRUCTORS(Struct, HeapObject);
+ TQ_OBJECT_CONSTRUCTORS(Struct)
};
-class Tuple2 : public Struct {
+class Tuple2 : public TorqueGeneratedTuple2<Tuple2, Struct> {
public:
- DECL_ACCESSORS(value1, Object)
- DECL_ACCESSORS(value2, Object)
-
- DECL_CAST(Tuple2)
-
- // Dispatched behavior.
- DECL_PRINTER(Tuple2)
- DECL_VERIFIER(Tuple2)
void BriefPrintDetails(std::ostream& os);
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_TUPLE2_FIELDS)
-
- OBJECT_CONSTRUCTORS(Tuple2, Struct);
+ TQ_OBJECT_CONSTRUCTORS(Tuple2)
};
-class Tuple3 : public Tuple2 {
+class Tuple3 : public TorqueGeneratedTuple3<Tuple3, Tuple2> {
public:
- DECL_ACCESSORS(value3, Object)
-
- DECL_CAST(Tuple3)
-
- // Dispatched behavior.
- DECL_PRINTER(Tuple3)
- DECL_VERIFIER(Tuple3)
void BriefPrintDetails(std::ostream& os);
- DEFINE_FIELD_OFFSET_CONSTANTS(Tuple2::kSize, TORQUE_GENERATED_TUPLE3_FIELDS)
-
- OBJECT_CONSTRUCTORS(Tuple3, Tuple2);
+ TQ_OBJECT_CONSTRUCTORS(Tuple3)
};
// Support for JavaScript accessors: A pair of a getter and a setter. Each
diff --git a/deps/v8/src/objects/tagged-impl-inl.h b/deps/v8/src/objects/tagged-impl-inl.h
new file mode 100644
index 0000000000..f735a241a8
--- /dev/null
+++ b/deps/v8/src/objects/tagged-impl-inl.h
@@ -0,0 +1,257 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_IMPL_INL_H_
+#define V8_OBJECTS_TAGGED_IMPL_INL_H_
+
+#include "src/objects/tagged-impl.h"
+
+#ifdef V8_COMPRESS_POINTERS
+#include "src/execution/isolate.h"
+#endif
+#include "src/common/ptr-compr-inl.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/smi.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::ToSmi(Smi* value) const {
+ if (HAS_SMI_TAG(ptr_)) {
+ *value = ToSmi();
+ return true;
+ }
+ return false;
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+Smi TaggedImpl<kRefType, StorageType>::ToSmi() const {
+ DCHECK(HAS_SMI_TAG(ptr_));
+ if (kIsFull) {
+ return Smi(ptr_);
+ }
+ // Implementation for compressed pointers.
+ return Smi(DecompressTaggedSigned(static_cast<Tagged_t>(ptr_)));
+}
+
+//
+// TaggedImpl::GetHeapObject(HeapObject* result) implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ HeapObject* result) const {
+ CHECK(kIsFull);
+ if (!IsStrongOrWeak()) return false;
+ *result = GetHeapObject();
+ return true;
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ ROOT_PARAM, HeapObject* result) const {
+ if (kIsFull) return GetHeapObject(result);
+ // Implementation for compressed pointers.
+ if (!IsStrongOrWeak()) return false;
+ *result = GetHeapObject(ROOT_VALUE);
+ return true;
+}
+
+//
+// TaggedImpl::GetHeapObject(HeapObject* result,
+// HeapObjectReferenceType* reference_type)
+// implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ HeapObject* result, HeapObjectReferenceType* reference_type) const {
+ CHECK(kIsFull);
+ if (!IsStrongOrWeak()) return false;
+ *reference_type = IsWeakOrCleared() ? HeapObjectReferenceType::WEAK
+ : HeapObjectReferenceType::STRONG;
+ *result = GetHeapObject();
+ return true;
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObject(
+ ROOT_PARAM, HeapObject* result,
+ HeapObjectReferenceType* reference_type) const {
+ if (kIsFull) return GetHeapObject(result, reference_type);
+ // Implementation for compressed pointers.
+ if (!IsStrongOrWeak()) return false;
+ *reference_type = IsWeakOrCleared() ? HeapObjectReferenceType::WEAK
+ : HeapObjectReferenceType::STRONG;
+ *result = GetHeapObject(ROOT_VALUE);
+ return true;
+}
+
+//
+// TaggedImpl::GetHeapObjectIfStrong(HeapObject* result) implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
+ HeapObject* result) const {
+ CHECK(kIsFull);
+ if (IsStrong()) {
+ *result = HeapObject::cast(Object(ptr_));
+ return true;
+ }
+ return false;
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfStrong(
+ ROOT_PARAM, HeapObject* result) const {
+ if (kIsFull) return GetHeapObjectIfStrong(result);
+ // Implementation for compressed pointers.
+ if (IsStrong()) {
+ *result =
+ HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ return true;
+ }
+ return false;
+}
+
+//
+// TaggedImpl::GetHeapObjectAssumeStrong() implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong()
+ const {
+ CHECK(kIsFull);
+ DCHECK(IsStrong());
+ return HeapObject::cast(Object(ptr_));
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeStrong(
+ ROOT_PARAM) const {
+ if (kIsFull) return GetHeapObjectAssumeStrong();
+ // Implementation for compressed pointers.
+ DCHECK(IsStrong());
+ return HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+}
+
+//
+// TaggedImpl::GetHeapObjectIfWeak(HeapObject* result) implementation
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfWeak(
+ HeapObject* result) const {
+ CHECK(kIsFull);
+ if (kCanBeWeak) {
+ if (IsWeak()) {
+ *result = GetHeapObject();
+ return true;
+ }
+ return false;
+ } else {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return false;
+ }
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+bool TaggedImpl<kRefType, StorageType>::GetHeapObjectIfWeak(
+ ROOT_PARAM, HeapObject* result) const {
+ if (kIsFull) return GetHeapObjectIfWeak(result);
+ // Implementation for compressed pointers.
+ if (kCanBeWeak) {
+ if (IsWeak()) {
+ *result = GetHeapObject(ROOT_VALUE);
+ return true;
+ }
+ return false;
+ } else {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return false;
+ }
+}
+
+//
+// TaggedImpl::GetHeapObjectAssumeWeak() implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeWeak() const {
+ CHECK(kIsFull);
+ DCHECK(IsWeak());
+ return GetHeapObject();
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObjectAssumeWeak(
+ ROOT_PARAM) const {
+ if (kIsFull) return GetHeapObjectAssumeWeak();
+ // Implementation for compressed pointers.
+ DCHECK(IsWeak());
+ return GetHeapObject(ROOT_VALUE);
+}
+
+//
+// TaggedImpl::GetHeapObject() implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject() const {
+ CHECK(kIsFull);
+ DCHECK(!IsSmi());
+ if (kCanBeWeak) {
+ DCHECK(!IsCleared());
+ return HeapObject::cast(Object(ptr_ & ~kWeakHeapObjectMask));
+ } else {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return HeapObject::cast(Object(ptr_));
+ }
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+HeapObject TaggedImpl<kRefType, StorageType>::GetHeapObject(ROOT_PARAM) const {
+ if (kIsFull) return GetHeapObject();
+ // Implementation for compressed pointers.
+ DCHECK(!IsSmi());
+ if (kCanBeWeak) {
+ DCHECK(!IsCleared());
+ return HeapObject::cast(Object(
+ DecompressTaggedPointer(ROOT_VALUE, ptr_ & ~kWeakHeapObjectMask)));
+ } else {
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return HeapObject::cast(Object(DecompressTaggedPointer(ROOT_VALUE, ptr_)));
+ }
+}
+
+//
+// TaggedImpl::GetHeapObjectOrSmi() implementation.
+//
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi() const {
+ CHECK(kIsFull);
+ if (IsSmi()) {
+ return Object(ptr_);
+ }
+ return GetHeapObject();
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+Object TaggedImpl<kRefType, StorageType>::GetHeapObjectOrSmi(ROOT_PARAM) const {
+ if (kIsFull) return GetHeapObjectOrSmi();
+ // Implementation for compressed pointers.
+ if (IsSmi()) {
+ return Object(DecompressTaggedSigned(ptr_));
+ }
+ return GetHeapObject(ROOT_VALUE);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_IMPL_INL_H_
diff --git a/deps/v8/src/objects/tagged-impl.cc b/deps/v8/src/objects/tagged-impl.cc
new file mode 100644
index 0000000000..f50cec1e67
--- /dev/null
+++ b/deps/v8/src/objects/tagged-impl.cc
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/tagged-impl.h"
+
+#include <sstream>
+
+#include "src/objects/objects.h"
+#include "src/strings/string-stream.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::ShortPrint(FILE* out) {
+ OFStream os(out);
+ os << Brief(*this);
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::ShortPrint(StringStream* accumulator) {
+ std::ostringstream os;
+ os << Brief(*this);
+ accumulator->Add(os.str().c_str());
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::ShortPrint(std::ostream& os) {
+ os << Brief(*this);
+}
+
+// Explicit instantiation declarations.
+template class TaggedImpl<HeapObjectReferenceType::STRONG, Address>;
+template class TaggedImpl<HeapObjectReferenceType::WEAK, Address>;
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/objects/tagged-impl.h b/deps/v8/src/objects/tagged-impl.h
new file mode 100644
index 0000000000..e3d982565f
--- /dev/null
+++ b/deps/v8/src/objects/tagged-impl.h
@@ -0,0 +1,181 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_IMPL_H_
+#define V8_OBJECTS_TAGGED_IMPL_H_
+
+#include "include/v8-internal.h"
+#include "include/v8.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// A TaggedImpl is a base class for Object (which is either a Smi or a strong
+// reference to a HeapObject) and MaybeObject (which is either a Smi, a strong
+// reference to a HeapObject, a weak reference to a HeapObject, or a cleared
+// weak reference).
+// This class provides storage and one canonical implementation of various
+// predicates that check Smi and heap object tags' values and also take into
+// account whether the tagged value is expected to be a weak reference to a
+// HeapObject or a cleared weak reference.
+template <HeapObjectReferenceType kRefType, typename StorageType>
+class TaggedImpl {
+ public:
+ static_assert(std::is_same<StorageType, Address>::value ||
+ std::is_same<StorageType, Tagged_t>::value,
+ "StorageType must be either Address or Tagged_t");
+
+ // True for those TaggedImpl instantiations that represent uncompressed
+ // tagged values and false for TaggedImpl instantiations that represent
+ // compressed tagged values.
+ static const bool kIsFull = sizeof(StorageType) == kSystemPointerSize;
+
+ static const bool kCanBeWeak = kRefType == HeapObjectReferenceType::WEAK;
+
+ constexpr TaggedImpl() : ptr_{} {}
+ explicit constexpr TaggedImpl(StorageType ptr) : ptr_(ptr) {}
+
+ // Make clang on Linux catch what MSVC complains about on Windows:
+ operator bool() const = delete;
+
+ constexpr bool operator==(TaggedImpl other) const {
+ return ptr_ == other.ptr_;
+ }
+ constexpr bool operator!=(TaggedImpl other) const {
+ return ptr_ != other.ptr_;
+ }
+
+ // For using in std::set and std::map.
+ constexpr bool operator<(TaggedImpl other) const {
+ return ptr_ < other.ptr();
+ }
+
+ constexpr StorageType ptr() const { return ptr_; }
+
+ // Returns true if this tagged value is a strong pointer to a HeapObject or
+ // Smi.
+ constexpr inline bool IsObject() const { return !IsWeakOrCleared(); }
+
+ // Returns true if this tagged value is a Smi.
+ constexpr bool IsSmi() const { return HAS_SMI_TAG(ptr_); }
+ inline bool ToSmi(Smi* value) const;
+ inline Smi ToSmi() const;
+
+ // Returns true if this tagged value is a strong pointer to a HeapObject.
+ constexpr inline bool IsHeapObject() const { return IsStrong(); }
+
+ // Returns true if this tagged value is a cleared weak reference.
+ constexpr inline bool IsCleared() const {
+ return kCanBeWeak &&
+ (static_cast<uint32_t>(ptr_) == kClearedWeakHeapObjectLower32);
+ }
+
+ // Returns true if this tagged value is a strong or weak pointer to a
+ // HeapObject.
+ constexpr inline bool IsStrongOrWeak() const {
+ return !IsSmi() && !IsCleared();
+ }
+
+ // Returns true if this tagged value is a strong pointer to a HeapObject.
+ constexpr inline bool IsStrong() const {
+#ifdef V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
+ DCHECK_IMPLIES(!kCanBeWeak, !IsSmi() == HAS_STRONG_HEAP_OBJECT_TAG(ptr_));
+#endif
+ return kCanBeWeak ? HAS_STRONG_HEAP_OBJECT_TAG(ptr_) : !IsSmi();
+ }
+
+ // Returns true if this tagged value is a weak pointer to a HeapObject.
+ constexpr inline bool IsWeak() const {
+ return IsWeakOrCleared() && !IsCleared();
+ }
+
+ // Returns true if this tagged value is a weak pointer to a HeapObject or
+ // cleared weak reference.
+ constexpr inline bool IsWeakOrCleared() const {
+ return kCanBeWeak && HAS_WEAK_HEAP_OBJECT_TAG(ptr_);
+ }
+
+ //
+ // The following set of methods get HeapObject out of the tagged value
+ // which may involve decompression in which case the ROOT_PARAM is required.
+ // If the pointer compression is not enabled then the variants with
+ // ROOT_PARAM will be exactly the same as non-ROOT_PARAM ones.
+ //
+
+ // If this tagged value is a strong pointer to a HeapObject, returns true and
+ // sets *result. Otherwise returns false.
+ inline bool GetHeapObjectIfStrong(HeapObject* result) const;
+ inline bool GetHeapObjectIfStrong(ROOT_PARAM, HeapObject* result) const;
+
+ // DCHECKs that this tagged value is a strong pointer to a HeapObject and
+ // returns the HeapObject.
+ inline HeapObject GetHeapObjectAssumeStrong() const;
+ inline HeapObject GetHeapObjectAssumeStrong(ROOT_PARAM) const;
+
+ // If this tagged value is a weak pointer to a HeapObject, returns true and
+ // sets *result. Otherwise returns false.
+ inline bool GetHeapObjectIfWeak(HeapObject* result) const;
+ inline bool GetHeapObjectIfWeak(ROOT_PARAM, HeapObject* result) const;
+
+ // DCHECKs that this tagged value is a weak pointer to a HeapObject and
+ // returns the HeapObject.
+ inline HeapObject GetHeapObjectAssumeWeak() const;
+ inline HeapObject GetHeapObjectAssumeWeak(ROOT_PARAM) const;
+
+ // If this tagged value is a strong or weak pointer to a HeapObject, returns
+ // true and sets *result. Otherwise returns false.
+ inline bool GetHeapObject(HeapObject* result) const;
+ inline bool GetHeapObject(ROOT_PARAM, HeapObject* result) const;
+
+ inline bool GetHeapObject(HeapObject* result,
+ HeapObjectReferenceType* reference_type) const;
+ inline bool GetHeapObject(ROOT_PARAM, HeapObject* result,
+ HeapObjectReferenceType* reference_type) const;
+
+ // DCHECKs that this tagged value is a strong or a weak pointer to a
+ // HeapObject and returns the HeapObject.
+ inline HeapObject GetHeapObject() const;
+ inline HeapObject GetHeapObject(ROOT_PARAM) const;
+
+ // DCHECKs that this tagged value is a strong or a weak pointer to a
+ // HeapObject or a Smi and returns the HeapObject or Smi.
+ inline Object GetHeapObjectOrSmi() const;
+ inline Object GetHeapObjectOrSmi(ROOT_PARAM) const;
+
+ // Cast operation is available only for full non-weak tagged values.
+ template <typename T>
+ T cast() const {
+ CHECK(kIsFull);
+ DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_));
+ return T::cast(Object(ptr_));
+ }
+
+ // Prints this object without details.
+ void ShortPrint(FILE* out = stdout);
+
+ // Prints this object without details to a message accumulator.
+ void ShortPrint(StringStream* accumulator);
+
+ void ShortPrint(std::ostream& os);
+
+#ifdef OBJECT_PRINT
+ void Print();
+ void Print(std::ostream& os);
+#else
+ void Print() { ShortPrint(); }
+ void Print(std::ostream& os) { ShortPrint(os); }
+#endif
+
+ private:
+ friend class CompressedObjectSlot;
+ friend class FullObjectSlot;
+
+ StorageType ptr_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_IMPL_H_
diff --git a/deps/v8/src/objects/tagged-value-inl.h b/deps/v8/src/objects/tagged-value-inl.h
new file mode 100644
index 0000000000..5eb0e20947
--- /dev/null
+++ b/deps/v8/src/objects/tagged-value-inl.h
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_VALUE_INL_H_
+#define V8_OBJECTS_TAGGED_VALUE_INL_H_
+
+#include "src/objects/tagged-value.h"
+
+#include "include/v8-internal.h"
+#include "src/common/ptr-compr-inl.h"
+#include "src/objects/heap-object-inl.h"
+#include "src/objects/oddball.h"
+#include "src/objects/tagged-impl-inl.h"
+#include "src/roots/roots-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Object StrongTaggedValue::ToObject(WITH_ROOT_PARAM(StrongTaggedValue object)) {
+#ifdef V8_COMPRESS_POINTERS
+ return Object(DecompressTaggedAny(ROOT_VALUE, object.ptr()));
+#else
+ return Object(object.ptr());
+#endif
+}
+
+MaybeObject TaggedValue::ToMaybeObject(WITH_ROOT_PARAM(TaggedValue object)) {
+#ifdef V8_COMPRESS_POINTERS
+ return MaybeObject(DecompressTaggedAny(ROOT_VALUE, object.ptr()));
+#else
+ return MaybeObject(object.ptr());
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_VALUE_INL_H_
diff --git a/deps/v8/src/objects/tagged-value.h b/deps/v8/src/objects/tagged-value.h
new file mode 100644
index 0000000000..bb7609f7c3
--- /dev/null
+++ b/deps/v8/src/objects/tagged-value.h
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_TAGGED_VALUE_H_
+#define V8_OBJECTS_TAGGED_VALUE_H_
+
+#include "src/objects/objects.h"
+
+#include "include/v8-internal.h"
+#include "src/objects/tagged-impl.h"
+
+namespace v8 {
+namespace internal {
+
+// Almost same as Object but this one deals with in-heap and potentially
+// compressed representation of Objects and provide only limited functionality
+// which doesn't require decompression.
+class StrongTaggedValue
+ : public TaggedImpl<HeapObjectReferenceType::STRONG, Tagged_t> {
+ public:
+ constexpr StrongTaggedValue() : TaggedImpl() {}
+ explicit constexpr StrongTaggedValue(Tagged_t ptr) : TaggedImpl(ptr) {}
+
+ inline static Object ToObject(WITH_ROOT_PARAM(StrongTaggedValue object));
+};
+
+// Almost same as MaybeObject but this one deals with in-heap and potentially
+// compressed representation of Objects and provide only limited functionality
+// which doesn't require decompression.
+class TaggedValue : public TaggedImpl<HeapObjectReferenceType::WEAK, Tagged_t> {
+ public:
+ constexpr TaggedValue() : TaggedImpl() {}
+ explicit constexpr TaggedValue(Tagged_t ptr) : TaggedImpl(ptr) {}
+
+ inline static MaybeObject ToMaybeObject(WITH_ROOT_PARAM(TaggedValue object));
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_OBJECTS_TAGGED_VALUE_H_
diff --git a/deps/v8/src/objects/template-objects-inl.h b/deps/v8/src/objects/template-objects-inl.h
index cc6c096265..85c1e6c8f4 100644
--- a/deps/v8/src/objects/template-objects-inl.h
+++ b/deps/v8/src/objects/template-objects-inl.h
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription, Tuple2)
+OBJECT_CONSTRUCTORS_IMPL(TemplateObjectDescription, Struct)
OBJECT_CONSTRUCTORS_IMPL(CachedTemplateObject, Tuple3)
CAST_ACCESSOR(TemplateObjectDescription)
diff --git a/deps/v8/src/objects/template-objects.cc b/deps/v8/src/objects/template-objects.cc
index 448d54fb9d..2f34a48a2a 100644
--- a/deps/v8/src/objects/template-objects.cc
+++ b/deps/v8/src/objects/template-objects.cc
@@ -5,11 +5,11 @@
#include "src/objects/template-objects.h"
#include "src/base/functional.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/objects/template-objects-inl.h"
-#include "src/property-descriptor.h"
namespace v8 {
namespace internal {
@@ -23,20 +23,20 @@ Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
// Check the template weakmap to see if the template object already exists.
Handle<EphemeronHashTable> template_weakmap =
- native_context->template_weakmap()->IsUndefined(isolate)
+ native_context->template_weakmap().IsUndefined(isolate)
? EphemeronHashTable::New(isolate, 0)
: handle(EphemeronHashTable::cast(native_context->template_weakmap()),
isolate);
uint32_t hash = shared_info->Hash();
Object maybe_cached_template = template_weakmap->Lookup(shared_info, hash);
- while (!maybe_cached_template->IsTheHole()) {
+ while (!maybe_cached_template.IsTheHole()) {
CachedTemplateObject cached_template =
CachedTemplateObject::cast(maybe_cached_template);
- if (cached_template->slot_id() == slot_id)
- return handle(cached_template->template_object(), isolate);
+ if (cached_template.slot_id() == slot_id)
+ return handle(cached_template.template_object(), isolate);
- maybe_cached_template = cached_template->next();
+ maybe_cached_template = cached_template.next();
}
// Create the raw object from the {raw_strings}.
diff --git a/deps/v8/src/objects/template-objects.h b/deps/v8/src/objects/template-objects.h
index e99c8530e6..220f9dab1e 100644
--- a/deps/v8/src/objects/template-objects.h
+++ b/deps/v8/src/objects/template-objects.h
@@ -39,22 +39,25 @@ class CachedTemplateObject final : public Tuple3 {
// TemplateObjectDescription is a tuple of raw strings and cooked strings for
// tagged template literals. Used to communicate with the runtime for template
// object creation within the {Runtime_GetTemplateObject} method.
-class TemplateObjectDescription final : public Tuple2 {
+class TemplateObjectDescription final : public Struct {
public:
DECL_ACCESSORS(raw_strings, FixedArray)
DECL_ACCESSORS(cooked_strings, FixedArray)
+ DECL_CAST(TemplateObjectDescription)
+
static Handle<JSArray> GetTemplateObject(
Isolate* isolate, Handle<Context> native_context,
Handle<TemplateObjectDescription> description,
Handle<SharedFunctionInfo> shared_info, int slot_id);
- DECL_CAST(TemplateObjectDescription)
+ DECL_PRINTER(TemplateObjectDescription)
+ DECL_VERIFIER(TemplateObjectDescription)
- static constexpr int kRawStringsOffset = kValue1Offset;
- static constexpr int kCookedStringsOffset = kValue2Offset;
+ DEFINE_FIELD_OFFSET_CONSTANTS(
+ Struct::kHeaderSize, TORQUE_GENERATED_TEMPLATE_OBJECT_DESCRIPTION_FIELDS)
- OBJECT_CONSTRUCTORS(TemplateObjectDescription, Tuple2);
+ OBJECT_CONSTRUCTORS(TemplateObjectDescription, Struct);
};
} // namespace internal
diff --git a/deps/v8/src/objects/templates-inl.h b/deps/v8/src/objects/templates-inl.h
index 19739be91a..a1a098ffc0 100644
--- a/deps/v8/src/objects/templates-inl.h
+++ b/deps/v8/src/objects/templates-inl.h
@@ -40,8 +40,6 @@ ACCESSORS(FunctionTemplateInfo, rare_data, HeapObject,
ACCESSORS(FunctionTemplateInfo, cached_property_name, Object,
kCachedPropertyNameOffset)
SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
- kHiddenPrototypeBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, needs_access_check,
kNeedsAccessCheckBit)
@@ -58,26 +56,26 @@ SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
HeapObject extra = function_template_info->rare_data();
- if (extra->IsUndefined(isolate)) {
+ if (extra.IsUndefined(isolate)) {
return AllocateFunctionTemplateRareData(isolate, function_template_info);
} else {
return FunctionTemplateRareData::cast(extra);
}
}
-#define RARE_ACCESSORS(Name, CamelName, Type) \
- Type FunctionTemplateInfo::Get##CamelName() { \
- HeapObject extra = rare_data(); \
- HeapObject undefined = GetReadOnlyRoots().undefined_value(); \
- return extra == undefined ? undefined \
- : FunctionTemplateRareData::cast(extra)->Name(); \
- } \
- inline void FunctionTemplateInfo::Set##CamelName( \
- Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info, \
- Handle<Type> Name) { \
- FunctionTemplateRareData rare_data = \
- EnsureFunctionTemplateRareData(isolate, function_template_info); \
- rare_data->set_##Name(*Name); \
+#define RARE_ACCESSORS(Name, CamelName, Type) \
+ Type FunctionTemplateInfo::Get##CamelName() { \
+ HeapObject extra = rare_data(); \
+ HeapObject undefined = GetReadOnlyRoots().undefined_value(); \
+ return extra == undefined ? undefined \
+ : FunctionTemplateRareData::cast(extra).Name(); \
+ } \
+ inline void FunctionTemplateInfo::Set##CamelName( \
+ Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info, \
+ Handle<Type> Name) { \
+ FunctionTemplateRareData rare_data = \
+ EnsureFunctionTemplateRareData(isolate, function_template_info); \
+ rare_data.set_##Name(*Name); \
}
RARE_ACCESSORS(prototype_template, PrototypeTemplate, Object)
@@ -116,33 +114,33 @@ CAST_ACCESSOR(FunctionTemplateRareData)
CAST_ACCESSOR(ObjectTemplateInfo)
bool FunctionTemplateInfo::instantiated() {
- return shared_function_info()->IsSharedFunctionInfo();
+ return shared_function_info().IsSharedFunctionInfo();
}
bool FunctionTemplateInfo::BreakAtEntry() {
Object maybe_shared = shared_function_info();
- if (maybe_shared->IsSharedFunctionInfo()) {
+ if (maybe_shared.IsSharedFunctionInfo()) {
SharedFunctionInfo shared = SharedFunctionInfo::cast(maybe_shared);
- return shared->BreakAtEntry();
+ return shared.BreakAtEntry();
}
return false;
}
FunctionTemplateInfo FunctionTemplateInfo::GetParent(Isolate* isolate) {
Object parent = GetParentTemplate();
- return parent->IsUndefined(isolate) ? FunctionTemplateInfo()
- : FunctionTemplateInfo::cast(parent);
+ return parent.IsUndefined(isolate) ? FunctionTemplateInfo()
+ : FunctionTemplateInfo::cast(parent);
}
ObjectTemplateInfo ObjectTemplateInfo::GetParent(Isolate* isolate) {
Object maybe_ctor = constructor();
- if (maybe_ctor->IsUndefined(isolate)) return ObjectTemplateInfo();
+ if (maybe_ctor.IsUndefined(isolate)) return ObjectTemplateInfo();
FunctionTemplateInfo constructor = FunctionTemplateInfo::cast(maybe_ctor);
while (true) {
- constructor = constructor->GetParent(isolate);
+ constructor = constructor.GetParent(isolate);
if (constructor.is_null()) return ObjectTemplateInfo();
- Object maybe_obj = constructor->GetInstanceTemplate();
- if (!maybe_obj->IsUndefined(isolate)) {
+ Object maybe_obj = constructor.GetInstanceTemplate();
+ if (!maybe_obj.IsUndefined(isolate)) {
return ObjectTemplateInfo::cast(maybe_obj);
}
}
@@ -151,7 +149,7 @@ ObjectTemplateInfo ObjectTemplateInfo::GetParent(Isolate* isolate) {
int ObjectTemplateInfo::embedder_field_count() const {
Object value = data();
- DCHECK(value->IsSmi());
+ DCHECK(value.IsSmi());
return EmbedderFieldCount::decode(Smi::ToInt(value));
}
@@ -163,7 +161,7 @@ void ObjectTemplateInfo::set_embedder_field_count(int count) {
bool ObjectTemplateInfo::immutable_proto() const {
Object value = data();
- DCHECK(value->IsSmi());
+ DCHECK(value.IsSmi());
return IsImmutablePrototype::decode(Smi::ToInt(value));
}
@@ -173,7 +171,7 @@ void ObjectTemplateInfo::set_immutable_proto(bool immutable) {
}
bool FunctionTemplateInfo::IsTemplateFor(JSObject object) {
- return IsTemplateFor(object->map());
+ return IsTemplateFor(object.map());
}
} // namespace internal
diff --git a/deps/v8/src/objects/templates.h b/deps/v8/src/objects/templates.h
index bd55821c7d..66cd038114 100644
--- a/deps/v8/src/objects/templates.h
+++ b/deps/v8/src/objects/templates.h
@@ -139,7 +139,6 @@ class FunctionTemplateInfo : public TemplateInfo {
DECL_ACCESSORS(cached_property_name, Object)
// Begin flag bits ---------------------
- DECL_BOOLEAN_ACCESSORS(hidden_prototype)
DECL_BOOLEAN_ACCESSORS(undetectable)
// If set, object instances created by this function
@@ -169,7 +168,7 @@ class FunctionTemplateInfo : public TemplateInfo {
static const int kInvalidSerialNumber = 0;
- DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kSize,
+ DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kHeaderSize,
TORQUE_GENERATED_FUNCTION_TEMPLATE_INFO_FIELDS)
static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
@@ -189,13 +188,12 @@ class FunctionTemplateInfo : public TemplateInfo {
Handle<Object> getter);
// Bit position in the flag, from least significant bit position.
- static const int kHiddenPrototypeBit = 0;
- static const int kUndetectableBit = 1;
- static const int kNeedsAccessCheckBit = 2;
- static const int kReadOnlyPrototypeBit = 3;
- static const int kRemovePrototypeBit = 4;
- static const int kDoNotCacheBit = 5;
- static const int kAcceptAnyReceiver = 6;
+ static const int kUndetectableBit = 0;
+ static const int kNeedsAccessCheckBit = 1;
+ static const int kReadOnlyPrototypeBit = 2;
+ static const int kRemovePrototypeBit = 3;
+ static const int kDoNotCacheBit = 4;
+ static const int kAcceptAnyReceiver = 5;
private:
static inline FunctionTemplateRareData EnsureFunctionTemplateRareData(
@@ -221,7 +219,7 @@ class ObjectTemplateInfo : public TemplateInfo {
DECL_VERIFIER(ObjectTemplateInfo)
// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kSize,
+ DEFINE_FIELD_OFFSET_CONSTANTS(TemplateInfo::kHeaderSize,
TORQUE_GENERATED_OBJECT_TEMPLATE_INFO_FIELDS)
// Starting from given object template's constructor walk up the inheritance
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h
index cd1bd9d654..893de78dc4 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/objects/transitions-inl.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TRANSITIONS_INL_H_
-#define V8_TRANSITIONS_INL_H_
+#ifndef V8_OBJECTS_TRANSITIONS_INL_H_
+#define V8_OBJECTS_TRANSITIONS_INL_H_
-#include "src/transitions.h"
+#include "src/objects/transitions.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/objects/fixed-array-inl.h"
@@ -45,16 +45,16 @@ HeapObjectSlot TransitionArray::GetKeySlot(int transition_number) {
}
void TransitionArray::SetPrototypeTransitions(WeakFixedArray transitions) {
- DCHECK(transitions->IsWeakFixedArray());
+ DCHECK(transitions.IsWeakFixedArray());
WeakFixedArray::Set(kPrototypeTransitionsIndex,
HeapObjectReference::Strong(transitions));
}
int TransitionArray::NumberOfPrototypeTransitions(
WeakFixedArray proto_transitions) {
- if (proto_transitions->length() == 0) return 0;
+ if (proto_transitions.length() == 0) return 0;
MaybeObject raw =
- proto_transitions->Get(kProtoTransitionNumberOfEntriesOffset);
+ proto_transitions.Get(kProtoTransitionNumberOfEntriesOffset);
return raw.ToSmi().value();
}
@@ -76,7 +76,7 @@ Name TransitionsAccessor::GetKey(int transition_number) {
return GetSimpleTransitionKey(map);
}
case kFullTransitionArray:
- return transitions()->GetKey(transition_number);
+ return transitions().GetKey(transition_number);
}
UNREACHABLE();
}
@@ -94,23 +94,23 @@ HeapObjectSlot TransitionArray::GetTargetSlot(int transition_number) {
// static
PropertyDetails TransitionsAccessor::GetTargetDetails(Name name, Map target) {
- DCHECK(!IsSpecialTransition(name->GetReadOnlyRoots(), name));
- int descriptor = target->LastAdded();
- DescriptorArray descriptors = target->instance_descriptors();
+ DCHECK(!IsSpecialTransition(name.GetReadOnlyRoots(), name));
+ int descriptor = target.LastAdded();
+ DescriptorArray descriptors = target.instance_descriptors();
// Transitions are allowed only for the last added property.
- DCHECK(descriptors->GetKey(descriptor)->Equals(name));
- return descriptors->GetDetails(descriptor);
+ DCHECK(descriptors.GetKey(descriptor).Equals(name));
+ return descriptors.GetDetails(descriptor);
}
// static
PropertyDetails TransitionsAccessor::GetSimpleTargetDetails(Map transition) {
- return transition->GetLastDescriptorDetails();
+ return transition.GetLastDescriptorDetails();
}
// static
Name TransitionsAccessor::GetSimpleTransitionKey(Map transition) {
- int descriptor = transition->LastAdded();
- return transition->instance_descriptors()->GetKey(descriptor);
+ int descriptor = transition.LastAdded();
+ return transition.instance_descriptors().GetKey(descriptor);
}
// static
@@ -138,7 +138,7 @@ Map TransitionsAccessor::GetTarget(int transition_number) {
case kWeakRef:
return Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
case kFullTransitionArray:
- return transitions()->GetTarget(transition_number);
+ return transitions().GetTarget(transition_number);
}
UNREACHABLE();
}
@@ -146,7 +146,7 @@ Map TransitionsAccessor::GetTarget(int transition_number) {
void TransitionArray::SetRawTarget(int transition_number, MaybeObject value) {
DCHECK(transition_number < number_of_transitions());
DCHECK(value->IsWeak());
- DCHECK(value->GetHeapObjectAssumeWeak()->IsMap());
+ DCHECK(value->GetHeapObjectAssumeWeak().IsMap());
WeakFixedArray::Set(ToTargetIndex(transition_number), value);
}
@@ -155,7 +155,7 @@ bool TransitionArray::GetTargetIfExists(int transition_number, Isolate* isolate,
MaybeObject raw = GetRawTarget(transition_number);
HeapObject heap_object;
if (raw->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsUndefined(isolate)) {
+ heap_object.IsUndefined(isolate)) {
return false;
}
*target = TransitionsAccessor::GetTargetFromRaw(raw);
@@ -171,11 +171,54 @@ int TransitionArray::SearchSpecial(Symbol symbol, int* out_insertion_index) {
}
int TransitionArray::SearchName(Name name, int* out_insertion_index) {
- DCHECK(name->IsUniqueName());
+ DCHECK(name.IsUniqueName());
return internal::Search<ALL_ENTRIES>(this, name, number_of_entries(),
out_insertion_index);
}
+TransitionsAccessor::TransitionsAccessor(Isolate* isolate, Map map,
+ DisallowHeapAllocation* no_gc)
+ : isolate_(isolate), map_(map) {
+ Initialize();
+ USE(no_gc);
+}
+
+TransitionsAccessor::TransitionsAccessor(Isolate* isolate, Handle<Map> map)
+ : isolate_(isolate), map_handle_(map), map_(*map) {
+ Initialize();
+}
+
+void TransitionsAccessor::Reload() {
+ DCHECK(!map_handle_.is_null());
+ map_ = *map_handle_;
+ Initialize();
+}
+
+void TransitionsAccessor::Initialize() {
+ raw_transitions_ = map_.raw_transitions();
+ HeapObject heap_object;
+ if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
+ encoding_ = kUninitialized;
+ } else if (raw_transitions_->IsWeak()) {
+ encoding_ = kWeakRef;
+ } else if (raw_transitions_->GetHeapObjectIfStrong(&heap_object)) {
+ if (heap_object.IsTransitionArray()) {
+ encoding_ = kFullTransitionArray;
+ } else if (heap_object.IsPrototypeInfo()) {
+ encoding_ = kPrototypeInfo;
+ } else {
+ DCHECK(map_.is_deprecated());
+ DCHECK(heap_object.IsMap());
+ encoding_ = kMigrationTarget;
+ }
+ } else {
+ UNREACHABLE();
+ }
+#if DEBUG
+ needs_reload_ = false;
+#endif
+}
+
int TransitionArray::number_of_transitions() const {
if (length() < kFirstIndex) return 0;
return Get(kTransitionLengthIndex).ToSmi().value();
@@ -243,9 +286,36 @@ void TransitionArray::SetNumberOfTransitions(int number_of_transitions) {
MaybeObject::FromSmi(Smi::FromInt(number_of_transitions)));
}
+Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
+ DisallowHeapAllocation no_gc;
+ switch (encoding()) {
+ case kPrototypeInfo:
+ case kUninitialized:
+ case kMigrationTarget:
+ case kFullTransitionArray:
+ return Handle<String>::null();
+ case kWeakRef: {
+ Map target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
+ PropertyDetails details = GetSimpleTargetDetails(target);
+ if (details.location() != kField) return Handle<String>::null();
+ DCHECK_EQ(kData, details.kind());
+ if (details.attributes() != NONE) return Handle<String>::null();
+ Name name = GetSimpleTransitionKey(target);
+ if (!name.IsString()) return Handle<String>::null();
+ return handle(String::cast(name), isolate_);
+ }
+ }
+ UNREACHABLE();
+}
+
+Handle<Map> TransitionsAccessor::ExpectedTransitionTarget() {
+ DCHECK(!ExpectedTransitionKey().is_null());
+ return handle(GetTarget(0), isolate_);
+}
+
} // namespace internal
} // namespace v8
#include "src/objects/object-macros-undef.h"
-#endif // V8_TRANSITIONS_INL_H_
+#endif // V8_OBJECTS_TRANSITIONS_INL_H_
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/objects/transitions.cc
index dca0a728e3..a2cd102aaf 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/objects/transitions.cc
@@ -2,40 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/transitions.h"
+#include "src/objects/transitions.h"
-#include "src/objects-inl.h"
-#include "src/transitions-inl.h"
-#include "src/utils.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/transitions-inl.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
-void TransitionsAccessor::Initialize() {
- raw_transitions_ = map_->raw_transitions();
- HeapObject heap_object;
- if (raw_transitions_->IsSmi() || raw_transitions_->IsCleared()) {
- encoding_ = kUninitialized;
- } else if (raw_transitions_->IsWeak()) {
- encoding_ = kWeakRef;
- } else if (raw_transitions_->GetHeapObjectIfStrong(&heap_object)) {
- if (heap_object->IsTransitionArray()) {
- encoding_ = kFullTransitionArray;
- } else if (heap_object->IsPrototypeInfo()) {
- encoding_ = kPrototypeInfo;
- } else {
- DCHECK(map_->is_deprecated());
- DCHECK(heap_object->IsMap());
- encoding_ = kMigrationTarget;
- }
- } else {
- UNREACHABLE();
- }
-#if DEBUG
- needs_reload_ = false;
-#endif
-}
-
Map TransitionsAccessor::GetSimpleTransition() {
switch (encoding()) {
case kWeakRef:
@@ -85,7 +60,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
PropertyDetails new_details = is_special_transition
? PropertyDetails::Empty()
: GetTargetDetails(*name, *target);
- if (flag == SIMPLE_PROPERTY_TRANSITION && key->Equals(*name) &&
+ if (flag == SIMPLE_PROPERTY_TRANSITION && key.Equals(*name) &&
old_details.kind() == new_details.kind() &&
old_details.attributes() == new_details.attributes()) {
ReplaceTransitions(HeapObjectReference::Weak(*target));
@@ -128,17 +103,16 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
{
DisallowHeapAllocation no_gc;
TransitionArray array = transitions();
- number_of_transitions = array->number_of_transitions();
+ number_of_transitions = array.number_of_transitions();
new_nof = number_of_transitions;
- int index =
- is_special_transition
- ? array->SearchSpecial(Symbol::cast(*name), &insertion_index)
- : array->Search(details.kind(), *name, details.attributes(),
- &insertion_index);
+ int index = is_special_transition
+ ? array.SearchSpecial(Symbol::cast(*name), &insertion_index)
+ : array.Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
// If an existing entry was found, overwrite it and return.
if (index != kNotFound) {
- array->SetRawTarget(index, HeapObjectReference::Weak(*target));
+ array.SetRawTarget(index, HeapObjectReference::Weak(*target));
return;
}
@@ -147,15 +121,15 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
DCHECK(insertion_index >= 0 && insertion_index <= number_of_transitions);
// If there is enough capacity, insert new entry into the existing array.
- if (new_nof <= array->Capacity()) {
- array->SetNumberOfTransitions(new_nof);
+ if (new_nof <= array.Capacity()) {
+ array.SetNumberOfTransitions(new_nof);
for (index = number_of_transitions; index > insertion_index; --index) {
- array->SetKey(index, array->GetKey(index - 1));
- array->SetRawTarget(index, array->GetRawTarget(index - 1));
+ array.SetKey(index, array.GetKey(index - 1));
+ array.SetRawTarget(index, array.GetRawTarget(index - 1));
}
- array->SetKey(index, *name);
- array->SetRawTarget(index, HeapObjectReference::Weak(*target));
- SLOW_DCHECK(array->IsSortedNoDuplicates());
+ array.SetKey(index, *name);
+ array.SetRawTarget(index, HeapObjectReference::Weak(*target));
+ SLOW_DCHECK(array.IsSortedNoDuplicates());
return;
}
}
@@ -171,18 +145,17 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
Reload();
DisallowHeapAllocation no_gc;
TransitionArray array = transitions();
- if (array->number_of_transitions() != number_of_transitions) {
- DCHECK(array->number_of_transitions() < number_of_transitions);
+ if (array.number_of_transitions() != number_of_transitions) {
+ DCHECK(array.number_of_transitions() < number_of_transitions);
- number_of_transitions = array->number_of_transitions();
+ number_of_transitions = array.number_of_transitions();
new_nof = number_of_transitions;
insertion_index = kNotFound;
- int index =
- is_special_transition
- ? array->SearchSpecial(Symbol::cast(*name), &insertion_index)
- : array->Search(details.kind(), *name, details.attributes(),
- &insertion_index);
+ int index = is_special_transition
+ ? array.SearchSpecial(Symbol::cast(*name), &insertion_index)
+ : array.Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
if (index == kNotFound) {
++new_nof;
} else {
@@ -193,17 +166,17 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
result->SetNumberOfTransitions(new_nof);
}
- if (array->HasPrototypeTransitions()) {
- result->SetPrototypeTransitions(array->GetPrototypeTransitions());
+ if (array.HasPrototypeTransitions()) {
+ result->SetPrototypeTransitions(array.GetPrototypeTransitions());
}
DCHECK_NE(kNotFound, insertion_index);
for (int i = 0; i < insertion_index; ++i) {
- result->Set(i, array->GetKey(i), array->GetRawTarget(i));
+ result->Set(i, array.GetKey(i), array.GetRawTarget(i));
}
result->Set(insertion_index, *name, HeapObjectReference::Weak(*target));
for (int i = insertion_index; i < number_of_transitions; ++i) {
- result->Set(i + 1, array->GetKey(i), array->GetRawTarget(i));
+ result->Set(i + 1, array.GetKey(i), array.GetRawTarget(i));
}
SLOW_DCHECK(result->IsSortedNoDuplicates());
@@ -212,7 +185,7 @@ void TransitionsAccessor::Insert(Handle<Name> name, Handle<Map> target,
Map TransitionsAccessor::SearchTransition(Name name, PropertyKind kind,
PropertyAttributes attributes) {
- DCHECK(name->IsUniqueName());
+ DCHECK(name.IsUniqueName());
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
@@ -224,7 +197,7 @@ Map TransitionsAccessor::SearchTransition(Name name, PropertyKind kind,
return map;
}
case kFullTransitionArray: {
- return transitions()->SearchAndGetTarget(kind, name, attributes);
+ return transitions().SearchAndGetTarget(kind, name, attributes);
}
}
UNREACHABLE();
@@ -232,14 +205,14 @@ Map TransitionsAccessor::SearchTransition(Name name, PropertyKind kind,
Map TransitionsAccessor::SearchSpecial(Symbol name) {
if (encoding() != kFullTransitionArray) return Map();
- int transition = transitions()->SearchSpecial(name);
+ int transition = transitions().SearchSpecial(name);
if (transition == kNotFound) return Map();
- return transitions()->GetTarget(transition);
+ return transitions().GetTarget(transition);
}
// static
bool TransitionsAccessor::IsSpecialTransition(ReadOnlyRoots roots, Name name) {
- if (!name->IsSymbol()) return false;
+ if (!name.IsSymbol()) return false;
return name == roots.nonextensible_symbol() ||
name == roots.sealed_symbol() || name == roots.frozen_symbol() ||
name == roots.elements_transition_symbol() ||
@@ -253,7 +226,7 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
PropertyAttributes attributes = name->IsPrivate() ? DONT_ENUM : NONE;
Map target = SearchTransition(*name, kData, attributes);
if (target.is_null()) return MaybeHandle<Map>();
- PropertyDetails details = target->GetLastDescriptorDetails();
+ PropertyDetails details = target.GetLastDescriptorDetails();
DCHECK_EQ(attributes, details.attributes());
DCHECK_EQ(kData, details.kind());
if (requested_location == kFieldOnly && details.location() != kField) {
@@ -262,37 +235,10 @@ MaybeHandle<Map> TransitionsAccessor::FindTransitionToDataProperty(
return Handle<Map>(target, isolate_);
}
-Handle<String> TransitionsAccessor::ExpectedTransitionKey() {
- DisallowHeapAllocation no_gc;
- switch (encoding()) {
- case kPrototypeInfo:
- case kUninitialized:
- case kMigrationTarget:
- case kFullTransitionArray:
- return Handle<String>::null();
- case kWeakRef: {
- Map target = Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
- PropertyDetails details = GetSimpleTargetDetails(target);
- if (details.location() != kField) return Handle<String>::null();
- DCHECK_EQ(kData, details.kind());
- if (details.attributes() != NONE) return Handle<String>::null();
- Name name = GetSimpleTransitionKey(target);
- if (!name->IsString()) return Handle<String>::null();
- return handle(String::cast(name), isolate_);
- }
- }
- UNREACHABLE();
-}
-
-Handle<Map> TransitionsAccessor::ExpectedTransitionTarget() {
- DCHECK(!ExpectedTransitionKey().is_null());
- return handle(GetTarget(0), isolate_);
-}
-
bool TransitionsAccessor::CanHaveMoreTransitions() {
- if (map_->is_dictionary_map()) return false;
+ if (map_.is_dictionary_map()) return false;
if (encoding() == kFullTransitionArray) {
- return transitions()->number_of_transitions() < kMaxNumberOfTransitions;
+ return transitions().number_of_transitions() < kMaxNumberOfTransitions;
}
return true;
}
@@ -301,11 +247,11 @@ bool TransitionsAccessor::CanHaveMoreTransitions() {
bool TransitionsAccessor::IsMatchingMap(Map target, Name name,
PropertyKind kind,
PropertyAttributes attributes) {
- int descriptor = target->LastAdded();
- DescriptorArray descriptors = target->instance_descriptors();
- Name key = descriptors->GetKey(descriptor);
+ int descriptor = target.LastAdded();
+ DescriptorArray descriptors = target.instance_descriptors();
+ Name key = descriptors.GetKey(descriptor);
if (key != name) return false;
- return descriptors->GetDetails(descriptor)
+ return descriptors.GetDetails(descriptor)
.HasKindAndAttributes(kind, attributes);
}
@@ -320,12 +266,12 @@ bool TransitionArray::CompactPrototypeTransitionArray(Isolate* isolate,
}
int new_number_of_transitions = 0;
for (int i = 0; i < number_of_transitions; i++) {
- MaybeObject target = array->Get(header + i);
+ MaybeObject target = array.Get(header + i);
DCHECK(target->IsCleared() ||
- (target->IsWeak() && target->GetHeapObject()->IsMap()));
+ (target->IsWeak() && target->GetHeapObject().IsMap()));
if (!target->IsCleared()) {
if (new_number_of_transitions != i) {
- array->Set(header + new_number_of_transitions, target);
+ array.Set(header + new_number_of_transitions, target);
}
new_number_of_transitions++;
}
@@ -334,7 +280,7 @@ bool TransitionArray::CompactPrototypeTransitionArray(Isolate* isolate,
MaybeObject undefined =
MaybeObject::FromObject(*isolate->factory()->undefined_value());
for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
- array->Set(header + i, undefined);
+ array.Set(header + i, undefined);
}
if (number_of_transitions != new_number_of_transitions) {
SetNumberOfPrototypeTransitions(array, new_number_of_transitions);
@@ -362,11 +308,11 @@ Handle<WeakFixedArray> TransitionArray::GrowPrototypeTransitionArray(
void TransitionsAccessor::PutPrototypeTransition(Handle<Object> prototype,
Handle<Map> target_map) {
- DCHECK(HeapObject::cast(*prototype)->map()->IsMap());
+ DCHECK(HeapObject::cast(*prototype).map().IsMap());
// Don't cache prototype transition if this map is either shared, or a map of
// a prototype.
- if (map_->is_prototype_map()) return;
- if (map_->is_dictionary_map() || !FLAG_cache_prototype_transitions) return;
+ if (map_.is_prototype_map()) return;
+ if (map_.is_dictionary_map() || !FLAG_cache_prototype_transitions) return;
const int header = TransitionArray::kProtoTransitionHeaderSize;
@@ -400,12 +346,12 @@ Handle<Map> TransitionsAccessor::GetPrototypeTransition(
int length = TransitionArray::NumberOfPrototypeTransitions(cache);
for (int i = 0; i < length; i++) {
MaybeObject target =
- cache->Get(TransitionArray::kProtoTransitionHeaderSize + i);
+ cache.Get(TransitionArray::kProtoTransitionHeaderSize + i);
DCHECK(target->IsWeakOrCleared());
HeapObject heap_object;
if (target->GetHeapObjectIfWeak(&heap_object)) {
Map map = Map::cast(heap_object);
- if (map->prototype() == *prototype) {
+ if (map.prototype() == *prototype) {
return handle(map, isolate_);
}
}
@@ -415,18 +361,18 @@ Handle<Map> TransitionsAccessor::GetPrototypeTransition(
WeakFixedArray TransitionsAccessor::GetPrototypeTransitions() {
if (encoding() != kFullTransitionArray ||
- !transitions()->HasPrototypeTransitions()) {
+ !transitions().HasPrototypeTransitions()) {
return ReadOnlyRoots(isolate_).empty_weak_fixed_array();
}
- return transitions()->GetPrototypeTransitions();
+ return transitions().GetPrototypeTransitions();
}
// static
void TransitionArray::SetNumberOfPrototypeTransitions(
WeakFixedArray proto_transitions, int value) {
- DCHECK_NE(proto_transitions->length(), 0);
- proto_transitions->Set(kProtoTransitionNumberOfEntriesOffset,
- MaybeObject::FromSmi(Smi::FromInt(value)));
+ DCHECK_NE(proto_transitions.length(), 0);
+ proto_transitions.Set(kProtoTransitionNumberOfEntriesOffset,
+ MaybeObject::FromSmi(Smi::FromInt(value)));
}
int TransitionsAccessor::NumberOfTransitions() {
@@ -438,7 +384,7 @@ int TransitionsAccessor::NumberOfTransitions() {
case kWeakRef:
return 1;
case kFullTransitionArray:
- return transitions()->number_of_transitions();
+ return transitions().number_of_transitions();
}
UNREACHABLE();
return 0; // Make GCC happy.
@@ -448,14 +394,14 @@ void TransitionsAccessor::SetMigrationTarget(Map migration_target) {
// We only cache the migration target for maps with empty transitions for GC's
// sake.
if (encoding() != kUninitialized) return;
- DCHECK(map_->is_deprecated());
- map_->set_raw_transitions(MaybeObject::FromObject(migration_target));
+ DCHECK(map_.is_deprecated());
+ map_.set_raw_transitions(MaybeObject::FromObject(migration_target));
MarkNeedsReload();
}
Map TransitionsAccessor::GetMigrationTarget() {
if (encoding() == kMigrationTarget) {
- return map_->raw_transitions()->cast<Map>();
+ return map_.raw_transitions()->cast<Map>();
}
return Map();
}
@@ -479,16 +425,16 @@ void TransitionsAccessor::ReplaceTransitions(MaybeObject new_transitions) {
// keep referenced objects alive, so we zap it.
// When there is another reference to the array somewhere (e.g. a handle),
// not zapping turns from a waste of memory into a source of crashes.
- old_transitions->Zap(isolate_);
+ old_transitions.Zap(isolate_);
}
- map_->set_raw_transitions(new_transitions);
+ map_.set_raw_transitions(new_transitions);
MarkNeedsReload();
}
void TransitionsAccessor::SetPrototypeTransitions(
Handle<WeakFixedArray> proto_transitions) {
EnsureHasFullTransitionArray();
- transitions()->SetPrototypeTransitions(*proto_transitions);
+ transitions().SetPrototypeTransitions(*proto_transitions);
}
void TransitionsAccessor::EnsureHasFullTransitionArray() {
@@ -527,12 +473,12 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
break;
}
case kFullTransitionArray: {
- if (transitions()->HasPrototypeTransitions()) {
- WeakFixedArray proto_trans = transitions()->GetPrototypeTransitions();
+ if (transitions().HasPrototypeTransitions()) {
+ WeakFixedArray proto_trans = transitions().GetPrototypeTransitions();
int length = TransitionArray::NumberOfPrototypeTransitions(proto_trans);
for (int i = 0; i < length; ++i) {
int index = TransitionArray::kProtoTransitionHeaderSize + i;
- MaybeObject target = proto_trans->Get(index);
+ MaybeObject target = proto_trans.Get(index);
HeapObject heap_object;
if (target->GetHeapObjectIfWeak(&heap_object)) {
TransitionsAccessor(isolate_, Map::cast(heap_object), no_gc)
@@ -542,8 +488,8 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
}
}
}
- for (int i = 0; i < transitions()->number_of_transitions(); ++i) {
- TransitionsAccessor(isolate_, transitions()->GetTarget(i), no_gc)
+ for (int i = 0; i < transitions().number_of_transitions(); ++i) {
+ TransitionsAccessor(isolate_, transitions().GetTarget(i), no_gc)
.TraverseTransitionTreeInternal(callback, data, no_gc);
}
break;
@@ -558,20 +504,20 @@ void TransitionsAccessor::CheckNewTransitionsAreConsistent(
// This function only handles full transition arrays.
DCHECK_EQ(kFullTransitionArray, encoding());
TransitionArray new_transitions = TransitionArray::cast(transitions);
- for (int i = 0; i < old_transitions->number_of_transitions(); i++) {
- Map target = old_transitions->GetTarget(i);
- if (target->instance_descriptors() == map_->instance_descriptors()) {
- Name key = old_transitions->GetKey(i);
+ for (int i = 0; i < old_transitions.number_of_transitions(); i++) {
+ Map target = old_transitions.GetTarget(i);
+ if (target.instance_descriptors() == map_.instance_descriptors()) {
+ Name key = old_transitions.GetKey(i);
int new_target_index;
if (IsSpecialTransition(ReadOnlyRoots(isolate_), key)) {
- new_target_index = new_transitions->SearchSpecial(Symbol::cast(key));
+ new_target_index = new_transitions.SearchSpecial(Symbol::cast(key));
} else {
PropertyDetails details = GetTargetDetails(key, target);
new_target_index =
- new_transitions->Search(details.kind(), key, details.attributes());
+ new_transitions.Search(details.kind(), key, details.attributes());
}
DCHECK_NE(TransitionArray::kNotFound, new_target_index);
- DCHECK_EQ(target, new_transitions->GetTarget(new_target_index));
+ DCHECK_EQ(target, new_transitions.GetTarget(new_target_index));
}
}
}
@@ -674,9 +620,8 @@ void TransitionArray::Sort() {
temp_kind = details.kind();
temp_attributes = details.attributes();
}
- int cmp =
- CompareKeys(temp_key, temp_key->Hash(), temp_kind, temp_attributes,
- key, key->Hash(), kind, attributes);
+ int cmp = CompareKeys(temp_key, temp_key.Hash(), temp_kind,
+ temp_attributes, key, key.Hash(), kind, attributes);
if (cmp > 0) {
SetKey(j + 1, temp_key);
SetRawTarget(j + 1, temp_target);
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/objects/transitions.h
index 7765e51caf..b4dadcc22a 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/objects/transitions.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TRANSITIONS_H_
-#define V8_TRANSITIONS_H_
+#ifndef V8_OBJECTS_TRANSITIONS_H_
+#define V8_OBJECTS_TRANSITIONS_H_
-#include "src/checks.h"
-#include "src/elements-kind.h"
-#include "src/objects.h"
+#include "src/common/checks.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/elements-kind.h"
#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
#include "src/objects/name.h"
+#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -38,16 +38,9 @@ namespace internal {
// cleared when the map they refer to is not otherwise reachable.
class V8_EXPORT_PRIVATE TransitionsAccessor {
public:
- TransitionsAccessor(Isolate* isolate, Map map, DisallowHeapAllocation* no_gc)
- : isolate_(isolate), map_(map) {
- Initialize();
- USE(no_gc);
- }
- TransitionsAccessor(Isolate* isolate, Handle<Map> map)
- : isolate_(isolate), map_handle_(map), map_(*map) {
- Initialize();
- }
-
+ inline TransitionsAccessor(Isolate* isolate, Map map,
+ DisallowHeapAllocation* no_gc);
+ inline TransitionsAccessor(Isolate* isolate, Handle<Map> map);
// Insert a new transition into |map|'s transition array, extending it
// as necessary.
// Requires the constructor that takes a Handle<Map> to have been used.
@@ -70,8 +63,8 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
return FindTransitionToDataProperty(name, kFieldOnly);
}
- Handle<String> ExpectedTransitionKey();
- Handle<Map> ExpectedTransitionTarget();
+ inline Handle<String> ExpectedTransitionKey();
+ inline Handle<Map> ExpectedTransitionTarget();
int NumberOfTransitions();
// The size of transition arrays are limited so they do not end up in large
@@ -91,7 +84,7 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
PropertyAttributes* out_integrity_level = nullptr);
// ===== ITERATION =====
- typedef void (*TraverseCallback)(Map map, void* data);
+ using TraverseCallback = void (*)(Map map, void* data);
// Traverse the transition tree in postorder.
void TraverseTransitionTree(TraverseCallback callback, void* data) {
@@ -143,11 +136,7 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
kFullTransitionArray,
};
- void Reload() {
- DCHECK(!map_handle_.is_null());
- map_ = *map_handle_;
- Initialize();
- }
+ inline void Reload();
inline Encoding encoding() {
DCHECK(!needs_reload_);
@@ -170,7 +159,7 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
#endif
}
- void Initialize();
+ inline void Initialize();
inline Map GetSimpleTransition();
bool HasSimpleTransitionTo(Map map);
@@ -358,4 +347,4 @@ class TransitionArray : public WeakFixedArray {
#include "src/objects/object-macros-undef.h"
-#endif // V8_TRANSITIONS_H_
+#endif // V8_OBJECTS_TRANSITIONS_H_
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/objects/type-hints.cc
index c6fd06f9c8..cb0a6a4ea9 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/objects/type-hints.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/type-hints.h"
+#include "src/objects/type-hints.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/objects/type-hints.h
index 3f34f925c4..1aa2709665 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/objects/type-hints.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TYPE_HINTS_H_
-#define V8_TYPE_HINTS_H_
+#ifndef V8_OBJECTS_TYPE_HINTS_H_
+#define V8_OBJECTS_TYPE_HINTS_H_
#include "src/base/flags.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -74,4 +74,4 @@ std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags);
} // namespace internal
} // namespace v8
-#endif // V8_TYPE_HINTS_H_
+#endif // V8_OBJECTS_TYPE_HINTS_H_
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 3adef409a3..331a12b157 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -2,29 +2,29 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/value-serializer.h"
+#include "src/objects/value-serializer.h"
#include <type_traits>
#include "include/v8-value-serializer-version.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/logging.h"
-#include "src/conversions.h"
-#include "src/flags.h"
-#include "src/handles-inl.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/maybe-handles-inl.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/maybe-handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/smi.h"
+#include "src/objects/transitions-inl.h"
#include "src/snapshot/code-serializer.h"
-#include "src/transitions.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
@@ -268,12 +268,12 @@ void ValueSerializer::WriteTwoByteString(Vector<const uc16> chars) {
}
void ValueSerializer::WriteBigIntContents(BigInt bigint) {
- uint32_t bitfield = bigint->GetBitfieldForSerialization();
+ uint32_t bitfield = bigint.GetBitfieldForSerialization();
int bytelength = BigInt::DigitsByteLengthForBitfield(bitfield);
WriteVarint<uint32_t>(bitfield);
uint8_t* dest;
if (ReserveRawBytes(bytelength).To(&dest)) {
- bigint->SerializeDigits(dest);
+ bigint.SerializeDigits(dest);
}
}
@@ -356,7 +356,7 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
}
DCHECK(object->IsHeapObject());
- switch (HeapObject::cast(*object)->map()->instance_type()) {
+ switch (HeapObject::cast(*object).map().instance_type()) {
case ODDBALL_TYPE:
WriteOddball(Oddball::cast(*object));
return ThrowIfOutOfMemory();
@@ -401,7 +401,7 @@ Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
void ValueSerializer::WriteOddball(Oddball oddball) {
SerializationTag tag = SerializationTag::kUndefined;
- switch (oddball->kind()) {
+ switch (oddball.kind()) {
case Oddball::kUndefined:
tag = SerializationTag::kUndefined;
break;
@@ -416,7 +416,6 @@ void ValueSerializer::WriteOddball(Oddball oddball) {
break;
default:
UNREACHABLE();
- break;
}
WriteTag(tag);
}
@@ -424,17 +423,17 @@ void ValueSerializer::WriteOddball(Oddball oddball) {
void ValueSerializer::WriteSmi(Smi smi) {
static_assert(kSmiValueSize <= 32, "Expected SMI <= 32 bits.");
WriteTag(SerializationTag::kInt32);
- WriteZigZag<int32_t>(smi->value());
+ WriteZigZag<int32_t>(smi.value());
}
void ValueSerializer::WriteHeapNumber(HeapNumber number) {
WriteTag(SerializationTag::kDouble);
- WriteDouble(number->value());
+ WriteDouble(number.value());
}
void ValueSerializer::WriteMutableHeapNumber(MutableHeapNumber number) {
WriteTag(SerializationTag::kDouble);
- WriteDouble(number->value());
+ WriteDouble(number.value());
}
void ValueSerializer::WriteBigInt(BigInt bigint) {
@@ -478,7 +477,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
*id_map_entry = id + 1;
// Eliminate callable and exotic objects, which should not be serialized.
- InstanceType instance_type = receiver->map()->instance_type();
+ InstanceType instance_type = receiver->map().instance_type();
if (receiver->IsCallable() || (IsSpecialReceiverInstanceType(instance_type) &&
instance_type != JS_SPECIAL_API_OBJECT_TYPE)) {
ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
@@ -544,9 +543,9 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
}
Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
- DCHECK(!object->map()->IsCustomElementsReceiverMap());
+ DCHECK(!object->map().IsCustomElementsReceiverMap());
const bool can_serialize_fast =
- object->HasFastProperties() && object->elements()->length() == 0;
+ object->HasFastProperties() && object->elements().length() == 0;
if (!can_serialize_fast) return WriteJSObjectSlow(object);
Handle<Map> map(object->map(), isolate_);
@@ -557,9 +556,9 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
uint32_t properties_written = 0;
bool map_changed = false;
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- Handle<Name> key(map->instance_descriptors()->GetKey(i), isolate_);
+ Handle<Name> key(map->instance_descriptors().GetKey(i), isolate_);
if (!key->IsString()) continue;
- PropertyDetails details = map->instance_descriptors()->GetDetails(i);
+ PropertyDetails details = map->instance_descriptors().GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> value;
@@ -607,7 +606,7 @@ Maybe<bool> ValueSerializer::WriteJSObjectSlow(Handle<JSObject> object) {
Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
uint32_t length = 0;
- bool valid_length = array->length()->ToArrayLength(&length);
+ bool valid_length = array->length().ToArrayLength(&length);
DCHECK(valid_length);
USE(valid_length);
@@ -654,7 +653,7 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
// Fall back to slow path.
break;
}
- Handle<Object> element(FixedArray::cast(array->elements())->get(i),
+ Handle<Object> element(FixedArray::cast(array->elements()).get(i),
isolate_);
if (!WriteObject(element).FromMaybe(false)) return Nothing<bool>();
}
@@ -718,26 +717,26 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
void ValueSerializer::WriteJSDate(JSDate date) {
WriteTag(SerializationTag::kDate);
- WriteDouble(date->value()->Number());
+ WriteDouble(date.value().Number());
}
Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
Object inner_value = value->value();
- if (inner_value->IsTrue(isolate_)) {
+ if (inner_value.IsTrue(isolate_)) {
WriteTag(SerializationTag::kTrueObject);
- } else if (inner_value->IsFalse(isolate_)) {
+ } else if (inner_value.IsFalse(isolate_)) {
WriteTag(SerializationTag::kFalseObject);
- } else if (inner_value->IsNumber()) {
+ } else if (inner_value.IsNumber()) {
WriteTag(SerializationTag::kNumberObject);
- WriteDouble(inner_value->Number());
- } else if (inner_value->IsBigInt()) {
+ WriteDouble(inner_value.Number());
+ } else if (inner_value.IsBigInt()) {
WriteTag(SerializationTag::kBigIntObject);
WriteBigIntContents(BigInt::cast(inner_value));
- } else if (inner_value->IsString()) {
+ } else if (inner_value.IsString()) {
WriteTag(SerializationTag::kStringObject);
WriteString(handle(String::cast(inner_value), isolate_));
} else {
- DCHECK(inner_value->IsSymbol());
+ DCHECK(inner_value.IsSymbol());
ThrowDataCloneError(MessageTemplate::kDataCloneError, value);
return Nothing<bool>();
}
@@ -746,8 +745,8 @@ Maybe<bool> ValueSerializer::WriteJSValue(Handle<JSValue> value) {
void ValueSerializer::WriteJSRegExp(JSRegExp regexp) {
WriteTag(SerializationTag::kRegExp);
- WriteString(handle(regexp->Pattern(), isolate_));
- WriteVarint(static_cast<uint32_t>(regexp->GetFlags()));
+ WriteString(handle(regexp.Pattern(), isolate_));
+ WriteVarint(static_cast<uint32_t>(regexp.GetFlags()));
}
Maybe<bool> ValueSerializer::WriteJSMap(Handle<JSMap> map) {
@@ -856,8 +855,8 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
}
WriteTag(SerializationTag::kArrayBufferView);
ArrayBufferViewTag tag = ArrayBufferViewTag::kInt8Array;
- if (view->IsJSTypedArray()) {
- switch (JSTypedArray::cast(view)->type()) {
+ if (view.IsJSTypedArray()) {
+ switch (JSTypedArray::cast(view).type()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case kExternal##Type##Array: \
tag = ArrayBufferViewTag::k##Type##Array; \
@@ -866,12 +865,12 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView view) {
#undef TYPED_ARRAY_CASE
}
} else {
- DCHECK(view->IsJSDataView());
+ DCHECK(view.IsJSDataView());
tag = ArrayBufferViewTag::kDataView;
}
WriteVarint(static_cast<uint8_t>(tag));
- WriteVarint(static_cast<uint32_t>(view->byte_offset()));
- WriteVarint(static_cast<uint32_t>(view->byte_length()));
+ WriteVarint(static_cast<uint32_t>(view.byte_offset()));
+ WriteVarint(static_cast<uint32_t>(view.byte_length()));
return ThrowIfOutOfMemory();
}
@@ -900,7 +899,7 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
WriteVarint<uint32_t>(static_cast<uint32_t>(wire_bytes.size()));
uint8_t* destination;
if (ReserveRawBytes(wire_bytes.size()).To(&destination)) {
- memcpy(destination, wire_bytes.start(), wire_bytes.size());
+ memcpy(destination, wire_bytes.begin(), wire_bytes.size());
}
wasm::WasmSerializer wasm_serializer(native_module);
@@ -917,7 +916,7 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<WasmModuleObject> object) {
}
Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
- if (!object->array_buffer()->is_shared()) {
+ if (!object->array_buffer().is_shared()) {
ThrowDataCloneError(MessageTemplate::kDataCloneError, object);
return Nothing<bool>();
}
@@ -1007,8 +1006,8 @@ ValueDeserializer::ValueDeserializer(Isolate* isolate,
v8::ValueDeserializer::Delegate* delegate)
: isolate_(isolate),
delegate_(delegate),
- position_(data.start()),
- end_(data.start() + data.length()),
+ position_(data.begin()),
+ end_(data.begin() + data.length()),
allocation_(data.length() > kPretenureThreshold ? AllocationType::kOld
: AllocationType::kYoung),
id_map_(isolate->global_handles()->Create(
@@ -1897,13 +1896,13 @@ MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
const std::vector<Handle<Object>>& properties) {
JSObject::AllocateStorageForMap(object, map);
- DCHECK(!object->map()->is_dictionary_map());
+ DCHECK(!object->map().is_dictionary_map());
DisallowHeapAllocation no_gc;
- DescriptorArray descriptors = object->map()->instance_descriptors();
+ DescriptorArray descriptors = object->map().instance_descriptors();
for (unsigned i = 0; i < properties.size(); i++) {
// Initializing store.
- object->WriteToField(i, descriptors->GetDetails(i), *properties[i]);
+ object->WriteToField(i, descriptors.GetDetails(i), *properties[i]);
}
}
@@ -1921,7 +1920,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool transitioning = true;
Handle<Map> map(object->map(), isolate_);
DCHECK(!map->is_dictionary_map());
- DCHECK_EQ(0, map->instance_descriptors()->number_of_descriptors());
+ DCHECK_EQ(0, map->instance_descriptors().number_of_descriptors());
std::vector<Handle<Object>> properties;
properties.reserve(8);
@@ -1972,13 +1971,13 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
if (transitioning) {
int descriptor = static_cast<int>(properties.size());
PropertyDetails details =
- target->instance_descriptors()->GetDetails(descriptor);
+ target->instance_descriptors().GetDetails(descriptor);
Representation expected_representation = details.representation();
if (value->FitsRepresentation(expected_representation)) {
if (expected_representation.IsHeapObject() &&
!target->instance_descriptors()
- ->GetFieldType(descriptor)
- ->NowContains(value)) {
+ .GetFieldType(descriptor)
+ .NowContains(value)) {
Handle<FieldType> value_type =
value->OptimalType(isolate_, expected_representation);
Map::GeneralizeField(isolate_, target, descriptor,
@@ -1986,8 +1985,8 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
value_type);
}
DCHECK(target->instance_descriptors()
- ->GetFieldType(descriptor)
- ->NowContains(value));
+ .GetFieldType(descriptor)
+ .NowContains(value));
properties.push_back(value);
map = target;
continue;
@@ -2050,7 +2049,7 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
bool ValueDeserializer::HasObjectWithID(uint32_t id) {
return id < static_cast<unsigned>(id_map_->length()) &&
- !id_map_->get(id)->IsTheHole(isolate_);
+ !id_map_->get(id).IsTheHole(isolate_);
}
MaybeHandle<JSReceiver> ValueDeserializer::GetObjectWithID(uint32_t id) {
@@ -2058,8 +2057,8 @@ MaybeHandle<JSReceiver> ValueDeserializer::GetObjectWithID(uint32_t id) {
return MaybeHandle<JSReceiver>();
}
Object value = id_map_->get(id);
- if (value->IsTheHole(isolate_)) return MaybeHandle<JSReceiver>();
- DCHECK(value->IsJSReceiver());
+ if (value.IsTheHole(isolate_)) return MaybeHandle<JSReceiver>();
+ DCHECK(value.IsJSReceiver());
return Handle<JSReceiver>(JSReceiver::cast(value), isolate_);
}
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index 585d0d4768..b83227d9d3 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_VALUE_SERIALIZER_H_
-#define V8_VALUE_SERIALIZER_H_
+#ifndef V8_OBJECTS_VALUE_SERIALIZER_H_
+#define V8_OBJECTS_VALUE_SERIALIZER_H_
#include <cstdint>
#include <vector>
@@ -11,10 +11,10 @@
#include "include/v8.h"
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
-#include "src/identity-map.h"
-#include "src/maybe-handles.h"
-#include "src/message-template.h"
-#include "src/vector.h"
+#include "src/execution/message-template.h"
+#include "src/handles/maybe-handles.h"
+#include "src/utils/identity-map.h"
+#include "src/utils/vector.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -311,4 +311,4 @@ class ValueDeserializer {
} // namespace internal
} // namespace v8
-#endif // V8_VALUE_SERIALIZER_H_
+#endif // V8_OBJECTS_VALUE_SERIALIZER_H_
diff --git a/deps/v8/src/visitors.cc b/deps/v8/src/objects/visitors.cc
index 4bb5c00ed7..7621ce1b9f 100644
--- a/deps/v8/src/visitors.cc
+++ b/deps/v8/src/objects/visitors.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/visitors.h"
+#include "src/objects/visitors.h"
-#include "src/reloc-info.h"
+#include "src/codegen/reloc-info.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/visitors.h b/deps/v8/src/objects/visitors.h
index 840fa29a7b..d36723b440 100644
--- a/deps/v8/src/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_VISITORS_H_
-#define V8_VISITORS_H_
+#ifndef V8_OBJECTS_VISITORS_H_
+#define V8_OBJECTS_VISITORS_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/code.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/foreign.h"
@@ -15,8 +15,6 @@ namespace v8 {
namespace internal {
class CodeDataContainer;
-class MaybeObject;
-class Object;
#define ROOT_ID_LIST(V) \
V(kStringTable, "(Internalized strings)") \
@@ -156,4 +154,4 @@ class ObjectVisitor {
} // namespace internal
} // namespace v8
-#endif // V8_VISITORS_H_
+#endif // V8_OBJECTS_VISITORS_H_
diff --git a/deps/v8/src/parsing/expression-scope-reparenter.cc b/deps/v8/src/parsing/expression-scope-reparenter.cc
index bc7076e5a2..78167a06e7 100644
--- a/deps/v8/src/parsing/expression-scope-reparenter.cc
+++ b/deps/v8/src/parsing/expression-scope-reparenter.cc
@@ -7,7 +7,7 @@
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/expression-scope.h b/deps/v8/src/parsing/expression-scope.h
index 0112cd9a0f..62e8c0a47a 100644
--- a/deps/v8/src/parsing/expression-scope.h
+++ b/deps/v8/src/parsing/expression-scope.h
@@ -6,8 +6,8 @@
#define V8_PARSING_EXPRESSION_SCOPE_H_
#include "src/ast/scopes.h"
-#include "src/function-kind.h"
-#include "src/message-template.h"
+#include "src/execution/message-template.h"
+#include "src/objects/function-kind.h"
#include "src/parsing/scanner.h"
#include "src/zone/zone.h" // For ScopedPtrList.
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index c21fb35ae9..0b05176a77 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -6,7 +6,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/func-name-inferrer.h b/deps/v8/src/parsing/func-name-inferrer.h
index bdc58221e1..66dd21f8cd 100644
--- a/deps/v8/src/parsing/func-name-inferrer.h
+++ b/deps/v8/src/parsing/func-name-inferrer.h
@@ -8,7 +8,7 @@
#include <vector>
#include "src/base/macros.h"
-#include "src/pointer-with-payload.h"
+#include "src/utils/pointer-with-payload.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/literal-buffer.cc b/deps/v8/src/parsing/literal-buffer.cc
new file mode 100644
index 0000000000..6400809a87
--- /dev/null
+++ b/deps/v8/src/parsing/literal-buffer.cc
@@ -0,0 +1,80 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/literal-buffer.h"
+
+#include "src/execution/isolate.h"
+#include "src/heap/factory.h"
+#include "src/utils/memcopy.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<String> LiteralBuffer::Internalize(Isolate* isolate) const {
+ if (is_one_byte()) {
+ return isolate->factory()->InternalizeString(one_byte_literal());
+ }
+ return isolate->factory()->InternalizeString(two_byte_literal());
+}
+
+int LiteralBuffer::NewCapacity(int min_capacity) {
+ return min_capacity < (kMaxGrowth / (kGrowthFactor - 1))
+ ? min_capacity * kGrowthFactor
+ : min_capacity + kMaxGrowth;
+}
+
+void LiteralBuffer::ExpandBuffer() {
+ int min_capacity = Max(kInitialCapacity, backing_store_.length());
+ Vector<byte> new_store = Vector<byte>::New(NewCapacity(min_capacity));
+ if (position_ > 0) {
+ MemCopy(new_store.begin(), backing_store_.begin(), position_);
+ }
+ backing_store_.Dispose();
+ backing_store_ = new_store;
+}
+
+void LiteralBuffer::ConvertToTwoByte() {
+ DCHECK(is_one_byte());
+ Vector<byte> new_store;
+ int new_content_size = position_ * kUC16Size;
+ if (new_content_size >= backing_store_.length()) {
+ // Ensure room for all currently read code units as UC16 as well
+ // as the code unit about to be stored.
+ new_store = Vector<byte>::New(NewCapacity(new_content_size));
+ } else {
+ new_store = backing_store_;
+ }
+ uint8_t* src = backing_store_.begin();
+ uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.begin());
+ for (int i = position_ - 1; i >= 0; i--) {
+ dst[i] = src[i];
+ }
+ if (new_store.begin() != backing_store_.begin()) {
+ backing_store_.Dispose();
+ backing_store_ = new_store;
+ }
+ position_ = new_content_size;
+ is_one_byte_ = false;
+}
+
+void LiteralBuffer::AddTwoByteChar(uc32 code_unit) {
+ DCHECK(!is_one_byte());
+ if (position_ >= backing_store_.length()) ExpandBuffer();
+ if (code_unit <=
+ static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
+ position_ += kUC16Size;
+ } else {
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
+ unibrow::Utf16::LeadSurrogate(code_unit);
+ position_ += kUC16Size;
+ if (position_ >= backing_store_.length()) ExpandBuffer();
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
+ unibrow::Utf16::TrailSurrogate(code_unit);
+ position_ += kUC16Size;
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/parsing/literal-buffer.h b/deps/v8/src/parsing/literal-buffer.h
new file mode 100644
index 0000000000..3d61a00393
--- /dev/null
+++ b/deps/v8/src/parsing/literal-buffer.h
@@ -0,0 +1,104 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_LITERAL_BUFFER_H_
+#define V8_PARSING_LITERAL_BUFFER_H_
+
+#include "src/strings/unicode-decoder.h"
+#include "src/utils/vector.h"
+
+namespace v8 {
+namespace internal {
+
+// LiteralBuffer - Collector of chars of literals.
+class LiteralBuffer final {
+ public:
+ LiteralBuffer() : backing_store_(), position_(0), is_one_byte_(true) {}
+
+ ~LiteralBuffer() { backing_store_.Dispose(); }
+
+ V8_INLINE void AddChar(char code_unit) {
+ DCHECK(IsValidAscii(code_unit));
+ AddOneByteChar(static_cast<byte>(code_unit));
+ }
+
+ V8_INLINE void AddChar(uc32 code_unit) {
+ if (is_one_byte()) {
+ if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
+ AddOneByteChar(static_cast<byte>(code_unit));
+ return;
+ }
+ ConvertToTwoByte();
+ }
+ AddTwoByteChar(code_unit);
+ }
+
+ bool is_one_byte() const { return is_one_byte_; }
+
+ bool Equals(Vector<const char> keyword) const {
+ return is_one_byte() && keyword.length() == position_ &&
+ (memcmp(keyword.begin(), backing_store_.begin(), position_) == 0);
+ }
+
+ Vector<const uint16_t> two_byte_literal() const {
+ return literal<uint16_t>();
+ }
+
+ Vector<const uint8_t> one_byte_literal() const { return literal<uint8_t>(); }
+
+ template <typename Char>
+ Vector<const Char> literal() const {
+ DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
+ DCHECK_EQ(position_ & (sizeof(Char) - 1), 0);
+ return Vector<const Char>(
+ reinterpret_cast<const Char*>(backing_store_.begin()),
+ position_ >> (sizeof(Char) - 1));
+ }
+
+ int length() const { return is_one_byte() ? position_ : (position_ >> 1); }
+
+ void Start() {
+ position_ = 0;
+ is_one_byte_ = true;
+ }
+
+ Handle<String> Internalize(Isolate* isolate) const;
+
+ private:
+ static const int kInitialCapacity = 16;
+ static const int kGrowthFactor = 4;
+ static const int kMaxGrowth = 1 * MB;
+
+ inline bool IsValidAscii(char code_unit) {
+ // Control characters and printable characters span the range of
+ // valid ASCII characters (0-127). Chars are unsigned on some
+ // platforms which causes compiler warnings if the validity check
+ // tests the lower bound >= 0 as it's always true.
+ return iscntrl(code_unit) || isprint(code_unit);
+ }
+
+ V8_INLINE void AddOneByteChar(byte one_byte_char) {
+ DCHECK(is_one_byte());
+ if (position_ >= backing_store_.length()) ExpandBuffer();
+ backing_store_[position_] = one_byte_char;
+ position_ += kOneByteSize;
+ }
+
+ void AddTwoByteChar(uc32 code_unit);
+ int NewCapacity(int min_capacity);
+ void ExpandBuffer();
+ void ConvertToTwoByte();
+
+ Vector<byte> backing_store_;
+ int position_;
+
+ bool is_one_byte_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_LITERAL_BUFFER_H_
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 176350e068..ed9d80861b 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -9,11 +9,11 @@
#include "src/ast/ast.h"
#include "src/base/template-utils.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
-#include "src/counters.h"
-#include "src/hash-seed-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/scope-info.h"
#include "src/zone/zone.h"
@@ -60,12 +60,9 @@ ParseInfo::ParseInfo(Isolate* isolate, AccountingAllocator* zone_allocator)
set_might_always_opt(FLAG_always_opt || FLAG_prepare_always_opt);
set_allow_lazy_compile(FLAG_lazy);
set_allow_natives_syntax(FLAG_allow_natives_syntax);
- set_allow_harmony_public_fields(FLAG_harmony_public_fields);
- set_allow_harmony_static_fields(FLAG_harmony_static_fields);
set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
set_allow_harmony_import_meta(FLAG_harmony_import_meta);
set_allow_harmony_numeric_separator(FLAG_harmony_numeric_separator);
- set_allow_harmony_private_fields(FLAG_harmony_private_fields);
set_allow_harmony_private_methods(FLAG_harmony_private_methods);
}
@@ -93,7 +90,7 @@ ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
// Do not support re-parsing top-level function of a wrapped script.
// TODO(yangguo): consider whether we need a top-level function in a
// wrapped script at all.
- DCHECK_IMPLIES(is_toplevel(), !Script::cast(shared->script())->is_wrapped());
+ DCHECK_IMPLIES(is_toplevel(), !Script::cast(shared->script()).is_wrapped());
set_allow_lazy_parsing(true);
set_asm_wasm_broken(shared->is_asm_wasm_broken());
@@ -116,7 +113,7 @@ ParseInfo::ParseInfo(Isolate* isolate, Handle<SharedFunctionInfo> shared)
set_collect_type_profile(
isolate->is_collecting_type_profile() &&
(shared->HasFeedbackMetadata()
- ? shared->feedback_metadata()->HasTypeProfileSlot()
+ ? shared->feedback_metadata().HasTypeProfileSlot()
: script->IsUserJavaScript()));
}
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index ea6053a2d8..7b74e7aa90 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -10,12 +10,12 @@
#include <vector>
#include "include/v8.h"
-#include "src/function-kind.h"
-#include "src/globals.h"
-#include "src/handles.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/objects/function-kind.h"
#include "src/objects/script.h"
+#include "src/parsing/pending-compilation-error-handler.h"
#include "src/parsing/preparse-data.h"
-#include "src/pending-compilation-error-handler.h"
namespace v8 {
@@ -101,18 +101,12 @@ class V8_EXPORT_PRIVATE ParseInfo {
FLAG_ACCESSOR(kAllowLazyCompile, allow_lazy_compile, set_allow_lazy_compile)
FLAG_ACCESSOR(kAllowNativeSyntax, allow_native_syntax,
set_allow_native_syntax)
- FLAG_ACCESSOR(kAllowHarmonyPublicFields, allow_harmony_public_fields,
- set_allow_harmony_public_fields)
- FLAG_ACCESSOR(kAllowHarmonyStaticFields, allow_harmony_static_fields,
- set_allow_harmony_static_fields)
FLAG_ACCESSOR(kAllowHarmonyDynamicImport, allow_harmony_dynamic_import,
set_allow_harmony_dynamic_import)
FLAG_ACCESSOR(kAllowHarmonyImportMeta, allow_harmony_import_meta,
set_allow_harmony_import_meta)
FLAG_ACCESSOR(kAllowHarmonyNumericSeparator, allow_harmony_numeric_separator,
set_allow_harmony_numeric_separator)
- FLAG_ACCESSOR(kAllowHarmonyPrivateFields, allow_harmony_private_fields,
- set_allow_harmony_private_fields)
FLAG_ACCESSOR(kAllowHarmonyPrivateMethods, allow_harmony_private_methods,
set_allow_harmony_private_methods)
FLAG_ACCESSOR(kIsOneshotIIFE, is_oneshot_iife, set_is_oneshot_iife)
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 37a9a6d73a..0ecd8ecedb 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -11,20 +11,20 @@
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/bailout-reason.h"
#include "src/base/flags.h"
#include "src/base/hashmap.h"
#include "src/base/v8-fallthrough.h"
-#include "src/counters.h"
-#include "src/function-kind.h"
-#include "src/globals.h"
-#include "src/log.h"
-#include "src/message-template.h"
+#include "src/codegen/bailout-reason.h"
+#include "src/common/globals.h"
+#include "src/execution/message-template.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/objects/function-kind.h"
#include "src/parsing/expression-scope.h"
#include "src/parsing/func-name-inferrer.h"
#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
-#include "src/pointer-with-payload.h"
+#include "src/utils/pointer-with-payload.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -263,11 +263,8 @@ class ParserBase {
script_id_(script_id),
default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
allow_natives_(false),
- allow_harmony_public_fields_(false),
- allow_harmony_static_fields_(false),
allow_harmony_dynamic_import_(false),
allow_harmony_import_meta_(false),
- allow_harmony_private_fields_(false),
allow_harmony_private_methods_(false),
allow_eval_cache_(true) {
pointer_buffer_.reserve(32);
@@ -279,8 +276,6 @@ class ParserBase {
void set_allow_##name(bool allow) { allow_##name##_ = allow; }
ALLOW_ACCESSORS(natives)
- ALLOW_ACCESSORS(harmony_public_fields)
- ALLOW_ACCESSORS(harmony_static_fields)
ALLOW_ACCESSORS(harmony_dynamic_import)
ALLOW_ACCESSORS(harmony_import_meta)
ALLOW_ACCESSORS(harmony_private_methods)
@@ -296,13 +291,6 @@ class ParserBase {
scanner()->set_allow_harmony_numeric_separator(allow);
}
- bool allow_harmony_private_fields() const {
- return scanner()->allow_harmony_private_fields();
- }
- void set_allow_harmony_private_fields(bool allow) {
- scanner()->set_allow_harmony_private_fields(allow);
- }
-
uintptr_t stack_limit() const { return stack_limit_; }
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
@@ -539,6 +527,7 @@ class ParserBase {
has_static_computed_names(false),
has_static_class_fields(false),
has_instance_members(false),
+ requires_brand(false),
is_anonymous(false),
static_fields_scope(nullptr),
instance_members_scope(nullptr),
@@ -555,6 +544,7 @@ class ParserBase {
bool has_static_computed_names;
bool has_static_class_fields;
bool has_instance_members;
+ bool requires_brand;
bool is_anonymous;
DeclarationScope* static_fields_scope;
DeclarationScope* instance_members_scope;
@@ -1442,11 +1432,8 @@ class ParserBase {
bool accept_IN_ = true;
bool allow_natives_;
- bool allow_harmony_public_fields_;
- bool allow_harmony_static_fields_;
bool allow_harmony_dynamic_import_;
bool allow_harmony_import_meta_;
- bool allow_harmony_private_fields_;
bool allow_harmony_private_methods_;
bool allow_eval_cache_;
};
@@ -1566,7 +1553,7 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
if (V8_LIKELY(Token::IsPropertyName(next))) {
name = impl()->GetSymbol();
key = factory()->NewStringLiteral(name, pos);
- } else if (allow_harmony_private_fields() && next == Token::PRIVATE_NAME) {
+ } else if (next == Token::PRIVATE_NAME) {
// In the case of a top level function, we completely skip
// analysing it's scope, meaning, we don't have a chance to
// resolve private names and find that they are not enclosed in a
@@ -1574,16 +1561,16 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
//
// Here, we check if this is a new private name reference in a top
// level function and throw an error if so.
- //
- // Bug(v8:7468): This hack will go away once we refactor private
- // name resolution to happen independently from scope resolution.
ClassScope* class_scope = scope()->GetClassScope();
+ // Parse the identifier so that we can display it in the error message
+ name = impl()->GetIdentifier();
if (class_scope == nullptr) {
- ReportMessage(MessageTemplate::kInvalidPrivateFieldResolution);
+ impl()->ReportMessageAt(Scanner::Location(pos, pos + 1),
+ MessageTemplate::kInvalidPrivateFieldResolution,
+ impl()->GetRawNameFromIdentifier(name),
+ kSyntaxError);
return impl()->FailureExpression();
}
-
- name = impl()->GetIdentifier();
key = impl()->ExpressionFromPrivateName(class_scope, name, pos);
} else {
ReportUnexpectedToken(next);
@@ -2128,9 +2115,6 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassInfo* class_info,
DCHECK_EQ(prop_info->position, PropertyPosition::kClassLiteral);
Token::Value name_token = peek();
- DCHECK_IMPLIES(name_token == Token::PRIVATE_NAME,
- allow_harmony_private_fields());
-
int property_beg_pos = scanner()->peek_location().beg_pos;
int name_token_position = property_beg_pos;
ExpressionT name_expression;
@@ -2166,44 +2150,34 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassInfo* class_info,
case ParsePropertyKind::kAssign:
case ParsePropertyKind::kClassField:
case ParsePropertyKind::kShorthandOrClassField:
- case ParsePropertyKind::kNotSet: // This case is a name followed by a name
- // or other property. Here we have to
- // assume that's an uninitialized field
- // followed by a linebreak followed by a
- // property, with ASI adding the
- // semicolon. If not, there will be a
- // syntax error after parsing the first
- // name as an uninitialized field.
- if (allow_harmony_public_fields() || allow_harmony_private_fields()) {
- prop_info->kind = ParsePropertyKind::kClassField;
- DCHECK_IMPLIES(prop_info->is_computed_name, !prop_info->is_private);
-
- if (prop_info->is_static && !allow_harmony_static_fields()) {
- ReportUnexpectedToken(Next());
- return impl()->NullLiteralProperty();
- }
-
- if (!prop_info->is_computed_name) {
- CheckClassFieldName(prop_info->name, prop_info->is_static);
- }
-
- ExpressionT initializer = ParseMemberInitializer(
- class_info, property_beg_pos, prop_info->is_static);
- ExpectSemicolon();
+ case ParsePropertyKind::kNotSet: { // This case is a name followed by a
+ // name or other property. Here we have
+ // to assume that's an uninitialized
+ // field followed by a linebreak
+ // followed by a property, with ASI
+ // adding the semicolon. If not, there
+ // will be a syntax error after parsing
+ // the first name as an uninitialized
+ // field.
+ prop_info->kind = ParsePropertyKind::kClassField;
+ DCHECK_IMPLIES(prop_info->is_computed_name, !prop_info->is_private);
- ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
- name_expression, initializer, ClassLiteralProperty::FIELD,
- prop_info->is_static, prop_info->is_computed_name,
- prop_info->is_private);
- impl()->SetFunctionNameFromPropertyName(result, prop_info->name);
+ if (!prop_info->is_computed_name) {
+ CheckClassFieldName(prop_info->name, prop_info->is_static);
+ }
- return result;
+ ExpressionT initializer = ParseMemberInitializer(
+ class_info, property_beg_pos, prop_info->is_static);
+ ExpectSemicolon();
- } else {
- ReportUnexpectedToken(Next());
- return impl()->NullLiteralProperty();
- }
+ ClassLiteralPropertyT result = factory()->NewClassLiteralProperty(
+ name_expression, initializer, ClassLiteralProperty::FIELD,
+ prop_info->is_static, prop_info->is_computed_name,
+ prop_info->is_private);
+ impl()->SetFunctionNameFromPropertyName(result, prop_info->name);
+ return result;
+ }
case ParsePropertyKind::kMethod: {
// MethodDefinition
// PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
@@ -2782,7 +2756,8 @@ ParserBase<Impl>::ParseYieldExpression() {
impl()->RecordSuspendSourceRange(yieldstar, PositionAfterSemicolon());
function_state_->AddSuspend();
if (IsAsyncGeneratorFunction(function_state_->kind())) {
- // iterator_close and delegated_iterator_output suspend ids.
+ // return, iterator_close and delegated_iterator_output suspend ids.
+ function_state_->AddSuspend();
function_state_->AddSuspend();
function_state_->AddSuspend();
}
@@ -4202,6 +4177,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
function_literal->set_function_token_position(
formal_parameters.scope->start_position());
+ impl()->RecordFunctionLiteralSourceRange(function_literal);
impl()->AddFunctionForNameInference(function_literal);
if (V8_UNLIKELY((FLAG_log_function_events))) {
@@ -4277,19 +4253,32 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
}
is_constructor &= class_info.has_seen_constructor;
- if (V8_UNLIKELY(property_kind == ClassLiteralProperty::FIELD)) {
+ bool is_field = property_kind == ClassLiteralProperty::FIELD;
+
+ if (V8_UNLIKELY(prop_info.is_private)) {
+ DCHECK(!is_constructor);
+ class_info.requires_brand |= !is_field;
+ impl()->DeclarePrivateClassMember(class_scope, prop_info.name, property,
+ property_kind, prop_info.is_static,
+ &class_info);
+ impl()->InferFunctionName();
+ continue;
+ }
+
+ if (V8_UNLIKELY(is_field)) {
+ DCHECK(!prop_info.is_private);
if (prop_info.is_computed_name) {
- DCHECK(!prop_info.is_private);
class_info.computed_field_count++;
}
-
- impl()->DeclareClassField(class_scope, property, prop_info.name,
- prop_info.is_static, prop_info.is_computed_name,
- prop_info.is_private, &class_info);
- } else {
- impl()->DeclareClassProperty(class_scope, name, property, is_constructor,
- &class_info);
+ impl()->DeclarePublicClassField(class_scope, property,
+ prop_info.is_static,
+ prop_info.is_computed_name, &class_info);
+ impl()->InferFunctionName();
+ continue;
}
+
+ impl()->DeclarePublicClassMethod(name, property, is_constructor,
+ &class_info);
impl()->InferFunctionName();
}
@@ -4306,6 +4295,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
return impl()->FailureExpression();
}
+ if (class_info.requires_brand) {
+ class_scope->DeclareBrandVariable(ast_value_factory(), kNoSourcePosition);
+ }
+
return impl()->RewriteClassLiteral(class_scope, name, &class_info,
class_token_pos, end_pos);
}
@@ -5443,7 +5436,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
Consume(Token::FOR);
Expect(Token::LPAREN);
- if (peek() == Token::CONST || (peek() == Token::LET && IsNextLetKeyword())) {
+ bool starts_with_let = peek() == Token::LET;
+ if (peek() == Token::CONST || (starts_with_let && IsNextLetKeyword())) {
// The initializer contains lexical declarations,
// so create an in-between scope.
BlockState for_state(zone(), &scope_);
@@ -5508,10 +5502,12 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
init = impl()->BuildInitializationBlock(&for_info.parsing_result);
} else if (peek() != Token::SEMICOLON) {
// The initializer does not contain declarations.
- int lhs_beg_pos = peek_position();
+ Scanner::Location next_loc = scanner()->peek_location();
+ int lhs_beg_pos = next_loc.beg_pos;
int lhs_end_pos;
bool is_for_each;
ExpressionT expression;
+
{
ExpressionParsingScope parsing_scope(impl());
AcceptINScope scope(this, false);
@@ -5520,6 +5516,10 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
lhs_end_pos = end_position();
is_for_each = CheckInOrOf(&for_info.mode);
if (is_for_each) {
+ if (starts_with_let && for_info.mode == ForEachStatement::ITERATE) {
+ impl()->ReportMessageAt(next_loc, MessageTemplate::kForOfLet);
+ return impl()->NullStatement();
+ }
if (expression->IsPattern()) {
parsing_scope.ValidatePattern(expression, lhs_beg_pos, lhs_end_pos);
} else {
@@ -5791,8 +5791,9 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
bool has_declarations = false;
Scope* inner_block_scope = NewScope(BLOCK_SCOPE);
+ bool starts_with_let = peek() == Token::LET;
if (peek() == Token::VAR || peek() == Token::CONST ||
- (peek() == Token::LET && IsNextLetKeyword())) {
+ (starts_with_let && IsNextLetKeyword())) {
// The initializer contains declarations
// 'for' 'await' '(' ForDeclaration 'of' AssignmentExpression ')'
// Statement
@@ -5826,6 +5827,11 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
// The initializer does not contain declarations.
// 'for' 'await' '(' LeftHandSideExpression 'of' AssignmentExpression ')'
// Statement
+ if (starts_with_let) {
+ impl()->ReportMessageAt(scanner()->peek_location(),
+ MessageTemplate::kForOfLet);
+ return impl()->NullStatement();
+ }
int lhs_beg_pos = peek_position();
BlockState inner_state(&scope_, inner_block_scope);
ExpressionParsingScope parsing_scope(impl());
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index b3ba9011f9..380920b8ba 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -11,21 +11,21 @@
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/ast.h"
#include "src/ast/source-range-ast-visitor.h"
-#include "src/bailout-reason.h"
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
-#include "src/char-predicates-inl.h"
+#include "src/codegen/bailout-reason.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
-#include "src/conversions-inl.h"
-#include "src/log.h"
-#include "src/message-template.h"
+#include "src/execution/message-template.h"
+#include "src/logging/log.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/scope-info.h"
#include "src/parsing/expression-scope-reparenter.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/rewriter.h"
#include "src/runtime/runtime.h"
-#include "src/string-stream.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/strings/string-stream.h"
#include "src/tracing/trace-event.h"
#include "src/zone/zone-list-inl.h"
@@ -422,12 +422,9 @@ Parser::Parser(ParseInfo* info)
allow_lazy_ = info->allow_lazy_compile() && info->allow_lazy_parsing() &&
info->extension() == nullptr && can_compile_lazily;
set_allow_natives(info->allow_natives_syntax());
- set_allow_harmony_public_fields(info->allow_harmony_public_fields());
- set_allow_harmony_static_fields(info->allow_harmony_static_fields());
set_allow_harmony_dynamic_import(info->allow_harmony_dynamic_import());
set_allow_harmony_import_meta(info->allow_harmony_import_meta());
set_allow_harmony_numeric_separator(info->allow_harmony_numeric_separator());
- set_allow_harmony_private_fields(info->allow_harmony_private_fields());
set_allow_harmony_private_methods(info->allow_harmony_private_methods());
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
@@ -522,10 +519,9 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
if (!info->is_eval()) {
event_name = "parse-script";
start = 0;
- end = String::cast(script->source())->length();
+ end = String::cast(script.source()).length();
}
- LOG(isolate,
- FunctionEvent(event_name, script->id(), ms, start, end, "", 0));
+ LOG(isolate, FunctionEvent(event_name, script.id(), ms, start, end, "", 0));
}
return result;
}
@@ -641,6 +637,9 @@ FunctionLiteral* Parser::DoParseProgram(Isolate* isolate, ParseInfo* info) {
DCHECK_NULL(target_stack_);
if (has_error()) return nullptr;
+
+ RecordFunctionLiteralSourceRange(result);
+
return result;
}
@@ -1717,7 +1716,7 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
// try {
// InitialYield;
// ...body...;
- // return undefined; // See comment below
+ // // fall through to the implicit return after the try-finally
// } catch (.catch) {
// %AsyncGeneratorReject(generator, .catch);
// } finally {
@@ -1744,12 +1743,6 @@ void Parser::ParseAndRewriteAsyncGeneratorFunctionBody(
// Don't create iterator result for async generators, as the resume methods
// will create it.
- // TODO(leszeks): This will create another suspend point, which is
- // unnecessary if there is already an unconditional return in the body.
- Statement* final_return = BuildReturnStatement(
- factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
- statements.Add(final_return);
-
try_block = factory()->NewBlock(false, statements);
}
@@ -2453,6 +2446,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_literal->set_function_token_position(function_token_pos);
function_literal->set_suspend_count(suspend_count);
+ RecordFunctionLiteralSourceRange(function_literal);
+
if (should_post_parallel_task) {
// Start a parallel parse / compile task on the compiler dispatcher.
info()->parallel_tasks()->Enqueue(info(), function_name, function_literal);
@@ -2803,20 +2798,16 @@ Variable* Parser::CreatePrivateNameVariable(ClassScope* scope,
return proxy->var();
}
-void Parser::DeclareClassField(ClassScope* scope,
- ClassLiteralProperty* property,
- const AstRawString* property_name,
- bool is_static, bool is_computed_name,
- bool is_private, ClassInfo* class_info) {
- DCHECK(allow_harmony_public_fields() || allow_harmony_private_fields());
-
+void Parser::DeclarePublicClassField(ClassScope* scope,
+ ClassLiteralProperty* property,
+ bool is_static, bool is_computed_name,
+ ClassInfo* class_info) {
if (is_static) {
class_info->static_fields->Add(property, zone());
} else {
class_info->instance_fields->Add(property, zone());
}
- DCHECK_IMPLIES(is_computed_name, !is_private);
if (is_computed_name) {
// We create a synthetic variable name here so that scope
// analysis doesn't dedupe the vars.
@@ -2825,27 +2816,49 @@ void Parser::DeclareClassField(ClassScope* scope,
ast_value_factory(), class_info->computed_field_count));
property->set_computed_name_var(computed_name_var);
class_info->properties->Add(property, zone());
- } else if (is_private) {
- Variable* private_name_var =
- CreatePrivateNameVariable(scope, property_name);
- int pos = property->value()->position();
- if (pos == kNoSourcePosition) {
- pos = property->key()->position();
+ }
+}
+
+void Parser::DeclarePrivateClassMember(ClassScope* scope,
+ const AstRawString* property_name,
+ ClassLiteralProperty* property,
+ ClassLiteralProperty::Kind kind,
+ bool is_static, ClassInfo* class_info) {
+ DCHECK_IMPLIES(kind == ClassLiteralProperty::Kind::METHOD,
+ allow_harmony_private_methods());
+ // TODO(joyee): We do not support private accessors yet (which allow
+ // declaring the same private name twice). Make them noops.
+ if (kind != ClassLiteralProperty::Kind::FIELD &&
+ kind != ClassLiteralProperty::Kind::METHOD) {
+ return;
+ }
+
+ if (kind == ClassLiteralProperty::Kind::FIELD) {
+ if (is_static) {
+ class_info->static_fields->Add(property, zone());
+ } else {
+ class_info->instance_fields->Add(property, zone());
}
- private_name_var->set_initializer_position(pos);
- property->set_private_name_var(private_name_var);
- class_info->properties->Add(property, zone());
}
+
+ Variable* private_name_var = CreatePrivateNameVariable(scope, property_name);
+ int pos = property->value()->position();
+ if (pos == kNoSourcePosition) {
+ pos = property->key()->position();
+ }
+ private_name_var->set_initializer_position(pos);
+ property->set_private_name_var(private_name_var);
+ class_info->properties->Add(property, zone());
}
// This method declares a property of the given class. It updates the
// following fields of class_info, as appropriate:
// - constructor
// - properties
-void Parser::DeclareClassProperty(ClassScope* scope,
- const AstRawString* class_name,
- ClassLiteralProperty* property,
- bool is_constructor, ClassInfo* class_info) {
+void Parser::DeclarePublicClassMethod(const AstRawString* class_name,
+ ClassLiteralProperty* property,
+ bool is_constructor,
+ ClassInfo* class_info) {
if (is_constructor) {
DCHECK(!class_info->constructor);
class_info->constructor = property->value()->AsFunctionLiteral();
@@ -2866,15 +2879,19 @@ FunctionLiteral* Parser::CreateInitializerFunction(
FunctionKind::kClassMembersInitializerFunction);
// function() { .. class fields initializer .. }
ScopedPtrList<Statement> statements(pointer_buffer());
- InitializeClassMembersStatement* static_fields =
+ InitializeClassMembersStatement* stmt =
factory()->NewInitializeClassMembersStatement(fields, kNoSourcePosition);
- statements.Add(static_fields);
- return factory()->NewFunctionLiteral(
+ statements.Add(stmt);
+ FunctionLiteral* result = factory()->NewFunctionLiteral(
ast_value_factory()->GetOneByteString(name), scope, statements, 0, 0, 0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldEagerCompile, scope->start_position(), false,
GetNextFunctionLiteralId());
+
+ RecordFunctionLiteralSourceRange(result);
+
+ return result;
}
// This method generates a ClassLiteral AST node.
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index b863e3ba2a..cb1c473af5 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -12,11 +12,11 @@
#include "src/ast/scopes.h"
#include "src/base/compiler-specific.h"
#include "src/base/threaded-list.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/parsing.h"
#include "src/parsing/preparser.h"
-#include "src/pointer-with-payload.h"
+#include "src/utils/pointer-with-payload.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -233,11 +233,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
parsing_module_, parsing_on_main_thread_);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
- SET_ALLOW(harmony_public_fields);
- SET_ALLOW(harmony_static_fields);
SET_ALLOW(harmony_dynamic_import);
SET_ALLOW(harmony_import_meta);
- SET_ALLOW(harmony_private_fields);
SET_ALLOW(harmony_private_methods);
SET_ALLOW(eval_cache);
#undef SET_ALLOW
@@ -316,6 +313,19 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int class_token_pos, int end_pos);
void DeclareClassVariable(const AstRawString* name, ClassInfo* class_info,
int class_token_pos);
+ void DeclareClassBrandVariable(ClassScope* scope, ClassInfo* class_info,
+ int class_token_pos);
+ void DeclarePrivateClassMember(ClassScope* scope,
+ const AstRawString* property_name,
+ ClassLiteralProperty* property,
+ ClassLiteralProperty::Kind kind,
+ bool is_static, ClassInfo* class_info);
+ void DeclarePublicClassMethod(const AstRawString* class_name,
+ ClassLiteralProperty* property,
+ bool is_constructor, ClassInfo* class_info);
+ void DeclarePublicClassField(ClassScope* scope,
+ ClassLiteralProperty* property, bool is_static,
+ bool is_computed_name, ClassInfo* class_info);
void DeclareClassProperty(ClassScope* scope, const AstRawString* class_name,
ClassLiteralProperty* property, bool is_constructor,
ClassInfo* class_info);
@@ -631,7 +641,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
if (expr->IsStringLiteral()) return expr;
ScopedPtrList<Expression> args(pointer_buffer());
args.Add(expr);
- return factory()->NewCallRuntime(Runtime::kInlineToString, args,
+ return factory()->NewCallRuntime(Runtime::kInlineToStringRT, args,
expr->position());
}
@@ -701,6 +711,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
scanner_.set_parser_error();
}
+ const AstRawString* GetRawNameFromIdentifier(const AstRawString* arg) {
+ return arg;
+ }
+
void ReportUnexpectedTokenAt(
Scanner::Location location, Token::Value token,
MessageTemplate message = MessageTemplate::kUnexpectedToken);
@@ -919,6 +933,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
new (zone()) ConditionalSourceRanges(then_range, else_range));
}
+ V8_INLINE void RecordFunctionLiteralSourceRange(FunctionLiteral* node) {
+ if (source_range_map_ == nullptr) return;
+ source_range_map_->Insert(node, new (zone()) FunctionLiteralSourceRanges);
+ }
+
V8_INLINE void RecordBinaryOperationSourceRange(
Expression* node, const SourceRange& right_range) {
if (source_range_map_ == nullptr) return;
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
index eb17d17793..af4cb9b5ee 100644
--- a/deps/v8/src/parsing/parsing.cc
+++ b/deps/v8/src/parsing/parsing.cc
@@ -7,11 +7,11 @@
#include <memory>
#include "src/ast/ast.h"
-#include "src/objects-inl.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
-#include "src/vm-state-inl.h"
#include "src/zone/zone-list-inl.h" // crbug.com/v8/8816
namespace v8 {
diff --git a/deps/v8/src/parsing/parsing.h b/deps/v8/src/parsing/parsing.h
index ab78064f4c..5f19500a19 100644
--- a/deps/v8/src/parsing/parsing.h
+++ b/deps/v8/src/parsing/parsing.h
@@ -5,7 +5,7 @@
#ifndef V8_PARSING_PARSING_H_
#define V8_PARSING_PARSING_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/parsing/pending-compilation-error-handler.cc
index c533aa1e18..b6331b2f9d 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/pending-compilation-error-handler.h"
+#include "src/parsing/pending-compilation-error-handler.h"
#include "src/ast/ast-value-factory.h"
#include "src/debug/debug.h"
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/messages.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/execution/messages.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -107,7 +107,6 @@ void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
break;
default:
UNREACHABLE();
- break;
}
if (!error->IsJSObject()) {
diff --git a/deps/v8/src/pending-compilation-error-handler.h b/deps/v8/src/parsing/pending-compilation-error-handler.h
index d355ef544a..c6b9559931 100644
--- a/deps/v8/src/pending-compilation-error-handler.h
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PENDING_COMPILATION_ERROR_HANDLER_H_
-#define V8_PENDING_COMPILATION_ERROR_HANDLER_H_
+#ifndef V8_PARSING_PENDING_COMPILATION_ERROR_HANDLER_H_
+#define V8_PARSING_PENDING_COMPILATION_ERROR_HANDLER_H_
#include <forward_list>
#include "src/base/macros.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/message-template.h"
+#include "src/common/globals.h"
+#include "src/execution/message-template.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
@@ -119,4 +119,4 @@ class PendingCompilationErrorHandler {
} // namespace internal
} // namespace v8
-#endif // V8_PENDING_COMPILATION_ERROR_HANDLER_H_
+#endif // V8_PARSING_PENDING_COMPILATION_ERROR_HANDLER_H_
diff --git a/deps/v8/src/parsing/preparse-data-impl.h b/deps/v8/src/parsing/preparse-data-impl.h
index 22585723c2..0bc8027266 100644
--- a/deps/v8/src/parsing/preparse-data-impl.h
+++ b/deps/v8/src/parsing/preparse-data-impl.h
@@ -7,7 +7,7 @@
#include "src/parsing/preparse-data.h"
-#include "src/assert-scope.h"
+#include "src/common/assert-scope.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 52adc6934b..7f33d301cb 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -8,8 +8,8 @@
#include "src/ast/scopes.h"
#include "src/ast/variables.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/preparse-data-impl.h"
@@ -420,7 +420,7 @@ Handle<PreparseData> PreparseDataBuilder::ByteData::CopyToHeap(
int data_length = zone_byte_data_.length();
Handle<PreparseData> data =
isolate->factory()->NewPreparseData(data_length, children_length);
- data->copy_in(0, zone_byte_data_.start(), data_length);
+ data->copy_in(0, zone_byte_data_.begin(), data_length);
return data;
}
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index fcdc693ec8..613f13bc82 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -5,10 +5,10 @@
#ifndef V8_PARSING_PREPARSE_DATA_H_
#define V8_PARSING_PREPARSE_DATA_H_
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/maybe-handles.h"
-#include "src/vector.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/handles/maybe-handles.h"
+#include "src/utils/vector.h"
#include "src/zone/zone-chunk-list.h"
#include "src/zone/zone-containers.h"
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 1b59a6d804..5d11bddb41 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -4,16 +4,16 @@
#include <cmath>
-#include "src/allocation.h"
#include "src/base/logging.h"
-#include "src/conversions-inl.h"
-#include "src/conversions.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/numbers/conversions-inl.h"
+#include "src/numbers/conversions.h"
#include "src/parsing/parser-base.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
-#include "src/unicode.h"
-#include "src/utils.h"
+#include "src/strings/unicode.h"
+#include "src/utils/allocation.h"
+#include "src/utils/utils.h"
#include "src/zone/zone-list-inl.h"
namespace v8 {
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 06c33eb42d..cca3b3675d 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -8,8 +8,8 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/parsing/parser-base.h"
+#include "src/parsing/pending-compilation-error-handler.h"
#include "src/parsing/preparser-logger.h"
-#include "src/pending-compilation-error-handler.h"
namespace v8 {
namespace internal {
@@ -1230,32 +1230,39 @@ class PreParser : public ParserBase<PreParser> {
&was_added);
}
}
- V8_INLINE void DeclareClassProperty(ClassScope* scope,
- const PreParserIdentifier& class_name,
- const PreParserExpression& property,
- bool is_constructor,
- ClassInfo* class_info) {}
-
- V8_INLINE void DeclareClassField(ClassScope* scope,
- const PreParserExpression& property,
- const PreParserIdentifier& property_name,
- bool is_static, bool is_computed_name,
- bool is_private, ClassInfo* class_info) {
- DCHECK_IMPLIES(is_computed_name, !is_private);
+ V8_INLINE void DeclarePublicClassMethod(const PreParserIdentifier& class_name,
+ const PreParserExpression& property,
+ bool is_constructor,
+ ClassInfo* class_info) {}
+ V8_INLINE void DeclarePublicClassField(ClassScope* scope,
+ const PreParserExpression& property,
+ bool is_static, bool is_computed_name,
+ ClassInfo* class_info) {
if (is_computed_name) {
bool was_added;
DeclareVariableName(
ClassFieldVariableName(ast_value_factory(),
class_info->computed_field_count),
VariableMode::kConst, scope, &was_added);
- } else if (is_private) {
- bool was_added;
- DeclarePrivateVariableName(property_name.string_, scope, &was_added);
- if (!was_added) {
- Scanner::Location loc(property.position(), property.position() + 1);
- ReportMessageAt(loc, MessageTemplate::kVarRedeclaration,
- property_name.string_);
- }
+ }
+ }
+
+ V8_INLINE void DeclarePrivateClassMember(
+ ClassScope* scope, const PreParserIdentifier& property_name,
+ const PreParserExpression& property, ClassLiteralProperty::Kind kind,
+ bool is_static, ClassInfo* class_info) {
+ // TODO(joyee): We do not support private accessors yet (which allow
+ // declaring the same private name twice). Make them noops.
+ if (kind != ClassLiteralProperty::Kind::FIELD &&
+ kind != ClassLiteralProperty::Kind::METHOD) {
+ return;
+ }
+ bool was_added;
+ DeclarePrivateVariableName(property_name.string_, scope, &was_added);
+ if (!was_added) {
+ Scanner::Location loc(property.position(), property.position() + 1);
+ ReportMessageAt(loc, MessageTemplate::kVarRedeclaration,
+ property_name.string_);
}
}
@@ -1505,6 +1512,10 @@ class PreParser : public ParserBase<PreParser> {
scanner()->set_parser_error();
}
+ const AstRawString* GetRawNameFromIdentifier(const PreParserIdentifier& arg) {
+ return arg.string_;
+ }
+
// "null" return type creators.
V8_INLINE static PreParserIdentifier NullIdentifier() {
return PreParserIdentifier::Null();
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 5ab1937c3c..94d8c90a79 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -6,7 +6,7 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/zone/zone-list-inl.h"
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index d58e346904..7758b2bb73 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -8,12 +8,12 @@
#include <vector>
#include "include/v8.h"
-#include "src/counters.h"
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/scanner.h"
-#include "src/unicode-inl.h"
+#include "src/strings/unicode-inl.h"
namespace v8 {
namespace internal {
@@ -22,11 +22,11 @@ class ScopedExternalStringLock {
public:
explicit ScopedExternalStringLock(ExternalString string) {
DCHECK(!string.is_null());
- if (string->IsExternalOneByteString()) {
- resource_ = ExternalOneByteString::cast(string)->resource();
+ if (string.IsExternalOneByteString()) {
+ resource_ = ExternalOneByteString::cast(string).resource();
} else {
- DCHECK(string->IsExternalTwoByteString());
- resource_ = ExternalTwoByteString::cast(string)->resource();
+ DCHECK(string.IsExternalTwoByteString());
+ resource_ = ExternalTwoByteString::cast(string).resource();
}
DCHECK(resource_);
resource_->Lock();
@@ -50,21 +50,6 @@ const unibrow::uchar kUtf8Bom = 0xFEFF;
} // namespace
template <typename Char>
-struct CharTraits;
-
-template <>
-struct CharTraits<uint8_t> {
- using String = SeqOneByteString;
- using ExternalString = ExternalOneByteString;
-};
-
-template <>
-struct CharTraits<uint16_t> {
- using String = SeqTwoByteString;
- using ExternalString = ExternalTwoByteString;
-};
-
-template <typename Char>
struct Range {
const Char* start;
const Char* end;
@@ -115,7 +100,7 @@ class ExternalStringStream {
ExternalStringStream(ExternalString string, size_t start_offset,
size_t length)
: lock_(string),
- data_(string->GetChars() + start_offset),
+ data_(string.GetChars() + start_offset),
length_(length) {}
ExternalStringStream(const ExternalStringStream& other) V8_NOEXCEPT
@@ -761,9 +746,9 @@ Utf16CharacterStream* ScannerStream::For(Isolate* isolate, Handle<String> data,
size_t start_offset = 0;
if (data->IsSlicedString()) {
SlicedString string = SlicedString::cast(*data);
- start_offset = string->offset();
- String parent = string->parent();
- if (parent->IsThinString()) parent = ThinString::cast(parent)->actual();
+ start_offset = string.offset();
+ String parent = string.parent();
+ if (parent.IsThinString()) parent = ThinString::cast(parent).actual();
data = handle(parent, isolate);
} else {
data = String::Flatten(isolate, data);
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 4c85f5383f..4b85567480 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -6,7 +6,7 @@
#define V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
#include "include/v8.h" // for v8::ScriptCompiler
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/scanner-inl.h b/deps/v8/src/parsing/scanner-inl.h
index 86b3f3c606..ef5a8faf85 100644
--- a/deps/v8/src/parsing/scanner-inl.h
+++ b/deps/v8/src/parsing/scanner-inl.h
@@ -5,9 +5,9 @@
#ifndef V8_PARSING_SCANNER_INL_H_
#define V8_PARSING_SCANNER_INL_H_
-#include "src/char-predicates-inl.h"
#include "src/parsing/keywords-gen.h"
#include "src/parsing/scanner.h"
+#include "src/strings/char-predicates-inl.h"
namespace v8 {
namespace internal {
@@ -296,7 +296,7 @@ V8_INLINE Token::Value Scanner::ScanIdentifierOrKeywordInner() {
if (!CanBeKeyword(scan_flags)) return Token::IDENTIFIER;
// Could be a keyword or identifier.
Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
- return KeywordOrIdentifierToken(chars.start(), chars.length());
+ return KeywordOrIdentifierToken(chars.begin(), chars.length());
}
can_be_keyword = CanBeKeyword(scan_flags);
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 08e82bea17..709f28a02d 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -11,7 +11,7 @@
#include <cmath>
#include "src/ast/ast-value-factory.h"
-#include "src/conversions-inl.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/bigint.h"
#include "src/parsing/scanner-inl.h"
#include "src/zone/zone.h"
@@ -55,74 +55,6 @@ class Scanner::ErrorState {
};
// ----------------------------------------------------------------------------
-// Scanner::LiteralBuffer
-
-Handle<String> Scanner::LiteralBuffer::Internalize(Isolate* isolate) const {
- if (is_one_byte()) {
- return isolate->factory()->InternalizeOneByteString(one_byte_literal());
- }
- return isolate->factory()->InternalizeTwoByteString(two_byte_literal());
-}
-
-int Scanner::LiteralBuffer::NewCapacity(int min_capacity) {
- return min_capacity < (kMaxGrowth / (kGrowthFactor - 1))
- ? min_capacity * kGrowthFactor
- : min_capacity + kMaxGrowth;
-}
-
-void Scanner::LiteralBuffer::ExpandBuffer() {
- int min_capacity = Max(kInitialCapacity, backing_store_.length());
- Vector<byte> new_store = Vector<byte>::New(NewCapacity(min_capacity));
- if (position_ > 0) {
- MemCopy(new_store.start(), backing_store_.start(), position_);
- }
- backing_store_.Dispose();
- backing_store_ = new_store;
-}
-
-void Scanner::LiteralBuffer::ConvertToTwoByte() {
- DCHECK(is_one_byte());
- Vector<byte> new_store;
- int new_content_size = position_ * kUC16Size;
- if (new_content_size >= backing_store_.length()) {
- // Ensure room for all currently read code units as UC16 as well
- // as the code unit about to be stored.
- new_store = Vector<byte>::New(NewCapacity(new_content_size));
- } else {
- new_store = backing_store_;
- }
- uint8_t* src = backing_store_.start();
- uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
- for (int i = position_ - 1; i >= 0; i--) {
- dst[i] = src[i];
- }
- if (new_store.start() != backing_store_.start()) {
- backing_store_.Dispose();
- backing_store_ = new_store;
- }
- position_ = new_content_size;
- is_one_byte_ = false;
-}
-
-void Scanner::LiteralBuffer::AddTwoByteChar(uc32 code_unit) {
- DCHECK(!is_one_byte());
- if (position_ >= backing_store_.length()) ExpandBuffer();
- if (code_unit <=
- static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
- position_ += kUC16Size;
- } else {
- *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
- unibrow::Utf16::LeadSurrogate(code_unit);
- position_ += kUC16Size;
- if (position_ >= backing_store_.length()) ExpandBuffer();
- *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
- unibrow::Utf16::TrailSurrogate(code_unit);
- position_ += kUC16Size;
- }
-}
-
-// ----------------------------------------------------------------------------
// Scanner::BookmarkScope
const size_t Scanner::BookmarkScope::kNoBookmark =
@@ -556,12 +488,6 @@ Token::Value Scanner::ScanString() {
}
Token::Value Scanner::ScanPrivateName() {
- if (!allow_harmony_private_fields()) {
- ReportScannerError(source_pos(),
- MessageTemplate::kInvalidOrUnexpectedToken);
- return Token::ILLEGAL;
- }
-
next().literal_chars.Start();
DCHECK_EQ(c0_, '#');
DCHECK(!IsIdentifierStart(kEndOfInput));
@@ -1011,7 +937,7 @@ Token::Value Scanner::ScanIdentifierOrKeywordInnerSlow(bool escaped,
if (can_be_keyword && next().literal_chars.is_one_byte()) {
Vector<const uint8_t> chars = next().literal_chars.one_byte_literal();
Token::Value token =
- KeywordOrIdentifierToken(chars.start(), chars.length());
+ KeywordOrIdentifierToken(chars.begin(), chars.length());
if (IsInRange(token, Token::IDENTIFIER, Token::YIELD)) return token;
if (token == Token::FUTURE_STRICT_RESERVED_WORD) {
@@ -1156,7 +1082,7 @@ const char* Scanner::CurrentLiteralAsCString(Zone* zone) const {
Vector<const uint8_t> vector = literal_one_byte_string();
int length = vector.length();
char* buffer = zone->NewArray<char>(length + 1);
- memcpy(buffer, vector.start(), length);
+ memcpy(buffer, vector.begin(), length);
buffer[length] = '\0';
return buffer;
}
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index c0fdba98df..449aca46ff 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -9,15 +9,15 @@
#include <algorithm>
-#include "src/allocation.h"
#include "src/base/logging.h"
-#include "src/char-predicates.h"
-#include "src/globals.h"
-#include "src/message-template.h"
+#include "src/common/globals.h"
+#include "src/execution/message-template.h"
+#include "src/parsing/literal-buffer.h"
#include "src/parsing/token.h"
-#include "src/pointer-with-payload.h"
-#include "src/unicode-decoder.h"
-#include "src/unicode.h"
+#include "src/strings/char-predicates.h"
+#include "src/strings/unicode.h"
+#include "src/utils/allocation.h"
+#include "src/utils/pointer-with-payload.h"
namespace v8 {
namespace internal {
@@ -345,7 +345,7 @@ class V8_EXPORT_PRIVATE Scanner {
if (peek_location().length() != N + 1) return false;
Vector<const uint8_t> next = next_literal_one_byte_string();
- const char* chars = reinterpret_cast<const char*>(next.start());
+ const char* chars = reinterpret_cast<const char*>(next.begin());
return next.length() == N - 1 && strncmp(s, chars, N - 1) == 0;
}
@@ -355,7 +355,7 @@ class V8_EXPORT_PRIVATE Scanner {
if (!is_literal_one_byte()) return false;
Vector<const uint8_t> current = literal_one_byte_string();
- const char* chars = reinterpret_cast<const char*>(current.start());
+ const char* chars = reinterpret_cast<const char*>(current.begin());
return current.length() == N - 1 && strncmp(s, chars, N - 1) == 0;
}
@@ -406,12 +406,6 @@ class V8_EXPORT_PRIVATE Scanner {
bool FoundHtmlComment() const { return found_html_comment_; }
- bool allow_harmony_private_fields() const {
- return allow_harmony_private_fields_;
- }
- void set_allow_harmony_private_fields(bool allow) {
- allow_harmony_private_fields_ = allow;
- }
bool allow_harmony_numeric_separator() const {
return allow_harmony_numeric_separator_;
}
@@ -430,92 +424,6 @@ class V8_EXPORT_PRIVATE Scanner {
// escape sequences are allowed.
class ErrorState;
- // LiteralBuffer - Collector of chars of literals.
- class LiteralBuffer {
- public:
- LiteralBuffer() : backing_store_(), position_(0), is_one_byte_(true) {}
-
- ~LiteralBuffer() { backing_store_.Dispose(); }
-
- V8_INLINE void AddChar(char code_unit) {
- DCHECK(IsValidAscii(code_unit));
- AddOneByteChar(static_cast<byte>(code_unit));
- }
-
- V8_INLINE void AddChar(uc32 code_unit) {
- if (is_one_byte()) {
- if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
- AddOneByteChar(static_cast<byte>(code_unit));
- return;
- }
- ConvertToTwoByte();
- }
- AddTwoByteChar(code_unit);
- }
-
- bool is_one_byte() const { return is_one_byte_; }
-
- bool Equals(Vector<const char> keyword) const {
- return is_one_byte() && keyword.length() == position_ &&
- (memcmp(keyword.start(), backing_store_.start(), position_) == 0);
- }
-
- Vector<const uint16_t> two_byte_literal() const {
- DCHECK(!is_one_byte());
- DCHECK_EQ(position_ & 0x1, 0);
- return Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(backing_store_.start()),
- position_ >> 1);
- }
-
- Vector<const uint8_t> one_byte_literal() const {
- DCHECK(is_one_byte());
- return Vector<const uint8_t>(
- reinterpret_cast<const uint8_t*>(backing_store_.start()), position_);
- }
-
- int length() const { return is_one_byte() ? position_ : (position_ >> 1); }
-
- void Start() {
- position_ = 0;
- is_one_byte_ = true;
- }
-
- Handle<String> Internalize(Isolate* isolate) const;
-
- private:
- static const int kInitialCapacity = 16;
- static const int kGrowthFactor = 4;
- static const int kMaxGrowth = 1 * MB;
-
- inline bool IsValidAscii(char code_unit) {
- // Control characters and printable characters span the range of
- // valid ASCII characters (0-127). Chars are unsigned on some
- // platforms which causes compiler warnings if the validity check
- // tests the lower bound >= 0 as it's always true.
- return iscntrl(code_unit) || isprint(code_unit);
- }
-
- V8_INLINE void AddOneByteChar(byte one_byte_char) {
- DCHECK(is_one_byte());
- if (position_ >= backing_store_.length()) ExpandBuffer();
- backing_store_[position_] = one_byte_char;
- position_ += kOneByteSize;
- }
-
- void AddTwoByteChar(uc32 code_unit);
- int NewCapacity(int min_capacity);
- void ExpandBuffer();
- void ConvertToTwoByte();
-
- Vector<byte> backing_store_;
- int position_;
-
- bool is_one_byte_;
-
- DISALLOW_COPY_AND_ASSIGN(LiteralBuffer);
- };
-
// The current and look-ahead token.
struct TokenDesc {
Location location = {0, 0};
@@ -813,7 +721,6 @@ class V8_EXPORT_PRIVATE Scanner {
bool found_html_comment_;
// Harmony flags to allow ESNext features.
- bool allow_harmony_private_fields_;
bool allow_harmony_numeric_separator_;
const bool is_module_;
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index e94b03a668..197a26f325 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -6,8 +6,8 @@
#define V8_PARSING_TOKEN_H_
#include "src/base/logging.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ppc/OWNERS b/deps/v8/src/ppc/OWNERS
deleted file mode 100644
index 6d1a8fc472..0000000000
--- a/deps/v8/src/ppc/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-jyan@ca.ibm.com
-joransiu@ca.ibm.com
-michael_dawson@ca.ibm.com
-miladfar@ca.ibm.com \ No newline at end of file
diff --git a/deps/v8/src/profiler/OWNERS b/deps/v8/src/profiler/OWNERS
index 991d9bafa6..7ab7c063da 100644
--- a/deps/v8/src/profiler/OWNERS
+++ b/deps/v8/src/profiler/OWNERS
@@ -1,3 +1,4 @@
alph@chromium.org
+petermarshall@chromium.org
# COMPONENT: Platform>DevTools>JavaScript
diff --git a/deps/v8/src/profiler/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 4f22d1b472..59004380cf 100644
--- a/deps/v8/src/profiler/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -4,9 +4,9 @@
#include "src/profiler/allocation-tracker.h"
-#include "src/frames-inl.h"
-#include "src/global-handles.h"
-#include "src/objects-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/handles/global-handles.h"
+#include "src/objects/objects-inl.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
namespace v8 {
@@ -78,9 +78,8 @@ AllocationTraceTree::AllocationTraceTree()
AllocationTraceNode* AllocationTraceTree::AddPathFromEnd(
const Vector<unsigned>& path) {
AllocationTraceNode* node = root();
- for (unsigned* entry = path.start() + path.length() - 1;
- entry != path.start() - 1;
- --entry) {
+ for (unsigned* entry = path.begin() + path.length() - 1;
+ entry != path.begin() - 1; --entry) {
node = node->FindOrAddChild(*entry);
}
return node;
@@ -137,7 +136,7 @@ void AddressToTraceMap::Clear() {
void AddressToTraceMap::Print() {
- PrintF("[AddressToTraceMap (%" PRIuS "): \n", ranges_.size());
+ PrintF("[AddressToTraceMap (%zu): \n", ranges_.size());
for (RangeMap::iterator it = ranges_.begin(); it != ranges_.end(); ++it) {
PrintF("[%p - %p] => %u\n", reinterpret_cast<void*>(it->second.start),
reinterpret_cast<void*>(it->first), it->second.trace_node_id);
@@ -213,9 +212,9 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
JavaScriptFrameIterator it(isolate);
while (!it.done() && length < kMaxAllocationTraceLength) {
JavaScriptFrame* frame = it.frame();
- SharedFunctionInfo shared = frame->function()->shared();
- SnapshotObjectId id = ids_->FindOrAddEntry(
- shared->address(), shared->Size(), false);
+ SharedFunctionInfo shared = frame->function().shared();
+ SnapshotObjectId id =
+ ids_->FindOrAddEntry(shared.address(), shared.Size(), false);
allocation_trace_buffer_[length++] = AddFunctionInfo(shared, id);
it.Advance();
}
@@ -243,19 +242,19 @@ unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo shared,
reinterpret_cast<void*>(id), SnapshotObjectIdHash(id));
if (entry->value == nullptr) {
FunctionInfo* info = new FunctionInfo();
- info->name = names_->GetName(shared->DebugName());
+ info->name = names_->GetName(shared.DebugName());
info->function_id = id;
- if (shared->script()->IsScript()) {
- Script script = Script::cast(shared->script());
- if (script->name()->IsName()) {
- Name name = Name::cast(script->name());
+ if (shared.script().IsScript()) {
+ Script script = Script::cast(shared.script());
+ if (script.name().IsName()) {
+ Name name = Name::cast(script.name());
info->script_name = names_->GetName(name);
}
- info->script_id = script->id();
+ info->script_id = script.id();
// Converting start offset into line and column may cause heap
// allocations so we postpone them until snapshot serialization.
unresolved_locations_.push_back(
- new UnresolvedLocation(script, shared->StartPosition(), info));
+ new UnresolvedLocation(script, shared.StartPosition(), info));
}
entry->value = reinterpret_cast<void*>(function_info_list_.size());
function_info_list_.push_back(info);
@@ -279,7 +278,7 @@ AllocationTracker::UnresolvedLocation::UnresolvedLocation(Script script,
int start,
FunctionInfo* info)
: start_position_(start), info_(info) {
- script_ = script->GetIsolate()->global_handles()->Create(script);
+ script_ = script.GetIsolate()->global_handles()->Create(script);
GlobalHandles::MakeWeak(script_.location(), this, &HandleWeakScript,
v8::WeakCallbackType::kParameter);
}
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index 8e2c1e5c20..e67994ca19 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -10,8 +10,8 @@
#include "include/v8-profiler.h"
#include "src/base/hashmap.h"
-#include "src/handles.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
@@ -87,7 +87,7 @@ class V8_EXPORT_PRIVATE AddressToTraceMap {
unsigned trace_node_id;
};
// [start, end) -> trace
- typedef std::map<Address, RangeStack> RangeMap;
+ using RangeMap = std::map<Address, RangeStack>;
void RemoveRange(Address start, Address end);
diff --git a/deps/v8/src/profiler/circular-queue.h b/deps/v8/src/profiler/circular-queue.h
index fcbb898571..e9278fbccc 100644
--- a/deps/v8/src/profiler/circular-queue.h
+++ b/deps/v8/src/profiler/circular-queue.h
@@ -6,7 +6,7 @@
#define V8_PROFILER_CIRCULAR_QUEUE_H_
#include "src/base/atomicops.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 43bf90b987..495840fabf 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -10,11 +10,12 @@
#include "src/base/lazy-instance.h"
#include "src/base/template-utils.h"
#include "src/debug/debug.h"
-#include "src/frames-inl.h"
-#include "src/locked-queue-inl.h"
-#include "src/log.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/profiler/cpu-profiler-inl.h"
-#include "src/vm-state-inl.h"
+#include "src/utils/locked-queue-inl.h"
#include "src/wasm/wasm-engine.h"
namespace v8 {
@@ -32,7 +33,9 @@ class CpuSampler : public sampler::Sampler {
TickSample* sample = processor_->StartTickSample();
if (sample == nullptr) return;
Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
- sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame, true);
+ sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame,
+ /* update_stats */ true,
+ /* use_simulator_reg_state */ true, processor_->period());
if (is_counting_samples_ && !sample->timestamp.IsNull()) {
if (sample->state == JS) ++js_sample_count_;
if (sample->state == EXTERNAL) ++external_sample_count_;
@@ -51,7 +54,8 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(Isolate* isolate,
running_(1),
last_code_event_id_(0),
last_processed_code_event_id_(0),
- isolate_(isolate) {}
+ isolate_(isolate),
+ profiling_scope_(isolate) {}
SamplingEventsProcessor::SamplingEventsProcessor(Isolate* isolate,
ProfileGenerator* generator,
@@ -242,6 +246,16 @@ void SamplingEventsProcessor::Run() {
} while (ProcessCodeEvent());
}
+void SamplingEventsProcessor::SetSamplingInterval(base::TimeDelta period) {
+ if (period_ == period) return;
+ StopSynchronously();
+
+ period_ = period;
+ base::Relaxed_Store(&running_, 1);
+
+ StartSynchronously();
+}
+
void* SamplingEventsProcessor::operator new(size_t size) {
return AlignedAlloc(size, alignof(SamplingEventsProcessor));
}
@@ -310,15 +324,17 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager)
} // namespace
-CpuProfiler::CpuProfiler(Isolate* isolate)
- : CpuProfiler(isolate, new CpuProfilesCollection(isolate), nullptr,
- nullptr) {}
+CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode)
+ : CpuProfiler(isolate, naming_mode, new CpuProfilesCollection(isolate),
+ nullptr, nullptr) {}
-CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilesCollection* test_profiles,
+CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
+ CpuProfilesCollection* test_profiles,
ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor)
: isolate_(isolate),
- sampling_interval_(base::TimeDelta::FromMicroseconds(
+ naming_mode_(naming_mode),
+ base_sampling_interval_(base::TimeDelta::FromMicroseconds(
FLAG_cpu_profiler_sampling_interval)),
profiles_(test_profiles),
generator_(test_generator),
@@ -335,7 +351,7 @@ CpuProfiler::~CpuProfiler() {
void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
DCHECK(!is_profiling_);
- sampling_interval_ = value;
+ base_sampling_interval_ = value;
}
void CpuProfiler::set_use_precise_sampling(bool value) {
@@ -362,6 +378,17 @@ void CpuProfiler::CreateEntriesForRuntimeCallStats() {
}
}
+base::TimeDelta CpuProfiler::ComputeSamplingInterval() const {
+ return profiles_->GetCommonSamplingInterval();
+}
+
+void CpuProfiler::AdjustSamplingInterval() {
+ if (!processor_) return;
+
+ base::TimeDelta base_interval = ComputeSamplingInterval();
+ processor_->SetSamplingInterval(base_interval);
+}
+
// static
void CpuProfiler::CollectSample(Isolate* isolate) {
GetProfilersManager()->CallCollectSample(isolate);
@@ -373,18 +400,17 @@ void CpuProfiler::CollectSample() {
}
}
-void CpuProfiler::StartProfiling(const char* title, bool record_samples,
- ProfilingMode mode) {
- if (profiles_->StartProfiling(title, record_samples, mode)) {
+void CpuProfiler::StartProfiling(const char* title,
+ CpuProfilingOptions options) {
+ if (profiles_->StartProfiling(title, options)) {
TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
+ AdjustSamplingInterval();
StartProcessorIfNotStarted();
}
}
-void CpuProfiler::StartProfiling(String title, bool record_samples,
- ProfilingMode mode) {
- StartProfiling(profiles_->GetName(title), record_samples, mode);
- isolate_->debug()->feature_tracker()->Track(DebugFeatureTracker::kProfiler);
+void CpuProfiler::StartProfiling(String title, CpuProfilingOptions options) {
+ StartProfiling(profiles_->GetName(title), options);
}
void CpuProfiler::StartProcessorIfNotStarted() {
@@ -394,9 +420,6 @@ void CpuProfiler::StartProcessorIfNotStarted() {
}
isolate_->wasm_engine()->EnableCodeLogging(isolate_);
Logger* logger = isolate_->logger();
- // Disable logging when using the new implementation.
- saved_is_logging_ = logger->is_logging();
- logger->set_is_logging(false);
bool codemap_needs_initialization = false;
if (!generator_) {
@@ -404,16 +427,17 @@ void CpuProfiler::StartProcessorIfNotStarted() {
codemap_needs_initialization = true;
CreateEntriesForRuntimeCallStats();
}
+ base::TimeDelta sampling_interval = ComputeSamplingInterval();
processor_.reset(new SamplingEventsProcessor(
- isolate_, generator_.get(), sampling_interval_, use_precise_sampling_));
+ isolate_, generator_.get(), sampling_interval, use_precise_sampling_));
if (profiler_listener_) {
profiler_listener_->set_observer(processor_.get());
} else {
- profiler_listener_.reset(new ProfilerListener(isolate_, processor_.get()));
+ profiler_listener_.reset(
+ new ProfilerListener(isolate_, processor_.get(), naming_mode_));
}
logger->AddCodeEventListener(profiler_listener_.get());
is_profiling_ = true;
- isolate_->set_is_profiling(true);
// Enumerate stuff we already have in the heap.
DCHECK(isolate_->heap()->HasBeenSetUp());
if (codemap_needs_initialization) {
@@ -432,7 +456,9 @@ void CpuProfiler::StartProcessorIfNotStarted() {
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
if (!is_profiling_) return nullptr;
StopProcessorIfLastProfile(title);
- return profiles_->StopProfiling(title);
+ CpuProfile* result = profiles_->StopProfiling(title);
+ AdjustSamplingInterval();
+ return result;
}
CpuProfile* CpuProfiler::StopProfiling(String title) {
@@ -447,11 +473,9 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
is_profiling_ = false;
- isolate_->set_is_profiling(false);
logger->RemoveCodeEventListener(profiler_listener_.get());
processor_->StopSynchronously();
processor_.reset();
- logger->set_is_logging(saved_is_logging_);
}
@@ -462,7 +486,7 @@ void CpuProfiler::LogBuiltins() {
CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
Builtins::Name id = static_cast<Builtins::Name>(i);
- rec->instruction_start = builtins->builtin(id)->InstructionStart();
+ rec->instruction_start = builtins->builtin(id).InstructionStart();
rec->builtin_id = id;
processor_->Enqueue(evt_rec);
}
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index cecb0ac28b..96d53356e6 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -7,18 +7,18 @@
#include <memory>
-#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "src/libsampler/sampler.h"
-#include "src/locked-queue.h"
#include "src/profiler/circular-queue.h"
#include "src/profiler/profiler-listener.h"
#include "src/profiler/tick-sample.h"
+#include "src/utils/allocation.h"
+#include "src/utils/locked-queue.h"
namespace v8 {
namespace internal {
@@ -129,6 +129,27 @@ class CodeEventsContainer {
};
};
+// Maintains the number of active CPU profilers in an isolate.
+class ProfilingScope {
+ public:
+ explicit ProfilingScope(Isolate* isolate) : isolate_(isolate) {
+ size_t profiler_count = isolate_->num_cpu_profilers();
+ profiler_count++;
+ isolate_->set_num_cpu_profilers(profiler_count);
+ isolate_->set_is_profiling(true);
+ }
+
+ ~ProfilingScope() {
+ size_t profiler_count = isolate_->num_cpu_profilers();
+ DCHECK_GT(profiler_count, 0);
+ profiler_count--;
+ isolate_->set_num_cpu_profilers(profiler_count);
+ if (profiler_count == 0) isolate_->set_is_profiling(false);
+ }
+
+ private:
+ Isolate* const isolate_;
+};
// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
@@ -151,6 +172,8 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
// Add a sample into the tick sample events buffer. Used for testing.
void AddSample(TickSample sample);
+ virtual void SetSamplingInterval(base::TimeDelta) {}
+
protected:
ProfilerEventsProcessor(Isolate* isolate, ProfileGenerator* generator);
@@ -173,6 +196,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
std::atomic<unsigned> last_code_event_id_;
unsigned last_processed_code_event_id_;
Isolate* isolate_;
+ ProfilingScope profiling_scope_;
};
class V8_EXPORT_PRIVATE SamplingEventsProcessor
@@ -189,6 +213,8 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
void Run() override;
+ void SetSamplingInterval(base::TimeDelta period) override;
+
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
// stack frame entries are filled.) This method returns a pointer to the
@@ -199,6 +225,7 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
inline void FinishTickSample();
sampler::Sampler* sampler() { return sampler_.get(); }
+ base::TimeDelta period() const { return period_; }
private:
SampleProcessingResult ProcessOneSample() override;
@@ -209,31 +236,32 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
std::unique_ptr<sampler::Sampler> sampler_;
- const base::TimeDelta period_; // Samples & code events processing period.
+ base::TimeDelta period_; // Samples & code events processing period.
const bool use_precise_sampling_; // Whether or not busy-waiting is used for
// low sampling intervals on Windows.
};
class V8_EXPORT_PRIVATE CpuProfiler {
public:
- explicit CpuProfiler(Isolate* isolate);
+ explicit CpuProfiler(Isolate* isolate, CpuProfilingNamingMode = kDebugNaming);
- CpuProfiler(Isolate* isolate, CpuProfilesCollection* profiles,
- ProfileGenerator* test_generator,
+ CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
+ CpuProfilesCollection* profiles, ProfileGenerator* test_generator,
ProfilerEventsProcessor* test_processor);
~CpuProfiler();
static void CollectSample(Isolate* isolate);
- typedef v8::CpuProfilingMode ProfilingMode;
+ using ProfilingMode = v8::CpuProfilingMode;
+ using NamingMode = v8::CpuProfilingNamingMode;
+ base::TimeDelta sampling_interval() const { return base_sampling_interval_; }
void set_sampling_interval(base::TimeDelta value);
void set_use_precise_sampling(bool);
void CollectSample();
- void StartProfiling(const char* title, bool record_samples = false,
- ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
- void StartProfiling(String title, bool record_samples, ProfilingMode mode);
+ void StartProfiling(const char* title, CpuProfilingOptions options = {});
+ void StartProfiling(String title, CpuProfilingOptions options = {});
CpuProfile* StopProfiling(const char* title);
CpuProfile* StopProfiling(String title);
int GetProfilesCount();
@@ -259,14 +287,22 @@ class V8_EXPORT_PRIVATE CpuProfiler {
void LogBuiltins();
void CreateEntriesForRuntimeCallStats();
+ // Computes a sampling interval sufficient to accommodate attached profiles.
+ base::TimeDelta ComputeSamplingInterval() const;
+ // Dynamically updates the sampler to use a sampling interval sufficient for
+ // child profiles.
+ void AdjustSamplingInterval();
+
Isolate* const isolate_;
- base::TimeDelta sampling_interval_;
+ const NamingMode naming_mode_;
bool use_precise_sampling_ = true;
+ // Sampling interval to which per-profile sampling intervals will be clamped
+ // to a multiple of, or used as the default if unspecified.
+ base::TimeDelta base_sampling_interval_;
std::unique_ptr<CpuProfilesCollection> profiles_;
std::unique_ptr<ProfileGenerator> generator_;
std::unique_ptr<ProfilerEventsProcessor> processor_;
std::unique_ptr<ProfilerListener> profiler_listener_;
- bool saved_is_logging_;
bool is_profiling_;
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 8018280ff1..a912c2e1b2 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -4,8 +4,9 @@
#include "src/profiler/heap-profiler.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/debug/debug.h"
+#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
@@ -147,7 +148,7 @@ HeapSnapshot* HeapProfiler::GetSnapshot(int index) {
SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
if (!obj->IsHeapObject())
return v8::HeapProfiler::kUnknownObjectId;
- return ids_->FindEntry(HeapObject::cast(*obj)->address());
+ return ids_->FindEntry(HeapObject::cast(*obj).address());
}
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
@@ -172,16 +173,17 @@ void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
Handle<HeapObject> HeapProfiler::FindHeapObjectById(SnapshotObjectId id) {
HeapObject object;
- HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
+ CombinedHeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
// Make sure that object with the given id is still reachable.
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
- if (ids_->FindEntry(obj->address()) == id) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ if (ids_->FindEntry(obj.address()) == id) {
DCHECK(object.is_null());
object = obj;
// Can't break -- kFilterUnreachable requires full heap traversal.
}
}
+
return !object.is_null() ? Handle<HeapObject>(object, isolate())
: Handle<HeapObject>();
}
@@ -203,10 +205,10 @@ void HeapProfiler::QueryObjects(Handle<Context> context,
// We should return accurate information about live objects, so we need to
// collect all garbage first.
heap()->CollectAllAvailableGarbage(GarbageCollectionReason::kHeapProfiler);
- HeapIterator heap_iterator(heap());
- for (HeapObject heap_obj = heap_iterator.next(); !heap_obj.is_null();
- heap_obj = heap_iterator.next()) {
- if (!heap_obj->IsJSObject() || heap_obj->IsExternal(isolate())) continue;
+ CombinedHeapIterator heap_iterator(heap());
+ for (HeapObject heap_obj = heap_iterator.Next(); !heap_obj.is_null();
+ heap_obj = heap_iterator.Next()) {
+ if (!heap_obj.IsJSObject() || heap_obj.IsExternal(isolate())) continue;
v8::Local<v8::Object> v8_obj(
Utils::ToLocal(handle(JSObject::cast(heap_obj), isolate())));
if (!predicate->Filter(v8_obj)) continue;
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index b946f62758..940574282e 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -10,8 +10,8 @@
#include "include/v8-profiler.h"
#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/debug/debug-interface.h"
-#include "src/globals.h"
#include "src/heap/heap.h"
namespace v8 {
diff --git a/deps/v8/src/profiler/heap-snapshot-generator-inl.h b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
index 6ddb6d4658..c1fdd65e5a 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
@@ -8,7 +8,7 @@
#include "src/profiler/heap-snapshot-generator.h"
#include "src/profiler/heap-profiler.h"
-#include "src/string-hasher-inl.h"
+#include "src/strings/string-hasher-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index bf5eeaf6d0..bc171360b5 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -6,14 +6,12 @@
#include <utility>
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
-#include "src/conversions.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/debug/debug.h"
-#include "src/global-handles.h"
-#include "src/layout-descriptor.h"
-#include "src/objects-body-descriptors.h"
-#include "src/objects-inl.h"
+#include "src/handles/global-handles.h"
+#include "src/heap/combined-heap.h"
+#include "src/numbers/conversions.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
@@ -25,16 +23,19 @@
#include "src/objects/js-generator-inl.h"
#include "src/objects/js-promise-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/layout-descriptor.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/objects-body-descriptors.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/prototype.h"
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
+#include "src/objects/transitions-inl.h"
+#include "src/objects/visitors.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
-#include "src/prototype.h"
-#include "src/transitions-inl.h"
-#include "src/vector.h"
-#include "src/visitors.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -103,7 +104,7 @@ void HeapEntry::SetNamedAutoIndexReference(HeapGraphEdge::Type type,
void HeapEntry::Print(
const char* prefix, const char* edge_name, int max_depth, int indent) {
STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
- base::OS::Print("%6" PRIuS " @%6u %*c %s%s: ", self_size(), id(), indent, ' ',
+ base::OS::Print("%6zu @%6u %*c %s%s: ", self_size(), id(), indent, ' ',
prefix, edge_name);
if (type() != kString) {
base::OS::Print("%s %.40s\n", TypeAsString(), name_);
@@ -124,7 +125,7 @@ void HeapEntry::Print(
HeapGraphEdge& edge = **i;
const char* edge_prefix = "";
EmbeddedVector<char, 64> index;
- const char* edge_name = index.start();
+ const char* edge_name = index.begin();
switch (edge.type()) {
case HeapGraphEdge::kContextVariable:
edge_prefix = "#";
@@ -394,14 +395,14 @@ void HeapObjectsMap::UpdateHeapObjectsMap() {
}
heap_->PreciseCollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kHeapProfiler);
- HeapIterator iterator(heap_);
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next()) {
- FindOrAddEntry(obj->address(), obj->Size());
+ CombinedHeapIterator iterator(heap_);
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ FindOrAddEntry(obj.address(), obj.Size());
if (FLAG_heap_profiler_trace_objects) {
PrintF("Update object : %p %6d. Next address is %p\n",
- reinterpret_cast<void*>(obj->address()), obj->Size(),
- reinterpret_cast<void*>(obj->address() + obj->Size()));
+ reinterpret_cast<void*>(obj.address()), obj.Size(),
+ reinterpret_cast<void*>(obj.address() + obj.Size()));
}
}
RemoveDeadEntries();
@@ -507,15 +508,15 @@ HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
}
void V8HeapExplorer::ExtractLocation(HeapEntry* entry, HeapObject object) {
- if (object->IsJSFunction()) {
+ if (object.IsJSFunction()) {
JSFunction func = JSFunction::cast(object);
ExtractLocationForJSFunction(entry, func);
- } else if (object->IsJSGeneratorObject()) {
+ } else if (object.IsJSGeneratorObject()) {
JSGeneratorObject gen = JSGeneratorObject::cast(object);
- ExtractLocationForJSFunction(entry, gen->function());
+ ExtractLocationForJSFunction(entry, gen.function());
- } else if (object->IsJSObject()) {
+ } else if (object.IsJSObject()) {
JSObject obj = JSObject::cast(object);
JSFunction maybe_constructor = GetConstructor(obj);
@@ -527,73 +528,70 @@ void V8HeapExplorer::ExtractLocation(HeapEntry* entry, HeapObject object) {
void V8HeapExplorer::ExtractLocationForJSFunction(HeapEntry* entry,
JSFunction func) {
- if (!func->shared()->script()->IsScript()) return;
- Script script = Script::cast(func->shared()->script());
- int scriptId = script->id();
- int start = func->shared()->StartPosition();
- int line = script->GetLineNumber(start);
- int col = script->GetColumnNumber(start);
+ if (!func.shared().script().IsScript()) return;
+ Script script = Script::cast(func.shared().script());
+ int scriptId = script.id();
+ int start = func.shared().StartPosition();
+ int line = script.GetLineNumber(start);
+ int col = script.GetColumnNumber(start);
snapshot_->AddLocation(entry, scriptId, line, col);
}
HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
- if (object->IsJSFunction()) {
+ if (object.IsJSFunction()) {
JSFunction func = JSFunction::cast(object);
- SharedFunctionInfo shared = func->shared();
- const char* name = names_->GetName(shared->Name());
+ SharedFunctionInfo shared = func.shared();
+ const char* name = names_->GetName(shared.Name());
return AddEntry(object, HeapEntry::kClosure, name);
- } else if (object->IsJSBoundFunction()) {
+ } else if (object.IsJSBoundFunction()) {
return AddEntry(object, HeapEntry::kClosure, "native_bind");
- } else if (object->IsJSRegExp()) {
+ } else if (object.IsJSRegExp()) {
JSRegExp re = JSRegExp::cast(object);
- return AddEntry(object,
- HeapEntry::kRegExp,
- names_->GetName(re->Pattern()));
- } else if (object->IsJSObject()) {
+ return AddEntry(object, HeapEntry::kRegExp, names_->GetName(re.Pattern()));
+ } else if (object.IsJSObject()) {
const char* name = names_->GetName(
GetConstructorName(JSObject::cast(object)));
- if (object->IsJSGlobalObject()) {
+ if (object.IsJSGlobalObject()) {
auto it = objects_tags_.find(JSGlobalObject::cast(object));
if (it != objects_tags_.end()) {
name = names_->GetFormatted("%s / %s", name, it->second);
}
}
return AddEntry(object, HeapEntry::kObject, name);
- } else if (object->IsString()) {
+ } else if (object.IsString()) {
String string = String::cast(object);
- if (string->IsConsString()) {
+ if (string.IsConsString()) {
return AddEntry(object, HeapEntry::kConsString, "(concatenated string)");
- } else if (string->IsSlicedString()) {
+ } else if (string.IsSlicedString()) {
return AddEntry(object, HeapEntry::kSlicedString, "(sliced string)");
} else {
return AddEntry(object, HeapEntry::kString,
names_->GetName(String::cast(object)));
}
- } else if (object->IsSymbol()) {
- if (Symbol::cast(object)->is_private())
+ } else if (object.IsSymbol()) {
+ if (Symbol::cast(object).is_private())
return AddEntry(object, HeapEntry::kHidden, "private symbol");
else
return AddEntry(object, HeapEntry::kSymbol, "symbol");
- } else if (object->IsBigInt()) {
+ } else if (object.IsBigInt()) {
return AddEntry(object, HeapEntry::kBigInt, "bigint");
- } else if (object->IsCode()) {
+ } else if (object.IsCode()) {
return AddEntry(object, HeapEntry::kCode, "");
- } else if (object->IsSharedFunctionInfo()) {
- String name = SharedFunctionInfo::cast(object)->Name();
+ } else if (object.IsSharedFunctionInfo()) {
+ String name = SharedFunctionInfo::cast(object).Name();
return AddEntry(object, HeapEntry::kCode, names_->GetName(name));
- } else if (object->IsScript()) {
- Object name = Script::cast(object)->name();
- return AddEntry(
- object, HeapEntry::kCode,
- name->IsString() ? names_->GetName(String::cast(name)) : "");
- } else if (object->IsNativeContext()) {
+ } else if (object.IsScript()) {
+ Object name = Script::cast(object).name();
+ return AddEntry(object, HeapEntry::kCode,
+ name.IsString() ? names_->GetName(String::cast(name)) : "");
+ } else if (object.IsNativeContext()) {
return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
- } else if (object->IsContext()) {
+ } else if (object.IsContext()) {
return AddEntry(object, HeapEntry::kObject, "system / Context");
- } else if (object->IsFixedArray() || object->IsFixedDoubleArray() ||
- object->IsByteArray()) {
+ } else if (object.IsFixedArray() || object.IsFixedDoubleArray() ||
+ object.IsByteArray()) {
return AddEntry(object, HeapEntry::kArray, "");
- } else if (object->IsHeapNumber()) {
+ } else if (object.IsHeapNumber()) {
return AddEntry(object, HeapEntry::kHeapNumber, "number");
}
return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
@@ -601,7 +599,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
HeapEntry* V8HeapExplorer::AddEntry(HeapObject object, HeapEntry::Type type,
const char* name) {
- return AddEntry(object->address(), type, name, object->Size());
+ return AddEntry(object.address(), type, name, object.Size());
}
HeapEntry* V8HeapExplorer::AddEntry(Address address,
@@ -620,9 +618,9 @@ HeapEntry* V8HeapExplorer::AddEntry(Address address,
}
const char* V8HeapExplorer::GetSystemEntryName(HeapObject object) {
- switch (object->map()->instance_type()) {
+ switch (object.map().instance_type()) {
case MAP_TYPE:
- switch (Map::cast(object)->instance_type()) {
+ switch (Map::cast(object).instance_type()) {
#define MAKE_STRING_MAP_CASE(instance_type, size, name, Name) \
case instance_type: return "system / Map (" #Name ")";
STRING_TYPE_LIST(MAKE_STRING_MAP_CASE)
@@ -645,9 +643,9 @@ const char* V8HeapExplorer::GetSystemEntryName(HeapObject object) {
}
int V8HeapExplorer::EstimateObjectsCount() {
- HeapIterator it(heap_, HeapIterator::kFilterUnreachable);
+ CombinedHeapIterator it(heap_, HeapIterator::kFilterUnreachable);
int objects_count = 0;
- while (!it.next().is_null()) ++objects_count;
+ while (!it.Next().is_null()) ++objects_count;
return objects_count;
}
@@ -658,7 +656,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
: generator_(generator),
parent_obj_(parent_obj),
parent_start_(parent_obj_.RawMaybeWeakField(0)),
- parent_end_(parent_obj_.RawMaybeWeakField(parent_obj_->Size())),
+ parent_end_(parent_obj_.RawMaybeWeakField(parent_obj_.Size())),
parent_(parent),
next_index_(0) {}
void VisitPointers(HeapObject host, ObjectSlot start,
@@ -712,74 +710,74 @@ class IndexedReferencesExtractor : public ObjectVisitor {
};
void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject obj) {
- if (obj->IsJSGlobalProxy()) {
+ if (obj.IsJSGlobalProxy()) {
ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
- } else if (obj->IsJSArrayBuffer()) {
+ } else if (obj.IsJSArrayBuffer()) {
ExtractJSArrayBufferReferences(entry, JSArrayBuffer::cast(obj));
- } else if (obj->IsJSObject()) {
- if (obj->IsJSWeakSet()) {
+ } else if (obj.IsJSObject()) {
+ if (obj.IsJSWeakSet()) {
ExtractJSWeakCollectionReferences(entry, JSWeakSet::cast(obj));
- } else if (obj->IsJSWeakMap()) {
+ } else if (obj.IsJSWeakMap()) {
ExtractJSWeakCollectionReferences(entry, JSWeakMap::cast(obj));
- } else if (obj->IsJSSet()) {
+ } else if (obj.IsJSSet()) {
ExtractJSCollectionReferences(entry, JSSet::cast(obj));
- } else if (obj->IsJSMap()) {
+ } else if (obj.IsJSMap()) {
ExtractJSCollectionReferences(entry, JSMap::cast(obj));
- } else if (obj->IsJSPromise()) {
+ } else if (obj.IsJSPromise()) {
ExtractJSPromiseReferences(entry, JSPromise::cast(obj));
- } else if (obj->IsJSGeneratorObject()) {
+ } else if (obj.IsJSGeneratorObject()) {
ExtractJSGeneratorObjectReferences(entry, JSGeneratorObject::cast(obj));
}
ExtractJSObjectReferences(entry, JSObject::cast(obj));
- } else if (obj->IsString()) {
+ } else if (obj.IsString()) {
ExtractStringReferences(entry, String::cast(obj));
- } else if (obj->IsSymbol()) {
+ } else if (obj.IsSymbol()) {
ExtractSymbolReferences(entry, Symbol::cast(obj));
- } else if (obj->IsMap()) {
+ } else if (obj.IsMap()) {
ExtractMapReferences(entry, Map::cast(obj));
- } else if (obj->IsSharedFunctionInfo()) {
+ } else if (obj.IsSharedFunctionInfo()) {
ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
- } else if (obj->IsScript()) {
+ } else if (obj.IsScript()) {
ExtractScriptReferences(entry, Script::cast(obj));
- } else if (obj->IsAccessorInfo()) {
+ } else if (obj.IsAccessorInfo()) {
ExtractAccessorInfoReferences(entry, AccessorInfo::cast(obj));
- } else if (obj->IsAccessorPair()) {
+ } else if (obj.IsAccessorPair()) {
ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
- } else if (obj->IsCode()) {
+ } else if (obj.IsCode()) {
ExtractCodeReferences(entry, Code::cast(obj));
- } else if (obj->IsCell()) {
+ } else if (obj.IsCell()) {
ExtractCellReferences(entry, Cell::cast(obj));
- } else if (obj->IsFeedbackCell()) {
+ } else if (obj.IsFeedbackCell()) {
ExtractFeedbackCellReferences(entry, FeedbackCell::cast(obj));
- } else if (obj->IsPropertyCell()) {
+ } else if (obj.IsPropertyCell()) {
ExtractPropertyCellReferences(entry, PropertyCell::cast(obj));
- } else if (obj->IsAllocationSite()) {
+ } else if (obj.IsAllocationSite()) {
ExtractAllocationSiteReferences(entry, AllocationSite::cast(obj));
- } else if (obj->IsArrayBoilerplateDescription()) {
+ } else if (obj.IsArrayBoilerplateDescription()) {
ExtractArrayBoilerplateDescriptionReferences(
entry, ArrayBoilerplateDescription::cast(obj));
- } else if (obj->IsFeedbackVector()) {
+ } else if (obj.IsFeedbackVector()) {
ExtractFeedbackVectorReferences(entry, FeedbackVector::cast(obj));
- } else if (obj->IsDescriptorArray()) {
+ } else if (obj.IsDescriptorArray()) {
ExtractDescriptorArrayReferences(entry, DescriptorArray::cast(obj));
- } else if (obj->IsWeakFixedArray()) {
+ } else if (obj.IsWeakFixedArray()) {
ExtractWeakArrayReferences(WeakFixedArray::kHeaderSize, entry,
WeakFixedArray::cast(obj));
- } else if (obj->IsWeakArrayList()) {
+ } else if (obj.IsWeakArrayList()) {
ExtractWeakArrayReferences(WeakArrayList::kHeaderSize, entry,
WeakArrayList::cast(obj));
- } else if (obj->IsContext()) {
+ } else if (obj.IsContext()) {
ExtractContextReferences(entry, Context::cast(obj));
- } else if (obj->IsEphemeronHashTable()) {
+ } else if (obj.IsEphemeronHashTable()) {
ExtractEphemeronHashTableReferences(entry, EphemeronHashTable::cast(obj));
- } else if (obj->IsFixedArray()) {
+ } else if (obj.IsFixedArray()) {
ExtractFixedArrayReferences(entry, FixedArray::cast(obj));
}
}
void V8HeapExplorer::ExtractJSGlobalProxyReferences(HeapEntry* entry,
JSGlobalProxy proxy) {
- SetInternalReference(entry, "native_context", proxy->native_context(),
+ SetInternalReference(entry, "native_context", proxy.native_context(),
JSGlobalProxy::kNativeContextOffset);
}
@@ -793,118 +791,116 @@ void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
PrototypeIterator iter(isolate, js_obj);
ReadOnlyRoots roots(isolate);
SetPropertyReference(entry, roots.proto_string(), iter.GetCurrent());
- if (obj->IsJSBoundFunction()) {
+ if (obj.IsJSBoundFunction()) {
JSBoundFunction js_fun = JSBoundFunction::cast(obj);
- TagObject(js_fun->bound_arguments(), "(bound arguments)");
- SetInternalReference(entry, "bindings", js_fun->bound_arguments(),
+ TagObject(js_fun.bound_arguments(), "(bound arguments)");
+ SetInternalReference(entry, "bindings", js_fun.bound_arguments(),
JSBoundFunction::kBoundArgumentsOffset);
- SetInternalReference(entry, "bound_this", js_fun->bound_this(),
+ SetInternalReference(entry, "bound_this", js_fun.bound_this(),
JSBoundFunction::kBoundThisOffset);
SetInternalReference(entry, "bound_function",
- js_fun->bound_target_function(),
+ js_fun.bound_target_function(),
JSBoundFunction::kBoundTargetFunctionOffset);
- FixedArray bindings = js_fun->bound_arguments();
- for (int i = 0; i < bindings->length(); i++) {
+ FixedArray bindings = js_fun.bound_arguments();
+ for (int i = 0; i < bindings.length(); i++) {
const char* reference_name = names_->GetFormatted("bound_argument_%d", i);
- SetNativeBindReference(entry, reference_name, bindings->get(i));
+ SetNativeBindReference(entry, reference_name, bindings.get(i));
}
- } else if (obj->IsJSFunction()) {
+ } else if (obj.IsJSFunction()) {
JSFunction js_fun = JSFunction::cast(js_obj);
- if (js_fun->has_prototype_slot()) {
- Object proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole(isolate)) {
- if (!proto_or_map->IsMap()) {
+ if (js_fun.has_prototype_slot()) {
+ Object proto_or_map = js_fun.prototype_or_initial_map();
+ if (!proto_or_map.IsTheHole(isolate)) {
+ if (!proto_or_map.IsMap()) {
SetPropertyReference(entry, roots.prototype_string(), proto_or_map,
nullptr,
JSFunction::kPrototypeOrInitialMapOffset);
} else {
SetPropertyReference(entry, roots.prototype_string(),
- js_fun->prototype());
+ js_fun.prototype());
SetInternalReference(entry, "initial_map", proto_or_map,
JSFunction::kPrototypeOrInitialMapOffset);
}
}
}
- SharedFunctionInfo shared_info = js_fun->shared();
- TagObject(js_fun->raw_feedback_cell(), "(function feedback cell)");
- SetInternalReference(entry, "feedback_cell", js_fun->raw_feedback_cell(),
+ SharedFunctionInfo shared_info = js_fun.shared();
+ TagObject(js_fun.raw_feedback_cell(), "(function feedback cell)");
+ SetInternalReference(entry, "feedback_cell", js_fun.raw_feedback_cell(),
JSFunction::kFeedbackCellOffset);
TagObject(shared_info, "(shared function info)");
SetInternalReference(entry, "shared", shared_info,
JSFunction::kSharedFunctionInfoOffset);
- TagObject(js_fun->context(), "(context)");
- SetInternalReference(entry, "context", js_fun->context(),
+ TagObject(js_fun.context(), "(context)");
+ SetInternalReference(entry, "context", js_fun.context(),
JSFunction::kContextOffset);
- SetInternalReference(entry, "code", js_fun->code(),
- JSFunction::kCodeOffset);
- } else if (obj->IsJSGlobalObject()) {
+ SetInternalReference(entry, "code", js_fun.code(), JSFunction::kCodeOffset);
+ } else if (obj.IsJSGlobalObject()) {
JSGlobalObject global_obj = JSGlobalObject::cast(obj);
- SetInternalReference(entry, "native_context", global_obj->native_context(),
+ SetInternalReference(entry, "native_context", global_obj.native_context(),
JSGlobalObject::kNativeContextOffset);
- SetInternalReference(entry, "global_proxy", global_obj->global_proxy(),
+ SetInternalReference(entry, "global_proxy", global_obj.global_proxy(),
JSGlobalObject::kGlobalProxyOffset);
STATIC_ASSERT(JSGlobalObject::kSize - JSObject::kHeaderSize ==
2 * kTaggedSize);
- } else if (obj->IsJSArrayBufferView()) {
+ } else if (obj.IsJSArrayBufferView()) {
JSArrayBufferView view = JSArrayBufferView::cast(obj);
- SetInternalReference(entry, "buffer", view->buffer(),
+ SetInternalReference(entry, "buffer", view.buffer(),
JSArrayBufferView::kBufferOffset);
}
- TagObject(js_obj->raw_properties_or_hash(), "(object properties)");
- SetInternalReference(entry, "properties", js_obj->raw_properties_or_hash(),
+ TagObject(js_obj.raw_properties_or_hash(), "(object properties)");
+ SetInternalReference(entry, "properties", js_obj.raw_properties_or_hash(),
JSObject::kPropertiesOrHashOffset);
- TagObject(js_obj->elements(), "(object elements)");
- SetInternalReference(entry, "elements", js_obj->elements(),
+ TagObject(js_obj.elements(), "(object elements)");
+ SetInternalReference(entry, "elements", js_obj.elements(),
JSObject::kElementsOffset);
}
void V8HeapExplorer::ExtractStringReferences(HeapEntry* entry, String string) {
- if (string->IsConsString()) {
+ if (string.IsConsString()) {
ConsString cs = ConsString::cast(string);
- SetInternalReference(entry, "first", cs->first(), ConsString::kFirstOffset);
- SetInternalReference(entry, "second", cs->second(),
+ SetInternalReference(entry, "first", cs.first(), ConsString::kFirstOffset);
+ SetInternalReference(entry, "second", cs.second(),
ConsString::kSecondOffset);
- } else if (string->IsSlicedString()) {
+ } else if (string.IsSlicedString()) {
SlicedString ss = SlicedString::cast(string);
- SetInternalReference(entry, "parent", ss->parent(),
+ SetInternalReference(entry, "parent", ss.parent(),
SlicedString::kParentOffset);
- } else if (string->IsThinString()) {
+ } else if (string.IsThinString()) {
ThinString ts = ThinString::cast(string);
- SetInternalReference(entry, "actual", ts->actual(),
+ SetInternalReference(entry, "actual", ts.actual(),
ThinString::kActualOffset);
}
}
void V8HeapExplorer::ExtractSymbolReferences(HeapEntry* entry, Symbol symbol) {
- SetInternalReference(entry, "name", symbol->name(), Symbol::kNameOffset);
+ SetInternalReference(entry, "name", symbol.name(), Symbol::kNameOffset);
}
void V8HeapExplorer::ExtractJSCollectionReferences(HeapEntry* entry,
JSCollection collection) {
- SetInternalReference(entry, "table", collection->table(),
+ SetInternalReference(entry, "table", collection.table(),
JSCollection::kTableOffset);
}
void V8HeapExplorer::ExtractJSWeakCollectionReferences(HeapEntry* entry,
JSWeakCollection obj) {
- SetInternalReference(entry, "table", obj->table(),
+ SetInternalReference(entry, "table", obj.table(),
JSWeakCollection::kTableOffset);
}
void V8HeapExplorer::ExtractEphemeronHashTableReferences(
HeapEntry* entry, EphemeronHashTable table) {
- for (int i = 0, capacity = table->Capacity(); i < capacity; ++i) {
+ for (int i = 0, capacity = table.Capacity(); i < capacity; ++i) {
int key_index = EphemeronHashTable::EntryToIndex(i) +
EphemeronHashTable::kEntryKeyIndex;
int value_index = EphemeronHashTable::EntryToValueIndex(i);
- Object key = table->get(key_index);
- Object value = table->get(value_index);
- SetWeakReference(entry, key_index, key,
- table->OffsetOfElementAt(key_index));
+ Object key = table.get(key_index);
+ Object value = table.get(value_index);
+ SetWeakReference(entry, key_index, key, table.OffsetOfElementAt(key_index));
SetWeakReference(entry, value_index, value,
- table->OffsetOfElementAt(value_index));
+ table.OffsetOfElementAt(value_index));
HeapEntry* key_entry = GetEntry(key);
HeapEntry* value_entry = GetEntry(value);
if (key_entry && value_entry) {
@@ -930,55 +926,54 @@ static const struct {
void V8HeapExplorer::ExtractContextReferences(HeapEntry* entry,
Context context) {
- if (!context->IsNativeContext() && context->is_declaration_context()) {
- ScopeInfo scope_info = context->scope_info();
+ if (!context.IsNativeContext() && context.is_declaration_context()) {
+ ScopeInfo scope_info = context.scope_info();
// Add context allocated locals.
- int context_locals = scope_info->ContextLocalCount();
+ int context_locals = scope_info.ContextLocalCount();
for (int i = 0; i < context_locals; ++i) {
- String local_name = scope_info->ContextLocalName(i);
+ String local_name = scope_info.ContextLocalName(i);
int idx = Context::MIN_CONTEXT_SLOTS + i;
- SetContextReference(entry, local_name, context->get(idx),
+ SetContextReference(entry, local_name, context.get(idx),
Context::OffsetOfElementAt(idx));
}
- if (scope_info->HasFunctionName()) {
- String name = String::cast(scope_info->FunctionName());
- int idx = scope_info->FunctionContextSlotIndex(name);
+ if (scope_info.HasFunctionName()) {
+ String name = String::cast(scope_info.FunctionName());
+ int idx = scope_info.FunctionContextSlotIndex(name);
if (idx >= 0) {
- SetContextReference(entry, name, context->get(idx),
+ SetContextReference(entry, name, context.get(idx),
Context::OffsetOfElementAt(idx));
}
}
}
SetInternalReference(
- entry, "scope_info", context->get(Context::SCOPE_INFO_INDEX),
+ entry, "scope_info", context.get(Context::SCOPE_INFO_INDEX),
FixedArray::OffsetOfElementAt(Context::SCOPE_INFO_INDEX));
- SetInternalReference(entry, "previous", context->get(Context::PREVIOUS_INDEX),
+ SetInternalReference(entry, "previous", context.get(Context::PREVIOUS_INDEX),
FixedArray::OffsetOfElementAt(Context::PREVIOUS_INDEX));
SetInternalReference(entry, "extension",
- context->get(Context::EXTENSION_INDEX),
+ context.get(Context::EXTENSION_INDEX),
FixedArray::OffsetOfElementAt(Context::EXTENSION_INDEX));
SetInternalReference(
- entry, "native_context", context->get(Context::NATIVE_CONTEXT_INDEX),
+ entry, "native_context", context.get(Context::NATIVE_CONTEXT_INDEX),
FixedArray::OffsetOfElementAt(Context::NATIVE_CONTEXT_INDEX));
- if (context->IsNativeContext()) {
- TagObject(context->normalized_map_cache(), "(context norm. map cache)");
- TagObject(context->embedder_data(), "(context data)");
+ if (context.IsNativeContext()) {
+ TagObject(context.normalized_map_cache(), "(context norm. map cache)");
+ TagObject(context.embedder_data(), "(context data)");
for (size_t i = 0; i < arraysize(native_context_names); i++) {
int index = native_context_names[i].index;
const char* name = native_context_names[i].name;
- SetInternalReference(entry, name, context->get(index),
+ SetInternalReference(entry, name, context.get(index),
FixedArray::OffsetOfElementAt(index));
}
SetWeakReference(
- entry, "optimized_code_list",
- context->get(Context::OPTIMIZED_CODE_LIST),
+ entry, "optimized_code_list", context.get(Context::OPTIMIZED_CODE_LIST),
FixedArray::OffsetOfElementAt(Context::OPTIMIZED_CODE_LIST));
SetWeakReference(
entry, "deoptimized_code_list",
- context->get(Context::DEOPTIMIZED_CODE_LIST),
+ context.get(Context::DEOPTIMIZED_CODE_LIST),
FixedArray::OffsetOfElementAt(Context::DEOPTIMIZED_CODE_LIST));
STATIC_ASSERT(Context::OPTIMIZED_CODE_LIST == Context::FIRST_WEAK_SLOT);
STATIC_ASSERT(Context::NEXT_CONTEXT_LINK + 1 ==
@@ -989,54 +984,54 @@ void V8HeapExplorer::ExtractContextReferences(HeapEntry* entry,
}
void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map map) {
- MaybeObject maybe_raw_transitions_or_prototype_info = map->raw_transitions();
+ MaybeObject maybe_raw_transitions_or_prototype_info = map.raw_transitions();
HeapObject raw_transitions_or_prototype_info;
if (maybe_raw_transitions_or_prototype_info->GetHeapObjectIfWeak(
&raw_transitions_or_prototype_info)) {
- DCHECK(raw_transitions_or_prototype_info->IsMap());
+ DCHECK(raw_transitions_or_prototype_info.IsMap());
SetWeakReference(entry, "transition", raw_transitions_or_prototype_info,
Map::kTransitionsOrPrototypeInfoOffset);
} else if (maybe_raw_transitions_or_prototype_info->GetHeapObjectIfStrong(
&raw_transitions_or_prototype_info)) {
- if (raw_transitions_or_prototype_info->IsTransitionArray()) {
+ if (raw_transitions_or_prototype_info.IsTransitionArray()) {
TransitionArray transitions =
TransitionArray::cast(raw_transitions_or_prototype_info);
- if (map->CanTransition() && transitions->HasPrototypeTransitions()) {
- TagObject(transitions->GetPrototypeTransitions(),
+ if (map.CanTransition() && transitions.HasPrototypeTransitions()) {
+ TagObject(transitions.GetPrototypeTransitions(),
"(prototype transitions)");
}
TagObject(transitions, "(transition array)");
SetInternalReference(entry, "transitions", transitions,
Map::kTransitionsOrPrototypeInfoOffset);
- } else if (raw_transitions_or_prototype_info->IsTuple3() ||
- raw_transitions_or_prototype_info->IsFixedArray()) {
+ } else if (raw_transitions_or_prototype_info.IsTuple3() ||
+ raw_transitions_or_prototype_info.IsFixedArray()) {
TagObject(raw_transitions_or_prototype_info, "(transition)");
SetInternalReference(entry, "transition",
raw_transitions_or_prototype_info,
Map::kTransitionsOrPrototypeInfoOffset);
- } else if (map->is_prototype_map()) {
+ } else if (map.is_prototype_map()) {
TagObject(raw_transitions_or_prototype_info, "prototype_info");
SetInternalReference(entry, "prototype_info",
raw_transitions_or_prototype_info,
Map::kTransitionsOrPrototypeInfoOffset);
}
}
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors();
TagObject(descriptors, "(map descriptors)");
SetInternalReference(entry, "descriptors", descriptors,
- Map::kDescriptorsOffset);
- SetInternalReference(entry, "prototype", map->prototype(),
+ Map::kInstanceDescriptorsOffset);
+ SetInternalReference(entry, "prototype", map.prototype(),
Map::kPrototypeOffset);
if (FLAG_unbox_double_fields) {
- SetInternalReference(entry, "layout_descriptor", map->layout_descriptor(),
+ SetInternalReference(entry, "layout_descriptor", map.layout_descriptor(),
Map::kLayoutDescriptorOffset);
}
- Object constructor_or_backpointer = map->constructor_or_backpointer();
- if (constructor_or_backpointer->IsMap()) {
+ Object constructor_or_backpointer = map.constructor_or_backpointer();
+ if (constructor_or_backpointer.IsMap()) {
TagObject(constructor_or_backpointer, "(back pointer)");
SetInternalReference(entry, "back_pointer", constructor_or_backpointer,
Map::kConstructorOrBackPointerOffset);
- } else if (constructor_or_backpointer->IsFunctionTemplateInfo()) {
+ } else if (constructor_or_backpointer.IsFunctionTemplateInfo()) {
TagObject(constructor_or_backpointer, "(constructor function data)");
SetInternalReference(entry, "constructor_function_data",
constructor_or_backpointer,
@@ -1045,72 +1040,70 @@ void V8HeapExplorer::ExtractMapReferences(HeapEntry* entry, Map map) {
SetInternalReference(entry, "constructor", constructor_or_backpointer,
Map::kConstructorOrBackPointerOffset);
}
- TagObject(map->dependent_code(), "(dependent code)");
- SetInternalReference(entry, "dependent_code", map->dependent_code(),
+ TagObject(map.dependent_code(), "(dependent code)");
+ SetInternalReference(entry, "dependent_code", map.dependent_code(),
Map::kDependentCodeOffset);
}
void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
HeapEntry* entry, SharedFunctionInfo shared) {
- String shared_name = shared->DebugName();
+ String shared_name = shared.DebugName();
const char* name = nullptr;
if (shared_name != ReadOnlyRoots(heap_).empty_string()) {
name = names_->GetName(shared_name);
- TagObject(shared->GetCode(), names_->GetFormatted("(code for %s)", name));
+ TagObject(shared.GetCode(), names_->GetFormatted("(code for %s)", name));
} else {
- TagObject(shared->GetCode(),
- names_->GetFormatted(
- "(%s code)", Code::Kind2String(shared->GetCode()->kind())));
+ TagObject(shared.GetCode(),
+ names_->GetFormatted("(%s code)",
+ Code::Kind2String(shared.GetCode().kind())));
}
- if (shared->name_or_scope_info()->IsScopeInfo()) {
- TagObject(shared->name_or_scope_info(), "(function scope info)");
+ if (shared.name_or_scope_info().IsScopeInfo()) {
+ TagObject(shared.name_or_scope_info(), "(function scope info)");
}
- SetInternalReference(entry, "name_or_scope_info",
- shared->name_or_scope_info(),
+ SetInternalReference(entry, "name_or_scope_info", shared.name_or_scope_info(),
SharedFunctionInfo::kNameOrScopeInfoOffset);
SetInternalReference(entry, "script_or_debug_info",
- shared->script_or_debug_info(),
+ shared.script_or_debug_info(),
SharedFunctionInfo::kScriptOrDebugInfoOffset);
- SetInternalReference(entry, "function_data", shared->function_data(),
+ SetInternalReference(entry, "function_data", shared.function_data(),
SharedFunctionInfo::kFunctionDataOffset);
SetInternalReference(
entry, "raw_outer_scope_info_or_feedback_metadata",
- shared->raw_outer_scope_info_or_feedback_metadata(),
+ shared.raw_outer_scope_info_or_feedback_metadata(),
SharedFunctionInfo::kOuterScopeInfoOrFeedbackMetadataOffset);
}
void V8HeapExplorer::ExtractScriptReferences(HeapEntry* entry, Script script) {
- SetInternalReference(entry, "source", script->source(),
- Script::kSourceOffset);
- SetInternalReference(entry, "name", script->name(), Script::kNameOffset);
- SetInternalReference(entry, "context_data", script->context_data(),
+ SetInternalReference(entry, "source", script.source(), Script::kSourceOffset);
+ SetInternalReference(entry, "name", script.name(), Script::kNameOffset);
+ SetInternalReference(entry, "context_data", script.context_data(),
Script::kContextOffset);
- TagObject(script->line_ends(), "(script line ends)");
- SetInternalReference(entry, "line_ends", script->line_ends(),
+ TagObject(script.line_ends(), "(script line ends)");
+ SetInternalReference(entry, "line_ends", script.line_ends(),
Script::kLineEndsOffset);
}
void V8HeapExplorer::ExtractAccessorInfoReferences(HeapEntry* entry,
AccessorInfo accessor_info) {
- SetInternalReference(entry, "name", accessor_info->name(),
+ SetInternalReference(entry, "name", accessor_info.name(),
AccessorInfo::kNameOffset);
SetInternalReference(entry, "expected_receiver_type",
- accessor_info->expected_receiver_type(),
+ accessor_info.expected_receiver_type(),
AccessorInfo::kExpectedReceiverTypeOffset);
- SetInternalReference(entry, "getter", accessor_info->getter(),
+ SetInternalReference(entry, "getter", accessor_info.getter(),
AccessorInfo::kGetterOffset);
- SetInternalReference(entry, "setter", accessor_info->setter(),
+ SetInternalReference(entry, "setter", accessor_info.setter(),
AccessorInfo::kSetterOffset);
- SetInternalReference(entry, "data", accessor_info->data(),
+ SetInternalReference(entry, "data", accessor_info.data(),
AccessorInfo::kDataOffset);
}
void V8HeapExplorer::ExtractAccessorPairReferences(HeapEntry* entry,
AccessorPair accessors) {
- SetInternalReference(entry, "getter", accessors->getter(),
+ SetInternalReference(entry, "getter", accessors.getter(),
AccessorPair::kGetterOffset);
- SetInternalReference(entry, "setter", accessors->setter(),
+ SetInternalReference(entry, "setter", accessors.setter(),
AccessorPair::kSetterOffset);
}
@@ -1119,54 +1112,53 @@ void V8HeapExplorer::TagBuiltinCodeObject(Code code, const char* name) {
}
void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
- TagObject(code->relocation_info(), "(code relocation info)");
- SetInternalReference(entry, "relocation_info", code->relocation_info(),
+ TagObject(code.relocation_info(), "(code relocation info)");
+ SetInternalReference(entry, "relocation_info", code.relocation_info(),
Code::kRelocationInfoOffset);
- TagObject(code->deoptimization_data(), "(code deopt data)");
- SetInternalReference(entry, "deoptimization_data",
- code->deoptimization_data(),
+ TagObject(code.deoptimization_data(), "(code deopt data)");
+ SetInternalReference(entry, "deoptimization_data", code.deoptimization_data(),
Code::kDeoptimizationDataOffset);
- TagObject(code->source_position_table(), "(source position table)");
+ TagObject(code.source_position_table(), "(source position table)");
SetInternalReference(entry, "source_position_table",
- code->source_position_table(),
+ code.source_position_table(),
Code::kSourcePositionTableOffset);
}
void V8HeapExplorer::ExtractCellReferences(HeapEntry* entry, Cell cell) {
- SetInternalReference(entry, "value", cell->value(), Cell::kValueOffset);
+ SetInternalReference(entry, "value", cell.value(), Cell::kValueOffset);
}
void V8HeapExplorer::ExtractFeedbackCellReferences(HeapEntry* entry,
FeedbackCell feedback_cell) {
TagObject(feedback_cell, "(feedback cell)");
- SetInternalReference(entry, "value", feedback_cell->value(),
+ SetInternalReference(entry, "value", feedback_cell.value(),
FeedbackCell::kValueOffset);
}
void V8HeapExplorer::ExtractPropertyCellReferences(HeapEntry* entry,
PropertyCell cell) {
- SetInternalReference(entry, "value", cell->value(),
+ SetInternalReference(entry, "value", cell.value(),
PropertyCell::kValueOffset);
- TagObject(cell->dependent_code(), "(dependent code)");
- SetInternalReference(entry, "dependent_code", cell->dependent_code(),
+ TagObject(cell.dependent_code(), "(dependent code)");
+ SetInternalReference(entry, "dependent_code", cell.dependent_code(),
PropertyCell::kDependentCodeOffset);
}
void V8HeapExplorer::ExtractAllocationSiteReferences(HeapEntry* entry,
AllocationSite site) {
SetInternalReference(entry, "transition_info",
- site->transition_info_or_boilerplate(),
+ site.transition_info_or_boilerplate(),
AllocationSite::kTransitionInfoOrBoilerplateOffset);
- SetInternalReference(entry, "nested_site", site->nested_site(),
+ SetInternalReference(entry, "nested_site", site.nested_site(),
AllocationSite::kNestedSiteOffset);
- TagObject(site->dependent_code(), "(dependent code)");
- SetInternalReference(entry, "dependent_code", site->dependent_code(),
+ TagObject(site.dependent_code(), "(dependent code)");
+ SetInternalReference(entry, "dependent_code", site.dependent_code(),
AllocationSite::kDependentCodeOffset);
}
void V8HeapExplorer::ExtractArrayBoilerplateDescriptionReferences(
HeapEntry* entry, ArrayBoilerplateDescription value) {
- SetInternalReference(entry, "constant_elements", value->constant_elements(),
+ SetInternalReference(entry, "constant_elements", value.constant_elements(),
ArrayBoilerplateDescription::kConstantElementsOffset);
}
@@ -1189,11 +1181,11 @@ class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
void V8HeapExplorer::ExtractJSArrayBufferReferences(HeapEntry* entry,
JSArrayBuffer buffer) {
// Setup a reference to a native memory backing_store object.
- if (!buffer->backing_store()) return;
- size_t data_size = buffer->byte_length();
+ if (!buffer.backing_store()) return;
+ size_t data_size = buffer.byte_length();
JSArrayBufferDataEntryAllocator allocator(data_size, this);
HeapEntry* data_entry =
- generator_->FindOrAddEntry(buffer->backing_store(), &allocator);
+ generator_->FindOrAddEntry(buffer.backing_store(), &allocator);
entry->SetNamedReference(HeapGraphEdge::kInternal, "backing_store",
data_entry);
}
@@ -1201,51 +1193,51 @@ void V8HeapExplorer::ExtractJSArrayBufferReferences(HeapEntry* entry,
void V8HeapExplorer::ExtractJSPromiseReferences(HeapEntry* entry,
JSPromise promise) {
SetInternalReference(entry, "reactions_or_result",
- promise->reactions_or_result(),
+ promise.reactions_or_result(),
JSPromise::kReactionsOrResultOffset);
}
void V8HeapExplorer::ExtractJSGeneratorObjectReferences(
HeapEntry* entry, JSGeneratorObject generator) {
- SetInternalReference(entry, "function", generator->function(),
+ SetInternalReference(entry, "function", generator.function(),
JSGeneratorObject::kFunctionOffset);
- SetInternalReference(entry, "context", generator->context(),
+ SetInternalReference(entry, "context", generator.context(),
JSGeneratorObject::kContextOffset);
- SetInternalReference(entry, "receiver", generator->receiver(),
+ SetInternalReference(entry, "receiver", generator.receiver(),
JSGeneratorObject::kReceiverOffset);
SetInternalReference(entry, "parameters_and_registers",
- generator->parameters_and_registers(),
+ generator.parameters_and_registers(),
JSGeneratorObject::kParametersAndRegistersOffset);
}
void V8HeapExplorer::ExtractFixedArrayReferences(HeapEntry* entry,
FixedArray array) {
- for (int i = 0, l = array->length(); i < l; ++i) {
- DCHECK(!HasWeakHeapObjectTag(array->get(i)));
- SetInternalReference(entry, i, array->get(i), array->OffsetOfElementAt(i));
+ for (int i = 0, l = array.length(); i < l; ++i) {
+ DCHECK(!HasWeakHeapObjectTag(array.get(i)));
+ SetInternalReference(entry, i, array.get(i), array.OffsetOfElementAt(i));
}
}
void V8HeapExplorer::ExtractFeedbackVectorReferences(
HeapEntry* entry, FeedbackVector feedback_vector) {
- MaybeObject code = feedback_vector->optimized_code_weak_or_smi();
+ MaybeObject code = feedback_vector.optimized_code_weak_or_smi();
HeapObject code_heap_object;
if (code->GetHeapObjectIfWeak(&code_heap_object)) {
SetWeakReference(entry, "optimized code", code_heap_object,
- FeedbackVector::kOptimizedCodeOffset);
+ FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
}
}
void V8HeapExplorer::ExtractDescriptorArrayReferences(HeapEntry* entry,
DescriptorArray array) {
- SetInternalReference(entry, "enum_cache", array->enum_cache(),
+ SetInternalReference(entry, "enum_cache", array.enum_cache(),
DescriptorArray::kEnumCacheOffset);
- MaybeObjectSlot start = MaybeObjectSlot(array->GetDescriptorSlot(0));
+ MaybeObjectSlot start = MaybeObjectSlot(array.GetDescriptorSlot(0));
MaybeObjectSlot end = MaybeObjectSlot(
- array->GetDescriptorSlot(array->number_of_all_descriptors()));
+ array.GetDescriptorSlot(array.number_of_all_descriptors()));
for (int i = 0; start + i < end; ++i) {
MaybeObjectSlot slot = start + i;
- int offset = static_cast<int>(slot.address() - array->address());
+ int offset = static_cast<int>(slot.address() - array.address());
MaybeObject object = *slot;
HeapObject heap_object;
if (object->GetHeapObjectIfWeak(&heap_object)) {
@@ -1259,8 +1251,8 @@ void V8HeapExplorer::ExtractDescriptorArrayReferences(HeapEntry* entry,
template <typename T>
void V8HeapExplorer::ExtractWeakArrayReferences(int header_size,
HeapEntry* entry, T array) {
- for (int i = 0; i < array->length(); ++i) {
- MaybeObject object = array->Get(i);
+ for (int i = 0; i < array.length(); ++i) {
+ MaybeObject object = array.Get(i);
HeapObject heap_object;
if (object->GetHeapObjectIfWeak(&heap_object)) {
SetWeakReference(entry, i, heap_object, header_size + i * kTaggedSize);
@@ -1273,20 +1265,20 @@ void V8HeapExplorer::ExtractWeakArrayReferences(int header_size,
void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
HeapEntry* entry) {
- Isolate* isolate = js_obj->GetIsolate();
- if (js_obj->HasFastProperties()) {
- DescriptorArray descs = js_obj->map()->instance_descriptors();
- int real_size = js_obj->map()->NumberOfOwnDescriptors();
+ Isolate* isolate = js_obj.GetIsolate();
+ if (js_obj.HasFastProperties()) {
+ DescriptorArray descs = js_obj.map().instance_descriptors();
+ int real_size = js_obj.map().NumberOfOwnDescriptors();
for (int i = 0; i < real_size; i++) {
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
case kField: {
Representation r = details.representation();
if (r.IsSmi() || r.IsDouble()) break;
- Name k = descs->GetKey(i);
- FieldIndex field_index = FieldIndex::ForDescriptor(js_obj->map(), i);
- Object value = js_obj->RawFastPropertyAt(field_index);
+ Name k = descs.GetKey(i);
+ FieldIndex field_index = FieldIndex::ForDescriptor(js_obj.map(), i);
+ Object value = js_obj.RawFastPropertyAt(field_index);
int field_offset =
field_index.is_inobject() ? field_index.offset() : -1;
@@ -1295,35 +1287,34 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
break;
}
case kDescriptor:
- SetDataOrAccessorPropertyReference(details.kind(), entry,
- descs->GetKey(i),
- descs->GetStrongValue(i));
+ SetDataOrAccessorPropertyReference(
+ details.kind(), entry, descs.GetKey(i), descs.GetStrongValue(i));
break;
}
}
- } else if (js_obj->IsJSGlobalObject()) {
+ } else if (js_obj.IsJSGlobalObject()) {
// We assume that global objects can only have slow properties.
GlobalDictionary dictionary =
- JSGlobalObject::cast(js_obj)->global_dictionary();
- int length = dictionary->Capacity();
+ JSGlobalObject::cast(js_obj).global_dictionary();
+ int length = dictionary.Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < length; ++i) {
- if (!dictionary->IsKey(roots, dictionary->KeyAt(i))) continue;
- PropertyCell cell = dictionary->CellAt(i);
- Name name = cell->name();
- Object value = cell->value();
- PropertyDetails details = cell->property_details();
+ if (!dictionary.IsKey(roots, dictionary.KeyAt(i))) continue;
+ PropertyCell cell = dictionary.CellAt(i);
+ Name name = cell.name();
+ Object value = cell.value();
+ PropertyDetails details = cell.property_details();
SetDataOrAccessorPropertyReference(details.kind(), entry, name, value);
}
} else {
- NameDictionary dictionary = js_obj->property_dictionary();
- int length = dictionary->Capacity();
+ NameDictionary dictionary = js_obj.property_dictionary();
+ int length = dictionary.Capacity();
ReadOnlyRoots roots(isolate);
for (int i = 0; i < length; ++i) {
- Object k = dictionary->KeyAt(i);
- if (!dictionary->IsKey(roots, k)) continue;
- Object value = dictionary->ValueAt(i);
- PropertyDetails details = dictionary->DetailsAt(i);
+ Object k = dictionary.KeyAt(i);
+ if (!dictionary.IsKey(roots, k)) continue;
+ Object value = dictionary.ValueAt(i);
+ PropertyDetails details = dictionary.DetailsAt(i);
SetDataOrAccessorPropertyReference(details.kind(), entry, Name::cast(k),
value);
}
@@ -1333,56 +1324,55 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
void V8HeapExplorer::ExtractAccessorPairProperty(HeapEntry* entry, Name key,
Object callback_obj,
int field_offset) {
- if (!callback_obj->IsAccessorPair()) return;
+ if (!callback_obj.IsAccessorPair()) return;
AccessorPair accessors = AccessorPair::cast(callback_obj);
SetPropertyReference(entry, key, accessors, nullptr, field_offset);
- Object getter = accessors->getter();
- if (!getter->IsOddball()) {
+ Object getter = accessors.getter();
+ if (!getter.IsOddball()) {
SetPropertyReference(entry, key, getter, "get %s");
}
- Object setter = accessors->setter();
- if (!setter->IsOddball()) {
+ Object setter = accessors.setter();
+ if (!setter.IsOddball()) {
SetPropertyReference(entry, key, setter, "set %s");
}
}
void V8HeapExplorer::ExtractElementReferences(JSObject js_obj,
HeapEntry* entry) {
- ReadOnlyRoots roots = js_obj->GetReadOnlyRoots();
- if (js_obj->HasObjectElements()) {
- FixedArray elements = FixedArray::cast(js_obj->elements());
- int length = js_obj->IsJSArray()
- ? Smi::ToInt(JSArray::cast(js_obj)->length())
- : elements->length();
+ ReadOnlyRoots roots = js_obj.GetReadOnlyRoots();
+ if (js_obj.HasObjectElements()) {
+ FixedArray elements = FixedArray::cast(js_obj.elements());
+ int length = js_obj.IsJSArray() ? Smi::ToInt(JSArray::cast(js_obj).length())
+ : elements.length();
for (int i = 0; i < length; ++i) {
- if (!elements->get(i)->IsTheHole(roots)) {
- SetElementReference(entry, i, elements->get(i));
+ if (!elements.get(i).IsTheHole(roots)) {
+ SetElementReference(entry, i, elements.get(i));
}
}
- } else if (js_obj->HasDictionaryElements()) {
- NumberDictionary dictionary = js_obj->element_dictionary();
- int length = dictionary->Capacity();
+ } else if (js_obj.HasDictionaryElements()) {
+ NumberDictionary dictionary = js_obj.element_dictionary();
+ int length = dictionary.Capacity();
for (int i = 0; i < length; ++i) {
- Object k = dictionary->KeyAt(i);
- if (!dictionary->IsKey(roots, k)) continue;
- DCHECK(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- SetElementReference(entry, index, dictionary->ValueAt(i));
+ Object k = dictionary.KeyAt(i);
+ if (!dictionary.IsKey(roots, k)) continue;
+ DCHECK(k.IsNumber());
+ uint32_t index = static_cast<uint32_t>(k.Number());
+ SetElementReference(entry, index, dictionary.ValueAt(i));
}
}
}
void V8HeapExplorer::ExtractInternalReferences(JSObject js_obj,
HeapEntry* entry) {
- int length = js_obj->GetEmbedderFieldCount();
+ int length = js_obj.GetEmbedderFieldCount();
for (int i = 0; i < length; ++i) {
- Object o = js_obj->GetEmbedderField(i);
- SetInternalReference(entry, i, o, js_obj->GetEmbedderFieldOffset(i));
+ Object o = js_obj.GetEmbedderField(i);
+ SetInternalReference(entry, i, o, js_obj.GetEmbedderFieldOffset(i));
}
}
JSFunction V8HeapExplorer::GetConstructor(JSReceiver receiver) {
- Isolate* isolate = receiver->GetIsolate();
+ Isolate* isolate = receiver.GetIsolate();
DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
MaybeHandle<JSFunction> maybe_constructor =
@@ -1394,17 +1384,17 @@ JSFunction V8HeapExplorer::GetConstructor(JSReceiver receiver) {
}
String V8HeapExplorer::GetConstructorName(JSObject object) {
- Isolate* isolate = object->GetIsolate();
- if (object->IsJSFunction()) return ReadOnlyRoots(isolate).closure_string();
+ Isolate* isolate = object.GetIsolate();
+ if (object.IsJSFunction()) return ReadOnlyRoots(isolate).closure_string();
DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
return *JSReceiver::GetConstructorName(handle(object, isolate));
}
HeapEntry* V8HeapExplorer::GetEntry(Object obj) {
- return obj->IsHeapObject() ? generator_->FindOrAddEntry(
- reinterpret_cast<void*>(obj.ptr()), this)
- : nullptr;
+ return obj.IsHeapObject() ? generator_->FindOrAddEntry(
+ reinterpret_cast<void*>(obj.ptr()), this)
+ : nullptr;
}
class RootsReferencesExtractor : public RootVisitor {
@@ -1456,13 +1446,13 @@ bool V8HeapExplorer::IterateAndExtractReferences(
bool interrupted = false;
- HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
+ CombinedHeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
// Heap iteration with filtering must be finished in any case.
- for (HeapObject obj = iterator.next(); !obj.is_null();
- obj = iterator.next(), progress_->ProgressStep()) {
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next(), progress_->ProgressStep()) {
if (interrupted) continue;
- size_t max_pointer = obj->Size() / kTaggedSize;
+ size_t max_pointer = obj.Size() / kTaggedSize;
if (max_pointer > visited_fields_.size()) {
// Clear the current bits.
std::vector<bool>().swap(visited_fields_);
@@ -1472,11 +1462,11 @@ bool V8HeapExplorer::IterateAndExtractReferences(
HeapEntry* entry = GetEntry(obj);
ExtractReferences(entry, obj);
- SetInternalReference(entry, "map", obj->map(), HeapObject::kMapOffset);
+ SetInternalReference(entry, "map", obj.map(), HeapObject::kMapOffset);
// Extract unvisited fields as hidden references and restore tags
// of visited fields.
IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
+ obj.Iterate(&refs_extractor);
// Ensure visited_fields_ doesn't leak to the next object.
for (size_t i = 0; i < max_pointer; ++i) {
@@ -1495,7 +1485,7 @@ bool V8HeapExplorer::IterateAndExtractReferences(
bool V8HeapExplorer::IsEssentialObject(Object object) {
ReadOnlyRoots roots(heap_);
- return object->IsHeapObject() && !object->IsOddball() &&
+ return object.IsHeapObject() && !object.IsOddball() &&
object != roots.empty_byte_array() &&
object != roots.empty_fixed_array() &&
object != roots.empty_weak_fixed_array() &&
@@ -1510,13 +1500,13 @@ bool V8HeapExplorer::IsEssentialObject(Object object) {
bool V8HeapExplorer::IsEssentialHiddenReference(Object parent,
int field_offset) {
- if (parent->IsAllocationSite() &&
+ if (parent.IsAllocationSite() &&
field_offset == AllocationSite::kWeakNextOffset)
return false;
- if (parent->IsCodeDataContainer() &&
+ if (parent.IsCodeDataContainer() &&
field_offset == CodeDataContainer::kNextCodeLinkOffset)
return false;
- if (parent->IsContext() &&
+ if (parent.IsContext() &&
field_offset == Context::OffsetOfElementAt(Context::NEXT_CONTEXT_LINK))
return false;
return true;
@@ -1633,15 +1623,15 @@ void V8HeapExplorer::SetPropertyReference(HeapEntry* parent_entry,
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
HeapGraphEdge::Type type =
- reference_name->IsSymbol() || String::cast(reference_name)->length() > 0
+ reference_name.IsSymbol() || String::cast(reference_name).length() > 0
? HeapGraphEdge::kProperty
: HeapGraphEdge::kInternal;
const char* name =
- name_format_string != nullptr && reference_name->IsString()
+ name_format_string != nullptr && reference_name.IsString()
? names_->GetFormatted(
name_format_string,
String::cast(reference_name)
- ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
+ .ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
.get())
: names_->GetName(reference_name);
@@ -1688,10 +1678,10 @@ void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
// Add a shortcut to JS global object reference at snapshot root.
// That allows the user to easily find global objects. They are
// also used as starting points in distance calculations.
- if (is_weak || !child_obj->IsNativeContext()) return;
+ if (is_weak || !child_obj.IsNativeContext()) return;
- JSGlobalObject global = Context::cast(child_obj)->global_object();
- if (!global->IsJSGlobalObject()) return;
+ JSGlobalObject global = Context::cast(child_obj).global_object();
+ if (!global.IsJSGlobalObject()) return;
if (!user_roots_.insert(global).second) return;
@@ -1726,13 +1716,13 @@ class GlobalObjectsEnumerator : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
- if (!(*p)->IsNativeContext()) continue;
- JSObject proxy = Context::cast(*p)->global_proxy();
- if (!proxy->IsJSGlobalProxy()) continue;
- Object global = proxy->map()->prototype();
- if (!global->IsJSGlobalObject()) continue;
+ if (!(*p).IsNativeContext()) continue;
+ JSObject proxy = Context::cast(*p).global_proxy();
+ if (!proxy.IsJSGlobalProxy()) continue;
+ Object global = proxy.map().prototype();
+ if (!global.IsJSGlobalObject()) continue;
objects_.push_back(Handle<JSGlobalObject>(JSGlobalObject::cast(global),
- proxy->GetIsolate()));
+ proxy.GetIsolate()));
}
}
int count() const { return static_cast<int>(objects_.size()); }
@@ -1890,7 +1880,7 @@ HeapEntry* NativeObjectsExplorer::EntryForEmbedderGraphNode(
EmbedderGraphImpl::V8NodeImpl* v8_node =
static_cast<EmbedderGraphImpl::V8NodeImpl*>(node);
Object object = v8_node->GetObject();
- if (object->IsSmi()) return nullptr;
+ if (object.IsSmi()) return nullptr;
return generator_->FindEntry(
reinterpret_cast<void*>(Object::cast(object).ptr()));
}
@@ -2067,17 +2057,19 @@ class OutputStreamWriter {
MaybeWriteChunk();
}
void AddString(const char* s) {
- AddSubstring(s, StrLength(s));
+ size_t len = strlen(s);
+ DCHECK_GE(kMaxInt, len);
+ AddSubstring(s, static_cast<int>(len));
}
void AddSubstring(const char* s, int n) {
if (n <= 0) return;
- DCHECK(static_cast<size_t>(n) <= strlen(s));
+ DCHECK_LE(n, strlen(s));
const char* s_end = s + n;
while (s < s_end) {
int s_chunk_size =
Min(chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
DCHECK_GT(s_chunk_size, 0);
- MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size);
+ MemCopy(chunk_.begin() + chunk_pos_, s, s_chunk_size);
s += s_chunk_size;
chunk_pos_ += s_chunk_size;
MaybeWriteChunk();
@@ -2110,7 +2102,7 @@ class OutputStreamWriter {
int result = SNPrintF(buffer, format, n);
USE(result);
DCHECK_NE(result, -1);
- AddString(buffer.start());
+ AddString(buffer.begin());
}
}
void MaybeWriteChunk() {
@@ -2121,8 +2113,9 @@ class OutputStreamWriter {
}
void WriteChunk() {
if (aborted_) return;
- if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
- v8::OutputStream::kAbort) aborted_ = true;
+ if (stream_->WriteAsciiChunk(chunk_.begin(), chunk_pos_) ==
+ v8::OutputStream::kAbort)
+ aborted_ = true;
chunk_pos_ = 0;
}
@@ -2211,11 +2204,11 @@ namespace {
template<size_t size> struct ToUnsigned;
template<> struct ToUnsigned<4> {
- typedef uint32_t Type;
+ using Type = uint32_t;
};
template<> struct ToUnsigned<8> {
- typedef uint64_t Type;
+ using Type = uint64_t;
};
} // namespace
@@ -2269,7 +2262,7 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
buffer_pos = utoa(to_node_index(edge->to()), buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
+ writer_->AddString(buffer.begin());
}
void HeapSnapshotJSONSerializer::SerializeEdges() {
@@ -2306,7 +2299,7 @@ void HeapSnapshotJSONSerializer::SerializeNode(const HeapEntry* entry) {
buffer_pos = utoa(entry->trace_node_id(), buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
+ writer_->AddString(buffer.begin());
}
void HeapSnapshotJSONSerializer::SerializeNodes() {
@@ -2445,7 +2438,7 @@ void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
buffer[buffer_pos++] = ',';
buffer[buffer_pos++] = '[';
buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
+ writer_->AddString(buffer.begin());
int i = 0;
for (AllocationTraceNode* child : node->children()) {
@@ -2500,7 +2493,7 @@ void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
buffer_pos = SerializePosition(info->column, buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
+ writer_->AddString(buffer.begin());
}
}
@@ -2528,7 +2521,7 @@ void HeapSnapshotJSONSerializer::SerializeSamples() {
buffer_pos = utoa(sample.last_assigned_id(), buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
+ writer_->AddString(buffer.begin());
}
}
@@ -2615,7 +2608,7 @@ void HeapSnapshotJSONSerializer::SerializeLocation(
buffer_pos = utoa(location.col, buffer, buffer_pos);
buffer[buffer_pos++] = '\n';
buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
+ writer_->AddString(buffer.begin());
}
void HeapSnapshotJSONSerializer::SerializeLocations() {
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 045e288885..756500151f 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -12,15 +12,15 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
#include "src/objects/hash-table.h"
#include "src/objects/heap-object.h"
#include "src/objects/js-objects.h"
#include "src/objects/literal-objects.h"
+#include "src/objects/objects.h"
+#include "src/objects/visitors.h"
#include "src/profiler/strings-storage.h"
-#include "src/string-hasher.h"
-#include "src/visitors.h"
+#include "src/strings/string-hasher.h"
namespace v8 {
namespace internal {
@@ -294,7 +294,7 @@ class HeapObjectsMap {
// A typedef for referencing anything that can be snapshotted living
// in any kind of heap memory.
-typedef void* HeapThing;
+using HeapThing = void*;
// An interface that creates HeapEntries by HeapThings.
class HeapEntriesAllocator {
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index c8850a6a02..e869f65762 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -4,6 +4,8 @@
#include "src/profiler/profile-generator.h"
+#include <algorithm>
+
#include "src/objects/shared-function-info-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
@@ -179,12 +181,12 @@ void CodeEntry::set_deopt_info(
}
void CodeEntry::FillFunctionInfo(SharedFunctionInfo shared) {
- if (!shared->script()->IsScript()) return;
- Script script = Script::cast(shared->script());
- set_script_id(script->id());
- set_position(shared->StartPosition());
- if (shared->optimization_disabled()) {
- set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
+ if (!shared.script().IsScript()) return;
+ Script script = Script::cast(shared.script());
+ set_script_id(script.id());
+ set_position(shared.StartPosition());
+ if (shared.optimization_disabled()) {
+ set_bailout_reason(GetBailoutReason(shared.disable_optimization_reason()));
}
}
@@ -325,13 +327,12 @@ void ProfileNode::Print(int indent) {
base::OS::Print("\n");
for (size_t i = 0; i < deopt_infos_.size(); ++i) {
CpuProfileDeoptInfo& info = deopt_infos_[i];
- base::OS::Print("%*s;;; deopted at script_id: %d position: %" PRIuS
- " with reason '%s'.\n",
- indent + 10, "", info.stack[0].script_id,
- info.stack[0].position, info.deopt_reason);
+ base::OS::Print(
+ "%*s;;; deopted at script_id: %d position: %zu with reason '%s'.\n",
+ indent + 10, "", info.stack[0].script_id, info.stack[0].position,
+ info.deopt_reason);
for (size_t index = 1; index < info.stack.size(); ++index) {
- base::OS::Print("%*s;;; Inline point: script_id %d position: %" PRIuS
- ".\n",
+ base::OS::Print("%*s;;; Inline point: script_id %d position: %zu.\n",
indent + 10, "", info.stack[index].script_id,
info.stack[index].position);
}
@@ -474,10 +475,9 @@ using v8::tracing::TracedValue;
std::atomic<uint32_t> CpuProfile::last_id_;
CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
- bool record_samples, ProfilingMode mode)
+ CpuProfilingOptions options)
: title_(title),
- record_samples_(record_samples),
- mode_(mode),
+ options_(options),
start_time_(base::TimeTicks::HighResolutionNow()),
top_down_(profiler->isolate()),
profiler_(profiler),
@@ -490,15 +490,38 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
"Profile", id_, "data", std::move(value));
}
+bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) {
+ DCHECK_GE(source_sampling_interval, base::TimeDelta());
+
+ // If the sampling source's sampling interval is 0, record as many samples
+ // are possible irrespective of the profile's sampling interval. Manually
+ // taken samples (via CollectSample) fall into this case as well.
+ if (source_sampling_interval.IsZero()) return true;
+
+ next_sample_delta_ -= source_sampling_interval;
+ if (next_sample_delta_ <= base::TimeDelta()) {
+ next_sample_delta_ =
+ base::TimeDelta::FromMicroseconds(options_.sampling_interval_us());
+ return true;
+ }
+ return false;
+}
+
void CpuProfile::AddPath(base::TimeTicks timestamp,
const ProfileStackTrace& path, int src_line,
- bool update_stats) {
+ bool update_stats, base::TimeDelta sampling_interval) {
+ if (!CheckSubsample(sampling_interval)) return;
+
ProfileNode* top_frame_node =
- top_down_.AddPathFromEnd(path, src_line, update_stats, mode_);
+ top_down_.AddPathFromEnd(path, src_line, update_stats, options_.mode());
+
+ bool should_record_sample =
+ !timestamp.IsNull() &&
+ (options_.max_samples() == CpuProfilingOptions::kNoSampleLimit ||
+ samples_.size() < options_.max_samples());
- if (record_samples_ && !timestamp.IsNull()) {
+ if (should_record_sample)
samples_.push_back({top_frame_node, timestamp, src_line});
- }
const int kSamplesFlushCount = 100;
const int kNodesFlushCount = 10;
@@ -697,8 +720,7 @@ CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
: profiler_(nullptr), current_profiles_semaphore_(1) {}
bool CpuProfilesCollection::StartProfiling(const char* title,
- bool record_samples,
- ProfilingMode mode) {
+ CpuProfilingOptions options) {
current_profiles_semaphore_.Wait();
if (static_cast<int>(current_profiles_.size()) >= kMaxSimultaneousProfiles) {
current_profiles_semaphore_.Signal();
@@ -712,23 +734,20 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
return true;
}
}
- current_profiles_.emplace_back(
- new CpuProfile(profiler_, title, record_samples, mode));
+ current_profiles_.emplace_back(new CpuProfile(profiler_, title, options));
current_profiles_semaphore_.Signal();
return true;
}
-
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
- const int title_len = StrLength(title);
+ const bool empty_title = (title[0] == '\0');
CpuProfile* profile = nullptr;
current_profiles_semaphore_.Wait();
- auto it =
- std::find_if(current_profiles_.rbegin(), current_profiles_.rend(),
- [&](const std::unique_ptr<CpuProfile>& p) {
- return title_len == 0 || strcmp(p->title(), title) == 0;
- });
+ auto it = std::find_if(current_profiles_.rbegin(), current_profiles_.rend(),
+ [&](const std::unique_ptr<CpuProfile>& p) {
+ return empty_title || strcmp(p->title(), title) == 0;
+ });
if (it != current_profiles_.rend()) {
(*it)->FinishProfile();
@@ -747,8 +766,7 @@ bool CpuProfilesCollection::IsLastProfile(const char* title) {
// Called from VM thread, and only it can mutate the list,
// so no locking is needed here.
if (current_profiles_.size() != 1) return false;
- return StrLength(title) == 0
- || strcmp(current_profiles_[0]->title(), title) == 0;
+ return title[0] == '\0' || strcmp(current_profiles_[0]->title(), title) == 0;
}
@@ -763,15 +781,46 @@ void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
finished_profiles_.erase(pos);
}
+namespace {
+
+int64_t GreatestCommonDivisor(int64_t a, int64_t b) {
+ return b ? GreatestCommonDivisor(b, a % b) : a;
+}
+
+} // namespace
+
+base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
+ DCHECK(profiler_);
+
+ int64_t base_sampling_interval_us =
+ profiler_->sampling_interval().InMicroseconds();
+ if (base_sampling_interval_us == 0) return base::TimeDelta();
+
+ int64_t interval_us = 0;
+ for (const auto& profile : current_profiles_) {
+ // Snap the profile's requested sampling interval to the next multiple of
+ // the base sampling interval.
+ int64_t profile_interval_us =
+ std::max<int64_t>(
+ (profile->sampling_interval_us() + base_sampling_interval_us - 1) /
+ base_sampling_interval_us,
+ 1) *
+ base_sampling_interval_us;
+ interval_us = GreatestCommonDivisor(interval_us, profile_interval_us);
+ }
+ return base::TimeDelta::FromMicroseconds(interval_us);
+}
+
void CpuProfilesCollection::AddPathToCurrentProfiles(
base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
- bool update_stats) {
+ bool update_stats, base::TimeDelta sampling_interval) {
// As starting / stopping profiles is rare relatively to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
- profile->AddPath(timestamp, path, src_line, update_stats);
+ profile->AddPath(timestamp, path, src_line, update_stats,
+ sampling_interval);
}
current_profiles_semaphore_.Signal();
}
@@ -905,7 +954,8 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
profiles_->AddPathToCurrentProfiles(sample.timestamp, stack_trace, src_line,
- sample.update_stats);
+ sample.update_stats,
+ sample.sampling_interval);
}
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 404aca4396..b0543c9d79 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -15,12 +15,12 @@
#include <vector>
#include "include/v8-profiler.h"
-#include "src/allocation.h"
#include "src/base/platform/time.h"
#include "src/builtins/builtins.h"
-#include "src/code-events.h"
+#include "src/codegen/source-position.h"
+#include "src/logging/code-events.h"
#include "src/profiler/strings-storage.h"
-#include "src/source-position.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -234,7 +234,7 @@ struct CodeEntryAndLineNumber {
int line_number;
};
-typedef std::vector<CodeEntryAndLineNumber> ProfileStackTrace;
+using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;
class ProfileTree;
@@ -311,7 +311,7 @@ class V8_EXPORT_PRIVATE ProfileTree {
explicit ProfileTree(Isolate* isolate);
~ProfileTree();
- typedef v8::CpuProfilingMode ProfilingMode;
+ using ProfilingMode = v8::CpuProfilingMode;
ProfileNode* AddPathFromEnd(
const std::vector<CodeEntry*>& path,
@@ -358,20 +358,22 @@ class CpuProfiler;
class CpuProfile {
public:
- typedef v8::CpuProfilingMode ProfilingMode;
-
struct SampleInfo {
ProfileNode* node;
base::TimeTicks timestamp;
int line;
};
- CpuProfile(CpuProfiler* profiler, const char* title, bool record_samples,
- ProfilingMode mode);
+ V8_EXPORT_PRIVATE CpuProfile(CpuProfiler* profiler, const char* title,
+ CpuProfilingOptions options);
+ // Checks whether or not the given TickSample should be (sub)sampled, given
+ // the sampling interval of the profiler that recorded it (in microseconds).
+ V8_EXPORT_PRIVATE bool CheckSubsample(base::TimeDelta sampling_interval);
// Add pc -> ... -> main() call path to the profile.
void AddPath(base::TimeTicks timestamp, const ProfileStackTrace& path,
- int src_line, bool update_stats);
+ int src_line, bool update_stats,
+ base::TimeDelta sampling_interval);
void FinishProfile();
const char* title() const { return title_; }
@@ -380,6 +382,10 @@ class CpuProfile {
int samples_count() const { return static_cast<int>(samples_.size()); }
const SampleInfo& sample(int index) const { return samples_[index]; }
+ int64_t sampling_interval_us() const {
+ return options_.sampling_interval_us();
+ }
+
base::TimeTicks start_time() const { return start_time_; }
base::TimeTicks end_time() const { return end_time_; }
CpuProfiler* cpu_profiler() const { return profiler_; }
@@ -392,8 +398,7 @@ class CpuProfile {
void StreamPendingTraceEvents();
const char* title_;
- bool record_samples_;
- ProfilingMode mode_;
+ const CpuProfilingOptions options_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
std::deque<SampleInfo> samples_;
@@ -401,6 +406,9 @@ class CpuProfile {
CpuProfiler* const profiler_;
size_t streaming_next_sample_;
uint32_t id_;
+ // Number of microseconds worth of profiler ticks that should elapse before
+ // the next sample is recorded.
+ base::TimeDelta next_sample_delta_;
static std::atomic<uint32_t> last_id_;
@@ -447,11 +455,9 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
public:
explicit CpuProfilesCollection(Isolate* isolate);
- typedef v8::CpuProfilingMode ProfilingMode;
-
void set_cpu_profiler(CpuProfiler* profiler) { profiler_ = profiler; }
- bool StartProfiling(const char* title, bool record_samples,
- ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
+ bool StartProfiling(const char* title, CpuProfilingOptions options = {});
+
CpuProfile* StopProfiling(const char* title);
std::vector<std::unique_ptr<CpuProfile>>* profiles() {
return &finished_profiles_;
@@ -460,10 +466,16 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
bool IsLastProfile(const char* title);
void RemoveProfile(CpuProfile* profile);
+ // Finds a common sampling interval dividing each CpuProfile's interval,
+ // rounded up to the nearest multiple of the CpuProfiler's sampling interval.
+ // Returns 0 if no profiles are attached.
+ base::TimeDelta GetCommonSamplingInterval() const;
+
// Called from profile generator thread.
void AddPathToCurrentProfiles(base::TimeTicks timestamp,
const ProfileStackTrace& path, int src_line,
- bool update_stats);
+ bool update_stats,
+ base::TimeDelta sampling_interval);
// Limits the number of profiles that can be simultaneously collected.
static const int kMaxSimultaneousProfiles = 100;
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 15ef912523..156c1b8bb0 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -4,25 +4,26 @@
#include "src/profiler/profiler-listener.h"
-#include "src/deoptimizer.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/codegen/reloc-info.h"
+#include "src/codegen/source-position-table.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/handles/handles-inl.h"
#include "src/objects/code-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/script-inl.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/objects/string-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
-#include "src/reloc-info.h"
-#include "src/source-position-table.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
ProfilerListener::ProfilerListener(Isolate* isolate,
- CodeEventObserver* observer)
- : isolate_(isolate), observer_(observer) {}
+ CodeEventObserver* observer,
+ CpuProfilingNamingMode naming_mode)
+ : isolate_(isolate), observer_(observer), naming_mode_(naming_mode) {}
ProfilerListener::~ProfilerListener() = default;
@@ -39,12 +40,12 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode code, const char* name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ rec->instruction_start = code.InstructionStart();
rec->entry = new CodeEntry(tag, GetName(name), CodeEntry::kEmptyResourceName,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->InstructionStart());
- rec->instruction_size = code->InstructionSize();
+ code.InstructionStart());
+ rec->instruction_size = code.InstructionSize();
DispatchCodeEvent(evt_rec);
}
@@ -52,12 +53,12 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
AbstractCode code, Name name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ rec->instruction_start = code.InstructionStart();
rec->entry = new CodeEntry(tag, GetName(name), CodeEntry::kEmptyResourceName,
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->InstructionStart());
- rec->instruction_size = code->InstructionSize();
+ code.InstructionStart());
+ rec->instruction_size = code.InstructionSize();
DispatchCodeEvent(evt_rec);
}
@@ -67,15 +68,15 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
Name script_name) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
- rec->entry = new CodeEntry(tag, GetName(shared->DebugName()),
+ rec->instruction_start = code.InstructionStart();
+ rec->entry = new CodeEntry(tag, GetName(shared.DebugName()),
GetName(InferScriptName(script_name, shared)),
CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, nullptr,
- code->InstructionStart());
- DCHECK(!code->IsCode());
+ code.InstructionStart());
+ DCHECK(!code.IsCode());
rec->entry->FillFunctionInfo(shared);
- rec->instruction_size = code->InstructionSize();
+ rec->instruction_size = code.InstructionSize();
DispatchCodeEvent(evt_rec);
}
@@ -100,15 +101,15 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
Name script_name, int line, int column) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = abstract_code->InstructionStart();
+ rec->instruction_start = abstract_code.InstructionStart();
std::unique_ptr<SourcePositionTable> line_table;
std::unordered_map<int, std::vector<CodeEntryAndLineNumber>> inline_stacks;
std::unordered_set<std::unique_ptr<CodeEntry>, CodeEntry::Hasher,
CodeEntry::Equals>
cached_inline_entries;
bool is_shared_cross_origin = false;
- if (shared->script()->IsScript()) {
- Script script = Script::cast(shared->script());
+ if (shared.script().IsScript()) {
+ Script script = Script::cast(shared.script());
line_table.reset(new SourcePositionTable());
HandleScope scope(isolate_);
@@ -119,24 +120,27 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
// profiler as is stored on the code object, except that we transform source
// positions to line numbers here, because we only care about attributing
// ticks to a given line.
- for (SourcePositionTableIterator it(abstract_code->source_position_table());
+ for (SourcePositionTableIterator it(abstract_code.source_position_table());
!it.done(); it.Advance()) {
int position = it.source_position().ScriptOffset();
- int line_number = script->GetLineNumber(position) + 1;
int inlining_id = it.source_position().InliningId();
- // TODO(953309): Fix this.
- if (line_number == 0) continue;
-
- line_table->SetPosition(it.code_offset(), line_number, inlining_id);
-
- if (inlining_id != SourcePosition::kNotInlined) {
- DCHECK(abstract_code->IsCode());
- Code code = abstract_code->GetCode();
+ if (inlining_id == SourcePosition::kNotInlined) {
+ int line_number = script.GetLineNumber(position) + 1;
+ line_table->SetPosition(it.code_offset(), line_number, inlining_id);
+ } else {
+ DCHECK(abstract_code.IsCode());
+ Code code = abstract_code.GetCode();
std::vector<SourcePositionInfo> stack =
it.source_position().InliningStack(handle(code, isolate_));
DCHECK(!stack.empty());
+ // When we have an inlining id and we are doing cross-script inlining,
+ // then the script of the inlined frames may be different to the script
+ // of |shared|.
+ int line_number = stack.front().line + 1;
+ line_table->SetPosition(it.code_offset(), line_number, inlining_id);
+
std::vector<CodeEntryAndLineNumber> inline_stack;
for (SourcePositionInfo& pos_info : stack) {
if (pos_info.position.ScriptOffset() == kNoSourcePosition) continue;
@@ -147,7 +151,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
1;
const char* resource_name =
- (pos_info.script->name()->IsName())
+ (pos_info.script->name().IsName())
? GetName(Name::cast(pos_info.script->name()))
: CodeEntry::kEmptyResourceName;
@@ -163,9 +167,9 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
std::unique_ptr<CodeEntry> inline_entry =
base::make_unique<CodeEntry>(
- tag, GetName(pos_info.shared->DebugName()), resource_name,
+ tag, GetFunctionName(*pos_info.shared), resource_name,
start_pos_info.line + 1, start_pos_info.column + 1, nullptr,
- code->InstructionStart(), inline_is_shared_cross_origin);
+ code.InstructionStart(), inline_is_shared_cross_origin);
inline_entry->FillFunctionInfo(*pos_info.shared);
// Create a canonical CodeEntry for each inlined frame and then re-use
@@ -182,9 +186,9 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
}
rec->entry =
- new CodeEntry(tag, GetName(shared->DebugName()),
+ new CodeEntry(tag, GetFunctionName(shared),
GetName(InferScriptName(script_name, shared)), line, column,
- std::move(line_table), abstract_code->InstructionStart(),
+ std::move(line_table), abstract_code.InstructionStart(),
is_shared_cross_origin);
if (!inline_stacks.empty()) {
rec->entry->SetInlineStacks(std::move(cached_inline_entries),
@@ -192,7 +196,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
}
rec->entry->FillFunctionInfo(shared);
- rec->instruction_size = abstract_code->InstructionSize();
+ rec->instruction_size = abstract_code.InstructionSize();
DispatchCodeEvent(evt_rec);
}
@@ -203,7 +207,7 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->instruction_start = code->instruction_start();
rec->entry = new CodeEntry(
- tag, GetName(name.start()), CodeEntry::kWasmResourceNamePrefix,
+ tag, GetName(name.begin()), CodeEntry::kWasmResourceNamePrefix,
CpuProfileNode::kNoLineNumberInfo, CpuProfileNode::kNoColumnNumberInfo,
nullptr, code->instruction_start(), true);
rec->instruction_size = code->instructions().length();
@@ -213,8 +217,8 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_MOVE);
CodeMoveEventRecord* rec = &evt_rec.CodeMoveEventRecord_;
- rec->from_instruction_start = from->InstructionStart();
- rec->to_instruction_start = to->InstructionStart();
+ rec->from_instruction_start = from.InstructionStart();
+ rec->to_instruction_start = to.InstructionStart();
DispatchCodeEvent(evt_rec);
}
@@ -222,8 +226,8 @@ void ProfilerListener::CodeDisableOptEvent(AbstractCode code,
SharedFunctionInfo shared) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
- rec->instruction_start = code->InstructionStart();
- rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
+ rec->instruction_start = code.InstructionStart();
+ rec->bailout_reason = GetBailoutReason(shared.disable_optimization_reason());
DispatchCodeEvent(evt_rec);
}
@@ -232,7 +236,7 @@ void ProfilerListener::CodeDeoptEvent(Code code, DeoptimizeKind kind,
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
- rec->instruction_start = code->InstructionStart();
+ rec->instruction_start = code.InstructionStart();
rec->deopt_reason = DeoptimizeReasonToString(info.deopt_reason);
rec->deopt_id = info.deopt_id;
rec->pc = pc;
@@ -257,12 +261,12 @@ void ProfilerListener::GetterCallbackEvent(Name name, Address entry_point) {
void ProfilerListener::RegExpCodeCreateEvent(AbstractCode code, String source) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
- rec->instruction_start = code->InstructionStart();
+ rec->instruction_start = code.InstructionStart();
rec->entry = new CodeEntry(
CodeEventListener::REG_EXP_TAG, GetConsName("RegExp: ", source),
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
- CpuProfileNode::kNoColumnNumberInfo, nullptr, code->InstructionStart());
- rec->instruction_size = code->InstructionSize();
+ CpuProfileNode::kNoColumnNumberInfo, nullptr, code.InstructionStart());
+ rec->instruction_size = code.InstructionSize();
DispatchCodeEvent(evt_rec);
}
@@ -277,10 +281,22 @@ void ProfilerListener::SetterCallbackEvent(Name name, Address entry_point) {
}
Name ProfilerListener::InferScriptName(Name name, SharedFunctionInfo info) {
- if (name->IsString() && String::cast(name)->length()) return name;
- if (!info->script()->IsScript()) return name;
- Object source_url = Script::cast(info->script())->source_url();
- return source_url->IsName() ? Name::cast(source_url) : name;
+ if (name.IsString() && String::cast(name).length()) return name;
+ if (!info.script().IsScript()) return name;
+ Object source_url = Script::cast(info.script()).source_url();
+ return source_url.IsName() ? Name::cast(source_url) : name;
+}
+
+const char* ProfilerListener::GetFunctionName(SharedFunctionInfo shared) {
+ DisallowHeapAllocation no_gc;
+ switch (naming_mode_) {
+ case kDebugNaming:
+ return GetName(shared.DebugName());
+ case kStandardNaming:
+ return GetName(shared.Name());
+ default:
+ UNREACHABLE();
+ }
}
void ProfilerListener::AttachDeoptInlinedFrames(Code code,
diff --git a/deps/v8/src/profiler/profiler-listener.h b/deps/v8/src/profiler/profiler-listener.h
index aed6607133..6ca4225e54 100644
--- a/deps/v8/src/profiler/profiler-listener.h
+++ b/deps/v8/src/profiler/profiler-listener.h
@@ -8,7 +8,8 @@
#include <memory>
#include <vector>
-#include "src/code-events.h"
+#include "include/v8-profiler.h"
+#include "src/logging/code-events.h"
#include "src/profiler/profile-generator.h"
namespace v8 {
@@ -25,7 +26,8 @@ class CodeEventObserver {
class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
public:
- ProfilerListener(Isolate*, CodeEventObserver*);
+ ProfilerListener(Isolate*, CodeEventObserver*,
+ CpuProfilingNamingMode mode = kDebugNaming);
~ProfilerListener() override;
void CallbackEvent(Name name, Address entry_point) override;
@@ -70,6 +72,8 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
void set_observer(CodeEventObserver* observer) { observer_ = observer; }
private:
+ const char* GetFunctionName(SharedFunctionInfo);
+
void AttachDeoptInlinedFrames(Code code, CodeDeoptEventRecord* rec);
Name InferScriptName(Name name, SharedFunctionInfo info);
V8_INLINE void DispatchCodeEvent(const CodeEventsContainer& evt_rec) {
@@ -79,6 +83,7 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener {
Isolate* isolate_;
CodeEventObserver* observer_;
StringsStorage function_and_resource_names_;
+ const CpuProfilingNamingMode naming_mode_;
DISALLOW_COPY_AND_ASSIGN(ProfilerListener);
};
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 659ed25c00..de19d39eba 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -7,13 +7,13 @@
#include <stdint.h>
#include <memory>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/ieee754.h"
#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/frames-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
#include "src/profiler/strings-storage.h"
namespace v8 {
@@ -25,10 +25,9 @@ namespace internal {
//
// Let u be a uniformly distributed random number between 0 and 1, then
// next_sample = (- ln u) / λ
-intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
- if (FLAG_sampling_heap_profiler_suppress_randomness) {
+intptr_t SamplingHeapProfiler::Observer::GetNextSampleInterval(uint64_t rate) {
+ if (FLAG_sampling_heap_profiler_suppress_randomness)
return static_cast<intptr_t>(rate);
- }
double u = random_->NextDouble();
double next = (-base::ieee754::log(u)) * rate;
return next < kTaggedSize
@@ -55,12 +54,8 @@ SamplingHeapProfiler::SamplingHeapProfiler(
v8::HeapProfiler::SamplingFlags flags)
: isolate_(Isolate::FromHeap(heap)),
heap_(heap),
- new_space_observer_(new SamplingAllocationObserver(
- heap_, static_cast<intptr_t>(rate), rate, this,
- isolate_->random_number_generator())),
- other_spaces_observer_(new SamplingAllocationObserver(
- heap_, static_cast<intptr_t>(rate), rate, this,
- isolate_->random_number_generator())),
+ allocation_observer_(heap_, static_cast<intptr_t>(rate), rate, this,
+ isolate_->random_number_generator()),
names_(names),
profile_root_(nullptr, "(root)", v8::UnboundScript::kNoScriptId, 0,
next_node_id()),
@@ -68,13 +63,13 @@ SamplingHeapProfiler::SamplingHeapProfiler(
rate_(rate),
flags_(flags) {
CHECK_GT(rate_, 0u);
- heap_->AddAllocationObserversToAllSpaces(other_spaces_observer_.get(),
- new_space_observer_.get());
+ heap_->AddAllocationObserversToAllSpaces(&allocation_observer_,
+ &allocation_observer_);
}
SamplingHeapProfiler::~SamplingHeapProfiler() {
- heap_->RemoveAllocationObserversFromAllSpaces(other_spaces_observer_.get(),
- new_space_observer_.get());
+ heap_->RemoveAllocationObserversFromAllSpaces(&allocation_observer_,
+ &allocation_observer_);
}
void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
@@ -97,16 +92,6 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
base::make_unique<Sample>(size, node, loc, this, next_sample_id());
sample->global.SetWeak(sample.get(), OnWeakCallback,
WeakCallbackType::kParameter);
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- // MarkIndependent is marked deprecated but we still rely on it here
- // temporarily.
- sample->global.MarkIndependent();
-#if __clang__
-#pragma clang diagnostic pop
-#endif
samples_.emplace(sample.get(), std::move(sample));
}
@@ -160,8 +145,8 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
// closure on the stack. Skip over any such frames (they'll be
// in the top frames of the stack). The allocations made in this
// sensitive moment belong to the formerly optimized frame anyway.
- if (frame->unchecked_function()->IsJSFunction()) {
- SharedFunctionInfo shared = frame->function()->shared();
+ if (frame->unchecked_function().IsJSFunction()) {
+ SharedFunctionInfo shared = frame->function().shared();
stack.push_back(shared);
frames_captured++;
} else {
@@ -205,13 +190,13 @@ SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
// the first element in the list.
for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
SharedFunctionInfo shared = *it;
- const char* name = this->names()->GetName(shared->DebugName());
+ const char* name = this->names()->GetName(shared.DebugName());
int script_id = v8::UnboundScript::kNoScriptId;
- if (shared->script()->IsScript()) {
- Script script = Script::cast(shared->script());
- script_id = script->id();
+ if (shared.script().IsScript()) {
+ Script script = Script::cast(shared.script());
+ script_id = script.id();
}
- node = FindOrAddChildNode(node, name, script_id, shared->StartPosition());
+ node = FindOrAddChildNode(node, name, script_id, shared.StartPosition());
}
if (found_arguments_marker_frames) {
@@ -241,7 +226,7 @@ v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
const_cast<std::map<int, Handle<Script>>&>(scripts);
Handle<Script> script = non_const_scripts[node->script_id_];
if (!script.is_null()) {
- if (script->name()->IsName()) {
+ if (script->name().IsName()) {
Name name = Name::cast(script->name());
script_name = ToApiHandle<v8::String>(
isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
@@ -284,12 +269,12 @@ v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
Script::Iterator iterator(isolate_);
for (Script script = iterator.Next(); !script.is_null();
script = iterator.Next()) {
- scripts[script->id()] = handle(script, isolate_);
+ scripts[script.id()] = handle(script, isolate_);
}
}
auto profile = new v8::internal::AllocationProfile();
TranslateAllocationNode(profile, &profile_root_, scripts);
- profile->samples_ = SamplingHeapProfiler::BuildSamples();
+ profile->samples_ = BuildSamples();
return profile;
}
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.h b/deps/v8/src/profiler/sampling-heap-profiler.h
index 818c33581d..71ccf4caa8 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.h
+++ b/deps/v8/src/profiler/sampling-heap-profiler.h
@@ -9,6 +9,7 @@
#include <map>
#include <memory>
#include <unordered_map>
+
#include "include/v8-profiler.h"
#include "src/heap/heap.h"
#include "src/profiler/strings-storage.h"
@@ -48,7 +49,7 @@ class SamplingHeapProfiler {
public:
class AllocationNode {
public:
- typedef uint64_t FunctionId;
+ using FunctionId = uint64_t;
AllocationNode(AllocationNode* parent, const char* name, int script_id,
int start_position, uint32_t id)
: parent_(parent),
@@ -105,11 +106,9 @@ class SamplingHeapProfiler {
SamplingHeapProfiler* profiler_, uint64_t sample_id)
: size(size_),
owner(owner_),
- global(Global<Value>(
- reinterpret_cast<v8::Isolate*>(profiler_->isolate_), local_)),
+ global(reinterpret_cast<v8::Isolate*>(profiler_->isolate_), local_),
profiler(profiler_),
sample_id(sample_id) {}
- ~Sample() { global.Reset(); }
const size_t size;
AllocationNode* const owner;
Global<Value> global;
@@ -128,6 +127,38 @@ class SamplingHeapProfiler {
StringsStorage* names() const { return names_; }
private:
+ class Observer : public AllocationObserver {
+ public:
+ Observer(Heap* heap, intptr_t step_size, uint64_t rate,
+ SamplingHeapProfiler* profiler,
+ base::RandomNumberGenerator* random)
+ : AllocationObserver(step_size),
+ profiler_(profiler),
+ heap_(heap),
+ random_(random),
+ rate_(rate) {}
+
+ protected:
+ void Step(int bytes_allocated, Address soon_object, size_t size) override {
+ USE(heap_);
+ DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
+ if (soon_object) {
+ // TODO(ofrobots): it would be better to sample the next object rather
+ // than skipping this sample epoch if soon_object happens to be null.
+ profiler_->SampleObject(soon_object, size);
+ }
+ }
+
+ intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
+
+ private:
+ intptr_t GetNextSampleInterval(uint64_t rate);
+ SamplingHeapProfiler* const profiler_;
+ Heap* const heap_;
+ base::RandomNumberGenerator* const random_;
+ uint64_t const rate_;
+ };
+
void SampleObject(Address soon_object, size_t size);
const std::vector<v8::AllocationProfile::Sample> BuildSamples() const;
@@ -157,8 +188,7 @@ class SamplingHeapProfiler {
Heap* const heap_;
uint64_t last_sample_id_ = 0;
uint32_t last_node_id_ = 0;
- std::unique_ptr<SamplingAllocationObserver> new_space_observer_;
- std::unique_ptr<SamplingAllocationObserver> other_spaces_observer_;
+ Observer allocation_observer_;
StringsStorage* const names_;
AllocationNode profile_root_;
std::unordered_map<Sample*, std::unique_ptr<Sample>> samples_;
@@ -166,44 +196,9 @@ class SamplingHeapProfiler {
const uint64_t rate_;
v8::HeapProfiler::SamplingFlags flags_;
- friend class SamplingAllocationObserver;
-
DISALLOW_COPY_AND_ASSIGN(SamplingHeapProfiler);
};
-class SamplingAllocationObserver : public AllocationObserver {
- public:
- SamplingAllocationObserver(Heap* heap, intptr_t step_size, uint64_t rate,
- SamplingHeapProfiler* profiler,
- base::RandomNumberGenerator* random)
- : AllocationObserver(step_size),
- profiler_(profiler),
- heap_(heap),
- random_(random),
- rate_(rate) {}
- ~SamplingAllocationObserver() override = default;
-
- protected:
- void Step(int bytes_allocated, Address soon_object, size_t size) override {
- USE(heap_);
- DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
- if (soon_object) {
- // TODO(ofrobots): it would be better to sample the next object rather
- // than skipping this sample epoch if soon_object happens to be null.
- profiler_->SampleObject(soon_object, size);
- }
- }
-
- intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
-
- private:
- intptr_t GetNextSampleInterval(uint64_t rate);
- SamplingHeapProfiler* const profiler_;
- Heap* const heap_;
- base::RandomNumberGenerator* const random_;
- uint64_t const rate_;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 04a2379707..eeb5261f2c 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -6,8 +6,8 @@
#include <memory>
-#include "src/allocation.h"
-#include "src/objects-inl.h"
+#include "src/utils/allocation.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -33,7 +33,7 @@ const char* StringsStorage::GetCopy(const char* src) {
Vector<char> dst = Vector<char>::New(len + 1);
StrNCpy(dst, src, len);
dst[len] = '\0';
- entry->key = dst.start();
+ entry->key = dst.begin();
entry->value = entry->key;
}
return reinterpret_cast<const char*>(entry->value);
@@ -63,21 +63,21 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
Vector<char> str = Vector<char>::New(1024);
int len = VSNPrintF(str, format, args);
if (len == -1) {
- DeleteArray(str.start());
+ DeleteArray(str.begin());
return GetCopy(format);
}
- return AddOrDisposeString(str.start(), len);
+ return AddOrDisposeString(str.begin(), len);
}
const char* StringsStorage::GetName(Name name) {
- if (name->IsString()) {
+ if (name.IsString()) {
String str = String::cast(name);
- int length = Min(FLAG_heap_snapshot_string_limit, str->length());
+ int length = Min(FLAG_heap_snapshot_string_limit, str.length());
int actual_length = 0;
- std::unique_ptr<char[]> data = str->ToCString(
+ std::unique_ptr<char[]> data = str.ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
return AddOrDisposeString(data.release(), actual_length);
- } else if (name->IsSymbol()) {
+ } else if (name.IsSymbol()) {
return "<symbol>";
}
return "";
@@ -88,11 +88,11 @@ const char* StringsStorage::GetName(int index) {
}
const char* StringsStorage::GetConsName(const char* prefix, Name name) {
- if (name->IsString()) {
+ if (name.IsString()) {
String str = String::cast(name);
- int length = Min(FLAG_heap_snapshot_string_limit, str->length());
+ int length = Min(FLAG_heap_snapshot_string_limit, str.length());
int actual_length = 0;
- std::unique_ptr<char[]> data = str->ToCString(
+ std::unique_ptr<char[]> data = str.ToCString(
DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
int cons_length = actual_length + static_cast<int>(strlen(prefix)) + 1;
@@ -100,7 +100,7 @@ const char* StringsStorage::GetConsName(const char* prefix, Name name) {
snprintf(cons_result, cons_length, "%s%s", prefix, data.get());
return AddOrDisposeString(cons_result, cons_length);
- } else if (name->IsSymbol()) {
+ } else if (name.IsSymbol()) {
return "<symbol>";
}
return "";
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index 9b56a6e412..650ecac3e6 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -9,7 +9,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/hashmap.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index dca3e2d045..1f2b4bc72a 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -4,14 +4,16 @@
#include "src/profiler/tick-sample.h"
+#include <cinttypes>
+
#include "include/v8-profiler.h"
-#include "src/asan.h"
-#include "src/counters.h"
-#include "src/frames-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/simulator.h"
+#include "src/execution/vm-state-inl.h"
#include "src/heap/heap-inl.h" // For MemoryAllocator::code_range.
-#include "src/msan.h"
-#include "src/simulator.h"
-#include "src/vm-state-inl.h"
+#include "src/logging/counters.h"
+#include "src/sanitizer/asan.h"
+#include "src/sanitizer/msan.h"
namespace v8 {
namespace {
@@ -100,10 +102,12 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
}
state->sp = reinterpret_cast<void*>(simulator->get_register(Simulator::sp));
state->fp = reinterpret_cast<void*>(simulator->get_register(Simulator::r11));
+ state->lr = reinterpret_cast<void*>(simulator->get_register(Simulator::lr));
#elif V8_TARGET_ARCH_ARM64
state->pc = reinterpret_cast<void*>(simulator->pc());
state->sp = reinterpret_cast<void*>(simulator->sp());
state->fp = reinterpret_cast<void*>(simulator->fp());
+ state->lr = reinterpret_cast<void*>(simulator->lr());
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
if (!simulator->has_bad_pc()) {
state->pc = reinterpret_cast<void*>(simulator->get_pc());
@@ -116,12 +120,14 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
}
state->sp = reinterpret_cast<void*>(simulator->get_register(Simulator::sp));
state->fp = reinterpret_cast<void*>(simulator->get_register(Simulator::fp));
+ state->lr = reinterpret_cast<void*>(simulator->get_lr());
#elif V8_TARGET_ARCH_S390
if (!simulator->has_bad_pc()) {
state->pc = reinterpret_cast<void*>(simulator->get_pc());
}
state->sp = reinterpret_cast<void*>(simulator->get_register(Simulator::sp));
state->fp = reinterpret_cast<void*>(simulator->get_register(Simulator::fp));
+ state->lr = reinterpret_cast<void*>(simulator->get_register(Simulator::ra));
#endif
if (state->sp == 0 || state->fp == 0) {
// It possible that the simulator is interrupted while it is updating
@@ -234,8 +240,10 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
: reinterpret_cast<void*>(*external_callback_entry_ptr);
}
- i::SafeStackFrameIterator it(isolate, reinterpret_cast<i::Address>(regs->fp),
+ i::SafeStackFrameIterator it(isolate, reinterpret_cast<i::Address>(regs->pc),
+ reinterpret_cast<i::Address>(regs->fp),
reinterpret_cast<i::Address>(regs->sp),
+ reinterpret_cast<i::Address>(regs->lr),
js_entry_sp);
if (it.done()) return true;
@@ -269,7 +277,8 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
// If the bytecode array is a heap object and the bytecode offset is a
// Smi, use those, otherwise fall back to using the frame's pc.
- if (HAS_HEAP_OBJECT_TAG(bytecode_array) && HAS_SMI_TAG(bytecode_offset)) {
+ if (HAS_STRONG_HEAP_OBJECT_TAG(bytecode_array) &&
+ HAS_SMI_TAG(bytecode_offset)) {
frames[i++] = reinterpret_cast<void*>(
bytecode_array + i::Internals::SmiValue(bytecode_offset));
continue;
@@ -285,10 +294,12 @@ namespace internal {
void TickSample::Init(Isolate* isolate, const v8::RegisterState& state,
RecordCEntryFrame record_c_entry_frame, bool update_stats,
- bool use_simulator_reg_state) {
+ bool use_simulator_reg_state,
+ base::TimeDelta sampling_interval) {
v8::TickSample::Init(reinterpret_cast<v8::Isolate*>(isolate), state,
record_c_entry_frame, update_stats,
use_simulator_reg_state);
+ this->sampling_interval = sampling_interval;
if (pc == nullptr) return;
timestamp = base::TimeTicks::HighResolutionNow();
}
@@ -305,6 +316,8 @@ void TickSample::print() const {
PrintF(" - %s: %p\n",
has_external_callback ? "external_callback_entry" : "tos", tos);
PrintF(" - update_stats: %d\n", update_stats);
+ PrintF(" - sampling_interval: %" PRId64 "\n",
+ sampling_interval.InMicroseconds());
PrintF("\n");
}
diff --git a/deps/v8/src/profiler/tick-sample.h b/deps/v8/src/profiler/tick-sample.h
index ea66010632..ba78c923c4 100644
--- a/deps/v8/src/profiler/tick-sample.h
+++ b/deps/v8/src/profiler/tick-sample.h
@@ -7,7 +7,7 @@
#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -17,8 +17,10 @@ class Isolate;
struct TickSample : public v8::TickSample {
void Init(Isolate* isolate, const v8::RegisterState& state,
RecordCEntryFrame record_c_entry_frame, bool update_stats,
- bool use_simulator_reg_state = true);
+ bool use_simulator_reg_state = true,
+ base::TimeDelta sampling_interval = base::TimeDelta());
base::TimeTicks timestamp;
+ base::TimeDelta sampling_interval; // Sampling interval used to capture.
void print() const;
};
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.cc b/deps/v8/src/profiler/tracing-cpu-profiler.cc
index 0cb502bdf1..26092885a9 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.cc
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.cc
@@ -4,9 +4,9 @@
#include "src/profiler/tracing-cpu-profiler.h"
+#include "src/init/v8.h"
#include "src/profiler/cpu-profiler.h"
#include "src/tracing/trace-event.h"
-#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -53,10 +53,10 @@ void TracingCpuProfilerImpl::StartProfiling() {
TRACE_EVENT_CATEGORY_GROUP_ENABLED(
TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires"), &enabled);
int sampling_interval_us = enabled ? 100 : 1000;
- profiler_.reset(new CpuProfiler(isolate_));
+ profiler_.reset(new CpuProfiler(isolate_, kDebugNaming));
profiler_->set_sampling_interval(
base::TimeDelta::FromMicroseconds(sampling_interval_us));
- profiler_->StartProfiling("", true);
+ profiler_->StartProfiling("", {kLeafNodeLineNumbers});
}
void TracingCpuProfilerImpl::StopProfiling() {
diff --git a/deps/v8/src/protobuf/DEPS b/deps/v8/src/protobuf/DEPS
new file mode 100644
index 0000000000..7aecc6fcb5
--- /dev/null
+++ b/deps/v8/src/protobuf/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+third_party/protobuf/src/google/protobuf"
+]
diff --git a/deps/v8/src/protobuf/OWNERS b/deps/v8/src/protobuf/OWNERS
new file mode 100644
index 0000000000..507f904088
--- /dev/null
+++ b/deps/v8/src/protobuf/OWNERS
@@ -0,0 +1 @@
+petermarshall@chromium.org
diff --git a/deps/v8/src/protobuf/protobuf-compiler-main.cc b/deps/v8/src/protobuf/protobuf-compiler-main.cc
new file mode 100644
index 0000000000..74588e6915
--- /dev/null
+++ b/deps/v8/src/protobuf/protobuf-compiler-main.cc
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/protobuf/src/google/protobuf/compiler/command_line_interface.h"
+#include "third_party/protobuf/src/google/protobuf/compiler/cpp/cpp_generator.h"
+
+namespace v8 {
+namespace internal {
+
+int ProtobufCompilerMain(int argc, char* argv[]) {
+ google::protobuf::compiler::CommandLineInterface cli;
+ cli.AllowPlugins("protoc-");
+
+ // Proto2 C++
+ google::protobuf::compiler::cpp::CppGenerator cpp_generator;
+ cli.RegisterGenerator("--cpp_out", "--cpp_opt", &cpp_generator,
+ "Generate C++ header and source.");
+
+ return cli.Run(argc, argv);
+}
+
+} // namespace internal
+} // namespace v8
+
+int main(int argc, char* argv[]) {
+ return v8::internal::ProtobufCompilerMain(argc, argv);
+}
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index a523ccd3d3..8b462cb03c 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -6,15 +6,15 @@
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
#include "src/heap/factory.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/unicode.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -671,7 +671,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ sub(sp, sp, Operand(num_registers_ * kPointerSize));
+ __ AllocateStackSpace(num_registers_ * kPointerSize);
// Load string end.
__ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Load input start.
@@ -884,8 +884,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
- masm_->CodeObject());
+ Handle<Code> code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
@@ -1057,7 +1058,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState() {
// We need to make room for the return address on the stack.
int stack_alignment = base::OS::ActivationFrameAlignment();
DCHECK(IsAligned(stack_alignment, kPointerSize));
- __ sub(sp, sp, Operand(stack_alignment));
+ __ AllocateStackSpace(stack_alignment);
// r0 will point to the return address, placed by DirectCEntry.
__ mov(r0, sp);
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index c38d8a06d7..9e95f8e1f2 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -5,8 +5,8 @@
#ifndef V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_REGEXP_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
-#include "src/arm/assembler-arm.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/arm/assembler-arm.h"
+#include "src/codegen/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 70521f2603..b299ad0535 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -6,14 +6,14 @@
#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/unicode.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -1068,8 +1068,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
- masm_->CodeObject());
+ Handle<Code> code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
@@ -1182,7 +1183,6 @@ void RegExpMacroAssemblerARM64::ReadCurrentPositionFromRegister(int reg) {
break;
default:
UNREACHABLE();
- break;
}
}
@@ -1514,7 +1514,6 @@ Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
break;
default:
UNREACHABLE();
- break;
}
DCHECK(result.Is32Bits());
return result;
@@ -1548,7 +1547,6 @@ void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
}
default:
UNREACHABLE();
- break;
}
}
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 9c87bf37b6..ef83f9e43c 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -5,8 +5,8 @@
#ifndef V8_REGEXP_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
#define V8_REGEXP_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
-#include "src/arm64/assembler-arm64.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/arm64/assembler-arm64.h"
+#include "src/codegen/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/gen-regexp-special-case.cc b/deps/v8/src/regexp/gen-regexp-special-case.cc
new file mode 100644
index 0000000000..8aace6ab88
--- /dev/null
+++ b/deps/v8/src/regexp/gen-regexp-special-case.cc
@@ -0,0 +1,125 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+
+#include "src/base/logging.h"
+#include "unicode/uchar.h"
+#include "unicode/uniset.h"
+
+namespace v8 {
+namespace internal {
+
+// The following code generates BuildSpecialAddSet() and BuildIgnoreSet()
+// functions into "src/regexp/special-case.cc".
+// See more details in http://shorturl.at/adfO5
+void PrintSet(std::ofstream& out, const char* func_name,
+ const icu::UnicodeSet& set) {
+ out << "icu::UnicodeSet " << func_name << "() {\n"
+ << " icu::UnicodeSet set;\n";
+ for (int32_t i = 0; i < set.getRangeCount(); i++) {
+ if (set.getRangeStart(i) == set.getRangeEnd(i)) {
+ out << " set.add(0x" << set.getRangeStart(i) << ");\n";
+ } else {
+ out << " set.add(0x" << set.getRangeStart(i) << ", 0x"
+ << set.getRangeEnd(i) << ");\n";
+ }
+ }
+ out << " set.freeze();\n"
+ << " return set;\n"
+ << "}\n";
+}
+
+void PrintSpecial(std::ofstream& out) {
+ icu::UnicodeSet current;
+ icu::UnicodeSet processed(0xd800, 0xdbff); // Ignore surrogate range.
+ icu::UnicodeSet special_add;
+ icu::UnicodeSet ignore;
+ UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeSet upper("[\\p{Lu}]", status);
+ CHECK(U_SUCCESS(status));
+ // Iterate through all chars in BMP except ASCII and Surrogate.
+ for (UChar32 i = 0x80; i < 0x010000; i++) {
+ // Ignore those characters that are already processed.
+ if (!processed.contains(i)) {
+ current.set(i, i);
+ current.closeOver(USET_CASE_INSENSITIVE);
+
+ // Remember we already processed current.
+ processed.addAll(current);
+
+ // All uppercase characters in current.
+ icu::UnicodeSet keep_upper(current);
+ keep_upper.retainAll(upper);
+
+ // Check if we have more than one uppercase character in current.
+ // If there is more than one uppercase character, then current is a
+ // special set which needs to be added into either the "Special Add"
+ // set or the "Ignore" set.
+ int32_t number_of_upper = 0;
+ for (int32_t i = 0; i < keep_upper.getRangeCount() && i <= 1; i++) {
+ number_of_upper +=
+ keep_upper.getRangeEnd(i) - keep_upper.getRangeStart(i) + 1;
+ }
+ if (number_of_upper > 1) {
+ // Add all non uppercase characters (could be Ll or Mn) to special add
+ // set.
+ current.removeAll(upper);
+ special_add.addAll(current);
+
+ // Add the uppercase character of the non-uppercase character to
+ // the special add set.
+ CHECK_GT(current.getRangeCount(), 0);
+ UChar32 main_upper = u_toupper(current.getRangeStart(0));
+ special_add.add(main_upper);
+
+ // Add all uppercase except the main upper to ignore set.
+ keep_upper.remove(main_upper);
+ ignore.addAll(keep_upper);
+ }
+ }
+ }
+
+ // Remove any ASCII
+ special_add.remove(0x0000, 0x007f);
+ PrintSet(out, "BuildIgnoreSet", ignore);
+ PrintSet(out, "BuildSpecialAddSet", special_add);
+}
+
+void WriteHeader(const char* header_filename) {
+ std::ofstream out(header_filename);
+ out << std::hex << std::setfill('0') << std::setw(4);
+
+ out << "// Automatically generated by regexp/gen-regexp-special-case.cc\n"
+ << "// The following functions are used to build icu::UnicodeSet\n"
+ << "// for special cases different between Unicode and ECMA262.\n"
+ << "#ifdef V8_INTL_SUPPORT\n"
+ << "#include \"src/regexp/special-case.h\"\n\n"
+ << "#include \"unicode/uniset.h\"\n"
+ << "namespace v8 {\n"
+ << "namespace internal {\n\n";
+
+ PrintSpecial(out);
+
+ out << "\n"
+ << "} // namespace internal\n"
+ << "} // namespace v8\n"
+ << "#endif // V8_INTL_SUPPORT\n";
+}
+
+} // namespace internal
+} // namespace v8
+
+int main(int argc, const char** argv) {
+ if (argc != 2) {
+ std::cerr << "Usage: " << argv[0] << " <output filename>\n";
+ std::exit(1);
+ }
+ v8::internal::WriteHeader(argv[1]);
+
+ return 0;
+}
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index af9237a264..eb42c23215 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -6,13 +6,13 @@
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
-#include "src/assembler-inl.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/unicode.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -709,7 +709,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ mov(ebx, Operand(ebp, kStartIndex));
// Allocate space on stack for registers.
- __ sub(esp, Immediate(num_registers_ * kSystemPointerSize));
+ __ AllocateStackSpace(num_registers_ * kSystemPointerSize);
// Load string length.
__ mov(esi, Operand(ebp, kInputEnd));
// Load input position.
@@ -729,18 +729,6 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// position registers.
__ mov(Operand(ebp, kStringStartMinusOne), eax);
-#if V8_OS_WIN
- // Ensure that we write to each stack page, in order. Skipping a page
- // on Windows can cause segmentation faults. Assuming page size is 4k.
- const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kSystemPointerSize;
- for (int i = num_saved_registers_ + kRegistersPerPage - 1;
- i < num_registers_;
- i += kRegistersPerPage) {
- __ mov(register_location(i), eax); // One write every page.
- }
-#endif // V8_OS_WIN
-
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
__ cmp(Operand(ebp, kStartIndex), Immediate(0));
@@ -941,8 +929,9 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(masm_->isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
- masm_->CodeObject());
+ Handle<Code> code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index eb6b0335a8..914552cc93 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -5,8 +5,8 @@
#ifndef V8_REGEXP_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
#define V8_REGEXP_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
-#include "src/ia32/assembler-ia32.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/ia32/assembler-ia32.h"
+#include "src/codegen/macro-assembler.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index 55b862dc56..04bb63ee7a 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -7,12 +7,12 @@
#include "src/regexp/interpreter-irregexp.h"
#include "src/ast/ast.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/bytecodes-irregexp.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
-#include "src/unicode.h"
-#include "src/utils.h"
+#include "src/strings/unicode.h"
+#include "src/utils/utils.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uchar.h"
@@ -176,7 +176,7 @@ IrregexpInterpreter::Result HandleInterrupts(Isolate* isolate,
result = isolate->stack_guard()->HandleInterrupts();
}
- if (result->IsException(isolate)) {
+ if (result.IsException(isolate)) {
return IrregexpInterpreter::EXCEPTION;
}
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
index 926fec2868..b542add17b 100644
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ b/deps/v8/src/regexp/jsregexp-inl.h
@@ -6,10 +6,10 @@
#ifndef V8_REGEXP_JSREGEXP_INL_H_
#define V8_REGEXP_JSREGEXP_INL_H_
-#include "src/allocation.h"
-#include "src/objects.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/objects.h"
#include "src/regexp/jsregexp.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 7995505226..a6f3a5ebcb 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -8,15 +8,14 @@
#include <vector>
#include "src/base/platform/platform.h"
-#include "src/code-tracer.h"
-#include "src/compilation-cache.h"
-#include "src/elements.h"
-#include "src/execution.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/diagnostics/code-tracer.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate-inl.h"
-#include "src/message-template.h"
-#include "src/ostreams.h"
+#include "src/objects/elements.h"
#include "src/regexp/interpreter-irregexp.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/regexp-macro-assembler-irregexp.h"
@@ -24,14 +23,19 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-parser.h"
#include "src/regexp/regexp-stack.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/regexp/special-case.h"
+#endif // V8_INTL_SUPPORT
#include "src/runtime/runtime.h"
-#include "src/splay-tree-inl.h"
-#include "src/string-search.h"
-#include "src/unicode-decoder.h"
-#include "src/unicode-inl.h"
+#include "src/strings/string-search.h"
+#include "src/strings/unicode-decoder.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/splay-tree-inl.h"
#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "unicode/locid.h"
#include "unicode/uniset.h"
#include "unicode/utypes.h"
#endif // V8_INTL_SUPPORT
@@ -176,7 +180,7 @@ MaybeHandle<Object> RegExpImpl::Compile(Isolate* isolate, Handle<JSRegExp> re,
if (!has_been_compiled) {
IrregexpInitialize(isolate, re, pattern, flags, parse_result.capture_count);
}
- DCHECK(re->data()->IsFixedArray());
+ DCHECK(re->data().IsFixedArray());
// Compilation succeeded so the data is set on the regexp
// and we can store it in the cache.
Handle<FixedArray> data(FixedArray::cast(re->data()), isolate);
@@ -230,8 +234,8 @@ int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
DisallowHeapAllocation no_gc; // ensure vectors stay valid
String needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
- int needle_len = needle->length();
- DCHECK(needle->IsFlat());
+ int needle_len = needle.length();
+ DCHECK(needle.IsFlat());
DCHECK_LT(0, needle_len);
if (index + needle_len > subject->length()) {
@@ -239,7 +243,7 @@ int RegExpImpl::AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
}
for (int i = 0; i < output_size; i += 2) {
- String::FlatContent needle_content = needle->GetFlatContent(no_gc);
+ String::FlatContent needle_content = needle.GetFlatContent(no_gc);
String::FlatContent subject_content = subject->GetFlatContent(no_gc);
DCHECK(needle_content.IsFlat());
DCHECK(subject_content.IsFlat());
@@ -300,8 +304,8 @@ bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
bool is_one_byte) {
Object compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
if (compiled_code != Smi::FromInt(JSRegExp::kUninitializedValue)) {
- DCHECK(FLAG_regexp_interpret_all ? compiled_code->IsByteArray()
- : compiled_code->IsCode());
+ DCHECK(FLAG_regexp_interpret_all ? compiled_code.IsByteArray()
+ : compiled_code.IsCode());
return true;
}
return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
@@ -317,7 +321,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Object entry = re->DataAt(JSRegExp::code_index(is_one_byte));
// When arriving here entry can only be a smi representing an uncompiled
// regexp.
- DCHECK(entry->IsSmi());
+ DCHECK(entry.IsSmi());
int entry_value = Smi::ToInt(entry);
DCHECK_EQ(JSRegExp::kUninitializedValue, entry_value);
#endif
@@ -340,7 +344,7 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
sample_subject, is_one_byte);
if (result.error_message != nullptr) {
// Unable to compile regexp.
- if (FLAG_abort_on_stack_or_string_length_overflow &&
+ if (FLAG_correctness_fuzzer_suppressions &&
strncmp(result.error_message, "Stack overflow", 15) == 0) {
FATAL("Aborting on stack overflow");
}
@@ -363,37 +367,36 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
}
int RegExpImpl::IrregexpMaxRegisterCount(FixedArray re) {
- return Smi::cast(
- re->get(JSRegExp::kIrregexpMaxRegisterCountIndex))->value();
+ return Smi::cast(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex)).value();
}
void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray re, int value) {
- re->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
+ re.set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(value));
}
void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray re,
Handle<FixedArray> value) {
if (value.is_null()) {
- re->set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero);
+ re.set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero);
} else {
- re->set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
+ re.set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
}
}
int RegExpImpl::IrregexpNumberOfCaptures(FixedArray re) {
- return Smi::ToInt(re->get(JSRegExp::kIrregexpCaptureCountIndex));
+ return Smi::ToInt(re.get(JSRegExp::kIrregexpCaptureCountIndex));
}
int RegExpImpl::IrregexpNumberOfRegisters(FixedArray re) {
- return Smi::ToInt(re->get(JSRegExp::kIrregexpMaxRegisterCountIndex));
+ return Smi::ToInt(re.get(JSRegExp::kIrregexpMaxRegisterCountIndex));
}
ByteArray RegExpImpl::IrregexpByteCode(FixedArray re, bool is_one_byte) {
- return ByteArray::cast(re->get(JSRegExp::code_index(is_one_byte)));
+ return ByteArray::cast(re.get(JSRegExp::code_index(is_one_byte)));
}
Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
- return Code::cast(re->get(JSRegExp::code_index(is_one_byte)));
+ return Code::cast(re.get(JSRegExp::code_index(is_one_byte)));
}
void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
@@ -525,7 +528,7 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
#ifdef DEBUG
if (FLAG_regexp_interpret_all && FLAG_trace_regexp_bytecodes) {
String pattern = regexp->Pattern();
- PrintF("\n\nRegexp match: /%s/\n\n", pattern->ToCString().get());
+ PrintF("\n\nRegexp match: /%s/\n\n", pattern.ToCString().get());
PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
}
#endif
@@ -1725,7 +1728,6 @@ static inline bool EmitAtomLetter(Isolate* isolate,
break;
default:
UNREACHABLE();
- break;
}
return true;
}
@@ -2754,7 +2756,7 @@ RegExpNode* TextNode::FilterOneByte(int depth) {
}
if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
// Replace quark in case we converted to Latin-1.
- uint16_t* writable_quarks = const_cast<uint16_t*>(quarks.start());
+ uint16_t* writable_quarks = const_cast<uint16_t*>(quarks.begin());
writable_quarks[j] = c;
}
} else {
@@ -5929,6 +5931,39 @@ Vector<const int> CharacterRange::GetWordBounds() {
return Vector<const int>(kWordRanges, kWordRangeCount - 1);
}
+#ifdef V8_INTL_SUPPORT
+struct IgnoreSet {
+ IgnoreSet() : set(BuildIgnoreSet()) {}
+ const icu::UnicodeSet set;
+};
+
+struct SpecialAddSet {
+ SpecialAddSet() : set(BuildSpecialAddSet()) {}
+ const icu::UnicodeSet set;
+};
+
+icu::UnicodeSet BuildAsciiAToZSet() {
+ icu::UnicodeSet set('a', 'z');
+ set.add('A', 'Z');
+ set.freeze();
+ return set;
+}
+
+struct AsciiAToZSet {
+ AsciiAToZSet() : set(BuildAsciiAToZSet()) {}
+ const icu::UnicodeSet set;
+};
+
+static base::LazyInstance<IgnoreSet>::type ignore_set =
+ LAZY_INSTANCE_INITIALIZER;
+
+static base::LazyInstance<SpecialAddSet>::type special_add_set =
+ LAZY_INSTANCE_INITIALIZER;
+
+static base::LazyInstance<AsciiAToZSet>::type ascii_a_to_z_set =
+ LAZY_INSTANCE_INITIALIZER;
+#endif // V8_INTL_SUPPORT
+
// static
void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
ZoneList<CharacterRange>* ranges,
@@ -5936,49 +5971,100 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
CharacterRange::Canonicalize(ranges);
int range_count = ranges->length();
#ifdef V8_INTL_SUPPORT
- icu::UnicodeSet already_added;
icu::UnicodeSet others;
for (int i = 0; i < range_count; i++) {
CharacterRange range = ranges->at(i);
- uc32 bottom = range.from();
- if (bottom > String::kMaxUtf16CodeUnit) continue;
- uc32 top = Min(range.to(), String::kMaxUtf16CodeUnit);
+ uc32 from = range.from();
+ if (from > String::kMaxUtf16CodeUnit) continue;
+ uc32 to = Min(range.to(), String::kMaxUtf16CodeUnit);
// Nothing to be done for surrogates.
- if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
+ if (from >= kLeadSurrogateStart && to <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (bottom > String::kMaxOneByteCharCode) continue;
- if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
+ if (from > String::kMaxOneByteCharCode) continue;
+ if (to > String::kMaxOneByteCharCode) to = String::kMaxOneByteCharCode;
}
- already_added.add(bottom, top);
- while (bottom <= top) {
- icu::UnicodeString upper(bottom);
- upper.toUpper();
- icu::UnicodeSet expanded(bottom, bottom);
- expanded.closeOver(USET_CASE_INSENSITIVE);
- for (int32_t i = 0; i < expanded.getRangeCount(); i++) {
- UChar32 start = expanded.getRangeStart(i);
- UChar32 end = expanded.getRangeEnd(i);
- while (start <= end) {
- icu::UnicodeString upper2(start);
- upper2.toUpper();
- // Only add if the upper case are the same.
- if (upper[0] == upper2[0]) {
- others.add(start);
+ others.add(from, to);
+ }
+
+ // Set of characters already added to ranges that do not need to be added
+ // again.
+ icu::UnicodeSet already_added(others);
+
+ // Set of characters in ranges that are in the 52 ASCII characters [a-zA-Z].
+ icu::UnicodeSet in_ascii_a_to_z(others);
+ in_ascii_a_to_z.retainAll(ascii_a_to_z_set.Pointer()->set);
+
+ // Remove all chars in [a-zA-Z] from others.
+ others.removeAll(in_ascii_a_to_z);
+
+ // Set of characters in ranges that are overlapping with special add set.
+ icu::UnicodeSet in_special_add(others);
+ in_special_add.retainAll(special_add_set.Pointer()->set);
+
+ others.removeAll(in_special_add);
+
+ // Ignore all chars in ignore set.
+ others.removeAll(ignore_set.Pointer()->set);
+
+ // For most of the chars in ranges that are still in others, find the case
+ // equivalent set by calling closeOver(USET_CASE_INSENSITIVE).
+ others.closeOver(USET_CASE_INSENSITIVE);
+
+ // Because closeOver(USET_CASE_INSENSITIVE) may add ASCII [a-zA-Z] to others,
+ // but ECMA262 "i" mode won't consider that, remove them from others.
+ // Ex: U+017F add 'S' and 's' to others.
+ others.removeAll(ascii_a_to_z_set.Pointer()->set);
+
+ // Special handling for in_ascii_a_to_z.
+ for (int32_t i = 0; i < in_ascii_a_to_z.getRangeCount(); i++) {
+ UChar32 start = in_ascii_a_to_z.getRangeStart(i);
+ UChar32 end = in_ascii_a_to_z.getRangeEnd(i);
+ // Check if it is uppercase A-Z by checking bit 6.
+ if (start & 0x0020) {
+ // Add the lowercases
+ others.add(start & 0x005F, end & 0x005F);
+ } else {
+ // Add the uppercases
+ others.add(start | 0x0020, end | 0x0020);
+ }
+ }
+
+ // Special handling for chars in "Special Add" set.
+ for (int32_t i = 0; i < in_special_add.getRangeCount(); i++) {
+ UChar32 end = in_special_add.getRangeEnd(i);
+ for (UChar32 ch = in_special_add.getRangeStart(i); ch <= end; ch++) {
+ // Add the uppercase of this character if itself is not an uppercase
+ // character.
+ // Note: The if condition cannot be u_islower(ch) because ch could be
+ // neither uppercase nor lowercase but Mn.
+ if (!u_isupper(ch)) {
+ others.add(u_toupper(ch));
+ }
+ icu::UnicodeSet candidates(ch, ch);
+ candidates.closeOver(USET_CASE_INSENSITIVE);
+ for (int32_t j = 0; j < candidates.getRangeCount(); j++) {
+ UChar32 end2 = candidates.getRangeEnd(j);
+ for (UChar32 ch2 = candidates.getRangeStart(j); ch2 <= end2; ch2++) {
+ // Add character that is not uppercase to others.
+ if (!u_isupper(ch2)) {
+ others.add(ch2);
}
- start++;
}
}
- bottom++;
}
}
+
+ // Remove all characters which are already in the ranges.
others.removeAll(already_added);
+
+ // Add others to the ranges
for (int32_t i = 0; i < others.getRangeCount(); i++) {
- UChar32 start = others.getRangeStart(i);
- UChar32 end = others.getRangeEnd(i);
- if (start == end) {
- ranges->Add(CharacterRange::Singleton(start), zone);
+ UChar32 from = others.getRangeStart(i);
+ UChar32 to = others.getRangeEnd(i);
+ if (from == to) {
+ ranges->Add(CharacterRange::Singleton(from), zone);
} else {
- ranges->Add(CharacterRange::Range(start, end), zone);
+ ranges->Add(CharacterRange::Range(from, to), zone);
}
}
#else
@@ -6872,32 +6958,32 @@ Object RegExpResultsCache::Lookup(Heap* heap, String key_string,
FixedArray* last_match_cache,
ResultsCacheType type) {
FixedArray cache;
- if (!key_string->IsInternalizedString()) return Smi::kZero;
+ if (!key_string.IsInternalizedString()) return Smi::kZero;
if (type == STRING_SPLIT_SUBSTRINGS) {
- DCHECK(key_pattern->IsString());
- if (!key_pattern->IsInternalizedString()) return Smi::kZero;
+ DCHECK(key_pattern.IsString());
+ if (!key_pattern.IsInternalizedString()) return Smi::kZero;
cache = heap->string_split_cache();
} else {
DCHECK(type == REGEXP_MULTIPLE_INDICES);
- DCHECK(key_pattern->IsFixedArray());
+ DCHECK(key_pattern.IsFixedArray());
cache = heap->regexp_multiple_cache();
}
- uint32_t hash = key_string->Hash();
+ uint32_t hash = key_string.Hash();
uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) != key_string ||
- cache->get(index + kPatternOffset) != key_pattern) {
+ if (cache.get(index + kStringOffset) != key_string ||
+ cache.get(index + kPatternOffset) != key_pattern) {
index =
((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index + kStringOffset) != key_string ||
- cache->get(index + kPatternOffset) != key_pattern) {
+ if (cache.get(index + kStringOffset) != key_string ||
+ cache.get(index + kPatternOffset) != key_pattern) {
return Smi::kZero;
}
}
- *last_match_cache = FixedArray::cast(cache->get(index + kLastMatchOffset));
- return cache->get(index + kArrayOffset);
+ *last_match_cache = FixedArray::cast(cache.get(index + kLastMatchOffset));
+ return cache.get(index + kArrayOffset);
}
void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
@@ -6961,7 +7047,7 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
void RegExpResultsCache::Clear(FixedArray cache) {
for (int i = 0; i < kRegExpResultsCacheSize; i++) {
- cache->set(i, Smi::kZero);
+ cache.set(i, Smi::kZero);
}
}
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 0a0b5c10d6..832c7e3aa5 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -5,11 +5,11 @@
#ifndef V8_REGEXP_JSREGEXP_H_
#define V8_REGEXP_JSREGEXP_H_
-#include "src/allocation.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "src/objects/js-regexp.h"
#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp-macro-assembler.h"
+#include "src/utils/allocation.h"
#include "src/zone/zone-splay-tree.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/mips/OWNERS b/deps/v8/src/regexp/mips/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/regexp/mips/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 35303ff1d3..e8104ced7e 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -6,14 +6,14 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
-#include "src/assembler-inl.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/unicode.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -899,8 +899,9 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
- masm_->CodeObject());
+ Handle<Code> code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
LOG(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 51004ecc97..b785910466 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -5,8 +5,8 @@
#ifndef V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
#define V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#include "src/macro-assembler.h"
-#include "src/mips/assembler-mips.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips/assembler-mips.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/mips64/OWNERS b/deps/v8/src/regexp/mips64/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/regexp/mips64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 0d1b591005..239cc87ae8 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -6,14 +6,14 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
-#include "src/assembler-inl.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/unicode.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -936,8 +936,9 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
- masm_->CodeObject());
+ Handle<Code> code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
LOG(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 179f4844d3..d24735d08e 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -5,8 +5,8 @@
#ifndef V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
#define V8_REGEXP_MIPS64_REGEXP_MACRO_ASSEMBLER_MIPS64_H_
-#include "src/macro-assembler.h"
-#include "src/mips64/assembler-mips64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips64/assembler-mips64.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index a646aa17ef..bce612e66f 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -6,14 +6,14 @@
#include "src/regexp/ppc/regexp-macro-assembler-ppc.h"
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/log.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/unicode.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -756,7 +756,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
if (num_saved_registers_ > 8) {
// One slot beyond address of register 0.
__ addi(r4, frame_pointer(), Operand(kRegisterZero + kPointerSize));
- __ li(r5, Operand(num_saved_registers_));
+ __ mov(r5, Operand(num_saved_registers_));
__ mtctr(r5);
Label init_loop;
__ bind(&init_loop);
@@ -938,8 +938,9 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
- masm_->CodeObject());
+ Handle<Code> code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 4e69daa1e5..418a01a9a4 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -5,8 +5,8 @@
#ifndef V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
#define V8_REGEXP_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
-#include "src/macro-assembler.h"
-#include "src/ppc/assembler-ppc.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/ppc/assembler-ppc.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/property-sequences.h b/deps/v8/src/regexp/property-sequences.h
index 1d9fce3571..10c79e82b1 100644
--- a/deps/v8/src/regexp/property-sequences.h
+++ b/deps/v8/src/regexp/property-sequences.h
@@ -7,7 +7,7 @@
#ifdef V8_INTL_SUPPORT
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-ast.cc b/deps/v8/src/regexp/regexp-ast.cc
index 782c9c9037..561d11eef5 100644
--- a/deps/v8/src/regexp/regexp-ast.cc
+++ b/deps/v8/src/regexp/regexp-ast.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ostreams.h"
#include "src/regexp/regexp-ast.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 7c21a02590..1fa9f7a35b 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -5,10 +5,10 @@
#ifndef V8_REGEXP_REGEXP_AST_H_
#define V8_REGEXP_REGEXP_AST_H_
-#include "src/objects.h"
#include "src/objects/js-regexp.h"
+#include "src/objects/objects.h"
#include "src/objects/string.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
index 2a4d6e5e2f..cda48aa00b 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
@@ -20,7 +20,7 @@ void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
if (pc_ + 3 >= buffer_.length()) {
Expand();
}
- *reinterpret_cast<uint32_t*>(buffer_.start() + pc_) = word;
+ *reinterpret_cast<uint32_t*>(buffer_.begin() + pc_) = word;
pc_ += 4;
}
@@ -30,7 +30,7 @@ void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) {
if (pc_ + 1 >= buffer_.length()) {
Expand();
}
- *reinterpret_cast<uint16_t*>(buffer_.start() + pc_) = word;
+ *reinterpret_cast<uint16_t*>(buffer_.begin() + pc_) = word;
pc_ += 2;
}
@@ -40,7 +40,7 @@ void RegExpMacroAssemblerIrregexp::Emit8(uint32_t word) {
if (pc_ == buffer_.length()) {
Expand();
}
- *reinterpret_cast<unsigned char*>(buffer_.start() + pc_) = word;
+ *reinterpret_cast<unsigned char*>(buffer_.begin() + pc_) = word;
pc_ += 1;
}
@@ -50,7 +50,7 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
if (pc_ + 3 >= buffer_.length()) {
Expand();
}
- *reinterpret_cast<uint32_t*>(buffer_.start() + pc_) = word;
+ *reinterpret_cast<uint32_t*>(buffer_.begin() + pc_) = word;
pc_ += 4;
}
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
index eeffb7d262..712f00e509 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -5,7 +5,7 @@
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/ast/ast.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/bytecodes-irregexp.h"
#include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -41,8 +41,8 @@ void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
int pos = l->pos();
while (pos != 0) {
int fixup = pos;
- pos = *reinterpret_cast<int32_t*>(buffer_.start() + fixup);
- *reinterpret_cast<uint32_t*>(buffer_.start() + fixup) = pc_;
+ pos = *reinterpret_cast<int32_t*>(buffer_.begin() + fixup);
+ *reinterpret_cast<uint32_t*>(buffer_.begin() + fixup) = pc_;
}
}
l->bind_to(pc_);
@@ -436,7 +436,7 @@ int RegExpMacroAssemblerIrregexp::length() {
}
void RegExpMacroAssemblerIrregexp::Copy(byte* a) {
- MemCopy(a, buffer_.start(), length());
+ MemCopy(a, buffer_.begin(), length());
}
@@ -445,7 +445,7 @@ void RegExpMacroAssemblerIrregexp::Expand() {
Vector<byte> old_buffer = buffer_;
buffer_ = Vector<byte>::New(old_buffer.length() * 2);
own_buffer_ = true;
- MemCopy(buffer_.start(), old_buffer.start(), old_buffer.length());
+ MemCopy(buffer_.begin(), old_buffer.begin(), old_buffer.length());
if (old_buffer_was_our_own) {
old_buffer.Dispose();
}
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index 945c6927b5..db9c5af569 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -5,7 +5,7 @@
#include "src/regexp/regexp-macro-assembler-tracer.h"
#include "src/ast/ast.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index a75c45d24e..cfe827ef4e 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -4,11 +4,11 @@
#include "src/regexp/regexp-macro-assembler.h"
-#include "src/assembler.h"
-#include "src/isolate-inl.h"
+#include "src/codegen/assembler.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/simulator.h"
#include "src/regexp/regexp-stack.h"
-#include "src/simulator.h"
-#include "src/unicode-inl.h"
+#include "src/strings/unicode-inl.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uchar.h"
@@ -102,30 +102,30 @@ bool NativeRegExpMacroAssembler::CanReadUnaligned() {
const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
String subject, int start_index, const DisallowHeapAllocation& no_gc) {
- if (subject->IsConsString()) {
- subject = ConsString::cast(subject)->first();
- } else if (subject->IsSlicedString()) {
- start_index += SlicedString::cast(subject)->offset();
- subject = SlicedString::cast(subject)->parent();
+ if (subject.IsConsString()) {
+ subject = ConsString::cast(subject).first();
+ } else if (subject.IsSlicedString()) {
+ start_index += SlicedString::cast(subject).offset();
+ subject = SlicedString::cast(subject).parent();
}
- if (subject->IsThinString()) {
- subject = ThinString::cast(subject)->actual();
+ if (subject.IsThinString()) {
+ subject = ThinString::cast(subject).actual();
}
DCHECK_LE(0, start_index);
- DCHECK_LE(start_index, subject->length());
- if (subject->IsSeqOneByteString()) {
+ DCHECK_LE(start_index, subject.length());
+ if (subject.IsSeqOneByteString()) {
return reinterpret_cast<const byte*>(
- SeqOneByteString::cast(subject)->GetChars(no_gc) + start_index);
- } else if (subject->IsSeqTwoByteString()) {
+ SeqOneByteString::cast(subject).GetChars(no_gc) + start_index);
+ } else if (subject.IsSeqTwoByteString()) {
return reinterpret_cast<const byte*>(
- SeqTwoByteString::cast(subject)->GetChars(no_gc) + start_index);
- } else if (subject->IsExternalOneByteString()) {
+ SeqTwoByteString::cast(subject).GetChars(no_gc) + start_index);
+ } else if (subject.IsExternalOneByteString()) {
return reinterpret_cast<const byte*>(
- ExternalOneByteString::cast(subject)->GetChars() + start_index);
+ ExternalOneByteString::cast(subject).GetChars() + start_index);
} else {
- DCHECK(subject->IsExternalTwoByteString());
+ DCHECK(subject.IsExternalTwoByteString());
return reinterpret_cast<const byte*>(
- ExternalTwoByteString::cast(subject)->GetChars() + start_index);
+ ExternalTwoByteString::cast(subject).GetChars() + start_index);
}
}
@@ -134,8 +134,8 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
Address* return_address, Code re_code, Address* subject,
const byte** input_start, const byte** input_end) {
AllowHeapAllocation allow_allocation;
- DCHECK(re_code->raw_instruction_start() <= *return_address);
- DCHECK(*return_address <= re_code->raw_instruction_end());
+ DCHECK(re_code.raw_instruction_start() <= *return_address);
+ DCHECK(*return_address <= re_code.raw_instruction_end());
int return_value = 0;
// Prepare for possible GC.
HandleScope handles(isolate);
@@ -158,13 +158,13 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
return_value = EXCEPTION;
} else {
Object result = isolate->stack_guard()->HandleInterrupts();
- if (result->IsException(isolate)) return_value = EXCEPTION;
+ if (result.IsException(isolate)) return_value = EXCEPTION;
}
DisallowHeapAllocation no_gc;
if (*code_handle != re_code) { // Return address no longer valid
- intptr_t delta = code_handle->address() - re_code->address();
+ intptr_t delta = code_handle->address() - re_code.address();
// Overwrite the return address on the stack.
*return_address += delta;
}
@@ -206,25 +206,25 @@ int NativeRegExpMacroAssembler::Match(Handle<Code> regexp_code,
String subject_ptr = *subject;
// Character offsets into string.
int start_offset = previous_index;
- int char_length = subject_ptr->length() - start_offset;
+ int char_length = subject_ptr.length() - start_offset;
int slice_offset = 0;
// The string has been flattened, so if it is a cons string it contains the
// full string in the first part.
if (StringShape(subject_ptr).IsCons()) {
- DCHECK_EQ(0, ConsString::cast(subject_ptr)->second()->length());
- subject_ptr = ConsString::cast(subject_ptr)->first();
+ DCHECK_EQ(0, ConsString::cast(subject_ptr).second().length());
+ subject_ptr = ConsString::cast(subject_ptr).first();
} else if (StringShape(subject_ptr).IsSliced()) {
SlicedString slice = SlicedString::cast(subject_ptr);
- subject_ptr = slice->parent();
- slice_offset = slice->offset();
+ subject_ptr = slice.parent();
+ slice_offset = slice.offset();
}
if (StringShape(subject_ptr).IsThin()) {
- subject_ptr = ThinString::cast(subject_ptr)->actual();
+ subject_ptr = ThinString::cast(subject_ptr).actual();
}
// Ensure that an underlying string has the same representation.
- bool is_one_byte = subject_ptr->IsOneByteRepresentation();
- DCHECK(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
+ bool is_one_byte = subject_ptr.IsOneByteRepresentation();
+ DCHECK(subject_ptr.IsExternalString() || subject_ptr.IsSeqString());
// String is now either Sequential or External
int char_size_shift = is_one_byte ? 0 : 1;
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index e28ac1ef22..8626d1a19e 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -5,7 +5,7 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
-#include "src/label.h"
+#include "src/codegen/label.h"
#include "src/regexp/regexp-ast.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 97be9fa27b..7cae456f56 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -6,14 +6,14 @@
#include <vector>
-#include "src/char-predicates-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/property-sequences.h"
-#include "src/utils.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/utils.h"
#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
@@ -77,7 +77,7 @@ void RegExpParser::Advance() {
if (has_next()) {
StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
- if (FLAG_abort_on_stack_or_string_length_overflow) {
+ if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on stack overflow");
}
ReportError(CStrVector(
@@ -995,7 +995,7 @@ Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
capture->name()->size());
// CSA code in ConstructNewResultFromMatchInfo requires these strings to be
// internalized so they can be used as property names in the 'exec' results.
- Handle<String> name = factory->InternalizeTwoByteString(capture_name);
+ Handle<String> name = factory->InternalizeString(capture_name);
array->set(i * 2, *name);
array->set(i * 2 + 1, Smi::FromInt(capture->index()));
}
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index d6db037de0..bf9e62ed71 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -5,8 +5,8 @@
#ifndef V8_REGEXP_REGEXP_PARSER_H_
#define V8_REGEXP_REGEXP_PARSER_H_
-#include "src/objects.h"
#include "src/objects/js-regexp.h"
+#include "src/objects/objects.h"
#include "src/regexp/regexp-ast.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/regexp/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index b87fbc399a..3885fd8e8d 100644
--- a/deps/v8/src/regexp/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -4,7 +4,8 @@
#include "src/regexp/regexp-stack.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
+#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-stack.h b/deps/v8/src/regexp/regexp-stack.h
index 37cecd355b..b1d4571760 100644
--- a/deps/v8/src/regexp/regexp-stack.h
+++ b/deps/v8/src/regexp/regexp-stack.h
@@ -7,7 +7,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index e2e95493fe..49f9d4476b 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -4,10 +4,10 @@
#include "src/regexp/regexp-utils.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/jsregexp.h"
namespace v8 {
@@ -37,7 +37,7 @@ Handle<String> RegExpUtils::GenericCaptureGetter(
namespace {
V8_INLINE bool HasInitialRegExpMap(Isolate* isolate, JSReceiver recv) {
- return recv->map() == isolate->regexp_function()->initial_map();
+ return recv.map() == isolate->regexp_function()->initial_map();
}
} // namespace
@@ -48,7 +48,7 @@ MaybeHandle<Object> RegExpUtils::SetLastIndex(Isolate* isolate,
Handle<Object> value_as_object =
isolate->factory()->NewNumberFromInt64(value);
if (HasInitialRegExpMap(isolate, *recv)) {
- JSRegExp::cast(*recv)->set_last_index(*value_as_object, SKIP_WRITE_BARRIER);
+ JSRegExp::cast(*recv).set_last_index(*value_as_object, SKIP_WRITE_BARRIER);
return recv;
} else {
return Object::SetProperty(
@@ -60,7 +60,7 @@ MaybeHandle<Object> RegExpUtils::SetLastIndex(Isolate* isolate,
MaybeHandle<Object> RegExpUtils::GetLastIndex(Isolate* isolate,
Handle<JSReceiver> recv) {
if (HasInitialRegExpMap(isolate, *recv)) {
- return handle(JSRegExp::cast(*recv)->last_index(), isolate);
+ return handle(JSRegExp::cast(*recv).last_index(), isolate);
} else {
return Object::GetProperty(isolate, recv,
isolate->factory()->lastIndex_string());
@@ -89,7 +89,7 @@ MaybeHandle<Object> RegExpUtils::RegExpExec(Isolate* isolate,
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
- Execution::Call(isolate, exec, regexp, argc, argv.start()), Object);
+ Execution::Call(isolate, exec, regexp, argc, argv.begin()), Object);
if (!result->IsJSReceiver() && !result->IsNull(isolate)) {
THROW_NEW_ERROR(isolate,
@@ -115,7 +115,7 @@ MaybeHandle<Object> RegExpUtils::RegExpExec(Isolate* isolate,
ScopedVector<Handle<Object>> argv(argc);
argv[0] = string;
- return Execution::Call(isolate, regexp_exec, regexp, argc, argv.start());
+ return Execution::Call(isolate, regexp_exec, regexp, argc, argv.begin());
}
}
@@ -158,35 +158,33 @@ bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
if (!HasInitialRegExpMap(isolate, recv)) return false;
// Check the receiver's prototype's map.
- Object proto = recv->map()->prototype();
- if (!proto->IsJSReceiver()) return false;
+ Object proto = recv.map().prototype();
+ if (!proto.IsJSReceiver()) return false;
Handle<Map> initial_proto_initial_map = isolate->regexp_prototype_map();
- Map proto_map = JSReceiver::cast(proto)->map();
+ Map proto_map = JSReceiver::cast(proto).map();
if (proto_map != *initial_proto_initial_map) {
return false;
}
// Check that the "exec" method is unmodified.
- if (FLAG_track_constant_fields) {
- // Check that the index refers to "exec" method (this has to be consistent
- // with the init order in the bootstrapper).
- DCHECK_EQ(*(isolate->factory()->exec_string()),
- proto_map->instance_descriptors()->GetKey(
- JSRegExp::kExecFunctionDescriptorIndex));
- if (proto_map->instance_descriptors()
- ->GetDetails(JSRegExp::kExecFunctionDescriptorIndex)
- .constness() != PropertyConstness::kConst) {
- return false;
- }
+ // Check that the index refers to "exec" method (this has to be consistent
+ // with the init order in the bootstrapper).
+ DCHECK_EQ(*(isolate->factory()->exec_string()),
+ proto_map.instance_descriptors().GetKey(
+ JSRegExp::kExecFunctionDescriptorIndex));
+ if (proto_map.instance_descriptors()
+ .GetDetails(JSRegExp::kExecFunctionDescriptorIndex)
+ .constness() != PropertyConstness::kConst) {
+ return false;
}
if (!isolate->IsRegExpSpeciesLookupChainIntact()) return false;
// The smi check is required to omit ToLength(lastIndex) calls with possible
// user-code execution on the fast path.
- Object last_index = JSRegExp::cast(recv)->last_index();
- return last_index->IsSmi() && Smi::ToInt(last_index) >= 0;
+ Object last_index = JSRegExp::cast(recv).last_index();
+ return last_index.IsSmi() && Smi::ToInt(last_index) >= 0;
}
uint64_t RegExpUtils::AdvanceStringIndex(Handle<String> string, uint64_t index,
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
index 4e32bf10f4..4b8714c55f 100644
--- a/deps/v8/src/regexp/regexp-utils.h
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -5,7 +5,7 @@
#ifndef V8_REGEXP_REGEXP_UTILS_H_
#define V8_REGEXP_REGEXP_UTILS_H_
-#include "src/objects.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/s390/OWNERS b/deps/v8/src/regexp/s390/OWNERS
deleted file mode 100644
index 6d1a8fc472..0000000000
--- a/deps/v8/src/regexp/s390/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-jyan@ca.ibm.com
-joransiu@ca.ibm.com
-michael_dawson@ca.ibm.com
-miladfar@ca.ibm.com \ No newline at end of file
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index e73caee402..5ebdd6ce15 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#if V8_TARGET_ARCH_S390
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/log.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/snapshot/embedded-data.h"
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
-#include "src/unicode.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -731,7 +731,7 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
if (num_saved_registers_ > 8) {
// One slot beyond address of register 0.
__ lay(r3, MemOperand(frame_pointer(), kRegisterZero + kPointerSize));
- __ LoadImmP(r4, Operand(num_saved_registers_));
+ __ Load(r4, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
__ StoreP(r1, MemOperand(r3, -kPointerSize));
@@ -930,8 +930,9 @@ Handle<HeapObject> RegExpMacroAssemblerS390::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(isolate(), &code_desc);
- Handle<Code> code = isolate()->factory()->NewCode(code_desc, Code::REGEXP,
- masm_->CodeObject());
+ Handle<Code> code = Factory::CodeBuilder(isolate(), code_desc, Code::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
PROFILE(masm_->isolate(),
RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index a7de245a10..636ba76079 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -5,9 +5,9 @@
#ifndef V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
#define V8_REGEXP_S390_REGEXP_MACRO_ASSEMBLER_S390_H_
-#include "src/macro-assembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/s390/assembler-s390.h"
#include "src/regexp/regexp-macro-assembler.h"
-#include "src/s390/assembler-s390.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/special-case.h b/deps/v8/src/regexp/special-case.h
new file mode 100644
index 0000000000..1ccec5d31a
--- /dev/null
+++ b/deps/v8/src/regexp/special-case.h
@@ -0,0 +1,79 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_SPECIAL_CASE_H_
+#define V8_REGEXP_SPECIAL_CASE_H_
+
+#ifdef V8_INTL_SUPPORT
+#include "unicode/uversion.h"
+namespace U_ICU_NAMESPACE {
+class UnicodeSet;
+} // namespace U_ICU_NAMESPACE
+
+namespace v8 {
+namespace internal {
+
+// Functions to build special sets of Unicode characters that need special
+// handling under "i" mode that cannot use closeOver(USET_CASE_INSENSITIVE).
+//
+// For the characters in the "ignore set", the process should not treat other
+// characters in the result of closeOver(USET_CASE_INSENSITIVE) as case
+// equivalent under the ECMA262 RegExp "i" mode because these characters are
+// uppercase themselves that no other characters in the set uppercase to.
+//
+// For the characters in the "special add set", the process should add only
+// those characters in the result of closeOver(USET_CASE_INSENSITIVE) which is
+// not uppercase characters as case equivalent under the ECMA262 RegExp "i" mode
+// and also that ONE uppercase character that other non uppercase character
+// uppercase into to the set. Other uppercase characters in the result of
+// closeOver(USET_CASE_INSENSITIVE) should not be considered because ECMA262
+// RegExp "i" mode considers two characters as "case equivalent" if both
+// characters uppercase to the same character.
+//
+// For example, consider the following case equivalent set defined by Unicode
+// standard. Notice there are more than one uppercase characters in this set:
+// U+212B Å Angstrom Sign - an uppercase character.
+// U+00C5 Å Latin Capital Letter A with Ring Above - an uppercase character.
+// U+00E5 å Latin Small Letter A with Ring Above - a lowercase character which
+// uppercase to U+00C5.
+// This case equivalent set is a special set and needs special handling while
+// considering "case equivalent" under the ECMA262 RegExp "i" mode, which is
+// different than Unicode Standard:
+// * U+212B should be included into the "ignore" set because there are no other
+// characters, under the ECMA262 "i" mode, are considered as "case equivalent"
+// to it because U+212B is itself an uppercase but neither U+00C5 nor U+00E5
+// uppercase to U+212B.
+// * U+00C5 and U+00E5 will both be included into the "special add" set. While
+// calculate the "equivalent set" under ECMA262 "i" mode, the process will
+// add U+00E5, because it is not an uppercase character in the set. The
+// process will also add U+00C5, because it is the uppercase character which
+// other non-uppercase character, U+00E5, uppercases into.
+//
+// For characters not included in "ignore set" and "special add set", the
+// process will just use closeOver(USET_CASE_INSENSITIVE) to calculate, which is
+// much faster.
+//
+// Under Unicode 12.0, there are only 7 characters in the "special add set" and
+// 4 characters in "ignore set" so even the special add process is slower, it is
+// limited to a small set of cases only.
+//
+// The implementation of these two functions will be generated by calling ICU
+// icu::UnicodeSet during build time into gen/src/regexp/special-case.cc by
+// the code in src/regexp/gen-regexp-special-case.cc.
+//
+// These two functions will be used with the LazyInstance<> template to generate
+// a globally sharable set to reduce memory usage and speed up performance.
+
+// Function to build and return the Ignore set.
+icu::UnicodeSet BuildIgnoreSet();
+
+// Function to build and return the Special Add set.
+icu::UnicodeSet BuildSpecialAddSet();
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTL_SUPPORT
+
+#endif // V8_REGEXP_SPECIAL_CASE_H_
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 856c481b0a..798484d52f 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -6,13 +6,13 @@
#include "src/regexp/x64/regexp-macro-assembler-x64.h"
+#include "src/codegen/macro-assembler.h"
#include "src/heap/factory.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
-#include "src/unicode.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -746,7 +746,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ subq(rsp, Immediate(num_registers_ * kSystemPointerSize));
+ __ AllocateStackSpace(num_registers_ * kSystemPointerSize);
// Load string length.
__ movq(rsi, Operand(rbp, kInputEnd));
// Load input position.
@@ -766,18 +766,6 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// position registers.
__ movq(Operand(rbp, kStringStartMinusOne), rax);
-#if V8_OS_WIN
- // Ensure that we have written to each stack page, in order. Skipping a page
- // on Windows can cause segmentation faults. Assuming page size is 4k.
- const int kPageSize = 4096;
- const int kRegistersPerPage = kPageSize / kSystemPointerSize;
- for (int i = num_saved_registers_ + kRegistersPerPage - 1;
- i < num_registers_;
- i += kRegistersPerPage) {
- __ movq(register_location(i), rax); // One write every page.
- }
-#endif // V8_OS_WIN
-
// Initialize code object pointer.
__ Move(code_object_pointer(), masm_.CodeObject());
@@ -1006,8 +994,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
CodeDesc code_desc;
Isolate* isolate = this->isolate();
masm_.GetCode(isolate, &code_desc);
- Handle<Code> code =
- isolate->factory()->NewCode(code_desc, Code::REGEXP, masm_.CodeObject());
+ Handle<Code> code = Factory::CodeBuilder(isolate, code_desc, Code::REGEXP)
+ .set_self_reference(masm_.CodeObject())
+ .Build();
PROFILE(isolate, RegExpCodeCreateEvent(AbstractCode::cast(*code), *source));
return Handle<HeapObject>::cast(code);
}
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 83193f9319..59b80ef802 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -5,9 +5,9 @@
#ifndef V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
#define V8_REGEXP_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
-#include "src/macro-assembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/x64/assembler-x64.h"
#include "src/regexp/regexp-macro-assembler.h"
-#include "src/x64/assembler-x64.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
diff --git a/deps/v8/src/register-arch.h b/deps/v8/src/register-arch.h
deleted file mode 100644
index 4a5499892e..0000000000
--- a/deps/v8/src/register-arch.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_REGISTER_ARCH_H_
-#define V8_REGISTER_ARCH_H_
-
-#include "src/register.h"
-#include "src/reglist.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/register-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/register-x64.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/register-arm64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/register-arm.h"
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/register-ppc.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/register-mips.h"
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/register-mips64.h"
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/register-s390.h"
-#else
-#error Unknown architecture.
-#endif
-
-#endif // V8_REGISTER_ARCH_H_
diff --git a/deps/v8/src/roots.cc b/deps/v8/src/roots.cc
deleted file mode 100644
index 65aadbca17..0000000000
--- a/deps/v8/src/roots.cc
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/roots.h"
-
-#include "src/elements-kind.h"
-#include "src/objects-inl.h"
-#include "src/visitors.h"
-
-namespace v8 {
-namespace internal {
-
-const char* RootsTable::root_names_[RootsTable::kEntriesCount] = {
-#define ROOT_NAME(type, name, CamelName) #name,
- ROOT_LIST(ROOT_NAME)
-#undef ROOT_NAME
-};
-
-// static
-RootIndex RootsTable::RootIndexForFixedTypedArray(
- ExternalArrayType array_type) {
- switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
- case kExternal##Type##Array: \
- return RootIndex::kFixed##Type##ArrayMap;
-
- TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
-#undef ARRAY_TYPE_TO_ROOT_INDEX
- }
- UNREACHABLE();
-}
-
-// static
-RootIndex RootsTable::RootIndexForFixedTypedArray(ElementsKind elements_kind) {
- switch (elements_kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- return RootIndex::kFixed##Type##ArrayMap;
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
- default:
- UNREACHABLE();
-#undef TYPED_ARRAY_CASE
- }
-}
-
-// static
-RootIndex RootsTable::RootIndexForEmptyFixedTypedArray(
- ElementsKind elements_kind) {
- switch (elements_kind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- return RootIndex::kEmptyFixed##Type##Array;
-
- TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
-#undef ELEMENT_KIND_TO_ROOT_INDEX
- default:
- UNREACHABLE();
- }
-}
-
-void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
- visitor->VisitRootPointers(Root::kReadOnlyRootList, nullptr,
- roots_table_.read_only_roots_begin(),
- roots_table_.read_only_roots_end());
- visitor->Synchronize(VisitorSynchronization::kReadOnlyRootList);
-}
-
-#ifdef DEBUG
-
-bool ReadOnlyRoots::CheckType(RootIndex index) const {
- Object root(roots_table_[index]);
- switch (index) {
-#define CHECKTYPE(Type, name, CamelName) \
- case RootIndex::k##CamelName: \
- return root->Is##Type();
- READ_ONLY_ROOT_LIST(CHECKTYPE)
-#undef CHECKTYPE
-
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-#endif // DEBUG
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/roots-inl.h b/deps/v8/src/roots/roots-inl.h
index cae3d37a39..8153f1758f 100644
--- a/deps/v8/src/roots-inl.h
+++ b/deps/v8/src/roots/roots-inl.h
@@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ROOTS_INL_H_
-#define V8_ROOTS_INL_H_
+#ifndef V8_ROOTS_ROOTS_INL_H_
+#define V8_ROOTS_ROOTS_INL_H_
-#include "src/roots.h"
+#include "src/roots/roots.h"
-#include "src/feedback-vector.h"
-#include "src/handles.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
+#include "src/heap/read-only-heap.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/heap-number.h"
#include "src/objects/literal-objects.h"
#include "src/objects/map.h"
@@ -26,12 +27,12 @@ namespace v8 {
namespace internal {
V8_INLINE constexpr bool operator<(RootIndex lhs, RootIndex rhs) {
- typedef typename std::underlying_type<RootIndex>::type type;
+ using type = typename std::underlying_type<RootIndex>::type;
return static_cast<type>(lhs) < static_cast<type>(rhs);
}
V8_INLINE RootIndex operator++(RootIndex& index) {
- typedef typename std::underlying_type<RootIndex>::type type;
+ using type = typename std::underlying_type<RootIndex>::type;
index = static_cast<RootIndex>(static_cast<type>(index) + 1);
return index;
}
@@ -57,50 +58,38 @@ bool RootsTable::IsRootHandle(Handle<T> handle, RootIndex* index) const {
}
ReadOnlyRoots::ReadOnlyRoots(Heap* heap)
- : roots_table_(Isolate::FromHeap(heap)->roots_table()) {}
+ : ReadOnlyRoots(Isolate::FromHeap(heap)) {}
ReadOnlyRoots::ReadOnlyRoots(Isolate* isolate)
- : roots_table_(isolate->roots_table()) {}
+ : read_only_roots_(reinterpret_cast<Address*>(
+ isolate->roots_table().read_only_roots_begin().address())) {}
+
+ReadOnlyRoots::ReadOnlyRoots(Address* ro_roots) : read_only_roots_(ro_roots) {}
// We use unchecked_cast below because we trust our read-only roots to
// have the right type, and to avoid the heavy #includes that would be
// required for checked casts.
-#define ROOT_ACCESSOR(Type, name, CamelName) \
- Type ReadOnlyRoots::name() const { \
- DCHECK(CheckType(RootIndex::k##CamelName)); \
- return Type::unchecked_cast( \
- Object(roots_table_[RootIndex::k##CamelName])); \
- } \
- Handle<Type> ReadOnlyRoots::name##_handle() const { \
- DCHECK(CheckType(RootIndex::k##CamelName)); \
- return Handle<Type>(&roots_table_[RootIndex::k##CamelName]); \
+#define ROOT_ACCESSOR(Type, name, CamelName) \
+ Type ReadOnlyRoots::name() const { \
+ DCHECK(CheckType(RootIndex::k##CamelName)); \
+ return Type::unchecked_cast(Object(at(RootIndex::k##CamelName))); \
+ } \
+ Handle<Type> ReadOnlyRoots::name##_handle() const { \
+ DCHECK(CheckType(RootIndex::k##CamelName)); \
+ return Handle<Type>(&at(RootIndex::k##CamelName)); \
}
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-Map ReadOnlyRoots::MapForFixedTypedArray(ExternalArrayType array_type) {
- RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(array_type);
- DCHECK(CheckType(root_index));
- return Map::unchecked_cast(Object(roots_table_[root_index]));
-}
-
-Map ReadOnlyRoots::MapForFixedTypedArray(ElementsKind elements_kind) {
- RootIndex root_index = RootsTable::RootIndexForFixedTypedArray(elements_kind);
- DCHECK(CheckType(root_index));
- return Map::unchecked_cast(Object(roots_table_[root_index]));
-}
-
-FixedTypedArrayBase ReadOnlyRoots::EmptyFixedTypedArrayForTypedArray(
- ElementsKind elements_kind) {
- RootIndex root_index =
- RootsTable::RootIndexForEmptyFixedTypedArray(elements_kind);
- DCHECK(CheckType(root_index));
- return FixedTypedArrayBase::unchecked_cast(Object(roots_table_[root_index]));
+Address& ReadOnlyRoots::at(RootIndex root_index) const {
+ size_t index = static_cast<size_t>(root_index);
+ DCHECK_LT(index, kEntriesCount);
+ return read_only_roots_[index];
}
} // namespace internal
} // namespace v8
-#endif // V8_ROOTS_INL_H_
+#endif // V8_ROOTS_ROOTS_INL_H_
diff --git a/deps/v8/src/roots/roots.cc b/deps/v8/src/roots/roots.cc
new file mode 100644
index 0000000000..e2ca6e5897
--- /dev/null
+++ b/deps/v8/src/roots/roots.cc
@@ -0,0 +1,47 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/roots/roots.h"
+
+#include "src/objects/elements-kind.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/visitors.h"
+
+namespace v8 {
+namespace internal {
+
+const char* RootsTable::root_names_[RootsTable::kEntriesCount] = {
+#define ROOT_NAME(type, name, CamelName) #name,
+ ROOT_LIST(ROOT_NAME)
+#undef ROOT_NAME
+};
+
+void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
+ visitor->VisitRootPointers(Root::kReadOnlyRootList, nullptr,
+ FullObjectSlot(read_only_roots_),
+ FullObjectSlot(&read_only_roots_[kEntriesCount]));
+ visitor->Synchronize(VisitorSynchronization::kReadOnlyRootList);
+}
+
+#ifdef DEBUG
+
+bool ReadOnlyRoots::CheckType(RootIndex index) const {
+ Object root(at(index));
+ switch (index) {
+#define CHECKTYPE(Type, name, CamelName) \
+ case RootIndex::k##CamelName: \
+ return root.Is##Type();
+ READ_ONLY_ROOT_LIST(CHECKTYPE)
+#undef CHECKTYPE
+
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
+
+#endif // DEBUG
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/roots.h b/deps/v8/src/roots/roots.h
index d1d13016ab..5684c28f4e 100644
--- a/deps/v8/src/roots.h
+++ b/deps/v8/src/roots/roots.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ROOTS_H_
-#define V8_ROOTS_H_
-
-#include "src/accessors.h"
-#include "src/globals.h"
-#include "src/heap-symbols.h"
-#include "src/objects-definitions.h"
-#include "src/objects.h"
+#ifndef V8_ROOTS_ROOTS_H_
+#define V8_ROOTS_ROOTS_H_
+
+#include "src/builtins/accessors.h"
+#include "src/common/globals.h"
+#include "src/init/heap-symbols.h"
+#include "src/objects/objects-definitions.h"
+#include "src/objects/objects.h"
#include "src/objects/slots.h"
namespace v8 {
@@ -17,16 +17,16 @@ namespace internal {
// Forward declarations.
enum ElementsKind : uint8_t;
-class FixedTypedArrayBase;
template <typename T>
class Handle;
class Heap;
class Isolate;
class Map;
class PropertyCell;
+class ReadOnlyHeap;
+class RootVisitor;
class String;
class Symbol;
-class RootVisitor;
// Defines all the read-only roots in Heap.
#define STRONG_READ_ONLY_ROOT_LIST(V) \
@@ -149,18 +149,6 @@ class RootVisitor;
UncachedExternalOneByteInternalizedStringMap) \
V(Map, uncached_external_one_byte_string_map, \
UncachedExternalOneByteStringMap) \
- /* Array element maps */ \
- V(Map, fixed_uint8_array_map, FixedUint8ArrayMap) \
- V(Map, fixed_int8_array_map, FixedInt8ArrayMap) \
- V(Map, fixed_uint16_array_map, FixedUint16ArrayMap) \
- V(Map, fixed_int16_array_map, FixedInt16ArrayMap) \
- V(Map, fixed_uint32_array_map, FixedUint32ArrayMap) \
- V(Map, fixed_int32_array_map, FixedInt32ArrayMap) \
- V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
- V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
- V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
- V(Map, fixed_biguint64_array_map, FixedBigUint64ArrayMap) \
- V(Map, fixed_bigint64_array_map, FixedBigInt64ArrayMap) \
/* Oddball maps */ \
V(Map, undefined_map, UndefinedMap) \
V(Map, the_hole_map, TheHoleMap) \
@@ -183,19 +171,6 @@ class RootVisitor;
EmptyArrayBoilerplateDescription) \
V(ClosureFeedbackCellArray, empty_closure_feedback_cell_array, \
EmptyClosureFeedbackCellArray) \
- V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
- V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array) \
- V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array) \
- V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array) \
- V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array) \
- V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array) \
- V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array) \
- V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array) \
- V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
- EmptyFixedUint8ClampedArray) \
- V(FixedTypedArrayBase, empty_fixed_biguint64_array, \
- EmptyFixedBigUint64Array) \
- V(FixedTypedArrayBase, empty_fixed_bigint64_array, EmptyFixedBigInt64Array) \
V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
V(NumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
@@ -215,9 +190,13 @@ class RootVisitor;
V(HeapNumber, minus_infinity_value, MinusInfinityValue) \
/* Marker for self-references during code-generation */ \
V(HeapObject, self_reference_marker, SelfReferenceMarker) \
- /* Canonical trampoline RelocInfo */ \
+ /* Canonical off-heap trampoline data */ \
V(ByteArray, off_heap_trampoline_relocation_info, \
OffHeapTrampolineRelocationInfo) \
+ V(CodeDataContainer, trampoline_trivial_code_data_container, \
+ TrampolineTrivialCodeDataContainer) \
+ V(CodeDataContainer, trampoline_promise_rejection_code_data_container, \
+ TrampolinePromiseRejectionCodeDataContainer) \
/* Hash seed */ \
V(ByteArray, hash_seed, HashSeed)
@@ -423,10 +402,6 @@ class RootsTable {
return static_cast<int>(root_index) * kSystemPointerSize;
}
- static RootIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
- static RootIndex RootIndexForFixedTypedArray(ElementsKind elements_kind);
- static RootIndex RootIndexForEmptyFixedTypedArray(ElementsKind elements_kind);
-
// Immortal immovable root objects are allocated in OLD space and GC never
// moves them and the root table entries are guaranteed to not be modified
// after initialization. Note, however, that contents of those root objects
@@ -509,6 +484,9 @@ class RootsTable {
class ReadOnlyRoots {
public:
+ static constexpr size_t kEntriesCount =
+ static_cast<size_t>(RootIndex::kReadOnlyRootsCount);
+
V8_INLINE explicit ReadOnlyRoots(Heap* heap);
V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
@@ -519,11 +497,6 @@ class ReadOnlyRoots {
READ_ONLY_ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- V8_INLINE Map MapForFixedTypedArray(ExternalArrayType array_type);
- V8_INLINE Map MapForFixedTypedArray(ElementsKind elements_kind);
- V8_INLINE FixedTypedArrayBase
- EmptyFixedTypedArrayForTypedArray(ElementsKind elements_kind);
-
// Iterate over all the read-only roots. This is not necessary for garbage
// collection and is usually only performed as part of (de)serialization or
// heap verification.
@@ -534,10 +507,16 @@ class ReadOnlyRoots {
#endif
private:
- RootsTable& roots_table_;
+ V8_INLINE explicit ReadOnlyRoots(Address* ro_roots);
+
+ V8_INLINE Address& at(RootIndex root_index) const;
+
+ Address* read_only_roots_;
+
+ friend class ReadOnlyHeap;
};
} // namespace internal
} // namespace v8
-#endif // V8_ROOTS_H_
+#endif // V8_ROOTS_ROOTS_H_
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 57087fe3f2..f35e72a666 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -2,21 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
-#include "src/conversions-inl.h"
-#include "src/counters.h"
#include "src/debug/debug.h"
-#include "src/elements.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/heap/heap-write-barrier-inl.h"
-#include "src/isolate-inl.h"
-#include "src/keys.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/arguments-inl.h"
+#include "src/objects/elements.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
-#include "src/prototype.h"
+#include "src/objects/prototype.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -42,512 +41,6 @@ RUNTIME_FUNCTION(Runtime_TransitionElementsKindWithKind) {
return *object;
}
-namespace {
-// Find the next free position. undefined and holes are both considered
-// free spots. Returns "Nothing" if an exception occurred.
-V8_WARN_UNUSED_RESULT
-Maybe<uint32_t> FindNextFreePosition(Isolate* isolate,
- Handle<JSReceiver> receiver,
- uint32_t current_pos) {
- for (uint32_t position = current_pos;; ++position) {
- Maybe<bool> has_element = JSReceiver::HasOwnProperty(receiver, position);
- MAYBE_RETURN(has_element, Nothing<uint32_t>());
- if (!has_element.FromJust()) return Just(position);
-
- Handle<Object> element;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, JSReceiver::GetElement(isolate, receiver, position),
- Nothing<uint32_t>());
- if (element->IsUndefined(isolate)) return Just(position);
- }
-}
-
-// As RemoveArrayHoles, but also handles Dictionary elements that stay
-// Dictionary (requires_slow_elements() is true), proxies and objects that
-// might have accessors.
-V8_WARN_UNUSED_RESULT
-Object RemoveArrayHolesGeneric(Isolate* isolate, Handle<JSReceiver> receiver,
- uint32_t limit) {
- HandleScope scope(isolate);
-
- // For proxies, we do not collect the keys, instead we use all indices in
- // the full range of [0, limit).
- Handle<FixedArray> keys;
- if (!receiver->IsJSProxy()) {
- keys = JSReceiver::GetOwnElementIndices(isolate, receiver,
- Handle<JSObject>::cast(receiver));
- }
-
- uint32_t num_undefined = 0;
- uint32_t current_pos = 0;
- uint32_t num_indices = keys.is_null() ? limit : keys->length();
-
- // Compact keys with undefined values and moves non-undefined
- // values to the front.
- // The loop does two things simultaneously:
- // (1) Count the number of 'undefined', i.e.
- // i.e.: HasProperty(receiver, key) && Get(receiver, key) == undefined
- // (2) Move all non-undefined values to the front. The variable current_pos
- // is used to track free spots in the array starting at the beginning.
- // Holes and 'undefined' are considered free spots.
- // A hole is when HasElement(receiver, key) is false.
- for (uint32_t i = 0; i < num_indices; ++i) {
- uint32_t key = keys.is_null() ? i : NumberToUint32(keys->get(i));
-
- // We only care about array indices that are smaller than the limit.
- // The keys are sorted, so we can break as soon as we encounter the first.
- if (key >= limit) break;
-
- Maybe<bool> has_element = JSReceiver::HasElement(receiver, key);
- MAYBE_RETURN(has_element, ReadOnlyRoots(isolate).exception());
- if (!has_element.FromJust()) {
- continue;
- }
-
- Handle<Object> element;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, element, JSReceiver::GetElement(isolate, receiver, key));
-
- if (element->IsUndefined(isolate)) {
- ++num_undefined;
- } else {
- // Find next free position to move elements to.
- Maybe<uint32_t> free_position =
- FindNextFreePosition(isolate, receiver, current_pos);
- MAYBE_RETURN(free_position, ReadOnlyRoots(isolate).exception());
- current_pos = free_position.FromJust();
-
- // Do not move elements that are already in the "packed" area.
- if (key <= current_pos) continue;
-
- // array[current_pos] = array[key].
- // Deleting array[key] is done later. This is to preserve the same
- // semantics as the old JS implementation when working with non-extensible
- // objects:
- // If the array contains undefineds, the position at 'key' might later
- // bet set to 'undefined'. If we delete the element now and later set it
- // to undefined, the set operation would throw an exception.
- // Instead, to mark it up as a free space, we set array[key] to undefined.
- // As 'key' will be incremented afterward, this undefined value will not
- // affect 'num_undefined', and the logic afterwards will correctly set
- // the remaining undefineds or delete the remaining properties.
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Object::SetElement(isolate, receiver, current_pos, element,
- ShouldThrow::kThrowOnError));
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Object::SetElement(isolate, receiver, key,
- isolate->factory()->undefined_value(),
- ShouldThrow::kThrowOnError));
- ++current_pos;
- }
- }
-
- // current_pos points to the next free space in the array/object. In most
- // cases this corresponds to the 'length' or to the number of non-undefined
- // elements.
- // In cases where an object is 'packed' and 'length' is smaller, e.g.:
- // { 0: 5, 1: 4, 2: 3, length: 2 }
- // current_pos will be greater than limit, thus, we need to take the minimum.
- uint32_t result = std::min(current_pos, limit);
-
- // Set [current_pos, current_pos + num_undefined) to undefined.
- for (uint32_t i = 0; i < num_undefined; ++i) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Object::SetElement(isolate, receiver, current_pos++,
- isolate->factory()->undefined_value(),
- ShouldThrow::kThrowOnError));
- }
- // TODO(szuend): Re-enable when we also copy from the prototype chain for
- // JSArrays. Then we can use HasOwnProperty instead of
- // HasElement and this condition will hold.
- // DCHECK_LE(current_pos, num_indices);
-
- // Deleting everything after the undefineds up unto the limit.
- for (uint32_t i = num_indices; i > 0;) {
- --i;
- uint32_t key = keys.is_null() ? i : NumberToUint32(keys->get(i));
- if (key < current_pos) break;
- if (key >= limit) continue;
-
- Maybe<bool> delete_result = JSReceiver::DeleteElement(receiver, key);
- MAYBE_RETURN(delete_result, ReadOnlyRoots(isolate).exception());
- }
-
- return *isolate->factory()->NewNumberFromUint(result);
-}
-
-// Collects all defined (non-hole) and non-undefined (array) elements at the
-// start of the elements array. If the object is in dictionary mode, it is
-// converted to fast elements mode. Undefined values are placed after
-// non-undefined values. Returns the number of non-undefined values.
-V8_WARN_UNUSED_RESULT
-Object RemoveArrayHoles(Isolate* isolate, Handle<JSReceiver> receiver,
- uint32_t limit) {
- if (receiver->IsJSProxy()) {
- return RemoveArrayHolesGeneric(isolate, receiver, limit);
- }
-
- Handle<JSObject> object = Handle<JSObject>::cast(receiver);
- if (object->HasStringWrapperElements()) {
- int len = String::cast(Handle<JSValue>::cast(object)->value())->length();
- DCHECK_LE(len, limit);
- return Smi::FromInt(len);
- }
-
- if (object->HasSloppyArgumentsElements() || !object->map()->is_extensible()) {
- return RemoveArrayHolesGeneric(isolate, receiver, limit);
- }
-
- JSObject::ValidateElements(*object);
- if (object->HasDictionaryElements()) {
- // Convert to fast elements containing only the existing properties.
- // Ordering is irrelevant, since we are going to sort anyway.
- Handle<NumberDictionary> dict(object->element_dictionary(), isolate);
- if (object->IsJSArray() || dict->requires_slow_elements() ||
- dict->max_number_key() >= limit) {
- return RemoveArrayHolesGeneric(isolate, receiver, limit);
- }
- // Convert to fast elements.
- Handle<Map> new_map =
- JSObject::GetElementsTransitionMap(object, HOLEY_ELEMENTS);
-
- AllocationType allocation = ObjectInYoungGeneration(*object)
- ? AllocationType::kYoung
- : AllocationType::kOld;
- Handle<FixedArray> fast_elements =
- isolate->factory()->NewFixedArray(dict->NumberOfElements(), allocation);
- dict->CopyValuesTo(*fast_elements);
-
- JSObject::SetMapAndElements(object, new_map, fast_elements);
- JSObject::ValidateElements(*object);
- } else if (object->HasFixedTypedArrayElements()) {
- // Typed arrays cannot have holes or undefined elements.
- int array_length = FixedArrayBase::cast(object->elements())->length();
- return Smi::FromInt(Min(limit, static_cast<uint32_t>(array_length)));
- } else if (!object->HasDoubleElements()) {
- JSObject::EnsureWritableFastElements(object);
- }
- DCHECK(object->HasSmiOrObjectElements() || object->HasDoubleElements());
-
- // Collect holes at the end, undefined before that and the rest at the
- // start, and return the number of non-hole, non-undefined values.
-
- Handle<FixedArrayBase> elements_base(object->elements(), isolate);
- uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
- if (limit > elements_length) {
- limit = elements_length;
- }
- if (limit == 0) {
- return Smi::kZero;
- }
-
- uint32_t result = 0;
- if (elements_base->map() == ReadOnlyRoots(isolate).fixed_double_array_map()) {
- FixedDoubleArray elements = FixedDoubleArray::cast(*elements_base);
- // Split elements into defined and the_hole, in that order.
- unsigned int holes = limit;
- // Assume most arrays contain no holes and undefined values, so minimize the
- // number of stores of non-undefined, non-the-hole values.
- for (unsigned int i = 0; i < holes; i++) {
- if (elements->is_the_hole(i)) {
- holes--;
- } else {
- continue;
- }
- // Position i needs to be filled.
- while (holes > i) {
- if (elements->is_the_hole(holes)) {
- holes--;
- } else {
- elements->set(i, elements->get_scalar(holes));
- break;
- }
- }
- }
- result = holes;
- while (holes < limit) {
- elements->set_the_hole(holes);
- holes++;
- }
- } else {
- FixedArray elements = FixedArray::cast(*elements_base);
- DisallowHeapAllocation no_gc;
-
- // Split elements into defined, undefined and the_hole, in that order. Only
- // count locations for undefined and the hole, and fill them afterwards.
- WriteBarrierMode write_barrier = elements->GetWriteBarrierMode(no_gc);
- unsigned int undefs = limit;
- unsigned int holes = limit;
- // Assume most arrays contain no holes and undefined values, so minimize the
- // number of stores of non-undefined, non-the-hole values.
- for (unsigned int i = 0; i < undefs; i++) {
- Object current = elements->get(i);
- if (current->IsTheHole(isolate)) {
- holes--;
- undefs--;
- } else if (current->IsUndefined(isolate)) {
- undefs--;
- } else {
- continue;
- }
- // Position i needs to be filled.
- while (undefs > i) {
- current = elements->get(undefs);
- if (current->IsTheHole(isolate)) {
- holes--;
- undefs--;
- } else if (current->IsUndefined(isolate)) {
- undefs--;
- } else {
- elements->set(i, current, write_barrier);
- break;
- }
- }
- }
- result = undefs;
- while (undefs < holes) {
- elements->set_undefined(isolate, undefs);
- undefs++;
- }
- while (holes < limit) {
- elements->set_the_hole(isolate, holes);
- holes++;
- }
- }
-
- DCHECK_LE(result, limit);
- return *isolate->factory()->NewNumberFromUint(result);
-}
-
-// Copy element at index from source to target only if target does not have the
-// element on its own. Returns true if a copy occurred, false if not
-// and Nothing if an exception occurred.
-V8_WARN_UNUSED_RESULT
-Maybe<bool> ConditionalCopy(Isolate* isolate, Handle<JSReceiver> source,
- Handle<JSReceiver> target, uint32_t index) {
- Maybe<bool> source_has_prop = JSReceiver::HasOwnProperty(source, index);
- MAYBE_RETURN(source_has_prop, Nothing<bool>());
- if (!source_has_prop.FromJust()) return Just(false);
-
- Maybe<bool> target_has_prop = JSReceiver::HasOwnProperty(target, index);
- MAYBE_RETURN(target_has_prop, Nothing<bool>());
- if (target_has_prop.FromJust()) return Just(false);
-
- Handle<Object> source_element;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, source_element, JSReceiver::GetElement(isolate, target, index),
- Nothing<bool>());
-
- Handle<Object> set_result;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, set_result,
- Object::SetElement(isolate, target, index, source_element,
- ShouldThrow::kThrowOnError),
- Nothing<bool>());
-
- return Just(true);
-}
-
-// Copy elements in the range 0..length from objects prototype chain
-// to object itself, if object has holes. Returns null on error and undefined on
-// success.
-V8_WARN_UNUSED_RESULT
-MaybeHandle<Object> CopyFromPrototype(Isolate* isolate,
- Handle<JSReceiver> object,
- uint32_t length) {
- for (PrototypeIterator iter(isolate, object, kStartAtPrototype);
- !iter.IsAtEnd(); iter.Advance()) {
- Handle<JSReceiver> current(PrototypeIterator::GetCurrent<JSReceiver>(iter));
-
- if (current->IsJSProxy()) {
- for (uint32_t i = 0; i < length; ++i) {
- MAYBE_RETURN_NULL(ConditionalCopy(isolate, current, object, i));
- }
- } else {
- Handle<FixedArray> keys = JSReceiver::GetOwnElementIndices(
- isolate, object, Handle<JSObject>::cast(current));
-
- uint32_t num_indices = keys->length();
- for (uint32_t i = 0; i < num_indices; ++i) {
- uint32_t idx = NumberToUint32(keys->get(i));
-
- // Prototype might have indices that go past length, but we are only
- // interested in the range [0, length).
- if (idx >= length) break;
-
- MAYBE_RETURN_NULL(ConditionalCopy(isolate, current, object, idx));
- }
- }
- }
- return isolate->factory()->undefined_value();
-}
-
-} // namespace
-
-RUNTIME_FUNCTION(Runtime_PrepareElementsForSort) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
-
- if (isolate->debug_execution_mode() == DebugInfo::kSideEffects) {
- if (!isolate->debug()->PerformSideEffectCheckForObject(object)) {
- return ReadOnlyRoots(isolate).exception();
- }
- }
-
- // Counter for sorting arrays that have non-packed elements and where either
- // the ElementsProtector is invalid or the prototype does not match
- // Array.prototype.
- JSObject initial_array_proto = JSObject::cast(
- isolate->native_context()->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
- if (object->IsJSArray() &&
- !Handle<JSArray>::cast(object)->HasFastPackedElements()) {
- if (!isolate->IsNoElementsProtectorIntact() ||
- object->map()->prototype() != initial_array_proto) {
- isolate->CountUsage(
- v8::Isolate::kArrayPrototypeSortJSArrayModifiedPrototype);
- }
- }
-
- // Skip copying from prototype for JSArrays with ElementsProtector intact and
- // the original array prototype.
- if (!object->IsJSArray() || !isolate->IsNoElementsProtectorIntact() ||
- object->map()->prototype() != initial_array_proto) {
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- CopyFromPrototype(isolate, object, length));
- }
- return RemoveArrayHoles(isolate, object, length);
-}
-
-// How many elements does this object/array have?
-RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
- DisallowHeapAllocation no_gc;
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSArray, array, 0);
- FixedArrayBase elements = array->elements();
- SealHandleScope shs(isolate);
- if (elements->IsNumberDictionary()) {
- int result = NumberDictionary::cast(elements)->NumberOfElements();
- return Smi::FromInt(result);
- } else {
- DCHECK(array->length()->IsSmi());
- // For packed elements, we know the exact number of elements
- int length = elements->length();
- ElementsKind kind = array->GetElementsKind();
- if (IsFastPackedElementsKind(kind)) {
- return Smi::FromInt(length);
- }
- // For holey elements, take samples from the buffer checking for holes
- // to generate the estimate.
- const int kNumberOfHoleCheckSamples = 97;
- int increment = (length < kNumberOfHoleCheckSamples)
- ? 1
- : static_cast<int>(length / kNumberOfHoleCheckSamples);
- ElementsAccessor* accessor = array->GetElementsAccessor();
- int holes = 0;
- for (int i = 0; i < length; i += increment) {
- if (!accessor->HasElement(array, i, elements)) {
- ++holes;
- }
- }
- int estimate = static_cast<int>((kNumberOfHoleCheckSamples - holes) /
- kNumberOfHoleCheckSamples * length);
- return Smi::FromInt(estimate);
- }
-}
-
-
-// Returns an array that tells you where in the [0, length) interval an array
-// might have elements. Can either return an array of keys (positive integers
-// or undefined) or a number representing the positive length of an interval
-// starting at index 0.
-// Intervals can span over some keys that are not in the object.
-RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
- CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
- ElementsKind kind = array->GetElementsKind();
-
- if (IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind)) {
- uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
- return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
- }
-
- if (kind == FAST_STRING_WRAPPER_ELEMENTS) {
- int string_length =
- String::cast(Handle<JSValue>::cast(array)->value())->length();
- int backing_store_length = array->elements()->length();
- return *isolate->factory()->NewNumberFromUint(
- Min(length,
- static_cast<uint32_t>(Max(string_length, backing_store_length))));
- }
-
- KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
- ALL_PROPERTIES);
- for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
- !iter.IsAtEnd(); iter.Advance()) {
- Handle<JSReceiver> current(PrototypeIterator::GetCurrent<JSReceiver>(iter));
- if (current->HasComplexElements()) {
- return *isolate->factory()->NewNumberFromUint(length);
- }
- accumulator.CollectOwnElementIndices(array,
- Handle<JSObject>::cast(current));
- }
- // Erase any keys >= length.
- Handle<FixedArray> keys =
- accumulator.GetKeys(GetKeysConversion::kKeepNumbers);
- int j = 0;
- for (int i = 0; i < keys->length(); i++) {
- if (NumberToUint32(keys->get(i)) >= length) continue;
- if (i != j) keys->set(j, keys->get(i));
- j++;
- }
-
- keys = FixedArray::ShrinkOrEmpty(isolate, keys, j);
- return *isolate->factory()->NewJSArrayWithElements(keys);
-}
-
-RUNTIME_FUNCTION(Runtime_TrySliceSimpleNonFastElements) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_SMI_ARG_CHECKED(first, 1);
- CONVERT_SMI_ARG_CHECKED(count, 2);
- uint32_t length = first + count;
-
- // Only handle elements kinds that have a ElementsAccessor Slice
- // implementation.
- if (receiver->IsJSArray()) {
- // This "fastish" path must make sure the destination array is a JSArray.
- if (!isolate->IsArraySpeciesLookupChainIntact() ||
- !JSArray::cast(*receiver)->HasArrayPrototype(isolate)) {
- return Smi::FromInt(0);
- }
- } else {
- int len;
- if (!receiver->IsJSObject() ||
- !JSSloppyArgumentsObject::GetSloppyArgumentsLength(
- isolate, Handle<JSObject>::cast(receiver), &len) ||
- (length > static_cast<uint32_t>(len))) {
- return Smi::FromInt(0);
- }
- }
-
- // This "fastish" path must also ensure that elements are simple (no
- // geters/setters), no elements on prototype chain.
- Handle<JSObject> object(Handle<JSObject>::cast(receiver));
- if (!JSObject::PrototypeHasNoElements(isolate, *object) ||
- object->HasComplexElements()) {
- return Smi::FromInt(0);
- }
-
- ElementsAccessor* accessor = object->GetElementsAccessor();
- return *accessor->Slice(object, first, length);
-}
-
RUNTIME_FUNCTION(Runtime_NewArray) {
HandleScope scope(isolate);
DCHECK_LE(3, args.length());
@@ -656,7 +149,7 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
- CHECK(!array->HasFixedTypedArrayElements());
+ CHECK(!array->HasTypedArrayElements());
CHECK(!array->IsJSGlobalProxy());
JSObject::NormalizeElements(array);
return *array;
@@ -672,7 +165,7 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
if (key < 0) return Smi::kZero;
- uint32_t capacity = static_cast<uint32_t>(object->elements()->length());
+ uint32_t capacity = static_cast<uint32_t>(object->elements().length());
uint32_t index = static_cast<uint32_t>(key);
if (index >= capacity) {
@@ -684,20 +177,6 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
return object->elements();
}
-
-RUNTIME_FUNCTION(Runtime_HasComplexElements) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
- for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
- !iter.IsAtEnd(); iter.Advance()) {
- if (PrototypeIterator::GetCurrent<JSReceiver>(iter)->HasComplexElements()) {
- return ReadOnlyRoots(isolate).true_value();
- }
- }
- return ReadOnlyRoots(isolate).false_value();
-}
-
// ES6 22.1.2.2 Array.isArray
RUNTIME_FUNCTION(Runtime_ArrayIsArray) {
HandleScope shs(isolate);
@@ -712,7 +191,7 @@ RUNTIME_FUNCTION(Runtime_IsArray) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSArray());
+ return isolate->heap()->ToBoolean(obj.IsJSArray());
}
RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
@@ -739,9 +218,9 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
// Let len be ? ToLength(? Get(O, "length")).
int64_t len;
{
- if (object->map()->instance_type() == JS_ARRAY_TYPE) {
+ if (object->map().instance_type() == JS_ARRAY_TYPE) {
uint32_t len32 = 0;
- bool success = JSArray::cast(*object)->length()->ToArrayLength(&len32);
+ bool success = JSArray::cast(*object).length().ToArrayLength(&len32);
DCHECK(success);
USE(success);
len = len32;
@@ -793,7 +272,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
// If the receiver is not a special receiver type, and the length is a valid
// element index, perform fast operation tailored to specific ElementsKinds.
- if (!object->map()->IsSpecialReceiverMap() && len < kMaxUInt32 &&
+ if (!object->map().IsSpecialReceiverMap() && len < kMaxUInt32 &&
JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
Handle<JSObject> obj = Handle<JSObject>::cast(object);
ElementsAccessor* elements = obj->GetElementsAccessor();
@@ -843,7 +322,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
{
if (object->IsJSArray()) {
uint32_t len32 = 0;
- bool success = JSArray::cast(*object)->length()->ToArrayLength(&len32);
+ bool success = JSArray::cast(*object).length().ToArrayLength(&len32);
DCHECK(success);
USE(success);
len = len32;
@@ -892,7 +371,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
// If the receiver is not a special receiver type, and the length fits
// uint32_t, perform fast operation tailored to specific ElementsKinds.
- if (!object->map()->IsSpecialReceiverMap() && len <= kMaxUInt32 &&
+ if (!object->map().IsSpecialReceiverMap() && len <= kMaxUInt32 &&
JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
Handle<JSObject> obj = Handle<JSObject>::cast(object);
ElementsAccessor* elements = obj->GetElementsAccessor();
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 3fcb9934f9..7c7a8b6207 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
-#include "src/conversions-inl.h"
-#include "src/counters.h"
+#include "src/execution/arguments-inl.h"
#include "src/heap/factory.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -361,7 +361,7 @@ Object GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
BigInt::FromObject(isolate, value_obj));
// SharedArrayBuffers are not detachable.
- CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_LT(index, sta->length());
if (sta->type() == kExternalBigInt64Array) {
return Op<int64_t>::Do(isolate, source, index, bigint);
}
@@ -373,7 +373,7 @@ Object GetModifySetValueInBuffer(Arguments args, Isolate* isolate) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
Object::ToInteger(isolate, value_obj));
// SharedArrayBuffers are not detachable.
- CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_LT(index, sta->length());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
@@ -403,7 +403,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
DCHECK(sta->type() == kExternalBigInt64Array ||
sta->type() == kExternalBigUint64Array);
// SharedArrayBuffers are not detachable.
- CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_LT(index, sta->length());
if (sta->type() == kExternalBigInt64Array) {
return Load<int64_t>::Do(isolate, source, index);
}
@@ -429,7 +429,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
DCHECK(sta->type() == kExternalBigInt64Array ||
sta->type() == kExternalBigUint64Array);
// SharedArrayBuffers are not detachable.
- CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_LT(index, sta->length());
if (sta->type() == kExternalBigInt64Array) {
Store<int64_t>::Do(isolate, source, index, bigint);
return *bigint;
@@ -451,7 +451,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
CONVERT_ARG_HANDLE_CHECKED(Object, old_value_obj, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, new_value_obj, 3);
CHECK(sta->GetBuffer()->is_shared());
- CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_LT(index, sta->length());
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
sta->byte_offset();
@@ -464,7 +464,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));
// SharedArrayBuffers are not detachable.
- CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_LT(index, sta->length());
if (sta->type() == kExternalBigInt64Array) {
return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
new_bigint);
@@ -481,7 +481,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
Object::ToInteger(isolate, new_value_obj));
// SharedArrayBuffers are not detachable.
- CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_LT(index, sta->length());
switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
diff --git a/deps/v8/src/runtime/runtime-bigint.cc b/deps/v8/src/runtime/runtime-bigint.cc
index f718ab7eb4..8f065740d7 100644
--- a/deps/v8/src/runtime/runtime-bigint.cc
+++ b/deps/v8/src/runtime/runtime-bigint.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/execution/arguments-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/bigint.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 07b101684a..0c17047795 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -7,14 +7,14 @@
#include <stdlib.h>
#include <limits>
-#include "src/accessors.h"
-#include "src/arguments-inl.h"
-#include "src/counters.h"
+#include "src/builtins/accessors.h"
#include "src/debug/debug.h"
-#include "src/elements.h"
-#include "src/isolate-inl.h"
-#include "src/log.h"
-#include "src/message-template.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/objects/elements.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/smi.h"
@@ -37,7 +37,7 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
- Handle<String> name(constructor->shared()->Name(), isolate);
+ Handle<String> name(constructor->shared().Name(), isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNonCallable, name));
}
@@ -70,8 +70,8 @@ Object ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
Handle<JSFunction> function) {
Handle<String> super_name;
if (constructor->IsJSFunction()) {
- super_name = handle(Handle<JSFunction>::cast(constructor)->shared()->Name(),
- isolate);
+ super_name =
+ handle(Handle<JSFunction>::cast(constructor)->shared().Name(), isolate);
} else if (constructor->IsOddball()) {
DCHECK(constructor->IsNull(isolate));
super_name = isolate->factory()->null_string();
@@ -82,7 +82,7 @@ Object ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
if (super_name->length() == 0) {
super_name = isolate->factory()->null_string();
}
- Handle<String> function_name(function->shared()->Name(), isolate);
+ Handle<String> function_name(function->shared().Name(), isolate);
// anonymous class
if (function_name->length() == 0) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -129,14 +129,14 @@ Handle<Name> KeyToName<NumberDictionary>(Isolate* isolate, Handle<Object> key) {
inline void SetHomeObject(Isolate* isolate, JSFunction method,
JSObject home_object) {
- if (method->shared()->needs_home_object()) {
+ if (method.shared().needs_home_object()) {
const int kPropertyIndex = JSFunction::kMaybeHomeObjectDescriptorIndex;
- CHECK_EQ(method->map()->instance_descriptors()->GetKey(kPropertyIndex),
+ CHECK_EQ(method.map().instance_descriptors().GetKey(kPropertyIndex),
ReadOnlyRoots(isolate).home_object_symbol());
FieldIndex field_index =
- FieldIndex::ForDescriptor(method->map(), kPropertyIndex);
- method->RawFastPropertyAtPut(field_index, home_object);
+ FieldIndex::ForDescriptor(method.map(), kPropertyIndex);
+ method.RawFastPropertyAtPut(field_index, home_object);
}
}
@@ -163,7 +163,7 @@ MaybeHandle<Object> GetMethodAndSetHomeObjectAndName(
SetHomeObject(isolate, *method, *home_object);
- if (!method->shared()->HasSharedName()) {
+ if (!method->shared().HasSharedName()) {
// TODO(ishell): method does not have a shared name at this point only if
// the key is a computed property name. However, the bytecode generator
// explicitly generates ToName bytecodes to ensure that the computed
@@ -200,7 +200,7 @@ Object GetMethodWithSharedNameAndSetHomeObject(Isolate* isolate,
SetHomeObject(isolate, *method, home_object);
- DCHECK(method->shared()->HasSharedName());
+ DCHECK(method->shared().HasSharedName());
return *method;
}
@@ -215,7 +215,7 @@ Handle<Dictionary> ShallowCopyDictionaryTemplate(
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object value = dictionary->ValueAt(i);
- if (value->IsAccessorPair()) {
+ if (value.IsAccessorPair()) {
Handle<AccessorPair> pair(AccessorPair::cast(value), isolate);
pair = AccessorPair::Copy(isolate, pair);
dictionary->ValueAtPut(i, *pair);
@@ -245,7 +245,7 @@ bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
if (value->IsAccessorPair()) {
Handle<AccessorPair> pair = Handle<AccessorPair>::cast(value);
Object tmp = pair->getter();
- if (tmp->IsSmi()) {
+ if (tmp.IsSmi()) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, result,
@@ -256,7 +256,7 @@ bool SubstituteValues(Isolate* isolate, Handle<Dictionary> dictionary,
pair->set_getter(*result);
}
tmp = pair->setter();
- if (tmp->IsSmi()) {
+ if (tmp.IsSmi()) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, result,
@@ -297,56 +297,51 @@ bool AddDescriptorsByTemplate(
: ShallowCopyDictionaryTemplate(isolate,
elements_dictionary_template);
- Handle<PropertyArray> property_array =
- isolate->factory()->empty_property_array();
- if (FLAG_track_constant_fields) {
- // If we store constants in instances, count the number of properties
- // that must be in the instance and create the property array to
- // hold the constants.
- int count = 0;
- for (int i = 0; i < nof_descriptors; i++) {
- PropertyDetails details = descriptors_template->GetDetails(i);
- if (details.location() == kDescriptor && details.kind() == kData) {
- count++;
- }
+ // Count the number of properties that must be in the instance and
+ // create the property array to hold the constants.
+ int count = 0;
+ for (int i = 0; i < nof_descriptors; i++) {
+ PropertyDetails details = descriptors_template->GetDetails(i);
+ if (details.location() == kDescriptor && details.kind() == kData) {
+ count++;
}
- property_array = isolate->factory()->NewPropertyArray(count);
}
+ Handle<PropertyArray> property_array =
+ isolate->factory()->NewPropertyArray(count);
// Read values from |descriptors_template| and store possibly post-processed
// values into "instantiated" |descriptors| array.
int field_index = 0;
for (int i = 0; i < nof_descriptors; i++) {
Object value = descriptors_template->GetStrongValue(i);
- if (value->IsAccessorPair()) {
+ if (value.IsAccessorPair()) {
Handle<AccessorPair> pair = AccessorPair::Copy(
isolate, handle(AccessorPair::cast(value), isolate));
value = *pair;
}
DisallowHeapAllocation no_gc;
Name name = descriptors_template->GetKey(i);
- DCHECK(name->IsUniqueName());
+ DCHECK(name.IsUniqueName());
PropertyDetails details = descriptors_template->GetDetails(i);
if (details.location() == kDescriptor) {
if (details.kind() == kData) {
- if (value->IsSmi()) {
+ if (value.IsSmi()) {
value = GetMethodWithSharedNameAndSetHomeObject(isolate, args, value,
*receiver);
}
- details =
- details.CopyWithRepresentation(value->OptimalRepresentation());
+ details = details.CopyWithRepresentation(value.OptimalRepresentation());
} else {
DCHECK_EQ(kAccessor, details.kind());
- if (value->IsAccessorPair()) {
+ if (value.IsAccessorPair()) {
AccessorPair pair = AccessorPair::cast(value);
- Object tmp = pair->getter();
- if (tmp->IsSmi()) {
- pair->set_getter(GetMethodWithSharedNameAndSetHomeObject(
+ Object tmp = pair.getter();
+ if (tmp.IsSmi()) {
+ pair.set_getter(GetMethodWithSharedNameAndSetHomeObject(
isolate, args, tmp, *receiver));
}
- tmp = pair->setter();
- if (tmp->IsSmi()) {
- pair->set_setter(GetMethodWithSharedNameAndSetHomeObject(
+ tmp = pair.setter();
+ if (tmp.IsSmi()) {
+ pair.set_setter(GetMethodWithSharedNameAndSetHomeObject(
isolate, args, tmp, *receiver));
}
}
@@ -354,10 +349,8 @@ bool AddDescriptorsByTemplate(
} else {
UNREACHABLE();
}
- DCHECK(value->FitsRepresentation(details.representation()));
- // With constant field tracking, we store the values in the instance.
- if (FLAG_track_constant_fields && details.location() == kDescriptor &&
- details.kind() == kData) {
+ DCHECK(value.FitsRepresentation(details.representation()));
+ if (details.location() == kDescriptor && details.kind() == kData) {
details = PropertyDetails(details.kind(), details.attributes(), kField,
PropertyConstness::kConst,
details.representation(), field_index)
@@ -407,8 +400,8 @@ bool AddDescriptorsByTemplate(
Handle<NumberDictionary> elements_dictionary =
ShallowCopyDictionaryTemplate(isolate, elements_dictionary_template);
- typedef ClassBoilerplate::ValueKind ValueKind;
- typedef ClassBoilerplate::ComputedEntryFlags ComputedEntryFlags;
+ using ValueKind = ClassBoilerplate::ValueKind;
+ using ComputedEntryFlags = ClassBoilerplate::ComputedEntryFlags;
// Merge computed properties with properties and elements dictionary
// templates.
@@ -469,26 +462,14 @@ bool AddDescriptorsByTemplate(
}
Handle<JSObject> CreateClassPrototype(Isolate* isolate) {
- Factory* factory = isolate->factory();
-
- const int kInobjectFields = 0;
-
- Handle<Map> map;
- if (FLAG_track_constant_fields) {
- // For constant tracking we want to avoid tha hassle of handling
- // in-object properties, so create a map with no in-object
- // properties.
-
- // TODO(ishell) Support caching of zero in-object properties map
- // by ObjectLiteralMapFromCache().
- map = Map::Create(isolate, 0);
- } else {
- // Just use some JSObject map of certain size.
- map = factory->ObjectLiteralMapFromCache(isolate->native_context(),
- kInobjectFields);
- }
+ // For constant tracking we want to avoid the hassle of handling
+ // in-object properties, so create a map with no in-object
+ // properties.
- return factory->NewJSObjectFromMap(map);
+ // TODO(ishell) Support caching of zero in-object properties map
+ // by ObjectLiteralMapFromCache().
+ Handle<Map> map = Map::Create(isolate, 0);
+ return isolate->factory()->NewJSObjectFromMap(map);
}
bool InitClassPrototype(Isolate* isolate,
@@ -607,7 +588,7 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
} else if (super_class->IsConstructor()) {
DCHECK(!super_class->IsJSFunction() ||
!IsResumableFunction(
- Handle<JSFunction>::cast(super_class)->shared()->kind()));
+ Handle<JSFunction>::cast(super_class)->shared().kind()));
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype_parent,
Runtime::GetObjectProperty(isolate, super_class,
@@ -647,7 +628,7 @@ MaybeHandle<Object> DefineClass(Isolate* isolate,
if (FLAG_trace_maps) {
LOG(isolate,
MapEvent("InitialMap", Map(), constructor->map(),
- "init class constructor", constructor->shared()->DebugName()));
+ "init class constructor", constructor->shared().DebugName()));
LOG(isolate, MapEvent("InitialMap", Map(), prototype->map(),
"init class prototype"));
}
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 42f6af5f4f..6e7b987458 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
-#include "src/conversions-inl.h"
-#include "src/counters.h"
+#include "src/execution/arguments-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
+#include "src/logging/counters.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/runtime/runtime-utils.h"
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index c6a7e7960c..b3b51ecc07 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
#include "src/asmjs/asm-js.h"
+#include "src/codegen/compiler.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
-#include "src/compiler.h"
-#include "src/deoptimizer.h"
-#include "src/frames-inl.h"
-#include "src/isolate-inl.h"
-#include "src/message-template.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
+#include "src/execution/v8threads.h"
+#include "src/execution/vm-state-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/runtime/runtime-utils.h"
-#include "src/v8threads.h"
-#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -25,7 +25,7 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
#ifdef DEBUG
- if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
+ if (FLAG_trace_lazy && !function->shared().is_compiled()) {
PrintF("[unoptimized: ");
function->PrintName();
PrintF("]\n");
@@ -66,14 +66,14 @@ RUNTIME_FUNCTION(Runtime_FunctionFirstExecution) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- DCHECK_EQ(function->feedback_vector()->optimization_marker(),
+ DCHECK_EQ(function->feedback_vector().optimization_marker(),
OptimizationMarker::kLogFirstExecution);
DCHECK(FLAG_log_function_events);
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
LOG(isolate, FunctionEvent(
- "first-execution", Script::cast(sfi->script())->id(), 0,
+ "first-execution", Script::cast(sfi->script()).id(), 0,
sfi->StartPosition(), sfi->EndPosition(), sfi->DebugName()));
- function->feedback_vector()->ClearOptimizationMarker();
+ function->feedback_vector().ClearOptimizationMarker();
// Return the code to continue execution, we don't care at this point whether
// this is for lazy compilation or has been eagerly complied.
return function->code();
@@ -99,9 +99,9 @@ RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- DCHECK(function->shared()->is_compiled());
+ DCHECK(function->shared().is_compiled());
- function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
+ function->feedback_vector().EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "Runtime_EvictOptimizedCodeSlot");
return function->code();
}
@@ -112,18 +112,18 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
Handle<JSReceiver> stdlib;
- if (args[1]->IsJSReceiver()) {
+ if (args[1].IsJSReceiver()) {
stdlib = args.at<JSReceiver>(1);
}
Handle<JSReceiver> foreign;
- if (args[2]->IsJSReceiver()) {
+ if (args[2].IsJSReceiver()) {
foreign = args.at<JSReceiver>(2);
}
Handle<JSArrayBuffer> memory;
- if (args[3]->IsJSArrayBuffer()) {
+ if (args[3].IsJSArrayBuffer()) {
memory = args.at<JSArrayBuffer>(3);
}
- if (function->shared()->HasAsmWasmData()) {
+ if (function->shared().HasAsmWasmData()) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
Handle<AsmWasmData> data(shared->asm_wasm_data(), isolate);
MaybeHandle<Object> result = AsmJs::InstantiateAsmWasm(
@@ -134,11 +134,11 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
}
// Remove wasm data, mark as broken for asm->wasm, replace function code with
// UncompiledData, and return a smi 0 to indicate failure.
- if (function->shared()->HasAsmWasmData()) {
+ if (function->shared().HasAsmWasmData()) {
SharedFunctionInfo::DiscardCompiled(isolate,
handle(function->shared(), isolate));
}
- function->shared()->set_is_asm_wasm_broken(true);
+ function->shared().set_is_asm_wasm_broken(true);
DCHECK(function->code() ==
isolate->builtins()->builtin(Builtins::kInstantiateAsmJs));
function->set_code(isolate->builtins()->builtin(Builtins::kCompileLazy));
@@ -184,7 +184,7 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
static bool IsSuitableForOnStackReplacement(Isolate* isolate,
Handle<JSFunction> function) {
// Keep track of whether we've succeeded in optimizing.
- if (function->shared()->optimization_disabled()) return false;
+ if (function->shared().optimization_disabled()) return false;
// If we are trying to do OSR when there are already optimized
// activations of the function, it means (a) the function is directly or
// indirectly recursive and (b) an optimized invocation has been
@@ -209,8 +209,8 @@ BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
// representing the entry point will be valid for any copy of the bytecode.
Handle<BytecodeArray> bytecode(iframe->GetBytecodeArray(), iframe->isolate());
- DCHECK(frame->LookupCode()->is_interpreter_trampoline_builtin());
- DCHECK(frame->function()->shared()->HasBytecodeArray());
+ DCHECK(frame->LookupCode().is_interpreter_trampoline_builtin());
+ DCHECK(frame->function().shared().HasBytecodeArray());
DCHECK(frame->is_interpreted());
// Reset the OSR loop nesting depth to disarm back edges.
@@ -258,11 +258,11 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
DeoptimizationData data =
DeoptimizationData::cast(result->deoptimization_data());
- if (data->OsrPcOffset()->value() >= 0) {
- DCHECK(BailoutId(data->OsrBytecodeOffset()->value()) == ast_id);
+ if (data.OsrPcOffset().value() >= 0) {
+ DCHECK(BailoutId(data.OsrBytecodeOffset().value()) == ast_id);
if (FLAG_trace_osr) {
PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n",
- ast_id.ToInt(), data->OsrPcOffset()->value());
+ ast_id.ToInt(), data.OsrPcOffset().value());
}
DCHECK(result->is_turbofanned());
@@ -289,7 +289,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
}
if (!function->IsOptimized()) {
- function->set_code(function->shared()->GetCode());
+ function->set_code(function->shared().GetCode());
}
return Object();
}
@@ -303,7 +303,7 @@ static Object CompileGlobalEval(Isolate* isolate, Handle<String> source,
// Check if native context allows code generation from
// strings. Throw an exception if it doesn't.
- if (native_context->allow_code_gen_from_strings()->IsFalse(isolate) &&
+ if (native_context->allow_code_gen_from_strings().IsFalse(isolate) &&
!Compiler::CodeGenerationFromStringsAllowed(isolate, native_context,
source)) {
Handle<Object> error_message =
@@ -340,14 +340,14 @@ RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
// execution default to an indirect call to eval, which will also return
// the first argument without doing anything).
if (*callee != isolate->native_context()->global_eval_fun() ||
- !args[1]->IsString()) {
+ !args[1].IsString()) {
return *callee;
}
- DCHECK(args[3]->IsSmi());
+ DCHECK(args[3].IsSmi());
DCHECK(is_valid_language_mode(args.smi_at(3)));
LanguageMode language_mode = static_cast<LanguageMode>(args.smi_at(3));
- DCHECK(args[4]->IsSmi());
+ DCHECK(args[4].IsSmi());
Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
isolate);
return CompileGlobalEval(isolate, args.at<String>(1), outer_info,
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 5c22d280df..bb5e4e1bcb 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -4,12 +4,12 @@
#include "src/runtime/runtime-utils.h"
-#include "src/arguments.h"
-#include "src/conversions-inl.h"
-#include "src/counters.h"
-#include "src/date.h"
+#include "src/date/date.h"
+#include "src/execution/arguments.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
-#include "src/isolate-inl.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 601d1a8da0..afe4a921e6 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -4,22 +4,22 @@
#include <vector>
-#include "src/arguments-inl.h"
-#include "src/compiler.h"
-#include "src/counters.h"
+#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
#include "src/debug/debug-coverage.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
-#include "src/frames-inl.h"
-#include "src/globals.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/interpreter/bytecode-array-accessor.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/debug-objects-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/js-collection-inl.h"
@@ -66,10 +66,10 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
DCHECK(it.frame()->is_interpreted());
InterpretedFrame* interpreted_frame =
reinterpret_cast<InterpretedFrame*>(it.frame());
- SharedFunctionInfo shared = interpreted_frame->function()->shared();
- BytecodeArray bytecode_array = shared->GetBytecodeArray();
+ SharedFunctionInfo shared = interpreted_frame->function().shared();
+ BytecodeArray bytecode_array = shared.GetBytecodeArray();
int bytecode_offset = interpreted_frame->GetBytecodeOffset();
- Bytecode bytecode = Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
+ Bytecode bytecode = Bytecodes::FromByte(bytecode_array.get(bytecode_offset));
bool side_effect_check_failed = false;
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects) {
@@ -98,7 +98,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
Smi::FromInt(static_cast<uint8_t>(bytecode)));
}
Object interrupt_object = isolate->stack_guard()->HandleInterrupts();
- if (interrupt_object->IsException(isolate)) {
+ if (interrupt_object.IsException(isolate)) {
return MakePair(interrupt_object,
Smi::FromInt(static_cast<uint8_t>(bytecode)));
}
@@ -112,8 +112,8 @@ RUNTIME_FUNCTION(Runtime_DebugBreakAtEntry) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
USE(function);
- DCHECK(function->shared()->HasDebugInfo());
- DCHECK(function->shared()->GetDebugInfo()->BreakAtEntry());
+ DCHECK(function->shared().HasDebugInfo());
+ DCHECK(function->shared().GetDebugInfo().BreakAtEntry());
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it(isolate);
@@ -147,7 +147,7 @@ static MaybeHandle<JSArray> GetIteratorInternalProperties(
Factory* factory = isolate->factory();
Handle<IteratorType> iterator = Handle<IteratorType>::cast(object);
const char* kind = nullptr;
- switch (iterator->map()->instance_type()) {
+ switch (iterator->map().instance_type()) {
case JS_MAP_KEY_ITERATOR_TYPE:
kind = "keys";
break;
@@ -300,7 +300,7 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeCount) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- if (!args[0]->IsJSGeneratorObject()) return Smi::kZero;
+ if (!args[0].IsJSGeneratorObject()) return Smi::kZero;
// Check arguments.
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
@@ -323,7 +323,7 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- if (!args[0]->IsJSGeneratorObject()) {
+ if (!args[0].IsJSGeneratorObject()) {
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -448,8 +448,8 @@ RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, f, 0);
- if (f->IsJSFunction()) {
- return JSFunction::cast(f)->shared()->inferred_name();
+ if (f.IsJSFunction()) {
+ return JSFunction::cast(f).shared().inferred_name();
}
return ReadOnlyRoots(isolate).empty_string();
}
@@ -484,19 +484,19 @@ int ScriptLinePosition(Handle<Script> script, int line) {
if (script->type() == Script::TYPE_WASM) {
return WasmModuleObject::cast(script->wasm_module_object())
- ->GetFunctionOffset(line);
+ .GetFunctionOffset(line);
}
Script::InitLineEnds(script);
FixedArray line_ends_array = FixedArray::cast(script->line_ends());
- const int line_count = line_ends_array->length();
+ const int line_count = line_ends_array.length();
DCHECK_LT(0, line_count);
if (line == 0) return 0;
// If line == line_count, we return the first position beyond the last line.
if (line > line_count) return -1;
- return Smi::ToInt(line_ends_array->get(line - 1)) + 1;
+ return Smi::ToInt(line_ends_array.get(line - 1)) + 1;
}
int ScriptLinePositionWithOffset(Handle<Script> script, int line, int offset) {
@@ -578,7 +578,7 @@ bool GetScriptById(Isolate* isolate, int needle, Handle<Script>* result) {
Script::Iterator iterator(isolate);
for (Script script = iterator.Next(); !script.is_null();
script = iterator.Next()) {
- if (script->id() == needle) {
+ if (script.id() == needle) {
*result = handle(script, isolate);
return true;
}
@@ -737,23 +737,7 @@ RUNTIME_FUNCTION(Runtime_DebugToggleBlockCoverage) {
}
RUNTIME_FUNCTION(Runtime_IncBlockCounter) {
- SealHandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(JSFunction, function, 0);
- CONVERT_SMI_ARG_CHECKED(coverage_array_slot_index, 1);
-
- // It's quite possible that a function contains IncBlockCounter bytecodes, but
- // no coverage info exists. This happens e.g. by selecting the best-effort
- // coverage collection mode, which triggers deletion of all coverage infos in
- // order to avoid memory leaks.
-
- SharedFunctionInfo shared = function->shared();
- if (shared->HasCoverageInfo()) {
- CoverageInfo coverage_info = shared->GetCoverageInfo();
- coverage_info->IncrementBlockCount(coverage_array_slot_index);
- }
-
- return ReadOnlyRoots(isolate).undefined_value();
+ UNREACHABLE(); // Never called. See the IncBlockCounter builtin instead.
}
RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionEntered) {
@@ -793,7 +777,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, script_function, 0);
CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
- Handle<Script> script(Script::cast(script_function->shared()->script()),
+ Handle<Script> script(Script::cast(script_function->shared().script()),
isolate);
v8::debug::LiveEditResult result;
LiveEdit::PatchScript(isolate, script, new_source, false, &result);
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index 56580e91da..6042a867c9 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -4,15 +4,15 @@
#include "src/runtime/runtime-utils.h"
-#include "src/arguments-inl.h"
-#include "src/counters.h"
-#include "src/elements.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
-#include "src/keys.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/elements.h"
+#include "src/objects/keys.h"
#include "src/objects/module.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 3d69845668..0d1879c16a 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/accessors.h"
-#include "src/arguments-inl.h"
-#include "src/compiler.h"
-#include "src/counters.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/compiler.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
+#include "src/logging/counters.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -20,8 +20,8 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptSource) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
if (function->IsJSFunction()) {
- Handle<Object> script(
- Handle<JSFunction>::cast(function)->shared()->script(), isolate);
+ Handle<Object> script(Handle<JSFunction>::cast(function)->shared().script(),
+ isolate);
if (script->IsScript()) return Handle<Script>::cast(script)->source();
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -33,8 +33,8 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptId) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
if (function->IsJSFunction()) {
- Handle<Object> script(
- Handle<JSFunction>::cast(function)->shared()->script(), isolate);
+ Handle<Object> script(Handle<JSFunction>::cast(function)->shared().script(),
+ isolate);
if (script->IsScript()) {
return Smi::FromInt(Handle<Script>::cast(script)->id());
}
@@ -60,7 +60,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- int pos = fun->shared()->StartPosition();
+ int pos = fun.shared().StartPosition();
return Smi::FromInt(pos);
}
@@ -70,7 +70,7 @@ RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
+ return isolate->heap()->ToBoolean(f.shared().IsApiFunction());
}
@@ -85,7 +85,7 @@ RUNTIME_FUNCTION(Runtime_Call) {
argv[i] = args.at(2 + i);
}
RETURN_RESULT_OR_FAILURE(
- isolate, Execution::Call(isolate, target, receiver, argc, argv.start()));
+ isolate, Execution::Call(isolate, target, receiver, argc, argv.begin()));
}
@@ -93,7 +93,7 @@ RUNTIME_FUNCTION(Runtime_IsFunction) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, object, 0);
- return isolate->heap()->ToBoolean(object->IsFunction());
+ return isolate->heap()->ToBoolean(object.IsFunction());
}
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index e8b4025981..c251653838 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -4,12 +4,12 @@
#include "src/runtime/runtime-utils.h"
-#include "src/arguments-inl.h"
#include "src/base/platform/time.h"
-#include "src/conversions-inl.h"
-#include "src/counters.h"
-#include "src/futex-emulation.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/futex-emulation.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/js-array-buffer-inl.h"
@@ -27,7 +27,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
CONVERT_SIZE_ARG_CHECKED(index, 1);
CHECK(!sta->WasDetached());
CHECK(sta->GetBuffer()->is_shared());
- CHECK_LT(index, NumberToSize(sta->length()));
+ CHECK_LT(index, sta->length());
CHECK_EQ(sta->type(), kExternalInt32Array);
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index f8873ff938..069ea88e12 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
-#include "src/counters.h"
+#include "src/execution/arguments-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/js-generator-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -48,14 +48,14 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
- CHECK_IMPLIES(IsAsyncFunction(function->shared()->kind()),
- IsAsyncGeneratorFunction(function->shared()->kind()));
- CHECK(IsResumableFunction(function->shared()->kind()));
+ CHECK_IMPLIES(IsAsyncFunction(function->shared().kind()),
+ IsAsyncGeneratorFunction(function->shared().kind()));
+ CHECK(IsResumableFunction(function->shared().kind()));
// Underlying function needs to have bytecode available.
- DCHECK(function->shared()->HasBytecodeArray());
- int size = function->shared()->internal_formal_parameter_count() +
- function->shared()->GetBytecodeArray()->register_count();
+ DCHECK(function->shared().HasBytecodeArray());
+ int size = function->shared().internal_formal_parameter_count() +
+ function->shared().GetBytecodeArray().register_count();
Handle<FixedArray> parameters_and_registers =
isolate->factory()->NewFixedArray(size);
@@ -129,7 +129,7 @@ RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSAsyncGeneratorObject, generator, 0);
- int state = generator->continuation();
+ int state = generator.continuation();
DCHECK_NE(state, JSAsyncGeneratorObject::kGeneratorExecuting);
// If state is 0 ("suspendedStart"), there is guaranteed to be no catch
@@ -137,11 +137,11 @@ RUNTIME_FUNCTION(Runtime_AsyncGeneratorHasCatchHandlerForPC) {
// not reach a catch handler.
if (state < 1) return ReadOnlyRoots(isolate).false_value();
- SharedFunctionInfo shared = generator->function()->shared();
- DCHECK(shared->HasBytecodeArray());
- HandlerTable handler_table(shared->GetBytecodeArray());
+ SharedFunctionInfo shared = generator.function().shared();
+ DCHECK(shared.HasBytecodeArray());
+ HandlerTable handler_table(shared.GetBytecodeArray());
- int pc = Smi::cast(generator->input_or_debug_pos())->value();
+ int pc = Smi::cast(generator.input_or_debug_pos()).value();
HandlerTable::CatchPrediction catch_prediction = HandlerTable::ASYNC_AWAIT;
handler_table.LookupRange(pc, nullptr, &catch_prediction);
return isolate->heap()->ToBoolean(catch_prediction == HandlerTable::CAUGHT);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index b884f3b83d..21b1b1ef7c 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -4,27 +4,27 @@
#include <memory>
-#include "src/api.h"
-#include "src/arguments-inl.h"
+#include "src/api/api.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/prettyprinter.h"
-#include "src/bootstrapper.h"
#include "src/builtins/builtins.h"
-#include "src/conversions.h"
-#include "src/counters.h"
#include "src/debug/debug.h"
-#include "src/feedback-vector-inl.h"
-#include "src/frames-inl.h"
-#include "src/isolate-inl.h"
-#include "src/message-template.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/feedback-vector-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/template-objects-inl.h"
-#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
-#include "src/string-builder-inl.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -40,13 +40,6 @@ RUNTIME_FUNCTION(Runtime_AccessCheck) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- CHECK(isolate->bootstrapper()->IsActive());
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_FatalProcessOutOfMemoryInAllocateRaw) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -276,13 +269,13 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- function->raw_feedback_cell()->set_interrupt_budget(FLAG_interrupt_budget);
+ function->raw_feedback_cell().set_interrupt_budget(FLAG_interrupt_budget);
if (!function->has_feedback_vector()) {
JSFunction::EnsureFeedbackVector(function);
// Also initialize the invocation count here. This is only really needed for
// OSR. When we OSR functions with lazy feedback allocation we want to have
// a non zero invocation count so we can inline functions.
- function->feedback_vector()->set_invocation_count(1);
+ function->feedback_vector().set_invocation_count(1);
return ReadOnlyRoots(isolate).undefined_value();
}
// Handle interrupts.
@@ -292,13 +285,6 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
}
}
-RUNTIME_FUNCTION(Runtime_Interrupt) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- TRACE_EVENT0("v8.execute", "V8.Interrupt");
- return isolate->stack_guard()->HandleInterrupts();
-}
-
RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -323,6 +309,14 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
AllocationType::kOld);
}
+RUNTIME_FUNCTION(Runtime_AllocateByteArray) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(length, 0);
+ DCHECK_LT(0, length);
+ return *isolate->factory()->NewByteArray(length);
+}
+
RUNTIME_FUNCTION(Runtime_AllocateSeqOneByteString) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -361,7 +355,7 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate, shared);
int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
if (script->IsScript() &&
- !(Handle<Script>::cast(script)->source()->IsUndefined(isolate))) {
+ !(Handle<Script>::cast(script)->source().IsUndefined(isolate))) {
Handle<Script> casted_script = Handle<Script>::cast(script);
*target = MessageLocation(casted_script, pos, pos + 1, shared);
return true;
@@ -615,7 +609,7 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
} else {
DCHECK_LE(args.length(), 2);
std::FILE* f;
- if (args[0]->IsString()) {
+ if (args[0].IsString()) {
// With a string argument, the results are appended to that file.
CONVERT_ARG_HANDLE_CHECKED(String, arg0, 0);
DisallowHeapAllocation no_gc;
@@ -640,7 +634,7 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
OFStream stats_stream(f);
isolate->counters()->runtime_call_stats()->Print(stats_stream);
isolate->counters()->runtime_call_stats()->Reset();
- if (args[0]->IsString())
+ if (args[0].IsString())
std::fclose(f);
else
std::fflush(f);
@@ -701,7 +695,7 @@ RUNTIME_FUNCTION(Runtime_GetTemplateObject) {
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared_info, 1);
CONVERT_SMI_ARG_CHECKED(slot_id, 2);
- Handle<Context> native_context(isolate->context()->native_context(), isolate);
+ Handle<Context> native_context(isolate->context().native_context(), isolate);
return *TemplateObjectDescription::GetTemplateObject(
isolate, native_context, description, shared_info, slot_id);
}
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index ad84317415..48b4d2b6e7 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -4,19 +4,19 @@
#include <iomanip>
-#include "src/arguments-inl.h"
-#include "src/counters.h"
-#include "src/frames-inl.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate-inl.h"
-#include "src/ostreams.h"
+#include "src/logging/counters.h"
#include "src/runtime/runtime-utils.h"
#include "src/snapshot/snapshot.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -84,7 +84,7 @@ void PrintRegisters(Isolate* isolate, std::ostream& os, bool is_input,
<< interpreter::Register(reg_index).ToString(
bytecode_iterator.bytecode_array()->parameter_count())
<< kArrowDirection;
- reg_object->ShortPrint(os);
+ reg_object.ShortPrint(os);
os << " ]" << std::endl;
}
}
@@ -173,13 +173,13 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceUpdateFeedback) {
CONVERT_SMI_ARG_CHECKED(slot, 1);
CONVERT_ARG_CHECKED(String, reason, 2);
- int slot_count = function->feedback_vector()->metadata()->slot_count();
+ int slot_count = function->feedback_vector().metadata().slot_count();
StdoutStream os;
os << "[Feedback slot " << slot << "/" << slot_count << " in ";
- function->shared()->ShortPrint(os);
+ function->shared().ShortPrint(os);
os << " updated to ";
- function->feedback_vector()->FeedbackSlotPrint(os, FeedbackSlot(slot));
+ function->feedback_vector().FeedbackSlotPrint(os, FeedbackSlot(slot));
os << " - ";
StringCharacterStream stream(reason);
diff --git a/deps/v8/src/runtime/runtime-intl.cc b/deps/v8/src/runtime/runtime-intl.cc
index 37cd2a45d7..de27dca8a3 100644
--- a/deps/v8/src/runtime/runtime-intl.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -9,14 +9,14 @@
#include <cmath>
#include <memory>
-#include "src/api-inl.h"
-#include "src/api-natives.h"
-#include "src/arguments-inl.h"
-#include "src/counters.h"
-#include "src/date.h"
-#include "src/global-handles.h"
+#include "src/api/api-inl.h"
+#include "src/api/api-natives.h"
+#include "src/date/date.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
-#include "src/isolate-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collator-inl.h"
@@ -27,7 +27,7 @@
#include "src/objects/js-plural-rules-inl.h"
#include "src/objects/managed.h"
#include "src/runtime/runtime-utils.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 0947c02a19..67aa097484 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/allocation-site-scopes-inl.h"
-#include "src/arguments-inl.h"
#include "src/ast/ast.h"
-#include "src/counters.h"
-#include "src/isolate-inl.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/allocation-site-scopes-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/heap-object-inl.h"
@@ -86,7 +86,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
}
- if (object->map()->is_deprecated()) {
+ if (object->map().is_deprecated()) {
JSObject::MigrateInstance(object);
}
@@ -113,23 +113,23 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
// Deep copy own properties. Arrays only have 1 property "length".
if (!copy->IsJSArray()) {
if (copy->HasFastProperties()) {
- Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors(),
+ Handle<DescriptorArray> descriptors(copy->map().instance_descriptors(),
isolate);
- int limit = copy->map()->NumberOfOwnDescriptors();
+ int limit = copy->map().NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
DCHECK_EQ(kField, descriptors->GetDetails(i).location());
DCHECK_EQ(kData, descriptors->GetDetails(i).kind());
FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
if (copy->IsUnboxedDoubleField(index)) continue;
Object raw = copy->RawFastPropertyAt(index);
- if (raw->IsJSObject()) {
+ if (raw.IsJSObject()) {
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
if (copying) copy->FastPropertyAtPut(index, *value);
- } else if (copying && raw->IsMutableHeapNumber()) {
+ } else if (copying && raw.IsMutableHeapNumber()) {
DCHECK(descriptors->GetDetails(i).representation().IsDouble());
- uint64_t double_value = MutableHeapNumber::cast(raw)->value_as_bits();
+ uint64_t double_value = MutableHeapNumber::cast(raw).value_as_bits();
auto value =
isolate->factory()->NewMutableHeapNumberFromBits(double_value);
copy->FastPropertyAtPut(index, *value);
@@ -139,8 +139,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
Handle<NameDictionary> dict(copy->property_dictionary(), isolate);
for (int i = 0; i < dict->Capacity(); i++) {
Object raw = dict->ValueAt(i);
- if (!raw->IsJSObject()) continue;
- DCHECK(dict->KeyAt(i)->IsName());
+ if (!raw.IsJSObject()) continue;
+ DCHECK(dict->KeyAt(i).IsName());
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
@@ -149,7 +149,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
// Assume non-arrays don't end up having elements.
- if (copy->elements()->length() == 0) return copy;
+ if (copy->elements().length() == 0) return copy;
}
// Deep copy own elements.
@@ -157,18 +157,20 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
case PACKED_ELEMENTS:
case PACKED_FROZEN_ELEMENTS:
case PACKED_SEALED_ELEMENTS:
+ case HOLEY_FROZEN_ELEMENTS:
+ case HOLEY_SEALED_ELEMENTS:
case HOLEY_ELEMENTS: {
Handle<FixedArray> elements(FixedArray::cast(copy->elements()), isolate);
if (elements->map() == ReadOnlyRoots(isolate).fixed_cow_array_map()) {
#ifdef DEBUG
for (int i = 0; i < elements->length(); i++) {
- DCHECK(!elements->get(i)->IsJSObject());
+ DCHECK(!elements->get(i).IsJSObject());
}
#endif
} else {
for (int i = 0; i < elements->length(); i++) {
Object raw = elements->get(i);
- if (!raw->IsJSObject()) continue;
+ if (!raw.IsJSObject()) continue;
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
@@ -183,7 +185,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object raw = element_dictionary->ValueAt(i);
- if (!raw->IsJSObject()) continue;
+ if (!raw.IsJSObject()) continue;
Handle<JSObject> value(JSObject::cast(raw), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value, VisitElementOrProperty(copy, value), JSObject);
@@ -198,7 +200,6 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
UNREACHABLE();
- break;
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
@@ -206,7 +207,6 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
#undef TYPED_ARRAY_CASE
// Typed elements cannot be created using an object literal.
UNREACHABLE();
- break;
case PACKED_SMI_ELEMENTS:
case HOLEY_SMI_ELEMENTS:
@@ -392,7 +392,7 @@ struct ObjectLiteralHelper {
// TODO(cbruni): avoid making the boilerplate fast again, the clone stub
// supports dict-mode objects directly.
JSObject::MigrateSlowToFast(boilerplate,
- boilerplate->map()->UnusedPropertyFields(),
+ boilerplate->map().UnusedPropertyFields(),
"FastLiteral");
}
return boilerplate;
@@ -427,7 +427,7 @@ struct ArrayLiteralHelper {
Handle<FixedArray> fixed_array_values =
Handle<FixedArray>::cast(copied_elements_values);
for (int i = 0; i < fixed_array_values->length(); i++) {
- DCHECK(!fixed_array_values->get(i)->IsFixedArray());
+ DCHECK(!fixed_array_values->get(i).IsFixedArray());
}
#endif
} else {
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 91dac4fa1c..41f21865a6 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/execution/arguments-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/js-promise.h"
#include "src/objects/module.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -18,11 +18,10 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, specifier, 1);
- Handle<Script> script(Script::cast(function->shared()->script()), isolate);
+ Handle<Script> script(Script::cast(function->shared().script()), isolate);
while (script->has_eval_from_shared()) {
- script =
- handle(Script::cast(script->eval_from_shared()->script()), isolate);
+ script = handle(Script::cast(script->eval_from_shared().script()), isolate);
}
RETURN_RESULT_OR_FAILURE(
@@ -34,14 +33,14 @@ RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(module_request, 0);
- Handle<Module> module(isolate->context()->module(), isolate);
+ Handle<Module> module(isolate->context().module(), isolate);
return *Module::GetModuleNamespace(isolate, module, module_request);
}
RUNTIME_FUNCTION(Runtime_GetImportMetaObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- Handle<Module> module(isolate->context()->module(), isolate);
+ Handle<Module> module(isolate->context().module(), isolate);
return *isolate->RunHostInitializeImportMetaObjectCallback(module);
}
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index edceef20a5..e496880b71 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
#include "src/base/bits.h"
-#include "src/bootstrapper.h"
-#include "src/counters.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -78,21 +78,6 @@ RUNTIME_FUNCTION(Runtime_NumberToString) {
return *isolate->factory()->NumberToString(number);
}
-// Compare two Smis x, y as if they were converted to strings and then
-// compared lexicographically. Returns:
-// -1 if x < y
-// 0 if x == y
-// 1 if x > y
-// TODO(szuend): Remove once the call-site in src/js/array.js is gone.
-RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Smi, x_value, 0);
- CONVERT_ARG_CHECKED(Smi, y_value, 1);
-
- return Object(Smi::LexicographicCompare(isolate, x_value, y_value));
-}
-
RUNTIME_FUNCTION(Runtime_MaxSmi) {
SealHandleScope shs(isolate);
DCHECK_EQ(0, args.length());
@@ -104,7 +89,7 @@ RUNTIME_FUNCTION(Runtime_IsSmi) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsSmi());
+ return isolate->heap()->ToBoolean(obj.IsSmi());
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index e38bed3620..8b94d83f31 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
-#include "src/bootstrapper.h"
-#include "src/counters.h"
#include "src/debug/debug.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
-#include "src/message-template.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/property-descriptor-object.h"
-#include "src/property-descriptor.h"
+#include "src/objects/property-descriptor.h"
#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
@@ -42,8 +42,8 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
if (is_found_out) *is_found_out = it.IsFound();
if (!it.IsFound() && key->IsSymbol() &&
- Symbol::cast(*key)->is_private_name()) {
- Handle<Object> name_string(Symbol::cast(*key)->name(), isolate);
+ Symbol::cast(*key).is_private_name()) {
+ Handle<Object> name_string(Symbol::cast(*key).name(), isolate);
DCHECK(name_string->IsString());
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kInvalidPrivateFieldRead,
@@ -81,75 +81,91 @@ namespace {
bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<Object> raw_key) {
- DisallowHeapAllocation no_allocation;
// This implements a special case for fast property deletion: when the
// last property in an object is deleted, then instead of normalizing
// the properties, we can undo the last map transition, with a few
// prerequisites:
// (1) The receiver must be a regular object and the key a unique name.
- Map map = receiver->map();
- if (map->IsSpecialReceiverMap()) return false;
+ Handle<Map> receiver_map(receiver->map(), isolate);
+ if (receiver_map->IsSpecialReceiverMap()) return false;
if (!raw_key->IsUniqueName()) return false;
Handle<Name> key = Handle<Name>::cast(raw_key);
// (2) The property to be deleted must be the last property.
- int nof = map->NumberOfOwnDescriptors();
+ int nof = receiver_map->NumberOfOwnDescriptors();
if (nof == 0) return false;
int descriptor = nof - 1;
- DescriptorArray descriptors = map->instance_descriptors();
+ Handle<DescriptorArray> descriptors(receiver_map->instance_descriptors(),
+ isolate);
if (descriptors->GetKey(descriptor) != *key) return false;
// (3) The property to be deleted must be deletable.
PropertyDetails details = descriptors->GetDetails(descriptor);
if (!details.IsConfigurable()) return false;
- // TODO(bmeurer): This optimization is unsound if the property is currently
- // marked as constant, as there's no way that we can learn that it is not
- // constant when we later follow the same transition again with a different
- // value on the same object. As a quick-fix we just disable the optimization
- // in case of constant fields. We might want to restructure the code here to
- // update the {map} instead and deoptimize all code that depends on it.
- if (details.constness() == PropertyConstness::kConst) return false;
// (4) The map must have a back pointer.
- Object backpointer = map->GetBackPointer();
+ Handle<Object> backpointer(receiver_map->GetBackPointer(), isolate);
if (!backpointer->IsMap()) return false;
+ Handle<Map> parent_map = Handle<Map>::cast(backpointer);
// (5) The last transition must have been caused by adding a property
// (and not any kind of special transition).
- if (Map::cast(backpointer)->NumberOfOwnDescriptors() != nof - 1) return false;
+ if (parent_map->NumberOfOwnDescriptors() != nof - 1) return false;
// Preconditions successful. No more bailouts after this point.
+ // If the {descriptor} was "const" so far, we need to update the
+ // {receiver_map} here, otherwise we could get the constants wrong, i.e.
+ //
+ // o.x = 1;
+ // delete o.x;
+ // o.x = 2;
+ //
+ // could trick V8 into thinking that `o.x` is still 1 even after the second
+ // assignment.
+ if (details.constness() == PropertyConstness::kConst &&
+ details.location() == kField) {
+ Handle<FieldType> field_type(descriptors->GetFieldType(descriptor),
+ isolate);
+ Map::GeneralizeField(isolate, receiver_map, descriptor,
+ PropertyConstness::kMutable, details.representation(),
+ field_type);
+ DCHECK_EQ(PropertyConstness::kMutable,
+ descriptors->GetDetails(descriptor).constness());
+ }
+
// Zap the property to avoid keeping objects alive. Zapping is not necessary
// for properties stored in the descriptor array.
if (details.location() == kField) {
- isolate->heap()->NotifyObjectLayoutChange(*receiver, map->instance_size(),
- no_allocation);
- FieldIndex index = FieldIndex::ForPropertyIndex(map, details.field_index());
+ DisallowHeapAllocation no_allocation;
+ isolate->heap()->NotifyObjectLayoutChange(
+ *receiver, receiver_map->instance_size(), no_allocation);
+ FieldIndex index =
+ FieldIndex::ForPropertyIndex(*receiver_map, details.field_index());
// Special case deleting the last out-of object property.
if (!index.is_inobject() && index.outobject_array_index() == 0) {
- DCHECK(!Map::cast(backpointer)->HasOutOfObjectProperties());
+ DCHECK(!parent_map->HasOutOfObjectProperties());
// Clear out the properties backing store.
receiver->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
} else {
Object filler = ReadOnlyRoots(isolate).one_pointer_filler_map();
- JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
+ JSObject::cast(*receiver).RawFastPropertyAtPut(index, filler);
// We must clear any recorded slot for the deleted property, because
// subsequent object modifications might put a raw double there.
// Slot clearing is the reason why this entire function cannot currently
// be implemented in the DeleteProperty stub.
- if (index.is_inobject() && !map->IsUnboxedDoubleField(index)) {
+ if (index.is_inobject() && !receiver_map->IsUnboxedDoubleField(index)) {
isolate->heap()->ClearRecordedSlot(*receiver,
receiver->RawField(index.offset()));
}
}
}
- // If the map was marked stable before, then there could be optimized code
- // that depends on the assumption that no object that reached this map
- // transitions away from it without triggering the "deoptimize dependent
- // code" mechanism.
- map->NotifyLeafMapLayoutChange(isolate);
+ // If the {receiver_map} was marked stable before, then there could be
+ // optimized code that depends on the assumption that no object that
+ // reached this {receiver_map} transitions away from it without triggering
+ // the "deoptimize dependent code" mechanism.
+ receiver_map->NotifyLeafMapLayoutChange(isolate);
// Finally, perform the map rollback.
- receiver->synchronized_set_map(Map::cast(backpointer));
+ receiver->synchronized_set_map(*parent_map);
#if VERIFY_HEAP
receiver->HeapObjectVerify(isolate);
- receiver->property_array()->PropertyArrayVerify(isolate);
+ receiver->property_array().PropertyArrayVerify(isolate);
#endif
return true;
}
@@ -288,9 +304,9 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
}
Map map = js_obj->map();
- if (!map->has_hidden_prototype() &&
- (key_is_array_index ? !map->has_indexed_interceptor()
- : !map->has_named_interceptor())) {
+ if (!map.has_hidden_prototype() &&
+ (key_is_array_index ? !map.has_indexed_interceptor()
+ : !map.has_named_interceptor())) {
return ReadOnlyRoots(isolate).false_value();
}
@@ -319,7 +335,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
} else if (object->IsString()) {
return isolate->heap()->ToBoolean(
key_is_array_index
- ? index < static_cast<uint32_t>(String::cast(*object)->length())
+ ? index < static_cast<uint32_t>(String::cast(*object).length())
: key->Equals(ReadOnlyRoots(isolate).length_string()));
} else if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -391,8 +407,8 @@ MaybeHandle<Object> Runtime::SetObjectProperty(
if (!success) return MaybeHandle<Object>();
if (!it.IsFound() && key->IsSymbol() &&
- Symbol::cast(*key)->is_private_name()) {
- Handle<Object> name_string(Symbol::cast(*key)->name(), isolate);
+ Symbol::cast(*key).is_private_name()) {
+ Handle<Object> name_string(Symbol::cast(*key).name(), isolate);
DCHECK(name_string->IsString());
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kInvalidPrivateFieldWrite,
@@ -507,7 +523,7 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
// Convert string-index keys to their number variant to avoid internalization
// below; and speed up subsequent conversion to index.
uint32_t index;
- if (key_obj->IsString() && String::cast(*key_obj)->AsArrayIndex(&index)) {
+ if (key_obj->IsString() && String::cast(*key_obj).AsArrayIndex(&index)) {
key_obj = isolate->factory()->NewNumberFromUint(index);
}
if (receiver_obj->IsJSObject()) {
@@ -521,23 +537,23 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
if (receiver->IsJSGlobalObject()) {
// Attempt dictionary lookup.
GlobalDictionary dictionary =
- JSGlobalObject::cast(*receiver)->global_dictionary();
- int entry = dictionary->FindEntry(isolate, key);
+ JSGlobalObject::cast(*receiver).global_dictionary();
+ int entry = dictionary.FindEntry(isolate, key);
if (entry != GlobalDictionary::kNotFound) {
- PropertyCell cell = dictionary->CellAt(entry);
- if (cell->property_details().kind() == kData) {
- Object value = cell->value();
- if (!value->IsTheHole(isolate)) return value;
+ PropertyCell cell = dictionary.CellAt(entry);
+ if (cell.property_details().kind() == kData) {
+ Object value = cell.value();
+ if (!value.IsTheHole(isolate)) return value;
// If value is the hole (meaning, absent) do the general lookup.
}
}
} else if (!receiver->HasFastProperties()) {
// Attempt dictionary lookup.
NameDictionary dictionary = receiver->property_dictionary();
- int entry = dictionary->FindEntry(isolate, key);
+ int entry = dictionary.FindEntry(isolate, key);
if ((entry != NameDictionary::kNotFound) &&
- (dictionary->DetailsAt(entry).kind() == kData)) {
- return dictionary->ValueAt(entry);
+ (dictionary.DetailsAt(entry).kind() == kData)) {
+ return dictionary.ValueAt(entry);
}
}
} else if (key_obj->IsSmi()) {
@@ -550,7 +566,7 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
Handle<JSObject> js_object = Handle<JSObject>::cast(receiver_obj);
ElementsKind elements_kind = js_object->GetElementsKind();
if (IsDoubleElementsKind(elements_kind)) {
- if (Smi::ToInt(*key_obj) >= js_object->elements()->length()) {
+ if (Smi::ToInt(*key_obj) >= js_object->elements().length()) {
elements_kind = IsHoleyElementsKind(elements_kind) ? HOLEY_ELEMENTS
: PACKED_ELEMENTS;
JSObject::TransitionElementsKind(js_object, elements_kind);
@@ -737,6 +753,15 @@ RUNTIME_FUNCTION(Runtime_NewObject) {
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
}
+RUNTIME_FUNCTION(Runtime_GetDerivedMap) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, 1);
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSFunction::GetDerivedMap(isolate, target, new_target));
+}
+
RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
@@ -756,7 +781,7 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
if (!object->IsJSObject()) return Smi::kZero;
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
// It could have been a DCHECK but we call this function directly from tests.
- if (!js_object->map()->is_deprecated()) return Smi::kZero;
+ if (!js_object->map().is_deprecated()) return Smi::kZero;
// This call must not cause lazy deopts, because it's called from deferred
// code where we can't handle lazy deopts for lack of a suitable bailout
// ID. So we just try migration and signal failure if necessary,
@@ -834,14 +859,14 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
if (flags & DataPropertyInLiteralFlag::kSetFunctionName) {
DCHECK(value->IsJSFunction());
Handle<JSFunction> function = Handle<JSFunction>::cast(value);
- DCHECK(!function->shared()->HasSharedName());
+ DCHECK(!function->shared().HasSharedName());
Handle<Map> function_map(function->map(), isolate);
if (!JSFunction::SetName(function, name,
isolate->factory()->empty_string())) {
return ReadOnlyRoots(isolate).exception();
}
// Class constructors do not reserve in-object space for name field.
- CHECK_IMPLIES(!IsClassConstructor(function->shared()->kind()),
+ CHECK_IMPLIES(!IsClassConstructor(function->shared().kind()),
*function_map == function->map());
}
@@ -872,7 +897,7 @@ RUNTIME_FUNCTION(Runtime_CollectTypeProfile) {
type = Handle<String>(ReadOnlyRoots(isolate).null_string(), isolate);
}
- DCHECK(vector->metadata()->HasTypeProfileSlot());
+ DCHECK(vector->metadata().HasTypeProfileSlot());
FeedbackNexus nexus(vector, vector->GetTypeProfileSlot());
nexus.Collect(type, position->value());
@@ -884,7 +909,7 @@ RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(HeapObject, obj, 0);
return isolate->heap()->ToBoolean(
- IsFastPackedElementsKind(obj->map()->elements_kind()));
+ IsFastPackedElementsKind(obj.map().elements_kind()));
}
@@ -892,7 +917,7 @@ RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSReceiver());
+ return isolate->heap()->ToBoolean(obj.IsJSReceiver());
}
@@ -900,8 +925,8 @@ RUNTIME_FUNCTION(Runtime_ClassOf) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- if (!obj->IsJSReceiver()) return ReadOnlyRoots(isolate).null_value();
- return JSReceiver::cast(obj)->class_name();
+ if (!obj.IsJSReceiver()) return ReadOnlyRoots(isolate).null_value();
+ return JSReceiver::cast(obj).class_name();
}
RUNTIME_FUNCTION(Runtime_GetFunctionName) {
@@ -919,7 +944,7 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- if (String::cast(getter->shared()->Name())->length() == 0) {
+ if (String::cast(getter->shared().Name()).length() == 0) {
Handle<Map> getter_map(getter->map(), isolate);
if (!JSFunction::SetName(getter, name, isolate->factory()->get_string())) {
return ReadOnlyRoots(isolate).exception();
@@ -986,7 +1011,7 @@ RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
// instead because of our call to %ToName() in the desugaring for
// computed properties.
if (property->IsString() &&
- String::cast(*property)->AsArrayIndex(&property_num)) {
+ String::cast(*property).AsArrayIndex(&property_num)) {
property = isolate->factory()->NewNumberFromUint(property_num);
}
@@ -1009,7 +1034,7 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- if (String::cast(setter->shared()->Name())->length() == 0) {
+ if (String::cast(setter->shared().Name()).length() == 0) {
Handle<Map> setter_map(setter->map(), isolate);
if (!JSFunction::SetName(setter, name, isolate->factory()->set_string())) {
return ReadOnlyRoots(isolate).exception();
@@ -1052,15 +1077,13 @@ RUNTIME_FUNCTION(Runtime_ToLength) {
RETURN_RESULT_OR_FAILURE(isolate, Object::ToLength(isolate, input));
}
-
-RUNTIME_FUNCTION(Runtime_ToString) {
+RUNTIME_FUNCTION(Runtime_ToStringRT) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
RETURN_RESULT_OR_FAILURE(isolate, Object::ToString(isolate, input));
}
-
RUNTIME_FUNCTION(Runtime_ToName) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -1122,6 +1145,31 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyDescriptor) {
return *desc.ToPropertyDescriptorObject(isolate);
}
+RUNTIME_FUNCTION(Runtime_AddPrivateBrand) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Symbol, brand, 1);
+ DCHECK(brand->is_private_name());
+
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, receiver, brand, LookupIterator::OWN);
+
+ if (it.IsFound()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kVarRedeclaration, brand));
+ }
+
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ // TODO(joyee): we could use this slot to store something useful. For now,
+ // store the brand itself.
+ CHECK(Object::AddDataProperty(&it, brand, attributes, Just(kDontThrow),
+ StoreOrigin::kMaybeKeyed)
+ .FromJust());
+ return *receiver;
+}
+
RUNTIME_FUNCTION(Runtime_AddPrivateField) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index cc932f2b41..272502b69f 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments.h"
-#include "src/counters.h"
+#include "src/execution/arguments.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
+#include "src/logging/counters.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -45,7 +45,7 @@ RUNTIME_FUNCTION(Runtime_StrictEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(Object, x, 0);
CONVERT_ARG_CHECKED(Object, y, 1);
- return isolate->heap()->ToBoolean(x->StrictEquals(y));
+ return isolate->heap()->ToBoolean(x.StrictEquals(y));
}
RUNTIME_FUNCTION(Runtime_StrictNotEqual) {
@@ -53,7 +53,7 @@ RUNTIME_FUNCTION(Runtime_StrictNotEqual) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(Object, x, 0);
CONVERT_ARG_CHECKED(Object, y, 1);
- return isolate->heap()->ToBoolean(!x->StrictEquals(y));
+ return isolate->heap()->ToBoolean(!x.StrictEquals(y));
}
RUNTIME_FUNCTION(Runtime_LessThan) {
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index d39c7190a3..d1b63a2fc8 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/arguments-inl.h"
-#include "src/counters.h"
+#include "src/api/api-inl.h"
#include "src/debug/debug.h"
-#include "src/elements.h"
-#include "src/microtask-queue.h"
-#include "src/objects-inl.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/microtask-queue.h"
+#include "src/logging/counters.h"
+#include "src/objects/elements.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/js-promise-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/oddball-inl.h"
#include "src/runtime/runtime-utils.h"
@@ -80,7 +80,7 @@ RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
Handle<CallableTask> microtask = isolate->factory()->NewCallableTask(
function, handle(function->native_context(), isolate));
MicrotaskQueue* microtask_queue =
- function->native_context()->microtask_queue();
+ function->native_context().microtask_queue();
if (microtask_queue) microtask_queue->EnqueueMicrotask(*microtask);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -117,7 +117,7 @@ RUNTIME_FUNCTION(Runtime_PromiseMarkAsHandled) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSPromise, promise, 0);
- promise->set_has_handler(true);
+ promise.set_has_handler(true);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 11544cd34b..dd07234a4a 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -4,42 +4,18 @@
#include "src/runtime/runtime-utils.h"
-#include "src/arguments-inl.h"
-#include "src/counters.h"
-#include "src/elements.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/elements.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_IsJSProxy) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSProxy());
-}
-
-
-RUNTIME_FUNCTION(Runtime_JSProxyGetHandler) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
- return proxy->handler();
-}
-
-
-RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
- return proxy->target();
-}
-
-
RUNTIME_FUNCTION(Runtime_GetPropertyWithReceiver) {
HandleScope scope(isolate);
@@ -98,7 +74,7 @@ RUNTIME_FUNCTION(Runtime_CheckProxyGetSetTrapResult) {
JSProxy::AccessKind(access_kind)));
}
-RUNTIME_FUNCTION(Runtime_CheckProxyHasTrap) {
+RUNTIME_FUNCTION(Runtime_CheckProxyHasTrapResult) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 7b5cd91699..85c9ebcb1b 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -4,18 +4,18 @@
#include <functional>
-#include "src/arguments-inl.h"
-#include "src/conversions-inl.h"
-#include "src/counters.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
-#include "src/message-template.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime-utils.h"
-#include "src/string-builder-inl.h"
-#include "src/string-search.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-search.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
@@ -45,17 +45,17 @@ int LookupNamedCapture(const std::function<bool(String)>& name_matches,
// internalized strings.
int maybe_capture_index = -1;
- const int named_capture_count = capture_name_map->length() >> 1;
+ const int named_capture_count = capture_name_map.length() >> 1;
for (int j = 0; j < named_capture_count; j++) {
// The format of {capture_name_map} is documented at
// JSRegExp::kIrregexpCaptureNameMapIndex.
const int name_ix = j * 2;
const int index_ix = j * 2 + 1;
- String capture_name = String::cast(capture_name_map->get(name_ix));
+ String capture_name = String::cast(capture_name_map.get(name_ix));
if (!name_matches(capture_name)) continue;
- maybe_capture_index = Smi::ToInt(capture_name_map->get(index_ix));
+ maybe_capture_index = Smi::ToInt(capture_name_map.get(index_ix));
break;
}
@@ -267,7 +267,7 @@ class CompiledReplacement {
const int capture_index = LookupNamedCapture(
[=](String capture_name) {
- return capture_name->IsEqualTo(requested_name);
+ return capture_name.IsEqualTo(requested_name);
},
capture_name_map);
@@ -323,7 +323,7 @@ bool CompiledReplacement::Compile(Isolate* isolate, Handle<JSRegExp> regexp,
if (capture_count > 0) {
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
Object maybe_capture_name_map = regexp->CaptureNameMap();
- if (maybe_capture_name_map->IsFixedArray()) {
+ if (maybe_capture_name_map.IsFixedArray()) {
capture_name_map = FixedArray::cast(maybe_capture_name_map);
}
}
@@ -405,7 +405,7 @@ void FindOneByteStringIndices(Vector<const uint8_t> subject, uint8_t pattern,
DCHECK_LT(0, limit);
// Collect indices of pattern in subject using memchr.
// Stop after finding at most limit values.
- const uint8_t* subject_start = subject.start();
+ const uint8_t* subject_start = subject.begin();
const uint8_t* subject_end = subject_start + subject.length();
const uint8_t* pos = subject_start;
while (limit > 0) {
@@ -421,7 +421,7 @@ void FindOneByteStringIndices(Vector<const uint8_t> subject, uint8_t pattern,
void FindTwoByteStringIndices(const Vector<const uc16> subject, uc16 pattern,
std::vector<int>* indices, unsigned int limit) {
DCHECK_LT(0, limit);
- const uc16* subject_start = subject.start();
+ const uc16* subject_start = subject.begin();
const uc16* subject_end = subject_start + subject.length();
for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
if (*pos == pattern) {
@@ -454,8 +454,8 @@ void FindStringIndicesDispatch(Isolate* isolate, String subject, String pattern,
std::vector<int>* indices, unsigned int limit) {
{
DisallowHeapAllocation no_gc;
- String::FlatContent subject_content = subject->GetFlatContent(no_gc);
- String::FlatContent pattern_content = pattern->GetFlatContent(no_gc);
+ String::FlatContent subject_content = subject.GetFlatContent(no_gc);
+ String::FlatContent pattern_content = pattern.GetFlatContent(no_gc);
DCHECK(subject_content.IsFlat());
DCHECK(pattern_content.IsFlat());
if (subject_content.IsOneByte()) {
@@ -533,7 +533,7 @@ V8_WARN_UNUSED_RESULT static Object StringReplaceGlobalAtomRegExpWithString(
String pattern =
String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
int subject_len = subject->length();
- int pattern_len = pattern->length();
+ int pattern_len = pattern.length();
int replacement_len = replacement->length();
FindStringIndicesDispatch(isolate, *subject, pattern, indices, 0xFFFFFFFF);
@@ -893,7 +893,7 @@ class MatchInfoBackedMatch : public String::Match {
if (regexp->TypeTag() == JSRegExp::IRREGEXP) {
Object o = regexp->CaptureNameMap();
- has_named_captures_ = o->IsFixedArray();
+ has_named_captures_ = o.IsFixedArray();
if (has_named_captures_) {
capture_name_map_ = handle(FixedArray::cast(o), isolate);
}
@@ -934,7 +934,7 @@ class MatchInfoBackedMatch : public String::Match {
CaptureState* state) override {
DCHECK(has_named_captures_);
const int capture_index = LookupNamedCapture(
- [=](String capture_name) { return capture_name->Equals(*name); },
+ [=](String capture_name) { return capture_name.Equals(*name); },
*capture_name_map_);
if (capture_index == -1) {
@@ -1095,11 +1095,11 @@ static Object SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
Object cached_answer = RegExpResultsCache::Lookup(
isolate->heap(), *subject, regexp->data(), &last_match_cache,
RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
- if (cached_answer->IsFixedArray()) {
+ if (cached_answer.IsFixedArray()) {
int capture_registers = (capture_count + 1) * 2;
int32_t* last_match = NewArray<int32_t>(capture_registers);
for (int i = 0; i < capture_registers; i++) {
- last_match[i] = Smi::ToInt(last_match_cache->get(i));
+ last_match[i] = Smi::ToInt(last_match_cache.get(i));
}
Handle<FixedArray> cached_fixed_array =
Handle<FixedArray>(FixedArray::cast(cached_answer), isolate);
@@ -1339,7 +1339,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<String> RegExpReplace(
Object result = StringReplaceGlobalRegExpWithString(
isolate, string, regexp, replace, last_match_info);
- if (result->IsString()) {
+ if (result.IsString()) {
return handle(String::cast(result), isolate);
} else {
return MaybeHandle<String>();
@@ -1387,7 +1387,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, replace_obj, 2);
DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
- DCHECK(replace_obj->map()->is_callable());
+ DCHECK(replace_obj->map().is_callable());
Factory* factory = isolate->factory();
Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
@@ -1450,7 +1450,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
Object maybe_capture_map = regexp->CaptureNameMap();
- if (maybe_capture_map->IsFixedArray()) {
+ if (maybe_capture_map.IsFixedArray()) {
has_named_captures = true;
capture_map = handle(FixedArray::cast(maybe_capture_map), isolate);
}
@@ -1489,7 +1489,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, replacement_obj,
Execution::Call(isolate, replace_obj, factory->undefined_value(), argc,
- argv.start()));
+ argv.begin()));
Handle<String> replacement;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -1578,7 +1578,7 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
Handle<Object> splitter_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, splitter_obj,
- Execution::New(isolate, ctor, argc, argv.start()));
+ Execution::New(isolate, ctor, argc, argv.begin()));
splitter = Handle<JSReceiver>::cast(splitter_obj);
}
@@ -1851,7 +1851,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplaceRT) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, replacement_obj,
Execution::Call(isolate, replace_obj, factory->undefined_value(),
- argc, argv.start()));
+ argc, argv.begin()));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, replacement, Object::ToString(isolate, replacement_obj));
@@ -1904,7 +1904,7 @@ RUNTIME_FUNCTION(Runtime_IsRegExp) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsJSRegExp());
+ return isolate->heap()->ToBoolean(obj.IsJSRegExp());
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index b7e22b8b3d..25d10e3395 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -4,16 +4,16 @@
#include <memory>
-#include "src/accessors.h"
-#include "src/arguments-inl.h"
#include "src/ast/scopes.h"
-#include "src/bootstrapper.h"
-#include "src/counters.h"
-#include "src/deoptimizer.h"
-#include "src/frames-inl.h"
+#include "src/builtins/accessors.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/message-template.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
-#include "src/message-template.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
#include "src/objects/heap-object-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/smi.h"
@@ -52,7 +52,7 @@ Object DeclareGlobal(
Handle<FeedbackVector> feedback_vector = Handle<FeedbackVector>(),
FeedbackSlot slot = FeedbackSlot::Invalid()) {
Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table(), isolate);
+ global->native_context().script_context_table(), isolate);
ScriptContextTable::LookupResult lookup;
if (ScriptContextTable::Lookup(isolate, *script_contexts, *name, &lookup) &&
IsLexicalVariableMode(lookup.mode)) {
@@ -125,7 +125,7 @@ Object DeclareGlobal(
// Preinitialize the feedback slot if the global object does not have
// named interceptor or the interceptor is not masking.
if (!global->HasNamedInterceptor() ||
- global->GetNamedInterceptor()->non_masking()) {
+ global->GetNamedInterceptor().non_masking()) {
FeedbackNexus nexus(feedback_vector, slot);
nexus.ConfigurePropertyCellMode(it.GetPropertyCell());
}
@@ -221,12 +221,12 @@ Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
// context, or a declaration block scope. Since this is called from eval, the
// context passed is the context of the caller, which may be some nested
// context and not the declaration context.
- Handle<Context> context(isolate->context()->declaration_context(), isolate);
+ Handle<Context> context(isolate->context().declaration_context(), isolate);
DCHECK(context->IsFunctionContext() || context->IsNativeContext() ||
context->IsScriptContext() || context->IsEvalContext() ||
(context->IsBlockContext() &&
- context->scope_info()->is_declaration_scope()));
+ context->scope_info().is_declaration_scope()));
bool is_function = value->IsJSFunction();
bool is_var = !is_function;
@@ -252,13 +252,13 @@ Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
value, NONE, is_var, is_function,
RedeclarationType::kTypeError);
}
- if (context->extension()->IsJSGlobalObject()) {
+ if (context->extension().IsJSGlobalObject()) {
Handle<JSGlobalObject> global(JSGlobalObject::cast(context->extension()),
isolate);
return DeclareGlobal(isolate, global, name, value, NONE, is_var,
is_function, RedeclarationType::kTypeError);
} else if (context->IsScriptContext()) {
- DCHECK(context->global_object()->IsJSGlobalObject());
+ DCHECK(context->global_object().IsJSGlobalObject());
Handle<JSGlobalObject> global(
JSGlobalObject::cast(context->global_object()), isolate);
return DeclareGlobal(isolate, global, name, value, NONE, is_var,
@@ -288,7 +288,7 @@ Object DeclareEvalHelper(Isolate* isolate, Handle<String> name,
// yet. Sloppy eval will never have an extension object, as vars are hoisted
// out, and lets are known statically.
DCHECK((context->IsBlockContext() &&
- context->scope_info()->is_declaration_scope()) ||
+ context->scope_info().is_declaration_scope()) ||
context->IsFunctionContext());
object =
isolate->factory()->NewJSObject(isolate->context_extension_function());
@@ -389,13 +389,13 @@ std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
template <typename T>
Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
T parameters, int argument_count) {
- CHECK(!IsDerivedConstructor(callee->shared()->kind()));
- DCHECK(callee->shared()->has_simple_parameters());
+ CHECK(!IsDerivedConstructor(callee->shared().kind()));
+ DCHECK(callee->shared().has_simple_parameters());
Handle<JSObject> result =
isolate->factory()->NewArgumentsObject(callee, argument_count);
// Allocate the elements if needed.
- int parameter_count = callee->shared()->internal_formal_parameter_count();
+ int parameter_count = callee->shared().internal_formal_parameter_count();
if (argument_count > 0) {
if (parameter_count > 0) {
int mapped_count = Min(argument_count, parameter_count);
@@ -423,7 +423,7 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
--index;
}
- Handle<ScopeInfo> scope_info(callee->shared()->scope_info(), isolate);
+ Handle<ScopeInfo> scope_info(callee->shared().scope_info(), isolate);
// First mark all mappable slots as unmapped and copy the values into the
// arguments object.
@@ -522,7 +522,7 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
- int start_index = callee->shared()->internal_formal_parameter_count();
+ int start_index = callee->shared().internal_formal_parameter_count();
// This generic runtime function can also be used when the caller has been
// inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
@@ -535,9 +535,9 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
{
DisallowHeapAllocation no_gc;
FixedArray elements = FixedArray::cast(result->elements());
- WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
+ WriteBarrierMode mode = elements.GetWriteBarrierMode(no_gc);
for (int i = 0; i < num_elements; i++) {
- elements->set(i, *arguments[i + start_index], mode);
+ elements.set(i, *arguments[i + start_index], mode);
}
}
return *result;
@@ -580,7 +580,7 @@ RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
// Note that args[0] is the address of an array of full object pointers
// (a.k.a. FullObjectSlot), which looks like a Smi because it's aligned.
DCHECK(args[0].IsSmi());
- FullObjectSlot frame(args[0]->ptr());
+ FullObjectSlot frame(args[0].ptr());
CONVERT_SMI_ARG_CHECKED(length, 1);
CONVERT_SMI_ARG_CHECKED(mapped_count, 2);
Handle<FixedArray> result =
@@ -811,7 +811,7 @@ MaybeHandle<Object> LoadLookupSlot(Isolate* isolate, Handle<String> name,
// If the "property" we were looking for is a local variable, the
// receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
Handle<Object> receiver = isolate->factory()->undefined_value();
- Handle<Object> value = handle(Context::cast(*holder)->get(index), isolate);
+ Handle<Object> value = handle(Context::cast(*holder).get(index), isolate);
// Check for uninitialized bindings.
if (flag == kNeedsInitialization && value->IsTheHole(isolate)) {
THROW_NEW_ERROR(isolate,
@@ -875,7 +875,7 @@ RUNTIME_FUNCTION(Runtime_LoadLookupSlotInsideTypeof) {
RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- DCHECK(args[0]->IsString());
+ DCHECK(args[0].IsString());
Handle<String> name = args.at<String>(0);
Handle<Object> value;
Handle<Object> receiver;
@@ -915,7 +915,7 @@ MaybeHandle<Object> StoreLookupSlot(
// The property was found in a context slot.
if (index != Context::kNotFound) {
if (flag == kNeedsInitialization &&
- Handle<Context>::cast(holder)->get(index)->IsTheHole(isolate)) {
+ Handle<Context>::cast(holder)->get(index).IsTheHole(isolate)) {
THROW_NEW_ERROR(isolate,
NewReferenceError(MessageTemplate::kNotDefined, name),
Object);
@@ -985,7 +985,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_SloppyHoisting) {
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
const ContextLookupFlags lookup_flags =
static_cast<ContextLookupFlags>(DONT_FOLLOW_CHAINS);
- Handle<Context> declaration_context(isolate->context()->declaration_context(),
+ Handle<Context> declaration_context(isolate->context().declaration_context(),
isolate);
RETURN_RESULT_OR_FAILURE(
isolate, StoreLookupSlot(isolate, declaration_context, name, value,
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index aa19b103eb..2e2918e47d 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
-#include "src/conversions.h"
-#include "src/counters.h"
+#include "src/execution/arguments-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/regexp-utils.h"
#include "src/runtime/runtime-utils.h"
-#include "src/string-builder-inl.h"
-#include "src/string-search.h"
+#include "src/strings/string-builder-inl.h"
+#include "src/strings/string-search.h"
namespace v8 {
namespace internal {
@@ -77,8 +77,8 @@ MaybeHandle<String> StringReplaceOneCharWithString(
recursion_limit--;
if (subject->IsConsString()) {
ConsString cons = ConsString::cast(*subject);
- Handle<String> first = handle(cons->first(), isolate);
- Handle<String> second = handle(cons->second(), isolate);
+ Handle<String> first = handle(cons.first(), isolate);
+ Handle<String> second = handle(cons.second(), isolate);
Handle<String> new_first;
if (!StringReplaceOneCharWithString(isolate, first, search, replace, found,
recursion_limit).ToHandle(&new_first)) {
@@ -276,7 +276,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
int32_t array_length;
- if (!args[1]->ToInt32(&array_length)) {
+ if (!args[1].ToInt32(&array_length)) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
}
CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
@@ -303,15 +303,15 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
{
DisallowHeapAllocation no_gc;
FixedArray fixed_array = FixedArray::cast(array->elements());
- if (fixed_array->length() < array_length) {
- array_length = fixed_array->length();
+ if (fixed_array.length() < array_length) {
+ array_length = fixed_array.length();
}
if (array_length == 0) {
return ReadOnlyRoots(isolate).empty_string();
} else if (array_length == 1) {
- Object first = fixed_array->get(0);
- if (first->IsString()) return first;
+ Object first = fixed_array.get(0);
+ if (first.IsString()) return first;
}
length = StringBuilderConcatLength(special_length, fixed_array,
array_length, &one_byte);
@@ -356,20 +356,20 @@ static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
FixedArray one_byte_cache = heap->single_character_string_cache();
Object undefined = ReadOnlyRoots(heap).undefined_value();
int i;
- WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
+ WriteBarrierMode mode = elements.GetWriteBarrierMode(no_gc);
for (i = 0; i < length; ++i) {
- Object value = one_byte_cache->get(chars[i]);
+ Object value = one_byte_cache.get(chars[i]);
if (value == undefined) break;
- elements->set(i, value, mode);
+ elements.set(i, value, mode);
}
if (i < length) {
- MemsetTagged(elements->RawFieldOfElementAt(i), Smi::kZero, length - i);
+ MemsetTagged(elements.RawFieldOfElementAt(i), Smi::kZero, length - i);
}
#ifdef DEBUG
for (int j = 0; j < length; ++j) {
- Object element = elements->get(j);
+ Object element = elements.get(j);
DCHECK(element == Smi::kZero ||
- (element->IsString() && String::cast(element)->LooksValid()));
+ (element.IsString() && String::cast(element).LooksValid()));
}
#endif
return i;
@@ -398,7 +398,7 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
Vector<const uint8_t> chars = content.ToOneByteVector();
// Note, this will initialize all elements (not only the prefix)
// to prevent GC from seeing partially initialized array.
- position = CopyCachedOneByteCharsToArray(isolate->heap(), chars.start(),
+ position = CopyCachedOneByteCharsToArray(isolate->heap(), chars.begin(),
*elements, length);
} else {
MemsetTagged(elements->data_start(),
@@ -415,7 +415,7 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
#ifdef DEBUG
for (int i = 0; i < length; ++i) {
- DCHECK_EQ(String::cast(elements->get(i))->length(), 1);
+ DCHECK_EQ(String::cast(elements->get(i)).length(), 1);
}
#endif
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index b47794938a..b204033f39 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
-#include "src/counters.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
-#include "src/string-builder-inl.h"
+#include "src/strings/string-builder-inl.h"
namespace v8 {
namespace internal {
@@ -39,7 +39,7 @@ RUNTIME_FUNCTION(Runtime_SymbolDescriptiveString) {
CONVERT_ARG_HANDLE_CHECKED(Symbol, symbol, 0);
IncrementalStringBuilder builder(isolate);
builder.AppendCString("Symbol(");
- if (symbol->name()->IsString()) {
+ if (symbol->name().IsString()) {
builder.AppendString(handle(String::cast(symbol->name()), isolate));
}
builder.AppendCharacter(')');
@@ -51,7 +51,7 @@ RUNTIME_FUNCTION(Runtime_SymbolIsPrivate) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
- return isolate->heap()->ToBoolean(symbol->is_private());
+ return isolate->heap()->ToBoolean(symbol.is_private());
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 3afa2a9899..85a50fca61 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -7,25 +7,26 @@
#include <memory>
#include <sstream>
-#include "src/api-inl.h"
-#include "src/arguments-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/platform/mutex.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compiler.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
-#include "src/compiler.h"
-#include "src/counters.h"
-#include "src/deoptimizer.h"
-#include "src/frames-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/execution/runtime-profiler.h"
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/heap/heap-write-barrier-inl.h"
#include "src/ic/stub-cache.h"
-#include "src/isolate-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/js-array-inl.h"
#include "src/objects/smi.h"
-#include "src/ostreams.h"
-#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-engine.h"
@@ -217,6 +218,28 @@ RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
isolate->concurrent_recompilation_enabled());
}
+namespace {
+
+void RemoveBytecodeFromPendingOptimizeTable(v8::internal::Isolate* isolate,
+ Handle<JSFunction> function) {
+ // TODO(mythria): Remove the check for undefined, once we fix all tests to
+ // add PrepareForOptimization when using OptimizeFunctionOnNextCall.
+ if (isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()) {
+ return;
+ }
+
+ Handle<ObjectHashTable> table =
+ handle(ObjectHashTable::cast(
+ isolate->heap()->pending_optimize_for_test_bytecode()),
+ isolate);
+ bool was_present;
+ table = table->Remove(isolate, table, handle(function->shared(), isolate),
+ &was_present);
+ isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
+}
+
+} // namespace
+
RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
HandleScope scope(isolate);
@@ -233,44 +256,45 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- // Check we called PrepareFunctionForOptimization and hold the bytecode
- // array to prevent it from getting flushed.
- // TODO(mythria): Enable this check once we add PrepareForOptimization in all
- // tests before calling OptimizeFunctionOnNextCall.
- // CHECK(!ObjectHashTable::cast(
- // isolate->heap()->pending_optimize_for_test_bytecode())
- // ->Lookup(handle(function->shared(), isolate))
- // ->IsTheHole());
-
// The following conditions were lifted (in part) from the DCHECK inside
// JSFunction::MarkForOptimization().
- if (!function->shared()->allows_lazy_compilation()) {
+ if (!function->shared().allows_lazy_compilation()) {
return ReadOnlyRoots(isolate).undefined_value();
}
// If function isn't compiled, compile it now.
- IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
+ IsCompiledScope is_compiled_scope(function->shared().is_compiled_scope());
if (!is_compiled_scope.is_compiled() &&
!Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return ReadOnlyRoots(isolate).undefined_value();
}
- if (function->shared()->optimization_disabled() &&
- function->shared()->disable_optimization_reason() ==
+ if (function->shared().optimization_disabled() &&
+ function->shared().disable_optimization_reason() ==
BailoutReason::kNeverOptimize) {
return ReadOnlyRoots(isolate).undefined_value();
}
- // If the function is already optimized, just return.
- if (function->IsOptimized() || function->shared()->HasAsmWasmData()) {
+ if (function->shared().HasAsmWasmData()) {
return ReadOnlyRoots(isolate).undefined_value();
}
- // If the function has optimized code, ensure that we check for it and return.
+ // Check we called PrepareFunctionForOptimization and hold the bytecode
+ // array to prevent it from getting flushed.
+ // TODO(mythria): Enable this check once we add PrepareForOptimization in all
+ // tests before calling OptimizeFunctionOnNextCall.
+ // CHECK(!ObjectHashTable::cast(
+ // isolate->heap()->pending_optimize_for_test_bytecode())
+ // ->Lookup(handle(function->shared(), isolate))
+ // ->IsTheHole());
+
if (function->HasOptimizedCode()) {
- DCHECK(function->ChecksOptimizationMarker());
+ DCHECK(function->IsOptimized() || function->ChecksOptimizationMarker());
+ // If function is already optimized, remove the bytecode array from the
+ // pending optimize for test table and return.
+ RemoveBytecodeFromPendingOptimizeTable(isolate, function);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -298,7 +322,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
// This function may not have been lazily compiled yet, even though its shared
// function has.
if (!function->is_compiled()) {
- DCHECK(function->shared()->IsInterpreted());
+ DCHECK(function->shared().IsInterpreted());
function->set_code(*BUILTIN_CODE(isolate, InterpreterEntryTrampoline));
}
@@ -312,12 +336,12 @@ namespace {
bool EnsureFeedbackVector(Handle<JSFunction> function) {
// Check function allows lazy compilation.
- if (!function->shared()->allows_lazy_compilation()) {
+ if (!function->shared().allows_lazy_compilation()) {
return false;
}
// If function isn't compiled, compile it now.
- IsCompiledScope is_compiled_scope(function->shared()->is_compiled_scope());
+ IsCompiledScope is_compiled_scope(function->shared().is_compiled_scope());
if (!is_compiled_scope.is_compiled() &&
!Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
@@ -352,36 +376,28 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
// If optimization is disabled for the function, return without making it
// pending optimize for test.
- if (function->shared()->optimization_disabled() &&
- function->shared()->disable_optimization_reason() ==
+ if (function->shared().optimization_disabled() &&
+ function->shared().disable_optimization_reason() ==
BailoutReason::kNeverOptimize) {
return ReadOnlyRoots(isolate).undefined_value();
}
- // If the function is already optimized, return without making it pending
- // optimize for test.
- if (function->IsOptimized() || function->shared()->HasAsmWasmData()) {
- return ReadOnlyRoots(isolate).undefined_value();
- }
-
- // If the function has optimized code, ensure that we check for it and then
- // return without making it pending optimize for test.
- if (function->HasOptimizedCode()) {
- DCHECK(function->ChecksOptimizationMarker());
+ // We don't optimize Asm/Wasm functions.
+ if (function->shared().HasAsmWasmData()) {
return ReadOnlyRoots(isolate).undefined_value();
}
// Hold onto the bytecode array between marking and optimization to ensure
// it's not flushed.
Handle<ObjectHashTable> table =
- isolate->heap()->pending_optimize_for_test_bytecode()->IsUndefined()
+ isolate->heap()->pending_optimize_for_test_bytecode().IsUndefined()
? ObjectHashTable::New(isolate, 1)
: handle(ObjectHashTable::cast(
isolate->heap()->pending_optimize_for_test_bytecode()),
isolate);
table = ObjectHashTable::Put(
table, handle(function->shared(), isolate),
- handle(function->shared()->GetBytecodeArray(), isolate));
+ handle(function->shared().GetBytecodeArray(), isolate));
isolate->heap()->SetPendingOptimizeForTestBytecode(*table);
return ReadOnlyRoots(isolate).undefined_value();
@@ -402,25 +418,38 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
if (!it.done()) function = handle(it.frame()->function(), isolate);
if (function.is_null()) return ReadOnlyRoots(isolate).undefined_value();
- // If the function is already optimized, just return.
- if (function->IsOptimized()) return ReadOnlyRoots(isolate).undefined_value();
-
- if (function->shared()->optimization_disabled() &&
- function->shared()->disable_optimization_reason() ==
+ if (function->shared().optimization_disabled() &&
+ function->shared().disable_optimization_reason() ==
BailoutReason::kNeverOptimize) {
return ReadOnlyRoots(isolate).undefined_value();
}
+ // Check we called PrepareFunctionForOptimization and hold the bytecode
+ // array to prevent it from getting flushed.
+ // TODO(mythria): Enable this check once we add PrepareForOptimization in all
+ // tests before calling OptimizeOsr.
+ // CHECK(!ObjectHashTable::cast(
+ // isolate->heap()->pending_optimize_for_test_bytecode())
+ // ->Lookup(handle(function->shared(), isolate))
+ // ->IsTheHole());
+
+ if (function->HasOptimizedCode()) {
+ DCHECK(function->IsOptimized() || function->ChecksOptimizationMarker());
+ // If function is already optimized, remove the bytecode array from the
+ // pending optimize for test table and return.
+ RemoveBytecodeFromPendingOptimizeTable(isolate, function);
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
// Ensure that the function is marked for non-concurrent optimization, so that
// subsequent runs don't also optimize.
- if (!function->HasOptimizedCode()) {
- if (FLAG_trace_osr) {
- PrintF("[OSR - OptimizeOsr marking ");
- function->ShortPrint();
- PrintF(" for non-concurrent optimization]\n");
- }
- function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
+ if (FLAG_trace_osr) {
+ PrintF("[OSR - OptimizeOsr marking ");
+ function->ShortPrint();
+ PrintF(" for non-concurrent optimization]\n");
}
+ JSFunction::EnsureFeedbackVector(function);
+ function->MarkForOptimization(ConcurrencyMode::kNotConcurrent);
// Make the profiler arm all back edges in unoptimized code.
if (it.frame()->type() == StackFrame::INTERPRETED) {
@@ -443,7 +472,7 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
return ReadOnlyRoots(isolate).undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- function->shared()->DisableOptimization(BailoutReason::kNeverOptimize);
+ function->shared().DisableOptimization(BailoutReason::kNeverOptimize);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -505,7 +534,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (function->IsOptimized()) {
status |= static_cast<int>(OptimizationStatus::kOptimized);
- if (function->code()->is_turbofanned()) {
+ if (function->code().is_turbofanned()) {
status |= static_cast<int>(OptimizationStatus::kTurboFanned);
}
}
@@ -544,15 +573,6 @@ RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
return ReadOnlyRoots(isolate).undefined_value();
}
-RUNTIME_FUNCTION(Runtime_GetDeoptCount) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- // Functions without a feedback vector have never deoptimized.
- if (!function->has_feedback_vector()) return Smi::kZero;
- return Smi::FromInt(function->feedback_vector()->deopt_count());
-}
-
static void ReturnThis(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(args.This());
}
@@ -677,11 +697,11 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
bool weak = maybe_object.IsWeak();
#ifdef DEBUG
- if (object->IsString() && !isolate->context().is_null()) {
+ if (object.IsString() && !isolate->context().is_null()) {
DCHECK(!weak);
// If we have a string, assume it's a code "marker"
// and print some interesting cpu debugging info.
- object->Print(os);
+ object.Print(os);
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
os << "fp = " << reinterpret_cast<void*>(frame->fp())
@@ -693,10 +713,10 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
if (weak) {
os << "[weak] ";
}
- object->Print(os);
+ object.Print(os);
}
- if (object->IsHeapObject()) {
- HeapObject::cast(object)->map()->Print(os);
+ if (object.IsHeapObject()) {
+ HeapObject::cast(object).map().Print(os);
}
#else
if (weak) {
@@ -724,7 +744,7 @@ RUNTIME_FUNCTION(Runtime_PrintWithNameForAssert) {
PrintF("%c", character);
}
PrintF(": ");
- args[1]->ShortPrint();
+ args[1].ShortPrint();
PrintF("\n");
return ReadOnlyRoots(isolate).undefined_value();
@@ -792,10 +812,10 @@ RUNTIME_FUNCTION(Runtime_SetForceSlowPath) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, arg, 0);
- if (arg->IsTrue(isolate)) {
+ if (arg.IsTrue(isolate)) {
isolate->set_force_slow_path(true);
} else {
- DCHECK(arg->IsFalse(isolate));
+ DCHECK(arg.IsFalse(isolate));
isolate->set_force_slow_path(false);
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -840,7 +860,7 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
return ReadOnlyRoots(isolate).exception();
}
StdoutStream os;
- func->code()->Print(os);
+ func->code().Print(os);
os << std::endl;
#endif // DEBUG
return ReadOnlyRoots(isolate).undefined_value();
@@ -882,7 +902,7 @@ RUNTIME_FUNCTION(Runtime_TraceExit) {
CONVERT_ARG_CHECKED(Object, obj, 0);
PrintIndentation(isolate);
PrintF("} -> ");
- obj->ShortPrint();
+ obj.ShortPrint();
PrintF("\n");
return obj; // return TOS
}
@@ -892,11 +912,20 @@ RUNTIME_FUNCTION(Runtime_HaveSameMap) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSObject, obj1, 0);
CONVERT_ARG_CHECKED(JSObject, obj2, 1);
- return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
+ return isolate->heap()->ToBoolean(obj1.map() == obj2.map());
}
+RUNTIME_FUNCTION(Runtime_HasElementsInALargeObjectSpace) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSArray, array, 0);
+ FixedArrayBase elements = array.elements();
+ return isolate->heap()->ToBoolean(
+ isolate->heap()->new_lo_space()->Contains(elements) ||
+ isolate->heap()->lo_space()->Contains(elements));
+}
-RUNTIME_FUNCTION(Runtime_InNewSpace) {
+RUNTIME_FUNCTION(Runtime_InYoungGeneration) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
@@ -907,12 +936,12 @@ RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- if (!function->shared()->HasAsmWasmData()) {
+ if (!function.shared().HasAsmWasmData()) {
// Doesn't have wasm data.
return ReadOnlyRoots(isolate).false_value();
}
- if (function->shared()->HasBuiltinId() &&
- function->shared()->builtin_id() == Builtins::kInstantiateAsmJs) {
+ if (function.shared().HasBuiltinId() &&
+ function.shared().builtin_id() == Builtins::kInstantiateAsmJs) {
// Hasn't been compiled yet.
return ReadOnlyRoots(isolate).false_value();
}
@@ -950,7 +979,7 @@ RUNTIME_FUNCTION(Runtime_IsWasmCode) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- bool is_js_to_wasm = function->code()->kind() == Code::JS_TO_WASM_FUNCTION;
+ bool is_js_to_wasm = function.code().kind() == Code::JS_TO_WASM_FUNCTION;
return isolate->heap()->ToBoolean(is_js_to_wasm);
}
@@ -1017,10 +1046,10 @@ RUNTIME_FUNCTION(Runtime_SetWasmThreadsEnabled) {
return ReadOnlyRoots(isolate).undefined_value();
}
-#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
- RUNTIME_FUNCTION(Runtime_Has##Name) { \
- CONVERT_ARG_CHECKED(JSObject, obj, 0); \
- return isolate->heap()->ToBoolean(obj->Has##Name()); \
+#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
+ RUNTIME_FUNCTION(Runtime_Has##Name) { \
+ CONVERT_ARG_CHECKED(JSObject, obj, 0); \
+ return isolate->heap()->ToBoolean(obj.Has##Name()); \
}
ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
@@ -1040,7 +1069,7 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties)
#define FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION(Type, type, TYPE, ctype) \
RUNTIME_FUNCTION(Runtime_HasFixed##Type##Elements) { \
CONVERT_ARG_CHECKED(JSObject, obj, 0); \
- return isolate->heap()->ToBoolean(obj->HasFixed##Type##Elements()); \
+ return isolate->heap()->ToBoolean(obj.HasFixed##Type##Elements()); \
}
TYPED_ARRAYS(FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION)
@@ -1133,7 +1162,7 @@ RUNTIME_FUNCTION(Runtime_HeapObjectVerify) {
#else
CHECK(object->IsObject());
if (object->IsHeapObject()) {
- CHECK(HeapObject::cast(*object)->map()->IsMap());
+ CHECK(HeapObject::cast(*object).map().IsMap());
} else {
CHECK(object->IsSmi());
}
@@ -1147,8 +1176,8 @@ RUNTIME_FUNCTION(Runtime_WasmGetNumberOfInstances) {
CONVERT_ARG_HANDLE_CHECKED(WasmModuleObject, module_obj, 0);
int instance_count = 0;
WeakArrayList weak_instance_list = module_obj->weak_instance_list();
- for (int i = 0; i < weak_instance_list->length(); ++i) {
- if (weak_instance_list->Get(i)->IsWeak()) instance_count++;
+ for (int i = 0; i < weak_instance_list.length(); ++i) {
+ if (weak_instance_list.Get(i)->IsWeak()) instance_count++;
}
return Smi::FromInt(instance_count);
}
@@ -1158,7 +1187,7 @@ RUNTIME_FUNCTION(Runtime_WasmNumInterpretedCalls) {
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
if (!instance->has_debug_info()) return Object();
- uint64_t num = instance->debug_info()->NumInterpretedCalls();
+ uint64_t num = instance->debug_info().NumInterpretedCalls();
return *isolate->factory()->NewNumberFromSize(static_cast<size_t>(num));
}
@@ -1190,12 +1219,12 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
WasmCompiledFrame* frame = WasmCompiledFrame::cast(it.frame());
uint8_t* mem_start = reinterpret_cast<uint8_t*>(
- frame->wasm_instance()->memory_object()->array_buffer()->backing_store());
+ frame->wasm_instance().memory_object().array_buffer().backing_store());
int func_index = frame->function_index();
int pos = frame->position();
// TODO(titzer): eliminate dependency on WasmModule definition here.
int func_start =
- frame->wasm_instance()->module()->functions[func_index].code.offset();
+ frame->wasm_instance().module()->functions[func_index].code.offset();
wasm::ExecutionTier tier = frame->wasm_code()->is_liftoff()
? wasm::ExecutionTier::kLiftoff
: wasm::ExecutionTier::kTurbofan;
@@ -1209,7 +1238,7 @@ RUNTIME_FUNCTION(Runtime_WasmTierUpFunction) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_SMI_ARG_CHECKED(function_index, 1);
- auto* native_module = instance->module_object()->native_module();
+ auto* native_module = instance->module_object().native_module();
isolate->wasm_engine()->CompileFunction(
isolate, native_module, function_index, wasm::ExecutionTier::kTurbofan);
CHECK(!native_module->compilation_state()->failed());
@@ -1224,7 +1253,7 @@ RUNTIME_FUNCTION(Runtime_IsLiftoffFunction) {
Handle<WasmExportedFunction> exp_fun =
Handle<WasmExportedFunction>::cast(function);
wasm::NativeModule* native_module =
- exp_fun->instance()->module_object()->native_module();
+ exp_fun->instance().module_object().native_module();
uint32_t func_index = exp_fun->function_index();
wasm::WasmCodeRefScope code_ref_scope;
wasm::WasmCode* code = native_module->GetCode(func_index);
@@ -1236,7 +1265,7 @@ RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTracking) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- object->map()->CompleteInobjectSlackTracking(isolate);
+ object->map().CompleteInobjectSlackTracking(isolate);
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1246,7 +1275,50 @@ RUNTIME_FUNCTION(Runtime_FreezeWasmLazyCompilation) {
DisallowHeapAllocation no_gc;
CONVERT_ARG_CHECKED(WasmInstanceObject, instance, 0);
- instance->module_object()->native_module()->set_lazy_compile_frozen(true);
+ instance.module_object().native_module()->set_lazy_compile_frozen(true);
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_TurbofanStaticAssert) {
+ SealHandleScope shs(isolate);
+ // Always lowered to StaticAssert node in Turbofan, so we should never get
+ // here in compiled code.
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
+ // The {NoopListener} currently does nothing on any callback, but reports
+ // {true} on {is_listening_to_code_events()}. Feel free to add assertions to
+ // any method to further test the code logging callbacks.
+ class NoopListener final : public CodeEventListener {
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ const char* comment) final {}
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ Name name) final {}
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name source) final {}
+ void CodeCreateEvent(LogEventsAndTags tag, AbstractCode code,
+ SharedFunctionInfo shared, Name source, int line,
+ int column) final {}
+ void CodeCreateEvent(LogEventsAndTags tag, const wasm::WasmCode* code,
+ wasm::WasmName name) final {}
+ void CallbackEvent(Name name, Address entry_point) final {}
+ void GetterCallbackEvent(Name name, Address entry_point) final {}
+ void SetterCallbackEvent(Name name, Address entry_point) final {}
+ void RegExpCodeCreateEvent(AbstractCode code, String source) final {}
+ void CodeMoveEvent(AbstractCode from, AbstractCode to) final {}
+ void SharedFunctionInfoMoveEvent(Address from, Address to) final {}
+ void CodeMovingGCEvent() final {}
+ void CodeDisableOptEvent(AbstractCode code,
+ SharedFunctionInfo shared) final {}
+ void CodeDeoptEvent(Code code, DeoptimizeKind kind, Address pc,
+ int fp_to_sp_delta) final {}
+
+ bool is_listening_to_code_events() final { return true; }
+ };
+ static base::LeakyObject<NoopListener> noop_listener;
+ isolate->wasm_engine()->EnableCodeLogging(isolate);
+ isolate->code_event_dispatcher()->AddListener(noop_listener.get());
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 3d99b1bc7d..1736ee3939 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
-#include "src/counters.h"
-#include "src/elements.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/message-template.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/message-template.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/elements.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
@@ -60,19 +60,6 @@ RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
return accessor->CopyElements(source, target, length);
}
-RUNTIME_FUNCTION(Runtime_TypedArrayGetLength) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
- return holder->length();
-}
-
-RUNTIME_FUNCTION(Runtime_ArrayBufferViewWasDetached) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(JSTypedArray::cast(args[0])->WasDetached());
-}
-
RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -108,26 +95,40 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, target_obj, 0);
+ // Validation is handled in the Torque builtin.
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, array, 0);
+ DCHECK(!array->WasDetached());
- Handle<JSTypedArray> array;
- const char* method = "%TypedArray%.prototype.sort";
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, array, JSTypedArray::Validate(isolate, target_obj, method));
+ size_t length = array->length();
+ if (length <= 1) return *array;
- // This line can be removed when JSTypedArray::Validate throws
- // if array.[[ViewedArrayBuffer]] is detached(v8:4648)
- if (V8_UNLIKELY(array->WasDetached())) return *array;
+ // In case of a SAB, the data is copied into temporary memory, as
+ // std::sort might crash in case the underlying data is concurrently
+ // modified while sorting.
+ CHECK(array->buffer().IsJSArrayBuffer());
+ Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(array->buffer()), isolate);
+ const bool copy_data = buffer->is_shared();
+
+ Handle<ByteArray> array_copy;
+ if (copy_data) {
+ const size_t bytes = array->byte_length();
+ // TODO(szuend): Re-check this approach once support for larger typed
+ // arrays has landed.
+ CHECK_LE(bytes, INT_MAX);
+ array_copy = isolate->factory()->NewByteArray(static_cast<int>(bytes));
+ std::memcpy(static_cast<void*>(array_copy->GetDataStartAddress()),
+ static_cast<void*>(array->DataPtr()), bytes);
+ }
- size_t length = array->length_value();
- if (length <= 1) return *array;
+ DisallowHeapAllocation no_gc;
- Handle<FixedTypedArrayBase> elements(
- FixedTypedArrayBase::cast(array->elements()), isolate);
switch (array->type()) {
#define TYPED_ARRAY_SORT(Type, type, TYPE, ctype) \
case kExternal##Type##Array: { \
- ctype* data = static_cast<ctype*>(elements->DataPtr()); \
+ ctype* data = \
+ copy_data \
+ ? reinterpret_cast<ctype*>(array_copy->GetDataStartAddress()) \
+ : static_cast<ctype*>(array->DataPtr()); \
if (kExternal##Type##Array == kExternalFloat64Array || \
kExternal##Type##Array == kExternalFloat32Array) { \
if (COMPRESS_POINTERS_BOOL && alignof(ctype) > kTaggedSize) { \
@@ -153,13 +154,14 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
#undef TYPED_ARRAY_SORT
}
- return *array;
-}
+ if (copy_data) {
+ DCHECK(!array_copy.is_null());
+ const size_t bytes = array->byte_length();
+ std::memcpy(static_cast<void*>(array->DataPtr()),
+ static_cast<void*>(array_copy->GetDataStartAddress()), bytes);
+ }
-RUNTIME_FUNCTION(Runtime_IsTypedArray) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- return isolate->heap()->ToBoolean(args[0]->IsJSTypedArray());
+ return *array;
}
// 22.2.3.23 %TypedArray%.prototype.set ( overloaded [ , offset ] )
@@ -194,7 +196,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySet) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, len,
Object::ToLength(isolate, len));
- if (uint_offset + len->Number() > target->length_value()) {
+ if (uint_offset + len->Number() > target->length()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kTypedArraySetSourceTooLarge));
}
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 7d35010435..2d6fbc585f 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -6,8 +6,8 @@
#define V8_RUNTIME_RUNTIME_UTILS_H_
#include "src/base/logging.h"
-#include "src/globals.h"
-#include "src/objects.h"
+#include "src/common/globals.h"
+#include "src/objects/objects.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -17,40 +17,40 @@ namespace internal {
// it in a variable with the given name. If the object is not of the
// expected type we crash safely.
#define CONVERT_ARG_CHECKED(Type, name, index) \
- CHECK(args[index]->Is##Type()); \
+ CHECK(args[index].Is##Type()); \
Type name = Type::cast(args[index]);
#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index) \
- CHECK(args[index]->Is##Type()); \
+ CHECK(args[index].Is##Type()); \
Handle<Type> name = args.at<Type>(index);
#define CONVERT_NUMBER_ARG_HANDLE_CHECKED(name, index) \
- CHECK(args[index]->IsNumber()); \
+ CHECK(args[index].IsNumber()); \
Handle<Object> name = args.at(index);
// Cast the given object to a boolean and store it in a variable with
// the given name. If the object is not a boolean we crash safely.
#define CONVERT_BOOLEAN_ARG_CHECKED(name, index) \
- CHECK(args[index]->IsBoolean()); \
- bool name = args[index]->IsTrue(isolate);
+ CHECK(args[index].IsBoolean()); \
+ bool name = args[index].IsTrue(isolate);
// Cast the given argument to a Smi and store its value in an int variable
// with the given name. If the argument is not a Smi we crash safely.
#define CONVERT_SMI_ARG_CHECKED(name, index) \
- CHECK(args[index]->IsSmi()); \
+ CHECK(args[index].IsSmi()); \
int name = args.smi_at(index);
// Cast the given argument to a double and store it in a variable with
// the given name. If the argument is not a number (as opposed to
// the number not-a-number) we crash safely.
#define CONVERT_DOUBLE_ARG_CHECKED(name, index) \
- CHECK(args[index]->IsNumber()); \
+ CHECK(args[index].IsNumber()); \
double name = args.number_at(index);
// Cast the given argument to a size_t and store its value in a variable with
// the given name. If the argument is not a size_t we crash safely.
#define CONVERT_SIZE_ARG_CHECKED(name, index) \
- CHECK(args[index]->IsNumber()); \
+ CHECK(args[index].IsNumber()); \
Handle<Object> name##_object = args.at(index); \
size_t name = 0; \
CHECK(TryNumberToSize(*name##_object, &name));
@@ -59,7 +59,7 @@ namespace internal {
// a variable of the specified type with the given name. If the
// object is not a Number we crash safely.
#define CONVERT_NUMBER_CHECKED(type, name, Type, obj) \
- CHECK(obj->IsNumber()); \
+ CHECK(obj.IsNumber()); \
type name = NumberTo##Type(obj);
// Cast the given argument to PropertyDetails and store its value in a
@@ -80,23 +80,23 @@ namespace internal {
// Assert that the given argument is a number within the Int32 range
// and convert it to int32_t. If the argument is not an Int32 we crash safely.
#define CONVERT_INT32_ARG_CHECKED(name, index) \
- CHECK(args[index]->IsNumber()); \
+ CHECK(args[index].IsNumber()); \
int32_t name = 0; \
- CHECK(args[index]->ToInt32(&name));
+ CHECK(args[index].ToInt32(&name));
// Assert that the given argument is a number within the Uint32 range
// and convert it to uint32_t. If the argument is not an Uint32 call
// IllegalOperation and return.
#define CONVERT_UINT32_ARG_CHECKED(name, index) \
- CHECK(args[index]->IsNumber()); \
+ CHECK(args[index].IsNumber()); \
uint32_t name = 0; \
- CHECK(args[index]->ToUint32(&name));
+ CHECK(args[index].ToUint32(&name));
// Cast the given argument to PropertyAttributes and store its value in a
// variable with the given name. If the argument is not a Smi or the
// enum value is out of range, we crash safely.
#define CONVERT_PROPERTY_ATTRIBUTES_CHECKED(name, index) \
- CHECK(args[index]->IsSmi()); \
+ CHECK(args[index].IsSmi()); \
CHECK_EQ(args.smi_at(index) & ~(READ_ONLY | DONT_ENUM | DONT_DELETE), 0); \
PropertyAttributes name = static_cast<PropertyAttributes>(args.smi_at(index));
@@ -115,16 +115,16 @@ struct ObjectPair {
};
static inline ObjectPair MakePair(Object x, Object y) {
- ObjectPair result = {x->ptr(), y->ptr()};
+ ObjectPair result = {x.ptr(), y.ptr()};
// Pointers x and y returned in rax and rdx, in AMD-x64-abi.
// In Win64 they are assigned to a hidden first argument.
return result;
}
#else
-typedef uint64_t ObjectPair;
+using ObjectPair = uint64_t;
static inline ObjectPair MakePair(Object x, Object y) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- return x->ptr() | (static_cast<ObjectPair>(y->ptr()) << 32);
+ return x.ptr() | (static_cast<ObjectPair>(y.ptr()) << 32);
#elif defined(V8_TARGET_BIG_ENDIAN)
return y->ptr() | (static_cast<ObjectPair>(x->ptr()) << 32);
#else
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 1f107a4c52..288bfa1141 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -2,19 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arguments-inl.h"
+#include "src/common/v8memory.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/conversions.h"
-#include "src/counters.h"
#include "src/debug/debug.h"
-#include "src/frame-constants.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/message-template.h"
#include "src/heap/factory.h"
-#include "src/message-template.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/v8memory.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-constants.h"
@@ -39,7 +39,7 @@ WasmInstanceObject GetWasmInstanceOnStackTop(Isolate* isolate) {
}
Context GetNativeContextFromWasmInstanceOnStackTop(Isolate* isolate) {
- return GetWasmInstanceOnStackTop(isolate)->native_context();
+ return GetWasmInstanceOnStackTop(isolate).native_context();
}
class ClearThreadInWasmScope {
@@ -310,7 +310,8 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_SMI_ARG_CHECKED(func_index, 1);
- ClearThreadInWasmScope wasm_flag;
+ // This runtime function is always called from wasm code.
+ ClearThreadInWasmScope flag_scope;
#ifdef DEBUG
StackFrameIterator it(isolate, isolate->thread_local_top());
@@ -322,10 +323,17 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
DCHECK_EQ(*instance, WasmCompileLazyFrame::cast(it.frame())->wasm_instance());
#endif
- auto* native_module = instance->module_object()->native_module();
- wasm::CompileLazy(isolate, native_module, func_index);
+ DCHECK(isolate->context().is_null());
+ isolate->set_context(instance->native_context());
+ auto* native_module = instance->module_object().native_module();
+ bool success = wasm::CompileLazy(isolate, native_module, func_index);
+ if (!success) {
+ DCHECK(isolate->has_pending_exception());
+ return ReadOnlyRoots(isolate).exception();
+ }
Address entrypoint = native_module->GetCallTargetForFunction(func_index);
+
return Object(entrypoint);
}
@@ -333,7 +341,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileLazy) {
Handle<JSArrayBuffer> getSharedArrayBuffer(Handle<WasmInstanceObject> instance,
Isolate* isolate, uint32_t address) {
DCHECK(instance->has_memory_object());
- Handle<JSArrayBuffer> array_buffer(instance->memory_object()->array_buffer(),
+ Handle<JSArrayBuffer> array_buffer(instance->memory_object().array_buffer(),
isolate);
// Validation should have failed if the memory was not shared.
@@ -407,6 +415,24 @@ Object ThrowTableOutOfBounds(Isolate* isolate,
}
} // namespace
+RUNTIME_FUNCTION(Runtime_WasmRefFunc) {
+ // This runtime function is always being called from wasm code.
+ ClearThreadInWasmScope flag_scope;
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ auto instance =
+ Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
+ DCHECK(isolate->context().is_null());
+ isolate->set_context(instance->native_context());
+ CONVERT_UINT32_ARG_CHECKED(function_index, 0);
+
+ Handle<WasmExportedFunction> function =
+ WasmInstanceObject::GetOrCreateWasmExportedFunction(isolate, instance,
+ function_index);
+
+ return *function;
+}
+
RUNTIME_FUNCTION(Runtime_WasmFunctionTableGet) {
// This runtime function is always being called from wasm code.
ClearThreadInWasmScope flag_scope;
@@ -416,9 +442,9 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableGet) {
CONVERT_ARG_HANDLE_CHECKED(WasmInstanceObject, instance, 0);
CONVERT_UINT32_ARG_CHECKED(table_index, 1);
CONVERT_UINT32_ARG_CHECKED(entry_index, 2);
- DCHECK_LT(table_index, instance->tables()->length());
+ DCHECK_LT(table_index, instance->tables().length());
auto table = handle(
- WasmTableObject::cast(instance->tables()->get(table_index)), isolate);
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate);
if (!WasmTableObject::IsInBounds(isolate, table, entry_index)) {
return ThrowWasmError(isolate, MessageTemplate::kWasmTrapTableOutOfBounds);
@@ -439,9 +465,9 @@ RUNTIME_FUNCTION(Runtime_WasmFunctionTableSet) {
CONVERT_ARG_CHECKED(Object, element_raw, 3);
// TODO(mstarzinger): Manually box because parameters are not visited yet.
Handle<Object> element(element_raw, isolate);
- DCHECK_LT(table_index, instance->tables()->length());
+ DCHECK_LT(table_index, instance->tables().length());
auto table = handle(
- WasmTableObject::cast(instance->tables()->get(table_index)), isolate);
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate);
if (!WasmTableObject::IsInBounds(isolate, table, entry_index)) {
return ThrowWasmError(isolate, MessageTemplate::kWasmTrapTableOutOfBounds);
@@ -461,9 +487,9 @@ RUNTIME_FUNCTION(Runtime_WasmIndirectCallCheckSignatureAndGetTargetInstance) {
DCHECK(isolate->context().is_null());
isolate->set_context(instance->native_context());
- DCHECK_LT(table_index, instance->tables()->length());
+ DCHECK_LT(table_index, instance->tables().length());
auto table_obj = handle(
- WasmTableObject::cast(instance->tables()->get(table_index)), isolate);
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate);
// This check is already done in generated code.
DCHECK(WasmTableObject::IsInBounds(isolate, table_obj, entry_index));
@@ -490,7 +516,7 @@ RUNTIME_FUNCTION(Runtime_WasmIndirectCallCheckSignatureAndGetTargetInstance) {
maybe_target_instance.ToHandleChecked();
const wasm::WasmModule* target_module =
- target_instance->module_object()->native_module()->module();
+ target_instance->module_object().native_module()->module();
wasm::FunctionSig* target_sig = target_module->functions[function_index].sig;
@@ -519,9 +545,9 @@ RUNTIME_FUNCTION(Runtime_WasmIndirectCallGetTargetAddress) {
CONVERT_UINT32_ARG_CHECKED(table_index, 0);
CONVERT_UINT32_ARG_CHECKED(entry_index, 1);
- DCHECK_LT(table_index, instance->tables()->length());
+ DCHECK_LT(table_index, instance->tables().length());
auto table_obj = handle(
- WasmTableObject::cast(instance->tables()->get(table_index)), isolate);
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate);
DCHECK(WasmTableObject::IsInBounds(isolate, table_obj, entry_index));
@@ -596,5 +622,55 @@ RUNTIME_FUNCTION(Runtime_WasmTableCopy) {
if (oob) return ThrowTableOutOfBounds(isolate, instance);
return ReadOnlyRoots(isolate).undefined_value();
}
+
+RUNTIME_FUNCTION(Runtime_WasmTableGrow) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ auto instance =
+ Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
+ CONVERT_UINT32_ARG_CHECKED(table_index, 0);
+ CONVERT_ARG_CHECKED(Object, value_raw, 1);
+ // TODO(mstarzinger): Manually box because parameters are not visited yet.
+ Handle<Object> value(value_raw, isolate);
+ CONVERT_UINT32_ARG_CHECKED(delta, 2);
+
+ Handle<WasmTableObject> table(
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate);
+ int result = WasmTableObject::Grow(isolate, table, delta, value);
+
+ return Smi::FromInt(result);
+}
+
+RUNTIME_FUNCTION(Runtime_WasmTableFill) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(4, args.length());
+ auto instance =
+ Handle<WasmInstanceObject>(GetWasmInstanceOnStackTop(isolate), isolate);
+ CONVERT_UINT32_ARG_CHECKED(table_index, 0);
+ CONVERT_UINT32_ARG_CHECKED(start, 1);
+ CONVERT_ARG_CHECKED(Object, value_raw, 2);
+ // TODO(mstarzinger): Manually box because parameters are not visited yet.
+ Handle<Object> value(value_raw, isolate);
+ CONVERT_UINT32_ARG_CHECKED(count, 3);
+
+ Handle<WasmTableObject> table(
+ WasmTableObject::cast(instance->tables().get(table_index)), isolate);
+
+ uint32_t table_size = static_cast<uint32_t>(table->entries().length());
+
+ if (start > table_size) {
+ return ThrowTableOutOfBounds(isolate, instance);
+ }
+
+ // Even when table.fill goes out-of-bounds, as many entries as possible are
+ // put into the table. Only afterwards we trap.
+ uint32_t fill_count = std::min(count, table_size - start);
+ WasmTableObject::Fill(isolate, table, start, value, fill_count);
+
+ if (fill_count < count) {
+ return ThrowTableOutOfBounds(isolate, instance);
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-weak-refs.cc b/deps/v8/src/runtime/runtime-weak-refs.cc
index df7ed76bf3..fbb5b42344 100644
--- a/deps/v8/src/runtime/runtime-weak-refs.cc
+++ b/deps/v8/src/runtime/runtime-weak-refs.cc
@@ -3,13 +3,13 @@
// found in the LICENSE file.
#include "include/v8.h"
-#include "src/api.h"
-#include "src/arguments-inl.h"
-#include "src/counters.h"
-#include "src/execution.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api.h"
+#include "src/execution/arguments-inl.h"
+#include "src/execution/execution.h"
+#include "src/handles/handles-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/js-weak-refs-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 058e02733e..ad49a0299c 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -5,12 +5,12 @@
#include "src/runtime/runtime.h"
#include "src/base/hashmap.h"
-#include "src/contexts.h"
-#include "src/handles-inl.h"
+#include "src/codegen/reloc-info.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/reloc-info.h"
+#include "src/objects/contexts.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
@@ -106,6 +106,7 @@ bool Runtime::NeedsExactContext(FunctionId id) {
// try-catch in async function.
return false;
case Runtime::kAddPrivateField:
+ case Runtime::kAddPrivateBrand:
case Runtime::kCopyDataProperties:
case Runtime::kCreateDataProperty:
case Runtime::kCreatePrivateNameSymbol:
@@ -177,6 +178,16 @@ bool Runtime::IsNonReturning(FunctionId id) {
}
}
+bool Runtime::MayAllocate(FunctionId id) {
+ switch (id) {
+ case Runtime::kCompleteInobjectSlackTracking:
+ case Runtime::kCompleteInobjectSlackTrackingForMap:
+ return false;
+ default:
+ return true;
+ }
+}
+
const Runtime::Function* Runtime::FunctionForName(const unsigned char* name,
int length) {
base::CallOnce(&initialize_function_name_map_once,
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 9c8ff6b48f..773a5065e2 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -7,11 +7,11 @@
#include <memory>
-#include "src/allocation.h"
#include "src/base/platform/time.h"
-#include "src/elements-kind.h"
-#include "src/globals.h"
-#include "src/unicode.h"
+#include "src/common/globals.h"
+#include "src/objects/elements-kind.h"
+#include "src/strings/unicode.h"
+#include "src/utils/allocation.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -42,17 +42,12 @@ namespace internal {
F(ArrayIndexOf, 3, 1) \
F(ArrayIsArray, 1, 1) \
F(ArraySpeciesConstructor, 1, 1) \
- F(EstimateNumberOfElements, 1, 1) \
- F(GetArrayKeys, 2, 1) \
F(GrowArrayElements, 2, 1) \
- F(HasComplexElements, 1, 1) \
I(IsArray, 1, 1) \
F(NewArray, -1 /* >= 3 */, 1) \
F(NormalizeElements, 1, 1) \
- F(PrepareElementsForSort, 2, 1) \
F(TransitionElementsKind, 2, 1) \
F(TransitionElementsKindWithKind, 2, 1) \
- F(TrySliceSimpleNonFastElements, 3, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F, I) \
F(AtomicsLoad64, 2, 1) \
@@ -143,7 +138,7 @@ namespace internal {
F(ScheduleBreak, 0, 1) \
F(ScriptLocationFromLine2, 4, 1) \
F(SetGeneratorScopeVariableValue, 4, 1) \
- F(IncBlockCounter, 2, 1)
+ I(IncBlockCounter, 2, 1)
#define FOR_EACH_INTRINSIC_FORIN(F, I) \
F(ForInEnumerate, 1, 1) \
@@ -206,12 +201,12 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
F(AccessCheck, 1, 1) \
+ F(AllocateByteArray, 1, 1) \
F(AllocateInYoungGeneration, 1, 1) \
F(AllocateInOldGeneration, 2, 1) \
F(AllocateSeqOneByteString, 1, 1) \
F(AllocateSeqTwoByteString, 1, 1) \
F(AllowDynamicFunction, 1, 1) \
- F(CheckIsBootstrapping, 0, 1) \
I(CreateAsyncFromSyncIterator, 1, 1) \
F(CreateListFromArrayLike, 1, 1) \
F(FatalProcessOutOfMemoryInAllocateRaw, 0, 1) \
@@ -219,7 +214,6 @@ namespace internal {
F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
F(GetTemplateObject, 3, 1) \
F(IncrementUseCounter, 1, 1) \
- F(Interrupt, 0, 1) \
F(BytecodeBudgetInterrupt, 1, 1) \
F(NewReferenceError, 2, 1) \
F(NewSyntaxError, 2, 1) \
@@ -274,7 +268,6 @@ namespace internal {
F(IsValidSmi, 1, 1) \
F(MaxSmi, 0, 1) \
F(NumberToString, 1, 1) \
- F(SmiLexicographicCompare, 2, 1) \
F(StringParseFloat, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringToNumber, 1, 1)
@@ -282,11 +275,12 @@ namespace internal {
#define FOR_EACH_INTRINSIC_OBJECT(F, I) \
F(AddDictionaryProperty, 3, 1) \
F(AddPrivateField, 3, 1) \
+ F(AddPrivateBrand, 2, 1) \
F(AllocateHeapNumber, 0, 1) \
F(ClassOf, 1, 1) \
F(CollectTypeProfile, 3, 1) \
F(CompleteInobjectSlackTrackingForMap, 1, 1) \
- F(CopyDataProperties, 2, 1) \
+ I(CopyDataProperties, 2, 1) \
F(CopyDataPropertiesWithExcludedProperties, -1 /* >= 1 */, 1) \
I(CreateDataProperty, 3, 1) \
I(CreateIterResultObject, 2, 1) \
@@ -295,6 +289,7 @@ namespace internal {
F(DefineGetterPropertyUnchecked, 4, 1) \
F(DefineSetterPropertyUnchecked, 4, 1) \
F(DeleteProperty, 3, 1) \
+ F(GetDerivedMap, 2, 1) \
F(GetFunctionName, 1, 1) \
F(GetOwnPropertyDescriptor, 2, 1) \
F(GetOwnPropertyKeys, 2, 1) \
@@ -327,7 +322,7 @@ namespace internal {
I(ToNumber, 1, 1) \
F(ToNumeric, 1, 1) \
I(ToObject, 1, 1) \
- I(ToString, 1, 1) \
+ I(ToStringRT, 1, 1) \
F(TryMigrateInstance, 1, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F, I) \
@@ -359,11 +354,8 @@ namespace internal {
#define FOR_EACH_INTRINSIC_PROXY(F, I) \
F(CheckProxyGetSetTrapResult, 2, 1) \
- F(CheckProxyHasTrap, 2, 1) \
+ F(CheckProxyHasTrapResult, 2, 1) \
F(GetPropertyWithReceiver, 3, 1) \
- F(IsJSProxy, 1, 1) \
- F(JSProxyGetHandler, 1, 1) \
- F(JSProxyGetTarget, 1, 1) \
F(SetPropertyWithReceiver, 4, 1)
#define FOR_EACH_INTRINSIC_REGEXP(F, I) \
@@ -452,7 +444,6 @@ namespace internal {
F(DisassembleFunction, 1, 1) \
F(FreezeWasmLazyCompilation, 1, 1) \
F(GetCallable, 0, 1) \
- F(GetDeoptCount, 1, 1) \
F(GetInitializerFunction, 1, 1) \
F(GetOptimizationStatus, -1, 1) \
F(GetUndetectable, 0, 1) \
@@ -463,6 +454,7 @@ namespace internal {
F(HasDictionaryElements, 1, 1) \
F(HasPackedElements, 1, 1) \
F(HasDoubleElements, 1, 1) \
+ F(HasElementsInALargeObjectSpace, 1, 1) \
F(HasFastElements, 1, 1) \
F(HasFastProperties, 1, 1) \
F(HasFixedBigInt64Elements, 1, 1) \
@@ -484,7 +476,7 @@ namespace internal {
F(HaveSameMap, 2, 1) \
F(HeapObjectVerify, 1, 1) \
F(ICsAreEnabled, 0, 1) \
- F(InNewSpace, 1, 1) \
+ F(InYoungGeneration, 1, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsConcurrentRecompilationSupported, 0, 1) \
F(WasmTierUpFunction, 2, 1) \
@@ -517,15 +509,14 @@ namespace internal {
F(WasmGetNumberOfInstances, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
F(WasmTraceMemory, 1, 1) \
- F(SetWasmThreadsEnabled, 1, 1)
+ F(SetWasmThreadsEnabled, 1, 1) \
+ F(TurbofanStaticAssert, 1, 1) \
+ F(EnableCodeLoggingForTesting, 0, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
F(ArrayBufferDetach, 1, 1) \
- F(ArrayBufferViewWasDetached, 1, 1) \
- I(IsTypedArray, 1, 1) \
F(TypedArrayCopyElements, 3, 1) \
F(TypedArrayGetBuffer, 1, 1) \
- F(TypedArrayGetLength, 1, 1) \
F(TypedArraySet, 2, 1) \
F(TypedArraySortFast, 1, 1)
@@ -542,10 +533,13 @@ namespace internal {
F(WasmStackGuard, 0, 1) \
F(WasmThrowCreate, 2, 1) \
F(WasmThrowTypeError, 0, 1) \
+ F(WasmRefFunc, 1, 1) \
F(WasmFunctionTableGet, 3, 1) \
F(WasmFunctionTableSet, 4, 1) \
F(WasmTableInit, 5, 1) \
F(WasmTableCopy, 5, 1) \
+ F(WasmTableGrow, 3, 1) \
+ F(WasmTableFill, 4, 1) \
F(WasmIndirectCallCheckSignatureAndGetTargetInstance, 3, 1) \
F(WasmIndirectCallGetTargetAddress, 2, 1) \
F(WasmIsValidAnyFuncValue, 1, 1) \
@@ -689,6 +683,10 @@ class Runtime : public AllStatic {
// sentinel, always.
static bool IsNonReturning(FunctionId id);
+ // Check if a runtime function with the given {id} may trigger a heap
+ // allocation.
+ static bool MayAllocate(FunctionId id);
+
// Get the intrinsic function with the given name.
static const Function* FunctionForName(const unsigned char* name, int length);
diff --git a/deps/v8/src/s390/OWNERS b/deps/v8/src/s390/OWNERS
deleted file mode 100644
index 6d1a8fc472..0000000000
--- a/deps/v8/src/s390/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-jyan@ca.ibm.com
-joransiu@ca.ibm.com
-michael_dawson@ca.ibm.com
-miladfar@ca.ibm.com \ No newline at end of file
diff --git a/deps/v8/src/sanitizer/OWNERS b/deps/v8/src/sanitizer/OWNERS
new file mode 100644
index 0000000000..25abe6c3b1
--- /dev/null
+++ b/deps/v8/src/sanitizer/OWNERS
@@ -0,0 +1,3 @@
+file://INFRA_OWNERS
+
+clemensh@chromium.org
diff --git a/deps/v8/src/asan.h b/deps/v8/src/sanitizer/asan.h
index 0713392672..0381e5a4c4 100644
--- a/deps/v8/src/asan.h
+++ b/deps/v8/src/sanitizer/asan.h
@@ -4,11 +4,11 @@
// AddressSanitizer support.
-#ifndef V8_ASAN_H_
-#define V8_ASAN_H_
+#ifndef V8_SANITIZER_ASAN_H_
+#define V8_SANITIZER_ASAN_H_
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#ifdef V8_USE_ADDRESS_SANITIZER
@@ -25,4 +25,4 @@
#endif // V8_USE_ADDRESS_SANITIZER
-#endif // V8_ASAN_H_
+#endif // V8_SANITIZER_ASAN_H_
diff --git a/deps/v8/src/base/lsan-page-allocator.cc b/deps/v8/src/sanitizer/lsan-page-allocator.cc
index 4840c7ea80..68b1f130b1 100644
--- a/deps/v8/src/base/lsan-page-allocator.cc
+++ b/deps/v8/src/sanitizer/lsan-page-allocator.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/base/lsan-page-allocator.h"
+#include "src/sanitizer/lsan-page-allocator.h"
#include "src/base/logging.h"
diff --git a/deps/v8/src/base/lsan-page-allocator.h b/deps/v8/src/sanitizer/lsan-page-allocator.h
index d95c7fbf1e..f348ee14f6 100644
--- a/deps/v8/src/base/lsan-page-allocator.h
+++ b/deps/v8/src/sanitizer/lsan-page-allocator.h
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BASE_LSAN_PAGE_ALLOCATOR_H_
-#define V8_BASE_LSAN_PAGE_ALLOCATOR_H_
+#ifndef V8_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
+#define V8_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
#include "include/v8-platform.h"
-#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
namespace v8 {
@@ -15,10 +14,9 @@ namespace base {
// This is a v8::PageAllocator implementation that decorates provided page
// allocator object with leak sanitizer notifications when LEAK_SANITIZER
// is defined.
-class V8_BASE_EXPORT LsanPageAllocator
- : public NON_EXPORTED_BASE(::v8::PageAllocator) {
+class LsanPageAllocator : public v8::PageAllocator {
public:
- LsanPageAllocator(v8::PageAllocator* page_allocator);
+ explicit LsanPageAllocator(v8::PageAllocator* page_allocator);
~LsanPageAllocator() override = default;
size_t AllocatePageSize() override { return allocate_page_size_; }
@@ -53,4 +51,4 @@ class V8_BASE_EXPORT LsanPageAllocator
} // namespace base
} // namespace v8
-#endif // V8_BASE_LSAN_PAGE_ALLOCATOR_H_
+#endif // V8_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
diff --git a/deps/v8/src/msan.h b/deps/v8/src/sanitizer/msan.h
index d3e79160cb..01e774e7e4 100644
--- a/deps/v8/src/msan.h
+++ b/deps/v8/src/sanitizer/msan.h
@@ -4,11 +4,11 @@
// MemorySanitizer support.
-#ifndef V8_MSAN_H_
-#define V8_MSAN_H_
+#ifndef V8_SANITIZER_MSAN_H_
+#define V8_SANITIZER_MSAN_H_
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#ifdef V8_USE_MEMORY_SANITIZER
@@ -33,4 +33,4 @@
#endif // V8_USE_MEMORY_SANITIZER
-#endif // V8_MSAN_H_
+#endif // V8_SANITIZER_MSAN_H_
diff --git a/deps/v8/src/base/tsan.h b/deps/v8/src/sanitizer/tsan.h
index 7cf68a6a64..0013b91bfc 100644
--- a/deps/v8/src/base/tsan.h
+++ b/deps/v8/src/sanitizer/tsan.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BASE_TSAN_H_
-#define V8_BASE_TSAN_H_
+#ifndef V8_SANITIZER_TSAN_H_
+#define V8_SANITIZER_TSAN_H_
namespace v8 {
namespace base {
@@ -44,4 +44,4 @@ void AnnotateIgnoreWritesEnd(const char* file, int line);
} // namespace base
} // namespace v8
-#endif // V8_BASE_TSAN_H_
+#endif // V8_SANITIZER_TSAN_H_
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index da60ea189d..b4e75a6c20 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -4,17 +4,17 @@
#include "src/snapshot/code-serializer.h"
-#include "src/counters.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
#include "src/heap/heap-inl.h"
-#include "src/log.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
+#include "src/objects/visitors.h"
#include "src/snapshot/object-deserializer.h"
#include "src/snapshot/snapshot.h"
-#include "src/version.h"
-#include "src/visitors.h"
+#include "src/utils/version.h"
namespace v8 {
namespace internal {
@@ -50,15 +50,13 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
Handle<Script> script(Script::cast(info->script()), isolate);
if (FLAG_trace_serializer) {
PrintF("[Serializing from");
- script->name()->ShortPrint();
+ script->name().ShortPrint();
PrintF("]\n");
}
// TODO(7110): Enable serialization of Asm modules once the AsmWasmData is
// context independent.
if (script->ContainsAsmModule()) return nullptr;
- isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
-
// Serialize code object.
Handle<String> source(String::cast(script->source()), isolate);
CodeSerializer cs(isolate, SerializedCodeData::SourceHash(
@@ -104,7 +102,7 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
// For objects in RO_SPACE, never serialize the object, but instead create a
// back reference that encodes the page number as the chunk_index and the
// offset within the page as the chunk_offset.
- Address address = obj->address();
+ Address address = obj.address();
Page* page = Page::FromAddress(address);
uint32_t chunk_index = 0;
for (Page* p : *read_only_space) {
@@ -114,7 +112,7 @@ bool CodeSerializer::SerializeReadOnlyObject(HeapObject obj) {
uint32_t chunk_offset = static_cast<uint32_t>(page->Offset(address));
SerializerReference back_reference =
SerializerReference::BackReference(RO_SPACE, chunk_index, chunk_offset);
- reference_map()->Add(reinterpret_cast<void*>(obj->ptr()), back_reference);
+ reference_map()->Add(reinterpret_cast<void*>(obj.ptr()), back_reference);
CHECK(SerializeBackReference(obj));
return true;
}
@@ -128,60 +126,60 @@ void CodeSerializer::SerializeObject(HeapObject obj) {
if (SerializeReadOnlyObject(obj)) return;
- CHECK(!obj->IsCode());
+ CHECK(!obj.IsCode());
ReadOnlyRoots roots(isolate());
if (ElideObject(obj)) {
return SerializeObject(roots.undefined_value());
}
- if (obj->IsScript()) {
+ if (obj.IsScript()) {
Script script_obj = Script::cast(obj);
- DCHECK_NE(script_obj->compilation_type(), Script::COMPILATION_TYPE_EVAL);
+ DCHECK_NE(script_obj.compilation_type(), Script::COMPILATION_TYPE_EVAL);
// We want to differentiate between undefined and uninitialized_symbol for
// context_data for now. It is hack to allow debugging for scripts that are
// included as a part of custom snapshot. (see debug::Script::IsEmbedded())
- Object context_data = script_obj->context_data();
+ Object context_data = script_obj.context_data();
if (context_data != roots.undefined_value() &&
context_data != roots.uninitialized_symbol()) {
- script_obj->set_context_data(roots.undefined_value());
+ script_obj.set_context_data(roots.undefined_value());
}
// We don't want to serialize host options to avoid serializing unnecessary
// object graph.
- FixedArray host_options = script_obj->host_defined_options();
- script_obj->set_host_defined_options(roots.empty_fixed_array());
+ FixedArray host_options = script_obj.host_defined_options();
+ script_obj.set_host_defined_options(roots.empty_fixed_array());
SerializeGeneric(obj);
- script_obj->set_host_defined_options(host_options);
- script_obj->set_context_data(context_data);
+ script_obj.set_host_defined_options(host_options);
+ script_obj.set_context_data(context_data);
return;
}
- if (obj->IsSharedFunctionInfo()) {
+ if (obj.IsSharedFunctionInfo()) {
SharedFunctionInfo sfi = SharedFunctionInfo::cast(obj);
// TODO(7110): Enable serializing of Asm modules once the AsmWasmData
// is context independent.
- DCHECK(!sfi->IsApiFunction() && !sfi->HasAsmWasmData());
+ DCHECK(!sfi.IsApiFunction() && !sfi.HasAsmWasmData());
DebugInfo debug_info;
BytecodeArray debug_bytecode_array;
- if (sfi->HasDebugInfo()) {
+ if (sfi.HasDebugInfo()) {
// Clear debug info.
- debug_info = sfi->GetDebugInfo();
- if (debug_info->HasInstrumentedBytecodeArray()) {
- debug_bytecode_array = debug_info->DebugBytecodeArray();
- sfi->SetDebugBytecodeArray(debug_info->OriginalBytecodeArray());
+ debug_info = sfi.GetDebugInfo();
+ if (debug_info.HasInstrumentedBytecodeArray()) {
+ debug_bytecode_array = debug_info.DebugBytecodeArray();
+ sfi.SetDebugBytecodeArray(debug_info.OriginalBytecodeArray());
}
- sfi->set_script_or_debug_info(debug_info->script());
+ sfi.set_script_or_debug_info(debug_info.script());
}
- DCHECK(!sfi->HasDebugInfo());
+ DCHECK(!sfi.HasDebugInfo());
SerializeGeneric(obj);
// Restore debug info
if (!debug_info.is_null()) {
- sfi->set_script_or_debug_info(debug_info);
+ sfi.set_script_or_debug_info(debug_info);
if (!debug_bytecode_array.is_null()) {
- sfi->SetDebugBytecodeArray(debug_bytecode_array);
+ sfi.SetDebugBytecodeArray(debug_bytecode_array);
}
}
return;
@@ -194,24 +192,24 @@ void CodeSerializer::SerializeObject(HeapObject obj) {
// --interpreted-frames-native-stack is on. See v8:9122 for more context
#ifndef V8_TARGET_ARCH_ARM
if (V8_UNLIKELY(FLAG_interpreted_frames_native_stack) &&
- obj->IsInterpreterData()) {
- obj = InterpreterData::cast(obj)->bytecode_array();
+ obj.IsInterpreterData()) {
+ obj = InterpreterData::cast(obj).bytecode_array();
}
#endif // V8_TARGET_ARCH_ARM
- if (obj->IsBytecodeArray()) {
+ if (obj.IsBytecodeArray()) {
// Clear the stack frame cache if present
- BytecodeArray::cast(obj)->ClearFrameCacheFromSourcePositionTable();
+ BytecodeArray::cast(obj).ClearFrameCacheFromSourcePositionTable();
}
// Past this point we should not see any (context-specific) maps anymore.
- CHECK(!obj->IsMap());
+ CHECK(!obj.IsMap());
// There should be no references to the global object embedded.
- CHECK(!obj->IsJSGlobalProxy() && !obj->IsJSGlobalObject());
+ CHECK(!obj.IsJSGlobalProxy() && !obj.IsJSGlobalObject());
// Embedded FixedArrays that need rehashing must support rehashing.
- CHECK_IMPLIES(obj->NeedsRehashing(), obj->CanBeRehashed());
+ CHECK_IMPLIES(obj.NeedsRehashing(), obj.CanBeRehashed());
// We expect no instantiated function objects or contexts.
- CHECK(!obj->IsJSFunction() && !obj->IsContext());
+ CHECK(!obj.IsJSFunction() && !obj.IsContext());
SerializeGeneric(obj);
}
@@ -233,13 +231,13 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
Script script = Script::cast(sfi->script());
Handle<Script> script_handle(script, isolate);
String name = ReadOnlyRoots(isolate).empty_string();
- if (script->name()->IsString()) name = String::cast(script->name());
+ if (script.name().IsString()) name = String::cast(script.name());
Handle<String> name_handle(name, isolate);
SharedFunctionInfo::ScriptIterator iter(isolate, script);
for (SharedFunctionInfo info = iter.Next(); !info.is_null();
info = iter.Next()) {
- if (!info->HasBytecodeArray()) continue;
+ if (!info.HasBytecodeArray()) continue;
Handle<Code> code = isolate->factory()->CopyCode(Handle<Code>::cast(
isolate->factory()->interpreter_entry_trampoline_for_profiling()));
@@ -247,15 +245,15 @@ void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
Handle<InterpreterData>::cast(isolate->factory()->NewStruct(
INTERPRETER_DATA_TYPE, AllocationType::kOld));
- interpreter_data->set_bytecode_array(info->GetBytecodeArray());
+ interpreter_data->set_bytecode_array(info.GetBytecodeArray());
interpreter_data->set_interpreter_trampoline(*code);
- info->set_interpreter_data(*interpreter_data);
+ info.set_interpreter_data(*interpreter_data);
if (!log_code_creation) continue;
Handle<AbstractCode> abstract_code = Handle<AbstractCode>::cast(code);
- int line_num = script->GetLineNumber(info->StartPosition()) + 1;
- int column_num = script->GetColumnNumber(info->StartPosition()) + 1;
+ int line_num = script.GetLineNumber(info.StartPosition()) + 1;
+ int column_num = script.GetColumnNumber(info.StartPosition()) + 1;
PROFILE(isolate,
CodeCreateEvent(CodeEventListener::INTERPRETED_FUNCTION_TAG,
*abstract_code, info, *name_handle, line_num,
@@ -320,6 +318,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
? String::cast(script->name())
: ReadOnlyRoots(isolate).empty_string(),
isolate);
+
if (FLAG_log_function_events) {
LOG(isolate,
FunctionEvent("deserialize", script->id(),
@@ -328,15 +327,16 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
}
if (log_code_creation) {
Script::InitLineEnds(script);
+
DisallowHeapAllocation no_gc;
SharedFunctionInfo::ScriptIterator iter(isolate, *script);
for (i::SharedFunctionInfo info = iter.Next(); !info.is_null();
info = iter.Next()) {
- if (info->is_compiled()) {
- int line_num = script->GetLineNumber(info->StartPosition()) + 1;
- int column_num = script->GetColumnNumber(info->StartPosition()) + 1;
+ if (info.is_compiled()) {
+ int line_num = script->GetLineNumber(info.StartPosition()) + 1;
+ int column_num = script->GetColumnNumber(info.StartPosition()) + 1;
PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
- info->abstract_code(), info, *name,
+ info.abstract_code(), info, *name,
line_num, column_num));
}
}
diff --git a/deps/v8/src/snapshot/deserializer-allocator.cc b/deps/v8/src/snapshot/deserializer-allocator.cc
index 7862865d43..4fb600d1dd 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.cc
+++ b/deps/v8/src/snapshot/deserializer-allocator.cc
@@ -29,7 +29,7 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
AllocationResult result = lo_space->AllocateRaw(size);
HeapObject obj = result.ToObjectChecked();
deserialized_large_objects_.push_back(obj);
- return obj->address();
+ return obj.address();
} else if (space == MAP_SPACE) {
DCHECK_EQ(Map::kSize, size);
return allocated_maps_[next_map_index_++];
@@ -44,7 +44,10 @@ Address DeserializerAllocator::AllocateRaw(AllocationSpace space, int size) {
int chunk_index = current_chunk_[space];
DCHECK_LE(high_water_[space], reservation[chunk_index].end);
#endif
- if (space == CODE_SPACE) SkipList::Update(address, size);
+ if (space == CODE_SPACE)
+ MemoryChunk::FromAddress(address)
+ ->GetCodeObjectRegistry()
+ ->RegisterNewlyAllocatedCodeObject(address);
return address;
}
}
@@ -60,11 +63,11 @@ Address DeserializerAllocator::Allocate(AllocationSpace space, int size) {
// If one of the following assertions fails, then we are deserializing an
// aligned object when the filler maps have not been deserialized yet.
// We require filler maps as padding to align the object.
- DCHECK(ReadOnlyRoots(heap_).free_space_map()->IsMap());
- DCHECK(ReadOnlyRoots(heap_).one_pointer_filler_map()->IsMap());
- DCHECK(ReadOnlyRoots(heap_).two_pointer_filler_map()->IsMap());
+ DCHECK(ReadOnlyRoots(heap_).free_space_map().IsMap());
+ DCHECK(ReadOnlyRoots(heap_).one_pointer_filler_map().IsMap());
+ DCHECK(ReadOnlyRoots(heap_).two_pointer_filler_map().IsMap());
obj = heap_->AlignWithFiller(obj, size, reserved, next_alignment_);
- address = obj->address();
+ address = obj.address();
next_alignment_ = kWordAligned;
return address;
} else {
@@ -103,7 +106,7 @@ HeapObject DeserializerAllocator::GetObject(AllocationSpace space,
if (next_alignment_ != kWordAligned) {
int padding = Heap::GetFillToAlign(address, next_alignment_);
next_alignment_ = kWordAligned;
- DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
+ DCHECK(padding == 0 || HeapObject::FromAddress(address).IsFiller());
address += padding;
}
return HeapObject::FromAddress(address);
diff --git a/deps/v8/src/snapshot/deserializer-allocator.h b/deps/v8/src/snapshot/deserializer-allocator.h
index 56bd4d1b0e..27cacc79d5 100644
--- a/deps/v8/src/snapshot/deserializer-allocator.h
+++ b/deps/v8/src/snapshot/deserializer-allocator.h
@@ -5,7 +5,7 @@
#ifndef V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
#define V8_SNAPSHOT_DESERIALIZER_ALLOCATOR_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/objects/heap-object.h"
#include "src/snapshot/serializer-common.h"
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 3759c53c21..1fd590db26 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -4,24 +4,24 @@
#include "src/snapshot/deserializer.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
-#include "src/log.h"
-#include "src/objects-body-descriptors-inl.h"
+#include "src/logging/log.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/string.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
@@ -71,7 +71,7 @@ void Deserializer::Initialize(Isolate* isolate) {
void Deserializer::Rehash() {
DCHECK(can_rehash() || deserializing_user_code());
for (HeapObject item : to_rehash_) {
- item->RehashBasedOnMap(ReadOnlyRoots(isolate_));
+ item.RehashBasedOnMap(ReadOnlyRoots(isolate_));
}
}
@@ -117,7 +117,7 @@ void Deserializer::DeserializeDeferredObjects() {
DCHECK_EQ(code - space, kNewObject);
HeapObject object = GetBackReferencedObject(space);
int size = source_.GetInt() << kTaggedSizeLog2;
- Address obj_address = object->address();
+ Address obj_address = object.address();
// Object's map is already initialized, now read the rest.
MaybeObjectSlot start(obj_address + kTaggedSize);
MaybeObjectSlot end(obj_address + size);
@@ -154,28 +154,26 @@ void Deserializer::LogNewMapEvents() {
void Deserializer::LogScriptEvents(Script script) {
DisallowHeapAllocation no_gc;
LOG(isolate_,
- ScriptEvent(Logger::ScriptEventType::kDeserialize, script->id()));
+ ScriptEvent(Logger::ScriptEventType::kDeserialize, script.id()));
LOG(isolate_, ScriptDetails(script));
TRACE_EVENT_OBJECT_CREATED_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "Script",
- TRACE_ID_WITH_SCOPE("v8::internal::Script", script->id()));
+ TRACE_ID_WITH_SCOPE("v8::internal::Script", script.id()));
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("v8.compile"), "Script",
- TRACE_ID_WITH_SCOPE("v8::internal::Script", script->id()),
- script->ToTracedValue());
+ TRACE_ID_WITH_SCOPE("v8::internal::Script", script.id()),
+ script.ToTracedValue());
}
StringTableInsertionKey::StringTableInsertionKey(String string)
- : StringTableKey(ComputeHashField(string)), string_(string) {
- DCHECK(string->IsInternalizedString());
+ : StringTableKey(ComputeHashField(string), string.length()),
+ string_(string) {
+ DCHECK(string.IsInternalizedString());
}
-bool StringTableInsertionKey::IsMatch(Object string) {
- // We know that all entries in a hash table had their hash keys created.
- // Use that knowledge to have fast failure.
- if (Hash() != String::cast(string)->Hash()) return false;
- // We want to compare the content of two internalized strings here.
- return string_->SlowEquals(String::cast(string));
+bool StringTableInsertionKey::IsMatch(String string) {
+ // We want to compare the content of two strings here.
+ return string_.SlowEquals(string);
}
Handle<String> StringTableInsertionKey::AsHandle(Isolate* isolate) {
@@ -184,44 +182,58 @@ Handle<String> StringTableInsertionKey::AsHandle(Isolate* isolate) {
uint32_t StringTableInsertionKey::ComputeHashField(String string) {
// Make sure hash_field() is computed.
- string->Hash();
- return string->hash_field();
+ string.Hash();
+ return string.hash_field();
}
+namespace {
+
+String ForwardStringIfExists(Isolate* isolate, StringTableInsertionKey* key) {
+ StringTable table = isolate->heap()->string_table();
+ int entry = table.FindEntry(isolate, key);
+ if (entry == kNotFound) return String();
+
+ String canonical = String::cast(table.KeyAt(entry));
+ DCHECK_NE(canonical, key->string());
+ key->string().MakeThin(isolate, canonical);
+ return canonical;
+}
+
+} // namespace
+
HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
- if (obj->IsString()) {
+ if (obj.IsString()) {
// Uninitialize hash field as we need to recompute the hash.
String string = String::cast(obj);
- string->set_hash_field(String::kEmptyHashField);
+ string.set_hash_field(String::kEmptyHashField);
// Rehash strings before read-only space is sealed. Strings outside
// read-only space are rehashed lazily. (e.g. when rehashing dictionaries)
if (space == RO_SPACE) {
to_rehash_.push_back(obj);
}
- } else if (obj->NeedsRehashing()) {
+ } else if (obj.NeedsRehashing()) {
to_rehash_.push_back(obj);
}
}
if (deserializing_user_code()) {
- if (obj->IsString()) {
+ if (obj.IsString()) {
String string = String::cast(obj);
- if (string->IsInternalizedString()) {
+ if (string.IsInternalizedString()) {
// Canonicalize the internalized string. If it already exists in the
// string table, set it to forward to the existing one.
StringTableInsertionKey key(string);
- String canonical =
- StringTable::ForwardStringIfExists(isolate_, &key, string);
+ String canonical = ForwardStringIfExists(isolate_, &key);
if (!canonical.is_null()) return canonical;
new_internalized_strings_.push_back(handle(string, isolate_));
return string;
}
- } else if (obj->IsScript()) {
+ } else if (obj.IsScript()) {
new_scripts_.push_back(handle(Script::cast(obj), isolate_));
- } else if (obj->IsAllocationSite()) {
+ } else if (obj.IsAllocationSite()) {
// We should link new allocation sites, but we can't do this immediately
// because |AllocationSite::HasWeakNext()| internally accesses
// |Heap::roots_| that may not have been initialized yet. So defer this to
@@ -231,93 +243,86 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, int space) {
DCHECK(CanBeDeferred(obj));
}
}
- if (obj->IsScript()) {
+ if (obj.IsScript()) {
LogScriptEvents(Script::cast(obj));
- } else if (obj->IsCode()) {
+ } else if (obj.IsCode()) {
// We flush all code pages after deserializing the startup snapshot.
// Hence we only remember each individual code object when deserializing
// user code.
if (deserializing_user_code() || space == LO_SPACE) {
new_code_objects_.push_back(Code::cast(obj));
}
- } else if (FLAG_trace_maps && obj->IsMap()) {
+ } else if (FLAG_trace_maps && obj.IsMap()) {
// Keep track of all seen Maps to log them later since they might be only
// partially initialized at this point.
new_maps_.push_back(Map::cast(obj));
- } else if (obj->IsAccessorInfo()) {
+ } else if (obj.IsAccessorInfo()) {
#ifdef USE_SIMULATOR
accessor_infos_.push_back(AccessorInfo::cast(obj));
#endif
- } else if (obj->IsCallHandlerInfo()) {
+ } else if (obj.IsCallHandlerInfo()) {
#ifdef USE_SIMULATOR
call_handler_infos_.push_back(CallHandlerInfo::cast(obj));
#endif
- } else if (obj->IsExternalString()) {
- if (obj->map() == ReadOnlyRoots(isolate_).native_source_string_map()) {
+ } else if (obj.IsExternalString()) {
+ if (obj.map() == ReadOnlyRoots(isolate_).native_source_string_map()) {
ExternalOneByteString string = ExternalOneByteString::cast(obj);
- DCHECK(string->is_uncached());
- string->SetResource(
+ DCHECK(string.is_uncached());
+ string.SetResource(
isolate_, NativesExternalStringResource::DecodeForDeserialization(
- string->resource()));
+ string.resource()));
} else {
ExternalString string = ExternalString::cast(obj);
- uint32_t index = string->resource_as_uint32();
+ uint32_t index = string.resource_as_uint32();
Address address =
static_cast<Address>(isolate_->api_external_references()[index]);
- string->set_address_as_resource(address);
+ string.set_address_as_resource(address);
isolate_->heap()->UpdateExternalString(string, 0,
- string->ExternalPayloadSize());
+ string.ExternalPayloadSize());
}
isolate_->heap()->RegisterExternalString(String::cast(obj));
- } else if (obj->IsJSTypedArray()) {
+ } else if (obj.IsJSDataView()) {
+ JSDataView data_view = JSDataView::cast(obj);
+ JSArrayBuffer buffer = JSArrayBuffer::cast(data_view.buffer());
+ data_view.set_data_pointer(
+ reinterpret_cast<uint8_t*>(buffer.backing_store()) +
+ data_view.byte_offset());
+ } else if (obj.IsJSTypedArray()) {
JSTypedArray typed_array = JSTypedArray::cast(obj);
- CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
- int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
- if (byte_offset > 0) {
- FixedTypedArrayBase elements =
- FixedTypedArrayBase::cast(typed_array->elements());
- // Must be off-heap layout.
- DCHECK(!typed_array->is_on_heap());
-
- void* pointer_with_offset = reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(elements->external_pointer()) +
- byte_offset);
- elements->set_external_pointer(pointer_with_offset);
+ // Only fixup for the off-heap case.
+ if (!typed_array.is_on_heap()) {
+ Smi store_index(
+ reinterpret_cast<Address>(typed_array.external_pointer()));
+ byte* backing_store = off_heap_backing_stores_[store_index.value()] +
+ typed_array.byte_offset();
+ typed_array.set_external_pointer(backing_store);
}
- } else if (obj->IsJSArrayBuffer()) {
+ } else if (obj.IsJSArrayBuffer()) {
JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
// Only fixup for the off-heap case.
- if (buffer->backing_store() != nullptr) {
- Smi store_index(reinterpret_cast<Address>(buffer->backing_store()));
- void* backing_store = off_heap_backing_stores_[store_index->value()];
+ if (buffer.backing_store() != nullptr) {
+ Smi store_index(reinterpret_cast<Address>(buffer.backing_store()));
+ void* backing_store = off_heap_backing_stores_[store_index.value()];
- buffer->set_backing_store(backing_store);
+ buffer.set_backing_store(backing_store);
isolate_->heap()->RegisterNewArrayBuffer(buffer);
}
- } else if (obj->IsFixedTypedArrayBase()) {
- FixedTypedArrayBase fta = FixedTypedArrayBase::cast(obj);
- // Only fixup for the off-heap case.
- if (fta->base_pointer() == Smi::kZero) {
- Smi store_index(reinterpret_cast<Address>(fta->external_pointer()));
- void* backing_store = off_heap_backing_stores_[store_index->value()];
- fta->set_external_pointer(backing_store);
- }
- } else if (obj->IsBytecodeArray()) {
+ } else if (obj.IsBytecodeArray()) {
// TODO(mythria): Remove these once we store the default values for these
// fields in the serializer.
BytecodeArray bytecode_array = BytecodeArray::cast(obj);
- bytecode_array->set_osr_loop_nesting_level(0);
+ bytecode_array.set_osr_loop_nesting_level(0);
}
#ifdef DEBUG
- if (obj->IsDescriptorArray()) {
+ if (obj.IsDescriptorArray()) {
DescriptorArray descriptor_array = DescriptorArray::cast(obj);
- DCHECK_EQ(0, descriptor_array->raw_number_of_marked_descriptors());
+ DCHECK_EQ(0, descriptor_array.raw_number_of_marked_descriptors());
}
#endif
// Check alignment.
- DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
- HeapObject::RequiredAlignment(obj->map())));
+ DCHECK_EQ(0, Heap::GetFillToAlign(obj.address(),
+ HeapObject::RequiredAlignment(obj.map())));
return obj;
}
@@ -356,12 +361,12 @@ HeapObject Deserializer::GetBackReferencedObject(int space) {
}
}
- if (deserializing_user_code() && obj->IsThinString()) {
- obj = ThinString::cast(obj)->actual();
+ if (deserializing_user_code() && obj.IsThinString()) {
+ obj = ThinString::cast(obj).actual();
}
hot_objects_.Add(obj);
- DCHECK(!HasWeakHeapObjectTag(obj->ptr()));
+ DCHECK(!HasWeakHeapObjectTag(obj));
return obj;
}
@@ -393,7 +398,7 @@ HeapObject Deserializer::ReadObject(int space_number) {
}
#ifdef DEBUG
- if (obj->IsCode()) {
+ if (obj.IsCode()) {
DCHECK(space_number == CODE_SPACE || space_number == CODE_LO_SPACE);
} else {
DCHECK(space_number != CODE_SPACE && space_number != CODE_LO_SPACE);
@@ -424,7 +429,7 @@ void Deserializer::ReadCodeObjectBody(int space_number,
void Deserializer::VisitCodeTarget(Code host, RelocInfo* rinfo) {
HeapObject object = ReadObject();
- rinfo->set_target_address(Code::cast(object)->raw_instruction_start());
+ rinfo->set_target_address(Code::cast(object).raw_instruction_start());
}
void Deserializer::VisitEmbeddedPointer(Code host, RelocInfo* rinfo) {
@@ -460,8 +465,8 @@ void Deserializer::VisitInternalReference(Code host, RelocInfo* rinfo) {
// Internal reference target is encoded as an offset from code entry.
int target_offset = source_.GetInt();
DCHECK_LT(static_cast<unsigned>(target_offset),
- static_cast<unsigned>(host->raw_instruction_size()));
- Address target = host->entry() + target_offset;
+ static_cast<unsigned>(host.raw_instruction_size()));
+ Address target = host.entry() + target_offset;
Assembler::deserialization_set_target_internal_reference_at(
rinfo->pc(), target, rinfo->rmode());
}
@@ -619,7 +624,7 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, int source_space,
HeapObject obj = HeapObject::FromAddress(current_object_address);
// If the deferred object is a map, its instance type may be used
// during deserialization. Initialize it with a temporary value.
- if (obj->IsMap()) Map::cast(obj)->set_instance_type(FILLER_TYPE);
+ if (obj.IsMap()) Map::cast(obj).set_instance_type(FILLER_TYPE);
current = limit;
return false;
}
@@ -814,7 +819,7 @@ TSlot Deserializer::ReadDataCase(Isolate* isolate, TSlot current,
} else if (bytecode == kReadOnlyObjectCache) {
int cache_index = source_.GetInt();
heap_object = HeapObject::cast(
- isolate->heap()->read_only_heap()->read_only_object_cache()->at(
+ isolate->heap()->read_only_heap()->cached_read_only_object(
cache_index));
DCHECK(!Heap::InYoungGeneration(heap_object));
emit_write_barrier = false;
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 86536ca81c..6e3f497d38 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -186,14 +186,16 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
};
// Used to insert a deserialized internalized string into the string table.
-class StringTableInsertionKey : public StringTableKey {
+class StringTableInsertionKey final : public StringTableKey {
public:
explicit StringTableInsertionKey(String string);
- bool IsMatch(Object string) override;
+ bool IsMatch(String string) override;
V8_WARN_UNUSED_RESULT Handle<String> AsHandle(Isolate* isolate) override;
+ String string() const { return string_; }
+
private:
uint32_t ComputeHashField(String string);
diff --git a/deps/v8/src/snapshot/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded-file-writer.cc
deleted file mode 100644
index 3ead35bd9a..0000000000
--- a/deps/v8/src/snapshot/embedded-file-writer.cc
+++ /dev/null
@@ -1,843 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/snapshot/embedded-file-writer.h"
-
-#include <algorithm>
-#include <cinttypes>
-
-#include "src/objects/code-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// V8_CC_MSVC is true for both MSVC and clang on windows. clang can handle
-// __asm__-style inline assembly but MSVC cannot, and thus we need a more
-// precise compiler detection that can distinguish between the two. clang on
-// windows sets both __clang__ and _MSC_VER, MSVC sets only _MSC_VER.
-#if defined(_MSC_VER) && !defined(__clang__)
-#define V8_COMPILER_IS_MSVC
-#endif
-
-// MSVC uses MASM for x86 and x64, while it has a ARMASM for ARM32 and
-// ARMASM64 for ARM64. Since ARMASM and ARMASM64 accept a slightly tweaked
-// version of ARM assembly language, they are referred to together in Visual
-// Studio project files as MARMASM.
-//
-// ARM assembly language docs:
-// http://infocenter.arm.com/help/topic/com.arm.doc.dui0802b/index.html
-// Microsoft ARM assembler and assembly language docs:
-// https://docs.microsoft.com/en-us/cpp/assembler/arm/arm-assembler-reference
-#if defined(V8_COMPILER_IS_MSVC)
-#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_ARM)
-#define V8_ASSEMBLER_IS_MARMASM
-#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
-#define V8_ASSEMBLER_IS_MASM
-#else
-#error Unknown Windows assembler target architecture.
-#endif
-#endif
-
-// Name mangling.
-// Symbols are prefixed with an underscore on 32-bit architectures.
-#if defined(V8_TARGET_OS_WIN) && !defined(V8_TARGET_ARCH_X64) && \
- !defined(V8_TARGET_ARCH_ARM64)
-#define SYMBOL_PREFIX "_"
-#else
-#define SYMBOL_PREFIX ""
-#endif
-
-// Platform-independent bits.
-// -----------------------------------------------------------------------------
-
-namespace {
-
-DataDirective PointerSizeDirective() {
- if (kSystemPointerSize == 8) {
- return kQuad;
- } else {
- CHECK_EQ(4, kSystemPointerSize);
- return kLong;
- }
-}
-
-} // namespace
-
-const char* DirectiveAsString(DataDirective directive) {
-#if defined(V8_TARGET_OS_WIN) && defined(V8_ASSEMBLER_IS_MASM)
- switch (directive) {
- case kByte:
- return "BYTE";
- case kLong:
- return "DWORD";
- case kQuad:
- return "QWORD";
- default:
- UNREACHABLE();
- }
-#elif defined(V8_TARGET_OS_WIN) && defined(V8_ASSEMBLER_IS_MARMASM)
- switch (directive) {
- case kByte:
- return "DCB";
- case kLong:
- return "DCDU";
- case kQuad:
- return "DCQU";
- default:
- UNREACHABLE();
- }
-#elif defined(V8_OS_AIX)
- switch (directive) {
- case kByte:
- return ".byte";
- case kLong:
- return ".long";
- case kQuad:
- return ".llong";
- default:
- UNREACHABLE();
- }
-#else
- switch (directive) {
- case kByte:
- return ".byte";
- case kLong:
- return ".long";
- case kQuad:
- return ".quad";
- case kOcta:
- return ".octa";
- }
- UNREACHABLE();
-#endif
-}
-
-void EmbeddedFileWriter::PrepareBuiltinSourcePositionMap(Builtins* builtins) {
- for (int i = 0; i < Builtins::builtin_count; i++) {
- // Retrieve the SourcePositionTable and copy it.
- Code code = builtins->builtin(i);
- // Verify that the code object is still the "real code" and not a
- // trampoline (which wouldn't have source positions).
- DCHECK(!code->is_off_heap_trampoline());
- std::vector<unsigned char> data(
- code->SourcePositionTable()->GetDataStartAddress(),
- code->SourcePositionTable()->GetDataEndAddress());
- source_positions_[i] = data;
- }
-}
-
-#if defined(V8_OS_WIN_X64)
-std::string EmbeddedFileWriter::BuiltinsUnwindInfoLabel() const {
- char embedded_blob_data_symbol[kTemporaryStringLength];
- i::SNPrintF(i::Vector<char>(embedded_blob_data_symbol),
- "%s_Builtins_UnwindInfo", embedded_variant_);
- return embedded_blob_data_symbol;
-}
-
-void EmbeddedFileWriter::SetBuiltinUnwindData(
- int builtin_index, const win64_unwindinfo::BuiltinUnwindInfo& unwind_info) {
- DCHECK_LT(builtin_index, Builtins::builtin_count);
- unwind_infos_[builtin_index] = unwind_info;
-}
-
-void EmbeddedFileWriter::WriteUnwindInfoEntry(
- PlatformDependentEmbeddedFileWriter* w, uint64_t rva_start,
- uint64_t rva_end) const {
- w->DeclareRvaToSymbol(EmbeddedBlobDataSymbol().c_str(), rva_start);
- w->DeclareRvaToSymbol(EmbeddedBlobDataSymbol().c_str(), rva_end);
- w->DeclareRvaToSymbol(BuiltinsUnwindInfoLabel().c_str());
-}
-
-void EmbeddedFileWriter::WriteUnwindInfo(PlatformDependentEmbeddedFileWriter* w,
- const i::EmbeddedData* blob) const {
- // Emit an UNWIND_INFO (XDATA) struct, which contains the unwinding
- // information that is used for all builtin functions.
- DCHECK(win64_unwindinfo::CanEmitUnwindInfoForBuiltins());
- w->Comment("xdata for all the code in the embedded blob.");
- w->DeclareExternalFunction(CRASH_HANDLER_FUNCTION_NAME_STRING);
-
- w->StartXdataSection();
- {
- w->DeclareLabel(BuiltinsUnwindInfoLabel().c_str());
- std::vector<uint8_t> xdata =
- win64_unwindinfo::GetUnwindInfoForBuiltinFunctions();
- WriteBinaryContentsAsInlineAssembly(w, xdata.data(),
- static_cast<uint32_t>(xdata.size()));
- w->Comment(" ExceptionHandler");
- w->DeclareRvaToSymbol(CRASH_HANDLER_FUNCTION_NAME_STRING);
- }
- w->EndXdataSection();
- w->Newline();
-
- // Emit a RUNTIME_FUNCTION (PDATA) entry for each builtin function, as
- // documented here:
- // https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64.
- w->Comment(
- "pdata for all the code in the embedded blob (structs of type "
- "RUNTIME_FUNCTION).");
- w->Comment(" BeginAddress");
- w->Comment(" EndAddress");
- w->Comment(" UnwindInfoAddress");
- w->StartPdataSection();
- {
- Address prev_builtin_end_offset = 0;
- for (int i = 0; i < Builtins::builtin_count; i++) {
- // Some builtins are leaf functions from the point of view of Win64 stack
- // walking: they do not move the stack pointer and do not require a PDATA
- // entry because the return address can be retrieved from [rsp].
- if (!blob->ContainsBuiltin(i)) continue;
- if (unwind_infos_[i].is_leaf_function()) continue;
-
- uint64_t builtin_start_offset = blob->InstructionStartOfBuiltin(i) -
- reinterpret_cast<Address>(blob->data());
- uint32_t builtin_size = blob->InstructionSizeOfBuiltin(i);
-
- const std::vector<int>& xdata_desc = unwind_infos_[i].fp_offsets();
- if (xdata_desc.empty()) {
- // Some builtins do not have any "push rbp - mov rbp, rsp" instructions
- // to start a stack frame. We still emit a PDATA entry as if they had,
- // relying on the fact that we can find the previous frame address from
- // rbp in most cases. Note that since the function does not really start
- // with a 'push rbp' we need to specify the start RVA in the PDATA entry
- // a few bytes before the beginning of the function, if it does not
- // overlap the end of the previous builtin.
- WriteUnwindInfoEntry(
- w,
- std::max(prev_builtin_end_offset,
- builtin_start_offset - win64_unwindinfo::kRbpPrefixLength),
- builtin_start_offset + builtin_size);
- } else {
- // Some builtins have one or more "push rbp - mov rbp, rsp" sequences,
- // but not necessarily at the beginning of the function. In this case
- // we want to yield a PDATA entry for each block of instructions that
- // emit an rbp frame. If the function does not start with 'push rbp'
- // we also emit a PDATA entry for the initial block of code up to the
- // first 'push rbp', like in the case above.
- if (xdata_desc[0] > 0) {
- WriteUnwindInfoEntry(w,
- std::max(prev_builtin_end_offset,
- builtin_start_offset -
- win64_unwindinfo::kRbpPrefixLength),
- builtin_start_offset + xdata_desc[0]);
- }
-
- for (size_t j = 0; j < xdata_desc.size(); j++) {
- int chunk_start = xdata_desc[j];
- int chunk_end =
- (j < xdata_desc.size() - 1) ? xdata_desc[j + 1] : builtin_size;
- WriteUnwindInfoEntry(w, builtin_start_offset + chunk_start,
- builtin_start_offset + chunk_end);
- }
- }
-
- prev_builtin_end_offset = builtin_start_offset + builtin_size;
- w->Newline();
- }
- }
- w->EndPdataSection();
- w->Newline();
-}
-#endif
-
-// V8_OS_MACOSX
-// Fuchsia target is explicitly excluded here for Mac hosts. This is to avoid
-// generating uncompilable assembly files for the Fuchsia target.
-// -----------------------------------------------------------------------------
-
-#if defined(V8_OS_MACOSX) && !defined(V8_TARGET_OS_FUCHSIA)
-
-void PlatformDependentEmbeddedFileWriter::SectionText() {
- fprintf(fp_, ".text\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionData() {
- fprintf(fp_, ".data\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionRoData() {
- fprintf(fp_, ".const_data\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
- uint32_t value) {
- DeclareSymbolGlobal(name);
- DeclareLabel(name);
- IndentedDataDirective(kLong);
- fprintf(fp_, "%d", value);
- Newline();
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
- const char* name, const char* target) {
- DeclareSymbolGlobal(name);
- DeclareLabel(name);
- fprintf(fp_, " %s _%s\n", DirectiveAsString(PointerSizeDirective()), target);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
- const char* name) {
- // TODO(jgruber): Investigate switching to .globl. Using .private_extern
- // prevents something along the compilation chain from messing with the
- // embedded blob. Using .global here causes embedded blob hash verification
- // failures at runtime.
- fprintf(fp_, ".private_extern _%s\n", name);
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
- fprintf(fp_, ".balign 32\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
- fprintf(fp_, ".balign 8\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
- fprintf(fp_, "// %s\n", string);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
- fprintf(fp_, "_%s:\n", name);
-}
-
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
- const char* filename,
- int line) {
- fprintf(fp_, ".loc %d %d\n", fileid, line);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
- const char* name) {
- DeclareLabel(name);
-
- // TODO(mvstanton): Investigate the proper incantations to mark the label as
- // a function on OSX.
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
-}
-
-int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0x%" PRIx64, value);
-}
-
-void PlatformDependentEmbeddedFileWriter::FilePrologue() {}
-
-void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
- int fileid, const char* filename) {
- fprintf(fp_, ".file %d \"%s\"\n", fileid, filename);
-}
-
-void PlatformDependentEmbeddedFileWriter::FileEpilogue() {}
-
-int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
- DataDirective directive) {
- return fprintf(fp_, " %s ", DirectiveAsString(directive));
-}
-
-// V8_OS_AIX
-// -----------------------------------------------------------------------------
-
-#elif defined(V8_OS_AIX)
-
-void PlatformDependentEmbeddedFileWriter::SectionText() {
- fprintf(fp_, ".csect .text[PR]\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionData() {
- fprintf(fp_, ".csect .data[RW]\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionRoData() {
- fprintf(fp_, ".csect[RO]\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
- uint32_t value) {
- DeclareSymbolGlobal(name);
- fprintf(fp_, ".align 2\n");
- fprintf(fp_, "%s:\n", name);
- IndentedDataDirective(kLong);
- fprintf(fp_, "%d\n", value);
- Newline();
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
- const char* name, const char* target) {
- AlignToCodeAlignment();
- DeclareLabel(name);
- fprintf(fp_, " %s %s\n", DirectiveAsString(PointerSizeDirective()), target);
- Newline();
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
- const char* name) {
- fprintf(fp_, ".globl %s\n", name);
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
- fprintf(fp_, ".align 5\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
- fprintf(fp_, ".align 3\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
- fprintf(fp_, "// %s\n", string);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
- DeclareSymbolGlobal(name);
- fprintf(fp_, "%s:\n", name);
-}
-
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
- const char* filename,
- int line) {
- fprintf(fp_, ".xline %d, \"%s\"\n", line, filename);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
- const char* name) {
- Newline();
- DeclareSymbolGlobal(name);
- fprintf(fp_, ".csect %s[DS]\n", name); // function descriptor
- fprintf(fp_, "%s:\n", name);
- fprintf(fp_, ".llong .%s, 0, 0\n", name);
- SectionText();
- fprintf(fp_, ".%s:\n", name);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
-}
-
-int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0x%" PRIx64, value);
-}
-
-void PlatformDependentEmbeddedFileWriter::FilePrologue() {}
-
-void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
- int fileid, const char* filename) {
- // File name cannot be declared with an identifier on AIX.
- // We use the SourceInfo method to emit debug info in
- //.xline <line-number> <file-name> format.
-}
-
-void PlatformDependentEmbeddedFileWriter::FileEpilogue() {}
-
-int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
- DataDirective directive) {
- return fprintf(fp_, " %s ", DirectiveAsString(directive));
-}
-
-// V8_TARGET_OS_WIN (MSVC)
-// -----------------------------------------------------------------------------
-
-#elif defined(V8_TARGET_OS_WIN) && defined(V8_ASSEMBLER_IS_MASM)
-
-// For MSVC builds we emit assembly in MASM syntax.
-// See https://docs.microsoft.com/en-us/cpp/assembler/masm/directives-reference.
-
-void PlatformDependentEmbeddedFileWriter::SectionText() {
- fprintf(fp_, ".CODE\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionData() {
- fprintf(fp_, ".DATA\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionRoData() {
- fprintf(fp_, ".CONST\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
- uint32_t value) {
- DeclareSymbolGlobal(name);
- fprintf(fp_, "%s%s %s %d\n", SYMBOL_PREFIX, name, DirectiveAsString(kLong),
- value);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
- const char* name, const char* target) {
- DeclareSymbolGlobal(name);
- fprintf(fp_, "%s%s %s %s%s\n", SYMBOL_PREFIX, name,
- DirectiveAsString(PointerSizeDirective()), SYMBOL_PREFIX, target);
-}
-
-#if defined(V8_OS_WIN_X64)
-
-void PlatformDependentEmbeddedFileWriter::StartPdataSection() {
- fprintf(fp_, "OPTION DOTNAME\n");
- fprintf(fp_, ".pdata SEGMENT DWORD READ ''\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::EndPdataSection() {
- fprintf(fp_, ".pdata ENDS\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::StartXdataSection() {
- fprintf(fp_, "OPTION DOTNAME\n");
- fprintf(fp_, ".xdata SEGMENT DWORD READ ''\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::EndXdataSection() {
- fprintf(fp_, ".xdata ENDS\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareExternalFunction(
- const char* name) {
- fprintf(fp_, "EXTERN %s : PROC\n", name);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareRvaToSymbol(const char* name,
- uint64_t offset) {
- if (offset > 0) {
- fprintf(fp_, "DD IMAGEREL %s+%llu\n", name, offset);
- } else {
- fprintf(fp_, "DD IMAGEREL %s\n", name);
- }
-}
-
-#endif // defined(V8_OS_WIN_X64)
-
-void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
- const char* name) {
- fprintf(fp_, "PUBLIC %s%s\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
- // Diverges from other platforms due to compile error
- // 'invalid combination with segment alignment'.
- fprintf(fp_, "ALIGN 4\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
- fprintf(fp_, "ALIGN 4\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
- fprintf(fp_, "; %s\n", string);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
- fprintf(fp_, "%s%s LABEL %s\n", SYMBOL_PREFIX, name,
- DirectiveAsString(kByte));
-}
-
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
- const char* filename,
- int line) {
- // TODO(mvstanton): output source information for MSVC.
- // Its syntax is #line <line> "<filename>"
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
- const char* name) {
- fprintf(fp_, "%s%s PROC\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
- fprintf(fp_, "%s%s ENDP\n", SYMBOL_PREFIX, name);
-}
-
-int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0%" PRIx64 "h", value);
-}
-
-void PlatformDependentEmbeddedFileWriter::FilePrologue() {
-#if !defined(V8_TARGET_ARCH_X64)
- fprintf(fp_, ".MODEL FLAT\n");
-#endif
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
- int fileid, const char* filename) {}
-
-void PlatformDependentEmbeddedFileWriter::FileEpilogue() {
- fprintf(fp_, "END\n");
-}
-
-int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
- DataDirective directive) {
- return fprintf(fp_, " %s ", DirectiveAsString(directive));
-}
-
-#undef V8_ASSEMBLER_IS_MASM
-
-#elif defined(V8_TARGET_OS_WIN) && defined(V8_ASSEMBLER_IS_MARMASM)
-
-// The the AARCH64 ABI requires instructions be 4-byte-aligned and Windows does
-// not have a stricter alignment requirement (see the TEXTAREA macro of
-// kxarm64.h in the Windows SDK), so code is 4-byte-aligned.
-// The data fields in the emitted assembly tend to be accessed with 8-byte
-// LDR instructions, so data is 8-byte-aligned.
-//
-// armasm64's warning A4228 states
-// Alignment value exceeds AREA alignment; alignment not guaranteed
-// To ensure that ALIGN directives are honored, their values are defined as
-// equal to their corresponding AREA's ALIGN attributes.
-
-#define ARM64_DATA_ALIGNMENT_POWER (3)
-#define ARM64_DATA_ALIGNMENT (1 << ARM64_DATA_ALIGNMENT_POWER)
-#define ARM64_CODE_ALIGNMENT_POWER (2)
-#define ARM64_CODE_ALIGNMENT (1 << ARM64_CODE_ALIGNMENT_POWER)
-
-void PlatformDependentEmbeddedFileWriter::SectionText() {
- fprintf(fp_, " AREA |.text|, CODE, ALIGN=%d, READONLY\n",
- ARM64_CODE_ALIGNMENT_POWER);
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionData() {
- fprintf(fp_, " AREA |.data|, DATA, ALIGN=%d, READWRITE\n",
- ARM64_DATA_ALIGNMENT_POWER);
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionRoData() {
- fprintf(fp_, " AREA |.rodata|, DATA, ALIGN=%d, READONLY\n",
- ARM64_DATA_ALIGNMENT_POWER);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
- uint32_t value) {
- DeclareSymbolGlobal(name);
- fprintf(fp_, "%s%s %s %d\n", SYMBOL_PREFIX, name, DirectiveAsString(kLong),
- value);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
- const char* name, const char* target) {
- DeclareSymbolGlobal(name);
- fprintf(fp_, "%s%s %s %s%s\n", SYMBOL_PREFIX, name,
- DirectiveAsString(PointerSizeDirective()), SYMBOL_PREFIX, target);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
- const char* name) {
- fprintf(fp_, " EXPORT %s%s\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
- fprintf(fp_, " ALIGN %d\n", ARM64_CODE_ALIGNMENT);
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
- fprintf(fp_, " ALIGN %d\n", ARM64_DATA_ALIGNMENT);
-}
-
-void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
- fprintf(fp_, "; %s\n", string);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
- fprintf(fp_, "%s%s\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
- const char* filename,
- int line) {
- // TODO(mvstanton): output source information for MSVC.
- // Its syntax is #line <line> "<filename>"
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
- const char* name) {
- fprintf(fp_, "%s%s FUNCTION\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
- fprintf(fp_, " ENDFUNC\n");
-}
-
-int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0x%" PRIx64, value);
-}
-
-void PlatformDependentEmbeddedFileWriter::FilePrologue() {}
-
-void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
- int fileid, const char* filename) {}
-
-void PlatformDependentEmbeddedFileWriter::FileEpilogue() {
- fprintf(fp_, " END\n");
-}
-
-int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
- DataDirective directive) {
- return fprintf(fp_, " %s ", DirectiveAsString(directive));
-}
-
-#undef V8_ASSEMBLER_IS_MARMASM
-#undef ARM64_DATA_ALIGNMENT_POWER
-#undef ARM64_DATA_ALIGNMENT
-#undef ARM64_CODE_ALIGNMENT_POWER
-#undef ARM64_CODE_ALIGNMENT
-
-// Everything but AIX, Windows with MSVC, or OSX.
-// -----------------------------------------------------------------------------
-
-#else
-
-void PlatformDependentEmbeddedFileWriter::SectionText() {
-#ifdef OS_CHROMEOS
- fprintf(fp_, ".section .text.hot.embedded\n");
-#else
- fprintf(fp_, ".section .text\n");
-#endif
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionData() {
- fprintf(fp_, ".section .data\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::SectionRoData() {
- if (i::FLAG_target_os == std::string("win"))
- fprintf(fp_, ".section .rdata\n");
- else
- fprintf(fp_, ".section .rodata\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareUint32(const char* name,
- uint32_t value) {
- DeclareSymbolGlobal(name);
- DeclareLabel(name);
- IndentedDataDirective(kLong);
- fprintf(fp_, "%d", value);
- Newline();
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclarePointerToSymbol(
- const char* name, const char* target) {
- DeclareSymbolGlobal(name);
- DeclareLabel(name);
- fprintf(fp_, " %s %s%s\n", DirectiveAsString(PointerSizeDirective()),
- SYMBOL_PREFIX, target);
-}
-
-#if defined(V8_OS_WIN_X64)
-
-void PlatformDependentEmbeddedFileWriter::StartPdataSection() {
- fprintf(fp_, ".section .pdata\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::EndPdataSection() {}
-
-void PlatformDependentEmbeddedFileWriter::StartXdataSection() {
- fprintf(fp_, ".section .xdata\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::EndXdataSection() {}
-
-void PlatformDependentEmbeddedFileWriter::DeclareExternalFunction(
- const char* name) {}
-
-void PlatformDependentEmbeddedFileWriter::DeclareRvaToSymbol(const char* name,
- uint64_t offset) {
- if (offset > 0) {
- fprintf(fp_, ".rva %s + %llu\n", name, offset);
- } else {
- fprintf(fp_, ".rva %s\n", name);
- }
-}
-
-#endif // defined(V8_OS_WIN_X64)
-
-void PlatformDependentEmbeddedFileWriter::DeclareSymbolGlobal(
- const char* name) {
- fprintf(fp_, ".global %s%s\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToCodeAlignment() {
- fprintf(fp_, ".balign 32\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::AlignToDataAlignment() {
- // On Windows ARM64, s390, PPC and possibly more platforms, aligned load
- // instructions are used to retrieve v8_Default_embedded_blob_ and/or
- // v8_Default_embedded_blob_size_. The generated instructions require the
- // load target to be aligned at 8 bytes (2^3).
- fprintf(fp_, ".balign 8\n");
-}
-
-void PlatformDependentEmbeddedFileWriter::Comment(const char* string) {
- fprintf(fp_, "// %s\n", string);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareLabel(const char* name) {
- fprintf(fp_, "%s%s:\n", SYMBOL_PREFIX, name);
-}
-
-void PlatformDependentEmbeddedFileWriter::SourceInfo(int fileid,
- const char* filename,
- int line) {
- fprintf(fp_, ".loc %d %d\n", fileid, line);
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionBegin(
- const char* name) {
- DeclareLabel(name);
-
- if (i::FLAG_target_os == std::string("win")) {
-#if defined(V8_TARGET_ARCH_ARM64)
- // Windows ARM64 assembly is in GAS syntax, but ".type" is invalid directive
- // in PE/COFF for Windows.
-#else
- // The directives for inserting debugging information on Windows come
- // from the PE (Portable Executable) and COFF (Common Object File Format)
- // standards. Documented here:
- // https://docs.microsoft.com/en-us/windows/desktop/debug/pe-format
- //
- // .scl 2 means StorageClass external.
- // .type 32 means Type Representation Function.
- fprintf(fp_, ".def %s%s; .scl 2; .type 32; .endef;\n", SYMBOL_PREFIX, name);
-#endif
- } else {
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64)
- // ELF format binaries on ARM use ".type <function name>, %function"
- // to create a DWARF subprogram entry.
- fprintf(fp_, ".type %s, %%function\n", name);
-#else
- // Other ELF Format binaries use ".type <function name>, @function"
- // to create a DWARF subprogram entry.
- fprintf(fp_, ".type %s, @function\n", name);
-#endif
- }
-}
-
-void PlatformDependentEmbeddedFileWriter::DeclareFunctionEnd(const char* name) {
-}
-
-int PlatformDependentEmbeddedFileWriter::HexLiteral(uint64_t value) {
- return fprintf(fp_, "0x%" PRIx64, value);
-}
-
-void PlatformDependentEmbeddedFileWriter::FilePrologue() {}
-
-void PlatformDependentEmbeddedFileWriter::DeclareExternalFilename(
- int fileid, const char* filename) {
- // Replace any Windows style paths (backslashes) with forward
- // slashes.
- std::string fixed_filename(filename);
- for (auto& c : fixed_filename) {
- if (c == '\\') {
- c = '/';
- }
- }
- fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
-}
-
-void PlatformDependentEmbeddedFileWriter::FileEpilogue() {}
-
-int PlatformDependentEmbeddedFileWriter::IndentedDataDirective(
- DataDirective directive) {
- return fprintf(fp_, " %s ", DirectiveAsString(directive));
-}
-
-#endif
-
-#undef SYMBOL_PREFIX
-#undef V8_COMPILER_IS_MSVC
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/snapshot/embedded-file-writer.h b/deps/v8/src/snapshot/embedded-file-writer.h
deleted file mode 100644
index 0f4978cfd8..0000000000
--- a/deps/v8/src/snapshot/embedded-file-writer.h
+++ /dev/null
@@ -1,483 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SNAPSHOT_EMBEDDED_FILE_WRITER_H_
-#define V8_SNAPSHOT_EMBEDDED_FILE_WRITER_H_
-
-#include <cstdio>
-#include <cstring>
-
-#include "src/globals.h"
-#include "src/snapshot/snapshot.h"
-#include "src/source-position-table.h"
-
-#if defined(V8_OS_WIN_X64)
-#include "src/unwinding-info-win64.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-enum DataDirective {
- kByte,
- kLong,
- kQuad,
- kOcta,
-};
-
-static constexpr char kDefaultEmbeddedVariant[] = "Default";
-
-// The platform-dependent logic for emitting assembly code for the generated
-// embedded.S file.
-class EmbeddedFileWriter;
-class PlatformDependentEmbeddedFileWriter final {
- public:
- void SetFile(FILE* fp) { fp_ = fp; }
-
- void SectionText();
- void SectionData();
- void SectionRoData();
-
- void AlignToCodeAlignment();
- void AlignToDataAlignment();
-
- void DeclareUint32(const char* name, uint32_t value);
- void DeclarePointerToSymbol(const char* name, const char* target);
-
-#if defined(V8_OS_WIN_X64)
- void StartPdataSection();
- void EndPdataSection();
- void StartXdataSection();
- void EndXdataSection();
- void DeclareExternalFunction(const char* name);
-
- // Emits an RVA (address relative to the module load address) specified as an
- // offset from a given symbol.
- void DeclareRvaToSymbol(const char* name, uint64_t offset = 0);
-#endif
-
- void DeclareLabel(const char* name);
-
- void SourceInfo(int fileid, const char* filename, int line);
- void DeclareFunctionBegin(const char* name);
- void DeclareFunctionEnd(const char* name);
-
- // Returns the number of printed characters.
- int HexLiteral(uint64_t value);
-
- void Comment(const char* string);
- void Newline() { fprintf(fp_, "\n"); }
-
- void FilePrologue();
- void DeclareExternalFilename(int fileid, const char* filename);
- void FileEpilogue();
-
- int IndentedDataDirective(DataDirective directive);
-
- FILE* fp() const { return fp_; }
-
- private:
- void DeclareSymbolGlobal(const char* name);
-
- private:
- FILE* fp_ = nullptr;
-};
-
-// When writing out compiled builtins to a file, we
-// Detailed source-code information about builtins can only be obtained by
-// registration on the isolate during compilation.
-class EmbeddedFileWriterInterface {
- public:
- // We maintain a database of filenames to synthetic IDs.
- virtual int LookupOrAddExternallyCompiledFilename(const char* filename) = 0;
- virtual const char* GetExternallyCompiledFilename(int index) const = 0;
- virtual int GetExternallyCompiledFilenameCount() const = 0;
-
- // The isolate will call the method below just prior to replacing the
- // compiled builtin Code objects with trampolines.
- virtual void PrepareBuiltinSourcePositionMap(Builtins* builtins) = 0;
-
-#if defined(V8_OS_WIN_X64)
- virtual void SetBuiltinUnwindData(
- int builtin_index,
- const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) = 0;
-#endif
-};
-
-// Generates the embedded.S file which is later compiled into the final v8
-// binary. Its contents are exported through two symbols:
-//
-// v8_<variant>_embedded_blob_ (intptr_t):
-// a pointer to the start of the embedded blob.
-// v8_<variant>_embedded_blob_size_ (uint32_t):
-// size of the embedded blob in bytes.
-//
-// The variant is usually "Default" but can be modified in multisnapshot builds.
-class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
- public:
- int LookupOrAddExternallyCompiledFilename(const char* filename) override {
- auto result = external_filenames_.find(filename);
- if (result != external_filenames_.end()) {
- return result->second;
- }
- int new_id =
- ExternalFilenameIndexToId(static_cast<int>(external_filenames_.size()));
- external_filenames_.insert(std::make_pair(filename, new_id));
- external_filenames_by_index_.push_back(filename);
- DCHECK_EQ(external_filenames_by_index_.size(), external_filenames_.size());
- return new_id;
- }
-
- const char* GetExternallyCompiledFilename(int fileid) const override {
- size_t index = static_cast<size_t>(ExternalFilenameIdToIndex(fileid));
- DCHECK_GE(index, 0);
- DCHECK_LT(index, external_filenames_by_index_.size());
-
- return external_filenames_by_index_[index];
- }
-
- int GetExternallyCompiledFilenameCount() const override {
- return static_cast<int>(external_filenames_.size());
- }
-
- void PrepareBuiltinSourcePositionMap(Builtins* builtins) override;
-
-#if defined(V8_OS_WIN_X64)
- void SetBuiltinUnwindData(
- int builtin_index,
- const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) override;
-#endif
-
- void SetEmbeddedFile(const char* embedded_src_path) {
- embedded_src_path_ = embedded_src_path;
- }
-
- void SetEmbeddedVariant(const char* embedded_variant) {
- embedded_variant_ = embedded_variant;
- }
-
- void WriteEmbedded(const i::EmbeddedData* blob) const {
- MaybeWriteEmbeddedFile(blob);
- }
-
- private:
- void MaybeWriteEmbeddedFile(const i::EmbeddedData* blob) const {
- if (embedded_src_path_ == nullptr) return;
-
- FILE* fp = GetFileDescriptorOrDie(embedded_src_path_);
-
- PlatformDependentEmbeddedFileWriter writer;
- writer.SetFile(fp);
-
- WriteFilePrologue(&writer);
- WriteExternalFilenames(&writer);
- WriteMetadataSection(&writer, blob);
- WriteInstructionStreams(&writer, blob);
- WriteFileEpilogue(&writer, blob);
-
- fclose(fp);
- }
-
- static FILE* GetFileDescriptorOrDie(const char* filename) {
- FILE* fp = v8::base::OS::FOpen(filename, "wb");
- if (fp == nullptr) {
- i::PrintF("Unable to open file \"%s\" for writing.\n", filename);
- exit(1);
- }
- return fp;
- }
-
- void WriteFilePrologue(PlatformDependentEmbeddedFileWriter* w) const {
- w->Comment("Autogenerated file. Do not edit.");
- w->Newline();
- w->FilePrologue();
- }
-
- void WriteExternalFilenames(PlatformDependentEmbeddedFileWriter* w) const {
- w->Comment(
- "Source positions in the embedded blob refer to filenames by id.");
- w->Comment("Assembly directives here map the id to a filename.");
- w->Newline();
-
- // Write external filenames.
- int size = static_cast<int>(external_filenames_by_index_.size());
- for (int i = 0; i < size; i++) {
- w->DeclareExternalFilename(ExternalFilenameIndexToId(i),
- external_filenames_by_index_[i]);
- }
- }
-
- // Fairly arbitrary but should fit all symbol names.
- static constexpr int kTemporaryStringLength = 256;
-
- std::string EmbeddedBlobDataSymbol() const {
- char embedded_blob_data_symbol[kTemporaryStringLength];
- i::SNPrintF(i::Vector<char>(embedded_blob_data_symbol),
- "v8_%s_embedded_blob_data_", embedded_variant_);
- return embedded_blob_data_symbol;
- }
-
- void WriteMetadataSection(PlatformDependentEmbeddedFileWriter* w,
- const i::EmbeddedData* blob) const {
- w->Comment("The embedded blob starts here. Metadata comes first, followed");
- w->Comment("by builtin instruction streams.");
- w->SectionText();
- w->AlignToCodeAlignment();
- w->DeclareLabel(EmbeddedBlobDataSymbol().c_str());
-
- WriteBinaryContentsAsInlineAssembly(w, blob->data(),
- i::EmbeddedData::RawDataOffset());
- }
-
- void WriteBuiltin(PlatformDependentEmbeddedFileWriter* w,
- const i::EmbeddedData* blob, const int builtin_id) const {
- const bool is_default_variant =
- std::strcmp(embedded_variant_, kDefaultEmbeddedVariant) == 0;
-
- char builtin_symbol[kTemporaryStringLength];
- if (is_default_variant) {
- // Create nicer symbol names for the default mode.
- i::SNPrintF(i::Vector<char>(builtin_symbol), "Builtins_%s",
- i::Builtins::name(builtin_id));
- } else {
- i::SNPrintF(i::Vector<char>(builtin_symbol), "%s_Builtins_%s",
- embedded_variant_, i::Builtins::name(builtin_id));
- }
-
- // Labels created here will show up in backtraces. We check in
- // Isolate::SetEmbeddedBlob that the blob layout remains unchanged, i.e.
- // that labels do not insert bytes into the middle of the blob byte
- // stream.
- w->DeclareFunctionBegin(builtin_symbol);
- const std::vector<byte>& current_positions = source_positions_[builtin_id];
-
- // The code below interleaves bytes of assembly code for the builtin
- // function with source positions at the appropriate offsets.
- Vector<const byte> vpos(current_positions.data(), current_positions.size());
- v8::internal::SourcePositionTableIterator positions(
- vpos, SourcePositionTableIterator::kExternalOnly);
-
- const uint8_t* data = reinterpret_cast<const uint8_t*>(
- blob->InstructionStartOfBuiltin(builtin_id));
- uint32_t size = blob->PaddedInstructionSizeOfBuiltin(builtin_id);
- uint32_t i = 0;
- uint32_t next_offset = static_cast<uint32_t>(
- positions.done() ? size : positions.code_offset());
- while (i < size) {
- if (i == next_offset) {
- // Write source directive.
- w->SourceInfo(positions.source_position().ExternalFileId(),
- GetExternallyCompiledFilename(
- positions.source_position().ExternalFileId()),
- positions.source_position().ExternalLine());
- positions.Advance();
- next_offset = static_cast<uint32_t>(
- positions.done() ? size : positions.code_offset());
- }
- CHECK_GE(next_offset, i);
- WriteBinaryContentsAsInlineAssembly(w, data + i, next_offset - i);
- i = next_offset;
- }
-
- w->DeclareFunctionEnd(builtin_symbol);
- }
-
- void WriteInstructionStreams(PlatformDependentEmbeddedFileWriter* w,
- const i::EmbeddedData* blob) const {
- for (int i = 0; i < i::Builtins::builtin_count; i++) {
- if (!blob->ContainsBuiltin(i)) continue;
-
- WriteBuiltin(w, blob, i);
- }
- w->Newline();
- }
-
- void WriteFileEpilogue(PlatformDependentEmbeddedFileWriter* w,
- const i::EmbeddedData* blob) const {
- {
- char embedded_blob_symbol[kTemporaryStringLength];
- i::SNPrintF(i::Vector<char>(embedded_blob_symbol), "v8_%s_embedded_blob_",
- embedded_variant_);
-
- w->Comment("Pointer to the beginning of the embedded blob.");
- w->SectionData();
- w->AlignToDataAlignment();
- w->DeclarePointerToSymbol(embedded_blob_symbol,
- EmbeddedBlobDataSymbol().c_str());
- w->Newline();
- }
-
- {
- char embedded_blob_size_symbol[kTemporaryStringLength];
- i::SNPrintF(i::Vector<char>(embedded_blob_size_symbol),
- "v8_%s_embedded_blob_size_", embedded_variant_);
-
- w->Comment("The size of the embedded blob in bytes.");
- w->SectionRoData();
- w->AlignToDataAlignment();
- w->DeclareUint32(embedded_blob_size_symbol, blob->size());
- w->Newline();
- }
-
-#if defined(V8_OS_WIN_X64)
- if (win64_unwindinfo::CanEmitUnwindInfoForBuiltins()) {
- WriteUnwindInfo(w, blob);
- }
-#endif
-
- w->FileEpilogue();
- }
-
-#if defined(V8_OS_WIN_X64)
- std::string BuiltinsUnwindInfoLabel() const;
- void WriteUnwindInfo(PlatformDependentEmbeddedFileWriter* w,
- const i::EmbeddedData* blob) const;
- void WriteUnwindInfoEntry(PlatformDependentEmbeddedFileWriter* w,
- uint64_t rva_start, uint64_t rva_end) const;
-#endif
-
-#if defined(_MSC_VER) && !defined(__clang__)
-#define V8_COMPILER_IS_MSVC
-#endif
-
-#if defined(V8_COMPILER_IS_MSVC)
- // Windows MASM doesn't have an .octa directive, use QWORDs instead.
- // Note: MASM *really* does not like large data streams. It takes over 5
- // minutes to assemble the ~350K lines of embedded.S produced when using
- // BYTE directives in a debug build. QWORD produces roughly 120KLOC and
- // reduces assembly time to ~40 seconds. Still terrible, but much better
- // than before. See also: https://crbug.com/v8/8475.
- static constexpr DataDirective kByteChunkDirective = kQuad;
- static constexpr int kByteChunkSize = 8;
-
- static int WriteByteChunk(PlatformDependentEmbeddedFileWriter* w,
- int current_line_length, const uint8_t* data) {
- const uint64_t* quad_ptr = reinterpret_cast<const uint64_t*>(data);
- return current_line_length + w->HexLiteral(*quad_ptr);
- }
-
-#elif defined(V8_OS_AIX)
- // PPC uses a fixed 4 byte instruction set, using .long
- // to prevent any unnecessary padding.
- static constexpr DataDirective kByteChunkDirective = kLong;
- static constexpr int kByteChunkSize = 4;
-
- static int WriteByteChunk(PlatformDependentEmbeddedFileWriter* w,
- int current_line_length, const uint8_t* data) {
- const uint32_t* long_ptr = reinterpret_cast<const uint32_t*>(data);
- return current_line_length + w->HexLiteral(*long_ptr);
- }
-
-#else // defined(V8_COMPILER_IS_MSVC) || defined(V8_OS_AIX)
- static constexpr DataDirective kByteChunkDirective = kOcta;
- static constexpr int kByteChunkSize = 16;
-
- static int WriteByteChunk(PlatformDependentEmbeddedFileWriter* w,
- int current_line_length, const uint8_t* data) {
- const size_t size = kInt64Size;
-
- uint64_t part1, part2;
- // Use memcpy for the reads since {data} is not guaranteed to be aligned.
-#ifdef V8_TARGET_BIG_ENDIAN
- memcpy(&part1, data, size);
- memcpy(&part2, data + size, size);
-#else
- memcpy(&part1, data + size, size);
- memcpy(&part2, data, size);
-#endif // V8_TARGET_BIG_ENDIAN
-
- if (part1 != 0) {
- current_line_length +=
- fprintf(w->fp(), "0x%" PRIx64 "%016" PRIx64, part1, part2);
- } else {
- current_line_length += fprintf(w->fp(), "0x%" PRIx64, part2);
- }
- return current_line_length;
- }
-#endif // defined(V8_COMPILER_IS_MSVC) || defined(V8_OS_AIX)
-#undef V8_COMPILER_IS_MSVC
-
- static int WriteDirectiveOrSeparator(PlatformDependentEmbeddedFileWriter* w,
- int current_line_length,
- DataDirective directive) {
- int printed_chars;
- if (current_line_length == 0) {
- printed_chars = w->IndentedDataDirective(directive);
- DCHECK_LT(0, printed_chars);
- } else {
- printed_chars = fprintf(w->fp(), ",");
- DCHECK_EQ(1, printed_chars);
- }
- return current_line_length + printed_chars;
- }
-
- static int WriteLineEndIfNeeded(PlatformDependentEmbeddedFileWriter* w,
- int current_line_length, int write_size) {
- static const int kTextWidth = 100;
- // Check if adding ',0xFF...FF\n"' would force a line wrap. This doesn't use
- // the actual size of the string to be written to determine this so it's
- // more conservative than strictly needed.
- if (current_line_length + strlen(",0x") + write_size * 2 > kTextWidth) {
- fprintf(w->fp(), "\n");
- return 0;
- } else {
- return current_line_length;
- }
- }
-
- static void WriteBinaryContentsAsInlineAssembly(
- PlatformDependentEmbeddedFileWriter* w, const uint8_t* data,
- uint32_t size) {
- int current_line_length = 0;
- uint32_t i = 0;
-
- // Begin by writing out byte chunks.
- for (; i + kByteChunkSize < size; i += kByteChunkSize) {
- current_line_length = WriteDirectiveOrSeparator(w, current_line_length,
- kByteChunkDirective);
- current_line_length = WriteByteChunk(w, current_line_length, data + i);
- current_line_length =
- WriteLineEndIfNeeded(w, current_line_length, kByteChunkSize);
- }
- if (current_line_length != 0) w->Newline();
- current_line_length = 0;
-
- // Write any trailing bytes one-by-one.
- for (; i < size; i++) {
- current_line_length =
- WriteDirectiveOrSeparator(w, current_line_length, kByte);
- current_line_length += w->HexLiteral(data[i]);
- current_line_length = WriteLineEndIfNeeded(w, current_line_length, 1);
- }
-
- if (current_line_length != 0) w->Newline();
- }
-
- static int ExternalFilenameIndexToId(int index) {
- return kFirstExternalFilenameId + index;
- }
-
- static int ExternalFilenameIdToIndex(int id) {
- return id - kFirstExternalFilenameId;
- }
-
- std::vector<byte> source_positions_[Builtins::builtin_count];
-
-#if defined(V8_OS_WIN_X64)
- win64_unwindinfo::BuiltinUnwindInfo unwind_infos_[Builtins::builtin_count];
-#endif
-
- // In assembly directives, filename ids need to begin with 1.
- static const int kFirstExternalFilenameId = 1;
- std::map<const char*, int> external_filenames_;
- std::vector<const char*> external_filenames_by_index_;
-
- const char* embedded_src_path_ = nullptr;
- const char* embedded_variant_ = kDefaultEmbeddedVariant;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_SNAPSHOT_EMBEDDED_FILE_WRITER_H_
diff --git a/deps/v8/src/snapshot/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 0488c2f2c7..0474d3babe 100644
--- a/deps/v8/src/snapshot/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
-#include "src/assembler-inl.h"
-#include "src/callable.h"
-#include "src/objects-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
@@ -89,8 +89,8 @@ void InstructionStream::FreeOffHeapInstructionStream(uint8_t* data,
namespace {
bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code code) {
- DCHECK(Builtins::IsIsolateIndependent(code->builtin_index()));
- switch (Builtins::KindOf(code->builtin_index())) {
+ DCHECK(Builtins::IsIsolateIndependent(code.builtin_index()));
+ switch (Builtins::KindOf(code.builtin_index())) {
case Builtins::CPP:
case Builtins::TFC:
case Builtins::TFH:
@@ -101,14 +101,13 @@ bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code code) {
// Bytecode handlers will only ever be used by the interpreter and so there
// will never be a need to use trampolines with them.
case Builtins::BCH:
- case Builtins::API:
case Builtins::ASM:
// TODO(jgruber): Extend checks to remaining kinds.
return false;
}
Callable callable = Builtins::CallableFor(
- isolate, static_cast<Builtins::Name>(code->builtin_index()));
+ isolate, static_cast<Builtins::Name>(code.builtin_index()));
CallInterfaceDescriptor descriptor = callable.descriptor();
if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
@@ -152,7 +151,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
// Do not emit write-barrier for off-heap writes.
off_heap_it.rinfo()->set_target_address(
- blob->InstructionStartOfBuiltin(target->builtin_index()),
+ blob->InstructionStartOfBuiltin(target.builtin_index()),
SKIP_WRITE_BARRIER);
on_heap_it.next();
@@ -186,7 +185,7 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
if (Builtins::IsIsolateIndependent(i)) {
// Sanity-check that the given builtin is isolate-independent and does not
// use the trampoline register in its calling convention.
- if (!code->IsIsolateIndependent(isolate)) {
+ if (!code.IsIsolateIndependent(isolate)) {
saw_unsafe_builtin = true;
fprintf(stderr, "%s is not isolate-independent.\n", Builtins::name(i));
}
@@ -206,7 +205,7 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
Builtins::name(i));
}
- uint32_t length = static_cast<uint32_t>(code->raw_instruction_size());
+ uint32_t length = static_cast<uint32_t>(code.raw_instruction_size());
DCHECK_EQ(0, raw_data_size % kCodeAlignment);
metadata[i].instructions_offset = raw_data_size;
@@ -249,10 +248,10 @@ EmbeddedData EmbeddedData::FromIsolate(Isolate* isolate) {
Code code = builtins->builtin(i);
uint32_t offset = metadata[i].instructions_offset;
uint8_t* dst = raw_data_start + offset;
- DCHECK_LE(RawDataOffset() + offset + code->raw_instruction_size(),
+ DCHECK_LE(RawDataOffset() + offset + code.raw_instruction_size(),
blob_size);
- std::memcpy(dst, reinterpret_cast<uint8_t*>(code->raw_instruction_start()),
- code->raw_instruction_size());
+ std::memcpy(dst, reinterpret_cast<uint8_t*>(code.raw_instruction_start()),
+ code.raw_instruction_size());
}
EmbeddedData d(blob, blob_size);
@@ -290,6 +289,19 @@ uint32_t EmbeddedData::InstructionSizeOfBuiltin(int i) const {
return metadata[i].instructions_length;
}
+Address EmbeddedData::InstructionStartOfBytecodeHandlers() const {
+ return InstructionStartOfBuiltin(Builtins::kFirstBytecodeHandler);
+}
+
+Address EmbeddedData::InstructionEndOfBytecodeHandlers() const {
+ STATIC_ASSERT(Builtins::kFirstBytecodeHandler + kNumberOfBytecodeHandlers +
+ 2 * kNumberOfWideBytecodeHandlers ==
+ Builtins::builtin_count);
+ int lastBytecodeHandler = Builtins::builtin_count - 1;
+ return InstructionStartOfBuiltin(lastBytecodeHandler) +
+ InstructionSizeOfBuiltin(lastBytecodeHandler);
+}
+
size_t EmbeddedData::CreateEmbeddedBlobHash() const {
STATIC_ASSERT(EmbeddedBlobHashOffset() == 0);
STATIC_ASSERT(EmbeddedBlobHashSize() == kSizetSize);
diff --git a/deps/v8/src/snapshot/embedded-data.h b/deps/v8/src/snapshot/embedded/embedded-data.h
index 5c5653e2ca..58905668f2 100644
--- a/deps/v8/src/snapshot/embedded-data.h
+++ b/deps/v8/src/snapshot/embedded/embedded-data.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SNAPSHOT_EMBEDDED_DATA_H_
-#define V8_SNAPSHOT_EMBEDDED_DATA_H_
+#ifndef V8_SNAPSHOT_EMBEDDED_EMBEDDED_DATA_H_
+#define V8_SNAPSHOT_EMBEDDED_EMBEDDED_DATA_H_
#include "src/base/macros.h"
#include "src/builtins/builtins.h"
-#include "src/globals.h"
-#include "src/isolate.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
@@ -57,6 +57,9 @@ class EmbeddedData final {
Address InstructionStartOfBuiltin(int i) const;
uint32_t InstructionSizeOfBuiltin(int i) const;
+ Address InstructionStartOfBytecodeHandlers() const;
+ Address InstructionEndOfBytecodeHandlers() const;
+
bool ContainsBuiltin(int i) const { return InstructionSizeOfBuiltin(i) > 0; }
uint32_t AddressForHashing(Address addr) {
@@ -140,4 +143,4 @@ class EmbeddedData final {
} // namespace internal
} // namespace v8
-#endif // V8_SNAPSHOT_EMBEDDED_DATA_H_
+#endif // V8_SNAPSHOT_EMBEDDED_EMBEDDED_DATA_H_
diff --git a/deps/v8/src/snapshot/embedded-empty.cc b/deps/v8/src/snapshot/embedded/embedded-empty.cc
index 9ffb3458d3..9ffb3458d3 100644
--- a/deps/v8/src/snapshot/embedded-empty.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-empty.cc
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.cc b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
new file mode 100644
index 0000000000..4703ef4822
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.cc
@@ -0,0 +1,214 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/embedded/embedded-file-writer.h"
+
+#include <cinttypes>
+
+#include "src/codegen/source-position-table.h"
+#include "src/objects/code-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void EmbeddedFileWriter::WriteBuiltin(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob,
+ const int builtin_id) const {
+ const bool is_default_variant =
+ std::strcmp(embedded_variant_, kDefaultEmbeddedVariant) == 0;
+
+ i::EmbeddedVector<char, kTemporaryStringLength> builtin_symbol;
+ if (is_default_variant) {
+ // Create nicer symbol names for the default mode.
+ i::SNPrintF(builtin_symbol, "Builtins_%s", i::Builtins::name(builtin_id));
+ } else {
+ i::SNPrintF(builtin_symbol, "%s_Builtins_%s", embedded_variant_,
+ i::Builtins::name(builtin_id));
+ }
+
+ // Labels created here will show up in backtraces. We check in
+ // Isolate::SetEmbeddedBlob that the blob layout remains unchanged, i.e.
+ // that labels do not insert bytes into the middle of the blob byte
+ // stream.
+ w->DeclareFunctionBegin(builtin_symbol.begin());
+ const std::vector<byte>& current_positions = source_positions_[builtin_id];
+
+ // The code below interleaves bytes of assembly code for the builtin
+ // function with source positions at the appropriate offsets.
+ Vector<const byte> vpos(current_positions.data(), current_positions.size());
+ v8::internal::SourcePositionTableIterator positions(
+ vpos, SourcePositionTableIterator::kExternalOnly);
+
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(
+ blob->InstructionStartOfBuiltin(builtin_id));
+ uint32_t size = blob->PaddedInstructionSizeOfBuiltin(builtin_id);
+ uint32_t i = 0;
+ uint32_t next_offset =
+ static_cast<uint32_t>(positions.done() ? size : positions.code_offset());
+ while (i < size) {
+ if (i == next_offset) {
+ // Write source directive.
+ w->SourceInfo(positions.source_position().ExternalFileId(),
+ GetExternallyCompiledFilename(
+ positions.source_position().ExternalFileId()),
+ positions.source_position().ExternalLine());
+ positions.Advance();
+ next_offset = static_cast<uint32_t>(
+ positions.done() ? size : positions.code_offset());
+ }
+ CHECK_GE(next_offset, i);
+ WriteBinaryContentsAsInlineAssembly(w, data + i, next_offset - i);
+ i = next_offset;
+ }
+
+ w->DeclareFunctionEnd(builtin_symbol.begin());
+}
+
+void EmbeddedFileWriter::WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const {
+ {
+ i::EmbeddedVector<char, kTemporaryStringLength> embedded_blob_symbol;
+ i::SNPrintF(embedded_blob_symbol, "v8_%s_embedded_blob_",
+ embedded_variant_);
+
+ w->Comment("Pointer to the beginning of the embedded blob.");
+ w->SectionData();
+ w->AlignToDataAlignment();
+ w->DeclarePointerToSymbol(embedded_blob_symbol.begin(),
+ EmbeddedBlobDataSymbol().c_str());
+ w->Newline();
+ }
+
+ {
+ i::EmbeddedVector<char, kTemporaryStringLength> embedded_blob_size_symbol;
+ i::SNPrintF(embedded_blob_size_symbol, "v8_%s_embedded_blob_size_",
+ embedded_variant_);
+
+ w->Comment("The size of the embedded blob in bytes.");
+ w->SectionRoData();
+ w->AlignToDataAlignment();
+ w->DeclareUint32(embedded_blob_size_symbol.begin(), blob->size());
+ w->Newline();
+ }
+
+#if defined(V8_OS_WIN_X64)
+ {
+ i::EmbeddedVector<char, kTemporaryStringLength> unwind_info_symbol;
+ i::SNPrintF(unwind_info_symbol, "%s_Builtins_UnwindInfo",
+ embedded_variant_);
+
+ w->MaybeEmitUnwindData(unwind_info_symbol.begin(),
+ EmbeddedBlobDataSymbol().c_str(), blob,
+ reinterpret_cast<const void*>(&unwind_infos_[0]));
+ }
+#endif
+
+ w->FileEpilogue();
+}
+
+namespace {
+
+int WriteDirectiveOrSeparator(PlatformEmbeddedFileWriterBase* w,
+ int current_line_length,
+ DataDirective directive) {
+ int printed_chars;
+ if (current_line_length == 0) {
+ printed_chars = w->IndentedDataDirective(directive);
+ DCHECK_LT(0, printed_chars);
+ } else {
+ printed_chars = fprintf(w->fp(), ",");
+ DCHECK_EQ(1, printed_chars);
+ }
+ return current_line_length + printed_chars;
+}
+
+int WriteLineEndIfNeeded(PlatformEmbeddedFileWriterBase* w,
+ int current_line_length, int write_size) {
+ static const int kTextWidth = 100;
+ // Check if adding ',0xFF...FF\n"' would force a line wrap. This doesn't use
+ // the actual size of the string to be written to determine this so it's
+ // more conservative than strictly needed.
+ if (current_line_length + strlen(",0x") + write_size * 2 > kTextWidth) {
+ fprintf(w->fp(), "\n");
+ return 0;
+ } else {
+ return current_line_length;
+ }
+}
+
+} // namespace
+
+// static
+void EmbeddedFileWriter::WriteBinaryContentsAsInlineAssembly(
+ PlatformEmbeddedFileWriterBase* w, const uint8_t* data, uint32_t size) {
+ int current_line_length = 0;
+ uint32_t i = 0;
+
+ // Begin by writing out byte chunks.
+ const DataDirective directive = w->ByteChunkDataDirective();
+ const int byte_chunk_size = DataDirectiveSize(directive);
+ for (; i + byte_chunk_size < size; i += byte_chunk_size) {
+ current_line_length =
+ WriteDirectiveOrSeparator(w, current_line_length, directive);
+ current_line_length += w->WriteByteChunk(data + i);
+ current_line_length =
+ WriteLineEndIfNeeded(w, current_line_length, byte_chunk_size);
+ }
+ if (current_line_length != 0) w->Newline();
+ current_line_length = 0;
+
+ // Write any trailing bytes one-by-one.
+ for (; i < size; i++) {
+ current_line_length =
+ WriteDirectiveOrSeparator(w, current_line_length, kByte);
+ current_line_length += w->HexLiteral(data[i]);
+ current_line_length = WriteLineEndIfNeeded(w, current_line_length, 1);
+ }
+
+ if (current_line_length != 0) w->Newline();
+}
+
+int EmbeddedFileWriter::LookupOrAddExternallyCompiledFilename(
+ const char* filename) {
+ auto result = external_filenames_.find(filename);
+ if (result != external_filenames_.end()) {
+ return result->second;
+ }
+ int new_id =
+ ExternalFilenameIndexToId(static_cast<int>(external_filenames_.size()));
+ external_filenames_.insert(std::make_pair(filename, new_id));
+ external_filenames_by_index_.push_back(filename);
+ DCHECK_EQ(external_filenames_by_index_.size(), external_filenames_.size());
+ return new_id;
+}
+
+const char* EmbeddedFileWriter::GetExternallyCompiledFilename(
+ int fileid) const {
+ size_t index = static_cast<size_t>(ExternalFilenameIdToIndex(fileid));
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, external_filenames_by_index_.size());
+
+ return external_filenames_by_index_[index];
+}
+
+int EmbeddedFileWriter::GetExternallyCompiledFilenameCount() const {
+ return static_cast<int>(external_filenames_.size());
+}
+
+void EmbeddedFileWriter::PrepareBuiltinSourcePositionMap(Builtins* builtins) {
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ // Retrieve the SourcePositionTable and copy it.
+ Code code = builtins->builtin(i);
+ // Verify that the code object is still the "real code" and not a
+ // trampoline (which wouldn't have source positions).
+ DCHECK(!code.is_off_heap_trampoline());
+ std::vector<unsigned char> data(
+ code.SourcePositionTable().GetDataStartAddress(),
+ code.SourcePositionTable().GetDataEndAddress());
+ source_positions_[i] = data;
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/embedded/embedded-file-writer.h b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
new file mode 100644
index 0000000000..c26465ae6a
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/embedded-file-writer.h
@@ -0,0 +1,221 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_EMBEDDED_FILE_WRITER_H_
+#define V8_SNAPSHOT_EMBEDDED_EMBEDDED_FILE_WRITER_H_
+
+#include <cinttypes>
+#include <cstdio>
+#include <cstring>
+
+#include "src/common/globals.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/snapshot/embedded/platform-embedded-file-writer-base.h"
+
+#if defined(V8_OS_WIN_X64)
+#include "src/diagnostics/unwinding-info-win64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+static constexpr char kDefaultEmbeddedVariant[] = "Default";
+
+// Detailed source-code information about builtins can only be obtained by
+// registration on the isolate during compilation.
+class EmbeddedFileWriterInterface {  // Implemented by EmbeddedFileWriter below; presumably split out so the isolate depends only on this narrow interface — confirm at the registration site.
+ public:
+ // We maintain a database of filenames to synthetic IDs.
+ virtual int LookupOrAddExternallyCompiledFilename(const char* filename) = 0;
+ virtual const char* GetExternallyCompiledFilename(int index) const = 0;
+ virtual int GetExternallyCompiledFilenameCount() const = 0;
+
+ // The isolate will call the method below just prior to replacing the
+ // compiled builtin Code objects with trampolines.
+ virtual void PrepareBuiltinSourcePositionMap(Builtins* builtins) = 0;
+
+#if defined(V8_OS_WIN_X64)
+ virtual void SetBuiltinUnwindData(
+ int builtin_index,
+ const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) = 0;
+#endif
+};
+
+// Generates the embedded.S file which is later compiled into the final v8
+// binary. Its contents are exported through two symbols:
+//
+// v8_<variant>_embedded_blob_ (intptr_t):
+// a pointer to the start of the embedded blob.
+// v8_<variant>_embedded_blob_size_ (uint32_t):
+// size of the embedded blob in bytes.
+//
+// The variant is usually "Default" but can be modified in multisnapshot builds.
+class EmbeddedFileWriter : public EmbeddedFileWriterInterface {
+ public:
+ int LookupOrAddExternallyCompiledFilename(const char* filename) override;
+ const char* GetExternallyCompiledFilename(int fileid) const override;
+ int GetExternallyCompiledFilenameCount() const override;
+
+ void PrepareBuiltinSourcePositionMap(Builtins* builtins) override;
+
+#if defined(V8_OS_WIN_X64)
+ void SetBuiltinUnwindData(
+ int builtin_index,
+ const win64_unwindinfo::BuiltinUnwindInfo& unwinding_info) override {
+ DCHECK_LT(builtin_index, Builtins::builtin_count);
+ unwind_infos_[builtin_index] = unwinding_info;
+ }
+#endif
+
+ void SetEmbeddedFile(const char* embedded_src_path) {
+ embedded_src_path_ = embedded_src_path;
+ }
+
+ void SetEmbeddedVariant(const char* embedded_variant) {
+ if (embedded_variant == nullptr) return;
+ embedded_variant_ = embedded_variant;
+ }
+
+ void SetTargetArch(const char* target_arch) { target_arch_ = target_arch; }
+
+ void SetTargetOs(const char* target_os) { target_os_ = target_os; }
+
+ void WriteEmbedded(const i::EmbeddedData* blob) const {
+ MaybeWriteEmbeddedFile(blob);
+ }
+
+ private:
+ void MaybeWriteEmbeddedFile(const i::EmbeddedData* blob) const {
+ if (embedded_src_path_ == nullptr) return;
+
+ FILE* fp = GetFileDescriptorOrDie(embedded_src_path_);
+
+ std::unique_ptr<PlatformEmbeddedFileWriterBase> writer =
+ NewPlatformEmbeddedFileWriter(target_arch_, target_os_);
+ writer->SetFile(fp);
+
+ WriteFilePrologue(writer.get());
+ WriteExternalFilenames(writer.get());
+ WriteMetadataSection(writer.get(), blob);
+ WriteInstructionStreams(writer.get(), blob);
+ WriteFileEpilogue(writer.get(), blob);
+
+ fclose(fp);
+ }
+
+ static FILE* GetFileDescriptorOrDie(const char* filename) {
+ FILE* fp = v8::base::OS::FOpen(filename, "wb");
+ if (fp == nullptr) {
+ i::PrintF("Unable to open file \"%s\" for writing.\n", filename);
+ exit(1);
+ }
+ return fp;
+ }
+
+ void WriteFilePrologue(PlatformEmbeddedFileWriterBase* w) const {
+ w->Comment("Autogenerated file. Do not edit.");
+ w->Newline();
+ w->FilePrologue();
+ }
+
+ void WriteExternalFilenames(PlatformEmbeddedFileWriterBase* w) const {
+ w->Comment(
+ "Source positions in the embedded blob refer to filenames by id.");
+ w->Comment("Assembly directives here map the id to a filename.");
+ w->Newline();
+
+ // Write external filenames.
+ int size = static_cast<int>(external_filenames_by_index_.size());
+ for (int i = 0; i < size; i++) {
+ w->DeclareExternalFilename(ExternalFilenameIndexToId(i),
+ external_filenames_by_index_[i]);
+ }
+ }
+
+ // Fairly arbitrary but should fit all symbol names.
+ static constexpr int kTemporaryStringLength = 256;
+
+ std::string EmbeddedBlobDataSymbol() const {
+ i::EmbeddedVector<char, kTemporaryStringLength> embedded_blob_data_symbol;
+ i::SNPrintF(embedded_blob_data_symbol, "v8_%s_embedded_blob_data_",
+ embedded_variant_);
+ return std::string{embedded_blob_data_symbol.begin()};
+ }
+
+ void WriteMetadataSection(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const {
+ w->Comment("The embedded blob starts here. Metadata comes first, followed");
+ w->Comment("by builtin instruction streams.");
+ w->SectionText();
+ w->AlignToCodeAlignment();
+ w->DeclareLabel(EmbeddedBlobDataSymbol().c_str());
+
+ WriteBinaryContentsAsInlineAssembly(w, blob->data(),
+ i::EmbeddedData::RawDataOffset());
+ }
+
+ void WriteBuiltin(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob, const int builtin_id) const;
+
+ void WriteInstructionStreams(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const {
+ for (int i = 0; i < i::Builtins::builtin_count; i++) {
+ if (!blob->ContainsBuiltin(i)) continue;
+
+ WriteBuiltin(w, blob, i);
+ }
+ w->Newline();
+ }
+
+ void WriteFileEpilogue(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const;
+
+#if defined(V8_OS_WIN_X64)
+ std::string BuiltinsUnwindInfoLabel() const;
+ void WriteUnwindInfo(PlatformEmbeddedFileWriterBase* w,
+ const i::EmbeddedData* blob) const;
+ void WriteUnwindInfoEntry(PlatformEmbeddedFileWriterBase* w,
+ uint64_t rva_start, uint64_t rva_end) const;
+#endif
+
+ static void WriteBinaryContentsAsInlineAssembly(
+ PlatformEmbeddedFileWriterBase* w, const uint8_t* data, uint32_t size);
+
+ // In assembly directives, filename ids need to begin with 1.
+ static constexpr int kFirstExternalFilenameId = 1;
+ static int ExternalFilenameIndexToId(int index) {
+ return kFirstExternalFilenameId + index;
+ }
+ static int ExternalFilenameIdToIndex(int id) {
+ return id - kFirstExternalFilenameId;
+ }
+
+ private:
+ std::vector<byte> source_positions_[Builtins::builtin_count];
+
+#if defined(V8_OS_WIN_X64)
+ win64_unwindinfo::BuiltinUnwindInfo unwind_infos_[Builtins::builtin_count];
+#endif
+
+ std::map<const char*, int> external_filenames_;
+ std::vector<const char*> external_filenames_by_index_;
+
+ // The file to generate or nullptr.
+ const char* embedded_src_path_ = nullptr;
+
+ // The variant is only used in multi-snapshot builds and otherwise set to
+ // "Default".
+ const char* embedded_variant_ = kDefaultEmbeddedVariant;
+
+ // {target_arch} and {target_os} control the generated assembly format. Note
+ // these may differ from both host- and target-platforms specified through
+ // e.g. V8_OS_* and V8_TARGET_ARCH_* defines.
+ const char* target_arch_ = nullptr;
+ const char* target_os_ = nullptr;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_EMBEDDED_FILE_WRITER_H_
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
new file mode 100644
index 0000000000..3aef77e341
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -0,0 +1,132 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/embedded/platform-embedded-file-writer-aix.h"
+
+namespace v8 {
+namespace internal {
+
+#define SYMBOL_PREFIX ""
+
+namespace {
+
+const char* DirectiveAsString(DataDirective directive) {  // Maps a DataDirective to its AIX assembler spelling.
+ switch (directive) {
+ case kByte:
+ return ".byte";
+ case kLong:
+ return ".long";
+ case kQuad:
+ return ".llong";  // AIX/XCOFF spelling of a 64-bit word (".quad" elsewhere).
+ default:
+ UNREACHABLE();  // kOcta is unsupported here: ByteChunkDataDirective() returns kLong on AIX.
+ }
+}
+
+} // namespace
+
+void PlatformEmbeddedFileWriterAIX::SectionText() {
+ fprintf(fp_, ".csect .text[PR]\n");
+}
+
+void PlatformEmbeddedFileWriterAIX::SectionData() {
+ fprintf(fp_, ".csect .data[RW]\n");
+}
+
+void PlatformEmbeddedFileWriterAIX::SectionRoData() {
+ fprintf(fp_, ".csect[RO]\n");
+}
+
+void PlatformEmbeddedFileWriterAIX::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, ".align 2\n");  // AIX .align takes a power-of-two exponent (cf. ".align 5" vs generic ".balign 32"), so this is 4-byte alignment.
+ fprintf(fp_, "%s:\n", name);
+ IndentedDataDirective(kLong);
+ fprintf(fp_, "%" PRIu32 "\n", value);  // Unsigned conversion: "%d" misprints values > INT32_MAX.
+ Newline();
+}
+
+void PlatformEmbeddedFileWriterAIX::DeclarePointerToSymbol(const char* name,
+ const char* target) {
+ AlignToCodeAlignment();
+ DeclareLabel(name);
+ fprintf(fp_, " %s %s\n", DirectiveAsString(PointerSizeDirective()), target);
+ Newline();
+}
+
+void PlatformEmbeddedFileWriterAIX::DeclareSymbolGlobal(const char* name) {
+ fprintf(fp_, ".globl %s\n", name);
+}
+
+void PlatformEmbeddedFileWriterAIX::AlignToCodeAlignment() {
+ fprintf(fp_, ".align 5\n");
+}
+
+void PlatformEmbeddedFileWriterAIX::AlignToDataAlignment() {
+ fprintf(fp_, ".align 3\n");
+}
+
+void PlatformEmbeddedFileWriterAIX::Comment(const char* string) {
+ fprintf(fp_, "// %s\n", string);
+}
+
+void PlatformEmbeddedFileWriterAIX::DeclareLabel(const char* name) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s:\n", name);
+}
+
+void PlatformEmbeddedFileWriterAIX::SourceInfo(int fileid, const char* filename,
+ int line) {
+ fprintf(fp_, ".xline %d, \"%s\"\n", line, filename);  // fileid is intentionally unused: .xline embeds the filename directly (see DeclareExternalFilename below).
+}
+
+void PlatformEmbeddedFileWriterAIX::DeclareFunctionBegin(const char* name) {
+ Newline();
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, ".csect %s[DS]\n", name); // function descriptor
+ fprintf(fp_, "%s:\n", name);
+ fprintf(fp_, ".llong .%s, 0, 0\n", name);
+ SectionText();
+ fprintf(fp_, ".%s:\n", name);
+}
+
+void PlatformEmbeddedFileWriterAIX::DeclareFunctionEnd(const char* name) {}
+
+int PlatformEmbeddedFileWriterAIX::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
+void PlatformEmbeddedFileWriterAIX::FilePrologue() {}
+
+void PlatformEmbeddedFileWriterAIX::DeclareExternalFilename(
+ int fileid, const char* filename) {
+ // Empty on purpose: file names cannot be declared with an identifier on
+ // AIX. The SourceInfo method emits the debug info instead, in
+ // ".xline <line-number>, <file-name>" format.
+}
+
+void PlatformEmbeddedFileWriterAIX::FileEpilogue() {}
+
+int PlatformEmbeddedFileWriterAIX::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+DataDirective PlatformEmbeddedFileWriterAIX::ByteChunkDataDirective() const {
+ // PPC uses a fixed 4 byte instruction set, using .long
+ // to prevent any unnecessary padding.
+ return kLong;
+}
+
+int PlatformEmbeddedFileWriterAIX::WriteByteChunk(const uint8_t* data) {
+ DCHECK_EQ(ByteChunkDataDirective(), kLong);
+ const uint32_t* long_ptr = reinterpret_cast<const uint32_t*>(data);
+ return HexLiteral(*long_ptr);
+}
+
+#undef SYMBOL_PREFIX
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.h
new file mode 100644
index 0000000000..6119d50623
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.h
@@ -0,0 +1,64 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_AIX_H_
+#define V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_AIX_H_
+
+#include "src/base/macros.h"
+#include "src/snapshot/embedded/platform-embedded-file-writer-base.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformEmbeddedFileWriterAIX : public PlatformEmbeddedFileWriterBase {
+ public:
+ PlatformEmbeddedFileWriterAIX(EmbeddedTargetArch target_arch,
+ EmbeddedTargetOs target_os)
+ : target_arch_(target_arch), target_os_(target_os) {
+ USE(target_arch_);
+ USE(target_os_);
+ DCHECK_EQ(target_os_, EmbeddedTargetOs::kAIX);
+ }
+
+ void SectionText() override;
+ void SectionData() override;
+ void SectionRoData() override;
+
+ void AlignToCodeAlignment() override;
+ void AlignToDataAlignment() override;
+
+ void DeclareUint32(const char* name, uint32_t value) override;
+ void DeclarePointerToSymbol(const char* name, const char* target) override;
+
+ void DeclareLabel(const char* name) override;
+
+ void SourceInfo(int fileid, const char* filename, int line) override;
+ void DeclareFunctionBegin(const char* name) override;
+ void DeclareFunctionEnd(const char* name) override;
+
+ int HexLiteral(uint64_t value) override;
+
+ void Comment(const char* string) override;
+
+ void FilePrologue() override;
+ void DeclareExternalFilename(int fileid, const char* filename) override;
+ void FileEpilogue() override;
+
+ int IndentedDataDirective(DataDirective directive) override;
+
+ DataDirective ByteChunkDataDirective() const override;
+ int WriteByteChunk(const uint8_t* data) override;
+
+ private:
+ void DeclareSymbolGlobal(const char* name);
+
+ private:
+ const EmbeddedTargetArch target_arch_;
+ const EmbeddedTargetOs target_os_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_AIX_H_
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
new file mode 100644
index 0000000000..a17f039fa2
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.cc
@@ -0,0 +1,156 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/embedded/platform-embedded-file-writer-base.h"
+
+#include <string>
+
+#include "src/common/globals.h"
+#include "src/snapshot/embedded/platform-embedded-file-writer-aix.h"
+#include "src/snapshot/embedded/platform-embedded-file-writer-generic.h"
+#include "src/snapshot/embedded/platform-embedded-file-writer-mac.h"
+#include "src/snapshot/embedded/platform-embedded-file-writer-win.h"
+
+namespace v8 {
+namespace internal {
+
+DataDirective PointerSizeDirective() {
+ if (kSystemPointerSize == 8) {
+ return kQuad;
+ } else {
+ CHECK_EQ(4, kSystemPointerSize);
+ return kLong;
+ }
+}
+
+int DataDirectiveSize(DataDirective directive) {  // Bytes emitted per operand of the given data directive.
+ switch (directive) {
+ case kByte:
+ return 1;
+ case kLong:
+ return 4;
+ case kQuad:
+ return 8;
+ case kOcta:
+ return 16;
+ }
+ UNREACHABLE();  // Switch above is exhaustive over the enum.
+}
+
+int PlatformEmbeddedFileWriterBase::WriteByteChunk(const uint8_t* data) {  // Emits one 16-byte chunk as a single hex literal; returns chars printed.
+ DCHECK_EQ(ByteChunkDataDirective(), kOcta);
+
+ static constexpr size_t kSize = kInt64Size;
+
+ uint64_t part1, part2;  // part1 = high 64 bits of the 128-bit literal, part2 = low 64 bits (part1 is printed first below).
+ // Use memcpy for the reads since {data} is not guaranteed to be aligned.
+#ifdef V8_TARGET_BIG_ENDIAN
+ memcpy(&part1, data, kSize);
+ memcpy(&part2, data + kSize, kSize);
+#else
+ memcpy(&part1, data + kSize, kSize);
+ memcpy(&part2, data, kSize);
+#endif // V8_TARGET_BIG_ENDIAN
+
+ if (part1 != 0) {  // Drop an all-zero high word to shorten the emitted literal.
+ return fprintf(fp(), "0x%" PRIx64 "%016" PRIx64, part1, part2);
+ } else {
+ return fprintf(fp(), "0x%" PRIx64, part2);
+ }
+}
+
+namespace {
+
+EmbeddedTargetArch DefaultEmbeddedTargetArch() {
+#if defined(V8_TARGET_ARCH_ARM)
+ return EmbeddedTargetArch::kArm;
+#elif defined(V8_TARGET_ARCH_ARM64)
+ return EmbeddedTargetArch::kArm64;
+#elif defined(V8_TARGET_ARCH_IA32)
+ return EmbeddedTargetArch::kIA32;
+#elif defined(V8_TARGET_ARCH_X64)
+ return EmbeddedTargetArch::kX64;
+#else
+ return EmbeddedTargetArch::kGeneric;
+#endif
+}
+
+EmbeddedTargetArch ToEmbeddedTargetArch(const char* s) {
+ if (s == nullptr) {
+ return DefaultEmbeddedTargetArch();
+ }
+
+ std::string string(s);
+ if (string == "arm") {
+ return EmbeddedTargetArch::kArm;
+ } else if (string == "arm64") {
+ return EmbeddedTargetArch::kArm64;
+ } else if (string == "ia32") {
+ return EmbeddedTargetArch::kIA32;
+ } else if (string == "x64") {
+ return EmbeddedTargetArch::kX64;
+ } else {
+ return EmbeddedTargetArch::kGeneric;
+ }
+}
+
+EmbeddedTargetOs DefaultEmbeddedTargetOs() {
+#if defined(V8_OS_AIX)
+ return EmbeddedTargetOs::kAIX;
+#elif defined(V8_OS_MACOSX)
+ return EmbeddedTargetOs::kMac;
+#elif defined(V8_OS_WIN)
+ return EmbeddedTargetOs::kWin;
+#else
+ return EmbeddedTargetOs::kGeneric;
+#endif
+}
+
+EmbeddedTargetOs ToEmbeddedTargetOs(const char* s) {
+ if (s == nullptr) {
+ return DefaultEmbeddedTargetOs();
+ }
+
+ std::string string(s);
+ if (string == "aix") {
+ return EmbeddedTargetOs::kAIX;
+ } else if (string == "chromeos") {
+ return EmbeddedTargetOs::kChromeOS;
+ } else if (string == "fuchsia") {
+ return EmbeddedTargetOs::kFuchsia;
+ } else if (string == "ios" || string == "mac") {
+ return EmbeddedTargetOs::kMac;
+ } else if (string == "win") {
+ return EmbeddedTargetOs::kWin;
+ } else {
+ return EmbeddedTargetOs::kGeneric;
+ }
+}
+
+} // namespace
+
+std::unique_ptr<PlatformEmbeddedFileWriterBase> NewPlatformEmbeddedFileWriter(
+ const char* target_arch, const char* target_os) {  // Factory: dispatches on target OS; arch is forwarded to the chosen writer.
+ auto embedded_target_arch = ToEmbeddedTargetArch(target_arch);
+ auto embedded_target_os = ToEmbeddedTargetOs(target_os);
+
+ if (embedded_target_os == EmbeddedTargetOs::kAIX) {
+ return base::make_unique<PlatformEmbeddedFileWriterAIX>(
+ embedded_target_arch, embedded_target_os);
+ } else if (embedded_target_os == EmbeddedTargetOs::kMac) {
+ return base::make_unique<PlatformEmbeddedFileWriterMac>(
+ embedded_target_arch, embedded_target_os);
+ } else if (embedded_target_os == EmbeddedTargetOs::kWin) {
+ return base::make_unique<PlatformEmbeddedFileWriterWin>(
+ embedded_target_arch, embedded_target_os);
+ } else {
+ return base::make_unique<PlatformEmbeddedFileWriterGeneric>(
+ embedded_target_arch, embedded_target_os);
+ }
+
+ UNREACHABLE();  // NOTE(review): dead code — every branch above returns; consider deleting.
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h
new file mode 100644
index 0000000000..0f1763ba24
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-base.h
@@ -0,0 +1,105 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_BASE_H_
+#define V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_BASE_H_
+
+#include <cinttypes>
+#include <cstdio> // For FILE.
+#include <memory>
+
+namespace v8 {
+namespace internal {
+
+class EmbeddedData;
+
+enum DataDirective {
+ kByte,
+ kLong,
+ kQuad,
+ kOcta,
+};
+
+DataDirective PointerSizeDirective();
+int DataDirectiveSize(DataDirective directive);
+
+enum class EmbeddedTargetOs {
+ kAIX,
+ kChromeOS,
+ kFuchsia,
+ kMac,
+ kWin,
+ kGeneric, // Everything not covered above falls in here.
+};
+
+enum class EmbeddedTargetArch {
+ kArm,
+ kArm64,
+ kIA32,
+ kX64,
+ kGeneric, // Everything not covered above falls in here.
+};
+
+// The platform-dependent logic for emitting assembly code for the generated
+// embedded.S file.
+class PlatformEmbeddedFileWriterBase {
+ public:
+ virtual ~PlatformEmbeddedFileWriterBase() = default;
+
+ void SetFile(FILE* fp) { fp_ = fp; }
+ FILE* fp() const { return fp_; }
+
+ virtual void SectionText() = 0;
+ virtual void SectionData() = 0;
+ virtual void SectionRoData() = 0;
+
+ virtual void AlignToCodeAlignment() = 0;
+ virtual void AlignToDataAlignment() = 0;
+
+ virtual void DeclareUint32(const char* name, uint32_t value) = 0;
+ virtual void DeclarePointerToSymbol(const char* name, const char* target) = 0;
+
+ virtual void DeclareLabel(const char* name) = 0;
+
+ virtual void SourceInfo(int fileid, const char* filename, int line) = 0;
+ virtual void DeclareFunctionBegin(const char* name) = 0;
+ virtual void DeclareFunctionEnd(const char* name) = 0;
+
+ // Returns the number of printed characters.
+ virtual int HexLiteral(uint64_t value) = 0;
+
+ virtual void Comment(const char* string) = 0;
+ virtual void Newline() { fprintf(fp_, "\n"); }
+
+ virtual void FilePrologue() = 0;
+ virtual void DeclareExternalFilename(int fileid, const char* filename) = 0;
+ virtual void FileEpilogue() = 0;
+
+ virtual int IndentedDataDirective(DataDirective directive) = 0;
+
+ virtual DataDirective ByteChunkDataDirective() const { return kOcta; }
+ virtual int WriteByteChunk(const uint8_t* data);
+
+ // This awkward interface works around the fact that unwind data emission
+ // is both high-level and platform-dependent. The former implies it should
+ // live in EmbeddedFileWriter, but code there should be platform-independent.
+ //
+ // Emits unwinding data on x64 Windows, and does nothing otherwise.
+ virtual void MaybeEmitUnwindData(const char* unwind_info_symbol,
+ const char* embedded_blob_data_symbol,
+ const EmbeddedData* blob,
+ const void* unwind_infos) {}
+
+ protected:
+ FILE* fp_ = nullptr;
+};
+
+// The factory function. Returns the appropriate platform-specific instance.
+std::unique_ptr<PlatformEmbeddedFileWriterBase> NewPlatformEmbeddedFileWriter(
+ const char* target_arch, const char* target_os);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_BASE_H_
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
new file mode 100644
index 0000000000..4cee1ac131
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -0,0 +1,140 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/embedded/platform-embedded-file-writer-generic.h"
+
+#include <algorithm>
+#include <cinttypes>
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+#define SYMBOL_PREFIX ""
+
+namespace {
+
+const char* DirectiveAsString(DataDirective directive) {
+ switch (directive) {
+ case kByte:
+ return ".byte";
+ case kLong:
+ return ".long";
+ case kQuad:
+ return ".quad";
+ case kOcta:
+ return ".octa";
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+void PlatformEmbeddedFileWriterGeneric::SectionText() {
+ if (target_os_ == EmbeddedTargetOs::kChromeOS) {
+ fprintf(fp_, ".section .text.hot.embedded\n");
+ } else {
+ fprintf(fp_, ".section .text\n");
+ }
+}
+
+void PlatformEmbeddedFileWriterGeneric::SectionData() {
+ fprintf(fp_, ".section .data\n");
+}
+
+void PlatformEmbeddedFileWriterGeneric::SectionRoData() {
+ fprintf(fp_, ".section .rodata\n");
+}
+
+void PlatformEmbeddedFileWriterGeneric::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ IndentedDataDirective(kLong);
+ fprintf(fp_, "%" PRIu32, value);  // Unsigned conversion: "%d" misprints values > INT32_MAX.
+ Newline();
+}
+
+void PlatformEmbeddedFileWriterGeneric::DeclarePointerToSymbol(
+ const char* name, const char* target) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ fprintf(fp_, " %s %s%s\n", DirectiveAsString(PointerSizeDirective()),
+ SYMBOL_PREFIX, target);
+}
+
+void PlatformEmbeddedFileWriterGeneric::DeclareSymbolGlobal(const char* name) {
+ fprintf(fp_, ".global %s%s\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformEmbeddedFileWriterGeneric::AlignToCodeAlignment() {
+ fprintf(fp_, ".balign 32\n");
+}
+
+void PlatformEmbeddedFileWriterGeneric::AlignToDataAlignment() {
+ // On Windows ARM64, s390, PPC and possibly more platforms, aligned load
+ // instructions are used to retrieve v8_Default_embedded_blob_ and/or
+ // v8_Default_embedded_blob_size_. The generated instructions require the
+ // load target to be aligned at 8 bytes (2^3).
+ fprintf(fp_, ".balign 8\n");
+}
+
+void PlatformEmbeddedFileWriterGeneric::Comment(const char* string) {
+ fprintf(fp_, "// %s\n", string);
+}
+
+void PlatformEmbeddedFileWriterGeneric::DeclareLabel(const char* name) {
+ fprintf(fp_, "%s%s:\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformEmbeddedFileWriterGeneric::SourceInfo(int fileid,
+ const char* filename,
+ int line) {
+ fprintf(fp_, ".loc %d %d\n", fileid, line);
+}
+
+void PlatformEmbeddedFileWriterGeneric::DeclareFunctionBegin(const char* name) {
+ DeclareLabel(name);
+
+ if (target_arch_ == EmbeddedTargetArch::kArm ||
+ target_arch_ == EmbeddedTargetArch::kArm64) {
+ // ELF format binaries on ARM use ".type <function name>, %function"
+ // to create a DWARF subprogram entry.
+ fprintf(fp_, ".type %s, %%function\n", name);
+ } else {
+ // Other ELF Format binaries use ".type <function name>, @function"
+ // to create a DWARF subprogram entry.
+ fprintf(fp_, ".type %s, @function\n", name);
+ }
+}
+
+void PlatformEmbeddedFileWriterGeneric::DeclareFunctionEnd(const char* name) {}
+
+int PlatformEmbeddedFileWriterGeneric::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
+void PlatformEmbeddedFileWriterGeneric::FilePrologue() {}
+
+void PlatformEmbeddedFileWriterGeneric::DeclareExternalFilename(
+ int fileid, const char* filename) {
+ // Replace any Windows style paths (backslashes) with forward
+ // slashes.
+ std::string fixed_filename(filename);
+ std::replace(fixed_filename.begin(), fixed_filename.end(), '\\', '/');
+ fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
+}
+
+void PlatformEmbeddedFileWriterGeneric::FileEpilogue() {}
+
+int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+#undef SYMBOL_PREFIX
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.h
new file mode 100644
index 0000000000..0c76e7df88
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.h
@@ -0,0 +1,63 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_GENERIC_H_
+#define V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_GENERIC_H_
+
+#include "src/base/macros.h"
+#include "src/common/globals.h" // For V8_OS_WIN_X64
+#include "src/snapshot/embedded/platform-embedded-file-writer-base.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformEmbeddedFileWriterGeneric
+ : public PlatformEmbeddedFileWriterBase {
+ public:
+ PlatformEmbeddedFileWriterGeneric(EmbeddedTargetArch target_arch,
+ EmbeddedTargetOs target_os)
+ : target_arch_(target_arch), target_os_(target_os) {
+ DCHECK(target_os_ == EmbeddedTargetOs::kChromeOS ||
+ target_os_ == EmbeddedTargetOs::kFuchsia ||
+ target_os_ == EmbeddedTargetOs::kGeneric);
+ }
+
+ void SectionText() override;
+ void SectionData() override;
+ void SectionRoData() override;
+
+ void AlignToCodeAlignment() override;
+ void AlignToDataAlignment() override;
+
+ void DeclareUint32(const char* name, uint32_t value) override;
+ void DeclarePointerToSymbol(const char* name, const char* target) override;
+
+ void DeclareLabel(const char* name) override;
+
+ void SourceInfo(int fileid, const char* filename, int line) override;
+ void DeclareFunctionBegin(const char* name) override;
+ void DeclareFunctionEnd(const char* name) override;
+
+ int HexLiteral(uint64_t value) override;
+
+ void Comment(const char* string) override;
+
+ void FilePrologue() override;
+ void DeclareExternalFilename(int fileid, const char* filename) override;
+ void FileEpilogue() override;
+
+ int IndentedDataDirective(DataDirective directive) override;
+
+ private:
+ void DeclareSymbolGlobal(const char* name);
+
+ private:
+ const EmbeddedTargetArch target_arch_;
+ const EmbeddedTargetOs target_os_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_GENERIC_H_
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
new file mode 100644
index 0000000000..4be3c7ac6b
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
@@ -0,0 +1,109 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/embedded/platform-embedded-file-writer-mac.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+const char* DirectiveAsString(DataDirective directive) {
+ switch (directive) {
+ case kByte:
+ return ".byte";
+ case kLong:
+ return ".long";
+ case kQuad:
+ return ".quad";
+ case kOcta:
+ return ".octa";
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+void PlatformEmbeddedFileWriterMac::SectionText() { fprintf(fp_, ".text\n"); }
+
+void PlatformEmbeddedFileWriterMac::SectionData() { fprintf(fp_, ".data\n"); }
+
+void PlatformEmbeddedFileWriterMac::SectionRoData() {
+ fprintf(fp_, ".const_data\n");
+}
+
+void PlatformEmbeddedFileWriterMac::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ IndentedDataDirective(kLong);
+ fprintf(fp_, "%" PRIu32, value);  // Unsigned conversion: "%d" misprints values > INT32_MAX.
+ Newline();
+}
+
+void PlatformEmbeddedFileWriterMac::DeclarePointerToSymbol(const char* name,
+ const char* target) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ fprintf(fp_, " %s _%s\n", DirectiveAsString(PointerSizeDirective()), target);
+}
+
+void PlatformEmbeddedFileWriterMac::DeclareSymbolGlobal(const char* name) {
+ // TODO(jgruber): Investigate switching to .globl. Using .private_extern
+ // prevents something along the compilation chain from messing with the
+ // embedded blob. Using .global here causes embedded blob hash verification
+ // failures at runtime.
+ fprintf(fp_, ".private_extern _%s\n", name);
+}
+
+void PlatformEmbeddedFileWriterMac::AlignToCodeAlignment() {
+ fprintf(fp_, ".balign 32\n");
+}
+
+void PlatformEmbeddedFileWriterMac::AlignToDataAlignment() {
+ fprintf(fp_, ".balign 8\n");
+}
+
+void PlatformEmbeddedFileWriterMac::Comment(const char* string) {
+ fprintf(fp_, "// %s\n", string);
+}
+
+void PlatformEmbeddedFileWriterMac::DeclareLabel(const char* name) {
+ fprintf(fp_, "_%s:\n", name);
+}
+
+void PlatformEmbeddedFileWriterMac::SourceInfo(int fileid, const char* filename,
+ int line) {
+ fprintf(fp_, ".loc %d %d\n", fileid, line);
+}
+
+void PlatformEmbeddedFileWriterMac::DeclareFunctionBegin(const char* name) {
+ DeclareLabel(name);
+
+ // TODO(mvstanton): Investigate the proper incantations to mark the label as
+ // a function on OSX.
+}
+
+void PlatformEmbeddedFileWriterMac::DeclareFunctionEnd(const char* name) {}
+
+int PlatformEmbeddedFileWriterMac::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
+void PlatformEmbeddedFileWriterMac::FilePrologue() {}
+
+void PlatformEmbeddedFileWriterMac::DeclareExternalFilename(
+ int fileid, const char* filename) {
+ fprintf(fp_, ".file %d \"%s\"\n", fileid, filename);
+}
+
+void PlatformEmbeddedFileWriterMac::FileEpilogue() {}
+
+int PlatformEmbeddedFileWriterMac::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h
new file mode 100644
index 0000000000..4f2cd3d6ae
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.h
@@ -0,0 +1,61 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_MAC_H_
+#define V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_MAC_H_
+
+#include "src/base/macros.h"
+#include "src/snapshot/embedded/platform-embedded-file-writer-base.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformEmbeddedFileWriterMac : public PlatformEmbeddedFileWriterBase {
+ public:
+ PlatformEmbeddedFileWriterMac(EmbeddedTargetArch target_arch,
+ EmbeddedTargetOs target_os)
+ : target_arch_(target_arch), target_os_(target_os) {
+ USE(target_arch_);
+ USE(target_os_);
+ DCHECK_EQ(target_os_, EmbeddedTargetOs::kMac);
+ }
+
+ void SectionText() override;
+ void SectionData() override;
+ void SectionRoData() override;
+
+ void AlignToCodeAlignment() override;
+ void AlignToDataAlignment() override;
+
+ void DeclareUint32(const char* name, uint32_t value) override;
+ void DeclarePointerToSymbol(const char* name, const char* target) override;
+
+ void DeclareLabel(const char* name) override;
+
+ void SourceInfo(int fileid, const char* filename, int line) override;
+ void DeclareFunctionBegin(const char* name) override;
+ void DeclareFunctionEnd(const char* name) override;
+
+ int HexLiteral(uint64_t value) override;
+
+ void Comment(const char* string) override;
+
+ void FilePrologue() override;
+ void DeclareExternalFilename(int fileid, const char* filename) override;
+ void FileEpilogue() override;
+
+ int IndentedDataDirective(DataDirective directive) override;
+
+ private:
+ void DeclareSymbolGlobal(const char* name);
+
+ private:
+ const EmbeddedTargetArch target_arch_;
+ const EmbeddedTargetOs target_os_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_MAC_H_
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
new file mode 100644
index 0000000000..69457e11a5
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -0,0 +1,615 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/snapshot/embedded/platform-embedded-file-writer-win.h"
+
+#include <algorithm>
+
+#include "src/common/globals.h" // For V8_OS_WIN_X64.
+
+#if defined(V8_OS_WIN_X64)
+#include "src/builtins/builtins.h"
+#include "src/diagnostics/unwinding-info-win64.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+// V8_CC_MSVC is true for both MSVC and clang on windows. clang can handle
+// __asm__-style inline assembly but MSVC cannot, and thus we need a more
+// precise compiler detection that can distinguish between the two. clang on
+// windows sets both __clang__ and _MSC_VER, MSVC sets only _MSC_VER.
+#if defined(_MSC_VER) && !defined(__clang__)
+#define V8_COMPILER_IS_MSVC
+#endif
+
+// MSVC uses MASM for x86 and x64, while it has a ARMASM for ARM32 and
+// ARMASM64 for ARM64. Since ARMASM and ARMASM64 accept a slightly tweaked
+// version of ARM assembly language, they are referred to together in Visual
+// Studio project files as MARMASM.
+//
+// ARM assembly language docs:
+// http://infocenter.arm.com/help/topic/com.arm.doc.dui0802b/index.html
+// Microsoft ARM assembler and assembly language docs:
+// https://docs.microsoft.com/en-us/cpp/assembler/arm/arm-assembler-reference
+#if defined(V8_COMPILER_IS_MSVC)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_ARM)
+#define V8_ASSEMBLER_IS_MARMASM
+#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
+#define V8_ASSEMBLER_IS_MASM
+#else
+#error Unknown Windows assembler target architecture.
+#endif
+#endif
+
+// Name mangling.
+// Symbols are prefixed with an underscore on 32-bit architectures.
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64)
+#define SYMBOL_PREFIX "_"
+#else
+#define SYMBOL_PREFIX ""
+#endif
+
+// Notes:
+//
+// Cross-bitness builds are unsupported. It's thus safe to detect bitness
+// through compile-time defines.
+//
+// Cross-compiler builds (e.g. with mixed use of clang / MSVC) are likewise
+// unsupported and hence the compiler can also be detected through compile-time
+// defines.
+
+namespace {
+
+const char* DirectiveAsString(DataDirective directive) {
+#if defined(V8_ASSEMBLER_IS_MASM)
+ switch (directive) {
+ case kByte:
+ return "BYTE";
+ case kLong:
+ return "DWORD";
+ case kQuad:
+ return "QWORD";
+ default:
+ UNREACHABLE();
+ }
+#elif defined(V8_ASSEMBLER_IS_MARMASM)
+ switch (directive) {
+ case kByte:
+ return "DCB";
+ case kLong:
+ return "DCDU";
+ case kQuad:
+ return "DCQU";
+ default:
+ UNREACHABLE();
+ }
+#else
+ switch (directive) {
+ case kByte:
+ return ".byte";
+ case kLong:
+ return ".long";
+ case kQuad:
+ return ".quad";
+ case kOcta:
+ return ".octa";
+ }
+ UNREACHABLE();
+#endif
+}
+
+#if defined(V8_OS_WIN_X64)
+
+void WriteUnwindInfoEntry(PlatformEmbeddedFileWriterWin* w,
+ const char* unwind_info_symbol,
+ const char* embedded_blob_data_symbol,
+ uint64_t rva_start, uint64_t rva_end) {
+ w->DeclareRvaToSymbol(embedded_blob_data_symbol, rva_start);
+ w->DeclareRvaToSymbol(embedded_blob_data_symbol, rva_end);
+ w->DeclareRvaToSymbol(unwind_info_symbol);
+}
+
+void EmitUnwindData(PlatformEmbeddedFileWriterWin* w,
+ const char* unwind_info_symbol,
+ const char* embedded_blob_data_symbol,
+ const EmbeddedData* blob,
+ const win64_unwindinfo::BuiltinUnwindInfo* unwind_infos) {
+ // Emit an UNWIND_INFO (XDATA) struct, which contains the unwinding
+ // information that is used for all builtin functions.
+ DCHECK(win64_unwindinfo::CanEmitUnwindInfoForBuiltins());
+ w->Comment("xdata for all the code in the embedded blob.");
+ w->DeclareExternalFunction(CRASH_HANDLER_FUNCTION_NAME_STRING);
+
+ w->StartXdataSection();
+ {
+ w->DeclareLabel(unwind_info_symbol);
+
+ std::vector<uint8_t> xdata =
+ win64_unwindinfo::GetUnwindInfoForBuiltinFunctions();
+ DCHECK(!xdata.empty());
+
+ w->IndentedDataDirective(kByte);
+ for (size_t i = 0; i < xdata.size(); i++) {
+ if (i > 0) fprintf(w->fp(), ",");
+ w->HexLiteral(xdata[i]);
+ }
+ w->Newline();
+
+ w->Comment(" ExceptionHandler");
+ w->DeclareRvaToSymbol(CRASH_HANDLER_FUNCTION_NAME_STRING);
+ }
+ w->EndXdataSection();
+ w->Newline();
+
+ // Emit a RUNTIME_FUNCTION (PDATA) entry for each builtin function, as
+ // documented here:
+ // https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64.
+ w->Comment(
+ "pdata for all the code in the embedded blob (structs of type "
+ "RUNTIME_FUNCTION).");
+ w->Comment(" BeginAddress");
+ w->Comment(" EndAddress");
+ w->Comment(" UnwindInfoAddress");
+ w->StartPdataSection();
+ {
+ Address prev_builtin_end_offset = 0;
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ // Some builtins are leaf functions from the point of view of Win64 stack
+ // walking: they do not move the stack pointer and do not require a PDATA
+ // entry because the return address can be retrieved from [rsp].
+ if (!blob->ContainsBuiltin(i)) continue;
+ if (unwind_infos[i].is_leaf_function()) continue;
+
+ uint64_t builtin_start_offset = blob->InstructionStartOfBuiltin(i) -
+ reinterpret_cast<Address>(blob->data());
+ uint32_t builtin_size = blob->InstructionSizeOfBuiltin(i);
+
+ const std::vector<int>& xdata_desc = unwind_infos[i].fp_offsets();
+ if (xdata_desc.empty()) {
+ // Some builtins do not have any "push rbp - mov rbp, rsp" instructions
+ // to start a stack frame. We still emit a PDATA entry as if they had,
+ // relying on the fact that we can find the previous frame address from
+ // rbp in most cases. Note that since the function does not really start
+ // with a 'push rbp' we need to specify the start RVA in the PDATA entry
+ // a few bytes before the beginning of the function, if it does not
+ // overlap the end of the previous builtin.
+ WriteUnwindInfoEntry(
+ w, unwind_info_symbol, embedded_blob_data_symbol,
+ std::max(prev_builtin_end_offset,
+ builtin_start_offset - win64_unwindinfo::kRbpPrefixLength),
+ builtin_start_offset + builtin_size);
+ } else {
+ // Some builtins have one or more "push rbp - mov rbp, rsp" sequences,
+ // but not necessarily at the beginning of the function. In this case
+ // we want to yield a PDATA entry for each block of instructions that
+ // emit an rbp frame. If the function does not start with 'push rbp'
+ // we also emit a PDATA entry for the initial block of code up to the
+ // first 'push rbp', like in the case above.
+ if (xdata_desc[0] > 0) {
+ WriteUnwindInfoEntry(w, unwind_info_symbol, embedded_blob_data_symbol,
+ std::max(prev_builtin_end_offset,
+ builtin_start_offset -
+ win64_unwindinfo::kRbpPrefixLength),
+ builtin_start_offset + xdata_desc[0]);
+ }
+
+ for (size_t j = 0; j < xdata_desc.size(); j++) {
+ int chunk_start = xdata_desc[j];
+ int chunk_end =
+ (j < xdata_desc.size() - 1) ? xdata_desc[j + 1] : builtin_size;
+ WriteUnwindInfoEntry(w, unwind_info_symbol, embedded_blob_data_symbol,
+ builtin_start_offset + chunk_start,
+ builtin_start_offset + chunk_end);
+ }
+ }
+
+ prev_builtin_end_offset = builtin_start_offset + builtin_size;
+ w->Newline();
+ }
+ }
+ w->EndPdataSection();
+ w->Newline();
+}
+#endif // defined(V8_OS_WIN_X64)
+
+} // namespace
+
+void PlatformEmbeddedFileWriterWin::MaybeEmitUnwindData(
+ const char* unwind_info_symbol, const char* embedded_blob_data_symbol,
+ const EmbeddedData* blob, const void* unwind_infos) {
+#if defined(V8_OS_WIN_X64)
+ if (win64_unwindinfo::CanEmitUnwindInfoForBuiltins()) {
+ EmitUnwindData(this, unwind_info_symbol, embedded_blob_data_symbol, blob,
+ reinterpret_cast<const win64_unwindinfo::BuiltinUnwindInfo*>(
+ unwind_infos));
+ }
+#endif // defined(V8_OS_WIN_X64)
+}
+
+// Windows, MSVC, not arm/arm64.
+// -----------------------------------------------------------------------------
+
+#if defined(V8_ASSEMBLER_IS_MASM)
+
+// For MSVC builds we emit assembly in MASM syntax.
+// See https://docs.microsoft.com/en-us/cpp/assembler/masm/directives-reference.
+
+void PlatformEmbeddedFileWriterWin::SectionText() { fprintf(fp_, ".CODE\n"); }
+
+void PlatformEmbeddedFileWriterWin::SectionData() { fprintf(fp_, ".DATA\n"); }
+
+void PlatformEmbeddedFileWriterWin::SectionRoData() {
+ fprintf(fp_, ".CONST\n");
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s%s %s %d\n", SYMBOL_PREFIX, name, DirectiveAsString(kLong),
+ value);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclarePointerToSymbol(const char* name,
+ const char* target) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s%s %s %s%s\n", SYMBOL_PREFIX, name,
+ DirectiveAsString(PointerSizeDirective()), SYMBOL_PREFIX, target);
+}
+
+void PlatformEmbeddedFileWriterWin::StartPdataSection() {
+ fprintf(fp_, "OPTION DOTNAME\n");
+ fprintf(fp_, ".pdata SEGMENT DWORD READ ''\n");
+}
+
+void PlatformEmbeddedFileWriterWin::EndPdataSection() {
+ fprintf(fp_, ".pdata ENDS\n");
+}
+
+void PlatformEmbeddedFileWriterWin::StartXdataSection() {
+ fprintf(fp_, "OPTION DOTNAME\n");
+ fprintf(fp_, ".xdata SEGMENT DWORD READ ''\n");
+}
+
+void PlatformEmbeddedFileWriterWin::EndXdataSection() {
+ fprintf(fp_, ".xdata ENDS\n");
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareExternalFunction(const char* name) {
+ fprintf(fp_, "EXTERN %s : PROC\n", name);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareRvaToSymbol(const char* name,
+ uint64_t offset) {
+ if (offset > 0) {
+ fprintf(fp_, "DD IMAGEREL %s+%llu\n", name, offset);
+ } else {
+ fprintf(fp_, "DD IMAGEREL %s\n", name);
+ }
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareSymbolGlobal(const char* name) {
+ fprintf(fp_, "PUBLIC %s%s\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformEmbeddedFileWriterWin::AlignToCodeAlignment() {
+ // Diverges from other platforms due to compile error
+ // 'invalid combination with segment alignment'.
+ fprintf(fp_, "ALIGN 4\n");
+}
+
+void PlatformEmbeddedFileWriterWin::AlignToDataAlignment() {
+ fprintf(fp_, "ALIGN 4\n");
+}
+
+void PlatformEmbeddedFileWriterWin::Comment(const char* string) {
+ fprintf(fp_, "; %s\n", string);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareLabel(const char* name) {
+ fprintf(fp_, "%s%s LABEL %s\n", SYMBOL_PREFIX, name,
+ DirectiveAsString(kByte));
+}
+
+void PlatformEmbeddedFileWriterWin::SourceInfo(int fileid, const char* filename,
+ int line) {
+ // TODO(mvstanton): output source information for MSVC.
+ // Its syntax is #line <line> "<filename>"
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareFunctionBegin(const char* name) {
+ fprintf(fp_, "%s%s PROC\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareFunctionEnd(const char* name) {
+ fprintf(fp_, "%s%s ENDP\n", SYMBOL_PREFIX, name);
+}
+
+int PlatformEmbeddedFileWriterWin::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0%" PRIx64 "h", value);
+}
+
+void PlatformEmbeddedFileWriterWin::FilePrologue() {
+ if (target_arch_ != EmbeddedTargetArch::kX64) {
+ fprintf(fp_, ".MODEL FLAT\n");
+ }
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
+ int fileid, const char* filename) {}
+
+void PlatformEmbeddedFileWriterWin::FileEpilogue() { fprintf(fp_, "END\n"); }
+
+int PlatformEmbeddedFileWriterWin::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+// Windows, MSVC, arm/arm64.
+// -----------------------------------------------------------------------------
+
+#elif defined(V8_ASSEMBLER_IS_MARMASM)
+
+// The AARCH64 ABI requires instructions be 4-byte-aligned and Windows does
+// not have a stricter alignment requirement (see the TEXTAREA macro of
+// kxarm64.h in the Windows SDK), so code is 4-byte-aligned.
+// The data fields in the emitted assembly tend to be accessed with 8-byte
+// LDR instructions, so data is 8-byte-aligned.
+//
+// armasm64's warning A4228 states
+// Alignment value exceeds AREA alignment; alignment not guaranteed
+// To ensure that ALIGN directives are honored, their values are defined as
+// equal to their corresponding AREA's ALIGN attributes.
+
+#define ARM64_DATA_ALIGNMENT_POWER (3)
+#define ARM64_DATA_ALIGNMENT (1 << ARM64_DATA_ALIGNMENT_POWER)
+#define ARM64_CODE_ALIGNMENT_POWER (2)
+#define ARM64_CODE_ALIGNMENT (1 << ARM64_CODE_ALIGNMENT_POWER)
+
+void PlatformEmbeddedFileWriterWin::SectionText() {
+ fprintf(fp_, " AREA |.text|, CODE, ALIGN=%d, READONLY\n",
+ ARM64_CODE_ALIGNMENT_POWER);
+}
+
+void PlatformEmbeddedFileWriterWin::SectionData() {
+ fprintf(fp_, " AREA |.data|, DATA, ALIGN=%d, READWRITE\n",
+ ARM64_DATA_ALIGNMENT_POWER);
+}
+
+void PlatformEmbeddedFileWriterWin::SectionRoData() {
+ fprintf(fp_, " AREA |.rodata|, DATA, ALIGN=%d, READONLY\n",
+ ARM64_DATA_ALIGNMENT_POWER);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s%s %s %d\n", SYMBOL_PREFIX, name, DirectiveAsString(kLong),
+ value);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclarePointerToSymbol(const char* name,
+ const char* target) {
+ DeclareSymbolGlobal(name);
+ fprintf(fp_, "%s%s %s %s%s\n", SYMBOL_PREFIX, name,
+ DirectiveAsString(PointerSizeDirective()), SYMBOL_PREFIX, target);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareSymbolGlobal(const char* name) {
+ fprintf(fp_, " EXPORT %s%s\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformEmbeddedFileWriterWin::AlignToCodeAlignment() {
+ fprintf(fp_, " ALIGN %d\n", ARM64_CODE_ALIGNMENT);
+}
+
+void PlatformEmbeddedFileWriterWin::AlignToDataAlignment() {
+ fprintf(fp_, " ALIGN %d\n", ARM64_DATA_ALIGNMENT);
+}
+
+void PlatformEmbeddedFileWriterWin::Comment(const char* string) {
+ fprintf(fp_, "; %s\n", string);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareLabel(const char* name) {
+ fprintf(fp_, "%s%s\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformEmbeddedFileWriterWin::SourceInfo(int fileid, const char* filename,
+ int line) {
+ // TODO(mvstanton): output source information for MSVC.
+ // Its syntax is #line <line> "<filename>"
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareFunctionBegin(const char* name) {
+ fprintf(fp_, "%s%s FUNCTION\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareFunctionEnd(const char* name) {
+ fprintf(fp_, " ENDFUNC\n");
+}
+
+int PlatformEmbeddedFileWriterWin::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
+void PlatformEmbeddedFileWriterWin::FilePrologue() {}
+
+void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
+ int fileid, const char* filename) {}
+
+void PlatformEmbeddedFileWriterWin::FileEpilogue() { fprintf(fp_, " END\n"); }
+
+int PlatformEmbeddedFileWriterWin::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+#undef ARM64_DATA_ALIGNMENT_POWER
+#undef ARM64_DATA_ALIGNMENT
+#undef ARM64_CODE_ALIGNMENT_POWER
+#undef ARM64_CODE_ALIGNMENT
+
+// All Windows builds without MSVC.
+// -----------------------------------------------------------------------------
+
+#else
+
+void PlatformEmbeddedFileWriterWin::SectionText() {
+ fprintf(fp_, ".section .text\n");
+}
+
+void PlatformEmbeddedFileWriterWin::SectionData() {
+ fprintf(fp_, ".section .data\n");
+}
+
+void PlatformEmbeddedFileWriterWin::SectionRoData() {
+ fprintf(fp_, ".section .rdata\n");
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareUint32(const char* name,
+ uint32_t value) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ IndentedDataDirective(kLong);
+ fprintf(fp_, "%d", value);
+ Newline();
+}
+
+void PlatformEmbeddedFileWriterWin::DeclarePointerToSymbol(const char* name,
+ const char* target) {
+ DeclareSymbolGlobal(name);
+ DeclareLabel(name);
+ fprintf(fp_, " %s %s%s\n", DirectiveAsString(PointerSizeDirective()),
+ SYMBOL_PREFIX, target);
+}
+
+void PlatformEmbeddedFileWriterWin::StartPdataSection() {
+ fprintf(fp_, ".section .pdata\n");
+}
+
+void PlatformEmbeddedFileWriterWin::EndPdataSection() {}
+
+void PlatformEmbeddedFileWriterWin::StartXdataSection() {
+ fprintf(fp_, ".section .xdata\n");
+}
+
+void PlatformEmbeddedFileWriterWin::EndXdataSection() {}
+
+void PlatformEmbeddedFileWriterWin::DeclareExternalFunction(const char* name) {}
+
+void PlatformEmbeddedFileWriterWin::DeclareRvaToSymbol(const char* name,
+ uint64_t offset) {
+ if (offset > 0) {
+ fprintf(fp_, ".rva %s + %" PRIu64 "\n", name, offset);
+ } else {
+ fprintf(fp_, ".rva %s\n", name);
+ }
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareSymbolGlobal(const char* name) {
+ fprintf(fp_, ".global %s%s\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformEmbeddedFileWriterWin::AlignToCodeAlignment() {
+ fprintf(fp_, ".balign 32\n");
+}
+
+void PlatformEmbeddedFileWriterWin::AlignToDataAlignment() {
+ // On Windows ARM64, s390, PPC and possibly more platforms, aligned load
+ // instructions are used to retrieve v8_Default_embedded_blob_ and/or
+ // v8_Default_embedded_blob_size_. The generated instructions require the
+ // load target to be aligned at 8 bytes (2^3).
+ fprintf(fp_, ".balign 8\n");
+}
+
+void PlatformEmbeddedFileWriterWin::Comment(const char* string) {
+ fprintf(fp_, "// %s\n", string);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareLabel(const char* name) {
+ fprintf(fp_, "%s%s:\n", SYMBOL_PREFIX, name);
+}
+
+void PlatformEmbeddedFileWriterWin::SourceInfo(int fileid, const char* filename,
+ int line) {
+ fprintf(fp_, ".loc %d %d\n", fileid, line);
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareFunctionBegin(const char* name) {
+ DeclareLabel(name);
+
+ if (target_arch_ == EmbeddedTargetArch::kArm64) {
+ // Windows ARM64 assembly is in GAS syntax, but ".type" is invalid directive
+ // in PE/COFF for Windows.
+ } else {
+ // The directives for inserting debugging information on Windows come
+ // from the PE (Portable Executable) and COFF (Common Object File Format)
+ // standards. Documented here:
+ // https://docs.microsoft.com/en-us/windows/desktop/debug/pe-format
+ //
+ // .scl 2 means StorageClass external.
+ // .type 32 means Type Representation Function.
+ fprintf(fp_, ".def %s%s; .scl 2; .type 32; .endef;\n", SYMBOL_PREFIX, name);
+ }
+}
+
+void PlatformEmbeddedFileWriterWin::DeclareFunctionEnd(const char* name) {}
+
+int PlatformEmbeddedFileWriterWin::HexLiteral(uint64_t value) {
+ return fprintf(fp_, "0x%" PRIx64, value);
+}
+
+void PlatformEmbeddedFileWriterWin::FilePrologue() {}
+
+void PlatformEmbeddedFileWriterWin::DeclareExternalFilename(
+ int fileid, const char* filename) {
+ // Replace any Windows style paths (backslashes) with forward
+ // slashes.
+ std::string fixed_filename(filename);
+ std::replace(fixed_filename.begin(), fixed_filename.end(), '\\', '/');
+ fprintf(fp_, ".file %d \"%s\"\n", fileid, fixed_filename.c_str());
+}
+
+void PlatformEmbeddedFileWriterWin::FileEpilogue() {}
+
+int PlatformEmbeddedFileWriterWin::IndentedDataDirective(
+ DataDirective directive) {
+ return fprintf(fp_, " %s ", DirectiveAsString(directive));
+}
+
+#endif
+
+DataDirective PlatformEmbeddedFileWriterWin::ByteChunkDataDirective() const {
+#if defined(V8_COMPILER_IS_MSVC)
+ // Windows MASM doesn't have an .octa directive, use QWORDs instead.
+ // Note: MASM *really* does not like large data streams. It takes over 5
+ // minutes to assemble the ~350K lines of embedded.S produced when using
+ // BYTE directives in a debug build. QWORD produces roughly 120KLOC and
+ // reduces assembly time to ~40 seconds. Still terrible, but much better
+ // than before. See also: https://crbug.com/v8/8475.
+ return kQuad;
+#else
+ return PlatformEmbeddedFileWriterBase::ByteChunkDataDirective();
+#endif
+}
+
+int PlatformEmbeddedFileWriterWin::WriteByteChunk(const uint8_t* data) {
+#if defined(V8_COMPILER_IS_MSVC)
+ DCHECK_EQ(ByteChunkDataDirective(), kQuad);
+ const uint64_t* quad_ptr = reinterpret_cast<const uint64_t*>(data);
+ return HexLiteral(*quad_ptr);
+#else
+ return PlatformEmbeddedFileWriterBase::WriteByteChunk(data);
+#endif
+}
+
+#undef SYMBOL_PREFIX
+#undef V8_ASSEMBLER_IS_MASM
+#undef V8_ASSEMBLER_IS_MARMASM
+#undef V8_COMPILER_IS_MSVC
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.h b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.h
new file mode 100644
index 0000000000..376c6cc6ef
--- /dev/null
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.h
@@ -0,0 +1,78 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_WIN_H_
+#define V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_WIN_H_
+
+#include "src/base/macros.h"
+#include "src/snapshot/embedded/platform-embedded-file-writer-base.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformEmbeddedFileWriterWin : public PlatformEmbeddedFileWriterBase {
+ public:
+ PlatformEmbeddedFileWriterWin(EmbeddedTargetArch target_arch,
+ EmbeddedTargetOs target_os)
+ : target_arch_(target_arch), target_os_(target_os) {
+ USE(target_os_);
+ DCHECK_EQ(target_os_, EmbeddedTargetOs::kWin);
+ }
+
+ void SectionText() override;
+ void SectionData() override;
+ void SectionRoData() override;
+
+ void AlignToCodeAlignment() override;
+ void AlignToDataAlignment() override;
+
+ void DeclareUint32(const char* name, uint32_t value) override;
+ void DeclarePointerToSymbol(const char* name, const char* target) override;
+
+ void DeclareLabel(const char* name) override;
+
+ void SourceInfo(int fileid, const char* filename, int line) override;
+ void DeclareFunctionBegin(const char* name) override;
+ void DeclareFunctionEnd(const char* name) override;
+
+ int HexLiteral(uint64_t value) override;
+
+ void Comment(const char* string) override;
+
+ void FilePrologue() override;
+ void DeclareExternalFilename(int fileid, const char* filename) override;
+ void FileEpilogue() override;
+
+ int IndentedDataDirective(DataDirective directive) override;
+
+ DataDirective ByteChunkDataDirective() const override;
+ int WriteByteChunk(const uint8_t* data) override;
+
+ void StartPdataSection();
+ void EndPdataSection();
+ void StartXdataSection();
+ void EndXdataSection();
+ void DeclareExternalFunction(const char* name);
+
+ // Emits an RVA (address relative to the module load address) specified as an
+ // offset from a given symbol.
+ void DeclareRvaToSymbol(const char* name, uint64_t offset = 0);
+
+ void MaybeEmitUnwindData(const char* unwind_info_symbol,
+ const char* embedded_blob_data_symbol,
+ const EmbeddedData* blob,
+ const void* unwind_infos) override;
+
+ private:
+ void DeclareSymbolGlobal(const char* name);
+
+ private:
+ const EmbeddedTargetArch target_arch_;
+ const EmbeddedTargetOs target_os_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SNAPSHOT_EMBEDDED_PLATFORM_EMBEDDED_FILE_WRITER_WIN_H_
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index f44f71e145..6bf198230f 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -8,16 +8,16 @@
#include <iomanip>
#include "include/libplatform/libplatform.h"
-#include "src/assembler-arch.h"
#include "src/base/platform/platform.h"
-#include "src/flags.h"
-#include "src/msan.h"
-#include "src/snapshot/embedded-file-writer.h"
+#include "src/codegen/assembler-arch.h"
+#include "src/codegen/source-position-table.h"
+#include "src/flags/flags.h"
+#include "src/sanitizer/msan.h"
+#include "src/snapshot/embedded/embedded-file-writer.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/source-position-table.h"
namespace {
@@ -70,7 +70,7 @@ class SnapshotFileWriter {
static void WriteSnapshotFilePrefix(FILE* fp) {
fprintf(fp, "// Autogenerated snapshot file. Do not edit.\n\n");
- fprintf(fp, "#include \"src/v8.h\"\n");
+ fprintf(fp, "#include \"src/init/v8.h\"\n");
fprintf(fp, "#include \"src/base/platform/platform.h\"\n\n");
fprintf(fp, "#include \"src/snapshot/snapshot.h\"\n\n");
fprintf(fp, "namespace v8 {\n");
@@ -87,7 +87,8 @@ class SnapshotFileWriter {
static void WriteSnapshotFileData(FILE* fp,
const i::Vector<const i::byte>& blob) {
- fprintf(fp, "static const byte blob_data[] = {\n");
+ fprintf(fp,
+ "alignas(kPointerAlignment) static const byte blob_data[] = {\n");
WriteBinaryContentsAsCArray(fp, blob);
fprintf(fp, "};\n");
fprintf(fp, "static const int blob_size = %d;\n", blob.length());
@@ -143,100 +144,37 @@ char* GetExtraCode(char* filename, const char* description) {
return chars;
}
-bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
- const char* utf8_source, const char* name) {
+v8::StartupData CreateSnapshotDataBlob(v8::Isolate* isolate,
+ const char* embedded_source) {
v8::base::ElapsedTimer timer;
timer.Start();
- v8::Context::Scope context_scope(context);
- v8::TryCatch try_catch(isolate);
- v8::Local<v8::String> source_string;
- if (!v8::String::NewFromUtf8(isolate, utf8_source, v8::NewStringType::kNormal)
- .ToLocal(&source_string)) {
- return false;
- }
- v8::Local<v8::String> resource_name =
- v8::String::NewFromUtf8(isolate, name, v8::NewStringType::kNormal)
- .ToLocalChecked();
- v8::ScriptOrigin origin(resource_name);
- v8::ScriptCompiler::Source source(source_string, origin);
- v8::Local<v8::Script> script;
- if (!v8::ScriptCompiler::Compile(context, &source).ToLocal(&script))
- return false;
- if (script->Run(context).IsEmpty()) return false;
- if (i::FLAG_profile_deserialization) {
- i::PrintF("Executing custom snapshot script %s took %0.3f ms\n", name,
- timer.Elapsed().InMillisecondsF());
- }
- timer.Stop();
- CHECK(!try_catch.HasCaught());
- return true;
-}
-v8::StartupData CreateSnapshotDataBlob(v8::SnapshotCreator* snapshot_creator,
- const char* script_source = nullptr) {
- // Create a new isolate and a new context from scratch, optionally run
- // a script to embed, and serialize to create a snapshot blob.
- v8::StartupData result = {nullptr, 0};
- v8::base::ElapsedTimer timer;
- timer.Start();
- {
- v8::Isolate* isolate = snapshot_creator->GetIsolate();
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- if (script_source != nullptr &&
- !RunExtraCode(isolate, context, script_source, "<embedded>")) {
- return result;
- }
- snapshot_creator->SetDefaultContext(context);
- }
- result = snapshot_creator->CreateBlob(
- v8::SnapshotCreator::FunctionCodeHandling::kClear);
- }
+ v8::StartupData result = i::CreateSnapshotDataBlobInternal(
+ v8::SnapshotCreator::FunctionCodeHandling::kClear, embedded_source,
+ isolate);
if (i::FLAG_profile_deserialization) {
i::PrintF("Creating snapshot took %0.3f ms\n",
timer.Elapsed().InMillisecondsF());
}
+
timer.Stop();
return result;
}
-v8::StartupData WarmUpSnapshotDataBlob(v8::SnapshotCreator* snapshot_creator,
+v8::StartupData WarmUpSnapshotDataBlob(v8::StartupData cold_snapshot_blob,
const char* warmup_source) {
- CHECK_NOT_NULL(warmup_source);
- // Use following steps to create a warmed up snapshot blob from a cold one:
- // - Create a new isolate from the cold snapshot.
- // - Create a new context to run the warmup script. This will trigger
- // compilation of executed functions.
- // - Create a new context. This context will be unpolluted.
- // - Serialize the isolate and the second context into a new snapshot blob.
- v8::StartupData result = {nullptr, 0};
v8::base::ElapsedTimer timer;
timer.Start();
- {
- v8::Isolate* isolate = snapshot_creator->GetIsolate();
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- if (!RunExtraCode(isolate, context, warmup_source, "<warm-up>")) {
- return result;
- }
- }
- {
- v8::HandleScope handle_scope(isolate);
- isolate->ContextDisposedNotification(false);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- snapshot_creator->SetDefaultContext(context);
- }
- result = snapshot_creator->CreateBlob(
- v8::SnapshotCreator::FunctionCodeHandling::kKeep);
- }
+
+ v8::StartupData result =
+ i::WarmUpSnapshotDataBlobInternal(cold_snapshot_blob, warmup_source);
if (i::FLAG_profile_deserialization) {
i::PrintF("Warming up snapshot took %0.3f ms\n",
timer.Elapsed().InMillisecondsF());
}
+
timer.Stop();
return result;
}
@@ -301,9 +239,9 @@ int main(int argc, char** argv) {
i::EmbeddedFileWriter embedded_writer;
embedded_writer.SetEmbeddedFile(i::FLAG_embedded_src);
- if (i::FLAG_embedded_variant != nullptr) {
- embedded_writer.SetEmbeddedVariant(i::FLAG_embedded_variant);
- }
+ embedded_writer.SetEmbeddedVariant(i::FLAG_embedded_variant);
+ embedded_writer.SetTargetArch(i::FLAG_target_arch);
+ embedded_writer.SetTargetOs(i::FLAG_target_os);
std::unique_ptr<char> embed_script(
GetExtraCode(argc >= 2 ? argv[1] : nullptr, "embedding"));
@@ -331,18 +269,18 @@ int main(int argc, char** argv) {
// to be written out if builtins are embedded.
i_isolate->RegisterEmbeddedFileWriter(&embedded_writer);
}
- v8::SnapshotCreator snapshot_creator(isolate);
+ blob = CreateSnapshotDataBlob(isolate, embed_script.get());
if (i::FLAG_embedded_builtins) {
+ // At this point, the Isolate has been torn down but the embedded blob
+ // is still alive (we called DisableEmbeddedBlobRefcounting above).
+ // That's fine as far as the embedded file writer is concerned.
WriteEmbeddedFile(&embedded_writer);
}
- blob = CreateSnapshotDataBlob(&snapshot_creator, embed_script.get());
}
if (warmup_script) {
- CHECK(blob.raw_size > 0 && blob.data != nullptr);
v8::StartupData cold = blob;
- v8::SnapshotCreator snapshot_creator(nullptr, &cold);
- blob = WarmUpSnapshotDataBlob(&snapshot_creator, warmup_script.get());
+ blob = WarmUpSnapshotDataBlob(cold, warmup_script.get());
delete[] cold.data;
}
diff --git a/deps/v8/src/snapshot/natives-common.cc b/deps/v8/src/snapshot/natives-common.cc
index 4cb7b5f0da..321b74b45c 100644
--- a/deps/v8/src/snapshot/natives-common.cc
+++ b/deps/v8/src/snapshot/natives-common.cc
@@ -5,7 +5,7 @@
// The common functionality when building with internal or external natives.
#include "src/heap/heap.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/snapshot/natives.h"
namespace v8 {
@@ -19,7 +19,7 @@ NativesExternalStringResource::NativesExternalStringResource(NativeType type,
CHECK_EQ(EXTRAS, type_);
DCHECK(index < ExtraNatives::GetBuiltinsCount());
source = ExtraNatives::GetScriptSource(index);
- data_ = source.start();
+ data_ = source.begin();
length_ = source.length();
}
diff --git a/deps/v8/src/snapshot/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index c9177bbc99..fe67f33087 100644
--- a/deps/v8/src/snapshot/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -6,7 +6,7 @@
#include "src/base/logging.h"
#include "src/snapshot/snapshot-source-sink.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
#ifndef V8_USE_EXTERNAL_STARTUP_DATA
#error natives-external.cc is used only for the external snapshot build.
@@ -44,7 +44,7 @@ class NativesStore {
for (int i = 0; i < static_cast<int>(native_ids_.size()); ++i) {
int native_id_length = native_ids_[i].length();
if ((static_cast<int>(strlen(id)) == native_id_length) &&
- (strncmp(id, native_ids_[i].start(), native_id_length) == 0)) {
+ (strncmp(id, native_ids_[i].begin(), native_id_length) == 0)) {
return i;
}
}
@@ -76,9 +76,9 @@ class NativesStore {
const char extension[] = ".js";
Vector<char> name(Vector<char>::New(id_length + sizeof(native) - 1 +
sizeof(extension) - 1));
- memcpy(name.start(), native, sizeof(native) - 1);
- memcpy(name.start() + sizeof(native) - 1, id, id_length);
- memcpy(name.start() + sizeof(native) - 1 + id_length, extension,
+ memcpy(name.begin(), native, sizeof(native) - 1);
+ memcpy(name.begin() + sizeof(native) - 1, id, id_length);
+ memcpy(name.begin() + sizeof(native) - 1 + id_length, extension,
sizeof(extension) - 1);
return Vector<const char>::cast(name);
}
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index 76b8bf1bde..f294d33b5c 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -6,8 +6,8 @@
#define V8_SNAPSHOT_NATIVES_H_
#include "include/v8.h"
-#include "src/objects.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
+#include "src/objects/objects.h"
namespace v8 { class StartupData; } // Forward declaration.
diff --git a/deps/v8/src/snapshot/object-deserializer.cc b/deps/v8/src/snapshot/object-deserializer.cc
index 44b7088380..63a0cfca17 100644
--- a/deps/v8/src/snapshot/object-deserializer.cc
+++ b/deps/v8/src/snapshot/object-deserializer.cc
@@ -4,11 +4,11 @@
#include "src/snapshot/object-deserializer.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/allocation-site-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/slots.h"
#include "src/snapshot/code-serializer.h"
@@ -61,8 +61,8 @@ void ObjectDeserializer::FlushICache() {
for (Code code : new_code_objects()) {
// Record all references to embedded objects in the new code object.
WriteBarrierForCode(code);
- FlushInstructionCache(code->raw_instruction_start(),
- code->raw_instruction_size());
+ FlushInstructionCache(code.raw_instruction_start(),
+ code.raw_instruction_size());
}
}
@@ -73,8 +73,6 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
for (Handle<String> string : new_internalized_strings()) {
DisallowHeapAllocation no_gc;
StringTableInsertionKey key(*string);
- DCHECK(
- StringTable::ForwardStringIfExists(isolate(), &key, *string).is_null());
StringTable::AddKeyNoResize(isolate(), &key);
}
@@ -98,14 +96,14 @@ void ObjectDeserializer::LinkAllocationSites() {
// Allocation sites are present in the snapshot, and must be linked into
// a list at deserialization time.
for (AllocationSite site : new_allocation_sites()) {
- if (!site->HasWeakNext()) continue;
+ if (!site.HasWeakNext()) continue;
// TODO(mvstanton): consider treating the heap()->allocation_sites_list()
// as a (weak) root. If this root is relocated correctly, this becomes
// unnecessary.
if (heap->allocation_sites_list() == Smi::kZero) {
- site->set_weak_next(ReadOnlyRoots(heap).undefined_value());
+ site.set_weak_next(ReadOnlyRoots(heap).undefined_value());
} else {
- site->set_weak_next(heap->allocation_sites_list());
+ site.set_weak_next(heap->allocation_sites_list());
}
heap->set_allocation_sites_list(site);
}
diff --git a/deps/v8/src/snapshot/partial-deserializer.cc b/deps/v8/src/snapshot/partial-deserializer.cc
index 4dd25980e8..9b56f129df 100644
--- a/deps/v8/src/snapshot/partial-deserializer.cc
+++ b/deps/v8/src/snapshot/partial-deserializer.cc
@@ -4,7 +4,7 @@
#include "src/snapshot/partial-deserializer.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/heap/heap-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/snapshot.h"
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 6c650be4c1..036f0a0414 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -5,10 +5,11 @@
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/api-inl.h"
-#include "src/math-random.h"
-#include "src/microtask-queue.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/execution/microtask-queue.h"
+#include "src/heap/combined-heap.h"
+#include "src/numbers/math-random.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
namespace v8 {
@@ -31,29 +32,28 @@ PartialSerializer::~PartialSerializer() {
void PartialSerializer::Serialize(Context* o, bool include_global_proxy) {
context_ = *o;
- DCHECK(context_->IsNativeContext());
+ DCHECK(context_.IsNativeContext());
reference_map()->AddAttachedReference(
- reinterpret_cast<void*>(context_->global_proxy()->ptr()));
+ reinterpret_cast<void*>(context_.global_proxy().ptr()));
// The bootstrap snapshot has a code-stub context. When serializing the
// partial snapshot, it is chained into the weak context list on the isolate
// and it's next context pointer may point to the code-stub context. Clear
// it before serializing, it will get re-added to the context list
// explicitly when it's loaded.
- context_->set(Context::NEXT_CONTEXT_LINK,
- ReadOnlyRoots(isolate()).undefined_value());
- DCHECK(!context_->global_object()->IsUndefined());
+ context_.set(Context::NEXT_CONTEXT_LINK,
+ ReadOnlyRoots(isolate()).undefined_value());
+ DCHECK(!context_.global_object().IsUndefined());
// Reset math random cache to get fresh random numbers.
MathRandom::ResetContext(context_);
#ifdef DEBUG
- MicrotaskQueue* microtask_queue =
- context_->native_context()->microtask_queue();
+ MicrotaskQueue* microtask_queue = context_.native_context().microtask_queue();
DCHECK_EQ(0, microtask_queue->size());
DCHECK(!microtask_queue->HasMicrotasksSuppressions());
DCHECK_EQ(0, microtask_queue->GetMicrotasksScopeDepth());
DCHECK(microtask_queue->DebugMicrotasksScopeDepthIsZero());
#endif
- context_->native_context()->set_microtask_queue(nullptr);
+ context_.native_context().set_microtask_queue(nullptr);
VisitRootPointer(Root::kPartialSnapshotCache, nullptr, FullObjectSlot(o));
SerializeDeferredObjects();
@@ -92,18 +92,18 @@ void PartialSerializer::SerializeObject(HeapObject obj) {
DCHECK(!startup_serializer_->ReferenceMapContains(obj));
// All the internalized strings that the partial snapshot needs should be
// either in the root table or in the partial snapshot cache.
- DCHECK(!obj->IsInternalizedString());
+ DCHECK(!obj.IsInternalizedString());
// Function and object templates are not context specific.
- DCHECK(!obj->IsTemplateInfo());
+ DCHECK(!obj.IsTemplateInfo());
// We should not end up at another native context.
- DCHECK_IMPLIES(obj != context_, !obj->IsNativeContext());
+ DCHECK_IMPLIES(obj != context_, !obj.IsNativeContext());
// Clear literal boilerplates and feedback.
- if (obj->IsFeedbackVector()) FeedbackVector::cast(obj)->ClearSlots(isolate());
+ if (obj.IsFeedbackVector()) FeedbackVector::cast(obj).ClearSlots(isolate());
// Clear InterruptBudget when serializing FeedbackCell.
- if (obj->IsFeedbackCell()) {
- FeedbackCell::cast(obj)->set_interrupt_budget(
+ if (obj.IsFeedbackCell()) {
+ FeedbackCell::cast(obj).set_interrupt_budget(
FeedbackCell::GetInitialInterruptBudget());
}
@@ -111,12 +111,12 @@ void PartialSerializer::SerializeObject(HeapObject obj) {
return;
}
- if (obj->IsJSFunction()) {
+ if (obj.IsJSFunction()) {
// Unconditionally reset the JSFunction to its SFI's code, since we can't
// serialize optimized code anyway.
JSFunction closure = JSFunction::cast(obj);
- closure->ResetIfBytecodeFlushed();
- if (closure->is_compiled()) closure->set_code(closure->shared()->GetCode());
+ closure.ResetIfBytecodeFlushed();
+ if (closure.is_compiled()) closure.set_code(closure.shared().GetCode());
}
CheckRehashability(obj);
@@ -131,12 +131,12 @@ bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject o) {
// allow them to be part of the partial snapshot because they contain a
// unique ID, and deserializing several partial snapshots containing script
// would cause dupes.
- DCHECK(!o->IsScript());
- return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
- o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
- o->IsTemplateInfo() || o->IsClassPositions() ||
- o->map() == ReadOnlyRoots(startup_serializer_->isolate())
- .fixed_cow_array_map();
+ DCHECK(!o.IsScript());
+ return o.IsName() || o.IsSharedFunctionInfo() || o.IsHeapNumber() ||
+ o.IsCode() || o.IsScopeInfo() || o.IsAccessorInfo() ||
+ o.IsTemplateInfo() || o.IsClassPositions() ||
+ o.map() == ReadOnlyRoots(startup_serializer_->isolate())
+ .fixed_cow_array_map();
}
namespace {
@@ -144,12 +144,12 @@ bool DataIsEmpty(const StartupData& data) { return data.raw_size == 0; }
} // anonymous namespace
bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
- if (!obj->IsJSObject()) return false;
+ if (!obj.IsJSObject()) return false;
JSObject js_obj = JSObject::cast(obj);
- int embedder_fields_count = js_obj->GetEmbedderFieldCount();
+ int embedder_fields_count = js_obj.GetEmbedderFieldCount();
if (embedder_fields_count == 0) return false;
CHECK_GT(embedder_fields_count, 0);
- DCHECK(!js_obj->NeedsRehashing());
+ DCHECK(!js_obj.NeedsRehashing());
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate());
@@ -170,14 +170,13 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
EmbedderDataSlot embedder_data_slot(js_obj, i);
original_embedder_values.emplace_back(embedder_data_slot.load_raw(no_gc));
Object object = embedder_data_slot.load_tagged();
- if (object->IsHeapObject()) {
- DCHECK(isolate()->heap()->Contains(HeapObject::cast(object)));
+ if (object.IsHeapObject()) {
+ DCHECK(IsValidHeapObject(isolate()->heap(), HeapObject::cast(object)));
serialized_data.push_back({nullptr, 0});
} else {
// If no serializer is provided and the field was empty, we serialize it
// by default to nullptr.
- if (serialize_embedder_fields_.callback == nullptr &&
- object->ptr() == 0) {
+ if (serialize_embedder_fields_.callback == nullptr && object.ptr() == 0) {
serialized_data.push_back({nullptr, 0});
} else {
DCHECK_NOT_NULL(serialize_embedder_fields_.callback);
@@ -205,7 +204,7 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
// 4) Obtain back reference for the serialized object.
SerializerReference reference =
- reference_map()->LookupReference(reinterpret_cast<void*>(js_obj->ptr()));
+ reference_map()->LookupReference(reinterpret_cast<void*>(js_obj.ptr()));
DCHECK(reference.is_back_reference());
// 5) Write data returned by the embedder callbacks into a separate sink,
@@ -236,8 +235,8 @@ bool PartialSerializer::SerializeJSObjectWithEmbedderFields(Object obj) {
void PartialSerializer::CheckRehashability(HeapObject obj) {
if (!can_be_rehashed_) return;
- if (!obj->NeedsRehashing()) return;
- if (obj->CanBeRehashed()) return;
+ if (!obj.NeedsRehashing()) return;
+ if (obj.CanBeRehashed()) return;
can_be_rehashed_ = false;
}
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index fcba9feed2..d8e9ee2496 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -5,9 +5,9 @@
#ifndef V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
#define V8_SNAPSHOT_PARTIAL_SERIALIZER_H_
-#include "src/address-map.h"
-#include "src/contexts.h"
+#include "src/objects/contexts.h"
#include "src/snapshot/serializer.h"
+#include "src/utils/address-map.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/read-only-deserializer.cc b/deps/v8/src/snapshot/read-only-deserializer.cc
index b1b22cc70b..576e644846 100644
--- a/deps/v8/src/snapshot/read-only-deserializer.cc
+++ b/deps/v8/src/snapshot/read-only-deserializer.cc
@@ -4,12 +4,12 @@
#include "src/snapshot/read-only-deserializer.h"
-#include "src/api.h"
+#include "src/api/api.h"
+#include "src/execution/v8threads.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
#include "src/heap/read-only-heap.h"
#include "src/objects/slots.h"
#include "src/snapshot/snapshot.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -21,12 +21,15 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
V8::FatalProcessOutOfMemory(isolate, "ReadOnlyDeserializer");
}
+ ReadOnlyHeap* ro_heap = isolate->heap()->read_only_heap();
+
// No active threads.
DCHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
// No active handles.
DCHECK(isolate->handle_scope_implementer()->blocks()->empty());
+ // Read-only object cache is not yet populated.
+ DCHECK(!ro_heap->read_only_object_cache_is_initialized());
// Partial snapshot cache is not yet populated.
- DCHECK(isolate->heap()->read_only_heap()->read_only_object_cache()->empty());
DCHECK(isolate->partial_snapshot_cache()->empty());
// Builtins are not yet created.
DCHECK(!isolate->builtins()->is_initialized());
@@ -36,22 +39,16 @@ void ReadOnlyDeserializer::DeserializeInto(Isolate* isolate) {
ReadOnlyRoots roots(isolate);
roots.Iterate(this);
- isolate->heap()
- ->read_only_heap()
- ->read_only_space()
- ->RepairFreeListsAfterDeserialization();
+ ro_heap->read_only_space()->RepairFreeListsAfterDeserialization();
// Deserialize the Read-only Object Cache.
- std::vector<Object>* cache =
- isolate->heap()->read_only_heap()->read_only_object_cache();
for (size_t i = 0;; ++i) {
- // Extend the array ready to get a value when deserializing.
- if (cache->size() <= i) cache->push_back(Smi::kZero);
+ Object* object = ro_heap->ExtendReadOnlyObjectCache();
// During deserialization, the visitor populates the read-only object
// cache and eventually terminates the cache with undefined.
VisitRootPointer(Root::kReadOnlyObjectCache, nullptr,
- FullObjectSlot(&cache->at(i)));
- if (cache->at(i)->IsUndefined(roots)) break;
+ FullObjectSlot(object));
+ if (object->IsUndefined(roots)) break;
}
DeserializeDeferredObjects();
}
diff --git a/deps/v8/src/snapshot/read-only-serializer.cc b/deps/v8/src/snapshot/read-only-serializer.cc
index a65ce4903e..f4b45a15cc 100644
--- a/deps/v8/src/snapshot/read-only-serializer.cc
+++ b/deps/v8/src/snapshot/read-only-serializer.cc
@@ -4,14 +4,14 @@
#include "src/snapshot/read-only-serializer.h"
-#include "src/api.h"
-#include "src/code-tracer.h"
-#include "src/global-handles.h"
+#include "src/api/api.h"
+#include "src/diagnostics/code-tracer.h"
+#include "src/execution/v8threads.h"
+#include "src/handles/global-handles.h"
#include "src/heap/read-only-heap.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -27,7 +27,7 @@ ReadOnlySerializer::~ReadOnlySerializer() {
void ReadOnlySerializer::SerializeObject(HeapObject obj) {
CHECK(ReadOnlyHeap::Contains(obj));
- CHECK_IMPLIES(obj->IsString(), obj->IsInternalizedString());
+ CHECK_IMPLIES(obj.IsString(), obj.IsInternalizedString());
if (SerializeHotObject(obj)) return;
if (IsRootAndHasBeenSerialized(obj) && SerializeRoot(obj)) {
@@ -40,6 +40,9 @@ void ReadOnlySerializer::SerializeObject(HeapObject obj) {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this, obj, &sink_);
object_serializer.Serialize();
+#ifdef DEBUG
+ serialized_objects_.insert(obj);
+#endif
}
void ReadOnlySerializer::SerializeReadOnlyRoots() {
@@ -60,6 +63,16 @@ void ReadOnlySerializer::FinalizeSerialization() {
FullObjectSlot(&undefined));
SerializeDeferredObjects();
Pad();
+
+#ifdef DEBUG
+ // Check that every object on read-only heap is reachable (and was
+ // serialized).
+ ReadOnlyHeapIterator iterator(isolate()->heap()->read_only_heap());
+ for (HeapObject object = iterator.Next(); !object.is_null();
+ object = iterator.Next()) {
+ CHECK(serialized_objects_.count(object));
+ }
+#endif
}
bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
@@ -75,7 +88,7 @@ bool ReadOnlySerializer::MustBeDeferred(HeapObject object) {
// not be fulfilled during deserialization until few first root objects are
// serialized. But we must serialize Map objects since deserializer checks
// that these root objects are indeed Maps.
- return !object->IsMap();
+ return !object.IsMap();
}
bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
diff --git a/deps/v8/src/snapshot/read-only-serializer.h b/deps/v8/src/snapshot/read-only-serializer.h
index 753432502e..c73c397647 100644
--- a/deps/v8/src/snapshot/read-only-serializer.h
+++ b/deps/v8/src/snapshot/read-only-serializer.h
@@ -5,6 +5,8 @@
#ifndef V8_SNAPSHOT_READ_ONLY_SERIALIZER_H_
#define V8_SNAPSHOT_READ_ONLY_SERIALIZER_H_
+#include <unordered_set>
+
#include "src/snapshot/roots-serializer.h"
namespace v8 {
@@ -35,6 +37,9 @@ class V8_EXPORT_PRIVATE ReadOnlySerializer : public RootsSerializer {
void SerializeObject(HeapObject o) override;
bool MustBeDeferred(HeapObject object) override;
+#ifdef DEBUG
+ std::unordered_set<HeapObject, Object::Hasher> serialized_objects_;
+#endif
DISALLOW_COPY_AND_ASSIGN(ReadOnlySerializer);
};
diff --git a/deps/v8/src/snapshot/references.h b/deps/v8/src/snapshot/references.h
index ff3196115c..e7c44236ac 100644
--- a/deps/v8/src/snapshot/references.h
+++ b/deps/v8/src/snapshot/references.h
@@ -5,9 +5,9 @@
#ifndef V8_SNAPSHOT_REFERENCES_H_
#define V8_SNAPSHOT_REFERENCES_H_
-#include "src/assert-scope.h"
#include "src/base/hashmap.h"
-#include "src/utils.h"
+#include "src/common/assert-scope.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/roots-serializer.cc b/deps/v8/src/snapshot/roots-serializer.cc
index e634c45eff..f354dec158 100644
--- a/deps/v8/src/snapshot/roots-serializer.cc
+++ b/deps/v8/src/snapshot/roots-serializer.cc
@@ -4,9 +4,9 @@
#include "src/snapshot/roots-serializer.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
namespace v8 {
@@ -58,8 +58,8 @@ void RootsSerializer::VisitRootPointers(Root root, const char* description,
void RootsSerializer::CheckRehashability(HeapObject obj) {
if (!can_be_rehashed_) return;
- if (!obj->NeedsRehashing()) return;
- if (obj->CanBeRehashed()) return;
+ if (!obj.NeedsRehashing()) return;
+ if (obj.CanBeRehashed()) return;
can_be_rehashed_ = false;
}
diff --git a/deps/v8/src/snapshot/roots-serializer.h b/deps/v8/src/snapshot/roots-serializer.h
index 50c63402d2..cfb59dd75e 100644
--- a/deps/v8/src/snapshot/roots-serializer.h
+++ b/deps/v8/src/snapshot/roots-serializer.h
@@ -7,8 +7,8 @@
#include <bitset>
+#include "src/objects/visitors.h"
#include "src/snapshot/serializer.h"
-#include "src/visitors.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/serializer-allocator.cc b/deps/v8/src/snapshot/serializer-allocator.cc
index d596678789..763244137f 100644
--- a/deps/v8/src/snapshot/serializer-allocator.cc
+++ b/deps/v8/src/snapshot/serializer-allocator.cc
@@ -144,7 +144,7 @@ void SerializerAllocator::OutputStatistics() {
for (int space = FIRST_SPACE; space < kNumberOfPreallocatedSpaces; space++) {
size_t s = pending_chunk_[space];
for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
- PrintF("%16" PRIuS, s);
+ PrintF("%16zu", s);
}
STATIC_ASSERT(MAP_SPACE == kNumberOfPreallocatedSpaces);
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index fa8d19e438..2869c2bf24 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -4,9 +4,9 @@
#include "src/snapshot/serializer-common.h"
-#include "src/external-reference-table.h"
-#include "src/objects-inl.h"
+#include "src/codegen/external-reference-table.h"
#include "src/objects/foreign-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
namespace v8 {
@@ -120,28 +120,28 @@ void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
// and eventually terminates the cache with undefined.
visitor->VisitRootPointer(Root::kPartialSnapshotCache, nullptr,
FullObjectSlot(&cache->at(i)));
- if (cache->at(i)->IsUndefined(isolate)) break;
+ if (cache->at(i).IsUndefined(isolate)) break;
}
}
bool SerializerDeserializer::CanBeDeferred(HeapObject o) {
- return !o->IsString() && !o->IsScript() && !o->IsJSTypedArray();
+ return !o.IsString() && !o.IsScript() && !o.IsJSTypedArray();
}
void SerializerDeserializer::RestoreExternalReferenceRedirectors(
const std::vector<AccessorInfo>& accessor_infos) {
// Restore wiped accessor infos.
for (AccessorInfo info : accessor_infos) {
- Foreign::cast(info->js_getter())
- ->set_foreign_address(info->redirected_getter());
+ Foreign::cast(info.js_getter())
+ .set_foreign_address(info.redirected_getter());
}
}
void SerializerDeserializer::RestoreExternalReferenceRedirectors(
const std::vector<CallHandlerInfo>& call_handler_infos) {
for (CallHandlerInfo info : call_handler_infos) {
- Foreign::cast(info->js_callback())
- ->set_foreign_address(info->redirected_callback());
+ Foreign::cast(info.js_callback())
+ .set_foreign_address(info.redirected_callback());
}
}
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index a373683886..30da8db662 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -5,14 +5,14 @@
#ifndef V8_SNAPSHOT_SERIALIZER_COMMON_H_
#define V8_SNAPSHOT_SERIALIZER_COMMON_H_
-#include "src/address-map.h"
#include "src/base/bits.h"
-#include "src/external-reference-table.h"
-#include "src/globals.h"
-#include "src/msan.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/common/globals.h"
+#include "src/common/v8memory.h"
+#include "src/objects/visitors.h"
+#include "src/sanitizer/msan.h"
#include "src/snapshot/references.h"
-#include "src/v8memory.h"
-#include "src/visitors.h"
+#include "src/utils/address-map.h"
namespace v8 {
namespace internal {
@@ -370,13 +370,19 @@ class Checksum {
#ifdef MEMORY_SANITIZER
// Computing the checksum includes padding bytes for objects like strings.
// Mark every object as initialized in the code serializer.
- MSAN_MEMORY_IS_INITIALIZED(payload.start(), payload.length());
+ MSAN_MEMORY_IS_INITIALIZED(payload.begin(), payload.length());
#endif // MEMORY_SANITIZER
// Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
uintptr_t a = 1;
uintptr_t b = 0;
- const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
+ // TODO(jgruber, v8:9171): The following DCHECK should ideally hold since we
+ // access payload through an uintptr_t pointer later on; and some
+ // architectures, e.g. arm, may generate instructions that expect correct
+ // alignment. However, we do not control alignment for external snapshots.
+ // DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload.begin()),
+ // kIntptrSize));
DCHECK(IsAligned(payload.length(), kIntptrSize));
+ const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.begin());
const uintptr_t* end = cur + payload.length() / kIntptrSize;
while (cur < end) {
// Unsigned overflow expected and intended.
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 447a69aea8..9eefbe2c48 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -4,7 +4,7 @@
#include "src/snapshot/serializer.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/heap/heap-inl.h" // For Space::identity().
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
@@ -58,7 +58,7 @@ Serializer::~Serializer() {
#ifdef OBJECT_PRINT
void Serializer::CountInstanceType(Map map, int size, AllocationSpace space) {
- int instance_type = map->instance_type();
+ int instance_type = map.instance_type();
instance_type_count_[space][instance_type]++;
instance_type_size_[space][instance_type] += size;
}
@@ -75,8 +75,7 @@ void Serializer::OutputStatistics(const char* name) {
#define PRINT_INSTANCE_TYPE(Name) \
for (int space = 0; space < LAST_SPACE; ++space) { \
if (instance_type_count_[space][Name]) { \
- PrintF("%10d %10" PRIuS " %-10s %s\n", \
- instance_type_count_[space][Name], \
+ PrintF("%10d %10zu %-10s %s\n", instance_type_count_[space][Name], \
instance_type_size_[space][Name], \
Heap::GetSpaceName(static_cast<AllocationSpace>(space)), #Name); \
} \
@@ -108,7 +107,7 @@ void Serializer::VisitRootPointers(Root root, const char* description,
}
void Serializer::SerializeRootObject(Object object) {
- if (object->IsSmi()) {
+ if (object.IsSmi()) {
PutSmi(Smi::cast(object));
} else {
SerializeObject(HeapObject::cast(object));
@@ -116,12 +115,10 @@ void Serializer::SerializeRootObject(Object object) {
}
#ifdef DEBUG
-void Serializer::PrintStack() { PrintStack(std::cout); }
-
-void Serializer::PrintStack(std::ostream& out) {
+void Serializer::PrintStack() {
for (const auto o : stack_) {
- o.Print(out);
- out << "\n";
+ o.Print();
+ PrintF("\n");
}
}
#endif // DEBUG
@@ -144,10 +141,9 @@ bool Serializer::SerializeHotObject(HeapObject obj) {
DCHECK(index >= 0 && index < kNumberOfHotObjects);
if (FLAG_trace_serializer) {
PrintF(" Encoding hot object %d:", index);
- obj->ShortPrint();
+ obj.ShortPrint();
PrintF("\n");
}
- // TODO(ishell): remove kHotObjectWithSkip
sink_.Put(kHotObject + index, "HotObject");
return true;
}
@@ -170,7 +166,7 @@ bool Serializer::SerializeBackReference(HeapObject obj) {
DCHECK(reference.is_back_reference());
if (FLAG_trace_serializer) {
PrintF(" Encoding back reference to: ");
- obj->ShortPrint();
+ obj.ShortPrint();
PrintF("\n");
}
@@ -183,15 +179,15 @@ bool Serializer::SerializeBackReference(HeapObject obj) {
}
bool Serializer::ObjectIsBytecodeHandler(HeapObject obj) const {
- if (!obj->IsCode()) return false;
- return (Code::cast(obj)->kind() == Code::BYTECODE_HANDLER);
+ if (!obj.IsCode()) return false;
+ return (Code::cast(obj).kind() == Code::BYTECODE_HANDLER);
}
void Serializer::PutRoot(RootIndex root, HeapObject object) {
int root_index = static_cast<int>(root);
if (FLAG_trace_serializer) {
PrintF(" Encoding root %d:", root_index);
- object->ShortPrint();
+ object.ShortPrint();
PrintF("\n");
}
@@ -247,7 +243,7 @@ void Serializer::PutAttachedReference(SerializerReference reference) {
}
int Serializer::PutAlignmentPrefix(HeapObject object) {
- AllocationAlignment alignment = HeapObject::RequiredAlignment(object->map());
+ AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
if (alignment != kWordAligned) {
DCHECK(1 <= alignment && alignment <= 3);
byte prefix = (kAlignmentPrefix - 1) + alignment;
@@ -290,10 +286,10 @@ void Serializer::InitializeCodeAddressMap() {
Code Serializer::CopyCode(Code code) {
code_buffer_.clear(); // Clear buffer without deleting backing store.
- int size = code->CodeSize();
+ int size = code.CodeSize();
code_buffer_.insert(code_buffer_.end(),
- reinterpret_cast<byte*>(code->address()),
- reinterpret_cast<byte*>(code->address() + size));
+ reinterpret_cast<byte*>(code.address()),
+ reinterpret_cast<byte*>(code.address() + size));
// When pointer compression is enabled the checked cast will try to
// decompress map field of off-heap Code object.
return Code::unchecked_cast(HeapObject::FromAddress(
@@ -304,16 +300,16 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
int size, Map map) {
if (serializer_->code_address_map_) {
const char* code_name =
- serializer_->code_address_map_->Lookup(object_->address());
+ serializer_->code_address_map_->Lookup(object_.address());
LOG(serializer_->isolate_,
- CodeNameEvent(object_->address(), sink_->Position(), code_name));
+ CodeNameEvent(object_.address(), sink_->Position(), code_name));
}
SerializerReference back_reference;
if (space == LO_SPACE) {
sink_->Put(kNewObject + space, "NewLargeObject");
sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
- CHECK(!object_->IsCode());
+ CHECK(!object_.IsCode());
back_reference = serializer_->allocator()->AllocateLargeObject(size);
} else if (space == MAP_SPACE) {
DCHECK_EQ(Map::kSize, size);
@@ -363,56 +359,48 @@ int32_t Serializer::ObjectSerializer::SerializeBackingStore(
void Serializer::ObjectSerializer::SerializeJSTypedArray() {
JSTypedArray typed_array = JSTypedArray::cast(object_);
- FixedTypedArrayBase elements =
- FixedTypedArrayBase::cast(typed_array->elements());
-
- if (!typed_array->WasDetached()) {
- if (!typed_array->is_on_heap()) {
+ if (!typed_array.WasDetached()) {
+ if (!typed_array.is_on_heap()) {
// Explicitly serialize the backing store now.
- JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array->buffer());
- CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
- CHECK_LE(typed_array->byte_offset(), Smi::kMaxValue);
- int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
- int32_t byte_offset = static_cast<int32_t>(typed_array->byte_offset());
+ JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array.buffer());
+ CHECK_LE(buffer.byte_length(), Smi::kMaxValue);
+ CHECK_LE(typed_array.byte_offset(), Smi::kMaxValue);
+ int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
+ int32_t byte_offset = static_cast<int32_t>(typed_array.byte_offset());
// We need to calculate the backing store from the external pointer
// because the ArrayBuffer may already have been serialized.
void* backing_store = reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(elements->external_pointer()) -
+ reinterpret_cast<intptr_t>(typed_array.external_pointer()) -
byte_offset);
int32_t ref = SerializeBackingStore(backing_store, byte_length);
// The external_pointer is the backing_store + typed_array->byte_offset.
// To properly share the buffer, we set the backing store ref here. On
// deserialization we re-add the byte_offset to external_pointer.
- elements->set_external_pointer(
+ typed_array.set_external_pointer(
reinterpret_cast<void*>(Smi::FromInt(ref).ptr()));
}
} else {
- // When a JSArrayBuffer is detached, the FixedTypedArray that points to the
- // same backing store does not know anything about it. This fixup step finds
- // detached TypedArrays and clears the values in the FixedTypedArray so that
- // we don't try to serialize the now invalid backing store.
- elements->set_external_pointer(reinterpret_cast<void*>(Smi::kZero.ptr()));
- elements->set_length(0);
+ typed_array.set_external_pointer(nullptr);
}
SerializeObject();
}
void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
JSArrayBuffer buffer = JSArrayBuffer::cast(object_);
- void* backing_store = buffer->backing_store();
+ void* backing_store = buffer.backing_store();
// We cannot store byte_length larger than Smi range in the snapshot.
- CHECK_LE(buffer->byte_length(), Smi::kMaxValue);
- int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
+ CHECK_LE(buffer.byte_length(), Smi::kMaxValue);
+ int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
// The embedder-allocated backing store only exists for the off-heap case.
if (backing_store != nullptr) {
int32_t ref = SerializeBackingStore(backing_store, byte_length);
- buffer->set_backing_store(reinterpret_cast<void*>(Smi::FromInt(ref).ptr()));
+ buffer.set_backing_store(reinterpret_cast<void*>(Smi::FromInt(ref).ptr()));
}
SerializeObject();
- buffer->set_backing_store(backing_store);
+ buffer.set_backing_store(backing_store);
}
void Serializer::ObjectSerializer::SerializeExternalString() {
@@ -422,30 +410,30 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
// for native native source code strings, we replace the resource field
// with the native source id.
// For the rest we serialize them to look like ordinary sequential strings.
- if (object_->map() != ReadOnlyRoots(heap).native_source_string_map()) {
+ if (object_.map() != ReadOnlyRoots(heap).native_source_string_map()) {
ExternalString string = ExternalString::cast(object_);
- Address resource = string->resource_as_address();
+ Address resource = string.resource_as_address();
ExternalReferenceEncoder::Value reference;
if (serializer_->external_reference_encoder_.TryEncode(resource).To(
&reference)) {
DCHECK(reference.is_from_api());
- string->set_uint32_as_resource(reference.index());
+ string.set_uint32_as_resource(reference.index());
SerializeObject();
- string->set_address_as_resource(resource);
+ string.set_address_as_resource(resource);
} else {
SerializeExternalStringAsSequentialString();
}
} else {
ExternalOneByteString string = ExternalOneByteString::cast(object_);
- DCHECK(string->is_uncached());
+ DCHECK(string.is_uncached());
const NativesExternalStringResource* resource =
reinterpret_cast<const NativesExternalStringResource*>(
- string->resource());
+ string.resource());
// Replace the resource field with the type and index of the native source.
- string->set_resource(resource->EncodeForSerialization());
+ string.set_resource(resource->EncodeForSerialization());
SerializeObject();
// Restore the resource field.
- string->set_resource(resource);
+ string.set_resource(resource);
}
}
@@ -453,29 +441,29 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
// Instead of serializing this as an external string, we serialize
// an imaginary sequential string with the same content.
ReadOnlyRoots roots(serializer_->isolate());
- DCHECK(object_->IsExternalString());
- DCHECK(object_->map() != roots.native_source_string_map());
+ DCHECK(object_.IsExternalString());
+ DCHECK(object_.map() != roots.native_source_string_map());
ExternalString string = ExternalString::cast(object_);
- int length = string->length();
+ int length = string.length();
Map map;
int content_size;
int allocation_size;
const byte* resource;
// Find the map and size for the imaginary sequential string.
- bool internalized = object_->IsInternalizedString();
- if (object_->IsExternalOneByteString()) {
+ bool internalized = object_.IsInternalizedString();
+ if (object_.IsExternalOneByteString()) {
map = internalized ? roots.one_byte_internalized_string_map()
: roots.one_byte_string_map();
allocation_size = SeqOneByteString::SizeFor(length);
content_size = length * kCharSize;
resource = reinterpret_cast<const byte*>(
- ExternalOneByteString::cast(string)->resource()->data());
+ ExternalOneByteString::cast(string).resource()->data());
} else {
map = internalized ? roots.internalized_string_map() : roots.string_map();
allocation_size = SeqTwoByteString::SizeFor(length);
content_size = length * kShortSize;
resource = reinterpret_cast<const byte*>(
- ExternalTwoByteString::cast(string)->resource()->data());
+ ExternalTwoByteString::cast(string).resource()->data());
}
AllocationSpace space =
@@ -491,7 +479,7 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
sink_->PutInt(bytes_to_output, "length");
// Serialize string header (except for map).
- uint8_t* string_start = reinterpret_cast<uint8_t*>(string->address());
+ uint8_t* string_start = reinterpret_cast<uint8_t*>(string.address());
for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
sink_->PutSection(string_start[i], "StringHeader");
}
@@ -511,19 +499,19 @@ void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
class UnlinkWeakNextScope {
public:
explicit UnlinkWeakNextScope(Heap* heap, HeapObject object) {
- if (object->IsAllocationSite() &&
- AllocationSite::cast(object)->HasWeakNext()) {
+ if (object.IsAllocationSite() &&
+ AllocationSite::cast(object).HasWeakNext()) {
object_ = object;
- next_ = AllocationSite::cast(object)->weak_next();
- AllocationSite::cast(object)->set_weak_next(
+ next_ = AllocationSite::cast(object).weak_next();
+ AllocationSite::cast(object).set_weak_next(
ReadOnlyRoots(heap).undefined_value());
}
}
~UnlinkWeakNextScope() {
if (!object_.is_null()) {
- AllocationSite::cast(object_)->set_weak_next(next_,
- UPDATE_WEAK_WRITE_BARRIER);
+ AllocationSite::cast(object_).set_weak_next(next_,
+ UPDATE_WEAK_WRITE_BARRIER);
}
}
@@ -536,48 +524,48 @@ class UnlinkWeakNextScope {
void Serializer::ObjectSerializer::Serialize() {
if (FLAG_trace_serializer) {
PrintF(" Encoding heap object: ");
- object_->ShortPrint();
+ object_.ShortPrint();
PrintF("\n");
}
- if (object_->IsExternalString()) {
+ if (object_.IsExternalString()) {
SerializeExternalString();
return;
} else if (!ReadOnlyHeap::Contains(object_)) {
// Only clear padding for strings outside RO_SPACE. RO_SPACE should have
// been cleared elsewhere.
- if (object_->IsSeqOneByteString()) {
+ if (object_.IsSeqOneByteString()) {
// Clear padding bytes at the end. Done here to avoid having to do this
// at allocation sites in generated code.
- SeqOneByteString::cast(object_)->clear_padding();
- } else if (object_->IsSeqTwoByteString()) {
- SeqTwoByteString::cast(object_)->clear_padding();
+ SeqOneByteString::cast(object_).clear_padding();
+ } else if (object_.IsSeqTwoByteString()) {
+ SeqTwoByteString::cast(object_).clear_padding();
}
}
- if (object_->IsJSTypedArray()) {
+ if (object_.IsJSTypedArray()) {
SerializeJSTypedArray();
return;
}
- if (object_->IsJSArrayBuffer()) {
+ if (object_.IsJSArrayBuffer()) {
SerializeJSArrayBuffer();
return;
}
// We don't expect fillers.
- DCHECK(!object_->IsFiller());
+ DCHECK(!object_.IsFiller());
- if (object_->IsScript()) {
+ if (object_.IsScript()) {
// Clear cached line ends.
Object undefined = ReadOnlyRoots(serializer_->isolate()).undefined_value();
- Script::cast(object_)->set_line_ends(undefined);
+ Script::cast(object_).set_line_ends(undefined);
}
SerializeObject();
}
void Serializer::ObjectSerializer::SerializeObject() {
- int size = object_->Size();
- Map map = object_->map();
+ int size = object_.Size();
+ Map map = object_.map();
AllocationSpace space =
MemoryChunk::FromHeapObject(object_)->owner()->identity();
// Young generation large objects are tenured.
@@ -606,12 +594,12 @@ void Serializer::ObjectSerializer::SerializeObject() {
void Serializer::ObjectSerializer::SerializeDeferred() {
if (FLAG_trace_serializer) {
PrintF(" Encoding deferred heap object: ");
- object_->ShortPrint();
+ object_.ShortPrint();
PrintF("\n");
}
- int size = object_->Size();
- Map map = object_->map();
+ int size = object_.Size();
+ Map map = object_.map();
SerializerReference back_reference =
serializer_->reference_map()->LookupReference(
reinterpret_cast<void*>(object_.ptr()));
@@ -631,16 +619,16 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
UnlinkWeakNextScope unlink_weak_next(serializer_->isolate()->heap(), object_);
- if (object_->IsCode()) {
+ if (object_.IsCode()) {
// For code objects, output raw bytes first.
OutputCode(size);
// Then iterate references via reloc info.
- object_->IterateBody(map, size, this);
+ object_.IterateBody(map, size, this);
} else {
// For other objects, iterate references first.
- object_->IterateBody(map, size, this);
+ object_.IterateBody(map, size, this);
// Then output data payload, if any.
- OutputRawData(object_->address() + size);
+ OutputRawData(object_.address() + size);
}
}
@@ -714,7 +702,7 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
void Serializer::ObjectSerializer::VisitExternalReference(Foreign host,
Address* p) {
auto encoded_reference =
- serializer_->EncodeExternalReference(host->foreign_address());
+ serializer_->EncodeExternalReference(host.foreign_address());
if (encoded_reference.is_from_api()) {
sink_->Put(kApiReference, "ApiRef");
} else {
@@ -741,10 +729,10 @@ void Serializer::ObjectSerializer::VisitExternalReference(Code host,
void Serializer::ObjectSerializer::VisitInternalReference(Code host,
RelocInfo* rinfo) {
- Address entry = Code::cast(object_)->entry();
+ Address entry = Code::cast(object_).entry();
DCHECK_GE(rinfo->target_internal_reference(), entry);
uintptr_t target_offset = rinfo->target_internal_reference() - entry;
- DCHECK_LE(target_offset, Code::cast(object_)->raw_instruction_size());
+ DCHECK_LE(target_offset, Code::cast(object_).raw_instruction_size());
sink_->Put(kInternalReference, "InternalRef");
sink_->PutInt(target_offset, "internal ref value");
}
@@ -767,7 +755,7 @@ void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
CHECK(Builtins::IsIsolateIndependentBuiltin(target));
sink_->Put(kOffHeapTarget, "OffHeapTarget");
- sink_->PutInt(target->builtin_index(), "builtin index");
+ sink_->PutInt(target.builtin_index(), "builtin index");
bytes_processed_so_far_ += rinfo->target_address_size();
}
@@ -807,7 +795,7 @@ void OutputRawWithCustomField(SnapshotByteSink* sink, Address object_start,
} // anonymous namespace
void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
- Address object_start = object_->address();
+ Address object_start = object_.address();
int base = bytes_processed_so_far_;
int up_to_offset = static_cast<int>(up_to - object_start);
int to_skip = up_to_offset - bytes_processed_so_far_;
@@ -829,13 +817,13 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
__msan_check_mem_is_initialized(
reinterpret_cast<void*>(object_start + base), bytes_to_output);
#endif // MEMORY_SANITIZER
- if (object_->IsBytecodeArray()) {
+ if (object_.IsBytecodeArray()) {
// The bytecode age field can be changed by GC concurrently.
byte field_value = BytecodeArray::kNoAgeBytecodeAge;
OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
BytecodeArray::kBytecodeAgeOffset,
sizeof(field_value), &field_value);
- } else if (object_->IsDescriptorArray()) {
+ } else if (object_.IsDescriptorArray()) {
// The number of marked descriptors field can be changed by GC
// concurrently.
byte field_value[2];
@@ -859,7 +847,8 @@ void Serializer::ObjectSerializer::OutputCode(int size) {
// and wipe all pointers in the copy, which we then serialize.
Code off_heap_code = serializer_->CopyCode(on_heap_code);
int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
@@ -868,7 +857,7 @@ void Serializer::ObjectSerializer::OutputCode(int size) {
// With enabled pointer compression normal accessors no longer work for
// off-heap objects, so we have to get the relocation info data via the
// on-heap code object.
- ByteArray relocation_info = on_heap_code->unchecked_relocation_info();
+ ByteArray relocation_info = on_heap_code.unchecked_relocation_info();
for (RelocIterator it(off_heap_code, relocation_info, mode_mask); !it.done();
it.next()) {
RelocInfo* rinfo = it.rinfo();
@@ -876,9 +865,9 @@ void Serializer::ObjectSerializer::OutputCode(int size) {
}
// We need to wipe out the header fields *after* wiping out the
// relocations, because some of these fields are needed for the latter.
- off_heap_code->WipeOutHeader();
+ off_heap_code.WipeOutHeader();
- Address start = off_heap_code->address() + Code::kDataStart;
+ Address start = off_heap_code.address() + Code::kDataStart;
int bytes_to_output = size - Code::kDataStart;
DCHECK(IsAligned(bytes_to_output, kTaggedSize));
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 49ffddbefb..c9e7fada80 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -7,10 +7,10 @@
#include <map>
-#include "src/isolate.h"
-#include "src/log.h"
-#include "src/objects.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/execution/isolate.h"
+#include "src/logging/log.h"
+#include "src/objects/objects.h"
+#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/serializer-allocator.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot-source-sink.h"
@@ -29,7 +29,7 @@ class CodeAddressMap : public CodeEventLogger {
}
void CodeMoveEvent(AbstractCode from, AbstractCode to) override {
- address_to_name_map_.Move(from->address(), to->address());
+ address_to_name_map_.Move(from.address(), to.address());
}
void CodeDisableOptEvent(AbstractCode code,
@@ -116,7 +116,7 @@ class CodeAddressMap : public CodeEventLogger {
void LogRecordedBuffer(AbstractCode code, SharedFunctionInfo,
const char* name, int length) override {
- address_to_name_map_.Insert(code->address(), name, length);
+ address_to_name_map_.Insert(code.address(), name, length);
}
void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
@@ -167,7 +167,7 @@ class Serializer : public SerializerDeserializer {
bool ReferenceMapContains(HeapObject o) {
return reference_map()
- ->LookupReference(reinterpret_cast<void*>(o->ptr()))
+ ->LookupReference(reinterpret_cast<void*>(o.ptr()))
.is_valid();
}
@@ -235,7 +235,7 @@ class Serializer : public SerializerDeserializer {
Code CopyCode(Code code);
void QueueDeferredObject(HeapObject obj) {
- DCHECK(reference_map_.LookupReference(reinterpret_cast<void*>(obj->ptr()))
+ DCHECK(reference_map_.LookupReference(reinterpret_cast<void*>(obj.ptr()))
.is_back_reference());
deferred_objects_.push_back(obj);
}
@@ -250,7 +250,6 @@ class Serializer : public SerializerDeserializer {
void PushStack(HeapObject o) { stack_.push_back(o); }
void PopStack() { stack_.pop_back(); }
void PrintStack();
- void PrintStack(std::ostream&);
#endif // DEBUG
SerializerReferenceMap* reference_map() { return &reference_map_; }
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 271317836c..f489999f88 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -7,11 +7,12 @@
#include "src/snapshot/snapshot.h"
#include "src/base/platform/platform.h"
-#include "src/counters.h"
+#include "src/logging/counters.h"
#include "src/snapshot/partial-deserializer.h"
#include "src/snapshot/read-only-deserializer.h"
#include "src/snapshot/startup-deserializer.h"
-#include "src/version.h"
+#include "src/utils/memcopy.h"
+#include "src/utils/version.h"
namespace v8 {
namespace internal {
@@ -151,7 +152,7 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
uint32_t payload_length =
static_cast<uint32_t>(startup_snapshot->RawData().length());
CopyBytes(data + payload_offset,
- reinterpret_cast<const char*>(startup_snapshot->RawData().start()),
+ reinterpret_cast<const char*>(startup_snapshot->RawData().begin()),
payload_length);
if (FLAG_profile_deserialization) {
PrintF("Snapshot blob consists of:\n%10d bytes in %d chunks for startup\n",
@@ -165,7 +166,7 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
payload_length = read_only_snapshot->RawData().length();
CopyBytes(
data + payload_offset,
- reinterpret_cast<const char*>(read_only_snapshot->RawData().start()),
+ reinterpret_cast<const char*>(read_only_snapshot->RawData().begin()),
payload_length);
if (FLAG_profile_deserialization) {
PrintF("%10d bytes for read-only\n", payload_length);
@@ -179,7 +180,7 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
payload_length = context_snapshot->RawData().length();
CopyBytes(
data + payload_offset,
- reinterpret_cast<const char*>(context_snapshot->RawData().start()),
+ reinterpret_cast<const char*>(context_snapshot->RawData().begin()),
payload_length);
if (FLAG_profile_deserialization) {
PrintF("%10d bytes in %d chunks for context #%d\n", payload_length,
@@ -379,29 +380,55 @@ bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
} // namespace
-// TODO(jgruber): Merge with related code in mksnapshot.cc and
-// inspector-test.cc.
v8::StartupData CreateSnapshotDataBlobInternal(
v8::SnapshotCreator::FunctionCodeHandling function_code_handling,
- const char* embedded_source) {
- // Create a new isolate and a new context from scratch, optionally run
- // a script to embed, and serialize to create a snapshot blob.
- v8::StartupData result = {nullptr, 0};
+ const char* embedded_source, v8::Isolate* isolate) {
+ // If no isolate is passed in, create it (and a new context) from scratch.
+ if (isolate == nullptr) isolate = v8::Isolate::Allocate();
+
+ // Optionally run a script to embed, and serialize to create a snapshot blob.
+ v8::SnapshotCreator snapshot_creator(isolate);
{
- v8::SnapshotCreator snapshot_creator;
- v8::Isolate* isolate = snapshot_creator.GetIsolate();
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- if (embedded_source != nullptr &&
- !RunExtraCode(isolate, context, embedded_source, "<embedded>")) {
- return result;
- }
- snapshot_creator.SetDefaultContext(context);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ if (embedded_source != nullptr &&
+ !RunExtraCode(isolate, context, embedded_source, "<embedded>")) {
+ return {};
}
- result = snapshot_creator.CreateBlob(function_code_handling);
+ snapshot_creator.SetDefaultContext(context);
}
- return result;
+ return snapshot_creator.CreateBlob(function_code_handling);
+}
+
+v8::StartupData WarmUpSnapshotDataBlobInternal(
+ v8::StartupData cold_snapshot_blob, const char* warmup_source) {
+ CHECK(cold_snapshot_blob.raw_size > 0 && cold_snapshot_blob.data != nullptr);
+ CHECK_NOT_NULL(warmup_source);
+
+ // Use following steps to create a warmed up snapshot blob from a cold one:
+ // - Create a new isolate from the cold snapshot.
+ // - Create a new context to run the warmup script. This will trigger
+ // compilation of executed functions.
+ // - Create a new context. This context will be unpolluted.
+ // - Serialize the isolate and the second context into a new snapshot blob.
+ v8::SnapshotCreator snapshot_creator(nullptr, &cold_snapshot_blob);
+ v8::Isolate* isolate = snapshot_creator.GetIsolate();
+ {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ if (!RunExtraCode(isolate, context, warmup_source, "<warm-up>")) {
+ return {};
+ }
+ }
+ {
+ v8::HandleScope handle_scope(isolate);
+ isolate->ContextDisposedNotification(false);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ snapshot_creator.SetDefaultContext(context);
+ }
+
+ return snapshot_creator.CreateBlob(
+ v8::SnapshotCreator::FunctionCodeHandling::kKeep);
}
} // namespace internal
diff --git a/deps/v8/src/snapshot/snapshot-external.cc b/deps/v8/src/snapshot/snapshot-external.cc
index c5f56ebb5a..d77ddae2aa 100644
--- a/deps/v8/src/snapshot/snapshot-external.cc
+++ b/deps/v8/src/snapshot/snapshot-external.cc
@@ -7,9 +7,8 @@
#include "src/snapshot/snapshot.h"
#include "src/base/platform/mutex.h"
+#include "src/init/v8.h" // for V8::Initialize
#include "src/snapshot/snapshot-source-sink.h"
-#include "src/v8.h" // for V8::Initialize
-
#ifndef V8_USE_EXTERNAL_STARTUP_DATA
#error snapshot-external.cc is used only for the external snapshot build.
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index ffc6ad0973..e39e2b393c 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -6,8 +6,8 @@
#include "src/snapshot/snapshot-source-sink.h"
#include "src/base/logging.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index f5b35b174e..61396aaa71 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -6,7 +6,7 @@
#define V8_SNAPSHOT_SNAPSHOT_SOURCE_SINK_H_
#include "src/base/logging.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -25,7 +25,7 @@ class SnapshotByteSource final {
position_(0) {}
explicit SnapshotByteSource(Vector<const byte> payload)
- : data_(payload.start()), length_(payload.length()), position_(0) {}
+ : data_(payload.begin()), length_(payload.length()), position_(0) {}
~SnapshotByteSource() = default;
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 3f50f1060e..a9995b2d3e 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -8,8 +8,7 @@
#include "src/snapshot/partial-serializer.h"
#include "src/snapshot/startup-serializer.h"
-#include "src/objects-inl.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -163,7 +162,12 @@ class Snapshot : public AllStatic {
// mksnapshot.
V8_EXPORT_PRIVATE v8::StartupData CreateSnapshotDataBlobInternal(
v8::SnapshotCreator::FunctionCodeHandling function_code_handling,
- const char* embedded_source);
+ const char* embedded_source, v8::Isolate* isolate = nullptr);
+
+// Convenience wrapper around snapshot data blob warmup used e.g. by tests and
+// mksnapshot.
+V8_EXPORT_PRIVATE v8::StartupData WarmUpSnapshotDataBlobInternal(
+ v8::StartupData cold_snapshot_blob, const char* warmup_source);
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
void SetSnapshotFromFile(StartupData* snapshot_blob);
diff --git a/deps/v8/src/snapshot/startup-deserializer.cc b/deps/v8/src/snapshot/startup-deserializer.cc
index 65def345ce..168bc678fe 100644
--- a/deps/v8/src/snapshot/startup-deserializer.cc
+++ b/deps/v8/src/snapshot/startup-deserializer.cc
@@ -4,11 +4,11 @@
#include "src/snapshot/startup-deserializer.h"
-#include "src/api.h"
-#include "src/assembler-inl.h"
+#include "src/api/api.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/execution/v8threads.h"
#include "src/heap/heap-inl.h"
#include "src/snapshot/snapshot.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 47b3f0f41b..62a786f984 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -4,18 +4,17 @@
#include "src/snapshot/startup-serializer.h"
-#include "src/api.h"
-#include "src/code-tracer.h"
-#include "src/contexts.h"
-#include "src/deoptimizer.h"
-#include "src/global-handles.h"
+#include "src/api/api.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/v8threads.h"
+#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
-#include "src/objects-inl.h"
+#include "src/objects/contexts.h"
#include "src/objects/foreign-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/snapshot/read-only-serializer.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -37,25 +36,25 @@ StartupSerializer::~StartupSerializer() {
namespace {
bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
- if (!obj->IsCode()) return false;
+ if (!obj.IsCode()) return false;
Code code = Code::cast(obj);
// TODO(v8:8768): Deopt entry code should not be serialized.
- if (code->kind() == Code::STUB && isolate->deoptimizer_data() != nullptr) {
+ if (code.kind() == Code::STUB && isolate->deoptimizer_data() != nullptr) {
if (isolate->deoptimizer_data()->IsDeoptEntryCode(code)) return false;
}
- if (code->kind() == Code::REGEXP) return false;
- if (!code->is_builtin()) return true;
+ if (code.kind() == Code::REGEXP) return false;
+ if (!code.is_builtin()) return true;
if (!FLAG_embedded_builtins) return false;
- if (code->is_off_heap_trampoline()) return false;
+ if (code.is_off_heap_trampoline()) return false;
// An on-heap builtin. We only expect this for the interpreter entry
// trampoline copy stored on the root list and transitively called builtins.
// See Heap::interpreter_entry_trampoline_for_profiling.
- switch (code->builtin_index()) {
+ switch (code.builtin_index()) {
case Builtins::kAbort:
case Builtins::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
case Builtins::kInterpreterEntryTrampoline:
@@ -72,16 +71,7 @@ bool IsUnexpectedCodeObject(Isolate* isolate, HeapObject obj) {
#endif // DEBUG
void StartupSerializer::SerializeObject(HeapObject obj) {
-#ifdef DEBUG
- if (obj.IsJSFunction()) {
- v8::base::OS::PrintError("Reference stack:\n");
- PrintStack(std::cerr);
- obj.Print(std::cerr);
- FATAL(
- "JSFunction should be added through the context snapshot instead of "
- "the isolate snapshot");
- }
-#endif // DEBUG
+ DCHECK(!obj.IsJSFunction());
DCHECK(!IsUnexpectedCodeObject(isolate(), obj));
if (SerializeHotObject(obj)) return;
@@ -94,26 +84,25 @@ void StartupSerializer::SerializeObject(HeapObject obj) {
use_simulator = true;
#endif
- if (use_simulator && obj->IsAccessorInfo()) {
+ if (use_simulator && obj.IsAccessorInfo()) {
// Wipe external reference redirects in the accessor info.
AccessorInfo info = AccessorInfo::cast(obj);
- Address original_address = Foreign::cast(info->getter())->foreign_address();
- Foreign::cast(info->js_getter())->set_foreign_address(original_address);
+ Address original_address = Foreign::cast(info.getter()).foreign_address();
+ Foreign::cast(info.js_getter()).set_foreign_address(original_address);
accessor_infos_.push_back(info);
- } else if (use_simulator && obj->IsCallHandlerInfo()) {
+ } else if (use_simulator && obj.IsCallHandlerInfo()) {
CallHandlerInfo info = CallHandlerInfo::cast(obj);
- Address original_address =
- Foreign::cast(info->callback())->foreign_address();
- Foreign::cast(info->js_callback())->set_foreign_address(original_address);
+ Address original_address = Foreign::cast(info.callback()).foreign_address();
+ Foreign::cast(info.js_callback()).set_foreign_address(original_address);
call_handler_infos_.push_back(info);
- } else if (obj->IsScript() && Script::cast(obj)->IsUserJavaScript()) {
- Script::cast(obj)->set_context_data(
+ } else if (obj.IsScript() && Script::cast(obj).IsUserJavaScript()) {
+ Script::cast(obj).set_context_data(
ReadOnlyRoots(isolate()).uninitialized_symbol());
- } else if (obj->IsSharedFunctionInfo()) {
+ } else if (obj.IsSharedFunctionInfo()) {
// Clear inferred name for native functions.
SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
- if (!shared->IsSubjectToDebugging() && shared->HasUncompiledData()) {
- shared->uncompiled_data()->set_inferred_name(
+ if (!shared.IsSubjectToDebugging() && shared.HasUncompiledData()) {
+ shared.uncompiled_data().set_inferred_name(
ReadOnlyRoots(isolate()).empty_string());
}
}
@@ -160,7 +149,7 @@ SerializedHandleChecker::SerializedHandleChecker(Isolate* isolate,
: isolate_(isolate) {
AddToSet(isolate->heap()->serialized_objects());
for (auto const& context : *contexts) {
- AddToSet(context->serialized_objects());
+ AddToSet(context.serialized_objects());
}
}
@@ -177,8 +166,8 @@ void StartupSerializer::SerializeUsingPartialSnapshotCache(
}
void SerializedHandleChecker::AddToSet(FixedArray serialized) {
- int length = serialized->length();
- for (int i = 0; i < length; i++) serialized_.insert(serialized->get(i));
+ int length = serialized.length();
+ for (int i = 0; i < length; i++) serialized_.insert(serialized.get(i));
}
void SerializedHandleChecker::VisitRootPointers(Root root,
@@ -189,7 +178,7 @@ void SerializedHandleChecker::VisitRootPointers(Root root,
if (serialized_.find(*p) != serialized_.end()) continue;
PrintF("%s handle not serialized: ",
root == Root::kGlobalHandles ? "global" : "eternal");
- (*p)->Print();
+ (*p).Print();
ok_ = false;
}
}
diff --git a/deps/v8/src/string-hasher-inl.h b/deps/v8/src/string-hasher-inl.h
deleted file mode 100644
index 21c92084ca..0000000000
--- a/deps/v8/src/string-hasher-inl.h
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_STRING_HASHER_INL_H_
-#define V8_STRING_HASHER_INL_H_
-
-#include "src/string-hasher.h"
-
-#include "src/char-predicates-inl.h"
-#include "src/objects.h"
-#include "src/objects/string-inl.h"
-#include "src/utils-inl.h"
-
-namespace v8 {
-namespace internal {
-
-StringHasher::StringHasher(int length, uint64_t seed)
- : length_(length),
- raw_running_hash_(static_cast<uint32_t>(seed)),
- array_index_(0),
- is_array_index_(IsInRange(length, 1, String::kMaxArrayIndexSize)) {
- DCHECK(FLAG_randomize_hashes || raw_running_hash_ == 0);
-}
-
-bool StringHasher::has_trivial_hash() {
- return length_ > String::kMaxHashCalcLength;
-}
-
-uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint16_t c) {
- running_hash += c;
- running_hash += (running_hash << 10);
- running_hash ^= (running_hash >> 6);
- return running_hash;
-}
-
-uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
- running_hash += (running_hash << 3);
- running_hash ^= (running_hash >> 11);
- running_hash += (running_hash << 15);
- int32_t hash = static_cast<int32_t>(running_hash & String::kHashBitMask);
- int32_t mask = (hash - 1) >> 31;
- return running_hash | (kZeroHash & mask);
-}
-
-template <typename Char>
-uint32_t StringHasher::ComputeRunningHash(uint32_t running_hash,
- const Char* chars, int length) {
- DCHECK_LE(0, length);
- DCHECK_IMPLIES(0 < length, chars != nullptr);
- const Char* end = &chars[length];
- while (chars != end) {
- running_hash = AddCharacterCore(running_hash, *chars++);
- }
- return running_hash;
-}
-
-void StringHasher::AddCharacter(uint16_t c) {
- // Use the Jenkins one-at-a-time hash function to update the hash
- // for the given character.
- raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
-}
-
-bool StringHasher::UpdateIndex(uint16_t c) {
- DCHECK(is_array_index_);
- if (!TryAddIndexChar(&array_index_, c)) {
- is_array_index_ = false;
- return false;
- }
- is_array_index_ = array_index_ != 0 || length_ == 1;
- return is_array_index_;
-}
-
-template <typename Char>
-inline void StringHasher::AddCharacters(const Char* chars, int length) {
- DCHECK(sizeof(Char) == 1 || sizeof(Char) == 2);
- int i = 0;
- if (is_array_index_) {
- for (; i < length; i++) {
- AddCharacter(chars[i]);
- if (!UpdateIndex(chars[i])) {
- i++;
- break;
- }
- }
- }
- raw_running_hash_ =
- ComputeRunningHash(raw_running_hash_, &chars[i], length - i);
-}
-
-template <typename schar>
-uint32_t StringHasher::HashSequentialString(const schar* chars, int length,
- uint64_t seed) {
-#ifdef DEBUG
- StringHasher hasher(length, seed);
- if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length);
- uint32_t expected = hasher.GetHashField();
-#endif
-
- // Check whether the string is a valid array index. In that case, compute the
- // array index hash. It'll fall through to compute a regular string hash from
- // the start if it turns out that the string isn't a valid array index.
- if (IsInRange(length, 1, String::kMaxArrayIndexSize)) {
- if (IsDecimalDigit(chars[0]) && (length == 1 || chars[0] != '0')) {
- uint32_t index = chars[0] - '0';
- int i = 1;
- do {
- if (i == length) {
- uint32_t result = MakeArrayIndexHash(index, length);
- DCHECK_EQ(expected, result);
- return result;
- }
- } while (TryAddIndexChar(&index, chars[i++]));
- }
- } else if (length > String::kMaxHashCalcLength) {
- // String hash of a large string is simply the length.
- uint32_t result =
- (length << String::kHashShift) | String::kIsNotArrayIndexMask;
- DCHECK_EQ(result, expected);
- return result;
- }
-
- // Non-array-index hash.
- uint32_t hash =
- ComputeRunningHash(static_cast<uint32_t>(seed), chars, length);
-
- uint32_t result =
- (GetHashCore(hash) << String::kHashShift) | String::kIsNotArrayIndexMask;
- DCHECK_EQ(result, expected);
- return result;
-}
-
-IteratingStringHasher::IteratingStringHasher(int len, uint64_t seed)
- : StringHasher(len, seed) {}
-
-uint32_t IteratingStringHasher::Hash(String string, uint64_t seed) {
- IteratingStringHasher hasher(string->length(), seed);
- // Nothing to do.
- if (hasher.has_trivial_hash()) return hasher.GetHashField();
- ConsString cons_string = String::VisitFlat(&hasher, string);
- if (cons_string.is_null()) return hasher.GetHashField();
- hasher.VisitConsString(cons_string);
- return hasher.GetHashField();
-}
-
-void IteratingStringHasher::VisitOneByteString(const uint8_t* chars,
- int length) {
- AddCharacters(chars, length);
-}
-
-void IteratingStringHasher::VisitTwoByteString(const uint16_t* chars,
- int length) {
- AddCharacters(chars, length);
-}
-
-std::size_t SeededStringHasher::operator()(const char* name) const {
- return StringHasher::HashSequentialString(
- name, static_cast<int>(strlen(name)), hashseed_);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_STRING_HASHER_INL_H_
diff --git a/deps/v8/src/string-hasher.h b/deps/v8/src/string-hasher.h
deleted file mode 100644
index c661500acd..0000000000
--- a/deps/v8/src/string-hasher.h
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_STRING_HASHER_H_
-#define V8_STRING_HASHER_H_
-
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-
-class ConsString;
-class String;
-
-template <typename T>
-class Vector;
-
-class V8_EXPORT_PRIVATE StringHasher {
- public:
- explicit inline StringHasher(int length, uint64_t seed);
-
- template <typename schar>
- static inline uint32_t HashSequentialString(const schar* chars, int length,
- uint64_t seed);
-
- // Reads all the data, even for long strings and computes the utf16 length.
- static uint32_t ComputeUtf8Hash(Vector<const char> chars, uint64_t seed,
- int* utf16_length_out);
-
- // Calculated hash value for a string consisting of 1 to
- // String::kMaxArrayIndexSize digits with no leading zeros (except "0").
- // value is represented decimal value.
- static uint32_t MakeArrayIndexHash(uint32_t value, int length);
-
- // No string is allowed to have a hash of zero. That value is reserved
- // for internal properties. If the hash calculation yields zero then we
- // use 27 instead.
- static const int kZeroHash = 27;
-
- // Reusable parts of the hashing algorithm.
- V8_INLINE static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c);
- V8_INLINE static uint32_t GetHashCore(uint32_t running_hash);
- template <typename Char>
- V8_INLINE static uint32_t ComputeRunningHash(uint32_t running_hash,
- const Char* chars, int length);
-
- protected:
- // Returns the value to store in the hash field of a string with
- // the given length and contents.
- uint32_t GetHashField();
- // Returns true if the hash of this string can be computed without
- // looking at the contents.
- inline bool has_trivial_hash();
- // Adds a block of characters to the hash.
- template <typename Char>
- inline void AddCharacters(const Char* chars, int len);
-
- private:
- // Add a character to the hash.
- inline void AddCharacter(uint16_t c);
- // Update index. Returns true if string is still an index.
- inline bool UpdateIndex(uint16_t c);
-
- int length_;
- uint32_t raw_running_hash_;
- uint32_t array_index_;
- bool is_array_index_;
- DISALLOW_COPY_AND_ASSIGN(StringHasher);
-};
-
-class IteratingStringHasher : public StringHasher {
- public:
- static inline uint32_t Hash(String string, uint64_t seed);
- inline void VisitOneByteString(const uint8_t* chars, int length);
- inline void VisitTwoByteString(const uint16_t* chars, int length);
-
- private:
- inline IteratingStringHasher(int len, uint64_t seed);
- void VisitConsString(ConsString cons_string);
- DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
-};
-
-// Useful for std containers that require something ()'able.
-struct SeededStringHasher {
- explicit SeededStringHasher(uint64_t hashseed) : hashseed_(hashseed) {}
- inline std::size_t operator()(const char* name) const;
-
- uint64_t hashseed_;
-};
-
-// Useful for std containers that require something ()'able.
-struct StringEquals {
- bool operator()(const char* name1, const char* name2) const {
- return strcmp(name1, name2) == 0;
- }
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_STRING_HASHER_H_
diff --git a/deps/v8/src/strings/OWNERS b/deps/v8/src/strings/OWNERS
new file mode 100644
index 0000000000..037c916f24
--- /dev/null
+++ b/deps/v8/src/strings/OWNERS
@@ -0,0 +1,5 @@
+bmeurer@chromium.org
+jgruber@chromium.org
+jkummerow@chromium.org
+verwaest@chromium.org
+yangguo@chromium.org
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/strings/char-predicates-inl.h
index 329b3a0fbb..cdd8ddb4ea 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/strings/char-predicates-inl.h
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CHAR_PREDICATES_INL_H_
-#define V8_CHAR_PREDICATES_INL_H_
+#ifndef V8_STRINGS_CHAR_PREDICATES_INL_H_
+#define V8_STRINGS_CHAR_PREDICATES_INL_H_
-#include "src/char-predicates.h"
+#include "src/strings/char-predicates.h"
namespace v8 {
namespace internal {
-
// If c is in 'A'-'Z' or 'a'-'z', return its lower-case.
// Else, return something outside of 'A'-'Z' and 'a'-'z'.
// Note: it ignores LOCALE.
@@ -53,9 +52,8 @@ inline constexpr bool IsBinaryDigit(uc32 c) {
}
inline constexpr bool IsRegExpWord(uc16 c) {
- return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
- || IsDecimalDigit(c)
- || (c == '_');
+ return IsInRange(AsciiAlphaToLower(c), 'a', 'z') || IsDecimalDigit(c) ||
+ (c == '_');
}
inline constexpr bool IsRegExpNewline(uc16 c) {
@@ -124,4 +122,4 @@ bool IsLineTerminatorSequence(uc32 c, uc32 next) {
} // namespace internal
} // namespace v8
-#endif // V8_CHAR_PREDICATES_INL_H_
+#endif // V8_STRINGS_CHAR_PREDICATES_INL_H_
diff --git a/deps/v8/src/char-predicates.cc b/deps/v8/src/strings/char-predicates.cc
index a1e8b68fe9..0133a03517 100644
--- a/deps/v8/src/char-predicates.cc
+++ b/deps/v8/src/strings/char-predicates.cc
@@ -6,7 +6,7 @@
#error Internationalization is expected to be enabled.
#endif // V8_INTL_SUPPORT
-#include "src/char-predicates.h"
+#include "src/strings/char-predicates.h"
#include "unicode/uchar.h"
#include "unicode/urename.h"
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/strings/char-predicates.h
index 4828e19a00..43b4d091d1 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/strings/char-predicates.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CHAR_PREDICATES_H_
-#define V8_CHAR_PREDICATES_H_
+#ifndef V8_STRINGS_CHAR_PREDICATES_H_
+#define V8_STRINGS_CHAR_PREDICATES_H_
-#include "src/globals.h"
-#include "src/unicode.h"
+#include "src/common/globals.h"
+#include "src/strings/unicode.h"
namespace v8 {
namespace internal {
@@ -82,4 +82,4 @@ inline bool IsLineTerminatorSequence(uc32 c, uc32 next);
} // namespace internal
} // namespace v8
-#endif // V8_CHAR_PREDICATES_H_
+#endif // V8_STRINGS_CHAR_PREDICATES_H_
diff --git a/deps/v8/src/string-builder-inl.h b/deps/v8/src/strings/string-builder-inl.h
index 8442bbd455..88d69b37b5 100644
--- a/deps/v8/src/string-builder-inl.h
+++ b/deps/v8/src/strings/string-builder-inl.h
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STRING_BUILDER_INL_H_
-#define V8_STRING_BUILDER_INL_H_
+#ifndef V8_STRINGS_STRING_BUILDER_INL_H_
+#define V8_STRINGS_STRING_BUILDER_INL_H_
-#include "src/assert-scope.h"
-#include "src/handles-inl.h"
+#include "src/common/assert-scope.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/fixed-array.h"
+#include "src/objects/objects.h"
#include "src/objects/string-inl.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -20,11 +20,11 @@ namespace internal {
const int kStringBuilderConcatHelperLengthBits = 11;
const int kStringBuilderConcatHelperPositionBits = 19;
-typedef BitField<int, 0, kStringBuilderConcatHelperLengthBits>
- StringBuilderSubstringLength;
-typedef BitField<int, kStringBuilderConcatHelperLengthBits,
- kStringBuilderConcatHelperPositionBits>
- StringBuilderSubstringPosition;
+using StringBuilderSubstringLength =
+ BitField<int, 0, kStringBuilderConcatHelperLengthBits>;
+using StringBuilderSubstringPosition =
+ BitField<int, kStringBuilderConcatHelperLengthBits,
+ kStringBuilderConcatHelperPositionBits>;
template <typename sinkchar>
void StringBuilderConcatHelper(String special, sinkchar* sink,
@@ -296,15 +296,15 @@ void IncrementalStringBuilder::Append(SrcChar c) {
if (sizeof(DestChar) == 1) {
DCHECK_EQ(String::ONE_BYTE_ENCODING, encoding_);
SeqOneByteString::cast(*current_part_)
- ->SeqOneByteStringSet(current_index_++, c);
+ .SeqOneByteStringSet(current_index_++, c);
} else {
DCHECK_EQ(String::TWO_BYTE_ENCODING, encoding_);
SeqTwoByteString::cast(*current_part_)
- ->SeqTwoByteStringSet(current_index_++, c);
+ .SeqTwoByteStringSet(current_index_++, c);
}
if (current_index_ == part_length_) Extend();
}
} // namespace internal
} // namespace v8
-#endif // V8_STRING_BUILDER_INL_H_
+#endif // V8_STRINGS_STRING_BUILDER_INL_H_
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/strings/string-builder.cc
index 0c48681bf7..f647aed190 100644
--- a/deps/v8/src/string-builder.cc
+++ b/deps/v8/src/strings/string-builder.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/string-builder-inl.h"
+#include "src/strings/string-builder-inl.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/objects/fixed-array-inl.h"
#include "src/objects/js-array-inl.h"
@@ -17,8 +17,8 @@ void StringBuilderConcatHelper(String special, sinkchar* sink,
DisallowHeapAllocation no_gc;
int position = 0;
for (int i = 0; i < array_length; i++) {
- Object element = fixed_array->get(i);
- if (element->IsSmi()) {
+ Object element = fixed_array.get(i);
+ if (element.IsSmi()) {
// Smi encoding of position and length.
int encoded_slice = Smi::ToInt(element);
int pos;
@@ -29,8 +29,8 @@ void StringBuilderConcatHelper(String special, sinkchar* sink,
len = StringBuilderSubstringLength::decode(encoded_slice);
} else {
// Position and length encoded in two smis.
- Object obj = fixed_array->get(++i);
- DCHECK(obj->IsSmi());
+ Object obj = fixed_array.get(++i);
+ DCHECK(obj.IsSmi());
pos = Smi::ToInt(obj);
len = -encoded_slice;
}
@@ -38,7 +38,7 @@ void StringBuilderConcatHelper(String special, sinkchar* sink,
position += len;
} else {
String string = String::cast(element);
- int element_length = string->length();
+ int element_length = string.length();
String::WriteToFlat(string, sink + position, 0, element_length);
position += element_length;
}
@@ -59,8 +59,8 @@ int StringBuilderConcatLength(int special_length, FixedArray fixed_array,
int position = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
- Object elt = fixed_array->get(i);
- if (elt->IsSmi()) {
+ Object elt = fixed_array.get(i);
+ if (elt.IsSmi()) {
// Smi encoding of position and length.
int smi_value = Smi::ToInt(elt);
int pos;
@@ -75,8 +75,8 @@ int StringBuilderConcatLength(int special_length, FixedArray fixed_array,
// Get the position and check that it is a positive smi.
i++;
if (i >= array_length) return -1;
- Object next_smi = fixed_array->get(i);
- if (!next_smi->IsSmi()) return -1;
+ Object next_smi = fixed_array.get(i);
+ if (!next_smi.IsSmi()) return -1;
pos = Smi::ToInt(next_smi);
if (pos < 0) return -1;
}
@@ -84,11 +84,11 @@ int StringBuilderConcatLength(int special_length, FixedArray fixed_array,
DCHECK_GE(len, 0);
if (pos > special_length || len > special_length - pos) return -1;
increment = len;
- } else if (elt->IsString()) {
+ } else if (elt.IsString()) {
String element = String::cast(elt);
- int element_length = element->length();
+ int element_length = element.length();
increment = element_length;
- if (*one_byte && !element->IsOneByteRepresentation()) {
+ if (*one_byte && !element.IsOneByteRepresentation()) {
*one_byte = false;
}
} else {
@@ -140,14 +140,14 @@ void FixedArrayBuilder::EnsureCapacity(Isolate* isolate, int elements) {
}
void FixedArrayBuilder::Add(Object value) {
- DCHECK(!value->IsSmi());
+ DCHECK(!value.IsSmi());
array_->set(length_, value);
length_++;
has_non_smi_elements_ = true;
}
void FixedArrayBuilder::Add(Smi value) {
- DCHECK(value->IsSmi());
+ DCHECK(value.IsSmi());
array_->set(length_, value);
length_++;
}
@@ -258,7 +258,6 @@ void IncrementalStringBuilder::Accumulate(Handle<String> new_part) {
set_accumulator(new_accumulator);
}
-
void IncrementalStringBuilder::Extend() {
DCHECK_EQ(current_index_, current_part()->length());
Accumulate(current_part());
@@ -276,7 +275,6 @@ void IncrementalStringBuilder::Extend() {
current_index_ = 0;
}
-
MaybeHandle<String> IncrementalStringBuilder::Finish() {
ShrinkCurrentPart();
Accumulate(current_part());
@@ -286,7 +284,6 @@ MaybeHandle<String> IncrementalStringBuilder::Finish() {
return accumulator();
}
-
void IncrementalStringBuilder::AppendString(Handle<String> string) {
ShrinkCurrentPart();
part_length_ = kInitialPartLength; // Allocate conservatively.
diff --git a/deps/v8/src/string-case.cc b/deps/v8/src/strings/string-case.cc
index d3def4110c..88370a81e3 100644
--- a/deps/v8/src/string-case.cc
+++ b/deps/v8/src/strings/string-case.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/string-case.h"
+#include "src/strings/string-case.h"
-#include "src/assert-scope.h"
#include "src/base/logging.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/string-case.h b/deps/v8/src/strings/string-case.h
index f57bae494f..f45732fb54 100644
--- a/deps/v8/src/string-case.h
+++ b/deps/v8/src/strings/string-case.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STRING_CASE_H_
-#define V8_STRING_CASE_H_
+#ifndef V8_STRINGS_STRING_CASE_H_
+#define V8_STRINGS_STRING_CASE_H_
namespace v8 {
namespace internal {
@@ -14,4 +14,4 @@ int FastAsciiConvert(char* dst, const char* src, int length, bool* changed_out);
} // namespace internal
} // namespace v8
-#endif // V8_STRING_CASE_H_
+#endif // V8_STRINGS_STRING_CASE_H_
diff --git a/deps/v8/src/strings/string-hasher-inl.h b/deps/v8/src/strings/string-hasher-inl.h
new file mode 100644
index 0000000000..b547d0a78d
--- /dev/null
+++ b/deps/v8/src/strings/string-hasher-inl.h
@@ -0,0 +1,81 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRINGS_STRING_HASHER_INL_H_
+#define V8_STRINGS_STRING_HASHER_INL_H_
+
+#include "src/strings/string-hasher.h"
+
+#include "src/objects/objects.h"
+#include "src/objects/string-inl.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/utils/utils-inl.h"
+
+namespace v8 {
+namespace internal {
+
+uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint16_t c) {
+ running_hash += c;
+ running_hash += (running_hash << 10);
+ running_hash ^= (running_hash >> 6);
+ return running_hash;
+}
+
+uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
+ running_hash += (running_hash << 3);
+ running_hash ^= (running_hash >> 11);
+ running_hash += (running_hash << 15);
+ int32_t hash = static_cast<int32_t>(running_hash & String::kHashBitMask);
+ int32_t mask = (hash - 1) >> 31;
+ return running_hash | (kZeroHash & mask);
+}
+
+uint32_t StringHasher::GetTrivialHash(int length) {
+ DCHECK_GT(length, String::kMaxHashCalcLength);
+ // String hash of a large string is simply the length.
+ return (length << String::kHashShift) | String::kIsNotArrayIndexMask;
+}
+
+template <typename schar>
+uint32_t StringHasher::HashSequentialString(const schar* chars, int length,
+ uint64_t seed) {
+ // Check whether the string is a valid array index. In that case, compute the
+ // array index hash. It'll fall through to compute a regular string hash from
+ // the start if it turns out that the string isn't a valid array index.
+ if (IsInRange(length, 1, String::kMaxArrayIndexSize)) {
+ if (IsDecimalDigit(chars[0]) && (length == 1 || chars[0] != '0')) {
+ uint32_t index = chars[0] - '0';
+ int i = 1;
+ do {
+ if (i == length) {
+ return MakeArrayIndexHash(index, length);
+ }
+ } while (TryAddIndexChar(&index, chars[i++]));
+ }
+ } else if (length > String::kMaxHashCalcLength) {
+ return GetTrivialHash(length);
+ }
+
+ // Non-array-index hash.
+ DCHECK_LE(0, length);
+ DCHECK_IMPLIES(0 < length, chars != nullptr);
+ uint32_t running_hash = static_cast<uint32_t>(seed);
+ const schar* end = &chars[length];
+ while (chars != end) {
+ running_hash = AddCharacterCore(running_hash, *chars++);
+ }
+
+ return (GetHashCore(running_hash) << String::kHashShift) |
+ String::kIsNotArrayIndexMask;
+}
+
+std::size_t SeededStringHasher::operator()(const char* name) const {
+ return StringHasher::HashSequentialString(
+ name, static_cast<int>(strlen(name)), hashseed_);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_STRINGS_STRING_HASHER_INL_H_
diff --git a/deps/v8/src/strings/string-hasher.h b/deps/v8/src/strings/string-hasher.h
new file mode 100644
index 0000000000..b3917b75cd
--- /dev/null
+++ b/deps/v8/src/strings/string-hasher.h
@@ -0,0 +1,58 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRINGS_STRING_HASHER_H_
+#define V8_STRINGS_STRING_HASHER_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class Vector;
+
+class V8_EXPORT_PRIVATE StringHasher final {
+ public:
+ StringHasher() = delete;
+ template <typename schar>
+ static inline uint32_t HashSequentialString(const schar* chars, int length,
+ uint64_t seed);
+
+ // Calculated hash value for a string consisting of 1 to
+ // String::kMaxArrayIndexSize digits with no leading zeros (except "0").
+ // value is represented decimal value.
+ static uint32_t MakeArrayIndexHash(uint32_t value, int length);
+
+ // No string is allowed to have a hash of zero. That value is reserved
+ // for internal properties. If the hash calculation yields zero then we
+ // use 27 instead.
+ static const int kZeroHash = 27;
+
+ // Reusable parts of the hashing algorithm.
+ V8_INLINE static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c);
+ V8_INLINE static uint32_t GetHashCore(uint32_t running_hash);
+
+ static inline uint32_t GetTrivialHash(int length);
+};
+
+// Useful for std containers that require something ()'able.
+struct SeededStringHasher {
+ explicit SeededStringHasher(uint64_t hashseed) : hashseed_(hashseed) {}
+ inline std::size_t operator()(const char* name) const;
+
+ uint64_t hashseed_;
+};
+
+// Useful for std containers that require something ()'able.
+struct StringEquals {
+ bool operator()(const char* name1, const char* name2) const {
+ return strcmp(name1, name2) == 0;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_STRINGS_STRING_HASHER_H_
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/strings/string-search.h
index bf96879a29..1d5800ebcf 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/strings/string-search.h
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STRING_SEARCH_H_
-#define V8_STRING_SEARCH_H_
+#ifndef V8_STRINGS_STRING_SEARCH_H_
+#define V8_STRINGS_STRING_SEARCH_H_
-#include "src/isolate.h"
-#include "src/vector.h"
+#include "src/execution/isolate.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
-
//---------------------------------------------------------------------
// String Search object.
//---------------------------------------------------------------------
@@ -46,13 +45,12 @@ class StringSearchBase {
}
static inline bool IsOneByteString(Vector<const uc16> string) {
- return String::IsOneByte(string.start(), string.length());
+ return String::IsOneByte(string.begin(), string.length());
}
friend class Isolate;
};
-
template <typename PatternChar, typename SubjectChar>
class StringSearch : private StringSearchBase {
public:
@@ -94,14 +92,11 @@ class StringSearch : private StringSearchBase {
}
private:
- typedef int (*SearchFunction)( // NOLINT - it's not a cast!
- StringSearch<PatternChar, SubjectChar>*,
- Vector<const SubjectChar>,
- int);
+ using SearchFunction = int (*)(StringSearch<PatternChar, SubjectChar>*,
+ Vector<const SubjectChar>, int);
static int FailSearch(StringSearch<PatternChar, SubjectChar>*,
- Vector<const SubjectChar>,
- int) {
+ Vector<const SubjectChar>, int) {
return -1;
}
@@ -110,17 +105,14 @@ class StringSearch : private StringSearchBase {
int start_index);
static int LinearSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
+ Vector<const SubjectChar> subject, int start_index);
static int InitialSearch(StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
+ Vector<const SubjectChar> subject, int start_index);
static int BoyerMooreHorspoolSearch(
StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index);
+ Vector<const SubjectChar> subject, int start_index);
static int BoyerMooreSearch(StringSearch<PatternChar, SubjectChar>* search,
Vector<const SubjectChar> subject,
@@ -130,9 +122,7 @@ class StringSearch : private StringSearchBase {
void PopulateBoyerMooreTable();
- static inline bool exceedsOneByte(uint8_t c) {
- return false;
- }
+ static inline bool exceedsOneByte(uint8_t c) { return false; }
static inline bool exceedsOneByte(uint16_t c) {
return c > String::kMaxOneByteCharCodeU;
@@ -161,9 +151,7 @@ class StringSearch : private StringSearchBase {
// Store for the BoyerMoore(Horspool) bad char shift table.
// Return a table covering the last kBMMaxShift+1 positions of
// pattern.
- int* bad_char_table() {
- return isolate_->bad_char_shift_table();
- }
+ int* bad_char_table() { return isolate_->bad_char_shift_table(); }
// Store for the BoyerMoore good suffix shift table.
int* good_suffix_shift_table() {
@@ -189,23 +177,19 @@ class StringSearch : private StringSearchBase {
int start_;
};
-
template <typename T, typename U>
inline T AlignDown(T value, U alignment) {
return reinterpret_cast<T>(
(reinterpret_cast<uintptr_t>(value) & ~(alignment - 1)));
}
-
inline uint8_t GetHighestValueByte(uc16 character) {
return Max(static_cast<uint8_t>(character & 0xFF),
static_cast<uint8_t>(character >> 8));
}
-
inline uint8_t GetHighestValueByte(uint8_t character) { return character; }
-
template <typename PatternChar, typename SubjectChar>
inline int FindFirstCharacter(Vector<const PatternChar> pattern,
Vector<const SubjectChar> subject, int index) {
@@ -218,18 +202,17 @@ inline int FindFirstCharacter(Vector<const PatternChar> pattern,
do {
DCHECK_GE(max_n - pos, 0);
const SubjectChar* char_pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + pos, search_byte,
+ memchr(subject.begin() + pos, search_byte,
(max_n - pos) * sizeof(SubjectChar)));
if (char_pos == nullptr) return -1;
char_pos = AlignDown(char_pos, sizeof(SubjectChar));
- pos = static_cast<int>(char_pos - subject.start());
+ pos = static_cast<int>(char_pos - subject.begin());
if (subject[pos] == search_char) return pos;
} while (++pos < max_n);
return -1;
}
-
//---------------------------------------------------------------------
// Single Character Pattern Search Strategy
//---------------------------------------------------------------------
@@ -237,8 +220,7 @@ inline int FindFirstCharacter(Vector<const PatternChar> pattern,
template <typename PatternChar, typename SubjectChar>
int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int index) {
+ Vector<const SubjectChar> subject, int index) {
DCHECK_EQ(1, search->pattern_.length());
PatternChar pattern_first_char = search->pattern_[0];
if (sizeof(PatternChar) > sizeof(SubjectChar)) {
@@ -253,10 +235,8 @@ int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
// Linear Search Strategy
//---------------------------------------------------------------------
-
template <typename PatternChar, typename SubjectChar>
-inline bool CharCompare(const PatternChar* pattern,
- const SubjectChar* subject,
+inline bool CharCompare(const PatternChar* pattern, const SubjectChar* subject,
int length) {
DCHECK_GT(length, 0);
int pos = 0;
@@ -269,13 +249,11 @@ inline bool CharCompare(const PatternChar* pattern,
return true;
}
-
// Simple linear search for short patterns. Never bails out.
template <typename PatternChar, typename SubjectChar>
int StringSearch<PatternChar, SubjectChar>::LinearSearch(
StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int index) {
+ Vector<const SubjectChar> subject, int index) {
Vector<const PatternChar> pattern = search->pattern_;
DCHECK_GT(pattern.length(), 1);
int pattern_length = pattern.length();
@@ -288,8 +266,7 @@ int StringSearch<PatternChar, SubjectChar>::LinearSearch(
i++;
// Loop extracted to separate function to allow using return to do
// a deeper break.
- if (CharCompare(pattern.start() + 1,
- subject.start() + i,
+ if (CharCompare(pattern.begin() + 1, subject.begin() + i,
pattern_length - 1)) {
return i - 1;
}
@@ -304,8 +281,7 @@ int StringSearch<PatternChar, SubjectChar>::LinearSearch(
template <typename PatternChar, typename SubjectChar>
int StringSearch<PatternChar, SubjectChar>::BoyerMooreSearch(
StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index) {
+ Vector<const SubjectChar> subject, int start_index) {
Vector<const PatternChar> pattern = search->pattern_;
int subject_length = subject.length();
int pattern_length = pattern.length();
@@ -322,8 +298,7 @@ int StringSearch<PatternChar, SubjectChar>::BoyerMooreSearch(
int j = pattern_length - 1;
int c;
while (last_char != (c = subject[index + j])) {
- int shift =
- j - CharOccurrence(bad_char_occurence, c);
+ int shift = j - CharOccurrence(bad_char_occurence, c);
index += shift;
if (index > subject_length - pattern_length) {
return -1;
@@ -335,13 +310,12 @@ int StringSearch<PatternChar, SubjectChar>::BoyerMooreSearch(
} else if (j < start) {
// we have matched more than our tables allow us to be smart about.
// Fall back on BMH shift.
- index += pattern_length - 1
- - CharOccurrence(bad_char_occurence,
- static_cast<SubjectChar>(last_char));
+ index += pattern_length - 1 -
+ CharOccurrence(bad_char_occurence,
+ static_cast<SubjectChar>(last_char));
} else {
int gs_shift = good_suffix_shift[j + 1];
- int bc_occ =
- CharOccurrence(bad_char_occurence, c);
+ int bc_occ = CharOccurrence(bad_char_occurence, c);
int shift = j - bc_occ;
if (gs_shift > shift) {
shift = gs_shift;
@@ -353,11 +327,10 @@ int StringSearch<PatternChar, SubjectChar>::BoyerMooreSearch(
return -1;
}
-
template <typename PatternChar, typename SubjectChar>
void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreTable() {
int pattern_length = pattern_.length();
- const PatternChar* pattern = pattern_.start();
+ const PatternChar* pattern = pattern_.begin();
// Only look at the last kBMMaxShift characters of pattern (from start_
// to pattern_length).
int start = start_;
@@ -427,8 +400,7 @@ void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreTable() {
template <typename PatternChar, typename SubjectChar>
int StringSearch<PatternChar, SubjectChar>::BoyerMooreHorspoolSearch(
StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int start_index) {
+ Vector<const SubjectChar> subject, int start_index) {
Vector<const PatternChar> pattern = search->pattern_;
int subject_length = subject.length();
int pattern_length = pattern.length();
@@ -437,7 +409,8 @@ int StringSearch<PatternChar, SubjectChar>::BoyerMooreHorspoolSearch(
// How bad we are doing without a good-suffix table.
PatternChar last_char = pattern[pattern_length - 1];
- int last_char_shift = pattern_length - 1 -
+ int last_char_shift =
+ pattern_length - 1 -
CharOccurrence(char_occurrences, static_cast<SubjectChar>(last_char));
// Perform search
int index = start_index; // No matches found prior to this index.
@@ -474,7 +447,6 @@ int StringSearch<PatternChar, SubjectChar>::BoyerMooreHorspoolSearch(
return -1;
}
-
template <typename PatternChar, typename SubjectChar>
void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreHorspoolTable() {
int pattern_length = pattern_.length();
@@ -488,9 +460,7 @@ void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreHorspoolTable() {
// Notice: Doesn't include the last character.
int table_size = AlphabetSize();
if (start == 0) { // All patterns less than kBMMaxShift in length.
- memset(bad_char_occurrence,
- -1,
- table_size * sizeof(*bad_char_occurrence));
+ memset(bad_char_occurrence, -1, table_size * sizeof(*bad_char_occurrence));
} else {
for (int i = 0; i < table_size; i++) {
bad_char_occurrence[i] = start - 1;
@@ -512,8 +482,7 @@ void StringSearch<PatternChar, SubjectChar>::PopulateBoyerMooreHorspoolTable() {
template <typename PatternChar, typename SubjectChar>
int StringSearch<PatternChar, SubjectChar>::InitialSearch(
StringSearch<PatternChar, SubjectChar>* search,
- Vector<const SubjectChar> subject,
- int index) {
+ Vector<const SubjectChar> subject, int index) {
Vector<const PatternChar> pattern = search->pattern_;
int pattern_length = pattern.length();
// Badness is a count of how much work we have done. When we have
@@ -549,16 +518,13 @@ int StringSearch<PatternChar, SubjectChar>::InitialSearch(
return -1;
}
-
// Perform a a single stand-alone search.
// If searching multiple times for the same pattern, a search
// object should be constructed once and the Search function then called
// for each search.
template <typename SubjectChar, typename PatternChar>
-int SearchString(Isolate* isolate,
- Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- int start_index) {
+int SearchString(Isolate* isolate, Vector<const SubjectChar> subject,
+ Vector<const PatternChar> pattern, int start_index) {
StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
return search.Search(subject, start_index);
}
@@ -579,4 +545,4 @@ intptr_t SearchStringRaw(Isolate* isolate, const SubjectChar* subject_ptr,
} // namespace internal
} // namespace v8
-#endif // V8_STRING_SEARCH_H_
+#endif // V8_STRINGS_STRING_SEARCH_H_
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/strings/string-stream.cc
index 945a113704..db1891949e 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/strings/string-stream.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/string-stream.h"
+#include "src/strings/string-stream.h"
#include <memory>
-#include "src/handles-inl.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/logging/log.h"
#include "src/objects/js-array-inl.h"
-#include "src/prototype.h"
-#include "src/vector.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/prototype.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -23,19 +23,16 @@ char* HeapStringAllocator::allocate(unsigned bytes) {
return space_;
}
-
char* FixedStringAllocator::allocate(unsigned bytes) {
CHECK_LE(bytes, length_);
return buffer_;
}
-
char* FixedStringAllocator::grow(unsigned* old) {
*old = length_;
return buffer_;
}
-
bool StringStream::Put(char c) {
if (full()) return false;
DCHECK(length_ < capacity_);
@@ -66,20 +63,28 @@ bool StringStream::Put(char c) {
return true;
}
-
// A control character is one that configures a format element. For
// instance, in %.5s, .5 are control characters.
static bool IsControlChar(char c) {
switch (c) {
- case '0': case '1': case '2': case '3': case '4': case '5':
- case '6': case '7': case '8': case '9': case '.': case '-':
- return true;
- default:
- return false;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '.':
+ case '-':
+ return true;
+ default:
+ return false;
}
}
-
void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
// If we already ran out of space then return immediately.
if (full()) return;
@@ -99,78 +104,85 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
temp[format_length++] = format[offset++];
while (offset < format.length() && IsControlChar(format[offset]))
temp[format_length++] = format[offset++];
- if (offset >= format.length())
- return;
+ if (offset >= format.length()) return;
char type = format[offset];
temp[format_length++] = type;
temp[format_length] = '\0';
offset++;
FmtElm current = elms[elm++];
switch (type) {
- case 's': {
- DCHECK_EQ(FmtElm::C_STR, current.type_);
- const char* value = current.data_.u_c_str_;
- Add(value);
- break;
- }
- case 'w': {
- DCHECK_EQ(FmtElm::LC_STR, current.type_);
- Vector<const uc16> value = *current.data_.u_lc_str_;
- for (int i = 0; i < value.length(); i++)
- Put(static_cast<char>(value[i]));
- break;
- }
- case 'o': {
- DCHECK_EQ(FmtElm::OBJ, current.type_);
- Object obj(current.data_.u_obj_);
- PrintObject(obj);
- break;
- }
- case 'k': {
- DCHECK_EQ(FmtElm::INT, current.type_);
- int value = current.data_.u_int_;
- if (0x20 <= value && value <= 0x7F) {
- Put(value);
- } else if (value <= 0xFF) {
- Add("\\x%02x", value);
- } else {
- Add("\\u%04x", value);
+ case 's': {
+ DCHECK_EQ(FmtElm::C_STR, current.type_);
+ const char* value = current.data_.u_c_str_;
+ Add(value);
+ break;
}
- break;
- }
- case 'i': case 'd': case 'u': case 'x': case 'c': case 'X': {
- int value = current.data_.u_int_;
- EmbeddedVector<char, 24> formatted;
- int length = SNPrintF(formatted, temp.start(), value);
- Add(Vector<const char>(formatted.start(), length));
- break;
- }
- case 'f': case 'g': case 'G': case 'e': case 'E': {
- double value = current.data_.u_double_;
- int inf = std::isinf(value);
- if (inf == -1) {
- Add("-inf");
- } else if (inf == 1) {
- Add("inf");
- } else if (std::isnan(value)) {
- Add("nan");
- } else {
- EmbeddedVector<char, 28> formatted;
- SNPrintF(formatted, temp.start(), value);
- Add(formatted.start());
+ case 'w': {
+ DCHECK_EQ(FmtElm::LC_STR, current.type_);
+ Vector<const uc16> value = *current.data_.u_lc_str_;
+ for (int i = 0; i < value.length(); i++)
+ Put(static_cast<char>(value[i]));
+ break;
}
- break;
- }
- case 'p': {
- void* value = current.data_.u_pointer_;
- EmbeddedVector<char, 20> formatted;
- SNPrintF(formatted, temp.start(), value);
- Add(formatted.start());
- break;
- }
- default:
- UNREACHABLE();
- break;
+ case 'o': {
+ DCHECK_EQ(FmtElm::OBJ, current.type_);
+ Object obj(current.data_.u_obj_);
+ PrintObject(obj);
+ break;
+ }
+ case 'k': {
+ DCHECK_EQ(FmtElm::INT, current.type_);
+ int value = current.data_.u_int_;
+ if (0x20 <= value && value <= 0x7F) {
+ Put(value);
+ } else if (value <= 0xFF) {
+ Add("\\x%02x", value);
+ } else {
+ Add("\\u%04x", value);
+ }
+ break;
+ }
+ case 'i':
+ case 'd':
+ case 'u':
+ case 'x':
+ case 'c':
+ case 'X': {
+ int value = current.data_.u_int_;
+ EmbeddedVector<char, 24> formatted;
+ int length = SNPrintF(formatted, temp.begin(), value);
+ Add(Vector<const char>(formatted.begin(), length));
+ break;
+ }
+ case 'f':
+ case 'g':
+ case 'G':
+ case 'e':
+ case 'E': {
+ double value = current.data_.u_double_;
+ int inf = std::isinf(value);
+ if (inf == -1) {
+ Add("-inf");
+ } else if (inf == 1) {
+ Add("inf");
+ } else if (std::isnan(value)) {
+ Add("nan");
+ } else {
+ EmbeddedVector<char, 28> formatted;
+ SNPrintF(formatted, temp.begin(), value);
+ Add(formatted.begin());
+ }
+ break;
+ }
+ case 'p': {
+ void* value = current.data_.u_pointer_;
+ EmbeddedVector<char, 20> formatted;
+ SNPrintF(formatted, temp.begin(), value);
+ Add(formatted.begin());
+ break;
+ }
+ default:
+ UNREACHABLE();
}
}
@@ -179,15 +191,15 @@ void StringStream::Add(Vector<const char> format, Vector<FmtElm> elms) {
}
void StringStream::PrintObject(Object o) {
- o->ShortPrint(this);
- if (o->IsString()) {
- if (String::cast(o)->length() <= String::kMaxShortPrintLength) {
+ o.ShortPrint(this);
+ if (o.IsString()) {
+ if (String::cast(o).length() <= String::kMaxShortPrintLength) {
return;
}
- } else if (o->IsNumber() || o->IsOddball()) {
+ } else if (o.IsNumber() || o.IsOddball()) {
return;
}
- if (o->IsHeapObject() && object_print_mode_ == kPrintObjectVerbose) {
+ if (o.IsHeapObject() && object_print_mode_ == kPrintObjectVerbose) {
// TODO(delphick): Consider whether we can get the isolate without using
// TLS.
Isolate* isolate = Isolate::Current();
@@ -215,12 +227,10 @@ std::unique_ptr<char[]> StringStream::ToCString() const {
return std::unique_ptr<char[]>(str);
}
-
void StringStream::Log(Isolate* isolate) {
LOG(isolate, StringEvent("StackDump", buffer_));
}
-
void StringStream::OutputToFile(FILE* out) {
// Dump the output to stdout, but make sure to break it up into
// manageable chunks to avoid losing parts of the output in the OS
@@ -236,13 +246,12 @@ void StringStream::OutputToFile(FILE* out) {
internal::PrintF(out, "%s", &buffer_[position]);
}
-
Handle<String> StringStream::ToString(Isolate* isolate) {
- return isolate->factory()->NewStringFromUtf8(
- Vector<const char>(buffer_, length_)).ToHandleChecked();
+ return isolate->factory()
+ ->NewStringFromUtf8(Vector<const char>(buffer_, length_))
+ .ToHandleChecked();
}
-
void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
isolate->set_string_stream_current_security_token(Object());
if (isolate->string_stream_debug_object_cache() == nullptr) {
@@ -251,7 +260,6 @@ void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
isolate->string_stream_debug_object_cache()->clear();
}
-
#ifdef DEBUG
bool StringStream::IsMentionedObjectCacheClear(Isolate* isolate) {
return object_print_mode_ == kPrintObjectConcise ||
@@ -259,7 +267,7 @@ bool StringStream::IsMentionedObjectCacheClear(Isolate* isolate) {
}
#endif
-bool StringStream::Put(String str) { return Put(str, 0, str->length()); }
+bool StringStream::Put(String str) { return Put(str, 0, str.length()); }
bool StringStream::Put(String str, int start, int end) {
StringCharacterStream stream(str, start);
@@ -276,9 +284,9 @@ bool StringStream::Put(String str, int start, int end) {
}
void StringStream::PrintName(Object name) {
- if (name->IsString()) {
+ if (name.IsString()) {
String str = String::cast(name);
- if (str->length() > 0) {
+ if (str.length() > 0) {
Put(str);
} else {
Add("/* anonymous */");
@@ -289,33 +297,32 @@ void StringStream::PrintName(Object name) {
}
void StringStream::PrintUsingMap(JSObject js_object) {
- Map map = js_object->map();
- int real_size = map->NumberOfOwnDescriptors();
- DescriptorArray descs = map->instance_descriptors();
+ Map map = js_object.map();
+ int real_size = map.NumberOfOwnDescriptors();
+ DescriptorArray descs = map.instance_descriptors();
for (int i = 0; i < real_size; i++) {
- PropertyDetails details = descs->GetDetails(i);
+ PropertyDetails details = descs.GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
- Object key = descs->GetKey(i);
- if (key->IsString() || key->IsNumber()) {
+ Object key = descs.GetKey(i);
+ if (key.IsString() || key.IsNumber()) {
int len = 3;
- if (key->IsString()) {
- len = String::cast(key)->length();
+ if (key.IsString()) {
+ len = String::cast(key).length();
}
- for (; len < 18; len++)
- Put(' ');
- if (key->IsString()) {
+ for (; len < 18; len++) Put(' ');
+ if (key.IsString()) {
Put(String::cast(key));
} else {
- key->ShortPrint();
+ key.ShortPrint();
}
Add(": ");
FieldIndex index = FieldIndex::ForDescriptor(map, i);
- if (js_object->IsUnboxedDoubleField(index)) {
- double value = js_object->RawFastDoublePropertyAt(index);
+ if (js_object.IsUnboxedDoubleField(index)) {
+ double value = js_object.RawFastDoublePropertyAt(index);
Add("<unboxed double> %.16g\n", FmtElm(value));
} else {
- Object value = js_object->RawFastPropertyAt(index);
+ Object value = js_object.RawFastPropertyAt(index);
Add("%o\n", value);
}
}
@@ -324,14 +331,14 @@ void StringStream::PrintUsingMap(JSObject js_object) {
}
void StringStream::PrintFixedArray(FixedArray array, unsigned int limit) {
- ReadOnlyRoots roots = array->GetReadOnlyRoots();
+ ReadOnlyRoots roots = array.GetReadOnlyRoots();
for (unsigned int i = 0; i < 10 && i < limit; i++) {
- Object element = array->get(i);
- if (element->IsTheHole(roots)) continue;
+ Object element = array.get(i);
+ if (element.IsTheHole(roots)) continue;
for (int len = 1; len < 18; len++) {
Put(' ');
}
- Add("%d: %o\n", i, array->get(i));
+ Add("%d: %o\n", i, array.get(i));
}
if (limit >= 10) {
Add(" ...\n");
@@ -339,9 +346,9 @@ void StringStream::PrintFixedArray(FixedArray array, unsigned int limit) {
}
void StringStream::PrintByteArray(ByteArray byte_array) {
- unsigned int limit = byte_array->length();
+ unsigned int limit = byte_array.length();
for (unsigned int i = 0; i < 10 && i < limit; i++) {
- byte b = byte_array->get(i);
+ byte b = byte_array.get(i);
Add(" %d: %3d 0x%02x", i, b, b);
if (b >= ' ' && b <= '~') {
Add(" '%c'", b);
@@ -367,36 +374,36 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
for (size_t i = 0; i < debug_object_cache->size(); i++) {
HeapObject printee = *(*debug_object_cache)[i];
Add(" #%d# %p: ", static_cast<int>(i),
- reinterpret_cast<void*>(printee->ptr()));
- printee->ShortPrint(this);
+ reinterpret_cast<void*>(printee.ptr()));
+ printee.ShortPrint(this);
Add("\n");
- if (printee->IsJSObject()) {
- if (printee->IsJSValue()) {
- Add(" value(): %o\n", JSValue::cast(printee)->value());
+ if (printee.IsJSObject()) {
+ if (printee.IsJSValue()) {
+ Add(" value(): %o\n", JSValue::cast(printee).value());
}
PrintUsingMap(JSObject::cast(printee));
- if (printee->IsJSArray()) {
+ if (printee.IsJSArray()) {
JSArray array = JSArray::cast(printee);
- if (array->HasObjectElements()) {
- unsigned int limit = FixedArray::cast(array->elements())->length();
+ if (array.HasObjectElements()) {
+ unsigned int limit = FixedArray::cast(array.elements()).length();
unsigned int length =
- static_cast<uint32_t>(JSArray::cast(array)->length()->Number());
+ static_cast<uint32_t>(JSArray::cast(array).length().Number());
if (length < limit) limit = length;
- PrintFixedArray(FixedArray::cast(array->elements()), limit);
+ PrintFixedArray(FixedArray::cast(array.elements()), limit);
}
}
- } else if (printee->IsByteArray()) {
+ } else if (printee.IsByteArray()) {
PrintByteArray(ByteArray::cast(printee));
- } else if (printee->IsFixedArray()) {
- unsigned int limit = FixedArray::cast(printee)->length();
+ } else if (printee.IsFixedArray()) {
+ unsigned int limit = FixedArray::cast(printee).length();
PrintFixedArray(FixedArray::cast(printee), limit);
}
}
}
void StringStream::PrintSecurityTokenIfChanged(JSFunction fun) {
- Object token = fun->native_context()->security_token();
- Isolate* isolate = fun->GetIsolate();
+ Object token = fun.native_context().security_token();
+ Isolate* isolate = fun.GetIsolate();
if (token != isolate->string_stream_current_security_token()) {
Add("Security context: %o\n", token);
isolate->set_string_stream_current_security_token(token);
@@ -405,33 +412,32 @@ void StringStream::PrintSecurityTokenIfChanged(JSFunction fun) {
void StringStream::PrintFunction(JSFunction fun, Object receiver, Code* code) {
PrintPrototype(fun, receiver);
- *code = fun->code();
+ *code = fun.code();
}
void StringStream::PrintPrototype(JSFunction fun, Object receiver) {
- Object name = fun->shared()->Name();
+ Object name = fun.shared().Name();
bool print_name = false;
- Isolate* isolate = fun->GetIsolate();
- if (receiver->IsNullOrUndefined(isolate) || receiver->IsTheHole(isolate) ||
- receiver->IsJSProxy()) {
+ Isolate* isolate = fun.GetIsolate();
+ if (receiver.IsNullOrUndefined(isolate) || receiver.IsTheHole(isolate) ||
+ receiver.IsJSProxy()) {
print_name = true;
} else if (!isolate->context().is_null()) {
- if (!receiver->IsJSObject()) {
- receiver = receiver->GetPrototypeChainRootMap(isolate)->prototype();
+ if (!receiver.IsJSObject()) {
+ receiver = receiver.GetPrototypeChainRootMap(isolate).prototype();
}
for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent()->IsJSProxy()) break;
- Object key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
- if (!key->IsUndefined(isolate)) {
- if (!name->IsString() ||
- !key->IsString() ||
- !String::cast(name)->Equals(String::cast(key))) {
+ if (iter.GetCurrent().IsJSProxy()) break;
+ Object key = iter.GetCurrent<JSObject>().SlowReverseLookup(fun);
+ if (!key.IsUndefined(isolate)) {
+ if (!name.IsString() || !key.IsString() ||
+ !String::cast(name).Equals(String::cast(key))) {
print_name = true;
}
- if (name->IsString() && String::cast(name)->length() == 0) {
+ if (name.IsString() && String::cast(name).length() == 0) {
print_name = false;
}
name = key;
@@ -444,7 +450,7 @@ void StringStream::PrintPrototype(JSFunction fun, Object receiver) {
// which it was looked up.
if (print_name) {
Add("(aka ");
- PrintName(fun->shared()->Name());
+ PrintName(fun.shared().Name());
Put(')');
}
}
@@ -466,6 +472,5 @@ char* HeapStringAllocator::grow(unsigned* bytes) {
return new_space;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/strings/string-stream.h
index ac856c7809..d7b616c6ff 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/strings/string-stream.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_STRING_STREAM_H_
-#define V8_STRING_STREAM_H_
+#ifndef V8_STRINGS_STRING_STREAM_H_
+#define V8_STRINGS_STRING_STREAM_H_
-#include "src/allocation.h"
#include "src/base/small-vector.h"
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/objects/heap-object.h"
-#include "src/vector.h"
+#include "src/utils/allocation.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -29,7 +29,6 @@ class StringAllocator {
virtual char* grow(unsigned* bytes) = 0;
};
-
// Normal allocator uses new[] and delete[].
class HeapStringAllocator final : public StringAllocator {
public:
@@ -41,7 +40,6 @@ class HeapStringAllocator final : public StringAllocator {
char* space_;
};
-
class FixedStringAllocator final : public StringAllocator {
public:
FixedStringAllocator(char* buffer, unsigned length)
@@ -60,7 +58,7 @@ class FixedStringAllocator final : public StringAllocator {
template <std::size_t kInlineSize>
class SmallStringOptimizedAllocator final : public StringAllocator {
public:
- typedef base::SmallVector<char, kInlineSize> SmallVector;
+ using SmallVector = base::SmallVector<char, kInlineSize>;
explicit SmallStringOptimizedAllocator(SmallVector* vector) V8_NOEXCEPT
: vector_(vector) {}
@@ -213,4 +211,4 @@ class StringStream final {
} // namespace internal
} // namespace v8
-#endif // V8_STRING_STREAM_H_
+#endif // V8_STRINGS_STRING_STREAM_H_
diff --git a/deps/v8/src/strings/unicode-decoder.cc b/deps/v8/src/strings/unicode-decoder.cc
new file mode 100644
index 0000000000..8ee66ec251
--- /dev/null
+++ b/deps/v8/src/strings/unicode-decoder.cc
@@ -0,0 +1,81 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/strings/unicode-decoder.h"
+
+#include "src/strings/unicode-inl.h"
+#include "src/utils/memcopy.h"
+
+namespace v8 {
+namespace internal {
+
+Utf8Decoder::Utf8Decoder(const Vector<const uint8_t>& chars)
+ : encoding_(Encoding::kAscii),
+ non_ascii_start_(NonAsciiStart(chars.begin(), chars.length())),
+ utf16_length_(non_ascii_start_) {
+ if (non_ascii_start_ == chars.length()) return;
+
+ const uint8_t* cursor = chars.begin() + non_ascii_start_;
+ const uint8_t* end = chars.begin() + chars.length();
+
+ bool is_one_byte = true;
+ uint32_t incomplete_char = 0;
+ unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
+
+ while (cursor < end) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
+ if (t != unibrow::Utf8::kIncomplete) {
+ is_one_byte = is_one_byte && t <= unibrow::Latin1::kMaxChar;
+ utf16_length_++;
+ if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) utf16_length_++;
+ }
+ }
+
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncrementalFinish(&state);
+ if (t != unibrow::Utf8::kBufferEmpty) {
+ is_one_byte = false;
+ utf16_length_++;
+ }
+
+ encoding_ = is_one_byte ? Encoding::kLatin1 : Encoding::kUtf16;
+}
+
+template <typename Char>
+void Utf8Decoder::Decode(Char* out, const Vector<const uint8_t>& data) {
+ CopyChars(out, data.begin(), non_ascii_start_);
+
+ out += non_ascii_start_;
+
+ uint32_t incomplete_char = 0;
+ unibrow::Utf8::State state = unibrow::Utf8::State::kAccept;
+
+ const uint8_t* cursor = data.begin() + non_ascii_start_;
+ const uint8_t* end = data.begin() + data.length();
+
+ while (cursor < end) {
+ unibrow::uchar t =
+ unibrow::Utf8::ValueOfIncremental(&cursor, &state, &incomplete_char);
+ if (t != unibrow::Utf8::kIncomplete) {
+ if (sizeof(Char) == 1 || t <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ *(out++) = static_cast<Char>(t);
+ } else {
+ *(out++) = unibrow::Utf16::LeadSurrogate(t);
+ *(out++) = unibrow::Utf16::TrailSurrogate(t);
+ }
+ }
+ }
+
+ unibrow::uchar t = unibrow::Utf8::ValueOfIncrementalFinish(&state);
+ if (t != unibrow::Utf8::kBufferEmpty) *out = static_cast<Char>(t);
+}
+
+template void Utf8Decoder::Decode(uint8_t* out,
+ const Vector<const uint8_t>& data);
+
+template void Utf8Decoder::Decode(uint16_t* out,
+ const Vector<const uint8_t>& data);
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/strings/unicode-decoder.h b/deps/v8/src/strings/unicode-decoder.h
new file mode 100644
index 0000000000..e35d176770
--- /dev/null
+++ b/deps/v8/src/strings/unicode-decoder.h
@@ -0,0 +1,74 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRINGS_UNICODE_DECODER_H_
+#define V8_STRINGS_UNICODE_DECODER_H_
+
+#include "src/strings/unicode.h"
+#include "src/utils/vector.h"
+
+namespace v8 {
+namespace internal {
+
+// The return value may point to the first aligned word containing the first
+// non-one-byte character, rather than directly to the non-one-byte character.
+// If the return value is >= the passed length, the entire string was
+// one-byte.
+inline int NonAsciiStart(const uint8_t* chars, int length) {
+ const uint8_t* start = chars;
+ const uint8_t* limit = chars + length;
+
+ if (static_cast<size_t>(length) >= kIntptrSize) {
+ // Check unaligned bytes.
+ while (!IsAligned(reinterpret_cast<intptr_t>(chars), kIntptrSize)) {
+ if (*chars > unibrow::Utf8::kMaxOneByteChar) {
+ return static_cast<int>(chars - start);
+ }
+ ++chars;
+ }
+ // Check aligned words.
+ DCHECK_EQ(unibrow::Utf8::kMaxOneByteChar, 0x7F);
+ const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
+ while (chars + sizeof(uintptr_t) <= limit) {
+ if (*reinterpret_cast<const uintptr_t*>(chars) & non_one_byte_mask) {
+ return static_cast<int>(chars - start);
+ }
+ chars += sizeof(uintptr_t);
+ }
+ }
+ // Check remaining unaligned bytes.
+ while (chars < limit) {
+ if (*chars > unibrow::Utf8::kMaxOneByteChar) {
+ return static_cast<int>(chars - start);
+ }
+ ++chars;
+ }
+
+ return static_cast<int>(chars - start);
+}
+
+class V8_EXPORT_PRIVATE Utf8Decoder final {
+ public:
+ enum class Encoding : uint8_t { kAscii, kLatin1, kUtf16 };
+
+ explicit Utf8Decoder(const Vector<const uint8_t>& chars);
+
+ bool is_ascii() const { return encoding_ == Encoding::kAscii; }
+ bool is_one_byte() const { return encoding_ <= Encoding::kLatin1; }
+ int utf16_length() const { return utf16_length_; }
+ int non_ascii_start() const { return non_ascii_start_; }
+
+ template <typename Char>
+ V8_EXPORT_PRIVATE void Decode(Char* out, const Vector<const uint8_t>& data);
+
+ private:
+ Encoding encoding_;
+ int non_ascii_start_;
+ int utf16_length_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_STRINGS_UNICODE_DECODER_H_
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/strings/unicode-inl.h
index 21292ca59c..6f730b26be 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/strings/unicode-inl.h
@@ -2,31 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UNICODE_INL_H_
-#define V8_UNICODE_INL_H_
+#ifndef V8_STRINGS_UNICODE_INL_H_
+#define V8_STRINGS_UNICODE_INL_H_
-#include "src/unicode.h"
#include "src/base/logging.h"
-#include "src/utils.h"
+#include "src/strings/unicode.h"
+#include "src/utils/utils.h"
namespace unibrow {
#ifndef V8_INTL_SUPPORT
-template <class T, int s> bool Predicate<T, s>::get(uchar code_point) {
+template <class T, int s>
+bool Predicate<T, s>::get(uchar code_point) {
CacheEntry entry = entries_[code_point & kMask];
if (entry.code_point() == code_point) return entry.value();
return CalculateValue(code_point);
}
-template <class T, int s> bool Predicate<T, s>::CalculateValue(
- uchar code_point) {
+template <class T, int s>
+bool Predicate<T, s>::CalculateValue(uchar code_point) {
bool result = T::Is(code_point);
entries_[code_point & kMask] = CacheEntry(code_point, result);
return result;
}
-template <class T, int s> int Mapping<T, s>::get(uchar c, uchar n,
- uchar* result) {
+template <class T, int s>
+int Mapping<T, s>::get(uchar c, uchar n, uchar* result) {
CacheEntry entry = entries_[c & kMask];
if (entry.code_point_ == c) {
if (entry.offset_ == 0) {
@@ -40,8 +41,8 @@ template <class T, int s> int Mapping<T, s>::get(uchar c, uchar n,
}
}
-template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n,
- uchar* result) {
+template <class T, int s>
+int Mapping<T, s>::CalculateValue(uchar c, uchar n, uchar* result) {
bool allow_caching = true;
int length = T::Convert(c, n, result, &allow_caching);
if (allow_caching) {
@@ -121,10 +122,7 @@ unsigned Utf8::EncodeOneByte(char* str, uint8_t c) {
// buffer, and combines surrogate code units into single code points. If
// replace_invalid is set to true, orphan surrogate code units will be replaced
// with kBadChar.
-unsigned Utf8::Encode(char* str,
- uchar c,
- int previous,
- bool replace_invalid) {
+unsigned Utf8::Encode(char* str, uchar c, int previous, bool replace_invalid) {
static const int kMask = ~(1 << 6);
if (c <= kMaxOneByteChar) {
str[0] = c;
@@ -139,11 +137,10 @@ unsigned Utf8::Encode(char* str,
const int kUnmatchedSize = kSizeOfUnmatchedSurrogate;
return Encode(str - kUnmatchedSize,
Utf16::CombineSurrogatePair(previous, c),
- Utf16::kNoPreviousCharacter,
- replace_invalid) - kUnmatchedSize;
+ Utf16::kNoPreviousCharacter, replace_invalid) -
+ kUnmatchedSize;
} else if (replace_invalid &&
- (Utf16::IsLeadSurrogate(c) ||
- Utf16::IsTrailSurrogate(c))) {
+ (Utf16::IsLeadSurrogate(c) || Utf16::IsTrailSurrogate(c))) {
c = kBadChar;
}
str[0] = 0xE0 | (c >> 12);
@@ -159,7 +156,6 @@ unsigned Utf8::Encode(char* str,
}
}
-
uchar Utf8::ValueOf(const byte* bytes, size_t length, size_t* cursor) {
if (length <= 0) return kBadChar;
byte first = bytes[0];
@@ -195,4 +191,4 @@ bool Utf8::IsValidCharacter(uchar c) {
} // namespace unibrow
-#endif // V8_UNICODE_INL_H_
+#endif // V8_STRINGS_UNICODE_INL_H_
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/strings/unicode.cc
index 70a084bc22..21faccd0b4 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/strings/unicode.cc
@@ -4,10 +4,10 @@
//
// This file was generated at 2014-10-08 15:25:47.940335
-#include "src/unicode.h"
-#include "src/unicode-inl.h"
+#include "src/strings/unicode.h"
#include <stdio.h>
#include <stdlib.h>
+#include "src/strings/unicode-inl.h"
#ifdef V8_INTL_SUPPORT
#include "unicode/uchar.h"
@@ -27,9 +27,9 @@ static const uchar kSentinel = static_cast<uchar>(-1);
* Implementations of functions for working with Unicode.
*/
-typedef signed short int16_t; // NOLINT
-typedef unsigned short uint16_t; // NOLINT
-typedef int int32_t; // NOLINT
+using int16_t = signed short; // NOLINT
+using uint16_t = unsigned short; // NOLINT
+using int32_t = int; // NOLINT
#ifndef V8_INTL_SUPPORT
// All access to the character table should go through this function.
@@ -38,14 +38,9 @@ static inline uchar TableGet(const int32_t* table, int index) {
return table[D * index];
}
+static inline uchar GetEntry(int32_t entry) { return entry & (kStartBit - 1); }
-static inline uchar GetEntry(int32_t entry) {
- return entry & (kStartBit - 1);
-}
-
-static inline bool IsStart(int32_t entry) {
- return (entry & kStartBit) != 0;
-}
+static inline bool IsStart(int32_t entry) { return (entry & kStartBit) != 0; }
/**
* Look up a character in the Unicode table using a mix of binary and
@@ -107,12 +102,9 @@ struct MultiCharacterSpecialCase {
// offset by the distance between the match and the start. Otherwise
// the result is the same as for the start point on the entire range.
template <bool ranges_are_linear, int kW>
-static int LookupMapping(const int32_t* table,
- uint16_t size,
+static int LookupMapping(const int32_t* table, uint16_t size,
const MultiCharacterSpecialCase<kW>* multi_chars,
- uchar chr,
- uchar next,
- uchar* result,
+ uchar chr, uchar next, uchar* result,
bool* allow_caching_ptr) {
static const int kEntryDist = 2;
uint16_t key = chr & (kChunkBits - 1);
@@ -362,17 +354,27 @@ static const int32_t kUppercaseTable0[455] = {
8172, 1073750008, 8187}; // NOLINT
static const uint16_t kUppercaseTable1Size = 86;
static const int32_t kUppercaseTable1[86] = {
- 258, 263, 1073742091, 269, 1073742096, 274, 277, 1073742105, // NOLINT
- 285, 292, 294, 296, 1073742122, 301, 1073742128, 307, // NOLINT
- 1073742142, 319, 325, 387, 1073744896, 3118, 3168, 1073744994, // NOLINT
- 3172, 3175, 3177, 3179, 1073745005, 3184, 3186, 3189, // NOLINT
- 1073745022, 3200, 3202, 3204, 3206, 3208, 3210, 3212, // NOLINT
- 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, // NOLINT
- 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, // NOLINT
- 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, // NOLINT
- 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, // NOLINT
- 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, // NOLINT
- 3294, 3296, 3298, 3307, 3309, 3314 }; // NOLINT
+ 258, 263, 1073742091, 269, 1073742096, 274,
+ 277, 1073742105, // NOLINT
+ 285, 292, 294, 296, 1073742122, 301,
+ 1073742128, 307, // NOLINT
+ 1073742142, 319, 325, 387, 1073744896, 3118,
+ 3168, 1073744994, // NOLINT
+ 3172, 3175, 3177, 3179, 1073745005, 3184,
+ 3186, 3189, // NOLINT
+ 1073745022, 3200, 3202, 3204, 3206, 3208,
+ 3210, 3212, // NOLINT
+ 3214, 3216, 3218, 3220, 3222, 3224,
+ 3226, 3228, // NOLINT
+ 3230, 3232, 3234, 3236, 3238, 3240,
+ 3242, 3244, // NOLINT
+ 3246, 3248, 3250, 3252, 3254, 3256,
+ 3258, 3260, // NOLINT
+ 3262, 3264, 3266, 3268, 3270, 3272,
+ 3274, 3276, // NOLINT
+ 3278, 3280, 3282, 3284, 3286, 3288,
+ 3290, 3292, // NOLINT
+ 3294, 3296, 3298, 3307, 3309, 3314}; // NOLINT
static const uint16_t kUppercaseTable5Size = 101;
static const int32_t kUppercaseTable5[101] = {
1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, // NOLINT
@@ -389,24 +391,20 @@ static const int32_t kUppercaseTable5[101] = {
1944, 1946, 1948, 1950, 1952, 1954, 1956, 1958, // NOLINT
1960, 1073743786, 1965, 1073743792, 1969}; // NOLINT
static const uint16_t kUppercaseTable7Size = 2;
-static const int32_t kUppercaseTable7[2] = {
- 1073749793, 7994 }; // NOLINT
+static const int32_t kUppercaseTable7[2] = {1073749793, 7994}; // NOLINT
bool Uppercase::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
- case 0: return LookupPredicate(kUppercaseTable0,
- kUppercaseTable0Size,
- c);
- case 1: return LookupPredicate(kUppercaseTable1,
- kUppercaseTable1Size,
- c);
- case 5: return LookupPredicate(kUppercaseTable5,
- kUppercaseTable5Size,
- c);
- case 7: return LookupPredicate(kUppercaseTable7,
- kUppercaseTable7Size,
- c);
- default: return false;
+ case 0:
+ return LookupPredicate(kUppercaseTable0, kUppercaseTable0Size, c);
+ case 1:
+ return LookupPredicate(kUppercaseTable1, kUppercaseTable1Size, c);
+ case 5:
+ return LookupPredicate(kUppercaseTable5, kUppercaseTable5Size, c);
+ case 7:
+ return LookupPredicate(kUppercaseTable7, kUppercaseTable7Size, c);
+ default:
+ return false;
}
}
#endif // V8_INTL_SUPPORT
@@ -527,26 +525,35 @@ static const int32_t kLetterTable0[431] = {
8180, 1073750006, 8188}; // NOLINT
static const uint16_t kLetterTable1Size = 87;
static const int32_t kLetterTable1[87] = {
- 113, 127, 1073741968, 156, 258, 263, 1073742090, 275, // NOLINT
- 277, 1073742105, 285, 292, 294, 296, 1073742122, 301, // NOLINT
- 1073742127, 313, 1073742140, 319, 1073742149, 329, 334, 1073742176, // NOLINT
- 392, 1073744896, 3118, 1073744944, 3166, 1073744992, 3300, 1073745131, // NOLINT
- 3310, 1073745138, 3315, 1073745152, 3365, 3367, 3373, 1073745200, // NOLINT
- 3431, 3439, 1073745280, 3478, 1073745312, 3494, 1073745320, 3502, // NOLINT
- 1073745328, 3510, 1073745336, 3518, 1073745344, 3526, 1073745352, 3534, // NOLINT
- 1073745360, 3542, 1073745368, 3550, 3631, 1073745925, 4103, 1073745953, // NOLINT
- 4137, 1073745969, 4149, 1073745976, 4156, 1073745985, 4246, 1073746077, // NOLINT
- 4255, 1073746081, 4346, 1073746172, 4351, 1073746181, 4397, 1073746225, // NOLINT
- 4494, 1073746336, 4538, 1073746416, 4607, 1073746944, 8191 }; // NOLINT
+ 113, 127, 1073741968, 156,
+ 258, 263, 1073742090, 275, // NOLINT
+ 277, 1073742105, 285, 292,
+ 294, 296, 1073742122, 301, // NOLINT
+ 1073742127, 313, 1073742140, 319,
+ 1073742149, 329, 334, 1073742176, // NOLINT
+ 392, 1073744896, 3118, 1073744944,
+ 3166, 1073744992, 3300, 1073745131, // NOLINT
+ 3310, 1073745138, 3315, 1073745152,
+ 3365, 3367, 3373, 1073745200, // NOLINT
+ 3431, 3439, 1073745280, 3478,
+ 1073745312, 3494, 1073745320, 3502, // NOLINT
+ 1073745328, 3510, 1073745336, 3518,
+ 1073745344, 3526, 1073745352, 3534, // NOLINT
+ 1073745360, 3542, 1073745368, 3550,
+ 3631, 1073745925, 4103, 1073745953, // NOLINT
+ 4137, 1073745969, 4149, 1073745976,
+ 4156, 1073745985, 4246, 1073746077, // NOLINT
+ 4255, 1073746081, 4346, 1073746172,
+ 4351, 1073746181, 4397, 1073746225, // NOLINT
+ 4494, 1073746336, 4538, 1073746416,
+ 4607, 1073746944, 8191}; // NOLINT
static const uint16_t kLetterTable2Size = 4;
-static const int32_t kLetterTable2[4] = {
- 1073741824, 3509, 1073745408, 8191 }; // NOLINT
+static const int32_t kLetterTable2[4] = {1073741824, 3509, 1073745408,
+ 8191}; // NOLINT
static const uint16_t kLetterTable3Size = 2;
-static const int32_t kLetterTable3[2] = {
- 1073741824, 8191 }; // NOLINT
+static const int32_t kLetterTable3[2] = {1073741824, 8191}; // NOLINT
static const uint16_t kLetterTable4Size = 2;
-static const int32_t kLetterTable4[2] = {
- 1073741824, 8140 }; // NOLINT
+static const int32_t kLetterTable4[2] = {1073741824, 8140}; // NOLINT
static const uint16_t kLetterTable5Size = 100;
static const int32_t kLetterTable5[100] = {
1073741824, 1164, 1073743056, 1277,
@@ -575,44 +582,43 @@ static const int32_t kLetterTable5[100] = {
1073744732, 2911, 1073744740, 2917, // NOLINT
1073744832, 3042, 1073744896, 8191}; // NOLINT
static const uint16_t kLetterTable6Size = 6;
-static const int32_t kLetterTable6[6] = {
- 1073741824, 6051, 1073747888, 6086, 1073747915, 6139 }; // NOLINT
+static const int32_t kLetterTable6[6] = {1073741824, 6051, 1073747888, 6086,
+ 1073747915, 6139}; // NOLINT
static const uint16_t kLetterTable7Size = 48;
static const int32_t kLetterTable7[48] = {
- 1073748224, 6765, 1073748592, 6873, 1073748736, 6918, 1073748755, 6935, // NOLINT
- 6941, 1073748767, 6952, 1073748778, 6966, 1073748792, 6972, 6974, // NOLINT
- 1073748800, 6977, 1073748803, 6980, 1073748806, 7089, 1073748947, 7485, // NOLINT
- 1073749328, 7567, 1073749394, 7623, 1073749488, 7675, 1073749616, 7796, // NOLINT
- 1073749622, 7932, 1073749793, 7994, 1073749825, 8026, 1073749862, 8126, // NOLINT
- 1073749954, 8135, 1073749962, 8143, 1073749970, 8151, 1073749978, 8156 }; // NOLINT
+ 1073748224, 6765, 1073748592, 6873,
+ 1073748736, 6918, 1073748755, 6935, // NOLINT
+ 6941, 1073748767, 6952, 1073748778,
+ 6966, 1073748792, 6972, 6974, // NOLINT
+ 1073748800, 6977, 1073748803, 6980,
+ 1073748806, 7089, 1073748947, 7485, // NOLINT
+ 1073749328, 7567, 1073749394, 7623,
+ 1073749488, 7675, 1073749616, 7796, // NOLINT
+ 1073749622, 7932, 1073749793, 7994,
+ 1073749825, 8026, 1073749862, 8126, // NOLINT
+ 1073749954, 8135, 1073749962, 8143,
+ 1073749970, 8151, 1073749978, 8156}; // NOLINT
bool Letter::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
- case 0: return LookupPredicate(kLetterTable0,
- kLetterTable0Size,
- c);
- case 1: return LookupPredicate(kLetterTable1,
- kLetterTable1Size,
- c);
- case 2: return LookupPredicate(kLetterTable2,
- kLetterTable2Size,
- c);
- case 3: return LookupPredicate(kLetterTable3,
- kLetterTable3Size,
- c);
- case 4: return LookupPredicate(kLetterTable4,
- kLetterTable4Size,
- c);
- case 5: return LookupPredicate(kLetterTable5,
- kLetterTable5Size,
- c);
- case 6: return LookupPredicate(kLetterTable6,
- kLetterTable6Size,
- c);
- case 7: return LookupPredicate(kLetterTable7,
- kLetterTable7Size,
- c);
- default: return false;
+ case 0:
+ return LookupPredicate(kLetterTable0, kLetterTable0Size, c);
+ case 1:
+ return LookupPredicate(kLetterTable1, kLetterTable1Size, c);
+ case 2:
+ return LookupPredicate(kLetterTable2, kLetterTable2Size, c);
+ case 3:
+ return LookupPredicate(kLetterTable3, kLetterTable3Size, c);
+ case 4:
+ return LookupPredicate(kLetterTable4, kLetterTable4Size, c);
+ case 5:
+ return LookupPredicate(kLetterTable5, kLetterTable5Size, c);
+ case 6:
+ return LookupPredicate(kLetterTable6, kLetterTable6Size, c);
+ case 7:
+ return LookupPredicate(kLetterTable7, kLetterTable7Size, c);
+ default:
+ return false;
}
}
#endif
@@ -832,7 +838,6 @@ bool ID_Start::Is(uchar c) {
}
}
-
// ID_Continue: point.category in ['Nd', 'Mn', 'Mc', 'Pc'] or
// 'Other_ID_Continue' in point.properties or 'JS_ID_Continue' in
// point.properties
@@ -959,7 +964,8 @@ bool ID_Continue::Is(uchar c) {
return LookupPredicate(kID_ContinueTable5, kID_ContinueTable5Size, c);
case 7:
return LookupPredicate(kID_ContinueTable7, kID_ContinueTable7Size, c);
- default: return false;
+ default:
+ return false;
}
}
@@ -970,29 +976,30 @@ static const uint16_t kWhiteSpaceTable0Size = 6;
static const int32_t kWhiteSpaceTable0[6] = {9, 1073741835, 12,
32, 160, 5760}; // NOLINT
static const uint16_t kWhiteSpaceTable1Size = 5;
-static const int32_t kWhiteSpaceTable1[5] = {
- 1073741824, 10, 47, 95, 4096 }; // NOLINT
+static const int32_t kWhiteSpaceTable1[5] = {1073741824, 10, 47, 95,
+ 4096}; // NOLINT
static const uint16_t kWhiteSpaceTable7Size = 1;
static const int32_t kWhiteSpaceTable7[1] = {7935}; // NOLINT
bool WhiteSpace::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
- case 0: return LookupPredicate(kWhiteSpaceTable0,
- kWhiteSpaceTable0Size,
- c);
- case 1: return LookupPredicate(kWhiteSpaceTable1,
- kWhiteSpaceTable1Size,
- c);
+ case 0:
+ return LookupPredicate(kWhiteSpaceTable0, kWhiteSpaceTable0Size, c);
+ case 1:
+ return LookupPredicate(kWhiteSpaceTable1, kWhiteSpaceTable1Size, c);
case 7:
return LookupPredicate(kWhiteSpaceTable7, kWhiteSpaceTable7Size, c);
- default: return false;
+ default:
+ return false;
}
}
#endif // !V8_INTL_SUPPORT
#ifndef V8_INTL_SUPPORT
-static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] = { // NOLINT
- {{105, 775}}, {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] =
+ { // NOLINT
+ {{105, 775}},
+ {{kSentinel}}}; // NOLINT
static const uint16_t kToLowercaseTable0Size = 488; // NOLINT
static const int32_t kToLowercaseTable0[976] = {
1073741889, 128, 90, 128, 1073742016, 128,
@@ -1179,23 +1186,44 @@ static const int32_t kToLowercaseTable0[976] = {
1073750008, -512, 8185, -512, 1073750010, -504,
8187, -504, 8188, -36}; // NOLINT
static const uint16_t kToLowercaseMultiStrings0Size = 2; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings1[1] =
+ { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kToLowercaseTable1Size = 79; // NOLINT
static const int32_t kToLowercaseTable1[158] = {
- 294, -30068, 298, -33532, 299, -33048, 306, 112, 1073742176, 64, 367, 64, 387, 4, 1073743030, 104, // NOLINT
- 1231, 104, 1073744896, 192, 3118, 192, 3168, 4, 3170, -42972, 3171, -15256, 3172, -42908, 3175, 4, // NOLINT
- 3177, 4, 3179, 4, 3181, -43120, 3182, -42996, 3183, -43132, 3184, -43128, 3186, 4, 3189, 4, // NOLINT
- 1073745022, -43260, 3199, -43260, 3200, 4, 3202, 4, 3204, 4, 3206, 4, 3208, 4, 3210, 4, // NOLINT
- 3212, 4, 3214, 4, 3216, 4, 3218, 4, 3220, 4, 3222, 4, 3224, 4, 3226, 4, // NOLINT
- 3228, 4, 3230, 4, 3232, 4, 3234, 4, 3236, 4, 3238, 4, 3240, 4, 3242, 4, // NOLINT
- 3244, 4, 3246, 4, 3248, 4, 3250, 4, 3252, 4, 3254, 4, 3256, 4, 3258, 4, // NOLINT
- 3260, 4, 3262, 4, 3264, 4, 3266, 4, 3268, 4, 3270, 4, 3272, 4, 3274, 4, // NOLINT
- 3276, 4, 3278, 4, 3280, 4, 3282, 4, 3284, 4, 3286, 4, 3288, 4, 3290, 4, // NOLINT
- 3292, 4, 3294, 4, 3296, 4, 3298, 4, 3307, 4, 3309, 4, 3314, 4 }; // NOLINT
-static const uint16_t kToLowercaseMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings5[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+ 294, -30068, 298, -33532, 299, -33048, 306,
+ 112, 1073742176, 64, 367, 64, 387, 4,
+ 1073743030, 104, // NOLINT
+ 1231, 104, 1073744896, 192, 3118, 192, 3168,
+ 4, 3170, -42972, 3171, -15256, 3172, -42908,
+ 3175, 4, // NOLINT
+ 3177, 4, 3179, 4, 3181, -43120, 3182,
+ -42996, 3183, -43132, 3184, -43128, 3186, 4,
+ 3189, 4, // NOLINT
+ 1073745022, -43260, 3199, -43260, 3200, 4, 3202,
+ 4, 3204, 4, 3206, 4, 3208, 4,
+ 3210, 4, // NOLINT
+ 3212, 4, 3214, 4, 3216, 4, 3218,
+ 4, 3220, 4, 3222, 4, 3224, 4,
+ 3226, 4, // NOLINT
+ 3228, 4, 3230, 4, 3232, 4, 3234,
+ 4, 3236, 4, 3238, 4, 3240, 4,
+ 3242, 4, // NOLINT
+ 3244, 4, 3246, 4, 3248, 4, 3250,
+ 4, 3252, 4, 3254, 4, 3256, 4,
+ 3258, 4, // NOLINT
+ 3260, 4, 3262, 4, 3264, 4, 3266,
+ 4, 3268, 4, 3270, 4, 3272, 4,
+ 3274, 4, // NOLINT
+ 3276, 4, 3278, 4, 3280, 4, 3282,
+ 4, 3284, 4, 3286, 4, 3288, 4,
+ 3290, 4, // NOLINT
+ 3292, 4, 3294, 4, 3296, 4, 3298,
+ 4, 3307, 4, 3309, 4, 3314, 4}; // NOLINT
+static const uint16_t kToLowercaseMultiStrings1Size = 1; // NOLINT
+static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings5[1] =
+ { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kToLowercaseTable5Size = 103; // NOLINT
static const int32_t kToLowercaseTable5[206] = {
1600, 4, 1602, 4, 1604, 4, 1606, 4,
@@ -1225,68 +1253,72 @@ static const int32_t kToLowercaseTable5[206] = {
1960, 4, 1962, -169232, 1963, -169276, 1964, -169260,
1965, -169220, 1968, -169032, 1969, -169128}; // NOLINT
static const uint16_t kToLowercaseMultiStrings5Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings7[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings7[1] =
+ { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kToLowercaseTable7Size = 2; // NOLINT
-static const int32_t kToLowercaseTable7[4] = {
- 1073749793, 128, 7994, 128 }; // NOLINT
+static const int32_t kToLowercaseTable7[4] = {1073749793, 128, 7994,
+ 128}; // NOLINT
static const uint16_t kToLowercaseMultiStrings7Size = 1; // NOLINT
-int ToLowercase::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
+int ToLowercase::Convert(uchar c, uchar n, uchar* result,
+ bool* allow_caching_ptr) {
int chunk_index = c >> 13;
switch (chunk_index) {
- case 0: return LookupMapping<true>(kToLowercaseTable0,
- kToLowercaseTable0Size,
- kToLowercaseMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kToLowercaseTable1,
- kToLowercaseTable1Size,
- kToLowercaseMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 5: return LookupMapping<true>(kToLowercaseTable5,
- kToLowercaseTable5Size,
- kToLowercaseMultiStrings5,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kToLowercaseTable7,
- kToLowercaseTable7Size,
- kToLowercaseMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
+ case 0:
+ return LookupMapping<true>(kToLowercaseTable0, kToLowercaseTable0Size,
+ kToLowercaseMultiStrings0, c, n, result,
+ allow_caching_ptr);
+ case 1:
+ return LookupMapping<true>(kToLowercaseTable1, kToLowercaseTable1Size,
+ kToLowercaseMultiStrings1, c, n, result,
+ allow_caching_ptr);
+ case 5:
+ return LookupMapping<true>(kToLowercaseTable5, kToLowercaseTable5Size,
+ kToLowercaseMultiStrings5, c, n, result,
+ allow_caching_ptr);
+ case 7:
+ return LookupMapping<true>(kToLowercaseTable7, kToLowercaseTable7Size,
+ kToLowercaseMultiStrings7, c, n, result,
+ allow_caching_ptr);
+ default:
+ return 0;
}
}
-static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings0[62] = { // NOLINT
- {{83, 83, kSentinel}}, {{700, 78, kSentinel}}, {{74, 780, kSentinel}}, {{921, 776, 769}}, // NOLINT
- {{933, 776, 769}}, {{1333, 1362, kSentinel}}, {{72, 817, kSentinel}}, {{84, 776, kSentinel}}, // NOLINT
- {{87, 778, kSentinel}}, {{89, 778, kSentinel}}, {{65, 702, kSentinel}}, {{933, 787, kSentinel}}, // NOLINT
- {{933, 787, 768}}, {{933, 787, 769}}, {{933, 787, 834}}, {{7944, 921, kSentinel}}, // NOLINT
- {{7945, 921, kSentinel}}, {{7946, 921, kSentinel}}, {{7947, 921, kSentinel}}, {{7948, 921, kSentinel}}, // NOLINT
- {{7949, 921, kSentinel}}, {{7950, 921, kSentinel}}, {{7951, 921, kSentinel}}, {{7976, 921, kSentinel}}, // NOLINT
- {{7977, 921, kSentinel}}, {{7978, 921, kSentinel}}, {{7979, 921, kSentinel}}, {{7980, 921, kSentinel}}, // NOLINT
- {{7981, 921, kSentinel}}, {{7982, 921, kSentinel}}, {{7983, 921, kSentinel}}, {{8040, 921, kSentinel}}, // NOLINT
- {{8041, 921, kSentinel}}, {{8042, 921, kSentinel}}, {{8043, 921, kSentinel}}, {{8044, 921, kSentinel}}, // NOLINT
- {{8045, 921, kSentinel}}, {{8046, 921, kSentinel}}, {{8047, 921, kSentinel}}, {{8122, 921, kSentinel}}, // NOLINT
- {{913, 921, kSentinel}}, {{902, 921, kSentinel}}, {{913, 834, kSentinel}}, {{913, 834, 921}}, // NOLINT
- {{8138, 921, kSentinel}}, {{919, 921, kSentinel}}, {{905, 921, kSentinel}}, {{919, 834, kSentinel}}, // NOLINT
- {{919, 834, 921}}, {{921, 776, 768}}, {{921, 834, kSentinel}}, {{921, 776, 834}}, // NOLINT
- {{933, 776, 768}}, {{929, 787, kSentinel}}, {{933, 834, kSentinel}}, {{933, 776, 834}}, // NOLINT
- {{8186, 921, kSentinel}}, {{937, 921, kSentinel}}, {{911, 921, kSentinel}}, {{937, 834, kSentinel}}, // NOLINT
- {{937, 834, 921}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable0Size = 590; // NOLINT
+static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings0[62] =
+ { // NOLINT
+ {{83, 83, kSentinel}}, {{700, 78, kSentinel}},
+ {{74, 780, kSentinel}}, {{921, 776, 769}}, // NOLINT
+ {{933, 776, 769}}, {{1333, 1362, kSentinel}},
+ {{72, 817, kSentinel}}, {{84, 776, kSentinel}}, // NOLINT
+ {{87, 778, kSentinel}}, {{89, 778, kSentinel}},
+ {{65, 702, kSentinel}}, {{933, 787, kSentinel}}, // NOLINT
+ {{933, 787, 768}}, {{933, 787, 769}},
+ {{933, 787, 834}}, {{7944, 921, kSentinel}}, // NOLINT
+ {{7945, 921, kSentinel}}, {{7946, 921, kSentinel}},
+ {{7947, 921, kSentinel}}, {{7948, 921, kSentinel}}, // NOLINT
+ {{7949, 921, kSentinel}}, {{7950, 921, kSentinel}},
+ {{7951, 921, kSentinel}}, {{7976, 921, kSentinel}}, // NOLINT
+ {{7977, 921, kSentinel}}, {{7978, 921, kSentinel}},
+ {{7979, 921, kSentinel}}, {{7980, 921, kSentinel}}, // NOLINT
+ {{7981, 921, kSentinel}}, {{7982, 921, kSentinel}},
+ {{7983, 921, kSentinel}}, {{8040, 921, kSentinel}}, // NOLINT
+ {{8041, 921, kSentinel}}, {{8042, 921, kSentinel}},
+ {{8043, 921, kSentinel}}, {{8044, 921, kSentinel}}, // NOLINT
+ {{8045, 921, kSentinel}}, {{8046, 921, kSentinel}},
+ {{8047, 921, kSentinel}}, {{8122, 921, kSentinel}}, // NOLINT
+ {{913, 921, kSentinel}}, {{902, 921, kSentinel}},
+ {{913, 834, kSentinel}}, {{913, 834, 921}}, // NOLINT
+ {{8138, 921, kSentinel}}, {{919, 921, kSentinel}},
+ {{905, 921, kSentinel}}, {{919, 834, kSentinel}}, // NOLINT
+ {{919, 834, 921}}, {{921, 776, 768}},
+ {{921, 834, kSentinel}}, {{921, 776, 834}}, // NOLINT
+ {{933, 776, 768}}, {{929, 787, kSentinel}},
+ {{933, 834, kSentinel}}, {{933, 776, 834}}, // NOLINT
+ {{8186, 921, kSentinel}}, {{937, 921, kSentinel}},
+ {{911, 921, kSentinel}}, {{937, 834, kSentinel}}, // NOLINT
+ {{937, 834, 921}}, {{kSentinel}}}; // NOLINT
+static const uint16_t kToUppercaseTable0Size = 590; // NOLINT
static const int32_t kToUppercaseTable0[1180] = {
1073741921, -128, 122, -128, 181, 2972,
223, 1, 1073742048, -128, 246, -128,
@@ -1509,24 +1541,44 @@ static const int32_t kToUppercaseTable0[1180] = {
8166, 217, 8167, 221, // NOLINT
8178, 225, 8179, 229, 8180, 233,
8182, 237, 8183, 241, 8188, 229}; // NOLINT
-static const uint16_t kToUppercaseMultiStrings0Size = 62; // NOLINT
-static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const uint16_t kToUppercaseMultiStrings0Size = 62; // NOLINT
+static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings1[1] =
+ { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kToUppercaseTable1Size = 73; // NOLINT
static const int32_t kToUppercaseTable1[146] = {
- 334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192, // NOLINT
- 3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4, 3190, -4, // NOLINT
- 3201, -4, 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, // NOLINT
- 3217, -4, 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, // NOLINT
- 3233, -4, 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, // NOLINT
- 3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, // NOLINT
- 3265, -4, 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, // NOLINT
- 3281, -4, 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, // NOLINT
- 3297, -4, 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056, 3365, -29056, 3367, -29056, // NOLINT
- 3373, -29056 }; // NOLINT
+ 334, -112, 1073742192, -64, 383, -64,
+ 388, -4, 1073743056, -104, 1257, -104,
+ 1073744944, -192, 3166, -192, // NOLINT
+ 3169, -4, 3173, -43180, 3174, -43168,
+ 3176, -4, 3178, -4, 3180, -4,
+ 3187, -4, 3190, -4, // NOLINT
+ 3201, -4, 3203, -4, 3205, -4,
+ 3207, -4, 3209, -4, 3211, -4,
+ 3213, -4, 3215, -4, // NOLINT
+ 3217, -4, 3219, -4, 3221, -4,
+ 3223, -4, 3225, -4, 3227, -4,
+ 3229, -4, 3231, -4, // NOLINT
+ 3233, -4, 3235, -4, 3237, -4,
+ 3239, -4, 3241, -4, 3243, -4,
+ 3245, -4, 3247, -4, // NOLINT
+ 3249, -4, 3251, -4, 3253, -4,
+ 3255, -4, 3257, -4, 3259, -4,
+ 3261, -4, 3263, -4, // NOLINT
+ 3265, -4, 3267, -4, 3269, -4,
+ 3271, -4, 3273, -4, 3275, -4,
+ 3277, -4, 3279, -4, // NOLINT
+ 3281, -4, 3283, -4, 3285, -4,
+ 3287, -4, 3289, -4, 3291, -4,
+ 3293, -4, 3295, -4, // NOLINT
+ 3297, -4, 3299, -4, 3308, -4,
+ 3310, -4, 3315, -4, 1073745152, -29056,
+ 3365, -29056, 3367, -29056, // NOLINT
+ 3373, -29056}; // NOLINT
static const uint16_t kToUppercaseMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings5[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings5[1] =
+ { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kToUppercaseTable5Size = 95; // NOLINT
static const int32_t
kToUppercaseTable5[190] = {1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4,
@@ -1554,55 +1606,55 @@ static const int32_t
1949, -4, 1951, -4, 1953, -4, 1955, -4, 1957, -4,
1959, -4, 1961, -4}; // NOLINT
static const uint16_t kToUppercaseMultiStrings5Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings7[12] = { // NOLINT
- {{70, 70, kSentinel}}, {{70, 73, kSentinel}}, {{70, 76, kSentinel}}, {{70, 70, 73}}, // NOLINT
- {{70, 70, 76}}, {{83, 84, kSentinel}}, {{1348, 1350, kSentinel}}, {{1348, 1333, kSentinel}}, // NOLINT
- {{1348, 1339, kSentinel}}, {{1358, 1350, kSentinel}}, {{1348, 1341, kSentinel}}, {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings7[12] =
+ { // NOLINT
+ {{70, 70, kSentinel}},
+ {{70, 73, kSentinel}},
+ {{70, 76, kSentinel}},
+ {{70, 70, 73}}, // NOLINT
+ {{70, 70, 76}},
+ {{83, 84, kSentinel}},
+ {{1348, 1350, kSentinel}},
+ {{1348, 1333, kSentinel}}, // NOLINT
+ {{1348, 1339, kSentinel}},
+ {{1358, 1350, kSentinel}},
+ {{1348, 1341, kSentinel}},
+ {{kSentinel}}}; // NOLINT
static const uint16_t kToUppercaseTable7Size = 14; // NOLINT
-static const int32_t kToUppercaseTable7[28] = {
- 6912, 1, 6913, 5, 6914, 9, 6915, 13, 6916, 17, 6917, 21, 6918, 21, 6931, 25, // NOLINT
- 6932, 29, 6933, 33, 6934, 37, 6935, 41, 1073749825, -128, 8026, -128 }; // NOLINT
+static const int32_t kToUppercaseTable7[28] =
+ {6912, 1, 6913, 5, 6914, 9, 6915, 13,
+ 6916, 17, 6917, 21, 6918, 21, 6931, 25, // NOLINT
+ 6932, 29, 6933, 33, 6934, 37, 6935, 41,
+ 1073749825, -128, 8026, -128}; // NOLINT
static const uint16_t kToUppercaseMultiStrings7Size = 12; // NOLINT
-int ToUppercase::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
+int ToUppercase::Convert(uchar c, uchar n, uchar* result,
+ bool* allow_caching_ptr) {
int chunk_index = c >> 13;
switch (chunk_index) {
- case 0: return LookupMapping<true>(kToUppercaseTable0,
- kToUppercaseTable0Size,
- kToUppercaseMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kToUppercaseTable1,
- kToUppercaseTable1Size,
- kToUppercaseMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 5: return LookupMapping<true>(kToUppercaseTable5,
- kToUppercaseTable5Size,
- kToUppercaseMultiStrings5,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kToUppercaseTable7,
- kToUppercaseTable7Size,
- kToUppercaseMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
+ case 0:
+ return LookupMapping<true>(kToUppercaseTable0, kToUppercaseTable0Size,
+ kToUppercaseMultiStrings0, c, n, result,
+ allow_caching_ptr);
+ case 1:
+ return LookupMapping<true>(kToUppercaseTable1, kToUppercaseTable1Size,
+ kToUppercaseMultiStrings1, c, n, result,
+ allow_caching_ptr);
+ case 5:
+ return LookupMapping<true>(kToUppercaseTable5, kToUppercaseTable5Size,
+ kToUppercaseMultiStrings5, c, n, result,
+ allow_caching_ptr);
+ case 7:
+ return LookupMapping<true>(kToUppercaseTable7, kToUppercaseTable7Size,
+ kToUppercaseMultiStrings7, c, n, result,
+ allow_caching_ptr);
+ default:
+ return 0;
}
}
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings0[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings0[1] =
+ { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kEcma262CanonicalizeTable0Size = 498; // NOLINT
static const int32_t kEcma262CanonicalizeTable0[996] = {
1073741921, -128, 122, -128, 181, 2972,
@@ -1793,91 +1845,99 @@ static const int32_t kEcma262CanonicalizeTable0[996] = {
8145, 32, 1073749984, 32, // NOLINT
8161, 32, 8165, 28}; // NOLINT
static const uint16_t kEcma262CanonicalizeMultiStrings0Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings1[1] =
+ { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kEcma262CanonicalizeTable1Size = 73; // NOLINT
static const int32_t kEcma262CanonicalizeTable1[146] = {
- 334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192, // NOLINT
- 3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4, 3190, -4, // NOLINT
- 3201, -4, 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, // NOLINT
- 3217, -4, 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, // NOLINT
- 3233, -4, 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, // NOLINT
- 3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, // NOLINT
- 3265, -4, 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, // NOLINT
- 3281, -4, 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, // NOLINT
- 3297, -4, 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056, 3365, -29056, 3367, -29056, // NOLINT
- 3373, -29056 }; // NOLINT
+ 334, -112, 1073742192, -64, 383, -64,
+ 388, -4, 1073743056, -104, 1257, -104,
+ 1073744944, -192, 3166, -192, // NOLINT
+ 3169, -4, 3173, -43180, 3174, -43168,
+ 3176, -4, 3178, -4, 3180, -4,
+ 3187, -4, 3190, -4, // NOLINT
+ 3201, -4, 3203, -4, 3205, -4,
+ 3207, -4, 3209, -4, 3211, -4,
+ 3213, -4, 3215, -4, // NOLINT
+ 3217, -4, 3219, -4, 3221, -4,
+ 3223, -4, 3225, -4, 3227, -4,
+ 3229, -4, 3231, -4, // NOLINT
+ 3233, -4, 3235, -4, 3237, -4,
+ 3239, -4, 3241, -4, 3243, -4,
+ 3245, -4, 3247, -4, // NOLINT
+ 3249, -4, 3251, -4, 3253, -4,
+ 3255, -4, 3257, -4, 3259, -4,
+ 3261, -4, 3263, -4, // NOLINT
+ 3265, -4, 3267, -4, 3269, -4,
+ 3271, -4, 3273, -4, 3275, -4,
+ 3277, -4, 3279, -4, // NOLINT
+ 3281, -4, 3283, -4, 3285, -4,
+ 3287, -4, 3289, -4, 3291, -4,
+ 3293, -4, 3295, -4, // NOLINT
+ 3297, -4, 3299, -4, 3308, -4,
+ 3310, -4, 3315, -4, 1073745152, -29056,
+ 3365, -29056, 3367, -29056, // NOLINT
+ 3373, -29056}; // NOLINT
static const uint16_t kEcma262CanonicalizeMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings5[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings5[1] =
+ { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kEcma262CanonicalizeTable5Size = 95; // NOLINT
-static const int32_t kEcma262CanonicalizeTable5
- [190] = {1601, -4, 1603, -4, 1605, -4, 1607, -4,
- 1609, -4, 1611, -4, 1613, -4, 1615, -4, // NOLINT
- 1617, -4, 1619, -4, 1621, -4, 1623, -4,
- 1625, -4, 1627, -4, 1629, -4, 1631, -4, // NOLINT
- 1633, -4, 1635, -4, 1637, -4, 1639, -4,
- 1641, -4, 1643, -4, 1645, -4, 1665, -4, // NOLINT
- 1667, -4, 1669, -4, 1671, -4, 1673, -4,
- 1675, -4, 1677, -4, 1679, -4, 1681, -4, // NOLINT
- 1683, -4, 1685, -4, 1687, -4, 1689, -4,
- 1691, -4, 1827, -4, 1829, -4, 1831, -4, // NOLINT
- 1833, -4, 1835, -4, 1837, -4, 1839, -4,
- 1843, -4, 1845, -4, 1847, -4, 1849, -4, // NOLINT
- 1851, -4, 1853, -4, 1855, -4, 1857, -4,
- 1859, -4, 1861, -4, 1863, -4, 1865, -4, // NOLINT
- 1867, -4, 1869, -4, 1871, -4, 1873, -4,
- 1875, -4, 1877, -4, 1879, -4, 1881, -4, // NOLINT
- 1883, -4, 1885, -4, 1887, -4, 1889, -4,
- 1891, -4, 1893, -4, 1895, -4, 1897, -4, // NOLINT
- 1899, -4, 1901, -4, 1903, -4, 1914, -4,
- 1916, -4, 1919, -4, 1921, -4, 1923, -4, // NOLINT
- 1925, -4, 1927, -4, 1932, -4, 1937, -4,
- 1939, -4, 1943, -4, 1945, -4, 1947, -4, // NOLINT
- 1949, -4, 1951, -4, 1953, -4, 1955, -4,
- 1957, -4, 1959, -4, 1961, -4}; // NOLINT
+static const int32_t kEcma262CanonicalizeTable5[190] = {
+ 1601, -4, 1603, -4, 1605, -4, 1607, -4,
+ 1609, -4, 1611, -4, 1613, -4, 1615, -4, // NOLINT
+ 1617, -4, 1619, -4, 1621, -4, 1623, -4,
+ 1625, -4, 1627, -4, 1629, -4, 1631, -4, // NOLINT
+ 1633, -4, 1635, -4, 1637, -4, 1639, -4,
+ 1641, -4, 1643, -4, 1645, -4, 1665, -4, // NOLINT
+ 1667, -4, 1669, -4, 1671, -4, 1673, -4,
+ 1675, -4, 1677, -4, 1679, -4, 1681, -4, // NOLINT
+ 1683, -4, 1685, -4, 1687, -4, 1689, -4,
+ 1691, -4, 1827, -4, 1829, -4, 1831, -4, // NOLINT
+ 1833, -4, 1835, -4, 1837, -4, 1839, -4,
+ 1843, -4, 1845, -4, 1847, -4, 1849, -4, // NOLINT
+ 1851, -4, 1853, -4, 1855, -4, 1857, -4,
+ 1859, -4, 1861, -4, 1863, -4, 1865, -4, // NOLINT
+ 1867, -4, 1869, -4, 1871, -4, 1873, -4,
+ 1875, -4, 1877, -4, 1879, -4, 1881, -4, // NOLINT
+ 1883, -4, 1885, -4, 1887, -4, 1889, -4,
+ 1891, -4, 1893, -4, 1895, -4, 1897, -4, // NOLINT
+ 1899, -4, 1901, -4, 1903, -4, 1914, -4,
+ 1916, -4, 1919, -4, 1921, -4, 1923, -4, // NOLINT
+ 1925, -4, 1927, -4, 1932, -4, 1937, -4,
+ 1939, -4, 1943, -4, 1945, -4, 1947, -4, // NOLINT
+ 1949, -4, 1951, -4, 1953, -4, 1955, -4,
+ 1957, -4, 1959, -4, 1961, -4}; // NOLINT
static const uint16_t kEcma262CanonicalizeMultiStrings5Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings7[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings7[1] =
+ { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kEcma262CanonicalizeTable7Size = 2; // NOLINT
-static const int32_t kEcma262CanonicalizeTable7[4] = {
- 1073749825, -128, 8026, -128 }; // NOLINT
+static const int32_t kEcma262CanonicalizeTable7[4] = {1073749825, -128, 8026,
+ -128}; // NOLINT
static const uint16_t kEcma262CanonicalizeMultiStrings7Size = 1; // NOLINT
-int Ecma262Canonicalize::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
+int Ecma262Canonicalize::Convert(uchar c, uchar n, uchar* result,
+ bool* allow_caching_ptr) {
int chunk_index = c >> 13;
switch (chunk_index) {
- case 0: return LookupMapping<true>(kEcma262CanonicalizeTable0,
- kEcma262CanonicalizeTable0Size,
- kEcma262CanonicalizeMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kEcma262CanonicalizeTable1,
- kEcma262CanonicalizeTable1Size,
- kEcma262CanonicalizeMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 5: return LookupMapping<true>(kEcma262CanonicalizeTable5,
- kEcma262CanonicalizeTable5Size,
- kEcma262CanonicalizeMultiStrings5,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kEcma262CanonicalizeTable7,
- kEcma262CanonicalizeTable7Size,
- kEcma262CanonicalizeMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
+ case 0:
+ return LookupMapping<true>(
+ kEcma262CanonicalizeTable0, kEcma262CanonicalizeTable0Size,
+ kEcma262CanonicalizeMultiStrings0, c, n, result, allow_caching_ptr);
+ case 1:
+ return LookupMapping<true>(
+ kEcma262CanonicalizeTable1, kEcma262CanonicalizeTable1Size,
+ kEcma262CanonicalizeMultiStrings1, c, n, result, allow_caching_ptr);
+ case 5:
+ return LookupMapping<true>(
+ kEcma262CanonicalizeTable5, kEcma262CanonicalizeTable5Size,
+ kEcma262CanonicalizeMultiStrings5, c, n, result, allow_caching_ptr);
+ case 7:
+ return LookupMapping<true>(
+ kEcma262CanonicalizeTable7, kEcma262CanonicalizeTable7Size,
+ kEcma262CanonicalizeMultiStrings7, c, n, result, allow_caching_ptr);
+ default:
+ return 0;
}
}
@@ -2770,308 +2830,291 @@ static const int32_t kEcma262UnCanonicalizeTable0[2010] = {
8172, 2021, 1073750008, 1973, 8185, 1977,
1073750010, 1989, 8187, 1993}; // NOLINT
static const uint16_t kEcma262UnCanonicalizeMultiStrings0Size = 507; // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings1[83] = { // NOLINT
- {{8498, 8526}}, {{8544, 8560}}, {{8559, 8575}}, {{8579, 8580}}, // NOLINT
- {{9398, 9424}}, {{9423, 9449}}, {{11264, 11312}}, {{11310, 11358}}, // NOLINT
- {{11360, 11361}}, {{619, 11362}}, {{7549, 11363}}, {{637, 11364}}, // NOLINT
- {{570, 11365}}, {{574, 11366}}, {{11367, 11368}}, {{11369, 11370}}, // NOLINT
- {{11371, 11372}}, {{593, 11373}}, {{625, 11374}}, {{592, 11375}}, // NOLINT
- {{594, 11376}}, {{11378, 11379}}, {{11381, 11382}}, {{575, 11390}}, // NOLINT
- {{576, 11391}}, {{11392, 11393}}, {{11394, 11395}}, {{11396, 11397}}, // NOLINT
- {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}}, {{11404, 11405}}, // NOLINT
- {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}}, {{11412, 11413}}, // NOLINT
- {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}}, {{11420, 11421}}, // NOLINT
- {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}}, {{11428, 11429}}, // NOLINT
- {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}}, {{11436, 11437}}, // NOLINT
- {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}}, {{11444, 11445}}, // NOLINT
- {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}}, {{11452, 11453}}, // NOLINT
- {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}}, {{11460, 11461}}, // NOLINT
- {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}}, {{11468, 11469}}, // NOLINT
- {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}}, {{11476, 11477}}, // NOLINT
- {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}}, {{11484, 11485}}, // NOLINT
- {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}}, {{11499, 11500}}, // NOLINT
- {{11501, 11502}}, {{11506, 11507}}, {{4256, 11520}}, {{4293, 11557}}, // NOLINT
- {{4295, 11559}}, {{4301, 11565}}, {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<2>
+ kEcma262UnCanonicalizeMultiStrings1[83] = { // NOLINT
+ {{8498, 8526}}, {{8544, 8560}}, {{8559, 8575}},
+ {{8579, 8580}}, // NOLINT
+ {{9398, 9424}}, {{9423, 9449}}, {{11264, 11312}},
+ {{11310, 11358}}, // NOLINT
+ {{11360, 11361}}, {{619, 11362}}, {{7549, 11363}},
+ {{637, 11364}}, // NOLINT
+ {{570, 11365}}, {{574, 11366}}, {{11367, 11368}},
+ {{11369, 11370}}, // NOLINT
+ {{11371, 11372}}, {{593, 11373}}, {{625, 11374}},
+ {{592, 11375}}, // NOLINT
+ {{594, 11376}}, {{11378, 11379}}, {{11381, 11382}},
+ {{575, 11390}}, // NOLINT
+ {{576, 11391}}, {{11392, 11393}}, {{11394, 11395}},
+ {{11396, 11397}}, // NOLINT
+ {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}},
+ {{11404, 11405}}, // NOLINT
+ {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}},
+ {{11412, 11413}}, // NOLINT
+ {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}},
+ {{11420, 11421}}, // NOLINT
+ {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}},
+ {{11428, 11429}}, // NOLINT
+ {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}},
+ {{11436, 11437}}, // NOLINT
+ {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}},
+ {{11444, 11445}}, // NOLINT
+ {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}},
+ {{11452, 11453}}, // NOLINT
+ {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}},
+ {{11460, 11461}}, // NOLINT
+ {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}},
+ {{11468, 11469}}, // NOLINT
+ {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}},
+ {{11476, 11477}}, // NOLINT
+ {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}},
+ {{11484, 11485}}, // NOLINT
+ {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}},
+ {{11499, 11500}}, // NOLINT
+ {{11501, 11502}}, {{11506, 11507}}, {{4256, 11520}},
+ {{4293, 11557}}, // NOLINT
+ {{4295, 11559}}, {{4301, 11565}}, {{kSentinel}}}; // NOLINT
static const uint16_t kEcma262UnCanonicalizeTable1Size = 149; // NOLINT
static const int32_t kEcma262UnCanonicalizeTable1[298] = {
- 306, 1, 334, 1, 1073742176, 5, 367, 9, 1073742192, 5, 383, 9, 387, 13, 388, 13, // NOLINT
- 1073743030, 17, 1231, 21, 1073743056, 17, 1257, 21, 1073744896, 25, 3118, 29, 1073744944, 25, 3166, 29, // NOLINT
- 3168, 33, 3169, 33, 3170, 37, 3171, 41, 3172, 45, 3173, 49, 3174, 53, 3175, 57, // NOLINT
- 3176, 57, 3177, 61, 3178, 61, 3179, 65, 3180, 65, 3181, 69, 3182, 73, 3183, 77, // NOLINT
- 3184, 81, 3186, 85, 3187, 85, 3189, 89, 3190, 89, 1073745022, 93, 3199, 97, 3200, 101, // NOLINT
- 3201, 101, 3202, 105, 3203, 105, 3204, 109, 3205, 109, 3206, 113, 3207, 113, 3208, 117, // NOLINT
- 3209, 117, 3210, 121, 3211, 121, 3212, 125, 3213, 125, 3214, 129, 3215, 129, 3216, 133, // NOLINT
- 3217, 133, 3218, 137, 3219, 137, 3220, 141, 3221, 141, 3222, 145, 3223, 145, 3224, 149, // NOLINT
- 3225, 149, 3226, 153, 3227, 153, 3228, 157, 3229, 157, 3230, 161, 3231, 161, 3232, 165, // NOLINT
- 3233, 165, 3234, 169, 3235, 169, 3236, 173, 3237, 173, 3238, 177, 3239, 177, 3240, 181, // NOLINT
- 3241, 181, 3242, 185, 3243, 185, 3244, 189, 3245, 189, 3246, 193, 3247, 193, 3248, 197, // NOLINT
- 3249, 197, 3250, 201, 3251, 201, 3252, 205, 3253, 205, 3254, 209, 3255, 209, 3256, 213, // NOLINT
- 3257, 213, 3258, 217, 3259, 217, 3260, 221, 3261, 221, 3262, 225, 3263, 225, 3264, 229, // NOLINT
- 3265, 229, 3266, 233, 3267, 233, 3268, 237, 3269, 237, 3270, 241, 3271, 241, 3272, 245, // NOLINT
- 3273, 245, 3274, 249, 3275, 249, 3276, 253, 3277, 253, 3278, 257, 3279, 257, 3280, 261, // NOLINT
- 3281, 261, 3282, 265, 3283, 265, 3284, 269, 3285, 269, 3286, 273, 3287, 273, 3288, 277, // NOLINT
- 3289, 277, 3290, 281, 3291, 281, 3292, 285, 3293, 285, 3294, 289, 3295, 289, 3296, 293, // NOLINT
- 3297, 293, 3298, 297, 3299, 297, 3307, 301, 3308, 301, 3309, 305, 3310, 305, 3314, 309, // NOLINT
- 3315, 309, 1073745152, 313, 3365, 317, 3367, 321, 3373, 325 }; // NOLINT
+ 306, 1, 334, 1, 1073742176, 5, 367, 9,
+ 1073742192, 5, 383, 9, 387, 13, 388, 13, // NOLINT
+ 1073743030, 17, 1231, 21, 1073743056, 17, 1257, 21,
+ 1073744896, 25, 3118, 29, 1073744944, 25, 3166, 29, // NOLINT
+ 3168, 33, 3169, 33, 3170, 37, 3171, 41,
+ 3172, 45, 3173, 49, 3174, 53, 3175, 57, // NOLINT
+ 3176, 57, 3177, 61, 3178, 61, 3179, 65,
+ 3180, 65, 3181, 69, 3182, 73, 3183, 77, // NOLINT
+ 3184, 81, 3186, 85, 3187, 85, 3189, 89,
+ 3190, 89, 1073745022, 93, 3199, 97, 3200, 101, // NOLINT
+ 3201, 101, 3202, 105, 3203, 105, 3204, 109,
+ 3205, 109, 3206, 113, 3207, 113, 3208, 117, // NOLINT
+ 3209, 117, 3210, 121, 3211, 121, 3212, 125,
+ 3213, 125, 3214, 129, 3215, 129, 3216, 133, // NOLINT
+ 3217, 133, 3218, 137, 3219, 137, 3220, 141,
+ 3221, 141, 3222, 145, 3223, 145, 3224, 149, // NOLINT
+ 3225, 149, 3226, 153, 3227, 153, 3228, 157,
+ 3229, 157, 3230, 161, 3231, 161, 3232, 165, // NOLINT
+ 3233, 165, 3234, 169, 3235, 169, 3236, 173,
+ 3237, 173, 3238, 177, 3239, 177, 3240, 181, // NOLINT
+ 3241, 181, 3242, 185, 3243, 185, 3244, 189,
+ 3245, 189, 3246, 193, 3247, 193, 3248, 197, // NOLINT
+ 3249, 197, 3250, 201, 3251, 201, 3252, 205,
+ 3253, 205, 3254, 209, 3255, 209, 3256, 213, // NOLINT
+ 3257, 213, 3258, 217, 3259, 217, 3260, 221,
+ 3261, 221, 3262, 225, 3263, 225, 3264, 229, // NOLINT
+ 3265, 229, 3266, 233, 3267, 233, 3268, 237,
+ 3269, 237, 3270, 241, 3271, 241, 3272, 245, // NOLINT
+ 3273, 245, 3274, 249, 3275, 249, 3276, 253,
+ 3277, 253, 3278, 257, 3279, 257, 3280, 261, // NOLINT
+ 3281, 261, 3282, 265, 3283, 265, 3284, 269,
+ 3285, 269, 3286, 273, 3287, 273, 3288, 277, // NOLINT
+ 3289, 277, 3290, 281, 3291, 281, 3292, 285,
+ 3293, 285, 3294, 289, 3295, 289, 3296, 293, // NOLINT
+ 3297, 293, 3298, 297, 3299, 297, 3307, 301,
+ 3308, 301, 3309, 305, 3310, 305, 3314, 309, // NOLINT
+ 3315, 309, 1073745152, 313, 3365, 317, 3367, 321,
+ 3373, 325}; // NOLINT
static const uint16_t kEcma262UnCanonicalizeMultiStrings1Size = 83; // NOLINT
static const MultiCharacterSpecialCase<2>
kEcma262UnCanonicalizeMultiStrings5[104] = { // NOLINT
- {{42560, 42561}},
- {{42562, 42563}},
- {{42564, 42565}},
- {{42566, 42567}}, // NOLINT
- {{42568, 42569}},
- {{42570, 42571}},
- {{42572, 42573}},
- {{42574, 42575}}, // NOLINT
- {{42576, 42577}},
- {{42578, 42579}},
- {{42580, 42581}},
- {{42582, 42583}}, // NOLINT
- {{42584, 42585}},
- {{42586, 42587}},
- {{42588, 42589}},
- {{42590, 42591}}, // NOLINT
- {{42592, 42593}},
- {{42594, 42595}},
- {{42596, 42597}},
- {{42598, 42599}}, // NOLINT
- {{42600, 42601}},
- {{42602, 42603}},
- {{42604, 42605}},
- {{42624, 42625}}, // NOLINT
- {{42626, 42627}},
- {{42628, 42629}},
- {{42630, 42631}},
- {{42632, 42633}}, // NOLINT
- {{42634, 42635}},
- {{42636, 42637}},
- {{42638, 42639}},
- {{42640, 42641}}, // NOLINT
- {{42642, 42643}},
- {{42644, 42645}},
- {{42646, 42647}},
- {{42648, 42649}}, // NOLINT
- {{42650, 42651}},
- {{42786, 42787}},
- {{42788, 42789}},
- {{42790, 42791}}, // NOLINT
- {{42792, 42793}},
- {{42794, 42795}},
- {{42796, 42797}},
- {{42798, 42799}}, // NOLINT
- {{42802, 42803}},
- {{42804, 42805}},
- {{42806, 42807}},
- {{42808, 42809}}, // NOLINT
- {{42810, 42811}},
- {{42812, 42813}},
- {{42814, 42815}},
- {{42816, 42817}}, // NOLINT
- {{42818, 42819}},
- {{42820, 42821}},
- {{42822, 42823}},
- {{42824, 42825}}, // NOLINT
- {{42826, 42827}},
- {{42828, 42829}},
- {{42830, 42831}},
- {{42832, 42833}}, // NOLINT
- {{42834, 42835}},
- {{42836, 42837}},
- {{42838, 42839}},
- {{42840, 42841}}, // NOLINT
- {{42842, 42843}},
- {{42844, 42845}},
- {{42846, 42847}},
- {{42848, 42849}}, // NOLINT
- {{42850, 42851}},
- {{42852, 42853}},
- {{42854, 42855}},
- {{42856, 42857}}, // NOLINT
- {{42858, 42859}},
- {{42860, 42861}},
- {{42862, 42863}},
- {{42873, 42874}}, // NOLINT
- {{42875, 42876}},
- {{7545, 42877}},
- {{42878, 42879}},
- {{42880, 42881}}, // NOLINT
- {{42882, 42883}},
- {{42884, 42885}},
- {{42886, 42887}},
- {{42891, 42892}}, // NOLINT
- {{613, 42893}},
- {{42896, 42897}},
- {{42898, 42899}},
- {{42902, 42903}}, // NOLINT
- {{42904, 42905}},
- {{42906, 42907}},
- {{42908, 42909}},
- {{42910, 42911}}, // NOLINT
- {{42912, 42913}},
- {{42914, 42915}},
- {{42916, 42917}},
- {{42918, 42919}}, // NOLINT
- {{42920, 42921}},
- {{614, 42922}},
- {{604, 42923}},
- {{609, 42924}}, // NOLINT
- {{620, 42925}},
- {{670, 42928}},
- {{647, 42929}},
- {{kSentinel}}}; // NOLINT
+ {{42560, 42561}}, {{42562, 42563}},
+ {{42564, 42565}}, {{42566, 42567}}, // NOLINT
+ {{42568, 42569}}, {{42570, 42571}},
+ {{42572, 42573}}, {{42574, 42575}}, // NOLINT
+ {{42576, 42577}}, {{42578, 42579}},
+ {{42580, 42581}}, {{42582, 42583}}, // NOLINT
+ {{42584, 42585}}, {{42586, 42587}},
+ {{42588, 42589}}, {{42590, 42591}}, // NOLINT
+ {{42592, 42593}}, {{42594, 42595}},
+ {{42596, 42597}}, {{42598, 42599}}, // NOLINT
+ {{42600, 42601}}, {{42602, 42603}},
+ {{42604, 42605}}, {{42624, 42625}}, // NOLINT
+ {{42626, 42627}}, {{42628, 42629}},
+ {{42630, 42631}}, {{42632, 42633}}, // NOLINT
+ {{42634, 42635}}, {{42636, 42637}},
+ {{42638, 42639}}, {{42640, 42641}}, // NOLINT
+ {{42642, 42643}}, {{42644, 42645}},
+ {{42646, 42647}}, {{42648, 42649}}, // NOLINT
+ {{42650, 42651}}, {{42786, 42787}},
+ {{42788, 42789}}, {{42790, 42791}}, // NOLINT
+ {{42792, 42793}}, {{42794, 42795}},
+ {{42796, 42797}}, {{42798, 42799}}, // NOLINT
+ {{42802, 42803}}, {{42804, 42805}},
+ {{42806, 42807}}, {{42808, 42809}}, // NOLINT
+ {{42810, 42811}}, {{42812, 42813}},
+ {{42814, 42815}}, {{42816, 42817}}, // NOLINT
+ {{42818, 42819}}, {{42820, 42821}},
+ {{42822, 42823}}, {{42824, 42825}}, // NOLINT
+ {{42826, 42827}}, {{42828, 42829}},
+ {{42830, 42831}}, {{42832, 42833}}, // NOLINT
+ {{42834, 42835}}, {{42836, 42837}},
+ {{42838, 42839}}, {{42840, 42841}}, // NOLINT
+ {{42842, 42843}}, {{42844, 42845}},
+ {{42846, 42847}}, {{42848, 42849}}, // NOLINT
+ {{42850, 42851}}, {{42852, 42853}},
+ {{42854, 42855}}, {{42856, 42857}}, // NOLINT
+ {{42858, 42859}}, {{42860, 42861}},
+ {{42862, 42863}}, {{42873, 42874}}, // NOLINT
+ {{42875, 42876}}, {{7545, 42877}},
+ {{42878, 42879}}, {{42880, 42881}}, // NOLINT
+ {{42882, 42883}}, {{42884, 42885}},
+ {{42886, 42887}}, {{42891, 42892}}, // NOLINT
+ {{613, 42893}}, {{42896, 42897}},
+ {{42898, 42899}}, {{42902, 42903}}, // NOLINT
+ {{42904, 42905}}, {{42906, 42907}},
+ {{42908, 42909}}, {{42910, 42911}}, // NOLINT
+ {{42912, 42913}}, {{42914, 42915}},
+ {{42916, 42917}}, {{42918, 42919}}, // NOLINT
+ {{42920, 42921}}, {{614, 42922}},
+ {{604, 42923}}, {{609, 42924}}, // NOLINT
+ {{620, 42925}}, {{670, 42928}},
+ {{647, 42929}}, {{kSentinel}}}; // NOLINT
static const uint16_t kEcma262UnCanonicalizeTable5Size = 198; // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable5
- [396] = {1600, 1, 1601, 1, 1602, 5, 1603, 5,
- 1604, 9, 1605, 9, 1606, 13, 1607, 13, // NOLINT
- 1608, 17, 1609, 17, 1610, 21, 1611, 21,
- 1612, 25, 1613, 25, 1614, 29, 1615, 29, // NOLINT
- 1616, 33, 1617, 33, 1618, 37, 1619, 37,
- 1620, 41, 1621, 41, 1622, 45, 1623, 45, // NOLINT
- 1624, 49, 1625, 49, 1626, 53, 1627, 53,
- 1628, 57, 1629, 57, 1630, 61, 1631, 61, // NOLINT
- 1632, 65, 1633, 65, 1634, 69, 1635, 69,
- 1636, 73, 1637, 73, 1638, 77, 1639, 77, // NOLINT
- 1640, 81, 1641, 81, 1642, 85, 1643, 85,
- 1644, 89, 1645, 89, 1664, 93, 1665, 93, // NOLINT
- 1666, 97, 1667, 97, 1668, 101, 1669, 101,
- 1670, 105, 1671, 105, 1672, 109, 1673, 109, // NOLINT
- 1674, 113, 1675, 113, 1676, 117, 1677, 117,
- 1678, 121, 1679, 121, 1680, 125, 1681, 125, // NOLINT
- 1682, 129, 1683, 129, 1684, 133, 1685, 133,
- 1686, 137, 1687, 137, 1688, 141, 1689, 141, // NOLINT
- 1690, 145, 1691, 145, 1826, 149, 1827, 149,
- 1828, 153, 1829, 153, 1830, 157, 1831, 157, // NOLINT
- 1832, 161, 1833, 161, 1834, 165, 1835, 165,
- 1836, 169, 1837, 169, 1838, 173, 1839, 173, // NOLINT
- 1842, 177, 1843, 177, 1844, 181, 1845, 181,
- 1846, 185, 1847, 185, 1848, 189, 1849, 189, // NOLINT
- 1850, 193, 1851, 193, 1852, 197, 1853, 197,
- 1854, 201, 1855, 201, 1856, 205, 1857, 205, // NOLINT
- 1858, 209, 1859, 209, 1860, 213, 1861, 213,
- 1862, 217, 1863, 217, 1864, 221, 1865, 221, // NOLINT
- 1866, 225, 1867, 225, 1868, 229, 1869, 229,
- 1870, 233, 1871, 233, 1872, 237, 1873, 237, // NOLINT
- 1874, 241, 1875, 241, 1876, 245, 1877, 245,
- 1878, 249, 1879, 249, 1880, 253, 1881, 253, // NOLINT
- 1882, 257, 1883, 257, 1884, 261, 1885, 261,
- 1886, 265, 1887, 265, 1888, 269, 1889, 269, // NOLINT
- 1890, 273, 1891, 273, 1892, 277, 1893, 277,
- 1894, 281, 1895, 281, 1896, 285, 1897, 285, // NOLINT
- 1898, 289, 1899, 289, 1900, 293, 1901, 293,
- 1902, 297, 1903, 297, 1913, 301, 1914, 301, // NOLINT
- 1915, 305, 1916, 305, 1917, 309, 1918, 313,
- 1919, 313, 1920, 317, 1921, 317, 1922, 321, // NOLINT
- 1923, 321, 1924, 325, 1925, 325, 1926, 329,
- 1927, 329, 1931, 333, 1932, 333, 1933, 337, // NOLINT
- 1936, 341, 1937, 341, 1938, 345, 1939, 345,
- 1942, 349, 1943, 349, 1944, 353, 1945, 353, // NOLINT
- 1946, 357, 1947, 357, 1948, 361, 1949, 361,
- 1950, 365, 1951, 365, 1952, 369, 1953, 369, // NOLINT
- 1954, 373, 1955, 373, 1956, 377, 1957, 377,
- 1958, 381, 1959, 381, 1960, 385, 1961, 385, // NOLINT
- 1962, 389, 1963, 393, 1964, 397, 1965, 401,
- 1968, 405, 1969, 409}; // NOLINT
+static const int32_t
+ kEcma262UnCanonicalizeTable5[396] =
+ {1600, 1, 1601, 1, 1602, 5, 1603, 5,
+ 1604, 9, 1605, 9, 1606, 13, 1607, 13, // NOLINT
+ 1608, 17, 1609, 17, 1610, 21, 1611, 21,
+ 1612, 25, 1613, 25, 1614, 29, 1615, 29, // NOLINT
+ 1616, 33, 1617, 33, 1618, 37, 1619, 37,
+ 1620, 41, 1621, 41, 1622, 45, 1623, 45, // NOLINT
+ 1624, 49, 1625, 49, 1626, 53, 1627, 53,
+ 1628, 57, 1629, 57, 1630, 61, 1631, 61, // NOLINT
+ 1632, 65, 1633, 65, 1634, 69, 1635, 69,
+ 1636, 73, 1637, 73, 1638, 77, 1639, 77, // NOLINT
+ 1640, 81, 1641, 81, 1642, 85, 1643, 85,
+ 1644, 89, 1645, 89, 1664, 93, 1665, 93, // NOLINT
+ 1666, 97, 1667, 97, 1668, 101, 1669, 101,
+ 1670, 105, 1671, 105, 1672, 109, 1673, 109, // NOLINT
+ 1674, 113, 1675, 113, 1676, 117, 1677, 117,
+ 1678, 121, 1679, 121, 1680, 125, 1681, 125, // NOLINT
+ 1682, 129, 1683, 129, 1684, 133, 1685, 133,
+ 1686, 137, 1687, 137, 1688, 141, 1689, 141, // NOLINT
+ 1690, 145, 1691, 145, 1826, 149, 1827, 149,
+ 1828, 153, 1829, 153, 1830, 157, 1831, 157, // NOLINT
+ 1832, 161, 1833, 161, 1834, 165, 1835, 165,
+ 1836, 169, 1837, 169, 1838, 173, 1839, 173, // NOLINT
+ 1842, 177, 1843, 177, 1844, 181, 1845, 181,
+ 1846, 185, 1847, 185, 1848, 189, 1849, 189, // NOLINT
+ 1850, 193, 1851, 193, 1852, 197, 1853, 197,
+ 1854, 201, 1855, 201, 1856, 205, 1857, 205, // NOLINT
+ 1858, 209, 1859, 209, 1860, 213, 1861, 213,
+ 1862, 217, 1863, 217, 1864, 221, 1865, 221, // NOLINT
+ 1866, 225, 1867, 225, 1868, 229, 1869, 229,
+ 1870, 233, 1871, 233, 1872, 237, 1873, 237, // NOLINT
+ 1874, 241, 1875, 241, 1876, 245, 1877, 245,
+ 1878, 249, 1879, 249, 1880, 253, 1881, 253, // NOLINT
+ 1882, 257, 1883, 257, 1884, 261, 1885, 261,
+ 1886, 265, 1887, 265, 1888, 269, 1889, 269, // NOLINT
+ 1890, 273, 1891, 273, 1892, 277, 1893, 277,
+ 1894, 281, 1895, 281, 1896, 285, 1897, 285, // NOLINT
+ 1898, 289, 1899, 289, 1900, 293, 1901, 293,
+ 1902, 297, 1903, 297, 1913, 301, 1914, 301, // NOLINT
+ 1915, 305, 1916, 305, 1917, 309, 1918, 313,
+ 1919, 313, 1920, 317, 1921, 317, 1922, 321, // NOLINT
+ 1923, 321, 1924, 325, 1925, 325, 1926, 329,
+ 1927, 329, 1931, 333, 1932, 333, 1933, 337, // NOLINT
+ 1936, 341, 1937, 341, 1938, 345, 1939, 345,
+ 1942, 349, 1943, 349, 1944, 353, 1945, 353, // NOLINT
+ 1946, 357, 1947, 357, 1948, 361, 1949, 361,
+ 1950, 365, 1951, 365, 1952, 369, 1953, 369, // NOLINT
+ 1954, 373, 1955, 373, 1956, 377, 1957, 377,
+ 1958, 381, 1959, 381, 1960, 385, 1961, 385, // NOLINT
+ 1962, 389, 1963, 393, 1964, 397, 1965, 401,
+ 1968, 405, 1969, 409}; // NOLINT
static const uint16_t kEcma262UnCanonicalizeMultiStrings5Size = 104; // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings7[3] = { // NOLINT
- {{65313, 65345}}, {{65338, 65370}}, {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<2>
+ kEcma262UnCanonicalizeMultiStrings7[3] = { // NOLINT
+ {{65313, 65345}},
+ {{65338, 65370}},
+ {{kSentinel}}}; // NOLINT
static const uint16_t kEcma262UnCanonicalizeTable7Size = 4; // NOLINT
static const int32_t kEcma262UnCanonicalizeTable7[8] = {
- 1073749793, 1, 7994, 5, 1073749825, 1, 8026, 5 }; // NOLINT
+ 1073749793, 1, 7994, 5, 1073749825, 1, 8026, 5}; // NOLINT
static const uint16_t kEcma262UnCanonicalizeMultiStrings7Size = 3; // NOLINT
-int Ecma262UnCanonicalize::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
+int Ecma262UnCanonicalize::Convert(uchar c, uchar n, uchar* result,
+ bool* allow_caching_ptr) {
int chunk_index = c >> 13;
switch (chunk_index) {
- case 0: return LookupMapping<true>(kEcma262UnCanonicalizeTable0,
- kEcma262UnCanonicalizeTable0Size,
- kEcma262UnCanonicalizeMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<true>(kEcma262UnCanonicalizeTable1,
- kEcma262UnCanonicalizeTable1Size,
- kEcma262UnCanonicalizeMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 5: return LookupMapping<true>(kEcma262UnCanonicalizeTable5,
- kEcma262UnCanonicalizeTable5Size,
- kEcma262UnCanonicalizeMultiStrings5,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<true>(kEcma262UnCanonicalizeTable7,
- kEcma262UnCanonicalizeTable7Size,
- kEcma262UnCanonicalizeMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
+ case 0:
+ return LookupMapping<true>(
+ kEcma262UnCanonicalizeTable0, kEcma262UnCanonicalizeTable0Size,
+ kEcma262UnCanonicalizeMultiStrings0, c, n, result, allow_caching_ptr);
+ case 1:
+ return LookupMapping<true>(
+ kEcma262UnCanonicalizeTable1, kEcma262UnCanonicalizeTable1Size,
+ kEcma262UnCanonicalizeMultiStrings1, c, n, result, allow_caching_ptr);
+ case 5:
+ return LookupMapping<true>(
+ kEcma262UnCanonicalizeTable5, kEcma262UnCanonicalizeTable5Size,
+ kEcma262UnCanonicalizeMultiStrings5, c, n, result, allow_caching_ptr);
+ case 7:
+ return LookupMapping<true>(
+ kEcma262UnCanonicalizeTable7, kEcma262UnCanonicalizeTable7Size,
+ kEcma262UnCanonicalizeMultiStrings7, c, n, result, allow_caching_ptr);
+ default:
+ return 0;
}
}
-static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings0[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1>
+ kCanonicalizationRangeMultiStrings0[1] = { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kCanonicalizationRangeTable0Size = 70; // NOLINT
static const int32_t kCanonicalizationRangeTable0[140] = {
- 1073741889, 100, 90, 0, 1073741921, 100, 122, 0, 1073742016, 88, 214, 0, 1073742040, 24, 222, 0, // NOLINT
- 1073742048, 88, 246, 0, 1073742072, 24, 254, 0, 1073742715, 8, 893, 0, 1073742728, 8, 906, 0, // NOLINT
- 1073742749, 8, 927, 0, 1073742759, 16, 939, 0, 1073742765, 8, 943, 0, 1073742781, 8, 959, 0, // NOLINT
- 1073742791, 16, 971, 0, 1073742845, 8, 1023, 0, 1073742848, 60, 1039, 0, 1073742864, 124, 1071, 0, // NOLINT
- 1073742896, 124, 1103, 0, 1073742928, 60, 1119, 0, 1073743153, 148, 1366, 0, 1073743201, 148, 1414, 0, // NOLINT
- 1073746080, 148, 4293, 0, 1073749760, 28, 7943, 0, 1073749768, 28, 7951, 0, 1073749776, 20, 7957, 0, // NOLINT
- 1073749784, 20, 7965, 0, 1073749792, 28, 7975, 0, 1073749800, 28, 7983, 0, 1073749808, 28, 7991, 0, // NOLINT
- 1073749816, 28, 7999, 0, 1073749824, 20, 8005, 0, 1073749832, 20, 8013, 0, 1073749856, 28, 8039, 0, // NOLINT
- 1073749864, 28, 8047, 0, 1073749874, 12, 8053, 0, 1073749960, 12, 8139, 0 }; // NOLINT
+ 1073741889, 100, 90, 0, 1073741921, 100, 122, 0,
+ 1073742016, 88, 214, 0, 1073742040, 24, 222, 0, // NOLINT
+ 1073742048, 88, 246, 0, 1073742072, 24, 254, 0,
+ 1073742715, 8, 893, 0, 1073742728, 8, 906, 0, // NOLINT
+ 1073742749, 8, 927, 0, 1073742759, 16, 939, 0,
+ 1073742765, 8, 943, 0, 1073742781, 8, 959, 0, // NOLINT
+ 1073742791, 16, 971, 0, 1073742845, 8, 1023, 0,
+ 1073742848, 60, 1039, 0, 1073742864, 124, 1071, 0, // NOLINT
+ 1073742896, 124, 1103, 0, 1073742928, 60, 1119, 0,
+ 1073743153, 148, 1366, 0, 1073743201, 148, 1414, 0, // NOLINT
+ 1073746080, 148, 4293, 0, 1073749760, 28, 7943, 0,
+ 1073749768, 28, 7951, 0, 1073749776, 20, 7957, 0, // NOLINT
+ 1073749784, 20, 7965, 0, 1073749792, 28, 7975, 0,
+ 1073749800, 28, 7983, 0, 1073749808, 28, 7991, 0, // NOLINT
+ 1073749816, 28, 7999, 0, 1073749824, 20, 8005, 0,
+ 1073749832, 20, 8013, 0, 1073749856, 28, 8039, 0, // NOLINT
+ 1073749864, 28, 8047, 0, 1073749874, 12, 8053, 0,
+ 1073749960, 12, 8139, 0}; // NOLINT
static const uint16_t kCanonicalizationRangeMultiStrings0Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings1[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1>
+ kCanonicalizationRangeMultiStrings1[1] = { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kCanonicalizationRangeTable1Size = 14; // NOLINT
static const int32_t kCanonicalizationRangeTable1[28] = {
- 1073742176, 60, 367, 0, 1073742192, 60, 383, 0, 1073743030, 100, 1231, 0, 1073743056, 100, 1257, 0, // NOLINT
- 1073744896, 184, 3118, 0, 1073744944, 184, 3166, 0, 1073745152, 148, 3365, 0 }; // NOLINT
+ 1073742176, 60, 367, 0, 1073742192, 60, 383, 0,
+ 1073743030, 100, 1231, 0, 1073743056, 100, 1257, 0, // NOLINT
+ 1073744896, 184, 3118, 0, 1073744944, 184, 3166, 0,
+ 1073745152, 148, 3365, 0}; // NOLINT
static const uint16_t kCanonicalizationRangeMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kCanonicalizationRangeMultiStrings7[1] = { // NOLINT
- {{kSentinel}} }; // NOLINT
+static const MultiCharacterSpecialCase<1>
+ kCanonicalizationRangeMultiStrings7[1] = { // NOLINT
+ {{kSentinel}}}; // NOLINT
static const uint16_t kCanonicalizationRangeTable7Size = 4; // NOLINT
static const int32_t kCanonicalizationRangeTable7[8] = {
- 1073749793, 100, 7994, 0, 1073749825, 100, 8026, 0 }; // NOLINT
+ 1073749793, 100, 7994, 0, 1073749825, 100, 8026, 0}; // NOLINT
static const uint16_t kCanonicalizationRangeMultiStrings7Size = 1; // NOLINT
-int CanonicalizationRange::Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr) {
+int CanonicalizationRange::Convert(uchar c, uchar n, uchar* result,
+ bool* allow_caching_ptr) {
int chunk_index = c >> 13;
switch (chunk_index) {
- case 0: return LookupMapping<false>(kCanonicalizationRangeTable0,
- kCanonicalizationRangeTable0Size,
- kCanonicalizationRangeMultiStrings0,
- c,
- n,
- result,
- allow_caching_ptr);
- case 1: return LookupMapping<false>(kCanonicalizationRangeTable1,
- kCanonicalizationRangeTable1Size,
- kCanonicalizationRangeMultiStrings1,
- c,
- n,
- result,
- allow_caching_ptr);
- case 7: return LookupMapping<false>(kCanonicalizationRangeTable7,
- kCanonicalizationRangeTable7Size,
- kCanonicalizationRangeMultiStrings7,
- c,
- n,
- result,
- allow_caching_ptr);
- default: return 0;
+ case 0:
+ return LookupMapping<false>(
+ kCanonicalizationRangeTable0, kCanonicalizationRangeTable0Size,
+ kCanonicalizationRangeMultiStrings0, c, n, result, allow_caching_ptr);
+ case 1:
+ return LookupMapping<false>(
+ kCanonicalizationRangeTable1, kCanonicalizationRangeTable1Size,
+ kCanonicalizationRangeMultiStrings1, c, n, result, allow_caching_ptr);
+ case 7:
+ return LookupMapping<false>(
+ kCanonicalizationRangeTable7, kCanonicalizationRangeTable7Size,
+ kCanonicalizationRangeMultiStrings7, c, n, result, allow_caching_ptr);
+ default:
+ return 0;
}
}
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/strings/unicode.h
index 3adaf03a04..bd94300e34 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/strings/unicode.h
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UNICODE_H_
-#define V8_UNICODE_H_
+#ifndef V8_STRINGS_UNICODE_H_
+#define V8_STRINGS_UNICODE_H_
#include <sys/types.h>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/third_party/utf8-decoder/utf8-decoder.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
/**
* \file
* Definitions and convenience functions for working with unicode.
@@ -16,8 +16,8 @@
namespace unibrow {
-typedef unsigned int uchar;
-typedef unsigned char byte;
+using uchar = unsigned int;
+using byte = unsigned char;
/**
* The max length of the result of converting the case of a single
@@ -61,7 +61,6 @@ class Predicate {
CacheEntry entries_[kSize];
};
-
// A cache used in case conversion. It caches the value for characters
// that either have no mapping or map to a single character independent
// of context. Characters that map to more than one character or that
@@ -71,14 +70,14 @@ class Mapping {
public:
inline Mapping() = default;
inline int get(uchar c, uchar n, uchar* result);
+
private:
friend class Test;
int CalculateValue(uchar c, uchar n, uchar* result);
struct CacheEntry {
- inline CacheEntry() : code_point_(kNoChar), offset_(0) { }
+ inline CacheEntry() : code_point_(kNoChar), offset_(0) {}
inline CacheEntry(uchar code_point, signed offset)
- : code_point_(code_point),
- offset_(offset) { }
+ : code_point_(code_point), offset_(offset) {}
uchar code_point_;
signed offset_;
static const int kNoChar = (1 << 21) - 1;
@@ -131,15 +130,32 @@ class Utf16 {
}
};
+class Latin1 {
+ public:
+ static const uint16_t kMaxChar = 0xff;
+ // Convert the character to Latin-1 case equivalent if possible.
+ static inline uint16_t TryConvertToLatin1(uint16_t c) {
+ switch (c) {
+ // This are equivalent characters in unicode.
+ case 0x39c:
+ case 0x3bc:
+ return 0xb5;
+ // This is an uppercase of a Latin-1 character
+ // outside of Latin-1.
+ case 0x178:
+ return 0xff;
+ }
+ return c;
+ }
+};
+
class V8_EXPORT_PRIVATE Utf8 {
public:
using State = Utf8DfaDecoder::State;
static inline uchar Length(uchar chr, int previous);
static inline unsigned EncodeOneByte(char* out, uint8_t c);
- static inline unsigned Encode(char* out,
- uchar c,
- int previous,
+ static inline unsigned Encode(char* out, uchar c, int previous,
bool replace_invalid = false);
static uchar CalculateValue(const byte* str, size_t length, size_t* cursor);
@@ -148,11 +164,11 @@ class V8_EXPORT_PRIVATE Utf8 {
static const uchar kBadChar = 0xFFFD;
static const uchar kBufferEmpty = 0x0;
static const uchar kIncomplete = 0xFFFFFFFC; // any non-valid code point.
- static const unsigned kMaxEncodedSize = 4;
- static const unsigned kMaxOneByteChar = 0x7f;
- static const unsigned kMaxTwoByteChar = 0x7ff;
+ static const unsigned kMaxEncodedSize = 4;
+ static const unsigned kMaxOneByteChar = 0x7f;
+ static const unsigned kMaxTwoByteChar = 0x7ff;
static const unsigned kMaxThreeByteChar = 0xffff;
- static const unsigned kMaxFourByteChar = 0x1fffff;
+ static const unsigned kMaxFourByteChar = 0x1fffff;
// A single surrogate is coded as a 3 byte UTF-8 sequence, but two together
// that match are coded as a 4 byte UTF-8 sequence.
@@ -160,10 +176,10 @@ class V8_EXPORT_PRIVATE Utf8 {
static const unsigned kSizeOfUnmatchedSurrogate = 3;
// The maximum size a single UTF-16 code unit may take up when encoded as
// UTF-8.
- static const unsigned kMax16BitCodeUnitSize = 3;
+ static const unsigned kMax16BitCodeUnitSize = 3;
static inline uchar ValueOf(const byte* str, size_t length, size_t* cursor);
- typedef uint32_t Utf8IncrementalBuffer;
+ using Utf8IncrementalBuffer = uint32_t;
static inline uchar ValueOfIncremental(const byte** cursor, State* state,
Utf8IncrementalBuffer* buffer);
static uchar ValueOfIncrementalFinish(State* state);
@@ -215,42 +231,27 @@ V8_INLINE bool IsStringLiteralLineTerminator(uchar c) {
struct ToLowercase {
static const int kMaxWidth = 3;
static const bool kIsToLower = true;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
+ static int Convert(uchar c, uchar n, uchar* result, bool* allow_caching_ptr);
};
struct ToUppercase {
static const int kMaxWidth = 3;
static const bool kIsToLower = false;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
+ static int Convert(uchar c, uchar n, uchar* result, bool* allow_caching_ptr);
};
struct V8_EXPORT_PRIVATE Ecma262Canonicalize {
static const int kMaxWidth = 1;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
+ static int Convert(uchar c, uchar n, uchar* result, bool* allow_caching_ptr);
};
struct V8_EXPORT_PRIVATE Ecma262UnCanonicalize {
static const int kMaxWidth = 4;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
+ static int Convert(uchar c, uchar n, uchar* result, bool* allow_caching_ptr);
};
struct V8_EXPORT_PRIVATE CanonicalizationRange {
static const int kMaxWidth = 1;
- static int Convert(uchar c,
- uchar n,
- uchar* result,
- bool* allow_caching_ptr);
+ static int Convert(uchar c, uchar n, uchar* result, bool* allow_caching_ptr);
};
#endif // !V8_INTL_SUPPORT
} // namespace unibrow
-#endif // V8_UNICODE_H_
+#endif // V8_STRINGS_UNICODE_H_
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/strings/uri.cc
index b3066b9a2a..430c8dd0eb 100644
--- a/deps/v8/src/uri.cc
+++ b/deps/v8/src/strings/uri.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/uri.h"
+#include "src/strings/uri.h"
#include <vector>
-#include "src/char-predicates-inl.h"
-#include "src/isolate-inl.h"
-#include "src/string-search.h"
-#include "src/unicode-inl.h"
+#include "src/execution/isolate-inl.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/strings/string-search.h"
+#include "src/strings/unicode-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/uri.h b/deps/v8/src/strings/uri.h
index cc861e93da..cb159c3aeb 100644
--- a/deps/v8/src/uri.h
+++ b/deps/v8/src/strings/uri.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_URI_H_
-#define V8_URI_H_
+#ifndef V8_STRINGS_URI_H_
+#define V8_STRINGS_URI_H_
-#include "src/allocation.h"
-#include "src/maybe-handles.h"
-#include "src/objects.h"
+#include "src/utils/allocation.h"
+#include "src/handles/maybe-handles.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
@@ -52,4 +52,4 @@ class Uri : public AllStatic {
} // namespace internal
} // namespace v8
-#endif // V8_URI_H_
+#endif // V8_STRINGS_URI_H_
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/tasks/cancelable-task.cc
index dc89128229..e56e93b83e 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/tasks/cancelable-task.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/cancelable-task.h"
+#include "src/tasks/cancelable-task.h"
#include "src/base/platform/platform.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/tasks/cancelable-task.h
index a82f2b605e..59f04de3ee 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/tasks/cancelable-task.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CANCELABLE_TASK_H_
-#define V8_CANCELABLE_TASK_H_
+#ifndef V8_TASKS_CANCELABLE_TASK_H_
+#define V8_TASKS_CANCELABLE_TASK_H_
#include <atomic>
#include <unordered_map>
@@ -11,7 +11,7 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -184,4 +184,4 @@ class CancelableIdleTask : public Cancelable, public IdleTask {
} // namespace internal
} // namespace v8
-#endif // V8_CANCELABLE_TASK_H_
+#endif // V8_TASKS_CANCELABLE_TASK_H_
diff --git a/deps/v8/src/task-utils.cc b/deps/v8/src/tasks/task-utils.cc
index aaa36346e1..2b75c4549c 100644
--- a/deps/v8/src/task-utils.cc
+++ b/deps/v8/src/tasks/task-utils.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/task-utils.h"
+#include "src/tasks/task-utils.h"
-#include "src/cancelable-task.h"
+#include "src/tasks/cancelable-task.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/task-utils.h b/deps/v8/src/tasks/task-utils.h
index 81ad5e1e3a..cdd1624748 100644
--- a/deps/v8/src/task-utils.h
+++ b/deps/v8/src/tasks/task-utils.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_TASK_UTILS_H_
-#define V8_TASK_UTILS_H_
+#ifndef V8_TASKS_TASK_UTILS_H_
+#define V8_TASKS_TASK_UTILS_H_
#include <functional>
#include <memory>
@@ -30,4 +30,4 @@ std::unique_ptr<CancelableIdleTask> MakeCancelableIdleTask(
} // namespace internal
} // namespace v8
-#endif // V8_TASK_UTILS_H_
+#endif // V8_TASKS_TASK_UTILS_H_
diff --git a/deps/v8/src/third_party/vtune/ittnotify_config.h b/deps/v8/src/third_party/vtune/ittnotify_config.h
index 412e344628..9095107cb3 100644
--- a/deps/v8/src/third_party/vtune/ittnotify_config.h
+++ b/deps/v8/src/third_party/vtune/ittnotify_config.h
@@ -210,9 +210,9 @@
/* OS communication functions */
#if ITT_PLATFORM==ITT_PLATFORM_WIN
#include <windows.h>
-typedef HMODULE lib_t;
-typedef DWORD TIDT;
-typedef CRITICAL_SECTION mutex_t;
+using lib_t = HMODULE;
+using TIDT = DWORD;
+using mutex_t = CRITICAL_SECTION;
#define MUTEX_INITIALIZER { 0 }
#define strong_alias(name, aliasname) /* empty for Windows */
#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
@@ -225,8 +225,8 @@ typedef CRITICAL_SECTION mutex_t;
#endif /* _GNU_SOURCE */
#include <pthread.h>
typedef void* lib_t;
-typedef pthread_t TIDT;
-typedef pthread_mutex_t mutex_t;
+using TIDT = pthread_t;
+using mutex_t = pthread_mutex_t;
#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
#define _strong_alias(name, aliasname) extern __typeof (name) aliasname __attribute__ ((alias (#name)));
#define strong_alias(name, aliasname) _strong_alias(name, aliasname)
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index 5c49855359..dbbab7fc58 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -257,8 +257,7 @@ void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
} // namespace internal
v8::JitCodeEventHandler GetVtuneCodeEventHandler() {
- v8::V8::SetFlagsFromString("--nocompact_code_space",
- (int)strlen("--nocompact_code_space"));
+ v8::V8::SetFlagsFromString("--no-compact-code-space");
return vTune::internal::VTUNEJITInterface::event_handler;
}
diff --git a/deps/v8/src/torque/OWNERS b/deps/v8/src/torque/OWNERS
new file mode 100644
index 0000000000..03fa4c9daa
--- /dev/null
+++ b/deps/v8/src/torque/OWNERS
@@ -0,0 +1,6 @@
+danno@chromium.org
+jarin@chromium.org
+mvstanton@chromium.org
+sigurds@chromium.org
+szuend@chromium.org
+tebbi@chromium.org
diff --git a/deps/v8/src/torque/ast.h b/deps/v8/src/torque/ast.h
index d8c6942743..f26e9b2326 100644
--- a/deps/v8/src/torque/ast.h
+++ b/deps/v8/src/torque/ast.h
@@ -11,6 +11,7 @@
#include <vector>
#include "src/base/optional.h"
+#include "src/torque/constants.h"
#include "src/torque/source-positions.h"
namespace v8 {
@@ -61,15 +62,18 @@ namespace torque {
V(VarDeclarationStatement) \
V(GotoStatement)
+#define AST_TYPE_DECLARATION_NODE_KIND_LIST(V) \
+ V(AbstractTypeDeclaration) \
+ V(TypeAliasDeclaration) \
+ V(ClassDeclaration) \
+ V(StructDeclaration)
+
#define AST_DECLARATION_NODE_KIND_LIST(V) \
- V(TypeDeclaration) \
- V(TypeAliasDeclaration) \
+ AST_TYPE_DECLARATION_NODE_KIND_LIST(V) \
V(StandardDeclaration) \
V(GenericDeclaration) \
V(SpecializationDeclaration) \
V(ExternConstDeclaration) \
- V(ClassDeclaration) \
- V(StructDeclaration) \
V(NamespaceDeclaration) \
V(ConstDeclaration) \
V(CppIncludeDeclaration)
@@ -204,6 +208,12 @@ struct Identifier : AstNode {
std::string value;
};
+struct IdentifierPtrValueEq {
+ bool operator()(const Identifier* a, const Identifier* b) {
+ return a->value < b->value;
+ }
+};
+
struct IdentifierExpression : LocationExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(IdentifierExpression)
IdentifierExpression(SourcePosition pos,
@@ -241,7 +251,7 @@ struct CallMethodExpression : Expression {
CallMethodExpression(SourcePosition pos, Expression* target,
IdentifierExpression* method,
std::vector<Expression*> arguments,
- std::vector<std::string> labels)
+ std::vector<Identifier*> labels)
: Expression(kKind, pos),
target(target),
method(method),
@@ -250,21 +260,21 @@ struct CallMethodExpression : Expression {
Expression* target;
IdentifierExpression* method;
std::vector<Expression*> arguments;
- std::vector<std::string> labels;
+ std::vector<Identifier*> labels;
};
struct CallExpression : Expression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(CallExpression)
CallExpression(SourcePosition pos, IdentifierExpression* callee,
std::vector<Expression*> arguments,
- std::vector<std::string> labels)
+ std::vector<Identifier*> labels)
: Expression(kKind, pos),
callee(callee),
arguments(std::move(arguments)),
labels(std::move(labels)) {}
IdentifierExpression* callee;
std::vector<Expression*> arguments;
- std::vector<std::string> labels;
+ std::vector<Identifier*> labels;
};
struct NameAndExpression {
@@ -435,10 +445,10 @@ struct BasicTypeExpression : TypeExpression {
DEFINE_AST_NODE_LEAF_BOILERPLATE(BasicTypeExpression)
BasicTypeExpression(SourcePosition pos,
std::vector<std::string> namespace_qualification,
- bool is_constexpr, std::string name)
+ std::string name)
: TypeExpression(kKind, pos),
namespace_qualification(std::move(namespace_qualification)),
- is_constexpr(is_constexpr),
+ is_constexpr(IsConstexprName(name)),
name(std::move(name)) {}
std::vector<std::string> namespace_qualification;
bool is_constexpr;
@@ -569,12 +579,10 @@ struct ContinueStatement : Statement {
struct GotoStatement : Statement {
DEFINE_AST_NODE_LEAF_BOILERPLATE(GotoStatement)
- GotoStatement(SourcePosition pos, std::string label,
+ GotoStatement(SourcePosition pos, Identifier* label,
const std::vector<Expression*>& arguments)
- : Statement(kKind, pos),
- label(std::move(label)),
- arguments(std::move(arguments)) {}
- std::string label;
+ : Statement(kKind, pos), label(label), arguments(std::move(arguments)) {}
+ Identifier* label;
std::vector<Expression*> arguments;
};
@@ -624,13 +632,13 @@ struct ForOfLoopStatement : Statement {
struct LabelBlock : AstNode {
DEFINE_AST_NODE_LEAF_BOILERPLATE(LabelBlock)
- LabelBlock(SourcePosition pos, std::string label,
+ LabelBlock(SourcePosition pos, Identifier* label,
const ParameterList& parameters, Statement* body)
: AstNode(kKind, pos),
- label(std::move(label)),
+ label(label),
parameters(parameters),
body(std::move(body)) {}
- std::string label;
+ Identifier* label;
ParameterList parameters;
Statement* body;
};
@@ -667,30 +675,33 @@ struct BlockStatement : Statement {
};
struct TypeDeclaration : Declaration {
- DEFINE_AST_NODE_LEAF_BOILERPLATE(TypeDeclaration)
- TypeDeclaration(SourcePosition pos, Identifier* name, bool transient,
- base::Optional<Identifier*> extends,
- base::Optional<std::string> generates,
- base::Optional<std::string> constexpr_generates)
- : Declaration(kKind, pos),
- name(name),
+ DEFINE_AST_NODE_INNER_BOILERPLATE(TypeDeclaration)
+ TypeDeclaration(Kind kKind, SourcePosition pos, Identifier* name)
+ : Declaration(kKind, pos), name(name) {}
+ Identifier* name;
+};
+
+struct AbstractTypeDeclaration : TypeDeclaration {
+ DEFINE_AST_NODE_LEAF_BOILERPLATE(AbstractTypeDeclaration)
+ AbstractTypeDeclaration(SourcePosition pos, Identifier* name, bool transient,
+ base::Optional<Identifier*> extends,
+ base::Optional<std::string> generates)
+ : TypeDeclaration(kKind, pos, name),
+ is_constexpr(IsConstexprName(name->value)),
transient(transient),
extends(extends),
- generates(std::move(generates)),
- constexpr_generates(std::move(constexpr_generates)) {}
- Identifier* name;
+ generates(std::move(generates)) {}
+ bool is_constexpr;
bool transient;
base::Optional<Identifier*> extends;
base::Optional<std::string> generates;
- base::Optional<std::string> constexpr_generates;
};
-struct TypeAliasDeclaration : Declaration {
+struct TypeAliasDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(TypeAliasDeclaration)
TypeAliasDeclaration(SourcePosition pos, Identifier* name,
TypeExpression* type)
- : Declaration(kKind, pos), name(name), type(type) {}
- Identifier* name;
+ : TypeDeclaration(kKind, pos, name), type(type) {}
TypeExpression* type;
};
@@ -704,19 +715,31 @@ struct StructFieldExpression {
bool const_qualified;
};
+enum class ConditionalAnnotationType {
+ kPositive,
+ kNegative,
+};
+
+struct ConditionalAnnotation {
+ std::string condition;
+ ConditionalAnnotationType type;
+};
+
struct ClassFieldExpression {
NameAndTypeExpression name_and_type;
base::Optional<std::string> index;
+ base::Optional<ConditionalAnnotation> conditional;
bool weak;
bool const_qualified;
+ bool generate_verify;
};
struct LabelAndTypes {
- std::string name;
+ Identifier* name;
std::vector<TypeExpression*> types;
};
-typedef std::vector<LabelAndTypes> LabelAndTypesVector;
+using LabelAndTypesVector = std::vector<LabelAndTypes>;
struct CallableNodeSignature {
ParameterList parameters;
@@ -778,10 +801,12 @@ struct TorqueMacroDeclaration : MacroDeclaration {
TorqueMacroDeclaration(SourcePosition pos, bool transitioning,
std::string name, base::Optional<std::string> op,
ParameterList parameters, TypeExpression* return_type,
- const LabelAndTypesVector& labels)
+ const LabelAndTypesVector& labels, bool export_to_csa)
: MacroDeclaration(kKind, pos, transitioning, std::move(name),
std::move(op), std::move(parameters), return_type,
- labels) {}
+ labels),
+ export_to_csa(export_to_csa) {}
+ bool export_to_csa;
};
struct BuiltinDeclaration : CallableNode {
@@ -865,19 +890,19 @@ struct GenericDeclaration : Declaration {
struct SpecializationDeclaration : Declaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(SpecializationDeclaration)
- SpecializationDeclaration(SourcePosition pos, std::string name,
+ SpecializationDeclaration(SourcePosition pos, Identifier* name,
std::vector<TypeExpression*> generic_parameters,
ParameterList parameters,
TypeExpression* return_type,
LabelAndTypesVector labels, Statement* b)
: Declaration(kKind, pos),
- name(std::move(name)),
+ name(name),
external(false),
generic_parameters(std::move(generic_parameters)),
signature(new CallableNodeSignature{std::move(parameters), return_type,
std::move(labels)}),
body(b) {}
- std::string name;
+ Identifier* name;
bool external;
std::vector<TypeExpression*> generic_parameters;
std::unique_ptr<CallableNodeSignature> signature;
@@ -897,42 +922,33 @@ struct ExternConstDeclaration : Declaration {
std::string literal;
};
-struct StructDeclaration : Declaration {
+struct StructDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(StructDeclaration)
StructDeclaration(SourcePosition pos, Identifier* name,
std::vector<Declaration*> methods,
std::vector<StructFieldExpression> fields)
- : Declaration(kKind, pos),
- name(name),
+ : TypeDeclaration(kKind, pos, name),
methods(std::move(methods)),
fields(std::move(fields)) {}
- Identifier* name;
std::vector<Declaration*> methods;
std::vector<StructFieldExpression> fields;
};
-struct ClassDeclaration : Declaration {
+struct ClassDeclaration : TypeDeclaration {
DEFINE_AST_NODE_LEAF_BOILERPLATE(ClassDeclaration)
- ClassDeclaration(SourcePosition pos, Identifier* name, bool is_extern,
- bool generate_print, bool transient,
- base::Optional<std::string> super,
+ ClassDeclaration(SourcePosition pos, Identifier* name, ClassFlags flags,
+ base::Optional<TypeExpression*> super,
base::Optional<std::string> generates,
std::vector<Declaration*> methods,
std::vector<ClassFieldExpression> fields)
- : Declaration(kKind, pos),
- name(name),
- is_extern(is_extern),
- generate_print(generate_print),
- transient(transient),
- super(std::move(super)),
+ : TypeDeclaration(kKind, pos, name),
+ flags(flags),
+ super(super),
generates(std::move(generates)),
methods(std::move(methods)),
fields(std::move(fields)) {}
- Identifier* name;
- bool is_extern;
- bool generate_print;
- bool transient;
- base::Optional<std::string> super;
+ ClassFlags flags;
+ base::Optional<TypeExpression*> super;
base::Optional<std::string> generates;
std::vector<Declaration*> methods;
std::vector<ClassFieldExpression> fields;
diff --git a/deps/v8/src/torque/constants.h b/deps/v8/src/torque/constants.h
new file mode 100644
index 0000000000..650b134140
--- /dev/null
+++ b/deps/v8/src/torque/constants.h
@@ -0,0 +1,82 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_CONSTANTS_H_
+#define V8_TORQUE_CONSTANTS_H_
+
+#include <cstring>
+#include <string>
+
+#include "src/base/flags.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+static const char* const CONSTEXPR_TYPE_PREFIX = "constexpr ";
+static const char* const NEVER_TYPE_STRING = "never";
+static const char* const CONSTEXPR_BOOL_TYPE_STRING = "constexpr bool";
+static const char* const CONSTEXPR_INTPTR_TYPE_STRING = "constexpr intptr";
+static const char* const BOOL_TYPE_STRING = "bool";
+static const char* const VOID_TYPE_STRING = "void";
+static const char* const ARGUMENTS_TYPE_STRING = "Arguments";
+static const char* const CONTEXT_TYPE_STRING = "Context";
+static const char* const MAP_TYPE_STRING = "Map";
+static const char* const OBJECT_TYPE_STRING = "Object";
+static const char* const HEAP_OBJECT_TYPE_STRING = "HeapObject";
+static const char* const JSOBJECT_TYPE_STRING = "JSObject";
+static const char* const SMI_TYPE_STRING = "Smi";
+static const char* const TAGGED_TYPE_STRING = "Tagged";
+static const char* const RAWPTR_TYPE_STRING = "RawPtr";
+static const char* const CONST_STRING_TYPE_STRING = "constexpr string";
+static const char* const STRING_TYPE_STRING = "String";
+static const char* const NUMBER_TYPE_STRING = "Number";
+static const char* const BUILTIN_POINTER_TYPE_STRING = "BuiltinPtr";
+static const char* const INTPTR_TYPE_STRING = "intptr";
+static const char* const UINTPTR_TYPE_STRING = "uintptr";
+static const char* const INT32_TYPE_STRING = "int32";
+static const char* const UINT32_TYPE_STRING = "uint32";
+static const char* const INT16_TYPE_STRING = "int16";
+static const char* const UINT16_TYPE_STRING = "uint16";
+static const char* const INT8_TYPE_STRING = "int8";
+static const char* const UINT8_TYPE_STRING = "uint8";
+static const char* const FLOAT64_TYPE_STRING = "float64";
+static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
+static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
+static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
+
+inline bool IsConstexprName(const std::string& name) {
+ return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
+ CONSTEXPR_TYPE_PREFIX;
+}
+
+inline std::string GetNonConstexprName(const std::string& name) {
+ if (!IsConstexprName(name)) return name;
+ return name.substr(std::strlen(CONSTEXPR_TYPE_PREFIX));
+}
+
+inline std::string GetConstexprName(const std::string& name) {
+ if (IsConstexprName(name)) return name;
+ return CONSTEXPR_TYPE_PREFIX + name;
+}
+
+enum class ClassFlag {
+ kNone = 0,
+ kExtern = 1 << 0,
+ kGeneratePrint = 1 << 1,
+ kGenerateVerify = 1 << 2,
+ kTransient = 1 << 3,
+ kAbstract = 1 << 4,
+ kInstantiatedAbstractClass = 1 << 5,
+ kHasSameInstanceTypeAsParent = 1 << 6,
+ kGenerateCppClassDefinitions = 1 << 7,
+ kHasIndexedField = 1 << 8
+};
+using ClassFlags = base::Flags<ClassFlag>;
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_CONSTANTS_H_
diff --git a/deps/v8/src/torque/contextual.h b/deps/v8/src/torque/contextual.h
index f854e9ae75..628d5b8514 100644
--- a/deps/v8/src/torque/contextual.h
+++ b/deps/v8/src/torque/contextual.h
@@ -67,6 +67,9 @@ class ContextualVariable {
private:
V8_EXPORT_PRIVATE static VarType*& Top();
+
+ static bool HasScope() { return Top() != nullptr; }
+ friend class MessageBuilder;
};
// Usage: DECLARE_CONTEXTUAL_VARIABLE(VarName, VarType)
diff --git a/deps/v8/src/torque/csa-generator.cc b/deps/v8/src/torque/csa-generator.cc
index cdbcc64b01..d7233aceb2 100644
--- a/deps/v8/src/torque/csa-generator.cc
+++ b/deps/v8/src/torque/csa-generator.cc
@@ -4,7 +4,7 @@
#include "src/torque/csa-generator.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/torque/type-oracle.h"
#include "src/torque/utils.h"
@@ -56,14 +56,10 @@ Stack<std::string> CSAGenerator::EmitBlock(const Block* block) {
}
void CSAGenerator::EmitSourcePosition(SourcePosition pos, bool always_emit) {
- std::string file = SourceFileMap::GetSource(pos.source);
+ const std::string& file = SourceFileMap::GetSource(pos.source);
if (always_emit || !previous_position_.CompareStartIgnoreColumn(pos)) {
// Lines in Torque SourcePositions are zero-based, while the
// CodeStubAssembler and downwind systems are one-based.
- for (auto& c : file) {
- if (c == '\\')
- c = '/';
- }
out_ << " ca_.SetSourcePosition(\"" << file << "\", "
<< (pos.start.line + 1) << ");\n";
previous_position_ = pos;
@@ -135,8 +131,7 @@ void CSAGenerator::EmitInstruction(
} else if (results.size() == 1) {
out_ << results[0] << " = ";
}
- out_ << instruction.constant->ExternalAssemblerName() << "(state_)."
- << instruction.constant->name()->value << "()";
+ out_ << instruction.constant->external_name() << "(state_)";
if (type->IsStructType()) {
out_ << ".Flatten();\n";
} else {
@@ -325,8 +320,12 @@ void CSAGenerator::EmitInstruction(const CallCsaMacroInstruction& instruction,
DCHECK_EQ(0, results.size());
}
}
- out_ << instruction.macro->external_assembler_name() << "(state_)."
- << instruction.macro->ExternalName() << "(";
+ if (ExternMacro* extern_macro = ExternMacro::DynamicCast(instruction.macro)) {
+ out_ << extern_macro->external_assembler_name() << "(state_).";
+ } else {
+ args.insert(args.begin(), "state_");
+ }
+ out_ << instruction.macro->ExternalName() << "(";
PrintCommaSeparatedList(out_, args);
if (needs_flattening) {
out_ << ").Flatten();\n";
@@ -390,8 +389,12 @@ void CSAGenerator::EmitInstruction(
PrintCommaSeparatedList(out_, results);
out_ << ") = ";
}
- out_ << instruction.macro->external_assembler_name() << "(state_)."
- << instruction.macro->ExternalName() << "(";
+ if (ExternMacro* extern_macro = ExternMacro::DynamicCast(instruction.macro)) {
+ out_ << extern_macro->external_assembler_name() << "(state_).";
+ } else {
+ args.insert(args.begin(), "state_");
+ }
+ out_ << instruction.macro->ExternalName() << "(";
PrintCommaSeparatedList(out_, args);
bool first = args.empty();
for (size_t i = 0; i < label_names.size(); ++i) {
@@ -584,12 +587,16 @@ void CSAGenerator::EmitInstruction(const CallRuntimeInstruction& instruction,
PreCallableExceptionPreparation(instruction.catch_block);
Stack<std::string> pre_call_stack = *stack;
if (result_types.size() == 1) {
+ std::string generated_type = result_types[0]->GetGeneratedTNodeTypeName();
stack->Push(result_name);
- out_ << " " << result_name
- << " = TORQUE_CAST(CodeStubAssembler(state_).CallRuntime(Runtime::k"
+ out_ << " " << result_name << " = ";
+ if (generated_type != "Object") out_ << "TORQUE_CAST(";
+ out_ << "CodeStubAssembler(state_).CallRuntime(Runtime::k"
<< instruction.runtime_function->ExternalName() << ", ";
PrintCommaSeparatedList(out_, arguments);
- out_ << "));\n";
+ out_ << ")";
+ if (generated_type != "Object") out_ << ")";
+ out_ << "; \n";
out_ << " USE(" << result_name << ");\n";
} else {
DCHECK_EQ(0, result_types.size());
diff --git a/deps/v8/src/torque/declarable.cc b/deps/v8/src/torque/declarable.cc
index 375aa8222c..1fd07d5b0d 100644
--- a/deps/v8/src/torque/declarable.cc
+++ b/deps/v8/src/torque/declarable.cc
@@ -7,6 +7,7 @@
#include "src/torque/declarable.h"
#include "src/torque/global-context.h"
+#include "src/torque/type-visitor.h"
namespace v8 {
namespace internal {
@@ -126,6 +127,22 @@ bool Namespace::IsDefaultNamespace() const {
bool Namespace::IsTestNamespace() const { return name() == kTestNamespaceName; }
+const Type* TypeAlias::Resolve() const {
+ if (!type_) {
+ CurrentScope::Scope scope_activator(ParentScope());
+ CurrentSourcePosition::Scope position_activator(Position());
+ TypeDeclaration* decl = *delayed_;
+ if (being_resolved_) {
+ std::stringstream s;
+ s << "Cannot create type " << decl->name->value
+ << " due to circular dependencies.";
+ ReportError(s.str());
+ }
+ type_ = TypeVisitor::ComputeType(decl);
+ }
+ return *type_;
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/declarable.h b/deps/v8/src/torque/declarable.h
index 2e78e1eeba..afa6d50d94 100644
--- a/deps/v8/src/torque/declarable.h
+++ b/deps/v8/src/torque/declarable.h
@@ -43,7 +43,8 @@ class Declarable {
virtual ~Declarable() = default;
enum Kind {
kNamespace,
- kMacro,
+ kTorqueMacro,
+ kExternMacro,
kMethod,
kBuiltin,
kRuntimeFunction,
@@ -55,8 +56,10 @@ class Declarable {
};
Kind kind() const { return kind_; }
bool IsNamespace() const { return kind() == kNamespace; }
- bool IsMacro() const { return kind() == kMacro || kind() == kMethod; }
+ bool IsMacro() const { return IsTorqueMacro() || IsExternMacro(); }
+ bool IsTorqueMacro() const { return kind() == kTorqueMacro || IsMethod(); }
bool IsMethod() const { return kind() == kMethod; }
+ bool IsExternMacro() const { return kind() == kExternMacro; }
bool IsIntrinsic() const { return kind() == kIntrinsic; }
bool IsBuiltin() const { return kind() == kBuiltin; }
bool IsRuntimeFunction() const { return kind() == kRuntimeFunction; }
@@ -72,7 +75,28 @@ class Declarable {
}
virtual const char* type_name() const { return "<<unknown>>"; }
Scope* ParentScope() const { return parent_scope_; }
- const SourcePosition& pos() const { return pos_; }
+
+ // The SourcePosition of the whole declarable. For example, for a macro
+ // this will encompass not only the signature, but also the body.
+ SourcePosition Position() const { return position_; }
+ void SetPosition(const SourcePosition& position) { position_ = position; }
+
+ // The SourcePosition of the identifying name of the declarable. For example,
+ // for a macro this will be the SourcePosition of the name.
+ // Note that this SourcePosition might not make sense for all kinds of
+ // declarables, in that case, the default SourcePosition is returned.
+ SourcePosition IdentifierPosition() const {
+ return identifier_position_.source.IsValid() ? identifier_position_
+ : position_;
+ }
+ void SetIdentifierPosition(const SourcePosition& position) {
+ identifier_position_ = position;
+ }
+
+ bool IsUserDefined() const { return is_user_defined_; }
+ void SetIsUserDefined(bool is_user_defined) {
+ is_user_defined_ = is_user_defined;
+ }
protected:
explicit Declarable(Kind kind) : kind_(kind) {}
@@ -80,7 +104,9 @@ class Declarable {
private:
const Kind kind_;
Scope* const parent_scope_ = CurrentScope::Get();
- SourcePosition pos_ = CurrentSourcePosition::Get();
+ SourcePosition position_ = CurrentSourcePosition::Get();
+ SourcePosition identifier_position_ = SourcePosition::Invalid();
+ bool is_user_defined_ = true;
};
#define DECLARE_DECLARABLE_BOILERPLATE(x, y) \
@@ -155,9 +181,6 @@ class Namespace : public Scope {
explicit Namespace(const std::string& name)
: Scope(Declarable::kNamespace), name_(name) {}
const std::string& name() const { return name_; }
- std::string ExternalName() const {
- return CamelifyString(name()) + "BuiltinsFromDSLAssembler";
- }
bool IsDefaultNamespace() const;
bool IsTestNamespace() const;
std::ostream& source_stream() { return source_stream_; }
@@ -208,18 +231,19 @@ class NamespaceConstant : public Value {
public:
DECLARE_DECLARABLE_BOILERPLATE(NamespaceConstant, constant)
- Expression* body() { return body_; }
- std::string ExternalAssemblerName() const {
- return Namespace::cast(ParentScope())->ExternalName();
- }
+ const std::string& external_name() const { return external_name_; }
+ Expression* body() const { return body_; }
private:
friend class Declarations;
- explicit NamespaceConstant(Identifier* constant_name, const Type* type,
+ explicit NamespaceConstant(Identifier* constant_name,
+ std::string external_name, const Type* type,
Expression* body)
: Value(Declarable::kNamespaceConstant, type, constant_name),
+ external_name_(std::move(external_name)),
body_(body) {}
+ std::string external_name_;
Expression* body_;
};
@@ -288,39 +312,73 @@ class Macro : public Callable {
if (type->IsStructType()) return true;
}
}
+ // Intrinsics that are used internally in Torque and implemented as torque
+ // code should be inlined and not generate C++ definitions.
+ if (ReadableName()[0] == '%') return true;
return Callable::ShouldBeInlined();
}
- const std::string& external_assembler_name() const {
- return external_assembler_name_;
- }
-
protected:
Macro(Declarable::Kind kind, std::string external_name,
- std::string readable_name, std::string external_assembler_name,
- const Signature& signature, bool transitioning,
- base::Optional<Statement*> body)
+ std::string readable_name, const Signature& signature,
+ bool transitioning, base::Optional<Statement*> body)
: Callable(kind, std::move(external_name), std::move(readable_name),
- signature, transitioning, body),
- external_assembler_name_(std::move(external_assembler_name)) {
+ signature, transitioning, body) {
if (signature.parameter_types.var_args) {
ReportError("Varargs are not supported for macros.");
}
}
+};
+
+class ExternMacro : public Macro {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(ExternMacro, ExternMacro)
+
+ const std::string& external_assembler_name() const {
+ return external_assembler_name_;
+ }
private:
friend class Declarations;
- Macro(std::string external_name, std::string readable_name,
- std::string external_assembler_name, const Signature& signature,
- bool transitioning, base::Optional<Statement*> body)
- : Macro(Declarable::kMacro, std::move(external_name),
- std::move(readable_name), external_assembler_name, signature,
- transitioning, body) {}
+ ExternMacro(const std::string& name, std::string external_assembler_name,
+ Signature signature, bool transitioning)
+ : Macro(Declarable::kExternMacro, name, name, std::move(signature),
+ transitioning, base::nullopt),
+ external_assembler_name_(std::move(external_assembler_name)) {}
std::string external_assembler_name_;
};
-class Method : public Macro {
+class TorqueMacro : public Macro {
+ public:
+ DECLARE_DECLARABLE_BOILERPLATE(TorqueMacro, TorqueMacro)
+ bool IsExportedToCSA() const { return exported_to_csa_; }
+
+ protected:
+ TorqueMacro(Declarable::Kind kind, std::string external_name,
+ std::string readable_name, const Signature& signature,
+ bool transitioning, base::Optional<Statement*> body,
+ bool is_user_defined, bool exported_to_csa)
+ : Macro(kind, std::move(external_name), std::move(readable_name),
+ signature, transitioning, body),
+ exported_to_csa_(exported_to_csa) {
+ SetIsUserDefined(is_user_defined);
+ }
+
+ private:
+ friend class Declarations;
+ TorqueMacro(std::string external_name, std::string readable_name,
+ const Signature& signature, bool transitioning,
+ base::Optional<Statement*> body, bool is_user_defined,
+ bool exported_to_csa)
+ : TorqueMacro(Declarable::kTorqueMacro, std::move(external_name),
+ std::move(readable_name), signature, transitioning, body,
+ is_user_defined, exported_to_csa) {}
+
+ bool exported_to_csa_ = false;
+};
+
+class Method : public TorqueMacro {
public:
DECLARE_DECLARABLE_BOILERPLATE(Method, Method)
bool ShouldBeInlined() const override {
@@ -334,11 +392,11 @@ class Method : public Macro {
private:
friend class Declarations;
Method(AggregateType* aggregate_type, std::string external_name,
- std::string readable_name, std::string external_assembler_name,
- const Signature& signature, bool transitioning, Statement* body)
- : Macro(Declarable::kMethod, std::move(external_name),
- std::move(readable_name), std::move(external_assembler_name),
- signature, transitioning, body),
+ std::string readable_name, const Signature& signature,
+ bool transitioning, Statement* body)
+ : TorqueMacro(Declarable::kMethod, std::move(external_name),
+ std::move(readable_name), signature, transitioning, body,
+ true, false),
aggregate_type_(aggregate_type) {}
AggregateType* aggregate_type_;
};
@@ -437,7 +495,11 @@ class TypeAlias : public Declarable {
public:
DECLARE_DECLARABLE_BOILERPLATE(TypeAlias, type_alias)
- const Type* type() const { return type_; }
+ const Type* type() const {
+ if (type_) return *type_;
+ return Resolve();
+ }
+ const Type* Resolve() const;
bool IsRedeclaration() const { return redeclaration_; }
SourcePosition GetDeclarationPosition() const {
return declaration_position_;
@@ -445,6 +507,8 @@ class TypeAlias : public Declarable {
private:
friend class Declarations;
+ friend class TypeVisitor;
+
explicit TypeAlias(
const Type* type, bool redeclaration,
SourcePosition declaration_position = SourcePosition::Invalid())
@@ -452,8 +516,17 @@ class TypeAlias : public Declarable {
type_(type),
redeclaration_(redeclaration),
declaration_position_(declaration_position) {}
+ explicit TypeAlias(
+ TypeDeclaration* type, bool redeclaration,
+ SourcePosition declaration_position = SourcePosition::Invalid())
+ : Declarable(Declarable::kTypeAlias),
+ delayed_(type),
+ redeclaration_(redeclaration),
+ declaration_position_(declaration_position) {}
- const Type* type_;
+ mutable bool being_resolved_ = false;
+ mutable base::Optional<TypeDeclaration*> delayed_;
+ mutable base::Optional<const Type*> type_;
bool redeclaration_;
const SourcePosition declaration_position_;
};
diff --git a/deps/v8/src/torque/declaration-visitor.cc b/deps/v8/src/torque/declaration-visitor.cc
index fa3a147f2c..34914d7b72 100644
--- a/deps/v8/src/torque/declaration-visitor.cc
+++ b/deps/v8/src/torque/declaration-visitor.cc
@@ -4,13 +4,42 @@
#include "src/torque/declaration-visitor.h"
-#include "src/globals.h"
#include "src/torque/ast.h"
+#include "src/torque/server-data.h"
+#include "src/torque/type-visitor.h"
namespace v8 {
namespace internal {
namespace torque {
+Namespace* GetOrCreateNamespace(const std::string& name) {
+ std::vector<Namespace*> existing_namespaces = FilterDeclarables<Namespace>(
+ Declarations::TryLookupShallow(QualifiedName(name)));
+ if (existing_namespaces.empty()) {
+ return Declarations::DeclareNamespace(name);
+ }
+ DCHECK_EQ(1, existing_namespaces.size());
+ return existing_namespaces.front();
+}
+
+void PredeclarationVisitor::Predeclare(Declaration* decl) {
+ CurrentSourcePosition::Scope scope(decl->pos);
+ switch (decl->kind) {
+#define ENUM_ITEM(name) \
+ case AstNode::Kind::k##name: \
+ return Predeclare(name::cast(decl));
+ AST_TYPE_DECLARATION_NODE_KIND_LIST(ENUM_ITEM)
+#undef ENUM_ITEM
+ case AstNode::Kind::kNamespaceDeclaration:
+ return Predeclare(NamespaceDeclaration::cast(decl));
+ case AstNode::Kind::kGenericDeclaration:
+ return Predeclare(GenericDeclaration::cast(decl));
+ default:
+ // Only processes type declaration nodes, namespaces and generics.
+ break;
+ }
+}
+
void DeclarationVisitor::Visit(Declaration* decl) {
CurrentSourcePosition::Scope scope(decl->pos);
switch (decl->kind) {
@@ -47,52 +76,55 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
Builtin::Kind kind = !javascript ? Builtin::kStub
: varargs ? Builtin::kVarArgsJavaScript
: Builtin::kFixedArgsJavaScript;
-
+ const Type* context_type =
+ Declarations::LookupGlobalType(CONTEXT_TYPE_STRING);
if (signature.types().size() == 0 ||
- !(signature.types()[0] ==
- Declarations::LookupGlobalType(CONTEXT_TYPE_STRING))) {
- std::stringstream stream;
- stream << "first parameter to builtin " << decl->name
- << " is not a context but should be";
- ReportError(stream.str());
+ !(signature.types()[0] == context_type)) {
+ Error("First parameter to builtin ", decl->name, " must be of type ",
+ *context_type);
}
if (varargs && !javascript) {
- std::stringstream stream;
- stream << "builtin " << decl->name
- << " with rest parameters must be a JavaScript builtin";
- ReportError(stream.str());
+ Error("Rest parameters require ", decl->name,
+ " to be a JavaScript builtin");
}
if (javascript) {
- if (signature.types().size() < 2 ||
+ if (signature.types().size() >= 2 &&
!(signature.types()[1] ==
Declarations::LookupGlobalType(OBJECT_TYPE_STRING))) {
- std::stringstream stream;
- stream << "second parameter to javascript builtin " << decl->name
- << " is " << *signature.types()[1] << " but should be Object";
- ReportError(stream.str());
+ Error("Second parameter to javascript builtin ", decl->name, " is ",
+ *signature.types()[1], " but should be Object");
}
}
for (size_t i = 0; i < signature.types().size(); ++i) {
if (const StructType* type =
StructType::DynamicCast(signature.types()[i])) {
- std::stringstream stream;
- stream << "builtin '" << decl->name << "' uses the struct '"
- << type->name() << "' as argument '"
- << signature.parameter_names[i] << "'. This is not supported.";
- ReportError(stream.str());
+ Error("Builtin '", decl->name, "' uses the struct '", type->name(),
+ "' as argument '", signature.parameter_names[i],
+ "', which is not supported.");
+ }
+ }
+
+ if (TorqueBuiltinDeclaration::DynamicCast(decl)) {
+ for (size_t i = 0; i < signature.types().size(); ++i) {
+ const Type* type = signature.types()[i];
+ if (!type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ const Identifier* id = signature.parameter_names.size() > i
+ ? signature.parameter_names[i]
+ : nullptr;
+ Error("Untagged argument ", id ? (id->value + " ") : "", "at position ",
+ i, " to builtin ", decl->name, " is not supported.")
+ .Position(id ? id->pos : decl->pos);
+ }
}
}
if (const StructType* struct_type =
StructType::DynamicCast(signature.return_type)) {
- std::stringstream stream;
- stream << "builtins (in this case " << decl->name
- << ") cannot return structs (in this case " << struct_type->name()
- << ")";
- ReportError(stream.str());
+ Error("Builtins ", decl->name, " cannot return structs ",
+ struct_type->name());
}
return Declarations::CreateBuiltin(
@@ -103,27 +135,28 @@ Builtin* DeclarationVisitor::CreateBuiltin(BuiltinDeclaration* decl,
void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl,
const Signature& signature,
base::Optional<Statement*> body) {
- if (GlobalContext::verbose()) {
- std::cout << "found declaration of external runtime " << decl->name
- << " with signature ";
- }
-
if (signature.parameter_types.types.size() == 0 ||
!(signature.parameter_types.types[0] ==
Declarations::LookupGlobalType(CONTEXT_TYPE_STRING))) {
- std::stringstream stream;
- stream << "first parameter to runtime " << decl->name
- << " is not a context but should be";
- ReportError(stream.str());
+ ReportError(
+ "first parameter to runtime functions has to be the context and have "
+ "type Context, but found type ",
+ signature.parameter_types.types[0]);
}
-
- if (signature.return_type->IsStructType()) {
- std::stringstream stream;
- stream << "runtime functions (in this case" << decl->name
- << ") cannot return structs (in this case "
- << static_cast<const StructType*>(signature.return_type)->name()
- << ")";
- ReportError(stream.str());
+ if (!(signature.return_type->IsSubtypeOf(TypeOracle::GetObjectType()) ||
+ signature.return_type == TypeOracle::GetVoidType() ||
+ signature.return_type == TypeOracle::GetNeverType())) {
+ ReportError(
+ "runtime functions can only return tagged values, but found type ",
+ signature.return_type);
+ }
+ for (const Type* parameter_type : signature.parameter_types.types) {
+ if (!parameter_type->IsSubtypeOf(TypeOracle::GetObjectType())) {
+ ReportError(
+ "runtime functions can only take tagged values as parameters, but "
+ "found type ",
+ *parameter_type);
+ }
}
Declarations::DeclareRuntimeFunction(decl->name, signature,
@@ -133,12 +166,7 @@ void DeclarationVisitor::Visit(ExternalRuntimeDeclaration* decl,
void DeclarationVisitor::Visit(ExternalMacroDeclaration* decl,
const Signature& signature,
base::Optional<Statement*> body) {
- if (GlobalContext::verbose()) {
- std::cout << "found declaration of external macro " << decl->name
- << " with signature ";
- }
-
- Declarations::DeclareMacro(decl->name, decl->external_assembler_name,
+ Declarations::DeclareMacro(decl->name, true, decl->external_assembler_name,
signature, decl->transitioning, body, decl->op);
}
@@ -152,8 +180,12 @@ void DeclarationVisitor::Visit(TorqueBuiltinDeclaration* decl,
void DeclarationVisitor::Visit(TorqueMacroDeclaration* decl,
const Signature& signature,
base::Optional<Statement*> body) {
- Declarations::DeclareMacro(decl->name, base::nullopt, signature,
- decl->transitioning, body, decl->op);
+ Macro* macro = Declarations::DeclareMacro(
+ decl->name, decl->export_to_csa, base::nullopt, signature,
+ decl->transitioning, body, decl->op);
+ // TODO(szuend): Set identifier_position to decl->name->pos once all callable
+ // names are changed from std::string to Identifier*.
+ macro->SetPosition(decl->pos);
}
void DeclarationVisitor::Visit(IntrinsicDeclaration* decl,
@@ -164,18 +196,15 @@ void DeclarationVisitor::Visit(IntrinsicDeclaration* decl,
void DeclarationVisitor::Visit(ConstDeclaration* decl) {
Declarations::DeclareNamespaceConstant(
- decl->name, Declarations::GetType(decl->type), decl->expression);
+ decl->name, TypeVisitor::ComputeType(decl->type), decl->expression);
}
void DeclarationVisitor::Visit(StandardDeclaration* decl) {
- Signature signature = MakeSignature(decl->callable->signature.get());
+ Signature signature =
+ TypeVisitor::MakeSignature(decl->callable->signature.get());
Visit(decl->callable, signature, decl->body);
}
-void DeclarationVisitor::Visit(GenericDeclaration* decl) {
- Declarations::DeclareGeneric(decl->callable->name, decl);
-}
-
void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
if ((decl->body != nullptr) == decl->external) {
std::stringstream stream;
@@ -184,14 +213,17 @@ void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
ReportError(stream.str());
}
- std::vector<Generic*> generic_list = Declarations::LookupGeneric(decl->name);
+ std::vector<Generic*> generic_list =
+ Declarations::LookupGeneric(decl->name->value);
// Find the matching generic specialization based on the concrete parameter
// list.
Generic* matching_generic = nullptr;
- Signature signature_with_types = MakeSignature(decl->signature.get());
+ Signature signature_with_types =
+ TypeVisitor::MakeSignature(decl->signature.get());
for (Generic* generic : generic_list) {
- Signature generic_signature_with_types = MakeSpecializedSignature(
- SpecializationKey{generic, GetTypeVector(decl->generic_parameters)});
+ Signature generic_signature_with_types =
+ MakeSpecializedSignature(SpecializationKey{
+ generic, TypeVisitor::ComputeTypeVector(decl->generic_parameters)});
if (signature_with_types.HasSameTypesAs(generic_signature_with_types,
ParameterMode::kIgnoreImplicit)) {
if (matching_generic != nullptr) {
@@ -219,19 +251,25 @@ void DeclarationVisitor::Visit(SpecializationDeclaration* decl) {
for (Generic* generic : generic_list) {
stream << "\n "
<< MakeSpecializedSignature(SpecializationKey{
- generic, GetTypeVector(decl->generic_parameters)});
+ generic,
+ TypeVisitor::ComputeTypeVector(decl->generic_parameters)});
}
ReportError(stream.str());
}
- Specialize(SpecializationKey{matching_generic,
- GetTypeVector(decl->generic_parameters)},
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(decl->name->pos,
+ matching_generic->IdentifierPosition());
+ }
+
+ Specialize(SpecializationKey{matching_generic, TypeVisitor::ComputeTypeVector(
+ decl->generic_parameters)},
matching_generic->declaration()->callable, decl->signature.get(),
- decl->body);
+ decl->body, decl->pos);
}
void DeclarationVisitor::Visit(ExternConstDeclaration* decl) {
- const Type* type = Declarations::GetType(decl->type);
+ const Type* type = TypeVisitor::ComputeType(decl->type);
if (!type->IsConstexpr()) {
std::stringstream stream;
stream << "extern constants must have constexpr type, but found: \""
@@ -242,125 +280,10 @@ void DeclarationVisitor::Visit(ExternConstDeclaration* decl) {
Declarations::DeclareExternConstant(decl->name, type, decl->literal);
}
-void DeclarationVisitor::DeclareMethods(
- AggregateType* container_type, const std::vector<Declaration*>& methods) {
- // Declare the class' methods
- for (auto declaration : methods) {
- CurrentSourcePosition::Scope pos_scope(declaration->pos);
- StandardDeclaration* standard_declaration =
- StandardDeclaration::DynamicCast(declaration);
- DCHECK(standard_declaration);
- TorqueMacroDeclaration* method =
- TorqueMacroDeclaration::DynamicCast(standard_declaration->callable);
- Signature signature = MakeSignature(method->signature.get());
- signature.parameter_names.insert(
- signature.parameter_names.begin() + signature.implicit_count,
- MakeNode<Identifier>(kThisParameterName));
- Statement* body = *(standard_declaration->body);
- std::string method_name(method->name);
- signature.parameter_types.types.insert(
- signature.parameter_types.types.begin() + signature.implicit_count,
- container_type);
- Declarations::CreateMethod(container_type, method_name, signature, false,
- body);
- }
-}
-
-void DeclarationVisitor::Visit(StructDeclaration* decl) {
- StructType* struct_type = Declarations::DeclareStruct(decl->name);
- struct_declarations_.push_back(
- std::make_tuple(CurrentScope::Get(), decl, struct_type));
-}
-
-void DeclarationVisitor::Visit(ClassDeclaration* decl) {
- ClassType* new_class;
- if (decl->is_extern) {
- if (!decl->super) {
- ReportError("Extern class must extend another type.");
- }
- // Compute the offset of the class' first member. If the class extends
- // another class, it's the size of the extended class, otherwise zero.
- const Type* super_type = Declarations::LookupType(*decl->super);
- if (super_type != TypeOracle::GetTaggedType()) {
- const ClassType* super_class = ClassType::DynamicCast(super_type);
- if (!super_class) {
- ReportError(
- "class \"", decl->name->value,
- "\" must extend either Tagged or an already declared class");
- }
- }
-
- // The generates clause must create a TNode<>
- std::string generates = decl->name->value;
- if (decl->generates) {
- generates = *decl->generates;
- if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
- generates.substr(generates.length() - 1, 1) != ">") {
- ReportError("generated type \"", generates,
- "\" should be of the form \"TNode<...>\"");
- }
- generates = generates.substr(6, generates.length() - 7);
- }
-
- new_class = Declarations::DeclareClass(
- super_type, decl->name, decl->is_extern, decl->generate_print,
- decl->transient, generates);
- } else {
- if (decl->super) {
- ReportError("Only extern classes can inherit.");
- }
- if (decl->generates) {
- ReportError("Only extern classes can specify a generated type.");
- }
- new_class = Declarations::DeclareClass(
- TypeOracle::GetTaggedType(), decl->name, decl->is_extern,
- decl->generate_print, decl->transient, "FixedArray");
- }
- GlobalContext::RegisterClass(decl->name->value, new_class);
- class_declarations_.push_back(
- std::make_tuple(CurrentScope::Get(), decl, new_class));
-}
-
void DeclarationVisitor::Visit(CppIncludeDeclaration* decl) {
GlobalContext::AddCppInclude(decl->include_path);
}
-void DeclarationVisitor::Visit(TypeDeclaration* decl) {
- std::string generates = decl->generates ? *decl->generates : std::string("");
- if (decl->generates) {
- if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
- generates.substr(generates.length() - 1, 1) != ">") {
- ReportError("generated type \"", generates,
- "\" should be of the form \"TNode<...>\"");
- }
- generates = generates.substr(6, generates.length() - 7);
- }
-
- const AbstractType* type = Declarations::DeclareAbstractType(
- decl->name, decl->transient, generates, {}, decl->extends);
-
- if (decl->constexpr_generates) {
- if (decl->transient) {
- ReportError("cannot declare a transient type that is also constexpr");
- }
- // DeclareAbstractType expects an Identifier*. A new one is created from the
- // declaration, and the SourcePosition copied from the original name.
- Identifier* constexpr_name =
- MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + decl->name->value);
- constexpr_name->pos = decl->name->pos;
-
- base::Optional<Identifier*> constexpr_extends;
- if (decl->extends) {
- constexpr_extends =
- MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + (*decl->extends)->value);
- (*constexpr_extends)->pos = (*decl->extends)->pos;
- }
- Declarations::DeclareAbstractType(constexpr_name, false,
- *decl->constexpr_generates, type,
- constexpr_extends);
- }
-}
-
void DeclarationVisitor::DeclareSpecializedTypes(const SpecializationKey& key) {
size_t i = 0;
const std::size_t generic_parameter_count =
@@ -376,7 +299,8 @@ void DeclarationVisitor::DeclareSpecializedTypes(const SpecializationKey& key) {
for (auto type : key.specialized_types) {
Identifier* generic_type_name =
key.generic->declaration()->generic_parameters[i++];
- Declarations::DeclareType(generic_type_name, type, true);
+ TypeAlias* alias = Declarations::DeclareType(generic_type_name, type);
+ alias->SetIsUserDefined(false);
}
}
@@ -388,7 +312,8 @@ Signature DeclarationVisitor::MakeSpecializedSignature(
Namespace tmp_namespace("_tmp");
CurrentScope::Scope tmp_namespace_scope(&tmp_namespace);
DeclareSpecializedTypes(key);
- return MakeSignature(key.generic->declaration()->callable->signature.get());
+ return TypeVisitor::MakeSignature(
+ key.generic->declaration()->callable->signature.get());
}
Callable* DeclarationVisitor::SpecializeImplicit(const SpecializationKey& key) {
@@ -397,12 +322,13 @@ Callable* DeclarationVisitor::SpecializeImplicit(const SpecializationKey& key) {
nullptr) {
ReportError("missing specialization of ", key.generic->name(),
" with types <", key.specialized_types, "> declared at ",
- key.generic->pos());
+ key.generic->Position());
}
CurrentScope::Scope generic_scope(key.generic->ParentScope());
- Callable* result =
- Specialize(key, key.generic->declaration()->callable, base::nullopt,
- key.generic->declaration()->body);
+ Callable* result = Specialize(key, key.generic->declaration()->callable,
+ base::nullopt, key.generic->declaration()->body,
+ CurrentSourcePosition::Get());
+ result->SetIsUserDefined(false);
CurrentScope::Scope callable_scope(result);
DeclareSpecializedTypes(key);
return result;
@@ -411,10 +337,8 @@ Callable* DeclarationVisitor::SpecializeImplicit(const SpecializationKey& key) {
Callable* DeclarationVisitor::Specialize(
const SpecializationKey& key, CallableNode* declaration,
base::Optional<const CallableNodeSignature*> signature,
- base::Optional<Statement*> body) {
- // TODO(tebbi): The error should point to the source position where the
- // instantiation was requested.
- CurrentSourcePosition::Scope pos_scope(key.generic->declaration()->pos);
+ base::Optional<Statement*> body, SourcePosition position) {
+ CurrentSourcePosition::Scope pos_scope(position);
size_t generic_parameter_count =
key.generic->declaration()->generic_parameters.size();
if (generic_parameter_count != key.specialized_types.size()) {
@@ -431,8 +355,8 @@ Callable* DeclarationVisitor::Specialize(
" with types <", key.specialized_types, ">");
}
- Signature type_signature =
- signature ? MakeSignature(*signature) : MakeSpecializedSignature(key);
+ Signature type_signature = signature ? TypeVisitor::MakeSignature(*signature)
+ : MakeSpecializedSignature(key);
std::string generated_name = Declarations::GetGeneratedCallableName(
declaration->name, key.specialized_types);
@@ -447,9 +371,9 @@ Callable* DeclarationVisitor::Specialize(
readable_name << ">";
Callable* callable;
if (MacroDeclaration::DynamicCast(declaration) != nullptr) {
- callable = Declarations::CreateMacro(generated_name, readable_name.str(),
- base::nullopt, type_signature,
- declaration->transitioning, *body);
+ callable = Declarations::CreateTorqueMacro(
+ generated_name, readable_name.str(), false, type_signature,
+ declaration->transitioning, *body, true);
} else if (IntrinsicDeclaration::DynamicCast(declaration) != nullptr) {
callable = Declarations::CreateIntrinsic(declaration->name, type_signature);
} else {
@@ -461,161 +385,13 @@ Callable* DeclarationVisitor::Specialize(
return callable;
}
-void DeclarationVisitor::FinalizeStructFieldsAndMethods(
- StructType* struct_type, StructDeclaration* struct_declaration) {
- size_t offset = 0;
- for (auto& field : struct_declaration->fields) {
- CurrentSourcePosition::Scope position_activator(
- field.name_and_type.type->pos);
- const Type* field_type = Declarations::GetType(field.name_and_type.type);
- struct_type->RegisterField({field.name_and_type.name->pos,
- struct_type,
- base::nullopt,
- {field.name_and_type.name->value, field_type},
- offset,
- false,
- field.const_qualified});
- offset += LoweredSlotCount(field_type);
- }
- CurrentSourcePosition::Scope position_activator(struct_declaration->pos);
- DeclareMethods(struct_type, struct_declaration->methods);
-}
-
-void DeclarationVisitor::FinalizeClassFieldsAndMethods(
- ClassType* class_type, ClassDeclaration* class_declaration) {
- const ClassType* super_class = class_type->GetSuperClass();
- size_t class_offset = super_class ? super_class->size() : 0;
- bool seen_indexed_field = false;
- for (ClassFieldExpression& field_expression : class_declaration->fields) {
- CurrentSourcePosition::Scope position_activator(
- field_expression.name_and_type.type->pos);
- const Type* field_type =
- Declarations::GetType(field_expression.name_and_type.type);
- if (!class_declaration->is_extern) {
- if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- ReportError("non-extern classes do not support untagged fields");
- }
- if (field_expression.weak) {
- ReportError("non-extern classes do not support weak fields");
- }
+void PredeclarationVisitor::ResolvePredeclarations() {
+ for (auto& p : GlobalContext::AllDeclarables()) {
+ if (const TypeAlias* alias = TypeAlias::DynamicCast(p.get())) {
+ CurrentScope::Scope scope_activator(alias->ParentScope());
+ CurrentSourcePosition::Scope position_activator(alias->Position());
+ alias->Resolve();
}
- if (field_expression.index) {
- if (seen_indexed_field ||
- (super_class && super_class->HasIndexedField())) {
- ReportError(
- "only one indexable field is currently supported per class");
- }
- seen_indexed_field = true;
- const Field* index_field =
- &(class_type->LookupField(*field_expression.index));
- class_type->RegisterField(
- {field_expression.name_and_type.name->pos,
- class_type,
- index_field,
- {field_expression.name_and_type.name->value, field_type},
- class_offset,
- field_expression.weak,
- field_expression.const_qualified});
- } else {
- if (seen_indexed_field) {
- ReportError("cannot declare non-indexable field \"",
- field_expression.name_and_type.name,
- "\" after an indexable field "
- "declaration");
- }
- const Field& field = class_type->RegisterField(
- {field_expression.name_and_type.name->pos,
- class_type,
- base::nullopt,
- {field_expression.name_and_type.name->value, field_type},
- class_offset,
- field_expression.weak,
- field_expression.const_qualified});
- size_t field_size;
- std::string size_string;
- std::string machine_type;
- std::tie(field_size, size_string, machine_type) =
- field.GetFieldSizeInformation();
- // Our allocations don't support alignments beyond kTaggedSize.
- size_t alignment = std::min(size_t{kTaggedSize}, field_size);
- if (class_offset % alignment != 0) {
- ReportError("field ", field_expression.name_and_type.name,
- " at offset ", class_offset, " is not ", alignment,
- "-byte aligned.");
- }
- class_offset += field_size;
- }
- }
- class_type->SetSize(class_offset);
-
- // For each field, construct AST snippits that implement a CSA accessor
- // function and define a corresponding '.field' operator. The
- // implementation iterator will turn the snippits into code.
- for (auto& field : class_type->fields()) {
- if (field.index) continue;
- CurrentSourcePosition::Scope position_activator(field.pos);
- IdentifierExpression* parameter =
- MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"o"}));
-
- // Load accessor
- std::string camel_field_name = CamelifyString(field.name_and_type.name);
- std::string load_macro_name =
- "Load" + class_type->name() + camel_field_name;
- Signature load_signature;
- load_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
- load_signature.parameter_types.types.push_back(class_type);
- load_signature.parameter_types.var_args = false;
- load_signature.return_type = field.name_and_type.type;
- Statement* load_body =
- MakeNode<ReturnStatement>(MakeNode<FieldAccessExpression>(
- parameter, MakeNode<Identifier>(field.name_and_type.name)));
- Declarations::DeclareMacro(load_macro_name, base::nullopt, load_signature,
- false, load_body);
-
- // Store accessor
- IdentifierExpression* value = MakeNode<IdentifierExpression>(
- std::vector<std::string>{}, MakeNode<Identifier>(std::string{"v"}));
- std::string store_macro_name =
- "Store" + class_type->name() + camel_field_name;
- Signature store_signature;
- store_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
- store_signature.parameter_names.push_back(MakeNode<Identifier>("v"));
- store_signature.parameter_types.types.push_back(class_type);
- store_signature.parameter_types.types.push_back(field.name_and_type.type);
- store_signature.parameter_types.var_args = false;
- // TODO(danno): Store macros probably should return their value argument
- store_signature.return_type = TypeOracle::GetVoidType();
- Statement* store_body =
- MakeNode<ExpressionStatement>(MakeNode<AssignmentExpression>(
- MakeNode<FieldAccessExpression>(
- parameter, MakeNode<Identifier>(field.name_and_type.name)),
- value));
- Declarations::DeclareMacro(store_macro_name, base::nullopt, store_signature,
- false, store_body);
- }
-
- DeclareMethods(class_type, class_declaration->methods);
-}
-
-void DeclarationVisitor::FinalizeStructsAndClasses() {
- for (auto current_struct_info : struct_declarations_) {
- Scope* scope;
- StructDeclaration* struct_declaration;
- StructType* struct_type;
- std::tie(scope, struct_declaration, struct_type) = current_struct_info;
- CurrentScope::Scope scope_activator(scope);
- CurrentSourcePosition::Scope position_activator(struct_declaration->pos);
- FinalizeStructFieldsAndMethods(struct_type, struct_declaration);
- }
-
- for (auto current_class_info : class_declarations_) {
- Scope* scope;
- ClassDeclaration* class_declaration;
- ClassType* class_type;
- std::tie(scope, class_declaration, class_type) = current_class_info;
- CurrentScope::Scope scope_activator(scope);
- CurrentSourcePosition::Scope position_activator(class_declaration->pos);
- FinalizeClassFieldsAndMethods(class_type, class_declaration);
}
}
diff --git a/deps/v8/src/torque/declaration-visitor.h b/deps/v8/src/torque/declaration-visitor.h
index 855dd4f048..4c6053d86a 100644
--- a/deps/v8/src/torque/declaration-visitor.h
+++ b/deps/v8/src/torque/declaration-visitor.h
@@ -10,7 +10,6 @@
#include "src/base/macros.h"
#include "src/torque/declarations.h"
-#include "src/torque/file-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
@@ -19,94 +18,92 @@ namespace v8 {
namespace internal {
namespace torque {
-class DeclarationVisitor : public FileVisitor {
+Namespace* GetOrCreateNamespace(const std::string& name);
+
+class PredeclarationVisitor {
public:
- void Visit(Ast* ast) {
+ static void Predeclare(Ast* ast) {
CurrentScope::Scope current_namespace(GlobalContext::GetDefaultNamespace());
- for (Declaration* child : ast->declarations()) Visit(child);
+ for (Declaration* child : ast->declarations()) Predeclare(child);
}
+ static void ResolvePredeclarations();
- void Visit(Declaration* decl);
-
- Namespace* GetOrCreateNamespace(const std::string& name) {
- std::vector<Namespace*> existing_namespaces = FilterDeclarables<Namespace>(
- Declarations::TryLookupShallow(QualifiedName(name)));
- if (existing_namespaces.empty()) {
- return Declarations::DeclareNamespace(name);
- }
- DCHECK_EQ(1, existing_namespaces.size());
- return existing_namespaces.front();
+ private:
+ static void Predeclare(Declaration* decl);
+ static void Predeclare(NamespaceDeclaration* decl) {
+ CurrentScope::Scope current_scope(GetOrCreateNamespace(decl->name));
+ for (Declaration* child : decl->declarations) Predeclare(child);
}
+ static void Predeclare(TypeDeclaration* decl) {
+ Declarations::PredeclareTypeAlias(decl->name, decl, false);
+ }
+ static void Predeclare(GenericDeclaration* decl) {
+ Declarations::DeclareGeneric(decl->callable->name, decl);
+ }
+};
- void Visit(NamespaceDeclaration* decl) {
+class DeclarationVisitor {
+ public:
+ static void Visit(Ast* ast) {
+ CurrentScope::Scope current_namespace(GlobalContext::GetDefaultNamespace());
+ for (Declaration* child : ast->declarations()) Visit(child);
+ }
+ static void Visit(Declaration* decl);
+ static void Visit(NamespaceDeclaration* decl) {
CurrentScope::Scope current_scope(GetOrCreateNamespace(decl->name));
for (Declaration* child : decl->declarations) Visit(child);
}
- void Visit(TypeDeclaration* decl);
-
- void DeclareMethods(AggregateType* container,
- const std::vector<Declaration*>& methods);
- void Visit(StructDeclaration* decl);
- void Visit(ClassDeclaration* decl);
-
- void Visit(TypeAliasDeclaration* decl) {
- const Type* type = Declarations::GetType(decl->type);
- type->AddAlias(decl->name->value);
- Declarations::DeclareType(decl->name, type, true);
+ static void Visit(TypeDeclaration* decl) {
+ // Looking up the type will trigger type computation; this ensures errors
+ // are reported even if the type is unused.
+ Declarations::LookupType(decl->name);
}
- Builtin* CreateBuiltin(BuiltinDeclaration* decl, std::string external_name,
- std::string readable_name, Signature signature,
- base::Optional<Statement*> body);
- void Visit(ExternalBuiltinDeclaration* decl, const Signature& signature,
- base::Optional<Statement*> body) {
+ static Builtin* CreateBuiltin(BuiltinDeclaration* decl,
+ std::string external_name,
+ std::string readable_name, Signature signature,
+ base::Optional<Statement*> body);
+ static void Visit(ExternalBuiltinDeclaration* decl,
+ const Signature& signature,
+ base::Optional<Statement*> body) {
Declarations::Declare(
decl->name,
CreateBuiltin(decl, decl->name, decl->name, signature, base::nullopt));
}
- void Visit(ExternalRuntimeDeclaration* decl, const Signature& sig,
- base::Optional<Statement*> body);
- void Visit(ExternalMacroDeclaration* decl, const Signature& sig,
- base::Optional<Statement*> body);
- void Visit(TorqueBuiltinDeclaration* decl, const Signature& signature,
- base::Optional<Statement*> body);
- void Visit(TorqueMacroDeclaration* decl, const Signature& signature,
- base::Optional<Statement*> body);
- void Visit(IntrinsicDeclaration* decl, const Signature& signature,
- base::Optional<Statement*> body);
-
- void Visit(CallableNode* decl, const Signature& signature,
- base::Optional<Statement*> body);
-
- void Visit(ConstDeclaration* decl);
- void Visit(StandardDeclaration* decl);
- void Visit(GenericDeclaration* decl);
- void Visit(SpecializationDeclaration* decl);
- void Visit(ExternConstDeclaration* decl);
- void Visit(CppIncludeDeclaration* decl);
-
- Signature MakeSpecializedSignature(const SpecializationKey& key);
- Callable* SpecializeImplicit(const SpecializationKey& key);
- Callable* Specialize(const SpecializationKey& key, CallableNode* declaration,
- base::Optional<const CallableNodeSignature*> signature,
- base::Optional<Statement*> body);
-
- void FinalizeStructsAndClasses();
-
- private:
- void DeclareSpecializedTypes(const SpecializationKey& key);
+ static void Visit(ExternalRuntimeDeclaration* decl, const Signature& sig,
+ base::Optional<Statement*> body);
+ static void Visit(ExternalMacroDeclaration* decl, const Signature& sig,
+ base::Optional<Statement*> body);
+ static void Visit(TorqueBuiltinDeclaration* decl, const Signature& signature,
+ base::Optional<Statement*> body);
+ static void Visit(TorqueMacroDeclaration* decl, const Signature& signature,
+ base::Optional<Statement*> body);
+ static void Visit(IntrinsicDeclaration* decl, const Signature& signature,
+ base::Optional<Statement*> body);
+
+ static void Visit(CallableNode* decl, const Signature& signature,
+ base::Optional<Statement*> body);
+
+ static void Visit(ConstDeclaration* decl);
+ static void Visit(StandardDeclaration* decl);
+ static void Visit(GenericDeclaration* decl) {
+ // The PredeclarationVisitor already handled this case.
+ }
+ static void Visit(SpecializationDeclaration* decl);
+ static void Visit(ExternConstDeclaration* decl);
+ static void Visit(CppIncludeDeclaration* decl);
- void FinalizeStructFieldsAndMethods(StructType* struct_type,
- StructDeclaration* struct_declaration);
- void FinalizeClassFieldsAndMethods(ClassType* class_type,
- ClassDeclaration* class_declaration);
+ static Signature MakeSpecializedSignature(const SpecializationKey& key);
+ static Callable* SpecializeImplicit(const SpecializationKey& key);
+ static Callable* Specialize(
+ const SpecializationKey& key, CallableNode* declaration,
+ base::Optional<const CallableNodeSignature*> signature,
+ base::Optional<Statement*> body, SourcePosition position);
- std::vector<std::tuple<Scope*, StructDeclaration*, StructType*>>
- struct_declarations_;
- std::vector<std::tuple<Scope*, ClassDeclaration*, ClassType*>>
- class_declarations_;
+ private:
+ static void DeclareSpecializedTypes(const SpecializationKey& key);
};
} // namespace torque
diff --git a/deps/v8/src/torque/declarations.cc b/deps/v8/src/torque/declarations.cc
index fb08980a0e..f3f3e84cad 100644
--- a/deps/v8/src/torque/declarations.cc
+++ b/deps/v8/src/torque/declarations.cc
@@ -70,8 +70,13 @@ const Type* Declarations::LookupType(const QualifiedName& name) {
return LookupTypeAlias(name)->type();
}
-const Type* Declarations::LookupType(std::string name) {
- return LookupType(QualifiedName(std::move(name)));
+const Type* Declarations::LookupType(const Identifier* name) {
+ const TypeAlias* alias = LookupTypeAlias(QualifiedName(name->value));
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(name->pos,
+ alias->GetDeclarationPosition());
+ }
+ return alias->type();
}
const Type* Declarations::LookupGlobalType(const std::string& name) {
@@ -80,36 +85,6 @@ const Type* Declarations::LookupGlobalType(const std::string& name) {
return declaration->type();
}
-const Type* Declarations::GetType(TypeExpression* type_expression) {
- if (auto* basic = BasicTypeExpression::DynamicCast(type_expression)) {
- std::string name =
- (basic->is_constexpr ? CONSTEXPR_TYPE_PREFIX : "") + basic->name;
- const TypeAlias* alias =
- LookupTypeAlias(QualifiedName{basic->namespace_qualification, name});
- if (GlobalContext::collect_language_server_data()) {
- LanguageServerData::AddDefinition(type_expression->pos,
- alias->GetDeclarationPosition());
- }
- return alias->type();
- } else if (auto* union_type =
- UnionTypeExpression::DynamicCast(type_expression)) {
- return TypeOracle::GetUnionType(GetType(union_type->a),
- GetType(union_type->b));
- } else if (auto* reference_type =
- ReferenceTypeExpression::DynamicCast(type_expression)) {
- return TypeOracle::GetReferenceType(
- GetType(reference_type->referenced_type));
- } else {
- auto* function_type_exp = FunctionTypeExpression::cast(type_expression);
- TypeVector argument_types;
- for (TypeExpression* type_exp : function_type_exp->parameters) {
- argument_types.push_back(GetType(type_exp));
- }
- return TypeOracle::GetBuiltinPointerType(
- argument_types, GetType(function_type_exp->return_type));
- }
-}
-
Builtin* Declarations::FindSomeInternalBuiltinWithType(
const BuiltinPointerType* type) {
for (auto& declarable : GlobalContext::AllDeclarables()) {
@@ -162,76 +137,59 @@ Namespace* Declarations::DeclareNamespace(const std::string& name) {
return Declare(name, std::unique_ptr<Namespace>(new Namespace(name)));
}
-const AbstractType* Declarations::DeclareAbstractType(
- const Identifier* name, bool transient, std::string generated,
- base::Optional<const AbstractType*> non_constexpr_version,
- const base::Optional<Identifier*>& parent) {
+TypeAlias* Declarations::DeclareType(const Identifier* name, const Type* type) {
CheckAlreadyDeclared<TypeAlias>(name->value, "type");
- const Type* parent_type = nullptr;
- if (parent) {
- auto parent_type_alias = LookupTypeAlias(QualifiedName{(*parent)->value});
- parent_type = parent_type_alias->type();
- if (GlobalContext::collect_language_server_data()) {
- LanguageServerData::AddDefinition(
- (*parent)->pos, parent_type_alias->GetDeclarationPosition());
- }
- }
- if (generated == "" && parent) {
- generated = parent_type->GetGeneratedTNodeTypeName();
- }
- const AbstractType* type = TypeOracle::GetAbstractType(
- parent_type, name->value, transient, generated, non_constexpr_version);
- DeclareType(name, type, false);
- return type;
+ return Declare(name->value, std::unique_ptr<TypeAlias>(
+ new TypeAlias(type, true, name->pos)));
}
-void Declarations::DeclareType(const Identifier* name, const Type* type,
- bool redeclaration) {
+const TypeAlias* Declarations::PredeclareTypeAlias(const Identifier* name,
+ TypeDeclaration* type,
+ bool redeclaration) {
CheckAlreadyDeclared<TypeAlias>(name->value, "type");
- Declare(name->value, std::unique_ptr<TypeAlias>(
- new TypeAlias(type, redeclaration, name->pos)));
+ std::unique_ptr<TypeAlias> alias_ptr(
+ new TypeAlias(type, redeclaration, name->pos));
+ return Declare(name->value, std::move(alias_ptr));
}
-StructType* Declarations::DeclareStruct(const Identifier* name) {
- StructType* new_type = TypeOracle::GetStructType(name->value);
- DeclareType(name, new_type, false);
- return new_type;
+TorqueMacro* Declarations::CreateTorqueMacro(
+ std::string external_name, std::string readable_name, bool exported_to_csa,
+ Signature signature, bool transitioning, base::Optional<Statement*> body,
+ bool is_user_defined) {
+ // TODO(tebbi): Switch to more predictable names to improve incremental
+ // compilation.
+ external_name += "_" + std::to_string(GlobalContext::FreshId());
+ return RegisterDeclarable(std::unique_ptr<TorqueMacro>(new TorqueMacro(
+ std::move(external_name), std::move(readable_name), std::move(signature),
+ transitioning, body, is_user_defined, exported_to_csa)));
}
-ClassType* Declarations::DeclareClass(const Type* super_type,
- const Identifier* name, bool is_extern,
- bool generate_print, bool transient,
- const std::string& generates) {
- ClassType* new_type = TypeOracle::GetClassType(
- super_type, name->value, is_extern, generate_print, transient, generates);
- DeclareType(name, new_type, false);
- return new_type;
-}
-
-Macro* Declarations::CreateMacro(
- std::string external_name, std::string readable_name,
- base::Optional<std::string> external_assembler_name, Signature signature,
- bool transitioning, base::Optional<Statement*> body) {
- if (!external_assembler_name) {
- external_assembler_name = CurrentNamespace()->ExternalName();
- }
- return RegisterDeclarable(std::unique_ptr<Macro>(
- new Macro(std::move(external_name), std::move(readable_name),
- std::move(*external_assembler_name), std::move(signature),
- transitioning, body)));
+ExternMacro* Declarations::CreateExternMacro(
+ std::string name, std::string external_assembler_name, Signature signature,
+ bool transitioning) {
+ return RegisterDeclarable(std::unique_ptr<ExternMacro>(
+ new ExternMacro(std::move(name), std::move(external_assembler_name),
+ std::move(signature), transitioning)));
}
Macro* Declarations::DeclareMacro(
- const std::string& name,
+ const std::string& name, bool accessible_from_csa,
base::Optional<std::string> external_assembler_name,
const Signature& signature, bool transitioning,
- base::Optional<Statement*> body, base::Optional<std::string> op) {
+ base::Optional<Statement*> body, base::Optional<std::string> op,
+ bool is_user_defined) {
if (TryLookupMacro(name, signature.GetExplicitTypes())) {
ReportError("cannot redeclare macro ", name,
" with identical explicit parameters");
}
- Macro* macro = CreateMacro(name, name, std::move(external_assembler_name),
- signature, transitioning, body);
+ Macro* macro;
+ if (external_assembler_name) {
+ macro = CreateExternMacro(name, std::move(*external_assembler_name),
+ signature, transitioning);
+ } else {
+ macro = CreateTorqueMacro(name, name, accessible_from_csa, signature,
+ transitioning, body, is_user_defined);
+ }
Declare(name, macro);
if (op) {
if (TryLookupMacro(*op, signature.GetExplicitTypes())) {
@@ -249,8 +207,7 @@ Method* Declarations::CreateMethod(AggregateType* container_type,
std::string generated_name{container_type->GetGeneratedMethodName(name)};
Method* result = RegisterDeclarable(std::unique_ptr<Method>(
new Method(container_type, container_type->GetGeneratedMethodName(name),
- name, CurrentNamespace()->ExternalName(), std::move(signature),
- transitioning, body)));
+ name, std::move(signature), transitioning, body)));
container_type->RegisterMethod(result);
return result;
}
@@ -308,7 +265,10 @@ NamespaceConstant* Declarations::DeclareNamespaceConstant(Identifier* name,
const Type* type,
Expression* body) {
CheckAlreadyDeclared<Value>(name->value, "constant");
- NamespaceConstant* result = new NamespaceConstant(name, type, body);
+ std::string external_name =
+ name->value + "_" + std::to_string(GlobalContext::FreshId());
+ NamespaceConstant* result =
+ new NamespaceConstant(name, std::move(external_name), type, body);
Declare(name->value, std::unique_ptr<Declarable>(result));
return result;
}
diff --git a/deps/v8/src/torque/declarations.h b/deps/v8/src/torque/declarations.h
index 75dd4b0459..0dd9be9974 100644
--- a/deps/v8/src/torque/declarations.h
+++ b/deps/v8/src/torque/declarations.h
@@ -56,9 +56,8 @@ class Declarations {
static const TypeAlias* LookupTypeAlias(const QualifiedName& name);
static const Type* LookupType(const QualifiedName& name);
- static const Type* LookupType(std::string name);
+ static const Type* LookupType(const Identifier* identifier);
static const Type* LookupGlobalType(const std::string& name);
- static const Type* GetType(TypeExpression* type_expression);
static Builtin* FindSomeInternalBuiltinWithType(
const BuiltinPointerType* type);
@@ -73,31 +72,27 @@ class Declarations {
static Generic* LookupUniqueGeneric(const QualifiedName& name);
static Namespace* DeclareNamespace(const std::string& name);
-
- static const AbstractType* DeclareAbstractType(
- const Identifier* name, bool transient, std::string generated,
- base::Optional<const AbstractType*> non_constexpr_version,
- const base::Optional<Identifier*>& parent = {});
-
- static void DeclareType(const Identifier* name, const Type* type,
- bool redeclaration);
-
- static StructType* DeclareStruct(const Identifier* name);
-
- static ClassType* DeclareClass(const Type* super, const Identifier* name,
- bool is_extern, bool generate_print,
- bool transient, const std::string& generates);
-
- static Macro* CreateMacro(std::string external_name,
- std::string readable_name,
- base::Optional<std::string> external_assembler_name,
- Signature signature, bool transitioning,
- base::Optional<Statement*> body);
+ static TypeAlias* DeclareType(const Identifier* name, const Type* type);
+
+ static const TypeAlias* PredeclareTypeAlias(const Identifier* name,
+ TypeDeclaration* type,
+ bool redeclaration);
+ static TorqueMacro* CreateTorqueMacro(std::string external_name,
+ std::string readable_name,
+ bool exported_to_csa,
+ Signature signature, bool transitioning,
+ base::Optional<Statement*> body,
+ bool is_user_defined);
+ static ExternMacro* CreateExternMacro(std::string name,
+ std::string external_assembler_name,
+ Signature signature,
+ bool transitioning);
static Macro* DeclareMacro(
- const std::string& name,
+ const std::string& name, bool accessible_from_csa,
base::Optional<std::string> external_assembler_name,
const Signature& signature, bool transitioning,
- base::Optional<Statement*> body, base::Optional<std::string> op = {});
+ base::Optional<Statement*> body, base::Optional<std::string> op = {},
+ bool is_user_defined = true);
static Method* CreateMethod(AggregateType* class_type,
const std::string& name, Signature signature,
diff --git a/deps/v8/src/torque/earley-parser.h b/deps/v8/src/torque/earley-parser.h
index 16d3e92571..9d9cfb02c0 100644
--- a/deps/v8/src/torque/earley-parser.h
+++ b/deps/v8/src/torque/earley-parser.h
@@ -54,6 +54,8 @@ enum class ParseResultHolderBase::TypeId {
kOptionalLabelBlockPtr,
kNameAndTypeExpression,
kNameAndExpression,
+ kConditionalAnnotation,
+ kOptionalConditionalAnnotation,
kClassFieldExpression,
kStructFieldExpression,
kStdVectorOfNameAndTypeExpression,
@@ -64,6 +66,7 @@ enum class ParseResultHolderBase::TypeId {
kOptionalStdString,
kStdVectorOfStatementPtr,
kStdVectorOfDeclarationPtr,
+ kStdVectorOfStdVectorOfDeclarationPtr,
kStdVectorOfExpressionPtr,
kExpressionWithSource,
kParameterList,
@@ -189,6 +192,15 @@ inline base::Optional<ParseResult> DefaultAction(
return child_results->Next();
}
+template <class T, Action action>
+inline Action AsSingletonVector() {
+ return [](ParseResultIterator* child_results) -> base::Optional<ParseResult> {
+ auto result = action(child_results);
+ if (!result) return result;
+ return ParseResult{std::vector<T>{(*result).Cast<T>()}};
+ };
+}
+
// A rule of the context-free grammar. Each rule can have an action attached to
// it, which is executed after the parsing is finished.
class Rule final {
@@ -303,10 +315,13 @@ class Item {
void CheckAmbiguity(const Item& other, const LexerResult& tokens) const;
MatchedInput GetMatchedInput(const LexerResult& tokens) const {
- return {tokens.token_contents[start_].begin,
- start_ == pos_ ? tokens.token_contents[start_].begin
- : tokens.token_contents[pos_ - 1].end,
- tokens.token_contents[start_].pos};
+ const MatchedInput& start = tokens.token_contents[start_];
+ const MatchedInput& end = start_ == pos_ ? tokens.token_contents[start_]
+ : tokens.token_contents[pos_ - 1];
+ CHECK_EQ(start.pos.source, end.pos.source);
+ SourcePosition combined{start.pos.source, start.pos.start, end.pos.end};
+
+ return {start.begin, end.end, combined};
}
// We exclude {prev_} and {child_} from equality and hash computations,
diff --git a/deps/v8/src/torque/file-visitor.cc b/deps/v8/src/torque/file-visitor.cc
deleted file mode 100644
index deeebded9d..0000000000
--- a/deps/v8/src/torque/file-visitor.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/torque/file-visitor.h"
-
-#include "src/torque/declarable.h"
-
-namespace v8 {
-namespace internal {
-namespace torque {
-
-Signature FileVisitor::MakeSignature(const CallableNodeSignature* signature) {
- LabelDeclarationVector definition_vector;
- for (const auto& label : signature->labels) {
- LabelDeclaration def = {label.name, GetTypeVector(label.types)};
- definition_vector.push_back(def);
- }
- base::Optional<std::string> arguments_variable;
- if (signature->parameters.has_varargs)
- arguments_variable = signature->parameters.arguments_variable;
- Signature result{signature->parameters.names,
- arguments_variable,
- {GetTypeVector(signature->parameters.types),
- signature->parameters.has_varargs},
- signature->parameters.implicit_count,
- Declarations::GetType(signature->return_type),
- definition_vector};
- return result;
-}
-
-} // namespace torque
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/torque/file-visitor.h b/deps/v8/src/torque/file-visitor.h
deleted file mode 100644
index 4d9700bd6c..0000000000
--- a/deps/v8/src/torque/file-visitor.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TORQUE_FILE_VISITOR_H_
-#define V8_TORQUE_FILE_VISITOR_H_
-
-#include <deque>
-#include <string>
-
-#include "src/torque/ast.h"
-#include "src/torque/global-context.h"
-#include "src/torque/types.h"
-#include "src/torque/utils.h"
-
-namespace v8 {
-namespace internal {
-namespace torque {
-
-class FileVisitor {
- public:
- TypeVector GetTypeVector(const std::vector<TypeExpression*>& v) {
- TypeVector result;
- for (TypeExpression* t : v) {
- result.push_back(Declarations::GetType(t));
- }
- return result;
- }
-
- protected:
- Signature MakeSignature(const CallableNodeSignature* signature);
-};
-
-} // namespace torque
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TORQUE_FILE_VISITOR_H_
diff --git a/deps/v8/src/torque/global-context.h b/deps/v8/src/torque/global-context.h
index f791f9a9f2..aa70b23fb5 100644
--- a/deps/v8/src/torque/global-context.h
+++ b/deps/v8/src/torque/global-context.h
@@ -17,9 +17,11 @@ namespace torque {
class GlobalContext : public ContextualClass<GlobalContext> {
public:
+ GlobalContext(GlobalContext&&) V8_NOEXCEPT = default;
+ GlobalContext& operator=(GlobalContext&&) V8_NOEXCEPT = default;
explicit GlobalContext(Ast ast)
- : verbose_(false),
- collect_language_server_data_(false),
+ : collect_language_server_data_(false),
+ force_assert_statements_(false),
ast_(std::move(ast)) {
CurrentScope::Scope current_scope(nullptr);
CurrentSourcePosition::Scope current_source_position(
@@ -49,13 +51,14 @@ class GlobalContext : public ContextualClass<GlobalContext> {
return result;
}
- static void RegisterClass(const std::string& name, ClassType* new_class) {
- Get().classes_[name] = new_class;
+ static void RegisterClass(const TypeAlias* alias) {
+ DCHECK(alias->ParentScope()->IsNamespace());
+ Get().classes_.push_back(alias);
}
- static const std::map<std::string, ClassType*>& GetClasses() {
- return Get().classes_;
- }
+ using GlobalClassList = std::vector<const TypeAlias*>;
+
+ static const GlobalClassList& GetClasses() { return Get().classes_; }
static void AddCppInclude(std::string include_path) {
Get().cpp_includes_.push_back(std::move(include_path));
@@ -64,24 +67,32 @@ class GlobalContext : public ContextualClass<GlobalContext> {
return Get().cpp_includes_;
}
- static void SetVerbose() { Get().verbose_ = true; }
- static bool verbose() { return Get().verbose_; }
static void SetCollectLanguageServerData() {
Get().collect_language_server_data_ = true;
}
static bool collect_language_server_data() {
return Get().collect_language_server_data_;
}
+ static void SetForceAssertStatements() {
+ Get().force_assert_statements_ = true;
+ }
+ static bool force_assert_statements() {
+ return Get().force_assert_statements_;
+ }
static Ast* ast() { return &Get().ast_; }
+ static size_t FreshId() { return Get().fresh_id_++; }
private:
- bool verbose_;
bool collect_language_server_data_;
+ bool force_assert_statements_;
Namespace* default_namespace_;
Ast ast_;
std::vector<std::unique_ptr<Declarable>> declarables_;
std::vector<std::string> cpp_includes_;
- std::map<std::string, ClassType*> classes_;
+ GlobalClassList classes_;
+ size_t fresh_id_ = 0;
+
+ friend class LanguageServerData;
};
template <class T>
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index 11d2b16555..d4798b28cb 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -4,12 +4,13 @@
#include <algorithm>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/implementation-visitor.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
+#include "src/torque/type-visitor.h"
namespace v8 {
namespace internal {
@@ -57,7 +58,7 @@ void ImplementationVisitor::BeginNamespaceFile(Namespace* nspace) {
for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
source << "#include \"torque-generated/builtins-" +
- DashifyString(n->name()) + "-from-dsl-gen.h\"\n";
+ DashifyString(n->name()) + "-gen-tq.h\"\n";
}
source << "\n";
@@ -69,30 +70,19 @@ void ImplementationVisitor::BeginNamespaceFile(Namespace* nspace) {
transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
::toupper);
std::string headerDefine =
- std::string("V8_TORQUE_") + upper_name + "_FROM_DSL_BASE_H__";
+ "V8_GEN_TORQUE_GENERATED_" + upper_name + "_NAMESPACE_TQ_H_";
header << "#ifndef " << headerDefine << "\n";
header << "#define " << headerDefine << "\n\n";
header << "#include \"src/compiler/code-assembler.h\"\n";
- if (nspace != GlobalContext::GetDefaultNamespace()) {
- header << "#include \"src/code-stub-assembler.h\"\n";
- }
- header << "#include \"src/utils.h\"\n";
- header << "#include \"torque-generated/class-definitions-from-dsl.h\"\n";
+ header << "#include \"src/codegen/code-stub-assembler.h\"\n";
+ header << "#include \"src/utils/utils.h\"\n";
+ header << "#include \"torque-generated/field-offsets-tq.h\"\n";
+ header << "#include \"torque-generated/csa-types-tq.h\"\n";
header << "\n";
header << "namespace v8 {\n"
<< "namespace internal {\n"
<< "\n";
-
- header << "class ";
- if (nspace->IsTestNamespace()) {
- header << "V8_EXPORT_PRIVATE ";
- }
- header << nspace->ExternalName() << " {\n";
- header << " public:\n";
- header << " explicit " << nspace->ExternalName()
- << "(compiler::CodeAssemblerState* state) : state_(state), ca_(state) "
- "{ USE(state_, ca_); }\n";
}
void ImplementationVisitor::EndNamespaceFile(Namespace* nspace) {
@@ -103,16 +93,12 @@ void ImplementationVisitor::EndNamespaceFile(Namespace* nspace) {
transform(upper_name.begin(), upper_name.end(), upper_name.begin(),
::toupper);
std::string headerDefine =
- std::string("V8_TORQUE_") + upper_name + "_FROM_DSL_BASE_H__";
+ "V8_GEN_TORQUE_GENERATED_" + upper_name + "_NAMESPACE_V8_H_";
source << "} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
- header << " private:\n"
- << " compiler::CodeAssemblerState* const state_;\n"
- << " compiler::CodeAssembler ca_;\n"
- << "};\n\n";
header << "} // namespace internal\n"
<< "} // namespace v8\n"
<< "\n";
@@ -121,18 +107,18 @@ void ImplementationVisitor::EndNamespaceFile(Namespace* nspace) {
void ImplementationVisitor::Visit(NamespaceConstant* decl) {
Signature signature{{}, base::nullopt, {{}, false}, 0, decl->type(), {}};
- const std::string& name = decl->name()->value;
BindingsManagersScope bindings_managers_scope;
header_out() << " ";
- GenerateFunctionDeclaration(header_out(), "", name, signature, {});
+ GenerateFunctionDeclaration(header_out(), "", decl->external_name(),
+ signature, {});
header_out() << ";\n";
- GenerateFunctionDeclaration(source_out(),
- CurrentNamespace()->ExternalName() + "::", name,
+ GenerateFunctionDeclaration(source_out(), "", decl->external_name(),
signature, {});
source_out() << " {\n";
+ source_out() << " compiler::CodeAssembler ca_(state_);\n";
DCHECK(!signature.return_type->IsVoidOrNever());
@@ -155,66 +141,13 @@ void ImplementationVisitor::Visit(NamespaceConstant* decl) {
void ImplementationVisitor::Visit(TypeAlias* alias) {
if (alias->IsRedeclaration()) return;
- const ClassType* class_type = ClassType::DynamicCast(alias->type());
- if (class_type && class_type->IsExtern()) {
- // Classes that are in the default namespace are defined in the C++
- // world and all of their fields and methods are declared explicitly.
- // Internal classes (e.g. ones used for testing that are not in the default
- // name space) need to be defined by Torque.
- // TODO(danno): This is a pretty cheesy hack for now. There should be a more
- // robust mechanism for this, e.g. declaring classes 'extern' or something.
- if (class_type->nspace()->IsTestNamespace()) {
- std::string class_name{
- class_type->GetSuperClass()->GetGeneratedTNodeTypeName()};
- header_out() << " class " << class_type->name() << " : public "
- << class_name << " {\n";
- header_out() << " public:\n";
- header_out() << " DEFINE_FIELD_OFFSET_CONSTANTS(" << class_name
- << "::kSize, TORQUE_GENERATED_"
- << CapifyStringWithUnderscores(class_type->name())
- << "_FIELDS)\n";
- header_out() << " };\n";
- } else if (!class_type->nspace()->IsDefaultNamespace()) {
- ReportError(
- "extern classes are currently only supported in the default and test "
- "namespaces");
+ if (const ClassType* class_type = ClassType::DynamicCast(alias->type())) {
+ if (class_type->IsExtern() && !class_type->nspace()->IsDefaultNamespace()) {
+ Error(
+ "extern classes are currently only supported in the default "
+ "namespace");
}
- return;
}
- const StructType* struct_type = StructType::DynamicCast(alias->type());
- if (!struct_type) return;
- const std::string& name = struct_type->name();
- header_out() << " struct " << name << " {\n";
- for (auto& field : struct_type->fields()) {
- header_out() << " " << field.name_and_type.type->GetGeneratedTypeName();
- header_out() << " " << field.name_and_type.name << ";\n";
- }
- header_out() << "\n std::tuple<";
- bool first = true;
- for (const Type* type : LowerType(struct_type)) {
- if (!first) {
- header_out() << ", ";
- }
- first = false;
- header_out() << type->GetGeneratedTypeName();
- }
- header_out() << "> Flatten() const {\n"
- << " return std::tuple_cat(";
- first = true;
- for (auto& field : struct_type->fields()) {
- if (!first) {
- header_out() << ", ";
- }
- first = false;
- if (field.name_and_type.type->IsStructType()) {
- header_out() << field.name_and_type.name << ".Flatten()";
- } else {
- header_out() << "std::make_tuple(" << field.name_and_type.name << ")";
- }
- }
- header_out() << ");\n";
- header_out() << " }\n";
- header_out() << " };\n";
}
VisitResult ImplementationVisitor::InlineMacro(
@@ -332,13 +265,12 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
bool has_return_value =
can_return && return_type != TypeOracle::GetVoidType();
- header_out() << " ";
GenerateMacroFunctionDeclaration(header_out(), "", macro);
header_out() << ";\n";
- GenerateMacroFunctionDeclaration(
- source_out(), CurrentNamespace()->ExternalName() + "::", macro);
+ GenerateMacroFunctionDeclaration(source_out(), "", macro);
source_out() << " {\n";
+ source_out() << " compiler::CodeAssembler ca_(state_);\n";
Stack<std::string> lowered_parameters;
Stack<const Type*> lowered_parameter_types;
@@ -409,11 +341,11 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
std::vector<std::string> label_parameter_variables;
for (size_t i = 0; i < label_info.types.size(); ++i) {
LowerLabelParameter(label_info.types[i],
- ExternalLabelParameterName(label_info.name, i),
+ ExternalLabelParameterName(label_info.name->value, i),
&label_parameter_variables);
}
- assembler().Emit(GotoExternalInstruction{ExternalLabelName(label_info.name),
- label_parameter_variables});
+ assembler().Emit(GotoExternalInstruction{
+ ExternalLabelName(label_info.name->value), label_parameter_variables});
}
if (return_type != TypeOracle::GetNeverType()) {
@@ -434,8 +366,7 @@ void ImplementationVisitor::VisitMacroCommon(Macro* macro) {
source_out() << "}\n\n";
}
-void ImplementationVisitor::Visit(Macro* macro) {
- if (macro->IsExternal()) return;
+void ImplementationVisitor::Visit(TorqueMacro* macro) {
VisitMacroCommon(macro);
}
@@ -499,7 +430,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
<< " TNode<IntPtrT> arguments_length(ChangeInt32ToIntPtr(argc));\n";
source_out() << " TNode<RawPtrT> arguments_frame = "
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
- source_out() << " BaseBuiltinsFromDSLAssembler::Arguments "
+ source_out() << " TorqueStructArguments "
"torque_arguments(GetFrameArguments(arguments_frame, "
"arguments_length));\n";
source_out() << " CodeStubArguments arguments(this, torque_arguments);\n";
@@ -558,11 +489,7 @@ const Type* ImplementationVisitor::Visit(
base::Optional<const Type*> type;
if (stmt->type) {
- type = Declarations::GetType(*stmt->type);
- if ((*type)->IsConstexpr() && !stmt->const_qualified) {
- ReportError(
- "cannot declare variable with constexpr type. Use 'const' instead.");
- }
+ type = TypeVisitor::ComputeType(*stmt->type);
}
base::Optional<VisitResult> init_result;
if (stmt->initializer) {
@@ -571,6 +498,13 @@ const Type* ImplementationVisitor::Visit(
if (type) {
init_result = GenerateImplicitConvert(*type, *init_result);
}
+ type = init_result->type();
+ if ((*type)->IsConstexpr() && !stmt->const_qualified) {
+ Error("Use 'const' instead of 'let' for variable '", stmt->name->value,
+ "' of constexpr type '", (*type)->ToString(), "'.")
+ .Position(stmt->name->pos)
+ .Throw();
+ }
init_result = scope.Yield(*init_result);
} else {
DCHECK(type.has_value());
@@ -762,12 +696,12 @@ VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
// TODO(tebbi): Do not silently loose precision; support 64bit literals.
double d = std::stod(expr->number.c_str());
int32_t i = static_cast<int32_t>(d);
- const Type* result_type = Declarations::LookupType(CONST_FLOAT64_TYPE_STRING);
+ const Type* result_type = TypeOracle::GetConstFloat64Type();
if (i == d) {
if ((i >> 30) == (i >> 31)) {
- result_type = Declarations::LookupType(CONST_INT31_TYPE_STRING);
+ result_type = TypeOracle::GetConstInt31Type();
} else {
- result_type = Declarations::LookupType(CONST_INT32_TYPE_STRING);
+ result_type = TypeOracle::GetConstInt32Type();
}
}
return VisitResult{result_type, expr->number};
@@ -775,8 +709,8 @@ VisitResult ImplementationVisitor::Visit(NumberLiteralExpression* expr) {
VisitResult ImplementationVisitor::Visit(AssumeTypeImpossibleExpression* expr) {
VisitResult result = Visit(expr->expression);
- const Type* result_type =
- SubtractType(result.type(), Declarations::GetType(expr->excluded_type));
+ const Type* result_type = SubtractType(
+ result.type(), TypeVisitor::ComputeType(expr->excluded_type));
if (result_type->IsNever()) {
ReportError("unreachable code");
}
@@ -812,13 +746,18 @@ VisitResult ImplementationVisitor::Visit(LocationExpression* expr) {
}
const Type* ImplementationVisitor::Visit(GotoStatement* stmt) {
- LocalLabel* label = LookupLabel(stmt->label);
+ Binding<LocalLabel>* label = LookupLabel(stmt->label->value);
size_t parameter_count = label->parameter_types.size();
if (stmt->arguments.size() != parameter_count) {
ReportError("goto to label has incorrect number of parameters (expected ",
parameter_count, " found ", stmt->arguments.size(), ")");
}
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(stmt->label->pos,
+ label->declaration_position());
+ }
+
size_t i = 0;
StackRange arguments = assembler().TopRange(0);
for (Expression* e : stmt->arguments) {
@@ -997,7 +936,7 @@ std::string FormatAssertSource(const std::string& str) {
} // namespace
const Type* ImplementationVisitor::Visit(AssertStatement* stmt) {
- bool do_check = !stmt->debug_only;
+ bool do_check = !stmt->debug_only || GlobalContext::force_assert_statements();
#if defined(DEBUG)
do_check = true;
#endif
@@ -1124,7 +1063,7 @@ const Type* ImplementationVisitor::Visit(ForOfLoopStatement* stmt) {
VisitResult result = GenerateCall("[]", {{expression_result, index}, {}});
if (stmt->var_declaration->type) {
const Type* declared_type =
- Declarations::GetType(*stmt->var_declaration->type);
+ TypeVisitor::ComputeType(*stmt->var_declaration->type);
result = GenerateImplicitConvert(declared_type, result);
}
element_result = element_scope.Yield(result);
@@ -1192,7 +1131,7 @@ VisitResult ImplementationVisitor::Visit(TryLabelExpression* expr) {
TypeVector parameter_types;
for (size_t i = 0; i < parameter_count; ++i) {
const Type* type =
- Declarations::GetType(expr->label_block->parameters.types[i]);
+ TypeVisitor::ComputeType(expr->label_block->parameters.types[i]);
parameter_types.push_back(type);
if (type->IsConstexpr()) {
ReportError("no constexpr type allowed for label arguments");
@@ -1333,65 +1272,17 @@ size_t ImplementationVisitor::InitializeAggregateHelper(
void ImplementationVisitor::InitializeFieldFromSpread(
VisitResult object, const Field& field,
const InitializerResults& initializer_results) {
- StackScope stack_scope(this);
-
- VisitResult zero(TypeOracle::GetConstInt31Type(), "0");
- const Type* index_type = (*field.index)->name_and_type.type;
- VisitResult index = GenerateImplicitConvert(index_type, zero);
- Block* post_exit_block = assembler().NewBlock(assembler().CurrentStack());
- Block* exit_block = assembler().NewBlock(assembler().CurrentStack());
- Block* body_block = assembler().NewBlock(assembler().CurrentStack());
- Block* fail_block = assembler().NewBlock(assembler().CurrentStack(), true);
- Block* header_block = assembler().NewBlock(assembler().CurrentStack());
+ NameAndType index = (*field.index)->name_and_type;
+ VisitResult iterator =
+ initializer_results.field_value_map.at(field.name_and_type.name);
+ VisitResult length = initializer_results.field_value_map.at(index.name);
- assembler().Goto(header_block);
-
- assembler().Bind(header_block);
- Arguments compare_arguments;
- compare_arguments.parameters.push_back(index);
- compare_arguments.parameters.push_back(initializer_results.field_value_map.at(
- (*field.index)->name_and_type.name));
- GenerateExpressionBranch(
- [&]() { return GenerateCall("<", compare_arguments); }, body_block,
- exit_block);
-
- assembler().Bind(body_block);
- {
- VisitResult spreadee =
- initializer_results.field_value_map.at(field.name_and_type.name);
- LocationReference target = LocationReference::VariableAccess(spreadee);
- Binding<LocalLabel> no_more{&LabelBindingsManager::Get(), "_Done",
- LocalLabel{fail_block}};
-
- // Call the Next() method of the iterator
- Arguments next_arguments;
- next_arguments.labels.push_back(&no_more);
- Callable* callable = LookupMethod("Next", target, next_arguments, {});
- VisitResult next_result =
- GenerateCall(callable, target, next_arguments, {}, false);
- Arguments assign_arguments;
- assign_arguments.parameters.push_back(object);
- assign_arguments.parameters.push_back(index);
- assign_arguments.parameters.push_back(next_result);
- GenerateCall("[]=", assign_arguments);
-
- // Increment the indexed field index.
- LocationReference index_ref = LocationReference::VariableAccess(index);
- Arguments increment_arguments;
- VisitResult one = {TypeOracle::GetConstInt31Type(), "1"};
- increment_arguments.parameters = {index, one};
- VisitResult assignment_value = GenerateCall("+", increment_arguments);
- GenerateAssignToLocation(index_ref, assignment_value);
- }
- assembler().Goto(header_block);
-
- assembler().Bind(fail_block);
- assembler().Emit(AbortInstruction(AbortInstruction::Kind::kUnreachable));
-
- assembler().Bind(exit_block);
- assembler().Goto(post_exit_block);
-
- assembler().Bind(post_exit_block);
+ Arguments assign_arguments;
+ assign_arguments.parameters.push_back(object);
+ assign_arguments.parameters.push_back(length);
+ assign_arguments.parameters.push_back(iterator);
+ GenerateCall("%InitializeFieldsFromIterator", assign_arguments,
+ {field.aggregate, index.type, iterator.type()});
}
void ImplementationVisitor::InitializeAggregate(
@@ -1422,13 +1313,13 @@ VisitResult ImplementationVisitor::AddVariableObjectSize(
VisitResult(TypeOracle::GetConstInt31Type(), "kTaggedSize");
VisitResult initializer_value = initializer_results.field_value_map.at(
(*current_field->index)->name_and_type.name);
- VisitResult index_intptr_size =
- GenerateCall("Convert", {{initializer_value}, {}},
- {TypeOracle::GetIntPtrType()}, false);
- VisitResult variable_size = GenerateCall(
- "*", {{index_intptr_size, index_field_size}, {}}, {}, false);
+ Arguments args;
+ args.parameters.push_back(object_size);
+ args.parameters.push_back(initializer_value);
+ args.parameters.push_back(index_field_size);
object_size =
- GenerateCall("+", {{object_size, variable_size}, {}}, {}, false);
+ GenerateCall("%AddIndexedFieldSizeToObjectSize", args,
+ {(*current_field->index)->name_and_type.type}, false);
}
++current_field;
}
@@ -1439,7 +1330,7 @@ VisitResult ImplementationVisitor::AddVariableObjectSize(
VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
StackScope stack_scope(this);
- const Type* type = Declarations::GetType(expr->type);
+ const Type* type = TypeVisitor::ComputeType(expr->type);
const ClassType* class_type = ClassType::DynamicCast(type);
if (class_type == nullptr) {
ReportError("type for new expression must be a class, \"", *type,
@@ -1464,8 +1355,13 @@ VisitResult ImplementationVisitor::Visit(NewExpression* expr) {
ReportError(
"external classes initializers must have a map as first parameter");
}
- VisitResult object_map =
- initializer_results.field_value_map[map_field.name_and_type.name];
+ NameValueMap initializer_fields = initializer_results.field_value_map;
+ if (initializer_fields.find(map_field.name_and_type.name) ==
+ initializer_fields.end()) {
+ ReportError("Constructor for ", class_type->name(),
+ " needs Map argument!");
+ }
+ VisitResult object_map = initializer_fields[map_field.name_and_type.name];
Arguments size_arguments;
size_arguments.parameters.push_back(object_map);
VisitResult object_size = GenerateCall("%GetAllocationBaseSize",
@@ -1574,13 +1470,13 @@ void ImplementationVisitor::GenerateImplementation(const std::string& dir,
Namespace* nspace) {
std::string new_source(nspace->source());
std::string base_file_name =
- "builtins-" + DashifyString(nspace->name()) + "-from-dsl-gen";
+ "builtins-" + DashifyString(nspace->name()) + "-gen-tq";
std::string source_file_name = dir + "/" + base_file_name + ".cc";
- ReplaceFileContentsIfDifferent(source_file_name, new_source);
+ WriteFile(source_file_name, new_source);
std::string new_header(nspace->header());
std::string header_file_name = dir + "/" + base_file_name + ".h";
- ReplaceFileContentsIfDifferent(header_file_name, new_header);
+ WriteFile(header_file_name, new_header);
}
void ImplementationVisitor::GenerateMacroFunctionDeclaration(
@@ -1589,13 +1485,11 @@ void ImplementationVisitor::GenerateMacroFunctionDeclaration(
macro->signature(), macro->parameter_names());
}
-void ImplementationVisitor::GenerateFunctionDeclaration(
+std::vector<std::string> ImplementationVisitor::GenerateFunctionDeclaration(
std::ostream& o, const std::string& macro_prefix, const std::string& name,
- const Signature& signature, const NameVector& parameter_names) {
- if (GlobalContext::verbose()) {
- std::cout << "generating source for declaration " << name << "\n";
- }
-
+ const Signature& signature, const NameVector& parameter_names,
+ bool pass_code_assembler_state) {
+ std::vector<std::string> generated_parameter_names;
if (signature.return_type->IsVoidOrNever()) {
o << "void";
} else {
@@ -1603,26 +1497,32 @@ void ImplementationVisitor::GenerateFunctionDeclaration(
}
o << " " << macro_prefix << name << "(";
- DCHECK_EQ(signature.types().size(), parameter_names.size());
- auto type_iterator = signature.types().begin();
bool first = true;
- for (const Identifier* name : parameter_names) {
- if (!first) {
- o << ", ";
- }
- const Type* parameter_type = *type_iterator;
+ if (pass_code_assembler_state) {
+ first = false;
+ o << "compiler::CodeAssemblerState* state_";
+ }
+
+ DCHECK_GE(signature.types().size(), parameter_names.size());
+ for (size_t i = 0; i < signature.types().size(); ++i) {
+ if (!first) o << ", ";
+ first = false;
+ const Type* parameter_type = signature.types()[i];
const std::string& generated_type_name =
parameter_type->GetGeneratedTypeName();
- o << generated_type_name << " " << ExternalParameterName(name->value);
- type_iterator++;
- first = false;
+
+ generated_parameter_names.push_back(ExternalParameterName(
+ i < parameter_names.size() ? parameter_names[i]->value
+ : std::to_string(i)));
+ o << generated_type_name << " " << generated_parameter_names.back();
}
for (const LabelDeclaration& label_info : signature.labels) {
- if (!first) {
- o << ", ";
- }
- o << "compiler::CodeAssemblerLabel* " << ExternalLabelName(label_info.name);
+ if (!first) o << ", ";
+ first = false;
+ generated_parameter_names.push_back(
+ ExternalLabelName(label_info.name->value));
+ o << "compiler::CodeAssemblerLabel* " << generated_parameter_names.back();
size_t i = 0;
for (const Type* type : label_info.types) {
std::string generated_type_name;
@@ -1634,13 +1534,15 @@ void ImplementationVisitor::GenerateFunctionDeclaration(
generated_type_name += ">*";
}
o << ", ";
- o << generated_type_name << " "
- << ExternalLabelParameterName(label_info.name, i);
+ generated_parameter_names.push_back(
+ ExternalLabelParameterName(label_info.name->value, i));
+ o << generated_type_name << " " << generated_parameter_names.back();
++i;
}
}
o << ")";
+ return generated_parameter_names;
}
namespace {
@@ -1670,7 +1572,7 @@ Callable* GetOrCreateSpecialization(const SpecializationKey& key) {
key.generic->GetSpecialization(key.specialized_types)) {
return *specialization;
}
- return DeclarationVisitor().SpecializeImplicit(key);
+ return DeclarationVisitor::SpecializeImplicit(key);
}
} // namespace
@@ -1729,7 +1631,7 @@ Callable* ImplementationVisitor::LookupCallable(
if (!inferred_specialization_types) continue;
overloads.push_back(generic);
overload_signatures.push_back(
- DeclarationVisitor().MakeSpecializedSignature(
+ DeclarationVisitor::MakeSpecializedSignature(
SpecializationKey{generic, *inferred_specialization_types}));
} else if (Callable* callable = Callable::DynamicCast(declarable)) {
overloads.push_back(callable);
@@ -1810,14 +1712,14 @@ Callable* ImplementationVisitor::LookupCallable(
const QualifiedName& name, const Container& declaration_container,
const Arguments& arguments, const TypeVector& specialization_types) {
return LookupCallable(name, declaration_container,
- arguments.parameters.GetTypeVector(), arguments.labels,
- specialization_types);
+ arguments.parameters.ComputeTypeVector(),
+ arguments.labels, specialization_types);
}
Method* ImplementationVisitor::LookupMethod(
const std::string& name, LocationReference this_reference,
const Arguments& arguments, const TypeVector& specialization_types) {
- TypeVector types(arguments.parameters.GetTypeVector());
+ TypeVector types(arguments.parameters.ComputeTypeVector());
types.insert(types.begin(), this_reference.ReferencedType());
return Method::cast(LookupCallable(
{{}, name},
@@ -1849,7 +1751,7 @@ VisitResult ImplementationVisitor::GenerateCopy(const VisitResult& to_copy) {
VisitResult ImplementationVisitor::Visit(StructExpression* expr) {
StackScope stack_scope(this);
- const Type* raw_type = Declarations::GetType(expr->type);
+ const Type* raw_type = TypeVisitor::ComputeType(expr->type);
if (!raw_type->IsStructType()) {
ReportError(*raw_type, " is not a struct but used like one");
}
@@ -1985,15 +1887,16 @@ LocationReference ImplementationVisitor::GetLocationReference(
QualifiedName(expr->namespace_qualification, expr->name->value);
if (base::Optional<Builtin*> builtin = Declarations::TryLookupBuiltin(name)) {
if (GlobalContext::collect_language_server_data()) {
- LanguageServerData::AddDefinition(expr->name->pos, (*builtin)->pos());
+ LanguageServerData::AddDefinition(expr->name->pos,
+ (*builtin)->Position());
}
return LocationReference::Temporary(GetBuiltinCode(*builtin),
"builtin " + expr->name->value);
}
if (expr->generic_arguments.size() != 0) {
Generic* generic = Declarations::LookupUniqueGeneric(name);
- Callable* specialization = GetOrCreateSpecialization(
- SpecializationKey{generic, GetTypeVector(expr->generic_arguments)});
+ Callable* specialization = GetOrCreateSpecialization(SpecializationKey{
+ generic, TypeVisitor::ComputeTypeVector(expr->generic_arguments)});
if (Builtin* builtin = Builtin::DynamicCast(specialization)) {
DCHECK(!builtin->IsExternal());
return LocationReference::Temporary(GetBuiltinCode(builtin),
@@ -2010,9 +1913,7 @@ LocationReference ImplementationVisitor::GetLocationReference(
if (auto* constant = NamespaceConstant::DynamicCast(value)) {
if (constant->type()->IsConstexpr()) {
return LocationReference::Temporary(
- VisitResult(constant->type(), constant->ExternalAssemblerName() +
- "(state_)." +
- constant->name()->value + "()"),
+ VisitResult(constant->type(), constant->external_name() + "(state_)"),
"namespace constant " + expr->name->value);
}
assembler().Emit(NamespaceConstantInstruction{constant});
@@ -2089,7 +1990,7 @@ void ImplementationVisitor::GenerateAssignToLocation(
VisitResult ImplementationVisitor::GeneratePointerCall(
Expression* callee, const Arguments& arguments, bool is_tailcall) {
StackScope scope(this);
- TypeVector parameter_types(arguments.parameters.GetTypeVector());
+ TypeVector parameter_types(arguments.parameters.ComputeTypeVector());
VisitResult callee_result = Visit(callee);
if (!callee_result.type()->IsBuiltinPointerType()) {
std::stringstream stream;
@@ -2228,11 +2129,6 @@ VisitResult ImplementationVisitor::GenerateCall(
&argument_range, &constexpr_arguments);
}
- if (GlobalContext::verbose()) {
- std::cout << "generating code for call to " << callable->ReadableName()
- << "\n";
- }
-
size_t label_count = callable->signature().labels.size();
if (label_count != arguments.labels.size()) {
std::stringstream s;
@@ -2274,9 +2170,15 @@ VisitResult ImplementationVisitor::GenerateCall(
if (return_type->IsConstexpr()) {
DCHECK_EQ(0, arguments.labels.size());
std::stringstream result;
- result << "(" << macro->external_assembler_name() << "(state_)."
- << macro->ExternalName() << "(";
+ result << "(";
bool first = true;
+ if (auto* extern_macro = ExternMacro::DynamicCast(macro)) {
+ result << extern_macro->external_assembler_name() << "(state_)."
+ << extern_macro->ExternalName() << "(";
+ } else {
+ result << macro->ExternalName() << "(state_";
+ first = false;
+ }
for (VisitResult arg : arguments.parameters) {
DCHECK(!arg.IsOnStack());
if (!first) {
@@ -2427,7 +2329,7 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
QualifiedName name = QualifiedName(expr->callee->namespace_qualification,
expr->callee->name->value);
TypeVector specialization_types =
- GetTypeVector(expr->callee->generic_arguments);
+ TypeVisitor::ComputeTypeVector(expr->callee->generic_arguments);
bool has_template_arguments = !specialization_types.empty();
for (Expression* arg : expr->arguments)
arguments.parameters.push_back(Visit(arg));
@@ -2441,7 +2343,7 @@ VisitResult ImplementationVisitor::Visit(CallExpression* expr,
Callable* callable = LookupCallable(name, Declarations::Lookup(name),
arguments, specialization_types);
LanguageServerData::AddDefinition(expr->callee->name->pos,
- callable->pos());
+ callable->IdentifierPosition());
}
return scope.Yield(
GenerateCall(name, arguments, specialization_types, is_tailcall));
@@ -2453,7 +2355,7 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
Arguments arguments;
std::string method_name = expr->method->name->value;
TypeVector specialization_types =
- GetTypeVector(expr->method->generic_arguments);
+ TypeVisitor::ComputeTypeVector(expr->method->generic_arguments);
LocationReference target = GetLocationReference(expr->target);
if (!target.IsVariableAccess()) {
VisitResult result = GenerateFetchFromLocation(target);
@@ -2468,18 +2370,23 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
arguments.parameters.push_back(Visit(arg));
}
arguments.labels = LabelsFromIdentifiers(expr->labels);
- TypeVector argument_types = arguments.parameters.GetTypeVector();
+ TypeVector argument_types = arguments.parameters.ComputeTypeVector();
DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
QualifiedName qualified_name = QualifiedName(method_name);
Callable* callable = nullptr;
callable = LookupMethod(method_name, target, arguments, {});
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(expr->method->name->pos,
+ callable->IdentifierPosition());
+ }
return scope.Yield(GenerateCall(callable, target, arguments, {}, false));
}
VisitResult ImplementationVisitor::Visit(IntrinsicCallExpression* expr) {
StackScope scope(this);
Arguments arguments;
- TypeVector specialization_types = GetTypeVector(expr->generic_arguments);
+ TypeVector specialization_types =
+ TypeVisitor::ComputeTypeVector(expr->generic_arguments);
for (Expression* arg : expr->arguments)
arguments.parameters.push_back(Visit(arg));
return scope.Yield(
@@ -2554,11 +2461,20 @@ StackRange ImplementationVisitor::GenerateLabelGoto(
}
std::vector<Binding<LocalLabel>*> ImplementationVisitor::LabelsFromIdentifiers(
- const std::vector<std::string>& names) {
+ const std::vector<Identifier*>& names) {
std::vector<Binding<LocalLabel>*> result;
result.reserve(names.size());
for (const auto& name : names) {
- result.push_back(LookupLabel(name));
+ Binding<LocalLabel>* label = LookupLabel(name->value);
+ result.push_back(label);
+
+ // Link up labels in "otherwise" part of the call expression with
+ // either the label in the signature of the calling macro or the label
+ // block of a surrounding "try".
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(name->pos,
+ label->declaration_position());
+ }
}
return result;
}
@@ -2660,7 +2576,6 @@ void ImplementationVisitor::GenerateCatchBlock(
}
}
}
-
void ImplementationVisitor::VisitAllDeclarables() {
CurrentCallable::Scope current_callable(nullptr);
const std::vector<std::unique_ptr<Declarable>>& all_declarables =
@@ -2668,16 +2583,22 @@ void ImplementationVisitor::VisitAllDeclarables() {
// This has to be an index-based loop because all_declarables can be extended
// during the loop.
for (size_t i = 0; i < all_declarables.size(); ++i) {
- Visit(all_declarables[i].get());
+ try {
+ Visit(all_declarables[i].get());
+ } catch (TorqueAbortCompilation&) {
+ // Recover from compile errors here. The error is recorded already.
+ }
}
}
void ImplementationVisitor::Visit(Declarable* declarable) {
CurrentScope::Scope current_scope(declarable->ParentScope());
- CurrentSourcePosition::Scope current_source_position(declarable->pos());
+ CurrentSourcePosition::Scope current_source_position(declarable->Position());
switch (declarable->kind()) {
- case Declarable::kMacro:
- return Visit(Macro::cast(declarable));
+ case Declarable::kExternMacro:
+ return Visit(ExternMacro::cast(declarable));
+ case Declarable::kTorqueMacro:
+ return Visit(TorqueMacro::cast(declarable));
case Declarable::kMethod:
return Visit(Method::cast(declarable));
case Declarable::kBuiltin:
@@ -2695,253 +2616,971 @@ void ImplementationVisitor::Visit(Declarable* declarable) {
}
}
-void ImplementationVisitor::GenerateBuiltinDefinitions(std::string& file_name) {
+namespace {
+class IfDefScope {
+ public:
+ IfDefScope(std::ostream& os, std::string d) : os_(os), d_(std::move(d)) {
+ os_ << "#ifdef " << d_ << "\n";
+ }
+ ~IfDefScope() { os_ << "#endif // " << d_ << "\n"; }
+
+ private:
+ std::ostream& os_;
+ std::string d_;
+};
+
+class NamespaceScope {
+ public:
+ NamespaceScope(std::ostream& os,
+ std::initializer_list<std::string> namespaces)
+ : os_(os), d_(std::move(namespaces)) {
+ for (const std::string& s : d_) {
+ os_ << "namespace " << s << " {\n";
+ }
+ }
+ ~NamespaceScope() {
+ for (auto i = d_.rbegin(); i != d_.rend(); ++i) {
+ os_ << "} // namespace " << *i << "\n";
+ }
+ }
+
+ private:
+ std::ostream& os_;
+ std::vector<std::string> d_;
+};
+
+class IncludeGuardScope {
+ public:
+ IncludeGuardScope(std::ostream& os, std::string file_name)
+ : os_(os),
+ d_("V8_GEN_TORQUE_GENERATED_" + CapifyStringWithUnderscores(file_name) +
+ "_") {
+ os_ << "#ifndef " << d_ << "\n";
+ os_ << "#define " << d_ << "\n\n";
+ }
+ ~IncludeGuardScope() { os_ << "#endif // " << d_ << "\n"; }
+
+ private:
+ std::ostream& os_;
+ std::string d_;
+};
+
+class IncludeObjectMacrosScope {
+ public:
+ explicit IncludeObjectMacrosScope(std::ostream& os) : os_(os) {
+ os_ << "\n// Has to be the last include (doesn't have include guards):\n"
+ "#include \"src/objects/object-macros.h\"\n";
+ }
+ ~IncludeObjectMacrosScope() {
+ os_ << "\n#include \"src/objects/object-macros-undef.h\"\n";
+ }
+
+ private:
+ std::ostream& os_;
+};
+} // namespace
+
+void ImplementationVisitor::GenerateBuiltinDefinitions(
+ const std::string& output_directory) {
std::stringstream new_contents_stream;
- new_contents_stream
- << "#ifndef V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
- "#define V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
- "\n"
- "#define BUILTIN_LIST_FROM_DSL(CPP, API, TFJ, TFC, TFS, TFH, ASM) "
- "\\\n";
- for (auto& declarable : GlobalContext::AllDeclarables()) {
- Builtin* builtin = Builtin::DynamicCast(declarable.get());
- if (!builtin || builtin->IsExternal()) continue;
- int firstParameterIndex = 1;
- bool declareParameters = true;
- if (builtin->IsStub()) {
- new_contents_stream << "TFS(" << builtin->ExternalName();
- } else {
- new_contents_stream << "TFJ(" << builtin->ExternalName();
- if (builtin->IsVarArgsJavaScript()) {
- new_contents_stream
- << ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
- declareParameters = false;
+ std::string file_name = "builtin-definitions-tq.h";
+ {
+ IncludeGuardScope include_guard(new_contents_stream, file_name);
+ new_contents_stream
+ << "\n"
+ "#define BUILTIN_LIST_FROM_TORQUE(CPP, TFJ, TFC, TFS, TFH, "
+ "ASM) "
+ "\\\n";
+ for (auto& declarable : GlobalContext::AllDeclarables()) {
+ Builtin* builtin = Builtin::DynamicCast(declarable.get());
+ if (!builtin || builtin->IsExternal()) continue;
+ int firstParameterIndex = 1;
+ bool declareParameters = true;
+ if (builtin->IsStub()) {
+ new_contents_stream << "TFS(" << builtin->ExternalName();
} else {
- assert(builtin->IsFixedArgsJavaScript());
- // FixedArg javascript builtins need to offer the parameter
- // count.
- assert(builtin->parameter_names().size() >= 2);
- new_contents_stream << ", " << (builtin->parameter_names().size() - 2);
- // And the receiver is explicitly declared.
- new_contents_stream << ", kReceiver";
- firstParameterIndex = 2;
+ new_contents_stream << "TFJ(" << builtin->ExternalName();
+ if (builtin->IsVarArgsJavaScript()) {
+ new_contents_stream
+ << ", SharedFunctionInfo::kDontAdaptArgumentsSentinel";
+ declareParameters = false;
+ } else {
+ assert(builtin->IsFixedArgsJavaScript());
+ // FixedArg javascript builtins need to offer the parameter
+ // count.
+ int size = static_cast<int>(builtin->parameter_names().size());
+ assert(size >= 1);
+ new_contents_stream << ", " << (std::max(size - 2, 0));
+ // And the receiver is explicitly declared.
+ new_contents_stream << ", kReceiver";
+ firstParameterIndex = 2;
+ }
}
- }
- if (declareParameters) {
- int index = 0;
- for (const auto& parameter : builtin->parameter_names()) {
- if (index >= firstParameterIndex) {
- new_contents_stream << ", k" << CamelifyString(parameter->value);
+ if (declareParameters) {
+ int index = 0;
+ for (const auto& parameter : builtin->parameter_names()) {
+ if (index >= firstParameterIndex) {
+ new_contents_stream << ", k" << CamelifyString(parameter->value);
+ }
+ index++;
}
- index++;
}
+ new_contents_stream << ") \\\n";
}
- new_contents_stream << ") \\\n";
- }
- new_contents_stream << "\n";
+ new_contents_stream << "\n";
- new_contents_stream
- << "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n";
- for (const BuiltinPointerType* type : TypeOracle::AllBuiltinPointerTypes()) {
- Builtin* example_builtin =
- Declarations::FindSomeInternalBuiltinWithType(type);
- if (!example_builtin) {
- CurrentSourcePosition::Scope current_source_position(
- SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
- ReportError("unable to find any builtin with type \"", *type, "\"");
+ new_contents_stream
+ << "#define TORQUE_FUNCTION_POINTER_TYPE_TO_BUILTIN_MAP(V) \\\n";
+ for (const BuiltinPointerType* type :
+ TypeOracle::AllBuiltinPointerTypes()) {
+ Builtin* example_builtin =
+ Declarations::FindSomeInternalBuiltinWithType(type);
+ if (!example_builtin) {
+ CurrentSourcePosition::Scope current_source_position(
+ SourcePosition{CurrentSourceFile::Get(), {-1, -1}, {-1, -1}});
+ ReportError("unable to find any builtin with type \"", *type, "\"");
+ }
+ new_contents_stream << " V(" << type->function_pointer_type_id() << ","
+ << example_builtin->ExternalName() << ")\\\n";
}
- new_contents_stream << " V(" << type->function_pointer_type_id() << ","
- << example_builtin->ExternalName() << ")\\\n";
+ new_contents_stream << "\n";
}
- new_contents_stream << "\n";
-
- new_contents_stream
- << "#endif // V8_BUILTINS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n";
-
std::string new_contents(new_contents_stream.str());
- ReplaceFileContentsIfDifferent(file_name, new_contents);
+ WriteFile(output_directory + "/" + file_name, new_contents);
}
namespace {
-enum class FieldSectionType {
+enum class FieldSectionType : uint32_t {
kNoSection = 0,
- kWeakSection,
- kStrongSection,
- kScalarSection
+ kWeakSection = 1 << 0,
+ kStrongSection = 2 << 0,
+ kScalarSection = 3 << 0
};
-void PossiblyStartTagged(FieldSectionType* section,
- std::set<FieldSectionType>* completed_sections,
- std::stringstream* o) {
- if (completed_sections->count(FieldSectionType::kWeakSection) == 0 &&
- completed_sections->count(FieldSectionType::kStrongSection) == 0 &&
- *section != FieldSectionType::kWeakSection &&
- *section != FieldSectionType::kStrongSection) {
- *o << "V(kStartOfPointerFieldsOffset, 0) \\\n";
- }
+bool IsPointerSection(FieldSectionType type) {
+ return type == FieldSectionType::kWeakSection ||
+ type == FieldSectionType::kStrongSection;
}
-void PossiblyEndTagged(FieldSectionType* section,
- std::set<FieldSectionType>* completed_sections,
- std::stringstream* o) {
- if (completed_sections->count(FieldSectionType::kWeakSection) != 0 &&
- completed_sections->count(FieldSectionType::kStrongSection) != 0) {
- *o << "V(kEndOfTaggedFieldsOffset, 0) \\\n";
+using FieldSections = base::Flags<FieldSectionType>;
+
+std::string ToString(FieldSectionType type) {
+ switch (type) {
+ case FieldSectionType::kNoSection:
+ return "NoSection";
+ break;
+ case FieldSectionType::kWeakSection:
+ return "WeakFields";
+ break;
+ case FieldSectionType::kStrongSection:
+ return "StrongFields";
+ break;
+ case FieldSectionType::kScalarSection:
+ return "ScalarFields";
+ break;
}
+ UNREACHABLE();
}
-void ProcessFieldInSection(FieldSectionType* section,
- std::set<FieldSectionType>* completed_sections,
- FieldSectionType field_section,
- std::stringstream* o) {
- if (*section != FieldSectionType::kNoSection) {
- if (*section != field_section) {
- if (completed_sections->count(field_section) != 0) {
- ReportError("reopening of weak, strong or scalar field section");
+class FieldOffsetsGenerator {
+ public:
+ explicit FieldOffsetsGenerator(const ClassType* type) : type_(type) {}
+
+ virtual void WriteField(const Field& f) = 0;
+ virtual void WriteMarker(const std::string& marker) = 0;
+
+ virtual ~FieldOffsetsGenerator() { CHECK(is_finished_); }
+
+ void RecordOffsetFor(const Field& f) {
+ CHECK(!is_finished_);
+ UpdateSection(f);
+ WriteField(f);
+ }
+
+ void Finish() {
+ End(current_section_);
+ if (!(completed_sections_ & FieldSectionType::kWeakSection)) {
+ Begin(FieldSectionType::kWeakSection);
+ End(FieldSectionType::kWeakSection);
+ }
+ if (!(completed_sections_ & FieldSectionType::kStrongSection)) {
+ Begin(FieldSectionType::kStrongSection);
+ End(FieldSectionType::kStrongSection);
+ }
+ is_finished_ = true;
+ if (type_->IsAbstract()) {
+ WriteMarker("kHeaderSize");
+ }
+ if (!type_->IsAbstract() || type_->IsInstantiatedAbstractClass()) {
+ WriteMarker("kSize");
+ }
+ }
+
+ protected:
+ const ClassType* type_;
+
+ private:
+ FieldSectionType GetSectionFor(const Field& f) {
+ if (f.name_and_type.type == TypeOracle::GetVoidType()) {
+ // Allow void type for marker constants of size zero.
+ return current_section_;
+ }
+ if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ if (f.is_weak) {
+ return FieldSectionType::kWeakSection;
+ } else {
+ return FieldSectionType::kStrongSection;
}
- completed_sections->insert(*section);
- if (*section == FieldSectionType::kWeakSection) {
- *o << "V(kEndOfWeakFieldsOffset, 0) \\\n";
- PossiblyEndTagged(section, completed_sections, o);
- } else if (*section == FieldSectionType::kStrongSection) {
- *o << "V(kEndOfStrongFieldsOffset, 0) \\\n";
- PossiblyEndTagged(section, completed_sections, o);
+ } else {
+ return FieldSectionType::kScalarSection;
+ }
+ }
+ void UpdateSection(const Field& f) {
+ FieldSectionType type = GetSectionFor(f);
+ if (current_section_ == type) return;
+ if (IsPointerSection(type)) {
+ if (completed_sections_ & type) {
+ std::stringstream s;
+ s << "cannot declare field " << f.name_and_type.name << " in class "
+ << type_->name() << ", because section " << ToString(type)
+ << " to which it belongs has already been finished.";
+ Error(s.str()).Position(f.pos);
}
}
+ End(current_section_);
+ current_section_ = type;
+ Begin(current_section_);
+ }
+ void Begin(FieldSectionType type) {
+ DCHECK(type != FieldSectionType::kNoSection);
+ if (!IsPointerSection(type)) return;
+ WriteMarker("kStartOf" + ToString(type) + "Offset");
+ }
+ void End(FieldSectionType type) {
+ if (!IsPointerSection(type)) return;
+ completed_sections_ |= type;
+ WriteMarker("kEndOf" + ToString(type) + "Offset");
+ }
+
+ FieldSectionType current_section_ = FieldSectionType::kNoSection;
+ FieldSections completed_sections_ = FieldSectionType::kNoSection;
+ bool is_finished_ = false;
+};
+
+class MacroFieldOffsetsGenerator : public FieldOffsetsGenerator {
+ public:
+ MacroFieldOffsetsGenerator(std::ostream& out, const ClassType* type)
+ : FieldOffsetsGenerator(type), out_(out) {
+ out_ << "#define ";
+ out_ << "TORQUE_GENERATED_" << CapifyStringWithUnderscores(type_->name())
+ << "_FIELDS(V) \\\n";
+ }
+ virtual void WriteField(const Field& f) {
+ size_t field_size;
+ std::string size_string;
+ std::string machine_type;
+ std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+ out_ << "V(k" << CamelifyString(f.name_and_type.name) << "Offset, "
+ << size_string << ") \\\n";
}
- if (*section != field_section) {
- if (field_section == FieldSectionType::kWeakSection) {
- PossiblyStartTagged(section, completed_sections, o);
- *o << "V(kStartOfWeakFieldsOffset, 0) \\\n";
- } else if (field_section == FieldSectionType::kStrongSection) {
- PossiblyStartTagged(section, completed_sections, o);
- *o << "V(kStartOfStrongFieldsOffset, 0) \\\n";
+ virtual void WriteMarker(const std::string& marker) {
+ out_ << "V(" << marker << ", 0) \\\n";
+ }
+
+ private:
+ std::ostream& out_;
+};
+
+} // namespace
+
+void ImplementationVisitor::GenerateClassFieldOffsets(
+ const std::string& output_directory) {
+ std::stringstream header;
+ std::string file_name = "field-offsets-tq.h";
+ {
+ IncludeGuardScope include_guard(header, file_name);
+
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (!type->IsExtern()) continue;
+
+ // TODO(danno): Remove this once all classes use ClassFieldOffsetGenerator
+ // to generate field offsets without the use of macros.
+ MacroFieldOffsetsGenerator g(header, type);
+ for (auto f : type->fields()) {
+ CurrentSourcePosition::Scope scope(f.pos);
+ g.RecordOffsetFor(f);
+ }
+ g.Finish();
+ header << "\n";
}
}
- *section = field_section;
+ const std::string output_header_path = output_directory + "/" + file_name;
+ WriteFile(output_header_path, header.str());
+}
+
+namespace {
+
+class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
+ public:
+ ClassFieldOffsetGenerator(std::ostream& header, const ClassType* type)
+ : FieldOffsetsGenerator(type),
+ hdr_(header),
+ previous_field_end_("P::kHeaderSize") {}
+ virtual void WriteField(const Field& f) {
+ size_t field_size;
+ std::string size_string;
+ std::string machine_type;
+ std::tie(field_size, size_string) = f.GetFieldSizeInformation();
+ std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
+ std::string field_end = field + "End";
+ hdr_ << " static constexpr int " << field << " = " << previous_field_end_
+ << ";\n";
+ hdr_ << " static constexpr int " << field_end << " = " << field << " + "
+ << size_string << ";\n";
+ previous_field_end_ = field_end;
+ }
+ virtual void WriteMarker(const std::string& marker) {
+ hdr_ << " static constexpr int " << marker << " = " << previous_field_end_
+ << ";\n";
+ }
+
+ private:
+ std::ostream& hdr_;
+ std::string previous_field_end_;
+};
+
+class CppClassGenerator {
+ public:
+ CppClassGenerator(const ClassType* type, std::ostream& header,
+ std::ostream& inl_header, std::ostream& impl)
+ : type_(type),
+ super_(type->GetSuperClass()),
+ name_(type->name()),
+ gen_name_("TorqueGenerated" + name_),
+ gen_name_T_(gen_name_ + "<D, P>"),
+ gen_name_I_(gen_name_ + "<" + name_ + ", " + super_->name() + ">"),
+ hdr_(header),
+ inl_(inl_header),
+ impl_(impl) {}
+ const std::string template_decl() const {
+ return "template <class D, class P>";
+ }
+
+ void GenerateClass();
+
+ private:
+ void GenerateClassConstructors();
+ void GenerateFieldAccessor(const Field& f);
+ void GenerateFieldAccessorForUntagged(const Field& f);
+ void GenerateFieldAccessorForSmi(const Field& f);
+ void GenerateFieldAccessorForObject(const Field& f);
+
+ void GenerateClassCasts();
+
+ const ClassType* type_;
+ const ClassType* super_;
+ const std::string name_;
+ const std::string gen_name_;
+ const std::string gen_name_T_;
+ const std::string gen_name_I_;
+ std::ostream& hdr_;
+ std::ostream& inl_;
+ std::ostream& impl_;
+};
+
+void CppClassGenerator::GenerateClass() {
+ hdr_ << "class " << name_ << ";\n\n";
+
+ hdr_ << template_decl() << "\n";
+ hdr_ << "class " << gen_name_ << " : public P {\n";
+ hdr_ << " static_assert(std::is_same<" << name_ << ", D>::value,\n"
+ << " \"Use this class as direct base for " << name_ << ".\");\n";
+ hdr_ << " static_assert(std::is_same<" << super_->name() << ", P>::value,\n"
+ << " \"Pass in " << super_->name()
+ << " as second template parameter for " << gen_name_ << ".\");\n";
+ hdr_ << "public: \n";
+ hdr_ << " using Super = P;\n";
+ for (const Field& f : type_->fields()) {
+ GenerateFieldAccessor(f);
+ }
+
+ GenerateClassCasts();
+
+ if (type_->ShouldGeneratePrint()) {
+ hdr_ << "\n DECL_PRINTER(" << name_ << ")\n";
+ }
+
+ if (type_->ShouldGenerateVerify()) {
+ IfDefScope hdr_scope(hdr_, "VERIFY_HEAP");
+ hdr_ << " V8_EXPORT_PRIVATE void " << name_
+ << "Verify(Isolate* isolate);\n";
+
+ IfDefScope impl_scope(impl_, "VERIFY_HEAP");
+ impl_ << "\ntemplate <>\n";
+ impl_ << "void " << gen_name_I_ << "::" << name_
+ << "Verify(Isolate* isolate) {\n";
+ impl_ << " TorqueGeneratedClassVerifiers::" << name_ << "Verify(" << name_
+ << "::cast(*this), "
+ "isolate);\n";
+ impl_ << "}\n";
+ }
+
+ hdr_ << "\n";
+ ClassFieldOffsetGenerator g(hdr_, type_);
+ for (auto f : type_->fields()) {
+ CurrentSourcePosition::Scope scope(f.pos);
+ g.RecordOffsetFor(f);
+ }
+ g.Finish();
+ hdr_ << "\n";
+
+ GenerateClassConstructors();
+
+ hdr_ << "};\n\n";
+}
+
+void CppClassGenerator::GenerateClassCasts() {
+ hdr_ << " V8_INLINE static D cast(Object object) {\n";
+ hdr_ << " return D(object.ptr());\n";
+ hdr_ << " }\n";
+
+ hdr_ << " V8_INLINE static D unchecked_cast(Object object) {\n";
+ hdr_ << " return bit_cast<D>(object);\n";
+ hdr_ << " }\n";
+}
+
+void CppClassGenerator::GenerateClassConstructors() {
+ hdr_ << "public:\n";
+ hdr_ << " template <class DAlias = D>\n";
+ hdr_ << " constexpr " << gen_name_ << "() : P() {\n";
+ hdr_ << " static_assert(std::is_base_of<" << gen_name_ << ", \n";
+ hdr_ << " DAlias>::value,\n";
+ hdr_ << " \"class " << gen_name_ << " should be used as direct base for "
+ << name_ << ".\");\n";
+ hdr_ << " }\n";
+ hdr_ << " D* operator->() { return static_cast<D*>(this); }\n";
+ hdr_ << " const D* operator->() const { return static_cast<const D*>(this); "
+ "}\n\n";
+
+ hdr_ << "protected:\n";
+ hdr_ << " inline explicit " << gen_name_ << "(Address ptr);\n";
+
+ inl_ << "template<class D, class P>\n";
+ inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
+ inl_ << " : P(ptr) {\n";
+ if (type_->IsInstantiatedAbstractClass()) {
+ // This is a hack to prevent wrong instance type checks.
+ inl_ << " // Instance check omitted because class is annotated with "
+ "@dirtyInstantiatedAbstractClass.\n";
+ } else {
+ inl_ << " SLOW_DCHECK(this->Is" << name_ << "());\n";
+ }
+ inl_ << "}\n";
+}
+
+// TODO(sigurds): Keep in sync with DECL_ACCESSORS and ACCESSORS macro.
+void CppClassGenerator::GenerateFieldAccessor(const Field& f) {
+ const Type* field_type = f.name_and_type.type;
+ if (field_type == TypeOracle::GetVoidType()) return;
+ if (!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ return GenerateFieldAccessorForUntagged(f);
+ }
+ if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType())) {
+ return GenerateFieldAccessorForSmi(f);
+ }
+ if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetObjectType())) {
+ return GenerateFieldAccessorForObject(f);
+ }
+
+ Error("Generation of field accessor for ", type_->name(),
+ ":: ", f.name_and_type.name, " : ", *field_type, " is not supported.")
+ .Position(f.pos);
}
-void CompleteFieldSection(FieldSectionType* section,
- std::set<FieldSectionType>* completed_sections,
- FieldSectionType field_section,
- std::stringstream* o) {
- if (completed_sections->count(field_section) == 0) {
- ProcessFieldInSection(section, completed_sections, field_section, o);
- ProcessFieldInSection(section, completed_sections,
- FieldSectionType::kNoSection, o);
+void CppClassGenerator::GenerateFieldAccessorForUntagged(const Field& f) {
+ DCHECK(!f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType()));
+ const Type* field_type = f.name_and_type.type;
+ if (field_type == TypeOracle::GetVoidType()) return;
+ const Type* constexpr_version = field_type->ConstexprVersion();
+ if (!constexpr_version) {
+ Error("Field accessor for ", type_->name(), ":: ", f.name_and_type.name,
+ " cannot be generated because its type ", *field_type,
+ " is neither a subclass of Object nor does the type have a constexpr "
+ "version.")
+ .Position(f.pos);
+ return;
+ }
+ const std::string& name = f.name_and_type.name;
+ const std::string type = constexpr_version->GetGeneratedTypeName();
+ const std::string offset = "k" + CamelifyString(name) + "Offset";
+
+ // Generate declarations in header.
+ hdr_ << " inline " << type << " " << name << "() const;\n";
+ hdr_ << " inline void set_" << name << "(" << type << " value);\n\n";
+
+ // Generate implementation in inline header.
+ inl_ << "template <class D, class P>\n";
+ inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
+ inl_ << " return this->template ReadField<" << type << ">(" << offset
+ << ");\n";
+ inl_ << "}\n";
+
+ inl_ << "template <class D, class P>\n";
+ inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(" << type
+ << " value) {\n";
+ inl_ << " this->template WriteField<" << type << ">(" << offset
+ << ", value);\n";
+ inl_ << "}\n\n";
+}
+
+void CppClassGenerator::GenerateFieldAccessorForSmi(const Field& f) {
+ DCHECK(f.name_and_type.type->IsSubtypeOf(TypeOracle::GetSmiType()));
+ const std::string type = "Smi";
+ const std::string& name = f.name_and_type.name;
+ const std::string offset = "k" + CamelifyString(name) + "Offset";
+
+ // Generate declarations in header.
+ hdr_ << " inline " << type << " " << name << "() const;\n";
+ hdr_ << " inline void set_" << name << "(" << type << " value);\n\n";
+
+ // Generate implementation in inline header.
+ inl_ << "template <class D, class P>\n";
+ inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
+ inl_ << " return Smi::cast(READ_FIELD(*this, " << offset << "));\n";
+ inl_ << "}\n";
+
+ inl_ << "template <class D, class P>\n";
+ inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(" << type
+ << " value) {\n";
+ inl_ << " DCHECK(value.IsSmi());\n";
+ inl_ << " WRITE_FIELD(*this, " << offset << ", value);\n";
+ inl_ << "}\n\n";
+}
+
+void CppClassGenerator::GenerateFieldAccessorForObject(const Field& f) {
+ const Type* field_type = f.name_and_type.type;
+ DCHECK(field_type->IsSubtypeOf(TypeOracle::GetObjectType()));
+ const std::string& name = f.name_and_type.name;
+ const std::string offset = "k" + CamelifyString(name) + "Offset";
+ const ClassType* class_type = ClassType::DynamicCast(field_type);
+
+ std::string type = class_type ? class_type->name() : "Object";
+
+ // Generate declarations in header.
+ if (!class_type && field_type != TypeOracle::GetObjectType()) {
+ hdr_ << " // Torque type: " << field_type->ToString() << "\n";
+ }
+ hdr_ << " inline " << type << " " << name << "() const;\n";
+ hdr_ << " inline void set_" << name << "(" << type
+ << " value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);\n\n";
+
+ std::string type_check;
+ for (const std::string& runtime_type : field_type->GetRuntimeTypes()) {
+ if (!type_check.empty()) type_check += " || ";
+ type_check += "value.Is" + runtime_type + "()";
+ }
+
+ // Generate implementation in inline header.
+ inl_ << "template <class D, class P>\n";
+ inl_ << type << " " << gen_name_ << "<D, P>::" << name << "() const {\n";
+ inl_ << " Object value = READ_FIELD(*this, " << offset << ");\n";
+ if (class_type) {
+ inl_ << " return " << type << "::cast(value);\n";
+ } else {
+ inl_ << " DCHECK(" << type_check << ");\n";
+ inl_ << " return value;\n";
}
+ inl_ << "}\n";
+
+ inl_ << "template <class D, class P>\n";
+ inl_ << "void " << gen_name_ << "<D, P>::set_" << name << "(" << type
+ << " value, WriteBarrierMode mode) {\n";
+ inl_ << " SLOW_DCHECK(" << type_check << ");\n";
+ inl_ << " WRITE_FIELD(*this, " << offset << ", value);\n";
+ inl_ << " CONDITIONAL_WRITE_BARRIER(*this, " << offset
+ << ", value, mode);\n";
+ inl_ << "}\n\n";
}
} // namespace
-void ImplementationVisitor::GenerateClassDefinitions(std::string& file_name) {
- std::stringstream new_contents_stream;
- new_contents_stream << "#ifndef V8_CLASS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
- "#define V8_CLASS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n"
- "\n\n";
-
- for (auto i : GlobalContext::GetClasses()) {
- ClassType* type = i.second;
- if (!type->IsExtern()) continue;
-
- // TODO(danno): Ideally (and we've got several core V8 dev's feedback
- // supporting this), Torque should generate the constants for the offsets
- // directly and not go through the existing layer of macros, which actually
- // currently just serves to additionally obfuscate where these values come
- // from.
- new_contents_stream << "#define ";
- new_contents_stream << "TORQUE_GENERATED_"
- << CapifyStringWithUnderscores(i.first)
- << "_FIELDS(V) \\\n";
- std::vector<Field> fields = type->fields();
- FieldSectionType section = FieldSectionType::kNoSection;
- std::set<FieldSectionType> completed_sections;
- for (auto f : fields) {
- CurrentSourcePosition::Scope scope(f.pos);
- if (f.name_and_type.type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
- if (f.is_weak) {
- ProcessFieldInSection(&section, &completed_sections,
- FieldSectionType::kWeakSection,
- &new_contents_stream);
- } else {
- ProcessFieldInSection(&section, &completed_sections,
- FieldSectionType::kStrongSection,
- &new_contents_stream);
- }
+void ImplementationVisitor::GenerateClassDefinitions(
+ const std::string& output_directory) {
+ std::stringstream header;
+ std::stringstream inline_header;
+ std::stringstream implementation;
+ std::string basename = "class-definitions-tq";
+ std::string file_basename = output_directory + "/" + basename;
+
+ {
+ IncludeGuardScope header_guard(header, basename + ".h");
+ header << "#include \"src/objects/heap-number.h\"\n";
+ header << "#include \"src/objects/objects.h\"\n";
+ header << "#include \"src/objects/smi.h\"\n";
+ header << "#include \"torque-generated/field-offsets-tq.h\"\n";
+ header << "#include <type_traits>\n\n";
+ IncludeObjectMacrosScope header_macros(header);
+ NamespaceScope header_namespaces(header, {"v8", "internal"});
+ header << "using BuiltinPtr = Smi;\n\n";
+
+ IncludeGuardScope inline_header_guard(inline_header, basename + "-inl.h");
+ inline_header << "#include \"torque-generated/class-definitions-tq.h\"\n\n";
+ inline_header << "#include \"src/objects/objects-inl.h\"\n\n";
+ IncludeObjectMacrosScope inline_header_macros(inline_header);
+ NamespaceScope inline_header_namespaces(inline_header, {"v8", "internal"});
+
+ implementation
+ << "#include \"torque-generated/class-definitions-tq.h\"\n\n";
+ implementation << "#include \"torque-generated/class-verifiers-tq.h\"\n\n";
+ implementation << "#include \"src/objects/struct-inl.h\"\n\n";
+ NamespaceScope implementation_namespaces(implementation,
+ {"v8", "internal"});
+
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (!type->GenerateCppClassDefinitions()) continue;
+ CppClassGenerator g(type, header, inline_header, implementation);
+ g.GenerateClass();
+ }
+ }
+ WriteFile(file_basename + ".h", header.str());
+ WriteFile(file_basename + "-inl.h", inline_header.str());
+ WriteFile(file_basename + ".cc", implementation.str());
+}
+
+namespace {
+void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
+ const std::string& gen_name,
+ const std::string& gen_name_T,
+ const std::string template_params) {
+ impl << template_params << "\n";
+ impl << "void " << gen_name_T << "::" << type->name()
+ << "Print(std::ostream& os) {\n";
+ impl << " this->PrintHeader(os, \"" << gen_name << "\");\n";
+ auto hierarchy = type->GetHierarchy();
+ std::map<std::string, const AggregateType*> field_names;
+ for (const AggregateType* aggregate_type : hierarchy) {
+ for (const Field& f : aggregate_type->fields()) {
+ if (f.name_and_type.name == "map") continue;
+ impl << " os << \"\\n - " << f.name_and_type.name << ": \" << "
+ << "Brief(this->" << f.name_and_type.name << "());\n";
+ }
+ }
+ impl << " os << \"\\n\";\n";
+ impl << "}\n\n";
+}
+} // namespace
+
+void ImplementationVisitor::GeneratePrintDefinitions(
+ const std::string& output_directory) {
+ std::stringstream impl;
+ std::string file_name = "objects-printer-tq.cc";
+ {
+ IfDefScope object_print(impl, "OBJECT_PRINT");
+
+ impl << "#include \"src/objects/objects.h\"\n\n";
+ impl << "#include <iosfwd>\n\n";
+ impl << "#include \"src/objects/struct-inl.h\"\n\n";
+ impl << "#include \"src/objects/template-objects-inl.h\"\n\n";
+
+ NamespaceScope impl_namespaces(impl, {"v8", "internal"});
+
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (!type->ShouldGeneratePrint()) continue;
+
+ if (type->IsExtern() && type->GenerateCppClassDefinitions()) {
+ const ClassType* super = type->GetSuperClass();
+ std::string gen_name = "TorqueGenerated" + type->name();
+ std::string gen_name_T =
+ gen_name + "<" + type->name() + ", " + super->name() + ">";
+ std::string template_decl = "template <>";
+ GeneratePrintDefinitionsForClass(impl, type, gen_name, gen_name_T,
+ template_decl);
} else {
- ProcessFieldInSection(&section, &completed_sections,
- FieldSectionType::kScalarSection,
- &new_contents_stream);
+ GeneratePrintDefinitionsForClass(impl, type, type->name(), type->name(),
+ "");
}
- size_t field_size;
- std::string size_string;
- std::string machine_type;
- std::tie(field_size, size_string, machine_type) =
- f.GetFieldSizeInformation();
- new_contents_stream << "V(k" << CamelifyString(f.name_and_type.name)
- << "Offset, " << size_string << ") \\\n";
- }
-
- ProcessFieldInSection(&section, &completed_sections,
- FieldSectionType::kNoSection, &new_contents_stream);
- CompleteFieldSection(&section, &completed_sections,
- FieldSectionType::kWeakSection, &new_contents_stream);
- CompleteFieldSection(&section, &completed_sections,
- FieldSectionType::kStrongSection,
- &new_contents_stream);
-
- new_contents_stream << "V(kSize, 0) \\\n";
- new_contents_stream << "\n";
+ }
}
- new_contents_stream
- << "\n#endif // V8_CLASS_BUILTIN_DEFINITIONS_FROM_DSL_H_\n";
+ std::string new_contents(impl.str());
+ WriteFile(output_directory + "/" + file_name, new_contents);
+}
- std::string new_contents(new_contents_stream.str());
- ReplaceFileContentsIfDifferent(file_name, new_contents);
+namespace {
+
+void GenerateClassFieldVerifier(const std::string& class_name,
+ const ClassType& class_type, const Field& f,
+ std::ostream& h_contents,
+ std::ostream& cc_contents) {
+ if (!f.generate_verify) return;
+ const Type* field_type = f.name_and_type.type;
+
+ // We only verify tagged types, not raw numbers or pointers.
+ if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) return;
+
+ if (f.index) {
+ if ((*f.index)->name_and_type.type != TypeOracle::GetSmiType()) {
+ ReportError("Non-SMI values are not (yet) supported as indexes.");
+ }
+ // We already verified the index field because it was listed earlier, so we
+ // can assume it's safe to read here.
+ cc_contents << " for (int i = 0; i < Smi::ToInt(READ_FIELD(o, "
+ << class_name << "::k"
+ << CamelifyString((*f.index)->name_and_type.name)
+ << "Offset)); ++i) {\n";
+ } else {
+ cc_contents << " {\n";
+ }
+
+ const char* object_type = f.is_weak ? "MaybeObject" : "Object";
+ const char* read_fn = f.is_weak ? "READ_WEAK_FIELD" : "READ_FIELD";
+ const char* verify_fn =
+ f.is_weak ? "VerifyMaybeObjectPointer" : "VerifyPointer";
+ const char* index_offset = f.index ? " + i * kTaggedSize" : "";
+ // Name the local var based on the field name for nicer CHECK output.
+ const std::string value = f.name_and_type.name + "_value";
+
+ // Read the field.
+ cc_contents << " " << object_type << " " << value << " = " << read_fn
+ << "(o, " << class_name << "::k"
+ << CamelifyString(f.name_and_type.name) << "Offset"
+ << index_offset << ");\n";
+
+ // Call VerifyPointer or VerifyMaybeObjectPointer on it.
+ cc_contents << " " << object_type << "::" << verify_fn << "(isolate, "
+ << value << ");\n";
+
+ // Check that the value is of an appropriate type. We can skip this part for
+ // the Object type because it would not check anything beyond what we already
+ // checked with VerifyPointer.
+ if (f.name_and_type.type != TypeOracle::GetObjectType()) {
+ std::string type_check = f.is_weak ? value + ".IsWeakOrCleared()" : "";
+ std::string strong_value =
+ value + (f.is_weak ? ".GetHeapObjectOrSmi()" : "");
+ for (const std::string& runtime_type : field_type->GetRuntimeTypes()) {
+ if (!type_check.empty()) type_check += " || ";
+ type_check += strong_value + ".Is" + runtime_type + "()";
+ }
+ // Many subtypes of JSObject can be verified in partially-initialized states
+ // where their fields are all undefined. We explicitly allow that here. For
+ // any such fields that should never be undefined, we can include extra code
+ // in the custom verifier functions for them.
+ // TODO(1240798): If Factory::InitializeJSObjectFromMap is updated to use
+ // correct initial values based on the type of the field, then make this
+ // check stricter too.
+ if (class_type.IsSubtypeOf(TypeOracle::GetJSObjectType())) {
+ type_check += " || " + strong_value + ".IsUndefined(isolate)";
+ }
+ cc_contents << " CHECK(" << type_check << ");\n";
+ }
+ cc_contents << " }\n";
}
-void ImplementationVisitor::GeneratePrintDefinitions(std::string& file_name) {
- std::stringstream new_contents_stream;
+} // namespace
+
+void ImplementationVisitor::GenerateClassVerifiers(
+ const std::string& output_directory) {
+ std::string file_name = "class-verifiers-tq";
+ std::stringstream h_contents;
+ std::stringstream cc_contents;
+ {
+ IncludeGuardScope include_guard(h_contents, file_name + ".h");
+ IfDefScope verify_heap_h(h_contents, "VERIFY_HEAP");
+ IfDefScope verify_heap_cc(cc_contents, "VERIFY_HEAP");
+
+ cc_contents << "\n#include \"src/objects/objects.h\"\n";
+
+ for (const std::string& include_path : GlobalContext::CppIncludes()) {
+ cc_contents << "#include " << StringLiteralQuote(include_path) << "\n";
+ }
+ cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
+
+ IncludeObjectMacrosScope object_macros(cc_contents);
+
+ NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
+ NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});
+
+ // Generate forward declarations to avoid including any headers.
+ h_contents << "class Isolate;\n";
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ if (!type->IsExtern() || !type->ShouldGenerateVerify()) continue;
+ h_contents << "class " << type->name() << ";\n";
+ }
+
+ const char* verifier_class = "TorqueGeneratedClassVerifiers";
+
+ h_contents << "class " << verifier_class << "{\n";
+ h_contents << " public:\n";
+
+ for (const TypeAlias* alias : GlobalContext::GetClasses()) {
+ const ClassType* type = ClassType::DynamicCast(alias->type());
+ std::string name = type->name();
+ if (!type->IsExtern() || !type->ShouldGenerateVerify()) continue;
+
+ std::string method_name = name + "Verify";
+
+ h_contents << " static void " << method_name << "(" << name
+ << " o, Isolate* isolate);\n";
+
+ cc_contents << "void " << verifier_class << "::" << method_name << "("
+ << name << " o, Isolate* isolate) {\n";
+
+ // First, do any verification for the super class. Not all classes have
+ // verifiers, so skip to the nearest super class that has one.
+ const ClassType* super_type = type->GetSuperClass();
+ while (super_type && !super_type->ShouldGenerateVerify()) {
+ super_type = super_type->GetSuperClass();
+ }
+ if (super_type) {
+ std::string super_name = super_type->name();
+ if (super_name == "HeapObject") {
+ // Special case: HeapObjectVerify checks the Map type and dispatches
+ // to more specific types, so calling it here would cause infinite
+ // recursion. We could consider moving that behavior into a
+ // different method to make the contract of *Verify methods more
+ // consistent, but for now we'll just avoid the bad case.
+ cc_contents << " " << super_name << "Verify(o, isolate);\n";
+ } else {
+ cc_contents << " o." << super_name << "Verify(isolate);\n";
+ }
+ }
+
+ // Second, verify that this object is what it claims to be.
+ if (type->IsInstantiatedAbstractClass()) {
+ cc_contents << " // Instance type check skipped because\n";
+ cc_contents << " // it is an instantiated abstract class.\n";
+ } else {
+ cc_contents << " CHECK(o.Is" << name << "());\n";
+ }
- new_contents_stream << "#ifdef OBJECT_PRINT\n\n";
-
- new_contents_stream << "#include \"src/objects.h\"\n\n";
- new_contents_stream << "#include <iosfwd>\n\n";
- new_contents_stream << "#include \"src/objects/struct-inl.h\"\n\n";
-
- new_contents_stream << "namespace v8 {\n";
- new_contents_stream << "namespace internal {\n\n";
-
- for (auto i : GlobalContext::GetClasses()) {
- ClassType* type = i.second;
- if (!type->ShouldGeneratePrint()) continue;
-
- new_contents_stream << "void " << type->name() << "::" << type->name()
- << "Print(std::ostream& os) {\n";
- new_contents_stream << " PrintHeader(os, \"" << type->name() << "\");\n";
- auto hierarchy = type->GetHierarchy();
- std::map<std::string, const AggregateType*> field_names;
- for (const AggregateType* aggregate_type : hierarchy) {
- for (const Field& f : aggregate_type->fields()) {
- if (f.name_and_type.name == "map") continue;
- new_contents_stream << " os << \"\\n - " << f.name_and_type.name
- << ": \" << "
- << "Brief(" << f.name_and_type.name << "());\n";
+ // Third, verify its properties.
+ for (auto f : type->fields()) {
+ GenerateClassFieldVerifier(name, *type, f, h_contents, cc_contents);
}
+
+ cc_contents << "}\n";
}
- new_contents_stream << " os << \"\\n\";\n";
- new_contents_stream << "}\n\n";
+
+ h_contents << "};\n";
}
+ WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
+ WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
+}
- new_contents_stream << "} // namespace internal\"\n";
- new_contents_stream << "} // namespace v8\"\n";
+void ImplementationVisitor::GenerateExportedMacrosAssembler(
+ const std::string& output_directory) {
+ std::string file_name = "exported-macros-assembler-tq";
+ std::stringstream h_contents;
+ std::stringstream cc_contents;
+ {
+ IncludeGuardScope include_guard(h_contents, file_name + ".h");
+
+ h_contents << "#include \"src/compiler/code-assembler.h\"\n";
+ h_contents << "#include \"src/execution/frames.h\"\n";
+ h_contents << "#include \"torque-generated/csa-types-tq.h\"\n";
+ cc_contents << "#include \"torque-generated/" << file_name << ".h\"\n";
+ for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
+ cc_contents << "#include \"torque-generated/builtins-" +
+ DashifyString(n->name()) + "-gen-tq.h\"\n";
+ }
+
+ NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
+ NamespaceScope cc_namespaces(cc_contents, {"v8", "internal"});
+
+ h_contents << "class V8_EXPORT_PRIVATE "
+ "TorqueGeneratedExportedMacrosAssembler {\n"
+ << " public:\n"
+ << " explicit TorqueGeneratedExportedMacrosAssembler"
+ "(compiler::CodeAssemblerState* state) : state_(state) {\n"
+ << " USE(state_);\n"
+ << " }\n";
+
+ for (auto& declarable : GlobalContext::AllDeclarables()) {
+ TorqueMacro* macro = TorqueMacro::DynamicCast(declarable.get());
+ if (!(macro && macro->IsExportedToCSA())) continue;
+
+ h_contents << " ";
+ GenerateFunctionDeclaration(h_contents, "", macro->ReadableName(),
+ macro->signature(), macro->parameter_names(),
+ false);
+ h_contents << ";\n";
+
+ std::vector<std::string> parameter_names = GenerateFunctionDeclaration(
+ cc_contents,
+ "TorqueGeneratedExportedMacrosAssembler::", macro->ReadableName(),
+ macro->signature(), macro->parameter_names(), false);
+ cc_contents << "{\n";
+ cc_contents << "return " << macro->ExternalName() << "(state_";
+ for (auto& name : parameter_names) {
+ cc_contents << ", " << name;
+ }
+ cc_contents << ");\n";
+ cc_contents << "}\n";
+ }
- new_contents_stream << "\n#endif // OBJECT_PRINT\n\n";
+ h_contents << " private:\n"
+ << " compiler::CodeAssemblerState* state_;\n"
+ << "};\n";
+ }
+ WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
+ WriteFile(output_directory + "/" + file_name + ".cc", cc_contents.str());
+}
- std::string new_contents(new_contents_stream.str());
- ReplaceFileContentsIfDifferent(file_name, new_contents);
+void ImplementationVisitor::GenerateCSATypes(
+ const std::string& output_directory) {
+ std::string file_name = "csa-types-tq";
+ std::stringstream h_contents;
+ {
+ IncludeGuardScope include_guard(h_contents, file_name + ".h");
+ h_contents << "#include \"src/compiler/code-assembler.h\"\n\n";
+
+ NamespaceScope h_namespaces(h_contents, {"v8", "internal"});
+
+ for (auto& declarable : GlobalContext::AllDeclarables()) {
+ TypeAlias* alias = TypeAlias::DynamicCast(declarable.get());
+ if (!alias || alias->IsRedeclaration()) continue;
+ const StructType* struct_type = StructType::DynamicCast(alias->type());
+ if (!struct_type) continue;
+ const std::string& name = struct_type->name();
+ h_contents << "struct TorqueStruct" << name << " {\n";
+ for (auto& field : struct_type->fields()) {
+ h_contents << " " << field.name_and_type.type->GetGeneratedTypeName();
+ h_contents << " " << field.name_and_type.name << ";\n";
+ }
+ h_contents << "\n std::tuple<";
+ bool first = true;
+ for (const Type* type : LowerType(struct_type)) {
+ if (!first) {
+ h_contents << ", ";
+ }
+ first = false;
+ h_contents << type->GetGeneratedTypeName();
+ }
+ h_contents << "> Flatten() const {\n"
+ << " return std::tuple_cat(";
+ first = true;
+ for (auto& field : struct_type->fields()) {
+ if (!first) {
+ h_contents << ", ";
+ }
+ first = false;
+ if (field.name_and_type.type->IsStructType()) {
+ h_contents << field.name_and_type.name << ".Flatten()";
+ } else {
+ h_contents << "std::make_tuple(" << field.name_and_type.name << ")";
+ }
+ }
+ h_contents << ");\n";
+ h_contents << " }\n";
+ h_contents << "};\n";
+ }
+ }
+ WriteFile(output_directory + "/" + file_name + ".h", h_contents.str());
}
} // namespace torque
diff --git a/deps/v8/src/torque/implementation-visitor.h b/deps/v8/src/torque/implementation-visitor.h
index d6930615a2..e79c768e5c 100644
--- a/deps/v8/src/torque/implementation-visitor.h
+++ b/deps/v8/src/torque/implementation-visitor.h
@@ -10,7 +10,6 @@
#include "src/base/macros.h"
#include "src/torque/ast.h"
#include "src/torque/cfg.h"
-#include "src/torque/file-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/types.h"
#include "src/torque/utils.h"
@@ -274,11 +273,15 @@ struct Arguments {
bool IsCompatibleSignature(const Signature& sig, const TypeVector& types,
size_t label_count);
-class ImplementationVisitor : public FileVisitor {
+class ImplementationVisitor {
public:
- void GenerateBuiltinDefinitions(std::string& file_name);
- void GenerateClassDefinitions(std::string& file_name);
- void GeneratePrintDefinitions(std::string& file_name);
+ void GenerateBuiltinDefinitions(const std::string& output_directory);
+ void GenerateClassFieldOffsets(const std::string& output_directory);
+ void GeneratePrintDefinitions(const std::string& output_directory);
+ void GenerateClassDefinitions(const std::string& output_directory);
+ void GenerateClassVerifiers(const std::string& output_directory);
+ void GenerateExportedMacrosAssembler(const std::string& output_directory);
+ void GenerateCSATypes(const std::string& output_directory);
VisitResult Visit(Expression* expr);
const Type* Visit(Statement* stmt);
@@ -326,7 +329,8 @@ class ImplementationVisitor : public FileVisitor {
const std::vector<VisitResult>& arguments,
const std::vector<Block*> label_blocks);
void VisitMacroCommon(Macro* macro);
- void Visit(Macro* macro);
+ void Visit(ExternMacro* macro) {}
+ void Visit(TorqueMacro* macro);
void Visit(Method* macro);
void Visit(Builtin* builtin);
void Visit(NamespaceConstant* decl);
@@ -387,6 +391,8 @@ class ImplementationVisitor : public FileVisitor {
LabelBindingsManager::Scope label_bindings_manager;
};
+ void SetDryRun(bool is_dry_run) { is_dry_run_ = is_dry_run; }
+
private:
base::Optional<Block*> GetCatchBlock();
void GenerateCatchBlock(base::Optional<Block*> catch_block);
@@ -526,7 +532,7 @@ class ImplementationVisitor : public FileVisitor {
void GenerateBranch(const VisitResult& condition, Block* true_block,
Block* false_block);
- typedef std::function<VisitResult()> VisitResultGenerator;
+ using VisitResultGenerator = std::function<VisitResult()>;
void GenerateExpressionBranch(VisitResultGenerator, Block* true_block,
Block* false_block);
void GenerateExpressionBranch(Expression* expression, Block* true_block,
@@ -535,11 +541,10 @@ class ImplementationVisitor : public FileVisitor {
void GenerateMacroFunctionDeclaration(std::ostream& o,
const std::string& macro_prefix,
Macro* macro);
- void GenerateFunctionDeclaration(std::ostream& o,
- const std::string& macro_prefix,
- const std::string& name,
- const Signature& signature,
- const NameVector& parameter_names);
+ std::vector<std::string> GenerateFunctionDeclaration(
+ std::ostream& o, const std::string& macro_prefix, const std::string& name,
+ const Signature& signature, const NameVector& parameter_names,
+ bool pass_code_assembler_state = true);
VisitResult GenerateImplicitConvert(const Type* destination_type,
VisitResult source);
@@ -548,7 +553,7 @@ class ImplementationVisitor : public FileVisitor {
base::Optional<StackRange> arguments = {});
std::vector<Binding<LocalLabel>*> LabelsFromIdentifiers(
- const std::vector<std::string>& names);
+ const std::vector<Identifier*>& names);
StackRange LowerParameter(const Type* type, const std::string& parameter_name,
Stack<std::string>* lowered_parameters);
@@ -592,8 +597,14 @@ class ImplementationVisitor : public FileVisitor {
return return_value;
}
+ void WriteFile(const std::string& file, const std::string& content) {
+ if (is_dry_run_) return;
+ ReplaceFileContentsIfDifferent(file, content);
+ }
+
base::Optional<CfgAssembler> assembler_;
NullOStream null_stream_;
+ bool is_dry_run_;
};
} // namespace torque
diff --git a/deps/v8/src/torque/ls/json-parser.cc b/deps/v8/src/torque/ls/json-parser.cc
index 6a70691bd0..708b75fd4c 100644
--- a/deps/v8/src/torque/ls/json-parser.cc
+++ b/deps/v8/src/torque/ls/json-parser.cc
@@ -184,13 +184,15 @@ JsonParserResult ParseJson(const std::string& input) {
// Torque needs a CurrentSourceFile scope during parsing.
// As JSON lives in memory only, a unknown file scope is created.
SourceFileMap::Scope source_map_scope;
+ TorqueMessages::Scope messages_scope;
CurrentSourceFile::Scope unkown_file(SourceFileMap::AddSource("<json>"));
JsonParserResult result;
try {
result.value = (*JsonGrammar().Parse(input)).Cast<JsonValue>();
- } catch (TorqueError& error) {
- result.error = error;
+ } catch (TorqueAbortCompilation&) {
+ CHECK(!TorqueMessages::Get().empty());
+ result.error = TorqueMessages::Get().front();
}
return result;
}
diff --git a/deps/v8/src/torque/ls/json-parser.h b/deps/v8/src/torque/ls/json-parser.h
index e132375670..1c5fd3f147 100644
--- a/deps/v8/src/torque/ls/json-parser.h
+++ b/deps/v8/src/torque/ls/json-parser.h
@@ -17,7 +17,7 @@ namespace ls {
struct JsonParserResult {
JsonValue value;
- base::Optional<TorqueError> error;
+ base::Optional<TorqueMessage> error;
};
V8_EXPORT_PRIVATE JsonParserResult ParseJson(const std::string& input);
diff --git a/deps/v8/src/torque/ls/message-handler.cc b/deps/v8/src/torque/ls/message-handler.cc
index 715fd9d34f..d55c11af56 100644
--- a/deps/v8/src/torque/ls/message-handler.cc
+++ b/deps/v8/src/torque/ls/message-handler.cc
@@ -19,6 +19,7 @@ namespace torque {
DEFINE_CONTEXTUAL_VARIABLE(Logger)
DEFINE_CONTEXTUAL_VARIABLE(TorqueFileList)
+DEFINE_CONTEXTUAL_VARIABLE(DiagnosticsFiles)
namespace ls {
@@ -63,21 +64,131 @@ void WriteMessage(JsonValue& message) {
namespace {
-void RecompileTorque() {
+void ResetCompilationErrorDiagnostics(MessageWriter writer) {
+ for (const SourceId& source : DiagnosticsFiles::Get()) {
+ PublishDiagnosticsNotification notification;
+ notification.set_method("textDocument/publishDiagnostics");
+
+ std::string error_file = SourceFileMap::GetSource(source);
+ notification.params().set_uri(error_file);
+ // Trigger empty array creation.
+ USE(notification.params().diagnostics_size());
+
+ writer(notification.GetJsonValue());
+ }
+ DiagnosticsFiles::Get() = {};
+}
+
+// Each notification must contain all diagnostics for a specific file,
+// because sending multiple notifications per file resets previously sent
+// diagnostics. Thus, two steps are needed:
+// 1) collect all notifications in this class.
+// 2) send one notification per entry (per file).
+class DiagnosticCollector {
+ public:
+ void AddTorqueMessage(const TorqueMessage& message) {
+ SourceId id =
+ message.position ? message.position->source : SourceId::Invalid();
+ auto& notification = GetOrCreateNotificationForSource(id);
+
+ Diagnostic diagnostic = notification.params().add_diagnostics();
+ diagnostic.set_severity(ServerityFor(message.kind));
+ diagnostic.set_message(message.message);
+ diagnostic.set_source("Torque Compiler");
+
+ if (message.position) {
+ PopulateRangeFromSourcePosition(diagnostic.range(), *message.position);
+ }
+ }
+
+ std::map<SourceId, PublishDiagnosticsNotification>& notifications() {
+ return notifications_;
+ }
+
+ private:
+ PublishDiagnosticsNotification& GetOrCreateNotificationForSource(
+ SourceId id) {
+ auto iter = notifications_.find(id);
+ if (iter != notifications_.end()) return iter->second;
+
+ PublishDiagnosticsNotification& notification = notifications_[id];
+ notification.set_method("textDocument/publishDiagnostics");
+
+ std::string file =
+ id.IsValid() ? SourceFileMap::GetSource(id) : "<unknown>";
+ notification.params().set_uri(file);
+ return notification;
+ }
+
+ void PopulateRangeFromSourcePosition(Range range,
+ const SourcePosition& position) {
+ range.start().set_line(position.start.line);
+ range.start().set_character(position.start.column);
+ range.end().set_line(position.end.line);
+ range.end().set_character(position.end.column);
+ }
+
+ Diagnostic::DiagnosticSeverity ServerityFor(TorqueMessage::Kind kind) {
+ switch (kind) {
+ case TorqueMessage::Kind::kError:
+ return Diagnostic::kError;
+ case TorqueMessage::Kind::kLint:
+ return Diagnostic::kWarning;
+ }
+ }
+
+ std::map<SourceId, PublishDiagnosticsNotification> notifications_;
+};
+
+void SendCompilationDiagnostics(const TorqueCompilerResult& result,
+ MessageWriter writer) {
+ DiagnosticCollector collector;
+
+ // TODO(szuend): Split up messages by SourceId and sort them by line number.
+ for (const TorqueMessage& message : result.messages) {
+ collector.AddTorqueMessage(message);
+ }
+
+ for (auto& pair : collector.notifications()) {
+ PublishDiagnosticsNotification& notification = pair.second;
+ writer(notification.GetJsonValue());
+
+ // Record all source files for which notifications are sent, so they
+ // can be reset before the next compiler run.
+ const SourceId& source = pair.first;
+ if (source.IsValid()) DiagnosticsFiles::Get().push_back(source);
+ }
+}
+
+} // namespace
+
+void CompilationFinished(TorqueCompilerResult result, MessageWriter writer) {
+ LanguageServerData::Get() = std::move(result.language_server_data);
+ SourceFileMap::Get() = result.source_file_map;
+
+ SendCompilationDiagnostics(result, writer);
+}
+
+namespace {
+
+void RecompileTorque(MessageWriter writer) {
Logger::Log("[info] Start compilation run ...\n");
TorqueCompilerOptions options;
options.output_directory = "";
- options.verbose = false;
options.collect_language_server_data = true;
- options.abort_on_lint_errors = false;
+ options.force_assert_statements = true;
TorqueCompilerResult result = CompileTorque(TorqueFileList::Get(), options);
- LanguageServerData::Get() = result.language_server_data;
- SourceFileMap::Get() = result.source_file_map;
-
Logger::Log("[info] Finished compilation run ...\n");
+
+ CompilationFinished(std::move(result), writer);
+}
+
+void RecompileTorqueWithDiagnostics(MessageWriter writer) {
+ ResetCompilationErrorDiagnostics(writer);
+ RecompileTorque(writer);
}
void HandleInitializeRequest(InitializeRequest request, MessageWriter writer) {
@@ -85,6 +196,7 @@ void HandleInitializeRequest(InitializeRequest request, MessageWriter writer) {
response.set_id(request.id());
response.result().capabilities().textDocumentSync();
response.result().capabilities().set_definitionProvider(true);
+ response.result().capabilities().set_documentSymbolProvider(true);
// TODO(szuend): Register for document synchronisation here,
// so we work with the content that the client
@@ -115,7 +227,8 @@ void HandleInitializedNotification(MessageWriter writer) {
writer(request.GetJsonValue());
}
-void HandleTorqueFileListNotification(TorqueFileListNotification notification) {
+void HandleTorqueFileListNotification(TorqueFileListNotification notification,
+ MessageWriter writer) {
CHECK_EQ(notification.params().object()["files"].tag, JsonValue::ARRAY);
std::vector<std::string>& files = TorqueFileList::Get();
@@ -130,22 +243,7 @@ void HandleTorqueFileListNotification(TorqueFileListNotification notification) {
files.push_back(file_json.ToString());
Logger::Log(" ", file_json.ToString(), "\n");
}
-
- // The Torque compiler expects to see some files first,
- // we need to order them in the correct way.
- // TODO(szuend): Remove this, once the compiler doesn't require the input
- // files to be in a specific order.
- std::vector<std::string> sort_to_front = {"base.tq", "frames.tq",
- "arguments.tq", "array.tq"};
- std::sort(files.begin(), files.end(), [&](std::string a, std::string b) {
- for (const std::string& fixed_file : sort_to_front) {
- if (a.find(fixed_file) != std::string::npos) return true;
- if (b.find(fixed_file) != std::string::npos) return false;
- }
- return a < b;
- });
-
- RecompileTorque();
+ RecompileTorqueWithDiagnostics(writer);
}
void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
@@ -169,15 +267,7 @@ void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
if (auto maybe_definition = LanguageServerData::FindDefinition(id, pos)) {
SourcePosition definition = *maybe_definition;
-
- std::string definition_file = SourceFileMap::GetSource(definition.source);
- response.result().set_uri(definition_file);
-
- Range range = response.result().range();
- range.start().set_line(definition.start.line);
- range.start().set_character(definition.start.column);
- range.end().set_line(definition.end.line);
- range.end().set_character(definition.end.column);
+ response.result().SetTo(definition);
} else {
response.SetNull("result");
}
@@ -186,10 +276,56 @@ void HandleGotoDefinitionRequest(GotoDefinitionRequest request,
}
void HandleChangeWatchedFilesNotification(
- DidChangeWatchedFilesNotification notification) {
+ DidChangeWatchedFilesNotification notification, MessageWriter writer) {
// TODO(szuend): Implement updates to the TorqueFile list when create/delete
// notifications are received. Currently we simply re-compile.
- RecompileTorque();
+ RecompileTorqueWithDiagnostics(writer);
+}
+
+void HandleDocumentSymbolRequest(DocumentSymbolRequest request,
+ MessageWriter writer) {
+ DocumentSymbolResponse response;
+ response.set_id(request.id());
+
+ SourceId id =
+ SourceFileMap::GetSourceId(request.params().textDocument().uri());
+
+ for (const auto& symbol : LanguageServerData::SymbolsForSourceId(id)) {
+ DCHECK(symbol->IsUserDefined());
+ if (symbol->IsMacro()) {
+ Macro* macro = Macro::cast(symbol);
+ SymbolInformation symbol = response.add_result();
+ symbol.set_name(macro->ReadableName());
+ symbol.set_kind(SymbolKind::kFunction);
+ symbol.location().SetTo(macro->Position());
+ } else if (symbol->IsBuiltin()) {
+ Builtin* builtin = Builtin::cast(symbol);
+ SymbolInformation symbol = response.add_result();
+ symbol.set_name(builtin->ReadableName());
+ symbol.set_kind(SymbolKind::kFunction);
+ symbol.location().SetTo(builtin->Position());
+ } else if (symbol->IsGeneric()) {
+ Generic* generic = Generic::cast(symbol);
+ SymbolInformation symbol = response.add_result();
+ symbol.set_name(generic->name());
+ symbol.set_kind(SymbolKind::kFunction);
+ symbol.location().SetTo(generic->Position());
+ } else if (symbol->IsTypeAlias()) {
+ const Type* type = TypeAlias::cast(symbol)->type();
+ SymbolKind kind =
+ type->IsClassType() ? SymbolKind::kClass : SymbolKind::kStruct;
+
+ SymbolInformation sym = response.add_result();
+ sym.set_name(type->ToString());
+ sym.set_kind(kind);
+ sym.location().SetTo(symbol->Position());
+ }
+ }
+
+ // Trigger empty array creation in case no symbols were found.
+ USE(response.result_size());
+
+ writer(response.GetJsonValue());
}
} // namespace
@@ -213,13 +349,16 @@ void HandleMessage(JsonValue& raw_message, MessageWriter writer) {
HandleInitializedNotification(writer);
} else if (method == "torque/fileList") {
HandleTorqueFileListNotification(
- TorqueFileListNotification(request.GetJsonValue()));
+ TorqueFileListNotification(request.GetJsonValue()), writer);
} else if (method == "textDocument/definition") {
HandleGotoDefinitionRequest(GotoDefinitionRequest(request.GetJsonValue()),
writer);
} else if (method == "workspace/didChangeWatchedFiles") {
HandleChangeWatchedFilesNotification(
- DidChangeWatchedFilesNotification(request.GetJsonValue()));
+ DidChangeWatchedFilesNotification(request.GetJsonValue()), writer);
+ } else if (method == "textDocument/documentSymbol") {
+ HandleDocumentSymbolRequest(DocumentSymbolRequest(request.GetJsonValue()),
+ writer);
} else {
Logger::Log("[error] Message of type ", method, " is not handled!\n\n");
}
diff --git a/deps/v8/src/torque/ls/message-handler.h b/deps/v8/src/torque/ls/message-handler.h
index 2f0f83f1b4..3be5cf03e4 100644
--- a/deps/v8/src/torque/ls/message-handler.h
+++ b/deps/v8/src/torque/ls/message-handler.h
@@ -7,10 +7,19 @@
#include "src/base/macros.h"
#include "src/torque/ls/json.h"
+#include "src/torque/source-positions.h"
+#include "src/torque/torque-compiler.h"
namespace v8 {
namespace internal {
namespace torque {
+
+// A list of source Ids for which the LS provided diagnostic information
+// after the last compile. The LS is responsible for syncing diagnostic
+// information with the client. Before updated information can be sent,
+// old diagnostic messages have to be reset.
+DECLARE_CONTEXTUAL_VARIABLE(DiagnosticsFiles, std::vector<SourceId>);
+
namespace ls {
// The message handler might send responses or follow up requests.
@@ -19,6 +28,10 @@ using MessageWriter = void (*)(JsonValue& message);
V8_EXPORT_PRIVATE void HandleMessage(JsonValue& raw_message, MessageWriter);
+// Called when a compilation run finishes. Exposed for testability.
+V8_EXPORT_PRIVATE void CompilationFinished(TorqueCompilerResult result,
+ MessageWriter);
+
} // namespace ls
} // namespace torque
} // namespace internal
diff --git a/deps/v8/src/torque/ls/message.h b/deps/v8/src/torque/ls/message.h
index 65c7ce1b9e..4389e9265d 100644
--- a/deps/v8/src/torque/ls/message.h
+++ b/deps/v8/src/torque/ls/message.h
@@ -8,6 +8,7 @@
#include "src/base/logging.h"
#include "src/torque/ls/json.h"
#include "src/torque/ls/message-macros.h"
+#include "src/torque/source-positions.h"
namespace v8 {
namespace internal {
@@ -190,6 +191,7 @@ class ServerCapabilities : public NestedJsonAccessor {
JSON_OBJECT_ACCESSORS(TextDocumentSyncOptions, textDocumentSync)
JSON_BOOL_ACCESSORS(definitionProvider)
+ JSON_BOOL_ACCESSORS(documentSymbolProvider)
};
class InitializeResult : public NestedJsonAccessor {
@@ -237,6 +239,14 @@ class Location : public NestedJsonAccessor {
JSON_STRING_ACCESSORS(uri)
JSON_OBJECT_ACCESSORS(Range, range)
+
+ void SetTo(SourcePosition position) {
+ set_uri(SourceFileMap::GetSource(position.source));
+ range().start().set_line(position.start.line);
+ range().start().set_character(position.start.column);
+ range().end().set_line(position.end.line);
+ range().end().set_character(position.end.column);
+ }
};
class TextDocumentIdentifier : public NestedJsonAccessor {
@@ -254,6 +264,62 @@ class TextDocumentPositionParams : public NestedJsonAccessor {
JSON_OBJECT_ACCESSORS(JsonPosition, position)
};
+class Diagnostic : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ enum DiagnosticSeverity {
+ kError = 1,
+ kWarning = 2,
+ kInformation = 3,
+ kHint = 4
+ };
+
+ JSON_OBJECT_ACCESSORS(Range, range)
+ JSON_INT_ACCESSORS(severity)
+ JSON_STRING_ACCESSORS(source)
+ JSON_STRING_ACCESSORS(message)
+};
+
+class PublishDiagnosticsParams : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_STRING_ACCESSORS(uri)
+ JSON_ARRAY_OBJECT_ACCESSORS(Diagnostic, diagnostics)
+};
+
+enum SymbolKind {
+ kFile = 1,
+ kNamespace = 3,
+ kClass = 5,
+ kMethod = 6,
+ kProperty = 7,
+ kField = 8,
+ kConstructor = 9,
+ kFunction = 12,
+ kVariable = 13,
+ kConstant = 14,
+ kStruct = 23,
+};
+
+class DocumentSymbolParams : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_OBJECT_ACCESSORS(TextDocumentIdentifier, textDocument)
+};
+
+class SymbolInformation : public NestedJsonAccessor {
+ public:
+ using NestedJsonAccessor::NestedJsonAccessor;
+
+ JSON_STRING_ACCESSORS(name)
+ JSON_INT_ACCESSORS(kind)
+ JSON_OBJECT_ACCESSORS(Location, location)
+ JSON_STRING_ACCESSORS(containerName)
+};
+
template <class T>
class Request : public Message {
public:
@@ -269,6 +335,8 @@ using RegistrationRequest = Request<RegistrationParams>;
using TorqueFileListNotification = Request<FileListParams>;
using GotoDefinitionRequest = Request<TextDocumentPositionParams>;
using DidChangeWatchedFilesNotification = Request<DidChangeWatchedFilesParams>;
+using PublishDiagnosticsNotification = Request<PublishDiagnosticsParams>;
+using DocumentSymbolRequest = Request<DocumentSymbolParams>;
template <class T>
class Response : public Message {
@@ -283,6 +351,19 @@ class Response : public Message {
using InitializeResponse = Response<InitializeResult>;
using GotoDefinitionResponse = Response<Location>;
+// Same as "Response" but the result is T[] instead of T.
+template <class T>
+class ResponseArrayResult : public Message {
+ public:
+ explicit ResponseArrayResult(JsonValue& value) : Message(value) {}
+ ResponseArrayResult() : Message() {}
+
+ JSON_INT_ACCESSORS(id)
+ JSON_OBJECT_ACCESSORS(ResponseError, error)
+ JSON_ARRAY_OBJECT_ACCESSORS(T, result)
+};
+using DocumentSymbolResponse = ResponseArrayResult<SymbolInformation>;
+
} // namespace ls
} // namespace torque
} // namespace internal
diff --git a/deps/v8/src/torque/ls/torque-language-server.cc b/deps/v8/src/torque/ls/torque-language-server.cc
index e8b16f641c..4cf0b4c9fb 100644
--- a/deps/v8/src/torque/ls/torque-language-server.cc
+++ b/deps/v8/src/torque/ls/torque-language-server.cc
@@ -22,6 +22,7 @@ int WrappedMain(int argc, const char** argv) {
TorqueFileList::Scope files_scope;
LanguageServerData::Scope server_data_scope;
SourceFileMap::Scope source_file_map_scope;
+ DiagnosticsFiles::Scope diagnostics_files_scope;
for (int i = 1; i < argc; ++i) {
if (!strcmp("-l", argv[i])) {
diff --git a/deps/v8/src/torque/server-data.cc b/deps/v8/src/torque/server-data.cc
index 2dc92a4960..2911a2b4cd 100644
--- a/deps/v8/src/torque/server-data.cc
+++ b/deps/v8/src/torque/server-data.cc
@@ -4,6 +4,9 @@
#include "src/torque/server-data.h"
+#include "src/torque/declarable.h"
+#include "src/torque/implementation-visitor.h"
+
namespace v8 {
namespace internal {
namespace torque {
@@ -17,7 +20,12 @@ void LanguageServerData::AddDefinition(SourcePosition token,
base::Optional<SourcePosition> LanguageServerData::FindDefinition(
SourceId source, LineAndColumn pos) {
- for (const DefinitionMapping& mapping : Get().definitions_map_.at(source)) {
+ if (!source.IsValid()) return base::nullopt;
+
+ auto iter = Get().definitions_map_.find(source);
+ if (iter == Get().definitions_map_.end()) return base::nullopt;
+
+ for (const DefinitionMapping& mapping : iter->second) {
SourcePosition current = mapping.first;
if (current.Contains(pos)) return mapping.second;
}
@@ -25,6 +33,20 @@ base::Optional<SourcePosition> LanguageServerData::FindDefinition(
return base::nullopt;
}
+void LanguageServerData::PrepareAllDeclarableSymbols() {
+ const std::vector<std::unique_ptr<Declarable>>& all_declarables =
+ global_context_->declarables_;
+
+ for (const auto& declarable : all_declarables) {
+ // Class field accessors and implicit specializations are
+ // auto-generated and should not show up.
+ if (!declarable->IsUserDefined()) continue;
+
+ SourceId source = declarable->Position().source;
+ symbols_map_[source].push_back(declarable.get());
+ }
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/server-data.h b/deps/v8/src/torque/server-data.h
index 1377fae3d9..ebaafb2fd0 100644
--- a/deps/v8/src/torque/server-data.h
+++ b/deps/v8/src/torque/server-data.h
@@ -10,6 +10,8 @@
#include "src/base/macros.h"
#include "src/base/optional.h"
+#include "src/torque/declarable.h"
+#include "src/torque/global-context.h"
#include "src/torque/source-positions.h"
namespace v8 {
@@ -22,6 +24,13 @@ using DefinitionMapping = std::pair<SourcePosition, SourcePosition>;
using Definitions = std::vector<DefinitionMapping>;
using DefinitionsMap = std::map<SourceId, Definitions>;
+// Symbols are used to answer search queries (either workspace or document
+// scope). For now, declarables are stored directly without converting them
+// into a custom format. Symbols are grouped by sourceId to implement document
+// scoped searches.
+using Symbols = std::vector<Declarable*>;
+using SymbolsMap = std::map<SourceId, Symbols>;
+
// This contextual class holds all the necessary data to answer incoming
// LSP requests. It is reset for each compilation step and all information
// is calculated eagerly during compilation.
@@ -35,8 +44,28 @@ class LanguageServerData : public ContextualClass<LanguageServerData> {
V8_EXPORT_PRIVATE static base::Optional<SourcePosition> FindDefinition(
SourceId source, LineAndColumn pos);
+ static void SetGlobalContext(GlobalContext global_context) {
+ Get().global_context_ =
+ base::make_unique<GlobalContext>(std::move(global_context));
+ Get().PrepareAllDeclarableSymbols();
+ }
+
+ static void SetTypeOracle(TypeOracle type_oracle) {
+ Get().type_oracle_ = base::make_unique<TypeOracle>(std::move(type_oracle));
+ }
+
+ static const Symbols& SymbolsForSourceId(SourceId id) {
+ return Get().symbols_map_[id];
+ }
+
private:
+ // Splits all declarables up by SourceId and filters out auto-generated ones.
+ void PrepareAllDeclarableSymbols();
+
DefinitionsMap definitions_map_;
+ SymbolsMap symbols_map_;
+ std::unique_ptr<GlobalContext> global_context_;
+ std::unique_ptr<TypeOracle> type_oracle_;
};
} // namespace torque
diff --git a/deps/v8/src/torque/torque-compiler.cc b/deps/v8/src/torque/torque-compiler.cc
index 9bcff411ed..d761b3ab53 100644
--- a/deps/v8/src/torque/torque-compiler.cc
+++ b/deps/v8/src/torque/torque-compiler.cc
@@ -39,7 +39,7 @@ void ReadAndParseTorqueFile(const std::string& path) {
}
if (!maybe_content) {
- ReportErrorWithoutPosition("Cannot open file path/uri: ", path);
+ Error("Cannot open file path/uri: ", path).Throw();
}
ParseTorque(*maybe_content);
@@ -47,58 +47,54 @@ void ReadAndParseTorqueFile(const std::string& path) {
void CompileCurrentAst(TorqueCompilerOptions options) {
GlobalContext::Scope global_context(std::move(CurrentAst::Get()));
- if (options.verbose) GlobalContext::SetVerbose();
if (options.collect_language_server_data) {
GlobalContext::SetCollectLanguageServerData();
}
+ if (options.force_assert_statements) {
+ GlobalContext::SetForceAssertStatements();
+ }
TypeOracle::Scope type_oracle;
- DeclarationVisitor declaration_visitor;
+ // Two-step process of predeclaration + resolution allows to resolve type
+ // declarations independent of the order they are given.
+ PredeclarationVisitor::Predeclare(GlobalContext::Get().ast());
+ PredeclarationVisitor::ResolvePredeclarations();
+
+ // Process other declarations.
+ DeclarationVisitor::Visit(GlobalContext::Get().ast());
- declaration_visitor.Visit(GlobalContext::Get().ast());
- declaration_visitor.FinalizeStructsAndClasses();
+ // A class types' fields are resolved here, which allows two class fields to
+ // mutually refer to each others.
+ TypeOracle::FinalizeClassTypes();
+
+ std::string output_directory = options.output_directory;
ImplementationVisitor implementation_visitor;
+ implementation_visitor.SetDryRun(output_directory.length() == 0);
+
for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
implementation_visitor.BeginNamespaceFile(n);
}
implementation_visitor.VisitAllDeclarables();
- std::string output_directory = options.output_directory;
- if (output_directory.length() != 0) {
- std::string output_header_path = output_directory;
- output_header_path += "/builtin-definitions-from-dsl.h";
- implementation_visitor.GenerateBuiltinDefinitions(output_header_path);
-
- output_header_path = output_directory + "/class-definitions-from-dsl.h";
- implementation_visitor.GenerateClassDefinitions(output_header_path);
-
- std::string output_source_path =
- output_directory + "/objects-printer-from-dsl.cc";
- implementation_visitor.GeneratePrintDefinitions(output_source_path);
+ implementation_visitor.GenerateBuiltinDefinitions(output_directory);
+ implementation_visitor.GenerateClassFieldOffsets(output_directory);
+ implementation_visitor.GeneratePrintDefinitions(output_directory);
+ implementation_visitor.GenerateClassDefinitions(output_directory);
+ implementation_visitor.GenerateClassVerifiers(output_directory);
+ implementation_visitor.GenerateExportedMacrosAssembler(output_directory);
+ implementation_visitor.GenerateCSATypes(output_directory);
- for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
- implementation_visitor.EndNamespaceFile(n);
- implementation_visitor.GenerateImplementation(output_directory, n);
- }
+ for (Namespace* n : GlobalContext::Get().GetNamespaces()) {
+ implementation_visitor.EndNamespaceFile(n);
+ implementation_visitor.GenerateImplementation(output_directory, n);
}
- if (LintErrorStatus::HasLintErrors()) std::abort();
-}
-
-TorqueCompilerResult CollectResultFromContextuals() {
- TorqueCompilerResult result;
- result.source_file_map = SourceFileMap::Get();
- result.language_server_data = LanguageServerData::Get();
- return result;
-}
-
-TorqueCompilerResult ResultFromError(TorqueError& error) {
- TorqueCompilerResult result;
- result.source_file_map = SourceFileMap::Get();
- result.error = error;
- return result;
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::SetGlobalContext(std::move(GlobalContext::Get()));
+ LanguageServerData::SetTypeOracle(std::move(TypeOracle::Get()));
+ }
}
} // namespace
@@ -108,17 +104,23 @@ TorqueCompilerResult CompileTorque(const std::string& source,
SourceFileMap::Scope source_map_scope;
CurrentSourceFile::Scope no_file_scope(SourceFileMap::AddSource("<torque>"));
CurrentAst::Scope ast_scope;
- LintErrorStatus::Scope lint_error_status_scope;
+ TorqueMessages::Scope messages_scope;
LanguageServerData::Scope server_data_scope;
+ TorqueCompilerResult result;
try {
ParseTorque(source);
CompileCurrentAst(options);
- } catch (TorqueError& error) {
- return ResultFromError(error);
+ } catch (TorqueAbortCompilation&) {
+ // Do nothing. The relevant TorqueMessage is part of the
+ // TorqueMessages contextual.
}
- return CollectResultFromContextuals();
+ result.source_file_map = SourceFileMap::Get();
+ result.language_server_data = std::move(LanguageServerData::Get());
+ result.messages = std::move(TorqueMessages::Get());
+
+ return result;
}
TorqueCompilerResult CompileTorque(std::vector<std::string> files,
@@ -126,16 +128,23 @@ TorqueCompilerResult CompileTorque(std::vector<std::string> files,
SourceFileMap::Scope source_map_scope;
CurrentSourceFile::Scope unknown_source_file_scope(SourceId::Invalid());
CurrentAst::Scope ast_scope;
- LintErrorStatus::Scope lint_error_status_scope;
+ TorqueMessages::Scope messages_scope;
LanguageServerData::Scope server_data_scope;
+ TorqueCompilerResult result;
try {
for (const auto& path : files) ReadAndParseTorqueFile(path);
CompileCurrentAst(options);
- } catch (TorqueError& error) {
- return ResultFromError(error);
+ } catch (TorqueAbortCompilation&) {
+ // Do nothing. The relevant TorqueMessage is part of the
+ // TorqueMessages contextual.
}
- return CollectResultFromContextuals();
+
+ result.source_file_map = SourceFileMap::Get();
+ result.language_server_data = std::move(LanguageServerData::Get());
+ result.messages = std::move(TorqueMessages::Get());
+
+ return result;
}
} // namespace torque
diff --git a/deps/v8/src/torque/torque-compiler.h b/deps/v8/src/torque/torque-compiler.h
index a97df1906f..8e412d1be0 100644
--- a/deps/v8/src/torque/torque-compiler.h
+++ b/deps/v8/src/torque/torque-compiler.h
@@ -16,10 +16,13 @@ namespace internal {
namespace torque {
struct TorqueCompilerOptions {
- std::string output_directory;
- bool verbose;
- bool collect_language_server_data;
- bool abort_on_lint_errors;
+ std::string output_directory = "";
+ bool collect_language_server_data = false;
+
+ // assert(...) are only generated for debug builds. The provide
+ // language server support for statements inside asserts, this flag
+ // can force generate them.
+ bool force_assert_statements = false;
};
struct TorqueCompilerResult {
@@ -32,9 +35,8 @@ struct TorqueCompilerResult {
// Set the corresponding options flag to enable.
LanguageServerData language_server_data;
- // If any error occurred during either parsing or compilation,
- // this field will be set.
- base::Optional<TorqueError> error;
+ // Errors collected during compilation.
+ std::vector<TorqueMessage> messages;
};
V8_EXPORT_PRIVATE TorqueCompilerResult
diff --git a/deps/v8/src/torque/torque-parser.cc b/deps/v8/src/torque/torque-parser.cc
index dd8bfe2d49..619096c6a5 100644
--- a/deps/v8/src/torque/torque-parser.cc
+++ b/deps/v8/src/torque/torque-parser.cc
@@ -2,8 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <algorithm>
#include <cctype>
+#include <set>
+#include <unordered_map>
+#include "src/common/globals.h"
+#include "src/torque/constants.h"
#include "src/torque/earley-parser.h"
#include "src/torque/torque-parser.h"
#include "src/torque/utils.h"
@@ -29,6 +34,29 @@ struct TypeswitchCase {
Statement* block;
};
+class BuildFlags : public ContextualClass<BuildFlags> {
+ public:
+ BuildFlags() {
+ build_flags_["V8_SFI_HAS_UNIQUE_ID"] = V8_SFI_HAS_UNIQUE_ID;
+ build_flags_["TAGGED_SIZE_8_BYTES"] = TAGGED_SIZE_8_BYTES;
+ build_flags_["V8_DOUBLE_FIELDS_UNBOXING"] = V8_DOUBLE_FIELDS_UNBOXING;
+ build_flags_["TRUE_FOR_TESTING"] = true;
+ build_flags_["FALSE_FOR_TESTING"] = false;
+ }
+ static bool GetFlag(const std::string& name, const char* production) {
+ auto it = Get().build_flags_.find(name);
+ if (it == Get().build_flags_.end()) {
+ ReportError("Unknown flag used in ", production, ": ", name,
+ ". Please add it to the list in BuildFlags.");
+ }
+ return it->second;
+ }
+
+ private:
+ std::unordered_map<std::string, bool> build_flags_;
+};
+DEFINE_CONTEXTUAL_VARIABLE(BuildFlags)
+
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId ParseResultHolder<std::string>::id =
ParseResultTypeId::kStdString;
@@ -80,6 +108,14 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultTypeId::kNameAndExpression;
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<ConditionalAnnotation>::id =
+ ParseResultTypeId::kConditionalAnnotation;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<base::Optional<ConditionalAnnotation>>::id =
+ ParseResultTypeId::kOptionalConditionalAnnotation;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<ClassFieldExpression>::id =
ParseResultTypeId::kClassFieldExpression;
template <>
@@ -120,6 +156,10 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultTypeId::kStdVectorOfDeclarationPtr;
template <>
V8_EXPORT_PRIVATE const ParseResultTypeId
+ ParseResultHolder<std::vector<std::vector<Declaration*>>>::id =
+ ParseResultTypeId::kStdVectorOfStdVectorOfDeclarationPtr;
+template <>
+V8_EXPORT_PRIVATE const ParseResultTypeId
ParseResultHolder<std::vector<Expression*>>::id =
ParseResultTypeId::kStdVectorOfExpressionPtr;
template <>
@@ -177,10 +217,12 @@ V8_EXPORT_PRIVATE const ParseResultTypeId
namespace {
-base::Optional<ParseResult> AddGlobalDeclaration(
+base::Optional<ParseResult> AddGlobalDeclarations(
ParseResultIterator* child_results) {
- auto declaration = child_results->NextAs<Declaration*>();
- CurrentAst::Get().declarations().push_back(declaration);
+ auto declarations = child_results->NextAs<std::vector<Declaration*>>();
+ for (Declaration* declaration : declarations) {
+ CurrentAst::Get().declarations().push_back(declaration);
+ }
return base::nullopt;
}
@@ -193,11 +235,21 @@ void LintGenericParameters(const GenericParameters& parameters) {
}
}
+base::Optional<ParseResult> ConcatList(ParseResultIterator* child_results) {
+ auto list_of_lists =
+ child_results->NextAs<std::vector<std::vector<Declaration*>>>();
+ std::vector<Declaration*> result;
+ for (auto& list : list_of_lists) {
+ result.insert(result.end(), list.begin(), list.end());
+ }
+ return ParseResult{result};
+}
+
void CheckNotDeferredStatement(Statement* statement) {
CurrentSourcePosition::Scope source_position(statement->pos);
if (BlockStatement* block = BlockStatement::DynamicCast(statement)) {
if (block->deferred) {
- LintError(
+ Lint(
"cannot use deferred with a statement block here, it will have no "
"effect");
}
@@ -208,7 +260,7 @@ Expression* MakeCall(IdentifierExpression* callee,
base::Optional<Expression*> target,
std::vector<Expression*> arguments,
const std::vector<Statement*>& otherwise) {
- std::vector<std::string> labels;
+ std::vector<Identifier*> labels;
// All IdentifierExpressions are treated as label names and can be directly
// used as labels identifiers. All other statements in a call's otherwise
@@ -221,14 +273,16 @@ Expression* MakeCall(IdentifierExpression* callee,
if (id->generic_arguments.size() != 0) {
ReportError("An otherwise label cannot have generic parameters");
}
- labels.push_back(id->name->value);
+ labels.push_back(id->name);
continue;
}
}
auto label_name = std::string("_label") + std::to_string(label_id++);
- labels.push_back(label_name);
+ auto label_id = MakeNode<Identifier>(label_name);
+ label_id->pos = SourcePosition::Invalid();
+ labels.push_back(label_id);
auto* label_block =
- MakeNode<LabelBlock>(label_name, ParameterList::Empty(), statement);
+ MakeNode<LabelBlock>(label_id, ParameterList::Empty(), statement);
temp_labels.push_back(label_block);
}
@@ -247,12 +301,11 @@ Expression* MakeCall(IdentifierExpression* callee,
return result;
}
-Expression* MakeCall(const std::string& callee,
+Expression* MakeCall(Identifier* callee,
const std::vector<TypeExpression*>& generic_arguments,
const std::vector<Expression*>& arguments,
const std::vector<Statement*>& otherwise) {
- return MakeCall(MakeNode<IdentifierExpression>(MakeNode<Identifier>(callee),
- generic_arguments),
+ return MakeCall(MakeNode<IdentifierExpression>(callee, generic_arguments),
base::nullopt, arguments, otherwise);
}
@@ -285,7 +338,7 @@ base::Optional<ParseResult> MakeNewExpression(
base::Optional<ParseResult> MakeBinaryOperator(
ParseResultIterator* child_results) {
auto left = child_results->NextAs<Expression*>();
- auto op = child_results->NextAs<std::string>();
+ auto op = child_results->NextAs<Identifier*>();
auto right = child_results->NextAs<Expression*>();
return ParseResult{MakeCall(op, TypeList{},
std::vector<Expression*>{left, right},
@@ -305,7 +358,7 @@ base::Optional<ParseResult> MakeIntrinsicCallExpression(
base::Optional<ParseResult> MakeUnaryOperator(
ParseResultIterator* child_results) {
- auto op = child_results->NextAs<std::string>();
+ auto op = child_results->NextAs<Identifier*>();
auto e = child_results->NextAs<Expression*>();
return ParseResult{MakeCall(op, TypeList{}, std::vector<Expression*>{e},
std::vector<Statement*>{})};
@@ -377,7 +430,7 @@ base::Optional<ParseResult> MakeParameterListFromNameAndTypeList(
base::Optional<ParseResult> MakeAssertStatement(
ParseResultIterator* child_results) {
- auto kind = child_results->NextAs<std::string>();
+ auto kind = child_results->NextAs<Identifier*>()->value;
auto expr_with_source = child_results->NextAs<ExpressionWithSource>();
DCHECK(kind == "assert" || kind == "check");
Statement* result = MakeNode<AssertStatement>(
@@ -387,7 +440,7 @@ base::Optional<ParseResult> MakeAssertStatement(
base::Optional<ParseResult> MakeDebugStatement(
ParseResultIterator* child_results) {
- auto kind = child_results->NextAs<std::string>();
+ auto kind = child_results->NextAs<Identifier*>()->value;
DCHECK(kind == "unreachable" || kind == "debug");
Statement* result = MakeNode<DebugStatement>(kind, kind == "unreachable");
return ParseResult{result};
@@ -395,7 +448,7 @@ base::Optional<ParseResult> MakeDebugStatement(
base::Optional<ParseResult> MakeVoidType(ParseResultIterator* child_results) {
TypeExpression* result =
- MakeNode<BasicTypeExpression>(std::vector<std::string>{}, false, "void");
+ MakeNode<BasicTypeExpression>(std::vector<std::string>{}, "void");
return ParseResult{result};
}
@@ -433,19 +486,28 @@ base::Optional<ParseResult> MakeIntrinsicDeclaration(
auto args = child_results->NextAs<ParameterList>();
auto return_type = child_results->NextAs<TypeExpression*>();
- IntrinsicDeclaration* macro =
- MakeNode<IntrinsicDeclaration>(name, args, return_type);
+ auto body = child_results->NextAs<base::Optional<Statement*>>();
+ LabelAndTypesVector labels;
+ CallableNode* callable = nullptr;
+ if (body) {
+ callable = MakeNode<TorqueMacroDeclaration>(
+ false, name, base::Optional<std::string>{}, args, return_type, labels,
+ false);
+ } else {
+ callable = MakeNode<IntrinsicDeclaration>(name, args, return_type);
+ }
Declaration* result;
if (generic_parameters.empty()) {
- result = MakeNode<StandardDeclaration>(macro, base::nullopt);
+ result = MakeNode<StandardDeclaration>(callable, body);
} else {
- result = MakeNode<GenericDeclaration>(macro, generic_parameters);
+ result = MakeNode<GenericDeclaration>(callable, generic_parameters, body);
}
return ParseResult{result};
}
base::Optional<ParseResult> MakeTorqueMacroDeclaration(
ParseResultIterator* child_results) {
+ auto export_to_csa = child_results->NextAs<bool>();
auto transitioning = child_results->NextAs<bool>();
auto operator_name = child_results->NextAs<base::Optional<std::string>>();
auto name = child_results->NextAs<std::string>();
@@ -460,13 +522,15 @@ base::Optional<ParseResult> MakeTorqueMacroDeclaration(
auto return_type = child_results->NextAs<TypeExpression*>();
auto labels = child_results->NextAs<LabelAndTypesVector>();
auto body = child_results->NextAs<base::Optional<Statement*>>();
- MacroDeclaration* macro = MakeNode<TorqueMacroDeclaration>(
- transitioning, name, operator_name, args, return_type, labels);
+ MacroDeclaration* macro =
+ MakeNode<TorqueMacroDeclaration>(transitioning, name, operator_name, args,
+ return_type, labels, export_to_csa);
Declaration* result;
if (generic_parameters.empty()) {
if (!body) ReportError("A non-generic declaration needs a body.");
result = MakeNode<StandardDeclaration>(macro, *body);
} else {
+ if (export_to_csa) ReportError("Cannot export generics to CSA.");
result = MakeNode<GenericDeclaration>(macro, generic_parameters, body);
}
return ParseResult{result};
@@ -530,7 +594,7 @@ base::Optional<ParseResult> MakeTypeAliasDeclaration(
return ParseResult{result};
}
-base::Optional<ParseResult> MakeTypeDeclaration(
+base::Optional<ParseResult> MakeAbstractTypeDeclaration(
ParseResultIterator* child_results) {
auto transient = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
@@ -539,11 +603,31 @@ base::Optional<ParseResult> MakeTypeDeclaration(
}
auto extends = child_results->NextAs<base::Optional<Identifier*>>();
auto generates = child_results->NextAs<base::Optional<std::string>>();
+ Declaration* decl = MakeNode<AbstractTypeDeclaration>(
+ name, transient, extends, std::move(generates));
+
auto constexpr_generates =
child_results->NextAs<base::Optional<std::string>>();
- Declaration* result =
- MakeNode<TypeDeclaration>(name, transient, extends, std::move(generates),
- std::move(constexpr_generates));
+ std::vector<Declaration*> result{decl};
+
+ if (constexpr_generates) {
+ // Create a AbstractTypeDeclaration for the associated constexpr type.
+ Identifier* constexpr_name =
+ MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + name->value);
+ constexpr_name->pos = name->pos;
+
+ base::Optional<Identifier*> constexpr_extends;
+ if (extends) {
+ constexpr_extends =
+ MakeNode<Identifier>(CONSTEXPR_TYPE_PREFIX + (*extends)->value);
+ (*constexpr_extends)->pos = name->pos;
+ }
+ AbstractTypeDeclaration* constexpr_decl = MakeNode<AbstractTypeDeclaration>(
+ constexpr_name, transient, constexpr_extends, constexpr_generates);
+ constexpr_decl->pos = name->pos;
+ result.push_back(constexpr_decl);
+ }
+
return ParseResult{result};
}
@@ -561,27 +645,86 @@ base::Optional<ParseResult> MakeMethodDeclaration(
auto labels = child_results->NextAs<LabelAndTypesVector>();
auto body = child_results->NextAs<Statement*>();
MacroDeclaration* macro = MakeNode<TorqueMacroDeclaration>(
- transitioning, name, operator_name, args, return_type, labels);
+ transitioning, name, operator_name, args, return_type, labels, false);
Declaration* result = MakeNode<StandardDeclaration>(macro, body);
return ParseResult{result};
}
+class AnnotationSet {
+ public:
+ AnnotationSet(ParseResultIterator* iter,
+ const std::set<std::string>& allowed) {
+ auto list = iter->NextAs<std::vector<Identifier*>>();
+ for (const Identifier* i : list) {
+ if (allowed.find(i->value) == allowed.end()) {
+ Lint("Annotation ", i->value, " is not allowed here").Position(i->pos);
+ }
+ if (!set_.insert(i->value).second) {
+ Lint("Duplicate annotation ", i->value).Position(i->pos);
+ }
+ }
+ }
+
+ bool Contains(const std::string& s) { return set_.find(s) != set_.end(); }
+
+ private:
+ std::set<std::string> set_;
+};
+
base::Optional<ParseResult> MakeClassDeclaration(
ParseResultIterator* child_results) {
- auto generate_print = child_results->NextAs<bool>();
+ AnnotationSet annotations(
+ child_results, {"@generatePrint", "@noVerifier", "@abstract",
+ "@dirtyInstantiatedAbstractClass",
+ "@hasSameInstanceTypeAsParent", "@generateCppClass"});
+ ClassFlags flags = ClassFlag::kNone;
+ bool generate_print = annotations.Contains("@generatePrint");
+ if (generate_print) flags |= ClassFlag::kGeneratePrint;
+ bool generate_verify = !annotations.Contains("@noVerifier");
+ if (generate_verify) flags |= ClassFlag::kGenerateVerify;
+ if (annotations.Contains("@abstract")) {
+ flags |= ClassFlag::kAbstract;
+ }
+ if (annotations.Contains("@dirtyInstantiatedAbstractClass")) {
+ flags |= ClassFlag::kInstantiatedAbstractClass;
+ }
+ if (annotations.Contains("@hasSameInstanceTypeAsParent")) {
+ flags |= ClassFlag::kHasSameInstanceTypeAsParent;
+ }
+ if (annotations.Contains("@generateCppClass")) {
+ flags |= ClassFlag::kGenerateCppClassDefinitions;
+ }
auto is_extern = child_results->NextAs<bool>();
+ if (is_extern) flags |= ClassFlag::kExtern;
auto transient = child_results->NextAs<bool>();
+ if (transient) flags |= ClassFlag::kTransient;
auto name = child_results->NextAs<Identifier*>();
if (!IsValidTypeName(name->value)) {
NamingConventionError("Type", name->value, "UpperCamelCase");
}
- auto extends = child_results->NextAs<base::Optional<std::string>>();
+ auto extends = child_results->NextAs<base::Optional<TypeExpression*>>();
+ if (extends && !BasicTypeExpression::DynamicCast(*extends)) {
+ ReportError("Expected type name in extends clause.");
+ }
auto generates = child_results->NextAs<base::Optional<std::string>>();
auto methods = child_results->NextAs<std::vector<Declaration*>>();
- auto fields = child_results->NextAs<std::vector<ClassFieldExpression>>();
+ auto fields_raw = child_results->NextAs<std::vector<ClassFieldExpression>>();
+
+ // Filter to only include fields that should be present based on decoration.
+ std::vector<ClassFieldExpression> fields;
+ std::copy_if(fields_raw.begin(), fields_raw.end(), std::back_inserter(fields),
+ [](const ClassFieldExpression& exp) {
+ if (!exp.conditional.has_value()) return true;
+ const ConditionalAnnotation& conditional = *exp.conditional;
+ return conditional.type == ConditionalAnnotationType::kPositive
+ ? BuildFlags::GetFlag(conditional.condition, "@if")
+ : !BuildFlags::GetFlag(conditional.condition,
+ "@ifnot");
+ });
+
Declaration* result = MakeNode<ClassDeclaration>(
- name, is_extern, generate_print, transient, std::move(extends),
- std::move(generates), std::move(methods), fields);
+ name, flags, std::move(extends), std::move(generates), std::move(methods),
+ fields);
return ParseResult{result};
}
@@ -599,7 +742,7 @@ base::Optional<ParseResult> MakeNamespaceDeclaration(
base::Optional<ParseResult> MakeSpecializationDeclaration(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<std::string>();
+ auto name = child_results->NextAs<Identifier*>();
auto generic_parameters =
child_results->NextAs<std::vector<TypeExpression*>>();
auto parameters = child_results->NextAs<ParameterList>();
@@ -616,6 +759,9 @@ base::Optional<ParseResult> MakeSpecializationDeclaration(
base::Optional<ParseResult> MakeStructDeclaration(
ParseResultIterator* child_results) {
auto name = child_results->NextAs<Identifier*>();
+ if (!IsValidTypeName(name->value)) {
+ NamingConventionError("Struct", name->value, "UpperCamelCase");
+ }
auto methods = child_results->NextAs<std::vector<Declaration*>>();
auto fields = child_results->NextAs<std::vector<StructFieldExpression>>();
Declaration* result =
@@ -677,7 +823,8 @@ base::Optional<ParseResult> MakeBasicTypeExpression(
auto is_constexpr = child_results->NextAs<bool>();
auto name = child_results->NextAs<std::string>();
TypeExpression* result = MakeNode<BasicTypeExpression>(
- std::move(namespace_qualification), is_constexpr, std::move(name));
+ std::move(namespace_qualification),
+ is_constexpr ? GetConstexprName(name) : std::move(name));
return ParseResult{result};
}
@@ -787,7 +934,8 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
}
BlockStatement* case_block;
if (i < cases.size() - 1) {
- value = MakeCall("Cast", std::vector<TypeExpression*>{cases[i].type},
+ value = MakeCall(MakeNode<Identifier>("Cast"),
+ std::vector<TypeExpression*>{cases[i].type},
std::vector<Expression*>{value},
std::vector<Statement*>{MakeNode<ExpressionStatement>(
MakeNode<IdentifierExpression>(
@@ -806,8 +954,8 @@ base::Optional<ParseResult> MakeTypeswitchStatement(
current_block->statements.push_back(
MakeNode<ExpressionStatement>(MakeNode<TryLabelExpression>(
false, MakeNode<StatementExpression>(case_block),
- MakeNode<LabelBlock>("_NextCase", ParameterList::Empty(),
- next_block))));
+ MakeNode<LabelBlock>(MakeNode<Identifier>("_NextCase"),
+ ParameterList::Empty(), next_block))));
current_block = next_block;
}
accumulated_types =
@@ -851,9 +999,9 @@ base::Optional<ParseResult> MakeTailCallStatement(
base::Optional<ParseResult> MakeVarDeclarationStatement(
ParseResultIterator* child_results) {
- auto kind = child_results->NextAs<std::string>();
- bool const_qualified = kind == "const";
- if (!const_qualified) DCHECK_EQ("let", kind);
+ auto kind = child_results->NextAs<Identifier*>();
+ bool const_qualified = kind->value == "const";
+ if (!const_qualified) DCHECK_EQ("let", kind->value);
auto name = child_results->NextAs<Identifier*>();
if (!IsLowerCamelCase(name->value)) {
NamingConventionError("Variable", name->value, "lowerCamelCase");
@@ -885,10 +1033,9 @@ base::Optional<ParseResult> MakeContinueStatement(
base::Optional<ParseResult> MakeGotoStatement(
ParseResultIterator* child_results) {
- auto label = child_results->NextAs<std::string>();
+ auto label = child_results->NextAs<Identifier*>();
auto arguments = child_results->NextAs<std::vector<Expression*>>();
- Statement* result =
- MakeNode<GotoStatement>(std::move(label), std::move(arguments));
+ Statement* result = MakeNode<GotoStatement>(label, std::move(arguments));
return ParseResult{result};
}
@@ -949,14 +1096,13 @@ base::Optional<ParseResult> MakeForLoopStatement(
}
base::Optional<ParseResult> MakeLabelBlock(ParseResultIterator* child_results) {
- auto label = child_results->NextAs<std::string>();
- if (!IsUpperCamelCase(label)) {
- NamingConventionError("Label", label, "UpperCamelCase");
+ auto label = child_results->NextAs<Identifier*>();
+ if (!IsUpperCamelCase(label->value)) {
+ NamingConventionError("Label", label->value, "UpperCamelCase");
}
auto parameters = child_results->NextAs<ParameterList>();
auto body = child_results->NextAs<Statement*>();
- LabelBlock* result =
- MakeNode<LabelBlock>(std::move(label), std::move(parameters), body);
+ LabelBlock* result = MakeNode<LabelBlock>(label, std::move(parameters), body);
return ParseResult{result};
}
@@ -968,11 +1114,11 @@ base::Optional<ParseResult> MakeCatchBlock(ParseResultIterator* child_results) {
}
ParameterList parameters;
parameters.names.push_back(MakeNode<Identifier>(variable));
- parameters.types.push_back(MakeNode<BasicTypeExpression>(
- std::vector<std::string>{}, false, "Object"));
+ parameters.types.push_back(
+ MakeNode<BasicTypeExpression>(std::vector<std::string>{}, "Object"));
parameters.has_varargs = false;
- LabelBlock* result =
- MakeNode<LabelBlock>("_catch", std::move(parameters), body);
+ LabelBlock* result = MakeNode<LabelBlock>(MakeNode<Identifier>("_catch"),
+ std::move(parameters), body);
return ParseResult{result};
}
@@ -997,6 +1143,12 @@ base::Optional<ParseResult> MakeIdentifier(ParseResultIterator* child_results) {
return ParseResult{result};
}
+base::Optional<ParseResult> MakeIdentifierFromMatchedInput(
+ ParseResultIterator* child_results) {
+ return ParseResult{
+ MakeNode<Identifier>(child_results->matched_input().ToString())};
+}
+
base::Optional<ParseResult> MakeIdentifierExpression(
ParseResultIterator* child_results) {
auto namespace_qualification =
@@ -1111,12 +1263,12 @@ base::Optional<ParseResult> MakeConditionalExpression(
base::Optional<ParseResult> MakeLabelAndTypes(
ParseResultIterator* child_results) {
- auto name = child_results->NextAs<std::string>();
- if (!IsUpperCamelCase(name)) {
- NamingConventionError("Label", name, "UpperCamelCase");
+ auto name = child_results->NextAs<Identifier*>();
+ if (!IsUpperCamelCase(name->value)) {
+ NamingConventionError("Label", name->value, "UpperCamelCase");
}
auto types = child_results->NextAs<std::vector<TypeExpression*>>();
- return ParseResult{LabelAndTypes{std::move(name), std::move(types)}};
+ return ParseResult{LabelAndTypes{name, std::move(types)}};
}
base::Optional<ParseResult> MakeNameAndType(
@@ -1146,14 +1298,33 @@ base::Optional<ParseResult> MakeNameAndExpressionFromExpression(
ReportError("Constructor parameters need to be named.");
}
+base::Optional<ParseResult> MakeConditionalAnnotation(
+ ParseResultIterator* child_results) {
+ auto type_str = child_results->NextAs<Identifier*>()->value;
+ DCHECK(type_str == "@if" || type_str == "@ifnot");
+ ConditionalAnnotationType type = type_str == "@if"
+ ? ConditionalAnnotationType::kPositive
+ : ConditionalAnnotationType::kNegative;
+ auto condition = child_results->NextAs<std::string>();
+ return ParseResult{ConditionalAnnotation{condition, type}};
+}
+
base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
+ auto conditional =
+ child_results->NextAs<base::Optional<ConditionalAnnotation>>();
+ AnnotationSet annotations(child_results, {"@noVerifier"});
+ bool generate_verify = !annotations.Contains("@noVerifier");
auto weak = child_results->NextAs<bool>();
auto const_qualified = child_results->NextAs<bool>();
auto name = child_results->NextAs<Identifier*>();
auto index = child_results->NextAs<base::Optional<std::string>>();
auto type = child_results->NextAs<TypeExpression*>();
- return ParseResult{
- ClassFieldExpression{{name, type}, index, weak, const_qualified}};
+ return ParseResult{ClassFieldExpression{{name, type},
+ index,
+ conditional,
+ weak,
+ const_qualified,
+ generate_verify}};
}
base::Optional<ParseResult> MakeStructField(
@@ -1166,8 +1337,9 @@ base::Optional<ParseResult> MakeStructField(
base::Optional<ParseResult> ExtractAssignmentOperator(
ParseResultIterator* child_results) {
- auto op = child_results->NextAs<std::string>();
- base::Optional<std::string> result = std::string(op.begin(), op.end() - 1);
+ auto op = child_results->NextAs<Identifier*>();
+ base::Optional<std::string> result =
+ std::string(op->value.begin(), op->value.end() - 1);
return ParseResult(std::move(result));
}
@@ -1191,12 +1363,18 @@ struct TorqueGrammar : Grammar {
return true;
}
+ static bool MatchAnnotation(InputPosition* pos) {
+ InputPosition current = *pos;
+ if (!MatchString("@", &current)) return false;
+ if (!MatchIdentifier(&current)) return false;
+ *pos = current;
+ return true;
+ }
+
static bool MatchIntrinsicName(InputPosition* pos) {
InputPosition current = *pos;
if (!MatchString("%", &current)) return false;
- if (!MatchChar(std::isalpha, &current)) return false;
- while (MatchChar(std::isalnum, &current) || MatchString("_", pos)) {
- }
+ if (!MatchIdentifier(&current)) return false;
*pos = current;
return true;
}
@@ -1262,11 +1440,19 @@ struct TorqueGrammar : Grammar {
TorqueGrammar() : Grammar(&file) { SetWhitespace(MatchWhitespace); }
// Result: std::string
- Symbol identifier = {Rule({Pattern(MatchIdentifier)}, YieldMatchedInput)};
+ Symbol identifier = {Rule({Pattern(MatchIdentifier)}, YieldMatchedInput),
+ Rule({Token("runtime")}, YieldMatchedInput)};
// Result: Identifier*
Symbol name = {Rule({&identifier}, MakeIdentifier)};
+ // Result: Identifier*
+ Symbol annotation = {
+ Rule({Pattern(MatchAnnotation)}, MakeIdentifierFromMatchedInput)};
+
+ // Result: std::vector<Identifier*>
+ Symbol* annotations = List<Identifier*>(&annotation);
+
// Result: std::string
Symbol intrinsicName = {
Rule({Pattern(MatchIntrinsicName)}, YieldMatchedInput)};
@@ -1332,7 +1518,7 @@ struct TorqueGrammar : Grammar {
// Result: LabelAndTypes
Symbol labelParameter = {Rule(
- {&identifier,
+ {&name,
TryOrDefault<TypeList>(Sequence({Token("("), typeList, Token(")")}))},
MakeLabelAndTypes)};
@@ -1356,8 +1542,14 @@ struct TorqueGrammar : Grammar {
Symbol* optionalArraySpecifier =
Optional<std::string>(Sequence({Token("["), &identifier, Token("]")}));
+ // Result: ConditionalAnnotation
+ Symbol conditionalAnnotation = {
+ Rule({OneOf({"@if", "@ifnot"}), Token("("), &identifier, Token(")")},
+ MakeConditionalAnnotation)};
+
Symbol classField = {
- Rule({CheckIf(Token("weak")), CheckIf(Token("const")), &name,
+ Rule({Optional<ConditionalAnnotation>(&conditionalAnnotation),
+ annotations, CheckIf(Token("weak")), CheckIf(Token("const")), &name,
optionalArraySpecifier, Token(":"), &type, Token(";")},
MakeClassField)};
@@ -1379,11 +1571,11 @@ struct TorqueGrammar : Grammar {
Token(","), Token("..."), &identifier, Token(")")},
MakeParameterListFromNameAndTypeList<true>)};
- // Result: std::string
+ // Result: Identifier*
Symbol* OneOf(const std::vector<std::string>& alternatives) {
Symbol* result = NewSymbol();
for (const std::string& s : alternatives) {
- result->AddRule(Rule({Token(s)}, YieldMatchedInput));
+ result->AddRule(Rule({Token(s)}, MakeIdentifierFromMatchedInput));
}
return result;
}
@@ -1539,7 +1731,7 @@ struct TorqueGrammar : Grammar {
// Result: LabelBlock*
Symbol labelBlock = {
- Rule({Token("label"), &identifier,
+ Rule({Token("label"), &name,
TryOrDefault<ParameterList>(&parameterListNoVararg), &block},
MakeLabelBlock)};
@@ -1578,7 +1770,7 @@ struct TorqueGrammar : Grammar {
Rule({Token("tail"), &callExpression}, MakeTailCallStatement),
Rule({Token("break")}, MakeBreakStatement),
Rule({Token("continue")}, MakeContinueStatement),
- Rule({Token("goto"), &identifier,
+ Rule({Token("goto"), &name,
TryOrDefault<std::vector<Expression*>>(&argumentList)},
MakeGotoStatement),
Rule({OneOf({"debug", "unreachable"})}, MakeDebugStatement)};
@@ -1637,25 +1829,25 @@ struct TorqueGrammar : Grammar {
optionalLabelList, &block},
MakeMethodDeclaration)};
- // Result: Declaration*
+ // Result: std::vector<Declaration*>
Symbol declaration = {
Rule({Token("const"), &name, Token(":"), &type, Token("="), expression,
Token(";")},
- MakeConstDeclaration),
+ AsSingletonVector<Declaration*, MakeConstDeclaration>()),
Rule({Token("const"), &name, Token(":"), &type, Token("generates"),
&externalString, Token(";")},
- MakeExternConstDeclaration),
- Rule({CheckIf(Token("@generatePrint")), CheckIf(Token("extern")),
- CheckIf(Token("transient")), Token("class"), &name,
- Optional<std::string>(Sequence({Token("extends"), &identifier})),
+ AsSingletonVector<Declaration*, MakeExternConstDeclaration>()),
+ Rule({annotations, CheckIf(Token("extern")), CheckIf(Token("transient")),
+ Token("class"), &name,
+ Optional<TypeExpression*>(Sequence({Token("extends"), &type})),
Optional<std::string>(
Sequence({Token("generates"), &externalString})),
Token("{"), List<Declaration*>(&method),
List<ClassFieldExpression>(&classField), Token("}")},
- MakeClassDeclaration),
+ AsSingletonVector<Declaration*, MakeClassDeclaration>()),
Rule({Token("struct"), &name, Token("{"), List<Declaration*>(&method),
List<StructFieldExpression>(&structField), Token("}")},
- MakeStructDeclaration),
+ AsSingletonVector<Declaration*, MakeStructDeclaration>()),
Rule({CheckIf(Token("transient")), Token("type"), &name,
Optional<Identifier*>(Sequence({Token("extends"), &name})),
Optional<std::string>(
@@ -1663,13 +1855,13 @@ struct TorqueGrammar : Grammar {
Optional<std::string>(
Sequence({Token("constexpr"), &externalString})),
Token(";")},
- MakeTypeDeclaration),
+ MakeAbstractTypeDeclaration),
Rule({Token("type"), &name, Token("="), &type, Token(";")},
- MakeTypeAliasDeclaration),
+ AsSingletonVector<Declaration*, MakeTypeAliasDeclaration>()),
Rule({Token("intrinsic"), &intrinsicName,
TryOrDefault<GenericParameters>(&genericParameters),
- &parameterListNoVararg, &optionalReturnType, Token(";")},
- MakeIntrinsicDeclaration),
+ &parameterListNoVararg, &optionalReturnType, &optionalBody},
+ AsSingletonVector<Declaration*, MakeIntrinsicDeclaration>()),
Rule({Token("extern"), CheckIf(Token("transitioning")),
Optional<std::string>(
Sequence({Token("operator"), &externalString})),
@@ -1678,48 +1870,55 @@ struct TorqueGrammar : Grammar {
&identifier, TryOrDefault<GenericParameters>(&genericParameters),
&typeListMaybeVarArgs, &optionalReturnType, optionalLabelList,
Token(";")},
- MakeExternalMacro),
+ AsSingletonVector<Declaration*, MakeExternalMacro>()),
Rule({Token("extern"), CheckIf(Token("transitioning")),
CheckIf(Token("javascript")), Token("builtin"), &identifier,
TryOrDefault<GenericParameters>(&genericParameters),
&typeListMaybeVarArgs, &optionalReturnType, Token(";")},
- MakeExternalBuiltin),
+ AsSingletonVector<Declaration*, MakeExternalBuiltin>()),
Rule(
{Token("extern"), CheckIf(Token("transitioning")), Token("runtime"),
&identifier, &typeListMaybeVarArgs, &optionalReturnType, Token(";")},
- MakeExternalRuntime),
- Rule({CheckIf(Token("transitioning")),
+ AsSingletonVector<Declaration*, MakeExternalRuntime>()),
+ Rule({CheckIf(Token("@export")), CheckIf(Token("transitioning")),
Optional<std::string>(
Sequence({Token("operator"), &externalString})),
Token("macro"), &identifier,
TryOrDefault<GenericParameters>(&genericParameters),
&parameterListNoVararg, &optionalReturnType, optionalLabelList,
&optionalBody},
- MakeTorqueMacroDeclaration),
+ AsSingletonVector<Declaration*, MakeTorqueMacroDeclaration>()),
Rule({CheckIf(Token("transitioning")), CheckIf(Token("javascript")),
Token("builtin"), &identifier,
TryOrDefault<GenericParameters>(&genericParameters),
&parameterListAllowVararg, &optionalReturnType, &optionalBody},
- MakeTorqueBuiltinDeclaration),
- Rule({&identifier, &genericSpecializationTypeList,
- &parameterListAllowVararg, &optionalReturnType, optionalLabelList,
- &block},
- MakeSpecializationDeclaration),
- Rule({Token("#include"), &externalString}, MakeCppIncludeDeclaration)};
-
- // Result: Declaration*
+ AsSingletonVector<Declaration*, MakeTorqueBuiltinDeclaration>()),
+ Rule({&name, &genericSpecializationTypeList, &parameterListAllowVararg,
+ &optionalReturnType, optionalLabelList, &block},
+ AsSingletonVector<Declaration*, MakeSpecializationDeclaration>()),
+ Rule({Token("#include"), &externalString},
+ AsSingletonVector<Declaration*, MakeCppIncludeDeclaration>())};
+
+ // Result: std::vector<Declaration*>
+ Symbol declarationList = {
+ Rule({List<std::vector<Declaration*>>(&declaration)}, ConcatList)};
+
+ // Result: std::vector<Declaration*>
Symbol namespaceDeclaration = {
- Rule({Token("namespace"), &identifier, Token("{"),
- List<Declaration*>(&declaration), Token("}")},
- MakeNamespaceDeclaration)};
+ Rule({Token("namespace"), &identifier, Token("{"), &declarationList,
+ Token("}")},
+ AsSingletonVector<Declaration*, MakeNamespaceDeclaration>())};
- Symbol file = {Rule({&file, &namespaceDeclaration}, AddGlobalDeclaration),
- Rule({&file, &declaration}, AddGlobalDeclaration), Rule({})};
+ Symbol file = {Rule({&file, &namespaceDeclaration}, AddGlobalDeclarations),
+ Rule({&file, &declaration}, AddGlobalDeclarations), Rule({})};
};
} // namespace
-void ParseTorque(const std::string& input) { TorqueGrammar().Parse(input); }
+void ParseTorque(const std::string& input) {
+ BuildFlags::Scope build_flags_scope;
+ TorqueGrammar().Parse(input);
+}
} // namespace torque
} // namespace internal
diff --git a/deps/v8/src/torque/torque.cc b/deps/v8/src/torque/torque.cc
index 4dc6ac80ab..6b596aab39 100644
--- a/deps/v8/src/torque/torque.cc
+++ b/deps/v8/src/torque/torque.cc
@@ -9,9 +9,17 @@ namespace v8 {
namespace internal {
namespace torque {
+std::string ErrorPrefixFor(TorqueMessage::Kind kind) {
+ switch (kind) {
+ case TorqueMessage::Kind::kError:
+ return "Torque Error";
+ case TorqueMessage::Kind::kLint:
+ return "Lint error";
+ }
+}
+
int WrappedMain(int argc, const char** argv) {
std::string output_directory;
- bool verbose = false;
std::vector<std::string> files;
for (int i = 1; i < argc; ++i) {
@@ -20,10 +28,6 @@ int WrappedMain(int argc, const char** argv) {
output_directory = argv[++i];
continue;
}
- if (!strcmp("-v", argv[i])) {
- verbose = true;
- continue;
- }
// Otherwise it's a .tq file. Remember it for compilation.
files.emplace_back(argv[i]);
@@ -31,22 +35,26 @@ int WrappedMain(int argc, const char** argv) {
TorqueCompilerOptions options;
options.output_directory = output_directory;
- options.verbose = verbose;
options.collect_language_server_data = false;
- options.abort_on_lint_errors = true;
+ options.force_assert_statements = false;
TorqueCompilerResult result = CompileTorque(files, options);
- if (result.error) {
- // PositionAsString requires the SourceFileMap to be set to
- // resolve the file name.
- SourceFileMap::Scope source_file_map_scope(result.source_file_map);
-
- TorqueError& error = *result.error;
- if (error.position) std::cerr << PositionAsString(*error.position) << ": ";
- std::cerr << "Torque error: " << error.message << "\n";
- v8::base::OS::Abort();
+
+ // PositionAsString requires the SourceFileMap to be set to
+ // resolve the file name. Needed to report errors and lint warnings.
+ SourceFileMap::Scope source_file_map_scope(result.source_file_map);
+
+ for (const TorqueMessage& message : result.messages) {
+ if (message.position) {
+ std::cerr << *message.position << ": ";
+ }
+
+ std::cerr << ErrorPrefixFor(message.kind) << ": " << message.message
+ << "\n";
}
+ if (!result.messages.empty()) v8::base::OS::Abort();
+
return 0;
}
diff --git a/deps/v8/src/torque/type-oracle.cc b/deps/v8/src/torque/type-oracle.cc
index f1c29cc13c..7c266a419a 100644
--- a/deps/v8/src/torque/type-oracle.cc
+++ b/deps/v8/src/torque/type-oracle.cc
@@ -10,6 +10,13 @@ namespace torque {
DEFINE_CONTEXTUAL_VARIABLE(TypeOracle)
+// static
+void TypeOracle::FinalizeClassTypes() {
+ for (const std::unique_ptr<AggregateType>& p : Get().struct_types_) {
+ p->Finalize();
+ }
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/torque/type-oracle.h b/deps/v8/src/torque/type-oracle.h
index 515b0d84f7..c9d6bb0bf3 100644
--- a/deps/v8/src/torque/type-oracle.h
+++ b/deps/v8/src/torque/type-oracle.h
@@ -19,12 +19,14 @@ class TypeOracle : public ContextualClass<TypeOracle> {
public:
static const AbstractType* GetAbstractType(
const Type* parent, std::string name, bool transient,
- std::string generated,
- base::Optional<const AbstractType*> non_constexpr_version) {
+ std::string generated, const AbstractType* non_constexpr_version) {
AbstractType* result =
new AbstractType(parent, transient, std::move(name),
std::move(generated), non_constexpr_version);
Get().nominal_types_.push_back(std::unique_ptr<AbstractType>(result));
+ if (non_constexpr_version) {
+ non_constexpr_version->SetConstexprVersion(result);
+ }
return result;
}
@@ -35,11 +37,11 @@ class TypeOracle : public ContextualClass<TypeOracle> {
}
static ClassType* GetClassType(const Type* parent, const std::string& name,
- bool is_extern, bool generate_print,
- bool transient, const std::string& generates) {
- ClassType* result =
- new ClassType(parent, CurrentNamespace(), name, is_extern,
- generate_print, transient, generates);
+ ClassFlags flags, const std::string& generates,
+ ClassDeclaration* decl,
+ const TypeAlias* alias) {
+ ClassType* result = new ClassType(parent, CurrentNamespace(), name, flags,
+ generates, decl, alias);
Get().struct_types_.push_back(std::unique_ptr<ClassType>(result));
return result;
}
@@ -185,6 +187,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(FLOAT64_TYPE_STRING);
}
+ static const Type* GetConstFloat64Type() {
+ return Get().GetBuiltinType(CONST_FLOAT64_TYPE_STRING);
+ }
+
static const Type* GetNeverType() {
return Get().GetBuiltinType(NEVER_TYPE_STRING);
}
@@ -193,6 +199,10 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return Get().GetBuiltinType(CONST_INT31_TYPE_STRING);
}
+ static const Type* GetConstInt32Type() {
+ return Get().GetBuiltinType(CONST_INT32_TYPE_STRING);
+ }
+
static bool IsImplicitlyConvertableFrom(const Type* to, const Type* from) {
for (Generic* from_constexpr :
Declarations::LookupGeneric(kFromConstexprMacroName)) {
@@ -207,6 +217,8 @@ class TypeOracle : public ContextualClass<TypeOracle> {
return false;
}
+ static void FinalizeClassTypes();
+
private:
const Type* GetBuiltinType(const std::string& name) {
return Declarations::LookupGlobalType(name);
@@ -217,7 +229,7 @@ class TypeOracle : public ContextualClass<TypeOracle> {
Deduplicator<UnionType> union_types_;
Deduplicator<ReferenceType> reference_types_;
std::vector<std::unique_ptr<Type>> nominal_types_;
- std::vector<std::unique_ptr<Type>> struct_types_;
+ std::vector<std::unique_ptr<AggregateType>> struct_types_;
std::vector<std::unique_ptr<Type>> top_types_;
};
diff --git a/deps/v8/src/torque/type-visitor.cc b/deps/v8/src/torque/type-visitor.cc
new file mode 100644
index 0000000000..e9fd50c02a
--- /dev/null
+++ b/deps/v8/src/torque/type-visitor.cc
@@ -0,0 +1,292 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/torque/type-visitor.h"
+
+#include "src/common/globals.h"
+#include "src/torque/declarable.h"
+#include "src/torque/global-context.h"
+#include "src/torque/server-data.h"
+#include "src/torque/type-oracle.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+const Type* TypeVisitor::ComputeType(TypeDeclaration* decl) {
+ CurrentSourcePosition::Scope scope(decl->pos);
+ switch (decl->kind) {
+#define ENUM_ITEM(name) \
+ case AstNode::Kind::k##name: \
+ return ComputeType(name::cast(decl));
+ AST_TYPE_DECLARATION_NODE_KIND_LIST(ENUM_ITEM)
+#undef ENUM_ITEM
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+const Type* TypeVisitor::ComputeType(TypeAliasDeclaration* decl) {
+ const Type* type = ComputeType(decl->type);
+ type->AddAlias(decl->name->value);
+ return type;
+}
+
+namespace {
+std::string ComputeGeneratesType(base::Optional<std::string> opt_gen,
+ bool enforce_tnode_type) {
+ if (!opt_gen) return "";
+ const std::string& generates = *opt_gen;
+ if (enforce_tnode_type) {
+ if (generates.length() < 7 || generates.substr(0, 6) != "TNode<" ||
+ generates.substr(generates.length() - 1, 1) != ">") {
+ ReportError("generated type \"", generates,
+ "\" should be of the form \"TNode<...>\"");
+ }
+ return generates.substr(6, generates.length() - 7);
+ }
+ return generates;
+}
+} // namespace
+
+const AbstractType* TypeVisitor::ComputeType(AbstractTypeDeclaration* decl) {
+ std::string generates =
+ ComputeGeneratesType(decl->generates, !decl->is_constexpr);
+
+ const Type* parent_type = nullptr;
+ if (decl->extends) {
+ parent_type = Declarations::LookupType(*decl->extends);
+ }
+
+ if (generates == "" && parent_type) {
+ generates = parent_type->GetGeneratedTNodeTypeName();
+ }
+
+ if (decl->is_constexpr && decl->transient) {
+ ReportError("cannot declare a transient type that is also constexpr");
+ }
+
+ const AbstractType* non_constexpr_version = nullptr;
+ if (decl->is_constexpr) {
+ QualifiedName non_constexpr_name{GetNonConstexprName(decl->name->value)};
+ const Type* non_constexpr_type =
+ Declarations::LookupType(non_constexpr_name);
+ non_constexpr_version = AbstractType::DynamicCast(non_constexpr_type);
+ DCHECK_NOT_NULL(non_constexpr_version);
+ }
+
+ return TypeOracle::GetAbstractType(parent_type, decl->name->value,
+ decl->transient, generates,
+ non_constexpr_version);
+}
+
+void DeclareMethods(AggregateType* container_type,
+ const std::vector<Declaration*>& methods) {
+ for (auto declaration : methods) {
+ CurrentSourcePosition::Scope pos_scope(declaration->pos);
+ StandardDeclaration* standard_declaration =
+ StandardDeclaration::DynamicCast(declaration);
+ DCHECK(standard_declaration);
+ TorqueMacroDeclaration* method =
+ TorqueMacroDeclaration::DynamicCast(standard_declaration->callable);
+ Signature signature = TypeVisitor::MakeSignature(method->signature.get());
+ signature.parameter_names.insert(
+ signature.parameter_names.begin() + signature.implicit_count,
+ MakeNode<Identifier>(kThisParameterName));
+ Statement* body = *(standard_declaration->body);
+ std::string method_name(method->name);
+ signature.parameter_types.types.insert(
+ signature.parameter_types.types.begin() + signature.implicit_count,
+ container_type);
+ Declarations::CreateMethod(container_type, method_name, signature, false,
+ body);
+ }
+}
+
+const StructType* TypeVisitor::ComputeType(StructDeclaration* decl) {
+ CurrentSourcePosition::Scope position_activator(decl->pos);
+ StructType* struct_type = TypeOracle::GetStructType(decl->name->value);
+ size_t offset = 0;
+ for (auto& field : decl->fields) {
+ CurrentSourcePosition::Scope position_activator(
+ field.name_and_type.type->pos);
+ const Type* field_type = TypeVisitor::ComputeType(field.name_and_type.type);
+ struct_type->RegisterField({field.name_and_type.name->pos,
+ struct_type,
+ base::nullopt,
+ {field.name_and_type.name->value, field_type},
+ offset,
+ false,
+ field.const_qualified,
+ false});
+ offset += LoweredSlotCount(field_type);
+ }
+ DeclareMethods(struct_type, decl->methods);
+ return struct_type;
+}
+
+const ClassType* TypeVisitor::ComputeType(ClassDeclaration* decl) {
+ ClassType* new_class;
+ // TODO(sigurds): Remove this hack by introducing a declarable for classes.
+ const TypeAlias* alias =
+ Declarations::LookupTypeAlias(QualifiedName(decl->name->value));
+ GlobalContext::RegisterClass(alias);
+ DCHECK_EQ(*alias->delayed_, decl);
+ if (decl->flags & ClassFlag::kExtern) {
+ if (!decl->super) {
+ ReportError("Extern class must extend another type.");
+ }
+ const Type* super_type = TypeVisitor::ComputeType(*decl->super);
+ if (super_type != TypeOracle::GetTaggedType()) {
+ const ClassType* super_class = ClassType::DynamicCast(super_type);
+ if (!super_class) {
+ ReportError(
+ "class \"", decl->name->value,
+ "\" must extend either Tagged or an already declared class");
+ }
+ }
+
+ std::string generates = decl->name->value;
+ if (decl->generates) {
+ bool enforce_tnode_type = true;
+ generates = ComputeGeneratesType(decl->generates, enforce_tnode_type);
+ }
+
+ new_class = TypeOracle::GetClassType(super_type, decl->name->value,
+ decl->flags, generates, decl, alias);
+ } else {
+ if (decl->super) {
+ ReportError("Only extern classes can inherit.");
+ }
+ if (decl->generates) {
+ ReportError("Only extern classes can specify a generated type.");
+ }
+ new_class =
+ TypeOracle::GetClassType(TypeOracle::GetTaggedType(), decl->name->value,
+ decl->flags, "FixedArray", decl, alias);
+ }
+ return new_class;
+}
+
+const Type* TypeVisitor::ComputeType(TypeExpression* type_expression) {
+ if (auto* basic = BasicTypeExpression::DynamicCast(type_expression)) {
+ const TypeAlias* alias = Declarations::LookupTypeAlias(
+ QualifiedName{basic->namespace_qualification, basic->name});
+ if (GlobalContext::collect_language_server_data()) {
+ LanguageServerData::AddDefinition(type_expression->pos,
+ alias->GetDeclarationPosition());
+ }
+ return alias->type();
+ } else if (auto* union_type =
+ UnionTypeExpression::DynamicCast(type_expression)) {
+ return TypeOracle::GetUnionType(ComputeType(union_type->a),
+ ComputeType(union_type->b));
+ } else if (auto* reference_type =
+ ReferenceTypeExpression::DynamicCast(type_expression)) {
+ return TypeOracle::GetReferenceType(
+ ComputeType(reference_type->referenced_type));
+ } else {
+ auto* function_type_exp = FunctionTypeExpression::cast(type_expression);
+ TypeVector argument_types;
+ for (TypeExpression* type_exp : function_type_exp->parameters) {
+ argument_types.push_back(ComputeType(type_exp));
+ }
+ return TypeOracle::GetBuiltinPointerType(
+ argument_types, ComputeType(function_type_exp->return_type));
+ }
+}
+
+Signature TypeVisitor::MakeSignature(const CallableNodeSignature* signature) {
+ LabelDeclarationVector definition_vector;
+ for (const auto& label : signature->labels) {
+ LabelDeclaration def = {label.name, ComputeTypeVector(label.types)};
+ definition_vector.push_back(def);
+ }
+ base::Optional<std::string> arguments_variable;
+ if (signature->parameters.has_varargs)
+ arguments_variable = signature->parameters.arguments_variable;
+ Signature result{signature->parameters.names,
+ arguments_variable,
+ {ComputeTypeVector(signature->parameters.types),
+ signature->parameters.has_varargs},
+ signature->parameters.implicit_count,
+ ComputeType(signature->return_type),
+ definition_vector};
+ return result;
+}
+
+void TypeVisitor::VisitClassFieldsAndMethods(
+ ClassType* class_type, const ClassDeclaration* class_declaration) {
+ const ClassType* super_class = class_type->GetSuperClass();
+ size_t class_offset = super_class ? super_class->size() : 0;
+ bool seen_indexed_field = false;
+ for (const ClassFieldExpression& field_expression :
+ class_declaration->fields) {
+ CurrentSourcePosition::Scope position_activator(
+ field_expression.name_and_type.type->pos);
+ const Type* field_type = ComputeType(field_expression.name_and_type.type);
+ if (!(class_declaration->flags & ClassFlag::kExtern)) {
+ if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
+ ReportError("non-extern classes do not support untagged fields");
+ }
+ if (field_expression.weak) {
+ ReportError("non-extern classes do not support weak fields");
+ }
+ }
+ if (field_expression.index) {
+ if (seen_indexed_field ||
+ (super_class && super_class->HasIndexedField())) {
+ ReportError(
+ "only one indexable field is currently supported per class");
+ }
+ seen_indexed_field = true;
+ const Field* index_field =
+ &(class_type->LookupFieldInternal(*field_expression.index));
+ class_type->RegisterField(
+ {field_expression.name_and_type.name->pos,
+ class_type,
+ index_field,
+ {field_expression.name_and_type.name->value, field_type},
+ class_offset,
+ field_expression.weak,
+ field_expression.const_qualified,
+ field_expression.generate_verify});
+ } else {
+ if (seen_indexed_field) {
+ ReportError("cannot declare non-indexable field \"",
+ field_expression.name_and_type.name,
+ "\" after an indexable field "
+ "declaration");
+ }
+ const Field& field = class_type->RegisterField(
+ {field_expression.name_and_type.name->pos,
+ class_type,
+ base::nullopt,
+ {field_expression.name_and_type.name->value, field_type},
+ class_offset,
+ field_expression.weak,
+ field_expression.const_qualified,
+ field_expression.generate_verify});
+ size_t field_size;
+ std::string size_string;
+ std::string machine_type;
+ std::tie(field_size, size_string) = field.GetFieldSizeInformation();
+ // Our allocations don't support alignments beyond kTaggedSize.
+ size_t alignment = std::min(size_t{kTaggedSize}, field_size);
+ if (alignment > 0 && class_offset % alignment != 0) {
+ ReportError("field ", field_expression.name_and_type.name,
+ " at offset ", class_offset, " is not ", alignment,
+ "-byte aligned.");
+ }
+ class_offset += field_size;
+ }
+ }
+ class_type->SetSize(class_offset);
+ class_type->GenerateAccessors();
+ DeclareMethods(class_type, class_declaration->methods);
+}
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/torque/type-visitor.h b/deps/v8/src/torque/type-visitor.h
new file mode 100644
index 0000000000..93de02b860
--- /dev/null
+++ b/deps/v8/src/torque/type-visitor.h
@@ -0,0 +1,45 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TORQUE_TYPE_VISITOR_H_
+#define V8_TORQUE_TYPE_VISITOR_H_
+
+#include <string>
+
+#include "src/torque/ast.h"
+#include "src/torque/types.h"
+
+namespace v8 {
+namespace internal {
+namespace torque {
+
+class TypeVisitor {
+ public:
+ static TypeVector ComputeTypeVector(const std::vector<TypeExpression*>& v) {
+ TypeVector result;
+ for (TypeExpression* t : v) {
+ result.push_back(ComputeType(t));
+ }
+ return result;
+ }
+
+ static const Type* ComputeType(TypeExpression* type_expression);
+ static void VisitClassFieldsAndMethods(
+ ClassType* class_type, const ClassDeclaration* class_declaration);
+ static Signature MakeSignature(const CallableNodeSignature* signature);
+
+ private:
+ friend class TypeAlias;
+ static const Type* ComputeType(TypeDeclaration* decl);
+ static const AbstractType* ComputeType(AbstractTypeDeclaration* decl);
+ static const Type* ComputeType(TypeAliasDeclaration* decl);
+ static const StructType* ComputeType(StructDeclaration* decl);
+ static const ClassType* ComputeType(ClassDeclaration* decl);
+};
+
+} // namespace torque
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TORQUE_TYPE_VISITOR_H_
diff --git a/deps/v8/src/torque/types.cc b/deps/v8/src/torque/types.cc
index 93de4137a9..1d7ca1d5f2 100644
--- a/deps/v8/src/torque/types.cc
+++ b/deps/v8/src/torque/types.cc
@@ -4,9 +4,11 @@
#include <iostream>
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/torque/ast.h"
#include "src/torque/declarable.h"
#include "src/torque/type-oracle.h"
+#include "src/torque/type-visitor.h"
#include "src/torque/types.h"
namespace v8 {
@@ -93,7 +95,7 @@ std::string Type::GetGeneratedTypeName() const {
std::string Type::GetGeneratedTNodeTypeName() const {
std::string result = GetGeneratedTNodeTypeNameImpl();
- if (result.empty()) {
+ if (result.empty() || IsConstexpr()) {
ReportError("Generated TNode type is required for type '", ToString(),
"'. Use 'generates' clause in definition.");
}
@@ -195,7 +197,7 @@ const Type* SubtractType(const Type* a, const Type* b) {
return TypeOracle::GetUnionType(result);
}
-void AggregateType::CheckForDuplicateFields() {
+void AggregateType::CheckForDuplicateFields() const {
// Check the aggregate hierarchy and currently defined class for duplicate
// field declarations.
auto hierarchy = GetHierarchy();
@@ -224,7 +226,8 @@ void AggregateType::CheckForDuplicateFields() {
}
}
-std::vector<const AggregateType*> AggregateType::GetHierarchy() {
+std::vector<const AggregateType*> AggregateType::GetHierarchy() const {
+ if (!is_finalized_) Finalize();
std::vector<const AggregateType*> hierarchy;
const AggregateType* current_container_type = this;
while (current_container_type != nullptr) {
@@ -239,6 +242,7 @@ std::vector<const AggregateType*> AggregateType::GetHierarchy() {
}
bool AggregateType::HasField(const std::string& name) const {
+ if (!is_finalized_) Finalize();
for (auto& field : fields_) {
if (field.name_and_type.name == name) return true;
}
@@ -250,7 +254,7 @@ bool AggregateType::HasField(const std::string& name) const {
return false;
}
-const Field& AggregateType::LookupField(const std::string& name) const {
+const Field& AggregateType::LookupFieldInternal(const std::string& name) const {
for (auto& field : fields_) {
if (field.name_and_type.name == name) return field;
}
@@ -262,11 +266,17 @@ const Field& AggregateType::LookupField(const std::string& name) const {
ReportError("no field ", name, " found");
}
+const Field& AggregateType::LookupField(const std::string& name) const {
+ if (!is_finalized_) Finalize();
+ return LookupFieldInternal(name);
+}
+
std::string StructType::GetGeneratedTypeNameImpl() const {
- return nspace()->ExternalName() + "::" + name();
+ return "TorqueStruct" + name();
}
std::vector<Method*> AggregateType::Methods(const std::string& name) const {
+ if (!is_finalized_) Finalize();
std::vector<Method*> result;
std::copy_if(methods_.begin(), methods_.end(), std::back_inserter(result),
[name](Macro* macro) { return macro->ReadableName() == name; });
@@ -275,46 +285,32 @@ std::vector<Method*> AggregateType::Methods(const std::string& name) const {
std::string StructType::ToExplicitString() const {
std::stringstream result;
- result << "struct " << name() << "{";
- PrintCommaSeparatedList(result, fields());
- result << "}";
+ result << "struct " << name();
return result.str();
}
+constexpr ClassFlags ClassType::kInternalFlags;
+
ClassType::ClassType(const Type* parent, Namespace* nspace,
- const std::string& name, bool is_extern,
- bool generate_print, bool transient,
- const std::string& generates)
+ const std::string& name, ClassFlags flags,
+ const std::string& generates, const ClassDeclaration* decl,
+ const TypeAlias* alias)
: AggregateType(Kind::kClassType, parent, nspace, name),
- is_extern_(is_extern),
- generate_print_(generate_print),
- transient_(transient),
size_(0),
- has_indexed_field_(false),
- generates_(generates) {
- CheckForDuplicateFields();
- if (parent) {
- if (const ClassType* super_class = ClassType::DynamicCast(parent)) {
- if (super_class->HasIndexedField()) {
- has_indexed_field_ = true;
- }
- }
- }
+ flags_(flags & ~(kInternalFlags)),
+ generates_(generates),
+ decl_(decl),
+ alias_(alias) {
+ DCHECK_EQ(flags & kInternalFlags, 0);
}
bool ClassType::HasIndexedField() const {
- if (has_indexed_field_) return true;
- const ClassType* super_class = GetSuperClass();
- if (super_class) return super_class->HasIndexedField();
- return false;
+ if (!is_finalized_) Finalize();
+ return flags_ & ClassFlag::kHasIndexedField;
}
std::string ClassType::GetGeneratedTNodeTypeNameImpl() const {
- if (!IsExtern()) return generates_;
- std::string prefix = nspace()->IsDefaultNamespace()
- ? std::string{}
- : (nspace()->ExternalName() + "::");
- return prefix + generates_;
+ return generates_;
}
std::string ClassType::GetGeneratedTypeNameImpl() const {
@@ -324,14 +320,96 @@ std::string ClassType::GetGeneratedTypeNameImpl() const {
std::string ClassType::ToExplicitString() const {
std::stringstream result;
- result << "class " << name() << "{";
- PrintCommaSeparatedList(result, fields());
- result << "}";
+ result << "class " << name();
return result.str();
}
bool ClassType::AllowInstantiation() const {
- return !IsExtern() || nspace()->IsDefaultNamespace();
+ return (!IsExtern() || nspace()->IsDefaultNamespace()) &&
+ (!IsAbstract() || IsInstantiatedAbstractClass());
+}
+
+void ClassType::Finalize() const {
+ if (is_finalized_) return;
+ CurrentScope::Scope scope_activator(alias_->ParentScope());
+ CurrentSourcePosition::Scope position_activator(decl_->pos);
+ if (parent()) {
+ if (const ClassType* super_class = ClassType::DynamicCast(parent())) {
+ if (super_class->HasIndexedField()) flags_ |= ClassFlag::kHasIndexedField;
+ if (!super_class->IsAbstract() && !HasSameInstanceTypeAsParent()) {
+ Error(
+ "Super class must either be abstract (annotate super class with "
+ "@abstract) "
+ "or this class must have the same instance type as the super class "
+ "(annotate this class with @hasSameInstanceTypeAsParent).")
+ .Position(this->decl_->name->pos);
+ }
+ }
+ }
+ TypeVisitor::VisitClassFieldsAndMethods(const_cast<ClassType*>(this),
+ this->decl_);
+ is_finalized_ = true;
+ if (GenerateCppClassDefinitions()) {
+ for (const Field& f : fields()) {
+ if (f.is_weak) {
+ Error("Generation of C++ class for Torque class ", name(),
+ " is not supported yet, because field ", f.name_and_type.name,
+ ": ", *f.name_and_type.type, " is a weak field.")
+ .Position(f.pos);
+ }
+ }
+ }
+ CheckForDuplicateFields();
+}
+
+void ClassType::GenerateAccessors() {
+ // For each field, construct AST snippets that implement a CSA accessor
+ // function and define a corresponding '.field' operator. The
+ // implementation iterator will turn the snippets into code.
+ for (auto& field : fields_) {
+ if (field.index || field.name_and_type.type == TypeOracle::GetVoidType()) {
+ continue;
+ }
+ CurrentSourcePosition::Scope position_activator(field.pos);
+ IdentifierExpression* parameter =
+ MakeNode<IdentifierExpression>(MakeNode<Identifier>(std::string{"o"}));
+
+ // Load accessor
+ std::string camel_field_name = CamelifyString(field.name_and_type.name);
+ std::string load_macro_name = "Load" + this->name() + camel_field_name;
+ Signature load_signature;
+ load_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
+ load_signature.parameter_types.types.push_back(this);
+ load_signature.parameter_types.var_args = false;
+ load_signature.return_type = field.name_and_type.type;
+ Statement* load_body =
+ MakeNode<ReturnStatement>(MakeNode<FieldAccessExpression>(
+ parameter, MakeNode<Identifier>(field.name_and_type.name)));
+ Declarations::DeclareMacro(load_macro_name, true, base::nullopt,
+ load_signature, false, load_body, base::nullopt,
+ false);
+
+ // Store accessor
+ IdentifierExpression* value = MakeNode<IdentifierExpression>(
+ std::vector<std::string>{}, MakeNode<Identifier>(std::string{"v"}));
+ std::string store_macro_name = "Store" + this->name() + camel_field_name;
+ Signature store_signature;
+ store_signature.parameter_names.push_back(MakeNode<Identifier>("o"));
+ store_signature.parameter_names.push_back(MakeNode<Identifier>("v"));
+ store_signature.parameter_types.types.push_back(this);
+ store_signature.parameter_types.types.push_back(field.name_and_type.type);
+ store_signature.parameter_types.var_args = false;
+ // TODO(danno): Store macros probably should return their value argument
+ store_signature.return_type = TypeOracle::GetVoidType();
+ Statement* store_body =
+ MakeNode<ExpressionStatement>(MakeNode<AssignmentExpression>(
+ MakeNode<FieldAccessExpression>(
+ parameter, MakeNode<Identifier>(field.name_and_type.name)),
+ value));
+ Declarations::DeclareMacro(store_macro_name, true, base::nullopt,
+ store_signature, false, store_body,
+ base::nullopt, false);
+ }
}
void PrintSignature(std::ostream& os, const Signature& sig, bool with_names) {
@@ -505,62 +583,50 @@ VisitResult VisitResult::NeverResult() {
return result;
}
-std::tuple<size_t, std::string, std::string> Field::GetFieldSizeInformation()
- const {
+std::tuple<size_t, std::string> Field::GetFieldSizeInformation() const {
std::string size_string = "#no size";
- std::string machine_type = "#no machine type";
const Type* field_type = this->name_and_type.type;
size_t field_size = 0;
if (field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
field_size = kTaggedSize;
size_string = "kTaggedSize";
- machine_type = field_type->IsSubtypeOf(TypeOracle::GetSmiType())
- ? "MachineType::TaggedSigned()"
- : "MachineType::AnyTagged()";
} else if (field_type->IsSubtypeOf(TypeOracle::GetRawPtrType())) {
field_size = kSystemPointerSize;
size_string = "kSystemPointerSize";
- machine_type = "MachineType::Pointer()";
- } else if (field_type == TypeOracle::GetInt32Type()) {
- field_size = kInt32Size;
- size_string = "kInt32Size";
- machine_type = "MachineType::Int32()";
- } else if (field_type == TypeOracle::GetUint32Type()) {
- field_size = kInt32Size;
- size_string = "kInt32Size";
- machine_type = "MachineType::Uint32()";
- } else if (field_type == TypeOracle::GetInt16Type()) {
- field_size = kUInt16Size;
- size_string = "kUInt16Size";
- machine_type = "MachineType::Int16()";
- } else if (field_type == TypeOracle::GetUint16Type()) {
- field_size = kUInt16Size;
- size_string = "kUInt16Size";
- machine_type = "MachineType::Uint16()";
- } else if (field_type == TypeOracle::GetInt8Type()) {
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetVoidType())) {
+ field_size = 0;
+ size_string = "0";
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetInt8Type())) {
field_size = kUInt8Size;
size_string = "kUInt8Size";
- machine_type = "MachineType::Int8()";
- } else if (field_type == TypeOracle::GetUint8Type()) {
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetUint8Type())) {
field_size = kUInt8Size;
size_string = "kUInt8Size";
- machine_type = "MachineType::Uint8()";
- } else if (field_type == TypeOracle::GetFloat64Type()) {
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetInt16Type())) {
+ field_size = kUInt16Size;
+ size_string = "kUInt16Size";
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetUint16Type())) {
+ field_size = kUInt16Size;
+ size_string = "kUInt16Size";
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetInt32Type())) {
+ field_size = kInt32Size;
+ size_string = "kInt32Size";
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetUint32Type())) {
+ field_size = kInt32Size;
+ size_string = "kInt32Size";
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetFloat64Type())) {
field_size = kDoubleSize;
size_string = "kDoubleSize";
- machine_type = "MachineType::Float64()";
- } else if (field_type == TypeOracle::GetIntPtrType()) {
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetIntPtrType())) {
field_size = kIntptrSize;
size_string = "kIntptrSize";
- machine_type = "MachineType::IntPtr()";
- } else if (field_type == TypeOracle::GetUIntPtrType()) {
+ } else if (field_type->IsSubtypeOf(TypeOracle::GetUIntPtrType())) {
field_size = kIntptrSize;
size_string = "kIntptrSize";
- machine_type = "MachineType::IntPtr()";
} else {
ReportError("fields of type ", *field_type, " are not (yet) supported");
}
- return std::make_tuple(field_size, size_string, machine_type);
+ return std::make_tuple(field_size, size_string);
}
} // namespace torque
diff --git a/deps/v8/src/torque/types.h b/deps/v8/src/torque/types.h
index 8d8a5dd12c..0d79c1f405 100644
--- a/deps/v8/src/torque/types.h
+++ b/deps/v8/src/torque/types.h
@@ -12,6 +12,8 @@
#include <vector>
#include "src/base/optional.h"
+#include "src/torque/ast.h"
+#include "src/torque/constants.h"
#include "src/torque/source-positions.h"
#include "src/torque/utils.h"
@@ -19,38 +21,6 @@ namespace v8 {
namespace internal {
namespace torque {
-static const char* const CONSTEXPR_TYPE_PREFIX = "constexpr ";
-static const char* const NEVER_TYPE_STRING = "never";
-static const char* const CONSTEXPR_BOOL_TYPE_STRING = "constexpr bool";
-static const char* const CONSTEXPR_INTPTR_TYPE_STRING = "constexpr intptr";
-static const char* const BOOL_TYPE_STRING = "bool";
-static const char* const VOID_TYPE_STRING = "void";
-static const char* const ARGUMENTS_TYPE_STRING = "Arguments";
-static const char* const CONTEXT_TYPE_STRING = "Context";
-static const char* const MAP_TYPE_STRING = "Map";
-static const char* const OBJECT_TYPE_STRING = "Object";
-static const char* const HEAP_OBJECT_TYPE_STRING = "HeapObject";
-static const char* const JSOBJECT_TYPE_STRING = "JSObject";
-static const char* const SMI_TYPE_STRING = "Smi";
-static const char* const TAGGED_TYPE_STRING = "Tagged";
-static const char* const RAWPTR_TYPE_STRING = "RawPtr";
-static const char* const CONST_STRING_TYPE_STRING = "constexpr string";
-static const char* const STRING_TYPE_STRING = "String";
-static const char* const NUMBER_TYPE_STRING = "Number";
-static const char* const BUILTIN_POINTER_TYPE_STRING = "BuiltinPtr";
-static const char* const INTPTR_TYPE_STRING = "intptr";
-static const char* const UINTPTR_TYPE_STRING = "uintptr";
-static const char* const INT32_TYPE_STRING = "int32";
-static const char* const UINT32_TYPE_STRING = "uint32";
-static const char* const INT16_TYPE_STRING = "int16";
-static const char* const UINT16_TYPE_STRING = "uint16";
-static const char* const INT8_TYPE_STRING = "int8";
-static const char* const UINT8_TYPE_STRING = "uint8";
-static const char* const FLOAT64_TYPE_STRING = "float64";
-static const char* const CONST_INT31_TYPE_STRING = "constexpr int31";
-static const char* const CONST_INT32_TYPE_STRING = "constexpr int32";
-static const char* const CONST_FLOAT64_TYPE_STRING = "constexpr float64";
-
class AggregateType;
struct Identifier;
class Macro;
@@ -111,7 +81,7 @@ class TypeBase {
return static_cast<const x*>(declarable); \
}
-class Type : public TypeBase {
+class V8_EXPORT_PRIVATE Type : public TypeBase {
public:
virtual bool IsSubtypeOf(const Type* supertype) const;
@@ -132,7 +102,9 @@ class Type : public TypeBase {
}
virtual bool IsTransient() const { return false; }
virtual const Type* NonConstexprVersion() const { return this; }
+ virtual const Type* ConstexprVersion() const { return nullptr; }
base::Optional<const ClassType*> ClassSupertype() const;
+ virtual std::vector<std::string> GetRuntimeTypes() const { return {}; }
static const Type* CommonSupertype(const Type* a, const Type* b);
void AddAlias(std::string alias) const { aliases_.insert(std::move(alias)); }
@@ -175,7 +147,7 @@ struct Field {
// TODO(danno): This likely should be refactored, the handling of the types
// using the universal grab-bag utility with std::tie, as well as the
// reliance of string types is quite clunky.
- std::tuple<size_t, std::string, std::string> GetFieldSizeInformation() const;
+ std::tuple<size_t, std::string> GetFieldSizeInformation() const;
SourcePosition pos;
const AggregateType* aggregate;
@@ -184,6 +156,7 @@ struct Field {
size_t offset;
bool is_weak;
bool const_qualified;
+ bool generate_verify;
};
std::ostream& operator<<(std::ostream& os, const Field& name_and_type);
@@ -231,26 +204,44 @@ class AbstractType final : public Type {
}
std::string GetGeneratedTNodeTypeNameImpl() const override;
bool IsConstexpr() const override {
- return name().substr(0, strlen(CONSTEXPR_TYPE_PREFIX)) ==
- CONSTEXPR_TYPE_PREFIX;
+ bool is_constexpr = non_constexpr_version_ != nullptr;
+ DCHECK_EQ(is_constexpr, IsConstexprName(name()));
+ return is_constexpr;
}
+
const Type* NonConstexprVersion() const override {
- if (IsConstexpr()) return *non_constexpr_version_;
- return this;
+ if (non_constexpr_version_) return non_constexpr_version_;
+ if (!IsConstexpr()) return this;
+ return nullptr;
+ }
+
+ const AbstractType* ConstexprVersion() const override {
+ if (constexpr_version_) return constexpr_version_;
+ if (IsConstexpr()) return this;
+ return nullptr;
}
+ std::vector<std::string> GetRuntimeTypes() const override { return {name()}; }
+
private:
friend class TypeOracle;
AbstractType(const Type* parent, bool transient, const std::string& name,
const std::string& generated_type,
- base::Optional<const AbstractType*> non_constexpr_version)
+ const Type* non_constexpr_version)
: Type(Kind::kAbstractType, parent),
transient_(transient),
name_(name),
generated_type_(generated_type),
non_constexpr_version_(non_constexpr_version) {
- DCHECK_EQ(non_constexpr_version_.has_value(), IsConstexpr());
if (parent) DCHECK(parent->IsConstexpr() == IsConstexpr());
+ DCHECK_EQ(!IsConstexprName(name), non_constexpr_version == nullptr);
+ DCHECK_IMPLIES(IsConstexprName(name),
+ !non_constexpr_version->IsConstexpr());
+ }
+
+ void SetConstexprVersion(const AbstractType* type) const {
+ DCHECK_EQ(GetConstexprName(name()), type->name());
+ constexpr_version_ = type;
}
bool IsTransient() const override { return transient_; }
@@ -258,11 +249,12 @@ class AbstractType final : public Type {
bool transient_;
const std::string name_;
const std::string generated_type_;
- base::Optional<const AbstractType*> non_constexpr_version_;
+ const Type* non_constexpr_version_;
+ mutable const AbstractType* constexpr_version_ = nullptr;
};
// For now, builtin pointers are restricted to Torque-defined builtins.
-class BuiltinPointerType final : public Type {
+class V8_EXPORT_PRIVATE BuiltinPointerType final : public Type {
public:
DECLARE_TYPE_BOILERPLATE(BuiltinPointerType)
std::string ToExplicitString() const override;
@@ -348,7 +340,7 @@ struct TypeLess {
}
};
-class UnionType final : public Type {
+class V8_EXPORT_PRIVATE UnionType final : public Type {
public:
DECLARE_TYPE_BOILERPLATE(UnionType)
std::string ToExplicitString() const override;
@@ -423,6 +415,15 @@ class UnionType final : public Type {
return union_type ? UnionType(*union_type) : UnionType(t);
}
+ std::vector<std::string> GetRuntimeTypes() const override {
+ std::vector<std::string> result;
+ for (const Type* member : types_) {
+ std::vector<std::string> sub_result = member->GetRuntimeTypes();
+ result.insert(result.end(), sub_result.begin(), sub_result.end());
+ }
+ return result;
+ }
+
private:
explicit UnionType(const Type* t) : Type(Kind::kUnionType, t), types_({t}) {}
void RecomputeParent();
@@ -439,10 +440,15 @@ class AggregateType : public Type {
std::string GetGeneratedTypeNameImpl() const override { UNREACHABLE(); }
std::string GetGeneratedTNodeTypeNameImpl() const override { UNREACHABLE(); }
+ virtual void Finalize() const = 0;
+
virtual bool HasIndexedField() const { return false; }
void SetFields(std::vector<Field> fields) { fields_ = std::move(fields); }
- const std::vector<Field>& fields() const { return fields_; }
+ const std::vector<Field>& fields() const {
+ if (!is_finalized_) Finalize();
+ return fields_;
+ }
bool HasField(const std::string& name) const;
const Field& LookupField(const std::string& name) const;
const std::string& name() const { return name_; }
@@ -458,23 +464,35 @@ class AggregateType : public Type {
}
void RegisterMethod(Method* method) { methods_.push_back(method); }
- const std::vector<Method*>& Methods() const { return methods_; }
+ const std::vector<Method*>& Methods() const {
+ if (!is_finalized_) Finalize();
+ return methods_;
+ }
std::vector<Method*> Methods(const std::string& name) const;
- std::vector<const AggregateType*> GetHierarchy();
+ std::vector<const AggregateType*> GetHierarchy() const;
+ std::vector<std::string> GetRuntimeTypes() const override { return {name_}; }
protected:
AggregateType(Kind kind, const Type* parent, Namespace* nspace,
const std::string& name)
- : Type(kind, parent), namespace_(nspace), name_(name) {}
+ : Type(kind, parent),
+ is_finalized_(false),
+ namespace_(nspace),
+ name_(name) {}
- void CheckForDuplicateFields();
+ void CheckForDuplicateFields() const;
+ // Use this lookup if you do not want to trigger finalization on this type.
+ const Field& LookupFieldInternal(const std::string& name) const;
+
+ protected:
+ mutable bool is_finalized_;
+ std::vector<Field> fields_;
private:
Namespace* namespace_;
std::string name_;
std::vector<Method*> methods_;
- std::vector<Field> fields_;
};
class StructType final : public AggregateType {
@@ -486,22 +504,44 @@ class StructType final : public AggregateType {
private:
friend class TypeOracle;
StructType(Namespace* nspace, const std::string& name)
- : AggregateType(Kind::kStructType, nullptr, nspace, name) {
+ : AggregateType(Kind::kStructType, nullptr, nspace, name) {}
+
+ void Finalize() const override {
+ is_finalized_ = true;
CheckForDuplicateFields();
}
const std::string& GetStructName() const { return name(); }
};
+class TypeAlias;
+
class ClassType final : public AggregateType {
public:
+ static constexpr ClassFlags kInternalFlags = ClassFlag::kHasIndexedField;
+
DECLARE_TYPE_BOILERPLATE(ClassType)
std::string ToExplicitString() const override;
std::string GetGeneratedTypeNameImpl() const override;
std::string GetGeneratedTNodeTypeNameImpl() const override;
- bool IsExtern() const { return is_extern_; }
- bool ShouldGeneratePrint() const { return generate_print_; }
- bool IsTransient() const override { return transient_; }
+ bool IsExtern() const { return flags_ & ClassFlag::kExtern; }
+ bool ShouldGeneratePrint() const {
+ return flags_ & ClassFlag::kGeneratePrint;
+ }
+ bool ShouldGenerateVerify() const {
+ return flags_ & ClassFlag::kGenerateVerify;
+ }
+ bool IsTransient() const override { return flags_ & ClassFlag::kTransient; }
+ bool IsAbstract() const { return flags_ & ClassFlag::kAbstract; }
+ bool IsInstantiatedAbstractClass() const {
+ return flags_ & ClassFlag::kInstantiatedAbstractClass;
+ }
+ bool HasSameInstanceTypeAsParent() const {
+ return flags_ & ClassFlag::kHasSameInstanceTypeAsParent;
+ }
+ bool GenerateCppClassDefinitions() const {
+ return flags_ & ClassFlag::kGenerateCppClassDefinitions;
+ }
bool HasIndexedField() const override;
size_t size() const { return size_; }
const ClassType* GetSuperClass() const {
@@ -509,26 +549,28 @@ class ClassType final : public AggregateType {
return parent()->IsClassType() ? ClassType::DynamicCast(parent()) : nullptr;
}
void SetSize(size_t size) { size_ = size; }
+ void GenerateAccessors();
bool AllowInstantiation() const;
const Field& RegisterField(Field field) override {
if (field.index) {
- has_indexed_field_ = true;
+ flags_ |= ClassFlag::kHasIndexedField;
}
return AggregateType::RegisterField(field);
}
+ void Finalize() const override;
private:
friend class TypeOracle;
+ friend class TypeVisitor;
ClassType(const Type* parent, Namespace* nspace, const std::string& name,
- bool is_extern, bool generate_print, bool transient,
- const std::string& generates);
+ ClassFlags flags, const std::string& generates,
+ const ClassDeclaration* decl, const TypeAlias* alias);
- bool is_extern_;
- bool generate_print_;
- bool transient_;
size_t size_;
- bool has_indexed_field_;
+ mutable ClassFlags flags_;
const std::string generates_;
+ const ClassDeclaration* decl_;
+ const TypeAlias* alias_;
};
inline std::ostream& operator<<(std::ostream& os, const Type& t) {
@@ -564,7 +606,7 @@ class VisitResult {
base::Optional<StackRange> stack_range_;
};
-typedef std::map<std::string, VisitResult> NameValueMap;
+using NameValueMap = std::map<std::string, VisitResult>;
VisitResult ProjectStructField(VisitResult structure,
const std::string& fieldname);
@@ -574,7 +616,7 @@ class VisitResultVector : public std::vector<VisitResult> {
VisitResultVector() : std::vector<VisitResult>() {}
VisitResultVector(std::initializer_list<VisitResult> init)
: std::vector<VisitResult>(init) {}
- TypeVector GetTypeVector() const {
+ TypeVector ComputeTypeVector() const {
TypeVector result;
for (auto& visit_result : *this) {
result.push_back(visit_result.type());
@@ -585,21 +627,21 @@ class VisitResultVector : public std::vector<VisitResult> {
std::ostream& operator<<(std::ostream& os, const TypeVector& types);
-typedef std::vector<NameAndType> NameAndTypeVector;
+using NameAndTypeVector = std::vector<NameAndType>;
struct LabelDefinition {
std::string name;
NameAndTypeVector parameters;
};
-typedef std::vector<LabelDefinition> LabelDefinitionVector;
+using LabelDefinitionVector = std::vector<LabelDefinition>;
struct LabelDeclaration {
- std::string name;
+ Identifier* name;
TypeVector types;
};
-typedef std::vector<LabelDeclaration> LabelDeclarationVector;
+using LabelDeclarationVector = std::vector<LabelDeclaration>;
struct ParameterTypes {
TypeVector types;
@@ -610,7 +652,7 @@ std::ostream& operator<<(std::ostream& os, const ParameterTypes& parameters);
enum class ParameterMode { kProcessImplicit, kIgnoreImplicit };
-typedef std::vector<Identifier*> NameVector;
+using NameVector = std::vector<Identifier*>;
struct Signature {
Signature(NameVector n, base::Optional<std::string> arguments_variable,
diff --git a/deps/v8/src/torque/utils.cc b/deps/v8/src/torque/utils.cc
index 71d56d123a..3e2f715f0d 100644
--- a/deps/v8/src/torque/utils.cc
+++ b/deps/v8/src/torque/utils.cc
@@ -15,6 +15,8 @@ namespace v8 {
namespace internal {
namespace torque {
+DEFINE_CONTEXTUAL_VARIABLE(TorqueMessages)
+
std::string StringLiteralUnquote(const std::string& s) {
DCHECK(('"' == s.front() && '"' == s.back()) ||
('\'' == s.front() && '\'' == s.back()));
@@ -60,7 +62,6 @@ std::string StringLiteralQuote(const std::string& s) {
case '\t':
result << "\\t";
break;
- case '\'':
case '"':
case '\\':
result << "\\" << s[i];
@@ -73,7 +74,11 @@ std::string StringLiteralQuote(const std::string& s) {
return result.str();
}
+#ifdef V8_OS_WIN
+static const char kFileUriPrefix[] = "file:///";
+#else
static const char kFileUriPrefix[] = "file://";
+#endif
static const int kFileUriPrefixLength = sizeof(kFileUriPrefix) - 1;
static int HexCharToInt(unsigned char c) {
@@ -118,26 +123,27 @@ std::string CurrentPositionAsString() {
return PositionAsString(CurrentSourcePosition::Get());
}
-DEFINE_CONTEXTUAL_VARIABLE(LintErrorStatus)
+void NamingConventionError(const std::string& type, const std::string& name,
+ const std::string& convention) {
+ Lint(type, " \"", name, "\" does not follow \"", convention,
+ "\" naming convention.");
+}
-[[noreturn]] void ThrowTorqueError(const std::string& message,
- bool include_position) {
- TorqueError error(message);
- if (include_position) error.position = CurrentSourcePosition::Get();
- throw error;
+MessageBuilder::MessageBuilder(const std::string& message,
+ TorqueMessage::Kind kind) {
+ base::Optional<SourcePosition> position;
+ if (CurrentSourcePosition::HasScope()) {
+ position = CurrentSourcePosition::Get();
+ }
+ message_ = TorqueMessage{message, position, kind};
}
-void LintError(const std::string& error) {
- LintErrorStatus::SetLintError();
- std::cerr << CurrentPositionAsString() << ": Lint error: " << error << "\n";
+void MessageBuilder::Report() const {
+ TorqueMessages::Get().push_back(message_);
}
-void NamingConventionError(const std::string& type, const std::string& name,
- const std::string& convention) {
- std::stringstream sstream;
- sstream << type << " \"" << name << "\" doesn't follow \"" << convention
- << "\" naming convention.";
- LintError(sstream.str());
+[[noreturn]] void MessageBuilder::Throw() const {
+ throw TorqueAbortCompilation{};
}
namespace {
@@ -214,6 +220,11 @@ std::string CapifyStringWithUnderscores(const std::string& camellified_string) {
if (previousWasLower && isupper(current)) {
result += "_";
}
+ if (current == '.' || current == '-') {
+ result += "_";
+ previousWasLower = false;
+ continue;
+ }
result += toupper(current);
previousWasLower = (islower(current));
}
diff --git a/deps/v8/src/torque/utils.h b/deps/v8/src/torque/utils.h
index e90f6ec55a..10b91ce7d4 100644
--- a/deps/v8/src/torque/utils.h
+++ b/deps/v8/src/torque/utils.h
@@ -28,20 +28,60 @@ std::string StringLiteralQuote(const std::string& s);
V8_EXPORT_PRIVATE base::Optional<std::string> FileUriDecode(
const std::string& s);
-class LintErrorStatus : public ContextualClass<LintErrorStatus> {
+struct TorqueMessage {
+ enum class Kind { kError, kLint };
+
+ std::string message;
+ base::Optional<SourcePosition> position;
+ Kind kind;
+};
+
+DECLARE_CONTEXTUAL_VARIABLE(TorqueMessages, std::vector<TorqueMessage>);
+
+class V8_EXPORT_PRIVATE MessageBuilder {
public:
- LintErrorStatus() : has_lint_errors_(false) {}
+ MessageBuilder(const std::string& message, TorqueMessage::Kind kind);
+
+ MessageBuilder& Position(SourcePosition position) {
+ message_.position = position;
+ return *this;
+ }
- static bool HasLintErrors() { return Get().has_lint_errors_; }
- static void SetLintError() { Get().has_lint_errors_ = true; }
+ [[noreturn]] void Throw() const;
+
+ ~MessageBuilder() {
+ // This will also get called in case the error is thrown.
+ Report();
+ }
private:
- bool has_lint_errors_;
+ MessageBuilder() = delete;
+ void Report() const;
+
+ TorqueMessage message_;
};
-void LintError(const std::string& error);
+// Used for throwing exceptions. Retrieve TorqueMessage from the contextual
+// for specific error information.
+struct TorqueAbortCompilation {};
+
+template <class... Args>
+static MessageBuilder Message(TorqueMessage::Kind kind, Args&&... args) {
+ std::stringstream stream;
+ USE((stream << std::forward<Args>(args))...);
+ return MessageBuilder(stream.str(), kind);
+}
+
+template <class... Args>
+MessageBuilder Error(Args&&... args) {
+ return Message(TorqueMessage::Kind::kError, std::forward<Args>(args)...);
+}
+template <class... Args>
+MessageBuilder Lint(Args&&... args) {
+ return Message(TorqueMessage::Kind::kLint, std::forward<Args>(args)...);
+}
-// Prints a LintError with the format "{type} '{name}' doesn't follow
+// Report a LintError with the format "{type} '{name}' doesn't follow
// '{convention}' naming convention".
void NamingConventionError(const std::string& type, const std::string& name,
const std::string& convention);
@@ -52,26 +92,9 @@ bool IsSnakeCase(const std::string& s);
bool IsValidNamespaceConstName(const std::string& s);
bool IsValidTypeName(const std::string& s);
-struct TorqueError : public std::exception {
- explicit TorqueError(const std::string& message) : message(message) {}
-
- std::string message;
- base::Optional<SourcePosition> position;
-};
-
-[[noreturn]] void ThrowTorqueError(const std::string& error,
- bool include_position);
template <class... Args>
[[noreturn]] void ReportError(Args&&... args) {
- std::stringstream s;
- USE((s << std::forward<Args>(args))...);
- ThrowTorqueError(s.str(), true);
-}
-template <class... Args>
-[[noreturn]] void ReportErrorWithoutPosition(Args&&... args) {
- std::stringstream s;
- USE((s << std::forward<Args>(args))...);
- ThrowTorqueError(s.str(), false);
+ Error(std::forward<Args>(args)...).Throw();
}
std::string CapifyStringWithUnderscores(const std::string& camellified_string);
@@ -208,7 +231,9 @@ class Stack {
void Poke(BottomOffset from_bottom, T x) {
elements_.at(from_bottom.offset) = std::move(x);
}
- void Push(T x) { elements_.push_back(std::move(x)); }
+ void Push(T x) {
+ elements_.push_back(std::move(x));
+ }
StackRange TopRange(size_t slot_count) const {
DCHECK_GE(Size(), slot_count);
return StackRange{AboveTop() - slot_count, AboveTop()};
diff --git a/deps/v8/src/tracing/OWNERS b/deps/v8/src/tracing/OWNERS
index 87c96616bc..6afd4d0fee 100644
--- a/deps/v8/src/tracing/OWNERS
+++ b/deps/v8/src/tracing/OWNERS
@@ -1 +1,2 @@
alph@chromium.org
+petermarshall@chromium.org
diff --git a/deps/v8/src/tracing/trace-event.cc b/deps/v8/src/tracing/trace-event.cc
index 41c59269e8..6c631b1f3c 100644
--- a/deps/v8/src/tracing/trace-event.cc
+++ b/deps/v8/src/tracing/trace-event.cc
@@ -6,10 +6,10 @@
#include <string.h>
-#include "src/counters.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
+#include "src/init/v8.h"
+#include "src/logging/counters.h"
#include "src/tracing/traced-value.h"
-#include "src/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/tracing/traced-value.cc b/deps/v8/src/tracing/traced-value.cc
index 67279c2cdd..9011b51f48 100644
--- a/deps/v8/src/tracing/traced-value.cc
+++ b/deps/v8/src/tracing/traced-value.cc
@@ -5,8 +5,8 @@
#include "src/tracing/traced-value.h"
#include "src/base/platform/platform.h"
-#include "src/conversions.h"
-#include "src/vector.h"
+#include "src/numbers/conversions.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace tracing {
diff --git a/deps/v8/src/tracing/tracing-category-observer.cc b/deps/v8/src/tracing/tracing-category-observer.cc
index d7d3a69c77..a44074d52d 100644
--- a/deps/v8/src/tracing/tracing-category-observer.cc
+++ b/deps/v8/src/tracing/tracing-category-observer.cc
@@ -5,9 +5,9 @@
#include "src/tracing/tracing-category-observer.h"
#include "src/base/atomic-utils.h"
-#include "src/counters.h"
+#include "src/init/v8.h"
+#include "src/logging/counters.h"
#include "src/tracing/trace-event.h"
-#include "src/v8.h"
namespace v8 {
namespace tracing {
@@ -40,6 +40,11 @@ void TracingCategoryObserver::OnTraceEnabled() {
i::TracingFlags::runtime_stats.fetch_or(ENABLED_BY_SAMPLING,
std::memory_order_relaxed);
}
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
+ &enabled);
+ if (enabled) {
+ i::TracingFlags::gc.fetch_or(ENABLED_BY_TRACING, std::memory_order_relaxed);
+ }
TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
&enabled);
if (enabled) {
@@ -58,6 +63,8 @@ void TracingCategoryObserver::OnTraceDisabled() {
i::TracingFlags::runtime_stats.fetch_and(
~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING), std::memory_order_relaxed);
+ i::TracingFlags::gc.fetch_and(~ENABLED_BY_TRACING, std::memory_order_relaxed);
+
i::TracingFlags::gc_stats.fetch_and(~ENABLED_BY_TRACING,
std::memory_order_relaxed);
diff --git a/deps/v8/src/trap-handler/DEPS b/deps/v8/src/trap-handler/DEPS
index 7241cf55c5..061634d51b 100644
--- a/deps/v8/src/trap-handler/DEPS
+++ b/deps/v8/src/trap-handler/DEPS
@@ -11,8 +11,8 @@ include_rules = [
specific_include_rules = {
"trap-handler.h": [
"+src/base/build_config.h",
- "+src/globals.h",
- "+src/flags.h",
+ "+src/common/globals.h",
+ "+src/flags/flags.h",
],
"handler-inside-posix.h": [
# To access V8_OS_LINUX. This file is already included in build_config.h.
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
index 50fd4de439..51f5fe8250 100644
--- a/deps/v8/src/trap-handler/trap-handler.h
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -9,8 +9,8 @@
#include <stdlib.h>
#include "src/base/build_config.h"
-#include "src/flags.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/unicode-cache.h b/deps/v8/src/unicode-cache.h
deleted file mode 100644
index b6f6a85c6c..0000000000
--- a/deps/v8/src/unicode-cache.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UNICODE_CACHE_H_
-#define V8_UNICODE_CACHE_H_
-
-#include "src/base/macros.h"
-#include "src/unicode-decoder.h"
-#include "src/unicode.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-// Caching predicates used by scanners.
-class UnicodeCache {
- public:
- UnicodeCache() = default;
- typedef unibrow::Utf8Decoder<512> Utf8Decoder;
-
- StaticResource<Utf8Decoder>* utf8_decoder() { return &utf8_decoder_; }
-
- private:
- StaticResource<Utf8Decoder> utf8_decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_UNICODE_CACHE_H_
diff --git a/deps/v8/src/unicode-decoder.cc b/deps/v8/src/unicode-decoder.cc
deleted file mode 100644
index 6074bae81d..0000000000
--- a/deps/v8/src/unicode-decoder.cc
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-#include "src/unicode-inl.h"
-#include "src/unicode-decoder.h"
-#include <stdio.h>
-#include <stdlib.h>
-
-namespace unibrow {
-
-uint16_t Utf8Iterator::operator*() {
- if (V8_UNLIKELY(char_ > Utf16::kMaxNonSurrogateCharCode)) {
- return trailing_ ? Utf16::TrailSurrogate(char_)
- : Utf16::LeadSurrogate(char_);
- }
-
- DCHECK_EQ(trailing_, false);
- return char_;
-}
-
-Utf8Iterator& Utf8Iterator::operator++() {
- if (V8_UNLIKELY(this->Done())) {
- char_ = Utf8::kBufferEmpty;
- return *this;
- }
-
- if (V8_UNLIKELY(char_ > Utf16::kMaxNonSurrogateCharCode && !trailing_)) {
- trailing_ = true;
- return *this;
- }
-
- trailing_ = false;
- offset_ = cursor_;
-
- char_ =
- Utf8::ValueOf(reinterpret_cast<const uint8_t*>(stream_.begin()) + cursor_,
- stream_.length() - cursor_, &cursor_);
- return *this;
-}
-
-Utf8Iterator Utf8Iterator::operator++(int) {
- Utf8Iterator old(*this);
- ++*this;
- return old;
-}
-
-bool Utf8Iterator::Done() {
- return offset_ == static_cast<size_t>(stream_.length());
-}
-
-void Utf8DecoderBase::Reset(uint16_t* buffer, size_t buffer_length,
- const v8::internal::Vector<const char>& stream) {
- size_t utf16_length = 0;
-
- Utf8Iterator it = Utf8Iterator(stream);
- // Loop until stream is read, writing to buffer as long as buffer has space.
- while (utf16_length < buffer_length && !it.Done()) {
- *buffer++ = *it;
- ++it;
- utf16_length++;
- }
- bytes_read_ = it.Offset();
- trailing_ = it.Trailing();
- chars_written_ = utf16_length;
-
- // Now that writing to buffer is done, we just need to calculate utf16_length
- while (!it.Done()) {
- ++it;
- utf16_length++;
- }
- utf16_length_ = utf16_length;
-}
-
-void Utf8DecoderBase::WriteUtf16Slow(
- uint16_t* data, size_t length,
- const v8::internal::Vector<const char>& stream, size_t offset,
- bool trailing) {
- Utf8Iterator it = Utf8Iterator(stream, offset, trailing);
- while (!it.Done()) {
- DCHECK_GT(length--, 0);
- *data++ = *it;
- ++it;
- }
-}
-
-} // namespace unibrow
diff --git a/deps/v8/src/unicode-decoder.h b/deps/v8/src/unicode-decoder.h
deleted file mode 100644
index 2bd4032c56..0000000000
--- a/deps/v8/src/unicode-decoder.h
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UNICODE_DECODER_H_
-#define V8_UNICODE_DECODER_H_
-
-#include <sys/types.h>
-#include <algorithm>
-#include "src/globals.h"
-#include "src/memcopy.h"
-#include "src/unicode.h"
-#include "src/vector.h"
-
-namespace unibrow {
-
-class Utf8Iterator {
- public:
- explicit Utf8Iterator(const v8::internal::Vector<const char>& stream)
- : Utf8Iterator(stream, 0, false) {}
- Utf8Iterator(const v8::internal::Vector<const char>& stream, size_t offset,
- bool trailing)
- : stream_(stream),
- cursor_(offset),
- offset_(0),
- char_(0),
- trailing_(false) {
- DCHECK_LE(offset, stream.length());
- // Read the first char, setting offset_ to offset in the process.
- ++*this;
-
- // This must be set after reading the first char, since the offset marks
- // the start of the octet sequence that the trailing char is part of.
- trailing_ = trailing;
- if (trailing) {
- DCHECK_GT(char_, Utf16::kMaxNonSurrogateCharCode);
- }
- }
-
- uint16_t operator*();
- Utf8Iterator& operator++();
- Utf8Iterator operator++(int);
- bool Done();
- bool Trailing() { return trailing_; }
- size_t Offset() { return offset_; }
-
- private:
- const v8::internal::Vector<const char>& stream_;
- size_t cursor_;
- size_t offset_;
- uint32_t char_;
- bool trailing_;
-};
-
-class V8_EXPORT_PRIVATE Utf8DecoderBase {
- public:
- // Initialization done in subclass.
- inline Utf8DecoderBase();
- inline Utf8DecoderBase(uint16_t* buffer, size_t buffer_length,
- const v8::internal::Vector<const char>& stream);
- inline size_t Utf16Length() const { return utf16_length_; }
-
- protected:
- // This reads all characters and sets the utf16_length_.
- // The first buffer_length utf16 chars are cached in the buffer.
- void Reset(uint16_t* buffer, size_t buffer_length,
- const v8::internal::Vector<const char>& vector);
- static void WriteUtf16Slow(uint16_t* data, size_t length,
- const v8::internal::Vector<const char>& stream,
- size_t offset, bool trailing);
-
- size_t bytes_read_;
- size_t chars_written_;
- size_t utf16_length_;
- bool trailing_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(Utf8DecoderBase);
-};
-
-template <size_t kBufferSize>
-class Utf8Decoder : public Utf8DecoderBase {
- public:
- inline Utf8Decoder() = default;
- explicit inline Utf8Decoder(const v8::internal::Vector<const char>& stream);
- inline void Reset(const v8::internal::Vector<const char>& stream);
- inline size_t WriteUtf16(
- uint16_t* data, size_t length,
- const v8::internal::Vector<const char>& stream) const;
-
- private:
- uint16_t buffer_[kBufferSize];
-};
-
-Utf8DecoderBase::Utf8DecoderBase()
- : bytes_read_(0), chars_written_(0), utf16_length_(0), trailing_(false) {}
-
-Utf8DecoderBase::Utf8DecoderBase(
- uint16_t* buffer, size_t buffer_length,
- const v8::internal::Vector<const char>& stream) {
- Reset(buffer, buffer_length, stream);
-}
-
-template <size_t kBufferSize>
-Utf8Decoder<kBufferSize>::Utf8Decoder(
- const v8::internal::Vector<const char>& stream)
- : Utf8DecoderBase(buffer_, kBufferSize, stream) {}
-
-template <size_t kBufferSize>
-void Utf8Decoder<kBufferSize>::Reset(
- const v8::internal::Vector<const char>& stream) {
- Utf8DecoderBase::Reset(buffer_, kBufferSize, stream);
-}
-
-template <size_t kBufferSize>
-size_t Utf8Decoder<kBufferSize>::WriteUtf16(
- uint16_t* data, size_t data_length,
- const v8::internal::Vector<const char>& stream) const {
- DCHECK_GT(data_length, 0);
- data_length = std::min(data_length, utf16_length_);
-
- // memcpy everything in buffer.
- size_t memcpy_length = std::min(data_length, chars_written_);
- v8::internal::MemCopy(data, buffer_, memcpy_length * sizeof(uint16_t));
-
- if (data_length <= chars_written_) return data_length;
-
- // Copy the rest the slow way.
- WriteUtf16Slow(data + chars_written_, data_length - chars_written_, stream,
- bytes_read_, trailing_);
- return data_length;
-}
-
-class Latin1 {
- public:
- static const unsigned kMaxChar = 0xff;
- // Convert the character to Latin-1 case equivalent if possible.
- static inline uint16_t TryConvertToLatin1(uint16_t);
-};
-
-uint16_t Latin1::TryConvertToLatin1(uint16_t c) {
- switch (c) {
- // This are equivalent characters in unicode.
- case 0x39c:
- case 0x3bc:
- return 0xb5;
- // This is an uppercase of a Latin-1 character
- // outside of Latin-1.
- case 0x178:
- return 0xff;
- }
- return c;
-}
-
-
-} // namespace unibrow
-
-#endif // V8_UNICODE_DECODER_H_
diff --git a/deps/v8/src/utils/OWNERS b/deps/v8/src/utils/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/src/utils/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/utils/address-map.cc
index 112e3134b8..ab32ba418a 100644
--- a/deps/v8/src/address-map.cc
+++ b/deps/v8/src/utils/address-map.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/address-map.h"
+#include "src/utils/address-map.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/heap-object-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -18,7 +18,7 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
for (RootIndex root_index = RootIndex::kFirstStrongOrReadOnlyRoot;
root_index <= RootIndex::kLastStrongOrReadOnlyRoot; ++root_index) {
Object root = isolate->root(root_index);
- if (!root->IsHeapObject()) continue;
+ if (!root.IsHeapObject()) continue;
// Omit root entries that can be written after initialization. They must
// not be referenced through the root list in the snapshot.
// Since we map the raw address of an root item to its root list index, the
diff --git a/deps/v8/src/address-map.h b/deps/v8/src/utils/address-map.h
index 72ba97a4ec..d4de76ca8c 100644
--- a/deps/v8/src/address-map.h
+++ b/deps/v8/src/utils/address-map.h
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ADDRESS_MAP_H_
-#define V8_ADDRESS_MAP_H_
+#ifndef V8_UTILS_ADDRESS_MAP_H_
+#define V8_UTILS_ADDRESS_MAP_H_
#include "include/v8.h"
-#include "src/assert-scope.h"
#include "src/base/hashmap.h"
+#include "src/common/assert-scope.h"
#include "src/objects/heap-object.h"
-#include "src/roots.h"
+#include "src/roots/roots.h"
namespace v8 {
namespace internal {
@@ -20,7 +20,7 @@ class PointerToIndexHashMap
base::KeyEqualityMatcher<intptr_t>,
base::DefaultAllocationPolicy> {
public:
- typedef base::TemplateHashMapEntry<uintptr_t, uint32_t> Entry;
+ using Entry = base::TemplateHashMapEntry<uintptr_t, uint32_t>;
inline void Set(Type value, uint32_t index) {
uintptr_t key = Key(value);
@@ -77,4 +77,4 @@ class RootIndexMap {
} // namespace internal
} // namespace v8
-#endif // V8_ADDRESS_MAP_H_
+#endif // V8_UTILS_ADDRESS_MAP_H_
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/utils/allocation.cc
index 09d07920b3..27db17a479 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/allocation.h"
+#include "src/utils/allocation.h"
#include <stdlib.h> // For free, malloc.
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
-#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
-#include "src/memcopy.h"
-#include "src/v8.h"
-#include "src/vector.h"
+#include "src/init/v8.h"
+#include "src/sanitizer/lsan-page-allocator.h"
+#include "src/utils/memcopy.h"
+#include "src/utils/vector.h"
#if V8_LIBC_BIONIC
#include <malloc.h> // NOLINT
@@ -92,20 +92,18 @@ void* Malloced::New(size_t size) {
return result;
}
-void Malloced::Delete(void* p) {
- free(p);
-}
+void Malloced::Delete(void* p) { free(p); }
char* StrDup(const char* str) {
- int length = StrLength(str);
+ size_t length = strlen(str);
char* result = NewArray<char>(length + 1);
MemCopy(result, str, length);
result[length] = '\0';
return result;
}
-char* StrNDup(const char* str, int n) {
- int length = StrLength(str);
+char* StrNDup(const char* str, size_t n) {
+ size_t length = strlen(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
MemCopy(result, str, length);
@@ -138,7 +136,7 @@ void* AlignedAlloc(size_t size, size_t alignment) {
return result;
}
-void AlignedFree(void *ptr) {
+void AlignedFree(void* ptr) {
#if V8_OS_WIN
_aligned_free(ptr);
#elif V8_LIBC_BIONIC
@@ -283,12 +281,5 @@ void VirtualMemory::Free() {
RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
-void VirtualMemory::TakeControl(VirtualMemory* from) {
- DCHECK(!IsReserved());
- page_allocator_ = from->page_allocator_;
- region_ = from->region_;
- from->Reset();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/utils/allocation.h
index af1d459654..fa3e6f3d7d 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ALLOCATION_H_
-#define V8_ALLOCATION_H_
+#ifndef V8_UTILS_ALLOCATION_H_
+#define V8_UTILS_ALLOCATION_H_
#include "include/v8-platform.h"
#include "src/base/address-region.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
-#include "src/globals.h"
-#include "src/v8.h"
+#include "src/common/globals.h"
+#include "src/init/v8.h"
namespace v8 {
namespace internal {
@@ -30,7 +30,7 @@ class Isolate;
class V8_EXPORT_PRIVATE Malloced {
public:
void* operator new(size_t size) { return New(size); }
- void operator delete(void* p) { Delete(p); }
+ void operator delete(void* p) { Delete(p); }
static void* New(size_t size);
static void Delete(void* p);
@@ -60,14 +60,12 @@ void DeleteArray(T* array) {
delete[] array;
}
-
// The normal strdup functions use malloc. These versions of StrDup
// and StrNDup uses new and calls the FatalProcessOutOfMemory handler
// if allocation fails.
V8_EXPORT_PRIVATE char* StrDup(const char* str);
char* StrNDup(const char* str, int n);
-
// Allocation policy for allocating in the C free store using malloc
// and free. Used as the default policy for lists.
class FreeStoreAllocationPolicy {
@@ -81,7 +79,7 @@ class FreeStoreAllocationPolicy {
void* AllocWithRetry(size_t size);
V8_EXPORT_PRIVATE void* AlignedAlloc(size_t size, size_t alignment);
-void AlignedFree(void *ptr);
+void AlignedFree(void* ptr);
// Returns platfrom page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
@@ -186,11 +184,14 @@ class V8_EXPORT_PRIVATE VirtualMemory final {
~VirtualMemory();
// Move constructor.
- VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT { TakeControl(&other); }
+ VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT { *this = std::move(other); }
// Move assignment operator.
VirtualMemory& operator=(VirtualMemory&& other) V8_NOEXCEPT {
- TakeControl(&other);
+ DCHECK(!IsReserved());
+ page_allocator_ = other.page_allocator_;
+ region_ = other.region_;
+ other.Reset();
return *this;
}
@@ -202,7 +203,7 @@ class V8_EXPORT_PRIVATE VirtualMemory final {
v8::PageAllocator* page_allocator() { return page_allocator_; }
- const base::AddressRegion& region() const { return region_; }
+ base::AddressRegion region() const { return region_; }
// Returns the start address of the reserved memory.
// If the memory was reserved with an alignment, this address is not
@@ -235,10 +236,6 @@ class V8_EXPORT_PRIVATE VirtualMemory final {
// Frees all memory.
void Free();
- // Assign control of the reserved region to a different VirtualMemory object.
- // The old object is no longer functional (IsReserved() returns false).
- void TakeControl(VirtualMemory* from);
-
bool InVM(Address address, size_t size) {
return region_.contains(address, size);
}
@@ -254,4 +251,4 @@ class V8_EXPORT_PRIVATE VirtualMemory final {
} // namespace internal
} // namespace v8
-#endif // V8_ALLOCATION_H_
+#endif // V8_UTILS_ALLOCATION_H_
diff --git a/deps/v8/src/bit-vector.cc b/deps/v8/src/utils/bit-vector.cc
index 1da110b342..8d4b097bfd 100644
--- a/deps/v8/src/bit-vector.cc
+++ b/deps/v8/src/utils/bit-vector.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/bit-vector.h"
+#include "src/utils/bit-vector.h"
#include "src/base/bits.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -25,7 +25,6 @@ void BitVector::Print() {
}
#endif
-
void BitVector::Iterator::Advance() {
current_++;
uintptr_t val = current_value_;
@@ -41,7 +40,6 @@ void BitVector::Iterator::Advance() {
current_value_ = val >> 1;
}
-
int BitVector::Count() const {
if (data_length_ == 0) {
return base::bits::CountPopulation(data_.inline_);
diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/utils/bit-vector.h
index 1465e84983..5bb5ef8595 100644
--- a/deps/v8/src/bit-vector.h
+++ b/deps/v8/src/utils/bit-vector.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BIT_VECTOR_H_
-#define V8_BIT_VECTOR_H_
+#ifndef V8_UTILS_BIT_VECTOR_H_
+#define V8_UTILS_BIT_VECTOR_H_
-#include "src/allocation.h"
+#include "src/utils/allocation.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -369,4 +369,4 @@ class GrowableBitVector {
} // namespace internal
} // namespace v8
-#endif // V8_BIT_VECTOR_H_
+#endif // V8_UTILS_BIT_VECTOR_H_
diff --git a/deps/v8/src/boxed-float.h b/deps/v8/src/utils/boxed-float.h
index cdcc8fdad7..93f6baee4a 100644
--- a/deps/v8/src/boxed-float.h
+++ b/deps/v8/src/utils/boxed-float.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_BOXED_FLOAT_H_
-#define V8_BOXED_FLOAT_H_
+#ifndef V8_UTILS_BOXED_FLOAT_H_
+#define V8_UTILS_BOXED_FLOAT_H_
#include <cmath>
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -95,4 +95,4 @@ ASSERT_TRIVIALLY_COPYABLE(Float64);
} // namespace internal
} // namespace v8
-#endif // V8_BOXED_FLOAT_H_
+#endif // V8_UTILS_BOXED_FLOAT_H_
diff --git a/deps/v8/src/detachable-vector.cc b/deps/v8/src/utils/detachable-vector.cc
index 68e1ec8f17..3ca3d18a51 100644
--- a/deps/v8/src/detachable-vector.cc
+++ b/deps/v8/src/utils/detachable-vector.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/detachable-vector.h"
+#include "src/utils/detachable-vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/detachable-vector.h b/deps/v8/src/utils/detachable-vector.h
index 1e9ac98df2..232e89f183 100644
--- a/deps/v8/src/detachable-vector.h
+++ b/deps/v8/src/utils/detachable-vector.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_DETACHABLE_VECTOR_H_
-#define V8_DETACHABLE_VECTOR_H_
+#ifndef V8_UTILS_DETACHABLE_VECTOR_H_
+#define V8_UTILS_DETACHABLE_VECTOR_H_
#include <stddef.h>
@@ -101,4 +101,4 @@ class DetachableVector : public DetachableVectorBase {
} // namespace internal
} // namespace v8
-#endif // V8_DETACHABLE_VECTOR_H_
+#endif // V8_UTILS_DETACHABLE_VECTOR_H_
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/utils/identity-map.cc
index bd0377aff0..b9afe6b55f 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/utils/identity-map.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/identity-map.h"
+#include "src/utils/identity-map.h"
#include "src/base/functional.h"
#include "src/heap/heap.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/identity-map.h b/deps/v8/src/utils/identity-map.h
index fe254becee..b0dcb1c991 100644
--- a/deps/v8/src/identity-map.h
+++ b/deps/v8/src/utils/identity-map.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_IDENTITY_MAP_H_
-#define V8_IDENTITY_MAP_H_
+#ifndef V8_UTILS_IDENTITY_MAP_H_
+#define V8_UTILS_IDENTITY_MAP_H_
#include "src/base/functional.h"
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/objects/heap-object.h"
namespace v8 {
@@ -29,7 +29,7 @@ class V8_EXPORT_PRIVATE IdentityMapBase {
// within the {keys_} array in order to simulate a moving GC.
friend class IdentityMapTester;
- typedef void** RawEntry;
+ using RawEntry = void**;
explicit IdentityMapBase(Heap* heap)
: heap_(heap),
@@ -193,4 +193,4 @@ class IdentityMap : public IdentityMapBase {
} // namespace internal
} // namespace v8
-#endif // V8_IDENTITY_MAP_H_
+#endif // V8_UTILS_IDENTITY_MAP_H_
diff --git a/deps/v8/src/locked-queue-inl.h b/deps/v8/src/utils/locked-queue-inl.h
index bbc800c4a9..9416dd7d37 100644
--- a/deps/v8/src/locked-queue-inl.h
+++ b/deps/v8/src/utils/locked-queue-inl.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOCKED_QUEUE_INL_H_
-#define V8_LOCKED_QUEUE_INL_H_
+#ifndef V8_UTILS_LOCKED_QUEUE_INL_H_
+#define V8_UTILS_LOCKED_QUEUE_INL_H_
#include "src/base/atomic-utils.h"
-#include "src/locked-queue.h"
+#include "src/utils/locked-queue.h"
namespace v8 {
namespace internal {
@@ -18,7 +18,6 @@ struct LockedQueue<Record>::Node : Malloced {
base::AtomicValue<Node*> next;
};
-
template <typename Record>
inline LockedQueue<Record>::LockedQueue() {
head_ = new Node();
@@ -26,7 +25,6 @@ inline LockedQueue<Record>::LockedQueue() {
tail_ = head_;
}
-
template <typename Record>
inline LockedQueue<Record>::~LockedQueue() {
// Destroy all remaining nodes. Note that we do not destroy the actual values.
@@ -39,7 +37,6 @@ inline LockedQueue<Record>::~LockedQueue() {
}
}
-
template <typename Record>
inline void LockedQueue<Record>::Enqueue(const Record& record) {
Node* n = new Node();
@@ -52,7 +49,6 @@ inline void LockedQueue<Record>::Enqueue(const Record& record) {
}
}
-
template <typename Record>
inline bool LockedQueue<Record>::Dequeue(Record* record) {
Node* old_head = nullptr;
@@ -68,14 +64,12 @@ inline bool LockedQueue<Record>::Dequeue(Record* record) {
return true;
}
-
template <typename Record>
inline bool LockedQueue<Record>::IsEmpty() const {
base::MutexGuard guard(&head_mutex_);
return head_->next.Value() == nullptr;
}
-
template <typename Record>
inline bool LockedQueue<Record>::Peek(Record* record) const {
base::MutexGuard guard(&head_mutex_);
@@ -88,4 +82,4 @@ inline bool LockedQueue<Record>::Peek(Record* record) const {
} // namespace internal
} // namespace v8
-#endif // V8_LOCKED_QUEUE_INL_H_
+#endif // V8_UTILS_LOCKED_QUEUE_INL_H_
diff --git a/deps/v8/src/locked-queue.h b/deps/v8/src/utils/locked-queue.h
index 5bcab57b0c..4dd6488184 100644
--- a/deps/v8/src/locked-queue.h
+++ b/deps/v8/src/utils/locked-queue.h
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_LOCKED_QUEUE_H_
-#define V8_LOCKED_QUEUE_H_
+#ifndef V8_UTILS_LOCKED_QUEUE_H_
+#define V8_UTILS_LOCKED_QUEUE_H_
-#include "src/allocation.h"
#include "src/base/platform/platform.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
@@ -40,4 +40,4 @@ class LockedQueue final {
} // namespace internal
} // namespace v8
-#endif // V8_LOCKED_QUEUE_H_
+#endif // V8_UTILS_LOCKED_QUEUE_H_
diff --git a/deps/v8/src/memcopy.cc b/deps/v8/src/utils/memcopy.cc
index 2185faea29..1cac2189d0 100644
--- a/deps/v8/src/memcopy.cc
+++ b/deps/v8/src/utils/memcopy.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/memcopy.h"
+#include "src/utils/memcopy.h"
-#include "src/snapshot/embedded-data.h"
+#include "src/snapshot/embedded/embedded-data.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/memcopy.h b/deps/v8/src/utils/memcopy.h
index 79e6e3c955..c1a0afbcb4 100644
--- a/deps/v8/src/memcopy.h
+++ b/deps/v8/src/utils/memcopy.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MEMCOPY_H_
-#define V8_MEMCOPY_H_
+#ifndef V8_UTILS_MEMCOPY_H_
+#define V8_UTILS_MEMCOPY_H_
#include <stdint.h>
#include <stdlib.h>
@@ -15,7 +15,7 @@
namespace v8 {
namespace internal {
-typedef uintptr_t Address;
+using Address = uintptr_t;
// ----------------------------------------------------------------------------
// Generated memcpy/memmove for ia32, arm, and mips.
@@ -29,7 +29,7 @@ const size_t kMinComplexMemCopy = 64;
// Copy memory area. No restrictions.
V8_EXPORT_PRIVATE void MemMove(void* dest, const void* src, size_t size);
-typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
+using MemMoveFunction = void (*)(void* dest, const void* src, size_t size);
// Keep the distinction of "move" vs. "copy" for the benefit of other
// architectures.
@@ -37,8 +37,8 @@ V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
MemMove(dest, src, size);
}
#elif defined(V8_HOST_ARCH_ARM)
-typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
- size_t size);
+using MemCopyUint8Function = void (*)(uint8_t* dest, const uint8_t* src,
+ size_t size);
V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
size_t chars) {
@@ -55,8 +55,8 @@ V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
memmove(dest, src, size);
}
-typedef void (*MemCopyUint16Uint8Function)(uint16_t* dest, const uint8_t* src,
- size_t size);
+using MemCopyUint16Uint8Function = void (*)(uint16_t* dest, const uint8_t* src,
+ size_t size);
extern MemCopyUint16Uint8Function memcopy_uint16_uint8_function;
void MemCopyUint16Uint8Wrapper(uint16_t* dest, const uint8_t* src,
size_t chars);
@@ -67,8 +67,8 @@ V8_INLINE void MemCopyUint16Uint8(uint16_t* dest, const uint8_t* src,
(*memcopy_uint16_uint8_function)(dest, src, size);
}
#elif defined(V8_HOST_ARCH_MIPS)
-typedef void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
- size_t size);
+using MemCopyUint8Function = void (*)(uint8_t* dest, const uint8_t* src,
+ size_t size);
V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
size_t chars) {
@@ -510,4 +510,4 @@ void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, size_t chars) {
} // namespace internal
} // namespace v8
-#endif // V8_MEMCOPY_H_
+#endif // V8_UTILS_MEMCOPY_H_
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/utils/ostreams.cc
index 1f2d53c239..c43f01be56 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/utils/ostreams.cc
@@ -2,8 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ostreams.h"
-#include "src/objects.h"
+#include "src/utils/ostreams.h"
+
+#include <cinttypes>
+
+#include "src/objects/objects.h"
#include "src/objects/string.h"
#if V8_OS_WIN
@@ -70,12 +73,10 @@ int OFStreamBase::sync() {
return 0;
}
-
OFStreamBase::int_type OFStreamBase::overflow(int_type c) {
return (c != EOF) ? std::fputc(c, f_) : c;
}
-
std::streamsize OFStreamBase::xsputn(const char* s, std::streamsize n) {
return static_cast<std::streamsize>(
std::fwrite(s, 1, static_cast<size_t>(n), f_));
@@ -120,7 +121,6 @@ bool IsPrint(uint16_t c) { return 0x20 <= c && c <= 0x7E; }
bool IsSpace(uint16_t c) { return (0x9 <= c && c <= 0xD) || c == 0x20; }
bool IsOK(uint16_t c) { return (IsPrint(c) || IsSpace(c)) && c != '\\'; }
-
std::ostream& PrintUC16(std::ostream& os, uint16_t c, bool (*pred)(uint16_t)) {
char buf[10];
const char* format = pred(c) ? "%c" : (c <= 0xFF) ? "\\x%02x" : "\\u%04x";
@@ -148,12 +148,10 @@ std::ostream& PrintUC32(std::ostream& os, int32_t c, bool (*pred)(uint16_t)) {
} // namespace
-
std::ostream& operator<<(std::ostream& os, const AsReversiblyEscapedUC16& c) {
return PrintUC16(os, c.value, IsOK);
}
-
std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c) {
if (c.value == '\n') return os << "\\n";
if (c.value == '\r') return os << "\\r";
@@ -162,12 +160,10 @@ std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c) {
return PrintUC16ForJSON(os, c.value, IsOK);
}
-
std::ostream& operator<<(std::ostream& os, const AsUC16& c) {
return PrintUC16(os, c.value, IsPrint);
}
-
std::ostream& operator<<(std::ostream& os, const AsUC32& c) {
return PrintUC32(os, c.value, IsPrint);
}
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/utils/ostreams.h
index 5f77e0d83e..e87675d541 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/utils/ostreams.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_OSTREAMS_H_
-#define V8_OSTREAMS_H_
+#ifndef V8_UTILS_OSTREAMS_H_
+#define V8_UTILS_OSTREAMS_H_
#include <cstddef>
#include <cstdio>
@@ -13,7 +13,7 @@
#include "include/v8config.h"
#include "src/base/macros.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
@@ -95,13 +95,11 @@ struct AsUC16 {
uint16_t value;
};
-
struct AsUC32 {
explicit AsUC32(int32_t v) : value(v) {}
int32_t value;
};
-
struct AsReversiblyEscapedUC16 {
explicit AsReversiblyEscapedUC16(uint16_t v) : value(v) {}
uint16_t value;
@@ -193,4 +191,4 @@ std::ostream& operator<<(std::ostream& os, const PrintIteratorRange<T>& range) {
} // namespace internal
} // namespace v8
-#endif // V8_OSTREAMS_H_
+#endif // V8_UTILS_OSTREAMS_H_
diff --git a/deps/v8/src/pointer-with-payload.h b/deps/v8/src/utils/pointer-with-payload.h
index 06af29e907..1c140ff684 100644
--- a/deps/v8/src/pointer-with-payload.h
+++ b/deps/v8/src/utils/pointer-with-payload.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_POINTER_WITH_PAYLOAD_H_
-#define V8_POINTER_WITH_PAYLOAD_H_
+#ifndef V8_UTILS_POINTER_WITH_PAYLOAD_H_
+#define V8_UTILS_POINTER_WITH_PAYLOAD_H_
#include <cstdint>
#include <type_traits>
@@ -101,4 +101,4 @@ class PointerWithPayload {
} // namespace internal
} // namespace v8
-#endif // V8_POINTER_WITH_PAYLOAD_H_
+#endif // V8_UTILS_POINTER_WITH_PAYLOAD_H_
diff --git a/deps/v8/src/splay-tree-inl.h b/deps/v8/src/utils/splay-tree-inl.h
index d83b15faa5..bda453fd8f 100644
--- a/deps/v8/src/splay-tree-inl.h
+++ b/deps/v8/src/utils/splay-tree-inl.h
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SPLAY_TREE_INL_H_
-#define V8_SPLAY_TREE_INL_H_
+#ifndef V8_UTILS_SPLAY_TREE_INL_H_
+#define V8_UTILS_SPLAY_TREE_INL_H_
#include <vector>
-#include "src/splay-tree.h"
+#include "src/utils/splay-tree.h"
namespace v8 {
namespace internal {
@@ -289,4 +289,4 @@ void SplayTree<Config, Allocator>::ForEachNode(Callback* callback) {
} // namespace internal
} // namespace v8
-#endif // V8_SPLAY_TREE_INL_H_
+#endif // V8_UTILS_SPLAY_TREE_INL_H_
diff --git a/deps/v8/src/splay-tree.h b/deps/v8/src/utils/splay-tree.h
index 454b409fbb..47633f39db 100644
--- a/deps/v8/src/splay-tree.h
+++ b/deps/v8/src/utils/splay-tree.h
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SPLAY_TREE_H_
-#define V8_SPLAY_TREE_H_
+#ifndef V8_UTILS_SPLAY_TREE_H_
+#define V8_UTILS_SPLAY_TREE_H_
-#include "src/allocation.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
-
// A splay tree. The config type parameter encapsulates the different
// configurations of a concrete splay tree:
//
@@ -30,8 +29,8 @@ namespace internal {
template <typename Config, class AllocationPolicy>
class SplayTree {
public:
- typedef typename Config::Key Key;
- typedef typename Config::Value Value;
+ using Key = typename Config::Key;
+ using Value = typename Config::Value;
class Locator;
@@ -130,7 +129,7 @@ class SplayTree {
// exposing the node.
class Locator {
public:
- explicit Locator(Node* node) : node_(node) { }
+ explicit Locator(Node* node) : node_(node) {}
Locator() : node_(nullptr) {}
const Key& key() { return node_->key_; }
Value& value() { return node_->value_; }
@@ -162,11 +161,8 @@ class SplayTree {
template <class Callback>
class NodeToPairAdaptor {
public:
- explicit NodeToPairAdaptor(Callback* callback)
- : callback_(callback) { }
- void Call(Node* node) {
- callback_->Call(node->key(), node->value());
- }
+ explicit NodeToPairAdaptor(Callback* callback) : callback_(callback) {}
+ void Call(Node* node) { callback_->Call(node->key(), node->value()); }
private:
Callback* callback_;
@@ -192,8 +188,7 @@ class SplayTree {
DISALLOW_COPY_AND_ASSIGN(SplayTree);
};
-
} // namespace internal
} // namespace v8
-#endif // V8_SPLAY_TREE_H_
+#endif // V8_UTILS_SPLAY_TREE_H_
diff --git a/deps/v8/src/utils-inl.h b/deps/v8/src/utils/utils-inl.h
index 3627327ff3..e88055023e 100644
--- a/deps/v8/src/utils-inl.h
+++ b/deps/v8/src/utils/utils-inl.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UTILS_INL_H_
-#define V8_UTILS_INL_H_
+#ifndef V8_UTILS_UTILS_INL_H_
+#define V8_UTILS_UTILS_INL_H_
-#include "src/utils.h"
+#include "src/utils/utils.h"
#include "include/v8-platform.h"
#include "src/base/platform/time.h"
-#include "src/char-predicates-inl.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/strings/char-predicates-inl.h"
namespace v8 {
namespace internal {
@@ -67,4 +67,4 @@ bool StringToArrayIndex(Stream* stream, uint32_t* index) {
} // namespace internal
} // namespace v8
-#endif // V8_UTILS_INL_H_
+#endif // V8_UTILS_UTILS_INL_H_
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils/utils.cc
index e8d84e12c8..f2283e91e2 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils/utils.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/utils.h"
+#include "src/utils/utils.h"
#include <stdarg.h>
#include <sys/stat.h>
@@ -11,7 +11,7 @@
#include "src/base/functional.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
-#include "src/memcopy.h"
+#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
@@ -21,27 +21,25 @@ SimpleStringBuilder::SimpleStringBuilder(int size) {
position_ = 0;
}
-
void SimpleStringBuilder::AddString(const char* s) {
- AddSubstring(s, StrLength(s));
+ size_t len = strlen(s);
+ DCHECK_GE(kMaxInt, len);
+ AddSubstring(s, static_cast<int>(len));
}
-
void SimpleStringBuilder::AddSubstring(const char* s, int n) {
DCHECK(!is_finalized() && position_ + n <= buffer_.length());
- DCHECK(static_cast<size_t>(n) <= strlen(s));
+ DCHECK_LE(n, strlen(s));
MemCopy(&buffer_[position_], s, n * kCharSize);
position_ += n;
}
-
void SimpleStringBuilder::AddPadding(char c, int count) {
for (int i = 0; i < count; i++) {
AddCharacter(c);
}
}
-
void SimpleStringBuilder::AddDecimalInteger(int32_t value) {
uint32_t number = static_cast<uint32_t>(value);
if (value < 0) {
@@ -59,7 +57,6 @@ void SimpleStringBuilder::AddDecimalInteger(int32_t value) {
}
}
-
char* SimpleStringBuilder::Finalize() {
DCHECK(!is_finalized() && position_ <= buffer_.length());
// If there is no space for null termination, overwrite last character.
@@ -71,28 +68,25 @@ char* SimpleStringBuilder::Finalize() {
buffer_[position_] = '\0';
// Make sure nobody managed to add a 0-character to the
// buffer while building the string.
- DCHECK(strlen(buffer_.start()) == static_cast<size_t>(position_));
+ DCHECK(strlen(buffer_.begin()) == static_cast<size_t>(position_));
position_ = -1;
DCHECK(is_finalized());
- return buffer_.start();
+ return buffer_.begin();
}
std::ostream& operator<<(std::ostream& os, FeedbackSlot slot) {
return os << "#" << slot.id_;
}
-
size_t hash_value(BailoutId id) {
base::hash<int> h;
return h(id.id_);
}
-
std::ostream& operator<<(std::ostream& os, BailoutId id) {
return os << id.id_;
}
-
void PrintF(const char* format, ...) {
va_list arguments;
va_start(arguments, format);
@@ -100,7 +94,6 @@ void PrintF(const char* format, ...) {
va_end(arguments);
}
-
void PrintF(FILE* out, const char* format, ...) {
va_list arguments;
va_start(arguments, format);
@@ -108,7 +101,6 @@ void PrintF(FILE* out, const char* format, ...) {
va_end(arguments);
}
-
void PrintPID(const char* format, ...) {
base::OS::Print("[%d] ", base::OS::GetCurrentProcessId());
va_list arguments;
@@ -117,7 +109,6 @@ void PrintPID(const char* format, ...) {
va_end(arguments);
}
-
void PrintIsolate(void* isolate, const char* format, ...) {
base::OS::Print("[%d:%p] ", base::OS::GetCurrentProcessId(), isolate);
va_list arguments;
@@ -126,7 +117,6 @@ void PrintIsolate(void* isolate, const char* format, ...) {
va_end(arguments);
}
-
int SNPrintF(Vector<char> str, const char* format, ...) {
va_list args;
va_start(args, format);
@@ -135,26 +125,20 @@ int SNPrintF(Vector<char> str, const char* format, ...) {
return result;
}
-
int VSNPrintF(Vector<char> str, const char* format, va_list args) {
- return base::OS::VSNPrintF(str.start(), str.length(), format, args);
+ return base::OS::VSNPrintF(str.begin(), str.length(), format, args);
}
-
void StrNCpy(Vector<char> dest, const char* src, size_t n) {
- base::OS::StrNCpy(dest.start(), dest.length(), src, n);
-}
-
-
-void Flush(FILE* out) {
- fflush(out);
+ base::OS::StrNCpy(dest.begin(), dest.length(), src, n);
}
+void Flush(FILE* out) { fflush(out); }
char* ReadLine(const char* prompt) {
char* result = nullptr;
char line_buf[256];
- int offset = 0;
+ size_t offset = 0;
bool keep_going = true;
fprintf(stdout, "%s", prompt);
fflush(stdout);
@@ -166,10 +150,8 @@ char* ReadLine(const char* prompt) {
}
return nullptr;
}
- int len = StrLength(line_buf);
- if (len > 1 &&
- line_buf[len - 2] == '\\' &&
- line_buf[len - 1] == '\n') {
+ size_t len = strlen(line_buf);
+ if (len > 1 && line_buf[len - 2] == '\\' && line_buf[len - 1] == '\n') {
// When we read a line that ends with a "\" we remove the escape and
// append the remainder.
line_buf[len - 2] = '\n';
@@ -185,7 +167,7 @@ char* ReadLine(const char* prompt) {
result = NewArray<char>(len + 1);
} else {
// Allocate a new result with enough room for the new addition.
- int new_len = offset + len + 1;
+ size_t new_len = offset + len + 1;
char* new_result = NewArray<char>(new_len);
// Copy the existing input into the new array and set the new
// array as the result.
@@ -241,7 +223,7 @@ std::vector<char> ReadCharsFromFile(const char* filename, bool* exists,
}
std::string VectorToString(const std::vector<char>& chars) {
- if (chars.size() == 0) {
+ if (chars.empty()) {
return std::string();
}
return std::string(chars.begin(), chars.end());
@@ -259,7 +241,6 @@ std::string ReadFile(FILE* file, bool* exists, bool verbose) {
return VectorToString(result);
}
-
int WriteCharsToFile(const char* str, int size, FILE* f) {
int total = 0;
while (total < size) {
@@ -273,11 +254,7 @@ int WriteCharsToFile(const char* str, int size, FILE* f) {
return total;
}
-
-int AppendChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
+int AppendChars(const char* filename, const char* str, int size, bool verbose) {
FILE* f = base::OS::FOpen(filename, "ab");
if (f == nullptr) {
if (verbose) {
@@ -290,11 +267,7 @@ int AppendChars(const char* filename,
return written;
}
-
-int WriteChars(const char* filename,
- const char* str,
- int size,
- bool verbose) {
+int WriteChars(const char* filename, const char* str, int size, bool verbose) {
FILE* f = base::OS::FOpen(filename, "wb");
if (f == nullptr) {
if (verbose) {
@@ -307,17 +280,12 @@ int WriteChars(const char* filename,
return written;
}
-
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
+int WriteBytes(const char* filename, const byte* bytes, int size,
bool verbose) {
const char* str = reinterpret_cast<const char*>(bytes);
return WriteChars(filename, str, size, verbose);
}
-
-
void StringBuilder::AddFormatted(const char* format, ...) {
va_list arguments;
va_start(arguments, format);
@@ -325,7 +293,6 @@ void StringBuilder::AddFormatted(const char* format, ...) {
va_end(arguments);
}
-
void StringBuilder::AddFormattedList(const char* format, va_list list) {
DCHECK(!is_finalized() && position_ <= buffer_.length());
int n = VSNPrintF(buffer_ + position_, format, list);
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils/utils.h
index 9a77317a7c..17e07d3042 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils/utils.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UTILS_H_
-#define V8_UTILS_H_
+#ifndef V8_UTILS_UTILS_H_
+#define V8_UTILS_UTILS_H_
#include <limits.h>
#include <stdlib.h>
@@ -13,16 +13,16 @@
#include <type_traits>
#include "include/v8.h"
-#include "src/allocation.h"
#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/v8-fallthrough.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/third_party/siphash/halfsiphash.h"
-#include "src/vector.h"
+#include "src/utils/allocation.h"
+#include "src/utils/vector.h"
#if defined(V8_OS_AIX)
#include <fenv.h> // NOLINT(build/c++11)
@@ -60,7 +60,7 @@ inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) {
DCHECK(lower_limit <= higher_limit);
#endif
STATIC_ASSERT(sizeof(U) <= sizeof(T));
- typedef typename std::make_unsigned<T>::type unsigned_T;
+ using unsigned_T = typename std::make_unsigned<T>::type;
// Use static_cast to support enum classes.
return static_cast<unsigned_T>(static_cast<unsigned_T>(value) -
static_cast<unsigned_T>(lower_limit)) <=
@@ -112,7 +112,8 @@ inline int WhichPowerOf2(T x) {
CHECK_BIGGER(4)
#undef CHECK_BIGGER
switch (x) {
- default: UNREACHABLE();
+ default:
+ UNREACHABLE();
case 8:
bits++;
V8_FALLTHROUGH;
@@ -122,7 +123,8 @@ inline int WhichPowerOf2(T x) {
case 2:
bits++;
V8_FALLTHROUGH;
- case 1: break;
+ case 1:
+ break;
}
DCHECK_EQ(T{1} << bits, original_x);
return bits;
@@ -173,7 +175,8 @@ int Compare(const T& a, const T& b) {
// Compare function to compare the object pointer value of two
// handlified objects. The handles are passed as pointers to the
// handles.
-template<typename T> class Handle; // Forward declaration.
+template <typename T>
+class Handle; // Forward declaration.
template <typename T>
int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
return Compare<T*>(*(*a), *(*b));
@@ -185,7 +188,6 @@ constexpr T Max(T a, T b) {
return a < b ? b : a;
}
-
// Returns the minimum of the two parameters.
template <typename T>
constexpr T Min(T a, T b) {
@@ -217,7 +219,7 @@ typename std::make_unsigned<T>::type Abs(T a) {
// This is a branch-free implementation of the absolute value function and is
// described in Warren's "Hacker's Delight", chapter 2. It avoids undefined
// behavior with the arithmetic negation operation on signed values as well.
- typedef typename std::make_unsigned<T>::type unsignedT;
+ using unsignedT = typename std::make_unsigned<T>::type;
unsignedT x = static_cast<unsignedT>(a);
unsignedT y = static_cast<unsignedT>(a >> (sizeof(T) * 8 - 1));
return (x ^ y) - y;
@@ -301,23 +303,27 @@ T SaturateSub(T a, T b) {
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
-template<class T, int shift, int size, class U>
-class BitFieldBase {
+template <class T, int shift, int size, class U = uint32_t>
+class BitField {
public:
- typedef T FieldType;
+ STATIC_ASSERT(std::is_unsigned<U>::value);
+ STATIC_ASSERT(shift < 8 * sizeof(U)); // Otherwise shifts by {shift} are UB.
+ STATIC_ASSERT(size < 8 * sizeof(U)); // Otherwise shifts by {size} are UB.
+ STATIC_ASSERT(shift + size <= 8 * sizeof(U));
+
+ using FieldType = T;
// A type U mask of bit field. To use all bits of a type U of x bits
// in a bitfield without compiler warnings we have to compute 2^x
// without using a shift count of x in the computation.
- static const U kOne = static_cast<U>(1U);
- static const U kMask = ((kOne << shift) << size) - (kOne << shift);
- static const U kShift = shift;
- static const U kSize = size;
- static const U kNext = kShift + kSize;
- static const U kNumValues = kOne << size;
+ static constexpr U kShift = shift;
+ static constexpr U kSize = size;
+ static constexpr U kMask = ((U{1} << kShift) << kSize) - (U{1} << kShift);
+ static constexpr U kNext = kShift + kSize;
+ static constexpr U kNumValues = U{1} << kSize;
// Value for the field with all bits set.
- static const T kMax = static_cast<T>(kNumValues - 1);
+ static constexpr T kMax = static_cast<T>(kNumValues - 1);
// Tells whether the provided value fits into the bit field.
static constexpr bool is_valid(T value) {
@@ -329,7 +335,7 @@ class BitFieldBase {
#if V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
DCHECK(is_valid(value));
#endif
- return static_cast<U>(value) << shift;
+ return static_cast<U>(value) << kShift;
}
// Returns a type U with the bit field value updated.
@@ -339,26 +345,18 @@ class BitFieldBase {
// Extracts the bit field from the value.
static constexpr T decode(U value) {
- return static_cast<T>((value & kMask) >> shift);
+ return static_cast<T>((value & kMask) >> kShift);
}
-
- STATIC_ASSERT((kNext - 1) / 8 < sizeof(U));
};
template <class T, int shift, int size>
-class BitField8 : public BitFieldBase<T, shift, size, uint8_t> {};
-
+using BitField8 = BitField<T, shift, size, uint8_t>;
template <class T, int shift, int size>
-class BitField16 : public BitFieldBase<T, shift, size, uint16_t> {};
+using BitField16 = BitField<T, shift, size, uint16_t>;
-
-template<class T, int shift, int size>
-class BitField : public BitFieldBase<T, shift, size, uint32_t> { };
-
-
-template<class T, int shift, int size>
-class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
+template <class T, int shift, int size>
+using BitField64 = BitField<T, shift, size, uint64_t>;
// Helper macros for defining a contiguous sequence of bit fields. Example:
// (backslashes at the ends of respective lines of this multi-line macro
@@ -383,10 +381,10 @@ class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
};
#define DEFINE_BIT_FIELD_TYPE(Name, Type, Size, RangesName) \
- typedef BitField<Type, RangesName::k##Name##Start, Size> Name;
+ using Name = BitField<Type, RangesName::k##Name##Start, Size>;
#define DEFINE_BIT_FIELD_64_TYPE(Name, Type, Size, RangesName) \
- typedef BitField64<Type, RangesName::k##Name##Start, Size> Name;
+ using Name = BitField64<Type, RangesName::k##Name##Start, Size>;
#define DEFINE_BIT_FIELDS(LIST_MACRO) \
DEFINE_BIT_RANGES(LIST_MACRO) \
@@ -401,7 +399,7 @@ class BitField64 : public BitFieldBase<T, shift, size, uint64_t> { };
// a variable number of items in an array.
//
// To encode boolean data in a smi array you would use:
-// typedef BitSetComputer<bool, 1, kSmiValueSize, uint32_t> BoolComputer;
+// using BoolComputer = BitSetComputer<bool, 1, kSmiValueSize, uint32_t>;
//
template <class T, int kBitsPerItem, int kBitsPerWord, class U>
class BitSetComputer {
@@ -526,22 +524,21 @@ static const int kInt64UpperHalfMemoryOffset = 0;
template <typename T>
class StaticResource {
public:
- StaticResource() : is_reserved_(false) {}
+ StaticResource() : is_reserved_(false) {}
private:
- template <typename S> friend class Access;
+ template <typename S>
+ friend class Access;
T instance_;
bool is_reserved_;
};
-
// Locally scoped access to a static resource.
template <typename T>
class Access {
public:
explicit Access(StaticResource<T>* resource)
- : resource_(resource)
- , instance_(&resource->instance_) {
+ : resource_(resource), instance_(&resource->instance_) {
DCHECK(!resource->is_reserved_);
resource->is_reserved_ = true;
}
@@ -552,8 +549,8 @@ class Access {
instance_ = nullptr;
}
- T* value() { return instance_; }
- T* operator -> () { return instance_; }
+ T* value() { return instance_; }
+ T* operator->() { return instance_; }
private:
StaticResource<T>* resource_;
@@ -561,7 +558,7 @@ class Access {
};
// A pointer that can only be set once and doesn't allow NULL values.
-template<typename T>
+template <typename T>
class SetOncePointer {
public:
SetOncePointer() = default;
@@ -578,9 +575,9 @@ class SetOncePointer {
pointer_ = value;
}
- T* operator=(T* value) {
+ SetOncePointer& operator=(T* value) {
set(value);
- return value;
+ return *this;
}
bool operator==(std::nullptr_t) const { return pointer_ == nullptr; }
@@ -616,8 +613,7 @@ inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
if (sizeof(lchar) == 1) {
if (sizeof(rchar) == 1) {
return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
- reinterpret_cast<const uint8_t*>(rhs),
- chars);
+ reinterpret_cast<const uint8_t*>(rhs), chars);
} else {
return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
reinterpret_cast<const uint16_t*>(rhs),
@@ -626,8 +622,7 @@ inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
} else {
if (sizeof(rchar) == 1) {
return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
- reinterpret_cast<const uint8_t*>(rhs),
- chars);
+ reinterpret_cast<const uint8_t*>(rhs), chars);
} else {
return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
reinterpret_cast<const uint16_t*>(rhs),
@@ -636,7 +631,6 @@ inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
}
}
-
// Calculate 10^exponent.
inline int TenToThe(int exponent) {
DCHECK_LE(exponent, 9);
@@ -646,11 +640,10 @@ inline int TenToThe(int exponent) {
return answer;
}
-
-template<typename ElementType, int NumElements>
+template <typename ElementType, int NumElements>
class EmbeddedContainer {
public:
- EmbeddedContainer() : elems_() { }
+ EmbeddedContainer() : elems_() {}
int length() const { return NumElements; }
const ElementType& operator[](int i) const {
@@ -666,8 +659,7 @@ class EmbeddedContainer {
ElementType elems_[NumElements];
};
-
-template<typename ElementType>
+template <typename ElementType>
class EmbeddedContainer<ElementType, 0> {
public:
int length() const { return 0; }
@@ -683,7 +675,6 @@ class EmbeddedContainer<ElementType, 0> {
}
};
-
// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that checks the
// buffer bounds on all operations in debug mode.
@@ -696,9 +687,11 @@ class SimpleStringBuilder {
explicit SimpleStringBuilder(int size);
SimpleStringBuilder(char* buffer, int size)
- : buffer_(buffer, size), position_(0) { }
+ : buffer_(buffer, size), position_(0) {}
- ~SimpleStringBuilder() { if (!is_finalized()) Finalize(); }
+ ~SimpleStringBuilder() {
+ if (!is_finalized()) Finalize();
+ }
int size() const { return buffer_.length(); }
@@ -784,24 +777,49 @@ inline T truncate_to_intn(T x, unsigned n) {
return (x & ((static_cast<T>(1) << n) - 1));
}
-#define INT_1_TO_63_LIST(V) \
-V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
-V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
-V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
-V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32) \
-V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
-V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
-V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
-V(57) V(58) V(59) V(60) V(61) V(62) V(63)
-
-#define DECLARE_IS_INT_N(N) \
-inline bool is_int##N(int64_t x) { return is_intn(x, N); }
-#define DECLARE_IS_UINT_N(N) \
-template <class T> \
-inline bool is_uint##N(T x) { return is_uintn(x, N); }
-#define DECLARE_TRUNCATE_TO_INT_N(N) \
-template <class T> \
-inline T truncate_to_int##N(T x) { return truncate_to_intn(x, N); }
+#define INT_1_TO_63_LIST(V) \
+ V(1) \
+ V(2) \
+ V(3) \
+ V(4) \
+ V(5) \
+ V(6) \
+ V(7) \
+ V(8) \
+ V(9) \
+ V(10) \
+ V(11) \
+ V(12) \
+ V(13) \
+ V(14) \
+ V(15) \
+ V(16) \
+ V(17) \
+ V(18) \
+ V(19) \
+ V(20) \
+ V(21) \
+ V(22) \
+ V(23) \
+ V(24) \
+ V(25) \
+ V(26) V(27) V(28) V(29) V(30) V(31) V(32) V(33) V(34) V(35) V(36) V(37) \
+ V(38) V(39) V(40) V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) \
+ V(50) V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60) \
+ V(61) V(62) V(63)
+
+#define DECLARE_IS_INT_N(N) \
+ inline bool is_int##N(int64_t x) { return is_intn(x, N); }
+#define DECLARE_IS_UINT_N(N) \
+ template <class T> \
+ inline bool is_uint##N(T x) { \
+ return is_uintn(x, N); \
+ }
+#define DECLARE_TRUNCATE_TO_INT_N(N) \
+ template <class T> \
+ inline T truncate_to_int##N(T x) { \
+ return truncate_to_intn(x, N); \
+ }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
@@ -853,7 +871,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, FeedbackSlot);
class BailoutId {
public:
- explicit BailoutId(int id) : id_(id) { }
+ explicit BailoutId(int id) : id_(id) {}
int ToInt() const { return id_; }
static BailoutId None() { return BailoutId(kNoneId); }
@@ -910,7 +928,6 @@ class BailoutId {
int id_;
};
-
// ----------------------------------------------------------------------------
// I/O support.
@@ -936,47 +953,33 @@ void StrNCpy(Vector<char> dest, const char* src, size_t n);
// Our version of fflush.
void Flush(FILE* out);
-inline void Flush() {
- Flush(stdout);
-}
-
+inline void Flush() { Flush(stdout); }
// Read a line of characters after printing the prompt to stdout. The resulting
// char* needs to be disposed off with DeleteArray by the caller.
char* ReadLine(const char* prompt);
-
// Append size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
-int AppendChars(const char* filename,
- const char* str,
- int size,
+int AppendChars(const char* filename, const char* str, int size,
bool verbose = true);
-
// Write size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
-int WriteChars(const char* filename,
- const char* str,
- int size,
+int WriteChars(const char* filename, const char* str, int size,
bool verbose = true);
-
// Write size bytes to the file given by filename.
// The file is overwritten. Returns the number of bytes written.
-int WriteBytes(const char* filename,
- const byte* bytes,
- int size,
+int WriteBytes(const char* filename, const byte* bytes, int size,
bool verbose = true);
-
// Write the C code
// const char* <varname> = "<str>";
// const int <varname>_len = <len>;
// to the file given by filename. Only the first len chars are written.
-int WriteAsCFile(const char* filename, const char* varname,
- const char* str, int size, bool verbose = true);
-
+int WriteAsCFile(const char* filename, const char* varname, const char* str,
+ int size, bool verbose = true);
// Simple support to read a file into std::string.
// On return, *exits tells whether the file existed.
@@ -987,8 +990,8 @@ V8_EXPORT_PRIVATE std::string ReadFile(FILE* file, bool* exists,
class StringBuilder : public SimpleStringBuilder {
public:
- explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
- StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) { }
+ explicit StringBuilder(int size) : SimpleStringBuilder(size) {}
+ StringBuilder(char* buffer, int size) : SimpleStringBuilder(buffer, size) {}
// Add formatted contents to the builder just like printf().
void PRINTF_FORMAT(2, 3) AddFormatted(const char* format, ...);
@@ -1000,7 +1003,6 @@ class StringBuilder : public SimpleStringBuilder {
DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
};
-
bool DoubleToBoolean(double d);
template <typename Char>
@@ -1017,36 +1019,34 @@ V8_EXPORT_PRIVATE V8_NOINLINE uintptr_t GetCurrentStackPosition();
static inline uint16_t ByteReverse16(uint16_t value) {
#if V8_HAS_BUILTIN_BSWAP16
- return __builtin_bswap16(value);
+ return __builtin_bswap16(value);
#else
- return value << 8 | (value >> 8 & 0x00FF);
+ return value << 8 | (value >> 8 & 0x00FF);
#endif
}
static inline uint32_t ByteReverse32(uint32_t value) {
#if V8_HAS_BUILTIN_BSWAP32
- return __builtin_bswap32(value);
+ return __builtin_bswap32(value);
#else
- return value << 24 |
- ((value << 8) & 0x00FF0000) |
- ((value >> 8) & 0x0000FF00) |
- ((value >> 24) & 0x00000FF);
+ return value << 24 | ((value << 8) & 0x00FF0000) |
+ ((value >> 8) & 0x0000FF00) | ((value >> 24) & 0x00000FF);
#endif
}
static inline uint64_t ByteReverse64(uint64_t value) {
#if V8_HAS_BUILTIN_BSWAP64
- return __builtin_bswap64(value);
+ return __builtin_bswap64(value);
#else
- size_t bits_of_v = sizeof(value) * kBitsPerByte;
- return value << (bits_of_v - 8) |
- ((value << (bits_of_v - 24)) & 0x00FF000000000000) |
- ((value << (bits_of_v - 40)) & 0x0000FF0000000000) |
- ((value << (bits_of_v - 56)) & 0x000000FF00000000) |
- ((value >> (bits_of_v - 56)) & 0x00000000FF000000) |
- ((value >> (bits_of_v - 40)) & 0x0000000000FF0000) |
- ((value >> (bits_of_v - 24)) & 0x000000000000FF00) |
- ((value >> (bits_of_v - 8)) & 0x00000000000000FF);
+ size_t bits_of_v = sizeof(value) * kBitsPerByte;
+ return value << (bits_of_v - 8) |
+ ((value << (bits_of_v - 24)) & 0x00FF000000000000) |
+ ((value << (bits_of_v - 40)) & 0x0000FF0000000000) |
+ ((value << (bits_of_v - 56)) & 0x000000FF00000000) |
+ ((value >> (bits_of_v - 56)) & 0x00000000FF000000) |
+ ((value >> (bits_of_v - 40)) & 0x0000000000FF0000) |
+ ((value >> (bits_of_v - 24)) & 0x000000000000FF00) |
+ ((value >> (bits_of_v - 8)) & 0x00000000000000FF);
#endif
}
@@ -1082,4 +1082,4 @@ V8_INLINE void ZapCode(Address addr, size_t size_in_bytes) {
} // namespace internal
} // namespace v8
-#endif // V8_UTILS_H_
+#endif // V8_UTILS_UTILS_H_
diff --git a/deps/v8/src/v8dll-main.cc b/deps/v8/src/utils/v8dll-main.cc
index 6250b3e341..255f0d8dbf 100644
--- a/deps/v8/src/v8dll-main.cc
+++ b/deps/v8/src/utils/v8dll-main.cc
@@ -4,16 +4,14 @@
// The GYP based build ends up defining USING_V8_SHARED when compiling this
// file.
-#undef USING_V8_SHARED
+#undef USING_V8_SHARED // NOLINT
#include "include/v8.h"
#if V8_OS_WIN
#include "src/base/win32-headers.h"
extern "C" {
-BOOL WINAPI DllMain(HANDLE hinstDLL,
- DWORD dwReason,
- LPVOID lpvReserved) {
+BOOL WINAPI DllMain(HANDLE hinstDLL, DWORD dwReason, LPVOID lpvReserved) {
// Do nothing.
return TRUE;
}
diff --git a/deps/v8/src/vector.h b/deps/v8/src/utils/vector.h
index 03438959fc..5b6c878e34 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/utils/vector.h
@@ -2,34 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_VECTOR_H_
-#define V8_VECTOR_H_
+#ifndef V8_UTILS_VECTOR_H_
+#define V8_UTILS_VECTOR_H_
#include <algorithm>
#include <cstring>
#include <iterator>
-#include "src/allocation.h"
-#include "src/checks.h"
-#include "src/globals.h"
+#include "src/common/checks.h"
+#include "src/common/globals.h"
+#include "src/utils/allocation.h"
namespace v8 {
namespace internal {
-
template <typename T>
class Vector {
public:
constexpr Vector() : start_(nullptr), length_(0) {}
- Vector(T* data, size_t length) : start_(data), length_(length) {
+ constexpr Vector(T* data, size_t length) : start_(data), length_(length) {
+#ifdef V8_CAN_HAVE_DCHECK_IN_CONSTEXPR
DCHECK(length == 0 || data != nullptr);
+#endif
}
- template <int N>
- explicit constexpr Vector(T (&arr)[N]) : start_(arr), length_(N) {}
-
- static Vector<T> New(int length) {
+ static Vector<T> New(size_t length) {
return Vector<T>(NewArray<T>(length), length);
}
@@ -38,12 +36,13 @@ class Vector {
Vector<T> SubVector(size_t from, size_t to) const {
DCHECK_LE(from, to);
DCHECK_LE(to, length_);
- return Vector<T>(start() + from, to - from);
+ return Vector<T>(begin() + from, to - from);
}
- // Returns the length of the vector.
+ // Returns the length of the vector. Only use this if you really need an
+ // integer return value. Use {size()} otherwise.
int length() const {
- DCHECK(length_ <= static_cast<size_t>(std::numeric_limits<int>::max()));
+ DCHECK_GE(std::numeric_limits<int>::max(), length_);
return static_cast<int>(length_);
}
@@ -53,9 +52,6 @@ class Vector {
// Returns whether or not the vector is empty.
constexpr bool empty() const { return length_ == 0; }
- // Returns the pointer to the start of the data in the vector.
- constexpr T* start() const { return start_; }
-
// Access individual vector elements - checks bounds in debug mode.
T& operator[](size_t index) const {
DCHECK_LT(index, length_);
@@ -71,9 +67,11 @@ class Vector {
return start_[length_ - 1];
}
- typedef T* iterator;
- constexpr iterator begin() const { return start_; }
- constexpr iterator end() const { return start_ + length_; }
+ // Returns a pointer to the start of the data in the vector.
+ constexpr T* begin() const { return start_; }
+
+ // Returns a pointer past the end of the data in the vector.
+ constexpr T* end() const { return start_ + length_; }
// Returns a clone of this vector with a new backing store.
Vector<T> Clone() const {
@@ -82,34 +80,6 @@ class Vector {
return Vector<T>(result, length_);
}
- template <typename CompareFunction>
- void Sort(CompareFunction cmp, size_t s, size_t l) {
- std::sort(start() + s, start() + s + l, RawComparer<CompareFunction>(cmp));
- }
-
- template <typename CompareFunction>
- void Sort(CompareFunction cmp) {
- std::sort(start(), start() + length(), RawComparer<CompareFunction>(cmp));
- }
-
- void Sort() {
- std::sort(start(), start() + length());
- }
-
- template <typename CompareFunction>
- void StableSort(CompareFunction cmp, size_t s, size_t l) {
- std::stable_sort(start() + s, start() + s + l,
- RawComparer<CompareFunction>(cmp));
- }
-
- template <typename CompareFunction>
- void StableSort(CompareFunction cmp) {
- std::stable_sort(start(), start() + length(),
- RawComparer<CompareFunction>(cmp));
- }
-
- void StableSort() { std::stable_sort(start(), start() + length()); }
-
void Truncate(size_t length) {
DCHECK(length <= length_);
length_ = length;
@@ -142,7 +112,7 @@ class Vector {
template <typename S>
static constexpr Vector<T> cast(Vector<S> input) {
- return Vector<T>(reinterpret_cast<T*>(input.start()),
+ return Vector<T>(reinterpret_cast<T*>(input.begin()),
input.length() * sizeof(S) / sizeof(T));
}
@@ -160,28 +130,14 @@ class Vector {
private:
T* start_;
size_t length_;
-
- template <typename CookedComparer>
- class RawComparer {
- public:
- explicit RawComparer(CookedComparer cmp) : cmp_(cmp) {}
- bool operator()(const T& a, const T& b) {
- return cmp_(&a, &b) < 0;
- }
-
- private:
- CookedComparer cmp_;
- };
};
-
template <typename T>
class ScopedVector : public Vector<T> {
public:
- explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
- ~ScopedVector() {
- DeleteArray(this->start());
- }
+ explicit ScopedVector(size_t length)
+ : Vector<T>(NewArray<T>(length), length) {}
+ ~ScopedVector() { DeleteArray(this->begin()); }
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
@@ -219,6 +175,15 @@ class OwnedVector {
return data_.get();
}
+ constexpr T* begin() const { return start(); }
+ constexpr T* end() const { return start() + size(); }
+
+ // Access individual vector elements - checks bounds in debug mode.
+ T& operator[](size_t index) const {
+ DCHECK_LT(index, length_);
+ return data_[index];
+ }
+
// Returns a {Vector<T>} view of the data in this vector.
Vector<T> as_vector() const { return Vector<T>(start(), size()); }
@@ -260,41 +225,34 @@ class OwnedVector {
size_t length_ = 0;
};
-inline int StrLength(const char* string) {
- size_t length = strlen(string);
- DCHECK(length == static_cast<size_t>(static_cast<int>(length)));
- return static_cast<int>(length);
-}
-
template <size_t N>
constexpr Vector<const uint8_t> StaticCharVector(const char (&array)[N]) {
return Vector<const uint8_t>::cast(Vector<const char>(array, N - 1));
}
inline Vector<const char> CStrVector(const char* data) {
- return Vector<const char>(data, StrLength(data));
+ return Vector<const char>(data, strlen(data));
}
-inline Vector<const uint8_t> OneByteVector(const char* data, int length) {
+inline Vector<const uint8_t> OneByteVector(const char* data, size_t length) {
return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length);
}
inline Vector<const uint8_t> OneByteVector(const char* data) {
- return OneByteVector(data, StrLength(data));
+ return OneByteVector(data, strlen(data));
}
inline Vector<char> MutableCStrVector(char* data) {
- return Vector<char>(data, StrLength(data));
+ return Vector<char>(data, strlen(data));
}
-inline Vector<char> MutableCStrVector(char* data, int max) {
- int length = StrLength(data);
- return Vector<char>(data, (length < max) ? length : max);
+inline Vector<char> MutableCStrVector(char* data, size_t max) {
+ return Vector<char>(data, strnlen(data, max));
}
-template <typename T, int N>
+template <typename T, size_t N>
inline constexpr Vector<T> ArrayVector(T (&arr)[N]) {
- return Vector<T>(arr);
+ return Vector<T>{arr, N};
}
// Construct a Vector from a start pointer and a size.
@@ -310,38 +268,22 @@ inline constexpr auto VectorOf(Container&& c)
return VectorOf(c.data(), c.size());
}
-template <typename T, int kSize>
+template <typename T, size_t kSize>
class EmbeddedVector : public Vector<T> {
public:
EmbeddedVector() : Vector<T>(buffer_, kSize) {}
- explicit EmbeddedVector(T initial_value) : Vector<T>(buffer_, kSize) {
- for (int i = 0; i < kSize; ++i) {
- buffer_[i] = initial_value;
- }
+ explicit EmbeddedVector(const T& initial_value) : Vector<T>(buffer_, kSize) {
+ std::fill_n(buffer_, kSize, initial_value);
}
-#if !defined(V8_OS_WIN)
- // When copying, make underlying Vector to reference our buffer.
- EmbeddedVector(const EmbeddedVector& rhs) V8_NOEXCEPT : Vector<T>(rhs) {
- MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- this->set_start(buffer_);
- }
-
- EmbeddedVector& operator=(const EmbeddedVector& rhs) V8_NOEXCEPT {
- if (this == &rhs) return *this;
- Vector<T>::operator=(rhs);
- MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
- this->set_start(buffer_);
- return *this;
- }
-#endif
-
private:
T buffer_[kSize];
+
+ DISALLOW_COPY_AND_ASSIGN(EmbeddedVector);
};
} // namespace internal
} // namespace v8
-#endif // V8_VECTOR_H_
+#endif // V8_UTILS_VECTOR_H_
diff --git a/deps/v8/src/version.cc b/deps/v8/src/utils/version.cc
index be8c85fdb9..4b7653051d 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/utils/version.cc
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/version.h"
+#include "src/utils/version.h"
#include "include/v8-version-string.h"
#include "include/v8-version.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
// Define SONAME to have the build system put a specific SONAME into the
// shared library instead the generic SONAME generated from the V8 version
// number. This define is mainly used by the build system script.
-#define SONAME ""
+#define SONAME ""
namespace v8 {
namespace internal {
@@ -37,7 +37,6 @@ void Version::GetString(Vector<char> str) {
}
}
-
// Calculate the SONAME for the V8 shared library.
void Version::GetSONAME(Vector<char> str) {
if (soname_ == nullptr || *soname_ == '\0') {
@@ -56,5 +55,7 @@ void Version::GetSONAME(Vector<char> str) {
}
}
+#undef SONAME
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/version.h b/deps/v8/src/utils/version.h
index 9b3ad548ac..1479636d8e 100644
--- a/deps/v8/src/version.h
+++ b/deps/v8/src/utils/version.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_VERSION_H_
-#define V8_VERSION_H_
+#ifndef V8_UTILS_VERSION_H_
+#define V8_UTILS_VERSION_H_
#include <cstdint>
@@ -57,4 +57,4 @@ class V8_EXPORT Version {
} // namespace internal
} // namespace v8
-#endif // V8_VERSION_H_
+#endif // V8_UTILS_VERSION_H_
diff --git a/deps/v8/src/wasm/DEPS b/deps/v8/src/wasm/DEPS
index 8024d9097a..eb0780f5e3 100644
--- a/deps/v8/src/wasm/DEPS
+++ b/deps/v8/src/wasm/DEPS
@@ -2,6 +2,8 @@ specific_include_rules = {
"c-api\.cc": [
"+include/libplatform/libplatform.h",
"+third_party/wasm-api/wasm.h",
+ ],
+ "c-api\.h": [
"+third_party/wasm-api/wasm.hh",
],
}
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 21ec7fdeff..b2cd566873 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -153,8 +153,9 @@ inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst,
}
(assm->*op)(scratch, lhs.low_gp(), Operand(imm), SetCC, al);
// Top half of the immediate sign extended, either 0 or -1.
- (assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(),
- Operand(imm < 0 ? -1 : 0), LeaveCC, al);
+ int32_t sign_extend = imm < 0 ? -1 : 0;
+ (assm->*op_with_carry)(dst.high_gp(), lhs.high_gp(), Operand(sign_extend),
+ LeaveCC, al);
if (!can_use_dst) {
assm->mov(dst.low_gp(), scratch);
}
@@ -253,6 +254,28 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
PatchingAssembler patching_assembler(AssemblerOptions{},
buffer_start_ + offset,
liftoff::kPatchInstructionsRequired);
+#if V8_OS_WIN
+ if (bytes > kStackPageSize) {
+ // Generate OOL code (at the end of the function, where the current
+ // assembler is pointing) to do the explicit stack limit check (see
+ // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
+ // visual-studio-6.0/aa227153(v=vs.60)).
+ // At the function start, emit a jump to that OOL code (from {offset} to
+ // {pc_offset()}).
+ int ool_offset = pc_offset() - offset;
+ patching_assembler.b(ool_offset - Instruction::kPcLoadDelta);
+ patching_assembler.PadWithNops();
+
+ // Now generate the OOL code.
+ AllocateStackSpace(bytes);
+ // Jump back to the start of the function (from {pc_offset()} to {offset +
+ // liftoff::kPatchInstructionsRequired * kInstrSize}).
+ int func_start_offset =
+ offset + liftoff::kPatchInstructionsRequired * kInstrSize - pc_offset();
+ b(func_start_offset - Instruction::kPcLoadDelta);
+ return;
+ }
+#endif
patching_assembler.sub(sp, sp, Operand(bytes));
patching_assembler.PadWithNops();
}
@@ -618,6 +641,12 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
Register rhs) { \
instruction(dst, lhs, rhs); \
}
+#define I32_BINOP_I(name, instruction) \
+ I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
#define I32_SHIFTOP(name, instruction) \
void LiftoffAssembler::emit_##name(Register dst, Register src, \
Register amount, LiftoffRegList pinned) { \
@@ -648,12 +677,12 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
instruction(dst, lhs, rhs); \
}
-I32_BINOP(i32_add, add)
+I32_BINOP_I(i32_add, add)
I32_BINOP(i32_sub, sub)
I32_BINOP(i32_mul, mul)
-I32_BINOP(i32_and, and_)
-I32_BINOP(i32_or, orr)
-I32_BINOP(i32_xor, eor)
+I32_BINOP_I(i32_and, and_)
+I32_BINOP_I(i32_or, orr)
+I32_BINOP_I(i32_xor, eor)
I32_SHIFTOP(i32_shl, lsl)
I32_SHIFTOP(i32_sar, asr)
I32_SHIFTOP(i32_shr, lsr)
@@ -679,10 +708,6 @@ FP64_UNOP(f64_sqrt, vsqrt)
#undef FP64_UNOP
#undef FP64_BINOP
-void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
- add(dst, lhs, Operand(imm));
-}
-
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
clz(dst, src);
return true;
@@ -1378,7 +1403,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
// a pointer to them.
DCHECK(IsAligned(stack_bytes, kSystemPointerSize));
// Reserve space in the stack.
- sub(sp, sp, Operand(stack_bytes));
+ AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
@@ -1464,7 +1489,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- sub(sp, sp, Operand(size));
+ AllocateStackSpace(size);
mov(addr, sp);
}
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 0fe0237653..b1d71dce2f 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -150,6 +150,26 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
#endif
PatchingAssembler patching_assembler(AssemblerOptions{},
buffer_start_ + offset, 1);
+#if V8_OS_WIN
+ if (bytes > kStackPageSize) {
+ // Generate OOL code (at the end of the function, where the current
+ // assembler is pointing) to do the explicit stack limit check (see
+ // https://docs.microsoft.com/en-us/previous-versions/visualstudio/
+ // visual-studio-6.0/aa227153(v=vs.60)).
+ // At the function start, emit a jump to that OOL code (from {offset} to
+ // {pc_offset()}).
+ int ool_offset = pc_offset() - offset;
+ patching_assembler.b(ool_offset >> kInstrSizeLog2);
+
+ // Now generate the OOL code.
+ Claim(bytes, 1);
+ // Jump back to the start of the function (from {pc_offset()} to {offset +
+ // kInstrSize}).
+ int func_start_offset = offset + kInstrSize - pc_offset();
+ b(func_start_offset >> kInstrSizeLog2);
+ return;
+ }
+#endif
patching_assembler.PatchSubSp(bytes);
}
@@ -382,11 +402,23 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
Register rhs) { \
instruction(dst.W(), lhs.W(), rhs.W()); \
}
+#define I32_BINOP_I(name, instruction) \
+ I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst.W(), lhs.W(), Immediate(imm)); \
+ }
#define I64_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
instruction(dst.gp().X(), lhs.gp().X(), rhs.gp().X()); \
}
+#define I64_BINOP_I(name, instruction) \
+ I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ int32_t imm) { \
+ instruction(dst.gp().X(), lhs.gp().X(), imm); \
+ }
#define FP32_BINOP(name, instruction) \
void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
DoubleRegister rhs) { \
@@ -439,21 +471,21 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
instruction(dst.gp().X(), src.gp().X(), amount); \
}
-I32_BINOP(i32_add, Add)
+I32_BINOP_I(i32_add, Add)
I32_BINOP(i32_sub, Sub)
I32_BINOP(i32_mul, Mul)
-I32_BINOP(i32_and, And)
-I32_BINOP(i32_or, Orr)
-I32_BINOP(i32_xor, Eor)
+I32_BINOP_I(i32_and, And)
+I32_BINOP_I(i32_or, Orr)
+I32_BINOP_I(i32_xor, Eor)
I32_SHIFTOP(i32_shl, Lsl)
I32_SHIFTOP(i32_sar, Asr)
I32_SHIFTOP_I(i32_shr, Lsr)
-I64_BINOP(i64_add, Add)
+I64_BINOP_I(i64_add, Add)
I64_BINOP(i64_sub, Sub)
I64_BINOP(i64_mul, Mul)
-I64_BINOP(i64_and, And)
-I64_BINOP(i64_or, Orr)
-I64_BINOP(i64_xor, Eor)
+I64_BINOP_I(i64_and, And)
+I64_BINOP_I(i64_or, Orr)
+I64_BINOP_I(i64_xor, Eor)
I64_SHIFTOP(i64_shl, Lsl)
I64_SHIFTOP(i64_sar, Asr)
I64_SHIFTOP_I(i64_shr, Lsr)
@@ -580,15 +612,6 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Msub(dst_w, scratch, rhs_w, lhs_w);
}
-void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
- Add(dst.gp().X(), lhs.gp().X(), Immediate(imm));
-}
-
-void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
- Add(dst.W(), lhs.W(), Immediate(imm));
-}
-
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 8c5d8c918d..1b5ca87c3d 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -7,7 +7,7 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
#include "src/wasm/value-type.h"
namespace v8 {
@@ -101,11 +101,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->push(reg.low_gp());
break;
case kWasmF32:
- assm->sub(esp, Immediate(sizeof(float)));
+ assm->AllocateStackSpace(sizeof(float));
assm->movss(Operand(esp, 0), reg.fp());
break;
case kWasmF64:
- assm->sub(esp, Immediate(sizeof(double)));
+ assm->AllocateStackSpace(sizeof(double));
assm->movsd(Operand(esp, 0), reg.fp());
break;
default:
@@ -171,8 +171,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
AssemblerOptions{},
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
#if V8_OS_WIN
- constexpr int kPageSize = 4 * 1024;
- if (bytes > kPageSize) {
+ if (bytes > kStackPageSize) {
// Generate OOL code (at the end of the function, where the current
// assembler is pointing) to do the explicit stack limit check (see
// https://docs.microsoft.com/en-us/previous-versions/visualstudio/
@@ -186,10 +185,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
patching_assembler.pc_offset());
// Now generate the OOL code.
- // Use {edi} as scratch register; it is not being used as parameter
- // register (see wasm-linkage.h).
- mov(edi, bytes);
- AllocateStackFrame(edi);
+ AllocateStackSpace(bytes);
// Jump back to the start of the function (from {pc_offset()} to {offset +
// kSubSpSize}).
int func_start_offset = offset + liftoff::kSubSpSize - pc_offset();
@@ -557,6 +553,13 @@ void EmitCommutativeBinOp(LiftoffAssembler* assm, Register dst, Register lhs,
(assm->*op)(dst, rhs);
}
}
+
+template <void (Assembler::*op)(Register, int32_t)>
+void EmitCommutativeBinOpImm(LiftoffAssembler* assm, Register dst, Register lhs,
+ int32_t imm) {
+ if (dst != lhs) assm->mov(dst, lhs);
+ (assm->*op)(dst, imm);
+}
} // namespace liftoff
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
@@ -659,14 +662,26 @@ void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::and_>(this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, int32_t imm) {
+ liftoff::EmitCommutativeBinOpImm<&Assembler::and_>(this, dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::or_>(this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, int32_t imm) {
+ liftoff::EmitCommutativeBinOpImm<&Assembler::or_>(this, dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::xor_>(this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, int32_t imm) {
+ liftoff::EmitCommutativeBinOpImm<&Assembler::xor_>(this, dst, lhs, imm);
+}
+
namespace liftoff {
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register src, Register amount,
@@ -825,7 +840,8 @@ inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp());
// Top half of the immediate sign extended, either 0 or -1.
- (assm->*op_with_carry)(dst_high, imm < 0 ? -1 : 0);
+ int32_t sign_extend = imm < 0 ? -1 : 0;
+ (assm->*op_with_carry)(dst_high, sign_extend);
// If necessary, move result into the right registers.
LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
@@ -1439,7 +1455,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
return true;
case kExprI64ReinterpretF64:
// Push src to the stack.
- sub(esp, Immediate(8));
+ AllocateStackSpace(8);
movsd(Operand(esp, 0), src.fp());
// Pop to dst.
pop(dst.low_gp());
@@ -1689,7 +1705,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList fp_regs = regs & kFpCacheRegList;
unsigned num_fp_regs = fp_regs.GetNumRegsSet();
if (num_fp_regs) {
- sub(esp, Immediate(num_fp_regs * kStackSlotSize));
+ AllocateStackSpace(num_fp_regs * kStackSlotSize);
unsigned offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
@@ -1730,7 +1746,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- sub(esp, Immediate(stack_bytes));
+ AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
@@ -1795,7 +1811,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- sub(esp, Immediate(size));
+ AllocateStackSpace(size);
mov(addr, esp);
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index a5e2803e8a..1796698797 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -5,8 +5,8 @@
#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_DEFS_H_
-#include "src/assembler-arch.h"
-#include "src/reglist.h"
+#include "src/codegen/assembler-arch.h"
+#include "src/codegen/reglist.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index fd380b36a6..0fcfb8dbfc 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -6,12 +6,12 @@
#include <sstream>
-#include "src/assembler-inl.h"
#include "src/base/optional.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/macro-assembler-inl.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-opcodes.h"
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index a3e4e4ce07..40e1636b6e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -10,8 +10,8 @@
#include "src/base/bits.h"
#include "src/base/small-vector.h"
-#include "src/frames.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/frames.h"
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/function-body-decoder.h"
@@ -402,8 +402,11 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_rem_by_zero);
inline void emit_i32_and(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_and(Register dst, Register lhs, int32_t imm);
inline void emit_i32_or(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_or(Register dst, Register lhs, int32_t imm);
inline void emit_i32_xor(Register dst, Register lhs, Register rhs);
+ inline void emit_i32_xor(Register dst, Register lhs, int32_t imm);
inline void emit_i32_shl(Register dst, Register src, Register amount,
LiftoffRegList pinned = {});
inline void emit_i32_sar(Register dst, Register src, Register amount,
@@ -437,10 +440,16 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs, Label* trap_rem_by_zero);
inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm);
inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm);
inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm);
inline void emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned = {});
inline void emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
@@ -686,6 +695,34 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
(assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
assm->Move(dst.low_gp(), tmp, kWasmI32);
}
+
+template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
+void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
+ LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t imm) {
+ // Top half of the immediate sign extended, either 0 or -1.
+ int32_t sign_extend = imm < 0 ? -1 : 0;
+ // If {dst.low_gp()} does not overlap with {lhs.high_gp()},
+ // just first compute the lower half, then the upper half.
+ if (dst.low() != lhs.high()) {
+ (assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
+ (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
+ return;
+ }
+ // If {dst.high_gp()} does not overlap with {lhs.low_gp()},
+ // we can compute this the other way around.
+ if (dst.high() != lhs.low()) {
+ (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
+ (assm->*op)(dst.low_gp(), lhs.low_gp(), imm);
+ return;
+ }
+ // Otherwise, we need a temporary register.
+ Register tmp =
+ assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
+ (assm->*op)(tmp, lhs.low_gp(), imm);
+ (assm->*op)(dst.high_gp(), lhs.high_gp(), sign_extend);
+ assm->Move(dst.low_gp(), tmp, kWasmI32);
+}
} // namespace liftoff
void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
@@ -694,18 +731,36 @@ void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_and>(
+ this, dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_or>(
+ this, dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_xor>(
+ this, dst, lhs, imm);
+}
+
#endif // V8_TARGET_ARCH_32_BIT
// End of the partially platform-independent implementations of the
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index d539fe481e..caf00a24ca 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -4,19 +4,19 @@
#include "src/wasm/baseline/liftoff-compiler.h"
-#include "src/assembler-inl.h"
#include "src/base/optional.h"
+#include "src/codegen/assembler-inl.h"
// TODO(clemensh): Remove dependences on compiler stuff.
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/counters.h"
-#include "src/interface-descriptors.h"
-#include "src/log.h"
-#include "src/macro-assembler-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
#include "src/objects/smi.h"
-#include "src/ostreams.h"
#include "src/tracing/trace-event.h"
-#include "src/utils.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/utils.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
@@ -424,8 +424,7 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool.position), false);
__ CallRuntimeStub(ool.stub);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
DCHECK_EQ(ool.continuation.get()->is_bound(), is_stack_check);
if (!ool.regs_to_save.is_empty()) __ PopRegisters(ool.regs_to_save);
if (is_stack_check) {
@@ -943,12 +942,12 @@ class LiftoffCompiler {
CASE_I32_BINOPI(I32Add, i32_add)
CASE_I32_BINOP(I32Sub, i32_sub)
CASE_I32_BINOP(I32Mul, i32_mul)
- CASE_I32_BINOP(I32And, i32_and)
- CASE_I32_BINOP(I32Ior, i32_or)
- CASE_I32_BINOP(I32Xor, i32_xor)
- CASE_I64_BINOP(I64And, i64_and)
- CASE_I64_BINOP(I64Ior, i64_or)
- CASE_I64_BINOP(I64Xor, i64_xor)
+ CASE_I32_BINOPI(I32And, i32_and)
+ CASE_I32_BINOPI(I32Ior, i32_or)
+ CASE_I32_BINOPI(I32Xor, i32_xor)
+ CASE_I64_BINOPI(I64And, i64_and)
+ CASE_I64_BINOPI(I64Ior, i64_or)
+ CASE_I64_BINOPI(I64Xor, i64_xor)
CASE_I32_CMPOP(I32Eq, kEqual)
CASE_I32_CMPOP(I32Ne, kUnequal)
CASE_I32_CMPOP(I32LtS, kSignedLessThan)
@@ -1157,6 +1156,10 @@ class LiftoffCompiler {
unsupported(decoder, "ref_null");
}
+ void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
+ unsupported(decoder, "func");
+ }
+
void Drop(FullDecoder* decoder, const Value& value) {
auto& slot = __ cache_state()->stack_state.back();
// If the dropped slot contains a register, decrement it's use count.
@@ -1586,8 +1589,7 @@ class LiftoffCompiler {
Register centry = kJavaScriptCallCodeStartRegister;
LOAD_TAGGED_PTR_INSTANCE_FIELD(centry, CEntryStub);
__ CallRuntimeWithCEntry(runtime_function, centry);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
}
Register AddMemoryMasking(Register index, uint32_t* offset,
@@ -1706,8 +1708,7 @@ class LiftoffCompiler {
if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
__ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
if (kReturnRegister0 != result.gp()) {
__ Move(result.gp(), kReturnRegister0, kWasmI32);
@@ -1758,8 +1759,7 @@ class LiftoffCompiler {
__ CallIndirect(imm.sig, call_descriptor, target);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
} else {
@@ -1773,8 +1773,7 @@ class LiftoffCompiler {
Address addr = static_cast<Address>(imm.index);
__ CallNativeWasmCode(addr);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
}
@@ -1911,8 +1910,7 @@ class LiftoffCompiler {
__ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
__ CallIndirect(imm.sig, call_descriptor, target);
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kSimple,
- Safepoint::kNoLazyDeopt);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
__ FinishCall(imm.sig, call_descriptor);
}
@@ -1992,6 +1990,18 @@ class LiftoffCompiler {
Vector<Value> args) {
unsupported(decoder, "table.copy");
}
+ void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ Value& value, Value& delta, Value* result) {
+ unsupported(decoder, "table.grow");
+ }
+ void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ Value* result) {
+ unsupported(decoder, "table.size");
+ }
+ void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ Value& start, Value& value, Value& count) {
+ unsupported(decoder, "table.fill");
+ }
private:
LiftoffAssembler asm_;
@@ -2032,15 +2042,16 @@ class LiftoffCompiler {
} // namespace
-WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
- AccountingAllocator* allocator, CompilationEnv* env,
- const FunctionBody& func_body, Counters* counters, WasmFeatures* detected) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "ExecuteLiftoffCompilation");
- base::ElapsedTimer compile_timer;
- if (FLAG_trace_wasm_decode_time) {
- compile_timer.Start();
- }
+WasmCompilationResult ExecuteLiftoffCompilation(AccountingAllocator* allocator,
+ CompilationEnv* env,
+ const FunctionBody& func_body,
+ int func_index,
+ Counters* counters,
+ WasmFeatures* detected) {
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
+ "ExecuteLiftoffCompilation", "func_index", func_index,
+ "body_size",
+ static_cast<uint32_t>(func_body.end - func_body.start));
Zone zone(allocator, "LiftoffCompilationZone");
const WasmModule* module = env ? env->module : nullptr;
@@ -2067,14 +2078,6 @@ WasmCompilationResult LiftoffCompilationUnit::ExecuteCompilation(
counters->liftoff_compiled_functions()->Increment();
- if (FLAG_trace_wasm_decode_time) {
- double compile_ms = compile_timer.Elapsed().InMillisecondsF();
- PrintF(
- "wasm-compilation liftoff phase 1 ok: %u bytes, %0.3f ms decode and "
- "compile\n",
- static_cast<unsigned>(func_body.end - func_body.start), compile_ms);
- }
-
WasmCompilationResult result;
compiler->GetCode(&result.code_desc);
result.instr_buffer = instruction_buffer->ReleaseBuffer();
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index 1ae0b8e83a..f310b9a54b 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -5,7 +5,6 @@
#ifndef V8_WASM_BASELINE_LIFTOFF_COMPILER_H_
#define V8_WASM_BASELINE_LIFTOFF_COMPILER_H_
-#include "src/base/macros.h"
#include "src/wasm/function-compiler.h"
namespace v8 {
@@ -18,21 +17,11 @@ namespace wasm {
struct CompilationEnv;
struct FunctionBody;
-class NativeModule;
struct WasmFeatures;
-class LiftoffCompilationUnit final {
- public:
- LiftoffCompilationUnit() = default;
-
- WasmCompilationResult ExecuteCompilation(AccountingAllocator*,
- CompilationEnv*, const FunctionBody&,
- Counters*,
- WasmFeatures* detected_features);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LiftoffCompilationUnit);
-};
+WasmCompilationResult ExecuteLiftoffCompilation(
+ AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
+ Counters*, WasmFeatures* detected_features);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/mips/OWNERS b/deps/v8/src/wasm/baseline/mips/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/wasm/baseline/mips/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 530118c526..5be769685c 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -585,10 +585,6 @@ void LiftoffAssembler::FillI64Half(Register reg, uint32_t index,
lw(reg, liftoff::GetHalfStackSlot(index, half));
}
-void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
- Addu(dst, lhs, imm);
-}
-
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
@@ -644,6 +640,21 @@ I32_BINOP(xor, xor_)
#undef I32_BINOP
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+
+// clang-format off
+I32_BINOP_I(add, Addu)
+I32_BINOP_I(and, And)
+I32_BINOP_I(or, Or)
+I32_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I32_BINOP_I
+
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
TurboAssembler::Clz(dst, src);
return true;
diff --git a/deps/v8/src/wasm/baseline/mips64/OWNERS b/deps/v8/src/wasm/baseline/mips64/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/src/wasm/baseline/mips64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 7bfa172def..1da72cb9b8 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -436,13 +436,13 @@ void LiftoffAssembler::Spill(uint32_t index, LiftoffRegister reg,
MemOperand dst = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
- sw(reg.gp(), dst);
+ Sw(reg.gp(), dst);
break;
case kWasmI64:
- sd(reg.gp(), dst);
+ Sd(reg.gp(), dst);
break;
case kWasmF32:
- swc1(reg.fp(), dst);
+ Swc1(reg.fp(), dst);
break;
case kWasmF64:
TurboAssembler::Sdc1(reg.fp(), dst);
@@ -480,13 +480,13 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t index,
MemOperand src = liftoff::GetStackSlot(index);
switch (type) {
case kWasmI32:
- lw(reg.gp(), src);
+ Lw(reg.gp(), src);
break;
case kWasmI64:
- ld(reg.gp(), src);
+ Ld(reg.gp(), src);
break;
case kWasmF32:
- lwc1(reg.fp(), src);
+ Lwc1(reg.fp(), src);
break;
case kWasmF64:
TurboAssembler::Ldc1(reg.fp(), src);
@@ -500,10 +500,6 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
UNREACHABLE();
}
-void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
- Addu(dst, lhs, Operand(imm));
-}
-
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
TurboAssembler::Mul(dst, lhs, rhs);
}
@@ -559,6 +555,21 @@ I32_BINOP(xor, xor_)
#undef I32_BINOP
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+
+// clang-format off
+I32_BINOP_I(add, Addu)
+I32_BINOP_I(and, And)
+I32_BINOP_I(or, Or)
+I32_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I32_BINOP_I
+
bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
TurboAssembler::Clz(dst, src);
return true;
@@ -594,11 +605,6 @@ I32_SHIFTOP_I(shr, srl)
#undef I32_SHIFTOP
#undef I32_SHIFTOP_I
-void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
- Daddu(dst.gp(), lhs.gp(), Operand(imm));
-}
-
void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp());
@@ -664,6 +670,21 @@ I64_BINOP(xor, xor_)
#undef I64_BINOP
+#define I64_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \
+ LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp(), lhs.gp(), Operand(imm)); \
+ }
+
+// clang-format off
+I64_BINOP_I(add, Daddu)
+I64_BINOP_I(and, And)
+I64_BINOP_I(or, Or)
+I64_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I64_BINOP_I
+
#define I64_SHIFTOP(name, instruction) \
void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \
LiftoffRegister src, Register amount, \
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index b7b17afcfb..577df835e8 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -109,16 +109,28 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
BAILOUT("FillI64Half");
}
-#define UNIMPLEMENTED_GP_BINOP(name) \
+#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("gp binop: " #name); \
+ BAILOUT("i32 binop:: " #name); \
+ }
+#define UNIMPLEMENTED_I32_BINOP_I(name) \
+ UNIMPLEMENTED_I32_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ int32_t imm) { \
+ BAILOUT("i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
BAILOUT("i64 binop: " #name); \
}
+#define UNIMPLEMENTED_I64_BINOP_I(name) \
+ UNIMPLEMENTED_I64_BINOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ int32_t imm) { \
+ BAILOUT("i64_i binop: " #name); \
+ }
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
BAILOUT("gp unop: " #name); \
@@ -149,22 +161,22 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
BAILOUT("i64 shiftop: " #name); \
}
-UNIMPLEMENTED_GP_BINOP(i32_add)
-UNIMPLEMENTED_GP_BINOP(i32_sub)
-UNIMPLEMENTED_GP_BINOP(i32_mul)
-UNIMPLEMENTED_GP_BINOP(i32_and)
-UNIMPLEMENTED_GP_BINOP(i32_or)
-UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_I32_BINOP_I(i32_add)
+UNIMPLEMENTED_I32_BINOP(i32_sub)
+UNIMPLEMENTED_I32_BINOP(i32_mul)
+UNIMPLEMENTED_I32_BINOP_I(i32_and)
+UNIMPLEMENTED_I32_BINOP_I(i32_or)
+UNIMPLEMENTED_I32_BINOP_I(i32_xor)
UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP(i64_add)
+UNIMPLEMENTED_I64_BINOP_I(i64_add)
UNIMPLEMENTED_I64_BINOP(i64_sub)
UNIMPLEMENTED_I64_BINOP(i64_mul)
#ifdef V8_TARGET_ARCH_PPC64
-UNIMPLEMENTED_I64_BINOP(i64_and)
-UNIMPLEMENTED_I64_BINOP(i64_or)
-UNIMPLEMENTED_I64_BINOP(i64_xor)
+UNIMPLEMENTED_I64_BINOP_I(i64_and)
+UNIMPLEMENTED_I64_BINOP_I(i64_or)
+UNIMPLEMENTED_I64_BINOP_I(i64_xor)
#endif
UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
@@ -201,8 +213,10 @@ UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_trunc)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_nearest_int)
UNIMPLEMENTED_FP_UNOP(f64_sqrt)
-#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_I32_BINOP
+#undef UNIMPLEMENTED_I32_BINOP_I
#undef UNIMPLEMENTED_I64_BINOP
+#undef UNIMPLEMENTED_I64_BINOP_I
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_FP_UNOP
@@ -231,15 +245,6 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
-void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
- BAILOUT("i64_add");
-}
-
-void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
- BAILOUT("i32_add");
-}
-
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
BAILOUT("i32_shr");
}
diff --git a/deps/v8/src/wasm/baseline/s390/OWNERS b/deps/v8/src/wasm/baseline/s390/OWNERS
deleted file mode 100644
index 85b6cb38f0..0000000000
--- a/deps/v8/src/wasm/baseline/s390/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-jyan@ca.ibm.com
-joransiu@ca.ibm.com
-michael_dawson@ca.ibm.com
-miladfar@ca.ibm.com
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 1cb8e97d89..1e01bec407 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -109,16 +109,28 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
BAILOUT("FillI64Half");
}
-#define UNIMPLEMENTED_GP_BINOP(name) \
+#define UNIMPLEMENTED_I32_BINOP(name) \
void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
Register rhs) { \
- BAILOUT("gp binop: " #name); \
+ BAILOUT("i32 binop: " #name); \
+ }
+#define UNIMPLEMENTED_I32_BINOP_I(name) \
+ UNIMPLEMENTED_I32_BINOP(name) \
+ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
+ int32_t imm) { \
+ BAILOUT("i32 binop_i: " #name); \
}
#define UNIMPLEMENTED_I64_BINOP(name) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
BAILOUT("i64 binop: " #name); \
}
+#define UNIMPLEMENTED_I64_BINOP_I(name) \
+ UNIMPLEMENTED_I64_BINOP(name) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ int32_t imm) { \
+ BAILOUT("i64 binop_i: " #name); \
+ }
#define UNIMPLEMENTED_GP_UNOP(name) \
bool LiftoffAssembler::emit_##name(Register dst, Register src) { \
BAILOUT("gp unop: " #name); \
@@ -149,22 +161,22 @@ void LiftoffAssembler::FillI64Half(Register, uint32_t index, RegPairHalf) {
BAILOUT("i64 shiftop: " #name); \
}
-UNIMPLEMENTED_GP_BINOP(i32_add)
-UNIMPLEMENTED_GP_BINOP(i32_sub)
-UNIMPLEMENTED_GP_BINOP(i32_mul)
-UNIMPLEMENTED_GP_BINOP(i32_and)
-UNIMPLEMENTED_GP_BINOP(i32_or)
-UNIMPLEMENTED_GP_BINOP(i32_xor)
+UNIMPLEMENTED_I32_BINOP_I(i32_add)
+UNIMPLEMENTED_I32_BINOP(i32_sub)
+UNIMPLEMENTED_I32_BINOP(i32_mul)
+UNIMPLEMENTED_I32_BINOP_I(i32_and)
+UNIMPLEMENTED_I32_BINOP_I(i32_or)
+UNIMPLEMENTED_I32_BINOP_I(i32_xor)
UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP(i64_add)
+UNIMPLEMENTED_I64_BINOP_I(i64_add)
UNIMPLEMENTED_I64_BINOP(i64_sub)
UNIMPLEMENTED_I64_BINOP(i64_mul)
#ifdef V8_TARGET_ARCH_S390X
-UNIMPLEMENTED_I64_BINOP(i64_and)
-UNIMPLEMENTED_I64_BINOP(i64_or)
-UNIMPLEMENTED_I64_BINOP(i64_xor)
+UNIMPLEMENTED_I64_BINOP_I(i64_and)
+UNIMPLEMENTED_I64_BINOP_I(i64_or)
+UNIMPLEMENTED_I64_BINOP_I(i64_xor)
#endif
UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
@@ -201,8 +213,10 @@ UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_trunc)
UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_nearest_int)
UNIMPLEMENTED_FP_UNOP(f64_sqrt)
-#undef UNIMPLEMENTED_GP_BINOP
+#undef UNIMPLEMENTED_I32_BINOP
+#undef UNIMPLEMENTED_I32_BINOP_I
#undef UNIMPLEMENTED_I64_BINOP
+#undef UNIMPLEMENTED_I64_BINOP_I
#undef UNIMPLEMENTED_GP_UNOP
#undef UNIMPLEMENTED_FP_BINOP
#undef UNIMPLEMENTED_FP_UNOP
@@ -231,15 +245,6 @@ void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
BAILOUT("i32_remu");
}
-void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t imm) {
- BAILOUT("i64_add");
-}
-
-void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, int32_t imm) {
- BAILOUT("i32_add");
-}
-
void LiftoffAssembler::emit_i32_shr(Register dst, Register lhs, int amount) {
BAILOUT("i32_shr");
}
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index ccd352df7e..cbff0d4da9 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -7,7 +7,7 @@
#include "src/wasm/baseline/liftoff-assembler.h"
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
#include "src/wasm/value-type.h"
namespace v8 {
@@ -112,11 +112,11 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
assm->pushq(reg.gp());
break;
case kWasmF32:
- assm->subq(rsp, Immediate(kSystemPointerSize));
+ assm->AllocateStackSpace(kSystemPointerSize);
assm->Movss(Operand(rsp, 0), reg.fp());
break;
case kWasmF64:
- assm->subq(rsp, Immediate(kSystemPointerSize));
+ assm->AllocateStackSpace(kSystemPointerSize);
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
default:
@@ -131,11 +131,14 @@ inline void SpillRegisters(LiftoffAssembler* assm, Regs... regs) {
}
}
+constexpr int kSubSpSize = 7; // 7 bytes for "subq rsp, <imm32>"
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
int offset = pc_offset();
sub_sp_32(0);
+ DCHECK_EQ(liftoff::kSubSpSize, pc_offset() - offset);
return offset;
}
@@ -149,7 +152,30 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset,
Assembler patching_assembler(
AssemblerOptions{},
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
+#if V8_OS_WIN
+ if (bytes > kStackPageSize) {
+ // Generate OOL code (at the end of the function, where the current
+ // assembler is pointing) to do the explicit stack limit check (see
+ // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-6.0/aa227153(v=vs.60)).
+ // At the function start, emit a jump to that OOL code (from {offset} to
+ // {pc_offset()}).
+ int ool_offset = pc_offset() - offset;
+ patching_assembler.jmp_rel(ool_offset);
+ DCHECK_GE(liftoff::kSubSpSize, patching_assembler.pc_offset());
+ patching_assembler.Nop(liftoff::kSubSpSize -
+ patching_assembler.pc_offset());
+
+ // Now generate the OOL code.
+ AllocateStackSpace(bytes);
+ // Jump back to the start of the function (from {pc_offset()} to {offset +
+ // kSubSpSize}).
+ int func_start_offset = offset + liftoff::kSubSpSize - pc_offset();
+ jmp_rel(func_start_offset);
+ return;
+ }
+#endif
patching_assembler.sub_sp_32(bytes);
+ DCHECK_EQ(liftoff::kSubSpSize, patching_assembler.pc_offset());
}
void LiftoffAssembler::FinishCode() {}
@@ -469,6 +495,14 @@ void EmitCommutativeBinOp(LiftoffAssembler* assm, Register dst, Register lhs,
(assm->*op)(dst, rhs);
}
}
+
+template <void (Assembler::*op)(Register, Immediate),
+ void (Assembler::*mov)(Register, Register)>
+void EmitCommutativeBinOpImm(LiftoffAssembler* assm, Register dst, Register lhs,
+ int32_t imm) {
+ if (dst != lhs) (assm->*mov)(dst, lhs);
+ (assm->*op)(dst, Immediate(imm));
+}
} // namespace liftoff
void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
@@ -592,16 +626,31 @@ void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, Register rhs) {
lhs, rhs);
}
+void LiftoffAssembler::emit_i32_and(Register dst, Register lhs, int32_t imm) {
+ liftoff::EmitCommutativeBinOpImm<&Assembler::andl, &Assembler::movl>(
+ this, dst, lhs, imm);
+}
+
void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::orl, &Assembler::movl>(this, dst,
lhs, rhs);
}
+void LiftoffAssembler::emit_i32_or(Register dst, Register lhs, int32_t imm) {
+ liftoff::EmitCommutativeBinOpImm<&Assembler::orl, &Assembler::movl>(this, dst,
+ lhs, imm);
+}
+
void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, Register rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::xorl, &Assembler::movl>(this, dst,
lhs, rhs);
}
+void LiftoffAssembler::emit_i32_xor(Register dst, Register lhs, int32_t imm) {
+ liftoff::EmitCommutativeBinOpImm<&Assembler::xorl, &Assembler::movl>(
+ this, dst, lhs, imm);
+}
+
namespace liftoff {
template <ValueType type>
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
@@ -778,18 +827,36 @@ void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
this, dst.gp(), lhs.gp(), rhs.gp());
}
+void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::EmitCommutativeBinOpImm<&Assembler::andq, &Assembler::movq>(
+ this, dst.gp(), lhs.gp(), imm);
+}
+
void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::orq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), rhs.gp());
}
+void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::EmitCommutativeBinOpImm<&Assembler::orq, &Assembler::movq>(
+ this, dst.gp(), lhs.gp(), imm);
+}
+
void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitCommutativeBinOp<&Assembler::xorq, &Assembler::movq>(
this, dst.gp(), lhs.gp(), rhs.gp());
}
+void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t imm) {
+ liftoff::EmitCommutativeBinOpImm<&Assembler::xorq, &Assembler::movq>(
+ this, dst.gp(), lhs.gp(), imm);
+}
+
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount, LiftoffRegList pinned) {
liftoff::EmitShiftOperation<kWasmI64>(this, dst.gp(), src.gp(), amount,
@@ -1452,7 +1519,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
LiftoffRegList fp_regs = regs & kFpCacheRegList;
unsigned num_fp_regs = fp_regs.GetNumRegsSet();
if (num_fp_regs) {
- subq(rsp, Immediate(num_fp_regs * kStackSlotSize));
+ AllocateStackSpace(num_fp_regs * kStackSlotSize);
unsigned offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
@@ -1493,7 +1560,7 @@ void LiftoffAssembler::CallC(wasm::FunctionSig* sig,
const LiftoffRegister* rets,
ValueType out_argument_type, int stack_bytes,
ExternalReference ext_ref) {
- subq(rsp, Immediate(stack_bytes));
+ AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
for (ValueType param_type : sig->parameters()) {
@@ -1555,7 +1622,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- subq(rsp, Immediate(size));
+ AllocateStackSpace(size);
movq(addr, rsp);
}
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 18ab23dcce..e5c1fa4686 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -22,15 +22,17 @@
#include <cstring>
#include <iostream>
+#include "src/wasm/c-api.h"
+
#include "third_party/wasm-api/wasm.h"
-#include "third_party/wasm-api/wasm.hh"
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/wasm/leb-helper.h"
+#include "src/wasm/module-instantiate.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
// BEGIN FILE wasm-bin.cc
@@ -162,32 +164,6 @@ auto wrapper(const FuncType* type) -> vec<byte_t> {
return binary;
}
-auto wrapper(const GlobalType* type) -> vec<byte_t> {
- auto size = 25 + zero_size(type->content());
- auto binary = vec<byte_t>::make_uninitialized(size);
- auto ptr = binary.get();
-
- encode_header(ptr);
-
- *ptr++ = i::wasm::kGlobalSectionCode;
- encode_size32(ptr, 5 + zero_size(type->content())); // size
- *ptr++ = 1; // length
- encode_valtype(ptr, type->content());
- *ptr++ = (type->mutability() == VAR);
- encode_const_zero(ptr, type->content());
- *ptr++ = 0x0b; // end
-
- *ptr++ = i::wasm::kExportSectionCode;
- *ptr++ = 4; // size
- *ptr++ = 1; // length
- *ptr++ = 0; // name length
- *ptr++ = i::wasm::kExternalGlobal;
- *ptr++ = 0; // func index
-
- assert(ptr - binary.get() == static_cast<ptrdiff_t>(size));
- return binary;
-}
-
////////////////////////////////////////////////////////////////////////////////
// Decoding
@@ -230,11 +206,6 @@ auto name(const byte_t*& pos) -> Name {
return name;
}
-void name_skip(const byte_t*& pos) {
- auto size = bin::u32(pos);
- pos += size;
-}
-
// Types
auto valtype(const byte_t*& pos) -> own<wasm::ValType*> {
@@ -330,7 +301,7 @@ void expr_skip(const byte_t*& pos) {
// Sections
-auto section(const vec<byte_t>& binary, i::wasm::SectionCode sec)
+auto section(const vec<const byte_t>& binary, i::wasm::SectionCode sec)
-> const byte_t* {
const byte_t* end = binary.get() + binary.size();
const byte_t* pos = binary.get() + 8; // skip header
@@ -344,7 +315,7 @@ auto section(const vec<byte_t>& binary, i::wasm::SectionCode sec)
}
// Only for asserts/DCHECKs.
-auto section_end(const vec<byte_t>& binary, i::wasm::SectionCode sec)
+auto section_end(const vec<const byte_t>& binary, i::wasm::SectionCode sec)
-> const byte_t* {
const byte_t* end = binary.get() + binary.size();
const byte_t* pos = binary.get() + 8; // skip header
@@ -361,7 +332,7 @@ auto section_end(const vec<byte_t>& binary, i::wasm::SectionCode sec)
// Type section
-auto types(const vec<byte_t>& binary) -> vec<FuncType*> {
+auto types(const vec<const byte_t>& binary) -> vec<FuncType*> {
auto pos = bin::section(binary, i::wasm::kTypeSectionCode);
if (pos == nullptr) return vec<FuncType*>::make();
size_t size = bin::u32(pos);
@@ -376,7 +347,7 @@ auto types(const vec<byte_t>& binary) -> vec<FuncType*> {
// Import section
-auto imports(const vec<byte_t>& binary, const vec<FuncType*>& types)
+auto imports(const vec<const byte_t>& binary, const vec<FuncType*>& types)
-> vec<ImportType*> {
auto pos = bin::section(binary, i::wasm::kImportSectionCode);
if (pos == nullptr) return vec<ImportType*>::make();
@@ -419,7 +390,7 @@ auto count(const vec<ImportType*>& imports, ExternKind kind) -> uint32_t {
// Function section
-auto funcs(const vec<byte_t>& binary, const vec<ImportType*>& imports,
+auto funcs(const vec<const byte_t>& binary, const vec<ImportType*>& imports,
const vec<FuncType*>& types) -> vec<FuncType*> {
auto pos = bin::section(binary, i::wasm::kFunctionSectionCode);
size_t size = pos != nullptr ? bin::u32(pos) : 0;
@@ -443,7 +414,7 @@ auto funcs(const vec<byte_t>& binary, const vec<ImportType*>& imports,
// Global section
-auto globals(const vec<byte_t>& binary, const vec<ImportType*>& imports)
+auto globals(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
-> vec<GlobalType*> {
auto pos = bin::section(binary, i::wasm::kGlobalSectionCode);
size_t size = pos != nullptr ? bin::u32(pos) : 0;
@@ -468,7 +439,7 @@ auto globals(const vec<byte_t>& binary, const vec<ImportType*>& imports)
// Table section
-auto tables(const vec<byte_t>& binary, const vec<ImportType*>& imports)
+auto tables(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
-> vec<TableType*> {
auto pos = bin::section(binary, i::wasm::kTableSectionCode);
size_t size = pos != nullptr ? bin::u32(pos) : 0;
@@ -492,7 +463,7 @@ auto tables(const vec<byte_t>& binary, const vec<ImportType*>& imports)
// Memory section
-auto memories(const vec<byte_t>& binary, const vec<ImportType*>& imports)
+auto memories(const vec<const byte_t>& binary, const vec<ImportType*>& imports)
-> vec<MemoryType*> {
auto pos = bin::section(binary, i::wasm::kMemorySectionCode);
size_t size = pos != nullptr ? bin::u32(pos) : 0;
@@ -516,7 +487,7 @@ auto memories(const vec<byte_t>& binary, const vec<ImportType*>& imports)
// Export section
-auto exports(const vec<byte_t>& binary, const vec<FuncType*>& funcs,
+auto exports(const vec<const byte_t>& binary, const vec<FuncType*>& funcs,
const vec<GlobalType*>& globals, const vec<TableType*>& tables,
const vec<MemoryType*>& memories) -> vec<ExportType*> {
auto pos = bin::section(binary, i::wasm::kExportSectionCode);
@@ -550,11 +521,11 @@ auto exports(const vec<byte_t>& binary, const vec<FuncType*>& funcs,
return exports;
}
-auto imports(const vec<byte_t>& binary) -> vec<ImportType*> {
+auto imports(const vec<const byte_t>& binary) -> vec<ImportType*> {
return bin::imports(binary, bin::types(binary));
}
-auto exports(const vec<byte_t>& binary) -> vec<ExportType*> {
+auto exports(const vec<const byte_t>& binary) -> vec<ExportType*> {
auto types = bin::types(binary);
auto imports = bin::imports(binary, types);
auto funcs = bin::funcs(binary, imports, types);
@@ -572,21 +543,6 @@ auto exports(const vec<byte_t>& binary) -> vec<ExportType*> {
namespace v8 {
namespace wasm {
-// Objects
-
-auto object_isolate(const v8::Persistent<v8::Object>& obj) -> v8::Isolate* {
- struct FakePersistent {
- v8::Object* val;
- };
- auto v8_obj = reinterpret_cast<const FakePersistent*>(&obj)->val;
- return v8_obj->GetIsolate();
-}
-
-template <class T>
-auto object_handle(T v8_obj) -> i::Handle<T> {
- return handle(v8_obj, v8_obj->GetIsolate());
-}
-
// Foreign pointers
auto foreign_new(v8::Isolate* isolate, void* ptr) -> v8::Local<v8::Value> {
@@ -620,328 +576,20 @@ auto v8_valtype_to_wasm(i::wasm::ValueType v8_valtype) -> ::wasm::ValKind {
}
}
-auto func_type_param_arity(v8::Local<v8::Object> function) -> uint32_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(function);
- auto v8_function = i::Handle<i::WasmExportedFunction>::cast(v8_object);
- i::wasm::FunctionSig* sig = v8_function->instance()
- ->module()
- ->functions[v8_function->function_index()]
- .sig;
- return static_cast<uint32_t>(sig->parameter_count());
-}
-
-auto func_type_result_arity(v8::Local<v8::Object> function) -> uint32_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(function);
- auto v8_function = i::Handle<i::WasmExportedFunction>::cast(v8_object);
- i::wasm::FunctionSig* sig = v8_function->instance()
- ->module()
- ->functions[v8_function->function_index()]
- .sig;
- return static_cast<uint32_t>(sig->return_count());
-}
-
-auto func_type_param(v8::Local<v8::Object> function, size_t i)
- -> ::wasm::ValKind {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(function);
- auto v8_function = i::Handle<i::WasmExportedFunction>::cast(v8_object);
- i::wasm::FunctionSig* sig = v8_function->instance()
- ->module()
- ->functions[v8_function->function_index()]
- .sig;
- return v8_valtype_to_wasm(sig->GetParam(i));
-}
-
-auto func_type_result(v8::Local<v8::Object> function, size_t i)
- -> ::wasm::ValKind {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(function);
- auto v8_function = i::Handle<i::WasmExportedFunction>::cast(v8_object);
- i::wasm::FunctionSig* sig = v8_function->instance()
- ->module()
- ->functions[v8_function->function_index()]
- .sig;
- return v8_valtype_to_wasm(sig->GetReturn(i));
-}
-
-auto global_type_content(v8::Local<v8::Object> global) -> ::wasm::ValKind {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- return v8_valtype_to_wasm(v8_global->type());
-}
-
-auto global_type_mutable(v8::Local<v8::Object> global) -> bool {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- return v8_global->is_mutable();
-}
-
-auto table_type_min(v8::Local<v8::Object> table) -> uint32_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
- auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
- return v8_table->current_length();
-}
-
-auto table_type_max(v8::Local<v8::Object> table) -> uint32_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
- auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
- auto v8_max_obj = v8_table->maximum_length();
- uint32_t max;
- return v8_max_obj->ToUint32(&max) ? max : 0xffffffffu;
-}
-
-auto memory_size(v8::Local<v8::Object> memory) -> uint32_t;
-
-auto memory_type_min(v8::Local<v8::Object> memory) -> uint32_t {
- return memory_size(memory);
-}
-
-auto memory_type_max(v8::Local<v8::Object> memory) -> uint32_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
- auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
- return v8_memory->has_maximum_pages() ? v8_memory->maximum_pages()
- : 0xffffffffu;
-}
-
-// Modules
-
-auto module_binary_size(v8::Local<v8::Object> module) -> size_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(module);
- auto v8_module = i::Handle<i::WasmModuleObject>::cast(v8_object);
- return v8_module->native_module()->wire_bytes().size();
-}
-
-auto module_binary(v8::Local<v8::Object> module) -> const char* {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(module);
- auto v8_module = i::Handle<i::WasmModuleObject>::cast(v8_object);
- return reinterpret_cast<const char*>(
- v8_module->native_module()->wire_bytes().start());
-}
-
-auto module_serialize_size(v8::Local<v8::Object> module) -> size_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(module);
- auto v8_module = i::Handle<i::WasmModuleObject>::cast(v8_object);
- i::wasm::WasmSerializer serializer(v8_module->native_module());
- return serializer.GetSerializedNativeModuleSize();
-}
-
-auto module_serialize(v8::Local<v8::Object> module, char* buffer, size_t size)
- -> bool {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(module);
- auto v8_module = i::Handle<i::WasmModuleObject>::cast(v8_object);
- i::wasm::WasmSerializer serializer(v8_module->native_module());
- return serializer.SerializeNativeModule(
- {reinterpret_cast<uint8_t*>(buffer), size});
-}
-
-auto module_deserialize(v8::Isolate* isolate, const char* binary,
- size_t binary_size, const char* buffer,
- size_t buffer_size) -> v8::MaybeLocal<v8::Object> {
- auto v8_isolate = reinterpret_cast<i::Isolate*>(isolate);
- auto maybe_v8_module = i::wasm::DeserializeNativeModule(
- v8_isolate, {reinterpret_cast<const uint8_t*>(buffer), buffer_size},
- {reinterpret_cast<const uint8_t*>(binary), binary_size});
- if (maybe_v8_module.is_null()) return v8::MaybeLocal<v8::Object>();
- auto v8_module =
- i::Handle<i::JSObject>::cast(maybe_v8_module.ToHandleChecked());
- return v8::MaybeLocal<v8::Object>(v8::Utils::ToLocal(v8_module));
-}
-
-// Instances
-
-auto instance_module(v8::Local<v8::Object> instance) -> v8::Local<v8::Object> {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(instance);
- auto v8_instance = i::Handle<i::WasmInstanceObject>::cast(v8_object);
- auto v8_module =
- object_handle(i::JSObject::cast(v8_instance->module_object()));
- return v8::Utils::ToLocal(v8_module);
-}
-
-auto instance_exports(v8::Local<v8::Object> instance) -> v8::Local<v8::Object> {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(instance);
- auto v8_instance = i::Handle<i::WasmInstanceObject>::cast(v8_object);
- auto v8_exports = object_handle(v8_instance->exports_object());
- return v8::Utils::ToLocal(v8_exports);
-}
-
-// Externals
-
-auto extern_kind(v8::Local<v8::Object> external) -> ::wasm::ExternKind {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(external);
-
- if (i::WasmExportedFunction::IsWasmExportedFunction(*v8_object))
- return ::wasm::EXTERN_FUNC;
- if (v8_object->IsWasmGlobalObject()) return ::wasm::EXTERN_GLOBAL;
- if (v8_object->IsWasmTableObject()) return ::wasm::EXTERN_TABLE;
- if (v8_object->IsWasmMemoryObject()) return ::wasm::EXTERN_MEMORY;
- UNREACHABLE();
-}
-
-// Functions
-
-auto func_instance(v8::Local<v8::Function> function) -> v8::Local<v8::Object> {
- auto v8_function = v8::Utils::OpenHandle(*function);
- auto v8_func = i::Handle<i::WasmExportedFunction>::cast(v8_function);
- auto v8_instance = object_handle(i::JSObject::cast(v8_func->instance()));
- return v8::Utils::ToLocal(v8_instance);
-}
-
-// Globals
-
-auto global_get_i32(v8::Local<v8::Object> global) -> int32_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- return v8_global->GetI32();
-}
-auto global_get_i64(v8::Local<v8::Object> global) -> int64_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- return v8_global->GetI64();
-}
-auto global_get_f32(v8::Local<v8::Object> global) -> float {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- return v8_global->GetF32();
-}
-auto global_get_f64(v8::Local<v8::Object> global) -> double {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- return v8_global->GetF64();
-}
-
-void global_set_i32(v8::Local<v8::Object> global, int32_t val) {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- v8_global->SetI32(val);
-}
-void global_set_i64(v8::Local<v8::Object> global, int64_t val) {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- v8_global->SetI64(val);
-}
-void global_set_f32(v8::Local<v8::Object> global, float val) {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- v8_global->SetF32(val);
-}
-void global_set_f64(v8::Local<v8::Object> global, double val) {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(global);
- auto v8_global = i::Handle<i::WasmGlobalObject>::cast(v8_object);
- v8_global->SetF32(val);
-}
-
-// Tables
-
-auto table_get(v8::Local<v8::Object> table, size_t index)
- -> v8::MaybeLocal<v8::Function> {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
- auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
- if (index > std::numeric_limits<int>::max()) return {};
- i::Isolate* isolate = v8_table->GetIsolate();
- i::MaybeHandle<i::Object> maybe_result =
- i::WasmTableObject::Get(isolate, v8_table, static_cast<int>(index));
- i::Handle<i::Object> result;
- if (!maybe_result.ToHandle(&result)) {
- // TODO(jkummerow): Clear pending exception?
- return {};
- }
- if (!result->IsJSFunction()) return {};
- return v8::MaybeLocal<v8::Function>(
- v8::Utils::ToLocal(i::Handle<i::JSFunction>::cast(result)));
-}
-
-auto table_set(v8::Local<v8::Object> table, size_t index,
- v8::MaybeLocal<v8::Function> maybe) -> bool {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
- auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
- i::Handle<i::Object> v8_function =
- maybe.IsEmpty()
- ? i::Handle<i::Object>::cast(
- i::ReadOnlyRoots(v8_table->GetIsolate()).null_value_handle())
- : i::Handle<i::Object>::cast(
- v8::Utils::OpenHandle<v8::Function, i::JSReceiver>(
- maybe.ToLocalChecked()));
- if (index >= v8_table->current_length()) return false;
-
- {
- v8::TryCatch handler(table->GetIsolate());
- i::WasmTableObject::Set(v8_table->GetIsolate(), v8_table,
- static_cast<uint32_t>(index), v8_function);
- if (handler.HasCaught()) return false;
- }
-
- return true;
-}
-
-auto table_size(v8::Local<v8::Object> table) -> size_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
- auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
- return v8_table->current_length();
-}
-
-auto table_grow(v8::Local<v8::Object> table, size_t delta,
- v8::MaybeLocal<v8::Function> init) -> bool {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(table);
- auto v8_table = i::Handle<i::WasmTableObject>::cast(v8_object);
- if (delta > 0xfffffffflu) return false;
- auto old_size = v8_table->current_length();
- // TODO(jkummerow): Overflow check.
- auto new_size = old_size + static_cast<uint32_t>(delta);
- // TODO(v8): This should happen in WasmTableObject::Grow.
- if (new_size > table_type_max(table)) return false;
-
- {
- v8::TryCatch handler(table->GetIsolate());
- v8_table->Grow(v8_table->GetIsolate(), static_cast<uint32_t>(delta));
- if (handler.HasCaught()) return false;
- }
-
- // TODO(v8): This should happen in WasmTableObject::Grow.
- if (new_size != old_size) {
- auto isolate = v8_table->GetIsolate();
- i::Handle<i::FixedArray> old_array(v8_table->elements(), isolate);
- auto new_array =
- isolate->factory()->NewFixedArray(static_cast<int>(new_size));
- assert(static_cast<uint32_t>(old_array->length()) == old_size);
- for (int i = 0; i < static_cast<int>(old_size); ++i)
- new_array->set(i, old_array->get(i));
- i::Handle<i::Object> val = isolate->factory()->null_value();
- if (!init.IsEmpty())
- val = v8::Utils::OpenHandle<v8::Function, i::JSReceiver>(
- init.ToLocalChecked());
- for (int i = old_size; i < static_cast<int>(new_size); ++i)
- new_array->set(i, *val);
- v8_table->set_elements(*new_array);
+i::wasm::ValueType wasm_valtype_to_v8(::wasm::ValKind type) {
+ switch (type) {
+ case ::wasm::I32:
+ return i::wasm::kWasmI32;
+ case ::wasm::I64:
+ return i::wasm::kWasmI64;
+ case ::wasm::F32:
+ return i::wasm::kWasmF32;
+ case ::wasm::F64:
+ return i::wasm::kWasmF64;
+ default:
+ // TODO(wasm+): support new value types
+ UNREACHABLE();
}
-
- return true;
-}
-
-// Memory
-
-auto memory_data(v8::Local<v8::Object> memory) -> char* {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
- auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
- return reinterpret_cast<char*>(v8_memory->array_buffer()->backing_store());
-}
-
-auto memory_data_size(v8::Local<v8::Object> memory) -> size_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
- auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
- return v8_memory->array_buffer()->byte_length();
-}
-
-auto memory_size(v8::Local<v8::Object> memory) -> uint32_t {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
- auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
- return static_cast<uint32_t>(v8_memory->array_buffer()->byte_length() /
- i::wasm::kWasmPageSize);
-}
-
-auto memory_grow(v8::Local<v8::Object> memory, uint32_t delta) -> bool {
- auto v8_object = v8::Utils::OpenHandle<v8::Object, i::JSReceiver>(memory);
- auto v8_memory = i::Handle<i::WasmMemoryObject>::cast(v8_object);
- auto old =
- i::WasmMemoryObject::Grow(v8_memory->GetIsolate(), v8_memory, delta);
- return old != -1;
}
} // namespace wasm
@@ -1059,89 +707,17 @@ auto Engine::make(own<Config*>&& config) -> own<Engine*> {
// Stores
-enum v8_string_t {
- V8_S_EMPTY,
- V8_S_I32,
- V8_S_I64,
- V8_S_F32,
- V8_S_F64,
- V8_S_ANYREF,
- V8_S_ANYFUNC,
- V8_S_VALUE,
- V8_S_MUTABLE,
- V8_S_ELEMENT,
- V8_S_MINIMUM,
- V8_S_MAXIMUM,
- V8_S_COUNT
-};
-
-enum v8_symbol_t { V8_Y_CALLBACK, V8_Y_ENV, V8_Y_COUNT };
-
-enum v8_function_t {
- V8_F_WEAKMAP,
- V8_F_WEAKMAP_PROTO,
- V8_F_WEAKMAP_GET,
- V8_F_WEAKMAP_SET,
- V8_F_MODULE,
- V8_F_GLOBAL,
- V8_F_TABLE,
- V8_F_MEMORY,
- V8_F_INSTANCE,
- V8_F_VALIDATE,
- V8_F_COUNT,
-};
-
-class StoreImpl {
- friend own<Store*> Store::make(Engine*);
-
- v8::Isolate::CreateParams create_params_;
- v8::Isolate* isolate_;
- v8::Eternal<v8::Context> context_;
- v8::Eternal<v8::String> strings_[V8_S_COUNT];
- v8::Eternal<v8::Symbol> symbols_[V8_Y_COUNT];
- v8::Eternal<v8::Function> functions_[V8_F_COUNT];
- v8::Eternal<v8::Object> host_data_map_;
- v8::Eternal<v8::Symbol> callback_symbol_;
-
- public:
- StoreImpl() {}
-
- ~StoreImpl() {
+StoreImpl::~StoreImpl() {
#ifdef DEBUG
- reinterpret_cast<i::Isolate*>(isolate_)->heap()->PreciseCollectAllGarbage(
- i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
- v8::kGCCallbackFlagForced);
+ reinterpret_cast<i::Isolate*>(isolate_)->heap()->PreciseCollectAllGarbage(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
+ v8::kGCCallbackFlagForced);
#endif
- context()->Exit();
- isolate_->Exit();
- isolate_->Dispose();
- delete create_params_.array_buffer_allocator;
- }
-
- auto isolate() const -> v8::Isolate* { return isolate_; }
-
- auto context() const -> v8::Local<v8::Context> {
- return context_.Get(isolate_);
- }
-
- auto v8_string(v8_string_t i) const -> v8::Local<v8::String> {
- return strings_[i].Get(isolate_);
- }
- auto v8_string(v8_symbol_t i) const -> v8::Local<v8::Symbol> {
- return symbols_[i].Get(isolate_);
- }
- auto v8_function(v8_function_t i) const -> v8::Local<v8::Function> {
- return functions_[i].Get(isolate_);
- }
-
- auto host_data_map() const -> v8::Local<v8::Object> {
- return host_data_map_.Get(isolate_);
- }
-
- static auto get(v8::Isolate* isolate) -> StoreImpl* {
- return static_cast<StoreImpl*>(isolate->GetData(0));
- }
-};
+ context()->Exit();
+ isolate_->Exit();
+ isolate_->Dispose();
+ delete create_params_.array_buffer_allocator;
+}
template <>
struct implement<Store> {
@@ -1173,77 +749,6 @@ auto Store::make(Engine*) -> own<Store*> {
store->isolate_ = isolate;
store->context_ = v8::Eternal<v8::Context>(isolate, context);
-
- // Create strings.
- static const char* const raw_strings[V8_S_COUNT] = {
- "", "i32", "i64", "f32", "f64", "anyref",
- "anyfunc", "value", "mutable", "element", "initial", "maximum",
- };
- for (int i = 0; i < V8_S_COUNT; ++i) {
- auto maybe = v8::String::NewFromUtf8(isolate, raw_strings[i],
- v8::NewStringType::kNormal);
- if (maybe.IsEmpty()) return own<Store*>();
- auto string = maybe.ToLocalChecked();
- store->strings_[i] = v8::Eternal<v8::String>(isolate, string);
- }
-
- for (int i = 0; i < V8_Y_COUNT; ++i) {
- auto symbol = v8::Symbol::New(isolate);
- store->symbols_[i] = v8::Eternal<v8::Symbol>(isolate, symbol);
- }
-
- // Extract functions.
- auto global = context->Global();
- auto maybe_wasm_name = v8::String::NewFromUtf8(isolate, "WebAssembly",
- v8::NewStringType::kNormal);
- if (maybe_wasm_name.IsEmpty()) return own<Store*>();
- auto wasm_name = maybe_wasm_name.ToLocalChecked();
- auto maybe_wasm = global->Get(context, wasm_name);
- if (maybe_wasm.IsEmpty()) return own<Store*>();
- auto wasm = v8::Local<v8::Object>::Cast(maybe_wasm.ToLocalChecked());
- v8::Local<v8::Object> weakmap;
- v8::Local<v8::Object> weakmap_proto;
-
- struct {
- const char* name;
- v8::Local<v8::Object>* carrier;
- } raw_functions[V8_F_COUNT] = {
- {"WeakMap", &global}, {"prototype", &weakmap},
- {"get", &weakmap_proto}, {"set", &weakmap_proto},
- {"Module", &wasm}, {"Global", &wasm},
- {"Table", &wasm}, {"Memory", &wasm},
- {"Instance", &wasm}, {"validate", &wasm},
- };
- for (int i = 0; i < V8_F_COUNT; ++i) {
- auto maybe_name = v8::String::NewFromUtf8(isolate, raw_functions[i].name,
- v8::NewStringType::kNormal);
- if (maybe_name.IsEmpty()) return own<Store*>();
- auto name = maybe_name.ToLocalChecked();
- assert(!raw_functions[i].carrier->IsEmpty());
- // TODO(wasm+): remove
- if ((*raw_functions[i].carrier)->IsUndefined()) continue;
- auto maybe_obj = (*raw_functions[i].carrier)->Get(context, name);
- if (maybe_obj.IsEmpty()) return own<Store*>();
- auto obj = v8::Local<v8::Object>::Cast(maybe_obj.ToLocalChecked());
- if (i == V8_F_WEAKMAP_PROTO) {
- assert(obj->IsObject());
- weakmap_proto = obj;
- } else {
- assert(obj->IsFunction());
- auto function = v8::Local<v8::Function>::Cast(obj);
- store->functions_[i] = v8::Eternal<v8::Function>(isolate, function);
- if (i == V8_F_WEAKMAP) weakmap = function;
- }
- }
-
- // Create host data weak map.
- v8::Local<v8::Value>* empty_args = nullptr;
- auto maybe_weakmap =
- store->v8_function(V8_F_WEAKMAP)->NewInstance(context, 0, empty_args);
- if (maybe_weakmap.IsEmpty()) return own<Store*>();
- auto map = v8::Local<v8::Object>::Cast(maybe_weakmap.ToLocalChecked());
- assert(map->IsWeakMap());
- store->host_data_map_ = v8::Eternal<v8::Object>(isolate, map);
}
store->isolate()->Enter();
@@ -1599,92 +1104,7 @@ auto ExportType::type() const -> const ExternType* {
}
///////////////////////////////////////////////////////////////////////////////
-// Conversions of types from and to V8 objects
-
-// Types
-
-auto valtype_to_v8(StoreImpl* store, const ValType* type)
- -> v8::Local<v8::Value> {
- v8_string_t string;
- switch (type->kind()) {
- case I32:
- string = V8_S_I32;
- break;
- case I64:
- string = V8_S_I64;
- break;
- case F32:
- string = V8_S_F32;
- break;
- case F64:
- string = V8_S_F64;
- break;
- case ANYREF:
- string = V8_S_ANYREF;
- break;
- case FUNCREF:
- string = V8_S_ANYFUNC;
- break;
- default:
- // TODO(wasm+): support new value types
- UNREACHABLE();
- }
- return store->v8_string(string);
-}
-
-auto mutability_to_v8(StoreImpl* store, Mutability mutability)
- -> v8::Local<v8::Boolean> {
- return v8::Boolean::New(store->isolate(), mutability == VAR);
-}
-
-void limits_to_v8(StoreImpl* store, Limits limits, v8::Local<v8::Object> desc) {
- auto isolate = store->isolate();
- auto context = store->context();
- ignore(desc->DefineOwnProperty(
- context, store->v8_string(V8_S_MINIMUM),
- v8::Integer::NewFromUnsigned(isolate, limits.min)));
- if (limits.max != Limits(0).max) {
- ignore(desc->DefineOwnProperty(
- context, store->v8_string(V8_S_MAXIMUM),
- v8::Integer::NewFromUnsigned(isolate, limits.max)));
- }
-}
-
-auto globaltype_to_v8(StoreImpl* store, const GlobalType* type)
- -> v8::Local<v8::Object> {
- auto isolate = store->isolate();
- auto context = store->context();
- auto desc = v8::Object::New(isolate);
- ignore(desc->DefineOwnProperty(context, store->v8_string(V8_S_VALUE),
- valtype_to_v8(store, type->content())));
- ignore(desc->DefineOwnProperty(context, store->v8_string(V8_S_MUTABLE),
- mutability_to_v8(store, type->mutability())));
- return desc;
-}
-
-auto tabletype_to_v8(StoreImpl* store, const TableType* type)
- -> v8::Local<v8::Object> {
- auto isolate = store->isolate();
- auto context = store->context();
- auto desc = v8::Object::New(isolate);
- ignore(desc->DefineOwnProperty(context, store->v8_string(V8_S_ELEMENT),
- valtype_to_v8(store, type->element())));
- limits_to_v8(store, type->limits(), desc);
- return desc;
-}
-
-auto memorytype_to_v8(StoreImpl* store, const MemoryType* type)
- -> v8::Local<v8::Object> {
- auto isolate = store->isolate();
- auto desc = v8::Object::New(isolate);
- limits_to_v8(store, type->limits(), desc);
- return desc;
-}
-
-///////////////////////////////////////////////////////////////////////////////
-// Runtime Values
-
-// Values
+// Conversions of values from and to V8 objects
auto val_to_v8(StoreImpl* store, const Val& v) -> v8::Local<v8::Value> {
auto isolate = store->isolate();
@@ -1710,25 +1130,50 @@ auto val_to_v8(StoreImpl* store, const Val& v) -> v8::Local<v8::Value> {
}
}
-auto v8_to_val(StoreImpl* store, v8::Local<v8::Value> value, const ValType* t)
- -> own<Val> {
- auto context = store->context();
- switch (t->kind()) {
+own<Val> v8_to_val(i::Isolate* isolate, i::Handle<i::Object> value,
+ ValKind kind) {
+ switch (kind) {
case I32:
- return Val(value->Int32Value(context).ToChecked());
- case I64: {
- auto bigint = value->ToBigInt(context).ToLocalChecked();
- return Val(bigint->Int64Value());
- }
- case F32: {
- auto number = value->NumberValue(context).ToChecked();
- return Val(static_cast<float32_t>(number));
- }
+ do {
+ if (value->IsSmi()) return Val(i::Smi::ToInt(*value));
+ if (value->IsHeapNumber()) {
+ return Val(i::DoubleToInt32(i::HeapNumber::cast(*value).value()));
+ }
+ value = i::Object::ToInt32(isolate, value).ToHandleChecked();
+ // This will loop back at most once.
+ } while (true);
+ UNREACHABLE();
+ case I64:
+ if (value->IsBigInt()) return Val(i::BigInt::cast(*value).AsInt64());
+ return Val(
+ i::BigInt::FromObject(isolate, value).ToHandleChecked()->AsInt64());
+ case F32:
+ do {
+ if (value->IsSmi()) {
+ return Val(static_cast<float32_t>(i::Smi::ToInt(*value)));
+ }
+ if (value->IsHeapNumber()) {
+ return Val(i::DoubleToFloat32(i::HeapNumber::cast(*value).value()));
+ }
+ value = i::Object::ToNumber(isolate, value).ToHandleChecked();
+ // This will loop back at most once.
+ } while (true);
+ UNREACHABLE();
case F64:
- return Val(value->NumberValue(context).ToChecked());
+ do {
+ if (value->IsSmi()) {
+ return Val(static_cast<float64_t>(i::Smi::ToInt(*value)));
+ }
+ if (value->IsHeapNumber()) {
+ return Val(i::HeapNumber::cast(*value).value());
+ }
+ value = i::Object::ToNumber(isolate, value).ToHandleChecked();
+ // This will loop back at most once.
+ } while (true);
+ UNREACHABLE();
case ANYREF:
case FUNCREF: {
- if (value->IsNull()) {
+ if (value->IsNull(isolate)) {
return Val(nullptr);
} else {
WASM_UNIMPLEMENTED("ref value");
@@ -1737,93 +1182,87 @@ auto v8_to_val(StoreImpl* store, v8::Local<v8::Value> value, const ValType* t)
}
}
+i::Handle<i::String> VecToString(i::Isolate* isolate,
+ const vec<byte_t>& chars) {
+ return isolate->factory()
+ ->NewStringFromUtf8({chars.get(), chars.size()})
+ .ToHandleChecked();
+}
+
// References
-template <class Ref>
-class RefImpl : public v8::Persistent<v8::Object> {
+template <class Ref, class JSType>
+class RefImpl {
public:
- RefImpl() = delete;
-
- static auto make(StoreImpl* store, v8::Local<v8::Object> obj) -> own<Ref*> {
- static_assert(sizeof(RefImpl) == sizeof(v8::Persistent<v8::Object>),
- "incompatible object layout");
- RefImpl* self =
- static_cast<RefImpl*>(new (std::nothrow) v8::Persistent<v8::Object>());
+ static own<Ref*> make(StoreImpl* store, i::Handle<JSType> obj) {
+ RefImpl* self = new (std::nothrow) RefImpl();
if (!self) return nullptr;
- self->Reset(store->isolate(), obj);
+ i::Isolate* isolate = store->i_isolate();
+ self->val_ = isolate->global_handles()->Create(*obj);
return make_own(seal<Ref>(self));
}
- auto copy() const -> own<Ref*> {
- v8::HandleScope handle_scope(isolate());
- return make(store(), v8_object());
+ void Reset() {
+ i::GlobalHandles::Destroy(location());
+ if (host_data_) {
+ if (host_data_->finalizer) {
+ host_data_->finalizer(host_data_->info);
+ }
+ delete host_data_;
+ }
}
- auto store() const -> StoreImpl* { return StoreImpl::get(isolate()); }
+ own<Ref*> copy() const { return make(store(), v8_object()); }
- auto isolate() const -> v8::Isolate* {
- return v8::wasm::object_isolate(*this);
- }
-
- auto v8_object() const -> v8::Local<v8::Object> { return Get(isolate()); }
+ StoreImpl* store() const { return StoreImpl::get(isolate()); }
- auto get_host_info() const -> void* {
- v8::HandleScope handle_scope(isolate());
- auto store = this->store();
+ i::Isolate* isolate() const { return val_->GetIsolate(); }
- v8::Local<v8::Value> args[] = {v8_object()};
- auto maybe_result =
- store->v8_function(V8_F_WEAKMAP_GET)
- ->Call(store->context(), store->host_data_map(), 1, args);
- if (maybe_result.IsEmpty()) return nullptr;
+ i::Handle<JSType> v8_object() const { return i::Handle<JSType>::cast(val_); }
- auto data = v8::wasm::foreign_get(maybe_result.ToLocalChecked());
- return reinterpret_cast<HostData*>(data)->info;
+ void* get_host_info() const {
+ if (host_data_ == nullptr) return nullptr;
+ return host_data_->info;
}
void set_host_info(void* info, void (*finalizer)(void*)) {
- v8::HandleScope handle_scope(isolate());
- auto store = this->store();
-
- // V8 attaches finalizers to handles instead of objects, and such handles
- // cannot be reused after the finalizer has been invoked.
- // So we need to create them separately from the pool.
- auto data = new HostData(store->isolate(), v8_object(), info, finalizer);
- data->handle.template SetWeak<HostData>(data, &v8_finalizer,
- v8::WeakCallbackType::kParameter);
- auto foreign = v8::wasm::foreign_new(store->isolate(), data);
- v8::Local<v8::Value> args[] = {v8_object(), foreign};
- auto maybe_result =
- store->v8_function(V8_F_WEAKMAP_SET)
- ->Call(store->context(), store->host_data_map(), 2, args);
- if (maybe_result.IsEmpty()) return;
+ host_data_ = new HostData(location(), info, finalizer);
+ i::GlobalHandles::MakeWeak(host_data_->location, host_data_, &v8_finalizer,
+ v8::WeakCallbackType::kParameter);
}
private:
struct HostData {
- HostData(v8::Isolate* isolate, v8::Local<v8::Object> object, void* info,
- void (*finalizer)(void*))
- : handle(isolate, object), info(info), finalizer(finalizer) {}
- v8::Persistent<v8::Object> handle;
+ HostData(i::Address* location, void* info, void (*finalizer)(void*))
+ : location(location), info(info), finalizer(finalizer) {}
+ i::Address* location;
void* info;
void (*finalizer)(void*);
};
- static void v8_finalizer(const v8::WeakCallbackInfo<HostData>& info) {
- auto data = info.GetParameter();
- data->handle.Reset(); // Must reset weak handle before deleting it!
+ RefImpl() {}
+
+ static void v8_finalizer(const v8::WeakCallbackInfo<void>& info) {
+ HostData* data = reinterpret_cast<HostData*>(info.GetParameter());
+ i::GlobalHandles::Destroy(data->location);
if (data->finalizer) (*data->finalizer)(data->info);
delete data;
}
+
+ i::Address* location() const {
+ return reinterpret_cast<i::Address*>(val_.address());
+ }
+
+ i::Handle<i::JSReceiver> val_;
+ HostData* host_data_ = nullptr;
};
template <>
struct implement<Ref> {
- using type = RefImpl<Ref>;
+ using type = RefImpl<Ref, i::JSReceiver>;
};
Ref::~Ref() {
- v8::HandleScope handle_scope(impl(this)->isolate());
impl(this)->Reset();
delete impl(this);
}
@@ -1845,7 +1284,7 @@ void Ref::set_host_info(void* info, void (*finalizer)(void*)) {
template <>
struct implement<Trap> {
- using type = RefImpl<Trap>;
+ using type = RefImpl<Trap, i::JSReceiver>;
};
Trap::~Trap() {}
@@ -1854,31 +1293,33 @@ auto Trap::copy() const -> own<Trap*> { return impl(this)->copy(); }
auto Trap::make(Store* store_abs, const Message& message) -> own<Trap*> {
auto store = impl(store_abs);
- v8::Isolate* isolate = store->isolate();
- v8::HandleScope handle_scope(isolate);
-
- auto maybe_string = v8::String::NewFromUtf8(isolate, message.get(),
- v8::NewStringType::kNormal,
- static_cast<int>(message.size()));
- if (maybe_string.IsEmpty()) return own<Trap*>();
- auto exception = v8::Exception::Error(maybe_string.ToLocalChecked());
- return RefImpl<Trap>::make(store, v8::Local<v8::Object>::Cast(exception));
+ i::Isolate* isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::String> string = VecToString(isolate, message);
+ i::Handle<i::JSReceiver> exception = i::Handle<i::JSReceiver>::cast(
+ isolate->factory()->NewError(isolate->error_function(), string));
+ return implement<Trap>::type::make(store, exception);
}
auto Trap::message() const -> Message {
auto isolate = impl(this)->isolate();
- v8::HandleScope handle_scope(isolate);
+ i::HandleScope handle_scope(isolate);
- auto message = v8::Exception::CreateMessage(isolate, impl(this)->v8_object());
- v8::String::Utf8Value string(isolate, message->Get());
- return vec<byte_t>::make(std::string(*string));
+ i::Handle<i::JSMessageObject> message =
+ isolate->CreateMessage(impl(this)->v8_object(), nullptr);
+ i::Handle<i::String> result = i::MessageHandler::GetMessage(isolate, message);
+ result = i::String::Flatten(isolate, result); // For performance.
+ int length = 0;
+ std::unique_ptr<char[]> utf8 =
+ result->ToCString(i::DISALLOW_NULLS, i::FAST_STRING_TRAVERSAL, &length);
+ return vec<byte_t>::adopt(length, utf8.release());
}
// Foreign Objects
template <>
struct implement<Foreign> {
- using type = RefImpl<Foreign>;
+ using type = RefImpl<Foreign, i::JSReceiver>;
};
Foreign::~Foreign() {}
@@ -1887,18 +1328,18 @@ auto Foreign::copy() const -> own<Foreign*> { return impl(this)->copy(); }
auto Foreign::make(Store* store_abs) -> own<Foreign*> {
auto store = impl(store_abs);
- auto isolate = store->isolate();
- v8::HandleScope handle_scope(isolate);
+ auto isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
- auto obj = v8::Object::New(isolate);
- return RefImpl<Foreign>::make(store, obj);
+ auto obj = i::Handle<i::JSReceiver>();
+ return implement<Foreign>::type::make(store, obj);
}
// Modules
template <>
struct implement<Module> {
- using type = RefImpl<Module>;
+ using type = RefImpl<Module, i::WasmModuleObject>;
};
Module::~Module() {}
@@ -1906,89 +1347,102 @@ Module::~Module() {}
auto Module::copy() const -> own<Module*> { return impl(this)->copy(); }
auto Module::validate(Store* store_abs, const vec<byte_t>& binary) -> bool {
- auto store = impl(store_abs);
- v8::Isolate* isolate = store->isolate();
- v8::HandleScope handle_scope(isolate);
-
- auto array_buffer = v8::ArrayBuffer::New(
- isolate, const_cast<byte_t*>(binary.get()), binary.size());
-
- v8::Local<v8::Value> args[] = {array_buffer};
- auto result = store->v8_function(V8_F_VALIDATE)
- ->Call(store->context(), v8::Undefined(isolate), 1, args);
- if (result.IsEmpty()) return false;
-
- return result.ToLocalChecked()->IsTrue();
+ i::wasm::ModuleWireBytes bytes(
+ {reinterpret_cast<const uint8_t*>(binary.get()), binary.size()});
+ i::Isolate* isolate = impl(store_abs)->i_isolate();
+ i::wasm::WasmFeatures features = i::wasm::WasmFeaturesFromIsolate(isolate);
+ return isolate->wasm_engine()->SyncValidate(isolate, features, bytes);
}
-auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module*> {
- auto store = impl(store_abs);
- auto isolate = store->isolate();
- auto context = store->context();
- v8::HandleScope handle_scope(isolate);
-
- auto array_buffer = v8::ArrayBuffer::New(
- isolate, const_cast<byte_t*>(binary.get()), binary.size());
+class NopErrorThrower : public i::wasm::ErrorThrower {
+ public:
+ explicit NopErrorThrower(i::Isolate* isolate)
+ : i::wasm::ErrorThrower(isolate, "ignored") {}
+ ~NopErrorThrower() { Reset(); }
+};
- v8::Local<v8::Value> args[] = {array_buffer};
- auto maybe_obj =
- store->v8_function(V8_F_MODULE)->NewInstance(context, 1, args);
- if (maybe_obj.IsEmpty()) return nullptr;
- return RefImpl<Module>::make(store, maybe_obj.ToLocalChecked());
+auto Module::make(Store* store_abs, const vec<byte_t>& binary) -> own<Module*> {
+ StoreImpl* store = impl(store_abs);
+ i::Isolate* isolate = store->i_isolate();
+ i::HandleScope scope(isolate);
+ i::wasm::ModuleWireBytes bytes(
+ {reinterpret_cast<const uint8_t*>(binary.get()), binary.size()});
+ i::wasm::WasmFeatures features = i::wasm::WasmFeaturesFromIsolate(isolate);
+ NopErrorThrower thrower(isolate);
+ i::Handle<i::WasmModuleObject> module;
+ if (!isolate->wasm_engine()
+ ->SyncCompile(isolate, features, &thrower, bytes)
+ .ToHandle(&module)) {
+ return nullptr;
+ }
+ return implement<Module>::type::make(store, module);
}
auto Module::imports() const -> vec<ImportType*> {
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto module = impl(this)->v8_object();
- auto binary =
- vec<byte_t>::adopt(v8::wasm::module_binary_size(module),
- const_cast<byte_t*>(v8::wasm::module_binary(module)));
+ i::Vector<const uint8_t> wire_bytes =
+ impl(this)->v8_object()->native_module()->wire_bytes();
+ vec<const byte_t> binary = vec<const byte_t>::adopt(
+ wire_bytes.size(), reinterpret_cast<const byte_t*>(wire_bytes.begin()));
auto imports = wasm::bin::imports(binary);
binary.release();
return imports;
}
-auto Module::exports() const -> vec<ExportType*> {
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto module = impl(this)->v8_object();
- auto binary =
- vec<byte_t>::adopt(v8::wasm::module_binary_size(module),
- const_cast<byte_t*>(v8::wasm::module_binary(module)));
+vec<ExportType*> ExportsImpl(i::Handle<i::WasmModuleObject> module_obj) {
+ i::Vector<const uint8_t> wire_bytes =
+ module_obj->native_module()->wire_bytes();
+ vec<const byte_t> binary = vec<const byte_t>::adopt(
+ wire_bytes.size(), reinterpret_cast<const byte_t*>(wire_bytes.begin()));
auto exports = wasm::bin::exports(binary);
binary.release();
return exports;
}
+auto Module::exports() const -> vec<ExportType*> {
+ return ExportsImpl(impl(this)->v8_object());
+}
+
auto Module::serialize() const -> vec<byte_t> {
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto module = impl(this)->v8_object();
- auto binary_size = v8::wasm::module_binary_size(module);
- auto serial_size = v8::wasm::module_serialize_size(module);
- auto size_size = i::wasm::LEBHelper::sizeof_u64v(binary_size);
- auto buffer =
+ i::wasm::NativeModule* native_module =
+ impl(this)->v8_object()->native_module();
+ i::Vector<const uint8_t> wire_bytes = native_module->wire_bytes();
+ size_t binary_size = wire_bytes.size();
+ i::wasm::WasmSerializer serializer(native_module);
+ size_t serial_size = serializer.GetSerializedNativeModuleSize();
+ size_t size_size = i::wasm::LEBHelper::sizeof_u64v(binary_size);
+ vec<byte_t> buffer =
vec<byte_t>::make_uninitialized(size_size + binary_size + serial_size);
- auto ptr = buffer.get();
+ byte_t* ptr = buffer.get();
i::wasm::LEBHelper::write_u64v(reinterpret_cast<uint8_t**>(&ptr),
binary_size);
- std::memcpy(ptr, v8::wasm::module_binary(module), binary_size);
+ std::memcpy(ptr, wire_bytes.begin(), binary_size);
ptr += binary_size;
- if (!v8::wasm::module_serialize(module, ptr, serial_size)) buffer.reset();
+ if (!serializer.SerializeNativeModule(
+ {reinterpret_cast<uint8_t*>(ptr), serial_size})) {
+ buffer.reset();
+ }
return std::move(buffer);
}
auto Module::deserialize(Store* store_abs, const vec<byte_t>& serialized)
-> own<Module*> {
- auto store = impl(store_abs);
- auto isolate = store->isolate();
- v8::HandleScope handle_scope(isolate);
- auto ptr = serialized.get();
- auto binary_size = wasm::bin::u64(ptr);
- auto size_size = ptr - serialized.get();
- auto serial_size = serialized.size() - size_size - binary_size;
- auto maybe_obj = v8::wasm::module_deserialize(isolate, ptr, binary_size,
- ptr + binary_size, serial_size);
- if (maybe_obj.IsEmpty()) return nullptr;
- return RefImpl<Module>::make(store, maybe_obj.ToLocalChecked());
+ StoreImpl* store = impl(store_abs);
+ i::Isolate* isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
+ const byte_t* ptr = serialized.get();
+ uint64_t binary_size = wasm::bin::u64(ptr);
+ ptrdiff_t size_size = ptr - serialized.get();
+ size_t serial_size = serialized.size() - size_size - binary_size;
+ i::Handle<i::WasmModuleObject> module_obj;
+ size_t data_size = static_cast<size_t>(binary_size);
+ if (!i::wasm::DeserializeNativeModule(
+ isolate,
+ {reinterpret_cast<const uint8_t*>(ptr + data_size), serial_size},
+ {reinterpret_cast<const uint8_t*>(ptr), data_size})
+ .ToHandle(&module_obj)) {
+ return nullptr;
+ }
+ return implement<Module>::type::make(store, module_obj);
}
// TODO(v8): do better when V8 can do better.
@@ -2021,7 +1475,7 @@ auto Module::obtain(Store* store, const Shared<Module>* shared)
template <>
struct implement<Extern> {
- using type = RefImpl<Extern>;
+ using type = RefImpl<Extern, i::JSReceiver>;
};
Extern::~Extern() {}
@@ -2029,8 +1483,14 @@ Extern::~Extern() {}
auto Extern::copy() const -> own<Extern*> { return impl(this)->copy(); }
auto Extern::kind() const -> ExternKind {
- v8::HandleScope handle_scope(impl(this)->isolate());
- return v8::wasm::extern_kind(impl(this)->v8_object());
+ i::Handle<i::JSReceiver> obj = impl(this)->v8_object();
+ if (i::WasmExportedFunction::IsWasmExportedFunction(*obj)) {
+ return wasm::EXTERN_FUNC;
+ }
+ if (obj->IsWasmGlobalObject()) return wasm::EXTERN_GLOBAL;
+ if (obj->IsWasmTableObject()) return wasm::EXTERN_TABLE;
+ if (obj->IsWasmMemoryObject()) return wasm::EXTERN_MEMORY;
+ UNREACHABLE();
}
auto Extern::type() const -> own<ExternType*> {
@@ -2078,7 +1538,7 @@ auto Extern::memory() const -> const Memory* {
return kind() == EXTERN_MEMORY ? static_cast<const Memory*>(this) : nullptr;
}
-auto extern_to_v8(const Extern* ex) -> v8::Local<v8::Value> {
+auto extern_to_v8(const Extern* ex) -> i::Handle<i::JSReceiver> {
return impl(ex)->v8_object();
}
@@ -2086,7 +1546,7 @@ auto extern_to_v8(const Extern* ex) -> v8::Local<v8::Value> {
template <>
struct implement<Func> {
- using type = RefImpl<Func>;
+ using type = RefImpl<Func, i::JSFunction>;
};
Func::~Func() {}
@@ -2115,72 +1575,87 @@ struct FuncData {
if (finalizer) (*finalizer)(env);
}
- static void v8_callback(const v8::FunctionCallbackInfo<v8::Value>&);
+ static i::Address v8_callback(void* data, i::Address argv);
static void finalize_func_data(void* data);
};
namespace {
-auto make_func(Store* store_abs, FuncData* data) -> own<Func*> {
- auto store = impl(store_abs);
- auto isolate = store->isolate();
- v8::HandleScope handle_scope(isolate);
- auto context = store->context();
-
- // Create V8 function
- auto v8_data = v8::wasm::foreign_new(isolate, data);
- auto function_template =
- v8::FunctionTemplate::New(isolate, &FuncData::v8_callback, v8_data);
- auto maybe_func_obj = function_template->GetFunction(context);
- if (maybe_func_obj.IsEmpty()) return own<Func*>();
- auto func_obj = maybe_func_obj.ToLocalChecked();
-
- // Create wrapper instance
- auto binary = wasm::bin::wrapper(data->type.get());
- auto module = Module::make(store_abs, binary);
-
- auto imports_obj = v8::Object::New(isolate);
- auto module_obj = v8::Object::New(isolate);
- auto str = store->v8_string(V8_S_EMPTY);
- ignore(imports_obj->DefineOwnProperty(context, str, module_obj));
- ignore(module_obj->DefineOwnProperty(context, str, func_obj));
-
- v8::Local<v8::Value> instantiate_args[] = {impl(module.get())->v8_object(),
- imports_obj};
- auto instance_obj = store->v8_function(V8_F_INSTANCE)
- ->NewInstance(context, 2, instantiate_args)
- .ToLocalChecked();
- assert(!instance_obj.IsEmpty());
- assert(instance_obj->IsObject());
- auto exports_obj = v8::wasm::instance_exports(instance_obj);
- assert(!exports_obj.IsEmpty());
- assert(exports_obj->IsObject());
- auto wrapped_func_obj = v8::Local<v8::Function>::Cast(
- exports_obj->Get(context, str).ToLocalChecked());
- assert(!wrapped_func_obj.IsEmpty());
- assert(wrapped_func_obj->IsFunction());
-
- auto func = RefImpl<Func>::make(store, wrapped_func_obj);
- func->set_host_info(data, &FuncData::finalize_func_data);
- return func;
-}
+// TODO(jkummerow): Generalize for WasmExportedFunction and WasmCapiFunction.
+class SignatureHelper : public i::AllStatic {
+ public:
+ // Use an invalid type as a marker separating params and results.
+ static const i::wasm::ValueType kMarker = i::wasm::kWasmStmt;
+
+ static i::Handle<i::PodArray<i::wasm::ValueType>> Serialize(
+ i::Isolate* isolate, FuncType* type) {
+ int sig_size =
+ static_cast<int>(type->params().size() + type->results().size() + 1);
+ i::Handle<i::PodArray<i::wasm::ValueType>> sig =
+ i::PodArray<i::wasm::ValueType>::New(isolate, sig_size,
+ i::AllocationType::kOld);
+ int index = 0;
+ // TODO(jkummerow): Consider making vec<> range-based for-iterable.
+ for (size_t i = 0; i < type->results().size(); i++) {
+ sig->set(index++,
+ v8::wasm::wasm_valtype_to_v8(type->results()[i]->kind()));
+ }
+ // {sig->set} needs to take the address of its second parameter,
+ // so we can't pass in the static const kMarker directly.
+ i::wasm::ValueType marker = kMarker;
+ sig->set(index++, marker);
+ for (size_t i = 0; i < type->params().size(); i++) {
+ sig->set(index++,
+ v8::wasm::wasm_valtype_to_v8(type->params()[i]->kind()));
+ }
+ return sig;
+ }
-auto func_type(v8::Local<v8::Object> v8_func) -> own<FuncType*> {
- auto param_arity = v8::wasm::func_type_param_arity(v8_func);
- auto result_arity = v8::wasm::func_type_result_arity(v8_func);
- auto params = vec<ValType*>::make_uninitialized(param_arity);
- auto results = vec<ValType*>::make_uninitialized(result_arity);
+ static own<FuncType*> Deserialize(i::PodArray<i::wasm::ValueType> sig) {
+ int result_arity = ResultArity(sig);
+ int param_arity = sig.length() - result_arity - 1;
+ vec<ValType*> results = vec<ValType*>::make_uninitialized(result_arity);
+ vec<ValType*> params = vec<ValType*>::make_uninitialized(param_arity);
- for (size_t i = 0; i < params.size(); ++i) {
- auto kind = v8::wasm::func_type_param(v8_func, i);
- params[i] = ValType::make(kind);
+ int i = 0;
+ for (; i < result_arity; ++i) {
+ results[i] = ValType::make(v8::wasm::v8_valtype_to_wasm(sig.get(i)));
+ }
+ i++; // Skip marker.
+ for (int p = 0; i < sig.length(); ++i, ++p) {
+ params[p] = ValType::make(v8::wasm::v8_valtype_to_wasm(sig.get(i)));
+ }
+ return FuncType::make(std::move(params), std::move(results));
}
- for (size_t i = 0; i < results.size(); ++i) {
- auto kind = v8::wasm::func_type_result(v8_func, i);
- results[i] = ValType::make(kind);
+
+ static int ResultArity(i::PodArray<i::wasm::ValueType> sig) {
+ int count = 0;
+ for (; count < sig.length(); count++) {
+ if (sig.get(count) == kMarker) return count;
+ }
+ UNREACHABLE();
}
- return FuncType::make(std::move(params), std::move(results));
+ static int ParamArity(i::PodArray<i::wasm::ValueType> sig) {
+ return sig.length() - ResultArity(sig) - 1;
+ }
+
+ static i::PodArray<i::wasm::ValueType> GetSig(
+ i::Handle<i::JSFunction> function) {
+ return i::WasmCapiFunction::cast(*function).GetSerializedSignature();
+ }
+};
+
+auto make_func(Store* store_abs, FuncData* data) -> own<Func*> {
+ auto store = impl(store_abs);
+ i::Isolate* isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::WasmCapiFunction> function = i::WasmCapiFunction::New(
+ isolate, reinterpret_cast<i::Address>(&FuncData::v8_callback), data,
+ SignatureHelper::Serialize(isolate, data->type.get()));
+ auto func = implement<Func>::type::make(store, function);
+ func->set_host_info(data, &FuncData::finalize_func_data);
+ return func;
}
} // namespace
@@ -2202,110 +1677,221 @@ auto Func::make(Store* store, const FuncType* type, callback_with_env callback,
}
auto Func::type() const -> own<FuncType*> {
- // return impl(this)->data->type->copy();
- v8::HandleScope handle_scope(impl(this)->isolate());
- return func_type(impl(this)->v8_object());
+ i::Handle<i::JSFunction> func = impl(this)->v8_object();
+ if (i::WasmCapiFunction::IsWasmCapiFunction(*func)) {
+ return SignatureHelper::Deserialize(SignatureHelper::GetSig(func));
+ }
+ DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*func));
+ i::Handle<i::WasmExportedFunction> function =
+ i::Handle<i::WasmExportedFunction>::cast(func);
+ i::wasm::FunctionSig* sig =
+ function->instance().module()->functions[function->function_index()].sig;
+ uint32_t param_arity = static_cast<uint32_t>(sig->parameter_count());
+ uint32_t result_arity = static_cast<uint32_t>(sig->return_count());
+ auto params = vec<ValType*>::make_uninitialized(param_arity);
+ auto results = vec<ValType*>::make_uninitialized(result_arity);
+
+ for (size_t i = 0; i < params.size(); ++i) {
+ auto kind = v8::wasm::v8_valtype_to_wasm(sig->GetParam(i));
+ params[i] = ValType::make(kind);
+ }
+ for (size_t i = 0; i < results.size(); ++i) {
+ auto kind = v8::wasm::v8_valtype_to_wasm(sig->GetReturn(i));
+ results[i] = ValType::make(kind);
+ }
+ return FuncType::make(std::move(params), std::move(results));
}
auto Func::param_arity() const -> size_t {
- v8::HandleScope handle_scope(impl(this)->isolate());
- return v8::wasm::func_type_param_arity(impl(this)->v8_object());
+ i::Handle<i::JSFunction> func = impl(this)->v8_object();
+ if (i::WasmCapiFunction::IsWasmCapiFunction(*func)) {
+ return SignatureHelper::ParamArity(SignatureHelper::GetSig(func));
+ }
+ DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*func));
+ i::Handle<i::WasmExportedFunction> function =
+ i::Handle<i::WasmExportedFunction>::cast(func);
+ i::wasm::FunctionSig* sig =
+ function->instance().module()->functions[function->function_index()].sig;
+ return sig->parameter_count();
}
auto Func::result_arity() const -> size_t {
- v8::HandleScope handle_scope(impl(this)->isolate());
- return v8::wasm::func_type_result_arity(impl(this)->v8_object());
+ i::Handle<i::JSFunction> func = impl(this)->v8_object();
+ if (i::WasmCapiFunction::IsWasmCapiFunction(*func)) {
+ return SignatureHelper::ResultArity(SignatureHelper::GetSig(func));
+ }
+ DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*func));
+ i::Handle<i::WasmExportedFunction> function =
+ i::Handle<i::WasmExportedFunction>::cast(func);
+ i::wasm::FunctionSig* sig =
+ function->instance().module()->functions[function->function_index()].sig;
+ return sig->return_count();
}
auto Func::call(const Val args[], Val results[]) const -> own<Trap*> {
auto func = impl(this);
auto store = func->store();
auto isolate = store->isolate();
+ auto i_isolate = store->i_isolate();
v8::HandleScope handle_scope(isolate);
- auto context = store->context();
- auto type = this->type();
- auto& param_types = type->params();
- auto& result_types = type->results();
-
+ int num_params;
+ int num_results;
+ ValKind result_kind;
+ i::Handle<i::JSFunction> v8_func = func->v8_object();
+ if (i::WasmExportedFunction::IsWasmExportedFunction(*v8_func)) {
+ i::WasmExportedFunction wef = i::WasmExportedFunction::cast(*v8_func);
+ i::wasm::FunctionSig* sig =
+ wef.instance().module()->functions[wef.function_index()].sig;
+ num_params = static_cast<int>(sig->parameter_count());
+ num_results = static_cast<int>(sig->return_count());
+ if (num_results > 0) {
+ result_kind = v8::wasm::v8_valtype_to_wasm(sig->GetReturn(0));
+ }
+#if DEBUG
+ for (int i = 0; i < num_params; i++) {
+ DCHECK_EQ(args[i].kind(), v8::wasm::v8_valtype_to_wasm(sig->GetParam(i)));
+ }
+#endif
+ } else {
+ DCHECK(i::WasmCapiFunction::IsWasmCapiFunction(*v8_func));
+ UNIMPLEMENTED();
+ }
// TODO(rossberg): cache v8_args array per thread.
- auto v8_args = std::unique_ptr<v8::Local<v8::Value>[]>(
- new (std::nothrow) v8::Local<v8::Value>[param_types.size()]);
- for (size_t i = 0; i < param_types.size(); ++i) {
- assert(args[i].kind() == param_types[i]->kind());
- v8_args[i] = val_to_v8(store, args[i]);
+ auto v8_args = std::unique_ptr<i::Handle<i::Object>[]>(
+ new (std::nothrow) i::Handle<i::Object>[num_params]);
+ for (int i = 0; i < num_params; ++i) {
+ v8_args[i] = v8::Utils::OpenHandle(*val_to_v8(store, args[i]));
}
+ // TODO(jkummerow): Use Execution::TryCall instead of manual TryCatch.
v8::TryCatch handler(isolate);
- auto v8_function = v8::Local<v8::Function>::Cast(func->v8_object());
- auto maybe_val =
- v8_function->Call(context, v8::Undefined(isolate),
- static_cast<int>(param_types.size()), v8_args.get());
+ i::MaybeHandle<i::Object> maybe_val = i::Execution::Call(
+ i_isolate, func->v8_object(), i_isolate->factory()->undefined_value(),
+ num_params, v8_args.get());
if (handler.HasCaught()) {
- auto exception = handler.Exception();
- if (!exception->IsObject()) {
- auto maybe_string = exception->ToString(store->context());
- auto string = maybe_string.IsEmpty() ? store->v8_string(V8_S_EMPTY)
- : maybe_string.ToLocalChecked();
- exception = v8::Exception::Error(string);
+ i_isolate->OptionalRescheduleException(true);
+ i::Handle<i::Object> exception =
+ v8::Utils::OpenHandle(*handler.Exception());
+ if (!exception->IsJSReceiver()) {
+ i::MaybeHandle<i::String> maybe_string =
+ i::Object::ToString(i_isolate, exception);
+ i::Handle<i::String> string = maybe_string.is_null()
+ ? i_isolate->factory()->empty_string()
+ : maybe_string.ToHandleChecked();
+ exception =
+ i_isolate->factory()->NewError(i_isolate->error_function(), string);
}
- return RefImpl<Trap>::make(store, v8::Local<v8::Object>::Cast(exception));
+ return implement<Trap>::type::make(
+ store, i::Handle<i::JSReceiver>::cast(exception));
}
- auto val = maybe_val.ToLocalChecked();
- if (result_types.size() == 0) {
- assert(val->IsUndefined());
- } else if (result_types.size() == 1) {
- assert(!val->IsUndefined());
- new (&results[0]) Val(v8_to_val(store, val, result_types[0]));
+ auto val = maybe_val.ToHandleChecked();
+ if (num_results == 0) {
+ assert(val->IsUndefined(i_isolate));
+ } else if (num_results == 1) {
+ assert(!val->IsUndefined(i_isolate));
+ new (&results[0]) Val(v8_to_val(i_isolate, val, result_kind));
} else {
WASM_UNIMPLEMENTED("multiple results");
}
return nullptr;
}
-void FuncData::v8_callback(const v8::FunctionCallbackInfo<v8::Value>& info) {
- auto self = reinterpret_cast<FuncData*>(v8::wasm::foreign_get(info.Data()));
- auto store = impl(self->store);
- auto isolate = store->isolate();
- v8::HandleScope handle_scope(isolate);
+i::Address FuncData::v8_callback(void* data, i::Address argv) {
+ FuncData* self = reinterpret_cast<FuncData*>(data);
- auto& param_types = self->type->params();
- auto& result_types = self->type->results();
+ const vec<ValType*>& param_types = self->type->params();
+ const vec<ValType*>& result_types = self->type->results();
- assert(param_types.size() == static_cast<size_t>(info.Length()));
int num_param_types = static_cast<int>(param_types.size());
int num_result_types = static_cast<int>(result_types.size());
- // TODO(rossberg): cache params and result arrays per thread.
- std::unique_ptr<Val[]> args(new Val[num_param_types]);
+ std::unique_ptr<Val[]> params(new Val[num_param_types]);
std::unique_ptr<Val[]> results(new Val[num_result_types]);
+ i::Address p = argv;
for (int i = 0; i < num_param_types; ++i) {
- args[i] = v8_to_val(store, info[i], param_types[i]);
+ switch (param_types[i]->kind()) {
+ case I32:
+ params[i] = Val(i::ReadUnalignedValue<int32_t>(p));
+ p += 4;
+ break;
+ case I64:
+ params[i] = Val(i::ReadUnalignedValue<int64_t>(p));
+ p += 8;
+ break;
+ case F32:
+ params[i] = Val(i::ReadUnalignedValue<float32_t>(p));
+ p += 4;
+ break;
+ case F64:
+ params[i] = Val(i::ReadUnalignedValue<float64_t>(p));
+ p += 8;
+ break;
+ case ANYREF:
+ case FUNCREF: {
+ i::Address raw = i::ReadUnalignedValue<i::Address>(p);
+ p += sizeof(raw);
+ if (raw == i::kNullAddress) {
+ params[i] = Val(nullptr);
+ } else {
+ i::JSReceiver raw_obj = i::JSReceiver::cast(i::Object(raw));
+ i::Handle<i::JSReceiver> obj(raw_obj, raw_obj.GetIsolate());
+ params[i] = Val(implement<Ref>::type::make(impl(self->store), obj));
+ }
+ break;
+ }
+ }
}
own<Trap*> trap;
if (self->kind == kCallbackWithEnv) {
- trap = self->callback_with_env(self->env, args.get(), results.get());
+ trap = self->callback_with_env(self->env, params.get(), results.get());
} else {
- trap = self->callback(args.get(), results.get());
+ trap = self->callback(params.get(), results.get());
}
if (trap) {
- isolate->ThrowException(impl(trap.get())->v8_object());
- return;
- }
-
- auto ret = info.GetReturnValue();
- if (result_types.size() == 0) {
- ret.SetUndefined();
- } else if (result_types.size() == 1) {
- assert(results[0].kind() == result_types[0]->kind());
- ret.Set(val_to_v8(store, results[0]));
- } else {
- WASM_UNIMPLEMENTED("multiple results");
+ i::Isolate* isolate = impl(self->store)->i_isolate();
+ isolate->Throw(*impl(trap.get())->v8_object());
+ i::Object ex = isolate->pending_exception();
+ isolate->clear_pending_exception();
+ return ex.ptr();
+ }
+
+ p = argv;
+ for (int i = 0; i < num_result_types; ++i) {
+ switch (result_types[i]->kind()) {
+ case I32:
+ i::WriteUnalignedValue(p, results[i].i32());
+ p += 4;
+ break;
+ case I64:
+ i::WriteUnalignedValue(p, results[i].i64());
+ p += 8;
+ break;
+ case F32:
+ i::WriteUnalignedValue(p, results[i].f32());
+ p += 4;
+ break;
+ case F64:
+ i::WriteUnalignedValue(p, results[i].f64());
+ p += 8;
+ break;
+ case ANYREF:
+ case FUNCREF: {
+ if (results[i].ref() == nullptr) {
+ i::WriteUnalignedValue(p, i::kNullAddress);
+ } else {
+ i::WriteUnalignedValue(p, impl(results[i].ref())->v8_object()->ptr());
+ }
+ p += sizeof(i::Address);
+ break;
+ }
+ }
}
+ return i::kNullAddress;
}
void FuncData::finalize_func_data(void* data) {
@@ -2316,7 +1902,7 @@ void FuncData::finalize_func_data(void* data) {
template <>
struct implement<Global> {
- using type = RefImpl<Global>;
+ using type = RefImpl<Global, i::WasmGlobalObject>;
};
Global::~Global() {}
@@ -2325,53 +1911,46 @@ auto Global::copy() const -> own<Global*> { return impl(this)->copy(); }
auto Global::make(Store* store_abs, const GlobalType* type, const Val& val)
-> own<Global*> {
- auto store = impl(store_abs);
- auto isolate = store->isolate();
- v8::HandleScope handle_scope(isolate);
- auto context = store->context();
-
- assert(type->content()->kind() == val.kind());
-
- // Create wrapper instance
- auto binary = wasm::bin::wrapper(type);
- auto module = Module::make(store_abs, binary);
-
- v8::Local<v8::Value> instantiate_args[] = {impl(module.get())->v8_object()};
- auto instance_obj = store->v8_function(V8_F_INSTANCE)
- ->NewInstance(context, 1, instantiate_args)
- .ToLocalChecked();
- auto exports_obj = v8::wasm::instance_exports(instance_obj);
- auto obj = v8::Local<v8::Object>::Cast(
- exports_obj->Get(context, store->v8_string(V8_S_EMPTY)).ToLocalChecked());
- assert(!obj.IsEmpty() && obj->IsObject());
-
- auto global = RefImpl<Global>::make(store, obj);
+ StoreImpl* store = impl(store_abs);
+ i::Isolate* isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
+
+ DCHECK_EQ(type->content()->kind(), val.kind());
+
+ i::wasm::ValueType i_type =
+ v8::wasm::wasm_valtype_to_v8(type->content()->kind());
+ bool is_mutable = (type->mutability() == VAR);
+ const int32_t offset = 0;
+ i::Handle<i::WasmGlobalObject> obj =
+ i::WasmGlobalObject::New(isolate, i::MaybeHandle<i::JSArrayBuffer>(),
+ i::MaybeHandle<i::FixedArray>(), i_type, offset,
+ is_mutable)
+ .ToHandleChecked();
+
+ auto global = implement<Global>::type::make(store, obj);
assert(global);
global->set(val);
return global;
}
auto Global::type() const -> own<GlobalType*> {
- // return impl(this)->data->type->copy();
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto v8_global = impl(this)->v8_object();
- auto kind = v8::wasm::global_type_content(v8_global);
- auto mutability = v8::wasm::global_type_mutable(v8_global) ? VAR : CONST;
+ i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
+ ValKind kind = v8::wasm::v8_valtype_to_wasm(v8_global->type());
+ Mutability mutability = v8_global->is_mutable() ? VAR : CONST;
return GlobalType::make(ValType::make(kind), mutability);
}
auto Global::get() const -> Val {
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto v8_global = impl(this)->v8_object();
+ i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
switch (type()->content()->kind()) {
case I32:
- return Val(v8::wasm::global_get_i32(v8_global));
+ return Val(v8_global->GetI32());
case I64:
- return Val(v8::wasm::global_get_i64(v8_global));
+ return Val(v8_global->GetI64());
case F32:
- return Val(v8::wasm::global_get_f32(v8_global));
+ return Val(v8_global->GetF32());
case F64:
- return Val(v8::wasm::global_get_f64(v8_global));
+ return Val(v8_global->GetF64());
case ANYREF:
case FUNCREF:
WASM_UNIMPLEMENTED("globals of reference type");
@@ -2382,17 +1961,16 @@ auto Global::get() const -> Val {
}
void Global::set(const Val& val) {
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto v8_global = impl(this)->v8_object();
+ i::Handle<i::WasmGlobalObject> v8_global = impl(this)->v8_object();
switch (val.kind()) {
case I32:
- return v8::wasm::global_set_i32(v8_global, val.i32());
+ return v8_global->SetI32(val.i32());
case I64:
- return v8::wasm::global_set_i64(v8_global, val.i64());
+ return v8_global->SetI64(val.i64());
case F32:
- return v8::wasm::global_set_f32(v8_global, val.f32());
+ return v8_global->SetF32(val.f32());
case F64:
- return v8::wasm::global_set_f64(v8_global, val.f64());
+ return v8_global->SetF64(val.f64());
case ANYREF:
case FUNCREF:
WASM_UNIMPLEMENTED("globals of reference type");
@@ -2406,7 +1984,7 @@ void Global::set(const Val& val) {
template <>
struct implement<Table> {
- using type = RefImpl<Table>;
+ using type = RefImpl<Table, i::WasmTableObject>;
};
Table::~Table() {}
@@ -2415,81 +1993,120 @@ auto Table::copy() const -> own<Table*> { return impl(this)->copy(); }
auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
-> own<Table*> {
- auto store = impl(store_abs);
- auto isolate = store->isolate();
- v8::HandleScope handle_scope(isolate);
- auto context = store->context();
-
- v8::Local<v8::Value> init = v8::Null(isolate);
- if (ref) init = impl(ref)->v8_object();
- v8::Local<v8::Value> args[] = {tabletype_to_v8(store, type), init};
- auto maybe_obj =
- store->v8_function(V8_F_TABLE)->NewInstance(context, 2, args);
- if (maybe_obj.IsEmpty()) return own<Table*>();
- auto table = RefImpl<Table>::make(store, maybe_obj.ToLocalChecked());
- // TODO(wasm+): pass reference initialiser as parameter
- if (table && ref) {
- auto size = type->limits().min;
- auto obj = maybe_obj.ToLocalChecked();
- auto maybe_func =
- v8::MaybeLocal<v8::Function>(v8::Local<v8::Function>::Cast(init));
- for (size_t i = 0; i < size; ++i) {
- v8::wasm::table_set(obj, i, maybe_func);
+ StoreImpl* store = impl(store_abs);
+ i::Isolate* isolate = store->i_isolate();
+ i::HandleScope scope(isolate);
+ auto enabled_features = i::wasm::WasmFeaturesFromFlags();
+
+ // Get "element".
+ i::wasm::ValueType i_type;
+ switch (type->element()->kind()) {
+ case FUNCREF:
+ i_type = i::wasm::kWasmAnyFunc;
+ break;
+ case ANYREF:
+ if (enabled_features.anyref) {
+ i_type = i::wasm::kWasmAnyRef;
+ break;
+ } // Else fall through.
+ V8_FALLTHROUGH;
+ default:
+ UNREACHABLE(); // 'element' must be 'FUNCREF'.
+ return nullptr;
+ }
+
+ const Limits& limits = type->limits();
+ uint32_t minimum = limits.min;
+ if (minimum > i::wasm::max_table_init_entries()) return nullptr;
+ uint32_t maximum = limits.max;
+ bool has_maximum = false;
+ if (maximum != Limits(0).max) {
+ has_maximum = true;
+ if (maximum < minimum) return nullptr;
+ if (maximum > i::wasm::max_table_init_entries()) return nullptr;
+ }
+
+ i::Handle<i::FixedArray> backing_store;
+ i::Handle<i::WasmTableObject> table_obj = i::WasmTableObject::New(
+ isolate, i_type, minimum, has_maximum, maximum, &backing_store);
+
+ if (ref) {
+ i::Handle<i::JSReceiver> init = impl(ref)->v8_object();
+ DCHECK(i::wasm::max_table_init_entries() <= i::kMaxInt);
+ for (int i = 0; i < static_cast<int>(minimum); i++) {
+ // This doesn't call WasmTableObject::Set because the table has
+ // just been created, so it can't be imported by any instances
+ // yet that might require updating.
+ DCHECK_EQ(table_obj->dispatch_tables().length(), 0);
+ backing_store->set(i, *init);
}
}
- return table;
+ return implement<Table>::type::make(store, table_obj);
}
auto Table::type() const -> own<TableType*> {
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto v8_table = impl(this)->v8_object();
- uint32_t min = v8::wasm::table_type_min(v8_table);
- uint32_t max = v8::wasm::table_type_max(v8_table);
+ i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
+ uint32_t min = table->current_length();
+ uint32_t max;
+ if (!table->maximum_length().ToUint32(&max)) max = 0xFFFFFFFFu;
// TODO(wasm+): support new element types.
return TableType::make(ValType::make(FUNCREF), Limits(min, max));
}
auto Table::get(size_t index) const -> own<Ref*> {
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto maybe = v8::wasm::table_get(impl(this)->v8_object(), index);
- if (maybe.IsEmpty() || maybe.ToLocalChecked()->IsNull()) return own<Ref*>();
+ i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
+ if (index >= table->current_length()) return own<Ref*>();
+ i::Isolate* isolate = table->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Object> result =
+ i::WasmTableObject::Get(isolate, table, static_cast<uint32_t>(index));
+ if (!result->IsJSFunction()) return own<Ref*>();
+ DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*result) ||
+ i::WasmCapiFunction::IsWasmCapiFunction(*result));
// TODO(wasm+): other references
- auto obj = maybe.ToLocalChecked();
- assert(obj->IsFunction());
- return RefImpl<Func>::make(impl(this)->store(), obj);
+ return implement<Func>::type::make(impl(this)->store(),
+ i::Handle<i::JSFunction>::cast(result));
}
auto Table::set(size_t index, const Ref* ref) -> bool {
- v8::HandleScope handle_scope(impl(this)->isolate());
if (ref && !impl(ref)->v8_object()->IsFunction()) {
WASM_UNIMPLEMENTED("non-function table elements");
}
- auto obj = ref ? v8::MaybeLocal<v8::Function>(
- v8::Local<v8::Function>::Cast(impl(ref)->v8_object()))
- : v8::MaybeLocal<v8::Function>();
- return v8::wasm::table_set(impl(this)->v8_object(), index, obj);
+ i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
+ if (index >= table->current_length()) return false;
+ i::Isolate* isolate = table->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Object> obj =
+ ref ? i::Handle<i::Object>::cast(impl(ref)->v8_object())
+ : i::Handle<i::Object>::cast(
+ i::ReadOnlyRoots(isolate).null_value_handle());
+ i::WasmTableObject::Set(isolate, table, static_cast<uint32_t>(index), obj);
+ return true;
}
+// TODO(jkummerow): Having Table::size_t shadowing "std" size_t is ugly.
auto Table::size() const -> size_t {
- v8::HandleScope handle_scope(impl(this)->isolate());
- // TODO(jkummerow): Having Table::size_t shadowing "std" size_t is ugly.
- return static_cast<Table::size_t>(
- v8::wasm::table_size(impl(this)->v8_object()));
+ return impl(this)->v8_object()->current_length();
}
auto Table::grow(size_t delta, const Ref* ref) -> bool {
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto obj = ref ? v8::MaybeLocal<v8::Function>(
- v8::Local<v8::Function>::Cast(impl(ref)->v8_object()))
- : v8::MaybeLocal<v8::Function>();
- return v8::wasm::table_grow(impl(this)->v8_object(), delta, obj);
+ i::Handle<i::WasmTableObject> table = impl(this)->v8_object();
+ i::Isolate* isolate = table->GetIsolate();
+ i::HandleScope scope(isolate);
+ i::Handle<i::Object> init_value =
+ ref == nullptr
+ ? i::Handle<i::Object>::cast(isolate->factory()->null_value())
+ : i::Handle<i::Object>::cast(impl(ref)->v8_object());
+ int result = i::WasmTableObject::Grow(
+ isolate, table, static_cast<uint32_t>(delta), init_value);
+ return result >= 0;
}
// Memory Instances
template <>
struct implement<Memory> {
- using type = RefImpl<Memory>;
+ using type = RefImpl<Memory, i::WasmMemoryObject>;
};
Memory::~Memory() {}
@@ -2497,51 +2114,64 @@ Memory::~Memory() {}
auto Memory::copy() const -> own<Memory*> { return impl(this)->copy(); }
auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory*> {
- auto store = impl(store_abs);
- auto isolate = store->isolate();
- v8::HandleScope handle_scope(isolate);
- auto context = store->context();
+ StoreImpl* store = impl(store_abs);
+ i::Isolate* isolate = store->i_isolate();
+ i::HandleScope scope(isolate);
- v8::Local<v8::Value> args[] = {memorytype_to_v8(store, type)};
- auto maybe_obj =
- store->v8_function(V8_F_MEMORY)->NewInstance(context, 1, args);
- if (maybe_obj.IsEmpty()) return own<Memory*>();
- return RefImpl<Memory>::make(store, maybe_obj.ToLocalChecked());
+ const Limits& limits = type->limits();
+ uint32_t minimum = limits.min;
+ if (minimum > i::wasm::max_mem_pages()) return nullptr;
+ uint32_t maximum = limits.max;
+ if (maximum != Limits(0).max) {
+ if (maximum < minimum) return nullptr;
+ if (maximum > i::wasm::kSpecMaxWasmMemoryPages) return nullptr;
+ }
+ bool is_shared = false; // TODO(wasm+): Support shared memory.
+ i::Handle<i::WasmMemoryObject> memory_obj;
+ if (!i::WasmMemoryObject::New(isolate, minimum, maximum, is_shared)
+ .ToHandle(&memory_obj)) {
+ return own<Memory*>();
+ }
+ return implement<Memory>::type::make(store, memory_obj);
}
auto Memory::type() const -> own<MemoryType*> {
- v8::HandleScope handle_scope(impl(this)->isolate());
- auto v8_memory = impl(this)->v8_object();
- uint32_t min = v8::wasm::memory_type_min(v8_memory);
- uint32_t max = v8::wasm::memory_type_max(v8_memory);
+ i::Handle<i::WasmMemoryObject> memory = impl(this)->v8_object();
+ uint32_t min = static_cast<uint32_t>(memory->array_buffer().byte_length() /
+ i::wasm::kWasmPageSize);
+ uint32_t max =
+ memory->has_maximum_pages() ? memory->maximum_pages() : 0xFFFFFFFFu;
return MemoryType::make(Limits(min, max));
}
auto Memory::data() const -> byte_t* {
- v8::HandleScope handle_scope(impl(this)->isolate());
- return v8::wasm::memory_data(impl(this)->v8_object());
+ return reinterpret_cast<byte_t*>(
+ impl(this)->v8_object()->array_buffer().backing_store());
}
auto Memory::data_size() const -> size_t {
- v8::HandleScope handle_scope(impl(this)->isolate());
- return v8::wasm::memory_data_size(impl(this)->v8_object());
+ return impl(this)->v8_object()->array_buffer().byte_length();
}
auto Memory::size() const -> pages_t {
- v8::HandleScope handle_scope(impl(this)->isolate());
- return v8::wasm::memory_size(impl(this)->v8_object());
+ return static_cast<pages_t>(
+ impl(this)->v8_object()->array_buffer().byte_length() /
+ i::wasm::kWasmPageSize);
}
auto Memory::grow(pages_t delta) -> bool {
- v8::HandleScope handle_scope(impl(this)->isolate());
- return v8::wasm::memory_grow(impl(this)->v8_object(), delta);
+ i::Handle<i::WasmMemoryObject> memory = impl(this)->v8_object();
+ i::Isolate* isolate = memory->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ int32_t old = i::WasmMemoryObject::Grow(isolate, memory, delta);
+ return old != -1;
}
// Module Instances
template <>
struct implement<Instance> {
- using type = RefImpl<Instance>;
+ using type = RefImpl<Instance, i::WasmInstanceObject>;
};
Instance::~Instance() {}
@@ -2550,90 +2180,84 @@ auto Instance::copy() const -> own<Instance*> { return impl(this)->copy(); }
auto Instance::make(Store* store_abs, const Module* module_abs,
const Extern* const imports[]) -> own<Instance*> {
- auto store = impl(store_abs);
- auto module = impl(module_abs);
- auto isolate = store->isolate();
- auto context = store->context();
- v8::HandleScope handle_scope(isolate);
+ StoreImpl* store = impl(store_abs);
+ const implement<Module>::type* module = impl(module_abs);
+ i::Isolate* isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
- assert(module->v8_object()->GetIsolate() == isolate);
+ DCHECK_EQ(module->v8_object()->GetIsolate(), isolate);
- auto import_types = module_abs->imports();
- auto imports_obj = v8::Object::New(isolate);
+ vec<ImportType*> import_types = module_abs->imports();
+ i::Handle<i::JSObject> imports_obj =
+ isolate->factory()->NewJSObject(isolate->object_function());
for (size_t i = 0; i < import_types.size(); ++i) {
auto type = import_types[i];
- auto maybe_module = v8::String::NewFromOneByte(
- isolate, reinterpret_cast<const uint8_t*>(type->module().get()),
- v8::NewStringType::kNormal, static_cast<int>(type->module().size()));
- if (maybe_module.IsEmpty()) return own<Instance*>();
- auto module_str = maybe_module.ToLocalChecked();
- auto maybe_name = v8::String::NewFromOneByte(
- isolate, reinterpret_cast<const uint8_t*>(type->name().get()),
- v8::NewStringType::kNormal, static_cast<int>(type->name().size()));
- if (maybe_name.IsEmpty()) return own<Instance*>();
- auto name_str = maybe_name.ToLocalChecked();
-
- v8::Local<v8::Object> module_obj;
- if (imports_obj->HasOwnProperty(context, module_str).ToChecked()) {
- module_obj = v8::Local<v8::Object>::Cast(
- imports_obj->Get(context, module_str).ToLocalChecked());
+ i::Handle<i::String> module_str = VecToString(isolate, type->module());
+ i::Handle<i::String> name_str = VecToString(isolate, type->name());
+
+ i::Handle<i::JSObject> module_obj;
+ i::LookupIterator module_it(isolate, imports_obj, module_str,
+ i::LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (i::JSObject::HasProperty(&module_it).ToChecked()) {
+ module_obj = i::Handle<i::JSObject>::cast(
+ i::Object::GetProperty(&module_it).ToHandleChecked());
} else {
- module_obj = v8::Object::New(isolate);
- ignore(imports_obj->DefineOwnProperty(context, module_str, module_obj));
+ module_obj = isolate->factory()->NewJSObject(isolate->object_function());
+ ignore(
+ i::Object::SetProperty(isolate, imports_obj, module_str, module_obj));
}
-
- ignore(module_obj->DefineOwnProperty(context, name_str,
- extern_to_v8(imports[i])));
+ ignore(i::Object::SetProperty(isolate, module_obj, name_str,
+ impl(imports[i])->v8_object()));
}
- v8::Local<v8::Value> instantiate_args[] = {module->v8_object(), imports_obj};
- auto obj = store->v8_function(V8_F_INSTANCE)
- ->NewInstance(context, 2, instantiate_args)
- .ToLocalChecked();
- return RefImpl<Instance>::make(store, obj);
+ NopErrorThrower thrower(isolate);
+ i::Handle<i::WasmInstanceObject> instance_obj =
+ isolate->wasm_engine()
+ ->SyncInstantiate(isolate, &thrower, module->v8_object(), imports_obj,
+ i::MaybeHandle<i::JSArrayBuffer>())
+ .ToHandleChecked();
+ return implement<Instance>::type::make(store, instance_obj);
}
auto Instance::exports() const -> vec<Extern*> {
- auto instance = impl(this);
- auto store = instance->store();
- auto isolate = store->isolate();
- auto context = store->context();
- v8::HandleScope handle_scope(isolate);
-
- auto module_obj = v8::wasm::instance_module(instance->v8_object());
- auto exports_obj = v8::wasm::instance_exports(instance->v8_object());
- assert(!module_obj.IsEmpty() && module_obj->IsObject());
- assert(!exports_obj.IsEmpty() && exports_obj->IsObject());
-
- auto module = RefImpl<Module>::make(store, module_obj);
- auto export_types = module->exports();
- auto exports = vec<Extern*>::make_uninitialized(export_types.size());
+ const implement<Instance>::type* instance = impl(this);
+ StoreImpl* store = instance->store();
+ i::Isolate* isolate = store->i_isolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::WasmInstanceObject> instance_obj = instance->v8_object();
+ i::Handle<i::WasmModuleObject> module_obj(instance_obj->module_object(),
+ isolate);
+ i::Handle<i::JSObject> exports_obj(instance_obj->exports_object(), isolate);
+
+ vec<ExportType*> export_types = ExportsImpl(module_obj);
+ vec<Extern*> exports = vec<Extern*>::make_uninitialized(export_types.size());
if (!exports) return vec<Extern*>::invalid();
for (size_t i = 0; i < export_types.size(); ++i) {
auto& name = export_types[i]->name();
- auto maybe_name_obj =
- v8::String::NewFromUtf8(isolate, name.get(), v8::NewStringType::kNormal,
- static_cast<int>(name.size()));
- if (maybe_name_obj.IsEmpty()) return vec<Extern*>::invalid();
- auto name_obj = maybe_name_obj.ToLocalChecked();
- auto obj = v8::Local<v8::Object>::Cast(
- exports_obj->Get(context, name_obj).ToLocalChecked());
-
- auto type = export_types[i]->type();
- assert(type->kind() == v8::wasm::extern_kind(obj));
+ i::Handle<i::String> name_str = VecToString(isolate, name);
+ i::Handle<i::Object> obj =
+ i::Object::GetProperty(isolate, exports_obj, name_str)
+ .ToHandleChecked();
+
+ const ExternType* type = export_types[i]->type();
switch (type->kind()) {
case EXTERN_FUNC: {
- exports[i].reset(RefImpl<Func>::make(store, obj));
+ DCHECK(i::WasmExportedFunction::IsWasmExportedFunction(*obj));
+ exports[i].reset(implement<Func>::type::make(
+ store, i::Handle<i::WasmExportedFunction>::cast(obj)));
} break;
case EXTERN_GLOBAL: {
- exports[i].reset(RefImpl<Global>::make(store, obj));
+ exports[i].reset(implement<Global>::type::make(
+ store, i::Handle<i::WasmGlobalObject>::cast(obj)));
} break;
case EXTERN_TABLE: {
- exports[i].reset(RefImpl<Table>::make(store, obj));
+ exports[i].reset(implement<Table>::type::make(
+ store, i::Handle<i::WasmTableObject>::cast(obj)));
} break;
case EXTERN_MEMORY: {
- exports[i].reset(RefImpl<Memory>::make(store, obj));
+ exports[i].reset(implement<Memory>::type::make(
+ store, i::Handle<i::WasmMemoryObject>::cast(obj)));
} break;
}
}
diff --git a/deps/v8/src/wasm/c-api.h b/deps/v8/src/wasm/c-api.h
new file mode 100644
index 0000000000..c1a914a16e
--- /dev/null
+++ b/deps/v8/src/wasm/c-api.h
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_C_API_H_
+#define V8_WASM_C_API_H_
+
+#include "include/v8.h"
+#include "src/common/globals.h"
+#include "third_party/wasm-api/wasm.hh"
+
+namespace wasm {
+
+class StoreImpl {
+ public:
+ ~StoreImpl();
+
+ v8::Isolate* isolate() const { return isolate_; }
+ i::Isolate* i_isolate() const {
+ return reinterpret_cast<i::Isolate*>(isolate_);
+ }
+
+ v8::Local<v8::Context> context() const { return context_.Get(isolate_); }
+
+ static StoreImpl* get(i::Isolate* isolate) {
+ return static_cast<StoreImpl*>(
+ reinterpret_cast<v8::Isolate*>(isolate)->GetData(0));
+ }
+
+ private:
+ friend own<Store*> Store::make(Engine*);
+
+ StoreImpl() {}
+
+ v8::Isolate::CreateParams create_params_;
+ v8::Isolate* isolate_ = nullptr;
+ v8::Eternal<v8::Context> context_;
+};
+
+} // namespace wasm
+
+#endif // V8_WASM_C_API_H_
diff --git a/deps/v8/src/wasm/compilation-environment.h b/deps/v8/src/wasm/compilation-environment.h
index 1df93c8296..07bc8f57bc 100644
--- a/deps/v8/src/wasm/compilation-environment.h
+++ b/deps/v8/src/wasm/compilation-environment.h
@@ -119,13 +119,17 @@ class CompilationState {
void AddCallback(callback_t);
bool failed() const;
+ V8_EXPORT_PRIVATE bool baseline_compilation_finished() const;
+ V8_EXPORT_PRIVATE bool top_tier_compilation_finished() const;
- void OnFinishedUnit(WasmCode*);
- void OnFinishedUnits(Vector<WasmCode*>);
+ // Override {operator delete} to avoid implicit instantiation of {operator
+ // delete} with {size_t} argument. The {size_t} argument would be incorrect.
+ void operator delete(void* ptr) { ::operator delete(ptr); }
private:
+ // NativeModule is allowed to call the static {New} method.
friend class NativeModule;
- friend class WasmCompilationUnit;
+
CompilationState() = delete;
// The CompilationState keeps a {std::weak_ptr} back to the {NativeModule}
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 315f504761..440267bd25 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -5,15 +5,16 @@
#ifndef V8_WASM_DECODER_H_
#define V8_WASM_DECODER_H_
+#include <cinttypes>
#include <cstdarg>
#include <memory>
#include "src/base/compiler-specific.h"
-#include "src/flags.h"
-#include "src/signature.h"
-#include "src/utils.h"
-#include "src/v8memory.h"
-#include "src/vector.h"
+#include "src/codegen/signature.h"
+#include "src/common/v8memory.h"
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
#include "src/wasm/wasm-result.h"
#include "src/zone/zone-containers.h"
@@ -46,7 +47,7 @@ class Decoder {
Decoder(const byte* start, const byte* end, uint32_t buffer_offset = 0)
: Decoder(start, start, end, buffer_offset) {}
explicit Decoder(const Vector<const byte> bytes, uint32_t buffer_offset = 0)
- : Decoder(bytes.start(), bytes.start() + bytes.length(), buffer_offset) {}
+ : Decoder(bytes.begin(), bytes.begin() + bytes.length(), buffer_offset) {}
Decoder(const byte* start, const byte* pc, const byte* end,
uint32_t buffer_offset = 0)
: start_(start), pc_(pc), end_(end), buffer_offset_(buffer_offset) {
@@ -287,7 +288,7 @@ class Decoder {
EmbeddedVector<char, kMaxErrorMsg> buffer;
int len = VSNPrintF(buffer, format, args);
CHECK_LT(0, len);
- error_ = {offset, {buffer.start(), static_cast<size_t>(len)}};
+ error_ = {offset, {buffer.begin(), static_cast<size_t>(len)}};
onFirstError();
}
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index a5214513fc..eb895a25b3 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -9,7 +9,8 @@
// WasmFullDecoder.
#include "src/base/platform/elapsed-timer.h"
-#include "src/bit-vector.h"
+#include "src/base/small-vector.h"
+#include "src/utils/bit-vector.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-features.h"
@@ -205,6 +206,68 @@ struct GlobalIndexImmediate {
}
};
+namespace function_body_decoder {
+// Decode a byte representing a local type. Return {false} if the encoded
+// byte was invalid or the start of a type index.
+inline bool decode_local_type(uint8_t val, ValueType* result) {
+ switch (static_cast<ValueTypeCode>(val)) {
+ case kLocalVoid:
+ *result = kWasmStmt;
+ return true;
+ case kLocalI32:
+ *result = kWasmI32;
+ return true;
+ case kLocalI64:
+ *result = kWasmI64;
+ return true;
+ case kLocalF32:
+ *result = kWasmF32;
+ return true;
+ case kLocalF64:
+ *result = kWasmF64;
+ return true;
+ case kLocalS128:
+ *result = kWasmS128;
+ return true;
+ case kLocalAnyFunc:
+ *result = kWasmAnyFunc;
+ return true;
+ case kLocalAnyRef:
+ *result = kWasmAnyRef;
+ return true;
+ case kLocalExceptRef:
+ *result = kWasmExceptRef;
+ return true;
+ default:
+ *result = kWasmVar;
+ return false;
+ }
+}
+} // namespace function_body_decoder
+
+template <Decoder::ValidateFlag validate>
+struct SelectTypeImmediate {
+ uint32_t length;
+ ValueType type;
+
+ inline SelectTypeImmediate(Decoder* decoder, const byte* pc) {
+ uint8_t num_types =
+ decoder->read_u32v<validate>(pc + 1, &length, "number of select types");
+ if (!VALIDATE(num_types == 1)) {
+ decoder->error(
+ pc + 1, "Invalid number of types. Select accepts exactly one type");
+ return;
+ }
+ uint8_t val = decoder->read_u8<validate>(pc + length + 1, "select type");
+ length++;
+ if (!function_body_decoder::decode_local_type(val, &type) ||
+ type == kWasmStmt) {
+ decoder->error(pc + 1, "invalid select type");
+ return;
+ }
+ }
+};
+
template <Decoder::ValidateFlag validate>
struct BlockTypeImmediate {
uint32_t length = 1;
@@ -215,7 +278,7 @@ struct BlockTypeImmediate {
inline BlockTypeImmediate(const WasmFeatures& enabled, Decoder* decoder,
const byte* pc) {
uint8_t val = decoder->read_u8<validate>(pc + 1, "block type");
- if (!decode_local_type(val, &type)) {
+ if (!function_body_decoder::decode_local_type(val, &type)) {
// Handle multi-value blocks.
if (!VALIDATE(enabled.mv)) {
decoder->error(pc + 1, "invalid block type");
@@ -232,40 +295,6 @@ struct BlockTypeImmediate {
}
}
- // Decode a byte representing a local type. Return {false} if the encoded
- // byte was invalid or the start of a type index.
- inline bool decode_local_type(uint8_t val, ValueType* result) {
- switch (static_cast<ValueTypeCode>(val)) {
- case kLocalVoid:
- *result = kWasmStmt;
- return true;
- case kLocalI32:
- *result = kWasmI32;
- return true;
- case kLocalI64:
- *result = kWasmI64;
- return true;
- case kLocalF32:
- *result = kWasmF32;
- return true;
- case kLocalF64:
- *result = kWasmF64;
- return true;
- case kLocalS128:
- *result = kWasmS128;
- return true;
- case kLocalAnyFunc:
- *result = kWasmAnyFunc;
- return true;
- case kLocalAnyRef:
- *result = kWasmAnyRef;
- return true;
- default:
- *result = kWasmVar;
- return false;
- }
- }
-
uint32_t in_arity() const {
if (type != kWasmVar) return 0;
return static_cast<uint32_t>(sig->parameter_count());
@@ -338,6 +367,15 @@ struct CallFunctionImmediate {
};
template <Decoder::ValidateFlag validate>
+struct FunctionIndexImmediate {
+ uint32_t index = 0;
+ uint32_t length = 1;
+ inline FunctionIndexImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_u32v<validate>(pc + 1, &length, "function index");
+ }
+};
+
+template <Decoder::ValidateFlag validate>
struct MemoryIndexImmediate {
uint32_t index = 0;
uint32_t length = 1;
@@ -671,6 +709,7 @@ struct ControlBase {
F(F32Const, Value* result, float value) \
F(F64Const, Value* result, double value) \
F(RefNull, Value* result) \
+ F(RefFunc, uint32_t function_index, Value* result) \
F(Drop, const Value& value) \
F(DoReturn, Vector<Value> values) \
F(GetLocal, Value* result, const LocalIndexImmediate<validate>& imm) \
@@ -729,7 +768,12 @@ struct ControlBase {
const Value& value, const Value& size) \
F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
F(ElemDrop, const ElemDropImmediate<validate>& imm) \
- F(TableCopy, const TableCopyImmediate<validate>& imm, Vector<Value> args)
+ F(TableCopy, const TableCopyImmediate<validate>& imm, Vector<Value> args) \
+ F(TableGrow, const TableIndexImmediate<validate>& imm, const Value& value, \
+ const Value& delta, Value* result) \
+ F(TableSize, const TableIndexImmediate<validate>& imm, Value* result) \
+ F(TableFill, const TableIndexImmediate<validate>& imm, const Value& start, \
+ const Value& value, const Value& count)
// Generic Wasm bytecode decoder with utilities for decoding immediates,
// lengths, etc.
@@ -1094,6 +1138,15 @@ class WasmDecoder : public Decoder {
return true;
}
+ inline bool Validate(const byte* pc, FunctionIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_ != nullptr &&
+ imm.index < module_->functions.size())) {
+ errorf(pc, "invalid function index: %u", imm.index);
+ return false;
+ }
+ return true;
+ }
+
inline bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
if (!VALIDATE(module_ != nullptr && module_->has_memory)) {
errorf(pc + 1, "memory instruction with no memory");
@@ -1225,6 +1278,10 @@ class WasmDecoder : public Decoder {
LocalIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
}
+ case kExprSelectWithType: {
+ SelectTypeImmediate<validate> imm(decoder, pc);
+ return 1 + imm.length;
+ }
case kExprBrTable: {
BranchTableImmediate<validate> imm(decoder, pc);
BranchTableIterator<validate> iterator(decoder, imm);
@@ -1241,6 +1298,10 @@ class WasmDecoder : public Decoder {
case kExprRefNull: {
return 1;
}
+ case kExprRefFunc: {
+ FunctionIndexImmediate<validate> imm(decoder, pc);
+ return 1 + imm.length;
+ }
case kExprMemoryGrow:
case kExprMemorySize: {
MemoryIndexImmediate<validate> imm(decoder, pc);
@@ -1293,6 +1354,12 @@ class WasmDecoder : public Decoder {
TableCopyImmediate<validate> imm(decoder, pc);
return 2 + imm.length;
}
+ case kExprTableGrow:
+ case kExprTableSize:
+ case kExprTableFill: {
+ TableIndexImmediate<validate> imm(decoder, pc);
+ return 2 + imm.length;
+ }
default:
decoder->error(pc, "invalid numeric opcode");
return 2;
@@ -1359,6 +1426,7 @@ class WasmDecoder : public Decoder {
// clang-format off
switch (opcode) {
case kExprSelect:
+ case kExprSelectWithType:
return {3, 1};
case kExprSetTable:
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
@@ -1383,6 +1451,7 @@ class WasmDecoder : public Decoder {
case kExprF32Const:
case kExprF64Const:
case kExprRefNull:
+ case kExprRefFunc:
case kExprMemorySize:
return {0, 1};
case kExprCallFunction: {
@@ -1437,8 +1506,8 @@ class WasmDecoder : public Decoder {
V8_FALLTHROUGH;
}
default:
- V8_Fatal(__FILE__, __LINE__, "unimplemented opcode: %x (%s)", opcode,
- WasmOpcodes::OpcodeName(opcode));
+ FATAL("unimplemented opcode: %x (%s)", opcode,
+ WasmOpcodes::OpcodeName(opcode));
return {0, 0};
}
#undef DECLARE_OPCODE_CASE
@@ -1468,6 +1537,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
using Value = typename Interface::Value;
using Control = typename Interface::Control;
using MergeValues = Merge<Value>;
+ using ArgVector = base::SmallVector<Value, 8>;
// All Value types should be trivially copyable for performance. We push, pop,
// and store them in local variables.
@@ -1484,8 +1554,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
interface_(std::forward<InterfaceArgs>(interface_args)...),
local_type_vec_(zone),
stack_(zone),
- control_(zone),
- args_(zone) {
+ control_(zone) {
this->local_types_ = &local_type_vec_;
}
@@ -1495,11 +1564,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DCHECK(stack_.empty());
DCHECK(control_.empty());
- base::ElapsedTimer decode_timer;
- if (FLAG_trace_wasm_decode_time) {
- decode_timer.Start();
- }
-
if (this->end_ < this->pc_) {
this->error("function body end < start");
return false;
@@ -1522,13 +1586,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (this->failed()) return this->TraceFailed();
- if (FLAG_trace_wasm_decode_time) {
- double ms = decode_timer.Elapsed().InMillisecondsF();
- PrintF("wasm-decode %s (%0.3f ms)\n\n",
- VALIDATE(this->ok()) ? "ok" : "failed", ms);
- } else {
- TRACE("wasm-decode %s\n\n", VALIDATE(this->ok()) ? "ok" : "failed");
- }
+ TRACE("wasm-decode %s\n\n", VALIDATE(this->ok()) ? "ok" : "failed");
return true;
}
@@ -1542,7 +1600,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
const char* SafeOpcodeNameAt(const byte* pc) {
if (pc >= this->end_) return "<end>";
- return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(*pc));
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+ if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
+ return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode));
+ }
+ // We need one more byte.
+ ++pc;
+ if (pc >= this->end_) return "<end>";
+ byte sub_opcode = *pc;
+ opcode = static_cast<WasmOpcode>(opcode << 8 | sub_opcode);
+ return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(opcode));
}
inline Zone* zone() const { return zone_; }
@@ -1589,7 +1656,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ZoneVector<ValueType> local_type_vec_; // types of local variables.
ZoneVector<Value> stack_; // stack of values.
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
- ZoneVector<Value> args_; // parameters of current block or call
static Value UnreachableValue(const uint8_t* pc) {
return Value{pc, kWasmVar};
@@ -1639,9 +1705,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Decodes the body of a function.
void DecodeFunctionBody() {
- TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n",
- reinterpret_cast<const void*>(this->start()),
- reinterpret_cast<const void*>(this->end()), this->pc_offset(),
+ TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n", this->start(),
+ this->end(), this->pc_offset(),
static_cast<int>(this->end() - this->start()));
// Set up initial function block.
@@ -1685,9 +1750,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprBlock: {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (!this->Validate(imm)) break;
- PopArgs(imm.sig);
+ auto args = PopArgs(imm.sig);
auto* block = PushControl(kControlBlock);
- SetBlockType(block, imm);
+ SetBlockType(block, imm, args.begin());
CALL_INTERFACE_IF_REACHABLE(Block, block);
PushMergeValues(block, &block->start_merge);
len = 1 + imm.length;
@@ -1705,8 +1770,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ExceptionIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
- PopArgs(imm.exception->ToFunctionSig());
- CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args_));
+ auto args = PopArgs(imm.exception->ToFunctionSig());
+ CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args));
EndControl();
break;
}
@@ -1714,9 +1779,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CHECK_PROTOTYPE_OPCODE(eh);
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (!this->Validate(imm)) break;
- PopArgs(imm.sig);
+ auto args = PopArgs(imm.sig);
auto* try_block = PushControl(kControlTry);
- SetBlockType(try_block, imm);
+ SetBlockType(try_block, imm, args.begin());
len = 1 + imm.length;
CALL_INTERFACE_IF_REACHABLE(Try, try_block);
PushMergeValues(try_block, &try_block->start_merge);
@@ -1775,9 +1840,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
case kExprLoop: {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (!this->Validate(imm)) break;
- PopArgs(imm.sig);
+ auto args = PopArgs(imm.sig);
auto* block = PushControl(kControlLoop);
- SetBlockType(&control_.back(), imm);
+ SetBlockType(&control_.back(), imm, args.begin());
len = 1 + imm.length;
CALL_INTERFACE_IF_REACHABLE(Loop, block);
PushMergeValues(block, &block->start_merge);
@@ -1787,10 +1852,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
if (!this->Validate(imm)) break;
auto cond = Pop(0, kWasmI32);
- PopArgs(imm.sig);
+ auto args = PopArgs(imm.sig);
if (!VALIDATE(this->ok())) break;
auto* if_block = PushControl(kControlIf);
- SetBlockType(if_block, imm);
+ SetBlockType(if_block, imm, args.begin());
CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
len = 1 + imm.length;
PushMergeValues(if_block, &if_block->start_merge);
@@ -1860,8 +1925,26 @@ class WasmFullDecoder : public WasmDecoder<validate> {
auto cond = Pop(2, kWasmI32);
auto fval = Pop();
auto tval = Pop(0, fval.type);
- auto* result = Push(tval.type == kWasmVar ? fval.type : tval.type);
+ ValueType type = tval.type == kWasmVar ? fval.type : tval.type;
+ if (ValueTypes::IsSubType(kWasmAnyRef, type)) {
+ this->error(
+ "select without type is only valid for value type inputs");
+ break;
+ }
+ auto* result = Push(type);
+ CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ break;
+ }
+ case kExprSelectWithType: {
+ CHECK_PROTOTYPE_OPCODE(anyref);
+ SelectTypeImmediate<validate> imm(this, this->pc_);
+ if (this->failed()) break;
+ auto cond = Pop(2, kWasmI32);
+ auto fval = Pop(1, imm.type);
+ auto tval = Pop(0, imm.type);
+ auto* result = Push(imm.type);
CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ len = 1 + imm.length;
break;
}
case kExprBr: {
@@ -1988,6 +2071,15 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len = 1;
break;
}
+ case kExprRefFunc: {
+ CHECK_PROTOTYPE_OPCODE(anyref);
+ FunctionIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ auto* value = Push(kWasmAnyFunc);
+ CALL_INTERFACE_IF_REACHABLE(RefFunc, imm.index, value);
+ len = 1 + imm.length;
+ break;
+ }
case kExprGetLocal: {
LocalIndexImmediate<validate> imm(this, this->pc_);
if (!this->Validate(this->pc_, imm)) break;
@@ -2055,7 +2147,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableIndexImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(0, this->module_->tables[imm.index].type);
+ auto value = Pop(1, this->module_->tables[imm.index].type);
auto index = Pop(0, kWasmI32);
CALL_INTERFACE_IF_REACHABLE(SetTable, index, value, imm);
break;
@@ -2156,10 +2248,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CallFunctionImmediate<validate> imm(this, this->pc_);
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
- // TODO(clemensh): Better memory management.
- PopArgs(imm.sig);
+ auto args = PopArgs(imm.sig);
auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args_.data(), returns);
+ CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args.begin(), returns);
break;
}
case kExprCallIndirect: {
@@ -2167,9 +2258,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
len = 1 + imm.length;
if (!this->Validate(this->pc_, imm)) break;
auto index = Pop(0, kWasmI32);
- PopArgs(imm.sig);
+ auto args = PopArgs(imm.sig);
auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args_.data(),
+ CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args.begin(),
returns);
break;
}
@@ -2184,9 +2275,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
- PopArgs(imm.sig);
+ auto args = PopArgs(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(ReturnCall, imm, args_.data());
+ CALL_INTERFACE_IF_REACHABLE(ReturnCall, imm, args.begin());
EndControl();
break;
}
@@ -2200,9 +2291,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
break;
}
auto index = Pop(0, kWasmI32);
- PopArgs(imm.sig);
+ auto args = PopArgs(imm.sig);
CALL_INTERFACE_IF_REACHABLE(ReturnCallIndirect, index, imm,
- args_.data());
+ args.begin());
EndControl();
break;
}
@@ -2213,6 +2304,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
opcode = static_cast<WasmOpcode>(opcode << 8 | numeric_index);
if (opcode < kExprMemoryInit) {
CHECK_PROTOTYPE_OPCODE(sat_f2i_conversions);
+ } else if (opcode == kExprTableGrow || opcode == kExprTableSize ||
+ opcode == kExprTableFill) {
+ CHECK_PROTOTYPE_OPCODE(anyref);
} else {
CHECK_PROTOTYPE_OPCODE(bulk_memory);
}
@@ -2357,10 +2451,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- void SetBlockType(Control* c, BlockTypeImmediate<validate>& imm) {
- DCHECK_EQ(imm.in_arity(), this->args_.size());
+ void SetBlockType(Control* c, BlockTypeImmediate<validate>& imm,
+ Value* args) {
const byte* pc = this->pc_;
- Value* args = this->args_.data();
InitMerge(&c->end_merge, imm.out_arity(), [pc, &imm](uint32_t i) {
return Value{pc, imm.out_type(i)};
});
@@ -2368,13 +2461,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
[args](uint32_t i) { return args[i]; });
}
- // Pops arguments as required by signature into {args_}.
- V8_INLINE void PopArgs(FunctionSig* sig) {
+ // Pops arguments as required by signature.
+ V8_INLINE ArgVector PopArgs(FunctionSig* sig) {
int count = sig ? static_cast<int>(sig->parameter_count()) : 0;
- args_.resize(count, UnreachableValue(nullptr));
+ ArgVector args(count);
for (int i = count - 1; i >= 0; --i) {
- args_[i] = Pop(i, sig->GetParam(i));
+ args[i] = Pop(i, sig->GetParam(i));
}
+ return args;
}
ValueType GetReturnType(FunctionSig* sig) {
@@ -2524,10 +2618,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("invalid simd opcode");
break;
}
- PopArgs(sig);
+ auto args = PopArgs(sig);
auto* results =
sig->return_count() == 0 ? nullptr : Push(GetReturnType(sig));
- CALL_INTERFACE_IF_REACHABLE(SimdOp, opcode, VectorOf(args_), results);
+ CALL_INTERFACE_IF_REACHABLE(SimdOp, opcode, VectorOf(args), results);
}
}
return len;
@@ -2563,9 +2657,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryAccessImmediate<validate> imm(
this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
len += imm.length;
- PopArgs(sig);
+ auto args = PopArgs(sig);
auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
- CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args_), imm,
+ CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm,
result);
} else {
this->error("invalid atomic opcode");
@@ -2629,8 +2723,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableInitImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- PopArgs(sig);
- CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args_));
+ auto args = PopArgs(sig);
+ CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args));
break;
}
case kExprElemDrop: {
@@ -2644,8 +2738,36 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableCopyImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- PopArgs(sig);
- CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args_));
+ auto args = PopArgs(sig);
+ CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args));
+ break;
+ }
+ case kExprTableGrow: {
+ TableIndexImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(this->pc_, imm)) break;
+ len += imm.length;
+ auto delta = Pop(1, sig->GetParam(1));
+ auto value = Pop(0, this->module_->tables[imm.index].type);
+ auto* result = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(TableGrow, imm, value, delta, result);
+ break;
+ }
+ case kExprTableSize: {
+ TableIndexImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(this->pc_, imm)) break;
+ len += imm.length;
+ auto* result = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(TableSize, imm, result);
+ break;
+ }
+ case kExprTableFill: {
+ TableIndexImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(this->pc_, imm)) break;
+ len += imm.length;
+ auto count = Pop(2, sig->GetParam(2));
+ auto value = Pop(1, this->module_->tables[imm.index].type);
+ auto start = Pop(0, sig->GetParam(0));
+ CALL_INTERFACE_IF_REACHABLE(TableFill, imm, start, value, count);
break;
}
default:
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index 1e5cb86f49..c1e8e541b5 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -4,10 +4,10 @@
#include "src/wasm/function-body-decoder.h"
-#include "src/flags.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/flags/flags.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-limits.h"
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 5564dcd969..16f90a41cb 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -7,7 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/iterator.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index a74cb43e66..a5d7a08846 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -4,9 +4,9 @@
#include "src/wasm/function-compiler.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/counters.h"
-#include "src/macro-assembler-inl.h"
+#include "src/logging/counters.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/wasm-code-manager.h"
@@ -24,19 +24,19 @@ class WasmInstructionBufferImpl {
: buffer_(buffer), holder_(holder) {}
~View() override {
- if (buffer_.start() == holder_->old_buffer_.start()) {
+ if (buffer_.begin() == holder_->old_buffer_.start()) {
DCHECK_EQ(buffer_.size(), holder_->old_buffer_.size());
holder_->old_buffer_ = {};
}
}
- byte* start() const override { return buffer_.start(); }
+ byte* start() const override { return buffer_.begin(); }
int size() const override { return static_cast<int>(buffer_.size()); }
std::unique_ptr<AssemblerBuffer> Grow(int new_size) override {
// If we grow, we must be the current buffer of {holder_}.
- DCHECK_EQ(buffer_.start(), holder_->buffer_.start());
+ DCHECK_EQ(buffer_.begin(), holder_->buffer_.start());
DCHECK_EQ(buffer_.size(), holder_->buffer_.size());
DCHECK_NULL(holder_->old_buffer_);
@@ -112,26 +112,13 @@ ExecutionTier WasmCompilationUnit::GetDefaultExecutionTier(
return FLAG_liftoff ? ExecutionTier::kLiftoff : ExecutionTier::kTurbofan;
}
-WasmCompilationUnit::WasmCompilationUnit(int index, ExecutionTier tier)
- : func_index_(index), tier_(tier) {
- if (V8_UNLIKELY(FLAG_wasm_tier_mask_for_testing) && index < 32 &&
- (FLAG_wasm_tier_mask_for_testing & (1 << index))) {
- tier = ExecutionTier::kTurbofan;
- }
- SwitchTier(tier);
-}
-
-// Declared here such that {LiftoffCompilationUnit} and
-// {TurbofanWasmCompilationUnit} can be opaque in the header file.
-WasmCompilationUnit::~WasmCompilationUnit() = default;
-
WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
WasmEngine* wasm_engine, CompilationEnv* env,
const std::shared_ptr<WireBytesStorage>& wire_bytes_storage,
Counters* counters, WasmFeatures* detected) {
auto* func = &env->module->functions[func_index_];
Vector<const uint8_t> code = wire_bytes_storage->GetCode(func->code);
- wasm::FunctionBody func_body{func->sig, func->code.offset(), code.start(),
+ wasm::FunctionBody func_body{func->sig, func->code.offset(), code.begin(),
code.end()};
auto size_histogram = SELECT_WASM_COUNTER(counters, env->module->origin, wasm,
@@ -141,35 +128,45 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
wasm_compile, function_time);
TimedHistogramScope wasm_compile_function_time_scope(timed_histogram);
- // Exactly one compiler-specific unit must be set.
- DCHECK_EQ(1, !!liftoff_unit_ + !!turbofan_unit_ + !!interpreter_unit_);
-
if (FLAG_trace_wasm_compiler) {
- const char* tier =
- liftoff_unit_ ? "liftoff" : turbofan_unit_ ? "turbofan" : "interpreter";
- PrintF("Compiling wasm function %d with %s\n\n", func_index_, tier);
+ PrintF("Compiling wasm function %d with %s\n\n", func_index_,
+ ExecutionTierToString(tier_));
}
WasmCompilationResult result;
- if (liftoff_unit_) {
- result = liftoff_unit_->ExecuteCompilation(wasm_engine->allocator(), env,
- func_body, counters, detected);
- if (!result.succeeded()) {
+
+ switch (tier_) {
+ case ExecutionTier::kNone:
+ UNREACHABLE();
+
+ case ExecutionTier::kLiftoff:
+ // The --wasm-tier-mask-for-testing flag can force functions to be
+ // compiled with TurboFan, see documentation.
+ if (V8_LIKELY(FLAG_wasm_tier_mask_for_testing == 0) ||
+ func_index_ >= 32 ||
+ ((FLAG_wasm_tier_mask_for_testing & (1 << func_index_)) == 0)) {
+ result =
+ ExecuteLiftoffCompilation(wasm_engine->allocator(), env, func_body,
+ func_index_, counters, detected);
+ if (result.succeeded()) break;
+ }
+
// If Liftoff failed, fall back to turbofan.
// TODO(wasm): We could actually stop or remove the tiering unit for this
// function to avoid compiling it twice with TurboFan.
- SwitchTier(ExecutionTier::kTurbofan);
- DCHECK_NOT_NULL(turbofan_unit_);
- }
- }
- if (turbofan_unit_) {
- result = turbofan_unit_->ExecuteCompilation(wasm_engine, env, func_body,
- counters, detected);
- }
- if (interpreter_unit_) {
- result = interpreter_unit_->ExecuteCompilation(wasm_engine, env, func_body,
- counters, detected);
+ V8_FALLTHROUGH;
+
+ case ExecutionTier::kTurbofan:
+ result = compiler::ExecuteTurbofanWasmCompilation(
+ wasm_engine, env, func_body, func_index_, counters, detected);
+ break;
+
+ case ExecutionTier::kInterpreter:
+ result = compiler::ExecuteInterpreterEntryCompilation(
+ wasm_engine, env, func_body, func_index_, counters, detected);
+ break;
}
+
result.func_index = func_index_;
result.requested_tier = tier_;
@@ -182,36 +179,6 @@ WasmCompilationResult WasmCompilationUnit::ExecuteCompilation(
return result;
}
-void WasmCompilationUnit::SwitchTier(ExecutionTier new_tier) {
- // This method is being called in the constructor, where neither
- // {liftoff_unit_} nor {turbofan_unit_} nor {interpreter_unit_} are set, or to
- // switch tier from kLiftoff to kTurbofan, in which case {liftoff_unit_} is
- // already set.
- switch (new_tier) {
- case ExecutionTier::kLiftoff:
- DCHECK(!turbofan_unit_);
- DCHECK(!liftoff_unit_);
- DCHECK(!interpreter_unit_);
- liftoff_unit_.reset(new LiftoffCompilationUnit());
- return;
- case ExecutionTier::kTurbofan:
- DCHECK(!turbofan_unit_);
- DCHECK(!interpreter_unit_);
- liftoff_unit_.reset();
- turbofan_unit_.reset(new compiler::TurbofanWasmCompilationUnit(this));
- return;
- case ExecutionTier::kInterpreter:
- DCHECK(!turbofan_unit_);
- DCHECK(!liftoff_unit_);
- DCHECK(!interpreter_unit_);
- interpreter_unit_.reset(new compiler::InterpreterCompilationUnit(this));
- return;
- case ExecutionTier::kNone:
- UNREACHABLE();
- }
- UNREACHABLE();
-}
-
// static
void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
NativeModule* native_module,
diff --git a/deps/v8/src/wasm/function-compiler.h b/deps/v8/src/wasm/function-compiler.h
index ae577e8ee0..e7d8ff9471 100644
--- a/deps/v8/src/wasm/function-compiler.h
+++ b/deps/v8/src/wasm/function-compiler.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_FUNCTION_COMPILER_H_
#define V8_WASM_FUNCTION_COMPILER_H_
-#include "src/code-desc.h"
+#include "src/codegen/code-desc.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-body-decoder.h"
@@ -19,18 +19,10 @@ namespace internal {
class AssemblerBuffer;
class Counters;
-namespace compiler {
-class InterpreterCompilationUnit;
-class Pipeline;
-class TurbofanWasmCompilationUnit;
-} // namespace compiler
-
namespace wasm {
-class LiftoffCompilationUnit;
class NativeModule;
class WasmCode;
-class WasmCompilationUnit;
class WasmEngine;
struct WasmFunction;
@@ -70,40 +62,30 @@ class V8_EXPORT_PRIVATE WasmCompilationUnit final {
public:
static ExecutionTier GetDefaultExecutionTier(const WasmModule*);
- WasmCompilationUnit(int index, ExecutionTier);
-
- ~WasmCompilationUnit();
+ WasmCompilationUnit(int index, ExecutionTier tier)
+ : func_index_(index), tier_(tier) {}
WasmCompilationResult ExecuteCompilation(
WasmEngine*, CompilationEnv*, const std::shared_ptr<WireBytesStorage>&,
Counters*, WasmFeatures* detected);
ExecutionTier tier() const { return tier_; }
+ int func_index() const { return func_index_; }
static void CompileWasmFunction(Isolate*, NativeModule*,
WasmFeatures* detected, const WasmFunction*,
ExecutionTier);
private:
- friend class LiftoffCompilationUnit;
- friend class compiler::TurbofanWasmCompilationUnit;
- friend class compiler::InterpreterCompilationUnit;
-
- const int func_index_;
+ int func_index_;
ExecutionTier tier_;
-
- // LiftoffCompilationUnit, set if {tier_ == kLiftoff}.
- std::unique_ptr<LiftoffCompilationUnit> liftoff_unit_;
- // TurbofanWasmCompilationUnit, set if {tier_ == kTurbofan}.
- std::unique_ptr<compiler::TurbofanWasmCompilationUnit> turbofan_unit_;
- // InterpreterCompilationUnit, set if {tier_ == kInterpreter}.
- std::unique_ptr<compiler::InterpreterCompilationUnit> interpreter_unit_;
-
- void SwitchTier(ExecutionTier new_tier);
-
- DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
+// {WasmCompilationUnit} should be trivially copyable and small enough so we can
+// efficiently pass it by value.
+ASSERT_TRIVIALLY_COPYABLE(WasmCompilationUnit);
+STATIC_ASSERT(sizeof(WasmCompilationUnit) <= 2 * kSystemPointerSize);
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 1f870598a9..90d8749f2c 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -5,10 +5,10 @@
#include "src/wasm/graph-builder-interface.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/flags.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/flags/flags.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
@@ -251,6 +251,10 @@ class WasmGraphBuildingInterface {
result->node = builder_->RefNull();
}
+ void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
+ result->node = BUILD(RefFunc, function_index);
+ }
+
void Drop(FullDecoder* decoder, const Value& value) {}
void DoReturn(FullDecoder* decoder, Vector<Value> values) {
@@ -461,7 +465,7 @@ class WasmGraphBuildingInterface {
for (int i = 0; i < count; ++i) {
args[i] = value_args[i].node;
}
- BUILD(Throw, imm.index, imm.exception, VectorOf(args));
+ BUILD(Throw, imm.index, imm.exception, VectorOf(args), decoder->position());
builder_->TerminateThrow(ssa_env_->effect, ssa_env_->control);
}
@@ -534,33 +538,54 @@ class WasmGraphBuildingInterface {
BUILD(MemoryInit, imm.data_segment_index, dst.node, src.node, size.node,
decoder->position());
}
+
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
BUILD(DataDrop, imm.index, decoder->position());
}
+
void MemoryCopy(FullDecoder* decoder,
const MemoryCopyImmediate<validate>& imm, const Value& dst,
const Value& src, const Value& size) {
BUILD(MemoryCopy, dst.node, src.node, size.node, decoder->position());
}
+
void MemoryFill(FullDecoder* decoder,
const MemoryIndexImmediate<validate>& imm, const Value& dst,
const Value& value, const Value& size) {
BUILD(MemoryFill, dst.node, value.node, size.node, decoder->position());
}
+
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
Vector<Value> args) {
BUILD(TableInit, imm.table.index, imm.elem_segment_index, args[0].node,
args[1].node, args[2].node, decoder->position());
}
+
void ElemDrop(FullDecoder* decoder, const ElemDropImmediate<validate>& imm) {
BUILD(ElemDrop, imm.index, decoder->position());
}
+
void TableCopy(FullDecoder* decoder, const TableCopyImmediate<validate>& imm,
Vector<Value> args) {
BUILD(TableCopy, imm.table_src.index, imm.table_dst.index, args[0].node,
args[1].node, args[2].node, decoder->position());
}
+ void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ Value& value, Value& delta, Value* result) {
+ result->node = BUILD(TableGrow, imm.index, value.node, delta.node);
+ }
+
+ void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ Value* result) {
+ result->node = BUILD(TableSize, imm.index);
+ }
+
+ void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
+ Value& start, Value& value, Value& count) {
+ BUILD(TableFill, imm.index, start.node, value.node, count.node);
+ }
+
private:
SsaEnv* ssa_env_;
compiler::WasmGraphBuilder* builder_;
@@ -580,7 +605,7 @@ class WasmGraphBuildingInterface {
}
TFNode** GetNodes(Vector<Value> values) {
- return GetNodes(values.start(), values.size());
+ return GetNodes(values.begin(), values.size());
}
void SetEnv(SsaEnv* env) {
@@ -603,7 +628,7 @@ class WasmGraphBuildingInterface {
break;
}
}
- PrintF("{set_env = %p, state = %c", static_cast<void*>(env), state);
+ PrintF("{set_env = %p, state = %c", env, state);
if (env && env->control) {
PrintF(", control = ");
compiler::WasmGraphBuilder::PrintDebugName(env->control);
@@ -692,7 +717,9 @@ class WasmGraphBuildingInterface {
Value& val = stack_values[i];
Value& old = (*merge)[i];
DCHECK_NOT_NULL(val.node);
- DCHECK(val.type == old.type || val.type == kWasmVar);
+ DCHECK(val.type == kWasmVar ||
+ ValueTypes::MachineRepresentationFor(val.type) ==
+ ValueTypes::MachineRepresentationFor(old.type));
old.node = first ? val.node
: builder_->CreateOrMergeIntoPhi(
ValueTypes::MachineRepresentationFor(old.type),
diff --git a/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h b/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
index ff5fb8de72..ba2093d2c1 100644
--- a/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
+++ b/deps/v8/src/wasm/js-to-wasm-wrapper-cache.h
@@ -6,7 +6,7 @@
#define V8_WASM_JS_TO_WASM_WRAPPER_CACHE_H_
#include "src/compiler/wasm-compiler.h"
-#include "src/counters.h"
+#include "src/logging/counters.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-code-manager.h"
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index 988c22d6fc..93ff8a9317 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -4,8 +4,8 @@
#include "src/wasm/jump-table-assembler.h"
-#include "src/assembler-inl.h"
-#include "src/macro-assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index f3d4f954bf..eef9fea167 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_JUMP_TABLE_ASSEMBLER_H_
#define V8_WASM_JUMP_TABLE_ASSEMBLER_H_
-#include "src/macro-assembler.h"
+#include "src/codegen/macro-assembler.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
index 6b0d824768..ba1ebffe2c 100644
--- a/deps/v8/src/wasm/local-decl-encoder.cc
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -4,7 +4,7 @@
#include "src/wasm/local-decl-encoder.h"
-#include "src/signature.h"
+#include "src/codegen/signature.h"
#include "src/wasm/leb-helper.h"
namespace v8 {
diff --git a/deps/v8/src/wasm/local-decl-encoder.h b/deps/v8/src/wasm/local-decl-encoder.h
index e0725efe9b..6fd2314d2d 100644
--- a/deps/v8/src/wasm/local-decl-encoder.h
+++ b/deps/v8/src/wasm/local-decl-encoder.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_LOCAL_DECL_ENCODER_H_
#define V8_WASM_LOCAL_DECL_ENCODER_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/wasm/memory-tracing.cc b/deps/v8/src/wasm/memory-tracing.cc
index 4dc5b80dbc..10483cf8ea 100644
--- a/deps/v8/src/wasm/memory-tracing.cc
+++ b/deps/v8/src/wasm/memory-tracing.cc
@@ -4,9 +4,11 @@
#include "src/wasm/memory-tracing.h"
-#include "src/utils.h"
-#include "src/v8memory.h"
-#include "src/vector.h"
+#include <cinttypes>
+
+#include "src/common/v8memory.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -35,23 +37,10 @@ void TraceMemoryOperation(ExecutionTier tier, const MemoryTracingInfo* info,
default:
SNPrintF(value, "???");
}
- const char* eng = "?";
- switch (tier) {
- case ExecutionTier::kTurbofan:
- eng = "turbofan";
- break;
- case ExecutionTier::kLiftoff:
- eng = "liftoff";
- break;
- case ExecutionTier::kInterpreter:
- eng = "interpreter";
- break;
- case ExecutionTier::kNone:
- UNREACHABLE();
- }
+ const char* eng = ExecutionTierToString(tier);
printf("%-11s func:%6d+0x%-6x%s %08x val: %s\n", eng, func_index, position,
info->is_store ? " store to" : "load from", info->address,
- value.start());
+ value.begin());
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/memory-tracing.h b/deps/v8/src/wasm/memory-tracing.h
index b5105c5327..15457399c1 100644
--- a/deps/v8/src/wasm/memory-tracing.h
+++ b/deps/v8/src/wasm/memory-tracing.h
@@ -7,7 +7,7 @@
#include <cstdint>
-#include "src/machine-type.h"
+#include "src/codegen/machine-type.h"
#include "src/wasm/wasm-tier.h"
namespace v8 {
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index 6f061e7b5d..476a7731f0 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -5,23 +5,25 @@
#include "src/wasm/module-compiler.h"
#include <algorithm>
+#include <queue>
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/asmjs/asm-js.h"
#include "src/base/enum-set.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
+#include "src/base/platform/time.h"
#include "src/base/template-utils.h"
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/counters.h"
#include "src/heap/heap-inl.h" // For CodeSpaceMemoryModificationScope.
-#include "src/identity-map.h"
-#include "src/property-descriptor.h"
-#include "src/task-utils.h"
+#include "src/logging/counters.h"
+#include "src/objects/property-descriptor.h"
+#include "src/tasks/task-utils.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/utils/identity-map.h"
#include "src/wasm/js-to-wasm-wrapper-cache.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
@@ -58,6 +60,26 @@ namespace {
enum class CompileMode : uint8_t { kRegular, kTiering };
+enum class CompileStrategy : uint8_t {
+ // Compiles functions on first use. In this case, execution will block until
+ // the function's baseline is reached and top tier compilation starts in
+ // background (if applicable).
+ // Lazy compilation can help to reduce startup time and code size at the risk
+ // of blocking execution.
+ kLazy,
+ // Compiles baseline ahead of execution and starts top tier compilation in
+ // background (if applicable).
+ kEager,
+ // Triggers baseline compilation on first use (just like {kLazy}) with the
+ // difference that top tier compilation is started eagerly.
+ // This strategy can help to reduce startup time at the risk of blocking
+ // execution, but only in its early phase (until top tier compilation
+ // finishes).
+ kLazyBaselineEagerTopTier,
+ // Marker for default strategy.
+ kDefault = kEager,
+};
+
// Background compile jobs hold a shared pointer to this token. The token is
// used to notify them that they should stop. As soon as they see this (after
// finishing their current compilation unit), they will stop.
@@ -128,21 +150,14 @@ class CompilationUnitQueues {
explicit CompilationUnitQueues(int max_tasks) : queues_(max_tasks) {
DCHECK_LT(0, max_tasks);
for (int task_id = 0; task_id < max_tasks; ++task_id) {
- queues_[task_id].next_steal_task_id_ = next_task_id(task_id);
+ queues_[task_id].next_steal_task_id = next_task_id(task_id);
}
for (auto& atomic_counter : num_units_) {
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wbraced-scalar-init"
-#endif
- atomic_counter = ATOMIC_VAR_INIT(0);
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
+ std::atomic_init(&atomic_counter, size_t{0});
}
}
- std::unique_ptr<WasmCompilationUnit> GetNextUnit(
+ base::Optional<WasmCompilationUnit> GetNextUnit(
int task_id, CompileBaselineOnly baseline_only) {
DCHECK_LE(0, task_id);
DCHECK_GT(queues_.size(), task_id);
@@ -151,38 +166,20 @@ class CompilationUnitQueues {
// before executing own higher-tier units.
int max_tier = baseline_only ? kBaseline : kTopTier;
for (int tier = GetLowestTierWithUnits(); tier <= max_tier; ++tier) {
- Queue* queue = &queues_[task_id];
- // First, check whether our own queue has a unit of the wanted tier. If
- // so, return it, otherwise get the task id to steal from.
- int steal_task_id;
- {
- base::MutexGuard mutex_guard(&queue->mutex_);
- if (!queue->units_[tier].empty()) {
- auto unit = std::move(queue->units_[tier].back());
- queue->units_[tier].pop_back();
- DecrementUnitCount(tier);
- return unit;
- }
- steal_task_id = queue->next_steal_task_id_;
- }
-
- // Try to steal from all other queues. If none of this succeeds, the outer
- // loop increases the tier and retries.
- size_t steal_trials = queues_.size();
- for (; steal_trials > 0;
- --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
- if (steal_task_id == task_id) continue;
- if (auto unit = StealUnitsAndGetFirst(task_id, steal_task_id, tier)) {
- DecrementUnitCount(tier);
- return unit;
- }
+ if (auto unit = GetNextUnitOfTier(task_id, tier)) {
+ size_t old_units_count =
+ num_units_[tier].fetch_sub(1, std::memory_order_relaxed);
+ DCHECK_LE(1, old_units_count);
+ USE(old_units_count);
+ return unit;
}
}
return {};
}
- void AddUnits(Vector<std::unique_ptr<WasmCompilationUnit>> baseline_units,
- Vector<std::unique_ptr<WasmCompilationUnit>> top_tier_units) {
+ void AddUnits(Vector<WasmCompilationUnit> baseline_units,
+ Vector<WasmCompilationUnit> top_tier_units,
+ const WasmModule* module) {
DCHECK_LT(0, baseline_units.size() + top_tier_units.size());
// Add to the individual queues in a round-robin fashion. No special care is
// taken to balance them; they will be balanced by work stealing.
@@ -193,22 +190,27 @@ class CompilationUnitQueues {
}
Queue* queue = &queues_[queue_to_add];
- base::MutexGuard guard(&queue->mutex_);
- if (!baseline_units.empty()) {
- queue->units_[kBaseline].insert(
- queue->units_[kBaseline].end(),
- std::make_move_iterator(baseline_units.begin()),
- std::make_move_iterator(baseline_units.end()));
- num_units_[kBaseline].fetch_add(baseline_units.size(),
- std::memory_order_relaxed);
- }
- if (!top_tier_units.empty()) {
- queue->units_[kTopTier].insert(
- queue->units_[kTopTier].end(),
- std::make_move_iterator(top_tier_units.begin()),
- std::make_move_iterator(top_tier_units.end()));
- num_units_[kTopTier].fetch_add(top_tier_units.size(),
- std::memory_order_relaxed);
+ base::MutexGuard guard(&queue->mutex);
+ base::Optional<base::MutexGuard> big_units_guard;
+ for (auto pair : {std::make_pair(int{kBaseline}, baseline_units),
+ std::make_pair(int{kTopTier}, top_tier_units)}) {
+ int tier = pair.first;
+ Vector<WasmCompilationUnit> units = pair.second;
+ if (units.empty()) continue;
+ num_units_[tier].fetch_add(units.size(), std::memory_order_relaxed);
+ for (WasmCompilationUnit unit : units) {
+ size_t func_size = module->functions[unit.func_index()].code.length();
+ if (func_size <= kBigUnitsLimit) {
+ queue->units[tier].push_back(unit);
+ } else {
+ if (!big_units_guard) {
+ big_units_guard.emplace(&big_units_queue_.mutex);
+ }
+ big_units_queue_.has_units[tier].store(true,
+ std::memory_order_relaxed);
+ big_units_queue_.units[tier].emplace(func_size, unit);
+ }
+ }
}
}
@@ -229,16 +231,47 @@ class CompilationUnitQueues {
static constexpr int kTopTier = 1;
static constexpr int kNumTiers = kTopTier + 1;
+ // Functions bigger than {kBigUnitsLimit} will be compiled first, in ascending
+ // order of their function body size.
+ static constexpr size_t kBigUnitsLimit = 4096;
+
struct Queue {
- base::Mutex mutex_;
+ base::Mutex mutex;
- // Protected by {mutex_}:
- std::vector<std::unique_ptr<WasmCompilationUnit>> units_[kNumTiers];
- int next_steal_task_id_;
- // End of fields protected by {mutex_}.
+ // Protected by {mutex}:
+ std::vector<WasmCompilationUnit> units[kNumTiers];
+ int next_steal_task_id;
+ // End of fields protected by {mutex}.
+ };
+
+ struct BigUnit {
+ BigUnit(size_t func_size, WasmCompilationUnit unit)
+ : func_size{func_size}, unit(unit) {}
+
+ size_t func_size;
+ WasmCompilationUnit unit;
+
+ bool operator<(const BigUnit& other) const {
+ return func_size < other.func_size;
+ }
+ };
+
+ struct BigUnitsQueue {
+ BigUnitsQueue() {
+ for (auto& atomic : has_units) std::atomic_init(&atomic, false);
+ }
+
+ base::Mutex mutex;
+
+ // Can be read concurrently to check whether any elements are in the queue.
+ std::atomic<bool> has_units[kNumTiers];
+
+ // Protected by {mutex}:
+ std::priority_queue<BigUnit> units[kNumTiers];
};
std::vector<Queue> queues_;
+ BigUnitsQueue big_units_queue_;
std::atomic<size_t> num_units_[kNumTiers];
std::atomic<int> next_queue_to_add{0};
@@ -255,40 +288,78 @@ class CompilationUnitQueues {
return kNumTiers;
}
- void DecrementUnitCount(int tier) {
- size_t old_units_count = num_units_[tier].fetch_sub(1);
- DCHECK_LE(1, old_units_count);
- USE(old_units_count);
+ base::Optional<WasmCompilationUnit> GetNextUnitOfTier(int task_id, int tier) {
+ Queue* queue = &queues_[task_id];
+ // First check whether there is a big unit of that tier. Execute that first.
+ if (auto unit = GetBigUnitOfTier(tier)) return unit;
+
+ // Then check whether our own queue has a unit of the wanted tier. If
+ // so, return it, otherwise get the task id to steal from.
+ int steal_task_id;
+ {
+ base::MutexGuard mutex_guard(&queue->mutex);
+ if (!queue->units[tier].empty()) {
+ auto unit = queue->units[tier].back();
+ queue->units[tier].pop_back();
+ return unit;
+ }
+ steal_task_id = queue->next_steal_task_id;
+ }
+
+ // Try to steal from all other queues. If this succeeds, return one of the
+ // stolen units.
+ size_t steal_trials = queues_.size();
+ for (; steal_trials > 0;
+ --steal_trials, steal_task_id = next_task_id(steal_task_id)) {
+ if (steal_task_id == task_id) continue;
+ if (auto unit = StealUnitsAndGetFirst(task_id, steal_task_id, tier)) {
+ return unit;
+ }
+ }
+
+ // If we reach here, we didn't find any unit of the requested tier.
+ return {};
+ }
+
+ base::Optional<WasmCompilationUnit> GetBigUnitOfTier(int tier) {
+ // Fast-path without locking.
+ if (!big_units_queue_.has_units[tier].load(std::memory_order_relaxed)) {
+ return {};
+ }
+ base::MutexGuard guard(&big_units_queue_.mutex);
+ if (big_units_queue_.units[tier].empty()) return {};
+ WasmCompilationUnit unit = big_units_queue_.units[tier].top().unit;
+ big_units_queue_.units[tier].pop();
+ if (big_units_queue_.units[tier].empty()) {
+ big_units_queue_.has_units[tier].store(false, std::memory_order_relaxed);
+ }
+ return unit;
}
// Steal units of {wanted_tier} from {steal_from_task_id} to {task_id}. Return
- // first stolen unit (rest put in queue of {task_id}), or {nullptr} if
+ // first stolen unit (rest put in queue of {task_id}), or {nullopt} if
// {steal_from_task_id} had no units of {wanted_tier}.
- std::unique_ptr<WasmCompilationUnit> StealUnitsAndGetFirst(
+ base::Optional<WasmCompilationUnit> StealUnitsAndGetFirst(
int task_id, int steal_from_task_id, int wanted_tier) {
DCHECK_NE(task_id, steal_from_task_id);
- std::vector<std::unique_ptr<WasmCompilationUnit>> stolen;
+ std::vector<WasmCompilationUnit> stolen;
+ base::Optional<WasmCompilationUnit> returned_unit;
{
Queue* steal_queue = &queues_[steal_from_task_id];
- base::MutexGuard guard(&steal_queue->mutex_);
- if (steal_queue->units_[wanted_tier].empty()) return {};
- auto* steal_from_vector = &steal_queue->units_[wanted_tier];
+ base::MutexGuard guard(&steal_queue->mutex);
+ auto* steal_from_vector = &steal_queue->units[wanted_tier];
+ if (steal_from_vector->empty()) return {};
size_t remaining = steal_from_vector->size() / 2;
- stolen.assign(
- std::make_move_iterator(steal_from_vector->begin()) + remaining,
- std::make_move_iterator(steal_from_vector->end()));
- steal_from_vector->resize(remaining);
+ auto steal_begin = steal_from_vector->begin() + remaining;
+ returned_unit = *steal_begin;
+ stolen.assign(steal_begin + 1, steal_from_vector->end());
+ steal_from_vector->erase(steal_begin, steal_from_vector->end());
}
- DCHECK(!stolen.empty());
- auto returned_unit = std::move(stolen.back());
- stolen.pop_back();
Queue* queue = &queues_[task_id];
- base::MutexGuard guard(&queue->mutex_);
- auto* target_queue = &queue->units_[wanted_tier];
- target_queue->insert(target_queue->end(),
- std::make_move_iterator(stolen.begin()),
- std::make_move_iterator(stolen.end()));
- queue->next_steal_task_id_ = next_task_id(steal_from_task_id);
+ base::MutexGuard guard(&queue->mutex);
+ auto* target_queue = &queue->units[wanted_tier];
+ target_queue->insert(target_queue->end(), stolen.begin(), stolen.end());
+ queue->next_steal_task_id = next_task_id(steal_from_task_id);
return returned_unit;
}
};
@@ -307,10 +378,10 @@ class CompilationStateImpl {
// this before destructing this object.
void AbortCompilation();
- // Set the number of compilations unit expected to be executed. Needs to be
- // set before {AddCompilationUnits} is run, which triggers background
- // compilation.
- void SetNumberOfFunctionsToCompile(int num_functions, int num_lazy_functions);
+ // Initialize compilation progress. Set compilation tiers to expect for
+ // baseline and top tier compilation. Must be set before {AddCompilationUnits}
+ // is invoked which triggers background compilation.
+ void InitializeCompilationProgress(bool lazy_module);
// Add the callback function to be called on compilation events. Needs to be
// set before {AddCompilationUnits} is run to ensure that it receives all
@@ -318,14 +389,12 @@ class CompilationStateImpl {
void AddCallback(CompilationState::callback_t);
// Inserts new functions to compile and kicks off compilation.
- void AddCompilationUnits(
- Vector<std::unique_ptr<WasmCompilationUnit>> baseline_units,
- Vector<std::unique_ptr<WasmCompilationUnit>> top_tier_units);
- void AddTopTierCompilationUnit(std::unique_ptr<WasmCompilationUnit>);
- std::unique_ptr<WasmCompilationUnit> GetNextCompilationUnit(
+ void AddCompilationUnits(Vector<WasmCompilationUnit> baseline_units,
+ Vector<WasmCompilationUnit> top_tier_units);
+ void AddTopTierCompilationUnit(WasmCompilationUnit);
+ base::Optional<WasmCompilationUnit> GetNextCompilationUnit(
int task_id, CompileBaselineOnly baseline_only);
- void OnFinishedUnit(WasmCode*);
void OnFinishedUnits(Vector<WasmCode*>);
void OnBackgroundTaskStopped(int task_id, const WasmFeatures& detected);
@@ -345,6 +414,12 @@ class CompilationStateImpl {
return outstanding_baseline_functions_ == 0;
}
+ bool top_tier_compilation_finished() const {
+ base::MutexGuard guard(&callbacks_mutex_);
+ DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
+ return outstanding_top_tier_functions_ == 0;
+ }
+
CompileMode compile_mode() const { return compile_mode_; }
Counters* counters() const { return async_counters_.get(); }
WasmFeatures* detected_features() { return &detected_features_; }
@@ -366,6 +441,29 @@ class CompilationStateImpl {
return background_compile_token_;
}
+ double GetCompilationDeadline(double now) {
+ // Execute for at least 50ms. Try to distribute deadlines of different tasks
+ // such that every 5ms one task stops. No task should execute longer than
+ // 200ms though.
+ constexpr double kMinLimit = 50. / base::Time::kMillisecondsPerSecond;
+ constexpr double kMaxLimit = 200. / base::Time::kMillisecondsPerSecond;
+ constexpr double kGapBetweenTasks = 5. / base::Time::kMillisecondsPerSecond;
+ double min_deadline = now + kMinLimit;
+ double max_deadline = now + kMaxLimit;
+ double next_deadline =
+ next_compilation_deadline_.load(std::memory_order_relaxed);
+ while (true) {
+ double deadline =
+ std::max(min_deadline, std::min(max_deadline, next_deadline));
+ if (next_compilation_deadline_.compare_exchange_weak(
+ next_deadline, deadline + kGapBetweenTasks,
+ std::memory_order_relaxed)) {
+ return deadline;
+ }
+ // Otherwise, retry with the updated {next_deadline}.
+ }
+ }
+
private:
NativeModule* const native_module_;
const std::shared_ptr<BackgroundCompileToken> background_compile_token_;
@@ -380,6 +478,13 @@ class CompilationStateImpl {
CompilationUnitQueues compilation_unit_queues_;
+ // Each compilation task executes until a certain deadline. The
+ // {CompilationStateImpl} orchestrates the deadlines such that they are
+ // evenly distributed and not all tasks stop at the same time. This removes
+ // contention during publishing of compilation results and also gives other
+ // tasks a fair chance to utilize the worker threads on a regular basis.
+ std::atomic<double> next_compilation_deadline_{0};
+
// This mutex protects all information of this {CompilationStateImpl} which is
// being accessed concurrently.
mutable base::Mutex mutex_;
@@ -415,10 +520,15 @@ class CompilationStateImpl {
int outstanding_baseline_functions_ = 0;
int outstanding_top_tier_functions_ = 0;
- std::vector<ExecutionTier> highest_execution_tier_;
+ std::vector<uint8_t> compilation_progress_;
// End of fields protected by {callbacks_mutex_}.
//////////////////////////////////////////////////////////////////////////////
+
+ // Encoding of fields in the {compilation_progress_} vector.
+ class RequiredBaselineTierField : public BitField8<ExecutionTier, 0, 2> {};
+ class RequiredTopTierField : public BitField8<ExecutionTier, 2, 2> {};
+ class ReachedTierField : public BitField8<ExecutionTier, 4, 2> {};
};
CompilationStateImpl* Impl(CompilationState* compilation_state) {
@@ -465,12 +575,12 @@ void CompilationState::AddCallback(CompilationState::callback_t callback) {
bool CompilationState::failed() const { return Impl(this)->failed(); }
-void CompilationState::OnFinishedUnit(WasmCode* code) {
- Impl(this)->OnFinishedUnit(code);
+bool CompilationState::baseline_compilation_finished() const {
+ return Impl(this)->baseline_compilation_finished();
}
-void CompilationState::OnFinishedUnits(Vector<WasmCode*> code_vector) {
- Impl(this)->OnFinishedUnits(code_vector);
+bool CompilationState::top_tier_compilation_finished() const {
+ return Impl(this)->top_tier_compilation_finished();
}
// static
@@ -513,23 +623,23 @@ const WasmCompilationHint* GetCompilationHint(const WasmModule* module,
return nullptr;
}
-bool IsLazyCompilation(const WasmModule* module,
- const WasmFeatures& enabled_features,
- uint32_t func_index) {
- if (enabled_features.compilation_hints) {
- const WasmCompilationHint* hint = GetCompilationHint(module, func_index);
- return hint != nullptr &&
- hint->strategy == WasmCompilationHintStrategy::kLazy;
+CompileStrategy GetCompileStrategy(const WasmModule* module,
+ const WasmFeatures& enabled_features,
+ uint32_t func_index, bool lazy_module) {
+ if (lazy_module) return CompileStrategy::kLazy;
+ if (!enabled_features.compilation_hints) return CompileStrategy::kDefault;
+ auto* hint = GetCompilationHint(module, func_index);
+ if (hint == nullptr) return CompileStrategy::kDefault;
+ switch (hint->strategy) {
+ case WasmCompilationHintStrategy::kLazy:
+ return CompileStrategy::kLazy;
+ case WasmCompilationHintStrategy::kEager:
+ return CompileStrategy::kEager;
+ case WasmCompilationHintStrategy::kLazyBaselineEagerTopTier:
+ return CompileStrategy::kLazyBaselineEagerTopTier;
+ case WasmCompilationHintStrategy::kDefault:
+ return CompileStrategy::kDefault;
}
- return false;
-}
-
-bool IsLazyCompilation(const WasmModule* module,
- const NativeModule* native_module,
- const WasmFeatures& enabled_features,
- uint32_t func_index) {
- if (native_module->lazy_compilation()) return true;
- return IsLazyCompilation(module, enabled_features, func_index);
}
struct ExecutionTierPair {
@@ -541,12 +651,14 @@ ExecutionTierPair GetRequestedExecutionTiers(
const WasmModule* module, CompileMode compile_mode,
const WasmFeatures& enabled_features, uint32_t func_index) {
ExecutionTierPair result;
+
switch (compile_mode) {
case CompileMode::kRegular:
result.baseline_tier =
WasmCompilationUnit::GetDefaultExecutionTier(module);
result.top_tier = result.baseline_tier;
return result;
+
case CompileMode::kTiering:
// Default tiering behaviour.
@@ -591,12 +703,29 @@ class CompilationUnitBuilder {
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
native_module_->enabled_features(), func_index);
- baseline_units_.emplace_back(CreateUnit(func_index, tiers.baseline_tier));
+ baseline_units_.emplace_back(func_index, tiers.baseline_tier);
if (tiers.baseline_tier != tiers.top_tier) {
- tiering_units_.emplace_back(CreateUnit(func_index, tiers.top_tier));
+ tiering_units_.emplace_back(func_index, tiers.top_tier);
}
}
+ void AddTopTierUnit(int func_index) {
+ ExecutionTierPair tiers = GetRequestedExecutionTiers(
+ native_module_->module(), compilation_state()->compile_mode(),
+ native_module_->enabled_features(), func_index);
+ // In this case, the baseline is lazily compiled, if at all. The compilation
+ // unit is added even if the baseline tier is the same.
+#ifdef DEBUG
+ auto* module = native_module_->module();
+ DCHECK_EQ(kWasmOrigin, module->origin);
+ const bool lazy_module = false;
+ DCHECK_EQ(CompileStrategy::kLazyBaselineEagerTopTier,
+ GetCompileStrategy(module, native_module_->enabled_features(),
+ func_index, lazy_module));
+#endif
+ tiering_units_.emplace_back(func_index, tiers.top_tier);
+ }
+
bool Commit() {
if (baseline_units_.empty() && tiering_units_.empty()) return false;
compilation_state()->AddCompilationUnits(VectorOf(baseline_units_),
@@ -611,75 +740,139 @@ class CompilationUnitBuilder {
}
private:
- std::unique_ptr<WasmCompilationUnit> CreateUnit(uint32_t func_index,
- ExecutionTier tier) {
- return base::make_unique<WasmCompilationUnit>(func_index, tier);
- }
-
CompilationStateImpl* compilation_state() const {
return Impl(native_module_->compilation_state());
}
NativeModule* const native_module_;
const ExecutionTier default_tier_;
- std::vector<std::unique_ptr<WasmCompilationUnit>> baseline_units_;
- std::vector<std::unique_ptr<WasmCompilationUnit>> tiering_units_;
+ std::vector<WasmCompilationUnit> baseline_units_;
+ std::vector<WasmCompilationUnit> tiering_units_;
};
+void SetCompileError(ErrorThrower* thrower, ModuleWireBytes wire_bytes,
+ const WasmFunction* func, const WasmModule* module,
+ WasmError error) {
+ WasmName name = wire_bytes.GetNameOrNull(func, module);
+ if (name.begin() == nullptr) {
+ thrower->CompileError("Compiling function #%d failed: %s @+%u",
+ func->func_index, error.message().c_str(),
+ error.offset());
+ } else {
+ TruncatedUserString<> truncated_name(name);
+ thrower->CompileError("Compiling function #%d:\"%.*s\" failed: %s @+%u",
+ func->func_index, truncated_name.length(),
+ truncated_name.start(), error.message().c_str(),
+ error.offset());
+ }
+}
+
+DecodeResult ValidateSingleFunction(const WasmModule* module, int func_index,
+ Vector<const uint8_t> code,
+ Counters* counters,
+ AccountingAllocator* allocator,
+ WasmFeatures enabled_features) {
+ const WasmFunction* func = &module->functions[func_index];
+ FunctionBody body{func->sig, func->code.offset(), code.begin(), code.end()};
+ DecodeResult result;
+
+ auto time_counter =
+ SELECT_WASM_COUNTER(counters, module->origin, wasm_decode, function_time);
+ TimedHistogramScope wasm_decode_function_time_scope(time_counter);
+ WasmFeatures detected;
+ result = VerifyWasmCode(allocator, enabled_features, module, &detected, body);
+
+ return result;
+}
+
+enum OnlyLazyFunctions : bool {
+ kAllFunctions = false,
+ kOnlyLazyFunctions = true,
+};
+
+void ValidateSequentially(
+ const WasmModule* module, NativeModule* native_module, Counters* counters,
+ AccountingAllocator* allocator, ErrorThrower* thrower, bool lazy_module,
+ OnlyLazyFunctions only_lazy_functions = kAllFunctions) {
+ DCHECK(!thrower->error());
+ uint32_t start = module->num_imported_functions;
+ uint32_t end = start + module->num_declared_functions;
+ auto enabled_features = native_module->enabled_features();
+ for (uint32_t func_index = start; func_index < end; func_index++) {
+ // Skip non-lazy functions if requested.
+ if (only_lazy_functions) {
+ CompileStrategy strategy =
+ GetCompileStrategy(module, enabled_features, func_index, lazy_module);
+ if (strategy != CompileStrategy::kLazy &&
+ strategy != CompileStrategy::kLazyBaselineEagerTopTier) {
+ continue;
+ }
+ }
+
+ ModuleWireBytes wire_bytes{native_module->wire_bytes()};
+ const WasmFunction* func = &module->functions[func_index];
+ Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
+ DecodeResult result = ValidateSingleFunction(
+ module, func_index, code, counters, allocator, enabled_features);
+ if (result.failed()) {
+ SetCompileError(thrower, wire_bytes, func, module, result.error());
+ }
+ }
+}
+
+bool IsLazyModule(const WasmModule* module) {
+ return FLAG_wasm_lazy_compilation ||
+ (FLAG_asm_wasm_lazy_compilation && module->origin == kAsmJsOrigin);
+}
+
} // namespace
-void CompileLazy(Isolate* isolate, NativeModule* native_module,
- uint32_t func_index) {
+bool CompileLazy(Isolate* isolate, NativeModule* native_module,
+ int func_index) {
+ const WasmModule* module = native_module->module();
+ auto enabled_features = native_module->enabled_features();
Counters* counters = isolate->counters();
- HistogramTimerScope lazy_time_scope(counters->wasm_lazy_compilation_time());
DCHECK(!native_module->lazy_compile_frozen());
-
- base::ElapsedTimer compilation_timer;
-
+ HistogramTimerScope lazy_time_scope(counters->wasm_lazy_compilation_time());
NativeModuleModificationScope native_module_modification_scope(native_module);
- DCHECK(!native_module->HasCode(static_cast<uint32_t>(func_index)));
-
+ base::ElapsedTimer compilation_timer;
compilation_timer.Start();
TRACE_LAZY("Compiling wasm-function#%d.\n", func_index);
- const uint8_t* module_start = native_module->wire_bytes().start();
-
- const WasmFunction* func = &native_module->module()->functions[func_index];
- FunctionBody func_body{func->sig, func->code.offset(),
- module_start + func->code.offset(),
- module_start + func->code.end_offset()};
-
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
ExecutionTierPair tiers = GetRequestedExecutionTiers(
- native_module->module(), compilation_state->compile_mode(),
- native_module->enabled_features(), func_index);
+ module, compilation_state->compile_mode(), enabled_features, func_index);
WasmCompilationUnit baseline_unit(func_index, tiers.baseline_tier);
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = baseline_unit.ExecuteCompilation(
isolate->wasm_engine(), &env, compilation_state->GetWireBytesStorage(),
- isolate->counters(), compilation_state->detected_features());
- WasmCodeRefScope code_ref_scope;
- WasmCode* code = native_module->AddCompiledCode(std::move(result));
+ counters, compilation_state->detected_features());
- if (tiers.baseline_tier < tiers.top_tier) {
- auto tiering_unit =
- base::make_unique<WasmCompilationUnit>(func_index, tiers.top_tier);
- compilation_state->AddTopTierCompilationUnit(std::move(tiering_unit));
+ // During lazy compilation, we can only get compilation errors when
+ // {--wasm-lazy-validation} is enabled. Otherwise, the module was fully
+ // verified before starting its execution.
+ DCHECK_IMPLIES(result.failed(), FLAG_wasm_lazy_validation);
+ const WasmFunction* func = &module->functions[func_index];
+ if (result.failed()) {
+ ErrorThrower thrower(isolate, nullptr);
+ Vector<const uint8_t> code =
+ compilation_state->GetWireBytesStorage()->GetCode(func->code);
+ DecodeResult decode_result = ValidateSingleFunction(
+ module, func_index, code, counters, isolate->wasm_engine()->allocator(),
+ enabled_features);
+ CHECK(decode_result.failed());
+ SetCompileError(&thrower, ModuleWireBytes(native_module->wire_bytes()),
+ func, module, decode_result.error());
+ return false;
}
- // During lazy compilation, we should never get compilation errors. The module
- // was verified before starting execution with lazy compilation.
- // This might be OOM, but then we cannot continue execution anyway.
- // TODO(clemensh): According to the spec, we can actually skip validation at
- // module creation time, and return a function that always traps here.
- CHECK(!compilation_state->failed());
-
- // The code we just produced should be the one that was requested.
+ WasmCodeRefScope code_ref_scope;
+ WasmCode* code = native_module->AddCompiledCode(std::move(result));
DCHECK_EQ(func_index, code->index());
if (WasmCode::ShouldBeLogged(isolate)) code->LogCode(isolate);
@@ -691,13 +884,23 @@ void CompileLazy(Isolate* isolate, NativeModule* native_module,
int throughput_sample = static_cast<int>(func_kb / compilation_seconds);
counters->wasm_lazy_compilation_throughput()->AddSample(throughput_sample);
+
+ const bool lazy_module = IsLazyModule(module);
+ if (GetCompileStrategy(module, enabled_features, func_index, lazy_module) ==
+ CompileStrategy::kLazy &&
+ tiers.baseline_tier < tiers.top_tier) {
+ WasmCompilationUnit tiering_unit{func_index, tiers.top_tier};
+ compilation_state->AddTopTierCompilationUnit(tiering_unit);
+ }
+
+ return true;
}
namespace {
void RecordStats(const Code code, Counters* counters) {
- counters->wasm_generated_code_size()->Increment(code->body_size());
- counters->wasm_reloc_size()->Increment(code->relocation_info()->length());
+ counters->wasm_generated_code_size()->Increment(code.body_size());
+ counters->wasm_reloc_size()->Increment(code.relocation_info().length());
}
constexpr int kMainThreadTaskId = -1;
@@ -717,19 +920,16 @@ bool ExecuteCompilationUnits(
if (is_foreground) task_id = 0;
Platform* platform = V8::GetCurrentPlatform();
- // Deadline is in 50ms from now.
- static constexpr double kBackgroundCompileTimeLimit =
- 50.0 / base::Time::kMillisecondsPerSecond;
- const double deadline =
- platform->MonotonicallyIncreasingTime() + kBackgroundCompileTimeLimit;
+ double compilation_start = platform->MonotonicallyIncreasingTime();
// These fields are initialized in a {BackgroundCompileScope} before
// starting compilation.
+ double deadline = 0;
base::Optional<CompilationEnv> env;
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
WasmEngine* wasm_engine = nullptr;
- std::unique_ptr<WasmCompilationUnit> unit;
+ base::Optional<WasmCompilationUnit> unit;
WasmFeatures detected_features = kNoWasmFeatures;
auto stop = [is_foreground, task_id,
@@ -748,13 +948,14 @@ bool ExecuteCompilationUnits(
{
BackgroundCompileScope compile_scope(token);
if (compile_scope.cancelled()) return false;
+ auto* compilation_state = compile_scope.compilation_state();
+ deadline = compilation_state->GetCompilationDeadline(compilation_start);
env.emplace(compile_scope.native_module()->CreateCompilationEnv());
- wire_bytes = compile_scope.compilation_state()->GetWireBytesStorage();
+ wire_bytes = compilation_state->GetWireBytesStorage();
module = compile_scope.native_module()->shared_module();
wasm_engine = compile_scope.native_module()->engine();
- unit = compile_scope.compilation_state()->GetNextCompilationUnit(
- task_id, baseline_only);
- if (unit == nullptr) {
+ unit = compilation_state->GetNextCompilationUnit(task_id, baseline_only);
+ if (!unit) {
stop(compile_scope);
return false;
}
@@ -764,6 +965,7 @@ bool ExecuteCompilationUnits(
auto publish_results = [&results_to_publish](
BackgroundCompileScope* compile_scope) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "PublishResults");
if (results_to_publish.empty()) return;
WasmCodeRefScope code_ref_scope;
std::vector<WasmCode*> code_vector =
@@ -791,23 +993,25 @@ bool ExecuteCompilationUnits(
compilation_failed = true;
break;
}
- // Publish TurboFan units immediately to reduce peak memory consumption.
- if (result.requested_tier == ExecutionTier::kTurbofan) {
- publish_results(&compile_scope);
- }
// Get next unit.
if (deadline < platform->MonotonicallyIncreasingTime()) {
- unit = nullptr;
+ unit = {};
} else {
unit = compile_scope.compilation_state()->GetNextCompilationUnit(
task_id, baseline_only);
}
- if (unit == nullptr) {
+ if (!unit) {
publish_results(&compile_scope);
stop(compile_scope);
return true;
+ } else if (unit->tier() == ExecutionTier::kTurbofan) {
+ // Before executing a TurboFan unit, ensure to publish all previous
+ // units. If we compiled Liftoff before, we need to publish them anyway
+ // to ensure fast completion of baseline compilation, if we compiled
+ // TurboFan before, we publish to reduce peak memory consumption.
+ publish_results(&compile_scope);
}
}
}
@@ -818,88 +1022,27 @@ bool ExecuteCompilationUnits(
return true;
}
-DecodeResult ValidateSingleFunction(const WasmModule* module, int func_index,
- Vector<const uint8_t> code,
- Counters* counters,
- AccountingAllocator* allocator,
- WasmFeatures enabled_features) {
- const WasmFunction* func = &module->functions[func_index];
- FunctionBody body{func->sig, func->code.offset(), code.start(), code.end()};
- DecodeResult result;
- {
- auto time_counter = SELECT_WASM_COUNTER(counters, module->origin,
- wasm_decode, function_time);
- TimedHistogramScope wasm_decode_function_time_scope(time_counter);
- WasmFeatures detected;
- result =
- VerifyWasmCode(allocator, enabled_features, module, &detected, body);
- }
- return result;
-}
-
-enum class OnlyLazyFunctions : bool { kNo = false, kYes = true };
-
-void ValidateSequentially(
- const WasmModule* module, NativeModule* native_module, Counters* counters,
- AccountingAllocator* allocator, ErrorThrower* thrower,
- OnlyLazyFunctions only_lazy_functions = OnlyLazyFunctions ::kNo) {
- DCHECK(!thrower->error());
- uint32_t start = module->num_imported_functions;
- uint32_t end = start + module->num_declared_functions;
- auto enabled_features = native_module->enabled_features();
- for (uint32_t func_index = start; func_index < end; func_index++) {
- // Skip non-lazy functions if requested.
- if (only_lazy_functions == OnlyLazyFunctions::kYes &&
- !IsLazyCompilation(module, native_module, enabled_features,
- func_index)) {
- continue;
- }
- ModuleWireBytes wire_bytes{native_module->wire_bytes()};
- const WasmFunction* func = &module->functions[func_index];
- Vector<const uint8_t> code = wire_bytes.GetFunctionBytes(func);
- DecodeResult result = ValidateSingleFunction(
- module, func_index, code, counters, allocator, enabled_features);
-
- if (result.failed()) {
- WasmName name = wire_bytes.GetNameOrNull(func, module);
- if (name.start() == nullptr) {
- thrower->CompileError(
- "Compiling function #%d failed: %s @+%u", func->func_index,
- result.error().message().c_str(), result.error().offset());
- } else {
- TruncatedUserString<> name(wire_bytes.GetNameOrNull(func, module));
- thrower->CompileError("Compiling function #%d:\"%.*s\" failed: %s @+%u",
- func->func_index, name.length(), name.start(),
- result.error().message().c_str(),
- result.error().offset());
- }
- }
- }
-}
-
void InitializeCompilationUnits(NativeModule* native_module) {
- // Set number of functions that must be compiled to consider the module fully
- // compiled.
- auto wasm_module = native_module->module();
- int num_functions = wasm_module->num_declared_functions;
- DCHECK_IMPLIES(!native_module->enabled_features().compilation_hints,
- wasm_module->num_lazy_compilation_hints == 0);
- int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
CompilationStateImpl* compilation_state =
Impl(native_module->compilation_state());
- compilation_state->SetNumberOfFunctionsToCompile(num_functions,
- num_lazy_functions);
+ const bool lazy_module = IsLazyModule(native_module->module());
+ compilation_state->InitializeCompilationProgress(lazy_module);
ModuleWireBytes wire_bytes(native_module->wire_bytes());
- const WasmModule* module = native_module->module();
CompilationUnitBuilder builder(native_module);
+ auto* module = native_module->module();
uint32_t start = module->num_imported_functions;
uint32_t end = start + module->num_declared_functions;
for (uint32_t func_index = start; func_index < end; func_index++) {
- if (IsLazyCompilation(module, native_module,
- native_module->enabled_features(), func_index)) {
+ CompileStrategy strategy = GetCompileStrategy(
+ module, native_module->enabled_features(), func_index, lazy_module);
+ if (strategy == CompileStrategy::kLazy) {
+ native_module->UseLazyStub(func_index);
+ } else if (strategy == CompileStrategy::kLazyBaselineEagerTopTier) {
+ builder.AddTopTierUnit(func_index);
native_module->UseLazyStub(func_index);
} else {
+ DCHECK_EQ(strategy, CompileStrategy::kEager);
builder.AddUnits(func_index);
}
}
@@ -910,34 +1053,69 @@ bool NeedsDeterministicCompile() {
return FLAG_trace_wasm_decoder || FLAG_wasm_num_compilation_tasks <= 1;
}
-void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
- const WasmModule* wasm_module,
- NativeModule* native_module) {
- ModuleWireBytes wire_bytes(native_module->wire_bytes());
+bool MayCompriseLazyFunctions(const WasmModule* module,
+ const WasmFeatures& enabled_features,
+ bool lazy_module) {
+ if (lazy_module || enabled_features.compilation_hints) return true;
+#ifdef ENABLE_SLOW_DCHECKS
+ int start = module->num_imported_functions;
+ int end = start + module->num_declared_functions;
+ for (int func_index = start; func_index < end; func_index++) {
+ SLOW_DCHECK(GetCompileStrategy(module, enabled_features, func_index,
+ lazy_module) != CompileStrategy::kLazy);
+ }
+#endif
+ return false;
+}
+
+class CompilationTimeCallback {
+ public:
+ enum CompileMode { kSynchronous, kAsync, kStreaming };
+ explicit CompilationTimeCallback(std::shared_ptr<Counters> async_counters,
+ CompileMode compile_mode)
+ : start_time_(base::TimeTicks::Now()),
+ async_counters_(std::move(async_counters)),
+ compile_mode_(compile_mode) {}
- if (FLAG_wasm_lazy_compilation ||
- (FLAG_asm_wasm_lazy_compilation && wasm_module->origin == kAsmJsOrigin)) {
- if (wasm_module->origin == kWasmOrigin) {
- // Validate wasm modules for lazy compilation. Don't validate asm.js
- // modules, they are valid by construction (otherwise a CHECK will fail
- // during lazy compilation).
- // TODO(clemensh): According to the spec, we can actually skip validation
- // at module creation time, and return a function that always traps at
- // (lazy) compilation time.
- ValidateSequentially(wasm_module, native_module, isolate->counters(),
- isolate->allocator(), thrower);
- // On error: Return and leave the module in an unexecutable state.
- if (thrower->error()) return;
+ void operator()(CompilationEvent event) {
+ DCHECK(base::TimeTicks::IsHighResolution());
+ if (event == CompilationEvent::kFinishedBaselineCompilation) {
+ auto now = base::TimeTicks::Now();
+ auto duration = now - start_time_;
+ // Reset {start_time_} to measure tier-up time.
+ start_time_ = now;
+ if (compile_mode_ != kSynchronous) {
+ TimedHistogram* histogram =
+ compile_mode_ == kAsync
+ ? async_counters_->wasm_async_compile_wasm_module_time()
+ : async_counters_->wasm_streaming_compile_wasm_module_time();
+ histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
+ }
+ }
+ if (event == CompilationEvent::kFinishedTopTierCompilation) {
+ auto duration = base::TimeTicks::Now() - start_time_;
+ TimedHistogram* histogram = async_counters_->wasm_tier_up_module_time();
+ histogram->AddSample(static_cast<int>(duration.InMicroseconds()));
}
- native_module->set_lazy_compilation(true);
- native_module->UseLazyStubs();
- return;
}
- if (native_module->enabled_features().compilation_hints) {
+ private:
+ base::TimeTicks start_time_;
+ const std::shared_ptr<Counters> async_counters_;
+ const CompileMode compile_mode_;
+};
+
+void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
+ const WasmModule* wasm_module,
+ NativeModule* native_module) {
+ ModuleWireBytes wire_bytes(native_module->wire_bytes());
+ const bool lazy_module = IsLazyModule(wasm_module);
+ if (!FLAG_wasm_lazy_validation &&
+ MayCompriseLazyFunctions(wasm_module, native_module->enabled_features(),
+ lazy_module)) {
ValidateSequentially(wasm_module, native_module, isolate->counters(),
- isolate->allocator(), thrower,
- OnlyLazyFunctions::kYes);
+ isolate->allocator(), thrower, lazy_module,
+ kOnlyLazyFunctions);
// On error: Return and leave the module in an unexecutable state.
if (thrower->error()) return;
}
@@ -946,13 +1124,13 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
// use the node cache.
CanonicalHandleScope canonical(isolate);
- auto* compilation_state = Impl(native_module->compilation_state());
DCHECK_GE(kMaxInt, native_module->module()->num_declared_functions);
// Install a callback to notify us once background compilation finished, or
// compilation failed.
auto baseline_finished_semaphore = std::make_shared<base::Semaphore>(0);
// The callback captures a shared ptr to the semaphore.
+ auto* compilation_state = Impl(native_module->compilation_state());
compilation_state->AddCallback(
[baseline_finished_semaphore](CompilationEvent event) {
if (event == CompilationEvent::kFinishedBaselineCompilation ||
@@ -960,6 +1138,10 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
baseline_finished_semaphore->Signal();
}
});
+ if (base::TimeTicks::IsHighResolution()) {
+ compilation_state->AddCallback(CompilationTimeCallback{
+ isolate->async_counters(), CompilationTimeCallback::kSynchronous});
+ }
// Initialize the compilation units and kick off background compile tasks.
InitializeCompilationUnits(native_module);
@@ -985,8 +1167,9 @@ void CompileNativeModule(Isolate* isolate, ErrorThrower* thrower,
compilation_state->PublishDetectedFeatures(isolate);
if (compilation_state->failed()) {
+ DCHECK_IMPLIES(lazy_module, !FLAG_wasm_lazy_validation);
ValidateSequentially(wasm_module, native_module, isolate->counters(),
- isolate->allocator(), thrower);
+ isolate->allocator(), thrower, lazy_module);
CHECK(thrower->error());
}
}
@@ -1028,8 +1211,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
if (wasm_module->has_shared_memory) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
}
- int export_wrapper_size = static_cast<int>(module->num_exported_functions);
-
// TODO(wasm): only save the sections necessary to deserialize a
// {WasmModule}. E.g. function bodies could be omitted.
OwnedVector<uint8_t> wire_bytes_copy =
@@ -1050,8 +1231,9 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
if (thrower->error()) return {};
// Compile JS->wasm wrappers for exported functions.
- *export_wrappers_out = isolate->factory()->NewFixedArray(
- export_wrapper_size, AllocationType::kOld);
+ int num_wrappers = MaxNumExportWrappers(native_module->module());
+ *export_wrappers_out =
+ isolate->factory()->NewFixedArray(num_wrappers, AllocationType::kOld);
CompileJsToWasmWrappers(isolate, native_module->module(),
*export_wrappers_out);
@@ -1064,9 +1246,12 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
AsyncCompileJob::AsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
+ const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver)
: isolate_(isolate),
+ api_method_name_(api_method_name),
enabled_features_(enabled),
+ wasm_lazy_compilation_(FLAG_wasm_lazy_compilation),
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
resolver_(std::move(resolver)) {
@@ -1227,11 +1412,12 @@ void AsyncCompileJob::FinishCompile() {
// TODO(wasm): compiling wrappers should be made async.
CompileWrappers();
}
+
FinishModule();
}
void AsyncCompileJob::DecodeFailed(const WasmError& error) {
- ErrorThrower thrower(isolate_, "WebAssembly.compile()");
+ ErrorThrower thrower(isolate_, api_method_name_);
thrower.CompileFailed(error);
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
@@ -1240,9 +1426,12 @@ void AsyncCompileJob::DecodeFailed(const WasmError& error) {
}
void AsyncCompileJob::AsyncCompileFailed() {
- ErrorThrower thrower(isolate_, "WebAssembly.compile()");
+ ErrorThrower thrower(isolate_, api_method_name_);
+ DCHECK_EQ(native_module_->module()->origin, kWasmOrigin);
+ const bool lazy_module = wasm_lazy_compilation_;
ValidateSequentially(native_module_->module(), native_module_.get(),
- isolate_->counters(), isolate_->allocator(), &thrower);
+ isolate_->counters(), isolate_->allocator(), &thrower,
+ lazy_module);
DCHECK(thrower.error());
// {job} keeps the {this} pointer alive.
std::shared_ptr<AsyncCompileJob> job =
@@ -1433,30 +1622,40 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
TRACE_COMPILE("(1) Decoding module...\n");
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"AsyncCompileJob::DecodeModule");
- result = DecodeWasmModule(
- job->enabled_features_, job->wire_bytes_.start(),
- job->wire_bytes_.end(), false, kWasmOrigin, counters_,
- job->isolate()->wasm_engine()->allocator());
-
- // Validate lazy functions here.
auto enabled_features = job->enabled_features_;
- if (enabled_features.compilation_hints && result.ok()) {
+ result = DecodeWasmModule(enabled_features, job->wire_bytes_.start(),
+ job->wire_bytes_.end(), false, kWasmOrigin,
+ counters_,
+ job->isolate()->wasm_engine()->allocator());
+
+ // Validate lazy functions here if requested.
+ if (!FLAG_wasm_lazy_validation && result.ok()) {
const WasmModule* module = result.value().get();
- auto allocator = job->isolate()->wasm_engine()->allocator();
- int start = module->num_imported_functions;
- int end = start + module->num_declared_functions;
-
- for (int func_index = start; func_index < end; func_index++) {
- const WasmFunction* func = &module->functions[func_index];
- Vector<const uint8_t> code = job->wire_bytes_.GetFunctionBytes(func);
-
- if (IsLazyCompilation(module, enabled_features, func_index)) {
- DecodeResult function_result =
- ValidateSingleFunction(module, func_index, code, counters_,
- allocator, enabled_features);
- if (function_result.failed()) {
- result = ModuleResult(function_result.error());
- break;
+ DCHECK_EQ(module->origin, kWasmOrigin);
+ const bool lazy_module = job->wasm_lazy_compilation_;
+ if (MayCompriseLazyFunctions(module, enabled_features, lazy_module)) {
+ auto allocator = job->isolate()->wasm_engine()->allocator();
+ int start = module->num_imported_functions;
+ int end = start + module->num_declared_functions;
+
+ for (int func_index = start; func_index < end; func_index++) {
+ const WasmFunction* func = &module->functions[func_index];
+ Vector<const uint8_t> code =
+ job->wire_bytes_.GetFunctionBytes(func);
+
+ CompileStrategy strategy = GetCompileStrategy(
+ module, enabled_features, func_index, lazy_module);
+ bool validate_lazily_compiled_function =
+ strategy == CompileStrategy::kLazy ||
+ strategy == CompileStrategy::kLazyBaselineEagerTopTier;
+ if (validate_lazily_compiled_function) {
+ DecodeResult function_result =
+ ValidateSingleFunction(module, func_index, code, counters_,
+ allocator, enabled_features);
+ if (function_result.failed()) {
+ result = ModuleResult(function_result.error());
+ break;
+ }
}
}
}
@@ -1517,6 +1716,14 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
CompilationStateImpl* compilation_state =
Impl(job->native_module_->compilation_state());
compilation_state->AddCallback(CompilationStateCallback{job});
+ if (base::TimeTicks::IsHighResolution()) {
+ auto compile_mode = job->stream_ == nullptr
+ ? CompilationTimeCallback::kAsync
+ : CompilationTimeCallback::kStreaming;
+ compilation_state->AddCallback(CompilationTimeCallback{
+ job->isolate_->async_counters(), compile_mode});
+ }
+
if (start_compilation_) {
// TODO(ahaas): Try to remove the {start_compilation_} check when
// streaming decoding is done in the background. If
@@ -1657,7 +1864,7 @@ bool AsyncStreamingProcessor::ProcessSection(SectionCode section_code,
if (section_code == SectionCode::kUnknownSectionCode) {
Decoder decoder(bytes, offset);
section_code = ModuleDecoder::IdentifyUnknownSection(
- decoder, bytes.start() + bytes.length());
+ decoder, bytes.begin() + bytes.length());
if (section_code == SectionCode::kUnknownSectionCode) {
// Skip unknown sections that we do not know how to handle.
return true;
@@ -1692,16 +1899,9 @@ bool AsyncStreamingProcessor::ProcessCodeSectionHeader(
decoder_.shared_module(), false);
auto* compilation_state = Impl(job_->native_module_->compilation_state());
compilation_state->SetWireBytesStorage(std::move(wire_bytes_storage));
-
- // Set number of functions that must be compiled to consider the module fully
- // compiled.
- auto wasm_module = job_->native_module_->module();
- int num_functions = wasm_module->num_declared_functions;
- DCHECK_IMPLIES(!job_->native_module_->enabled_features().compilation_hints,
- wasm_module->num_lazy_compilation_hints == 0);
- int num_lazy_functions = wasm_module->num_lazy_compilation_hints;
- compilation_state->SetNumberOfFunctionsToCompile(num_functions,
- num_lazy_functions);
+ DCHECK_EQ(job_->native_module_->module()->origin, kWasmOrigin);
+ const bool lazy_module = job_->wasm_lazy_compilation_;
+ compilation_state->InitializeCompilationProgress(lazy_module);
// Set outstanding_finishers_ to 2, because both the AsyncCompileJob and the
// AsyncStreamingProcessor have to finish.
@@ -1721,11 +1921,19 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
NativeModule* native_module = job_->native_module_.get();
const WasmModule* module = native_module->module();
- auto enabled_features = native_module->enabled_features();
+ auto enabled_features = job_->enabled_features_;
uint32_t func_index =
num_functions_ + decoder_.module()->num_imported_functions;
-
- if (IsLazyCompilation(module, native_module, enabled_features, func_index)) {
+ DCHECK_EQ(module->origin, kWasmOrigin);
+ const bool lazy_module = job_->wasm_lazy_compilation_;
+
+ CompileStrategy strategy =
+ GetCompileStrategy(module, enabled_features, func_index, lazy_module);
+ bool validate_lazily_compiled_function =
+ !FLAG_wasm_lazy_validation &&
+ (strategy == CompileStrategy::kLazy ||
+ strategy == CompileStrategy::kLazyBaselineEagerTopTier);
+ if (validate_lazily_compiled_function) {
Counters* counters = Impl(native_module->compilation_state())->counters();
AccountingAllocator* allocator = native_module->engine()->allocator();
@@ -1738,9 +1946,15 @@ bool AsyncStreamingProcessor::ProcessFunctionBody(Vector<const uint8_t> bytes,
FinishAsyncCompileJobWithError(result.error());
return false;
}
+ }
+ if (strategy == CompileStrategy::kLazy) {
+ native_module->UseLazyStub(func_index);
+ } else if (strategy == CompileStrategy::kLazyBaselineEagerTopTier) {
+ compilation_unit_builder_->AddTopTierUnit(func_index);
native_module->UseLazyStub(func_index);
} else {
+ DCHECK_EQ(strategy, CompileStrategy::kEager);
compilation_unit_builder_->AddUnits(func_index);
}
@@ -1773,6 +1987,11 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
HandleScope scope(job_->isolate_);
SaveAndSwitchContext saved_context(job_->isolate_, *job_->native_context_);
+ // Record the size of the wire bytes. In synchronous and asynchronous
+ // (non-streaming) compilation, this happens in {DecodeWasmModule}.
+ auto* histogram = job_->isolate_->counters()->wasm_wasm_module_size_bytes();
+ histogram->AddSample(static_cast<int>(bytes.size()));
+
bool needs_finish = job_->DecrementAndCheckFinisherCount();
if (job_->native_module_ == nullptr) {
// We are processing a WebAssembly module without code section. Create the
@@ -1823,7 +2042,6 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
return true;
}
-namespace {
int GetMaxBackgroundTasks() {
if (NeedsDeterministicCompile()) return 1;
int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
@@ -1831,7 +2049,6 @@ int GetMaxBackgroundTasks() {
std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
return std::max(1, num_compile_tasks);
}
-} // namespace
CompilationStateImpl::CompilationStateImpl(
const std::shared_ptr<NativeModule>& native_module,
@@ -1861,21 +2078,60 @@ void CompilationStateImpl::AbortCompilation() {
callbacks_.clear();
}
-void CompilationStateImpl::SetNumberOfFunctionsToCompile(
- int num_functions, int num_lazy_functions) {
+void CompilationStateImpl::InitializeCompilationProgress(bool lazy_module) {
DCHECK(!failed());
- base::MutexGuard guard(&callbacks_mutex_);
+ auto enabled_features = native_module_->enabled_features();
+ auto* module = native_module_->module();
- int num_functions_to_compile = num_functions - num_lazy_functions;
- outstanding_baseline_functions_ = num_functions_to_compile;
- outstanding_top_tier_functions_ = num_functions_to_compile;
- highest_execution_tier_.assign(num_functions, ExecutionTier::kNone);
+ base::MutexGuard guard(&callbacks_mutex_);
+ DCHECK_EQ(0, outstanding_baseline_functions_);
+ DCHECK_EQ(0, outstanding_top_tier_functions_);
+ compilation_progress_.reserve(module->num_declared_functions);
- // Degenerate case of an empty module. Trigger callbacks immediately.
- if (num_functions_to_compile == 0) {
+ int start = module->num_imported_functions;
+ int end = start + module->num_declared_functions;
+ for (int func_index = start; func_index < end; func_index++) {
+ ExecutionTierPair requested_tiers = GetRequestedExecutionTiers(
+ module, compile_mode(), enabled_features, func_index);
+ CompileStrategy strategy =
+ GetCompileStrategy(module, enabled_features, func_index, lazy_module);
+
+ bool required_for_baseline = strategy == CompileStrategy::kEager;
+ bool required_for_top_tier = strategy != CompileStrategy::kLazy;
+ DCHECK_EQ(required_for_top_tier,
+ strategy == CompileStrategy::kEager ||
+ strategy == CompileStrategy::kLazyBaselineEagerTopTier);
+
+ // Count functions to complete baseline and top tier compilation.
+ if (required_for_baseline) outstanding_baseline_functions_++;
+ if (required_for_top_tier) outstanding_top_tier_functions_++;
+
+ // Initialize function's compilation progress.
+ ExecutionTier required_baseline_tier = required_for_baseline
+ ? requested_tiers.baseline_tier
+ : ExecutionTier::kNone;
+ ExecutionTier required_top_tier =
+ required_for_top_tier ? requested_tiers.top_tier : ExecutionTier::kNone;
+ uint8_t function_progress = ReachedTierField::encode(ExecutionTier::kNone);
+ function_progress = RequiredBaselineTierField::update(
+ function_progress, required_baseline_tier);
+ function_progress =
+ RequiredTopTierField::update(function_progress, required_top_tier);
+ compilation_progress_.push_back(function_progress);
+ }
+ DCHECK_IMPLIES(lazy_module, outstanding_baseline_functions_ == 0);
+ DCHECK_IMPLIES(lazy_module, outstanding_top_tier_functions_ == 0);
+ DCHECK_LE(0, outstanding_baseline_functions_);
+ DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
+
+ // Trigger callbacks if module needs no baseline or top tier compilation. This
+ // can be the case for an empty or fully lazy module.
+ if (outstanding_baseline_functions_ == 0) {
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
+ }
+ if (outstanding_top_tier_functions_ == 0) {
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedTopTierCompilation);
}
@@ -1890,31 +2146,37 @@ void CompilationStateImpl::AddCallback(CompilationState::callback_t callback) {
}
void CompilationStateImpl::AddCompilationUnits(
- Vector<std::unique_ptr<WasmCompilationUnit>> baseline_units,
- Vector<std::unique_ptr<WasmCompilationUnit>> top_tier_units) {
- compilation_unit_queues_.AddUnits(baseline_units, top_tier_units);
+ Vector<WasmCompilationUnit> baseline_units,
+ Vector<WasmCompilationUnit> top_tier_units) {
+ compilation_unit_queues_.AddUnits(baseline_units, top_tier_units,
+ native_module_->module());
RestartBackgroundTasks();
}
-void CompilationStateImpl::AddTopTierCompilationUnit(
- std::unique_ptr<WasmCompilationUnit> unit) {
+void CompilationStateImpl::AddTopTierCompilationUnit(WasmCompilationUnit unit) {
AddCompilationUnits({}, {&unit, 1});
}
-std::unique_ptr<WasmCompilationUnit>
+base::Optional<WasmCompilationUnit>
CompilationStateImpl::GetNextCompilationUnit(
int task_id, CompileBaselineOnly baseline_only) {
return compilation_unit_queues_.GetNextUnit(task_id, baseline_only);
}
-void CompilationStateImpl::OnFinishedUnit(WasmCode* code) {
- OnFinishedUnits({&code, 1});
-}
-
void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "OnFinishedUnits");
+
base::MutexGuard guard(&callbacks_mutex_);
+ // In case of no outstanding functions we can return early.
+ // This is especially important for lazy modules that were deserialized.
+ // Compilation progress was not set up in these cases.
+ if (outstanding_baseline_functions_ == 0 &&
+ outstanding_top_tier_functions_ == 0) {
+ return;
+ }
+
// Assume an order of execution tiers that represents the quality of their
// generated code.
static_assert(ExecutionTier::kNone < ExecutionTier::kInterpreter &&
@@ -1922,69 +2184,64 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
"Assume an order on execution tiers");
- auto module = native_module_->module();
- auto enabled_features = native_module_->enabled_features();
+ DCHECK_EQ(compilation_progress_.size(),
+ native_module_->module()->num_declared_functions);
+
for (WasmCode* code : code_vector) {
DCHECK_NOT_NULL(code);
DCHECK_NE(code->tier(), ExecutionTier::kNone);
native_module_->engine()->LogCode(code);
- // Skip lazily compiled code as we do not consider this for the completion
- // of baseline respectively top tier compilation.
- int func_index = code->index();
- if (IsLazyCompilation(module, native_module_, enabled_features,
- func_index)) {
- continue;
+ // Read function's compilation progress.
+ // This view on the compilation progress may differ from the actually
+ // compiled code. Any lazily compiled function does not contribute to the
+ // compilation progress but may publish code to the code manager.
+ int slot_index =
+ code->index() - native_module_->module()->num_imported_functions;
+ uint8_t function_progress = compilation_progress_[slot_index];
+ ExecutionTier required_baseline_tier =
+ RequiredBaselineTierField::decode(function_progress);
+ ExecutionTier required_top_tier =
+ RequiredTopTierField::decode(function_progress);
+ ExecutionTier reached_tier = ReachedTierField::decode(function_progress);
+
+ bool completes_baseline_compilation = false;
+ bool completes_top_tier_compilation = false;
+
+ // Check whether required baseline or top tier are reached.
+ if (reached_tier < required_baseline_tier &&
+ required_baseline_tier <= code->tier()) {
+ DCHECK_GT(outstanding_baseline_functions_, 0);
+ outstanding_baseline_functions_--;
+ if (outstanding_baseline_functions_ == 0) {
+ completes_baseline_compilation = true;
+ }
+ }
+ if (reached_tier < required_top_tier && required_top_tier <= code->tier()) {
+ DCHECK_GT(outstanding_top_tier_functions_, 0);
+ outstanding_top_tier_functions_--;
+ if (outstanding_top_tier_functions_ == 0) {
+ completes_top_tier_compilation = true;
+ }
}
- // Determine whether we are reaching baseline or top tier with the given
- // code.
- uint32_t slot_index = code->index() - module->num_imported_functions;
- ExecutionTierPair requested_tiers = GetRequestedExecutionTiers(
- module, compile_mode(), enabled_features, func_index);
- DCHECK_EQ(highest_execution_tier_.size(), module->num_declared_functions);
- ExecutionTier prior_tier = highest_execution_tier_[slot_index];
- bool had_reached_baseline = prior_tier >= requested_tiers.baseline_tier;
- bool had_reached_top_tier = prior_tier >= requested_tiers.top_tier;
- DCHECK_IMPLIES(had_reached_baseline, prior_tier > ExecutionTier::kNone);
- bool reaches_baseline = !had_reached_baseline;
- bool reaches_top_tier =
- !had_reached_top_tier && code->tier() >= requested_tiers.top_tier;
- DCHECK_IMPLIES(reaches_baseline,
- code->tier() >= requested_tiers.baseline_tier);
- DCHECK_IMPLIES(reaches_top_tier, had_reached_baseline || reaches_baseline);
-
- // Remember compilation state before update.
- bool had_completed_baseline_compilation =
- outstanding_baseline_functions_ == 0;
- bool had_completed_top_tier_compilation =
- outstanding_top_tier_functions_ == 0;
-
- // Update compilation state.
- if (code->tier() > prior_tier) {
- highest_execution_tier_[slot_index] = code->tier();
+ // Update function's compilation progress.
+ if (code->tier() > reached_tier) {
+ compilation_progress_[slot_index] = ReachedTierField::update(
+ compilation_progress_[slot_index], code->tier());
}
- if (reaches_baseline) outstanding_baseline_functions_--;
- if (reaches_top_tier) outstanding_top_tier_functions_--;
DCHECK_LE(0, outstanding_baseline_functions_);
DCHECK_LE(outstanding_baseline_functions_, outstanding_top_tier_functions_);
- // Conclude if we are completing baseline or top tier compilation.
- bool completes_baseline_compilation = !had_completed_baseline_compilation &&
- outstanding_baseline_functions_ == 0;
- bool completes_top_tier_compilation = !had_completed_top_tier_compilation &&
- outstanding_top_tier_functions_ == 0;
- DCHECK_IMPLIES(
- completes_top_tier_compilation,
- had_completed_baseline_compilation || completes_baseline_compilation);
-
// Trigger callbacks.
if (completes_baseline_compilation) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "BaselineFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedBaselineCompilation);
}
}
if (completes_top_tier_compilation) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "TopTierFinished");
for (auto& callback : callbacks_) {
callback(CompilationEvent::kFinishedTopTierCompilation);
}
@@ -2080,7 +2337,6 @@ void CompilationStateImpl::SetError() {
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers) {
JSToWasmWrapperCache js_to_wasm_cache;
- int wrapper_index = 0;
// TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
// optimization we keep the code space unlocked to avoid repeated unlocking
@@ -2091,12 +2347,36 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
auto& function = module->functions[exp.index];
Handle<Code> wrapper_code = js_to_wasm_cache.GetOrCompileJSToWasmWrapper(
isolate, function.sig, function.imported);
+ int wrapper_index =
+ GetExportWrapperIndex(module, function.sig, function.imported);
+
export_wrappers->set(wrapper_index, *wrapper_code);
RecordStats(*wrapper_code, isolate->counters());
- ++wrapper_index;
}
}
+WasmCode* CompileImportWrapper(
+ WasmEngine* wasm_engine, NativeModule* native_module, Counters* counters,
+ compiler::WasmImportCallKind kind, FunctionSig* sig,
+ WasmImportWrapperCache::ModificationScope* cache_scope) {
+ // Entry should exist, so that we don't insert a new one and invalidate
+ // other threads' iterators/references, but it should not have been compiled
+ // yet.
+ WasmImportWrapperCache::CacheKey key(kind, sig);
+ DCHECK_NULL((*cache_scope)[key]);
+ bool source_positions = native_module->module()->origin == kAsmJsOrigin;
+ // Keep the {WasmCode} alive until we explicitly call {IncRef}.
+ WasmCodeRefScope code_ref_scope;
+ WasmCode* wasm_code = compiler::CompileWasmImportCallWrapper(
+ wasm_engine, native_module, kind, sig, source_positions);
+ (*cache_scope)[key] = wasm_code;
+ wasm_code->IncRef();
+ counters->wasm_generated_code_size()->Increment(
+ wasm_code->instructions().length());
+ counters->wasm_reloc_size()->Increment(wasm_code->reloc_info().length());
+ return wasm_code;
+}
+
Handle<Script> CreateWasmScript(Isolate* isolate,
const ModuleWireBytes& wire_bytes,
const std::string& source_map_url) {
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index cf5098f613..d465d6a322 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -9,10 +9,11 @@
#include <functional>
#include <memory>
-#include "src/cancelable-task.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/tasks/cancelable-task.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module.h"
namespace v8 {
@@ -46,12 +47,25 @@ V8_EXPORT_PRIVATE
void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
Handle<FixedArray> export_wrappers);
+// Compiles the wrapper for this (kind, sig) pair and sets the corresponding
+// cache entry. Assumes the key already exists in the cache but has not been
+// compiled yet.
+V8_EXPORT_PRIVATE
+WasmCode* CompileImportWrapper(
+ WasmEngine* wasm_engine, NativeModule* native_module, Counters* counters,
+ compiler::WasmImportCallKind kind, FunctionSig* sig,
+ WasmImportWrapperCache::ModificationScope* cache_scope);
+
V8_EXPORT_PRIVATE Handle<Script> CreateWasmScript(
Isolate* isolate, const ModuleWireBytes& wire_bytes,
const std::string& source_map_url);
-// Triggered by the WasmCompileLazy builtin.
-void CompileLazy(Isolate*, NativeModule*, uint32_t func_index);
+// Triggered by the WasmCompileLazy builtin. The return value indicates whether
+// compilation was successful. Lazy compilation can fail only if validation is
+// also lazy.
+bool CompileLazy(Isolate*, NativeModule*, int func_index);
+
+int GetMaxBackgroundTasks();
// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
@@ -64,7 +78,7 @@ class AsyncCompileJob {
public:
AsyncCompileJob(Isolate* isolate, const WasmFeatures& enabled_features,
std::unique_ptr<byte[]> bytes_copy, size_t length,
- Handle<Context> context,
+ Handle<Context> context, const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver);
~AsyncCompileJob();
@@ -146,7 +160,9 @@ class AsyncCompileJob {
void NextStep(Args&&... args);
Isolate* const isolate_;
+ const char* const api_method_name_;
const WasmFeatures enabled_features_;
+ const bool wasm_lazy_compilation_;
// Copy of the module wire bytes, moved into the {native_module_} on its
// creation.
std::unique_ptr<byte[]> bytes_copy_;
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index f27cdd59ab..4201b1e76c 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -7,11 +7,11 @@
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
#include "src/base/template-utils.h"
-#include "src/counters.h"
-#include "src/flags.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-#include "src/v8.h"
+#include "src/flags/flags.h"
+#include "src/init/v8.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/wasm-engine.h"
@@ -122,6 +122,8 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
return kWasmF64;
case WasmInitExpr::kRefNullConst:
return kWasmNullRef;
+ case WasmInitExpr::kRefFuncConst:
+ return kWasmAnyFunc;
default:
UNREACHABLE();
}
@@ -284,12 +286,12 @@ class ModuleDecoderImpl : public Decoder {
}
}
// File are named `HASH.{ok,failed}.wasm`.
- size_t hash = base::hash_range(module_bytes.start(), module_bytes.end());
+ size_t hash = base::hash_range(module_bytes.begin(), module_bytes.end());
EmbeddedVector<char, 32> buf;
SNPrintF(buf, "%016zx.%s.wasm", hash, ok() ? "ok" : "failed");
- std::string name(buf.start());
+ std::string name(buf.begin());
if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
- if (fwrite(module_bytes.start(), module_bytes.length(), 1, wasm_file) !=
+ if (fwrite(module_bytes.begin(), module_bytes.length(), 1, wasm_file) !=
1) {
OFStream os(stderr);
os << "Error while dumping wasm file" << std::endl;
@@ -365,8 +367,7 @@ class ModuleDecoderImpl : public Decoder {
if (failed()) return;
Reset(bytes, offset);
TRACE("Section: %s\n", SectionName(section_code));
- TRACE("Decode Section %p - %p\n", static_cast<const void*>(bytes.begin()),
- static_cast<const void*>(bytes.end()));
+ TRACE("Decode Section %p - %p\n", bytes.begin(), bytes.end());
// Check if the section is out-of-order.
if (section_code < next_ordered_section_ &&
@@ -807,7 +808,8 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos, "out of bounds table index %u", table_index);
break;
}
- if (module_->tables[table_index].type != kWasmAnyFunc) {
+ if (!ValueTypes::IsSubType(module_->tables[table_index].type,
+ kWasmAnyFunc)) {
errorf(pos,
"Invalid element segment. Table %u is not of type AnyFunc",
table_index);
@@ -815,7 +817,7 @@ class ModuleDecoderImpl : public Decoder {
}
} else {
ValueType type = consume_reference_type();
- if (type != kWasmAnyFunc) {
+ if (!ValueTypes::IsSubType(type, kWasmAnyFunc)) {
error(pc_ - 1, "invalid element segment type");
break;
}
@@ -1011,17 +1013,16 @@ class ModuleDecoderImpl : public Decoder {
// Decode sequence of compilation hints.
if (decoder.ok()) {
module_->compilation_hints.reserve(hint_count);
- module_->num_lazy_compilation_hints = 0;
}
for (uint32_t i = 0; decoder.ok() && i < hint_count; i++) {
TRACE("DecodeCompilationHints[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
// Compilation hints are encoded in one byte each.
- // +-------+----------+---------------+------------------+
- // | 2 bit | 2 bit | 2 bit | 2 bit |
- // | ... | Top tier | Baseline tier | Lazy compilation |
- // +-------+----------+---------------+------------------+
+ // +-------+----------+---------------+----------+
+ // | 2 bit | 2 bit | 2 bit | 2 bit |
+ // | ... | Top tier | Baseline tier | Strategy |
+ // +-------+----------+---------------+----------+
uint8_t hint_byte = decoder.consume_u8("compilation hint");
if (!decoder.ok()) break;
@@ -1034,13 +1035,6 @@ class ModuleDecoderImpl : public Decoder {
hint.top_tier =
static_cast<WasmCompilationHintTier>(hint_byte >> 4 & 0x3);
- // Check strategy.
- if (hint.strategy > WasmCompilationHintStrategy::kEager) {
- decoder.errorf(decoder.pc(),
- "Invalid compilation hint %#x (unknown strategy)",
- hint_byte);
- }
-
// Ensure that the top tier never downgrades a compilation result.
// If baseline and top tier are the same compilation will be invoked only
// once.
@@ -1053,9 +1047,6 @@ class ModuleDecoderImpl : public Decoder {
// Happily accept compilation hint.
if (decoder.ok()) {
- if (hint.strategy == WasmCompilationHintStrategy::kLazy) {
- module_->num_lazy_compilation_hints++;
- }
module_->compilation_hints.push_back(std::move(hint));
}
}
@@ -1063,7 +1054,6 @@ class ModuleDecoderImpl : public Decoder {
// If section was invalid reset compilation hints.
if (decoder.failed()) {
module_->compilation_hints.clear();
- module_->num_lazy_compilation_hints = 0;
}
// @TODO(frgossen) Skip the whole compilation hints section in the outer
@@ -1317,9 +1307,8 @@ class ModuleDecoderImpl : public Decoder {
const WasmModule* module, WasmFunction* function) {
WasmFunctionName func_name(function,
wire_bytes.GetNameOrNull(function, module));
- if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
- StdoutStream os;
- os << "Verifying wasm function " << func_name << std::endl;
+ if (FLAG_trace_wasm_decoder) {
+ StdoutStream{} << "Verifying wasm function " << func_name << std::endl;
}
FunctionBody body = {
function->sig, function->code.offset(),
@@ -1551,6 +1540,16 @@ class ModuleDecoderImpl : public Decoder {
}
V8_FALLTHROUGH;
}
+ case kExprRefFunc: {
+ if (enabled_features_.anyref) {
+ FunctionIndexImmediate<Decoder::kValidate> imm(this, pc() - 1);
+ expr.kind = WasmInitExpr::kRefFuncConst;
+ expr.val.function_index = imm.index;
+ len = imm.length;
+ break;
+ }
+ V8_FALLTHROUGH;
+ }
default: {
error("invalid opcode in initialization expression");
expr.kind = WasmInitExpr::kNone;
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 48b4129eb3..07d6e66019 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_MODULE_DECODER_H_
#define V8_WASM_MODULE_DECODER_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-features.h"
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index 4dc61a91bf..8293674826 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -5,11 +5,11 @@
#include "src/wasm/module-instantiate.h"
#include "src/asmjs/asm-js.h"
-#include "src/conversions-inl.h"
-#include "src/counters.h"
-#include "src/property-descriptor.h"
+#include "src/logging/counters.h"
+#include "src/numbers/conversions-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/tracing/trace-event.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
@@ -38,16 +38,76 @@ uint32_t EvalUint32InitExpr(Handle<WasmInstanceObject> instance,
case WasmInitExpr::kGlobalIndex: {
uint32_t offset =
instance->module()->globals[expr.val.global_index].offset;
- auto raw_addr =
- reinterpret_cast<Address>(
- instance->untagged_globals_buffer()->backing_store()) +
- offset;
+ auto raw_addr = reinterpret_cast<Address>(
+ instance->untagged_globals_buffer().backing_store()) +
+ offset;
return ReadLittleEndianValue<uint32_t>(raw_addr);
}
default:
UNREACHABLE();
}
}
+
+// Queue of import wrapper keys to compile for an instance.
+class ImportWrapperQueue {
+ public:
+ // Removes an arbitrary cache key from the queue and returns it.
+ // If the queue is empty, returns nullopt.
+ // Thread-safe.
+ base::Optional<WasmImportWrapperCache::CacheKey> pop() {
+ base::Optional<WasmImportWrapperCache::CacheKey> key = base::nullopt;
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ auto it = queue_.begin();
+ if (it != queue_.end()) {
+ key = *it;
+ queue_.erase(it);
+ }
+ return key;
+ }
+
+ // Add the given key to the queue.
+ // Not thread-safe.
+ void insert(const WasmImportWrapperCache::CacheKey& key) {
+ queue_.insert(key);
+ }
+
+ private:
+ base::Mutex mutex_;
+ std::unordered_set<WasmImportWrapperCache::CacheKey,
+ WasmImportWrapperCache::CacheKeyHash>
+ queue_;
+};
+
+class CompileImportWrapperTask final : public CancelableTask {
+ public:
+ CompileImportWrapperTask(
+ CancelableTaskManager* task_manager, WasmEngine* engine,
+ Counters* counters, NativeModule* native_module,
+ ImportWrapperQueue* queue,
+ WasmImportWrapperCache::ModificationScope* cache_scope)
+ : CancelableTask(task_manager),
+ engine_(engine),
+ counters_(counters),
+ native_module_(native_module),
+ queue_(queue),
+ cache_scope_(cache_scope) {}
+
+ void RunInternal() override {
+ while (base::Optional<WasmImportWrapperCache::CacheKey> key =
+ queue_->pop()) {
+ CompileImportWrapper(engine_, native_module_, counters_, key->first,
+ key->second, cache_scope_);
+ }
+ }
+
+ private:
+ WasmEngine* const engine_;
+ Counters* const counters_;
+ NativeModule* const native_module_;
+ ImportWrapperQueue* const queue_;
+ WasmImportWrapperCache::ModificationScope* const cache_scope_;
+};
+
} // namespace
// A helper class to simplify instantiating a module from a module object.
@@ -140,6 +200,11 @@ class InstanceBuilder {
Handle<String> import_name,
Handle<Object> value);
+ // Initialize imported tables of type anyfunc.
+ bool InitializeImportedIndirectFunctionTable(
+ Handle<WasmInstanceObject> instance, int import_index,
+ Handle<WasmTableObject> table_object);
+
// Process a single imported table.
bool ProcessImportedTable(Handle<WasmInstanceObject> instance,
int import_index, int table_index,
@@ -165,6 +230,10 @@ class InstanceBuilder {
const WasmGlobal& global,
Handle<WasmGlobalObject> global_object);
+ // Compile import wrappers in parallel. The result goes into the native
+ // module's import_wrapper_cache.
+ void CompileImportWrappers(Handle<WasmInstanceObject> instance);
+
// Process the imports, including functions, tables, globals, and memory, in
// order, loading them from the {ffi_} object. Returns the number of imported
// functions.
@@ -174,7 +243,7 @@ class InstanceBuilder {
T* GetRawGlobalPtr(const WasmGlobal& global);
// Process initialization of globals.
- void InitGlobals();
+ void InitGlobals(Handle<WasmInstanceObject> instance);
// Allocate memory for a module instance as a new JSArrayBuffer.
Handle<JSArrayBuffer> AllocateMemory(uint32_t initial_pages,
@@ -371,7 +440,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
//--------------------------------------------------------------------------
// Process the initialization for the module's globals.
//--------------------------------------------------------------------------
- InitGlobals();
+ InitGlobals(instance);
//--------------------------------------------------------------------------
// Initialize the indirect tables.
@@ -424,10 +493,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
uint32_t base = EvalUint32InitExpr(instance, elem_segment.offset);
// Because of imported tables, {table_size} has to come from the table
// object itself.
- auto table_object = handle(WasmTableObject::cast(instance->tables()->get(
+ auto table_object = handle(WasmTableObject::cast(instance->tables().get(
elem_segment.table_index)),
isolate_);
- size_t table_size = table_object->elements()->length();
+ size_t table_size = table_object->entries().length();
if (!IsInBounds(base, elem_segment.entries.size(), table_size)) {
thrower_->LinkError("table initializer is out of bounds");
return {};
@@ -487,7 +556,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// TODO(clemensh): Don't generate an exported function for the start
// function. Use CWasmEntry instead.
start_function_ = WasmExportedFunction::New(
- isolate_, instance, MaybeHandle<String>(), start_index,
+ isolate_, instance, start_index,
static_cast<int>(function.sig->parameter_count()), wrapper_code);
}
@@ -601,7 +670,7 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
static_cast<uint32_t>(instance->memory_size()));
Address dest_addr =
reinterpret_cast<Address>(instance->memory_start()) + dest_offset;
- Address src_addr = reinterpret_cast<Address>(wire_bytes.start()) +
+ Address src_addr = reinterpret_cast<Address>(wire_bytes.begin()) +
segment.source.offset();
memory_copy_wrapper(dest_addr, src_addr, size);
if (!ok) {
@@ -616,7 +685,7 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
DCHECK(IsInBounds(dest_offset, size, instance->memory_size()));
byte* dest = instance->memory_start() + dest_offset;
- const byte* src = wire_bytes.start() + segment.source.offset();
+ const byte* src = wire_bytes.begin() + segment.source.offset();
memcpy(dest, src, size);
}
}
@@ -624,8 +693,8 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
- reinterpret_cast<void*>(raw_buffer_ptr(untagged_globals_, 0)),
- global.offset, num, ValueTypes::TypeName(global.type));
+ raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
+ ValueTypes::TypeName(global.type));
switch (global.type) {
case kWasmI32:
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
@@ -636,7 +705,6 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
// only be initialized with BigInts. See:
// https://github.com/WebAssembly/JS-BigInt-integration/issues/12
UNREACHABLE();
- break;
case kWasmF32:
WriteLittleEndianValue<float>(GetRawGlobalPtr<float>(global),
DoubleToFloat32(num));
@@ -651,16 +719,15 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, int64_t num) {
TRACE("init [globals_start=%p + %u] = %" PRId64 ", type = %s\n",
- reinterpret_cast<void*>(raw_buffer_ptr(untagged_globals_, 0)),
- global.offset, num, ValueTypes::TypeName(global.type));
+ raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
+ ValueTypes::TypeName(global.type));
DCHECK_EQ(kWasmI64, global.type);
WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
}
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
Handle<WasmGlobalObject> value) {
- TRACE("init [globals_start=%p + %u] = ",
- reinterpret_cast<void*>(raw_buffer_ptr(untagged_globals_, 0)),
+ TRACE("init [globals_start=%p + %u] = ", raw_buffer_ptr(untagged_globals_, 0),
global.offset);
switch (global.type) {
case kWasmI32: {
@@ -687,6 +754,12 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
TRACE("%lf", num);
break;
}
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef: {
+ tagged_globals_->set(global.offset, *value->GetRef());
+ break;
+ }
default:
UNREACHABLE();
}
@@ -785,13 +858,35 @@ bool InstanceBuilder::ProcessImportedFunction(
Address imported_target = imported_function->GetWasmCallTarget();
ImportedFunctionEntry entry(instance, func_index);
entry.SetWasmToWasm(*imported_instance, imported_target);
+ // Also store the {WasmExportedFunction} in the instance to preserve its
+ // identity.
+ WasmInstanceObject::SetWasmExportedFunction(
+ isolate_, instance, func_index, imported_function);
+ break;
+ }
+ case compiler::WasmImportCallKind::kWasmToCapi: {
+ NativeModule* native_module = instance->module_object().native_module();
+ Address host_address = WasmCapiFunction::cast(*value).GetHostCallTarget();
+ WasmCodeRefScope code_ref_scope;
+ WasmCode* wasm_code = compiler::CompileWasmCapiCallWrapper(
+ isolate_->wasm_engine(), native_module, expected_sig, host_address);
+ isolate_->counters()->wasm_generated_code_size()->Increment(
+ wasm_code->instructions().length());
+ isolate_->counters()->wasm_reloc_size()->Increment(
+ wasm_code->reloc_info().length());
+
+ ImportedFunctionEntry entry(instance, func_index);
+ // We re-use the SetWasmToJs infrastructure because it passes the
+ // callable to the wrapper, which we need to get the function data.
+ entry.SetWasmToJs(isolate_, js_receiver, wasm_code);
break;
}
default: {
// The imported function is a callable.
- NativeModule* native_module = instance->module_object()->native_module();
- WasmCode* wasm_code = native_module->import_wrapper_cache()->GetOrCompile(
- isolate_->wasm_engine(), isolate_->counters(), kind, expected_sig);
+ NativeModule* native_module = instance->module_object().native_module();
+ WasmCode* wasm_code =
+ native_module->import_wrapper_cache()->Get(kind, expected_sig);
+ DCHECK_NOT_NULL(wasm_code);
ImportedFunctionEntry entry(instance, func_index);
if (wasm_code->kind() == WasmCode::kWasmToJsWrapper) {
// Wasm to JS wrappers are treated specially in the import table.
@@ -808,6 +903,49 @@ bool InstanceBuilder::ProcessImportedFunction(
return true;
}
+bool InstanceBuilder::InitializeImportedIndirectFunctionTable(
+ Handle<WasmInstanceObject> instance, int import_index,
+ Handle<WasmTableObject> table_object) {
+ int imported_table_size = table_object->entries().length();
+ // Allocate a new dispatch table.
+ if (!instance->has_indirect_function_table()) {
+ WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
+ instance, imported_table_size);
+ }
+ // Initialize the dispatch table with the (foreign) JS functions
+ // that are already in the table.
+ for (int i = 0; i < imported_table_size; ++i) {
+ bool is_valid;
+ bool is_null;
+ MaybeHandle<WasmInstanceObject> maybe_target_instance;
+ int function_index;
+ WasmTableObject::GetFunctionTableEntry(isolate_, table_object, i, &is_valid,
+ &is_null, &maybe_target_instance,
+ &function_index);
+ if (!is_valid) {
+ thrower_->LinkError("table import %d[%d] is not a wasm function",
+ import_index, i);
+ return false;
+ }
+ if (is_null) continue;
+
+ Handle<WasmInstanceObject> target_instance =
+ maybe_target_instance.ToHandleChecked();
+ FunctionSig* sig = target_instance->module_object()
+ .module()
+ ->functions[function_index]
+ .sig;
+
+ // Look up the signature's canonical id. If there is no canonical
+ // id, then the signature does not appear at all in this module,
+ // so putting {-1} in the table will cause checks to always fail.
+ IndirectFunctionTableEntry(instance, i)
+ .Set(module_->signature_map.Find(*sig), target_instance,
+ function_index);
+ }
+ return true;
+}
+
bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
int import_index, int table_index,
Handle<String> module_name,
@@ -820,10 +958,10 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
}
const WasmTable& table = module_->tables[table_index];
- instance->tables()->set(table_index, *value);
+ instance->tables().set(table_index, *value);
auto table_object = Handle<WasmTableObject>::cast(value);
- int imported_table_size = table_object->elements().length();
+ int imported_table_size = table_object->entries().length();
if (imported_table_size < static_cast<int>(table.initial_size)) {
thrower_->LinkError("table import %d is smaller than initial %d, got %u",
import_index, table.initial_size, imported_table_size);
@@ -831,12 +969,12 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
}
if (table.has_maximum_size) {
- if (table_object->maximum_length()->IsUndefined(isolate_)) {
+ if (table_object->maximum_length().IsUndefined(isolate_)) {
thrower_->LinkError("table import %d has no maximum length, expected %d",
import_index, table.maximum_size);
return false;
}
- int64_t imported_maximum_size = table_object->maximum_length()->Number();
+ int64_t imported_maximum_size = table_object->maximum_length().Number();
if (imported_maximum_size < 0) {
thrower_->LinkError("table import %d has no maximum length, expected %d",
import_index, table.maximum_size);
@@ -851,42 +989,19 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
}
}
- // Allocate a new dispatch table.
- if (!instance->has_indirect_function_table()) {
- WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
- instance, imported_table_size);
+ if (table.type != table_object->type()) {
+ ReportLinkError("imported table does not match the expected type",
+ import_index, module_name, import_name);
+ return false;
}
- // Initialize the dispatch table with the (foreign) JS functions
- // that are already in the table.
- for (int i = 0; i < imported_table_size; ++i) {
- bool is_valid;
- bool is_null;
- MaybeHandle<WasmInstanceObject> maybe_target_instance;
- int function_index;
- WasmTableObject::GetFunctionTableEntry(isolate_, table_object, i, &is_valid,
- &is_null, &maybe_target_instance,
- &function_index);
- if (!is_valid) {
- thrower_->LinkError("table import %d[%d] is not a wasm function",
- import_index, i);
- return false;
- }
- if (is_null) continue;
- Handle<WasmInstanceObject> target_instance =
- maybe_target_instance.ToHandleChecked();
- FunctionSig* sig = target_instance->module_object()
- ->module()
- ->functions[function_index]
- .sig;
-
- // Look up the signature's canonical id. If there is no canonical
- // id, then the signature does not appear at all in this module,
- // so putting {-1} in the table will cause checks to always fail.
- IndirectFunctionTableEntry(instance, i)
- .Set(module_->signature_map.Find(*sig), target_instance,
- function_index);
+ // The indirect function table only exists for table 0.
+ if (table.type == kWasmAnyFunc && table_index == 0 &&
+ !InitializeImportedIndirectFunctionTable(instance, import_index,
+ table_object)) {
+ return false;
}
+
return true;
}
@@ -947,13 +1062,18 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
Handle<WasmInstanceObject> instance, int import_index,
Handle<String> module_name, Handle<String> import_name,
const WasmGlobal& global, Handle<WasmGlobalObject> global_object) {
- if (global_object->type() != global.type) {
- ReportLinkError("imported global does not match the expected type",
+ if (global_object->is_mutable() != global.mutability) {
+ ReportLinkError("imported global does not match the expected mutability",
import_index, module_name, import_name);
return false;
}
- if (global_object->is_mutable() != global.mutability) {
- ReportLinkError("imported global does not match the expected mutability",
+
+ bool is_sub_type = ValueTypes::IsSubType(global.type, global_object->type());
+ bool is_same_type = global_object->type() == global.type;
+ bool valid_type = global.mutability ? is_same_type : is_sub_type;
+
+ if (!valid_type) {
+ ReportLinkError("imported global does not match the expected type",
import_index, module_name, import_name);
return false;
}
@@ -976,7 +1096,7 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
address_or_offset = reinterpret_cast<Address>(raw_buffer_ptr(
Handle<JSArrayBuffer>::cast(buffer), global_object->offset()));
}
- instance->imported_mutable_globals_buffers()->set(global.index, *buffer);
+ instance->imported_mutable_globals_buffers().set(global.index, *buffer);
instance->imported_mutable_globals()[global.index] = address_or_offset;
return true;
}
@@ -1075,6 +1195,62 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
return false;
}
+void InstanceBuilder::CompileImportWrappers(
+ Handle<WasmInstanceObject> instance) {
+ int num_imports = static_cast<int>(module_->import_table.size());
+ NativeModule* native_module = instance->module_object().native_module();
+ WasmImportWrapperCache::ModificationScope cache_scope(
+ native_module->import_wrapper_cache());
+
+ // Compilation is done in two steps:
+ // 1) Insert nullptr entries in the cache for wrappers that need to be
+ // compiled. 2) Compile wrappers in background tasks using the
+ // ImportWrapperQueue. This way the cache won't invalidate other iterators
+ // when inserting a new WasmCode, since the key will already be there.
+ ImportWrapperQueue import_wrapper_queue;
+ for (int index = 0; index < num_imports; ++index) {
+ Handle<Object> value = sanitized_imports_[index].value;
+ if (module_->import_table[index].kind != kExternalFunction ||
+ !value->IsCallable()) {
+ continue;
+ }
+ auto js_receiver = Handle<JSReceiver>::cast(value);
+ uint32_t func_index = module_->import_table[index].index;
+ FunctionSig* sig = module_->functions[func_index].sig;
+ auto kind =
+ compiler::GetWasmImportCallKind(js_receiver, sig, enabled_.bigint);
+ if (kind == compiler::WasmImportCallKind::kWasmToWasm ||
+ kind == compiler::WasmImportCallKind::kLinkError ||
+ kind == compiler::WasmImportCallKind::kWasmToCapi) {
+ continue;
+ }
+ WasmImportWrapperCache::CacheKey key(kind, sig);
+ if (cache_scope[key] != nullptr) {
+ // Cache entry already exists, no need to compile it again.
+ continue;
+ }
+ import_wrapper_queue.insert(key);
+ }
+
+ CancelableTaskManager task_manager;
+ const int max_background_tasks = GetMaxBackgroundTasks();
+ for (int i = 0; i < max_background_tasks; ++i) {
+ auto task = base::make_unique<CompileImportWrapperTask>(
+ &task_manager, isolate_->wasm_engine(), isolate_->counters(),
+ native_module, &import_wrapper_queue, &cache_scope);
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
+ }
+
+ // Also compile in the current thread, in case there are no worker threads.
+ while (base::Optional<WasmImportWrapperCache::CacheKey> key =
+ import_wrapper_queue.pop()) {
+ CompileImportWrapper(isolate_->wasm_engine(), native_module,
+ isolate_->counters(), key->first, key->second,
+ &cache_scope);
+ }
+ task_manager.CancelAndWait();
+}
+
// Process the imports, including functions, tables, globals, and memory, in
// order, loading them from the {ffi_} object. Returns the number of imported
// functions.
@@ -1083,6 +1259,8 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
int num_imported_tables = 0;
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
+
+ CompileImportWrappers(instance);
int num_imports = static_cast<int>(module_->import_table.size());
for (int index = 0; index < num_imports; ++index) {
const WasmImport& import = module_->import_table[index];
@@ -1141,8 +1319,8 @@ int InstanceBuilder::ProcessImports(Handle<WasmInstanceObject> instance) {
return -1;
}
Object exception_tag = imported_exception->exception_tag();
- DCHECK(instance->exceptions_table()->get(import.index)->IsUndefined());
- instance->exceptions_table()->set(import.index, exception_tag);
+ DCHECK(instance->exceptions_table().get(import.index).IsUndefined());
+ instance->exceptions_table().set(import.index, exception_tag);
exception_wrappers_[import.index] = imported_exception;
break;
}
@@ -1160,7 +1338,7 @@ T* InstanceBuilder::GetRawGlobalPtr(const WasmGlobal& global) {
}
// Process initialization of globals.
-void InstanceBuilder::InitGlobals() {
+void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
for (auto global : module_->globals) {
if (global.mutability && global.imported) {
continue;
@@ -1191,6 +1369,13 @@ void InstanceBuilder::InitGlobals() {
ReadOnlyRoots(isolate_).null_value(),
SKIP_WRITE_BARRIER);
break;
+ case WasmInitExpr::kRefFuncConst: {
+ DCHECK(enabled_.anyref);
+ auto function = WasmInstanceObject::GetOrCreateWasmExportedFunction(
+ isolate_, instance, global.init.val.function_index);
+ tagged_globals_->set(global.offset, *function);
+ break;
+ }
case WasmInitExpr::kGlobalIndex: {
// Initialize with another global.
uint32_t new_offset = global.offset;
@@ -1254,8 +1439,6 @@ bool InstanceBuilder::NeedsWrappers() const {
// Process the exports, creating wrappers for functions, tables, memories,
// globals, and exceptions.
void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
- Handle<FixedArray> export_wrappers(module_object_->export_wrappers(),
- isolate_);
if (NeedsWrappers()) {
// If an imported WebAssembly function gets exported, the exported function
// has to be identical to to imported function. Therefore we cache all
@@ -1303,7 +1486,6 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
desc.set_configurable(is_asm_js);
// Process each export in the export table.
- int export_index = 0; // Index into {export_wrappers}.
for (const WasmExport& exp : module_->export_table) {
Handle<String> name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
isolate_, module_object_, exp.name)
@@ -1320,37 +1502,15 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
case kExternalFunction: {
// Wrap and export the code as a JSFunction.
// TODO(wasm): reduce duplication with LoadElemSegment() further below
- const WasmFunction& function = module_->functions[exp.index];
MaybeHandle<WasmExportedFunction> wasm_exported_function =
- WasmInstanceObject::GetWasmExportedFunction(isolate_, instance,
- exp.index);
- if (wasm_exported_function.is_null()) {
- // Wrap the exported code as a JSFunction.
- Handle<Code> export_code =
- export_wrappers->GetValueChecked<Code>(isolate_, export_index);
- MaybeHandle<String> func_name;
- if (is_asm_js) {
- // For modules arising from asm.js, honor the names section.
- WireBytesRef func_name_ref = module_->LookupFunctionName(
- ModuleWireBytes(module_object_->native_module()->wire_bytes()),
- function.func_index);
- func_name = WasmModuleObject::ExtractUtf8StringFromModuleBytes(
- isolate_, module_object_, func_name_ref)
- .ToHandleChecked();
- }
- wasm_exported_function = WasmExportedFunction::New(
- isolate_, instance, func_name, function.func_index,
- static_cast<int>(function.sig->parameter_count()), export_code);
- WasmInstanceObject::SetWasmExportedFunction(
- isolate_, instance, exp.index,
- wasm_exported_function.ToHandleChecked());
- }
+ WasmInstanceObject::GetOrCreateWasmExportedFunction(
+ isolate_, instance, exp.index);
+
desc.set_value(wasm_exported_function.ToHandleChecked());
- export_index++;
break;
}
case kExternalTable: {
- desc.set_value(handle(instance->tables()->get(exp.index), isolate_));
+ desc.set_value(handle(instance->tables().get(exp.index), isolate_));
break;
}
case kExternalMemory: {
@@ -1372,8 +1532,8 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<FixedArray> buffers_array(
instance->imported_mutable_globals_buffers(), isolate_);
if (ValueTypes::IsReferenceType(global.type)) {
- tagged_buffer = buffers_array->GetValueChecked<FixedArray>(
- isolate_, global.index);
+ tagged_buffer = handle(
+ FixedArray::cast(buffers_array->get(global.index)), isolate_);
// For anyref globals we store the relative offset in the
// imported_mutable_globals array instead of an absolute address.
Address addr = instance->imported_mutable_globals()[global.index];
@@ -1381,8 +1541,9 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
std::numeric_limits<uint32_t>::max()));
offset = static_cast<uint32_t>(addr);
} else {
- untagged_buffer = buffers_array->GetValueChecked<JSArrayBuffer>(
- isolate_, global.index);
+ untagged_buffer =
+ handle(JSArrayBuffer::cast(buffers_array->get(global.index)),
+ isolate_);
Address global_addr =
instance->imported_mutable_globals()[global.index];
@@ -1417,7 +1578,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
Handle<WasmExceptionObject> wrapper = exception_wrappers_[exp.index];
if (wrapper.is_null()) {
Handle<HeapObject> exception_tag(
- HeapObject::cast(instance->exceptions_table()->get(exp.index)),
+ HeapObject::cast(instance->exceptions_table().get(exp.index)),
isolate_);
wrapper =
WasmExceptionObject::New(isolate_, exception.sig, exception_tag);
@@ -1441,7 +1602,6 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
return;
}
}
- DCHECK_EQ(export_index, export_wrappers->length());
if (module_->origin == kWasmOrigin) {
v8::Maybe<bool> success =
@@ -1472,7 +1632,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
// for both instantiation and in the implementation of the table.init
// instruction.
bool ok =
- ClampToBounds<size_t>(dst, &count, table_object->elements()->length());
+ ClampToBounds<size_t>(dst, &count, table_object->entries().length());
// Use & instead of && so the clamp is not short-circuited.
ok &= ClampToBounds<size_t>(src, &count, elem_segment.entries.size());
@@ -1482,7 +1642,9 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
int entry_index = static_cast<int>(dst + i);
if (func_index == WasmElemSegment::kNullIndex) {
- IndirectFunctionTableEntry(instance, entry_index).clear();
+ if (table_object->type() == kWasmAnyFunc) {
+ IndirectFunctionTableEntry(instance, entry_index).clear();
+ }
WasmTableObject::Set(isolate, table_object, entry_index,
isolate->factory()->null_value());
continue;
@@ -1490,28 +1652,44 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
const WasmFunction* function = &module->functions[func_index];
- // Update the local dispatch table first.
- uint32_t sig_id = module->signature_ids[function->sig_index];
- IndirectFunctionTableEntry(instance, entry_index)
- .Set(sig_id, instance, func_index);
-
- // Update the table object's other dispatch tables.
- MaybeHandle<WasmExportedFunction> wasm_exported_function =
- WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
- func_index);
- if (wasm_exported_function.is_null()) {
- // No JSFunction entry yet exists for this function. Create a {Tuple2}
- // holding the information to lazily allocate one.
- WasmTableObject::SetFunctionTablePlaceholder(
- isolate, table_object, entry_index, instance, func_index);
+ // Update the local dispatch table first if necessary. We only have to
+ // update the dispatch table if the first table of the instance is changed.
+ // For all other tables, function calls do not use a dispatch table at
+ // the moment.
+ if (elem_segment.table_index == 0 && table_object->type() == kWasmAnyFunc) {
+ uint32_t sig_id = module->signature_ids[function->sig_index];
+ IndirectFunctionTableEntry(instance, entry_index)
+ .Set(sig_id, instance, func_index);
+ }
+
+ // For AnyRef tables, we have to generate the WasmExportedFunction eagerly.
+ // Later we cannot know if an entry is a placeholder or not.
+ if (table_object->type() == kWasmAnyRef) {
+ Handle<WasmExportedFunction> wasm_exported_function =
+ WasmInstanceObject::GetOrCreateWasmExportedFunction(isolate, instance,
+ func_index);
+ WasmTableObject::Set(isolate, table_object, entry_index,
+ wasm_exported_function);
} else {
- table_object->elements()->set(entry_index,
+ // Update the table object's other dispatch tables.
+ MaybeHandle<WasmExportedFunction> wasm_exported_function =
+ WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
+ func_index);
+ if (wasm_exported_function.is_null()) {
+ // No JSFunction entry yet exists for this function. Create a {Tuple2}
+ // holding the information to lazily allocate one.
+ WasmTableObject::SetFunctionTablePlaceholder(
+ isolate, table_object, entry_index, instance, func_index);
+ } else {
+ table_object->entries().set(entry_index,
*wasm_exported_function.ToHandleChecked());
+ }
+ // UpdateDispatchTables() updates all other dispatch tables, since
+ // we have not yet added the dispatch table we are currently building.
+ WasmTableObject::UpdateDispatchTables(isolate, table_object, entry_index,
+ function->sig, instance,
+ func_index);
}
- // UpdateDispatchTables() updates all other dispatch tables, since
- // we have not yet added the dispatch table we are currently building.
- WasmTableObject::UpdateDispatchTables(isolate, table_object, entry_index,
- function->sig, instance, func_index);
}
return ok;
}
@@ -1528,7 +1706,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
bool success = LoadElemSegmentImpl(
isolate_, instance,
handle(WasmTableObject::cast(
- instance->tables()->get(elem_segment.table_index)),
+ instance->tables().get(elem_segment.table_index)),
isolate_),
elem_segment, dst, src, count);
if (enabled_.bulk_memory) {
@@ -1548,7 +1726,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
for (int index = 0; index < table_count; ++index) {
if (module_->tables[index].type == kWasmAnyFunc) {
auto table_object = handle(
- WasmTableObject::cast(instance->tables()->get(index)), isolate_);
+ WasmTableObject::cast(instance->tables().get(index)), isolate_);
// Add the new dispatch table at the end to avoid redundant lookups.
WasmTableObject::AddDispatchTable(isolate_, table_object, instance,
@@ -1561,7 +1739,7 @@ void InstanceBuilder::InitializeExceptions(
Handle<WasmInstanceObject> instance) {
Handle<FixedArray> exceptions_table(instance->exceptions_table(), isolate_);
for (int index = 0; index < exceptions_table->length(); ++index) {
- if (!exceptions_table->get(index)->IsUndefined(isolate_)) continue;
+ if (!exceptions_table->get(index).IsUndefined(isolate_)) continue;
Handle<WasmExceptionTag> exception_tag =
WasmExceptionTag::New(isolate_, index);
exceptions_table->set(index, *exception_tag);
@@ -1571,10 +1749,17 @@ void InstanceBuilder::InitializeExceptions(
bool LoadElemSegment(Isolate* isolate, Handle<WasmInstanceObject> instance,
uint32_t table_index, uint32_t segment_index, uint32_t dst,
uint32_t src, uint32_t count) {
+ // This code path is only used for passive element segments with the
+ // table.init instruction. This instruction was introduced in the
+ // bulk-memory-operations proposal. At the moment, table.init can only operate
+ // on table-0. If table.init should work for tables with higher indices, then
+ // we have to adjust the code in {LoadElemSegmentImpl}. The code there uses
+ // {IndirectFunctionTableEntry} at the moment, which only works for table-0.
+ CHECK_EQ(table_index, 0);
auto& elem_segment = instance->module()->elem_segments[segment_index];
return LoadElemSegmentImpl(
isolate, instance,
- handle(WasmTableObject::cast(instance->tables()->get(table_index)),
+ handle(WasmTableObject::cast(instance->tables().get(table_index)),
isolate),
elem_segment, dst, src, count);
}
diff --git a/deps/v8/src/wasm/object-access.h b/deps/v8/src/wasm/object-access.h
index 0f4a4d447d..38f0f58a00 100644
--- a/deps/v8/src/wasm/object-access.h
+++ b/deps/v8/src/wasm/object-access.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_OBJECT_ACCESS_H_
#define V8_WASM_OBJECT_ACCESS_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/shared-function-info.h"
diff --git a/deps/v8/src/wasm/signature-map.cc b/deps/v8/src/wasm/signature-map.cc
index 5f494aca62..5d449a9ee7 100644
--- a/deps/v8/src/wasm/signature-map.cc
+++ b/deps/v8/src/wasm/signature-map.cc
@@ -4,7 +4,7 @@
#include "src/wasm/signature-map.h"
-#include "src/signature.h"
+#include "src/codegen/signature.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/signature-map.h b/deps/v8/src/wasm/signature-map.h
index 04c6b2efa5..d947dcd26d 100644
--- a/deps/v8/src/wasm/signature-map.h
+++ b/deps/v8/src/wasm/signature-map.h
@@ -7,7 +7,7 @@
#include <unordered_map>
-#include "src/signature.h"
+#include "src/codegen/signature.h"
#include "src/wasm/value-type.h"
namespace v8 {
@@ -34,6 +34,10 @@ class V8_EXPORT_PRIVATE SignatureMap {
// Disallows further insertions to this signature map.
void Freeze() { frozen_ = true; }
+ size_t size() const { return map_.size(); }
+
+ bool is_frozen() const { return frozen_; }
+
private:
bool frozen_ = false;
std::unordered_map<FunctionSig, uint32_t, base::hash<FunctionSig>> map_;
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
index 6c0403fcb4..94945ea58a 100644
--- a/deps/v8/src/wasm/streaming-decoder.cc
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -5,10 +5,10 @@
#include "src/wasm/streaming-decoder.h"
#include "src/base/template-utils.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/dictionary.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/decoder.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-decoder.h"
@@ -56,7 +56,7 @@ size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
Vector<uint8_t> remaining_buf = buffer() + offset();
size_t num_bytes = std::min(bytes.size(), remaining_buf.size());
TRACE_STREAMING("ReadBytes(%zu bytes)\n", num_bytes);
- memcpy(remaining_buf.start(), &bytes.first(), num_bytes);
+ memcpy(remaining_buf.begin(), &bytes.first(), num_bytes);
set_offset(offset() + num_bytes);
return num_bytes;
}
@@ -94,7 +94,7 @@ void StreamingDecoder::Finish() {
}
for (const auto& buffer : section_buffers_) {
DCHECK_LE(cursor - bytes.start() + buffer->length(), total_size_);
- memcpy(cursor, buffer->bytes().start(), buffer->length());
+ memcpy(cursor, buffer->bytes().begin(), buffer->length());
cursor += buffer->length();
}
processor_->OnFinishedStream(std::move(bytes));
@@ -317,14 +317,14 @@ size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
Vector<uint8_t> remaining_buf = buf + offset();
size_t new_bytes = std::min(bytes.size(), remaining_buf.size());
TRACE_STREAMING("ReadBytes of a VarInt\n");
- memcpy(remaining_buf.start(), &bytes.first(), new_bytes);
+ memcpy(remaining_buf.begin(), &bytes.first(), new_bytes);
buf.Truncate(offset() + new_bytes);
Decoder decoder(buf,
streaming->module_offset() - static_cast<uint32_t>(offset()));
value_ = decoder.consume_u32v(field_name_);
// The number of bytes we actually needed to read.
- DCHECK_GT(decoder.pc(), buffer().start());
- bytes_consumed_ = static_cast<size_t>(decoder.pc() - buf.start());
+ DCHECK_GT(decoder.pc(), buffer().begin());
+ bytes_consumed_ = static_cast<size_t>(decoder.pc() - buf.begin());
TRACE_STREAMING(" ==> %zu bytes consumed\n", bytes_consumed_);
if (decoder.failed()) {
@@ -427,7 +427,7 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
if (payload_buf.size() < bytes_consumed_) {
return streaming->Error("invalid code section length");
}
- memcpy(payload_buf.start(), buffer().start(), bytes_consumed_);
+ memcpy(payload_buf.begin(), buffer().begin(), bytes_consumed_);
// {value} is the number of functions.
if (value_ == 0) {
@@ -455,7 +455,7 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
if (fun_length_buffer.size() < bytes_consumed_) {
return streaming->Error("read past code section end");
}
- memcpy(fun_length_buffer.start(), buffer().start(), bytes_consumed_);
+ memcpy(fun_length_buffer.begin(), buffer().begin(), bytes_consumed_);
// {value} is the length of the function.
if (value_ == 0) return streaming->Error("invalid function length (0)");
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
index 4df6b1d32f..0d680a2df7 100644
--- a/deps/v8/src/wasm/streaming-decoder.h
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -9,7 +9,7 @@
#include <vector>
#include "src/base/macros.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-result.h"
diff --git a/deps/v8/src/wasm/value-type.h b/deps/v8/src/wasm/value-type.h
index b40a337ca0..49fd2892eb 100644
--- a/deps/v8/src/wasm/value-type.h
+++ b/deps/v8/src/wasm/value-type.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_VALUE_TYPE_H_
#define V8_WASM_VALUE_TYPE_H_
-#include "src/machine-type.h"
+#include "src/codegen/machine-type.h"
#include "src/wasm/wasm-constants.h"
namespace v8 {
@@ -299,6 +299,7 @@ class V8_EXPORT_PRIVATE ValueTypes {
return MachineRepresentation::kFloat64;
case kWasmAnyRef:
case kWasmAnyFunc:
+ case kWasmNullRef:
case kWasmExceptRef:
return MachineRepresentation::kTaggedPointer;
case kWasmS128:
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index c874aa0f69..2eddce3d95 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -6,20 +6,21 @@
#include <iomanip>
-#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/counters.h"
-#include "src/disassembler.h"
-#include "src/globals.h"
-#include "src/log.h"
-#include "src/macro-assembler-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-#include "src/snapshot/embedded-data.h"
-#include "src/vector.h"
+#include "src/base/small-vector.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/common/globals.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/logging/counters.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
@@ -29,7 +30,7 @@
#include "src/wasm/wasm-objects.h"
#if defined(V8_OS_WIN_X64)
-#include "src/unwinding-info-win64.h"
+#include "src/diagnostics/unwinding-info-win64.h"
#endif
#define TRACE_HEAP(...) \
@@ -43,7 +44,7 @@ namespace wasm {
using trap_handler::ProtectedInstructionData;
-void DisjointAllocationPool::Merge(base::AddressRegion region) {
+base::AddressRegion DisjointAllocationPool::Merge(base::AddressRegion region) {
auto dest_it = regions_.begin();
auto dest_end = regions_.end();
@@ -53,7 +54,7 @@ void DisjointAllocationPool::Merge(base::AddressRegion region) {
// After last dest region: insert and done.
if (dest_it == dest_end) {
regions_.push_back(region);
- return;
+ return region;
}
// Adjacent (from below) to dest: merge and done.
@@ -62,13 +63,13 @@ void DisjointAllocationPool::Merge(base::AddressRegion region) {
region.size() + dest_it->size()};
DCHECK_EQ(merged_region.end(), dest_it->end());
*dest_it = merged_region;
- return;
+ return merged_region;
}
// Before dest: insert and done.
if (dest_it->begin() > region.end()) {
regions_.insert(dest_it, region);
- return;
+ return region;
}
// Src is adjacent from above. Merge and check whether the merged region is
@@ -83,6 +84,7 @@ void DisjointAllocationPool::Merge(base::AddressRegion region) {
DCHECK_EQ(dest_it->end(), next_dest->end());
regions_.erase(next_dest);
}
+ return *dest_it;
}
base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {
@@ -108,6 +110,15 @@ Address WasmCode::constant_pool() const {
return kNullAddress;
}
+Address WasmCode::handler_table() const {
+ return instruction_start() + handler_table_offset_;
+}
+
+uint32_t WasmCode::handler_table_size() const {
+ DCHECK_GE(constant_pool_offset_, handler_table_offset_);
+ return static_cast<uint32_t>(constant_pool_offset_ - handler_table_offset_);
+}
+
Address WasmCode::code_comments() const {
return instruction_start() + code_comments_offset_;
}
@@ -117,17 +128,8 @@ uint32_t WasmCode::code_comments_size() const {
return static_cast<uint32_t>(unpadded_binary_size_ - code_comments_offset_);
}
-size_t WasmCode::trap_handler_index() const {
- CHECK(HasTrapHandlerIndex());
- return static_cast<size_t>(trap_handler_index_);
-}
-
-void WasmCode::set_trap_handler_index(size_t value) {
- trap_handler_index_ = value;
-}
-
void WasmCode::RegisterTrapHandlerData() {
- DCHECK(!HasTrapHandlerIndex());
+ DCHECK(!has_trap_handler_index());
if (kind() != WasmCode::kFunction) return;
if (protected_instructions_.empty()) return;
@@ -136,20 +138,19 @@ void WasmCode::RegisterTrapHandlerData() {
size_t size = instructions().size();
const int index =
RegisterHandlerData(base, size, protected_instructions().size(),
- protected_instructions().start());
+ protected_instructions().begin());
// TODO(eholk): if index is negative, fail.
CHECK_LE(0, index);
- set_trap_handler_index(static_cast<size_t>(index));
+ set_trap_handler_index(index);
+ DCHECK(has_trap_handler_index());
}
-bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }
-
bool WasmCode::ShouldBeLogged(Isolate* isolate) {
// The return value is cached in {WasmEngine::IsolateData::log_codes}. Ensure
// to call {WasmEngine::EnableCodeLogging} if this return value would change
// for any isolate. Otherwise we might lose code events.
- return isolate->logger()->is_listening_to_code_events() ||
+ return isolate->code_event_dispatcher()->IsListeningToCodeEvents() ||
isolate->is_profiling();
}
@@ -274,18 +275,18 @@ void WasmCode::Disassemble(const char* name, std::ostream& os,
if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
instruction_size = safepoint_table_offset_;
}
- if (handler_table_offset_ && handler_table_offset_ < instruction_size) {
+ if (handler_table_offset_ < instruction_size) {
instruction_size = handler_table_offset_;
}
DCHECK_LT(0, instruction_size);
os << "Instructions (size = " << instruction_size << ")\n";
- Disassembler::Decode(nullptr, &os, instructions().start(),
- instructions().start() + instruction_size,
+ Disassembler::Decode(nullptr, &os, instructions().begin(),
+ instructions().begin() + instruction_size,
CodeReference(this), current_pc);
os << "\n";
- if (handler_table_offset_ > 0) {
- HandlerTable table(instruction_start(), handler_table_offset_);
+ if (handler_table_size() > 0) {
+ HandlerTable table(handler_table(), handler_table_size());
os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
<< "):\n";
table.HandlerTableReturnPrint(os);
@@ -351,6 +352,8 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
switch (kind) {
case WasmCode::kFunction:
return "wasm function";
+ case WasmCode::kWasmToCapiWrapper:
+ return "wasm-to-capi";
case WasmCode::kWasmToJsWrapper:
return "wasm-to-js";
case WasmCode::kRuntimeStub:
@@ -364,10 +367,8 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
}
WasmCode::~WasmCode() {
- if (HasTrapHandlerIndex()) {
- CHECK_LT(trap_handler_index(),
- static_cast<size_t>(std::numeric_limits<int>::max()));
- trap_handler::ReleaseHandlerData(static_cast<int>(trap_handler_index()));
+ if (has_trap_handler_index()) {
+ trap_handler::ReleaseHandlerData(trap_handler_index());
}
}
@@ -380,30 +381,229 @@ V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {
}
// If we reach here, the code was already potentially dead. Decrement the ref
// count, and return true if it drops to zero.
- int old_count = ref_count_.load(std::memory_order_relaxed);
- while (true) {
- DCHECK_LE(1, old_count);
- if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
- std::memory_order_relaxed)) {
- return old_count == 1;
- }
- }
+ return DecRefOnDeadCode();
}
// static
-void WasmCode::DecrementRefCount(Vector<WasmCode*> code_vec) {
+void WasmCode::DecrementRefCount(Vector<WasmCode* const> code_vec) {
// Decrement the ref counter of all given code objects. Keep the ones whose
// ref count drops to zero.
- std::unordered_map<NativeModule*, std::vector<WasmCode*>> dead_code;
+ WasmEngine::DeadCodeMap dead_code;
+ WasmEngine* engine = nullptr;
for (WasmCode* code : code_vec) {
- if (code->DecRef()) dead_code[code->native_module()].push_back(code);
+ if (!code->DecRef()) continue; // Remaining references.
+ dead_code[code->native_module()].push_back(code);
+ if (!engine) engine = code->native_module()->engine();
+ DCHECK_EQ(engine, code->native_module()->engine());
+ }
+
+ DCHECK_EQ(dead_code.empty(), engine == nullptr);
+ if (engine) engine->FreeDeadCode(dead_code);
+}
+
+WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
+ VirtualMemory code_space,
+ bool can_request_more)
+ : code_manager_(code_manager),
+ free_code_space_(code_space.region()),
+ can_request_more_memory_(can_request_more) {
+ owned_code_space_.reserve(can_request_more ? 4 : 1);
+ owned_code_space_.emplace_back(std::move(code_space));
+}
+
+WasmCodeAllocator::~WasmCodeAllocator() {
+ code_manager_->FreeNativeModule(VectorOf(owned_code_space_),
+ committed_code_space());
+}
+
+namespace {
+// On Windows, we cannot commit a region that straddles different reservations
+// of virtual memory. Because we bump-allocate, and because, if we need more
+// memory, we append that memory at the end of the owned_code_space_ list, we
+// traverse that list in reverse order to find the reservation(s) that guide how
+// to chunk the region to commit.
+#if V8_OS_WIN
+constexpr bool kNeedsToSplitRangeByReservations = true;
+#else
+constexpr bool kNeedsToSplitRangeByReservations = false;
+#endif
+
+base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
+ base::AddressRegion range,
+ const std::vector<VirtualMemory>& owned_code_space) {
+ if (!kNeedsToSplitRangeByReservations) return {range};
+
+ base::SmallVector<base::AddressRegion, 1> split_ranges;
+ size_t missing_begin = range.begin();
+ size_t missing_end = range.end();
+ for (auto& vmem : base::Reversed(owned_code_space)) {
+ Address overlap_begin = std::max(missing_begin, vmem.address());
+ Address overlap_end = std::min(missing_end, vmem.end());
+ if (overlap_begin >= overlap_end) continue;
+ split_ranges.emplace_back(overlap_begin, overlap_end - overlap_begin);
+ // Opportunistically reduce the missing range. This might terminate the loop
+ // early.
+ if (missing_begin == overlap_begin) missing_begin = overlap_end;
+ if (missing_end == overlap_end) missing_end = overlap_begin;
+ if (missing_begin >= missing_end) break;
+ }
+#ifdef ENABLE_SLOW_DCHECKS
+ // The returned vector should cover the full range.
+ size_t total_split_size = 0;
+ for (auto split : split_ranges) total_split_size += split.size();
+ DCHECK_EQ(range.size(), total_split_size);
+#endif
+ return split_ranges;
+}
+} // namespace
+
+Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
+ size_t size) {
+ base::MutexGuard lock(&mutex_);
+ DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
+ DCHECK_LT(0, size);
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+ // This happens under a lock assumed by the caller.
+ size = RoundUp<kCodeAlignment>(size);
+ base::AddressRegion code_space = free_code_space_.Allocate(size);
+ if (code_space.is_empty()) {
+ if (!can_request_more_memory_) {
+ V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
+ UNREACHABLE();
+ }
+
+ Address hint = owned_code_space_.empty() ? kNullAddress
+ : owned_code_space_.back().end();
+
+ VirtualMemory new_mem =
+ code_manager_->TryAllocate(size, reinterpret_cast<void*>(hint));
+ if (!new_mem.IsReserved()) {
+ V8::FatalProcessOutOfMemory(nullptr, "wasm code reservation");
+ UNREACHABLE();
+ }
+ code_manager_->AssignRange(new_mem.region(), native_module);
+
+ free_code_space_.Merge(new_mem.region());
+ owned_code_space_.emplace_back(std::move(new_mem));
+ code_space = free_code_space_.Allocate(size);
+ DCHECK(!code_space.is_empty());
+ }
+ const Address commit_page_size = page_allocator->CommitPageSize();
+ Address commit_start = RoundUp(code_space.begin(), commit_page_size);
+ Address commit_end = RoundUp(code_space.end(), commit_page_size);
+ // {commit_start} will be either code_space.start or the start of the next
+ // page. {commit_end} will be the start of the page after the one in which
+ // the allocation ends.
+ // We start from an aligned start, and we know we allocated vmem in
+ // page multiples.
+ // We just need to commit what's not committed. The page in which we
+ // start is already committed (or we start at the beginning of a page).
+ // The end needs to be committed all through the end of the page.
+ if (commit_start < commit_end) {
+ committed_code_space_.fetch_add(commit_end - commit_start);
+ // Committed code cannot grow bigger than maximum code space size.
+ DCHECK_LE(committed_code_space_.load(), kMaxWasmCodeMemory);
+ for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
+ {commit_start, commit_end - commit_start}, owned_code_space_)) {
+ if (!code_manager_->Commit(split_range)) {
+ V8::FatalProcessOutOfMemory(nullptr, "wasm code commit");
+ UNREACHABLE();
+ }
+ }
+ }
+ DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
+ allocated_code_space_.Merge(code_space);
+ generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
+
+ TRACE_HEAP("Code alloc for %p: 0x%" PRIxPTR ",+%zu\n", this,
+ code_space.begin(), size);
+ return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
+}
+
+bool WasmCodeAllocator::SetExecutable(bool executable) {
+ base::MutexGuard lock(&mutex_);
+ if (is_executable_ == executable) return true;
+ TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
+
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+
+ if (FLAG_wasm_write_protect_code_memory) {
+ PageAllocator::Permission permission =
+ executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
+#if V8_OS_WIN
+ // On windows, we need to switch permissions per separate virtual memory
+ // reservation. This is really just a problem when the NativeModule is
+ // growable (meaning can_request_more_memory_). That's 32-bit in production,
+ // or unittests.
+ // For now, in that case, we commit at reserved memory granularity.
+ // Technically, that may be a waste, because we may reserve more than we
+ // use. On 32-bit though, the scarce resource is the address space -
+ // committed or not.
+ if (can_request_more_memory_) {
+ for (auto& vmem : owned_code_space_) {
+ if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
+ permission)) {
+ return false;
+ }
+ TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
+ executable);
+ }
+ is_executable_ = executable;
+ return true;
+ }
+#endif
+ size_t commit_page_size = page_allocator->CommitPageSize();
+ for (auto& region : allocated_code_space_.regions()) {
+ // allocated_code_space_ is fine-grained, so we need to
+ // page-align it.
+ size_t region_size = RoundUp(region.size(), commit_page_size);
+ if (!SetPermissions(page_allocator, region.begin(), region_size,
+ permission)) {
+ return false;
+ }
+ TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to executable:%d\n",
+ region.begin(), region.end(), executable);
+ }
}
+ is_executable_ = executable;
+ return true;
+}
- // For each native module, free all its code objects at once.
- for (auto& dead_code_entry : dead_code) {
- NativeModule* native_module = dead_code_entry.first;
- Vector<WasmCode*> code_vec = VectorOf(dead_code_entry.second);
- native_module->FreeCode(code_vec);
+void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
+ // Zap code area and collect freed code regions.
+ DisjointAllocationPool freed_regions;
+ size_t code_size = 0;
+ for (WasmCode* code : codes) {
+ ZapCode(code->instruction_start(), code->instructions().size());
+ FlushInstructionCache(code->instruction_start(),
+ code->instructions().size());
+ code_size += code->instructions().size();
+ freed_regions.Merge(base::AddressRegion{code->instruction_start(),
+ code->instructions().size()});
+ }
+ freed_code_size_.fetch_add(code_size);
+
+ // Merge {freed_regions} into {freed_code_space_} and discard full pages.
+ base::MutexGuard guard(&mutex_);
+ PageAllocator* allocator = GetPlatformPageAllocator();
+ size_t commit_page_size = allocator->CommitPageSize();
+ for (auto region : freed_regions.regions()) {
+ auto merged_region = freed_code_space_.Merge(region);
+ Address discard_start =
+ std::max(RoundUp(merged_region.begin(), commit_page_size),
+ RoundDown(region.begin(), commit_page_size));
+ Address discard_end =
+ std::min(RoundDown(merged_region.end(), commit_page_size),
+ RoundUp(region.end(), commit_page_size));
+ if (discard_start >= discard_end) continue;
+ size_t discard_size = discard_end - discard_start;
+ size_t old_committed = committed_code_space_.fetch_sub(discard_size);
+ DCHECK_GE(old_committed, discard_size);
+ USE(old_committed);
+ for (base::AddressRegion split_range : SplitRangeByReservationsIfNeeded(
+ {discard_start, discard_size}, owned_code_space_)) {
+ code_manager_->Decommit(split_range);
+ }
}
}
@@ -412,13 +612,13 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::shared_ptr<const WasmModule> module,
std::shared_ptr<Counters> async_counters,
std::shared_ptr<NativeModule>* shared_this)
- : enabled_features_(enabled),
+ : code_allocator_(engine->code_manager(), std::move(code_space),
+ can_request_more),
+ enabled_features_(enabled),
module_(std::move(module)),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
- new WasmImportWrapperCache(this))),
- free_code_space_(code_space.region()),
+ new WasmImportWrapperCache())),
engine_(engine),
- can_request_more_memory_(can_request_more),
use_trap_handler_(trap_handler::IsTrapHandlerEnabled() ? kUseTrapHandler
: kNoTrapHandler) {
// We receive a pointer to an empty {std::shared_ptr}, and install ourselve
@@ -429,8 +629,6 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
compilation_state_ =
CompilationState::New(*shared_this, std::move(async_counters));
DCHECK_NOT_NULL(module_);
- owned_code_space_.emplace_back(std::move(code_space));
- owned_code_.reserve(num_functions());
#if defined(V8_OS_WIN_X64)
// On some platforms, specifically Win64, we need to reserve some pages at
@@ -438,9 +636,9 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
// See src/heap/spaces.cc, MemoryAllocator::InitializeCodePageAllocator() and
// https://cs.chromium.org/chromium/src/components/crash/content/app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
// for details.
- if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- FLAG_win64_unwinding_info) {
- AllocateForCode(Heap::GetCodeRangeReservedAreaSize());
+ if (engine_->code_manager()
+ ->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
+ code_allocator_.AllocateForCode(this, Heap::GetCodeRangeReservedAreaSize());
}
#endif
@@ -491,14 +689,6 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
return AddAndPublishAnonymousCode(code, WasmCode::kFunction);
}
-void NativeModule::UseLazyStubs() {
- uint32_t start = module_->num_imported_functions;
- uint32_t end = start + module_->num_declared_functions;
- for (uint32_t func_index = start; func_index < end; func_index++) {
- UseLazyStub(func_index);
- }
-}
-
void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LE(module_->num_imported_functions, func_index);
DCHECK_LT(func_index,
@@ -538,7 +728,7 @@ void NativeModule::SetRuntimeStubs(Isolate* isolate) {
JumpTableAssembler::StubSlotIndexToOffset(pair.second);
runtime_stub_entries_[pair.second] = base + slot_offset;
}
- FlushInstructionCache(jump_table->instructions().start(),
+ FlushInstructionCache(jump_table->instructions().begin(),
jump_table->instructions().size());
DCHECK_NULL(runtime_stub_table_);
runtime_stub_table_ = jump_table;
@@ -592,15 +782,16 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
// mean 'empty'.
const size_t safepoint_table_offset = static_cast<size_t>(
code->has_safepoint_table() ? code->safepoint_table_offset() : 0);
- const size_t handler_table_offset = static_cast<size_t>(
- code->has_handler_table() ? code->handler_table_offset() : 0);
+ const size_t handler_table_offset =
+ static_cast<size_t>(code->handler_table_offset());
const size_t constant_pool_offset =
static_cast<size_t>(code->constant_pool_offset());
const size_t code_comments_offset =
static_cast<size_t>(code->code_comments_offset());
- Vector<uint8_t> dst_code_bytes = AllocateForCode(instructions.size());
- memcpy(dst_code_bytes.begin(), instructions.start(), instructions.size());
+ Vector<uint8_t> dst_code_bytes =
+ code_allocator_.AllocateForCode(this, instructions.size());
+ memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
// Apply the relocation delta by iterating over the RelocInfo.
intptr_t delta = reinterpret_cast<Address>(dst_code_bytes.begin()) -
@@ -626,7 +817,7 @@ WasmCode* NativeModule::AddAndPublishAnonymousCode(Handle<Code> code,
}
// Flush the i-cache after relocation.
- FlushInstructionCache(dst_code_bytes.start(), dst_code_bytes.size());
+ FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
DCHECK_NE(kind, WasmCode::Kind::kInterpreterEntry);
std::unique_ptr<WasmCode> new_code{new WasmCode{
@@ -657,10 +848,10 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier) {
- return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
- std::move(protected_instructions),
- std::move(source_position_table), kind, tier,
- AllocateForCode(desc.instr_size));
+ return AddCodeWithCodeSpace(
+ index, desc, stack_slots, tagged_parameter_slots,
+ std::move(protected_instructions), std::move(source_position_table), kind,
+ tier, code_allocator_.AllocateForCode(this, desc.instr_size));
}
std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
@@ -681,8 +872,8 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
// 'empty'.
const size_t safepoint_table_offset = static_cast<size_t>(
desc.safepoint_table_size == 0 ? 0 : desc.safepoint_table_offset);
- const size_t handler_table_offset = static_cast<size_t>(
- desc.handler_table_size == 0 ? 0 : desc.handler_table_offset);
+ const size_t handler_table_offset =
+ static_cast<size_t>(desc.handler_table_offset);
const size_t constant_pool_offset =
static_cast<size_t>(desc.constant_pool_offset);
const size_t code_comments_offset =
@@ -718,6 +909,9 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
}
}
+ // Flush the i-cache after relocation.
+ FlushInstructionCache(dst_code_bytes.begin(), dst_code_bytes.size());
+
std::unique_ptr<WasmCode> code{new WasmCode{
this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
safepoint_table_offset, handler_table_offset, constant_pool_offset,
@@ -728,11 +922,6 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
code->RegisterTrapHandlerData();
- // Flush the i-cache for the region holding the relocated code.
- // Do this last, as this seems to trigger an LTO bug that clobbers a register
- // on arm, see https://crbug.com/952759#c6.
- FlushInstructionCache(dst_code_bytes.start(), dst_code_bytes.size());
-
return code;
}
@@ -804,7 +993,7 @@ WasmCode* NativeModule::PublishCodeLocked(std::unique_ptr<WasmCode> code) {
}
WasmCodeRefScope::AddRef(code.get());
WasmCode* result = code.get();
- owned_code_.emplace_back(std::move(code));
+ owned_code_.emplace(result->instruction_start(), std::move(code));
return result;
}
@@ -817,8 +1006,9 @@ WasmCode* NativeModule::AddDeserializedCode(
OwnedVector<const byte> reloc_info,
OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier) {
- Vector<uint8_t> dst_code_bytes = AllocateForCode(instructions.size());
- memcpy(dst_code_bytes.begin(), instructions.start(), instructions.size());
+ Vector<uint8_t> dst_code_bytes =
+ code_allocator_.AllocateForCode(this, instructions.size());
+ memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
std::unique_ptr<WasmCode> code{new WasmCode{
this, index, dst_code_bytes, stack_slots, tagged_parameter_slots,
@@ -847,7 +1037,7 @@ WasmCode* NativeModule::GetCode(uint32_t index) const {
DCHECK_LT(index, num_functions());
DCHECK_LE(module_->num_imported_functions, index);
WasmCode* code = code_table_[index - module_->num_imported_functions];
- WasmCodeRefScope::AddRef(code);
+ if (code) WasmCodeRefScope::AddRef(code);
return code;
}
@@ -861,7 +1051,8 @@ bool NativeModule::HasCode(uint32_t index) const {
WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
- Vector<uint8_t> code_space = AllocateForCode(jump_table_size);
+ Vector<uint8_t> code_space =
+ code_allocator_.AllocateForCode(this, jump_table_size);
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{new WasmCode{
this, // native_module
@@ -870,7 +1061,7 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
0, // stack_slots
0, // tagged_parameter_slots
0, // safepoint_table_offset
- 0, // handler_table_offset
+ jump_table_size, // handler_table_offset
jump_table_size, // constant_pool_offset
jump_table_size, // code_comments_offset
jump_table_size, // unpadded_binary_size
@@ -882,93 +1073,6 @@ WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t jump_table_size) {
return PublishCode(std::move(code));
}
-Vector<byte> NativeModule::AllocateForCode(size_t size) {
- base::MutexGuard lock(&allocation_mutex_);
- DCHECK_LT(0, size);
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
- // This happens under a lock assumed by the caller.
- size = RoundUp<kCodeAlignment>(size);
- base::AddressRegion code_space = free_code_space_.Allocate(size);
- if (code_space.is_empty()) {
- if (!can_request_more_memory_) {
- V8::FatalProcessOutOfMemory(nullptr,
- "NativeModule::AllocateForCode reservation");
- UNREACHABLE();
- }
-
- Address hint = owned_code_space_.empty() ? kNullAddress
- : owned_code_space_.back().end();
-
- VirtualMemory new_mem = engine_->code_manager()->TryAllocate(
- size, reinterpret_cast<void*>(hint));
- if (!new_mem.IsReserved()) {
- V8::FatalProcessOutOfMemory(nullptr,
- "NativeModule::AllocateForCode reservation");
- UNREACHABLE();
- }
- engine_->code_manager()->AssignRanges(new_mem.address(), new_mem.end(),
- this);
-
- free_code_space_.Merge(new_mem.region());
- owned_code_space_.emplace_back(std::move(new_mem));
- code_space = free_code_space_.Allocate(size);
- DCHECK(!code_space.is_empty());
- }
- const Address page_size = page_allocator->AllocatePageSize();
- Address commit_start = RoundUp(code_space.begin(), page_size);
- Address commit_end = RoundUp(code_space.end(), page_size);
- // {commit_start} will be either code_space.start or the start of the next
- // page. {commit_end} will be the start of the page after the one in which
- // the allocation ends.
- // We start from an aligned start, and we know we allocated vmem in
- // page multiples.
- // We just need to commit what's not committed. The page in which we
- // start is already committed (or we start at the beginning of a page).
- // The end needs to be committed all through the end of the page.
- if (commit_start < commit_end) {
- committed_code_space_.fetch_add(commit_end - commit_start);
- // Committed code cannot grow bigger than maximum code space size.
- DCHECK_LE(committed_code_space_.load(), kMaxWasmCodeMemory);
-#if V8_OS_WIN
- // On Windows, we cannot commit a region that straddles different
- // reservations of virtual memory. Because we bump-allocate, and because, if
- // we need more memory, we append that memory at the end of the
- // owned_code_space_ list, we traverse that list in reverse order to find
- // the reservation(s) that guide how to chunk the region to commit.
- for (auto& vmem : base::Reversed(owned_code_space_)) {
- if (commit_end <= vmem.address() || vmem.end() <= commit_start) continue;
- Address start = std::max(commit_start, vmem.address());
- Address end = std::min(commit_end, vmem.end());
- size_t commit_size = static_cast<size_t>(end - start);
- if (!engine_->code_manager()->Commit(start, commit_size)) {
- V8::FatalProcessOutOfMemory(nullptr,
- "NativeModule::AllocateForCode commit");
- UNREACHABLE();
- }
- // Opportunistically reduce the commit range. This might terminate the
- // loop early.
- if (commit_start == start) commit_start = end;
- if (commit_end == end) commit_end = start;
- if (commit_start >= commit_end) break;
- }
-#else
- if (!engine_->code_manager()->Commit(commit_start,
- commit_end - commit_start)) {
- V8::FatalProcessOutOfMemory(nullptr,
- "NativeModule::AllocateForCode commit");
- UNREACHABLE();
- }
-#endif
- }
- DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
- allocated_code_space_.Merge(code_space);
- generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
-
- TRACE_HEAP("Code alloc for %p: %" PRIxPTR ",+%zu\n", this, code_space.begin(),
- size);
- return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
-}
-
namespace {
class NativeModuleWireBytesStorage final : public WireBytesStorage {
public:
@@ -998,49 +1102,17 @@ void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
WasmCode* NativeModule::Lookup(Address pc) const {
base::MutexGuard lock(&allocation_mutex_);
- if (owned_code_.empty()) return nullptr;
- // First update the sorted portion counter.
- if (owned_code_sorted_portion_ == 0) ++owned_code_sorted_portion_;
- while (owned_code_sorted_portion_ < owned_code_.size() &&
- owned_code_[owned_code_sorted_portion_ - 1]->instruction_start() <=
- owned_code_[owned_code_sorted_portion_]->instruction_start()) {
- ++owned_code_sorted_portion_;
- }
- // Execute at most two rounds: First check whether the {pc} is within the
- // sorted portion of {owned_code_}. If it's not, then sort the whole vector
- // and retry.
- while (true) {
- auto iter =
- std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
- [](Address pc, const std::unique_ptr<WasmCode>& code) {
- DCHECK_NE(kNullAddress, pc);
- DCHECK_NOT_NULL(code);
- return pc < code->instruction_start();
- });
- if (iter != owned_code_.begin()) {
- --iter;
- WasmCode* candidate = iter->get();
- DCHECK_NOT_NULL(candidate);
- if (candidate->contains(pc)) {
- WasmCodeRefScope::AddRef(candidate);
- return candidate;
- }
- }
- if (owned_code_sorted_portion_ == owned_code_.size()) return nullptr;
- std::sort(owned_code_.begin(), owned_code_.end(),
- [](const std::unique_ptr<WasmCode>& code1,
- const std::unique_ptr<WasmCode>& code2) {
- return code1->instruction_start() < code2->instruction_start();
- });
- owned_code_sorted_portion_ = owned_code_.size();
- }
+ auto iter = owned_code_.upper_bound(pc);
+ if (iter == owned_code_.begin()) return nullptr;
+ --iter;
+ WasmCode* candidate = iter->second.get();
+ DCHECK_EQ(candidate->instruction_start(), iter->first);
+ if (!candidate->contains(pc)) return nullptr;
+ WasmCodeRefScope::AddRef(candidate);
+ return candidate;
}
Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
- // TODO(clemensh): Measure performance win of returning instruction start
- // directly if we have turbofan code. Downside: Redirecting functions (e.g.
- // for debugging) gets much harder.
-
// Return the jump table slot for that function index.
DCHECK_NOT_NULL(jump_table_);
uint32_t slot_idx = func_index - module_->num_imported_functions;
@@ -1072,7 +1144,7 @@ const char* NativeModule::GetRuntimeStubName(Address runtime_stub_entry) const {
}
NativeModule::~NativeModule() {
- TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
+ TRACE_HEAP("Deleting native module: %p\n", this);
// Cancel all background compilation before resetting any field of the
// NativeModule or freeing anything.
compilation_state_->AbortCompilation();
@@ -1087,24 +1159,35 @@ WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed)
: memory_tracker_(memory_tracker),
max_committed_code_space_(max_committed),
+#if defined(V8_OS_WIN_X64)
+ is_win64_unwind_info_disabled_for_testing_(false),
+#endif
total_committed_code_space_(0),
critical_committed_code_space_(max_committed / 2) {
DCHECK_LE(max_committed, kMaxWasmCodeMemory);
}
-bool WasmCodeManager::Commit(Address start, size_t size) {
- // TODO(v8:8462) Remove eager commit once perf supports remapping.
+#if defined(V8_OS_WIN_X64)
+bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() const {
+ return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+ FLAG_win64_unwinding_info &&
+ !is_win64_unwind_info_disabled_for_testing_;
+}
+#endif
+
+bool WasmCodeManager::Commit(base::AddressRegion region) {
+ // TODO(v8:8462): Remove eager commit once perf supports remapping.
if (FLAG_perf_prof) return true;
- DCHECK(IsAligned(start, AllocatePageSize()));
- DCHECK(IsAligned(size, AllocatePageSize()));
+ DCHECK(IsAligned(region.begin(), CommitPageSize()));
+ DCHECK(IsAligned(region.size(), CommitPageSize()));
// Reserve the size. Use CAS loop to avoid overflow on
// {total_committed_code_space_}.
size_t old_value = total_committed_code_space_.load();
while (true) {
DCHECK_GE(max_committed_code_space_, old_value);
- if (size > max_committed_code_space_ - old_value) return false;
- if (total_committed_code_space_.compare_exchange_weak(old_value,
- old_value + size)) {
+ if (region.size() > max_committed_code_space_ - old_value) return false;
+ if (total_committed_code_space_.compare_exchange_weak(
+ old_value, old_value + region.size())) {
break;
}
}
@@ -1112,44 +1195,58 @@ bool WasmCodeManager::Commit(Address start, size_t size) {
? PageAllocator::kReadWrite
: PageAllocator::kReadWriteExecute;
- bool ret =
- SetPermissions(GetPlatformPageAllocator(), start, size, permission);
- TRACE_HEAP("Setting rw permissions for %p:%p\n",
- reinterpret_cast<void*>(start),
- reinterpret_cast<void*>(start + size));
+ bool ret = SetPermissions(GetPlatformPageAllocator(), region.begin(),
+ region.size(), permission);
+ TRACE_HEAP("Setting rw permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
+ region.begin(), region.end());
if (!ret) {
// Highly unlikely.
- total_committed_code_space_.fetch_sub(size);
+ total_committed_code_space_.fetch_sub(region.size());
return false;
}
return true;
}
-void WasmCodeManager::AssignRanges(Address start, Address end,
- NativeModule* native_module) {
+void WasmCodeManager::Decommit(base::AddressRegion region) {
+ // TODO(v8:8462): Remove this once perf supports remapping.
+ if (FLAG_perf_prof) return;
+ PageAllocator* allocator = GetPlatformPageAllocator();
+ DCHECK(IsAligned(region.begin(), allocator->CommitPageSize()));
+ DCHECK(IsAligned(region.size(), allocator->CommitPageSize()));
+ size_t old_committed = total_committed_code_space_.fetch_sub(region.size());
+ DCHECK_LE(region.size(), old_committed);
+ USE(old_committed);
+ TRACE_HEAP("Discarding system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
+ region.begin(), region.end());
+ CHECK(allocator->DiscardSystemPages(reinterpret_cast<void*>(region.begin()),
+ region.size()));
+}
+
+void WasmCodeManager::AssignRange(base::AddressRegion region,
+ NativeModule* native_module) {
base::MutexGuard lock(&native_modules_mutex_);
- lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
+ lookup_map_.insert(std::make_pair(
+ region.begin(), std::make_pair(region.end(), native_module)));
}
VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
DCHECK_GT(size, 0);
- size = RoundUp(size, page_allocator->AllocatePageSize());
+ size_t allocate_page_size = page_allocator->AllocatePageSize();
+ size = RoundUp(size, allocate_page_size);
if (!memory_tracker_->ReserveAddressSpace(size)) return {};
if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
- VirtualMemory mem(page_allocator, size, hint,
- page_allocator->AllocatePageSize());
+ VirtualMemory mem(page_allocator, size, hint, allocate_page_size);
if (!mem.IsReserved()) {
memory_tracker_->ReleaseReservation(size);
return {};
}
- TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
- reinterpret_cast<void*>(mem.address()),
- reinterpret_cast<void*>(mem.end()), mem.size());
+ TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
+ mem.end(), mem.size());
- // TODO(v8:8462) Remove eager commit once perf supports remapping.
+ // TODO(v8:8462): Remove eager commit once perf supports remapping.
if (FLAG_perf_prof) {
SetPermissions(GetPlatformPageAllocator(), mem.address(), mem.size(),
PageAllocator::kReadWriteExecute);
@@ -1225,7 +1322,7 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
code_space = TryAllocate(code_vmem_size);
if (code_space.IsReserved()) break;
if (retries == kAllocationRetries) {
- V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
+ V8::FatalProcessOutOfMemory(isolate, "NewNativeModule");
UNREACHABLE();
}
// Run one GC, then try the allocation again.
@@ -1245,8 +1342,7 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
size);
#if defined(V8_OS_WIN_X64)
- if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- FLAG_win64_unwinding_info) {
+ if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
win64_unwindinfo::RegisterNonABICompliantCodeRange(
reinterpret_cast<void*>(start), size);
}
@@ -1257,60 +1353,11 @@ std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
return ret;
}
-bool NativeModule::SetExecutable(bool executable) {
- if (is_executable_ == executable) return true;
- TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
-
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
-
- if (FLAG_wasm_write_protect_code_memory) {
- PageAllocator::Permission permission =
- executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
-#if V8_OS_WIN
- // On windows, we need to switch permissions per separate virtual memory
- // reservation. This is really just a problem when the NativeModule is
- // growable (meaning can_request_more_memory_). That's 32-bit in production,
- // or unittests.
- // For now, in that case, we commit at reserved memory granularity.
- // Technically, that may be a waste, because we may reserve more than we
- // use. On 32-bit though, the scarce resource is the address space -
- // committed or not.
- if (can_request_more_memory_) {
- for (auto& vmem : owned_code_space_) {
- if (!SetPermissions(page_allocator, vmem.address(), vmem.size(),
- permission)) {
- return false;
- }
- TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
- executable);
- }
- is_executable_ = executable;
- return true;
- }
-#endif
- for (auto& region : allocated_code_space_.regions()) {
- // allocated_code_space_ is fine-grained, so we need to
- // page-align it.
- size_t region_size =
- RoundUp(region.size(), page_allocator->AllocatePageSize());
- if (!SetPermissions(page_allocator, region.begin(), region_size,
- permission)) {
- return false;
- }
- TRACE_HEAP("Set %p:%p to executable:%d\n",
- reinterpret_cast<void*>(region.begin()),
- reinterpret_cast<void*>(region.end()), executable);
- }
- }
- is_executable_ = executable;
- return true;
-}
-
void NativeModule::SampleCodeSize(
Counters* counters, NativeModule::CodeSamplingTime sampling_time) const {
size_t code_size = sampling_time == kSampling
- ? committed_code_space()
- : generated_code_size_.load(std::memory_order_relaxed);
+ ? code_allocator_.committed_code_space()
+ : code_allocator_.generated_code_size();
int code_size_mb = static_cast<int>(code_size / MB);
Histogram* histogram = nullptr;
switch (sampling_time) {
@@ -1320,9 +1367,23 @@ void NativeModule::SampleCodeSize(
case kAfterTopTier:
histogram = counters->wasm_module_code_size_mb_after_top_tier();
break;
- case kSampling:
+ case kSampling: {
histogram = counters->wasm_module_code_size_mb();
+ // If this is a wasm module of >= 2MB, also sample the freed code size,
+ // absolute and relative. Code GC does not happen on asm.js modules, and
+ // small modules will never trigger GC anyway.
+ size_t generated_size = code_allocator_.generated_code_size();
+ if (generated_size >= 2 * MB && module()->origin == kWasmOrigin) {
+ size_t freed_size = code_allocator_.freed_code_size();
+ DCHECK_LE(freed_size, generated_size);
+ int total_freed_mb = static_cast<int>(freed_size / MB);
+ counters->wasm_module_freed_code_size_mb()->AddSample(total_freed_mb);
+ int freed_percent = static_cast<int>(100 * freed_size / generated_size);
+ counters->wasm_module_freed_code_size_percent()->AddSample(
+ freed_percent);
+ }
break;
+ }
}
histogram->AddSample(code_size_mb);
}
@@ -1340,7 +1401,8 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
DCHECK(result.succeeded());
total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
}
- Vector<byte> code_space = AllocateForCode(total_code_space);
+ Vector<byte> code_space =
+ code_allocator_.AllocateForCode(this, total_code_space);
std::vector<std::unique_ptr<WasmCode>> generated_code;
generated_code.reserve(results.size());
@@ -1373,21 +1435,33 @@ std::vector<WasmCode*> NativeModule::AddCompiledCode(
return code_vector;
}
+bool NativeModule::IsRedirectedToInterpreter(uint32_t func_index) {
+ base::MutexGuard lock(&allocation_mutex_);
+ return has_interpreter_redirection(func_index);
+}
+
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
- // TODO(clemensh): Implement.
+ // Free the code space.
+ code_allocator_.FreeCode(codes);
+
+ // Free the {WasmCode} objects. This will also unregister trap handler data.
+ base::MutexGuard guard(&allocation_mutex_);
+ for (WasmCode* code : codes) {
+ DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
+ owned_code_.erase(code->instruction_start());
+ }
}
-void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
+void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
+ size_t committed_size) {
base::MutexGuard lock(&native_modules_mutex_);
- TRACE_HEAP("Freeing NativeModule %p\n", native_module);
- for (auto& code_space : native_module->owned_code_space_) {
+ for (auto& code_space : owned_code_space) {
DCHECK(code_space.IsReserved());
- TRACE_HEAP("VMem Release: %" PRIxPTR ":%" PRIxPTR " (%zu)\n",
+ TRACE_HEAP("VMem Release: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n",
code_space.address(), code_space.end(), code_space.size());
#if defined(V8_OS_WIN_X64)
- if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- FLAG_win64_unwinding_info) {
+ if (CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
win64_unwindinfo::UnregisterNonABICompliantCodeRange(
reinterpret_cast<void*>(code_space.address()));
}
@@ -1398,12 +1472,10 @@ void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
code_space.Free();
DCHECK(!code_space.IsReserved());
}
- native_module->owned_code_space_.clear();
- size_t code_size = native_module->committed_code_space_.load();
- DCHECK(IsAligned(code_size, AllocatePageSize()));
- size_t old_committed = total_committed_code_space_.fetch_sub(code_size);
- DCHECK_LE(code_size, old_committed);
+ DCHECK(IsAligned(committed_size, CommitPageSize()));
+ size_t old_committed = total_committed_code_space_.fetch_sub(committed_size);
+ DCHECK_LE(committed_size, old_committed);
USE(old_committed);
}
@@ -1467,6 +1539,7 @@ WasmCodeRefScope::~WasmCodeRefScope() {
// static
void WasmCodeRefScope::AddRef(WasmCode* code) {
+ DCHECK_NOT_NULL(code);
WasmCodeRefScope* current_scope = current_code_refs_scope;
DCHECK_NOT_NULL(current_scope);
auto entry = current_scope->code_ptrs_.insert(code);
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index e689644430..49c287df2c 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -13,12 +13,13 @@
#include <utility>
#include <vector>
+#include "src/base/address-region.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/builtins/builtins-definitions.h"
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
@@ -46,20 +47,15 @@ struct WasmModule;
// because that should have been reduced to [start, other_end).
class V8_EXPORT_PRIVATE DisjointAllocationPool final {
public:
- DisjointAllocationPool() = default;
-
+ MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(DisjointAllocationPool);
explicit DisjointAllocationPool(base::AddressRegion region)
: regions_({region}) {}
- DisjointAllocationPool(DisjointAllocationPool&& other) V8_NOEXCEPT = default;
- DisjointAllocationPool& operator=(DisjointAllocationPool&& other)
- V8_NOEXCEPT = default;
-
// Merge the parameter region into this object while preserving ordering of
// the regions. The assumption is that the passed parameter is not
// intersecting this object - for example, it was obtained from a previous
- // Allocate.
- void Merge(base::AddressRegion);
+ // Allocate. Returns the merged region.
+ base::AddressRegion Merge(base::AddressRegion);
// Allocate a contiguous region of size {size}. Return an empty pool on
// failure.
@@ -70,14 +66,13 @@ class V8_EXPORT_PRIVATE DisjointAllocationPool final {
private:
std::list<base::AddressRegion> regions_;
-
- DISALLOW_COPY_AND_ASSIGN(DisjointAllocationPool);
};
class V8_EXPORT_PRIVATE WasmCode final {
public:
enum Kind {
kFunction,
+ kWasmToCapiWrapper,
kWasmToJsWrapper,
kRuntimeStub,
kInterpreterEntry,
@@ -97,7 +92,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
Vector<byte> instructions() const { return instructions_; }
Address instruction_start() const {
- return reinterpret_cast<Address>(instructions_.start());
+ return reinterpret_cast<Address>(instructions_.begin());
}
Vector<const byte> reloc_info() const { return reloc_info_.as_vector(); }
Vector<const byte> source_positions() const {
@@ -114,6 +109,8 @@ class V8_EXPORT_PRIVATE WasmCode final {
NativeModule* native_module() const { return native_module_; }
ExecutionTier tier() const { return tier_; }
Address constant_pool() const;
+ Address handler_table() const;
+ uint32_t handler_table_size() const;
Address code_comments() const;
uint32_t code_comments_size() const;
size_t constant_pool_offset() const { return constant_pool_offset_; }
@@ -125,7 +122,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
uint32_t tagged_parameter_slots() const { return tagged_parameter_slots_; }
bool is_liftoff() const { return tier_ == ExecutionTier::kLiftoff; }
bool contains(Address pc) const {
- return reinterpret_cast<Address>(instructions_.start()) <= pc &&
+ return reinterpret_cast<Address>(instructions_.begin()) <= pc &&
pc < reinterpret_cast<Address>(instructions_.end());
}
@@ -146,7 +143,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
~WasmCode();
void IncRef() {
- int old_val = ref_count_.fetch_add(1, std::memory_order_relaxed);
+ int old_val = ref_count_.fetch_add(1, std::memory_order_acq_rel);
DCHECK_LE(1, old_val);
DCHECK_GT(kMaxInt, old_val);
USE(old_val);
@@ -155,20 +152,27 @@ class V8_EXPORT_PRIVATE WasmCode final {
// Decrement the ref count. Returns whether this code becomes dead and needs
// to be freed.
V8_WARN_UNUSED_RESULT bool DecRef() {
- int old_count = ref_count_.load(std::memory_order_relaxed);
+ int old_count = ref_count_.load(std::memory_order_acquire);
while (true) {
DCHECK_LE(1, old_count);
if (V8_UNLIKELY(old_count == 1)) return DecRefOnPotentiallyDeadCode();
if (ref_count_.compare_exchange_weak(old_count, old_count - 1,
- std::memory_order_relaxed)) {
+ std::memory_order_acq_rel)) {
return false;
}
}
}
+ // Decrement the ref count on code that is known to be dead, even though there
+ // might still be C++ references. Returns whether this drops the last
+ // reference and the code needs to be freed.
+ V8_WARN_UNUSED_RESULT bool DecRefOnDeadCode() {
+ return ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1;
+ }
+
// Decrement the ref count on a set of {WasmCode} objects, potentially
// belonging to different {NativeModule}s. Dead code will be deleted.
- static void DecrementRefCount(Vector<WasmCode*>);
+ static void DecrementRefCount(Vector<WasmCode* const>);
enum FlushICache : bool { kFlushICache = true, kNoFlushICache = false };
@@ -211,9 +215,15 @@ class V8_EXPORT_PRIVATE WasmCode final {
// Code objects that have been registered with the global trap handler within
// this process, will have a {trap_handler_index} associated with them.
- size_t trap_handler_index() const;
- void set_trap_handler_index(size_t);
- bool HasTrapHandlerIndex() const;
+ int trap_handler_index() const {
+ CHECK(has_trap_handler_index());
+ return trap_handler_index_;
+ }
+ void set_trap_handler_index(int value) {
+ CHECK(!has_trap_handler_index());
+ trap_handler_index_ = value;
+ }
+ bool has_trap_handler_index() const { return trap_handler_index_ >= 0; }
// Register protected instruction information with the trap handler. Sets
// trap_handler_index.
@@ -221,7 +231,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
// Slow path for {DecRef}: The code becomes potentially dead.
// Returns whether this code becomes dead and needs to be freed.
- bool DecRefOnPotentiallyDeadCode();
+ V8_NOINLINE bool DecRefOnPotentiallyDeadCode();
Vector<byte> instructions_;
OwnedVector<const byte> reloc_info_;
@@ -241,20 +251,20 @@ class V8_EXPORT_PRIVATE WasmCode final {
size_t handler_table_offset_ = 0;
size_t code_comments_offset_ = 0;
size_t unpadded_binary_size_ = 0;
- intptr_t trap_handler_index_ = -1;
+ int trap_handler_index_ = -1;
OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions_;
ExecutionTier tier_;
// WasmCode is ref counted. Counters are held by:
- // 1) The jump table.
- // 2) Function tables.
- // 3) {WasmCodeRefScope}s.
- // 4) The set of potentially dead code in the {WasmEngine}.
- // If a decrement of (1) or (2) would drop the ref count to 0, that code
- // becomes a candidate for garbage collection. At that point, we add
- // ref counts for (4) *before* decrementing the counter to ensure the code
- // stays alive as long as it's being used. Once the ref count drops to zero,
- // the code object is deleted and the memory for the machine code is freed.
+ // 1) The jump table / code table.
+ // 2) {WasmCodeRefScope}s.
+ // 3) The set of potentially dead code in the {WasmEngine}.
+ // If a decrement of (1) would drop the ref count to 0, that code becomes a
+ // candidate for garbage collection. At that point, we add a ref count for (3)
+ // *before* decrementing the counter to ensure the code stays alive as long as
+ // it's being used. Once the ref count drops to zero (i.e. after being removed
+ // from (3) and all (2)), the code object is deleted and the memory for the
+ // machine code is freed.
std::atomic<int> ref_count_{1};
DISALLOW_COPY_AND_ASSIGN(WasmCode);
@@ -263,6 +273,64 @@ class V8_EXPORT_PRIVATE WasmCode final {
// Return a textual description of the kind.
const char* GetWasmCodeKindAsString(WasmCode::Kind);
+// Manages the code reservations and allocations of a single {NativeModule}.
+class WasmCodeAllocator {
+ public:
+ WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
+ bool can_request_more);
+ ~WasmCodeAllocator();
+
+ size_t committed_code_space() const {
+ return committed_code_space_.load(std::memory_order_acquire);
+ }
+ size_t generated_code_size() const {
+ return generated_code_size_.load(std::memory_order_acquire);
+ }
+ size_t freed_code_size() const {
+ return freed_code_size_.load(std::memory_order_acquire);
+ }
+
+ // Allocate code space. Returns a valid buffer or fails with OOM (crash).
+ Vector<byte> AllocateForCode(NativeModule*, size_t size);
+
+ // Sets permissions of all owned code space to executable, or read-write (if
+ // {executable} is false). Returns true on success.
+ V8_EXPORT_PRIVATE bool SetExecutable(bool executable);
+
+ // Free memory pages of all given code objects. Used for wasm code GC.
+ void FreeCode(Vector<WasmCode* const>);
+
+ private:
+ // The engine-wide wasm code manager.
+ WasmCodeManager* const code_manager_;
+
+ mutable base::Mutex mutex_;
+
+ //////////////////////////////////////////////////////////////////////////////
+ // Protected by {mutex_}:
+
+ // Code space that was reserved and is available for allocations (subset of
+ // {owned_code_space_}).
+ DisjointAllocationPool free_code_space_;
+ // Code space that was allocated for code (subset of {owned_code_space_}).
+ DisjointAllocationPool allocated_code_space_;
+ // Code space that was allocated before but is dead now. Full pages within
+ // this region are discarded. It's still a subset of {owned_code_space_}).
+ DisjointAllocationPool freed_code_space_;
+ std::vector<VirtualMemory> owned_code_space_;
+
+ // End of fields protected by {mutex_}.
+ //////////////////////////////////////////////////////////////////////////////
+
+ std::atomic<size_t> committed_code_space_{0};
+ std::atomic<size_t> generated_code_size_{0};
+ std::atomic<size_t> freed_code_size_{0};
+
+ bool is_executable_ = false;
+
+ const bool can_request_more_memory_;
+};
+
class V8_EXPORT_PRIVATE NativeModule final {
public:
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_ARM64
@@ -303,11 +371,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Adds anonymous code for testing purposes.
WasmCode* AddCodeForTesting(Handle<Code> code);
- // Use this to setup lazy compilation for the entire module ({UseLazyStubs})
- // or for individual functions ({UseLazyStub}). It will use the existing
- // {WasmCode::kWasmCompileLazy} runtime stub and populate the jump table with
- // trampolines to that runtime stub.
- void UseLazyStubs();
+ // Use {UseLazyStub} to setup lazy compilation per function. It will use the
+ // existing {WasmCode::kWasmCompileLazy} runtime stub and populate the jump
+ // table with trampolines accordingly.
void UseLazyStub(uint32_t func_index);
// Initializes all runtime stubs by setting up entry addresses in the runtime
@@ -350,7 +416,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// above {GetCallTargetForFunction} returns) to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
- bool SetExecutable(bool executable);
+ bool SetExecutable(bool executable) {
+ return code_allocator_.SetExecutable(executable);
+ }
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
@@ -374,12 +442,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
- void set_lazy_compilation(bool lazy) { lazy_compilation_ = lazy; }
- bool lazy_compilation() const { return lazy_compilation_; }
Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
const WasmModule* module() const { return module_.get(); }
std::shared_ptr<const WasmModule> shared_module() const { return module_; }
- size_t committed_code_space() const { return committed_code_space_.load(); }
+ size_t committed_code_space() const {
+ return code_allocator_.committed_code_space();
+ }
WasmEngine* engine() const { return engine_; }
void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);
@@ -403,9 +471,15 @@ class V8_EXPORT_PRIVATE NativeModule final {
WasmCode* AddCompiledCode(WasmCompilationResult);
std::vector<WasmCode*> AddCompiledCode(Vector<WasmCompilationResult>);
+ // Allows to check whether a function has been redirected to the interpreter
+ // by publishing an entry stub with the {Kind::kInterpreterEntry} code kind.
+ bool IsRedirectedToInterpreter(uint32_t func_index);
+
// Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all
// {WasmCode} objects must not be used any more.
+ // Should only be called via {WasmEngine::FreeDeadCode}, so the engine can do
+ // its accounting.
void FreeCode(Vector<WasmCode* const>);
private:
@@ -431,12 +505,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Add and publish anonymous code.
WasmCode* AddAndPublishAnonymousCode(Handle<Code>, WasmCode::Kind kind,
const char* name = nullptr);
- // Allocate code space. Returns a valid buffer or fails with OOM (crash).
- Vector<byte> AllocateForCode(size_t size);
WasmCode* CreateEmptyJumpTable(uint32_t jump_table_size);
- // Hold the {mutex_} when calling this method.
+ // Hold the {allocation_mutex_} when calling this method.
bool has_interpreter_redirection(uint32_t func_index) {
DCHECK_LT(func_index, num_functions());
DCHECK_LE(module_->num_imported_functions, func_index);
@@ -446,7 +518,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
return byte & (1 << (bitset_idx % kBitsPerByte));
}
- // Hold the {mutex_} when calling this method.
+ // Hold the {allocation_mutex_} when calling this method.
void SetInterpreterRedirection(uint32_t func_index) {
DCHECK_LT(func_index, num_functions());
DCHECK_LE(module_->num_imported_functions, func_index);
@@ -460,6 +532,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
byte |= 1 << (bitset_idx % kBitsPerByte);
}
+ // {WasmCodeAllocator} manages all code reservations and allocations for this
+ // {NativeModule}.
+ WasmCodeAllocator code_allocator_;
+
// Features enabled for this module. We keep a copy of the features that
// were enabled at the time of the creation of this native module,
// to be consistent across asynchronous compilations later.
@@ -496,14 +572,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
//////////////////////////////////////////////////////////////////////////////
// Protected by {allocation_mutex_}:
- // Holds all allocated code objects. Mutable because it might get sorted in
- // {Lookup()}.
- mutable std::vector<std::unique_ptr<WasmCode>> owned_code_;
-
- // Keep track of the portion of {owned_code_} that is sorted.
- // Entries [0, owned_code_sorted_portion_) are known to be sorted.
- // Mutable because it might get modified in {Lookup()}.
- mutable size_t owned_code_sorted_portion_ = 0;
+ // Holds all allocated code objects. For lookup based on pc, the key is the
+ // instruction start address of the value.
+ std::map<Address, std::unique_ptr<WasmCode>> owned_code_;
std::unique_ptr<WasmCode* []> code_table_;
@@ -511,22 +582,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
// this module marking those functions that have been redirected.
std::unique_ptr<uint8_t[]> interpreter_redirections_;
- DisjointAllocationPool free_code_space_;
- DisjointAllocationPool allocated_code_space_;
- std::list<VirtualMemory> owned_code_space_;
-
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
WasmEngine* const engine_;
- std::atomic<size_t> committed_code_space_{0};
- std::atomic<size_t> generated_code_size_{0};
int modification_scope_depth_ = 0;
- bool can_request_more_memory_;
UseTrapHandler use_trap_handler_ = kNoTrapHandler;
- bool is_executable_ = false;
bool lazy_compile_frozen_ = false;
- bool lazy_compilation_ = false;
DISALLOW_COPY_AND_ASSIGN(NativeModule);
};
@@ -543,6 +605,10 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
}
#endif
+#if defined(V8_OS_WIN_X64)
+ bool CanRegisterUnwindInfoForNonABICompliantCodeRange() const;
+#endif
+
NativeModule* LookupNativeModule(Address pc) const;
WasmCode* LookupCode(Address pc) const;
size_t committed_code_space() const {
@@ -551,11 +617,17 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void SetMaxCommittedMemoryForTesting(size_t limit);
+#if defined(V8_OS_WIN_X64)
+ void DisableWin64UnwindInfoForTesting() {
+ is_win64_unwind_info_disabled_for_testing_ = true;
+ }
+#endif
+
static size_t EstimateNativeModuleCodeSize(const WasmModule* module);
static size_t EstimateNativeModuleNonCodeSize(const WasmModule* module);
private:
- friend class NativeModule;
+ friend class WasmCodeAllocator;
friend class WasmEngine;
std::shared_ptr<NativeModule> NewNativeModule(
@@ -565,19 +637,22 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
V8_WARN_UNUSED_RESULT VirtualMemory TryAllocate(size_t size,
void* hint = nullptr);
- bool Commit(Address, size_t);
- // Currently, we uncommit a whole module, so all we need is account
- // for the freed memory size. We do that in FreeNativeModule.
- // There's no separate Uncommit.
+ bool Commit(base::AddressRegion);
+ void Decommit(base::AddressRegion);
- void FreeNativeModule(NativeModule*);
+ void FreeNativeModule(Vector<VirtualMemory> owned_code,
+ size_t committed_size);
- void AssignRanges(Address start, Address end, NativeModule*);
+ void AssignRange(base::AddressRegion, NativeModule*);
WasmMemoryTracker* const memory_tracker_;
size_t max_committed_code_space_;
+#if defined(V8_OS_WIN_X64)
+ bool is_win64_unwind_info_disabled_for_testing_;
+#endif
+
std::atomic<size_t> total_committed_code_space_;
// If the committed code space exceeds {critical_committed_code_space_}, then
// we trigger a GC before creating the next module. This value is set to the
@@ -646,9 +721,7 @@ class GlobalWasmCodeRef {
code_->IncRef();
}
- ~GlobalWasmCodeRef() {
- if (code_->DecRef()) code_->native_module()->FreeCode(VectorOf(&code_, 1));
- }
+ ~GlobalWasmCodeRef() { WasmCode::DecrementRefCount({&code_, 1}); }
// Get a pointer to the contained {WasmCode} object. This is only guaranteed
// to exist as long as this {GlobalWasmCodeRef} exists.
diff --git a/deps/v8/src/wasm/wasm-constants.h b/deps/v8/src/wasm/wasm-constants.h
index bd24471bc3..fce60cb593 100644
--- a/deps/v8/src/wasm/wasm-constants.h
+++ b/deps/v8/src/wasm/wasm-constants.h
@@ -8,6 +8,8 @@
#include <cstddef>
#include <cstdint>
+#include "src/common/globals.h"
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -87,6 +89,10 @@ enum SectionCode : int8_t {
kFirstUnorderedSection = kDataCountSectionCode,
};
+// Binary encoding of compilation hints.
+constexpr uint8_t kDefaultCompilationHint = 0x0;
+constexpr uint8_t kNoCompilationHint = kMaxUInt8;
+
// Binary encoding of name section kinds.
enum NameSectionKindCode : uint8_t { kModule = 0, kFunction = 1, kLocal = 2 };
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 9775e47d71..33d9a64bf4 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -4,16 +4,16 @@
#include <unordered_map>
-#include "src/assembler-inl.h"
-#include "src/assert-scope.h"
#include "src/base/optional.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/common/assert-scope.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
-#include "src/frames-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/identity-map.h"
-#include "src/isolate.h"
+#include "src/utils/identity-map.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-interpreter.h"
@@ -37,9 +37,10 @@ Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
EmbeddedVector<char, kMaxStrLen> value;
int len = SNPrintF(value, format, args...);
CHECK(len > 0 && len < value.length());
- Vector<uint8_t> name = Vector<uint8_t>::cast(value.SubVector(0, len));
+ Vector<const uint8_t> name =
+ Vector<const uint8_t>::cast(value.SubVector(0, len));
return internal
- ? isolate->factory()->InternalizeOneByteString(name)
+ ? isolate->factory()->InternalizeString(name)
: isolate->factory()->NewStringFromOneByte(name).ToHandleChecked();
}
@@ -60,6 +61,8 @@ Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
return isolate->factory()->NewNumber(value.to<float>());
case kWasmF64:
return isolate->factory()->NewNumber(value.to<double>());
+ case kWasmAnyRef:
+ return value.to_anyref();
default:
UNIMPLEMENTED();
return isolate->factory()->undefined_value();
@@ -73,21 +76,21 @@ MaybeHandle<String> GetLocalName(Isolate* isolate,
DCHECK_LE(0, local_index);
if (!debug_info->has_locals_names()) {
Handle<WasmModuleObject> module_object(
- debug_info->wasm_instance()->module_object(), isolate);
+ debug_info->wasm_instance().module_object(), isolate);
Handle<FixedArray> locals_names = DecodeLocalNames(isolate, module_object);
debug_info->set_locals_names(*locals_names);
}
Handle<FixedArray> locals_names(debug_info->locals_names(), isolate);
if (func_index >= locals_names->length() ||
- locals_names->get(func_index)->IsUndefined(isolate)) {
+ locals_names->get(func_index).IsUndefined(isolate)) {
return {};
}
Handle<FixedArray> func_locals_names(
FixedArray::cast(locals_names->get(func_index)), isolate);
if (local_index >= func_locals_names->length() ||
- func_locals_names->get(local_index)->IsUndefined(isolate)) {
+ func_locals_names->get(local_index).IsUndefined(isolate)) {
return {};
}
return handle(String::cast(func_locals_names->get(local_index)), isolate);
@@ -135,19 +138,17 @@ class InterpreterHandle {
// Return raw pointer into heap. The WasmInterpreter will make its own copy
// of this data anyway, and there is no heap allocation in-between.
NativeModule* native_module =
- debug_info->wasm_instance()->module_object()->native_module();
+ debug_info.wasm_instance().module_object().native_module();
return ModuleWireBytes{native_module->wire_bytes()};
}
public:
InterpreterHandle(Isolate* isolate, Handle<WasmDebugInfo> debug_info)
: isolate_(isolate),
- module_(debug_info->wasm_instance()->module_object()->module()),
+ module_(debug_info->wasm_instance().module_object().module()),
interpreter_(isolate, module_, GetBytes(*debug_info),
handle(debug_info->wasm_instance(), isolate)) {}
- ~InterpreterHandle() { DCHECK_EQ(0, activations_.size()); }
-
WasmInterpreter* interpreter() { return &interpreter_; }
const WasmModule* module() const { return module_; }
@@ -180,7 +181,7 @@ class InterpreterHandle {
WasmCodeRefScope code_ref_scope;
WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
thread->InitFrame(&module()->functions[func_index],
- argument_values.start());
+ argument_values.begin());
bool finished = false;
while (!finished) {
// TODO(clemensh): Add occasional StackChecks.
@@ -263,8 +264,8 @@ class InterpreterHandle {
// Check that this is indeed the instance which is connected to this
// interpreter.
DCHECK_EQ(this, Managed<InterpreterHandle>::cast(
- instance_obj->debug_info()->interpreter_handle())
- ->raw());
+ instance_obj->debug_info().interpreter_handle())
+ .raw());
return instance_obj;
}
@@ -361,20 +362,44 @@ class InterpreterHandle {
Isolate* isolate = isolate_;
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
- // TODO(clemensh): Add globals to the global scope.
Handle<JSObject> global_scope_object =
isolate_->factory()->NewJSObjectWithNullProto();
if (instance->has_memory_object()) {
- Handle<String> name = isolate_->factory()->InternalizeOneByteString(
- StaticCharVector("memory"));
+ Handle<String> name =
+ isolate_->factory()->InternalizeString(StaticCharVector("memory"));
Handle<JSArrayBuffer> memory_buffer(
- instance->memory_object()->array_buffer(), isolate_);
+ instance->memory_object().array_buffer(), isolate_);
Handle<JSTypedArray> uint8_array = isolate_->factory()->NewJSTypedArray(
kExternalUint8Array, memory_buffer, 0, memory_buffer->byte_length());
JSObject::SetOwnPropertyIgnoreAttributes(global_scope_object, name,
uint8_array, NONE)
.Assert();
}
+
+ DCHECK_EQ(1, interpreter()->GetThreadCount());
+ WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
+
+ uint32_t global_count = thread->GetGlobalCount();
+ if (global_count > 0) {
+ Handle<JSObject> globals_obj =
+ isolate_->factory()->NewJSObjectWithNullProto();
+ Handle<String> globals_name =
+ isolate_->factory()->InternalizeString(StaticCharVector("globals"));
+ JSObject::SetOwnPropertyIgnoreAttributes(global_scope_object,
+ globals_name, globals_obj, NONE)
+ .Assert();
+
+ for (uint32_t i = 0; i < global_count; ++i) {
+ const char* label = "global#%d";
+ Handle<String> name = PrintFToOneByteString<true>(isolate_, label, i);
+ WasmValue value = thread->GetGlobalValue(i);
+ Handle<Object> value_obj = WasmValueToValueObject(isolate_, value);
+ JSObject::SetOwnPropertyIgnoreAttributes(globals_obj, name, value_obj,
+ NONE)
+ .Assert();
+ }
+ }
+
return global_scope_object;
}
@@ -392,8 +417,7 @@ class InterpreterHandle {
Handle<JSObject> locals_obj =
isolate_->factory()->NewJSObjectWithNullProto();
Handle<String> locals_name =
- isolate_->factory()->InternalizeOneByteString(
- StaticCharVector("locals"));
+ isolate_->factory()->InternalizeString(StaticCharVector("locals"));
JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, locals_name,
locals_obj, NONE)
.Assert();
@@ -421,8 +445,8 @@ class InterpreterHandle {
// which does not make too much sense here.
Handle<JSObject> stack_obj =
isolate_->factory()->NewJSObjectWithNullProto();
- Handle<String> stack_name = isolate_->factory()->InternalizeOneByteString(
- StaticCharVector("stack"));
+ Handle<String> stack_name =
+ isolate_->factory()->InternalizeString(StaticCharVector("stack"));
JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, stack_name,
stack_obj, NONE)
.Assert();
@@ -462,30 +486,15 @@ wasm::InterpreterHandle* GetOrCreateInterpreterHandle(
}
wasm::InterpreterHandle* GetInterpreterHandle(WasmDebugInfo debug_info) {
- Object handle_obj = debug_info->interpreter_handle();
- DCHECK(!handle_obj->IsUndefined());
- return Managed<wasm::InterpreterHandle>::cast(handle_obj)->raw();
+ Object handle_obj = debug_info.interpreter_handle();
+ DCHECK(!handle_obj.IsUndefined());
+ return Managed<wasm::InterpreterHandle>::cast(handle_obj).raw();
}
wasm::InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo debug_info) {
- Object handle_obj = debug_info->interpreter_handle();
- if (handle_obj->IsUndefined()) return nullptr;
- return Managed<wasm::InterpreterHandle>::cast(handle_obj)->raw();
-}
-
-Handle<FixedArray> GetOrCreateInterpretedFunctions(
- Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
- Handle<FixedArray> arr(debug_info->interpreted_functions(), isolate);
- int num_functions = debug_info->wasm_instance()
- ->module_object()
- ->native_module()
- ->num_functions();
- if (arr->length() == 0 && num_functions > 0) {
- arr = isolate->factory()->NewFixedArray(num_functions);
- debug_info->set_interpreted_functions(*arr);
- }
- DCHECK_EQ(num_functions, arr->length());
- return arr;
+ Object handle_obj = debug_info.interpreter_handle();
+ if (handle_obj.IsUndefined()) return nullptr;
+ return Managed<wasm::InterpreterHandle>::cast(handle_obj).raw();
}
} // namespace
@@ -496,7 +505,6 @@ Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(
factory->NewStruct(WASM_DEBUG_INFO_TYPE, AllocationType::kOld));
debug_info->set_wasm_instance(*instance);
- debug_info->set_interpreted_functions(*factory->empty_fixed_array());
instance->set_debug_info(*debug_info);
return debug_info;
}
@@ -530,11 +538,8 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
Isolate* isolate = debug_info->GetIsolate();
// Ensure that the interpreter is instantiated.
GetOrCreateInterpreterHandle(isolate, debug_info);
- Handle<FixedArray> interpreted_functions =
- GetOrCreateInterpretedFunctions(isolate, debug_info);
Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
- wasm::NativeModule* native_module =
- instance->module_object()->native_module();
+ wasm::NativeModule* native_module = instance->module_object().native_module();
const wasm::WasmModule* module = instance->module();
// We may modify the wasm jump table.
@@ -544,7 +549,9 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
for (int func_index : func_indexes) {
DCHECK_LE(0, func_index);
DCHECK_GT(module->functions.size(), func_index);
- if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) continue;
+ // Note that this is just a best effort check. Multiple threads can still
+ // race at redirecting the same function to the interpreter, which is OK.
+ if (native_module->IsRedirectedToInterpreter(func_index)) continue;
wasm::WasmCodeRefScope code_ref_scope;
wasm::WasmCompilationResult result = compiler::CompileWasmInterpreterEntry(
@@ -555,12 +562,8 @@ void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
result.tagged_parameter_slots, std::move(result.protected_instructions),
std::move(result.source_positions), wasm::WasmCode::kInterpreterEntry,
wasm::ExecutionTier::kInterpreter);
- Address instruction_start = wasm_code->instruction_start();
native_module->PublishCode(std::move(wasm_code));
-
- Handle<Foreign> foreign_holder =
- isolate->factory()->NewForeign(instruction_start, AllocationType::kOld);
- interpreted_functions->set(func_index, *foreign_holder);
+ DCHECK(native_module->IsRedirectedToInterpreter(func_index));
}
}
@@ -627,7 +630,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
debug_info->set_c_wasm_entry_map(*managed_map);
}
Handle<FixedArray> entries(debug_info->c_wasm_entries(), isolate);
- wasm::SignatureMap* map = debug_info->c_wasm_entry_map()->raw();
+ wasm::SignatureMap* map = debug_info->c_wasm_entry_map().raw();
int32_t index = map->Find(*sig);
if (index == -1) {
index = static_cast<int32_t>(map->FindOrInsert(*sig));
@@ -636,7 +639,7 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
entries, entries->length(), AllocationType::kOld);
debug_info->set_c_wasm_entries(*entries);
}
- DCHECK(entries->get(index)->IsUndefined(isolate));
+ DCHECK(entries->get(index).IsUndefined(isolate));
Handle<Code> new_entry_code =
compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
Handle<WasmExportedFunctionData> function_data =
@@ -646,13 +649,13 @@ Handle<JSFunction> WasmDebugInfo::GetCWasmEntry(
function_data->set_instance(debug_info->wasm_instance());
function_data->set_jump_table_offset(-1);
function_data->set_function_index(-1);
- Handle<String> name = isolate->factory()->InternalizeOneByteString(
- StaticCharVector("c-wasm-entry"));
+ Handle<String> name =
+ isolate->factory()->InternalizeString(StaticCharVector("c-wasm-entry"));
NewFunctionArgs args = NewFunctionArgs::ForWasm(
name, function_data, isolate->sloppy_function_map());
Handle<JSFunction> new_entry = isolate->factory()->NewFunction(args);
- new_entry->set_context(debug_info->wasm_instance()->native_context());
- new_entry->shared()->set_internal_formal_parameter_count(
+ new_entry->set_context(debug_info->wasm_instance().native_context());
+ new_entry->shared().set_internal_formal_parameter_count(
compiler::CWasmEntryParameters::kNumParameters);
entries->set(index, *new_entry);
}
diff --git a/deps/v8/src/wasm/wasm-engine.cc b/deps/v8/src/wasm/wasm-engine.cc
index 48bd96f254..83053fd71f 100644
--- a/deps/v8/src/wasm/wasm-engine.cc
+++ b/deps/v8/src/wasm/wasm-engine.cc
@@ -4,13 +4,15 @@
#include "src/wasm/wasm-engine.h"
-#include "src/code-tracer.h"
-#include "src/compilation-statistics.h"
-#include "src/counters.h"
-#include "src/objects-inl.h"
+#include "src/base/platform/time.h"
+#include "src/diagnostics/code-tracer.h"
+#include "src/diagnostics/compilation-statistics.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-promise.h"
-#include "src/ostreams.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -22,6 +24,11 @@ namespace v8 {
namespace internal {
namespace wasm {
+#define TRACE_CODE_GC(...) \
+ do { \
+ if (FLAG_trace_wasm_code_gc) PrintF("[wasm-gc] " __VA_ARGS__); \
+ } while (false)
+
namespace {
// A task to log a set of {WasmCode} objects in an isolate. It does not own any
// data itself, since it is owned by the platform, so lifetime is not really
@@ -87,12 +94,29 @@ class WasmGCForegroundTask : public Task {
DCHECK_NOT_NULL(isolate);
}
+ ~WasmGCForegroundTask() {
+ // If the isolate is already shutting down, the platform can delete this
+ // task without ever executing it. For that case, we need to deregister the
+ // task from the engine to avoid UAF.
+ if (isolate_) {
+ WasmEngine* engine = isolate_->wasm_engine();
+ engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
+ }
+ }
+
void Run() final {
if (isolate_ == nullptr) return; // cancelled.
WasmEngine* engine = isolate_->wasm_engine();
// If the foreground task is executing, there is no wasm code active. Just
// report an empty set of live wasm code.
+#ifdef ENABLE_SLOW_DCHECKS
+ for (StackFrameIterator it(isolate_); !it.done(); it.Advance()) {
+ DCHECK_NE(StackFrame::WASM_COMPILED, it.frame()->type());
+ }
+#endif
engine->ReportLiveCodeForGC(isolate_, Vector<WasmCode*>{});
+ // Cancel to signal to the destructor that this task executed.
+ Cancel();
}
void Cancel() { isolate_ = nullptr; }
@@ -104,6 +128,11 @@ class WasmGCForegroundTask : public Task {
} // namespace
struct WasmEngine::CurrentGCInfo {
+ explicit CurrentGCInfo(int8_t gc_sequence_index)
+ : gc_sequence_index(gc_sequence_index) {
+ DCHECK_NE(0, gc_sequence_index);
+ }
+
// Set of isolates that did not scan their stack yet for used WasmCode, and
// their scheduled foreground task.
std::unordered_map<Isolate*, WasmGCForegroundTask*> outstanding_isolates;
@@ -111,11 +140,28 @@ struct WasmEngine::CurrentGCInfo {
// Set of dead code. Filled with all potentially dead code on initialization.
// Code that is still in-use is removed by the individual isolates.
std::unordered_set<WasmCode*> dead_code;
+
+ // The number of GCs triggered in the native module that triggered this GC.
+ // This is stored in the histogram for each participating isolate during
+ // execution of that isolate's foreground task.
+ const int8_t gc_sequence_index;
+
+ // If during this GC, another GC was requested, we skipped that other GC (we
+ // only run one GC at a time). Remember though to trigger another one once
+ // this one finishes. {next_gc_sequence_index} is 0 if no next GC is needed,
+ // and >0 otherwise. It stores the {num_code_gcs_triggered} of the native
+ // module which triggered the next GC.
+ int8_t next_gc_sequence_index = 0;
+
+ // The start time of this GC; used for tracing and sampled via {Counters}.
+ // Can be null ({TimeTicks::IsNull()}) if timer is not high resolution.
+ base::TimeTicks start_time;
};
struct WasmEngine::IsolateInfo {
explicit IsolateInfo(Isolate* isolate)
- : log_codes(WasmCode::ShouldBeLogged(isolate)) {
+ : log_codes(WasmCode::ShouldBeLogged(isolate)),
+ async_counters(isolate->async_counters()) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner = platform->GetForegroundTaskRunner(v8_isolate);
@@ -144,16 +190,27 @@ struct WasmEngine::IsolateInfo {
// The foreground task runner of the isolate (can be called from background).
std::shared_ptr<v8::TaskRunner> foreground_task_runner;
+
+ const std::shared_ptr<Counters> async_counters;
};
struct WasmEngine::NativeModuleInfo {
// Set of isolates using this NativeModule.
std::unordered_set<Isolate*> isolates;
- // Set of potentially dead code. The ref-count of these code objects was
- // incremented for each Isolate that might still execute the code, and is
- // decremented on {RemoveIsolate} or on a GC.
+ // Set of potentially dead code. This set holds one ref for each code object,
+ // until code is detected to be really dead. At that point, the ref count is
+ // decremented and code is move to the {dead_code} set. If the code is finally
+ // deleted, it is also removed from {dead_code}.
std::unordered_set<WasmCode*> potentially_dead_code;
+
+ // Code that is not being executed in any isolate any more, but the ref count
+ // did not drop to zero yet.
+ std::unordered_set<WasmCode*> dead_code;
+
+ // Number of code GCs triggered because code in this native module became
+ // potentially dead.
+ int8_t num_code_gcs_triggered = 0;
};
WasmEngine::WasmEngine()
@@ -206,7 +263,7 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
// object.
Handle<ByteArray> asm_js_offset_table =
isolate->factory()->NewByteArray(asm_js_offset_table_bytes.length());
- asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
+ asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.begin(),
asm_js_offset_table_bytes.length());
return AsmWasmData::New(isolate, std::move(native_module), export_wrappers,
@@ -217,7 +274,7 @@ Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
Isolate* isolate, Handle<AsmWasmData> asm_wasm_data,
Handle<Script> script) {
std::shared_ptr<NativeModule> native_module =
- asm_wasm_data->managed_native_module()->get();
+ asm_wasm_data->managed_native_module().get();
Handle<FixedArray> export_wrappers =
handle(asm_wasm_data->export_wrappers(), isolate);
size_t code_size_estimate =
@@ -319,10 +376,11 @@ void WasmEngine::AsyncInstantiate(
void WasmEngine::AsyncCompile(
Isolate* isolate, const WasmFeatures& enabled,
std::shared_ptr<CompilationResultResolver> resolver,
- const ModuleWireBytes& bytes, bool is_shared) {
+ const ModuleWireBytes& bytes, bool is_shared,
+ const char* api_method_name_for_errors) {
if (!FLAG_wasm_async_compilation) {
// Asynchronous compilation disabled; fall back on synchronous compilation.
- ErrorThrower thrower(isolate, "WasmCompile");
+ ErrorThrower thrower(isolate, api_method_name_for_errors);
MaybeHandle<WasmModuleObject> module_object;
if (is_shared) {
// Make a copy of the wire bytes to avoid concurrent modification.
@@ -345,9 +403,9 @@ void WasmEngine::AsyncCompile(
if (FLAG_wasm_test_streaming) {
std::shared_ptr<StreamingDecoder> streaming_decoder =
- StartStreamingCompilation(isolate, enabled,
- handle(isolate->context(), isolate),
- std::move(resolver));
+ StartStreamingCompilation(
+ isolate, enabled, handle(isolate->context(), isolate),
+ api_method_name_for_errors, std::move(resolver));
streaming_decoder->OnBytesReceived(bytes.module_bytes());
streaming_decoder->Finish();
return;
@@ -357,18 +415,20 @@ void WasmEngine::AsyncCompile(
std::unique_ptr<byte[]> copy(new byte[bytes.length()]);
memcpy(copy.get(), bytes.start(), bytes.length());
- AsyncCompileJob* job = CreateAsyncCompileJob(
- isolate, enabled, std::move(copy), bytes.length(),
- handle(isolate->context(), isolate), std::move(resolver));
+ AsyncCompileJob* job =
+ CreateAsyncCompileJob(isolate, enabled, std::move(copy), bytes.length(),
+ handle(isolate->context(), isolate),
+ api_method_name_for_errors, std::move(resolver));
job->Start();
}
std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
+ const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver) {
AsyncCompileJob* job =
CreateAsyncCompileJob(isolate, enabled, std::unique_ptr<byte[]>(nullptr),
- 0, context, std::move(resolver));
+ 0, context, api_method_name, std::move(resolver));
return job->CreateStreamingDecoder();
}
@@ -434,10 +494,11 @@ CodeTracer* WasmEngine::GetCodeTracer() {
AsyncCompileJob* WasmEngine::CreateAsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length, Handle<Context> context,
+ const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver) {
AsyncCompileJob* job =
new AsyncCompileJob(isolate, enabled, std::move(bytes_copy), length,
- context, std::move(resolver));
+ context, api_method_name, std::move(resolver));
// Pass ownership to the unique_ptr in {async_compile_jobs_}.
base::MutexGuard guard(&mutex_);
async_compile_jobs_[job] = std::unique_ptr<AsyncCompileJob>(job);
@@ -482,6 +543,15 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
}
}
+namespace {
+int GetGCTimeMicros(base::TimeTicks start) {
+ DCHECK(!start.IsNull());
+ int64_t duration_us = (base::TimeTicks::Now() - start).InMicroseconds();
+ return static_cast<int>(
+ std::min(std::max(int64_t{0}, duration_us), int64_t{kMaxInt}));
+}
+} // namespace
+
void WasmEngine::AddIsolate(Isolate* isolate) {
base::MutexGuard guard(&mutex_);
DCHECK_EQ(0, isolates_.count(isolate));
@@ -501,6 +571,13 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
for (auto* native_module : engine->isolates_[isolate]->native_modules) {
native_module->SampleCodeSize(counters, NativeModule::kSampling);
}
+ // If there is an ongoing code GC, sample its time here. This will record
+ // samples for very long-running or never ending GCs.
+ if (engine->current_gc_info_ &&
+ !engine->current_gc_info_->start_time.IsNull()) {
+ isolate->counters()->wasm_code_gc_time()->AddSample(
+ GetGCTimeMicros(engine->current_gc_info_->start_time));
+ }
};
isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
nullptr);
@@ -518,16 +595,14 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
auto* info = native_modules_[native_module].get();
info->isolates.erase(isolate);
if (current_gc_info_) {
- auto it = current_gc_info_->outstanding_isolates.find(isolate);
- if (it != current_gc_info_->outstanding_isolates.end()) {
- if (auto* gc_task = it->second) gc_task->Cancel();
- current_gc_info_->outstanding_isolates.erase(it);
- }
for (WasmCode* code : info->potentially_dead_code) {
current_gc_info_->dead_code.erase(code);
}
}
}
+ if (current_gc_info_) {
+ if (RemoveIsolateFromCurrentGC(isolate)) PotentiallyFinishCurrentGC();
+ }
if (auto* task = info->log_codes_task) task->Cancel();
if (!info->code_to_log.empty()) {
WasmCode::DecrementRefCount(VectorOf(info->code_to_log));
@@ -548,6 +623,8 @@ void WasmEngine::LogCode(WasmCode* code) {
&mutex_, &info->log_codes_task, isolate, this);
info->log_codes_task = new_task.get();
info->foreground_task_runner->PostTask(std::move(new_task));
+ }
+ if (info->code_to_log.empty()) {
isolate->stack_guard()->RequestLogWasmCode();
}
info->code_to_log.push_back(code);
@@ -597,34 +674,46 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
}
void WasmEngine::FreeNativeModule(NativeModule* native_module) {
- {
- base::MutexGuard guard(&mutex_);
- auto it = native_modules_.find(native_module);
- DCHECK_NE(native_modules_.end(), it);
- for (Isolate* isolate : it->second->isolates) {
- DCHECK_EQ(1, isolates_.count(isolate));
- IsolateInfo* info = isolates_[isolate].get();
- DCHECK_EQ(1, info->native_modules.count(native_module));
- info->native_modules.erase(native_module);
- // If there are {WasmCode} objects of the deleted {NativeModule}
- // outstanding to be logged in this isolate, remove them. Decrementing the
- // ref count is not needed, since the {NativeModule} dies anyway.
- size_t remaining = info->code_to_log.size();
- if (remaining > 0) {
- for (size_t i = 0; i < remaining; ++i) {
- while (i < remaining &&
- info->code_to_log[i]->native_module() == native_module) {
- // Move the last remaining item to this slot (this can be the same
- // as {i}, which is OK).
- info->code_to_log[i] = info->code_to_log[--remaining];
- }
+ base::MutexGuard guard(&mutex_);
+ auto it = native_modules_.find(native_module);
+ DCHECK_NE(native_modules_.end(), it);
+ for (Isolate* isolate : it->second->isolates) {
+ DCHECK_EQ(1, isolates_.count(isolate));
+ IsolateInfo* info = isolates_[isolate].get();
+ DCHECK_EQ(1, info->native_modules.count(native_module));
+ info->native_modules.erase(native_module);
+ // If there are {WasmCode} objects of the deleted {NativeModule}
+ // outstanding to be logged in this isolate, remove them. Decrementing the
+ // ref count is not needed, since the {NativeModule} dies anyway.
+ size_t remaining = info->code_to_log.size();
+ if (remaining > 0) {
+ for (size_t i = 0; i < remaining; ++i) {
+ while (i < remaining &&
+ info->code_to_log[i]->native_module() == native_module) {
+ // Move the last remaining item to this slot (this can be the same
+ // as {i}, which is OK).
+ info->code_to_log[i] = info->code_to_log[--remaining];
}
- info->code_to_log.resize(remaining);
+ }
+ info->code_to_log.resize(remaining);
+ }
+ }
+ // If there is a GC running which has references to code contained in the
+ // deleted {NativeModule}, remove those references.
+ if (current_gc_info_) {
+ for (auto it = current_gc_info_->dead_code.begin(),
+ end = current_gc_info_->dead_code.end();
+ it != end;) {
+ if ((*it)->native_module() == native_module) {
+ it = current_gc_info_->dead_code.erase(it);
+ } else {
+ ++it;
}
}
- native_modules_.erase(it);
+ TRACE_CODE_GC("Native module %p died, reducing dead code objects to %zu.\n",
+ native_module, current_gc_info_->dead_code.size());
}
- code_manager_.FreeNativeModule(native_module);
+ native_modules_.erase(it);
}
namespace {
@@ -663,53 +752,104 @@ void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
Vector<WasmCode*> live_code) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ReportLiveCodeForGC");
+ TRACE_CODE_GC("Isolate %d reporting %zu live code objects.\n", isolate->id(),
+ live_code.size());
base::MutexGuard guard(&mutex_);
- DCHECK_NOT_NULL(current_gc_info_);
- auto outstanding_isolate_it =
- current_gc_info_->outstanding_isolates.find(isolate);
- DCHECK_NE(current_gc_info_->outstanding_isolates.end(),
- outstanding_isolate_it);
- auto* fg_task = outstanding_isolate_it->second;
- if (fg_task) fg_task->Cancel();
- current_gc_info_->outstanding_isolates.erase(outstanding_isolate_it);
+ // This report might come in late (note that we trigger both a stack guard and
+ // a foreground task). In that case, ignore it.
+ if (current_gc_info_ == nullptr) return;
+ if (!RemoveIsolateFromCurrentGC(isolate)) return;
+ isolate->counters()->wasm_module_num_triggered_code_gcs()->AddSample(
+ current_gc_info_->gc_sequence_index);
for (WasmCode* code : live_code) current_gc_info_->dead_code.erase(code);
+ PotentiallyFinishCurrentGC();
+}
- if (current_gc_info_->outstanding_isolates.empty()) {
- std::unordered_map<NativeModule*, std::vector<WasmCode*>>
- dead_code_per_native_module;
- for (WasmCode* code : current_gc_info_->dead_code) {
- dead_code_per_native_module[code->native_module()].push_back(code);
- }
- for (auto& entry : dead_code_per_native_module) {
- entry.first->FreeCode(VectorOf(entry.second));
- }
- current_gc_info_.reset();
+void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
+ wasm::WasmCodeRefScope code_ref_scope;
+ std::unordered_set<wasm::WasmCode*> live_wasm_code;
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
+ StackFrame* const frame = it.frame();
+ if (frame->type() != StackFrame::WASM_COMPILED) continue;
+ live_wasm_code.insert(WasmCompiledFrame::cast(frame)->wasm_code());
}
+
+ ReportLiveCodeForGC(isolate,
+ OwnedVector<WasmCode*>::Of(live_wasm_code).as_vector());
}
bool WasmEngine::AddPotentiallyDeadCode(WasmCode* code) {
base::MutexGuard guard(&mutex_);
auto it = native_modules_.find(code->native_module());
DCHECK_NE(native_modules_.end(), it);
- auto added = it->second->potentially_dead_code.insert(code);
+ NativeModuleInfo* info = it->second.get();
+ if (info->dead_code.count(code)) return false; // Code is already dead.
+ auto added = info->potentially_dead_code.insert(code);
if (!added.second) return false; // An entry already existed.
new_potentially_dead_code_size_ += code->instructions().size();
- // Trigger a GC if 1MiB plus 10% of committed code are potentially dead.
- size_t dead_code_limit = 1 * MB + code_manager_.committed_code_space() / 10;
- if (FLAG_wasm_code_gc && new_potentially_dead_code_size_ > dead_code_limit &&
- !current_gc_info_) {
- TriggerGC();
+ if (FLAG_wasm_code_gc) {
+ // Trigger a GC if 64kB plus 10% of committed code are potentially dead.
+ size_t dead_code_limit =
+ FLAG_stress_wasm_code_gc
+ ? 0
+ : 64 * KB + code_manager_.committed_code_space() / 10;
+ if (new_potentially_dead_code_size_ > dead_code_limit) {
+ bool inc_gc_count =
+ info->num_code_gcs_triggered < std::numeric_limits<int8_t>::max();
+ if (current_gc_info_ == nullptr) {
+ if (inc_gc_count) ++info->num_code_gcs_triggered;
+ TRACE_CODE_GC(
+ "Triggering GC (potentially dead: %zu bytes; limit: %zu bytes).\n",
+ new_potentially_dead_code_size_, dead_code_limit);
+ TriggerGC(info->num_code_gcs_triggered);
+ } else if (current_gc_info_->next_gc_sequence_index == 0) {
+ if (inc_gc_count) ++info->num_code_gcs_triggered;
+ TRACE_CODE_GC(
+ "Scheduling another GC after the current one (potentially dead: "
+ "%zu bytes; limit: %zu bytes).\n",
+ new_potentially_dead_code_size_, dead_code_limit);
+ current_gc_info_->next_gc_sequence_index = info->num_code_gcs_triggered;
+ DCHECK_NE(0, current_gc_info_->next_gc_sequence_index);
+ }
+ }
}
return true;
}
-void WasmEngine::TriggerGC() {
+void WasmEngine::FreeDeadCode(const DeadCodeMap& dead_code) {
+ base::MutexGuard guard(&mutex_);
+ FreeDeadCodeLocked(dead_code);
+}
+
+void WasmEngine::FreeDeadCodeLocked(const DeadCodeMap& dead_code) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FreeDeadCode");
+ DCHECK(!mutex_.TryLock());
+ for (auto& dead_code_entry : dead_code) {
+ NativeModule* native_module = dead_code_entry.first;
+ const std::vector<WasmCode*>& code_vec = dead_code_entry.second;
+ DCHECK_EQ(1, native_modules_.count(native_module));
+ auto* info = native_modules_[native_module].get();
+ TRACE_CODE_GC("Freeing %zu code object%s of module %p.\n", code_vec.size(),
+ code_vec.size() == 1 ? "" : "s", native_module);
+ for (WasmCode* code : code_vec) {
+ DCHECK_EQ(1, info->dead_code.count(code));
+ info->dead_code.erase(code);
+ }
+ native_module->FreeCode(VectorOf(code_vec));
+ }
+}
+
+void WasmEngine::TriggerGC(int8_t gc_sequence_index) {
DCHECK_NULL(current_gc_info_);
DCHECK(FLAG_wasm_code_gc);
- current_gc_info_.reset(new CurrentGCInfo());
+ new_potentially_dead_code_size_ = 0;
+ current_gc_info_.reset(new CurrentGCInfo(gc_sequence_index));
+ if (base::TimeTicks::IsHighResolution()) {
+ current_gc_info_->start_time = base::TimeTicks::Now();
+ }
// Add all potentially dead code to this GC, and trigger a GC task in each
// isolate.
- // TODO(clemensh): Also trigger a stack check interrupt.
for (auto& entry : native_modules_) {
NativeModuleInfo* info = entry.second.get();
if (info->potentially_dead_code.empty()) continue;
@@ -722,11 +862,72 @@ void WasmEngine::TriggerGC() {
isolates_[isolate]->foreground_task_runner->PostTask(
std::move(new_task));
}
+ isolate->stack_guard()->RequestWasmCodeGC();
}
for (WasmCode* code : info->potentially_dead_code) {
current_gc_info_->dead_code.insert(code);
}
}
+ TRACE_CODE_GC(
+ "Starting GC. Total number of potentially dead code objects: %zu\n",
+ current_gc_info_->dead_code.size());
+}
+
+bool WasmEngine::RemoveIsolateFromCurrentGC(Isolate* isolate) {
+ DCHECK(!mutex_.TryLock());
+ DCHECK_NOT_NULL(current_gc_info_);
+ auto it = current_gc_info_->outstanding_isolates.find(isolate);
+ if (it == current_gc_info_->outstanding_isolates.end()) return false;
+ if (auto* fg_task = it->second) fg_task->Cancel();
+ current_gc_info_->outstanding_isolates.erase(it);
+ return true;
+}
+
+void WasmEngine::PotentiallyFinishCurrentGC() {
+ DCHECK(!mutex_.TryLock());
+ TRACE_CODE_GC(
+ "Remaining dead code objects: %zu; outstanding isolates: %zu.\n",
+ current_gc_info_->dead_code.size(),
+ current_gc_info_->outstanding_isolates.size());
+
+ // If there are more outstanding isolates, return immediately.
+ if (!current_gc_info_->outstanding_isolates.empty()) return;
+
+ // All remaining code in {current_gc_info->dead_code} is really dead.
+ // Move it from the set of potentially dead code to the set of dead code,
+ // and decrement its ref count.
+ size_t num_freed = 0;
+ DeadCodeMap dead_code;
+ for (WasmCode* code : current_gc_info_->dead_code) {
+ DCHECK_EQ(1, native_modules_.count(code->native_module()));
+ auto* native_module_info = native_modules_[code->native_module()].get();
+ DCHECK_EQ(1, native_module_info->potentially_dead_code.count(code));
+ native_module_info->potentially_dead_code.erase(code);
+ DCHECK_EQ(0, native_module_info->dead_code.count(code));
+ native_module_info->dead_code.insert(code);
+ if (code->DecRefOnDeadCode()) {
+ dead_code[code->native_module()].push_back(code);
+ ++num_freed;
+ }
+ }
+
+ FreeDeadCodeLocked(dead_code);
+
+ int duration_us = 0;
+ if (!current_gc_info_->start_time.IsNull()) {
+ duration_us = GetGCTimeMicros(current_gc_info_->start_time);
+ for (auto& entry : isolates_) {
+ entry.second->async_counters->wasm_code_gc_time()->AddSample(duration_us);
+ }
+ }
+
+ TRACE_CODE_GC("Took %d us; found %zu dead code objects, freed %zu.\n",
+ duration_us, current_gc_info_->dead_code.size(), num_freed);
+ USE(num_freed);
+
+ int8_t next_gc_sequence_index = current_gc_info_->next_gc_sequence_index;
+ current_gc_info_.reset();
+ if (next_gc_sequence_index != 0) TriggerGC(next_gc_sequence_index);
}
namespace {
@@ -766,6 +967,8 @@ uint32_t max_table_init_entries() {
FLAG_wasm_max_table_size);
}
+#undef TRACE_CODE_GC
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index c990005090..2ae3e81368 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -8,7 +8,7 @@
#include <memory>
#include <unordered_set>
-#include "src/cancelable-task.h"
+#include "src/tasks/cancelable-task.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-tier.h"
@@ -88,7 +88,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
// be shared across threads, i.e. could be concurrently modified.
void AsyncCompile(Isolate* isolate, const WasmFeatures& enabled,
std::shared_ptr<CompilationResultResolver> resolver,
- const ModuleWireBytes& bytes, bool is_shared);
+ const ModuleWireBytes& bytes, bool is_shared,
+ const char* api_method_name_for_errors);
// Begin an asynchronous instantiation of the given WASM module.
void AsyncInstantiate(Isolate* isolate,
@@ -98,6 +99,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
std::shared_ptr<StreamingDecoder> StartStreamingCompilation(
Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
+ const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver);
// Compiles the function with the given index at a specific compilation tier.
@@ -185,8 +187,11 @@ class V8_EXPORT_PRIVATE WasmEngine {
// This will spawn foreground tasks that do *not* keep the NativeModule alive.
void SampleTopTierCodeSizeInAllIsolates(const std::shared_ptr<NativeModule>&);
- // Called by each Isolate to report its live code for a GC cycle.
- void ReportLiveCodeForGC(Isolate*, Vector<WasmCode*> live_code);
+ // Called by each Isolate to report its live code for a GC cycle. First
+ // version reports an externally determined set of live code (might be empty),
+ // second version gets live code from the execution stack of that isolate.
+ void ReportLiveCodeForGC(Isolate*, Vector<WasmCode*>);
+ void ReportLiveCodeFromStackForGC(Isolate*);
// Add potentially dead code. The occurrence in the set of potentially dead
// code counts as a reference, and is decremented on the next GC.
@@ -195,6 +200,11 @@ class V8_EXPORT_PRIVATE WasmEngine {
// case.
V8_WARN_UNUSED_RESULT bool AddPotentiallyDeadCode(WasmCode*);
+ // Free dead code.
+ using DeadCodeMap = std::unordered_map<NativeModule*, std::vector<WasmCode*>>;
+ void FreeDeadCode(const DeadCodeMap&);
+ void FreeDeadCodeLocked(const DeadCodeMap&);
+
// Call on process start and exit.
static void InitializeOncePerProcess();
static void GlobalTearDown();
@@ -211,10 +221,19 @@ class V8_EXPORT_PRIVATE WasmEngine {
AsyncCompileJob* CreateAsyncCompileJob(
Isolate* isolate, const WasmFeatures& enabled,
std::unique_ptr<byte[]> bytes_copy, size_t length,
- Handle<Context> context,
+ Handle<Context> context, const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver);
- void TriggerGC();
+ void TriggerGC(int8_t gc_sequence_index);
+
+ // Remove an isolate from the outstanding isolates of the current GC. Returns
+ // true if the isolate was still outstanding, false otherwise. Hold {mutex_}
+ // when calling this method.
+ bool RemoveIsolateFromCurrentGC(Isolate*);
+
+ // Finish a GC if there are no more outstanding isolates. Hold {mutex_} when
+ // calling this method.
+ void PotentiallyFinishCurrentGC();
WasmMemoryTracker memory_tracker_;
WasmCodeManager code_manager_;
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 83f060cb9a..997cf83bb7 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -11,9 +11,26 @@
#include "src/base/bits.h"
#include "src/base/ieee754.h"
-#include "src/memcopy.h"
-#include "src/utils.h"
-#include "src/v8memory.h"
+#include "src/utils/memcopy.h"
+
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+ defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER) || \
+ defined(UNDEFINED_SANITIZER)
+#define V8_WITH_SANITIZER
+#endif
+
+#if defined(V8_OS_WIN) && defined(V8_WITH_SANITIZER)
+// With ASAN on Windows we have to reset the thread-in-wasm flag. Exceptions
+// caused by ASAN let the thread-in-wasm flag get out of sync. Even marking
+// functions with DISABLE_ASAN is not sufficient when the compiler produces
+// calls to memset. Therefore we add test-specific code for ASAN on
+// Windows.
+#define RESET_THREAD_IN_WASM_FLAG_FOR_ASAN_ON_WINDOWS
+#include "src/trap-handler/trap-handler.h"
+#endif
+
+#include "src/common/v8memory.h"
+#include "src/utils/utils.h"
#include "src/wasm/wasm-external-refs.h"
namespace v8 {
@@ -249,7 +266,15 @@ void float64_pow_wrapper(Address data) {
WriteUnalignedValue<double>(data, base::ieee754::pow(x, y));
}
-void memory_copy_wrapper(Address dst, Address src, uint32_t size) {
+// Asan on Windows triggers exceptions in this function to allocate
+// shadow memory lazily. When this function is called from WebAssembly,
+// these exceptions would be handled by the trap handler before they get
+// handled by Asan, and thereby confuse the thread-in-wasm flag.
+// Therefore we disable ASAN for this function. Alternatively we could
+// reset the thread-in-wasm flag before calling this function. However,
+// as this is only a problem with Asan on Windows, we did not consider
+// it worth the overhead.
+DISABLE_ASAN void memory_copy_wrapper(Address dst, Address src, uint32_t size) {
// Use explicit forward and backward copy to match the required semantics for
// the memory.copy instruction. It is assumed that the caller of this
// function has already performed bounds checks, so {src + size} and
@@ -270,7 +295,17 @@ void memory_copy_wrapper(Address dst, Address src, uint32_t size) {
}
}
+// Asan on Windows triggers exceptions in this function that confuse the
+// WebAssembly trap handler, so Asan is disabled. See the comment on
+// memory_copy_wrapper above for more info.
void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size) {
+#if defined(RESET_THREAD_IN_WASM_FLAG_FOR_ASAN_ON_WINDOWS)
+ bool thread_was_in_wasm = trap_handler::IsThreadInWasm();
+ if (thread_was_in_wasm) {
+ trap_handler::ClearThreadInWasm();
+ }
+#endif
+
// Use an explicit forward copy to match the required semantics for the
// memory.fill instruction. It is assumed that the caller of this function
// has already performed bounds checks, so {dst + size} should not overflow.
@@ -280,6 +315,11 @@ void memory_fill_wrapper(Address dst, uint32_t value, uint32_t size) {
for (; size > 0; size--) {
*dst8++ = value8;
}
+#if defined(RESET_THREAD_IN_WASM_FLAG_FOR_ASAN_ON_WINDOWS)
+ if (thread_was_in_wasm) {
+ trap_handler::SetThreadInWasm();
+ }
+#endif
}
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
@@ -297,3 +337,6 @@ void call_trap_callback_for_testing() {
} // namespace wasm
} // namespace internal
} // namespace v8
+
+#undef V8_WITH_SANITIZER
+#undef RESET_THREAD_IN_WASM_FLAG_FOR_ASAN_ON_WINDOWS
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index 1db608bf99..8318b8b0bb 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -7,7 +7,7 @@
#include <stdint.h>
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/wasm-features.cc b/deps/v8/src/wasm/wasm-features.cc
index 6271fd0506..fc0286655e 100644
--- a/deps/v8/src/wasm/wasm-features.cc
+++ b/deps/v8/src/wasm/wasm-features.cc
@@ -3,9 +3,9 @@
// found in the LICENSE file.
#include "src/wasm/wasm-features.h"
-#include "src/flags.h"
-#include "src/handles-inl.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/handles/handles-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
index caa9eb7904..b586d07ff4 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.cc
@@ -6,39 +6,34 @@
#include <vector>
-#include "src/counters.h"
+#include "src/logging/counters.h"
#include "src/wasm/wasm-code-manager.h"
namespace v8 {
namespace internal {
namespace wasm {
+WasmCode*& WasmImportWrapperCache::ModificationScope::operator[](
+ const CacheKey& key) {
+ return cache_->entry_map_[key];
+}
+
+WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
+ FunctionSig* sig) const {
+ auto it = entry_map_.find({kind, sig});
+ DCHECK(it != entry_map_.end());
+ return it->second;
+}
+
WasmImportWrapperCache::~WasmImportWrapperCache() {
std::vector<WasmCode*> ptrs;
ptrs.reserve(entry_map_.size());
- for (auto& e : entry_map_) ptrs.push_back(e.second);
- WasmCode::DecrementRefCount(VectorOf(ptrs));
-}
-
-WasmCode* WasmImportWrapperCache::GetOrCompile(
- WasmEngine* wasm_engine, Counters* counters,
- compiler::WasmImportCallKind kind, FunctionSig* sig) {
- base::MutexGuard lock(&mutex_);
- CacheKey key(static_cast<uint8_t>(kind), *sig);
- WasmCode*& cached = entry_map_[key];
- if (cached == nullptr) {
- // TODO(wasm): no need to hold the lock while compiling an import wrapper.
- bool source_positions = native_module_->module()->origin == kAsmJsOrigin;
- // Keep the {WasmCode} alive until we explicitly call {IncRef}.
- WasmCodeRefScope code_ref_scope;
- cached = compiler::CompileWasmImportCallWrapper(
- wasm_engine, native_module_, kind, sig, source_positions);
- cached->IncRef();
- counters->wasm_generated_code_size()->Increment(
- cached->instructions().length());
- counters->wasm_reloc_size()->Increment(cached->reloc_info().length());
+ for (auto& e : entry_map_) {
+ if (e.second) {
+ ptrs.push_back(e.second);
+ }
}
- return cached;
+ WasmCode::DecrementRefCount(VectorOf(ptrs));
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-import-wrapper-cache.h b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
index 91fe1c7b23..62f27cd9a4 100644
--- a/deps/v8/src/wasm/wasm-import-wrapper-cache.h
+++ b/deps/v8/src/wasm/wasm-import-wrapper-cache.h
@@ -23,23 +23,37 @@ using FunctionSig = Signature<ValueType>;
// Implements a cache for import wrappers.
class WasmImportWrapperCache {
public:
- ~WasmImportWrapperCache();
+ using CacheKey = std::pair<compiler::WasmImportCallKind, FunctionSig*>;
- V8_EXPORT_PRIVATE WasmCode* GetOrCompile(WasmEngine* wasm_engine,
- Counters* counters,
- compiler::WasmImportCallKind kind,
- FunctionSig* sig);
+ class CacheKeyHash {
+ public:
+ size_t operator()(const CacheKey& key) const {
+ return base::hash_combine(static_cast<uint8_t>(key.first), *key.second);
+ }
+ };
- private:
- friend class NativeModule;
- using CacheKey = std::pair<uint8_t, FunctionSig>;
+ // Helper class to modify the cache under a lock.
+ class ModificationScope {
+ public:
+ explicit ModificationScope(WasmImportWrapperCache* cache)
+ : cache_(cache), guard_(&cache->mutex_) {}
+
+ V8_EXPORT_PRIVATE WasmCode*& operator[](const CacheKey& key);
+
+ private:
+ WasmImportWrapperCache* const cache_;
+ base::MutexGuard guard_;
+ };
- mutable base::Mutex mutex_;
- NativeModule* native_module_;
- std::unordered_map<CacheKey, WasmCode*, base::hash<CacheKey>> entry_map_;
+ // Assumes the key exists in the map.
+ V8_EXPORT_PRIVATE WasmCode* Get(compiler::WasmImportCallKind kind,
+ FunctionSig* sig) const;
- explicit WasmImportWrapperCache(NativeModule* native_module)
- : native_module_(native_module) {}
+ ~WasmImportWrapperCache();
+
+ private:
+ base::Mutex mutex_;
+ std::unordered_map<CacheKey, WasmCode*, CacheKeyHash> entry_map_;
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 9118719def..f06cead069 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -7,19 +7,20 @@
#include "src/wasm/wasm-interpreter.h"
-#include "src/assembler-inl.h"
#include "src/base/overflowing-math.h"
-#include "src/boxed-float.h"
+#include "src/codegen/assembler-inl.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/conversions.h"
-#include "src/identity-map.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
+#include "src/objects/objects-inl.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/utils.h"
+#include "src/utils/boxed-float.h"
+#include "src/utils/identity-map.h"
+#include "src/utils/utils.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/memory-tracing.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-limits.h"
@@ -1171,7 +1172,7 @@ class ThreadImpl {
void Reset() {
TRACE("----- RESET -----\n");
- sp_ = stack_.get();
+ ResetStack(0);
frames_.clear();
state_ = WasmInterpreter::STOPPED;
trap_reason_ = kTrapCount;
@@ -1240,7 +1241,7 @@ class ThreadImpl {
// first).
DCHECK_EQ(activations_.back().fp, frames_.size());
DCHECK_LE(activations_.back().sp, StackHeight());
- sp_ = stack_.get() + activations_.back().sp;
+ ResetStack(activations_.back().sp);
activations_.pop_back();
}
@@ -1261,6 +1262,37 @@ class ThreadImpl {
return WasmInterpreter::Thread::HANDLED;
}
+ uint32_t GetGlobalCount() {
+ return static_cast<uint32_t>(module()->globals.size());
+ }
+
+ WasmValue GetGlobalValue(uint32_t index) {
+ const WasmGlobal* global = &module()->globals[index];
+ switch (global->type) {
+#define CASE_TYPE(wasm, ctype) \
+ case kWasm##wasm: { \
+ byte* ptr = GetGlobalPtr(global); \
+ return WasmValue( \
+ ReadLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr))); \
+ break; \
+ }
+ WASM_CTYPES(CASE_TYPE)
+#undef CASE_TYPE
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef: {
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
+ Handle<FixedArray> global_buffer; // The buffer of the global.
+ uint32_t global_index = 0; // The index into the buffer.
+ GetGlobalBufferAndIndex(global, &global_buffer, &global_index);
+ Handle<Object> value(global_buffer->get(global_index), isolate_);
+ return WasmValue(handle_scope.CloseAndEscape(value));
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
private:
// Handle a thrown exception. Returns whether the exception was handled inside
// the current activation. Unwinds the interpreted stack accordingly.
@@ -1283,7 +1315,7 @@ class ThreadImpl {
}
TRACE(" => drop frame #%zu (#%u @%zu)\n", frames_.size() - 1,
code->function->func_index, frame.pc);
- sp_ = stack_.get() + frame.sp;
+ ResetStack(frame.sp);
frames_.pop_back();
}
TRACE("----- UNWIND -----\n");
@@ -1310,8 +1342,6 @@ class ThreadImpl {
// kept in a separate on-heap reference stack to make the GC trace them.
// TODO(mstarzinger): Optimize simple stack operations (like "get_local",
// "set_local", and "tee_local") so that they don't require a handle scope.
- // TODO(mstarzinger): Ensure unused slots on the reference stack are cleared
- // so that they don't keep alive old/stale references unnecessarily long.
// TODO(mstarzinger): Consider optimizing activations that use no reference
// values to avoid allocating the reference stack entirely.
class StackValue {
@@ -1321,7 +1351,7 @@ class ThreadImpl {
if (IsReferenceValue()) {
value_ = WasmValue(Handle<Object>::null());
int ref_index = static_cast<int>(index);
- thread->reference_stack()->set(ref_index, *v.to_anyref());
+ thread->reference_stack().set(ref_index, *v.to_anyref());
}
}
@@ -1330,12 +1360,31 @@ class ThreadImpl {
DCHECK(value_.to_anyref().is_null());
int ref_index = static_cast<int>(index);
Isolate* isolate = thread->isolate_;
- Handle<Object> ref(thread->reference_stack()->get(ref_index), isolate);
+ Handle<Object> ref(thread->reference_stack().get(ref_index), isolate);
+ DCHECK(!ref->IsTheHole(isolate));
return WasmValue(ref);
}
bool IsReferenceValue() const { return value_.type() == kWasmAnyRef; }
+ void ClearValue(ThreadImpl* thread, sp_t index) {
+ if (!IsReferenceValue()) return;
+ int ref_index = static_cast<int>(index);
+ Isolate* isolate = thread->isolate_;
+ thread->reference_stack().set_the_hole(isolate, ref_index);
+ }
+
+ static void ClearValues(ThreadImpl* thread, sp_t index, int count) {
+ int ref_index = static_cast<int>(index);
+ thread->reference_stack().FillWithHoles(ref_index, ref_index + count);
+ }
+
+ static bool IsClearedValue(ThreadImpl* thread, sp_t index) {
+ int ref_index = static_cast<int>(index);
+ Isolate* isolate = thread->isolate_;
+ return thread->reference_stack().is_the_hole(isolate, ref_index);
+ }
+
private:
WasmValue value_;
};
@@ -1454,14 +1503,14 @@ class ThreadImpl {
int JumpToHandlerDelta(InterpreterCode* code, pc_t pc) {
ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
- DoStackTransfer(sp_ - (control_transfer_entry.sp_diff + kCatchInArity),
+ DoStackTransfer(control_transfer_entry.sp_diff + kCatchInArity,
control_transfer_entry.target_arity);
return control_transfer_entry.pc_diff;
}
int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
- DoStackTransfer(sp_ - control_transfer_entry.sp_diff,
+ DoStackTransfer(control_transfer_entry.sp_diff,
control_transfer_entry.target_arity);
return control_transfer_entry.pc_diff;
}
@@ -1485,12 +1534,12 @@ class ThreadImpl {
bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
size_t arity) {
DCHECK_GT(frames_.size(), 0);
- StackValue* sp_dest = stack_.get() + frames_.back().sp;
+ spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - frames_.back().sp);
frames_.pop_back();
if (frames_.size() == current_activation().fp) {
// A return from the last frame terminates the execution.
state_ = WasmInterpreter::FINISHED;
- DoStackTransfer(sp_dest, arity);
+ DoStackTransfer(sp_diff, arity);
TRACE(" => finish\n");
return false;
} else {
@@ -1502,7 +1551,7 @@ class ThreadImpl {
*limit = top->code->end - top->code->start;
TRACE(" => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
(*code)->function->func_index, *pc);
- DoStackTransfer(sp_dest, arity);
+ DoStackTransfer(sp_diff, arity);
return true;
}
}
@@ -1533,10 +1582,10 @@ class ThreadImpl {
Frame* top = &frames_.back();
// Drop everything except current parameters.
- StackValue* sp_dest = stack_.get() + top->sp;
+ spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - top->sp);
size_t arity = target->function->sig->parameter_count();
- DoStackTransfer(sp_dest, arity);
+ DoStackTransfer(sp_diff, arity);
*limit = target->end - target->start;
decoder->Reset(target->start, target->end);
@@ -1555,27 +1604,29 @@ class ThreadImpl {
return true;
}
- // Copies {arity} values on the top of the stack down the stack to {dest},
- // dropping the values in-between.
- void DoStackTransfer(StackValue* dest, size_t arity) {
+ // Copies {arity} values on the top of the stack down the stack while also
+ // dropping {sp_diff} many stack values in total from the stack.
+ void DoStackTransfer(spdiff_t sp_diff, size_t arity) {
// before: |---------------| pop_count | arity |
- // ^ 0 ^ dest ^ sp_
+ // ^ 0 ^ dest ^ src ^ StackHeight()
+ // ^----< sp_diff >----^
//
// after: |---------------| arity |
- // ^ 0 ^ sp_
- DCHECK_LE(dest, sp_);
- DCHECK_LE(dest + arity, sp_);
- if (arity && (dest != sp_ - arity)) {
- memmove(dest, sp_ - arity, arity * sizeof(*sp_));
+ // ^ 0 ^ StackHeight()
+ sp_t stack_height = StackHeight();
+ sp_t dest = stack_height - sp_diff;
+ sp_t src = stack_height - arity;
+ DCHECK_LE(dest, stack_height);
+ DCHECK_LE(dest, src);
+ if (arity && (dest != src)) {
+ StackValue* stack = stack_.get();
+ memmove(stack + dest, stack + src, arity * sizeof(StackValue));
// Also move elements on the reference stack accordingly.
- // TODO(mstarzinger): Refactor the interface so that we don't have to
- // recompute values here which are already known at the call-site.
- int dst = static_cast<int>(StackHeight() - (sp_ - dest));
- int src = static_cast<int>(StackHeight() - arity);
- int len = static_cast<int>(arity);
- isolate_->heap()->MoveElements(reference_stack(), dst, src, len);
+ reference_stack().MoveElements(
+ isolate_, static_cast<int>(dest), static_cast<int>(src),
+ static_cast<int>(arity), UPDATE_WRITE_BARRIER);
}
- sp_ = dest + arity;
+ ResetStack(dest + arity);
}
inline Address EffectiveAddress(uint32_t index) {
@@ -2054,7 +2105,7 @@ class ThreadImpl {
if (global->mutability && global->imported) {
*buffer =
handle(FixedArray::cast(
- instance_object_->imported_mutable_globals_buffers()->get(
+ instance_object_->imported_mutable_globals_buffers().get(
global->index)),
isolate_);
Address idx = instance_object_->imported_mutable_globals()[global->index];
@@ -2487,8 +2538,7 @@ class ThreadImpl {
uint32_t index) V8_WARN_UNUSED_RESULT {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
Handle<WasmExceptionTag> exception_tag(
- WasmExceptionTag::cast(
- instance_object_->exceptions_table()->get(index)),
+ WasmExceptionTag::cast(instance_object_->exceptions_table().get(index)),
isolate_);
uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
Handle<Object> exception_object =
@@ -2532,7 +2582,9 @@ class ThreadImpl {
EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[3]);
break;
}
- case kWasmAnyRef: {
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef: {
Handle<Object> anyref = value.to_anyref();
encoded_values->set(encoded_index++, *anyref);
break;
@@ -2561,7 +2613,7 @@ class ThreadImpl {
Handle<Object> caught_tag =
WasmExceptionPackage::GetExceptionTag(isolate_, exception_object);
Handle<Object> expected_tag =
- handle(instance_object_->exceptions_table()->get(index), isolate_);
+ handle(instance_object_->exceptions_table().get(index), isolate_);
DCHECK(expected_tag->IsWasmExceptionTag());
return expected_tag.is_identical_to(caught_tag);
}
@@ -2630,7 +2682,9 @@ class ThreadImpl {
value = WasmValue(Simd128(s128));
break;
}
- case kWasmAnyRef: {
+ case kWasmAnyRef:
+ case kWasmAnyFunc:
+ case kWasmExceptRef: {
Handle<Object> anyref(encoded_values->get(encoded_index++), isolate_);
value = WasmValue(anyref);
break;
@@ -2783,6 +2837,11 @@ class ThreadImpl {
}
break;
}
+ case kExprSelectWithType: {
+ SelectTypeImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
+ len = 1 + imm.length;
+ V8_FALLTHROUGH;
+ }
case kExprSelect: {
WasmValue cond = Pop();
WasmValue fval = Pop();
@@ -2866,6 +2925,18 @@ class ThreadImpl {
Push(WasmValue(isolate_->factory()->null_value()));
break;
}
+ case kExprRefFunc: {
+ FunctionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
+ code->at(pc));
+ HandleScope handle_scope(isolate_); // Avoid leaking handles.
+
+ Handle<WasmExportedFunction> function =
+ WasmInstanceObject::GetOrCreateWasmExportedFunction(
+ isolate_, instance_object_, imm.index);
+ Push(WasmValue(function));
+ len = 1 + imm.length;
+ break;
+ }
case kExprGetLocal: {
LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
HandleScope handle_scope(isolate_); // Avoid leaking handles.
@@ -3050,31 +3121,8 @@ class ThreadImpl {
case kExprGetGlobal: {
GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
code->at(pc));
- const WasmGlobal* global = &module()->globals[imm.index];
- switch (global->type) {
-#define CASE_TYPE(wasm, ctype) \
- case kWasm##wasm: { \
- byte* ptr = GetGlobalPtr(global); \
- Push(WasmValue( \
- ReadLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr)))); \
- break; \
- }
- WASM_CTYPES(CASE_TYPE)
-#undef CASE_TYPE
- case kWasmAnyRef:
- case kWasmAnyFunc:
- case kWasmExceptRef: {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Handle<FixedArray> global_buffer; // The buffer of the global.
- uint32_t global_index = 0; // The index into the buffer.
- GetGlobalBufferAndIndex(global, &global_buffer, &global_index);
- Handle<Object> value(global_buffer->get(global_index), isolate_);
- Push(WasmValue(value));
- break;
- }
- default:
- UNREACHABLE();
- }
+ HandleScope handle_scope(isolate_);
+ Push(GetGlobalValue(imm.index));
len = 1 + imm.length;
break;
}
@@ -3347,7 +3395,9 @@ class ThreadImpl {
StackValue stack_value = *--sp_;
// Note that {StackHeight} depends on the current {sp} value, hence this
// operation is split into two statements to ensure proper evaluation order.
- return stack_value.ExtractValue(this, StackHeight());
+ WasmValue val = stack_value.ExtractValue(this, StackHeight());
+ stack_value.ClearValue(this, StackHeight());
+ return val;
}
void Drop(int n = 1) {
@@ -3355,6 +3405,7 @@ class ThreadImpl {
DCHECK_GT(frames_.size(), 0);
// Check that we don't pop into locals.
DCHECK_GE(StackHeight() - n, frames_.back().llimit());
+ StackValue::ClearValues(this, StackHeight() - n, n);
sp_ -= n;
}
@@ -3367,6 +3418,7 @@ class ThreadImpl {
void Push(WasmValue val) {
DCHECK_NE(kWasmStmt, val.type());
DCHECK_LE(1, stack_limit_ - sp_);
+ DCHECK(StackValue::IsClearedValue(this, StackHeight()));
StackValue stack_value(val, this, StackHeight());
// Note that {StackHeight} depends on the current {sp} value, hence this
// operation is split into two statements to ensure proper evaluation order.
@@ -3381,6 +3433,13 @@ class ThreadImpl {
}
}
+ void ResetStack(sp_t new_height) {
+ DCHECK_LE(new_height, StackHeight()); // Only allowed to shrink.
+ int count = static_cast<int>(StackHeight() - new_height);
+ StackValue::ClearValues(this, new_height, count);
+ sp_ = stack_.get() + new_height;
+ }
+
void EnsureStackSpace(size_t size) {
if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
size_t old_size = stack_limit_ - stack_.get();
@@ -3400,6 +3459,8 @@ class ThreadImpl {
Handle<FixedArray> old_ref_stack(reference_stack(), isolate_);
Handle<FixedArray> new_ref_stack =
isolate_->factory()->CopyFixedArrayAndGrow(old_ref_stack, grow_by);
+ new_ref_stack->FillWithHoles(static_cast<int>(old_size),
+ static_cast<int>(new_size));
reference_stack_cell_->set_value(*new_ref_stack);
}
@@ -3479,7 +3540,7 @@ class ThreadImpl {
if (code->kind() == WasmCode::kWasmToJsWrapper &&
!IsJSCompatibleSignature(sig, enabled_features.bigint)) {
- sp_ -= num_args; // Pop arguments before throwing.
+ Drop(num_args); // Pop arguments before throwing.
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kWasmTrapTypeError));
return TryHandleException(isolate);
@@ -3562,7 +3623,7 @@ class ThreadImpl {
maybe_retval.is_null() ? " with exception" : "");
// Pop arguments off the stack.
- sp_ -= num_args;
+ Drop(num_args);
if (maybe_retval.is_null()) {
// JSEntry may throw a stack overflow before we actually get to wasm code
@@ -3608,12 +3669,21 @@ class ThreadImpl {
return {ExternalCallResult::EXTERNAL_RETURNED};
}
- static WasmCode* GetTargetCode(WasmCodeManager* code_manager,
- Address target) {
+ static WasmCode* GetTargetCode(Isolate* isolate, Address target) {
+ WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager();
NativeModule* native_module = code_manager->LookupNativeModule(target);
if (native_module->is_jump_table_slot(target)) {
uint32_t func_index =
native_module->GetFunctionIndexFromJumpTableSlot(target);
+
+ if (!native_module->HasCode(func_index)) {
+ bool success = CompileLazy(isolate, native_module, func_index);
+ if (!success) {
+ DCHECK(isolate->has_pending_exception());
+ return nullptr;
+ }
+ }
+
return native_module->GetCode(func_index);
}
WasmCode* code = native_module->Lookup(target);
@@ -3627,8 +3697,12 @@ class ThreadImpl {
ImportedFunctionEntry entry(instance_object_, function_index);
Handle<Object> object_ref(entry.object_ref(), isolate_);
- WasmCode* code =
- GetTargetCode(isolate_->wasm_engine()->code_manager(), entry.target());
+ WasmCode* code = GetTargetCode(isolate_, entry.target());
+
+ // In case a function's body is invalid and the function is lazily validated
+ // and compiled we may get an exception.
+ if (code == nullptr) return TryHandleException(isolate_);
+
FunctionSig* sig = module()->functions[function_index].sig;
return CallExternalWasmFunction(isolate_, object_ref, code, sig);
}
@@ -3657,8 +3731,11 @@ class ThreadImpl {
HandleScope handle_scope(isolate_); // Avoid leaking handles.
FunctionSig* signature = module()->signatures[sig_index];
Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
- WasmCode* code =
- GetTargetCode(isolate_->wasm_engine()->code_manager(), entry.target());
+ WasmCode* code = GetTargetCode(isolate_, entry.target());
+
+ // In case a function's body is invalid and the function is lazily validated
+ // and compiled we may get an exception.
+ if (code == nullptr) return TryHandleException(isolate_);
if (!object_ref->IsWasmInstanceObject() || /* call to an import */
!instance_object_.is_identical_to(object_ref) /* cross-instance */) {
@@ -3800,6 +3877,12 @@ WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
TrapReason WasmInterpreter::Thread::GetTrapReason() {
return ToImpl(this)->GetTrapReason();
}
+uint32_t WasmInterpreter::Thread::GetGlobalCount() {
+ return ToImpl(this)->GetGlobalCount();
+}
+WasmValue WasmInterpreter::Thread::GetGlobalValue(uint32_t index) {
+ return ToImpl(this)->GetGlobalValue(index);
+}
bool WasmInterpreter::Thread::PossibleNondeterminism() {
return ToImpl(this)->PossibleNondeterminism();
}
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index 9432446fb8..da0ce01835 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -138,6 +138,9 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
WasmValue GetReturnValue(int index = 0);
TrapReason GetTrapReason();
+ uint32_t GetGlobalCount();
+ WasmValue GetGlobalValue(uint32_t index);
+
// Returns true if the thread executed an instruction which may produce
// nondeterministic results, e.g. float div, float sqrt, and float mul,
// where the sign bit of a NaN is nondeterministic.
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 9aafc45b7e..fb633c6c26 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -4,24 +4,25 @@
#include "src/wasm/wasm-js.h"
-#include <string>
+#include <cinttypes>
+#include <cstring>
-#include "src/api-inl.h"
-#include "src/api-natives.h"
-#include "src/assert-scope.h"
+#include "src/api/api-inl.h"
+#include "src/api/api-natives.h"
#include "src/ast/ast.h"
#include "src/base/overflowing-math.h"
-#include "src/execution.h"
-#include "src/handles.h"
+#include "src/common/assert-scope.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/init/v8.h"
#include "src/objects/js-promise-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
-#include "src/task-utils.h"
+#include "src/tasks/task-utils.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/v8.h"
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
@@ -36,14 +37,14 @@ namespace v8 {
class WasmStreaming::WasmStreamingImpl {
public:
WasmStreamingImpl(
- Isolate* isolate,
+ Isolate* isolate, const char* api_method_name,
std::shared_ptr<internal::wasm::CompilationResultResolver> resolver)
: isolate_(isolate), resolver_(std::move(resolver)) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
streaming_decoder_ = i_isolate->wasm_engine()->StartStreamingCompilation(
i_isolate, enabled_features, handle(i_isolate->context(), i_isolate),
- resolver_);
+ api_method_name, resolver_);
}
void OnBytesReceived(const uint8_t* bytes, size_t size) {
@@ -502,11 +503,12 @@ bool EnforceUint32(T argument_name, Local<v8::Value> v, Local<Context> context,
// WebAssembly.compile(bytes) -> Promise
void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ constexpr const char* kAPIMethodName = "WebAssembly.compile()";
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+ ScheduledErrorThrower thrower(i_isolate, kAPIMethodName);
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
thrower.CompileError("Wasm code generation disallowed by embedder");
@@ -530,7 +532,8 @@ void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
// Asynchronous compilation handles copying wire bytes if necessary.
auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
i_isolate->wasm_engine()->AsyncCompile(i_isolate, enabled_features,
- std::move(resolver), bytes, is_shared);
+ std::move(resolver), bytes, is_shared,
+ kAPIMethodName);
}
void WasmStreamingCallbackForTesting(
@@ -569,7 +572,8 @@ void WebAssemblyCompileStreaming(
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.compile()");
+ const char* const kAPIMethodName = "WebAssembly.compileStreaming()";
+ ScheduledErrorThrower thrower(i_isolate, kAPIMethodName);
Local<Context> context = isolate->GetCurrentContext();
// Create and assign the return value of this function.
@@ -593,8 +597,8 @@ void WebAssemblyCompileStreaming(
i::Handle<i::Managed<WasmStreaming>> data =
i::Managed<WasmStreaming>::Allocate(
i_isolate, 0,
- base::make_unique<WasmStreaming::WasmStreamingImpl>(isolate,
- resolver));
+ base::make_unique<WasmStreaming::WasmStreamingImpl>(
+ isolate, kAPIMethodName, resolver));
DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback());
ASSIGN(
@@ -828,8 +832,8 @@ void WebAssemblyInstantiateStreaming(
HandleScope scope(isolate);
Local<Context> context = isolate->GetCurrentContext();
- ScheduledErrorThrower thrower(i_isolate,
- "WebAssembly.instantiateStreaming()");
+ const char* const kAPIMethodName = "WebAssembly.instantiateStreaming()";
+ ScheduledErrorThrower thrower(i_isolate, kAPIMethodName);
// Create and assign the return value of this function.
ASSIGN(Promise::Resolver, result_resolver, Promise::Resolver::New(context));
@@ -873,7 +877,7 @@ void WebAssemblyInstantiateStreaming(
i::Managed<WasmStreaming>::Allocate(
i_isolate, 0,
base::make_unique<WasmStreaming::WasmStreamingImpl>(
- isolate, compilation_resolver));
+ isolate, kAPIMethodName, compilation_resolver));
DCHECK_NOT_NULL(i_isolate->wasm_streaming_callback());
ASSIGN(
@@ -905,12 +909,13 @@ void WebAssemblyInstantiateStreaming(
// WebAssembly.instantiate(bytes, imports) ->
// {module: WebAssembly.Module, instance: WebAssembly.Instance}
void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ constexpr const char* kAPIMethodName = "WebAssembly.instantiate()";
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i_isolate->CountUsage(
v8::Isolate::UseCounterFeature::kWebAssemblyInstantiation);
- ScheduledErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
+ ScheduledErrorThrower thrower(i_isolate, kAPIMethodName);
HandleScope scope(isolate);
@@ -979,7 +984,7 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
i_isolate->wasm_engine()->AsyncCompile(i_isolate, enabled_features,
std::move(compilation_resolver), bytes,
- is_shared);
+ is_shared, kAPIMethodName);
}
bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
@@ -1193,6 +1198,39 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
+// Determines the type encoded in a value type property (e.g. type reflection).
+// Returns false if there was an exception, true upon success. On success the
+// outgoing {type} is set accordingly, or set to {wasm::kWasmStmt} in case the
+// type could not be properly recognized.
+bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
+ Local<Context> context, i::wasm::ValueType* type,
+ i::wasm::WasmFeatures enabled_features) {
+ v8::Local<v8::Value> value;
+ if (!maybe.ToLocal(&value)) return false;
+ v8::Local<v8::String> string;
+ if (!value->ToString(context).ToLocal(&string)) return false;
+ if (string->StringEquals(v8_str(isolate, "i32"))) {
+ *type = i::wasm::kWasmI32;
+ } else if (string->StringEquals(v8_str(isolate, "f32"))) {
+ *type = i::wasm::kWasmF32;
+ } else if (string->StringEquals(v8_str(isolate, "i64"))) {
+ *type = i::wasm::kWasmI64;
+ } else if (string->StringEquals(v8_str(isolate, "f64"))) {
+ *type = i::wasm::kWasmF64;
+ } else if (enabled_features.anyref &&
+ string->StringEquals(v8_str(isolate, "anyref"))) {
+ *type = i::wasm::kWasmAnyRef;
+ } else if (enabled_features.anyref &&
+ string->StringEquals(v8_str(isolate, "anyfunc"))) {
+ *type = i::wasm::kWasmAnyFunc;
+ } else {
+ // Unrecognized type.
+ *type = i::wasm::kWasmStmt;
+ }
+ return true;
+}
+
+// WebAssembly.Global
void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -1208,6 +1246,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Local<Context> context = isolate->GetCurrentContext();
Local<v8::Object> descriptor = Local<Object>::Cast(args[0]);
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
// The descriptor's 'mutable'.
bool is_mutable = false;
@@ -1227,27 +1266,8 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
{
v8::MaybeLocal<v8::Value> maybe =
descriptor->Get(context, v8_str(isolate, "value"));
- v8::Local<v8::Value> value;
- if (!maybe.ToLocal(&value)) return;
- v8::Local<v8::String> string;
- if (!value->ToString(context).ToLocal(&string)) return;
-
- auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
- if (string->StringEquals(v8_str(isolate, "i32"))) {
- type = i::wasm::kWasmI32;
- } else if (string->StringEquals(v8_str(isolate, "f32"))) {
- type = i::wasm::kWasmF32;
- } else if (string->StringEquals(v8_str(isolate, "i64"))) {
- type = i::wasm::kWasmI64;
- } else if (string->StringEquals(v8_str(isolate, "f64"))) {
- type = i::wasm::kWasmF64;
- } else if (enabled_features.anyref &&
- string->StringEquals(v8_str(isolate, "anyref"))) {
- type = i::wasm::kWasmAnyRef;
- } else if (enabled_features.anyref &&
- string->StringEquals(v8_str(isolate, "anyfunc"))) {
- type = i::wasm::kWasmAnyFunc;
- } else {
+ if (!GetValueType(isolate, maybe, context, &type, enabled_features)) return;
+ if (type == i::wasm::kWasmStmt) {
thrower.TypeError(
"Descriptor property 'value' must be 'i32', 'i64', 'f32', or "
"'f64'");
@@ -1283,7 +1303,6 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
case i::wasm::kWasmI64: {
int64_t i64_value = 0;
if (!value->IsUndefined()) {
- auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
if (!enabled_features.bigint) {
thrower.TypeError("Can't set the value of i64 WebAssembly.Global");
return;
@@ -1360,6 +1379,184 @@ void WebAssemblyException(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.TypeError("WebAssembly.Exception cannot be called");
}
+// WebAssembly.Function
+void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope scope(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Function()");
+ if (!args.IsConstructCall()) {
+ thrower.TypeError("WebAssembly.Function must be invoked with 'new'");
+ return;
+ }
+ if (!args[0]->IsObject()) {
+ thrower.TypeError("Argument 0 must be a function type");
+ return;
+ }
+ Local<Object> function_type = Local<Object>::Cast(args[0]);
+ Local<Context> context = isolate->GetCurrentContext();
+ auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+
+ // Load the 'parameters' property of the function type.
+ Local<String> parameters_key = v8_str(isolate, "parameters");
+ v8::MaybeLocal<v8::Value> parameters_maybe =
+ function_type->Get(context, parameters_key);
+ v8::Local<v8::Value> parameters_value;
+ if (!parameters_maybe.ToLocal(&parameters_value)) return;
+ // TODO(7742): Allow any iterable, not just {Array} here.
+ if (!parameters_value->IsArray()) {
+ thrower.TypeError("Argument 0 must be a function type with 'parameters'");
+ return;
+ }
+ Local<Array> parameters = parameters_value.As<Array>();
+ uint32_t parameters_len = parameters->Length();
+ if (parameters_len > i::wasm::kV8MaxWasmFunctionParams) {
+ thrower.TypeError("Argument 0 contains too many parameters");
+ return;
+ }
+
+ // Load the 'results' property of the function type.
+ Local<String> results_key = v8_str(isolate, "results");
+ v8::MaybeLocal<v8::Value> results_maybe =
+ function_type->Get(context, results_key);
+ v8::Local<v8::Value> results_value;
+ if (!results_maybe.ToLocal(&results_value)) return;
+ // TODO(7742): Allow any iterable, not just {Array} here.
+ if (!results_value->IsArray()) {
+ thrower.TypeError("Argument 0 must be a function type with 'results'");
+ return;
+ }
+ Local<Array> results = results_value.As<Array>();
+ uint32_t results_len = results->Length();
+ if (results_len > (enabled_features.mv
+ ? i::wasm::kV8MaxWasmFunctionMultiReturns
+ : i::wasm::kV8MaxWasmFunctionReturns)) {
+ thrower.TypeError("Argument 0 contains too many results");
+ return;
+ }
+
+ // Decode the function type and construct a signature.
+ i::Zone zone(i_isolate->allocator(), ZONE_NAME);
+ i::wasm::FunctionSig::Builder builder(&zone, parameters_len, results_len);
+ for (uint32_t i = 0; i < parameters_len; ++i) {
+ i::wasm::ValueType type;
+ MaybeLocal<Value> maybe = parameters->Get(context, i);
+ if (!GetValueType(isolate, maybe, context, &type, enabled_features)) return;
+ if (type == i::wasm::kWasmStmt) {
+ thrower.TypeError(
+ "Argument 0 parameter type at index #%u must be a value type", i);
+ return;
+ }
+ builder.AddParam(type);
+ }
+ for (uint32_t i = 0; i < results_len; ++i) {
+ i::wasm::ValueType type;
+ MaybeLocal<Value> maybe = results->Get(context, i);
+ if (!GetValueType(isolate, maybe, context, &type, enabled_features)) return;
+ if (type == i::wasm::kWasmStmt) {
+ thrower.TypeError(
+ "Argument 0 result type at index #%u must be a value type", i);
+ return;
+ }
+ builder.AddReturn(type);
+ }
+
+ if (!args[1]->IsFunction()) {
+ thrower.TypeError("Argument 1 must be a function");
+ return;
+ }
+
+ i::wasm::FunctionSig* sig = builder.Build();
+ i::Handle<i::JSReceiver> callable =
+ Utils::OpenHandle(*args[1].As<Function>());
+ i::Handle<i::JSFunction> result =
+ i::WasmJSFunction::New(i_isolate, sig, callable);
+ args.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+// Converts the given {type} into a string representation that can be used in
+// reflective functions. Should be kept in sync with the {GetValueType} helper.
+Local<String> ToValueTypeString(Isolate* isolate, i::wasm::ValueType type) {
+ Local<String> string;
+ switch (type) {
+ case i::wasm::kWasmI32: {
+ string = v8_str(isolate, "i32");
+ break;
+ }
+ case i::wasm::kWasmI64: {
+ string = v8_str(isolate, "i64");
+ break;
+ }
+ case i::wasm::kWasmF32: {
+ string = v8_str(isolate, "f32");
+ break;
+ }
+ case i::wasm::kWasmF64: {
+ string = v8_str(isolate, "f64");
+ break;
+ }
+ case i::wasm::kWasmAnyRef: {
+ string = v8_str(isolate, "anyref");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return string;
+}
+
+// WebAssembly.Function.type(WebAssembly.Function) -> FunctionType
+void WebAssemblyFunctionType(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ HandleScope scope(isolate);
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ScheduledErrorThrower thrower(i_isolate, "WebAssembly.Function.type()");
+
+ i::wasm::FunctionSig* sig;
+ i::Handle<i::Object> arg0 = Utils::OpenHandle(*args[0]);
+ if (i::WasmExportedFunction::IsWasmExportedFunction(*arg0)) {
+ sig = i::Handle<i::WasmExportedFunction>::cast(arg0)->sig();
+ } else if (i::WasmJSFunction::IsWasmJSFunction(*arg0)) {
+ // TODO(7742): Implement deserialization of signature.
+ sig = nullptr;
+ UNIMPLEMENTED();
+ } else {
+ thrower.TypeError("Argument 0 must be a WebAssembly.Function");
+ return;
+ }
+
+ // Extract values for the {ValueType[]} arrays.
+ size_t param_index = 0;
+ i::ScopedVector<Local<Value>> param_values(sig->parameter_count());
+ for (i::wasm::ValueType type : sig->parameters()) {
+ param_values[param_index++] = ToValueTypeString(isolate, type);
+ }
+ size_t result_index = 0;
+ i::ScopedVector<Local<Value>> result_values(sig->return_count());
+ for (i::wasm::ValueType type : sig->returns()) {
+ result_values[result_index++] = ToValueTypeString(isolate, type);
+ }
+
+ // Create the resulting {FunctionType} object.
+ Local<Object> ret = v8::Object::New(isolate);
+ Local<Context> context = isolate->GetCurrentContext();
+ Local<Array> params =
+ v8::Array::New(isolate, param_values.begin(), param_values.size());
+ if (!ret->CreateDataProperty(context, v8_str(isolate, "parameters"), params)
+ .IsJust()) {
+ return;
+ }
+ Local<Array> results =
+ v8::Array::New(isolate, result_values.begin(), result_values.size());
+ if (!ret->CreateDataProperty(context, v8_str(isolate, "results"), results)
+ .IsJust()) {
+ return;
+ }
+
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(ret);
+}
+
constexpr const char* kName_WasmGlobalObject = "WebAssembly.Global";
constexpr const char* kName_WasmMemoryObject = "WebAssembly.Memory";
constexpr const char* kName_WasmInstanceObject = "WebAssembly.Instance";
@@ -1412,40 +1609,13 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::FixedArray> old_array(receiver->elements(), i_isolate);
- uint32_t old_size = static_cast<uint32_t>(old_array->length());
+ int old_size = i::WasmTableObject::Grow(i_isolate, receiver, grow_by,
+ i_isolate->factory()->null_value());
- uint64_t max_size64 = receiver->maximum_length().IsUndefined(i_isolate)
- ? i::FLAG_wasm_max_table_size
- : receiver->maximum_length()->Number();
- if (max_size64 > i::FLAG_wasm_max_table_size) {
- max_size64 = i::FLAG_wasm_max_table_size;
- }
-
- DCHECK_LE(max_size64, std::numeric_limits<uint32_t>::max());
-
- uint64_t new_size64 =
- static_cast<uint64_t>(old_size) + static_cast<uint64_t>(grow_by);
- if (new_size64 > max_size64) {
- thrower.RangeError("maximum table size exceeded");
+ if (old_size < 0) {
+ thrower.RangeError("failed to grow table by %u", grow_by);
return;
}
- uint32_t new_size = static_cast<uint32_t>(new_size64);
-
- if (new_size != old_size) {
- receiver->Grow(i_isolate, new_size - old_size);
-
- i::Handle<i::FixedArray> new_array =
- i_isolate->factory()->NewFixedArray(new_size);
- for (uint32_t i = 0; i < old_size; ++i) {
- new_array->set(i, old_array->get(i));
- }
- i::Object null = i::ReadOnlyRoots(i_isolate).null_value();
- for (uint32_t i = old_size; i < new_size; ++i) new_array->set(i, null);
- receiver->set_elements(*new_array);
- }
-
- // TODO(gdeepti): use weak links for instances
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(old_size);
}
@@ -1503,7 +1673,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
// WebAssembly.Table.type(WebAssembly.Table) -> TableType
-void WebAssemblyTableGetType(const v8::FunctionCallbackInfo<v8::Value>& args) {
+void WebAssemblyTableType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -1541,8 +1711,8 @@ void WebAssemblyTableGetType(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- if (!table->maximum_length()->IsUndefined()) {
- uint64_t max_size = table->maximum_length()->Number();
+ if (!table->maximum_length().IsUndefined()) {
+ uint64_t max_size = table->maximum_length().Number();
DCHECK_LE(max_size, std::numeric_limits<uint32_t>::max());
if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
v8_str(isolate, "maximum"),
@@ -1625,7 +1795,7 @@ void WebAssemblyMemoryGetBuffer(
}
// WebAssembly.Memory.type(WebAssembly.Memory) -> MemoryType
-void WebAssemblyMemoryGetType(const v8::FunctionCallbackInfo<v8::Value>& args) {
+void WebAssemblyMemoryType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -1783,7 +1953,7 @@ void WebAssemblyGlobalSetValue(
}
// WebAssembly.Global.type(WebAssembly.Global) -> GlobalType
-void WebAssemblyGlobalGetType(const v8::FunctionCallbackInfo<v8::Value>& args) {
+void WebAssemblyGlobalType(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -1801,31 +1971,7 @@ void WebAssemblyGlobalGetType(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- Local<String> type;
- switch (global->type()) {
- case i::wasm::kWasmI32: {
- type = v8_str(isolate, "i32");
- break;
- }
- case i::wasm::kWasmI64: {
- type = v8_str(isolate, "i64");
- break;
- }
- case i::wasm::kWasmF32: {
- type = v8_str(isolate, "f32");
- break;
- }
- case i::wasm::kWasmF64: {
- type = v8_str(isolate, "f64");
- break;
- }
- case i::wasm::kWasmAnyRef: {
- type = v8_str(isolate, "anyref");
- break;
- }
- default:
- UNREACHABLE();
- }
+ Local<String> type = ToValueTypeString(isolate, global->type());
if (!ret->CreateDataProperty(isolate->GetCurrentContext(),
v8_str(isolate, "value"), type)
.IsJust()) {
@@ -1862,7 +2008,7 @@ Handle<JSFunction> CreateFunc(Isolate* isolate, Handle<String> name,
Handle<FunctionTemplateInfo> temp = NewFunctionTemplate(isolate, func);
Handle<JSFunction> function =
ApiNatives::InstantiateFunction(temp, name).ToHandleChecked();
- DCHECK(function->shared()->HasSharedName());
+ DCHECK(function->shared().HasSharedName());
return function;
}
@@ -1872,7 +2018,7 @@ Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
PropertyAttributes attributes = NONE) {
Handle<String> name = v8_str(isolate, str);
Handle<JSFunction> function = CreateFunc(isolate, name, func);
- function->shared()->set_length(length);
+ function->shared().set_length(length);
JSObject::AddProperty(isolate, object, name, function, attributes);
return function;
}
@@ -1913,7 +2059,7 @@ void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object,
CreateFunc(isolate, GetterName(isolate, name), getter);
Handle<JSFunction> setter_func =
CreateFunc(isolate, SetterName(isolate, name), setter);
- setter_func->shared()->set_length(1);
+ setter_func->shared().set_length(1);
v8::PropertyAttribute attributes = v8::None;
@@ -1929,7 +2075,7 @@ void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object,
void SetDummyInstanceTemplate(Isolate* isolate, Handle<JSFunction> fun) {
Handle<ObjectTemplateInfo> instance_template = NewObjectTemplate(isolate);
FunctionTemplateInfo::SetInstanceTemplate(
- isolate, handle(fun->shared()->get_api_func_data(), isolate),
+ isolate, handle(fun->shared().get_api_func_data(), isolate),
instance_template);
}
@@ -1939,8 +2085,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
Handle<Context> context(global->native_context(), isolate);
// Install the JS API once only.
Object prev = context->get(Context::WASM_MODULE_CONSTRUCTOR_INDEX);
- if (!prev->IsUndefined(isolate)) {
- DCHECK(prev->IsJSFunction());
+ if (!prev.IsUndefined(isolate)) {
+ DCHECK(prev.IsJSFunction());
return;
}
@@ -1987,7 +2133,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(module_constructor);
Handle<JSObject> module_proto(
JSObject::cast(module_constructor->instance_prototype()), isolate);
- i::Handle<i::Map> module_map =
+ Handle<Map> module_map =
isolate->factory()->NewMap(i::WASM_MODULE_TYPE, WasmModuleObject::kSize);
JSFunction::SetInitialMap(module_constructor, module_map, module_proto);
InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
@@ -2007,7 +2153,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(instance_constructor);
Handle<JSObject> instance_proto(
JSObject::cast(instance_constructor->instance_prototype()), isolate);
- i::Handle<i::Map> instance_map = isolate->factory()->NewMap(
+ Handle<Map> instance_map = isolate->factory()->NewMap(
i::WASM_INSTANCE_TYPE, WasmInstanceObject::kSize);
JSFunction::SetInitialMap(instance_constructor, instance_map, instance_proto);
InstallGetter(isolate, instance_proto, "exports",
@@ -2028,7 +2174,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(table_constructor);
Handle<JSObject> table_proto(
JSObject::cast(table_constructor->instance_prototype()), isolate);
- i::Handle<i::Map> table_map =
+ Handle<Map> table_map =
isolate->factory()->NewMap(i::WASM_TABLE_TYPE, WasmTableObject::kSize);
JSFunction::SetInitialMap(table_constructor, table_map, table_proto);
InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
@@ -2036,7 +2182,7 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1);
InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
if (enabled_features.type_reflection) {
- InstallFunc(isolate, table_constructor, "type", WebAssemblyTableGetType, 1);
+ InstallFunc(isolate, table_constructor, "type", WebAssemblyTableType, 1);
}
JSObject::AddProperty(isolate, table_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Table"), ro_attributes);
@@ -2049,14 +2195,13 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(memory_constructor);
Handle<JSObject> memory_proto(
JSObject::cast(memory_constructor->instance_prototype()), isolate);
- i::Handle<i::Map> memory_map =
+ Handle<Map> memory_map =
isolate->factory()->NewMap(i::WASM_MEMORY_TYPE, WasmMemoryObject::kSize);
JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto);
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
if (enabled_features.type_reflection) {
- InstallFunc(isolate, memory_constructor, "type", WebAssemblyMemoryGetType,
- 1);
+ InstallFunc(isolate, memory_constructor, "type", WebAssemblyMemoryType, 1);
}
JSObject::AddProperty(isolate, memory_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
@@ -2069,15 +2214,14 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(global_constructor);
Handle<JSObject> global_proto(
JSObject::cast(global_constructor->instance_prototype()), isolate);
- i::Handle<i::Map> global_map =
+ Handle<Map> global_map =
isolate->factory()->NewMap(i::WASM_GLOBAL_TYPE, WasmGlobalObject::kSize);
JSFunction::SetInitialMap(global_constructor, global_map, global_proto);
InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
WebAssemblyGlobalSetValue);
if (enabled_features.type_reflection) {
- InstallFunc(isolate, global_constructor, "type", WebAssemblyGlobalGetType,
- 1);
+ InstallFunc(isolate, global_constructor, "type", WebAssemblyGlobalType, 1);
}
JSObject::AddProperty(isolate, global_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Global"), ro_attributes);
@@ -2091,12 +2235,40 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::EnsureHasInitialMap(exception_constructor);
Handle<JSObject> exception_proto(
JSObject::cast(exception_constructor->instance_prototype()), isolate);
- i::Handle<i::Map> exception_map = isolate->factory()->NewMap(
+ Handle<Map> exception_map = isolate->factory()->NewMap(
i::WASM_EXCEPTION_TYPE, WasmExceptionObject::kSize);
JSFunction::SetInitialMap(exception_constructor, exception_map,
exception_proto);
}
+ // Setup Function
+ if (enabled_features.type_reflection) {
+ Handle<JSFunction> function_constructor = InstallConstructorFunc(
+ isolate, webassembly, "Function", WebAssemblyFunction);
+ context->set_wasm_function_constructor(*function_constructor);
+ SetDummyInstanceTemplate(isolate, function_constructor);
+ JSFunction::EnsureHasInitialMap(function_constructor);
+ Handle<JSObject> function_proto(
+ JSObject::cast(function_constructor->instance_prototype()), isolate);
+ Handle<Map> function_map = isolate->factory()->CreateSloppyFunctionMap(
+ FUNCTION_WITHOUT_PROTOTYPE, MaybeHandle<JSFunction>());
+ CHECK(JSObject::SetPrototype(
+ function_proto,
+ handle(context->function_function().prototype(), isolate), false,
+ kDontThrow)
+ .FromJust());
+ JSFunction::SetInitialMap(function_constructor, function_map,
+ function_proto);
+ InstallFunc(isolate, function_constructor, "type", WebAssemblyFunctionType,
+ 1);
+ // Make all exported functions an instance of {WebAssembly.Function}.
+ context->set_wasm_exported_function_map(*function_map);
+ } else {
+ // Make all exported functions an instance of {Function}.
+ Handle<Map> function_map = isolate->sloppy_function_without_prototype_map();
+ context->set_wasm_exported_function_map(*function_map);
+ }
+
// Setup errors
Handle<JSFunction> compile_error(
isolate->native_context()->wasm_compile_error_function(), isolate);
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 4a60f5d13d..4811288f4d 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -5,7 +5,7 @@
#ifndef V8_WASM_WASM_JS_H_
#define V8_WASM_WASM_JS_H_
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index 1761a4cea0..1bd32ef561 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -5,9 +5,9 @@
#ifndef V8_WASM_WASM_LINKAGE_H_
#define V8_WASM_WASM_LINKAGE_H_
-#include "src/assembler-arch.h"
-#include "src/machine-type.h"
-#include "src/signature.h"
+#include "src/codegen/assembler-arch.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/signature.h"
#include "src/wasm/value-type.h"
namespace v8 {
diff --git a/deps/v8/src/wasm/wasm-memory.cc b/deps/v8/src/wasm/wasm-memory.cc
index 914b61244d..8633a61504 100644
--- a/deps/v8/src/wasm/wasm-memory.cc
+++ b/deps/v8/src/wasm/wasm-memory.cc
@@ -4,10 +4,10 @@
#include <limits>
-#include "src/counters.h"
#include "src/heap/heap-inl.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
@@ -84,10 +84,10 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
// We are over the address space limit. Fail.
//
// When running under the correctness fuzzer (i.e.
- // --abort-on-stack-or-string-length-overflow is preset), we crash
+ // --correctness-fuzzer-suppressions is preset), we crash
// instead so it is not incorrectly reported as a correctness
// violation. See https://crbug.com/828293#c4
- if (FLAG_abort_on_stack_or_string_length_overflow) {
+ if (FLAG_correctness_fuzzer_suppressions) {
FATAL("could not allocate wasm memory");
}
AddAllocationStatusSample(
@@ -137,7 +137,7 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors,
// address space limits needs to be smaller.
-constexpr size_t kAddressSpaceLimit = 0x4000000000L; // 256 GiB
+constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
#elif V8_TARGET_ARCH_64_BIT
constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
#else
@@ -260,8 +260,8 @@ bool WasmMemoryTracker::IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer) {
return allocation->second.is_growable;
}
-bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
- const void* buffer_start) {
+bool WasmMemoryTracker::FreeWasmMemory(Isolate* isolate,
+ const void* buffer_start) {
base::MutexGuard scope_lock(&mutex_);
const auto& result = allocations_.find(buffer_start);
if (result == allocations_.end()) return false;
@@ -280,7 +280,9 @@ bool WasmMemoryTracker::FreeMemoryIfIsWasmMemory(Isolate* isolate,
void WasmMemoryTracker::RegisterWasmMemoryAsShared(
Handle<WasmMemoryObject> object, Isolate* isolate) {
- const void* backing_store = object->array_buffer()->backing_store();
+ // Only register with the tracker if shared grow is enabled.
+ if (!FLAG_wasm_grow_shared_memory) return;
+ const void* backing_store = object->array_buffer().backing_store();
// TODO(V8:8810): This should be a DCHECK, currently some tests do not
// use a full WebAssembly.Memory, and fail on registering so return early.
if (!IsWasmMemory(backing_store)) return;
@@ -323,9 +325,9 @@ void WasmMemoryTracker::UpdateSharedMemoryInstances(Isolate* isolate) {
void WasmMemoryTracker::RegisterSharedWasmMemory_Locked(
Handle<WasmMemoryObject> object, Isolate* isolate) {
- DCHECK(object->array_buffer()->is_shared());
+ DCHECK(object->array_buffer().is_shared());
- void* backing_store = object->array_buffer()->backing_store();
+ void* backing_store = object->array_buffer().backing_store();
// The allocation of a WasmMemoryObject should always be registered with the
// WasmMemoryTracker.
const auto& result = allocations_.find(backing_store);
@@ -426,11 +428,11 @@ void WasmMemoryTracker::UpdateMemoryObjectsForIsolate_Locked(
HandleScope scope(isolate);
Handle<WasmMemoryObject> memory_object = memory_obj_state.memory_object;
DCHECK(memory_object->IsWasmMemoryObject());
- DCHECK(memory_object->array_buffer()->is_shared());
+ DCHECK(memory_object->array_buffer().is_shared());
// Permissions adjusted, but create a new buffer with new size
// and old attributes. Buffer has already been allocated,
// just create a new buffer with same backing store.
- bool is_external = memory_object->array_buffer()->is_external();
+ bool is_external = memory_object->array_buffer().is_external();
Handle<JSArrayBuffer> new_buffer = SetupArrayBuffer(
isolate, backing_store, new_size, is_external, SharedFlag::kShared);
memory_obj_state.memory_object->update_instances(isolate, new_buffer);
@@ -465,14 +467,7 @@ bool WasmMemoryTracker::CanFreeSharedMemory_Locked(const void* backing_store) {
const auto& value = isolates_per_buffer_.find(backing_store);
// If no isolates share this buffer, backing store can be freed.
// Erase the buffer entry.
- if (value == isolates_per_buffer_.end()) return true;
- if (value->second.empty()) {
- // If no isolates share this buffer, the global handles to memory objects
- // associated with this buffer should have been destroyed.
- // DCHECK(shared_memory_map_.find(backing_store) ==
- // shared_memory_map_.end());
- return true;
- }
+ if (value == isolates_per_buffer_.end() || value->second.empty()) return true;
return false;
}
diff --git a/deps/v8/src/wasm/wasm-memory.h b/deps/v8/src/wasm/wasm-memory.h
index 8cda54eaf9..ecb6203ac5 100644
--- a/deps/v8/src/wasm/wasm-memory.h
+++ b/deps/v8/src/wasm/wasm-memory.h
@@ -10,8 +10,8 @@
#include <unordered_set>
#include "src/base/platform/mutex.h"
-#include "src/flags.h"
-#include "src/handles.h"
+#include "src/flags/flags.h"
+#include "src/handles/handles.h"
#include "src/objects/js-array-buffer.h"
namespace v8 {
@@ -107,10 +107,8 @@ class WasmMemoryTracker {
V8_EXPORT_PRIVATE const AllocationData* FindAllocationData(
const void* buffer_start);
- // Checks if a buffer points to a Wasm memory and if so does any necessary
- // work to reclaim the buffer. If this function returns false, the caller must
- // free the buffer manually.
- bool FreeMemoryIfIsWasmMemory(Isolate* isolate, const void* buffer_start);
+ // Free Memory allocated by the Wasm memory tracker
+ bool FreeWasmMemory(Isolate* isolate, const void* buffer_start);
void MarkWasmMemoryNotGrowable(Handle<JSArrayBuffer> buffer);
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 20a33f2cb9..eb253219ad 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/signature.h"
+#include "src/codegen/signature.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
+#include "src/handles/handles.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
#include "src/zone/zone-containers.h"
#include "src/wasm/function-body-decoder.h"
@@ -16,7 +16,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
-#include "src/v8memory.h"
+#include "src/common/v8memory.h"
namespace v8 {
namespace internal {
@@ -171,6 +171,16 @@ void WasmFunctionBuilder::SetAsmFunctionStartPosition(
last_asm_source_position_ = function_position_u32;
}
+void WasmFunctionBuilder::SetCompilationHint(
+ WasmCompilationHintStrategy strategy, WasmCompilationHintTier baseline,
+ WasmCompilationHintTier top_tier) {
+ uint8_t hint_byte = static_cast<uint8_t>(strategy) |
+ static_cast<uint8_t>(baseline) << 2 |
+ static_cast<uint8_t>(top_tier) << 4;
+ DCHECK_NE(hint_byte, kNoCompilationHint);
+ hint_ = hint_byte;
+}
+
void WasmFunctionBuilder::DeleteCodeAfter(size_t position) {
DCHECK_LE(position, body_.size());
body_.Truncate(position);
@@ -361,7 +371,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
if (functions_.size() > 0) {
size_t start = EmitSection(kFunctionSectionCode, buffer);
buffer.write_size(functions_.size());
- for (auto function : functions_) {
+ for (auto* function : functions_) {
function->WriteSignature(buffer);
if (!function->name_.empty()) ++num_function_names;
}
@@ -498,11 +508,37 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
FixupSection(buffer, start);
}
+ // == emit compilation hints section =========================================
+ bool emit_compilation_hints = false;
+ for (auto* fn : functions_) {
+ if (fn->hint_ != kNoCompilationHint) {
+ emit_compilation_hints = true;
+ break;
+ }
+ }
+ if (emit_compilation_hints) {
+ // Emit the section code.
+ buffer.write_u8(kUnknownSectionCode);
+ // Emit a placeholder for section length.
+ size_t start = buffer.reserve_u32v();
+ // Emit custom section name.
+ buffer.write_string(CStrVector("compilationHints"));
+ // Emit hint count.
+ buffer.write_size(functions_.size());
+ // Emit hint bytes.
+ for (auto* fn : functions_) {
+ uint8_t hint_byte =
+ fn->hint_ != kNoCompilationHint ? fn->hint_ : kDefaultCompilationHint;
+ buffer.write_u8(hint_byte);
+ }
+ FixupSection(buffer, start);
+ }
+
// == emit code ==============================================================
if (functions_.size() > 0) {
size_t start = EmitSection(kCodeSectionCode, buffer);
buffer.write_size(functions_.size());
- for (auto function : functions_) {
+ for (auto* function : functions_) {
function->WriteBody(buffer);
}
FixupSection(buffer, start);
@@ -531,8 +567,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
// Emit a placeholder for the length.
size_t start = buffer.reserve_u32v();
// Emit the section string.
- buffer.write_size(4);
- buffer.write(reinterpret_cast<const byte*>("name"), 4);
+ buffer.write_string(CStrVector("name"));
// Emit a subsection for the function names.
buffer.write_u8(NameSectionKindCode::kFunction);
// Emit a placeholder for the subsection length.
@@ -549,7 +584,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_string(import->name);
}
if (num_function_names > 0) {
- for (auto function : functions_) {
+ for (auto* function : functions_) {
DCHECK_EQ(function_index,
function->func_index() + function_imports_.size());
if (!function->name_.empty()) {
@@ -568,7 +603,7 @@ void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer& buffer) const {
// == Emit asm.js offset table ===============================================
buffer.write_size(functions_.size());
// Emit the offset table per function.
- for (auto function : functions_) {
+ for (auto* function : functions_) {
function->WriteAsmWasmOffsetTable(buffer);
}
// Append a 0 to indicate that this is an encoded table.
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index e9b22a392c..750dafa227 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -5,13 +5,14 @@
#ifndef V8_WASM_WASM_MODULE_BUILDER_H_
#define V8_WASM_WASM_MODULE_BUILDER_H_
-#include "src/signature.h"
+#include "src/codegen/signature.h"
#include "src/zone/zone-containers.h"
-#include "src/v8memory.h"
-#include "src/vector.h"
+#include "src/common/v8memory.h"
+#include "src/utils/vector.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/local-decl-encoder.h"
+#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -89,7 +90,7 @@ class ZoneBuffer : public ZoneObject {
void write_string(Vector<const char> name) {
write_size(name.length());
- write(reinterpret_cast<const byte*>(name.start()), name.length());
+ write(reinterpret_cast<const byte*>(name.begin()), name.length());
}
size_t reserve_u32v() {
@@ -176,6 +177,9 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
void SetName(Vector<const char> name);
void AddAsmWasmOffset(size_t call_position, size_t to_number_position);
void SetAsmFunctionStartPosition(size_t function_position);
+ void SetCompilationHint(WasmCompilationHintStrategy strategy,
+ WasmCompilationHintTier baseline,
+ WasmCompilationHintTier top_tier);
size_t GetPosition() const { return body_.size(); }
void FixupByte(size_t position, byte value) {
@@ -217,6 +221,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
uint32_t last_asm_byte_offset_ = 0;
uint32_t last_asm_source_position_ = 0;
uint32_t asm_func_start_source_position_ = 0;
+ uint8_t hint_ = kNoCompilationHint;
};
class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 53fe290d55..05057301ed 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -5,17 +5,17 @@
#include <functional>
#include <memory>
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/interface-types.h"
-#include "src/frames-inl.h"
-#include "src/objects.h"
+#include "src/execution/frames-inl.h"
+#include "src/execution/simulator.h"
+#include "src/init/v8.h"
#include "src/objects/js-array-inl.h"
-#include "src/property-descriptor.h"
-#include "src/simulator.h"
+#include "src/objects/objects.h"
+#include "src/objects/property-descriptor.h"
#include "src/snapshot/snapshot.h"
-#include "src/v8.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-js.h"
@@ -42,6 +42,22 @@ WireBytesRef WasmModule::LookupFunctionName(const ModuleWireBytes& wire_bytes,
return it->second;
}
+// static
+int MaxNumExportWrappers(const WasmModule* module) {
+ // For each signature there may exist a wrapper, both for imported and
+ // internal functions.
+ return static_cast<int>(module->signature_map.size()) * 2;
+}
+
+// static
+int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig,
+ bool is_import) {
+ int result = module->signature_map.Find(*sig);
+ CHECK_GE(result, 0);
+ result += is_import ? module->signature_map.size() : 0;
+ return result;
+}
+
void WasmModule::AddFunctionNameForTesting(int function_index,
WireBytesRef name) {
if (!function_names) {
@@ -67,9 +83,9 @@ WasmName ModuleWireBytes::GetNameOrNull(const WasmFunction* function,
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
os << "#" << name.function_->func_index;
if (!name.name_.empty()) {
- if (name.name_.start()) {
+ if (name.name_.begin()) {
os << ":";
- os.write(name.name_.start(), name.name_.length());
+ os.write(name.name_.begin(), name.name_.length());
}
} else {
os << "?";
@@ -242,7 +258,7 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
Vector<const uint8_t> wire_bytes =
module_object->native_module()->wire_bytes();
std::vector<CustomSectionOffset> custom_sections =
- DecodeCustomSections(wire_bytes.start(), wire_bytes.end());
+ DecodeCustomSections(wire_bytes.begin(), wire_bytes.end());
std::vector<Handle<Object>> matching_sections;
@@ -267,7 +283,7 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
constexpr bool is_external = false;
JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size);
- memcpy(memory, wire_bytes.start() + section.payload.offset(),
+ memcpy(memory, wire_bytes.begin() + section.payload.offset(),
section.payload.length());
matching_sections.push_back(buffer);
@@ -291,7 +307,7 @@ Handle<FixedArray> DecodeLocalNames(Isolate* isolate,
Vector<const uint8_t> wire_bytes =
module_object->native_module()->wire_bytes();
LocalNames decoded_locals;
- DecodeLocalNames(wire_bytes.start(), wire_bytes.end(), &decoded_locals);
+ DecodeLocalNames(wire_bytes.begin(), wire_bytes.end(), &decoded_locals);
Handle<FixedArray> locals_names =
isolate->factory()->NewFixedArray(decoded_locals.max_function_index + 1);
for (LocalNamesPerFunction& func : decoded_locals.names) {
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index c4f171ecf8..eb40c51dd3 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -7,9 +7,9 @@
#include <memory>
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/vector.h"
+#include "src/common/globals.h"
+#include "src/handles/handles.h"
+#include "src/utils/vector.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-opcodes.h"
@@ -147,6 +147,7 @@ enum class WasmCompilationHintStrategy : uint8_t {
kDefault = 0,
kLazy = 1,
kEager = 2,
+ kLazyBaselineEagerTopTier = 3,
};
enum class WasmCompilationHintTier : uint8_t {
@@ -195,7 +196,6 @@ struct V8_EXPORT_PRIVATE WasmModule {
uint32_t num_declared_functions = 0; // excluding imported
uint32_t num_exported_functions = 0;
uint32_t num_declared_data_segments = 0; // From the DataCount section.
- uint32_t num_lazy_compilation_hints = 0; // From compilation hints section.
WireBytesRef name = {0, 0};
std::vector<FunctionSig*> signatures; // by signature index
std::vector<uint32_t> signature_ids; // by signature index
@@ -223,6 +223,14 @@ struct V8_EXPORT_PRIVATE WasmModule {
size_t EstimateStoredSize(const WasmModule* module);
+// Returns the number of possible export wrappers for a given module.
+V8_EXPORT_PRIVATE int MaxNumExportWrappers(const WasmModule* module);
+
+// Returns the wrapper index for a function in {module} with signature {sig}
+// and origin defined by {is_import}.
+int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig,
+ bool is_import);
+
// Interface to the storage (wire bytes) of a wasm module.
// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
// on module_bytes, as this storage is only guaranteed to be alive as long as
@@ -254,7 +262,7 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
}
Vector<const byte> module_bytes() const { return module_bytes_; }
- const byte* start() const { return module_bytes_.start(); }
+ const byte* start() const { return module_bytes_.begin(); }
const byte* end() const { return module_bytes_.end(); }
size_t length() const { return module_bytes_.length(); }
@@ -310,7 +318,7 @@ class TruncatedUserString {
public:
template <typename T>
explicit TruncatedUserString(Vector<T> name)
- : TruncatedUserString(name.start(), name.length()) {}
+ : TruncatedUserString(name.begin(), name.length()) {}
TruncatedUserString(const byte* start, size_t len)
: TruncatedUserString(reinterpret_cast<const char*>(start), len) {}
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index c1f9e7876a..e1fc2d2410 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -7,8 +7,9 @@
#include "src/wasm/wasm-objects.h"
-#include "src/contexts-inl.h"
+#include "src/common/v8memory.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/contexts-inl.h"
#include "src/objects/foreign-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
@@ -16,8 +17,7 @@
#include "src/objects/managed.h"
#include "src/objects/oddball-inl.h"
#include "src/objects/script-inl.h"
-#include "src/roots.h"
-#include "src/v8memory.h"
+#include "src/roots/roots.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-module.h"
@@ -53,7 +53,7 @@ CAST_ACCESSOR(AsmWasmData)
#define OPTIONAL_ACCESSORS(holder, name, type, offset) \
bool holder::has_##name() { \
- return !READ_FIELD(*this, offset)->IsUndefined(); \
+ return !READ_FIELD(*this, offset).IsUndefined(); \
} \
ACCESSORS(holder, name, type, offset)
@@ -95,11 +95,11 @@ OPTIONAL_ACCESSORS(WasmModuleObject, asm_js_offset_table, ByteArray,
OPTIONAL_ACCESSORS(WasmModuleObject, breakpoint_infos, FixedArray,
kBreakPointInfosOffset)
wasm::NativeModule* WasmModuleObject::native_module() const {
- return managed_native_module()->raw();
+ return managed_native_module().raw();
}
const std::shared_ptr<wasm::NativeModule>&
WasmModuleObject::shared_native_module() const {
- return managed_native_module()->get();
+ return managed_native_module().get();
}
const wasm::WasmModule* WasmModuleObject::module() const {
// TODO(clemensh): Remove this helper (inline in callers).
@@ -111,13 +111,13 @@ void WasmModuleObject::reset_breakpoint_infos() {
}
bool WasmModuleObject::is_asm_js() {
bool asm_js = module()->origin == wasm::kAsmJsOrigin;
- DCHECK_EQ(asm_js, script()->IsUserJavaScript());
+ DCHECK_EQ(asm_js, script().IsUserJavaScript());
DCHECK_EQ(asm_js, has_asm_js_offset_table());
return asm_js;
}
// WasmTableObject
-ACCESSORS(WasmTableObject, elements, FixedArray, kElementsOffset)
+ACCESSORS(WasmTableObject, entries, FixedArray, kEntriesOffset)
ACCESSORS(WasmTableObject, maximum_length, Object, kMaximumLengthOffset)
ACCESSORS(WasmTableObject, dispatch_tables, FixedArray, kDispatchTablesOffset)
SMI_ACCESSORS(WasmTableObject, raw_type, kRawTypeOffset)
@@ -143,8 +143,8 @@ int WasmGlobalObject::type_size() const {
Address WasmGlobalObject::address() const {
DCHECK_NE(type(), wasm::kWasmAnyRef);
- DCHECK_LE(offset() + type_size(), untagged_buffer()->byte_length());
- return Address(untagged_buffer()->backing_store()) + offset();
+ DCHECK_LE(offset() + type_size(), untagged_buffer().byte_length());
+ return Address(untagged_buffer().backing_store()) + offset();
}
int32_t WasmGlobalObject::GetI32() {
@@ -166,7 +166,7 @@ double WasmGlobalObject::GetF64() {
Handle<Object> WasmGlobalObject::GetRef() {
// We use this getter for anyref, anyfunc, and except_ref.
DCHECK(wasm::ValueTypes::IsReferenceType(type()));
- return handle(tagged_buffer()->get(offset()), GetIsolate());
+ return handle(tagged_buffer().get(offset()), GetIsolate());
}
void WasmGlobalObject::SetI32(int32_t value) {
@@ -188,7 +188,7 @@ void WasmGlobalObject::SetF64(double value) {
void WasmGlobalObject::SetAnyRef(Handle<Object> value) {
// We use this getter anyref and except_ref.
DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExceptRef);
- tagged_buffer()->set(offset(), *value);
+ tagged_buffer().set(offset(), *value);
}
bool WasmGlobalObject::SetAnyFunc(Isolate* isolate, Handle<Object> value) {
@@ -197,7 +197,7 @@ bool WasmGlobalObject::SetAnyFunc(Isolate* isolate, Handle<Object> value) {
!WasmExportedFunction::IsWasmExportedFunction(*value)) {
return false;
}
- tagged_buffer()->set(offset(), *value);
+ tagged_buffer().set(offset(), *value);
return true;
}
@@ -308,11 +308,37 @@ SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
+// WasmJSFunction
+WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
+ SLOW_DCHECK(IsWasmJSFunction(*this));
+}
+CAST_ACCESSOR(WasmJSFunction)
+
+// WasmJSFunctionData
+OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData, Struct)
+CAST_ACCESSOR(WasmJSFunctionData)
+ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
+
+// WasmCapiFunction
+WasmCapiFunction::WasmCapiFunction(Address ptr) : JSFunction(ptr) {
+ SLOW_DCHECK(IsWasmCapiFunction(*this));
+}
+CAST_ACCESSOR(WasmCapiFunction)
+
+// WasmCapiFunctionData
+OBJECT_CONSTRUCTORS_IMPL(WasmCapiFunctionData, Struct)
+CAST_ACCESSOR(WasmCapiFunctionData)
+PRIMITIVE_ACCESSORS(WasmCapiFunctionData, call_target, Address,
+ kCallTargetOffset)
+PRIMITIVE_ACCESSORS(WasmCapiFunctionData, embedder_data, void*,
+ kEmbedderDataOffset)
+ACCESSORS(WasmCapiFunctionData, wrapper_code, Code, kWrapperCodeOffset)
+ACCESSORS(WasmCapiFunctionData, serialized_signature, PodArray<wasm::ValueType>,
+ kSerializedSignatureOffset)
+
// WasmDebugInfo
ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
-ACCESSORS(WasmDebugInfo, interpreted_functions, FixedArray,
- kInterpretedFunctionsOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, locals_names, FixedArray, kLocalsNamesOffset)
OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
kCWasmEntriesOffset)
@@ -324,7 +350,7 @@ OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
#undef WRITE_PRIMITIVE_FIELD
#undef PRIMITIVE_ACCESSORS
-uint32_t WasmTableObject::current_length() { return elements()->length(); }
+uint32_t WasmTableObject::current_length() { return entries().length(); }
wasm::ValueType WasmTableObject::type() {
return static_cast<wasm::ValueType>(raw_type());
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 950fc8bc45..27a56695c2 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -3,20 +3,20 @@
// found in the LICENSE file.
#include "src/wasm/wasm-objects.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
-#include "src/assembler-inl.h"
#include "src/base/iterator.h"
-#include "src/code-factory.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/counters.h"
#include "src/debug/debug-interface.h"
-#include "src/objects-inl.h"
+#include "src/logging/counters.h"
#include "src/objects/debug-objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/struct-inl.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -169,8 +169,8 @@ size_t EstimateNativeAllocationsSize(const WasmModule* module) {
WasmInstanceNativeAllocations* GetNativeAllocations(
WasmInstanceObject instance) {
return Managed<WasmInstanceNativeAllocations>::cast(
- instance->managed_native_allocations())
- ->raw();
+ instance.managed_native_allocations())
+ .raw();
}
#ifdef DEBUG
@@ -179,7 +179,7 @@ bool IsBreakablePosition(wasm::NativeModule* native_module, int func_index,
AccountingAllocator alloc;
Zone tmp(&alloc, ZONE_NAME);
wasm::BodyLocalDecls locals(&tmp);
- const byte* module_start = native_module->wire_bytes().start();
+ const byte* module_start = native_module->wire_bytes().begin();
const WasmFunction& func = native_module->module()->functions[func_index];
wasm::BytecodeIterator iterator(module_start + func.code.offset(),
module_start + func.code.end_offset(),
@@ -232,9 +232,9 @@ Handle<WasmModuleObject> WasmModuleObject::New(
Isolate* isolate, std::shared_ptr<wasm::NativeModule> native_module,
Handle<Script> script, size_t code_size_estimate) {
const WasmModule* module = native_module->module();
- int export_wrapper_size = static_cast<int>(module->num_exported_functions);
- Handle<FixedArray> export_wrappers = isolate->factory()->NewFixedArray(
- export_wrapper_size, AllocationType::kOld);
+ int num_wrappers = MaxNumExportWrappers(module);
+ Handle<FixedArray> export_wrappers =
+ isolate->factory()->NewFixedArray(num_wrappers, AllocationType::kOld);
return New(isolate, std::move(native_module), script, export_wrappers,
code_size_estimate);
}
@@ -309,8 +309,8 @@ bool WasmModuleObject::SetBreakPoint(Handle<WasmModuleObject> module_object,
namespace {
int GetBreakpointPos(Isolate* isolate, Object break_point_info_or_undef) {
- if (break_point_info_or_undef->IsUndefined(isolate)) return kMaxInt;
- return BreakPointInfo::cast(break_point_info_or_undef)->source_position();
+ if (break_point_info_or_undef.IsUndefined(isolate)) return kMaxInt;
+ return BreakPointInfo::cast(break_point_info_or_undef).source_position();
}
int FindBreakpointInfoInsertPos(Isolate* isolate,
@@ -367,7 +367,7 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
// Enlarge break positions array if necessary.
bool need_realloc = !breakpoint_infos->get(breakpoint_infos->length() - 1)
- ->IsUndefined(isolate);
+ .IsUndefined(isolate);
Handle<FixedArray> new_breakpoint_infos = breakpoint_infos;
if (need_realloc) {
new_breakpoint_infos = isolate->factory()->NewFixedArray(
@@ -381,7 +381,7 @@ void WasmModuleObject::AddBreakpoint(Handle<WasmModuleObject> module_object,
// Move elements [insert_pos, ...] up by one.
for (int i = breakpoint_infos->length() - 1; i >= insert_pos; --i) {
Object entry = breakpoint_infos->get(i);
- if (entry->IsUndefined(isolate)) continue;
+ if (entry.IsUndefined(isolate)) continue;
new_breakpoint_infos->set(i + 1, entry);
}
@@ -411,7 +411,7 @@ void WasmModuleObject::SetBreakpointsOnNewInstance(
Handle<Object> obj(breakpoint_infos->get(i), isolate);
if (obj->IsUndefined(isolate)) {
for (; i < e; ++i) {
- DCHECK(breakpoint_infos->get(i)->IsUndefined(isolate));
+ DCHECK(breakpoint_infos->get(i).IsUndefined(isolate));
}
break;
}
@@ -610,7 +610,7 @@ bool WasmModuleObject::GetPossibleBreakpoints(
AccountingAllocator alloc;
Zone tmp(&alloc, ZONE_NAME);
- const byte* module_start = native_module()->wire_bytes().start();
+ const byte* module_start = native_module()->wire_bytes().begin();
for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
++func_idx) {
@@ -678,7 +678,7 @@ MaybeHandle<String> WasmModuleObject::ExtractUtf8StringFromModuleBytes(
Vector<const uint8_t> name_vec = wire_bytes + ref.offset();
name_vec.Truncate(ref.length());
// UTF8 validation happens at decode time.
- DCHECK(unibrow::Utf8::ValidateEncoding(name_vec.start(), name_vec.length()));
+ DCHECK(unibrow::Utf8::ValidateEncoding(name_vec.begin(), name_vec.length()));
return isolate->factory()->NewStringFromUtf8(
Vector<const char>::cast(name_vec));
}
@@ -758,7 +758,7 @@ int WasmModuleObject::GetContainingFunction(uint32_t byte_offset) {
bool WasmModuleObject::GetPositionInfo(uint32_t position,
Script::PositionInfo* info) {
- if (script()->source_mapping_url()->IsString()) {
+ if (script().source_mapping_url().IsString()) {
if (module()->functions.size() == 0) return false;
info->line = 0;
info->column = position;
@@ -782,7 +782,7 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
wasm::ValueType type,
uint32_t initial, bool has_maximum,
uint32_t maximum,
- Handle<FixedArray>* elements) {
+ Handle<FixedArray>* entries) {
Handle<FixedArray> backing_store = isolate->factory()->NewFixedArray(initial);
Object null = ReadOnlyRoots(isolate).null_value();
for (int i = 0; i < static_cast<int>(initial); ++i) {
@@ -795,7 +795,7 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
isolate->factory()->NewJSObject(table_ctor));
table_obj->set_raw_type(static_cast<int>(type));
- table_obj->set_elements(*backing_store);
+ table_obj->set_entries(*backing_store);
Handle<Object> max;
if (has_maximum) {
max = isolate->factory()->NewNumberFromUint(maximum);
@@ -805,8 +805,8 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
table_obj->set_maximum_length(*max);
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
- if (elements != nullptr) {
- *elements = backing_store;
+ if (entries != nullptr) {
+ *entries = backing_store;
}
return Handle<WasmTableObject>::cast(table_obj);
}
@@ -835,13 +835,26 @@ void WasmTableObject::AddDispatchTable(Isolate* isolate,
table_obj->set_dispatch_tables(*new_dispatch_tables);
}
-void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
- if (count == 0) return; // Degenerate case: nothing to do.
+int WasmTableObject::Grow(Isolate* isolate, Handle<WasmTableObject> table,
+ uint32_t count, Handle<Object> init_value) {
+ uint32_t old_size = table->current_length();
+ if (count == 0) return old_size; // Degenerate case: nothing to do.
- Handle<FixedArray> dispatch_tables(this->dispatch_tables(), isolate);
- DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
- uint32_t old_size = elements()->length();
+ // Check if growing by {count} is valid.
+ uint32_t max_size;
+ if (!table->maximum_length().ToUint32(&max_size)) {
+ max_size = FLAG_wasm_max_table_size;
+ }
+ DCHECK_LE(old_size, max_size);
+ if (max_size - old_size < count) return -1;
+ uint32_t new_size = old_size + count;
+ auto new_store = isolate->factory()->CopyFixedArrayAndGrow(
+ handle(table->entries(), isolate), count);
+ table->set_entries(*new_store, WriteBarrierMode::UPDATE_WRITE_BARRIER);
+
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
// Tables are stored in the instance object, no code patching is
// necessary. We simply have to grow the raw tables in each instance
// that has imported this table.
@@ -850,13 +863,23 @@ void WasmTableObject::Grow(Isolate* isolate, uint32_t count) {
// the instances that import a given table.
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
+ int table_index =
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
+ if (table_index > 0) {
+ continue;
+ }
+ // For Table 0 we have to update the indirect function table.
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
DCHECK_EQ(old_size, instance->indirect_function_table_size());
- uint32_t new_size = old_size + count;
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(instance,
new_size);
}
+
+ for (uint32_t entry = old_size; entry < new_size; ++entry) {
+ WasmTableObject::Set(isolate, table, entry, init_value);
+ }
+ return old_size;
}
bool WasmTableObject::IsInBounds(Isolate* isolate,
@@ -864,112 +887,109 @@ bool WasmTableObject::IsInBounds(Isolate* isolate,
uint32_t entry_index) {
return (entry_index <
static_cast<uint32_t>(std::numeric_limits<int>::max()) &&
- static_cast<int>(entry_index) < table->elements()->length());
+ static_cast<int>(entry_index) < table->entries().length());
}
bool WasmTableObject::IsValidElement(Isolate* isolate,
Handle<WasmTableObject> table,
- Handle<Object> element) {
+ Handle<Object> entry) {
// Anyref tables take everything.
if (table->type() == wasm::kWasmAnyRef) return true;
- // Anyfunc tables can store {null} or {WasmExportedFunction} objects.
- if (element->IsNull(isolate)) return true;
- return WasmExportedFunction::IsWasmExportedFunction(*element);
+ // Anyfunc tables can store {null} or {WasmExportedFunction} or
+ // {WasmCapiFunction} objects.
+ if (entry->IsNull(isolate)) return true;
+ return WasmExportedFunction::IsWasmExportedFunction(*entry) ||
+ WasmCapiFunction::IsWasmCapiFunction(*entry);
}
void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t index, Handle<Object> element) {
+ uint32_t index, Handle<Object> entry) {
// Callers need to perform bounds checks, type check, and error handling.
DCHECK(IsInBounds(isolate, table, index));
- DCHECK(IsValidElement(isolate, table, element));
+ DCHECK(IsValidElement(isolate, table, entry));
- Handle<FixedArray> elements(table->elements(), isolate);
+ Handle<FixedArray> entries(table->entries(), isolate);
// The FixedArray is addressed with int's.
int entry_index = static_cast<int>(index);
if (table->type() == wasm::kWasmAnyRef) {
- elements->set(entry_index, *element);
+ entries->set(entry_index, *entry);
return;
}
- if (element->IsNull(isolate)) {
+ if (entry->IsNull(isolate)) {
ClearDispatchTables(isolate, table, entry_index); // Degenerate case.
- elements->set(entry_index, ReadOnlyRoots(isolate).null_value());
+ entries->set(entry_index, ReadOnlyRoots(isolate).null_value());
return;
}
- DCHECK(WasmExportedFunction::IsWasmExportedFunction(*element));
- auto exported_function = Handle<WasmExportedFunction>::cast(element);
- Handle<WasmInstanceObject> target_instance(exported_function->instance(),
- isolate);
- int func_index = exported_function->function_index();
- auto* wasm_function = &target_instance->module()->functions[func_index];
- DCHECK_NOT_NULL(wasm_function);
- DCHECK_NOT_NULL(wasm_function->sig);
- UpdateDispatchTables(isolate, table, entry_index, wasm_function->sig,
- handle(exported_function->instance(), isolate),
- func_index);
- elements->set(entry_index, *element);
+ if (WasmExportedFunction::IsWasmExportedFunction(*entry)) {
+ auto exported_function = Handle<WasmExportedFunction>::cast(entry);
+ Handle<WasmInstanceObject> target_instance(exported_function->instance(),
+ isolate);
+ int func_index = exported_function->function_index();
+ auto* wasm_function = &target_instance->module()->functions[func_index];
+ DCHECK_NOT_NULL(wasm_function);
+ DCHECK_NOT_NULL(wasm_function->sig);
+ UpdateDispatchTables(isolate, table, entry_index, wasm_function->sig,
+ target_instance, func_index);
+ } else {
+ DCHECK(WasmCapiFunction::IsWasmCapiFunction(*entry));
+ UpdateDispatchTables(isolate, table, entry_index,
+ Handle<WasmCapiFunction>::cast(entry));
+ }
+ entries->set(entry_index, *entry);
}
Handle<Object> WasmTableObject::Get(Isolate* isolate,
Handle<WasmTableObject> table,
uint32_t index) {
- Handle<FixedArray> elements(table->elements(), isolate);
+ Handle<FixedArray> entries(table->entries(), isolate);
// Callers need to perform bounds checks and error handling.
DCHECK(IsInBounds(isolate, table, index));
// The FixedArray is addressed with int's.
int entry_index = static_cast<int>(index);
- Handle<Object> element(elements->get(entry_index), isolate);
+ Handle<Object> entry(entries->get(entry_index), isolate);
// First we handle the easy anyref table case.
- if (table->type() == wasm::kWasmAnyRef) return element;
+ if (table->type() == wasm::kWasmAnyRef) return entry;
// Now we handle the anyfunc case.
- if (WasmExportedFunction::IsWasmExportedFunction(*element)) {
- return element;
+ if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
+ WasmCapiFunction::IsWasmCapiFunction(*entry)) {
+ return entry;
}
- if (element->IsNull(isolate)) {
- return element;
+ if (entry->IsNull(isolate)) {
+ return entry;
}
- // {element} is not a valid entry in the table. It has to be a placeholder
+ // {entry} is not a valid entry in the table. It has to be a placeholder
// for lazy initialization.
- Handle<Tuple2> tuple = Handle<Tuple2>::cast(element);
+ Handle<Tuple2> tuple = Handle<Tuple2>::cast(entry);
auto instance = handle(WasmInstanceObject::cast(tuple->value1()), isolate);
int function_index = Smi::cast(tuple->value2()).value();
// Check if we already compiled a wrapper for the function but did not store
// it in the table slot yet.
- MaybeHandle<Object> maybe_element =
- WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
- function_index);
- if (maybe_element.ToHandle(&element)) {
- elements->set(entry_index, *element);
- return element;
- }
-
- const WasmModule* module = instance->module_object()->module();
- const WasmFunction& function = module->functions[function_index];
- // Exported functions got their wrapper compiled during instantiation.
- CHECK(!function.exported);
- Handle<Code> wrapper_code =
- compiler::CompileJSToWasmWrapper(isolate, function.sig, function.imported)
- .ToHandleChecked();
-
- MaybeHandle<String> function_name = WasmModuleObject::GetFunctionNameOrNull(
- isolate, handle(instance->module_object(), isolate), function_index);
+ entry = WasmInstanceObject::GetOrCreateWasmExportedFunction(isolate, instance,
+ function_index);
+ entries->set(entry_index, *entry);
+ return entry;
+}
- Handle<WasmExportedFunction> result = WasmExportedFunction::New(
- isolate, instance, function_name, function_index,
- static_cast<int>(function.sig->parameter_count()), wrapper_code);
+void WasmTableObject::Fill(Isolate* isolate, Handle<WasmTableObject> table,
+ uint32_t start, Handle<Object> entry,
+ uint32_t count) {
+ // Bounds checks must be done by the caller.
+ DCHECK_LE(start, table->entries().length());
+ DCHECK_LE(count, table->entries().length());
+ DCHECK_LE(start + count, table->entries().length());
- elements->set(entry_index, *result);
- WasmInstanceObject::SetWasmExportedFunction(isolate, instance, function_index,
- result);
- return result;
+ for (uint32_t i = 0; i < count; i++) {
+ WasmTableObject::Set(isolate, table, start + i, entry);
+ }
}
void WasmTableObject::UpdateDispatchTables(
@@ -984,7 +1004,7 @@ void WasmTableObject::UpdateDispatchTables(
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
int table_index =
- Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset))->value();
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
if (table_index > 0) {
// Only table 0 has a dispatch table in the instance at the moment.
// TODO(ahaas): Introduce dispatch tables for the other tables as well.
@@ -1002,6 +1022,66 @@ void WasmTableObject::UpdateDispatchTables(
}
}
+void WasmTableObject::UpdateDispatchTables(
+ Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
+ Handle<WasmCapiFunction> capi_function) {
+ // We simply need to update the IFTs for each instance that imports
+ // this table.
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
+ DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
+
+ // Reconstruct signature.
+ // TODO(jkummerow): Unify with "SignatureHelper" in c-api.cc.
+ PodArray<wasm::ValueType> serialized_sig =
+ capi_function->GetSerializedSignature();
+ int total_count = serialized_sig.length() - 1;
+ std::unique_ptr<wasm::ValueType[]> reps(new wasm::ValueType[total_count]);
+ int result_count;
+ static const wasm::ValueType kMarker = wasm::kWasmStmt;
+ for (int i = 0, j = 0; i <= total_count; i++) {
+ if (serialized_sig.get(i) == kMarker) {
+ result_count = i;
+ continue;
+ }
+ reps[j++] = serialized_sig.get(i);
+ }
+ int param_count = total_count - result_count;
+ wasm::FunctionSig sig(result_count, param_count, reps.get());
+
+ for (int i = 0; i < dispatch_tables->length();
+ i += kDispatchTableNumElements) {
+ int table_index =
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
+ if (table_index > 0) {
+ // Only table 0 has a dispatch table in the instance at the moment.
+ // TODO(ahaas): Introduce dispatch tables for the other tables as well.
+ continue;
+ }
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(
+ dispatch_tables->get(i + kDispatchTableInstanceOffset)),
+ isolate);
+ // TODO(jkummerow): Find a way to avoid recompiling wrappers.
+ wasm::NativeModule* native_module =
+ instance->module_object().native_module();
+ Address host_address = capi_function->GetHostCallTarget();
+ wasm::WasmCodeRefScope code_ref_scope;
+ wasm::WasmCode* wasm_code = compiler::CompileWasmCapiCallWrapper(
+ isolate->wasm_engine(), native_module, &sig, host_address);
+ isolate->counters()->wasm_generated_code_size()->Increment(
+ wasm_code->instructions().length());
+ isolate->counters()->wasm_reloc_size()->Increment(
+ wasm_code->reloc_info().length());
+ Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
+ instance, capi_function, AllocationType::kOld);
+ // Note that {SignatureMap::Find} may return {-1} if the signature is
+ // not found; it will simply never match any check.
+ auto sig_id = instance->module()->signature_map.Find(sig);
+ IndirectFunctionTableEntry(instance, entry_index)
+ .Set(sig_id, wasm_code->instruction_start(), *tuple);
+ }
+}
+
void WasmTableObject::ClearDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int index) {
@@ -1009,6 +1089,12 @@ void WasmTableObject::ClearDispatchTables(Isolate* isolate,
DCHECK_EQ(0, dispatch_tables->length() % kDispatchTableNumElements);
for (int i = 0; i < dispatch_tables->length();
i += kDispatchTableNumElements) {
+ int table_index =
+ Smi::cast(dispatch_tables->get(i + kDispatchTableIndexOffset)).value();
+ if (table_index > 0) {
+ // Only table 0 has a dispatch table in the instance at the moment.
+ continue;
+ }
Handle<WasmInstanceObject> target_instance(
WasmInstanceObject::cast(
dispatch_tables->get(i + kDispatchTableInstanceOffset)),
@@ -1026,7 +1112,7 @@ void WasmTableObject::SetFunctionTablePlaceholder(
Handle<Tuple2> tuple = isolate->factory()->NewTuple2(
instance, Handle<Smi>(Smi::FromInt(func_index), isolate),
AllocationType::kYoung);
- table->elements()->set(entry_index, *tuple);
+ table->entries().set(entry_index, *tuple);
}
void WasmTableObject::GetFunctionTableEntry(
@@ -1034,10 +1120,10 @@ void WasmTableObject::GetFunctionTableEntry(
bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
int* function_index) {
DCHECK_EQ(table->type(), wasm::kWasmAnyFunc);
- DCHECK_LT(entry_index, table->elements()->length());
+ DCHECK_LT(entry_index, table->entries().length());
// We initialize {is_valid} with {true}. We may change it later.
*is_valid = true;
- Handle<Object> element(table->elements()->get(entry_index), isolate);
+ Handle<Object> element(table->entries().get(entry_index), isolate);
*is_null = element->IsNull(isolate);
if (*is_null) return;
@@ -1337,18 +1423,18 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
void IndirectFunctionTableEntry::clear() {
instance_->indirect_function_table_sig_ids()[index_] = -1;
instance_->indirect_function_table_targets()[index_] = 0;
- instance_->indirect_function_table_refs()->set(
+ instance_->indirect_function_table_refs().set(
index_, ReadOnlyRoots(instance_->GetIsolate()).undefined_value());
}
void IndirectFunctionTableEntry::Set(int sig_id,
Handle<WasmInstanceObject> target_instance,
int target_func_index) {
- TRACE_IFT(
- "IFT entry %p[%d] = {sig_id=%d, target_instance=%p, "
- "target_func_index=%d}\n",
- reinterpret_cast<void*>(instance_->ptr()), index_, sig_id,
- reinterpret_cast<void*>(target_instance->ptr()), target_func_index);
+ TRACE_IFT("IFT entry 0x%" PRIxPTR
+ "[%d] = {sig_id=%d, target_instance=0x%" PRIxPTR
+ ", target_func_index=%d}\n",
+ instance_->ptr(), index_, sig_id, target_instance->ptr(),
+ target_func_index);
Object ref;
Address call_target = 0;
@@ -1364,15 +1450,18 @@ void IndirectFunctionTableEntry::Set(int sig_id,
ref = *target_instance;
call_target = target_instance->GetCallTarget(target_func_index);
}
+ Set(sig_id, call_target, ref);
+}
- // Set the signature id, the target, and the receiver ref.
+void IndirectFunctionTableEntry::Set(int sig_id, Address call_target,
+ Object ref) {
instance_->indirect_function_table_sig_ids()[index_] = sig_id;
instance_->indirect_function_table_targets()[index_] = call_target;
- instance_->indirect_function_table_refs()->set(index_, ref);
+ instance_->indirect_function_table_refs().set(index_, ref);
}
Object IndirectFunctionTableEntry::object_ref() {
- return instance_->indirect_function_table_refs()->get(index_);
+ return instance_->indirect_function_table_refs().get(index_);
}
int IndirectFunctionTableEntry::sig_id() {
@@ -1389,51 +1478,52 @@ void IndirectFunctionTableEntry::CopyFrom(
that.instance_->indirect_function_table_sig_ids()[that.index_];
instance_->indirect_function_table_targets()[index_] =
that.instance_->indirect_function_table_targets()[that.index_];
- instance_->indirect_function_table_refs()->set(
- index_, that.instance_->indirect_function_table_refs()->get(that.index_));
+ instance_->indirect_function_table_refs().set(
+ index_, that.instance_->indirect_function_table_refs().get(that.index_));
}
void ImportedFunctionEntry::SetWasmToJs(
Isolate* isolate, Handle<JSReceiver> callable,
const wasm::WasmCode* wasm_to_js_wrapper) {
- TRACE_IFT("Import callable %p[%d] = {callable=%p, target=%p}\n",
- reinterpret_cast<void*>(instance_->ptr()), index_,
- reinterpret_cast<void*>(callable->ptr()),
- wasm_to_js_wrapper->instructions().start());
- DCHECK_EQ(wasm::WasmCode::kWasmToJsWrapper, wasm_to_js_wrapper->kind());
+ TRACE_IFT("Import callable 0x%" PRIxPTR "[%d] = {callable=0x%" PRIxPTR
+ ", target=%p}\n",
+ instance_->ptr(), index_, callable->ptr(),
+ wasm_to_js_wrapper->instructions().begin());
+ DCHECK(wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToJsWrapper ||
+ wasm_to_js_wrapper->kind() == wasm::WasmCode::kWasmToCapiWrapper);
Handle<Tuple2> tuple =
isolate->factory()->NewTuple2(instance_, callable, AllocationType::kOld);
- instance_->imported_function_refs()->set(index_, *tuple);
+ instance_->imported_function_refs().set(index_, *tuple);
instance_->imported_function_targets()[index_] =
wasm_to_js_wrapper->instruction_start();
}
void ImportedFunctionEntry::SetWasmToWasm(WasmInstanceObject instance,
Address call_target) {
- TRACE_IFT("Import WASM %p[%d] = {instance=%p, target=%" PRIuPTR "}\n",
- reinterpret_cast<void*>(instance_->ptr()), index_,
- reinterpret_cast<void*>(instance->ptr()), call_target);
- instance_->imported_function_refs()->set(index_, instance);
+ TRACE_IFT("Import WASM 0x%" PRIxPTR "[%d] = {instance=0x%" PRIxPTR
+ ", target=0x%" PRIxPTR "}\n",
+ instance_->ptr(), index_, instance.ptr(), call_target);
+ instance_->imported_function_refs().set(index_, instance);
instance_->imported_function_targets()[index_] = call_target;
}
WasmInstanceObject ImportedFunctionEntry::instance() {
// The imported reference entry is either a target instance or a tuple
// of this instance and the target callable.
- Object value = instance_->imported_function_refs()->get(index_);
- if (value->IsWasmInstanceObject()) {
+ Object value = instance_->imported_function_refs().get(index_);
+ if (value.IsWasmInstanceObject()) {
return WasmInstanceObject::cast(value);
}
Tuple2 tuple = Tuple2::cast(value);
- return WasmInstanceObject::cast(tuple->value1());
+ return WasmInstanceObject::cast(tuple.value1());
}
JSReceiver ImportedFunctionEntry::callable() {
- return JSReceiver::cast(Tuple2::cast(object_ref())->value2());
+ return JSReceiver::cast(Tuple2::cast(object_ref()).value2());
}
Object ImportedFunctionEntry::object_ref() {
- return instance_->imported_function_refs()->get(index_);
+ return instance_->imported_function_refs().get(index_);
}
Address ImportedFunctionEntry::target() {
@@ -1479,7 +1569,7 @@ void WasmInstanceObject::SetRawMemory(byte* mem_start, size_t mem_size) {
}
const WasmModule* WasmInstanceObject::module() {
- return module_object()->module();
+ return module_object().module();
}
Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
@@ -1578,7 +1668,7 @@ void WasmInstanceObject::InitDataSegmentArrays(
auto source_bytes = wire_bytes.SubVector(segment.source.offset(),
segment.source.end_offset());
instance->data_segment_starts()[i] =
- reinterpret_cast<Address>(source_bytes.start());
+ reinterpret_cast<Address>(source_bytes.begin());
instance->data_segment_sizes()[i] = source_bytes.length();
}
}
@@ -1598,7 +1688,7 @@ void WasmInstanceObject::InitElemSegmentArrays(
}
Address WasmInstanceObject::GetCallTarget(uint32_t func_index) {
- wasm::NativeModule* native_module = module_object()->native_module();
+ wasm::NativeModule* native_module = module_object().native_module();
if (func_index < native_module->num_imported_functions()) {
return imported_function_targets()[func_index];
}
@@ -1632,10 +1722,10 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
uint32_t table_dst_index,
uint32_t dst, uint32_t src,
uint32_t count) {
- if (static_cast<int>(table_dst_index) >= instance->tables()->length()) {
+ if (static_cast<int>(table_dst_index) >= instance->tables().length()) {
return false;
}
- if (static_cast<int>(table_src_index) >= instance->tables()->length()) {
+ if (static_cast<int>(table_src_index) >= instance->tables().length()) {
return false;
}
@@ -1656,7 +1746,7 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
// TODO(titzer): multiple tables in TableCopy
auto table = handle(
- WasmTableObject::cast(instance->tables()->get(table_src_index)), isolate);
+ WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
// Broadcast table copy operation to all instances that import this table.
Handle<FixedArray> dispatch_tables(table->dispatch_tables(), isolate);
for (int i = 0; i < dispatch_tables->length();
@@ -1670,17 +1760,17 @@ bool WasmInstanceObject::CopyTableEntries(Isolate* isolate,
// Copy the function entries.
auto dst_table = handle(
- WasmTableObject::cast(instance->tables()->get(table_dst_index)), isolate);
+ WasmTableObject::cast(instance->tables().get(table_dst_index)), isolate);
auto src_table = handle(
- WasmTableObject::cast(instance->tables()->get(table_src_index)), isolate);
+ WasmTableObject::cast(instance->tables().get(table_src_index)), isolate);
if (copy_backward) {
for (uint32_t i = count; i > 0; i--) {
- dst_table->elements()->set(dst + i - 1,
- src_table->elements()->get(src + i - 1));
+ dst_table->entries().set(dst + i - 1,
+ src_table->entries().get(src + i - 1));
}
} else {
for (uint32_t i = 0; i < count; i++) {
- dst_table->elements()->set(dst + i, src_table->elements()->get(src + i));
+ dst_table->entries().set(dst + i, src_table->entries().get(src + i));
}
}
return ok;
@@ -1703,8 +1793,8 @@ MaybeHandle<WasmExportedFunction> WasmInstanceObject::GetWasmExportedFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int index) {
MaybeHandle<WasmExportedFunction> result;
if (instance->has_wasm_exported_functions()) {
- Object val = instance->wasm_exported_functions()->get(index);
- if (!val->IsUndefined(isolate)) {
+ Object val = instance->wasm_exported_functions().get(index);
+ if (!val.IsUndefined(isolate)) {
result = Handle<WasmExportedFunction>(WasmExportedFunction::cast(val),
isolate);
}
@@ -1712,6 +1802,48 @@ MaybeHandle<WasmExportedFunction> WasmInstanceObject::GetWasmExportedFunction(
return result;
}
+Handle<WasmExportedFunction>
+WasmInstanceObject::GetOrCreateWasmExportedFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int function_index) {
+ MaybeHandle<WasmExportedFunction> maybe_result =
+ WasmInstanceObject::GetWasmExportedFunction(isolate, instance,
+ function_index);
+
+ Handle<WasmExportedFunction> result;
+ if (maybe_result.ToHandle(&result)) {
+ return result;
+ }
+
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ const WasmModule* module = module_object->module();
+ const WasmFunction& function = module->functions[function_index];
+ int wrapper_index =
+ GetExportWrapperIndex(module, function.sig, function.imported);
+
+ Handle<Object> entry =
+ FixedArray::get(module_object->export_wrappers(), wrapper_index, isolate);
+
+ Handle<Code> wrapper;
+ if (entry->IsCode()) {
+ wrapper = Handle<Code>::cast(entry);
+ } else {
+ // The wrapper may not exist yet if no function in the exports section has
+ // this signature. We compile it and store the wrapper in the module for
+ // later use.
+ wrapper = compiler::CompileJSToWasmWrapper(isolate, function.sig,
+ function.imported)
+ .ToHandleChecked();
+ module_object->export_wrappers().set(wrapper_index, *wrapper);
+ }
+ result = WasmExportedFunction::New(
+ isolate, instance, function_index,
+ static_cast<int>(function.sig->parameter_count()), wrapper);
+
+ WasmInstanceObject::SetWasmExportedFunction(isolate, instance, function_index,
+ result);
+ return result;
+}
+
void WasmInstanceObject::SetWasmExportedFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int index,
Handle<WasmExportedFunction> val) {
@@ -1760,12 +1892,33 @@ bool WasmExceptionObject::IsSignatureEqual(const wasm::FunctionSig* sig) {
DCHECK_EQ(0, sig->return_count());
DCHECK_LE(sig->parameter_count(), std::numeric_limits<int>::max());
int sig_size = static_cast<int>(sig->parameter_count());
- if (sig_size != serialized_signature()->length()) return false;
+ if (sig_size != serialized_signature().length()) return false;
for (int index = 0; index < sig_size; ++index) {
- if (sig->GetParam(index) != serialized_signature()->get(index)) {
+ if (sig->GetParam(index) != serialized_signature().get(index)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool WasmCapiFunction::IsSignatureEqual(const wasm::FunctionSig* sig) const {
+ // TODO(jkummerow): Unify with "SignatureHelper" in c-api.cc.
+ int param_count = static_cast<int>(sig->parameter_count());
+ int result_count = static_cast<int>(sig->return_count());
+ PodArray<wasm::ValueType> serialized_sig =
+ shared().wasm_capi_function_data().serialized_signature();
+ if (param_count + result_count + 1 != serialized_sig.length()) return false;
+ int serialized_index = 0;
+ for (int i = 0; i < result_count; i++, serialized_index++) {
+ if (sig->GetReturn(i) != serialized_sig.get(serialized_index)) {
return false;
}
}
+ if (serialized_sig.get(serialized_index) != wasm::kWasmStmt) return false;
+ serialized_index++;
+ for (int i = 0; i < param_count; i++, serialized_index++) {
+ if (sig->GetParam(i) != serialized_sig.get(serialized_index)) return false;
+ }
return true;
}
@@ -1860,6 +2013,8 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
encoded_size += 8;
break;
case wasm::kWasmAnyRef:
+ case wasm::kWasmAnyFunc:
+ case wasm::kWasmExceptRef:
encoded_size += 1;
break;
default:
@@ -1870,31 +2025,63 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
}
bool WasmExportedFunction::IsWasmExportedFunction(Object object) {
- if (!object->IsJSFunction()) return false;
+ if (!object.IsJSFunction()) return false;
JSFunction js_function = JSFunction::cast(object);
- if (Code::JS_TO_WASM_FUNCTION != js_function->code()->kind()) return false;
- DCHECK(js_function->shared()->HasWasmExportedFunctionData());
+ if (Code::JS_TO_WASM_FUNCTION != js_function.code().kind()) return false;
+ DCHECK(js_function.shared().HasWasmExportedFunctionData());
return true;
}
+bool WasmCapiFunction::IsWasmCapiFunction(Object object) {
+ if (!object.IsJSFunction()) return false;
+ JSFunction js_function = JSFunction::cast(object);
+ // TODO(jkummerow): Enable this when there is a JavaScript wrapper
+ // able to call this function.
+ // if (js_function->code()->kind() != Code::WASM_TO_CAPI_FUNCTION) {
+ // return false;
+ // }
+ // DCHECK(js_function->shared()->HasWasmCapiFunctionData());
+ // return true;
+ return js_function.shared().HasWasmCapiFunctionData();
+}
+
+Handle<WasmCapiFunction> WasmCapiFunction::New(
+ Isolate* isolate, Address call_target, void* embedder_data,
+ Handle<PodArray<wasm::ValueType>> serialized_signature) {
+ Handle<WasmCapiFunctionData> fun_data =
+ Handle<WasmCapiFunctionData>::cast(isolate->factory()->NewStruct(
+ WASM_CAPI_FUNCTION_DATA_TYPE, AllocationType::kOld));
+ fun_data->set_call_target(call_target);
+ fun_data->set_embedder_data(embedder_data);
+ fun_data->set_serialized_signature(*serialized_signature);
+ // TODO(jkummerow): Install a JavaScript wrapper. For now, calling
+ // these functions directly is unsupported; they can only be called
+ // from Wasm code.
+ fun_data->set_wrapper_code(isolate->builtins()->builtin(Builtins::kIllegal));
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfoForWasmCapiFunction(fun_data);
+ return Handle<WasmCapiFunction>::cast(
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ shared, isolate->native_context()));
+}
+
WasmInstanceObject WasmExportedFunction::instance() {
- return shared()->wasm_exported_function_data()->instance();
+ return shared().wasm_exported_function_data().instance();
}
int WasmExportedFunction::function_index() {
- return shared()->wasm_exported_function_data()->function_index();
+ return shared().wasm_exported_function_data().function_index();
}
Handle<WasmExportedFunction> WasmExportedFunction::New(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- MaybeHandle<String> maybe_name, int func_index, int arity,
- Handle<Code> export_wrapper) {
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index,
+ int arity, Handle<Code> export_wrapper) {
DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
int num_imported_functions = instance->module()->num_imported_functions;
int jump_table_offset = -1;
if (func_index >= num_imported_functions) {
ptrdiff_t jump_table_diff =
- instance->module_object()->native_module()->jump_table_offset(
+ instance->module_object().native_module()->jump_table_offset(
func_index);
DCHECK(jump_table_diff >= 0 && jump_table_diff <= INT_MAX);
jump_table_offset = static_cast<int>(jump_table_diff);
@@ -1906,6 +2093,14 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
function_data->set_instance(*instance);
function_data->set_jump_table_offset(jump_table_offset);
function_data->set_function_index(func_index);
+
+ MaybeHandle<String> maybe_name;
+ if (instance->module()->origin == wasm::kAsmJsOrigin) {
+ // We can use the function name only for asm.js. For WebAssembly, the
+ // function name is specified as the function_index.toString().
+ maybe_name = WasmModuleObject::GetFunctionNameOrNull(
+ isolate, handle(instance->module_object(), isolate), func_index);
+ }
Handle<String> name;
if (!maybe_name.ToHandle(&name)) {
EmbeddedVector<char, 16> buffer;
@@ -1915,23 +2110,62 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
Vector<uint8_t>::cast(buffer.SubVector(0, length)))
.ToHandleChecked();
}
- NewFunctionArgs args = NewFunctionArgs::ForWasm(
- name, function_data, isolate->sloppy_function_without_prototype_map());
+ bool is_asm_js_module = instance->module_object().is_asm_js();
+ Handle<Map> function_map = is_asm_js_module
+ ? isolate->sloppy_function_map()
+ : isolate->wasm_exported_function_map();
+ NewFunctionArgs args =
+ NewFunctionArgs::ForWasm(name, function_data, function_map);
Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
// According to the spec, exported functions should not have a [[Construct]]
- // method.
- DCHECK(!js_function->IsConstructor());
- js_function->shared()->set_length(arity);
- js_function->shared()->set_internal_formal_parameter_count(arity);
+ // method. This does not apply to functions exported from asm.js however.
+ DCHECK_EQ(is_asm_js_module, js_function->IsConstructor());
+ js_function->shared().set_length(arity);
+ js_function->shared().set_internal_formal_parameter_count(arity);
return Handle<WasmExportedFunction>::cast(js_function);
}
Address WasmExportedFunction::GetWasmCallTarget() {
- return instance()->GetCallTarget(function_index());
+ return instance().GetCallTarget(function_index());
}
wasm::FunctionSig* WasmExportedFunction::sig() {
- return instance()->module()->functions[function_index()].sig;
+ return instance().module()->functions[function_index()].sig;
+}
+
+// static
+bool WasmJSFunction::IsWasmJSFunction(Object object) {
+ if (!object.IsJSFunction()) return false;
+ JSFunction js_function = JSFunction::cast(object);
+ return js_function.shared().HasWasmJSFunctionData();
+}
+
+Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
+ wasm::FunctionSig* sig,
+ Handle<JSReceiver> callable) {
+ Handle<WasmJSFunctionData> function_data =
+ Handle<WasmJSFunctionData>::cast(isolate->factory()->NewStruct(
+ WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
+ // TODO(7742): Make this callable by using a proper wrapper code.
+ function_data->set_wrapper_code(
+ isolate->builtins()->builtin(Builtins::kIllegal));
+ Handle<String> name = isolate->factory()->Function_string();
+ if (callable->IsJSFunction()) {
+ name = JSFunction::GetName(Handle<JSFunction>::cast(callable));
+ }
+ Handle<Map> function_map = isolate->wasm_exported_function_map();
+ NewFunctionArgs args =
+ NewFunctionArgs::ForWasm(name, function_data, function_map);
+ Handle<JSFunction> js_function = isolate->factory()->NewFunction(args);
+ return Handle<WasmJSFunction>::cast(js_function);
+}
+
+Address WasmCapiFunction::GetHostCallTarget() const {
+ return shared().wasm_capi_function_data().call_target();
+}
+
+PodArray<wasm::ValueType> WasmCapiFunction::GetSerializedSignature() const {
+ return shared().wasm_capi_function_data().serialized_signature();
}
Handle<WasmExceptionTag> WasmExceptionTag::New(Isolate* isolate, int index) {
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 10169ea2bb..1e6ced0b76 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -6,12 +6,12 @@
#define V8_WASM_WASM_OBJECTS_H_
#include "src/base/bits.h"
+#include "src/codegen/signature.h"
#include "src/debug/debug.h"
#include "src/debug/interface-types.h"
#include "src/heap/heap.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/objects/script.h"
-#include "src/signature.h"
#include "src/wasm/value-type.h"
// Has to be the last include (doesn't have include guards)
@@ -37,6 +37,7 @@ class WireBytesRef;
class BreakPoint;
class JSArrayBuffer;
class SeqOneByteString;
+class WasmCapiFunction;
class WasmDebugInfo;
class WasmExceptionTag;
class WasmInstanceObject;
@@ -65,6 +66,7 @@ class IndirectFunctionTableEntry {
V8_EXPORT_PRIVATE void Set(int sig_id,
Handle<WasmInstanceObject> target_instance,
int target_func_index);
+ void Set(int sig_id, Address call_target, Object ref);
void CopyFrom(const IndirectFunctionTableEntry& that);
@@ -247,7 +249,7 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
public:
DECL_CAST(WasmTableObject)
- DECL_ACCESSORS(elements, FixedArray)
+ DECL_ACCESSORS(entries, FixedArray)
// TODO(titzer): introduce DECL_I64_ACCESSORS macro
DECL_ACCESSORS(maximum_length, Object)
DECL_ACCESSORS(dispatch_tables, FixedArray)
@@ -262,12 +264,14 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
inline uint32_t current_length();
inline wasm::ValueType type();
- void Grow(Isolate* isolate, uint32_t count);
+
+ static int Grow(Isolate* isolate, Handle<WasmTableObject> table,
+ uint32_t count, Handle<Object> init_value);
static Handle<WasmTableObject> New(Isolate* isolate, wasm::ValueType type,
uint32_t initial, bool has_maximum,
uint32_t maximum,
- Handle<FixedArray>* elements);
+ Handle<FixedArray>* entries);
static void AddDispatchTable(Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance,
@@ -280,16 +284,23 @@ class V8_EXPORT_PRIVATE WasmTableObject : public JSObject {
Handle<Object> entry);
static void Set(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t index, Handle<Object> element);
+ uint32_t index, Handle<Object> entry);
static Handle<Object> Get(Isolate* isolate, Handle<WasmTableObject> table,
uint32_t index);
+ static void Fill(Isolate* isolate, Handle<WasmTableObject> table,
+ uint32_t start, Handle<Object> entry, uint32_t count);
+
static void UpdateDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table,
int entry_index, wasm::FunctionSig* sig,
Handle<WasmInstanceObject> target_instance,
int target_func_index);
+ static void UpdateDispatchTables(Isolate* isolate,
+ Handle<WasmTableObject> table,
+ int entry_index,
+ Handle<WasmCapiFunction> capi_function);
static void ClearDispatchTables(Isolate* isolate,
Handle<WasmTableObject> table, int index);
@@ -571,6 +582,16 @@ class WasmInstanceObject : public JSObject {
static MaybeHandle<WasmExportedFunction> GetWasmExportedFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int index);
+
+ // Acquires the {WasmExportedFunction} for a given {function_index} from the
+ // cache of the given {instance}, or creates a new {WasmExportedFunction} if
+ // it does not exist yet. The new {WasmExportedFunction} is added to the
+ // cache of the {instance} immediately.
+ V8_EXPORT_PRIVATE static Handle<WasmExportedFunction>
+ GetOrCreateWasmExportedFunction(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ int function_index);
+
static void SetWasmExportedFunction(Isolate* isolate,
Handle<WasmInstanceObject> instance,
int index,
@@ -631,6 +652,7 @@ class WasmExceptionPackage : public JSReceiver {
};
// A Wasm function that is wrapped and exported to JavaScript.
+// Representation of WebAssembly.Function JavaScript-level object.
class WasmExportedFunction : public JSFunction {
public:
WasmInstanceObject instance();
@@ -639,9 +661,8 @@ class WasmExportedFunction : public JSFunction {
V8_EXPORT_PRIVATE static bool IsWasmExportedFunction(Object object);
V8_EXPORT_PRIVATE static Handle<WasmExportedFunction> New(
- Isolate* isolate, Handle<WasmInstanceObject> instance,
- MaybeHandle<String> maybe_name, int func_index, int arity,
- Handle<Code> export_wrapper);
+ Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index,
+ int arity, Handle<Code> export_wrapper);
Address GetWasmCallTarget();
@@ -651,6 +672,59 @@ class WasmExportedFunction : public JSFunction {
OBJECT_CONSTRUCTORS(WasmExportedFunction, JSFunction);
};
+// A Wasm function that was created by wrapping a JavaScript callable.
+// Representation of WebAssembly.Function JavaScript-level object.
+class WasmJSFunction : public JSFunction {
+ public:
+ static bool IsWasmJSFunction(Object object);
+
+ static Handle<WasmJSFunction> New(Isolate* isolate, wasm::FunctionSig* sig,
+ Handle<JSReceiver> callable);
+
+ DECL_CAST(WasmJSFunction)
+ OBJECT_CONSTRUCTORS(WasmJSFunction, JSFunction);
+};
+
+// An external function exposed to Wasm via the C/C++ API.
+class WasmCapiFunction : public JSFunction {
+ public:
+ static bool IsWasmCapiFunction(Object object);
+
+ static Handle<WasmCapiFunction> New(
+ Isolate* isolate, Address call_target, void* embedder_data,
+ Handle<PodArray<wasm::ValueType>> serialized_signature);
+
+ Address GetHostCallTarget() const;
+ PodArray<wasm::ValueType> GetSerializedSignature() const;
+ // Checks whether the given {sig} has the same parameter types as the
+ // serialized signature stored within this C-API function object.
+ bool IsSignatureEqual(const wasm::FunctionSig* sig) const;
+
+ DECL_CAST(WasmCapiFunction)
+ OBJECT_CONSTRUCTORS(WasmCapiFunction, JSFunction);
+};
+
+class WasmCapiFunctionData : public Struct {
+ public:
+ DECL_PRIMITIVE_ACCESSORS(call_target, Address)
+ DECL_PRIMITIVE_ACCESSORS(embedder_data, void*)
+ DECL_ACCESSORS(wrapper_code, Code)
+ DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
+
+ DECL_CAST(WasmCapiFunctionData)
+
+ DECL_PRINTER(WasmCapiFunctionData)
+ DECL_VERIFIER(WasmCapiFunctionData)
+
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WASM_CAPI_FUNCTION_DATA_FIELDS)
+
+ STATIC_ASSERT(kStartOfStrongFieldsOffset == kWrapperCodeOffset);
+ using BodyDescriptor = FlexibleBodyDescriptor<kStartOfStrongFieldsOffset>;
+
+ OBJECT_CONSTRUCTORS(WasmCapiFunctionData, Struct);
+};
+
// Information for a WasmExportedFunction which is referenced as the function
// data of the SharedFunctionInfo underlying the function. For details please
// see the {SharedFunctionInfo::HasWasmExportedFunctionData} predicate.
@@ -675,12 +749,31 @@ class WasmExportedFunctionData : public Struct {
OBJECT_CONSTRUCTORS(WasmExportedFunctionData, Struct);
};
+// Information for a WasmJSFunction which is referenced as the function data of
+// the SharedFunctionInfo underlying the function. For details please see the
+// {SharedFunctionInfo::HasWasmJSFunctionData} predicate.
+class WasmJSFunctionData : public Struct {
+ public:
+ DECL_ACCESSORS(wrapper_code, Code)
+
+ DECL_CAST(WasmJSFunctionData)
+
+ // Dispatched behavior.
+ DECL_PRINTER(WasmJSFunctionData)
+ DECL_VERIFIER(WasmJSFunctionData)
+
+ // Layout description.
+ DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ TORQUE_GENERATED_WASM_JSFUNCTION_DATA_FIELDS)
+
+ OBJECT_CONSTRUCTORS(WasmJSFunctionData, Struct);
+};
+
class WasmDebugInfo : public Struct {
public:
NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
DECL_ACCESSORS(interpreter_handle, Object) // Foreign or undefined
- DECL_ACCESSORS(interpreted_functions, FixedArray)
DECL_OPTIONAL_ACCESSORS(locals_names, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray)
DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>)
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index c37a94524c..88b9e90381 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -7,9 +7,9 @@
#include <array>
#include "src/base/template-utils.h"
-#include "src/messages.h"
+#include "src/codegen/signature.h"
+#include "src/execution/messages.h"
#include "src/runtime/runtime.h"
-#include "src/signature.h"
namespace v8 {
namespace internal {
@@ -48,11 +48,11 @@ namespace wasm {
#define CASE_ALL_SIGN_OP(name, str) \
CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
- CASE_##RES##_OP(U##name##SRC, str "_u/" src_suffix) \
- CASE_##RES##_OP(S##name##SRC, str "_s/" src_suffix)
-#define CASE_CONVERT_SAT_OP(name, RES, SRC, src_suffix, str) \
- CASE_##RES##_OP(U##name##Sat##SRC, str "_u:sat/" src_suffix) \
- CASE_##RES##_OP(S##name##Sat##SRC, str "_s:sat/" src_suffix)
+ CASE_##RES##_OP(U##name##SRC, str "_" src_suffix "_u") \
+ CASE_##RES##_OP(S##name##SRC, str "_" src_suffix "_s")
+#define CASE_CONVERT_SAT_OP(name, RES, SRC, src_suffix, str) \
+ CASE_##RES##_OP(U##name##Sat##SRC, str "_sat_" src_suffix "_u") \
+ CASE_##RES##_OP(S##name##Sat##SRC, str "_sat_" src_suffix "_s")
#define CASE_L32_OP(name, str) \
CASE_SIGN_OP(I32, name##8, str "8") \
CASE_SIGN_OP(I32, name##16, str "16") \
@@ -108,23 +108,23 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_REF_OP(Null, "null")
CASE_REF_OP(IsNull, "is_null")
CASE_REF_OP(Func, "func")
- CASE_I32_OP(ConvertI64, "wrap/i64")
+ CASE_I32_OP(ConvertI64, "wrap_i64")
CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
- CASE_F32_OP(ConvertF64, "demote/f64")
+ CASE_F32_OP(ConvertF64, "demote_f64")
CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
- CASE_F64_OP(ConvertF32, "promote/f32")
- CASE_I32_OP(ReinterpretF32, "reinterpret/f32")
- CASE_I64_OP(ReinterpretF64, "reinterpret/f64")
- CASE_F32_OP(ReinterpretI32, "reinterpret/i32")
- CASE_F64_OP(ReinterpretI64, "reinterpret/i64")
- CASE_INT_OP(SExtendI8, "sign_extend8")
- CASE_INT_OP(SExtendI16, "sign_extend16")
- CASE_I64_OP(SExtendI32, "sign_extend32")
+ CASE_F64_OP(ConvertF32, "promote_f32")
+ CASE_I32_OP(ReinterpretF32, "reinterpret_f32")
+ CASE_I64_OP(ReinterpretF64, "reinterpret_f64")
+ CASE_F32_OP(ReinterpretI32, "reinterpret_i32")
+ CASE_F64_OP(ReinterpretI64, "reinterpret_i64")
+ CASE_INT_OP(SExtendI8, "extend8_s")
+ CASE_INT_OP(SExtendI16, "extend16_s")
+ CASE_I64_OP(SExtendI32, "extend32_s")
CASE_OP(Unreachable, "unreachable")
CASE_OP(Nop, "nop")
CASE_OP(Block, "block")
@@ -142,13 +142,14 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(ReturnCallIndirect, "return_call_indirect")
CASE_OP(Drop, "drop")
CASE_OP(Select, "select")
- CASE_OP(GetLocal, "get_local")
- CASE_OP(SetLocal, "set_local")
- CASE_OP(TeeLocal, "tee_local")
- CASE_OP(GetGlobal, "get_global")
- CASE_OP(SetGlobal, "set_global")
- CASE_OP(GetTable, "get_table")
- CASE_OP(SetTable, "set_table")
+ CASE_OP(SelectWithType, "select")
+ CASE_OP(GetLocal, "local.get")
+ CASE_OP(SetLocal, "local.set")
+ CASE_OP(TeeLocal, "local.tee")
+ CASE_OP(GetGlobal, "global.get")
+ CASE_OP(SetGlobal, "global.set")
+ CASE_OP(GetTable, "table.get")
+ CASE_OP(SetTable, "table.set")
CASE_ALL_OP(Const, "const")
CASE_OP(MemorySize, "memory.size")
CASE_OP(MemoryGrow, "memory.grow")
@@ -192,10 +193,10 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_I32_OP(AsmjsStoreMem16, "asmjs_store16")
CASE_SIGN_OP(I32, AsmjsDiv, "asmjs_div")
CASE_SIGN_OP(I32, AsmjsRem, "asmjs_rem")
- CASE_I32_OP(AsmjsSConvertF32, "asmjs_convert_s/f32")
- CASE_I32_OP(AsmjsUConvertF32, "asmjs_convert_u/f32")
- CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_s/f64")
- CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_u/f64")
+ CASE_I32_OP(AsmjsSConvertF32, "asmjs_convert_f32_s")
+ CASE_I32_OP(AsmjsUConvertF32, "asmjs_convert_f32_u")
+ CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_f64_s")
+ CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_f64_u")
// Numeric Opcodes.
CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
@@ -209,6 +210,9 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(TableInit, "table.init")
CASE_OP(ElemDrop, "elem.drop")
CASE_OP(TableCopy, "table.copy")
+ CASE_OP(TableGrow, "table.grow")
+ CASE_OP(TableSize, "table.size")
+ CASE_OP(TableFill, "table.fill")
// SIMD opcodes.
CASE_SIMD_OP(Splat, "splat")
@@ -268,17 +272,17 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S1x16_OP(AllTrue, "all_true")
// Atomic operations.
- CASE_OP(AtomicNotify, "atomic_notify")
- CASE_INT_OP(AtomicWait, "atomic_wait")
- CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic_load")
- CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic_store")
- CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic_add")
- CASE_UNSIGNED_ALL_OP(AtomicSub, "atomic_sub")
- CASE_UNSIGNED_ALL_OP(AtomicAnd, "atomic_and")
- CASE_UNSIGNED_ALL_OP(AtomicOr, "atomic_or")
- CASE_UNSIGNED_ALL_OP(AtomicXor, "atomic_xor")
- CASE_UNSIGNED_ALL_OP(AtomicExchange, "atomic_xchng")
- CASE_UNSIGNED_ALL_OP(AtomicCompareExchange, "atomic_cmpxchng")
+ CASE_OP(AtomicNotify, "atomic.notify")
+ CASE_INT_OP(AtomicWait, "atomic.wait")
+ CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic.load")
+ CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic.store")
+ CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic.add")
+ CASE_UNSIGNED_ALL_OP(AtomicSub, "atomic.sub")
+ CASE_UNSIGNED_ALL_OP(AtomicAnd, "atomic.and")
+ CASE_UNSIGNED_ALL_OP(AtomicOr, "atomic.or")
+ CASE_UNSIGNED_ALL_OP(AtomicXor, "atomic.xor")
+ CASE_UNSIGNED_ALL_OP(AtomicExchange, "atomic.xchng")
+ CASE_UNSIGNED_ALL_OP(AtomicCompareExchange, "atomic.cmpxchng")
default : return "unknown";
// clang-format on
@@ -367,6 +371,7 @@ bool WasmOpcodes::IsAnyRefOpcode(WasmOpcode opcode) {
switch (opcode) {
case kExprRefNull:
case kExprRefIsNull:
+ case kExprRefFunc:
return true;
default:
return false;
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 71829b6479..6f9cb70141 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -5,8 +5,8 @@
#ifndef V8_WASM_WASM_OPCODES_H_
#define V8_WASM_WASM_OPCODES_H_
-#include "src/globals.h"
-#include "src/message-template.h"
+#include "src/common/globals.h"
+#include "src/execution/message-template.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-constants.h"
@@ -45,6 +45,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(ReturnCallIndirect, 0x13, _) \
V(Drop, 0x1a, _) \
V(Select, 0x1b, _) \
+ V(SelectWithType, 0x1c, _) \
V(GetLocal, 0x20, _) \
V(SetLocal, 0x21, _) \
V(TeeLocal, 0x22, _) \
@@ -405,22 +406,26 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)
-#define FOREACH_NUMERIC_OPCODE(V) \
- V(I32SConvertSatF32, 0xfc00, i_f) \
- V(I32UConvertSatF32, 0xfc01, i_f) \
- V(I32SConvertSatF64, 0xfc02, i_d) \
- V(I32UConvertSatF64, 0xfc03, i_d) \
- V(I64SConvertSatF32, 0xfc04, l_f) \
- V(I64UConvertSatF32, 0xfc05, l_f) \
- V(I64SConvertSatF64, 0xfc06, l_d) \
- V(I64UConvertSatF64, 0xfc07, l_d) \
- V(MemoryInit, 0xfc08, v_iii) \
- V(DataDrop, 0xfc09, v_v) \
- V(MemoryCopy, 0xfc0a, v_iii) \
- V(MemoryFill, 0xfc0b, v_iii) \
- V(TableInit, 0xfc0c, v_iii) \
- V(ElemDrop, 0xfc0d, v_v) \
- V(TableCopy, 0xfc0e, v_iii)
+#define FOREACH_NUMERIC_OPCODE(V) \
+ V(I32SConvertSatF32, 0xfc00, i_f) \
+ V(I32UConvertSatF32, 0xfc01, i_f) \
+ V(I32SConvertSatF64, 0xfc02, i_d) \
+ V(I32UConvertSatF64, 0xfc03, i_d) \
+ V(I64SConvertSatF32, 0xfc04, l_f) \
+ V(I64UConvertSatF32, 0xfc05, l_f) \
+ V(I64SConvertSatF64, 0xfc06, l_d) \
+ V(I64UConvertSatF64, 0xfc07, l_d) \
+ V(MemoryInit, 0xfc08, v_iii) \
+ V(DataDrop, 0xfc09, v_v) \
+ V(MemoryCopy, 0xfc0a, v_iii) \
+ V(MemoryFill, 0xfc0b, v_iii) \
+ V(TableInit, 0xfc0c, v_iii) \
+ V(ElemDrop, 0xfc0d, v_v) \
+ V(TableCopy, 0xfc0e, v_iii) \
+ V(TableGrow, 0xfc0f, i_ai) \
+ V(TableSize, 0xfc10, i_v) \
+ /*TableFill is polymorph in the second parameter. It's anyref or anyfunc.*/ \
+ V(TableFill, 0xfc11, v_iii)
#define FOREACH_ATOMIC_OPCODE(V) \
V(AtomicNotify, 0xfe00, i_ii) \
@@ -547,7 +552,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, bool hasBigIntFeature);
V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
- V(i_r, kWasmI32, kWasmAnyRef)
+ V(i_r, kWasmI32, kWasmAnyRef) \
+ V(i_ai, kWasmI32, kWasmAnyFunc, kWasmI32)
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
@@ -610,6 +616,7 @@ struct WasmInitExpr {
kF32Const,
kF64Const,
kRefNullConst,
+ kRefFuncConst,
} kind;
union {
@@ -618,6 +625,7 @@ struct WasmInitExpr {
float f32_const;
double f64_const;
uint32_t global_index;
+ uint32_t function_index;
} val;
WasmInitExpr() : kind(kNone) {}
@@ -625,8 +633,15 @@ struct WasmInitExpr {
explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
- WasmInitExpr(WasmInitKind kind, uint32_t global_index) : kind(kGlobalIndex) {
- val.global_index = global_index;
+ WasmInitExpr(WasmInitKind kind, uint32_t index) : kind(kind) {
+ if (kind == kGlobalIndex) {
+ val.global_index = index;
+ } else if (kind == kRefFuncConst) {
+ val.function_index = index;
+ } else {
+ // For the other types, the other initializers should be used.
+ UNREACHABLE();
+ }
}
};
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 80b7b4a6ad..4688bcf8e1 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -4,10 +4,10 @@
#include "src/wasm/wasm-result.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap.h"
-#include "src/isolate-inl.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/base/platform/platform.h"
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 76de1ea303..92049e6080 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -12,7 +12,7 @@
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/wasm-serialization.cc b/deps/v8/src/wasm/wasm-serialization.cc
index 1b7973aac9..1cea08943b 100644
--- a/deps/v8/src/wasm/wasm-serialization.cc
+++ b/deps/v8/src/wasm/wasm-serialization.cc
@@ -4,16 +4,16 @@
#include "src/wasm/wasm-serialization.h"
-#include "src/assembler-inl.h"
-#include "src/external-reference-table.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/ostreams.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/serializer-common.h"
-#include "src/utils.h"
-#include "src/version.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/utils.h"
+#include "src/utils/version.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -34,7 +34,7 @@ namespace {
class Writer {
public:
explicit Writer(Vector<byte> buffer)
- : start_(buffer.start()), end_(buffer.end()), pos_(buffer.start()) {}
+ : start_(buffer.begin()), end_(buffer.end()), pos_(buffer.begin()) {}
size_t bytes_written() const { return pos_ - start_; }
byte* current_location() const { return pos_; }
@@ -57,7 +57,7 @@ class Writer {
void WriteVector(const Vector<const byte> v) {
DCHECK_GE(current_size(), v.size());
if (v.size() > 0) {
- memcpy(current_location(), v.start(), v.size());
+ memcpy(current_location(), v.begin(), v.size());
pos_ += v.size();
}
if (FLAG_trace_wasm_serialization) {
@@ -77,7 +77,7 @@ class Writer {
class Reader {
public:
explicit Reader(Vector<const byte> buffer)
- : start_(buffer.start()), end_(buffer.end()), pos_(buffer.start()) {}
+ : start_(buffer.begin()), end_(buffer.end()), pos_(buffer.begin()) {}
size_t bytes_read() const { return pos_ - start_; }
const byte* current_location() const { return pos_; }
@@ -102,7 +102,7 @@ class Reader {
void ReadVector(Vector<byte> v) {
if (v.size() > 0) {
DCHECK_GE(current_size(), v.size());
- memcpy(v.start(), current_location(), v.size());
+ memcpy(v.begin(), current_location(), v.size());
pos_ += v.size();
}
if (FLAG_trace_wasm_serialization) {
@@ -136,6 +136,7 @@ void WriteVersion(Writer* writer) {
void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
DCHECK(rinfo->HasTargetAddressAddress());
+ DCHECK(!RelocInfo::IsCompressedEmbeddedObject(rinfo->rmode()));
WriteUnalignedValue(rinfo->target_address_address(), tag);
#elif V8_TARGET_ARCH_ARM64
Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
@@ -161,6 +162,7 @@ void SetWasmCalleeTag(RelocInfo* rinfo, uint32_t tag) {
uint32_t GetWasmCalleeTag(RelocInfo* rinfo) {
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32
+ DCHECK(!RelocInfo::IsCompressedEmbeddedObject(rinfo->rmode()));
return ReadUnalignedValue<uint32_t>(rinfo->target_address_address());
#elif V8_TARGET_ARCH_ARM64
Instruction* instr = reinterpret_cast<Instruction*>(rinfo->pc());
@@ -356,7 +358,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
writer->Write(code->tier());
// Get a pointer to the destination buffer, to hold relocated code.
- byte* serialized_code_start = writer->current_buffer().start();
+ byte* serialized_code_start = writer->current_buffer().begin();
byte* code_start = serialized_code_start;
size_t code_size = code->instructions().size();
writer->Skip(code_size);
@@ -375,7 +377,7 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) {
code_start = aligned_buffer.get();
}
#endif
- memcpy(code_start, code->instructions().start(), code_size);
+ memcpy(code_start, code->instructions().begin(), code_size);
// Relocate the code.
int mask = RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL) |
@@ -590,7 +592,7 @@ bool NativeModuleDeserializer::ReadCode(uint32_t fn_index, Reader* reader) {
code->Validate();
// Finally, flush the icache for that code.
- FlushInstructionCache(code->instructions().start(),
+ FlushInstructionCache(code->instructions().begin(),
code->instructions().size());
return true;
@@ -601,7 +603,7 @@ bool IsSupportedVersion(Vector<const byte> version) {
byte current_version[kVersionSize];
Writer writer({current_version, kVersionSize});
WriteVersion(&writer);
- return memcmp(version.start(), current_version, kVersionSize) == 0;
+ return memcmp(version.begin(), current_version, kVersionSize) == 0;
}
MaybeHandle<WasmModuleObject> DeserializeNativeModule(
@@ -631,8 +633,6 @@ MaybeHandle<WasmModuleObject> DeserializeNativeModule(
std::move(wire_bytes_copy), script, Handle<ByteArray>::null());
NativeModule* native_module = module_object->native_module();
- native_module->set_lazy_compilation(FLAG_wasm_lazy_compilation);
-
NativeModuleDeserializer deserializer(native_module);
WasmCodeRefScope wasm_code_ref_scope;
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 7b654ec1cf..a79ae02fe2 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -5,9 +5,9 @@
#include "src/wasm/wasm-text.h"
#include "src/debug/interface-types.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-#include "src/vector.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/vector.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
@@ -49,7 +49,7 @@ void PrintWasmText(const WasmModule* module, const ModuleWireBytes& wire_bytes,
WasmName fun_name = wire_bytes.GetNameOrNull(fun, module);
if (IsValidFunctionName(fun_name)) {
os << " $";
- os.write(fun_name.start(), fun_name.length());
+ os.write(fun_name.begin(), fun_name.length());
}
if (fun->sig->parameter_count()) {
os << " (param";
diff --git a/deps/v8/src/wasm/wasm-tier.h b/deps/v8/src/wasm/wasm-tier.h
index 6010d3f5fb..b649723479 100644
--- a/deps/v8/src/wasm/wasm-tier.h
+++ b/deps/v8/src/wasm/wasm-tier.h
@@ -19,6 +19,19 @@ enum class ExecutionTier : int8_t {
kTurbofan,
};
+inline const char* ExecutionTierToString(ExecutionTier tier) {
+ switch (tier) {
+ case ExecutionTier::kTurbofan:
+ return "turbofan";
+ case ExecutionTier::kLiftoff:
+ return "liftoff";
+ case ExecutionTier::kInterpreter:
+ return "interpreter";
+ case ExecutionTier::kNone:
+ return "none";
+ }
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-value.h b/deps/v8/src/wasm/wasm-value.h
index 4201c14ae4..23f1aed7f0 100644
--- a/deps/v8/src/wasm/wasm-value.h
+++ b/deps/v8/src/wasm/wasm-value.h
@@ -5,9 +5,9 @@
#ifndef V8_WASM_WASM_VALUE_H_
#define V8_WASM_WASM_VALUE_H_
-#include "src/boxed-float.h"
-#include "src/handles.h"
-#include "src/v8memory.h"
+#include "src/common/v8memory.h"
+#include "src/handles/handles.h"
+#include "src/utils/boxed-float.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone-containers.h"
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index a6071ab6fa..917175b80d 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -4,8 +4,8 @@
#include "src/zone/accounting-allocator.h"
-#include "src/allocation.h"
#include "src/base/logging.h"
+#include "src/utils/allocation.h"
#include "src/zone/zone-segment.h"
namespace v8 {
diff --git a/deps/v8/src/zone/zone-allocator.h b/deps/v8/src/zone/zone-allocator.h
index 56d8ea09ef..fe62d4bb4c 100644
--- a/deps/v8/src/zone/zone-allocator.h
+++ b/deps/v8/src/zone/zone-allocator.h
@@ -14,16 +14,16 @@ namespace internal {
template <typename T>
class ZoneAllocator {
public:
- typedef T* pointer;
- typedef const T* const_pointer;
- typedef T& reference;
- typedef const T& const_reference;
- typedef T value_type;
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using reference = T&;
+ using const_reference = const T&;
+ using value_type = T;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
template <class O>
struct rebind {
- typedef ZoneAllocator<O> other;
+ using other = ZoneAllocator<O>;
};
#ifdef V8_CC_MSVC
@@ -81,7 +81,7 @@ class RecyclingZoneAllocator : public ZoneAllocator<T> {
public:
template <class O>
struct rebind {
- typedef RecyclingZoneAllocator<O> other;
+ using other = RecyclingZoneAllocator<O>;
};
#ifdef V8_CC_MSVC
@@ -137,8 +137,8 @@ class RecyclingZoneAllocator : public ZoneAllocator<T> {
FreeBlock* free_list_;
};
-typedef ZoneAllocator<bool> ZoneBoolAllocator;
-typedef ZoneAllocator<int> ZoneIntAllocator;
+using ZoneBoolAllocator = ZoneAllocator<bool>;
+using ZoneIntAllocator = ZoneAllocator<int>;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/zone/zone-chunk-list.h b/deps/v8/src/zone/zone-chunk-list.h
index a15f27fab1..f72fc4f900 100644
--- a/deps/v8/src/zone/zone-chunk-list.h
+++ b/deps/v8/src/zone/zone-chunk-list.h
@@ -5,8 +5,8 @@
#include <algorithm>
#include "src/base/iterator.h"
-#include "src/globals.h"
-#include "src/memcopy.h"
+#include "src/common/globals.h"
+#include "src/utils/memcopy.h"
#include "src/zone/zone.h"
#ifndef V8_ZONE_ZONE_CHUNK_LIST_H_
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 86c4bd0702..2aa2f122a9 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -195,8 +195,8 @@ class ZoneMultimap
};
// Typedefs to shorten commonly used vectors.
-typedef ZoneVector<bool> BoolVector;
-typedef ZoneVector<int> IntVector;
+using BoolVector = ZoneVector<bool>;
+using IntVector = ZoneVector<int>;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/zone/zone-handle-set.h b/deps/v8/src/zone/zone-handle-set.h
index ccf7411268..96752dce35 100644
--- a/deps/v8/src/zone/zone-handle-set.h
+++ b/deps/v8/src/zone/zone-handle-set.h
@@ -5,7 +5,7 @@
#ifndef V8_ZONE_ZONE_HANDLE_SET_H_
#define V8_ZONE_ZONE_HANDLE_SET_H_
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@@ -157,7 +157,7 @@ class ZoneHandleSet final {
inline const_iterator end() const;
private:
- typedef ZoneVector<Address*> List;
+ using List = ZoneVector<Address*>;
List const* list() const {
DCHECK_EQ(kListTag, data_ & kTagMask);
@@ -193,11 +193,11 @@ std::ostream& operator<<(std::ostream& os, ZoneHandleSet<T> set) {
template <typename T>
class ZoneHandleSet<T>::const_iterator {
public:
- typedef std::forward_iterator_tag iterator_category;
- typedef std::ptrdiff_t difference_type;
- typedef Handle<T> value_type;
- typedef value_type reference;
- typedef value_type* pointer;
+ using iterator_category = std::forward_iterator_tag;
+ using difference_type = std::ptrdiff_t;
+ using value_type = Handle<T>;
+ using reference = value_type;
+ using pointer = value_type*;
const_iterator(const const_iterator& other)
: set_(other.set_), current_(other.current_) {}
diff --git a/deps/v8/src/zone/zone-list-inl.h b/deps/v8/src/zone/zone-list-inl.h
index a0e4b1950b..aca54133f9 100644
--- a/deps/v8/src/zone/zone-list-inl.h
+++ b/deps/v8/src/zone/zone-list-inl.h
@@ -9,7 +9,7 @@
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/memcopy.h"
+#include "src/utils/memcopy.h"
namespace v8 {
namespace internal {
@@ -34,7 +34,7 @@ void ZoneList<T>::AddAll(const Vector<T>& other, Zone* zone) {
if (capacity_ < result_length)
Resize(result_length, ZoneAllocationPolicy(zone));
if (std::is_fundamental<T>()) {
- memcpy(data_ + length_, other.start(), sizeof(*data_) * other.length());
+ memcpy(data_ + length_, other.begin(), sizeof(*data_) * other.length());
} else {
for (int i = 0; i < other.length(); i++) data_[length_ + i] = other.at(i);
}
@@ -133,7 +133,8 @@ void ZoneList<T>::Iterate(Visitor* visitor) {
template <typename T>
template <typename CompareFunction>
void ZoneList<T>::Sort(CompareFunction cmp) {
- ToVector().Sort(cmp, 0, length_);
+ std::sort(begin(), end(),
+ [cmp](const T& a, const T& b) { return cmp(&a, &b) < 0; });
#ifdef DEBUG
for (int i = 1; i < length_; i++) {
DCHECK_LE(cmp(&data_[i - 1], &data_[i]), 0);
@@ -144,7 +145,8 @@ void ZoneList<T>::Sort(CompareFunction cmp) {
template <typename T>
template <typename CompareFunction>
void ZoneList<T>::StableSort(CompareFunction cmp, size_t s, size_t l) {
- ToVector().StableSort(cmp, s, l);
+ std::stable_sort(begin() + s, begin() + s + l,
+ [cmp](const T& a, const T& b) { return cmp(&a, &b) < 0; });
#ifdef DEBUG
for (size_t i = s + 1; i < l; i++) {
DCHECK_LE(cmp(&data_[i - 1], &data_[i]), 0);
diff --git a/deps/v8/src/zone/zone-segment.cc b/deps/v8/src/zone/zone-segment.cc
index b9649d494a..49cedb851b 100644
--- a/deps/v8/src/zone/zone-segment.cc
+++ b/deps/v8/src/zone/zone-segment.cc
@@ -4,7 +4,7 @@
#include "src/zone/zone-segment.h"
-#include "src/msan.h"
+#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/zone/zone-segment.h b/deps/v8/src/zone/zone-segment.h
index 2bc2f7f1ca..2f40bb68e1 100644
--- a/deps/v8/src/zone/zone-segment.h
+++ b/deps/v8/src/zone/zone-segment.h
@@ -5,7 +5,7 @@
#ifndef V8_ZONE_ZONE_SEGMENT_H_
#define V8_ZONE_ZONE_SEGMENT_H_
-#include "src/v8.h"
+#include "src/init/v8.h"
// Segments represent chunks of memory: They have starting address
// (encoded in the this pointer) and a size in bytes. Segments are
diff --git a/deps/v8/src/zone/zone-splay-tree.h b/deps/v8/src/zone/zone-splay-tree.h
index 55a81738b6..c28df38fda 100644
--- a/deps/v8/src/zone/zone-splay-tree.h
+++ b/deps/v8/src/zone/zone-splay-tree.h
@@ -5,7 +5,7 @@
#ifndef V8_ZONE_ZONE_SPLAY_TREE_H_
#define V8_ZONE_ZONE_SPLAY_TREE_H_
-#include "src/splay-tree.h"
+#include "src/utils/splay-tree.h"
#include "src/zone/zone.h"
namespace v8 {
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index 9b5153d3f6..a6f45fad54 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -6,9 +6,9 @@
#include <cstring>
-#include "src/asan.h"
-#include "src/utils.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/sanitizer/asan.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index 3ebcadfa3c..b113f49585 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -11,7 +11,7 @@
#include "src/base/hashmap.h"
#include "src/base/logging.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-segment.h"
@@ -216,7 +216,7 @@ class ZoneList final {
inline T& last() const { return at(length_ - 1); }
inline T& first() const { return at(0); }
- typedef T* iterator;
+ using iterator = T*;
inline iterator begin() const { return &data_[0]; }
inline iterator end() const { return &data_[length_]; }
@@ -388,7 +388,7 @@ class ScopedPtrList final {
end_ += list.length();
}
- typedef T** iterator;
+ using iterator = T**;
inline iterator begin() const {
return reinterpret_cast<T**>(buffer_.data() + start_);
}
@@ -402,10 +402,10 @@ class ScopedPtrList final {
size_t end_;
};
-typedef base::PointerTemplateHashMapImpl<ZoneAllocationPolicy> ZoneHashMap;
+using ZoneHashMap = base::PointerTemplateHashMapImpl<ZoneAllocationPolicy>;
-typedef base::CustomMatcherTemplateHashMapImpl<ZoneAllocationPolicy>
- CustomMatcherZoneHashMap;
+using CustomMatcherZoneHashMap =
+ base::CustomMatcherTemplateHashMapImpl<ZoneAllocationPolicy>;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index 6c64ef6893..6bf9ca643d 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -24,6 +24,7 @@ group("gn_all") {
deps = [
"inspector:inspector-test",
"mkgrokdump:mkgrokdump",
+ "wasm-api-tests:wasm_api_tests",
]
if (host_os != "mac" || !is_android) {
@@ -44,6 +45,7 @@ group("v8_perf") {
testonly = true
data_deps = [
+ "..:v8_python_base",
"cctest:cctest",
"..:d8",
"../tools:v8_android_test_runner_deps",
@@ -81,6 +83,7 @@ group("v8_bot_default") {
"mkgrokdump:mkgrokdump",
"preparser:v8_preparser",
"unittests:unittests",
+ "wasm-api-tests:wasm_api_tests",
"wasm-js:v8_wasm_js",
"wasm-spec-tests:v8_wasm_spec_tests",
"webkit:v8_webkit",
@@ -101,6 +104,7 @@ group("v8_default") {
"mkgrokdump:mkgrokdump",
"preparser:v8_preparser",
"unittests:unittests",
+ "wasm-api-tests:wasm_api_tests",
"wasm-js:v8_wasm_js",
"wasm-spec-tests:v8_wasm_spec_tests",
]
diff --git a/deps/v8/test/OWNERS b/deps/v8/test/OWNERS
index 85f514c4ab..bdb1d555a4 100644
--- a/deps/v8/test/OWNERS
+++ b/deps/v8/test/OWNERS
@@ -1,3 +1 @@
-machenbach@chromium.org
-sergiyb@chromium.org
-tmrts@chromium.org \ No newline at end of file
+file://INFRA_OWNERS
diff --git a/deps/v8/test/benchmarks/benchmarks.status b/deps/v8/test/benchmarks/benchmarks.status
index bb87cc6dba..d176e35312 100644
--- a/deps/v8/test/benchmarks/benchmarks.status
+++ b/deps/v8/test/benchmarks/benchmarks.status
@@ -37,10 +37,26 @@
'octane/typescript': [PASS, SLOW],
}], # ALWAYS
+# Slow variants.
+['variant in [jitless, nooptimization, stress]', {
+ # Slow tests.
+ 'kraken/ai-astar': [PASS, SLOW],
+ 'kraken/imaging-desaturate': [PASS, SLOW],
+ 'octane/box2d': [PASS, SLOW],
+ 'octane/code-load': [PASS, SLOW],
+ 'octane/crypto': [PASS, SLOW],
+ 'octane/gbemu-part1': [PASS, SLOW],
+ 'octane/pdfjs': [PASS, SLOW],
+ 'octane/regexp': [PASS, SLOW],
+}],
+
['variant == stress', {
# Too slow for stress mode.
'octane/mandreel': [SKIP],
'octane/typescript': [SKIP],
+
+ # Too memory hungry on Odroid devices: https://crbug.com/v8/8963
+ 'kraken/stanford-crypto-ccm': [PASS, ['arch == arm and not simulator_run', SKIP]],
}],
['variant == jitless', {
@@ -55,6 +71,11 @@
'octane/typescript': [SKIP],
}], # 'gc_fuzzer'
+['predictable', {
+ # https://crbug.com/v8/8537
+ 'octane/typescript': [SKIP],
+}], # 'predictable'
+
##############################################################################
['variant == jitless and not embedded_builtins', {
'*': [SKIP],
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index fee23224c3..42396087ee 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -6,6 +6,11 @@ import("../../gni/v8.gni")
v8_executable("cctest") {
testonly = true
+
+ sources = [
+ "cctest.cc",
+ ]
+
deps = [
":cctest_sources",
]
@@ -57,7 +62,7 @@ v8_source_set("cctest_sources") {
"../common/wasm/flag-utils.h",
"../common/wasm/test-signatures.h",
"../common/wasm/wasm-macro-gen.h",
- "cctest.cc",
+ "collector.h",
"compiler/c-signature.h",
"compiler/call-tester.h",
"compiler/code-assembler-tester.h",
@@ -125,6 +130,7 @@ v8_source_set("cctest_sources") {
"heap/test-heap.cc",
"heap/test-incremental-marking.cc",
"heap/test-invalidated-slots.cc",
+ "heap/test-iterators.cc",
"heap/test-lab.cc",
"heap/test-mark-compact.cc",
"heap/test-page-promotion.cc",
@@ -160,6 +166,7 @@ v8_source_set("cctest_sources") {
"test-allocation.cc",
"test-api-accessors.cc",
"test-api-interceptors.cc",
+ "test-api-stack-traces.cc",
"test-api.cc",
"test-api.h",
"test-array-list.cc",
@@ -405,6 +412,10 @@ v8_source_set("cctest_sources") {
# MSVS wants this for gay-{precision,shortest}.cc.
cflags += [ "/bigobj" ]
}
+
+ if (v8_use_perfetto) {
+ deps += [ "//third_party/perfetto/protos/perfetto/trace/chrome:minimal_complete_lite" ]
+ }
}
action("resources") {
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 0f3e4526db..92be1567b0 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -10,3 +10,4 @@ per-file *-s390*=jyan@ca.ibm.com
per-file *-s390*=mbrandy@us.ibm.com
per-file *-s390*=michael_dawson@ca.ibm.com
per-file *profile*=alph@chromium.org
+
diff --git a/deps/v8/test/cctest/assembler-helper-arm.cc b/deps/v8/test/cctest/assembler-helper-arm.cc
index b3a27f8cd8..fd05a33648 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.cc
+++ b/deps/v8/test/cctest/assembler-helper-arm.cc
@@ -4,9 +4,9 @@
#include "test/cctest/assembler-helper-arm.h"
-#include "src/macro-assembler.h"
-#include "src/isolate-inl.h"
-#include "src/v8.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/isolate-inl.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -14,15 +14,14 @@ namespace internal {
Handle<Code> AssembleCodeImpl(std::function<void(MacroAssembler&)> assemble) {
Isolate* isolate = CcTest::i_isolate();
- MacroAssembler assm(AssemblerOptions{});
+ MacroAssembler assm(isolate, CodeObjectRequired::kYes);
assemble(assm);
assm.bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
if (FLAG_print_code) {
code->Print();
}
diff --git a/deps/v8/test/cctest/assembler-helper-arm.h b/deps/v8/test/cctest/assembler-helper-arm.h
index 15b821a30d..0585d36526 100644
--- a/deps/v8/test/cctest/assembler-helper-arm.h
+++ b/deps/v8/test/cctest/assembler-helper-arm.h
@@ -7,8 +7,8 @@
#include <functional>
-#include "src/handles.h"
-#include "src/simulator.h"
+#include "src/execution/simulator.h"
+#include "src/handles/handles.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index e4a0bd8a50..353f7f5c76 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -29,11 +29,11 @@
#include "test/cctest/cctest.h"
#include "include/libplatform/libplatform.h"
-#include "src/compiler.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/pipeline.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
-#include "src/optimized-compilation-info.h"
+#include "src/objects/objects-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "test/cctest/print-extension.h"
#include "test/cctest/profiler-extension.h"
@@ -121,6 +121,7 @@ void CcTest::Run() {
}
i::Heap* CcTest::heap() { return i_isolate()->heap(); }
+i::ReadOnlyHeap* CcTest::read_only_heap() { return heap()->read_only_heap(); }
void CcTest::CollectGarbage(i::AllocationSpace space) {
heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
@@ -225,10 +226,9 @@ HandleAndZoneScope::HandleAndZoneScope()
HandleAndZoneScope::~HandleAndZoneScope() = default;
-i::Handle<i::JSFunction> Optimize(i::Handle<i::JSFunction> function,
- i::Zone* zone, i::Isolate* isolate,
- uint32_t flags,
- i::compiler::JSHeapBroker** out_broker) {
+i::Handle<i::JSFunction> Optimize(
+ i::Handle<i::JSFunction> function, i::Zone* zone, i::Isolate* isolate,
+ uint32_t flags, std::unique_ptr<i::compiler::JSHeapBroker>* out_broker) {
i::Handle<i::SharedFunctionInfo> shared(function->shared(), isolate);
i::IsCompiledScope is_compiled_scope(shared->is_compiled_scope());
CHECK(is_compiled_scope.is_compiled() ||
@@ -249,7 +249,7 @@ i::Handle<i::JSFunction> Optimize(i::Handle<i::JSFunction> function,
i::Handle<i::Code> code =
i::compiler::Pipeline::GenerateCodeForTesting(&info, isolate, out_broker)
.ToHandleChecked();
- info.native_context()->AddOptimizedCode(*code);
+ info.native_context().AddOptimizedCode(*code);
function->set_code(*code);
return function;
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 0f0dfddaf6..3de8baa3e4 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -33,13 +33,13 @@
#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
#include "src/base/enum-set.h"
+#include "src/codegen/register-configuration.h"
#include "src/debug/debug-interface.h"
-#include "src/flags.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
-#include "src/register-configuration.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/objects/objects.h"
#include "src/zone/accounting-allocator.h"
namespace v8 {
@@ -106,7 +106,7 @@ static constexpr const char* kExtensionName[kMaxExtensions] = {
class CcTest {
public:
- typedef void (TestFunction)();
+ using TestFunction = void();
CcTest(TestFunction* callback, const char* file, const char* name,
bool enabled, bool initialize);
~CcTest() { i::DeleteArray(file_); }
@@ -133,6 +133,7 @@ class CcTest {
}
static i::Heap* heap();
+ static i::ReadOnlyHeap* read_only_heap();
static void CollectGarbage(i::AllocationSpace space);
static void CollectAllGarbage(i::Isolate* isolate = nullptr);
@@ -321,9 +322,9 @@ class LocalContext {
static inline uint16_t* AsciiToTwoByteString(const char* source) {
- int array_length = i::StrLength(source) + 1;
+ size_t array_length = strlen(source) + 1;
uint16_t* converted = i::NewArray<uint16_t>(array_length);
- for (int i = 0; i < array_length; i++) converted[i] = source[i];
+ for (size_t i = 0; i < array_length; i++) converted[i] = source[i];
return converted;
}
@@ -500,11 +501,13 @@ static inline v8::Local<v8::Value> CompileRunWithOrigin(
// Takes a JSFunction and runs it through the test version of the optimizing
// pipeline, allocating the temporary compilation artifacts in a given Zone.
-// For possible {flags} values, look at OptimizedCompilationInfo::Flag.
-// If passed a non-null pointer for {broker}, outputs the JSHeapBroker to it.
+// For possible {flags} values, look at OptimizedCompilationInfo::Flag. If
+// {out_broker} is not nullptr, returns the JSHeapBroker via that (transferring
+// ownership to the caller).
i::Handle<i::JSFunction> Optimize(
i::Handle<i::JSFunction> function, i::Zone* zone, i::Isolate* isolate,
- uint32_t flags, i::compiler::JSHeapBroker** out_broker = nullptr);
+ uint32_t flags,
+ std::unique_ptr<i::compiler::JSHeapBroker>* out_broker = nullptr);
static inline void ExpectString(const char* code, const char* expected) {
v8::Local<v8::Value> result = CompileRun(code);
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 5c3b9dab4a..274a8bf28a 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -121,6 +121,8 @@
############################################################################
# Slow tests.
'test-debug/CallFunctionInDebugger': [PASS, ['mode == debug', SLOW]],
+ 'test-heap-profiler/ManyLocalsInSharedContext': [PASS, SLOW],
+ 'test-jump-table-assembler/JumpTablePatchingStress': [PASS, SLOW],
'test-strings/StringOOM*': [PASS, ['mode == debug', SKIP]],
'test-serialize/CustomSnapshotDataBlobImmortalImmovableRoots': [PASS, ['mode == debug', SKIP]],
'test-parsing/ObjectRestNegativeTestSlow': [PASS, ['mode == debug', SKIP]],
@@ -210,6 +212,14 @@
}], # 'no_snap == True and system == windows'
##############################################################################
+['is_full_debug', {
+ # Tests too slow in non-optimized debug mode.
+ 'test-api/InternalFieldsSubclassing': [SKIP],
+ 'test-cpu-profiler/Inlining2': [SKIP],
+ 'test-heap/TestInternalWeakLists': [SKIP],
+}], # 'is_full_debug'
+
+##############################################################################
['byteorder == big', {
# Skip WASM atomic tests on big-endian machines.
# There is no support to emulate atomic WASM operations on big-endian
@@ -417,14 +427,15 @@
##############################################################################
# The test relies on deterministic compilation.
-['variant == stress_background_compile', {
+['variant == stress_js_bg_compile_wasm_code_gc', {
'test-compiler/DecideToPretenureDuringCompilation': [SKIP],
-}], # variant == stress_background_compile
+}], # variant == stress_js_bg_compile_wasm_code_gc
##############################################################################
['variant == no_wasm_traps', {
'test-accessors/*': [SKIP],
'test-api-interceptors/*': [SKIP],
+ 'test-api-stack-traces/*': [SKIP],
'test-api/*': [SKIP],
'test-bignum-dtoa/*': [SKIP],
'test-cpu-profiler/*': [SKIP],
@@ -461,23 +472,6 @@
##############################################################################
['lite_mode or variant == jitless', {
- # TODO(8394): First execution events don't work in lite_mode. Enable this after
- # we fix the lite mode to track the first execution.
- 'test-log/LogFunctionEvents': [SKIP],
-
- # Skip tests for weak references in feedback vector.
- 'test-weak-references/WeakReferencesBasic': [SKIP],
- 'test-weak-references/WeakReferencesOldToOld': [SKIP],
- 'test-weak-references/WeakReferencesOldToNew': [SKIP],
- 'test-weak-references/WeakReferencesOldToNewScavenged': [SKIP],
- 'test-weak-references/WeakReferencesOldToCleared': [SKIP],
- 'test-weak-references/ObjectMovesBeforeClearingWeakField': [SKIP],
- 'test-weak-references/ObjectWithWeakFieldDies': [SKIP],
- 'test-weak-references/ObjectWithWeakReferencePromoted': [SKIP],
- 'test-weak-references/ObjectWithClearedWeakReferencePromoted': [SKIP],
- 'test-weak-references/WeakReferenceWriteBarrier': [SKIP],
- 'test-heap-profiler/WeakReference': [SKIP],
-
# Slow tests
'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [SKIP],
@@ -550,6 +544,7 @@
'test-run-unwinding-info/*': [SKIP],
'test-run-variables/*': [SKIP],
'test-torque/*': [SKIP],
+ 'test-macro-assembler-x64/EmbeddedObj': [SKIP],
# Field representation tracking is disabled in jitless mode.
'test-field-type-tracking/*': [SKIP],
diff --git a/deps/v8/src/collector.h b/deps/v8/test/cctest/collector.h
index bfaa9d42ce..0e7251f4bb 100644
--- a/deps/v8/src/collector.h
+++ b/deps/v8/test/cctest/collector.h
@@ -7,8 +7,8 @@
#include <vector>
-#include "src/checks.h"
-#include "src/vector.h"
+#include "src/common/checks.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
@@ -57,7 +57,7 @@ class Collector {
if (size > current_chunk_.length() - index_) {
Grow(size);
}
- T* position = current_chunk_.start() + index_;
+ T* position = current_chunk_.begin() + index_;
index_ += size;
size_ += size;
for (int i = 0; i < size; i++) {
@@ -74,7 +74,7 @@ class Collector {
if (source.length() > current_chunk_.length() - index_) {
Grow(source.length());
}
- T* position = current_chunk_.start() + index_;
+ T* position = current_chunk_.begin() + index_;
index_ += source.length();
size_ += source.length();
for (int i = 0; i < source.length(); i++) {
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h
index f7d08ec899..375723c904 100644
--- a/deps/v8/test/cctest/compiler/c-signature.h
+++ b/deps/v8/test/cctest/compiler/c-signature.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_C_SIGNATURE_H_
#define V8_COMPILER_C_SIGNATURE_H_
-#include "src/machine-type.h"
+#include "src/codegen/machine-type.h"
namespace v8 {
namespace internal {
@@ -110,11 +110,11 @@ class CSignatureOf : public CSignature {
MachineType storage_[kReturnCount + kParamCount];
};
-typedef CSignatureOf<int32_t, int32_t, int32_t> CSignature_i_ii;
-typedef CSignatureOf<uint32_t, uint32_t, uint32_t> CSignature_u_uu;
-typedef CSignatureOf<float, float, float> CSignature_f_ff;
-typedef CSignatureOf<double, double, double> CSignature_d_dd;
-typedef CSignatureOf<Object, Object, Object> CSignature_o_oo;
+using CSignature_i_ii = CSignatureOf<int32_t, int32_t, int32_t>;
+using CSignature_u_uu = CSignatureOf<uint32_t, uint32_t, uint32_t>;
+using CSignature_f_ff = CSignatureOf<float, float, float>;
+using CSignature_d_dd = CSignatureOf<double, double, double>;
+using CSignature_o_oo = CSignatureOf<Object, Object, Object>;
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index 4bca79625c..0b7174bc2b 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -5,9 +5,9 @@
#ifndef V8_CCTEST_COMPILER_CALL_TESTER_H_
#define V8_CCTEST_COMPILER_CALL_TESTER_H_
-#include "src/handles.h"
+#include "src/execution/simulator.h"
+#include "src/handles/handles.h"
#include "src/objects/code.h"
-#include "src/simulator.h"
#include "test/cctest/compiler/c-signature.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
index 6707e2ba13..68144987bd 100644
--- a/deps/v8/test/cctest/compiler/code-assembler-tester.h
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -5,11 +5,11 @@
#ifndef V8_TEST_CCTEST_COMPILER_CODE_ASSEMBLER_TESTER_H_
#define V8_TEST_CCTEST_COMPILER_CODE_ASSEMBLER_TESTER_H_
+#include "src/codegen/interface-descriptors.h"
#include "src/compiler/code-assembler.h"
#include "src/compiler/raw-machine-assembler.h"
-#include "src/handles.h"
-#include "src/interface-descriptors.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.cc b/deps/v8/test/cctest/compiler/codegen-tester.cc
index a06585cbca..e73a182905 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.cc
+++ b/deps/v8/test/cctest/compiler/codegen-tester.cc
@@ -5,7 +5,7 @@
#include "test/cctest/compiler/codegen-tester.h"
#include "src/base/overflowing-math.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index ff35d8b453..62db9445ea 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -5,11 +5,11 @@
#ifndef V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
#define V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
-#include "src/optimized-compilation-info.h"
-#include "src/simulator.h"
+#include "src/execution/simulator.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/call-tester.h"
@@ -216,11 +216,11 @@ template <typename CType, bool use_result_buffer>
class BinopTester {
public:
explicit BinopTester(RawMachineAssemblerTester<int32_t>* tester,
- MachineType rep)
+ MachineType type)
: T(tester),
- param0(T->LoadFromPointer(&p0, rep)),
- param1(T->LoadFromPointer(&p1, rep)),
- rep(rep),
+ param0(T->LoadFromPointer(&p0, type)),
+ param1(T->LoadFromPointer(&p1, type)),
+ type(type),
p0(static_cast<CType>(0)),
p1(static_cast<CType>(0)),
result(static_cast<CType>(0)) {}
@@ -242,7 +242,7 @@ class BinopTester {
void AddReturn(Node* val) {
if (use_result_buffer) {
- T->Store(rep.representation(), T->PointerConstant(&result),
+ T->Store(type.representation(), T->PointerConstant(&result),
T->Int32Constant(0), val, kNoWriteBarrier);
T->Return(T->Int32Constant(CHECK_VALUE));
} else {
@@ -262,7 +262,7 @@ class BinopTester {
}
protected:
- MachineType rep;
+ MachineType type;
CType p0;
CType p1;
CType result;
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 347f414b56..81053c2e9e 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -4,14 +4,14 @@
#include "test/cctest/compiler/function-tester.h"
-#include "src/api-inl.h"
-#include "src/assembler.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
-#include "src/execution.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
-#include "src/optimized-compilation-info.h"
+#include "src/execution/execution.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "test/cctest/cctest.h"
@@ -137,7 +137,7 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
p = *f.function;
}
return Handle<JSFunction>(
- p, p->GetIsolate()); // allocated in outer handle scope.
+ p, p.GetIsolate()); // allocated in outer handle scope.
}
Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index 6e0146958f..09249f40b0 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -5,8 +5,8 @@
#ifndef V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
#define V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
-#include "src/execution.h"
-#include "src/handles.h"
+#include "src/execution/execution.h"
+#include "src/handles/handles.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/graph-and-builders.h b/deps/v8/test/cctest/compiler/graph-and-builders.h
new file mode 100644
index 0000000000..e8cfc54c93
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/graph-and-builders.h
@@ -0,0 +1,43 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_GRAPH_AND_BUILDERS_H_
+#define V8_CCTEST_COMPILER_GRAPH_AND_BUILDERS_H_
+
+#include "src/compiler/backend/instruction-selector.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphAndBuilders {
+ public:
+ explicit GraphAndBuilders(Zone* zone)
+ : main_graph_(new (zone) Graph(zone)),
+ main_common_(zone),
+ main_machine_(zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()),
+ main_simplified_(zone) {}
+
+ Graph* graph() const { return main_graph_; }
+ Zone* zone() const { return graph()->zone(); }
+ CommonOperatorBuilder* common() { return &main_common_; }
+ MachineOperatorBuilder* machine() { return &main_machine_; }
+ SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
+
+ protected:
+ // Prefixed with main_ to avoid naming conflicts.
+ Graph* main_graph_;
+ CommonOperatorBuilder main_common_;
+ MachineOperatorBuilder main_machine_;
+ SimplifiedOperatorBuilder main_simplified_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CCTEST_COMPILER_GRAPH_AND_BUILDERS_H_
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
deleted file mode 100644
index 4fe0fc9292..0000000000
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
-#define V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
-
-#include "src/assembler.h"
-#include "src/compiler/backend/instruction-selector.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/operator-properties.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/optimized-compilation-info.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/call-tester.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class GraphAndBuilders {
- public:
- explicit GraphAndBuilders(Zone* zone)
- : main_graph_(new (zone) Graph(zone)),
- main_common_(zone),
- main_machine_(zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements()),
- main_simplified_(zone) {}
-
- Graph* graph() const { return main_graph_; }
- Zone* zone() const { return graph()->zone(); }
- CommonOperatorBuilder* common() { return &main_common_; }
- MachineOperatorBuilder* machine() { return &main_machine_; }
- SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
-
- protected:
- // Prefixed with main_ to avoid naming conflicts.
- Graph* main_graph_;
- CommonOperatorBuilder main_common_;
- MachineOperatorBuilder main_machine_;
- SimplifiedOperatorBuilder main_simplified_;
-};
-
-
-template <typename ReturnType>
-class GraphBuilderTester : public HandleAndZoneScope,
- public GraphAndBuilders,
- public CallHelper<ReturnType> {
- public:
- template <typename... ParamMachTypes>
- explicit GraphBuilderTester(ParamMachTypes... p)
- : GraphAndBuilders(main_zone()),
- CallHelper<ReturnType>(
- main_isolate(),
- CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p...)),
- effect_(nullptr),
- return_(nullptr),
- parameters_(main_zone()->template NewArray<Node*>(parameter_count())) {
- Begin(static_cast<int>(parameter_count()));
- InitParameters();
- }
- ~GraphBuilderTester() override = default;
-
- void GenerateCode() { Generate(); }
- Node* Parameter(size_t index) {
- CHECK_LT(index, parameter_count());
- return parameters_[index];
- }
-
- Isolate* isolate() { return main_isolate(); }
- Factory* factory() { return isolate()->factory(); }
-
- // Initialize graph and builder.
- void Begin(int num_parameters) {
- CHECK_NULL(graph()->start());
- Node* start = graph()->NewNode(common()->Start(num_parameters + 3));
- graph()->SetStart(start);
- effect_ = start;
- }
-
- void Return(Node* value) {
- Node* zero = graph()->NewNode(common()->Int32Constant(0));
- return_ = graph()->NewNode(common()->Return(), zero, value, effect_,
- graph()->start());
- effect_ = nullptr;
- }
-
- // Close the graph.
- void End() {
- Node* end = graph()->NewNode(common()->End(1), return_);
- graph()->SetEnd(end);
- }
-
- Node* PointerConstant(void* value) {
- intptr_t intptr_value = reinterpret_cast<intptr_t>(value);
- return kSystemPointerSize == 8
- ? NewNode(common()->Int64Constant(intptr_value))
- : Int32Constant(static_cast<int>(intptr_value));
- }
- Node* Int32Constant(int32_t value) {
- return NewNode(common()->Int32Constant(value));
- }
- Node* HeapConstant(Handle<HeapObject> object) {
- return NewNode(common()->HeapConstant(object));
- }
-
- Node* BooleanNot(Node* a) { return NewNode(simplified()->BooleanNot(), a); }
-
- Node* NumberEqual(Node* a, Node* b) {
- return NewNode(simplified()->NumberEqual(), a, b);
- }
- Node* NumberLessThan(Node* a, Node* b) {
- return NewNode(simplified()->NumberLessThan(), a, b);
- }
- Node* NumberLessThanOrEqual(Node* a, Node* b) {
- return NewNode(simplified()->NumberLessThanOrEqual(), a, b);
- }
- Node* NumberAdd(Node* a, Node* b) {
- return NewNode(simplified()->NumberAdd(), a, b);
- }
- Node* NumberSubtract(Node* a, Node* b) {
- return NewNode(simplified()->NumberSubtract(), a, b);
- }
- Node* NumberMultiply(Node* a, Node* b) {
- return NewNode(simplified()->NumberMultiply(), a, b);
- }
- Node* NumberDivide(Node* a, Node* b) {
- return NewNode(simplified()->NumberDivide(), a, b);
- }
- Node* NumberModulus(Node* a, Node* b) {
- return NewNode(simplified()->NumberModulus(), a, b);
- }
- Node* NumberToInt32(Node* a) {
- return NewNode(simplified()->NumberToInt32(), a);
- }
- Node* NumberToUint32(Node* a) {
- return NewNode(simplified()->NumberToUint32(), a);
- }
-
- Node* StringEqual(Node* a, Node* b) {
- return NewNode(simplified()->StringEqual(), a, b);
- }
- Node* StringLessThan(Node* a, Node* b) {
- return NewNode(simplified()->StringLessThan(), a, b);
- }
- Node* StringLessThanOrEqual(Node* a, Node* b) {
- return NewNode(simplified()->StringLessThanOrEqual(), a, b);
- }
-
- Node* ChangeTaggedToInt32(Node* a) {
- return NewNode(simplified()->ChangeTaggedToInt32(), a);
- }
- Node* ChangeTaggedToUint32(Node* a) {
- return NewNode(simplified()->ChangeTaggedToUint32(), a);
- }
- Node* ChangeTaggedToFloat64(Node* a) {
- return NewNode(simplified()->ChangeTaggedToFloat64(), a);
- }
- Node* ChangeInt32ToTagged(Node* a) {
- return NewNode(simplified()->ChangeInt32ToTagged(), a);
- }
- Node* ChangeUint32ToTagged(Node* a) {
- return NewNode(simplified()->ChangeUint32ToTagged(), a);
- }
- Node* ChangeTaggedToBit(Node* a) {
- return NewNode(simplified()->ChangeTaggedToBit(), a);
- }
- Node* ChangeBitToTagged(Node* a) {
- return NewNode(simplified()->ChangeBitToTagged(), a);
- }
-
- Node* LoadField(const FieldAccess& access, Node* object) {
- return NewNode(simplified()->LoadField(access), object);
- }
- Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
- return NewNode(simplified()->StoreField(access), object, value);
- }
- Node* LoadElement(const ElementAccess& access, Node* object, Node* index) {
- return NewNode(simplified()->LoadElement(access), object, index);
- }
- Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
- Node* value) {
- return NewNode(simplified()->StoreElement(access), object, index, value);
- }
-
- template <typename... NodePtrs>
- Node* NewNode(const Operator* op, NodePtrs... n) {
- std::array<Node*, sizeof...(n)> inputs{{n...}};
- return MakeNode(op, inputs.size(), inputs.data());
- }
-
- Node* NewNode(const Operator* op, int value_input_count,
- Node** value_inputs) {
- return MakeNode(op, value_input_count, value_inputs);
- }
-
- Handle<Code> GetCode() {
- Generate();
- return code_.ToHandleChecked();
- }
-
- protected:
- Node* MakeNode(const Operator* op, int value_input_count,
- Node** value_inputs) {
- CHECK_EQ(op->ValueInputCount(), value_input_count);
-
- CHECK(!OperatorProperties::HasContextInput(op));
- CHECK(!OperatorProperties::HasFrameStateInput(op));
- bool has_control = op->ControlInputCount() == 1;
- bool has_effect = op->EffectInputCount() == 1;
-
- CHECK_LT(op->ControlInputCount(), 2);
- CHECK_LT(op->EffectInputCount(), 2);
-
- Node* result = nullptr;
- if (!has_control && !has_effect) {
- result = graph()->NewNode(op, value_input_count, value_inputs);
- } else {
- int input_count_with_deps = value_input_count;
- if (has_control) ++input_count_with_deps;
- if (has_effect) ++input_count_with_deps;
- Node** buffer = zone()->template NewArray<Node*>(input_count_with_deps);
- memcpy(buffer, value_inputs, kSystemPointerSize * value_input_count);
- Node** current_input = buffer + value_input_count;
- if (has_effect) {
- *current_input++ = effect_;
- }
- if (has_control) {
- *current_input++ = graph()->start();
- }
- result = graph()->NewNode(op, input_count_with_deps, buffer);
- if (has_effect) {
- effect_ = result;
- }
- // This graph builder does not support control flow.
- CHECK_EQ(0, op->ControlOutputCount());
- }
-
- return result;
- }
-
- Address Generate() override {
- if (code_.is_null()) {
- Zone* zone = graph()->zone();
- auto call_descriptor =
- Linkage::GetSimplifiedCDescriptor(zone, this->csig_);
- OptimizedCompilationInfo info(ArrayVector("testing"), main_zone(),
- Code::STUB);
- code_ = Pipeline::GenerateCodeForTesting(
- &info, main_isolate(), call_descriptor, graph(),
- AssemblerOptions::Default(main_isolate()));
-#ifdef ENABLE_DISASSEMBLER
- if (!code_.is_null() && FLAG_print_opt_code) {
- StdoutStream os;
- code_.ToHandleChecked()->Disassemble("test code", os);
- }
-#endif
- }
- return code_.ToHandleChecked()->entry();
- }
-
- void InitParameters() {
- int param_count = static_cast<int>(parameter_count());
- for (int i = 0; i < param_count; ++i) {
- parameters_[i] = this->NewNode(common()->Parameter(i), graph()->start());
- }
- }
-
- size_t parameter_count() const { return this->csig_->parameter_count(); }
-
- private:
- Node* effect_;
- Node* return_;
- Node** parameters_;
- MaybeHandle<Code> code_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.cc b/deps/v8/test/cctest/compiler/serializer-tester.cc
index beb7255a66..45f4a1fb9c 100644
--- a/deps/v8/test/cctest/compiler/serializer-tester.cc
+++ b/deps/v8/test/cctest/compiler/serializer-tester.cc
@@ -8,10 +8,10 @@
#include "test/cctest/compiler/serializer-tester.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/serializer-for-background-compilation.h"
#include "src/compiler/zone-stats.h"
-#include "src/optimized-compilation-info.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -30,6 +30,7 @@ SerializerTester::SerializerTester(const char* source)
FLAG_always_opt = false;
// We need allocation of executable memory for the compilation.
FLAG_jitless = false;
+ FLAG_allow_natives_syntax = true;
std::string function_string = "(function() { ";
function_string += source;
@@ -45,11 +46,12 @@ SerializerTester::SerializerTester(const char* source)
i::OptimizedCompilationInfo::kSplittingEnabled |
i::OptimizedCompilationInfo::kAnalyzeEnvironmentLiveness;
Optimize(function, main_zone(), main_isolate(), flags, &broker_);
- function_ = JSFunctionRef(broker_, function);
+ function_ = JSFunctionRef(broker(), function);
}
TEST(SerializeEmptyFunction) {
- SerializerTester tester("function f() {}; return f;");
+ SerializerTester tester(
+ "function f() {}; %EnsureFeedbackVectorForFunction(f); return f;");
CHECK(tester.function().IsSerializedForCompilation());
}
@@ -79,32 +81,45 @@ void CheckForSerializedInlinee(const char* source, int argc = 0,
TEST(SerializeInlinedClosure) {
CheckForSerializedInlinee(
"function f() {"
- " return (function g(){ return g; })();"
- "}; f(); return f;");
+ " function g(){ return g; }"
+ " %EnsureFeedbackVectorForFunction(g);"
+ " return g();"
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
TEST(SerializeInlinedFunction) {
CheckForSerializedInlinee(
"function g() {};"
+ "%EnsureFeedbackVectorForFunction(g);"
"function f() {"
" g(); return g;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
TEST(SerializeCallUndefinedReceiver) {
CheckForSerializedInlinee(
"function g(a,b,c) {};"
+ "%EnsureFeedbackVectorForFunction(g);"
"function f() {"
" g(1,2,3); return g;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
TEST(SerializeCallUndefinedReceiver2) {
CheckForSerializedInlinee(
"function g(a,b) {};"
+ "%EnsureFeedbackVectorForFunction(g);"
"function f() {"
" g(1,2); return g;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
TEST(SerializeCallProperty) {
@@ -112,9 +127,12 @@ TEST(SerializeCallProperty) {
"let obj = {"
" g: function g(a,b,c) {}"
"};"
+ "%EnsureFeedbackVectorForFunction(obj.g);"
"function f() {"
" obj.g(1,2,3); return obj.g;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
TEST(SerializeCallProperty2) {
@@ -122,9 +140,12 @@ TEST(SerializeCallProperty2) {
"let obj = {"
" g: function g(a,b) {}"
"};"
+ "%EnsureFeedbackVectorForFunction(obj.g);"
"function f() {"
" obj.g(1,2); return obj.g;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
TEST(SerializeCallAnyReceiver) {
@@ -132,21 +153,26 @@ TEST(SerializeCallAnyReceiver) {
"let obj = {"
" g: function g() {}"
"};"
+ "%EnsureFeedbackVectorForFunction(obj.g);"
"function f() {"
" with(obj) {"
" g(); return g;"
" };"
"};"
+ "%EnsureFeedbackVectorForFunction(f);"
"f(); return f;");
}
TEST(SerializeCallWithSpread) {
CheckForSerializedInlinee(
"function g(args) {};"
+ "%EnsureFeedbackVectorForFunction(g);"
"const arr = [1,2,3];"
"function f() {"
" g(...arr); return g;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
// The following test causes the CallIC of `g` to turn megamorphic,
@@ -157,38 +183,53 @@ TEST(SerializeCallArguments) {
"function g(callee) { callee(); };"
"function h() {};"
"function i() {};"
+ "%EnsureFeedbackVectorForFunction(g);"
"g(h); g(i);"
"function f() {"
" function j() {};"
" g(j);"
" return j;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "var j = f();"
+ "%EnsureFeedbackVectorForFunction(j);"
+ "f(); return f;");
}
TEST(SerializeConstruct) {
CheckForSerializedInlinee(
"function g() {};"
+ "%EnsureFeedbackVectorForFunction(g);"
"function f() {"
" new g(); return g;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
TEST(SerializeConstructWithSpread) {
CheckForSerializedInlinee(
"function g(a, b, c) {};"
+ "%EnsureFeedbackVectorForFunction(g);"
"const arr = [1, 2];"
"function f() {"
" new g(0, ...arr); return g;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
TEST(SerializeConstructSuper) {
CheckForSerializedInlinee(
"class A {};"
"class B extends A { constructor() { super(); } };"
+ "%EnsureFeedbackVectorForFunction(A);"
+ "%EnsureFeedbackVectorForFunction(B);"
"function f() {"
" new B(); return A;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "f(); return f;");
}
TEST(SerializeConditionalJump) {
@@ -196,13 +237,18 @@ TEST(SerializeConditionalJump) {
"function g(callee) { callee(); };"
"function h() {};"
"function i() {};"
+ "%EnsureFeedbackVectorForFunction(g);"
"let a = true;"
"g(h); g(i);"
"function f() {"
" function q() {};"
" if (a) g(q);"
" return q;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "var q = f();"
+ "%EnsureFeedbackVectorForFunction(q);"
+ "f(); return f;");
}
TEST(SerializeUnconditionalJump) {
@@ -210,6 +256,9 @@ TEST(SerializeUnconditionalJump) {
"function g(callee) { callee(); };"
"function h() {};"
"function i() {};"
+ "%EnsureFeedbackVectorForFunction(g);"
+ "%EnsureFeedbackVectorForFunction(h);"
+ "%EnsureFeedbackVectorForFunction(i);"
"let a = false;"
"g(h); g(i);"
"function f() {"
@@ -218,7 +267,11 @@ TEST(SerializeUnconditionalJump) {
" if (a) g(q);"
" else g(p);"
" return p;"
- "}; f(); return f;");
+ "};"
+ "%EnsureFeedbackVectorForFunction(f);"
+ "var p = f();"
+ "%EnsureFeedbackVectorForFunction(p);"
+ "f(); return f;");
}
} // namespace compiler
diff --git a/deps/v8/test/cctest/compiler/serializer-tester.h b/deps/v8/test/cctest/compiler/serializer-tester.h
index f5a5107841..7c8016ef81 100644
--- a/deps/v8/test/cctest/compiler/serializer-tester.h
+++ b/deps/v8/test/cctest/compiler/serializer-tester.h
@@ -27,13 +27,13 @@ class SerializerTester : public HandleAndZoneScope {
explicit SerializerTester(const char* source);
JSFunctionRef function() const { return function_.value(); }
- JSHeapBroker* broker() const { return broker_; }
+ JSHeapBroker* broker() const { return broker_.get(); }
Isolate* isolate() { return main_isolate(); }
private:
CanonicalHandleScope canonical_;
base::Optional<JSFunctionRef> function_;
- JSHeapBroker* broker_ = nullptr;
+ std::unique_ptr<JSHeapBroker> broker_;
};
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
index 0414532002..eb8fa641cf 100644
--- a/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
+++ b/deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/basic-block-profiler.h"
-#include "src/objects-inl.h"
+#include "src/diagnostics/basic-block-profiler.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-branch-combine.cc b/deps/v8/test/cctest/compiler/test-branch-combine.cc
index b36d61fbc6..25914222de 100644
--- a/deps/v8/test/cctest/compiler/test-branch-combine.cc
+++ b/deps/v8/test/cctest/compiler/test-branch-combine.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/base/overflowing-math.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
diff --git a/deps/v8/test/cctest/compiler/test-code-assembler.cc b/deps/v8/test/cctest/compiler/test-code-assembler.cc
index 154c77af42..4dc4ac03e1 100644
--- a/deps/v8/test/cctest/compiler/test-code-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-assembler.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/code-assembler.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/opcodes.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -18,8 +18,8 @@ namespace compiler {
namespace {
-typedef CodeAssemblerLabel Label;
-typedef CodeAssemblerVariable Variable;
+using Label = CodeAssemblerLabel;
+using Variable = CodeAssemblerVariable;
Node* SmiTag(CodeAssembler& m, Node* value) {
int32_t constant_value;
@@ -41,8 +41,8 @@ Node* SmiFromInt32(CodeAssembler& m, Node* value) {
}
Node* LoadObjectField(CodeAssembler& m, Node* object, int offset,
- MachineType rep = MachineType::AnyTagged()) {
- return m.Load(rep, object, m.IntPtrConstant(offset - kHeapObjectTag));
+ MachineType type = MachineType::AnyTagged()) {
+ return m.Load(type, object, m.IntPtrConstant(offset - kHeapObjectTag));
}
Node* LoadMap(CodeAssembler& m, Node* object) {
@@ -529,7 +529,7 @@ TEST(GotoIfExceptionMultiple) {
result = ft.Call(isolate->factory()->undefined_value(),
isolate->factory()->to_string_tag_symbol())
.ToHandleChecked();
- CHECK(String::cast(*result)->IsOneByteEqualTo(OneByteVector("undefined")));
+ CHECK(String::cast(*result).IsOneByteEqualTo(OneByteVector("undefined")));
// First handler returns a number.
result = ft.Call(isolate->factory()->to_string_tag_symbol(),
@@ -604,6 +604,14 @@ TEST(TestCodeAssemblerCodeComment) {
CHECK(found_comment);
}
+TEST(StaticAssert) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ CodeAssemblerTester asm_tester(isolate);
+ CodeAssembler m(asm_tester.state());
+ m.StaticAssert(m.ReinterpretCast<BoolT>(m.Int32Constant(1)));
+ USE(asm_tester.GenerateCode());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index ed39225747..bb686e8e70 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/linkage.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/optimized-compilation-info.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h"
@@ -192,9 +192,9 @@ Handle<Code> BuildSetupFunction(Isolate* isolate,
//
// Finally, it is important that this function does not call `RecordWrite` which
// is why "setup" is in charge of all allocations and we are using
-// SKIP_WRITE_BARRIER. The reason for this is that `RecordWrite` may clobber the
-// top 64 bits of Simd128 registers. This is the case on x64, ia32 and Arm64 for
-// example.
+// UNSAFE_SKIP_WRITE_BARRIER. The reason for this is that `RecordWrite` may
+// clobber the top 64 bits of Simd128 registers. This is the case on x64, ia32
+// and Arm64 for example.
Handle<Code> BuildTeardownFunction(Isolate* isolate,
CallDescriptor* call_descriptor,
std::vector<AllocatedOperand> parameters) {
@@ -206,7 +206,8 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
Node* param = __ Parameter(i + 2);
switch (parameters[i].representation()) {
case MachineRepresentation::kTagged:
- __ StoreFixedArrayElement(result_array, i, param, SKIP_WRITE_BARRIER);
+ __ StoreFixedArrayElement(result_array, i, param,
+ UNSAFE_SKIP_WRITE_BARRIER);
break;
// Box FP values into HeapNumbers.
case MachineRepresentation::kFloat32:
@@ -229,7 +230,7 @@ Handle<Code> BuildTeardownFunction(Isolate* isolate,
->I32x4ExtractLane(lane),
param));
__ StoreFixedArrayElement(vector, lane, lane_value,
- SKIP_WRITE_BARRIER);
+ UNSAFE_SKIP_WRITE_BARRIER);
}
break;
}
@@ -249,7 +250,7 @@ void PrintStateValue(std::ostream& os, Isolate* isolate, Handle<Object> value,
switch (operand.representation()) {
case MachineRepresentation::kTagged:
if (value->IsSmi()) {
- os << Smi::cast(*value)->value();
+ os << Smi::cast(*value).value();
} else {
os << value->Number();
}
@@ -262,7 +263,7 @@ void PrintStateValue(std::ostream& os, Isolate* isolate, Handle<Object> value,
FixedArray vector = FixedArray::cast(*value);
os << "[";
for (int lane = 0; lane < 4; lane++) {
- os << Smi::cast(*vector->GetValueChecked<Smi>(isolate, lane))->value();
+ os << Smi::cast(vector.get(lane)).value();
if (lane < 3) {
os << ", ";
}
@@ -272,7 +273,6 @@ void PrintStateValue(std::ostream& os, Isolate* isolate, Handle<Object> value,
}
default:
UNREACHABLE();
- break;
}
os << " (" << operand.representation() << " ";
if (operand.location_kind() == AllocatedOperand::REGISTER) {
@@ -752,8 +752,7 @@ class TestEnvironment : public HandleAndZoneScope {
state_out->set(to_index, *constant_value);
} else {
int from_index = OperandToStatePosition(AllocatedOperand::cast(from));
- state_out->set(to_index, *state_out->GetValueChecked<Object>(
- main_isolate(), from_index));
+ state_out->set(to_index, state_out->get(from_index));
}
}
return state_out;
@@ -773,10 +772,8 @@ class TestEnvironment : public HandleAndZoneScope {
OperandToStatePosition(AllocatedOperand::cast(swap->destination()));
int rhs_index =
OperandToStatePosition(AllocatedOperand::cast(swap->source()));
- Handle<Object> lhs =
- state_out->GetValueChecked<Object>(main_isolate(), lhs_index);
- Handle<Object> rhs =
- state_out->GetValueChecked<Object>(main_isolate(), rhs_index);
+ Handle<Object> lhs{state_out->get(lhs_index), main_isolate()};
+ Handle<Object> rhs{state_out->get(rhs_index), main_isolate()};
state_out->set(lhs_index, *rhs);
state_out->set(rhs_index, *lhs);
}
@@ -786,10 +783,8 @@ class TestEnvironment : public HandleAndZoneScope {
// Compare the given state with a reference.
void CheckState(Handle<FixedArray> actual, Handle<FixedArray> expected) {
for (int i = 0; i < static_cast<int>(layout_.size()); i++) {
- Handle<Object> actual_value =
- actual->GetValueChecked<Object>(main_isolate(), i);
- Handle<Object> expected_value =
- expected->GetValueChecked<Object>(main_isolate(), i);
+ Handle<Object> actual_value{actual->get(i), main_isolate()};
+ Handle<Object> expected_value{expected->get(i), main_isolate()};
if (!CompareValues(actual_value, expected_value,
layout_[i].representation())) {
std::ostringstream expected_str;
@@ -797,8 +792,8 @@ class TestEnvironment : public HandleAndZoneScope {
layout_[i]);
std::ostringstream actual_str;
PrintStateValue(actual_str, main_isolate(), actual_value, layout_[i]);
- V8_Fatal(__FILE__, __LINE__, "Expected: '%s' but got '%s'",
- expected_str.str().c_str(), actual_str.str().c_str());
+ FATAL("Expected: '%s' but got '%s'", expected_str.str().c_str(),
+ actual_str.str().c_str());
}
}
}
@@ -812,13 +807,11 @@ class TestEnvironment : public HandleAndZoneScope {
return actual->StrictEquals(*expected);
case MachineRepresentation::kSimd128:
for (int lane = 0; lane < 4; lane++) {
- Handle<Smi> actual_lane =
- FixedArray::cast(*actual)->GetValueChecked<Smi>(main_isolate(),
- lane);
- Handle<Smi> expected_lane =
- FixedArray::cast(*expected)->GetValueChecked<Smi>(main_isolate(),
- lane);
- if (*actual_lane != *expected_lane) {
+ int actual_lane =
+ Smi::cast(FixedArray::cast(*actual).get(lane)).value();
+ int expected_lane =
+ Smi::cast(FixedArray::cast(*expected).get(lane)).value();
+ if (actual_lane != expected_lane) {
return false;
}
}
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index 85dd389287..ca26e0b49f 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -110,8 +110,8 @@ class InterpreterState {
};
// Internally, the state is a normalized permutation of Value pairs.
- typedef Key Value;
- typedef std::map<Key, Value> OperandMap;
+ using Value = Key;
+ using OperandMap = std::map<Key, Value>;
Value read(const InstructionOperand& op) const {
OperandMap::const_iterator it = values_.find(KeyFor(op));
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index a806cd857f..6d4a8eda1d 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -12,15 +12,15 @@
#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
namespace compiler {
-typedef v8::internal::compiler::Instruction TestInstr;
-typedef v8::internal::compiler::InstructionSequence TestInstrSeq;
+using TestInstr = v8::internal::compiler::Instruction;
+using TestInstrSeq = v8::internal::compiler::InstructionSequence;
// A testing helper for the register code abstraction.
class InstructionTester : public HandleAndZoneScope {
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index 23711bb3e7..aef10b472d 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler.h"
+#include "src/codegen/assembler.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/heap/factory-inl.h"
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 5e6e3b3cc2..458b1e521b 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -8,12 +8,12 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/heap/factory.h"
-#include "src/objects-inl.h"
-#include "src/property.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 559ed1088c..cc717c618e 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -10,9 +10,9 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/typer.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory-inl.h"
-#include "src/isolate.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 994fea0868..9a149f67f4 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/codegen/source-position.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/jump-threading.h"
-#include "src/source-position.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index b8e9479675..75899aeaac 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/code-factory.h"
-#include "src/compiler.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
@@ -13,8 +14,7 @@
#include "src/compiler/operator.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
-#include "src/objects-inl.h"
-#include "src/optimized-compilation-info.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/zone/zone.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index 61736ae2dc..1376823657 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -8,7 +8,7 @@
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/typer.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 2ea4334327..4a81ec2691 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -7,14 +7,14 @@
#include <limits>
#include <memory>
-#include "src/assembler.h"
#include "src/base/bits.h"
-#include "src/compiler.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/macro-assembler.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/machine-type.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -151,7 +151,7 @@ void TestReturnMultipleValues(MachineType type) {
// m.Parameter(0) is the WasmContext.
Node* p0 = m.Parameter(1);
Node* p1 = m.Parameter(2);
- typedef Node* Node_ptr;
+ using Node_ptr = Node*;
std::unique_ptr<Node_ptr[]> returns(new Node_ptr[count]);
for (int i = 0; i < count; ++i) {
if (i % 3 == 0) returns[i] = Add(m, type, p0, p1);
@@ -187,7 +187,7 @@ void TestReturnMultipleValues(MachineType type) {
handles.main_isolate(), code->raw_instruction_size());
wasm::WasmCodeRefScope wasm_code_ref_scope;
byte* code_start =
- module->AddCodeForTesting(code)->instructions().start();
+ module->AddCodeForTesting(code)->instructions().begin();
RawMachineAssemblerTester<int32_t> mt(Code::Kind::JS_TO_WASM_FUNCTION);
const int input_count = 2 + param_count;
@@ -276,7 +276,7 @@ void ReturnLastValue(MachineType type) {
std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
handles.main_isolate(), code->raw_instruction_size());
wasm::WasmCodeRefScope wasm_code_ref_scope;
- byte* code_start = module->AddCodeForTesting(code)->instructions().start();
+ byte* code_start = module->AddCodeForTesting(code)->instructions().begin();
// Generate caller.
int expect = return_count - 1;
@@ -338,7 +338,7 @@ void ReturnSumOfReturns(MachineType type) {
std::shared_ptr<wasm::NativeModule> module = AllocateNativeModule(
handles.main_isolate(), code->raw_instruction_size());
wasm::WasmCodeRefScope wasm_code_ref_scope;
- byte* code_start = module->AddCodeForTesting(code)->instructions().start();
+ byte* code_start = module->AddCodeForTesting(code)->instructions().begin();
// Generate caller.
RawMachineAssemblerTester<int32_t> mt;
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index a8927de33c..d48d4c86e3 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -36,8 +36,7 @@ static Operator dummy_operator3(IrOpcode::kParameter, Operator::kNoWrite,
namespace {
-typedef std::multiset<Node*, std::less<Node*>> NodeMSet;
-
+using NodeMSet = std::multiset<Node*, std::less<Node*>>;
void CheckUseChain(Node* node, Node** uses, int use_count) {
// Check ownership.
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index a1acfab06e..33e5cf1548 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -7,10 +7,10 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/type-cache.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/graph-and-builders.h"
#include "test/cctest/compiler/value-helper.h"
namespace v8 {
@@ -544,7 +544,7 @@ TEST(SingleChanges) {
Type::Number(), MachineRepresentation::kFloat64);
CheckChange(IrOpcode::kTruncateTaggedToFloat64,
MachineRepresentation::kTagged, Type::NumberOrUndefined(),
- MachineRepresentation::kFloat64);
+ UseInfo(MachineRepresentation::kFloat64, Truncation::Float64()));
CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
Type::Signed31(), MachineRepresentation::kFloat64);
@@ -663,6 +663,18 @@ TEST(CompressedAndTagged) {
CheckChange(IrOpcode::kChangeTaggedToCompressedSigned,
MachineRepresentation::kTagged, Type::SignedSmall(),
MachineRepresentation::kCompressedSigned);
+
+ // TaggedSigned to CompressedPointer
+ CheckChange(IrOpcode::kCheckedTaggedToCompressedPointer,
+ MachineRepresentation::kTaggedSigned, Type::SignedSmall(),
+ UseInfo(MachineRepresentation::kCompressedPointer,
+ Truncation::Any(), TypeCheckKind::kHeapObject));
+
+ // CompressedSigned to TaggedPointer
+ CheckChange(IrOpcode::kCheckedCompressedToTaggedPointer,
+ MachineRepresentation::kCompressedSigned, Type::SignedSmall(),
+ UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
+ TypeCheckKind::kHeapObject));
}
static void TestMinusZeroCheck(IrOpcode::Value expected, Type from_type) {
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index b6043f2a9d..643a867c49 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -4,15 +4,15 @@
#include <utility>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/pipeline.h"
#include "src/debug/debug-interface.h"
-#include "src/execution.h"
-#include "src/handles.h"
+#include "src/execution/execution.h"
+#include "src/handles/handles.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects-inl.h"
-#include "src/optimized-compilation-info.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "test/cctest/cctest.h"
@@ -115,7 +115,8 @@ class BytecodeGraphTester {
.ToLocalChecked());
Handle<JSFunction> function =
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*api_function));
- CHECK(function->shared()->HasBytecodeArray());
+ JSFunction::EnsureFeedbackVector(function);
+ CHECK(function->shared().HasBytecodeArray());
Zone zone(isolate_->allocator(), ZONE_NAME);
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
@@ -208,7 +209,7 @@ TEST(BytecodeGraphBuilderReturnStatements) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -233,7 +234,7 @@ TEST(BytecodeGraphBuilderPrimitiveExpressions) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -289,7 +290,7 @@ TEST(BytecodeGraphBuilderTwoParameterTests) {
SNPrintF(script, "function %s(p1, p2) { %s }\n%s(0, 0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0), snippets[i].parameter(1))
@@ -332,7 +333,7 @@ TEST(BytecodeGraphBuilderNamedLoad) {
SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -386,7 +387,7 @@ TEST(BytecodeGraphBuilderKeyedLoad) {
SNPrintF(script, "function %s(p1, p2) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0), snippets[i].parameter(1))
@@ -435,7 +436,7 @@ void TestBytecodeGraphBuilderNamedStore(size_t shard) {
SNPrintF(script, "function %s(p1) { %s };\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -493,7 +494,7 @@ void TestBytecodeGraphBuilderKeyedStore(size_t shard) {
SNPrintF(script, "function %s(p1, p2) { %s };\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -527,7 +528,7 @@ TEST(BytecodeGraphBuilderPropertyCall) {
SNPrintF(script, "function %s(p1) { %s };\n%s({func() {}});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -740,7 +741,7 @@ TEST(BytecodeGraphBuilderToName) {
SNPrintF(script, "function %s() { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -769,7 +770,7 @@ TEST(BytecodeGraphBuilderLogicalNot) {
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -804,7 +805,7 @@ TEST(BytecodeGraphBuilderTypeOf) {
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -848,7 +849,7 @@ TEST(BytecodeGraphBuilderCompareTypeOf) {
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -900,7 +901,7 @@ TEST(BytecodeGraphBuilderCountOperation) {
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -937,7 +938,7 @@ TEST(BytecodeGraphBuilderDelete) {
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -988,7 +989,7 @@ TEST(BytecodeGraphBuilderDeleteGlobal) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s %s({});", snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1023,7 +1024,7 @@ TEST(BytecodeGraphBuilderDeleteLookupSlot) {
SNPrintF(script, "%s %s %s", function_prologue, snippets[i].code_snippet,
function_epilogue);
- BytecodeGraphTester tester(isolate, script.start(), "t");
+ BytecodeGraphTester tester(isolate, script.begin(), "t");
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1062,7 +1063,7 @@ TEST(BytecodeGraphBuilderLookupSlot) {
SNPrintF(script, "%s %s %s", function_prologue, snippets[i].code_snippet,
function_epilogue);
- BytecodeGraphTester tester(isolate, script.start(), "t");
+ BytecodeGraphTester tester(isolate, script.begin(), "t");
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1089,7 +1090,7 @@ TEST(BytecodeGraphBuilderLookupContextSlot) {
inner_eval_prologue, inner_eval_snippets[i].code_snippet,
inner_eval_epilogue, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*inner_eval_snippets[i].return_value()));
@@ -1111,7 +1112,7 @@ TEST(BytecodeGraphBuilderLookupContextSlot) {
outer_eval_prologue, outer_eval_snippets[i].code_snippet,
outer_eval_epilogue, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*outer_eval_snippets[i].return_value()));
@@ -1138,7 +1139,7 @@ TEST(BytecodeGraphBuilderLookupGlobalSlot) {
inner_eval_prologue, inner_eval_snippets[i].code_snippet,
inner_eval_epilogue, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*inner_eval_snippets[i].return_value()));
@@ -1160,7 +1161,7 @@ TEST(BytecodeGraphBuilderLookupGlobalSlot) {
outer_eval_prologue, outer_eval_snippets[i].code_snippet,
outer_eval_epilogue, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*outer_eval_snippets[i].return_value()));
@@ -1201,7 +1202,7 @@ TEST(BytecodeGraphBuilderLookupSlotWide) {
SNPrintF(script, "%s %s %s", function_prologue, snippets[i].code_snippet,
function_epilogue);
- BytecodeGraphTester tester(isolate, script.start(), "t");
+ BytecodeGraphTester tester(isolate, script.begin(), "t");
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1228,7 +1229,7 @@ TEST(BytecodeGraphBuilderCallLookupSlot) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1278,7 +1279,7 @@ TEST(BytecodeGraphBuilderEval) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1304,7 +1305,7 @@ TEST(BytecodeGraphBuilderEvalParams) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s(p1) { %s }\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -1406,7 +1407,7 @@ TEST(BytecodeGraphBuilderCompare) {
SNPrintF(script, "function %s(p1, p2) { %s }\n%s({}, {});", kFunctionName,
get_code_snippet(kCompareOperators[i]), kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
for (size_t j = 0; j < arraysize(lhs_values); j++) {
for (size_t k = 0; k < arraysize(rhs_values); k++) {
@@ -1458,7 +1459,7 @@ TEST(BytecodeGraphBuilderTestIn) {
SNPrintF(script, "function %s(p1, p2) { %s }\n%s({}, {});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0), snippets[i].parameter(1))
@@ -1488,7 +1489,7 @@ TEST(BytecodeGraphBuilderTestInstanceOf) {
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -1517,7 +1518,7 @@ TEST(BytecodeGraphBuilderTryCatch) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1554,7 +1555,7 @@ TEST(BytecodeGraphBuilderTryFinally1) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1577,7 +1578,7 @@ TEST(BytecodeGraphBuilderTryFinally2) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
v8::Local<v8::String> expected_string = v8_str(snippets[i].return_value());
CHECK(
@@ -1605,7 +1606,7 @@ TEST(BytecodeGraphBuilderThrow) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
v8::Local<v8::String> expected_string = v8_str(snippets[i].return_value());
CHECK(
@@ -1664,7 +1665,7 @@ TEST(BytecodeGraphBuilderContext) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s", snippets[i].code_snippet);
- BytecodeGraphTester tester(isolate, script.start(), "f");
+ BytecodeGraphTester tester(isolate, script.begin(), "f");
auto callable = tester.GetCallable<>("f");
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1727,7 +1728,7 @@ TEST(BytecodeGraphBuilderLoadContext) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s", snippets[i].code_snippet);
- BytecodeGraphTester tester(isolate, script.start(), "*");
+ BytecodeGraphTester tester(isolate, script.begin(), "*");
auto callable = tester.GetCallable<Handle<Object>>("f");
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -1757,7 +1758,7 @@ TEST(BytecodeGraphBuilderCreateArgumentsNoParameters) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s\n%s();", snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1798,7 +1799,7 @@ TEST(BytecodeGraphBuilderCreateArguments) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s\n%s();", snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable =
tester.GetCallable<Handle<Object>, Handle<Object>, Handle<Object>>();
Handle<Object> return_value =
@@ -1840,7 +1841,7 @@ TEST(BytecodeGraphBuilderCreateRestArguments) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s\n%s();", snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable =
tester.GetCallable<Handle<Object>, Handle<Object>, Handle<Object>>();
Handle<Object> return_value =
@@ -1878,7 +1879,7 @@ TEST(BytecodeGraphBuilderRegExpLiterals) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1916,7 +1917,7 @@ TEST(BytecodeGraphBuilderArrayLiterals) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -1978,7 +1979,7 @@ TEST(BytecodeGraphBuilderObjectLiterals) {
ScopedVector<char> script(4096);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2085,7 +2086,7 @@ TEST(BytecodeGraphBuilderIf) {
SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -2114,7 +2115,7 @@ TEST(BytecodeGraphBuilderConditionalOperator) {
SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -2160,7 +2161,7 @@ TEST(BytecodeGraphBuilderSwitch) {
SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -2208,7 +2209,7 @@ TEST(BytecodeGraphBuilderSwitchMerge) {
SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0)).ToHandleChecked();
@@ -2266,7 +2267,7 @@ TEST(BytecodeGraphBuilderNestedSwitch) {
SNPrintF(script, "function %s(p1, p2) { %s };\n%s(0, 0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
Handle<Object> return_value =
callable(snippets[i].parameter(0), snippets[i].parameter(1))
@@ -2307,7 +2308,7 @@ TEST(BytecodeGraphBuilderBreakableBlocks) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2354,7 +2355,7 @@ TEST(BytecodeGraphBuilderWhile) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2401,7 +2402,7 @@ TEST(BytecodeGraphBuilderDo) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2493,7 +2494,7 @@ TEST(BytecodeGraphBuilderFor) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2563,7 +2564,7 @@ TEST(BytecodeGraphBuilderForIn) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2654,7 +2655,7 @@ TEST(BytecodeGraphBuilderForOf) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2732,7 +2733,7 @@ TEST(BytecodeGraphBuilderWithStatement) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2778,7 +2779,7 @@ TEST(BytecodeGraphBuilderConstDeclaration) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2790,7 +2791,7 @@ TEST(BytecodeGraphBuilderConstDeclaration) {
SNPrintF(script, "function %s() {'use strict'; %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2819,7 +2820,7 @@ TEST(BytecodeGraphBuilderConstDeclarationLookupSlots) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2831,7 +2832,7 @@ TEST(BytecodeGraphBuilderConstDeclarationLookupSlots) {
SNPrintF(script, "function %s() {'use strict'; %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*snippets[i].return_value()));
@@ -2878,7 +2879,7 @@ TEST(BytecodeGraphBuilderConstInLookupContextChain) {
SNPrintF(script, "%s %s %s", prologue, const_decl[i].code_snippet,
epilogue);
- BytecodeGraphTester tester(isolate, script.start(), "*");
+ BytecodeGraphTester tester(isolate, script.begin(), "*");
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*const_decl[i].return_value()));
@@ -2910,7 +2911,7 @@ TEST(BytecodeGraphBuilderIllegalConstDeclaration) {
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
illegal_const_decl[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
v8::Local<v8::String> expected_string =
v8_str(illegal_const_decl[i].return_value());
@@ -2925,7 +2926,7 @@ TEST(BytecodeGraphBuilderIllegalConstDeclaration) {
SNPrintF(script, "function %s() {'use strict'; %s }\n%s();", kFunctionName,
illegal_const_decl[i].code_snippet, kFunctionName);
- BytecodeGraphTester tester(isolate, script.start());
+ BytecodeGraphTester tester(isolate, script.begin());
v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
v8::Local<v8::String> expected_string =
v8_str(illegal_const_decl[i].return_value());
diff --git a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
index 5229ee7cfe..966946ef99 100644
--- a/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
+++ b/deps/v8/test/cctest/compiler/test-run-calls-to-external-references.cc
@@ -2,7 +2,7 @@
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-external-refs.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-deopt.cc b/deps/v8/test/cctest/compiler/test-run-deopt.cc
index 76dc9acae3..049a8b3956 100644
--- a/deps/v8/test/cctest/compiler/test-run-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-run-deopt.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/frames-inl.h"
+#include "src/execution/frames-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index 82c4c447f2..80237dbfea 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/codegen/optimized-compilation-info.h"
#include "src/objects/string.h"
-#include "src/optimized-compilation-info.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
index 76cb9a2843..85428883e9 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsbranches.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index b1e9ddfce3..88dc62fce1 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/contexts.h"
-#include "src/flags.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/flags/flags.h"
+#include "src/objects/contexts.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -165,7 +165,7 @@ TEST(RuntimeCallInline) {
TEST(EvalCall) {
FunctionTester T("(function(a,b) { return eval(a); })");
- Handle<JSObject> g(T.function->context()->global_object()->global_proxy(),
+ Handle<JSObject> g(T.function->context().global_object().global_proxy(),
T.isolate);
T.CheckCall(T.Val(23), T.Val("17 + 6"), T.undefined());
@@ -190,7 +190,7 @@ TEST(ReceiverPatching) {
// patches an undefined receiver to the global receiver. If this starts to
// fail once we fix the calling protocol, just remove this test.
FunctionTester T("(function(a) { return this; })");
- Handle<JSObject> g(T.function->context()->global_object()->global_proxy(),
+ Handle<JSObject> g(T.function->context().global_object().global_proxy(),
T.isolate);
T.CheckCall(g, T.undefined());
}
diff --git a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
index 8da2b53fe6..a658886b5c 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
index 83282e905d..2ce6242e9e 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -17,7 +17,7 @@ TEST(ArgumentsMapped) {
Handle<Object> arguments;
T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandle(&arguments);
CHECK(arguments->IsJSObject() && !arguments->IsJSArray());
- CHECK(JSObject::cast(*arguments)->HasSloppyArgumentsElements());
+ CHECK(JSObject::cast(*arguments).HasSloppyArgumentsElements());
Handle<String> l = T.isolate->factory()->length_string();
Handle<Object> length =
Object::GetProperty(T.isolate, arguments, l).ToHandleChecked();
@@ -31,7 +31,7 @@ TEST(ArgumentsUnmapped) {
Handle<Object> arguments;
T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandle(&arguments);
CHECK(arguments->IsJSObject() && !arguments->IsJSArray());
- CHECK(!JSObject::cast(*arguments)->HasSloppyArgumentsElements());
+ CHECK(!JSObject::cast(*arguments).HasSloppyArgumentsElements());
Handle<String> l = T.isolate->factory()->length_string();
Handle<Object> length =
Object::GetProperty(T.isolate, arguments, l).ToHandleChecked();
@@ -45,7 +45,7 @@ TEST(ArgumentsRest) {
Handle<Object> arguments;
T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandle(&arguments);
CHECK(arguments->IsJSObject() && arguments->IsJSArray());
- CHECK(!JSObject::cast(*arguments)->HasSloppyArgumentsElements());
+ CHECK(!JSObject::cast(*arguments).HasSloppyArgumentsElements());
Handle<String> l = T.isolate->factory()->length_string();
Handle<Object> length =
Object::GetProperty(T.isolate, arguments, l).ToHandleChecked();
diff --git a/deps/v8/test/cctest/compiler/test-run-jsops.cc b/deps/v8/test/cctest/compiler/test-run-jsops.cc
index dfa2299cff..e652ee19b7 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsops.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc
index d3ed7d6405..3a8e9d61d4 100644
--- a/deps/v8/test/cctest/compiler/test-run-load-store.cc
+++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc
@@ -9,10 +9,9 @@
#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
#include "test/cctest/compiler/value-helper.h"
@@ -219,9 +218,9 @@ void CheckEq<Smi>(Smi in_value, Smi out_value) {
// Initializes the buffer with some raw data respecting requested representation
// of the values.
template <typename CType>
-void InitBuffer(CType* buffer, size_t length, MachineType rep) {
+void InitBuffer(CType* buffer, size_t length, MachineType type) {
const size_t kBufferSize = sizeof(CType) * length;
- if (!rep.IsTagged()) {
+ if (!type.IsTagged()) {
byte* raw = reinterpret_cast<byte*>(buffer);
for (size_t i = 0; i < kBufferSize; i++) {
raw[i] = static_cast<byte>((i + kBufferSize) ^ 0xAA);
@@ -233,13 +232,13 @@ void InitBuffer(CType* buffer, size_t length, MachineType rep) {
// pointer decompression that may be happenning during load.
Isolate* isolate = CcTest::InitIsolateOnce();
Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
- if (rep.IsTaggedSigned()) {
+ if (type.IsTaggedSigned()) {
for (size_t i = 0; i < length; i++) {
smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
}
} else {
memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
- if (!rep.IsTaggedPointer()) {
+ if (!type.IsTaggedPointer()) {
// Also add some Smis if we are checking AnyTagged case.
for (size_t i = 0; i < length / 2; i++) {
smi_view[i] =
@@ -250,11 +249,11 @@ void InitBuffer(CType* buffer, size_t length, MachineType rep) {
}
template <typename CType>
-void RunLoadImmIndex(MachineType rep, TestAlignment t) {
+void RunLoadImmIndex(MachineType type, TestAlignment t) {
const int kNumElems = 16;
CType buffer[kNumElems];
- InitBuffer(buffer, kNumElems, rep);
+ InitBuffer(buffer, kNumElems, type);
// Test with various large and small offsets.
for (int offset = -1; offset <= 200000; offset *= -5) {
@@ -262,7 +261,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
BufferedRawMachineAssemblerTester<CType> m;
void* base_pointer = &buffer[0] - offset;
#ifdef V8_COMPRESS_POINTERS
- if (rep.IsTagged()) {
+ if (type.IsTagged()) {
// When pointer compression is enabled then we need to access only
// the lower 32-bit of the tagged value while the buffer contains
// full 64-bit values.
@@ -272,9 +271,9 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
Node* base = m.PointerConstant(base_pointer);
Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
if (t == TestAlignment::kAligned) {
- m.Return(m.Load(rep, base, index));
+ m.Return(m.Load(type, base, index));
} else if (t == TestAlignment::kUnaligned) {
- m.Return(m.UnalignedLoad(rep, base, index));
+ m.Return(m.UnalignedLoad(type, base, index));
} else {
UNREACHABLE();
}
@@ -285,7 +284,7 @@ void RunLoadImmIndex(MachineType rep, TestAlignment t) {
}
template <typename CType>
-void RunLoadStore(MachineType rep, TestAlignment t) {
+void RunLoadStore(MachineType type, TestAlignment t) {
const int kNumElems = 16;
CType in_buffer[kNumElems];
CType out_buffer[kNumElems];
@@ -294,7 +293,7 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
STATIC_ASSERT(sizeof(CType) <= sizeof(zap_data));
MemCopy(&zap_value, &zap_data, sizeof(CType));
- InitBuffer(in_buffer, kNumElems, rep);
+ InitBuffer(in_buffer, kNumElems, type);
for (int32_t x = 0; x < kNumElems; x++) {
int32_t y = kNumElems - x - 1;
@@ -306,11 +305,12 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
Node* out_base = m.PointerConstant(out_buffer);
Node* out_index = m.IntPtrConstant(y * sizeof(CType));
if (t == TestAlignment::kAligned) {
- Node* load = m.Load(rep, in_base, in_index);
- m.Store(rep.representation(), out_base, out_index, load, kNoWriteBarrier);
+ Node* load = m.Load(type, in_base, in_index);
+ m.Store(type.representation(), out_base, out_index, load,
+ kNoWriteBarrier);
} else if (t == TestAlignment::kUnaligned) {
- Node* load = m.UnalignedLoad(rep, in_base, in_index);
- m.UnalignedStore(rep.representation(), out_base, out_index, load);
+ Node* load = m.UnalignedLoad(type, in_base, in_index);
+ m.UnalignedStore(type.representation(), out_base, out_index, load);
}
m.Return(m.Int32Constant(OK));
@@ -329,12 +329,12 @@ void RunLoadStore(MachineType rep, TestAlignment t) {
}
template <typename CType>
-void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
+void RunUnalignedLoadStoreUnalignedAccess(MachineType type) {
CType in, out;
byte in_buffer[2 * sizeof(CType)];
byte out_buffer[2 * sizeof(CType)];
- InitBuffer(&in, 1, rep);
+ InitBuffer(&in, 1, type);
for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
// Direct write to &in_buffer[x] may cause unaligned access in C++ code so
@@ -347,11 +347,11 @@ void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
Node* in_base = m.PointerConstant(in_buffer);
Node* in_index = m.IntPtrConstant(x);
- Node* load = m.UnalignedLoad(rep, in_base, in_index);
+ Node* load = m.UnalignedLoad(type, in_base, in_index);
Node* out_base = m.PointerConstant(out_buffer);
Node* out_index = m.IntPtrConstant(y);
- m.UnalignedStore(rep.representation(), out_base, out_index, load);
+ m.UnalignedStore(type.representation(), out_base, out_index, load);
m.Return(m.Int32Constant(OK));
@@ -392,10 +392,6 @@ TEST(RunUnalignedLoadImmIndex) {
RunLoadImmIndex<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
RunLoadImmIndex<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
RunLoadImmIndex<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
- RunLoadImmIndex<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
- RunLoadImmIndex<HeapObject>(MachineType::TaggedPointer(),
- TestAlignment::kUnaligned);
- RunLoadImmIndex<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
RunLoadImmIndex<float>(MachineType::Float32(), TestAlignment::kUnaligned);
RunLoadImmIndex<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
@@ -429,10 +425,6 @@ TEST(RunUnalignedLoadStore) {
RunLoadStore<int32_t>(MachineType::Int32(), TestAlignment::kUnaligned);
RunLoadStore<uint32_t>(MachineType::Uint32(), TestAlignment::kUnaligned);
RunLoadStore<void*>(MachineType::Pointer(), TestAlignment::kUnaligned);
- RunLoadStore<Smi>(MachineType::TaggedSigned(), TestAlignment::kUnaligned);
- RunLoadStore<HeapObject>(MachineType::TaggedPointer(),
- TestAlignment::kUnaligned);
- RunLoadStore<Object>(MachineType::AnyTagged(), TestAlignment::kUnaligned);
RunLoadStore<float>(MachineType::Float32(), TestAlignment::kUnaligned);
RunLoadStore<double>(MachineType::Float64(), TestAlignment::kUnaligned);
#if V8_TARGET_ARCH_64_BIT
@@ -446,10 +438,6 @@ TEST(RunUnalignedLoadStoreUnalignedAccess) {
RunUnalignedLoadStoreUnalignedAccess<int32_t>(MachineType::Int32());
RunUnalignedLoadStoreUnalignedAccess<uint32_t>(MachineType::Uint32());
RunUnalignedLoadStoreUnalignedAccess<void*>(MachineType::Pointer());
- RunUnalignedLoadStoreUnalignedAccess<Smi>(MachineType::TaggedSigned());
- RunUnalignedLoadStoreUnalignedAccess<HeapObject>(
- MachineType::TaggedPointer());
- RunUnalignedLoadStoreUnalignedAccess<Object>(MachineType::AnyTagged());
RunUnalignedLoadStoreUnalignedAccess<float>(MachineType::Float32());
RunUnalignedLoadStoreUnalignedAccess<double>(MachineType::Float64());
#if V8_TARGET_ARCH_64_BIT
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 35142ca098..1e5a73389e 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -10,15 +10,13 @@
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/boxed-float.h"
-#include "src/objects-inl.h"
-#include "src/utils.h"
+#include "src/utils/boxed-float.h"
+#include "src/utils/utils.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
#include "test/cctest/compiler/value-helper.h"
-
namespace v8 {
namespace internal {
namespace compiler {
@@ -3998,6 +3996,87 @@ TEST(RunFloat64MulP) {
}
}
+TEST(RunFloat32MulAndFloat32Neg) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
+ m.Return(m.Float32Neg(m.Float32Mul(m.Parameter(0), m.Parameter(1))));
+
+ FOR_FLOAT32_INPUTS(i) {
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ(-(i * j), m.Call(i, j)); }
+ }
+}
+
+TEST(RunFloat64MulAndFloat64Neg) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
+ m.Return(m.Float64Neg(m.Float64Mul(m.Parameter(0), m.Parameter(1))));
+
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ(-(i * j), m.Call(i, j)); }
+ }
+}
+
+TEST(RunFloat32NegAndFloat32Mul1) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
+ m.Return(m.Float32Mul(m.Float32Neg(m.Parameter(0)), m.Parameter(1)));
+
+ FOR_FLOAT32_INPUTS(i) {
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ((-i * j), m.Call(i, j)); }
+ }
+}
+
+TEST(RunFloat64NegAndFloat64Mul1) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
+ m.Return(m.Float64Mul(m.Float64Neg(m.Parameter(0)), m.Parameter(1)));
+
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ((-i * j), m.Call(i, j)); }
+ }
+}
+
+TEST(RunFloat32NegAndFloat32Mul2) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
+ m.Return(m.Float32Mul(m.Parameter(0), m.Float32Neg(m.Parameter(1))));
+
+ FOR_FLOAT32_INPUTS(i) {
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ((i * -j), m.Call(i, j)); }
+ }
+}
+
+TEST(RunFloat64NegAndFloat64Mul2) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
+ m.Return(m.Float64Mul(m.Parameter(0), m.Float64Neg(m.Parameter(1))));
+
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ((i * -j), m.Call(i, j)); }
+ }
+}
+
+TEST(RunFloat32NegAndFloat32Mul3) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
+ MachineType::Float32());
+ m.Return(
+ m.Float32Mul(m.Float32Neg(m.Parameter(0)), m.Float32Neg(m.Parameter(1))));
+
+ FOR_FLOAT32_INPUTS(i) {
+ FOR_FLOAT32_INPUTS(j) { CHECK_FLOAT_EQ((-i * -j), m.Call(i, j)); }
+ }
+}
+
+TEST(RunFloat64NegAndFloat64Mul3) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
+ m.Return(
+ m.Float64Mul(m.Float64Neg(m.Parameter(0)), m.Float64Neg(m.Parameter(1))));
+
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) { CHECK_DOUBLE_EQ((-i * -j), m.Call(i, j)); }
+ }
+}
TEST(RunFloat64MulAndFloat64Add1) {
BufferedRawMachineAssemblerTester<double> m(
@@ -6797,15 +6876,13 @@ TEST(RunBitcastInt32ToFloat32) {
TEST(RunComputedCodeObject) {
- GraphBuilderTester<int32_t> a;
+ RawMachineAssemblerTester<int32_t> a;
a.Return(a.Int32Constant(33));
- a.End();
- Handle<Code> code_a = a.GetCode();
+ CHECK_EQ(33, a.Call());
- GraphBuilderTester<int32_t> b;
+ RawMachineAssemblerTester<int32_t> b;
b.Return(b.Int32Constant(44));
- b.End();
- Handle<Code> code_b = b.GetCode();
+ CHECK_EQ(44, b.Call());
RawMachineAssemblerTester<int32_t> r(MachineType::Int32());
RawMachineLabel tlabel;
@@ -6813,10 +6890,10 @@ TEST(RunComputedCodeObject) {
RawMachineLabel merge;
r.Branch(r.Parameter(0), &tlabel, &flabel);
r.Bind(&tlabel);
- Node* fa = r.HeapConstant(code_a);
+ Node* fa = r.HeapConstant(a.GetCode());
r.Goto(&merge);
r.Bind(&flabel);
- Node* fb = r.HeapConstant(code_b);
+ Node* fb = r.HeapConstant(b.GetCode());
r.Goto(&merge);
r.Bind(&merge);
Node* phi = r.Phi(MachineRepresentation::kWord32, fa, fb);
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 7037bd5f2b..2432ec3afe 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -4,18 +4,18 @@
#include <vector>
-#include "src/assembler.h"
#include "src/base/overflowing-math.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/register-configuration.h"
#include "src/compiler/linkage.h"
#include "src/compiler/raw-machine-assembler.h"
-#include "src/machine-type.h"
-#include "src/objects-inl.h"
-#include "src/register-configuration.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-linkage.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/graph-and-builders.h"
#include "test/cctest/compiler/value-helper.h"
namespace v8 {
@@ -24,8 +24,8 @@ namespace compiler {
namespace test_run_native_calls {
namespace {
-typedef float float32;
-typedef double float64;
+using float32 = float;
+using float64 = double;
// Picks a representative pair of integers from the given range.
// If there are less than {max_pairs} possible pairs, do them all, otherwise try
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
index 24080bc573..be329e1b00 100644
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
-#include "src/code-stub-assembler.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/macro-assembler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-stackcheck.cc b/deps/v8/test/cctest/compiler/test-run-stackcheck.cc
index 0dd28a7419..e5874a65dd 100644
--- a/deps/v8/test/cctest/compiler/test-run-stackcheck.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stackcheck.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
index b0ca000a02..1562befb9d 100644
--- a/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-tail-calls.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/code-stub-assembler.h"
-#include "src/macro-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/macro-assembler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/code-assembler-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
index 5ecc501c2e..e4e355b801 100644
--- a/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
+++ b/deps/v8/test/cctest/compiler/test-run-unwinding-info.cc
@@ -6,9 +6,9 @@
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM) || \
defined(V8_TARGET_ARCH_ARM64)
-#include "src/flags.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/flags/flags.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -27,7 +27,7 @@ TEST(RunUnwindingInfo) {
tester.Call(tester.Val(-1));
- CHECK(tester.function->code()->has_unwinding_info());
+ CHECK(tester.function->code().has_unwinding_info());
}
// TODO(ssanfilippo) Build low-level graph and check that state is correctly
diff --git a/deps/v8/test/cctest/compiler/test-run-variables.cc b/deps/v8/test/cctest/compiler/test-run-variables.cc
index e2539dc16c..0097de584b 100644
--- a/deps/v8/test/cctest/compiler/test-run-variables.cc
+++ b/deps/v8/test/cctest/compiler/test-run-variables.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/compiler/function-tester.h"
namespace v8 {
@@ -61,8 +61,8 @@ static void RunVariableTests(const char* source, const char* tests[]) {
for (int i = 0; tests[i] != nullptr; i += 3) {
SNPrintF(buffer, source, tests[i]);
- PrintF("#%d: %s\n", i / 3, buffer.start());
- FunctionTester T(buffer.start());
+ PrintF("#%d: %s\n", i / 3, buffer.begin());
+ FunctionTester T(buffer.begin());
// Check function with non-falsey parameter.
if (tests[i + 1] != throws) {
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index 34079f1032..4b8f34c9d2 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -11,8 +11,8 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node.h"
-#include "src/isolate.h"
-#include "src/objects.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/gay-fixed.cc b/deps/v8/test/cctest/gay-fixed.cc
index 75c872fd81..5dae8e5072 100644
--- a/deps/v8/test/cctest/gay-fixed.cc
+++ b/deps/v8/test/cctest/gay-fixed.cc
@@ -29,7 +29,7 @@
// have been generated using Gay's dtoa to produce the fixed representation:
// dtoa(v, 3, number_digits, &decimal_point, &sign, nullptr);
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/gay-fixed.h"
diff --git a/deps/v8/test/cctest/gay-fixed.h b/deps/v8/test/cctest/gay-fixed.h
index 5baa6c2b89..3219ed3b73 100644
--- a/deps/v8/test/cctest/gay-fixed.h
+++ b/deps/v8/test/cctest/gay-fixed.h
@@ -28,7 +28,7 @@
#ifndef GAY_FIXED_H_
#define GAY_FIXED_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/gay-precision.cc b/deps/v8/test/cctest/gay-precision.cc
index 0661e92897..34615fbda3 100644
--- a/deps/v8/test/cctest/gay-precision.cc
+++ b/deps/v8/test/cctest/gay-precision.cc
@@ -29,7 +29,7 @@
// have been generated using Gay's dtoa to produce the precision representation:
// dtoa(v, 2, number_digits, &decimal_point, &sign, nullptr);
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/gay-precision.h"
diff --git a/deps/v8/test/cctest/gay-precision.h b/deps/v8/test/cctest/gay-precision.h
index 0ba3462aa6..e3dcac6755 100644
--- a/deps/v8/test/cctest/gay-precision.h
+++ b/deps/v8/test/cctest/gay-precision.h
@@ -28,7 +28,7 @@
#ifndef GAY_PRECISION_H_
#define GAY_PRECISION_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/gay-shortest.cc b/deps/v8/test/cctest/gay-shortest.cc
index b810fd2468..53f7fc2741 100644
--- a/deps/v8/test/cctest/gay-shortest.cc
+++ b/deps/v8/test/cctest/gay-shortest.cc
@@ -29,7 +29,7 @@
// have been generated using Gay's dtoa to produce the shortest representation:
// decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, nullptr);
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/gay-shortest.h"
diff --git a/deps/v8/test/cctest/gay-shortest.h b/deps/v8/test/cctest/gay-shortest.h
index b76476ea52..0d4efbce47 100644
--- a/deps/v8/test/cctest/gay-shortest.h
+++ b/deps/v8/test/cctest/gay-shortest.h
@@ -28,7 +28,7 @@
#ifndef GAY_SHORTEST_H_
#define GAY_SHORTEST_H_
-#include "src/vector.h"
+#include "src/utils/vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 7ace6d4bb2..24298d685c 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -4,11 +4,11 @@
#include "test/cctest/heap/heap-utils.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
-#include "src/isolate.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/heap/heap-utils.h b/deps/v8/test/cctest/heap/heap-utils.h
index dbe8e30a49..dfd4094913 100644
--- a/deps/v8/test/cctest/heap/heap-utils.h
+++ b/deps/v8/test/cctest/heap/heap-utils.h
@@ -5,13 +5,29 @@
#ifndef HEAP_HEAP_UTILS_H_
#define HEAP_HEAP_UTILS_H_
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/heap/heap.h"
namespace v8 {
namespace internal {
namespace heap {
+class TemporaryEmbedderHeapTracerScope {
+ public:
+ TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate,
+ v8::EmbedderHeapTracer* tracer)
+ : isolate_(isolate) {
+ isolate_->SetEmbedderHeapTracer(tracer);
+ }
+
+ ~TemporaryEmbedderHeapTracerScope() {
+ isolate_->SetEmbedderHeapTracer(nullptr);
+ }
+
+ private:
+ v8::Isolate* const isolate_;
+};
+
void SealCurrentObjects(Heap* heap);
int FixedArrayLenFromSize(int size);
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index cf01d9fe9b..f3ae6c125d 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -25,15 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
-#include "src/accessors.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
+#include "src/builtins/accessors.h"
#include "src/heap/heap-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
-#include "src/property.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
@@ -54,12 +54,12 @@ Handle<Object> HeapTester::TestAllocateAfterFailures() {
heap->AllocateRaw(size, AllocationType::kYoung).ToObjectChecked();
// In order to pass heap verification on Isolate teardown, mark the
// allocated area as a filler.
- heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
// Old generation.
heap::SimulateFullSpace(heap->old_space());
obj = heap->AllocateRaw(size, AllocationType::kOld).ToObjectChecked();
- heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
// Large object space.
static const size_t kLargeObjectSpaceFillerLength =
@@ -71,23 +71,23 @@ Handle<Object> HeapTester::TestAllocateAfterFailures() {
while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
obj = heap->AllocateRaw(kLargeObjectSpaceFillerSize, AllocationType::kOld)
.ToObjectChecked();
- heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
}
obj = heap->AllocateRaw(kLargeObjectSpaceFillerSize, AllocationType::kOld)
.ToObjectChecked();
- heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
// Map space.
heap::SimulateFullSpace(heap->map_space());
obj = heap->AllocateRaw(Map::kSize, AllocationType::kMap).ToObjectChecked();
- heap->CreateFillerObjectAt(obj->address(), Map::kSize,
+ heap->CreateFillerObjectAt(obj.address(), Map::kSize,
ClearRecordedSlots::kNo);
// Code space.
heap::SimulateFullSpace(heap->code_space());
- size = CcTest::i_isolate()->builtins()->builtin(Builtins::kIllegal)->Size();
+ size = CcTest::i_isolate()->builtins()->builtin(Builtins::kIllegal).Size();
obj = heap->AllocateRaw(size, AllocationType::kCode).ToObjectChecked();
- heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
return CcTest::i_isolate()->factory()->true_value();
}
@@ -135,7 +135,7 @@ TEST(StressJS) {
factory->function_string(), isolate->sloppy_function_map(),
Builtins::kEmptyFunction);
Handle<JSFunction> function = factory->NewFunction(args);
- CHECK(!function->shared()->construct_as_builtin());
+ CHECK(!function->shared().construct_as_builtin());
// Force the creation of an initial map.
factory->NewJSObject(function);
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 575e6946c0..b4122c9619 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index 6699e48af5..114a4639bd 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
diff --git a/deps/v8/test/cctest/heap/test-concurrent-marking.cc b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
index 57a5842850..3a67954528 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-marking.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
@@ -4,7 +4,7 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/heap-inl.h"
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index ace016dbd0..2d0833e1a3 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -6,10 +6,10 @@
#include <vector>
#include "include/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/module.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/script.h"
#include "src/objects/shared-function-info.h"
#include "test/cctest/cctest.h"
@@ -36,7 +36,7 @@ v8::Local<v8::Object> ConstructTraceableJSApiObject(
instance->SetAlignedPointerInInternalField(1, second_field);
CHECK(!instance.IsEmpty());
i::Handle<i::JSReceiver> js_obj = v8::Utils::OpenHandle(*instance);
- CHECK_EQ(i::JS_API_OBJECT_TYPE, js_obj->map()->instance_type());
+ CHECK_EQ(i::JS_API_OBJECT_TYPE, js_obj->map().instance_type());
return scope.Escape(instance);
}
@@ -69,7 +69,7 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
bool IsTracingDone() final { return to_register_with_v8_.empty(); }
- void TracePrologue() final {
+ void TracePrologue(EmbedderHeapTracer::TraceFlags) final {
if (prologue_behavior_ == TracePrologueBehavior::kCallV8WriteBarrier) {
auto local = array_.Get(isolate());
local->Set(local->CreationContext(), 0, v8::Object::New(isolate()))
@@ -103,22 +103,6 @@ class TestEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
v8::Global<v8::Array> array_;
};
-class TemporaryEmbedderHeapTracerScope {
- public:
- TemporaryEmbedderHeapTracerScope(v8::Isolate* isolate,
- EmbedderHeapTracer* tracer)
- : isolate_(isolate) {
- isolate_->SetEmbedderHeapTracer(tracer);
- }
-
- ~TemporaryEmbedderHeapTracerScope() {
- isolate_->SetEmbedderHeapTracer(nullptr);
- }
-
- private:
- v8::Isolate* const isolate_;
-};
-
} // namespace
TEST(V8RegisteringEmbedderReference) {
@@ -128,7 +112,7 @@ TEST(V8RegisteringEmbedderReference) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -148,7 +132,7 @@ TEST(EmbedderRegisteringV8Reference) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -181,7 +165,7 @@ TEST(TracingInRevivedSubgraph) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -209,7 +193,7 @@ TEST(TracingInEphemerons) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -227,7 +211,7 @@ TEST(TracingInEphemerons) {
Handle<JSObject> js_key =
handle(JSObject::cast(*v8::Utils::OpenHandle(*key)), i_isolate);
Handle<JSReceiver> js_api_object = v8::Utils::OpenHandle(*api_object);
- int32_t hash = js_key->GetOrCreateHash(i_isolate)->value();
+ int32_t hash = js_key->GetOrCreateHash(i_isolate).value();
JSWeakCollection::Set(weak_map, js_key, js_api_object, hash);
}
CcTest::CollectGarbage(i::OLD_SPACE);
@@ -240,7 +224,7 @@ TEST(FinalizeTracingIsNoopWhenNotMarking) {
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
// Finalize a potentially running garbage collection.
i_isolate->heap()->CollectGarbage(OLD_SPACE,
@@ -259,7 +243,7 @@ TEST(FinalizeTracingWhenMarking) {
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
// Finalize a potentially running garbage collection.
i_isolate->heap()->CollectGarbage(OLD_SPACE,
@@ -284,7 +268,7 @@ TEST(GarbageCollectionForTesting) {
v8::Isolate* isolate = CcTest::isolate();
Isolate* i_isolate = CcTest::i_isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
int saved_gc_counter = i_isolate->heap()->gc_count();
tracer.GarbageCollectionForTesting(EmbedderHeapTracer::kUnknown);
@@ -414,7 +398,7 @@ TEST(TracedGlobalToUnmodifiedJSObjectSurvivesScavengeWhenExcludedFromRoots) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
tracer.ConsiderTracedGlobalAsRoot(false);
TracedGlobalTest(
CcTest::isolate(), ConstructJSObject,
@@ -427,7 +411,7 @@ TEST(TracedGlobalToUnmodifiedJSApiObjectSurvivesScavengePerDefault) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
tracer.ConsiderTracedGlobalAsRoot(true);
TracedGlobalTest(
CcTest::isolate(), ConstructJSApiObject,
@@ -440,7 +424,7 @@ TEST(TracedGlobalToUnmodifiedJSApiObjectDiesOnScavengeWhenExcludedFromRoots) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
tracer.ConsiderTracedGlobalAsRoot(false);
TracedGlobalTest(
CcTest::isolate(), ConstructJSApiObject,
@@ -454,7 +438,7 @@ TEST(TracedGlobalWrapperClassId) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::TracedGlobal<v8::Object> traced;
ConstructJSObject(isolate, isolate->GetCurrentContext(), &traced);
@@ -489,7 +473,7 @@ TEST(TracedGlobalIteration) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::TracedGlobal<v8::Object> traced;
ConstructJSObject(isolate, isolate->GetCurrentContext(), &traced);
@@ -522,7 +506,7 @@ TEST(TracedGlobalSetFinalizationCallbackScavenge) {
v8::HandleScope scope(isolate);
TestEmbedderHeapTracer tracer;
tracer.ConsiderTracedGlobalAsRoot(false);
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::TracedGlobal<v8::Object> traced;
ConstructJSApiObject(isolate, isolate->GetCurrentContext(), &traced);
@@ -544,7 +528,7 @@ TEST(TracedGlobalSetFinalizationCallbackMarkSweep) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
TestEmbedderHeapTracer tracer;
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
v8::TracedGlobal<v8::Object> traced;
ConstructJSApiObject(isolate, isolate->GetCurrentContext(), &traced);
@@ -574,7 +558,7 @@ TEST(TracePrologueCallingIntoV8WriteBarrier) {
}
TestEmbedderHeapTracer tracer(TracePrologueBehavior::kCallV8WriteBarrier,
std::move(global));
- TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
SimulateIncrementalMarking(CcTest::i_isolate()->heap());
}
diff --git a/deps/v8/test/cctest/heap/test-external-string-tracker.cc b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
index bcc00764de..7eb03e10e2 100644
--- a/deps/v8/test/cctest/heap/test-external-string-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-external-string-tracker.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/api.h"
+#include "src/api/api-inl.h"
+#include "src/api/api.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 50ac47a7ab..445853bf9c 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -28,16 +28,15 @@
#include <stdlib.h>
#include <utility>
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
-#include "src/compilation-cache.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/elements.h"
-#include "src/execution.h"
-#include "src/field-type.h"
-#include "src/global-handles.h"
-#include "src/hash-seed-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/execution.h"
+#include "src/handles/global-handles.h"
+#include "src/heap/combined-heap.h"
#include "src/heap/factory.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
@@ -46,18 +45,20 @@
#include "src/heap/memory-reducer.h"
#include "src/heap/remembered-set.h"
#include "src/ic/ic.h"
-#include "src/macro-assembler-inl.h"
-#include "src/objects-inl.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/elements.h"
+#include "src/objects/field-type.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/managed.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
-#include "src/ostreams.h"
+#include "src/objects/transitions.h"
#include "src/regexp/jsregexp.h"
#include "src/snapshot/snapshot.h"
-#include "src/transitions.h"
+#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
@@ -73,13 +74,11 @@ static const int kPretenureCreationCount =
AllocationSite::kPretenureMinimumCreated + 1;
static void CheckMap(Map map, int type, int instance_size) {
- CHECK(map->IsHeapObject());
-#ifdef DEBUG
- CHECK(CcTest::heap()->Contains(map));
-#endif
- CHECK_EQ(ReadOnlyRoots(CcTest::heap()).meta_map(), map->map());
- CHECK_EQ(type, map->instance_type());
- CHECK_EQ(instance_size, map->instance_size());
+ CHECK(map.IsHeapObject());
+ DCHECK(IsValidHeapObject(CcTest::heap(), map));
+ CHECK_EQ(ReadOnlyRoots(CcTest::heap()).meta_map(), map.map());
+ CHECK_EQ(type, map.instance_type());
+ CHECK_EQ(instance_size, map.instance_size());
}
@@ -103,7 +102,7 @@ static void VerifyStoredPrototypeMap(Isolate* isolate,
Handle<JSFunction> fun(
JSFunction::cast(context->get(stored_ctor_context_index)), isolate);
- Handle<JSObject> proto(JSObject::cast(fun->initial_map()->prototype()),
+ Handle<JSObject> proto(JSObject::cast(fun->initial_map().prototype()),
isolate);
Handle<Map> that_map(proto->map(), isolate);
@@ -150,16 +149,16 @@ TEST(InitialObjects) {
}
static void CheckOddball(Isolate* isolate, Object obj, const char* string) {
- CHECK(obj->IsOddball());
+ CHECK(obj.IsOddball());
Handle<Object> handle(obj, isolate);
Object print_string = *Object::ToString(isolate, handle).ToHandleChecked();
- CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
+ CHECK(String::cast(print_string).IsOneByteEqualTo(CStrVector(string)));
}
static void CheckSmi(Isolate* isolate, int value, const char* string) {
Handle<Object> handle(Smi::FromInt(value), isolate);
Object print_string = *Object::ToString(isolate, handle).ToHandleChecked();
- CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
+ CHECK(String::cast(print_string).IsOneByteEqualTo(CStrVector(string)));
}
@@ -168,11 +167,11 @@ static void CheckNumber(Isolate* isolate, double value, const char* string) {
CHECK(number->IsNumber());
Handle<Object> print_string =
Object::ToString(isolate, number).ToHandleChecked();
- CHECK(String::cast(*print_string)->IsUtf8EqualTo(CStrVector(string)));
+ CHECK(String::cast(*print_string).IsOneByteEqualTo(CStrVector(string)));
}
void CheckEmbeddedObjectsAreEqual(Handle<Code> lhs, Handle<Code> rhs) {
- int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT);
RelocIterator lhs_it(*lhs, mode_mask);
RelocIterator rhs_it(*rhs, mode_mask);
while (!lhs_it.done() && !rhs_it.done()) {
@@ -201,8 +200,7 @@ HEAP_TEST(TestNewSpaceRefsInCopiedCode) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
Handle<Code> copy;
{
@@ -225,23 +223,21 @@ static void CheckFindCodeObject(Isolate* isolate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
CHECK(code->IsCode());
HeapObject obj = HeapObject::cast(*code);
- Address obj_addr = obj->address();
+ Address obj_addr = obj.address();
- for (int i = 0; i < obj->Size(); i += kTaggedSize) {
+ for (int i = 0; i < obj.Size(); i += kTaggedSize) {
Object found = isolate->FindCodeObject(obj_addr + i);
CHECK_EQ(*code, found);
}
- Handle<Code> copy =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> copy = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
HeapObject obj_copy = HeapObject::cast(*copy);
Object not_right =
- isolate->FindCodeObject(obj_copy->address() + obj_copy->Size() / 2);
+ isolate->FindCodeObject(obj_copy.address() + obj_copy.Size() / 2);
CHECK(not_right != *code);
}
@@ -317,7 +313,7 @@ TEST(HeapObjects) {
CHECK_EQ(10, s->length());
Handle<String> object_string = Handle<String>::cast(factory->Object_string());
- Handle<JSGlobalObject> global(CcTest::i_isolate()->context()->global_object(),
+ Handle<JSGlobalObject> global(CcTest::i_isolate()->context().global_object(),
isolate);
CHECK(Just(true) == JSReceiver::HasOwnProperty(global, object_string));
@@ -343,9 +339,9 @@ TEST(Tagging) {
CcTest::InitializeVM();
int request = 24;
CHECK_EQ(request, static_cast<int>(OBJECT_POINTER_ALIGN(request)));
- CHECK(Smi::FromInt(42)->IsSmi());
- CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi());
- CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi());
+ CHECK(Smi::FromInt(42).IsSmi());
+ CHECK(Smi::FromInt(Smi::kMinValue).IsSmi());
+ CHECK(Smi::FromInt(Smi::kMaxValue).IsSmi());
}
@@ -358,7 +354,7 @@ TEST(GarbageCollection) {
// Check GC.
CcTest::CollectGarbage(NEW_SPACE);
- Handle<JSGlobalObject> global(CcTest::i_isolate()->context()->global_object(),
+ Handle<JSGlobalObject> global(CcTest::i_isolate()->context().global_object(),
isolate);
Handle<String> name = factory->InternalizeUtf8String("theFunction");
Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
@@ -417,7 +413,7 @@ static void VerifyStringAllocation(Isolate* isolate, const char* string) {
HandleScope scope(isolate);
Handle<String> s = isolate->factory()->NewStringFromUtf8(
CStrVector(string)).ToHandleChecked();
- CHECK_EQ(StrLength(string), s->length());
+ CHECK_EQ(strlen(string), s->length());
for (int index = 0; index < s->length(); index++) {
CHECK_EQ(static_cast<uint16_t>(string[index]), s->Get(index));
}
@@ -444,7 +440,7 @@ TEST(LocalHandles) {
v8::HandleScope scope(CcTest::isolate());
const char* name = "Kasper the spunky";
Handle<String> string = factory->NewStringFromAsciiChecked(name);
- CHECK_EQ(StrLength(name), string->length());
+ CHECK_EQ(strlen(name), string->length());
}
@@ -474,10 +470,10 @@ TEST(GlobalHandles) {
// after gc, it should survive
CcTest::CollectGarbage(NEW_SPACE);
- CHECK((*h1)->IsString());
- CHECK((*h2)->IsHeapNumber());
- CHECK((*h3)->IsString());
- CHECK((*h4)->IsHeapNumber());
+ CHECK((*h1).IsString());
+ CHECK((*h2).IsHeapNumber());
+ CHECK((*h3).IsString());
+ CHECK((*h4).IsHeapNumber());
CHECK_EQ(*h3, *h1);
GlobalHandles::Destroy(h1.location());
@@ -500,44 +496,6 @@ static void TestWeakGlobalHandleCallback(
p->first->Reset();
}
-
-TEST(WeakGlobalHandlesScavenge) {
- FLAG_stress_compaction = false;
- FLAG_stress_incremental_marking = false;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- GlobalHandles* global_handles = isolate->global_handles();
-
- WeakPointerCleared = false;
-
- Handle<Object> h1;
- Handle<Object> h2;
-
- {
- HandleScope scope(isolate);
-
- Handle<Object> i = factory->NewStringFromStaticChars("fisk");
- Handle<Object> u = factory->NewNumber(1.12344);
-
- h1 = global_handles->Create(*i);
- h2 = global_handles->Create(*u);
- }
-
- std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
- GlobalHandles::MakeWeak(
- h2.location(), reinterpret_cast<void*>(&handle_and_id),
- &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
-
- // Scavenge treats weak pointers as normal roots.
- CcTest::CollectGarbage(NEW_SPACE);
- CHECK((*h1)->IsString());
- CHECK((*h2)->IsHeapNumber());
- CHECK(!WeakPointerCleared);
- GlobalHandles::Destroy(h1.location());
- GlobalHandles::Destroy(h2.location());
-}
-
TEST(WeakGlobalUnmodifiedApiHandlesScavenge) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -571,89 +529,11 @@ TEST(WeakGlobalUnmodifiedApiHandlesScavenge) {
&TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
CcTest::CollectGarbage(NEW_SPACE);
- CHECK((*h1)->IsHeapNumber());
+ CHECK((*h1).IsHeapNumber());
CHECK(WeakPointerCleared);
GlobalHandles::Destroy(h1.location());
}
-TEST(WeakGlobalApiHandleModifiedMapScavenge) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- LocalContext context;
- GlobalHandles* global_handles = isolate->global_handles();
-
- WeakPointerCleared = false;
-
- Handle<Object> h1;
-
- {
- HandleScope scope(isolate);
-
- // Create an API object which does not have the same map as constructor.
- auto function_template = FunctionTemplate::New(context->GetIsolate());
- auto instance_t = function_template->InstanceTemplate();
- instance_t->Set(v8::String::NewFromUtf8(context->GetIsolate(), "a",
- NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Number::New(context->GetIsolate(), 10));
- auto function =
- function_template->GetFunction(context.local()).ToLocalChecked();
- auto i = function->NewInstance(context.local()).ToLocalChecked();
-
- h1 = global_handles->Create(*(reinterpret_cast<internal::Address*>(*i)));
- }
-
- std::pair<Handle<Object>*, int> handle_and_id(&h1, 1234);
- GlobalHandles::MakeWeak(
- h1.location(), reinterpret_cast<void*>(&handle_and_id),
- &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
-
- CcTest::CollectGarbage(NEW_SPACE);
- CHECK(!WeakPointerCleared);
- GlobalHandles::Destroy(h1.location());
-}
-
-TEST(WeakGlobalApiHandleWithElementsScavenge) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- LocalContext context;
- GlobalHandles* global_handles = isolate->global_handles();
-
- WeakPointerCleared = false;
-
- Handle<Object> h1;
-
- {
- HandleScope scope(isolate);
-
- // Create an API object which has elements.
- auto function_template = FunctionTemplate::New(context->GetIsolate());
- auto instance_t = function_template->InstanceTemplate();
- instance_t->Set(v8::String::NewFromUtf8(context->GetIsolate(), "1",
- NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Number::New(context->GetIsolate(), 10));
- instance_t->Set(v8::String::NewFromUtf8(context->GetIsolate(), "2",
- NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Number::New(context->GetIsolate(), 10));
- auto function =
- function_template->GetFunction(context.local()).ToLocalChecked();
- auto i = function->NewInstance(context.local()).ToLocalChecked();
-
- h1 = global_handles->Create(*(reinterpret_cast<internal::Address*>(*i)));
- }
-
- std::pair<Handle<Object>*, int> handle_and_id(&h1, 1234);
- GlobalHandles::MakeWeak(
- h1.location(), reinterpret_cast<void*>(&handle_and_id),
- &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
-
- CcTest::CollectGarbage(NEW_SPACE);
- CHECK(!WeakPointerCleared);
- GlobalHandles::Destroy(h1.location());
-}
-
TEST(WeakGlobalHandlesMark) {
FLAG_stress_incremental_marking = false;
CcTest::InitializeVM();
@@ -688,7 +568,7 @@ TEST(WeakGlobalHandlesMark) {
// Incremental marking potentially marked handles before they turned weak.
CcTest::CollectAllGarbage();
- CHECK((*h1)->IsString());
+ CHECK((*h1).IsString());
CHECK(WeakPointerCleared);
GlobalHandles::Destroy(h1.location());
}
@@ -703,9 +583,7 @@ TEST(DeleteWeakGlobalHandle) {
GlobalHandles* global_handles = isolate->global_handles();
WeakPointerCleared = false;
-
Handle<Object> h;
-
{
HandleScope scope(isolate);
@@ -717,15 +595,8 @@ TEST(DeleteWeakGlobalHandle) {
GlobalHandles::MakeWeak(h.location(), reinterpret_cast<void*>(&handle_and_id),
&TestWeakGlobalHandleCallback,
v8::WeakCallbackType::kParameter);
-
- // Scanvenge does not recognize weak reference.
- CcTest::CollectGarbage(NEW_SPACE);
-
CHECK(!WeakPointerCleared);
-
- // Mark-compact treats weak reference properly.
CcTest::CollectGarbage(OLD_SPACE);
-
CHECK(WeakPointerCleared);
}
@@ -889,10 +760,10 @@ static void CheckInternalizedStrings(const char** strings) {
CHECK(a->IsInternalizedString());
Handle<String> b = factory->InternalizeUtf8String(string);
CHECK_EQ(*b, *a);
- CHECK(b->IsUtf8EqualTo(CStrVector(string)));
+ CHECK(b->IsOneByteEqualTo(CStrVector(string)));
b = isolate->factory()->InternalizeUtf8String(CStrVector(string));
CHECK_EQ(*b, *a);
- CHECK(b->IsUtf8EqualTo(CStrVector(string)));
+ CHECK(b->IsOneByteEqualTo(CStrVector(string)));
}
}
@@ -1068,7 +939,7 @@ TEST(JSArray) {
JSArray::SetLength(array, static_cast<uint32_t>(Smi::kMaxValue) + 1);
uint32_t int_length = 0;
- CHECK(array->length()->ToArrayIndex(&int_length));
+ CHECK(array->length().ToArrayIndex(&int_length));
CHECK_EQ(static_cast<uint32_t>(Smi::kMaxValue) + 1, int_length);
CHECK(array->HasDictionaryElements()); // Must be in slow mode.
@@ -1076,7 +947,7 @@ TEST(JSArray) {
Object::SetElement(isolate, array, int_length, name, ShouldThrow::kDontThrow)
.Check();
uint32_t new_int_length = 0;
- CHECK(array->length()->ToArrayIndex(&new_int_length));
+ CHECK(array->length().ToArrayIndex(&new_int_length));
CHECK_EQ(static_cast<double>(int_length), new_int_length - 1);
element = Object::GetElement(isolate, array, int_length).ToHandleChecked();
CHECK_EQ(*element, *name);
@@ -1176,7 +1047,7 @@ TEST(StringAllocation) {
Vector<const char>(non_one_byte, 3 * length));
CHECK_EQ(length, non_one_byte_sym->length());
Handle<String> one_byte_sym =
- factory->InternalizeOneByteString(OneByteVector(one_byte, length));
+ factory->InternalizeString(OneByteVector(one_byte, length));
CHECK_EQ(length, one_byte_sym->length());
Handle<String> non_one_byte_str =
factory->NewStringFromUtf8(Vector<const char>(non_one_byte, 3 * length))
@@ -1242,7 +1113,7 @@ TEST(Iteration) {
// Add a Map object to look for.
objs[next_objs_index++] =
- Handle<Map>(HeapObject::cast(*objs[0])->map(), isolate);
+ Handle<Map>(HeapObject::cast(*objs[0]).map(), isolate);
CHECK_EQ(objs_count, next_objs_index);
CHECK_EQ(objs_count, ObjectsFoundInHeap(CcTest::heap(), objs, objs_count));
@@ -1286,12 +1157,12 @@ TEST(TestBytecodeFlushing) {
.ToHandleChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
- CHECK(function->shared()->is_compiled());
+ CHECK(function->shared().is_compiled());
// The code will survive at least two GCs.
CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage();
- CHECK(function->shared()->is_compiled());
+ CHECK(function->shared().is_compiled());
// Simulate several GCs that use full marking.
const int kAgingThreshold = 6;
@@ -1300,11 +1171,11 @@ TEST(TestBytecodeFlushing) {
}
// foo should no longer be in the compilation cache
- CHECK(!function->shared()->is_compiled());
+ CHECK(!function->shared().is_compiled());
CHECK(!function->is_compiled());
// Call foo to get it recompiled.
CompileRun("foo()");
- CHECK(function->shared()->is_compiled());
+ CHECK(function->shared().is_compiled());
CHECK(function->is_compiled());
}
}
@@ -1344,12 +1215,12 @@ TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
.ToHandleChecked();
CHECK(func_value->IsJSFunction());
Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
- CHECK(function->shared()->is_compiled());
+ CHECK(function->shared().is_compiled());
// The code will survive at least two GCs.
CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage();
- CHECK(function->shared()->is_compiled());
+ CHECK(function->shared().is_compiled());
// Simulate several GCs that use incremental marking.
const int kAgingThreshold = 6;
@@ -1357,7 +1228,7 @@ TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::CollectAllGarbage();
}
- CHECK(!function->shared()->is_compiled());
+ CHECK(!function->shared().is_compiled());
CHECK(!function->is_compiled());
// This compile will compile the function again.
@@ -1370,7 +1241,7 @@ TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
// the loop breaks once the function is enqueued as a candidate.
for (int i = 0; i < kAgingThreshold; i++) {
heap::SimulateIncrementalMarking(CcTest::heap());
- if (function->shared()->GetBytecodeArray()->IsOld()) break;
+ if (function->shared().GetBytecodeArray().IsOld()) break;
CcTest::CollectAllGarbage();
}
@@ -1378,12 +1249,14 @@ TEST(TestOptimizeAfterBytecodeFlushingCandidate) {
// the function is enqueued as a candidate.
{
v8::HandleScope scope(CcTest::isolate());
- CompileRun("%OptimizeFunctionOnNextCall(foo); foo();");
+ CompileRun(
+ "%PrepareFunctionForOptimization(foo);"
+ "%OptimizeFunctionOnNextCall(foo); foo();");
}
// Simulate one final GC and make sure the candidate wasn't flushed.
CcTest::CollectAllGarbage();
- CHECK(function->shared()->is_compiled());
+ CHECK(function->shared().is_compiled());
CHECK(function->is_compiled());
}
@@ -1405,7 +1278,8 @@ TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
"function make_closure(x) {"
" return function() { return x + 3 };"
"}"
- "var f = make_closure(5); f();"
+ "var f = make_closure(5);"
+ "%PrepareFunctionForOptimization(f); f();"
"var g = make_closure(5);");
// Check f is compiled.
@@ -1486,7 +1360,7 @@ TEST(CompilationCacheCachingBehavior) {
CHECK(shared->HasBytecodeArray());
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
- shared->GetBytecodeArray()->MakeOlder();
+ shared->GetBytecodeArray().MakeOlder();
}
}
@@ -1509,11 +1383,12 @@ static void OptimizeEmptyFunction(const char* name) {
EmbeddedVector<char, 256> source;
SNPrintF(source,
"function %s() { return 0; }"
+ "%%PrepareFunctionForOptimization(%s);"
"%s(); %s();"
"%%OptimizeFunctionOnNextCall(%s);"
"%s();",
- name, name, name, name, name);
- CompileRun(source.start());
+ name, name, name, name, name, name);
+ CompileRun(source.begin());
}
@@ -1521,9 +1396,9 @@ static void OptimizeEmptyFunction(const char* name) {
int CountNativeContexts() {
int count = 0;
Object object = CcTest::heap()->native_contexts_list();
- while (!object->IsUndefined(CcTest::i_isolate())) {
+ while (!object.IsUndefined(CcTest::i_isolate())) {
count++;
- object = Context::cast(object)->next_context_link();
+ object = Context::cast(object).next_context_link();
}
return count;
}
@@ -1606,7 +1481,7 @@ TEST(TestInternalWeakLists) {
for (int i = 0; i < kNumTestContexts; i++) {
// TODO(dcarney): is there a better way to do this?
i::Address* unsafe = reinterpret_cast<i::Address*>(*ctx[i]);
- *unsafe = ReadOnlyRoots(CcTest::heap()).undefined_value()->ptr();
+ *unsafe = ReadOnlyRoots(CcTest::heap()).undefined_value().ptr();
ctx[i].Clear();
// Scavenge treats these references as strong.
@@ -1762,7 +1637,7 @@ static HeapObject NewSpaceAllocateAligned(int size,
heap->new_space()->AllocateRawAligned(size, alignment);
HeapObject obj;
allocation.To(&obj);
- heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
return obj;
}
@@ -1791,7 +1666,7 @@ TEST(TestAlignedAllocation) {
// aligned address.
start = AlignNewSpace(kDoubleAligned, 0);
obj = NewSpaceAllocateAligned(kTaggedSize, kDoubleAligned);
- CHECK(IsAligned(obj->address(), kDoubleAlignment));
+ CHECK(IsAligned(obj.address(), kDoubleAlignment));
// There is no filler.
CHECK_EQ(kTaggedSize, *top_addr - start);
@@ -1799,23 +1674,23 @@ TEST(TestAlignedAllocation) {
// unaligned address.
start = AlignNewSpace(kDoubleAligned, kTaggedSize);
obj = NewSpaceAllocateAligned(kTaggedSize, kDoubleAligned);
- CHECK(IsAligned(obj->address(), kDoubleAlignment));
+ CHECK(IsAligned(obj.address(), kDoubleAlignment));
// There is a filler object before the object.
filler = HeapObject::FromAddress(start);
- CHECK(obj != filler && filler->IsFiller() && filler->Size() == kTaggedSize);
+ CHECK(obj != filler && filler.IsFiller() && filler.Size() == kTaggedSize);
CHECK_EQ(kTaggedSize + double_misalignment, *top_addr - start);
// Similarly for kDoubleUnaligned.
start = AlignNewSpace(kDoubleUnaligned, 0);
obj = NewSpaceAllocateAligned(kTaggedSize, kDoubleUnaligned);
- CHECK(IsAligned(obj->address() + kTaggedSize, kDoubleAlignment));
+ CHECK(IsAligned(obj.address() + kTaggedSize, kDoubleAlignment));
CHECK_EQ(kTaggedSize, *top_addr - start);
start = AlignNewSpace(kDoubleUnaligned, kTaggedSize);
obj = NewSpaceAllocateAligned(kTaggedSize, kDoubleUnaligned);
- CHECK(IsAligned(obj->address() + kTaggedSize, kDoubleAlignment));
+ CHECK(IsAligned(obj.address() + kTaggedSize, kDoubleAlignment));
// There is a filler object before the object.
filler = HeapObject::FromAddress(start);
- CHECK(obj != filler && filler->IsFiller() && filler->Size() == kTaggedSize);
+ CHECK(obj != filler && filler.IsFiller() && filler.Size() == kTaggedSize);
CHECK_EQ(kTaggedSize + double_misalignment, *top_addr - start);
}
}
@@ -1827,7 +1702,7 @@ static HeapObject OldSpaceAllocateAligned(int size,
heap->old_space()->AllocateRawAligned(size, alignment);
HeapObject obj;
allocation.To(&obj);
- heap->CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(obj.address(), size, ClearRecordedSlots::kNo);
return obj;
}
@@ -1856,7 +1731,7 @@ TEST(TestAlignedOverAllocation) {
// Allocate a dummy object to properly set up the linear allocation info.
AllocationResult dummy = heap->old_space()->AllocateRawUnaligned(kTaggedSize);
CHECK(!dummy.IsRetry());
- heap->CreateFillerObjectAt(dummy.ToObjectChecked()->address(), kTaggedSize,
+ heap->CreateFillerObjectAt(dummy.ToObjectChecked().address(), kTaggedSize,
ClearRecordedSlots::kNo);
// Double misalignment is 4 on 32-bit platforms or when pointer compression
@@ -1869,28 +1744,28 @@ TEST(TestAlignedOverAllocation) {
start = AlignOldSpace(kDoubleAligned, 0);
obj = OldSpaceAllocateAligned(kTaggedSize, kDoubleAligned);
// The object is aligned.
- CHECK(IsAligned(obj->address(), kDoubleAlignment));
+ CHECK(IsAligned(obj.address(), kDoubleAlignment));
// Try the opposite alignment case.
start = AlignOldSpace(kDoubleAligned, kTaggedSize);
obj = OldSpaceAllocateAligned(kTaggedSize, kDoubleAligned);
- CHECK(IsAligned(obj->address(), kDoubleAlignment));
+ CHECK(IsAligned(obj.address(), kDoubleAlignment));
filler = HeapObject::FromAddress(start);
CHECK(obj != filler);
- CHECK(filler->IsFiller());
- CHECK_EQ(kTaggedSize, filler->Size());
- CHECK(obj != filler && filler->IsFiller() && filler->Size() == kTaggedSize);
+ CHECK(filler.IsFiller());
+ CHECK_EQ(kTaggedSize, filler.Size());
+ CHECK(obj != filler && filler.IsFiller() && filler.Size() == kTaggedSize);
// Similarly for kDoubleUnaligned.
start = AlignOldSpace(kDoubleUnaligned, 0);
obj = OldSpaceAllocateAligned(kTaggedSize, kDoubleUnaligned);
// The object is aligned.
- CHECK(IsAligned(obj->address() + kTaggedSize, kDoubleAlignment));
+ CHECK(IsAligned(obj.address() + kTaggedSize, kDoubleAlignment));
// Try the opposite alignment case.
start = AlignOldSpace(kDoubleUnaligned, kTaggedSize);
obj = OldSpaceAllocateAligned(kTaggedSize, kDoubleUnaligned);
- CHECK(IsAligned(obj->address() + kTaggedSize, kDoubleAlignment));
+ CHECK(IsAligned(obj.address() + kTaggedSize, kDoubleAlignment));
filler = HeapObject::FromAddress(start);
- CHECK(obj != filler && filler->IsFiller() && filler->Size() == kTaggedSize);
+ CHECK(obj != filler && filler.IsFiller() && filler.Size() == kTaggedSize);
}
}
@@ -1911,7 +1786,7 @@ TEST(HeapNumberAlignment) {
Handle<Object> number_new = factory->NewNumber(1.000123);
CHECK(number_new->IsHeapNumber());
CHECK(Heap::InYoungGeneration(*number_new));
- CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_new)->address(),
+ CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_new).address(),
required_alignment));
AlignOldSpace(required_alignment, offset);
@@ -1919,7 +1794,7 @@ TEST(HeapNumberAlignment) {
factory->NewNumber(1.000321, AllocationType::kOld);
CHECK(number_old->IsHeapNumber());
CHECK(heap->InOldSpace(*number_old));
- CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_old)->address(),
+ CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_old).address(),
required_alignment));
}
}
@@ -1941,7 +1816,7 @@ TEST(MutableHeapNumberAlignment) {
Handle<Object> number_new = factory->NewMutableHeapNumber(1.000123);
CHECK(number_new->IsMutableHeapNumber());
CHECK(Heap::InYoungGeneration(*number_new));
- CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_new)->address(),
+ CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_new).address(),
required_alignment));
AlignOldSpace(required_alignment, offset);
@@ -1949,7 +1824,7 @@ TEST(MutableHeapNumberAlignment) {
factory->NewMutableHeapNumber(1.000321, AllocationType::kOld);
CHECK(number_old->IsMutableHeapNumber());
CHECK(heap->InOldSpace(*number_old));
- CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_old)->address(),
+ CHECK_EQ(0, Heap::GetFillToAlign(HeapObject::cast(*number_old).address(),
required_alignment));
}
}
@@ -1961,8 +1836,8 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
intptr_t size_of_objects_2 = 0;
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (!obj->IsFreeSpace()) {
- size_of_objects_2 += obj->Size();
+ if (!obj.IsFreeSpace()) {
+ size_of_objects_2 += obj.Size();
}
}
// Delta must be within 5% of the larger result.
@@ -2076,7 +1951,7 @@ static int NumberOfGlobalObjects() {
HeapIterator iterator(CcTest::heap());
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (obj->IsJSGlobalObject()) count++;
+ if (obj.IsJSGlobalObject()) count++;
}
return count;
}
@@ -2111,6 +1986,7 @@ TEST(LeakNativeContextViaMap) {
CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
v8::Local<v8::Value> res = CompileRun(
"function f() { return o.x; }"
+ "%PrepareFunctionForOptimization(f);"
"for (var i = 0; i < 10; ++i) f();"
"%OptimizeFunctionOnNextCall(f);"
"f();");
@@ -2160,6 +2036,7 @@ TEST(LeakNativeContextViaFunction) {
CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
v8::Local<v8::Value> res = CompileRun(
"function f(x) { return x(); }"
+ "%PrepareFunctionForOptimization(f);"
"for (var i = 0; i < 10; ++i) f(o);"
"%OptimizeFunctionOnNextCall(f);"
"f(o);");
@@ -2207,6 +2084,7 @@ TEST(LeakNativeContextViaMapKeyed) {
CHECK(ctx2->Global()->Set(ctx2, v8_str("o"), v).FromJust());
v8::Local<v8::Value> res = CompileRun(
"function f() { return o[0]; }"
+ "%PrepareFunctionForOptimization(f);"
"for (var i = 0; i < 10; ++i) f();"
"%OptimizeFunctionOnNextCall(f);"
"f();");
@@ -2258,6 +2136,7 @@ TEST(LeakNativeContextViaMapProto) {
" p.__proto__ = o;"
" return p.x;"
"}"
+ "%PrepareFunctionForOptimization(f);"
"for (var i = 0; i < 10; ++i) f();"
"%OptimizeFunctionOnNextCall(f);"
"f();");
@@ -2299,6 +2178,7 @@ TEST(InstanceOfStubWriteBarrier) {
"function mkbar () { return new (new Function(\"\")) (); }"
"function f (x) { return (x instanceof foo); }"
"function g () { f(mkbar()); }"
+ "%PrepareFunctionForOptimization(f);"
"f(new foo()); f(new foo());"
"%OptimizeFunctionOnNextCall(f);"
"f(new foo()); g();");
@@ -2395,7 +2275,7 @@ HEAP_TEST(Regress845060) {
// Run the test (which allocates results) until the original string was
// promoted to old space. Unmapping of from_space causes accesses to any
// stale raw pointers to crash.
- CompileRun("while (%InNewSpace(str)) { str.split(''); }");
+ CompileRun("while (%InYoungGeneration(str)) { str.split(''); }");
CHECK(!Heap::InYoungGeneration(*v8::Utils::OpenHandle(*str)));
}
@@ -2452,6 +2332,7 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
" }"
"}"
"function f(x) { return new c(x); };"
+ "%PrepareFunctionForOptimization(f);"
"f(1); f(2); f(3);"
"%OptimizeFunctionOnNextCall(f);"
"f(4);");
@@ -2494,13 +2375,14 @@ TEST(OptimizedPretenuringAllocationFolding) {
" }"
" return elements[number_elements-1]"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
"f();",
kPretenureCreationCount);
- v8::Local<v8::Value> res = CompileRun(source.start());
+ v8::Local<v8::Value> res = CompileRun(source.begin());
v8::Local<v8::Value> int_array =
v8::Object::Cast(*res)->Get(ctx, v8_str("0")).ToLocalChecked();
@@ -2547,13 +2429,14 @@ TEST(OptimizedPretenuringObjectArrayLiterals) {
" }"
" return elements[number_elements - 1];"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
"f();",
kPretenureCreationCount);
- v8::Local<v8::Value> res = CompileRun(source.start());
+ v8::Local<v8::Value> res = CompileRun(source.begin());
i::Handle<JSObject> o = Handle<JSObject>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
@@ -2590,13 +2473,14 @@ TEST(OptimizedPretenuringNestedInObjectProperties) {
" }"
" return elements[number_elements-1];"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
"f();",
kPretenureCreationCount);
- v8::Local<v8::Value> res = CompileRun(source.start());
+ v8::Local<v8::Value> res = CompileRun(source.begin());
i::Handle<JSObject> o = Handle<JSObject>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
@@ -2632,13 +2516,14 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
" }"
" return elements[number_elements - 1];"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
"f();",
kPretenureCreationCount);
- v8::Local<v8::Value> res = CompileRun(source.start());
+ v8::Local<v8::Value> res = CompileRun(source.begin());
i::Handle<JSObject> o = Handle<JSObject>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
@@ -2655,12 +2540,12 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
JSObject inner_object = JSObject::cast(o->RawFastPropertyAt(idx1));
CHECK(CcTest::heap()->InOldSpace(inner_object));
- if (!inner_object->IsUnboxedDoubleField(idx1)) {
- CHECK(CcTest::heap()->InOldSpace(inner_object->RawFastPropertyAt(idx1)));
+ if (!inner_object.IsUnboxedDoubleField(idx1)) {
+ CHECK(CcTest::heap()->InOldSpace(inner_object.RawFastPropertyAt(idx1)));
} else {
- CHECK_EQ(2.2, inner_object->RawFastDoublePropertyAt(idx1));
+ CHECK_EQ(2.2, inner_object.RawFastDoublePropertyAt(idx1));
}
- CHECK(CcTest::heap()->InOldSpace(inner_object->RawFastPropertyAt(idx2)));
+ CHECK(CcTest::heap()->InOldSpace(inner_object.RawFastPropertyAt(idx2)));
}
@@ -2689,13 +2574,14 @@ TEST(OptimizedPretenuringDoubleArrayProperties) {
" }"
" return elements[i - 1];"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
"f();",
kPretenureCreationCount);
- v8::Local<v8::Value> res = CompileRun(source.start());
+ v8::Local<v8::Value> res = CompileRun(source.begin());
i::Handle<JSObject> o = Handle<JSObject>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
@@ -2731,13 +2617,14 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
" }"
" return elements[number_elements - 1];"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
"f();",
kPretenureCreationCount);
- v8::Local<v8::Value> res = CompileRun(source.start());
+ v8::Local<v8::Value> res = CompileRun(source.begin());
i::Handle<JSObject> o = Handle<JSObject>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(res)));
@@ -2772,13 +2659,14 @@ TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
" }"
" return elements[number_elements - 1];"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
"f();",
kPretenureCreationCount);
- v8::Local<v8::Value> res = CompileRun(source.start());
+ v8::Local<v8::Value> res = CompileRun(source.begin());
v8::Local<v8::Value> int_array =
v8::Object::Cast(*res)->Get(ctx, v8_str("0")).ToLocalChecked();
@@ -2824,13 +2712,14 @@ TEST(OptimizedPretenuringNestedObjectLiterals) {
" }"
" return elements[number_elements - 1];"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
"f();",
kPretenureCreationCount);
- v8::Local<v8::Value> res = CompileRun(source.start());
+ v8::Local<v8::Value> res = CompileRun(source.begin());
v8::Local<v8::Value> int_array_1 =
v8::Object::Cast(*res)->Get(ctx, v8_str("0")).ToLocalChecked();
@@ -2876,13 +2765,14 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
" }"
" return elements[number_elements - 1];"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
"f();",
kPretenureCreationCount);
- v8::Local<v8::Value> res = CompileRun(source.start());
+ v8::Local<v8::Value> res = CompileRun(source.begin());
v8::Local<v8::Value> double_array_1 =
v8::Object::Cast(*res)->Get(ctx, v8_str("0")).ToLocalChecked();
@@ -2919,6 +2809,7 @@ TEST(OptimizedAllocationArrayLiterals) {
" numbers[0] = 3.14;"
" return numbers;"
"};"
+ "%PrepareFunctionForOptimization(f);"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
"f();");
@@ -2962,7 +2853,7 @@ TEST(Regress1465) {
for (int i = 0; i < transitions_count; i++) {
EmbeddedVector<char, 64> buffer;
SNPrintF(buffer, "var o = new F; o.prop%d = %d;", i, i);
- CompileRun(buffer.start());
+ CompileRun(buffer.begin());
}
CompileRun("var root = new F;");
}
@@ -3000,7 +2891,7 @@ static void AddTransitions(int transitions_count) {
for (int i = 0; i < transitions_count; i++) {
EmbeddedVector<char, 64> buffer;
SNPrintF(buffer, "var o = new F; o.prop%d = %d;", i, i);
- CompileRun(buffer.start());
+ CompileRun(buffer.begin());
}
}
@@ -3046,7 +2937,7 @@ TEST(TransitionArrayShrinksDuringAllocToZero) {
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
int transitions_after =
- CountMapTransitions(i_isolate, Map::cast(root->map()->GetBackPointer()));
+ CountMapTransitions(i_isolate, Map::cast(root->map().GetBackPointer()));
CHECK_EQ(1, transitions_after);
}
@@ -3075,7 +2966,7 @@ TEST(TransitionArrayShrinksDuringAllocToOne) {
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
int transitions_after =
- CountMapTransitions(i_isolate, Map::cast(root->map()->GetBackPointer()));
+ CountMapTransitions(i_isolate, Map::cast(root->map().GetBackPointer()));
CHECK_EQ(2, transitions_after);
}
@@ -3104,7 +2995,7 @@ TEST(TransitionArrayShrinksDuringAllocToOnePropertyFound) {
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
int transitions_after =
- CountMapTransitions(i_isolate, Map::cast(root->map()->GetBackPointer()));
+ CountMapTransitions(i_isolate, Map::cast(root->map().GetBackPointer()));
CHECK_EQ(1, transitions_after);
}
#endif // DEBUG
@@ -3214,7 +3105,7 @@ TEST(PrintSharedFunctionInfo) {
CcTest::global()->Get(ctx, v8_str("g")).ToLocalChecked())));
StdoutStream os;
- g->shared()->Print(os);
+ g->shared().Print(os);
os << std::endl;
}
#endif // OBJECT_PRINT
@@ -3578,23 +3469,23 @@ TEST(DetailedErrorStackTrace) {
DetailedErrorStackTraceTest(source, [](Handle<FrameArray> stack_trace) {
FixedArray foo_parameters = stack_trace->Parameters(0);
- CHECK_EQ(foo_parameters->length(), 1);
- CHECK(foo_parameters->get(0)->IsSmi());
- CHECK_EQ(Smi::ToInt(foo_parameters->get(0)), 42);
+ CHECK_EQ(foo_parameters.length(), 1);
+ CHECK(foo_parameters.get(0).IsSmi());
+ CHECK_EQ(Smi::ToInt(foo_parameters.get(0)), 42);
FixedArray bar_parameters = stack_trace->Parameters(1);
- CHECK_EQ(bar_parameters->length(), 2);
- CHECK(bar_parameters->get(0)->IsJSObject());
- CHECK(bar_parameters->get(1)->IsBoolean());
+ CHECK_EQ(bar_parameters.length(), 2);
+ CHECK(bar_parameters.get(0).IsJSObject());
+ CHECK(bar_parameters.get(1).IsBoolean());
Handle<Object> foo = Handle<Object>::cast(GetByName("foo"));
- CHECK_EQ(bar_parameters->get(0), *foo);
- CHECK(!bar_parameters->get(1)->BooleanValue(CcTest::i_isolate()));
+ CHECK_EQ(bar_parameters.get(0), *foo);
+ CHECK(!bar_parameters.get(1).BooleanValue(CcTest::i_isolate()));
FixedArray main_parameters = stack_trace->Parameters(2);
- CHECK_EQ(main_parameters->length(), 2);
- CHECK(main_parameters->get(0)->IsJSObject());
- CHECK(main_parameters->get(1)->IsUndefined());
- CHECK_EQ(main_parameters->get(0), *foo);
+ CHECK_EQ(main_parameters.length(), 2);
+ CHECK(main_parameters.get(0).IsJSObject());
+ CHECK(main_parameters.get(1).IsUndefined());
+ CHECK_EQ(main_parameters.get(0), *foo);
});
}
@@ -3602,30 +3493,31 @@ TEST(DetailedErrorStackTrace) {
TEST(DetailedErrorStackTraceInline) {
FLAG_allow_natives_syntax = true;
static const char* source =
- "function add(x) { "
- " if (x == 42) "
- " throw new Error(); "
- " return x + x; "
- "} "
- "add(0); "
- "add(1); "
- "function foo(x) { "
- " return add(x + 1) "
- "} "
- "foo(40); "
- "%OptimizeFunctionOnNextCall(foo); "
- "foo(41); ";
+ "function add(x) { "
+ " if (x == 42) "
+ " throw new Error(); "
+ " return x + x; "
+ "} "
+ "add(0); "
+ "add(1); "
+ "function foo(x) { "
+ " return add(x + 1) "
+ "} "
+ "%PrepareFunctionForOptimization(foo); "
+ "foo(40); "
+ "%OptimizeFunctionOnNextCall(foo); "
+ "foo(41); ";
DetailedErrorStackTraceTest(source, [](Handle<FrameArray> stack_trace) {
FixedArray parameters_add = stack_trace->Parameters(0);
- CHECK_EQ(parameters_add->length(), 1);
- CHECK(parameters_add->get(0)->IsSmi());
- CHECK_EQ(Smi::ToInt(parameters_add->get(0)), 42);
+ CHECK_EQ(parameters_add.length(), 1);
+ CHECK(parameters_add.get(0).IsSmi());
+ CHECK_EQ(Smi::ToInt(parameters_add.get(0)), 42);
FixedArray parameters_foo = stack_trace->Parameters(1);
- CHECK_EQ(parameters_foo->length(), 1);
- CHECK(parameters_foo->get(0)->IsSmi());
- CHECK_EQ(Smi::ToInt(parameters_foo->get(0)), 41);
+ CHECK_EQ(parameters_foo.length(), 1);
+ CHECK(parameters_foo.get(0).IsSmi());
+ CHECK_EQ(Smi::ToInt(parameters_foo.get(0)), 41);
});
}
@@ -3640,9 +3532,9 @@ TEST(DetailedErrorStackTraceBuiltinExit) {
DetailedErrorStackTraceTest(source, [](Handle<FrameArray> stack_trace) {
FixedArray parameters = stack_trace->Parameters(0);
- CHECK_EQ(parameters->length(), 2);
- CHECK(parameters->get(0)->IsSmi());
- CHECK_EQ(Smi::ToInt(parameters->get(0)), 9999);
+ CHECK_EQ(parameters.length(), 2);
+ CHECK(parameters.get(0).IsSmi());
+ CHECK_EQ(Smi::ToInt(parameters.get(0)), 9999);
});
}
@@ -3711,7 +3603,7 @@ TEST(Regress169928) {
CcTest::heap()->new_space()->AllocateRawUnaligned(
AllocationMemento::kSize + kTaggedSize);
CHECK(allocation.To(&obj));
- Address addr_obj = obj->address();
+ Address addr_obj = obj.address();
CcTest::heap()->CreateFillerObjectAt(addr_obj,
AllocationMemento::kSize + kTaggedSize,
ClearRecordedSlots::kNo);
@@ -3830,17 +3722,19 @@ TEST(DisableInlineAllocation) {
FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
- CompileRun("function test() {"
- " var x = [];"
- " for (var i = 0; i < 10; i++) {"
- " x[i] = [ {}, [1,2,3], [1,x,3] ];"
- " }"
- "}"
- "function run() {"
- " %OptimizeFunctionOnNextCall(test);"
- " test();"
- " %DeoptimizeFunction(test);"
- "}");
+ CompileRun(
+ "function test() {"
+ " var x = [];"
+ " for (var i = 0; i < 10; i++) {"
+ " x[i] = [ {}, [1,2,3], [1,x,3] ];"
+ " }"
+ "}"
+ "function run() {"
+ " %PrepareFunctionForOptimization(test);"
+ " %OptimizeFunctionOnNextCall(test);"
+ " test();"
+ " %DeoptimizeFunction(test);"
+ "}");
// Warm-up with inline allocation enabled.
CompileRun("test(); test(); run();");
@@ -3857,10 +3751,10 @@ TEST(DisableInlineAllocation) {
static int AllocationSitesCount(Heap* heap) {
int count = 0;
- for (Object site = heap->allocation_sites_list(); site->IsAllocationSite();) {
+ for (Object site = heap->allocation_sites_list(); site.IsAllocationSite();) {
AllocationSite cur = AllocationSite::cast(site);
- CHECK(cur->HasWeakNext());
- site = cur->weak_next();
+ CHECK(cur.HasWeakNext());
+ site = cur.weak_next();
count++;
}
return count;
@@ -3869,15 +3763,15 @@ static int AllocationSitesCount(Heap* heap) {
static int SlimAllocationSiteCount(Heap* heap) {
int count = 0;
for (Object weak_list = heap->allocation_sites_list();
- weak_list->IsAllocationSite();) {
+ weak_list.IsAllocationSite();) {
AllocationSite weak_cur = AllocationSite::cast(weak_list);
- for (Object site = weak_cur->nested_site(); site->IsAllocationSite();) {
+ for (Object site = weak_cur.nested_site(); site.IsAllocationSite();) {
AllocationSite cur = AllocationSite::cast(site);
- CHECK(!cur->HasWeakNext());
- site = cur->nested_site();
+ CHECK(!cur.HasWeakNext());
+ site = cur.nested_site();
count++;
}
- weak_list = weak_cur->weak_next();
+ weak_list = weak_cur.weak_next();
}
return count;
}
@@ -3899,10 +3793,12 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
v8::HandleScope scope(context->GetIsolate());
int count = AllocationSitesCount(heap);
- CompileRun("var bar = function() { return (new Array()); };"
- "var a = bar();"
- "bar();"
- "bar();");
+ CompileRun(
+ "var bar = function() { return (new Array()); };"
+ "%PrepareFunctionForOptimization(bar);"
+ "var a = bar();"
+ "bar();"
+ "bar();");
// One allocation site should have been created.
int new_count = AllocationSitesCount(heap);
@@ -3922,16 +3818,16 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
int dependency_group_count = 0;
DependentCode dependency = site->dependent_code();
while (dependency != ReadOnlyRoots(heap).empty_weak_fixed_array()) {
- CHECK(dependency->group() ==
+ CHECK(dependency.group() ==
DependentCode::kAllocationSiteTransitionChangedGroup ||
- dependency->group() ==
+ dependency.group() ==
DependentCode::kAllocationSiteTenuringChangedGroup);
- CHECK_EQ(1, dependency->count());
- CHECK(dependency->object_at(0)->IsWeak());
+ CHECK_EQ(1, dependency.count());
+ CHECK(dependency.object_at(0)->IsWeak());
Code function_bar =
- Code::cast(dependency->object_at(0)->GetHeapObjectAssumeWeak());
+ Code::cast(dependency.object_at(0)->GetHeapObjectAssumeWeak());
CHECK_EQ(bar_handle->code(), function_bar);
- dependency = dependency->next_link();
+ dependency = dependency.next_link();
dependency_group_count++;
}
// Expect a dependent code object for transitioning and pretenuring.
@@ -3946,7 +3842,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
// The site still exists because of our global handle, but the code is no
// longer referred to by dependent_code().
- CHECK(site->dependent_code()->object_at(0)->IsCleared());
+ CHECK(site->dependent_code().object_at(0)->IsCleared());
}
void CheckNumberOfAllocations(Heap* heap, const char* source,
@@ -3965,72 +3861,67 @@ void CheckNumberOfAllocations(Heap* heap, const char* source,
}
TEST(AllocationSiteCreation) {
- // No feedback vectors and hence no allocation sites.
- if (FLAG_lite_mode) return;
FLAG_always_opt = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
- i::FLAG_enable_one_shot_optimization = true;
+ i::FLAG_allow_natives_syntax = true;
// Array literals.
- CheckNumberOfAllocations(heap, "function f1() { return []; }; f1()", 1, 0);
- CheckNumberOfAllocations(heap, "function f2() { return [1, 2]; }; f2()", 1,
- 0);
- CheckNumberOfAllocations(heap, "function f3() { return [[1], [2]]; }; f3()",
+ CheckNumberOfAllocations(heap,
+ "function f1() {"
+ " return []; "
+ "};"
+ "%EnsureFeedbackVectorForFunction(f1); f1();",
+ 1, 0);
+ CheckNumberOfAllocations(heap,
+ "function f2() {"
+ " return [1, 2];"
+ "};"
+ "%EnsureFeedbackVectorForFunction(f2); f2();",
+ 1, 0);
+ CheckNumberOfAllocations(heap,
+ "function f3() {"
+ " return [[1], [2]];"
+ "};"
+ "%EnsureFeedbackVectorForFunction(f3); f3();",
1, 2);
-
CheckNumberOfAllocations(heap,
"function f4() { "
"return [0, [1, 1.1, 1.2, "
"], 1.5, [2.1, 2.2], 3];"
- "}; f4();",
+ "};"
+ "%EnsureFeedbackVectorForFunction(f4); f4();",
1, 2);
- // No allocation sites within IIFE/top-level
- CheckNumberOfAllocations(heap,
- R"(
- (function f4() {
- return [ 0, [ 1, 1.1, 1.2,], 1.5, [2.1, 2.2], 3 ];
- })();
- )",
- 0, 0);
-
- CheckNumberOfAllocations(heap,
- R"(
- l = [ 1, 2, 3, 4];
- )",
- 0, 0);
-
- CheckNumberOfAllocations(heap,
- R"(
- a = [];
- )",
- 0, 0);
-
+ // Object literals have lazy AllocationSites
CheckNumberOfAllocations(heap,
- R"(
- (function f4() {
- return [];
- })();
- )",
+ "function f5() {"
+ " return {};"
+ "};"
+ "%EnsureFeedbackVectorForFunction(f5); f5();",
0, 0);
- // Object literals have lazy AllocationSites
- CheckNumberOfAllocations(heap, "function f5() { return {}; }; f5(); ", 0, 0);
-
// No AllocationSites are created for the empty object literal.
for (int i = 0; i < 5; i++) {
CheckNumberOfAllocations(heap, "f5(); ", 0, 0);
}
- CheckNumberOfAllocations(heap, "function f6() { return {a:1}; }; f6(); ", 0,
- 0);
+ CheckNumberOfAllocations(heap,
+ "function f6() {"
+ " return {a:1};"
+ "};"
+ "%EnsureFeedbackVectorForFunction(f6); f6();",
+ 0, 0);
CheckNumberOfAllocations(heap, "f6(); ", 1, 0);
- CheckNumberOfAllocations(heap, "function f7() { return {a:1, b:2}; }; f7(); ",
+ CheckNumberOfAllocations(heap,
+ "function f7() {"
+ " return {a:1, b:2};"
+ "};"
+ "%EnsureFeedbackVectorForFunction(f7); f7(); ",
0, 0);
CheckNumberOfAllocations(heap, "f7(); ", 1, 0);
@@ -4038,7 +3929,8 @@ TEST(AllocationSiteCreation) {
CheckNumberOfAllocations(heap,
"function f8() {"
"return {a:{}, b:{ a:2, c:{ d:{f:{}}} } }; "
- "}; f8(); ",
+ "};"
+ "%EnsureFeedbackVectorForFunction(f8); f8();",
0, 0);
CheckNumberOfAllocations(heap, "f8(); ", 1, 0);
@@ -4047,11 +3939,54 @@ TEST(AllocationSiteCreation) {
CheckNumberOfAllocations(heap,
"function f9() {"
"return {a:[1, 2, 3], b:{ a:2, c:{ d:{f:[]} } }}; "
- "}; f9(); ",
+ "};"
+ "%EnsureFeedbackVectorForFunction(f9); f9(); ",
1, 2);
// No new AllocationSites created on the second invocation.
CheckNumberOfAllocations(heap, "f9(); ", 0, 0);
+}
+
+TEST(AllocationSiteCreationForIIFE) {
+ // No feedback vectors and hence no allocation sites.
+ // TODO(mythria): Once lazy feedback allocation is enabled by default
+ // re-evaluate if we need any of these tests.
+ if (FLAG_lite_mode || FLAG_lazy_feedback_allocation) return;
+ FLAG_always_opt = false;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+ i::FLAG_enable_one_shot_optimization = true;
+
+ // No allocation sites within IIFE/top-level
+ CheckNumberOfAllocations(heap,
+ R"(
+ (function f4() {
+ return [ 0, [ 1, 1.1, 1.2,], 1.5, [2.1, 2.2], 3 ];
+ })();
+ )",
+ 0, 0);
+
+ CheckNumberOfAllocations(heap,
+ R"(
+ l = [ 1, 2, 3, 4];
+ )",
+ 0, 0);
+
+ CheckNumberOfAllocations(heap,
+ R"(
+ a = [];
+ )",
+ 0, 0);
+
+ CheckNumberOfAllocations(heap,
+ R"(
+ (function f4() {
+ return [];
+ })();
+ )",
+ 0, 0);
// No allocation sites for literals in an iife/top level code even if it has
// array subliterals
@@ -4119,6 +4054,7 @@ TEST(CellsInOptimizedCodeAreWeak) {
" function bar() {"
" return foo(1);"
" };"
+ " %PrepareFunctionForOptimization(bar);"
" var foo = function(x) { with (x) { return 1 + x; } };"
" %NeverOptimizeFunction(foo);"
" bar(foo);"
@@ -4163,6 +4099,7 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
"function bar() {"
" return foo(1);"
"};"
+ "%PrepareFunctionForOptimization(bar);"
"function foo(x) { with (x) { return 1 + x; } };"
"%NeverOptimizeFunction(foo);"
"bar();"
@@ -4209,6 +4146,7 @@ TEST(NewSpaceObjectsInOptimizedCode) {
" function bar_func() {"
" return foo(1);"
" };"
+ " %PrepareFunctionForOptimization(bar_func);"
" bar = bar_func;"
" foo = foo_func;"
" bar_func();"
@@ -4235,7 +4173,7 @@ TEST(NewSpaceObjectsInOptimizedCode) {
#ifdef VERIFY_HEAP
CcTest::heap()->Verify();
#endif
- CHECK(!bar->code()->marked_for_deoptimization());
+ CHECK(!bar->code().marked_for_deoptimization());
code = scope.CloseAndEscape(Handle<Code>(bar->code(), isolate));
}
@@ -4268,6 +4206,7 @@ TEST(ObjectsInEagerlyDeoptimizedCodeAreWeak) {
"};"
"function foo(x) { with (x) { return 1 + x; } };"
"%NeverOptimizeFunction(foo);"
+ "%PrepareFunctionForOptimization(bar);"
"bar();"
"bar();"
"bar();"
@@ -4297,11 +4236,13 @@ static Handle<JSFunction> OptimizeDummyFunction(v8::Isolate* isolate,
const char* name) {
EmbeddedVector<char, 256> source;
SNPrintF(source,
- "function %s() { return 0; }"
- "%s(); %s();"
- "%%OptimizeFunctionOnNextCall(%s);"
- "%s();", name, name, name, name, name);
- CompileRun(source.start());
+ "function %s() { return 0; }"
+ "%%PrepareFunctionForOptimization(%s);"
+ "%s(); %s();"
+ "%%OptimizeFunctionOnNextCall(%s);"
+ "%s();",
+ name, name, name, name, name, name);
+ CompileRun(source.begin());
i::Handle<JSFunction> fun = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()
@@ -4312,9 +4253,9 @@ static Handle<JSFunction> OptimizeDummyFunction(v8::Isolate* isolate,
static int GetCodeChainLength(Code code) {
int result = 0;
- while (code->next_code_link()->IsCode()) {
+ while (code.next_code_link().IsCode()) {
result++;
- code = Code::cast(code->next_code_link());
+ code = Code::cast(code.next_code_link());
}
return result;
}
@@ -4338,7 +4279,7 @@ TEST(NextCodeLinkIsWeak) {
OptimizeDummyFunction(CcTest::isolate(), "mortal");
Handle<JSFunction> immortal =
OptimizeDummyFunction(CcTest::isolate(), "immortal");
- CHECK_EQ(immortal->code()->next_code_link(), mortal->code());
+ CHECK_EQ(immortal->code().next_code_link(), mortal->code());
code_chain_length_before = GetCodeChainLength(immortal->code());
// Keep the immortal code and let the mortal code die.
code = scope.CloseAndEscape(Handle<Code>(immortal->code(), isolate));
@@ -4366,13 +4307,13 @@ TEST(NextCodeLinkInCodeDataContainerIsCleared) {
OptimizeDummyFunction(CcTest::isolate(), "mortal1");
Handle<JSFunction> mortal2 =
OptimizeDummyFunction(CcTest::isolate(), "mortal2");
- CHECK_EQ(mortal2->code()->next_code_link(), mortal1->code());
+ CHECK_EQ(mortal2->code().next_code_link(), mortal1->code());
code_data_container = scope.CloseAndEscape(Handle<CodeDataContainer>(
- mortal2->code()->code_data_container(), isolate));
+ mortal2->code().code_data_container(), isolate));
CompileRun("mortal1 = null; mortal2 = null;");
}
CcTest::CollectAllAvailableGarbage();
- CHECK(code_data_container->next_code_link()->IsUndefined(isolate));
+ CHECK(code_data_container->next_code_link().IsUndefined(isolate));
}
static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
@@ -4384,8 +4325,10 @@ static Handle<Code> DummyOptimizedCode(Isolate* isolate) {
masm.Push(isolate->factory()->undefined_value());
masm.Drop(2);
masm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::OPTIMIZED_FUNCTION, masm.CodeObject());
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, Code::OPTIMIZED_FUNCTION)
+ .set_self_reference(masm.CodeObject())
+ .Build();
CHECK(code->IsCode());
return code;
}
@@ -4429,10 +4372,10 @@ static void ClearWeakIC(
TEST(WeakFunctionInConstructor) {
- if (FLAG_lite_mode) return;
if (FLAG_always_opt) return;
FLAG_stress_compaction = false;
FLAG_stress_incremental_marking = false;
+ FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
LocalContext env;
@@ -4453,6 +4396,8 @@ TEST(WeakFunctionInConstructor) {
const char* source =
" (function() {"
" function hat() { this.x = 5; }"
+ " %EnsureFeedbackVectorForFunction(hat);"
+ " %EnsureFeedbackVectorForFunction(createObj);"
" createObj(hat);"
" createObj(hat);"
" return hat;"
@@ -4798,6 +4743,7 @@ TEST(AddInstructionChangesNewSpacePromotion) {
" oldSpaceObject = object;"
" return object;"
"}"
+ "%PrepareFunctionForOptimization(crash);"
"crash(1);"
"crash(1);"
"%OptimizeFunctionOnNextCall(crash);"
@@ -4926,7 +4872,7 @@ TEST(Regress507979) {
for (HeapObject obj = it.next(); !obj.is_null(); obj = it.next()) {
// Let's not optimize the loop away.
- CHECK_NE(obj->address(), kNullAddress);
+ CHECK_NE(obj.address(), kNullAddress);
}
}
@@ -5026,7 +4972,7 @@ TEST(Regress442710) {
Factory* factory = isolate->factory();
HandleScope sc(isolate);
- Handle<JSGlobalObject> global(CcTest::i_isolate()->context()->global_object(),
+ Handle<JSGlobalObject> global(CcTest::i_isolate()->context().global_object(),
isolate);
Handle<JSArray> array = factory->NewJSArray(2);
@@ -5044,7 +4990,7 @@ HEAP_TEST(NumberStringCacheSize) {
if (!isolate->snapshot_available()) return;
Heap* heap = isolate->heap();
CHECK_EQ(Heap::kInitialNumberStringCacheSize * 2,
- heap->number_string_cache()->length());
+ heap->number_string_cache().length());
}
@@ -5238,7 +5184,7 @@ static void CheckLeak(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = CcTest::i_isolate();
Object message(
*reinterpret_cast<Address*>(isolate->pending_message_obj_address()));
- CHECK(message->IsTheHole(isolate));
+ CHECK(message.IsTheHole(isolate));
}
@@ -5269,7 +5215,7 @@ TEST(MessageObjectLeak) {
CompileRun(test);
const char* flag = "--turbo-filter=*";
- FlagList::SetFlagsFromString(flag, StrLength(flag));
+ FlagList::SetFlagsFromString(flag, strlen(flag));
FLAG_always_opt = true;
CompileRun(test);
@@ -5337,7 +5283,7 @@ TEST(ScriptIterator) {
{
HeapIterator it(heap);
for (HeapObject obj = it.next(); !obj.is_null(); obj = it.next()) {
- if (obj->IsScript()) script_count++;
+ if (obj.IsScript()) script_count++;
}
}
@@ -5367,7 +5313,7 @@ TEST(SharedFunctionInfoIterator) {
{
HeapIterator it(heap);
for (HeapObject obj = it.next(); !obj.is_null(); obj = it.next()) {
- if (!obj->IsSharedFunctionInfo()) continue;
+ if (!obj.IsSharedFunctionInfo()) continue;
sfi_count++;
}
}
@@ -5392,10 +5338,10 @@ AllocationResult HeapTester::AllocateByteArrayForTest(
if (!allocation.To(&result)) return allocation;
}
- result->set_map_after_allocation(ReadOnlyRoots(heap).byte_array_map(),
- SKIP_WRITE_BARRIER);
- ByteArray::cast(result)->set_length(length);
- ByteArray::cast(result)->clear_padding();
+ result.set_map_after_allocation(ReadOnlyRoots(heap).byte_array_map(),
+ SKIP_WRITE_BARRIER);
+ ByteArray::cast(result).set_length(length);
+ ByteArray::cast(result).clear_padding();
return result;
}
@@ -5430,7 +5376,7 @@ HEAP_TEST(Regress587004) {
while (
AllocateByteArrayForTest(heap, M, AllocationType::kOld).To(&byte_array)) {
for (int j = 0; j < M; j++) {
- byte_array->set(j, 0x31);
+ byte_array.set(j, 0x31);
}
}
// Re-enable old space expansion to avoid OOM crash.
@@ -5458,7 +5404,7 @@ HEAP_TEST(Regress589413) {
while (AllocateByteArrayForTest(heap, M, AllocationType::kYoung)
.To(&byte_array)) {
for (int j = 0; j < M; j++) {
- byte_array->set(j, 0x31);
+ byte_array.set(j, 0x31);
}
// Add the array in root set.
handle(byte_array, isolate);
@@ -5504,7 +5450,7 @@ HEAP_TEST(Regress589413) {
for (size_t j = 0; j < arrays.size(); j++) {
array = arrays[j];
for (int i = 0; i < N; i++) {
- array->set(i, *ec_obj);
+ array.set(i, *ec_obj);
}
}
}
@@ -5541,9 +5487,9 @@ TEST(Regress598319) {
Handle<FixedArray> tmp = isolate->factory()->NewFixedArray(
number_of_objects, AllocationType::kOld);
root->set(0, *tmp);
- for (int i = 0; i < get()->length(); i++) {
+ for (int i = 0; i < get().length(); i++) {
tmp = isolate->factory()->NewFixedArray(100, AllocationType::kOld);
- get()->set(i, *tmp);
+ get().set(i, *tmp);
}
}
}
@@ -5553,7 +5499,7 @@ TEST(Regress598319) {
Handle<FixedArray> root;
} arr(isolate, kNumberOfObjects);
- CHECK_EQ(arr.get()->length(), kNumberOfObjects);
+ CHECK_EQ(arr.get().length(), kNumberOfObjects);
CHECK(heap->lo_space()->Contains(arr.get()));
LargePage* page = LargePage::FromHeapObject(arr.get());
CHECK_NOT_NULL(page);
@@ -5569,8 +5515,8 @@ TEST(Regress598319) {
IncrementalMarking* marking = heap->incremental_marking();
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
CHECK(marking_state->IsWhite(arr.get()));
- for (int i = 0; i < arr.get()->length(); i++) {
- HeapObject arr_value = HeapObject::cast(arr.get()->get(i));
+ for (int i = 0; i < arr.get().length(); i++) {
+ HeapObject arr_value = HeapObject::cast(arr.get().get(i));
CHECK(marking_state->IsWhite(arr_value));
}
@@ -5583,8 +5529,8 @@ TEST(Regress598319) {
CHECK(marking->IsMarking());
// Check that we have not marked the interesting array during root scanning.
- for (int i = 0; i < arr.get()->length(); i++) {
- HeapObject arr_value = HeapObject::cast(arr.get()->get(i));
+ for (int i = 0; i < arr.get().length(); i++) {
+ HeapObject arr_value = HeapObject::cast(arr.get().get(i));
CHECK(marking_state->IsWhite(arr_value));
}
@@ -5596,7 +5542,7 @@ TEST(Regress598319) {
i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->ProgressBar() > 0) {
- CHECK_NE(page->ProgressBar(), arr.get()->Size());
+ CHECK_NE(page->ProgressBar(), arr.get().Size());
{
// Shift by 1, effectively moving one white object across the progress
// bar, meaning that we will miss marking it.
@@ -5623,8 +5569,8 @@ TEST(Regress598319) {
// All objects need to be black after marking. If a white object crossed the
// progress bar, we would fail here.
- for (int i = 0; i < arr.get()->length(); i++) {
- HeapObject arr_value = HeapObject::cast(arr.get()->get(i));
+ for (int i = 0; i < arr.get().length(); i++) {
+ HeapObject arr_value = HeapObject::cast(arr.get().get(i));
CHECK(marking_state->IsBlack(arr_value));
}
}
@@ -5862,8 +5808,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
// First trim in one word steps.
for (int i = 0; i < 10; i++) {
trimmed = heap->LeftTrimFixedArray(previous, 1);
- HeapObject filler = HeapObject::FromAddress(previous->address());
- CHECK(filler->IsFiller());
+ HeapObject filler = HeapObject::FromAddress(previous.address());
+ CHECK(filler.IsFiller());
CHECK(marking_state->IsBlack(trimmed));
CHECK(marking_state->IsBlack(previous));
previous = trimmed;
@@ -5873,8 +5819,8 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
for (int i = 2; i <= 3; i++) {
for (int j = 0; j < 10; j++) {
trimmed = heap->LeftTrimFixedArray(previous, i);
- HeapObject filler = HeapObject::FromAddress(previous->address());
- CHECK(filler->IsFiller());
+ HeapObject filler = HeapObject::FromAddress(previous.address());
+ CHECK(filler.IsFiller());
CHECK(marking_state->IsBlack(trimmed));
CHECK(marking_state->IsBlack(previous));
previous = trimmed;
@@ -5930,7 +5876,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
isolate->heap()->RightTrimFixedArray(*array, 1);
HeapObject filler = HeapObject::FromAddress(previous);
- CHECK(filler->IsFiller());
+ CHECK(filler.IsFiller());
CHECK(marking_state->IsImpossible(filler));
// Trim 10 times by one, two, and three word.
@@ -5939,7 +5885,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
previous -= kTaggedSize * i;
isolate->heap()->RightTrimFixedArray(*array, i);
HeapObject filler = HeapObject::FromAddress(previous);
- CHECK(filler->IsFiller());
+ CHECK(filler.IsFiller());
CHECK(marking_state->IsWhite(filler));
}
}
@@ -6198,9 +6144,8 @@ Handle<Code> GenerateDummyImmovableCode(Isolate* isolate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::STUB, Handle<Code>(), Builtins::kNoBuiltinId,
- MaybeHandle<ByteArray>(), DeoptimizationData::Empty(isolate), kImmovable);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, Code::STUB).set_immovable().Build();
CHECK(code->IsCode());
return code;
@@ -6324,7 +6269,7 @@ HEAP_TEST(RegressMissingWriteBarrierInAllocate) {
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
}
- CHECK(object->map()->IsMap());
+ CHECK(object->map().IsMap());
}
HEAP_TEST(MarkCompactEpochCounter) {
@@ -6363,6 +6308,7 @@ UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
v8::Context::Scope context_scope(context);
}
isolate->Dispose();
+ ReadOnlyHeap::ClearSharedHeapForTest();
}
}
@@ -6714,6 +6660,32 @@ HEAP_TEST(MemoryReducerActivationForSmallHeaps) {
CHECK_EQ(heap->memory_reducer()->state_.action, MemoryReducer::Action::kWait);
}
+TEST(CodeObjectRegistry) {
+ // We turn off compaction to ensure that code is not moving.
+ FLAG_never_compact = true;
+
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+
+ Handle<Code> code1;
+ HandleScope outer_scope(heap->isolate());
+ Address code2_address;
+ {
+ code1 = DummyOptimizedCode(isolate);
+ Handle<Code> code2 = DummyOptimizedCode(isolate);
+ code2_address = code2->address();
+ // If this check breaks, change the allocation to ensure that both code
+ // objects are on the same page.
+ CHECK_EQ(MemoryChunk::FromHeapObject(*code1),
+ MemoryChunk::FromHeapObject(*code2));
+ CHECK(MemoryChunk::FromHeapObject(*code1)->Contains(code1->address()));
+ CHECK(MemoryChunk::FromHeapObject(*code2)->Contains(code2->address()));
+ }
+ CcTest::CollectAllAvailableGarbage();
+ CHECK(MemoryChunk::FromHeapObject(*code1)->Contains(code1->address()));
+ CHECK(MemoryChunk::FromAddress(code2_address)->Contains(code2_address));
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-incremental-marking.cc b/deps/v8/test/cctest/heap/test-incremental-marking.cc
index d90c2c2139..88669ebb92 100644
--- a/deps/v8/test/cctest/heap/test-incremental-marking.cc
+++ b/deps/v8/test/cctest/heap/test-incremental-marking.cc
@@ -14,12 +14,12 @@
#include <utility>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/global-handles.h"
+#include "src/handles/global-handles.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
diff --git a/deps/v8/test/cctest/heap/test-invalidated-slots.cc b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
index 3f94f71bb1..bac98c8a26 100644
--- a/deps/v8/test/cctest/heap/test-invalidated-slots.cc
+++ b/deps/v8/test/cctest/heap/test-invalidated-slots.cc
@@ -4,7 +4,7 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
@@ -55,8 +55,8 @@ HEAP_TEST(InvalidatedSlotsNoInvalidatedRanges) {
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
InvalidatedSlotsFilter filter(page);
for (ByteArray byte_array : byte_arrays) {
- Address start = byte_array->address() + ByteArray::kHeaderSize;
- Address end = byte_array->address() + byte_array->Size();
+ Address start = byte_array.address() + ByteArray::kHeaderSize;
+ Address end = byte_array.address() + byte_array.Size();
for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK(filter.IsValid(addr));
}
@@ -71,13 +71,13 @@ HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
// Register every second byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i += 2) {
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
- byte_arrays[i]->Size());
+ byte_arrays[i].Size());
}
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
- Address start = byte_array->address() + ByteArray::kHeaderSize;
- Address end = byte_array->address() + byte_array->Size();
+ Address start = byte_array.address() + ByteArray::kHeaderSize;
+ Address end = byte_array.address() + byte_array.Size();
for (Address addr = start; addr < end; addr += kTaggedSize) {
if (i % 2 == 0) {
CHECK(!filter.IsValid(addr));
@@ -96,13 +96,13 @@ HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
// Register the all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
- byte_arrays[i]->Size());
+ byte_arrays[i].Size());
}
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
- Address start = byte_array->address() + ByteArray::kHeaderSize;
- Address end = byte_array->address() + byte_array->Size();
+ Address start = byte_array.address() + ByteArray::kHeaderSize;
+ Address end = byte_array.address() + byte_array.Size();
for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK(!filter.IsValid(addr));
}
@@ -118,16 +118,16 @@ HEAP_TEST(InvalidatedSlotsAfterTrimming) {
// Register the all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
- byte_arrays[i]->Size());
+ byte_arrays[i].Size());
}
// Trim byte arrays and check that the slots outside the byte arrays are
// considered invalid if the old space page was swept.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
- Address start = byte_array->address() + ByteArray::kHeaderSize;
- Address end = byte_array->address() + byte_array->Size();
- heap->RightTrimFixedArray(byte_array, byte_array->length());
+ Address start = byte_array.address() + ByteArray::kHeaderSize;
+ Address end = byte_array.address() + byte_array.Size();
+ heap->RightTrimFixedArray(byte_array, byte_array.length());
for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK_EQ(filter.IsValid(addr), page->SweepingDone());
}
@@ -146,14 +146,14 @@ HEAP_TEST(InvalidatedSlotsEvacuationCandidate) {
// candidate.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
- byte_arrays[i]->Size());
+ byte_arrays[i].Size());
}
// All slots must still be valid.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
- Address start = byte_array->address() + ByteArray::kHeaderSize;
- Address end = byte_array->address() + byte_array->Size();
+ Address start = byte_array.address() + ByteArray::kHeaderSize;
+ Address end = byte_array.address() + byte_array.Size();
for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK(filter.IsValid(addr));
}
@@ -166,18 +166,18 @@ HEAP_TEST(InvalidatedSlotsResetObjectRegression) {
std::vector<ByteArray> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Ensure that the first array has smaller size then the rest.
- heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0]->length() - 8);
+ heap->RightTrimFixedArray(byte_arrays[0], byte_arrays[0].length() - 8);
// Register the all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
- byte_arrays[i]->Size());
+ byte_arrays[i].Size());
}
// All slots must still be invalid.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray byte_array = byte_arrays[i];
- Address start = byte_array->address() + ByteArray::kHeaderSize;
- Address end = byte_array->address() + byte_array->Size();
+ Address start = byte_array.address() + ByteArray::kHeaderSize;
+ Address end = byte_array.address() + byte_array.Size();
for (Address addr = start; addr < end; addr += kTaggedSize) {
CHECK(!filter.IsValid(addr));
}
@@ -326,7 +326,7 @@ HEAP_TEST(InvalidatedSlotsFastToSlow) {
{
AlwaysAllocateScope always_allocate(isolate);
Handle<JSFunction> function = factory->NewFunctionForTest(name);
- function->shared()->set_expected_nof_properties(3);
+ function->shared().set_expected_nof_properties(3);
obj = factory->NewJSObject(function, AllocationType::kOld);
}
// Start incremental marking.
diff --git a/deps/v8/test/cctest/heap/test-iterators.cc b/deps/v8/test/cctest/heap/test-iterators.cc
new file mode 100644
index 0000000000..9e39f7ca47
--- /dev/null
+++ b/deps/v8/test/cctest/heap/test-iterators.cc
@@ -0,0 +1,101 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8.h"
+#include "src/api/api-inl.h"
+#include "src/execution/isolate.h"
+#include "src/heap/combined-heap.h"
+#include "src/heap/heap.h"
+#include "src/heap/read-only-heap.h"
+#include "src/objects/heap-object.h"
+#include "src/objects/objects.h"
+#include "src/roots/roots-inl.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace heap {
+
+TEST(HeapIteratorNullPastEnd) {
+ HeapIterator iterator(CcTest::heap());
+ while (!iterator.next().is_null()) {
+ }
+ for (int i = 0; i < 20; i++) {
+ CHECK(iterator.next().is_null());
+ }
+}
+
+TEST(ReadOnlyHeapIteratorNullPastEnd) {
+ ReadOnlyHeapIterator iterator(CcTest::heap()->read_only_heap());
+ while (!iterator.Next().is_null()) {
+ }
+ for (int i = 0; i < 20; i++) {
+ CHECK(iterator.Next().is_null());
+ }
+}
+
+TEST(CombinedHeapIteratorNullPastEnd) {
+ CombinedHeapIterator iterator(CcTest::heap());
+ while (!iterator.Next().is_null()) {
+ }
+ for (int i = 0; i < 20; i++) {
+ CHECK(iterator.Next().is_null());
+ }
+}
+
+namespace {
+// An arbitrary object guaranteed to live on the non-read-only heap.
+Object CreateWritableObject() {
+ return *v8::Utils::OpenHandle(*v8::Object::New(CcTest::isolate()));
+}
+} // namespace
+
+TEST(ReadOnlyHeapIterator) {
+ CcTest::InitializeVM();
+ HandleScope handle_scope(CcTest::i_isolate());
+ const Object sample_object = CreateWritableObject();
+ ReadOnlyHeapIterator iterator(CcTest::read_only_heap());
+
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ CHECK(ReadOnlyHeap::Contains(obj));
+ CHECK(!CcTest::heap()->Contains(obj));
+ CHECK_NE(sample_object, obj);
+ }
+}
+
+TEST(HeapIterator) {
+ CcTest::InitializeVM();
+ HandleScope handle_scope(CcTest::i_isolate());
+ const Object sample_object = CreateWritableObject();
+ HeapIterator iterator(CcTest::heap());
+ bool seen_sample_object = false;
+
+ for (HeapObject obj = iterator.next(); !obj.is_null();
+ obj = iterator.next()) {
+ CHECK(!ReadOnlyHeap::Contains(obj));
+ CHECK(CcTest::heap()->Contains(obj));
+ if (sample_object == obj) seen_sample_object = true;
+ }
+ CHECK(seen_sample_object);
+}
+
+TEST(CombinedHeapIterator) {
+ CcTest::InitializeVM();
+ HandleScope handle_scope(CcTest::i_isolate());
+ const Object sample_object = CreateWritableObject();
+ CombinedHeapIterator iterator(CcTest::heap());
+ bool seen_sample_object = false;
+
+ for (HeapObject obj = iterator.Next(); !obj.is_null();
+ obj = iterator.Next()) {
+ CHECK(IsValidHeapObject(CcTest::heap(), obj));
+ if (sample_object == obj) seen_sample_object = true;
+ }
+ CHECK(seen_sample_object);
+}
+
+} // namespace heap
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-lab.cc b/deps/v8/test/cctest/heap/test-lab.cc
index 94f652e037..4c1b154b61 100644
--- a/deps/v8/test/cctest/heap/test-lab.cc
+++ b/deps/v8/test/cctest/heap/test-lab.cc
@@ -4,10 +4,10 @@
#include <vector>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -17,7 +17,7 @@ namespace heap {
static Address AllocateLabBackingStore(Heap* heap, intptr_t size_in_bytes) {
AllocationResult result = heap->old_space()->AllocateRaw(
static_cast<int>(size_in_bytes), kDoubleAligned);
- Address adr = result.ToObjectChecked()->address();
+ Address adr = result.ToObjectChecked().address();
return adr;
}
@@ -30,10 +30,10 @@ static void VerifyIterable(v8::internal::Address base,
size_t counter = 0;
while (base < limit) {
object = HeapObject::FromAddress(base);
- CHECK(object->IsFiller());
+ CHECK(object.IsFiller());
CHECK_LT(counter, expected_size.size());
- CHECK_EQ(expected_size[counter], object->Size());
- base += object->Size();
+ CHECK_EQ(expected_size[counter], object.Size());
+ base += object.Size();
counter++;
}
}
@@ -46,7 +46,7 @@ static bool AllocateFromLab(Heap* heap, LocalAllocationBuffer* lab,
AllocationResult result =
lab->AllocateRawAligned(static_cast<int>(size_in_bytes), alignment);
if (result.To(&obj)) {
- heap->CreateFillerObjectAt(obj->address(), static_cast<int>(size_in_bytes),
+ heap->CreateFillerObjectAt(obj.address(), static_cast<int>(size_in_bytes),
ClearRecordedSlots::kNo);
return true;
}
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index 9f2f5f9a81..b4d8c1b04e 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -37,12 +37,12 @@
#include <utility>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/global-handles.h"
+#include "src/handles/global-handles.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
@@ -103,8 +103,8 @@ AllocationResult HeapTester::AllocateMapForTest(Isolate* isolate) {
HeapObject obj;
AllocationResult alloc = heap->AllocateRaw(Map::kSize, AllocationType::kMap);
if (!alloc.To(&obj)) return alloc;
- obj->set_map_after_allocation(ReadOnlyRoots(heap).meta_map(),
- SKIP_WRITE_BARRIER);
+ obj.set_map_after_allocation(ReadOnlyRoots(heap).meta_map(),
+ SKIP_WRITE_BARRIER);
return isolate->factory()->InitializeMap(Map::cast(obj), JS_OBJECT_TYPE,
JSObject::kHeaderSize,
TERMINAL_FAST_ELEMENTS_KIND, 0);
@@ -121,11 +121,11 @@ AllocationResult HeapTester::AllocateFixedArrayForTest(
AllocationResult result = heap->AllocateRaw(size, allocation);
if (!result.To(&obj)) return result;
}
- obj->set_map_after_allocation(ReadOnlyRoots(heap).fixed_array_map(),
- SKIP_WRITE_BARRIER);
+ obj.set_map_after_allocation(ReadOnlyRoots(heap).fixed_array_map(),
+ SKIP_WRITE_BARRIER);
FixedArray array = FixedArray::cast(obj);
- array->set_length(length);
- MemsetTagged(array->data_start(), ReadOnlyRoots(heap).undefined_value(),
+ array.set_length(length);
+ MemsetTagged(array.data_start(), ReadOnlyRoots(heap).undefined_value(),
length);
return array;
}
@@ -139,7 +139,7 @@ HEAP_TEST(MarkCompactCollector) {
Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
- Handle<JSGlobalObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSGlobalObject> global(isolate->context().global_object(), isolate);
// call mark-compact when heap is empty
CcTest::CollectGarbage(OLD_SPACE);
@@ -370,7 +370,7 @@ TEST(Regress5829) {
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
for (auto object_and_size :
LiveObjectRange<kGreyObjects>(page, marking_state->bitmap(page))) {
- CHECK(!object_and_size.first->IsFiller());
+ CHECK(!object_and_size.first.IsFiller());
}
}
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index b68484e3c0..f629bc1171 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/execution/isolate.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/factory.h"
#include "src/heap/spaces-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 24a882fc82..5e6392bf59 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -31,8 +31,8 @@
#include "src/base/platform/platform.h"
#include "src/heap/factory.h"
#include "src/heap/spaces-inl.h"
-#include "src/objects-inl.h"
#include "src/objects/free-space.h"
+#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
@@ -301,7 +301,7 @@ TEST(LargeObjectSpace) {
int lo_size = Page::kPageSize;
Object obj = lo->AllocateRaw(lo_size).ToObjectChecked();
- CHECK(obj->IsHeapObject());
+ CHECK(obj.IsHeapObject());
HeapObject ho = HeapObject::cast(obj);
@@ -390,7 +390,7 @@ static HeapObject AllocateUnaligned(NewSpace* space, int size) {
CHECK(!allocation.IsRetry());
HeapObject filler;
CHECK(allocation.To(&filler));
- space->heap()->CreateFillerObjectAt(filler->address(), size,
+ space->heap()->CreateFillerObjectAt(filler.address(), size,
ClearRecordedSlots::kNo);
return filler;
}
@@ -400,7 +400,7 @@ static HeapObject AllocateUnaligned(PagedSpace* space, int size) {
CHECK(!allocation.IsRetry());
HeapObject filler;
CHECK(allocation.To(&filler));
- space->heap()->CreateFillerObjectAt(filler->address(), size,
+ space->heap()->CreateFillerObjectAt(filler.address(), size,
ClearRecordedSlots::kNo);
return filler;
}
@@ -571,7 +571,7 @@ HEAP_TEST(Regress777177) {
heap::SimulateFullSpace(old_space);
AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
HeapObject obj = result.ToObjectChecked();
- heap->CreateFillerObjectAt(obj->address(), filler_size,
+ heap->CreateFillerObjectAt(obj.address(), filler_size,
ClearRecordedSlots::kNo);
}
@@ -582,14 +582,14 @@ HEAP_TEST(Regress777177) {
old_space->AllocateRaw(max_object_size, kWordAligned);
HeapObject obj = result.ToObjectChecked();
// Simulate allocation folding moving the top pointer back.
- old_space->SetTopAndLimit(obj->address(), old_space->limit());
+ old_space->SetTopAndLimit(obj.address(), old_space->limit());
}
{
// This triggers assert in crbug.com/777177.
AllocationResult result = old_space->AllocateRaw(filler_size, kWordAligned);
HeapObject obj = result.ToObjectChecked();
- heap->CreateFillerObjectAt(obj->address(), filler_size,
+ heap->CreateFillerObjectAt(obj.address(), filler_size,
ClearRecordedSlots::kNo);
}
old_space->RemoveAllocationObserver(&observer);
@@ -621,17 +621,17 @@ HEAP_TEST(Regress791582) {
AllocationResult result =
new_space->AllocateRaw(until_page_end, kWordAligned);
HeapObject obj = result.ToObjectChecked();
- heap->CreateFillerObjectAt(obj->address(), until_page_end,
+ heap->CreateFillerObjectAt(obj.address(), until_page_end,
ClearRecordedSlots::kNo);
// Simulate allocation folding moving the top pointer back.
- *new_space->allocation_top_address() = obj->address();
+ *new_space->allocation_top_address() = obj.address();
}
{
// This triggers assert in crbug.com/791582
AllocationResult result = new_space->AllocateRaw(256, kWordAligned);
HeapObject obj = result.ToObjectChecked();
- heap->CreateFillerObjectAt(obj->address(), 256, ClearRecordedSlots::kNo);
+ heap->CreateFillerObjectAt(obj.address(), 256, ClearRecordedSlots::kNo);
}
new_space->RemoveAllocationObserver(&observer);
}
@@ -656,7 +656,7 @@ TEST(ShrinkPageToHighWaterMarkFreeSpaceEnd) {
old_space->ResetFreeList();
HeapObject filler = HeapObject::FromAddress(array->address() + array->Size());
- CHECK(filler->IsFreeSpace());
+ CHECK(filler.IsFreeSpace());
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
size_t should_have_shrunk = RoundDown(
static_cast<size_t>(MemoryChunkLayout::AllocatableMemoryInDataPage() -
@@ -707,7 +707,7 @@ TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
old_space->ResetFreeList();
HeapObject filler = HeapObject::FromAddress(array->address() + array->Size());
- CHECK_EQ(filler->map(),
+ CHECK_EQ(filler.map(),
ReadOnlyRoots(CcTest::heap()).one_pointer_filler_map());
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
@@ -734,7 +734,7 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
old_space->ResetFreeList();
HeapObject filler = HeapObject::FromAddress(array->address() + array->Size());
- CHECK_EQ(filler->map(),
+ CHECK_EQ(filler.map(),
ReadOnlyRoots(CcTest::heap()).two_pointer_filler_map());
size_t shrunk = old_space->ShrinkPageToHighWaterMark(page);
diff --git a/deps/v8/test/cctest/heap/test-unmapper.cc b/deps/v8/test/cctest/heap/test-unmapper.cc
index 1fbe5c1f5c..89d163b36c 100644
--- a/deps/v8/test/cctest/heap/test-unmapper.cc
+++ b/deps/v8/test/cctest/heap/test-unmapper.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/heap/spaces.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/heap/test-weak-references.cc b/deps/v8/test/cctest/heap/test-weak-references.cc
index 0504c134f7..18dca8edb8 100644
--- a/deps/v8/test/cctest/heap/test-weak-references.cc
+++ b/deps/v8/test/cctest/heap/test-weak-references.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
#include "src/objects/smi.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
@@ -28,7 +28,7 @@ Handle<FeedbackVector> CreateFeedbackVectorForTest(
.ToLocalChecked();
Handle<Object> obj = v8::Utils::OpenHandle(*script);
Handle<SharedFunctionInfo> shared_function =
- Handle<SharedFunctionInfo>(JSFunction::cast(*obj)->shared(), i_isolate);
+ Handle<SharedFunctionInfo>(JSFunction::cast(*obj).shared(), i_isolate);
Handle<ClosureFeedbackCellArray> closure_cell_array =
ClosureFeedbackCellArray::New(i_isolate, shared_function);
Handle<FeedbackVector> fv = factory->NewFeedbackVector(
@@ -60,8 +60,7 @@ TEST(WeakReferencesBasic) {
assm.nop(); // supported on all architectures
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
CHECK(code->IsCode());
fv->set_optimized_code_weak_or_smi(HeapObjectReference::Weak(*code));
@@ -415,21 +414,21 @@ TEST(WeakArraysBasic) {
CcTest::CollectGarbage(NEW_SPACE);
HeapObject heap_object;
CHECK(array->Get(0)->GetHeapObjectIfWeak(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2016);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2016);
CHECK(array->Get(1)->GetHeapObjectIfWeak(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2017);
CHECK(array->Get(2)->GetHeapObjectIfStrong(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2018);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2018);
CHECK(array->Get(3)->GetHeapObjectIfWeak(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2019);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2019);
CcTest::CollectAllGarbage();
CHECK(heap->InOldSpace(*array));
CHECK(array->Get(0)->IsCleared());
CHECK(array->Get(1)->GetHeapObjectIfWeak(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2017);
CHECK(array->Get(2)->GetHeapObjectIfStrong(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2018);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2018);
CHECK(array->Get(3)->IsCleared());
}
@@ -510,19 +509,19 @@ TEST(WeakArrayListBasic) {
HeapObject heap_object;
CHECK_EQ(array->length(), 8);
CHECK(array->Get(0)->GetHeapObjectIfWeak(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2016);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2016);
CHECK_EQ(array->Get(1).ToSmi().value(), 1);
CHECK(array->Get(2)->GetHeapObjectIfWeak(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2017);
CHECK_EQ(array->Get(3).ToSmi().value(), 3);
CHECK(array->Get(4)->GetHeapObjectIfWeak(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2018);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2018);
CHECK_EQ(array->Get(5).ToSmi().value(), 5);
CHECK(array->Get(6)->GetHeapObjectIfWeak(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2019);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2019);
CHECK_EQ(array->Get(7).ToSmi().value(), 7);
CcTest::CollectAllGarbage();
@@ -532,7 +531,7 @@ TEST(WeakArrayListBasic) {
CHECK_EQ(array->Get(1).ToSmi().value(), 1);
CHECK(array->Get(2)->GetHeapObjectIfWeak(&heap_object));
- CHECK_EQ(Smi::cast(FixedArray::cast(heap_object)->get(0))->value(), 2017);
+ CHECK_EQ(Smi::cast(FixedArray::cast(heap_object).get(0)).value(), 2017);
CHECK_EQ(array->Get(3).ToSmi().value(), 3);
CHECK(array->Get(4)->IsCleared());
@@ -607,6 +606,7 @@ TEST(Regress7768) {
// function ("f"). The weak reference is the only reference to the function.
CompileRun(
"function myfunc(f) { f(); } "
+ "%PrepareFunctionForOptimization(myfunc); "
"(function wrapper() { "
" function f() {}; myfunc(f); myfunc(f); "
" %OptimizeFunctionOnNextCall(myfunc); myfunc(f); "
@@ -765,7 +765,7 @@ TEST(PrototypeUsersCompacted) {
CHECK_EQ(array->length(), 3 + PrototypeUsers::kFirstIndex);
WeakArrayList new_array =
PrototypeUsers::Compact(array, heap, TestCompactCallback);
- CHECK_EQ(new_array->length(), 1 + PrototypeUsers::kFirstIndex);
+ CHECK_EQ(new_array.length(), 1 + PrototypeUsers::kFirstIndex);
CHECK_EQ(saved_heap_object, *live_map);
}
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 0a169b766c..957bcff1db 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -10,19 +10,19 @@
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/logging.h"
+#include "src/codegen/source-position-table.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/module-inl.h"
-#include "src/ostreams.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime.h"
-#include "src/source-position-table.h"
+#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -98,7 +98,7 @@ BytecodeExpectationsPrinter::GetBytecodeArrayForGlobal(
i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*function));
i::Handle<i::BytecodeArray> bytecodes =
- i::handle(js_function->shared()->GetBytecodeArray(), i_isolate());
+ i::handle(js_function->shared().GetBytecodeArray(), i_isolate());
return bytecodes;
}
@@ -108,7 +108,7 @@ BytecodeExpectationsPrinter::GetBytecodeArrayForModule(
v8::Local<v8::Module> module) const {
i::Handle<i::Module> i_module = v8::Utils::OpenHandle(*module);
return i::handle(
- SharedFunctionInfo::cast(i_module->code())->GetBytecodeArray(),
+ SharedFunctionInfo::cast(i_module->code()).GetBytecodeArray(),
i_isolate());
}
@@ -116,7 +116,7 @@ i::Handle<i::BytecodeArray>
BytecodeExpectationsPrinter::GetBytecodeArrayForScript(
v8::Local<v8::Script> script) const {
i::Handle<i::JSFunction> js_function = v8::Utils::OpenHandle(*script);
- return i::handle(js_function->shared()->GetBytecodeArray(), i_isolate());
+ return i::handle(js_function->shared().GetBytecodeArray(), i_isolate());
}
i::Handle<i::BytecodeArray>
@@ -126,8 +126,8 @@ BytecodeExpectationsPrinter::GetBytecodeArrayOfCallee(
v8::Utils::OpenHandle(*CompileRun(source_code));
i::Handle<i::JSFunction> js_function =
i::Handle<i::JSFunction>::cast(i_object);
- CHECK(js_function->shared()->HasBytecodeArray());
- return i::handle(js_function->shared()->GetBytecodeArray(), i_isolate());
+ CHECK(js_function->shared().HasBytecodeArray());
+ return i::handle(js_function->shared().GetBytecodeArray(), i_isolate());
}
void BytecodeExpectationsPrinter::PrintEscapedString(
@@ -167,7 +167,6 @@ void BytecodeExpectationsPrinter::PrintBytecodeOperand(
break;
default:
UNREACHABLE();
- return;
}
if (Bytecodes::IsRegisterOperandType(op_type)) {
@@ -279,8 +278,8 @@ void BytecodeExpectationsPrinter::PrintSourcePosition(
void BytecodeExpectationsPrinter::PrintV8String(std::ostream& stream,
i::String string) const {
stream << '"';
- for (int i = 0, length = string->length(); i < length; ++i) {
- stream << i::AsEscapedUC16ForJSON(string->Get(i));
+ for (int i = 0, length = string.length(); i < length; ++i) {
+ stream << i::AsEscapedUC16ForJSON(string.Get(i));
}
stream << '"';
}
@@ -289,13 +288,13 @@ void BytecodeExpectationsPrinter::PrintConstant(
std::ostream& stream, i::Handle<i::Object> constant) const {
if (constant->IsSmi()) {
stream << "Smi [";
- i::Smi::cast(*constant)->SmiPrint(stream);
+ i::Smi::cast(*constant).SmiPrint(stream);
stream << "]";
} else {
- stream << i::HeapObject::cast(*constant)->map()->instance_type();
+ stream << i::HeapObject::cast(*constant).map().instance_type();
if (constant->IsHeapNumber()) {
stream << " [";
- i::HeapNumber::cast(*constant)->HeapNumberPrint(stream);
+ i::HeapNumber::cast(*constant).HeapNumberPrint(stream);
stream << "]";
} else if (constant->IsString()) {
stream << " [";
@@ -335,7 +334,7 @@ void BytecodeExpectationsPrinter::PrintBytecodeSequence(
void BytecodeExpectationsPrinter::PrintConstantPool(
std::ostream& stream, i::FixedArray constant_pool) const {
stream << "constant pool: [\n";
- int num_constants = constant_pool->length();
+ int num_constants = constant_pool.length();
if (num_constants > 0) {
for (int i = 0; i < num_constants; ++i) {
stream << kIndent;
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index 53793c1751..06329940ff 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -10,7 +10,7 @@
#include <vector>
#include "src/interpreter/bytecodes.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
index 231a9050b8..adbe5c9b92 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AsyncGenerators.golden
@@ -14,9 +14,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 180
+bytecode array length: 148
bytecodes: [
- B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(1),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
@@ -29,33 +29,17 @@ bytecodes: [
B(ResumeGenerator), R(0), R(0), U8(5),
B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
B(Ldar), R(5),
/* 17 E> */ B(Throw),
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(85),
- B(LdaUndefined),
- B(Star), R(6),
- B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(5), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(5), U8(1),
- B(ResumeGenerator), R(0), R(0), U8(5),
- B(Star), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(6),
- B(LdaZero),
- B(TestReferenceEqual), R(6),
- B(JumpIfTrue), U8(5),
+ B(Jump), U8(53),
B(Ldar), R(5),
- B(ReThrow),
- B(LdaSmi), I8(1),
- B(Star), R(1),
- B(Mov), R(5), R(2),
- B(Jump), U8(41),
+ B(Jump), U8(36),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(4),
+ B(CreateCatchContext), R(5), U8(3),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -69,6 +53,10 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(1),
+ B(Jump), U8(15),
+ B(LdaSmi), I8(-1),
+ B(Star), R(2),
+ B(Star), R(1),
B(Jump), U8(7),
B(Star), R(2),
B(LdaZero),
@@ -80,7 +68,7 @@ bytecodes: [
B(Ldar), R(3),
B(SetPendingMessage),
B(Ldar), R(1),
- B(SwitchOnSmiNoFeedback), U8(5), U8(3), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(4), U8(3), I8(0),
B(Jump), U8(22),
B(Ldar), R(2),
B(ReThrow),
@@ -97,7 +85,6 @@ bytecodes: [
]
constant pool: [
Smi [30],
- Smi [71],
Smi [16],
Smi [7],
SCOPE_INFO_TYPE,
@@ -106,8 +93,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [20, 134, 134],
- [23, 100, 100],
+ [20, 94, 102],
+ [23, 56, 60],
]
---
@@ -117,9 +104,9 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 225
+bytecode array length: 193
bytecodes: [
- B(SwitchOnGeneratorState), R(0), U8(0), U8(3),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(1),
B(Mov), R(this), R(2),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(1), U8(2),
@@ -132,13 +119,13 @@ bytecodes: [
B(ResumeGenerator), R(0), R(0), U8(5),
B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(5),
/* 17 E> */ B(Throw),
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(130),
+ B(Jump), U8(98),
/* 22 S> */ B(LdaSmi), I8(42),
B(Star), R(6),
B(LdaFalse),
@@ -149,33 +136,17 @@ bytecodes: [
B(ResumeGenerator), R(0), R(0), U8(5),
B(Star), R(5),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
B(Ldar), R(5),
/* 22 E> */ B(Throw),
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(5), R(2),
- B(Jump), U8(85),
- B(LdaUndefined),
- B(Star), R(6),
- B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(5), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(5), U8(2),
- B(ResumeGenerator), R(0), R(0), U8(5),
- B(Star), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(6),
- B(LdaZero),
- B(TestReferenceEqual), R(6),
- B(JumpIfTrue), U8(5),
+ B(Jump), U8(53),
B(Ldar), R(5),
- B(ReThrow),
- B(LdaSmi), I8(1),
- B(Star), R(1),
- B(Mov), R(5), R(2),
- B(Jump), U8(41),
+ B(Jump), U8(36),
B(Star), R(5),
- B(CreateCatchContext), R(5), U8(7),
+ B(CreateCatchContext), R(5), U8(6),
B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
@@ -189,6 +160,10 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(1),
+ B(Jump), U8(15),
+ B(LdaSmi), I8(-1),
+ B(Star), R(2),
+ B(Star), R(1),
B(Jump), U8(7),
B(Star), R(2),
B(LdaZero),
@@ -200,7 +175,7 @@ bytecodes: [
B(Ldar), R(3),
B(SetPendingMessage),
B(Ldar), R(1),
- B(SwitchOnSmiNoFeedback), U8(8), U8(3), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(3), I8(0),
B(Jump), U8(22),
B(Ldar), R(2),
B(ReThrow),
@@ -218,7 +193,6 @@ bytecodes: [
constant pool: [
Smi [30],
Smi [75],
- Smi [116],
Smi [16],
Smi [7],
Smi [16],
@@ -229,8 +203,8 @@ constant pool: [
Smi [23],
]
handlers: [
- [20, 179, 179],
- [23, 145, 145],
+ [20, 139, 147],
+ [23, 101, 105],
]
---
@@ -240,9 +214,9 @@ snippet: "
"
frame size: 20
parameter count: 1
-bytecode array length: 406
+bytecode array length: 372
bytecodes: [
- B(SwitchOnGeneratorState), R(0), U8(0), U8(3),
+ B(SwitchOnGeneratorState), R(0), U8(0), U8(2),
B(Mov), R(closure), R(4),
B(Mov), R(this), R(5),
B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(4), U8(2),
@@ -255,22 +229,22 @@ bytecodes: [
B(ResumeGenerator), R(0), R(0), U8(8),
B(Star), R(8),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(3), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
B(Ldar), R(8),
/* 17 E> */ B(Throw),
B(LdaSmi), I8(1),
B(Star), R(4),
B(Mov), R(8), R(5),
- B(JumpConstant), U8(17),
- /* 36 S> */ B(CreateArrayLiteral), U8(5), U8(0), U8(37),
+ B(JumpConstant), U8(16),
+ /* 36 S> */ B(CreateArrayLiteral), U8(4), U8(0), U8(37),
B(Star), R(10),
- B(LdaNamedProperty), R(10), U8(6), U8(1),
+ B(LdaNamedProperty), R(10), U8(5), U8(1),
B(Star), R(11),
B(CallProperty0), R(11), R(10), U8(3),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(9),
- B(LdaNamedProperty), R(9), U8(7), U8(5),
+ B(LdaNamedProperty), R(9), U8(6), U8(5),
B(Star), R(8),
B(LdaFalse),
B(Star), R(12),
@@ -281,9 +255,9 @@ bytecodes: [
B(Star), R(16),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
- B(LdaNamedProperty), R(16), U8(8), U8(9),
+ B(LdaNamedProperty), R(16), U8(7), U8(9),
B(JumpIfToBooleanTrue), U8(67),
- B(LdaNamedProperty), R(16), U8(9), U8(11),
+ B(LdaNamedProperty), R(16), U8(8), U8(11),
B(Star), R(16),
B(LdaFalse),
B(Star), R(12),
@@ -299,7 +273,7 @@ bytecodes: [
B(ResumeGenerator), R(0), R(0), U8(17),
B(Star), R(17),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(9), U8(2), I8(0),
B(Ldar), R(17),
/* 42 E> */ B(Throw),
B(LdaSmi), I8(1),
@@ -320,15 +294,15 @@ bytecodes: [
B(Star), R(15),
B(Ldar), R(12),
B(JumpIfToBooleanTrue), U8(60),
- B(LdaNamedProperty), R(9), U8(12), U8(13),
+ B(LdaNamedProperty), R(9), U8(11), U8(13),
B(Star), R(17),
B(JumpIfUndefined), U8(52),
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(18),
- B(LdaConstant), U8(13),
+ B(LdaConstant), U8(12),
B(Star), R(19),
B(CallRuntime), U16(Runtime::kNewTypeError), R(18), U8(2),
B(Throw),
@@ -347,34 +321,17 @@ bytecodes: [
B(Ldar), R(15),
B(SetPendingMessage),
B(Ldar), R(13),
- B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
B(Jump), U8(14),
B(Ldar), R(14),
B(ReThrow),
B(LdaSmi), I8(1),
B(Star), R(4),
B(Mov), R(14), R(5),
- B(Jump), U8(85),
- B(LdaUndefined),
- B(Star), R(9),
- B(Mov), R(0), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(8), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(8), U8(2),
- B(ResumeGenerator), R(0), R(0), U8(8),
+ B(Jump), U8(51),
+ B(Jump), U8(36),
B(Star), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(9),
- B(LdaZero),
- B(TestReferenceEqual), R(9),
- B(JumpIfTrue), U8(5),
- B(Ldar), R(8),
- B(ReThrow),
- B(LdaSmi), I8(1),
- B(Star), R(4),
- B(Mov), R(8), R(5),
- B(Jump), U8(41),
- B(Star), R(8),
- B(CreateCatchContext), R(8), U8(16),
+ B(CreateCatchContext), R(8), U8(15),
B(Star), R(7),
B(LdaTheHole),
B(SetPendingMessage),
@@ -388,6 +345,10 @@ bytecodes: [
B(Star), R(5),
B(LdaSmi), I8(2),
B(Star), R(4),
+ B(Jump), U8(15),
+ B(LdaSmi), I8(-1),
+ B(Star), R(5),
+ B(Star), R(4),
B(Jump), U8(7),
B(Star), R(5),
B(LdaZero),
@@ -399,7 +360,7 @@ bytecodes: [
B(Ldar), R(6),
B(SetPendingMessage),
B(Ldar), R(4),
- B(SwitchOnSmiNoFeedback), U8(18), U8(3), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(17), U8(3), I8(0),
B(Jump), U8(22),
B(Ldar), R(5),
B(ReThrow),
@@ -417,7 +378,6 @@ bytecodes: [
constant pool: [
Smi [30],
Smi [149],
- Smi [297],
Smi [16],
Smi [7],
ARRAY_BOILERPLATE_DESCRIPTION_TYPE,
@@ -432,14 +392,14 @@ constant pool: [
Smi [6],
Smi [9],
SCOPE_INFO_TYPE,
- Smi [311],
+ Smi [277],
Smi [6],
Smi [9],
Smi [23],
]
handlers: [
- [20, 360, 360],
- [23, 326, 326],
+ [20, 318, 326],
+ [23, 282, 284],
[93, 180, 188],
[234, 247, 249],
]
@@ -450,9 +410,9 @@ snippet: "
async function* f() { yield* g() }
f();
"
-frame size: 17
+frame size: 19
parameter count: 1
-bytecode array length: 472
+bytecode array length: 475
bytecodes: [
B(SwitchOnGeneratorState), R(0), U8(0), U8(5),
B(Mov), R(closure), R(1),
@@ -500,78 +460,91 @@ bytecodes: [
B(Ldar), R(6),
B(SwitchOnSmiNoFeedback), U8(11), U8(2), I8(1),
B(CallProperty1), R(9), R(7), R(8), U8(14),
- B(Jump), U8(111),
+ B(Jump), U8(146),
B(LdaNamedProperty), R(7), U8(13), U8(16),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
B(Star), R(12),
B(CallProperty1), R(12), R(7), R(8), U8(18),
- B(Jump), U8(94),
+ B(Jump), U8(129),
+ B(Mov), R(0), R(12),
+ B(Mov), R(8), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
+ /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(1),
+ B(ResumeGenerator), R(0), R(0), U8(12),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
+ B(Star), R(13),
+ B(LdaZero),
+ B(TestReferenceEqual), R(13),
+ B(JumpIfTrue), U8(5),
+ B(Ldar), R(12),
+ B(ReThrow),
B(LdaSmi), I8(1),
B(Star), R(1),
- B(Mov), R(8), R(2),
- B(JumpConstant), U8(19),
+ B(Mov), R(12), R(2),
+ B(Jump), U8(245),
B(LdaNamedProperty), R(7), U8(14), U8(20),
B(JumpIfUndefined), U8(13),
B(JumpIfNull), U8(11),
- B(Star), R(12),
- B(CallProperty1), R(12), R(7), R(8), U8(22),
+ B(Star), R(14),
+ B(CallProperty1), R(14), R(7), R(8), U8(22),
B(Jump), U8(68),
B(LdaNamedProperty), R(7), U8(13), U8(24),
B(JumpIfUndefined), U8(57),
B(JumpIfNull), U8(55),
- B(Star), R(12),
- B(CallProperty0), R(12), R(7), U8(26),
+ B(Star), R(14),
+ B(CallProperty0), R(14), R(7), U8(26),
B(Jump), U8(2),
- B(Star), R(13),
- B(Mov), R(0), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
- /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(1),
- B(ResumeGenerator), R(0), R(0), U8(12),
- B(Star), R(12),
+ B(Star), R(15),
+ B(Mov), R(0), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(14), U8(2),
+ /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(2),
+ B(ResumeGenerator), R(0), R(0), U8(14),
+ B(Star), R(14),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(13),
+ B(Star), R(15),
B(LdaZero),
- B(TestReferenceEqual), R(13),
+ B(TestReferenceEqual), R(15),
B(JumpIfTrue), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(14),
B(ReThrow),
- B(Ldar), R(12),
+ B(Ldar), R(14),
B(JumpIfJSReceiver), U8(9),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(14), U8(1),
+ B(Star), R(16),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(16), U8(1),
B(CallRuntime), U16(Runtime::kThrowThrowMethodMissing), R(0), U8(0),
- B(Star), R(13),
- B(Mov), R(0), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(12), U8(2),
- /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(12), U8(2),
- B(ResumeGenerator), R(0), R(0), U8(12),
- B(Star), R(12),
+ B(Star), R(15),
+ B(Mov), R(0), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(14), U8(2),
+ /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(3),
+ B(ResumeGenerator), R(0), R(0), U8(14),
+ B(Star), R(14),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(13),
+ B(Star), R(15),
B(LdaZero),
- B(TestReferenceEqual), R(13),
+ B(TestReferenceEqual), R(15),
B(JumpIfTrue), U8(5),
- B(Ldar), R(12),
+ B(Ldar), R(14),
B(ReThrow),
- B(Ldar), R(12),
- B(Mov), R(12), R(5),
+ B(Ldar), R(14),
+ B(Mov), R(14), R(5),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
B(LdaNamedProperty), R(5), U8(15), U8(28),
B(JumpIfToBooleanTrue), U8(38),
B(LdaNamedProperty), R(5), U8(16), U8(30),
- B(Star), R(15),
+ B(Star), R(17),
B(LdaFalse),
- B(Star), R(16),
- B(Mov), R(0), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(14), U8(3),
- /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(14), U8(3),
- B(ResumeGenerator), R(0), R(0), U8(14),
+ B(Star), R(18),
+ B(Mov), R(0), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorYield), R(16), U8(3),
+ /* 49 E> */ B(SuspendGenerator), R(0), R(0), U8(16), U8(4),
+ B(ResumeGenerator), R(0), R(0), U8(16),
B(Star), R(8),
B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
B(Star), R(6),
- B(JumpLoop), U8(207), I8(0),
+ B(JumpLoop), U8(242), I8(0),
B(LdaNamedProperty), R(5), U8(16), U8(32),
B(Star), R(7),
B(LdaSmi), I8(1),
@@ -580,25 +553,9 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(1),
B(Mov), R(7), R(2),
- B(Jump), U8(85),
- B(LdaUndefined),
- B(Star), R(6),
- B(Mov), R(0), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_AsyncGeneratorAwaitUncaught), R(5), U8(2),
- B(SuspendGenerator), R(0), R(0), U8(5), U8(4),
- B(ResumeGenerator), R(0), R(0), U8(5),
- B(Star), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(0), U8(1),
- B(Star), R(6),
- B(LdaZero),
- B(TestReferenceEqual), R(6),
- B(JumpIfTrue), U8(5),
- B(Ldar), R(5),
- B(ReThrow),
- B(LdaSmi), I8(1),
- B(Star), R(1),
- B(Mov), R(5), R(2),
- B(Jump), U8(41),
+ B(Jump), U8(53),
+ B(Ldar), R(7),
+ B(Jump), U8(36),
B(Star), R(5),
B(CreateCatchContext), R(5), U8(17),
B(Star), R(4),
@@ -614,6 +571,10 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(1),
+ B(Jump), U8(15),
+ B(LdaSmi), I8(-1),
+ B(Star), R(2),
+ B(Star), R(1),
B(Jump), U8(7),
B(Star), R(2),
B(LdaZero),
@@ -625,7 +586,7 @@ bytecodes: [
B(Ldar), R(3),
B(SetPendingMessage),
B(Ldar), R(1),
- B(SwitchOnSmiNoFeedback), U8(20), U8(3), I8(0),
+ B(SwitchOnSmiNoFeedback), U8(19), U8(3), I8(0),
B(Jump), U8(22),
B(Ldar), R(2),
B(ReThrow),
@@ -642,10 +603,10 @@ bytecodes: [
]
constant pool: [
Smi [30],
- Smi [203],
- Smi [253],
- Smi [312],
- Smi [363],
+ Smi [162],
+ Smi [238],
+ Smi [288],
+ Smi [347],
Smi [16],
Smi [7],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["g"],
@@ -653,20 +614,19 @@ constant pool: [
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
Smi [11],
- Smi [37],
+ Smi [72],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["throw"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
SCOPE_INFO_TYPE,
- Smi [377],
- Smi [277],
+ Smi [380],
Smi [6],
Smi [9],
Smi [23],
]
handlers: [
- [20, 426, 426],
- [23, 392, 392],
+ [20, 421, 429],
+ [23, 383, 387],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
index f4a7c340c4..43515711db 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DestructuringAssignment.golden
@@ -66,7 +66,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(13),
B(LdaConstant), U8(6),
B(Star), R(14),
@@ -203,7 +203,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(14),
B(LdaConstant), U8(6),
B(Star), R(15),
@@ -327,7 +327,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(15),
B(LdaConstant), U8(7),
B(Star), R(16),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 6fe59da400..82d51820bb 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -94,7 +94,7 @@ bytecodes: [
B(JumpIfNull), U8(86),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(17),
B(LdaConstant), U8(9),
B(Star), R(18),
@@ -263,7 +263,7 @@ bytecodes: [
B(JumpIfNull), U8(86),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(17),
B(LdaConstant), U8(9),
B(Star), R(18),
@@ -448,7 +448,7 @@ bytecodes: [
B(JumpIfNull), U8(86),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(17),
B(LdaConstant), U8(9),
B(Star), R(18),
@@ -600,7 +600,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(14),
B(LdaConstant), U8(8),
B(Star), R(15),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 571002d16e..dffa8f577b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -62,7 +62,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(12),
B(LdaConstant), U8(6),
B(Star), R(13),
@@ -165,7 +165,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(13),
B(LdaConstant), U8(6),
B(Star), R(14),
@@ -278,7 +278,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(12),
B(LdaConstant), U8(6),
B(Star), R(13),
@@ -384,7 +384,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(12),
B(LdaConstant), U8(8),
B(Star), R(13),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
index f85f09c1f7..1752a3124e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -66,7 +66,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(14),
B(LdaConstant), U8(5),
B(Star), R(15),
@@ -203,7 +203,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(15),
B(LdaConstant), U8(10),
B(Star), R(16),
@@ -318,7 +318,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(13),
B(LdaConstant), U8(7),
B(Star), R(14),
@@ -430,7 +430,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(16),
B(LdaConstant), U8(7),
B(Star), R(17),
@@ -547,7 +547,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(15),
B(LdaConstant), U8(8),
B(Star), R(16),
@@ -679,7 +679,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(14),
B(LdaConstant), U8(11),
B(Star), R(15),
@@ -795,7 +795,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(16),
B(LdaConstant), U8(5),
B(Star), R(17),
@@ -935,7 +935,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(15),
B(LdaConstant), U8(6),
B(Star), R(16),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index 77b1924c73..996c15d2af 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -180,7 +180,7 @@ bytecodes: [
B(JumpIfNull), U8(50),
B(TestTypeOf), U8(6),
B(JumpIfTrue), U8(18),
- B(Wide), B(LdaSmi), I16(155),
+ B(Wide), B(LdaSmi), I16(158),
B(Star), R(14),
B(LdaConstant), U8(12),
B(Star), R(15),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
index c7f5101dd7..dbe688f814 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateClassFields.golden
@@ -4,7 +4,6 @@
---
wrap: yes
-private fields: yes
---
snippet: "
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden
new file mode 100644
index 0000000000..e783d81376
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethods.golden
@@ -0,0 +1,139 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: yes
+private methods: yes
+
+---
+snippet: "
+ {
+ class A {
+ #a() { return 1; }
+ }
+
+ new A;
+ }
+"
+frame size: 7
+parameter count: 1
+bytecode array length: 62
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(2),
+ B(LdaTheHole),
+ B(Star), R(6),
+ B(CreateClosure), U8(2), U8(0), U8(2),
+ B(Star), R(3),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(3),
+ B(Star), R(4),
+ B(Mov), R(5), R(1),
+ B(LdaConstant), U8(4),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(5), U8(1),
+ B(StaCurrentContextSlot), U8(5),
+ B(PopContext), R(2),
+ B(Mov), R(1), R(0),
+ /* 78 S> */ B(Ldar), R(0),
+ /* 78 E> */ B(Construct), R(0), R(0), U8(0), U8(0),
+ B(LdaUndefined),
+ /* 87 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["A"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ {
+ class D {
+ #d() {}
+ }
+
+ class E extends D {
+ #e() {}
+ }
+
+ new D;
+ new E;
+ }
+"
+frame size: 9
+parameter count: 1
+bytecode array length: 121
+bytecodes: [
+ /* 30 E> */ B(StackCheck),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(4),
+ B(LdaTheHole),
+ B(Star), R(8),
+ B(CreateClosure), U8(2), U8(0), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(1),
+ B(Star), R(6),
+ B(CreateClosure), U8(3), U8(1), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(5), R(7),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+ B(Star), R(6),
+ B(Mov), R(7), R(3),
+ B(LdaConstant), U8(4),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+ B(StaCurrentContextSlot), U8(5),
+ B(PopContext), R(4),
+ B(Mov), R(3), R(0),
+ /* 38 E> */ B(CreateBlockContext), U8(5),
+ B(PushContext), R(4),
+ /* 83 E> */ B(CreateClosure), U8(7), U8(2), U8(2),
+ B(Star), R(5),
+ B(LdaConstant), U8(6),
+ B(Star), R(6),
+ B(CreateClosure), U8(8), U8(3), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(5), R(7),
+ B(Mov), R(3), R(8),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(6), U8(3),
+ B(Star), R(6),
+ B(Mov), R(7), R(2),
+ B(LdaConstant), U8(9),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kCreatePrivateNameSymbol), R(7), U8(1),
+ B(StaCurrentContextSlot), U8(5),
+ B(PopContext), R(4),
+ B(Mov), R(2), R(1),
+ /* 106 S> */ B(Ldar), R(3),
+ /* 106 E> */ B(Construct), R(3), R(0), U8(0), U8(0),
+ /* 115 S> */ B(Ldar), R(2),
+ /* 115 E> */ B(Construct), R(2), R(0), U8(0), U8(2),
+ B(LdaUndefined),
+ /* 124 S> */ B(Return),
+]
+constant pool: [
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["D"],
+ SCOPE_INFO_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["E"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
index 6671c1fb06..c91e7b06aa 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PublicClassFields.golden
@@ -4,7 +4,6 @@
---
wrap: yes
-public fields: yes
---
snippet: "
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
index 752e4cdc6f..f03337e4aa 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticClassFields.golden
@@ -4,8 +4,6 @@
---
wrap: yes
-public fields: yes
-static fields: yes
---
snippet: "
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 245d9d9afd..af18097284 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -43,10 +43,7 @@ class ProgramOptions final {
print_callee_(false),
oneshot_opt_(false),
async_iteration_(false),
- public_fields_(false),
- private_fields_(false),
private_methods_(false),
- static_fields_(false),
verbose_(false) {}
bool Validate() const;
@@ -67,10 +64,7 @@ class ProgramOptions final {
bool print_callee() const { return print_callee_; }
bool oneshot_opt() const { return oneshot_opt_; }
bool async_iteration() const { return async_iteration_; }
- bool public_fields() const { return public_fields_; }
- bool private_fields() const { return private_fields_; }
bool private_methods() const { return private_methods_; }
- bool static_fields() const { return static_fields_; }
bool verbose() const { return verbose_; }
bool suppress_runtime_errors() const { return rebaseline_ && !verbose_; }
std::vector<std::string> input_filenames() const { return input_filenames_; }
@@ -89,10 +83,7 @@ class ProgramOptions final {
bool print_callee_;
bool oneshot_opt_;
bool async_iteration_;
- bool public_fields_;
- bool private_fields_;
bool private_methods_;
- bool static_fields_;
bool verbose_;
std::vector<std::string> input_filenames_;
std::string output_filename_;
@@ -134,8 +125,8 @@ bool CollectGoldenFiles(std::vector<std::string>* golden_file_list,
if (!directory) return false;
auto str_ends_with = [](const char* string, const char* suffix) {
- int string_size = i::StrLength(string);
- int suffix_size = i::StrLength(suffix);
+ size_t string_size = strlen(string);
+ size_t suffix_size = strlen(suffix);
if (string_size < suffix_size) return false;
return strcmp(string + (string_size - suffix_size), suffix) == 0;
@@ -195,14 +186,8 @@ ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
options.oneshot_opt_ = false;
} else if (strcmp(argv[i], "--async-iteration") == 0) {
options.async_iteration_ = true;
- } else if (strcmp(argv[i], "--public-fields") == 0) {
- options.public_fields_ = true;
- } else if (strcmp(argv[i], "--private-fields") == 0) {
- options.private_fields_ = true;
} else if (strcmp(argv[i], "--private-methods") == 0) {
options.private_methods_ = true;
- } else if (strcmp(argv[i], "--static-fields") == 0) {
- options.static_fields_ = true;
} else if (strcmp(argv[i], "--verbose") == 0) {
options.verbose_ = true;
} else if (strncmp(argv[i], "--output=", 9) == 0) {
@@ -312,21 +297,14 @@ void ProgramOptions::UpdateFromHeader(std::istream& stream) {
oneshot_opt_ = ParseBoolean(line.c_str() + strlen(kOneshotOpt));
} else if (line.compare(0, 17, "async iteration: ") == 0) {
async_iteration_ = ParseBoolean(line.c_str() + 17);
- } else if (line.compare(0, 15, "public fields: ") == 0) {
- public_fields_ = ParseBoolean(line.c_str() + 15);
- } else if (line.compare(0, 16, "private fields: ") == 0) {
- private_fields_ = ParseBoolean(line.c_str() + 16);
} else if (line.compare(0, 16, "private methods: ") == 0) {
private_methods_ = ParseBoolean(line.c_str() + 16);
- } else if (line.compare(0, 15, "static fields: ") == 0) {
- static_fields_ = ParseBoolean(line.c_str() + 15);
} else if (line == "---") {
break;
} else if (line.empty()) {
continue;
} else {
UNREACHABLE();
- return;
}
}
}
@@ -344,10 +322,7 @@ void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
if (print_callee_) stream << "\nprint callee: yes";
if (oneshot_opt_) stream << "\noneshot opt: yes";
if (async_iteration_) stream << "\nasync iteration: yes";
- if (public_fields_) stream << "\npublic fields: yes";
- if (private_fields_) stream << "\nprivate fields: yes";
if (private_methods_) stream << "\nprivate methods: yes";
- if (static_fields_) stream << "\nstatic fields: yes";
stream << "\n\n";
}
@@ -456,10 +431,7 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
printer.set_test_function_name(options.test_function_name());
}
- if (options.public_fields()) i::FLAG_harmony_public_fields = true;
- if (options.private_fields()) i::FLAG_harmony_private_fields = true;
if (options.private_methods()) i::FLAG_harmony_private_methods = true;
- if (options.static_fields()) i::FLAG_harmony_static_fields = true;
stream << "#\n# Autogenerated by generate-bytecode-expectations.\n#\n\n";
options.PrintHeader(stream);
@@ -467,10 +439,7 @@ void GenerateExpectationsFile(std::ostream& stream, // NOLINT
printer.PrintExpectation(stream, snippet);
}
- i::FLAG_harmony_public_fields = false;
- i::FLAG_harmony_private_fields = false;
i::FLAG_harmony_private_methods = false;
- i::FLAG_harmony_static_fields = false;
}
bool WriteExpectationsFile(const std::vector<std::string>& snippet_list,
@@ -519,10 +488,7 @@ void PrintUsage(const char* exec_path) {
" --test-function-name=foo "
"Specify the name of the test function.\n"
" --top-level Process top level code, not the top-level function.\n"
- " --public-fields Enable harmony_public_fields flag.\n"
- " --private-fields Enable harmony_private_fields flag.\n"
" --private-methods Enable harmony_private_methods flag.\n"
- " --static-fields Enable harmony_static_fields flag.\n"
" --output=file.name\n"
" Specify the output file. If not specified, output goes to "
"stdout.\n"
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.cc b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
index c66c1a279b..6246dde025 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.cc
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.cc
@@ -4,9 +4,9 @@
#include "test/cctest/interpreter/interpreter-tester.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -31,10 +31,8 @@ InterpreterTester::InterpreterTester(
InterpreterTester::InterpreterTester(
Isolate* isolate, Handle<BytecodeArray> bytecode,
MaybeHandle<FeedbackMetadata> feedback_metadata, const char* filter)
- : InterpreterTester(
- isolate, nullptr, bytecode,
- FLAG_lite_mode ? MaybeHandle<FeedbackMetadata>() : feedback_metadata,
- filter) {}
+ : InterpreterTester(isolate, nullptr, bytecode, feedback_metadata, filter) {
+}
InterpreterTester::InterpreterTester(Isolate* isolate, const char* source,
const char* filter)
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index a768908998..fbc2666c10 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -5,11 +5,11 @@
#ifndef V8_TEST_CCTEST_INTERPRETER_INTERPRETER_TESTER_H_
#define V8_TEST_CCTEST_INTERPRETER_INTERPRETER_TESTER_H_
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api.h"
-#include "src/execution.h"
-#include "src/handles.h"
+#include "src/api/api.h"
+#include "src/execution/execution.h"
+#include "src/handles/handles.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/feedback-cell.h"
@@ -122,13 +122,13 @@ class InterpreterTester {
}
if (!bytecode_.is_null()) {
- function->shared()->set_function_data(*bytecode_.ToHandleChecked());
+ function->shared().set_function_data(*bytecode_.ToHandleChecked());
}
if (HasFeedbackMetadata()) {
function->set_raw_feedback_cell(isolate_->heap()->many_closures_cell());
// Set the raw feedback metadata to circumvent checks that we are not
// overwriting existing metadata.
- function->shared()->set_raw_outer_scope_info_or_feedback_metadata(
+ function->shared().set_raw_outer_scope_info_or_feedback_metadata(
*feedback_metadata_.ToHandleChecked());
JSFunction::EnsureFeedbackVector(function);
}
diff --git a/deps/v8/test/cctest/interpreter/source-position-matcher.cc b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
index 2fcc292b1c..07fde890e8 100644
--- a/deps/v8/test/cctest/interpreter/source-position-matcher.cc
+++ b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
@@ -4,8 +4,8 @@
#include "test/cctest/interpreter/source-position-matcher.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/interpreter/source-position-matcher.h b/deps/v8/test/cctest/interpreter/source-position-matcher.h
index 7cc49cc20c..dc7a4ca450 100644
--- a/deps/v8/test/cctest/interpreter/source-position-matcher.h
+++ b/deps/v8/test/cctest/interpreter/source-position-matcher.h
@@ -5,10 +5,10 @@
#ifndef TEST_CCTEST_INTERPRETER_SOURCE_POSITION_COMPARER_H_
#define TEST_CCTEST_INTERPRETER_SOURCE_POSITION_COMPARER_H_
+#include "src/codegen/source-position-table.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects.h"
-#include "src/source-position-table.h"
-#include "src/v8.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 168dabd8dc..3a4d089786 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -4,12 +4,12 @@
#include <fstream>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/interpreter/bytecode-expectations-printer.h"
#include "test/cctest/test-feedback-vector.h"
@@ -2650,8 +2650,6 @@ TEST(ClassAndSuperClass) {
}
TEST(PublicClassFields) {
- bool old_flag = i::FLAG_harmony_public_fields;
- i::FLAG_harmony_public_fields = true;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -2700,12 +2698,9 @@ TEST(PublicClassFields) {
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PublicClassFields.golden")));
- i::FLAG_harmony_public_fields = old_flag;
}
TEST(PrivateClassFields) {
- bool old_flag = i::FLAG_harmony_private_fields;
- i::FLAG_harmony_private_fields = true;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -2760,14 +2755,41 @@ TEST(PrivateClassFields) {
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("PrivateClassFields.golden")));
- i::FLAG_harmony_private_fields = old_flag;
+}
+
+TEST(PrivateMethods) {
+ bool old_methods_flag = i::FLAG_harmony_private_methods;
+ i::FLAG_harmony_private_methods = true;
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+
+ const char* snippets[] = {
+ "{\n"
+ " class A {\n"
+ " #a() { return 1; }\n"
+ " }\n"
+ "\n"
+ " new A;\n"
+ "}\n",
+
+ "{\n"
+ " class D {\n"
+ " #d() {}\n"
+ " }\n"
+ "\n"
+ " class E extends D {\n"
+ " #e() {}\n"
+ " }\n"
+ "\n"
+ " new D;\n"
+ " new E;\n"
+ "}\n"};
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("PrivateMethods.golden")));
+ i::FLAG_harmony_private_methods = old_methods_flag;
}
TEST(StaticClassFields) {
- bool old_flag = i::FLAG_harmony_public_fields;
- bool old_static_flag = i::FLAG_harmony_static_fields;
- i::FLAG_harmony_public_fields = true;
- i::FLAG_harmony_static_fields = true;
InitializedIgnitionHandleScope scope;
BytecodeExpectationsPrinter printer(CcTest::isolate());
@@ -2826,8 +2848,6 @@ TEST(StaticClassFields) {
CHECK(CompareTexts(BuildActual(printer, snippets),
LoadGolden("StaticClassFields.golden")));
- i::FLAG_harmony_public_fields = old_flag;
- i::FLAG_harmony_static_fields = old_static_flag;
}
TEST(Generators) {
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
index becc46ab9c..3e1c006f20 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter-intrinsics.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/heap/heap-inl.h"
#include "src/interpreter/interpreter-intrinsics.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/interpreter/interpreter-tester.h"
namespace v8 {
@@ -95,37 +95,6 @@ TEST(IsArray) {
CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
}
-TEST(IsTypedArray) {
- HandleAndZoneScope handles;
-
- InvokeIntrinsicHelper helper(handles.main_isolate(), handles.main_zone(),
- Runtime::kInlineIsTypedArray);
- Factory* factory = handles.main_isolate()->factory();
-
- CHECK_EQ(*factory->false_value(),
- *helper.Invoke(helper.NewObject("new Date()")));
- CHECK_EQ(*factory->false_value(),
- *helper.Invoke(helper.NewObject("(function() {})")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("([1])")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("({})")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("(/x/)")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Undefined()));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.Null()));
- CHECK_EQ(*factory->false_value(),
- *helper.Invoke(helper.NewObject("'string'")));
- CHECK_EQ(*factory->false_value(), *helper.Invoke(helper.NewObject("42")));
-
- CHECK_EQ(
- *factory->true_value(),
- *helper.Invoke(helper.NewObject("new Uint8Array(new ArrayBuffer(1));")));
- CHECK_EQ(
- *factory->true_value(),
- *helper.Invoke(helper.NewObject("new Uint16Array(new ArrayBuffer(2));")));
- CHECK_EQ(
- *factory->true_value(),
- *helper.Invoke(helper.NewObject("new Int32Array(new ArrayBuffer(4));")));
-}
-
TEST(IsSmi) {
HandleAndZoneScope handles;
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index ba247dde50..9b907588ef 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -4,22 +4,22 @@
#include <tuple>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/overflowing-math.h"
-#include "src/compiler.h"
-#include "src/execution.h"
-#include "src/handles.h"
-#include "src/hash-seed-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/execution/execution.h"
+#include "src/handles/handles.h"
#include "src/heap/heap-inl.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects-inl.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "test/cctest/cctest.h"
#include "test/cctest/interpreter/interpreter-tester.h"
@@ -176,7 +176,7 @@ TEST(InterpreterLoadLiteral) {
InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
- CHECK_EQ(i::HeapNumber::cast(*return_val)->value(), -2.1e19);
+ CHECK_EQ(i::HeapNumber::cast(*return_val).value(), -2.1e19);
}
// Strings.
@@ -195,7 +195,7 @@ TEST(InterpreterLoadLiteral) {
InterpreterTester tester(isolate, bytecode_array);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
- CHECK(i::String::cast(*return_val)->Equals(*raw_string->string()));
+ CHECK(i::String::cast(*return_val).Equals(*raw_string->string()));
}
}
@@ -424,7 +424,7 @@ TEST(InterpreterBinaryOpsBigInt) {
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBigInt());
if (tester.HasFeedbackMetadata()) {
- MaybeObject feedback = callable.vector()->Get(slot);
+ MaybeObject feedback = callable.vector().Get(slot);
CHECK(feedback->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kBigInt, feedback->ToSmi().value());
}
@@ -546,7 +546,7 @@ TEST(InterpreterStringAdd) {
CHECK(return_value->SameValue(*test_cases[i].expected_value));
if (tester.HasFeedbackMetadata()) {
- MaybeObject feedback = callable.vector()->Get(slot);
+ MaybeObject feedback = callable.vector().Get(slot);
CHECK(feedback->IsSmi());
CHECK_EQ(test_cases[i].expected_feedback, feedback->ToSmi().value());
}
@@ -609,7 +609,7 @@ TEST(InterpreterParameter8) {
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- typedef Handle<Object> H;
+ using H = Handle<Object>;
auto callable = tester.GetCallable<H, H, H, H, H, H, H, H>();
Handle<Smi> arg1 = Handle<Smi>(Smi::FromInt(1), handles.main_isolate());
@@ -628,8 +628,6 @@ TEST(InterpreterParameter8) {
}
TEST(InterpreterBinaryOpTypeFeedback) {
- if (FLAG_lite_mode) return;
-
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -754,7 +752,7 @@ TEST(InterpreterBinaryOpTypeFeedback) {
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
- MaybeObject feedback0 = callable.vector()->Get(slot0);
+ MaybeObject feedback0 = callable.vector().Get(slot0);
CHECK(feedback0->IsSmi());
CHECK_EQ(test_case.feedback, feedback0->ToSmi().value());
CHECK(Object::Equals(isolate, test_case.result, return_val).ToChecked());
@@ -762,8 +760,6 @@ TEST(InterpreterBinaryOpTypeFeedback) {
}
TEST(InterpreterBinaryOpSmiTypeFeedback) {
- if (FLAG_lite_mode) return;
-
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -862,7 +858,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
- MaybeObject feedback0 = callable.vector()->Get(slot0);
+ MaybeObject feedback0 = callable.vector().Get(slot0);
CHECK(feedback0->IsSmi());
CHECK_EQ(test_case.feedback, feedback0->ToSmi().value());
CHECK(Object::Equals(isolate, test_case.result, return_val).ToChecked());
@@ -870,8 +866,6 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
}
TEST(InterpreterUnaryOpFeedback) {
- if (FLAG_lite_mode) return;
-
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -925,7 +919,7 @@ TEST(InterpreterUnaryOpFeedback) {
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- typedef Handle<Object> H;
+ using H = Handle<Object>;
auto callable = tester.GetCallable<H, H, H, H, H>();
Handle<Object> return_val =
@@ -935,31 +929,29 @@ TEST(InterpreterUnaryOpFeedback) {
test_case.bigint_feedback_value, test_case.any_feedback_value)
.ToHandleChecked();
USE(return_val);
- MaybeObject feedback0 = callable.vector()->Get(slot0);
+ MaybeObject feedback0 = callable.vector().Get(slot0);
CHECK(feedback0->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kSignedSmall, feedback0->ToSmi().value());
- MaybeObject feedback1 = callable.vector()->Get(slot1);
+ MaybeObject feedback1 = callable.vector().Get(slot1);
CHECK(feedback1->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->ToSmi().value());
- MaybeObject feedback2 = callable.vector()->Get(slot2);
+ MaybeObject feedback2 = callable.vector().Get(slot2);
CHECK(feedback2->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kNumber, feedback2->ToSmi().value());
- MaybeObject feedback3 = callable.vector()->Get(slot3);
+ MaybeObject feedback3 = callable.vector().Get(slot3);
CHECK(feedback3->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kBigInt, feedback3->ToSmi().value());
- MaybeObject feedback4 = callable.vector()->Get(slot4);
+ MaybeObject feedback4 = callable.vector().Get(slot4);
CHECK(feedback4->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kAny, feedback4->ToSmi().value());
}
}
TEST(InterpreterBitwiseTypeFeedback) {
- if (FLAG_lite_mode) return;
-
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
@@ -987,7 +979,7 @@ TEST(InterpreterBitwiseTypeFeedback) {
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
InterpreterTester tester(isolate, bytecode_array, metadata);
- typedef Handle<Object> H;
+ using H = Handle<Object>;
auto callable = tester.GetCallable<H, H, H, H>();
Handle<Smi> arg1 = Handle<Smi>(Smi::FromInt(2), isolate);
@@ -998,15 +990,15 @@ TEST(InterpreterBitwiseTypeFeedback) {
Handle<Object> return_val =
callable(arg1, arg2, arg3, arg4).ToHandleChecked();
USE(return_val);
- MaybeObject feedback0 = callable.vector()->Get(slot0);
+ MaybeObject feedback0 = callable.vector().Get(slot0);
CHECK(feedback0->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kSignedSmall, feedback0->ToSmi().value());
- MaybeObject feedback1 = callable.vector()->Get(slot1);
+ MaybeObject feedback1 = callable.vector().Get(slot1);
CHECK(feedback1->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kNumber, feedback1->ToSmi().value());
- MaybeObject feedback2 = callable.vector()->Get(slot2);
+ MaybeObject feedback2 = callable.vector().Get(slot2);
CHECK(feedback2->IsSmi());
CHECK_EQ(BinaryOperationFeedback::kAny, feedback2->ToSmi().value());
}
@@ -1489,7 +1481,7 @@ TEST(InterpreterCall) {
Handle<Object> return_val = callable(object).ToHandleChecked();
Handle<i::String> expected =
factory->NewStringFromAsciiChecked("prefix_abcdefghij");
- CHECK(i::String::cast(*return_val)->Equals(*expected));
+ CHECK(i::String::cast(*return_val).Equals(*expected));
}
}
@@ -1831,7 +1823,7 @@ TEST(InterpreterSmiComparisons) {
CHECK_EQ(return_value->BooleanValue(isolate),
CompareC(comparison, inputs[i], inputs[j]));
if (tester.HasFeedbackMetadata()) {
- MaybeObject feedback = callable.vector()->Get(slot);
+ MaybeObject feedback = callable.vector().Get(slot);
CHECK(feedback->IsSmi());
CHECK_EQ(CompareOperationFeedback::kSignedSmall,
feedback->ToSmi().value());
@@ -1882,7 +1874,7 @@ TEST(InterpreterHeapNumberComparisons) {
CHECK_EQ(return_value->BooleanValue(isolate),
CompareC(comparison, inputs[i], inputs[j]));
if (tester.HasFeedbackMetadata()) {
- MaybeObject feedback = callable.vector()->Get(slot);
+ MaybeObject feedback = callable.vector().Get(slot);
CHECK(feedback->IsSmi());
CHECK_EQ(CompareOperationFeedback::kNumber,
feedback->ToSmi().value());
@@ -1927,7 +1919,7 @@ TEST(InterpreterBigIntComparisons) {
Handle<Object> return_value = callable().ToHandleChecked();
CHECK(return_value->IsBoolean());
if (tester.HasFeedbackMetadata()) {
- MaybeObject feedback = callable.vector()->Get(slot);
+ MaybeObject feedback = callable.vector().Get(slot);
CHECK(feedback->IsSmi());
CHECK_EQ(CompareOperationFeedback::kBigInt,
feedback->ToSmi().value());
@@ -1977,7 +1969,7 @@ TEST(InterpreterStringComparisons) {
CHECK_EQ(return_value->BooleanValue(isolate),
CompareC(comparison, inputs[i], inputs[j]));
if (tester.HasFeedbackMetadata()) {
- MaybeObject feedback = callable.vector()->Get(slot);
+ MaybeObject feedback = callable.vector().Get(slot);
CHECK(feedback->IsSmi());
int const expected_feedback =
Token::IsOrderedRelationalCompareOp(comparison)
@@ -2090,7 +2082,7 @@ TEST(InterpreterMixedComparisons) {
CHECK_EQ(return_value->BooleanValue(isolate),
CompareC(comparison, lhs, rhs, true));
if (tester.HasFeedbackMetadata()) {
- MaybeObject feedback = callable.vector()->Get(slot);
+ MaybeObject feedback = callable.vector().Get(slot);
CHECK(feedback->IsSmi());
// Comparison with a number and string collects kAny feedback.
CHECK_EQ(CompareOperationFeedback::kAny,
@@ -2173,7 +2165,7 @@ TEST(InterpreterStrictNotEqual) {
}
TEST(InterpreterCompareTypeOf) {
- typedef TestTypeOfFlags::LiteralFlag LiteralFlag;
+ using LiteralFlag = TestTypeOfFlags::LiteralFlag;
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
@@ -5030,14 +5022,14 @@ TEST(InterpreterWithNativeStack) {
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v8_compile(source_text));
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
- CHECK(f->shared()->HasBytecodeArray());
- i::Code code = f->shared()->GetCode();
+ CHECK(f->shared().HasBytecodeArray());
+ i::Code code = f->shared().GetCode();
i::Handle<i::Code> interpreter_entry_trampoline =
BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
- CHECK(code->IsCode());
- CHECK(code->is_interpreter_trampoline_builtin());
- CHECK_NE(code->address(), interpreter_entry_trampoline->address());
+ CHECK(code.IsCode());
+ CHECK(code.is_interpreter_trampoline_builtin());
+ CHECK_NE(code.address(), interpreter_entry_trampoline->address());
}
#endif // V8_TARGET_ARCH_ARM
@@ -5050,24 +5042,24 @@ TEST(InterpreterGetBytecodeHandler) {
Code wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kSingle);
- CHECK_EQ(wide_handler->builtin_index(), Builtins::kWideHandler);
+ CHECK_EQ(wide_handler.builtin_index(), Builtins::kWideHandler);
Code add_handler =
interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kSingle);
- CHECK_EQ(add_handler->builtin_index(), Builtins::kAddHandler);
+ CHECK_EQ(add_handler.builtin_index(), Builtins::kAddHandler);
// Test that double-width bytecode handlers deserializer correctly, including
// an illegal bytecode handler since there is no Wide.Wide handler.
Code wide_wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kWide, OperandScale::kDouble);
- CHECK_EQ(wide_wide_handler->builtin_index(), Builtins::kIllegalHandler);
+ CHECK_EQ(wide_wide_handler.builtin_index(), Builtins::kIllegalHandler);
Code add_wide_handler =
interpreter->GetBytecodeHandler(Bytecode::kAdd, OperandScale::kDouble);
- CHECK_EQ(add_wide_handler->builtin_index(), Builtins::kAddWideHandler);
+ CHECK_EQ(add_wide_handler.builtin_index(), Builtins::kAddWideHandler);
}
TEST(InterpreterCollectSourcePositions) {
@@ -5092,7 +5084,7 @@ TEST(InterpreterCollectSourcePositions) {
ByteArray source_position_table = bytecode_array->SourcePositionTable();
CHECK(bytecode_array->HasSourcePositionTable());
- CHECK_GT(source_position_table->length(), 0);
+ CHECK_GT(source_position_table.length(), 0);
}
TEST(InterpreterCollectSourcePositions_StackOverflow) {
@@ -5121,19 +5113,17 @@ TEST(InterpreterCollectSourcePositions_StackOverflow) {
// Stack overflowed so source position table can be returned but is empty.
ByteArray source_position_table = bytecode_array->SourcePositionTable();
CHECK(!bytecode_array->HasSourcePositionTable());
- CHECK_EQ(source_position_table->length(), 0);
+ CHECK_EQ(source_position_table.length(), 0);
// Reset the stack limit and try again.
isolate->stack_guard()->SetStackLimit(previous_limit);
Compiler::CollectSourcePositions(isolate, sfi);
source_position_table = bytecode_array->SourcePositionTable();
CHECK(bytecode_array->HasSourcePositionTable());
- CHECK_GT(source_position_table->length(), 0);
+ CHECK_GT(source_position_table.length(), 0);
}
-// TODO(v8:8510): When an exception is thrown, the top frame still has its
-// source positions collected. Re-enable this test when that is fixed.
-DISABLED_TEST(InterpreterCollectSourcePositions_ThrowFrom1stFrame) {
+TEST(InterpreterCollectSourcePositions_ThrowFrom1stFrame) {
FLAG_enable_lazy_source_positions = true;
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
@@ -5253,7 +5243,7 @@ TEST(InterpreterCollectSourcePositions_GenerateStackTrace) {
CHECK(bytecode_array->HasSourcePositionTable());
ByteArray source_position_table = bytecode_array->SourcePositionTable();
- CHECK_GT(source_position_table->length(), 0);
+ CHECK_GT(source_position_table.length(), 0);
}
} // namespace interpreter
diff --git a/deps/v8/test/cctest/interpreter/test-source-positions.cc b/deps/v8/test/cctest/interpreter/test-source-positions.cc
index ccdbd53558..ee9d338f01 100644
--- a/deps/v8/test/cctest/interpreter/test-source-positions.cc
+++ b/deps/v8/test/cctest/interpreter/test-source-positions.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/compiler/pipeline.h"
-#include "src/handles.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/interpreter/source-position-matcher.h"
@@ -172,7 +172,7 @@ Handle<BytecodeArray> OptimizedBytecodeSourcePositionTester::MakeBytecode(
.ToLocalChecked());
Handle<JSFunction> function =
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*api_function));
- return handle(function->shared()->GetBytecodeArray(), isolate_);
+ return handle(function->shared().GetBytecodeArray(), isolate_);
}
void OptimizedBytecodeSourcePositionTester::SetOptimizationFlags(
diff --git a/deps/v8/test/cctest/libplatform/DEPS b/deps/v8/test/cctest/libplatform/DEPS
new file mode 100644
index 0000000000..b2bee408ab
--- /dev/null
+++ b/deps/v8/test/cctest/libplatform/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+perfetto",
+]
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index 7fca7db46a..0d6de10f2a 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -9,6 +9,12 @@
#include "src/tracing/trace-event.h"
#include "test/cctest/cctest.h"
+#ifdef V8_USE_PERFETTO
+#include "perfetto/trace/chrome/chrome_trace_event.pb.h"
+#include "perfetto/trace/chrome/chrome_trace_packet.pb.h"
+#include "src/libplatform/tracing/trace-event-listener.h"
+#endif
+
namespace v8 {
namespace platform {
namespace tracing {
@@ -68,7 +74,9 @@ class ConvertableToTraceFormatMock : public v8::ConvertableToTraceFormat {
class MockTraceWriter : public TraceWriter {
public:
void AppendTraceEvent(TraceObject* trace_event) override {
- events_.push_back(trace_event->name());
+ // TraceObject might not have been initialized.
+ const char* name = trace_event->name() ? trace_event->name() : "";
+ events_.push_back(name);
}
void Flush() override {}
@@ -142,6 +150,10 @@ void PopulateJSONWriter(TraceWriter* writer) {
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
tracing_controller->Initialize(ring_buffer);
+#ifdef V8_USE_PERFETTO
+ std::ostringstream sstream;
+ tracing_controller->InitializeForPerfetto(&sstream);
+#endif
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8-cat");
tracing_controller->StartTracing(trace_config);
@@ -209,6 +221,10 @@ TEST(TestTracingController) {
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
tracing_controller->Initialize(ring_buffer);
+#ifdef V8_USE_PERFETTO
+ std::ostringstream sstream;
+ tracing_controller->InitializeForPerfetto(&sstream);
+#endif
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8");
tracing_controller->StartTracing(trace_config);
@@ -241,7 +257,7 @@ void GetJSONStrings(std::vector<std::string>& ret, std::string str,
}
TEST(TestTracingControllerMultipleArgsAndCopy) {
- std::ostringstream stream;
+ std::ostringstream stream, perfetto_stream;
uint64_t aa = 11;
unsigned int bb = 22;
uint16_t cc = 33;
@@ -280,47 +296,52 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
tracing_controller->Initialize(ring_buffer);
+#ifdef V8_USE_PERFETTO
+ tracing_controller->InitializeForPerfetto(&perfetto_stream);
+#endif
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8");
tracing_controller->StartTracing(trace_config);
- TRACE_EVENT1("v8", "v8.Test.aa", "aa", aa);
- TRACE_EVENT1("v8", "v8.Test.bb", "bb", bb);
- TRACE_EVENT1("v8", "v8.Test.cc", "cc", cc);
- TRACE_EVENT1("v8", "v8.Test.dd", "dd", dd);
- TRACE_EVENT1("v8", "v8.Test.ee", "ee", ee);
- TRACE_EVENT1("v8", "v8.Test.ff", "ff", ff);
- TRACE_EVENT1("v8", "v8.Test.gg", "gg", gg);
- TRACE_EVENT1("v8", "v8.Test.hh", "hh", hh);
- TRACE_EVENT1("v8", "v8.Test.ii", "ii1", ii1);
- TRACE_EVENT1("v8", "v8.Test.ii", "ii2", ii2);
- TRACE_EVENT1("v8", "v8.Test.jj1", "jj1", jj1);
- TRACE_EVENT1("v8", "v8.Test.jj2", "jj2", jj2);
- TRACE_EVENT1("v8", "v8.Test.jj3", "jj3", jj3);
- TRACE_EVENT1("v8", "v8.Test.jj4", "jj4", jj4);
- TRACE_EVENT1("v8", "v8.Test.jj5", "jj5", jj5);
- TRACE_EVENT1("v8", "v8.Test.kk", "kk", kk);
- TRACE_EVENT1("v8", "v8.Test.ll", "ll", ll);
- TRACE_EVENT1("v8", "v8.Test.mm", "mm", TRACE_STR_COPY(mmm.c_str()));
-
- TRACE_EVENT2("v8", "v8.Test2.1", "aa", aa, "ll", ll);
- TRACE_EVENT2("v8", "v8.Test2.2", "mm1", TRACE_STR_COPY(mm.c_str()), "mm2",
- TRACE_STR_COPY(mmm.c_str()));
-
- // Check copies are correct.
- TRACE_EVENT_COPY_INSTANT0("v8", mm.c_str(), TRACE_EVENT_SCOPE_THREAD);
- TRACE_EVENT_COPY_INSTANT2("v8", mm.c_str(), TRACE_EVENT_SCOPE_THREAD, "mm1",
- mm.c_str(), "mm2", mmm.c_str());
- mm = "CHANGED";
- mmm = "CHANGED";
-
- TRACE_EVENT_INSTANT1("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
- new ConvertableToTraceFormatMock(42));
- std::unique_ptr<ConvertableToTraceFormatMock> trace_event_arg(
- new ConvertableToTraceFormatMock(42));
- TRACE_EVENT_INSTANT2("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
- std::move(trace_event_arg), "a2",
- new ConvertableToTraceFormatMock(123));
+ {
+ TRACE_EVENT1("v8", "v8.Test.aa", "aa", aa);
+ TRACE_EVENT1("v8", "v8.Test.bb", "bb", bb);
+ TRACE_EVENT1("v8", "v8.Test.cc", "cc", cc);
+ TRACE_EVENT1("v8", "v8.Test.dd", "dd", dd);
+ TRACE_EVENT1("v8", "v8.Test.ee", "ee", ee);
+ TRACE_EVENT1("v8", "v8.Test.ff", "ff", ff);
+ TRACE_EVENT1("v8", "v8.Test.gg", "gg", gg);
+ TRACE_EVENT1("v8", "v8.Test.hh", "hh", hh);
+ TRACE_EVENT1("v8", "v8.Test.ii", "ii1", ii1);
+ TRACE_EVENT1("v8", "v8.Test.ii", "ii2", ii2);
+ TRACE_EVENT1("v8", "v8.Test.jj1", "jj1", jj1);
+ TRACE_EVENT1("v8", "v8.Test.jj2", "jj2", jj2);
+ TRACE_EVENT1("v8", "v8.Test.jj3", "jj3", jj3);
+ TRACE_EVENT1("v8", "v8.Test.jj4", "jj4", jj4);
+ TRACE_EVENT1("v8", "v8.Test.jj5", "jj5", jj5);
+ TRACE_EVENT1("v8", "v8.Test.kk", "kk", kk);
+ TRACE_EVENT1("v8", "v8.Test.ll", "ll", ll);
+ TRACE_EVENT1("v8", "v8.Test.mm", "mm", TRACE_STR_COPY(mmm.c_str()));
+
+ TRACE_EVENT2("v8", "v8.Test2.1", "aa", aa, "ll", ll);
+ TRACE_EVENT2("v8", "v8.Test2.2", "mm1", TRACE_STR_COPY(mm.c_str()), "mm2",
+ TRACE_STR_COPY(mmm.c_str()));
+
+ // Check copies are correct.
+ TRACE_EVENT_COPY_INSTANT0("v8", mm.c_str(), TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_COPY_INSTANT2("v8", mm.c_str(), TRACE_EVENT_SCOPE_THREAD,
+ "mm1", mm.c_str(), "mm2", mmm.c_str());
+ mm = "CHANGED";
+ mmm = "CHANGED";
+
+ TRACE_EVENT_INSTANT1("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
+ new ConvertableToTraceFormatMock(42));
+ std::unique_ptr<ConvertableToTraceFormatMock> trace_event_arg(
+ new ConvertableToTraceFormatMock(42));
+ TRACE_EVENT_INSTANT2("v8", "v8.Test", TRACE_EVENT_SCOPE_THREAD, "a1",
+ std::move(trace_event_arg), "a2",
+ new ConvertableToTraceFormatMock(123));
+ }
tracing_controller->StopTracing();
@@ -395,6 +416,10 @@ TEST(TracingObservers) {
v8::platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(1,
writer);
tracing_controller->Initialize(ring_buffer);
+#ifdef V8_USE_PERFETTO
+ std::ostringstream sstream;
+ tracing_controller->InitializeForPerfetto(&sstream);
+#endif
v8::platform::tracing::TraceConfig* trace_config =
new v8::platform::tracing::TraceConfig();
trace_config->AddIncludedCategory("v8");
@@ -451,18 +476,21 @@ class TraceWritingThread : public base::Thread {
tracing_controller_(tracing_controller) {}
void Run() override {
- for (int i = 0; i < 1000; i++) {
+ running_.store(true);
+ while (running_.load()) {
TRACE_EVENT0("v8", "v8.Test");
tracing_controller_->AddTraceEvent('A', nullptr, "v8", "", 1, 1, 0,
nullptr, nullptr, nullptr, nullptr, 0);
tracing_controller_->AddTraceEventWithTimestamp('A', nullptr, "v8", "", 1,
1, 0, nullptr, nullptr,
nullptr, nullptr, 0, 0);
- base::OS::Sleep(base::TimeDelta::FromMilliseconds(1));
}
}
+ void Stop() { running_.store(false); }
+
private:
+ std::atomic_bool running_{false};
v8::platform::tracing::TracingController* tracing_controller_;
};
@@ -481,21 +509,181 @@ TEST(AddTraceEventMultiThreaded) {
TraceBuffer* ring_buffer =
TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
tracing_controller->Initialize(ring_buffer);
+#ifdef V8_USE_PERFETTO
+ std::ostringstream sstream;
+ tracing_controller->InitializeForPerfetto(&sstream);
+#endif
TraceConfig* trace_config = new TraceConfig();
trace_config->AddIncludedCategory("v8");
tracing_controller->StartTracing(trace_config);
TraceWritingThread thread(tracing_controller);
thread.StartSynchronously();
+ TRACE_EVENT0("v8", "v8.Test2");
+ TRACE_EVENT0("v8", "v8.Test2");
- base::OS::Sleep(base::TimeDelta::FromMilliseconds(100));
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(10));
tracing_controller->StopTracing();
+ thread.Stop();
thread.Join();
i::V8::SetPlatformForTesting(old_platform);
}
+#ifdef V8_USE_PERFETTO
+
+struct TraceEvent {
+ std::string name;
+ int64_t timestamp;
+ int32_t phase;
+ int32_t thread_id;
+ int64_t duration;
+ int64_t thread_duration;
+ std::string scope;
+ uint64_t id;
+ uint32_t flags;
+ std::string category_group_name;
+ int32_t process_id;
+ int64_t thread_timestamp;
+ uint64_t bind_id;
+};
+
+class TestListener : public TraceEventListener {
+ public:
+ void ProcessPacket(
+ const ::perfetto::protos::ChromeTracePacket& packet) override {
+ for (const ::perfetto::protos::ChromeTraceEvent& event :
+ packet.chrome_events().trace_events()) {
+ TraceEvent trace_event{event.name(), event.timestamp(),
+ event.phase(), event.thread_id(),
+ event.duration(), event.thread_duration(),
+ event.scope(), event.id(),
+ event.flags(), event.category_group_name(),
+ event.process_id(), event.thread_timestamp(),
+ event.bind_id()};
+ events_.push_back(trace_event);
+ }
+ }
+
+ TraceEvent* get_event(size_t index) { return &events_.at(index); }
+
+ size_t events_size() const { return events_.size(); }
+
+ private:
+ std::vector<TraceEvent> events_;
+};
+
+class TracingTestHarness {
+ public:
+ TracingTestHarness() {
+ old_platform_ = i::V8::GetCurrentPlatform();
+ default_platform_ = v8::platform::NewDefaultPlatform();
+ i::V8::SetPlatformForTesting(default_platform_.get());
+
+ auto tracing =
+ base::make_unique<v8::platform::tracing::TracingController>();
+ tracing_controller_ = tracing.get();
+ static_cast<v8::platform::DefaultPlatform*>(default_platform_.get())
+ ->SetTracingController(std::move(tracing));
+
+ MockTraceWriter* writer = new MockTraceWriter();
+ TraceBuffer* ring_buffer =
+ TraceBuffer::CreateTraceBufferRingBuffer(1, writer);
+ tracing_controller_->Initialize(ring_buffer);
+ tracing_controller_->InitializeForPerfetto(&perfetto_json_stream_);
+ tracing_controller_->SetTraceEventListenerForTesting(&listener_);
+ }
+
+ ~TracingTestHarness() { i::V8::SetPlatformForTesting(old_platform_); }
+
+ void StartTracing() {
+ TraceConfig* trace_config = new TraceConfig();
+ trace_config->AddIncludedCategory("v8");
+ tracing_controller_->StartTracing(trace_config);
+ }
+
+ void StopTracing() { tracing_controller_->StopTracing(); }
+
+ TraceEvent* get_event(size_t index) { return listener_.get_event(index); }
+ size_t events_size() const { return listener_.events_size(); }
+
+ std::string perfetto_json_stream() { return perfetto_json_stream_.str(); }
+
+ private:
+ std::unique_ptr<v8::Platform> default_platform_;
+ v8::Platform* old_platform_;
+ v8::platform::tracing::TracingController* tracing_controller_;
+ TestListener listener_;
+ std::ostringstream perfetto_json_stream_;
+};
+
+TEST(Perfetto) {
+ TracingTestHarness harness;
+ harness.StartTracing();
+
+ uint64_t uint64_arg = 1024;
+ const char* str_arg = "str_arg";
+
+ {
+ TRACE_EVENT0("v8", "test1");
+ TRACE_EVENT1("v8", "test2", "arg1", uint64_arg);
+ TRACE_EVENT2("v8", "test3", "arg1", uint64_arg, "arg2", str_arg);
+ }
+ TRACE_EVENT_INSTANT0("v8", "final event not captured",
+ TRACE_EVENT_SCOPE_THREAD);
+
+ harness.StopTracing();
+
+ TraceEvent* event = harness.get_event(0);
+ int32_t thread_id = event->thread_id;
+ int32_t process_id = event->process_id;
+ CHECK_EQ("test1", event->name);
+ CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase);
+ int64_t timestamp = event->timestamp;
+
+ event = harness.get_event(1);
+ CHECK_EQ("test2", event->name);
+ CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase);
+ CHECK_EQ(thread_id, event->thread_id);
+ CHECK_EQ(process_id, event->process_id);
+ CHECK_GE(event->timestamp, timestamp);
+ timestamp = event->timestamp;
+
+ event = harness.get_event(2);
+ CHECK_EQ("test3", event->name);
+ CHECK_EQ(TRACE_EVENT_PHASE_BEGIN, event->phase);
+ CHECK_EQ(thread_id, event->thread_id);
+ CHECK_EQ(process_id, event->process_id);
+ CHECK_GE(event->timestamp, timestamp);
+ timestamp = event->timestamp;
+
+ event = harness.get_event(3);
+ CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase);
+ CHECK_EQ(thread_id, event->thread_id);
+ CHECK_EQ(process_id, event->process_id);
+ CHECK_GE(event->timestamp, timestamp);
+ timestamp = event->timestamp;
+
+ event = harness.get_event(4);
+ CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase);
+ CHECK_EQ(thread_id, event->thread_id);
+ CHECK_EQ(process_id, event->process_id);
+ CHECK_GE(event->timestamp, timestamp);
+ timestamp = event->timestamp;
+
+ event = harness.get_event(5);
+ CHECK_EQ(TRACE_EVENT_PHASE_END, event->phase);
+ CHECK_EQ(thread_id, event->thread_id);
+ CHECK_EQ(process_id, event->process_id);
+ CHECK_GE(event->timestamp, timestamp);
+ timestamp = event->timestamp;
+
+ CHECK_EQ(6, harness.events_size());
+}
+
+#endif // V8_USE_PERFETTO
+
} // namespace tracing
} // namespace platform
} // namespace v8
diff --git a/deps/v8/test/cctest/parsing/test-parse-decision.cc b/deps/v8/test/cctest/parsing/test-parse-decision.cc
index e3b046baef..31c6bba9d1 100644
--- a/deps/v8/test/cctest/parsing/test-parse-decision.cc
+++ b/deps/v8/test/cctest/parsing/test-parse-decision.cc
@@ -10,12 +10,12 @@
#include <unordered_map>
#include "include/v8.h"
-#include "src/api-inl.h"
-#include "src/handles-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info-inl.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
#include "test/cctest/cctest.h"
@@ -32,12 +32,12 @@ void GetTopLevelFunctionInfo(
// The API object 'wraps' the compiled top-level function, not the i::Script.
Handle<JSFunction> toplevel_fn = v8::Utils::OpenHandle(*script);
SharedFunctionInfo::ScriptIterator iterator(
- toplevel_fn->GetIsolate(), Script::cast(toplevel_fn->shared()->script()));
+ toplevel_fn->GetIsolate(), Script::cast(toplevel_fn->shared().script()));
for (SharedFunctionInfo shared = iterator.Next(); !shared.is_null();
shared = iterator.Next()) {
- std::unique_ptr<char[]> name = String::cast(shared->Name())->ToCString();
- is_compiled->insert(std::make_pair(name.get(), shared->is_compiled()));
+ std::unique_ptr<char[]> name = String::cast(shared.Name()).ToCString();
+ is_compiled->insert(std::make_pair(name.get(), shared.is_compiled()));
}
}
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index 64dd802c64..0aae610654 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/ast.h"
-#include "src/compiler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/preparse-data-impl.h"
@@ -110,9 +110,6 @@ TEST(PreParserScopeAnalysis) {
Inner(const char* p, const char* s, SkipTests skip, Bailout bailout)
: params(p), source(s), skip(skip), bailout(bailout) {}
- Inner(const char* s, std::function<void()> p, std::function<void()> e)
- : source(s), prologue(p), epilogue(e) {}
-
const char* params = "";
const char* source;
SkipTests skip = DONT_SKIP;
@@ -659,33 +656,11 @@ TEST(PreParserScopeAnalysis) {
{"class MyClass extends MyBase { static m() { var var1; function foo() { "
"var1 = 11; } } }"},
- {"class X { ['bar'] = 1; }; new X;",
- [] { i::FLAG_harmony_public_fields = true; },
- [] { i::FLAG_harmony_public_fields = false; }},
- {"class X { static ['foo'] = 2; }; new X;",
- [] {
- i::FLAG_harmony_public_fields = true;
- i::FLAG_harmony_static_fields = true;
- },
- [] {
- i::FLAG_harmony_public_fields = false;
- i::FLAG_harmony_static_fields = false;
- }},
- {"class X { ['bar'] = 1; static ['foo'] = 2; }; new X;",
- [] {
- i::FLAG_harmony_public_fields = true;
- i::FLAG_harmony_static_fields = true;
- },
- [] {
- i::FLAG_harmony_public_fields = false;
- i::FLAG_harmony_static_fields = false;
- }},
- {"class X { #x = 1 }; new X;",
- [] { i::FLAG_harmony_private_fields = true; },
- [] { i::FLAG_harmony_private_fields = false; }},
- {"function t() { return class { #x = 1 }; } new t();",
- [] { i::FLAG_harmony_private_fields = true; },
- [] { i::FLAG_harmony_private_fields = false; }},
+ {"class X { ['bar'] = 1; }; new X;"},
+ {"class X { static ['foo'] = 2; }; new X;"},
+ {"class X { ['bar'] = 1; static ['foo'] = 2; }; new X;"},
+ {"class X { #x = 1 }; new X;"},
+ {"function t() { return class { #x = 1 }; } new t();"},
};
for (unsigned i = 0; i < arraysize(outers); ++i) {
@@ -705,20 +680,18 @@ TEST(PreParserScopeAnalysis) {
int source_len = Utf8LengthHelper(inner.source);
int len = code_len + params_len + source_len;
- if (inner.prologue != nullptr) inner.prologue();
-
i::ScopedVector<char> program(len + 1);
i::SNPrintF(program, code, inner.params, inner.source);
i::HandleScope scope(isolate);
i::Handle<i::String> source =
- factory->InternalizeUtf8String(program.start());
+ factory->InternalizeUtf8String(program.begin());
source->PrintOn(stdout);
printf("\n");
// Compile and run the script to get a pointer to the lazy function.
- v8::Local<v8::Value> v = CompileRun(program.start());
+ v8::Local<v8::Value> v = CompileRun(program.begin());
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Handle<i::SharedFunctionInfo> shared = i::handle(f->shared(), isolate);
@@ -731,7 +704,7 @@ TEST(PreParserScopeAnalysis) {
CHECK(shared->HasUncompiledDataWithPreparseData());
i::Handle<i::PreparseData> produced_data_on_heap(
- shared->uncompiled_data_with_preparse_data()->preparse_data(),
+ shared->uncompiled_data_with_preparse_data().preparse_data(),
isolate);
// Parse the lazy function using the scope data.
@@ -770,8 +743,6 @@ TEST(PreParserScopeAnalysis) {
i::ScopeTestHelper::CompareScopes(
scope_without_skipped_functions, scope_with_skipped_functions,
inner.precise_maybe_assigned == PreciseMaybeAssigned::YES);
-
- if (inner.epilogue != nullptr) inner.epilogue();
}
}
}
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index 92d24bd12a..39d95897d6 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/heap/factory-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/parsing/test-scanner.cc b/deps/v8/test/cctest/parsing/test-scanner.cc
index df1153793b..9451d61d89 100644
--- a/deps/v8/test/cctest/parsing/test-scanner.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner.cc
@@ -5,8 +5,8 @@
// Tests v8::internal::Scanner. Note that presently most unit tests for the
// Scanner are in cctest/test-parsing.cc, rather than here.
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/setup-isolate-for-tests.h b/deps/v8/test/cctest/setup-isolate-for-tests.h
index c026c04afd..f9335338a3 100644
--- a/deps/v8/test/cctest/setup-isolate-for-tests.h
+++ b/deps/v8/test/cctest/setup-isolate-for-tests.h
@@ -5,7 +5,7 @@
#ifndef V8_TEST_CCTEST_SETUP_ISOLATE_FOR_TESTS_H_
#define V8_TEST_CCTEST_SETUP_ISOLATE_FOR_TESTS_H_
-#include "src/setup-isolate.h"
+#include "src/init/setup-isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-accessor-assembler.cc b/deps/v8/test/cctest/test-accessor-assembler.cc
index 8096b82b90..20a2bc2d80 100644
--- a/deps/v8/test/cctest/test-accessor-assembler.cc
+++ b/deps/v8/test/cctest/test-accessor-assembler.cc
@@ -7,7 +7,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/ic/accessor-assembler.h"
#include "src/ic/stub-cache.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -118,7 +118,7 @@ Handle<Code> CreateCodeOfKind(Code::Kind kind) {
} // namespace
TEST(TryProbeStubCache) {
- typedef CodeStubAssembler::Label Label;
+ using Label = CodeStubAssembler::Label;
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 3;
CodeAssemblerTester data(isolate, kNumParams);
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index d769c1ebd7..e0c93501c2 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -27,11 +27,11 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
-#include "src/frames-inl.h"
-#include "src/string-stream.h"
+#include "src/api/api-inl.h"
+#include "src/execution/frames-inl.h"
+#include "src/strings/string-stream.h"
#include "test/cctest/cctest.h"
using ::v8::ObjectTemplate;
@@ -539,8 +539,8 @@ static void StackCheck(Local<String> name,
i::StackFrame* frame = iter.frame();
CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT));
i::Code code = frame->LookupCode();
- CHECK(code->IsCode());
- CHECK(code->contains(frame->pc()));
+ CHECK(code.IsCode());
+ CHECK(code.contains(frame->pc()));
iter.Advance();
}
}
@@ -801,7 +801,8 @@ TEST(PrototypeGetterAccessCheck) {
" x = obj.foo;"
" }"
" return x;"
- "}");
+ "};"
+ "%PrepareFunctionForOptimization(f);");
security_check_value = true;
ExpectInt32("f()", 907);
diff --git a/deps/v8/test/cctest/test-allocation.cc b/deps/v8/test/cctest/test-allocation.cc
index bde7ef5df6..e416c554ef 100644
--- a/deps/v8/test/cctest/test-allocation.cc
+++ b/deps/v8/test/cctest/test-allocation.cc
@@ -10,7 +10,7 @@
#include <unistd.h> // NOLINT
#endif
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
@@ -20,7 +20,7 @@ using v8::IdleTask;
using v8::Isolate;
using v8::Task;
-#include "src/allocation.h"
+#include "src/utils/allocation.h"
#include "src/zone/accounting-allocator.h"
// ASAN isn't configured to return nullptr, so skip all of these tests.
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index 7c0a7ee8cb..8c2f92d665 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -5,8 +5,8 @@
#include "test/cctest/cctest.h"
#include "include/v8.h"
-#include "src/api.h"
-#include "src/objects-inl.h"
+#include "src/api/api.h"
+#include "src/objects/objects-inl.h"
namespace i = v8::internal;
@@ -108,7 +108,8 @@ TEST(CachedAccessorTurboFan) {
" x = obj.draft;"
" }"
" return x;"
- "}");
+ "};"
+ "%PrepareFunctionForOptimization(f);");
ExpectInt32("f()", 123);
@@ -132,7 +133,8 @@ TEST(CachedAccessorTurboFan) {
" r = x.draft;"
" }"
" return r;"
- "}");
+ "};"
+ "%PrepareFunctionForOptimization(g);");
ExpectInt32("g()", 456);
@@ -190,7 +192,8 @@ TEST(CachedAccessorOnGlobalObject) {
" x = draft;"
" }"
" return x;"
- "}");
+ "}"
+ "%PrepareFunctionForOptimization(f);");
ExpectInt32("f()", 123);
@@ -214,7 +217,8 @@ TEST(CachedAccessorOnGlobalObject) {
" r = x.draft;"
" }"
" return r;"
- "}");
+ "}"
+ "%PrepareFunctionForOptimization(g);");
ExpectInt32("g()", 456);
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 41678032af..e331d1a26a 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -7,16 +7,16 @@
#include "test/cctest/test-api.h"
#include "include/v8-util.h"
-#include "src/api-inl.h"
-#include "src/arguments.h"
+#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
-#include "src/compilation-cache.h"
-#include "src/execution.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/arguments.h"
+#include "src/execution/execution.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/runtime/runtime.h"
-#include "src/unicode-inl.h"
-#include "src/utils.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/utils.h"
using ::v8::Boolean;
using ::v8::BooleanObject;
@@ -1339,9 +1339,9 @@ THREADED_TEST(InterceptorLoadGlobalICGlobalWithInterceptor) {
v8::Utils::OpenHandle<Object, i::JSReceiver>(context->Global());
CHECK(global_proxy->IsJSGlobalProxy());
i::Handle<i::JSGlobalObject> global(
- i::JSGlobalObject::cast(global_proxy->map()->prototype()),
+ i::JSGlobalObject::cast(global_proxy->map().prototype()),
global_proxy->GetIsolate());
- CHECK(global->map()->has_named_interceptor());
+ CHECK(global->map().has_named_interceptor());
v8::Local<Value> value = CompileRun(
"var f = function() { "
@@ -1403,9 +1403,9 @@ THREADED_TEST(InterceptorLoadICGlobalWithInterceptor) {
v8::Utils::OpenHandle<Object, i::JSReceiver>(context->Global());
CHECK(global_proxy->IsJSGlobalProxy());
i::Handle<i::JSGlobalObject> global(
- i::JSGlobalObject::cast(global_proxy->map()->prototype()),
+ i::JSGlobalObject::cast(global_proxy->map().prototype()),
global_proxy->GetIsolate());
- CHECK(global->map()->has_named_interceptor());
+ CHECK(global->map().has_named_interceptor());
ExpectInt32(
"(function() {"
@@ -3962,6 +3962,7 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
" }"
" return result;"
"};"
+ "%PrepareFunctionForOptimization(test);"
"test();"
"test();"
"test();"
@@ -4491,6 +4492,7 @@ THREADED_TEST(Regress256330) {
CompileRun(
"\"use strict\"; var o = new Bug;"
"function f(o) { o.x = 10; };"
+ "%PrepareFunctionForOptimization(f);"
"f(o); f(o); f(o);"
"%OptimizeFunctionOnNextCall(f);"
"f(o);");
@@ -4519,6 +4521,7 @@ THREADED_TEST(OptimizedInterceptorSetter) {
"function getter() { return this.accessor_age; };"
"function setAge(i) { obj.age = i; };"
"Object.defineProperty(obj, 'age', { get:getter, set:setter });"
+ "%PrepareFunctionForOptimization(setAge);"
"setAge(1);"
"setAge(2);"
"setAge(3);"
@@ -4547,6 +4550,7 @@ THREADED_TEST(OptimizedInterceptorGetter) {
"function getter() { return this.accessor_age; };"
"function getAge() { return obj.interceptor_age; };"
"Object.defineProperty(obj, 'interceptor_age', { get:getter });"
+ "%PrepareFunctionForOptimization(getAge);"
"getAge();"
"getAge();"
"getAge();"
@@ -4569,7 +4573,8 @@ THREADED_TEST(OptimizedInterceptorFieldRead) {
"var obj = new Obj;"
"obj.__proto__.interceptor_age = 42;"
"obj.age = 100;"
- "function getAge() { return obj.interceptor_age; };");
+ "function getAge() { return obj.interceptor_age; };"
+ "%PrepareFunctionForOptimization(getAge);");
ExpectInt32("getAge();", 100);
ExpectInt32("getAge();", 100);
ExpectInt32("getAge();", 100);
@@ -4592,6 +4597,7 @@ THREADED_TEST(OptimizedInterceptorFieldWrite) {
"var obj = new Obj;"
"obj.age = 100000;"
"function setAge(i) { obj.age = i };"
+ "%PrepareFunctionForOptimization(setAge);"
"setAge(100);"
"setAge(101);"
"setAge(102);"
diff --git a/deps/v8/test/cctest/test-api-stack-traces.cc b/deps/v8/test/cctest/test-api-stack-traces.cc
new file mode 100644
index 0000000000..c1c8a28b05
--- /dev/null
+++ b/deps/v8/test/cctest/test-api-stack-traces.cc
@@ -0,0 +1,808 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/test-api.h"
+
+#include "src/api/api-inl.h"
+
+using ::v8::Array;
+using ::v8::Context;
+using ::v8::Local;
+using ::v8::ObjectTemplate;
+using ::v8::String;
+using ::v8::TryCatch;
+using ::v8::Value;
+
+static v8::MaybeLocal<Value> PrepareStackTrace42(v8::Local<Context> context,
+ v8::Local<Value> error,
+ v8::Local<Array> trace) {
+ return v8::Number::New(context->GetIsolate(), 42);
+}
+
+static v8::MaybeLocal<Value> PrepareStackTraceThrow(v8::Local<Context> context,
+ v8::Local<Value> error,
+ v8::Local<Array> trace) {
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<String> message = v8_str("42");
+ isolate->ThrowException(v8::Exception::Error(message));
+ return v8::MaybeLocal<Value>();
+}
+
+THREADED_TEST(IsolatePrepareStackTrace) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ isolate->SetPrepareStackTraceCallback(PrepareStackTrace42);
+
+ v8::Local<Value> v = CompileRun("new Error().stack");
+
+ CHECK(v->IsNumber());
+ CHECK_EQ(v.As<v8::Number>()->Int32Value(context.local()).FromJust(), 42);
+}
+
+THREADED_TEST(IsolatePrepareStackTraceThrow) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ isolate->SetPrepareStackTraceCallback(PrepareStackTraceThrow);
+
+ v8::Local<Value> v = CompileRun("try { new Error().stack } catch (e) { e }");
+
+ CHECK(v->IsNativeError());
+
+ v8::Local<String> message = v8::Exception::CreateMessage(isolate, v)->Get();
+
+ CHECK(message->StrictEquals(v8_str("Uncaught Error: 42")));
+}
+
+static void ThrowV8Exception(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Local<String> foo = v8_str("foo");
+ v8::Local<String> message = v8_str("message");
+ v8::Local<Value> error = v8::Exception::Error(foo);
+ CHECK(error->IsObject());
+ v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
+ CHECK(error.As<v8::Object>()
+ ->Get(context, message)
+ .ToLocalChecked()
+ ->Equals(context, foo)
+ .FromJust());
+ info.GetIsolate()->ThrowException(error);
+ info.GetReturnValue().SetUndefined();
+}
+
+THREADED_TEST(ExceptionCreateMessage) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ v8::Local<String> foo_str = v8_str("foo");
+ v8::Local<String> message_str = v8_str("message");
+
+ context->GetIsolate()->SetCaptureStackTraceForUncaughtExceptions(true);
+
+ Local<v8::FunctionTemplate> fun =
+ v8::FunctionTemplate::New(context->GetIsolate(), ThrowV8Exception);
+ v8::Local<v8::Object> global = context->Global();
+ CHECK(global
+ ->Set(context.local(), v8_str("throwV8Exception"),
+ fun->GetFunction(context.local()).ToLocalChecked())
+ .FromJust());
+
+ TryCatch try_catch(context->GetIsolate());
+ CompileRun(
+ "function f1() {\n"
+ " throwV8Exception();\n"
+ "};\n"
+ "f1();");
+ CHECK(try_catch.HasCaught());
+
+ v8::Local<v8::Value> error = try_catch.Exception();
+ CHECK(error->IsObject());
+ CHECK(error.As<v8::Object>()
+ ->Get(context.local(), message_str)
+ .ToLocalChecked()
+ ->Equals(context.local(), foo_str)
+ .FromJust());
+
+ v8::Local<v8::Message> message =
+ v8::Exception::CreateMessage(context->GetIsolate(), error);
+ CHECK(!message.IsEmpty());
+ CHECK_EQ(2, message->GetLineNumber(context.local()).FromJust());
+ CHECK_EQ(2, message->GetStartColumn(context.local()).FromJust());
+
+ v8::Local<v8::StackTrace> stackTrace = message->GetStackTrace();
+ CHECK(!stackTrace.IsEmpty());
+ CHECK_EQ(2, stackTrace->GetFrameCount());
+
+ stackTrace = v8::Exception::GetStackTrace(error);
+ CHECK(!stackTrace.IsEmpty());
+ CHECK_EQ(2, stackTrace->GetFrameCount());
+
+ context->GetIsolate()->SetCaptureStackTraceForUncaughtExceptions(false);
+
+ // Now check message location when SetCaptureStackTraceForUncaughtExceptions
+ // is false.
+ try_catch.Reset();
+
+ CompileRun(
+ "function f2() {\n"
+ " return throwV8Exception();\n"
+ "};\n"
+ "f2();");
+ CHECK(try_catch.HasCaught());
+
+ error = try_catch.Exception();
+ CHECK(error->IsObject());
+ CHECK(error.As<v8::Object>()
+ ->Get(context.local(), message_str)
+ .ToLocalChecked()
+ ->Equals(context.local(), foo_str)
+ .FromJust());
+
+ message = v8::Exception::CreateMessage(context->GetIsolate(), error);
+ CHECK(!message.IsEmpty());
+ CHECK_EQ(2, message->GetLineNumber(context.local()).FromJust());
+ CHECK_EQ(9, message->GetStartColumn(context.local()).FromJust());
+
+ // Should be empty stack trace.
+ stackTrace = message->GetStackTrace();
+ CHECK(stackTrace.IsEmpty());
+ CHECK(v8::Exception::GetStackTrace(error).IsEmpty());
+}
+
+// TODO(szuend): Re-enable as a threaded test once investigated and fixed.
+// THREADED_TEST(StackTrace) {
+TEST(StackTrace) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ v8::TryCatch try_catch(context->GetIsolate());
+ const char* source = "function foo() { FAIL.FAIL; }; foo();";
+ v8::Local<v8::String> src = v8_str(source);
+ v8::Local<v8::String> origin = v8_str("stack-trace-test");
+ v8::ScriptCompiler::Source script_source(src, v8::ScriptOrigin(origin));
+ CHECK(v8::ScriptCompiler::CompileUnboundScript(context->GetIsolate(),
+ &script_source)
+ .ToLocalChecked()
+ ->BindToCurrentContext()
+ ->Run(context.local())
+ .IsEmpty());
+ CHECK(try_catch.HasCaught());
+ v8::String::Utf8Value stack(
+ context->GetIsolate(),
+ try_catch.StackTrace(context.local()).ToLocalChecked());
+ CHECK_NOT_NULL(strstr(*stack, "at foo (stack-trace-test"));
+}
+
+// Checks that a StackFrame has certain expected values.
+static void checkStackFrame(const char* expected_script_name,
+ const char* expected_func_name,
+ int expected_line_number, int expected_column,
+ bool is_eval, bool is_constructor,
+ v8::Local<v8::StackFrame> frame) {
+ v8::HandleScope scope(CcTest::isolate());
+ v8::String::Utf8Value func_name(CcTest::isolate(), frame->GetFunctionName());
+ v8::String::Utf8Value script_name(CcTest::isolate(), frame->GetScriptName());
+ if (*script_name == nullptr) {
+ // The situation where there is no associated script, like for evals.
+ CHECK_NULL(expected_script_name);
+ } else {
+ CHECK_NOT_NULL(strstr(*script_name, expected_script_name));
+ }
+ CHECK_NOT_NULL(strstr(*func_name, expected_func_name));
+ CHECK_EQ(expected_line_number, frame->GetLineNumber());
+ CHECK_EQ(expected_column, frame->GetColumn());
+ CHECK_EQ(is_eval, frame->IsEval());
+ CHECK_EQ(is_constructor, frame->IsConstructor());
+ CHECK(frame->IsUserJavaScript());
+}
+
+static void AnalyzeStackInNativeCode(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::HandleScope scope(args.GetIsolate());
+ const char* origin = "capture-stack-trace-test";
+ const int kOverviewTest = 1;
+ const int kDetailedTest = 2;
+ const int kFunctionName = 3;
+ const int kDisplayName = 4;
+ const int kFunctionNameAndDisplayName = 5;
+ const int kDisplayNameIsNotString = 6;
+ const int kFunctionNameIsNotString = 7;
+
+ CHECK_EQ(args.Length(), 1);
+
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ v8::Isolate* isolate = args.GetIsolate();
+ int testGroup = args[0]->Int32Value(context).FromJust();
+ if (testGroup == kOverviewTest) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kOverview);
+ CHECK_EQ(4, stackTrace->GetFrameCount());
+ checkStackFrame(origin, "bar", 2, 10, false, false,
+ stackTrace->GetFrame(args.GetIsolate(), 0));
+ checkStackFrame(origin, "foo", 6, 3, false, true,
+ stackTrace->GetFrame(isolate, 1));
+ // This is the source string inside the eval which has the call to foo.
+ checkStackFrame(nullptr, "", 1, 1, true, false,
+ stackTrace->GetFrame(isolate, 2));
+ // The last frame is an anonymous function which has the initial eval call.
+ checkStackFrame(origin, "", 8, 7, false, false,
+ stackTrace->GetFrame(isolate, 3));
+ } else if (testGroup == kDetailedTest) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kDetailed);
+ CHECK_EQ(4, stackTrace->GetFrameCount());
+ checkStackFrame(origin, "bat", 4, 22, false, false,
+ stackTrace->GetFrame(isolate, 0));
+ checkStackFrame(origin, "baz", 8, 3, false, true,
+ stackTrace->GetFrame(isolate, 1));
+ bool is_eval = true;
+ // This is the source string inside the eval which has the call to baz.
+ checkStackFrame(nullptr, "", 1, 1, is_eval, false,
+ stackTrace->GetFrame(isolate, 2));
+ // The last frame is an anonymous function which has the initial eval call.
+ checkStackFrame(origin, "", 10, 1, false, false,
+ stackTrace->GetFrame(isolate, 3));
+ } else if (testGroup == kFunctionName) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(nullptr, "function.name", 3, 1, true, false,
+ stackTrace->GetFrame(isolate, 0));
+ } else if (testGroup == kDisplayName) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
+ stackTrace->GetFrame(isolate, 0));
+ } else if (testGroup == kFunctionNameAndDisplayName) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
+ stackTrace->GetFrame(isolate, 0));
+ } else if (testGroup == kDisplayNameIsNotString) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(nullptr, "function.name", 3, 1, true, false,
+ stackTrace->GetFrame(isolate, 0));
+ } else if (testGroup == kFunctionNameIsNotString) {
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 5, v8::StackTrace::kOverview);
+ CHECK_EQ(3, stackTrace->GetFrameCount());
+ checkStackFrame(nullptr, "", 3, 1, true, false,
+ stackTrace->GetFrame(isolate, 0));
+ }
+}
+
+// Tests the C++ StackTrace API.
+// TODO(3074796): Reenable this as a THREADED_TEST once it passes.
+// THREADED_TEST(CaptureStackTrace) {
+TEST(CaptureStackTrace) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::String> origin = v8_str("capture-stack-trace-test");
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(v8_str("AnalyzeStackInNativeCode"),
+ v8::FunctionTemplate::New(isolate, AnalyzeStackInNativeCode));
+ LocalContext context(nullptr, templ);
+
+ // Test getting OVERVIEW information. Should ignore information that is not
+ // script name, function name, line number, and column offset.
+ const char* overview_source =
+ "function bar() {\n"
+ " var y; AnalyzeStackInNativeCode(1);\n"
+ "}\n"
+ "function foo() {\n"
+ "\n"
+ " bar();\n"
+ "}\n"
+ "var x;eval('new foo();');";
+ v8::Local<v8::String> overview_src = v8_str(overview_source);
+ v8::ScriptCompiler::Source script_source(overview_src,
+ v8::ScriptOrigin(origin));
+ v8::Local<Value> overview_result(
+ v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source)
+ .ToLocalChecked()
+ ->BindToCurrentContext()
+ ->Run(context.local())
+ .ToLocalChecked());
+ CHECK(!overview_result.IsEmpty());
+ CHECK(overview_result->IsObject());
+
+ // Test getting DETAILED information.
+ const char* detailed_source =
+ "function bat() {AnalyzeStackInNativeCode(2);\n"
+ "}\n"
+ "\n"
+ "function baz() {\n"
+ " bat();\n"
+ "}\n"
+ "eval('new baz();');";
+ v8::Local<v8::String> detailed_src = v8_str(detailed_source);
+ // Make the script using a non-zero line and column offset.
+ v8::Local<v8::Integer> line_offset = v8::Integer::New(isolate, 3);
+ v8::Local<v8::Integer> column_offset = v8::Integer::New(isolate, 5);
+ v8::ScriptOrigin detailed_origin(origin, line_offset, column_offset);
+ v8::ScriptCompiler::Source script_source2(detailed_src, detailed_origin);
+ v8::Local<v8::UnboundScript> detailed_script(
+ v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source2)
+ .ToLocalChecked());
+ v8::Local<Value> detailed_result(detailed_script->BindToCurrentContext()
+ ->Run(context.local())
+ .ToLocalChecked());
+ CHECK(!detailed_result.IsEmpty());
+ CHECK(detailed_result->IsObject());
+
+ // Test using function.name and function.displayName in stack trace
+ const char* function_name_source =
+ "function bar(function_name, display_name, testGroup) {\n"
+ " var f = new Function(`AnalyzeStackInNativeCode(${testGroup});`);\n"
+ " if (function_name) {\n"
+ " Object.defineProperty(f, 'name', { value: function_name });\n"
+ " }\n"
+ " if (display_name) {\n"
+ " f.displayName = display_name;"
+ " }\n"
+ " f()\n"
+ "}\n"
+ "bar('function.name', undefined, 3);\n"
+ "bar(undefined, 'function.displayName', 4);\n"
+ "bar('function.name', 'function.displayName', 5);\n"
+ "bar('function.name', 239, 6);\n"
+ "bar(239, undefined, 7);\n";
+ v8::Local<v8::String> function_name_src =
+ v8::String::NewFromUtf8(isolate, function_name_source,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ v8::ScriptCompiler::Source script_source3(function_name_src,
+ v8::ScriptOrigin(origin));
+ v8::Local<Value> function_name_result(
+ v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source3)
+ .ToLocalChecked()
+ ->BindToCurrentContext()
+ ->Run(context.local())
+ .ToLocalChecked());
+ CHECK(!function_name_result.IsEmpty());
+}
+
+static int report_count = 0;
+static void StackTraceForUncaughtExceptionListener(
+ v8::Local<v8::Message> message, v8::Local<Value>) {
+ report_count++;
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
+ CHECK_EQ(2, stack_trace->GetFrameCount());
+ checkStackFrame("origin", "foo", 2, 3, false, false,
+ stack_trace->GetFrame(message->GetIsolate(), 0));
+ checkStackFrame("origin", "bar", 5, 3, false, false,
+ stack_trace->GetFrame(message->GetIsolate(), 1));
+}
+
+TEST(CaptureStackTraceForUncaughtException) {
+ report_count = 0;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ isolate->AddMessageListener(StackTraceForUncaughtExceptionListener);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
+
+ CompileRunWithOrigin(
+ "function foo() {\n"
+ " throw 1;\n"
+ "};\n"
+ "function bar() {\n"
+ " foo();\n"
+ "};",
+ "origin");
+ v8::Local<v8::Object> global = env->Global();
+ Local<Value> trouble =
+ global->Get(env.local(), v8_str("bar")).ToLocalChecked();
+ CHECK(trouble->IsFunction());
+ CHECK(v8::Function::Cast(*trouble)
+ ->Call(env.local(), global, 0, nullptr)
+ .IsEmpty());
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
+ CHECK_EQ(1, report_count);
+}
+
+TEST(CaptureStackTraceForUncaughtExceptionAndSetters) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true, 1024,
+ v8::StackTrace::kDetailed);
+
+ CompileRun(
+ "var setters = ['column', 'lineNumber', 'scriptName',\n"
+ " 'scriptNameOrSourceURL', 'functionName', 'isEval',\n"
+ " 'isConstructor'];\n"
+ "for (var i = 0; i < setters.length; i++) {\n"
+ " var prop = setters[i];\n"
+ " Object.prototype.__defineSetter__(prop, function() { throw prop; });\n"
+ "}\n");
+ CompileRun("throw 'exception';");
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+}
+
+static void StackTraceFunctionNameListener(v8::Local<v8::Message> message,
+ v8::Local<Value>) {
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
+ v8::Isolate* isolate = message->GetIsolate();
+ CHECK_EQ(5, stack_trace->GetFrameCount());
+ checkStackFrame("origin", "foo:0", 4, 7, false, false,
+ stack_trace->GetFrame(isolate, 0));
+ checkStackFrame("origin", "foo:1", 5, 27, false, false,
+ stack_trace->GetFrame(isolate, 1));
+ checkStackFrame("origin", "foo", 5, 27, false, false,
+ stack_trace->GetFrame(isolate, 2));
+ checkStackFrame("origin", "foo", 5, 27, false, false,
+ stack_trace->GetFrame(isolate, 3));
+ checkStackFrame("origin", "", 1, 14, false, false,
+ stack_trace->GetFrame(isolate, 4));
+}
+
+TEST(GetStackTraceContainsFunctionsWithFunctionName) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ CompileRunWithOrigin(
+ "function gen(name, counter) {\n"
+ " var f = function foo() {\n"
+ " if (counter === 0)\n"
+ " throw 1;\n"
+ " gen(name, counter - 1)();\n"
+ " };\n"
+ " if (counter == 3) {\n"
+ " Object.defineProperty(f, 'name', {get: function(){ throw 239; }});\n"
+ " } else {\n"
+ " Object.defineProperty(f, 'name', {writable:true});\n"
+ " if (counter == 2)\n"
+ " f.name = 42;\n"
+ " else\n"
+ " f.name = name + ':' + counter;\n"
+ " }\n"
+ " return f;\n"
+ "};",
+ "origin");
+
+ isolate->AddMessageListener(StackTraceFunctionNameListener);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
+ CompileRunWithOrigin("gen('foo', 3)();", "origin");
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(StackTraceFunctionNameListener);
+}
+
+static void RethrowStackTraceHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
+ // Use the frame where JavaScript is called from.
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
+ CHECK(!stack_trace.IsEmpty());
+ int frame_count = stack_trace->GetFrameCount();
+ CHECK_EQ(3, frame_count);
+ int line_number[] = {1, 2, 5};
+ for (int i = 0; i < frame_count; i++) {
+ CHECK_EQ(line_number[i],
+ stack_trace->GetFrame(message->GetIsolate(), i)->GetLineNumber());
+ }
+}
+
+// Test that we only return the stack trace at the site where the exception
+// is first thrown (not where it is rethrown).
+TEST(RethrowStackTrace) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ // We make sure that
+ // - the stack trace of the ReferenceError in g() is reported.
+ // - the stack trace is not overwritten when e1 is rethrown by t().
+ // - the stack trace of e2 does not overwrite that of e1.
+ const char* source =
+ "function g() { error; } \n"
+ "function f() { g(); } \n"
+ "function t(e) { throw e; } \n"
+ "try { \n"
+ " f(); \n"
+ "} catch (e1) { \n"
+ " try { \n"
+ " error; \n"
+ " } catch (e2) { \n"
+ " t(e1); \n"
+ " } \n"
+ "} \n";
+ isolate->AddMessageListener(RethrowStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
+ CompileRun(source);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(RethrowStackTraceHandler);
+}
+
+static void RethrowPrimitiveStackTraceHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
+ CHECK(!stack_trace.IsEmpty());
+ int frame_count = stack_trace->GetFrameCount();
+ CHECK_EQ(2, frame_count);
+ int line_number[] = {3, 7};
+ for (int i = 0; i < frame_count; i++) {
+ CHECK_EQ(line_number[i],
+ stack_trace->GetFrame(message->GetIsolate(), i)->GetLineNumber());
+ }
+}
+
+// Test that we do not recognize identity for primitive exceptions.
+TEST(RethrowPrimitiveStackTrace) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ // We do not capture stack trace for non Error objects on creation time.
+ // Instead, we capture the stack trace on last throw.
+ const char* source =
+ "function g() { throw 404; } \n"
+ "function f() { g(); } \n"
+ "function t(e) { throw e; } \n"
+ "try { \n"
+ " f(); \n"
+ "} catch (e1) { \n"
+ " t(e1) \n"
+ "} \n";
+ isolate->AddMessageListener(RethrowPrimitiveStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
+ CompileRun(source);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(RethrowPrimitiveStackTraceHandler);
+}
+
+static void RethrowExistingStackTraceHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
+ // Use the frame where JavaScript is called from.
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
+ CHECK(!stack_trace.IsEmpty());
+ CHECK_EQ(1, stack_trace->GetFrameCount());
+ CHECK_EQ(1, stack_trace->GetFrame(message->GetIsolate(), 0)->GetLineNumber());
+}
+
+// Test that the stack trace is captured when the error object is created and
+// not where it is thrown.
+TEST(RethrowExistingStackTrace) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ const char* source =
+ "var e = new Error(); \n"
+ "throw e; \n";
+ isolate->AddMessageListener(RethrowExistingStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
+ CompileRun(source);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(RethrowExistingStackTraceHandler);
+}
+
+static void RethrowBogusErrorStackTraceHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> data) {
+ // Use the frame where JavaScript is called from.
+ v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
+ CHECK(!stack_trace.IsEmpty());
+ CHECK_EQ(1, stack_trace->GetFrameCount());
+ CHECK_EQ(2, stack_trace->GetFrame(message->GetIsolate(), 0)->GetLineNumber());
+}
+
+// Test that the stack trace is captured where the bogus Error object is thrown.
+TEST(RethrowBogusErrorStackTrace) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ const char* source =
+ "var e = {__proto__: new Error()} \n"
+ "throw e; \n";
+ isolate->AddMessageListener(RethrowBogusErrorStackTraceHandler);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true);
+ CompileRun(source);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(false);
+ isolate->RemoveMessageListeners(RethrowBogusErrorStackTraceHandler);
+}
+
+void AnalyzeStackOfEvalWithSourceURL(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::HandleScope scope(args.GetIsolate());
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kDetailed);
+ CHECK_EQ(5, stackTrace->GetFrameCount());
+ v8::Local<v8::String> url = v8_str("eval_url");
+ for (int i = 0; i < 3; i++) {
+ v8::Local<v8::String> name =
+ stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptNameOrSourceURL();
+ CHECK(!name.IsEmpty());
+ CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
+ }
+}
+
+TEST(SourceURLInStackTrace) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(
+ v8_str("AnalyzeStackOfEvalWithSourceURL"),
+ v8::FunctionTemplate::New(isolate, AnalyzeStackOfEvalWithSourceURL));
+ LocalContext context(nullptr, templ);
+
+ const char* source =
+ "function outer() {\n"
+ "function bar() {\n"
+ " AnalyzeStackOfEvalWithSourceURL();\n"
+ "}\n"
+ "function foo() {\n"
+ "\n"
+ " bar();\n"
+ "}\n"
+ "foo();\n"
+ "}\n"
+ "eval('(' + outer +')()%s');";
+
+ i::ScopedVector<char> code(1024);
+ i::SNPrintF(code, source, "//# sourceURL=eval_url");
+ CHECK(CompileRun(code.begin())->IsUndefined());
+ i::SNPrintF(code, source, "//@ sourceURL=eval_url");
+ CHECK(CompileRun(code.begin())->IsUndefined());
+}
+
+static int scriptIdInStack[2];
+
+void AnalyzeScriptIdInStack(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::HandleScope scope(args.GetIsolate());
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kScriptId);
+ CHECK_EQ(2, stackTrace->GetFrameCount());
+ for (int i = 0; i < 2; i++) {
+ scriptIdInStack[i] =
+ stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptId();
+ }
+}
+
+TEST(ScriptIdInStackTrace) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(v8_str("AnalyzeScriptIdInStack"),
+ v8::FunctionTemplate::New(isolate, AnalyzeScriptIdInStack));
+ LocalContext context(nullptr, templ);
+
+ v8::Local<v8::String> scriptSource = v8_str(
+ "function foo() {\n"
+ " AnalyzeScriptIdInStack();"
+ "}\n"
+ "foo();\n");
+ v8::Local<v8::Script> script = CompileWithOrigin(scriptSource, "test", false);
+ script->Run(context.local()).ToLocalChecked();
+ for (int i = 0; i < 2; i++) {
+ CHECK_NE(scriptIdInStack[i], v8::Message::kNoScriptIdInfo);
+ CHECK_EQ(scriptIdInStack[i], script->GetUnboundScript()->GetId());
+ }
+}
+
+void AnalyzeStackOfInlineScriptWithSourceURL(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::HandleScope scope(args.GetIsolate());
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kDetailed);
+ CHECK_EQ(4, stackTrace->GetFrameCount());
+ v8::Local<v8::String> url = v8_str("source_url");
+ for (int i = 0; i < 3; i++) {
+ v8::Local<v8::String> name =
+ stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptNameOrSourceURL();
+ CHECK(!name.IsEmpty());
+ CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
+ }
+}
+
+TEST(InlineScriptWithSourceURLInStackTrace) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(v8_str("AnalyzeStackOfInlineScriptWithSourceURL"),
+ v8::FunctionTemplate::New(
+ CcTest::isolate(), AnalyzeStackOfInlineScriptWithSourceURL));
+ LocalContext context(nullptr, templ);
+
+ const char* source =
+ "function outer() {\n"
+ "function bar() {\n"
+ " AnalyzeStackOfInlineScriptWithSourceURL();\n"
+ "}\n"
+ "function foo() {\n"
+ "\n"
+ " bar();\n"
+ "}\n"
+ "foo();\n"
+ "}\n"
+ "outer()\n%s";
+
+ i::ScopedVector<char> code(1024);
+ i::SNPrintF(code, source, "//# sourceURL=source_url");
+ CHECK(CompileRunWithOrigin(code.begin(), "url", 0, 1)->IsUndefined());
+ i::SNPrintF(code, source, "//@ sourceURL=source_url");
+ CHECK(CompileRunWithOrigin(code.begin(), "url", 0, 1)->IsUndefined());
+}
+
+void AnalyzeStackOfDynamicScriptWithSourceURL(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::HandleScope scope(args.GetIsolate());
+ v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
+ args.GetIsolate(), 10, v8::StackTrace::kDetailed);
+ CHECK_EQ(4, stackTrace->GetFrameCount());
+ v8::Local<v8::String> url = v8_str("source_url");
+ for (int i = 0; i < 3; i++) {
+ v8::Local<v8::String> name =
+ stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptNameOrSourceURL();
+ CHECK(!name.IsEmpty());
+ CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
+ }
+}
+
+TEST(DynamicWithSourceURLInStackTrace) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->Set(v8_str("AnalyzeStackOfDynamicScriptWithSourceURL"),
+ v8::FunctionTemplate::New(
+ CcTest::isolate(), AnalyzeStackOfDynamicScriptWithSourceURL));
+ LocalContext context(nullptr, templ);
+
+ const char* source =
+ "function outer() {\n"
+ "function bar() {\n"
+ " AnalyzeStackOfDynamicScriptWithSourceURL();\n"
+ "}\n"
+ "function foo() {\n"
+ "\n"
+ " bar();\n"
+ "}\n"
+ "foo();\n"
+ "}\n"
+ "outer()\n%s";
+
+ i::ScopedVector<char> code(1024);
+ i::SNPrintF(code, source, "//# sourceURL=source_url");
+ CHECK(CompileRunWithOrigin(code.begin(), "url", 0, 0)->IsUndefined());
+ i::SNPrintF(code, source, "//@ sourceURL=source_url");
+ CHECK(CompileRunWithOrigin(code.begin(), "url", 0, 0)->IsUndefined());
+}
+
+TEST(DynamicWithSourceURLInStackTraceString) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+
+ const char* source =
+ "function outer() {\n"
+ " function foo() {\n"
+ " FAIL.FAIL;\n"
+ " }\n"
+ " foo();\n"
+ "}\n"
+ "outer()\n%s";
+
+ i::ScopedVector<char> code(1024);
+ i::SNPrintF(code, source, "//# sourceURL=source_url");
+ v8::TryCatch try_catch(context->GetIsolate());
+ CompileRunWithOrigin(code.begin(), "", 0, 0);
+ CHECK(try_catch.HasCaught());
+ v8::String::Utf8Value stack(
+ context->GetIsolate(),
+ try_catch.StackTrace(context.local()).ToLocalChecked());
+ CHECK_NOT_NULL(strstr(*stack, "at foo (source_url:3:5)"));
+}
+
+TEST(CaptureStackTraceForStackOverflow) {
+ v8::internal::FLAG_stack_size = 150;
+ LocalContext current;
+ v8::Isolate* isolate = current->GetIsolate();
+ v8::HandleScope scope(isolate);
+ isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
+ v8::StackTrace::kDetailed);
+ v8::TryCatch try_catch(isolate);
+ CompileRun("(function f(x) { f(x+1); })(0)");
+ CHECK(try_catch.HasCaught());
+}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 59903ddd92..63c980cf61 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -38,30 +38,30 @@
#endif
#include "include/v8-util.h"
-#include "src/api-inl.h"
-#include "src/arguments.h"
+#include "src/api/api-inl.h"
#include "src/base/overflowing-math.h"
#include "src/base/platform/platform.h"
-#include "src/compilation-cache.h"
+#include "src/codegen/compilation-cache.h"
#include "src/debug/debug.h"
-#include "src/execution.h"
-#include "src/feedback-vector-inl.h"
-#include "src/feedback-vector.h"
-#include "src/futex-emulation.h"
-#include "src/global-handles.h"
+#include "src/execution/arguments.h"
+#include "src/execution/execution.h"
+#include "src/execution/futex-emulation.h"
+#include "src/execution/vm-state.h"
+#include "src/handles/global-handles.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/local-allocator.h"
-#include "src/lookup.h"
-#include "src/objects-inl.h"
+#include "src/objects/feedback-vector-inl.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-promise-inl.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
-#include "src/unicode-inl.h"
-#include "src/utils.h"
-#include "src/vm-state.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/utils.h"
#include "src/wasm/wasm-js.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
@@ -90,7 +90,6 @@ using ::v8::ObjectTemplate;
using ::v8::Persistent;
using ::v8::PropertyAttribute;
using ::v8::Script;
-using ::v8::StackTrace;
using ::v8::String;
using ::v8::Symbol;
using ::v8::TryCatch;
@@ -217,7 +216,7 @@ static void TestSignatureLooped(const char* operation, Local<Value> receiver,
signature_expected_receiver = receiver;
bool expected_to_throw = receiver.IsEmpty();
v8::TryCatch try_catch(isolate);
- CompileRun(source.start());
+ CompileRun(source.begin());
CHECK_EQ(expected_to_throw, try_catch.HasCaught());
if (!expected_to_throw) {
CHECK_EQ(10, signature_callback_count);
@@ -237,7 +236,8 @@ static void TestSignatureOptimized(const char* operation, Local<Value> receiver,
i::SNPrintF(source,
"function test() {"
" %s"
- "}"
+ "};"
+ "%%PrepareFunctionForOptimization(test);"
"try { test() } catch(e) {}"
"try { test() } catch(e) {}"
"%%OptimizeFunctionOnNextCall(test);"
@@ -247,7 +247,7 @@ static void TestSignatureOptimized(const char* operation, Local<Value> receiver,
signature_expected_receiver = receiver;
bool expected_to_throw = receiver.IsEmpty();
v8::TryCatch try_catch(isolate);
- CompileRun(source.start());
+ CompileRun(source.begin());
CHECK_EQ(expected_to_throw, try_catch.HasCaught());
if (!expected_to_throw) {
CHECK_EQ(3, signature_callback_count);
@@ -358,7 +358,7 @@ THREADED_TEST(ReceiverSignature) {
i::ScopedVector<char> source(200);
i::SNPrintF(
source, "var test_object = %s; test_object", test_objects[i]);
- Local<Value> test_object = CompileRun(source.start());
+ Local<Value> test_object = CompileRun(source.begin());
TestSignature("test_object.prop();", test_object, isolate);
TestSignature("test_object.accessor;", test_object, isolate);
TestSignature("test_object[accessor_key];", test_object, isolate);
@@ -2585,9 +2585,9 @@ THREADED_TEST(AccessorIsPreservedOnAttributeChange) {
LocalContext env;
v8::Local<v8::Value> res = CompileRun("var a = []; a;");
i::Handle<i::JSReceiver> a(v8::Utils::OpenHandle(v8::Object::Cast(*res)));
- CHECK_EQ(1, a->map()->instance_descriptors()->number_of_descriptors());
+ CHECK_EQ(1, a->map().instance_descriptors().number_of_descriptors());
CompileRun("Object.defineProperty(a, 'length', { writable: false });");
- CHECK_EQ(0, a->map()->instance_descriptors()->number_of_descriptors());
+ CHECK_EQ(0, a->map().instance_descriptors().number_of_descriptors());
// But we should still have an AccessorInfo.
i::Handle<i::String> name(v8::Utils::OpenHandle(*v8_str("length")));
i::LookupIterator it(CcTest::i_isolate(), a, name,
@@ -2768,7 +2768,7 @@ TEST(InternalFieldsSubclassing) {
i::Handle<i::JSObject> i_obj =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
CHECK_EQ(nof_embedder_fields, obj->InternalFieldCount());
- CHECK_EQ(0, i_obj->map()->GetInObjectProperties());
+ CHECK_EQ(0, i_obj->map().GetInObjectProperties());
// Check writing and reading internal fields.
for (int j = 0; j < nof_embedder_fields; j++) {
CHECK(obj->GetInternalField(j)->IsUndefined());
@@ -2831,25 +2831,25 @@ TEST(InternalFieldsSubclassing) {
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*value));
#ifdef VERIFY_HEAP
i_value->HeapObjectVerify(i_isolate);
- i_value->map()->HeapObjectVerify(i_isolate);
- i_value->map()->FindRootMap(i_isolate)->HeapObjectVerify(i_isolate);
+ i_value->map().HeapObjectVerify(i_isolate);
+ i_value->map().FindRootMap(i_isolate).HeapObjectVerify(i_isolate);
#endif
CHECK_EQ(nof_embedder_fields, value->InternalFieldCount());
if (in_object_only) {
- CHECK_LE(nof_properties, i_value->map()->GetInObjectProperties());
+ CHECK_LE(nof_properties, i_value->map().GetInObjectProperties());
} else {
- CHECK_LE(i_value->map()->GetInObjectProperties(), kMaxNofProperties);
+ CHECK_LE(i_value->map().GetInObjectProperties(), kMaxNofProperties);
}
// Make Sure we get the precise property count.
- i_value->map()->FindRootMap(i_isolate)->CompleteInobjectSlackTracking(
+ i_value->map().FindRootMap(i_isolate).CompleteInobjectSlackTracking(
i_isolate);
// TODO(cbruni): fix accounting to make this condition true.
// CHECK_EQ(0, i_value->map()->UnusedPropertyFields());
if (in_object_only) {
- CHECK_EQ(nof_properties, i_value->map()->GetInObjectProperties());
+ CHECK_EQ(nof_properties, i_value->map().GetInObjectProperties());
} else {
- CHECK_LE(i_value->map()->GetInObjectProperties(), kMaxNofProperties);
+ CHECK_LE(i_value->map().GetInObjectProperties(), kMaxNofProperties);
}
}
}
@@ -2864,7 +2864,7 @@ THREADED_TEST(InternalFieldsOfRegularObjects) {
for (size_t i = 0; i < arraysize(sources); ++i) {
i::ScopedVector<char> source(128);
i::SNPrintF(source, "(function() { return %s })()", sources[i]);
- v8::Local<v8::Object> obj = CompileRun(source.start()).As<v8::Object>();
+ v8::Local<v8::Object> obj = CompileRun(source.begin()).As<v8::Object>();
CHECK_EQ(0, obj->InternalFieldCount());
}
}
@@ -3096,10 +3096,10 @@ void GlobalProxyIdentityHash(bool set_in_js) {
if (set_in_js) {
CompileRun("var m = new Set(); m.add(global);");
i::Object original_hash = i_global_proxy->GetHash();
- CHECK(original_hash->IsSmi());
+ CHECK(original_hash.IsSmi());
hash1 = i::Smi::ToInt(original_hash);
} else {
- hash1 = i_global_proxy->GetOrCreateHash(i_isolate)->value();
+ hash1 = i_global_proxy->GetOrCreateHash(i_isolate).value();
}
// Hash should be retained after being detached.
env->DetachGlobal();
@@ -3765,9 +3765,9 @@ THREADED_TEST(ArrayBuffer_External) {
v8::HandleScope handle_scope(isolate);
i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.start(), 0, 100);
+ memset(my_data.begin(), 0, 100);
Local<v8::ArrayBuffer> ab3 =
- v8::ArrayBuffer::New(isolate, my_data.start(), 100);
+ v8::ArrayBuffer::New(isolate, my_data.begin(), 100);
CheckInternalFieldsAreZero(ab3);
CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
CHECK(ab3->IsExternal());
@@ -3797,9 +3797,9 @@ THREADED_TEST(ArrayBuffer_DisableDetach) {
v8::HandleScope handle_scope(isolate);
i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.start(), 0, 100);
+ memset(my_data.begin(), 0, 100);
Local<v8::ArrayBuffer> ab =
- v8::ArrayBuffer::New(isolate, my_data.start(), 100);
+ v8::ArrayBuffer::New(isolate, my_data.begin(), 100);
CHECK(ab->IsDetachable());
i::Handle<i::JSArrayBuffer> buf = v8::Utils::OpenHandle(*ab);
@@ -3824,7 +3824,7 @@ static void CheckIsTypedArrayVarDetached(const char* name) {
i::SNPrintF(source,
"%s.byteLength == 0 && %s.byteOffset == 0 && %s.length == 0",
name, name, name);
- CHECK(CompileRun(source.start())->IsTrue());
+ CHECK(CompileRun(source.begin())->IsTrue());
v8::Local<v8::TypedArray> ta =
v8::Local<v8::TypedArray>::Cast(CompileRun(name));
CheckIsDetached(ta);
@@ -4085,9 +4085,9 @@ THREADED_TEST(SharedArrayBuffer_External) {
v8::HandleScope handle_scope(isolate);
i::ScopedVector<uint8_t> my_data(100);
- memset(my_data.start(), 0, 100);
+ memset(my_data.begin(), 0, 100);
Local<v8::SharedArrayBuffer> ab3 =
- v8::SharedArrayBuffer::New(isolate, my_data.start(), 100);
+ v8::SharedArrayBuffer::New(isolate, my_data.begin(), 100);
CheckInternalFieldsAreZero(ab3);
CHECK_EQ(100, static_cast<int>(ab3->ByteLength()));
CHECK(ab3->IsExternal());
@@ -4247,10 +4247,10 @@ THREADED_TEST(External) {
{
i::Handle<i::Object> obj = v8::Utils::OpenHandle(*ext);
- CHECK_EQ(i::HeapObject::cast(*obj)->map(), CcTest::heap()->external_map());
+ CHECK_EQ(i::HeapObject::cast(*obj).map(), CcTest::heap()->external_map());
CHECK(ext->IsExternal());
CHECK(!CompileRun("new Set().add(this.ext)").IsEmpty());
- CHECK_EQ(i::HeapObject::cast(*obj)->map(), CcTest::heap()->external_map());
+ CHECK_EQ(i::HeapObject::cast(*obj).map(), CcTest::heap()->external_map());
CHECK(ext->IsExternal());
}
@@ -4419,29 +4419,59 @@ class TwoPassCallbackData;
void FirstPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data);
void SecondPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data);
+struct GCCallbackMetadata {
+ int instance_counter = 0;
+ int depth = 0;
+ v8::Persistent<v8::Context> context;
+
+ GCCallbackMetadata() {
+ auto isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ context.Reset(isolate, CcTest::NewContext());
+ }
+
+ ~GCCallbackMetadata() {
+ CHECK_EQ(0, instance_counter);
+ CHECK_EQ(0, depth);
+ }
+
+ struct DepthCheck {
+ explicit DepthCheck(GCCallbackMetadata* counters) : counters(counters) {
+ CHECK_EQ(counters->depth, 0);
+ counters->depth++;
+ }
+
+ ~DepthCheck() {
+ counters->depth--;
+ CHECK_EQ(counters->depth, 0);
+ }
+
+ GCCallbackMetadata* counters;
+ };
+};
class TwoPassCallbackData {
public:
- TwoPassCallbackData(v8::Isolate* isolate, int* instance_counter)
+ TwoPassCallbackData(v8::Isolate* isolate, GCCallbackMetadata* metadata)
: first_pass_called_(false),
second_pass_called_(false),
trigger_gc_(false),
- instance_counter_(instance_counter) {
+ metadata_(metadata) {
HandleScope scope(isolate);
i::ScopedVector<char> buffer(40);
i::SNPrintF(buffer, "%p", static_cast<void*>(this));
- auto string =
- v8::String::NewFromUtf8(isolate, buffer.start(),
- v8::NewStringType::kNormal).ToLocalChecked();
+ auto string = v8::String::NewFromUtf8(isolate, buffer.begin(),
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
cell_.Reset(isolate, string);
- (*instance_counter_)++;
+ metadata_->instance_counter++;
}
~TwoPassCallbackData() {
CHECK(first_pass_called_);
CHECK(second_pass_called_);
CHECK(cell_.IsEmpty());
- (*instance_counter_)--;
+ metadata_->instance_counter--;
}
void FirstPass() {
@@ -4452,12 +4482,32 @@ class TwoPassCallbackData {
first_pass_called_ = true;
}
- void SecondPass() {
+ void SecondPass(v8::Isolate* isolate) {
+ ApiTestFuzzer::Fuzz();
+
+ GCCallbackMetadata::DepthCheck depth_check(metadata_);
CHECK(first_pass_called_);
CHECK(!second_pass_called_);
CHECK(cell_.IsEmpty());
second_pass_called_ = true;
+
+ GCCallbackMetadata* metadata = metadata_;
+ bool trigger_gc = trigger_gc_;
delete this;
+
+ {
+ // Make sure that running JS works inside the second pass callback.
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(metadata->context.Get(isolate));
+ v8::Local<v8::Value> value = CompileRun("(function() { return 42 })()");
+ CHECK(value->IsInt32());
+ CHECK_EQ(value.As<v8::Int32>()->Value(), 42);
+ }
+
+ if (!trigger_gc) return;
+ auto data_2 = new TwoPassCallbackData(isolate, metadata);
+ data_2->SetWeak();
+ CcTest::CollectAllGarbage();
}
void SetWeak() {
@@ -4465,28 +4515,18 @@ class TwoPassCallbackData {
}
void MarkTriggerGc() { trigger_gc_ = true; }
- bool trigger_gc() { return trigger_gc_; }
-
- int* instance_counter() { return instance_counter_; }
private:
bool first_pass_called_;
bool second_pass_called_;
bool trigger_gc_;
v8::Global<v8::String> cell_;
- int* instance_counter_;
+ GCCallbackMetadata* metadata_;
};
void SecondPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data) {
- ApiTestFuzzer::Fuzz();
- bool trigger_gc = data.GetParameter()->trigger_gc();
- int* instance_counter = data.GetParameter()->instance_counter();
- data.GetParameter()->SecondPass();
- if (!trigger_gc) return;
- auto data_2 = new TwoPassCallbackData(data.GetIsolate(), instance_counter);
- data_2->SetWeak();
- CcTest::CollectAllGarbage();
+ data.GetParameter()->SecondPass(data.GetIsolate());
}
@@ -4500,37 +4540,55 @@ void FirstPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data) {
TEST(TwoPassPhantomCallbacks) {
auto isolate = CcTest::isolate();
+ GCCallbackMetadata metadata;
const size_t kLength = 20;
- int instance_counter = 0;
for (size_t i = 0; i < kLength; ++i) {
- auto data = new TwoPassCallbackData(isolate, &instance_counter);
+ auto data = new TwoPassCallbackData(isolate, &metadata);
data->SetWeak();
}
- CHECK_EQ(static_cast<int>(kLength), instance_counter);
+ CHECK_EQ(static_cast<int>(kLength), metadata.instance_counter);
CcTest::CollectAllGarbage();
EmptyMessageQueues(isolate);
- CHECK_EQ(0, instance_counter);
}
TEST(TwoPassPhantomCallbacksNestedGc) {
auto isolate = CcTest::isolate();
+ GCCallbackMetadata metadata;
const size_t kLength = 20;
TwoPassCallbackData* array[kLength];
- int instance_counter = 0;
for (size_t i = 0; i < kLength; ++i) {
- array[i] = new TwoPassCallbackData(isolate, &instance_counter);
+ array[i] = new TwoPassCallbackData(isolate, &metadata);
array[i]->SetWeak();
}
array[5]->MarkTriggerGc();
array[10]->MarkTriggerGc();
array[15]->MarkTriggerGc();
- CHECK_EQ(static_cast<int>(kLength), instance_counter);
+ CHECK_EQ(static_cast<int>(kLength), metadata.instance_counter);
CcTest::CollectAllGarbage();
EmptyMessageQueues(isolate);
- CHECK_EQ(0, instance_counter);
}
+// The string creation API methods forbid executing JS code while they are
+// on the stack. Make sure that when such a string creation triggers GC,
+// the second pass callback can still execute JS as per its API contract.
+TEST(TwoPassPhantomCallbacksTriggeredByStringAlloc) {
+ auto isolate = CcTest::isolate();
+ GCCallbackMetadata metadata;
+ auto data = new TwoPassCallbackData(isolate, &metadata);
+ data->SetWeak();
+ CHECK_EQ(metadata.instance_counter, 1);
+
+ i::ScopedVector<uint8_t> source(200000);
+ v8::HandleScope handle_scope(isolate);
+ // Creating a few large strings suffices to trigger GC.
+ while (metadata.instance_counter == 1) {
+ USE(v8::String::NewFromOneByte(isolate, source.begin(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(source.size())));
+ }
+ EmptyMessageQueues(isolate);
+}
namespace {
@@ -4550,7 +4608,7 @@ Local<v8::Object> NewObjectForIntKey(
template <typename K, typename V>
class PhantomStdMapTraits : public v8::StdMapTraits<K, V> {
public:
- typedef typename v8::GlobalValueMap<K, V, PhantomStdMapTraits<K, V>> MapType;
+ using MapType = typename v8::GlobalValueMap<K, V, PhantomStdMapTraits<K, V>>;
static const v8::PersistentContainerCallbackType kCallbackType =
v8::kWeakWithInternalFields;
struct WeakCallbackDataType {
@@ -4661,8 +4719,8 @@ TEST(GlobalValueMap) {
TestGlobalValueMap<v8::StdGlobalValueMap<int, v8::Object>>();
// Custom traits with weak callbacks:
- typedef v8::GlobalValueMap<int, v8::Object,
- PhantomStdMapTraits<int, v8::Object>> WeakMap;
+ using WeakMap =
+ v8::GlobalValueMap<int, v8::Object, PhantomStdMapTraits<int, v8::Object>>;
TestGlobalValueMap<WeakMap>();
}
@@ -5047,6 +5105,81 @@ TEST(MessageHandler5) {
isolate->RemoveMessageListeners(check_message_5b);
}
+namespace {
+
+// Verifies that after throwing an exception the message object is set up in
+// some particular way by calling the supplied |tester| function. The tests that
+// use this purposely test only a single getter as the getter updates the cached
+// state of the object which could affect the results of other functions.
+void CheckMessageAttributes(std::function<void(v8::Local<v8::Context> context,
+ v8::Local<v8::Message> message)>
+ tester) {
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+
+ TryCatch try_catch(context->GetIsolate());
+ CompileRun(
+ R"javascript(
+ (function() {
+ throw new Error();
+ })();
+ )javascript");
+ CHECK(try_catch.HasCaught());
+
+ v8::Local<v8::Value> error = try_catch.Exception();
+ v8::Local<v8::Message> message =
+ v8::Exception::CreateMessage(context->GetIsolate(), error);
+ CHECK(!message.IsEmpty());
+
+ tester(context.local(), message);
+}
+
+} // namespace
+
+TEST(MessageGetLineNumber) {
+ CheckMessageAttributes(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
+ CHECK_EQ(3, message->GetLineNumber(context).FromJust());
+ });
+}
+
+TEST(MessageGetStartColumn) {
+ CheckMessageAttributes(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
+ CHECK_EQ(14, message->GetStartColumn(context).FromJust());
+ });
+}
+
+TEST(MessageGetEndColumn) {
+ CheckMessageAttributes(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
+ CHECK_EQ(15, message->GetEndColumn(context).FromJust());
+ });
+}
+
+TEST(MessageGetStartPosition) {
+ CheckMessageAttributes(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
+ CHECK_EQ(35, message->GetStartPosition());
+ });
+}
+
+TEST(MessageGetEndPosition) {
+ CheckMessageAttributes(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
+ CHECK_EQ(36, message->GetEndPosition());
+ });
+}
+
+TEST(MessageGetSourceLine) {
+ CheckMessageAttributes(
+ [](v8::Local<v8::Context> context, v8::Local<v8::Message> message) {
+ std::string result(*v8::String::Utf8Value(
+ context->GetIsolate(),
+ message->GetSourceLine(context).ToLocalChecked()));
+ CHECK_EQ(" throw new Error();", result);
+ });
+}
THREADED_TEST(GetSetProperty) {
LocalContext context;
@@ -7462,9 +7595,9 @@ TEST(ExtensionWithSourceLength) {
i::ScopedVector<char> extension_name(32);
i::SNPrintF(extension_name, "ext #%d", source_len);
v8::RegisterExtension(v8::base::make_unique<Extension>(
- extension_name.start(), kEmbeddedExtensionSource, 0, nullptr,
+ extension_name.begin(), kEmbeddedExtensionSource, 0, nullptr,
source_len));
- const char* extension_names[1] = {extension_name.start()};
+ const char* extension_names[1] = {extension_name.begin()};
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Local<Context> context = Context::New(CcTest::isolate(), &extensions);
if (source_len == kEmbeddedExtensionSourceValidLen) {
@@ -7943,8 +8076,8 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
// Verify that we created an array where the space was reserved up front.
big_array_size =
v8::internal::JSArray::cast(*v8::Utils::OpenHandle(*big_array))
- ->elements()
- ->Size();
+ .elements()
+ .Size();
CHECK_LE(20000, big_array_size);
a->Set(context, v8_str("y"), big_array).FromJust();
big_heap_size = CcTest::heap()->SizeOfObjects();
@@ -7956,18 +8089,6 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
v8::WeakCallbackType::kParameter);
object_b.handle.SetWeak(&object_b, &SetFlag,
v8::WeakCallbackType::kParameter);
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- // MarkIndependent is marked deprecated but we still rely on it temporarily.
- CHECK(!object_b.handle.IsIndependent());
- object_a.handle.MarkIndependent();
- object_b.handle.MarkIndependent();
- CHECK(object_b.handle.IsIndependent());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
if (global_gc) {
CcTest::CollectAllGarbage();
} else {
@@ -8118,19 +8239,6 @@ void v8::internal::heap::HeapTester::ResetWeakHandle(bool global_gc) {
v8::WeakCallbackType::kParameter);
object_b.handle.SetWeak(&object_b, &ResetUseValueAndSetFlag,
v8::WeakCallbackType::kParameter);
- if (!global_gc) {
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- // MarkIndependent is marked deprecated but we still rely on it temporarily.
- object_a.handle.MarkIndependent();
- object_b.handle.MarkIndependent();
- CHECK(object_b.handle.IsIndependent());
-#if __clang__
-#pragma clang diagnostic pop
-#endif
- }
if (global_gc) {
CcTest::PreciseCollectAllGarbage();
} else {
@@ -8181,11 +8289,11 @@ THREADED_TEST(GCFromWeakCallbacks) {
Context::Scope context_scope(context);
static const int kNumberOfGCTypes = 2;
- typedef v8::WeakCallbackInfo<FlagAndPersistent>::Callback Callback;
+ using Callback = v8::WeakCallbackInfo<FlagAndPersistent>::Callback;
Callback gc_forcing_callback[kNumberOfGCTypes] = {&ForceScavenge1,
&ForceMarkSweep1};
- typedef void (*GCInvoker)();
+ using GCInvoker = void (*)();
GCInvoker invoke_gc[kNumberOfGCTypes] = {&InvokeScavenge, &InvokeMarkSweep};
for (int outer_gc = 0; outer_gc < kNumberOfGCTypes; outer_gc++) {
@@ -8198,16 +8306,6 @@ THREADED_TEST(GCFromWeakCallbacks) {
object.flag = false;
object.handle.SetWeak(&object, gc_forcing_callback[inner_gc],
v8::WeakCallbackType::kParameter);
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- // MarkIndependent is marked deprecated but we still rely on it
- // temporarily.
- object.handle.MarkIndependent();
-#if __clang__
-#pragma clang diagnostic pop
-#endif
invoke_gc[outer_gc]();
EmptyMessageQueues(isolate);
CHECK(object.flag);
@@ -9046,50 +9144,6 @@ THREADED_TEST(ToArrayIndex) {
CHECK(index.IsEmpty());
}
-static v8::MaybeLocal<Value> PrepareStackTrace42(v8::Local<Context> context,
- v8::Local<Value> error,
- v8::Local<Array> trace) {
- return v8::Number::New(context->GetIsolate(), 42);
-}
-
-static v8::MaybeLocal<Value> PrepareStackTraceThrow(v8::Local<Context> context,
- v8::Local<Value> error,
- v8::Local<Array> trace) {
- v8::Isolate* isolate = context->GetIsolate();
- v8::Local<String> message = v8_str("42");
- isolate->ThrowException(v8::Exception::Error(message));
- return v8::MaybeLocal<Value>();
-}
-
-THREADED_TEST(IsolatePrepareStackTrace) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope scope(isolate);
-
- isolate->SetPrepareStackTraceCallback(PrepareStackTrace42);
-
- v8::Local<Value> v = CompileRun("new Error().stack");
-
- CHECK(v->IsNumber());
- CHECK_EQ(v.As<v8::Number>()->Int32Value(context.local()).FromJust(), 42);
-}
-
-THREADED_TEST(IsolatePrepareStackTraceThrow) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope scope(isolate);
-
- isolate->SetPrepareStackTraceCallback(PrepareStackTraceThrow);
-
- v8::Local<Value> v = CompileRun("try { new Error().stack } catch (e) { e }");
-
- CHECK(v->IsNativeError());
-
- v8::Local<String> message = v8::Exception::CreateMessage(isolate, v)->Get();
-
- CHECK(message->StrictEquals(v8_str("Uncaught Error: 42")));
-}
-
THREADED_TEST(ErrorConstruction) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -9133,102 +9187,6 @@ THREADED_TEST(ErrorConstruction) {
.FromJust());
}
-
-static void ThrowV8Exception(const v8::FunctionCallbackInfo<v8::Value>& info) {
- ApiTestFuzzer::Fuzz();
- v8::Local<String> foo = v8_str("foo");
- v8::Local<String> message = v8_str("message");
- v8::Local<Value> error = v8::Exception::Error(foo);
- CHECK(error->IsObject());
- v8::Local<v8::Context> context = info.GetIsolate()->GetCurrentContext();
- CHECK(error.As<v8::Object>()
- ->Get(context, message)
- .ToLocalChecked()
- ->Equals(context, foo)
- .FromJust());
- info.GetIsolate()->ThrowException(error);
- info.GetReturnValue().SetUndefined();
-}
-
-
-THREADED_TEST(ExceptionCreateMessage) {
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::Local<String> foo_str = v8_str("foo");
- v8::Local<String> message_str = v8_str("message");
-
- context->GetIsolate()->SetCaptureStackTraceForUncaughtExceptions(true);
-
- Local<v8::FunctionTemplate> fun =
- v8::FunctionTemplate::New(context->GetIsolate(), ThrowV8Exception);
- v8::Local<v8::Object> global = context->Global();
- CHECK(global->Set(context.local(), v8_str("throwV8Exception"),
- fun->GetFunction(context.local()).ToLocalChecked())
- .FromJust());
-
- TryCatch try_catch(context->GetIsolate());
- CompileRun(
- "function f1() {\n"
- " throwV8Exception();\n"
- "};\n"
- "f1();");
- CHECK(try_catch.HasCaught());
-
- v8::Local<v8::Value> error = try_catch.Exception();
- CHECK(error->IsObject());
- CHECK(error.As<v8::Object>()
- ->Get(context.local(), message_str)
- .ToLocalChecked()
- ->Equals(context.local(), foo_str)
- .FromJust());
-
- v8::Local<v8::Message> message =
- v8::Exception::CreateMessage(context->GetIsolate(), error);
- CHECK(!message.IsEmpty());
- CHECK_EQ(2, message->GetLineNumber(context.local()).FromJust());
- CHECK_EQ(2, message->GetStartColumn(context.local()).FromJust());
-
- v8::Local<v8::StackTrace> stackTrace = message->GetStackTrace();
- CHECK(!stackTrace.IsEmpty());
- CHECK_EQ(2, stackTrace->GetFrameCount());
-
- stackTrace = v8::Exception::GetStackTrace(error);
- CHECK(!stackTrace.IsEmpty());
- CHECK_EQ(2, stackTrace->GetFrameCount());
-
- context->GetIsolate()->SetCaptureStackTraceForUncaughtExceptions(false);
-
- // Now check message location when SetCaptureStackTraceForUncaughtExceptions
- // is false.
- try_catch.Reset();
-
- CompileRun(
- "function f2() {\n"
- " return throwV8Exception();\n"
- "};\n"
- "f2();");
- CHECK(try_catch.HasCaught());
-
- error = try_catch.Exception();
- CHECK(error->IsObject());
- CHECK(error.As<v8::Object>()
- ->Get(context.local(), message_str)
- .ToLocalChecked()
- ->Equals(context.local(), foo_str)
- .FromJust());
-
- message = v8::Exception::CreateMessage(context->GetIsolate(), error);
- CHECK(!message.IsEmpty());
- CHECK_EQ(2, message->GetLineNumber(context.local()).FromJust());
- CHECK_EQ(9, message->GetStartColumn(context.local()).FromJust());
-
- // Should be empty stack trace.
- stackTrace = message->GetStackTrace();
- CHECK(stackTrace.IsEmpty());
- CHECK(v8::Exception::GetStackTrace(error).IsEmpty());
-}
-
-
THREADED_TEST(ExceptionCreateMessageLength) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -11038,6 +10996,7 @@ THREADED_TEST(ShadowObjectAndDataProperty) {
// efficient access and good feedback for optimization.
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
+ i::FLAG_allow_natives_syntax = true;
Local<ObjectTemplate> global_template = v8::ObjectTemplate::New(isolate);
LocalContext context(nullptr, global_template);
@@ -11056,6 +11015,7 @@ THREADED_TEST(ShadowObjectAndDataProperty) {
CompileRun(
"function foo(x) { i = x; }"
+ "%EnsureFeedbackVectorForFunction(foo);"
"foo(0)");
i::Handle<i::JSFunction> foo(i::Handle<i::JSFunction>::cast(
@@ -11077,7 +11037,7 @@ THREADED_TEST(ShadowObjectAndDataProperty) {
// compiler downstream.
i::HeapObject heap_object;
CHECK(nexus.GetFeedback().GetHeapObject(&heap_object));
- CHECK(heap_object->IsPropertyCell());
+ CHECK(heap_object.IsPropertyCell());
}
THREADED_TEST(ShadowObjectAndDataPropertyTurbo) {
@@ -11105,7 +11065,8 @@ THREADED_TEST(ShadowObjectAndDataPropertyTurbo) {
.FromJust());
CompileRun(
- "function foo(x) { i = x; }"
+ "function foo(x) { i = x; };"
+ "%PrepareFunctionForOptimization(foo);"
"foo(0)");
i::Handle<i::JSFunction> foo(i::Handle<i::JSFunction>::cast(
@@ -11121,7 +11082,7 @@ THREADED_TEST(ShadowObjectAndDataPropertyTurbo) {
CHECK_EQ(i::MONOMORPHIC, nexus.ic_state());
i::HeapObject heap_object;
CHECK(nexus.GetFeedback().GetHeapObject(&heap_object));
- CHECK(heap_object->IsPropertyCell());
+ CHECK(heap_object.IsPropertyCell());
}
THREADED_TEST(SetPrototype) {
@@ -11243,7 +11204,7 @@ THREADED_TEST(Regress91517) {
i::ScopedVector<char> name_buf(1024);
for (int i = 1; i <= 1000; i++) {
i::SNPrintF(name_buf, "sdf%d", i);
- t2->InstanceTemplate()->Set(v8_str(name_buf.start()), v8_num(2));
+ t2->InstanceTemplate()->Set(v8_str(name_buf.begin()), v8_num(2));
}
Local<v8::Object> o1 = t1->GetFunction(context.local())
@@ -12952,9 +12913,9 @@ TEST(CallHandlerAsFunctionHasNoSideEffectNotSupported) {
v8::Utils::OpenHandle(*templ)->constructor());
i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
i::CallHandlerInfo handler_info =
- i::CallHandlerInfo::cast(cons->GetInstanceCallHandler());
- CHECK(!handler_info->IsSideEffectFreeCallHandlerInfo());
- handler_info->set_map(
+ i::CallHandlerInfo::cast(cons.GetInstanceCallHandler());
+ CHECK(!handler_info.IsSideEffectFreeCallHandlerInfo());
+ handler_info.set_map(
i::ReadOnlyRoots(heap).side_effect_free_call_handler_info_map());
CHECK(v8::debug::EvaluateGlobal(isolate, v8_str("obj()"), true).IsEmpty());
}
@@ -13652,10 +13613,10 @@ static int GetGlobalObjectsCount() {
i::HeapIterator it(CcTest::heap());
for (i::HeapObject object = it.next(); !object.is_null();
object = it.next()) {
- if (object->IsJSGlobalObject()) {
+ if (object.IsJSGlobalObject()) {
i::JSGlobalObject g = i::JSGlobalObject::cast(object);
// Skip dummy global object.
- if (g->global_dictionary()->NumberOfElements() != 0) {
+ if (g.global_dictionary().NumberOfElements() != 0) {
count++;
}
}
@@ -13722,8 +13683,8 @@ TEST(CopyablePersistent) {
i::GlobalHandles* globals =
reinterpret_cast<i::Isolate*>(isolate)->global_handles();
size_t initial_handles = globals->handles_count();
- typedef v8::Persistent<v8::Object, v8::CopyablePersistentTraits<v8::Object> >
- CopyableObject;
+ using CopyableObject =
+ v8::Persistent<v8::Object, v8::CopyablePersistentTraits<v8::Object>>;
{
CopyableObject handle1;
{
@@ -14937,7 +14898,7 @@ class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
: data_(vector) {}
~OneByteVectorResource() override = default;
size_t length() const override { return data_.length(); }
- const char* data() const override { return data_.start(); }
+ const char* data() const override { return data_.begin(); }
void Dispose() override {}
private:
@@ -14951,7 +14912,7 @@ class UC16VectorResource : public v8::String::ExternalStringResource {
: data_(vector) {}
~UC16VectorResource() override = default;
size_t length() const override { return data_.length(); }
- const i::uc16* data() const override { return data_.start(); }
+ const i::uc16* data() const override { return data_.begin(); }
void Dispose() override {}
private:
@@ -14964,22 +14925,22 @@ static void MorphAString(i::String string,
i::Isolate* isolate = CcTest::i_isolate();
CHECK(i::StringShape(string).IsExternal());
i::ReadOnlyRoots roots(CcTest::heap());
- if (string->IsOneByteRepresentation()) {
+ if (string.IsOneByteRepresentation()) {
// Check old map is not internalized or long.
- CHECK(string->map() == roots.external_one_byte_string_map());
+ CHECK(string.map() == roots.external_one_byte_string_map());
// Morph external string to be TwoByte string.
- string->set_map(roots.external_string_map());
+ string.set_map(roots.external_string_map());
i::ExternalTwoByteString morphed = i::ExternalTwoByteString::cast(string);
- CcTest::heap()->UpdateExternalString(morphed, string->length(), 0);
- morphed->SetResource(isolate, uc16_resource);
+ CcTest::heap()->UpdateExternalString(morphed, string.length(), 0);
+ morphed.SetResource(isolate, uc16_resource);
} else {
// Check old map is not internalized or long.
- CHECK(string->map() == roots.external_string_map());
+ CHECK(string.map() == roots.external_string_map());
// Morph external string to be one-byte string.
- string->set_map(roots.external_one_byte_string_map());
+ string.set_map(roots.external_one_byte_string_map());
i::ExternalOneByteString morphed = i::ExternalOneByteString::cast(string);
- CcTest::heap()->UpdateExternalString(morphed, string->length(), 0);
- morphed->SetResource(isolate, one_byte_resource);
+ CcTest::heap()->UpdateExternalString(morphed, string.length(), 0);
+ morphed.SetResource(isolate, one_byte_resource);
}
}
@@ -14997,9 +14958,9 @@ THREADED_TEST(MorphCompositeStringTest) {
i::Isolate* i_isolate = CcTest::i_isolate();
v8::HandleScope scope(isolate);
OneByteVectorResource one_byte_resource(
- i::Vector<const char>(c_string, i::StrLength(c_string)));
+ i::Vector<const char>(c_string, strlen(c_string)));
UC16VectorResource uc16_resource(
- i::Vector<const uint16_t>(two_byte_string, i::StrLength(c_string)));
+ i::Vector<const uint16_t>(two_byte_string, strlen(c_string)));
Local<String> lhs(v8::Utils::ToLocal(
factory->NewExternalStringFromOneByte(&one_byte_resource)
@@ -15067,14 +15028,14 @@ THREADED_TEST(MorphCompositeStringTest) {
.FromJust());
// This avoids the GC from trying to free a stack allocated resource.
- if (ilhs->IsExternalOneByteString())
- i::ExternalOneByteString::cast(ilhs)->SetResource(i_isolate, nullptr);
+ if (ilhs.IsExternalOneByteString())
+ i::ExternalOneByteString::cast(ilhs).SetResource(i_isolate, nullptr);
else
- i::ExternalTwoByteString::cast(ilhs)->SetResource(i_isolate, nullptr);
- if (irhs->IsExternalOneByteString())
- i::ExternalOneByteString::cast(irhs)->SetResource(i_isolate, nullptr);
+ i::ExternalTwoByteString::cast(ilhs).SetResource(i_isolate, nullptr);
+ if (irhs.IsExternalOneByteString())
+ i::ExternalOneByteString::cast(irhs).SetResource(i_isolate, nullptr);
else
- i::ExternalTwoByteString::cast(irhs)->SetResource(i_isolate, nullptr);
+ i::ExternalTwoByteString::cast(irhs).SetResource(i_isolate, nullptr);
}
i::DeleteArray(two_byte_string);
}
@@ -15696,14 +15657,13 @@ static void CheckElementValue(i::Isolate* isolate,
CHECK_EQ(expected, i::Smi::ToInt(element));
}
-
-template <class ExternalArrayClass, class ElementType>
+template <class ElementType>
static void ObjectWithExternalArrayTestHelper(Local<Context> context,
- v8::Local<Object> obj,
+ v8::Local<v8::TypedArray> obj,
int element_count,
i::ExternalArrayType array_type,
int64_t low, int64_t high) {
- i::Handle<i::JSReceiver> jsobj = v8::Utils::OpenHandle(*obj);
+ i::Handle<i::JSTypedArray> jsobj = v8::Utils::OpenHandle(*obj);
v8::Isolate* v8_isolate = context->GetIsolate();
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
obj->Set(context, v8_str("field"), v8::Int32::New(v8_isolate, 1503))
@@ -15791,13 +15751,13 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
i::SNPrintF(test_buf,
boundary_program,
low);
- result = CompileRun(test_buf.start());
+ result = CompileRun(test_buf.begin());
CHECK_EQ(low, result->IntegerValue(context).FromJust());
i::SNPrintF(test_buf,
boundary_program,
high);
- result = CompileRun(test_buf.start());
+ result = CompileRun(test_buf.begin());
CHECK_EQ(high, result->IntegerValue(context).FromJust());
// Check misprediction of type in IC.
@@ -15825,7 +15785,7 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
"}"
"caught_exception;",
element_count);
- result = CompileRun(test_buf.start());
+ result = CompileRun(test_buf.begin());
CHECK(!result->BooleanValue(v8_isolate));
// Make sure out-of-range stores do not throw.
@@ -15838,7 +15798,7 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
"}"
"caught_exception;",
element_count);
- result = CompileRun(test_buf.start());
+ result = CompileRun(test_buf.begin());
CHECK(!result->BooleanValue(v8_isolate));
// Check other boundary conditions, values and operations.
@@ -15930,15 +15890,15 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
(is_unsigned ?
unsigned_data :
(is_pixel_data ? pixel_data : signed_data)));
- result = CompileRun(test_buf.start());
+ result = CompileRun(test_buf.begin());
CHECK(result->BooleanValue(v8_isolate));
}
- i::Handle<ExternalArrayClass> array(
- ExternalArrayClass::cast(i::Handle<i::JSObject>::cast(jsobj)->elements()),
- isolate);
- for (int i = 0; i < element_count; i++) {
- array->set(i, static_cast<ElementType>(i));
+ {
+ ElementType* data_ptr = static_cast<ElementType*>(jsobj->DataPtr());
+ for (int i = 0; i < element_count; i++) {
+ data_ptr[i] = static_cast<ElementType>(i);
+ }
}
bool old_natives_flag_sentry = i::FLAG_allow_natives_syntax;
@@ -15952,7 +15912,8 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
" sum += (ext_array[i] -= 1);"
" } "
" return sum;"
- "}"
+ "};"
+ "%PrepareFunctionForOptimization(ee_op_test_complex_func);"
"sum=0;"
"sum=ee_op_test_complex_func(sum);"
"sum=ee_op_test_complex_func(sum);"
@@ -15969,7 +15930,8 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
" sum += (--ext_array[i]);"
" } "
" return sum;"
- "}"
+ "};"
+ "%PrepareFunctionForOptimization(ee_op_test_count_func);"
"sum=0;"
"sum=ee_op_test_count_func(sum);"
"sum=ee_op_test_count_func(sum);"
@@ -16021,100 +15983,7 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
CHECK_EQ(23, result->Int32Value(context).FromJust());
}
-
-template <class FixedTypedArrayClass, i::ElementsKind elements_kind,
- class ElementType>
-static void FixedTypedArrayTestHelper(i::ExternalArrayType array_type,
- ElementType low, ElementType high) {
- i::FLAG_allow_natives_syntax = true;
- LocalContext context;
- i::Isolate* isolate = CcTest::i_isolate();
- i::Factory* factory = isolate->factory();
- v8::HandleScope scope(context->GetIsolate());
- const int kElementCount = 260;
- i::Handle<i::JSTypedArray> jsobj =
- factory->NewJSTypedArray(elements_kind, kElementCount);
- i::Handle<FixedTypedArrayClass> fixed_array(
- FixedTypedArrayClass::cast(jsobj->elements()), isolate);
- CHECK_EQ(FixedTypedArrayClass::kInstanceType,
- fixed_array->map()->instance_type());
- CHECK_EQ(kElementCount, fixed_array->length());
- CcTest::CollectAllGarbage();
- for (int i = 0; i < kElementCount; i++) {
- fixed_array->set(i, static_cast<ElementType>(i));
- }
- // Force GC to trigger verification.
- CcTest::CollectAllGarbage();
- for (int i = 0; i < kElementCount; i++) {
- CHECK_EQ(static_cast<int64_t>(static_cast<ElementType>(i)),
- static_cast<int64_t>(fixed_array->get_scalar(i)));
- }
- v8::Local<v8::Object> obj = v8::Utils::ToLocal(jsobj);
-
- ObjectWithExternalArrayTestHelper<FixedTypedArrayClass, ElementType>(
- context.local(), obj, kElementCount, array_type,
- static_cast<int64_t>(low),
- static_cast<int64_t>(high));
-}
-
-
-THREADED_TEST(FixedUint8Array) {
- FixedTypedArrayTestHelper<i::FixedUint8Array, i::UINT8_ELEMENTS, uint8_t>(
- i::kExternalUint8Array, 0x0, 0xFF);
-}
-
-
-THREADED_TEST(FixedUint8ClampedArray) {
- FixedTypedArrayTestHelper<i::FixedUint8ClampedArray,
- i::UINT8_CLAMPED_ELEMENTS, uint8_t>(
- i::kExternalUint8ClampedArray, 0x0, 0xFF);
-}
-
-
-THREADED_TEST(FixedInt8Array) {
- FixedTypedArrayTestHelper<i::FixedInt8Array, i::INT8_ELEMENTS, int8_t>(
- i::kExternalInt8Array, -0x80, 0x7F);
-}
-
-
-THREADED_TEST(FixedUint16Array) {
- FixedTypedArrayTestHelper<i::FixedUint16Array, i::UINT16_ELEMENTS, uint16_t>(
- i::kExternalUint16Array, 0x0, 0xFFFF);
-}
-
-
-THREADED_TEST(FixedInt16Array) {
- FixedTypedArrayTestHelper<i::FixedInt16Array, i::INT16_ELEMENTS, int16_t>(
- i::kExternalInt16Array, -0x8000, 0x7FFF);
-}
-
-
-THREADED_TEST(FixedUint32Array) {
- FixedTypedArrayTestHelper<i::FixedUint32Array, i::UINT32_ELEMENTS, uint32_t>(
- i::kExternalUint32Array, 0x0, UINT_MAX);
-}
-
-
-THREADED_TEST(FixedInt32Array) {
- FixedTypedArrayTestHelper<i::FixedInt32Array, i::INT32_ELEMENTS, int32_t>(
- i::kExternalInt32Array, INT_MIN, INT_MAX);
-}
-
-
-THREADED_TEST(FixedFloat32Array) {
- FixedTypedArrayTestHelper<i::FixedFloat32Array, i::FLOAT32_ELEMENTS, float>(
- i::kExternalFloat32Array, -500, 500);
-}
-
-
-THREADED_TEST(FixedFloat64Array) {
- FixedTypedArrayTestHelper<i::FixedFloat64Array, i::FLOAT64_ELEMENTS, float>(
- i::kExternalFloat64Array, -500, 500);
-}
-
-
-template <typename ElementType, typename TypedArray, class ExternalArrayClass,
- class ArrayBufferType>
+template <typename ElementType, typename TypedArray, class ArrayBufferType>
void TypedArrayTestHelper(i::ExternalArrayType array_type, int64_t low,
int64_t high) {
const int kElementCount = 50;
@@ -16126,7 +15995,7 @@ void TypedArrayTestHelper(i::ExternalArrayType array_type, int64_t low,
v8::HandleScope handle_scope(isolate);
Local<ArrayBufferType> ab =
- ArrayBufferType::New(isolate, backing_store.start(),
+ ArrayBufferType::New(isolate, backing_store.begin(),
(kElementCount + 2) * sizeof(ElementType));
Local<TypedArray> ta =
TypedArray::New(ab, 2*sizeof(ElementType), kElementCount);
@@ -16136,69 +16005,65 @@ void TypedArrayTestHelper(i::ExternalArrayType array_type, int64_t low,
CHECK_EQ(kElementCount * sizeof(ElementType), ta->ByteLength());
CHECK(ab->Equals(env.local(), ta->Buffer()).FromJust());
- ElementType* data = backing_store.start() + 2;
+ ElementType* data = backing_store.begin() + 2;
for (int i = 0; i < kElementCount; i++) {
data[i] = static_cast<ElementType>(i);
}
- ObjectWithExternalArrayTestHelper<ExternalArrayClass, ElementType>(
- env.local(), ta, kElementCount, array_type, low, high);
+ ObjectWithExternalArrayTestHelper<ElementType>(env.local(), ta, kElementCount,
+ array_type, low, high);
}
-
THREADED_TEST(Uint8Array) {
- TypedArrayTestHelper<uint8_t, v8::Uint8Array, i::FixedUint8Array,
- v8::ArrayBuffer>(i::kExternalUint8Array, 0, 0xFF);
+ TypedArrayTestHelper<uint8_t, v8::Uint8Array, v8::ArrayBuffer>(
+ i::kExternalUint8Array, 0, 0xFF);
}
THREADED_TEST(Int8Array) {
- TypedArrayTestHelper<int8_t, v8::Int8Array, i::FixedInt8Array,
- v8::ArrayBuffer>(i::kExternalInt8Array, -0x80, 0x7F);
+ TypedArrayTestHelper<int8_t, v8::Int8Array, v8::ArrayBuffer>(
+ i::kExternalInt8Array, -0x80, 0x7F);
}
THREADED_TEST(Uint16Array) {
- TypedArrayTestHelper<uint16_t, v8::Uint16Array, i::FixedUint16Array,
- v8::ArrayBuffer>(i::kExternalUint16Array, 0, 0xFFFF);
+ TypedArrayTestHelper<uint16_t, v8::Uint16Array, v8::ArrayBuffer>(
+ i::kExternalUint16Array, 0, 0xFFFF);
}
THREADED_TEST(Int16Array) {
- TypedArrayTestHelper<int16_t, v8::Int16Array, i::FixedInt16Array,
- v8::ArrayBuffer>(i::kExternalInt16Array, -0x8000,
- 0x7FFF);
+ TypedArrayTestHelper<int16_t, v8::Int16Array, v8::ArrayBuffer>(
+ i::kExternalInt16Array, -0x8000, 0x7FFF);
}
THREADED_TEST(Uint32Array) {
- TypedArrayTestHelper<uint32_t, v8::Uint32Array, i::FixedUint32Array,
- v8::ArrayBuffer>(i::kExternalUint32Array, 0, UINT_MAX);
+ TypedArrayTestHelper<uint32_t, v8::Uint32Array, v8::ArrayBuffer>(
+ i::kExternalUint32Array, 0, UINT_MAX);
}
THREADED_TEST(Int32Array) {
- TypedArrayTestHelper<int32_t, v8::Int32Array, i::FixedInt32Array,
- v8::ArrayBuffer>(i::kExternalInt32Array, INT_MIN,
- INT_MAX);
+ TypedArrayTestHelper<int32_t, v8::Int32Array, v8::ArrayBuffer>(
+ i::kExternalInt32Array, INT_MIN, INT_MAX);
}
THREADED_TEST(Float32Array) {
- TypedArrayTestHelper<float, v8::Float32Array, i::FixedFloat32Array,
- v8::ArrayBuffer>(i::kExternalFloat32Array, -500, 500);
+ TypedArrayTestHelper<float, v8::Float32Array, v8::ArrayBuffer>(
+ i::kExternalFloat32Array, -500, 500);
}
THREADED_TEST(Float64Array) {
- TypedArrayTestHelper<double, v8::Float64Array, i::FixedFloat64Array,
- v8::ArrayBuffer>(i::kExternalFloat64Array, -500, 500);
+ TypedArrayTestHelper<double, v8::Float64Array, v8::ArrayBuffer>(
+ i::kExternalFloat64Array, -500, 500);
}
THREADED_TEST(Uint8ClampedArray) {
- TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray,
- i::FixedUint8ClampedArray, v8::ArrayBuffer>(
+ TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray, v8::ArrayBuffer>(
i::kExternalUint8ClampedArray, 0, 0xFF);
}
@@ -16213,7 +16078,7 @@ THREADED_TEST(DataView) {
v8::HandleScope handle_scope(isolate);
Local<v8::ArrayBuffer> ab =
- v8::ArrayBuffer::New(isolate, backing_store.start(), 2 + kSize);
+ v8::ArrayBuffer::New(isolate, backing_store.begin(), 2 + kSize);
Local<v8::DataView> dv = v8::DataView::New(ab, 2, kSize);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
CHECK_EQ(2u, dv->ByteOffset());
@@ -16272,71 +16137,63 @@ THREADED_TEST(SkipArrayBufferDuringScavenge) {
THREADED_TEST(SharedUint8Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint8_t, v8::Uint8Array, i::FixedUint8Array,
- v8::SharedArrayBuffer>(i::kExternalUint8Array, 0, 0xFF);
+ TypedArrayTestHelper<uint8_t, v8::Uint8Array, v8::SharedArrayBuffer>(
+ i::kExternalUint8Array, 0, 0xFF);
}
THREADED_TEST(SharedInt8Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<int8_t, v8::Int8Array, i::FixedInt8Array,
- v8::SharedArrayBuffer>(i::kExternalInt8Array, -0x80,
- 0x7F);
+ TypedArrayTestHelper<int8_t, v8::Int8Array, v8::SharedArrayBuffer>(
+ i::kExternalInt8Array, -0x80, 0x7F);
}
THREADED_TEST(SharedUint16Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint16_t, v8::Uint16Array, i::FixedUint16Array,
- v8::SharedArrayBuffer>(i::kExternalUint16Array, 0,
- 0xFFFF);
+ TypedArrayTestHelper<uint16_t, v8::Uint16Array, v8::SharedArrayBuffer>(
+ i::kExternalUint16Array, 0, 0xFFFF);
}
THREADED_TEST(SharedInt16Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<int16_t, v8::Int16Array, i::FixedInt16Array,
- v8::SharedArrayBuffer>(i::kExternalInt16Array, -0x8000,
- 0x7FFF);
+ TypedArrayTestHelper<int16_t, v8::Int16Array, v8::SharedArrayBuffer>(
+ i::kExternalInt16Array, -0x8000, 0x7FFF);
}
THREADED_TEST(SharedUint32Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint32_t, v8::Uint32Array, i::FixedUint32Array,
- v8::SharedArrayBuffer>(i::kExternalUint32Array, 0,
- UINT_MAX);
+ TypedArrayTestHelper<uint32_t, v8::Uint32Array, v8::SharedArrayBuffer>(
+ i::kExternalUint32Array, 0, UINT_MAX);
}
THREADED_TEST(SharedInt32Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<int32_t, v8::Int32Array, i::FixedInt32Array,
- v8::SharedArrayBuffer>(i::kExternalInt32Array, INT_MIN,
- INT_MAX);
+ TypedArrayTestHelper<int32_t, v8::Int32Array, v8::SharedArrayBuffer>(
+ i::kExternalInt32Array, INT_MIN, INT_MAX);
}
THREADED_TEST(SharedFloat32Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<float, v8::Float32Array, i::FixedFloat32Array,
- v8::SharedArrayBuffer>(i::kExternalFloat32Array, -500,
- 500);
+ TypedArrayTestHelper<float, v8::Float32Array, v8::SharedArrayBuffer>(
+ i::kExternalFloat32Array, -500, 500);
}
THREADED_TEST(SharedFloat64Array) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<double, v8::Float64Array, i::FixedFloat64Array,
- v8::SharedArrayBuffer>(i::kExternalFloat64Array, -500,
- 500);
+ TypedArrayTestHelper<double, v8::Float64Array, v8::SharedArrayBuffer>(
+ i::kExternalFloat64Array, -500, 500);
}
THREADED_TEST(SharedUint8ClampedArray) {
i::FLAG_harmony_sharedarraybuffer = true;
- TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray,
- i::FixedUint8ClampedArray, v8::SharedArrayBuffer>(
+ TypedArrayTestHelper<uint8_t, v8::Uint8ClampedArray, v8::SharedArrayBuffer>(
i::kExternalUint8ClampedArray, 0, 0xFF);
}
@@ -16352,7 +16209,7 @@ THREADED_TEST(SharedDataView) {
v8::HandleScope handle_scope(isolate);
Local<v8::SharedArrayBuffer> ab =
- v8::SharedArrayBuffer::New(isolate, backing_store.start(), 2 + kSize);
+ v8::SharedArrayBuffer::New(isolate, backing_store.begin(), 2 + kSize);
Local<v8::DataView> dv =
v8::DataView::New(ab, 2, kSize);
CheckInternalFieldsAreZero<v8::ArrayBufferView>(dv);
@@ -16436,280 +16293,6 @@ THREADED_TEST(ScriptContextDependence) {
}
-THREADED_TEST(StackTrace) {
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
- v8::TryCatch try_catch(context->GetIsolate());
- const char *source = "function foo() { FAIL.FAIL; }; foo();";
- v8::Local<v8::String> src = v8_str(source);
- v8::Local<v8::String> origin = v8_str("stack-trace-test");
- v8::ScriptCompiler::Source script_source(src, v8::ScriptOrigin(origin));
- CHECK(v8::ScriptCompiler::CompileUnboundScript(context->GetIsolate(),
- &script_source)
- .ToLocalChecked()
- ->BindToCurrentContext()
- ->Run(context.local())
- .IsEmpty());
- CHECK(try_catch.HasCaught());
- v8::String::Utf8Value stack(
- context->GetIsolate(),
- try_catch.StackTrace(context.local()).ToLocalChecked());
- CHECK_NOT_NULL(strstr(*stack, "at foo (stack-trace-test"));
-}
-
-
-// Checks that a StackFrame has certain expected values.
-void checkStackFrame(const char* expected_script_name,
- const char* expected_func_name, int expected_line_number,
- int expected_column, bool is_eval, bool is_constructor,
- v8::Local<v8::StackFrame> frame) {
- v8::HandleScope scope(CcTest::isolate());
- v8::String::Utf8Value func_name(CcTest::isolate(), frame->GetFunctionName());
- v8::String::Utf8Value script_name(CcTest::isolate(), frame->GetScriptName());
- if (*script_name == nullptr) {
- // The situation where there is no associated script, like for evals.
- CHECK_NULL(expected_script_name);
- } else {
- CHECK_NOT_NULL(strstr(*script_name, expected_script_name));
- }
- CHECK_NOT_NULL(strstr(*func_name, expected_func_name));
- CHECK_EQ(expected_line_number, frame->GetLineNumber());
- CHECK_EQ(expected_column, frame->GetColumn());
- CHECK_EQ(is_eval, frame->IsEval());
- CHECK_EQ(is_constructor, frame->IsConstructor());
-}
-
-
-void AnalyzeStackInNativeCode(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::HandleScope scope(args.GetIsolate());
- const char* origin = "capture-stack-trace-test";
- const int kOverviewTest = 1;
- const int kDetailedTest = 2;
- const int kFunctionName = 3;
- const int kDisplayName = 4;
- const int kFunctionNameAndDisplayName = 5;
- const int kDisplayNameIsNotString = 6;
- const int kFunctionNameIsNotString = 7;
-
- CHECK_EQ(args.Length(), 1);
-
- v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- v8::Isolate* isolate = args.GetIsolate();
- int testGroup = args[0]->Int32Value(context).FromJust();
- if (testGroup == kOverviewTest) {
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 10, v8::StackTrace::kOverview);
- CHECK_EQ(4, stackTrace->GetFrameCount());
- checkStackFrame(origin, "bar", 2, 10, false, false,
- stackTrace->GetFrame(args.GetIsolate(), 0));
- checkStackFrame(origin, "foo", 6, 3, false, true,
- stackTrace->GetFrame(isolate, 1));
- // This is the source string inside the eval which has the call to foo.
- checkStackFrame(nullptr, "", 1, 1, true, false,
- stackTrace->GetFrame(isolate, 2));
- // The last frame is an anonymous function which has the initial eval call.
- checkStackFrame(origin, "", 8, 7, false, false,
- stackTrace->GetFrame(isolate, 3));
- } else if (testGroup == kDetailedTest) {
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 10, v8::StackTrace::kDetailed);
- CHECK_EQ(4, stackTrace->GetFrameCount());
- checkStackFrame(origin, "bat", 4, 22, false, false,
- stackTrace->GetFrame(isolate, 0));
- checkStackFrame(origin, "baz", 8, 3, false, true,
- stackTrace->GetFrame(isolate, 1));
- bool is_eval = true;
- // This is the source string inside the eval which has the call to baz.
- checkStackFrame(nullptr, "", 1, 1, is_eval, false,
- stackTrace->GetFrame(isolate, 2));
- // The last frame is an anonymous function which has the initial eval call.
- checkStackFrame(origin, "", 10, 1, false, false,
- stackTrace->GetFrame(isolate, 3));
- } else if (testGroup == kFunctionName) {
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 5, v8::StackTrace::kOverview);
- CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.name", 3, 1, true, false,
- stackTrace->GetFrame(isolate, 0));
- } else if (testGroup == kDisplayName) {
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 5, v8::StackTrace::kOverview);
- CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
- stackTrace->GetFrame(isolate, 0));
- } else if (testGroup == kFunctionNameAndDisplayName) {
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 5, v8::StackTrace::kOverview);
- CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.displayName", 3, 1, true, false,
- stackTrace->GetFrame(isolate, 0));
- } else if (testGroup == kDisplayNameIsNotString) {
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 5, v8::StackTrace::kOverview);
- CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "function.name", 3, 1, true, false,
- stackTrace->GetFrame(isolate, 0));
- } else if (testGroup == kFunctionNameIsNotString) {
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 5, v8::StackTrace::kOverview);
- CHECK_EQ(3, stackTrace->GetFrameCount());
- checkStackFrame(nullptr, "", 3, 1, true, false,
- stackTrace->GetFrame(isolate, 0));
- }
-}
-
-
-// Tests the C++ StackTrace API.
-// TODO(3074796): Reenable this as a THREADED_TEST once it passes.
-// THREADED_TEST(CaptureStackTrace) {
-TEST(CaptureStackTrace) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- v8::Local<v8::String> origin = v8_str("capture-stack-trace-test");
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("AnalyzeStackInNativeCode"),
- v8::FunctionTemplate::New(isolate, AnalyzeStackInNativeCode));
- LocalContext context(nullptr, templ);
-
- // Test getting OVERVIEW information. Should ignore information that is not
- // script name, function name, line number, and column offset.
- const char *overview_source =
- "function bar() {\n"
- " var y; AnalyzeStackInNativeCode(1);\n"
- "}\n"
- "function foo() {\n"
- "\n"
- " bar();\n"
- "}\n"
- "var x;eval('new foo();');";
- v8::Local<v8::String> overview_src = v8_str(overview_source);
- v8::ScriptCompiler::Source script_source(overview_src,
- v8::ScriptOrigin(origin));
- v8::Local<Value> overview_result(
- v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source)
- .ToLocalChecked()
- ->BindToCurrentContext()
- ->Run(context.local())
- .ToLocalChecked());
- CHECK(!overview_result.IsEmpty());
- CHECK(overview_result->IsObject());
-
- // Test getting DETAILED information.
- const char *detailed_source =
- "function bat() {AnalyzeStackInNativeCode(2);\n"
- "}\n"
- "\n"
- "function baz() {\n"
- " bat();\n"
- "}\n"
- "eval('new baz();');";
- v8::Local<v8::String> detailed_src = v8_str(detailed_source);
- // Make the script using a non-zero line and column offset.
- v8::Local<v8::Integer> line_offset = v8::Integer::New(isolate, 3);
- v8::Local<v8::Integer> column_offset = v8::Integer::New(isolate, 5);
- v8::ScriptOrigin detailed_origin(origin, line_offset, column_offset);
- v8::ScriptCompiler::Source script_source2(detailed_src, detailed_origin);
- v8::Local<v8::UnboundScript> detailed_script(
- v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source2)
- .ToLocalChecked());
- v8::Local<Value> detailed_result(detailed_script->BindToCurrentContext()
- ->Run(context.local())
- .ToLocalChecked());
- CHECK(!detailed_result.IsEmpty());
- CHECK(detailed_result->IsObject());
-
- // Test using function.name and function.displayName in stack trace
- const char* function_name_source =
- "function bar(function_name, display_name, testGroup) {\n"
- " var f = new Function(`AnalyzeStackInNativeCode(${testGroup});`);\n"
- " if (function_name) {\n"
- " Object.defineProperty(f, 'name', { value: function_name });\n"
- " }\n"
- " if (display_name) {\n"
- " f.displayName = display_name;"
- " }\n"
- " f()\n"
- "}\n"
- "bar('function.name', undefined, 3);\n"
- "bar(undefined, 'function.displayName', 4);\n"
- "bar('function.name', 'function.displayName', 5);\n"
- "bar('function.name', 239, 6);\n"
- "bar(239, undefined, 7);\n";
- v8::Local<v8::String> function_name_src =
- v8::String::NewFromUtf8(isolate, function_name_source,
- v8::NewStringType::kNormal)
- .ToLocalChecked();
- v8::ScriptCompiler::Source script_source3(function_name_src,
- v8::ScriptOrigin(origin));
- v8::Local<Value> function_name_result(
- v8::ScriptCompiler::CompileUnboundScript(isolate, &script_source3)
- .ToLocalChecked()
- ->BindToCurrentContext()
- ->Run(context.local())
- .ToLocalChecked());
- CHECK(!function_name_result.IsEmpty());
-}
-
-
-static void StackTraceForUncaughtExceptionListener(
- v8::Local<v8::Message> message, v8::Local<Value>) {
- report_count++;
- v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
- CHECK_EQ(2, stack_trace->GetFrameCount());
- checkStackFrame("origin", "foo", 2, 3, false, false,
- stack_trace->GetFrame(message->GetIsolate(), 0));
- checkStackFrame("origin", "bar", 5, 3, false, false,
- stack_trace->GetFrame(message->GetIsolate(), 1));
-}
-
-
-TEST(CaptureStackTraceForUncaughtException) {
- report_count = 0;
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- isolate->AddMessageListener(StackTraceForUncaughtExceptionListener);
- isolate->SetCaptureStackTraceForUncaughtExceptions(true);
-
- CompileRunWithOrigin(
- "function foo() {\n"
- " throw 1;\n"
- "};\n"
- "function bar() {\n"
- " foo();\n"
- "};",
- "origin");
- v8::Local<v8::Object> global = env->Global();
- Local<Value> trouble =
- global->Get(env.local(), v8_str("bar")).ToLocalChecked();
- CHECK(trouble->IsFunction());
- CHECK(Function::Cast(*trouble)
- ->Call(env.local(), global, 0, nullptr)
- .IsEmpty());
- isolate->SetCaptureStackTraceForUncaughtExceptions(false);
- isolate->RemoveMessageListeners(StackTraceForUncaughtExceptionListener);
- CHECK_EQ(1, report_count);
-}
-
-TEST(CaptureStackTraceForUncaughtExceptionAndSetters) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- isolate->SetCaptureStackTraceForUncaughtExceptions(true, 1024,
- v8::StackTrace::kDetailed);
-
- CompileRun(
- "var setters = ['column', 'lineNumber', 'scriptName',\n"
- " 'scriptNameOrSourceURL', 'functionName', 'isEval',\n"
- " 'isConstructor'];\n"
- "for (var i = 0; i < setters.length; i++) {\n"
- " var prop = setters[i];\n"
- " Object.prototype.__defineSetter__(prop, function() { throw prop; });\n"
- "}\n");
- CompileRun("throw 'exception';");
- isolate->SetCaptureStackTraceForUncaughtExceptions(false);
-}
-
static int asm_warning_triggered = 0;
static void AsmJsWarningListener(v8::Local<v8::Message> message,
@@ -16759,7 +16342,7 @@ TEST(ErrorLevelWarning) {
v8::Local<v8::Script> lscript = CompileWithOrigin(source, "test", false);
i::Handle<i::SharedFunctionInfo> obj = i::Handle<i::SharedFunctionInfo>::cast(
v8::Utils::OpenHandle(*lscript->GetUnboundScript()));
- CHECK(obj->script()->IsScript());
+ CHECK(obj->script().IsScript());
i::Handle<i::Script> script(i::Script::cast(obj->script()), i_isolate);
int levels[] = {
@@ -16771,8 +16354,8 @@ TEST(ErrorLevelWarning) {
v8::Isolate::kMessageAll);
for (size_t i = 0; i < arraysize(levels); i++) {
i::MessageLocation location(script, 0, 0);
- i::Handle<i::String> msg(i_isolate->factory()->InternalizeOneByteString(
- i::StaticCharVector("test")));
+ i::Handle<i::String> msg(
+ i_isolate->factory()->InternalizeString(i::StaticCharVector("test")));
i::Handle<i::JSMessageObject> message =
i::MessageHandler::MakeMessageObject(
i_isolate, i::MessageTemplate::kAsmJsInvalid, &location, msg,
@@ -16785,194 +16368,6 @@ TEST(ErrorLevelWarning) {
DCHECK_EQ(arraysize(levels), error_level_message_count);
}
-static void StackTraceFunctionNameListener(v8::Local<v8::Message> message,
- v8::Local<Value>) {
- v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
- v8::Isolate* isolate = message->GetIsolate();
- CHECK_EQ(5, stack_trace->GetFrameCount());
- checkStackFrame("origin", "foo:0", 4, 7, false, false,
- stack_trace->GetFrame(isolate, 0));
- checkStackFrame("origin", "foo:1", 5, 27, false, false,
- stack_trace->GetFrame(isolate, 1));
- checkStackFrame("origin", "foo", 5, 27, false, false,
- stack_trace->GetFrame(isolate, 2));
- checkStackFrame("origin", "foo", 5, 27, false, false,
- stack_trace->GetFrame(isolate, 3));
- checkStackFrame("origin", "", 1, 14, false, false,
- stack_trace->GetFrame(isolate, 4));
-}
-
-
-TEST(GetStackTraceContainsFunctionsWithFunctionName) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
-
- CompileRunWithOrigin(
- "function gen(name, counter) {\n"
- " var f = function foo() {\n"
- " if (counter === 0)\n"
- " throw 1;\n"
- " gen(name, counter - 1)();\n"
- " };\n"
- " if (counter == 3) {\n"
- " Object.defineProperty(f, 'name', {get: function(){ throw 239; }});\n"
- " } else {\n"
- " Object.defineProperty(f, 'name', {writable:true});\n"
- " if (counter == 2)\n"
- " f.name = 42;\n"
- " else\n"
- " f.name = name + ':' + counter;\n"
- " }\n"
- " return f;\n"
- "};",
- "origin");
-
- isolate->AddMessageListener(StackTraceFunctionNameListener);
- isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRunWithOrigin("gen('foo', 3)();", "origin");
- isolate->SetCaptureStackTraceForUncaughtExceptions(false);
- isolate->RemoveMessageListeners(StackTraceFunctionNameListener);
-}
-
-
-static void RethrowStackTraceHandler(v8::Local<v8::Message> message,
- v8::Local<v8::Value> data) {
- // Use the frame where JavaScript is called from.
- v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
- CHECK(!stack_trace.IsEmpty());
- int frame_count = stack_trace->GetFrameCount();
- CHECK_EQ(3, frame_count);
- int line_number[] = {1, 2, 5};
- for (int i = 0; i < frame_count; i++) {
- CHECK_EQ(line_number[i],
- stack_trace->GetFrame(message->GetIsolate(), i)->GetLineNumber());
- }
-}
-
-
-// Test that we only return the stack trace at the site where the exception
-// is first thrown (not where it is rethrown).
-TEST(RethrowStackTrace) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- // We make sure that
- // - the stack trace of the ReferenceError in g() is reported.
- // - the stack trace is not overwritten when e1 is rethrown by t().
- // - the stack trace of e2 does not overwrite that of e1.
- const char* source =
- "function g() { error; } \n"
- "function f() { g(); } \n"
- "function t(e) { throw e; } \n"
- "try { \n"
- " f(); \n"
- "} catch (e1) { \n"
- " try { \n"
- " error; \n"
- " } catch (e2) { \n"
- " t(e1); \n"
- " } \n"
- "} \n";
- isolate->AddMessageListener(RethrowStackTraceHandler);
- isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRun(source);
- isolate->SetCaptureStackTraceForUncaughtExceptions(false);
- isolate->RemoveMessageListeners(RethrowStackTraceHandler);
-}
-
-
-static void RethrowPrimitiveStackTraceHandler(v8::Local<v8::Message> message,
- v8::Local<v8::Value> data) {
- v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
- CHECK(!stack_trace.IsEmpty());
- int frame_count = stack_trace->GetFrameCount();
- CHECK_EQ(2, frame_count);
- int line_number[] = {3, 7};
- for (int i = 0; i < frame_count; i++) {
- CHECK_EQ(line_number[i],
- stack_trace->GetFrame(message->GetIsolate(), i)->GetLineNumber());
- }
-}
-
-
-// Test that we do not recognize identity for primitive exceptions.
-TEST(RethrowPrimitiveStackTrace) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- // We do not capture stack trace for non Error objects on creation time.
- // Instead, we capture the stack trace on last throw.
- const char* source =
- "function g() { throw 404; } \n"
- "function f() { g(); } \n"
- "function t(e) { throw e; } \n"
- "try { \n"
- " f(); \n"
- "} catch (e1) { \n"
- " t(e1) \n"
- "} \n";
- isolate->AddMessageListener(RethrowPrimitiveStackTraceHandler);
- isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRun(source);
- isolate->SetCaptureStackTraceForUncaughtExceptions(false);
- isolate->RemoveMessageListeners(RethrowPrimitiveStackTraceHandler);
-}
-
-
-static void RethrowExistingStackTraceHandler(v8::Local<v8::Message> message,
- v8::Local<v8::Value> data) {
- // Use the frame where JavaScript is called from.
- v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
- CHECK(!stack_trace.IsEmpty());
- CHECK_EQ(1, stack_trace->GetFrameCount());
- CHECK_EQ(1, stack_trace->GetFrame(message->GetIsolate(), 0)->GetLineNumber());
-}
-
-
-// Test that the stack trace is captured when the error object is created and
-// not where it is thrown.
-TEST(RethrowExistingStackTrace) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- const char* source =
- "var e = new Error(); \n"
- "throw e; \n";
- isolate->AddMessageListener(RethrowExistingStackTraceHandler);
- isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRun(source);
- isolate->SetCaptureStackTraceForUncaughtExceptions(false);
- isolate->RemoveMessageListeners(RethrowExistingStackTraceHandler);
-}
-
-
-static void RethrowBogusErrorStackTraceHandler(v8::Local<v8::Message> message,
- v8::Local<v8::Value> data) {
- // Use the frame where JavaScript is called from.
- v8::Local<v8::StackTrace> stack_trace = message->GetStackTrace();
- CHECK(!stack_trace.IsEmpty());
- CHECK_EQ(1, stack_trace->GetFrameCount());
- CHECK_EQ(2, stack_trace->GetFrame(message->GetIsolate(), 0)->GetLineNumber());
-}
-
-
-// Test that the stack trace is captured where the bogus Error object is thrown.
-TEST(RethrowBogusErrorStackTrace) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
- const char* source =
- "var e = {__proto__: new Error()} \n"
- "throw e; \n";
- isolate->AddMessageListener(RethrowBogusErrorStackTraceHandler);
- isolate->SetCaptureStackTraceForUncaughtExceptions(true);
- CompileRun(source);
- isolate->SetCaptureStackTraceForUncaughtExceptions(false);
- isolate->RemoveMessageListeners(RethrowBogusErrorStackTraceHandler);
-}
-
-
v8::PromiseRejectEvent reject_event = v8::kPromiseRejectWithNoHandler;
int promise_reject_counter = 0;
int promise_revoke_counter = 0;
@@ -17548,7 +16943,8 @@ TEST(PromiseRejectCallbackConstructError) {
CompileRun(
"function f(p) {"
" p.catch(() => {});"
- "}"
+ "};"
+ "%PrepareFunctionForOptimization(f);"
"f(Promise.reject());"
"f(Promise.reject());"
"%OptimizeFunctionOnNextCall(f);"
@@ -17556,134 +16952,6 @@ TEST(PromiseRejectCallbackConstructError) {
"f(p);");
}
-void AnalyzeStackOfEvalWithSourceURL(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::HandleScope scope(args.GetIsolate());
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 10, v8::StackTrace::kDetailed);
- CHECK_EQ(5, stackTrace->GetFrameCount());
- v8::Local<v8::String> url = v8_str("eval_url");
- for (int i = 0; i < 3; i++) {
- v8::Local<v8::String> name =
- stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptNameOrSourceURL();
- CHECK(!name.IsEmpty());
- CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
- }
-}
-
-
-TEST(SourceURLInStackTrace) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("AnalyzeStackOfEvalWithSourceURL"),
- v8::FunctionTemplate::New(isolate,
- AnalyzeStackOfEvalWithSourceURL));
- LocalContext context(nullptr, templ);
-
- const char *source =
- "function outer() {\n"
- "function bar() {\n"
- " AnalyzeStackOfEvalWithSourceURL();\n"
- "}\n"
- "function foo() {\n"
- "\n"
- " bar();\n"
- "}\n"
- "foo();\n"
- "}\n"
- "eval('(' + outer +')()%s');";
-
- i::ScopedVector<char> code(1024);
- i::SNPrintF(code, source, "//# sourceURL=eval_url");
- CHECK(CompileRun(code.start())->IsUndefined());
- i::SNPrintF(code, source, "//@ sourceURL=eval_url");
- CHECK(CompileRun(code.start())->IsUndefined());
-}
-
-
-static int scriptIdInStack[2];
-
-void AnalyzeScriptIdInStack(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::HandleScope scope(args.GetIsolate());
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 10, v8::StackTrace::kScriptId);
- CHECK_EQ(2, stackTrace->GetFrameCount());
- for (int i = 0; i < 2; i++) {
- scriptIdInStack[i] =
- stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptId();
- }
-}
-
-
-TEST(ScriptIdInStackTrace) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("AnalyzeScriptIdInStack"),
- v8::FunctionTemplate::New(isolate, AnalyzeScriptIdInStack));
- LocalContext context(nullptr, templ);
-
- v8::Local<v8::String> scriptSource = v8_str(
- "function foo() {\n"
- " AnalyzeScriptIdInStack();"
- "}\n"
- "foo();\n");
- v8::Local<v8::Script> script = CompileWithOrigin(scriptSource, "test", false);
- script->Run(context.local()).ToLocalChecked();
- for (int i = 0; i < 2; i++) {
- CHECK_NE(scriptIdInStack[i], v8::Message::kNoScriptIdInfo);
- CHECK_EQ(scriptIdInStack[i], script->GetUnboundScript()->GetId());
- }
-}
-
-
-void AnalyzeStackOfInlineScriptWithSourceURL(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::HandleScope scope(args.GetIsolate());
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 10, v8::StackTrace::kDetailed);
- CHECK_EQ(4, stackTrace->GetFrameCount());
- v8::Local<v8::String> url = v8_str("source_url");
- for (int i = 0; i < 3; i++) {
- v8::Local<v8::String> name =
- stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptNameOrSourceURL();
- CHECK(!name.IsEmpty());
- CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
- }
-}
-
-
-TEST(InlineScriptWithSourceURLInStackTrace) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("AnalyzeStackOfInlineScriptWithSourceURL"),
- v8::FunctionTemplate::New(
- CcTest::isolate(), AnalyzeStackOfInlineScriptWithSourceURL));
- LocalContext context(nullptr, templ);
-
- const char *source =
- "function outer() {\n"
- "function bar() {\n"
- " AnalyzeStackOfInlineScriptWithSourceURL();\n"
- "}\n"
- "function foo() {\n"
- "\n"
- " bar();\n"
- "}\n"
- "foo();\n"
- "}\n"
- "outer()\n%s";
-
- i::ScopedVector<char> code(1024);
- i::SNPrintF(code, source, "//# sourceURL=source_url");
- CHECK(CompileRunWithOrigin(code.start(), "url", 0, 1)->IsUndefined());
- i::SNPrintF(code, source, "//@ sourceURL=source_url");
- CHECK(CompileRunWithOrigin(code.start(), "url", 0, 1)->IsUndefined());
-}
-
void SetPromise(const char* name, v8::Local<v8::Promise> promise) {
CcTest::global()
->Set(CcTest::isolate()->GetCurrentContext(), v8_str(name), promise)
@@ -18054,76 +17322,6 @@ TEST(PromiseHook) {
isolate->SetPromiseHook(nullptr);
}
-void AnalyzeStackOfDynamicScriptWithSourceURL(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::HandleScope scope(args.GetIsolate());
- v8::Local<v8::StackTrace> stackTrace = v8::StackTrace::CurrentStackTrace(
- args.GetIsolate(), 10, v8::StackTrace::kDetailed);
- CHECK_EQ(4, stackTrace->GetFrameCount());
- v8::Local<v8::String> url = v8_str("source_url");
- for (int i = 0; i < 3; i++) {
- v8::Local<v8::String> name =
- stackTrace->GetFrame(args.GetIsolate(), i)->GetScriptNameOrSourceURL();
- CHECK(!name.IsEmpty());
- CHECK(url->Equals(args.GetIsolate()->GetCurrentContext(), name).FromJust());
- }
-}
-
-
-TEST(DynamicWithSourceURLInStackTrace) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->Set(v8_str("AnalyzeStackOfDynamicScriptWithSourceURL"),
- v8::FunctionTemplate::New(
- CcTest::isolate(), AnalyzeStackOfDynamicScriptWithSourceURL));
- LocalContext context(nullptr, templ);
-
- const char *source =
- "function outer() {\n"
- "function bar() {\n"
- " AnalyzeStackOfDynamicScriptWithSourceURL();\n"
- "}\n"
- "function foo() {\n"
- "\n"
- " bar();\n"
- "}\n"
- "foo();\n"
- "}\n"
- "outer()\n%s";
-
- i::ScopedVector<char> code(1024);
- i::SNPrintF(code, source, "//# sourceURL=source_url");
- CHECK(CompileRunWithOrigin(code.start(), "url", 0, 0)->IsUndefined());
- i::SNPrintF(code, source, "//@ sourceURL=source_url");
- CHECK(CompileRunWithOrigin(code.start(), "url", 0, 0)->IsUndefined());
-}
-
-
-TEST(DynamicWithSourceURLInStackTraceString) {
- LocalContext context;
- v8::HandleScope scope(context->GetIsolate());
-
- const char *source =
- "function outer() {\n"
- " function foo() {\n"
- " FAIL.FAIL;\n"
- " }\n"
- " foo();\n"
- "}\n"
- "outer()\n%s";
-
- i::ScopedVector<char> code(1024);
- i::SNPrintF(code, source, "//# sourceURL=source_url");
- v8::TryCatch try_catch(context->GetIsolate());
- CompileRunWithOrigin(code.start(), "", 0, 0);
- CHECK(try_catch.HasCaught());
- v8::String::Utf8Value stack(
- context->GetIsolate(),
- try_catch.StackTrace(context.local()).ToLocalChecked());
- CHECK_NOT_NULL(strstr(*stack, "at foo (source_url:3:5)"));
-}
-
TEST(EvalWithSourceURLInMessageScriptResourceNameOrSourceURL) {
LocalContext context;
@@ -19681,8 +18879,8 @@ THREADED_TEST(TwoByteStringInOneByteCons) {
// happen in real pages.
CHECK(string->IsOneByteRepresentation());
i::ConsString cons = i::ConsString::cast(*string);
- CHECK_EQ(0, cons->second()->length());
- CHECK(cons->first()->IsTwoByteRepresentation());
+ CHECK_EQ(0, cons.second().length());
+ CHECK(cons.first().IsTwoByteRepresentation());
}
// Check that some string operations work.
@@ -20162,7 +19360,7 @@ static int CalcFibonacci(v8::Isolate* isolate, int limit) {
" return fib(n-1) + fib(n-2);"
"}"
"fib(%d)", limit);
- Local<Value> value = CompileRun(code.start());
+ Local<Value> value = CompileRun(code.begin());
CHECK(value->IsNumber());
return static_cast<int>(value->NumberValue(context.local()).FromJust());
}
@@ -21132,11 +20330,11 @@ THREADED_TEST(ReadOnlyIndexedProperties) {
}
static int CountLiveMapsInMapCache(i::Context context) {
- i::WeakFixedArray map_cache = i::WeakFixedArray::cast(context->map_cache());
- int length = map_cache->length();
+ i::WeakFixedArray map_cache = i::WeakFixedArray::cast(context.map_cache());
+ int length = map_cache.length();
int count = 0;
for (int i = 0; i < length; i++) {
- if (map_cache->Get(i)->IsWeak()) count++;
+ if (map_cache.Get(i)->IsWeak()) count++;
}
return count;
}
@@ -21332,7 +20530,7 @@ void RecursiveCall(const v8::FunctionCallbackInfo<v8::Value>& args) {
char script[64];
i::Vector<char> script_vector(script, sizeof(script));
i::SNPrintF(script_vector, "recursion(%d)", level);
- CompileRun(script_vector.start());
+ CompileRun(script_vector.begin());
v8::base::OS::Print("Leaving recursion level %d.\n", level);
CHECK_EQ(0, callback_fired);
} else {
@@ -21854,7 +21052,7 @@ void AssertCowElements(bool expected, const char* source) {
Local<Value> object = CompileRun(source);
i::Handle<i::JSObject> array =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*object.As<Object>()));
- CHECK_EQ(expected, array->elements()->IsCowArray());
+ CHECK_EQ(expected, array->elements().IsCowArray());
}
} // namespace
@@ -21983,15 +21181,19 @@ static void CheckInstanceCheckedAccessors(bool expects_callbacks) {
CheckInstanceCheckedResult(1, 1, expects_callbacks, &try_catch);
// Test path through generated LoadIC and StoredIC.
- CompileRun("function test_get(o) { o.foo; }"
- "test_get(obj);");
+ CompileRun(
+ "function test_get(o) { o.foo; };"
+ "%PrepareFunctionForOptimization(test_get);"
+ "test_get(obj);");
CheckInstanceCheckedResult(2, 1, expects_callbacks, &try_catch);
CompileRun("test_get(obj);");
CheckInstanceCheckedResult(3, 1, expects_callbacks, &try_catch);
CompileRun("test_get(obj);");
CheckInstanceCheckedResult(4, 1, expects_callbacks, &try_catch);
- CompileRun("function test_set(o) { o.foo = 23; }"
- "test_set(obj);");
+ CompileRun(
+ "function test_set(o) { o.foo = 23; }"
+ "%PrepareFunctionForOptimization(test_set);"
+ "test_set(obj);");
CheckInstanceCheckedResult(4, 2, expects_callbacks, &try_catch);
CompileRun("test_set(obj);");
CheckInstanceCheckedResult(4, 3, expects_callbacks, &try_catch);
@@ -22190,6 +21392,7 @@ static void Helper137002(bool do_store,
CompileRun(do_store ?
"function f(x) { x.foo = void 0; }" :
"function f(x) { return x.foo; }");
+ CompileRun("%PrepareFunctionForOptimization(f);");
CompileRun("obj.y = void 0;");
if (!interceptor) {
CompileRun("%OptimizeObjectForAddingMultipleProperties(obj, 1);");
@@ -22647,7 +21850,7 @@ void CheckCorrectThrow(const char* script) {
catch_callback_called = false;
i::ScopedVector<char> source(1024);
i::SNPrintF(source, "try { %s; } catch (e) { catcher(e); }", script);
- CompileRun(source.start());
+ CompileRun(source.begin());
CHECK(access_check_fail_thrown);
CHECK(catch_callback_called);
@@ -23123,21 +22326,23 @@ class RequestInterruptTestWithMathAbs
.FromJust();
i::FLAG_allow_natives_syntax = true;
- CompileRun("function loopish(o) {"
- " var pre = 10;"
- " while (o.abs(1) > 0) {"
- " if (o.abs(1) >= 0 && !ShouldContinue()) break;"
- " if (pre > 0) {"
- " if (--pre === 0) WakeUpInterruptor(o === Math);"
- " }"
- " }"
- "}"
- "var i = 50;"
- "var obj = {abs: function () { return i-- }, x: null};"
- "delete obj.x;"
- "loopish(obj);"
- "%OptimizeFunctionOnNextCall(loopish);"
- "loopish(Math);");
+ CompileRun(
+ "function loopish(o) {"
+ " var pre = 10;"
+ " while (o.abs(1) > 0) {"
+ " if (o.abs(1) >= 0 && !ShouldContinue()) break;"
+ " if (pre > 0) {"
+ " if (--pre === 0) WakeUpInterruptor(o === Math);"
+ " }"
+ " }"
+ "};"
+ "%PrepareFunctionForOptimization(loopish);"
+ "var i = 50;"
+ "var obj = {abs: function () { return i-- }, x: null};"
+ "delete obj.x;"
+ "loopish(obj);"
+ "%OptimizeFunctionOnNextCall(loopish);"
+ "loopish(Math);");
i::FLAG_allow_natives_syntax = false;
}
@@ -23289,9 +22494,9 @@ THREADED_TEST(FunctionNew) {
auto serial_number =
i::Smi::cast(i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*func))
->shared()
- ->get_api_func_data()
- ->serial_number())
- ->value();
+ .get_api_func_data()
+ .serial_number())
+ .value();
CHECK_EQ(i::FunctionTemplateInfo::kInvalidSerialNumber, serial_number);
// Verify that each Function::New creates a new function instance
@@ -23637,36 +22842,39 @@ class ApiCallOptimizationChecker {
}
// build source string
i::ScopedVector<char> source(1000);
- i::SNPrintF(
- source,
- "%s\n" // wrap functions
- "function wrap_f() { return wrap_f_%d(); }\n"
- "function wrap_get() { return wrap_get_%d(); }\n"
- "function wrap_set() { return wrap_set_%d(); }\n"
- "check = function(returned) {\n"
- " if (returned !== 'returned') { throw returned; }\n"
- "}\n"
- "\n"
- "check(wrap_f());\n"
- "check(wrap_f());\n"
- "%%OptimizeFunctionOnNextCall(wrap_f_%d);\n"
- "check(wrap_f());\n"
- "\n"
- "check(wrap_get());\n"
- "check(wrap_get());\n"
- "%%OptimizeFunctionOnNextCall(wrap_get_%d);\n"
- "check(wrap_get());\n"
- "\n"
- "check = function(returned) {\n"
- " if (returned !== 1) { throw returned; }\n"
- "}\n"
- "check(wrap_set());\n"
- "check(wrap_set());\n"
- "%%OptimizeFunctionOnNextCall(wrap_set_%d);\n"
- "check(wrap_set());\n",
- wrap_function.start(), key, key, key, key, key, key);
+ i::SNPrintF(source,
+ "%s\n" // wrap functions
+ "function wrap_f() { return wrap_f_%d(); }\n"
+ "function wrap_get() { return wrap_get_%d(); }\n"
+ "function wrap_set() { return wrap_set_%d(); }\n"
+ "check = function(returned) {\n"
+ " if (returned !== 'returned') { throw returned; }\n"
+ "};\n"
+ "\n"
+ "%%PrepareFunctionForOptimization(wrap_f_%d);"
+ "check(wrap_f());\n"
+ "check(wrap_f());\n"
+ "%%OptimizeFunctionOnNextCall(wrap_f_%d);\n"
+ "check(wrap_f());\n"
+ "\n"
+ "%%PrepareFunctionForOptimization(wrap_get_%d);"
+ "check(wrap_get());\n"
+ "check(wrap_get());\n"
+ "%%OptimizeFunctionOnNextCall(wrap_get_%d);\n"
+ "check(wrap_get());\n"
+ "\n"
+ "check = function(returned) {\n"
+ " if (returned !== 1) { throw returned; }\n"
+ "};\n"
+ "%%PrepareFunctionForOptimization(wrap_set_%d);"
+ "check(wrap_set());\n"
+ "check(wrap_set());\n"
+ "%%OptimizeFunctionOnNextCall(wrap_set_%d);\n"
+ "check(wrap_set());\n",
+ wrap_function.begin(), key, key, key, key, key, key, key, key,
+ key);
v8::TryCatch try_catch(isolate);
- CompileRun(source.start());
+ CompileRun(source.begin());
CHECK(!try_catch.HasCaught());
CHECK_EQ(9, count);
}
@@ -23702,6 +22910,7 @@ TEST(FunctionCallOptimizationMultipleArgs) {
" x(1,2,3);\n"
" }\n"
"}\n"
+ "%PrepareFunctionForOptimization(x_wrap);\n"
"x_wrap();\n"
"%OptimizeFunctionOnNextCall(x_wrap);"
"x_wrap();\n");
@@ -23729,6 +22938,7 @@ TEST(ApiCallbackCanReturnSymbols) {
" x();\n"
" }\n"
"}\n"
+ "%PrepareFunctionForOptimization(x_wrap);\n"
"x_wrap();\n"
"%OptimizeFunctionOnNextCall(x_wrap);"
"x_wrap();\n");
@@ -24452,18 +23662,6 @@ TEST(Regress354123) {
}
-TEST(CaptureStackTraceForStackOverflow) {
- v8::internal::FLAG_stack_size = 150;
- LocalContext current;
- v8::Isolate* isolate = current->GetIsolate();
- v8::HandleScope scope(isolate);
- isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
- v8::StackTrace::kDetailed);
- v8::TryCatch try_catch(isolate);
- CompileRun("(function f(x) { f(x+1); })(0)");
- CHECK(try_catch.HasCaught());
-}
-
namespace {
bool ValueEqualsString(v8::Isolate* isolate, Local<Value> lhs,
const char* rhs) {
@@ -24507,7 +23705,7 @@ TEST(ScriptPositionInfo) {
i::Handle<i::SharedFunctionInfo> obj = i::Handle<i::SharedFunctionInfo>::cast(
v8::Utils::OpenHandle(*script->GetUnboundScript()));
- CHECK(obj->script()->IsScript());
+ CHECK(obj->script().IsScript());
i::Handle<i::Script> script1(i::Script::cast(obj->script()), i_isolate);
@@ -25663,6 +24861,7 @@ TEST(TurboAsmDisablesDetach) {
"}"
"var buffer = new ArrayBuffer(4096);"
"var module = Module(this, {}, buffer);"
+ "%PrepareFunctionForOptimization(module.load);"
"%OptimizeFunctionOnNextCall(module.load);"
"module.load();"
"buffer";
@@ -25679,6 +24878,7 @@ TEST(TurboAsmDisablesDetach) {
"}"
"var buffer = new ArrayBuffer(4096);"
"var module = Module(this, {}, buffer);"
+ "%PrepareFunctionForOptimization(module.store);"
"%OptimizeFunctionOnNextCall(module.store);"
"module.store();"
"buffer";
@@ -25882,6 +25082,7 @@ TEST(ExtrasCreatePromise) {
CHECK(env->Global()->Set(env.local(), v8_str("func"), func).FromJust());
auto promise = CompileRun(
+ "%PrepareFunctionForOptimization(func);\n"
"func();\n"
"func();\n"
"%OptimizeFunctionOnNextCall(func);\n"
@@ -25906,6 +25107,7 @@ TEST(ExtrasCreatePromiseWithParent) {
auto promise = CompileRun(
"var parent = new Promise((a, b) => {});\n"
+ "%PrepareFunctionForOptimization(func);\n"
"func(parent);\n"
"func(parent);\n"
"%OptimizeFunctionOnNextCall(func);\n"
@@ -25932,6 +25134,7 @@ TEST(ExtrasRejectPromise) {
"function newPromise() {\n"
" return new Promise((a, b) => {});\n"
"}\n"
+ "%PrepareFunctionForOptimization(func);\n"
"func(newPromise(), 1);\n"
"func(newPromise(), 1);\n"
"%OptimizeFunctionOnNextCall(func);\n"
@@ -25961,6 +25164,7 @@ TEST(ExtrasResolvePromise) {
"function newPromise() {\n"
" return new Promise((a, b) => {});\n"
"}\n"
+ "%PrepareFunctionForOptimization(func);\n"
"func(newPromise(), newPromise());\n"
"func(newPromise(), newPromise());\n"
"%OptimizeFunctionOnNextCall(func);\n"
@@ -25974,6 +25178,7 @@ TEST(ExtrasResolvePromise) {
"function newPromise() {\n"
" return new Promise((a, b) => {});\n"
"}\n"
+ "%PrepareFunctionForOptimization(func);\n"
"func(newPromise(), 1);\n"
"func(newPromise(), 1);\n"
"%OptimizeFunctionOnNextCall(func);\n"
@@ -26652,25 +25857,25 @@ TEST(ObjectTemplateArrayProtoIntrinsics) {
i::SNPrintF(test_string, "typeof obj1.%s",
intrinsics_comparisons[i].object_property_name);
- ExpectString(test_string.start(), "function");
+ ExpectString(test_string.begin(), "function");
i::SNPrintF(test_string, "obj1.%s === %s",
intrinsics_comparisons[i].object_property_name,
intrinsics_comparisons[i].array_property_name);
- ExpectTrue(test_string.start());
+ ExpectTrue(test_string.begin());
i::SNPrintF(test_string, "obj1.%s = 42",
intrinsics_comparisons[i].object_property_name);
- CompileRun(test_string.start());
+ CompileRun(test_string.begin());
i::SNPrintF(test_string, "obj1.%s === %s",
intrinsics_comparisons[i].object_property_name,
intrinsics_comparisons[i].array_property_name);
- ExpectFalse(test_string.start());
+ ExpectFalse(test_string.begin());
i::SNPrintF(test_string, "typeof obj1.%s",
intrinsics_comparisons[i].object_property_name);
- ExpectString(test_string.start(), "number");
+ ExpectString(test_string.begin(), "number");
}
}
@@ -27794,8 +26999,7 @@ TEST(WasmI32AtomicWaitCallback) {
->Set(env.local(), v8_str("func"), v8::Utils::ToLocal(func))
.FromJust());
Handle<JSArrayBuffer> memory(
- r.builder().instance_object()->memory_object()->array_buffer(),
- i_isolate);
+ r.builder().instance_object()->memory_object().array_buffer(), i_isolate);
CHECK(env->Global()
->Set(env.local(), v8_str("sab"), v8::Utils::ToLocal(memory))
.FromJust());
@@ -27831,8 +27035,7 @@ TEST(WasmI64AtomicWaitCallback) {
->Set(env.local(), v8_str("func"), v8::Utils::ToLocal(func))
.FromJust());
Handle<JSArrayBuffer> memory(
- r.builder().instance_object()->memory_object()->array_buffer(),
- i_isolate);
+ r.builder().instance_object()->memory_object().array_buffer(), i_isolate);
CHECK(env->Global()
->Set(env.local(), v8_str("sab"), v8::Utils::ToLocal(memory))
.FromJust());
@@ -28041,8 +27244,8 @@ TEST(TestGetUnwindState) {
for (int id = 0; id < i::Builtins::builtin_count; id++) {
if (!i::Builtins::IsIsolateIndependent(id)) continue;
i::Code builtin = i_isolate->builtins()->builtin(id);
- i::Address start = builtin->InstructionStart();
- i::Address end = start + builtin->InstructionSize();
+ i::Address start = builtin.InstructionStart();
+ i::Address end = start + builtin.InstructionSize();
i::Address builtins_start =
reinterpret_cast<i::Address>(builtins_range.start);
@@ -28056,9 +27259,8 @@ TEST(TestGetUnwindState) {
v8::JSEntryStub js_entry_stub = unwind_state.js_entry_stub;
- CHECK_EQ(
- i_isolate->heap()->builtin(i::Builtins::kJSEntry)->InstructionStart(),
- reinterpret_cast<i::Address>(js_entry_stub.code.start));
+ CHECK_EQ(i_isolate->heap()->builtin(i::Builtins::kJSEntry).InstructionStart(),
+ reinterpret_cast<i::Address>(js_entry_stub.code.start));
}
TEST(MicrotaskContextShouldBeNativeContext) {
@@ -28569,6 +27771,8 @@ UNINITIALIZED_TEST(NestedIsolates) {
// call into the other isolate. Recurse a few times, trigger GC along the way,
// and finally capture a stack trace. Check that the stack trace only includes
// frames from its own isolate.
+ i::FLAG_stack_trace_limit = 20;
+ i::FLAG_experimental_stack_trace_frames = true;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
isolate_1 = v8::Isolate::New(create_params);
@@ -28625,18 +27829,23 @@ UNINITIALIZED_TEST(NestedIsolates) {
CompileRun("f2(); result //# sourceURL=isolate2c")
->ToString(context)
.ToLocalChecked();
- v8::Local<v8::String> expectation = v8_str(isolate_2,
- "Error\n"
- " at f2 (isolate2a:1:104)\n"
- " at isolate2b:1:1\n"
- " at f2 (isolate2a:1:71)\n"
- " at isolate2b:1:1\n"
- " at f2 (isolate2a:1:71)\n"
- " at isolate2b:1:1\n"
- " at f2 (isolate2a:1:71)\n"
- " at isolate2b:1:1\n"
- " at f2 (isolate2a:1:71)\n"
- " at isolate2c:1:1");
+ v8::Local<v8::String> expectation =
+ v8_str(isolate_2,
+ "Error\n"
+ " at f2 (isolate2a:1:104)\n"
+ " at isolate2b:1:1\n"
+ " at call_isolate_1 (<anonymous>)\n"
+ " at f2 (isolate2a:1:71)\n"
+ " at isolate2b:1:1\n"
+ " at call_isolate_1 (<anonymous>)\n"
+ " at f2 (isolate2a:1:71)\n"
+ " at isolate2b:1:1\n"
+ " at call_isolate_1 (<anonymous>)\n"
+ " at f2 (isolate2a:1:71)\n"
+ " at isolate2b:1:1\n"
+ " at call_isolate_1 (<anonymous>)\n"
+ " at f2 (isolate2a:1:71)\n"
+ " at isolate2c:1:1");
CHECK(result->StrictEquals(expectation));
}
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
index 37d858f5f4..c6d9ac3509 100644
--- a/deps/v8/test/cctest/test-api.h
+++ b/deps/v8/test/cctest/test-api.h
@@ -5,11 +5,11 @@
#ifndef V8_TEST_CCTEST_TEST_API_H_
#define V8_TEST_CCTEST_TEST_API_H_
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api.h"
-#include "src/isolate.h"
-#include "src/vm-state.h"
+#include "src/api/api.h"
+#include "src/execution/isolate.h"
+#include "src/execution/vm-state.h"
#include "test/cctest/cctest.h"
template <typename T>
@@ -19,9 +19,9 @@ static void CheckReturnValue(const T& t, i::Address callback) {
CHECK_EQ(CcTest::isolate(), t.GetIsolate());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(t.GetIsolate());
CHECK_EQ(t.GetIsolate(), rv.GetIsolate());
- CHECK((*o)->IsTheHole(isolate) || (*o)->IsUndefined(isolate));
+ CHECK((*o).IsTheHole(isolate) || (*o).IsUndefined(isolate));
// Verify reset
- bool is_runtime = (*o)->IsTheHole(isolate);
+ bool is_runtime = (*o).IsTheHole(isolate);
if (is_runtime) {
CHECK(rv.Get()->IsUndefined());
} else {
@@ -29,10 +29,10 @@ static void CheckReturnValue(const T& t, i::Address callback) {
CHECK_EQ(*v, *o);
}
rv.Set(true);
- CHECK(!(*o)->IsTheHole(isolate) && !(*o)->IsUndefined(isolate));
+ CHECK(!(*o).IsTheHole(isolate) && !(*o).IsUndefined(isolate));
rv.Set(v8::Local<v8::Object>());
- CHECK((*o)->IsTheHole(isolate) || (*o)->IsUndefined(isolate));
- CHECK_EQ(is_runtime, (*o)->IsTheHole(isolate));
+ CHECK((*o).IsTheHole(isolate) || (*o).IsUndefined(isolate));
+ CHECK_EQ(is_runtime, (*o).IsTheHole(isolate));
// If CPU profiler is active check that when API callback is invoked
// VMState is set to EXTERNAL.
if (isolate->is_profiling()) {
diff --git a/deps/v8/test/cctest/test-array-list.cc b/deps/v8/test/cctest/test-array-list.cc
index a8f2913de5..40ddffb83b 100644
--- a/deps/v8/test/cctest/test-array-list.cc
+++ b/deps/v8/test/cctest/test-array-list.cc
@@ -5,7 +5,7 @@
#include <stdlib.h>
#include "src/heap/factory.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index f93163c985..de238f20c2 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -27,15 +27,15 @@
#include <iostream> // NOLINT(readability/streams)
-#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/disassembler.h"
-#include "src/double.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/ostreams.h"
-#include "src/simulator.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/numbers/double.h"
+#include "src/utils/ostreams.h"
#include "test/cctest/assembler-helper-arm.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -60,8 +60,7 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -96,8 +95,7 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -141,8 +139,7 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -159,11 +156,11 @@ TEST(3) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
int i;
char c;
int16_t s;
- } T;
+ };
T t;
Assembler assm(AssemblerOptions{});
@@ -187,8 +184,7 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -212,7 +208,7 @@ TEST(4) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
double c;
@@ -229,7 +225,7 @@ TEST(4) {
float p;
float x;
float y;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -317,8 +313,7 @@ TEST(4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -379,8 +374,7 @@ TEST(5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -410,8 +404,7 @@ TEST(6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -459,7 +452,6 @@ static void TestRoundingMode(VCVTTypes types,
default:
UNREACHABLE();
- break;
}
// Check for vfp exceptions
__ vmrs(r2);
@@ -478,8 +470,7 @@ static void TestRoundingMode(VCVTTypes types,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -608,7 +599,7 @@ TEST(8) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct D {
double a;
double b;
double c;
@@ -617,10 +608,10 @@ TEST(8) {
double f;
double g;
double h;
- } D;
+ };
D d;
- typedef struct {
+ struct F {
float a;
float b;
float c;
@@ -629,7 +620,7 @@ TEST(8) {
float f;
float g;
float h;
- } F;
+ };
F f;
// Create a function that uses vldm/vstm to move some double and
@@ -660,8 +651,7 @@ TEST(8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -713,7 +703,7 @@ TEST(9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct D {
double a;
double b;
double c;
@@ -722,10 +712,10 @@ TEST(9) {
double f;
double g;
double h;
- } D;
+ };
D d;
- typedef struct {
+ struct F {
float a;
float b;
float c;
@@ -734,7 +724,7 @@ TEST(9) {
float f;
float g;
float h;
- } F;
+ };
F f;
// Create a function that uses vldm/vstm to move some double and
@@ -769,8 +759,7 @@ TEST(9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -822,7 +811,7 @@ TEST(10) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct D {
double a;
double b;
double c;
@@ -831,10 +820,10 @@ TEST(10) {
double f;
double g;
double h;
- } D;
+ };
D d;
- typedef struct {
+ struct F {
float a;
float b;
float c;
@@ -843,7 +832,7 @@ TEST(10) {
float f;
float g;
float h;
- } F;
+ };
F f;
// Create a function that uses vldm/vstm to move some double and
@@ -874,8 +863,7 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -927,12 +915,12 @@ TEST(11) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct I {
int32_t a;
int32_t b;
int32_t c;
int32_t d;
- } I;
+ };
I i;
i.a = 0xABCD0001;
@@ -968,8 +956,7 @@ TEST(11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -1009,7 +996,7 @@ TEST(13) {
return;
}
- typedef struct {
+ struct T {
double a;
double b;
double c;
@@ -1021,7 +1008,7 @@ TEST(13) {
double k;
uint32_t low;
uint32_t high;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -1093,8 +1080,7 @@ TEST(13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -1128,14 +1114,14 @@ TEST(14) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double left;
double right;
double add_result;
double sub_result;
double mul_result;
double div_result;
- } T;
+ };
T t;
// Create a function that makes the four basic operations.
@@ -1165,8 +1151,7 @@ TEST(14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -1239,7 +1224,7 @@ TEST(15) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint32_t src0;
uint32_t src1;
uint32_t src2;
@@ -1313,7 +1298,7 @@ TEST(15) {
uint32_t vtrnd8a[2], vtrnd8b[2], vtrnd16a[2], vtrnd16b[2], vtrnd32a[2],
vtrnd32b[2];
uint32_t vtbl[2], vtbx[2];
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -2045,8 +2030,7 @@ TEST(15) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2279,7 +2263,7 @@ TEST(16) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint32_t src0;
uint32_t src1;
uint32_t src2;
@@ -2288,7 +2272,7 @@ TEST(16) {
uint32_t dst2;
uint32_t dst3;
uint32_t dst4;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -2321,8 +2305,7 @@ TEST(16) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2399,8 +2382,7 @@ TEST(sdiv) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2459,8 +2441,7 @@ TEST(udiv) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2488,8 +2469,7 @@ TEST(smmla) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2513,8 +2493,7 @@ TEST(smmul) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2538,8 +2517,7 @@ TEST(sxtb) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2563,8 +2541,7 @@ TEST(sxtab) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2588,8 +2565,7 @@ TEST(sxth) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2613,8 +2589,7 @@ TEST(sxtah) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2638,8 +2613,7 @@ TEST(uxtb) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2663,8 +2637,7 @@ TEST(uxtab) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2688,8 +2661,7 @@ TEST(uxth) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2713,8 +2685,7 @@ TEST(uxtah) {
__ bx(lr);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2741,10 +2712,10 @@ TEST(rbit) {
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(&assm, ARMv7);
- typedef struct {
+ struct T {
uint32_t input;
uint32_t result;
- } T;
+ };
T t;
__ ldr(r1, MemOperand(r0, offsetof(T, input)));
@@ -2754,8 +2725,7 @@ TEST(rbit) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
@@ -2835,8 +2805,9 @@ TEST(code_relative_offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, code_object);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB)
+ .set_self_reference(code_object)
+ .Build();
auto f = GeneratedCode<F_iiiii>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(21, 0, 0, 0, 0));
::printf("f() = %d\n", res);
@@ -2874,8 +2845,7 @@ TEST(msr_mrs) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -2919,14 +2889,14 @@ TEST(ARMv8_float32_vrintX) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
float input;
float ar;
float nr;
float mr;
float pr;
float zr;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -2971,8 +2941,7 @@ TEST(ARMv8_float32_vrintX) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3020,14 +2989,14 @@ TEST(ARMv8_vrintX) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double input;
double ar;
double nr;
double mr;
double pr;
double zr;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -3072,8 +3041,7 @@ TEST(ARMv8_vrintX) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3209,8 +3177,7 @@ TEST(ARMv8_vsel) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3300,8 +3267,7 @@ TEST(ARMv8_vminmax_f64) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3380,8 +3346,7 @@ TEST(ARMv8_vminmax_f32) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3511,7 +3476,7 @@ static GeneratedCode<F_ppiii> GenerateMacroFloatMinMax(MacroAssembler& assm) {
CodeDesc desc;
assm.GetCode(assm.isolate(), &desc);
Handle<Code> code =
- assm.isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Factory::CodeBuilder(assm.isolate(), desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3655,11 +3620,11 @@ TEST(unaligned_loads) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint32_t ldrh;
uint32_t ldrsh;
uint32_t ldr;
- } T;
+ };
T t;
Assembler assm(AssemblerOptions{});
@@ -3673,8 +3638,7 @@ TEST(unaligned_loads) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3716,8 +3680,7 @@ TEST(unaligned_stores) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -3765,14 +3728,14 @@ TEST(vswp) {
HandleScope scope(isolate);
Assembler assm(AssemblerOptions{});
- typedef struct {
+ struct T {
uint64_t vswp_d0;
uint64_t vswp_d1;
uint64_t vswp_d30;
uint64_t vswp_d31;
uint32_t vswp_q4[4];
uint32_t vswp_q5[4];
- } T;
+ };
T t;
__ stm(db_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | lr.bit());
@@ -3816,8 +3779,7 @@ TEST(vswp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -4028,8 +3990,7 @@ TEST(split_add_immediate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -4048,8 +4009,7 @@ TEST(split_add_immediate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -4071,8 +4031,7 @@ TEST(split_add_immediate) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 44a54df80e..d49f8c8974 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -31,18 +31,18 @@
#include <cmath>
#include <limits>
-#include "src/v8.h"
-
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/decoder-arm64-inl.h"
-#include "src/arm64/disasm-arm64.h"
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/arm64/simulator-arm64.h"
-#include "src/arm64/utils-arm64.h"
+#include "src/init/v8.h"
+
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/decoder-arm64-inl.h"
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/arm64/utils-arm64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/arm64/disasm-arm64.h"
+#include "src/execution/arm64/simulator-arm64.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-utils-arm64.h"
#include "test/common/assembler-tester.h"
@@ -215,8 +215,8 @@ static void InitializeVM() {
#define CHECK_EQUAL_NZCV(expected) \
CHECK(EqualNzcv(expected, core.flags_nzcv()))
-#define CHECK_EQUAL_REGISTERS(expected) \
- CHECK(EqualRegisters(&expected, &core))
+#define CHECK_EQUAL_REGISTERS(expected) \
+ CHECK(EqualV8Registers(&expected, &core))
#define CHECK_EQUAL_32(expected, result) \
CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
@@ -6448,7 +6448,7 @@ namespace {
void LoadLiteral(MacroAssembler* masm, Register reg, uint64_t imm) {
// Since we do not allow non-relocatable entries in the literal pool, we need
// to fake a relocation mode that is not NONE here.
- masm->Ldr(reg, Immediate(imm, RelocInfo::EMBEDDED_OBJECT));
+ masm->Ldr(reg, Immediate(imm, RelocInfo::FULL_EMBEDDED_OBJECT));
}
} // namespace
@@ -6984,10 +6984,6 @@ TEST(claim_drop_zero) {
__ Drop(xzr, 0);
__ Claim(x7, 0);
__ Drop(x7, 0);
- __ ClaimBySMI(xzr, 8);
- __ DropBySMI(xzr, 8);
- __ ClaimBySMI(xzr, 0);
- __ DropBySMI(xzr, 0);
CHECK_EQ(0u, __ SizeOfCodeGeneratedSince(&start));
END();
@@ -12307,160 +12303,6 @@ TEST(push_pop) {
CHECK_EQUAL_32(0x33333333U, w29);
}
-TEST(push_queued) {
- INIT_V8();
- SETUP();
-
- START();
-
- MacroAssembler::PushPopQueue queue(&masm);
-
- // Queue up registers.
- queue.Queue(x0);
- queue.Queue(x1);
- queue.Queue(x2);
- queue.Queue(x3);
-
- queue.Queue(w4);
- queue.Queue(w5);
- queue.Queue(w6);
- queue.Queue(w7);
-
- queue.Queue(d0);
- queue.Queue(d1);
-
- queue.Queue(s2);
- queue.Queue(s3);
- queue.Queue(s4);
- queue.Queue(s5);
-
- __ Mov(x0, 0x1234000000000000);
- __ Mov(x1, 0x1234000100010001);
- __ Mov(x2, 0x1234000200020002);
- __ Mov(x3, 0x1234000300030003);
- __ Mov(w4, 0x12340004);
- __ Mov(w5, 0x12340005);
- __ Mov(w6, 0x12340006);
- __ Mov(w7, 0x12340007);
- __ Fmov(d0, 123400.0);
- __ Fmov(d1, 123401.0);
- __ Fmov(s2, 123402.0);
- __ Fmov(s3, 123403.0);
- __ Fmov(s4, 123404.0);
- __ Fmov(s5, 123405.0);
-
- // Actually push them.
- queue.PushQueued();
-
- Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 8));
- Clobber(&masm, CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, 6));
-
- // Pop them conventionally.
- __ Pop(s5, s4, s3, s2);
- __ Pop(d1, d0);
- __ Pop(w7, w6, w5, w4);
- __ Pop(x3, x2, x1, x0);
-
- END();
-
- RUN();
-
- CHECK_EQUAL_64(0x1234000000000000, x0);
- CHECK_EQUAL_64(0x1234000100010001, x1);
- CHECK_EQUAL_64(0x1234000200020002, x2);
- CHECK_EQUAL_64(0x1234000300030003, x3);
-
- CHECK_EQUAL_64(0x0000000012340004, x4);
- CHECK_EQUAL_64(0x0000000012340005, x5);
- CHECK_EQUAL_64(0x0000000012340006, x6);
- CHECK_EQUAL_64(0x0000000012340007, x7);
-
- CHECK_EQUAL_FP64(123400.0, d0);
- CHECK_EQUAL_FP64(123401.0, d1);
-
- CHECK_EQUAL_FP32(123402.0, s2);
- CHECK_EQUAL_FP32(123403.0, s3);
- CHECK_EQUAL_FP32(123404.0, s4);
- CHECK_EQUAL_FP32(123405.0, s5);
-}
-
-TEST(pop_queued) {
- INIT_V8();
- SETUP();
-
- START();
-
- MacroAssembler::PushPopQueue queue(&masm);
-
- __ Mov(x0, 0x1234000000000000);
- __ Mov(x1, 0x1234000100010001);
- __ Mov(x2, 0x1234000200020002);
- __ Mov(x3, 0x1234000300030003);
- __ Mov(w4, 0x12340004);
- __ Mov(w5, 0x12340005);
- __ Mov(w6, 0x12340006);
- __ Mov(w7, 0x12340007);
- __ Fmov(d0, 123400.0);
- __ Fmov(d1, 123401.0);
- __ Fmov(s2, 123402.0);
- __ Fmov(s3, 123403.0);
- __ Fmov(s4, 123404.0);
- __ Fmov(s5, 123405.0);
-
- // Push registers conventionally.
- __ Push(x0, x1, x2, x3);
- __ Push(w4, w5, w6, w7);
- __ Push(d0, d1);
- __ Push(s2, s3, s4, s5);
-
- // Queue up a pop.
- queue.Queue(s5);
- queue.Queue(s4);
- queue.Queue(s3);
- queue.Queue(s2);
-
- queue.Queue(d1);
- queue.Queue(d0);
-
- queue.Queue(w7);
- queue.Queue(w6);
- queue.Queue(w5);
- queue.Queue(w4);
-
- queue.Queue(x3);
- queue.Queue(x2);
- queue.Queue(x1);
- queue.Queue(x0);
-
- Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 8));
- Clobber(&masm, CPURegList(CPURegister::kVRegister, kDRegSizeInBits, 0, 6));
-
- // Actually pop them.
- queue.PopQueued();
-
- END();
-
- RUN();
-
- CHECK_EQUAL_64(0x1234000000000000, x0);
- CHECK_EQUAL_64(0x1234000100010001, x1);
- CHECK_EQUAL_64(0x1234000200020002, x2);
- CHECK_EQUAL_64(0x1234000300030003, x3);
-
- CHECK_EQUAL_64(0x0000000012340004, x4);
- CHECK_EQUAL_64(0x0000000012340005, x5);
- CHECK_EQUAL_64(0x0000000012340006, x6);
- CHECK_EQUAL_64(0x0000000012340007, x7);
-
- CHECK_EQUAL_FP64(123400.0, d0);
- CHECK_EQUAL_FP64(123401.0, d1);
-
- CHECK_EQUAL_FP32(123402.0, s2);
- CHECK_EQUAL_FP32(123403.0, s3);
- CHECK_EQUAL_FP32(123404.0, s4);
- CHECK_EQUAL_FP32(123405.0, s5);
-}
-
TEST(copy_slots_down) {
INIT_V8();
SETUP();
@@ -12759,146 +12601,6 @@ TEST(copy_noop) {
CHECK_EQUAL_64(0, x16);
}
-TEST(jump_both_smi) {
- INIT_V8();
- SETUP();
-
- Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
- Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
- Label return1, return2, return3, done;
-
- START();
-
- __ Mov(x0, 0x5555555500000001UL); // A pointer.
- __ Mov(x1, 0xAAAAAAAA00000001UL); // A pointer.
- __ Mov(x2, 0x1234567800000000UL); // A smi.
- __ Mov(x3, 0x8765432100000000UL); // A smi.
- __ Mov(x4, 0xDEAD);
- __ Mov(x5, 0xDEAD);
- __ Mov(x6, 0xDEAD);
- __ Mov(x7, 0xDEAD);
-
- __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
- __ Bind(&return1);
- __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
- __ Bind(&return2);
- __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
- __ Bind(&return3);
- __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);
-
- __ Bind(&cond_fail_00);
- __ Mov(x4, 0);
- __ B(&return1);
- __ Bind(&cond_pass_00);
- __ Mov(x4, 1);
- __ B(&return1);
-
- __ Bind(&cond_fail_01);
- __ Mov(x5, 0);
- __ B(&return2);
- __ Bind(&cond_pass_01);
- __ Mov(x5, 1);
- __ B(&return2);
-
- __ Bind(&cond_fail_10);
- __ Mov(x6, 0);
- __ B(&return3);
- __ Bind(&cond_pass_10);
- __ Mov(x6, 1);
- __ B(&return3);
-
- __ Bind(&cond_fail_11);
- __ Mov(x7, 0);
- __ B(&done);
- __ Bind(&cond_pass_11);
- __ Mov(x7, 1);
-
- __ Bind(&done);
-
- END();
-
- RUN();
-
- CHECK_EQUAL_64(0x5555555500000001UL, x0);
- CHECK_EQUAL_64(0xAAAAAAAA00000001UL, x1);
- CHECK_EQUAL_64(0x1234567800000000UL, x2);
- CHECK_EQUAL_64(0x8765432100000000UL, x3);
- CHECK_EQUAL_64(0, x4);
- CHECK_EQUAL_64(0, x5);
- CHECK_EQUAL_64(0, x6);
- CHECK_EQUAL_64(1, x7);
-}
-
-TEST(jump_either_smi) {
- INIT_V8();
- SETUP();
-
- Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
- Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
- Label return1, return2, return3, done;
-
- START();
-
- __ Mov(x0, 0x5555555500000001UL); // A pointer.
- __ Mov(x1, 0xAAAAAAAA00000001UL); // A pointer.
- __ Mov(x2, 0x1234567800000000UL); // A smi.
- __ Mov(x3, 0x8765432100000000UL); // A smi.
- __ Mov(x4, 0xDEAD);
- __ Mov(x5, 0xDEAD);
- __ Mov(x6, 0xDEAD);
- __ Mov(x7, 0xDEAD);
-
- __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
- __ Bind(&return1);
- __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
- __ Bind(&return2);
- __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
- __ Bind(&return3);
- __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);
-
- __ Bind(&cond_fail_00);
- __ Mov(x4, 0);
- __ B(&return1);
- __ Bind(&cond_pass_00);
- __ Mov(x4, 1);
- __ B(&return1);
-
- __ Bind(&cond_fail_01);
- __ Mov(x5, 0);
- __ B(&return2);
- __ Bind(&cond_pass_01);
- __ Mov(x5, 1);
- __ B(&return2);
-
- __ Bind(&cond_fail_10);
- __ Mov(x6, 0);
- __ B(&return3);
- __ Bind(&cond_pass_10);
- __ Mov(x6, 1);
- __ B(&return3);
-
- __ Bind(&cond_fail_11);
- __ Mov(x7, 0);
- __ B(&done);
- __ Bind(&cond_pass_11);
- __ Mov(x7, 1);
-
- __ Bind(&done);
-
- END();
-
- RUN();
-
- CHECK_EQUAL_64(0x5555555500000001UL, x0);
- CHECK_EQUAL_64(0xAAAAAAAA00000001UL, x1);
- CHECK_EQUAL_64(0x1234567800000000UL, x2);
- CHECK_EQUAL_64(0x8765432100000000UL, x3);
- CHECK_EQUAL_64(0, x4);
- CHECK_EQUAL_64(1, x5);
- CHECK_EQUAL_64(1, x6);
- CHECK_EQUAL_64(1, x7);
-}
-
TEST(noreg) {
// This test doesn't generate any code, but it verifies some invariants
// related to NoReg.
@@ -14722,8 +14424,9 @@ TEST(pool_size) {
HandleScope handle_scope(isolate);
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, masm.CodeObject());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB)
+ .set_self_reference(masm.CodeObject())
+ .Build();
unsigned pool_count = 0;
int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index a9c0b60485..246710bb4f 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -27,24 +27,23 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/assembler-inl.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/disassembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disassembler.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
-typedef int (*F0)();
-typedef int (*F1)(int x);
-typedef int (*F2)(int x, int y);
-
+using F0 = int (*)();
+using F1 = int (*)(int x);
+using F2 = int (*)(int x, int y);
#define __ assm.
@@ -63,8 +62,7 @@ TEST(AssemblerIa320) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -101,8 +99,7 @@ TEST(AssemblerIa321) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -143,8 +140,7 @@ TEST(AssemblerIa322) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -155,8 +151,7 @@ TEST(AssemblerIa322) {
CHECK_EQ(3628800, res);
}
-
-typedef int (*F3)(float x);
+using F3 = int (*)(float x);
TEST(AssemblerIa323) {
CcTest::InitializeVM();
@@ -173,8 +168,7 @@ TEST(AssemblerIa323) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -185,8 +179,7 @@ TEST(AssemblerIa323) {
CHECK_EQ(-3, res);
}
-
-typedef int (*F4)(double x);
+using F4 = int (*)(double x);
TEST(AssemblerIa324) {
CcTest::InitializeVM();
@@ -203,8 +196,7 @@ TEST(AssemblerIa324) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -231,15 +223,13 @@ TEST(AssemblerIa325) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
CHECK_EQ(42, res);
}
-
-typedef double (*F5)(double x, double y);
+using F5 = double (*)(double x, double y);
TEST(AssemblerIa326) {
CcTest::InitializeVM();
@@ -265,8 +255,7 @@ TEST(AssemblerIa326) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -277,8 +266,7 @@ TEST(AssemblerIa326) {
CHECK(2.29 < res && res < 2.31);
}
-
-typedef double (*F6)(int x);
+using F6 = double (*)(int x);
TEST(AssemblerIa328) {
CcTest::InitializeVM();
@@ -298,8 +286,7 @@ TEST(AssemblerIa328) {
__ ret(0);
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -381,8 +368,7 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
CHECK(code->IsCode());
F0 f = FUNCTION_CAST<F0>(code->entry());
@@ -432,8 +418,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
@@ -497,8 +482,7 @@ TEST(AssemblerIa32Extractps) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -511,8 +495,7 @@ TEST(AssemblerIa32Extractps) {
CHECK_EQ(static_cast<int>(0x87654321), f(uint64_to_double(value2)));
}
-
-typedef int (*F8)(float x, float y);
+using F8 = int (*)(float x, float y);
TEST(AssemblerIa32SSE) {
CcTest::InitializeVM();
@@ -537,8 +520,7 @@ TEST(AssemblerIa32SSE) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -570,8 +552,7 @@ TEST(AssemblerIa32SSE3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -581,7 +562,7 @@ TEST(AssemblerIa32SSE3) {
CHECK_EQ(4, f(1.0, 2.0));
}
-typedef int (*F9)(double x, double y, double z);
+using F9 = int (*)(double x, double y, double z);
TEST(AssemblerX64FMA_sd) {
CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(FMA3)) return;
@@ -603,7 +584,7 @@ TEST(AssemblerX64FMA_sd) {
__ mulsd(xmm3, xmm1);
__ addsd(xmm3, xmm2); // Expected result in xmm3
- __ sub(esp, Immediate(kDoubleSize)); // For memory operand
+ __ AllocateStackSpace(kDoubleSize); // For memory operand
// vfmadd132sd
__ mov(eax, Immediate(1)); // Test number
__ movaps(xmm4, xmm0);
@@ -798,8 +779,7 @@ TEST(AssemblerX64FMA_sd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -809,8 +789,7 @@ TEST(AssemblerX64FMA_sd) {
CHECK_EQ(0, f(0.000092662107262076, -2.460774966188315, -1.0958787393627414));
}
-
-typedef int (*F10)(float x, float y, float z);
+using F10 = int (*)(float x, float y, float z);
TEST(AssemblerX64FMA_ss) {
CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(FMA3)) return;
@@ -832,7 +811,7 @@ TEST(AssemblerX64FMA_ss) {
__ mulss(xmm3, xmm1);
__ addss(xmm3, xmm2); // Expected result in xmm3
- __ sub(esp, Immediate(kDoubleSize)); // For memory operand
+ __ AllocateStackSpace(kDoubleSize); // For memory operand
// vfmadd132ss
__ mov(eax, Immediate(1)); // Test number
__ movaps(xmm4, xmm0);
@@ -1027,8 +1006,7 @@ TEST(AssemblerX64FMA_ss) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1135,8 +1113,7 @@ TEST(AssemblerIa32BMI1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1183,8 +1160,7 @@ TEST(AssemblerIa32LZCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1231,8 +1207,7 @@ TEST(AssemblerIa32POPCNT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1377,8 +1352,7 @@ TEST(AssemblerIa32BMI2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1421,8 +1395,7 @@ TEST(AssemblerIa32JumpTables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1469,8 +1442,7 @@ TEST(AssemblerIa32JumpTables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1512,8 +1484,7 @@ TEST(Regress621926) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index eb8fc67031..947120816b 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -27,14 +27,14 @@
#include <iostream> // NOLINT(readability/streams)
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/disassembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/simulator.h"
#include "test/cctest/cctest.h"
@@ -43,10 +43,10 @@ namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
// TODO(mips): Refine these signatures per test case.
-typedef void*(F1)(int x, int p1, int p2, int p3, int p4);
-typedef void*(F2)(int x, int y, int p2, int p3, int p4);
-typedef void*(F3)(void* p, int p1, int p2, int p3, int p4);
-typedef void*(F4)(void* p0, void* p1, int p2, int p3, int p4);
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F2 = void*(int x, int y, int p2, int p3, int p4);
+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
+using F4 = void*(void* p0, void* p1, int p2, int p3, int p4);
#define __ assm.
@@ -64,8 +64,7 @@ TEST(MIPS0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(static_cast<int32_t>(0xABC), res);
@@ -99,8 +98,7 @@ TEST(MIPS1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F1>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(50, 0, 0, 0, 0));
CHECK_EQ(1275, res);
@@ -236,8 +234,7 @@ TEST(MIPS2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int res = reinterpret_cast<int>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(static_cast<int32_t>(0x31415926), res);
@@ -250,7 +247,7 @@ TEST(MIPS3) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
double c;
@@ -267,7 +264,7 @@ TEST(MIPS3) {
float fe;
float ff;
float fg;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -336,8 +333,7 @@ TEST(MIPS3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
t.a = 1.5e14;
@@ -392,11 +388,11 @@ TEST(MIPS4) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
double c;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -438,8 +434,7 @@ TEST(MIPS4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e22;
t.b = 2.75e11;
@@ -458,12 +453,12 @@ TEST(MIPS5) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
int i;
int j;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -499,8 +494,7 @@ TEST(MIPS5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e4;
t.b = 2.75e8;
@@ -521,7 +515,7 @@ TEST(MIPS6) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint32_t ui;
int32_t si;
int32_t r1;
@@ -530,7 +524,7 @@ TEST(MIPS6) {
int32_t r4;
int32_t r5;
int32_t r6;
- } T;
+ };
T t;
Assembler assm(AssemblerOptions{});
@@ -567,8 +561,7 @@ TEST(MIPS6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x11223344;
t.si = 0x99AABBCC;
@@ -599,7 +592,7 @@ TEST(MIPS7) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
double c;
@@ -607,7 +600,7 @@ TEST(MIPS7) {
double e;
double f;
int32_t result;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -659,8 +652,7 @@ TEST(MIPS7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -683,7 +675,7 @@ TEST(MIPS8) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
int32_t input;
int32_t result_rotr_4;
int32_t result_rotr_8;
@@ -699,7 +691,7 @@ TEST(MIPS8) {
int32_t result_rotrv_20;
int32_t result_rotrv_24;
int32_t result_rotrv_28;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -755,8 +747,7 @@ TEST(MIPS8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.input = 0x12345678;
f.Call(&t, 0x0, 0, 0, 0);
@@ -800,7 +791,8 @@ TEST(MIPS9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+ USE(code);
}
@@ -810,14 +802,14 @@ TEST(MIPS10) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
int32_t dbl_mant;
int32_t dbl_exp;
int32_t word;
int32_t b_word;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -849,8 +841,7 @@ TEST(MIPS10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
t.b_word = 0x0FF00FF0; // 0x0FF00FF0 -> 0x as double.
@@ -871,7 +862,7 @@ TEST(MIPS11) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
int32_t reg_init;
int32_t mem_init;
int32_t lwl_0;
@@ -890,7 +881,7 @@ TEST(MIPS11) {
int32_t swr_1;
int32_t swr_2;
int32_t swr_3;
- } T;
+ };
T t;
Assembler assm(AssemblerOptions{});
@@ -976,8 +967,7 @@ TEST(MIPS11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.reg_init = 0xAABBCCDD;
t.mem_init = 0x11223344;
@@ -1035,14 +1025,14 @@ TEST(MIPS12) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
- int32_t x;
- int32_t y;
- int32_t y1;
- int32_t y2;
- int32_t y3;
- int32_t y4;
- } T;
+ struct T {
+ int32_t x;
+ int32_t y;
+ int32_t y1;
+ int32_t y2;
+ int32_t y3;
+ int32_t y4;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -1101,8 +1091,7 @@ TEST(MIPS12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.x = 1;
t.y = 2;
@@ -1123,14 +1112,14 @@ TEST(MIPS13) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double cvt_big_out;
double cvt_small_out;
uint32_t trunc_big_out;
uint32_t trunc_small_out;
uint32_t cvt_big_in;
uint32_t cvt_small_in;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -1154,8 +1143,7 @@ TEST(MIPS13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.cvt_big_in = 0xFFFFFFFF;
@@ -1190,7 +1178,7 @@ TEST(MIPS14) {
uint32_t x##_err4_out; \
int32_t x##_invalid_result;
- typedef struct {
+ struct T {
double round_up_in;
double round_down_in;
double neg_round_up_in;
@@ -1205,7 +1193,7 @@ TEST(MIPS14) {
ROUND_STRUCT_ELEMENT(ceil)
ROUND_STRUCT_ELEMENT(trunc)
ROUND_STRUCT_ELEMENT(cvt)
- } T;
+ };
T t;
#undef ROUND_STRUCT_ELEMENT
@@ -1274,8 +1262,7 @@ TEST(MIPS14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.round_up_in = 123.51;
@@ -1335,7 +1322,7 @@ TEST(seleqz_selnez) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test {
+ struct Test {
int a;
int b;
int c;
@@ -1348,7 +1335,7 @@ TEST(seleqz_selnez) {
float j;
float k;
float l;
- } Test;
+ };
Test test;
// Integer part of test.
@@ -1378,8 +1365,7 @@ TEST(seleqz_selnez) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
(f.Call(&test, 0, 0, 0, 0));
@@ -1492,8 +1478,7 @@ TEST(min_max) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1520,11 +1505,11 @@ TEST(rint_d) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double a;
double b;
int fcsr;
- }TestFloat;
+ };
TestFloat test;
double inputs[kTableLength] = {18446744073709551617.0,
@@ -1602,8 +1587,7 @@ TEST(rint_d) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@@ -1625,14 +1609,14 @@ TEST(sel) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test {
+ struct Test {
double dd;
double ds;
double dt;
float fd;
float fs;
float ft;
- } Test;
+ };
Test test;
__ Ldc1(f0, MemOperand(a0, offsetof(Test, dd))); // test
@@ -1649,8 +1633,7 @@ TEST(sel) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const int test_size = 3;
@@ -1699,11 +1682,11 @@ TEST(rint_s) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float b;
int fcsr;
- }TestFloat;
+ };
TestFloat test;
float inputs[kTableLength] = {18446744073709551617.0,
@@ -1781,8 +1764,7 @@ TEST(rint_s) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@@ -1803,10 +1785,10 @@ TEST(Cvt_d_uw) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_struct {
+ struct TestStruct {
unsigned input;
uint64_t output;
- } TestStruct;
+ };
unsigned inputs[] = {0x0, 0xFFFFFFFF, 0x80000000, 0x7FFFFFFF};
@@ -1825,8 +1807,7 @@ TEST(Cvt_d_uw) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.input = inputs[i];
@@ -1906,8 +1887,7 @@ TEST(mina_maxa) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1939,13 +1919,13 @@ TEST(trunc_l) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int64_t c; // a trunc result
int64_t d; // b trunc result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -1986,8 +1966,7 @@ TEST(trunc_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2013,7 +1992,7 @@ TEST(movz_movn) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
int32_t rt;
double a;
double b;
@@ -2025,7 +2004,7 @@ TEST(movz_movn) {
float dold;
float d1;
float dold1;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2066,8 +2045,7 @@ TEST(movz_movn) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2097,7 +2075,7 @@ TEST(movt_movd) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- typedef struct test_float {
+ struct TestFloat {
double srcd;
double dstd;
double dstdold;
@@ -2110,7 +2088,7 @@ TEST(movt_movd) {
float dstfold1;
int32_t cc;
int32_t fcsr;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2168,7 +2146,7 @@ TEST(movt_movd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
(f.Call(&test, 0, 0, 0, 0));
@@ -2195,11 +2173,11 @@ TEST(cvt_w_d) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
double a;
int32_t b;
int32_t fcsr;
- }Test;
+ };
const int kTableLength = 24;
double inputs[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2252,8 +2230,7 @@ TEST(cvt_w_d) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
@@ -2272,13 +2249,13 @@ TEST(trunc_w) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int32_t c; // a trunc result
int32_t d; // b trunc result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2319,8 +2296,7 @@ TEST(trunc_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2342,13 +2318,13 @@ TEST(round_w) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int32_t c; // a trunc result
int32_t d; // b trunc result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2388,8 +2364,7 @@ TEST(round_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2412,13 +2387,13 @@ TEST(round_l) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int64_t c;
int64_t d;
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2459,8 +2434,7 @@ TEST(round_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2485,14 +2459,14 @@ TEST(sub) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float b;
float resultS;
double c;
double d;
double resultD;
- }TestFloat;
+ };
TestFloat test;
double inputfs_D[kTableLength] = {
@@ -2532,8 +2506,7 @@ TEST(sub) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2558,7 +2531,7 @@ TEST(sqrt_rsqrt_recip) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float resultS;
float resultS1;
@@ -2567,7 +2540,7 @@ TEST(sqrt_rsqrt_recip) {
double resultD;
double resultD1;
double resultD2;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2611,8 +2584,7 @@ TEST(sqrt_rsqrt_recip) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
@@ -2658,12 +2630,12 @@ TEST(neg) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float resultS;
double c;
double resultD;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2691,8 +2663,7 @@ TEST(neg) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
@@ -2711,14 +2682,14 @@ TEST(mul) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float b;
float resultS;
double c;
double d;
double resultD;
- }TestFloat;
+ };
TestFloat test;
double inputfs_D[kTableLength] = {
@@ -2748,8 +2719,7 @@ TEST(mul) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2770,12 +2740,12 @@ TEST(mov) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double a;
double b;
float c;
float d;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2804,8 +2774,7 @@ TEST(mov) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2824,13 +2793,13 @@ TEST(floor_w) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int32_t c; // a floor result
int32_t d; // b floor result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2871,8 +2840,7 @@ TEST(floor_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2895,13 +2863,13 @@ TEST(floor_l) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int64_t c;
int64_t d;
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2942,8 +2910,7 @@ TEST(floor_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2967,13 +2934,13 @@ TEST(ceil_w) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int32_t c; // a floor result
int32_t d; // b floor result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -3014,8 +2981,7 @@ TEST(ceil_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3038,13 +3004,13 @@ TEST(ceil_l) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int64_t c;
int64_t d;
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -3085,8 +3051,7 @@ TEST(ceil_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3122,7 +3087,6 @@ TEST(jump_tables1) {
Label done;
{
__ BlockTrampolinePoolFor(kNumCases + 7);
- PredictableCodeSizeScope predictable(&assm, (kNumCases + 7) * kInstrSize);
__ nal();
__ nop();
@@ -3154,8 +3118,7 @@ TEST(jump_tables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3198,7 +3161,6 @@ TEST(jump_tables2) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases + 7);
- PredictableCodeSizeScope predictable(&assm, (kNumCases + 7) * kInstrSize);
__ nal();
__ nop();
@@ -3220,8 +3182,7 @@ TEST(jump_tables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3261,7 +3222,7 @@ TEST(jump_tables3) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
obj = *values[i];
- imm32 = obj->ptr();
+ imm32 = obj.ptr();
__ lui(v0, (imm32 >> 16) & 0xFFFF);
__ ori(v0, v0, imm32 & 0xFFFF);
__ b(&done);
@@ -3271,7 +3232,6 @@ TEST(jump_tables3) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases + 7);
- PredictableCodeSizeScope predictable(&assm, (kNumCases + 7) * kInstrSize);
__ nal();
__ nop();
@@ -3293,8 +3253,7 @@ TEST(jump_tables3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3319,12 +3278,12 @@ TEST(BITSWAP) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
int32_t r1;
int32_t r2;
int32_t r3;
int32_t r4;
- } T;
+ };
T t;
Assembler assm(AssemblerOptions{});
@@ -3344,8 +3303,7 @@ TEST(BITSWAP) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.r1 = 0x781A15C3;
t.r2 = 0x8B71FCDE;
@@ -3364,7 +3322,7 @@ TEST(class_fmt) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double dSignalingNan;
double dQuietNan;
double dNegInf;
@@ -3384,7 +3342,8 @@ TEST(class_fmt) {
float fPosInf;
float fPosNorm;
float fPosSubnorm;
- float fPosZero; } T;
+ float fPosZero;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -3477,8 +3436,7 @@ TEST(class_fmt) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
@@ -3538,12 +3496,12 @@ TEST(ABS) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
int64_t fir;
double a;
float b;
double fcsr;
- } TestFloat;
+ };
TestFloat test;
@@ -3568,8 +3526,7 @@ TEST(ABS) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = -2.0;
test.b = -2.0;
@@ -3635,14 +3592,14 @@ TEST(ADD_FMT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double a;
double b;
double c;
float fa;
float fb;
float fc;
- } TestFloat;
+ };
TestFloat test;
@@ -3661,8 +3618,7 @@ TEST(ADD_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = 2.0;
test.b = 3.0;
@@ -3705,7 +3661,7 @@ TEST(C_COND_FMT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double dOp1;
double dOp2;
uint32_t dF;
@@ -3726,7 +3682,7 @@ TEST(C_COND_FMT) {
uint32_t fUlt;
uint32_t fOle;
uint32_t fUle;
- } TestFloat;
+ };
TestFloat test;
@@ -3815,8 +3771,7 @@ TEST(C_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.dOp1 = 2.0;
test.dOp2 = 3.0;
@@ -3916,7 +3871,7 @@ TEST(CMP_COND_FMT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double dOp1;
double dOp2;
double dF;
@@ -3943,7 +3898,7 @@ TEST(CMP_COND_FMT) {
float fOr;
float fUne;
float fNe;
- } TestFloat;
+ };
TestFloat test;
@@ -4015,8 +3970,7 @@ TEST(CMP_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
@@ -4132,7 +4086,7 @@ TEST(CVT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float cvt_d_s_in;
double cvt_d_s_out;
int32_t cvt_d_w_in;
@@ -4156,7 +4110,7 @@ TEST(CVT) {
int32_t cvt_w_s_out;
double cvt_w_d_in;
int32_t cvt_w_d_out;
- } TestFloat;
+ };
TestFloat test;
@@ -4201,8 +4155,7 @@ TEST(CVT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.cvt_d_s_in = -0.51;
@@ -4377,14 +4330,14 @@ TEST(DIV_FMT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test {
+ struct Test {
double dOp1;
double dOp2;
double dRes;
float fOp1;
float fOp2;
float fRes;
- } Test;
+ };
Test test;
@@ -4413,8 +4366,7 @@ TEST(DIV_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
@@ -4505,8 +4457,7 @@ uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4560,8 +4511,7 @@ uint32_t run_aluipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint32_t)code->entry(); // Set the program counter.
@@ -4613,8 +4563,7 @@ uint32_t run_auipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint32_t)code->entry(); // Set the program counter.
@@ -4688,8 +4637,7 @@ uint32_t run_lwpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4768,8 +4716,7 @@ uint32_t run_jic(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4839,8 +4786,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4896,12 +4842,12 @@ inline void store_elements_of_vector(MacroAssembler& assm, MSARegister w,
__ st_d(w, MemOperand(a, 0));
}
-typedef union {
+union msa_reg_t {
uint8_t b[16];
uint16_t h[8];
uint32_t w[4];
uint64_t d[2];
-} msa_reg_t;
+};
struct TestCaseMsaBranch {
uint64_t wt_lo;
@@ -4917,12 +4863,12 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- typedef struct {
+ struct T {
uint64_t ws_lo;
uint64_t ws_hi;
uint64_t wd_lo;
uint64_t wd_hi;
- } T;
+ };
T t = {0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x0000000000000000,
0x0000000000000000};
msa_reg_t res;
@@ -4942,8 +4888,7 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5112,8 +5057,7 @@ uint32_t run_jialc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5160,8 +5104,7 @@ static uint32_t run_addiupc(int32_t imm19) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint32_t)code->entry(); // Set the program counter.
@@ -5242,8 +5185,7 @@ int32_t run_bc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5323,8 +5265,7 @@ int32_t run_balc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5347,8 +5288,7 @@ uint32_t run_aui(uint32_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5436,8 +5376,7 @@ uint32_t run_bal(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5488,8 +5427,7 @@ TEST(Trampoline) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int32_t res = reinterpret_cast<int32_t>(f.Call(42, 42, 0, 0, 0));
@@ -5554,8 +5492,7 @@ void helper_madd_msub_maddf_msubf(F func) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
@@ -5639,8 +5576,7 @@ uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
@@ -5701,14 +5637,14 @@ TEST(MSA_fill_copy) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint32_t u8;
uint32_t u16;
uint32_t u32;
uint32_t s8;
uint32_t s16;
uint32_t s32;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -5743,8 +5679,7 @@ TEST(MSA_fill_copy) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5767,12 +5702,12 @@ TEST(MSA_fill_copy_2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint32_t w0;
uint32_t w1;
uint32_t w2;
uint32_t w3;
- } T;
+ };
T t[2];
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -5811,8 +5746,7 @@ TEST(MSA_fill_copy_2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5837,10 +5771,10 @@ TEST(MSA_fill_copy_3) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint64_t d0;
uint64_t d1;
- } T;
+ };
T t[2];
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -5868,8 +5802,7 @@ TEST(MSA_fill_copy_3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5913,8 +5846,7 @@ void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5986,12 +5918,12 @@ TEST(MSA_move_v) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint64_t ws_lo;
uint64_t ws_hi;
uint64_t wd_lo;
uint64_t wd_hi;
- } T;
+ };
T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF,
0x706E51290AC76FB9},
{0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870,
@@ -6013,8 +5945,7 @@ TEST(MSA_move_v) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6031,12 +5962,12 @@ void run_msa_sldi(OperFunc GenerateOperation,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint64_t ws_lo;
uint64_t ws_hi;
uint64_t wd_lo;
uint64_t wd_hi;
- } T;
+ };
T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF,
0x706E51290AC76FB9},
{0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870,
@@ -6058,8 +5989,7 @@ void run_msa_sldi(OperFunc GenerateOperation,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6143,8 +6073,7 @@ void run_msa_ctc_cfc(uint32_t value) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6253,8 +6182,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6431,8 +6359,7 @@ uint32_t run_Ins(uint32_t imm, uint32_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
@@ -6481,8 +6408,7 @@ uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
@@ -6543,8 +6469,7 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6963,8 +6888,7 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8013,8 +7937,7 @@ void run_msa_vector(struct TestCaseMsaVector* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8101,8 +8024,7 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8574,8 +8496,7 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8652,8 +8573,7 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8731,8 +8651,7 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9134,7 +9053,7 @@ TEST(MSA_3R_instructions) {
}
#define SUBSUS_U_DF(T, lanes, mask) \
- typedef typename std::make_unsigned<T>::type uT; \
+ using uT = typename std::make_unsigned<T>::type; \
int size_in_bits = kMSARegSize / lanes; \
for (int i = 0; i < 2; i++) { \
uint64_t res = 0; \
@@ -9163,7 +9082,7 @@ TEST(MSA_3R_instructions) {
}
#define SUBSUU_S_DF(T, lanes, mask) \
- typedef typename std::make_unsigned<T>::type uT; \
+ using uT = typename std::make_unsigned<T>::type; \
int size_in_bits = kMSARegSize / lanes; \
for (int i = 0; i < 2; i++) { \
uint64_t res = 0; \
@@ -9737,8 +9656,7 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 5fd1ed7c09..3e1ac5902b 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -27,14 +27,14 @@
#include <iostream> // NOLINT(readability/streams)
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/disassembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/simulator.h"
#include "test/cctest/cctest.h"
@@ -43,11 +43,11 @@ namespace internal {
// Define these function prototypes to match JSEntryFunction in execution.cc.
// TODO(mips64): Refine these signatures per test case.
-typedef void*(F1)(int x, int p1, int p2, int p3, int p4);
-typedef void*(F2)(int x, int y, int p2, int p3, int p4);
-typedef void*(F3)(void* p, int p1, int p2, int p3, int p4);
-typedef void*(F4)(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
-typedef void*(F5)(void* p0, void* p1, int p2, int p3, int p4);
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F2 = void*(int x, int y, int p2, int p3, int p4);
+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
+using F4 = void*(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
+using F5 = void*(void* p0, void* p1, int p2, int p3, int p4);
#define __ assm.
@@ -65,8 +65,7 @@ TEST(MIPS0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
CHECK_EQ(0xABCL, res);
@@ -100,8 +99,7 @@ TEST(MIPS1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F1>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(50, 0, 0, 0, 0));
CHECK_EQ(1275L, res);
@@ -245,8 +243,7 @@ TEST(MIPS2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0xC, 0, 0, 0));
@@ -260,7 +257,7 @@ TEST(MIPS3) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
double c;
@@ -277,7 +274,7 @@ TEST(MIPS3) {
float fe;
float ff;
float fg;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -346,8 +343,7 @@ TEST(MIPS3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
t.a = 1.5e14;
@@ -394,14 +390,14 @@ TEST(MIPS4) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
double c;
double d;
int64_t high;
int64_t low;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -438,8 +434,7 @@ TEST(MIPS4) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e22;
t.b = 2.75e11;
@@ -461,12 +456,12 @@ TEST(MIPS5) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
int i;
int j;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -502,8 +497,7 @@ TEST(MIPS5) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e4;
t.b = 2.75e8;
@@ -524,7 +518,7 @@ TEST(MIPS6) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint32_t ui;
int32_t si;
int32_t r1;
@@ -533,7 +527,7 @@ TEST(MIPS6) {
int32_t r4;
int32_t r5;
int32_t r6;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -570,8 +564,7 @@ TEST(MIPS6) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x11223344;
t.si = 0x99AABBCC;
@@ -600,7 +593,7 @@ TEST(MIPS7) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
double c;
@@ -608,7 +601,7 @@ TEST(MIPS7) {
double e;
double f;
int32_t result;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -656,8 +649,7 @@ TEST(MIPS7) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -680,7 +672,7 @@ TEST(MIPS8) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
int32_t input;
int32_t result_rotr_4;
int32_t result_rotr_8;
@@ -696,7 +688,7 @@ TEST(MIPS8) {
int32_t result_rotrv_20;
int32_t result_rotrv_24;
int32_t result_rotrv_28;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -752,8 +744,7 @@ TEST(MIPS8) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.input = 0x12345678;
f.Call(&t, 0x0, 0, 0, 0);
@@ -797,7 +788,8 @@ TEST(MIPS9) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+ USE(code);
}
@@ -808,7 +800,7 @@ TEST(MIPS10) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double a_converted;
double b;
@@ -820,7 +812,7 @@ TEST(MIPS10) {
int32_t b_long_hi;
int32_t b_long_lo;
int64_t b_long_as_int64;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -871,8 +863,7 @@ TEST(MIPS10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.a = 2.147483647e9; // 0x7FFFFFFF -> 0x41DFFFFFFFC00000 as double.
t.b_long_hi = 0x000000FF; // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
@@ -900,7 +891,7 @@ TEST(MIPS11) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
int32_t reg_init;
int32_t mem_init;
int32_t lwl_0;
@@ -919,7 +910,7 @@ TEST(MIPS11) {
int32_t swr_1;
int32_t swr_2;
int32_t swr_3;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -1005,8 +996,7 @@ TEST(MIPS11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.reg_init = 0xAABBCCDD;
t.mem_init = 0x11223344;
@@ -1063,14 +1053,14 @@ TEST(MIPS12) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
- int32_t x;
- int32_t y;
- int32_t y1;
- int32_t y2;
- int32_t y3;
- int32_t y4;
- } T;
+ struct T {
+ int32_t x;
+ int32_t y;
+ int32_t y1;
+ int32_t y2;
+ int32_t y3;
+ int32_t y4;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -1129,8 +1119,7 @@ TEST(MIPS12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.x = 1;
t.y = 2;
@@ -1151,14 +1140,14 @@ TEST(MIPS13) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double cvt_big_out;
double cvt_small_out;
uint32_t trunc_big_out;
uint32_t trunc_small_out;
uint32_t cvt_big_in;
uint32_t cvt_small_in;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -1182,8 +1171,7 @@ TEST(MIPS13) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.cvt_big_in = 0xFFFFFFFF;
@@ -1218,7 +1206,7 @@ TEST(MIPS14) {
uint32_t x##_err4_out; \
int32_t x##_invalid_result;
- typedef struct {
+ struct T {
double round_up_in;
double round_down_in;
double neg_round_up_in;
@@ -1233,7 +1221,7 @@ TEST(MIPS14) {
ROUND_STRUCT_ELEMENT(ceil)
ROUND_STRUCT_ELEMENT(trunc)
ROUND_STRUCT_ELEMENT(cvt)
- } T;
+ };
T t;
#undef ROUND_STRUCT_ELEMENT
@@ -1302,8 +1290,7 @@ TEST(MIPS14) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.round_up_in = 123.51;
@@ -1430,8 +1417,7 @@ TEST(MIPS16) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.ui = 0x44332211;
t.si = 0x99AABBCC;
@@ -1514,7 +1500,7 @@ TEST(seleqz_selnez) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test {
+ struct Test {
int a;
int b;
int c;
@@ -1527,7 +1513,7 @@ TEST(seleqz_selnez) {
float j;
float k;
float l;
- } Test;
+ };
Test test;
// Integer part of test.
@@ -1557,8 +1543,7 @@ TEST(seleqz_selnez) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
f.Call(&test, 0, 0, 0, 0);
@@ -1672,8 +1657,7 @@ TEST(min_max) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 4; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -1700,11 +1684,11 @@ TEST(rint_d) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double a;
double b;
int fcsr;
- }TestFloat;
+ };
TestFloat test;
double inputs[kTableLength] = {18446744073709551617.0,
@@ -1780,8 +1764,7 @@ TEST(rint_d) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@@ -1803,14 +1786,14 @@ TEST(sel) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test {
+ struct Test {
double dd;
double ds;
double dt;
float fd;
float fs;
float ft;
- } Test;
+ };
Test test;
__ Ldc1(f0, MemOperand(a0, offsetof(Test, dd))); // test
@@ -1827,8 +1810,7 @@ TEST(sel) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const int test_size = 3;
@@ -1877,11 +1859,11 @@ TEST(rint_s) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float b;
int fcsr;
- }TestFloat;
+ };
TestFloat test;
float inputs[kTableLength] = {18446744073709551617.0,
@@ -1959,8 +1941,7 @@ TEST(rint_s) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
@@ -2044,8 +2025,7 @@ TEST(mina_maxa) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -2079,13 +2059,13 @@ TEST(trunc_l) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int64_t c; // a trunc result
int64_t d; // b trunc result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2125,8 +2105,7 @@ TEST(trunc_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2152,7 +2131,7 @@ TEST(movz_movn) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
int64_t rt;
double a;
double b;
@@ -2164,7 +2143,7 @@ TEST(movz_movn) {
float dold;
float d1;
float dold1;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2205,8 +2184,7 @@ TEST(movz_movn) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2235,7 +2213,7 @@ TEST(movt_movd) {
const int kTableLength = 4;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- typedef struct test_float {
+ struct TestFloat {
double srcd;
double dstd;
double dstdold;
@@ -2248,7 +2226,7 @@ TEST(movt_movd) {
float dstfold1;
int32_t cc;
int32_t fcsr;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2306,7 +2284,7 @@ TEST(movt_movd) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
f.Call(&test, 0, 0, 0, 0);
@@ -2334,11 +2312,11 @@ TEST(cvt_w_d) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
double a;
int32_t b;
int fcsr;
- }Test;
+ };
const int kTableLength = 24;
double inputs[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2391,8 +2369,7 @@ TEST(cvt_w_d) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int j = 0; j < 4; j++) {
test.fcsr = fcsr_inputs[j];
@@ -2411,13 +2388,13 @@ TEST(trunc_w) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int32_t c; // a trunc result
int32_t d; // b trunc result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2458,8 +2435,7 @@ TEST(trunc_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2481,13 +2457,13 @@ TEST(round_w) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int32_t c; // a trunc result
int32_t d; // b trunc result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2527,8 +2503,7 @@ TEST(round_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2550,13 +2525,13 @@ TEST(round_l) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int64_t c;
int64_t d;
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2597,8 +2572,7 @@ TEST(round_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2622,14 +2596,14 @@ TEST(sub) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float b;
float resultS;
double c;
double d;
double resultD;
- }TestFloat;
+ };
TestFloat test;
double inputfs_D[kTableLength] = {
@@ -2669,8 +2643,7 @@ TEST(sub) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2695,7 +2668,7 @@ TEST(sqrt_rsqrt_recip) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float resultS;
float resultS1;
@@ -2704,7 +2677,7 @@ TEST(sqrt_rsqrt_recip) {
double resultD;
double resultD1;
double resultD2;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2741,8 +2714,7 @@ TEST(sqrt_rsqrt_recip) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
@@ -2786,12 +2758,12 @@ TEST(neg) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float resultS;
double c;
double resultD;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2819,8 +2791,7 @@ TEST(neg) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_S[i];
@@ -2840,14 +2811,14 @@ TEST(mul) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float a;
float b;
float resultS;
double c;
double d;
double resultD;
- }TestFloat;
+ };
TestFloat test;
double inputfs_D[kTableLength] = {
@@ -2877,8 +2848,7 @@ TEST(mul) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputfs_S[i];
@@ -2899,12 +2869,12 @@ TEST(mov) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double a;
double b;
float c;
float d;
- }TestFloat;
+ };
TestFloat test;
double inputs_D[kTableLength] = {
@@ -2932,8 +2902,7 @@ TEST(mov) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -2952,13 +2921,13 @@ TEST(floor_w) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int32_t c; // a floor result
int32_t d; // b floor result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -2999,8 +2968,7 @@ TEST(floor_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3022,13 +2990,13 @@ TEST(floor_l) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int64_t c;
int64_t d;
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -3069,8 +3037,7 @@ TEST(floor_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3093,13 +3060,13 @@ TEST(ceil_w) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int32_t c; // a floor result
int32_t d; // b floor result
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -3140,8 +3107,7 @@ TEST(ceil_w) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3163,13 +3129,13 @@ TEST(ceil_l) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
- typedef struct test_float {
+ struct Test {
uint32_t isNaN2008;
double a;
float b;
int64_t c;
int64_t d;
- }Test;
+ };
const int kTableLength = 15;
double inputs_D[kTableLength] = {
2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
@@ -3210,8 +3176,7 @@ TEST(ceil_l) {
Test test;
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputs_D[i];
@@ -3247,8 +3212,6 @@ TEST(jump_tables1) {
Label done;
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
- PredictableCodeSizeScope predictable(&assm,
- (kNumCases * 2 + 6) * kInstrSize);
__ nal();
__ dsll(at, a0, 3); // In delay slot.
@@ -3279,8 +3242,7 @@ TEST(jump_tables1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3324,8 +3286,6 @@ TEST(jump_tables2) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
- PredictableCodeSizeScope predictable(&assm,
- (kNumCases * 2 + 6) * kInstrSize);
__ nal();
__ dsll(at, a0, 3); // In delay slot.
@@ -3346,8 +3306,7 @@ TEST(jump_tables2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3388,7 +3347,7 @@ TEST(jump_tables3) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
obj = *values[i];
- imm64 = obj->ptr();
+ imm64 = obj.ptr();
__ lui(v0, (imm64 >> 32) & kImm16Mask);
__ ori(v0, v0, (imm64 >> 16) & kImm16Mask);
__ dsll(v0, v0, 16);
@@ -3401,8 +3360,6 @@ TEST(jump_tables3) {
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
- PredictableCodeSizeScope predictable(&assm,
- (kNumCases * 2 + 6) * kInstrSize);
__ nal();
__ dsll(at, a0, 3); // In delay slot.
@@ -3423,8 +3380,7 @@ TEST(jump_tables3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -3449,14 +3405,14 @@ TEST(BITSWAP) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
int64_t r1;
int64_t r2;
int64_t r3;
int64_t r4;
int64_t r5;
int64_t r6;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -3496,8 +3452,7 @@ TEST(BITSWAP) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.r1 = 0x00102100781A15C3;
t.r2 = 0x001021008B71FCDE;
@@ -3524,7 +3479,7 @@ TEST(class_fmt) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double dSignalingNan;
double dQuietNan;
double dNegInf;
@@ -3544,7 +3499,8 @@ TEST(class_fmt) {
float fPosInf;
float fPosNorm;
float fPosSubnorm;
- float fPosZero; } T;
+ float fPosZero;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -3637,8 +3593,7 @@ TEST(class_fmt) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
// Double test values.
@@ -3699,12 +3654,12 @@ TEST(ABS) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
int64_t fir;
double a;
float b;
double fcsr;
- } TestFloat;
+ };
TestFloat test;
@@ -3730,8 +3685,7 @@ TEST(ABS) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = -2.0;
test.b = -2.0;
@@ -3797,14 +3751,14 @@ TEST(ADD_FMT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double a;
double b;
double c;
float fa;
float fb;
float fc;
- } TestFloat;
+ };
TestFloat test;
@@ -3823,8 +3777,7 @@ TEST(ADD_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.a = 2.0;
test.b = 3.0;
@@ -3867,7 +3820,7 @@ TEST(C_COND_FMT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double dOp1;
double dOp2;
uint32_t dF;
@@ -3888,7 +3841,7 @@ TEST(C_COND_FMT) {
uint32_t fUlt;
uint32_t fOle;
uint32_t fUle;
- } TestFloat;
+ };
TestFloat test;
@@ -3977,8 +3930,7 @@ TEST(C_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.dOp1 = 2.0;
test.dOp2 = 3.0;
@@ -4078,7 +4030,7 @@ TEST(CMP_COND_FMT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double dOp1;
double dOp2;
double dF;
@@ -4105,7 +4057,7 @@ TEST(CMP_COND_FMT) {
float fOr;
float fUne;
float fNe;
- } TestFloat;
+ };
TestFloat test;
@@ -4177,8 +4129,7 @@ TEST(CMP_COND_FMT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
uint64_t dTrue = 0xFFFFFFFFFFFFFFFF;
uint64_t dFalse = 0x0000000000000000;
@@ -4294,7 +4245,7 @@ TEST(CVT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
float cvt_d_s_in;
double cvt_d_s_out;
int32_t cvt_d_w_in;
@@ -4318,7 +4269,7 @@ TEST(CVT) {
int32_t cvt_w_s_out;
double cvt_w_d_in;
int32_t cvt_w_d_out;
- } TestFloat;
+ };
TestFloat test;
@@ -4355,8 +4306,7 @@ TEST(CVT) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
test.cvt_d_s_in = -0.51;
@@ -4491,14 +4441,14 @@ TEST(DIV_FMT) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
- typedef struct test {
+ struct Test {
double dOp1;
double dOp2;
double dRes;
float fOp1;
float fOp2;
float fRes;
- } Test;
+ };
Test test;
@@ -4526,8 +4476,7 @@ TEST(DIV_FMT) {
__ nop();
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
f.Call(&test, 0, 0, 0, 0);
@@ -4617,8 +4566,7 @@ uint64_t run_align(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F4>::FromCode(*code);
@@ -4672,8 +4620,7 @@ uint64_t run_dalign(uint64_t rs_value, uint64_t rt_value, uint8_t bp) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F4>::FromCode(*code);
uint64_t res =
@@ -4732,8 +4679,7 @@ uint64_t run_aluipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint64_t)code->entry(); // Set the program counter.
@@ -4785,8 +4731,7 @@ uint64_t run_auipc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint64_t)code->entry(); // Set the program counter.
@@ -4839,8 +4784,7 @@ uint64_t run_aui(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4863,8 +4807,7 @@ uint64_t run_daui(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4887,8 +4830,7 @@ uint64_t run_dahi(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -4911,8 +4853,7 @@ uint64_t run_dati(uint64_t rs, uint16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5014,8 +4955,7 @@ uint64_t run_li_macro(uint64_t imm, LiFlags mode, int32_t num_instr = 0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5226,8 +5166,7 @@ uint64_t run_lwpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5303,8 +5242,7 @@ uint64_t run_lwupc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5383,8 +5321,7 @@ uint64_t run_jic(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5454,8 +5391,7 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5511,12 +5447,12 @@ inline void store_elements_of_vector(MacroAssembler& assm, MSARegister w,
__ st_d(w, MemOperand(a, 0));
}
-typedef union {
+union msa_reg_t {
uint8_t b[16];
uint16_t h[8];
uint32_t w[4];
uint64_t d[2];
-} msa_reg_t;
+};
struct TestCaseMsaBranch {
uint64_t wt_lo;
@@ -5532,12 +5468,12 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
CpuFeatureScope fscope(&assm, MIPS_SIMD);
- typedef struct {
+ struct T {
uint64_t ws_lo;
uint64_t ws_hi;
uint64_t wd_lo;
uint64_t wd_hi;
- } T;
+ };
T t = {0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x0000000000000000,
0x0000000000000000};
msa_reg_t res;
@@ -5557,8 +5493,7 @@ void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -5727,8 +5662,7 @@ uint64_t run_jialc(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5778,8 +5712,7 @@ uint64_t run_addiupc(int32_t imm19) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
PC = (uint64_t)code->entry(); // Set the program counter.
@@ -5853,8 +5786,7 @@ uint64_t run_ldpc(int offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -5941,8 +5873,7 @@ int64_t run_bc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -6022,8 +5953,7 @@ int64_t run_balc(int32_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -6071,8 +6001,7 @@ uint64_t run_dsll(uint64_t rt_value, uint16_t sa_value) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F4>::FromCode(*code);
@@ -6128,8 +6057,7 @@ uint64_t run_bal(int16_t offset) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
@@ -6182,8 +6110,7 @@ TEST(Trampoline) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(42, 42, 0, 0, 0));
@@ -6248,8 +6175,7 @@ void helper_madd_msub_maddf_msubf(F func) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
@@ -6331,8 +6257,7 @@ uint64_t run_Subu(uint64_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6414,8 +6339,7 @@ uint64_t run_Dsubu(uint64_t imm, int32_t num_instr) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6510,8 +6434,7 @@ uint64_t run_Dins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6570,8 +6493,7 @@ uint64_t run_Ins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6638,8 +6560,7 @@ uint64_t run_Ext(uint64_t source, uint16_t pos, uint16_t size) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F2>::FromCode(*code);
uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
@@ -6675,7 +6596,7 @@ TEST(MSA_fill_copy) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint64_t u8;
uint64_t u16;
uint64_t u32;
@@ -6683,7 +6604,7 @@ TEST(MSA_fill_copy) {
uint64_t s16;
uint64_t s32;
uint64_t s64;
- } T;
+ };
T t;
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -6721,8 +6642,7 @@ TEST(MSA_fill_copy) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6746,10 +6666,10 @@ TEST(MSA_fill_copy_2) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint64_t d0;
uint64_t d1;
- } T;
+ };
T t[2];
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -6784,8 +6704,7 @@ TEST(MSA_fill_copy_2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6806,10 +6725,10 @@ TEST(MSA_fill_copy_3) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint64_t d0;
uint64_t d1;
- } T;
+ };
T t[2];
MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
@@ -6837,8 +6756,7 @@ TEST(MSA_fill_copy_3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6886,8 +6804,7 @@ void run_msa_insert(int64_t rs_value, int n, msa_reg_t* w) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -6996,8 +6913,7 @@ void run_msa_ctc_cfc(uint64_t value) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7018,12 +6934,12 @@ TEST(MSA_move_v) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint64_t ws_lo;
uint64_t ws_hi;
uint64_t wd_lo;
uint64_t wd_hi;
- } T;
+ };
T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF,
0x706E51290AC76FB9},
{0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870,
@@ -7045,8 +6961,7 @@ TEST(MSA_move_v) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7063,12 +6978,12 @@ void run_msa_sldi(OperFunc GenerateOperation,
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
uint64_t ws_lo;
uint64_t ws_hi;
uint64_t wd_lo;
uint64_t wd_hi;
- } T;
+ };
T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF,
0x706E51290AC76FB9},
{0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870,
@@ -7090,8 +7005,7 @@ void run_msa_sldi(OperFunc GenerateOperation,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7250,8 +7164,7 @@ void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7454,8 +7367,7 @@ void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -7880,8 +7792,7 @@ void run_msa_2r(const struct TestCaseMsa2R* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -8930,8 +8841,7 @@ void run_msa_vector(struct TestCaseMsaVector* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9018,8 +8928,7 @@ void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9491,8 +9400,7 @@ void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9569,8 +9477,7 @@ void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -9648,8 +9555,7 @@ void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -10050,7 +9956,7 @@ TEST(MSA_3R_instructions) {
}
#define SUBSUS_U_DF(T, lanes, mask) \
- typedef typename std::make_unsigned<T>::type uT; \
+ using uT = typename std::make_unsigned<T>::type; \
int size_in_bits = kMSARegSize / lanes; \
for (int i = 0; i < 2; i++) { \
uint64_t res = 0; \
@@ -10079,7 +9985,7 @@ TEST(MSA_3R_instructions) {
}
#define SUBSUU_S_DF(T, lanes, mask) \
- typedef typename std::make_unsigned<T>::type uT; \
+ using uT = typename std::make_unsigned<T>::type; \
int size_in_bits = kMSARegSize / lanes; \
for (int i = 0; i < 2; i++) { \
uint64_t res = 0; \
@@ -10653,8 +10559,7 @@ void run_msa_3rf(const struct TestCaseMsa3RF* input,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index 1f7a9e0eec..64ca5edc88 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/disassembler.h"
+#include "src/codegen/ppc/assembler-ppc-inl.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
-#include "src/ppc/assembler-ppc-inl.h"
-#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
@@ -60,8 +60,7 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -96,8 +95,7 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -144,8 +142,7 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -161,11 +158,11 @@ TEST(3) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
int i;
char c;
int16_t s;
- } T;
+ };
T t;
Assembler assm(AssemblerOptions{});
@@ -213,8 +210,7 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -237,7 +233,7 @@ TEST(4) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct T {
double a;
double b;
double c;
@@ -251,7 +247,7 @@ TEST(4) {
double n;
float x;
float y;
- } T;
+ };
T t;
// Create a function that accepts &t, and loads, manipulates, and stores
@@ -627,7 +623,7 @@ TEST(8) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct D {
double a;
double b;
double c;
@@ -636,10 +632,10 @@ TEST(8) {
double f;
double g;
double h;
- } D;
+ };
D d;
- typedef struct {
+ struct F {
float a;
float b;
float c;
@@ -648,7 +644,7 @@ TEST(8) {
float f;
float g;
float h;
- } F;
+ };
F f;
// Create a function that uses vldm/vstm to move some double and
@@ -738,7 +734,7 @@ TEST(9) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct D {
double a;
double b;
double c;
@@ -747,10 +743,10 @@ TEST(9) {
double f;
double g;
double h;
- } D;
+ };
D d;
- typedef struct {
+ struct F {
float a;
float b;
float c;
@@ -759,7 +755,7 @@ TEST(9) {
float f;
float g;
float h;
- } F;
+ };
F f;
// Create a function that uses vldm/vstm to move some double and
@@ -853,7 +849,7 @@ TEST(10) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct D {
double a;
double b;
double c;
@@ -862,10 +858,10 @@ TEST(10) {
double f;
double g;
double h;
- } D;
+ };
D d;
- typedef struct {
+ struct F {
float a;
float b;
float c;
@@ -874,7 +870,7 @@ TEST(10) {
float f;
float g;
float h;
- } F;
+ };
F f;
// Create a function that uses vldm/vstm to move some double and
@@ -964,12 +960,12 @@ TEST(11) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- typedef struct {
+ struct I {
int32_t a;
int32_t b;
int32_t c;
int32_t d;
- } I;
+ };
I i;
i.a = 0xABCD0001;
diff --git a/deps/v8/test/cctest/test-assembler-s390.cc b/deps/v8/test/cctest/test-assembler-s390.cc
index 447a9c048f..18dc72a16e 100644
--- a/deps/v8/test/cctest/test-assembler-s390.cc
+++ b/deps/v8/test/cctest/test-assembler-s390.cc
@@ -25,13 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/disassembler.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/s390/assembler-s390-inl.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/s390/assembler-s390-inl.h"
-#include "src/simulator.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
@@ -63,8 +63,7 @@ TEST(0) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -102,8 +101,7 @@ TEST(1) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -153,8 +151,7 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -208,8 +205,7 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -486,8 +482,7 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -540,8 +535,7 @@ TEST(11) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -594,8 +588,7 @@ TEST(12) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
code->Print();
#endif
@@ -605,6 +598,341 @@ TEST(12) {
CHECK_EQ(0, static_cast<int>(res));
}
+// vector basics
+TEST(13) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(AssemblerOptions{});
+
+ Label done, error;
+
+ // vector loads, replicate, and arithmetics
+ __ vrepi(d2, Operand(100), Condition(2));
+ __ lay(sp, MemOperand(sp, -4));
+ __ sty(r3, MemOperand(sp));
+ __ vlrep(d3, MemOperand(sp), Condition(2));
+ __ lay(sp, MemOperand(sp, 4));
+ __ vlvg(d4, r2, MemOperand(r0, 2), Condition(2));
+ __ vrep(d4, d4, Operand(2), Condition(2));
+ __ lay(sp, MemOperand(sp, -kSimd128Size));
+ __ vst(d4, MemOperand(sp), Condition(0));
+ __ va(d2, d2, d3, Condition(0), Condition(0), Condition(2));
+ __ vl(d3, MemOperand(sp), Condition(0));
+ __ lay(sp, MemOperand(sp, kSimd128Size));
+ __ vs(d2, d2, d3, Condition(0), Condition(0), Condition(2));
+ __ vml(d3, d3, d2, Condition(0), Condition(0), Condition(2));
+ __ lay(sp, MemOperand(sp, -4));
+ __ vstef(d3, MemOperand(sp), Condition(3));
+ __ vlef(d2, MemOperand(sp), Condition(0));
+ __ lay(sp, MemOperand(sp, 4));
+ __ vlgv(r2, d2, MemOperand(r0, 0), Condition(2));
+ __ cfi(r2, Operand(15000));
+ __ bne(&error);
+ __ vrepi(d2, Operand(-30), Condition(3));
+ __ vlc(d2, d2, Condition(0), Condition(0), Condition(3));
+ __ vlgv(r2, d2, MemOperand(r0, 1), Condition(3));
+ __ lgfi(r1, Operand(-30));
+ __ lcgr(r1, r1);
+ __ cgr(r1, r2);
+ __ bne(&error);
+ __ lgfi(r2, Operand(0));
+ __ b(&done);
+ __ bind(&error);
+ __ lgfi(r2, Operand(1));
+ __ bind(&done);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+#ifdef DEBUG
+ code->Print();
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(50, 250, 0, 0, 0));
+ ::printf("f() = %" V8PRIxPTR "\n", res);
+ CHECK_EQ(0, static_cast<int>(res));
+}
+
+
+// vector sum, packs, unpacks
+TEST(14) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(AssemblerOptions{});
+
+ Label done, error;
+
+ // vector sum word and doubleword
+ __ vrepi(d2, Operand(100), Condition(2));
+ __ vsumg(d1, d2, d2, Condition(0), Condition(0), Condition(2));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(3));
+ __ cfi(r2, Operand(300));
+ __ bne(&error);
+ __ vrepi(d1, Operand(0), Condition(1));
+ __ vrepi(d2, Operand(75), Condition(1));
+ __ vsum(d1, d2, d1, Condition(0), Condition(0), Condition(1));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(2));
+ __ cfi(r2, Operand(150));
+ __ bne(&error);
+ // vector packs
+ __ vrepi(d1, Operand(200), Condition(2));
+ __ vpk(d1, d1, d1, Condition(0), Condition(0), Condition(2));
+ __ vlgv(r2, d1, MemOperand(r0, 5), Condition(1));
+ __ cfi(r2, Operand(200));
+ __ bne(&error);
+ __ vrepi(d2, Operand(30), Condition(1));
+ __ vpks(d1, d1, d2, Condition(0), Condition(1));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(0));
+ __ vlgv(r3, d1, MemOperand(r0, 8), Condition(0));
+ __ ar(r2, r3);
+ __ cfi(r2, Operand(157));
+ __ bne(&error);
+ __ vrepi(d1, Operand(270), Condition(1));
+ __ vrepi(d2, Operand(-30), Condition(1));
+ __ vpkls(d1, d1, d2, Condition(0), Condition(1));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(0));
+ __ vlgv(r3, d1, MemOperand(r0, 8), Condition(0));
+ __ cfi(r2, Operand(255));
+ __ bne(&error);
+ __ cfi(r3, Operand(255));
+ __ bne(&error);
+ // vector unpacks
+ __ vrepi(d1, Operand(50), Condition(2));
+ __ lgfi(r1, Operand(10));
+ __ lgfi(r2, Operand(20));
+ __ vlvg(d1, r1, MemOperand(r0, 0), Condition(2));
+ __ vlvg(d1, r2, MemOperand(r0, 2), Condition(2));
+ __ vuph(d2, d1, Condition(0), Condition(0), Condition(2));
+ __ vupl(d1, d1, Condition(0), Condition(0), Condition(2));
+ __ va(d1, d1, d2, Condition(0), Condition(0), Condition(3));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(3));
+ __ vlgv(r3, d1, MemOperand(r0, 1), Condition(3));
+ __ ar(r2, r3);
+ __ cfi(r2, Operand(130));
+ __ bne(&error);
+ __ vrepi(d1, Operand(-100), Condition(2));
+ __ vuplh(d2, d1, Condition(0), Condition(0), Condition(2));
+ __ vupll(d1, d1, Condition(0), Condition(0), Condition(2));
+ __ va(d1, d1, d1, Condition(0), Condition(0), Condition(3));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(3));
+ __ cfi(r2, Operand(0x1ffffff38));
+ __ bne(&error);
+ __ lgfi(r2, Operand(0));
+ __ b(&done);
+ __ bind(&error);
+ __ lgfi(r2, Operand(1));
+ __ bind(&done);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+#ifdef DEBUG
+ code->Print();
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIxPTR "\n", res);
+ CHECK_EQ(0, static_cast<int>(res));
+}
+
+// vector comparisons
+TEST(15) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(AssemblerOptions{});
+
+ Label done, error;
+
+ // vector max and min
+ __ vrepi(d2, Operand(-50), Condition(2));
+ __ vrepi(d3, Operand(40), Condition(2));
+ __ vmx(d1, d2, d3, Condition(0), Condition(0), Condition(2));
+ __ vlgv(r1, d1, MemOperand(r0, 0), Condition(2));
+ __ vmnl(d1, d2, d3, Condition(0), Condition(0), Condition(2));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(2));
+ __ cgr(r1, r2);
+ __ vmxl(d1, d2, d3, Condition(0), Condition(0), Condition(2));
+ __ vlgv(r1, d1, MemOperand(r0, 0), Condition(2));
+ __ vmn(d1, d2, d3, Condition(0), Condition(0), Condition(2));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(2));
+ __ cgr(r1, r2);
+ __ bne(&error);
+ // vector comparisons
+ __ vlr(d4, d3, Condition(0), Condition(0), Condition(0));
+ __ vceq(d1, d3, d4, Condition(0), Condition(2));
+ __ vlgv(r1, d1, MemOperand(r0, 0), Condition(2));
+ __ vch(d1, d2, d3, Condition(0), Condition(2));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(2));
+ __ vchl(d1, d2, d3, Condition(0), Condition(2));
+ __ vlgv(r3, d1, MemOperand(r0, 0), Condition(2));
+ __ ar(r2, r3);
+ __ cgr(r1, r2);
+ __ bne(&error);
+ // vector bitwise ops
+ __ vrepi(d2, Operand(0), Condition(2));
+ __ vn(d1, d2, d3, Condition(0), Condition(0), Condition(0));
+ __ vceq(d1, d1, d2, Condition(0), Condition(2));
+ __ vlgv(r1, d1, MemOperand(r0, 0), Condition(2));
+ __ vo(d1, d2, d3, Condition(0), Condition(0), Condition(0));
+ __ vx(d1, d1, d2, Condition(0), Condition(0), Condition(0));
+ __ vceq(d1, d1, d3, Condition(0), Condition(2));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(2));
+ __ cgr(r1, r2);
+ __ bne(&error);
+ // vector bitwise shift
+ __ vceq(d1, d1, d1, Condition(0), Condition(2));
+ __ vesra(d1, d1, MemOperand(r0, 5), Condition(2));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(2));
+ __ cgr(r3, r2);
+ __ bne(&error);
+ __ lgfi(r1, Operand(0xfffff895));
+ __ vlvg(d1, r1, MemOperand(r0, 0), Condition(3));
+ __ vrep(d1, d1, Operand(0), Condition(3));
+ __ slag(r1, r1, Operand(10));
+ __ vesl(d1, d1, MemOperand(r0, 10), Condition(3));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(3));
+ __ cgr(r1, r2);
+ __ bne(&error);
+ __ srlg(r1, r1, Operand(10));
+ __ vesrl(d1, d1, MemOperand(r0, 10), Condition(3));
+ __ vlgv(r2, d1, MemOperand(r0, 0), Condition(3));
+ __ cgr(r1, r2);
+ __ bne(&error);
+ __ lgfi(r2, Operand(0));
+ __ b(&done);
+ __ bind(&error);
+ __ lgfi(r2, Operand(1));
+ __ bind(&done);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+#ifdef DEBUG
+ code->Print();
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIxPTR "\n", res);
+ CHECK_EQ(0, static_cast<int>(res));
+}
+
+// vector select and test mask
+TEST(16) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(AssemblerOptions{});
+
+ Label done, error;
+
+ // vector select
+ __ vrepi(d1, Operand(0x1011), Condition(1));
+ __ vrepi(d2, Operand(0x4343), Condition(1));
+ __ vrepi(d3, Operand(0x3434), Condition(1));
+ __ vsel(d1, d2, d3, d1, Condition(0), Condition(0));
+ __ vlgv(r2, d1, MemOperand(r0, 2), Condition(1));
+ __ cfi(r2, Operand(0x2425));
+ __ bne(&error);
+ // vector test mask
+ __ vtm(d2, d1, Condition(0), Condition(0), Condition(0));
+ __ b(Condition(0x1), &error);
+ __ b(Condition(0x8), &error);
+ __ lgfi(r2, Operand(0));
+ __ b(&done);
+ __ bind(&error);
+ __ lgfi(r2, Operand(1));
+ __ bind(&done);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+#ifdef DEBUG
+ code->Print();
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIxPTR "\n", res);
+ CHECK_EQ(0, static_cast<int>(res));
+}
+
+// vector fp instructions
+TEST(17) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(AssemblerOptions{});
+
+ Label done, error;
+
+ // vector fp arithmetics
+ __ cdgbr(d1, r3);
+ __ ldr(d2, d1);
+ __ vfa(d1, d1, d2, Condition(0), Condition(0), Condition(3));
+ __ cdgbr(d3, r2);
+ __ vfm(d1, d1, d3, Condition(0), Condition(0), Condition(3));
+ __ vfs(d1, d1, d2, Condition(0), Condition(0), Condition(3));
+ __ vfd(d1, d1, d3, Condition(0), Condition(0), Condition(3));
+ __ vfsq(d1, d1, Condition(0), Condition(0), Condition(3));
+ __ cgdbr(Condition(4), r2, d1);
+ __ cgfi(r2, Operand(0x8));
+ __ bne(&error);
+ // vector fp comparisons
+ __ cdgbra(Condition(4), d1, r3);
+ __ ldr(d2, d1);
+ __ vfa(d1, d1, d2, Condition(0), Condition(0), Condition(3));
+#ifdef VECTOR_ENHANCE_FACILITY_1
+ __ vfmin(d3, d1, d2, Condition(1), Condition(0), Condition(3));
+ __ vfmax(d4, d1, d2, Condition(1), Condition(0), Condition(3));
+#else
+ __ vlr(d3, d2, Condition(0), Condition(0), Condition(0));
+ __ vlr(d4, d1, Condition(0), Condition(0), Condition(0));
+#endif
+ __ vfch(d5, d4, d3, Condition(0), Condition(0), Condition(3));
+ __ vfche(d3, d3, d4, Condition(0), Condition(0), Condition(3));
+ __ vfce(d4, d1, d4, Condition(0), Condition(0), Condition(3));
+ __ va(d3, d3, d4, Condition(0), Condition(0), Condition(3));
+ __ vs(d3, d3, d5, Condition(0), Condition(0), Condition(3));
+ __ vlgv(r2, d3, MemOperand(r0, 0), Condition(3));
+ // vector fp sign ops
+ __ lgfi(r1, Operand(-0x50));
+ __ cdgbra(Condition(4), d1, r1);
+ __ vfpso(d1, d1, Condition(0), Condition(0), Condition(3));
+ __ vfi(d1, d1, Condition(5), Condition(0), Condition(3));
+ __ vlgv(r1, d1, MemOperand(r0, 0), Condition(3));
+ __ agr(r2, r1);
+ __ srlg(r2, r2, Operand(32));
+ __ cgfi(r2, Operand(0x40540000));
+ __ bne(&error);
+ __ lgfi(r2, Operand(0));
+ __ b(&done);
+ __ bind(&error);
+ __ lgfi(r2, Operand(1));
+ __ bind(&done);
+ __ b(r14);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+#ifdef DEBUG
+ code->Print();
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(0x2, 0x30, 0, 0, 0));
+ ::printf("f() = %" V8PRIxPTR "\n", res);
+ CHECK_EQ(0, static_cast<int>(res));
+}
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index ae23af9f87..e22bed0029 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -28,16 +28,16 @@
#include <cstdlib>
#include <iostream>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/double.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-#include "src/simulator.h"
+#include "src/numbers/double.h"
+#include "src/utils/ostreams.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
@@ -54,12 +54,12 @@ namespace internal {
// with GCC. A different convention is used on 64-bit windows,
// where the first four integer arguments are passed in RCX, RDX, R8 and R9.
-typedef int(F0)();
-typedef int(F1)(int64_t x);
-typedef int(F2)(int64_t x, int64_t y);
-typedef unsigned(F3)(double x);
-typedef uint64_t(F4)(uint64_t* x, uint64_t* y);
-typedef uint64_t(F5)(uint64_t x);
+using F0 = int();
+using F1 = int(int64_t x);
+using F2 = int(int64_t x, int64_t y);
+using F3 = unsigned(double x);
+using F4 = uint64_t(uint64_t* x, uint64_t* y);
+using F5 = uint64_t(uint64_t x);
#ifdef _WIN64
static const Register arg1 = rcx;
@@ -477,7 +477,7 @@ TEST(AssemblerX64TestlOperations) {
}
TEST(AssemblerX64TestwOperations) {
- typedef uint16_t(F)(uint16_t * x);
+ using F = uint16_t(uint16_t * x);
CcTest::InitializeVM();
auto buffer = AllocateAssemblerBuffer();
Assembler masm(AssemblerOptions{}, buffer->CreateView());
@@ -743,8 +743,7 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F0>::FromCode(*code);
int res = f.Call();
@@ -800,8 +799,7 @@ void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F0>::FromCode(*code);
int res = f.Call();
@@ -865,8 +863,7 @@ TEST(AssemblerX64Extractps) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -879,7 +876,7 @@ TEST(AssemblerX64Extractps) {
CHECK_EQ(0x87654321u, f.Call(uint64_to_double(value2)));
}
-typedef int(F6)(float x, float y);
+using F6 = int(float x, float y);
TEST(AssemblerX64SSE) {
CcTest::InitializeVM();
@@ -902,8 +899,7 @@ TEST(AssemblerX64SSE) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -933,8 +929,7 @@ TEST(AssemblerX64SSE3) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -944,7 +939,7 @@ TEST(AssemblerX64SSE3) {
CHECK_EQ(4, f.Call(1.0, 2.0));
}
-typedef int(F7)(double x, double y, double z);
+using F7 = int(double x, double y, double z);
TEST(AssemblerX64FMA_sd) {
CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(FMA3)) return;
@@ -963,7 +958,7 @@ TEST(AssemblerX64FMA_sd) {
__ mulsd(xmm3, xmm1);
__ addsd(xmm3, xmm2); // Expected result in xmm3
- __ subq(rsp, Immediate(kDoubleSize)); // For memory operand
+ __ AllocateStackSpace(kDoubleSize); // For memory operand
// vfmadd132sd
__ movl(rax, Immediate(1)); // Test number
__ movaps(xmm8, xmm0);
@@ -1158,8 +1153,7 @@ TEST(AssemblerX64FMA_sd) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1170,7 +1164,7 @@ TEST(AssemblerX64FMA_sd) {
0, f.Call(0.000092662107262076, -2.460774966188315, -1.0958787393627414));
}
-typedef int(F8)(float x, float y, float z);
+using F8 = int(float x, float y, float z);
TEST(AssemblerX64FMA_ss) {
CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(FMA3)) return;
@@ -1189,7 +1183,7 @@ TEST(AssemblerX64FMA_ss) {
__ mulss(xmm3, xmm1);
__ addss(xmm3, xmm2); // Expected result in xmm3
- __ subq(rsp, Immediate(kDoubleSize)); // For memory operand
+ __ AllocateStackSpace(kDoubleSize); // For memory operand
// vfmadd132ss
__ movl(rax, Immediate(1)); // Test number
__ movaps(xmm8, xmm0);
@@ -1384,8 +1378,7 @@ TEST(AssemblerX64FMA_ss) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1460,8 +1453,7 @@ TEST(AssemblerX64SSE_ss) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1546,8 +1538,7 @@ TEST(AssemblerX64AVX_ss) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1786,8 +1777,7 @@ TEST(AssemblerX64AVX_sd) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -1978,8 +1968,7 @@ TEST(AssemblerX64BMI1) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -2038,8 +2027,7 @@ TEST(AssemblerX64LZCNT) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -2098,8 +2086,7 @@ TEST(AssemblerX64POPCNT) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -2361,8 +2348,7 @@ TEST(AssemblerX64BMI2) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
@@ -2405,8 +2391,7 @@ TEST(AssemblerX64JumpTables1) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2453,8 +2438,7 @@ TEST(AssemblerX64JumpTables2) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -2485,7 +2469,7 @@ TEST(AssemblerX64PslldWithXmm15) {
CHECK_EQ(uint64_t{0x22446688AACCEF10}, result);
}
-typedef float(F9)(float x, float y);
+using F9 = float(float x, float y);
TEST(AssemblerX64vmovups) {
CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(AVX)) return;
@@ -2500,7 +2484,7 @@ TEST(AssemblerX64vmovups) {
__ shufps(xmm0, xmm0, 0x0); // brocast first argument
__ shufps(xmm1, xmm1, 0x0); // brocast second argument
// copy xmm1 to xmm0 through the stack to test the "vmovups reg, mem".
- __ subq(rsp, Immediate(kSimd128Size));
+ __ AllocateStackSpace(kSimd128Size);
__ vmovups(Operand(rsp, 0), xmm1);
__ vmovups(xmm0, Operand(rsp, 0));
__ addq(rsp, Immediate(kSimd128Size));
@@ -2510,8 +2494,7 @@ TEST(AssemblerX64vmovups) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
StdoutStream os;
code->Print(os);
diff --git a/deps/v8/test/cctest/test-atomicops.cc b/deps/v8/test/cctest/test-atomicops.cc
index 92421138cb..3ab3ac7c37 100644
--- a/deps/v8/test/cctest/test-atomicops.cc
+++ b/deps/v8/test/cctest/test-atomicops.cc
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/atomicops.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-bignum-dtoa.cc b/deps/v8/test/cctest/test-bignum-dtoa.cc
index 42562958e6..a4df7d6f15 100644
--- a/deps/v8/test/cctest/test-bignum-dtoa.cc
+++ b/deps/v8/test/cctest/test-bignum-dtoa.cc
@@ -27,12 +27,12 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/bignum-dtoa.h"
+#include "src/numbers/bignum-dtoa.h"
#include "src/base/platform/platform.h"
-#include "src/double.h"
+#include "src/numbers/double.h"
#include "test/cctest/cctest.h"
#include "test/cctest/gay-fixed.h"
#include "test/cctest/gay-precision.h"
@@ -42,18 +42,14 @@ namespace v8 {
namespace internal {
namespace test_bignum_dtoa {
-// Removes trailing '0' digits.
-// Can return the empty string if all digits are 0.
-static void TrimRepresentation(Vector<char> representation) {
- int len = StrLength(representation.start());
- int i;
- for (i = len - 1; i >= 0; --i) {
- if (representation[i] != '0') break;
- }
- representation[i + 1] = '\0';
+// Removes trailing '0' digits (modifies {representation}). Can create an empty
+// string if all digits are 0.
+static void TrimRepresentation(char* representation) {
+ size_t len = strlen(representation);
+ while (len > 0 && representation[len - 1] == '0') --len;
+ representation[len] = '\0';
}
-
static const int kBufferSize = 100;
@@ -64,193 +60,193 @@ TEST(BignumDtoaVariousDoubles) {
int point;
BignumDtoa(1.0, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
BignumDtoa(1.0, BIGNUM_DTOA_FIXED, 3, buffer, &length, &point);
CHECK_GE(3, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
BignumDtoa(1.0, BIGNUM_DTOA_PRECISION, 3, buffer, &length, &point);
CHECK_GE(3, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
BignumDtoa(1.5, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ(0, strcmp("15", buffer.start()));
+ CHECK_EQ(0, strcmp("15", buffer.begin()));
CHECK_EQ(1, point);
BignumDtoa(1.5, BIGNUM_DTOA_FIXED, 10, buffer, &length, &point);
CHECK_GE(10, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("15", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("15", buffer.begin()));
CHECK_EQ(1, point);
BignumDtoa(1.5, BIGNUM_DTOA_PRECISION, 10, buffer, &length, &point);
CHECK_GE(10, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("15", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("15", buffer.begin()));
CHECK_EQ(1, point);
double min_double = 5e-324;
BignumDtoa(min_double, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ(0, strcmp("5", buffer.start()));
+ CHECK_EQ(0, strcmp("5", buffer.begin()));
CHECK_EQ(-323, point);
BignumDtoa(min_double, BIGNUM_DTOA_FIXED, 5, buffer, &length, &point);
CHECK_GE(5, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("", buffer.begin()));
BignumDtoa(min_double, BIGNUM_DTOA_PRECISION, 5, buffer, &length, &point);
CHECK_GE(5, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("49407", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("49407", buffer.begin()));
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
BignumDtoa(max_double, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ(0, strcmp("17976931348623157", buffer.start()));
+ CHECK_EQ(0, strcmp("17976931348623157", buffer.begin()));
CHECK_EQ(309, point);
BignumDtoa(max_double, BIGNUM_DTOA_PRECISION, 7, buffer, &length, &point);
CHECK_GE(7, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("1797693", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("1797693", buffer.begin()));
CHECK_EQ(309, point);
BignumDtoa(4294967272.0, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ(0, strcmp("4294967272", buffer.start()));
+ CHECK_EQ(0, strcmp("4294967272", buffer.begin()));
CHECK_EQ(10, point);
BignumDtoa(4294967272.0, BIGNUM_DTOA_FIXED, 5, buffer, &length, &point);
- CHECK_EQ(0, strcmp("429496727200000", buffer.start()));
+ CHECK_EQ(0, strcmp("429496727200000", buffer.begin()));
CHECK_EQ(10, point);
BignumDtoa(4294967272.0, BIGNUM_DTOA_PRECISION, 14, buffer, &length, &point);
CHECK_GE(14, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("4294967272", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("4294967272", buffer.begin()));
CHECK_EQ(10, point);
BignumDtoa(4.1855804968213567e298, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ(0, strcmp("4185580496821357", buffer.start()));
+ CHECK_EQ(0, strcmp("4185580496821357", buffer.begin()));
CHECK_EQ(299, point);
BignumDtoa(4.1855804968213567e298, BIGNUM_DTOA_PRECISION, 20,
buffer, &length, &point);
CHECK_GE(20, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("41855804968213567225", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("41855804968213567225", buffer.begin()));
CHECK_EQ(299, point);
BignumDtoa(5.5626846462680035e-309, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ(0, strcmp("5562684646268003", buffer.start()));
+ CHECK_EQ(0, strcmp("5562684646268003", buffer.begin()));
CHECK_EQ(-308, point);
BignumDtoa(5.5626846462680035e-309, BIGNUM_DTOA_PRECISION, 1,
buffer, &length, &point);
CHECK_GE(1, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("6", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("6", buffer.begin()));
CHECK_EQ(-308, point);
BignumDtoa(2147483648.0, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ(0, strcmp("2147483648", buffer.start()));
+ CHECK_EQ(0, strcmp("2147483648", buffer.begin()));
CHECK_EQ(10, point);
BignumDtoa(2147483648.0, BIGNUM_DTOA_FIXED, 2,
buffer, &length, &point);
CHECK_GE(2, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("2147483648", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("2147483648", buffer.begin()));
CHECK_EQ(10, point);
BignumDtoa(2147483648.0, BIGNUM_DTOA_PRECISION, 5,
buffer, &length, &point);
CHECK_GE(5, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("21475", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("21475", buffer.begin()));
CHECK_EQ(10, point);
BignumDtoa(3.5844466002796428e+298, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ(0, strcmp("35844466002796428", buffer.start()));
+ CHECK_EQ(0, strcmp("35844466002796428", buffer.begin()));
CHECK_EQ(299, point);
BignumDtoa(3.5844466002796428e+298, BIGNUM_DTOA_PRECISION, 10,
buffer, &length, &point);
CHECK_GE(10, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("35844466", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("35844466", buffer.begin()));
CHECK_EQ(299, point);
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
BignumDtoa(v, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ(0, strcmp("22250738585072014", buffer.start()));
+ CHECK_EQ(0, strcmp("22250738585072014", buffer.begin()));
CHECK_EQ(-307, point);
BignumDtoa(v, BIGNUM_DTOA_PRECISION, 20, buffer, &length, &point);
CHECK_GE(20, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("22250738585072013831", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("22250738585072013831", buffer.begin()));
CHECK_EQ(-307, point);
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
v = Double(largest_denormal64).value();
BignumDtoa(v, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ(0, strcmp("2225073858507201", buffer.start()));
+ CHECK_EQ(0, strcmp("2225073858507201", buffer.begin()));
CHECK_EQ(-307, point);
BignumDtoa(v, BIGNUM_DTOA_PRECISION, 20, buffer, &length, &point);
CHECK_GE(20, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("2225073858507200889", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("2225073858507200889", buffer.begin()));
CHECK_EQ(-307, point);
BignumDtoa(4128420500802942e-24, BIGNUM_DTOA_SHORTEST, 0,
buffer, &length, &point);
- CHECK_EQ(0, strcmp("4128420500802942", buffer.start()));
+ CHECK_EQ(0, strcmp("4128420500802942", buffer.begin()));
CHECK_EQ(-8, point);
v = 3.9292015898194142585311918e-10;
BignumDtoa(v, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
- CHECK_EQ(0, strcmp("39292015898194143", buffer.start()));
+ CHECK_EQ(0, strcmp("39292015898194143", buffer.begin()));
v = 4194304.0;
BignumDtoa(v, BIGNUM_DTOA_FIXED, 5, buffer, &length, &point);
CHECK_GE(5, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("4194304", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("4194304", buffer.begin()));
v = 3.3161339052167390562200598e-237;
BignumDtoa(v, BIGNUM_DTOA_PRECISION, 19, buffer, &length, &point);
CHECK_GE(19, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("3316133905216739056", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("3316133905216739056", buffer.begin()));
CHECK_EQ(-236, point);
v = 7.9885183916008099497815232e+191;
BignumDtoa(v, BIGNUM_DTOA_PRECISION, 4, buffer, &length, &point);
CHECK_GE(4, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("7989", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("7989", buffer.begin()));
CHECK_EQ(192, point);
v = 1.0000000000000012800000000e+17;
BignumDtoa(v, BIGNUM_DTOA_FIXED, 1, buffer, &length, &point);
CHECK_GE(1, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("100000000000000128", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("100000000000000128", buffer.begin()));
CHECK_EQ(18, point);
}
@@ -268,7 +264,7 @@ TEST(BignumDtoaGayShortest) {
double v = current_test.v;
BignumDtoa(v, BIGNUM_DTOA_SHORTEST, 0, buffer, &length, &point);
CHECK_EQ(current_test.decimal_point, point);
- CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.begin()));
}
}
@@ -288,8 +284,8 @@ TEST(BignumDtoaGayFixed) {
BignumDtoa(v, BIGNUM_DTOA_FIXED, number_digits, buffer, &length, &point);
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.begin()));
}
}
@@ -310,8 +306,8 @@ TEST(BignumDtoaGayPrecision) {
buffer, &length, &point);
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.begin()));
}
}
diff --git a/deps/v8/test/cctest/test-bignum.cc b/deps/v8/test/cctest/test-bignum.cc
index 966ee5b5d4..dc5018d7f3 100644
--- a/deps/v8/test/cctest/test-bignum.cc
+++ b/deps/v8/test/cctest/test-bignum.cc
@@ -27,10 +27,10 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/platform/platform.h"
-#include "src/bignum.h"
+#include "src/numbers/bignum.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -40,15 +40,13 @@ namespace test_bignum {
static const int kBufferSize = 1024;
static void AssignHexString(Bignum* bignum, const char* str) {
- bignum->AssignHexString(Vector<const char>(str, StrLength(str)));
+ bignum->AssignHexString(CStrVector(str));
}
-
static void AssignDecimalString(Bignum* bignum, const char* str) {
- bignum->AssignDecimalString(Vector<const char>(str, StrLength(str)));
+ bignum->AssignDecimalString(CStrVector(str));
}
-
TEST(Assign) {
char buffer[kBufferSize];
Bignum bignum;
diff --git a/deps/v8/test/cctest/test-bit-vector.cc b/deps/v8/test/cctest/test-bit-vector.cc
index 92deab7946..478bc47e7b 100644
--- a/deps/v8/test/cctest/test-bit-vector.cc
+++ b/deps/v8/test/cctest/test-bit-vector.cc
@@ -27,9 +27,9 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/bit-vector.h"
+#include "src/utils/bit-vector.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-circular-queue.cc b/deps/v8/test/cctest/test-circular-queue.cc
index 85ab4c4fad..7b0475ff80 100644
--- a/deps/v8/test/cctest/test-circular-queue.cc
+++ b/deps/v8/test/cctest/test-circular-queue.cc
@@ -27,7 +27,7 @@
//
// Tests of the circular queue.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/profiler/circular-queue-inl.h"
#include "test/cctest/cctest.h"
@@ -36,7 +36,7 @@ using i::SamplingCircularQueue;
TEST(SamplingCircularQueue) {
- typedef v8::base::AtomicWord Record;
+ using Record = v8::base::AtomicWord;
const int kMaxRecordsInQueue = 4;
SamplingCircularQueue<Record, kMaxRecordsInQueue> scq;
@@ -100,8 +100,8 @@ TEST(SamplingCircularQueue) {
namespace {
-typedef v8::base::AtomicWord Record;
-typedef SamplingCircularQueue<Record, 12> TestSampleQueue;
+using Record = v8::base::AtomicWord;
+using TestSampleQueue = SamplingCircularQueue<Record, 12>;
class ProducerThread: public v8::base::Thread {
public:
diff --git a/deps/v8/test/cctest/test-code-layout.cc b/deps/v8/test/cctest/test-code-layout.cc
index fa55e40af6..aa6f6a7b2c 100644
--- a/deps/v8/test/cctest/test-code-layout.cc
+++ b/deps/v8/test/cctest/test-code-layout.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -39,8 +39,8 @@ TEST(CodeLayoutWithoutUnwindingInfo) {
code_desc.unwinding_info_size = 0;
code_desc.origin = nullptr;
- Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
- code_desc, Code::STUB, Handle<Object>::null());
+ Handle<Code> code =
+ Factory::CodeBuilder(CcTest::i_isolate(), code_desc, Code::STUB).Build();
CHECK(!code->has_unwinding_info());
CHECK_EQ(code->raw_instruction_size(), buffer_size);
@@ -85,8 +85,8 @@ TEST(CodeLayoutWithUnwindingInfo) {
code_desc.unwinding_info_size = unwinding_info_size;
code_desc.origin = nullptr;
- Handle<Code> code = CcTest::i_isolate()->factory()->NewCode(
- code_desc, Code::STUB, Handle<Object>::null());
+ Handle<Code> code =
+ Factory::CodeBuilder(CcTest::i_isolate(), code_desc, Code::STUB).Build();
CHECK(code->has_unwinding_info());
CHECK_EQ(code->raw_instruction_size(), buffer_size);
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index bf42647f2c..63c0602638 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -4,29 +4,29 @@
#include <cmath>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-promise.h"
#include "src/builtins/builtins-string-gen.h"
-#include "src/char-predicates.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/compiler/node.h"
#include "src/debug/debug.h"
-#include "src/hash-seed-inl.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/promise-inl.h"
#include "src/objects/smi.h"
#include "src/objects/struct-inl.h"
-#include "src/transitions-inl.h"
+#include "src/objects/transitions-inl.h"
+#include "src/strings/char-predicates.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
@@ -50,7 +50,7 @@ Handle<String> MakeString(const char* str) {
Handle<String> MakeName(const char* str, int suffix) {
EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s%d", str, suffix);
- return MakeString(buffer.start());
+ return MakeString(buffer.begin());
}
int sum9(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7,
@@ -257,7 +257,7 @@ TEST(IsValidPositiveSmi) {
IsValidPositiveSmiCase(isolate, 0x40000000U);
IsValidPositiveSmiCase(isolate, 0xBFFFFFFFU);
- typedef std::numeric_limits<int32_t> int32_limits;
+ using int32_limits = std::numeric_limits<int32_t>;
IsValidPositiveSmiCase(isolate, int32_limits::max());
IsValidPositiveSmiCase(isolate, int32_limits::min());
#if V8_TARGET_ARCH_64_BIT
@@ -367,7 +367,8 @@ TEST(ToString) {
const int kNumParams = 1;
CodeAssemblerTester asm_tester(isolate, kNumParams);
CodeStubAssembler m(asm_tester.state());
- m.Return(m.ToString(m.Parameter(kNumParams + 2), m.Parameter(0)));
+ m.Return(m.ToStringImpl(m.CAST(m.Parameter(kNumParams + 2)),
+ m.CAST(m.Parameter(0))));
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
@@ -588,9 +589,9 @@ TEST(TryToName) {
if (FLAG_thin_strings) {
// TryToName(<thin two-byte string>) => internalized version.
uc16 array1[] = {2001, 2002, 2003};
- Vector<const uc16> str1(array1);
- Handle<String> s =
- isolate->factory()->NewStringFromTwoByte(str1).ToHandleChecked();
+ Handle<String> s = isolate->factory()
+ ->NewStringFromTwoByte(ArrayVector(array1))
+ .ToHandleChecked();
Handle<String> internalized = isolate->factory()->InternalizeString(s);
ft.CheckTrue(s, expect_unique, internalized);
}
@@ -921,7 +922,7 @@ TEST(TransitionLookup) {
CHECK(root_map->raw_transitions()
->GetHeapObjectAssumeStrong()
- ->IsTransitionArray());
+ .IsTransitionArray());
Handle<TransitionArray> transitions(
TransitionArray::cast(
root_map->raw_transitions()->GetHeapObjectAssumeStrong()),
@@ -1072,9 +1073,9 @@ TEST(TryHasOwnProperty) {
Handle<Map> map = Map::Create(isolate, inobject_properties);
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
AddProperties(object, names, arraysize(names));
- CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
- CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
- CHECK(!object->map()->is_dictionary_map());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map().instance_type());
+ CHECK_EQ(inobject_properties, object->map().GetInObjectProperties());
+ CHECK(!object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1084,9 +1085,9 @@ TEST(TryHasOwnProperty) {
Handle<Map> map = Map::Create(isolate, inobject_properties);
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
AddProperties(object, names, arraysize(names));
- CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
- CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
- CHECK(!object->map()->is_dictionary_map());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map().instance_type());
+ CHECK_EQ(inobject_properties, object->map().GetInObjectProperties());
+ CHECK(!object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1096,9 +1097,9 @@ TEST(TryHasOwnProperty) {
Handle<Map> map = Map::Create(isolate, inobject_properties);
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
AddProperties(object, names, arraysize(names));
- CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
- CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
- CHECK(!object->map()->is_dictionary_map());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map().instance_type());
+ CHECK_EQ(inobject_properties, object->map().GetInObjectProperties());
+ CHECK(!object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1115,8 +1116,8 @@ TEST(TryHasOwnProperty) {
LanguageMode::kSloppy)
.FromJust());
- CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
- CHECK(object->map()->is_dictionary_map());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map().instance_type());
+ CHECK(object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1125,10 +1126,10 @@ TEST(TryHasOwnProperty) {
Handle<JSFunction> function =
factory->NewFunctionForTest(factory->empty_string());
JSFunction::EnsureHasInitialMap(function);
- function->initial_map()->set_instance_type(JS_GLOBAL_OBJECT_TYPE);
- function->initial_map()->set_is_prototype_map(true);
- function->initial_map()->set_is_dictionary_map(true);
- function->initial_map()->set_may_have_interesting_symbols(true);
+ function->initial_map().set_instance_type(JS_GLOBAL_OBJECT_TYPE);
+ function->initial_map().set_is_prototype_map(true);
+ function->initial_map().set_is_dictionary_map(true);
+ function->initial_map().set_may_have_interesting_symbols(true);
Handle<JSObject> object = factory->NewJSGlobalObject(function);
AddProperties(object, names, arraysize(names));
@@ -1137,8 +1138,8 @@ TEST(TryHasOwnProperty) {
LanguageMode::kSloppy)
.FromJust());
- CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map()->instance_type());
- CHECK(object->map()->is_dictionary_map());
+ CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map().instance_type());
+ CHECK(object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1176,13 +1177,13 @@ TEST(TryHasOwnProperty) {
Handle<JSFunction> function =
factory->NewFunctionForTest(factory->empty_string());
Handle<JSProxy> object = factory->NewJSProxy(function, objects[0]);
- CHECK_EQ(JS_PROXY_TYPE, object->map()->instance_type());
+ CHECK_EQ(JS_PROXY_TYPE, object->map().instance_type());
ft.CheckTrue(object, names[0], expect_bailout);
}
{
Handle<JSObject> object = isolate->global_proxy();
- CHECK_EQ(JS_GLOBAL_PROXY_TYPE, object->map()->instance_type());
+ CHECK_EQ(JS_GLOBAL_PROXY_TYPE, object->map().instance_type());
ft.CheckTrue(object, names[0], expect_bailout);
}
}
@@ -1276,9 +1277,9 @@ TEST(TryGetOwnProperty) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
AddProperties(object, names, arraysize(names), values, arraysize(values),
rand_gen.NextInt());
- CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
- CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
- CHECK(!object->map()->is_dictionary_map());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map().instance_type());
+ CHECK_EQ(inobject_properties, object->map().GetInObjectProperties());
+ CHECK(!object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1289,9 +1290,9 @@ TEST(TryGetOwnProperty) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
AddProperties(object, names, arraysize(names), values, arraysize(values),
rand_gen.NextInt());
- CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
- CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
- CHECK(!object->map()->is_dictionary_map());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map().instance_type());
+ CHECK_EQ(inobject_properties, object->map().GetInObjectProperties());
+ CHECK(!object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1302,9 +1303,9 @@ TEST(TryGetOwnProperty) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
AddProperties(object, names, arraysize(names), values, arraysize(values),
rand_gen.NextInt());
- CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
- CHECK_EQ(inobject_properties, object->map()->GetInObjectProperties());
- CHECK(!object->map()->is_dictionary_map());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map().instance_type());
+ CHECK_EQ(inobject_properties, object->map().GetInObjectProperties());
+ CHECK(!object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1322,8 +1323,8 @@ TEST(TryGetOwnProperty) {
LanguageMode::kSloppy)
.FromJust());
- CHECK_EQ(JS_OBJECT_TYPE, object->map()->instance_type());
- CHECK(object->map()->is_dictionary_map());
+ CHECK_EQ(JS_OBJECT_TYPE, object->map().instance_type());
+ CHECK(object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1338,8 +1339,8 @@ TEST(TryGetOwnProperty) {
LanguageMode::kSloppy)
.FromJust());
- CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map()->instance_type());
- CHECK(object->map()->is_dictionary_map());
+ CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map().instance_type());
+ CHECK(object->map().is_dictionary_map());
objects.push_back(object);
}
@@ -1384,7 +1385,7 @@ TEST(TryGetOwnProperty) {
Handle<JSFunction> function =
factory->NewFunctionForTest(factory->empty_string());
Handle<JSProxy> object = factory->NewJSProxy(function, objects[0]);
- CHECK_EQ(JS_PROXY_TYPE, object->map()->instance_type());
+ CHECK_EQ(JS_PROXY_TYPE, object->map().instance_type());
Handle<Object> value = ft.Call(object, names[0]).ToHandleChecked();
// Proxies are not supported yet.
CHECK_EQ(*bailout_symbol, *value);
@@ -1392,7 +1393,7 @@ TEST(TryGetOwnProperty) {
{
Handle<JSObject> object = isolate->global_proxy();
- CHECK_EQ(JS_GLOBAL_PROXY_TYPE, object->map()->instance_type());
+ CHECK_EQ(JS_GLOBAL_PROXY_TYPE, object->map().instance_type());
// Global proxies are not supported yet.
Handle<Object> value = ft.Call(object, names[0]).ToHandleChecked();
CHECK_EQ(*bailout_symbol, *value);
@@ -1492,7 +1493,7 @@ TEST(TryLookupElement) {
Handle<JSArray> object = factory->NewJSArray(0, PACKED_SMI_ELEMENTS);
AddElement(object, 0, smi0);
AddElement(object, 1, smi0);
- CHECK_EQ(PACKED_SMI_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(PACKED_SMI_ELEMENTS, object->map().elements_kind());
CHECK_FOUND(object, 0);
CHECK_FOUND(object, 1);
@@ -1505,7 +1506,7 @@ TEST(TryLookupElement) {
Handle<JSArray> object = factory->NewJSArray(0, HOLEY_SMI_ELEMENTS);
AddElement(object, 0, smi0);
AddElement(object, 13, smi0);
- CHECK_EQ(HOLEY_SMI_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(HOLEY_SMI_ELEMENTS, object->map().elements_kind());
CHECK_FOUND(object, 0);
CHECK_NOT_FOUND(object, 1);
@@ -1518,7 +1519,7 @@ TEST(TryLookupElement) {
Handle<JSArray> object = factory->NewJSArray(0, PACKED_ELEMENTS);
AddElement(object, 0, smi0);
AddElement(object, 1, smi0);
- CHECK_EQ(PACKED_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(PACKED_ELEMENTS, object->map().elements_kind());
CHECK_FOUND(object, 0);
CHECK_FOUND(object, 1);
@@ -1531,7 +1532,7 @@ TEST(TryLookupElement) {
Handle<JSArray> object = factory->NewJSArray(0, HOLEY_ELEMENTS);
AddElement(object, 0, smi0);
AddElement(object, 13, smi0);
- CHECK_EQ(HOLEY_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(HOLEY_ELEMENTS, object->map().elements_kind());
CHECK_FOUND(object, 0);
CHECK_NOT_FOUND(object, 1);
@@ -1541,10 +1542,12 @@ TEST(TryLookupElement) {
}
{
- Handle<JSTypedArray> object = factory->NewJSTypedArray(INT32_ELEMENTS, 2);
- Local<v8::ArrayBuffer> buffer = Utils::ToLocal(object->GetBuffer());
+ v8::Local<v8::ArrayBuffer> buffer =
+ v8::ArrayBuffer::New(reinterpret_cast<v8::Isolate*>(isolate), 8);
+ Handle<JSTypedArray> object = factory->NewJSTypedArray(
+ kExternalInt32Array, v8::Utils::OpenHandle(*buffer), 0, 2);
- CHECK_EQ(INT32_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(INT32_ELEMENTS, object->map().elements_kind());
CHECK_FOUND(object, 0);
CHECK_FOUND(object, 1);
@@ -1570,7 +1573,7 @@ TEST(TryLookupElement) {
Handle<String> str = factory->InternalizeUtf8String("ab");
Handle<JSValue>::cast(object)->set_value(*str);
AddElement(object, 13, smi0);
- CHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(FAST_STRING_WRAPPER_ELEMENTS, object->map().elements_kind());
CHECK_FOUND(object, 0);
CHECK_FOUND(object, 1);
@@ -1586,7 +1589,7 @@ TEST(TryLookupElement) {
Handle<JSValue>::cast(object)->set_value(*str);
AddElement(object, 13, smi0);
JSObject::NormalizeElements(object);
- CHECK_EQ(SLOW_STRING_WRAPPER_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(SLOW_STRING_WRAPPER_ELEMENTS, object->map().elements_kind());
CHECK_FOUND(object, 0);
CHECK_FOUND(object, 1);
@@ -1617,19 +1620,19 @@ TEST(TryLookupElement) {
Handle<JSFunction> function =
factory->NewFunctionForTest(factory->empty_string());
Handle<JSProxy> object = factory->NewJSProxy(function, handler);
- CHECK_EQ(JS_PROXY_TYPE, object->map()->instance_type());
+ CHECK_EQ(JS_PROXY_TYPE, object->map().instance_type());
ft.CheckTrue(object, smi0, expect_bailout);
}
{
Handle<JSObject> object = isolate->global_object();
- CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map()->instance_type());
+ CHECK_EQ(JS_GLOBAL_OBJECT_TYPE, object->map().instance_type());
ft.CheckTrue(object, smi0, expect_bailout);
}
{
Handle<JSObject> object = isolate->global_proxy();
- CHECK_EQ(JS_GLOBAL_PROXY_TYPE, object->map()->instance_type());
+ CHECK_EQ(JS_GLOBAL_PROXY_TYPE, object->map().instance_type());
ft.CheckTrue(object, smi0, expect_bailout);
}
}
@@ -1793,9 +1796,9 @@ TEST(OneToTwoByteStringCopy) {
Handle<String> string1 = isolate->factory()->InternalizeUtf8String("abcde");
uc16 array[] = {1000, 1001, 1002, 1003, 1004};
- Vector<const uc16> str(array);
- Handle<String> string2 =
- isolate->factory()->NewStringFromTwoByte(str).ToHandleChecked();
+ Handle<String> string2 = isolate->factory()
+ ->NewStringFromTwoByte(ArrayVector(array))
+ .ToHandleChecked();
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call(string1, string2);
DisallowHeapAllocation no_gc;
@@ -1825,9 +1828,9 @@ TEST(OneToOneByteStringCopy) {
Handle<String> string1 = isolate->factory()->InternalizeUtf8String("abcde");
uint8_t array[] = {100, 101, 102, 103, 104};
- Vector<const uint8_t> str(array);
- Handle<String> string2 =
- isolate->factory()->NewStringFromOneByte(str).ToHandleChecked();
+ Handle<String> string2 = isolate->factory()
+ ->NewStringFromOneByte(ArrayVector(array))
+ .ToHandleChecked();
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call(string1, string2);
DisallowHeapAllocation no_gc;
@@ -1857,9 +1860,9 @@ TEST(OneToOneByteStringCopyNonZeroStart) {
Handle<String> string1 = isolate->factory()->InternalizeUtf8String("abcde");
uint8_t array[] = {100, 101, 102, 103, 104};
- Vector<const uint8_t> str(array);
- Handle<String> string2 =
- isolate->factory()->NewStringFromOneByte(str).ToHandleChecked();
+ Handle<String> string2 = isolate->factory()
+ ->NewStringFromOneByte(ArrayVector(array))
+ .ToHandleChecked();
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call(string1, string2);
DisallowHeapAllocation no_gc;
@@ -1885,13 +1888,13 @@ TEST(TwoToTwoByteStringCopy) {
m.Return(m.SmiConstant(Smi::FromInt(0)));
uc16 array1[] = {2000, 2001, 2002, 2003, 2004};
- Vector<const uc16> str1(array1);
- Handle<String> string1 =
- isolate->factory()->NewStringFromTwoByte(str1).ToHandleChecked();
+ Handle<String> string1 = isolate->factory()
+ ->NewStringFromTwoByte(ArrayVector(array1))
+ .ToHandleChecked();
uc16 array2[] = {1000, 1001, 1002, 1003, 1004};
- Vector<const uc16> str2(array2);
- Handle<String> string2 =
- isolate->factory()->NewStringFromTwoByte(str2).ToHandleChecked();
+ Handle<String> string2 = isolate->factory()
+ ->NewStringFromTwoByte(ArrayVector(array2))
+ .ToHandleChecked();
FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
ft.Call(string1, string2);
DisallowHeapAllocation no_gc;
@@ -2369,7 +2372,7 @@ TEST(CreatePromiseResolvingFunctionsContext) {
CHECK_EQ(isolate->native_context()->scope_info(), context_js->scope_info());
CHECK_EQ(ReadOnlyRoots(isolate).the_hole_value(), context_js->extension());
CHECK_EQ(*isolate->native_context(), context_js->native_context());
- CHECK(context_js->get(PromiseBuiltins::kPromiseSlot)->IsJSPromise());
+ CHECK(context_js->get(PromiseBuiltins::kPromiseSlot).IsJSPromise());
CHECK_EQ(ReadOnlyRoots(isolate).false_value(),
context_js->get(PromiseBuiltins::kDebugEventSlot));
}
@@ -2400,8 +2403,8 @@ TEST(CreatePromiseResolvingFunctions) {
ft.Call(isolate->factory()->undefined_value()).ToHandleChecked();
CHECK(result_obj->IsFixedArray());
Handle<FixedArray> result_arr = Handle<FixedArray>::cast(result_obj);
- CHECK(result_arr->get(0)->IsJSFunction());
- CHECK(result_arr->get(1)->IsJSFunction());
+ CHECK(result_arr->get(0).IsJSFunction());
+ CHECK(result_arr->get(1).IsJSFunction());
}
TEST(NewElementsCapacity) {
@@ -2534,7 +2537,7 @@ TEST(CreatePromiseGetCapabilitiesExecutorContext) {
CHECK_EQ(ReadOnlyRoots(isolate).the_hole_value(), context_js->extension());
CHECK_EQ(*isolate->native_context(), context_js->native_context());
CHECK(
- context_js->get(PromiseBuiltins::kCapabilitySlot)->IsPromiseCapability());
+ context_js->get(PromiseBuiltins::kCapabilitySlot).IsPromiseCapability());
}
TEST(NewPromiseCapability) {
@@ -2564,13 +2567,13 @@ TEST(NewPromiseCapability) {
Handle<PromiseCapability> result =
Handle<PromiseCapability>::cast(result_obj);
- CHECK(result->promise()->IsJSPromise());
- CHECK(result->resolve()->IsJSFunction());
- CHECK(result->reject()->IsJSFunction());
+ CHECK(result->promise().IsJSPromise());
+ CHECK(result->resolve().IsJSFunction());
+ CHECK(result->reject().IsJSFunction());
CHECK_EQ(*isolate->promise_capability_default_reject_shared_fun(),
- JSFunction::cast(result->reject())->shared());
+ JSFunction::cast(result->reject()).shared());
CHECK_EQ(*isolate->promise_capability_default_resolve_shared_fun(),
- JSFunction::cast(result->resolve())->shared());
+ JSFunction::cast(result->resolve()).shared());
Handle<JSFunction> callbacks[] = {
handle(JSFunction::cast(result->resolve()), isolate),
@@ -2617,11 +2620,11 @@ TEST(NewPromiseCapability) {
Handle<PromiseCapability> result =
Handle<PromiseCapability>::cast(result_obj);
- CHECK(result->promise()->IsJSObject());
+ CHECK(result->promise().IsJSObject());
Handle<JSObject> promise(JSObject::cast(result->promise()), isolate);
CHECK_EQ(constructor_fn->prototype_or_initial_map(), promise->map());
- CHECK(result->resolve()->IsJSFunction());
- CHECK(result->reject()->IsJSFunction());
+ CHECK(result->resolve().IsJSFunction());
+ CHECK(result->reject().IsJSFunction());
Handle<String> resolved_str =
isolate->factory()->NewStringFromAsciiChecked("resolvedStr");
@@ -3099,7 +3102,7 @@ TEST(CloneEmptyFixedArray) {
Handle<FixedArray> source(isolate->factory()->empty_fixed_array());
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
FixedArray result(FixedArray::cast(*result_raw));
- CHECK_EQ(0, result->length());
+ CHECK_EQ(0, result.length());
CHECK_EQ(*(isolate->factory()->empty_fixed_array()), result);
}
@@ -3117,12 +3120,12 @@ TEST(CloneFixedArray) {
source->set(1, Smi::FromInt(1234));
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
FixedArray result(FixedArray::cast(*result_raw));
- CHECK_EQ(5, result->length());
- CHECK(result->get(0)->IsTheHole(isolate));
- CHECK_EQ(Smi::cast(result->get(1))->value(), 1234);
- CHECK(result->get(2)->IsTheHole(isolate));
- CHECK(result->get(3)->IsTheHole(isolate));
- CHECK(result->get(4)->IsTheHole(isolate));
+ CHECK_EQ(5, result.length());
+ CHECK(result.get(0).IsTheHole(isolate));
+ CHECK_EQ(Smi::cast(result.get(1)).value(), 1234);
+ CHECK(result.get(2).IsTheHole(isolate));
+ CHECK(result.get(3).IsTheHole(isolate));
+ CHECK(result.get(4).IsTheHole(isolate));
}
TEST(CloneFixedArrayCOW) {
@@ -3163,12 +3166,12 @@ TEST(ExtractFixedArrayCOWForceCopy) {
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
FixedArray result(FixedArray::cast(*result_raw));
CHECK_NE(*source, result);
- CHECK_EQ(5, result->length());
- CHECK(result->get(0)->IsTheHole(isolate));
- CHECK_EQ(Smi::cast(result->get(1))->value(), 1234);
- CHECK(result->get(2)->IsTheHole(isolate));
- CHECK(result->get(3)->IsTheHole(isolate));
- CHECK(result->get(4)->IsTheHole(isolate));
+ CHECK_EQ(5, result.length());
+ CHECK(result.get(0).IsTheHole(isolate));
+ CHECK_EQ(Smi::cast(result.get(1)).value(), 1234);
+ CHECK(result.get(2).IsTheHole(isolate));
+ CHECK(result.get(3).IsTheHole(isolate));
+ CHECK(result.get(4).IsTheHole(isolate));
}
TEST(ExtractFixedArraySimple) {
@@ -3193,9 +3196,9 @@ TEST(ExtractFixedArraySimple) {
Handle<Smi>(Smi::FromInt(2), isolate))
.ToHandleChecked();
FixedArray result(FixedArray::cast(*result_raw));
- CHECK_EQ(2, result->length());
- CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
- CHECK(result->get(1)->IsTheHole(isolate));
+ CHECK_EQ(2, result.length());
+ CHECK_EQ(Smi::cast(result.get(0)).value(), 1234);
+ CHECK(result.get(1).IsTheHole(isolate));
}
TEST(ExtractFixedArraySimpleSmiConstant) {
@@ -3217,9 +3220,9 @@ TEST(ExtractFixedArraySimpleSmiConstant) {
source->set(1, Smi::FromInt(1234));
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
FixedArray result(FixedArray::cast(*result_raw));
- CHECK_EQ(2, result->length());
- CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
- CHECK(result->get(1)->IsTheHole(isolate));
+ CHECK_EQ(2, result.length());
+ CHECK_EQ(Smi::cast(result.get(0)).value(), 1234);
+ CHECK(result.get(1).IsTheHole(isolate));
}
TEST(ExtractFixedArraySimpleIntPtrConstant) {
@@ -3241,9 +3244,9 @@ TEST(ExtractFixedArraySimpleIntPtrConstant) {
source->set(1, Smi::FromInt(1234));
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
FixedArray result(FixedArray::cast(*result_raw));
- CHECK_EQ(2, result->length());
- CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
- CHECK(result->get(1)->IsTheHole(isolate));
+ CHECK_EQ(2, result.length());
+ CHECK_EQ(Smi::cast(result.get(0)).value(), 1234);
+ CHECK(result.get(1).IsTheHole(isolate));
}
TEST(ExtractFixedArraySimpleIntPtrConstantNoDoubles) {
@@ -3263,9 +3266,9 @@ TEST(ExtractFixedArraySimpleIntPtrConstantNoDoubles) {
source->set(1, Smi::FromInt(1234));
Handle<Object> result_raw = ft.Call(source).ToHandleChecked();
FixedArray result(FixedArray::cast(*result_raw));
- CHECK_EQ(2, result->length());
- CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
- CHECK(result->get(1)->IsTheHole(isolate));
+ CHECK_EQ(2, result.length());
+ CHECK_EQ(Smi::cast(result.get(0)).value(), 1234);
+ CHECK(result.get(1).IsTheHole(isolate));
}
TEST(ExtractFixedArraySimpleIntPtrParameters) {
@@ -3287,9 +3290,9 @@ TEST(ExtractFixedArraySimpleIntPtrParameters) {
Handle<Smi>(Smi::FromInt(2), isolate))
.ToHandleChecked();
FixedArray result(FixedArray::cast(*result_raw));
- CHECK_EQ(2, result->length());
- CHECK_EQ(Smi::cast(result->get(0))->value(), 1234);
- CHECK(result->get(1)->IsTheHole(isolate));
+ CHECK_EQ(2, result.length());
+ CHECK_EQ(Smi::cast(result.get(0)).value(), 1234);
+ CHECK(result.get(1).IsTheHole(isolate));
Handle<FixedDoubleArray> source_double = Handle<FixedDoubleArray>::cast(
isolate->factory()->NewFixedDoubleArray(5));
@@ -3303,9 +3306,9 @@ TEST(ExtractFixedArraySimpleIntPtrParameters) {
Handle<Smi>(Smi::FromInt(2), isolate))
.ToHandleChecked();
FixedDoubleArray double_result = FixedDoubleArray::cast(*double_result_raw);
- CHECK_EQ(2, double_result->length());
- CHECK_EQ(double_result->get_scalar(0), 11);
- CHECK_EQ(double_result->get_scalar(1), 12);
+ CHECK_EQ(2, double_result.length());
+ CHECK_EQ(double_result.get_scalar(0), 11);
+ CHECK_EQ(double_result.get_scalar(1), 12);
}
TEST(SingleInputPhiElimination) {
@@ -3426,38 +3429,38 @@ TEST(IsDoubleElementsKind) {
(*Handle<Smi>::cast(
ft.Call(Handle<Smi>(Smi::FromInt(PACKED_DOUBLE_ELEMENTS), isolate))
.ToHandleChecked()))
- ->value(),
+ .value(),
1);
CHECK_EQ(
(*Handle<Smi>::cast(
ft.Call(Handle<Smi>(Smi::FromInt(HOLEY_DOUBLE_ELEMENTS), isolate))
.ToHandleChecked()))
- ->value(),
+ .value(),
1);
CHECK_EQ((*Handle<Smi>::cast(
ft.Call(Handle<Smi>(Smi::FromInt(HOLEY_ELEMENTS), isolate))
.ToHandleChecked()))
- ->value(),
+ .value(),
0);
CHECK_EQ((*Handle<Smi>::cast(
ft.Call(Handle<Smi>(Smi::FromInt(PACKED_ELEMENTS), isolate))
.ToHandleChecked()))
- ->value(),
+ .value(),
0);
CHECK_EQ((*Handle<Smi>::cast(
ft.Call(Handle<Smi>(Smi::FromInt(PACKED_SMI_ELEMENTS), isolate))
.ToHandleChecked()))
- ->value(),
+ .value(),
0);
CHECK_EQ((*Handle<Smi>::cast(
ft.Call(Handle<Smi>(Smi::FromInt(HOLEY_SMI_ELEMENTS), isolate))
.ToHandleChecked()))
- ->value(),
+ .value(),
0);
CHECK_EQ((*Handle<Smi>::cast(
ft.Call(Handle<Smi>(Smi::FromInt(DICTIONARY_ELEMENTS), isolate))
.ToHandleChecked()))
- ->value(),
+ .value(),
0);
}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index cb559d8fe2..28867a89ef 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -28,17 +28,17 @@
#include <stdlib.h>
#include <wchar.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
-#include "src/compilation-cache.h"
-#include "src/compiler.h"
-#include "src/disasm.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/codegen/compiler.h"
+#include "src/diagnostics/disasm.h"
#include "src/heap/factory.h"
#include "src/heap/spaces.h"
#include "src/interpreter/interpreter.h"
-#include "src/objects-inl.h"
#include "src/objects/allocation-site-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -55,7 +55,7 @@ static void SetGlobalProperty(const char* name, Object value) {
Handle<Object> object(value, isolate);
Handle<String> internalized_name =
isolate->factory()->InternalizeUtf8String(name);
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
Runtime::SetObjectProperty(isolate, global, internalized_name, object,
StoreOrigin::kMaybeKeyed, Just(kDontThrow))
.Check();
@@ -82,10 +82,10 @@ static double Inc(Isolate* isolate, int x) {
EmbeddedVector<char, 512> buffer;
SNPrintF(buffer, source, x);
- Handle<JSFunction> fun = Compile(buffer.start());
+ Handle<JSFunction> fun = Compile(buffer.begin());
if (fun.is_null()) return -1;
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
Execution::Call(isolate, fun, global, 0, nullptr).Check();
return GetGlobalProperty("result")->Number();
}
@@ -104,7 +104,7 @@ static double Add(Isolate* isolate, int x, int y) {
SetGlobalProperty("x", Smi::FromInt(x));
SetGlobalProperty("y", Smi::FromInt(y));
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
Execution::Call(isolate, fun, global, 0, nullptr).Check();
return GetGlobalProperty("result")->Number();
}
@@ -122,7 +122,7 @@ static double Abs(Isolate* isolate, int x) {
if (fun.is_null()) return -1;
SetGlobalProperty("x", Smi::FromInt(x));
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
Execution::Call(isolate, fun, global, 0, nullptr).Check();
return GetGlobalProperty("result")->Number();
}
@@ -141,7 +141,7 @@ static double Sum(Isolate* isolate, int n) {
if (fun.is_null()) return -1;
SetGlobalProperty("n", Smi::FromInt(n));
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
Execution::Call(isolate, fun, global, 0, nullptr).Check();
return GetGlobalProperty("result")->Number();
}
@@ -161,7 +161,7 @@ TEST(Print) {
const char* source = "for (n = 0; n < 100; ++n) print(n, 1, 2);";
Handle<JSFunction> fun = Compile(source);
if (fun.is_null()) return;
- Handle<JSObject> global(CcTest::i_isolate()->context()->global_object(),
+ Handle<JSObject> global(CcTest::i_isolate()->context().global_object(),
fun->GetIsolate());
Execution::Call(CcTest::i_isolate(), fun, global, 0, nullptr).Check();
}
@@ -193,7 +193,7 @@ TEST(Stuff) {
Handle<JSFunction> fun = Compile(source);
CHECK(!fun.is_null());
- Handle<JSObject> global(CcTest::i_isolate()->context()->global_object(),
+ Handle<JSObject> global(CcTest::i_isolate()->context().global_object(),
fun->GetIsolate());
Execution::Call(CcTest::i_isolate(), fun, global, 0, nullptr).Check();
CHECK_EQ(511.0, GetGlobalProperty("r")->Number());
@@ -208,9 +208,9 @@ TEST(UncaughtThrow) {
Handle<JSFunction> fun = Compile(source);
CHECK(!fun.is_null());
Isolate* isolate = fun->GetIsolate();
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
CHECK(Execution::Call(isolate, fun, global, 0, nullptr).is_null());
- CHECK_EQ(42.0, isolate->pending_exception()->Number());
+ CHECK_EQ(42.0, isolate->pending_exception().Number());
}
@@ -234,7 +234,7 @@ TEST(C2JSFrames) {
Isolate* isolate = fun0->GetIsolate();
// Run the generated code to populate the global object with 'foo'.
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
Execution::Call(isolate, fun0, global, 0, nullptr).Check();
Handle<Object> fun1 =
@@ -243,7 +243,7 @@ TEST(C2JSFrames) {
CHECK(fun1->IsJSFunction());
Handle<Object> argv[] = {
- isolate->factory()->InternalizeOneByteString(StaticCharVector("hello"))};
+ isolate->factory()->InternalizeString(StaticCharVector("hello"))};
Execution::Call(isolate,
Handle<JSFunction>::cast(fun1),
global,
@@ -276,14 +276,14 @@ TEST(GetScriptLineNumber) {
const int max_rows = 1000;
const int buffer_size = max_rows + sizeof(function_f);
ScopedVector<char> buffer(buffer_size);
- memset(buffer.start(), '\n', buffer_size - 1);
+ memset(buffer.begin(), '\n', buffer_size - 1);
buffer[buffer_size - 1] = '\0';
for (int i = 0; i < max_rows; ++i) {
if (i > 0)
buffer[i - 1] = '\n';
MemCopy(&buffer[i], function_f, sizeof(function_f) - 1);
- v8::Local<v8::String> script_body = v8_str(buffer.start());
+ v8::Local<v8::String> script_body = v8_str(buffer.begin());
v8::Script::Compile(context.local(), script_body, &origin)
.ToLocalChecked()
->Run(context.local())
@@ -304,9 +304,11 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
// Make sure function f has a call that uses a type feedback slot.
- CompileRun("function fun() {};"
- "fun1 = fun;"
- "function f(a) { a(); } f(fun1);");
+ CompileRun(
+ "function fun() {};"
+ "fun1 = fun;"
+ "%PrepareFunctionForOptimization(f);"
+ "function f(a) { a(); } f(fun1);");
Handle<JSFunction> f = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
@@ -320,7 +322,7 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
{
HeapObject heap_object;
CHECK(object->GetHeapObjectIfWeak(&heap_object));
- CHECK(heap_object->IsJSFunction());
+ CHECK(heap_object.IsJSFunction());
}
CompileRun("%OptimizeFunctionOnNextCall(f); f(fun1);");
@@ -328,11 +330,11 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
// Verify that the feedback is still "gathered" despite a recompilation
// of the full code.
CHECK(f->IsOptimized());
- object = f->feedback_vector()->Get(slot_for_a);
+ object = f->feedback_vector().Get(slot_for_a);
{
HeapObject heap_object;
CHECK(object->GetHeapObjectIfWeak(&heap_object));
- CHECK(heap_object->IsJSFunction());
+ CHECK(heap_object.IsJSFunction());
}
}
@@ -364,13 +366,13 @@ TEST(FeedbackVectorUnaffectedByScopeChanges) {
// If we are compiling lazily then it should not be compiled, and so no
// feedback vector allocated yet.
- CHECK(!f->shared()->is_compiled());
+ CHECK(!f->shared().is_compiled());
CompileRun("morphing_call();");
- // Now a feedback vector is allocated.
- CHECK(f->shared()->is_compiled());
- CHECK(!f->feedback_vector()->is_empty());
+ // Now a feedback vector / closure feedback cell array is allocated.
+ CHECK(f->shared().is_compiled());
+ CHECK(f->has_feedback_vector() || f->has_closure_feedback_cell_array());
}
// Test that optimized code for different closures is actually shared.
@@ -391,6 +393,7 @@ TEST(OptimizedCodeSharing1) {
"var closure0 = MakeClosure();"
"var closure1 = MakeClosure();" // We only share optimized code
// if there are at least two closures.
+ "%PrepareFunctionForOptimization(closure0);"
"%DebugPrint(closure0());"
"%OptimizeFunctionOnNextCall(closure0);"
"%DebugPrint(closure0());"
@@ -647,16 +650,11 @@ TEST(CompileFunctionInContextScriptOrigin) {
v8::Integer::New(CcTest::isolate(), 22),
v8::Integer::New(CcTest::isolate(), 41));
v8::ScriptCompiler::Source script_source(v8_str("throw new Error()"), origin);
- Local<ScriptOrModule> script;
v8::Local<v8::Function> fun =
- v8::ScriptCompiler::CompileFunctionInContext(
- env.local(), &script_source, 0, nullptr, 0, nullptr,
- v8::ScriptCompiler::CompileOptions::kNoCompileOptions,
- v8::ScriptCompiler::NoCacheReason::kNoCacheNoReason, &script)
+ v8::ScriptCompiler::CompileFunctionInContext(env.local(), &script_source,
+ 0, nullptr, 0, nullptr)
.ToLocalChecked();
CHECK(!fun.IsEmpty());
- CHECK(!script.IsEmpty());
- CHECK(script->GetResourceName()->StrictEquals(v8_str("test")));
v8::TryCatch try_catch(CcTest::isolate());
CcTest::isolate()->SetCaptureStackTraceForUncaughtExceptions(true);
CHECK(fun->Call(env.local(), env->Global(), 0, nullptr).IsEmpty());
@@ -672,17 +670,16 @@ TEST(CompileFunctionInContextScriptOrigin) {
}
void TestCompileFunctionInContextToStringImpl() {
-#define CHECK_NOT_CAUGHT(__local_context__, try_catch, __op__) \
- do { \
- const char* op = (__op__); \
- v8::Local<v8::Context> context = (__local_context__); \
- if (try_catch.HasCaught()) { \
- v8::String::Utf8Value error( \
- CcTest::isolate(), \
- try_catch.Exception()->ToString(context).ToLocalChecked()); \
- V8_Fatal(__FILE__, __LINE__, \
- "Unexpected exception thrown during %s:\n\t%s\n", op, *error); \
- } \
+#define CHECK_NOT_CAUGHT(__local_context__, try_catch, __op__) \
+ do { \
+ const char* op = (__op__); \
+ v8::Local<v8::Context> context = (__local_context__); \
+ if (try_catch.HasCaught()) { \
+ v8::String::Utf8Value error( \
+ CcTest::isolate(), \
+ try_catch.Exception()->ToString(context).ToLocalChecked()); \
+ FATAL("Unexpected exception thrown during %s:\n\t%s\n", op, *error); \
+ } \
} while (false)
{ // NOLINT
@@ -786,16 +783,18 @@ TEST(InvocationCount) {
CompileRun(
"function bar() {};"
+ "%EnsureFeedbackVectorForFunction(bar);"
"function foo() { return bar(); };"
+ "%EnsureFeedbackVectorForFunction(foo);"
"foo();");
Handle<JSFunction> foo = Handle<JSFunction>::cast(GetGlobalProperty("foo"));
- CHECK_EQ(1, foo->feedback_vector()->invocation_count());
+ CHECK_EQ(1, foo->feedback_vector().invocation_count());
CompileRun("foo()");
- CHECK_EQ(2, foo->feedback_vector()->invocation_count());
+ CHECK_EQ(2, foo->feedback_vector().invocation_count());
CompileRun("bar()");
- CHECK_EQ(2, foo->feedback_vector()->invocation_count());
+ CHECK_EQ(2, foo->feedback_vector().invocation_count());
CompileRun("foo(); foo()");
- CHECK_EQ(4, foo->feedback_vector()->invocation_count());
+ CHECK_EQ(4, foo->feedback_vector().invocation_count());
}
TEST(SafeToSkipArgumentsAdaptor) {
@@ -809,17 +808,17 @@ TEST(SafeToSkipArgumentsAdaptor) {
"function e() { \"use strict\"; return eval(\"\"); }; e();"
"function f(x, y) { \"use strict\"; return x + y; }; f(1, 2);");
Handle<JSFunction> a = Handle<JSFunction>::cast(GetGlobalProperty("a"));
- CHECK(a->shared()->is_safe_to_skip_arguments_adaptor());
+ CHECK(a->shared().is_safe_to_skip_arguments_adaptor());
Handle<JSFunction> b = Handle<JSFunction>::cast(GetGlobalProperty("b"));
- CHECK(!b->shared()->is_safe_to_skip_arguments_adaptor());
+ CHECK(!b->shared().is_safe_to_skip_arguments_adaptor());
Handle<JSFunction> c = Handle<JSFunction>::cast(GetGlobalProperty("c"));
- CHECK(!c->shared()->is_safe_to_skip_arguments_adaptor());
+ CHECK(!c->shared().is_safe_to_skip_arguments_adaptor());
Handle<JSFunction> d = Handle<JSFunction>::cast(GetGlobalProperty("d"));
- CHECK(!d->shared()->is_safe_to_skip_arguments_adaptor());
+ CHECK(!d->shared().is_safe_to_skip_arguments_adaptor());
Handle<JSFunction> e = Handle<JSFunction>::cast(GetGlobalProperty("e"));
- CHECK(!e->shared()->is_safe_to_skip_arguments_adaptor());
+ CHECK(!e->shared().is_safe_to_skip_arguments_adaptor());
Handle<JSFunction> f = Handle<JSFunction>::cast(GetGlobalProperty("f"));
- CHECK(f->shared()->is_safe_to_skip_arguments_adaptor());
+ CHECK(f->shared().is_safe_to_skip_arguments_adaptor());
}
TEST(ShallowEagerCompilation) {
@@ -945,10 +944,10 @@ TEST(DeepEagerCompilationPeakMemory) {
// TODO(mslekova): Remove the duplication with test-heap.cc
static int AllocationSitesCount(Heap* heap) {
int count = 0;
- for (Object site = heap->allocation_sites_list(); site->IsAllocationSite();) {
+ for (Object site = heap->allocation_sites_list(); site.IsAllocationSite();) {
AllocationSite cur = AllocationSite::cast(site);
- CHECK(cur->HasWeakNext());
- site = cur->weak_next();
+ CHECK(cur.HasWeakNext());
+ site = cur.weak_next();
count++;
}
return count;
@@ -1013,6 +1012,7 @@ TEST(DecideToPretenureDuringCompilation) {
" foo(shouldKeep);"
" }"
"}"
+ "%PrepareFunctionForOptimization(bar);"
"bar();");
// This number should be >= kPretenureRatio * 10000,
@@ -1036,7 +1036,9 @@ TEST(DecideToPretenureDuringCompilation) {
// Check `bar` can get optimized again, meaning the compiler state is
// recoverable from this point.
- CompileRun("%OptimizeFunctionOnNextCall(bar);");
+ CompileRun(
+ "%PrepareFunctionForOptimization(bar);"
+ "%OptimizeFunctionOnNextCall(bar);");
CompileRun("bar();");
Handle<Object> foo_obj =
diff --git a/deps/v8/test/cctest/test-constantpool.cc b/deps/v8/test/cctest/test-constantpool.cc
index 1e5f98b5b7..a861655adc 100644
--- a/deps/v8/test/cctest/test-constantpool.cc
+++ b/deps/v8/test/cctest/test-constantpool.cc
@@ -4,9 +4,9 @@
// Test embedded constant pool builder code.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/constant-pool.h"
+#include "src/codegen/constant-pool.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index 88ba562376..1ddd463795 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -28,13 +28,13 @@
#include <stdlib.h>
#include "src/base/platform/platform.h"
-#include "src/conversions.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory-inl.h"
-#include "src/isolate.h"
-#include "src/objects.h"
+#include "src/init/v8.h"
+#include "src/numbers/conversions.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/smi.h"
-#include "src/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index eeddb428fd..e978aff2ba 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -30,20 +30,20 @@
#include <limits>
#include <memory>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "include/v8-profiler.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
-#include "src/deoptimizer.h"
+#include "src/codegen/source-position-table.h"
+#include "src/deoptimizer/deoptimizer.h"
#include "src/libplatform/default-platform.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-listener.h"
#include "src/profiler/tracing-cpu-profiler.h"
-#include "src/source-position-table.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
@@ -133,7 +133,7 @@ i::AbstractCode CreateCode(LocalContext* env) {
i::EmbeddedVector<char, 32> name;
i::SNPrintF(name, "function_%d", ++counter);
- const char* name_start = name.start();
+ const char* name_start = name.begin();
i::SNPrintF(script,
"function %s() {\n"
"var counter = 0;\n"
@@ -141,7 +141,7 @@ i::AbstractCode CreateCode(LocalContext* env) {
"return '%s_' + counter;\n"
"}\n"
"%s();\n", name_start, counter, name_start, name_start);
- CompileRun(script.start());
+ CompileRun(script.begin());
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*GetFunction(env->local(), name_start)));
@@ -182,26 +182,26 @@ TEST(CodeEvents) {
profiler_listener.CodeMoveEvent(comment2_code, moved_code);
// Enqueue a tick event to enable code events processing.
- EnqueueTickSampleEvent(processor, aaa_code->InstructionStart());
+ EnqueueTickSampleEvent(processor, aaa_code.InstructionStart());
isolate->logger()->RemoveCodeEventListener(&profiler_listener);
processor->StopSynchronously();
// Check the state of profile generator.
CodeEntry* aaa =
- generator->code_map()->FindEntry(aaa_code->InstructionStart());
+ generator->code_map()->FindEntry(aaa_code.InstructionStart());
CHECK(aaa);
CHECK_EQ(0, strcmp(aaa_str, aaa->name()));
CodeEntry* comment =
- generator->code_map()->FindEntry(comment_code->InstructionStart());
+ generator->code_map()->FindEntry(comment_code.InstructionStart());
CHECK(comment);
CHECK_EQ(0, strcmp("comment", comment->name()));
- CHECK(!generator->code_map()->FindEntry(comment2_code->InstructionStart()));
+ CHECK(!generator->code_map()->FindEntry(comment2_code.InstructionStart()));
CodeEntry* comment2 =
- generator->code_map()->FindEntry(moved_code->InstructionStart());
+ generator->code_map()->FindEntry(moved_code.InstructionStart());
CHECK(comment2);
CHECK_EQ(0, strcmp("comment2", comment2->name()));
}
@@ -226,8 +226,8 @@ TEST(TickEvents) {
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
CcTest::i_isolate(), generator,
v8::base::TimeDelta::FromMicroseconds(100), true);
- CpuProfiler profiler(isolate, profiles, generator, processor);
- profiles->StartProfiling("", false);
+ CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+ profiles->StartProfiling("");
processor->Start();
ProfilerListener profiler_listener(isolate, processor);
isolate->logger()->AddCodeEventListener(&profiler_listener);
@@ -236,14 +236,14 @@ TEST(TickEvents) {
profiler_listener.CodeCreateEvent(i::Logger::STUB_TAG, frame2_code, "ccc");
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame3_code, "ddd");
- EnqueueTickSampleEvent(processor, frame1_code->raw_instruction_start());
+ EnqueueTickSampleEvent(processor, frame1_code.raw_instruction_start());
EnqueueTickSampleEvent(
processor,
- frame2_code->raw_instruction_start() + frame2_code->ExecutableSize() / 2,
- frame1_code->raw_instruction_start() + frame1_code->ExecutableSize() / 2);
- EnqueueTickSampleEvent(processor, frame3_code->raw_instruction_end() - 1,
- frame2_code->raw_instruction_end() - 1,
- frame1_code->raw_instruction_end() - 1);
+ frame2_code.raw_instruction_start() + frame2_code.ExecutableSize() / 2,
+ frame1_code.raw_instruction_start() + frame1_code.ExecutableSize() / 2);
+ EnqueueTickSampleEvent(processor, frame3_code.raw_instruction_end() - 1,
+ frame2_code.raw_instruction_end() - 1,
+ frame1_code.raw_instruction_end() - 1);
isolate->logger()->RemoveCodeEventListener(&profiler_listener);
processor->StopSynchronously();
@@ -295,19 +295,19 @@ TEST(Issue1398) {
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
CcTest::i_isolate(), generator,
v8::base::TimeDelta::FromMicroseconds(100), true);
- CpuProfiler profiler(isolate, profiles, generator, processor);
- profiles->StartProfiling("", false);
+ CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+ profiles->StartProfiling("");
processor->Start();
ProfilerListener profiler_listener(isolate, processor);
profiler_listener.CodeCreateEvent(i::Logger::BUILTIN_TAG, code, "bbb");
v8::internal::TickSample sample;
- sample.pc = reinterpret_cast<void*>(code->InstructionStart());
+ sample.pc = reinterpret_cast<void*>(code.InstructionStart());
sample.tos = nullptr;
sample.frames_count = v8::TickSample::kMaxFramesCount;
for (unsigned i = 0; i < sample.frames_count; ++i) {
- sample.stack[i] = reinterpret_cast<void*>(code->InstructionStart());
+ sample.stack[i] = reinterpret_cast<void*>(code.InstructionStart());
}
sample.timestamp = base::TimeTicks::HighResolutionNow();
processor->AddSample(sample);
@@ -434,14 +434,13 @@ class ProfilerHelper {
profiler_->Dispose();
}
- typedef v8::CpuProfilingMode ProfilingMode;
+ using ProfilingMode = v8::CpuProfilingMode;
- v8::CpuProfile* Run(v8::Local<v8::Function> function,
- v8::Local<v8::Value> argv[], int argc,
- unsigned min_js_samples = 0,
- unsigned min_external_samples = 0,
- bool collect_samples = false,
- ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers);
+ v8::CpuProfile* Run(
+ v8::Local<v8::Function> function, v8::Local<v8::Value> argv[], int argc,
+ unsigned min_js_samples = 0, unsigned min_external_samples = 0,
+ ProfilingMode mode = ProfilingMode::kLeafNodeLineNumbers,
+ unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);
v8::CpuProfiler* profiler() { return profiler_; }
@@ -454,11 +453,11 @@ v8::CpuProfile* ProfilerHelper::Run(v8::Local<v8::Function> function,
v8::Local<v8::Value> argv[], int argc,
unsigned min_js_samples,
unsigned min_external_samples,
- bool collect_samples, ProfilingMode mode) {
+ ProfilingMode mode, unsigned max_samples) {
v8::Local<v8::String> profile_name = v8_str("my_profile");
profiler_->SetSamplingInterval(100);
- profiler_->StartProfiling(profile_name, mode, collect_samples);
+ profiler_->StartProfiling(profile_name, {mode, max_samples});
v8::internal::CpuProfiler* iprofiler =
reinterpret_cast<v8::internal::CpuProfiler*>(profiler_);
@@ -666,11 +665,11 @@ TEST(CollectCpuProfileCallerLineNumbers) {
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
ProfilerHelper helper(env.local());
- helper.Run(function, args, arraysize(args), 1000, 0, false,
- v8::CpuProfilingMode::kCallerLineNumbers);
+ helper.Run(function, args, arraysize(args), 1000, 0,
+ v8::CpuProfilingMode::kCallerLineNumbers, 0);
v8::CpuProfile* profile =
- helper.Run(function, args, arraysize(args), 1000, 0, false,
- v8::CpuProfilingMode::kCallerLineNumbers);
+ helper.Run(function, args, arraysize(args), 1000, 0,
+ v8::CpuProfilingMode::kCallerLineNumbers, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(root, {"start", 27});
@@ -752,7 +751,7 @@ TEST(CollectCpuProfileSamples) {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
ProfilerHelper helper(env.local());
v8::CpuProfile* profile =
- helper.Run(function, args, arraysize(args), 1000, 0, true);
+ helper.Run(function, args, arraysize(args), 1000, 0);
CHECK_LE(200, profile->GetSamplesCount());
uint64_t end_time = profile->GetEndTime();
@@ -1112,13 +1111,17 @@ static void TickLines(bool optimize) {
i::HandleScope scope(isolate);
i::EmbeddedVector<char, 512> script;
+ i::EmbeddedVector<char, 64> prepare_opt;
i::EmbeddedVector<char, 64> optimize_call;
const char* func_name = "func";
if (optimize) {
+ i::SNPrintF(prepare_opt, "%%PrepareFunctionForOptimization(%s);\n",
+ func_name);
i::SNPrintF(optimize_call, "%%OptimizeFunctionOnNextCall(%s);\n",
func_name);
} else {
+ prepare_opt[0] = '\0';
optimize_call[0] = '\0';
}
i::SNPrintF(script,
@@ -1130,22 +1133,24 @@ static void TickLines(bool optimize) {
" n += m * m * m;\n"
" }\n"
"}\n"
+ "%s"
"%s();\n"
"%s"
"%s();\n",
- func_name, func_name, optimize_call.start(), func_name);
+ func_name, prepare_opt.begin(), func_name, optimize_call.begin(),
+ func_name);
- CompileRun(script.start());
+ CompileRun(script.begin());
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*GetFunction(env.local(), func_name)));
CHECK(!func->shared().is_null());
- CHECK(!func->shared()->abstract_code().is_null());
+ CHECK(!func->shared().abstract_code().is_null());
CHECK(!optimize || func->IsOptimized() ||
!CcTest::i_isolate()->use_optimizer());
i::AbstractCode code = func->abstract_code();
CHECK(!code.is_null());
- i::Address code_address = code->raw_instruction_start();
+ i::Address code_address = code.raw_instruction_start();
CHECK_NE(code_address, kNullAddress);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
@@ -1153,8 +1158,8 @@ static void TickLines(bool optimize) {
ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
CcTest::i_isolate(), generator,
v8::base::TimeDelta::FromMicroseconds(100), true);
- CpuProfiler profiler(isolate, profiles, generator, processor);
- profiles->StartProfiling("", false);
+ CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+ profiles->StartProfiling("");
// TODO(delphick): Stop using the CpuProfiler internals here: This forces
// LogCompiledFunctions so that source positions are collected everywhere.
// This would normally happen automatically with CpuProfiler::StartProfiling
@@ -1647,12 +1652,6 @@ TEST(JsNativeJsRuntimeJsSampleMultiple) {
}
static const char* inlining_test_source =
- "%NeverOptimizeFunction(action);\n"
- "%NeverOptimizeFunction(start);\n"
- "level1();\n"
- "%OptimizeFunctionOnNextCall(level1);\n"
- "%OptimizeFunctionOnNextCall(level2);\n"
- "%OptimizeFunctionOnNextCall(level3);\n"
"var finish = false;\n"
"function action(n) {\n"
" var s = 0;\n"
@@ -1670,7 +1669,16 @@ static const char* inlining_test_source =
" level1();\n"
" finish = true;\n"
" level1();\n"
- "}";
+ "}"
+ "%PrepareFunctionForOptimization(level1);\n"
+ "%PrepareFunctionForOptimization(level2);\n"
+ "%PrepareFunctionForOptimization(level3);\n"
+ "%NeverOptimizeFunction(action);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "level1();\n"
+ "%OptimizeFunctionOnNextCall(level1);\n"
+ "%OptimizeFunctionOnNextCall(level2);\n"
+ "%OptimizeFunctionOnNextCall(level3);\n";
// The test check multiple entrances/exits between JS and native code.
//
@@ -1710,15 +1718,6 @@ TEST(Inlining) {
}
static const char* inlining_test_source2 = R"(
- %NeverOptimizeFunction(action);
- %NeverOptimizeFunction(start);
- level1();
- level1();
- %OptimizeFunctionOnNextCall(level1);
- %OptimizeFunctionOnNextCall(level2);
- %OptimizeFunctionOnNextCall(level3);
- %OptimizeFunctionOnNextCall(level4);
- level1();
function action(n) {
var s = 0;
for (var i = 0; i < n; ++i) s += i*i*i;
@@ -1746,6 +1745,19 @@ static const char* inlining_test_source2 = R"(
while (--n)
level1();
};
+ %NeverOptimizeFunction(action);
+ %NeverOptimizeFunction(start);
+ %PrepareFunctionForOptimization(level1);
+ %PrepareFunctionForOptimization(level2);
+ %PrepareFunctionForOptimization(level3);
+ %PrepareFunctionForOptimization(level4);
+ level1();
+ level1();
+ %OptimizeFunctionOnNextCall(level1);
+ %OptimizeFunctionOnNextCall(level2);
+ %OptimizeFunctionOnNextCall(level3);
+ %OptimizeFunctionOnNextCall(level4);
+ level1();
)";
// The simulator builds are extremely slow. We run them with fewer iterations.
@@ -1781,7 +1793,9 @@ const double load_factor = 1.0;
// 2 (program):0 0 #2
TEST(Inlining2) {
FLAG_allow_natives_syntax = true;
- v8::HandleScope scope(CcTest::isolate());
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
+ v8::HandleScope scope(isolate);
v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
v8::Context::Scope context_scope(env);
@@ -1790,8 +1804,9 @@ TEST(Inlining2) {
v8::CpuProfiler* profiler = v8::CpuProfiler::New(CcTest::isolate());
v8::Local<v8::String> profile_name = v8_str("inlining");
- profiler->StartProfiling(profile_name,
- v8::CpuProfilingMode::kCallerLineNumbers);
+ profiler->StartProfiling(
+ profile_name,
+ CpuProfilingOptions{v8::CpuProfilingMode::kCallerLineNumbers});
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), 50000 * load_factor)};
@@ -1805,39 +1820,225 @@ TEST(Inlining2) {
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
- NameLinePair l421_a17[] = {{"level1", 36},
- {"level2", 32},
- {"level3", 26},
- {"level4", 21},
- {"action", 17}};
+ NameLinePair l421_a17[] = {{"level1", 27},
+ {"level2", 23},
+ {"level3", 17},
+ {"level4", 12},
+ {"action", 8}};
CheckBranch(start_node, l421_a17, arraysize(l421_a17));
- NameLinePair l422_a17[] = {{"level1", 36},
- {"level2", 32},
- {"level3", 26},
- {"level4", 22},
- {"action", 17}};
+ NameLinePair l422_a17[] = {{"level1", 27},
+ {"level2", 23},
+ {"level3", 17},
+ {"level4", 13},
+ {"action", 8}};
CheckBranch(start_node, l422_a17, arraysize(l422_a17));
- NameLinePair l421_a18[] = {{"level1", 36},
- {"level2", 32},
- {"level3", 26},
- {"level4", 21},
- {"action", 18}};
+ NameLinePair l421_a18[] = {{"level1", 27},
+ {"level2", 23},
+ {"level3", 17},
+ {"level4", 12},
+ {"action", 9}};
CheckBranch(start_node, l421_a18, arraysize(l421_a18));
- NameLinePair l422_a18[] = {{"level1", 36},
- {"level2", 32},
- {"level3", 26},
- {"level4", 22},
- {"action", 18}};
+ NameLinePair l422_a18[] = {{"level1", 27},
+ {"level2", 23},
+ {"level3", 17},
+ {"level4", 13},
+ {"action", 9}};
CheckBranch(start_node, l422_a18, arraysize(l422_a18));
- NameLinePair action_direct[] = {{"level1", 36}, {"action", 30}};
+ NameLinePair action_direct[] = {{"level1", 27}, {"action", 21}};
CheckBranch(start_node, action_direct, arraysize(action_direct));
profile->Delete();
profiler->Dispose();
}
+static const char* cross_script_source_a = R"(
+
+
+
+
+
+ %NeverOptimizeFunction(action);
+ function action(n) {
+ var s = 0;
+ for (var i = 0; i < n; ++i) s += i*i*i;
+ return s;
+ }
+ function level1() {
+ const a = action(1);
+ const b = action(200);
+ const c = action(1);
+ return a + b + c;
+ }
+ )";
+
+static const char* cross_script_source_b = R"(
+ %PrepareFunctionForOptimization(start);
+ %PrepareFunctionForOptimization(level1);
+ start(1);
+ start(1);
+ %OptimizeFunctionOnNextCall(start);
+ %OptimizeFunctionOnNextCall(level1);
+ start(1);
+ function start(n) {
+ while (--n)
+ level1();
+ };
+ )";
+
+TEST(CrossScriptInliningCallerLineNumbers) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::CpuProfiler::UseDetailedSourcePositionsForProfiling(isolate);
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
+ v8::Context::Scope context_scope(env);
+
+ v8::Local<v8::Script> script_a =
+ CompileWithOrigin(cross_script_source_a, "script_a", false);
+ v8::Local<v8::Script> script_b =
+ CompileWithOrigin(cross_script_source_b, "script_b", false);
+
+ script_a->Run(env).ToLocalChecked();
+ script_b->Run(env).ToLocalChecked();
+
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+
+ v8::CpuProfiler* profiler = v8::CpuProfiler::New(CcTest::isolate());
+ v8::Local<v8::String> profile_name = v8_str("inlining");
+ profiler->StartProfiling(profile_name,
+ v8::CpuProfilingMode::kCallerLineNumbers);
+
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), 20000 * load_factor)};
+ function->Call(env, env->Global(), arraysize(args), args).ToLocalChecked();
+ v8::CpuProfile* profile = profiler->StopProfiling(profile_name);
+ CHECK(profile);
+
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(profile)->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+ CHECK_EQ(0, strcmp("script_b", start_node->GetScriptResourceNameStr()));
+
+ NameLinePair l19_a10[] = {{"level1", 11}, {"action", 15}};
+ CheckBranch(start_node, l19_a10, arraysize(l19_a10));
+
+ const v8::CpuProfileNode* level1_node = GetChild(env, start_node, "level1");
+ CHECK_EQ(0, strcmp("script_a", level1_node->GetScriptResourceNameStr()));
+
+ const v8::CpuProfileNode* action_node = GetChild(env, level1_node, "action");
+ CHECK_EQ(0, strcmp("script_a", action_node->GetScriptResourceNameStr()));
+
+ profile->Delete();
+ profiler->Dispose();
+}
+
+static const char* cross_script_source_c = R"(
+ function level3() {
+ const a = action(1);
+ const b = action(100);
+ const c = action(1);
+ return a + b + c;
+ }
+ %NeverOptimizeFunction(action);
+ function action(n) {
+ var s = 0;
+ for (var i = 0; i < n; ++i) s += i*i*i;
+ return s;
+ }
+ )";
+
+static const char* cross_script_source_d = R"(
+ function level2() {
+ const p = level3();
+ const q = level3();
+ return p + q;
+ }
+ )";
+
+static const char* cross_script_source_e = R"(
+ function level1() {
+ return level2() + 1000;
+ }
+ )";
+
+static const char* cross_script_source_f = R"(
+ %PrepareFunctionForOptimization(start);
+ %PrepareFunctionForOptimization(level1);
+ %PrepareFunctionForOptimization(level2);
+ %PrepareFunctionForOptimization(level3);
+ start(1);
+ start(1);
+ %OptimizeFunctionOnNextCall(start);
+ %OptimizeFunctionOnNextCall(level1);
+ %OptimizeFunctionOnNextCall(level2);
+ %OptimizeFunctionOnNextCall(level3);
+ start(1);
+ function start(n) {
+ while (--n)
+ level1();
+ };
+ )";
+
+TEST(CrossScriptInliningCallerLineNumbers2) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext({PROFILER_EXTENSION_ID});
+ v8::Context::Scope context_scope(env);
+
+ v8::Local<v8::Script> script_c =
+ CompileWithOrigin(cross_script_source_c, "script_c", false);
+ v8::Local<v8::Script> script_d =
+ CompileWithOrigin(cross_script_source_d, "script_d", false);
+ v8::Local<v8::Script> script_e =
+ CompileWithOrigin(cross_script_source_e, "script_e", false);
+ v8::Local<v8::Script> script_f =
+ CompileWithOrigin(cross_script_source_f, "script_f", false);
+
+ script_c->Run(env).ToLocalChecked();
+ script_d->Run(env).ToLocalChecked();
+ script_e->Run(env).ToLocalChecked();
+ script_f->Run(env).ToLocalChecked();
+
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+
+ v8::CpuProfiler* profiler = v8::CpuProfiler::New(CcTest::isolate());
+ v8::Local<v8::String> profile_name = v8_str("inlining");
+ profiler->StartProfiling(profile_name,
+ v8::CpuProfilingMode::kCallerLineNumbers);
+
+ v8::Local<v8::Value> args[] = {
+ v8::Integer::New(env->GetIsolate(), 20000 * load_factor)};
+ function->Call(env, env->Global(), arraysize(args), args).ToLocalChecked();
+ v8::CpuProfile* profile = profiler->StopProfiling(profile_name);
+ CHECK(profile);
+
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(profile)->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+ CHECK_EQ(0, strcmp("script_f", start_node->GetScriptResourceNameStr()));
+
+ const v8::CpuProfileNode* level1_node = GetChild(env, start_node, "level1");
+ CHECK_EQ(0, strcmp("script_e", level1_node->GetScriptResourceNameStr()));
+
+ const v8::CpuProfileNode* level2_node = GetChild(env, level1_node, "level2");
+ CHECK_EQ(0, strcmp("script_d", level2_node->GetScriptResourceNameStr()));
+
+ const v8::CpuProfileNode* level3_node = GetChild(env, level2_node, "level3");
+ CHECK_EQ(0, strcmp("script_c", level3_node->GetScriptResourceNameStr()));
+
+ const v8::CpuProfileNode* action_node = GetChild(env, level3_node, "action");
+ CHECK_EQ(0, strcmp("script_c", action_node->GetScriptResourceNameStr()));
+
+ profile->Delete();
+ profiler->Dispose();
+}
+
// [Top down]:
// 0 (root) #0 1
// 2 (program) #0 2
@@ -1982,6 +2183,7 @@ TEST(FunctionDetailsInlining) {
"\n"
"\n"
"// Warm up before profiling or the inlining doesn't happen.\n"
+ "%PrepareFunctionForOptimization(alpha);\n"
"p = alpha(p);\n"
"p = alpha(p);\n"
"%OptimizeFunctionOnNextCall(alpha);\n"
@@ -2097,7 +2299,7 @@ TEST(CollectDeoptEvents) {
for (int i = 0; i < 3; ++i) {
i::EmbeddedVector<char, sizeof(opt_source) + 100> buffer;
i::SNPrintF(buffer, opt_source, i, i);
- v8::Script::Compile(env, v8_str(buffer.start()))
+ v8::Script::Compile(env, v8_str(buffer.begin()))
.ToLocalChecked()
->Run(env)
.ToLocalChecked();
@@ -2106,6 +2308,8 @@ TEST(CollectDeoptEvents) {
const char* source =
"startProfiling();\n"
"\n"
+ "%PrepareFunctionForOptimization(opt_function0);\n"
+ "\n"
"opt_function0(1, 1);\n"
"\n"
"%OptimizeFunctionOnNextCall(opt_function0)\n"
@@ -2114,6 +2318,8 @@ TEST(CollectDeoptEvents) {
"\n"
"opt_function0(undefined, 1);\n"
"\n"
+ "%PrepareFunctionForOptimization(opt_function1);\n"
+ "\n"
"opt_function1(1, 1);\n"
"\n"
"%OptimizeFunctionOnNextCall(opt_function1)\n"
@@ -2122,6 +2328,8 @@ TEST(CollectDeoptEvents) {
"\n"
"opt_function1(NaN, 1);\n"
"\n"
+ "%PrepareFunctionForOptimization(opt_function2);\n"
+ "\n"
"opt_function2(1, 1);\n"
"\n"
"%OptimizeFunctionOnNextCall(opt_function2)\n"
@@ -2225,6 +2433,8 @@ TEST(DeoptAtFirstLevelInlinedSource) {
"\n"
"startProfiling();\n"
"\n"
+ "%PrepareFunctionForOptimization(test);\n"
+ "\n"
"test(10, 10);\n"
"\n"
"%OptimizeFunctionOnNextCall(test)\n"
@@ -2296,6 +2506,10 @@ TEST(DeoptAtSecondLevelInlinedSource) {
"\n"
"startProfiling();\n"
"\n"
+ "%EnsureFeedbackVectorForFunction(opt_function);\n"
+ "%EnsureFeedbackVectorForFunction(test2);\n"
+ "%PrepareFunctionForOptimization(test1);\n"
+ "\n"
"test1(10, 10);\n"
"\n"
"%OptimizeFunctionOnNextCall(test1)\n"
@@ -2368,6 +2582,9 @@ TEST(DeoptUntrackedFunction) {
const char* source =
"function test(left, right) { return opt_function(left, right); }\n"
"\n"
+ "%EnsureFeedbackVectorForFunction(opt_function);"
+ "%PrepareFunctionForOptimization(test);\n"
+ "\n"
"test(10, 10);\n"
"\n"
"%OptimizeFunctionOnNextCall(test)\n"
@@ -2448,6 +2665,11 @@ TEST(TracingCpuProfiler) {
i::V8::GetCurrentPlatform()->GetTracingController());
tracing_controller->Initialize(ring_buffer);
+#ifdef V8_USE_PERFETTO
+ std::ostringstream perfetto_output;
+ tracing_controller->InitializeForPerfetto(&perfetto_output);
+#endif
+
bool result = false;
for (int run_duration = 50; !result; run_duration += 50) {
TraceConfig* trace_config = new TraceConfig();
@@ -2521,6 +2743,7 @@ TEST(Issue763073) {
"function f() { return function g(x) { }; }"
// Create first closure, optimize it, and deoptimize it.
"var g = f();"
+ "%PrepareFunctionForOptimization(g);\n"
"g(1);"
"%OptimizeFunctionOnNextCall(g);"
"g(1);"
@@ -2528,6 +2751,7 @@ TEST(Issue763073) {
// Create second closure, and optimize it. This will create another
// optimized code object and put in the (shared) type feedback vector.
"var h = f();"
+ "%PrepareFunctionForOptimization(h);\n"
"h(1);"
"%OptimizeFunctionOnNextCall(h);"
"h(1);");
@@ -2637,7 +2861,7 @@ TEST(NativeFrameStackTrace) {
ProfilerHelper helper(env);
- v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100, 0, true);
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100, 0);
// Count the fraction of samples landing in 'jsFunction' (valid stack)
// vs '(program)' (no stack captured).
@@ -2745,7 +2969,7 @@ TEST(MultipleProfilersSampleIndependently) {
std::unique_ptr<CpuProfiler> slow_profiler(
new CpuProfiler(CcTest::i_isolate()));
slow_profiler->set_sampling_interval(base::TimeDelta::FromSeconds(1));
- slow_profiler->StartProfiling("1", true);
+ slow_profiler->StartProfiling("1", {kLeafNodeLineNumbers});
CompileRun(R"(
function start() {
@@ -2758,7 +2982,7 @@ TEST(MultipleProfilersSampleIndependently) {
)");
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
ProfilerHelper helper(env.local());
- v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100, 0, true);
+ v8::CpuProfile* profile = helper.Run(function, nullptr, 0, 100, 0);
auto slow_profile = slow_profiler->StopProfiling("1");
CHECK_GT(profile->GetSamplesCount(), slow_profile->samples_count());
@@ -2824,7 +3048,7 @@ TEST(FastStopProfiling) {
std::unique_ptr<CpuProfiler> profiler(new CpuProfiler(CcTest::i_isolate()));
profiler->set_sampling_interval(kLongInterval);
- profiler->StartProfiling("", true);
+ profiler->StartProfiling("", {kLeafNodeLineNumbers});
v8::Platform* platform = v8::internal::V8::GetCurrentPlatform();
double start = platform->CurrentClockTimeMillis();
@@ -2857,6 +3081,262 @@ TEST(LowPrecisionSamplingStartStopPublic) {
cpu_profiler->Dispose();
}
+const char* naming_test_source = R"(
+ (function testAssignmentPropertyNamedFunction() {
+ let object = {};
+ object.propNamed = function () {
+ CallCollectSample();
+ };
+ object.propNamed();
+ })();
+ )";
+
+TEST(StandardNaming) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(env->GetIsolate(), CallCollectSample);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env.local()).ToLocalChecked();
+ func->SetName(v8_str("CallCollectSample"));
+ env->Global()->Set(env.local(), v8_str("CallCollectSample"), func).FromJust();
+
+ v8::CpuProfiler* profiler =
+ v8::CpuProfiler::New(env->GetIsolate(), kStandardNaming);
+
+ const auto profile_name = v8_str("");
+ profiler->StartProfiling(profile_name);
+ CompileRun(naming_test_source);
+ auto* profile = profiler->StopProfiling(profile_name);
+
+ auto* root = profile->GetTopDownRoot();
+ auto* toplevel = FindChild(root, "");
+ DCHECK(toplevel);
+
+ auto* prop_assignment_named_test =
+ GetChild(env.local(), toplevel, "testAssignmentPropertyNamedFunction");
+ CHECK(FindChild(prop_assignment_named_test, ""));
+
+ profiler->Dispose();
+}
+
+TEST(DebugNaming) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(env->GetIsolate(), CallCollectSample);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env.local()).ToLocalChecked();
+ func->SetName(v8_str("CallCollectSample"));
+ env->Global()->Set(env.local(), v8_str("CallCollectSample"), func).FromJust();
+
+ v8::CpuProfiler* profiler =
+ v8::CpuProfiler::New(env->GetIsolate(), kDebugNaming);
+
+ const auto profile_name = v8_str("");
+ profiler->StartProfiling(profile_name);
+ CompileRun(naming_test_source);
+ auto* profile = profiler->StopProfiling(profile_name);
+
+ auto* root = profile->GetTopDownRoot();
+ auto* toplevel = FindChild(root, "");
+ DCHECK(toplevel);
+
+ auto* prop_assignment_named_test =
+ GetChild(env.local(), toplevel, "testAssignmentPropertyNamedFunction");
+ CHECK(FindChild(prop_assignment_named_test, "object.propNamed"));
+
+ profiler->Dispose();
+}
+
+TEST(SampleLimit) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ CompileRun(R"(
+ function start() {
+ let val = 1;
+ for (let i = 0; i < 10e3; i++) {
+ val = (val * 2) % 3;
+ }
+ return val;
+ }
+ )");
+
+ // Take 100 samples of `start`, but set the max samples to 50.
+ v8::Local<v8::Function> function = GetFunction(env.local(), "start");
+ ProfilerHelper helper(env.local());
+ v8::CpuProfile* profile =
+ helper.Run(function, nullptr, 0, 100, 0,
+ v8::CpuProfilingMode::kLeafNodeLineNumbers, 50);
+
+ CHECK_EQ(profile->GetSamplesCount(), 50);
+}
+
+// Tests that a CpuProfile instance subsamples from a stream of tick samples
+// appropriately.
+TEST(ProflilerSubsampling) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
+ ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
+ isolate, generator, v8::base::TimeDelta::FromMicroseconds(1),
+ /* use_precise_sampling */ true);
+ CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+
+ // Create a new CpuProfile that wants samples at 8us.
+ CpuProfile profile(&profiler, "",
+ {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 8});
+ // Verify that the first sample is always included.
+ CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(10)));
+
+ // 4 2us samples should result in one 8us sample.
+ CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
+ CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
+ CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
+ CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
+
+ // Profiles should expect the source sample interval to change, in which case
+ // they should still take the first sample elapsed after their interval.
+ CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
+ CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
+ CHECK(!profile.CheckSubsample(base::TimeDelta::FromMicroseconds(2)));
+ CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(4)));
+
+ // Aligned samples (at 8us) are always included.
+ CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(8)));
+
+ // Samples with a rate of 0 should always be included.
+ CHECK(profile.CheckSubsample(base::TimeDelta::FromMicroseconds(0)));
+}
+
+// Tests that the base sampling rate of a CpuProfilesCollection is dynamically
+// chosen based on the GCD of its child profiles.
+TEST(DynamicResampling) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
+ ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
+ isolate, generator, v8::base::TimeDelta::FromMicroseconds(1),
+ /* use_precise_sampling */ true);
+ CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+
+ // Set a 1us base sampling rate, dividing all possible intervals.
+ profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(1));
+
+ // Verify that the sampling interval with no started profilers is unset.
+ CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
+
+ // Add a 10us profiler, verify that the base sampling interval is as high as
+ // possible (10us).
+ profiles->StartProfiling("10us",
+ {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 10});
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(10));
+
+ // Add a 5us profiler, verify that the base sampling interval is as high as
+ // possible given a 10us and 5us profiler (5us).
+ profiles->StartProfiling("5us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 5});
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(5));
+
+ // Add a 3us profiler, verify that the base sampling interval is 1us (due to
+ // coprime intervals).
+ profiles->StartProfiling("3us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 3});
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(1));
+
+ // Remove the 5us profiler, verify that the sample interval stays at 1us.
+ profiles->StopProfiling("5us");
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(1));
+
+ // Remove the 10us profiler, verify that the sample interval becomes 3us.
+ profiles->StopProfiling("10us");
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(3));
+
+ // Remove the 3us profiler, verify that the sample interval becomes unset.
+ profiles->StopProfiling("3us");
+ CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
+}
+
+// Ensures that when a non-unit base sampling interval is set on the profiler,
+// that the sampling rate gets snapped to the nearest multiple prior to GCD
+// computation.
+TEST(DynamicResamplingWithBaseInterval) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
+ ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfilerEventsProcessor* processor = new SamplingEventsProcessor(
+ isolate, generator, v8::base::TimeDelta::FromMicroseconds(1),
+ /* use_precise_sampling */ true);
+ CpuProfiler profiler(isolate, kDebugNaming, profiles, generator, processor);
+
+ profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(7));
+
+ // Verify that the sampling interval with no started profilers is unset.
+ CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
+
+ // Add a profiler with an unset sampling interval, verify that the common
+ // sampling interval is equal to the base.
+ profiles->StartProfiling("unset", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit});
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(7));
+ profiles->StopProfiling("unset");
+
+ // Adding a 8us sampling interval rounds to a 14us base interval.
+ profiles->StartProfiling("8us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 8});
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(14));
+
+ // Adding a 4us sampling interval should cause a lowering to a 7us interval.
+ profiles->StartProfiling("4us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 4});
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(7));
+
+ // Removing the 4us sampling interval should restore the 14us sampling
+ // interval.
+ profiles->StopProfiling("4us");
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(14));
+
+ // Removing the 8us sampling interval should unset the common sampling
+ // interval.
+ profiles->StopProfiling("8us");
+ CHECK_EQ(profiles->GetCommonSamplingInterval(), base::TimeDelta());
+
+ // A sampling interval of 0us should enforce all profiles to have a sampling
+ // interval of 0us (the only multiple of 0).
+ profiler.set_sampling_interval(base::TimeDelta::FromMicroseconds(0));
+ profiles->StartProfiling("5us", {v8::CpuProfilingMode::kLeafNodeLineNumbers,
+ v8::CpuProfilingOptions::kNoSampleLimit, 5});
+ CHECK_EQ(profiles->GetCommonSamplingInterval(),
+ base::TimeDelta::FromMicroseconds(0));
+ profiles->StopProfiling("5us");
+}
+
enum class EntryCountMode { kAll, kOnlyInlined };
// Count the number of unique source positions.
@@ -2893,6 +3373,7 @@ UNINITIALIZED_TEST(DetailedSourcePositionAPI) {
" return fib(i - 1) +"
" fib(i - 2);"
"}"
+ "%PrepareFunctionForOptimization(fib);\n"
"fib(5);"
"%OptimizeFunctionOnNextCall(fib);"
"fib(5);"
@@ -2943,6 +3424,8 @@ UNINITIALIZED_TEST(DetailedSourcePositionAPI_Inlining) {
return x;
}
+ %EnsureFeedbackVectorForFunction(bar);
+ %PrepareFunctionForOptimization(foo);
foo(5);
%OptimizeFunctionOnNextCall(foo);
foo(5);
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 66e5441ed1..a836f23172 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -25,10 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/date.h"
-#include "src/global-handles.h"
-#include "src/isolate.h"
-#include "src/v8.h"
+#include "src/date/date.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 34b2e0fef4..82ebc8ca46 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -27,18 +27,18 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
-#include "src/compilation-cache.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/compilation-cache.h"
#include "src/debug/debug-interface.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/frames.h"
-#include "src/objects-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frames.h"
+#include "src/objects/objects-inl.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
#include "test/cctest/cctest.h"
using ::v8::internal::Handle;
@@ -84,7 +84,7 @@ static i::Handle<i::BreakPoint> SetBreakPoint(v8::Local<v8::Function> fun,
const char* condition = nullptr) {
i::Handle<i::JSFunction> function =
i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*fun));
- position += function->shared()->StartPosition();
+ position += function->shared().StartPosition();
static int break_point_index = 0;
i::Isolate* isolate = function->GetIsolate();
i::Handle<i::String> condition_string =
@@ -94,7 +94,8 @@ static i::Handle<i::BreakPoint> SetBreakPoint(v8::Local<v8::Function> fun,
i::Handle<i::BreakPoint> break_point =
isolate->factory()->NewBreakPoint(++break_point_index, condition_string);
- debug->SetBreakPoint(function, break_point, &position);
+ debug->SetBreakpoint(handle(function->shared(), isolate), break_point,
+ &position);
return break_point;
}
@@ -167,7 +168,7 @@ void CheckDebuggerUnloaded() {
HeapIterator iterator(CcTest::heap());
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- CHECK(!obj->IsDebugInfo());
+ CHECK(!obj.IsDebugInfo());
}
}
@@ -568,6 +569,110 @@ TEST(BreakPointBuiltin) {
CheckDebuggerUnloaded();
}
+TEST(BreakPointApiIntrinsics) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ DebugEventCounter delegate;
+ v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
+
+ v8::Local<v8::Function> builtin;
+
+ // === Test that using API-exposed functions won't trigger breakpoints ===
+ {
+ v8::Local<v8::Function> weakmap_get =
+ CompileRun("WeakMap.prototype.get").As<v8::Function>();
+ SetBreakPoint(weakmap_get, 0);
+ v8::Local<v8::Function> weakmap_set =
+ CompileRun("WeakMap.prototype.set").As<v8::Function>();
+ SetBreakPoint(weakmap_set, 0);
+
+ // Run with breakpoint.
+ break_point_hit_count = 0;
+ CompileRun("var w = new WeakMap(); w.set(w, 1); w.get(w);");
+ CHECK_EQ(2, break_point_hit_count);
+
+ break_point_hit_count = 0;
+ v8::Local<v8::debug::WeakMap> weakmap =
+ v8::debug::WeakMap::New(env->GetIsolate());
+ CHECK(!weakmap->Set(env.local(), weakmap, v8_num(1)).IsEmpty());
+ CHECK(!weakmap->Get(env.local(), weakmap).IsEmpty());
+ CHECK_EQ(0, break_point_hit_count);
+ }
+
+ {
+ v8::Local<v8::Function> object_to_string =
+ CompileRun("Object.prototype.toString").As<v8::Function>();
+ SetBreakPoint(object_to_string, 0);
+
+ // Run with breakpoint.
+ break_point_hit_count = 0;
+ CompileRun("var o = {}; o.toString();");
+ CHECK_EQ(1, break_point_hit_count);
+
+ break_point_hit_count = 0;
+ v8::Local<v8::Object> object = v8::Object::New(env->GetIsolate());
+ CHECK(!object->ObjectProtoToString(env.local()).IsEmpty());
+ CHECK_EQ(0, break_point_hit_count);
+ }
+
+ {
+ v8::Local<v8::Function> map_set =
+ CompileRun("Map.prototype.set").As<v8::Function>();
+ v8::Local<v8::Function> map_get =
+ CompileRun("Map.prototype.get").As<v8::Function>();
+ v8::Local<v8::Function> map_has =
+ CompileRun("Map.prototype.has").As<v8::Function>();
+ v8::Local<v8::Function> map_delete =
+ CompileRun("Map.prototype.delete").As<v8::Function>();
+ SetBreakPoint(map_set, 0);
+ SetBreakPoint(map_get, 0);
+ SetBreakPoint(map_has, 0);
+ SetBreakPoint(map_delete, 0);
+
+ // Run with breakpoint.
+ break_point_hit_count = 0;
+ CompileRun(
+ "var m = new Map(); m.set(m, 1); m.get(m); m.has(m); m.delete(m);");
+ CHECK_EQ(4, break_point_hit_count);
+
+ break_point_hit_count = 0;
+ v8::Local<v8::Map> map = v8::Map::New(env->GetIsolate());
+ CHECK(!map->Set(env.local(), map, v8_num(1)).IsEmpty());
+ CHECK(!map->Get(env.local(), map).IsEmpty());
+ CHECK(map->Has(env.local(), map).FromJust());
+ CHECK(map->Delete(env.local(), map).FromJust());
+ CHECK_EQ(0, break_point_hit_count);
+ }
+
+ {
+ v8::Local<v8::Function> set_add =
+ CompileRun("Set.prototype.add").As<v8::Function>();
+ v8::Local<v8::Function> set_get =
+ CompileRun("Set.prototype.has").As<v8::Function>();
+ v8::Local<v8::Function> set_delete =
+ CompileRun("Set.prototype.delete").As<v8::Function>();
+ SetBreakPoint(set_add, 0);
+ SetBreakPoint(set_get, 0);
+ SetBreakPoint(set_delete, 0);
+
+ // Run with breakpoint.
+ break_point_hit_count = 0;
+ CompileRun("var s = new Set(); s.add(s); s.has(s); s.delete(s);");
+ CHECK_EQ(3, break_point_hit_count);
+
+ break_point_hit_count = 0;
+ v8::Local<v8::Set> set = v8::Set::New(env->GetIsolate());
+ CHECK(!set->Add(env.local(), set).IsEmpty());
+ CHECK(set->Has(env.local(), set).FromJust());
+ CHECK(set->Delete(env.local(), set).FromJust());
+ CHECK_EQ(0, break_point_hit_count);
+ }
+
+ v8::debug::SetDebugDelegate(env->GetIsolate(), nullptr);
+ CheckDebuggerUnloaded();
+}
+
TEST(BreakPointJSBuiltin) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -710,6 +815,7 @@ TEST(BreakPointInlinedBuiltin) {
builtin = CompileRun("Math.sin").As<v8::Function>();
CompileRun("function test(x) { return 1 + Math.sin(x) }");
CompileRun(
+ "%PrepareFunctionForOptimization(test);"
"test(0.5); test(0.6);"
"%OptimizeFunctionOnNextCall(test); test(0.7);");
CHECK_EQ(0, break_point_hit_count);
@@ -722,7 +828,9 @@ TEST(BreakPointInlinedBuiltin) {
CHECK_EQ(2, break_point_hit_count);
// Re-optimize.
- CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun(
+ "%PrepareFunctionForOptimization(test);"
+ "%OptimizeFunctionOnNextCall(test);");
ExpectBoolean("test(0.3) < 2", true);
CHECK_EQ(3, break_point_hit_count);
@@ -755,6 +863,7 @@ TEST(BreakPointInlineBoundBuiltin) {
.As<v8::Function>();
CompileRun("function test(x) { return 'a' + boundrepeat(x) }");
CompileRun(
+ "%PrepareFunctionForOptimization(test);"
"test(4); test(5);"
"%OptimizeFunctionOnNextCall(test); test(6);");
CHECK_EQ(0, break_point_hit_count);
@@ -767,7 +876,9 @@ TEST(BreakPointInlineBoundBuiltin) {
CHECK_EQ(2, break_point_hit_count);
// Re-optimize.
- CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun(
+ "%PrepareFunctionForOptimization(f);"
+ "%OptimizeFunctionOnNextCall(test);");
CompileRun("test(8);");
CHECK_EQ(3, break_point_hit_count);
@@ -797,6 +908,7 @@ TEST(BreakPointInlinedConstructorBuiltin) {
builtin = CompileRun("Promise").As<v8::Function>();
CompileRun("function test(x) { return new Promise(()=>x); }");
CompileRun(
+ "%PrepareFunctionForOptimization(test);"
"test(4); test(5);"
"%OptimizeFunctionOnNextCall(test); test(6);");
CHECK_EQ(0, break_point_hit_count);
@@ -809,7 +921,9 @@ TEST(BreakPointInlinedConstructorBuiltin) {
CHECK_EQ(2, break_point_hit_count);
// Re-optimize.
- CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun(
+ "%PrepareFunctionForOptimization(f);"
+ "%OptimizeFunctionOnNextCall(test);");
CompileRun("test(8);");
CHECK_EQ(3, break_point_hit_count);
@@ -840,6 +954,7 @@ TEST(BreakPointBuiltinConcurrentOpt) {
CompileRun("function test(x) { return 1 + Math.sin(x) }");
// Trigger concurrent compile job. It is suspended until unblock.
CompileRun(
+ "%PrepareFunctionForOptimization(test);"
"test(0.5); test(0.6);"
"%OptimizeFunctionOnNextCall(test, 'concurrent'); test(0.7);");
CHECK_EQ(0, break_point_hit_count);
@@ -878,6 +993,7 @@ TEST(BreakPointBuiltinTFOperator) {
builtin = CompileRun("String.prototype.indexOf").As<v8::Function>();
CompileRun("function test(x) { return 1 + 'foo'.indexOf(x) }");
CompileRun(
+ "%PrepareFunctionForOptimization(f);"
"test('a'); test('b');"
"%OptimizeFunctionOnNextCall(test); test('c');");
CHECK_EQ(0, break_point_hit_count);
@@ -890,7 +1006,9 @@ TEST(BreakPointBuiltinTFOperator) {
CHECK_EQ(2, break_point_hit_count);
// Re-optimize.
- CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun(
+ "%PrepareFunctionForOptimization(f);"
+ "%OptimizeFunctionOnNextCall(test);");
CompileRun("test('e');");
CHECK_EQ(3, break_point_hit_count);
@@ -1210,7 +1328,9 @@ TEST(BreakPointInlineApiFunction) {
function_template->GetFunction(env.local()).ToLocalChecked();
env->Global()->Set(env.local(), v8_str("f"), function).ToChecked();
- CompileRun("function g() { return 1 + f(); }");
+ CompileRun(
+ "function g() { return 1 + f(); };"
+ "%PrepareFunctionForOptimization(g);");
// === Test simple builtin ===
break_point_hit_count = 0;
@@ -1381,6 +1501,7 @@ TEST(BreakPointInlining) {
CompileRun("function f(x) { return x*2; } f").As<v8::Function>();
CompileRun("function test(x) { return 1 + f(x) }");
CompileRun(
+ "%PrepareFunctionForOptimization(test);"
"test(0.5); test(0.6);"
"%OptimizeFunctionOnNextCall(test); test(0.7);");
CHECK_EQ(0, break_point_hit_count);
@@ -1393,7 +1514,9 @@ TEST(BreakPointInlining) {
CHECK_EQ(2, break_point_hit_count);
// Re-optimize.
- CompileRun("%OptimizeFunctionOnNextCall(test);");
+ CompileRun(
+ "%PrepareFunctionForOptimization(test);"
+ "%OptimizeFunctionOnNextCall(test);");
CompileRun("test(0.3);");
CHECK_EQ(3, break_point_hit_count);
@@ -2690,7 +2813,7 @@ TEST(PauseInScript) {
// Set breakpoint in the script.
i::Handle<i::Script> i_script(
- i::Script::cast(v8::Utils::OpenHandle(*script)->shared()->script()),
+ i::Script::cast(v8::Utils::OpenHandle(*script)->shared().script()),
isolate);
i::Handle<i::String> condition = isolate->factory()->empty_string();
int position = 0;
@@ -3036,7 +3159,7 @@ class EmptyExternalStringResource : public v8::String::ExternalStringResource {
EmptyExternalStringResource() { empty_[0] = 0; }
~EmptyExternalStringResource() override = default;
size_t length() const override { return empty_.length(); }
- const uint16_t* data() const override { return empty_.start(); }
+ const uint16_t* data() const override { return empty_.begin(); }
private:
::v8::internal::EmbeddedVector<uint16_t, 1> empty_;
@@ -3072,11 +3195,11 @@ TEST(DebugScriptLineEndsAreAscending) {
v8::internal::Script::InitLineEnds(script);
v8::internal::FixedArray ends =
v8::internal::FixedArray::cast(script->line_ends());
- CHECK_GT(ends->length(), 0);
+ CHECK_GT(ends.length(), 0);
int prev_end = -1;
- for (int j = 0; j < ends->length(); j++) {
- const int curr_end = v8::internal::Smi::ToInt(ends->get(j));
+ for (int j = 0; j < ends.length(); j++) {
+ const int curr_end = v8::internal::Smi::ToInt(ends.get(j));
CHECK_GT(curr_end, prev_end);
prev_end = curr_end;
}
@@ -3449,7 +3572,7 @@ static void TestDebugBreakInLoop(const char* loop_head,
SNPrintF(buffer, "function f() {%s%s%s}", loop_head, loop_bodies[i],
loop_tail);
- i::PrintF("%s\n", buffer.start());
+ i::PrintF("%s\n", buffer.begin());
for (int j = 0; j < 3; j++) {
break_point_hit_count_deoptimize = j;
@@ -3462,7 +3585,7 @@ static void TestDebugBreakInLoop(const char* loop_head,
terminate_after_max_break_point_hit = true;
// Function with infinite loop.
- CompileRun(buffer.start());
+ CompileRun(buffer.begin());
// Set the debug break to enter the debugger as soon as possible.
v8::debug::SetBreakOnNextFunctionCall(CcTest::isolate());
@@ -3575,7 +3698,7 @@ class DebugBreakInlineListener : public v8::debug::DebugDelegate {
const std::vector<v8::debug::BreakpointId>&
inspector_break_points_hit) override {
int expected_frame_count = 4;
- int expected_line_number[] = {1, 4, 7, 12};
+ int expected_line_number[] = {1, 4, 7, 13};
int frame_count = 0;
auto iterator = v8::debug::StackTraceIterator::Create(CcTest::isolate());
@@ -3593,18 +3716,19 @@ TEST(DebugBreakInline) {
v8::HandleScope scope(env->GetIsolate());
v8::Local<v8::Context> context = env.local();
const char* source =
- "function debug(b) { \n"
- " if (b) debugger; \n"
- "} \n"
- "function f(b) { \n"
- " debug(b) \n"
- "}; \n"
- "function g(b) { \n"
- " f(b); \n"
- "}; \n"
- "g(false); \n"
- "g(false); \n"
- "%OptimizeFunctionOnNextCall(g); \n"
+ "function debug(b) { \n"
+ " if (b) debugger; \n"
+ "} \n"
+ "function f(b) { \n"
+ " debug(b) \n"
+ "}; \n"
+ "function g(b) { \n"
+ " f(b); \n"
+ "}; \n"
+ "%PrepareFunctionForOptimization(g); \n"
+ "g(false); \n"
+ "g(false); \n"
+ "%OptimizeFunctionOnNextCall(g); \n"
"g(true);";
DebugBreakInlineListener delegate;
v8::debug::SetDebugDelegate(env->GetIsolate(), &delegate);
@@ -4047,8 +4171,6 @@ UNINITIALIZED_TEST(DebugSetOutOfMemoryListener) {
}
TEST(DebugCoverage) {
- // Coverage needs feedback vectors.
- if (i::FLAG_lite_mode) return;
i::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -4103,8 +4225,6 @@ v8::debug::Coverage::ScriptData GetScriptDataAndDeleteCoverage(
} // namespace
TEST(DebugCoverageWithCoverageOutOfScope) {
- // Coverage needs feedback vectors.
- if (i::FLAG_lite_mode) return;
i::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -4175,8 +4295,6 @@ v8::debug::Coverage::FunctionData GetFunctionDataAndDeleteCoverage(
} // namespace
TEST(DebugCoverageWithScriptDataOutOfScope) {
- // Coverage needs feedback vectors.
- if (i::FLAG_lite_mode) return;
i::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -4207,8 +4325,8 @@ TEST(BuiltinsExceptionPrediction) {
bool fail = false;
for (int i = 0; i < i::Builtins::builtin_count; i++) {
i::Code builtin = builtins->builtin(i);
- if (builtin->kind() != i::Code::BUILTIN) continue;
- auto prediction = builtin->GetBuiltinCatchPrediction();
+ if (builtin.kind() != i::Code::BUILTIN) continue;
+ auto prediction = builtin.GetBuiltinCatchPrediction();
USE(prediction);
}
CHECK(!fail);
@@ -4253,7 +4371,7 @@ TEST(DebugEvaluateNoSideEffect) {
i::HeapIterator iterator(isolate->heap());
for (i::HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (!obj->IsJSFunction()) continue;
+ if (!obj.IsJSFunction()) continue;
i::JSFunction fun = i::JSFunction::cast(obj);
all_functions.emplace_back(fun, isolate);
}
@@ -4279,7 +4397,7 @@ i::MaybeHandle<i::Script> FindScript(
Handle<i::String> i_name =
isolate->factory()->NewStringFromAsciiChecked(name);
for (const auto& script : scripts) {
- if (!script->name()->IsString()) continue;
+ if (!script->name().IsString()) continue;
if (i_name->Equals(i::String::cast(script->name()))) return script;
}
return i::MaybeHandle<i::Script>();
@@ -4307,11 +4425,11 @@ UNINITIALIZED_TEST(LoadedAtStartupScripts) {
i::Script::Iterator iterator(i_isolate);
for (i::Script script = iterator.Next(); !script.is_null();
script = iterator.Next()) {
- if (script->type() == i::Script::TYPE_NATIVE &&
- script->name()->IsUndefined(i_isolate)) {
+ if (script.type() == i::Script::TYPE_NATIVE &&
+ script.name().IsUndefined(i_isolate)) {
continue;
}
- ++count_by_type[script->type()];
+ ++count_by_type[script.type()];
scripts.emplace_back(script, i_isolate);
}
}
@@ -4368,7 +4486,7 @@ TEST(SourceInfo) {
v8::Local<v8::Script> v8_script =
v8::Script::Compile(env.local(), v8_str(source)).ToLocalChecked();
i::Handle<i::Script> i_script(
- i::Script::cast(v8::Utils::OpenHandle(*v8_script)->shared()->script()),
+ i::Script::cast(v8::Utils::OpenHandle(*v8_script)->shared().script()),
CcTest::i_isolate());
v8::Local<v8::debug::Script> script =
v8::ToApiHandle<v8::debug::Script>(i_script);
@@ -4527,8 +4645,6 @@ TEST(GetPrivateFields) {
v8::internal::Isolate* isolate = CcTest::i_isolate();
v8::HandleScope scope(v8_isolate);
v8::Local<v8::Context> context = env.local();
- v8::internal::FLAG_harmony_class_fields = true;
- v8::internal::FLAG_harmony_private_fields = true;
v8::Local<v8::String> source = v8_str(
"var X = class {\n"
" #foo = 1;\n"
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 2e9bc90fac..14c30cd362 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -27,7 +27,7 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
@@ -865,8 +865,10 @@ TEST(CrossScriptLoadICs) {
SimpleContext context;
context.Check(
"x = 15;"
- "function f() { return x; }"
- "function g() { return x; }"
+ "function f() { return x; };"
+ "function g() { return x; };"
+ "%PrepareFunctionForOptimization(f);"
+ "%PrepareFunctionForOptimization(g);"
"f()",
EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
context.Check(
@@ -890,6 +892,7 @@ TEST(CrossScriptLoadICs) {
context.Check(
"x = 15;"
"function f() { return x; }"
+ "%PrepareFunctionForOptimization(f);"
"f()",
EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
for (int k = 0; k < 3; k++) {
@@ -900,6 +903,7 @@ TEST(CrossScriptLoadICs) {
context.Check(
"'use strict';"
"let x = 5;"
+ "%PrepareFunctionForOptimization(f);"
"f()",
EXPECT_RESULT, Number::New(CcTest::isolate(), 5));
for (int k = 0; k < 3; k++) {
@@ -921,8 +925,10 @@ TEST(CrossScriptStoreICs) {
context.Check(
"var global = this;"
"x = 15;"
- "function f(v) { x = v; }"
- "function g(v) { x = v; }"
+ "function f(v) { x = v; };"
+ "function g(v) { x = v; };"
+ "%PrepareFunctionForOptimization(f);"
+ "%PrepareFunctionForOptimization(g);"
"f(10); x",
EXPECT_RESULT, Number::New(CcTest::isolate(), 10));
context.Check(
@@ -958,7 +964,8 @@ TEST(CrossScriptStoreICs) {
context.Check(
"var global = this;"
"x = 15;"
- "function f(v) { x = v; }"
+ "function f(v) { x = v; };"
+ "%PrepareFunctionForOptimization(f);"
"f(10); x",
EXPECT_RESULT, Number::New(CcTest::isolate(), 10));
for (int k = 0; k < 3; k++) {
@@ -980,8 +987,10 @@ TEST(CrossScriptStoreICs) {
}
context.Check("global.x", EXPECT_RESULT,
Number::New(CcTest::isolate(), 20));
- context.Check("%OptimizeFunctionOnNextCall(f); f(41); x", EXPECT_RESULT,
- Number::New(CcTest::isolate(), 41));
+ context.Check(
+ "%PrepareFunctionForOptimization(f);"
+ "%OptimizeFunctionOnNextCall(f); f(41); x",
+ EXPECT_RESULT, Number::New(CcTest::isolate(), 41));
context.Check("global.x", EXPECT_RESULT,
Number::New(CcTest::isolate(), 20));
}
@@ -1000,7 +1009,7 @@ TEST(CrossScriptAssignmentToConst) {
Undefined(CcTest::isolate()));
context.Check("'use strict';const x = 1; x", EXPECT_RESULT,
Number::New(CcTest::isolate(), 1));
- context.Check("f();", EXPECT_EXCEPTION);
+ context.Check("%PrepareFunctionForOptimization(f);f();", EXPECT_EXCEPTION);
context.Check("x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
context.Check("f();", EXPECT_EXCEPTION);
context.Check("x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
@@ -1055,8 +1064,10 @@ TEST(Regress3941) {
{
// Optimize.
SimpleContext context;
- context.Check("function f() { x = 1; }", EXPECT_RESULT,
- Undefined(CcTest::isolate()));
+ context.Check(
+ "function f() { x = 1; };"
+ "%PrepareFunctionForOptimization(f);",
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
for (int i = 0; i < 4; i++) {
context.Check("f(); x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
}
@@ -1096,8 +1107,10 @@ TEST(Regress3941_Reads) {
{
// Optimize.
SimpleContext context;
- context.Check("function f() { return x; }", EXPECT_RESULT,
- Undefined(CcTest::isolate()));
+ context.Check(
+ "function f() { return x; };"
+ "%PrepareFunctionForOptimization(f);",
+ EXPECT_RESULT, Undefined(CcTest::isolate()));
for (int i = 0; i < 4; i++) {
context.Check("f()", EXPECT_EXCEPTION);
}
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 544a0f587d..acd3ac2f59 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -27,15 +27,15 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
-#include "src/compilation-cache.h"
+#include "src/codegen/compilation-cache.h"
#include "src/debug/debug.h"
-#include "src/deoptimizer.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
using ::v8::base::OS;
@@ -482,7 +482,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
SNPrintF(f_source_buffer,
"function f(x, y) { return x %s y; };",
binary_op);
- char* f_source = f_source_buffer.start();
+ char* f_source = f_source_buffer.begin();
AllowNativesSyntaxNoInlining options;
// Compile function f and collect to type feedback to insert binary op stub
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index a06c18df02..1f4a4c59a0 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -25,18 +25,18 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "src/builtins/builtins-constructor.h"
#include "src/debug/debug.h"
-#include "src/execution.h"
-#include "src/global-handles.h"
+#include "src/execution/execution.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/heap/spaces.h"
-#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
-#include "src/roots.h"
+#include "src/objects/objects-inl.h"
+#include "src/roots/roots.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
@@ -86,17 +86,17 @@ static void TestHashMap(Handle<HashMap> table) {
CHECK_EQ(table->NumberOfElements(), i + 1);
CHECK_NE(table->FindEntry(isolate, key), HashMap::kNotFound);
CHECK_EQ(table->Lookup(key), *value);
- CHECK(key->GetIdentityHash()->IsSmi());
+ CHECK(key->GetIdentityHash().IsSmi());
}
// Keys never added to the map which already have an identity hash
// code should not be found.
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
- CHECK(key->GetOrCreateIdentityHash(isolate)->IsSmi());
+ CHECK(key->GetOrCreateIdentityHash(isolate).IsSmi());
CHECK_EQ(table->FindEntry(isolate, key), HashMap::kNotFound);
CHECK_EQ(table->Lookup(key), roots.the_hole_value());
- CHECK(key->GetIdentityHash()->IsSmi());
+ CHECK(key->GetIdentityHash().IsSmi());
}
// Keys that don't have an identity hash should not be found and also
@@ -157,16 +157,16 @@ static void TestHashSet(Handle<HashSet> table) {
table = HashSet::Add(isolate, table, key);
CHECK_EQ(table->NumberOfElements(), i + 2);
CHECK(table->Has(isolate, key));
- CHECK(key->GetIdentityHash()->IsSmi());
+ CHECK(key->GetIdentityHash().IsSmi());
}
// Keys never added to the map which already have an identity hash
// code should not be found.
for (int i = 0; i < 100; i++) {
Handle<JSReceiver> key = factory->NewJSArray(7);
- CHECK(key->GetOrCreateIdentityHash(isolate)->IsSmi());
+ CHECK(key->GetOrCreateIdentityHash(isolate).IsSmi());
CHECK(!table->Has(isolate, key));
- CHECK(key->GetIdentityHash()->IsSmi());
+ CHECK(key->GetIdentityHash().IsSmi());
}
// Keys that don't have an identity hash should not be found and also
@@ -215,26 +215,26 @@ TEST(HashTableRehash) {
{
Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 100);
ObjectHashTableTest t(*table);
- int capacity = t->capacity();
+ int capacity = t.capacity();
for (int i = 0; i < capacity - 1; i++) {
- t->insert(i, i * i, i);
+ t.insert(i, i * i, i);
}
- t->Rehash(ReadOnlyRoots(isolate));
+ t.Rehash(ReadOnlyRoots(isolate));
for (int i = 0; i < capacity - 1; i++) {
- CHECK_EQ(i, t->lookup(i * i));
+ CHECK_EQ(i, t.lookup(i * i));
}
}
// Test half-filled table.
{
Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 100);
ObjectHashTableTest t(*table);
- int capacity = t->capacity();
+ int capacity = t.capacity();
for (int i = 0; i < capacity / 2; i++) {
- t->insert(i, i * i, i);
+ t.insert(i, i * i, i);
}
- t->Rehash(ReadOnlyRoots(isolate));
+ t.Rehash(ReadOnlyRoots(isolate));
for (int i = 0; i < capacity / 2; i++) {
- CHECK_EQ(i, t->lookup(i * i));
+ CHECK_EQ(i, t.lookup(i * i));
}
}
}
@@ -285,7 +285,7 @@ static void TestHashMapDoesNotCauseGC(Handle<HashMap> table) {
heap::SimulateFullSpace(CcTest::heap()->old_space());
// Calling Lookup() should not cause GC ever.
- CHECK(table->Lookup(key)->IsTheHole(isolate));
+ CHECK(table->Lookup(key).IsTheHole(isolate));
// Calling Put() should request GC by returning a failure.
int gc_count = isolate->heap()->gc_count();
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index a3244d37ee..76e06df47e 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -26,22 +26,23 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-#include <stdlib.h>
+#include <cinttypes>
+#include <cstdlib>
// The C++ style guide recommends using <re2> instead of <regex>. However, the
// former isn't available in V8.
#include <regex> // NOLINT(build/c++11)
-#include "src/assembler-inl.h"
-#include "src/boxed-float.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/double.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
+#include "src/init/v8.h"
+#include "src/numbers/double.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/boxed-float.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -63,7 +64,7 @@ bool DisassembleAndCompare(byte* begin, UseRegex use_regex,
std::vector<std::string> disassembly;
for (byte* pc = begin; pc < end;) {
pc += disasm.InstructionDecode(buffer, pc);
- disassembly.emplace_back(buffer.start());
+ disassembly.emplace_back(buffer.begin());
}
bool test_passed = true;
@@ -140,9 +141,9 @@ bool DisassembleAndCompare(byte* begin, UseRegex use_regex,
// Verify that all invocations of the COMPARE macro passed successfully.
// Exit with a failure if at least one of the tests failed.
-#define VERIFY_RUN() \
-if (failure) { \
- V8_Fatal(__FILE__, __LINE__, "ARM Disassembler tests failed.\n"); \
+#define VERIFY_RUN() \
+ if (failure) { \
+ FATAL("ARM Disassembler tests failed.\n"); \
}
// clang-format off
diff --git a/deps/v8/test/cctest/test-disasm-arm64.cc b/deps/v8/test/cctest/test-disasm-arm64.cc
index a401e031f9..ed4fe6c6e0 100644
--- a/deps/v8/test/cctest/test-disasm-arm64.cc
+++ b/deps/v8/test/cctest/test-disasm-arm64.cc
@@ -28,13 +28,13 @@
#include <stdio.h>
#include <cstring>
-#include "src/arm64/assembler-arm64.h"
-#include "src/arm64/decoder-arm64-inl.h"
-#include "src/arm64/disasm-arm64.h"
-#include "src/arm64/utils-arm64.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler-inl.h"
-#include "src/v8.h"
+#include "src/codegen/arm64/assembler-arm64.h"
+#include "src/codegen/arm64/decoder-arm64-inl.h"
+#include "src/codegen/arm64/utils-arm64.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/diagnostics/arm64/disasm-arm64.h"
+#include "src/execution/frames-inl.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index aeaf47f99c..f2c7e71a89 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -27,15 +27,15 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
-#include "src/ostreams.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
+#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -902,8 +902,7 @@ TEST(DisasmIa320) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
USE(code);
#ifdef OBJECT_PRINT
StdoutStream os;
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index 5a4f14fe9b..929e4e4777 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -28,13 +28,13 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -54,13 +54,13 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm.InstructionDecode(disasm_buffer, pc);
- if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+ if (strcmp(compare_string, disasm_buffer.begin()) != 0) {
fprintf(stderr,
"expected: \n"
"%s\n"
"disassembled: \n"
"%s\n\n",
- compare_string, disasm_buffer.start());
+ compare_string, disasm_buffer.begin());
return false;
}
return true;
@@ -94,9 +94,9 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// Verify that all invocations of the COMPARE macro passed successfully.
// Exit with a failure if at least one of the tests failed.
-#define VERIFY_RUN() \
-if (failure) { \
- V8_Fatal(__FILE__, __LINE__, "MIPS Disassembler tests failed.\n"); \
+#define VERIFY_RUN() \
+ if (failure) { \
+ FATAL("MIPS Disassembler tests failed.\n"); \
}
#define COMPARE_PC_REL_COMPACT(asm_, compare_string, offset) \
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index ad71c1598a..4e6324589d 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -28,13 +28,13 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -54,13 +54,13 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm.InstructionDecode(disasm_buffer, pc);
- if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+ if (strcmp(compare_string, disasm_buffer.begin()) != 0) {
fprintf(stderr,
"expected: \n"
"%s\n"
"disassembled: \n"
"%s\n\n",
- compare_string, disasm_buffer.start());
+ compare_string, disasm_buffer.begin());
return false;
}
return true;
@@ -94,9 +94,9 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// Verify that all invocations of the COMPARE macro passed successfully.
// Exit with a failure if at least one of the tests failed.
-#define VERIFY_RUN() \
-if (failure) { \
- V8_Fatal(__FILE__, __LINE__, "MIPS Disassembler tests failed.\n"); \
+#define VERIFY_RUN() \
+ if (failure) { \
+ FATAL("MIPS Disassembler tests failed.\n"); \
}
#define COMPARE_PC_REL_COMPACT(asm_, compare_string, offset) \
diff --git a/deps/v8/test/cctest/test-disasm-ppc.cc b/deps/v8/test/cctest/test-disasm-ppc.cc
index b64402b383..f28ebf4dac 100644
--- a/deps/v8/test/cctest/test-disasm-ppc.cc
+++ b/deps/v8/test/cctest/test-disasm-ppc.cc
@@ -28,13 +28,13 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -47,13 +47,13 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm.InstructionDecode(disasm_buffer, pc);
- if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+ if (strcmp(compare_string, disasm_buffer.begin()) != 0) {
fprintf(stderr,
"expected: \n"
"%s\n"
"disassembled: \n"
"%s\n\n",
- compare_string, disasm_buffer.start());
+ compare_string, disasm_buffer.begin());
return false;
}
return true;
@@ -90,9 +90,9 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// Verify that all invocations of the COMPARE macro passed successfully.
// Exit with a failure if at least one of the tests failed.
-#define VERIFY_RUN() \
- if (failure) { \
- V8_Fatal(__FILE__, __LINE__, "PPC Disassembler tests failed.\n"); \
+#define VERIFY_RUN() \
+ if (failure) { \
+ FATAL("PPC Disassembler tests failed.\n"); \
}
TEST(DisasmPPC) {
diff --git a/deps/v8/test/cctest/test-disasm-s390.cc b/deps/v8/test/cctest/test-disasm-s390.cc
index 8e664f0b03..a01760b6ba 100644
--- a/deps/v8/test/cctest/test-disasm-s390.cc
+++ b/deps/v8/test/cctest/test-disasm-s390.cc
@@ -28,13 +28,13 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -47,13 +47,13 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm.InstructionDecode(disasm_buffer, pc);
- if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+ if (strcmp(compare_string, disasm_buffer.begin()) != 0) {
fprintf(stderr,
"expected: \n"
"%s\n"
"disassembled: \n"
"%s\n\n",
- compare_string, disasm_buffer.start());
+ compare_string, disasm_buffer.begin());
return false;
}
return true;
@@ -88,9 +88,9 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// Verify that all invocations of the COMPARE macro passed successfully.
// Exit with a failure if at least one of the tests failed.
-#define VERIFY_RUN() \
- if (failure) { \
- V8_Fatal(__FILE__, __LINE__, "S390 Disassembler tests failed.\n"); \
+#define VERIFY_RUN() \
+ if (failure) { \
+ FATAL("S390 Disassembler tests failed.\n"); \
}
TEST(TwoBytes) {
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index c9abec3f8d..c84f502f23 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -27,16 +27,16 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/disasm.h"
-#include "src/disassembler.h"
-#include "src/frames-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -969,8 +969,7 @@ TEST(DisasmX64) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
USE(code);
#ifdef OBJECT_PRINT
StdoutStream os;
diff --git a/deps/v8/test/cctest/test-diy-fp.cc b/deps/v8/test/cctest/test-diy-fp.cc
index d46d0519d6..96bc69143a 100644
--- a/deps/v8/test/cctest/test-diy-fp.cc
+++ b/deps/v8/test/cctest/test-diy-fp.cc
@@ -27,10 +27,10 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/platform/platform.h"
-#include "src/diy-fp.h"
+#include "src/numbers/diy-fp.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-double.cc b/deps/v8/test/cctest/test-double.cc
index 46d6d55cc1..9941620fbb 100644
--- a/deps/v8/test/cctest/test-double.cc
+++ b/deps/v8/test/cctest/test-double.cc
@@ -27,11 +27,11 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/platform/platform.h"
-#include "src/diy-fp.h"
-#include "src/double.h"
+#include "src/numbers/diy-fp.h"
+#include "src/numbers/double.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-dtoa.cc b/deps/v8/test/cctest/test-dtoa.cc
index 8f156d9f2a..56d5f05ebf 100644
--- a/deps/v8/test/cctest/test-dtoa.cc
+++ b/deps/v8/test/cctest/test-dtoa.cc
@@ -27,12 +27,12 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/dtoa.h"
+#include "src/numbers/dtoa.h"
#include "src/base/platform/platform.h"
-#include "src/double.h"
+#include "src/numbers/double.h"
#include "test/cctest/cctest.h"
#include "test/cctest/gay-fixed.h"
#include "test/cctest/gay-precision.h"
@@ -42,17 +42,14 @@ namespace v8 {
namespace internal {
namespace test_dtoa {
-// Removes trailing '0' digits.
-static void TrimRepresentation(Vector<char> representation) {
- int len = StrLength(representation.start());
- int i;
- for (i = len - 1; i >= 0; --i) {
- if (representation[i] != '0') break;
- }
- representation[i + 1] = '\0';
+// Removes trailing '0' digits (modifies {representation}). Can create an empty
+// string if all digits are 0.
+static void TrimRepresentation(char* representation) {
+ size_t len = strlen(representation);
+ while (len > 0 && representation[len - 1] == '0') --len;
+ representation[len] = '\0';
}
-
static const int kBufferSize = 100;
@@ -64,202 +61,202 @@ TEST(DtoaVariousDoubles) {
int sign;
DoubleToAscii(0.0, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("0", buffer.start()));
+ CHECK_EQ(0, strcmp("0", buffer.begin()));
CHECK_EQ(1, point);
DoubleToAscii(0.0, DTOA_FIXED, 2, buffer, &sign, &length, &point);
CHECK_EQ(1, length);
- CHECK_EQ(0, strcmp("0", buffer.start()));
+ CHECK_EQ(0, strcmp("0", buffer.begin()));
CHECK_EQ(1, point);
DoubleToAscii(0.0, DTOA_PRECISION, 3, buffer, &sign, &length, &point);
CHECK_EQ(1, length);
- CHECK_EQ(0, strcmp("0", buffer.start()));
+ CHECK_EQ(0, strcmp("0", buffer.begin()));
CHECK_EQ(1, point);
DoubleToAscii(1.0, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
DoubleToAscii(1.0, DTOA_FIXED, 3, buffer, &sign, &length, &point);
CHECK_GE(3, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
DoubleToAscii(1.0, DTOA_PRECISION, 3, buffer, &sign, &length, &point);
CHECK_GE(3, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
DoubleToAscii(1.5, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("15", buffer.start()));
+ CHECK_EQ(0, strcmp("15", buffer.begin()));
CHECK_EQ(1, point);
DoubleToAscii(1.5, DTOA_FIXED, 10, buffer, &sign, &length, &point);
CHECK_GE(10, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("15", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("15", buffer.begin()));
CHECK_EQ(1, point);
DoubleToAscii(1.5, DTOA_PRECISION, 10, buffer, &sign, &length, &point);
CHECK_GE(10, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("15", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("15", buffer.begin()));
CHECK_EQ(1, point);
double min_double = 5e-324;
DoubleToAscii(min_double, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("5", buffer.start()));
+ CHECK_EQ(0, strcmp("5", buffer.begin()));
CHECK_EQ(-323, point);
DoubleToAscii(min_double, DTOA_FIXED, 5, buffer, &sign, &length, &point);
CHECK_GE(5, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("", buffer.begin()));
CHECK_GE(-5, point);
DoubleToAscii(min_double, DTOA_PRECISION, 5, buffer, &sign, &length, &point);
CHECK_GE(5, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("49407", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("49407", buffer.begin()));
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
DoubleToAscii(max_double, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("17976931348623157", buffer.start()));
+ CHECK_EQ(0, strcmp("17976931348623157", buffer.begin()));
CHECK_EQ(309, point);
DoubleToAscii(max_double, DTOA_PRECISION, 7, buffer, &sign, &length, &point);
CHECK_GE(7, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("1797693", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("1797693", buffer.begin()));
CHECK_EQ(309, point);
DoubleToAscii(4294967272.0, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("4294967272", buffer.start()));
+ CHECK_EQ(0, strcmp("4294967272", buffer.begin()));
CHECK_EQ(10, point);
DoubleToAscii(4294967272.0, DTOA_FIXED, 5, buffer, &sign, &length, &point);
CHECK_GE(5, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("4294967272", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("4294967272", buffer.begin()));
CHECK_EQ(10, point);
DoubleToAscii(4294967272.0, DTOA_PRECISION, 14,
buffer, &sign, &length, &point);
CHECK_GE(14, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("4294967272", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("4294967272", buffer.begin()));
CHECK_EQ(10, point);
DoubleToAscii(4.1855804968213567e298, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("4185580496821357", buffer.start()));
+ CHECK_EQ(0, strcmp("4185580496821357", buffer.begin()));
CHECK_EQ(299, point);
DoubleToAscii(4.1855804968213567e298, DTOA_PRECISION, 20,
buffer, &sign, &length, &point);
CHECK_GE(20, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("41855804968213567225", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("41855804968213567225", buffer.begin()));
CHECK_EQ(299, point);
DoubleToAscii(5.5626846462680035e-309, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("5562684646268003", buffer.start()));
+ CHECK_EQ(0, strcmp("5562684646268003", buffer.begin()));
CHECK_EQ(-308, point);
DoubleToAscii(5.5626846462680035e-309, DTOA_PRECISION, 1,
buffer, &sign, &length, &point);
CHECK_GE(1, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("6", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("6", buffer.begin()));
CHECK_EQ(-308, point);
DoubleToAscii(-2147483648.0, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
CHECK_EQ(1, sign);
- CHECK_EQ(0, strcmp("2147483648", buffer.start()));
+ CHECK_EQ(0, strcmp("2147483648", buffer.begin()));
CHECK_EQ(10, point);
DoubleToAscii(-2147483648.0, DTOA_FIXED, 2, buffer, &sign, &length, &point);
CHECK_GE(2, length - point);
- TrimRepresentation(buffer);
+ TrimRepresentation(buffer.begin());
CHECK_EQ(1, sign);
- CHECK_EQ(0, strcmp("2147483648", buffer.start()));
+ CHECK_EQ(0, strcmp("2147483648", buffer.begin()));
CHECK_EQ(10, point);
DoubleToAscii(-2147483648.0, DTOA_PRECISION, 5,
buffer, &sign, &length, &point);
CHECK_GE(5, length);
- TrimRepresentation(buffer);
+ TrimRepresentation(buffer.begin());
CHECK_EQ(1, sign);
- CHECK_EQ(0, strcmp("21475", buffer.start()));
+ CHECK_EQ(0, strcmp("21475", buffer.begin()));
CHECK_EQ(10, point);
DoubleToAscii(-3.5844466002796428e+298, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
CHECK_EQ(1, sign);
- CHECK_EQ(0, strcmp("35844466002796428", buffer.start()));
+ CHECK_EQ(0, strcmp("35844466002796428", buffer.begin()));
CHECK_EQ(299, point);
DoubleToAscii(-3.5844466002796428e+298, DTOA_PRECISION, 10,
buffer, &sign, &length, &point);
CHECK_EQ(1, sign);
CHECK_GE(10, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("35844466", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("35844466", buffer.begin()));
CHECK_EQ(299, point);
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
DoubleToAscii(v, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("22250738585072014", buffer.start()));
+ CHECK_EQ(0, strcmp("22250738585072014", buffer.begin()));
CHECK_EQ(-307, point);
DoubleToAscii(v, DTOA_PRECISION, 20, buffer, &sign, &length, &point);
CHECK_GE(20, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("22250738585072013831", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("22250738585072013831", buffer.begin()));
CHECK_EQ(-307, point);
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
v = Double(largest_denormal64).value();
DoubleToAscii(v, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("2225073858507201", buffer.start()));
+ CHECK_EQ(0, strcmp("2225073858507201", buffer.begin()));
CHECK_EQ(-307, point);
DoubleToAscii(v, DTOA_PRECISION, 20, buffer, &sign, &length, &point);
CHECK_GE(20, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("2225073858507200889", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("2225073858507200889", buffer.begin()));
CHECK_EQ(-307, point);
DoubleToAscii(4128420500802942e-24, DTOA_SHORTEST, 0,
buffer, &sign, &length, &point);
CHECK_EQ(0, sign);
- CHECK_EQ(0, strcmp("4128420500802942", buffer.start()));
+ CHECK_EQ(0, strcmp("4128420500802942", buffer.begin()));
CHECK_EQ(-8, point);
v = -3.9292015898194142585311918e-10;
DoubleToAscii(v, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
- CHECK_EQ(0, strcmp("39292015898194143", buffer.start()));
+ CHECK_EQ(0, strcmp("39292015898194143", buffer.begin()));
v = 4194304.0;
DoubleToAscii(v, DTOA_FIXED, 5, buffer, &sign, &length, &point);
CHECK_GE(5, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("4194304", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("4194304", buffer.begin()));
v = 3.3161339052167390562200598e-237;
DoubleToAscii(v, DTOA_PRECISION, 19, buffer, &sign, &length, &point);
CHECK_GE(19, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("3316133905216739056", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("3316133905216739056", buffer.begin()));
CHECK_EQ(-236, point);
}
@@ -279,7 +276,7 @@ TEST(DtoaGayShortest) {
DoubleToAscii(v, DTOA_SHORTEST, 0, buffer, &sign, &length, &point);
CHECK_EQ(0, sign); // All precomputed numbers are positive.
CHECK_EQ(current_test.decimal_point, point);
- CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.begin()));
}
}
@@ -301,8 +298,8 @@ TEST(DtoaGayFixed) {
CHECK_EQ(0, sign); // All precomputed numbers are positive.
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length - point);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.begin()));
}
}
@@ -325,8 +322,8 @@ TEST(DtoaGayPrecision) {
CHECK_EQ(0, sign); // All precomputed numbers are positive.
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.begin()));
}
}
diff --git a/deps/v8/test/cctest/test-elements-kind.cc b/deps/v8/test/cctest/test-elements-kind.cc
index d7f6ccb852..d08f6200ab 100644
--- a/deps/v8/test/cctest/test-elements-kind.cc
+++ b/deps/v8/test/cctest/test-elements-kind.cc
@@ -7,15 +7,15 @@
#include "test/cctest/test-api.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/compilation-cache.h"
-#include "src/execution.h"
-#include "src/global-handles.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/execution.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/ic/stub-cache.h"
-#include "src/objects-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -37,7 +37,7 @@ Handle<String> MakeString(const char* str) {
Handle<String> MakeName(const char* str, int suffix) {
EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s%d", str, suffix);
- return MakeString(buffer.start());
+ return MakeString(buffer.begin());
}
template <typename T, typename M>
@@ -58,6 +58,41 @@ bool EQUALS(Isolate* isolate, T left, Handle<M> right) {
return EQUALS(isolate, handle(left, isolate), right);
}
+bool ElementsKindIsHoleyElementsKindForRead(ElementsKind kind) {
+ switch (kind) {
+ case ElementsKind::HOLEY_SMI_ELEMENTS:
+ case ElementsKind::HOLEY_ELEMENTS:
+ case ElementsKind::HOLEY_DOUBLE_ELEMENTS:
+ case ElementsKind::HOLEY_SEALED_ELEMENTS:
+ case ElementsKind::HOLEY_FROZEN_ELEMENTS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool ElementsKindIsHoleyElementsKind(ElementsKind kind) {
+ switch (kind) {
+ case ElementsKind::HOLEY_SMI_ELEMENTS:
+ case ElementsKind::HOLEY_ELEMENTS:
+ case ElementsKind::HOLEY_DOUBLE_ELEMENTS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool ElementsKindIsFastPackedElementsKind(ElementsKind kind) {
+ switch (kind) {
+ case ElementsKind::PACKED_SMI_ELEMENTS:
+ case ElementsKind::PACKED_ELEMENTS:
+ case ElementsKind::PACKED_DOUBLE_ELEMENTS:
+ return true;
+ default:
+ return false;
+ }
+}
+
} // namespace
@@ -95,8 +130,8 @@ TEST(JSObjectAddingProperties) {
JSObject::DefinePropertyOrElementIgnoreAttributes(object, name, value, NONE)
.Check();
CHECK_NE(object->map(), *previous_map);
- CHECK_EQ(HOLEY_ELEMENTS, object->map()->elements_kind());
- CHECK_LE(1, object->property_array()->length());
+ CHECK_EQ(HOLEY_ELEMENTS, object->map().elements_kind());
+ CHECK_LE(1, object->property_array().length());
CHECK(EQUALS(isolate, object->elements(), empty_fixed_array));
}
@@ -114,7 +149,7 @@ TEST(JSObjectInObjectAddingProperties) {
int nof_inobject_properties = 10;
// force in object properties by changing the expected_nof_properties
// (we always reserve 8 inobject properties slack on top).
- function->shared()->set_expected_nof_properties(nof_inobject_properties - 8);
+ function->shared().set_expected_nof_properties(nof_inobject_properties - 8);
Handle<Object> value(Smi::FromInt(42), isolate);
Handle<JSObject> object = factory->NewJSObject(function);
@@ -131,7 +166,7 @@ TEST(JSObjectInObjectAddingProperties) {
.Check();
}
CHECK_NE(object->map(), *previous_map);
- CHECK_EQ(HOLEY_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(HOLEY_ELEMENTS, object->map().elements_kind());
CHECK(EQUALS(isolate, object->property_array(), empty_property_array));
CHECK(EQUALS(isolate, object->elements(), empty_fixed_array));
@@ -142,9 +177,9 @@ TEST(JSObjectInObjectAddingProperties) {
JSObject::DefinePropertyOrElementIgnoreAttributes(object, name, value, NONE)
.Check();
CHECK_NE(object->map(), *previous_map);
- CHECK_EQ(HOLEY_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(HOLEY_ELEMENTS, object->map().elements_kind());
// there must be at least 1 element in the properies store
- CHECK_LE(1, object->property_array()->length());
+ CHECK_LE(1, object->property_array().length());
CHECK(EQUALS(isolate, object->elements(), empty_fixed_array));
}
@@ -174,9 +209,9 @@ TEST(JSObjectAddingElements) {
.Check();
// no change in elements_kind => no map transition
CHECK_EQ(object->map(), *previous_map);
- CHECK_EQ(HOLEY_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(HOLEY_ELEMENTS, object->map().elements_kind());
CHECK(EQUALS(isolate, object->property_array(), empty_property_array));
- CHECK_LE(1, object->elements()->length());
+ CHECK_LE(1, object->elements().length());
// Adding more consecutive elements without a change in the backing store
int non_dict_backing_store_limit = 100;
@@ -187,9 +222,9 @@ TEST(JSObjectAddingElements) {
}
// no change in elements_kind => no map transition
CHECK_EQ(object->map(), *previous_map);
- CHECK_EQ(HOLEY_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(HOLEY_ELEMENTS, object->map().elements_kind());
CHECK(EQUALS(isolate, object->property_array(), empty_property_array));
- CHECK_LE(non_dict_backing_store_limit, object->elements()->length());
+ CHECK_LE(non_dict_backing_store_limit, object->elements().length());
// Adding an element at an very large index causes a change to
// DICTIONARY_ELEMENTS
@@ -198,9 +233,9 @@ TEST(JSObjectAddingElements) {
.Check();
// change in elements_kind => map transition
CHECK_NE(object->map(), *previous_map);
- CHECK_EQ(DICTIONARY_ELEMENTS, object->map()->elements_kind());
+ CHECK_EQ(DICTIONARY_ELEMENTS, object->map().elements_kind());
CHECK(EQUALS(isolate, object->property_array(), empty_property_array));
- CHECK_LE(non_dict_backing_store_limit, object->elements()->length());
+ CHECK_LE(non_dict_backing_store_limit, object->elements().length());
}
@@ -229,8 +264,8 @@ TEST(JSArrayAddingProperties) {
.Check();
// No change in elements_kind but added property => new map
CHECK_NE(array->map(), *previous_map);
- CHECK_EQ(PACKED_SMI_ELEMENTS, array->map()->elements_kind());
- CHECK_LE(1, array->property_array()->length());
+ CHECK_EQ(PACKED_SMI_ELEMENTS, array->map().elements_kind());
+ CHECK_LE(1, array->property_array().length());
CHECK(EQUALS(isolate, array->elements(), empty_fixed_array));
CHECK_EQ(0, Smi::ToInt(array->length()));
}
@@ -261,9 +296,9 @@ TEST(JSArrayAddingElements) {
.Check();
// no change in elements_kind => no map transition
CHECK_EQ(array->map(), *previous_map);
- CHECK_EQ(PACKED_SMI_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(PACKED_SMI_ELEMENTS, array->map().elements_kind());
CHECK(EQUALS(isolate, array->property_array(), empty_property_array));
- CHECK_LE(1, array->elements()->length());
+ CHECK_LE(1, array->elements().length());
CHECK_EQ(1, Smi::ToInt(array->length()));
// Adding more consecutive elements without a change in the backing store
@@ -275,9 +310,9 @@ TEST(JSArrayAddingElements) {
}
// no change in elements_kind => no map transition
CHECK_EQ(array->map(), *previous_map);
- CHECK_EQ(PACKED_SMI_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(PACKED_SMI_ELEMENTS, array->map().elements_kind());
CHECK(EQUALS(isolate, array->property_array(), empty_property_array));
- CHECK_LE(non_dict_backing_store_limit, array->elements()->length());
+ CHECK_LE(non_dict_backing_store_limit, array->elements().length());
CHECK_EQ(non_dict_backing_store_limit, Smi::ToInt(array->length()));
// Adding an element at an very large index causes a change to
@@ -288,10 +323,10 @@ TEST(JSArrayAddingElements) {
.Check();
// change in elements_kind => map transition
CHECK_NE(array->map(), *previous_map);
- CHECK_EQ(DICTIONARY_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(DICTIONARY_ELEMENTS, array->map().elements_kind());
CHECK(EQUALS(isolate, array->property_array(), empty_property_array));
- CHECK_LE(non_dict_backing_store_limit, array->elements()->length());
- CHECK_LE(array->elements()->length(), index);
+ CHECK_LE(non_dict_backing_store_limit, array->elements().length());
+ CHECK_LE(array->elements().length(), index);
CHECK_EQ(index + 1, Smi::ToInt(array->length()));
}
@@ -320,14 +355,14 @@ TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) {
.Check();
// no change in elements_kind => no map transition
CHECK_EQ(array->map(), *previous_map);
- CHECK_EQ(PACKED_SMI_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(PACKED_SMI_ELEMENTS, array->map().elements_kind());
CHECK_EQ(1, Smi::ToInt(array->length()));
// `delete array[0]` does not alter length, but changes the elments_kind
name = MakeString("0");
CHECK(JSReceiver::DeletePropertyOrElement(array, name).FromMaybe(false));
CHECK_NE(array->map(), *previous_map);
- CHECK_EQ(HOLEY_SMI_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(HOLEY_SMI_ELEMENTS, array->map().elements_kind());
CHECK_EQ(1, Smi::ToInt(array->length()));
previous_map = handle(array->map(), isolate);
@@ -341,7 +376,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) {
NONE)
.Check();
CHECK_EQ(array->map(), *previous_map);
- CHECK_EQ(HOLEY_SMI_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(HOLEY_SMI_ELEMENTS, array->map().elements_kind());
CHECK_EQ(2, Smi::ToInt(array->length()));
// Adding a string to the array changes from FAST_HOLEY_SMI to FAST_HOLEY
@@ -350,7 +385,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) {
NONE)
.Check();
CHECK_NE(array->map(), *previous_map);
- CHECK_EQ(HOLEY_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(HOLEY_ELEMENTS, array->map().elements_kind());
CHECK_EQ(2, Smi::ToInt(array->length()));
previous_map = handle(array->map(), isolate);
@@ -393,14 +428,14 @@ TEST(JSArrayAddingElementsGeneralizingFastElements) {
.Check();
// no change in elements_kind => no map transition
CHECK_EQ(array->map(), *previous_map);
- CHECK_EQ(PACKED_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(PACKED_ELEMENTS, array->map().elements_kind());
CHECK_EQ(1, Smi::ToInt(array->length()));
// `delete array[0]` does not alter length, but changes the elments_kind
name = MakeString("0");
CHECK(JSReceiver::DeletePropertyOrElement(array, name).FromMaybe(false));
CHECK_NE(array->map(), *previous_map);
- CHECK_EQ(HOLEY_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(HOLEY_ELEMENTS, array->map().elements_kind());
CHECK_EQ(1, Smi::ToInt(array->length()));
previous_map = handle(array->map(), isolate);
@@ -414,7 +449,7 @@ TEST(JSArrayAddingElementsGeneralizingFastElements) {
NONE)
.Check();
CHECK_EQ(array->map(), *previous_map);
- CHECK_EQ(HOLEY_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(HOLEY_ELEMENTS, array->map().elements_kind());
CHECK_EQ(2, Smi::ToInt(array->length()));
}
@@ -440,7 +475,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) {
NONE)
.Check();
CHECK_NE(array->map(), *previous_map);
- CHECK_EQ(PACKED_DOUBLE_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(PACKED_DOUBLE_ELEMENTS, array->map().elements_kind());
CHECK_EQ(1, Smi::ToInt(array->length()));
previous_map = handle(array->map(), isolate);
@@ -450,14 +485,14 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) {
NONE)
.Check();
CHECK_EQ(array->map(), *previous_map);
- CHECK_EQ(PACKED_DOUBLE_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(PACKED_DOUBLE_ELEMENTS, array->map().elements_kind());
CHECK_EQ(2, Smi::ToInt(array->length()));
// `delete array[0]` does not alter length, but changes the elments_kind
name = MakeString("0");
CHECK(JSReceiver::DeletePropertyOrElement(array, name).FromMaybe(false));
CHECK_NE(array->map(), *previous_map);
- CHECK_EQ(HOLEY_DOUBLE_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(HOLEY_DOUBLE_ELEMENTS, array->map().elements_kind());
CHECK_EQ(2, Smi::ToInt(array->length()));
previous_map = handle(array->map(), isolate);
@@ -467,7 +502,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) {
NONE)
.Check();
CHECK_EQ(array->map(), *previous_map);
- CHECK_EQ(HOLEY_DOUBLE_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(HOLEY_DOUBLE_ELEMENTS, array->map().elements_kind());
CHECK_EQ(2, Smi::ToInt(array->length()));
// Adding a string to the array changes to elements_kind PACKED_ELEMENTS
@@ -476,7 +511,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) {
NONE)
.Check();
CHECK_NE(array->map(), *previous_map);
- CHECK_EQ(HOLEY_ELEMENTS, array->map()->elements_kind());
+ CHECK_EQ(HOLEY_ELEMENTS, array->map().elements_kind());
CHECK_EQ(2, Smi::ToInt(array->length()));
previous_map = handle(array->map(), isolate);
@@ -488,6 +523,29 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) {
CHECK_EQ(array->map(), *previous_map);
}
+TEST(IsHoleyElementsKindForRead) {
+ for (int i = 0; i <= ElementsKind::LAST_ELEMENTS_KIND; i++) {
+ ElementsKind kind = static_cast<ElementsKind>(i);
+ CHECK_EQ(ElementsKindIsHoleyElementsKindForRead(kind),
+ IsHoleyElementsKindForRead(kind));
+ }
+}
+
+TEST(IsHoleyElementsKind) {
+ for (int i = 0; i <= ElementsKind::LAST_ELEMENTS_KIND; i++) {
+ ElementsKind kind = static_cast<ElementsKind>(i);
+ CHECK_EQ(ElementsKindIsHoleyElementsKind(kind), IsHoleyElementsKind(kind));
+ }
+}
+
+TEST(IsFastPackedElementsKind) {
+ for (int i = 0; i <= ElementsKind::LAST_ELEMENTS_KIND; i++) {
+ ElementsKind kind = static_cast<ElementsKind>(i);
+ CHECK_EQ(ElementsKindIsFastPackedElementsKind(kind),
+ IsFastPackedElementsKind(kind));
+ }
+}
+
} // namespace test_elements_kind
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-factory.cc b/deps/v8/test/cctest/test-factory.cc
index abb77b5b6b..4ed7cb7bca 100644
--- a/deps/v8/test/cctest/test-factory.cc
+++ b/deps/v8/test/cctest/test-factory.cc
@@ -2,22 +2,61 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
+
#include "include/v8.h"
-#include "src/code-desc.h"
-#include "src/handles-inl.h"
-#include "src/isolate.h"
+#include "src/codegen/code-desc.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
namespace test_factory {
-TEST(Factory_NewCode) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- HandleScope scope(i_isolate);
+namespace {
+
+// This needs to be large enough to create a new nosnap Isolate, but smaller
+// than kMaximalCodeRangeSize so we can recover from the OOM.
+constexpr int kInstructionSize = 100 * MB;
+STATIC_ASSERT(kInstructionSize < kMaximalCodeRangeSize || !kRequiresCodeRange);
+
+size_t NearHeapLimitCallback(void* raw_bool, size_t current_heap_limit,
+ size_t initial_heap_limit) {
+ bool* oom_triggered = static_cast<bool*>(raw_bool);
+ *oom_triggered = true;
+ return kInstructionSize * 2;
+}
+
+class SetupIsolateWithSmallHeap {
+ public:
+ SetupIsolateWithSmallHeap() {
+ FLAG_max_old_space_size = kInstructionSize / MB / 2; // In MB.
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ isolate_ = reinterpret_cast<Isolate*>(v8::Isolate::New(create_params));
+ isolate_->heap()->AddNearHeapLimitCallback(NearHeapLimitCallback,
+ &oom_triggered_);
+ }
+
+ ~SetupIsolateWithSmallHeap() {
+ reinterpret_cast<v8::Isolate*>(isolate_)->Dispose();
+ }
+
+ Isolate* isolate() { return isolate_; }
+ bool oom_triggered() const { return oom_triggered_; }
+
+ private:
+ Isolate* isolate_;
+ bool oom_triggered_ = false;
+};
+
+} // namespace
+
+TEST(Factory_CodeBuilder) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
// Create a big function that ends up in CODE_LO_SPACE.
const int instruction_size = kMaxRegularHeapObjectSize + 1;
@@ -32,16 +71,47 @@ TEST(Factory_NewCode) {
desc.unwinding_info = nullptr;
desc.unwinding_info_size = 0;
desc.origin = nullptr;
- Handle<Object> self_ref;
Handle<Code> code =
- i_isolate->factory()->NewCode(desc, Code::WASM_FUNCTION, self_ref);
+ Factory::CodeBuilder(isolate, desc, Code::WASM_FUNCTION).Build();
- CHECK(i_isolate->heap()->InSpace(*code, CODE_LO_SPACE));
+ CHECK(isolate->heap()->InSpace(*code, CODE_LO_SPACE));
#if VERIFY_HEAP
- code->ObjectVerify(i_isolate);
+ code->ObjectVerify(isolate);
#endif
}
+UNINITIALIZED_TEST(Factory_CodeBuilder_BuildOOM) {
+ SetupIsolateWithSmallHeap isolate_scope;
+ HandleScope scope(isolate_scope.isolate());
+ std::unique_ptr<byte[]> instructions(new byte[kInstructionSize]);
+ CodeDesc desc;
+ desc.instr_size = kInstructionSize;
+ desc.buffer = instructions.get();
+
+ const Handle<Code> code =
+ Factory::CodeBuilder(isolate_scope.isolate(), desc, Code::WASM_FUNCTION)
+ .Build();
+
+ CHECK(!code.is_null());
+ CHECK(isolate_scope.oom_triggered());
+}
+
+UNINITIALIZED_TEST(Factory_CodeBuilder_TryBuildOOM) {
+ SetupIsolateWithSmallHeap isolate_scope;
+ HandleScope scope(isolate_scope.isolate());
+ std::unique_ptr<byte[]> instructions(new byte[kInstructionSize]);
+ CodeDesc desc;
+ desc.instr_size = kInstructionSize;
+ desc.buffer = instructions.get();
+
+ const MaybeHandle<Code> code =
+ Factory::CodeBuilder(isolate_scope.isolate(), desc, Code::WASM_FUNCTION)
+ .TryBuild();
+
+ CHECK(code.is_null());
+ CHECK(!isolate_scope.oom_triggered());
+}
+
} // namespace test_factory
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-fast-dtoa.cc b/deps/v8/test/cctest/test-fast-dtoa.cc
index c063d6348b..50c1935376 100644
--- a/deps/v8/test/cctest/test-fast-dtoa.cc
+++ b/deps/v8/test/cctest/test-fast-dtoa.cc
@@ -27,12 +27,12 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/platform/platform.h"
-#include "src/diy-fp.h"
-#include "src/double.h"
-#include "src/fast-dtoa.h"
+#include "src/numbers/diy-fp.h"
+#include "src/numbers/double.h"
+#include "src/numbers/fast-dtoa.h"
#include "test/cctest/cctest.h"
#include "test/cctest/gay-precision.h"
#include "test/cctest/gay-shortest.h"
@@ -43,18 +43,14 @@ namespace test_fast_dtoa {
static const int kBufferSize = 100;
-
-// Removes trailing '0' digits.
-static void TrimRepresentation(Vector<char> representation) {
- int len = StrLength(representation.start());
- int i;
- for (i = len - 1; i >= 0; --i) {
- if (representation[i] != '0') break;
- }
- representation[i + 1] = '\0';
+// Removes trailing '0' digits (modifies {representation}). Can create an empty
+// string if all digits are 0.
+static void TrimRepresentation(char* representation) {
+ size_t len = strlen(representation);
+ while (len > 0 && representation[len - 1] == '0') --len;
+ representation[len] = '\0';
}
-
TEST(FastDtoaShortestVariousDoubles) {
char buffer_container[kBufferSize];
Vector<char> buffer(buffer_container, kBufferSize);
@@ -66,44 +62,44 @@ TEST(FastDtoaShortestVariousDoubles) {
status = FastDtoa(min_double, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("5", buffer.start()));
+ CHECK_EQ(0, strcmp("5", buffer.begin()));
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
status = FastDtoa(max_double, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("17976931348623157", buffer.start()));
+ CHECK_EQ(0, strcmp("17976931348623157", buffer.begin()));
CHECK_EQ(309, point);
status = FastDtoa(4294967272.0, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("4294967272", buffer.start()));
+ CHECK_EQ(0, strcmp("4294967272", buffer.begin()));
CHECK_EQ(10, point);
status = FastDtoa(4.1855804968213567e298, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("4185580496821357", buffer.start()));
+ CHECK_EQ(0, strcmp("4185580496821357", buffer.begin()));
CHECK_EQ(299, point);
status = FastDtoa(5.5626846462680035e-309, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("5562684646268003", buffer.start()));
+ CHECK_EQ(0, strcmp("5562684646268003", buffer.begin()));
CHECK_EQ(-308, point);
status = FastDtoa(2147483648.0, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("2147483648", buffer.start()));
+ CHECK_EQ(0, strcmp("2147483648", buffer.begin()));
CHECK_EQ(10, point);
status = FastDtoa(3.5844466002796428e+298, FAST_DTOA_SHORTEST, 0,
buffer, &length, &point);
if (status) { // Not all FastDtoa variants manage to compute this number.
- CHECK_EQ(0, strcmp("35844466002796428", buffer.start()));
+ CHECK_EQ(0, strcmp("35844466002796428", buffer.begin()));
CHECK_EQ(299, point);
}
@@ -111,7 +107,7 @@ TEST(FastDtoaShortestVariousDoubles) {
double v = Double(smallest_normal64).value();
status = FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, &length, &point);
if (status) {
- CHECK_EQ(0, strcmp("22250738585072014", buffer.start()));
+ CHECK_EQ(0, strcmp("22250738585072014", buffer.begin()));
CHECK_EQ(-307, point);
}
@@ -119,7 +115,7 @@ TEST(FastDtoaShortestVariousDoubles) {
v = Double(largest_denormal64).value();
status = FastDtoa(v, FAST_DTOA_SHORTEST, 0, buffer, &length, &point);
if (status) {
- CHECK_EQ(0, strcmp("2225073858507201", buffer.start()));
+ CHECK_EQ(0, strcmp("2225073858507201", buffer.begin()));
CHECK_EQ(-307, point);
}
}
@@ -135,15 +131,15 @@ TEST(FastDtoaPrecisionVariousDoubles) {
status = FastDtoa(1.0, FAST_DTOA_PRECISION, 3, buffer, &length, &point);
CHECK(status);
CHECK_GE(3, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
status = FastDtoa(1.5, FAST_DTOA_PRECISION, 10, buffer, &length, &point);
if (status) {
CHECK_GE(10, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("15", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("15", buffer.begin()));
CHECK_EQ(1, point);
}
@@ -151,56 +147,56 @@ TEST(FastDtoaPrecisionVariousDoubles) {
status = FastDtoa(min_double, FAST_DTOA_PRECISION, 5,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("49407", buffer.start()));
+ CHECK_EQ(0, strcmp("49407", buffer.begin()));
CHECK_EQ(-323, point);
double max_double = 1.7976931348623157e308;
status = FastDtoa(max_double, FAST_DTOA_PRECISION, 7,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("1797693", buffer.start()));
+ CHECK_EQ(0, strcmp("1797693", buffer.begin()));
CHECK_EQ(309, point);
status = FastDtoa(4294967272.0, FAST_DTOA_PRECISION, 14,
buffer, &length, &point);
if (status) {
CHECK_GE(14, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("4294967272", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("4294967272", buffer.begin()));
CHECK_EQ(10, point);
}
status = FastDtoa(4.1855804968213567e298, FAST_DTOA_PRECISION, 17,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("41855804968213567", buffer.start()));
+ CHECK_EQ(0, strcmp("41855804968213567", buffer.begin()));
CHECK_EQ(299, point);
status = FastDtoa(5.5626846462680035e-309, FAST_DTOA_PRECISION, 1,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("6", buffer.start()));
+ CHECK_EQ(0, strcmp("6", buffer.begin()));
CHECK_EQ(-308, point);
status = FastDtoa(2147483648.0, FAST_DTOA_PRECISION, 5,
buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("21475", buffer.start()));
+ CHECK_EQ(0, strcmp("21475", buffer.begin()));
CHECK_EQ(10, point);
status = FastDtoa(3.5844466002796428e+298, FAST_DTOA_PRECISION, 10,
buffer, &length, &point);
CHECK(status);
CHECK_GE(10, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("35844466", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("35844466", buffer.begin()));
CHECK_EQ(299, point);
uint64_t smallest_normal64 = V8_2PART_UINT64_C(0x00100000, 00000000);
double v = Double(smallest_normal64).value();
status = FastDtoa(v, FAST_DTOA_PRECISION, 17, buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("22250738585072014", buffer.start()));
+ CHECK_EQ(0, strcmp("22250738585072014", buffer.begin()));
CHECK_EQ(-307, point);
uint64_t largest_denormal64 = V8_2PART_UINT64_C(0x000FFFFF, FFFFFFFF);
@@ -208,20 +204,20 @@ TEST(FastDtoaPrecisionVariousDoubles) {
status = FastDtoa(v, FAST_DTOA_PRECISION, 17, buffer, &length, &point);
CHECK(status);
CHECK_GE(20, length);
- TrimRepresentation(buffer);
- CHECK_EQ(0, strcmp("22250738585072009", buffer.start()));
+ TrimRepresentation(buffer.begin());
+ CHECK_EQ(0, strcmp("22250738585072009", buffer.begin()));
CHECK_EQ(-307, point);
v = 3.3161339052167390562200598e-237;
status = FastDtoa(v, FAST_DTOA_PRECISION, 18, buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("331613390521673906", buffer.start()));
+ CHECK_EQ(0, strcmp("331613390521673906", buffer.begin()));
CHECK_EQ(-236, point);
v = 7.9885183916008099497815232e+191;
status = FastDtoa(v, FAST_DTOA_PRECISION, 4, buffer, &length, &point);
CHECK(status);
- CHECK_EQ(0, strcmp("7989", buffer.start()));
+ CHECK_EQ(0, strcmp("7989", buffer.begin()));
CHECK_EQ(192, point);
}
@@ -248,7 +244,7 @@ TEST(FastDtoaGayShortest) {
if (length == kFastDtoaMaximalLength) needed_max_length = true;
succeeded++;
CHECK_EQ(current_test.decimal_point, point);
- CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.begin()));
}
CHECK_GT(succeeded*1.0/total, 0.99);
CHECK(needed_max_length);
@@ -281,9 +277,9 @@ TEST(FastDtoaGayPrecision) {
if (!status) continue;
succeeded++;
if (number_digits <= 15) succeeded_15++;
- TrimRepresentation(buffer);
+ TrimRepresentation(buffer.begin());
CHECK_EQ(current_test.decimal_point, point);
- CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.begin()));
}
// The precomputed numbers contain many entries with many requested
// digits. These have a high failure rate and we therefore expect a lower
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 22fdbead59..c487a1e2ec 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
-#include "src/execution.h"
-#include "src/global-handles.h"
+#include "src/execution/execution.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/test-feedback-vector.h"
namespace v8 {
@@ -94,7 +94,7 @@ TEST(VectorStructure) {
vector = NewFeedbackVector(isolate, &spec);
FeedbackVectorHelper helper(vector);
FeedbackCell cell = *vector->GetClosureFeedbackCell(0);
- CHECK_EQ(cell->value(), *factory->undefined_value());
+ CHECK_EQ(cell.value(), *factory->undefined_value());
}
}
diff --git a/deps/v8/test/cctest/test-feedback-vector.h b/deps/v8/test/cctest/test-feedback-vector.h
index e546f6a6ec..95e04a7e7b 100644
--- a/deps/v8/test/cctest/test-feedback-vector.h
+++ b/deps/v8/test/cctest/test-feedback-vector.h
@@ -5,8 +5,8 @@
#ifndef V8_TEST_FEEDBACK_VECTOR_H_
#define V8_TEST_FEEDBACK_VECTOR_H_
-#include "src/feedback-vector.h"
-#include "src/objects.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index c94d01805d..2a5e1dbb09 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -7,19 +7,19 @@
#include "test/cctest/test-api.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/execution.h"
-#include "src/field-type.h"
-#include "src/global-handles.h"
+#include "src/execution/execution.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/ic/stub-cache.h"
-#include "src/objects-inl.h"
+#include "src/objects/field-type.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property.h"
#include "src/objects/struct-inl.h"
-#include "src/ostreams.h"
-#include "src/property.h"
-#include "src/transitions.h"
+#include "src/objects/transitions.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -55,7 +55,7 @@ static Handle<String> MakeString(const char* str) {
static Handle<String> MakeName(const char* str, int suffix) {
EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s%d", str, suffix);
- return MakeString(buffer.start());
+ return MakeString(buffer.begin());
}
@@ -110,7 +110,7 @@ class Expectations {
explicit Expectations(Isolate* isolate)
: Expectations(
isolate,
- isolate->object_function()->initial_map()->elements_kind()) {}
+ isolate->object_function()->initial_map().elements_kind()) {}
void Init(int index, PropertyKind kind, PropertyAttributes attributes,
PropertyConstness constness, PropertyLocation location,
@@ -123,6 +123,7 @@ class Expectations {
// Maps with transitionable elements kinds must have the most general
// field type.
value = FieldType::Any(isolate_);
+ representation = Representation::Tagged();
}
constnesses_[index] = constness;
attributes_[index] = attributes;
@@ -138,7 +139,7 @@ class Expectations {
os << "Descriptor @ ";
if (kinds_[i] == kData) {
- os << Brief(*values_[i]);
+ Handle<FieldType>::cast(values_[i])->PrintTo(os);
} else {
// kAccessor
os << "(get: " << Brief(*values_[i])
@@ -193,15 +194,9 @@ class Expectations {
void SetDataConstant(int index, PropertyAttributes attrs,
Handle<JSFunction> value) {
- if (FLAG_track_constant_fields) {
- Handle<FieldType> field_type(FieldType::Class(value->map()), isolate_);
- Init(index, kData, attrs, PropertyConstness::kConst, kField,
- Representation::HeapObject(), field_type);
-
- } else {
- Init(index, kData, attrs, PropertyConstness::kConst, kDescriptor,
- Representation::HeapObject(), value);
- }
+ Handle<FieldType> field_type(FieldType::Class(value->map()), isolate_);
+ Init(index, kData, attrs, PropertyConstness::kConst, kField,
+ Representation::HeapObject(), field_type);
}
void SetDataConstant(int index, Handle<JSFunction> value) {
@@ -255,7 +250,7 @@ class Expectations {
}
bool Check(DescriptorArray descriptors, int descriptor) const {
- PropertyDetails details = descriptors->GetDetails(descriptor);
+ PropertyDetails details = descriptors.GetDetails(descriptor);
if (details.kind() != kinds_[descriptor]) return false;
if (details.location() != locations_[descriptor]) return false;
@@ -271,42 +266,36 @@ class Expectations {
Object expected_value = *values_[descriptor];
if (details.location() == kField) {
if (details.kind() == kData) {
- FieldType type = descriptors->GetFieldType(descriptor);
+ FieldType type = descriptors.GetFieldType(descriptor);
return FieldType::cast(expected_value) == type;
} else {
// kAccessor
UNREACHABLE();
}
} else {
- Object value = descriptors->GetStrongValue(descriptor);
- // kDescriptor
- if (details.kind() == kData) {
- CHECK(!FLAG_track_constant_fields);
- return value == expected_value;
- } else {
- // kAccessor
- if (value == expected_value) return true;
- if (!value->IsAccessorPair()) return false;
- AccessorPair pair = AccessorPair::cast(value);
- return pair->Equals(expected_value, *setter_values_[descriptor]);
- }
+ CHECK_EQ(kAccessor, details.kind());
+ Object value = descriptors.GetStrongValue(descriptor);
+ if (value == expected_value) return true;
+ if (!value.IsAccessorPair()) return false;
+ AccessorPair pair = AccessorPair::cast(value);
+ return pair.Equals(expected_value, *setter_values_[descriptor]);
}
UNREACHABLE();
}
bool Check(Map map, int expected_nof) const {
- CHECK_EQ(elements_kind_, map->elements_kind());
+ CHECK_EQ(elements_kind_, map.elements_kind());
CHECK(number_of_properties_ <= MAX_PROPERTIES);
- CHECK_EQ(expected_nof, map->NumberOfOwnDescriptors());
- CHECK(!map->is_dictionary_map());
+ CHECK_EQ(expected_nof, map.NumberOfOwnDescriptors());
+ CHECK(!map.is_dictionary_map());
- DescriptorArray descriptors = map->instance_descriptors();
+ DescriptorArray descriptors = map.instance_descriptors();
CHECK(expected_nof <= number_of_properties_);
for (int i = 0; i < expected_nof; i++) {
if (!Check(descriptors, i)) {
Print();
#ifdef OBJECT_PRINT
- descriptors->Print();
+ descriptors.Print();
#endif
Check(descriptors, i);
return false;
@@ -463,7 +452,7 @@ class Expectations {
Handle<Object> setter(pair->setter(), isolate);
int descriptor =
- map->instance_descriptors()->SearchWithCache(isolate, *name, *map);
+ map->instance_descriptors().SearchWithCache(isolate, *name, *map);
map = Map::TransitionToAccessorProperty(isolate, map, name, descriptor,
getter, setter, attributes);
CHECK(!map->is_deprecated());
@@ -534,7 +523,7 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
Handle<JSObject> obj = factory->NewJSObjectFromMap(map);
JSObject::MigrateToMap(obj, prepared_map);
FieldIndex index = FieldIndex::ForDescriptor(*prepared_map, 0);
- CHECK(obj->RawFastPropertyAt(index)->IsUninitialized(isolate));
+ CHECK(obj->RawFastPropertyAt(index).IsUninitialized(isolate));
#ifdef VERIFY_HEAP
obj->ObjectVerify(isolate);
#endif
@@ -567,18 +556,17 @@ TEST(ReconfigureAccessorToNonExistingDataFieldHeavy) {
CHECK(obj_value->IsJSObject());
Handle<JSObject> obj = Handle<JSObject>::cast(obj_value);
- CHECK_EQ(1, obj->map()->NumberOfOwnDescriptors());
- CHECK(
- obj->map()->instance_descriptors()->GetStrongValue(0)->IsAccessorPair());
+ CHECK_EQ(1, obj->map().NumberOfOwnDescriptors());
+ CHECK(obj->map().instance_descriptors().GetStrongValue(0).IsAccessorPair());
Handle<Object> value(Smi::FromInt(42), isolate);
JSObject::SetOwnPropertyIgnoreAttributes(obj, foo_str, value, NONE).Check();
// Check that the property contains |value|.
- CHECK_EQ(1, obj->map()->NumberOfOwnDescriptors());
+ CHECK_EQ(1, obj->map().NumberOfOwnDescriptors());
FieldIndex index = FieldIndex::ForDescriptor(obj->map(), 0);
Object the_value = obj->RawFastPropertyAt(index);
- CHECK(the_value->IsSmi());
+ CHECK(the_value.IsSmi());
CHECK_EQ(42, Smi::ToInt(the_value));
}
@@ -602,10 +590,9 @@ Handle<Code> CreateDummyOptimizedCode(Isolate* isolate) {
desc.buffer = buffer;
desc.buffer_size = arraysize(buffer);
desc.instr_size = arraysize(buffer);
- return isolate->factory()->NewCode(
- desc, Code::OPTIMIZED_FUNCTION, Handle<Object>(), Builtins::kNoBuiltinId,
- MaybeHandle<ByteArray>(), MaybeHandle<DeoptimizationData>(), kMovable,
- true);
+ return Factory::CodeBuilder(isolate, desc, Code::OPTIMIZED_FUNCTION)
+ .set_is_turbofanned()
+ .Build();
}
// This test ensures that field generalization at |property_index| is done
@@ -645,7 +632,7 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
map = expectations.AddDataField(map, NONE, from.constness,
from.representation, from.type);
} else {
- map = expectations.AddDataField(map, NONE, kDefaultFieldConstness,
+ map = expectations.AddDataField(map, NONE, PropertyConstness::kConst,
Representation::Double(), any_type);
if (i == detach_property_at_index) {
detach_point_map = map;
@@ -660,7 +647,8 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
detach_point_map = Map::ReconfigureProperty(
isolate, detach_point_map, detach_property_at_index, kData, NONE,
Representation::Tagged(), any_type);
- expectations.SetDataField(detach_property_at_index, kDefaultFieldConstness,
+ expectations.SetDataField(detach_property_at_index,
+ PropertyConstness::kConst,
Representation::Tagged(), any_type);
CHECK(map->is_deprecated());
CHECK(expectations.Check(*detach_point_map,
@@ -714,10 +702,10 @@ void TestGeneralizeField(int detach_property_at_index, int property_index,
// Check that all previous maps are not stable.
Map tmp = *new_map;
while (true) {
- Object back = tmp->GetBackPointer();
- if (back->IsUndefined(isolate)) break;
+ Object back = tmp.GetBackPointer();
+ if (back.IsUndefined(isolate)) break;
tmp = Map::cast(back);
- CHECK(!tmp->is_stable());
+ CHECK(!tmp.is_stable());
}
}
@@ -1190,22 +1178,20 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeSmiFieldToDouble) {
Handle<FieldType> any_type = FieldType::Any(isolate);
- if (FLAG_track_constant_fields) {
- TestReconfigureDataFieldAttribute_GeneralizeField(
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kConst, Representation::Double(), any_type},
- {PropertyConstness::kConst, Representation::Double(), any_type});
+ TestReconfigureDataFieldAttribute_GeneralizeField(
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kConst, Representation::Double(), any_type},
+ {PropertyConstness::kConst, Representation::Double(), any_type});
- TestReconfigureDataFieldAttribute_GeneralizeField(
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kMutable, Representation::Double(), any_type},
- {PropertyConstness::kMutable, Representation::Double(), any_type});
+ TestReconfigureDataFieldAttribute_GeneralizeField(
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kMutable, Representation::Double(), any_type},
+ {PropertyConstness::kMutable, Representation::Double(), any_type});
- TestReconfigureDataFieldAttribute_GeneralizeField(
- {PropertyConstness::kMutable, Representation::Smi(), any_type},
- {PropertyConstness::kConst, Representation::Double(), any_type},
- {PropertyConstness::kMutable, Representation::Double(), any_type});
- }
+ TestReconfigureDataFieldAttribute_GeneralizeField(
+ {PropertyConstness::kMutable, Representation::Smi(), any_type},
+ {PropertyConstness::kConst, Representation::Double(), any_type},
+ {PropertyConstness::kMutable, Representation::Double(), any_type});
TestReconfigureDataFieldAttribute_GeneralizeField(
{PropertyConstness::kMutable, Representation::Smi(), any_type},
@@ -1222,22 +1208,20 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeSmiFieldToTagged) {
Handle<FieldType> value_type =
FieldType::Class(Map::Create(isolate, 0), isolate);
- if (FLAG_track_constant_fields) {
- TestReconfigureDataFieldAttribute_GeneralizeField(
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kConst, Representation::Tagged(), any_type});
+ TestReconfigureDataFieldAttribute_GeneralizeField(
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kConst, Representation::Tagged(), any_type});
- TestReconfigureDataFieldAttribute_GeneralizeField(
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ TestReconfigureDataFieldAttribute_GeneralizeField(
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- TestReconfigureDataFieldAttribute_GeneralizeField(
- {PropertyConstness::kMutable, Representation::Smi(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- }
+ TestReconfigureDataFieldAttribute_GeneralizeField(
+ {PropertyConstness::kMutable, Representation::Smi(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
TestReconfigureDataFieldAttribute_GeneralizeField(
{PropertyConstness::kMutable, Representation::Smi(), any_type},
@@ -1254,22 +1238,20 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeDoubleFieldToTagged) {
Handle<FieldType> value_type =
FieldType::Class(Map::Create(isolate, 0), isolate);
- if (FLAG_track_constant_fields) {
- TestReconfigureDataFieldAttribute_GeneralizeField(
- {PropertyConstness::kConst, Representation::Double(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kConst, Representation::Tagged(), any_type});
+ TestReconfigureDataFieldAttribute_GeneralizeField(
+ {PropertyConstness::kConst, Representation::Double(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kConst, Representation::Tagged(), any_type});
- TestReconfigureDataFieldAttribute_GeneralizeField(
- {PropertyConstness::kConst, Representation::Double(), any_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ TestReconfigureDataFieldAttribute_GeneralizeField(
+ {PropertyConstness::kConst, Representation::Double(), any_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- TestReconfigureDataFieldAttribute_GeneralizeField(
- {PropertyConstness::kMutable, Representation::Double(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- }
+ TestReconfigureDataFieldAttribute_GeneralizeField(
+ {PropertyConstness::kMutable, Representation::Double(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
TestReconfigureDataFieldAttribute_GeneralizeField(
{PropertyConstness::kMutable, Representation::Double(), any_type},
@@ -1293,28 +1275,25 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeHeapObjFieldToHeapObj) {
Handle<FieldType> expected_type = any_type;
// Check generalizations that trigger deopts.
- if (FLAG_track_constant_fields) {
- TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
- {PropertyConstness::kConst, Representation::HeapObject(), current_type},
- {PropertyConstness::kConst, Representation::HeapObject(), new_type},
- {PropertyConstness::kConst, Representation::HeapObject(),
- expected_type});
-
- // PropertyConstness::kConst to PropertyConstness::kMutable migration does
- // not create a new map, therefore trivial generalization.
- TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
- {PropertyConstness::kConst, Representation::HeapObject(),
- current_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), new_type},
- {PropertyConstness::kMutable, Representation::HeapObject(),
- expected_type});
- TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
- {PropertyConstness::kMutable, Representation::HeapObject(),
- current_type},
- {PropertyConstness::kConst, Representation::HeapObject(), new_type},
- {PropertyConstness::kMutable, Representation::HeapObject(),
- expected_type});
- }
+ TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), current_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), new_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), expected_type});
+
+ // PropertyConstness::kConst to PropertyConstness::kMutable migration does
+ // not create a new map, therefore trivial generalization.
+ TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), current_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), new_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(),
+ expected_type});
+
+ TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
+ {PropertyConstness::kMutable, Representation::HeapObject(), current_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), new_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(),
+ expected_type});
+
TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
{PropertyConstness::kMutable, Representation::HeapObject(), current_type},
{PropertyConstness::kMutable, Representation::HeapObject(), new_type},
@@ -1325,27 +1304,25 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeHeapObjFieldToHeapObj) {
// Check generalizations that do not trigger deopts.
new_type = FieldType::Class(Map::Create(isolate, 0), isolate);
- if (FLAG_track_constant_fields) {
- TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
- {PropertyConstness::kConst, Representation::HeapObject(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), new_type},
- {PropertyConstness::kConst, Representation::HeapObject(), any_type},
- false);
-
- // PropertyConstness::kConst to PropertyConstness::kMutable migration does
- // not create a new map, therefore trivial generalization.
- TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
- {PropertyConstness::kConst, Representation::HeapObject(), any_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), new_type},
- {PropertyConstness::kMutable, Representation::HeapObject(),
- any_type});
-
- TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
- {PropertyConstness::kMutable, Representation::HeapObject(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), new_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), any_type},
- false);
- }
+ TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), new_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), any_type},
+ false);
+
+ // PropertyConstness::kConst to PropertyConstness::kMutable migration does
+ // not create a new map, therefore trivial generalization.
+ TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), any_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), new_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), any_type});
+
+ TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
+ {PropertyConstness::kMutable, Representation::HeapObject(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), new_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), any_type},
+ false);
+
TestReconfigureDataFieldAttribute_GeneralizeFieldTrivial(
{PropertyConstness::kMutable, Representation::HeapObject(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), new_type},
@@ -1431,7 +1408,7 @@ struct CheckCopyGeneralizeAllFields {
CHECK(!map->is_deprecated());
CHECK_NE(*map, *new_map);
- CHECK(new_map->GetBackPointer()->IsUndefined(isolate));
+ CHECK(new_map->GetBackPointer().IsUndefined(isolate));
for (int i = 0; i < kPropCount; i++) {
expectations.GeneralizeField(i);
}
@@ -1599,24 +1576,14 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToDataFieldAfterTargetMap) {
}
void UpdateExpectations(int property_index, Expectations& expectations) {
- PropertyConstness expected_constness = FLAG_track_constant_fields
- ? PropertyConstness::kConst
- : PropertyConstness::kMutable;
- expectations.SetDataField(property_index, expected_constness,
+ expectations.SetDataField(property_index, PropertyConstness::kConst,
Representation::HeapObject(), function_type_);
}
};
TestConfig config;
- if (FLAG_track_constant_fields) {
- CheckSameMap checker;
- TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
-
- } else {
- // Two branches are "incompatible" so the |map1| should be deprecated.
- CheckDeprecated checker;
- TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
- }
+ CheckSameMap checker;
+ TestReconfigureProperty_CustomPropertyAfterTargetMap(config, checker);
}
@@ -1739,7 +1706,7 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
} else {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
- return expectations.AddDataField(map, NONE, kDefaultFieldConstness,
+ return expectations.AddDataField(map, NONE, PropertyConstness::kConst,
Representation::Smi(), any_type);
}
}
@@ -1760,104 +1727,6 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
namespace {
-// This test ensures that field generalization is correctly propagated from one
-// branch of transition tree (|map2) to another (|map|).
-//
-// + - p0 - p1 - p2A - p3 - p4: |map|
-// |
-// ek
-// |
-// {} - p0 - p1 - p2B - p3 - p4: |map2|
-//
-// where "p2A" and "p2B" differ only in the representation/field type.
-//
-static void TestReconfigureElementsKind_GeneralizeField(
- const CRFTData& from, const CRFTData& to, const CRFTData& expected) {
- Isolate* isolate = CcTest::i_isolate();
-
- Expectations expectations(isolate, PACKED_SMI_ELEMENTS);
-
- // Create a map, add required properties to it and initialize expectations.
- Handle<Map> initial_map = Map::Create(isolate, 0);
- initial_map->set_instance_type(JS_ARRAY_TYPE);
- initial_map->set_elements_kind(PACKED_SMI_ELEMENTS);
-
- Handle<Map> map = initial_map;
- map = expectations.AsElementsKind(map, PACKED_ELEMENTS);
- for (int i = 0; i < kPropCount; i++) {
- map = expectations.AddDataField(map, NONE, from.constness,
- from.representation, from.type);
- }
- CHECK(!map->is_deprecated());
- CHECK(map->is_stable());
- CHECK(expectations.Check(*map));
-
- // Create another branch in transition tree (property at index |kDiffProp|
- // has different representatio/field type), initialize expectations.
- const int kDiffProp = kPropCount / 2;
- Expectations expectations2(isolate, PACKED_SMI_ELEMENTS);
-
- Handle<Map> map2 = initial_map;
- for (int i = 0; i < kPropCount; i++) {
- if (i == kDiffProp) {
- map2 = expectations2.AddDataField(map2, NONE, to.constness,
- to.representation, to.type);
- } else {
- map2 = expectations2.AddDataField(map2, NONE, from.constness,
- from.representation, from.type);
- }
- }
- CHECK(!map2->is_deprecated());
- CHECK(map2->is_stable());
- CHECK(expectations2.Check(*map2));
-
- // Create dummy optimized code object to test correct dependencies
- // on the field owner.
- Handle<Code> code = CreateDummyOptimizedCode(isolate);
- Handle<Map> field_owner(map->FindFieldOwner(isolate, kDiffProp), isolate);
- DependentCode::InstallDependency(isolate, MaybeObjectHandle::Weak(code),
- field_owner,
- DependentCode::kFieldOwnerGroup);
- CHECK(!code->marked_for_deoptimization());
-
- // Reconfigure elements kinds of |map2|, which should generalize
- // representations in |map|.
- Handle<Map> new_map =
- Map::ReconfigureElementsKind(isolate, map2, PACKED_ELEMENTS);
-
- // |map2| should be left unchanged but marked unstable.
- CHECK(!map2->is_stable());
- CHECK(!map2->is_deprecated());
- CHECK_NE(*map2, *new_map);
- CHECK(expectations2.Check(*map2));
-
- // |map| should be deprecated and |new_map| should match new expectations.
- expectations.SetDataField(kDiffProp, expected.constness,
- expected.representation, expected.type);
-
- CHECK(map->is_deprecated());
- CHECK(!code->marked_for_deoptimization());
- CHECK_NE(*map, *new_map);
-
- CHECK(!new_map->is_deprecated());
- CHECK(expectations.Check(*new_map));
-
- // Update deprecated |map|, it should become |new_map|.
- Handle<Map> updated_map = Map::Update(isolate, map);
- CHECK_EQ(*new_map, *updated_map);
- CheckMigrationTarget(isolate, *map, *updated_map);
-
- // Ensure Map::FindElementsKindTransitionedMap() is able to find the
- // transitioned map.
- {
- MapHandles map_list;
- map_list.push_back(updated_map);
- Map transitioned_map =
- map2->FindElementsKindTransitionedMap(isolate, map_list);
- CHECK_EQ(*updated_map, transitioned_map);
- }
-}
-
// This test ensures that trivial field generalization (from HeapObject to
// HeapObject) is correctly propagated from one branch of transition tree
// (|map2|) to another (|map|).
@@ -1967,23 +1836,22 @@ TEST(ReconfigureElementsKind_GeneralizeSmiFieldToDouble) {
Handle<FieldType> any_type = FieldType::Any(isolate);
- if (FLAG_track_constant_fields) {
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kConst, Representation::Double(), any_type},
- {PropertyConstness::kConst, Representation::Double(), any_type});
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kConst, Representation::Double(), any_type},
+ {PropertyConstness::kConst, Representation::Double(), any_type});
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kMutable, Representation::Double(), any_type},
- {PropertyConstness::kMutable, Representation::Double(), any_type});
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kMutable, Representation::Double(), any_type},
+ {PropertyConstness::kMutable, Representation::Double(), any_type});
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kMutable, Representation::Smi(), any_type},
- {PropertyConstness::kConst, Representation::Double(), any_type},
- {PropertyConstness::kMutable, Representation::Double(), any_type});
- }
- TestReconfigureElementsKind_GeneralizeField(
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kMutable, Representation::Smi(), any_type},
+ {PropertyConstness::kConst, Representation::Double(), any_type},
+ {PropertyConstness::kMutable, Representation::Double(), any_type});
+
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
{PropertyConstness::kMutable, Representation::Smi(), any_type},
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::Double(), any_type});
@@ -1998,23 +1866,22 @@ TEST(ReconfigureElementsKind_GeneralizeSmiFieldToTagged) {
Handle<FieldType> value_type =
FieldType::Class(Map::Create(isolate, 0), isolate);
- if (FLAG_track_constant_fields) {
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kConst, Representation::Tagged(), any_type});
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kConst, Representation::Tagged(), any_type});
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kMutable, Representation::Smi(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- }
- TestReconfigureElementsKind_GeneralizeField(
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kMutable, Representation::Smi(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
{PropertyConstness::kMutable, Representation::Smi(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type});
@@ -2029,23 +1896,22 @@ TEST(ReconfigureElementsKind_GeneralizeDoubleFieldToTagged) {
Handle<FieldType> value_type =
FieldType::Class(Map::Create(isolate, 0), isolate);
- if (FLAG_track_constant_fields) {
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kConst, Representation::Double(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kConst, Representation::Tagged(), any_type});
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::Double(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kConst, Representation::Tagged(), any_type});
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kConst, Representation::Double(), any_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::Double(), any_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kMutable, Representation::Double(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- }
- TestReconfigureElementsKind_GeneralizeField(
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kMutable, Representation::Double(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
{PropertyConstness::kMutable, Representation::Double(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type});
@@ -2067,28 +1933,25 @@ TEST(ReconfigureElementsKind_GeneralizeHeapObjFieldToHeapObj) {
Handle<FieldType> expected_type = any_type;
// Check generalizations that trigger deopts.
- if (FLAG_track_constant_fields) {
- TestReconfigureElementsKind_GeneralizeFieldTrivial(
- {PropertyConstness::kConst, Representation::HeapObject(), current_type},
- {PropertyConstness::kConst, Representation::HeapObject(), new_type},
- {PropertyConstness::kConst, Representation::HeapObject(),
- expected_type});
- // PropertyConstness::kConst to PropertyConstness::kMutable migration does
- // not create a new map, therefore trivial generalization.
- TestReconfigureElementsKind_GeneralizeFieldTrivial(
- {PropertyConstness::kConst, Representation::HeapObject(),
- current_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), new_type},
- {PropertyConstness::kMutable, Representation::HeapObject(),
- expected_type});
-
- TestReconfigureElementsKind_GeneralizeFieldTrivial(
- {PropertyConstness::kMutable, Representation::HeapObject(),
- current_type},
- {PropertyConstness::kConst, Representation::HeapObject(), new_type},
- {PropertyConstness::kMutable, Representation::HeapObject(),
- expected_type});
- }
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), current_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), new_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), expected_type});
+
+ // PropertyConstness::kConst to PropertyConstness::kMutable migration does
+ // not create a new map, therefore trivial generalization.
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), current_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), new_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(),
+ expected_type});
+
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kMutable, Representation::HeapObject(), current_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), new_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(),
+ expected_type});
+
TestReconfigureElementsKind_GeneralizeFieldTrivial(
{PropertyConstness::kMutable, Representation::HeapObject(), current_type},
{PropertyConstness::kMutable, Representation::HeapObject(), new_type},
@@ -2099,25 +1962,23 @@ TEST(ReconfigureElementsKind_GeneralizeHeapObjFieldToHeapObj) {
// Check generalizations that do not trigger deopts.
new_type = FieldType::Class(Map::Create(isolate, 0), isolate);
- if (FLAG_track_constant_fields) {
- TestReconfigureElementsKind_GeneralizeFieldTrivial(
- {PropertyConstness::kConst, Representation::HeapObject(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), new_type},
- {PropertyConstness::kConst, Representation::HeapObject(), any_type});
-
- // PropertyConstness::kConst to PropertyConstness::kMutable migration does
- // not create a new map, therefore trivial generalization.
- TestReconfigureElementsKind_GeneralizeFieldTrivial(
- {PropertyConstness::kConst, Representation::HeapObject(), any_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), new_type},
- {PropertyConstness::kMutable, Representation::HeapObject(),
- any_type});
-
- TestReconfigureElementsKind_GeneralizeFieldTrivial(
- {PropertyConstness::kMutable, Representation::HeapObject(), any_type},
- {PropertyConstness::kConst, Representation::HeapObject(), new_type},
- {PropertyConstness::kMutable, Representation::HeapObject(), any_type});
- }
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), new_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), any_type});
+
+ // PropertyConstness::kConst to PropertyConstness::kMutable migration does
+ // not create a new map, therefore trivial generalization.
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), any_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), new_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), any_type});
+
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kMutable, Representation::HeapObject(), any_type},
+ {PropertyConstness::kConst, Representation::HeapObject(), new_type},
+ {PropertyConstness::kMutable, Representation::HeapObject(), any_type});
+
TestReconfigureElementsKind_GeneralizeFieldTrivial(
{PropertyConstness::kMutable, Representation::HeapObject(), any_type},
{PropertyConstness::kMutable, Representation::HeapObject(), new_type},
@@ -2133,23 +1994,22 @@ TEST(ReconfigureElementsKind_GeneralizeHeapObjectFieldToTagged) {
Handle<FieldType> value_type =
FieldType::Class(Map::Create(isolate, 0), isolate);
- if (FLAG_track_constant_fields) {
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kConst, Representation::Tagged(), any_type});
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kConst, Representation::Tagged(), any_type});
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kConst, Representation::HeapObject(), value_type},
- {PropertyConstness::kMutable, Representation::Smi(), any_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kConst, Representation::HeapObject(), value_type},
+ {PropertyConstness::kMutable, Representation::Smi(), any_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- TestReconfigureElementsKind_GeneralizeField(
- {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
- {PropertyConstness::kConst, Representation::Smi(), any_type},
- {PropertyConstness::kMutable, Representation::Tagged(), any_type});
- }
- TestReconfigureElementsKind_GeneralizeField(
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
+ {PropertyConstness::kMutable, Representation::HeapObject(), value_type},
+ {PropertyConstness::kConst, Representation::Smi(), any_type},
+ {PropertyConstness::kMutable, Representation::Tagged(), any_type});
+
+ TestReconfigureElementsKind_GeneralizeFieldTrivial(
{PropertyConstness::kMutable, Representation::HeapObject(), value_type},
{PropertyConstness::kMutable, Representation::Smi(), any_type},
{PropertyConstness::kMutable, Representation::Tagged(), any_type});
@@ -2225,7 +2085,7 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
// Try to update |map|, since there is no place for propX transition at |map2|
// |map| should become "copy-generalized".
Handle<Map> updated_map = Map::Update(isolate, map);
- CHECK(updated_map->GetBackPointer()->IsUndefined(isolate));
+ CHECK(updated_map->GetBackPointer().IsUndefined(isolate));
for (int i = 0; i < kPropCount; i++) {
expectations.SetDataField(i, PropertyConstness::kMutable,
@@ -2330,13 +2190,13 @@ static void TestGeneralizeFieldWithSpecialTransition(
for (int i = 0; i < kPropCount; i++) {
expectations2.GeneralizeField(i);
}
- CHECK(new_map2->GetBackPointer()->IsUndefined(isolate));
+ CHECK(new_map2->GetBackPointer().IsUndefined(isolate));
CHECK(expectations2.Check(*new_map2));
} else {
expectations2.SetDataField(i, expected.constness,
expected.representation, expected.type);
- CHECK(!new_map2->GetBackPointer()->IsUndefined(isolate));
+ CHECK(!new_map2->GetBackPointer().IsUndefined(isolate));
CHECK(expectations2.Check(*new_map2));
}
} else {
@@ -2371,11 +2231,12 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
- TestConfig(PropertyAttributes attributes, Handle<Symbol> symbol)
- : attributes(attributes), symbol(symbol) {}
+ TestConfig(PropertyAttributes attributes, Handle<Symbol> symbol,
+ ElementsKind kind)
+ : attributes(attributes), symbol(symbol), elements_kind(kind) {}
Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
- expectations.SetElementsKind(DICTIONARY_ELEMENTS);
+ expectations.SetElementsKind(elements_kind);
expectations.ChangeAttributesForAllProperties(attributes);
return Map::CopyForPreventExtensions(CcTest::i_isolate(), map, attributes,
symbol, "CopyForPreventExtensions");
@@ -2386,11 +2247,17 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
PropertyAttributes attributes;
Handle<Symbol> symbol;
+ ElementsKind elements_kind;
};
Factory* factory = isolate->factory();
- TestConfig configs[] = {{FROZEN, factory->frozen_symbol()},
- {SEALED, factory->sealed_symbol()},
- {NONE, factory->nonextensible_symbol()}};
+ TestConfig configs[] = {
+ {FROZEN, factory->frozen_symbol(),
+ FLAG_enable_sealed_frozen_elements_kind ? HOLEY_FROZEN_ELEMENTS
+ : DICTIONARY_ELEMENTS},
+ {SEALED, factory->sealed_symbol(),
+ FLAG_enable_sealed_frozen_elements_kind ? HOLEY_SEALED_ELEMENTS
+ : DICTIONARY_ELEMENTS},
+ {NONE, factory->nonextensible_symbol(), DICTIONARY_ELEMENTS}};
for (size_t i = 0; i < arraysize(configs); i++) {
TestGeneralizeFieldWithSpecialTransition(
configs[i],
@@ -2418,8 +2285,9 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
- TestConfig(PropertyAttributes attributes, Handle<Symbol> symbol)
- : attributes(attributes), symbol(symbol) {}
+ TestConfig(PropertyAttributes attributes, Handle<Symbol> symbol,
+ ElementsKind kind)
+ : attributes(attributes), symbol(symbol), elements_kind(kind) {}
Handle<Map> Transition(Handle<Map> map, Expectations& expectations) {
Isolate* isolate = CcTest::i_isolate();
@@ -2434,7 +2302,7 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
.ToHandleChecked();
CHECK(!map->owns_descriptors());
- expectations.SetElementsKind(DICTIONARY_ELEMENTS);
+ expectations.SetElementsKind(elements_kind);
expectations.ChangeAttributesForAllProperties(attributes);
return Map::CopyForPreventExtensions(isolate, map, attributes, symbol,
"CopyForPreventExtensions");
@@ -2445,11 +2313,17 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
PropertyAttributes attributes;
Handle<Symbol> symbol;
+ ElementsKind elements_kind;
};
Factory* factory = isolate->factory();
- TestConfig configs[] = {{FROZEN, factory->frozen_symbol()},
- {SEALED, factory->sealed_symbol()},
- {NONE, factory->nonextensible_symbol()}};
+ TestConfig configs[] = {
+ {FROZEN, factory->frozen_symbol(),
+ FLAG_enable_sealed_frozen_elements_kind ? HOLEY_FROZEN_ELEMENTS
+ : DICTIONARY_ELEMENTS},
+ {SEALED, factory->sealed_symbol(),
+ FLAG_enable_sealed_frozen_elements_kind ? HOLEY_SEALED_ELEMENTS
+ : DICTIONARY_ELEMENTS},
+ {NONE, factory->nonextensible_symbol(), DICTIONARY_ELEMENTS}};
for (size_t i = 0; i < arraysize(configs); i++) {
TestGeneralizeFieldWithSpecialTransition(
configs[i],
@@ -2806,7 +2680,6 @@ TEST(TransitionDataConstantToAnotherDataConstant) {
Map::CopyInitialMap(isolate, isolate->sloppy_function_map());
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfoForBuiltin(name, Builtins::kIllegal);
- Handle<FieldType> function_type = FieldType::Class(sloppy_map, isolate);
CHECK(sloppy_map->is_stable());
Handle<JSFunction> js_func1 =
@@ -2817,16 +2690,8 @@ TEST(TransitionDataConstantToAnotherDataConstant) {
factory->NewFunction(sloppy_map, info, isolate->native_context());
TransitionToDataConstantOperator transition_op2(js_func2);
- if (FLAG_track_constant_fields) {
- SameMapChecker checker;
- TestTransitionTo(transition_op1, transition_op2, checker);
-
- } else {
- FieldGeneralizationChecker checker(
- kPropCount - 1, PropertyConstness::kMutable,
- Representation::HeapObject(), function_type);
- TestTransitionTo(transition_op1, transition_op2, checker);
- }
+ SameMapChecker checker;
+ TestTransitionTo(transition_op1, transition_op2, checker);
}
@@ -2846,7 +2711,7 @@ TEST(TransitionDataConstantToDataField) {
TransitionToDataFieldOperator transition_op2(
PropertyConstness::kMutable, Representation::Tagged(), any_type, value2);
- if (FLAG_track_constant_fields && FLAG_modify_field_representation_inplace) {
+ if (FLAG_modify_field_representation_inplace) {
SameMapChecker checker;
TestTransitionTo(transition_op1, transition_op2, checker);
} else {
@@ -2893,11 +2758,11 @@ TEST(HoleyMutableHeapNumber) {
Object::NewStorageFor(isolate, isolate->factory()->uninitialized_value(),
Representation::Double());
CHECK(obj->IsMutableHeapNumber());
- CHECK_EQ(kHoleNanInt64, MutableHeapNumber::cast(*obj)->value_as_bits());
+ CHECK_EQ(kHoleNanInt64, MutableHeapNumber::cast(*obj).value_as_bits());
obj = Object::NewStorageFor(isolate, mhn, Representation::Double());
CHECK(obj->IsMutableHeapNumber());
- CHECK_EQ(kHoleNanInt64, MutableHeapNumber::cast(*obj)->value_as_bits());
+ CHECK_EQ(kHoleNanInt64, MutableHeapNumber::cast(*obj).value_as_bits());
}
namespace {
@@ -2921,10 +2786,6 @@ void TestStoreToConstantField(const char* store_func_source,
Handle<JSFunction> store_func = GetGlobal<JSFunction>("store");
- const PropertyConstness kExpectedInitialFieldConstness =
- FLAG_track_constant_fields ? PropertyConstness::kConst
- : PropertyConstness::kMutable;
-
Handle<Map> initial_map = Map::Create(isolate, 4);
// Store value1 to obj1 and check that it got property with expected
@@ -2939,10 +2800,10 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors()->GetDetails(0).representation().Equals(
+ CHECK(map->instance_descriptors().GetDetails(0).representation().Equals(
expected_rep));
- CHECK_EQ(kExpectedInitialFieldConstness,
- map->instance_descriptors()->GetDetails(0).constness());
+ CHECK_EQ(PropertyConstness::kConst,
+ map->instance_descriptors().GetDetails(0).constness());
// Store value2 to obj2 and check that it got same map and property details
// did not change.
@@ -2954,10 +2815,10 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors()->GetDetails(0).representation().Equals(
+ CHECK(map->instance_descriptors().GetDetails(0).representation().Equals(
expected_rep));
- CHECK_EQ(kExpectedInitialFieldConstness,
- map->instance_descriptors()->GetDetails(0).constness());
+ CHECK_EQ(PropertyConstness::kConst,
+ map->instance_descriptors().GetDetails(0).constness());
// Store value2 to obj1 and check that property became mutable.
Call(isolate, store_func, obj1, value2).Check();
@@ -2967,10 +2828,10 @@ void TestStoreToConstantField(const char* store_func_source,
CHECK(!map->is_deprecated());
CHECK_EQ(1, map->NumberOfOwnDescriptors());
- CHECK(map->instance_descriptors()->GetDetails(0).representation().Equals(
+ CHECK(map->instance_descriptors().GetDetails(0).representation().Equals(
expected_rep));
CHECK_EQ(expected_constness,
- map->instance_descriptors()->GetDetails(0).constness());
+ map->instance_descriptors().GetDetails(0).constness());
}
void TestStoreToConstantField_PlusMinusZero(const char* store_func_source,
@@ -3006,12 +2867,8 @@ void TestStoreToConstantField_NaN(const char* store_func_source,
Handle<Object> nan2 = isolate->factory()->NewNumber(nan_double2);
// NaNs with different bit patters are treated as equal upon stores.
- const PropertyConstness kExpectedFieldConstness =
- FLAG_track_constant_fields ? PropertyConstness::kConst
- : PropertyConstness::kMutable;
-
TestStoreToConstantField(store_func_source, nan1, nan2,
- Representation::Double(), kExpectedFieldConstness,
+ Representation::Double(), PropertyConstness::kConst,
store_repetitions);
}
diff --git a/deps/v8/test/cctest/test-fixed-dtoa.cc b/deps/v8/test/cctest/test-fixed-dtoa.cc
index 9f3e2f22ca..62629c4e8f 100644
--- a/deps/v8/test/cctest/test-fixed-dtoa.cc
+++ b/deps/v8/test/cctest/test-fixed-dtoa.cc
@@ -27,11 +27,11 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/platform/platform.h"
-#include "src/double.h"
-#include "src/fixed-dtoa.h"
+#include "src/numbers/double.h"
+#include "src/numbers/fixed-dtoa.h"
#include "test/cctest/cctest.h"
#include "test/cctest/gay-fixed.h"
@@ -47,445 +47,445 @@ TEST(FastFixedVariousDoubles) {
int point;
CHECK(FastFixedDtoa(1.0, 1, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.0, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.0, 0, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0xFFFFFFFF, 5, buffer, &length, &point));
- CHECK_EQ(0, strcmp("4294967295", buffer.start()));
+ CHECK_EQ(0, strcmp("4294967295", buffer.begin()));
CHECK_EQ(10, point);
CHECK(FastFixedDtoa(4294967296.0, 5, buffer, &length, &point));
- CHECK_EQ(0, strcmp("4294967296", buffer.start()));
+ CHECK_EQ(0, strcmp("4294967296", buffer.begin()));
CHECK_EQ(10, point);
CHECK(FastFixedDtoa(1e21, 5, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
// CHECK_EQ(22, point);
CHECK_EQ(22, point);
CHECK(FastFixedDtoa(999999999999999868928.00, 2, buffer, &length, &point));
- CHECK_EQ(0, strcmp("999999999999999868928", buffer.start()));
+ CHECK_EQ(0, strcmp("999999999999999868928", buffer.begin()));
CHECK_EQ(21, point);
CHECK(FastFixedDtoa(6.9999999999999989514240000e+21, 5, buffer,
&length, &point));
- CHECK_EQ(0, strcmp("6999999999999998951424", buffer.start()));
+ CHECK_EQ(0, strcmp("6999999999999998951424", buffer.begin()));
CHECK_EQ(22, point);
CHECK(FastFixedDtoa(1.5, 5, buffer, &length, &point));
- CHECK_EQ(0, strcmp("15", buffer.start()));
+ CHECK_EQ(0, strcmp("15", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.55, 5, buffer, &length, &point));
- CHECK_EQ(0, strcmp("155", buffer.start()));
+ CHECK_EQ(0, strcmp("155", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.55, 1, buffer, &length, &point));
- CHECK_EQ(0, strcmp("16", buffer.start()));
+ CHECK_EQ(0, strcmp("16", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1.00000001, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("100000001", buffer.start()));
+ CHECK_EQ(0, strcmp("100000001", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.1, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(0.01, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.001, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.0001, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00001, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.000001, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(0.0000001, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-6, point);
CHECK(FastFixedDtoa(0.00000001, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-7, point);
CHECK(FastFixedDtoa(0.000000001, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(0.0000000001, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-9, point);
CHECK(FastFixedDtoa(0.00000000001, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(0.000000000001, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-11, point);
CHECK(FastFixedDtoa(0.0000000000001, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-12, point);
CHECK(FastFixedDtoa(0.00000000000001, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-13, point);
CHECK(FastFixedDtoa(0.000000000000001, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-14, point);
CHECK(FastFixedDtoa(0.0000000000000001, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-15, point);
CHECK(FastFixedDtoa(0.00000000000000001, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-16, point);
CHECK(FastFixedDtoa(0.000000000000000001, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-17, point);
CHECK(FastFixedDtoa(0.0000000000000000001, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-18, point);
CHECK(FastFixedDtoa(0.00000000000000000001, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(0.10000000004, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(0.01000000004, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.00100000004, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.00010000004, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00001000004, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.00000100004, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(0.00000010004, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-6, point);
CHECK(FastFixedDtoa(0.00000001004, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-7, point);
CHECK(FastFixedDtoa(0.00000000104, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(0.0000000001000004, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-9, point);
CHECK(FastFixedDtoa(0.0000000000100004, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(0.0000000000010004, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-11, point);
CHECK(FastFixedDtoa(0.0000000000001004, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-12, point);
CHECK(FastFixedDtoa(0.0000000000000104, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-13, point);
CHECK(FastFixedDtoa(0.000000000000001000004, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-14, point);
CHECK(FastFixedDtoa(0.000000000000000100004, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-15, point);
CHECK(FastFixedDtoa(0.000000000000000010004, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-16, point);
CHECK(FastFixedDtoa(0.000000000000000001004, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-17, point);
CHECK(FastFixedDtoa(0.000000000000000000104, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-18, point);
CHECK(FastFixedDtoa(0.000000000000000000014, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(0.10000000006, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1000000001", buffer.start()));
+ CHECK_EQ(0, strcmp("1000000001", buffer.begin()));
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(0.01000000006, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("100000001", buffer.start()));
+ CHECK_EQ(0, strcmp("100000001", buffer.begin()));
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.00100000006, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("10000001", buffer.start()));
+ CHECK_EQ(0, strcmp("10000001", buffer.begin()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.00010000006, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1000001", buffer.start()));
+ CHECK_EQ(0, strcmp("1000001", buffer.begin()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00001000006, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("100001", buffer.start()));
+ CHECK_EQ(0, strcmp("100001", buffer.begin()));
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.00000100006, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("10001", buffer.start()));
+ CHECK_EQ(0, strcmp("10001", buffer.begin()));
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(0.00000010006, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1001", buffer.start()));
+ CHECK_EQ(0, strcmp("1001", buffer.begin()));
CHECK_EQ(-6, point);
CHECK(FastFixedDtoa(0.00000001006, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("101", buffer.start()));
+ CHECK_EQ(0, strcmp("101", buffer.begin()));
CHECK_EQ(-7, point);
CHECK(FastFixedDtoa(0.00000000106, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("11", buffer.start()));
+ CHECK_EQ(0, strcmp("11", buffer.begin()));
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(0.0000000001000006, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("100001", buffer.start()));
+ CHECK_EQ(0, strcmp("100001", buffer.begin()));
CHECK_EQ(-9, point);
CHECK(FastFixedDtoa(0.0000000000100006, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("10001", buffer.start()));
+ CHECK_EQ(0, strcmp("10001", buffer.begin()));
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(0.0000000000010006, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1001", buffer.start()));
+ CHECK_EQ(0, strcmp("1001", buffer.begin()));
CHECK_EQ(-11, point);
CHECK(FastFixedDtoa(0.0000000000001006, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("101", buffer.start()));
+ CHECK_EQ(0, strcmp("101", buffer.begin()));
CHECK_EQ(-12, point);
CHECK(FastFixedDtoa(0.0000000000000106, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("11", buffer.start()));
+ CHECK_EQ(0, strcmp("11", buffer.begin()));
CHECK_EQ(-13, point);
CHECK(FastFixedDtoa(0.000000000000001000006, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("100001", buffer.start()));
+ CHECK_EQ(0, strcmp("100001", buffer.begin()));
CHECK_EQ(-14, point);
CHECK(FastFixedDtoa(0.000000000000000100006, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("10001", buffer.start()));
+ CHECK_EQ(0, strcmp("10001", buffer.begin()));
CHECK_EQ(-15, point);
CHECK(FastFixedDtoa(0.000000000000000010006, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1001", buffer.start()));
+ CHECK_EQ(0, strcmp("1001", buffer.begin()));
CHECK_EQ(-16, point);
CHECK(FastFixedDtoa(0.000000000000000001006, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("101", buffer.start()));
+ CHECK_EQ(0, strcmp("101", buffer.begin()));
CHECK_EQ(-17, point);
CHECK(FastFixedDtoa(0.000000000000000000106, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("11", buffer.start()));
+ CHECK_EQ(0, strcmp("11", buffer.begin()));
CHECK_EQ(-18, point);
CHECK(FastFixedDtoa(0.000000000000000000016, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("2", buffer.start()));
+ CHECK_EQ(0, strcmp("2", buffer.begin()));
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(0.6, 0, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.96, 1, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.996, 2, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9996, 3, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99996, 4, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999996, 5, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999996, 6, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99999996, 7, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999999996, 8, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999999996, 9, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99999999996, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999999999996, 11, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999999999996, 12, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.99999999999996, 13, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.999999999999996, 14, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.9999999999999996, 15, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(0.00999999999999996, 16, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-1, point);
CHECK(FastFixedDtoa(0.000999999999999996, 17, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(0.0000999999999999996, 18, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(0.00000999999999999996, 19, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-4, point);
CHECK(FastFixedDtoa(0.000000999999999999996, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-5, point);
CHECK(FastFixedDtoa(323423.234234, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("323423234234", buffer.start()));
+ CHECK_EQ(0, strcmp("323423234234", buffer.begin()));
CHECK_EQ(6, point);
CHECK(FastFixedDtoa(12345678.901234, 4, buffer, &length, &point));
- CHECK_EQ(0, strcmp("123456789012", buffer.start()));
+ CHECK_EQ(0, strcmp("123456789012", buffer.begin()));
CHECK_EQ(8, point);
CHECK(FastFixedDtoa(98765.432109, 5, buffer, &length, &point));
- CHECK_EQ(0, strcmp("9876543211", buffer.start()));
+ CHECK_EQ(0, strcmp("9876543211", buffer.begin()));
CHECK_EQ(5, point);
CHECK(FastFixedDtoa(42, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("42", buffer.start()));
+ CHECK_EQ(0, strcmp("42", buffer.begin()));
CHECK_EQ(2, point);
CHECK(FastFixedDtoa(0.5, 0, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(1, point);
CHECK(FastFixedDtoa(1e-23, 10, buffer, &length, &point));
- CHECK_EQ(0, strcmp("", buffer.start()));
+ CHECK_EQ(0, strcmp("", buffer.begin()));
CHECK_EQ(-10, point);
CHECK(FastFixedDtoa(1e-123, 2, buffer, &length, &point));
- CHECK_EQ(0, strcmp("", buffer.start()));
+ CHECK_EQ(0, strcmp("", buffer.begin()));
CHECK_EQ(-2, point);
CHECK(FastFixedDtoa(1e-123, 0, buffer, &length, &point));
- CHECK_EQ(0, strcmp("", buffer.start()));
+ CHECK_EQ(0, strcmp("", buffer.begin()));
CHECK_EQ(0, point);
CHECK(FastFixedDtoa(1e-23, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("", buffer.start()));
+ CHECK_EQ(0, strcmp("", buffer.begin()));
CHECK_EQ(-20, point);
CHECK(FastFixedDtoa(1e-21, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("", buffer.start()));
+ CHECK_EQ(0, strcmp("", buffer.begin()));
CHECK_EQ(-20, point);
CHECK(FastFixedDtoa(1e-22, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("", buffer.start()));
+ CHECK_EQ(0, strcmp("", buffer.begin()));
CHECK_EQ(-20, point);
CHECK(FastFixedDtoa(6e-21, 20, buffer, &length, &point));
- CHECK_EQ(0, strcmp("1", buffer.start()));
+ CHECK_EQ(0, strcmp("1", buffer.begin()));
CHECK_EQ(-19, point);
CHECK(FastFixedDtoa(9.1193616301674545152000000e+19, 0,
buffer, &length, &point));
- CHECK_EQ(0, strcmp("91193616301674545152", buffer.start()));
+ CHECK_EQ(0, strcmp("91193616301674545152", buffer.begin()));
CHECK_EQ(20, point);
CHECK(FastFixedDtoa(4.8184662102767651659096515e-04, 19,
buffer, &length, &point));
- CHECK_EQ(0, strcmp("4818466210276765", buffer.start()));
+ CHECK_EQ(0, strcmp("4818466210276765", buffer.begin()));
CHECK_EQ(-3, point);
CHECK(FastFixedDtoa(1.9023164229540652612705182e-23, 8,
buffer, &length, &point));
- CHECK_EQ(0, strcmp("", buffer.start()));
+ CHECK_EQ(0, strcmp("", buffer.begin()));
CHECK_EQ(-8, point);
CHECK(FastFixedDtoa(1000000000000000128.0, 0,
buffer, &length, &point));
- CHECK_EQ(0, strcmp("1000000000000000128", buffer.start()));
+ CHECK_EQ(0, strcmp("1000000000000000128", buffer.begin()));
CHECK_EQ(19, point);
}
@@ -508,7 +508,7 @@ TEST(FastFixedDtoaGayFixed) {
CHECK(status);
CHECK_EQ(current_test.decimal_point, point);
CHECK_GE(number_digits, length - point);
- CHECK_EQ(0, strcmp(current_test.representation, buffer.start()));
+ CHECK_EQ(0, strcmp(current_test.representation, buffer.begin()));
}
}
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index e38a61f4b3..93c7048f81 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -27,8 +27,8 @@
#include <stdlib.h>
-#include "src/flags.h"
-#include "src/v8.h"
+#include "src/flags/flags.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -80,7 +80,7 @@ TEST(Flags2b) {
"-notesting-maybe-bool-flag "
"-testing_float_flag=.25 "
"--testing_string_flag no_way! ";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
+ CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str)));
CHECK(!FLAG_testing_bool_flag);
CHECK(FLAG_testing_maybe_bool_flag.has_value);
CHECK(!FLAG_testing_maybe_bool_flag.value);
@@ -117,7 +117,7 @@ TEST(Flags3b) {
"--testing_int_flag -666 "
"--testing_float_flag -12E10 "
"-testing-string-flag=foo-bar";
- CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str)));
+ CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str)));
CHECK(FLAG_testing_bool_flag);
CHECK(FLAG_testing_maybe_bool_flag.has_value);
CHECK(FLAG_testing_maybe_bool_flag.value);
@@ -142,7 +142,7 @@ TEST(Flags4) {
TEST(Flags4b) {
SetFlagsToDefault();
const char* str = "--testing_bool_flag --foo";
- CHECK_EQ(2, FlagList::SetFlagsFromString(str, StrLength(str)));
+ CHECK_EQ(2, FlagList::SetFlagsFromString(str, strlen(str)));
CHECK(!FLAG_testing_maybe_bool_flag.has_value);
}
@@ -161,7 +161,7 @@ TEST(Flags5) {
TEST(Flags5b) {
SetFlagsToDefault();
const char* str = " --testing_int_flag=\"foobar\"";
- CHECK_EQ(1, FlagList::SetFlagsFromString(str, StrLength(str)));
+ CHECK_EQ(1, FlagList::SetFlagsFromString(str, strlen(str)));
}
@@ -180,7 +180,7 @@ TEST(Flags6) {
TEST(Flags6b) {
SetFlagsToDefault();
const char* str = " --testing-int-flag 0 --testing_float_flag ";
- CHECK_EQ(3, FlagList::SetFlagsFromString(str, StrLength(str)));
+ CHECK_EQ(3, FlagList::SetFlagsFromString(str, strlen(str)));
}
TEST(FlagsRemoveIncomplete) {
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 538be20e71..73f302f691 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -27,12 +27,12 @@
#include <memory>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/debug/debug.h"
-#include "src/objects-inl.h"
-#include "src/string-search.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/string-search.h"
#include "test/cctest/cctest.h"
@@ -59,11 +59,11 @@ static void CheckFunctionName(v8::Local<v8::Script> script,
Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(*obj), isolate);
} else {
shared_function =
- Handle<SharedFunctionInfo>(JSFunction::cast(*obj)->shared(), isolate);
+ Handle<SharedFunctionInfo>(JSFunction::cast(*obj).shared(), isolate);
}
Handle<i::Script> i_script(i::Script::cast(shared_function->script()),
isolate);
- CHECK(i_script->source()->IsString());
+ CHECK(i_script->source().IsString());
Handle<i::String> script_src(i::String::cast(i_script->source()), isolate);
// Find the position of a given func source substring in the source.
@@ -84,7 +84,7 @@ static void CheckFunctionName(v8::Local<v8::Script> script,
// Verify inferred function name.
std::unique_ptr<char[]> inferred_name =
- shared_func_info->inferred_name()->ToCString();
+ shared_func_info->inferred_name().ToCString();
i::PrintF("expected: %s, found: %s\n", ref_inferred_name,
inferred_name.get());
CHECK_EQ(0, strcmp(ref_inferred_name, inferred_name.get()));
diff --git a/deps/v8/test/cctest/test-fuzz-arm64.cc b/deps/v8/test/cctest/test-fuzz-arm64.cc
index 92f917a703..8650b261ab 100644
--- a/deps/v8/test/cctest/test-fuzz-arm64.cc
+++ b/deps/v8/test/cctest/test-fuzz-arm64.cc
@@ -25,9 +25,9 @@
#include <stdlib.h>
#include "test/cctest/cctest.h"
-#include "src/arm64/decoder-arm64.h"
-#include "src/arm64/decoder-arm64-inl.h"
-#include "src/arm64/disasm-arm64.h"
+#include "src/codegen/arm64/decoder-arm64-inl.h"
+#include "src/codegen/arm64/decoder-arm64.h"
+#include "src/diagnostics/arm64/disasm-arm64.h"
#if defined(V8_OS_WIN)
#define RANDGEN() rand()
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 2c620b4963..417679432b 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -25,12 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/api-inl.h"
-#include "src/global-handles.h"
+#include "src/api/api-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -39,6 +39,25 @@ namespace internal {
namespace {
+// Empty v8::EmbedderHeapTracer that never keeps objects alive on Scavenge. See
+// |IsRootForNonTracingGC|.
+class NonRootingEmbedderHeapTracer final : public v8::EmbedderHeapTracer {
+ public:
+ NonRootingEmbedderHeapTracer() = default;
+
+ void RegisterV8References(
+ const std::vector<std::pair<void*, void*>>& embedder_fields) final {}
+ bool AdvanceTracing(double deadline_in_ms) final { return true; }
+ bool IsTracingDone() final { return true; }
+ void TracePrologue() final {}
+ void TraceEpilogue() final {}
+ void EnterFinalPause(EmbedderStackState) final {}
+
+ bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) final {
+ return false;
+ }
+};
+
void InvokeScavenge() { CcTest::CollectGarbage(i::NEW_SPACE); }
void InvokeMarkSweep() { CcTest::CollectAllGarbage(); }
@@ -47,23 +66,23 @@ void SimpleCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
info.GetReturnValue().Set(v8_num(0));
}
-struct FlagAndPersistent {
+struct FlagAndGlobal {
bool flag;
v8::Global<v8::Object> handle;
};
-void ResetHandleAndSetFlag(
- const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+struct TracedGlobalWrapper {
+ v8::TracedGlobal<v8::Object> handle;
+};
+
+void ResetHandleAndSetFlag(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
data.GetParameter()->handle.Reset();
data.GetParameter()->flag = true;
}
-using ConstructFunction = void (*)(v8::Isolate* isolate,
- v8::Local<v8::Context> context,
- FlagAndPersistent* flag_and_persistent);
-
+template <typename HandleContainer>
void ConstructJSObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- FlagAndPersistent* flag_and_persistent) {
+ HandleContainer* flag_and_persistent) {
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Object> object(v8::Object::New(isolate));
CHECK(!object.IsEmpty());
@@ -79,8 +98,9 @@ void ConstructJSObject(v8::Isolate* isolate, v8::Global<v8::Object>* global) {
CHECK(!global->IsEmpty());
}
+template <typename HandleContainer>
void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
- FlagAndPersistent* flag_and_persistent) {
+ HandleContainer* flag_and_persistent) {
v8::HandleScope handle_scope(isolate);
v8::Local<v8::FunctionTemplate> fun =
v8::FunctionTemplate::New(isolate, SimpleCallback);
@@ -95,7 +115,8 @@ void ConstructJSApiObject(v8::Isolate* isolate, v8::Local<v8::Context> context,
enum class SurvivalMode { kSurvives, kDies };
-template <typename ModifierFunction, typename GCFunction>
+template <typename ConstructFunction, typename ModifierFunction,
+ typename GCFunction>
void WeakHandleTest(v8::Isolate* isolate, ConstructFunction construct_function,
ModifierFunction modifier_function, GCFunction gc_function,
SurvivalMode survives) {
@@ -103,7 +124,7 @@ void WeakHandleTest(v8::Isolate* isolate, ConstructFunction construct_function,
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- FlagAndPersistent fp;
+ FlagAndGlobal fp;
construct_function(isolate, context, &fp);
CHECK(heap::InYoungGeneration(isolate, fp.handle));
fp.handle.SetWeak(&fp, &ResetHandleAndSetFlag,
@@ -115,6 +136,28 @@ void WeakHandleTest(v8::Isolate* isolate, ConstructFunction construct_function,
CHECK_IMPLIES(survives == SurvivalMode::kDies, fp.flag);
}
+template <typename ConstructFunction, typename ModifierFunction,
+ typename GCFunction>
+void TracedGlobalTest(v8::Isolate* isolate,
+ ConstructFunction construct_function,
+ ModifierFunction modifier_function,
+ GCFunction gc_function, SurvivalMode survives) {
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ NonRootingEmbedderHeapTracer tracer;
+ heap::TemporaryEmbedderHeapTracerScope tracer_scope(isolate, &tracer);
+
+ TracedGlobalWrapper fp;
+ construct_function(isolate, context, &fp);
+ CHECK(heap::InYoungGeneration(isolate, fp.handle));
+ modifier_function(&fp);
+ gc_function();
+ CHECK_IMPLIES(survives == SurvivalMode::kSurvives, !fp.handle.IsEmpty());
+ CHECK_IMPLIES(survives == SurvivalMode::kDies, fp.handle.IsEmpty());
+}
+
void ResurrectingFinalizer(
const v8::WeakCallbackInfo<v8::Global<v8::Object>>& data) {
data.GetParameter()->ClearWeak();
@@ -277,25 +320,36 @@ TEST(PhatomHandlesWithoutCallbacks) {
CHECK_EQ(0u, isolate->NumberOfPhantomHandleResetsSinceLastCall());
}
-TEST(WeakHandleToUnmodifiedJSObjectSurvivesScavenge) {
+TEST(WeakHandleToUnmodifiedJSObjectDiesOnScavenge) {
CcTest::InitializeVM();
WeakHandleTest(
- CcTest::isolate(), &ConstructJSObject, [](FlagAndPersistent* fp) {},
- []() { InvokeScavenge(); }, SurvivalMode::kSurvives);
+ CcTest::isolate(), &ConstructJSObject<FlagAndGlobal>,
+ [](FlagAndGlobal* fp) {}, []() { InvokeScavenge(); },
+ SurvivalMode::kDies);
+}
+
+TEST(TracedGlobalToUnmodifiedJSObjectSurvivesScavenge) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ TracedGlobalTest(
+ CcTest::isolate(), &ConstructJSObject<TracedGlobalWrapper>,
+ [](TracedGlobalWrapper* fp) {}, []() { InvokeScavenge(); },
+ SurvivalMode::kSurvives);
}
TEST(WeakHandleToUnmodifiedJSObjectDiesOnMarkCompact) {
CcTest::InitializeVM();
WeakHandleTest(
- CcTest::isolate(), &ConstructJSObject, [](FlagAndPersistent* fp) {},
- []() { InvokeMarkSweep(); }, SurvivalMode::kDies);
+ CcTest::isolate(), &ConstructJSObject<FlagAndGlobal>,
+ [](FlagAndGlobal* fp) {}, []() { InvokeMarkSweep(); },
+ SurvivalMode::kDies);
}
TEST(WeakHandleToUnmodifiedJSObjectSurvivesMarkCompactWhenInHandle) {
CcTest::InitializeVM();
WeakHandleTest(
- CcTest::isolate(), &ConstructJSObject,
- [](FlagAndPersistent* fp) {
+ CcTest::isolate(), &ConstructJSObject<FlagAndGlobal>,
+ [](FlagAndGlobal* fp) {
v8::Local<v8::Object> handle =
v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
USE(handle);
@@ -306,24 +360,35 @@ TEST(WeakHandleToUnmodifiedJSObjectSurvivesMarkCompactWhenInHandle) {
TEST(WeakHandleToUnmodifiedJSApiObjectDiesOnScavenge) {
CcTest::InitializeVM();
WeakHandleTest(
- CcTest::isolate(), &ConstructJSApiObject, [](FlagAndPersistent* fp) {},
- []() { InvokeScavenge(); }, SurvivalMode::kDies);
+ CcTest::isolate(), &ConstructJSApiObject<FlagAndGlobal>,
+ [](FlagAndGlobal* fp) {}, []() { InvokeScavenge(); },
+ SurvivalMode::kDies);
+}
+
+TEST(TracedGlobalToUnmodifiedJSApiObjectDiesOnScavenge) {
+ ManualGCScope manual_gc;
+ CcTest::InitializeVM();
+ TracedGlobalTest(
+ CcTest::isolate(), &ConstructJSApiObject<TracedGlobalWrapper>,
+ [](TracedGlobalWrapper* fp) {}, []() { InvokeScavenge(); },
+ SurvivalMode::kDies);
}
-TEST(WeakHandleToJSApiObjectWithIdentityHashSurvivesScavenge) {
+TEST(TracedGlobalToJSApiObjectWithIdentityHashSurvivesScavenge) {
+ ManualGCScope manual_gc;
CcTest::InitializeVM();
Isolate* i_isolate = CcTest::i_isolate();
HandleScope scope(i_isolate);
Handle<JSWeakMap> weakmap = i_isolate->factory()->NewJSWeakMap();
- WeakHandleTest(
- CcTest::isolate(), &ConstructJSApiObject,
- [&weakmap, i_isolate](FlagAndPersistent* fp) {
+ TracedGlobalTest(
+ CcTest::isolate(), &ConstructJSApiObject<TracedGlobalWrapper>,
+ [&weakmap, i_isolate](TracedGlobalWrapper* fp) {
v8::HandleScope scope(CcTest::isolate());
Handle<JSReceiver> key =
Utils::OpenHandle(*fp->handle.Get(CcTest::isolate()));
Handle<Smi> smi(Smi::FromInt(23), i_isolate);
- int32_t hash = key->GetOrCreateHash(i_isolate)->value();
+ int32_t hash = key->GetOrCreateHash(i_isolate).value();
JSWeakCollection::Set(weakmap, key, smi, hash);
},
[]() { InvokeScavenge(); }, SurvivalMode::kSurvives);
@@ -332,8 +397,8 @@ TEST(WeakHandleToJSApiObjectWithIdentityHashSurvivesScavenge) {
TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesScavengeWhenInHandle) {
CcTest::InitializeVM();
WeakHandleTest(
- CcTest::isolate(), &ConstructJSApiObject,
- [](FlagAndPersistent* fp) {
+ CcTest::isolate(), &ConstructJSApiObject<FlagAndGlobal>,
+ [](FlagAndGlobal* fp) {
v8::Local<v8::Object> handle =
v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
USE(handle);
@@ -344,15 +409,16 @@ TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesScavengeWhenInHandle) {
TEST(WeakHandleToUnmodifiedJSApiObjectDiesOnMarkCompact) {
CcTest::InitializeVM();
WeakHandleTest(
- CcTest::isolate(), &ConstructJSApiObject, [](FlagAndPersistent* fp) {},
- []() { InvokeMarkSweep(); }, SurvivalMode::kDies);
+ CcTest::isolate(), &ConstructJSApiObject<FlagAndGlobal>,
+ [](FlagAndGlobal* fp) {}, []() { InvokeMarkSweep(); },
+ SurvivalMode::kDies);
}
TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
CcTest::InitializeVM();
WeakHandleTest(
- CcTest::isolate(), &ConstructJSApiObject,
- [](FlagAndPersistent* fp) {
+ CcTest::isolate(), &ConstructJSApiObject<FlagAndGlobal>,
+ [](FlagAndGlobal* fp) {
v8::Local<v8::Object> handle =
v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
USE(handle);
@@ -360,58 +426,57 @@ TEST(WeakHandleToUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
[]() { InvokeMarkSweep(); }, SurvivalMode::kSurvives);
}
-TEST(WeakHandleToActiveUnmodifiedJSApiObjectSurvivesScavenge) {
+TEST(TracedGlobalToJSApiObjectWithModifiedMapSurvivesScavenge) {
CcTest::InitializeVM();
- WeakHandleTest(
- CcTest::isolate(), &ConstructJSApiObject,
- [](FlagAndPersistent* fp) {
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- fp->handle.MarkActive();
-#if __clang__
-#pragma clang diagnostic pop
-#endif
- },
- []() { InvokeScavenge(); }, SurvivalMode::kSurvives);
-}
+ v8::Isolate* isolate = CcTest::isolate();
+ LocalContext context;
-TEST(WeakHandleToActiveUnmodifiedJSApiObjectDiesOnMarkCompact) {
- CcTest::InitializeVM();
- WeakHandleTest(
- CcTest::isolate(), &ConstructJSApiObject,
- [](FlagAndPersistent* fp) {
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- fp->handle.MarkActive();
-#if __clang__
-#pragma clang diagnostic pop
-#endif
- },
- []() { InvokeMarkSweep(); }, SurvivalMode::kDies);
+ TracedGlobal<v8::Object> handle;
+ {
+ v8::HandleScope scope(isolate);
+ // Create an API object which does not have the same map as constructor.
+ auto function_template = FunctionTemplate::New(isolate);
+ auto instance_t = function_template->InstanceTemplate();
+ instance_t->Set(
+ v8::String::NewFromUtf8(isolate, "a", NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(isolate, 10));
+ auto function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
+ auto i = function->NewInstance(context.local()).ToLocalChecked();
+ handle.Reset(isolate, i);
+ }
+ InvokeScavenge();
+ CHECK(!handle.IsEmpty());
}
-TEST(WeakHandleToActiveUnmodifiedJSApiObjectSurvivesMarkCompactWhenInHandle) {
+TEST(TracedGlobalTOJsApiObjectWithElementsSurvivesScavenge) {
CcTest::InitializeVM();
- WeakHandleTest(
- CcTest::isolate(), &ConstructJSApiObject,
- [](FlagAndPersistent* fp) {
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
- fp->handle.MarkActive();
-#if __clang__
-#pragma clang diagnostic pop
-#endif
- v8::Local<v8::Object> handle =
- v8::Local<v8::Object>::New(CcTest::isolate(), fp->handle);
- USE(handle);
- },
- []() { InvokeMarkSweep(); }, SurvivalMode::kSurvives);
+ v8::Isolate* isolate = CcTest::isolate();
+ LocalContext context;
+
+ TracedGlobal<v8::Object> handle;
+ {
+ v8::HandleScope scope(isolate);
+
+ // Create an API object which has elements.
+ auto function_template = FunctionTemplate::New(isolate);
+ auto instance_t = function_template->InstanceTemplate();
+ instance_t->Set(
+ v8::String::NewFromUtf8(isolate, "1", NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(isolate, 10));
+ instance_t->Set(
+ v8::String::NewFromUtf8(isolate, "2", NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(isolate, 10));
+ auto function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
+ auto i = function->NewInstance(context.local()).ToLocalChecked();
+ handle.Reset(isolate, i);
+ }
+ InvokeScavenge();
+ CHECK(!handle.IsEmpty());
}
TEST(FinalizerOnUnmodifiedJSApiObjectDoesNotCrash) {
@@ -422,8 +487,7 @@ TEST(FinalizerOnUnmodifiedJSApiObjectDoesNotCrash) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- FlagAndPersistent fp;
- // Could use a regular object and MarkIndependent too.
+ FlagAndGlobal fp;
ConstructJSApiObject(isolate, context, &fp);
fp.handle.SetWeak(&fp, &ResetHandleAndSetFlag,
v8::WeakCallbackType::kFinalizer);
@@ -495,22 +559,22 @@ TEST(FinalizerDiesAndKeepsPhantomAliveOnMarkCompact) {
namespace {
-void ForceScavenge2(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+void ForceScavenge2(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
data.GetParameter()->flag = true;
InvokeScavenge();
}
-void ForceScavenge1(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+void ForceScavenge1(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
data.GetParameter()->handle.Reset();
data.SetSecondPassCallback(ForceScavenge2);
}
-void ForceMarkSweep2(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+void ForceMarkSweep2(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
data.GetParameter()->flag = true;
InvokeMarkSweep();
}
-void ForceMarkSweep1(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+void ForceMarkSweep1(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
data.GetParameter()->handle.Reset();
data.SetSecondPassCallback(ForceMarkSweep2);
}
@@ -525,16 +589,16 @@ TEST(GCFromWeakCallbacks) {
v8::Context::Scope context_scope(context);
static const int kNumberOfGCTypes = 2;
- typedef v8::WeakCallbackInfo<FlagAndPersistent>::Callback Callback;
+ using Callback = v8::WeakCallbackInfo<FlagAndGlobal>::Callback;
Callback gc_forcing_callback[kNumberOfGCTypes] = {&ForceScavenge1,
&ForceMarkSweep1};
- typedef void (*GCInvoker)();
+ using GCInvoker = void (*)();
GCInvoker invoke_gc[kNumberOfGCTypes] = {&InvokeScavenge, &InvokeMarkSweep};
for (int outer_gc = 0; outer_gc < kNumberOfGCTypes; outer_gc++) {
for (int inner_gc = 0; inner_gc < kNumberOfGCTypes; inner_gc++) {
- FlagAndPersistent fp;
+ FlagAndGlobal fp;
ConstructJSApiObject(isolate, context, &fp);
CHECK(heap::InYoungGeneration(isolate, fp.handle));
fp.flag = false;
@@ -549,11 +613,11 @@ TEST(GCFromWeakCallbacks) {
namespace {
-void SecondPassCallback(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+void SecondPassCallback(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
data.GetParameter()->flag = true;
}
-void FirstPassCallback(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
+void FirstPassCallback(const v8::WeakCallbackInfo<FlagAndGlobal>& data) {
data.GetParameter()->handle.Reset();
data.SetSecondPassCallback(SecondPassCallback);
}
@@ -566,7 +630,7 @@ TEST(SecondPassPhantomCallbacks) {
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- FlagAndPersistent fp;
+ FlagAndGlobal fp;
ConstructJSApiObject(isolate, context, &fp);
fp.flag = false;
fp.handle.SetWeak(&fp, FirstPassCallback, v8::WeakCallbackType::kParameter);
diff --git a/deps/v8/test/cctest/test-global-object.cc b/deps/v8/test/cctest/test-global-object.cc
index 5c154565d9..a7bf03677e 100644
--- a/deps/v8/test/cctest/test-global-object.cc
+++ b/deps/v8/test/cctest/test-global-object.cc
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/objects-inl.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
using ::v8::Array;
diff --git a/deps/v8/test/cctest/test-hashcode.cc b/deps/v8/test/cctest/test-hashcode.cc
index 1dc4149495..3a46d2b0da 100644
--- a/deps/v8/test/cctest/test-hashcode.cc
+++ b/deps/v8/test/cctest/test-hashcode.cc
@@ -6,12 +6,12 @@
#include <sstream>
#include <utility>
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/objects/ordered-hash-table.h"
#include "src/third_party/siphash/halfsiphash.h"
-#include "src/utils.h"
-#include "src/v8.h"
+#include "src/utils/utils.h"
#include "test/cctest/cctest.h"
@@ -30,16 +30,16 @@ int AddToSetAndGetHash(Isolate* isolate, Handle<JSObject> obj,
void CheckFastObject(Handle<JSObject> obj, int hash) {
CHECK(obj->HasFastProperties());
- CHECK(obj->raw_properties_or_hash()->IsPropertyArray());
+ CHECK(obj->raw_properties_or_hash().IsPropertyArray());
CHECK_EQ(Smi::FromInt(hash), obj->GetHash());
- CHECK_EQ(hash, obj->property_array()->Hash());
+ CHECK_EQ(hash, obj->property_array().Hash());
}
void CheckDictionaryObject(Handle<JSObject> obj, int hash) {
CHECK(!obj->HasFastProperties());
- CHECK(obj->raw_properties_or_hash()->IsDictionary());
+ CHECK(obj->raw_properties_or_hash().IsNameDictionary());
CHECK_EQ(Smi::FromInt(hash), obj->GetHash());
- CHECK_EQ(hash, obj->property_dictionary()->Hash());
+ CHECK_EQ(hash, obj->property_dictionary().Hash());
}
TEST(AddHashCodeToFastObjectWithoutProperties) {
@@ -98,7 +98,7 @@ TEST(AddHashCodeToSlowObject) {
CHECK(obj->HasFastProperties());
JSObject::NormalizeProperties(obj, CLEAR_INOBJECT_PROPERTIES, 0,
"cctest/test-hashcode");
- CHECK(obj->raw_properties_or_hash()->IsDictionary());
+ CHECK(obj->raw_properties_or_hash().IsNameDictionary());
int hash = AddToSetAndGetHash(isolate, obj, false);
CheckDictionaryObject(obj, hash);
@@ -120,9 +120,9 @@ TEST(TransitionFastWithInObjectToFastWithPropertyArray) {
int hash = AddToSetAndGetHash(isolate, obj, true);
CHECK_EQ(Smi::FromInt(hash), obj->raw_properties_or_hash());
- int length = obj->property_array()->length();
+ int length = obj->property_array().length();
CompileRun("x.e = 5;");
- CHECK(obj->property_array()->length() > length);
+ CHECK(obj->property_array().length() > length);
CheckFastObject(obj, hash);
}
@@ -137,14 +137,14 @@ TEST(TransitionFastWithPropertyArray) {
CompileRun(source);
Handle<JSObject> obj = GetGlobal<JSObject>("x");
- CHECK(obj->raw_properties_or_hash()->IsPropertyArray());
+ CHECK(obj->raw_properties_or_hash().IsPropertyArray());
int hash = AddToSetAndGetHash(isolate, obj, true);
- CHECK_EQ(hash, obj->property_array()->Hash());
+ CHECK_EQ(hash, obj->property_array().Hash());
- int length = obj->property_array()->length();
+ int length = obj->property_array().length();
CompileRun("x.f = 2; x.g = 5; x.h = 2");
- CHECK(obj->property_array()->length() > length);
+ CHECK(obj->property_array().length() > length);
CheckFastObject(obj, hash);
}
@@ -159,11 +159,11 @@ TEST(TransitionFastWithPropertyArrayToSlow) {
CompileRun(source);
Handle<JSObject> obj = GetGlobal<JSObject>("x");
- CHECK(obj->raw_properties_or_hash()->IsPropertyArray());
+ CHECK(obj->raw_properties_or_hash().IsPropertyArray());
int hash = AddToSetAndGetHash(isolate, obj, true);
- CHECK(obj->raw_properties_or_hash()->IsPropertyArray());
- CHECK_EQ(hash, obj->property_array()->Hash());
+ CHECK(obj->raw_properties_or_hash().IsPropertyArray());
+ CHECK_EQ(hash, obj->property_array().Hash());
JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 0,
"cctest/test-hashcode");
@@ -181,14 +181,14 @@ TEST(TransitionSlowToSlow) {
Handle<JSObject> obj = GetGlobal<JSObject>("x");
JSObject::NormalizeProperties(obj, CLEAR_INOBJECT_PROPERTIES, 0,
"cctest/test-hashcode");
- CHECK(obj->raw_properties_or_hash()->IsDictionary());
+ CHECK(obj->raw_properties_or_hash().IsNameDictionary());
int hash = AddToSetAndGetHash(isolate, obj, false);
- CHECK_EQ(hash, obj->property_dictionary()->Hash());
+ CHECK_EQ(hash, obj->property_dictionary().Hash());
- int length = obj->property_dictionary()->length();
+ int length = obj->property_dictionary().length();
CompileRun("for(var i = 0; i < 10; i++) { x['f'+i] = i };");
- CHECK(obj->property_dictionary()->length() > length);
+ CHECK(obj->property_dictionary().length() > length);
CheckDictionaryObject(obj, hash);
}
@@ -201,10 +201,10 @@ TEST(TransitionSlowToFastWithoutProperties) {
isolate->factory()->NewJSObject(isolate->object_function());
JSObject::NormalizeProperties(obj, CLEAR_INOBJECT_PROPERTIES, 0,
"cctest/test-hashcode");
- CHECK(obj->raw_properties_or_hash()->IsDictionary());
+ CHECK(obj->raw_properties_or_hash().IsNameDictionary());
int hash = AddToSetAndGetHash(isolate, obj, false);
- CHECK_EQ(hash, obj->property_dictionary()->Hash());
+ CHECK_EQ(hash, obj->property_dictionary().Hash());
JSObject::MigrateSlowToFast(obj, 0, "cctest/test-hashcode");
CHECK_EQ(Smi::FromInt(hash), obj->GetHash());
@@ -221,10 +221,10 @@ TEST(TransitionSlowToFastWithPropertyArray) {
CompileRun(source);
Handle<JSObject> obj = GetGlobal<JSObject>("x");
- CHECK(obj->raw_properties_or_hash()->IsDictionary());
+ CHECK(obj->raw_properties_or_hash().IsNameDictionary());
int hash = AddToSetAndGetHash(isolate, obj, false);
- CHECK_EQ(hash, obj->property_dictionary()->Hash());
+ CHECK_EQ(hash, obj->property_dictionary().Hash());
JSObject::MigrateSlowToFast(obj, 0, "cctest/test-hashcode");
CheckFastObject(obj, hash);
@@ -232,7 +232,7 @@ TEST(TransitionSlowToFastWithPropertyArray) {
namespace {
-typedef uint32_t (*HashFunction)(uint32_t key, uint64_t seed);
+using HashFunction = uint32_t (*)(uint32_t key, uint64_t seed);
void TestIntegerHashQuality(const int samples_log2, int num_buckets_log2,
uint64_t seed, double max_var,
diff --git a/deps/v8/test/cctest/test-hashmap.cc b/deps/v8/test/cctest/test-hashmap.cc
index 4d93fe9bd5..e4e752a2bc 100644
--- a/deps/v8/test/cctest/test-hashmap.cc
+++ b/deps/v8/test/cctest/test-hashmap.cc
@@ -28,7 +28,7 @@
#include <stdlib.h>
#include "src/base/overflowing-math.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "src/base/hashmap.h"
@@ -37,7 +37,7 @@ namespace v8 {
namespace internal {
namespace test_hashmap {
-typedef uint32_t (*IntKeyHash)(uint32_t key);
+using IntKeyHash = uint32_t (*)(uint32_t key);
class IntSet {
public:
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 1435cb9bd7..815b7f51bb 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -31,21 +31,21 @@
#include <memory>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "include/v8-profiler.h"
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/hashmap.h"
#include "src/base/optional.h"
-#include "src/collector.h"
+#include "src/codegen/assembler-inl.h"
#include "src/debug/debug.h"
#include "src/heap/heap-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/collector.h"
using i::AllocationTraceNode;
using i::AllocationTraceTree;
@@ -1013,7 +1013,7 @@ class TestJSONStream : public v8::OutputStream {
if (abort_countdown_ == 0) return kAbort;
CHECK_GT(chars_written, 0);
i::Vector<char> chunk = buffer_.AddBlock(chars_written, '\0');
- i::MemCopy(chunk.start(), buffer, chars_written);
+ i::MemCopy(chunk.begin(), buffer, chars_written);
return kContinue;
}
virtual WriteResult WriteUint32Chunk(uint32_t* buffer, int chars_written) {
@@ -1031,7 +1031,7 @@ class TestJSONStream : public v8::OutputStream {
class OneByteResource : public v8::String::ExternalOneByteStringResource {
public:
- explicit OneByteResource(i::Vector<char> string) : data_(string.start()) {
+ explicit OneByteResource(i::Vector<char> string) : data_(string.begin()) {
length_ = string.length();
}
const char* data() const override { return data_; }
@@ -1899,7 +1899,7 @@ TEST(GetHeapValueForDeletedObject) {
}
static int StringCmp(const char* ref, i::String act) {
- std::unique_ptr<char[]> s_act = act->ToCString();
+ std::unique_ptr<char[]> s_act = act.ToCString();
int result = strcmp(ref, s_act.get());
if (result != 0)
fprintf(stderr, "Expected: \"%s\", Actual: \"%s\"\n", ref, s_act.get());
@@ -2440,15 +2440,13 @@ TEST(ManyLocalsInSharedContext) {
i::SNPrintF(var_name, "f_%d", i);
const v8::HeapGraphNode* f_object =
GetProperty(env->GetIsolate(), context_object,
- v8::HeapGraphEdge::kContextVariable, var_name.start());
+ v8::HeapGraphEdge::kContextVariable, var_name.begin());
CHECK(f_object);
}
}
TEST(AllocationSitesAreVisible) {
- if (i::FLAG_lite_mode) return;
-
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -2545,7 +2543,7 @@ static const v8::HeapGraphNode* GetNodeByPath(v8::Isolate* isolate,
v8::String::Utf8Value node_name(isolate, to_node->GetName());
i::EmbeddedVector<char, 100> name;
i::SNPrintF(name, "%s::%s", *edge_name, *node_name);
- if (strstr(name.start(), path[current_depth])) {
+ if (strstr(name.begin(), path[current_depth])) {
node = to_node;
break;
}
@@ -2928,6 +2926,7 @@ TEST(WeakContainers) {
CompileRun(
"function foo(a) { return a.x; }\n"
"obj = {x : 123};\n"
+ "%PrepareFunctionForOptimization(foo);"
"foo(obj);\n"
"foo(obj);\n"
"%OptimizeFunctionOnNextCall(foo);\n"
@@ -3096,7 +3095,7 @@ TEST(EmbedderGraph) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
v8::Local<v8::Value> global_object =
v8::Utils::ToLocal(i::Handle<i::JSObject>(
- (isolate->context()->native_context()->global_object()), isolate));
+ (isolate->context().native_context().global_object()), isolate));
global_object_pointer = &global_object;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
heap_profiler->AddBuildEmbedderGraphCallback(BuildEmbedderGraph, nullptr);
@@ -3160,7 +3159,7 @@ TEST(EmbedderGraphWithNamedEdges) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
v8::Local<v8::Value> global_object =
v8::Utils::ToLocal(i::Handle<i::JSObject>(
- (isolate->context()->native_context()->global_object()), isolate));
+ (isolate->context().native_context().global_object()), isolate));
global_object_pointer = &global_object;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
heap_profiler->AddBuildEmbedderGraphCallback(BuildEmbedderGraphWithNamedEdges,
@@ -3226,7 +3225,7 @@ TEST(EmbedderGraphMultipleCallbacks) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
v8::Local<v8::Value> global_object =
v8::Utils::ToLocal(i::Handle<i::JSObject>(
- (isolate->context()->native_context()->global_object()), isolate));
+ (isolate->context().native_context().global_object()), isolate));
global_object_pointer = &global_object;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
GraphBuildingContext context;
@@ -3303,7 +3302,7 @@ TEST(EmbedderGraphWithWrapperNode) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
v8::Local<v8::Value> global_object =
v8::Utils::ToLocal(i::Handle<i::JSObject>(
- (isolate->context()->native_context()->global_object()), isolate));
+ (isolate->context().native_context().global_object()), isolate));
global_object_pointer = &global_object;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
heap_profiler->AddBuildEmbedderGraphCallback(
@@ -3360,7 +3359,7 @@ TEST(EmbedderGraphWithPrefix) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env->GetIsolate());
v8::Local<v8::Value> global_object =
v8::Utils::ToLocal(i::Handle<i::JSObject>(
- (isolate->context()->native_context()->global_object()), isolate));
+ (isolate->context().native_context().global_object()), isolate));
global_object_pointer = &global_object;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
heap_profiler->AddBuildEmbedderGraphCallback(BuildEmbedderGraphWithPrefix,
@@ -3765,6 +3764,7 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
" }"
" return elements[number_elements - 1];"
"};"
+ "%%PrepareFunctionForOptimization(f);"
"f(); gc();"
"f(); f();"
"%%OptimizeFunctionOnNextCall(f);"
@@ -3773,7 +3773,7 @@ TEST(SamplingHeapProfilerPretenuredInlineAllocations) {
i::AllocationSite::kPretenureMinimumCreated + 1);
v8::Local<v8::Function> f =
- v8::Local<v8::Function>::Cast(CompileRun(source.start()));
+ v8::Local<v8::Function>::Cast(CompileRun(source.begin()));
// Make sure the function is producing pre-tenured objects.
auto res = f->Call(env.local(), env->Global(), 0, nullptr).ToLocalChecked();
@@ -3871,6 +3871,7 @@ TEST(SamplingHeapProfilerSampleDuringDeopt) {
" };"
" b.map(callback);"
" };"
+ " %PrepareFunctionForOptimization(lazyDeopt);"
" lazyDeopt();"
" lazyDeopt();"
" %OptimizeFunctionOnNextCall(lazyDeopt);"
@@ -3907,7 +3908,7 @@ TEST(WeakReference) {
i::Handle<i::Object> obj = v8::Utils::OpenHandle(*script);
i::Handle<i::SharedFunctionInfo> shared_function =
- i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared(),
+ i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj).shared(),
i_isolate);
i::Handle<i::ClosureFeedbackCellArray> feedback_cell_array =
i::ClosureFeedbackCellArray::New(i_isolate, shared_function);
@@ -3920,7 +3921,7 @@ TEST(WeakReference) {
i::CodeDesc desc;
assm.GetCode(i_isolate, &desc);
i::Handle<i::Code> code =
- factory->NewCode(desc, i::Code::STUB, i::Handle<i::Code>());
+ i::Factory::CodeBuilder(i_isolate, desc, i::Code::STUB).Build();
CHECK(code->IsCode());
fv->set_optimized_code_weak_or_smi(i::HeapObjectReference::Weak(*code));
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index ac03a6fc59..95cc3c7824 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
-#include "src/handles-inl.h"
-#include "src/macro-assembler-inl.h"
-#include "src/simulator.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/execution/simulator.h"
+#include "src/handles/handles-inl.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
diff --git a/deps/v8/test/cctest/test-identity-map.cc b/deps/v8/test/cctest/test-identity-map.cc
index aa5eb3e5c7..3b425a28e5 100644
--- a/deps/v8/test/cctest/test-identity-map.cc
+++ b/deps/v8/test/cctest/test-identity-map.cc
@@ -4,11 +4,11 @@
#include <set>
+#include "src/execution/isolate.h"
#include "src/heap/factory-inl.h"
-#include "src/identity-map.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/heap-number-inl.h"
+#include "src/utils/identity-map.h"
+#include "src/objects/objects.h"
#include "src/zone/zone.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index ac4ffb297e..58a3964619 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -6,10 +6,10 @@
#include <sstream>
#include <utility>
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/init/v8.h"
#include "src/objects/heap-number-inl.h"
-#include "src/v8.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
@@ -76,22 +76,25 @@ static inline Handle<T> CompileRunI(const char* script) {
}
static Object GetFieldValue(JSObject obj, int property_index) {
- FieldIndex index = FieldIndex::ForPropertyIndex(obj->map(), property_index);
- return obj->RawFastPropertyAt(index);
+ FieldIndex index = FieldIndex::ForPropertyIndex(obj.map(), property_index);
+ return obj.RawFastPropertyAt(index);
}
static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
- if (obj->IsUnboxedDoubleField(field_index)) {
- return obj->RawFastDoublePropertyAt(field_index);
+ if (obj.IsUnboxedDoubleField(field_index)) {
+ return obj.RawFastDoublePropertyAt(field_index);
} else {
- Object value = obj->RawFastPropertyAt(field_index);
- CHECK(value->IsMutableHeapNumber());
- return MutableHeapNumber::cast(value)->value();
+ Object value = obj.RawFastPropertyAt(field_index);
+ if (value.IsMutableHeapNumber()) {
+ return MutableHeapNumber::cast(value).value();
+ } else {
+ return value.Number();
+ }
}
}
static double GetDoubleFieldValue(JSObject obj, int property_index) {
- FieldIndex index = FieldIndex::ForPropertyIndex(obj->map(), property_index);
+ FieldIndex index = FieldIndex::ForPropertyIndex(obj.map(), property_index);
return GetDoubleFieldValue(obj, index);
}
@@ -99,8 +102,8 @@ bool IsObjectShrinkable(JSObject obj) {
Handle<Map> filler_map =
CcTest::i_isolate()->factory()->one_pointer_filler_map();
- int inobject_properties = obj->map()->GetInObjectProperties();
- int unused = obj->map()->UnusedPropertyFields();
+ int inobject_properties = obj.map().GetInObjectProperties();
+ int unused = obj.map().UnusedPropertyFields();
if (unused == 0) return false;
for (int i = inobject_properties - unused; i < inobject_properties; i++) {
@@ -142,7 +145,7 @@ TEST(JSObjectBasic) {
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
// There must be at least some slack.
- CHECK_LT(5, obj->map()->GetInObjectProperties());
+ CHECK_LT(5, obj->map().GetInObjectProperties());
CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj, 0));
CHECK_EQ(4.2, GetDoubleFieldValue(*obj, 1));
CHECK_EQ(*obj, GetFieldValue(*obj, 2));
@@ -159,7 +162,7 @@ TEST(JSObjectBasic) {
CHECK(!IsObjectShrinkable(*obj));
// No slack left.
- CHECK_EQ(3, obj->map()->GetInObjectProperties());
+ CHECK_EQ(3, obj->map().GetInObjectProperties());
}
@@ -203,7 +206,7 @@ TEST(JSObjectComplex) {
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
// There must be at least some slack.
- CHECK_LT(5, obj3->map()->GetInObjectProperties());
+ CHECK_LT(5, obj3->map().GetInObjectProperties());
CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj3, 0));
CHECK_EQ(4.2, GetDoubleFieldValue(*obj3, 1));
CHECK_EQ(*obj3, GetFieldValue(*obj3, 2));
@@ -223,14 +226,14 @@ TEST(JSObjectComplex) {
CHECK(IsObjectShrinkable(*obj3));
CHECK(!IsObjectShrinkable(*obj5));
- CHECK_EQ(5, obj1->map()->GetInObjectProperties());
- CHECK_EQ(4, obj1->map()->UnusedPropertyFields());
+ CHECK_EQ(5, obj1->map().GetInObjectProperties());
+ CHECK_EQ(4, obj1->map().UnusedPropertyFields());
- CHECK_EQ(5, obj3->map()->GetInObjectProperties());
- CHECK_EQ(2, obj3->map()->UnusedPropertyFields());
+ CHECK_EQ(5, obj3->map().GetInObjectProperties());
+ CHECK_EQ(2, obj3->map().UnusedPropertyFields());
- CHECK_EQ(5, obj5->map()->GetInObjectProperties());
- CHECK_EQ(0, obj5->map()->UnusedPropertyFields());
+ CHECK_EQ(5, obj5->map().GetInObjectProperties());
+ CHECK_EQ(0, obj5->map().UnusedPropertyFields());
// Since slack tracking is complete, the new objects should not be shrinkable.
obj1 = CompileRunI<JSObject>("new A(1);");
@@ -288,7 +291,7 @@ TEST(JSGeneratorObjectBasic) {
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
// There must be at least some slack.
- CHECK_LT(5, obj->map()->GetInObjectProperties());
+ CHECK_LT(5, obj->map().GetInObjectProperties());
CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj, 0));
CHECK_EQ(4.2, GetDoubleFieldValue(*obj, 1));
CHECK_EQ(*obj, GetFieldValue(*obj, 2));
@@ -305,7 +308,7 @@ TEST(JSGeneratorObjectBasic) {
CHECK(!IsObjectShrinkable(*obj));
// No slack left.
- CHECK_EQ(3, obj->map()->GetInObjectProperties());
+ CHECK_EQ(3, obj->map().GetInObjectProperties());
}
@@ -372,7 +375,7 @@ TEST(SubclassBasicNoBaseClassInstances) {
CHECK(b_initial_map->IsInobjectSlackTrackingInProgress());
// There must be at least some slack.
- CHECK_LT(10, obj->map()->GetInObjectProperties());
+ CHECK_LT(10, obj->map().GetInObjectProperties());
CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj, 0));
CHECK_EQ(4.2, GetDoubleFieldValue(*obj, 1));
CHECK_EQ(*obj, GetFieldValue(*obj, 2));
@@ -397,7 +400,7 @@ TEST(SubclassBasicNoBaseClassInstances) {
CHECK(a_initial_map->IsInobjectSlackTrackingInProgress());
// No slack left.
- CHECK_EQ(6, obj->map()->GetInObjectProperties());
+ CHECK_EQ(6, obj->map().GetInObjectProperties());
}
@@ -477,10 +480,10 @@ TEST(SubclassBasic) {
CHECK(!IsObjectShrinkable(*a_obj));
// No slack left.
- CHECK_EQ(3, a_obj->map()->GetInObjectProperties());
+ CHECK_EQ(3, a_obj->map().GetInObjectProperties());
// There must be at least some slack.
- CHECK_LT(10, b_obj->map()->GetInObjectProperties());
+ CHECK_LT(10, b_obj->map().GetInObjectProperties());
CHECK_EQ(Smi::FromInt(42), GetFieldValue(*b_obj, 0));
CHECK_EQ(4.2, GetDoubleFieldValue(*b_obj, 1));
CHECK_EQ(*b_obj, GetFieldValue(*b_obj, 2));
@@ -500,7 +503,7 @@ TEST(SubclassBasic) {
CHECK(!IsObjectShrinkable(*b_obj));
// No slack left.
- CHECK_EQ(6, b_obj->map()->GetInObjectProperties());
+ CHECK_EQ(6, b_obj->map().GetInObjectProperties());
}
@@ -577,10 +580,10 @@ static void TestClassHierarchy(const std::vector<int>& hierarchy_desc, int n) {
Handle<Map> initial_map(func->initial_map(), func->GetIsolate());
// If the object is slow-mode already, bail out.
- if (obj->map()->is_dictionary_map()) continue;
+ if (obj->map().is_dictionary_map()) continue;
// There must be at least some slack.
- CHECK_LT(fields_count, obj->map()->GetInObjectProperties());
+ CHECK_LT(fields_count, obj->map().GetInObjectProperties());
// One instance was created.
CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
@@ -604,7 +607,7 @@ static void TestClassHierarchy(const std::vector<int>& hierarchy_desc, int n) {
CHECK(!IsObjectShrinkable(*obj));
// No slack left.
- CHECK_EQ(fields_count, obj->map()->GetInObjectProperties());
+ CHECK_EQ(fields_count, obj->map().GetInObjectProperties());
}
}
@@ -687,8 +690,8 @@ TEST(InobjectPropetiesCountOverflowInSubclass) {
Handle<Map> initial_map(func->initial_map(), func->GetIsolate());
// There must be no slack left.
- CHECK_EQ(JSObject::kMaxInstanceSize, obj->map()->instance_size());
- CHECK_EQ(kMaxInobjectProperties, obj->map()->GetInObjectProperties());
+ CHECK_EQ(JSObject::kMaxInstanceSize, obj->map().instance_size());
+ CHECK_EQ(kMaxInobjectProperties, obj->map().GetInObjectProperties());
// One instance was created.
CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
@@ -705,7 +708,7 @@ TEST(InobjectPropetiesCountOverflowInSubclass) {
CHECK(!IsObjectShrinkable(*obj));
// No slack left.
- CHECK_EQ(kMaxInobjectProperties, obj->map()->GetInObjectProperties());
+ CHECK_EQ(kMaxInobjectProperties, obj->map().GetInObjectProperties());
}
// The other classes in the hierarchy are not affected.
@@ -715,7 +718,7 @@ TEST(InobjectPropetiesCountOverflowInSubclass) {
static void CheckExpectedProperties(int expected, std::ostringstream& os) {
Handle<HeapObject> obj = Handle<HeapObject>::cast(
v8::Utils::OpenHandle(*CompileRun(os.str().c_str())));
- CHECK_EQ(expected, obj->map()->GetInObjectProperties());
+ CHECK_EQ(expected, obj->map().GetInObjectProperties());
}
TEST(ObjectLiteralPropertyBackingStoreSize) {
@@ -871,8 +874,8 @@ TEST(SlowModeSubclass) {
Handle<Map> initial_map(func->initial_map(), func->GetIsolate());
// Object should go dictionary mode.
- CHECK_EQ(JSObject::kHeaderSize, obj->map()->instance_size());
- CHECK(obj->map()->is_dictionary_map());
+ CHECK_EQ(JSObject::kHeaderSize, obj->map().instance_size());
+ CHECK(obj->map().is_dictionary_map());
// One instance was created.
CHECK_EQ(Map::kSlackTrackingCounterStart - 1,
@@ -889,8 +892,8 @@ TEST(SlowModeSubclass) {
CHECK(!IsObjectShrinkable(*obj));
// Object should stay in dictionary mode.
- CHECK_EQ(JSObject::kHeaderSize, obj->map()->instance_size());
- CHECK(obj->map()->is_dictionary_map());
+ CHECK_EQ(JSObject::kHeaderSize, obj->map().instance_size());
+ CHECK(obj->map().is_dictionary_map());
}
// The other classes in the hierarchy are not affected.
@@ -953,7 +956,7 @@ static void TestSubclassBuiltin(const char* subclass_name,
CHECK(initial_map->IsInobjectSlackTrackingInProgress());
// There must be at least some slack.
- CHECK_LT(builtin_properties_count + 5, obj->map()->GetInObjectProperties());
+ CHECK_LT(builtin_properties_count + 5, obj->map().GetInObjectProperties());
CHECK_EQ(Smi::FromInt(42), GetFieldValue(*obj, builtin_properties_count + 0));
CHECK_EQ(4.2, GetDoubleFieldValue(*obj, builtin_properties_count + 1));
CHECK_EQ(*obj, GetFieldValue(*obj, builtin_properties_count + 2));
@@ -970,9 +973,9 @@ static void TestSubclassBuiltin(const char* subclass_name,
CHECK(!IsObjectShrinkable(*obj));
// No slack left.
- CHECK_EQ(builtin_properties_count + 3, obj->map()->GetInObjectProperties());
+ CHECK_EQ(builtin_properties_count + 3, obj->map().GetInObjectProperties());
- CHECK_EQ(instance_type, obj->map()->instance_type());
+ CHECK_EQ(instance_type, obj->map().instance_type());
}
@@ -1283,8 +1286,8 @@ TEST(SubclassTranspiledClassHierarchy) {
CHECK(!IsObjectShrinkable(*obj));
// No slack left.
- CHECK_EQ(21, obj->map()->GetInObjectProperties());
- CHECK_EQ(JS_OBJECT_TYPE, obj->map()->instance_type());
+ CHECK_EQ(21, obj->map().GetInObjectProperties());
+ CHECK_EQ(JS_OBJECT_TYPE, obj->map().instance_type());
}
TEST(Regress8853_ClassConstructor) {
@@ -1294,9 +1297,9 @@ TEST(Regress8853_ClassConstructor) {
// For classes without any this.prop assignments in their
// constructors we start out with 10 inobject properties.
Handle<JSObject> obj = CompileRunI<JSObject>("new (class {});\n");
- CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(obj->map().IsInobjectSlackTrackingInProgress());
CHECK(IsObjectShrinkable(*obj));
- CHECK_EQ(10, obj->map()->GetInObjectProperties());
+ CHECK_EQ(10, obj->map().GetInObjectProperties());
// For classes with N explicit this.prop assignments in their
// constructors we start out with N+8 inobject properties.
@@ -1308,9 +1311,9 @@ TEST(Regress8853_ClassConstructor) {
" this.z = 3;\n"
" }\n"
"});\n");
- CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(obj->map().IsInobjectSlackTrackingInProgress());
CHECK(IsObjectShrinkable(*obj));
- CHECK_EQ(3 + 8, obj->map()->GetInObjectProperties());
+ CHECK_EQ(3 + 8, obj->map().GetInObjectProperties());
}
TEST(Regress8853_ClassHierarchy) {
@@ -1324,9 +1327,9 @@ TEST(Regress8853_ClassHierarchy) {
for (int i = 1; i < 10; ++i) {
std::string script = "new " + base + ";\n";
Handle<JSObject> obj = CompileRunI<JSObject>(script.c_str());
- CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(obj->map().IsInobjectSlackTrackingInProgress());
CHECK(IsObjectShrinkable(*obj));
- CHECK_EQ(8 + 2 * i, obj->map()->GetInObjectProperties());
+ CHECK_EQ(8 + 2 * i, obj->map().GetInObjectProperties());
base = "(class extends " + base + " {})";
}
}
@@ -1338,9 +1341,9 @@ TEST(Regress8853_FunctionConstructor) {
// For constructor functions without any this.prop assignments in
// them we start out with 10 inobject properties.
Handle<JSObject> obj = CompileRunI<JSObject>("new (function() {});\n");
- CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(obj->map().IsInobjectSlackTrackingInProgress());
CHECK(IsObjectShrinkable(*obj));
- CHECK_EQ(10, obj->map()->GetInObjectProperties());
+ CHECK_EQ(10, obj->map().GetInObjectProperties());
// For constructor functions with N explicit this.prop assignments
// in them we start out with N+8 inobject properties.
@@ -1353,9 +1356,9 @@ TEST(Regress8853_FunctionConstructor) {
" this.c = 3;\n"
" this.f = 3;\n"
"});\n");
- CHECK(obj->map()->IsInobjectSlackTrackingInProgress());
+ CHECK(obj->map().IsInobjectSlackTrackingInProgress());
CHECK(IsObjectShrinkable(*obj));
- CHECK_EQ(6 + 8, obj->map()->GetInObjectProperties());
+ CHECK_EQ(6 + 8, obj->map().GetInObjectProperties());
}
TEST(InstanceFieldsArePropertiesDefaultConstructorLazy) {
@@ -1376,7 +1379,7 @@ TEST(InstanceFieldsArePropertiesDefaultConstructorLazy) {
" x09 = null;\n"
" x10 = null;\n"
"});\n");
- CHECK_EQ(11 + 8, obj->map()->GetInObjectProperties());
+ CHECK_EQ(11 + 8, obj->map().GetInObjectProperties());
}
TEST(InstanceFieldsArePropertiesFieldsAndConstructorLazy) {
@@ -1409,7 +1412,7 @@ TEST(InstanceFieldsArePropertiesFieldsAndConstructorLazy) {
" this.x20 = null;\n"
" }\n"
"});\n");
- CHECK_EQ(21 + 8, obj->map()->GetInObjectProperties());
+ CHECK_EQ(21 + 8, obj->map().GetInObjectProperties());
}
TEST(InstanceFieldsArePropertiesDefaultConstructorEager) {
@@ -1431,7 +1434,7 @@ TEST(InstanceFieldsArePropertiesDefaultConstructorEager) {
" x09 = null;\n"
" x10 = null;\n"
"});\n");
- CHECK_EQ(11 + 8, obj->map()->GetInObjectProperties());
+ CHECK_EQ(11 + 8, obj->map().GetInObjectProperties());
}
TEST(InstanceFieldsArePropertiesFieldsAndConstructorEager) {
@@ -1465,7 +1468,7 @@ TEST(InstanceFieldsArePropertiesFieldsAndConstructorEager) {
" this.x20 = null;\n"
" }\n"
"});\n");
- CHECK_EQ(21 + 8, obj->map()->GetInObjectProperties());
+ CHECK_EQ(21 + 8, obj->map().GetInObjectProperties());
}
} // namespace test_inobject_slack_tracking
diff --git a/deps/v8/test/cctest/test-intl.cc b/deps/v8/test/cctest/test-intl.cc
index d916507760..47add77b0a 100644
--- a/deps/v8/test/cctest/test-intl.cc
+++ b/deps/v8/test/cctest/test-intl.cc
@@ -4,8 +4,6 @@
#ifdef V8_INTL_SUPPORT
-#include "src/lookup.h"
-#include "src/objects-inl.h"
#include "src/objects/intl-objects.h"
#include "src/objects/js-break-iterator.h"
#include "src/objects/js-collator.h"
@@ -15,6 +13,8 @@
#include "src/objects/js-plural-rules.h"
#include "src/objects/js-relative-time-format.h"
#include "src/objects/js-segmenter.h"
+#include "src/objects/lookup.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-javascript-arm64.cc b/deps/v8/test/cctest/test-javascript-arm64.cc
index 428726fdc7..df3984572d 100644
--- a/deps/v8/test/cctest/test-javascript-arm64.cc
+++ b/deps/v8/test/cctest/test-javascript-arm64.cc
@@ -27,16 +27,16 @@
#include <limits.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/base/platform/platform.h"
-#include "src/compilation-cache.h"
-#include "src/execution.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/unicode-inl.h"
-#include "src/utils.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/utils.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-js-arm64-variables.cc b/deps/v8/test/cctest/test-js-arm64-variables.cc
index 442407a79e..46f2e20ba3 100644
--- a/deps/v8/test/cctest/test-js-arm64-variables.cc
+++ b/deps/v8/test/cctest/test-js-arm64-variables.cc
@@ -29,16 +29,16 @@
#include <limits.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/base/platform/platform.h"
-#include "src/compilation-cache.h"
-#include "src/execution.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/unicode-inl.h"
-#include "src/utils.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/utils.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index e529c7cac9..858c9f577a 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/handles-inl.h"
+#include "src/execution/isolate.h"
+#include "src/execution/microtask-queue.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory-inl.h"
-#include "src/isolate.h"
-#include "src/microtask-queue.h"
#include "src/objects/js-objects.h"
#include "src/objects/js-weak-refs-inl.h"
#include "test/cctest/cctest.h"
@@ -69,7 +69,7 @@ Handle<WeakCell> FinalizationGroupRegister(
Handle<Object> holdings, Handle<Object> key, Isolate* isolate) {
JSFinalizationGroup::Register(finalization_group, target, holdings, key,
isolate);
- CHECK(finalization_group->active_cells()->IsWeakCell());
+ CHECK(finalization_group->active_cells().IsWeakCell());
Handle<WeakCell> weak_cell =
handle(WeakCell::cast(finalization_group->active_cells()), isolate);
#ifdef VERIFY_HEAP
@@ -105,19 +105,19 @@ void VerifyWeakCellChain(Isolate* isolate, Object list_head, int n_args, ...) {
if (n_args == 0) {
// Verify empty list
- CHECK(list_head->IsUndefined(isolate));
+ CHECK(list_head.IsUndefined(isolate));
} else {
WeakCell current = WeakCell::cast(Object(va_arg(args, Address)));
CHECK_EQ(current, list_head);
- CHECK(current->prev()->IsUndefined(isolate));
+ CHECK(current.prev().IsUndefined(isolate));
for (int i = 1; i < n_args; i++) {
WeakCell next = WeakCell::cast(Object(va_arg(args, Address)));
- CHECK_EQ(current->next(), next);
- CHECK_EQ(next->prev(), current);
+ CHECK_EQ(current.next(), next);
+ CHECK_EQ(next.prev(), current);
current = next;
}
- CHECK(current->next()->IsUndefined(isolate));
+ CHECK(current.next().IsUndefined(isolate));
}
va_end(args);
}
@@ -133,19 +133,19 @@ void VerifyWeakCellKeyChain(Isolate* isolate, Object list_head, int n_args,
if (n_args == 0) {
// Verify empty list
- CHECK(list_head->IsTheHole(isolate));
+ CHECK(list_head.IsTheHole(isolate));
} else {
WeakCell current = WeakCell::cast(Object(va_arg(args, Address)));
CHECK_EQ(current, list_head);
- CHECK(current->key_list_prev()->IsUndefined(isolate));
+ CHECK(current.key_list_prev().IsUndefined(isolate));
for (int i = 1; i < n_args; i++) {
WeakCell next = WeakCell::cast(Object(va_arg(args, Address)));
- CHECK_EQ(current->key_list_next(), next);
- CHECK_EQ(next->key_list_prev(), current);
+ CHECK_EQ(current.key_list_next(), next);
+ CHECK_EQ(next.key_list_prev(), current);
current = next;
}
- CHECK(current->key_list_next()->IsUndefined(isolate));
+ CHECK(current.key_list_next().IsUndefined(isolate));
}
va_end(args);
}
@@ -169,13 +169,13 @@ TEST(TestRegister) {
VerifyWeakCellChain(isolate, finalization_group->active_cells(), 1,
*weak_cell1);
- CHECK(weak_cell1->key_list_prev()->IsUndefined(isolate));
- CHECK(weak_cell1->key_list_next()->IsUndefined(isolate));
+ CHECK(weak_cell1->key_list_prev().IsUndefined(isolate));
+ CHECK(weak_cell1->key_list_next().IsUndefined(isolate));
- CHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
+ CHECK(finalization_group->cleared_cells().IsUndefined(isolate));
// No key was used during registration, key-based map stays uninitialized.
- CHECK(finalization_group->key_map()->IsUndefined(isolate));
+ CHECK(finalization_group->key_map().IsUndefined(isolate));
// Register another weak reference and verify internal data structures.
Handle<WeakCell> weak_cell2 =
@@ -183,11 +183,11 @@ TEST(TestRegister) {
VerifyWeakCellChain(isolate, finalization_group->active_cells(), 2,
*weak_cell2, *weak_cell1);
- CHECK(weak_cell2->key_list_prev()->IsUndefined(isolate));
- CHECK(weak_cell2->key_list_next()->IsUndefined(isolate));
+ CHECK(weak_cell2->key_list_prev().IsUndefined(isolate));
+ CHECK(weak_cell2->key_list_next().IsUndefined(isolate));
- CHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
- CHECK(finalization_group->key_map()->IsUndefined(isolate));
+ CHECK(finalization_group->cleared_cells().IsUndefined(isolate));
+ CHECK(finalization_group->key_map().IsUndefined(isolate));
}
TEST(TestRegisterWithKey) {
@@ -211,7 +211,7 @@ TEST(TestRegisterWithKey) {
finalization_group, js_object, undefined, key1, isolate);
{
- CHECK(finalization_group->key_map()->IsObjectHashTable());
+ CHECK(finalization_group->key_map().IsObjectHashTable());
Handle<ObjectHashTable> key_map =
handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 1, *weak_cell1);
@@ -224,7 +224,7 @@ TEST(TestRegisterWithKey) {
finalization_group, js_object, undefined, key2, isolate);
{
- CHECK(finalization_group->key_map()->IsObjectHashTable());
+ CHECK(finalization_group->key_map().IsObjectHashTable());
Handle<ObjectHashTable> key_map =
handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 1, *weak_cell1);
@@ -237,7 +237,7 @@ TEST(TestRegisterWithKey) {
finalization_group, js_object, undefined, key1, isolate);
{
- CHECK(finalization_group->key_map()->IsObjectHashTable());
+ CHECK(finalization_group->key_map().IsObjectHashTable());
Handle<ObjectHashTable> key_map =
handle(ObjectHashTable::cast(finalization_group->key_map()), isolate);
VerifyWeakCellKeyChain(isolate, key_map->Lookup(key1), 2, *weak_cell3,
@@ -265,20 +265,20 @@ TEST(TestWeakCellNullify1) {
// Nullify the first WeakCell and verify internal data structures.
NullifyWeakCell(weak_cell1, isolate);
CHECK_EQ(finalization_group->active_cells(), *weak_cell2);
- CHECK(weak_cell2->prev()->IsUndefined(isolate));
- CHECK(weak_cell2->next()->IsUndefined(isolate));
+ CHECK(weak_cell2->prev().IsUndefined(isolate));
+ CHECK(weak_cell2->next().IsUndefined(isolate));
CHECK_EQ(finalization_group->cleared_cells(), *weak_cell1);
- CHECK(weak_cell1->prev()->IsUndefined(isolate));
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ CHECK(weak_cell1->prev().IsUndefined(isolate));
+ CHECK(weak_cell1->next().IsUndefined(isolate));
// Nullify the second WeakCell and verify internal data structures.
NullifyWeakCell(weak_cell2, isolate);
- CHECK(finalization_group->active_cells()->IsUndefined(isolate));
+ CHECK(finalization_group->active_cells().IsUndefined(isolate));
CHECK_EQ(finalization_group->cleared_cells(), *weak_cell2);
CHECK_EQ(weak_cell2->next(), *weak_cell1);
- CHECK(weak_cell2->prev()->IsUndefined(isolate));
+ CHECK(weak_cell2->prev().IsUndefined(isolate));
CHECK_EQ(weak_cell1->prev(), *weak_cell2);
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ CHECK(weak_cell1->next().IsUndefined(isolate));
}
TEST(TestWeakCellNullify2) {
@@ -300,19 +300,19 @@ TEST(TestWeakCellNullify2) {
// Like TestWeakCellNullify1 but nullify the WeakCells in opposite order.
NullifyWeakCell(weak_cell2, isolate);
CHECK_EQ(finalization_group->active_cells(), *weak_cell1);
- CHECK(weak_cell1->prev()->IsUndefined(isolate));
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ CHECK(weak_cell1->prev().IsUndefined(isolate));
+ CHECK(weak_cell1->next().IsUndefined(isolate));
CHECK_EQ(finalization_group->cleared_cells(), *weak_cell2);
- CHECK(weak_cell2->prev()->IsUndefined(isolate));
- CHECK(weak_cell2->next()->IsUndefined(isolate));
+ CHECK(weak_cell2->prev().IsUndefined(isolate));
+ CHECK(weak_cell2->next().IsUndefined(isolate));
NullifyWeakCell(weak_cell1, isolate);
- CHECK(finalization_group->active_cells()->IsUndefined(isolate));
+ CHECK(finalization_group->active_cells().IsUndefined(isolate));
CHECK_EQ(finalization_group->cleared_cells(), *weak_cell1);
CHECK_EQ(weak_cell1->next(), *weak_cell2);
- CHECK(weak_cell1->prev()->IsUndefined(isolate));
+ CHECK(weak_cell1->prev().IsUndefined(isolate));
CHECK_EQ(weak_cell2->prev(), *weak_cell1);
- CHECK(weak_cell2->next()->IsUndefined(isolate));
+ CHECK(weak_cell2->next().IsUndefined(isolate));
}
TEST(TestJSFinalizationGroupPopClearedCellHoldings1) {
@@ -346,15 +346,15 @@ TEST(TestJSFinalizationGroupPopClearedCellHoldings1) {
Object cleared1 =
JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
CHECK_EQ(cleared1, *holdings3);
- CHECK(weak_cell3->prev()->IsUndefined(isolate));
- CHECK(weak_cell3->next()->IsUndefined(isolate));
+ CHECK(weak_cell3->prev().IsUndefined(isolate));
+ CHECK(weak_cell3->next().IsUndefined(isolate));
CHECK(finalization_group->NeedsCleanup());
Object cleared2 =
JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
CHECK_EQ(cleared2, *holdings2);
- CHECK(weak_cell2->prev()->IsUndefined(isolate));
- CHECK(weak_cell2->next()->IsUndefined(isolate));
+ CHECK(weak_cell2->prev().IsUndefined(isolate));
+ CHECK(weak_cell2->next().IsUndefined(isolate));
CHECK(!finalization_group->NeedsCleanup());
@@ -364,12 +364,12 @@ TEST(TestJSFinalizationGroupPopClearedCellHoldings1) {
Object cleared3 =
JSFinalizationGroup::PopClearedCellHoldings(finalization_group, isolate);
CHECK_EQ(cleared3, *holdings1);
- CHECK(weak_cell1->prev()->IsUndefined(isolate));
- CHECK(weak_cell1->next()->IsUndefined(isolate));
+ CHECK(weak_cell1->prev().IsUndefined(isolate));
+ CHECK(weak_cell1->next().IsUndefined(isolate));
CHECK(!finalization_group->NeedsCleanup());
- CHECK(finalization_group->active_cells()->IsUndefined(isolate));
- CHECK(finalization_group->cleared_cells()->IsUndefined(isolate));
+ CHECK(finalization_group->active_cells().IsUndefined(isolate));
+ CHECK(finalization_group->cleared_cells().IsUndefined(isolate));
}
TEST(TestJSFinalizationGroupPopClearedCellHoldings2) {
@@ -656,16 +656,16 @@ TEST(TestJSWeakRef) {
Handle<JSWeakRef> inner_weak_ref = ConstructJSWeakRef(js_object, isolate);
CcTest::CollectAllGarbage();
- CHECK(!inner_weak_ref->target()->IsUndefined(isolate));
+ CHECK(!inner_weak_ref->target().IsUndefined(isolate));
weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
}
- CHECK(!weak_ref->target()->IsUndefined(isolate));
+ CHECK(!weak_ref->target().IsUndefined(isolate));
CcTest::CollectAllGarbage();
- CHECK(weak_ref->target()->IsUndefined(isolate));
+ CHECK(weak_ref->target().IsUndefined(isolate));
}
TEST(TestJSWeakRefIncrementalMarking) {
@@ -691,17 +691,17 @@ TEST(TestJSWeakRefIncrementalMarking) {
heap::SimulateIncrementalMarking(heap, true);
CcTest::CollectAllGarbage();
- CHECK(!inner_weak_ref->target()->IsUndefined(isolate));
+ CHECK(!inner_weak_ref->target().IsUndefined(isolate));
weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
}
- CHECK(!weak_ref->target()->IsUndefined(isolate));
+ CHECK(!weak_ref->target().IsUndefined(isolate));
heap::SimulateIncrementalMarking(heap, true);
CcTest::CollectAllGarbage();
- CHECK(weak_ref->target()->IsUndefined(isolate));
+ CHECK(weak_ref->target().IsUndefined(isolate));
}
TEST(TestJSWeakRefKeepDuringJob) {
@@ -724,17 +724,17 @@ TEST(TestJSWeakRefKeepDuringJob) {
weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
}
- CHECK(!weak_ref->target()->IsUndefined(isolate));
+ CHECK(!weak_ref->target().IsUndefined(isolate));
CcTest::CollectAllGarbage();
- CHECK(!weak_ref->target()->IsUndefined(isolate));
+ CHECK(!weak_ref->target().IsUndefined(isolate));
// Clears the KeepDuringJob set.
isolate->default_microtask_queue()->RunMicrotasks(isolate);
CcTest::CollectAllGarbage();
- CHECK(weak_ref->target()->IsUndefined(isolate));
+ CHECK(weak_ref->target().IsUndefined(isolate));
}
TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
@@ -761,19 +761,19 @@ TEST(TestJSWeakRefKeepDuringJobIncrementalMarking) {
weak_ref = inner_scope.CloseAndEscape(inner_weak_ref);
}
- CHECK(!weak_ref->target()->IsUndefined(isolate));
+ CHECK(!weak_ref->target().IsUndefined(isolate));
heap::SimulateIncrementalMarking(heap, true);
CcTest::CollectAllGarbage();
- CHECK(!weak_ref->target()->IsUndefined(isolate));
+ CHECK(!weak_ref->target().IsUndefined(isolate));
// Clears the KeepDuringJob set.
isolate->default_microtask_queue()->RunMicrotasks(isolate);
heap::SimulateIncrementalMarking(heap, true);
CcTest::CollectAllGarbage();
- CHECK(weak_ref->target()->IsUndefined(isolate));
+ CHECK(weak_ref->target().IsUndefined(isolate));
}
} // namespace internal
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index e2fec0a3ae..4319d5bebe 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -27,11 +27,11 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/debug/liveedit.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -46,8 +46,8 @@ void CompareStringsOneWay(const char* s1, const char* s2,
changes->clear();
LiveEdit::CompareStrings(isolate, i_s1, i_s2, changes);
- int len1 = StrLength(s1);
- int len2 = StrLength(s2);
+ int len1 = static_cast<int>(strlen(s1));
+ int len2 = static_cast<int>(strlen(s2));
int pos1 = 0;
int pos2 = 0;
@@ -208,7 +208,7 @@ void PatchFunctions(v8::Local<v8::Context> context, const char* source_a,
v8::Script::Compile(context, v8_str(isolate, source_a)).ToLocalChecked();
script_a->Run(context).ToLocalChecked();
i::Handle<i::Script> i_script_a(
- i::Script::cast(v8::Utils::OpenHandle(*script_a)->shared()->script()),
+ i::Script::cast(v8::Utils::OpenHandle(*script_a)->shared().script()),
i_isolate);
if (result) {
@@ -353,6 +353,7 @@ TEST(LiveEditPatchFunctions) {
i::FLAG_allow_natives_syntax = true;
PatchFunctions(context,
"function foo(a, b) { return a + b; }; "
+ "%PrepareFunctionForOptimization(foo);"
"%OptimizeFunctionOnNextCall(foo); foo(1,2);",
"function foo(a, b) { return a * b; };");
CHECK_EQ(CompileRunChecked(env->GetIsolate(), "foo(5,7)")
@@ -540,7 +541,7 @@ TEST(LiveEditFunctionExpression) {
v8::Local<v8::Function> f =
script->Run(context).ToLocalChecked().As<v8::Function>();
i::Handle<i::Script> i_script(
- i::Script::cast(v8::Utils::OpenHandle(*script)->shared()->script()),
+ i::Script::cast(v8::Utils::OpenHandle(*script)->shared().script()),
i_isolate);
debug::LiveEditResult result;
LiveEdit::PatchScript(
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index d7c13f8d5d..ed01d6ed21 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -29,15 +29,15 @@
#include <memory>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/platform/platform.h"
-#include "src/compilation-cache.h"
-#include "src/execution.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/unicode-inl.h"
-#include "src/utils.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/utils.h"
#include "test/cctest/cctest.h"
namespace {
@@ -157,6 +157,7 @@ TEST(LazyDeoptimizationMultithread) {
"function f() { g(); return obj.x; }"
"function g() { if (b) { unlock_for_deoptimization(); } }"
"%NeverOptimizeFunction(g);"
+ "%PrepareFunctionForOptimization(f);"
"f(); f(); %OptimizeFunctionOnNextCall(f);"
"f();");
@@ -212,6 +213,7 @@ TEST(LazyDeoptimizationMultithreadWithNatives) {
"function g() { "
" unlock_for_deoptimization(); }"
"%NeverOptimizeFunction(g);"
+ "%PrepareFunctionForOptimization(f);"
"f(); f(); %OptimizeFunctionOnNextCall(f);");
// Trigger the unlocking.
@@ -262,6 +264,7 @@ TEST(EagerDeoptimizationMultithread) {
// Optimizes a function f, which will be deoptimized by another thread.
CompileRun(
"function f(obj) { unlock_for_deoptimization(); return obj.x; }"
+ "%PrepareFunctionForOptimization(f);"
"f({x: 1}); f({x: 1});"
"%OptimizeFunctionOnNextCall(f);"
"f({x: 1});");
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index d49d9eb1d4..1ff517643e 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -30,12 +30,12 @@
#include <stdlib.h>
#include "include/v8-profiler.h"
-#include "src/api-inl.h"
-#include "src/disassembler.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
-#include "src/vm-state-inl.h"
+#include "src/api/api-inl.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/isolate.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/trace-extension.h"
@@ -43,8 +43,8 @@ namespace v8 {
namespace internal {
static bool IsAddressWithinFuncCode(JSFunction function, void* addr) {
- i::AbstractCode code = function->abstract_code();
- return code->contains(reinterpret_cast<Address>(addr));
+ i::AbstractCode code = function.abstract_code();
+ return code.contains(reinterpret_cast<Address>(addr));
}
static bool IsAddressWithinFuncCode(v8::Local<v8::Context> context,
@@ -130,7 +130,7 @@ static void CreateTraceCallerFunction(v8::Local<v8::Context> context,
CreateFramePointerGrabberConstructor(context, "FPGrabber");
// Compile the script.
- CompileRun(trace_call_buf.start());
+ CompileRun(trace_call_buf.begin());
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index ba3f2dc403..3a552dfb18 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -29,24 +29,23 @@
#include <unordered_set>
#include <vector>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/builtins/builtins.h"
-#include "src/compilation-cache.h"
-#include "src/log-utils.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/vm-state-inl.h"
+#include "src/init/v8.h"
+#include "src/logging/log-utils.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/snapshot/natives.h"
-#include "src/v8.h"
-#include "src/version.h"
-#include "src/vm-state-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/version.h"
#include "test/cctest/cctest.h"
using v8::internal::Address;
using v8::internal::EmbeddedVector;
using v8::internal::Logger;
-using v8::internal::StrLength;
namespace {
@@ -182,9 +181,8 @@ class ScopedLoggerInitializer {
printf("%s\n", log_.at(i).c_str());
}
printf("%zu\n", current);
- V8_Fatal(__FILE__, __LINE__, "%s, ... %p apperead twice:\n %s",
- search_term.c_str(), reinterpret_cast<void*>(address),
- current_line.c_str());
+ FATAL("%s, ... %p apperead twice:\n %s", search_term.c_str(),
+ reinterpret_cast<void*>(address), current_line.c_str());
}
}
map.insert({address, current_line});
@@ -287,15 +285,13 @@ namespace {
class SimpleExternalString : public v8::String::ExternalStringResource {
public:
explicit SimpleExternalString(const char* source)
- : utf_source_(StrLength(source)) {
- for (int i = 0; i < utf_source_.length(); ++i)
- utf_source_[i] = source[i];
- }
+ : utf_source_(i::OwnedVector<uint16_t>::Of(i::CStrVector(source))) {}
~SimpleExternalString() override = default;
- size_t length() const override { return utf_source_.length(); }
- const uint16_t* data() const override { return utf_source_.start(); }
+ size_t length() const override { return utf_source_.size(); }
+ const uint16_t* data() const override { return utf_source_.begin(); }
+
private:
- i::ScopedVector<uint16_t> utf_source_;
+ i::OwnedVector<uint16_t> utf_source_;
};
} // namespace
@@ -367,7 +363,7 @@ UNINITIALIZED_TEST(LogCallbacks) {
i::EmbeddedVector<char, 100> suffix_buffer;
i::SNPrintF(suffix_buffer, ",0x%" V8PRIxPTR ",1,method1", ObjMethod1_entry);
CHECK(logger.ContainsLine(
- {"code-creation,Callback,-2,", std::string(suffix_buffer.start())}));
+ {"code-creation,Callback,-2,", std::string(suffix_buffer.begin())}));
}
isolate->Dispose();
}
@@ -412,7 +408,7 @@ UNINITIALIZED_TEST(LogAccessorCallbacks) {
i::SNPrintF(prop1_getter_record, ",0x%" V8PRIxPTR ",1,get prop1",
Prop1Getter_entry);
CHECK(logger.ContainsLine({"code-creation,Callback,-2,",
- std::string(prop1_getter_record.start())}));
+ std::string(prop1_getter_record.begin())}));
Address Prop1Setter_entry = reinterpret_cast<Address>(Prop1Setter);
#if USES_FUNCTION_DESCRIPTORS
@@ -422,7 +418,7 @@ UNINITIALIZED_TEST(LogAccessorCallbacks) {
i::SNPrintF(prop1_setter_record, ",0x%" V8PRIxPTR ",1,set prop1",
Prop1Setter_entry);
CHECK(logger.ContainsLine({"code-creation,Callback,-2,",
- std::string(prop1_setter_record.start())}));
+ std::string(prop1_setter_record.begin())}));
Address Prop2Getter_entry = reinterpret_cast<Address>(Prop2Getter);
#if USES_FUNCTION_DESCRIPTORS
@@ -432,7 +428,7 @@ UNINITIALIZED_TEST(LogAccessorCallbacks) {
i::SNPrintF(prop2_getter_record, ",0x%" V8PRIxPTR ",1,get prop2",
Prop2Getter_entry);
CHECK(logger.ContainsLine({"code-creation,Callback,-2,",
- std::string(prop2_getter_record.start())}));
+ std::string(prop2_getter_record.begin())}));
}
isolate->Dispose();
}
@@ -480,7 +476,7 @@ UNINITIALIZED_TEST(EquivalenceOfLoggingAndTraversal) {
i::Vector<const char> source =
i::NativesCollection<i::TEST>::GetScriptsSource();
v8::Local<v8::String> source_str =
- v8::String::NewFromUtf8(isolate, source.start(),
+ v8::String::NewFromUtf8(isolate, source.begin(),
v8::NewStringType::kNormal, source.length())
.ToLocalChecked();
v8::TryCatch try_catch(isolate);
@@ -498,9 +494,9 @@ UNINITIALIZED_TEST(EquivalenceOfLoggingAndTraversal) {
if (!result->IsTrue()) {
v8::Local<v8::String> s = result->ToString(logger.env()).ToLocalChecked();
i::ScopedVector<char> data(s->Utf8Length(isolate) + 1);
- CHECK(data.start());
- s->WriteUtf8(isolate, data.start());
- FATAL("%s\n", data.start());
+ CHECK(data.begin());
+ s->WriteUtf8(isolate, data.begin());
+ FATAL("%s\n", data.begin());
}
}
isolate->Dispose();
@@ -520,7 +516,7 @@ UNINITIALIZED_TEST(LogVersion) {
i::Version::GetMinor(), i::Version::GetBuild(),
i::Version::GetPatch(), i::Version::IsCandidate());
CHECK(
- logger.ContainsLine({"v8-version,", std::string(line_buffer.start())}));
+ logger.ContainsLine({"v8-version,", std::string(line_buffer.begin())}));
}
isolate->Dispose();
}
@@ -603,6 +599,7 @@ UNINITIALIZED_TEST(LogAll) {
let result;
// Warm up the ICs.
+ %PrepareFunctionForOptimization(testAddFn);
for (let i = 0; i < 100000; i++) {
result = testAddFn(i, i);
};
@@ -937,28 +934,28 @@ void ValidateMapDetailsLogging(v8::Isolate* isolate,
size_t i = 0;
for (i::HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (!obj->IsMap()) continue;
+ if (!obj.IsMap()) continue;
i++;
- uintptr_t address = obj->ptr();
+ uintptr_t address = obj.ptr();
if (map_create_addresses.find(address) == map_create_addresses.end()) {
// logger->PrintLog();
- i::Map::cast(obj)->Print();
- V8_Fatal(__FILE__, __LINE__,
- "Map (%p, #%zu) creation not logged during startup with "
- "--trace-maps!"
- "\n# Expected Log Line: map-create, ... %p",
- reinterpret_cast<void*>(obj->ptr()), i,
- reinterpret_cast<void*>(obj->ptr()));
+ i::Map::cast(obj).Print();
+ FATAL(
+ "Map (%p, #%zu) creation not logged during startup with "
+ "--trace-maps!"
+ "\n# Expected Log Line: map-create, ... %p",
+ reinterpret_cast<void*>(obj.ptr()), i,
+ reinterpret_cast<void*>(obj.ptr()));
} else if (map_details_addresses.find(address) ==
map_details_addresses.end()) {
// logger->PrintLog();
- i::Map::cast(obj)->Print();
- V8_Fatal(__FILE__, __LINE__,
- "Map (%p, #%zu) details not logged during startup with "
- "--trace-maps!"
- "\n# Expected Log Line: map-details, ... %p",
- reinterpret_cast<void*>(obj->ptr()), i,
- reinterpret_cast<void*>(obj->ptr()));
+ i::Map::cast(obj).Print();
+ FATAL(
+ "Map (%p, #%zu) details not logged during startup with "
+ "--trace-maps!"
+ "\n# Expected Log Line: map-details, ... %p",
+ reinterpret_cast<void*>(obj.ptr()), i,
+ reinterpret_cast<void*>(obj.ptr()));
}
}
}
@@ -1260,12 +1257,12 @@ UNINITIALIZED_TEST(BuiltinsNotLoggedAsLazyCompile) {
i::SNPrintF(buffer, ",0x%" V8PRIxPTR ",%d,BooleanConstructor",
builtin->InstructionStart(), builtin->InstructionSize());
CHECK(logger.ContainsLine(
- {"code-creation,Builtin,3,", std::string(buffer.start())}));
+ {"code-creation,Builtin,3,", std::string(buffer.begin())}));
i::SNPrintF(buffer, ",0x%" V8PRIxPTR ",%d,", builtin->InstructionStart(),
builtin->InstructionSize());
CHECK(!logger.ContainsLine(
- {"code-creation,LazyCompile,3,", std::string(buffer.start())}));
+ {"code-creation,LazyCompile,3,", std::string(buffer.begin())}));
}
isolate->Dispose();
}
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 271c57b92d..a4cc98b884 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -27,12 +27,12 @@
#include <stdlib.h>
-#include "src/assembler-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
-#include "src/simulator.h"
-#include "src/v8.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
@@ -58,7 +58,7 @@ TEST(ExtractLane) {
buffer->CreateView());
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
- typedef struct {
+ struct T {
int32_t i32x4_low[4];
int32_t i32x4_high[4];
int32_t i16x8_low[8];
@@ -69,7 +69,7 @@ TEST(ExtractLane) {
int32_t f32x4_high[4];
int32_t i8x16_low_d[16];
int32_t i8x16_high_d[16];
- } T;
+ };
T t;
__ stm(db_w, sp, r4.bit() | r5.bit() | lr.bit());
@@ -146,8 +146,7 @@ TEST(ExtractLane) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -197,7 +196,7 @@ TEST(ReplaceLane) {
buffer->CreateView());
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
- typedef struct {
+ struct T {
int32_t i32x4_low[4];
int32_t i32x4_high[4];
int16_t i16x8_low[8];
@@ -206,7 +205,7 @@ TEST(ReplaceLane) {
int8_t i8x16_high[16];
int32_t f32x4_low[4];
int32_t f32x4_high[4];
- } T;
+ };
T t;
__ stm(db_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | lr.bit());
@@ -277,8 +276,7 @@ TEST(ReplaceLane) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 63d0794fc8..c20cac04e0 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -28,14 +28,15 @@
#include <stdlib.h>
#include <iostream> // NOLINT(readability/streams)
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
+#include "src/init/v8.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-array-inl.h"
-#include "src/simulator.h"
-#include "src/v8.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -87,8 +88,7 @@ TEST(BYTESWAP) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (size_t i = 0; i < arraysize(test_values); i++) {
@@ -122,7 +122,7 @@ static void TestNaN(const char *code) {
i::Handle<i::JSReceiver> o = v8::Utils::OpenHandle(*result);
i::Handle<i::JSArray> array1(i::JSArray::cast(*o), o->GetIsolate());
i::FixedDoubleArray a = i::FixedDoubleArray::cast(array1->elements());
- double value = a->get_scalar(0);
+ double value = a.get_scalar(0);
CHECK(std::isnan(value) &&
bit_cast<uint64_t>(value) ==
bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN()));
@@ -197,8 +197,7 @@ TEST(jump_tables4) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -232,8 +231,6 @@ TEST(jump_tables5) {
{
__ BlockTrampolinePoolFor(kNumCases + 6 + 1);
- PredictableCodeSizeScope predictable(
- masm, kNumCases * kPointerSize + ((6 + 1) * kInstrSize));
__ addiupc(at, 6 + 1);
__ Lsa(at, at, a0, 2);
@@ -262,8 +259,7 @@ TEST(jump_tables5) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -350,8 +346,7 @@ TEST(jump_tables6) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -375,8 +370,7 @@ static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F1>::FromCode(*code);
@@ -488,7 +482,7 @@ static const std::vector<int32_t> cvt_trunc_int32_test_values() {
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
- typedef RET_TYPE(F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4);
+ using F_CVT = RET_TYPE(IN_TYPE x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -503,8 +497,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@@ -616,7 +609,7 @@ TEST(OverflowInstructions) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.lhs = ii;
t.rhs = jj;
@@ -738,8 +731,7 @@ TEST(min_max_nan) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -759,7 +751,7 @@ TEST(min_max_nan) {
template <typename IN_TYPE, typename Func>
bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
- typedef int32_t(F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+ using F_CVT = int32_t(char* x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -773,8 +765,7 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@@ -1007,7 +998,7 @@ static const std::vector<uint32_t> sltu_test_values() {
template <typename Func>
bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) {
- typedef int32_t(F_CVT)(uint32_t x0, uint32_t x1, int x2, int x3, int x4);
+ using F_CVT = int32_t(uint32_t x0, uint32_t x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1020,8 +1011,7 @@ bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
int32_t res = reinterpret_cast<int32_t>(f.Call(rs, rd, 0, 0, 0));
@@ -1115,7 +1105,7 @@ static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
- masm->isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -1256,7 +1246,7 @@ static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
- masm->isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 14acf2eb02..0b0ab7b032 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -28,14 +28,15 @@
#include <stdlib.h>
#include <iostream> // NOLINT(readability/streams)
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
#include "src/objects/heap-number.h"
-#include "src/simulator.h"
+#include "src/utils/ostreams.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -107,8 +108,7 @@ TEST(BYTESWAP) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (size_t i = 0; i < arraysize(test_values); i++) {
@@ -162,8 +162,7 @@ TEST(LoadConstants) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<FV>::FromCode(*code);
(void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
@@ -205,8 +204,7 @@ TEST(LoadAddress) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<FV>::FromCode(*code);
(void)f.Call(0, 0, 0, 0, 0);
@@ -262,8 +260,7 @@ TEST(jump_tables4) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -303,8 +300,6 @@ TEST(jump_tables5) {
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6 + 1);
- PredictableCodeSizeScope predictable(
- masm, kNumCases * kPointerSize + ((6 + 1) * kInstrSize));
__ addiupc(at, 6 + 1);
__ Dlsa(at, at, a0, 3);
@@ -334,8 +329,7 @@ TEST(jump_tables5) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -423,8 +417,7 @@ TEST(jump_tables6) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
#ifdef OBJECT_PRINT
code->Print(std::cout);
#endif
@@ -448,8 +441,7 @@ static uint64_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F1>::FromCode(*code);
@@ -528,8 +520,7 @@ static uint64_t run_dlsa(uint64_t rt, uint64_t rs, int8_t sa) {
CodeDesc desc;
assembler.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<FV>::FromCode(*code);
@@ -664,7 +655,7 @@ static const std::vector<int64_t> cvt_trunc_int64_test_values() {
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
- typedef RET_TYPE(F_CVT)(IN_TYPE x0, int x1, int x2, int x3, int x4);
+ using F_CVT = RET_TYPE(IN_TYPE x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -678,8 +669,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@@ -854,7 +844,7 @@ TEST(OverflowInstructions) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
t.lhs = ii;
t.rhs = jj;
@@ -976,8 +966,7 @@ TEST(min_max_nan) {
CodeDesc desc;
masm->GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F3>::FromCode(*code);
for (int i = 0; i < kTableLength; i++) {
test.a = inputsa[i];
@@ -997,7 +986,7 @@ TEST(min_max_nan) {
template <typename IN_TYPE, typename Func>
bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
- typedef int32_t(F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+ using F_CVT = int32_t(char* x0, int x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1011,8 +1000,7 @@ bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
@@ -1362,7 +1350,7 @@ static const std::vector<uint64_t> sltu_test_values() {
template <typename Func>
bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
- typedef int64_t(F_CVT)(uint64_t x0, uint64_t x1, int x2, int x3, int x4);
+ using F_CVT = int64_t(uint64_t x0, uint64_t x1, int x2, int x3, int x4);
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1375,8 +1363,7 @@ bool run_Sltu(uint64_t rs, uint64_t rd, Func GenerateSltuInstructionFunc) {
CodeDesc desc;
assm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
auto f = GeneratedCode<F_CVT>::FromCode(*code);
int64_t res = reinterpret_cast<int64_t>(f.Call(rs, rd, 0, 0, 0));
@@ -1470,7 +1457,7 @@ static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
- masm->isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
@@ -1611,7 +1598,7 @@ static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
CodeDesc desc;
masm->GetCode(masm->isolate(), &desc);
Handle<Code> code =
- masm->isolate()->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Factory::CodeBuilder(masm->isolate(), desc, Code::STUB).Build();
#ifdef DEBUG
StdoutStream os;
code->Print(os);
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 7dff5dbe8c..1344c0e9d1 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -27,14 +27,16 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/platform/platform.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/x64/assembler-x64-inl.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/simulator.h"
+#include "src/utils/ostreams.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
@@ -51,7 +53,7 @@ namespace test_macro_assembler_x64 {
// This calling convention is used on Linux, with GCC, and on Mac OS,
// with GCC. A different convention is used on 64-bit windows.
-typedef int(F0)();
+using F0 = int();
#define __ masm->
@@ -82,7 +84,7 @@ TEST(Smi) {
Smi smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
CHECK_EQ(smi_from_int, smi_from_intptr);
}
- int64_t smi_value = smi_from_intptr->value();
+ int64_t smi_value = smi_from_intptr.value();
CHECK_EQ(number, smi_value);
}
}
@@ -423,6 +425,59 @@ void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
}
}
+TEST(EmbeddedObj) {
+#ifdef V8_COMPRESS_POINTERS
+ FLAG_always_compact = true;
+ v8::V8::Initialize();
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
+
+ MacroAssembler* masm = &assembler;
+ EntryCode(masm);
+ Label exit;
+ Handle<HeapObject> old_array = isolate->factory()->NewFixedArray(2000);
+ Handle<HeapObject> my_array = isolate->factory()->NewFixedArray(1000);
+ __ Move(rcx, my_array, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
+ __ Move(rax, old_array, RelocInfo::FULL_EMBEDDED_OBJECT);
+ __ bind(&exit);
+ ExitCode(masm);
+ __ ret(0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
+#ifdef OBJECT_PRINT
+ StdoutStream os;
+ code->Print(os);
+#endif
+ using myF0 = Address();
+ auto f = GeneratedCode<myF0>::FromAddress(isolate, code->entry());
+ Object result = Object(f.Call());
+ CHECK_EQ(old_array->ptr(), result.ptr());
+
+ // Collect garbage to ensure reloc info can be walked by the heap.
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+
+ // Test the user-facing reloc interface.
+ const int mode_mask = RelocInfo::EmbeddedObjectModeMask();
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsCompressedEmbeddedObject(mode)) {
+ CHECK_EQ(*my_array, it.rinfo()->target_object());
+ } else {
+ CHECK(RelocInfo::IsFullEmbeddedObject(mode));
+ CHECK_EQ(*old_array, it.rinfo()->target_object());
+ }
+ }
+#endif // V8_COMPRESS_POINTERS
+}
+
TEST(SmiIndex) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
@@ -810,7 +865,7 @@ TEST(OperandOffset) {
void TestFloat32x4Abs(MacroAssembler* masm, Label* exit, float x, float y,
float z, float w) {
- __ subq(rsp, Immediate(kSimd128Size));
+ __ AllocateStackSpace(kSimd128Size);
__ Move(xmm1, x);
__ Movss(Operand(rsp, 0 * kFloatSize), xmm1);
@@ -847,7 +902,7 @@ void TestFloat32x4Abs(MacroAssembler* masm, Label* exit, float x, float y,
void TestFloat32x4Neg(MacroAssembler* masm, Label* exit, float x, float y,
float z, float w) {
- __ subq(rsp, Immediate(kSimd128Size));
+ __ AllocateStackSpace(kSimd128Size);
__ Move(xmm1, x);
__ Movss(Operand(rsp, 0 * kFloatSize), xmm1);
@@ -883,7 +938,7 @@ void TestFloat32x4Neg(MacroAssembler* masm, Label* exit, float x, float y,
}
void TestFloat64x2Abs(MacroAssembler* masm, Label* exit, double x, double y) {
- __ subq(rsp, Immediate(kSimd128Size));
+ __ AllocateStackSpace(kSimd128Size);
__ Move(xmm1, x);
__ Movsd(Operand(rsp, 0 * kDoubleSize), xmm1);
@@ -907,7 +962,7 @@ void TestFloat64x2Abs(MacroAssembler* masm, Label* exit, double x, double y) {
}
void TestFloat64x2Neg(MacroAssembler* masm, Label* exit, double x, double y) {
- __ subq(rsp, Immediate(kSimd128Size));
+ __ AllocateStackSpace(kSimd128Size);
__ Move(xmm1, x);
__ Movsd(Operand(rsp, 0 * kDoubleSize), xmm1);
diff --git a/deps/v8/test/cctest/test-managed.cc b/deps/v8/test/cctest/test-managed.cc
index f108b6210c..8d9185faa7 100644
--- a/deps/v8/test/cctest/test-managed.cc
+++ b/deps/v8/test/cctest/test-managed.cc
@@ -8,7 +8,7 @@
#include "src/objects/managed.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index 78b1e6a981..2e21b7f6b5 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -25,10 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -51,9 +51,9 @@ static void SetUpNewSpaceWithPoisonedMementoAtTop() {
// site pointer.
AllocationMemento memento = AllocationMemento::unchecked_cast(
Object(new_space->top() + kHeapObjectTag));
- memento->set_map_after_allocation(
- ReadOnlyRoots(heap).allocation_memento_map(), SKIP_WRITE_BARRIER);
- memento->set_allocation_site(
+ memento.set_map_after_allocation(ReadOnlyRoots(heap).allocation_memento_map(),
+ SKIP_WRITE_BARRIER);
+ memento.set_allocation_site(
AllocationSite::unchecked_cast(Object(kHeapObjectTag)),
SKIP_WRITE_BARRIER);
}
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index 2523b83a16..0f2bfd2a5f 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/flags.h"
+#include "src/flags/flags.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-object.cc b/deps/v8/test/cctest/test-object.cc
index 61945bc1fe..9104e850db 100644
--- a/deps/v8/test/cctest/test-object.cc
+++ b/deps/v8/test/cctest/test-object.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/function-kind.h"
-#include "src/globals.h"
-#include "src/handles-inl.h"
+#include "src/api/api-inl.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/objects/function-kind.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -17,8 +17,10 @@ namespace internal {
static void CheckObject(Isolate* isolate, Handle<Object> obj,
const char* string) {
- Object print_string = *Object::NoSideEffectsToString(isolate, obj);
- CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
+ Handle<String> print_string = String::Flatten(
+ isolate,
+ Handle<String>::cast(Object::NoSideEffectsToString(isolate, obj)));
+ CHECK(print_string->IsOneByteEqualTo(CStrVector(string)));
}
static void CheckSmi(Isolate* isolate, int value, const char* string) {
@@ -113,143 +115,143 @@ TEST(EnumCache) {
*env->Global()->Get(env.local(), v8_str("cc")).ToLocalChecked()));
// Check the transition tree.
- CHECK_EQ(a->map()->instance_descriptors(), b->map()->instance_descriptors());
- CHECK_EQ(b->map()->instance_descriptors(), c->map()->instance_descriptors());
- CHECK_NE(c->map()->instance_descriptors(), cc->map()->instance_descriptors());
- CHECK_NE(b->map()->instance_descriptors(), cc->map()->instance_descriptors());
+ CHECK_EQ(a->map().instance_descriptors(), b->map().instance_descriptors());
+ CHECK_EQ(b->map().instance_descriptors(), c->map().instance_descriptors());
+ CHECK_NE(c->map().instance_descriptors(), cc->map().instance_descriptors());
+ CHECK_NE(b->map().instance_descriptors(), cc->map().instance_descriptors());
// Check that the EnumLength is unset.
- CHECK_EQ(a->map()->EnumLength(), kInvalidEnumCacheSentinel);
- CHECK_EQ(b->map()->EnumLength(), kInvalidEnumCacheSentinel);
- CHECK_EQ(c->map()->EnumLength(), kInvalidEnumCacheSentinel);
- CHECK_EQ(cc->map()->EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(a->map().EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(b->map().EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(c->map().EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(cc->map().EnumLength(), kInvalidEnumCacheSentinel);
// Check that the EnumCache is empty.
- CHECK_EQ(a->map()->instance_descriptors()->enum_cache(),
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map()->instance_descriptors()->enum_cache(),
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map()->instance_descriptors()->enum_cache(),
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(cc->map()->instance_descriptors()->enum_cache(),
+ CHECK_EQ(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
// The EnumCache is shared on the DescriptorArray, creating it on {cc} has no
// effect on the other maps.
CompileRun("var s = 0; for (let key in cc) { s += cc[key] };");
{
- CHECK_EQ(a->map()->EnumLength(), kInvalidEnumCacheSentinel);
- CHECK_EQ(b->map()->EnumLength(), kInvalidEnumCacheSentinel);
- CHECK_EQ(c->map()->EnumLength(), kInvalidEnumCacheSentinel);
- CHECK_EQ(cc->map()->EnumLength(), 3);
+ CHECK_EQ(a->map().EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(b->map().EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(c->map().EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(cc->map().EnumLength(), 3);
- CHECK_EQ(a->map()->instance_descriptors()->enum_cache(),
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(b->map()->instance_descriptors()->enum_cache(),
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_EQ(c->map()->instance_descriptors()->enum_cache(),
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- EnumCache enum_cache = cc->map()->instance_descriptors()->enum_cache();
+ EnumCache enum_cache = cc->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
- CHECK_EQ(enum_cache->keys()->length(), 3);
- CHECK_EQ(enum_cache->indices()->length(), 3);
+ CHECK_EQ(enum_cache.keys().length(), 3);
+ CHECK_EQ(enum_cache.indices().length(), 3);
}
// Initializing the EnumCache for the the topmost map {a} will not create the
// cache for the other maps.
CompileRun("var s = 0; for (let key in a) { s += a[key] };");
{
- CHECK_EQ(a->map()->EnumLength(), 1);
- CHECK_EQ(b->map()->EnumLength(), kInvalidEnumCacheSentinel);
- CHECK_EQ(c->map()->EnumLength(), kInvalidEnumCacheSentinel);
- CHECK_EQ(cc->map()->EnumLength(), 3);
+ CHECK_EQ(a->map().EnumLength(), 1);
+ CHECK_EQ(b->map().EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(c->map().EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(cc->map().EnumLength(), 3);
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- EnumCache enum_cache = a->map()->instance_descriptors()->enum_cache();
+ EnumCache enum_cache = a->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
- CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map()->instance_descriptors()->enum_cache(), enum_cache);
- CHECK_EQ(a->map()->instance_descriptors()->enum_cache(), enum_cache);
- CHECK_EQ(b->map()->instance_descriptors()->enum_cache(), enum_cache);
- CHECK_EQ(c->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
- CHECK_EQ(enum_cache->keys()->length(), 1);
- CHECK_EQ(enum_cache->indices()->length(), 1);
+ CHECK_EQ(enum_cache.keys().length(), 1);
+ CHECK_EQ(enum_cache.indices().length(), 1);
}
// Creating the EnumCache for {c} will create a new EnumCache on the shared
// DescriptorArray.
Handle<EnumCache> previous_enum_cache(
- a->map()->instance_descriptors()->enum_cache(), a->GetIsolate());
+ a->map().instance_descriptors().enum_cache(), a->GetIsolate());
Handle<FixedArray> previous_keys(previous_enum_cache->keys(),
a->GetIsolate());
Handle<FixedArray> previous_indices(previous_enum_cache->indices(),
a->GetIsolate());
CompileRun("var s = 0; for (let key in c) { s += c[key] };");
{
- CHECK_EQ(a->map()->EnumLength(), 1);
- CHECK_EQ(b->map()->EnumLength(), kInvalidEnumCacheSentinel);
- CHECK_EQ(c->map()->EnumLength(), 3);
- CHECK_EQ(cc->map()->EnumLength(), 3);
+ CHECK_EQ(a->map().EnumLength(), 1);
+ CHECK_EQ(b->map().EnumLength(), kInvalidEnumCacheSentinel);
+ CHECK_EQ(c->map().EnumLength(), 3);
+ CHECK_EQ(cc->map().EnumLength(), 3);
- EnumCache enum_cache = c->map()->instance_descriptors()->enum_cache();
+ EnumCache enum_cache = c->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
- CHECK_NE(enum_cache->keys(), *previous_keys);
- CHECK_NE(enum_cache->indices(), *previous_indices);
+ CHECK_NE(enum_cache.keys(), *previous_keys);
+ CHECK_NE(enum_cache.indices(), *previous_indices);
CHECK_EQ(previous_keys->length(), 1);
CHECK_EQ(previous_indices->length(), 1);
- CHECK_EQ(enum_cache->keys()->length(), 3);
- CHECK_EQ(enum_cache->indices()->length(), 3);
+ CHECK_EQ(enum_cache.keys().length(), 3);
+ CHECK_EQ(enum_cache.indices().length(), 3);
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map()->instance_descriptors()->enum_cache(), enum_cache);
- CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map()->instance_descriptors()->enum_cache(), enum_cache);
- CHECK_EQ(b->map()->instance_descriptors()->enum_cache(), enum_cache);
- CHECK_EQ(c->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
}
// {b} can reuse the existing EnumCache, hence we only need to set the correct
// EnumLength on the map without modifying the cache itself.
previous_enum_cache =
- handle(a->map()->instance_descriptors()->enum_cache(), a->GetIsolate());
+ handle(a->map().instance_descriptors().enum_cache(), a->GetIsolate());
previous_keys = handle(previous_enum_cache->keys(), a->GetIsolate());
previous_indices = handle(previous_enum_cache->indices(), a->GetIsolate());
CompileRun("var s = 0; for (let key in b) { s += b[key] };");
{
- CHECK_EQ(a->map()->EnumLength(), 1);
- CHECK_EQ(b->map()->EnumLength(), 2);
- CHECK_EQ(c->map()->EnumLength(), 3);
- CHECK_EQ(cc->map()->EnumLength(), 3);
+ CHECK_EQ(a->map().EnumLength(), 1);
+ CHECK_EQ(b->map().EnumLength(), 2);
+ CHECK_EQ(c->map().EnumLength(), 3);
+ CHECK_EQ(cc->map().EnumLength(), 3);
- EnumCache enum_cache = c->map()->instance_descriptors()->enum_cache();
+ EnumCache enum_cache = c->map().instance_descriptors().enum_cache();
CHECK_NE(enum_cache, *factory->empty_enum_cache());
// The keys and indices caches are not updated.
CHECK_EQ(enum_cache, *previous_enum_cache);
- CHECK_EQ(enum_cache->keys(), *previous_keys);
- CHECK_EQ(enum_cache->indices(), *previous_indices);
- CHECK_EQ(enum_cache->keys()->length(), 3);
- CHECK_EQ(enum_cache->indices()->length(), 3);
+ CHECK_EQ(enum_cache.keys(), *previous_keys);
+ CHECK_EQ(enum_cache.indices(), *previous_indices);
+ CHECK_EQ(enum_cache.keys().length(), 3);
+ CHECK_EQ(enum_cache.indices().length(), 3);
// The enum cache is shared on the descriptor array of maps {a}, {b} and
// {c} only.
- CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*factory->empty_enum_cache());
- CHECK_NE(cc->map()->instance_descriptors()->enum_cache(), enum_cache);
- CHECK_NE(cc->map()->instance_descriptors()->enum_cache(),
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_NE(cc->map().instance_descriptors().enum_cache(),
*previous_enum_cache);
- CHECK_EQ(a->map()->instance_descriptors()->enum_cache(), enum_cache);
- CHECK_EQ(b->map()->instance_descriptors()->enum_cache(), enum_cache);
- CHECK_EQ(c->map()->instance_descriptors()->enum_cache(), enum_cache);
+ CHECK_EQ(a->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(b->map().instance_descriptors().enum_cache(), enum_cache);
+ CHECK_EQ(c->map().instance_descriptors().enum_cache(), enum_cache);
}
}
diff --git a/deps/v8/test/cctest/test-orderedhashtable.cc b/deps/v8/test/cctest/test-orderedhashtable.cc
index b1f7ae9068..9b1bc651fa 100644
--- a/deps/v8/test/cctest/test-orderedhashtable.cc
+++ b/deps/v8/test/cctest/test-orderedhashtable.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <utility>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index ede8e3e71d..aaef09b91e 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -31,18 +31,18 @@
#include <memory>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/base/enum-set.h"
-#include "src/compiler.h"
-#include "src/execution.h"
-#include "src/flags.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/codegen/compiler.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/parsing.h"
@@ -1044,14 +1044,14 @@ TEST(ScopeUsesArgumentsSuperThis) {
(source_data[i].expected == NONE)) && j != 2) {
continue;
}
- int kProgramByteSize = i::StrLength(surroundings[j].prefix) +
- i::StrLength(surroundings[j].suffix) +
- i::StrLength(source_data[i].body);
+ int kProgramByteSize = static_cast<int>(strlen(surroundings[j].prefix) +
+ strlen(surroundings[j].suffix) +
+ strlen(source_data[i].body));
i::ScopedVector<char> program(kProgramByteSize + 1);
i::SNPrintF(program, "%s%s%s", surroundings[j].prefix,
source_data[i].body, surroundings[j].suffix);
i::Handle<i::String> source =
- factory->NewStringFromUtf8(i::CStrVector(program.start()))
+ factory->NewStringFromUtf8(i::CStrVector(program.begin()))
.ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
i::ParseInfo info(isolate, script);
@@ -1412,9 +1412,9 @@ TEST(ScopePositions) {
int kPrefixLen = Utf8LengthHelper(source_data[i].outer_prefix);
int kInnerLen = Utf8LengthHelper(source_data[i].inner_source);
int kSuffixLen = Utf8LengthHelper(source_data[i].outer_suffix);
- int kPrefixByteLen = i::StrLength(source_data[i].outer_prefix);
- int kInnerByteLen = i::StrLength(source_data[i].inner_source);
- int kSuffixByteLen = i::StrLength(source_data[i].outer_suffix);
+ int kPrefixByteLen = static_cast<int>(strlen(source_data[i].outer_prefix));
+ int kInnerByteLen = static_cast<int>(strlen(source_data[i].inner_source));
+ int kSuffixByteLen = static_cast<int>(strlen(source_data[i].outer_suffix));
int kProgramSize = kPrefixLen + kInnerLen + kSuffixLen;
int kProgramByteSize = kPrefixByteLen + kInnerByteLen + kSuffixByteLen;
i::ScopedVector<char> program(kProgramByteSize + 1);
@@ -1424,8 +1424,9 @@ TEST(ScopePositions) {
source_data[i].outer_suffix);
// Parse program source.
- i::Handle<i::String> source = factory->NewStringFromUtf8(
- i::CStrVector(program.start())).ToHandleChecked();
+ i::Handle<i::String> source =
+ factory->NewStringFromUtf8(i::CStrVector(program.begin()))
+ .ToHandleChecked();
CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = factory->NewScript(source);
i::ParseInfo info(isolate, script);
@@ -1525,10 +1526,7 @@ const char* ReadString(unsigned* start) {
enum ParserFlag {
kAllowLazy,
kAllowNatives,
- kAllowHarmonyPublicFields,
- kAllowHarmonyPrivateFields,
kAllowHarmonyPrivateMethods,
- kAllowHarmonyStaticFields,
kAllowHarmonyDynamicImport,
kAllowHarmonyImportMeta,
kAllowHarmonyNumericSeparator
@@ -1542,10 +1540,7 @@ enum ParserSyncTestResult {
void SetGlobalFlags(base::EnumSet<ParserFlag> flags) {
i::FLAG_allow_natives_syntax = flags.contains(kAllowNatives);
- i::FLAG_harmony_public_fields = flags.contains(kAllowHarmonyPublicFields);
- i::FLAG_harmony_private_fields = flags.contains(kAllowHarmonyPrivateFields);
i::FLAG_harmony_private_methods = flags.contains(kAllowHarmonyPrivateMethods);
- i::FLAG_harmony_static_fields = flags.contains(kAllowHarmonyStaticFields);
i::FLAG_harmony_dynamic_import = flags.contains(kAllowHarmonyDynamicImport);
i::FLAG_harmony_import_meta = flags.contains(kAllowHarmonyImportMeta);
i::FLAG_harmony_numeric_separator =
@@ -1554,14 +1549,8 @@ void SetGlobalFlags(base::EnumSet<ParserFlag> flags) {
void SetParserFlags(i::PreParser* parser, base::EnumSet<ParserFlag> flags) {
parser->set_allow_natives(flags.contains(kAllowNatives));
- parser->set_allow_harmony_public_fields(
- flags.contains(kAllowHarmonyPublicFields));
- parser->set_allow_harmony_private_fields(
- flags.contains(kAllowHarmonyPrivateFields));
parser->set_allow_harmony_private_methods(
flags.contains(kAllowHarmonyPrivateMethods));
- parser->set_allow_harmony_static_fields(
- flags.contains(kAllowHarmonyStaticFields));
parser->set_allow_harmony_dynamic_import(
flags.contains(kAllowHarmonyDynamicImport));
parser->set_allow_harmony_import_meta(
@@ -1756,12 +1745,13 @@ TEST(ParserSync) {
for (int i = 0; context_data[i][0] != nullptr; ++i) {
for (int j = 0; statement_data[j] != nullptr; ++j) {
for (int k = 0; termination_data[k] != nullptr; ++k) {
- int kPrefixLen = i::StrLength(context_data[i][0]);
- int kStatementLen = i::StrLength(statement_data[j]);
- int kTerminationLen = i::StrLength(termination_data[k]);
- int kSuffixLen = i::StrLength(context_data[i][1]);
- int kProgramSize = kPrefixLen + kStatementLen + kTerminationLen
- + kSuffixLen + i::StrLength("label: for (;;) { }");
+ int kPrefixLen = static_cast<int>(strlen(context_data[i][0]));
+ int kStatementLen = static_cast<int>(strlen(statement_data[j]));
+ int kTerminationLen = static_cast<int>(strlen(termination_data[k]));
+ int kSuffixLen = static_cast<int>(strlen(context_data[i][1]));
+ int kProgramSize = kPrefixLen + kStatementLen + kTerminationLen +
+ kSuffixLen +
+ static_cast<int>(strlen("label: for (;;) { }"));
// Plug the source code pieces together.
i::ScopedVector<char> program(kProgramSize + 1);
@@ -1772,7 +1762,7 @@ TEST(ParserSync) {
termination_data[k],
context_data[i][1]);
CHECK_EQ(length, kProgramSize);
- TestParserSync(program.start(), nullptr, 0);
+ TestParserSync(program.begin(), nullptr, 0);
}
}
}
@@ -1858,9 +1848,9 @@ void RunParserSyncTest(
}
for (int i = 0; context_data[i][0] != nullptr; ++i) {
for (int j = 0; statement_data[j] != nullptr; ++j) {
- int kPrefixLen = i::StrLength(context_data[i][0]);
- int kStatementLen = i::StrLength(statement_data[j]);
- int kSuffixLen = i::StrLength(context_data[i][1]);
+ int kPrefixLen = static_cast<int>(strlen(context_data[i][0]));
+ int kStatementLen = static_cast<int>(strlen(statement_data[j]));
+ int kSuffixLen = static_cast<int>(strlen(context_data[i][1]));
int kProgramSize = kPrefixLen + kStatementLen + kSuffixLen;
// Plug the source code pieces together.
@@ -1870,9 +1860,9 @@ void RunParserSyncTest(
context_data[i][0],
statement_data[j],
context_data[i][1]);
- PrintF("%s\n", program.start());
+ PrintF("%s\n", program.begin());
CHECK_EQ(length, kProgramSize);
- TestParserSync(program.start(), flags, flags_len, result,
+ TestParserSync(program.begin(), flags, flags_len, result,
always_true_flags, always_true_len, always_false_flags,
always_false_len, is_module, test_preparser,
ignore_error_msg);
@@ -3215,7 +3205,7 @@ TEST(SerializationOfMaybeAssignmentFlag) {
i::ScopedVector<char> program(Utf8LengthHelper(src) + 1);
i::SNPrintF(program, "%s", src);
- i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
+ i::Handle<i::String> source = factory->InternalizeUtf8String(program.begin());
source->PrintOn(stdout);
printf("\n");
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
@@ -3232,7 +3222,7 @@ TEST(SerializationOfMaybeAssignmentFlag) {
i::DeclarationScope* script_scope =
new (&zone) i::DeclarationScope(&zone, &avf);
i::Scope* s = i::Scope::DeserializeScopeChain(
- isolate, &zone, context->scope_info(), script_scope, &avf,
+ isolate, &zone, context.scope_info(), script_scope, &avf,
i::Scope::DeserializationMode::kIncludingVariables);
CHECK(s != script_scope);
CHECK_NOT_NULL(name);
@@ -3265,7 +3255,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
i::ScopedVector<char> program(Utf8LengthHelper(src) + 1);
i::SNPrintF(program, "%s", src);
- i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
+ i::Handle<i::String> source = factory->InternalizeUtf8String(program.begin());
source->PrintOn(stdout);
printf("\n");
i::Zone zone(isolate->allocator(), ZONE_NAME);
@@ -3281,7 +3271,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
i::DeclarationScope* script_scope =
new (&zone) i::DeclarationScope(&zone, &avf);
i::Scope* s = i::Scope::DeserializeScopeChain(
- isolate, &zone, context->scope_info(), script_scope, &avf,
+ isolate, &zone, context.scope_info(), script_scope, &avf,
i::Scope::DeserializationMode::kIncludingVariables);
CHECK(s != script_scope);
@@ -3426,8 +3416,8 @@ TEST(InnerAssignment) {
std::unique_ptr<i::ParseInfo> info;
if (lazy) {
- printf("%s\n", program.start());
- v8::Local<v8::Value> v = CompileRun(program.start());
+ printf("%s\n", program.begin());
+ v8::Local<v8::Value> v = CompileRun(program.begin());
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Handle<i::SharedFunctionInfo> shared =
@@ -3437,7 +3427,7 @@ TEST(InnerAssignment) {
CHECK(i::parsing::ParseFunction(info.get(), shared, isolate));
} else {
i::Handle<i::String> source =
- factory->InternalizeUtf8String(program.start());
+ factory->InternalizeUtf8String(program.begin());
source->PrintOn(stdout);
printf("\n");
i::Handle<i::Script> script = factory->NewScript(source);
@@ -3537,8 +3527,8 @@ TEST(MaybeAssignedParameters) {
Utf8LengthHelper(suffix) + 1);
i::SNPrintF(program, "%s%s", source, suffix);
std::unique_ptr<i::ParseInfo> info;
- printf("%s\n", program.start());
- v8::Local<v8::Value> v = CompileRun(program.start());
+ printf("%s\n", program.begin());
+ v8::Local<v8::Value> v = CompileRun(program.begin());
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
i::Handle<i::SharedFunctionInfo> shared = i::handle(f->shared(), isolate);
@@ -4195,7 +4185,7 @@ i::Scope* DeserializeFunctionScope(i::Isolate* isolate, i::Zone* zone,
i::DeclarationScope* script_scope =
new (zone) i::DeclarationScope(zone, &avf);
i::Scope* s = i::Scope::DeserializeScopeChain(
- isolate, zone, f->context()->scope_info(), script_scope, &avf,
+ isolate, zone, f->context().scope_info(), script_scope, &avf,
i::Scope::DeserializationMode::kIncludingVariables);
return s;
}
@@ -5282,15 +5272,7 @@ TEST(StaticClassFieldsNoErrors) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields,
- kAllowHarmonyStaticFields};
- RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
- always_flags, arraysize(always_flags));
-
- // Without the static flag, all of these are errors
- static const ParserFlag no_static_flags[] = {kAllowHarmonyPublicFields};
- RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
- no_static_flags, arraysize(no_static_flags));
+ RunParserSyncTest(context_data, class_body_data, kSuccess);
}
TEST(ClassFieldsNoErrors) {
@@ -5374,14 +5356,7 @@ TEST(ClassFieldsNoErrors) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields};
- RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
- always_flags, arraysize(always_flags));
-
- static const ParserFlag static_flags[] = {kAllowHarmonyPublicFields,
- kAllowHarmonyStaticFields};
- RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
- static_flags, arraysize(static_flags));
+ RunParserSyncTest(context_data, class_body_data, kSuccess);
}
TEST(PrivateMethodsNoErrors) {
@@ -5471,8 +5446,7 @@ TEST(PrivateMethodsNoErrors) {
RunParserSyncTest(context_data, class_body_data, kError);
- static const ParserFlag private_methods[] = {kAllowHarmonyPrivateFields,
- kAllowHarmonyPrivateMethods};
+ static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
private_methods, arraysize(private_methods));
}
@@ -5531,7 +5505,6 @@ TEST(PrivateMethodsAndFieldsNoErrors) {
RunParserSyncTest(context_data, class_body_data, kError);
static const ParserFlag private_methods_and_fields[] = {
- kAllowHarmonyPrivateFields, kAllowHarmonyPublicFields,
kAllowHarmonyPrivateMethods};
RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
private_methods_and_fields,
@@ -5594,8 +5567,7 @@ TEST(PrivateMethodsErrors) {
RunParserSyncTest(context_data, class_body_data, kError);
- static const ParserFlag private_methods[] = {kAllowHarmonyPrivateFields,
- kAllowHarmonyPrivateMethods};
+ static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
private_methods, arraysize(private_methods));
}
@@ -5628,8 +5600,7 @@ TEST(PrivateMembersInNonClassNoErrors) {
RunParserSyncTest(context_data, class_body_data, kError);
- static const ParserFlag private_methods[] = {kAllowHarmonyPrivateFields,
- kAllowHarmonyPrivateMethods};
+ static const ParserFlag private_methods[] = {kAllowHarmonyPrivateMethods};
RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
private_methods, arraysize(private_methods));
}
@@ -5694,11 +5665,7 @@ TEST(PrivateClassFieldsNoErrors) {
};
// clang-format on
- RunParserSyncTest(context_data, class_body_data, kError);
-
- static const ParserFlag private_fields[] = {kAllowHarmonyPrivateFields};
- RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
- private_fields, arraysize(private_fields));
+ RunParserSyncTest(context_data, class_body_data, kSuccess);
}
TEST(StaticClassFieldsErrors) {
@@ -5743,14 +5710,7 @@ TEST(StaticClassFieldsErrors) {
};
// clang-format on
- static const ParserFlag no_static_flags[] = {kAllowHarmonyPublicFields};
- RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
- no_static_flags, arraysize(no_static_flags));
-
- static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields,
- kAllowHarmonyStaticFields};
- RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
- always_flags, arraysize(always_flags));
+ RunParserSyncTest(context_data, class_body_data, kError);
}
TEST(ClassFieldsErrors) {
@@ -5794,14 +5754,7 @@ TEST(ClassFieldsErrors) {
};
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyPublicFields};
- RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
- always_flags, arraysize(always_flags));
-
- static const ParserFlag static_flags[] = {kAllowHarmonyPublicFields,
- kAllowHarmonyStaticFields};
- RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
- static_flags, arraysize(static_flags));
+ RunParserSyncTest(context_data, class_body_data, kError);
}
TEST(PrivateClassFieldsErrors) {
@@ -5879,10 +5832,6 @@ TEST(PrivateClassFieldsErrors) {
// clang-format on
RunParserSyncTest(context_data, class_body_data, kError);
-
- static const ParserFlag private_fields[] = {kAllowHarmonyPrivateFields};
- RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
- private_fields, arraysize(private_fields));
}
TEST(PrivateStaticClassFieldsNoErrors) {
@@ -5947,18 +5896,7 @@ TEST(PrivateStaticClassFieldsNoErrors) {
};
// clang-format on
- RunParserSyncTest(context_data, class_body_data, kError);
-
- static const ParserFlag public_static_fields[] = {kAllowHarmonyPublicFields,
- kAllowHarmonyStaticFields};
- RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
- public_static_fields, arraysize(public_static_fields));
-
- static const ParserFlag private_static_fields[] = {
- kAllowHarmonyPublicFields, kAllowHarmonyStaticFields,
- kAllowHarmonyPrivateFields};
- RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr, 0,
- private_static_fields, arraysize(private_static_fields));
+ RunParserSyncTest(context_data, class_body_data, kSuccess, nullptr);
}
TEST(PrivateStaticClassFieldsErrors) {
@@ -6059,17 +5997,6 @@ TEST(PrivateStaticClassFieldsErrors) {
// clang-format on
RunParserSyncTest(context_data, class_body_data, kError);
-
- static const ParserFlag public_static_fields[] = {kAllowHarmonyPublicFields,
- kAllowHarmonyStaticFields};
- RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
- public_static_fields, arraysize(public_static_fields));
-
- static const ParserFlag private_static_fields[] = {
- kAllowHarmonyPublicFields, kAllowHarmonyStaticFields,
- kAllowHarmonyPrivateFields};
- RunParserSyncTest(context_data, class_body_data, kError, nullptr, 0,
- private_static_fields, arraysize(private_static_fields));
}
TEST(PrivateNameResolutionErrors) {
@@ -6117,10 +6044,6 @@ TEST(PrivateNameResolutionErrors) {
// clang-format on
RunParserSyncTest(context_data, statement_data, kError);
-
- static const ParserFlag private_fields[] = {kAllowHarmonyPrivateFields};
- RunParserSyncTest(context_data, statement_data, kError, nullptr, 0,
- private_fields, arraysize(private_fields));
}
TEST(PrivateNameErrors) {
@@ -6169,10 +6092,6 @@ TEST(PrivateNameErrors) {
// clang-format on
RunParserSyncTest(context_data, statement_data, kError);
-
- static const ParserFlag private_fields[] = {kAllowHarmonyPrivateFields};
- RunParserSyncTest(context_data, statement_data, kError, nullptr, 0,
- private_fields, arraysize(private_fields));
}
TEST(ClassExpressionErrors) {
@@ -6184,8 +6103,6 @@ TEST(ClassExpressionErrors) {
"class name extends",
"class extends",
"class {",
- "class { m }",
- "class { m; n }",
"class { m: 1 }",
"class { m(); n() }",
"class { get m }",
@@ -6208,8 +6125,6 @@ TEST(ClassDeclarationErrors) {
"class name extends",
"class extends",
"class name {",
- "class name { m }",
- "class name { m; n }",
"class name { m: 1 }",
"class name { m(); n() }",
"class name { get x }",
@@ -6583,6 +6498,34 @@ TEST(ForOfMultipleDeclarationsError) {
RunParserSyncTest(context_data, data, kError);
}
+TEST(ForInOfLetExpression) {
+ const char* sloppy_context_data[][2] = {
+ {"", ""}, {"function foo(){", "}"}, {nullptr, nullptr}};
+
+ const char* strict_context_data[][2] = {
+ {"'use strict';", ""},
+ {"function foo(){ 'use strict';", "}"},
+ {nullptr, nullptr}};
+
+ const char* async_context_data[][2] = {
+ {"async function foo(){", "}"},
+ {"async function foo(){ 'use strict';", "}"},
+ {nullptr, nullptr}};
+
+ const char* for_let_in[] = {"for (let.x in {}) {}", nullptr};
+
+ const char* for_let_of[] = {"for (let.x of []) {}", nullptr};
+
+ const char* for_await_let_of[] = {"for await (let.x of []) {}", nullptr};
+
+ // The only place `let.x` is legal as a left-hand side expression
+ // is in sloppy mode in a for-in loop.
+ RunParserSyncTest(sloppy_context_data, for_let_in, kSuccess);
+ RunParserSyncTest(strict_context_data, for_let_in, kError);
+ RunParserSyncTest(sloppy_context_data, for_let_of, kError);
+ RunParserSyncTest(strict_context_data, for_let_of, kError);
+ RunParserSyncTest(async_context_data, for_await_let_of, kError);
+}
TEST(ForInNoDeclarationsError) {
const char* context_data[][2] = {{"", ""},
@@ -10551,7 +10494,7 @@ TEST(NoPessimisticContextAllocation) {
"%s", suffix);
i::Handle<i::String> source =
- factory->InternalizeUtf8String(program.start());
+ factory->InternalizeUtf8String(program.begin());
source->PrintOn(stdout);
printf("\n");
@@ -11109,8 +11052,8 @@ TEST(LexicalLoopVariable) {
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
LocalContext env;
- typedef std::function<void(const i::ParseInfo& info, i::DeclarationScope*)>
- TestCB;
+ using TestCB =
+ std::function<void(const i::ParseInfo& info, i::DeclarationScope*)>;
auto TestProgram = [isolate](const char* program, TestCB test) {
i::Factory* const factory = isolate->factory();
i::Handle<i::String> source =
@@ -11362,8 +11305,6 @@ TEST(PrivateNamesSyntaxErrorEarly) {
nullptr};
- static const ParserFlag flags[] = {kAllowHarmonyPrivateFields};
- RunParserSyncTest(context_data, statement_data, kError, nullptr, 0, flags, 1);
RunParserSyncTest(context_data, statement_data, kError);
}
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm.cc b/deps/v8/test/cctest/test-poison-disasm-arm.cc
index 8b883cad4f..bde584c3fa 100644
--- a/deps/v8/test/cctest/test-poison-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-poison-disasm-arm.cc
@@ -6,9 +6,9 @@
// former isn't available in V8.
#include <regex> // NOLINT(build/c++11)
-#include "src/api-inl.h"
-#include "src/disassembler.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -20,8 +20,8 @@ std::string DisassembleFunction(const char* function) {
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
CcTest::global()->Get(context, v8_str(function)).ToLocalChecked())));
- Address begin = f->code()->raw_instruction_start();
- Address end = f->code()->raw_instruction_end();
+ Address begin = f->code().raw_instruction_start();
+ Address end = f->code().raw_instruction_end();
Isolate* isolate = CcTest::i_isolate();
std::ostringstream os;
Disassembler::Decode(isolate, &os, reinterpret_cast<byte*>(begin),
@@ -65,6 +65,7 @@ TEST(DisasmPoisonMonomorphicLoad) {
CompileRun(
"function mono(o) { return o.x; };"
+ "%PrepareFunctionForOptimization(mono);"
"mono({ x : 1 });"
"mono({ x : 1 });"
"%OptimizeFunctionOnNextCall(mono);"
@@ -146,6 +147,7 @@ TEST(DisasmPoisonPolymorphicLoad) {
"let o1 = { x : 1 };"
"let o2 = { y : 1 };"
"o2.x = 2;"
+ "%PrepareFunctionForOptimization(poly);"
"poly(o1);"
"poly(o2);"
"poly(o1);"
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index d652a2c16e..392782afea 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -28,12 +28,12 @@
// Tests of profiles generator and utilities.
#include "include/v8-profiler.h"
-#include "src/api-inl.h"
-#include "src/log.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/init/v8.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
-#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
@@ -380,7 +380,7 @@ TEST(RecordTickSample) {
CpuProfilesCollection profiles(isolate);
CpuProfiler profiler(isolate);
profiles.set_cpu_profiler(&profiler);
- profiles.StartProfiling("", false);
+ profiles.StartProfiling("");
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
@@ -448,7 +448,7 @@ TEST(SampleIds) {
CpuProfilesCollection profiles(isolate);
CpuProfiler profiler(isolate);
profiles.set_cpu_profiler(&profiler);
- profiles.StartProfiling("", true);
+ profiles.StartProfiling("", {CpuProfilingMode::kLeafNodeLineNumbers});
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
@@ -502,7 +502,7 @@ TEST(NoSamples) {
CpuProfilesCollection profiles(isolate);
CpuProfiler profiler(isolate);
profiles.set_cpu_profiler(&profiler);
- profiles.StartProfiling("", false);
+ profiles.StartProfiling("");
ProfileGenerator generator(&profiles);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
@@ -590,10 +590,10 @@ TEST(Issue51919) {
for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i) {
i::Vector<char> title = i::Vector<char>::New(16);
i::SNPrintF(title, "%d", i);
- CHECK(collection.StartProfiling(title.start(), false));
- titles[i] = title.start();
+ CHECK(collection.StartProfiling(title.begin()));
+ titles[i] = title.begin();
}
- CHECK(!collection.StartProfiling("maximum", false));
+ CHECK(!collection.StartProfiling("maximum"));
for (int i = 0; i < CpuProfilesCollection::kMaxSimultaneousProfiles; ++i)
i::DeleteArray(titles[i]);
}
@@ -678,7 +678,7 @@ int GetFunctionLineNumber(CpuProfiler& profiler, LocalContext& env,
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
env->Global()->Get(env.local(), v8_str(name)).ToLocalChecked())));
CodeEntry* func_entry =
- code_map->FindEntry(func->abstract_code()->InstructionStart());
+ code_map->FindEntry(func->abstract_code().InstructionStart());
if (!func_entry) FATAL("%s", name);
return func_entry->line_number();
}
@@ -734,6 +734,7 @@ TEST(BailoutReason) {
USE(i_function);
CompileRun(
+ "%PrepareFunctionForOptimization(Debugger);"
"%OptimizeFunctionOnNextCall(Debugger);"
"%NeverOptimizeFunction(Debugger);"
"Debugger();"
diff --git a/deps/v8/test/cctest/test-random-number-generator.cc b/deps/v8/test/cctest/test-random-number-generator.cc
index 5b13bda3f9..00b5224099 100644
--- a/deps/v8/test/cctest/test-random-number-generator.cc
+++ b/deps/v8/test/cctest/test-random-number-generator.cc
@@ -25,9 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/flags.h"
-#include "src/isolate.h"
-#include "src/v8.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "src/base/utils/random-number-generator.h"
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index f5889dd3d6..d71223e73d 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -30,22 +30,22 @@
#include <sstream>
#include "include/v8.h"
-#include "src/api-inl.h"
-#include "src/assembler-arch.h"
+#include "src/api/api-inl.h"
#include "src/ast/ast.h"
-#include "src/char-predicates-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/codegen/assembler-arch.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/interpreter-irregexp.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-parser.h"
-#include "src/splay-tree-inl.h"
-#include "src/string-stream.h"
-#include "src/unicode-inl.h"
-#include "src/v8.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/strings/string-stream.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/ostreams.h"
+#include "src/utils/splay-tree-inl.h"
#include "src/zone/zone-list-inl.h"
#if V8_TARGET_ARCH_ARM
@@ -588,8 +588,8 @@ static void Execute(const char* input, bool multiline, bool unicode,
class TestConfig {
public:
- typedef int Key;
- typedef int Value;
+ using Key = int;
+ using Value = int;
static const int kNoKey;
static int NoValue() { return 0; }
static inline int Compare(int a, int b) {
@@ -674,7 +674,7 @@ TEST(DispatchTableConstruction) {
for (int j = 0; j < 2 * kRangeSize; j++) {
range[j] = PseudoRandom(i + 25, j + 87) % kLimit;
}
- range.Sort();
+ std::sort(range.begin(), range.end());
for (int j = 1; j < 2 * kRangeSize; j++) {
CHECK(range[j-1] <= range[j]);
}
@@ -733,23 +733,23 @@ TEST(ParsePossessiveRepetition) {
// Tests of interpreter.
#if V8_TARGET_ARCH_IA32
-typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerIA32;
#elif V8_TARGET_ARCH_X64
-typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerX64;
#elif V8_TARGET_ARCH_ARM
-typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerARM;
#elif V8_TARGET_ARCH_ARM64
-typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerARM64;
#elif V8_TARGET_ARCH_S390
-typedef RegExpMacroAssemblerS390 ArchRegExpMacroAssembler;
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerS390;
#elif V8_TARGET_ARCH_PPC
-typedef RegExpMacroAssemblerPPC ArchRegExpMacroAssembler;
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerPPC;
#elif V8_TARGET_ARCH_MIPS
-typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS;
#elif V8_TARGET_ARCH_MIPS64
-typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS;
#elif V8_TARGET_ARCH_X87
-typedef RegExpMacroAssemblerX87 ArchRegExpMacroAssembler;
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerX87;
#endif
class ContextInitializer {
diff --git a/deps/v8/test/cctest/test-representation.cc b/deps/v8/test/cctest/test-representation.cc
index af0051e0af..3b6171b405 100644
--- a/deps/v8/test/cctest/test-representation.cc
+++ b/deps/v8/test/cctest/test-representation.cc
@@ -27,7 +27,7 @@
#include "test/cctest/cctest.h"
-#include "src/property-details.h"
+#include "src/objects/property-details.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-roots.cc b/deps/v8/test/cctest/test-roots.cc
index a0ad1f06c6..137053788d 100644
--- a/deps/v8/test/cctest/test-roots.cc
+++ b/deps/v8/test/cctest/test-roots.cc
@@ -6,7 +6,7 @@
#include "src/objects/cell.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/script.h"
-#include "src/roots-inl.h"
+#include "src/roots/roots-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -14,7 +14,7 @@ namespace internal {
namespace {
AllocationSpace GetSpaceFromObject(Object object) {
- DCHECK(object->IsHeapObject());
+ DCHECK(object.IsHeapObject());
return MemoryChunk::FromHeapObject(HeapObject::cast(object))
->owner()
->identity();
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index eec773e0de..3c8f352551 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -7,8 +7,8 @@
#include <map>
#include <string>
#include "include/v8.h"
-#include "src/flags.h"
-#include "src/simulator.h"
+#include "src/execution/simulator.h"
+#include "src/flags/flags.h"
#include "test/cctest/cctest.h"
namespace {
@@ -19,8 +19,8 @@ class Sample {
Sample() = default;
- typedef const void* const* const_iterator;
- const_iterator begin() const { return data_.start(); }
+ using const_iterator = const void* const*;
+ const_iterator begin() const { return data_.begin(); }
const_iterator end() const { return &data_[data_.length()]; }
int size() const { return data_.length(); }
@@ -49,6 +49,8 @@ class SimulatorHelper {
simulator_->get_register(v8::internal::Simulator::sp));
state->fp = reinterpret_cast<void*>(
simulator_->get_register(v8::internal::Simulator::r11));
+ state->lr = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::lr));
#elif V8_TARGET_ARCH_ARM64
if (simulator_->sp() == 0 || simulator_->fp() == 0) {
// It's possible that the simulator is interrupted while it is updating
@@ -60,6 +62,7 @@ class SimulatorHelper {
state->pc = reinterpret_cast<void*>(simulator_->pc());
state->sp = reinterpret_cast<void*>(simulator_->sp());
state->fp = reinterpret_cast<void*>(simulator_->fp());
+ state->lr = reinterpret_cast<void*>(simulator_->lr());
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
state->pc = reinterpret_cast<void*>(simulator_->get_pc());
state->sp = reinterpret_cast<void*>(
@@ -72,12 +75,15 @@ class SimulatorHelper {
simulator_->get_register(v8::internal::Simulator::sp));
state->fp = reinterpret_cast<void*>(
simulator_->get_register(v8::internal::Simulator::fp));
+ state->lr = reinterpret_cast<void*>(simulator_->get_lr());
#elif V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
state->pc = reinterpret_cast<void*>(simulator_->get_pc());
state->sp = reinterpret_cast<void*>(
simulator_->get_register(v8::internal::Simulator::sp));
state->fp = reinterpret_cast<void*>(
simulator_->get_register(v8::internal::Simulator::fp));
+ state->lr = reinterpret_cast<void*>(
+ simulator_->get_register(v8::internal::Simulator::ra));
#endif
}
@@ -94,7 +100,7 @@ class SamplingTestHelper {
const void* code_start;
size_t code_len;
};
- typedef std::map<const void*, CodeEventEntry> CodeEntries;
+ using CodeEntries = std::map<const void*, CodeEventEntry>;
explicit SamplingTestHelper(const std::string& test_function)
: sample_is_taken_(false), isolate_(CcTest::isolate()) {
@@ -148,7 +154,7 @@ class SamplingTestHelper {
state.sp = &state;
#endif
v8::SampleInfo info;
- isolate_->GetStackSample(state, sample_.data().start(),
+ isolate_->GetStackSample(state, sample_.data().begin(),
static_cast<size_t>(sample_.size()), &info);
size_t frames_count = info.frames_count;
CHECK_LE(frames_count, static_cast<size_t>(sample_.size()));
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 878ff9168e..3c8b38898d 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -29,24 +29,24 @@
#include <sys/stat.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
-#include "src/bootstrapper.h"
-#include "src/compilation-cache.h"
-#include "src/compiler.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/macro-assembler-inl.h"
#include "src/debug/debug.h"
-#include "src/hash-seed-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/spaces.h"
+#include "src/init/bootstrapper.h"
#include "src/interpreter/interpreter.h"
-#include "src/macro-assembler-inl.h"
-#include "src/objects-inl.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
@@ -95,7 +95,6 @@ class TestSerializer {
v8::Isolate::Scope isolate_scope(v8_isolate);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->Init(nullptr, nullptr);
- isolate->heap()->read_only_space()->ClearStringPaddingIfNeeded();
return v8_isolate;
}
@@ -147,66 +146,12 @@ static Vector<const byte> WritePayload(const Vector<const byte>& payload) {
namespace {
-bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
- const char* utf8_source, const char* name) {
- v8::Context::Scope context_scope(context);
- v8::TryCatch try_catch(isolate);
- v8::Local<v8::String> source_string;
- if (!v8::String::NewFromUtf8(isolate, utf8_source, v8::NewStringType::kNormal)
- .ToLocal(&source_string)) {
- return false;
- }
- v8::Local<v8::String> resource_name =
- v8::String::NewFromUtf8(isolate, name, v8::NewStringType::kNormal)
- .ToLocalChecked();
- v8::ScriptOrigin origin(resource_name);
- v8::ScriptCompiler::Source source(source_string, origin);
- v8::Local<v8::Script> script;
- if (!v8::ScriptCompiler::Compile(context, &source).ToLocal(&script))
- return false;
- if (script->Run(context).IsEmpty()) return false;
- CHECK(!try_catch.HasCaught());
- return true;
-}
-
-v8::StartupData CreateSnapshotDataBlob(const char* embedded_source = nullptr) {
+// Convenience wrapper around the convenience wrapper.
+v8::StartupData CreateSnapshotDataBlob(const char* embedded_source) {
return CreateSnapshotDataBlobInternal(
v8::SnapshotCreator::FunctionCodeHandling::kClear, embedded_source);
}
-v8::StartupData WarmUpSnapshotDataBlob(v8::StartupData cold_snapshot_blob,
- const char* warmup_source) {
- CHECK(cold_snapshot_blob.raw_size > 0 && cold_snapshot_blob.data != nullptr);
- CHECK_NOT_NULL(warmup_source);
- // Use following steps to create a warmed up snapshot blob from a cold one:
- // - Create a new isolate from the cold snapshot.
- // - Create a new context to run the warmup script. This will trigger
- // compilation of executed functions.
- // - Create a new context. This context will be unpolluted.
- // - Serialize the isolate and the second context into a new snapshot blob.
- v8::StartupData result = {nullptr, 0};
- {
- v8::SnapshotCreator snapshot_creator(nullptr, &cold_snapshot_blob);
- v8::Isolate* isolate = snapshot_creator.GetIsolate();
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- if (!RunExtraCode(isolate, context, warmup_source, "<warm-up>")) {
- return result;
- }
- }
- {
- v8::HandleScope handle_scope(isolate);
- isolate->ContextDisposedNotification(false);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- snapshot_creator.SetDefaultContext(context);
- }
- result = snapshot_creator.CreateBlob(
- v8::SnapshotCreator::FunctionCodeHandling::kKeep);
- }
- return result;
-}
-
} // namespace
static StartupBlobs Serialize(v8::Isolate* isolate) {
@@ -245,12 +190,12 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
Vector<const uint8_t> tail, int repeats) {
int source_length = head.length() + body.length() * repeats + tail.length();
uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
- CopyChars(source, head.start(), head.length());
+ CopyChars(source, head.begin(), head.length());
for (int i = 0; i < repeats; i++) {
- CopyChars(source + head.length() + i * body.length(), body.start(),
+ CopyChars(source + head.length() + i * body.length(), body.begin(),
body.length());
}
- CopyChars(source + head.length() + repeats * body.length(), tail.start(),
+ CopyChars(source + head.length() + repeats * body.length(), tail.begin(),
tail.length());
return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
source_length);
@@ -272,13 +217,14 @@ static void SanityCheck(v8::Isolate* v8_isolate) {
#endif
CHECK(isolate->global_object()->IsJSObject());
CHECK(isolate->native_context()->IsContext());
- isolate->factory()->InternalizeOneByteString(StaticCharVector("Empty"));
+ isolate->factory()->InternalizeString(StaticCharVector("Empty"));
}
void TestStartupSerializerOnceImpl() {
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
+ ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs);
{
v8::HandleScope handle_scope(isolate);
@@ -383,6 +329,7 @@ UNINITIALIZED_TEST(StartupSerializerTwice) {
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
blobs1.Dispose();
+ ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs2);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -403,6 +350,7 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
v8::Isolate* isolate = TestSerializer::NewIsolateInitialized();
StartupBlobs blobs = Serialize(isolate);
isolate->Dispose();
+ ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -431,6 +379,7 @@ UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
StartupBlobs blobs2 = Serialize(isolate);
isolate->Dispose();
blobs1.Dispose();
+ ReadOnlyHeap::ClearSharedHeapForTest();
isolate = Deserialize(blobs2);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -510,6 +459,7 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
+ ReadOnlyHeap::ClearSharedHeapForTest();
}
UNINITIALIZED_TEST(PartialSerializerContext) {
@@ -594,7 +544,7 @@ static void PartiallySerializeCustomContext(
StaticCharVector("function g() { return [,"), StaticCharVector("1,"),
StaticCharVector("];} a = g(); b = g(); b.push(1);"), 100000);
v8::MaybeLocal<v8::String> source_str = v8::String::NewFromOneByte(
- v8_isolate, source.start(), v8::NewStringType::kNormal,
+ v8_isolate, source.begin(), v8::NewStringType::kNormal,
source.length());
CompileRun(source_str.ToLocalChecked());
source.Dispose();
@@ -639,6 +589,7 @@ static void PartiallySerializeCustomContext(
*read_only_blob_out = WritePayload(read_only_snapshot.RawData());
}
v8_isolate->Dispose();
+ ReadOnlyHeap::ClearSharedHeapForTest();
}
UNINITIALIZED_TEST(PartialSerializerCustomContext) {
@@ -834,7 +785,7 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobStringNotInternalized) {
v8::Local<v8::Value> result = CompileRun("f()").As<v8::Value>();
CHECK(result->IsString());
i::String str = *v8::Utils::OpenHandle(*result.As<v8::String>());
- CHECK_EQ(std::string(str->ToCString().get()), "A");
+ CHECK_EQ(std::string(str.ToCString().get()), "A");
CHECK(!str.IsInternalizedString());
CHECK(!i::ReadOnlyHeap::Contains(str));
}
@@ -973,7 +924,7 @@ void DeserializeInternalFields(v8::Local<v8::Object> holder, int index,
deserialized_data.push_back(embedder_field);
}
-typedef std::vector<std::tuple<const char*, int32_t>> Int32Expectations;
+using Int32Expectations = std::vector<std::tuple<const char*, int32_t>>;
void TestInt32Expectations(const Int32Expectations& expectations) {
for (const auto& e : expectations) {
@@ -1154,8 +1105,6 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobDetachedArrayBuffer) {
i::Handle<i::JSTypedArray> array =
i::Handle<i::JSTypedArray>::cast(v8::Utils::OpenHandle(*x));
CHECK(array->WasDetached());
- CHECK_NULL(
- FixedTypedArrayBase::cast(array->elements())->external_pointer());
}
isolate->Dispose();
delete[] blob.data; // We can dispose of the snapshot blob now.
@@ -1381,6 +1330,7 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobWithLocker) {
const char* source1 = "function f() { return 42; }";
DisableEmbeddedBlobRefcounting();
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::StartupData data1 = CreateSnapshotDataBlob(source1);
v8::Isolate::CreateParams params1;
@@ -1448,7 +1398,7 @@ bool IsCompiled(const char* name) {
return i::Handle<i::JSFunction>::cast(
v8::Utils::OpenHandle(*CompileRun(name)))
->shared()
- ->is_compiled();
+ .is_compiled();
}
UNINITIALIZED_TEST(SnapshotDataBlobWithWarmup) {
@@ -1456,8 +1406,9 @@ UNINITIALIZED_TEST(SnapshotDataBlobWithWarmup) {
const char* warmup = "Math.abs(1); Math.random = 1;";
DisableEmbeddedBlobRefcounting();
- v8::StartupData cold = CreateSnapshotDataBlob();
- v8::StartupData warm = WarmUpSnapshotDataBlob(cold, warmup);
+ v8::StartupData cold = CreateSnapshotDataBlob(nullptr);
+ v8::StartupData warm = WarmUpSnapshotDataBlobInternal(cold, warmup);
+ ReadOnlyHeap::ClearSharedHeapForTest();
delete[] cold.data;
v8::Isolate::CreateParams params;
@@ -1493,7 +1444,8 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobWithWarmup) {
DisableEmbeddedBlobRefcounting();
v8::StartupData cold = CreateSnapshotDataBlob(source);
- v8::StartupData warm = WarmUpSnapshotDataBlob(cold, warmup);
+ v8::StartupData warm = WarmUpSnapshotDataBlobInternal(cold, warmup);
+ ReadOnlyHeap::ClearSharedHeapForTest();
delete[] cold.data;
v8::Isolate::CreateParams params;
@@ -1533,7 +1485,7 @@ UNINITIALIZED_TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
DisableEmbeddedBlobRefcounting();
v8::StartupData data =
- CreateSnapshotDataBlob(reinterpret_cast<const char*>(source.start()));
+ CreateSnapshotDataBlob(reinterpret_cast<const char*>(source.begin()));
v8::Isolate::CreateParams params;
params.snapshot_blob = &data;
@@ -1571,7 +1523,7 @@ int CountBuiltins() {
int counter = 0;
for (HeapObject obj = iterator.next(); !obj.is_null();
obj = iterator.next()) {
- if (obj->IsCode() && Code::cast(obj)->kind() == Code::BUILTIN) counter++;
+ if (obj.IsCode() && Code::cast(obj).kind() == Code::BUILTIN) counter++;
}
return counter;
}
@@ -1638,12 +1590,12 @@ void TestCodeSerializerOnePlusOneImpl(bool verify_builtins_count = true) {
}
CHECK_NE(*orig, *copy);
- CHECK(Script::cast(copy->script())->source() == *copy_source);
+ CHECK(Script::cast(copy->script()).source() == *copy_source);
Handle<JSFunction> copy_fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
copy, isolate->native_context());
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
Handle<Object> copy_result =
Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
CHECK_EQ(2, Handle<Smi>::cast(copy_result)->value());
@@ -1735,7 +1687,7 @@ TEST(CodeSerializerInternalizedString) {
CHECK(!orig_source.is_identical_to(copy_source));
CHECK(orig_source->Equals(*copy_source));
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
i::ScriptData* script_data = nullptr;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
@@ -1757,7 +1709,7 @@ TEST(CodeSerializerInternalizedString) {
v8::ScriptCompiler::kConsumeCodeCache);
}
CHECK_NE(*orig, *copy);
- CHECK(Script::cast(copy->script())->source() == *copy_source);
+ CHECK(Script::cast(copy->script()).source() == *copy_source);
Handle<JSFunction> copy_fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -1794,7 +1746,7 @@ TEST(CodeSerializerLargeCodeObject) {
Handle<String> source_str =
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
@@ -1831,7 +1783,7 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
ManualGCScope manual_gc_scope;
FLAG_always_opt = false;
const char* filter_flag = "--turbo-filter=NOTHING";
- FlagList::SetFlagsFromString(filter_flag, StrLength(filter_flag));
+ FlagList::SetFlagsFromString(filter_flag, strlen(filter_flag));
FLAG_manual_evacuation_candidates_selection = true;
LocalContext context;
@@ -1859,7 +1811,7 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
ec_page = Page::FromHeapObject(*moving_object);
}
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
@@ -1922,7 +1874,7 @@ TEST(CodeSerializerLargeStrings) {
f->NewStringFromOneByte(source_t).ToHandleChecked())
.ToHandleChecked();
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
@@ -1994,7 +1946,7 @@ TEST(CodeSerializerThreeBigStrings) {
f->NewConsString(source_a_str, source_b_str).ToHandleChecked(),
source_c_str).ToHandleChecked();
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
@@ -2113,7 +2065,7 @@ TEST(CodeSerializerExternalString) {
->NewStringFromUtf8(CStrVector(source))
.ToHandleChecked();
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
@@ -2160,7 +2112,7 @@ TEST(CodeSerializerLargeExternalString) {
StaticCharVector(""), 999999);
Handle<String> name = f->NewStringFromOneByte(string).ToHandleChecked();
SerializerOneByteResource one_byte_resource(
- reinterpret_cast<const char*>(string.start()), string.length());
+ reinterpret_cast<const char*>(string.begin()), string.length());
name = f->InternalizeString(name);
name->MakeExternal(&one_byte_resource);
CHECK(name->IsExternalOneByteString());
@@ -2175,7 +2127,7 @@ TEST(CodeSerializerLargeExternalString) {
f->NewConsString(f->NewStringFromAsciiChecked(" = 42; "), name)
.ToHandleChecked()).ToHandleChecked();
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig = CompileScriptAndProduceCache(
@@ -2227,7 +2179,7 @@ TEST(CodeSerializerExternalScriptName) {
CHECK(name->IsExternalOneByteString());
CHECK(!name->IsInternalizedString());
- Handle<JSObject> global(isolate->context()->global_object(), isolate);
+ Handle<JSObject> global(isolate->context().global_object(), isolate);
ScriptData* cache = nullptr;
Handle<SharedFunctionInfo> orig =
@@ -2440,7 +2392,7 @@ TEST(CodeSerializerAfterExecute) {
Handle<SharedFunctionInfo> sfi = v8::Utils::OpenHandle(*script);
CHECK(sfi->HasBytecodeArray());
BytecodeArray bytecode = sfi->GetBytecodeArray();
- CHECK_EQ(bytecode->osr_loop_nesting_level(), 0);
+ CHECK_EQ(bytecode.osr_loop_nesting_level(), 0);
{
DisallowCompilation no_compile_expected(
@@ -3328,6 +3280,7 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
}
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3408,6 +3361,7 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
isolate->Dispose();
}
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
SnapshotCreator creator(nullptr, &blob);
v8::Isolate* isolate = creator.GetIsolate();
{
@@ -3434,6 +3388,7 @@ UNINITIALIZED_TEST(SnapshotCreatorAddData) {
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
{
+ ReadOnlyHeap::ClearSharedHeapForTest();
v8::Isolate::CreateParams params;
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3778,6 +3733,7 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
CHECK(blob.CanBeRehashed());
}
+ ReadOnlyHeap::ClearSharedHeapForTest();
i::FLAG_hash_seed = 1337;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -3807,14 +3763,14 @@ UNINITIALIZED_TEST(ReinitializeHashSeedRehashable) {
}
void CheckSFIsAreWeak(WeakFixedArray sfis, Isolate* isolate) {
- CHECK_GT(sfis->length(), 0);
+ CHECK_GT(sfis.length(), 0);
int no_of_weak = 0;
- for (int i = 0; i < sfis->length(); ++i) {
- MaybeObject maybe_object = sfis->Get(i);
+ for (int i = 0; i < sfis.length(); ++i) {
+ MaybeObject maybe_object = sfis.Get(i);
HeapObject heap_object;
CHECK(maybe_object->IsWeakOrCleared() ||
(maybe_object->GetHeapObjectIfStrong(&heap_object) &&
- heap_object->IsUndefined(isolate)));
+ heap_object.IsUndefined(isolate)));
if (maybe_object->IsWeak()) {
++no_of_weak;
}
@@ -3867,7 +3823,7 @@ UNINITIALIZED_TEST(WeakArraySerializationInSnapshot) {
// Verify that the pointers in shared_function_infos are weak.
WeakFixedArray sfis =
- Script::cast(function->shared()->script())->shared_function_infos();
+ Script::cast(function->shared().script()).shared_function_infos();
CheckSFIsAreWeak(sfis, reinterpret_cast<i::Isolate*>(isolate));
}
isolate->Dispose();
@@ -3897,7 +3853,7 @@ TEST(WeakArraySerializationInCodeCache) {
isolate, src, src, cache, v8::ScriptCompiler::kConsumeCodeCache);
// Verify that the pointers in shared_function_infos are weak.
- WeakFixedArray sfis = Script::cast(copy->script())->shared_function_infos();
+ WeakFixedArray sfis = Script::cast(copy->script()).shared_function_infos();
CheckSFIsAreWeak(sfis, isolate);
delete cache;
diff --git a/deps/v8/test/cctest/test-smi-lexicographic-compare.cc b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
index 7e4f76698f..4e6b196e70 100644
--- a/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
+++ b/deps/v8/test/cctest/test-smi-lexicographic-compare.cc
@@ -4,9 +4,9 @@
#include <set>
-#include "src/objects-inl.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -23,8 +23,8 @@ void AddSigned(std::set<Smi>& smis, int64_t x) {
// Uses std::lexicographical_compare twice to convert the result to -1, 0 or 1.
int ExpectedCompareResult(Smi a, Smi b) {
- std::string str_a = std::to_string(a->value());
- std::string str_b = std::to_string(b->value());
+ std::string str_a = std::to_string(a.value());
+ std::string str_b = std::to_string(b.value());
bool expected_a_lt_b = std::lexicographical_compare(
str_a.begin(), str_a.end(), str_b.begin(), str_b.end());
bool expected_b_lt_a = std::lexicographical_compare(
diff --git a/deps/v8/test/cctest/test-stack-unwinding-x64.cc b/deps/v8/test/cctest/test-stack-unwinding-x64.cc
index 1802c1018a..583e14111a 100644
--- a/deps/v8/test/cctest/test-stack-unwinding-x64.cc
+++ b/deps/v8/test/cctest/test-stack-unwinding-x64.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/base/win32-headers.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
class UnwindingWinX64Callbacks {
@@ -55,7 +55,8 @@ UNINITIALIZED_TEST(StackUnwindingWinX64) {
" var o = instance.foo;\n"
" instance.foo = o + 1;\n"
" }\n"
- "}\n";
+ "};\n"
+ "%PrepareFunctionForOptimization(start);\n";
// This test may fail on Windows 7
if (!::IsWindows8OrGreater()) {
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index a84f0425dd..7166b6b41f 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -32,15 +32,15 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/platform/elapsed-timer.h"
+#include "src/execution/messages.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/messages.h"
-#include "src/objects-inl.h"
-#include "src/unicode-decoder.h"
+#include "src/objects/objects-inl.h"
+#include "src/strings/unicode-decoder.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -321,23 +321,23 @@ void ConsStringGenerationData::Reset() {
}
void AccumulateStats(ConsString cons_string, ConsStringStats* stats) {
- int left_length = cons_string->first()->length();
- int right_length = cons_string->second()->length();
- CHECK(cons_string->length() == left_length + right_length);
+ int left_length = cons_string.first().length();
+ int right_length = cons_string.second().length();
+ CHECK(cons_string.length() == left_length + right_length);
// Check left side.
- bool left_is_cons = cons_string->first()->IsConsString();
+ bool left_is_cons = cons_string.first().IsConsString();
if (left_is_cons) {
stats->left_traversals_++;
- AccumulateStats(ConsString::cast(cons_string->first()), stats);
+ AccumulateStats(ConsString::cast(cons_string.first()), stats);
} else {
CHECK_NE(left_length, 0);
stats->leaves_++;
stats->chars_ += left_length;
}
// Check right side.
- if (cons_string->second()->IsConsString()) {
+ if (cons_string.second().IsConsString()) {
stats->right_traversals_++;
- AccumulateStats(ConsString::cast(cons_string->second()), stats);
+ AccumulateStats(ConsString::cast(cons_string.second()), stats);
} else {
if (right_length == 0) {
stats->empty_leaves_++;
@@ -366,7 +366,7 @@ void AccumulateStatsWithOperator(ConsString cons_string,
// Accumulate stats.
CHECK_EQ(0, offset);
stats->leaves_++;
- stats->chars_ += string->length();
+ stats->chars_ += string.length();
}
}
@@ -640,10 +640,10 @@ TEST(ConsStringWithEmptyFirstFlatten) {
static void VerifyCharacterStream(String flat_string, String cons_string) {
// Do not want to test ConString traversal on flat string.
- CHECK(flat_string->IsFlat() && !flat_string->IsConsString());
- CHECK(cons_string->IsConsString());
+ CHECK(flat_string.IsFlat() && !flat_string.IsConsString());
+ CHECK(cons_string.IsConsString());
// TODO(dcarney) Test stream reset as well.
- int length = flat_string->length();
+ int length = flat_string.length();
// Iterate start search in multiple places in the string.
int outer_iterations = length > 20 ? 20 : length;
for (int j = 0; j <= outer_iterations; j++) {
@@ -654,7 +654,7 @@ static void VerifyCharacterStream(String flat_string, String cons_string) {
StringCharacterStream flat_stream(flat_string, offset);
StringCharacterStream cons_stream(cons_string, offset);
for (int i = offset; i < length; i++) {
- uint16_t c = flat_string->Get(i);
+ uint16_t c = flat_string.Get(i);
CHECK(flat_stream.HasMore());
CHECK(cons_stream.HasMore());
CHECK_EQ(c, flat_stream.GetNext());
@@ -704,7 +704,7 @@ void TestStringCharacterStream(BuildString build, int test_cases) {
cons_string_stats.VerifyEqual(data.stats_);
VerifyConsString(cons_string, &data);
String flat_string_ptr = flat_string->IsConsString()
- ? ConsString::cast(*flat_string)->first()
+ ? ConsString::cast(*flat_string).first()
: *flat_string;
VerifyCharacterStream(flat_string_ptr, *cons_string);
}
@@ -830,7 +830,6 @@ static void InitializeGenerationData(
break;
default:
UNREACHABLE();
- break;
}
// Must remove the influence of the above decision.
test_case /= kBalances;
@@ -854,7 +853,6 @@ static void InitializeGenerationData(
break;
default:
UNREACHABLE();
- break;
}
// Must remove the influence of the above decision.
test_case /= kTreeLengths;
@@ -916,10 +914,10 @@ TEST(Utf8Conversion) {
const char* one_byte_string = "abcdef12345";
int len = v8::String::NewFromUtf8(CcTest::isolate(), one_byte_string,
v8::NewStringType::kNormal,
- StrLength(one_byte_string))
+ static_cast<int>(strlen(one_byte_string)))
.ToLocalChecked()
->Utf8Length(CcTest::isolate());
- CHECK_EQ(StrLength(one_byte_string), len);
+ CHECK_EQ(strlen(one_byte_string), len);
// A mixed one-byte and two-byte string
// U+02E4 -> CB A4
// U+0064 -> 64
@@ -1228,7 +1226,6 @@ TEST(JSONStringifySliceMadeExternal) {
}
TEST(JSONStringifyWellFormed) {
- FLAG_harmony_json_stringify = true;
CcTest::InitializeVM();
v8::HandleScope handle_scope(CcTest::isolate());
v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
@@ -1384,11 +1381,11 @@ TEST(SliceFromCons) {
// After slicing, the original string becomes a flat cons.
CHECK(parent->IsFlat());
CHECK(slice->IsSlicedString());
- CHECK_EQ(SlicedString::cast(*slice)->parent(),
- // Parent could have been short-circuited.
- parent->IsConsString() ? ConsString::cast(*parent)->first()
- : *parent);
- CHECK(SlicedString::cast(*slice)->parent()->IsSeqString());
+ CHECK_EQ(
+ SlicedString::cast(*slice).parent(),
+ // Parent could have been short-circuited.
+ parent->IsConsString() ? ConsString::cast(*parent).first() : *parent);
+ CHECK(SlicedString::cast(*slice).parent().IsSeqString());
CHECK(slice->IsFlat());
}
@@ -1399,7 +1396,8 @@ class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
: data_(vector) {}
~OneByteVectorResource() override = default;
size_t length() const override { return data_.length(); }
- const char* data() const override { return data_.start(); }
+ const char* data() const override { return data_.begin(); }
+
private:
i::Vector<const char> data_;
};
@@ -1453,8 +1451,8 @@ TEST(SliceFromExternal) {
Handle<String> slice = factory->NewSubString(string, 1, 25);
CHECK(slice->IsSlicedString());
CHECK(string->IsExternalString());
- CHECK_EQ(SlicedString::cast(*slice)->parent(), *string);
- CHECK(SlicedString::cast(*slice)->parent()->IsExternalString());
+ CHECK_EQ(SlicedString::cast(*slice).parent(), *string);
+ CHECK(SlicedString::cast(*slice).parent().IsExternalString());
CHECK(slice->IsFlat());
// This avoids the GC from trying to free stack allocated resources.
i::Handle<i::ExternalOneByteString>::cast(string)->SetResource(
@@ -1509,14 +1507,14 @@ TEST(SliceFromSlice) {
CHECK(result->IsString());
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(string->IsSlicedString());
- CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
+ CHECK(SlicedString::cast(*string).parent().IsSeqString());
CHECK_EQ(0, strcmp("bcdefghijklmnopqrstuvwxy", string->ToCString().get()));
result = CompileRun(slice_from_slice);
CHECK(result->IsString());
string = v8::Utils::OpenHandle(v8::String::Cast(*result));
CHECK(string->IsSlicedString());
- CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
+ CHECK(SlicedString::cast(*string).parent().IsSeqString());
CHECK_EQ(0, strcmp("cdefghijklmnopqrstuvwx", string->ToCString().get()));
}
@@ -1706,7 +1704,6 @@ TEST(InvalidExternalString) {
}
}
-
#define INVALID_STRING_TEST(FUN, TYPE) \
TEST(StringOOM##FUN) { \
CcTest::InitializeVM(); \
@@ -1716,9 +1713,9 @@ TEST(InvalidExternalString) {
static const int invalid = String::kMaxLength + 1; \
HandleScope scope(isolate); \
Vector<TYPE> dummy = Vector<TYPE>::New(invalid); \
- memset(dummy.start(), 0x0, dummy.length() * sizeof(TYPE)); \
+ memset(dummy.begin(), 0x0, dummy.length() * sizeof(TYPE)); \
CHECK(isolate->factory()->FUN(Vector<const TYPE>::cast(dummy)).is_null()); \
- memset(dummy.start(), 0x20, dummy.length() * sizeof(TYPE)); \
+ memset(dummy.begin(), 0x20, dummy.length() * sizeof(TYPE)); \
CHECK(isolate->has_pending_exception()); \
isolate->clear_pending_exception(); \
dummy.Dispose(); \
@@ -1948,8 +1945,8 @@ TEST(Regress876759) {
factory->InternalizeString(parent);
CHECK(parent->IsThinString());
Handle<String> grandparent =
- handle(ThinString::cast(*parent)->actual(), isolate);
- CHECK_EQ(*parent, SlicedString::cast(*sliced)->parent());
+ handle(ThinString::cast(*parent).actual(), isolate);
+ CHECK_EQ(*parent, SlicedString::cast(*sliced).parent());
OneByteStringResource* resource =
new OneByteStringResource(external_one_byte_buf, kLength);
grandparent->MakeExternal(resource);
diff --git a/deps/v8/test/cctest/test-strtod.cc b/deps/v8/test/cctest/test-strtod.cc
index 68cf9783b1..2076b5b4ec 100644
--- a/deps/v8/test/cctest/test-strtod.cc
+++ b/deps/v8/test/cctest/test-strtod.cc
@@ -27,40 +27,34 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/bignum.h"
-#include "src/diy-fp.h"
-#include "src/double.h"
-#include "src/strtod.h"
+#include "src/numbers/bignum.h"
+#include "src/numbers/diy-fp.h"
+#include "src/numbers/double.h"
+#include "src/numbers/strtod.h"
#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
namespace test_strtod {
-static Vector<const char> StringToVector(const char* str) {
- return Vector<const char>(str, StrLength(str));
-}
-
-
static double StrtodChar(const char* str, int exponent) {
- return Strtod(StringToVector(str), exponent);
+ return Strtod(CStrVector(str), exponent);
}
-
TEST(Strtod) {
Vector<const char> vector;
- vector = StringToVector("0");
+ vector = CStrVector("0");
CHECK_EQ(0.0, Strtod(vector, 1));
CHECK_EQ(0.0, Strtod(vector, 2));
CHECK_EQ(0.0, Strtod(vector, -2));
CHECK_EQ(0.0, Strtod(vector, -999));
CHECK_EQ(0.0, Strtod(vector, +999));
- vector = StringToVector("1");
+ vector = CStrVector("1");
CHECK_EQ(1.0, Strtod(vector, 0));
CHECK_EQ(10.0, Strtod(vector, 1));
CHECK_EQ(100.0, Strtod(vector, 2));
@@ -79,7 +73,7 @@ TEST(Strtod) {
CHECK_EQ(1e-25, Strtod(vector, -25));
CHECK_EQ(1e-39, Strtod(vector, -39));
- vector = StringToVector("2");
+ vector = CStrVector("2");
CHECK_EQ(2.0, Strtod(vector, 0));
CHECK_EQ(20.0, Strtod(vector, 1));
CHECK_EQ(200.0, Strtod(vector, 2));
@@ -98,7 +92,7 @@ TEST(Strtod) {
CHECK_EQ(2e-25, Strtod(vector, -25));
CHECK_EQ(2e-39, Strtod(vector, -39));
- vector = StringToVector("9");
+ vector = CStrVector("9");
CHECK_EQ(9.0, Strtod(vector, 0));
CHECK_EQ(90.0, Strtod(vector, 1));
CHECK_EQ(900.0, Strtod(vector, 2));
@@ -117,7 +111,7 @@ TEST(Strtod) {
CHECK_EQ(9e-25, Strtod(vector, -25));
CHECK_EQ(9e-39, Strtod(vector, -39));
- vector = StringToVector("12345");
+ vector = CStrVector("12345");
CHECK_EQ(12345.0, Strtod(vector, 0));
CHECK_EQ(123450.0, Strtod(vector, 1));
CHECK_EQ(1234500.0, Strtod(vector, 2));
@@ -139,7 +133,7 @@ TEST(Strtod) {
CHECK_EQ(12345e-25, Strtod(vector, -25));
CHECK_EQ(12345e-39, Strtod(vector, -39));
- vector = StringToVector("12345678901234");
+ vector = CStrVector("12345678901234");
CHECK_EQ(12345678901234.0, Strtod(vector, 0));
CHECK_EQ(123456789012340.0, Strtod(vector, 1));
CHECK_EQ(1234567890123400.0, Strtod(vector, 2));
@@ -161,7 +155,7 @@ TEST(Strtod) {
CHECK_EQ(12345678901234e-25, Strtod(vector, -25));
CHECK_EQ(12345678901234e-39, Strtod(vector, -39));
- vector = StringToVector("123456789012345");
+ vector = CStrVector("123456789012345");
CHECK_EQ(123456789012345.0, Strtod(vector, 0));
CHECK_EQ(1234567890123450.0, Strtod(vector, 1));
CHECK_EQ(12345678901234500.0, Strtod(vector, 2));
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index 4a6b6af545..006bca9f1f 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -30,11 +30,11 @@
// of ConsStrings. These operations may not be very fast, but they
// should be possible without getting errors due to too deep recursion.
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects.h"
#include "src/objects/name-inl.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
+#include "src/objects/objects.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm.cc b/deps/v8/test/cctest/test-sync-primitives-arm.cc
index e32136b81d..84dc0575cf 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm.cc
@@ -25,15 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/assembler-helper-arm.h"
#include "test/cctest/cctest.h"
-#include "src/assembler-inl.h"
-#include "src/disassembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler.h"
-#include "src/simulator.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-sync-primitives-arm64.cc b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
index 3ea282e972..38adf8486a 100644
--- a/deps/v8/test/cctest/test-sync-primitives-arm64.cc
+++ b/deps/v8/test/cctest/test-sync-primitives-arm64.cc
@@ -25,13 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
-#include "src/arm64/simulator-arm64.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/execution/arm64/simulator-arm64.h"
#include "src/heap/factory.h"
-#include "src/macro-assembler-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -204,8 +204,7 @@ void TestInvalidateExclusiveAccess(TestData initial_data, MemoryAccess access1,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
TestData t = initial_data;
Simulator::current(isolate)->Call<void>(code->entry(), &t);
@@ -276,8 +275,7 @@ int ExecuteMemoryAccess(Isolate* isolate, TestData* test_data,
CodeDesc desc;
masm.GetCode(isolate, &desc);
- Handle<Code> code =
- isolate->factory()->NewCode(desc, Code::STUB, Handle<Code>());
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc, Code::STUB).Build();
Simulator::current(isolate)->Call<void>(code->entry(), test_data);
return Simulator::current(isolate)->wreg(0);
}
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 604fd77ed9..e21c23e82c 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -25,10 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/api-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
+#include "src/api/api-inl.h"
+#include "src/execution/isolate.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "src/base/platform/platform.h"
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index 679c7e4d85..be76f5e93f 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -27,7 +27,7 @@
#include "test/cctest/cctest.h"
-#include "src/thread-id.h"
+#include "src/execution/thread-id.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 36a207ffd1..7b3c215d69 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -4,7 +4,7 @@
#include <stdlib.h>
#include <string.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/template-utils.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index c43d7ba9b7..5f47cf8419 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -5,15 +5,15 @@
#include <stdlib.h>
#include <utility>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/compilation-cache.h"
-#include "src/execution.h"
-#include "src/field-type.h"
-#include "src/global-handles.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/execution.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
-#include "src/objects-inl.h"
-#include "src/transitions-inl.h"
+#include "src/objects/field-type.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/transitions-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/test-transitions.h"
@@ -151,7 +151,7 @@ TEST(TransitionArray_DifferentFieldNames) {
for (int i = 0; i < PROPS_COUNT; i++) {
EmbeddedVector<char, 64> buffer;
SNPrintF(buffer, "prop%d", i);
- Handle<String> name = factory->InternalizeUtf8String(buffer.start());
+ Handle<String> name = factory->InternalizeUtf8String(buffer.begin());
Handle<Map> map =
Map::CopyWithField(isolate, map0, name, FieldType::Any(isolate),
attributes, PropertyConstness::kMutable,
@@ -243,7 +243,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
for (int i = 0; i < PROPS_COUNT; i++) {
EmbeddedVector<char, 64> buffer;
SNPrintF(buffer, "prop%d", i);
- Handle<String> name = factory->InternalizeUtf8String(buffer.start());
+ Handle<String> name = factory->InternalizeUtf8String(buffer.begin());
Handle<Map> map =
Map::CopyWithField(isolate, map0, name, FieldType::Any(isolate), NONE,
PropertyConstness::kMutable,
@@ -289,7 +289,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
if (key == *name) {
// Attributes transition.
PropertyAttributes attributes =
- target->GetLastDescriptorDetails().attributes();
+ target.GetLastDescriptorDetails().attributes();
CHECK_EQ(*attr_maps[static_cast<int>(attributes)], target);
} else {
for (int j = 0; j < PROPS_COUNT; j++) {
diff --git a/deps/v8/test/cctest/test-transitions.h b/deps/v8/test/cctest/test-transitions.h
index f9def25a56..724eb3d3c5 100644
--- a/deps/v8/test/cctest/test-transitions.h
+++ b/deps/v8/test/cctest/test-transitions.h
@@ -5,7 +5,7 @@
#ifndef V8_TEST_CCTEST_TEST_TRANSITIONS_H_
#define V8_TEST_CCTEST_TEST_TRANSITIONS_H_
-#include "src/transitions.h"
+#include "src/objects/transitions.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index a0f9385bf1..b14debdba7 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -4,12 +4,12 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "src/heap/heap.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 73257cc057..fc213b5bf9 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -5,10 +5,10 @@
#include <vector>
#include "src/compiler/types.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory-inl.h"
#include "src/heap/heap.h"
-#include "src/isolate.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "test/cctest/cctest.h"
#include "test/common/types-fuzz.h"
@@ -25,11 +25,11 @@ static bool IsInteger(double x) {
return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
}
-typedef uint32_t bitset;
+using bitset = uint32_t;
struct Tests {
- typedef Types::TypeVector::iterator TypeIterator;
- typedef Types::ValueVector::iterator ValueIterator;
+ using TypeIterator = Types::TypeVector::iterator;
+ using ValueIterator = Types::ValueVector::iterator;
Isolate* isolate;
HandleScope scope;
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 8918ab57a7..7f90a85625 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -5,25 +5,25 @@
#include <stdlib.h>
#include <utility>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/accessors.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/overflowing-math.h"
-#include "src/compilation-cache.h"
-#include "src/execution.h"
-#include "src/field-type.h"
-#include "src/global-handles.h"
+#include "src/builtins/accessors.h"
+#include "src/codegen/compilation-cache.h"
+#include "src/execution/execution.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/ic/ic.h"
-#include "src/layout-descriptor.h"
-#include "src/objects-inl.h"
#include "src/objects/api-callbacks.h"
+#include "src/objects/field-type.h"
#include "src/objects/heap-number-inl.h"
-#include "src/property.h"
+#include "src/objects/layout-descriptor.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -41,8 +41,8 @@ namespace test_unboxed_doubles {
static void InitializeVerifiedMapDescriptors(
Isolate* isolate, Map map, DescriptorArray descriptors,
LayoutDescriptor layout_descriptor) {
- map->InitializeDescriptors(isolate, descriptors, layout_descriptor);
- CHECK(layout_descriptor->IsConsistentWithMap(map, true));
+ map.InitializeDescriptors(isolate, descriptors, layout_descriptor);
+ CHECK(layout_descriptor.IsConsistentWithMap(map, true));
}
static Handle<String> MakeString(const char* str) {
@@ -55,7 +55,7 @@ static Handle<String> MakeString(const char* str) {
static Handle<String> MakeName(const char* str, int suffix) {
EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s%d", str, suffix);
- return MakeString(buffer.start());
+ return MakeString(buffer.begin());
}
@@ -69,19 +69,19 @@ Handle<JSObject> GetObject(const char* name) {
}
static double GetDoubleFieldValue(JSObject obj, FieldIndex field_index) {
- if (obj->IsUnboxedDoubleField(field_index)) {
- return obj->RawFastDoublePropertyAt(field_index);
+ if (obj.IsUnboxedDoubleField(field_index)) {
+ return obj.RawFastDoublePropertyAt(field_index);
} else {
- Object value = obj->RawFastPropertyAt(field_index);
- CHECK(value->IsMutableHeapNumber());
- return MutableHeapNumber::cast(value)->value();
+ Object value = obj.RawFastPropertyAt(field_index);
+ CHECK(value.IsMutableHeapNumber());
+ return MutableHeapNumber::cast(value).value();
}
}
void WriteToField(JSObject object, int descriptor, Object value) {
- DescriptorArray descriptors = object->map()->instance_descriptors();
- PropertyDetails details = descriptors->GetDetails(descriptor);
- object->WriteToField(descriptor, details, value);
+ DescriptorArray descriptors = object.map().instance_descriptors();
+ PropertyDetails details = descriptors.GetDetails(descriptor);
+ object.WriteToField(descriptor, details, value);
}
const int kNumberOfBits = 32;
@@ -112,7 +112,7 @@ static Handle<DescriptorArray> CreateDescriptorArray(Isolate* isolate,
for (int i = 0; i < kPropsCount; i++) {
EmbeddedVector<char, 64> buffer;
SNPrintF(buffer, "prop%d", i);
- Handle<String> name = factory->InternalizeUtf8String(buffer.start());
+ Handle<String> name = factory->InternalizeUtf8String(buffer.begin());
TestPropertyKind kind = props[i];
@@ -142,32 +142,32 @@ TEST(LayoutDescriptorBasicFast) {
LayoutDescriptor layout_desc = LayoutDescriptor::FastPointerLayout();
- CHECK(!layout_desc->IsSlowLayout());
- CHECK(layout_desc->IsFastPointerLayout());
- CHECK_EQ(kBitsInSmiLayout, layout_desc->capacity());
+ CHECK(!layout_desc.IsSlowLayout());
+ CHECK(layout_desc.IsFastPointerLayout());
+ CHECK_EQ(kBitsInSmiLayout, layout_desc.capacity());
for (int i = 0; i < kBitsInSmiLayout + 13; i++) {
- CHECK(layout_desc->IsTagged(i));
+ CHECK(layout_desc.IsTagged(i));
}
- CHECK(layout_desc->IsTagged(-1));
- CHECK(layout_desc->IsTagged(-12347));
- CHECK(layout_desc->IsTagged(15635));
- CHECK(layout_desc->IsFastPointerLayout());
+ CHECK(layout_desc.IsTagged(-1));
+ CHECK(layout_desc.IsTagged(-12347));
+ CHECK(layout_desc.IsTagged(15635));
+ CHECK(layout_desc.IsFastPointerLayout());
for (int i = 0; i < kBitsInSmiLayout; i++) {
- layout_desc = layout_desc->SetTaggedForTesting(i, false);
- CHECK(!layout_desc->IsTagged(i));
- layout_desc = layout_desc->SetTaggedForTesting(i, true);
- CHECK(layout_desc->IsTagged(i));
+ layout_desc = layout_desc.SetTaggedForTesting(i, false);
+ CHECK(!layout_desc.IsTagged(i));
+ layout_desc = layout_desc.SetTaggedForTesting(i, true);
+ CHECK(layout_desc.IsTagged(i));
}
- CHECK(layout_desc->IsFastPointerLayout());
+ CHECK(layout_desc.IsFastPointerLayout());
int sequence_length;
- CHECK_EQ(true, layout_desc->IsTagged(0, std::numeric_limits<int>::max(),
- &sequence_length));
+ CHECK_EQ(true, layout_desc.IsTagged(0, std::numeric_limits<int>::max(),
+ &sequence_length));
CHECK_EQ(std::numeric_limits<int>::max(), sequence_length);
- CHECK(layout_desc->IsTagged(0, 7, &sequence_length));
+ CHECK(layout_desc.IsTagged(0, 7, &sequence_length));
CHECK_EQ(7, sequence_length);
}
@@ -252,13 +252,13 @@ TEST(LayoutDescriptorBasicSlow) {
LayoutDescriptor layout_desc = *layout_descriptor;
// Play with the bits but leave it in consistent state with map at the end.
for (int i = 1; i < kPropsCount - 1; i++) {
- layout_desc = layout_desc->SetTaggedForTesting(i, false);
- CHECK(!layout_desc->IsTagged(i));
- layout_desc = layout_desc->SetTaggedForTesting(i, true);
- CHECK(layout_desc->IsTagged(i));
+ layout_desc = layout_desc.SetTaggedForTesting(i, false);
+ CHECK(!layout_desc.IsTagged(i));
+ layout_desc = layout_desc.SetTaggedForTesting(i, true);
+ CHECK(layout_desc.IsTagged(i));
}
- CHECK(layout_desc->IsSlowLayout());
- CHECK(!layout_desc->IsFastPointerLayout());
+ CHECK(layout_desc.IsSlowLayout());
+ CHECK(!layout_desc.IsFastPointerLayout());
CHECK(layout_descriptor->IsConsistentWithMap(*map, true));
}
}
@@ -282,11 +282,11 @@ static void TestLayoutDescriptorQueries(int layout_descriptor_length,
++cur_bit_flip_index;
CHECK(i < bit_flip_positions[cur_bit_flip_index]); // check test data
}
- layout_desc = layout_desc->SetTaggedForTesting(i, tagged);
+ layout_desc = layout_desc.SetTaggedForTesting(i, tagged);
}
}
- if (layout_desc->IsFastPointerLayout()) {
+ if (layout_desc.IsFastPointerLayout()) {
return;
}
@@ -299,30 +299,29 @@ static void TestLayoutDescriptorQueries(int layout_descriptor_length,
tagged = !tagged;
++cur_bit_flip_index;
}
- CHECK_EQ(tagged, layout_desc->IsTagged(i));
+ CHECK_EQ(tagged, layout_desc.IsTagged(i));
int next_bit_flip_position = bit_flip_positions[cur_bit_flip_index];
int expected_sequence_length;
- if (next_bit_flip_position < layout_desc->capacity()) {
+ if (next_bit_flip_position < layout_desc.capacity()) {
expected_sequence_length = next_bit_flip_position - i;
} else {
expected_sequence_length = tagged ? std::numeric_limits<int>::max()
- : (layout_desc->capacity() - i);
+ : (layout_desc.capacity() - i);
}
expected_sequence_length =
Min(expected_sequence_length, max_sequence_length);
int sequence_length;
CHECK_EQ(tagged,
- layout_desc->IsTagged(i, max_sequence_length, &sequence_length));
+ layout_desc.IsTagged(i, max_sequence_length, &sequence_length));
CHECK_GT(sequence_length, 0);
CHECK_EQ(expected_sequence_length, sequence_length);
}
int sequence_length;
- CHECK_EQ(true,
- layout_desc->IsTagged(layout_descriptor_length,
- max_sequence_length, &sequence_length));
+ CHECK_EQ(true, layout_desc.IsTagged(layout_descriptor_length,
+ max_sequence_length, &sequence_length));
CHECK_EQ(max_sequence_length, sequence_length);
}
}
@@ -334,7 +333,7 @@ static void TestLayoutDescriptorQueriesFast(int max_sequence_length) {
int sequence_length;
for (int i = 0; i < kNumberOfBits; i++) {
CHECK_EQ(true,
- layout_desc->IsTagged(i, max_sequence_length, &sequence_length));
+ layout_desc.IsTagged(i, max_sequence_length, &sequence_length));
CHECK_GT(sequence_length, 0);
CHECK_EQ(max_sequence_length, sequence_length);
}
@@ -624,17 +623,17 @@ TEST(LayoutDescriptorCreateNewSlow) {
LayoutDescriptor layout_desc = *layout_descriptor;
CHECK_EQ(layout_desc, LayoutDescriptor::cast(layout_desc));
CHECK_EQ(layout_desc, LayoutDescriptor::cast_gc_safe(layout_desc));
- CHECK(layout_desc->IsSlowLayout());
+ CHECK(layout_desc.IsSlowLayout());
// Now make it look like a forwarding pointer to layout_descriptor_copy.
- MapWord map_word = layout_desc->map_word();
+ MapWord map_word = layout_desc.map_word();
CHECK(!map_word.IsForwardingAddress());
- layout_desc->set_map_word(
+ layout_desc.set_map_word(
MapWord::FromForwardingAddress(*layout_descriptor_copy));
- CHECK(layout_desc->map_word().IsForwardingAddress());
+ CHECK(layout_desc.map_word().IsForwardingAddress());
CHECK_EQ(layout_desc, LayoutDescriptor::cast_gc_safe(layout_desc));
// Restore it back.
- layout_desc->set_map_word(map_word);
+ layout_desc.set_map_word(map_word);
CHECK_EQ(layout_desc, LayoutDescriptor::cast(layout_desc));
}
}
@@ -656,7 +655,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
for (int i = 0; i < kPropsCount; i++) {
EmbeddedVector<char, 64> buffer;
SNPrintF(buffer, "prop%d", i);
- Handle<String> name = factory->InternalizeUtf8String(buffer.start());
+ Handle<String> name = factory->InternalizeUtf8String(buffer.begin());
Handle<LayoutDescriptor> layout_descriptor;
TestPropertyKind kind = props[i];
@@ -801,10 +800,10 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
Map map = *last_map;
for (int i = 0; i < descriptors_length; i++) {
maps[descriptors_length - 1 - i] = handle(map, isolate);
- Object maybe_map = map->GetBackPointer();
- CHECK(maybe_map->IsMap());
+ Object maybe_map = map.GetBackPointer();
+ CHECK(maybe_map.IsMap());
map = Map::cast(maybe_map);
- CHECK(!map->is_stable());
+ CHECK(!map.is_stable());
}
CHECK_EQ(1, maps[0]->NumberOfOwnDescriptors());
}
@@ -816,7 +815,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
map = maps[i];
LayoutDescriptor layout_desc = map->layout_descriptor();
- if (layout_desc->IsSlowLayout()) {
+ if (layout_desc.IsSlowLayout()) {
switched_to_slow_mode = true;
CHECK_EQ(*full_layout_descriptor, layout_desc);
} else {
@@ -829,12 +828,12 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
bool is_inobject = field_index < map->GetInObjectProperties();
for (int bit = 0; bit < field_width_in_words; bit++) {
CHECK_EQ(is_inobject && details.representation().IsDouble(),
- !layout_desc->IsTagged(field_index + bit));
+ !layout_desc.IsTagged(field_index + bit));
}
- CHECK(layout_desc->IsTagged(field_index + field_width_in_words));
+ CHECK(layout_desc.IsTagged(field_index + field_width_in_words));
}
}
- CHECK(map->layout_descriptor()->IsConsistentWithMap(*map));
+ CHECK(map->layout_descriptor().IsConsistentWithMap(*map));
}
Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
@@ -960,7 +959,7 @@ TEST(Regress436816) {
Address fake_address = static_cast<Address>(~kHeapObjectTagMask);
HeapObject fake_object = HeapObject::FromAddress(fake_address);
- CHECK(fake_object->IsHeapObject());
+ CHECK(fake_object.IsHeapObject());
uint64_t boom_value = bit_cast<uint64_t>(fake_object);
for (int i = 0; i < kPropsCount; i++) {
@@ -969,13 +968,13 @@ TEST(Regress436816) {
object->RawFastDoublePropertyAsBitsAtPut(index, boom_value);
}
CHECK(object->HasFastProperties());
- CHECK(!object->map()->HasFastPointerLayout());
+ CHECK(!object->map().HasFastPointerLayout());
Handle<Map> normalized_map =
Map::Normalize(isolate, map, KEEP_INOBJECT_PROPERTIES, "testing");
JSObject::MigrateToMap(object, normalized_map);
CHECK(!object->HasFastProperties());
- CHECK(object->map()->HasFastPointerLayout());
+ CHECK(object->map().HasFastPointerLayout());
// Trigger GCs and heap verification.
CcTest::CollectAllGarbage();
@@ -1004,10 +1003,10 @@ TEST(DescriptorArrayTrimming) {
any_type, NONE, PropertyConstness::kMutable,
Representation::Double(), INSERT_TRANSITION)
.ToHandleChecked();
- CHECK(map->layout_descriptor()->IsConsistentWithMap(*map, true));
- CHECK(map->layout_descriptor()->IsSlowLayout());
+ CHECK(map->layout_descriptor().IsConsistentWithMap(*map, true));
+ CHECK(map->layout_descriptor().IsSlowLayout());
CHECK(map->owns_descriptors());
- CHECK_EQ(8, map->layout_descriptor()->length());
+ CHECK_EQ(8, map->layout_descriptor().length());
{
// Add transitions to double fields.
@@ -1019,35 +1018,35 @@ TEST(DescriptorArrayTrimming) {
any_type, NONE, PropertyConstness::kMutable,
Representation::Double(), INSERT_TRANSITION)
.ToHandleChecked();
- CHECK(tmp_map->layout_descriptor()->IsConsistentWithMap(*tmp_map, true));
+ CHECK(tmp_map->layout_descriptor().IsConsistentWithMap(*tmp_map, true));
}
// Check that descriptors are shared.
CHECK(tmp_map->owns_descriptors());
CHECK_EQ(map->instance_descriptors(), tmp_map->instance_descriptors());
CHECK_EQ(map->layout_descriptor(), tmp_map->layout_descriptor());
}
- CHECK(map->layout_descriptor()->IsSlowLayout());
- CHECK_EQ(16, map->layout_descriptor()->length());
+ CHECK(map->layout_descriptor().IsSlowLayout());
+ CHECK_EQ(16, map->layout_descriptor().length());
// The unused tail of the layout descriptor is now "durty" because of sharing.
- CHECK(map->layout_descriptor()->IsConsistentWithMap(*map));
+ CHECK(map->layout_descriptor().IsConsistentWithMap(*map));
for (int i = kSplitFieldIndex + 1; i < kTrimmedLayoutDescriptorLength; i++) {
- CHECK(!map->layout_descriptor()->IsTagged(i));
+ CHECK(!map->layout_descriptor().IsTagged(i));
}
CHECK_LT(map->NumberOfOwnDescriptors(),
- map->instance_descriptors()->number_of_descriptors());
+ map->instance_descriptors().number_of_descriptors());
// Call GC that should trim both |map|'s descriptor array and layout
// descriptor.
CcTest::CollectAllGarbage();
// The unused tail of the layout descriptor is now "clean" again.
- CHECK(map->layout_descriptor()->IsConsistentWithMap(*map, true));
+ CHECK(map->layout_descriptor().IsConsistentWithMap(*map, true));
CHECK(map->owns_descriptors());
CHECK_EQ(map->NumberOfOwnDescriptors(),
- map->instance_descriptors()->number_of_descriptors());
- CHECK(map->layout_descriptor()->IsSlowLayout());
- CHECK_EQ(8, map->layout_descriptor()->length());
+ map->instance_descriptors().number_of_descriptors());
+ CHECK(map->layout_descriptor().IsSlowLayout());
+ CHECK_EQ(8, map->layout_descriptor().length());
{
// Add transitions to tagged fields.
@@ -1059,18 +1058,18 @@ TEST(DescriptorArrayTrimming) {
any_type, NONE, PropertyConstness::kMutable,
Representation::Tagged(), INSERT_TRANSITION)
.ToHandleChecked();
- CHECK(tmp_map->layout_descriptor()->IsConsistentWithMap(*tmp_map, true));
+ CHECK(tmp_map->layout_descriptor().IsConsistentWithMap(*tmp_map, true));
}
tmp_map = Map::CopyWithField(isolate, tmp_map, MakeString("dbl"), any_type,
NONE, PropertyConstness::kMutable,
Representation::Double(), INSERT_TRANSITION)
.ToHandleChecked();
- CHECK(tmp_map->layout_descriptor()->IsConsistentWithMap(*tmp_map, true));
+ CHECK(tmp_map->layout_descriptor().IsConsistentWithMap(*tmp_map, true));
// Check that descriptors are shared.
CHECK(tmp_map->owns_descriptors());
CHECK_EQ(map->instance_descriptors(), tmp_map->instance_descriptors());
}
- CHECK(map->layout_descriptor()->IsSlowLayout());
+ CHECK(map->layout_descriptor().IsSlowLayout());
}
@@ -1417,7 +1416,7 @@ TEST(LayoutDescriptorSharing) {
// Layout descriptors should be shared with |split_map|.
CHECK(map1->owns_descriptors());
CHECK_EQ(*split_layout_descriptor, map1->layout_descriptor());
- CHECK(map1->layout_descriptor()->IsConsistentWithMap(*map1, true));
+ CHECK(map1->layout_descriptor().IsConsistentWithMap(*map1, true));
Handle<Map> map2 =
Map::CopyWithField(isolate, split_map, MakeString("bar"), any_type, NONE,
@@ -1428,7 +1427,7 @@ TEST(LayoutDescriptorSharing) {
// Layout descriptors should not be shared with |split_map|.
CHECK(map2->owns_descriptors());
CHECK_NE(*split_layout_descriptor, map2->layout_descriptor());
- CHECK(map2->layout_descriptor()->IsConsistentWithMap(*map2, true));
+ CHECK(map2->layout_descriptor().IsConsistentWithMap(*map2, true));
}
diff --git a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
index d98e2739d5..2d19f5c835 100644
--- a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
+++ b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
@@ -4,7 +4,7 @@
#include <stdlib.h>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
namespace {
diff --git a/deps/v8/test/cctest/test-unwinder.cc b/deps/v8/test/cctest/test-unwinder.cc
index 5b3f3ef98e..c452db3c87 100644
--- a/deps/v8/test/cctest/test-unwinder.cc
+++ b/deps/v8/test/cctest/test-unwinder.cc
@@ -4,10 +4,10 @@
#include "include/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/builtins/builtins.h"
+#include "src/execution/isolate.h"
#include "src/heap/spaces.h"
-#include "src/isolate.h"
#include "src/objects/code-inl.h"
#include "test/cctest/cctest.h"
@@ -50,9 +50,9 @@ TEST(Unwind_BuiltinPCInMiddle_Success) {
// Put the current PC inside of a valid builtin.
Code builtin = i_isolate->builtins()->builtin(Builtins::kStringEqual);
const uintptr_t offset = 40;
- CHECK_LT(offset, builtin->InstructionSize());
+ CHECK_LT(offset, builtin.InstructionSize());
register_state.pc =
- reinterpret_cast<void*>(builtin->InstructionStart() + offset);
+ reinterpret_cast<void*>(builtin.InstructionStart() + offset);
bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
stack_base);
@@ -97,7 +97,7 @@ TEST(Unwind_BuiltinPCAtStart_Success) {
// Put the current PC at the start of a valid builtin, so that we are setting
// up the frame.
Code builtin = i_isolate->builtins()->builtin(Builtins::kStringEqual);
- register_state.pc = reinterpret_cast<void*>(builtin->InstructionStart());
+ register_state.pc = reinterpret_cast<void*>(builtin.InstructionStart());
bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
stack_base);
@@ -114,7 +114,8 @@ const char* foo_source = R"(
let y = x ^ b;
let z = y / a;
return x + y - z;
- }
+ };
+ %PrepareFunctionForOptimization(foo);
foo(1, 2);
foo(1, 2);
%OptimizeFunctionOnNextCall(foo);
@@ -153,16 +154,16 @@ TEST(Unwind_CodeObjectPCInMiddle_Success) {
// Put the current PC inside of the created code object.
AbstractCode abstract_code = foo->abstract_code();
// We don't produce optimized code when run with --no-opt.
- if (!abstract_code->IsCode() && FLAG_opt == false) return;
- CHECK(abstract_code->IsCode());
+ if (!abstract_code.IsCode() && FLAG_opt == false) return;
+ CHECK(abstract_code.IsCode());
- Code code = abstract_code->GetCode();
+ Code code = abstract_code.GetCode();
// We don't want the offset too early or it could be the `push rbp`
// instruction (which is not at the start of generated code, because the lazy
// deopt check happens before frame setup).
- const uintptr_t offset = code->InstructionSize() - 20;
- CHECK_LT(offset, code->InstructionSize());
- Address pc = code->InstructionStart() + offset;
+ const uintptr_t offset = code.InstructionSize() - 20;
+ CHECK_LT(offset, code.InstructionSize());
+ Address pc = code.InstructionStart() + offset;
register_state.pc = reinterpret_cast<void*>(pc);
// Check that the created code is within the code range that we get from the
@@ -336,7 +337,7 @@ TEST(Unwind_JSEntry_Fail) {
RegisterState register_state;
Code js_entry = i_isolate->heap()->builtin(Builtins::kJSEntry);
- byte* start = reinterpret_cast<byte*>(js_entry->InstructionStart());
+ byte* start = reinterpret_cast<byte*>(js_entry.InstructionStart());
register_state.pc = start + 10;
bool unwound = v8::Unwinder::TryUnwindV8Frames(unwind_state, &register_state,
@@ -494,8 +495,8 @@ TEST(PCIsInV8_InJSEntryRange) {
UnwindState unwind_state = isolate->GetUnwindState();
Code js_entry = i_isolate->heap()->builtin(Builtins::kJSEntry);
- byte* start = reinterpret_cast<byte*>(js_entry->InstructionStart());
- size_t length = js_entry->InstructionSize();
+ byte* start = reinterpret_cast<byte*>(js_entry.InstructionStart());
+ size_t length = js_entry.InstructionSize();
void* pc = start;
CHECK(v8::Unwinder::PCIsInV8(unwind_state, pc));
@@ -530,9 +531,8 @@ TEST(PCIsInV8_LargeCodeObject) {
desc.unwinding_info = nullptr;
desc.unwinding_info_size = 0;
desc.origin = nullptr;
- Handle<Object> self_ref;
Handle<Code> foo_code =
- i_isolate->factory()->NewCode(desc, Code::WASM_FUNCTION, self_ref);
+ Factory::CodeBuilder(i_isolate, desc, Code::WASM_FUNCTION).Build();
CHECK(i_isolate->heap()->InSpace(*foo_code, CODE_LO_SPACE));
byte* start = reinterpret_cast<byte*>(foo_code->InstructionStart());
diff --git a/deps/v8/test/cctest/test-usecounters.cc b/deps/v8/test/cctest/test-usecounters.cc
index 52a24a3046..2c4d007c4b 100644
--- a/deps/v8/test/cctest/test-usecounters.cc
+++ b/deps/v8/test/cctest/test-usecounters.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index 7086a26ec8..b6a4aad3bf 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -27,11 +27,11 @@
#include "test/cctest/test-utils-arm64.h"
-#include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/utils-arm64.h"
#include "src/base/template-utils.h"
-#include "src/macro-assembler-inl.h"
-#include "src/v8.h"
+#include "src/codegen/arm64/assembler-arm64-inl.h"
+#include "src/codegen/arm64/utils-arm64.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -205,9 +205,11 @@ bool EqualNzcv(uint32_t expected, uint32_t result) {
return true;
}
-
-bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
- for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+bool EqualV8Registers(const RegisterDump* a, const RegisterDump* b) {
+ CPURegList available_regs = kCallerSaved;
+ available_regs.Combine(kCalleeSaved);
+ while (!available_regs.IsEmpty()) {
+ int i = available_regs.PopLowestIndex().code();
if (a->xreg(i) != b->xreg(i)) {
printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
i, a->xreg(i), b->xreg(i));
diff --git a/deps/v8/test/cctest/test-utils-arm64.h b/deps/v8/test/cctest/test-utils-arm64.h
index 45970ac8b7..305f6bd938 100644
--- a/deps/v8/test/cctest/test-utils-arm64.h
+++ b/deps/v8/test/cctest/test-utils-arm64.h
@@ -28,9 +28,9 @@
#ifndef V8_ARM64_TEST_UTILS_ARM64_H_
#define V8_ARM64_TEST_UTILS_ARM64_H_
-#include "src/arm64/utils-arm64.h"
-#include "src/macro-assembler.h"
-#include "src/v8.h"
+#include "src/codegen/arm64/utils-arm64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/init/v8.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -206,7 +206,8 @@ bool Equal128(uint64_t expected_h, uint64_t expected_l,
bool EqualNzcv(uint32_t expected, uint32_t result);
-bool EqualRegisters(const RegisterDump* a, const RegisterDump* b);
+// Compares two RegisterDumps, only comparing registers that V8 uses.
+bool EqualV8Registers(const RegisterDump* a, const RegisterDump* b);
// Create an array of type {RegType}, size {Size}, filled with {NoReg}.
template <typename RegType, size_t Size>
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index 1f5c7c6a70..628ed1ba4f 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -29,13 +29,13 @@
#include <vector>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
-#include "src/collector.h"
-#include "src/conversions.h"
+#include "src/numbers/conversions.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/collector.h"
namespace v8 {
namespace internal {
@@ -79,7 +79,7 @@ TEST(Utils1) {
TEST(BitSetComputer) {
- typedef BitSetComputer<bool, 1, kSmiValueSize, uint32_t> BoolComputer;
+ using BoolComputer = BitSetComputer<bool, 1, kSmiValueSize, uint32_t>;
CHECK_EQ(0, BoolComputer::word_count(0));
CHECK_EQ(1, BoolComputer::word_count(8));
CHECK_EQ(2, BoolComputer::word_count(50));
@@ -97,12 +97,12 @@ TEST(BitSetComputer) {
// Lets store 2 bits per item with 3000 items and verify the values are
// correct.
- typedef BitSetComputer<unsigned char, 2, 8, unsigned char> TwoBits;
+ using TwoBits = BitSetComputer<unsigned char, 2, 8, unsigned char>;
const int words = 750;
CHECK_EQ(words, TwoBits::word_count(3000));
const int offset = 10;
Vector<unsigned char> buffer = Vector<unsigned char>::New(offset + words);
- memset(buffer.start(), 0, sizeof(unsigned char) * buffer.length());
+ memset(buffer.begin(), 0, sizeof(unsigned char) * buffer.length());
for (int i = 0; i < words; i++) {
const int index = TwoBits::index(offset, i);
unsigned char data = buffer[index];
@@ -123,20 +123,20 @@ TEST(SNPrintF) {
// Make sure that strings that are truncated because of too small
// buffers are zero-terminated anyway.
const char* s = "the quick lazy .... oh forget it!";
- int length = StrLength(s);
+ int length = static_cast<int>(strlen(s));
for (int i = 1; i < length * 2; i++) {
static const char kMarker = static_cast<char>(42);
Vector<char> buffer = Vector<char>::New(i + 1);
buffer[i] = kMarker;
- int n = SNPrintF(Vector<char>(buffer.start(), i), "%s", s);
+ int n = SNPrintF(Vector<char>(buffer.begin(), i), "%s", s);
CHECK(n <= i);
CHECK(n == length || n == -1);
- CHECK_EQ(0, strncmp(buffer.start(), s, i - 1));
+ CHECK_EQ(0, strncmp(buffer.begin(), s, i - 1));
CHECK_EQ(kMarker, buffer[i]);
if (i <= length) {
- CHECK_EQ(i - 1, StrLength(buffer.start()));
+ CHECK_EQ(i - 1, strlen(buffer.begin()));
} else {
- CHECK_EQ(length, StrLength(buffer.start()));
+ CHECK_EQ(length, strlen(buffer.begin()));
}
buffer.Dispose();
}
@@ -260,8 +260,8 @@ TEST(SequenceCollectorRegression) {
collector.AddBlock(
i::Vector<const char>("12345678901234567890123456789012", 32));
i::Vector<char> seq = collector.EndSequence();
- CHECK_EQ(0, strncmp("0123456789012345678901234567890123",
- seq.start(), seq.length()));
+ CHECK_EQ(0, strncmp("0123456789012345678901234567890123", seq.begin(),
+ seq.length()));
}
diff --git a/deps/v8/test/cctest/test-version.cc b/deps/v8/test/cctest/test-version.cc
index 301fe58c50..4ba87083de 100644
--- a/deps/v8/test/cctest/test-version.cc
+++ b/deps/v8/test/cctest/test-version.cc
@@ -25,9 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/version.h"
+#include "src/utils/version.h"
#include "test/cctest/cctest.h"
@@ -55,17 +55,17 @@ static void CheckVersion(int major, int minor, int build, int patch,
// Test version without specific SONAME.
SetVersion(major, minor, build, patch, embedder, candidate, "");
Version::GetString(version_str);
- CHECK_EQ(0, strcmp(expected_version_string, version_str.start()));
+ CHECK_EQ(0, strcmp(expected_version_string, version_str.begin()));
Version::GetSONAME(soname_str);
- CHECK_EQ(0, strcmp(expected_generic_soname, soname_str.start()));
+ CHECK_EQ(0, strcmp(expected_generic_soname, soname_str.begin()));
// Test version with specific SONAME.
const char* soname = "libv8.so.1";
SetVersion(major, minor, build, patch, embedder, candidate, soname);
Version::GetString(version_str);
- CHECK_EQ(0, strcmp(expected_version_string, version_str.start()));
+ CHECK_EQ(0, strcmp(expected_version_string, version_str.begin()));
Version::GetSONAME(soname_str);
- CHECK_EQ(0, strcmp(soname, soname_str.start()));
+ CHECK_EQ(0, strcmp(soname, soname_str.begin()));
}
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 8851179f94..4b1feaeef1 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -27,13 +27,13 @@
#include <utility>
-#include "src/global-handles.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -81,19 +81,19 @@ TEST(Weakness) {
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
Handle<Smi> smi(Smi::FromInt(23), isolate);
- int32_t hash = key->GetOrCreateHash(isolate)->value();
+ int32_t hash = key->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakmap, key, object, hash);
- int32_t object_hash = object->GetOrCreateHash(isolate)->value();
+ int32_t object_hash = object->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakmap, object, smi, object_hash);
}
- CHECK_EQ(2, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
+ CHECK_EQ(2, EphemeronHashTable::cast(weakmap->table()).NumberOfElements());
// Force a full GC.
CcTest::PreciseCollectAllGarbage();
CHECK_EQ(0, NumberOfWeakCalls);
- CHECK_EQ(2, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
+ CHECK_EQ(2, EphemeronHashTable::cast(weakmap->table()).NumberOfElements());
CHECK_EQ(
- 0, EphemeronHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+ 0, EphemeronHashTable::cast(weakmap->table()).NumberOfDeletedElements());
// Make the global reference to the key weak.
std::pair<Handle<Object>*, int> handle_and_id(&key, 1234);
@@ -104,9 +104,9 @@ TEST(Weakness) {
CcTest::PreciseCollectAllGarbage();
CHECK_EQ(1, NumberOfWeakCalls);
- CHECK_EQ(0, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
+ CHECK_EQ(0, EphemeronHashTable::cast(weakmap->table()).NumberOfElements());
CHECK_EQ(
- 2, EphemeronHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+ 2, EphemeronHashTable::cast(weakmap->table()).NumberOfDeletedElements());
}
@@ -118,7 +118,7 @@ TEST(Shrinking) {
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
// Check initial capacity.
- CHECK_EQ(32, EphemeronHashTable::cast(weakmap->table())->Capacity());
+ CHECK_EQ(32, EphemeronHashTable::cast(weakmap->table()).Capacity());
// Fill up weak map to trigger capacity change.
{
@@ -127,32 +127,31 @@ TEST(Shrinking) {
for (int i = 0; i < 32; i++) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
Handle<Smi> smi(Smi::FromInt(i), isolate);
- int32_t object_hash = object->GetOrCreateHash(isolate)->value();
+ int32_t object_hash = object->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakmap, object, smi, object_hash);
}
}
// Check increased capacity.
- CHECK_EQ(128, EphemeronHashTable::cast(weakmap->table())->Capacity());
+ CHECK_EQ(128, EphemeronHashTable::cast(weakmap->table()).Capacity());
// Force a full GC.
- CHECK_EQ(32, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
+ CHECK_EQ(32, EphemeronHashTable::cast(weakmap->table()).NumberOfElements());
CHECK_EQ(
- 0, EphemeronHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+ 0, EphemeronHashTable::cast(weakmap->table()).NumberOfDeletedElements());
CcTest::PreciseCollectAllGarbage();
- CHECK_EQ(0, EphemeronHashTable::cast(weakmap->table())->NumberOfElements());
+ CHECK_EQ(0, EphemeronHashTable::cast(weakmap->table()).NumberOfElements());
CHECK_EQ(
- 32,
- EphemeronHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+ 32, EphemeronHashTable::cast(weakmap->table()).NumberOfDeletedElements());
// Check shrunk capacity.
- CHECK_EQ(32, EphemeronHashTable::cast(weakmap->table())->Capacity());
+ CHECK_EQ(32, EphemeronHashTable::cast(weakmap->table()).Capacity());
}
namespace {
bool EphemeronHashTableContainsKey(EphemeronHashTable table, HeapObject key) {
for (int i = 0; i < table.Capacity(); ++i) {
- if (table->KeyAt(i) == key) return true;
+ if (table.KeyAt(i) == key) return true;
}
return false;
}
@@ -171,7 +170,7 @@ TEST(WeakMapPromotion) {
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
Handle<Smi> smi(Smi::FromInt(1), isolate);
- int32_t object_hash = object->GetOrCreateHash(isolate)->value();
+ int32_t object_hash = object->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakmap, object, smi, object_hash);
CHECK(EphemeronHashTableContainsKey(
@@ -203,7 +202,7 @@ TEST(WeakMapScavenge) {
Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
Handle<Smi> smi(Smi::FromInt(1), isolate);
- int32_t object_hash = object->GetOrCreateHash(isolate)->value();
+ int32_t object_hash = object->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakmap, object, smi, object_hash);
CHECK(EphemeronHashTableContainsKey(
@@ -249,7 +248,7 @@ TEST(Regress2060a) {
factory->NewJSObject(function, AllocationType::kOld);
CHECK(!Heap::InYoungGeneration(*object));
CHECK(!first_page->Contains(object->address()));
- int32_t hash = key->GetOrCreateHash(isolate)->value();
+ int32_t hash = key->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakmap, key, object, hash);
}
}
@@ -291,7 +290,7 @@ TEST(Regress2060b) {
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
for (int i = 0; i < 32; i++) {
Handle<Smi> smi(Smi::FromInt(i), isolate);
- int32_t hash = keys[i]->GetOrCreateHash(isolate)->value();
+ int32_t hash = keys[i]->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakmap, keys[i], smi, hash);
}
@@ -343,8 +342,8 @@ TEST(WeakMapsWithChainedEntries) {
g2.SetWeak();
Handle<Object> i_o1 = v8::Utils::OpenHandle(*o1);
Handle<Object> i_o2 = v8::Utils::OpenHandle(*o2);
- int32_t hash1 = i_o1->GetOrCreateHash(i_isolate)->value();
- int32_t hash2 = i_o2->GetOrCreateHash(i_isolate)->value();
+ int32_t hash1 = i_o1->GetOrCreateHash(i_isolate).value();
+ int32_t hash2 = i_o2->GetOrCreateHash(i_isolate).value();
JSWeakCollection::Set(weakmap1, i_o1, i_o2, hash1);
JSWeakCollection::Set(weakmap2, i_o2, i_o1, hash2);
}
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 42445b3610..a65405d9ef 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -27,13 +27,13 @@
#include <utility>
-#include "src/global-handles.h"
+#include "src/execution/isolate.h"
+#include "src/handles/global-handles.h"
#include "src/heap/factory.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-collection-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"
@@ -94,17 +94,17 @@ TEST(WeakSet_Weakness) {
{
HandleScope scope(isolate);
Handle<Smi> smi(Smi::FromInt(23), isolate);
- int32_t hash = key->GetOrCreateHash(isolate)->value();
+ int32_t hash = key->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakset, key, smi, hash);
}
- CHECK_EQ(1, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(1, EphemeronHashTable::cast(weakset->table()).NumberOfElements());
// Force a full GC.
CcTest::PreciseCollectAllGarbage();
CHECK_EQ(0, NumberOfWeakCalls);
- CHECK_EQ(1, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(1, EphemeronHashTable::cast(weakset->table()).NumberOfElements());
CHECK_EQ(
- 0, EphemeronHashTable::cast(weakset->table())->NumberOfDeletedElements());
+ 0, EphemeronHashTable::cast(weakset->table()).NumberOfDeletedElements());
// Make the global reference to the key weak.
std::pair<Handle<Object>*, int> handle_and_id(&key, 1234);
@@ -115,9 +115,9 @@ TEST(WeakSet_Weakness) {
CcTest::PreciseCollectAllGarbage();
CHECK_EQ(1, NumberOfWeakCalls);
- CHECK_EQ(0, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(0, EphemeronHashTable::cast(weakset->table()).NumberOfElements());
CHECK_EQ(
- 1, EphemeronHashTable::cast(weakset->table())->NumberOfDeletedElements());
+ 1, EphemeronHashTable::cast(weakset->table()).NumberOfDeletedElements());
}
@@ -129,7 +129,7 @@ TEST(WeakSet_Shrinking) {
Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
// Check initial capacity.
- CHECK_EQ(32, EphemeronHashTable::cast(weakset->table())->Capacity());
+ CHECK_EQ(32, EphemeronHashTable::cast(weakset->table()).Capacity());
// Fill up weak set to trigger capacity change.
{
@@ -138,26 +138,25 @@ TEST(WeakSet_Shrinking) {
for (int i = 0; i < 32; i++) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
Handle<Smi> smi(Smi::FromInt(i), isolate);
- int32_t hash = object->GetOrCreateHash(isolate)->value();
+ int32_t hash = object->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakset, object, smi, hash);
}
}
// Check increased capacity.
- CHECK_EQ(128, EphemeronHashTable::cast(weakset->table())->Capacity());
+ CHECK_EQ(128, EphemeronHashTable::cast(weakset->table()).Capacity());
// Force a full GC.
- CHECK_EQ(32, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(32, EphemeronHashTable::cast(weakset->table()).NumberOfElements());
CHECK_EQ(
- 0, EphemeronHashTable::cast(weakset->table())->NumberOfDeletedElements());
+ 0, EphemeronHashTable::cast(weakset->table()).NumberOfDeletedElements());
CcTest::PreciseCollectAllGarbage();
- CHECK_EQ(0, EphemeronHashTable::cast(weakset->table())->NumberOfElements());
+ CHECK_EQ(0, EphemeronHashTable::cast(weakset->table()).NumberOfElements());
CHECK_EQ(
- 32,
- EphemeronHashTable::cast(weakset->table())->NumberOfDeletedElements());
+ 32, EphemeronHashTable::cast(weakset->table()).NumberOfDeletedElements());
// Check shrunk capacity.
- CHECK_EQ(32, EphemeronHashTable::cast(weakset->table())->Capacity());
+ CHECK_EQ(32, EphemeronHashTable::cast(weakset->table()).Capacity());
}
@@ -188,7 +187,7 @@ TEST(WeakSet_Regress2060a) {
factory->NewJSObject(function, AllocationType::kOld);
CHECK(!Heap::InYoungGeneration(*object));
CHECK(!first_page->Contains(object->address()));
- int32_t hash = key->GetOrCreateHash(isolate)->value();
+ int32_t hash = key->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakset, key, object, hash);
}
}
@@ -230,7 +229,7 @@ TEST(WeakSet_Regress2060b) {
Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
for (int i = 0; i < 32; i++) {
Handle<Smi> smi(Smi::FromInt(i), isolate);
- int32_t hash = keys[i]->GetOrCreateHash(isolate)->value();
+ int32_t hash = keys[i]->GetOrCreateHash(isolate).value();
JSWeakCollection::Set(weakset, keys[i], smi, hash);
}
diff --git a/deps/v8/test/cctest/torque/test-torque.cc b/deps/v8/test/cctest/torque/test-torque.cc
index 5a4b439cd4..75d80329f5 100644
--- a/deps/v8/test/cctest/torque/test-torque.cc
+++ b/deps/v8/test/cctest/torque/test-torque.cc
@@ -4,22 +4,22 @@
#include <cmath>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/builtins/builtins-promise-gen.h"
#include "src/builtins/builtins-string-gen.h"
-#include "src/char-predicates.h"
-#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/code-stub-assembler.h"
#include "src/compiler/node.h"
#include "src/debug/debug.h"
-#include "src/elements-kind.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/promise-inl.h"
+#include "src/strings/char-predicates.h"
#include "test/cctest/compiler/code-assembler-tester.h"
#include "test/cctest/compiler/function-tester.h"
-#include "torque-generated/builtins-test-from-dsl-gen.h"
+#include "torque-generated/builtins-test-gen-tq.h"
namespace v8 {
namespace internal {
@@ -27,14 +27,13 @@ namespace compiler {
namespace {
-typedef CodeAssemblerLabel Label;
-typedef CodeAssemblerVariable Variable;
+using Label = CodeAssemblerLabel;
+using Variable = CodeAssemblerVariable;
-class TestTorqueAssembler : public CodeStubAssembler,
- public TestBuiltinsFromDSLAssembler {
+class TestTorqueAssembler : public CodeStubAssembler {
public:
explicit TestTorqueAssembler(CodeAssemblerState* state)
- : CodeStubAssembler(state), TestBuiltinsFromDSLAssembler(state) {}
+ : CodeStubAssembler(state) {}
};
} // namespace
@@ -486,6 +485,20 @@ TEST(TestReferences) {
ft.Call();
}
+TEST(TestStaticAssert) {
+ CcTest::InitializeVM();
+ Isolate* isolate(CcTest::i_isolate());
+ i::HandleScope scope(isolate);
+ CodeAssemblerTester asm_tester(isolate);
+ TestTorqueAssembler m(asm_tester.state());
+ {
+ m.TestStaticAssert();
+ m.Return(m.UndefinedConstant());
+ }
+ FunctionTester ft(asm_tester.GenerateCode(), 0);
+ ft.Call();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index 3e2a9192d7..08cc024c90 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -28,8 +28,8 @@
#include "test/cctest/trace-extension.h"
#include "include/v8-profiler.h"
+#include "src/execution/vm-state-inl.h"
#include "src/objects/smi.h"
-#include "src/vm-state-inl.h"
#include "test/cctest/cctest.h"
namespace v8 {
@@ -77,9 +77,9 @@ Address TraceExtension::GetFP(const v8::FunctionCallbackInfo<v8::Value>& args) {
uint64_t kSmiValueMask =
(static_cast<uintptr_t>(1) << (kSmiValueSize - 1)) - 1;
uint64_t low_bits =
- Smi(*reinterpret_cast<Address*>(*args[0]))->value() & kSmiValueMask;
+ Smi(*reinterpret_cast<Address*>(*args[0])).value() & kSmiValueMask;
uint64_t high_bits =
- Smi(*reinterpret_cast<Address*>(*args[1]))->value() & kSmiValueMask;
+ Smi(*reinterpret_cast<Address*>(*args[1])).value() & kSmiValueMask;
Address fp =
static_cast<Address>((high_bits << (kSmiValueSize - 1)) | low_bits);
#else
diff --git a/deps/v8/test/cctest/trace-extension.h b/deps/v8/test/cctest/trace-extension.h
index 385f0c23c2..fe62c006b7 100644
--- a/deps/v8/test/cctest/trace-extension.h
+++ b/deps/v8/test/cctest/trace-extension.h
@@ -29,7 +29,7 @@
#define V8_TEST_CCTEST_TRACE_EXTENSION_H_
#include "include/v8.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
namespace v8 {
struct TickSample;
diff --git a/deps/v8/test/cctest/unicode-helpers.cc b/deps/v8/test/cctest/unicode-helpers.cc
index 1a74e0ca94..c16eafc8f7 100644
--- a/deps/v8/test/cctest/unicode-helpers.cc
+++ b/deps/v8/test/cctest/unicode-helpers.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "test/cctest/unicode-helpers.h"
-#include "src/unicode-inl.h"
+#include "src/strings/unicode-inl.h"
int Ucs2CharLength(unibrow::uchar c) {
if (c == unibrow::Utf8::kIncomplete || c == unibrow::Utf8::kBufferEmpty) {
diff --git a/deps/v8/test/cctest/unicode-helpers.h b/deps/v8/test/cctest/unicode-helpers.h
index 06c3fcd8ea..6f7504571e 100644
--- a/deps/v8/test/cctest/unicode-helpers.h
+++ b/deps/v8/test/cctest/unicode-helpers.h
@@ -5,7 +5,7 @@
#ifndef V8_CCTEST_UNICODE_HELPERS_H_
#define V8_CCTEST_UNICODE_HELPERS_H_
-#include "src/unicode.h"
+#include "src/strings/unicode.h"
int Ucs2CharLength(unibrow::uchar c);
int Utf8LengthHelper(const char* s);
diff --git a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
index 7c0aacddc6..c4b8adddf4 100644
--- a/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-c-wasm-entry.cc
@@ -4,9 +4,9 @@
#include <cstdint>
-#include "src/assembler-inl.h"
#include "src/base/overflowing-math.h"
-#include "src/objects-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index ca29eaa66b..90b9f6e642 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -4,10 +4,10 @@
#include <bitset>
-#include "src/assembler-inl.h"
-#include "src/macro-assembler-inl.h"
-#include "src/simulator.h"
-#include "src/utils.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/execution/simulator.h"
+#include "src/utils/utils.h"
#include "src/wasm/jump-table-assembler.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index dfce94b20c..c8dd901161 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -6,10 +6,10 @@
#include <stdlib.h>
#include <string.h>
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
-#include "src/objects-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
index ea45762a7c..278a6ec7bc 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -6,8 +6,8 @@
#include <stdlib.h>
#include <string.h>
-#include "src/assembler-inl.h"
#include "src/base/platform/elapsed-timer.h"
+#include "src/codegen/assembler-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
index 6d7c9df92f..097287c41d 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-bulk-memory.cc
@@ -50,7 +50,7 @@ WASM_EXEC_TEST(MemoryInit) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
- r.builder().AddPassiveDataSegment(Vector<const byte>(data));
+ r.builder().AddPassiveDataSegment(ArrayVector(data));
BUILD(r,
WASM_MEMORY_INIT(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
WASM_GET_LOCAL(2)),
@@ -87,7 +87,7 @@ WASM_EXEC_TEST(MemoryInitOutOfBoundsData) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
- r.builder().AddPassiveDataSegment(Vector<const byte>(data));
+ r.builder().AddPassiveDataSegment(ArrayVector(data));
BUILD(r,
WASM_MEMORY_INIT(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
WASM_GET_LOCAL(2)),
@@ -110,7 +110,7 @@ WASM_EXEC_TEST(MemoryInitOutOfBounds) {
WasmRunner<uint32_t, uint32_t, uint32_t, uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[kWasmPageSize] = {};
- r.builder().AddPassiveDataSegment(Vector<const byte>(data));
+ r.builder().AddPassiveDataSegment(ArrayVector(data));
BUILD(r,
WASM_MEMORY_INIT(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
WASM_GET_LOCAL(2)),
@@ -331,7 +331,7 @@ WASM_EXEC_TEST(DataDropTwice) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0};
- r.builder().AddPassiveDataSegment(Vector<const byte>(data));
+ r.builder().AddPassiveDataSegment(ArrayVector(data));
BUILD(r, WASM_DATA_DROP(0), kExprI32Const, 0);
CHECK_EQ(0, r.Call());
@@ -343,7 +343,7 @@ WASM_EXEC_TEST(DataDropThenMemoryInit) {
WasmRunner<uint32_t> r(execution_tier);
r.builder().AddMemory(kWasmPageSize);
const byte data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
- r.builder().AddPassiveDataSegment(Vector<const byte>(data));
+ r.builder().AddPassiveDataSegment(ArrayVector(data));
BUILD(r, WASM_DATA_DROP(0),
WASM_MEMORY_INIT(0, WASM_I32V_1(0), WASM_I32V_1(1), WASM_I32V_1(2)),
kExprI32Const, 0);
@@ -543,6 +543,8 @@ WASM_EXEC_TEST(TableCopyElems) {
WASM_TABLE_COPY(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
kExprI32Const, 0);
+ r.builder().FreezeSignatureMapAndInitializeWrapperCache();
+
auto table = handle(
WasmTableObject::cast(r.builder().instance_object()->tables().get(0)),
isolate);
@@ -628,6 +630,8 @@ WASM_EXEC_TEST(TableCopyOobWrites) {
WASM_TABLE_COPY(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
kExprI32Const, 0);
+ r.builder().FreezeSignatureMapAndInitializeWrapperCache();
+
auto table = handle(
WasmTableObject::cast(r.builder().instance_object()->tables().get(0)),
isolate);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
index fdf5905c88..4d7983cc8f 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "test/cctest/wasm/wasm-atomics-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
@@ -71,7 +71,7 @@ WASM_EXEC_TEST(TryCatchCallIndirect) {
// Build a throwing helper function.
WasmFunctionCompiler& throw_func = r.NewFunction(sigs.i_ii());
BUILD(throw_func, WASM_THROW(except));
- r.builder().AddSignature(sigs.i_ii());
+ byte sig_index = r.builder().AddSignature(sigs.i_ii());
throw_func.SetSigIndex(0);
// Add an indirect function table.
@@ -86,7 +86,7 @@ WASM_EXEC_TEST(TryCatchCallIndirect) {
WASM_STMTS(WASM_I32V(kResult1),
WASM_IF(WASM_I32_EQZ(WASM_GET_LOCAL(0)),
WASM_STMTS(WASM_CALL_INDIRECT2(
- 0, WASM_GET_LOCAL(0),
+ sig_index, WASM_GET_LOCAL(0),
WASM_I32V(7), WASM_I32V(9)),
WASM_DROP))),
WASM_STMTS(WASM_DROP, WASM_I32V(kResult0))));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 0a452a2c59..4c1842b537 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -8,7 +8,7 @@
#include <memory>
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/wasm/wasm-interpreter.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -220,7 +220,7 @@ TEST(Run_Wasm_returnCallIndirectFactorial) {
WasmFunctionCompiler& fact_aux_fn = r.NewFunction(sigs.i_ii(), "fact_aux");
fact_aux_fn.SetSigIndex(0);
- r.builder().AddSignature(sigs.i_ii());
+ byte sig_index = r.builder().AddSignature(sigs.i_ii());
// Function table.
uint16_t indirect_function_table[] = {
@@ -229,14 +229,15 @@ TEST(Run_Wasm_returnCallIndirectFactorial) {
r.builder().AddIndirectFunctionTable(indirect_function_table,
arraysize(indirect_function_table));
- BUILD(r, WASM_RETURN_CALL_INDIRECT(0, WASM_I32V(0), WASM_GET_LOCAL(0),
+ BUILD(r, WASM_RETURN_CALL_INDIRECT(sig_index, WASM_I32V(0), WASM_GET_LOCAL(0),
WASM_I32V(1)));
BUILD(fact_aux_fn,
WASM_IF_ELSE_I(
WASM_I32_EQ(WASM_I32V(1), WASM_GET_LOCAL(0)), WASM_GET_LOCAL(1),
WASM_RETURN_CALL_INDIRECT(
- 0, WASM_I32V(0), WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I32V(1)),
+ sig_index, WASM_I32V(0),
+ WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I32V(1)),
WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))));
uint32_t test_values[] = {1, 2, 5, 10, 20};
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 0c7a6ac408..a06c2f8720 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -7,8 +7,8 @@
#include <stdlib.h>
#include <string.h>
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/objects/heap-number-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -64,7 +64,7 @@ ManuallyImportedJSFunction CreateJSSelector(FunctionSig* sig, int which) {
Handle<JSFunction> js_function =
Handle<JSFunction>::cast(v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast(CompileRun(source.start()))));
+ *v8::Local<v8::Function>::Cast(CompileRun(source.begin()))));
ManuallyImportedJSFunction import = {sig, js_function};
return import;
@@ -141,14 +141,15 @@ WASM_EXEC_TEST(Run_IndirectCallJSFunction) {
WasmFunctionCompiler& rc_fn = r.NewFunction(sigs.i_i(), "rc");
- r.builder().AddSignature(sigs.i_iii());
+ byte sig_index = r.builder().AddSignature(sigs.i_iii());
uint16_t indirect_function_table[] = {static_cast<uint16_t>(js_index)};
r.builder().AddIndirectFunctionTable(indirect_function_table,
arraysize(indirect_function_table));
- BUILD(rc_fn, WASM_CALL_INDIRECT3(0, WASM_I32V(js_index), WASM_I32V(left),
- WASM_I32V(right), WASM_GET_LOCAL(0)));
+ BUILD(rc_fn,
+ WASM_CALL_INDIRECT3(sig_index, WASM_I32V(js_index), WASM_I32V(left),
+ WASM_I32V(right), WASM_GET_LOCAL(0)));
Handle<Object> args_left[] = {isolate->factory()->NewNumber(1)};
r.CheckCallApplyViaJS(left, rc_fn.function_index(), args_left, 1);
@@ -538,15 +539,15 @@ void RunPickerTest(ExecutionTier tier, bool indirect) {
WasmFunctionCompiler& rc_fn = r.NewFunction(sigs.i_i(), "rc");
if (indirect) {
- r.builder().AddSignature(sigs.i_iii());
+ byte sig_index = r.builder().AddSignature(sigs.i_iii());
uint16_t indirect_function_table[] = {static_cast<uint16_t>(js_index)};
r.builder().AddIndirectFunctionTable(indirect_function_table,
arraysize(indirect_function_table));
- BUILD(rc_fn,
- WASM_RETURN_CALL_INDIRECT(0, WASM_I32V(js_index), WASM_I32V(left),
- WASM_I32V(right), WASM_GET_LOCAL(0)));
+ BUILD(rc_fn, WASM_RETURN_CALL_INDIRECT(sig_index, WASM_I32V(js_index),
+ WASM_I32V(left), WASM_I32V(right),
+ WASM_GET_LOCAL(0)));
} else {
BUILD(rc_fn,
WASM_RETURN_CALL_FUNCTION(js_index, WASM_I32V(left), WASM_I32V(right),
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index d23bdc133f..8ef9f1fe94 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -5,10 +5,10 @@
#include <stdlib.h>
#include <string.h>
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/snapshot/code-serializer.h"
-#include "src/version.h"
+#include "src/utils/version.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
@@ -93,6 +93,235 @@ TEST(Run_WasmModule_Return114) {
Cleanup();
}
+TEST(Run_WasmModule_CompilationHintsLazy) {
+ if (!FLAG_wasm_tier_up || !FLAG_liftoff) return;
+ {
+ EXPERIMENTAL_FLAG_SCOPE(compilation_hints);
+
+ static const int32_t kReturnValue = 114;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ // Build module with one lazy function.
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_I32V_2(kReturnValue)};
+ EMIT_CODE_WITH_END(f, code);
+ f->SetCompilationHint(WasmCompilationHintStrategy::kLazy,
+ WasmCompilationHintTier::kBaseline,
+ WasmCompilationHintTier::kOptimized);
+
+ // Compile module. No function is actually compiled as the function is lazy.
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+ MaybeHandle<WasmModuleObject> module = testing::CompileForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+ CHECK(!module.is_null());
+
+ // Lazy function was not invoked and therefore not compiled yet.
+ static const int kFuncIndex = 0;
+ NativeModule* native_module = module.ToHandleChecked()->native_module();
+ CHECK(!native_module->HasCode(kFuncIndex));
+ auto* compilation_state = native_module->compilation_state();
+ CHECK(compilation_state->baseline_compilation_finished());
+
+ // Instantiate and invoke function.
+ MaybeHandle<WasmInstanceObject> instance =
+ isolate->wasm_engine()->SyncInstantiate(
+ isolate, &thrower, module.ToHandleChecked(), {}, {});
+ CHECK(!instance.is_null());
+ int32_t result = testing::RunWasmModuleForTesting(
+ isolate, instance.ToHandleChecked(), 0, nullptr);
+ CHECK_EQ(kReturnValue, result);
+
+ // Lazy function was invoked and therefore compiled.
+ CHECK(native_module->HasCode(kFuncIndex));
+ WasmCodeRefScope code_ref_scope;
+ ExecutionTier actual_tier = native_module->GetCode(kFuncIndex)->tier();
+ static_assert(ExecutionTier::kInterpreter < ExecutionTier::kLiftoff &&
+ ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+ ExecutionTier baseline_tier = ExecutionTier::kLiftoff;
+ CHECK_LE(baseline_tier, actual_tier);
+ CHECK(compilation_state->baseline_compilation_finished());
+ }
+ Cleanup();
+}
+
+TEST(Run_WasmModule_CompilationHintsNoTiering) {
+ if (!FLAG_wasm_tier_up || !FLAG_liftoff) return;
+ {
+ EXPERIMENTAL_FLAG_SCOPE(compilation_hints);
+
+ static const int32_t kReturnValue = 114;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ // Build module with regularly compiled function (no tiering).
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_I32V_2(kReturnValue)};
+ EMIT_CODE_WITH_END(f, code);
+ f->SetCompilationHint(WasmCompilationHintStrategy::kEager,
+ WasmCompilationHintTier::kBaseline,
+ WasmCompilationHintTier::kBaseline);
+
+ // Compile module.
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+ MaybeHandle<WasmModuleObject> module = testing::CompileForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+ CHECK(!module.is_null());
+
+ // Synchronous compilation finished and no tiering units were initialized.
+ static const int kFuncIndex = 0;
+ NativeModule* native_module = module.ToHandleChecked()->native_module();
+ CHECK(native_module->HasCode(kFuncIndex));
+ ExecutionTier expected_tier = ExecutionTier::kLiftoff;
+ WasmCodeRefScope code_ref_scope;
+ ExecutionTier actual_tier = native_module->GetCode(kFuncIndex)->tier();
+ CHECK_EQ(expected_tier, actual_tier);
+ auto* compilation_state = native_module->compilation_state();
+ CHECK(compilation_state->baseline_compilation_finished());
+ CHECK(compilation_state->top_tier_compilation_finished());
+ }
+ Cleanup();
+}
+
+TEST(Run_WasmModule_CompilationHintsTierUp) {
+ if (!FLAG_wasm_tier_up || !FLAG_liftoff) return;
+ {
+ EXPERIMENTAL_FLAG_SCOPE(compilation_hints);
+
+ static const int32_t kReturnValue = 114;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ // Build module with tiering compilation hint.
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_I32V_2(kReturnValue)};
+ EMIT_CODE_WITH_END(f, code);
+ f->SetCompilationHint(WasmCompilationHintStrategy::kEager,
+ WasmCompilationHintTier::kBaseline,
+ WasmCompilationHintTier::kOptimized);
+
+ // Compile module.
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+ MaybeHandle<WasmModuleObject> module = testing::CompileForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+ CHECK(!module.is_null());
+
+ // Expect baseline or top tier code.
+ static const int kFuncIndex = 0;
+ NativeModule* native_module = module.ToHandleChecked()->native_module();
+ auto* compilation_state = native_module->compilation_state();
+ static_assert(ExecutionTier::kInterpreter < ExecutionTier::kLiftoff &&
+ ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+ ExecutionTier baseline_tier = ExecutionTier::kLiftoff;
+ {
+ CHECK(native_module->HasCode(kFuncIndex));
+ WasmCodeRefScope code_ref_scope;
+ ExecutionTier actual_tier = native_module->GetCode(kFuncIndex)->tier();
+ CHECK_LE(baseline_tier, actual_tier);
+ CHECK(compilation_state->baseline_compilation_finished());
+ }
+
+ // Busy wait for top tier compilation to finish.
+ while (!compilation_state->top_tier_compilation_finished()) {
+ }
+
+ // Expect top tier code.
+ ExecutionTier top_tier = ExecutionTier::kTurbofan;
+ {
+ CHECK(native_module->HasCode(kFuncIndex));
+ WasmCodeRefScope code_ref_scope;
+ ExecutionTier actual_tier = native_module->GetCode(kFuncIndex)->tier();
+ CHECK_EQ(top_tier, actual_tier);
+ CHECK(compilation_state->baseline_compilation_finished());
+ CHECK(compilation_state->top_tier_compilation_finished());
+ }
+ }
+ Cleanup();
+}
+
+TEST(Run_WasmModule_CompilationHintsLazyBaselineEagerTopTier) {
+ if (!FLAG_wasm_tier_up || !FLAG_liftoff) return;
+ {
+ EXPERIMENTAL_FLAG_SCOPE(compilation_hints);
+
+ static const int32_t kReturnValue = 114;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ // Build module with tiering compilation hint.
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_I32V_2(kReturnValue)};
+ EMIT_CODE_WITH_END(f, code);
+ f->SetCompilationHint(
+ WasmCompilationHintStrategy::kLazyBaselineEagerTopTier,
+ WasmCompilationHintTier::kBaseline,
+ WasmCompilationHintTier::kOptimized);
+
+ // Compile module.
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+ MaybeHandle<WasmModuleObject> module = testing::CompileForTesting(
+ isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+ CHECK(!module.is_null());
+
+ NativeModule* native_module = module.ToHandleChecked()->native_module();
+ auto* compilation_state = native_module->compilation_state();
+
+ // Busy wait for top tier compilation to finish.
+ while (!compilation_state->top_tier_compilation_finished()) {
+ }
+
+ // Expect top tier code.
+ static_assert(ExecutionTier::kInterpreter < ExecutionTier::kLiftoff &&
+ ExecutionTier::kLiftoff < ExecutionTier::kTurbofan,
+ "Assume an order on execution tiers");
+ static const int kFuncIndex = 0;
+ ExecutionTier top_tier = ExecutionTier::kTurbofan;
+ {
+ CHECK(native_module->HasCode(kFuncIndex));
+ WasmCodeRefScope code_ref_scope;
+ ExecutionTier actual_tier = native_module->GetCode(kFuncIndex)->tier();
+ CHECK_EQ(top_tier, actual_tier);
+ CHECK(compilation_state->baseline_compilation_finished());
+ CHECK(compilation_state->top_tier_compilation_finished());
+ }
+ }
+ Cleanup();
+}
+
TEST(Run_WasmModule_CallAdd) {
{
v8::internal::AccountingAllocator allocator;
@@ -347,7 +576,7 @@ TEST(TestInterruptLoop) {
isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
.ToHandleChecked();
- Handle<JSArrayBuffer> memory(instance->memory_object()->array_buffer(),
+ Handle<JSArrayBuffer> memory(instance->memory_object().array_buffer(),
isolate);
int32_t* memory_array = reinterpret_cast<int32_t*>(memory->backing_store());
@@ -723,12 +952,12 @@ struct ManuallyExternalizedBuffer {
ManuallyExternalizedBuffer(JSArrayBuffer buffer, Isolate* isolate)
: isolate_(isolate),
buffer_(buffer, isolate),
- allocation_base_(buffer->allocation_base()),
- allocation_length_(buffer->allocation_length()),
+ allocation_base_(buffer.allocation_base()),
+ allocation_length_(buffer.allocation_length()),
should_free_(!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory(
- buffer->backing_store())) {
+ buffer.backing_store())) {
if (!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory(
- buffer->backing_store())) {
+ buffer.backing_store())) {
v8::Utils::ToLocal(buffer_)->Externalize();
}
}
@@ -843,7 +1072,7 @@ TEST(Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree) {
contents.Data(), is_wasm_memory));
// Make sure we can write to the buffer without crashing
uint32_t* int_buffer =
- reinterpret_cast<uint32_t*>(mem->array_buffer()->backing_store());
+ reinterpret_cast<uint32_t*>(mem->array_buffer().backing_store());
int_buffer[0] = 0;
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 5229514dba..77488325b4 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -4,9 +4,9 @@
#include <type_traits>
-#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/base/overflowing-math.h"
+#include "src/codegen/assembler-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index ef20384166..6437a4a0d9 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -6,11 +6,11 @@
#include <stdlib.h>
#include <string.h>
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/overflowing-math.h"
#include "src/base/platform/elapsed-timer.h"
-#include "src/utils.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/utils/utils.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
@@ -122,15 +122,14 @@ WASM_EXEC_TEST(Int32Add_P2) {
WASM_EXEC_TEST(Int32Add_block1) {
EXPERIMENTAL_FLAG_SCOPE(mv);
static const byte code[] = {
- WASM_BLOCK_X(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- kExprI32Add};
+ WASM_BLOCK_X(1, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)), kExprI32Add};
RunInt32AddTest(execution_tier, code, sizeof(code));
}
WASM_EXEC_TEST(Int32Add_block2) {
EXPERIMENTAL_FLAG_SCOPE(mv);
static const byte code[] = {
- WASM_BLOCK_X(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), kExprBr, DEPTH_0),
+ WASM_BLOCK_X(1, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), kExprBr, DEPTH_0),
kExprI32Add};
RunInt32AddTest(execution_tier, code, sizeof(code));
}
@@ -138,7 +137,7 @@ WASM_EXEC_TEST(Int32Add_block2) {
WASM_EXEC_TEST(Int32Add_multi_if) {
EXPERIMENTAL_FLAG_SCOPE(mv);
static const byte code[] = {
- WASM_IF_ELSE_X(0, WASM_GET_LOCAL(0),
+ WASM_IF_ELSE_X(1, WASM_GET_LOCAL(0),
WASM_SEQ(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
WASM_SEQ(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0))),
kExprI32Add};
@@ -753,12 +752,19 @@ WASM_EXEC_TEST(Return_F64) {
WASM_EXEC_TEST(Select_float_parameters) {
WasmRunner<float, float, float, int32_t> r(execution_tier);
- // return select(11, 22, a);
BUILD(r,
WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)));
CHECK_FLOAT_EQ(2.0f, r.Call(2.0f, 1.0f, 1));
}
+WASM_EXEC_TEST(SelectWithType_float_parameters) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WasmRunner<float, float, float, int32_t> r(execution_tier);
+ BUILD(r,
+ WASM_SELECT_F(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)));
+ CHECK_FLOAT_EQ(2.0f, r.Call(2.0f, 1.0f, 1));
+}
+
WASM_EXEC_TEST(Select) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// return select(11, 22, a);
@@ -769,6 +775,17 @@ WASM_EXEC_TEST(Select) {
}
}
+WASM_EXEC_TEST(SelectWithType) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
+ // return select(11, 22, a);
+ BUILD(r, WASM_SELECT_I(WASM_I32V_1(11), WASM_I32V_1(22), WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = i ? 11 : 22;
+ CHECK_EQ(expected, r.Call(i));
+ }
+}
+
WASM_EXEC_TEST(Select_strict1) {
WasmRunner<int32_t, int32_t> r(execution_tier);
// select(a=0, a=1, a=2); return a
@@ -779,6 +796,18 @@ WASM_EXEC_TEST(Select_strict1) {
FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(i)); }
}
+WASM_EXEC_TEST(SelectWithType_strict1) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
+ // select(a=0, a=1, a=2); return a
+ BUILD(r,
+ WASM_SELECT_I(WASM_TEE_LOCAL(0, WASM_ZERO),
+ WASM_TEE_LOCAL(0, WASM_I32V_1(1)),
+ WASM_TEE_LOCAL(0, WASM_I32V_1(2))),
+ WASM_DROP, WASM_GET_LOCAL(0));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(i)); }
+}
+
WASM_EXEC_TEST(Select_strict2) {
WasmRunner<int32_t, int32_t> r(execution_tier);
r.AllocateLocal(kWasmI32);
@@ -792,6 +821,20 @@ WASM_EXEC_TEST(Select_strict2) {
}
}
+WASM_EXEC_TEST(SelectWithType_strict2) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
+ r.AllocateLocal(kWasmI32);
+ r.AllocateLocal(kWasmI32);
+ // select(b=5, c=6, a)
+ BUILD(r, WASM_SELECT_I(WASM_TEE_LOCAL(1, WASM_I32V_1(5)),
+ WASM_TEE_LOCAL(2, WASM_I32V_1(6)), WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = i ? 5 : 6;
+ CHECK_EQ(expected, r.Call(i));
+ }
+}
+
WASM_EXEC_TEST(Select_strict3) {
WasmRunner<int32_t, int32_t> r(execution_tier);
r.AllocateLocal(kWasmI32);
@@ -806,6 +849,21 @@ WASM_EXEC_TEST(Select_strict3) {
}
}
+WASM_EXEC_TEST(SelectWithType_strict3) {
+ EXPERIMENTAL_FLAG_SCOPE(anyref);
+ WasmRunner<int32_t, int32_t> r(execution_tier);
+ r.AllocateLocal(kWasmI32);
+ r.AllocateLocal(kWasmI32);
+ // select(b=5, c=6, a=b)
+ BUILD(r, WASM_SELECT_I(WASM_TEE_LOCAL(1, WASM_I32V_1(5)),
+ WASM_TEE_LOCAL(2, WASM_I32V_1(6)),
+ WASM_TEE_LOCAL(0, WASM_GET_LOCAL(1))));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = 5;
+ CHECK_EQ(expected, r.Call(i));
+ }
+}
+
WASM_EXEC_TEST(BrIf_strict) {
WasmRunner<int32_t, int32_t> r(execution_tier);
BUILD(r, WASM_BLOCK_I(WASM_BRV_IF(0, WASM_GET_LOCAL(0),
@@ -3704,7 +3762,7 @@ TEST(Liftoff_tier_up) {
WASM_GET_LOCAL(1)));
NativeModule* native_module =
- r.builder().instance_object()->module_object()->native_module();
+ r.builder().instance_object()->module_object().native_module();
// This test only works if we managed to compile with Liftoff.
if (native_module->GetCode(add.function_index())->is_liftoff()) {
@@ -3718,7 +3776,7 @@ TEST(Liftoff_tier_up) {
WasmCode* sub_code = native_module->GetCode(sub.function_index());
size_t sub_size = sub_code->instructions().size();
std::unique_ptr<byte[]> buffer(new byte[sub_code->instructions().size()]);
- memcpy(buffer.get(), sub_code->instructions().start(), sub_size);
+ memcpy(buffer.get(), sub_code->instructions().begin(), sub_size);
desc.buffer = buffer.get();
desc.instr_size = static_cast<int>(sub_size);
std::unique_ptr<WasmCode> new_code = native_module->AddCode(
diff --git a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
index 3c4d25f9e2..5e06db3ba0 100644
--- a/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
+++ b/deps/v8/test/cctest/wasm/test-streaming-compilation.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/init/v8.h"
#include "src/objects/managed.h"
-#include "src/v8.h"
-#include "src/vector.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/vector.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
@@ -132,6 +132,7 @@ class StreamTester {
stream_ = i_isolate->wasm_engine()->StartStreamingCompilation(
i_isolate, kAllWasmFeatures, v8::Utils::OpenHandle(*context),
+ "WebAssembly.compileStreaming()",
std::make_shared<TestResolver>(&state_, &error_message_,
&native_module_));
}
@@ -1211,8 +1212,8 @@ STREAM_TEST(TestCompileErrorFunctionName) {
CHECK(tester.IsPromiseRejected());
CHECK_EQ(
- "CompileError: WebAssembly.compile(): Compiling function #0:\"f\" "
- "failed: function body must end with \"end\" opcode @+25",
+ "CompileError: WebAssembly.compileStreaming(): Compiling function "
+ "#0:\"f\" failed: function body must end with \"end\" opcode @+25",
tester.error_message());
}
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 0ba2b95864..597201da92 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/debug/debug-interface.h"
-#include "src/frames-inl.h"
-#include "src/property-descriptor.h"
-#include "src/utils.h"
+#include "src/execution/frames-inl.h"
+#include "src/objects/property-descriptor.h"
+#include "src/utils/utils.h"
#include "src/wasm/wasm-objects-inl.h"
#include "test/cctest/cctest.h"
@@ -25,7 +25,7 @@ void CheckLocations(
WasmModuleObject module_object, debug::Location start, debug::Location end,
std::initializer_list<debug::Location> expected_locations_init) {
std::vector<debug::BreakLocation> locations;
- bool success = module_object->GetPossibleBreakpoints(start, end, &locations);
+ bool success = module_object.GetPossibleBreakpoints(start, end, &locations);
CHECK(success);
printf("got %d locations: ", static_cast<int>(locations.size()));
@@ -48,7 +48,7 @@ void CheckLocations(
void CheckLocationsFail(WasmModuleObject module_object, debug::Location start,
debug::Location end) {
std::vector<debug::BreakLocation> locations;
- bool success = module_object->GetPossibleBreakpoints(start, end, &locations);
+ bool success = module_object.GetPossibleBreakpoints(start, end, &locations);
CHECK(!success);
}
@@ -205,7 +205,7 @@ class CollectValuesBreakHandler : public debug::DebugDelegate {
Handle<WasmInstanceObject> instance = summ.wasm_instance();
auto frame =
- instance->debug_info()->GetInterpretedFrame(frame_it.frame()->fp(), 0);
+ instance->debug_info().GetInterpretedFrame(frame_it.frame()->fp(), 0);
CHECK_EQ(expected.locals.size(), frame->GetLocalCount());
for (int i = 0; i < frame->GetLocalCount(); ++i) {
CHECK_EQ(WasmValWrapper{expected.locals[i]},
@@ -247,7 +247,7 @@ WASM_COMPILED_EXEC_TEST(WasmCollectPossibleBreakpoints) {
BUILD(runner, WASM_NOP, WASM_I32_ADD(WASM_ZERO, WASM_ONE));
WasmInstanceObject instance = *runner.builder().instance_object();
- WasmModuleObject module_object = instance->module_object();
+ WasmModuleObject module_object = instance.module_object();
std::vector<debug::Location> locations;
// Check all locations for function 0.
@@ -280,7 +280,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleBreak) {
BreakHandler count_breaks(isolate, {{4, BreakHandler::Continue}});
- Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> global(isolate->context().global_object(), isolate);
MaybeHandle<Object> retval =
Execution::Call(isolate, main_fun_wrapper, global, 0, nullptr);
CHECK(!retval.is_null());
@@ -307,7 +307,7 @@ WASM_COMPILED_EXEC_TEST(WasmSimpleStepping) {
{5, BreakHandler::Continue} // I32Add
});
- Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> global(isolate->context().global_object(), isolate);
MaybeHandle<Object> retval =
Execution::Call(isolate, main_fun_wrapper, global, 0, nullptr);
CHECK(!retval.is_null());
@@ -351,7 +351,7 @@ WASM_COMPILED_EXEC_TEST(WasmStepInAndOut) {
{23, BreakHandler::Continue} // After Call
});
- Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> global(isolate->context().global_object(), isolate);
CHECK(!Execution::Call(isolate, main_fun_wrapper, global, 0, nullptr)
.is_null());
}
@@ -396,7 +396,7 @@ WASM_COMPILED_EXEC_TEST(WasmGetLocalsAndStack) {
{wasmVec(7, 17L, 7.f, 8.5), wasmVec()}, // 10: end
});
- Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> global(isolate->context().global_object(), isolate);
Handle<Object> args[]{handle(Smi::FromInt(7), isolate)};
CHECK(!Execution::Call(isolate, main_fun_wrapper, global, 1, args).is_null());
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
index 47d7d2e69b..299c039698 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-import-wrapper-cache.cc
@@ -4,6 +4,7 @@
#include "src/compiler/wasm-compiler.h"
#include "src/wasm/function-compiler.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
@@ -32,17 +33,19 @@ TEST(CacheHit) {
auto module = NewModule(isolate);
TestSignatures sigs;
WasmCodeRefScope wasm_code_ref_scope;
+ WasmImportWrapperCache::ModificationScope cache_scope(
+ module->import_wrapper_cache());
auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch;
- WasmCode* c1 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+ WasmCode* c1 =
+ CompileImportWrapper(isolate->wasm_engine(), module.get(),
+ isolate->counters(), kind, sigs.i_i(), &cache_scope);
CHECK_NOT_NULL(c1);
CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
- WasmCode* c2 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+ WasmCode* c2 = cache_scope[{kind, sigs.i_i()}];
CHECK_NOT_NULL(c2);
CHECK_EQ(c1, c2);
@@ -53,20 +56,21 @@ TEST(CacheMissSig) {
auto module = NewModule(isolate);
TestSignatures sigs;
WasmCodeRefScope wasm_code_ref_scope;
+ WasmImportWrapperCache::ModificationScope cache_scope(
+ module->import_wrapper_cache());
auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch;
- WasmCode* c1 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+ WasmCode* c1 =
+ CompileImportWrapper(isolate->wasm_engine(), module.get(),
+ isolate->counters(), kind, sigs.i_i(), &cache_scope);
CHECK_NOT_NULL(c1);
CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
- WasmCode* c2 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind, sigs.i_ii());
+ WasmCode* c2 = cache_scope[{kind, sigs.i_ii()}];
- CHECK_NOT_NULL(c2);
- CHECK_NE(c1, c2);
+ CHECK_NULL(c2);
}
TEST(CacheMissKind) {
@@ -74,21 +78,22 @@ TEST(CacheMissKind) {
auto module = NewModule(isolate);
TestSignatures sigs;
WasmCodeRefScope wasm_code_ref_scope;
+ WasmImportWrapperCache::ModificationScope cache_scope(
+ module->import_wrapper_cache());
auto kind1 = compiler::WasmImportCallKind::kJSFunctionArityMatch;
auto kind2 = compiler::WasmImportCallKind::kJSFunctionArityMismatch;
- WasmCode* c1 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind1, sigs.i_i());
+ WasmCode* c1 = CompileImportWrapper(isolate->wasm_engine(), module.get(),
+ isolate->counters(), kind1, sigs.i_i(),
+ &cache_scope);
CHECK_NOT_NULL(c1);
CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
- WasmCode* c2 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind2, sigs.i_i());
+ WasmCode* c2 = cache_scope[{kind2, sigs.i_i()}];
- CHECK_NOT_NULL(c2);
- CHECK_NE(c1, c2);
+ CHECK_NULL(c2);
}
TEST(CacheHitMissSig) {
@@ -96,29 +101,34 @@ TEST(CacheHitMissSig) {
auto module = NewModule(isolate);
TestSignatures sigs;
WasmCodeRefScope wasm_code_ref_scope;
+ WasmImportWrapperCache::ModificationScope cache_scope(
+ module->import_wrapper_cache());
auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch;
- WasmCode* c1 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+ WasmCode* c1 =
+ CompileImportWrapper(isolate->wasm_engine(), module.get(),
+ isolate->counters(), kind, sigs.i_i(), &cache_scope);
CHECK_NOT_NULL(c1);
CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind());
- WasmCode* c2 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind, sigs.i_ii());
+ WasmCode* c2 = cache_scope[{kind, sigs.i_ii()}];
+
+ CHECK_NULL(c2);
+
+ c2 = CompileImportWrapper(isolate->wasm_engine(), module.get(),
+ isolate->counters(), kind, sigs.i_ii(),
+ &cache_scope);
- CHECK_NOT_NULL(c2);
CHECK_NE(c1, c2);
- WasmCode* c3 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind, sigs.i_i());
+ WasmCode* c3 = cache_scope[{kind, sigs.i_i()}];
CHECK_NOT_NULL(c3);
CHECK_EQ(c1, c3);
- WasmCode* c4 = module->import_wrapper_cache()->GetOrCompile(
- isolate->wasm_engine(), isolate->counters(), kind, sigs.i_ii());
+ WasmCode* c4 = cache_scope[{kind, sigs.i_ii()}];
CHECK_NOT_NULL(c4);
CHECK_EQ(c2, c4);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index fda981ab43..a6901072de 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -4,9 +4,9 @@
#include <cstdint>
-#include "src/assembler-inl.h"
#include "src/base/overflowing-math.h"
-#include "src/objects-inl.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -59,7 +59,7 @@ class ArgPassingHelper {
Handle<Object> arg_objs[] = {isolate_->factory()->NewNumber(args)...};
uint64_t num_interpreted_before = debug_info_->NumInterpretedCalls();
- Handle<Object> global(isolate_->context()->global_object(), isolate_);
+ Handle<Object> global(isolate_->context().global_object(), isolate_);
MaybeHandle<Object> retval = Execution::Call(
isolate_, main_fun_wrapper_, global, arraysize(arg_objs), arg_objs);
uint64_t num_interpreted_after = debug_info_->NumInterpretedCalls();
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index 7929b23891..901127055e 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -5,10 +5,10 @@
#include <stdlib.h>
#include <string.h>
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/snapshot/code-serializer.h"
-#include "src/version.h"
+#include "src/utils/version.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
@@ -103,7 +103,7 @@ class WasmSerializationTest {
Vector<const byte> deserialized_module_wire_bytes =
module_object->native_module()->wire_bytes();
CHECK_EQ(deserialized_module_wire_bytes.size(), wire_bytes_.size());
- CHECK_EQ(memcmp(deserialized_module_wire_bytes.start(),
+ CHECK_EQ(memcmp(deserialized_module_wire_bytes.begin(),
wire_bytes_.data(), wire_bytes_.size()),
0);
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
index 13f4b23ebc..4fe4425e2f 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-shared-engine.cc
@@ -4,8 +4,8 @@
#include <memory>
-#include "src/microtask-queue.h"
-#include "src/objects-inl.h"
+#include "src/execution/microtask-queue.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
@@ -95,7 +95,7 @@ class SharedEngineIsolate {
}
SharedModule ExportInstance(Handle<WasmInstanceObject> instance) {
- return instance->module_object()->shared_native_module();
+ return instance->module_object().shared_native_module();
}
int32_t Run(Handle<WasmInstanceObject> instance) {
@@ -189,10 +189,11 @@ Handle<WasmInstanceObject> CompileAndInstantiateAsync(
SharedEngineIsolate& isolate, ZoneBuffer* buffer) {
Handle<Object> maybe_instance = handle(Smi::kZero, isolate.isolate());
auto enabled_features = WasmFeaturesFromIsolate(isolate.isolate());
+ constexpr const char* kAPIMethodName = "Test.CompileAndInstantiateAsync";
isolate.isolate()->wasm_engine()->AsyncCompile(
isolate.isolate(), enabled_features,
base::make_unique<MockCompilationResolver>(isolate, &maybe_instance),
- ModuleWireBytes(buffer->begin(), buffer->end()), true);
+ ModuleWireBytes(buffer->begin(), buffer->end()), true, kAPIMethodName);
while (!maybe_instance->IsWasmInstanceObject()) PumpMessageLoop(isolate);
Handle<WasmInstanceObject> instance =
Handle<WasmInstanceObject>::cast(maybe_instance);
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 303fb75878..3fc9614023 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
@@ -20,16 +20,15 @@ using v8::Utils;
namespace {
-#define CHECK_CSTREQ(exp, found) \
- do { \
- const char* exp_ = (exp); \
- const char* found_ = (found); \
- DCHECK_NOT_NULL(exp); \
- if (V8_UNLIKELY(found_ == nullptr || strcmp(exp_, found_) != 0)) { \
- V8_Fatal(__FILE__, __LINE__, \
- "Check failed: (%s) != (%s) ('%s' vs '%s').", #exp, #found, \
- exp_, found_ ? found_ : "<null>"); \
- } \
+#define CHECK_CSTREQ(exp, found) \
+ do { \
+ const char* exp_ = (exp); \
+ const char* found_ = (found); \
+ DCHECK_NOT_NULL(exp); \
+ if (V8_UNLIKELY(found_ == nullptr || strcmp(exp_, found_) != 0)) { \
+ FATAL("Check failed: (%s) != (%s) ('%s' vs '%s').", #exp, #found, exp_, \
+ found_ ? found_ : "<null>"); \
+ } \
} while (false)
void PrintStackTrace(v8::Isolate* isolate, v8::Local<v8::StackTrace> stack) {
@@ -86,10 +85,10 @@ void CheckComputeLocation(v8::internal::Isolate* i_isolate, Handle<Object> exc,
printf("loc start: %d, end: %d\n", loc.start_pos(), loc.end_pos());
Handle<JSMessageObject> message = i_isolate->CreateMessage(exc, nullptr);
printf("msg start: %d, end: %d, line: %d, col: %d\n",
- message->start_position(), message->end_position(),
+ message->GetStartPosition(), message->GetEndPosition(),
message->GetLineNumber(), message->GetColumnNumber());
- CHECK_EQ(loc.start_pos(), message->start_position());
- CHECK_EQ(loc.end_pos(), message->end_position());
+ CHECK_EQ(loc.start_pos(), message->GetStartPosition());
+ CHECK_EQ(loc.end_pos(), message->GetEndPosition());
// In the message, the line is 1-based, but the column is 0-based.
CHECK_EQ(topLocation.line_nr, message->GetLineNumber());
CHECK_LE(1, topLocation.column);
@@ -130,7 +129,7 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
Isolate* isolate = js_wasm_wrapper->GetIsolate();
isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
v8::StackTrace::kOverview);
- Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> global(isolate->context().global_object(), isolate);
MaybeHandle<Object> maybe_exc;
Handle<Object> args[] = {js_wasm_wrapper};
MaybeHandle<Object> returnObjMaybe =
@@ -179,7 +178,7 @@ WASM_EXEC_TEST(CollectDetailedWasmStack_WasmError) {
Isolate* isolate = js_wasm_wrapper->GetIsolate();
isolate->SetCaptureStackTraceForUncaughtExceptions(
true, 10, v8::StackTrace::kOverview);
- Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> global(isolate->context().global_object(), isolate);
MaybeHandle<Object> maybe_exc;
Handle<Object> args[] = {js_wasm_wrapper};
MaybeHandle<Object> maybe_return_obj =
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 7b34ed824b..31e661adb5 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/assembler-inl.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -21,16 +21,15 @@ using v8::Utils;
namespace {
-#define CHECK_CSTREQ(exp, found) \
- do { \
- const char* exp_ = (exp); \
- const char* found_ = (found); \
- DCHECK_NOT_NULL(exp); \
- if (V8_UNLIKELY(found_ == nullptr || strcmp(exp_, found_) != 0)) { \
- V8_Fatal(__FILE__, __LINE__, \
- "Check failed: (%s) != (%s) ('%s' vs '%s').", #exp, #found, \
- exp_, found_ ? found_ : "<null>"); \
- } \
+#define CHECK_CSTREQ(exp, found) \
+ do { \
+ const char* exp_ = (exp); \
+ const char* found_ = (found); \
+ DCHECK_NOT_NULL(exp); \
+ if (V8_UNLIKELY(found_ == nullptr || strcmp(exp_, found_) != 0)) { \
+ FATAL("Check failed: (%s) != (%s) ('%s' vs '%s').", #exp, #found, exp_, \
+ found_ ? found_ : "<null>"); \
+ } \
} while (false)
struct ExceptionInfo {
@@ -84,7 +83,7 @@ WASM_EXEC_TEST(Unreachable) {
Isolate* isolate = js_wasm_wrapper->GetIsolate();
isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
v8::StackTrace::kOverview);
- Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> global(isolate->context().global_object(), isolate);
MaybeHandle<Object> maybe_exc;
Handle<Object> args[] = {js_wasm_wrapper};
MaybeHandle<Object> returnObjMaybe =
@@ -127,7 +126,7 @@ WASM_EXEC_TEST(IllegalLoad) {
Isolate* isolate = js_wasm_wrapper->GetIsolate();
isolate->SetCaptureStackTraceForUncaughtExceptions(true, 10,
v8::StackTrace::kOverview);
- Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> global(isolate->context().global_object(), isolate);
MaybeHandle<Object> maybe_exc;
Handle<Object> args[] = {js_wasm_wrapper};
MaybeHandle<Object> returnObjMaybe =
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 70a773948d..c006966160 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -4,10 +4,11 @@
#include "test/cctest/wasm/wasm-run-utils.h"
-#include "src/assembler-inl.h"
-#include "src/code-tracer.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/diagnostics/code-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/wasm/graph-builder-interface.h"
+#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -48,8 +49,15 @@ TestingModuleBuilder::TestingModuleBuilder(
CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
auto kind = compiler::GetWasmImportCallKind(maybe_import->js_function,
maybe_import->sig, false);
- auto import_wrapper = native_module_->import_wrapper_cache()->GetOrCompile(
- isolate_->wasm_engine(), isolate_->counters(), kind, maybe_import->sig);
+ WasmImportWrapperCache::ModificationScope cache_scope(
+ native_module_->import_wrapper_cache());
+ WasmImportWrapperCache::CacheKey key(kind, maybe_import->sig);
+ auto import_wrapper = cache_scope[key];
+ if (import_wrapper == nullptr) {
+ import_wrapper = CompileImportWrapper(
+ isolate_->wasm_engine(), native_module_, isolate_->counters(), kind,
+ maybe_import->sig, &cache_scope);
+ }
ImportedFunctionEntry(instance_object_, maybe_import_index)
.SetWasmToJs(isolate_, maybe_import->js_function, import_wrapper);
@@ -137,27 +145,20 @@ uint32_t TestingModuleBuilder::AddFunction(FunctionSig* sig, const char* name,
return index;
}
+void TestingModuleBuilder::FreezeSignatureMapAndInitializeWrapperCache() {
+ if (test_module_->signature_map.is_frozen()) return;
+ test_module_->signature_map.Freeze();
+ size_t max_num_sigs = MaxNumExportWrappers(test_module_.get());
+ Handle<FixedArray> export_wrappers =
+ isolate_->factory()->NewFixedArray(static_cast<int>(max_num_sigs));
+ instance_object_->module_object().set_export_wrappers(*export_wrappers);
+}
+
Handle<JSFunction> TestingModuleBuilder::WrapCode(uint32_t index) {
+ FreezeSignatureMapAndInitializeWrapperCache();
SetExecutable();
- FunctionSig* sig = test_module_->functions[index].sig;
- MaybeHandle<Code> maybe_ret_code =
- compiler::CompileJSToWasmWrapper(isolate_, sig, false);
- Handle<Code> ret_code = maybe_ret_code.ToHandleChecked();
- Handle<JSFunction> ret = WasmExportedFunction::New(
- isolate_, instance_object(), MaybeHandle<String>(),
- static_cast<int>(index), static_cast<int>(sig->parameter_count()),
- ret_code);
-
- // Add reference to the exported wrapper code.
- Handle<WasmModuleObject> module_object(instance_object()->module_object(),
- isolate_);
- Handle<FixedArray> old_arr(module_object->export_wrappers(), isolate_);
- Handle<FixedArray> new_arr =
- isolate_->factory()->NewFixedArray(old_arr->length() + 1);
- old_arr->CopyTo(0, *new_arr, 0, old_arr->length());
- new_arr->set(old_arr->length(), *ret_code);
- module_object->set_export_wrappers(*new_arr);
- return ret;
+ return WasmInstanceObject::GetOrCreateWasmExportedFunction(
+ isolate_, instance_object(), index);
}
void TestingModuleBuilder::AddIndirectFunctionTable(
@@ -206,9 +207,9 @@ uint32_t TestingModuleBuilder::AddBytes(Vector<const byte> bytes) {
size_t new_size = bytes_offset + bytes.size();
OwnedVector<uint8_t> new_bytes = OwnedVector<uint8_t>::New(new_size);
if (old_size > 0) {
- memcpy(new_bytes.start(), old_bytes.start(), old_size);
+ memcpy(new_bytes.start(), old_bytes.begin(), old_size);
}
- memcpy(new_bytes.start() + bytes_offset, bytes.start(), bytes.length());
+ memcpy(new_bytes.start() + bytes_offset, bytes.begin(), bytes.length());
native_module_->SetWireBytes(std::move(new_bytes));
return bytes_offset;
}
@@ -248,7 +249,7 @@ uint32_t TestingModuleBuilder::AddPassiveDataSegment(Vector<const byte> bytes) {
Address new_data_address =
reinterpret_cast<Address>(data_segment_data_.data());
- memcpy(data_segment_data_.data() + old_data_size, bytes.start(),
+ memcpy(data_segment_data_.data() + old_data_size, bytes.begin(),
bytes.length());
// The data_segment_data_ offset may have moved, so update all the starts.
@@ -415,7 +416,8 @@ void WasmFunctionWrapper::Init(CallDescriptor* call_descriptor,
if (!return_type.IsNone()) {
effect = graph()->NewNode(
machine()->Store(compiler::StoreRepresentation(
- return_type.representation(), WriteBarrierKind::kNoWriteBarrier)),
+ return_type.representation(),
+ compiler::WriteBarrierKind::kNoWriteBarrier)),
graph()->NewNode(common()->Parameter(param_types.length()),
graph()->start()),
graph()->NewNode(common()->Int32Constant(0)), call, effect,
@@ -498,18 +500,18 @@ void WasmFunctionCompiler::Build(const byte* start, const byte* end) {
Vector<const uint8_t> wire_bytes = builder_->instance_object()
->module_object()
- ->native_module()
+ .native_module()
->wire_bytes();
CompilationEnv env = builder_->CreateCompilationEnv();
ScopedVector<uint8_t> func_wire_bytes(function_->code.length());
- memcpy(func_wire_bytes.start(), wire_bytes.start() + function_->code.offset(),
+ memcpy(func_wire_bytes.begin(), wire_bytes.begin() + function_->code.offset(),
func_wire_bytes.length());
FunctionBody func_body{function_->sig, function_->code.offset(),
- func_wire_bytes.start(), func_wire_bytes.end()};
+ func_wire_bytes.begin(), func_wire_bytes.end()};
NativeModule* native_module =
- builder_->instance_object()->module_object()->native_module();
+ builder_->instance_object()->module_object().native_module();
WasmCompilationUnit unit(function_->func_index, builder_->execution_tier());
WasmFeatures unused_detected_features;
WasmCompilationResult result = unit.ExecuteCompilation(
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 2317e048f4..98ec5e1048 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -13,6 +13,7 @@
#include <memory>
#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/int64-lowering.h"
@@ -38,7 +39,7 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/call-tester.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/graph-and-builders.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/common/wasm/flag-utils.h"
@@ -169,7 +170,7 @@ class TestingModuleBuilder {
void SetMaxMemPages(uint32_t maximum_pages) {
test_module_->maximum_pages = maximum_pages;
if (instance_object()->has_memory_object()) {
- instance_object()->memory_object()->set_maximum_pages(maximum_pages);
+ instance_object()->memory_object().set_maximum_pages(maximum_pages);
}
}
@@ -178,6 +179,10 @@ class TestingModuleBuilder {
enum FunctionType { kImport, kWasm };
uint32_t AddFunction(FunctionSig* sig, const char* name, FunctionType type);
+ // Freezes the signature map of the module and allocates the storage for
+ // export wrappers.
+ void FreezeSignatureMapAndInitializeWrapperCache();
+
// Wrap the code so it can be called as a JS function.
Handle<JSFunction> WrapCode(uint32_t index);
@@ -379,6 +384,7 @@ class WasmRunnerBase : public HandleAndZoneScope {
const char* name = nullptr) {
functions_.emplace_back(
new WasmFunctionCompiler(&zone_, sig, &builder_, name));
+ builder().AddSignature(sig);
return *functions_.back();
}
@@ -521,7 +527,7 @@ class WasmRunner : public WasmRunnerBase {
jsfuncs_[function_index] = builder_.WrapCode(function_index);
}
Handle<JSFunction> jsfunc = jsfuncs_[function_index];
- Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> global(isolate->context().global_object(), isolate);
MaybeHandle<Object> retval =
Execution::TryCall(isolate, jsfunc, global, count, buffer,
Execution::MessageHandling::kReport, nullptr);
@@ -534,7 +540,7 @@ class WasmRunner : public WasmRunnerBase {
CHECK_EQ(expected, Smi::ToInt(*result));
} else {
CHECK(result->IsHeapNumber());
- CHECK_DOUBLE_EQ(expected, HeapNumber::cast(*result)->value());
+ CHECK_DOUBLE_EQ(expected, HeapNumber::cast(*result).value());
}
}
diff --git a/deps/v8/test/common/assembler-tester.h b/deps/v8/test/common/assembler-tester.h
index 4b3499b149..4c3d8ff618 100644
--- a/deps/v8/test/common/assembler-tester.h
+++ b/deps/v8/test/common/assembler-tester.h
@@ -5,8 +5,8 @@
#ifndef V8_TEST_COMMON_ASSEMBLER_TESTER_H_
#define V8_TEST_COMMON_ASSEMBLER_TESTER_H_
-#include "src/assembler.h"
-#include "src/code-desc.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/code-desc.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/common/types-fuzz.h b/deps/v8/test/common/types-fuzz.h
index f539ed9701..06ab9067d8 100644
--- a/deps/v8/test/common/types-fuzz.h
+++ b/deps/v8/test/common/types-fuzz.h
@@ -29,9 +29,9 @@
#define V8_TEST_CCTEST_TYPES_H_
#include "src/base/utils/random-number-generator.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
namespace v8 {
namespace internal {
@@ -135,8 +135,8 @@ class Types {
Type Integer;
- typedef std::vector<Type> TypeVector;
- typedef std::vector<Handle<i::Object> > ValueVector;
+ using TypeVector = std::vector<Type>;
+ using ValueVector = std::vector<Handle<i::Object> >;
TypeVector types;
ValueVector values;
diff --git a/deps/v8/test/common/wasm/test-signatures.h b/deps/v8/test/common/wasm/test-signatures.h
index 8b47720870..120e81cf1e 100644
--- a/deps/v8/test/common/wasm/test-signatures.h
+++ b/deps/v8/test/common/wasm/test-signatures.h
@@ -5,7 +5,7 @@
#ifndef TEST_SIGNATURES_H
#define TEST_SIGNATURES_H
-#include "src/signature.h"
+#include "src/codegen/signature.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-opcodes.h"
@@ -25,8 +25,9 @@ class TestSignatures {
sig_i_ff(1, 2, kIntFloatTypes4),
sig_i_d(1, 1, kIntDoubleTypes4),
sig_i_dd(1, 2, kIntDoubleTypes4),
- sig_i_r(1, 1, kIntRefTypes4),
- sig_i_rr(1, 2, kIntRefTypes4),
+ sig_i_r(1, 1, kIntAnyRefTypes4),
+ sig_i_rr(1, 2, kIntAnyRefTypes4),
+ sig_i_a(1, 1, kIntAnyFuncTypes4),
sig_l_v(1, 0, kLongTypes4),
sig_l_l(1, 1, kLongTypes4),
sig_l_ll(1, 2, kLongTypes4),
@@ -36,10 +37,15 @@ class TestSignatures {
sig_d_d(1, 1, kDoubleTypes4),
sig_d_dd(1, 2, kDoubleTypes4),
sig_r_v(1, 0, kRefTypes4),
+ sig_a_v(1, 0, kFuncTypes4),
+ sig_r_r(1, 1, kRefTypes4),
+ sig_a_a(1, 1, kFuncTypes4),
sig_v_v(0, 0, kIntTypes4),
sig_v_i(0, 1, kIntTypes4),
sig_v_ii(0, 2, kIntTypes4),
sig_v_iii(0, 3, kIntTypes4),
+ sig_v_r(0, 1, kRefTypes4),
+ sig_v_a(0, 1, kFuncTypes4),
sig_s_i(1, 1, kSimd128IntTypes4),
sig_ii_v(2, 0, kIntTypes4),
sig_iii_v(3, 0, kIntTypes4) {
@@ -49,15 +55,18 @@ class TestSignatures {
for (int i = 0; i < 4; i++) kFloatTypes4[i] = kWasmF32;
for (int i = 0; i < 4; i++) kDoubleTypes4[i] = kWasmF64;
for (int i = 0; i < 4; i++) kRefTypes4[i] = kWasmAnyRef;
- for (int i = 0; i < 4; i++) kIntLongTypes4[i] = kWasmI64;
- for (int i = 0; i < 4; i++) kIntFloatTypes4[i] = kWasmF32;
- for (int i = 0; i < 4; i++) kIntDoubleTypes4[i] = kWasmF64;
- for (int i = 0; i < 4; i++) kIntRefTypes4[i] = kWasmAnyRef;
+ for (int i = 0; i < 4; i++) kFuncTypes4[i] = kWasmAnyFunc;
+ for (int i = 1; i < 4; i++) kIntLongTypes4[i] = kWasmI64;
+ for (int i = 1; i < 4; i++) kIntFloatTypes4[i] = kWasmF32;
+ for (int i = 1; i < 4; i++) kIntDoubleTypes4[i] = kWasmF64;
+ for (int i = 1; i < 4; i++) kIntAnyRefTypes4[i] = kWasmAnyRef;
+ for (int i = 1; i < 4; i++) kIntAnyFuncTypes4[i] = kWasmAnyFunc;
for (int i = 0; i < 4; i++) kSimd128IntTypes4[i] = kWasmS128;
kIntLongTypes4[0] = kWasmI32;
kIntFloatTypes4[0] = kWasmI32;
kIntDoubleTypes4[0] = kWasmI32;
- kIntRefTypes4[0] = kWasmI32;
+ kIntAnyRefTypes4[0] = kWasmI32;
+ kIntAnyFuncTypes4[0] = kWasmI32;
kSimd128IntTypes4[1] = kWasmI32;
}
@@ -77,6 +86,7 @@ class TestSignatures {
FunctionSig* i_ll() { return &sig_i_ll; }
FunctionSig* i_r() { return &sig_i_r; }
FunctionSig* i_rr() { return &sig_i_rr; }
+ FunctionSig* i_a() { return &sig_i_a; }
FunctionSig* f_f() { return &sig_f_f; }
FunctionSig* f_ff() { return &sig_f_ff; }
@@ -84,11 +94,16 @@ class TestSignatures {
FunctionSig* d_dd() { return &sig_d_dd; }
FunctionSig* r_v() { return &sig_r_v; }
+ FunctionSig* a_v() { return &sig_a_v; }
+ FunctionSig* r_r() { return &sig_r_r; }
+ FunctionSig* a_a() { return &sig_a_a; }
FunctionSig* v_v() { return &sig_v_v; }
FunctionSig* v_i() { return &sig_v_i; }
FunctionSig* v_ii() { return &sig_v_ii; }
FunctionSig* v_iii() { return &sig_v_iii; }
+ FunctionSig* v_r() { return &sig_v_r; }
+ FunctionSig* v_a() { return &sig_v_a; }
FunctionSig* s_i() { return &sig_s_i; }
FunctionSig* ii_v() { return &sig_ii_v; }
@@ -109,10 +124,12 @@ class TestSignatures {
ValueType kFloatTypes4[4];
ValueType kDoubleTypes4[4];
ValueType kRefTypes4[4];
+ ValueType kFuncTypes4[4];
ValueType kIntLongTypes4[4];
ValueType kIntFloatTypes4[4];
ValueType kIntDoubleTypes4[4];
- ValueType kIntRefTypes4[4];
+ ValueType kIntAnyRefTypes4[4];
+ ValueType kIntAnyFuncTypes4[4];
ValueType kSimd128IntTypes4[4];
FunctionSig sig_i_v;
@@ -126,6 +143,7 @@ class TestSignatures {
FunctionSig sig_i_dd;
FunctionSig sig_i_r;
FunctionSig sig_i_rr;
+ FunctionSig sig_i_a;
FunctionSig sig_l_v;
FunctionSig sig_l_l;
@@ -138,11 +156,16 @@ class TestSignatures {
FunctionSig sig_d_dd;
FunctionSig sig_r_v;
+ FunctionSig sig_a_v;
+ FunctionSig sig_r_r;
+ FunctionSig sig_a_a;
FunctionSig sig_v_v;
FunctionSig sig_v_i;
FunctionSig sig_v_ii;
FunctionSig sig_v_iii;
+ FunctionSig sig_v_r;
+ FunctionSig sig_v_a;
FunctionSig sig_s_i;
FunctionSig sig_ii_v;
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index cdeb3d3cdf..ecdd0a8b30 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -140,6 +140,18 @@
kExprCatch, catchstmt, kExprEnd
#define WASM_SELECT(tval, fval, cond) tval, fval, cond, kExprSelect
+#define WASM_SELECT_I(tval, fval, cond) \
+ tval, fval, cond, kExprSelectWithType, U32V_1(1), kLocalI32
+#define WASM_SELECT_L(tval, fval, cond) \
+ tval, fval, cond, kExprSelectWithType, U32V_1(1), kLocalI64
+#define WASM_SELECT_F(tval, fval, cond) \
+ tval, fval, cond, kExprSelectWithType, U32V_1(1), kLocalF32
+#define WASM_SELECT_D(tval, fval, cond) \
+ tval, fval, cond, kExprSelectWithType, U32V_1(1), kLocalF64
+#define WASM_SELECT_R(tval, fval, cond) \
+ tval, fval, cond, kExprSelectWithType, U32V_1(1), kLocalAnyRef
+#define WASM_SELECT_A(tval, fval, cond) \
+ tval, fval, cond, kExprSelectWithType, U32V_1(1), kLocalAnyFunc
#define WASM_RETURN0 kExprReturn
#define WASM_RETURN1(val) val, kExprReturn
@@ -346,6 +358,7 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 56)
#define WASM_REF_NULL kExprRefNull
+#define WASM_REF_FUNC(val) kExprRefFunc, val
#define WASM_REF_IS_NULL(val) val, kExprRefIsNull
#define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
@@ -618,6 +631,13 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define WASM_ELEM_DROP(seg) WASM_NUMERIC_OP(kExprElemDrop), U32V_1(seg)
#define WASM_TABLE_COPY(dst, src, size) \
dst, src, size, WASM_NUMERIC_OP(kExprTableCopy), TABLE_ZERO, TABLE_ZERO
+#define WASM_TABLE_GROW(table, initial_value, delta) \
+ initial_value, delta, WASM_NUMERIC_OP(kExprTableGrow), \
+ static_cast<byte>(table)
+#define WASM_TABLE_SIZE(table) \
+ WASM_NUMERIC_OP(kExprTableSize), static_cast<byte>(table)
+#define WASM_TABLE_FILL(table, times, value, start) \
+ times, value, start, WASM_NUMERIC_OP(kExprTableFill), static_cast<byte>(table)
//------------------------------------------------------------------------------
// Memory Operations.
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index d5ee914ffc..d193cbc5ac 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -4,11 +4,11 @@
#include "test/common/wasm/wasm-module-runner.h"
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
#include "src/objects/heap-number-inl.h"
-#include "src/property-descriptor.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/property-descriptor.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-interpreter.h"
@@ -26,14 +26,21 @@ uint32_t GetInitialMemSize(const WasmModule* module) {
return kWasmPageSize * module->initial_pages;
}
-MaybeHandle<WasmInstanceObject> CompileAndInstantiateForTesting(
- Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) {
+MaybeHandle<WasmModuleObject> CompileForTesting(Isolate* isolate,
+ ErrorThrower* thrower,
+ const ModuleWireBytes& bytes) {
auto enabled_features = WasmFeaturesFromIsolate(isolate);
MaybeHandle<WasmModuleObject> module = isolate->wasm_engine()->SyncCompile(
isolate, enabled_features, thrower, bytes);
DCHECK_EQ(thrower->error(), module.is_null());
- if (module.is_null()) return {};
+ return module;
+}
+MaybeHandle<WasmInstanceObject> CompileAndInstantiateForTesting(
+ Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes) {
+ MaybeHandle<WasmModuleObject> module =
+ CompileForTesting(isolate, thrower, bytes);
+ if (module.is_null()) return {};
return isolate->wasm_engine()->SyncInstantiate(
isolate, thrower, module.ToHandleChecked(), {}, {});
}
@@ -254,7 +261,7 @@ int32_t CallWasmFunctionForTesting(Isolate* isolate,
return Smi::ToInt(*result);
}
if (result->IsHeapNumber()) {
- return static_cast<int32_t>(HeapNumber::cast(*result)->value());
+ return static_cast<int32_t>(HeapNumber::cast(*result).value());
}
thrower->RuntimeError(
"Calling exported wasm function failed: Return value should be number");
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.h b/deps/v8/test/common/wasm/wasm-module-runner.h
index f3ed508e40..e78b852a7e 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.h
+++ b/deps/v8/test/common/wasm/wasm-module-runner.h
@@ -5,8 +5,8 @@
#ifndef V8_WASM_MODULE_RUNNER_H_
#define V8_WASM_MODULE_RUNNER_H_
-#include "src/isolate.h"
-#include "src/objects.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
@@ -57,6 +57,11 @@ bool InterpretWasmModuleForTesting(Isolate* isolate,
int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end);
+// Decode and compile the given module with no imports.
+MaybeHandle<WasmModuleObject> CompileForTesting(Isolate* isolate,
+ ErrorThrower* thrower,
+ const ModuleWireBytes& bytes);
+
// Decode, compile, and instantiate the given module with no imports.
MaybeHandle<WasmInstanceObject> CompileAndInstantiateForTesting(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
diff --git a/deps/v8/test/debugger/debug/compiler/debug-catch-prediction.js b/deps/v8/test/debugger/debug/compiler/debug-catch-prediction.js
index 50fbf58222..60ac95b72c 100644
--- a/deps/v8/test/debugger/debug/compiler/debug-catch-prediction.js
+++ b/deps/v8/test/debugger/debug/compiler/debug-catch-prediction.js
@@ -34,6 +34,7 @@ Debug.setListener(listener);
return e;
}
}
+ %PrepareFunctionForOptimization(f);
assertEquals("boom1", f(1));
assertEquals("boom2", f(2));
%OptimizeFunctionOnNextCall(f);
@@ -51,6 +52,7 @@ Debug.setListener(listener);
return a + 10;
}
}
+ %PrepareFunctionForOptimization(f);
assertEquals(11, f(1));
assertEquals(12, f(2));
%OptimizeFunctionOnNextCall(f);
@@ -70,6 +72,7 @@ Debug.setListener(listener);
// Nothing.
}
}
+ %PrepareFunctionForOptimization(f);
assertEquals("wosh11", f(1));
assertEquals("wosh22", f(2));
%OptimizeFunctionOnNextCall(f);
@@ -91,6 +94,7 @@ Debug.setListener(listener);
return e + a;
}
}
+ %PrepareFunctionForOptimization(f);
assertEquals("bang11", f(1));
assertEquals("bang22", f(2));
%OptimizeFunctionOnNextCall(f);
@@ -112,6 +116,7 @@ Debug.setListener(listener);
return a + 10;
}
}
+ %PrepareFunctionForOptimization(f);
assertEquals(11, f(1));
assertEquals(12, f(2));
%OptimizeFunctionOnNextCall(f);
@@ -133,6 +138,7 @@ Debug.setListener(listener);
return a + 10;
}
}
+ %PrepareFunctionForOptimization(f);
assertEquals(11, f(1));
assertEquals(12, f(2));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/debugger/debug/compiler/osr-typing-debug-change.js b/deps/v8/test/debugger/debug/compiler/osr-typing-debug-change.js
index 92eb899036..e0346dd358 100644
--- a/deps/v8/test/debugger/debug/compiler/osr-typing-debug-change.js
+++ b/deps/v8/test/debugger/debug/compiler/osr-typing-debug-change.js
@@ -37,6 +37,7 @@ function ChangeSmiConstantAndOsr() {
}
return j;
}
+%PrepareFunctionForOptimization(ChangeSmiConstantAndOsr);
var r1 = ChangeSmiConstantAndOsr();
if (changed) {
assertEquals("result", r1);
@@ -54,6 +55,7 @@ function ChangeFloatConstantAndOsr() {
}
return j;
}
+%PrepareFunctionForOptimization(ChangeFloatConstantAndOsr);
var r2 = ChangeFloatConstantAndOsr();
if (changed) {
assertEquals("result", r2);
@@ -72,6 +74,7 @@ function ChangeFloatVarAndOsr() {
}
return j;
}
+%PrepareFunctionForOptimization(ChangeFloatVarAndOsr);
var r3 = ChangeFloatVarAndOsr();
if (changed) {
assertEquals("result0.1", r3);
@@ -105,6 +108,7 @@ function ChangeIntVarAndOsr() {
}
return j;
}
+%PrepareFunctionForOptimization(ChangeIntVarAndOsr);
var r4 = ChangeIntVarAndOsr();
if (changed) {
diff --git a/deps/v8/test/debugger/debug/debug-break-inline.js b/deps/v8/test/debugger/debug/debug-break-inline.js
index 18574ecea1..1b23f3de28 100644
--- a/deps/v8/test/debugger/debug/debug-break-inline.js
+++ b/deps/v8/test/debugger/debug/debug-break-inline.js
@@ -63,6 +63,7 @@ function h() {
debugger;
}
+%PrepareFunctionForOptimization(f);
f();f();f();
%OptimizeFunctionOnNextCall(f);
f();
diff --git a/deps/v8/test/debugger/debug/debug-compile-optimized.js b/deps/v8/test/debugger/debug/debug-compile-optimized.js
index 33f199ac51..a48b2502a0 100644
--- a/deps/v8/test/debugger/debug/debug-compile-optimized.js
+++ b/deps/v8/test/debugger/debug/debug-compile-optimized.js
@@ -9,6 +9,7 @@ Debug = debug.Debug;
Debug.setListener(function() {});
function f() {}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -17,6 +18,7 @@ assertOptimized(f);
var bp = Debug.setBreakPoint(f);
assertUnoptimized(f);
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
@@ -24,6 +26,7 @@ f();
assertUnoptimized(f);
Debug.clearBreakPoint(bp);
+%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f();
assertOptimized(f);
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-arguments.js b/deps/v8/test/debugger/debug/debug-evaluate-arguments.js
index 8cf18d7dc8..0c65a8644b 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-arguments.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-arguments.js
@@ -30,6 +30,7 @@ function foo(a) {
}
return bar(1,2,a);
}
+%PrepareFunctionForOptimization(foo);
listened = false;
foo_expected = [3];
@@ -53,6 +54,7 @@ assertTrue(listened);
listened = false;
foo_expected = [3,4,5];
bar_expected = [1,2,3];
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
assertEquals(6, foo(3,4,5));
assertTrue(listened);
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized-double.js b/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized-double.js
index fa33725d6f..2160b977e4 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized-double.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized-double.js
@@ -131,6 +131,7 @@ function listener(event, exec_state, event_data, data) {
};
};
+%PrepareFunctionForOptimization(f);
for (var i = 0; i < 4; i++) f(input.length - 1, 11.11, 12.12);
%OptimizeFunctionOnNextCall(f);
f(input.length - 1, 11.11, 12.12);
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized.js b/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized.js
index be87068421..057cefff0f 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized.js
@@ -122,6 +122,7 @@ function listener(event, exec_state, event_data, data) {
};
};
+%PrepareFunctionForOptimization(f);
for (var i = 0; i < 4; i++) f(expected.length - 1, 11, 12);
%OptimizeFunctionOnNextCall(f);
f(expected.length - 1, 11, 12);
diff --git a/deps/v8/test/debugger/debug/debug-liveedit-inline.js b/deps/v8/test/debugger/debug/debug-liveedit-inline.js
index fd9f28eb23..1e0671e4f0 100644
--- a/deps/v8/test/debugger/debug/debug-liveedit-inline.js
+++ b/deps/v8/test/debugger/debug/debug-liveedit-inline.js
@@ -12,8 +12,9 @@ eval("var something1 = 25; "
function foo() { return ChooseAnimal() }
+%PrepareFunctionForOptimization(foo);
assertEquals("Cat", foo());
- %OptimizeFunctionOnNextCall(foo);
+%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/debugger/debug/debug-materialized.js b/deps/v8/test/debugger/debug/debug-materialized.js
index dd22e1eb79..857bbb8c60 100644
--- a/deps/v8/test/debugger/debug/debug-materialized.js
+++ b/deps/v8/test/debugger/debug/debug-materialized.js
@@ -18,6 +18,8 @@ function bar() {
return t.a;
}
+%PrepareFunctionForOptimization(foo);
+%PrepareFunctionForOptimization(bar);
foo(1);
foo(1);
bar(1);
diff --git a/deps/v8/test/debugger/debug/debug-optimize.js b/deps/v8/test/debugger/debug/debug-optimize.js
index 7ee65e29f6..1945683a72 100644
--- a/deps/v8/test/debugger/debug/debug-optimize.js
+++ b/deps/v8/test/debugger/debug/debug-optimize.js
@@ -24,6 +24,7 @@ function f4() {
function optimize(f) {
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/debugger/debug/debug-scopes.js b/deps/v8/test/debugger/debug/debug-scopes.js
index dc4b2882cd..08e8d36623 100644
--- a/deps/v8/test/debugger/debug/debug-scopes.js
+++ b/deps/v8/test/debugger/debug/debug-scopes.js
@@ -457,6 +457,7 @@ function with_7() {
debugger;
}
}
+%PrepareFunctionForOptimization(with_7);
listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.With,
@@ -825,6 +826,7 @@ listener_delegate = function(exec_state) {
CheckScopeChainNames(
["closure_11", "closure_10", undefined, undefined], exec_state);
};
+%PrepareFunctionForOptimization(closure_10);
begin_test_count++; closure_10(5); end_test_count++;
begin_test_count++; closure_10(5); end_test_count++;
%OptimizeFunctionOnNextCall(closure_10);
@@ -1152,6 +1154,7 @@ function catch_block_7() {
debugger;
}
};
+%PrepareFunctionForOptimization(catch_block_7);
listener_delegate = function(exec_state) {
diff --git a/deps/v8/test/debugger/debug/debug-step-turbofan.js b/deps/v8/test/debugger/debug/debug-step-turbofan.js
index a40114b28b..0ffc2c88ff 100644
--- a/deps/v8/test/debugger/debug/debug-step-turbofan.js
+++ b/deps/v8/test/debugger/debug/debug-step-turbofan.js
@@ -44,6 +44,7 @@ function listener(event, exec_state, event_data, data) {
}
}
+%PrepareFunctionForOptimization(g);
f(0);
f(0);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js b/deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js
index 2b8ebb1c0b..d0658fceac 100644
--- a/deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/reject-in-constructor-opt.js
@@ -39,6 +39,8 @@ function bar(a,b) {
throw new Error("uncaught"); // EXCEPTION
}
+%PrepareFunctionForOptimization(foo);
+
foo();
%PerformMicrotaskCheckpoint();
@@ -52,6 +54,7 @@ foo();
%PerformMicrotaskCheckpoint();
%NeverOptimizeFunction(bar);
+%PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
// bar does not get inlined into foo.
diff --git a/deps/v8/test/debugger/debug/for-in-opt.js b/deps/v8/test/debugger/debug/for-in-opt.js
index 405199d538..2b97e49c79 100644
--- a/deps/v8/test/debugger/debug/for-in-opt.js
+++ b/deps/v8/test/debugger/debug/for-in-opt.js
@@ -14,6 +14,8 @@ function f(o) {
return result;
}
+%PrepareFunctionForOptimization(f);
+
assertEquals(["0"], f("a"));
assertEquals(["0"], f("a"));
@@ -62,6 +64,8 @@ function check_f2() {
property_descriptor_keys.length = 0;
}
+%PrepareFunctionForOptimization(f2);
+
check_f2();
check_f2();
@@ -71,6 +75,7 @@ deopt_enum = true;
check_f2();
// Test lazy deopt after FILTER_KEY
+%PrepareFunctionForOptimization(f2);
%OptimizeFunctionOnNextCall(f2);
deopt_property_descriptor = true;
check_f2();
@@ -81,6 +86,7 @@ function f3(o) {
}
}
+%PrepareFunctionForOptimization(f3);
f3({__proto__:{x:1}});
f3({__proto__:{x:1}});
@@ -106,6 +112,8 @@ function check_f4() {
property_descriptor_keys.length = 0;
}
+%PrepareFunctionForOptimization(f4);
+
check_f4();
check_f4();
@@ -146,6 +154,7 @@ function f5() {
x = false;
+%PrepareFunctionForOptimization(f5);
f5(); f5(); f5();
%OptimizeFunctionOnNextCall(f5);
x = true;
diff --git a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js
index d8452ff76d..4c0f188f87 100644
--- a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js
+++ b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js
@@ -678,6 +678,7 @@ function catch_block_7() {
}
};
+%PrepareFunctionForOptimization(catch_block_7);
listener_delegate = function(exec_state) {
CheckScopeChain([debug.ScopeType.Catch,
diff --git a/deps/v8/test/debugger/debug/ignition/optimized-debug-frame.js b/deps/v8/test/debugger/debug/ignition/optimized-debug-frame.js
index cc85b4786a..a317350be0 100644
--- a/deps/v8/test/debugger/debug/ignition/optimized-debug-frame.js
+++ b/deps/v8/test/debugger/debug/ignition/optimized-debug-frame.js
@@ -23,6 +23,7 @@ function listener(event, exec_state, event_data, data) {
break_count++;
}
+%PrepareFunctionForOptimization(g);
g();
g();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js b/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js
index bc4cd29cee..6820dc05de 100644
--- a/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js
+++ b/deps/v8/test/debugger/debug/lazy-deopt-then-flush-bytecode.js
@@ -15,6 +15,7 @@ function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
// Optimize foo.
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo();
assertOptimized(foo);
diff --git a/deps/v8/test/debugger/debug/regress/regress-392114.js b/deps/v8/test/debugger/debug/regress/regress-392114.js
index b9ca4ed2a7..26619092ee 100644
--- a/deps/v8/test/debugger/debug/regress/regress-392114.js
+++ b/deps/v8/test/debugger/debug/regress/regress-392114.js
@@ -52,6 +52,7 @@ c();
Debug.setListener(function () {});
var d = create_closure();
+%PrepareFunctionForOptimization(d);
%OptimizeFunctionOnNextCall(d);
// Thanks to the debugger, we recreate the full code too. We deopt and run
// it, stomping on the unexpected AllocationSite in the type vector slot.
diff --git a/deps/v8/test/debugger/debug/regress/regress-4309-1.js b/deps/v8/test/debugger/debug/regress/regress-4309-1.js
index 2e7ef47c09..ef1aee60a4 100644
--- a/deps/v8/test/debugger/debug/regress/regress-4309-1.js
+++ b/deps/v8/test/debugger/debug/regress/regress-4309-1.js
@@ -25,6 +25,8 @@ function f() {
debugger;
}
+%PrepareFunctionForOptimization(f);
+
f();
f();
diff --git a/deps/v8/test/debugger/debug/regress/regress-4309-2.js b/deps/v8/test/debugger/debug/regress/regress-4309-2.js
index e93c8ec56e..698be3c484 100644
--- a/deps/v8/test/debugger/debug/regress/regress-4309-2.js
+++ b/deps/v8/test/debugger/debug/regress/regress-4309-2.js
@@ -22,6 +22,8 @@ function f() {
debugger;
}
+%PrepareFunctionForOptimization(f);
+
f();
f();
diff --git a/deps/v8/test/debugger/debug/regress/regress-4309-3.js b/deps/v8/test/debugger/debug/regress/regress-4309-3.js
index 026a7cb5cb..71664d66ca 100644
--- a/deps/v8/test/debugger/debug/regress/regress-4309-3.js
+++ b/deps/v8/test/debugger/debug/regress/regress-4309-3.js
@@ -27,6 +27,8 @@ function f() {
}
}
+%PrepareFunctionForOptimization(f);
+
f();
f();
diff --git a/deps/v8/test/debugger/debug/regress/regress-4320.js b/deps/v8/test/debugger/debug/regress/regress-4320.js
index 5d88cc33d4..763a9f340f 100644
--- a/deps/v8/test/debugger/debug/regress/regress-4320.js
+++ b/deps/v8/test/debugger/debug/regress/regress-4320.js
@@ -9,6 +9,7 @@ function f() { g(); }
function g() { }
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/debugger/debug/regress/regress-514362.js b/deps/v8/test/debugger/debug/regress/regress-514362.js
index beebf4c063..8422c99c98 100644
--- a/deps/v8/test/debugger/debug/regress/regress-514362.js
+++ b/deps/v8/test/debugger/debug/regress/regress-514362.js
@@ -7,6 +7,7 @@ function bar(x) { debugger; }
function foo() { bar(arguments[0]); }
function wrap() { return foo(1); }
+%PrepareFunctionForOptimization(wrap);
wrap();
wrap();
%OptimizeFunctionOnNextCall(wrap);
diff --git a/deps/v8/test/debugger/debug/regress/regress-5279.js b/deps/v8/test/debugger/debug/regress/regress-5279.js
index 4a30ac5f1c..f2031dc518 100644
--- a/deps/v8/test/debugger/debug/regress/regress-5279.js
+++ b/deps/v8/test/debugger/debug/regress/regress-5279.js
@@ -7,9 +7,13 @@ var Debug = debug.Debug;
Debug.setListener(() => undefined);
-const myObj = {};
+function f() {
+ const myObj = {};
-for (let i = 0; i < 10; i++) {
- %OptimizeOsr();
- %ScheduleBreak();
+ for (let i = 0; i < 10; i++) {
+ %OptimizeOsr();
+ %ScheduleBreak();
+ }
}
+%PrepareFunctionForOptimization(f);
+f()
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js b/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js
index bf15cbaec4..985f62ea8c 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js
@@ -9,6 +9,7 @@ Debug.setListener(function() {});
function f() {
for (var i = 0; i < 100; i++) %OptimizeOsr();
}
+%PrepareFunctionForOptimization(f);
Debug.setBreakPoint(f, 0, 0);
f();
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js b/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js
index ebaabd7104..94ca89069b 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js
@@ -28,6 +28,7 @@ Debug.setListener(listener);
} catch (e) {
}
}
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/debugger/debug/regress/regress-debug-code-recompilation.js b/deps/v8/test/debugger/debug/regress/regress-debug-code-recompilation.js
index ce6ce8692d..5223ed6357 100644
--- a/deps/v8/test/debugger/debug/regress/regress-debug-code-recompilation.js
+++ b/deps/v8/test/debugger/debug/regress/regress-debug-code-recompilation.js
@@ -34,6 +34,7 @@ function g() {
b=2;
}
+%PrepareFunctionForOptimization(Debug.setBreakPoint);
bp = Debug.setBreakPoint(f, 0, 0);
Debug.clearBreakPoint(bp);
%OptimizeFunctionOnNextCall(Debug.setBreakPoint);
@@ -41,6 +42,7 @@ bp = Debug.setBreakPoint(f, 0, 0);
Debug.clearBreakPoint(bp);
bp = Debug.setBreakPoint(f, 0, 0);
Debug.clearBreakPoint(bp);
+%PrepareFunctionForOptimization(Debug.setBreakPoint);
%OptimizeFunctionOnNextCall(Debug.setBreakPoint);
bp = Debug.setBreakPoint(f, 0, 0);
Debug.clearBreakPoint(bp);
diff --git a/deps/v8/test/debugger/debug/regress/regress-debug-deopt-while-recompile.js b/deps/v8/test/debugger/debug/regress/regress-debug-deopt-while-recompile.js
index e8336a8cf1..c6b078bf56 100644
--- a/deps/v8/test/debugger/debug/regress/regress-debug-deopt-while-recompile.js
+++ b/deps/v8/test/debugger/debug/regress/regress-debug-deopt-while-recompile.js
@@ -45,13 +45,16 @@ var f = function() {
var bar = "foo";
var baz = bar; // Break point should be here.
return bar;
-}
+};
var g = function() {
var bar = "foo";
var baz = bar; // Break point should be here.
return bar;
-}
+};
+
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(g);
f();
f();
diff --git a/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js b/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js
index 2b11357f1c..b39c97b7d7 100644
--- a/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js
+++ b/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js
@@ -53,8 +53,9 @@ var f = function() {
var b = a.substring("1");
[a, b].sort();
return a;
-}
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f, "concurrent"); // Mark with builtin.
diff --git a/deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js b/deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js
index 3b56254922..83d2181b99 100644
--- a/deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js
+++ b/deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js
@@ -45,6 +45,7 @@ function bar() {
return x;
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
// Mark and kick off recompilation.
diff --git a/deps/v8/test/debugger/regress/regress-5901-1.js b/deps/v8/test/debugger/regress/regress-5901-1.js
index 0edffe7965..1902767b79 100644
--- a/deps/v8/test/debugger/regress/regress-5901-1.js
+++ b/deps/v8/test/debugger/regress/regress-5901-1.js
@@ -18,6 +18,7 @@ function h() {
return g();
}
+%PrepareFunctionForOptimization(h);
h();
h();
diff --git a/deps/v8/test/debugger/regress/regress-5901-2.js b/deps/v8/test/debugger/regress/regress-5901-2.js
index cf6f0efd60..1614ac94d7 100644
--- a/deps/v8/test/debugger/regress/regress-5901-2.js
+++ b/deps/v8/test/debugger/regress/regress-5901-2.js
@@ -15,6 +15,7 @@ function h() {
return g();
}
+%PrepareFunctionForOptimization(h);
h();
h();
diff --git a/deps/v8/test/debugger/regress/regress-5950.js b/deps/v8/test/debugger/regress/regress-5950.js
index 9f0ea5bc90..8f0d03c258 100644
--- a/deps/v8/test/debugger/regress/regress-5950.js
+++ b/deps/v8/test/debugger/regress/regress-5950.js
@@ -15,6 +15,7 @@ function h() {
return g();
}
+%PrepareFunctionForOptimization(h);
h();
h();
diff --git a/deps/v8/test/debugger/regress/regress-6526.js b/deps/v8/test/debugger/regress/regress-6526.js
index 90df244b3c..80a82b2b4f 100644
--- a/deps/v8/test/debugger/regress/regress-6526.js
+++ b/deps/v8/test/debugger/regress/regress-6526.js
@@ -16,8 +16,9 @@ var f = function() {
} catch (e) {
}
});
-}
+};
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/debugger/regress/regress-7421.js b/deps/v8/test/debugger/regress/regress-7421.js
index e82cc8a6db..cb1e612d8b 100644
--- a/deps/v8/test/debugger/regress/regress-7421.js
+++ b/deps/v8/test/debugger/regress/regress-7421.js
@@ -23,6 +23,7 @@ function wrapper1() {
}
f1();
}
+%PrepareFunctionForOptimization(wrapper1);
function f2() {
counter++;
@@ -47,14 +48,17 @@ function listener(event, exec_state, event_data, data) {
wrapper1();
fail("wrapper1()");
+ %PrepareFunctionForOptimization(wrapper2);
wrapper2(true);
wrapper2(false);
wrapper2(true);
%OptimizeFunctionOnNextCall(wrapper2);
wrapper2(false);
fail("wrapper2(true)");
- fail("%OptimizeFunctionOnNextCall(wrapper2); wrapper2(true)");
+ fail("%PrepareFunctionForOptimization(wrapper2); "+
+ "%OptimizeFunctionOnNextCall(wrapper2); wrapper2(true)");
+ %PrepareFunctionForOptimization(wrapper2);
%OptimizeFunctionOnNextCall(wrapper2, "concurrent");
wrapper2(false);
fail("%UnblockConcurrentRecompilation();" +
diff --git a/deps/v8/test/debugger/regress/regress-crbug-736758.js b/deps/v8/test/debugger/regress/regress-crbug-736758.js
index d483af25b2..551b0d7413 100644
--- a/deps/v8/test/debugger/regress/regress-crbug-736758.js
+++ b/deps/v8/test/debugger/regress/regress-crbug-736758.js
@@ -8,6 +8,7 @@ function listener() {}
function f() { [1,2,3].forEach(g) }
function g() { debugger }
+%PrepareFunctionForOptimization(f);
f();
f();
Debug.setListener(listener);
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index 43d21ea59e..5d6861dd62 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -10,7 +10,7 @@
#include "include/libplatform/libplatform.h"
-#include "src/flags.h"
+#include "src/flags/flags.h"
namespace v8_fuzzer {
diff --git a/deps/v8/test/fuzzer/multi-return.cc b/deps/v8/test/fuzzer/multi-return.cc
index dc809bdc71..12513b58af 100644
--- a/deps/v8/test/fuzzer/multi-return.cc
+++ b/deps/v8/test/fuzzer/multi-return.cc
@@ -5,6 +5,8 @@
#include <cstddef>
#include <cstdint>
+#include "src/codegen/machine-type.h"
+#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
@@ -13,11 +15,9 @@
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/machine-type.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/optimized-compilation-info.h"
-#include "src/simulator.h"
+#include "src/execution/simulator.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
@@ -246,7 +246,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
std::shared_ptr<wasm::NativeModule> module =
AllocateNativeModule(i_isolate, code->raw_instruction_size());
wasm::WasmCodeRefScope wasm_code_ref_scope;
- byte* code_start = module->AddCodeForTesting(code)->instructions().start();
+ byte* code_start = module->AddCodeForTesting(code)->instructions().begin();
// Generate wrapper.
int expect = 0;
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
index 01ec69cbda..1e89fb0e31 100644
--- a/deps/v8/test/fuzzer/parser.cc
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -10,8 +10,8 @@
#include <list>
#include "include/v8.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/parsing/preparser.h"
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index c6192e2cf7..08d68600c6 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -11,7 +11,7 @@
#include "include/v8.h"
#include "src/heap/factory.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/jsregexp.h"
#include "test/fuzzer/fuzzer-support.h"
@@ -374,8 +374,7 @@ void CompileRunAndVerify(FuzzerArgs* args, const std::string& source) {
uint32_t hash = StringHasher::HashSequentialString(
args->input_data, static_cast<int>(args->input_length),
kRegExpBuiltinsFuzzerHashSeed);
- V8_Fatal(__FILE__, __LINE__,
- "!ResultAreIdentical(args); RegExpBuiltinsFuzzerHash=%x", hash);
+ FATAL("!ResultAreIdentical(args); RegExpBuiltinsFuzzerHash=%x", hash);
}
}
diff --git a/deps/v8/test/fuzzer/regexp.cc b/deps/v8/test/fuzzer/regexp.cc
index f39e709457..e532af8d2d 100644
--- a/deps/v8/test/fuzzer/regexp.cc
+++ b/deps/v8/test/fuzzer/regexp.cc
@@ -8,7 +8,7 @@
#include "include/v8.h"
#include "src/heap/factory.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/regexp/jsregexp.h"
#include "test/fuzzer/fuzzer-support.h"
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index 8e140b71f2..36e3757f59 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -7,10 +7,10 @@
#include <stdint.h>
#include "include/v8.h"
-#include "src/api.h"
+#include "src/api/api.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "test/common/wasm/flag-utils.h"
@@ -69,10 +69,11 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
bool done = false;
auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
+ constexpr const char* kAPIMethodName = "WasmAsyncFuzzer.compile";
i_isolate->wasm_engine()->AsyncCompile(
i_isolate, enabled_features,
std::make_shared<AsyncFuzzerResolver>(i_isolate, &done),
- ModuleWireBytes(data, data + size), false);
+ ModuleWireBytes(data, data + size), false, kAPIMethodName);
// Wait for the promise to resolve.
while (!done) {
diff --git a/deps/v8/test/fuzzer/wasm-code.cc b/deps/v8/test/fuzzer/wasm-code.cc
index b159fad3da..c888dd823c 100644
--- a/deps/v8/test/fuzzer/wasm-code.cc
+++ b/deps/v8/test/fuzzer/wasm-code.cc
@@ -5,9 +5,9 @@
#include <stddef.h>
#include <stdint.h>
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-module-builder.h"
#include "test/common/wasm/test-signatures.h"
@@ -27,7 +27,7 @@ class WasmCodeFuzzer : public WasmExecutionFuzzer {
TestSignatures sigs;
WasmModuleBuilder builder(zone);
WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
- f->EmitCode(data.start(), static_cast<uint32_t>(data.size()));
+ f->EmitCode(data.begin(), static_cast<uint32_t>(data.size()));
uint8_t end_opcode = kExprEnd;
f->EmitCode(&end_opcode, 1);
builder.AddExport(CStrVector("main"), f);
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 003ed19014..a373ca665e 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -9,10 +9,10 @@
#include <algorithm>
#include "include/v8.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/ostreams.h"
+#include "src/execution/isolate.h"
+#include "src/utils/ostreams.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
@@ -69,7 +69,7 @@ class DataRange {
// arbitrary expressions.
const size_t num_bytes = std::min(max_bytes, data_.size());
T result = T();
- memcpy(&result, data_.start(), num_bytes);
+ memcpy(&result, data_.begin(), num_bytes);
data_ += num_bytes;
return result;
}
@@ -389,6 +389,16 @@ class WasmGenerator {
global_op<wanted_type>(data);
}
+ template <ValueType select_type>
+ void select_with_type(DataRange& data) {
+ static_assert(select_type != kWasmStmt, "illegal type for select");
+ Generate<select_type, select_type, kWasmI32>(data);
+ // num_types is always 1.
+ uint8_t num_types = 1;
+ builder_->EmitWithU8U8(kExprSelectWithType, num_types,
+ ValueTypes::ValueTypeCodeFor(select_type));
+ }
+
void set_global(DataRange& data) { global_op<kWasmStmt>(data); }
template <ValueType... Types>
@@ -603,6 +613,8 @@ void WasmGenerator::Generate<kWasmI32>(DataRange& data) {
&WasmGenerator::get_local<kWasmI32>,
&WasmGenerator::tee_local<kWasmI32>,
&WasmGenerator::get_global<kWasmI32>,
+ &WasmGenerator::op<kExprSelect, kWasmI32, kWasmI32, kWasmI32>,
+ &WasmGenerator::select_with_type<kWasmI32>,
&WasmGenerator::call<kWasmI32>};
@@ -669,6 +681,8 @@ void WasmGenerator::Generate<kWasmI64>(DataRange& data) {
&WasmGenerator::get_local<kWasmI64>,
&WasmGenerator::tee_local<kWasmI64>,
&WasmGenerator::get_global<kWasmI64>,
+ &WasmGenerator::op<kExprSelect, kWasmI64, kWasmI64, kWasmI32>,
+ &WasmGenerator::select_with_type<kWasmI64>,
&WasmGenerator::call<kWasmI64>};
@@ -702,6 +716,8 @@ void WasmGenerator::Generate<kWasmF32>(DataRange& data) {
&WasmGenerator::get_local<kWasmF32>,
&WasmGenerator::tee_local<kWasmF32>,
&WasmGenerator::get_global<kWasmF32>,
+ &WasmGenerator::op<kExprSelect, kWasmF32, kWasmF32, kWasmI32>,
+ &WasmGenerator::select_with_type<kWasmF32>,
&WasmGenerator::call<kWasmF32>};
@@ -735,6 +751,8 @@ void WasmGenerator::Generate<kWasmF64>(DataRange& data) {
&WasmGenerator::get_local<kWasmF64>,
&WasmGenerator::tee_local<kWasmF64>,
&WasmGenerator::get_global<kWasmF64>,
+ &WasmGenerator::op<kExprSelect, kWasmF64, kWasmF64, kWasmI32>,
+ &WasmGenerator::select_with_type<kWasmF64>,
&WasmGenerator::call<kWasmF64>};
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 2bfe1cbd74..b35d2ee2a6 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -7,9 +7,9 @@
#include <ctime>
#include "include/v8.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/ostreams.h"
+#include "src/execution/isolate.h"
+#include "src/utils/ostreams.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
@@ -106,7 +106,7 @@ struct PrintName {
: name(wire_bytes.GetNameOrNull(ref)) {}
};
std::ostream& operator<<(std::ostream& os, const PrintName& name) {
- return os.write(name.name.start(), name.name.size());
+ return os.write(name.name.begin(), name.name.size());
}
} // namespace
@@ -207,7 +207,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
// Add locals.
BodyLocalDecls decls(&tmp_zone);
- DecodeLocalDecls(enabled_features, &decls, func_code.start(),
+ DecodeLocalDecls(enabled_features, &decls, func_code.begin(),
func_code.end());
if (!decls.type_list.empty()) {
os << " ";
@@ -225,7 +225,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
// Add body.
os << " .addBodyWithEnd([\n";
- FunctionBody func_body(func.sig, func.code.offset(), func_code.start(),
+ FunctionBody func_body(func.sig, func.code.offset(), func_code.begin(),
func_code.end());
PrintRawWasmCode(isolate->allocator(), func_body, module, kOmitLocals);
os << " ]);\n";
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index 8815762826..53bbac6a01 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -7,9 +7,9 @@
#include <stdint.h>
#include "include/v8.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
-#include "src/isolate-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-module.h"
#include "test/common/wasm/flag-utils.h"
diff --git a/deps/v8/test/inspector/DEPS b/deps/v8/test/inspector/DEPS
index fda9364d65..1b6dd06238 100644
--- a/deps/v8/test/inspector/DEPS
+++ b/deps/v8/test/inspector/DEPS
@@ -3,9 +3,9 @@ include_rules = [
"+src/base/atomic-utils.h",
"+src/base/macros.h",
"+src/base/platform/platform.h",
- "+src/flags.h",
+ "+src/flags/flags.h",
"+src/inspector/test-interface.h",
- "+src/locked-queue-inl.h",
- "+src/utils.h",
- "+src/vector.h",
-] \ No newline at end of file
+ "+src/utils/locked-queue-inl.h",
+ "+src/utils/utils.h",
+ "+src/utils/vector.h",
+]
diff --git a/deps/v8/test/inspector/OWNERS b/deps/v8/test/inspector/OWNERS
index c5325d9c97..b1ddebddfc 100644
--- a/deps/v8/test/inspector/OWNERS
+++ b/deps/v8/test/inspector/OWNERS
@@ -1,3 +1,4 @@
+alph@chromium.org
dgozman@chromium.org
kozyatinskiy@chromium.org
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt b/deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt
index cc48cfa85b..c99c1710af 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt
+++ b/deps/v8/test/inspector/cpu-profiler/coverage-block-expected.txt
@@ -34,7 +34,7 @@ Running test: testPreciseCountCoverage
[0] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -94,7 +94,7 @@ Running test: testPreciseCountCoverage
[1] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -106,7 +106,7 @@ Running test: testPreciseCountCoverage
}
]
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -137,7 +137,7 @@ Running test: testPreciseCountCoverageIncremental
[0] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -221,7 +221,7 @@ Running test: testPreciseCountCoverageIncremental
result : {
result : {
type : string
- value : unoptimized
+ value : optimized
}
}
}
@@ -262,10 +262,15 @@ Running test: testPreciseCountCoverageIncremental
startOffset : 74
}
[1] : {
- count : 0
+ count : 1
endOffset : 156
startOffset : 143
}
+ [2] : {
+ count : 1
+ endOffset : 172
+ startOffset : 157
+ }
]
}
]
@@ -275,7 +280,7 @@ Running test: testPreciseCountCoverageIncremental
[1] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -287,12 +292,12 @@ Running test: testPreciseCountCoverageIncremental
}
]
scriptId : <scriptId>
- url :
+ url :
}
[2] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -304,7 +309,7 @@ Running test: testPreciseCountCoverageIncremental
}
]
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -373,7 +378,7 @@ Running test: testBestEffortCoverageWithPreciseBinaryEnabled
[0] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : false
ranges : [
[0] : {
@@ -423,7 +428,7 @@ Running test: testBestEffortCoverageWithPreciseBinaryEnabled
[1] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : false
ranges : [
[0] : {
@@ -435,7 +440,7 @@ Running test: testBestEffortCoverageWithPreciseBinaryEnabled
}
]
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -447,7 +452,7 @@ Running test: testBestEffortCoverageWithPreciseBinaryEnabled
[0] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : false
ranges : [
[0] : {
@@ -497,7 +502,7 @@ Running test: testBestEffortCoverageWithPreciseBinaryEnabled
[1] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : false
ranges : [
[0] : {
@@ -509,7 +514,7 @@ Running test: testBestEffortCoverageWithPreciseBinaryEnabled
}
]
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -533,7 +538,7 @@ Running test: testBestEffortCoverageWithPreciseCountEnabled
[0] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : false
ranges : [
[0] : {
@@ -583,7 +588,7 @@ Running test: testBestEffortCoverageWithPreciseCountEnabled
[1] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : false
ranges : [
[0] : {
@@ -595,7 +600,7 @@ Running test: testBestEffortCoverageWithPreciseCountEnabled
}
]
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -607,7 +612,7 @@ Running test: testBestEffortCoverageWithPreciseCountEnabled
[0] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : false
ranges : [
[0] : {
@@ -657,7 +662,7 @@ Running test: testBestEffortCoverageWithPreciseCountEnabled
[1] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : false
ranges : [
[0] : {
@@ -669,7 +674,7 @@ Running test: testBestEffortCoverageWithPreciseCountEnabled
}
]
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -691,7 +696,7 @@ Running test: testEnablePreciseCountCoverageAtPause
[0] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -703,7 +708,7 @@ Running test: testEnablePreciseCountCoverageAtPause
}
]
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -727,7 +732,7 @@ Running test: testPreciseBinaryCoverage
[0] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -812,22 +817,6 @@ Running test: testPreciseBinaryCoverage
[0] : {
functions : [
[0] : {
- functionName : fib
- isBlockCoverage : true
- ranges : [
- [0] : {
- count : 0
- endOffset : 73
- startOffset : 1
- }
- [1] : {
- count : 1
- endOffset : 72
- startOffset : 32
- }
- ]
- }
- [1] : {
functionName : is_optimized
isBlockCoverage : true
ranges : [
@@ -845,7 +834,7 @@ Running test: testPreciseBinaryCoverage
[1] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -857,12 +846,12 @@ Running test: testPreciseBinaryCoverage
}
]
scriptId : <scriptId>
- url :
+ url :
}
[2] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -874,7 +863,7 @@ Running test: testPreciseBinaryCoverage
}
]
scriptId : <scriptId>
- url :
+ url :
}
]
}
@@ -905,7 +894,7 @@ Running test: testPreciseCountCoveragePartial
[0] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -1023,7 +1012,7 @@ Running test: testPreciseCountCoveragePartial
[1] : {
functions : [
[0] : {
- functionName :
+ functionName :
isBlockCoverage : true
ranges : [
[0] : {
@@ -1035,7 +1024,7 @@ Running test: testPreciseCountCoveragePartial
}
]
scriptId : <scriptId>
- url :
+ url :
}
]
}
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
index ae15e5d2d7..c5a8d155c4 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
@@ -5,7 +5,7 @@ Running test: enableDebugger
Running test: addScript
Script nr 1 parsed!
First script; assuming testFunction.
-Flooding script with breakpoints for the lines 3 to 20...
+Flooding script with breakpoints for the lines 3 to 21...
Setting breakpoint on line 3
error: undefined
Setting breakpoint on line 4
@@ -40,6 +40,8 @@ Setting breakpoint on line 18
error: undefined
Setting breakpoint on line 19
error: undefined
+Setting breakpoint on line 20
+error: undefined
Running test: runTestFunction
Script nr 2 parsed!
@@ -47,22 +49,25 @@ Paused #1
- [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":17,"columnNumber":2}
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Paused #2
- - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":12}
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Paused #3
- - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":12}
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Paused #4
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
+ - [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #5
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
- [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
- - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
-Paused #5
+Paused #6
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":15,"columnNumber":2}
- [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
- - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Running test: finished
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
index ec760ecf08..2b4c8343d5 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
@@ -25,6 +25,7 @@ function testFunction() {
debugger;
}
+ %PrepareFunctionForOptimization(generateAsmJs);
%OptimizeFunctionOnNextCall(generateAsmJs);
var fun = generateAsmJs(this, {'call_debugger': call_debugger}, undefined);
fun();
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
index feb4a3be04..18b61dcf4d 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
@@ -11,10 +11,10 @@ Paused #1
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
- [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
- - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
First time paused, setting breakpoints!
-Flooding script with breakpoints for all lines (0 - 23)...
+Flooding script with breakpoints for all lines (0 - 24)...
Setting breakpoint on line 0
error: undefined
Setting breakpoint on line 1
@@ -61,23 +61,25 @@ Setting breakpoint on line 21
error: undefined
Setting breakpoint on line 22
error: undefined
+Setting breakpoint on line 23
+error: undefined
Script nr 3 parsed!
Resuming...
Paused #2
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":15,"columnNumber":2}
- [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
- [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
- - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":20,"columnNumber":2}
- [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Script nr 4 parsed!
Resuming...
Paused #3
- - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":21,"columnNumber":17}
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":22,"columnNumber":17}
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Script nr 5 parsed!
Resuming...
Paused #4
- - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":22,"columnNumber":2}
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":23,"columnNumber":2}
- [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
Script nr 6 parsed!
Resuming...
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
index af3ac518b3..5a5d1fcf69 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
@@ -25,6 +25,7 @@ function testFunction() {
debugger;
}
+ %PrepareFunctionForOptimization(generateAsmJs);
%OptimizeFunctionOnNextCall(generateAsmJs);
var fun = generateAsmJs(this, {'call_debugger': call_debugger}, undefined);
fun();
diff --git a/deps/v8/test/inspector/debugger/framework-break-expected.txt b/deps/v8/test/inspector/debugger/framework-break-expected.txt
index e858e836e8..b8469f4ecb 100644
--- a/deps/v8/test/inspector/debugger/framework-break-expected.txt
+++ b/deps/v8/test/inspector/debugger/framework-break-expected.txt
@@ -22,7 +22,7 @@ Running test: testUncaughtExceptionWithInlinedFrame
> mixed top frame in framework:
throwUserException (user.js:66:2)
inlinedWrapper (framework.js:56:4)
-throwInlinedUncaughtError (framework.js:59:2)
+throwInlinedUncaughtError (framework.js:60:2)
(anonymous) (framework.js:0:0)
@@ -51,8 +51,8 @@ Running test: testSyncDOMBreakpointWithInlinedUserFrame
> mixed, top frame in framework:
syncDOMBreakpoint (framework.js:33:12)
userFunction (user.js:70:2)
-inlinedWrapper (framework.js:64:4)
-syncDOMBreakpointWithInlinedUserFrame (framework.js:67:2)
+inlinedWrapper (framework.js:65:4)
+syncDOMBreakpointWithInlinedUserFrame (framework.js:69:2)
(anonymous) (framework.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/framework-break.js b/deps/v8/test/inspector/debugger/framework-break.js
index 9b9fee6f3e..45cdf5c2b2 100644
--- a/deps/v8/test/inspector/debugger/framework-break.js
+++ b/deps/v8/test/inspector/debugger/framework-break.js
@@ -55,6 +55,7 @@ function throwInlinedUncaughtError() {
function inlinedWrapper() {
throwUserException();
}
+ %PrepareFunctionForOptimization(inlinedWrapper);
%OptimizeFunctionOnNextCall(inlinedWrapper);
inlinedWrapper();
}
@@ -63,6 +64,7 @@ function syncDOMBreakpointWithInlinedUserFrame() {
function inlinedWrapper() {
userFunction();
}
+ %PrepareFunctionForOptimization(inlinedWrapper);
%OptimizeFunctionOnNextCall(inlinedWrapper);
inlinedWrapper();
}
diff --git a/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js b/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js
index 5d5d7334b2..36f523d8ad 100644
--- a/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js
+++ b/deps/v8/test/inspector/debugger/pause-inside-blackboxed-optimized.js
@@ -18,6 +18,7 @@ contextGroup.addScript(`
function bar() {
return 2;
}
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/inspector/debugger/script-on-after-compile-snapshot-expected.txt b/deps/v8/test/inspector/debugger/script-on-after-compile-snapshot-expected.txt
new file mode 100644
index 0000000000..5c4acceb4a
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-on-after-compile-snapshot-expected.txt
@@ -0,0 +1,28 @@
+Embedding script 'function f() { return 42; }'
+Tests that getPossibleBreakpoints works on a snapshotted function
+scriptParsed
+{
+ scriptSource : function f() { return 42; }
+}
+{
+ endColumn : 27
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : 05296308743b1ae80b7cf865a54ed2d12b19b77f
+ isLiveEdit : false
+ isModule : false
+ length : 27
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : <embedded>
+}
+{
+ error : {
+ code : -32000
+ message : Cannot retrive script context
+ }
+ id : <messageId>
+}
diff --git a/deps/v8/test/inspector/debugger/script-on-after-compile-snapshot.js b/deps/v8/test/inspector/debugger/script-on-after-compile-snapshot.js
new file mode 100644
index 0000000000..6d26b29838
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-on-after-compile-snapshot.js
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Embed a user function in the snapshot and listen for scriptParsed events.
+
+// Flags: --embed 'function f() { return 42; }; f();'
+// Flags: --no-turbo-rewrite-far-jumps
+
+let {session, contextGroup, Protocol} = InspectorTest.start(
+ 'Tests that getPossibleBreakpoints works on a snapshotted function');
+session.setupScriptMap();
+
+Protocol.Debugger.onScriptParsed(requestSourceAndDump);
+
+Protocol.Debugger.enable()
+ .then(InspectorTest.waitForPendingTasks)
+ .then(InspectorTest.completeTest);
+
+function requestSourceAndDump(scriptParsedMessage) {
+ const scriptId = scriptParsedMessage.params.scriptId;
+ Protocol.Debugger.getScriptSource({ scriptId: scriptId })
+ .then((sourceMessage) => dumpScriptParsed(
+ scriptParsedMessage, sourceMessage))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({
+ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId },
+ end: { lineNumber: 0, columnNumber: 1, scriptId: scriptId },
+ restrictToFunction: false
+ }))
+ .then(InspectorTest.logMessage);
+}
+
+function dumpScriptParsed(scriptParsedMessage, sourceMessage) {
+ var sourceResult = sourceMessage.result;
+ sourceResult.scriptSource = sourceResult.scriptSource.replace(/\n/g, "<nl>");
+ InspectorTest.log("scriptParsed");
+ InspectorTest.logObject(sourceResult);
+ InspectorTest.logMessage(scriptParsedMessage.params);
+}
diff --git a/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint-expected.txt
new file mode 100644
index 0000000000..94f58aacd1
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint-expected.txt
@@ -0,0 +1,89 @@
+Debugger.setInstrumentationBreakpoint
+
+Running test: testSetTwice
+set breakpoint..
+{
+ breakpointId : <breakpointId>
+}
+set breakpoint again..
+{
+ error : {
+ code : -32000
+ message : Instrumentation breakpoint is already enabled.
+ }
+ id : <messageId>
+}
+remove breakpoint..
+{
+ id : <messageId>
+ result : {
+ }
+}
+
+Running test: testScriptParsed
+set breakpoint and evaluate script..
+paused with reason: instrumentation
+{
+ scriptId : <scriptId>
+ url : foo.js
+}
+set breakpoint and evaluate script with sourceMappingURL..
+paused with reason: instrumentation
+{
+ scriptId : <scriptId>
+ sourceMapURL : map.js
+ url : foo.js
+}
+remove breakpoint..
+{
+ id : <messageId>
+ result : {
+ }
+}
+evaluate script again..
+
+Running test: testScriptWithSourceMapParsed
+set breakpoint for scriptWithSourceMapParsed..
+evaluate script without sourceMappingURL..
+evaluate script with sourceMappingURL..
+paused with reason: instrumentation
+{
+ scriptId : <scriptId>
+ sourceMapURL : map.js
+ url : foo.js
+}
+remove breakpoint..
+{
+ id : <messageId>
+ result : {
+ }
+}
+evaluate script without sourceMappingURL..
+evaluate script with sourceMappingURL..
+
+Running test: testBlackboxing
+set breakpoint and evaluate blackboxed script..
+evaluate not blackboxed script..
+paused with reason: instrumentation
+{
+ scriptId : <scriptId>
+ url : bar.js
+}
+evaluate blackboxed script that contains not blackboxed one..
+paused with reason: instrumentation
+{
+ scriptId : <scriptId>
+ url : bar.js
+}
+
+Running test: testCompileFirstRunLater
+set breakpoint for scriptWithSourceMapParsed..
+compile script with sourceMappingURL..
+evaluate script without sourceMappingURL..
+run previously compiled script with sourceMappingURL..
+paused with reason: instrumentation
+{
+ scriptId : <scriptId>
+ sourceMapURL : boo.js
+ url : foo.js
+}
diff --git a/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint.js b/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint.js
new file mode 100644
index 0000000000..3c52bdf16d
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-instrumentation-breakpoint.js
@@ -0,0 +1,131 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const { session, contextGroup, Protocol } = InspectorTest.start(
+ 'Debugger.setInstrumentationBreakpoint');
+
+InspectorTest.runAsyncTestSuite([
+ async function testSetTwice() {
+ await Protocol.Debugger.enable();
+ const { result : firstResult } = await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptExecution'
+ });
+ InspectorTest.log('set breakpoint..');
+ InspectorTest.logMessage(firstResult);
+ InspectorTest.log('set breakpoint again..');
+ InspectorTest.logMessage(await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptExecution'
+ }));
+ InspectorTest.log('remove breakpoint..');
+ InspectorTest.logMessage(await Protocol.Debugger.removeBreakpoint({
+ breakpointId: firstResult.breakpointId
+ }));
+ await Protocol.Debugger.disable();
+ },
+
+ async function testScriptParsed() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('set breakpoint and evaluate script..');
+ const { result : firstResult } = await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptExecution'
+ });
+ Protocol.Runtime.evaluate({expression: '//# sourceURL=foo.js'});
+ {
+ const { params: { reason, data } } = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`paused with reason: ${reason}`);
+ InspectorTest.logMessage(data);
+ }
+ await Protocol.Debugger.resume();
+ InspectorTest.log('set breakpoint and evaluate script with sourceMappingURL..');
+ Protocol.Runtime.evaluate({expression: '//# sourceURL=foo.js\n//# sourceMappingURL=map.js'});
+ {
+ const { params: { reason, data } } = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`paused with reason: ${reason}`);
+ InspectorTest.logMessage(data);
+ }
+ InspectorTest.log('remove breakpoint..');
+ InspectorTest.logMessage(await Protocol.Debugger.removeBreakpoint({
+ breakpointId: firstResult.breakpointId
+ }));
+ InspectorTest.log('evaluate script again..');
+ await Protocol.Runtime.evaluate({expression: '//# sourceURL=foo.js'});
+ await Protocol.Debugger.disable();
+ },
+
+ async function testScriptWithSourceMapParsed() {
+ await Protocol.Debugger.enable();
+ InspectorTest.log('set breakpoint for scriptWithSourceMapParsed..');
+ const { result : firstResult } = await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptWithSourceMapExecution'
+ });
+ InspectorTest.log('evaluate script without sourceMappingURL..')
+ await Protocol.Runtime.evaluate({expression: '//# sourceURL=foo.js'});
+ InspectorTest.log('evaluate script with sourceMappingURL..')
+ Protocol.Runtime.evaluate({expression: '//# sourceURL=foo.js\n//# sourceMappingURL=map.js'});
+ {
+ const { params: { reason, data } } = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`paused with reason: ${reason}`);
+ InspectorTest.logMessage(data);
+ }
+ InspectorTest.log('remove breakpoint..')
+ InspectorTest.logMessage(await Protocol.Debugger.removeBreakpoint({
+ breakpointId: firstResult.breakpointId
+ }));
+ InspectorTest.log('evaluate script without sourceMappingURL..')
+ await Protocol.Runtime.evaluate({expression: '//# sourceURL=foo.js'});
+ InspectorTest.log('evaluate script with sourceMappingURL..')
+ await Protocol.Runtime.evaluate({expression: '//# sourceURL=foo.js\n//# sourceMappingURL=map.js'});
+ await Protocol.Debugger.disable();
+ },
+
+ async function testBlackboxing() {
+ await Protocol.Debugger.enable();
+ await Protocol.Debugger.setBlackboxPatterns({patterns: ['foo\.js']});
+ InspectorTest.log('set breakpoint and evaluate blackboxed script..');
+ const { result : firstResult } = await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptExecution'
+ });
+ await Protocol.Runtime.evaluate({expression: '//# sourceURL=foo.js'});
+ InspectorTest.log('evaluate not blackboxed script..');
+ Protocol.Runtime.evaluate({expression: '//# sourceURL=bar.js'});
+ {
+ const { params: { reason, data } } = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`paused with reason: ${reason}`);
+ InspectorTest.logMessage(data);
+ }
+ await Protocol.Debugger.resume();
+ InspectorTest.log('evaluate blackboxed script that contains not blackboxed one..');
+ Protocol.Runtime.evaluate({expression: `eval('//# sourceURL=bar.js')//# sourceURL=foo.js`});
+ {
+ const { params: { reason, data } } = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`paused with reason: ${reason}`);
+ InspectorTest.logMessage(data);
+ }
+ await Protocol.Debugger.resume();
+ await Protocol.Debugger.disable();
+ },
+
+ async function testCompileFirstRunLater() {
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ InspectorTest.log('set breakpoint for scriptWithSourceMapParsed..');
+ const { result : firstResult } = await Protocol.Debugger.setInstrumentationBreakpoint({
+ instrumentation: 'beforeScriptWithSourceMapExecution'
+ });
+ InspectorTest.log('compile script with sourceMappingURL..');
+ const { result: { scriptId } } = await Protocol.Runtime.compileScript({
+ expression: '//# sourceMappingURL=boo.js', sourceURL: 'foo.js', persistScript: true });
+ InspectorTest.log('evaluate script without sourceMappingURL..');
+ await Protocol.Runtime.evaluate({ expression: '' });
+ InspectorTest.log('run previously compiled script with sourceMappingURL..');
+ Protocol.Runtime.runScript({ scriptId });
+ {
+ const { params: { reason, data } } = await Protocol.Debugger.oncePaused();
+ InspectorTest.log(`paused with reason: ${reason}`);
+ InspectorTest.logMessage(data);
+ }
+ await Protocol.Debugger.disable();
+ await Protocol.Runtime.disable();
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/step-into-optimized-blackbox.js b/deps/v8/test/inspector/debugger/step-into-optimized-blackbox.js
index 75505891bb..1c68c1a14c 100644
--- a/deps/v8/test/inspector/debugger/step-into-optimized-blackbox.js
+++ b/deps/v8/test/inspector/debugger/step-into-optimized-blackbox.js
@@ -24,7 +24,8 @@ Protocol.Debugger.setBlackboxPatterns({ patterns: [ "bar.js" ] });
Protocol.Debugger.onPaused(PerformSteps);
Protocol.Runtime.evaluate({
- "expression": "bar(); bar(); %OptimizeFunctionOnNextCall(bar); bar()"
+ "expression": "%PrepareFunctionForOptimization(bar); bar(); bar(); " +
+ "%OptimizeFunctionOnNextCall(bar); bar()"
});
Protocol.Runtime.evaluate({ "expression": "debugger; bar();" });
diff --git a/deps/v8/test/inspector/debugger/wasm-anyref-global-expected.txt b/deps/v8/test/inspector/debugger/wasm-anyref-global-expected.txt
new file mode 100644
index 0000000000..d7b67a93de
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-anyref-global-expected.txt
@@ -0,0 +1,7 @@
+Test wasm scope information with anyref globals
+Waiting for wasm script to be parsed.
+Setting breakpoint in wasm.
+Running main.
+Paused in debugger.
+ globals: {"global#0": hello, world}
+Finished.
diff --git a/deps/v8/test/inspector/debugger/wasm-anyref-global.js b/deps/v8/test/inspector/debugger/wasm-anyref-global.js
new file mode 100644
index 0000000000..d4c88ac694
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-anyref-global.js
@@ -0,0 +1,83 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-anyref
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Test wasm scope information with anyref globals');
+
+(async function() {
+ try {
+ utils.load('test/mjsunit/wasm/wasm-module-builder.js');
+
+ let builder = new WasmModuleBuilder();
+ builder.addImportedGlobal('m', 'global', kWasmAnyRef, false);
+ builder.addFunction('func', kSig_v_v)
+ .addBody([
+ kExprGetGlobal, 0, //
+ kExprDrop, //
+ ])
+ .exportAs('main');
+ let moduleBytes = JSON.stringify(builder.toArray());
+
+ function test(moduleBytes) {
+ let module = new WebAssembly.Module((new Uint8Array(moduleBytes)).buffer);
+ let global = 'hello, world';
+ instance = new WebAssembly.Instance(module, {m: {global}});
+ }
+
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({
+ expression: `
+ let instance;
+ ${test.toString()}
+ test(${moduleBytes});`
+ });
+
+ InspectorTest.log('Waiting for wasm script to be parsed.');
+ let scriptId;
+ while (true) {
+ let msg = await Protocol.Debugger.onceScriptParsed();
+ if (msg.params.url.startsWith('wasm://')) {
+ scriptId = msg.params.scriptId;
+ break;
+ }
+ }
+
+ InspectorTest.log('Setting breakpoint in wasm.');
+ await Protocol.Debugger.setBreakpoint(
+ {location: {scriptId, lineNumber: 2}});
+
+ InspectorTest.log('Running main.');
+ Protocol.Runtime.evaluate({expression: 'instance.exports.main()'});
+
+ let msg = await Protocol.Debugger.oncePaused();
+ let callFrames = msg.params.callFrames;
+ InspectorTest.log('Paused in debugger.');
+ let scopeChain = callFrames[0].scopeChain;
+ for (let scope of scopeChain) {
+ if (scope.type != 'global') continue;
+
+ let globalObjectProps = (await Protocol.Runtime.getProperties({
+ 'objectId': scope.object.objectId
+ })).result.result;
+
+ for (let prop of globalObjectProps) {
+ let subProps = (await Protocol.Runtime.getProperties({
+ objectId: prop.value.objectId
+ })).result.result;
+ let values =
+ subProps.map((value) => `"${value.name}": ${value.value.value}`)
+ .join(', ');
+ InspectorTest.log(` ${prop.name}: {${values}}`);
+ }
+ }
+
+ InspectorTest.log('Finished.');
+ } catch (exc) {
+ InspectorTest.log(`Failed with exception: ${exc}.`);
+ } finally {
+ InspectorTest.completeTest();
+ }
+})();
diff --git a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt
index 96c7a64bd4..8fec6bc2df 100644
--- a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt
@@ -11,18 +11,18 @@ Requesting all breakable locations in wasm script 0
4 breakable location(s):
[0] 2:2 || >nop
[1] 3:2 || >i32.const 12
-[2] 4:2 || >set_local 0
+[2] 4:2 || >local.set 0
[3] 5:0 || >end
Requesting breakable locations in lines [0,3)
1 breakable location(s):
[0] 2:2 || >nop
Requesting breakable locations in lines [4,6)
2 breakable location(s):
-[0] 4:2 || >set_local 0
+[0] 4:2 || >local.set 0
[1] 5:0 || >end
Requesting all breakable locations in wasm script 1
7 breakable location(s):
-[0] 1:2 || >get_local 0
+[0] 1:2 || >local.get 0
[1] 2:2 || >if
[2] 3:4 || >block
[3] 4:6 || >call 0
@@ -31,7 +31,7 @@ Requesting all breakable locations in wasm script 1
[6] 7:0 || >end
Requesting breakable locations in lines [0,3)
2 breakable location(s):
-[0] 1:2 || >get_local 0
+[0] 1:2 || >local.get 0
[1] 2:2 || >if
Requesting breakable locations in lines [4,6)
2 breakable location(s):
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
index f9e900d0d1..cc2df0326c 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info-expected.txt
@@ -12,12 +12,12 @@ Setting breakpoint on line 2 (first instruction)
Paused:
(local i32 i64 f64)
#i32.const 11
- set_local 0
+ local.set 0
Scope:
at func (2:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 4 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack:
@@ -27,13 +27,13 @@ at (anonymous) (0:17):
Paused:
i32.const 11
- #set_local 0
+ #local.set 0
i32.const 47
Scope:
at func (3:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 4 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack: "0": 11 (number)
@@ -42,14 +42,14 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
- set_local 0
+ local.set 0
#i32.const 47
- set_local 1
+ local.set 1
Scope:
at func (4:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack:
@@ -59,13 +59,13 @@ at (anonymous) (0:17):
Paused:
i32.const 47
- #set_local 1
+ #local.set 1
i64.const 9223372036854775807
Scope:
at func (5:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 0 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack: "0": 47 (number)
@@ -74,14 +74,14 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
- set_local 1
+ local.set 1
#i64.const 9223372036854775807
- set_local 2
+ local.set 2
Scope:
at func (6:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack:
@@ -91,13 +91,13 @@ at (anonymous) (0:17):
Paused:
i64.const 9223372036854775807
- #set_local 2
+ #local.set 2
i64.const -9223372036854775808
Scope:
at func (7:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 0 (number), "unicode☼f64": 0 (number)
stack: "0": 9223372036854775807 (string)
@@ -106,14 +106,14 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
- set_local 2
+ local.set 2
#i64.const -9223372036854775808
- set_local 2
+ local.set 2
Scope:
at func (8:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 9223372036854775807 (string), "unicode☼f64": 0 (number)
stack:
@@ -123,13 +123,13 @@ at (anonymous) (0:17):
Paused:
i64.const -9223372036854775808
- #set_local 2
+ #local.set 2
i32.const 1
Scope:
at func (9:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": 9223372036854775807 (string), "unicode☼f64": 0 (number)
stack: "0": -9223372036854775808 (string)
@@ -138,14 +138,14 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
- set_local 2
+ local.set 2
#i32.const 1
- f64.convert_u/i32
+ f64.convert_i32_u
Scope:
at func (10:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack:
@@ -155,13 +155,13 @@ at (anonymous) (0:17):
Paused:
i32.const 1
- #f64.convert_u/i32
+ #f64.convert_i32_u
i32.const 7
Scope:
at func (11:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number)
@@ -170,14 +170,14 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
- f64.convert_u/i32
+ f64.convert_i32_u
#i32.const 7
- f64.convert_u/i32
+ f64.convert_i32_u
Scope:
at func (12:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number)
@@ -187,13 +187,13 @@ at (anonymous) (0:17):
Paused:
i32.const 7
- #f64.convert_u/i32
+ #f64.convert_i32_u
f64.div
Scope:
at func (13:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number), "1": 7 (number)
@@ -202,14 +202,14 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
- f64.convert_u/i32
+ f64.convert_i32_u
#f64.div
- set_local 3
+ local.set 3
Scope:
at func (14:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 1 (number), "1": 7 (number)
@@ -219,13 +219,13 @@ at (anonymous) (0:17):
Paused:
f64.div
- #set_local 3
-end
+ #local.set 3
+ i32.const 15
Scope:
at func (15:2):
- scope (global):
- -- skipped globals
+ globals: "global#0": 0 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0 (number)
stack: "0": 0.14285714285714285 (number)
@@ -234,14 +234,46 @@ at (anonymous) (0:17):
-- skipped globals
Paused:
- set_local 3
-#end
+ local.set 3
+ #i32.const 15
+ global.set 0
+
+Scope:
+at func (16:2):
+ - scope (global):
+ globals: "global#0": 0 (number)
+ - scope (local):
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
+ stack:
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped globals
+Paused:
+ i32.const 15
+ #global.set 0
+end
Scope:
-at func (16:0):
+at func (17:2):
+ - scope (global):
+ globals: "global#0": 0 (number)
+ - scope (local):
+ locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
+ stack: "0": 15 (number)
+at (anonymous) (0:17):
- scope (global):
-- skipped globals
+
+Paused:
+ global.set 0
+#end
+
+
+Scope:
+at func (18:0):
+ - scope (global):
+ globals: "global#0": 15 (number)
- scope (local):
locals: "i32Arg": 11 (number), "local#1": 47 (number), "i64_local": -9223372036854775808 (string), "unicode☼f64": 0.14285714285714285 (number)
stack:
diff --git a/deps/v8/test/inspector/debugger/wasm-scope-info.js b/deps/v8/test/inspector/debugger/wasm-scope-info.js
index f7a0df497f..116b0ce146 100644
--- a/deps/v8/test/inspector/debugger/wasm-scope-info.js
+++ b/deps/v8/test/inspector/debugger/wasm-scope-info.js
@@ -33,6 +33,7 @@ async function instantiateWasm() {
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
var builder = new WasmModuleBuilder();
+ builder.addGlobal(kWasmI32, true);
builder.addFunction('func', kSig_v_i)
.addLocals(
@@ -51,7 +52,10 @@ async function instantiateWasm() {
kExprSetLocal, 2,
// Set local 3 to 1/7.
kExprI32Const, 1, kExprF64UConvertI32, kExprI32Const, 7,
- kExprF64UConvertI32, kExprF64Div, kExprSetLocal, 3
+ kExprF64UConvertI32, kExprF64Div, kExprSetLocal, 3,
+
+ // Set global 0 to 15
+ kExprI32Const, 15, kExprSetGlobal, 0,
])
.exportAs('main');
@@ -129,13 +133,15 @@ async function dumpScopeProperties(message) {
async function dumpScopeChainsOnPause(message) {
InspectorTest.log(`Scope:`);
for (var frame of message.params.callFrames) {
+ var isWasmFrame = /^wasm/.test(frame.url);
var functionName = frame.functionName || '(anonymous)';
var lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
var columnNumber = frame.location ? frame.location.columnNumber : frame.columnNumber;
InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
for (var scope of frame.scopeChain) {
InspectorTest.logObject(' - scope (' + scope.type + '):');
- if (scope.type == 'global') {
+ if (!isWasmFrame && scope.type == 'global') {
+ // Skip global scope for non wasm-functions.
InspectorTest.logObject(' -- skipped globals');
continue;
}
diff --git a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
index 09a9395eaa..406d39dd95 100644
--- a/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-set-breakpoint-expected.txt
@@ -10,12 +10,12 @@ Source of script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:
Source of script wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:
1: func $wasm_B (param i32)
2: loop
-3: get_local 0
+3: local.get 0
4: if
-5: get_local 0
+5: local.get 0
6: i32.const 1
7: i32.sub
-8: set_local 0
+8: local.set 0
9: call 0
10: br 1
11: end
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
index 7af75aa575..c951dce4ba 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
@@ -14,12 +14,12 @@ end
func $wasm_B (param i32)
loop
- get_local 0
+ local.get 0
if
- get_local 0
+ local.get 0
i32.const 1
i32.sub
- set_local 0
+ local.set 0
call 0
br 1
end
@@ -32,7 +32,7 @@ Setting breakpoint on line 7 (on the setlocal before the call), url wasm://wasm/
lineNumber : 7
scriptId : <scriptId>
}
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >set_local 0
+Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >local.set 0
at wasm_B (7:6):
- scope (global):
-- skipped
@@ -97,7 +97,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepOut called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >set_local 0
+Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >local.set 0
at wasm_B (7:6):
- scope (global):
-- skipped
@@ -130,7 +130,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.resume called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >set_local 0
+Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >local.set 0
at wasm_B (7:6):
- scope (global):
-- skipped
@@ -190,7 +190,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:2:4: >get_local 0
+Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:2:4: >local.get 0
at wasm_B (2:4):
- scope (global):
-- skipped
@@ -212,7 +212,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:4:6: >get_local 0
+Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:4:6: >local.get 0
at wasm_B (4:6):
- scope (global):
-- skipped
@@ -245,7 +245,7 @@ at (anonymous) (0:17):
- scope (global):
-- skipped
Debugger.stepInto called
-Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >set_local 0
+Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >local.set 0
at wasm_B (7:6):
- scope (global):
-- skipped
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 4f5a31d290..4321edccac 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -14,9 +14,9 @@
#include "include/v8.h"
#include "src/base/platform/platform.h"
-#include "src/flags.h"
-#include "src/utils.h"
-#include "src/vector.h"
+#include "src/flags/flags.h"
+#include "src/utils/utils.h"
+#include "src/utils/vector.h"
#include "test/inspector/isolate-data.h"
#include "test/inspector/task-runner.h"
@@ -27,6 +27,12 @@ namespace internal {
extern void DisableEmbeddedBlobRefcounting();
extern void FreeCurrentEmbeddedBlob();
+extern v8::StartupData CreateSnapshotDataBlobInternal(
+ v8::SnapshotCreator::FunctionCodeHandling function_code_handling,
+ const char* embedded_source, v8::Isolate* isolate);
+extern v8::StartupData WarmUpSnapshotDataBlobInternal(
+ v8::StartupData cold_snapshot_blob, const char* warmup_source);
+
} // namespace internal
} // namespace v8
@@ -56,12 +62,26 @@ std::vector<uint16_t> ToVector(v8::Isolate* isolate,
return buffer;
}
+std::vector<uint8_t> ToBytes(v8::Isolate* isolate, v8::Local<v8::String> str) {
+ std::vector<uint8_t> buffer(str->Length());
+ str->WriteOneByte(isolate, buffer.data(), 0, str->Length());
+ return buffer;
+}
+
v8::Local<v8::String> ToV8String(v8::Isolate* isolate, const char* str) {
return v8::String::NewFromUtf8(isolate, str, v8::NewStringType::kNormal)
.ToLocalChecked();
}
v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
+ const std::vector<uint8_t>& bytes) {
+ return v8::String::NewFromOneByte(isolate, bytes.data(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(bytes.size()))
+ .ToLocalChecked();
+}
+
+v8::Local<v8::String> ToV8String(v8::Isolate* isolate,
const std::string& buffer) {
int length = static_cast<int>(buffer.size());
return v8::String::NewFromUtf8(isolate, buffer.data(),
@@ -262,7 +282,7 @@ class ExecuteStringTask : public TaskRunner::Task {
int length = static_cast<int>(name_.size());
v8::internal::Vector<uint16_t> buffer =
v8::internal::Vector<uint16_t>::New(length);
- std::copy(name_.begin(), name_.end(), buffer.start());
+ std::copy(name_.begin(), name_.end(), buffer.begin());
data->RegisterModule(context, buffer, &scriptSource);
}
}
@@ -564,8 +584,8 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
IsolateData::FromContext(context)->GetContextGroupId(context),
args.GetIsolate(), args[2].As<v8::Function>());
- std::vector<uint16_t> state =
- ToVector(args.GetIsolate(), args[1].As<v8::String>());
+ std::vector<uint8_t> state =
+ ToBytes(args.GetIsolate(), args[1].As<v8::String>());
int context_group_id = args[0].As<v8::Int32>()->Value();
int session_id = 0;
RunSyncTask(backend_runner_, [&context_group_id, &session_id, &channel,
@@ -587,9 +607,9 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
Exit();
}
int session_id = args[0].As<v8::Int32>()->Value();
- std::vector<uint16_t> state;
+ std::vector<uint8_t> state;
RunSyncTask(backend_runner_, [&session_id, &state](IsolateData* data) {
- state = ToVector(data->DisconnectSession(session_id)->string());
+ state = data->DisconnectSession(session_id);
});
channels_.erase(session_id);
args.GetReturnValue().Set(ToV8String(args.GetIsolate(), state));
@@ -1030,50 +1050,6 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
};
-bool RunExtraCode(v8::Isolate* isolate, v8::Local<v8::Context> context,
- const char* utf8_source, const char* name) {
- v8::Context::Scope context_scope(context);
- v8::TryCatch try_catch(isolate);
- v8::Local<v8::String> source_string;
- if (!v8::String::NewFromUtf8(isolate, utf8_source, v8::NewStringType::kNormal)
- .ToLocal(&source_string)) {
- return false;
- }
- v8::Local<v8::String> resource_name =
- v8::String::NewFromUtf8(isolate, name, v8::NewStringType::kNormal)
- .ToLocalChecked();
- v8::ScriptOrigin origin(resource_name);
- v8::ScriptCompiler::Source source(source_string, origin);
- v8::Local<v8::Script> script;
- if (!v8::ScriptCompiler::Compile(context, &source).ToLocal(&script))
- return false;
- if (script->Run(context).IsEmpty()) return false;
- CHECK(!try_catch.HasCaught());
- return true;
-}
-
-v8::StartupData CreateSnapshotDataBlob(const char* embedded_source = nullptr) {
- // Create a new isolate and a new context from scratch, optionally run
- // a script to embed, and serialize to create a snapshot blob.
- v8::StartupData result = {nullptr, 0};
- {
- v8::SnapshotCreator snapshot_creator;
- v8::Isolate* isolate = snapshot_creator.GetIsolate();
- {
- v8::HandleScope scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- if (embedded_source != nullptr &&
- !RunExtraCode(isolate, context, embedded_source, "<embedded>")) {
- return result;
- }
- snapshot_creator.SetDefaultContext(context);
- }
- result = snapshot_creator.CreateBlob(
- v8::SnapshotCreator::FunctionCodeHandling::kClear);
- }
- return result;
-}
-
} // namespace
int main(int argc, char* argv[]) {
@@ -1092,7 +1068,8 @@ int main(int argc, char* argv[]) {
if (strcmp(argv[i], "--embed") == 0) {
argv[i++] = nullptr;
printf("Embedding script '%s'\n", argv[i]);
- startup_data = CreateSnapshotDataBlob(argv[i]);
+ startup_data = i::CreateSnapshotDataBlobInternal(
+ v8::SnapshotCreator::FunctionCodeHandling::kClear, argv[i], nullptr);
argv[i] = nullptr;
}
}
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index 2e37f2126f..621fc163e5 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -14,6 +14,9 @@
# Bad OOM timing on noembed builds (https://crbug.com/v8/8494).
'debugger/pause-on-oom': [PASS, ['embedded_builtins == False', SKIP]],
+
+ # https://crbug.com/v8/9029
+ 'debugger/script-on-after-compile-snapshot': [SKIP],
}], # ALWAYS
##############################################################################
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 3cdd1f968c..bfc7934d31 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -16,7 +16,7 @@ v8::internal::Vector<uint16_t> ToVector(v8::Isolate* isolate,
v8::Local<v8::String> str) {
v8::internal::Vector<uint16_t> buffer =
v8::internal::Vector<uint16_t>::New(str->Length());
- str->Write(isolate, buffer.start(), 0, str->Length());
+ str->Write(isolate, buffer.begin(), 0, str->Length());
return buffer;
}
@@ -157,13 +157,12 @@ int IsolateData::ConnectSession(int context_group_id,
return session_id;
}
-std::unique_ptr<v8_inspector::StringBuffer> IsolateData::DisconnectSession(
- int session_id) {
+std::vector<uint8_t> IsolateData::DisconnectSession(int session_id) {
v8::SealHandleScope seal_handle_scope(isolate());
auto it = sessions_.find(session_id);
CHECK(it != sessions_.end());
context_group_by_session_.erase(it->second.get());
- std::unique_ptr<v8_inspector::StringBuffer> result = it->second->stateJSON();
+ std::vector<uint8_t> result = it->second->state();
sessions_.erase(it);
return result;
}
@@ -282,14 +281,14 @@ int IsolateData::HandleMessage(v8::Local<v8::Message> message,
v8_inspector::StringView detailed_message;
v8::internal::Vector<uint16_t> message_text_string =
ToVector(isolate, message->Get());
- v8_inspector::StringView message_text(message_text_string.start(),
+ v8_inspector::StringView message_text(message_text_string.begin(),
message_text_string.length());
v8::internal::Vector<uint16_t> url_string;
if (message->GetScriptOrigin().ResourceName()->IsString()) {
url_string = ToVector(
isolate, message->GetScriptOrigin().ResourceName().As<v8::String>());
}
- v8_inspector::StringView url(url_string.start(), url_string.length());
+ v8_inspector::StringView url(url_string.begin(), url_string.length());
v8::SealHandleScope seal_handle_scope(isolate);
return inspector->exceptionThrown(
@@ -464,7 +463,7 @@ class StringBufferImpl : public v8_inspector::StringBuffer {
public:
StringBufferImpl(v8::Isolate* isolate, v8::Local<v8::String> string)
: data_(ToVector(isolate, string)),
- view_(data_.start(), data_.length()) {}
+ view_(data_.begin(), data_.length()) {}
const v8_inspector::StringView& string() override { return view_; }
private:
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index 6d68a85776..d569ab11e0 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -12,7 +12,7 @@
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/vector.h"
+#include "src/utils/vector.h"
class TaskRunner;
@@ -46,7 +46,7 @@ class IsolateData : public v8_inspector::V8InspectorClient {
int ConnectSession(int context_group_id,
const v8_inspector::StringView& state,
v8_inspector::V8Inspector::Channel* channel);
- std::unique_ptr<v8_inspector::StringBuffer> DisconnectSession(int session_id);
+ std::vector<uint8_t> DisconnectSession(int session_id);
void SendMessage(int session_id, const v8_inspector::StringView& message);
void BreakProgram(int context_group_id,
const v8_inspector::StringView& reason,
diff --git a/deps/v8/test/inspector/runtime/enable-async-stack-expected.txt b/deps/v8/test/inspector/runtime/enable-async-stack-expected.txt
index a030656744..549ceae15c 100644
--- a/deps/v8/test/inspector/runtime/enable-async-stack-expected.txt
+++ b/deps/v8/test/inspector/runtime/enable-async-stack-expected.txt
@@ -33,10 +33,10 @@ Checks that async stack is captured when Runtime.setAsyncCallStackDepth is calle
callFrames : [
[0] : {
columnNumber : 32
- functionName :
+ functionName :
lineNumber : 0
scriptId : <scriptId>
- url :
+ url :
}
]
parentId : {
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index d98f935d6d..a605c4d791 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -103,6 +103,16 @@ Running test: testArrayBuffer
1 own number 16843009
__proto__ own object undefined
+Running test: testDetachedArrayBuffer
+[[Int8Array]]
+ __proto__ own object undefined
+[[Uint8Array]]
+ __proto__ own object undefined
+[[Int16Array]]
+ __proto__ own object undefined
+[[Int32Array]]
+ __proto__ own object undefined
+
Running test: testArrayBufferWithBrokenUintCtor
[[Int8Array]] own object undefined
[[Uint8Array]] own object undefined
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index 35bc0dd895..6cb3e39909 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-private-fields
+// Flags: --harmony-private-fields --allow-natives-syntax
let {session, contextGroup, Protocol} = InspectorTest.start('Checks Runtime.getProperties method');
@@ -52,6 +52,21 @@ InspectorTest.runAsyncTestSuite([
}
},
+ async function testDetachedArrayBuffer() {
+ await Protocol.Runtime.evaluate({ expression: 'var a = new ArrayBuffer(16)' });
+ await Protocol.Runtime.evaluate({ expression: 'var b = new Uint32Array(a)' });
+ let objectId = await evaluateToObjectId('a');
+ await Protocol.Runtime.evaluate({ expression: '%ArrayBufferDetach(a)' });
+ await Protocol.Runtime.evaluate({ expression: 'b', generatePreview: true })
+ let props = await Protocol.Runtime.getProperties({ objectId, ownProperties: true });
+ for (let prop of props.result.result) {
+ if (prop.name === '__proto__')
+ continue;
+ InspectorTest.log(prop.name);
+ await logGetPropertiesResult(prop.value.objectId);
+ }
+ },
+
async function testArrayBufferWithBrokenUintCtor() {
await evaluateToObjectId(`(function() {
this.uint8array_old = this.Uint8Array;
diff --git a/deps/v8/test/inspector/task-runner.h b/deps/v8/test/inspector/task-runner.h
index 8df1f394a5..41a5729571 100644
--- a/deps/v8/test/inspector/task-runner.h
+++ b/deps/v8/test/inspector/task-runner.h
@@ -12,8 +12,8 @@
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/locked-queue-inl.h"
-#include "src/vector.h"
+#include "src/utils/locked-queue-inl.h"
+#include "src/utils/vector.h"
#include "test/inspector/isolate-data.h"
class TaskRunner : public v8::base::Thread {
diff --git a/deps/v8/test/intl/date-format/check-calendar.js b/deps/v8/test/intl/date-format/check-calendar.js
new file mode 100644
index 0000000000..b6c7c58ea3
--- /dev/null
+++ b/deps/v8/test/intl/date-format/check-calendar.js
@@ -0,0 +1,60 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-add-calendar-numbering-system
+
+let invalidCalendar = [
+ "invalid",
+ "abce",
+];
+
+// https://www.unicode.org/repos/cldr/tags/latest/common/bcp47/calendar.xml
+let validCalendar= [
+ "buddhist",
+ "chinese",
+ "coptic",
+ "dangi",
+ "ethioaa",
+ "ethiopic",
+ "gregory",
+ "hebrew",
+ "indian",
+ "islamic",
+ "islamic-umalqura",
+ "islamic-tbla",
+ "islamic-civil",
+ "islamic-rgsa",
+ "iso8601",
+ "japanese",
+ "persian",
+ "roc",
+];
+
+let locales = [
+ "en",
+ "ar",
+];
+
+
+invalidCalendar.forEach(function(calendar) {
+ assertThrows(
+ () => new Intl.DateTimeFormat(["en"], {calendar}),
+ RangeError);
+}
+);
+
+let value = new Date();
+validCalendar.forEach(function(calendar) {
+ locales.forEach(function(base) {
+ let l = base + "-u-ca-" + calendar;
+ let dtf = new Intl.DateTimeFormat([base], {calendar});
+ assertEquals(l, dtf.resolvedOptions().locale);
+
+ // Test the formatting result is the same as passing in via u-ca-
+ // in the locale.
+ let dtf2 = new Intl.DateTimeFormat([l]);
+ assertEquals(dtf2.format(value), dtf.format(value));
+ });
+}
+);
diff --git a/deps/v8/test/intl/date-format/check-numbering-system.js b/deps/v8/test/intl/date-format/check-numbering-system.js
new file mode 100644
index 0000000000..0bb71c5358
--- /dev/null
+++ b/deps/v8/test/intl/date-format/check-numbering-system.js
@@ -0,0 +1,68 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-add-calendar-numbering-system
+
+let invalidNumberingSystem = [
+ "invalid",
+ "abce",
+ "finance",
+ "native",
+ "traditio",
+];
+
+// https://tc39.github.io/ecma402/#table-numbering-system-digits
+let validNumberingSystem= [
+ "arab",
+ "arabext",
+ "bali",
+ "beng",
+ "deva",
+ "fullwide",
+ "gujr",
+ "guru",
+ "hanidec",
+ "khmr",
+ "knda",
+ "laoo",
+ "latn",
+ "limb",
+ "mlym",
+ "mong",
+ "mymr",
+ "orya",
+ "tamldec",
+ "telu",
+ "thai",
+ "tibt",
+];
+
+let locales = [
+ "en",
+ "ar",
+];
+
+
+invalidNumberingSystem.forEach(function(numberingSystem) {
+ assertThrows(
+ () => new Intl.DateTimeFormat(["en"], {numberingSystem}),
+ RangeError);
+}
+);
+
+let value = new Date();
+validNumberingSystem.forEach(function(numberingSystem) {
+ locales.forEach(function(base) {
+ let l = base + "-u-nu-" + numberingSystem;
+ let dtf = new Intl.DateTimeFormat([base], {numberingSystem});
+ assertEquals(l, dtf.resolvedOptions().locale);
+ assertEquals(numberingSystem, dtf.resolvedOptions().numberingSystem);
+
+ // Test the formatting result is the same as passing in via u-nu-
+ // in the locale.
+ let dtf2 = new Intl.DateTimeFormat([l]);
+ assertEquals(dtf2.format(value), dtf.format(value));
+ });
+}
+);
diff --git a/deps/v8/test/intl/date-format/en-format-range-to-parts.js b/deps/v8/test/intl/date-format/en-format-range-to-parts.js
new file mode 100644
index 0000000000..c2421812f8
--- /dev/null
+++ b/deps/v8/test/intl/date-format/en-format-range-to-parts.js
@@ -0,0 +1,49 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-date-format-range
+
+const date1 = new Date("2019-01-03T03:20");
+const date2 = new Date("2019-01-05T19:33");
+const date3 = new Date("2019-01-05T22:57");
+
+// value: "Jan 3 – 5, 2019"
+// source: hhhhShhhEhhhhhh
+// type: mmmldllldllyyyy
+// h: Shared, S: startRange, E: endRange
+// m: month, l: literal, d: day, y: year
+const expected1 = [
+ {type: "month", value: "Jan", source: "shared"},
+ {type: "literal", value: " ", source: "shared"},
+ {type: "day", value: "3", source: "startRange"},
+ {type: "literal", value: " – ", source: "shared"},
+ {type: "day", value: "5", source: "endRange"},
+ {type: "literal", value: ", ", source: "shared"},
+ {type: "year", value: "2019", source: "shared"}
+];
+
+var dtf = new Intl.DateTimeFormat(["en"], {year: "numeric", month: "short", day: "numeric"});
+const ret1 = dtf.formatRangeToParts(date1, date2);
+assertEquals(expected1, ret1);
+
+// value: "Jan 5, 7 – 10 PM"
+// source: hhhhhhhShhhEEhhh
+// type: mmmldlldlllhhlpp
+// h: Shared, S: startRange, E: endRange
+// m: month, l: literal, d: day, h: hour, p: dayPeriod
+
+const expected2 = [
+ {type: "month", value: "Jan", source: "shared"},
+ {type: "literal", value: " ", source: "shared"},
+ {type: "day", value: "5", source: "shared"},
+ {type: "literal", value: ", ", source: "shared"},
+ {type: "hour", value: "7", source: "startRange"},
+ {type: "literal", value: " – ", source: "shared"},
+ {type: "hour", value: "10", source: "endRange"},
+ {type: "literal", value: " ", source: "shared"},
+ {type: "dayPeriod", value: "PM", source: "shared"}
+];
+dtf = new Intl.DateTimeFormat(["en"], {month: "short", day: "numeric", hour: "numeric"});
+const ret2 = dtf.formatRangeToParts(date2, date3);
+assertEquals(expected2, ret2);
diff --git a/deps/v8/test/intl/date-format/format-range-to-parts.js b/deps/v8/test/intl/date-format/format-range-to-parts.js
index 472ec275dd..b2eac1765c 100644
--- a/deps/v8/test/intl/date-format/format-range-to-parts.js
+++ b/deps/v8/test/intl/date-format/format-range-to-parts.js
@@ -11,8 +11,10 @@ assertFalse(descriptor.enumerable);
assertTrue(descriptor.configurable);
const date1 = new Date("2019-1-3");
-const date2 = new Date("2019-3-4");
-const dtf = new Intl.DateTimeFormat();
+const date2 = new Date("2019-1-5");
+const date3 = new Date("2019-3-4");
+const date4 = new Date("2020-3-4");
+let dtf = new Intl.DateTimeFormat();
assertThrows(() => dtf.formatRangeToParts(), RangeError);
assertThrows(() => dtf.formatRangeToParts(date1), RangeError);
assertThrows(() => dtf.formatRangeToParts(undefined, date2), RangeError);
@@ -22,3 +24,60 @@ assertThrows(() => dtf.formatRangeToParts(date1, "2019-5-4"), RangeError);
assertThrows(() => dtf.formatRangeToParts(date2, date1), RangeError);
assertDoesNotThrow(() =>dtf.formatRangeToParts(date1, date2));
+
+function partsToString(parts) {
+ return parts.map(x => x.value).join("");
+}
+
+const validSources = ["startRange", "endRange", "shared"];
+const validTypes = ["literal", "year", "month", "day", "hour", "minute", "second",
+ "weekday", "dayPeriod", "timeZoneName", "era"];
+
+function assertParts(parts) {
+ const str = partsToString(parts);
+ parts.forEach(function(part) {
+ // Check the range of part.source
+ assertTrue(validSources.includes(part.source),
+ "Invalid source '" + part.source + "' in '" + str + "' for '" + part.value + "'");
+ // Check the range of part.type
+ assertTrue(validTypes.includes(part.type),
+ "Invalid type '" + part.type + "' in '" + str + "' for '" + part.value + "'");
+ // Check the part.value is a string
+ assertEquals("string", typeof part.value, "Invalid value for '" + str + "'");
+ });
+}
+
+function verifyFormatRangeToParts(a, b, dtf) {
+ var parts = dtf.formatRangeToParts(a, b);
+ // Check each parts fulfill basic property of the parts.
+ assertParts(parts);
+ // ensure the 'value' in the parts is the same as the output of
+ // the formatRange.
+ assertEquals(dtf.formatRange(a, b), partsToString(parts));
+}
+
+verifyFormatRangeToParts(date1, date2, dtf);
+verifyFormatRangeToParts(date1, date3, dtf);
+verifyFormatRangeToParts(date1, date4, dtf);
+verifyFormatRangeToParts(date2, date3, dtf);
+verifyFormatRangeToParts(date2, date4, dtf);
+verifyFormatRangeToParts(date3, date4, dtf);
+
+dtf = new Intl.DateTimeFormat(["en"], {year: "numeric", month: "short", day: "numeric"});
+
+verifyFormatRangeToParts(date1, date2, dtf);
+verifyFormatRangeToParts(date1, date3, dtf);
+verifyFormatRangeToParts(date1, date4, dtf);
+verifyFormatRangeToParts(date2, date3, dtf);
+verifyFormatRangeToParts(date2, date4, dtf);
+verifyFormatRangeToParts(date3, date4, dtf);
+
+// Test the sequence of ToNumber and TimeClip
+var secondDateAccessed = false;
+assertThrows(
+ () =>
+ dtf.formatRangeToParts(
+ new Date(864000000*10000000 + 1), // a date will cause TimeClip return NaN
+ { get [Symbol.toPrimitive]() { secondDateAccessed = true; return {}} }),
+ TypeError);
+assertTrue(secondDateAccessed);
diff --git a/deps/v8/test/intl/general/CanonicalizeLocaleListTakeLocale.js b/deps/v8/test/intl/general/CanonicalizeLocaleListTakeLocale.js
index 8dcdf70b97..0643a28336 100644
--- a/deps/v8/test/intl/general/CanonicalizeLocaleListTakeLocale.js
+++ b/deps/v8/test/intl/general/CanonicalizeLocaleListTakeLocale.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-locale
-//
// Test NumberFormat will accept Intl.Locale as first parameter, or
// as in the array.
diff --git a/deps/v8/test/intl/general/supported-locales-of.js b/deps/v8/test/intl/general/supported-locales-of.js
index 556e525828..5cc0095614 100644
--- a/deps/v8/test/intl/general/supported-locales-of.js
+++ b/deps/v8/test/intl/general/supported-locales-of.js
@@ -27,11 +27,17 @@
// Tests supportedLocalesOf method.
+// Flags: --harmony-intl-segmenter
+
var services = [
- Intl.DateTimeFormat,
Intl.Collator,
+ Intl.DateTimeFormat,
Intl.NumberFormat,
- Intl.PluralRules
+ Intl.ListFormat,
+ Intl.PluralRules,
+ Intl.RelativeTimeFormat,
+ Intl.Segmenter,
+ Intl.v8BreakIterator,
];
for (const service of services) {
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index b2b022fbd0..81950f13b9 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -33,6 +33,9 @@
# https://code.google.com/p/v8/issues/detail?id=7481
'collator/check-kf-option': [FAIL],
'collator/check-kn-option': [FAIL],
+
+ # Slow tests.
+ 'regress-903566': [PASS, SLOW],
}], # ALWAYS
['variant == no_wasm_traps', {
@@ -66,6 +69,12 @@
}], # 'system == android'
##############################################################################
+['variant == stress', {
+ # Too slow.
+ 'regress-903566': [SKIP],
+}], # 'variant == stress'
+
+##############################################################################
['variant == jitless and not embedded_builtins', {
'*': [SKIP],
}], # variant == jitless and not embedded_builtins
diff --git a/deps/v8/test/intl/list-format/format.js b/deps/v8/test/intl/list-format/format.js
index 0d66bc1d46..1a6f1ee538 100644
--- a/deps/v8/test/intl/list-format/format.js
+++ b/deps/v8/test/intl/list-format/format.js
@@ -41,6 +41,10 @@ function testFormatter(listFormat) {
assertThrows(() => listFormat.format([null, 'world']), TypeError);
assertThrows(() => listFormat.format(['hello', null]), TypeError);
assertThrows(() => listFormat.format([null]), TypeError);
+
+ // Test that Cons strings are handled correctly.
+ let arr = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"];
+ assertListFormat(listFormat, [arr + "n"]);
}
testFormatter(new Intl.ListFormat());
testFormatter(new Intl.ListFormat(["en"]));
diff --git a/deps/v8/test/intl/locale/locale-canonicalization.js b/deps/v8/test/intl/locale/locale-canonicalization.js
index cc0478fdb6..487cfe55d0 100644
--- a/deps/v8/test/intl/locale/locale-canonicalization.js
+++ b/deps/v8/test/intl/locale/locale-canonicalization.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-locale
-
// Make sure that locale string got canonicalized by the spec,
// keys are sorted and unique, region upper cased, script title cased and
// language lower cased.
diff --git a/deps/v8/test/intl/locale/locale-constructor.js b/deps/v8/test/intl/locale/locale-constructor.js
index bf2510553f..95a6f3d24f 100644
--- a/deps/v8/test/intl/locale/locale-constructor.js
+++ b/deps/v8/test/intl/locale/locale-constructor.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-locale
-
// Locale constructor can't be called as function.
assertThrows(() => Intl.Locale('sr'), TypeError);
diff --git a/deps/v8/test/intl/locale/locale-properties.js b/deps/v8/test/intl/locale/locale-properties.js
index 9800e8d6cf..7755c7e87e 100644
--- a/deps/v8/test/intl/locale/locale-properties.js
+++ b/deps/v8/test/intl/locale/locale-properties.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-locale
-
// Make sure that locale exposes all required properties. Those not specified
// should have undefined value.
diff --git a/deps/v8/test/intl/locale/maximize_minimize.js b/deps/v8/test/intl/locale/maximize_minimize.js
index 823a6670e3..51702ab410 100644
--- a/deps/v8/test/intl/locale/maximize_minimize.js
+++ b/deps/v8/test/intl/locale/maximize_minimize.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-locale
-
// Make sure that maximize and minimize of all locales work reasonbly.
assertEquals(new Intl.Locale("zh-TW").maximize().toString(), "zh-Hant-TW",
diff --git a/deps/v8/test/intl/locale/property.js b/deps/v8/test/intl/locale/property.js
index cbe076842f..b89705fe57 100644
--- a/deps/v8/test/intl/locale/property.js
+++ b/deps/v8/test/intl/locale/property.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-locale
-
// Make sure that accessing locale property will return undefined instead of
// crash.
diff --git a/deps/v8/test/intl/locale/regress-8032.js b/deps/v8/test/intl/locale/regress-8032.js
index b8219b1b50..647c4865ec 100644
--- a/deps/v8/test/intl/locale/regress-8032.js
+++ b/deps/v8/test/intl/locale/regress-8032.js
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-locale
-
assertThrows(() => new Intl.Locale(''), RangeError);
diff --git a/deps/v8/test/intl/number-format/check-numbering-system.js b/deps/v8/test/intl/number-format/check-numbering-system.js
new file mode 100644
index 0000000000..cd7884b8dc
--- /dev/null
+++ b/deps/v8/test/intl/number-format/check-numbering-system.js
@@ -0,0 +1,68 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-add-calendar-numbering-system
+
+let invalidNumberingSystem = [
+ "invalid",
+ "abce",
+ "finance",
+ "native",
+ "traditio",
+];
+
+// https://tc39.github.io/ecma402/#table-numbering-system-digits
+let validNumberingSystem= [
+ "arab",
+ "arabext",
+ "bali",
+ "beng",
+ "deva",
+ "fullwide",
+ "gujr",
+ "guru",
+ "hanidec",
+ "khmr",
+ "knda",
+ "laoo",
+ "latn",
+ "limb",
+ "mlym",
+ "mong",
+ "mymr",
+ "orya",
+ "tamldec",
+ "telu",
+ "thai",
+ "tibt",
+];
+
+let locales = [
+ "en",
+ "ar",
+];
+
+
+invalidNumberingSystem.forEach(function(numberingSystem) {
+ assertThrows(
+ () => new Intl.NumberFormat(["en"], {numberingSystem}),
+ RangeError);
+}
+);
+
+let value = 1234567.89;
+validNumberingSystem.forEach(function(numberingSystem) {
+ locales.forEach(function(base) {
+ let l = base + "-u-nu-" + numberingSystem;
+ let nf = new Intl.NumberFormat([base], {numberingSystem});
+ assertEquals(l, nf.resolvedOptions().locale);
+ assertEquals(numberingSystem, nf.resolvedOptions().numberingSystem);
+
+ // Test the formatting result is the same as passing in via u-nu-
+ // in the locale.
+ let nf2 = new Intl.NumberFormat([l]);
+ assertEquals(nf2.format(value), nf.format(value));
+ });
+}
+);
diff --git a/deps/v8/test/intl/number-format/unified/compact-display.js b/deps/v8/test/intl/number-format/unified/compact-display.js
new file mode 100644
index 0000000000..228a2b6259
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/compact-display.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+const testData = [
+ ["short"],
+ ["long"],
+];
+
+for (const [compactDisplay] of testData) {
+ nf = new Intl.NumberFormat("en", {compactDisplay, notation: "compact"});
+ assertEquals(compactDisplay, nf.resolvedOptions().compactDisplay);
+}
diff --git a/deps/v8/test/intl/number-format/unified/constructor-order.js b/deps/v8/test/intl/number-format/unified/constructor-order.js
new file mode 100644
index 0000000000..266426c7d4
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/constructor-order.js
@@ -0,0 +1,70 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+// Similar to constructor-order.js but also consider the new options
+// in https://tc39-transfer.github.io/proposal-unified-intl-numberformat/
+
+// Throws only once during construction.
+// Check for all getters to prevent regression.
+// Preserve the order of getter initialization.
+let getCount = 0;
+
+new Intl.NumberFormat(['en-US'], {
+ get localeMatcher() {
+ assertEquals(0, getCount++);
+ },
+ get style() {
+ assertEquals(1, getCount++);
+ },
+ get currency() {
+ assertEquals(2, getCount++);
+ },
+ get currencyDisplay() {
+ assertEquals(3, getCount++);
+ },
+ // Begin of new options
+ get currencySign() {
+ assertEquals(4, getCount++);
+ },
+ get unit() {
+ assertEquals(5, getCount++);
+ },
+ get unitDisplay() {
+ assertEquals(6, getCount++);
+ },
+ // End of new options
+ get minimumIntegerDigits() {
+ assertEquals(7, getCount++);
+ },
+ get minimumFractionDigits() {
+ assertEquals(8, getCount++);
+ },
+ get maximumFractionDigits() {
+ assertEquals(9, getCount++);
+ },
+ get minimumSignificantDigits() {
+ assertEquals(10, getCount++);
+ },
+ get maximumSignificantDigits() {
+ assertEquals(11, getCount++);
+ },
+ // Begin of new options
+ get notation() {
+ assertEquals(12, getCount++);
+ },
+ get compactDisplay() {
+ assertEquals(13, getCount++);
+ },
+ // End of new options
+ get useGrouping() {
+ assertEquals(14, getCount++);
+ },
+ // Begin of new options
+ get signDisplay() {
+ assertEquals(15, getCount++);
+ },
+ // End of new options
+});
+assertEquals(16, getCount);
diff --git a/deps/v8/test/intl/number-format/unified/currency-display.js b/deps/v8/test/intl/number-format/unified/currency-display.js
new file mode 100644
index 0000000000..4f7acb97c8
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/currency-display.js
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+// Test defaults
+let nf = new Intl.NumberFormat();
+assertEquals(undefined, nf.resolvedOptions().currencyDisplay);
+
+nf = new Intl.NumberFormat("en");
+assertEquals(undefined, nf.resolvedOptions().currencyDisplay);
+
+nf = new Intl.NumberFormat("en", {style: "decimal"});
+assertEquals(undefined, nf.resolvedOptions().currencyDisplay);
+
+nf = new Intl.NumberFormat("en", {style: "percent"});
+assertEquals(undefined, nf.resolvedOptions().currencyDisplay);
+
+nf = new Intl.NumberFormat("en", {style: "unit", unit: "meter"});
+assertEquals(undefined, nf.resolvedOptions().currencyDisplay);
+
+nf = new Intl.NumberFormat("en", {style: "currency", currency: "TWD"});
+assertEquals("symbol", nf.resolvedOptions().currencyDisplay);
+
+const testData = [
+ ["name", "123.00 New Taiwan dollars"],
+ ["code", "TWD 123.00"],
+ ["symbol", "NT$123.00"],
+ ["narrow-symbol", "$123.00"], // new
+];
+
+for (const [currencyDisplay, expectation] of testData) {
+ nf = new Intl.NumberFormat("en",
+ {style: 'currency', currency: "TWD", currencyDisplay});
+ assertEquals('currency', nf.resolvedOptions().style);
+ assertEquals(currencyDisplay, nf.resolvedOptions().currencyDisplay);
+ assertEquals(expectation, nf.format(123));
+}
diff --git a/deps/v8/test/intl/number-format/unified/currency-sign.js b/deps/v8/test/intl/number-format/unified/currency-sign.js
new file mode 100644
index 0000000000..3f2941a8e9
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/currency-sign.js
@@ -0,0 +1,41 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+// Test default.
+let nf = new Intl.NumberFormat();
+assertEquals(undefined, nf.resolvedOptions().currencySign);
+
+nf = new Intl.NumberFormat("en");
+assertEquals(undefined, nf.resolvedOptions().currencySign);
+
+nf = new Intl.NumberFormat("en", {style: 'decimal'});
+assertEquals(undefined, nf.resolvedOptions().currencySign);
+
+nf = new Intl.NumberFormat("en", {style: 'percent'});
+assertEquals(undefined, nf.resolvedOptions().currencySign);
+
+nf = new Intl.NumberFormat("en", {style: 'unit', unit: "meter"});
+assertEquals(undefined, nf.resolvedOptions().currencySign);
+
+
+nf = new Intl.NumberFormat("en", {style: 'currency', currency: "TWD"});
+assertEquals("standard", nf.resolvedOptions().currencySign);
+
+const testData = [
+ ["standard", "-NT$123.40", "-NT$0.00", "NT$0.00", "NT$123.40"],
+ ["accounting", "(NT$123.40)", "(NT$0.00)", "NT$0.00", "NT$123.40"],
+];
+
+for (const [currencySign, neg, negZero, zero, pos] of testData) {
+ nf = new Intl.NumberFormat("en", {style: 'currency', currency: "TWD",
+ currencySign});
+ assertEquals('currency', nf.resolvedOptions().style);
+ assertEquals(currencySign, nf.resolvedOptions().currencySign);
+ assertEquals(neg, nf.format(-123.4));
+ assertEquals(negZero, nf.format(-0));
+ assertEquals(zero, nf.format(0));
+ assertEquals(pos, nf.format(123.4));
+}
diff --git a/deps/v8/test/intl/number-format/unified/no-compact-display.js b/deps/v8/test/intl/number-format/unified/no-compact-display.js
new file mode 100644
index 0000000000..95611e90fc
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/no-compact-display.js
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+// Except when the notation is "compact", the resolvedOptions().compactDisplay
+// should be undefined.
+//
+// Test default
+let nf = new Intl.NumberFormat();
+assertEquals(undefined, nf.resolvedOptions().compactDisplay);
+
+nf = new Intl.NumberFormat("en");
+assertEquals(undefined, nf.resolvedOptions().compactDisplay);
+
+const testData = [
+ ["scientific"],
+ ["engineering"],
+ ["standard"],
+];
+
+for (const [notation] of testData) {
+ nf = new Intl.NumberFormat("en", {notation});
+ assertEquals(undefined, nf.resolvedOptions().compactDisplay);
+ for (const compactDisplay of ["short", "long"]) {
+ nf = new Intl.NumberFormat("en", {compactDisplay, notation});
+ assertEquals(undefined, nf.resolvedOptions().compactDisplay);
+ }
+}
diff --git a/deps/v8/test/intl/number-format/unified/notation.js b/deps/v8/test/intl/number-format/unified/notation.js
new file mode 100644
index 0000000000..9c451773bd
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/notation.js
@@ -0,0 +1,89 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+// Test defaults.
+
+let nf = new Intl.NumberFormat();
+assertEquals("standard", nf.resolvedOptions().notation);
+
+nf = new Intl.NumberFormat("en");
+assertEquals("standard", nf.resolvedOptions().notation);
+
+nf = new Intl.NumberFormat("en", {style: "percent"});
+assertEquals("standard", nf.resolvedOptions().notation);
+
+const testData = [
+ ["standard", undefined, "987,654,321"],
+ ["scientific", undefined, "9.877E8"],
+ ["engineering", undefined, "987.654E6"],
+ ["compact", undefined, "987.654M"],
+ ["compact", "short", "987.654M"],
+ ["compact", "long", "987.654 million"],
+];
+
+for (const [notation, compactDisplay, expect1] of testData) {
+ nf = new Intl.NumberFormat("en", {notation, compactDisplay});
+ assertEquals(notation, nf.resolvedOptions().notation);
+ if (notation != "compact") {
+ assertEquals(undefined, nf.resolvedOptions().compactDisplay);
+ } else if (compactDisplay == "long") {
+ assertEquals("long", nf.resolvedOptions().compactDisplay);
+ } else {
+ assertEquals("short", nf.resolvedOptions().compactDisplay);
+ }
+ assertEquals(expect1, nf.format(987654321));
+}
+
+// Test Germany which has different decimal marks.
+let notation = "compact";
+nf = new Intl.NumberFormat("de", {notation, compactDisplay: "short"});
+assertEquals("987,654 Mio.", nf.format(987654321));
+assertEquals("98,765 Mio.", nf.format(98765432));
+assertEquals("98.765", nf.format(98765));
+assertEquals("9876", nf.format(9876));
+nf = new Intl.NumberFormat("de", {notation, compactDisplay: "long"});
+assertEquals("987,654 Millionen", nf.format(987654321));
+assertEquals("98,765 Millionen", nf.format(98765432));
+assertEquals("98,765 Tausend", nf.format(98765));
+assertEquals("9,876 Tausend", nf.format(9876));
+
+// Test Chinese, Japanese and Korean, which group by 4 digits.
+nf = new Intl.NumberFormat("zh-TW", {notation, compactDisplay: "short"});
+assertEquals("9.877億", nf.format(987654321));
+assertEquals("9876.543萬", nf.format(98765432));
+assertEquals("9.877萬", nf.format(98765));
+assertEquals("9876", nf.format(9876));
+nf = new Intl.NumberFormat("zh-TW", {notation, compactDisplay: "long"});
+assertEquals("9.877億", nf.format(987654321));
+assertEquals("9876.543萬", nf.format(98765432));
+assertEquals("9.877萬", nf.format(98765));
+assertEquals("9876", nf.format(9876));
+
+// Test Japanese with compact.
+nf = new Intl.NumberFormat("ja", {notation, compactDisplay: "short"});
+assertEquals("9.877億", nf.format(987654321));
+assertEquals("9876.543万", nf.format(98765432));
+assertEquals("9.877万", nf.format(98765));
+assertEquals("9876", nf.format(9876));
+nf = new Intl.NumberFormat("ja", {notation, compactDisplay: "long"});
+assertEquals("9.877億", nf.format(987654321));
+assertEquals("9876.543万", nf.format(98765432));
+assertEquals("9.877万", nf.format(98765));
+assertEquals("9876", nf.format(9876));
+
+// Test Korean with compact.
+nf = new Intl.NumberFormat("ko", {notation, compactDisplay: "short"});
+assertEquals("9.877억", nf.format(987654321));
+assertEquals("9876.543만", nf.format(98765432));
+assertEquals("9.877만", nf.format(98765));
+assertEquals("9.876천", nf.format(9876));
+assertEquals("987", nf.format(987));
+nf = new Intl.NumberFormat("ko", {notation, compactDisplay: "long"});
+assertEquals("9.877억", nf.format(987654321));
+assertEquals("9876.543만", nf.format(98765432));
+assertEquals("9.877만", nf.format(98765));
+assertEquals("9.876천", nf.format(9876));
+assertEquals("987", nf.format(987));
diff --git a/deps/v8/test/intl/number-format/unified/sign-display.js b/deps/v8/test/intl/number-format/unified/sign-display.js
new file mode 100644
index 0000000000..cdd7de0061
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/sign-display.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+// Test default.
+let nf = new Intl.NumberFormat();
+assertEquals("auto", nf.resolvedOptions().signDisplay);
+
+nf = new Intl.NumberFormat("en");
+assertEquals("auto", nf.resolvedOptions().signDisplay);
+
+const testData = [
+ ["auto", "-123", "-0", "0", "123"],
+ ["always", "-123", "-0", "+0", "+123"],
+ ["never", "123", "0", "0", "123"],
+ ["except-zero", "-123", "-0", "0", "+123"],
+];
+
+for (const [signDisplay, neg, negZero, zero, pos] of testData) {
+ nf = new Intl.NumberFormat("en", {signDisplay});
+ assertEquals(signDisplay, nf.resolvedOptions().signDisplay);
+ assertEquals(neg, nf.format(-123));
+ assertEquals(negZero, nf.format(-0));
+ assertEquals(zero, nf.format(0));
+ assertEquals(pos, nf.format(123));
+}
diff --git a/deps/v8/test/intl/number-format/unified/style-unit.js b/deps/v8/test/intl/number-format/unified/style-unit.js
new file mode 100644
index 0000000000..af35618fda
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/style-unit.js
@@ -0,0 +1,180 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+// Test default.
+
+let nf = new Intl.NumberFormat();
+assertEquals(undefined, nf.resolvedOptions().unit);
+
+nf = new Intl.NumberFormat("en");
+assertEquals(undefined, nf.resolvedOptions().unit);
+
+nf = new Intl.NumberFormat("en", {style: 'decimal'});
+assertEquals(undefined, nf.resolvedOptions().unit);
+
+nf = new Intl.NumberFormat("en", {style: 'currency', currency: 'TWD'});
+assertEquals(undefined, nf.resolvedOptions().unit);
+
+nf = new Intl.NumberFormat("en", {style: 'percent'});
+assertEquals('percent', nf.resolvedOptions().unit);
+
+assertThrows(() => new Intl.NumberFormat("en", {style: 'unit'}), TypeError);
+
+const validUnits = [
+ 'acre',
+ 'bit',
+ 'byte',
+ 'celsius',
+ 'centimeter',
+ 'day',
+ 'degree',
+ 'fahrenheit',
+ 'foot',
+ 'gigabit',
+ 'gigabyte',
+ 'gram',
+ 'hectare',
+ 'hour',
+ 'inch',
+ 'kilobit',
+ 'kilobyte',
+ 'kilogram',
+ 'kilometer',
+ 'megabit',
+ 'megabyte',
+ 'meter',
+ 'mile-scandinavian',
+ 'mile',
+ 'millimeter',
+ 'millisecond',
+ 'minute',
+ 'month',
+ 'ounce',
+ 'petabyte',
+ 'pound',
+ 'second',
+ 'stone',
+ 'terabit',
+ 'terabyte',
+ 'week',
+ 'yard',
+ 'year',
+ 'percent',
+ 'kilometer-per-hour',
+ 'mile-per-hour',
+ 'meter-per-second',
+ 'yard-per-second',
+ 'yard-per-hour',
+];
+
+for (const unit of validUnits) {
+ let resolved = new Intl.NumberFormat(
+ "en", {style: 'unit', unit}).resolvedOptions();
+ assertEquals('unit', resolved.style);
+ assertEquals(resolved.unit, unit);
+}
+
+function c(u) {
+ return new Intl.NumberFormat('en', { style: 'unit', unit: u});
+}
+assertThrows(() => c('acre-foot'), RangeError);
+assertThrows(() => c('ampere'), RangeError);
+assertThrows(() => c('arc-minute'), RangeError);
+assertThrows(() => c('arc-second'), RangeError);
+assertThrows(() => c('astronomical-unit'), RangeError);
+assertThrows(() => c('bushel'), RangeError);
+assertThrows(() => c('calorie'), RangeError);
+assertThrows(() => c('carat'), RangeError);
+assertThrows(() => c('centiliter'), RangeError);
+assertThrows(() => c('century'), RangeError);
+assertThrows(() => c('cubic-centimeter'), RangeError);
+assertThrows(() => c('cubic-foot'), RangeError);
+assertThrows(() => c('cubic-inch'), RangeError);
+assertThrows(() => c('cubic-kilometer'), RangeError);
+assertThrows(() => c('cubic-meter'), RangeError);
+assertThrows(() => c('cubic-mile'), RangeError);
+assertThrows(() => c('cubic-yard'), RangeError);
+assertThrows(() => c('cup-metric'), RangeError);
+assertThrows(() => c('cup'), RangeError);
+assertThrows(() => c('day-person'), RangeError);
+assertThrows(() => c('deciliter'), RangeError);
+assertThrows(() => c('decimeter'), RangeError);
+assertThrows(() => c('fathom'), RangeError);
+assertThrows(() => c('fluid-ounce'), RangeError);
+assertThrows(() => c('foodcalorie'), RangeError);
+assertThrows(() => c('furlong'), RangeError);
+assertThrows(() => c('g-force'), RangeError);
+assertThrows(() => c('gallon-imperial'), RangeError);
+assertThrows(() => c('gallon'), RangeError);
+assertThrows(() => c('generic'), RangeError);
+assertThrows(() => c('gigahertz'), RangeError);
+assertThrows(() => c('gigawatt'), RangeError);
+assertThrows(() => c('hectoliter'), RangeError);
+assertThrows(() => c('hectopascal'), RangeError);
+assertThrows(() => c('hertz'), RangeError);
+assertThrows(() => c('horsepower'), RangeError);
+assertThrows(() => c('inch-hg'), RangeError);
+assertThrows(() => c('joule'), RangeError);
+assertThrows(() => c('karat'), RangeError);
+assertThrows(() => c('kelvin'), RangeError);
+assertThrows(() => c('kilocalorie'), RangeError);
+assertThrows(() => c('kilohertz'), RangeError);
+assertThrows(() => c('kilojoule'), RangeError);
+assertThrows(() => c('kilowatt-hour'), RangeError);
+assertThrows(() => c('kilowatt'), RangeError);
+assertThrows(() => c('knot'), RangeError);
+assertThrows(() => c('light-year'), RangeError);
+assertThrows(() => c('liter-per-100kilometers'), RangeError);
+assertThrows(() => c('liter-per-kilometer'), RangeError);
+assertThrows(() => c('liter'), RangeError);
+assertThrows(() => c('lux'), RangeError);
+assertThrows(() => c('megahertz'), RangeError);
+assertThrows(() => c('megaliter'), RangeError);
+assertThrows(() => c('megawatt'), RangeError);
+assertThrows(() => c('meter-per-second-squared'), RangeError);
+assertThrows(() => c('metric-ton'), RangeError);
+assertThrows(() => c('microgram'), RangeError);
+assertThrows(() => c('micrometer'), RangeError);
+assertThrows(() => c('microsecond'), RangeError);
+assertThrows(() => c('mile-per-gallon-imperial'), RangeError);
+assertThrows(() => c('mile-per-gallon'), RangeError);
+assertThrows(() => c('milliampere'), RangeError);
+assertThrows(() => c('millibar'), RangeError);
+assertThrows(() => c('milligram-per-deciliter'), RangeError);
+assertThrows(() => c('milligram'), RangeError);
+assertThrows(() => c('milliliter'), RangeError);
+assertThrows(() => c('millimeter-of-mercury'), RangeError);
+assertThrows(() => c('millimole-per-liter'), RangeError);
+assertThrows(() => c('milliwatt'), RangeError);
+assertThrows(() => c('month-person'), RangeError);
+assertThrows(() => c('nanometer'), RangeError);
+assertThrows(() => c('nanosecond'), RangeError);
+assertThrows(() => c('nautical-mile'), RangeError);
+assertThrows(() => c('ohm'), RangeError);
+assertThrows(() => c('ounce-troy'), RangeError);
+assertThrows(() => c('parsec'), RangeError);
+assertThrows(() => c('part-per-million'), RangeError);
+assertThrows(() => c('picometer'), RangeError);
+assertThrows(() => c('pint-metric'), RangeError);
+assertThrows(() => c('pint'), RangeError);
+assertThrows(() => c('pound-per-square-inch'), RangeError);
+assertThrows(() => c('quart'), RangeError);
+assertThrows(() => c('radian'), RangeError);
+assertThrows(() => c('revolution'), RangeError);
+assertThrows(() => c('square-centimeter'), RangeError);
+assertThrows(() => c('square-foot'), RangeError);
+assertThrows(() => c('square-inch'), RangeError);
+assertThrows(() => c('square-kilometer'), RangeError);
+assertThrows(() => c('square-meter'), RangeError);
+assertThrows(() => c('square-mile'), RangeError);
+assertThrows(() => c('square-yard'), RangeError);
+assertThrows(() => c('tablespoon'), RangeError);
+assertThrows(() => c('teaspoon'), RangeError);
+assertThrows(() => c('ton'), RangeError);
+assertThrows(() => c('volt'), RangeError);
+assertThrows(() => c('watt'), RangeError);
+assertThrows(() => c('week-person'), RangeError);
+assertThrows(() => c('year-person'), RangeError);
diff --git a/deps/v8/test/intl/number-format/unified/unit-display.js b/deps/v8/test/intl/number-format/unified/unit-display.js
new file mode 100644
index 0000000000..eeb2c69ece
--- /dev/null
+++ b/deps/v8/test/intl/number-format/unified/unit-display.js
@@ -0,0 +1,36 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-numberformat-unified
+
+// Test default.
+let nf = new Intl.NumberFormat();
+assertEquals(undefined, nf.resolvedOptions().unitDisplay);
+
+nf = new Intl.NumberFormat("en");
+assertEquals(undefined, nf.resolvedOptions().unitDisplay);
+
+nf = new Intl.NumberFormat("en", {style: 'decimal'});
+assertEquals(undefined, nf.resolvedOptions().unitDisplay);
+
+nf = new Intl.NumberFormat("en", {style: 'currency', currency: 'TWD'});
+assertEquals(undefined, nf.resolvedOptions().unitDisplay);
+
+nf = new Intl.NumberFormat("en", {style: 'unit', unit: "meter"});
+assertEquals("short", nf.resolvedOptions().unitDisplay);
+
+nf = new Intl.NumberFormat("en", {style: 'percent'});
+assertEquals("short", nf.resolvedOptions().unitDisplay);
+
+const testData = [
+ ["short"],
+ ["narrow"],
+ ["long"],
+];
+
+for (const [unitDisplay] of testData) {
+ nf = new Intl.NumberFormat("en", {style: 'unit', unit: "meter", unitDisplay});
+ assertEquals('unit', nf.resolvedOptions().style);
+ assertEquals(unitDisplay, nf.resolvedOptions().unitDisplay);
+}
diff --git a/deps/v8/test/intl/regress-7982.js b/deps/v8/test/intl/regress-7982.js
index bd251c5939..92bcaf9d82 100644
--- a/deps/v8/test/intl/regress-7982.js
+++ b/deps/v8/test/intl/regress-7982.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-locale
-
// Make sure that maximize and minimize of locales work reasonbly.
assertEquals("zh-Hans-CN-u-ca-chinese", new Intl.Locale("zh-u-ca-Chinese").maximize().toString());
diff --git a/deps/v8/test/intl/regress-8604.js b/deps/v8/test/intl/regress-8604.js
new file mode 100644
index 0000000000..e773452479
--- /dev/null
+++ b/deps/v8/test/intl/regress-8604.js
@@ -0,0 +1,8 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var number = 3500;
+assertEquals(
+ "3.500,00\u00a0KM",
+ number.toLocaleString('hr-BA', { style: 'currency', currency: 'BAM'}));
diff --git a/deps/v8/test/intl/regress-8657.js b/deps/v8/test/intl/regress-8657.js
index c1c5cea708..a1f4f73445 100644
--- a/deps/v8/test/intl/regress-8657.js
+++ b/deps/v8/test/intl/regress-8657.js
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-locale
-
assertDoesNotThrow(() => new Intl.Locale('und'));
diff --git a/deps/v8/test/intl/regress-966285.js b/deps/v8/test/intl/regress-966285.js
new file mode 100644
index 0000000000..4c5cfa9491
--- /dev/null
+++ b/deps/v8/test/intl/regress-966285.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-intl-add-calendar-numbering-system
+
+var v = {};
+Object.defineProperty(v.__proto__, "calendar",
+ { get: function() { return -1; } });
+assertThrows(() => new Intl.DateTimeFormat(v, 0), RangeError);
diff --git a/deps/v8/test/intl/regress-971636.js b/deps/v8/test/intl/regress-971636.js
new file mode 100644
index 0000000000..9fd42f59e8
--- /dev/null
+++ b/deps/v8/test/intl/regress-971636.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Regression test: /\W/ with the 'i' flag must not match ASCII letters such as "S".
+
+assertEquals("RST", "RST".replace(/[\W_]/gi, ""));
+assertEquals("RST", "RST".replace(/[\W]/gi, ""));
+assertEquals("RST", "RST".replace(/[\Wa]/gi, ""));
+assertEquals(null, "s".match(/[\u00A0-\u0180]/i));
diff --git a/deps/v8/test/intl/relative-time-format/numberingSystems.js b/deps/v8/test/intl/relative-time-format/numberingSystems.js
new file mode 100644
index 0000000000..4b946eae5c
--- /dev/null
+++ b/deps/v8/test/intl/relative-time-format/numberingSystems.js
@@ -0,0 +1,46 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Based on https://www.ecma-international.org/ecma-402/#table-numbering-system-digits
+let testCases = [
+ ["arab", "in ١٢٣ days"], // U+0660 to U+0669
+ ["arabext", "in ۱۲۳ days"], // U+06F0 to U+06F9
+ ["bali", "in ᭑᭒᭓ days"], // U+1B50 to U+1B59
+ ["beng", "in ১২৩ days"], // U+09E6 to U+09EF
+ ["deva", "in १२३ days"], // U+0966 to U+096F
+ ["fullwide", "in 123 days"], // U+FF10 to U+FF19
+ ["gujr", "in ૧૨૩ days"], // U+0AE6 to U+0AEF
+ ["guru", "in ੧੨੩ days"], // U+0A66 to U+0A6F
+ // U+3007, U+4E00, U+4E8C, U+4E09, U+56DB, U+4E94, U+516D, U+4E03, U+516B, U+4E5D
+ ["hanidec", "in 一二三 days"],
+ ["khmr", "in ១២៣ days"], // U+17E0 to U+17E9
+ ["knda", "in ೧೨೩ days"], // U+0CE6 to U+0CEF
+ ["laoo", "in ໑໒໓ days"], // U+0ED0 to U+0ED9
+ ["latn", "in 123 days"], // U+0030 to U+0039
+ ["limb", "in ᥇᥈᥉ days"], // U+1946 to U+194F
+ ["mlym", "in ൧൨൩ days"], // U+0D66 to U+0D6F
+ ["mong", "in ᠑᠒᠓ days"], // U+1810 to U+1819
+ ["mymr", "in ၁၂၃ days"], // U+1040 to U+1049
+ ["orya", "in ୧୨୩ days"], // U+0B66 to U+0B6F
+ ["tamldec", "in ௧௨௩ days"], // U+0BE6 to U+0BEF
+ ["telu", "in ౧౨౩ days"], // U+0C66 to U+0C6F
+ ["thai", "in ๑๒๓ days"], // U+0E50 to U+0E59
+ ["tibt", "in ༡༢༣ days"], // U+0F20 to U+0F29
+];
+
+for ([numberingSystem, expected] of testCases) {
+ let byLocale = new Intl.RelativeTimeFormat("en-u-nu-" + numberingSystem);
+ let byOptions = new Intl.RelativeTimeFormat("en", { numberingSystem });
+
+ // Check the numberingSystem in the resolvedOptions matched.
+ assertEquals(numberingSystem,
+ byOptions.resolvedOptions().numberingSystem, numberingSystem);
+ assertEquals(byLocale.resolvedOptions().numberingSystem,
+ byOptions.resolvedOptions().numberingSystem, numberingSystem);
+
+ // Check the formatted result are the same as if creating by using -u-nu- in
+ // locale.
+ assertEquals(byLocale.format(123, "day"), byOptions.format(123, "day"), numberingSystem);
+ assertEquals(expected, byLocale.format(123, "day"), numberingSystem);
+}
diff --git a/deps/v8/test/js-perf-test/Array/slice.js b/deps/v8/test/js-perf-test/Array/slice.js
index af99c092b1..5f72314070 100644
--- a/deps/v8/test/js-perf-test/Array/slice.js
+++ b/deps/v8/test/js-perf-test/Array/slice.js
@@ -1,14 +1,23 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(() => {
- const A = new Array(1000);
+const kArraySize = 1000;
+
+(() => {
+ const A = new Array(kArraySize);
for (let i = 0; i < A.length; i++) {
A[i] = i;
}
+ assert(%HasSmiElements(A), "A should have SMI elements for this test");
+
+ // Commonly used to copy.
+ function testArraySlice0() {
+ return A.slice(0);
+ }
+
function testArraySlice500() {
return A.slice(500);
}
@@ -33,6 +42,7 @@
return A.slice(200, -300);
}
+ createSuiteWithWarmup("Array.slice(0)", 1, testArraySlice0);
createSuiteWithWarmup("Array.slice(500)", 1, testArraySlice500);
createSuiteWithWarmup("Array.slice(500,999)", 1, testArraySlice500_999);
createSuiteWithWarmup("Array.slice(-500)", 1, testArraySliceN500);
@@ -43,7 +53,7 @@
})();
(() => {
- const A = new Array(1000);
+ const A = new Array(kArraySize);
for (let i = 0; i < A.length; i++) {
A[i] = i;
@@ -52,6 +62,10 @@
A[100000] = 255;
assert(%HasDictionaryElements(A), "A should be in dictionary mode for this test");
+ function testArraySlice0() {
+ return A.slice(0);
+ }
+
function testArraySlice500_999() {
return A.slice(500, 999);
}
@@ -68,9 +82,130 @@
return A.slice(200, -300);
}
+ createSuiteWithWarmup("Array.slice(0)-dict", 1, testArraySlice0);
createSuiteWithWarmup("Array.slice(500,999)-dict", 1, testArraySlice500_999);
createSuiteWithWarmup("Array.slice(200,700)-dict", 1, testArraySlice200_700);
createSuiteWithWarmup("Array.slice(200,-300)-dict", 1, testArraySlice200_N300);
createSuiteWithWarmup("Array.slice(4,1)-dict", 1, testArraySlice4_1);
})();
+
+(() => {
+ const A = new Array(kArraySize);
+
+ for (let i = 0; i < A.length; i++) {
+ A[i] = i + 0.5;
+ }
+
+ assert(%HasDoubleElements(A), "A should have double elements for this test");
+
+ function testArraySlice0() {
+ return A.slice(0);
+ }
+
+ function testArraySlice500_999() {
+ return A.slice(500, 999);
+ }
+
+ function testArraySlice200_700() {
+ return A.slice(200, 700);
+ }
+
+ function testArraySlice200_N300() {
+ return A.slice(200, -300);
+ }
+
+ function testArraySlice4_1() {
+ return A.slice(200, -300);
+ }
+
+ createSuiteWithWarmup("Array.slice(0)-double", 1, testArraySlice0);
+ createSuiteWithWarmup("Array.slice(500,999)-double", 1, testArraySlice500_999);
+ createSuiteWithWarmup("Array.slice(200,700)-double", 1, testArraySlice200_700);
+ createSuiteWithWarmup("Array.slice(200,-300)-double", 1, testArraySlice200_N300);
+ createSuiteWithWarmup("Array.slice(4,1)-double", 1, testArraySlice4_1);
+
+})();
+
+(() => {
+ const A = new Array(kArraySize);
+
+ for (let i = 0; i < A.length; i++) {
+ A[i] = new Object();
+ }
+
+ assert(%HasObjectElements(A), "A should have object elements for this test");
+
+ function testArraySlice0() {
+ return A.slice(0);
+ }
+
+ function testArraySlice500_999() {
+ return A.slice(500, 999);
+ }
+
+ function testArraySlice200_700() {
+ return A.slice(200, 700);
+ }
+
+ function testArraySlice200_N300() {
+ return A.slice(200, -300);
+ }
+
+ function testArraySlice4_1() {
+ return A.slice(200, -300);
+ }
+
+ createSuiteWithWarmup("Array.slice(0)-object", 1, testArraySlice0);
+ createSuiteWithWarmup("Array.slice(500,999)-object", 1, testArraySlice500_999);
+ createSuiteWithWarmup("Array.slice(200,700)-object", 1, testArraySlice200_700);
+ createSuiteWithWarmup("Array.slice(200,-300)-object", 1, testArraySlice200_N300);
+ createSuiteWithWarmup("Array.slice(4,1)-object", 1, testArraySlice4_1);
+
+})();
+
+(() => {
+ const A = new Array(kArraySize);
+
+ for (let i = 0; i < A.length; i++) {
+ A[i] = i;
+ }
+
+ assert(%HasSmiElements(A), "A should have SMI elements for this test");
+
+ let arguments_array;
+ function sloppy_aliased(a) {
+ arguments_array = arguments;
+ }
+ sloppy_aliased.apply(null, A);
+
+ assert(%HasSloppyArgumentsElements(arguments_array),
+ "arguments_array should have sloppy arguments elements for this test");
+
+ function testArraySlice0() {
+ return Array.prototype.slice.call(arguments_array, 0);
+ }
+
+ function testArraySlice500_999() {
+ return Array.prototype.slice.call(arguments_array, 500, 999);
+ }
+
+ function testArraySlice200_700() {
+ return Array.prototype.slice.call(arguments_array, 200, 700);
+ }
+
+ function testArraySlice200_N300() {
+ return Array.prototype.slice.call(arguments_array, 200, -300);
+ }
+
+ function testArraySlice4_1() {
+ return Array.prototype.slice.call(arguments_array, 200, -300);
+ }
+
+ createSuiteWithWarmup("Array.slice(0)-sloppy-args", 1, testArraySlice0);
+ createSuiteWithWarmup("Array.slice(500,999)-sloppy-args", 1, testArraySlice500_999);
+ createSuiteWithWarmup("Array.slice(200,700)-sloppy-args", 1, testArraySlice200_700);
+ createSuiteWithWarmup("Array.slice(200,-300)-sloppy-args", 1, testArraySlice200_N300);
+ createSuiteWithWarmup("Array.slice(4,1)-sloppy-args", 1, testArraySlice4_1);
+
+})();
diff --git a/deps/v8/test/js-perf-test/ArraySort/sort-base.js b/deps/v8/test/js-perf-test/ArraySort/sort-base.js
index c888972191..776d45e776 100644
--- a/deps/v8/test/js-perf-test/ArraySort/sort-base.js
+++ b/deps/v8/test/js-perf-test/ArraySort/sort-base.js
@@ -93,7 +93,9 @@ function CreateHoleyObjectArray() {
function CreateDictionaryArray() {
array_to_sort = Array.from(template_array);
- array_to_sort[%MaxSmi()] = 42;
+ Object.defineProperty(array_to_sort, kArraySize - 2,
+ { get: () => this.foo,
+ set: (v) => this.foo = v });
AssertDictionaryElements();
}
diff --git a/deps/v8/test/js-perf-test/BigInt/run.js b/deps/v8/test/js-perf-test/BigInt/run.js
new file mode 100644
index 0000000000..8589f20f34
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BigInt/run.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+load('../base.js');
+load('to-boolean.js');
+
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-BigInt(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/BigInt/to-boolean.js b/deps/v8/test/js-perf-test/BigInt/to-boolean.js
new file mode 100644
index 0000000000..031c292a7d
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BigInt/to-boolean.js
@@ -0,0 +1,59 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+const ITERATIONS = 100000;
+
+// This dummy ensures that the feedback for benchmark.run() in the Measure function
+// from base.js is not monomorphic, thereby preventing the benchmarks below from being inlined.
+// This ensures consistent behavior and comparable results.
+new BenchmarkSuite('Prevent-Inline-Dummy', [10000], [
+ new Benchmark('Prevent-Inline-Dummy', true, false, 0, () => {})
+]);
+
+new BenchmarkSuite('BigInt-ToBoolean', [10000], [
+ new Benchmark('BigInt-ToBoolean', true, false, 0, TestToBoolean),
+]);
+
+new BenchmarkSuite('BigInt-BooleanConstructor', [10000], [
+ new Benchmark('BigInt-BooleanConstructor', true, false, 0, TestBooleanConstructor),
+]);
+
+new BenchmarkSuite('BigInt-NewBooleanConstructor', [10000], [
+ new Benchmark('BigInt-NewBooleanConstructor', true, false, 0, TestNewBooleanConstructor),
+]);
+
+
+function TestBooleanConstructor() {
+ let kl = true;
+ for (let i = 0; i < ITERATIONS; ++i) {
+ // Store to a variable to prevent elimination.
+ // Keep a depedency on the loop counter to prevent hoisting.
+ kl = Boolean(i % 2 == 0 ? 42n : 32n);
+ }
+ return kl;
+}
+
+
+function TestNewBooleanConstructor() {
+ let kl = true;
+ for (let i = 0; i < ITERATIONS; ++i) {
+ // Store to a variable to prevent elimination.
+ // Keep a depedency on the loop counter to prevent hoisting.
+ kl = new Boolean(i % 2 == 0 ? 42n : 32n);
+ }
+ return kl;
+}
+
+
+function TestToBoolean() {
+ let kl = true;
+ for (let i = 0; i < ITERATIONS; ++i) {
+ // Store to a variable to prevent elimination.
+ // Keep a depedency on the loop counter to prevent hoisting.
+ kl = (i % 2 == 0 ? 42n : 32n) ? true : false;
+ }
+ return kl;
+}
diff --git a/deps/v8/test/js-perf-test/Intl/constructor.js b/deps/v8/test/js-perf-test/Intl/constructor.js
index e5b3a86694..72dc7e6ee7 100644
--- a/deps/v8/test/js-perf-test/Intl/constructor.js
+++ b/deps/v8/test/js-perf-test/Intl/constructor.js
@@ -30,3 +30,15 @@ function NewIntlRelativeTimeFormat() {
let obj = new Intl.RelativeTimeFormat();
}
createSuite('NewIntlRelativeTimeFormat', 100, NewIntlRelativeTimeFormat, ()=>{});
+
+function NewIntlLocaleWithOptions() {
+ let obj = new Intl.Locale("en-Latn-US-u-nu-thai",
+ { language: "zh", region: "TW", script: "Hant", calendar: "roc", collation: "zhuyin",
+ hourCycle: "h11", caseFirst: "upper", numberingSystem: "hanidec"});
+}
+createSuite('NewIntlLocaleWithOptions', 100, NewIntlLocaleWithOptions, ()=>{});
+
+function NewIntlLocale() {
+ let obj = new Intl.Locale("zh");
+}
+createSuite('NewIntlLocale', 100, NewIntlLocale, ()=>{});
diff --git a/deps/v8/test/js-perf-test/JSTests1.json b/deps/v8/test/js-perf-test/JSTests1.json
index 8b36b919db..afec057821 100644
--- a/deps/v8/test/js-perf-test/JSTests1.json
+++ b/deps/v8/test/js-perf-test/JSTests1.json
@@ -46,6 +46,35 @@
]
},
{
+ "name": "BigInt",
+ "path": ["BigInt"],
+ "main": "run.js",
+ "resources": [
+ "to-boolean.js"
+ ],
+ "results_regexp": "^%s\\-BigInt\\(Score\\): (.+)$",
+ "tests": [
+ { "name": "BigInt-ToBoolean" },
+ { "name": "BigInt-BooleanConstructor" },
+ { "name": "BigInt-NewBooleanConstructor" }
+ ]
+ },
+ {
+ "name": "BigInt-Jitless",
+ "path": ["BigInt"],
+ "main": "run.js",
+ "resources": [
+ "to-boolean.js"
+ ],
+ "flags": ["--jitless"],
+ "results_regexp": "^%s\\-BigInt\\(Score\\): (.+)$",
+ "tests": [
+ { "name": "BigInt-ToBoolean" },
+ { "name": "BigInt-BooleanConstructor" },
+ { "name": "BigInt-NewBooleanConstructor" }
+ ]
+ },
+ {
"name": "TypedArrays",
"path": ["TypedArrays"],
"results_regexp": "^TypedArrays\\-%s\\(Score\\): (.+)$",
diff --git a/deps/v8/test/js-perf-test/JSTests2.json b/deps/v8/test/js-perf-test/JSTests2.json
index f1bae8dcf7..0933c7da07 100644
--- a/deps/v8/test/js-perf-test/JSTests2.json
+++ b/deps/v8/test/js-perf-test/JSTests2.json
@@ -151,16 +151,33 @@
{"name": "MixedFrom"},
{"name": "MixedCowNoMapFrom"},
{"name": "MixedNonCowNoMapFrom"},
+ {"name": "Array.slice(0)"},
{"name": "Array.slice(500)"},
{"name": "Array.slice(500,999)"},
{"name": "Array.slice(-500)"},
{"name": "Array.slice(200,700)"},
{"name": "Array.slice(200,-300)"},
{"name": "Array.slice(4,1)"},
+ {"name": "Array.slice(0)-dict"},
{"name": "Array.slice(500,999)-dict"},
{"name": "Array.slice(200,700)-dict"},
{"name": "Array.slice(200,-300)-dict"},
{"name": "Array.slice(4,1)-dict"},
+ {"name": "Array.slice(0)-double"},
+ {"name": "Array.slice(500,999)-double"},
+ {"name": "Array.slice(200,700)-double"},
+ {"name": "Array.slice(200,-300)-double"},
+ {"name": "Array.slice(4,1)-double"},
+ {"name": "Array.slice(0)-object"},
+ {"name": "Array.slice(500,999)-object"},
+ {"name": "Array.slice(200,700)-object"},
+ {"name": "Array.slice(200,-300)-object"},
+ {"name": "Array.slice(4,1)-object"},
+ {"name": "Array.slice(0)-sloppy-args"},
+ {"name": "Array.slice(500,999)-sloppy-args"},
+ {"name": "Array.slice(200,700)-sloppy-args"},
+ {"name": "Array.slice(200,-300)-sloppy-args"},
+ {"name": "Array.slice(4,1)-sloppy-args"},
{"name": "SmiCopyWithin"},
{"name": "StringCopyWithin"},
{"name": "SparseSmiCopyWithin"},
diff --git a/deps/v8/test/js-perf-test/JSTests3.json b/deps/v8/test/js-perf-test/JSTests3.json
index 816d463922..d9a2735d04 100644
--- a/deps/v8/test/js-perf-test/JSTests3.json
+++ b/deps/v8/test/js-perf-test/JSTests3.json
@@ -160,9 +160,6 @@
"name": "StringMatchAll",
"main": "run.js",
"resources": [ "string-matchall.js" ],
- "flags": [
- "--harmony-string-matchall"
- ],
"test_flags": [ "string-matchall" ],
"results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
"run_count": 1,
@@ -225,10 +222,7 @@
{"name": "StringDropLastSubstring"},
{"name": "StringTakeLastSlice"},
{"name": "StringTakeLastSubstr"},
- {"name": "StringTakeLastSubstring"},
- {"name": "StringDropFirstSlice"},
- {"name": "StringDropFirstSubstr"},
- {"name": "StringDropFirstSubstring"}
+ {"name": "StringTakeLastSubstring"}
]
}
]
diff --git a/deps/v8/test/js-perf-test/JSTests5.json b/deps/v8/test/js-perf-test/JSTests5.json
index 0daa57333b..376a8cae27 100644
--- a/deps/v8/test/js-perf-test/JSTests5.json
+++ b/deps/v8/test/js-perf-test/JSTests5.json
@@ -40,7 +40,6 @@
"main": "run.js",
"resources": [],
"results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargePacked\\(Score\\): (.+)$",
- "retry_count_arm64": 1,
"tests": [
{"name": "Spread"},
{"name": "ForLength"},
@@ -57,7 +56,6 @@
"main": "run.js",
"resources": [],
"results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargeHoley\\(Score\\): (.+)$",
- "retry_count_arm64": 1,
"tests": [
{"name": "Spread"},
{"name": "ForLength"},
@@ -74,7 +72,6 @@
"main": "run.js",
"resources": [],
"results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargeDoublePacked\\(Score\\): (.+)$",
- "retry_count_arm64": 1,
"tests": [
{"name": "Spread"},
{"name": "ForLength"},
@@ -91,7 +88,6 @@
"main": "run.js",
"resources": [],
"results_regexp": "^%s\\-ArrayLiteralInitialSpreadLargeDoubleHoley\\(Score\\): (.+)$",
- "retry_count_arm64": 1,
"tests": [
{"name": "Spread"},
{"name": "ForLength"},
@@ -508,7 +504,9 @@
{"name": "NewIntlNumberFormat"},
{"name": "NewIntlPluralRules"},
{"name": "NewIntlListFormat"},
- {"name": "NewIntlRelativeTimeFormat"}
+ {"name": "NewIntlRelativeTimeFormat"},
+ {"name": "NewIntlLocale"},
+ {"name": "NewIntlLocaleWithOptions"}
]
},
{
@@ -570,12 +568,19 @@
"main": "run.js",
"flags": [],
"resources": [
+ "array-indexof-includes.js",
+ "spread-call.js",
"tagged-template.js"
],
"results_regexp": "^%s\\-Numbers\\(Score\\): (.+)$",
"tests": [
{"name": "TaggedTemplate"},
- {"name": "TaggedTemplateLoose"}
+ {"name": "TaggedTemplateLoose"},
+ {"name": "ArrayIndexOf"},
+ {"name": "ArrayIncludes"},
+ {"name": "ApplySpreadLiteral"},
+ {"name": "SpreadCall"},
+ {"name": "SpreadCallSpreadLiteral"}
]
},
{
diff --git a/deps/v8/test/js-perf-test/ManyClosures/create-many-closures.js b/deps/v8/test/js-perf-test/ManyClosures/create-many-closures.js
index 548e666d2f..7932f36701 100644
--- a/deps/v8/test/js-perf-test/ManyClosures/create-many-closures.js
+++ b/deps/v8/test/js-perf-test/ManyClosures/create-many-closures.js
@@ -29,6 +29,7 @@ function CreateManyClosures_Setup() {
// Create a closure and optimize.
var f = g();
+ %PrepareFunctionForOptimization(f);
f(0);
f(0);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/js-perf-test/ObjectFreeze/array-indexof-includes.js b/deps/v8/test/js-perf-test/ObjectFreeze/array-indexof-includes.js
new file mode 100644
index 0000000000..01e1849634
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ObjectFreeze/array-indexof-includes.js
@@ -0,0 +1,53 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function setupArray(length) {
+ var a = new Array(length);
+ for (var i=0;i<length;i++) {
+ a[i] = ''+i;
+ }
+ return Object.freeze(a);
+}
+
+const frozenArray = setupArray(200);
+
+function driverArrayIndexOf(n) {
+ let result = 0;
+ for (var i=0;i<n;i++) {
+ result += frozenArray.indexOf(''+i)==-1?0:1;
+ }
+ return result;
+}
+
+function ArrayIndexOf() {
+ driverArrayIndexOf(1e4);
+}
+
+function ArrayIndexOfWarmUp() {
+ driverArrayIndexOf(1e1);
+ driverArrayIndexOf(1e2);
+ driverArrayIndexOf(1e3);
+}
+
+createSuite('ArrayIndexOf', 10, ArrayIndexOf, ArrayIndexOfWarmUp);
+
+function driverArrayIncludes(n) {
+ let result = 0;
+ for (var i=0;i<n;i++) {
+ result += frozenArray.includes(''+i)?0:1;
+ }
+ return result;
+}
+
+function ArrayIncludes() {
+ driverArrayIncludes(1e4);
+}
+
+function ArrayIncludesWarmUp() {
+ driverArrayIncludes(1e1);
+ driverArrayIncludes(1e2);
+ driverArrayIncludes(1e3);
+}
+
+createSuite('ArrayIncludes', 10, ArrayIncludes, ArrayIncludesWarmUp);
diff --git a/deps/v8/test/js-perf-test/ObjectFreeze/run.js b/deps/v8/test/js-perf-test/ObjectFreeze/run.js
index 63eb1d69ec..adc1a475b4 100644
--- a/deps/v8/test/js-perf-test/ObjectFreeze/run.js
+++ b/deps/v8/test/js-perf-test/ObjectFreeze/run.js
@@ -3,6 +3,8 @@
// found in the LICENSE file.
load('../base.js');
load('tagged-template.js');
+load('array-indexof-includes.js');
+load('spread-call.js');
function PrintResult(name, result) {
console.log(name);
diff --git a/deps/v8/test/js-perf-test/ObjectFreeze/spread-call.js b/deps/v8/test/js-perf-test/ObjectFreeze/spread-call.js
new file mode 100644
index 0000000000..ec1cd5d277
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ObjectFreeze/spread-call.js
@@ -0,0 +1,59 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function setupArray(length) {
+ var a = new Array(length);
+ for (var i=0;i<length;i++) {
+ a[i] = ''+i;
+ }
+ return Object.freeze(a);
+}
+
+const frozenSpreadArray = setupArray(100);
+
+function foo() {
+ var result = arguments[0];
+ for (var i = 1; i < arguments.length; ++i) {
+ result += arguments[i];
+ }
+ return result;
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadCall
+// ----------------------------------------------------------------------------
+
+function SpreadCall() {
+ foo(...frozenSpreadArray);
+}
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: SpreadCallSpreadLiteral
+// ----------------------------------------------------------------------------
+
+function SpreadCallSpreadLiteral() {
+ foo(...[...frozenSpreadArray]);
+}
+
+
+// ----------------------------------------------------------------------------
+// Benchmark: ApplySpreadLiteral
+// ----------------------------------------------------------------------------
+
+function ApplySpreadLiteral() {
+ foo.apply(this, [...frozenSpreadArray]);
+}
+
+// ----------------------------------------------------------------------------
+// Setup and Run
+// ----------------------------------------------------------------------------
+
+function CreateBenchmark(name, f) {
+ new BenchmarkSuite(name, [10], [ new Benchmark(name, false, false, 0, f) ]);
+}
+
+CreateBenchmark('ApplySpreadLiteral', ApplySpreadLiteral);
+CreateBenchmark('SpreadCall', SpreadCall);
+CreateBenchmark('SpreadCallSpreadLiteral', SpreadCallSpreadLiteral);
diff --git a/deps/v8/test/js-perf-test/StackTrace/capture.js b/deps/v8/test/js-perf-test/StackTrace/capture.js
index db2b997122..54cb2921a5 100644
--- a/deps/v8/test/js-perf-test/StackTrace/capture.js
+++ b/deps/v8/test/js-perf-test/StackTrace/capture.js
@@ -20,6 +20,7 @@ function Inline() {
function Middle() { Inner(); }
function Outer() { Middle(); }
+ %PrepareFunctionForOptimization(Outer);
Outer();
Outer();
%OptimizeFunctionOnNextCall(Outer);
diff --git a/deps/v8/test/js-perf-test/StackTrace/serialize.js b/deps/v8/test/js-perf-test/StackTrace/serialize.js
index bbac87647c..a3dac31b2f 100644
--- a/deps/v8/test/js-perf-test/StackTrace/serialize.js
+++ b/deps/v8/test/js-perf-test/StackTrace/serialize.js
@@ -31,6 +31,7 @@ function InlineSetup() {
function Middle() { return Inner(); }
function Outer() { return Middle(); }
+ %PrepareFunctionForOptimization(Outer);
Outer();
Outer();
%OptimizeFunctionOnNextCall(Outer);
diff --git a/deps/v8/test/message/fail/class-field-constructor.js b/deps/v8/test/message/fail/class-field-constructor.js
index baeb04e94d..4c6f42d0f2 100644
--- a/deps/v8/test/message/fail/class-field-constructor.js
+++ b/deps/v8/test/message/fail/class-field-constructor.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-public-fields
class X {
constructor = function() {};
diff --git a/deps/v8/test/message/fail/class-field-constructor.out b/deps/v8/test/message/fail/class-field-constructor.out
index 51f26957ad..29c0588100 100644
--- a/deps/v8/test/message/fail/class-field-constructor.out
+++ b/deps/v8/test/message/fail/class-field-constructor.out
@@ -1,4 +1,4 @@
-*%(basename)s:8: SyntaxError: Classes may not have a field named 'constructor'
+*%(basename)s:6: SyntaxError: Classes may not have a field named 'constructor'
constructor = function() {};
^^^^^^^^^^^
SyntaxError: Classes may not have a field named 'constructor' \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-field-static-constructor.js b/deps/v8/test/message/fail/class-field-static-constructor.js
index 63ce1c04d7..d01be68c21 100644
--- a/deps/v8/test/message/fail/class-field-static-constructor.js
+++ b/deps/v8/test/message/fail/class-field-static-constructor.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-public-fields --harmony-static-fields
class X {
static constructor = function() {};
diff --git a/deps/v8/test/message/fail/class-field-static-constructor.out b/deps/v8/test/message/fail/class-field-static-constructor.out
index 6831d83552..7f330b7bc0 100644
--- a/deps/v8/test/message/fail/class-field-static-constructor.out
+++ b/deps/v8/test/message/fail/class-field-static-constructor.out
@@ -1,4 +1,4 @@
-*%(basename)s:8: SyntaxError: Classes may not have a field named 'constructor'
+*%(basename)s:6: SyntaxError: Classes may not have a field named 'constructor'
static constructor = function() {};
^^^^^^^^^^^
SyntaxError: Classes may not have a field named 'constructor' \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-field-static-prototype.js b/deps/v8/test/message/fail/class-field-static-prototype.js
index 656518879a..299d2aa041 100644
--- a/deps/v8/test/message/fail/class-field-static-prototype.js
+++ b/deps/v8/test/message/fail/class-field-static-prototype.js
@@ -1,8 +1,6 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-public-fields --harmony-static-fields
class X {
static prototype = function() {};
diff --git a/deps/v8/test/message/fail/class-field-static-prototype.out b/deps/v8/test/message/fail/class-field-static-prototype.out
index 06d6a75842..767a78e5ff 100644
--- a/deps/v8/test/message/fail/class-field-static-prototype.out
+++ b/deps/v8/test/message/fail/class-field-static-prototype.out
@@ -1,4 +1,4 @@
-*%(basename)s:8: SyntaxError: Classes may not have a static property named 'prototype'
+*%(basename)s:6: SyntaxError: Classes may not have a static property named 'prototype'
static prototype = function() {};
^^^^^^^^^
SyntaxError: Classes may not have a static property named 'prototype' \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-computed.js b/deps/v8/test/message/fail/class-fields-computed.js
index d9b41906ab..22a5981978 100644
--- a/deps/v8/test/message/fail/class-fields-computed.js
+++ b/deps/v8/test/message/fail/class-fields-computed.js
@@ -1,8 +1,6 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-public-fields
class X {
[foo()] = 1;
diff --git a/deps/v8/test/message/fail/class-fields-computed.out b/deps/v8/test/message/fail/class-fields-computed.out
index 214b273af5..bc4ccdb7e3 100644
--- a/deps/v8/test/message/fail/class-fields-computed.out
+++ b/deps/v8/test/message/fail/class-fields-computed.out
@@ -1,5 +1,5 @@
-*%(basename)s:8: ReferenceError: foo is not defined
+*%(basename)s:6: ReferenceError: foo is not defined
[foo()] = 1;
^
ReferenceError: foo is not defined
- at *%(basename)s:8:4 \ No newline at end of file
+ at *%(basename)s:6:4 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-class-in-function.js b/deps/v8/test/message/fail/class-fields-private-class-in-function.js
index 215e083962..946cc90a6a 100644
--- a/deps/v8/test/message/fail/class-fields-private-class-in-function.js
+++ b/deps/v8/test/message/fail/class-fields-private-class-in-function.js
@@ -1,8 +1,6 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-private-fields
class Y {
makeClass() {
diff --git a/deps/v8/test/message/fail/class-fields-private-class-in-function.out b/deps/v8/test/message/fail/class-fields-private-class-in-function.out
index 1e564497c5..3d07d644d8 100644
--- a/deps/v8/test/message/fail/class-fields-private-class-in-function.out
+++ b/deps/v8/test/message/fail/class-fields-private-class-in-function.out
@@ -1,4 +1,4 @@
-*%(basename)s:12: SyntaxError: Undefined private field #b: must be declared in an enclosing class
+*%(basename)s:10: SyntaxError: Private field '#b' must be declared in an enclosing class
getB() { return this.#b; }
^
-SyntaxError: Undefined private field #b: must be declared in an enclosing class
+SyntaxError: Private field '#b' must be declared in an enclosing class
diff --git a/deps/v8/test/message/fail/class-fields-private-outside-class.js b/deps/v8/test/message/fail/class-fields-private-outside-class.js
new file mode 100644
index 0000000000..cacb90a33c
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-outside-class.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class Foo { #x = 42; }
+new Foo().#x;
diff --git a/deps/v8/test/message/fail/class-fields-private-outside-class.out b/deps/v8/test/message/fail/class-fields-private-outside-class.out
new file mode 100644
index 0000000000..34919122fc
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-outside-class.out
@@ -0,0 +1,4 @@
+*%(basename)s:6: SyntaxError: Private field '#x' must be declared in an enclosing class
+new Foo().#x;
+ ^
+SyntaxError: Private field '#x' must be declared in an enclosing class \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-source-positions.js b/deps/v8/test/message/fail/class-fields-private-source-positions.js
index b4f8093a1f..d3b9cbdafa 100644
--- a/deps/v8/test/message/fail/class-fields-private-source-positions.js
+++ b/deps/v8/test/message/fail/class-fields-private-source-positions.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-private-fields
-
var o = {};
class C {
#a = 0;
diff --git a/deps/v8/test/message/fail/class-fields-private-source-positions.out b/deps/v8/test/message/fail/class-fields-private-source-positions.out
index cc70fde7b6..7c2f99964b 100644
--- a/deps/v8/test/message/fail/class-fields-private-source-positions.out
+++ b/deps/v8/test/message/fail/class-fields-private-source-positions.out
@@ -1,5 +1,5 @@
-*%(basename)s:11: TypeError: Read of private field #a from an object which did not contain the field
+*%(basename)s:9: TypeError: Read of private field #a from an object which did not contain the field
[o.#a](){}
^
TypeError: Read of private field #a from an object which did not contain the field
- at *%(basename)s:11:8
+ at *%(basename)s:9:8
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-early-2.js b/deps/v8/test/message/fail/class-fields-private-throw-early-2.js
index 2831d4d91a..9ec86caafe 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-early-2.js
+++ b/deps/v8/test/message/fail/class-fields-private-throw-early-2.js
@@ -1,8 +1,6 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-private-fields
class X {
#x;
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-early-2.out b/deps/v8/test/message/fail/class-fields-private-throw-early-2.out
index fdcdfbd414..1cf7bb41ae 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-early-2.out
+++ b/deps/v8/test/message/fail/class-fields-private-throw-early-2.out
@@ -1,6 +1,6 @@
-*%(basename)s:10: TypeError: Write of private field #x to an object which did not contain the field
+*%(basename)s:8: TypeError: Write of private field #x to an object which did not contain the field
({}).#x = 1;
^
TypeError: Write of private field #x to an object which did not contain the field
- at new X (*%(basename)s:10:13)
- at *%(basename)s:14:1 \ No newline at end of file
+ at new X (*%(basename)s:8:13)
+ at *%(basename)s:12:1 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-early.js b/deps/v8/test/message/fail/class-fields-private-throw-early.js
index b224a8d04e..e0b2501ad0 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-early.js
+++ b/deps/v8/test/message/fail/class-fields-private-throw-early.js
@@ -1,8 +1,6 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-private-fields
class X {
constructor() {
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-early.out b/deps/v8/test/message/fail/class-fields-private-throw-early.out
index 86395a2ee5..7fa9da0951 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-early.out
+++ b/deps/v8/test/message/fail/class-fields-private-throw-early.out
@@ -1,4 +1,4 @@
-*%(basename)s:9: SyntaxError: Undefined private field #x: must be declared in an enclosing class
+*%(basename)s:7: SyntaxError: Private field '#x' must be declared in an enclosing class
this.#x = 1;
^
-SyntaxError: Undefined private field #x: must be declared in an enclosing class \ No newline at end of file
+SyntaxError: Private field '#x' must be declared in an enclosing class \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-in-module.js b/deps/v8/test/message/fail/class-fields-private-throw-in-module.js
index 4b90436f46..5e75c72086 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-in-module.js
+++ b/deps/v8/test/message/fail/class-fields-private-throw-in-module.js
@@ -3,7 +3,6 @@
// found in the LICENSE file.
//
// MODULE
-// Flags: --harmony-private-fields
class X {
constructor() {
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-in-module.out b/deps/v8/test/message/fail/class-fields-private-throw-in-module.out
index e63207a815..2c0df1acef 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-in-module.out
+++ b/deps/v8/test/message/fail/class-fields-private-throw-in-module.out
@@ -1,4 +1,4 @@
-*%(basename)s:10: SyntaxError: Undefined private field #x: must be declared in an enclosing class
+*%(basename)s:9: SyntaxError: Private field '#x' must be declared in an enclosing class
this.#x = 1;
^
-SyntaxError: Undefined private field #x: must be declared in an enclosing class \ No newline at end of file
+SyntaxError: Private field '#x' must be declared in an enclosing class \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-read.js b/deps/v8/test/message/fail/class-fields-private-throw-read.js
index 3be60efdfc..b04353395d 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-read.js
+++ b/deps/v8/test/message/fail/class-fields-private-throw-read.js
@@ -1,8 +1,6 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-private-fields
class X {
#x;
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-read.out b/deps/v8/test/message/fail/class-fields-private-throw-read.out
index 4b49cfd354..ec8dcf5108 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-read.out
+++ b/deps/v8/test/message/fail/class-fields-private-throw-read.out
@@ -1,6 +1,6 @@
-*%(basename)s:9: TypeError: Read of private field #x from an object which did not contain the field
+*%(basename)s:7: TypeError: Read of private field #x from an object which did not contain the field
eq(o) { return this.#x === o.#x; }
^
TypeError: Read of private field #x from an object which did not contain the field
- at X.eq (*%(basename)s:9:32)
- at *%(basename)s:12:9 \ No newline at end of file
+ at X.eq (*%(basename)s:7:32)
+ at *%(basename)s:10:9 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-write.js b/deps/v8/test/message/fail/class-fields-private-throw-write.js
index 93e9c135b9..1c51b1a6fd 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-write.js
+++ b/deps/v8/test/message/fail/class-fields-private-throw-write.js
@@ -1,8 +1,6 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-private-fields
class X {
#x;
diff --git a/deps/v8/test/message/fail/class-fields-private-throw-write.out b/deps/v8/test/message/fail/class-fields-private-throw-write.out
index e0a11d90bc..8d9047cc62 100644
--- a/deps/v8/test/message/fail/class-fields-private-throw-write.out
+++ b/deps/v8/test/message/fail/class-fields-private-throw-write.out
@@ -1,6 +1,6 @@
-*%(basename)s:9: TypeError: Write of private field #x to an object which did not contain the field
+*%(basename)s:7: TypeError: Write of private field #x to an object which did not contain the field
setX(o, val) { o.#x = val; }
^
TypeError: Write of private field #x to an object which did not contain the field
- at X.setX (*%(basename)s:9:23)
- at *%(basename)s:12:9 \ No newline at end of file
+ at X.setX (*%(basename)s:7:23)
+ at *%(basename)s:10:9 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-undeclared-lazy-class.js b/deps/v8/test/message/fail/class-fields-private-undeclared-lazy-class.js
new file mode 100644
index 0000000000..e6bccbfe2e
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-undeclared-lazy-class.js
@@ -0,0 +1,13 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function x() {
+ class Foo {
+ constructor () {
+ class Bar {
+ x = this.#foo;
+ }
+ }
+ }
+}
diff --git a/deps/v8/test/message/fail/class-fields-private-undeclared-lazy-class.out b/deps/v8/test/message/fail/class-fields-private-undeclared-lazy-class.out
new file mode 100644
index 0000000000..556e432ab1
--- /dev/null
+++ b/deps/v8/test/message/fail/class-fields-private-undeclared-lazy-class.out
@@ -0,0 +1,4 @@
+*%(basename)s:9: SyntaxError: Private field '#foo' must be declared in an enclosing class
+ x = this.#foo;
+ ^
+SyntaxError: Private field '#foo' must be declared in an enclosing class \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-private-undefined-inner-class.js b/deps/v8/test/message/fail/class-fields-private-undefined-inner-class.js
index 1e115b61e7..c2c9f4da48 100644
--- a/deps/v8/test/message/fail/class-fields-private-undefined-inner-class.js
+++ b/deps/v8/test/message/fail/class-fields-private-undefined-inner-class.js
@@ -1,8 +1,6 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-private-fields
class A {
fn() {
diff --git a/deps/v8/test/message/fail/class-fields-private-undefined-inner-class.out b/deps/v8/test/message/fail/class-fields-private-undefined-inner-class.out
index a2fb293968..ec3c8c308e 100644
--- a/deps/v8/test/message/fail/class-fields-private-undefined-inner-class.out
+++ b/deps/v8/test/message/fail/class-fields-private-undefined-inner-class.out
@@ -1,4 +1,4 @@
-*%(basename)s:10: SyntaxError: Undefined private field #b: must be declared in an enclosing class
+*%(basename)s:8: SyntaxError: Private field '#b' must be declared in an enclosing class
getA() { return this.#b; }
^
-SyntaxError: Undefined private field #b: must be declared in an enclosing class
+SyntaxError: Private field '#b' must be declared in an enclosing class
diff --git a/deps/v8/test/message/fail/class-fields-static-throw.js b/deps/v8/test/message/fail/class-fields-static-throw.js
index e7c9fec1ba..5de3fa744b 100644
--- a/deps/v8/test/message/fail/class-fields-static-throw.js
+++ b/deps/v8/test/message/fail/class-fields-static-throw.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-public-fields --harmony-static-fields
-//
// TODO(gsathya): Remove 'Function' from stack trace.
class X {
diff --git a/deps/v8/test/message/fail/class-fields-static-throw.out b/deps/v8/test/message/fail/class-fields-static-throw.out
index a16b050bbd..456d1f38c0 100644
--- a/deps/v8/test/message/fail/class-fields-static-throw.out
+++ b/deps/v8/test/message/fail/class-fields-static-throw.out
@@ -1,6 +1,6 @@
-*%(basename)s:10: ReferenceError: foo is not defined
+*%(basename)s:8: ReferenceError: foo is not defined
static x = foo();
^
ReferenceError: foo is not defined
- at Function.<static_fields_initializer> (*%(basename)s:10:14)
+ at Function.<static_fields_initializer> (*%(basename)s:8:14)
at *%(basename)s:1:1 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/class-fields-throw.js b/deps/v8/test/message/fail/class-fields-throw.js
index 235a964ae8..69194e677d 100644
--- a/deps/v8/test/message/fail/class-fields-throw.js
+++ b/deps/v8/test/message/fail/class-fields-throw.js
@@ -1,8 +1,6 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-public-fields
class X {
x = foo();
diff --git a/deps/v8/test/message/fail/class-fields-throw.out b/deps/v8/test/message/fail/class-fields-throw.out
index 3c347a9529..b7bce52a7f 100644
--- a/deps/v8/test/message/fail/class-fields-throw.out
+++ b/deps/v8/test/message/fail/class-fields-throw.out
@@ -1,7 +1,7 @@
-*%(basename)s:8: ReferenceError: foo is not defined
+*%(basename)s:6: ReferenceError: foo is not defined
x = foo();
^
ReferenceError: foo is not defined
- at X.<instance_members_initializer> (*%(basename)s:8:7)
- at new X (*%(basename)s:7:1)
- at *%(basename)s:11:1 \ No newline at end of file
+ at X.<instance_members_initializer> (*%(basename)s:6:7)
+ at new X (*%(basename)s:5:1)
+ at *%(basename)s:9:1 \ No newline at end of file
diff --git a/deps/v8/test/message/fail/destructuring-object-private-name.js b/deps/v8/test/message/fail/destructuring-object-private-name.js
index 3e30bd2321..aaf3244cb9 100644
--- a/deps/v8/test/message/fail/destructuring-object-private-name.js
+++ b/deps/v8/test/message/fail/destructuring-object-private-name.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-class-fields
-
class Foo {
#x = 1;
destructureX() {
diff --git a/deps/v8/test/message/fail/destructuring-object-private-name.out b/deps/v8/test/message/fail/destructuring-object-private-name.out
index 83b6b8eb80..6a3ecd37c3 100644
--- a/deps/v8/test/message/fail/destructuring-object-private-name.out
+++ b/deps/v8/test/message/fail/destructuring-object-private-name.out
@@ -1,4 +1,4 @@
-*%(basename)s:10: SyntaxError: Unexpected identifier
+*%(basename)s:8: SyntaxError: Unexpected identifier
const { #x: x } = this;
^^
SyntaxError: Unexpected identifier
diff --git a/deps/v8/test/message/fail/json-stringify-circular-ellipsis.out b/deps/v8/test/message/fail/json-stringify-circular-ellipsis.out
index c288ee95ea..88414552f0 100644
--- a/deps/v8/test/message/fail/json-stringify-circular-ellipsis.out
+++ b/deps/v8/test/message/fail/json-stringify-circular-ellipsis.out
@@ -16,3 +16,4 @@ TypeError: Converting circular structure to JSON
--- property 'y' closes the circle
at JSON.stringify (<anonymous>)
at *%(basename)s:27:6
+
diff --git a/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.out b/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.out
index bf81266dd5..388a7f2644 100644
--- a/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.out
+++ b/deps/v8/test/message/fail/json-stringify-circular-max-display-depth.out
@@ -14,3 +14,4 @@ TypeError: Converting circular structure to JSON
--- index 1 closes the circle
at JSON.stringify (<anonymous>)
at *%(basename)s:21:6
+
diff --git a/deps/v8/test/message/fail/json-stringify-circular-proxy.out b/deps/v8/test/message/fail/json-stringify-circular-proxy.out
index 6004cfb42d..b7abf8867a 100644
--- a/deps/v8/test/message/fail/json-stringify-circular-proxy.out
+++ b/deps/v8/test/message/fail/json-stringify-circular-proxy.out
@@ -16,3 +16,4 @@ TypeError: Converting circular structure to JSON
--- property 'y' closes the circle
at JSON.stringify (<anonymous>)
at *%(basename)s:28:6
+
diff --git a/deps/v8/test/message/fail/json-stringify-circular-substructure.out b/deps/v8/test/message/fail/json-stringify-circular-substructure.out
index 7633ea24e8..2477be764f 100644
--- a/deps/v8/test/message/fail/json-stringify-circular-substructure.out
+++ b/deps/v8/test/message/fail/json-stringify-circular-substructure.out
@@ -8,3 +8,4 @@ TypeError: Converting circular structure to JSON
--- property 'key' closes the circle
at JSON.stringify (<anonymous>)
at *%(basename)s:9:6
+
diff --git a/deps/v8/test/message/fail/json-stringify-circular.out b/deps/v8/test/message/fail/json-stringify-circular.out
index bfea54ef00..a701672c84 100644
--- a/deps/v8/test/message/fail/json-stringify-circular.out
+++ b/deps/v8/test/message/fail/json-stringify-circular.out
@@ -8,3 +8,4 @@ TypeError: Converting circular structure to JSON
--- property 'key' closes the circle
at JSON.stringify (<anonymous>)
at *%(basename)s:8:6
+
diff --git a/deps/v8/test/message/fail/wasm-async-compile-fail.js b/deps/v8/test/message/fail/wasm-async-compile-fail.js
new file mode 100644
index 0000000000..250a389f9e
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-async-compile-fail.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+builder.addFunction('f', kSig_i_v).addBody([]);
+let rethrow = e => setTimeout(_ => {throw e}, 0);
+WebAssembly.compile(builder.toBuffer()).catch(rethrow);
diff --git a/deps/v8/test/message/fail/wasm-async-compile-fail.out b/deps/v8/test/message/fail/wasm-async-compile-fail.out
new file mode 100644
index 0000000000..0ecf9b1968
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-async-compile-fail.out
@@ -0,0 +1,5 @@
+*%(basename)s:9: CompileError: WebAssembly.compile(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+let rethrow = e => setTimeout(_ => {throw e}, 0);
+ ^
+CompileError: WebAssembly.compile(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+
diff --git a/deps/v8/test/message/fail/wasm-async-instantiate-fail.js b/deps/v8/test/message/fail/wasm-async-instantiate-fail.js
new file mode 100644
index 0000000000..624a17b5dd
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-async-instantiate-fail.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+builder.addFunction('f', kSig_i_v).addBody([]);
+let rethrow = e => setTimeout(_ => {throw e}, 0);
+WebAssembly.instantiate(builder.toBuffer()).catch(rethrow);
diff --git a/deps/v8/test/message/fail/wasm-async-instantiate-fail.out b/deps/v8/test/message/fail/wasm-async-instantiate-fail.out
new file mode 100644
index 0000000000..ebe2760dd2
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-async-instantiate-fail.out
@@ -0,0 +1,5 @@
+*%(basename)s:9: CompileError: WebAssembly.instantiate(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+let rethrow = e => setTimeout(_ => {throw e}, 0);
+ ^
+CompileError: WebAssembly.instantiate(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+
diff --git a/deps/v8/test/message/fail/wasm-exception-rethrow.js b/deps/v8/test/message/fail/wasm-exception-rethrow.js
new file mode 100644
index 0000000000..9731914ede
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-exception-rethrow.js
@@ -0,0 +1,21 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-eh
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+let except = builder.addException(kSig_v_i);
+builder.addFunction("rethrow0", kSig_v_v)
+ .addBody([
+ kExprTry, kWasmStmt,
+ kExprI32Const, 23,
+ kExprThrow, except,
+ kExprCatch,
+ kExprRethrow,
+ kExprEnd,
+]).exportFunc();
+let instance = builder.instantiate();
+instance.exports.rethrow0();
diff --git a/deps/v8/test/message/fail/wasm-exception-rethrow.out b/deps/v8/test/message/fail/wasm-exception-rethrow.out
new file mode 100644
index 0000000000..98fdbc9376
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-exception-rethrow.out
@@ -0,0 +1,4 @@
+wasm-function[0]:5: RuntimeError: wasm exception
+RuntimeError: wasm exception
+ at rethrow0 (wasm-function[0]:5)
+ at *%(basename)s:21:18
diff --git a/deps/v8/test/message/fail/wasm-exception-throw.js b/deps/v8/test/message/fail/wasm-exception-throw.js
new file mode 100644
index 0000000000..620d693e02
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-exception-throw.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-eh
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+let except = builder.addException(kSig_v_i);
+builder.addFunction("throw0", kSig_v_v)
+ .addBody([
+ kExprI32Const, 23,
+ kExprThrow, except,
+]).exportFunc();
+let instance = builder.instantiate();
+instance.exports.throw0();
diff --git a/deps/v8/test/message/fail/wasm-exception-throw.out b/deps/v8/test/message/fail/wasm-exception-throw.out
new file mode 100644
index 0000000000..65083190c6
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-exception-throw.out
@@ -0,0 +1,4 @@
+wasm-function[0]:3: RuntimeError: wasm exception
+RuntimeError: wasm exception
+ at throw0 (wasm-function[0]:3)
+ at *%(basename)s:17:18
diff --git a/deps/v8/test/message/fail/wasm-streaming-compile-fail.js b/deps/v8/test/message/fail/wasm-streaming-compile-fail.js
new file mode 100644
index 0000000000..e7c223b384
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-streaming-compile-fail.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-test-streaming
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+builder.addFunction('f', kSig_i_v).addBody([]);
+let rethrow = e => setTimeout(_ => {throw e}, 0);
+WebAssembly.compileStreaming(builder.toBuffer()).catch(rethrow);
diff --git a/deps/v8/test/message/fail/wasm-streaming-compile-fail.out b/deps/v8/test/message/fail/wasm-streaming-compile-fail.out
new file mode 100644
index 0000000000..d11d5ebf84
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-streaming-compile-fail.out
@@ -0,0 +1,5 @@
+*%(basename)s:11: CompileError: WebAssembly.compileStreaming(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+let rethrow = e => setTimeout(_ => {throw e}, 0);
+ ^
+CompileError: WebAssembly.compileStreaming(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+
diff --git a/deps/v8/test/message/fail/wasm-streaming-instantiate-fail.js b/deps/v8/test/message/fail/wasm-streaming-instantiate-fail.js
new file mode 100644
index 0000000000..39d9158e1a
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-streaming-instantiate-fail.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-test-streaming
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+builder.addFunction('f', kSig_i_v).addBody([]);
+let rethrow = e => setTimeout(_ => {throw e}, 0);
+WebAssembly.instantiateStreaming(builder.toBuffer()).catch(rethrow);
diff --git a/deps/v8/test/message/fail/wasm-streaming-instantiate-fail.out b/deps/v8/test/message/fail/wasm-streaming-instantiate-fail.out
new file mode 100644
index 0000000000..d8cd334de0
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-streaming-instantiate-fail.out
@@ -0,0 +1,5 @@
+*%(basename)s:11: CompileError: WebAssembly.instantiateStreaming(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+let rethrow = e => setTimeout(_ => {throw e}, 0);
+ ^
+CompileError: WebAssembly.instantiateStreaming(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+
diff --git a/deps/v8/test/message/fail/wasm-sync-compile-fail.js b/deps/v8/test/message/fail/wasm-sync-compile-fail.js
new file mode 100644
index 0000000000..98e09efbc1
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-sync-compile-fail.js
@@ -0,0 +1,9 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+builder.addFunction('f', kSig_i_v).addBody([]);
+new WebAssembly.Module(builder.toBuffer());
diff --git a/deps/v8/test/message/fail/wasm-sync-compile-fail.out b/deps/v8/test/message/fail/wasm-sync-compile-fail.out
new file mode 100644
index 0000000000..849fc202ee
--- /dev/null
+++ b/deps/v8/test/message/fail/wasm-sync-compile-fail.out
@@ -0,0 +1,6 @@
+*%(basename)s:9: CompileError: WebAssembly.Module(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+new WebAssembly.Module(builder.toBuffer());
+^
+CompileError: WebAssembly.Module(): Compiling function #0:"f" failed: expected 1 elements on the stack for fallthru to @1, found 0 @+24
+ at *%(basename)s:9:1
+
diff --git a/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.js b/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.js
index ebfa83d042..6ab9d6c7b2 100644
--- a/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.js
+++ b/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --experimental-stack-trace-frames
+
// Test wasm compilation explicitly, since this creates a promise which is only
// resolved later, i.e. the message queue gets empty in-between.
// The important part here is that d8 exits with a non-zero exit code.
diff --git a/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.out b/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.out
index ecee922cbc..f42185f51c 100644
--- a/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.out
+++ b/deps/v8/test/message/mjsunit/fail/assert-promise-result-wasm-compile-fail.out
@@ -6,4 +6,6 @@ Error
at *%(basename)s:{NUMBER}:1
CompileError: WebAssembly.compile(): BufferSource argument is empty
+ at WebAssembly.compile (<anonymous>)
at test (*%(basename)s:{NUMBER}:23)
+
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index 66ebe49369..7a2a222841 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -105,6 +105,8 @@ function get_standard_literal() {
return literal;
}
+%PrepareFunctionForOptimization(get_standard_literal);
+
// Case: [1,2,3] as allocation site
obj = fastliteralcase(get_standard_literal(), 1);
assertKind(elements_kind.fast_smi_only, obj);
@@ -321,6 +323,8 @@ function instanceof_check2(type) {
assertTrue(new type(1,2,3) instanceof type);
}
+%PrepareFunctionForOptimization(instanceof_check);
+
var realmBArray = Realm.eval(realmB, "Array");
// Two calls with Array because ES6 instanceof desugars into a load of Array,
// and load has a premonomorphic state.
@@ -354,10 +358,12 @@ assertOptimized(instanceof_check);
// Try to optimize again, but first clear all type feedback, and allow it
// to be monomorphic on first call. Only after optimizing do we introduce
// realmBArray. This should deopt the method.
+ %PrepareFunctionForOptimization(instanceof_check);
%DeoptimizeFunction(instanceof_check);
%ClearFunctionFeedback(instanceof_check);
instanceof_check(Array);
instanceof_check(Array);
+ %PrepareFunctionForOptimization(instanceof_check);
%OptimizeFunctionOnNextCall(instanceof_check);
instanceof_check(Array);
assertOptimized(instanceof_check);
diff --git a/deps/v8/test/mjsunit/arguments-deopt.js b/deps/v8/test/mjsunit/arguments-deopt.js
index 20e5f69152..192739fdba 100644
--- a/deps/v8/test/mjsunit/arguments-deopt.js
+++ b/deps/v8/test/mjsunit/arguments-deopt.js
@@ -16,11 +16,13 @@
return f.apply(null, arguments);
}
+ %PrepareFunctionForOptimization(test1);
assertEquals(test1(1, 2), 3);
assertEquals(test1(1, 2, 3), 3);
%OptimizeFunctionOnNextCall(test1);
assertEquals(test1(1, 2), 3);
+ %PrepareFunctionForOptimization(test1);
%OptimizeFunctionOnNextCall(test1);
assertEquals(test1(1, 2, 3), 3);
})();
@@ -35,11 +37,13 @@
return f.apply(null, arguments);
}
+ %PrepareFunctionForOptimization(test2);
assertEquals(test2(1, 2), 3);
assertEquals(test2(1, 2, 3), 3);
%OptimizeFunctionOnNextCall(test2);
assertEquals(test2(1, 2), 3);
+ %PrepareFunctionForOptimization(test2);
%OptimizeFunctionOnNextCall(test2);
assertEquals(test2(1, 2, 3), 3);
})();
@@ -57,11 +61,13 @@
return f.apply(null, arguments);
}
+ %PrepareFunctionForOptimization(test3);
assertEquals(test3(1, 2), 3);
assertEquals(test3(1, 2, 3), 3);
%OptimizeFunctionOnNextCall(test3);
assertEquals(test3(11, 12), 23);
+ %PrepareFunctionForOptimization(test3);
%OptimizeFunctionOnNextCall(test3);
assertEquals(test3(11, 12, 13), 23);
})();
@@ -80,11 +86,13 @@
test4(1, 2);
test4(3, 4, 5);
+ %PrepareFunctionForOptimization(test4);
assertEquals(test4(1, 2), 6);
assertEquals(test4(1, 2, 3), 6);
%OptimizeFunctionOnNextCall(test4);
assertEquals(test4(1, 2), 6);
+ %PrepareFunctionForOptimization(test4);
%OptimizeFunctionOnNextCall(test4);
assertEquals(test4(1, 2, 3), 6);
})();
@@ -102,6 +110,7 @@
return sum;
};
+ %PrepareFunctionForOptimization(sum1);
var args = []
for (var i = 1; i < 30; ++i) {
args.push(i);
@@ -121,6 +130,7 @@
return sum;
};
+ %PrepareFunctionForOptimization(sum2);
var args = []
for (var i = 1; i < 30; ++i) {
args.push(i);
@@ -140,6 +150,7 @@
return sum;
};
+ %PrepareFunctionForOptimization(sum3);
var args = []
for (var i = 1; i < 30; ++i) {
args.push(i);
@@ -159,6 +170,7 @@
return sum;
};
+ %PrepareFunctionForOptimization(sum4);
var args = []
for (var i = 1; i < 30; ++i) {
args.push(i);
@@ -174,6 +186,7 @@
return arguments[arguments.length-1];
};
+ %PrepareFunctionForOptimization(read);
var args = []
for (var i = 1; i < 30; ++i) {
args.push(i);
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index aa6fbbed54..f26781ee95 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -67,6 +67,7 @@ function assertKind(expected, obj, name_opt) {
function bar0(t) {
return new t();
}
+ %PrepareFunctionForOptimization(bar0);
a = bar0(Array);
a[0] = 3.5;
b = bar0(Array);
@@ -77,7 +78,8 @@ function assertKind(expected, obj, name_opt) {
assertOptimized(bar0);
// bar0 should deopt
b = bar0(Object);
- assertUnoptimized(bar0)
+ assertUnoptimized(bar0);
+ %PrepareFunctionForOptimization(bar0);
// When it's re-optimized, we should call through the full stub
bar0(Array);
%OptimizeFunctionOnNextCall(bar0);
@@ -99,6 +101,7 @@ function assertKind(expected, obj, name_opt) {
function bar() {
return new Array();
}
+ %PrepareFunctionForOptimization(bar);
a = bar();
bar();
%OptimizeFunctionOnNextCall(bar);
@@ -115,6 +118,7 @@ function assertKind(expected, obj, name_opt) {
// map for Array in that context will be used.
(function() {
function bar() { return new Array(); }
+ %PrepareFunctionForOptimization(bar);
bar();
bar();
%OptimizeFunctionOnNextCall(bar);
@@ -134,6 +138,7 @@ function assertKind(expected, obj, name_opt) {
// should deal with arguments that create holey arrays.
(function() {
function bar(len) { return new Array(len); }
+ %PrepareFunctionForOptimization(bar);
bar(0);
bar(0);
%OptimizeFunctionOnNextCall(bar);
@@ -153,6 +158,7 @@ function assertKind(expected, obj, name_opt) {
// Test: Make sure that crankshaft continues with feedback for large arrays.
(function() {
function bar(len) { return new Array(len); }
+ %PrepareFunctionForOptimization(bar);
var size = 100001;
// Perform a gc, because we are allocating a very large array and if a gc
// happens during the allocation we could lose our memento.
diff --git a/deps/v8/test/mjsunit/array-literal-feedback.js b/deps/v8/test/mjsunit/array-literal-feedback.js
index 6ad9cd08dd..a54f37a39b 100644
--- a/deps/v8/test/mjsunit/array-literal-feedback.js
+++ b/deps/v8/test/mjsunit/array-literal-feedback.js
@@ -62,8 +62,9 @@ function assertKind(expected, obj, name_opt) {
function get_literal(x) {
var literal = [1, 2, x];
return literal;
-}
+};
+%PrepareFunctionForOptimization(get_literal);
get_literal(3);
// It's important to store a from before we crankshaft get_literal, because
// mementos won't be created from crankshafted code at all.
@@ -84,6 +85,7 @@ assertEquals([1, 2, 3], b);
assertUnoptimized(get_literal);
// Optimize again
+%PrepareFunctionForOptimization(get_literal);
get_literal(3);
%OptimizeFunctionOnNextCall(get_literal);
b = get_literal(3);
@@ -109,7 +111,8 @@ assertOptimized(get_literal);
(function changeOptimizedEmptyArrayKind() {
function f() {
return new Array();
- }
+ };
+ %PrepareFunctionForOptimization(f);
var a = f();
assertKind('packed smi elements', a);
a = f();
@@ -125,7 +128,8 @@ assertOptimized(get_literal);
(function changeOptimizedArrayLiteralKind() {
function f() {
return [1, 2];
- }
+ };
+ %PrepareFunctionForOptimization(f);
var a = f();
assertKind('packed smi elements', a);
@@ -160,7 +164,8 @@ assertOptimized(get_literal);
(function changeOptimizedEmptyArrayLiteralKind() {
function f() {
return [];
- }
+ };
+ %PrepareFunctionForOptimization(f);
var a = f();
assertKind('packed smi elements', a);
assertFalse(isHoley(a));
@@ -190,7 +195,8 @@ assertOptimized(get_literal);
var literal = [];
%HeapObjectVerify(literal);
return literal;
- }
+ };
+ %PrepareFunctionForOptimization(f);
var a = f();
assertKind('packed smi elements', a);
assertFalse(isHoley(a));
diff --git a/deps/v8/test/mjsunit/array-literal-transitions.js b/deps/v8/test/mjsunit/array-literal-transitions.js
index 6366839a62..84fbc27c54 100644
--- a/deps/v8/test/mjsunit/array-literal-transitions.js
+++ b/deps/v8/test/mjsunit/array-literal-transitions.js
@@ -79,7 +79,9 @@ function array_literal_test() {
assertEquals(2, f0[1]);
assertEquals(1, f0[0]);
}
+%PrepareFunctionForOptimization(array_literal_test);
+%PrepareFunctionForOptimization(array_literal_test);
for (var i = 0; i < 3; i++) {
array_literal_test();
}
@@ -108,7 +110,9 @@ function test_large_literal() {
[0, 1, 2, 3, 4, 5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5,
new Object(), new Object(), new Object(), new Object()]);
}
+%PrepareFunctionForOptimization(test_large_literal);
+%PrepareFunctionForOptimization(test_large_literal);
for (var i = 0; i < 3; i++) {
test_large_literal();
}
@@ -122,7 +126,9 @@ function deopt_array(use_literal) {
return new Array();
}
}
+%PrepareFunctionForOptimization(deopt_array);
+%PrepareFunctionForOptimization(deopt_array);
deopt_array(false);
deopt_array(false);
deopt_array(false);
@@ -139,7 +145,9 @@ assertOptimized(deopt_array);
function deopt_array_literal_all_smis(a) {
return [0, 1, a];
}
+%PrepareFunctionForOptimization(deopt_array_literal_all_smis);
+%PrepareFunctionForOptimization(deopt_array_literal_all_smis);
deopt_array_literal_all_smis(2);
deopt_array_literal_all_smis(3);
deopt_array_literal_all_smis(4);
@@ -164,7 +172,9 @@ assertEquals(.5, array[2]);
function deopt_array_literal_all_doubles(a) {
return [0.5, 1, a];
}
+%PrepareFunctionForOptimization(deopt_array_literal_all_doubles);
+%PrepareFunctionForOptimization(deopt_array_literal_all_doubles);
deopt_array_literal_all_doubles(.5);
deopt_array_literal_all_doubles(.5);
deopt_array_literal_all_doubles(.5);
diff --git a/deps/v8/test/mjsunit/array-methods-read-only-length.js b/deps/v8/test/mjsunit/array-methods-read-only-length.js
index 7de580d074..a6b3091dd1 100644
--- a/deps/v8/test/mjsunit/array-methods-read-only-length.js
+++ b/deps/v8/test/mjsunit/array-methods-read-only-length.js
@@ -20,6 +20,7 @@ function testAdd(mode) {
if (mode == "fast properties") %ToFastProperties(a);
+ %PrepareFunctionForOptimization(push);
check(push);
check(push);
check(push);
@@ -30,6 +31,7 @@ function testAdd(mode) {
a.unshift(3);
}
+ %PrepareFunctionForOptimization(unshift);
check(unshift);
check(unshift);
check(unshift);
@@ -40,6 +42,7 @@ function testAdd(mode) {
a.splice(0, 0, 3);
}
+ %PrepareFunctionForOptimization(splice);
check(splice);
check(splice);
check(splice);
@@ -65,6 +68,7 @@ function testRemove(a, mode) {
a.pop();
}
+ %PrepareFunctionForOptimization(pop);
check(pop);
check(pop);
check(pop);
@@ -75,6 +79,7 @@ function testRemove(a, mode) {
a.shift();
}
+ %PrepareFunctionForOptimization(shift);
check(shift);
check(shift);
check(shift);
@@ -85,6 +90,7 @@ function testRemove(a, mode) {
a.splice(0, 1);
}
+ %PrepareFunctionForOptimization(splice);
check(splice);
check(splice);
check(splice);
diff --git a/deps/v8/test/mjsunit/array-natives-elements.js b/deps/v8/test/mjsunit/array-natives-elements.js
index aa3bea49d0..0b389918b9 100644
--- a/deps/v8/test/mjsunit/array-natives-elements.js
+++ b/deps/v8/test/mjsunit/array-natives-elements.js
@@ -305,6 +305,7 @@ function array_natives_test() {
assertEquals([1.1,{},2,3], a4);
}
+%PrepareFunctionForOptimization(array_natives_test);
for (var i = 0; i < 3; i++) {
array_natives_test();
}
diff --git a/deps/v8/test/mjsunit/array-push12.js b/deps/v8/test/mjsunit/array-push12.js
index f4c15b484b..93d59fd335 100644
--- a/deps/v8/test/mjsunit/array-push12.js
+++ b/deps/v8/test/mjsunit/array-push12.js
@@ -17,6 +17,7 @@ function f() {
g();
}
+%PrepareFunctionForOptimization(f);
g();
g();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/array-push3.js b/deps/v8/test/mjsunit/array-push3.js
index 99bd857a73..e1f438b356 100644
--- a/deps/v8/test/mjsunit/array-push3.js
+++ b/deps/v8/test/mjsunit/array-push3.js
@@ -8,8 +8,9 @@ var array = [];
function push(array, value) {
array.push(value);
-}
+};
+%PrepareFunctionForOptimization(push);
push(array, 0);
push(array, 1);
push(array, 2);
diff --git a/deps/v8/test/mjsunit/array-push9.js b/deps/v8/test/mjsunit/array-push9.js
index f1427f2c9f..0bdcda1d8d 100644
--- a/deps/v8/test/mjsunit/array-push9.js
+++ b/deps/v8/test/mjsunit/array-push9.js
@@ -10,6 +10,7 @@ function push(array, value) {
array.push(value);
}
+%PrepareFunctionForOptimization(push);
push(array, 0);
push(array, 1);
push(array, 2);
diff --git a/deps/v8/test/mjsunit/array-shift2.js b/deps/v8/test/mjsunit/array-shift2.js
index 75233ffec2..46653ac1ef 100644
--- a/deps/v8/test/mjsunit/array-shift2.js
+++ b/deps/v8/test/mjsunit/array-shift2.js
@@ -13,6 +13,7 @@ function test(array) {
return array;
}
+%PrepareFunctionForOptimization(test);
var result = test(["0",,2]);
assertEquals(["element 1","element 1"], result);
assertTrue(result.hasOwnProperty("0"));
diff --git a/deps/v8/test/mjsunit/array-shift4.js b/deps/v8/test/mjsunit/array-shift4.js
index 5d28fd306a..40378ae554 100644
--- a/deps/v8/test/mjsunit/array-shift4.js
+++ b/deps/v8/test/mjsunit/array-shift4.js
@@ -17,6 +17,7 @@ function makeArray() {
return a;
}
+%PrepareFunctionForOptimization(doShift);
doShift(makeArray());
doShift(makeArray());
%OptimizeFunctionOnNextCall(doShift);
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index ca0daadf04..6db875947a 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -127,9 +127,8 @@ function TestSparseNonArraySorting(length) {
assertFalse(4 in obj, "objsort non-existing retained");
}
+TestSparseNonArraySorting(1000);
TestSparseNonArraySorting(5000);
-TestSparseNonArraySorting(500000);
-TestSparseNonArraySorting(Math.pow(2, 31) + 1);
function TestArrayLongerLength(length) {
@@ -147,8 +146,7 @@ function TestArrayLongerLength(length) {
TestArrayLongerLength(4);
TestArrayLongerLength(10);
TestArrayLongerLength(1000);
-TestArrayLongerLength(500000);
-TestArrayLongerLength(Math.pow(2,32) - 1);
+TestArrayLongerLength(5000);
function TestNonArrayLongerLength(length) {
@@ -166,8 +164,7 @@ function TestNonArrayLongerLength(length) {
TestNonArrayLongerLength(4);
TestNonArrayLongerLength(10);
TestNonArrayLongerLength(1000);
-TestNonArrayLongerLength(500000);
-TestNonArrayLongerLength(Math.pow(2,32) - 1);
+TestNonArrayLongerLength(5000);
function TestNonArrayWithAccessors() {
diff --git a/deps/v8/test/mjsunit/array-store-and-grow.js b/deps/v8/test/mjsunit/array-store-and-grow.js
index ee831ad061..d717c6dfa6 100644
--- a/deps/v8/test/mjsunit/array-store-and-grow.js
+++ b/deps/v8/test/mjsunit/array-store-and-grow.js
@@ -197,6 +197,7 @@ assertEquals(0.5, array_store_1([], 0, 0.5));
a[b] = c;
}
+ %PrepareFunctionForOptimization(grow_store);
a = new Array(1);
grow_store(a,1,1);
grow_store(a,2,1);
@@ -216,6 +217,7 @@ assertEquals(0.5, array_store_1([], 0, 0.5));
function f(o, k, v) {
o[k] = v;
}
+ %PrepareFunctionForOptimization(f);
a = [3.5];
f(a, 1, "hi"); // DOUBLE packed array -> tagged packed grow
@@ -238,6 +240,7 @@ assertEquals(0.5, array_store_1([], 0, 0.5));
function f(o, k, v) {
o[k] = v;
}
+ %PrepareFunctionForOptimization(f);
a = [3.5];
f(a, 0, "hi"); // DOUBLE packed array -> tagged packed grow
diff --git a/deps/v8/test/mjsunit/async-stack-traces-promise-all.js b/deps/v8/test/mjsunit/async-stack-traces-promise-all.js
index 7f8457c961..b677635b9d 100644
--- a/deps/v8/test/mjsunit/async-stack-traces-promise-all.js
+++ b/deps/v8/test/mjsunit/async-stack-traces-promise-all.js
@@ -28,6 +28,8 @@
}
assertPromiseResult((async () => {
+ %PrepareFunctionForOptimization(thrower);
+ %PrepareFunctionForOptimization(driver);
await test(driver);
await test(driver);
%OptimizeFunctionOnNextCall(thrower);
diff --git a/deps/v8/test/mjsunit/async-stack-traces.js b/deps/v8/test/mjsunit/async-stack-traces.js
index c945f4e37b..2932cfc1da 100644
--- a/deps/v8/test/mjsunit/async-stack-traces.js
+++ b/deps/v8/test/mjsunit/async-stack-traces.js
@@ -26,6 +26,8 @@
}
assertPromiseResult((async () => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
await test(one);
await test(one);
%OptimizeFunctionOnNextCall(two);
@@ -57,6 +59,8 @@
}
assertPromiseResult((async() => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
await test(one);
await test(one);
%OptimizeFunctionOnNextCall(two);
@@ -92,6 +96,8 @@
}
assertPromiseResult((async() => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
await test(one);
await test(one);
%OptimizeFunctionOnNextCall(two);
@@ -131,6 +137,8 @@
}
assertPromiseResult((async() => {
+ %PrepareFunctionForOptimization(callOne);
+ %PrepareFunctionForOptimization(callTwo);
await test(callOne);
await test(callOne);
%OptimizeFunctionOnNextCall(callTwo);
@@ -164,6 +172,8 @@
}
assertPromiseResult((async() => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
await test(one);
await test(one);
%OptimizeFunctionOnNextCall(two);
@@ -196,6 +206,8 @@
}
assertPromiseResult((async () => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
await test(one);
await test(one);
%OptimizeFunctionOnNextCall(two);
@@ -228,6 +240,8 @@
}
assertPromiseResult((async () => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
await test(one);
await test(one);
%OptimizeFunctionOnNextCall(two);
@@ -260,6 +274,8 @@
}
assertPromiseResult((async () => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
await test(one);
await test(one);
%OptimizeFunctionOnNextCall(two);
@@ -291,6 +307,8 @@
}
assertPromiseResult((async () => {
+ %PrepareFunctionForOptimization(one);
+ %PrepareFunctionForOptimization(two);
await test(one);
await test(one);
%OptimizeFunctionOnNextCall(two);
diff --git a/deps/v8/test/mjsunit/code-coverage-block-opt.js b/deps/v8/test/mjsunit/code-coverage-block-opt.js
index 204c9bdbea..3901fad0ed 100644
--- a/deps/v8/test/mjsunit/code-coverage-block-opt.js
+++ b/deps/v8/test/mjsunit/code-coverage-block-opt.js
@@ -22,9 +22,8 @@ f(); f(); %OptimizeFunctionOnNextCall(f); // 0100
f(); f(); f(); f(); f(); f(); // 0150
`,
[{"start":0,"end":199,"count":1},
- {"start":0,"end":33,"count":4}, // TODO(jgruber): Invocation count is off.
- {"start":25,"end":31,"count":16},
- {"start":50,"end":76,"count":2}] // TODO(jgruber): Invocation count is off.
+ {"start":0,"end":33,"count":16},
+ {"start":50,"end":76,"count":8}]
);
// This test is tricky: it requires a non-toplevel, optimized function.
@@ -44,8 +43,8 @@ TestCoverage("Partial coverage collection",
f(false); // 0350
}(); // 0400
`,
-[{"start":52,"end":153,"count":0},
- {"start":121,"end":137,"count":1}]
+[{"start":52,"end":153,"count":1},
+ {"start":111,"end":121,"count":0}]
);
%DebugToggleBlockCoverage(false);
diff --git a/deps/v8/test/mjsunit/code-coverage-block.js b/deps/v8/test/mjsunit/code-coverage-block.js
index 866c7bac94..c441342cdf 100644
--- a/deps/v8/test/mjsunit/code-coverage-block.js
+++ b/deps/v8/test/mjsunit/code-coverage-block.js
@@ -216,9 +216,8 @@ TestCoverage(
%PerformMicrotaskCheckpoint(); // 0250
`,
[{"start":0,"end":299,"count":1},
- {"start":1,"end":201,"count":6}, // TODO(jgruber): Invocation count is off.
- {"start":83,"end":153,"count":4},
- {"start":153,"end":200,"count":1}]
+ {"start":1,"end":201,"count":1},
+ {"start":83,"end":153,"count":4}]
);
TestCoverage(
@@ -415,7 +414,7 @@ TestCoverage(
{"start":286,"end":350,"count":0},
{"start":401,"end":701,"count":1},
{"start":603,"end":700,"count":0},
- {"start":561,"end":568,"count":0}, // TODO(jgruber): Sorting.
+ {"start":561,"end":568,"count":0},
{"start":751,"end":1051,"count":1},
{"start":819,"end":820,"count":0},
{"start":861,"end":1050,"count":0}]
@@ -540,10 +539,25 @@ const it = function*() { // 0000
it.next(); it.next(); // 0250
`,
[{"start":0,"end":299,"count":1},
- {"start":11,"end":201,"count":3},
- {"start":64,"end":114,"count":1},
+ {"start":11,"end":201,"count":1},
+ {"start":114,"end":121,"count":0},
+ {"start":129,"end":200,"count":0}]
+);
+
+TestCoverage(
+"yield expressions twice",
+`
+function* gen() { // 0000
+ yield nop(); // 0050
+ yield nop() ? nop() : nop() // 0100
+ return nop(); // 0150
+}; // 0200
+{const it = gen(); it.next(); it.next();} // 0250
+{const it = gen(); it.next(); it.next();} // 0300
+`,
+[{"start":0,"end":349,"count":1},
+ {"start":0,"end":201,"count":2},
{"start":114,"end":121,"count":0},
- {"start":122,"end":129,"count":1},
{"start":129,"end":200,"count":0}]
);
@@ -563,9 +577,9 @@ try { // 0200
`,
[{"start":0,"end":499,"count":1},
{"start":451,"end":452,"count":0},
- {"start":12,"end":101,"count":3},
+ {"start":12,"end":101,"count":1},
{"start":60,"end":100,"count":0},
- {"start":264,"end":353,"count":3},
+ {"start":264,"end":353,"count":1},
{"start":312,"end":352,"count":0}]
);
@@ -582,9 +596,8 @@ const it = function*() { // 0000
it.next(); it.return(); // 0450
`,
[{"start":0,"end":449,"count":1},
- {"start":11,"end":351,"count":3},
+ {"start":11,"end":351,"count":1},
{"start":112,"end":254,"count":0},
- {"start":254,"end":272,"count":1},
{"start":272,"end":350,"count":0}]
);
@@ -601,9 +614,8 @@ const it = function*() { // 0000
it.next(); it.throw(42); // 0550
`,
[{"start":0,"end":449,"count":1},
- {"start":11,"end":351,"count":3},
+ {"start":11,"end":351,"count":1},
{"start":112,"end":154,"count":0},
- {"start":154,"end":310,"count":1},
{"start":310,"end":350,"count":0}]
);
@@ -619,10 +631,8 @@ it.next(); it.next(); it.next(); // 0250
it.next(); it.next(); it.next(); // 0300
`,
[{"start":0,"end":349,"count":1},
- {"start":11,"end":201,"count":7},
- {"start":65,"end":115,"count":1},
+ {"start":11,"end":201,"count":1},
{"start":115,"end":122,"count":0},
- {"start":123,"end":130,"count":1},
{"start":130,"end":200,"count":0}]
);
@@ -642,9 +652,9 @@ try { // 0200
`,
[{"start":0,"end":499,"count":1},
{"start":451,"end":452,"count":0},
- {"start":12,"end":101,"count":3},
+ {"start":12,"end":101,"count":1},
{"start":65,"end":100,"count":0},
- {"start":264,"end":353,"count":3},
+ {"start":264,"end":353,"count":1},
{"start":317,"end":352,"count":0}]
);
@@ -659,8 +669,7 @@ f(); // 0200
%PerformMicrotaskCheckpoint(); // 0250
`,
[{"start":0,"end":299,"count":1},
- {"start":0,"end":151,"count":3},
- {"start":61,"end":150,"count":1}]
+ {"start":0,"end":151,"count":1}]
);
TestCoverage(
@@ -676,7 +685,8 @@ b() // 0250
[{"start":0,"end":299,"count":1},
{"start":15,"end":20,"count":0},
{"start":50,"end":151,"count":2},
- {"start":114,"end":118,"count":0}]);
+ {"start":114,"end":118,"count":0}]
+);
TestCoverage(
"LogicalOrExpression IsTest()",
@@ -705,7 +715,8 @@ const c = true && 50 // 0300
[{"start":0,"end":349,"count":1},
{"start":16,"end":21,"count":0},
{"start":50,"end":151,"count":2},
- {"start":114,"end":118,"count":0}]);
+ {"start":114,"end":118,"count":0}]
+);
TestCoverage(
"LogicalAndExpression IsTest()",
diff --git a/deps/v8/test/mjsunit/code-coverage-class-fields.js b/deps/v8/test/mjsunit/code-coverage-class-fields.js
index 8db45d142b..15b5478fca 100644
--- a/deps/v8/test/mjsunit/code-coverage-class-fields.js
+++ b/deps/v8/test/mjsunit/code-coverage-class-fields.js
@@ -10,62 +10,55 @@
TestCoverage(
"class with no fields",
-`class X { // 000
+`
+class X { // 000
}; // 050
`,
- [
- { start: 0, end: 98, count: 1 },
- { start: 0, end: 0, count: 0 },
- ]
+[{"start":0,"end":99,"count":1}]
);
TestCoverage(
"class that's not created",
-`class X { // 000
+`
+class X { // 000
x = function() { } // 050
}; // 100
`,
- [
- { start: 0, end: 148, count: 1 },
- { start: 0, end: 0, count: 0 },
- { start: 51, end: 69, count: 0 },
- ]
+[{"start":0,"end":149,"count":1},
+ {"start":52,"end":70,"count":0}]
);
TestCoverage(
"class with field thats not called",
-`class X { // 000
+`
+class X { // 000
x = function() { } // 050
}; // 100
let x = new X(); // 150
`,
- [
- { start: 0, end: 198, count: 1 },
- { start: 0, end: 0, count: 1 },
- { start: 51, end: 69, count: 1 },
- { start: 55, end: 69, count: 0 }
- ]
+[{"start":0,"end":199,"count":1},
+ {"start":52,"end":70,"count":1},
+ {"start":56,"end":70,"count":0}]
);
TestCoverage(
"class field",
-`class X { // 000
+`
+class X { // 000
x = function() { } // 050
}; // 100
let x = new X(); // 150
x.x(); // 200
`,
- [
- { start: 0, end: 248, count: 1 },
- { start: 0, end: 0, count: 1 },
- { start: 51, end: 69, count: 1 },
- { start: 55, end: 69, count: 1 }
- ]
+[{"start":0,"end":249,"count":1},
+ {"start":52,"end":70,"count":1},
+ {"start":56,"end":70,"count":1}]
);
TestCoverage(
"non contiguous class field",
-`class X { // 000
+`
+class X { // 000
x = function() { } // 050
foo() { } // 100
y = function() {} // 150
@@ -74,19 +67,17 @@ let x = new X(); // 250
x.x(); // 300
x.y(); // 350
`,
- [
- { start: 0, end: 398, count: 1 },
- { start: 0, end: 0, count: 1 },
- { start: 51, end: 168, count: 1 },
- { start: 55, end: 69, count: 1 },
- { start: 101, end: 110, count: 0 },
- { start: 155, end: 168, count: 1 },
- ]
+[{"start":0,"end":399,"count":1},
+ {"start":52,"end":169,"count":1},
+ {"start":56,"end":70,"count":1},
+ {"start":102,"end":111,"count":0},
+ {"start":156,"end":169,"count":1}]
);
TestCoverage(
"non contiguous class field thats called",
-`class X { // 000
+`
+class X { // 000
x = function() { } // 050
foo() { } // 100
y = function() {} // 150
@@ -96,29 +87,24 @@ x.x(); // 300
x.y(); // 350
x.foo(); // 400
`,
- [
- { start: 0, end: 448, count: 1 },
- { start: 0, end: 0, count: 1 },
- { start: 51, end: 168, count: 1 },
- { start: 55, end: 69, count: 1 },
- { start: 101, end: 110, count: 1 },
- { start: 155, end: 168, count: 1 },
- ]
+[{"start":0,"end":449,"count":1},
+ {"start":52,"end":169,"count":1},
+ {"start":56,"end":70,"count":1},
+ {"start":102,"end":111,"count":1},
+ {"start":156,"end":169,"count":1}]
);
TestCoverage(
"class with initializer iife",
-`class X { // 000
+`
+class X { // 000
x = (function() { })() // 050
}; // 100
let x = new X(); // 150
`,
- [
- { start: 0, end: 198, count: 1 },
- { start: 0, end: 0, count: 1 },
- { start: 51, end: 73, count: 1 },
- { start: 56, end: 70, count: 1 }
- ]
+[{"start":0,"end":199,"count":1},
+ {"start":52,"end":74,"count":1},
+ {"start":57,"end":71,"count":1}]
);
TestCoverage(
@@ -130,56 +116,47 @@ class X { // 050
}; // 150
let x = new X(); // 200
`,
- [
- { start: 0, end: 249, count: 1 },
- { start: 0, end: 15, count: 1 },
- { start: 50, end: 50, count: 1 },
- { start: 102, end: 128, count: 1 },
- { start: 111, end: 125, count: 1 }
- ]
+[{"start":0,"end":249,"count":1},
+ {"start":0,"end":15,"count":1},
+ {"start":102,"end":128,"count":1},
+ {"start":111,"end":125,"count":1}]
);
TestCoverage(
"static class field that's not called",
-`class X { // 000
+`
+class X { // 000
static x = function() { } // 050
}; // 100
`,
- [
- { start: 0, end: 148, count: 1 },
- { start: 0, end: 0, count: 0 },
- { start: 51, end: 76, count: 1 },
- { start: 62, end: 76, count: 0 }
- ]
+[{"start":0,"end":149,"count":1},
+ {"start":52,"end":77,"count":1},
+ {"start":63,"end":77,"count":0}]
);
TestCoverage(
"static class field",
-`class X { // 000
+`
+class X { // 000
static x = function() { } // 050
}; // 100
X.x(); // 150
`,
- [
- { start: 0, end: 198, count: 1 },
- { start: 0, end: 0, count: 0 },
- { start: 51, end: 76, count: 1 },
- { start: 62, end: 76, count: 1 }
- ]
+[{"start":0,"end":199,"count":1},
+ {"start":52,"end":77,"count":1},
+ {"start":63,"end":77,"count":1}]
);
TestCoverage(
"static class field with iife",
-`class X { // 000
+`
+class X { // 000
static x = (function() { })() // 050
}; // 100
`,
- [
- { start: 0, end: 148, count: 1 },
- { start: 0, end: 0, count: 0 },
- { start: 51, end: 80, count: 1 },
- { start: 63, end: 77, count: 1 }
- ]
+[{"start":0,"end":149,"count":1},
+ {"start":52,"end":81,"count":1},
+ {"start":64,"end":78,"count":1}]
);
TestCoverage(
@@ -190,11 +167,8 @@ class X { // 050
static [f()] = (function() { })() // 100
}; // 150
`,
- [
- { start: 0, end: 199, count: 1 },
- { start: 0, end: 15, count: 1 },
- { start: 50, end: 50, count: 0 },
- { start: 102, end: 135, count: 1 },
- { start: 118, end: 132, count: 1 }
- ]
+[{"start":0,"end":199,"count":1},
+ {"start":0,"end":15,"count":1},
+ {"start":102,"end":135,"count":1},
+ {"start":118,"end":132,"count":1}]
);
diff --git a/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js b/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js
index 4eacc8a8be..b1749f0353 100644
--- a/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js
+++ b/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js
@@ -47,6 +47,10 @@ Object.defineProperty(O.prototype, Symbol.toStringTag, {
var obj1 = new O;
var obj2 = new O;
+%PrepareFunctionForOptimization(le);
+%PrepareFunctionForOptimization(lt);
+%PrepareFunctionForOptimization(ge);
+%PrepareFunctionForOptimization(gt);
assertTrue(%HaveSameMap(obj1, obj2));
test(obj1, obj2);
test(obj1, obj2);
diff --git a/deps/v8/test/mjsunit/compiler-regress-787301.js b/deps/v8/test/mjsunit/compiler-regress-787301.js
index 851e22a0cb..343ae1efdf 100644
--- a/deps/v8/test/mjsunit/compiler-regress-787301.js
+++ b/deps/v8/test/mjsunit/compiler-regress-787301.js
@@ -13,6 +13,7 @@ function opt(b) {
return arr.slice();
}
+%PrepareFunctionForOptimization(opt);
opt(false);
opt(false);
%OptimizeFunctionOnNextCall(opt);
diff --git a/deps/v8/test/mjsunit/compiler/array-access.js b/deps/v8/test/mjsunit/compiler/array-access.js
index a1811ad509..99a6259b30 100644
--- a/deps/v8/test/mjsunit/compiler/array-access.js
+++ b/deps/v8/test/mjsunit/compiler/array-access.js
@@ -45,23 +45,43 @@ function GetAAN(a,n) {
return a[a[a[n]]];
}
-function RunGetTests() {
- var a = [2,0,1];
- assertEquals(2, Get0(a));
+function RunGetTests(packed=true) {
+ if (packed) {
+ var a = [2,0,1];
+ assertEquals(2, Get0(a));
- assertEquals(2, GetN(a, 0));
- assertEquals(0, GetN(a, 1));
- assertEquals(1, GetN(a, 2));
+ assertEquals(2, GetN(a, 0));
+ assertEquals(0, GetN(a, 1));
+ assertEquals(1, GetN(a, 2));
- assertEquals(1, GetA0(a));
+ assertEquals(1, GetA0(a));
- assertEquals(1, GetAN(a,0));
- assertEquals(2, GetAN(a,1));
- assertEquals(0, GetAN(a,2));
+ assertEquals(1, GetAN(a,0));
+ assertEquals(2, GetAN(a,1));
+ assertEquals(0, GetAN(a,2));
- assertEquals(0, GetAAN(a,0));
- assertEquals(1, GetAAN(a,1));
- assertEquals(2, GetAAN(a,2));
+ assertEquals(0, GetAAN(a,0));
+ assertEquals(1, GetAAN(a,1));
+ assertEquals(2, GetAAN(a,2));
+ }
+ else {
+ var a = ['2','0','1'];
+ assertEquals('2', Get0(a));
+
+ assertEquals('2', GetN(a, 0));
+ assertEquals('0', GetN(a, 1));
+ assertEquals('1', GetN(a, 2));
+
+ assertEquals('1', GetA0(a));
+
+ assertEquals('1', GetAN(a,0));
+ assertEquals('2', GetAN(a,1));
+ assertEquals('0', GetAN(a,2));
+
+ assertEquals('0', GetAAN(a,0));
+ assertEquals('1', GetAAN(a,1));
+ assertEquals('2', GetAAN(a,2));
+ }
}
@@ -81,29 +101,39 @@ function SetNX(a, n, x) {
a[n] = x;
}
-function RunSetTests(a) {
+function RunSetTests(a, packed=true) {
Set07(a);
- assertEquals(7, a[0]);
+ if (packed) {
+ assertEquals(7, a[0]);
+ }
assertEquals(0, a[1]);
assertEquals(0, a[2]);
Set0V(a, 1);
- assertEquals(1, a[0]);
+ if (packed) {
+ assertEquals(1, a[0]);
+ }
assertEquals(0, a[1]);
assertEquals(0, a[2]);
SetN7(a, 2);
- assertEquals(1, a[0]);
+ if (packed) {
+ assertEquals(1, a[0]);
+ }
assertEquals(0, a[1]);
assertEquals(7, a[2]);
SetNX(a, 1, 5);
- assertEquals(1, a[0]);
+ if (packed) {
+ assertEquals(1, a[0]);
+ }
assertEquals(5, a[1]);
assertEquals(7, a[2]);
for (var i = 0; i < 3; i++) SetNX(a, i, 0);
- assertEquals(0, a[0]);
+ if (packed) {
+ assertEquals(0, a[0]);
+ }
assertEquals(0, a[1]);
assertEquals(0, a[2]);
}
@@ -131,31 +161,55 @@ for (var i = 0; i < 1000; i++) {
RunArrayBoundsCheckTest();
+// Packed
// Non-extensible
-a = Object.seal([0,0,0]);
-o = Object.seal({0: 0, 1: 0, 2: 0});
+a = Object.preventExtensions([0,0,0,'a']);
+o = Object.preventExtensions({0: 0, 1: 0, 2: 0});
for (var i = 0; i < 1000; i++) {
RunGetTests();
+ RunGetTests(false);
RunSetTests(a);
RunSetTests(o);
}
-RunArrayBoundsCheckTest();
-
// Sealed
-a = Object.seal([0,0,0]);
+a = Object.seal([0,0,0,'a']);
o = Object.seal({0: 0, 1: 0, 2: 0});
for (var i = 0; i < 1000; i++) {
RunGetTests();
+ RunGetTests(false);
RunSetTests(a);
RunSetTests(o);
}
-RunArrayBoundsCheckTest();
+// Frozen
+a = Object.freeze([0,0,0,'a']);
+o = Object.freeze({0: 0, 1: 0, 2: 0});
+for (var i = 0; i < 1000; i++) {
+ RunGetTests();
+ RunGetTests(false);
+}
+
+// Holey
+// Non-extensible
+a = Object.preventExtensions([,0,0,'a']);
+for (var i = 0; i < 1000; i++) {
+ RunGetTests();
+ RunGetTests(false);
+ RunSetTests(a, false);
+}
+
+// Sealed
+a = Object.seal([,0,0,'a']);
+for (var i = 0; i < 1000; i++) {
+ RunGetTests();
+ RunGetTests(false);
+ RunSetTests(a, false);
+}
// Frozen
-a = Object.seal([0,0,0]);
-o = Object.seal({0: 0, 1: 0, 2: 0});
+a = Object.freeze([,0,0,'a']);
for (var i = 0; i < 1000; i++) {
RunGetTests();
+ RunGetTests(false);
}
diff --git a/deps/v8/test/mjsunit/compiler/array-constructor.js b/deps/v8/test/mjsunit/compiler/array-constructor.js
index 56278bbb2d..aafe639df2 100644
--- a/deps/v8/test/mjsunit/compiler/array-constructor.js
+++ b/deps/v8/test/mjsunit/compiler/array-constructor.js
@@ -108,41 +108,82 @@
assertInstanceof(foo(-1), RangeError);
})();
+// Packed
// Test non-extensible Array call with multiple parameters.
(() => {
- function foo(x, y, z) { return Object.preventExtensions(new Array(x, y, z)); }
+ function foo(x, y, z, t) { return Object.preventExtensions(new Array(x, y, z, t)); }
%PrepareFunctionForOptimization(foo);
- assertEquals([1, 2, 3], foo(1, 2, 3));
- assertEquals([1, 2, 3], foo(1, 2, 3));
- assertFalse(Object.isExtensible(foo(1,2,3)));
+ assertEquals([1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertEquals([1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertFalse(Object.isExtensible(foo(1,2,3, 'a')));
%OptimizeFunctionOnNextCall(foo);
- assertEquals([1, 2, 3], foo(1, 2, 3));
- assertFalse(Object.isExtensible(foo(1,2,3)));
+ assertEquals([1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertFalse(Object.isExtensible(foo(1,2,3, 'a')));
})();
// Test sealed Array call with multiple parameters.
(() => {
- function foo(x, y, z) { return Object.seal(new Array(x, y, z)); }
+ function foo(x, y, z, t) { return Object.seal(new Array(x, y, z, t)); }
%PrepareFunctionForOptimization(foo);
- assertEquals([1, 2, 3], foo(1, 2, 3));
- assertEquals([1, 2, 3], foo(1, 2, 3));
- assertTrue(Object.isSealed(foo(1,2,3)));
+ assertEquals([1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertEquals([1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertTrue(Object.isSealed(foo(1,2,3, 'a')));
%OptimizeFunctionOnNextCall(foo);
- assertEquals([1, 2, 3], foo(1, 2, 3));
- assertTrue(Object.isSealed(foo(1,2,3)));
+ assertEquals([1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertTrue(Object.isSealed(foo(1,2,3, 'a')));
})();
// Test frozen Array call with multiple parameters.
(() => {
- function foo(x, y, z) { return Object.freeze(new Array(x, y, z)); }
+ function foo(x, y, z, t) { return Object.freeze(new Array(x, y, z, t)); }
%PrepareFunctionForOptimization(foo);
- assertEquals([1, 2, 3], foo(1, 2, 3));
- assertEquals([1, 2, 3], foo(1, 2, 3));
- assertTrue(Object.isFrozen(foo(1,2,3)));
+ assertEquals([1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertEquals([1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertTrue(Object.isFrozen(foo(1,2,3, 'a')));
%OptimizeFunctionOnNextCall(foo);
- assertEquals([1, 2, 3], foo(1, 2, 3));
- assertTrue(Object.isFrozen(foo(1,2,3)));
+ assertEquals([1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertTrue(Object.isFrozen(foo(1,2,3, 'a')));
+})();
+
+// Holey
+// Test non-extensible Array call with multiple parameters.
+(() => {
+ function foo(x, y, z, t) { return Object.preventExtensions([, x, y, z, t]); }
+
+ %PrepareFunctionForOptimization(foo);
+ assertEquals([, 1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertEquals([, 1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertFalse(Object.isExtensible(foo(1,2,3, 'a')));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([, 1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertFalse(Object.isExtensible(foo(1,2,3, 'a')));
+})();
+
+// Test sealed Array call with multiple parameters.
+(() => {
+ function foo(x, y, z, t) { return Object.seal([, x, y, z, t]); }
+
+ %PrepareFunctionForOptimization(foo);
+ assertEquals([, 1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertEquals([, 1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertTrue(Object.isSealed(foo(1,2,3, 'a')));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([, 1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertTrue(Object.isSealed(foo(1,2,3, 'a')));
+})();
+
+// Test frozen Array call with multiple parameters.
+(() => {
+ function foo(x, y, z, t) { return Object.freeze([, x, y, z, t]); }
+
+ %PrepareFunctionForOptimization(foo);
+ assertEquals([, 1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertEquals([, 1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertTrue(Object.isFrozen(foo(1,2,3, 'a')));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([, 1, 2, 3, 'a'], foo(1, 2, 3, 'a'));
+ assertTrue(Object.isFrozen(foo(1,2,3, 'a')));
})();
diff --git a/deps/v8/test/mjsunit/compiler/array-every.js b/deps/v8/test/mjsunit/compiler/array-every.js
index ec925b7937..66230c5e06 100644
--- a/deps/v8/test/mjsunit/compiler/array-every.js
+++ b/deps/v8/test/mjsunit/compiler/array-every.js
@@ -17,27 +17,53 @@
assertTrue(foo([3, 3, 3], {x:3}));
assertFalse(foo([3, 3, 2], {x:3}));
+ // Packed
// Non-extensible array
%PrepareFunctionForOptimization(foo);
- assertTrue(foo(Object.preventExtensions([3, 3, 3]), {x:3}));
- assertFalse(foo(Object.preventExtensions([3, 3, 2]), {x:3}));
+ assertTrue(foo(Object.preventExtensions(['3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.preventExtensions(['3', '3', '2']), {x:'3'}));
%OptimizeFunctionOnNextCall(foo);
- assertTrue(foo(Object.preventExtensions([3, 3, 3]), {x:3}));
- assertFalse(foo(Object.preventExtensions([3, 3, 2]), {x:3}));
+ assertTrue(foo(Object.preventExtensions(['3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.preventExtensions(['3', '3', '2']), {x:'3'}));
// Sealed array
%PrepareFunctionForOptimization(foo);
- assertTrue(foo(Object.seal([3, 3, 3]), {x:3}));
- assertFalse(foo(Object.seal([3, 3, 2]), {x:3}));
+ assertTrue(foo(Object.seal(['3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.seal(['3', '3', '2']), {x:'3'}));
%OptimizeFunctionOnNextCall(foo);
- assertTrue(foo(Object.seal([3, 3, 3]), {x:3}));
- assertFalse(foo(Object.seal([3, 3, 2]), {x:3}));
+ assertTrue(foo(Object.seal(['3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.seal(['3', '3', '2']), {x:'3'}));
// Frozen array
%PrepareFunctionForOptimization(foo);
- assertTrue(foo(Object.freeze([3, 3, 3]), {x:3}));
- assertFalse(foo(Object.freeze([3, 3, 2]), {x:3}));
+ assertTrue(foo(Object.freeze(['3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.freeze(['3', '3', '2']), {x:'3'}));
%OptimizeFunctionOnNextCall(foo);
- assertTrue(foo(Object.freeze([3, 3, 3]), {x:3}));
- assertFalse(foo(Object.freeze([3, 3, 2]), {x:3}));
+ assertTrue(foo(Object.freeze(['3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.freeze(['3', '3', '2']), {x:'3'}));
+
+ // Holey
+ // Non-extensible array
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo(Object.preventExtensions([, '3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.preventExtensions([, '3', '3', '2']), {x:'3'}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(Object.preventExtensions([, '3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.preventExtensions([, '3', '3', '2']), {x:'3'}));
+
+ // Sealed array
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo(Object.seal([, '3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.seal([, '3', '3', '2']), {x:'3'}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(Object.seal([, '3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.seal([, '3', '3', '2']), {x:'3'}));
+
+ // Frozen array
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo(Object.freeze([, '3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.freeze([, '3', '3', '2']), {x:'3'}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(Object.freeze([, '3', '3', '3']), {x:'3'}));
+ assertFalse(foo(Object.freeze([, '3', '3', '2']), {x:'3'}));
})();
diff --git a/deps/v8/test/mjsunit/compiler/array-find.js b/deps/v8/test/mjsunit/compiler/array-find.js
index 29d15d096e..39404cc0a2 100644
--- a/deps/v8/test/mjsunit/compiler/array-find.js
+++ b/deps/v8/test/mjsunit/compiler/array-find.js
@@ -17,27 +17,53 @@
assertEquals(3, foo([1, 2, 3], {x:3}));
assertEquals(undefined, foo([0, 1, 2], {x:3}));
+ // Packed
// Non-extensible
%PrepareFunctionForOptimization(foo);
- assertEquals(3, foo(Object.preventExtensions([1, 2, 3]), {x:3}));
- assertEquals(undefined, foo(Object.preventExtensions([0, 1, 2]), {x:3}));
+ assertEquals(3, foo(Object.preventExtensions(['1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.preventExtensions(['0', 1, 2]), {x:3}));
%OptimizeFunctionOnNextCall(foo);
- assertEquals(3, foo(Object.preventExtensions([1, 2, 3]), {x:3}));
- assertEquals(undefined, foo(Object.preventExtensions([0, 1, 2]), {x:3}));
+ assertEquals(3, foo(Object.preventExtensions(['1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.preventExtensions(['0', 1, 2]), {x:3}));
// Sealed
%PrepareFunctionForOptimization(foo);
- assertEquals(3, foo(Object.seal([1, 2, 3]), {x:3}));
- assertEquals(undefined, foo(Object.seal([0, 1, 2]), {x:3}));
+ assertEquals(3, foo(Object.seal(['1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.seal(['0', 1, 2]), {x:3}));
%OptimizeFunctionOnNextCall(foo);
- assertEquals(3, foo(Object.seal([1, 2, 3]), {x:3}));
- assertEquals(undefined, foo(Object.seal([0, 1, 2]), {x:3}));
+ assertEquals(3, foo(Object.seal(['1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.seal(['0', 1, 2]), {x:3}));
// Frozen
%PrepareFunctionForOptimization(foo);
- assertEquals(3, foo(Object.freeze([1, 2, 3]), {x:3}));
- assertEquals(undefined, foo(Object.freeze([0, 1, 2]), {x:3}));
+ assertEquals(3, foo(Object.freeze(['1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.freeze(['0', 1, 2]), {x:3}));
%OptimizeFunctionOnNextCall(foo);
- assertEquals(3, foo(Object.freeze([1, 2, 3]), {x:3}));
- assertEquals(undefined, foo(Object.freeze([0, 1, 2]), {x:3}));
+ assertEquals(3, foo(Object.freeze(['1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.freeze(['0', 1, 2]), {x:3}));
+
+ // Holey
+ // Non-extensible
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(3, foo(Object.preventExtensions([, '1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.preventExtensions([, '0', 1, 2]), {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo(Object.preventExtensions([, '1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.preventExtensions([, '0', 1, 2]), {x:3}));
+
+ // Sealed
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(3, foo(Object.seal([, '1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.seal([, '0', 1, 2]), {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo(Object.seal([, '1', 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.seal([, '0', 1, 2]), {x:3}));
+
+ // Frozen
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(3, foo(Object.freeze([, 1, 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.freeze([, 0, 1, 2]), {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo(Object.freeze([, 1, 2, 3]), {x:3}));
+ assertEquals(undefined, foo(Object.freeze([, 0, 1, 2]), {x:3}));
})();
diff --git a/deps/v8/test/mjsunit/compiler/array-findindex.js b/deps/v8/test/mjsunit/compiler/array-findindex.js
index 13a77442bd..6da94c105c 100644
--- a/deps/v8/test/mjsunit/compiler/array-findindex.js
+++ b/deps/v8/test/mjsunit/compiler/array-findindex.js
@@ -17,27 +17,53 @@
assertEquals(2, foo([1, 2, 3], {x:3}));
assertEquals(-1, foo([0, 1, 2], {x:3}));
+ // Packed
// Non-extensible
%PrepareFunctionForOptimization(foo);
- assertEquals(2, foo(Object.preventExtensions([1, 2, 3]), {x:3}));
- assertEquals(-1, foo(Object.preventExtensions([0, 1, 2]), {x:3}));
+ assertEquals(2, foo(Object.preventExtensions(['1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.preventExtensions(['0', 1, 2]), {x:3}));
%OptimizeFunctionOnNextCall(foo);
- assertEquals(2, foo(Object.preventExtensions([1, 2, 3]), {x:3}));
- assertEquals(-1, foo(Object.preventExtensions([0, 1, 2]), {x:3}));
+ assertEquals(2, foo(Object.preventExtensions(['1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.preventExtensions(['0', 1, 2]), {x:3}));
// Sealed
%PrepareFunctionForOptimization(foo);
- assertEquals(2, foo(Object.seal([1, 2, 3]), {x:3}));
- assertEquals(-1, foo(Object.seal([0, 1, 2]), {x:3}));
+ assertEquals(2, foo(Object.seal(['1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.seal(['0', 1, 2]), {x:3}));
%OptimizeFunctionOnNextCall(foo);
- assertEquals(2, foo(Object.seal([1, 2, 3]), {x:3}));
- assertEquals(-1, foo(Object.seal([0, 1, 2]), {x:3}));
+ assertEquals(2, foo(Object.seal(['1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.seal(['0', 1, 2]), {x:3}));
// Frozen
%PrepareFunctionForOptimization(foo);
- assertEquals(2, foo(Object.freeze([1, 2, 3]), {x:3}));
- assertEquals(-1, foo(Object.freeze([0, 1, 2]), {x:3}));
+ assertEquals(2, foo(Object.freeze(['1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.freeze(['0', 1, 2]), {x:3}));
%OptimizeFunctionOnNextCall(foo);
- assertEquals(2, foo(Object.freeze([1, 2, 3]), {x:3}));
- assertEquals(-1, foo(Object.freeze([0, 1, 2]), {x:3}));
+ assertEquals(2, foo(Object.freeze(['1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.freeze(['0', 1, 2]), {x:3}));
+
+ // Holey
+ // Non-extensible
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(3, foo(Object.preventExtensions([, '1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.preventExtensions([, '0', 1, 2]), {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo(Object.preventExtensions([, '1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.preventExtensions([, '0', 1, 2]), {x:3}));
+
+ // Sealed
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(3, foo(Object.seal([, '1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.seal([, '0', 1, 2]), {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo(Object.seal([, '1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.seal([, '0', 1, 2]), {x:3}));
+
+ // Frozen
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(3, foo(Object.freeze([, '1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.freeze([, '0', 1, 2]), {x:3}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(3, foo(Object.freeze([, '1', 2, 3]), {x:3}));
+ assertEquals(-1, foo(Object.freeze([, '0', 1, 2]), {x:3}));
})();
diff --git a/deps/v8/test/mjsunit/compiler/array-is-array.js b/deps/v8/test/mjsunit/compiler/array-is-array.js
index 60efa2234b..90785d9f26 100644
--- a/deps/v8/test/mjsunit/compiler/array-is-array.js
+++ b/deps/v8/test/mjsunit/compiler/array-is-array.js
@@ -110,6 +110,7 @@
assertInstanceof(foo({}), TypeError);
})();
+// Packed
// Test JSObjectIsArray in JSTypedLowering for the case that the
// input value is known to be a non-extensible Array literal.
(function() {
@@ -151,3 +152,46 @@
%OptimizeFunctionOnNextCall(foo);
assertTrue(foo());
})();
+
+// Holey
+// Test JSObjectIsArray in JSTypedLowering for the case that the
+// input value is known to be a non-extensible Array literal.
+(function() {
+ function foo() {
+ return Array.isArray(Object.preventExtensions([,]));
+ }
+
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Test JSObjectIsArray in JSTypedLowering for the case that the
+// input value is known to be a sealed Array literal.
+(function() {
+ function foo() {
+ return Array.isArray(Object.seal([,]));
+ }
+
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
+
+// Test JSObjectIsArray in JSTypedLowering for the case that the
+// input value is known to be a frozen Array literal.
+(function() {
+ function foo() {
+ return Array.isArray(Object.freeze([,]));
+ }
+
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/array-length.js b/deps/v8/test/mjsunit/compiler/array-length.js
index aa2210893c..bc1ccf3743 100644
--- a/deps/v8/test/mjsunit/compiler/array-length.js
+++ b/deps/v8/test/mjsunit/compiler/array-length.js
@@ -56,18 +56,36 @@ function MainTest() {
}
MainTest();
+// Packed
// Non-extensible, sealed, frozen
a0 = Object.preventExtensions([]);
-a2 = Object.seal([1,2]);
-a5 = Object.freeze([1,2,3,4,5]);
+a2 = Object.seal([1,'2']);
+a5 = Object.freeze([1,2,'3',4,5]);
MainTest();
a0 = Object.seal([]);
-a2 = Object.freeze([1,2]);
-a5 = Object.preventExtensions([1,2,3,4,5]);
+a2 = Object.freeze([1,'2']);
+a5 = Object.preventExtensions([1,2,'3',4,5]);
MainTest();
a0 = Object.freeze([]);
-a2 = Object.preventExtensions([1,2]);
-a5 = Object.seal([1,2,3,4,5]);
+a2 = Object.preventExtensions([1,'2']);
+a5 = Object.seal([1,2,'3',4,5]);
+MainTest();
+
+// Holey
+// Non-extensible, sealed, frozen
+a0 = Object.preventExtensions([]);
+a2 = Object.seal([,'2']);
+a5 = Object.freeze([,2,'3',4,5]);
+MainTest();
+
+a0 = Object.seal([]);
+a2 = Object.freeze([,'2']);
+a5 = Object.preventExtensions([,2,'3',4,5]);
+MainTest();
+
+a0 = Object.freeze([]);
+a2 = Object.preventExtensions([,'2']);
+a5 = Object.seal([,2,3,4,5]);
MainTest();
diff --git a/deps/v8/test/mjsunit/compiler/array-slice-clone.js b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
index 29d53ff29d..fc002da2c3 100644
--- a/deps/v8/test/mjsunit/compiler/array-slice-clone.js
+++ b/deps/v8/test/mjsunit/compiler/array-slice-clone.js
@@ -21,6 +21,7 @@
}
%PrepareFunctionForOptimization(slice0);
+ %PrepareFunctionForOptimization(slice);
assertEquals(arr, slice());
assertFalse(arr === slice());
@@ -29,7 +30,6 @@
%OptimizeFunctionOnNextCall(slice0);
assertEquals(slice(), slice0());
- %PrepareFunctionForOptimization(slice);
%OptimizeFunctionOnNextCall(slice);
assertEquals(slice(), slice0());
@@ -399,10 +399,11 @@
assertEquals(narr, [6,6,6]);
})();
+// Packed
// Trigger JSCallReducer on slice() and slice(0)
(function() {
// Non-extensible:
- var arr = Object.preventExtensions([1,2,3,4,5]);
+ var arr = Object.preventExtensions([1,2,'a',4,5]);
function slice() {
return arr.slice();
@@ -414,6 +415,7 @@
function test() {
%PrepareFunctionForOptimization(slice0);
+ %PrepareFunctionForOptimization(slice);
assertEquals(arr, slice());
assertFalse(arr === slice());
@@ -422,7 +424,6 @@
%OptimizeFunctionOnNextCall(slice0);
assertEquals(slice(), slice0());
- %PrepareFunctionForOptimization(slice);
%OptimizeFunctionOnNextCall(slice);
assertEquals(slice(), slice0());
@@ -431,10 +432,51 @@
test();
// Sealed
- arr = Object.seal([1,2,3,4,5]);
+ arr = Object.seal([1,2,'a',4,5]);
+ test();
+
+ // Frozen
+ arr = Object.freeze([1,2,'a',4,5]);
+ test();
+})();
+
+// Holey
+// Trigger JSCallReducer on slice() and slice(0)
+(function() {
+ // Non-extensible:
+ var arr = Object.preventExtensions([,1,2,'a',4,5]);
+
+ function slice() {
+ return arr.slice();
+ }
+
+ function slice0() {
+ return arr.slice(0);
+ }
+
+ function test() {
+ %PrepareFunctionForOptimization(slice0);
+ %PrepareFunctionForOptimization(slice);
+ assertEquals(arr, slice());
+ assertFalse(arr === slice());
+ assertEquals(slice(), slice0());
+ assertEquals(slice0(), slice());
+
+ %OptimizeFunctionOnNextCall(slice0);
+ assertEquals(slice(), slice0());
+ %OptimizeFunctionOnNextCall(slice);
+
+ assertEquals(slice(), slice0());
+ assertOptimized(slice0);
+ assertOptimized(slice);
+ }
+ test();
+
+ // Sealed
+ arr = Object.seal([,1,2,'a',4,5]);
test();
// Frozen
- arr = Object.freeze([1,2,3,4,5]);
+ arr = Object.freeze([,1,2,'a',4,5]);
test();
})();
diff --git a/deps/v8/test/mjsunit/compiler/array-some.js b/deps/v8/test/mjsunit/compiler/array-some.js
index eb1fc4814d..5411b17fdd 100644
--- a/deps/v8/test/mjsunit/compiler/array-some.js
+++ b/deps/v8/test/mjsunit/compiler/array-some.js
@@ -17,27 +17,53 @@
assertTrue(foo([1, 2, 3], {x:3}));
assertFalse(foo([0, 1, 2], {x:3}));
+ // Packed
// Non-extensible
%PrepareFunctionForOptimization(foo);
- assertTrue(foo(Object.preventExtensions([1, 2, 3]), {x:3}));
- assertFalse(foo(Object.preventExtensions([0, 1, 2]), {x:3}));
+ assertTrue(foo(Object.preventExtensions([1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.preventExtensions([0, 1, '2']), {x:'3'}));
%OptimizeFunctionOnNextCall(foo);
- assertTrue(foo(Object.preventExtensions([1, 2, 3]), {x:3}));
- assertFalse(foo(Object.preventExtensions([0, 1, 2]), {x:3}));
+ assertTrue(foo(Object.preventExtensions([1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.preventExtensions([0, 1, '2']), {x:'3'}));
// Sealed
%PrepareFunctionForOptimization(foo);
- assertTrue(foo(Object.seal([1, 2, 3]), {x:3}));
- assertFalse(foo(Object.seal([0, 1, 2]), {x:3}));
+ assertTrue(foo(Object.seal([1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.seal([0, 1, '2']), {x:'3'}));
%OptimizeFunctionOnNextCall(foo);
- assertTrue(foo(Object.seal([1, 2, 3]), {x:3}));
- assertFalse(foo(Object.seal([0, 1, 2]), {x:3}));
+ assertTrue(foo(Object.seal([1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.seal([0, 1, '2']), {x:'3'}));
// Frozen
%PrepareFunctionForOptimization(foo);
- assertTrue(foo(Object.freeze([1, 2, 3]), {x:3}));
- assertFalse(foo(Object.freeze([0, 1, 2]), {x:3}));
+ assertTrue(foo(Object.freeze([1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.freeze([0, 1, '2']), {x:'3'}));
%OptimizeFunctionOnNextCall(foo);
- assertTrue(foo(Object.freeze([1, 2, 3]), {x:3}));
- assertFalse(foo(Object.freeze([0, 1, 2]), {x:3}));
+ assertTrue(foo(Object.freeze([1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.freeze([0, 1, '2']), {x:'3'}));
+
+ // Holey
+ // Non-extensible
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo(Object.preventExtensions([, 1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.preventExtensions([, 0, 1, '2']), {x:'3'}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(Object.preventExtensions([, 1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.preventExtensions([, 0, 1, '2']), {x:'3'}));
+
+ // Sealed
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo(Object.seal([, 1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.seal([, 0, 1, '2']), {x:'3'}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(Object.seal([, 1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.seal([, 0, 1, '2']), {x:'3'}));
+
+ // Frozen
+ %PrepareFunctionForOptimization(foo);
+ assertTrue(foo(Object.freeze([, 1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.freeze([, 0, 1, '2']), {x:'3'}));
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo(Object.freeze([, 1, 2, '3']), {x:'3'}));
+ assertFalse(foo(Object.freeze([, 0, 1, '2']), {x:'3'}));
})();
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
index eca898c966..50318b5639 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
@@ -43,7 +43,6 @@ function new_object() {
function add_field(obj) {
// Assign twice to make the field non-constant.
- // TODO(ishell): update test once constant field tracking is done.
obj.c = 0;
obj.c = 3;
}
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js b/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
new file mode 100644
index 0000000000..cdeb7f2ffc
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-add-static.js
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Check that constant-folding of arithmetic results in identical nodes.
+(function() {
+ function foo(x) {
+ %TurbofanStaticAssert(1 * x == x + 0);
+ }
+ foo(121);
+ foo(122);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(123);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
index 746277d67b..0f8891769b 100644
--- a/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
+++ b/deps/v8/test/mjsunit/compiler/constant-fold-cow-array.js
@@ -36,9 +36,10 @@
assertUnoptimized(foo);
})();
+// Packed
// Non-extensible
(function() {
- const a = Object.preventExtensions([1, 2, 3]);
+ const a = Object.preventExtensions([1, 2, '3']);
const foo = () => a[0];
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
@@ -52,7 +53,7 @@
// Sealed
(function() {
- const a = Object.seal([1, 2, 3]);
+ const a = Object.seal([1, 2, '3']);
const foo = () => a[0];
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
@@ -66,7 +67,50 @@
// Frozen
(function() {
- const a = Object.freeze([1, 2, 3]);
+ const a = Object.freeze([1, 2, '3']);
+ const foo = () => a[0];
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(1, foo());
+ assertEquals(1, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo());
+ assertOptimized(foo);
+ a[0] = 42;
+ assertEquals(1, foo());
+})();
+
+// Holey
+// Non-extensible
+(function() {
+ const a = Object.preventExtensions([1, 2, , '3']);
+ const foo = () => a[0];
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(1, foo());
+ assertEquals(1, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo());
+ assertOptimized(foo);
+ a[0] = 42;
+ assertEquals(42, foo());
+})();
+
+// Sealed
+(function() {
+ const a = Object.seal([1, 2, , '3']);
+ const foo = () => a[0];
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(1, foo());
+ assertEquals(1, foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(1, foo());
+ assertOptimized(foo);
+ a[0] = 42;
+ assertEquals(42, foo());
+})();
+
+// Frozen
+(function() {
+ const a = Object.freeze([1, 2, , '3']);
const foo = () => a[0];
%PrepareFunctionForOptimization(foo);
assertEquals(1, foo());
diff --git a/deps/v8/test/mjsunit/compiler/field-representation-tracking.js b/deps/v8/test/mjsunit/compiler/field-representation-tracking.js
index 660271a858..7f03348970 100644
--- a/deps/v8/test/mjsunit/compiler/field-representation-tracking.js
+++ b/deps/v8/test/mjsunit/compiler/field-representation-tracking.js
@@ -12,7 +12,9 @@
function foo(o) { return o.x; }
%PrepareFunctionForOptimization(foo);
- foo(new O(1));
+ // We need to keep an instance around to make the GC stress testing work.
+ const o1 = new O(1);
+ foo(o1);
foo(new O(2));
%OptimizeFunctionOnNextCall(foo);
foo(new O(3));
@@ -29,7 +31,9 @@
function foo(o) { o.x = 0; }
%PrepareFunctionForOptimization(foo);
- foo(new O(1));
+ // We need to keep an instance around to make the GC stress testing work.
+ const o1 = new O(1);
+ foo(o1);
foo(new O(2));
%OptimizeFunctionOnNextCall(foo);
foo(new O(3));
@@ -46,7 +50,9 @@
function foo(o) { return o.x; }
%PrepareFunctionForOptimization(foo);
- foo(new O(null));
+ // We need to keep an instance around to make the GC stress testing work.
+ const onull = new O(null);
+ foo(onull);
foo(new O("Hello"));
%OptimizeFunctionOnNextCall(foo);
foo(new O({}));
@@ -63,7 +69,9 @@
function foo(o) { o.x = true; }
%PrepareFunctionForOptimization(foo);
- foo(new O(null));
+ // We need to keep an instance around to make the GC stress testing work.
+ const onull = new O(null);
+ foo(onull);
foo(new O("Hello"));
%OptimizeFunctionOnNextCall(foo);
foo(new O({}));
diff --git a/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
new file mode 100644
index 0000000000..e873dd0e55
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/load-elimination-const-field.js
@@ -0,0 +1,156 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Check that load elimination on const-marked fields works
+(function() {
+ function maybe_sideeffect(b) { return 42; }
+
+ %NeverOptimizeFunction(maybe_sideeffect);
+
+ class B {
+ constructor(x) {
+ this.value = x;
+ }
+ }
+ %EnsureFeedbackVectorForFunction(B);
+
+
+ function lit_const_smi() {
+ let b = { value: 123 };
+ maybe_sideeffect(b);
+ let v1 = b.value;
+ maybe_sideeffect(b);
+ let v2 = b.value;
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, 123));
+ }
+
+ lit_const_smi(); lit_const_smi();
+ %OptimizeFunctionOnNextCall(lit_const_smi); lit_const_smi();
+
+
+ function lit_const_object() {
+ let o = {x: 123};
+ let b = { value: o };
+ maybe_sideeffect(b);
+ let v1 = b.value;
+ maybe_sideeffect(b);
+ let v2 = b.value;
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, o));
+ }
+
+ lit_const_object(); lit_const_object();
+ %OptimizeFunctionOnNextCall(lit_const_object); lit_const_object();
+
+
+ function lit_computed_smi(k) {
+ let kk = 2 * k;
+ let b = { value: kk };
+ maybe_sideeffect(b);
+ let v1 = b.value;
+ maybe_sideeffect(b);
+ let v2 = b.value;
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, kk));
+ }
+
+ lit_computed_smi(1); lit_computed_smi(2);
+ %OptimizeFunctionOnNextCall(lit_computed_smi); lit_computed_smi(3);
+
+ // TODO(bmeurer): Fix const tracking for double fields in object literals
+ // lit_computed_smi(1.1); lit_computed_smi(2.2);
+ // %OptimizeFunctionOnNextCall(lit_computed_smi); lit_computed_smi(3.3);
+
+
+ function lit_param_object(k) {
+ let b = { value: k };
+ maybe_sideeffect(b);
+ let v1 = b.value;
+ maybe_sideeffect(b);
+ let v2 = b.value;
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, k));
+ }
+
+ lit_param_object({x: 1}); lit_param_object({x: 2});
+ %OptimizeFunctionOnNextCall(lit_param_object); lit_param_object({x: 3});
+
+
+ function nested_lit_param(k) {
+ let b = { x: { value: k } };
+ maybe_sideeffect(b);
+ let v1 = b.x.value;
+ maybe_sideeffect(b);
+ let v2 = b.x.value;
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, k));
+ }
+
+ nested_lit_param(1); nested_lit_param(2);
+ %OptimizeFunctionOnNextCall(nested_lit_param); nested_lit_param(3);
+
+ // TODO(bmeurer): Fix const tracking for double fields in object literals
+ // nested_lit_param(1.1); nested_lit_param(2.2);
+ // %OptimizeFunctionOnNextCall(nested_lit_param); nested_lit_param(3.3);
+
+
+ function nested_lit_param_object(k) {
+ let b = { x: { value: k } };
+ maybe_sideeffect(b);
+ let v1 = b.x.value;
+ maybe_sideeffect(b);
+ let v2 = b.x.value;
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, k));
+ }
+
+ nested_lit_param_object({x: 1}); nested_lit_param_object({x: 2});
+ %OptimizeFunctionOnNextCall(nested_lit_param_object);
+ nested_lit_param_object({x: 3});
+
+
+ %EnsureFeedbackVectorForFunction(inst_param);
+ function inst_param(k) {
+ let b = new B(k);
+ maybe_sideeffect(b);
+ let v1 = b.value;
+ maybe_sideeffect(b);
+ let v2 = b.value;
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, k));
+ }
+
+ inst_param(1); inst_param(2);
+ %OptimizeFunctionOnNextCall(inst_param); inst_param(3);
+
+ // TODO(gsps): Reenable once we fully support const field information
+ // tracking in the presence of pointer compression.
+ // inst_param(1.1); inst_param(2.2);
+ // %OptimizeFunctionOnNextCall(inst_param); inst_param(3.3);
+
+ inst_param({x: 1}); inst_param({x: 2});
+ %OptimizeFunctionOnNextCall(inst_param); inst_param({x: 3});
+
+
+ %EnsureFeedbackVectorForFunction(inst_computed);
+ function inst_computed(k) {
+ let kk = 2 * k;
+ let b = new B(kk);
+ maybe_sideeffect(b);
+ let v1 = b.value;
+ maybe_sideeffect(b);
+ let v2 = b.value;
+ %TurbofanStaticAssert(Object.is(v1, v2));
+ %TurbofanStaticAssert(Object.is(v2, kk));
+ }
+
+ inst_computed(1); inst_computed(2);
+ %OptimizeFunctionOnNextCall(inst_computed); inst_computed(3);
+
+ inst_computed(1.1); inst_computed(2.2);
+ %OptimizeFunctionOnNextCall(inst_computed); inst_computed(3.3);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js b/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js
index b0b0cb28d0..94b30db2d0 100644
--- a/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js
+++ b/deps/v8/test/mjsunit/compiler/native-context-specialization-hole-check.js
@@ -38,6 +38,7 @@ function f() {
arr[500] = 20;
arr[10] = arr[50];
}
+%EnsureFeedbackVectorForFunction(f);
function g() {
f();
@@ -47,4 +48,4 @@ g();
g();
%OptimizeFunctionOnNextCall(g);
g();
-assertTrue(%GetDeoptCount(g) > 0);
+assertUnoptimized(g);
diff --git a/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js b/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js
index 8ca710a5ef..f0df98a0cd 100644
--- a/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js
+++ b/deps/v8/test/mjsunit/compiler/number-comparison-truncations.js
@@ -32,6 +32,7 @@
(function() {
// Produce a SpeculativeNumberEqual with Number feedback.
function bar(x, y) { return x === y; }
+ %EnsureFeedbackVectorForFunction(bar);
bar(0.1, 0.5);
bar(-0, 100);
@@ -83,6 +84,7 @@
(function() {
// Produce a SpeculativeNumberLessThan with Number feedback.
function bar(x, y) { return x < y; }
+ %EnsureFeedbackVectorForFunction(bar);
bar(0.1, 0.5);
bar(-0, 100);
@@ -134,6 +136,7 @@
(function() {
// Produce a SpeculativeNumberLessThanOrEqual with Number feedback.
function bar(x, y) { return x <= y; }
+ %EnsureFeedbackVectorForFunction(bar);
bar(0.1, 0.5);
bar(-0, 100);
diff --git a/deps/v8/test/mjsunit/compiler/optimized-for-in.js b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
index 5af7caaef2..70c45cecfe 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-for-in.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
@@ -269,6 +269,7 @@ function osr_inner(t, limit) {
}
return r;
}
+%PrepareFunctionForOptimization(osr_inner);
function osr_outer(t, osr_after) {
var r = 1;
@@ -281,6 +282,7 @@ function osr_outer(t, osr_after) {
}
return r;
}
+%PrepareFunctionForOptimization(osr_outer);
function osr_outer_and_deopt(t, osr_after) {
var r = 1;
@@ -290,6 +292,7 @@ function osr_outer_and_deopt(t, osr_after) {
}
return r;
}
+%PrepareFunctionForOptimization(osr_outer_and_deopt);
function test_osr() {
with ({}) {} // Disable optimizations of this function.
diff --git a/deps/v8/test/mjsunit/compiler/osr-alignment.js b/deps/v8/test/mjsunit/compiler/osr-alignment.js
index f815e712ee..7a348ca377 100644
--- a/deps/v8/test/mjsunit/compiler/osr-alignment.js
+++ b/deps/v8/test/mjsunit/compiler/osr-alignment.js
@@ -38,6 +38,7 @@ function f1() {
}
return sum;
}
+%PrepareFunctionForOptimization(f1);
function f2() {
var sum = 0;
@@ -50,6 +51,7 @@ function f2() {
}
return sum;
}
+%PrepareFunctionForOptimization(f2);
function f3() {
var sum = 0;
@@ -62,6 +64,7 @@ function f3() {
}
return sum;
}
+%PrepareFunctionForOptimization(f3);
function test1() {
var j = 11;
diff --git a/deps/v8/test/mjsunit/compiler/osr-array-len.js b/deps/v8/test/mjsunit/compiler/osr-array-len.js
index aaee860d61..f691380e6a 100644
--- a/deps/v8/test/mjsunit/compiler/osr-array-len.js
+++ b/deps/v8/test/mjsunit/compiler/osr-array-len.js
@@ -14,6 +14,7 @@ function fastaRandom(n, table) {
n--;
}
}
+%PrepareFunctionForOptimization(fastaRandom);
print("---BEGIN 1");
assertEquals(undefined, fastaRandom(6, null));
diff --git a/deps/v8/test/mjsunit/compiler/osr-assert.js b/deps/v8/test/mjsunit/compiler/osr-assert.js
index c67ad536ad..5a0ff2e71c 100644
--- a/deps/v8/test/mjsunit/compiler/osr-assert.js
+++ b/deps/v8/test/mjsunit/compiler/osr-assert.js
@@ -41,4 +41,5 @@ function f(x, b, c) {
return a + 4;
}
+%PrepareFunctionForOptimization(f);
assertEquals(55, f(5, "122", "1221"));
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
index bcc7cdd47d..ed5bbf1b57 100644
--- a/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope-id.js
@@ -24,6 +24,7 @@ function foo() {
result.push(out);
return result;
}
+%PrepareFunctionForOptimization(foo);
function check() {
diff --git a/deps/v8/test/mjsunit/compiler/osr-block-scope.js b/deps/v8/test/mjsunit/compiler/osr-block-scope.js
index c60f8af6c9..7593909673 100644
--- a/deps/v8/test/mjsunit/compiler/osr-block-scope.js
+++ b/deps/v8/test/mjsunit/compiler/osr-block-scope.js
@@ -28,6 +28,7 @@ function test(expected, func, depth) {
for (var depth = 1; depth < 4; depth++) {
var body = nest(orig, name, depth);
func = eval("(" + body + ")");
+ %PrepareFunctionForOptimization(func);
assertEquals(expected, func());
assertEquals(expected, func());
@@ -47,6 +48,7 @@ function foo() {
}
return result;
}
+%PrepareFunctionForOptimization(foo);
test(45, foo);
@@ -58,6 +60,7 @@ function bar() {
}
return sum;
}
+%PrepareFunctionForOptimization(bar);
test(45, bar);
@@ -71,6 +74,7 @@ function bon() {
return sum;
}
}
+%PrepareFunctionForOptimization(bon);
test(45, bon);
@@ -87,6 +91,7 @@ function row() {
}
return 11;
}
+%PrepareFunctionForOptimization(row);
test(7, row);
@@ -98,6 +103,7 @@ function nub() {
}
return i;
}
+%PrepareFunctionForOptimization(nub);
test(2, nub);
@@ -112,5 +118,6 @@ function kub() {
}
return result;
}
+%PrepareFunctionForOptimization(kub);
test(1, kub);
diff --git a/deps/v8/test/mjsunit/compiler/osr-for-let.js b/deps/v8/test/mjsunit/compiler/osr-for-let.js
index b8cef780b5..3ecbe04ed2 100644
--- a/deps/v8/test/mjsunit/compiler/osr-for-let.js
+++ b/deps/v8/test/mjsunit/compiler/osr-for-let.js
@@ -24,6 +24,7 @@ function bar() {
}
return result;
}
+%PrepareFunctionForOptimization(bar);
test(4005, bar);
@@ -35,6 +36,7 @@ function baz() {
}
return sum;
}
+%PrepareFunctionForOptimization(baz);
test(2, baz);
@@ -46,6 +48,7 @@ function qux() {
}
return result;
}
+%PrepareFunctionForOptimization(qux);
test(1, qux);
@@ -60,6 +63,7 @@ function nux() {
}
return result;
}
+%PrepareFunctionForOptimization(nux);
test(1, nux);
@@ -78,5 +82,6 @@ function blo() {
}
return result;
}
+%PrepareFunctionForOptimization(blo);
test(4005, blo());
diff --git a/deps/v8/test/mjsunit/compiler/osr-forin-nested.js b/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
index dd810897e0..d3e0c19721 100644
--- a/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
+++ b/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
@@ -18,6 +18,7 @@ function foo(t) {
}
return 5;
}
+%PrepareFunctionForOptimization(foo);
test(5, foo, {x:20});
@@ -31,5 +32,6 @@ function bar(t) {
}
return sum;
}
+%PrepareFunctionForOptimization(bar);
test(62, bar, {x:20,y:11});
diff --git a/deps/v8/test/mjsunit/compiler/osr-infinite.js b/deps/v8/test/mjsunit/compiler/osr-infinite.js
index 24c7add272..bfd4b7e3c8 100644
--- a/deps/v8/test/mjsunit/compiler/osr-infinite.js
+++ b/deps/v8/test/mjsunit/compiler/osr-infinite.js
@@ -11,6 +11,7 @@ function thrower() {
if (x == 5) %OptimizeOsr(1);
if (x == 10) throw "terminate";
}
+%PrepareFunctionForOptimization(thrower);
%NeverOptimizeFunction(thrower); // Don't want to inline the thrower.
%NeverOptimizeFunction(test); // Don't want to inline the func into test.
@@ -25,18 +26,22 @@ function test(func) {
function n1() {
while (true) thrower();
}
+%PrepareFunctionForOptimization(n1);
function n2() {
while (true) while (true) thrower();
}
+%PrepareFunctionForOptimization(n2);
function n3() {
while (true) while (true) while (true) thrower();
}
+%PrepareFunctionForOptimization(n3);
function n4() {
while (true) while (true) while (true) while (true) thrower();
}
+%PrepareFunctionForOptimization(n4);
function b1(a) {
while (true) {
@@ -44,6 +49,7 @@ function b1(a) {
if (a) break
}
}
+%PrepareFunctionForOptimization(b1);
function b2(a) {
@@ -54,6 +60,7 @@ function b2(a) {
}
}
}
+%PrepareFunctionForOptimization(b2);
function b3(a) {
@@ -67,6 +74,7 @@ function b3(a) {
}
}
}
+%PrepareFunctionForOptimization(b3);
test(n1);
diff --git a/deps/v8/test/mjsunit/compiler/osr-labeled.js b/deps/v8/test/mjsunit/compiler/osr-labeled.js
index 1384e9a715..ba905f394e 100644
--- a/deps/v8/test/mjsunit/compiler/osr-labeled.js
+++ b/deps/v8/test/mjsunit/compiler/osr-labeled.js
@@ -17,6 +17,7 @@ function foo() {
}
return sum;
}
+%PrepareFunctionForOptimization(foo);
assertEquals(30, foo());
assertEquals(30, foo());
@@ -36,6 +37,7 @@ function bar(a) {
}
return sum;
}
+%PrepareFunctionForOptimization(bar);
assertEquals(1, bar(1));
assertEquals(1, bar(1));
diff --git a/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js b/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
index 4d1798c929..4b6fd766f7 100644
--- a/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
+++ b/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
@@ -11,6 +11,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f0);
function f1(a) {
for (var i = 0; i < 3; i = i + 1 | 0) {
@@ -18,6 +19,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f1);
function f2(a,b) {
for (var i = 0; i < 3; i = i + 1 | 0) {
@@ -25,6 +27,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f2);
function f3(a,b,c) {
for (var i = 0; i < 3; i = i + 1 | 0) {
@@ -32,6 +35,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f3);
function f4(a,b,c,d) {
for (var i = 0; i < 3; i = i + 1 | 0) {
@@ -39,6 +43,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f4);
function bar() {
assertEquals(3, f0().blah);
diff --git a/deps/v8/test/mjsunit/compiler/osr-literals.js b/deps/v8/test/mjsunit/compiler/osr-literals.js
index f2051dced7..c5179d0e18 100644
--- a/deps/v8/test/mjsunit/compiler/osr-literals.js
+++ b/deps/v8/test/mjsunit/compiler/osr-literals.js
@@ -11,6 +11,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f0);
function f1(a) {
for (var i = 0; i < 3; i = i + 1 | 0) {
@@ -18,6 +19,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f1);
function f2(a,b) {
for (var i = 0; i < 3; i = i + 1 | 0) {
@@ -25,6 +27,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f2);
function f3(a,b,c) {
for (var i = 0; i < 3; i = i + 1 | 0) {
@@ -32,6 +35,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f3);
function f4(a,b,c,d) {
for (var i = 0; i < 3; i = i + 1 | 0) {
@@ -39,6 +43,7 @@ function mod() {
}
return {blah: i};
}
+ %PrepareFunctionForOptimization(f4);
function bar() {
assertEquals(3, f0().blah);
diff --git a/deps/v8/test/mjsunit/compiler/osr-manual1.js b/deps/v8/test/mjsunit/compiler/osr-manual1.js
index c3db796f11..0a24ba6d3e 100644
--- a/deps/v8/test/mjsunit/compiler/osr-manual1.js
+++ b/deps/v8/test/mjsunit/compiler/osr-manual1.js
@@ -22,10 +22,12 @@ function gen(w) { // defeat compiler cache.
"} f" + num;
return eval(src);
}
+%PrepareFunctionForOptimization(gen);
function check(x,a,b,c) {
for (var i = 0; i < 3; i++) {
var f = gen(i);
+ %PrepareFunctionForOptimization(f);
assertEquals(x, f(a, b, c));
}
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-manual2.js b/deps/v8/test/mjsunit/compiler/osr-manual2.js
index de7ec243fe..3359e83d2e 100644
--- a/deps/v8/test/mjsunit/compiler/osr-manual2.js
+++ b/deps/v8/test/mjsunit/compiler/osr-manual2.js
@@ -26,6 +26,7 @@ function gen(w) { // defeat compiler cache.
function check(x,a,b,c) {
for (var i = 0; i < 3; i++) {
var f = gen(i);
+ %PrepareFunctionForOptimization(f);
assertEquals(x, f(a, b, c));
}
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-maze1.js b/deps/v8/test/mjsunit/compiler/osr-maze1.js
index da17282742..2f51d3de21 100644
--- a/deps/v8/test/mjsunit/compiler/osr-maze1.js
+++ b/deps/v8/test/mjsunit/compiler/osr-maze1.js
@@ -44,6 +44,7 @@ function bar(goal) {
}
return sum;
}
+%PrepareFunctionForOptimization(bar);
for (var i = 0; i < 13; i++) {
%DeoptimizeFunction(bar);
diff --git a/deps/v8/test/mjsunit/compiler/osr-maze2.js b/deps/v8/test/mjsunit/compiler/osr-maze2.js
index 1fc1cd2db1..5255dab191 100644
--- a/deps/v8/test/mjsunit/compiler/osr-maze2.js
+++ b/deps/v8/test/mjsunit/compiler/osr-maze2.js
@@ -59,5 +59,6 @@ function gen(i) {
for (var i = 1; i < 10; i++) {
var f = gen(i);
+ %PrepareFunctionForOptimization(f);
assertEquals(1979, f());
}
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested.js b/deps/v8/test/mjsunit/compiler/osr-nested.js
index 28c42c8f73..7add40ed53 100644
--- a/deps/v8/test/mjsunit/compiler/osr-nested.js
+++ b/deps/v8/test/mjsunit/compiler/osr-nested.js
@@ -40,6 +40,7 @@ function f() {
}
return sum;
}
+%PrepareFunctionForOptimization(f);
assertEquals(1450, f());
diff --git a/deps/v8/test/mjsunit/compiler/osr-nested2b.js b/deps/v8/test/mjsunit/compiler/osr-nested2b.js
index 18088114a4..390979dda7 100644
--- a/deps/v8/test/mjsunit/compiler/osr-nested2b.js
+++ b/deps/v8/test/mjsunit/compiler/osr-nested2b.js
@@ -19,7 +19,7 @@ function f() {
return sum;
}
-
+%PrepareFunctionForOptimization(f);
assertEquals(15000, f());
assertEquals(15000, f());
assertEquals(15000, f());
diff --git a/deps/v8/test/mjsunit/compiler/osr-one.js b/deps/v8/test/mjsunit/compiler/osr-one.js
index d52a1c8089..3b38715a31 100644
--- a/deps/v8/test/mjsunit/compiler/osr-one.js
+++ b/deps/v8/test/mjsunit/compiler/osr-one.js
@@ -16,5 +16,6 @@ function f(x) {
}
return sum;
}
+%PrepareFunctionForOptimization(f);
assertEquals(50, f(5));
diff --git a/deps/v8/test/mjsunit/compiler/osr-regex-id.js b/deps/v8/test/mjsunit/compiler/osr-regex-id.js
index e0b4dad1dc..1f66297ccd 100644
--- a/deps/v8/test/mjsunit/compiler/osr-regex-id.js
+++ b/deps/v8/test/mjsunit/compiler/osr-regex-id.js
@@ -13,6 +13,7 @@ function foo(a) {
}
return r;
}
+%PrepareFunctionForOptimization(foo);
function bar(a) {
for (var i = 0; i < 10; i++) {
@@ -21,6 +22,7 @@ function bar(a) {
}
return r;
}
+%PrepareFunctionForOptimization(bar);
function baz(a) {
for (var i = 0; i < 10; i++) {
@@ -28,6 +30,7 @@ function baz(a) {
}
return /\0/;
}
+%PrepareFunctionForOptimization(baz);
function qux(a) {
for (var i = 0; i < 10; i++) {
@@ -39,6 +42,7 @@ function qux(a) {
}
return r;
}
+%PrepareFunctionForOptimization(qux);
function test(f) {
// Test the reference equality of regex's created in OSR'd function.
diff --git a/deps/v8/test/mjsunit/compiler/osr-simple.js b/deps/v8/test/mjsunit/compiler/osr-simple.js
index ddbc5f8867..c8585f2991 100644
--- a/deps/v8/test/mjsunit/compiler/osr-simple.js
+++ b/deps/v8/test/mjsunit/compiler/osr-simple.js
@@ -15,6 +15,7 @@ function f() {
}
return sum;
}
+%PrepareFunctionForOptimization(f);
for (var i = 0; i < 2; i++) {
diff --git a/deps/v8/test/mjsunit/compiler/osr-try-catch.js b/deps/v8/test/mjsunit/compiler/osr-try-catch.js
index 9924e88d55..79389fd8ca 100644
--- a/deps/v8/test/mjsunit/compiler/osr-try-catch.js
+++ b/deps/v8/test/mjsunit/compiler/osr-try-catch.js
@@ -19,6 +19,7 @@ function SingleLoop() {
}
}
}
+%PrepareFunctionForOptimization(SingleLoop);
// These function could also fail if the exception handlers are not updated at
@@ -38,6 +39,7 @@ function EmptyBody() {
a++;
}
}
+%PrepareFunctionForOptimization(EmptyBody);
function NestedLoops() {
for (var a = 0; a < 2; a++) {
@@ -52,6 +54,7 @@ function NestedLoops() {
}
}
}
+%PrepareFunctionForOptimization(NestedLoops);
SingleLoop();
diff --git a/deps/v8/test/mjsunit/compiler/osr-two.js b/deps/v8/test/mjsunit/compiler/osr-two.js
index 36842b4beb..4409dc3bae 100644
--- a/deps/v8/test/mjsunit/compiler/osr-two.js
+++ b/deps/v8/test/mjsunit/compiler/osr-two.js
@@ -20,5 +20,6 @@ function f(x) {
}
return sum;
}
+%PrepareFunctionForOptimization(f);
assertEquals(500, f(5));
diff --git a/deps/v8/test/mjsunit/compiler/osr-while-let.js b/deps/v8/test/mjsunit/compiler/osr-while-let.js
index 11ebc4bb35..906e92bb21 100644
--- a/deps/v8/test/mjsunit/compiler/osr-while-let.js
+++ b/deps/v8/test/mjsunit/compiler/osr-while-let.js
@@ -35,6 +35,7 @@ function foo() {
}
return result;
}
+%PrepareFunctionForOptimization(foo);
test(1, foo);
@@ -54,5 +55,6 @@ function smo() {
}
return result;
}
+%PrepareFunctionForOptimization(smo);
test(11, smo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-607493.js b/deps/v8/test/mjsunit/compiler/regress-607493.js
index 540b47e2d2..adfef8a6ee 100644
--- a/deps/v8/test/mjsunit/compiler/regress-607493.js
+++ b/deps/v8/test/mjsunit/compiler/regress-607493.js
@@ -18,6 +18,7 @@
}
}
+ %PrepareFunctionForOptimization(g);
g();
})();
@@ -33,5 +34,6 @@
}
}
+ %PrepareFunctionForOptimization(g);
g();
})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-645851.js b/deps/v8/test/mjsunit/compiler/regress-645851.js
index 0ea70bd71e..f79a942405 100644
--- a/deps/v8/test/mjsunit/compiler/regress-645851.js
+++ b/deps/v8/test/mjsunit/compiler/regress-645851.js
@@ -16,4 +16,5 @@ function f() {
return sum;
}
+%PrepareFunctionForOptimization(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-650215.js b/deps/v8/test/mjsunit/compiler/regress-650215.js
index 95ae6cfed1..67ac3d99a9 100644
--- a/deps/v8/test/mjsunit/compiler/regress-650215.js
+++ b/deps/v8/test/mjsunit/compiler/regress-650215.js
@@ -13,4 +13,5 @@ function f() {
return x;
}
+%PrepareFunctionForOptimization(f);
assertEquals(0, f());
diff --git a/deps/v8/test/mjsunit/compiler/regress-669517.js b/deps/v8/test/mjsunit/compiler/regress-669517.js
index 1905260978..d6bbfd8cd1 100644
--- a/deps/v8/test/mjsunit/compiler/regress-669517.js
+++ b/deps/v8/test/mjsunit/compiler/regress-669517.js
@@ -6,12 +6,14 @@
(function() {
"use asm";
- return function() {
+ var f = function() {
for (var i = 0; i < 10; i++) {
if (i == 5) {
%OptimizeOsr();
}
}
with (Object());
- }
+ };
+ %PrepareFunctionForOptimization(f);
+ return f;
})()();
diff --git a/deps/v8/test/mjsunit/compiler/regress-673244.js b/deps/v8/test/mjsunit/compiler/regress-673244.js
index b18d47b8dd..b962e6a517 100644
--- a/deps/v8/test/mjsunit/compiler/regress-673244.js
+++ b/deps/v8/test/mjsunit/compiler/regress-673244.js
@@ -12,4 +12,5 @@ function f() {
}
}
+%PrepareFunctionForOptimization(f);
f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-803022.js b/deps/v8/test/mjsunit/compiler/regress-803022.js
index 30e13cf032..eea754544f 100644
--- a/deps/v8/test/mjsunit/compiler/regress-803022.js
+++ b/deps/v8/test/mjsunit/compiler/regress-803022.js
@@ -13,4 +13,5 @@ function foo() {
}
}
+%PrepareFunctionForOptimization(foo);
foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-9017.js b/deps/v8/test/mjsunit/compiler/regress-9017.js
new file mode 100644
index 0000000000..7cbd4e0178
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-9017.js
@@ -0,0 +1,39 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --noturbo-inlining --noturbo-verify-allocation
+
+// Ensure that very large stack frames can be used successfully.
+// The flag --noturbo-verify-allocation is to make this run a little faster; it
+// shouldn't affect the behavior.
+
+const frame_size = 4096 * 4; // 4 pages
+const num_locals = frame_size / 8; // Assume 8-byte floating point values
+
+function f() { return 0.1; }
+
+// Function g, on positive inputs, will call itself recursively. On negative
+// inputs, it does a computation that requires a large number of locals.
+// The flag --noturbo-inlining is important to keep the compiler from realizing
+// that all of this work is for nothing.
+let g_text = "if (input === 0) return; if (input > 0) return g(input - 1);";
+g_text += " var inc = f(); var a0 = 0;";
+for (let i = 1; i < num_locals; ++i) {
+ g_text += " var a" + i + " = a" + (i - 1) + " + inc;";
+}
+g_text += " return f(a0";
+for (let i = 1; i < num_locals; ++i) {
+ g_text += ", a" + i;
+}
+g_text += ");";
+const g = new Function("input", g_text);
+
+%PrepareFunctionForOptimization(g);
+g(1);
+g(-1);
+%OptimizeFunctionOnNextCall(g);
+
+// Use recursion to get past whatever stack space is already committed.
+// 20 * 16kB = 320kB, comfortably below the default 1MB stack reservation limit.
+g(20);
diff --git a/deps/v8/test/mjsunit/compiler/regress-9137-1.js b/deps/v8/test/mjsunit/compiler/regress-9137-1.js
new file mode 100644
index 0000000000..32cbe32b15
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-9137-1.js
@@ -0,0 +1,24 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+// Flags: --no-flush-bytecode --no-stress-flush-bytecode
+
+function changeMap(obj) {
+ obj.blub = 42;
+}
+
+function foo(obj) {
+ return obj.bind(changeMap(obj));
+}
+
+%NeverOptimizeFunction(changeMap);
+%PrepareFunctionForOptimization(foo);
+foo(function(){});
+foo(function(){});
+%OptimizeFunctionOnNextCall(foo);
+foo(function(){});
+%OptimizeFunctionOnNextCall(foo);
+foo(function(){});
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-9137-2.js b/deps/v8/test/mjsunit/compiler/regress-9137-2.js
new file mode 100644
index 0000000000..ee4c157269
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-9137-2.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+// Flags: --no-flush-bytecode --no-stress-flush-bytecode
+
+function changeMap(obj) {
+ obj.blub = 42;
+}
+
+function reducer(acc, val, i, obj) {
+ return changeMap(obj);
+}
+
+function foo(obj) {
+ return obj.reduce(reducer);
+}
+
+%NeverOptimizeFunction(reducer);
+%PrepareFunctionForOptimization(foo);
+foo([0, 1, 2]);
+foo([0, 1, 2]);
+%OptimizeFunctionOnNextCall(foo);
+foo([0, 1, 2]);
+%OptimizeFunctionOnNextCall(foo);
+foo([0, 1, 2]);
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-919754.js b/deps/v8/test/mjsunit/compiler/regress-919754.js
index 5f20aad928..25d76a0615 100644
--- a/deps/v8/test/mjsunit/compiler/regress-919754.js
+++ b/deps/v8/test/mjsunit/compiler/regress-919754.js
@@ -12,4 +12,5 @@ function f(get, ...a) {
}
return get();
}
+%PrepareFunctionForOptimization(f);
assertThrows(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-957559.js b/deps/v8/test/mjsunit/compiler/regress-957559.js
new file mode 100644
index 0000000000..b32f6b85f6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-957559.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-always-opt --opt
+
+
+const v0 = [];
+function f(b) {
+ for (let v13 = 0; v13 <= 3; v13 = v13 + 2241165261) {
+ for (let i = 0; i < 8; i++) {}
+ const v23 = Math.max(v13,-0.0,-2523259642);
+ const v24 = v0[v23];
+ }
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-958021.js b/deps/v8/test/mjsunit/compiler/regress-958021.js
new file mode 100644
index 0000000000..252ea84365
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-958021.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function v0() {
+ let v7 = -4294967295;
+ try {
+ for (let v11 = 0; v11 < 8; v11++) {
+ const v13 = Symbol.isConcatSpreadable;
+ const v14 = v11 && v13;
+ const v15 = v7 <= v14;
+ for (var i = 0; i < 10; i++) {}
+ }
+ } catch(v20) {}
+}
+
+v0();
+v0();
+%OptimizeFunctionOnNextCall(v0);
+v0();
diff --git a/deps/v8/test/mjsunit/compiler/regress-958350.js b/deps/v8/test/mjsunit/compiler/regress-958350.js
new file mode 100644
index 0000000000..5f6d9162c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-958350.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(o) {
+ for (const x of o) {
+ o[100] = 1;
+ try { x.push(); } catch (e) {}
+ }
+}
+
+%PrepareFunctionForOptimization(foo);
+foo([1]);
+foo([1]);
+%OptimizeFunctionOnNextCall(foo);
+foo([1]);
diff --git a/deps/v8/test/mjsunit/compiler/regress-958420.js b/deps/v8/test/mjsunit/compiler/regress-958420.js
new file mode 100644
index 0000000000..1601abc6bb
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-958420.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = [];
+
+function foo() {
+ return a[Symbol.iterator]().next();
+}
+
+a.__proto__.push(5);
+a.bla = {};
+
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-961986.js b/deps/v8/test/mjsunit/compiler/regress-961986.js
new file mode 100644
index 0000000000..de8b5fe458
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-961986.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ const proto = [];
+ const obj = Object.create(proto);
+ obj[1] = "";
+ proto[1];
+ proto.bla = 42;
+}
+
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-966560-1.js b/deps/v8/test/mjsunit/compiler/regress-966560-1.js
new file mode 100644
index 0000000000..d4d04f23dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-966560-1.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+async function __f_3() {
+ return await __f_4();
+}
+async function __f_4() {
+ await x.then();
+ throw new Error();
+}
+async function __f_5(f) {
+ try {
+ await f();
+ } catch (e) {
+ }
+}
+(async() => {; %OptimizeFunctionOnNextCall(__f_4); await __f_5(__f_3); })();
diff --git a/deps/v8/test/mjsunit/compiler/regress-966560-2.js b/deps/v8/test/mjsunit/compiler/regress-966560-2.js
new file mode 100644
index 0000000000..33eff6020c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-966560-2.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function* get() {
+ for (let x of [1,2,3]) {
+ yield;
+ get = [];
+ }
+}
+%OptimizeFunctionOnNextCall(get);
+get();
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-965513.js b/deps/v8/test/mjsunit/compiler/regress-crbug-965513.js
new file mode 100644
index 0000000000..d1cb0545e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-965513.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --allow-natives-syntax --opt
+
+%EnsureFeedbackVectorForFunction(foo);
+function foo(x) {
+ return x * (x == 1);
+}
+
+foo(0.5);
+foo(1.5);
+%OptimizeFunctionOnNextCall(foo);
+foo(1.5);
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/compiler/regress-v8-9139.js b/deps/v8/test/mjsunit/compiler/regress-v8-9139.js
new file mode 100644
index 0000000000..0587bdba6a
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-v8-9139.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+let dummy = { x : {} };
+
+let o = { x : 0.1 };
+
+function f(o, a, b) {
+ o.x = a + b;
+}
+
+%PrepareFunctionForOptimization(f);
+f(o, 0.05, 0.05);
+f(o, 0.05, 0.05);
+%OptimizeFunctionOnNextCall(f);
+f(o, 0.05, 0.05);
+assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/compiler/spread-call.js b/deps/v8/test/mjsunit/compiler/spread-call.js
index 0a8527ed76..12234edaf3 100644
--- a/deps/v8/test/mjsunit/compiler/spread-call.js
+++ b/deps/v8/test/mjsunit/compiler/spread-call.js
@@ -14,6 +14,9 @@ function tests() {
assertEquals(3, countArgs(...[1.1, 2, 3])); // Double
assertEquals(4, countArgs(...[1.1, 2, , 3])); // HoleyDouble
assertEquals(3, countArgs(...[{valueOf: () => 0}, 1.1, '2'])); // Object
+ assertEquals(3, countArgs(...Object.freeze([{valueOf: () => 0}, 1.1, '2']))); // Frozen Object
+ assertEquals(3, countArgs(...Object.seal([{valueOf: () => 0}, 1.1, '2']))); // Sealed Object
+ assertEquals(3, countArgs(...Object.preventExtensions([{valueOf: () => 0}, 1.1, '2']))); // Non-extensible Object
assertEquals(
4, countArgs(...[{valueOf: () => 0}, 1.1, , '2'])); // HoleyObject
diff --git a/deps/v8/test/mjsunit/compiler/string-startswith.js b/deps/v8/test/mjsunit/compiler/string-startswith.js
new file mode 100644
index 0000000000..c060a5e67b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/string-startswith.js
@@ -0,0 +1,81 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+(function() {
+ function foo(string) { return string.startsWith('a'); }
+
+ %PrepareFunctionForOptimization(foo);
+ assertEquals(false, foo(''));
+ assertEquals(true, foo('a'));
+ assertEquals(false, foo('ba'));
+ assertEquals(true, foo('abc'));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(false, foo(''));
+ assertEquals(true, foo('a'));
+ assertEquals(false, foo('ba'));
+ assertEquals(true, foo('abc'));
+ assertOptimized(foo);
+})();
+
+(function() {
+ function f() { return "abc".startsWith(); }
+
+ %PrepareFunctionForOptimization(f);
+ assertEquals(false, f());
+ assertEquals(false, f());
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(false, f());
+ assertOptimized(f);
+})();
+
+(function() {
+ function g(n) { return "abc".startsWith("a", n); }
+
+ %PrepareFunctionForOptimization(g);
+ assertEquals(true, g(-1));
+ assertEquals(true, g(0));
+ assertEquals(false, g(1));
+ assertEquals(false, g(2));
+ assertEquals(false, g(3));
+ assertEquals(false, g(4));
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(true, g(-1));
+ assertEquals(true, g(0));
+ assertEquals(false, g(1));
+ assertEquals(false, g(2));
+ assertEquals(false, g(3));
+ assertEquals(false, g(4));
+ assertOptimized(g);
+})();
+
+(function() {
+ function g(n) { return "cba".startsWith("a", n); }
+
+ %PrepareFunctionForOptimization(g);
+ assertEquals(false, g(-1));
+ assertEquals(false, g(0));
+ assertEquals(false, g(1));
+ assertEquals(true, g(2));
+ assertEquals(false, g(3));
+ assertEquals(false, g(4));
+ %OptimizeFunctionOnNextCall(g);
+ assertEquals(false, g(-1));
+ assertEquals(false, g(0));
+ assertEquals(false, g(1));
+ assertEquals(true, g(2));
+ assertEquals(false, g(3));
+ assertEquals(false, g(4));
+ assertOptimized(g);
+})();
+
+(function() {
+ function f(n) { return "cba".startsWith("a", n); }
+ %PrepareFunctionForOptimization(f);
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(false, f(1073741824));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/try-osr.js b/deps/v8/test/mjsunit/compiler/try-osr.js
index c0ef27add3..fc6c9e43b0 100644
--- a/deps/v8/test/mjsunit/compiler/try-osr.js
+++ b/deps/v8/test/mjsunit/compiler/try-osr.js
@@ -13,6 +13,7 @@ function OSRInsideTry(x) {
}
return x + 2;
}
+%PrepareFunctionForOptimization(OSRInsideTry);
assertEquals(24, OSRInsideTry(23));
@@ -25,6 +26,7 @@ function OSRInsideCatch(x) {
}
return x + 2;
}
+%PrepareFunctionForOptimization(OSRInsideCatch);
assertEquals(24, OSRInsideCatch(23));
@@ -37,6 +39,7 @@ function OSRInsideFinally_Return(x) {
}
return x + 2;
}
+%PrepareFunctionForOptimization(OSRInsideFinally_Return);
assertEquals(24, OSRInsideFinally_Return(23));
@@ -48,4 +51,5 @@ function OSRInsideFinally_ReThrow(x) {
}
return x + 2;
}
+%PrepareFunctionForOptimization(OSRInsideFinally_ReThrow);
assertThrows("OSRInsideFinally_ReThrow(new Error)", Error);
diff --git a/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js b/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js
index 4a7241c84f..f4edee5907 100644
--- a/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js
+++ b/deps/v8/test/mjsunit/concurrent-initial-prototype-change.js
@@ -44,7 +44,9 @@ if (!%IsConcurrentRecompilationSupported()) {
function f1(a, i) {
return a[i] + 0.5;
}
+%PrepareFunctionForOptimization(f1);
+%PrepareFunctionForOptimization(f1);
var arr = [0.0,,2.5];
assertEquals(0.5, f1(arr, 0));
assertEquals(0.5, f1(arr, 0));
diff --git a/deps/v8/test/mjsunit/const-field-tracking.js b/deps/v8/test/mjsunit/const-field-tracking.js
index 3da6d29bf6..bc979b80b4 100644
--- a/deps/v8/test/mjsunit/const-field-tracking.js
+++ b/deps/v8/test/mjsunit/const-field-tracking.js
@@ -5,11 +5,6 @@
// Flags: --allow-natives-syntax --opt --no-always-opt
var global = this;
-
-// TODO(ishell): update the test once const->mutable migration does not
-// create a new map.
-var IS_INPLACE_MAP_MODIFICATION_SUPPORTED = false;
-
var unique_id = 0;
// Creates a function with unique SharedFunctionInfo to ensure the feedback
// vector is unique for each test case.
@@ -42,40 +37,21 @@ function TestLoadFromConstantFieldOfAConstantObject(the_value, other_value) {
// {constant_object} is known to the compiler via global property cell
// tracking.
var load = MakeFunctionWithUniqueSFI("return constant_object.a.v;");
+ %PrepareFunctionForOptimization(load);
load();
load();
%OptimizeFunctionOnNextCall(load);
assertEquals(the_value, load());
assertOptimized(load);
- if (IS_INPLACE_MAP_MODIFICATION_SUPPORTED) {
- var a = new A(other_value);
- assertTrue(%HaveSameMap(a, the_object.a));
- // Make constant field mutable by assigning another value
- // to some other instance of A.
- new A(the_value).v = other_value;
- assertTrue(%HaveSameMap(a, new A(the_value)));
- assertTrue(%HaveSameMap(a, the_object.a));
- assertUnoptimized(load);
- assertEquals(the_value, load());
- } else {
- var a = new A(other_value);
- assertTrue(%HaveSameMap(a, the_object.a));
- // Make constant field mutable by assigning another value
- // to some other instance of A.
- new A(the_value).v = other_value;
- assertOptimized(load);
- assertTrue(!%HaveSameMap(a, new A(the_value)));
-
- assertTrue(%HaveSameMap(a, the_object.a));
- // Ensure the {the_object.a} migrated to an up-to date version of a map
- // by loading a property through IC.
- assertEquals(the_value, the_object.a.v);
- assertTrue(!%HaveSameMap(a, the_object.a));
- assertOptimized(load);
-
- // Now attempt to call load should deoptimize because of failed map check.
- assertEquals(the_value, load());
- }
+ var a = new A(other_value);
+ assertTrue(%HaveSameMap(a, the_object.a));
+ // Make constant field mutable by assigning another value
+ // to some other instance of A.
+ new A(the_value).v = other_value;
+ assertTrue(%HaveSameMap(a, new A(the_value)));
+ assertTrue(%HaveSameMap(a, the_object.a));
+ assertUnoptimized(load);
+ assertEquals(the_value, load());
assertUnoptimized(load);
assertEquals(the_value, load());
}
@@ -123,6 +99,7 @@ function TestLoadFromConstantFieldOfAPrototype(the_value, other_value) {
// Ensure O.prototype is in fast mode by loading from its field.
function warmup() { return new O().v; }
+ %EnsureFeedbackVectorForFunction(warmup);
warmup(); warmup(); warmup();
assertTrue(%HasFastProperties(O.prototype));
@@ -130,20 +107,15 @@ function TestLoadFromConstantFieldOfAPrototype(the_value, other_value) {
// map and therefore the compiler knows the prototype object and can
// optimize load of "v".
var load = MakeFunctionWithUniqueSFI("o", "return o.v;");
+ %PrepareFunctionForOptimization(load);
load(new O());
load(new O());
%OptimizeFunctionOnNextCall(load);
assertEquals(the_value, load(new O()));
assertOptimized(load);
- if (IS_INPLACE_MAP_MODIFICATION_SUPPORTED) {
- // Invalidation of mutability should trigger deoptimization with a
- // "field-owner" reason.
- the_prototype.v = other_value;
- } else {
- // Invalidation of mutability should trigger deoptimization with a
- // "prototype-check" (stability) reason.
- the_prototype.v = other_value;
- }
+ // Invalidation of mutability should trigger deoptimization with a
+ // "field-owner" reason.
+ the_prototype.v = other_value;
assertUnoptimized(load);
}
@@ -199,6 +171,7 @@ function TestStoreToConstantFieldOfConstantObject(the_value, other_value) {
// {constant_object} is known to the compiler via global property cell
// tracking.
var store = MakeFunctionWithUniqueSFI("v", "constant_object.a.v = v;");
+ %PrepareFunctionForOptimization(store);
store(the_value);
store(the_value);
%OptimizeFunctionOnNextCall(store);
@@ -210,32 +183,25 @@ function TestStoreToConstantFieldOfConstantObject(the_value, other_value) {
assertEquals(the_value, constant_object.a.v);
assertOptimized(store);
- if (IS_INPLACE_MAP_MODIFICATION_SUPPORTED) {
- var a = new A(other_value);
-
- if (typeof the_value == "function" || typeof the_value == "object") {
- // For heap object fields "field-owner" dependency is installed for
- // any access of the field, therefore making constant field mutable by
- // assigning other value to some other instance of A should already
- // trigger deoptimization.
- assertTrue(%HaveSameMap(a, the_object.a));
- new A(the_value).v = other_value;
- assertTrue(%HaveSameMap(a, new A(the_value)));
- assertTrue(%HaveSameMap(a, the_object.a));
- assertUnoptimized(store);
- } else {
- assertOptimized(store);
- }
- // Storing other value deoptimizes because of failed value check.
- store(other_value);
+ var a = new A(other_value);
+
+ if (typeof the_value == "function" || typeof the_value == "object") {
+ // For heap object fields "field-owner" dependency is installed for
+ // any access of the field, therefore making constant field mutable by
+ // assigning other value to some other instance of A should already
+ // trigger deoptimization.
+ assertTrue(%HaveSameMap(a, the_object.a));
+ new A(the_value).v = other_value;
+ assertTrue(%HaveSameMap(a, new A(the_value)));
+ assertTrue(%HaveSameMap(a, the_object.a));
assertUnoptimized(store);
- assertEquals(other_value, constant_object.a.v);
} else {
- // Storing other value deoptimizes because of failed value check.
- store(other_value);
- assertUnoptimized(store);
- assertEquals(other_value, constant_object.a.v);
+ assertOptimized(store);
}
+ // Storing other value deoptimizes because of failed value check.
+ store(other_value);
+ assertUnoptimized(store);
+ assertEquals(other_value, constant_object.a.v);
}
// Test constant tracking with Smi values.
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index 8359dc2c0e..f7c809bc1d 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -29,6 +29,7 @@
// Flags: --allow-natives-syntax --nostress-opt --opt
function test(f, iterations) {
+ %PrepareFunctionForOptimization(f);
f();
f();
// Some of the tests need to learn until they stabilize.
diff --git a/deps/v8/test/mjsunit/constant-folding.js b/deps/v8/test/mjsunit/constant-folding.js
index d6ac9fe3d5..c14789651e 100644
--- a/deps/v8/test/mjsunit/constant-folding.js
+++ b/deps/v8/test/mjsunit/constant-folding.js
@@ -147,7 +147,6 @@ function test() {
switch(3) {
case 5:
assertUnreachable();
- break;
case 3:
j = 13;
default:
@@ -158,7 +157,6 @@ function test() {
case 9:
j = 19;
assertUnreachable();
- break;
}
assertEquals(17, j, "switch with constant value");
}
diff --git a/deps/v8/test/mjsunit/context-calls-maintained.js b/deps/v8/test/mjsunit/context-calls-maintained.js
index 95bf55240b..15094d2cb7 100644
--- a/deps/v8/test/mjsunit/context-calls-maintained.js
+++ b/deps/v8/test/mjsunit/context-calls-maintained.js
@@ -44,6 +44,7 @@ function clear_all_ics() {
function f() { foo(1); }
// Drive to monomorphic
+ %PrepareFunctionForOptimization(f);
f(); f(); f();
delete foo;
@@ -63,6 +64,7 @@ function clear_all_ics() {
foo = function(arg) { return arg * 3; }
function g() { this.foo(1); }
+ %PrepareFunctionForOptimization(g);
g(); g(); g();
delete foo;
assertThrows(function() { g(); }, TypeError);
@@ -77,7 +79,8 @@ function clear_all_ics() {
// Test: verify that a load with IC does the right thing.
(function() {
- var foo = function() { return a; }
+ var foo = function() { return a; };
+ %PrepareFunctionForOptimization(foo);
a = 3;
foo(); foo(); foo();
delete a;
@@ -100,7 +103,8 @@ function clear_all_ics() {
// if the variable isn't found.
(function() {
var foo = function() { a = 3; }
- var bar = function() { "use strict"; a = 3; }
+ var bar = function() { "use strict"; a = 3; };
+ %PrepareFunctionForOptimization(bar);
foo(); foo(); foo();
delete a;
assertThrows(function() { bar(); }, ReferenceError);
diff --git a/deps/v8/test/mjsunit/cross-realm-filtering.js b/deps/v8/test/mjsunit/cross-realm-filtering.js
index b4e2520a11..60bf5b0b6f 100644
--- a/deps/v8/test/mjsunit/cross-realm-filtering.js
+++ b/deps/v8/test/mjsunit/cross-realm-filtering.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --experimental-stack-trace-frames
+
var realms = [Realm.current(), Realm.create()];
// Check stack trace filtering across security contexts.
@@ -34,17 +36,17 @@ function assertNotIn(thrower, error) {
Realm.eval(realms[1], script);
assertSame(2, Realm.shared.error_0.length);
-assertSame(3, Realm.shared.error_1.length);
+assertSame(4, Realm.shared.error_1.length);
-assertTrue(Realm.shared.thrower_1 === Realm.shared.error_1[1].getFunction());
+assertTrue(Realm.shared.thrower_1 === Realm.shared.error_1[2].getFunction());
assertNotIn(Realm.shared.thrower_0, Realm.shared.error_0);
assertNotIn(Realm.shared.thrower_0, Realm.shared.error_1);
Realm.eval(realms[0], script);
-assertSame(4, Realm.shared.error_0.length);
-assertSame(3, Realm.shared.error_1.length);
+assertSame(6, Realm.shared.error_0.length);
+assertSame(4, Realm.shared.error_1.length);
-assertTrue(Realm.shared.thrower_0 === Realm.shared.error_0[1].getFunction());
+assertTrue(Realm.shared.thrower_0 === Realm.shared.error_0[2].getFunction());
assertNotIn(Realm.shared.thrower_1, Realm.shared.error_0);
assertNotIn(Realm.shared.thrower_1, Realm.shared.error_1);
diff --git a/deps/v8/test/mjsunit/dehoisted-array-index.js b/deps/v8/test/mjsunit/dehoisted-array-index.js
index f4a32c1033..8e660c940c 100644
--- a/deps/v8/test/mjsunit/dehoisted-array-index.js
+++ b/deps/v8/test/mjsunit/dehoisted-array-index.js
@@ -34,6 +34,7 @@ function aoo(i) {
return a[i + 1];
}
+%PrepareFunctionForOptimization(aoo);
aoo(1);
aoo(-1);
%OptimizeFunctionOnNextCall(aoo);
@@ -51,6 +52,7 @@ function boo(i) {
return ret;
}
+%PrepareFunctionForOptimization(boo);
boo(1);
boo(-1);
%OptimizeFunctionOnNextCall(boo);
@@ -67,6 +69,7 @@ function coo() {
return ret;
}
+%PrepareFunctionForOptimization(coo);
coo();
coo();
%OptimizeFunctionOnNextCall(coo);
@@ -81,6 +84,7 @@ function doo() {
}
return ret;
}
+%PrepareFunctionForOptimization(doo);
doo();
doo();
%OptimizeFunctionOnNextCall(doo);
@@ -97,6 +101,7 @@ function eoo() {
return ret;
}
+%PrepareFunctionForOptimization(eoo);
eoo();
eoo();
%OptimizeFunctionOnNextCall(eoo);
@@ -118,6 +123,7 @@ function foo() {
return ret;
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -133,6 +139,7 @@ function goo(i) {
return a[i + 3];
}
+%PrepareFunctionForOptimization(goo);
goo(-1);
goo(-1);
%OptimizeFunctionOnNextCall(goo);
@@ -147,6 +154,7 @@ function hoo() {
return a[index() + 3];
}
+%PrepareFunctionForOptimization(hoo);
hoo();
hoo();
%OptimizeFunctionOnNextCall(hoo);
@@ -157,6 +165,7 @@ function ioo(i) {
return a[i] + a[i + 1];
}
+%PrepareFunctionForOptimization(ioo);
ioo(1);
ioo(1);
%OptimizeFunctionOnNextCall(ioo);
diff --git a/deps/v8/test/mjsunit/deopt-recursive-eager-once.js b/deps/v8/test/mjsunit/deopt-recursive-eager-once.js
index 1b4e155091..f0bca16c1d 100644
--- a/deps/v8/test/mjsunit/deopt-recursive-eager-once.js
+++ b/deps/v8/test/mjsunit/deopt-recursive-eager-once.js
@@ -14,18 +14,16 @@ function foo(i, deopt = false) {
foo(i - 1, deopt);
}
}
+%PrepareFunctionForOptimization(foo);
-assertEquals(0, %GetDeoptCount(foo));
-
+%PrepareFunctionForOptimization(foo);
foo(10);
foo(10);
%OptimizeFunctionOnNextCall(foo);
foo(10);
assertOptimized(foo);
-assertEquals(0, %GetDeoptCount(foo));
foo(10, true);
assertUnoptimized(foo);
-assertEquals(1, %GetDeoptCount(foo));
diff --git a/deps/v8/test/mjsunit/deopt-recursive-lazy-once.js b/deps/v8/test/mjsunit/deopt-recursive-lazy-once.js
index f75dde7dc2..06f62efc51 100644
--- a/deps/v8/test/mjsunit/deopt-recursive-lazy-once.js
+++ b/deps/v8/test/mjsunit/deopt-recursive-lazy-once.js
@@ -14,18 +14,16 @@ function foo(i, deopt = false) {
foo(i - 1, deopt);
}
}
+%PrepareFunctionForOptimization(foo);
-assertEquals(0, %GetDeoptCount(foo));
-
+%PrepareFunctionForOptimization(foo);
foo(10);
foo(10);
%OptimizeFunctionOnNextCall(foo);
foo(10);
assertOptimized(foo);
-assertEquals(0, %GetDeoptCount(foo));
foo(10, true);
assertUnoptimized(foo);
-assertEquals(1, %GetDeoptCount(foo));
diff --git a/deps/v8/test/mjsunit/deopt-recursive-soft-once.js b/deps/v8/test/mjsunit/deopt-recursive-soft-once.js
index f2cadf2e54..cb27a8d733 100644
--- a/deps/v8/test/mjsunit/deopt-recursive-soft-once.js
+++ b/deps/v8/test/mjsunit/deopt-recursive-soft-once.js
@@ -16,18 +16,14 @@ function foo(i, deopt = false, deoptobj = null) {
}
}
-assertEquals(0, %GetDeoptCount(foo));
-
+%PrepareFunctionForOptimization(foo);
foo(10);
foo(10);
%OptimizeFunctionOnNextCall(foo);
foo(10);
assertOptimized(foo);
-assertEquals(0, %GetDeoptCount(foo));
foo(10, true, { bar: function(){} });
assertUnoptimized(foo);
-// Soft deopts don't count to the deopt count.
-assertEquals(0, %GetDeoptCount(foo));
diff --git a/deps/v8/test/mjsunit/deopt-unlinked.js b/deps/v8/test/mjsunit/deopt-unlinked.js
index 1005737988..06a5cc4041 100644
--- a/deps/v8/test/mjsunit/deopt-unlinked.js
+++ b/deps/v8/test/mjsunit/deopt-unlinked.js
@@ -3,26 +3,24 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt --no-always-opt
+// The deopt count is stored in the feedback vector which gets cleared when
+// bytecode is flushed, which --gc-interval can cause in stress modes.
+// Flags: --noflush-bytecode --nostress-flush-bytecode
function foo() {}
-assertEquals(0, %GetDeoptCount(foo));
-
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
assertOptimized(foo);
-assertEquals(0, %GetDeoptCount(foo));
// Unlink the function.
%DeoptimizeFunction(foo);
assertUnoptimized(foo);
-assertEquals(1, %GetDeoptCount(foo));
foo();
assertUnoptimized(foo);
-assertEquals(1, %GetDeoptCount(foo));
diff --git a/deps/v8/test/mjsunit/deopt-with-fp-regs.js b/deps/v8/test/mjsunit/deopt-with-fp-regs.js
index bdb08053ae..d5f02ac363 100644
--- a/deps/v8/test/mjsunit/deopt-with-fp-regs.js
+++ b/deps/v8/test/mjsunit/deopt-with-fp-regs.js
@@ -75,6 +75,7 @@ function test(a, b, c, d, e, v) {
assertEquals(24, d);
assertEquals(37.5, e);
}
+%PrepareFunctionForOptimization(test);
test(10.0, 20.0, 30.0, 40.0, 50.0, 1.5);
diff --git a/deps/v8/test/mjsunit/deserialize-optimize-inner.js b/deps/v8/test/mjsunit/deserialize-optimize-inner.js
index ca78b8a4b0..50976bce11 100644
--- a/deps/v8/test/mjsunit/deserialize-optimize-inner.js
+++ b/deps/v8/test/mjsunit/deserialize-optimize-inner.js
@@ -6,6 +6,7 @@
function f(x, y) { return x + y; }
+%PrepareFunctionForOptimization(f);
assertEquals(1, f(0, 1));
assertEquals(5, f(2, 3));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/dictionary-prototypes.js b/deps/v8/test/mjsunit/dictionary-prototypes.js
index 109f8d42a6..0186c63f91 100644
--- a/deps/v8/test/mjsunit/dictionary-prototypes.js
+++ b/deps/v8/test/mjsunit/dictionary-prototypes.js
@@ -47,6 +47,7 @@ function TestAddingPropertyToDictionaryPrototype() {
assertEquals(1, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestAddingPropertyToDictionaryPrototype);
TestAddingPropertyToDictionaryPrototype();
// Same as TestAddingPropertyToDictionaryPrototype, but using o["foo"] access
@@ -82,6 +83,7 @@ function TestAddingPropertyToDictionaryPrototype2() {
assertEquals(1, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestAddingPropertyToDictionaryPrototype2);
TestAddingPropertyToDictionaryPrototype2();
function TestAddingPropertyToDictionaryPrototype_DefineProperty() {
@@ -113,6 +115,7 @@ function TestAddingPropertyToDictionaryPrototype_DefineProperty() {
assertEquals(1, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestAddingPropertyToDictionaryPrototype_DefineProperty);
TestAddingPropertyToDictionaryPrototype_DefineProperty();
function TestAddingPropertyToDictionaryPrototype_DictionaryAddSlowPath() {
@@ -146,6 +149,7 @@ function TestAddingPropertyToDictionaryPrototype_DictionaryAddSlowPath() {
assertEquals(1, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestAddingPropertyToDictionaryPrototype_DictionaryAddSlowPath);
TestAddingPropertyToDictionaryPrototype_DictionaryAddSlowPath();
function TestAddingAccessorPropertyToDictionaryPrototype() {
@@ -177,6 +181,7 @@ function TestAddingAccessorPropertyToDictionaryPrototype() {
assertEquals(1, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestAddingAccessorPropertyToDictionaryPrototype);
TestAddingAccessorPropertyToDictionaryPrototype();
function TestRemovingPropertyFromDictionaryPrototype() {
@@ -208,6 +213,7 @@ function TestRemovingPropertyFromDictionaryPrototype() {
assertEquals(10, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestRemovingPropertyFromDictionaryPrototype);
TestRemovingPropertyFromDictionaryPrototype();
// Same as TestRemovingPropertyFromDictionaryPrototype, but using o["foo"] access
@@ -242,6 +248,7 @@ function TestRemovingPropertyFromDictionaryPrototype2() {
assertEquals(10, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestRemovingPropertyFromDictionaryPrototype2);
TestRemovingPropertyFromDictionaryPrototype2();
function TestAddingPropertyToDictionaryPrototype_Monomorphic() {
@@ -286,6 +293,7 @@ function TestAddingPropertyToDictionaryPrototype_Monomorphic() {
assertEquals(1, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestAddingPropertyToDictionaryPrototype_Monomorphic);
TestAddingPropertyToDictionaryPrototype_Monomorphic();
function TestAddingKeyedPropertyToDictionaryPrototype_Monomorphic() {
@@ -331,6 +339,7 @@ function TestAddingKeyedPropertyToDictionaryPrototype_Monomorphic() {
assertEquals(1, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestAddingKeyedPropertyToDictionaryPrototype_Monomorphic);
TestAddingKeyedPropertyToDictionaryPrototype_Monomorphic();
// Like TestAddingPropertyToDictionaryPrototype, except that the prototype isn't
@@ -369,6 +378,7 @@ function TestAddingPropertyToAlmostDictionaryPrototype() {
assertEquals(1, bar_func_called);
}
+%EnsureFeedbackVectorForFunction(TestAddingPropertyToAlmostDictionaryPrototype);
TestAddingPropertyToAlmostDictionaryPrototype();
function TestReconfiguringDataToAccessor() {
@@ -406,4 +416,5 @@ function TestReconfiguringDataToAccessor() {
assertEquals(1, setter_called);
}
+%EnsureFeedbackVectorForFunction(TestReconfiguringDataToAccessor);
TestReconfiguringDataToAccessor();
diff --git a/deps/v8/test/mjsunit/div-mul-minus-one.js b/deps/v8/test/mjsunit/div-mul-minus-one.js
index 71ad299610..da6f8e3c30 100644
--- a/deps/v8/test/mjsunit/div-mul-minus-one.js
+++ b/deps/v8/test/mjsunit/div-mul-minus-one.js
@@ -30,7 +30,7 @@
function div(g) {
return (g/-1) ^ 1
}
-
+%PrepareFunctionForOptimization(div);
var kMinInt = 1 << 31;
var expected_MinInt = div(kMinInt);
var expected_minus_zero = div(0);
@@ -42,6 +42,7 @@ function mul(g) {
return (g * -1) ^ 1
}
+%PrepareFunctionForOptimization(mul);
expected_MinInt = mul(kMinInt);
expected_minus_zero = mul(0);
%OptimizeFunctionOnNextCall(mul);
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index 5a2355ecf6..54c5e33e9e 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -151,6 +151,7 @@ function test_wrapper() {
}
var smi_only = new Array(1, 2, 3);
assertKind(elements_kind.fast_smi_only, smi_only);
+ %PrepareFunctionForOptimization(monomorphic);
for (var i = 0; i < 3; i++) monomorphic(smi_only);
%OptimizeFunctionOnNextCall(monomorphic);
monomorphic(smi_only);
@@ -206,7 +207,8 @@ function convert_to_double(array) {
array[1] = 2.5;
assertKind(elements_kind.fast_double, array);
assertEquals(2.5, array[1]);
-}
+};
+%PrepareFunctionForOptimization(convert_to_double);
var smis = construct_smis();
for (var i = 0; i < 3; i++) convert_to_double(smis);
%OptimizeFunctionOnNextCall(convert_to_double);
@@ -218,7 +220,8 @@ function convert_to_fast(array) {
array[1] = "two";
assertKind(elements_kind.fast, array);
assertEquals("two", array[1]);
-}
+};
+%PrepareFunctionForOptimization(convert_to_fast);
smis = construct_smis();
for (var i = 0; i < 3; i++) convert_to_fast(smis);
var doubles = construct_doubles();
diff --git a/deps/v8/test/mjsunit/elements-transition-hoisting.js b/deps/v8/test/mjsunit/elements-transition-hoisting.js
index dcd742e621..0406dc7f17 100644
--- a/deps/v8/test/mjsunit/elements-transition-hoisting.js
+++ b/deps/v8/test/mjsunit/elements-transition-hoisting.js
@@ -45,6 +45,7 @@ function test_wrapper() {
} while (--count > 0);
}
+ %PrepareFunctionForOptimization(testDoubleConversion4);
testDoubleConversion4(new Array(5));
testDoubleConversion4(new Array(5)); // Call twice to make sure that second
// store is a transition and not
@@ -71,6 +72,7 @@ function test_wrapper() {
} while (--count > 0);
}
+ %PrepareFunctionForOptimization(testExactMapHoisting);
testExactMapHoisting(new Array(5));
testExactMapHoisting(new Array(5)); // Call twice to make sure that second
// store is a transition and not
@@ -102,6 +104,7 @@ function test_wrapper() {
} while (--count > 0);
}
+ %PrepareFunctionForOptimization(testExactMapHoisting2);
testExactMapHoisting2(new Array(5));
testExactMapHoisting2(new Array(5)); // Call twice to make sure that second
// store is a transition and not
@@ -129,6 +132,7 @@ function test_wrapper() {
} while (--count > 0);
}
+ %PrepareFunctionForOptimization(testExactMapHoisting3);
var add_transition = new Array(5);
add_transition.foo = 0;
add_transition[0] = new Object(); // For FAST_ELEMENT transition to be created
@@ -155,6 +159,7 @@ function test_wrapper() {
}
/*
+ %PrepareFunctionForOptimization(testDominatingTransitionHoisting1);
testDominatingTransitionHoisting1(new Array(5));
testDominatingTransitionHoisting1(new Array(5)); // Call twice to make sure
// that second store is a
@@ -180,6 +185,7 @@ function test_wrapper() {
} while (--count > 3);
}
+ %PrepareFunctionForOptimization(testHoistingWithSideEffect);
testHoistingWithSideEffect(new Array(5));
testHoistingWithSideEffect(new Array(5)); // Call twice to make sure that
// second store is a transition and
@@ -203,6 +209,7 @@ function test_wrapper() {
} while (--count > 3);
}
+ %PrepareFunctionForOptimization(testStraightLineDupeElinination);
testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,0,0,0,.5);
testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,0,0,.5,0);
testStraightLineDupeElinination(new Array(0, 0, 0, 0, 0),0,0,.5,0,0);
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-1.js b/deps/v8/test/mjsunit/elide-double-hole-check-1.js
index 63569df294..0fc2530bac 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-1.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-1.js
@@ -32,6 +32,7 @@ function f1(a, i) {
}
var a1 = [,,,,,,,,,,,,,,,,,,0.5];
+%PrepareFunctionForOptimization(f1);
assertEquals(undefined, f1(a1, 1));
assertEquals(undefined, f1(a1, 1));
%OptimizeFunctionOnNextCall(f1);
@@ -42,6 +43,7 @@ function f2(a, i) {
return a[i] + 0.5;
}
var a2_b = [0.0,,];
+%PrepareFunctionForOptimization(f2);
assertEquals(0.5, f2(a2_b, 0));
assertEquals(0.5, f2(a2_b, 0));
%OptimizeFunctionOnNextCall(f2);
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-10.js b/deps/v8/test/mjsunit/elide-double-hole-check-10.js
index e9f2a9aaf5..066f802e54 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-10.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-10.js
@@ -8,6 +8,7 @@ function f1(a, i) {
return a[i] + 0.5;
}
var arr = [,0.0,2.5];
+%PrepareFunctionForOptimization(f1);
assertEquals(0.5, f1(arr, 1));
assertEquals(0.5, f1(arr, 1));
%OptimizeFunctionOnNextCall(f1);
@@ -28,6 +29,7 @@ function foo() {
optopush(array_prototype);
}
+%PrepareFunctionForOptimization(foo);
optopush([]);
optopush([]);
optopush([]);
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-11.js b/deps/v8/test/mjsunit/elide-double-hole-check-11.js
index 4603ac0ac5..829049a5ce 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-11.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-11.js
@@ -8,6 +8,7 @@ function f1(a, i) {
return a[i] + 0.5;
}
var arr = [,0.0,2.5];
+%PrepareFunctionForOptimization(f1);
assertEquals(0.5, f1(arr, 1));
assertEquals(0.5, f1(arr, 1));
%OptimizeFunctionOnNextCall(f1);
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-12.js b/deps/v8/test/mjsunit/elide-double-hole-check-12.js
index 758734db89..21a4afe775 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-12.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-12.js
@@ -10,6 +10,7 @@ function f1(a, i) {
var other_realm = Realm.create();
var arr = [,0.0,2.5];
+%PrepareFunctionForOptimization(f1);
assertEquals(0.5, f1(arr, 1));
assertEquals(0.5, f1(arr, 1));
%OptimizeFunctionOnNextCall(f1);
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-4.js b/deps/v8/test/mjsunit/elide-double-hole-check-4.js
index e2a5505571..77eb57c22f 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-4.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-4.js
@@ -30,6 +30,7 @@
function f1(a, i) {
return a[i] + 0.5;
}
+%PrepareFunctionForOptimization(f1);
var arr = [0.0,,2.5];
assertEquals(0.5, f1(arr, 0));
assertEquals(0.5, f1(arr, 0));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-5.js b/deps/v8/test/mjsunit/elide-double-hole-check-5.js
index d0970c8fe1..80035cced1 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-5.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-5.js
@@ -29,7 +29,8 @@
function f1(a, i) {
return a[i] + 0.5;
-}
+};
+%PrepareFunctionForOptimization(f1);
var arr = [0.0,,2.5];
assertEquals(0.5, f1(arr, 0));
assertEquals(0.5, f1(arr, 0));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-6.js b/deps/v8/test/mjsunit/elide-double-hole-check-6.js
index 01a8096f85..b46534827e 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-6.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-6.js
@@ -31,6 +31,7 @@ function f1(a, i) {
return a[i] + 0.5;
}
var arr = [0.0,,2.5];
+%PrepareFunctionForOptimization(f1);
assertEquals(0.5, f1(arr, 0));
assertEquals(0.5, f1(arr, 0));
%OptimizeFunctionOnNextCall(f1);
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-7.js b/deps/v8/test/mjsunit/elide-double-hole-check-7.js
index 2b13aff881..673d2b0c86 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-7.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-7.js
@@ -30,6 +30,7 @@
function f1(a, i) {
return a[i] + 0.5;
}
+%PrepareFunctionForOptimization(f1);
var arr = [0.0,,2.5];
assertEquals(0.5, f1(arr, 0));
assertEquals(0.5, f1(arr, 0));
diff --git a/deps/v8/test/mjsunit/elide-double-hole-check-8.js b/deps/v8/test/mjsunit/elide-double-hole-check-8.js
index 35cc91fa8e..e0aca86a27 100644
--- a/deps/v8/test/mjsunit/elide-double-hole-check-8.js
+++ b/deps/v8/test/mjsunit/elide-double-hole-check-8.js
@@ -31,6 +31,7 @@ function f1(a, i) {
return a[i] + 0.5;
}
var arr = [0.0,,2.5];
+%PrepareFunctionForOptimization(f1);
assertEquals(0.5, f1(arr, 0));
assertEquals(0.5, f1(arr, 0));
%OptimizeFunctionOnNextCall(f1);
diff --git a/deps/v8/test/mjsunit/ensure-growing-store-learns.js b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
index 25fa0e6d01..5340919472 100644
--- a/deps/v8/test/mjsunit/ensure-growing-store-learns.js
+++ b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
@@ -18,6 +18,7 @@
a[i] = 5.3;
}
+ %PrepareFunctionForOptimization(foo);
foo(a, 1);
foo(a, 2);
foo(a, 3);
@@ -29,6 +30,7 @@
assertUnoptimized(foo);
assertTrue(%HasDictionaryElements(a));
+ %PrepareFunctionForOptimization(foo);
var b = [];
foo(b, 1);
foo(b, 2);
@@ -36,6 +38,7 @@
b[10000] = 5;
assertTrue(%HasDictionaryElements(b));
foo(b, 3);
+ %PrepareFunctionForOptimization(foo);
%OptimizeFunctionOnNextCall(foo);
foo(b, 50000);
assertOptimized(foo);
@@ -55,6 +58,7 @@
}
// The KeyedStoreIC will learn GROW_MODE.
+ %PrepareFunctionForOptimization(foo2);
foo2(a, 10);
foo2(a, 12);
foo2(a, 31);
diff --git a/deps/v8/test/mjsunit/es6/array-iterator-detached.js b/deps/v8/test/mjsunit/es6/array-iterator-detached.js
index 2a92ee4ff9..f385039b4d 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator-detached.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator-detached.js
@@ -39,6 +39,7 @@ function Turbo(count = 10000) {
return sum;
}
+%PrepareFunctionForOptimization(Turbo);
Turbo(10);
Turbo(10);
%OptimizeFunctionOnNextCall(Turbo);
diff --git a/deps/v8/test/mjsunit/es6/array-iterator-turbo.js b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
index 7dcdbe10fa..40f4b2af36 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
@@ -101,6 +101,7 @@ let tests = {
let { array, expected, array2, expected2 } = tests[key];
// Warmup:
+ %PrepareFunctionForOptimization(fn);
fn(array);
fn(array);
%OptimizeFunctionOnNextCall(fn);
@@ -203,6 +204,7 @@ let tests = {
};
// Warmup
+ %PrepareFunctionForOptimization(sum);
sum(array);
sum(array);
%OptimizeFunctionOnNextCall(sum);
diff --git a/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
index e50667ecc9..39ebea9ceb 100644
--- a/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
@@ -34,6 +34,7 @@ var functions = [ f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
for (var i = 0; i < functions.length; ++i) {
var func = functions[i];
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var j = 0; j < 10; ++j) {
@@ -315,6 +316,7 @@ function TestThrow() {
throw x;
}
}
+ %PrepareFunctionForOptimization(f);
for (var i = 0; i < 5; i++) {
try {
f();
@@ -339,6 +341,7 @@ TestThrow();
function TestFunctionLocal(s) {
'use strict';
var func = eval("(function baz(){" + s + "; })");
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var i = 0; i < 5; ++i) {
@@ -361,6 +364,7 @@ function TestFunctionLocal(s) {
function TestFunctionContext(s) {
'use strict';
var func = eval("(function baz(){ " + s + "; (function() { x; }); })");
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var i = 0; i < 5; ++i) {
@@ -387,6 +391,7 @@ function TestFunctionContext(s) {
function TestBlockLocal(s) {
'use strict';
var func = eval("(function baz(){ { " + s + "; } })");
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var i = 0; i < 5; ++i) {
@@ -409,6 +414,7 @@ function TestBlockLocal(s) {
function TestBlockContext(s) {
'use strict';
var func = eval("(function baz(){ { " + s + "; (function() { x; }); } })");
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var i = 0; i < 5; ++i) {
diff --git a/deps/v8/test/mjsunit/es6/block-let-crankshaft.js b/deps/v8/test/mjsunit/es6/block-let-crankshaft.js
index 97de765c8a..2b135a890f 100644
--- a/deps/v8/test/mjsunit/es6/block-let-crankshaft.js
+++ b/deps/v8/test/mjsunit/es6/block-let-crankshaft.js
@@ -36,6 +36,7 @@ var functions = [ f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
for (var i = 0; i < functions.length; ++i) {
var func = functions[i];
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var j = 0; j < 10; ++j) {
@@ -317,6 +318,7 @@ function TestThrow() {
throw x;
}
}
+ %PrepareFunctionForOptimization(f);
for (var i = 0; i < 5; i++) {
try {
f();
@@ -341,6 +343,7 @@ TestThrow();
function TestFunctionLocal(s) {
'use strict';
var func = eval("(function baz(){" + s + "; })");
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var i = 0; i < 5; ++i) {
@@ -363,6 +366,7 @@ function TestFunctionLocal(s) {
function TestFunctionContext(s) {
'use strict';
var func = eval("(function baz(){ " + s + "; (function() { x; }); })");
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var i = 0; i < 5; ++i) {
@@ -389,6 +393,7 @@ function TestFunctionContext(s) {
function TestBlockLocal(s) {
'use strict';
var func = eval("(function baz(){ { " + s + "; } })");
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var i = 0; i < 5; ++i) {
@@ -411,6 +416,7 @@ function TestBlockLocal(s) {
function TestBlockContext(s) {
'use strict';
var func = eval("(function baz(){ { " + s + "; (function() { x; }); } })");
+ %PrepareFunctionForOptimization(func);
print("Testing:");
print(func);
for (var i = 0; i < 5; ++i) {
@@ -469,6 +475,9 @@ function g(x) {
}
}
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(g);
+
for (var i=0; i<10; i++) {
f(i);
g(i);
diff --git a/deps/v8/test/mjsunit/es6/block-let-declaration-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-declaration-sloppy.js
index ea0e39bd07..1611d44558 100644
--- a/deps/v8/test/mjsunit/es6/block-let-declaration-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-declaration-sloppy.js
@@ -118,19 +118,19 @@ TestLocalDoesNotThrow("switch (true) { case true: class x { }; }");
TestLocalDoesNotThrow("switch (true) { default: class x { }; }");
// Test that redeclarations of functions are only allowed in outermost scope.
-TestLocalThrows("{ let f; var f; }");
-TestLocalThrows("{ var f; let f; }");
-TestLocalThrows("{ function f() {} let f; }");
-TestLocalThrows("{ let f; function f() {} }");
-TestLocalThrows("{ function f() {} var f; }");
-TestLocalThrows("{ var f; function f() {} }");
-TestLocalThrows("{ function f() {} class f {} }");
-TestLocalThrows("{ class f {}; function f() {} }");
-TestLocalThrows("{ function f() {} function f() {} }");
-TestLocalThrows("function f() {} let f;");
-TestLocalThrows("let f; function f() {}");
-TestLocalThrows("function f() {} class f {}");
-TestLocalThrows("class f {}; function f() {}");
+TestLocalThrows("{ let f; var f; }", SyntaxError);
+TestLocalThrows("{ var f; let f; }", SyntaxError);
+TestLocalThrows("{ function f() {} let f; }", SyntaxError);
+TestLocalThrows("{ let f; function f() {} }", SyntaxError);
+TestLocalThrows("{ function f() {} var f; }", SyntaxError);
+TestLocalThrows("{ var f; function f() {} }", SyntaxError);
+TestLocalThrows("{ function f() {} class f {} }", SyntaxError);
+TestLocalThrows("{ class f {}; function f() {} }", SyntaxError);
+TestLocalThrows("{ function f() {} function f() {} }", SyntaxError);
+TestLocalThrows("function f() {} let f;", SyntaxError);
+TestLocalThrows("let f; function f() {}", SyntaxError);
+TestLocalThrows("function f() {} class f {}", SyntaxError);
+TestLocalThrows("class f {}; function f() {}", SyntaxError);
TestLocalDoesNotThrow("function arg() {}");
TestLocalDoesNotThrow("function f() {} var f;");
TestLocalDoesNotThrow("var f; function f() {}");
diff --git a/deps/v8/test/mjsunit/es6/block-let-declaration.js b/deps/v8/test/mjsunit/es6/block-let-declaration.js
index a138144d18..f6b2b7dd41 100644
--- a/deps/v8/test/mjsunit/es6/block-let-declaration.js
+++ b/deps/v8/test/mjsunit/es6/block-let-declaration.js
@@ -120,19 +120,19 @@ TestLocalDoesNotThrow("switch (true) { case true: class x { }; }");
TestLocalDoesNotThrow("switch (true) { default: class x { }; }");
// Test that redeclarations of functions are only allowed in outermost scope.
-TestLocalThrows("{ let f; var f; }");
-TestLocalThrows("{ var f; let f; }");
-TestLocalThrows("{ function f() {} let f; }");
-TestLocalThrows("{ let f; function f() {} }");
-TestLocalThrows("{ function f() {} var f; }");
-TestLocalThrows("{ var f; function f() {} }");
-TestLocalThrows("{ function f() {} class f {} }");
-TestLocalThrows("{ class f {}; function f() {} }");
-TestLocalThrows("{ function f() {} function f() {} }");
-TestLocalThrows("function f() {} let f;");
-TestLocalThrows("let f; function f() {}");
-TestLocalThrows("function f() {} class f {}");
-TestLocalThrows("class f {}; function f() {}");
+TestLocalThrows("{ let f; var f; }", SyntaxError);
+TestLocalThrows("{ var f; let f; }", SyntaxError);
+TestLocalThrows("{ function f() {} let f; }", SyntaxError);
+TestLocalThrows("{ let f; function f() {} }", SyntaxError);
+TestLocalThrows("{ function f() {} var f; }", SyntaxError);
+TestLocalThrows("{ var f; function f() {} }", SyntaxError);
+TestLocalThrows("{ function f() {} class f {} }", SyntaxError);
+TestLocalThrows("{ class f {}; function f() {} }", SyntaxError);
+TestLocalThrows("{ function f() {} function f() {} }", SyntaxError);
+TestLocalThrows("function f() {} let f;", SyntaxError);
+TestLocalThrows("let f; function f() {}", SyntaxError);
+TestLocalThrows("function f() {} class f {}", SyntaxError);
+TestLocalThrows("class f {}; function f() {}", SyntaxError);
TestLocalDoesNotThrow("function arg() {}");
TestLocalDoesNotThrow("function f() {} var f;");
TestLocalDoesNotThrow("var f; function f() {}");
diff --git a/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js b/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js
index d86eb0794f..1eb1a54d23 100644
--- a/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js
@@ -37,6 +37,7 @@ function f1() {
assertEquals(1, x)
assertEquals(undefined, y)
}
+%PrepareFunctionForOptimization(f1);
for (var j = 0; j < 5; ++j) f1();
%OptimizeFunctionOnNextCall(f1);
f1();
@@ -85,6 +86,7 @@ function f3(one) {
assertEquals(8, b.foo());
}
}
+%PrepareFunctionForOptimization(f3);
for (var j = 0; j < 5; ++j) f3(1);
%OptimizeFunctionOnNextCall(f3);
f3(1);
diff --git a/deps/v8/test/mjsunit/es6/block-scoping.js b/deps/v8/test/mjsunit/es6/block-scoping.js
index 9fa22cddc3..cf42054769 100644
--- a/deps/v8/test/mjsunit/es6/block-scoping.js
+++ b/deps/v8/test/mjsunit/es6/block-scoping.js
@@ -39,6 +39,7 @@ function f1() {
assertEquals(1, x)
assertEquals(undefined, y)
}
+%PrepareFunctionForOptimization(f1);
for (var j = 0; j < 5; ++j) f1();
%OptimizeFunctionOnNextCall(f1);
f1();
@@ -87,6 +88,7 @@ function f3(one) {
assertEquals(8, b.foo());
}
}
+%PrepareFunctionForOptimization(f3);
for (var j = 0; j < 5; ++j) f3(1);
%OptimizeFunctionOnNextCall(f3);
f3(1);
diff --git a/deps/v8/test/mjsunit/es6/call-with-spread-modify-array-iterator.js b/deps/v8/test/mjsunit/es6/call-with-spread-modify-array-iterator.js
index ef18cd3f33..aeeeb31f6b 100644
--- a/deps/v8/test/mjsunit/es6/call-with-spread-modify-array-iterator.js
+++ b/deps/v8/test/mjsunit/es6/call-with-spread-modify-array-iterator.js
@@ -15,6 +15,7 @@
return maxWithZero(x, y);
}
+ %PrepareFunctionForOptimization(testMax);
testMax(1, 2);
testMax(1, 2);
%OptimizeFunctionOnNextCall(testMax);
diff --git a/deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js b/deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js
index 3cae94ff9d..3ac6b024ca 100644
--- a/deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js
+++ b/deps/v8/test/mjsunit/es6/call-with-spread-modify-next.js
@@ -20,6 +20,7 @@
return maxWithZero(x, y);
}
+ %PrepareFunctionForOptimization(testMax);
testMax(1, 2);
testMax(1, 2);
%OptimizeFunctionOnNextCall(testMax);
diff --git a/deps/v8/test/mjsunit/es6/call-with-spread.js b/deps/v8/test/mjsunit/es6/call-with-spread.js
index 3a1744a72a..be47d428ee 100644
--- a/deps/v8/test/mjsunit/es6/call-with-spread.js
+++ b/deps/v8/test/mjsunit/es6/call-with-spread.js
@@ -8,6 +8,7 @@
'use strict';
function testBaselineAndOpt(func) {
+ %PrepareFunctionForOptimization(func);
func(-1, -2);
func(-1, -2);
%OptimizeFunctionOnNextCall(func);
@@ -76,6 +77,7 @@
(function() {
function testBaselineAndOpt(func) {
+ %PrepareFunctionForOptimization(func);
func(-1, -2);
func(-1, -2);
%OptimizeFunctionOnNextCall(func);
diff --git a/deps/v8/test/mjsunit/es6/classes.js b/deps/v8/test/mjsunit/es6/classes.js
index 27121ec007..6c7a0fb869 100644
--- a/deps/v8/test/mjsunit/es6/classes.js
+++ b/deps/v8/test/mjsunit/es6/classes.js
@@ -728,6 +728,9 @@ function assertAccessorDescriptor(object, name) {
function invoke_constructor() { A() }
function call_constructor() { A.call() }
function apply_constructor() { A.apply() }
+ %PrepareFunctionForOptimization(invoke_constructor);
+ %PrepareFunctionForOptimization(call_constructor);
+ %PrepareFunctionForOptimization(apply_constructor);
for (var i=0; i<3; i++) {
assertThrows(invoke_constructor);
@@ -1106,6 +1109,7 @@ function testClassRestrictedProperties(C) {
" return new clazz(i); })";
let fn = eval(evalString);
+ %PrepareFunctionForOptimization(fn);
assertEquals(fn(1).value, 1);
assertEquals(fn(2).value, 2);
assertEquals(fn(3).value, 3);
@@ -1138,6 +1142,7 @@ function testClassRestrictedProperties(C) {
let fn = eval(evalString);
+ %PrepareFunctionForOptimization(fn);
assertEquals(fn(1).value, 1);
assertEquals(fn(2).value, 2);
assertEquals(fn(3).value, 3);
@@ -1174,6 +1179,7 @@ function testClassRestrictedProperties(C) {
" return (new clazz(i)); })";
let fn = eval(evalString);
+ %PrepareFunctionForOptimization(fn);
assertEquals(fn(1).value, 1);
assertEquals(fn(2).value, 2);
assertEquals(fn(3).value, 3);
diff --git a/deps/v8/test/mjsunit/es6/collection-iterator.js b/deps/v8/test/mjsunit/es6/collection-iterator.js
index 8257d96664..027584a845 100644
--- a/deps/v8/test/mjsunit/es6/collection-iterator.js
+++ b/deps/v8/test/mjsunit/es6/collection-iterator.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function test(f) {
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
index d6fa548179..29b65dc358 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-custom-iterator.js
@@ -25,6 +25,7 @@ function TestSetWithCustomIterator(ctor) {
// code which causes the code to deopt.
global = entries;
}
+%PrepareFunctionForOptimization(TestSetWithCustomIterator);
TestSetWithCustomIterator(Set);
TestSetWithCustomIterator(Set);
TestSetWithCustomIterator(Set);
@@ -33,6 +34,7 @@ TestSetWithCustomIterator(Set);
assertOptimized(TestSetWithCustomIterator);
TestSetWithCustomIterator(WeakSet);
+%PrepareFunctionForOptimization(TestSetWithCustomIterator);
TestSetWithCustomIterator(WeakSet);
TestSetWithCustomIterator(WeakSet);
%OptimizeFunctionOnNextCall(TestSetWithCustomIterator);
@@ -58,6 +60,7 @@ function TestMapWithCustomIterator(ctor) {
// code which causes the code to deopt.
global = entries;
}
+%PrepareFunctionForOptimization(TestMapWithCustomIterator);
TestMapWithCustomIterator(Map);
TestMapWithCustomIterator(Map);
TestMapWithCustomIterator(Map);
@@ -66,6 +69,7 @@ TestMapWithCustomIterator(Map);
assertOptimized(TestMapWithCustomIterator);
TestMapWithCustomIterator(WeakMap);
+%PrepareFunctionForOptimization(TestMapWithCustomIterator);
TestMapWithCustomIterator(WeakMap);
TestMapWithCustomIterator(WeakMap);
%OptimizeFunctionOnNextCall(TestMapWithCustomIterator);
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js b/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js
index 50308fdde3..514e54630c 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-iterator-side-effect.js
@@ -25,6 +25,7 @@ function TestSetWithModifiedIterator(ctor) {
arrayIteratorProto.next = originalNext;
}
+%PrepareFunctionForOptimization(TestSetWithModifiedIterator);
TestSetWithModifiedIterator(Set);
TestSetWithModifiedIterator(Set);
TestSetWithModifiedIterator(Set);
@@ -33,6 +34,7 @@ TestSetWithModifiedIterator(Set);
assertOptimized(TestSetWithModifiedIterator);
%DeoptimizeFunction(TestSetWithModifiedIterator);
+%PrepareFunctionForOptimization(TestSetWithModifiedIterator);
TestSetWithModifiedIterator(WeakSet);
TestSetWithModifiedIterator(WeakSet);
TestSetWithModifiedIterator(WeakSet);
@@ -63,6 +65,7 @@ function TestMapWithModifiedIterator(ctor) {
arrayIteratorProto.next = originalNext;
}
+%PrepareFunctionForOptimization(TestMapWithModifiedIterator);
TestMapWithModifiedIterator(Map);
TestMapWithModifiedIterator(Map);
TestMapWithModifiedIterator(Map);
@@ -71,6 +74,7 @@ TestMapWithModifiedIterator(Map);
assertOptimized(TestMapWithModifiedIterator);
%DeoptimizeFunction(TestMapWithModifiedIterator);
+%PrepareFunctionForOptimization(TestMapWithModifiedIterator);
TestMapWithModifiedIterator(WeakMap);
TestMapWithModifiedIterator(WeakMap);
TestMapWithModifiedIterator(WeakMap);
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
index cc441b1ad4..0353be3205 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-array-prototype.js
@@ -20,6 +20,7 @@ function TestSetWithCustomIterator(ctor) {
assertTrue(set.has(k2));
assertEquals(2, callCount);
}
+%PrepareFunctionForOptimization(TestSetWithCustomIterator);
TestSetWithCustomIterator(Set);
TestSetWithCustomIterator(Set);
TestSetWithCustomIterator(Set);
@@ -28,6 +29,7 @@ TestSetWithCustomIterator(Set);
assertOptimized(TestSetWithCustomIterator);
TestSetWithCustomIterator(WeakSet);
+%PrepareFunctionForOptimization(TestSetWithCustomIterator);
TestSetWithCustomIterator(WeakSet);
TestSetWithCustomIterator(WeakSet);
%OptimizeFunctionOnNextCall(TestSetWithCustomIterator);
@@ -50,6 +52,7 @@ function TestMapWithCustomIterator(ctor) {
assertEquals(2, map.get(k2));
assertEquals(2, callCount);
}
+%PrepareFunctionForOptimization(TestMapWithCustomIterator);
TestMapWithCustomIterator(Map);
TestMapWithCustomIterator(Map);
TestMapWithCustomIterator(Map);
@@ -58,6 +61,7 @@ TestMapWithCustomIterator(Map);
assertOptimized(TestMapWithCustomIterator);
TestMapWithCustomIterator(WeakMap);
+%PrepareFunctionForOptimization(TestMapWithCustomIterator);
TestMapWithCustomIterator(WeakMap);
TestMapWithCustomIterator(WeakMap);
%OptimizeFunctionOnNextCall(TestMapWithCustomIterator);
diff --git a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
index a427895243..91b8767403 100644
--- a/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
+++ b/deps/v8/test/mjsunit/es6/collections-constructor-with-modified-protoype.js
@@ -24,6 +24,7 @@ function TestSetPrototypeModified(ctor) {
ctor.prototype.add = originalPrototypeAdd;
}
+%PrepareFunctionForOptimization(TestSetPrototypeModified);
TestSetPrototypeModified(Set);
TestSetPrototypeModified(Set);
TestSetPrototypeModified(Set);
@@ -32,6 +33,7 @@ TestSetPrototypeModified(Set);
assertOptimized(TestSetPrototypeModified);
%DeoptimizeFunction(TestSetPrototypeModified);
+%PrepareFunctionForOptimization(TestSetPrototypeModified);
TestSetPrototypeModified(WeakSet);
TestSetPrototypeModified(WeakSet);
TestSetPrototypeModified(WeakSet);
@@ -60,6 +62,7 @@ function TestMapPrototypeModified(ctor) {
ctor.prototype.set = originalPrototypeSet;
}
+%PrepareFunctionForOptimization(TestMapPrototypeModified);
TestMapPrototypeModified(Map);
TestMapPrototypeModified(Map);
TestMapPrototypeModified(Map);
@@ -68,6 +71,7 @@ TestMapPrototypeModified(Map);
assertOptimized(TestMapPrototypeModified);
%DeoptimizeFunction(TestMapPrototypeModified);
+%PrepareFunctionForOptimization(TestMapPrototypeModified);
TestMapPrototypeModified(WeakMap);
TestMapPrototypeModified(WeakMap);
TestMapPrototypeModified(WeakMap);
diff --git a/deps/v8/test/mjsunit/es6/computed-property-names-deopt.js b/deps/v8/test/mjsunit/es6/computed-property-names-deopt.js
index 2f3a597f11..5d0e257fc7 100644
--- a/deps/v8/test/mjsunit/es6/computed-property-names-deopt.js
+++ b/deps/v8/test/mjsunit/es6/computed-property-names-deopt.js
@@ -23,6 +23,7 @@
return { [name]: value, __proto__: deoptMe() };
}
+ %PrepareFunctionForOptimization(f);
checkObject("a", 1, f("a", 1));
checkObject("b", 2, f("b", 2));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount-nolazy.js b/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount-nolazy.js
index 0317509194..1ee6c8c858 100644
--- a/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount-nolazy.js
+++ b/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount-nolazy.js
@@ -20,6 +20,7 @@ function g({x = {a:10,b:20}},
assertSame(0, n.length);
assertTrue(p.test("abc"));
}
+%PrepareFunctionForOptimization(g);
g({},{});
%OptimizeFunctionOnNextCall(g);
g({},{});
@@ -35,6 +36,7 @@ var h = ({x = {a:10,b:20}},
assertSame(0, n.length);
assertTrue(p.test("abc"));
};
+%PrepareFunctionForOptimization(h);
h({},{});
%OptimizeFunctionOnNextCall(h);
h({},{});
diff --git a/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount.js b/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount.js
index 77a3226788..f399f90b5e 100644
--- a/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount.js
+++ b/deps/v8/test/mjsunit/es6/destructuring-parameters-literalcount.js
@@ -20,6 +20,7 @@ function g({x = {a:10,b:20}},
assertSame(0, n.length);
assertTrue(p.test("abc"));
}
+%PrepareFunctionForOptimization(g);
g({},{});
%OptimizeFunctionOnNextCall(g);
g({},{});
@@ -35,6 +36,7 @@ var h = ({x = {a:10,b:20}},
assertSame(0, n.length);
assertTrue(p.test("abc"));
};
+%PrepareFunctionForOptimization(h);
h({},{});
%OptimizeFunctionOnNextCall(h);
h({},{});
diff --git a/deps/v8/test/mjsunit/es6/indexed-integer-exotics.js b/deps/v8/test/mjsunit/es6/indexed-integer-exotics.js
index 85ae3692d8..7c2ba9e605 100644
--- a/deps/v8/test/mjsunit/es6/indexed-integer-exotics.js
+++ b/deps/v8/test/mjsunit/es6/indexed-integer-exotics.js
@@ -49,6 +49,7 @@ check();
function f() { return array["-1"]; }
+%PrepareFunctionForOptimization(f);
for (var i = 0; i < 3; i++) {
assertEquals(undefined, f());
}
diff --git a/deps/v8/test/mjsunit/es6/instanceof-proxies.js b/deps/v8/test/mjsunit/es6/instanceof-proxies.js
index 86b104ce70..3294ccb625 100644
--- a/deps/v8/test/mjsunit/es6/instanceof-proxies.js
+++ b/deps/v8/test/mjsunit/es6/instanceof-proxies.js
@@ -11,6 +11,7 @@
function foo(x) {
return x instanceof Array;
}
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo([]));
assertFalse(foo({}));
%OptimizeFunctionOnNextCall(foo);
@@ -42,6 +43,7 @@
}
return false;
}
+ %PrepareFunctionForOptimization(foo_catch);
assertTrue(foo_catch(o));
%OptimizeFunctionOnNextCall(foo_catch);
assertTrue(foo_catch(o));
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect.js
index 813fffccf7..b8a6ec0467 100644
--- a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect.js
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect.js
@@ -33,6 +33,7 @@ function TestMapConstructorEntrySideEffect(ctor) {
assertFalse(col.has(k3));
}
+%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
@@ -41,6 +42,7 @@ TestMapConstructorEntrySideEffect(Map);
assertOptimized(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(WeakMap);
+%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(WeakMap);
TestMapConstructorEntrySideEffect(WeakMap);
%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js
index 0c167c1bfa..de92b8d211 100644
--- a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect2.js
@@ -38,6 +38,7 @@ function TestMapConstructorEntrySideEffect(ctor) {
ctor.prototype.set = originalPrototypeSet;
}
+%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
@@ -45,7 +46,9 @@ TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
assertOptimized(TestMapConstructorEntrySideEffect);
+// This call would deopt
TestMapConstructorEntrySideEffect(WeakMap);
+%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(WeakMap);
TestMapConstructorEntrySideEffect(WeakMap);
%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js
index 7dd7aa7852..c56a552bdc 100644
--- a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect3.js
@@ -28,6 +28,7 @@ function TestMapConstructorEntrySideEffect(ctor) {
assertTrue(col.has(k3));
}
+%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
@@ -36,6 +37,7 @@ TestMapConstructorEntrySideEffect(Map);
assertOptimized(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(WeakMap);
+%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(WeakMap);
TestMapConstructorEntrySideEffect(WeakMap);
%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
diff --git a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect4.js b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect4.js
index ebf8c790ed..3ed5e79d4d 100644
--- a/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect4.js
+++ b/deps/v8/test/mjsunit/es6/map-constructor-entry-side-effect4.js
@@ -38,6 +38,7 @@ function TestMapConstructorEntrySideEffect(ctor) {
assertEquals(3, col.get(k3));
}
+%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
@@ -45,7 +46,9 @@ TestMapConstructorEntrySideEffect(Map);
TestMapConstructorEntrySideEffect(Map);
assertOptimized(TestMapConstructorEntrySideEffect);
+// This call would deopt
TestMapConstructorEntrySideEffect(WeakMap);
+%PrepareFunctionForOptimization(TestMapConstructorEntrySideEffect);
TestMapConstructorEntrySideEffect(WeakMap);
TestMapConstructorEntrySideEffect(WeakMap);
%OptimizeFunctionOnNextCall(TestMapConstructorEntrySideEffect);
diff --git a/deps/v8/test/mjsunit/es6/math-clz32.js b/deps/v8/test/mjsunit/es6/math-clz32.js
index 3cbd4c3fcc..9fa6c8461d 100644
--- a/deps/v8/test/mjsunit/es6/math-clz32.js
+++ b/deps/v8/test/mjsunit/es6/math-clz32.js
@@ -30,6 +30,7 @@ function f(e) {
}
}
+%PrepareFunctionForOptimization(f);
f(5);
f(5);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/es6/math-fround.js b/deps/v8/test/mjsunit/es6/math-fround.js
index c53396a38a..0c15737e58 100644
--- a/deps/v8/test/mjsunit/es6/math-fround.js
+++ b/deps/v8/test/mjsunit/es6/math-fround.js
@@ -19,6 +19,7 @@ assertTrue(isNaN(Math.fround({ valueOf: function() { return "abc"; } })));
function unopt(x) { return Math.fround(x); }
function opt(y) { return Math.fround(y); }
+%PrepareFunctionForOptimization(opt);
opt(0.1);
opt(0.1);
unopt(0.1);
diff --git a/deps/v8/test/mjsunit/es6/math-trunc.js b/deps/v8/test/mjsunit/es6/math-trunc.js
index c925b5b363..9a79a1f2d4 100644
--- a/deps/v8/test/mjsunit/es6/math-trunc.js
+++ b/deps/v8/test/mjsunit/es6/math-trunc.js
@@ -35,12 +35,14 @@ function testTrunc(expected, input) {
assertEquals(expected, test(input));
assertEquals(expected, test(input));
assertEquals(expected, test(input));
+ %PrepareFunctionForOptimization(test);
%OptimizeFunctionOnNextCall(test);
assertEquals(expected, test(input));
var test_double_input = new Function(
'n',
'"' + (test_id++) + '";return Math.trunc(+n)');
+ %PrepareFunctionForOptimization(test_double_input);
assertEquals(expected, test_double_input(input));
assertEquals(expected, test_double_input(input));
assertEquals(expected, test_double_input(input));
@@ -50,6 +52,7 @@ function testTrunc(expected, input) {
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.trunc(n) + -0.0');
+ %PrepareFunctionForOptimization(test_double_output);
assertEquals(expected, test_double_output(input));
assertEquals(expected, test_double_output(input));
assertEquals(expected, test_double_output(input));
@@ -63,6 +66,7 @@ function test() {
function itrunc(x) {
return 1 / Math.trunc(x);
}
+ %PrepareFunctionForOptimization(itrunc);
assertEquals(Infinity, itrunc(0));
assertEquals(-Infinity, itrunc(-0));
assertEquals(Infinity, itrunc(Math.PI / 4));
diff --git a/deps/v8/test/mjsunit/es6/object-literals-method.js b/deps/v8/test/mjsunit/es6/object-literals-method.js
index 90bc51ec03..c4a87263db 100644
--- a/deps/v8/test/mjsunit/es6/object-literals-method.js
+++ b/deps/v8/test/mjsunit/es6/object-literals-method.js
@@ -144,6 +144,7 @@
var object = {
method() { return 42; }
};
+ %PrepareFunctionForOptimization(object.method);
assertEquals(42, object.method());
assertEquals(42, object.method());
%OptimizeFunctionOnNextCall(object.method);
diff --git a/deps/v8/test/mjsunit/es6/proxies-cross-realm-exception.js b/deps/v8/test/mjsunit/es6/proxies-cross-realm-exception.js
index ffba5c2d81..571c7ca86f 100644
--- a/deps/v8/test/mjsunit/es6/proxies-cross-realm-exception.js
+++ b/deps/v8/test/mjsunit/es6/proxies-cross-realm-exception.js
@@ -20,6 +20,7 @@ assertFalse(Realm.eval(realm, "1; Realm.global(0) instanceof Object"));
// Test that the instannceof check works in optimized code.
var test = Realm.eval(realm,
"()=>{1.1; return Realm.global(0) instanceof Object; }");
+%PrepareFunctionForOptimization(test);
assertFalse(test());
test();
test();
diff --git a/deps/v8/test/mjsunit/es6/proxies-get-own-property-descriptor.js b/deps/v8/test/mjsunit/es6/proxies-get-own-property-descriptor.js
index 441ff16ad9..ca50f6f3c0 100644
--- a/deps/v8/test/mjsunit/es6/proxies-get-own-property-descriptor.js
+++ b/deps/v8/test/mjsunit/es6/proxies-get-own-property-descriptor.js
@@ -93,11 +93,11 @@ assertEquals(undefined, Object.getOwnPropertyDescriptor(proxy, "nonexistent"));
// (Inv-4) "A property cannot be reported as existent, if it does not exist as
// an own property of the target object and the target object is not
// extensible."
-var existent_desc = {value: "yes"};
+var existent_desc = {value: "yes", writable: true};
handler.getOwnPropertyDescriptor = function() { return existent_desc; };
assertThrows('Object.getOwnPropertyDescriptor(proxy, "nonexistent")');
assertEquals(
- {value: "yes", writable: false, enumerable: false, configurable: false},
+ {value: "yes", writable: true, enumerable: false, configurable: false},
Object.getOwnPropertyDescriptor(proxy, "configurable"));
// Checking individual bailout points in the implementation:
diff --git a/deps/v8/test/mjsunit/es6/proxies.js b/deps/v8/test/mjsunit/es6/proxies.js
index fc59b346b7..d96e5b350e 100644
--- a/deps/v8/test/mjsunit/es6/proxies.js
+++ b/deps/v8/test/mjsunit/es6/proxies.js
@@ -1510,6 +1510,7 @@ function TestConstructorWithProxyPrototype2(create, handler) {
function f() {
return o.x;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(10, f());
assertEquals(10, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/es6/reflect-define-property.js b/deps/v8/test/mjsunit/es6/reflect-define-property.js
index 6ead02ba74..8045982dbc 100644
--- a/deps/v8/test/mjsunit/es6/reflect-define-property.js
+++ b/deps/v8/test/mjsunit/es6/reflect-define-property.js
@@ -966,6 +966,7 @@ assertTrue(
obj2 = Object.create(obj1);
obj3 = Object.create(obj2);
+%PrepareFunctionForOptimization(testGetterOnProto);
testGetterOnProto(111, obj3);
testGetterOnProto(111, obj3);
%OptimizeFunctionOnNextCall(testGetterOnProto);
@@ -974,6 +975,7 @@ testGetterOnProto(111, obj3);
assertTrue(Reflect.defineProperty(obj1, "quebec", { get: anotherGetter }));
+%PrepareFunctionForOptimization(testGetterOnProto);
testGetterOnProto(222, obj3);
testGetterOnProto(222, obj3);
%OptimizeFunctionOnNextCall(testGetterOnProto);
@@ -997,6 +999,7 @@ assertTrue(
obj2 = Object.create(obj1);
obj3 = Object.create(obj2);
+%PrepareFunctionForOptimization(testSetterOnProto);
testSetterOnProto(445, obj3);
testSetterOnProto(445, obj3);
%OptimizeFunctionOnNextCall(testSetterOnProto);
@@ -1005,6 +1008,7 @@ testSetterOnProto(445, obj3);
assertTrue(Reflect.defineProperty(obj1, "romeo", { set: anotherSetter }));
+%PrepareFunctionForOptimization(testSetterOnProto);
testSetterOnProto(446, obj3);
testSetterOnProto(446, obj3);
%OptimizeFunctionOnNextCall(testSetterOnProto);
@@ -1023,6 +1027,7 @@ assertTrue(Reflect.defineProperty(obj1, "sierra",
obj2 = Object.create(obj1);
obj3 = Object.create(obj2);
+%PrepareFunctionForOptimization(testSetterOnProtoStrict);
testSetterOnProtoStrict(obj3);
testSetterOnProtoStrict(obj3);
%OptimizeFunctionOnNextCall(testSetterOnProtoStrict);
@@ -1051,6 +1056,7 @@ function Assign(o) {
function C() {}
+%PrepareFunctionForOptimization(Assign);
Assign(new C);
Assign(new C);
%OptimizeFunctionOnNextCall(Assign);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-347906.js b/deps/v8/test/mjsunit/es6/regress/regress-347906.js
index daa62f5df7..4358b895df 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-347906.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-347906.js
@@ -8,6 +8,7 @@ function foo() {
return Math.clz32(12.34);
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-3741.js b/deps/v8/test/mjsunit/es6/regress/regress-3741.js
index 0c5074a0f8..fe5bc05f17 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-3741.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-3741.js
@@ -19,6 +19,7 @@ function f24(deopt) {
}
+%PrepareFunctionForOptimization(f24);
for (var j = 0; j < 10; ++j) {
f24(12);
}
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-411237.js b/deps/v8/test/mjsunit/es6/regress/regress-411237.js
index ece6481737..b2b1a39bf6 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-411237.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-411237.js
@@ -4,6 +4,7 @@
// Flags: --allow-natives-syntax
+%PrepareFunctionForOptimization(print);
try {
%OptimizeFunctionOnNextCall(print);
} catch(e) { }
@@ -11,5 +12,6 @@ try {
try {
function* f() {
}
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
} catch(e) { }
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-4160.js b/deps/v8/test/mjsunit/es6/regress/regress-4160.js
index d5dd27022d..5c44061c43 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-4160.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-4160.js
@@ -7,6 +7,7 @@
(function(x) {
(function(x) {
var boom = (() => eval(x));
+ %PrepareFunctionForOptimization(boom);
assertEquals(23, boom());
assertEquals(23, boom());
%OptimizeFunctionOnNextCall(boom);
@@ -19,6 +20,7 @@
(function(x) {
(function(x) {
var boom = (() => (eval("var x = 66"), x));
+ %PrepareFunctionForOptimization(boom);
assertEquals(66, boom());
assertEquals(66, boom());
%OptimizeFunctionOnNextCall(boom);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-508074.js b/deps/v8/test/mjsunit/es6/regress/regress-508074.js
index f4d1a44255..0dd22ab46f 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-508074.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-508074.js
@@ -17,6 +17,7 @@ function g() {
f(6, 5, 4, 3, 2, 1);
};
+%PrepareFunctionForOptimization(g);
g();
g();
g();
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-5598.js b/deps/v8/test/mjsunit/es6/regress/regress-5598.js
index b07894f0fa..10956fd94c 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-5598.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-5598.js
@@ -9,6 +9,7 @@ function fn(a) {
return b;
}
+%PrepareFunctionForOptimization(fn);
fn('a');
fn('a');
%OptimizeFunctionOnNextCall(fn);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-666622.js b/deps/v8/test/mjsunit/es6/regress/regress-666622.js
index 56731ab28e..f73842dbfe 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-666622.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-666622.js
@@ -22,6 +22,7 @@ function testArray() {
} catch (e) {
}
}
+%PrepareFunctionForOptimization(testArray);
testArray();
testArray();
%OptimizeFunctionOnNextCall(testArray);
@@ -33,6 +34,7 @@ function testTypedArray() {
} catch (e) {
}
}
+%PrepareFunctionForOptimization(testTypedArray);
testTypedArray();
testTypedArray();
%OptimizeFunctionOnNextCall(testTypedArray);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-9234.js b/deps/v8/test/mjsunit/es6/regress/regress-9234.js
new file mode 100644
index 0000000000..e0c8b0582c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-9234.js
@@ -0,0 +1,35 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function returnFalsishStrict() {
+ "use strict";
+
+ function trySet(o) {
+ o["bla"] = 0;
+ }
+
+ var proxy = new Proxy({}, {});
+ var proxy2 = new Proxy({}, { set() { return ""; } });
+
+ trySet(proxy);
+ trySet(proxy);
+ assertThrows(() => trySet(proxy2), TypeError);
+})();
+
+(function privateSymbolStrict() {
+ "use strict";
+ var proxy = new Proxy({}, {});
+ var proxy2 = new Proxy({a: 1}, { set() { return true; } });
+
+ function trySet(o) {
+ var symbol = o == proxy2 ? %CreatePrivateSymbol("private"): 1;
+ o[symbol] = 0;
+ }
+
+ trySet(proxy);
+ trySet(proxy);
+ assertThrows(() => trySet(proxy2), TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-crbug-448730.js b/deps/v8/test/mjsunit/es6/regress/regress-crbug-448730.js
index a3c70acf6d..8b9a8a330c 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-crbug-448730.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-crbug-448730.js
@@ -8,6 +8,7 @@ function bar() {}
bar({ a: new Proxy({}, {}) });
function foo(x) { x.a.b == ""; }
var x = {a: {b: "" }};
+%PrepareFunctionForOptimization(foo);
foo(x);
foo(x);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-inlined-new-target.js b/deps/v8/test/mjsunit/es6/regress/regress-inlined-new-target.js
index 59932f6b4c..c6a617ffd1 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-inlined-new-target.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-inlined-new-target.js
@@ -7,6 +7,7 @@
function g() { return { val: new.target }; }
function f() { return (new g()).val; }
+%PrepareFunctionForOptimization(f);
assertEquals(g, f());
assertEquals(g, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/es6/spread-array-misc.js b/deps/v8/test/mjsunit/es6/spread-array-misc.js
index baed14629b..ceef31a451 100644
--- a/deps/v8/test/mjsunit/es6/spread-array-misc.js
+++ b/deps/v8/test/mjsunit/es6/spread-array-misc.js
@@ -45,6 +45,7 @@ assertEquals([1, 1, 1], f1(1));
function f1_(x) {
return [...[x, x, x]];
}
+%PrepareFunctionForOptimization(f1_);
assertEquals([1, 1, 1], f1_(1));
%OptimizeFunctionOnNextCall(f1_);
assertEquals([1, 1, 1], f1_(1));
@@ -64,6 +65,7 @@ assertEquals([1, 1, 1, ,], f2(1));
function f2_(x) {
return [...[x, x, x], ,];
}
+%PrepareFunctionForOptimization(f2_);
assertEquals([1, 1, 1, ,], f2_(1));
%OptimizeFunctionOnNextCall(f2_);
assertEquals([1, 1, 1, ,], f2_(1));
@@ -83,6 +85,7 @@ assertEquals([1, 0.1, "1", , ], f3(g(1, 0.1, "1")));
function f3_(it) {
return [...it, ,];
}
+%PrepareFunctionForOptimization(f3_);
assertEquals([1, 0.1, "1", , ], f3_(g(1, 0.1, "1")));
%OptimizeFunctionOnNextCall(f3_);
assertEquals([1, 0.1, "1", , ], f3_(g(1, 0.1, "1")));
@@ -102,6 +105,7 @@ assertEquals([1, 1, 1], f4(1));
function f4_(x) {
return [...[x, x, x]];
}
+%PrepareFunctionForOptimization(f4_);
assertEquals([1, 1, 1], f4_(1));
%OptimizeFunctionOnNextCall(f4_);
assertEquals([1, 1, 1], f4_(1));
@@ -121,6 +125,7 @@ assertEquals([1, 1, 1, ,], f5(1));
function f5_(x) {
return [...[x, x, x], ,];
}
+%PrepareFunctionForOptimization(f5_);
assertEquals([1, 1, 1, ,], f5_(1));
%OptimizeFunctionOnNextCall(f5_);
assertEquals([1, 1, 1, ,], f5_(1));
@@ -140,6 +145,7 @@ assertEquals([1, 0.1, "1", , ], f6(g(1, 0.1, "1")));
function f6_(it) {
return [...it, ,];
}
+%PrepareFunctionForOptimization(f6_);
assertEquals([1, 0.1, "1", , ], f6_(g(1, 0.1, "1")));
%OptimizeFunctionOnNextCall(f6_);
assertEquals([1, 0.1, "1", , ], f6_(g(1, 0.1, "1")));
@@ -159,6 +165,7 @@ assertEquals([1, 0.1, "1"], f7(G(1, 0.1, "1")));
function f7_(it) {
return [...it];
}
+%PrepareFunctionForOptimization(f7_);
assertEquals([1, 0.1, "1"], f7_(G(1, 0.1, "1")));
%OptimizeFunctionOnNextCall(f7_);
assertEquals([1, 0.1, "1"], f7_(G(1, 0.1, "1")));
@@ -178,6 +185,7 @@ assertEquals([1, 0.1, "1", , ], f8(G(1, 0.1, "1")));
function f8_(it) {
return [...it, ,];
}
+%PrepareFunctionForOptimization(f8_);
assertEquals([1, 0.1, "1", , ], f8_(G(1, 0.1, "1")));
%OptimizeFunctionOnNextCall(f8_);
assertEquals([1, 0.1, "1", , ], f8_(G(1, 0.1, "1")));
@@ -190,6 +198,7 @@ assertEquals([1, 0.1, "1", , ], f8_(G(1, 0.1, "1")));
function* f9() {
for (let i = 0; i < 160000; ++i) yield i;
}
+%PrepareFunctionForOptimization(f9);
let a = [...f9()];
assertEquals(160000, a.length);
assertEquals(0, a[0]);
@@ -263,6 +272,7 @@ function f10(b) {
...b];
return x.length;
}
+%PrepareFunctionForOptimization(f10);
assertEquals(4335, f10([3.3, 3.3, 3.3]));
assertEquals(4335, f10([{}, "", 3.3]));
%OptimizeFunctionOnNextCall(f10);
diff --git a/deps/v8/test/mjsunit/es6/spread-array-mutated-prototype.js b/deps/v8/test/mjsunit/es6/spread-array-mutated-prototype.js
index 5d29e7a8f0..029983b097 100644
--- a/deps/v8/test/mjsunit/es6/spread-array-mutated-prototype.js
+++ b/deps/v8/test/mjsunit/es6/spread-array-mutated-prototype.js
@@ -160,6 +160,7 @@ function id(v) {
function f() {
return [...'abc'];
}
+ %PrepareFunctionForOptimization(f);
assertArrayEquals(['a', 'b', 'c'], f());
%OptimizeFunctionOnNextCall(f);
assertArrayEquals(['a', 'b', 'c'], f());
@@ -194,6 +195,7 @@ function id(v) {
return ['a', ...['b', 'c', 'd'], 'e']
}
+ %PrepareFunctionForOptimization(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
%OptimizeFunctionOnNextCall(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
@@ -211,6 +213,7 @@ function id(v) {
return ['a', ...['b', 'c', 'd'], 'e']
}
+ %PrepareFunctionForOptimization(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
%OptimizeFunctionOnNextCall(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
@@ -228,6 +231,7 @@ function id(v) {
return ['a', ...['b', 'c', 'd'], 'e']
}
+ %PrepareFunctionForOptimization(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
%OptimizeFunctionOnNextCall(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
diff --git a/deps/v8/test/mjsunit/es6/spread-array-pristine-prototype.js b/deps/v8/test/mjsunit/es6/spread-array-pristine-prototype.js
index ea4d133703..9bf3a9a3f7 100644
--- a/deps/v8/test/mjsunit/es6/spread-array-pristine-prototype.js
+++ b/deps/v8/test/mjsunit/es6/spread-array-pristine-prototype.js
@@ -157,6 +157,7 @@ function id(v) {
function f() {
return [...'abc'];
}
+ %PrepareFunctionForOptimization(f);
assertArrayEquals(['a', 'b', 'c'], f());
%OptimizeFunctionOnNextCall(f);
assertArrayEquals(['a', 'b', 'c'], f());
diff --git a/deps/v8/test/mjsunit/es6/spread-array-prototype-proxy.js b/deps/v8/test/mjsunit/es6/spread-array-prototype-proxy.js
index ed38228c28..0c59c85086 100644
--- a/deps/v8/test/mjsunit/es6/spread-array-prototype-proxy.js
+++ b/deps/v8/test/mjsunit/es6/spread-array-prototype-proxy.js
@@ -13,6 +13,7 @@
return ['a', ...['b', 'c', 'd'], 'e']
}
+ %PrepareFunctionForOptimization(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
%OptimizeFunctionOnNextCall(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
diff --git a/deps/v8/test/mjsunit/es6/spread-array-prototype-setter1.js b/deps/v8/test/mjsunit/es6/spread-array-prototype-setter1.js
index 2ca9e21787..85cb9965e5 100644
--- a/deps/v8/test/mjsunit/es6/spread-array-prototype-setter1.js
+++ b/deps/v8/test/mjsunit/es6/spread-array-prototype-setter1.js
@@ -13,6 +13,7 @@
return ['a', ...['b', 'c', 'd'], 'e']
}
+ %PrepareFunctionForOptimization(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
%OptimizeFunctionOnNextCall(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
diff --git a/deps/v8/test/mjsunit/es6/spread-array-prototype-setter2.js b/deps/v8/test/mjsunit/es6/spread-array-prototype-setter2.js
index 736d50b46b..d20b9e2c5d 100644
--- a/deps/v8/test/mjsunit/es6/spread-array-prototype-setter2.js
+++ b/deps/v8/test/mjsunit/es6/spread-array-prototype-setter2.js
@@ -13,6 +13,7 @@
return ['a', ...['b', 'c', 'd'], 'e']
}
+ %PrepareFunctionForOptimization(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
%OptimizeFunctionOnNextCall(f);
assertArrayEquals(['a', 'b', 'c', 'd', 'e'], f());
diff --git a/deps/v8/test/mjsunit/es6/spread-call.js b/deps/v8/test/mjsunit/es6/spread-call.js
index 7403e0726e..3d3232fe05 100644
--- a/deps/v8/test/mjsunit/es6/spread-call.js
+++ b/deps/v8/test/mjsunit/es6/spread-call.js
@@ -161,6 +161,7 @@ function testSpreadCallsStrict() {
assertEquals(36, O.sum(0, ...[1], 2, 3, ...[4, 5], 6, 7, 8));
assertEquals(45, O.sum(0, ...[1], 2, 3, ...[4, 5], 6, 7, 8, ...[9]));
};
+%PrepareFunctionForOptimization(testSpreadCallsStrict);
testSpreadCallsStrict();
%OptimizeFunctionOnNextCall(testSpreadCallsStrict);
testSpreadCallsStrict();
diff --git a/deps/v8/test/mjsunit/es6/super-with-spread-modify-array-iterator.js b/deps/v8/test/mjsunit/es6/super-with-spread-modify-array-iterator.js
index c1311af81f..09f43b71b7 100644
--- a/deps/v8/test/mjsunit/es6/super-with-spread-modify-array-iterator.js
+++ b/deps/v8/test/mjsunit/es6/super-with-spread-modify-array-iterator.js
@@ -23,6 +23,7 @@
function testRestPoint(x, y) {
return new RestPoint(x, y);
}
+ %PrepareFunctionForOptimization(testRestPoint);
testRestPoint(1, 2);
testRestPoint(1, 2);
%OptimizeFunctionOnNextCall(testRestPoint);
diff --git a/deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js b/deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js
index cd7798b8d1..105830e1f3 100644
--- a/deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js
+++ b/deps/v8/test/mjsunit/es6/super-with-spread-modify-next.js
@@ -28,6 +28,7 @@
function testArgumentsPoint(x, y) {
return new ArgumentsPoint(x, y);
}
+ %PrepareFunctionForOptimization(testArgumentsPoint);
testArgumentsPoint(1, 2);
testArgumentsPoint(1, 2);
%OptimizeFunctionOnNextCall(testArgumentsPoint);
diff --git a/deps/v8/test/mjsunit/es6/super-with-spread.js b/deps/v8/test/mjsunit/es6/super-with-spread.js
index b0aea826d4..6fa73c04cd 100644
--- a/deps/v8/test/mjsunit/es6/super-with-spread.js
+++ b/deps/v8/test/mjsunit/es6/super-with-spread.js
@@ -15,6 +15,7 @@
}
function testBaselineAndOpt(func) {
+ %PrepareFunctionForOptimization(func);
func(1, 2);
func(1, 2);
%OptimizeFunctionOnNextCall(func);
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index a6c12909b4..20e6416495 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -43,6 +43,7 @@ function isValidSymbolString(s) {
function TestNew() {
function indirectSymbol() { return Symbol() }
function indirect() { return indirectSymbol() }
+ %PrepareFunctionForOptimization(indirect);
for (var i = 0; i < 2; ++i) {
for (var j = 0; j < 5; ++j) {
symbols.push(Symbol())
@@ -519,6 +520,7 @@ function TestComparison() {
var throwFuncs = [lt, gt, le, ge, lt_same, gt_same, le_same, ge_same];
for (var f of throwFuncs) {
+ %PrepareFunctionForOptimization(f);
assertThrows(f, TypeError);
%OptimizeFunctionOnNextCall(f);
assertThrows(f, TypeError);
diff --git a/deps/v8/test/mjsunit/es6/typedarray-neutered.js b/deps/v8/test/mjsunit/es6/typedarray-detached.js
index 55a76cdc4d..bd8e2a1766 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-neutered.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-detached.js
@@ -26,11 +26,9 @@ function TestArrayBufferCreation() {
assertThrows(function() { new ArrayBuffer(-10); }, RangeError);
assertThrows(function() { new ArrayBuffer(-2.567); }, RangeError);
-/* TODO[dslomov]: Reenable the test
assertThrows(function() {
var ab1 = new ArrayBuffer(0xFFFFFFFFFFFF)
}, RangeError);
-*/
var ab = new ArrayBuffer();
assertSame(0, ab.byteLength);
@@ -612,10 +610,9 @@ function TestTypedArraysWithIllegalIndicesStrict() {
assertEquals(255, a[s2]);
assertEquals(0, a[-0]);
- /* Chromium bug: 424619
- * a[-Infinity] = 50;
- * assertEquals(undefined, a[-Infinity]);
- */
+ a[-Infinity] = 50;
+ assertEquals(undefined, a[-Infinity]);
+
a[1.5] = 10;
assertEquals(undefined, a[1.5]);
var nan = Math.sqrt(-1);
diff --git a/deps/v8/test/mjsunit/es6/typedarray-tostring.js b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
index 16c6319b7a..f388881494 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-tostring.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
@@ -71,10 +71,8 @@ for (var constructor of typedArrayConstructors) {
assertEquals("1,2,3", o1.join());
assertEquals("1,2,3", o1.toString());
assertThrows(function() { o1.toLocaleString() }, TypeError);
- // TODO(littledan): Use the same function for TypedArray as for
- // Array, as the spec says (but Firefox doesn't do either).
- // Currently, using the same method leads to a bootstrap failure.
- // assertEquals(o1.toString, Array.prototype.toString);
+
+ assertEquals(o1.toString, Array.prototype.toString);
// Redefining length does not change result
var a5 = new constructor([1, 2, 3])
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index aab12341ac..1da65a5d65 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -46,11 +46,9 @@ function TestArrayBufferCreation() {
assertThrows(function() { new ArrayBuffer(-10); }, RangeError);
assertThrows(function() { new ArrayBuffer(-2.567); }, RangeError);
-/* TODO[dslomov]: Reenable the test
assertThrows(function() {
var ab1 = new ArrayBuffer(0xFFFFFFFFFFFF)
}, RangeError);
-*/
var ab = new ArrayBuffer();
assertSame(0, ab.byteLength);
@@ -815,10 +813,10 @@ function TestTypedArraysWithIllegalIndicesStrict() {
assertEquals(255, a[s2]);
assertEquals(0, a[-0]);
- /* Chromium bug: 424619
- * a[-Infinity] = 50;
- * assertEquals(undefined, a[-Infinity]);
- */
+
+ a[-Infinity] = 50;
+ assertEquals(undefined, a[-Infinity]);
+
a[1.5] = 10;
assertEquals(undefined, a[1.5]);
var nan = Math.sqrt(-1);
diff --git a/deps/v8/test/mjsunit/es9/object-spread-basic.js b/deps/v8/test/mjsunit/es9/object-spread-basic.js
index 8264da47a5..a0769b3a66 100644
--- a/deps/v8/test/mjsunit/es9/object-spread-basic.js
+++ b/deps/v8/test/mjsunit/es9/object-spread-basic.js
@@ -104,6 +104,52 @@ assertEquals(z, y = { ...p });
var x = { a:1 };
assertEquals(x, y = { set a(_) { throw new Error(); }, ...x });
+var prop = Object.getOwnPropertyDescriptor(y, 'a');
+assertEquals(prop.value, 1);
+assertFalse("set" in prop);
+assertTrue(prop.enumerable);
+assertTrue(prop.configurable);
+assertTrue(prop.writable);
-var x = { a:1 };
+var x = { a:2 };
assertEquals(x, y = { get a() { throw new Error(); }, ...x });
+var prop = Object.getOwnPropertyDescriptor(y, 'a');
+assertEquals(prop.value, 2);
+assertFalse("get" in prop);
+assertTrue(prop.enumerable);
+assertTrue(prop.configurable);
+assertTrue(prop.writable);
+
+var x = { a:3 };
+assertEquals(x, y = {
+ get a() {
+ throw new Error();
+ },
+ set a(_) {
+ throw new Error();
+ },
+ ...x
+});
+var prop = Object.getOwnPropertyDescriptor(y, 'a');
+assertEquals(prop.value, 3);
+assertFalse("get" in prop);
+assertFalse("set" in prop);
+assertTrue(prop.enumerable);
+assertTrue(prop.configurable);
+assertTrue(prop.writable);
+
+var x = Object.seal({ a:4 });
+assertEquals(x, y = { ...x });
+var prop = Object.getOwnPropertyDescriptor(y, 'a');
+assertEquals(prop.value, 4);
+assertTrue(prop.enumerable);
+assertTrue(prop.configurable);
+assertTrue(prop.writable);
+
+var x = Object.freeze({ a:5 });
+assertEquals(x, y = { ...x });
+var prop = Object.getOwnPropertyDescriptor(y, 'a');
+assertEquals(prop.value, 5);
+assertTrue(prop.enumerable);
+assertTrue(prop.configurable);
+assertTrue(prop.writable);
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-866357.js b/deps/v8/test/mjsunit/es9/regress/regress-866357.js
index 3b6230b0f4..2267fc27ea 100644
--- a/deps/v8/test/mjsunit/es9/regress/regress-866357.js
+++ b/deps/v8/test/mjsunit/es9/regress/regress-866357.js
@@ -10,6 +10,7 @@ var then = p.then = () => {};
function spread() { return { ...p }; }
+%PrepareFunctionForOptimization(spread);
assertEquals({ then }, spread());
assertEquals({ then }, spread());
assertEquals({ then }, spread());
diff --git a/deps/v8/test/mjsunit/es9/regress/regress-866727.js b/deps/v8/test/mjsunit/es9/regress/regress-866727.js
index ddfcf7edf9..1c9d1dd6f1 100644
--- a/deps/v8/test/mjsunit/es9/regress/regress-866727.js
+++ b/deps/v8/test/mjsunit/es9/regress/regress-866727.js
@@ -10,8 +10,9 @@ function test() {
try {
assertEquals({}, spread());
} catch (e) {}
-}
+};
+%PrepareFunctionForOptimization(test);
test();
test();
test();
diff --git a/deps/v8/test/mjsunit/fast-prototype.js b/deps/v8/test/mjsunit/fast-prototype.js
index ad00bec629..eb3331f674 100644
--- a/deps/v8/test/mjsunit/fast-prototype.js
+++ b/deps/v8/test/mjsunit/fast-prototype.js
@@ -43,6 +43,7 @@ function AddProps(obj) {
obj["x" + i] = 0;
}
}
+%EnsureFeedbackVectorForFunction(AddProps);
function DoProtoMagic(proto, set__proto__) {
@@ -58,9 +59,11 @@ function DoProtoMagic(proto, set__proto__) {
}
// Prototypes are made fast when ICs encounter them.
function ic() { return typeof receiver.foo; }
+ %EnsureFeedbackVectorForFunction(ic);
ic();
ic();
}
+%EnsureFeedbackVectorForFunction(DoProtoMagic);
function test(use_new, add_first, set__proto__) {
@@ -86,36 +89,41 @@ function test(use_new, add_first, set__proto__) {
}
return proto;
}
+%EnsureFeedbackVectorForFunction(test);
// TODO(mstarzinger): This test fails easily if gc happens at the wrong time.
gc();
-for (var i = 0; i < 4; i++) {
- var set__proto__ = ((i & 1) != 0);
- var use_new = ((i & 2) != 0);
+function test_fast_prototype() {
+ for (var i = 0; i < 4; i++) {
+ var set__proto__ = ((i & 1) != 0);
+ var use_new = ((i & 2) != 0);
- test(use_new, true, set__proto__);
- test(use_new, false, set__proto__);
-}
+ test(use_new, true, set__proto__);
+ test(use_new, false, set__proto__);
+ }
-var x = {a: 1, b: 2, c: 3};
-var o = { __proto__: x };
-assertFalse(%HasFastProperties(x));
-for (key in x) {
- assertTrue(key == 'a');
- break;
-}
-assertTrue(%HasFastProperties(x));
-delete x.b;
-for (key in x) {
- assertTrue(key == 'a');
- break;
-}
-assertTrue(%HasFastProperties(x));
-x.d = 4;
-assertTrue(%HasFastProperties(x));
-for (key in x) {
- assertTrue(key == 'a');
- break;
+ var x = {a: 1, b: 2, c: 3};
+ var o = { __proto__: x };
+ assertFalse(%HasFastProperties(x));
+ for (key in x) {
+ assertTrue(key == 'a');
+ break;
+ }
+ assertTrue(%HasFastProperties(x));
+ delete x.b;
+ for (key in x) {
+ assertTrue(key == 'a');
+ break;
+ }
+ assertTrue(%HasFastProperties(x));
+ x.d = 4;
+ assertTrue(%HasFastProperties(x));
+ for (key in x) {
+ assertTrue(key == 'a');
+ break;
+ }
}
+%EnsureFeedbackVectorForFunction(test_fast_prototype);
+test_fast_prototype();
diff --git a/deps/v8/test/mjsunit/field-type-tracking.js b/deps/v8/test/mjsunit/field-type-tracking.js
index 6fc8558469..1ff336a6b3 100644
--- a/deps/v8/test/mjsunit/field-type-tracking.js
+++ b/deps/v8/test/mjsunit/field-type-tracking.js
@@ -9,13 +9,13 @@
var o = { text: "Hello World!" };
function A() {
// Assign twice to make the field non-constant.
- // TODO(ishell): update test once constant field tracking is done.
this.a = {text: 'foo'};
this.a = o;
}
function readA(x) {
return x.a;
}
+ %PrepareFunctionForOptimization(readA);
var a = new A();
assertUnoptimized(readA);
readA(a); readA(a); readA(a);
@@ -27,6 +27,7 @@
b.b = o;
assertEquals(readA(b), o);
assertUnoptimized(readA);
+ %PrepareFunctionForOptimization(readA);
%OptimizeFunctionOnNextCall(readA);
assertEquals(readA(a), o);
assertOptimized(readA);
@@ -38,6 +39,7 @@
return x.a;
}
assertUnoptimized(readAFromB);
+ %PrepareFunctionForOptimization(readAFromB);
readAFromB(b); readAFromB(b); readAFromB(b);
%OptimizeFunctionOnNextCall(readAFromB);
assertEquals(readAFromB(b), o);
@@ -50,6 +52,8 @@
c.a = [1];
assertUnoptimized(readA);
assertUnoptimized(readAFromB);
+ %PrepareFunctionForOptimization(readA);
+ %PrepareFunctionForOptimization(readAFromB);
assertEquals(readA(a), o);
assertEquals(readA(b), o);
assertEquals(readA(c), [1]);
@@ -78,7 +82,9 @@
A.prototype = {y: 20};
function B(o) { return o.a.y; }
function C() { this.a = new A(); }
+ %EnsureFeedbackVectorForFunction(C);
+ %PrepareFunctionForOptimization(B);
B(new C());
B(new C());
%OptimizeFunctionOnNextCall(B);
@@ -89,6 +95,7 @@
assertEquals(10, B(c));
assertUnoptimized(B);
+ %PrepareFunctionForOptimization(B);
var c = new C();
%OptimizeFunctionOnNextCall(B);
assertEquals(20, B(c));
@@ -111,13 +118,13 @@
(function() {
function Foo(x) { this.x = x; }
- // TODO(ishell): update test once constant field tracking is done.
var f0 = new Foo({x: 0});
f0.x = {x: 0}; // make Foo.x non-constant here.
var f1 = new Foo({x: 1});
var f2 = new Foo({x: 2});
var f3 = new Foo({x: 3});
function readX(f) { return f.x.x; }
+ %PrepareFunctionForOptimization(readX);
assertEquals(readX(f1), 1);
assertEquals(readX(f2), 2);
assertUnoptimized(readX);
@@ -125,6 +132,7 @@
assertEquals(readX(f3), 3);
assertOptimized(readX);
function writeX(f, x) { f.x = x; }
+ %PrepareFunctionForOptimization(writeX);
writeX(f1, {x: 11});
writeX(f2, {x: 22});
assertUnoptimized(writeX);
@@ -148,6 +156,7 @@
var f2 = new Narf(2);
var f3 = new Narf(3);
function baz(f, y) { f.y = y; }
+ %PrepareFunctionForOptimization(baz);
baz(f1, {b: 9});
baz(f2, {b: 9});
baz(f2, {b: 9});
@@ -163,6 +172,7 @@
function readA(o) { return o.x.a; }
var f = new Foo({a:1});
var b = new Bar({a:2});
+ %PrepareFunctionForOptimization(readA);
assertEquals(readA(f), 1);
assertEquals(readA(b), 2);
assertEquals(readA(f), 1);
diff --git a/deps/v8/test/mjsunit/filter-element-kinds.js b/deps/v8/test/mjsunit/filter-element-kinds.js
index 7853a33b9c..24dd31a5d1 100644
--- a/deps/v8/test/mjsunit/filter-element-kinds.js
+++ b/deps/v8/test/mjsunit/filter-element-kinds.js
@@ -77,6 +77,7 @@ function create(a) {
function runTest(test, kind, holey_predicate) {
// Verify built-in implementation produces correct results.
+ %PrepareFunctionForOptimization(test);
let a = test();
assertKind(kind, a);
holey_predicate(a);
diff --git a/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js b/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
index 8221665ccb..3289db7fc8 100644
--- a/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
+++ b/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
@@ -7,7 +7,7 @@
// Test that the information on which variables to allocate in context doesn't
// change when recompiling.
-(function TestVarInInnerFunction() {
+function TestVarInInnerFunction() {
// Introduce variables which would potentially be context allocated, depending
// on whether an inner function refers to them or not.
var a = 1;
@@ -26,13 +26,15 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestVarInInnerFunction);
+TestVarInInnerFunction();
// Other tests are the same, except that the shadowing variable "a" in inner
// functions is declared differently.
-(function TestLetInInnerFunction() {
+function TestLetInInnerFunction() {
var a = 1;
var b = 2;
var c = 3;
@@ -48,9 +50,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestLetInInnerFunction);
+TestLetInInnerFunction();
-(function TestConstInInnerFunction() {
+function TestConstInInnerFunction() {
var a = 1;
var b = 2;
var c = 3;
@@ -66,9 +70,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestConstInInnerFunction);
+TestConstInInnerFunction();
-(function TestInnerFunctionParameter() {
+function TestInnerFunctionParameter() {
var a = 1;
var b = 2;
var c = 3;
@@ -83,9 +89,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionParameter);
+TestInnerFunctionParameter();
-(function TestInnerFunctionRestParameter() {
+function TestInnerFunctionRestParameter() {
var a = 1;
var b = 2;
var c = 3;
@@ -100,9 +108,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionRestParameter);
+TestInnerFunctionRestParameter();
-(function TestInnerFunctionDestructuredParameter_1() {
+function TestInnerFunctionDestructuredParameter_1() {
var a = 1;
var b = 2;
var c = 3;
@@ -117,9 +127,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuredParameter_1);
+TestInnerFunctionDestructuredParameter_1();
-(function TestInnerFunctionDestructuredParameter_2() {
+function TestInnerFunctionDestructuredParameter_2() {
var a = 1;
var b = 2;
var c = 3;
@@ -134,9 +146,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuredParameter_2);
+TestInnerFunctionDestructuredParameter_2();
-(function TestInnerArrowFunctionParameter() {
+function TestInnerArrowFunctionParameter() {
var a = 1;
var b = 2;
var c = 3;
@@ -149,9 +163,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerArrowFunctionParameter);
+TestInnerArrowFunctionParameter();
-(function TestInnerArrowFunctionRestParameter() {
+function TestInnerArrowFunctionRestParameter() {
var a = 1;
var b = 2;
var c = 3;
@@ -164,9 +180,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerArrowFunctionRestParameter);
+TestInnerArrowFunctionRestParameter();
-(function TestInnerArrowFunctionDestructuredParameter_1() {
+function TestInnerArrowFunctionDestructuredParameter_1() {
var a = 1;
var b = 2;
var c = 3;
@@ -179,9 +197,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerArrowFunctionDestructuredParameter_1);
+TestInnerArrowFunctionDestructuredParameter_1();
-(function TestInnerArrowFunctionDestructuredParameter_2() {
+function TestInnerArrowFunctionDestructuredParameter_2() {
var a = 1;
var b = 2;
var c = 3;
@@ -194,9 +214,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerArrowFunctionDestructuredParameter_2);
+TestInnerArrowFunctionDestructuredParameter_2();
-(function TestInnerInnerFunctionParameter() {
+function TestInnerInnerFunctionParameter() {
var a = 1;
var b = 2;
var c = 3;
@@ -211,9 +233,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerInnerFunctionParameter);
+TestInnerInnerFunctionParameter();
-(function TestInnerInnerFunctionRestParameter() {
+function TestInnerInnerFunctionRestParameter() {
var a = 1;
var b = 2;
var c = 3;
@@ -228,9 +252,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerInnerFunctionRestParameter);
+TestInnerInnerFunctionRestParameter();
-(function TestInnerInnerFunctionDestructuredParameter_1() {
+function TestInnerInnerFunctionDestructuredParameter_1() {
var a = 1;
var b = 2;
var c = 3;
@@ -245,9 +271,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerInnerFunctionDestructuredParameter_1);
+TestInnerInnerFunctionDestructuredParameter_1();
-(function TestInnerInnerFunctionDestructuredParameter_2() {
+function TestInnerInnerFunctionDestructuredParameter_2() {
var a = 1;
var b = 2;
var c = 3;
@@ -262,9 +290,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerInnerFunctionDestructuredParameter_2);
+TestInnerInnerFunctionDestructuredParameter_2();
-(function TestInnerInnerArrowFunctionParameter() {
+function TestInnerInnerArrowFunctionParameter() {
var a = 1;
var b = 2;
var c = 3;
@@ -279,9 +309,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerInnerArrowFunctionParameter);
+TestInnerInnerArrowFunctionParameter();
-(function TestInnerInnerArrowFunctionRestParameter() {
+function TestInnerInnerArrowFunctionRestParameter() {
var a = 1;
var b = 2;
var c = 3;
@@ -296,9 +328,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerInnerArrowFunctionRestParameter);
+TestInnerInnerArrowFunctionRestParameter();
-(function TestInnerInnerArrowFunctionDestructuredParameter_1() {
+function TestInnerInnerArrowFunctionDestructuredParameter_1() {
var a = 1;
var b = 2;
var c = 3;
@@ -313,9 +347,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerInnerArrowFunctionDestructuredParameter_1);
+TestInnerInnerArrowFunctionDestructuredParameter_1();
-(function TestInnerInnerArrowFunctionDestructuredParameter_2() {
+function TestInnerInnerArrowFunctionDestructuredParameter_2() {
var a = 1;
var b = 2;
var c = 3;
@@ -330,9 +366,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerInnerArrowFunctionDestructuredParameter_2);
+TestInnerInnerArrowFunctionDestructuredParameter_2();
-(function TestInnerFunctionInnerFunction() {
+function TestInnerFunctionInnerFunction() {
var a = 1;
var b = 2;
var c = 3;
@@ -348,9 +386,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionInnerFunction);
+TestInnerFunctionInnerFunction();
-(function TestInnerFunctionSloppyBlockFunction() {
+function TestInnerFunctionSloppyBlockFunction() {
var a = 1;
var b = 2;
var c = 3;
@@ -366,9 +406,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionSloppyBlockFunction);
+TestInnerFunctionSloppyBlockFunction();
-(function TestInnerFunctionCatchVariable() {
+function TestInnerFunctionCatchVariable() {
var a = 1;
var b = 2;
var c = 3;
@@ -387,9 +429,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+};
+%PrepareFunctionForOptimization(TestInnerFunctionCatchVariable);
+TestInnerFunctionCatchVariable();
-(function TestInnerFunctionLoopVariable1() {
+function TestInnerFunctionLoopVariable1() {
var a = 1;
var b = 2;
var c = 3;
@@ -406,9 +450,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionLoopVariable1);
+TestInnerFunctionLoopVariable1();
-(function TestInnerFunctionLoopVariable2() {
+function TestInnerFunctionLoopVariable2() {
var a = 1;
var b = 2;
var c = 3;
@@ -425,9 +471,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionLoopVariable2);
+TestInnerFunctionLoopVariable2();
-(function TestInnerFunctionLoopVariable3() {
+function TestInnerFunctionLoopVariable3() {
var a = 1;
var b = 2;
var c = 3;
@@ -444,9 +492,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionLoopVariable3);
+TestInnerFunctionLoopVariable3();
-(function TestInnerFunctionLoopVariable4() {
+function TestInnerFunctionLoopVariable4() {
var a = 1;
var b = 2;
var c = 3;
@@ -463,9 +513,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionLoopVariable4);
+TestInnerFunctionLoopVariable4();
-(function TestInnerFunctionClass() {
+function TestInnerFunctionClass() {
var a = 1;
var b = 2;
var c = 3;
@@ -481,9 +533,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionClass);
+TestInnerFunctionClass();
-(function TestInnerFunctionDestructuring1() {
+function TestInnerFunctionDestructuring1() {
var a = 1;
var b = 2;
var c = 3;
@@ -499,9 +553,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuring1);
+TestInnerFunctionDestructuring1();
-(function TestInnerFunctionDestructuring2() {
+function TestInnerFunctionDestructuring2() {
var a = 1;
var b = 2;
var c = 3;
@@ -517,9 +573,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuring2);
+TestInnerFunctionDestructuring2();
-(function TestInnerFunctionDestructuring3() {
+function TestInnerFunctionDestructuring3() {
var a = 1;
var b = 2;
var c = 3;
@@ -535,9 +593,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuring3);
+TestInnerFunctionDestructuring3();
-(function TestInnerFunctionDestructuring4() {
+function TestInnerFunctionDestructuring4() {
var a = 1;
var b = 2;
var c = 3;
@@ -553,9 +613,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuring4);
+TestInnerFunctionDestructuring4();
-(function TestInnerFunctionDestructuring5() {
+function TestInnerFunctionDestructuring5() {
var a = 1;
var b = 2;
var c = 3;
@@ -571,9 +633,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuring5);
+TestInnerFunctionDestructuring5();
-(function TestInnerFunctionDestructuring6() {
+function TestInnerFunctionDestructuring6() {
var a = 1;
var b = 2;
var c = 3;
@@ -589,9 +653,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuring6);
+TestInnerFunctionDestructuring6();
-(function TestInnerFunctionDestructuring7() {
+function TestInnerFunctionDestructuring7() {
var a = 1;
var b = 2;
var c = 3;
@@ -607,9 +673,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuring7);
+TestInnerFunctionDestructuring7();
-(function TestInnerFunctionDestructuring8() {
+function TestInnerFunctionDestructuring8() {
var a = 1;
var b = 2;
var c = 3;
@@ -625,9 +693,11 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuring8);
+TestInnerFunctionDestructuring8();
-(function TestInnerFunctionDestructuring9() {
+function TestInnerFunctionDestructuring9() {
var a = 1;
var b = 2;
var c = 3;
@@ -643,11 +713,13 @@
assertEquals(2, b);
assertEquals(3, c);
}
-})();
+}
+%PrepareFunctionForOptimization(TestInnerFunctionDestructuring9);
+TestInnerFunctionDestructuring9();
// A cluster of similar tests where the inner function only declares a variable
// whose name clashes with an outer function variable name, but doesn't use it.
-(function TestRegress650969_1_var() {
+function TestRegress650969_1_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -657,9 +729,11 @@
var a;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_1_var);
+TestRegress650969_1_var();
-(function TestRegress650969_1_let() {
+function TestRegress650969_1_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -669,9 +743,11 @@
let a;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_1_let);
+TestRegress650969_1_let();
-(function TestRegress650969_2_var() {
+function TestRegress650969_2_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -681,9 +757,11 @@
var a = 6;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_2_var);
+TestRegress650969_2_var();
-(function TestRegress650969_2_let() {
+function TestRegress650969_2_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -693,9 +771,11 @@
let a = 6;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_2_let);
+TestRegress650969_2_let();
-(function TestRegress650969_2_const() {
+function TestRegress650969_2_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -705,9 +785,11 @@
const a = 6;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_2_const);
+TestRegress650969_2_const();
-(function TestRegress650969_3_var() {
+function TestRegress650969_3_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -717,9 +799,11 @@
var a, b;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_3_var);
+TestRegress650969_3_var();
-(function TestRegress650969_3_let() {
+function TestRegress650969_3_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -729,9 +813,11 @@
let a, b;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_3_let);
+TestRegress650969_3_let();
-(function TestRegress650969_4_var() {
+function TestRegress650969_4_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -741,9 +827,11 @@
var a = 6, b;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_4_var);
+TestRegress650969_4_var();
-(function TestRegress650969_4_let() {
+function TestRegress650969_4_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -753,9 +841,11 @@
let a = 6, b;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_4_let);
+TestRegress650969_4_let();
-(function TestRegress650969_4_const() {
+function TestRegress650969_4_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -765,9 +855,11 @@
const a = 0, b = 0;
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_4_const);
+TestRegress650969_4_const();
-(function TestRegress650969_9_parameter() {
+function TestRegress650969_9_parameter() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -775,9 +867,11 @@
var a;
function inner(a) {}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_9_parameter);
+TestRegress650969_9_parameter();
-(function TestRegress650969_9_restParameter() {
+function TestRegress650969_9_restParameter() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -785,9 +879,11 @@
var a;
function inner(...a) {}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_9_restParameter);
+TestRegress650969_9_restParameter();
-(function TestRegress650969_9_destructuredParameter_1() {
+function TestRegress650969_9_destructuredParameter_1() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -795,9 +891,11 @@
var a;
function inner([d, a]) {}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_9_destructuredParameter_1);
+TestRegress650969_9_destructuredParameter_1();
-(function TestRegress650969_9_destructuredParameter_2() {
+function TestRegress650969_9_destructuredParameter_2() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -805,9 +903,11 @@
var a;
function inner({d, a}) {}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_9_destructuredParameter_2);
+TestRegress650969_9_destructuredParameter_2();
-(function TestRegress650969_10_parameter() {
+function TestRegress650969_10_parameter() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -817,9 +917,11 @@
function innerinner(a) {}
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_10_parameter);
+TestRegress650969_10_parameter();
-(function TestRegress650969_10_restParameter() {
+function TestRegress650969_10_restParameter() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -829,9 +931,11 @@
function innerinner(...a) {}
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_10_restParameter);
+TestRegress650969_10_restParameter();
-(function TestRegress650969_10_destructuredParameter_1() {
+function TestRegress650969_10_destructuredParameter_1() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -841,9 +945,11 @@
function innerinner([d, a]) {}
}
}
-})();
+}
+%PrepareFunctionForOptimization( TestRegress650969_10_destructuredParameter_1);
+TestRegress650969_10_destructuredParameter_1();
-(function TestRegress650969_10_destructuredParameter_2() {
+function TestRegress650969_10_destructuredParameter_2() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -853,9 +959,11 @@
function innerinner({d, a}) {}
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_10_destructuredParameter_2);
+TestRegress650969_10_destructuredParameter_2();
-(function TestRegress650969_11_var() {
+function TestRegress650969_11_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -865,9 +973,12 @@
var [a, b] = [1, 2];
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_11_var);
+TestRegress650969_11_var();
-(function TestRegress650969_11_let() {
+
+function TestRegress650969_11_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -877,9 +988,11 @@
let [a, b] = [1, 2];
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_11_let);
+TestRegress650969_11_let();
-(function TestRegress650969_11_const() {
+function TestRegress650969_11_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -889,9 +1002,11 @@
const [a, b] = [1, 2];
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_11_const);
+TestRegress650969_11_const();
-(function TestRegress650969_12_var() {
+function TestRegress650969_12_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -901,9 +1016,11 @@
var [b, a] = [1, 2];
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_12_var);
+TestRegress650969_12_var();
-(function TestRegress650969_12_let() {
+function TestRegress650969_12_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -913,9 +1030,11 @@
let [b, a] = [1, 2];
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_12_let);
+TestRegress650969_12_let();
-(function TestRegress650969_12_const() {
+function TestRegress650969_12_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -925,9 +1044,11 @@
const [b, a] = [1, 2];
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_12_const);
+TestRegress650969_12_const();
-(function TestRegress650969_13_var() {
+function TestRegress650969_13_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -937,9 +1058,11 @@
var [b, ...a] = [1, 2];
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_13_var);
+TestRegress650969_13_var();
-(function TestRegress650969_13_let() {
+function TestRegress650969_13_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -949,9 +1072,11 @@
let [b, ...a] = [1, 2];
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_13_let);
+TestRegress650969_13_let();
-(function TestRegress650969_13_const() {
+function TestRegress650969_13_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -961,9 +1086,11 @@
const [b, ...a] = [1, 2];
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_13_const);
+TestRegress650969_13_const();
-(function TestRegress650969_14_var() {
+function TestRegress650969_14_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -973,9 +1100,11 @@
var {a, b} = {a: 1, b: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_14_var);
+TestRegress650969_14_var();
-(function TestRegress650969_14_let() {
+function TestRegress650969_14_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -985,9 +1114,11 @@
let {a, b} = {a: 1, b: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_14_let);
+TestRegress650969_14_let();
-(function TestRegress650969_14_const() {
+function TestRegress650969_14_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -997,9 +1128,11 @@
const {a, b} = {a: 1, b: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_14_const);
+TestRegress650969_14_const();
-(function TestRegress650969_15_var() {
+function TestRegress650969_15_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1009,9 +1142,11 @@
var {b: {a}, c} = {b: {a: 1}, c: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_15_var);
+TestRegress650969_15_var();
-(function TestRegress650969_15_let() {
+function TestRegress650969_15_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1021,9 +1156,11 @@
let {b: {a}, c} = {b: {a: 1}, c: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_15_let);
+TestRegress650969_15_let();
-(function TestRegress650969_15_const() {
+function TestRegress650969_15_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1033,9 +1170,11 @@
const {b: {a}, c} = {b: {a: 1}, c: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_15_const);
+TestRegress650969_15_const();
-(function TestRegress650969_16_var() {
+function TestRegress650969_16_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1045,9 +1184,11 @@
var {a: {b}, c} = {a: {b: 1}, c: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_16_var);
+TestRegress650969_16_var();
-(function TestRegress650969_16_let() {
+function TestRegress650969_16_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1057,9 +1198,11 @@
let {a: {b}, c} = {a: {b: 1}, c: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_16_let);
+TestRegress650969_16_let();
-(function TestRegress650969_16_const() {
+function TestRegress650969_16_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1069,9 +1212,11 @@
const {a: {b}, c} = {a: {b: 1}, c: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_16_const);
+TestRegress650969_16_const();
-(function TestRegress650969_17_var() {
+function TestRegress650969_17_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1081,9 +1226,11 @@
for (var a = 0; 0 == 1; ) { }
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_17_var);
+TestRegress650969_17_var();
-(function TestRegress650969_17_let() {
+function TestRegress650969_17_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1093,9 +1240,11 @@
for (let a = 0; 0 == 1; ) { }
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_17_let);
+TestRegress650969_17_let();
-(function TestRegress650969_17_const() {
+function TestRegress650969_17_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1105,9 +1254,11 @@
for (const a = 0; 0 == 1; ) { }
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_17_const);
+TestRegress650969_17_const();
-(function TestRegress650969_18() {
+function TestRegress650969_18() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1117,9 +1268,11 @@
function innerinner([a, b]) {}
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_18);
+TestRegress650969_18();
-(function TestRegress650969_18() {
+function TestRegress650969_18() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1129,11 +1282,13 @@
function innerinner(a) {}
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_18);
+TestRegress650969_18();
// Regression tests for an intermediate stage where unresolved references were
// discarded too aggressively.
-(function TestRegress650969_sidetrack_var() {
+function TestRegress650969_sidetrack_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1144,9 +1299,11 @@
var {b: {a}, c} = {b: {a: 1}, c: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_sidetrack_var);
+TestRegress650969_sidetrack_var();
-(function TestRegress650969_sidetrack_let() {
+function TestRegress650969_sidetrack_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1157,9 +1314,11 @@
let {b: {a}, c} = {b: {a: 1}, c: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_sidetrack_let);
+TestRegress650969_sidetrack_let();
-(function TestRegress650969_sidetrack_const() {
+function TestRegress650969_sidetrack_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -1170,4 +1329,6 @@
const {b: {a}, c} = {b: {a: 1}, c: 2};
}
}
-})();
+}
+%PrepareFunctionForOptimization(TestRegress650969_sidetrack_const);
+TestRegress650969_sidetrack_const();
diff --git a/deps/v8/test/mjsunit/generated-transition-stub.js b/deps/v8/test/mjsunit/generated-transition-stub.js
index e6d949bbaa..8da3265053 100644
--- a/deps/v8/test/mjsunit/generated-transition-stub.js
+++ b/deps/v8/test/mjsunit/generated-transition-stub.js
@@ -17,6 +17,7 @@ function test() {
// Test PACKED SMI -> PACKED DOUBLE
//
+ %PrepareFunctionForOptimization(transition1);
const a1 = [0, 1, 2, 3, 4];
transition1(a1, 0, 2.5);
const a2 = [0, 1, 2, 3, 4];
@@ -59,6 +60,7 @@ function test() {
a[i] = v;
}
+ %PrepareFunctionForOptimization(transition2);
const b1 = [0, 1, 2, , 4];
transition2(b1, 0, 2.5);
const b2 = [0, 1, 2, , 4];
@@ -93,6 +95,7 @@ function test() {
a[i] = v;
}
+ %PrepareFunctionForOptimization(transition3);
const c1 = [0, 1, 2, 3.5, 4];
transition3(c1, 0, new Object());
const c2 = [0, 1, 2, 3.5, 4];
@@ -147,6 +150,7 @@ function test() {
a[i] = v;
}
+ %PrepareFunctionForOptimization(transition4);
const d1 = [0, 1, , 3.5, 4];
transition4(d1, 0, new Object());
const d2 = [0, 1, , 3.5, 4];
diff --git a/deps/v8/test/mjsunit/getters-on-elements.js b/deps/v8/test/mjsunit/getters-on-elements.js
index d8cda83ad1..a8d2b9ea69 100644
--- a/deps/v8/test/mjsunit/getters-on-elements.js
+++ b/deps/v8/test/mjsunit/getters-on-elements.js
@@ -52,6 +52,7 @@ if (standalone) {
assertUnoptimized = empty_func;
assertOptimized = empty_func;
+ prepareForOptimize = emtpy_func;
optimize = empty_func;
clearFunctionTypeFeedback = empty_func;
deoptimizeFunction = empty_func;
@@ -59,6 +60,9 @@ if (standalone) {
optimize = function(name) {
%OptimizeFunctionOnNextCall(name);
}
+ prepareForOptimize = function(name) {
+ %PrepareFunctionForOptimization(name);
+ }
clearFunctionTypeFeedback = function(name) {
%ClearFunctionFeedback(name);
}
@@ -76,6 +80,7 @@ function base_getter_test(create_func) {
var ap = [];
ap.__defineGetter__(0, function() { calls++; return 0; });
+ prepareForOptimize(foo);
foo(a);
assertUnoptimized(foo);
// Smi and Double elements transition the KeyedLoadIC to Generic state
@@ -145,6 +150,7 @@ function base_getter_test(create_func) {
a = create_func();
ap2 = [];
a.__proto__ = ap2;
+ prepareForOptimize(foo);
foo(a);
foo(a);
foo(a);
@@ -165,6 +171,7 @@ function base_getter_test(create_func) {
a = create_func();
a.__proto__ = ap2;
bar = function(a) { return a[3] + 600; }
+ prepareForOptimize(bar);
bar(a);
bar(a);
bar(a);
@@ -207,6 +214,7 @@ for(var c = 0; c < cf.length; c++) {
var a = [3.5,,,3.5];
fun = function(a) { return a[0] + 5.5; }
+prepareForOptimize(fun);
fun(a);
fun(a);
fun(a); // should have a monomorphic KeyedLoadIC.
@@ -229,6 +237,7 @@ var a = [3.5,,,,3.5];
var ap = [,,3.5];
ap.__proto__ = a.__proto__;
a.__proto__ = ap;
+prepareForOptimize(fun);
fun(a);
optimize(fun);
fun(a);
diff --git a/deps/v8/test/mjsunit/global-infinity-strict.js b/deps/v8/test/mjsunit/global-infinity-strict.js
index 8ab1683411..e4ff33d04e 100644
--- a/deps/v8/test/mjsunit/global-infinity-strict.js
+++ b/deps/v8/test/mjsunit/global-infinity-strict.js
@@ -6,6 +6,7 @@
"use strict";
function test(expected, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(expected, f());
assertEquals(expected, f());
%OptimizeFunctionOnNextCall(f);
@@ -14,6 +15,7 @@ function test(expected, f) {
}
function testThrows(f) {
+ %PrepareFunctionForOptimization(f);
assertThrows(f);
assertThrows(f);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/global-nan-strict.js b/deps/v8/test/mjsunit/global-nan-strict.js
index 075d03ca87..79200e30ea 100644
--- a/deps/v8/test/mjsunit/global-nan-strict.js
+++ b/deps/v8/test/mjsunit/global-nan-strict.js
@@ -6,6 +6,7 @@
"use strict";
function test(expected, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(expected, f());
assertEquals(expected, f());
%OptimizeFunctionOnNextCall(f);
@@ -14,6 +15,7 @@ function test(expected, f) {
}
function testThrows(f) {
+ %PrepareFunctionForOptimization(f);
assertThrows(f);
assertThrows(f);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/global-nan.js b/deps/v8/test/mjsunit/global-nan.js
index 5a98eff1c8..0ed4cbf2dd 100644
--- a/deps/v8/test/mjsunit/global-nan.js
+++ b/deps/v8/test/mjsunit/global-nan.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function test(expected, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(expected, f());
assertEquals(expected, f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/global-undefined-strict.js b/deps/v8/test/mjsunit/global-undefined-strict.js
index 9a0578a2fb..a50d8279b8 100644
--- a/deps/v8/test/mjsunit/global-undefined-strict.js
+++ b/deps/v8/test/mjsunit/global-undefined-strict.js
@@ -6,6 +6,7 @@
"use strict";
function test(expected, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(expected, f());
assertEquals(expected, f());
%OptimizeFunctionOnNextCall(f);
@@ -14,6 +15,7 @@ function test(expected, f) {
}
function testThrows(f) {
+ %PrepareFunctionForOptimization(f);
assertThrows(f);
assertThrows(f);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/global-undefined.js b/deps/v8/test/mjsunit/global-undefined.js
index 6190f6f552..53d8279996 100644
--- a/deps/v8/test/mjsunit/global-undefined.js
+++ b/deps/v8/test/mjsunit/global-undefined.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
function test(expected, f) {
+ %PrepareFunctionForOptimization(f);
assertEquals(expected, f());
assertEquals(expected, f());
%OptimizeFunctionOnNextCall(f);
@@ -13,6 +14,7 @@ function test(expected, f) {
}
function testThrows(f) {
+ %PrepareFunctionForOptimization(f);
assertThrows(f);
assertThrows(f);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js b/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js
index 3bf0148c95..21237488f4 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/regress-tonumbercode.js
@@ -9,6 +9,7 @@ function f(x, b) {
else return Math.trunc(Number(x))
}
+%PrepareFunctionForOptimization(f);
f("1", true);
f("2", true);
f("2", false);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/turbo.js b/deps/v8/test/mjsunit/harmony/bigint/turbo.js
index d0f00050c8..f32f22e982 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/turbo.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/turbo.js
@@ -19,6 +19,7 @@ function test(f, {input, check}) {
function Test(f, ...cases) {
for (let i = 0; i < cases.length; ++i) {
+ %PrepareFunctionForOptimization(f);
test(f, cases[i]);
%OptimizeFunctionOnNextCall(f);
for (let j = 0; j < cases.length; ++j) {
diff --git a/deps/v8/test/mjsunit/harmony/bigint/typedarray.js b/deps/v8/test/mjsunit/harmony/bigint/typedarray.js
index e530441dd4..93021a3983 100644
--- a/deps/v8/test/mjsunit/harmony/bigint/typedarray.js
+++ b/deps/v8/test/mjsunit/harmony/bigint/typedarray.js
@@ -8,6 +8,7 @@ var intarray = new BigInt64Array(8);
var uintarray = new BigUint64Array(8);
function test(f) {
+ %PrepareFunctionForOptimization(f);
f();
f(); // Make sure we test ICs.
f();
diff --git a/deps/v8/test/mjsunit/harmony/block-lazy-compile.js b/deps/v8/test/mjsunit/harmony/block-lazy-compile.js
index a6efcbfd10..2957a65430 100644
--- a/deps/v8/test/mjsunit/harmony/block-lazy-compile.js
+++ b/deps/v8/test/mjsunit/harmony/block-lazy-compile.js
@@ -43,6 +43,7 @@ function f() {
}
var o = f();
+%PrepareFunctionForOptimization(o);
assertEquals(1, o());
assertEquals(2, o());
assertEquals(3, o());
diff --git a/deps/v8/test/mjsunit/harmony/generators-reduced.js b/deps/v8/test/mjsunit/harmony/generators-reduced.js
index 8ea96c6ba5..158966632c 100644
--- a/deps/v8/test/mjsunit/harmony/generators-reduced.js
+++ b/deps/v8/test/mjsunit/harmony/generators-reduced.js
@@ -8,6 +8,7 @@
function* h() { try {yield 42} finally {yield 43} };
function* g() { yield* h(); };
+%PrepareFunctionForOptimization(g);
let x = g();
x.next();
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/harmony/generators.js b/deps/v8/test/mjsunit/harmony/generators.js
index b98164c135..65c88a5fe5 100644
--- a/deps/v8/test/mjsunit/harmony/generators.js
+++ b/deps/v8/test/mjsunit/harmony/generators.js
@@ -6,6 +6,7 @@
function MaybeOptimizeOrDeoptimize(f) {
+ %PrepareFunctionForOptimization(f);
let x = Math.random(); // --random-seed makes this deterministic
if (x <= 0.33) {
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/harmony/modules-import-namespace.js b/deps/v8/test/mjsunit/harmony/modules-import-namespace.js
index dfcd6cd502..fd0ad05fa4 100644
--- a/deps/v8/test/mjsunit/harmony/modules-import-namespace.js
+++ b/deps/v8/test/mjsunit/harmony/modules-import-namespace.js
@@ -12,6 +12,7 @@ function get() {
return m.stringlife;
}
+%PrepareFunctionForOptimization(get);
assertEquals("42", get());
assertEquals("42", get());
assertEquals("42", get());
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-ic.js b/deps/v8/test/mjsunit/harmony/private-fields-ic.js
index e7889b1e7b..95f91479fa 100644
--- a/deps/v8/test/mjsunit/harmony/private-fields-ic.js
+++ b/deps/v8/test/mjsunit/harmony/private-fields-ic.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-private-fields
-
{
class X {
#x = 1;
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-special-object.js b/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
index 0ade4305bf..a85019dd45 100644
--- a/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
+++ b/deps/v8/test/mjsunit/harmony/private-fields-special-object.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-private-fields --allow-natives-syntax
+// Flags: --allow-natives-syntax
load('test/mjsunit/test-async.js');
diff --git a/deps/v8/test/mjsunit/harmony/private-fields-static.js b/deps/v8/test/mjsunit/harmony/private-fields-static.js
index e4019cc32e..a731a35548 100644
--- a/deps/v8/test/mjsunit/harmony/private-fields-static.js
+++ b/deps/v8/test/mjsunit/harmony/private-fields-static.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-private-fields --allow-natives-syntax
+// Flags: --allow-natives-syntax
"use strict";
diff --git a/deps/v8/test/mjsunit/harmony/private-fields.js b/deps/v8/test/mjsunit/harmony/private-fields.js
index ec25bb743e..0c1c04bc75 100644
--- a/deps/v8/test/mjsunit/harmony/private-fields.js
+++ b/deps/v8/test/mjsunit/harmony/private-fields.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-private-fields --allow-natives-syntax
+// Flags: --allow-natives-syntax
"use strict";
diff --git a/deps/v8/test/mjsunit/harmony/private-methods.js b/deps/v8/test/mjsunit/harmony/private-methods.js
new file mode 100644
index 0000000000..e7784a29f5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/private-methods.js
@@ -0,0 +1,95 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-private-methods
+
+"use strict";
+
+{
+ class C {
+ #a() {}
+ }
+ new C;
+}
+
+{
+ class C {
+ #a() {
+ class B {
+ #a() { }
+ }
+ new B;
+ }
+ }
+ new C;
+}
+
+{
+ class A {
+ #a() {
+ class C extends A {
+ #c() { }
+ }
+ new C;
+ }
+ }
+
+ new A;
+}
+
+{
+ const C = class {
+ #a() { }
+ }
+ new C;
+}
+
+{
+ const C = class {
+ #a() {
+ const B = class {
+ #a() { }
+ }
+ new B;
+ }
+ }
+ new C;
+}
+
+{
+ class A {
+ constructor(arg) {
+ return arg;
+ }
+ }
+
+ class C extends A {
+ #x() { }
+
+ constructor(arg) {
+ super(arg);
+ }
+ }
+
+ // Add the brand twice on the same object.
+ let c1 = new C({});
+ assertThrows(() => new C(c1), TypeError);
+}
+
+{
+ // TODO(v8:9177): test extending a class expression that does not have
+ // a private method.
+ class D extends class {
+ #c() {}
+ } {
+ #d() {}
+ }
+
+ class E extends D {
+ #e() {}
+ }
+
+ new D;
+ new E;
+}
diff --git a/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js b/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
index 56c7e201aa..8ab1e31848 100644
--- a/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
+++ b/deps/v8/test/mjsunit/harmony/public-instance-class-fields.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-public-fields
"use strict";
{
diff --git a/deps/v8/test/mjsunit/harmony/public-static-class-fields.js b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
index 0334a87786..0c7a3e5516 100644
--- a/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
+++ b/deps/v8/test/mjsunit/harmony/public-static-class-fields.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-public-fields --harmony-static-fields
-
"use strict";
{
diff --git a/deps/v8/test/mjsunit/harmony/regress-generators-resume.js b/deps/v8/test/mjsunit/harmony/regress-generators-resume.js
index c72ac30b48..77c933e2ff 100644
--- a/deps/v8/test/mjsunit/harmony/regress-generators-resume.js
+++ b/deps/v8/test/mjsunit/harmony/regress-generators-resume.js
@@ -12,6 +12,7 @@ function* foo() {
return 0;
}
+%PrepareFunctionForOptimization(foo);
g = foo();
%OptimizeFunctionOnNextCall(foo);
g.next();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-772649.js b/deps/v8/test/mjsunit/harmony/regress/regress-772649.js
index 2ff27670df..cb88b4ad70 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-772649.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-772649.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax
async function* gen([[notIterable]] = [null]) {}
+%PrepareFunctionForOptimization(gen);
assertThrows(() => gen(), TypeError);
assertThrows(() => gen(), TypeError);
%OptimizeFunctionOnNextCall(gen);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-8808.js b/deps/v8/test/mjsunit/harmony/regress/regress-8808.js
index 8c63936382..61277497ca 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-8808.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-8808.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-class-fields
-
assertThrows(() => eval(`
class Foo {
#x = 1;
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-347528.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-347528.js
index e4e8efbc9c..8fd8eac0c5 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-347528.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-347528.js
@@ -30,6 +30,7 @@
"use strict";
let unused_var = 1;
function __f_12() { new Array(); }
+%PrepareFunctionForOptimization(__f_12);
__f_12();
__f_12();
%OptimizeFunctionOnNextCall(__f_12);
diff --git a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
index a79574d69f..6fa8494f88 100644
--- a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
@@ -47,11 +47,9 @@ function TestArrayBufferCreation() {
assertThrows(function() { new SharedArrayBuffer(-10); }, RangeError);
assertThrows(function() { new SharedArrayBuffer(-2.567); }, RangeError);
-/* TODO[dslomov]: Reenable the test
assertThrows(function() {
var ab1 = new SharedArrayBuffer(0xFFFFFFFFFFFF)
}, RangeError);
-*/
var sab = new SharedArrayBuffer();
assertSame(0, sab.byteLength);
@@ -450,10 +448,10 @@ function TestTypedArraysWithIllegalIndices() {
assertEquals(255, a[s2]);
assertEquals(0, a[-0]);
- /* Chromium bug: 424619
- * a[-Infinity] = 50;
- * assertEquals(undefined, a[-Infinity]);
- */
+
+ a[-Infinity] = 50;
+ assertEquals(undefined, a[-Infinity]);
+
a[1.5] = 10;
assertEquals(undefined, a[1.5]);
var nan = Math.sqrt(-1);
@@ -499,10 +497,9 @@ function TestTypedArraysWithIllegalIndicesStrict() {
assertEquals(255, a[s2]);
assertEquals(0, a[-0]);
- /* Chromium bug: 424619
- * a[-Infinity] = 50;
- * assertEquals(undefined, a[-Infinity]);
- */
+ a[-Infinity] = 50;
+ assertEquals(undefined, a[-Infinity]);
+
a[1.5] = 10;
assertEquals(undefined, a[1.5]);
var nan = Math.sqrt(-1);
diff --git a/deps/v8/test/mjsunit/harmony/string-matchAll-deleted-matchAll.js b/deps/v8/test/mjsunit/harmony/string-matchAll-deleted-matchAll.js
index 5d2985f318..2de260fccf 100644
--- a/deps/v8/test/mjsunit/harmony/string-matchAll-deleted-matchAll.js
+++ b/deps/v8/test/mjsunit/harmony/string-matchAll-deleted-matchAll.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-string-matchall
-
delete RegExp.prototype[Symbol.matchAll];
const str = 'a';
assertThrows(() => str.matchAll(/\w/g), TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/string-matchAll.js b/deps/v8/test/mjsunit/harmony/string-matchAll.js
index e8b212529a..39c2d0dfe3 100644
--- a/deps/v8/test/mjsunit/harmony/string-matchAll.js
+++ b/deps/v8/test/mjsunit/harmony/string-matchAll.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-string-matchall
-
(function TestReceiverNonString() {
const iter = 'a'.matchAll(/./);
assertThrows(
diff --git a/deps/v8/test/mjsunit/harmony/to-string.js b/deps/v8/test/mjsunit/harmony/to-string.js
index dfe36c2dd9..e0589e0d82 100644
--- a/deps/v8/test/mjsunit/harmony/to-string.js
+++ b/deps/v8/test/mjsunit/harmony/to-string.js
@@ -4,51 +4,51 @@
// Flags: --allow-natives-syntax
-assertEquals("1", %ToString(1));
-assertEquals("1", %_ToString(1));
+assertEquals("1", %ToStringRT(1));
+assertEquals("1", %_ToStringRT(1));
-assertEquals("0.5", %ToString(.5));
-assertEquals("0.5", %_ToString(.5));
+assertEquals("0.5", %ToStringRT(.5));
+assertEquals("0.5", %_ToStringRT(.5));
-assertEquals("null", %ToString(null));
-assertEquals("null", %_ToString(null));
+assertEquals("null", %ToStringRT(null));
+assertEquals("null", %_ToStringRT(null));
-assertEquals("true", %ToString(true));
-assertEquals("true", %_ToString(true));
+assertEquals("true", %ToStringRT(true));
+assertEquals("true", %_ToStringRT(true));
-assertEquals("false", %ToString(false));
-assertEquals("false", %_ToString(false));
+assertEquals("false", %ToStringRT(false));
+assertEquals("false", %_ToStringRT(false));
-assertEquals("undefined", %ToString(undefined));
-assertEquals("undefined", %_ToString(undefined));
+assertEquals("undefined", %ToStringRT(undefined));
+assertEquals("undefined", %_ToStringRT(undefined));
-assertEquals("random text", %ToString("random text"));
-assertEquals("random text", %_ToString("random text"));
+assertEquals("random text", %ToStringRT("random text"));
+assertEquals("random text", %_ToStringRT("random text"));
-assertThrows(function() { %ToString(Symbol.toPrimitive) }, TypeError);
-assertThrows(function() { %_ToString(Symbol.toPrimitive) }, TypeError);
+assertThrows(function() { %ToStringRT(Symbol.toPrimitive) }, TypeError);
+assertThrows(function() { %_ToStringRT(Symbol.toPrimitive) }, TypeError);
var a = { toString: function() { return "xyz" }};
-assertEquals("xyz", %ToString(a));
-assertEquals("xyz", %_ToString(a));
+assertEquals("xyz", %ToStringRT(a));
+assertEquals("xyz", %_ToStringRT(a));
var b = { valueOf: function() { return 42 }};
-assertEquals("[object Object]", %ToString(b));
-assertEquals("[object Object]", %_ToString(b));
+assertEquals("[object Object]", %ToStringRT(b));
+assertEquals("[object Object]", %_ToStringRT(b));
var c = {
toString: function() { return "x"},
valueOf: function() { return 123 }
};
-assertEquals("x", %ToString(c));
-assertEquals("x", %_ToString(c));
+assertEquals("x", %ToStringRT(c));
+assertEquals("x", %_ToStringRT(c));
var d = {
[Symbol.toPrimitive]: function(hint) { return hint }
};
-assertEquals("string", %ToString(d));
-assertEquals("string", %_ToString(d));
+assertEquals("string", %ToStringRT(d));
+assertEquals("string", %_ToStringRT(d));
var e = new Date(0);
-assertEquals(e.toString(), %ToString(e));
-assertEquals(e.toString(), %_ToString(e));
+assertEquals(e.toString(), %ToStringRT(e));
+assertEquals(e.toString(), %_ToStringRT(e));
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
new file mode 100644
index 0000000000..ca156e0574
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanup-on-detached-realm.js
@@ -0,0 +1,22 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-weak-refs --expose-gc --noincremental-marking
+
+let r = Realm.create();
+let FG = Realm.eval(r, "FinalizationGroup");
+Realm.detachGlobal(r);
+
+let fg = new FG(()=> {
+ assertUnreachable();
+});
+
+(() => {
+ let object = {};
+ fg.register(object, {});
+
+ // object goes out of scope.
+})();
+
+gc();
diff --git a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
index 0cef0a1af5..363fc4a524 100644
--- a/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
+++ b/deps/v8/test/mjsunit/harmony/weakrefs/cleanupsome-after-unregister.js
@@ -22,6 +22,9 @@ let key = {"k": "this is the key"};
// cleanupSome won't do anything since there are no reclaimed targets.
fg.cleanupSome();
assertEquals(0, cleanup_count);
+ // Keep o alive to the end of the function, so that --stress-opt mode
+ // is robust towards --gc-interval timing.
+ return o;
})();
// GC will detect the WeakCell as dirty.
diff --git a/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js b/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js
index d1179d3855..a42187f3da 100644
--- a/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js
+++ b/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-checked.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-json-stringify
-
// Test JSON.stringify for cases that hit
// JsonStringifier::SerializeString_.
diff --git a/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js b/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js
index 260d748ece..20d7520b53 100644
--- a/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js
+++ b/deps/v8/test/mjsunit/harmony/well-formed-json-stringify-unchecked.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-json-stringify
-
// Test JSON.stringify for cases that hit
// JsonStringifier::SerializeStringUnchecked_.
diff --git a/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js b/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
index a0c80f8e3b..ae9286b100 100644
--- a/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
+++ b/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
@@ -9,4 +9,5 @@ function f() {
if (i == 5) %OptimizeOsr();
}
}
+%EnsureFeedbackVectorForFunction(f);
f();
diff --git a/deps/v8/test/mjsunit/ignition/osr-from-generator.js b/deps/v8/test/mjsunit/ignition/osr-from-generator.js
index 2344a31ce4..e2d628819f 100644
--- a/deps/v8/test/mjsunit/ignition/osr-from-generator.js
+++ b/deps/v8/test/mjsunit/ignition/osr-from-generator.js
@@ -11,6 +11,7 @@
}
return 23;
}
+ %PrepareFunctionForOptimization(gen1);
var g = gen1();
assertEquals({ value:23, done:true }, g.next());
})();
@@ -23,6 +24,7 @@
}
return 23;
}
+ %PrepareFunctionForOptimization(gen2);
var g = gen2();
assertEquals({ value:0, done:false }, g.next());
assertEquals({ value:1, done:false }, g.next());
@@ -38,6 +40,7 @@
}
return 23;
}
+ %PrepareFunctionForOptimization(gen3);
var g = gen3();
assertEquals({ value:0, done:false }, g.next());
assertEquals({ value:1, done:false }, g.next());
@@ -57,6 +60,7 @@
}
return 23;
}
+ %PrepareFunctionForOptimization(gen4);
var g = gen4();
assertEquals({ value:0, done:false }, g.next());
assertEquals({ value:1, done:false }, g.next());
diff --git a/deps/v8/test/mjsunit/ignition/throw-if-hole.js b/deps/v8/test/mjsunit/ignition/throw-if-hole.js
index ee7e4c8872..c9578a5228 100644
--- a/deps/v8/test/mjsunit/ignition/throw-if-hole.js
+++ b/deps/v8/test/mjsunit/ignition/throw-if-hole.js
@@ -11,6 +11,10 @@ function f(b) {
}
f(0);
+assertThrows(() => {f(1)}, ReferenceError);
+
+%PrepareFunctionForOptimization(f);
+f(0);
f(0);
%OptimizeFunctionOnNextCall(f);
f(0);
diff --git a/deps/v8/test/mjsunit/ignition/throw-if-not-hole.js b/deps/v8/test/mjsunit/ignition/throw-if-not-hole.js
index c14972a859..554b622bd8 100644
--- a/deps/v8/test/mjsunit/ignition/throw-if-not-hole.js
+++ b/deps/v8/test/mjsunit/ignition/throw-if-not-hole.js
@@ -15,7 +15,13 @@ class B extends A {
}
}
}
+// No feedback case
+test = new B(0);
+assertThrows(() => {new B(1)}, ReferenceError);
+// Ensure Feedback
+%PrepareFunctionForOptimization(B);
+%EnsureFeedbackVectorForFunction(A);
test = new B(0);
test = new B(0);
assertThrows(() => {new B(1)}, ReferenceError);
diff --git a/deps/v8/test/mjsunit/ignition/throw-super-not-called.js b/deps/v8/test/mjsunit/ignition/throw-super-not-called.js
index ec60de8599..ee85129d2f 100644
--- a/deps/v8/test/mjsunit/ignition/throw-super-not-called.js
+++ b/deps/v8/test/mjsunit/ignition/throw-super-not-called.js
@@ -16,6 +16,10 @@ class B extends A {
}
test = new B(1);
+assertThrows(() => {new B(0)}, ReferenceError);
+
+%PrepareFunctionForOptimization(B);
+test = new B(1);
test = new B(1);
%OptimizeFunctionOnNextCall(B);
test = new B(1);
diff --git a/deps/v8/test/mjsunit/immutable-context-slot-inlining.js b/deps/v8/test/mjsunit/immutable-context-slot-inlining.js
index 3b278c3c25..58b1b813c8 100644
--- a/deps/v8/test/mjsunit/immutable-context-slot-inlining.js
+++ b/deps/v8/test/mjsunit/immutable-context-slot-inlining.js
@@ -16,6 +16,7 @@ function f() {
g = function() {
return y;
};
+ %PrepareFunctionForOptimization(h);
assertEquals(5, h(g));
assertEquals(5, h(g));
%OptimizeFunctionOnNextCall(h);
diff --git a/deps/v8/test/mjsunit/induction-variable-turbofan.js b/deps/v8/test/mjsunit/induction-variable-turbofan.js
index 6ef804eb1c..edb2859be8 100644
--- a/deps/v8/test/mjsunit/induction-variable-turbofan.js
+++ b/deps/v8/test/mjsunit/induction-variable-turbofan.js
@@ -85,6 +85,7 @@ function variable_bound() {
}
function test(f) {
+ %PrepareFunctionForOptimization(f);
f();
assertTrue(f());
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/integrity-level-map-update.js b/deps/v8/test/mjsunit/integrity-level-map-update.js
index b4e066f7de..9c02470158 100644
--- a/deps/v8/test/mjsunit/integrity-level-map-update.js
+++ b/deps/v8/test/mjsunit/integrity-level-map-update.js
@@ -100,6 +100,7 @@
o.x = 0.1;
}
+ %PrepareFunctionForOptimization(g);
g(c1);
g(c2);
g(c3);
diff --git a/deps/v8/test/mjsunit/interrupt-budget-override.js b/deps/v8/test/mjsunit/interrupt-budget-override.js
index acbe837c56..6dbf0785a7 100644
--- a/deps/v8/test/mjsunit/interrupt-budget-override.js
+++ b/deps/v8/test/mjsunit/interrupt-budget-override.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --opt --interrupt-budget=100 --allow-natives-syntax
+// Flags: --opt --interrupt-budget=100 --budget-for-feedback-vector-allocation=10 --allow-natives-syntax
function f() {
let s = 0;
diff --git a/deps/v8/test/mjsunit/json-parse-slice.js b/deps/v8/test/mjsunit/json-parse-slice.js
new file mode 100644
index 0000000000..b2b36c15e2
--- /dev/null
+++ b/deps/v8/test/mjsunit/json-parse-slice.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var s = 'abcabcabcabcabc["possibly a sliced string"]'.slice(15)
+assertEquals(["possibly a sliced string"], JSON.parse(s));
diff --git a/deps/v8/test/mjsunit/json-parser-recursive.js b/deps/v8/test/mjsunit/json-parser-recursive.js
index 1e00c83c87..0f086e39c3 100644
--- a/deps/v8/test/mjsunit/json-parser-recursive.js
+++ b/deps/v8/test/mjsunit/json-parser-recursive.js
@@ -30,4 +30,5 @@ for (var i = 0; i < 100000; i++) {
str = "[1," + str + "]";
}
-assertThrows(function() { JSON.parse(str); }, RangeError);
+// Make sure we don't overflow on very deeply nested JSON objects.
+JSON.parse(str);
diff --git a/deps/v8/test/mjsunit/json-stringify-typedarray.js b/deps/v8/test/mjsunit/json-stringify-typedarray.js
new file mode 100644
index 0000000000..48a4fbb0d4
--- /dev/null
+++ b/deps/v8/test/mjsunit/json-stringify-typedarray.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[Int8Array, Uint8Array, Uint8ClampedArray, Int16Array, Uint16Array, Int32Array,
+ Uint32Array, Float32Array, Float64Array]
+ .forEach(constructor => {
+ const empty = new constructor(0);
+ assertEquals('{}', JSON.stringify(empty));
+
+ const tiny = new constructor(2).fill(123);
+ assertEquals('{"0":123,"1":123}', JSON.stringify(tiny));
+
+ const huge = new constructor(64).fill(123);
+ assertEquals(
+ '{"0":123,"1":123,"2":123,"3":123,"4":123,"5":123,"6":123,"7":123,"8":123,"9":123,"10":123,"11":123,"12":123,"13":123,"14":123,"15":123,"16":123,"17":123,"18":123,"19":123,"20":123,"21":123,"22":123,"23":123,"24":123,"25":123,"26":123,"27":123,"28":123,"29":123,"30":123,"31":123,"32":123,"33":123,"34":123,"35":123,"36":123,"37":123,"38":123,"39":123,"40":123,"41":123,"42":123,"43":123,"44":123,"45":123,"46":123,"47":123,"48":123,"49":123,"50":123,"51":123,"52":123,"53":123,"54":123,"55":123,"56":123,"57":123,"58":123,"59":123,"60":123,"61":123,"62":123,"63":123}',
+ JSON.stringify(huge));
+ });
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index 05997b3a84..f6e4c20da2 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -520,3 +520,6 @@ assertEquals({a: 0, b: 1}, JSON.parse('{"a":0,"b":1}', reviver));
reviver = (k, v) => (v === Infinity) ? "inf" : v;
assertEquals('{"":"inf"}', JSON.stringify({"":Infinity}, reviver));
+
+assertEquals([10.4, "\u1234"], JSON.parse("[10.4, \"\u1234\"]"));
+assertEquals(10, JSON.parse('{"10":10}')["10"]);
diff --git a/deps/v8/test/mjsunit/keyed-has-ic.js b/deps/v8/test/mjsunit/keyed-has-ic.js
index f0b95de2c6..ee15ea4859 100644
--- a/deps/v8/test/mjsunit/keyed-has-ic.js
+++ b/deps/v8/test/mjsunit/keyed-has-ic.js
@@ -405,12 +405,27 @@ for (test in tests) {
return true;
}
- var ary = [0,1,2,3];
+ var ary = [0, 1, 2, '3'];
function testArray(ary) {
assertTrue(test(ary, 1));
assertTrue(test(ary, 1));
}
testArray(ary);
+ // Packed
+ // Non-extensible
+ var b = Object.preventExtensions(ary);
+ testArray(b);
+
+ // Sealed
+ var c = Object.seal(ary);
+ testArray(c);
+
+ // Frozen
+ var d = Object.freeze(ary);
+ testArray(d);
+
+ // Holey
+ var ary = [, 0, 1, 2, '3'];
// Non-extensible
var b = Object.preventExtensions(ary);
testArray(b);
@@ -430,7 +445,7 @@ for (test in tests) {
assertFalse(test(str, 0));
})();
-const heap_constant_ary = [0,1,2,3];
+const heap_constant_ary = [0,1,2,'3'];
function testHeapConstantArray(heap_constant_ary) {
@@ -450,6 +465,7 @@ function testHeapConstantArray(heap_constant_ary) {
}
testHeapConstantArray(heap_constant_ary);
+// Packed
// Non-extensible
var b = Object.preventExtensions(heap_constant_ary);
testHeapConstantArray(b);
@@ -461,3 +477,17 @@ testHeapConstantArray(c);
// Frozen
var d = Object.freeze(heap_constant_ary);
testHeapConstantArray(d);
+
+// Holey
+const holey_heap_constant_ary = [,0,1,2,'3'];
+// Non-extensible
+var b = Object.preventExtensions(holey_heap_constant_ary);
+testHeapConstantArray(b);
+
+// Sealed
+var c = Object.seal(holey_heap_constant_ary);
+testHeapConstantArray(c);
+
+// Frozen
+var d = Object.freeze(holey_heap_constant_ary);
+testHeapConstantArray(d);
diff --git a/deps/v8/test/mjsunit/keyed-ic.js b/deps/v8/test/mjsunit/keyed-ic.js
index 01736c7791..277593550b 100644
--- a/deps/v8/test/mjsunit/keyed-ic.js
+++ b/deps/v8/test/mjsunit/keyed-ic.js
@@ -187,6 +187,73 @@ runTest = function() {
runTest();
+// ----------------------------------------------------------------------
+// Indexed access for packed/holey elements
+// ----------------------------------------------------------------------
+runTest = function() {
+ var o = [ 'a', 43 ];
+
+ function test(o, holey=false) {
+ var initial_X = 0;
+ var X = initial_X;
+ var Y = 1;
+
+ function fieldTest(change_index) {
+ for (var i = 0; i < 10; i++) {
+ var property = o[X];
+ if (i <= change_index) {
+ if (holey) {
+ assertEquals(undefined, property);
+ } else {
+ assertEquals('a', property);
+ }
+ } else {
+ if (holey) {
+ assertEquals('a', property);
+ }
+ else {
+ assertEquals(43, property);
+ }
+ }
+ if (i == change_index) X = Y;
+ }
+ X = initial_X;
+ };
+
+ for (var i = 0; i < 10; i++) fieldTest(i);
+ }
+ test(o);
+
+ // Packed
+ // Non-extensible
+ var b = Object.preventExtensions(o);
+ test(b);
+
+ // Sealed
+ var c = Object.seal(o);
+ test(c);
+
+ // Frozen
+ var d = Object.freeze(o);
+ test(d);
+
+ // Holey
+ // Non-extensible
+ o = [, 'a'];
+ var b = Object.preventExtensions(o);
+ test(b, true);
+
+ // Sealed
+ var c = Object.seal(o);
+ test(c, true);
+
+ // Frozen
+ var d = Object.freeze(o);
+ test(d, true);
+}
+
+runTest();
+
// ----------------------------------------------------------------------
// Constant function access.
diff --git a/deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js b/deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js
index ffac5488c0..0f2d3b933f 100644
--- a/deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js
+++ b/deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js
@@ -11,6 +11,7 @@
function foo(a, i) { return a[i]; }
+%PrepareFunctionForOptimization(foo);
var a = ['one', , 'three'];
foo(a, 0);
foo(a, 0);
diff --git a/deps/v8/test/mjsunit/keyed-load-null-receiver.js b/deps/v8/test/mjsunit/keyed-load-null-receiver.js
new file mode 100644
index 0000000000..b5b844b171
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-load-null-receiver.js
@@ -0,0 +1,42 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var count = 0;
+function load(a) {
+ var prop = {
+ toString: function() {
+ count++;
+ return 'z';
+ }
+ };
+
+ a[prop] ^= 1;
+}
+
+function f(null_or_undefined) {
+ // Turn the LoadIC megamorphic
+ load({a0:1, z:2});
+ load({a1:1, z:2});
+ load({a2:1, z:2});
+ load({a3:1, z:2});
+ load({a4:1, z:2});
+ // Now try null to check if generic IC handles this correctly.
+ // It shouldn't call prop.toString.
+ load(null_or_undefined);
+}
+
+try {
+ f(null);
+} catch(error) {
+ assertInstanceof(error, TypeError);
+ assertSame(10, count);
+}
+
+try {
+ count = 0;
+ f(undefined);
+} catch(error) {
+ assertInstanceof(error, TypeError);
+ assertSame(10, count);
+}
diff --git a/deps/v8/test/mjsunit/keyed-load-with-string-key.js b/deps/v8/test/mjsunit/keyed-load-with-string-key.js
index ee055e4790..bb2a24246b 100644
--- a/deps/v8/test/mjsunit/keyed-load-with-string-key.js
+++ b/deps/v8/test/mjsunit/keyed-load-with-string-key.js
@@ -36,6 +36,7 @@ function get(obj, key) {
return obj[key];
}
+%PrepareFunctionForOptimization(get);
get(o, "foo");
get(o, "foo");
get(o, "foo");
diff --git a/deps/v8/test/mjsunit/keyed-load-with-symbol-key.js b/deps/v8/test/mjsunit/keyed-load-with-symbol-key.js
index d0be0a0545..8015cdf7b2 100644
--- a/deps/v8/test/mjsunit/keyed-load-with-symbol-key.js
+++ b/deps/v8/test/mjsunit/keyed-load-with-symbol-key.js
@@ -37,6 +37,7 @@ function get(obj, key) {
return obj[key];
}
+%PrepareFunctionForOptimization(get);
assertEquals("bar", get(o, s));
get(o, s);
get(o, s);
diff --git a/deps/v8/test/mjsunit/large-object-literal-slow-elements.js b/deps/v8/test/mjsunit/large-object-literal-slow-elements.js
index eefde1b988..c977e0465d 100644
--- a/deps/v8/test/mjsunit/large-object-literal-slow-elements.js
+++ b/deps/v8/test/mjsunit/large-object-literal-slow-elements.js
@@ -27,6 +27,7 @@ function TestLargeObjectElements() {
%HeapObjectVerify(object);
}
+%PrepareFunctionForOptimization(TestLargeObjectElements);
TestLargeObjectElements();
TestLargeObjectElements();
%OptimizeFunctionOnNextCall(TestLargeObjectElements);
diff --git a/deps/v8/test/mjsunit/lea-add.js b/deps/v8/test/mjsunit/lea-add.js
index 28a1494706..cc1224ed18 100644
--- a/deps/v8/test/mjsunit/lea-add.js
+++ b/deps/v8/test/mjsunit/lea-add.js
@@ -59,24 +59,28 @@ function d() {
return sum;
}
+%PrepareFunctionForOptimization(a);
a();
a();
%OptimizeFunctionOnNextCall(a);
assertEquals(124750, a());
assertEquals(124750, a());
+%PrepareFunctionForOptimization(b);
b();
b();
%OptimizeFunctionOnNextCall(b);
assertEquals(-125250, b());
assertEquals(-125250, b());
+%PrepareFunctionForOptimization(c);
c();
c();
%OptimizeFunctionOnNextCall(c);
assertEquals(-1073741698750, c());
assertEquals(-1073741698750, c());
+%PrepareFunctionForOptimization(d);
d();
d();
%OptimizeFunctionOnNextCall(d);
diff --git a/deps/v8/test/mjsunit/lithium/DivI.js b/deps/v8/test/mjsunit/lithium/DivI.js
index 5420d8c8d0..3f511687a3 100644
--- a/deps/v8/test/mjsunit/lithium/DivI.js
+++ b/deps/v8/test/mjsunit/lithium/DivI.js
@@ -38,6 +38,7 @@ function foo(a, b) {
return result / b;
}
+%PrepareFunctionForOptimization(foo);
foo(700, 5);
var r1 = foo(700, 5);
%OptimizeFunctionOnNextCall(foo);
@@ -50,6 +51,7 @@ function boo(value) {
}
// Test deoptimization of MinInt / -1.
+%PrepareFunctionForOptimization(boo);
assertEquals(2147483600, boo(-2147483600));
assertEquals(2147483600, boo(-2147483600));
%OptimizeFunctionOnNextCall(boo);
diff --git a/deps/v8/test/mjsunit/lithium/MathExp.js b/deps/v8/test/mjsunit/lithium/MathExp.js
index 854ff5fd7f..4157092c5d 100644
--- a/deps/v8/test/mjsunit/lithium/MathExp.js
+++ b/deps/v8/test/mjsunit/lithium/MathExp.js
@@ -31,6 +31,7 @@ function foo(x) {
return Math.exp(x);
}
+%PrepareFunctionForOptimization(foo);
foo(12.3);
var r1 = foo(12.3);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/lithium/MulI.js b/deps/v8/test/mjsunit/lithium/MulI.js
index 68588bd512..dd6226d8f1 100644
--- a/deps/v8/test/mjsunit/lithium/MulI.js
+++ b/deps/v8/test/mjsunit/lithium/MulI.js
@@ -45,6 +45,7 @@ function foo_int(a, b) {
return result * a;
}
+%PrepareFunctionForOptimization(foo_smi);
foo_smi(10, 5);
var r1 = foo_smi(10, 5);
%OptimizeFunctionOnNextCall(foo_smi);
@@ -52,6 +53,7 @@ var r2 = foo_smi(10, 5);
assertEquals(r1, r2);
+%PrepareFunctionForOptimization(foo_int);
foo_int(10, 21474800);
var r3 = foo_int(10, 21474800);
%OptimizeFunctionOnNextCall(foo_int);
@@ -64,6 +66,7 @@ function foo2(value) {
return value * -1;
}
+%PrepareFunctionForOptimization(foo2);
foo2(-2147483600);
foo2(-2147483600);
%OptimizeFunctionOnNextCall(foo2);
diff --git a/deps/v8/test/mjsunit/lithium/StoreKeyed.js b/deps/v8/test/mjsunit/lithium/StoreKeyed.js
index d34f390d25..16c0172a37 100644
--- a/deps/v8/test/mjsunit/lithium/StoreKeyed.js
+++ b/deps/v8/test/mjsunit/lithium/StoreKeyed.js
@@ -45,11 +45,13 @@ var A1_int = [12, 23];
var A2_int = [12, 23];
var A3_int = [12, 23];
+%PrepareFunctionForOptimization(foo);
foo(A1, 1, 3.4);
foo(A2, 1, 3.4);
%OptimizeFunctionOnNextCall(foo);
foo(A3, 1, 3.4);
+%PrepareFunctionForOptimization(foo_int);
foo_int(A1_int, 1, 34);
foo_int(A2_int, 1, 34);
%OptimizeFunctionOnNextCall(foo_int);
diff --git a/deps/v8/test/mjsunit/lithium/StoreKeyedExternal.js b/deps/v8/test/mjsunit/lithium/StoreKeyedExternal.js
index a5670fee95..e8944bb8f4 100644
--- a/deps/v8/test/mjsunit/lithium/StoreKeyedExternal.js
+++ b/deps/v8/test/mjsunit/lithium/StoreKeyedExternal.js
@@ -72,26 +72,31 @@ var A1_double = new Float64Array(2);
var A2_double = new Float64Array(2);
var A3_double = new Float64Array(2);
+%PrepareFunctionForOptimization(foo_pixel);
foo_pixel(A1_pixel, 1, 34);
foo_pixel(A2_pixel, 1, 34);
%OptimizeFunctionOnNextCall(foo_pixel);
foo_pixel(A3_pixel, 1, 34);
+%PrepareFunctionForOptimization(foo_uint16);
foo_uint16(A1_uint16, 1, 3.4);
foo_uint16(A2_uint16, 1, 3.4);
%OptimizeFunctionOnNextCall(foo_uint16);
foo_uint16(A3_uint16, 1, 3.4);
+%PrepareFunctionForOptimization(foo_uint32);
foo_uint32(A1_uint32, 1, 3.4);
foo_uint32(A2_uint32, 1, 3.4);
%OptimizeFunctionOnNextCall(foo_uint32);
foo_uint32(A3_uint32, 1, 3.4);
+%PrepareFunctionForOptimization(foo_float);
foo_float(A1_float, 1, 3.4);
foo_float(A2_float, 1, 3.4);
%OptimizeFunctionOnNextCall(foo_float);
foo_float(A3_float, 1, 3.4);
+%PrepareFunctionForOptimization(foo_double);
foo_double(A1_double, 1, 3.4);
foo_double(A2_double, 1, 3.4);
%OptimizeFunctionOnNextCall(foo_double);
diff --git a/deps/v8/test/mjsunit/load_poly_effect.js b/deps/v8/test/mjsunit/load_poly_effect.js
index 7663d86ad0..8ac483730f 100644
--- a/deps/v8/test/mjsunit/load_poly_effect.js
+++ b/deps/v8/test/mjsunit/load_poly_effect.js
@@ -39,6 +39,7 @@ var deopt = false;
var o2 = {x_tagged:{}};
o2.x_tagged = 1;
+%PrepareFunctionForOptimization(load);
load({x:1}, o2);
load({x:1}, o2);
print(load(o, o2));
diff --git a/deps/v8/test/mjsunit/math-abs.js b/deps/v8/test/mjsunit/math-abs.js
index 4fb72baaa9..d688516d60 100644
--- a/deps/v8/test/mjsunit/math-abs.js
+++ b/deps/v8/test/mjsunit/math-abs.js
@@ -106,6 +106,7 @@ function foo(x) {
for(var i = 0; i < 1000; i++) {
foo(-i);
}
+%PrepareFunctionForOptimization(foo);
assertEquals(42, foo(-42));
%OptimizeFunctionOnNextCall(foo)
assertEquals(42, foo(-42));
@@ -115,7 +116,8 @@ assertEquals(42, foo(-42));
var a = [-1, -2];
function foo2() {
return Math.abs(a[0]);
-}
+};
+%PrepareFunctionForOptimization(foo2);
assertEquals(1, foo2());
assertEquals(1, foo2());
%OptimizeFunctionOnNextCall(foo2);
@@ -132,6 +134,7 @@ function absHalf(bits) {
// Create minimum integer input for abs() using bitwise operations
// that should overflow.
bits = 32;
+%PrepareFunctionForOptimization(absHalf);
assertEquals(2147483648, absHalf(bits));
assertEquals(2147483648, absHalf(bits));
%OptimizeFunctionOnNextCall(absHalf);
diff --git a/deps/v8/test/mjsunit/math-ceil.js b/deps/v8/test/mjsunit/math-ceil.js
index 05794f4bb2..314fd0a08b 100644
--- a/deps/v8/test/mjsunit/math-ceil.js
+++ b/deps/v8/test/mjsunit/math-ceil.js
@@ -18,6 +18,7 @@ function testCeil(expect, input) {
var test_double_input = new Function(
'n',
'"' + (test_id++) + '";return Math.ceil(+n)');
+ %PrepareFunctionForOptimization(test_double_input);
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
@@ -27,6 +28,7 @@ function testCeil(expect, input) {
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.ceil(n) + -0.0');
+ %PrepareFunctionForOptimization(test_double_output);
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
@@ -36,6 +38,7 @@ function testCeil(expect, input) {
var test_via_floor = new Function(
'n',
'"' + (test_id++) + '";return -Math.floor(-n)');
+ %PrepareFunctionForOptimization(test_via_floor);
assertEquals(expect, test_via_floor(input));
assertEquals(expect, test_via_floor(input));
assertEquals(expect, test_via_floor(input));
@@ -46,6 +49,7 @@ function testCeil(expect, input) {
var test_via_trunc = new Function(
'n',
'"' + (test_id++) + '";return Math.trunc(n)');
+ %PrepareFunctionForOptimization(test_via_trunc);
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
diff --git a/deps/v8/test/mjsunit/math-deopt.js b/deps/v8/test/mjsunit/math-deopt.js
index b5fff7d149..609e406ecd 100644
--- a/deps/v8/test/mjsunit/math-deopt.js
+++ b/deps/v8/test/mjsunit/math-deopt.js
@@ -8,10 +8,12 @@
function f(a) {
return Math.abs(a);
}
+ %PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
f("100");
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("100");
assertOptimized(f);
@@ -21,10 +23,12 @@
function f(a) {
return Math.min(1,a);
}
+ %PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
f("100");
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("100");
assertOptimized(f);
@@ -34,10 +38,12 @@
function f(a) {
return Math.pow(a,10);
}
+ %PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
f("100");
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("100");
assertOptimized(f);
@@ -47,10 +53,12 @@
function f(a) {
return Math.clz32(a);
}
+ %PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
f("100");
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("100");
assertOptimized(f);
@@ -60,10 +68,12 @@
function f(a) {
return Math.imul(a, 10);
}
+ %PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
f("100");
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("100");
assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
index a721467b5a..0c424c5d0c 100644
--- a/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
+++ b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
@@ -34,6 +34,7 @@ function test_div_no_deopt_minus_zero() {
assertTrue(0 === (Math.floor((zero_in_array[0] | 0) / -1) | 0));
}
+%PrepareFunctionForOptimization(test_div_no_deopt_minus_zero);
test_div_no_deopt_minus_zero();
test_div_no_deopt_minus_zero();
%OptimizeFunctionOnNextCall(test_div_no_deopt_minus_zero);
diff --git a/deps/v8/test/mjsunit/math-floor-of-div-nosudiv.js b/deps/v8/test/mjsunit/math-floor-of-div-nosudiv.js
index 5340f8353b..9b977849af 100644
--- a/deps/v8/test/mjsunit/math-floor-of-div-nosudiv.js
+++ b/deps/v8/test/mjsunit/math-floor-of-div-nosudiv.js
@@ -180,6 +180,7 @@ function test_div() {
}
}
+%PrepareFunctionForOptimization(test_div);
test_div();
%OptimizeFunctionOnNextCall(test_div);
test_div();
@@ -201,6 +202,7 @@ function test_div2() {
}
}
+%PrepareFunctionForOptimization(test_div2);
test_div2();
%OptimizeFunctionOnNextCall(test_div2);
test_div2();
@@ -244,6 +246,9 @@ function test_div_deopt_div_by_zero() {
}
}
+%PrepareFunctionForOptimization(test_div_deopt_minus_zero);
+%PrepareFunctionForOptimization(test_div_deopt_overflow);
+%PrepareFunctionForOptimization(test_div_deopt_div_by_zero);
test_div_deopt_minus_zero();
test_div_deopt_overflow();
test_div_deopt_div_by_zero();
@@ -277,6 +282,9 @@ function test_div_deopt_div_by_zero_v() {
}
}
+%PrepareFunctionForOptimization(test_div_deopt_minus_zero_v);
+%PrepareFunctionForOptimization(test_div_deopt_overflow_v);
+%PrepareFunctionForOptimization(test_div_deopt_div_by_zero_v);
test_div_deopt_minus_zero_v();
test_div_deopt_overflow_v();
test_div_deopt_div_by_zero_v();
diff --git a/deps/v8/test/mjsunit/math-floor-part1.js b/deps/v8/test/mjsunit/math-floor-part1.js
index bad1edd081..c386c1161c 100644
--- a/deps/v8/test/mjsunit/math-floor-part1.js
+++ b/deps/v8/test/mjsunit/math-floor-part1.js
@@ -32,6 +32,7 @@ var test_id = 0;
function testFloor(expect, input) {
var test = new Function('n',
'"' + (test_id++) + '";return Math.floor(n)');
+ %PrepareFunctionForOptimization(test);
assertEquals(expect, test(input));
assertEquals(expect, test(input));
assertEquals(expect, test(input));
@@ -41,6 +42,7 @@ function testFloor(expect, input) {
var test_double_input = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(+n)');
+ %PrepareFunctionForOptimization(test_double_input);
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
@@ -50,6 +52,7 @@ function testFloor(expect, input) {
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(n) + -0.0');
+ %PrepareFunctionForOptimization(test_double_output);
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
@@ -59,6 +62,7 @@ function testFloor(expect, input) {
var test_via_ceil = new Function(
'n',
'"' + (test_id++) + '";return -Math.ceil(-n)');
+ %PrepareFunctionForOptimization(test_via_ceil);
assertEquals(expect, test_via_ceil(input));
assertEquals(expect, test_via_ceil(input));
assertEquals(expect, test_via_ceil(input));
@@ -69,6 +73,7 @@ function testFloor(expect, input) {
var test_via_trunc = new Function(
'n',
'"' + (test_id++) + '";return Math.trunc(n)');
+ %PrepareFunctionForOptimization(test_via_trunc);
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
@@ -88,6 +93,7 @@ function test() {
function ifloor(x) {
return 1 / Math.floor(x);
}
+ %PrepareFunctionForOptimization(ifloor);
assertEquals(-Infinity, ifloor(-0));
assertEquals(-Infinity, ifloor(-0));
assertEquals(-Infinity, ifloor(-0));
diff --git a/deps/v8/test/mjsunit/math-floor-part4.js b/deps/v8/test/mjsunit/math-floor-part4.js
index 499200288e..ebabb7496e 100644
--- a/deps/v8/test/mjsunit/math-floor-part4.js
+++ b/deps/v8/test/mjsunit/math-floor-part4.js
@@ -32,6 +32,7 @@ var test_id = 0;
function testFloor(expect, input) {
var test = new Function('n',
'"' + (test_id++) + '";return Math.floor(n)');
+ %PrepareFunctionForOptimization(test);
assertEquals(expect, test(input));
assertEquals(expect, test(input));
assertEquals(expect, test(input));
@@ -41,6 +42,7 @@ function testFloor(expect, input) {
var test_double_input = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(+n)');
+ %PrepareFunctionForOptimization(test_double_input);
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
assertEquals(expect, test_double_input(input));
@@ -50,6 +52,7 @@ function testFloor(expect, input) {
var test_double_output = new Function(
'n',
'"' + (test_id++) + '";return Math.floor(n) + -0.0');
+ %PrepareFunctionForOptimization(test_double_output);
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
assertEquals(expect, test_double_output(input));
@@ -59,6 +62,7 @@ function testFloor(expect, input) {
var test_via_ceil = new Function(
'n',
'"' + (test_id++) + '";return -Math.ceil(-n)');
+ %PrepareFunctionForOptimization(test_via_ceil);
assertEquals(expect, test_via_ceil(input));
assertEquals(expect, test_via_ceil(input));
assertEquals(expect, test_via_ceil(input));
@@ -69,6 +73,7 @@ function testFloor(expect, input) {
var test_via_trunc = new Function(
'n',
'"' + (test_id++) + '";return Math.trunc(n)');
+ %PrepareFunctionForOptimization(test_via_trunc);
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
assertEquals(expect, test_via_trunc(input));
@@ -106,7 +111,8 @@ function floorsum(i, n) {
ret += Math.floor(n);
}
return ret;
-}
+};
+%PrepareFunctionForOptimization(floorsum);
assertEquals(-0, floorsum(1, -0));
%OptimizeFunctionOnNextCall(floorsum);
// The optimized function will deopt. Run it with enough iterations to try
diff --git a/deps/v8/test/mjsunit/math-min-max.js b/deps/v8/test/mjsunit/math-min-max.js
index a4d1b27c70..b3e84be2e8 100644
--- a/deps/v8/test/mjsunit/math-min-max.js
+++ b/deps/v8/test/mjsunit/math-min-max.js
@@ -117,6 +117,7 @@ assertEquals(Infinity, 1/Math.max(ZERO, -0));
assertEquals(Infinity, 1/Math.max(-0, ZERO));
function run(crankshaft_test) {
+ %PrepareFunctionForOptimization(crankshaft_test);
crankshaft_test(1);
crankshaft_test(1);
%OptimizeFunctionOnNextCall(crankshaft_test);
@@ -184,6 +185,7 @@ function f(o) {
return Math.min(o.a, o.b);
}
+%PrepareFunctionForOptimization(f);
assertEquals(1, f(o));
assertEquals(1, f(o));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/math-pow.js b/deps/v8/test/mjsunit/math-pow.js
index ffbf418242..d347d539c9 100644
--- a/deps/v8/test/mjsunit/math-pow.js
+++ b/deps/v8/test/mjsunit/math-pow.js
@@ -167,6 +167,7 @@ function test() {
(-1*(Math.pow(2,52))*(Math.pow(2,-1074))) === -2.2250738585072014e-308);
}
+%PrepareFunctionForOptimization(test);
test();
test();
%OptimizeFunctionOnNextCall(test);
diff --git a/deps/v8/test/mjsunit/math-round.js b/deps/v8/test/mjsunit/math-round.js
index 12a92657a1..aeed206d22 100644
--- a/deps/v8/test/mjsunit/math-round.js
+++ b/deps/v8/test/mjsunit/math-round.js
@@ -33,6 +33,7 @@ function testRound(expect, input) {
// sure it gets optimized each time.
var doRound = new Function('input',
'"' + (test_id++) + '";return Math.round(input)');
+ %PrepareFunctionForOptimization(doRound);
assertEquals(expect, doRound(input));
assertEquals(expect, doRound(input));
assertEquals(expect, doRound(input));
@@ -43,6 +44,7 @@ function testRound(expect, input) {
// optimized code.
var doRoundToDouble = new Function('input',
'"' + (test_id++) + '";return Math.round(input) + -0.0');
+ %PrepareFunctionForOptimization(doRoundToDouble);
assertEquals(expect, doRoundToDouble(input));
assertEquals(expect, doRoundToDouble(input));
assertEquals(expect, doRoundToDouble(input));
@@ -64,7 +66,8 @@ function roundsum(i, n) {
ret += Math.round(n);
}
return ret;
-}
+};
+%PrepareFunctionForOptimization(roundsum);
assertEquals(-0, roundsum(1, -0));
%OptimizeFunctionOnNextCall(roundsum);
// The optimized function will deopt. Run it with enough iterations to try
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 87977c2516..4fb95b7aa7 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -508,6 +508,9 @@ var prettyPrinted;
}
assertThrows = function assertThrows(code, type_opt, cause_opt) {
+ if (arguments.length > 1 && type_opt === undefined) {
+ failWithMessage('invalid use of assertThrows, unknown type_opt given');
+ }
if (type_opt !== undefined && typeof type_opt !== 'function') {
failWithMessage(
'invalid use of assertThrows, maybe you want assertThrowsEquals');
@@ -535,6 +538,9 @@ var prettyPrinted;
};
assertThrowsAsync = function assertThrowsAsync(promise, type_opt, cause_opt) {
+ if (arguments.length > 1 && type_opt === undefined) {
+ failWithMessage('invalid use of assertThrows, unknown type_opt given');
+ }
if (type_opt !== undefined && typeof type_opt !== 'function') {
failWithMessage(
'invalid use of assertThrows, maybe you want assertThrowsEquals');
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 83be1d0eee..b55e8d790c 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -54,9 +54,6 @@
# Issue 3784: setters-on-elements is flaky
'setters-on-elements': [PASS, FAIL],
- # Issue 5495: enable the test when the constant field tracking in enabled.
- 'const-field-tracking': [SKIP],
-
# Issue 8505: Math.pow is incorrect for asm.js
'regress/wasm/regress-8505': [SKIP],
@@ -78,7 +75,7 @@
'regress/regress-4595': [PASS, NO_VARIANTS, ['mode == debug', SKIP]],
# Too slow in debug mode, due to large allocations.
- 'regress/regress-crbug-941743': [PASS, ['mode == debug', SKIP], ['(arch == arm or arch == arm64) and simulator_run == True', SKIP]],
+ 'regress/regress-crbug-941743': [PASS, ['mode == debug', SKIP], ['(arch == arm or arch == arm64 or arch == mipsel or arch == mips64el) and simulator_run == True', SKIP]],
##############################################################################
# Only RegExp stuff tested, no need for extensive optimizing compiler tests.
@@ -112,6 +109,7 @@
'generated-transition-stub': [PASS, ['mode == debug', SKIP]],
'migrations': [SKIP],
'array-functions-prototype-misc': [PASS, SLOW, ['mode == debug', SKIP]],
+ 'compiler/regress-9017': [PASS, SLOW],
'compiler/regress-808472': [PASS, ['mode == debug', SKIP]],
'es6/promise-all-overflow-1': [SKIP],
'es6/promise-all-overflow-2': [PASS, SLOW, ['mode == debug or arch != x64', SKIP]],
@@ -167,7 +165,9 @@
# Slow tests.
'copy-on-write-assert': [PASS, SLOW],
'es6/typedarray-construct-offset-not-smi': [PASS, SLOW],
+ 'harmony/futex': [PASS, SLOW],
'harmony/regexp-property-script-extensions': [PASS, SLOW],
+ 'ignition/regress-672027': [PASS, SLOW],
'md5': [PASS, SLOW],
'numops-fuzz-part*': [PASS, ['mode == debug', SLOW]],
'readonly': [PASS, SLOW],
@@ -235,6 +235,20 @@
# BUG(v8:8169)
'external-backing-store-gc': [SKIP],
+
+ # Test is only enabled on ASAN. Takes too long on many other bots.
+ 'regress/regress-crbug-9161': [SKIP],
+
+ # BUG(v8:9260)
+ 'tools/profviz': [SKIP],
+ # Test doesn't work on 32-bit architectures (it would require a
+ # regexp pattern with too many captures).
+ 'regress/regress-976627': [FAIL, ['arch == x64 or arch == arm64 or arch == mips64el or arch == ppc64 or arch == s390x', PASS]],
+
+ # To be re-enabled once https://crbug.com/v8/9534 is fixed.
+ 'es6/regress/regress-crbug-465671': [SKIP],
+ 'es6/regress/regress-crbug-465671-null': [SKIP],
+ 'regress/regress-543994': [SKIP],
}], # ALWAYS
['novfp3 == True', {
@@ -309,6 +323,7 @@
# Slow tests.
'array-constructor': [PASS, SLOW],
'json': [PASS, SLOW],
+ 'large-object-literal-slow-elements': [PASS, SLOW],
# BUG(v8:4779): Crashes flakily with stress mode on arm64.
'array-splice': [PASS, SLOW, ['arch == arm64', NO_VARIANTS]],
@@ -328,23 +343,6 @@
['lite_mode or variant == jitless', {
# Skip tests not suitable for lite_mode.
- # TODO(8596): We cache the templates in the feedback vector. In lite mode
- # without feedback vectors we need to implement some other mechanism to cache
- # them. Enable this test after fixing it.
- 'es6/templates': [SKIP],
-
- # code coverage needs feedback vectors
- 'code-coverage-ad-hoc': [SKIP],
- 'code-coverage-class-fields': [SKIP],
- 'code-coverage-block-noopt': [SKIP],
- 'code-coverage-block': [SKIP],
- 'code-coverage-precise': [SKIP],
-
- # Needs feedback vector - tests for allocation sites
- 'array-constructor-feedback': [SKIP],
- 'regress/regress-trap-allocation-memento': [SKIP],
- 'regress/regress-4121': [SKIP],
-
# TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
'regress/regress-5888': [SKIP],
'regress/regress-5911': [SKIP],
@@ -354,6 +352,7 @@
'regress/regress-crbug-816961': [SKIP],
'regress/wasm/*': [SKIP],
'regress/regress-8947': [SKIP],
+ 'regress/regress-9165': [SKIP],
'regress/regress-v8-9106': [SKIP],
'wasm/*': [SKIP],
@@ -389,14 +388,19 @@
# These tests check that we can trace the compiler.
'tools/compiler-trace-flags': [SKIP],
- 'tools/compiler-trace-flags-wasm': [SKIP]
+ 'tools/compiler-trace-flags-wasm': [SKIP],
+
+ # Slow with pointer compression.
+ 'regress/regress-crbug-319860': [PASS, ['pointer_compression', SLOW]],
}], # 'lite_mode or variant == jitless'
##############################################################################
-['lite_mode', {
- # TODO(v8:8510): Tests that currently fail with lazy source positions.
- 'stack-traces-overflow': [SKIP],
-}], # lite_mode
+['is_full_debug', {
+ # Tests too slow in non-optimized debug mode.
+ 'compiler/regress-9017': [SKIP],
+ 'regress/regress-2790': [SKIP],
+ 'regress/regress-740784': [SKIP],
+}], # 'is_full_debug'
##############################################################################
['byteorder == big', {
@@ -474,6 +478,9 @@
# BUG(v8:7247).
'regress/regress-779407': [PASS, SLOW, NO_VARIANTS],
+
+ # BUG(v8:9256). Slow with pointer compression.
+ 'regress/regress-708247': [PASS, ['pointer_compression', SLOW]],
}], # 'arch == arm64'
['arch == arm64 and mode == debug and simulator_run', {
@@ -514,6 +521,9 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7102
# Flaky due to huge string allocation.
'regress/regress-748069': [SKIP],
+
+ # Test is tailored for ASAN. Takes too long on many other bots.
+ 'regress/regress-crbug-9161': [PASS, SLOW],
}], # 'asan == True'
##############################################################################
@@ -527,13 +537,16 @@
'string-localecompare': [SKIP],
# Too slow.
+ 'asm/embenchen/zlib': [SKIP],
'harmony/regexp-property-lu-ui': [SKIP],
+ 'regress/regress-779407': [SKIP],
+ 'wasm/embenchen/box2d': [SKIP],
+ 'wasm/embenchen/lua_binarytrees': [SKIP],
+ 'wasm/embenchen/zlib': [SKIP],
# https://bugs.chromium.org/p/v8/issues/detail?id=7102
# Flaky due to huge string allocation.
'regress/regress-748069': [SKIP],
- # Slow test.
- 'regress/regress-779407': [PASS, SLOW],
}], # 'msan == True'
##############################################################################
@@ -554,7 +567,19 @@
# BUG(v8:6924). The test uses a lot of memory.
'regress/wasm/regress-694433': [SKIP],
'es6/typedarray': [PASS, NO_VARIANTS],
- 'regress/regress-752764': [PASS, NO_VARIANTS],
+ 'regress/regress-752764': [PASS, SLOW, NO_VARIANTS],
+
+ # BUG(v8:9242). Uses a lot of memory.
+ 'regress/regress-599414-array-concat-fast-path': [PASS, SLOW],
+
+ # BUG(v8:9026). Flaky timeouts.
+ 'es6/classes': [SKIP],
+
+ # Slow tests.
+ 'compiler/regress-9017': [PASS, SLOW],
+ 'es6/block-conflicts-sloppy': [PASS, SLOW],
+ 'math-floor-part1': [PASS, SLOW],
+ 'regress/regress-500980': [PASS, SLOW],
}], # 'tsan == True'
##############################################################################
@@ -768,6 +793,7 @@
'never-optimize': [SKIP],
'readonly': [SKIP],
'array-feedback': [SKIP],
+ 'array-reduce': [SKIP],
'deopt-recursive-eager-once': [SKIP],
'deopt-recursive-lazy-once': [SKIP],
'deopt-recursive-soft-once': [SKIP],
@@ -811,6 +837,7 @@
# Tests that fail some assertions due to checking internal state sensitive
# to GC.
'compiler/native-context-specialization-hole-check': [SKIP],
+ 'regress/regress-trap-allocation-memento': [SKIP],
'shared-function-tier-up-turbo': [SKIP],
}], # 'gc_fuzzer'
@@ -859,8 +886,11 @@
'regress/regress-crbug-941743': [SKIP],
'regress/regress-crbug-482998': [PASS, SLOW],
'regress/regress-91008': [PASS, SLOW],
+ 'regress/regress-779407': [PASS, SLOW],
+ 'packed-elements': [PASS, SLOW],
'harmony/regexp-property-lu-ui': [PASS, SLOW],
'whitespaces': [PASS, SLOW],
+ 'generated-transition-stub': [PASS, SLOW],
'wasm/atomics-stress': [SKIP],
'wasm/atomics64-stress': [SKIP],
}], # 'simulator_run and (arch in [ppc64, s390x])'
@@ -893,7 +923,8 @@
# Too slow for TSAN in stress mode.
# Goes OOM on ODROID devices: https://crbug.com/v8/9026
- 'es6/classes': [PASS, ['tsan or (arch == arm and not simulator_run)', SKIP]],
+ # Too slow on PPC: https://crbug.com/v8/9246
+ 'es6/classes': [PASS, SLOW, ['tsan or (arch == arm and not simulator_run) or arch in [ppc, ppc64]', SKIP]],
'regress/regress-1122': [PASS, ['tsan', SKIP]],
# Too slow with gc_stress on arm64.
@@ -902,6 +933,10 @@
# Slow on arm64 simulator: https://crbug.com/v8/7783
'string-replace-gc': [PASS, ['arch == arm64 and simulator_run', SKIP]],
+ # Too memory hungry.
+ 'regress/regress-779407': [PASS, ['tsan', SKIP]],
+ 'regress/regress-599414-array-concat-fast-path': [PASS, ['tsan', SKIP]],
+
# Too memory hungry on Odroid devices.
'regress/regress-678917': [PASS, ['arch == arm and not simulator_run', SKIP]],
}], # variant == stress
@@ -909,6 +944,7 @@
##############################################################################
['variant == stress and (arch == arm or arch == arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
+ 'es6/classes': [SKIP],
'generated-transition-stub': [SKIP],
'regress/regress-336820': [SKIP],
'wasm/grow-memory': [SKIP],
@@ -986,6 +1022,9 @@
# Slow tests.
'regress/regress-crbug-493779': [SKIP],
'string-replace-gc': [SKIP],
+
+ # https://crbug.com/v8/9221
+ 'wasm/grow-shared-memory': [SKIP],
}], # variant == slow_path
##############################################################################
diff --git a/deps/v8/test/mjsunit/modules-turbo1.js b/deps/v8/test/mjsunit/modules-turbo1.js
index 20df5c04dd..ce7e0b8f34 100644
--- a/deps/v8/test/mjsunit/modules-turbo1.js
+++ b/deps/v8/test/mjsunit/modules-turbo1.js
@@ -7,6 +7,7 @@
export let x = 0;
function foo() { x++ };
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
diff --git a/deps/v8/test/mjsunit/modules-turbo2.js b/deps/v8/test/mjsunit/modules-turbo2.js
index 2e08086377..18d84bad3d 100644
--- a/deps/v8/test/mjsunit/modules-turbo2.js
+++ b/deps/v8/test/mjsunit/modules-turbo2.js
@@ -11,6 +11,7 @@ function foo() { return x++ };
function gaga(f) { return f() };
+%PrepareFunctionForOptimization(gaga);
assertEquals(0, gaga(foo));
assertEquals(1, gaga(foo));
%OptimizeFunctionOnNextCall(gaga);
diff --git a/deps/v8/test/mjsunit/never-optimize.js b/deps/v8/test/mjsunit/never-optimize.js
index 5efaa47de3..f2b764e16c 100644
--- a/deps/v8/test/mjsunit/never-optimize.js
+++ b/deps/v8/test/mjsunit/never-optimize.js
@@ -29,6 +29,7 @@
function o1() {
}
+%PrepareFunctionForOptimization(o1);
o1(); o1();
%OptimizeFunctionOnNextCall(o1);
@@ -45,6 +46,8 @@ function u1() {
function u2() {
u1();
}
+%PrepareFunctionForOptimization(u1);
+%PrepareFunctionForOptimization(u2);
u1(); u1();
u2(); u2();
diff --git a/deps/v8/test/mjsunit/number-isnan-opt.js b/deps/v8/test/mjsunit/number-isnan-opt.js
index a5d4b9f337..052eb0b1fc 100644
--- a/deps/v8/test/mjsunit/number-isnan-opt.js
+++ b/deps/v8/test/mjsunit/number-isnan-opt.js
@@ -7,6 +7,7 @@
(function() {
function foo(x) { return Number.isNaN(x); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(+undefined));
assertFalse(foo(undefined));
%OptimizeFunctionOnNextCall(foo);
@@ -17,6 +18,7 @@
(function() {
function foo(x) { return Number.isNaN(+x); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo(+undefined));
assertFalse(foo(0));
%OptimizeFunctionOnNextCall(foo);
@@ -27,6 +29,7 @@
(function() {
function foo(x) { return Number.isNaN(x|0); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(+undefined));
assertFalse(foo(0));
%OptimizeFunctionOnNextCall(foo);
@@ -37,6 +40,7 @@
(function() {
function foo(x) { return Number.isNaN("" + x); }
+ %PrepareFunctionForOptimization(foo);
assertFalse(foo(undefined));
assertFalse(foo(0));
%OptimizeFunctionOnNextCall(foo);
@@ -47,6 +51,7 @@
(function() {
function foo(x) { return Number.isNaN(0/0); }
+ %PrepareFunctionForOptimization(foo);
assertTrue(foo());
assertTrue(foo());
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/object-freeze.js b/deps/v8/test/mjsunit/object-freeze.js
index fe392e749c..98ad163895 100644
--- a/deps/v8/test/mjsunit/object-freeze.js
+++ b/deps/v8/test/mjsunit/object-freeze.js
@@ -574,3 +574,222 @@ arr[2] = 'c';
assertEquals(arr[2], undefined);
arr.length = 1;
assertEquals(arr.length, 2);
+
+// Start testing with holey array
+// Test holey element array built-in functions with freeze.
+function testHoleyFrozenArray1(obj) {
+ assertTrue(Object.isSealed(obj));
+ // Verify that the value can't be written
+ obj1 = new Array(...obj);
+ var length = obj.length;
+ for (var i = 0; i < length-1; i++) {
+ obj[i] = 'new';
+ assertEquals(obj1[i], obj[i]);
+ }
+ // for symbol we cannot compare directly
+ assertTrue(typeof obj[length-1] == 'symbol');
+
+ // Verify that the length can't be written by builtins.
+ assertTrue(Array.isArray(obj));
+ assertThrows(function() { obj.pop(); }, TypeError);
+ assertThrows(function() { obj.push(); }, TypeError);
+ assertThrows(function() { obj.shift(); }, TypeError);
+ assertThrows(function() { obj.unshift(); }, TypeError);
+ assertThrows(function() { obj.copyWithin(0,0); }, TypeError);
+ assertThrows(function() { obj.fill(0); }, TypeError);
+ assertThrows(function() { obj.reverse(); }, TypeError);
+ assertThrows(function() { obj.sort(); }, TypeError);
+ assertThrows(function() { obj.splice(0); }, TypeError);
+ assertThrows(function() { obj.splice(0, 0); }, TypeError);
+ assertTrue(Object.isFrozen(obj));
+
+ // Verify search, filter, iterator
+ assertEquals(obj.lastIndexOf(1), 2);
+ assertEquals(obj.indexOf('a'), 5);
+ assertEquals(obj.indexOf(undefined), 0);
+ assertFalse(obj.includes(Symbol("test")));
+ assertTrue(obj.includes(undefined));
+ assertFalse(obj.includes(NaN));
+ assertTrue(obj.includes());
+ assertEquals(obj.find(x => x==0), undefined);
+ assertEquals(obj.findIndex(x => x=='a'), 5);
+ assertTrue(obj.some(x => typeof x == 'symbol'));
+ assertFalse(obj.every(x => x == -1));
+ var filteredArray = obj.filter(e => typeof e == "symbol");
+ assertEquals(filteredArray.length, 1);
+ assertEquals(obj.map(x => x), obj);
+ var countPositiveNumber = 0;
+ obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+ });
+ assertEquals(countPositiveNumber, 1);
+ assertEquals(obj.length, obj.concat([]).length);
+ var iterator = obj.values();
+ assertEquals(iterator.next().value, undefined);
+ assertEquals(iterator.next().value, null);
+ var iterator = obj.keys();
+ assertEquals(iterator.next().value, 0);
+ assertEquals(iterator.next().value, 1);
+ var iterator = obj.entries();
+ assertEquals(iterator.next().value, [0, undefined]);
+ assertEquals(iterator.next().value, [1, null]);
+}
+
+obj = [undefined, null, 1, , -1, 'a', Symbol("test")];
+assertTrue(%HasHoleyElements(obj));
+Object.freeze(obj);
+testHoleyFrozenArray1(obj);
+
+// Verify change from sealed to frozen
+obj = [undefined, null, 1, , -1, 'a', Symbol("test")];
+assertTrue(%HasHoleyElements(obj));
+Object.seal(obj);
+Object.freeze(obj);
+assertTrue(Object.isSealed(obj));
+testHoleyFrozenArray1(obj);
+
+// Verify change from non-extensible to frozen
+obj = [undefined, null, 1, ,-1, 'a', Symbol("test")];
+assertTrue(%HasHoleyElements(obj));
+Object.preventExtensions(obj);
+Object.freeze(obj);
+assertTrue(Object.isSealed(obj));
+testHoleyFrozenArray1(obj);
+
+// Verify flat, map, slice, flatMap, join, reduce, reduceRight for frozen packed array
+function testHoleyFrozenArray2(arr) {
+ assertTrue(Object.isFrozen(arr));
+ assertTrue(Array.isArray(arr));
+ assertEquals(arr.map(x => [x]), [, ['a'], ['b'], ['c']]);
+ assertEquals(arr.flatMap(x => [x]), ["a", "b", "c"]);
+ assertEquals(arr.flat(), ["a", "b", "c"]);
+ assertEquals(arr.join('-'), "-a-b-c");
+ const reducer = (accumulator, currentValue) => accumulator + currentValue;
+ assertEquals(arr.reduce(reducer), "abc");
+ assertEquals(arr.reduceRight(reducer), "cba");
+ assertEquals(arr.slice(0, 1), [,]);
+ assertEquals(arr.slice(1, 2), ["a"]);
+}
+var arr1 = [, 'a', 'b', 'c'];
+assertTrue(%HasHoleyElements(arr1));
+Object.freeze(arr1);
+testHoleyFrozenArray2(arr1);
+
+// Verify change from sealed to frozen
+var arr2 = [, 'a', 'b', 'c'];
+assertTrue(%HasHoleyElements(arr2));
+Object.seal(arr2);
+Object.freeze(arr2);
+testHoleyFrozenArray2(arr2);
+
+// Verify change from non-extensible to frozen
+var arr2 = [, 'a', 'b', 'c'];
+assertTrue(%HasHoleyElements(arr2));
+Object.preventExtensions(arr2);
+Object.freeze(arr2);
+testHoleyFrozenArray2(arr2);
+
+// Test regression with Object.defineProperty
+var obj = ['a', , 'b'];
+obj.propertyA = 42;
+obj[0] = true;
+Object.freeze(obj);
+assertThrows(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ });
+}, TypeError);
+assertEquals(42, obj.propertyA);
+assertThrows(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ writable: false,
+ });
+}, TypeError);
+assertDoesNotThrow(function() {obj.propertyA = 2;});
+assertEquals(obj.propertyA, 42);
+assertThrows(function() {
+ Object.defineProperty(obj, 'abc', {
+ value: obj,
+ });
+}, TypeError);
+
+// Regression test with simple holey array
+var arr = [, 'a'];
+Object.freeze(arr);
+arr[1] = 'b';
+assertEquals(arr[1], 'a');
+arr[0] = 1;
+assertEquals(arr[0], undefined);
+
+// Test regression Array.concat with double
+var arr = ['a', , 'b'];
+Object.freeze(arr);
+arr = arr.concat(0.5);
+assertEquals(arr, ['a', ,'b', 0.5]);
+Object.freeze(arr);
+arr = arr.concat([1.5, 'c']);
+assertEquals(arr, ['a', ,'b', 0.5, 1.5, 'c']);
+
+// Regression test with change length
+var arr = ['a', ,'b'];
+Object.freeze(arr);
+assertEquals(arr.length, 3);
+arr.length = 4;
+assertEquals(arr.length, 3);
+arr[3] = 'c';
+assertEquals(arr[2], 'b');
+assertEquals(arr[3], undefined);
+arr.length = 2;
+assertEquals(arr.length, 3);
+
+// Change length with holey entries at the end
+var arr = ['a', ,];
+Object.freeze(arr);
+assertEquals(arr.length, 2);
+arr.length = 0;
+assertEquals(arr.length, 2);
+arr.length = 3;
+assertEquals(arr.length, 2);
+arr.length = 0;
+assertEquals(arr.length, 2);
+
+// Spread with array
+var arr = ['a', 'b', 'c'];
+Object.freeze(arr);
+var arrSpread = [...arr];
+assertEquals(arrSpread.length, arr.length);
+assertEquals(arrSpread[0], 'a');
+assertEquals(arrSpread[1], 'b');
+assertEquals(arrSpread[2], 'c');
+
+// Spread with array-like
+function returnArgs() {
+ return Object.freeze(arguments);
+}
+var arrLike = returnArgs('a', 'b', 'c');
+assertTrue(Object.isFrozen(arrLike));
+var arrSpread = [...arrLike];
+assertEquals(arrSpread.length, arrLike.length);
+assertEquals(arrSpread[0], 'a');
+assertEquals(arrSpread[1], 'b');
+assertEquals(arrSpread[2], 'c');
+
+// Spread with holey
+function countArgs() {
+ return arguments.length;
+}
+var arr = [, 'b','c'];
+Object.freeze(arr);
+assertEquals(countArgs(...arr), 3);
+assertEquals(countArgs(...[...arr]), 3);
+assertEquals(countArgs.apply(this, [...arr]), 3);
+function checkUndefined() {
+ return arguments[0] === undefined;
+}
+assertTrue(checkUndefined(...arr));
+assertTrue(checkUndefined(...[...arr]));
+assertTrue(checkUndefined.apply(this, [...arr]));
diff --git a/deps/v8/test/mjsunit/object-keys-typedarray.js b/deps/v8/test/mjsunit/object-keys-typedarray.js
new file mode 100644
index 0000000000..b80608b527
--- /dev/null
+++ b/deps/v8/test/mjsunit/object-keys-typedarray.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[Int8Array, Uint8Array, Uint8ClampedArray, Int16Array, Uint16Array, Int32Array,
+ Uint32Array, Float32Array, Float64Array]
+ .forEach(constructor => {
+ const huge = new constructor(128);
+ assertEquals(Array.from({length: 128}).map((_, i) => String(i)),
+ Object.keys(huge));
+
+ const tiny = new constructor(2);
+ assertEquals(["0", "1"], Object.keys(tiny));
+
+ const empty = new constructor(0);
+ assertEquals([], Object.keys(empty));
+});
diff --git a/deps/v8/test/mjsunit/object-prevent-extensions.js b/deps/v8/test/mjsunit/object-prevent-extensions.js
index a2da9372a2..419a4351af 100644
--- a/deps/v8/test/mjsunit/object-prevent-extensions.js
+++ b/deps/v8/test/mjsunit/object-prevent-extensions.js
@@ -225,7 +225,7 @@ for (var i = 0; i < length-1; i++) {
assertEquals(obj[i], 'new');
}
-// Verify flat, map, flatMap, join, reduce, reduceRight for sealed packed array
+// Verify flat, map, flatMap, join, reduce, reduceRight for non-extensible packed array
var arr = ['a', 'b', 'c'];
assertTrue(%HasPackedElements(arr));
Object.preventExtensions(arr);
@@ -242,7 +242,7 @@ assertEquals(arr.reduce(reducer), "abc");
assertEquals(arr.reduceRight(reducer), "cba");
assertEquals(arr.slice(0, 1), ['a']);
-// Verify change content of sealed packed array
+// Verify change content of non-extensible packed array
arr.sort();
assertEquals(arr.join(''), "abc");
arr.reverse();
@@ -280,3 +280,180 @@ assertEquals(arr[2], undefined);
arr.length = 1;
assertEquals(arr.length, 1);
assertEquals(arr[1], undefined);
+
+// Test for holey array
+// Test holey element array built-in functions with preventExtensions.
+obj = [undefined, null, 1, , -1, 'a', Symbol("test")];
+assertTrue(%HasHoleyElements(obj));
+Object.preventExtensions(obj);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+
+// Verify that the length can't be written by builtins.
+assertThrows(function() { obj.push(1); }, TypeError);
+assertThrows(function() { obj.shift(); }, TypeError);
+assertThrows(function() { obj.unshift(1); }, TypeError);
+assertThrows(function() { obj.splice(0, 0, 1); }, TypeError);
+assertDoesNotThrow(function() {obj.splice(0, 0)});
+
+// Verify search, filter, iterator
+obj = [undefined, null, 1, ,-1, 'a', Symbol("test")];
+assertTrue(%HasHoleyElements(obj));
+Object.preventExtensions(obj);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+assertEquals(obj.lastIndexOf(1), 2);
+assertEquals(obj.indexOf('a'), 5);
+assertEquals(obj.indexOf(undefined), 0);
+assertFalse(obj.includes(Symbol("test")));
+assertTrue(obj.includes(undefined));
+assertFalse(obj.includes(NaN));
+assertTrue(obj.includes());
+assertEquals(obj.find(x => x==0), undefined);
+assertEquals(obj.findIndex(x => x=='a'), 5);
+assertTrue(obj.some(x => typeof x == 'symbol'));
+assertFalse(obj.every(x => x == -1));
+var filteredArray = obj.filter(e => typeof e == "symbol");
+assertEquals(filteredArray.length, 1);
+assertEquals(obj.map(x => x), obj);
+var countPositiveNumber = 0;
+obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+});
+assertEquals(countPositiveNumber, 1);
+assertEquals(obj.length, obj.concat([]).length);
+var iterator = obj.values();
+assertEquals(iterator.next().value, undefined);
+assertEquals(iterator.next().value, null);
+var iterator = obj.keys();
+assertEquals(iterator.next().value, 0);
+assertEquals(iterator.next().value, 1);
+var iterator = obj.entries();
+assertEquals(iterator.next().value, [0, undefined]);
+assertEquals(iterator.next().value, [1, null]);
+
+// Verify that the value can be written
+var length = obj.length;
+for (var i = 0; i < length-1; i++) {
+ if (i==3) continue;
+ obj[i] = 'new';
+ assertEquals(obj[i], 'new');
+}
+
+// Verify flat, map, flatMap, join, reduce, reduceRight for non-extensible holey array
+var arr = [, 'a', 'b', 'c'];
+assertTrue(%HasHoleyElements(arr));
+Object.preventExtensions(arr);
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+assertTrue(Array.isArray(obj));
+assertEquals(arr.map(x => [x]), [, ['a'], ['b'], ['c']]);
+assertEquals(arr.flatMap(x => [x]), ["a", "b", "c"]);
+assertEquals(arr.flat(), ["a", "b", "c"]);
+assertEquals(arr.join('-'), "-a-b-c");
+const reducer1 = (accumulator, currentValue) => accumulator + currentValue;
+assertEquals(arr.reduce(reducer1), "abc");
+assertEquals(arr.reduceRight(reducer1), "cba");
+assertEquals(arr.slice(0, 1), [,]);
+assertEquals(arr.slice(1, 2), ["a"]);
+
+// Verify change content of non-extensible holey array
+assertThrows(function(){arr.sort();}, TypeError);
+assertEquals(arr.join(''), "abc");
+assertThrows(function(){arr.reverse();}, TypeError);
+assertEquals(arr.join(''), "abc");
+assertThrows(function(){arr.copyWithin(0, 1, 2);}, TypeError);
+arr.copyWithin(1, 2, 3);
+assertEquals(arr.join(''),"bbc");
+assertThrows(function(){arr.fill('d');}, TypeError);
+assertEquals(arr.join(''), "bbc");
+arr.pop();
+assertEquals(arr.join(''), "bb");
+
+// Regression test with simple holey array
+var arr = [, 'a'];
+Object.preventExtensions(arr);
+arr[1] = 'b';
+assertEquals(arr[1], 'b');
+arr[0] = 1;
+assertEquals(arr[0], undefined);
+
+// Test regression Array.concat with double
+var arr = ['a', , 'b'];
+Object.preventExtensions(arr);
+arr = arr.concat(0.5);
+assertEquals(arr, ['a', ,'b', 0.5]);
+Object.preventExtensions(arr);
+arr = arr.concat([1.5, 'c']);
+assertEquals(arr, ['a', ,'b', 0.5, 1.5, 'c']);
+
+// Regression test with change length
+var arr = ['a', , 'b'];
+Object.preventExtensions(arr);
+assertEquals(arr.length, 3);
+arr.length = 4;
+assertEquals(arr.length, 4);
+arr[3] = 'c';
+assertEquals(arr[3], undefined);
+arr.length = 2;
+assertEquals(arr.length, 2);
+assertEquals(arr[2], undefined);
+assertEquals(arr.pop(), undefined);
+assertEquals(arr.length, 1);
+assertEquals(arr[1], undefined);
+
+// Change length with holey entries at the end
+var arr = ['a', ,];
+Object.preventExtensions(arr);
+assertEquals(arr.length, 2);
+arr.length = 0;
+assertEquals(arr.length, 0);
+arr.length = 3;
+assertEquals(arr.length, 3);
+arr.length = 0;
+assertEquals(arr.length, 0);
+
+// Spread with array
+var arr = ['a', 'b', 'c'];
+Object.preventExtensions(arr);
+var arrSpread = [...arr];
+assertEquals(arrSpread.length, arr.length);
+assertEquals(arrSpread[0], 'a');
+assertEquals(arrSpread[1], 'b');
+assertEquals(arrSpread[2], 'c');
+
+// Spread with array-like
+function returnArgs() {
+ return Object.preventExtensions(arguments);
+}
+var arrLike = returnArgs('a', 'b', 'c');
+assertFalse(Object.isExtensible(arrLike));
+var arrSpread = [...arrLike];
+assertEquals(arrSpread.length, arrLike.length);
+assertEquals(arrSpread[0], 'a');
+assertEquals(arrSpread[1], 'b');
+assertEquals(arrSpread[2], 'c');
+
+// Spread with holey
+function countArgs() {
+ return arguments.length;
+}
+var arr = [, 'b','c'];
+Object.preventExtensions(arr);
+assertEquals(countArgs(...arr), 3);
+assertEquals(countArgs(...[...arr]), 3);
+assertEquals(countArgs.apply(this, [...arr]), 3);
+function checkUndefined() {
+ return arguments[0] === undefined;
+}
+assertTrue(checkUndefined(...arr));
+assertTrue(checkUndefined(...[...arr]));
+assertTrue(checkUndefined.apply(this, [...arr]));
diff --git a/deps/v8/test/mjsunit/object-seal.js b/deps/v8/test/mjsunit/object-seal.js
index 4de0e1c7e2..a82e3a82ae 100644
--- a/deps/v8/test/mjsunit/object-seal.js
+++ b/deps/v8/test/mjsunit/object-seal.js
@@ -220,9 +220,11 @@ assertDoesNotThrow(function() { objControl.splice(0, 0, 100, 101, 102); });
// Verify that crankshaft still does the right thing.
obj = [1, 2, 3];
-push_call = function(a) { a.push(1000); return a; }
+push_call = function(a) { a.push(1000); return a; };
+%PrepareFunctionForOptimization(push_call);
// Include a call site that doesn't have a custom built-in.
-var shift_call = function(a) { a.shift(1000); return a; }
+var shift_call = function(a) { a.shift(1000); return a; };
+%PrepareFunctionForOptimization(shift_call);
for (var i = 0; i < 3; i++) {
push_call(obj);
shift_call(obj);
@@ -550,3 +552,216 @@ arr[2] = 'c';
assertEquals(arr[2], undefined);
arr.length = 1;
assertEquals(arr.length, 2);
+
+// Start testing for holey element array
+// Test holey element array built-in functions with seal.
+function testHoleySealedArray1(obj) {
+ assertTrue(Object.isSealed(obj));
+ assertFalse(Object.isFrozen(obj));
+ assertTrue(Array.isArray(obj));
+
+ // Verify that the length can't be written by builtins.
+ assertThrows(function() { obj.pop(); }, TypeError);
+ assertThrows(function() { obj.push(1); }, TypeError);
+ assertThrows(function() { obj.shift(); }, TypeError);
+ assertThrows(function() { obj.unshift(1); }, TypeError);
+ assertThrows(function() { obj.splice(0); }, TypeError);
+ assertDoesNotThrow(function() { obj.splice(0, 0); });
+
+ // Verify search, filter, iterator
+ obj = [undefined, null, 1, , -1, 'a', Symbol("test")];
+ assertTrue(%HasHoleyElements(obj));
+ Object.seal(obj);
+ assertTrue(Object.isSealed(obj));
+ assertFalse(Object.isFrozen(obj));
+ assertTrue(Array.isArray(obj));
+ assertEquals(obj.lastIndexOf(1), 2);
+ assertEquals(obj.indexOf('a'), 5);
+ assertEquals(obj.indexOf(undefined), 0);
+ assertFalse(obj.includes(Symbol("test")));
+ assertTrue(obj.includes(undefined));
+ assertFalse(obj.includes(NaN));
+ assertTrue(obj.includes());
+ assertEquals(obj.find(x => x==0), undefined);
+ assertEquals(obj.findIndex(x => x=='a'), 5);
+ assertTrue(obj.some(x => typeof x == 'symbol'));
+ assertFalse(obj.every(x => x == -1));
+ var filteredArray = obj.filter(e => typeof e == "symbol");
+ assertEquals(filteredArray.length, 1);
+ assertEquals(obj.map(x => x), obj);
+ var countPositiveNumber = 0;
+ obj.forEach(function(item, index) {
+ if (item === 1) {
+ countPositiveNumber++;
+ assertEquals(index, 2);
+ }
+ });
+ assertEquals(countPositiveNumber, 1);
+ assertEquals(obj.length, obj.concat([]).length);
+ var iterator = obj.values();
+ assertEquals(iterator.next().value, undefined);
+ assertEquals(iterator.next().value, null);
+ var iterator = obj.keys();
+ assertEquals(iterator.next().value, 0);
+ assertEquals(iterator.next().value, 1);
+ var iterator = obj.entries();
+ assertEquals(iterator.next().value, [0, undefined]);
+ assertEquals(iterator.next().value, [1, null]);
+
+ // Verify that the value can be written
+ var length = obj.length;
+ for (var i = 0; i < length; i++) {
+ if (i==3) continue;
+ obj[i] = 'new';
+ assertEquals(obj[i], 'new');
+ }
+};
+obj = [undefined, null, 1, , -1, 'a', Symbol("test")];
+assertTrue(%HasHoleyElements(obj));
+Object.seal(obj);
+testHoleySealedArray1(obj);
+
+// Verify after transition from preventExtensions
+obj = [undefined, null, 1, , -1, 'a', Symbol("test")];
+assertTrue(%HasHoleyElements(obj));
+Object.preventExtensions(obj);
+Object.seal(obj);
+testHoleySealedArray1(obj);
+
+// Verify flat, map, slice, flatMap, join, reduce, reduceRight for sealed holey array
+function testHoleySealedArray2(arr) {
+ assertTrue(Object.isSealed(arr));
+ assertFalse(Object.isFrozen(arr));
+ assertEquals(arr.map(x => [x]), [, ['a'], ['b'], ['c']]);
+ assertEquals(arr.flatMap(x => [x]), ["a", "b", "c"]);
+ assertEquals(arr.flat(), ["a", "b", "c"]);
+ assertEquals(arr.join('-'), "-a-b-c");
+ const reducer = (accumulator, currentValue) => accumulator + currentValue;
+ assertEquals(arr.reduce(reducer), "abc");
+ assertEquals(arr.reduceRight(reducer), "cba");
+ assertEquals(arr.slice(0, 1), [,]);
+ assertEquals(arr.slice(1, 2), ["a"]);
+ // Verify change content of sealed holey array
+ assertThrows(function(){arr.sort();}, TypeError);
+ assertEquals(arr.join(''), "abc");
+ assertThrows(function(){arr.reverse();}, TypeError);
+ assertEquals(arr.join(''), "abc");
+ assertThrows(function(){arr.copyWithin(0, 1, 2);}, TypeError);
+ assertEquals(arr.join(''),"abc");
+ arr.copyWithin(1, 2, 3);
+ assertEquals(arr.join(''),"bbc");
+ assertThrows(function(){arr.fill('d');}, TypeError);
+ assertEquals(arr.join(''), "bbc");
+}
+
+var arr1 = [, 'a', 'b', 'c'];
+assertTrue(%HasHoleyElements(arr1));
+Object.seal(arr1);
+testHoleySealedArray2(arr1);
+
+var arr2 = [, 'a', 'b', 'c'];
+assertTrue(%HasHoleyElements(arr2));
+Object.preventExtensions(arr2);
+Object.seal(arr2);
+testHoleySealedArray2(arr2);
+
+// Test regression with Object.defineProperty
+var obj = ['a', , 'b'];
+obj.propertyA = 42;
+obj[0] = true;
+Object.seal(obj);
+assertDoesNotThrow(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ });
+});
+assertEquals(obj, obj.propertyA);
+assertDoesNotThrow(function() {
+ Object.defineProperty(obj, 'propertyA', {
+ value: obj,
+ writable: false,
+ });
+});
+obj.propertyA = 42;
+assertEquals(obj.propertyA, 42);
+assertThrows(function() {
+ Object.defineProperty(obj, 'abc', {
+ value: obj,
+ });
+}, TypeError);
+
+// Regression test with simple holey array
+var arr = [, 'a'];
+Object.seal(arr);
+arr[1] = 'b';
+assertEquals(arr[1], 'b');
+arr[0] = 1;
+assertEquals(arr[0], undefined);
+
+// Test regression Array.concat with double
+var arr = ['a', , 'b'];
+Object.seal(arr);
+arr = arr.concat(0.5);
+assertEquals(arr, ['a', ,'b', 0.5]);
+Object.seal(arr);
+arr = arr.concat([1.5, 'c']);
+assertEquals(arr, ['a', ,'b', 0.5, 1.5, 'c']);
+
+// Regression test with change length
+var arr = ['a', ,'b'];
+Object.seal(arr);
+assertEquals(arr.length, 3);
+arr.length = 4;
+assertEquals(arr.length, 4);
+arr[3] = 'c';
+assertEquals(arr[3], undefined);
+arr.length = 2;
+assertEquals(arr.length, 3);
+
+// Change length with holey entries at the end
+var arr = ['a', ,];
+Object.seal(arr);
+assertEquals(arr.length, 2);
+arr.length = 0;
+assertEquals(arr.length, 1);
+arr.length = 3;
+assertEquals(arr.length, 3);
+arr.length = 0;
+assertEquals(arr.length, 1);
+
+// Spread with array
+var arr = ['a', 'b', 'c'];
+Object.seal(arr);
+var arrSpread = [...arr];
+assertEquals(arrSpread.length, arr.length);
+assertEquals(arrSpread[0], 'a');
+assertEquals(arrSpread[1], 'b');
+assertEquals(arrSpread[2], 'c');
+
+// Spread with array-like
+function returnArgs() {
+ return Object.seal(arguments);
+}
+var arrLike = returnArgs('a', 'b', 'c');
+assertTrue(Object.isSealed(arrLike));
+var arrSpread = [...arrLike];
+assertEquals(arrSpread.length, arrLike.length);
+assertEquals(arrSpread[0], 'a');
+assertEquals(arrSpread[1], 'b');
+assertEquals(arrSpread[2], 'c');
+
+// Spread with holey
+function countArgs() {
+ return arguments.length;
+}
+var arr = [, 'b','c'];
+Object.seal(arr);
+assertEquals(countArgs(...arr), 3);
+assertEquals(countArgs(...[...arr]), 3);
+assertEquals(countArgs.apply(this, [...arr]), 3);
+function checkUndefined() {
+ return arguments[0] === undefined;
+}
+assertTrue(checkUndefined(...arr));
+assertTrue(checkUndefined(...[...arr]));
+assertTrue(checkUndefined.apply(this, [...arr]));
diff --git a/deps/v8/test/mjsunit/optimized-filter.js b/deps/v8/test/mjsunit/optimized-filter.js
index 3c7d827e0f..97eb1f2378 100644
--- a/deps/v8/test/mjsunit/optimized-filter.js
+++ b/deps/v8/test/mjsunit/optimized-filter.js
@@ -23,7 +23,8 @@
return true;
}
return a.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -43,7 +44,8 @@
return i == 0 ? false : true;
}
return a.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
var like_a = [1,2,3,4,5,6,7,8,9,10];
assertEquals(like_a.slice(1), eagerDeoptInCalled());
eagerDeoptInCalled();
@@ -66,7 +68,8 @@
return true;
}
return a.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyChanger);
assertEquals(a, lazyChanger());
lazyChanger();
%OptimizeFunctionOnNextCall(lazyChanger);
@@ -88,7 +91,8 @@
return true;
}
return a.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeselection);
assertEquals(a, lazyDeselection());
lazyDeselection();
%OptimizeFunctionOnNextCall(lazyDeselection);
@@ -111,7 +115,8 @@
return true;
}
a_noescape.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -138,7 +143,8 @@
};
%NeverOptimizeFunction(callback);
b.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -160,7 +166,8 @@
return true;
}
a.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -184,7 +191,8 @@
};
%NeverOptimizeFunction(callback);
a.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -209,7 +217,8 @@
return true;
}
a.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -238,7 +247,8 @@
} catch (e) {
caught = true;
}
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -268,7 +278,8 @@
} catch (e) {
caught = true;
}
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -299,7 +310,8 @@
result = "nope";
}
return result;
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
assertEquals([1,2,3,4], lazyDeopt(false));
assertEquals([1,2,3,4], lazyDeopt(false));
assertEquals("nope", lazyDeopt(true));
@@ -326,7 +338,8 @@
};
var o = [1,2,3];
b.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -351,7 +364,8 @@
%NeverOptimizeFunction(callback);
var o = [1,2,3];
b.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -377,7 +391,8 @@
};
var o = [1,2,3];
b.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -401,7 +416,8 @@
return true;
};
a.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
assertThrows(() => lazyDeopt());
assertThrows(() => lazyDeopt());
try {
@@ -428,6 +444,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -445,6 +462,7 @@
return true;
});
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -461,6 +479,7 @@
return a.filter(x => x % 2 === 0, side_effect(a, b));
}
+ %PrepareFunctionForOptimization(unreliable);
let a = [1, 2, 3];
unreliable(a, false);
unreliable(a, false);
@@ -480,7 +499,8 @@
return true;
}
a.filter(callback);
- }
+ };
+ %PrepareFunctionForOptimization(species_breakage);
species_breakage();
species_breakage();
%OptimizeFunctionOnNextCall(species_breakage);
diff --git a/deps/v8/test/mjsunit/optimized-foreach-holey-2.js b/deps/v8/test/mjsunit/optimized-foreach-holey-2.js
index 6779377dbb..637aa1b3c1 100644
--- a/deps/v8/test/mjsunit/optimized-foreach-holey-2.js
+++ b/deps/v8/test/mjsunit/optimized-foreach-holey-2.js
@@ -21,7 +21,8 @@
result += v;
};
b.forEach(sum);
- }
+ };
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/optimized-foreach-holey.js b/deps/v8/test/mjsunit/optimized-foreach-holey.js
index 90145bfe5d..95ccbf9622 100644
--- a/deps/v8/test/mjsunit/optimized-foreach-holey.js
+++ b/deps/v8/test/mjsunit/optimized-foreach-holey.js
@@ -14,7 +14,8 @@
result += i;
};
b.forEach(sum);
- }
+ };
+ %PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/optimized-foreach-polymorph.js b/deps/v8/test/mjsunit/optimized-foreach-polymorph.js
index ed4958354f..ca5653c203 100644
--- a/deps/v8/test/mjsunit/optimized-foreach-polymorph.js
+++ b/deps/v8/test/mjsunit/optimized-foreach-polymorph.js
@@ -19,7 +19,8 @@ var e = [,,,,0.5,3,4];
result += i;
}
arg.forEach(sum);
- }
+ };
+ %PrepareFunctionForOptimization(polymorph1);
polymorph1(a);
polymorph1(a);
polymorph1(b);
@@ -39,7 +40,8 @@ var e = [,,,,0.5,3,4];
result += i;
}
arg.forEach(sum);
- }
+ };
+ %PrepareFunctionForOptimization(polymorph1);
polymorph1(a);
polymorph1(a);
polymorph1(b);
@@ -61,7 +63,8 @@ var e = [,,,,0.5,3,4];
result += i;
}
arg.forEach(sum);
- }
+ };
+ %PrepareFunctionForOptimization(polymorph1);
polymorph1(a);
polymorph1(a);
polymorph1(b);
@@ -82,7 +85,8 @@ var e = [,,,,0.5,3,4];
result += v;
}
arg.forEach(sum);
- }
+ };
+ %PrepareFunctionForOptimization(polymorph1);
polymorph1(d);
polymorph1(d);
polymorph1(d);
@@ -100,7 +104,8 @@ var e = [,,,,0.5,3,4];
result += v;
}
arg.forEach(sum);
- }
+ };
+ %PrepareFunctionForOptimization(polymorph1);
polymorph1(d);
polymorph1(e);
polymorph1(d);
diff --git a/deps/v8/test/mjsunit/optimized-includes-polymorph.js b/deps/v8/test/mjsunit/optimized-includes-polymorph.js
index 55dc22978b..d71428acc6 100644
--- a/deps/v8/test/mjsunit/optimized-includes-polymorph.js
+++ b/deps/v8/test/mjsunit/optimized-includes-polymorph.js
@@ -107,6 +107,7 @@ function runTests(tests, func) {
for (test in tests) {
%DeoptimizeFunction(func);
%ClearFunctionFeedback(func);
+ %PrepareFunctionForOptimization(func);
tests[test]();
%OptimizeFunctionOnNextCall(func);
tests[test]();
diff --git a/deps/v8/test/mjsunit/optimized-map.js b/deps/v8/test/mjsunit/optimized-map.js
index 6a3df4d7d4..1095f7baf2 100644
--- a/deps/v8/test/mjsunit/optimized-map.js
+++ b/deps/v8/test/mjsunit/optimized-map.js
@@ -20,9 +20,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
a.abc = 25;
}
return v;
- }
+ };
+ %EnsureFeedbackVectorForFunction(callback);
a.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -40,9 +42,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
result += v;
a.length = (i == 13 && deopt) ? 25 : 27;
return v;
- }
+ };
+ %EnsureFeedbackVectorForFunction(callback);
a.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -63,9 +67,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
a_noescape.length = 25;
}
return v;
- }
+ };
+ %EnsureFeedbackVectorForFunction(callback);
a_noescape.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -90,9 +96,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
gc(); gc();
return v;
};
+ %EnsureFeedbackVectorForFunction(callback);
%NeverOptimizeFunction(callback);
b.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -113,9 +121,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
}
return 2 * v;
};
+ %EnsureFeedbackVectorForFunction(callback);
%NeverOptimizeFunction(callback);
return b.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
assertEquals([2,4,6], lazyDeopt());
assertEquals([2,4,6], lazyDeopt());
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -132,9 +142,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
%DeoptimizeNow();
}
return v;
- }
+ };
+ %EnsureFeedbackVectorForFunction(callback);
b.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -155,9 +167,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
}
return v;
};
+ %EnsureFeedbackVectorForFunction(callback);
%NeverOptimizeFunction(callback);
b.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -179,9 +193,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
gc();
}
return v;
- }
+ };
+ %EnsureFeedbackVectorForFunction(callback);
c.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -203,13 +219,15 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
throw("a");
}
return v;
- }
+ };
+ %EnsureFeedbackVectorForFunction(callback);
try {
c.map(callback);
} catch (e) {
caught = true;
}
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -232,13 +250,15 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
}
return v;
};
+ %EnsureFeedbackVectorForFunction(callback);
%NeverOptimizeFunction(callback);
try {
c.map(callback);
} catch (e) {
caught = true;
}
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -260,6 +280,7 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
}
return 2 * v;
};
+ %EnsureFeedbackVectorForFunction(callback);
%NeverOptimizeFunction(callback);
var result = 0;
try {
@@ -269,7 +290,8 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
result = "nope";
}
return result;
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
assertEquals([2,4,6,8], lazyDeopt(false));
assertEquals([2,4,6,8], lazyDeopt(false));
assertEquals("nope", lazyDeopt(true));
@@ -292,9 +314,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
}
return v;
};
+ %EnsureFeedbackVectorForFunction(callback);
var o = [1,2,3];
b.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -314,10 +338,12 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
}
return v;
};
+ %EnsureFeedbackVectorForFunction(callback);
%NeverOptimizeFunction(callback);
var o = [1,2,3];
b.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -339,9 +365,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
}
return v;
};
+ %EnsureFeedbackVectorForFunction(callback);
var o = [1,2,3];
b.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
lazyDeopt();
lazyDeopt();
%OptimizeFunctionOnNextCall(lazyDeopt);
@@ -361,8 +389,10 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
}
return v;
};
+ %EnsureFeedbackVectorForFunction(callback);
a.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(lazyDeopt);
assertThrows(() => lazyDeopt());
assertThrows(() => lazyDeopt());
try {
@@ -393,9 +423,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
// array.
return v;
}
- }
+ };
+ %EnsureFeedbackVectorForFunction(callback);
return c.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(to_double);
to_double();
to_double();
%OptimizeFunctionOnNextCall(to_double);
@@ -423,9 +455,11 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
// Later, to PACKED_ELEMENTS.
return v + 'hello';
}
- }
+ };
+ %EnsureFeedbackVectorForFunction(callback);
return c.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(to_fast);
to_fast();
to_fast();
%OptimizeFunctionOnNextCall(to_fast);
@@ -443,8 +477,10 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
function double_results() {
// TurboFan recognizes the result is a double.
var callback = v => v + 0.5;
+ %EnsureFeedbackVectorForFunction(callback);
return a.map(callback);
}
+ %PrepareFunctionForOptimization(double_results);
double_results();
double_results();
%OptimizeFunctionOnNextCall(double_results);
@@ -461,6 +497,7 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
var callback = v => "hello" + v.toString();
return a.map(callback);
}
+ %PrepareFunctionForOptimization(string_results);
string_results();
string_results();
%OptimizeFunctionOnNextCall(string_results);
@@ -479,6 +516,7 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
return v;
});
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -496,6 +534,7 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
return v;
});
}
+ %PrepareFunctionForOptimization(withHoles);
withHoles();
withHoles();
%OptimizeFunctionOnNextCall(withHoles);
@@ -513,6 +552,7 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
}
let a = [1, 2, 3];
+ %PrepareFunctionForOptimization(unreliable);
unreliable(a, false);
unreliable(a, false);
%OptimizeFunctionOnNextCall(unreliable);
@@ -531,7 +571,8 @@ var c = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25];
return v;
}
a.map(callback);
- }
+ };
+ %PrepareFunctionForOptimization(species_breakage);
species_breakage();
species_breakage();
%OptimizeFunctionOnNextCall(species_breakage);
diff --git a/deps/v8/test/mjsunit/optimized-reduce.js b/deps/v8/test/mjsunit/optimized-reduce.js
index 345f731947..67904a39df 100644
--- a/deps/v8/test/mjsunit/optimized-reduce.js
+++ b/deps/v8/test/mjsunit/optimized-reduce.js
@@ -22,6 +22,7 @@
return r + "S";
}, "H");
}
+ %PrepareFunctionForOptimization(eagerDeoptInCalled);
eagerDeoptInCalled();
eagerDeoptInCalled();
%OptimizeFunctionOnNextCall(eagerDeoptInCalled);
@@ -38,7 +39,8 @@
a = [,,,]; // also a holey smi array.
}
return a.reduce((r,v,i,o)=>r+v);
- }
+ };
+ %PrepareFunctionForOptimization(nothingThere);
nothingThere();
nothingThere();
%OptimizeFunctionOnNextCall(nothingThere);
@@ -62,6 +64,7 @@
} catch (e) {
assertTrue(re.exec(e.stack) !== null);
}
+ %PrepareFunctionForOptimization(alwaysThrows);
try { alwaysThrows(); } catch (e) {}
try { alwaysThrows(); } catch (e) {}
%OptimizeFunctionOnNextCall(alwaysThrows);
diff --git a/deps/v8/test/mjsunit/parallel-optimize-disabled.js b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
index 83970ae096..631c3817d0 100644
--- a/deps/v8/test/mjsunit/parallel-optimize-disabled.js
+++ b/deps/v8/test/mjsunit/parallel-optimize-disabled.js
@@ -43,6 +43,8 @@ function f(x) {
g();
}
+%PrepareFunctionForOptimization(f);
+%PrepareFunctionForOptimization(g);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/pixel-array-rounding.js b/deps/v8/test/mjsunit/pixel-array-rounding.js
index b7db51c2c9..8050c13a6d 100644
--- a/deps/v8/test/mjsunit/pixel-array-rounding.js
+++ b/deps/v8/test/mjsunit/pixel-array-rounding.js
@@ -39,6 +39,7 @@ function f() {
return pixels[1] + pixels[6];
}
+%PrepareFunctionForOptimization(f);
f();
f();
assertEquals(6, pixels[5]);
diff --git a/deps/v8/test/mjsunit/promise-perform-all-resolve-lookup.js b/deps/v8/test/mjsunit/promise-perform-all-resolve-lookup.js
new file mode 100644
index 0000000000..8e877df63b
--- /dev/null
+++ b/deps/v8/test/mjsunit/promise-perform-all-resolve-lookup.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+let count = 0;
+class MyPromise extends Promise {
+ static get resolve() {
+ count++;
+ return super.resolve;
+ }
+}
+
+MyPromise.all([1, 2, 3, 4, 5]);
+assertEquals(1, count);
+%PerformMicrotaskCheckpoint();
+assertEquals(1, count);
+
+count = 0;
+MyPromise.all([
+ Promise.resolve(1),
+ Promise.resolve(2),
+ Promise.reject(3)
+]);
+assertEquals(1, count);
+%PerformMicrotaskCheckpoint();
+assertEquals(1, count);
diff --git a/deps/v8/test/mjsunit/promise-perform-all-settled-resolve-lookup.js b/deps/v8/test/mjsunit/promise-perform-all-settled-resolve-lookup.js
new file mode 100644
index 0000000000..a2f5f01837
--- /dev/null
+++ b/deps/v8/test/mjsunit/promise-perform-all-settled-resolve-lookup.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --harmony-promise-all-settled
+
+let count = 0;
+class MyPromise extends Promise {
+ static get resolve() {
+ count++;
+ return super.resolve;
+ }
+}
+
+MyPromise.allSettled([1, 2, 3, 4, 5]);
+assertEquals(1, count);
+%PerformMicrotaskCheckpoint();
+assertEquals(1, count);
+
+count = 0;
+MyPromise.allSettled([
+ Promise.resolve(1),
+ Promise.resolve(2),
+ Promise.reject(3)
+]);
+assertEquals(1, count);
+%PerformMicrotaskCheckpoint();
+assertEquals(1, count);
diff --git a/deps/v8/test/mjsunit/promise-perfrom-race-resolve-lookup.js b/deps/v8/test/mjsunit/promise-perfrom-race-resolve-lookup.js
new file mode 100644
index 0000000000..72c9c401e1
--- /dev/null
+++ b/deps/v8/test/mjsunit/promise-perfrom-race-resolve-lookup.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+let count = 0;
+class MyPromise extends Promise {
+ static get resolve() {
+ count++;
+ return super.resolve;
+ }
+}
+
+MyPromise.race([1, 2, 3, 4, 5]);
+assertEquals(1, count);
+%PerformMicrotaskCheckpoint();
+assertEquals(1, count);
+
+count = 0;
+MyPromise.race([
+ Promise.resolve(1),
+ Promise.resolve(2),
+ Promise.reject(3)
+]);
+assertEquals(1, count);
+%PerformMicrotaskCheckpoint();
+assertEquals(1, count);
diff --git a/deps/v8/test/mjsunit/prototype-non-existing.js b/deps/v8/test/mjsunit/prototype-non-existing.js
index 367a59f547..293aefb136 100644
--- a/deps/v8/test/mjsunit/prototype-non-existing.js
+++ b/deps/v8/test/mjsunit/prototype-non-existing.js
@@ -34,6 +34,7 @@
var result = c.z;
assertEquals(expected, result);
}
+ %PrepareFunctionForOptimization(f);
f("a");
f("a");
f("a");
@@ -74,6 +75,7 @@
var result = c.z;
assertEquals(expected, result);
}
+ %PrepareFunctionForOptimization(f);
f("a");
f("a");
f("a");
diff --git a/deps/v8/test/mjsunit/regexp-override-symbol-match-all.js b/deps/v8/test/mjsunit/regexp-override-symbol-match-all.js
index b5b99f232d..10a78a6b61 100644
--- a/deps/v8/test/mjsunit/regexp-override-symbol-match-all.js
+++ b/deps/v8/test/mjsunit/regexp-override-symbol-match-all.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-string-matchall
-
var s = "baa";
assertEquals([["b"]], [...s.matchAll(/./)]);
diff --git a/deps/v8/test/mjsunit/regress-906893.js b/deps/v8/test/mjsunit/regress-906893.js
index 4b4942d665..981b5824cb 100644
--- a/deps/v8/test/mjsunit/regress-906893.js
+++ b/deps/v8/test/mjsunit/regress-906893.js
@@ -13,6 +13,7 @@ function f() {
r.test("ABcd");
}
+%PrepareFunctionForOptimization(f);
f();
assertEquals(1, counter);
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress-918763.js b/deps/v8/test/mjsunit/regress-918763.js
index 45916f015a..2e70536a18 100644
--- a/deps/v8/test/mjsunit/regress-918763.js
+++ b/deps/v8/test/mjsunit/regress-918763.js
@@ -9,6 +9,7 @@ C.__proto__ = null;
function f(c) { return 0 instanceof c; }
+%PrepareFunctionForOptimization(f);
f(C);
%OptimizeFunctionOnNextCall(f);
assertThrows(() => f(0));
diff --git a/deps/v8/test/mjsunit/regress-958725.js b/deps/v8/test/mjsunit/regress-958725.js
new file mode 100644
index 0000000000..37706e8adf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-958725.js
@@ -0,0 +1,25 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(v3) {
+ Symbol[Symbol.replace] = Object;
+ const v8 = {};
+ let i = 0;
+ do {
+ const v12 = v3[3];
+ for (let v17 = 0; v17 < 100000; v17++) {
+ }
+ const v18 = Object();
+ function v19(v20,v21,v22) {
+ }
+ i++;;
+ } while (i < 1);
+ const v25 = Object.freeze(v8);
+}
+
+f(Object);
+%OptimizeFunctionOnNextCall(f);
+f(Object);
diff --git a/deps/v8/test/mjsunit/regress-963346.js b/deps/v8/test/mjsunit/regress-963346.js
new file mode 100644
index 0000000000..8f29556210
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-963346.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = ['3'];
+function foo(i) { o.x = i; }
+foo("string");
+Object.preventExtensions(o);
+Object.seal(o);
+print('foo');
+foo(0);
+%HeapObjectVerify(o);
+assertEquals(o.x, 0);
diff --git a/deps/v8/test/mjsunit/regress-966460.js b/deps/v8/test/mjsunit/regress-966460.js
new file mode 100644
index 0000000000..8acf49b5a5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-966460.js
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+PI = [];
+PI[250] = PI;
+Object.seal(PI);
+assertTrue(Object.isSealed(PI));
+var proxy = new Proxy(PI, PI);
+Object.freeze(proxy);
+assertTrue(Object.isFrozen(proxy));
diff --git a/deps/v8/test/mjsunit/regress-v8-8445-2.js b/deps/v8/test/mjsunit/regress-v8-8445-2.js
index 828b877d0c..e37c7a02ea 100644
--- a/deps/v8/test/mjsunit/regress-v8-8445-2.js
+++ b/deps/v8/test/mjsunit/regress-v8-8445-2.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-string-matchall
+// Flags: --allow-natives-syntax
class MyRegExp {
exec() { return null; }
diff --git a/deps/v8/test/mjsunit/regress-v8-8445.js b/deps/v8/test/mjsunit/regress-v8-8445.js
index 7641416ba0..94443be231 100644
--- a/deps/v8/test/mjsunit/regress-v8-8445.js
+++ b/deps/v8/test/mjsunit/regress-v8-8445.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-string-matchall
+// Flags: --allow-natives-syntax
class MyRegExp {
exec() { return null; }
diff --git a/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
index 8d60e9015e..a935a49c10 100644
--- a/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
+++ b/deps/v8/test/mjsunit/regress/binop-in-effect-context-deopt.js
@@ -34,6 +34,7 @@
for (var i = 0; osr && i < 2; i++) %OptimizeOsr();
return result;
}
+ %PrepareFunctionForOptimization(f);
assertEquals("result", f(true, 3, false));
assertEquals("result", f(true, 3, false));
@@ -58,6 +59,7 @@
function f() {
return g(void(h() + ""));
};
+ %PrepareFunctionForOptimization(f);
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js b/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
index 72d3938511..b20645ce22 100644
--- a/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
+++ b/deps/v8/test/mjsunit/regress/call-function-in-effect-context-deopt.js
@@ -35,6 +35,7 @@ function f(deopt, osr) {
return result;
}
+%PrepareFunctionForOptimization(f);
assertEquals("result", f(3, false));
assertEquals("result", f(3, false));
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-1118.js b/deps/v8/test/mjsunit/regress/regress-1118.js
index 05b192d6f3..08b7239e33 100644
--- a/deps/v8/test/mjsunit/regress/regress-1118.js
+++ b/deps/v8/test/mjsunit/regress/regress-1118.js
@@ -31,21 +31,25 @@
// should be able to construct a receiver from all optimized stack frames.
function A() { }
+%EnsureFeedbackVectorForFunction(A);
A.prototype.f = function() { }
function B() { }
+%EnsureFeedbackVectorForFunction(B);
var o = new A();
// This function throws if o does not have an f property, and should not be
// inlined.
function g() { try { return o.f(); } finally { }}
+%EnsureFeedbackVectorForFunction(g);
// This function should be optimized via OSR.
function h() {
for (var i = 0; i < 10; i++) %OptimizeOsr();
g();
}
+%PrepareFunctionForOptimization(h);
h();
o = new B();
diff --git a/deps/v8/test/mjsunit/regress/regress-1257.js b/deps/v8/test/mjsunit/regress/regress-1257.js
index c5ed14dd3b..4ea42c9b0e 100644
--- a/deps/v8/test/mjsunit/regress/regress-1257.js
+++ b/deps/v8/test/mjsunit/regress/regress-1257.js
@@ -31,4 +31,5 @@ function foo () {
};
}
+%PrepareFunctionForOptimization(foo);
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-2132.js b/deps/v8/test/mjsunit/regress/regress-2132.js
index c2f6c297b4..9df1c1ba44 100644
--- a/deps/v8/test/mjsunit/regress/regress-2132.js
+++ b/deps/v8/test/mjsunit/regress/regress-2132.js
@@ -31,6 +31,7 @@ function mul(x, y) {
return (x * y) | 0;
}
+%PrepareFunctionForOptimization(mul);
mul(0, 0);
mul(0, 0);
%OptimizeFunctionOnNextCall(mul);
@@ -41,6 +42,7 @@ function div(x, y) {
return (x / y) | 0;
}
+%PrepareFunctionForOptimization(div);
div(4, 2);
div(4, 2);
%OptimizeFunctionOnNextCall(div);
diff --git a/deps/v8/test/mjsunit/regress/regress-2339.js b/deps/v8/test/mjsunit/regress/regress-2339.js
index d7d2bb398a..abad9fda96 100644
--- a/deps/v8/test/mjsunit/regress/regress-2339.js
+++ b/deps/v8/test/mjsunit/regress/regress-2339.js
@@ -35,8 +35,10 @@ function simple_two_args(always_zero, always_undefined) {
var always_five = always_undefined || 5;
return always_zero * always_five * .5;
}
+%EnsureFeedbackVectorForFunction(simple_two_args);
+%PrepareFunctionForOptimization(simple);
simple();
simple();
%OptimizeFunctionOnNextCall(simple);
diff --git a/deps/v8/test/mjsunit/regress/regress-2451.js b/deps/v8/test/mjsunit/regress/regress-2451.js
index 08efda2325..0e63cfebd6 100644
--- a/deps/v8/test/mjsunit/regress/regress-2451.js
+++ b/deps/v8/test/mjsunit/regress/regress-2451.js
@@ -33,6 +33,7 @@ function f() {
assertEquals(-1.0, Math.round(-0.5000000000000001));
}
+%PrepareFunctionForOptimization(f);
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-252797.js b/deps/v8/test/mjsunit/regress/regress-252797.js
index 08b22176af..4d68ff8b29 100644
--- a/deps/v8/test/mjsunit/regress/regress-252797.js
+++ b/deps/v8/test/mjsunit/regress/regress-252797.js
@@ -55,6 +55,7 @@ function callConstantFunctionOnPrototype(obj) {
obj.holderMethod();
}
+%PrepareFunctionForOptimization(callConstantFunctionOnPrototype);
callConstantFunctionOnPrototype(receiver);
callConstantFunctionOnPrototype(receiver);
%OptimizeFunctionOnNextCall(callConstantFunctionOnPrototype);
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index 551605c6b3..6b80fbc2af 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr --allow-natives-syntax --ignition-osr --opt
+// Flags: --use-osr --allow-natives-syntax --opt
// Flags: --no-always-opt
// Can't OSR with always-opt or in Lite mode.
@@ -50,6 +50,7 @@ function f() {
} while (false);
}
+%PrepareFunctionForOptimization(f);
f();
function g() {
@@ -85,4 +86,5 @@ function g() {
} while (false);
}
+%PrepareFunctionForOptimization(g);
g();
diff --git a/deps/v8/test/mjsunit/regress/regress-298269.js b/deps/v8/test/mjsunit/regress/regress-298269.js
index 46f525edb8..dd6b6a00fc 100644
--- a/deps/v8/test/mjsunit/regress/regress-298269.js
+++ b/deps/v8/test/mjsunit/regress/regress-298269.js
@@ -14,6 +14,7 @@ function Cb(a, trigger) {
}
return g;
}
+%PrepareFunctionForOptimization(Cb);
var s1 = "long string to make cons string 1";
var s2 = "long string to make cons string 2";
diff --git a/deps/v8/test/mjsunit/regress/regress-2989.js b/deps/v8/test/mjsunit/regress/regress-2989.js
index ff09c08a41..213c0df7b5 100644
--- a/deps/v8/test/mjsunit/regress/regress-2989.js
+++ b/deps/v8/test/mjsunit/regress/regress-2989.js
@@ -33,7 +33,7 @@ if (isNeverOptimizeLiteMode()) {
x = 42;
return f.arguments[0];
}
-
+ %EnsureFeedbackVectorForFunction(f);
f(0);
%OptimizeFunctionOnNextCall(f);
assertEquals(42, f(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-3032.js b/deps/v8/test/mjsunit/regress/regress-3032.js
index 9b18e146ce..e7cd58dc25 100644
--- a/deps/v8/test/mjsunit/regress/regress-3032.js
+++ b/deps/v8/test/mjsunit/regress/regress-3032.js
@@ -27,6 +27,10 @@
// Flags: --allow-natives-syntax
-for (var i = 0; i < 10; i++) { if (i == 5) %OptimizeOsr(); }
-var xl = 4096;
-var z = i % xl;
+function f() {
+ for (var i = 0; i < 10; i++) { if (i == 5) %OptimizeOsr(); }
+ var xl = 4096;
+ var z = i % xl;
+}
+%PrepareFunctionForOptimization(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-3650-3.js b/deps/v8/test/mjsunit/regress/regress-3650-3.js
index 6195b12441..e613235428 100644
--- a/deps/v8/test/mjsunit/regress/regress-3650-3.js
+++ b/deps/v8/test/mjsunit/regress/regress-3650-3.js
@@ -10,6 +10,7 @@ function foo(a) {
}
}
+%PrepareFunctionForOptimization(foo);
foo([1,2,3]);
foo([2,3,4]);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-3709.js b/deps/v8/test/mjsunit/regress/regress-3709.js
index ecb906a908..17b4612809 100644
--- a/deps/v8/test/mjsunit/regress/regress-3709.js
+++ b/deps/v8/test/mjsunit/regress/regress-3709.js
@@ -18,6 +18,7 @@ function foo() {
}
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-379770.js b/deps/v8/test/mjsunit/regress/regress-379770.js
index ab1b339f7d..6234899c05 100644
--- a/deps/v8/test/mjsunit/regress/regress-379770.js
+++ b/deps/v8/test/mjsunit/regress/regress-379770.js
@@ -10,6 +10,7 @@ function foo(obj) {
counter += obj;
return counter;
}
+%PrepareFunctionForOptimization(foo);
function bar() {
var a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
diff --git a/deps/v8/test/mjsunit/regress/regress-385565.js b/deps/v8/test/mjsunit/regress/regress-385565.js
index 541e461d96..4281fe0f8e 100644
--- a/deps/v8/test/mjsunit/regress/regress-385565.js
+++ b/deps/v8/test/mjsunit/regress/regress-385565.js
@@ -43,6 +43,7 @@ Array.prototype.f = function() {
var o1 = {m: 1};
var o2 = {a: 0, m:1};
+%PrepareFunctionForOptimization(callsFReceiver);
var r1 = callsFReceiver(o1);
callsFReceiver(o1);
%OptimizeFunctionOnNextCall(callsFReceiver);
@@ -50,12 +51,15 @@ var r2 = callsFReceiver(o1);
assertOptimized(callsFReceiver);
callsFReceiver(o2);
assertUnoptimized(callsFReceiver);
+
+%PrepareFunctionForOptimization(callsFReceiver);
var r3 = callsFReceiver(o1);
assertEquals(1, r1);
assertTrue(r1 === r2);
assertTrue(r2 === r3);
+%OptimizeFunctionOnNextCall(callsFReceiver);
r1 = callsFReceiver(o1);
callsFReceiver(o1);
%OptimizeFunctionOnNextCall(callsFReceiver);
diff --git a/deps/v8/test/mjsunit/regress/regress-3976.js b/deps/v8/test/mjsunit/regress/regress-3976.js
index efa3ac03bc..9b37c2cd23 100644
--- a/deps/v8/test/mjsunit/regress/regress-3976.js
+++ b/deps/v8/test/mjsunit/regress/regress-3976.js
@@ -72,7 +72,7 @@ function generate(n) {
print("generating");
-var str = generate(50000);
+var str = generate(10000);
print("parsing " + str.length);
JSON.parse(str);
diff --git a/deps/v8/test/mjsunit/regress/regress-4121.js b/deps/v8/test/mjsunit/regress/regress-4121.js
index 0f03e79cf2..fcf625a061 100644
--- a/deps/v8/test/mjsunit/regress/regress-4121.js
+++ b/deps/v8/test/mjsunit/regress/regress-4121.js
@@ -23,6 +23,7 @@ function literals_sharing_test(warmup, optimize) {
// propagated to the next closure.
assertTrue(%HasDoubleElements(a));
};
+ %EnsureFeedbackVectorForFunction(closure);
if (optimize) %OptimizeFunctionOnNextCall(closure);
closure();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-4380.js b/deps/v8/test/mjsunit/regress/regress-4380.js
index 06a64790ef..5eb773fc1e 100644
--- a/deps/v8/test/mjsunit/regress/regress-4380.js
+++ b/deps/v8/test/mjsunit/regress/regress-4380.js
@@ -9,6 +9,7 @@ function bar(a) {
return x == undefined;
}
+%PrepareFunctionForOptimization(bar);
// Make the keyed load be polymorphic on holey smi and holey fast.
bar([, 2, 3]);
bar([, 'two', 'three']);
diff --git a/deps/v8/test/mjsunit/regress/regress-5252.js b/deps/v8/test/mjsunit/regress/regress-5252.js
index 41f3feeec2..5dd0310637 100644
--- a/deps/v8/test/mjsunit/regress/regress-5252.js
+++ b/deps/v8/test/mjsunit/regress/regress-5252.js
@@ -11,6 +11,7 @@
return 23;
} while(false)
}
+ %PrepareFunctionForOptimization(f);
assertEquals(23, f());
assertEquals(23, f());
})();
@@ -24,6 +25,7 @@
} while(false)
return 999;
}
+ %PrepareFunctionForOptimization(g);
var gen = g();
assertEquals({ value:23, done:false }, gen.next());
assertEquals({ value:42, done:false }, gen.next());
diff --git a/deps/v8/test/mjsunit/regress/regress-5262.js b/deps/v8/test/mjsunit/regress/regress-5262.js
index 06932a2f0f..d980ba8e91 100644
--- a/deps/v8/test/mjsunit/regress/regress-5262.js
+++ b/deps/v8/test/mjsunit/regress/regress-5262.js
@@ -20,6 +20,7 @@ function f(osr_and_recurse) {
}
return 65;
}
+%PrepareFunctionForOptimization(f);
assertEquals(65, f(false));
assertEquals(65, f(false));
assertEquals(42, f(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-612412.js b/deps/v8/test/mjsunit/regress/regress-612412.js
index 3debe66f32..4cd9b9f83e 100644
--- a/deps/v8/test/mjsunit/regress/regress-612412.js
+++ b/deps/v8/test/mjsunit/regress/regress-612412.js
@@ -15,6 +15,7 @@ var f = (function() {
}
}
})();
+%PrepareFunctionForOptimization(f);
g = (function() { f((Array), counter()); });
g();
diff --git a/deps/v8/test/mjsunit/regress/regress-6607-1.js b/deps/v8/test/mjsunit/regress/regress-6607-1.js
index 74b702b228..92177dfbc9 100644
--- a/deps/v8/test/mjsunit/regress/regress-6607-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-6607-1.js
@@ -8,6 +8,7 @@ function get(a, i) {
return a[i];
}
+%PrepareFunctionForOptimization(get);
get([1,,3], 0);
get([1,,3], 2);
%OptimizeFunctionOnNextCall(get);
diff --git a/deps/v8/test/mjsunit/regress/regress-6607-2.js b/deps/v8/test/mjsunit/regress/regress-6607-2.js
index cfb0009845..26aafa3a02 100644
--- a/deps/v8/test/mjsunit/regress/regress-6607-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-6607-2.js
@@ -8,6 +8,7 @@ function get(a, i) {
return a[i];
}
+%PrepareFunctionForOptimization(get);
get([1,,3], 0);
get([1,,3], 2);
%OptimizeFunctionOnNextCall(get);
diff --git a/deps/v8/test/mjsunit/regress/regress-666046.js b/deps/v8/test/mjsunit/regress/regress-666046.js
index b4615383e0..23e991dc17 100644
--- a/deps/v8/test/mjsunit/regress/regress-666046.js
+++ b/deps/v8/test/mjsunit/regress/regress-666046.js
@@ -21,6 +21,7 @@ A.prototype = proto;
function foo(o) {
return o.a0;
}
+%EnsureFeedbackVectorForFunction(foo);
// Ensure |proto| is in old space.
gc();
diff --git a/deps/v8/test/mjsunit/regress/regress-6941.js b/deps/v8/test/mjsunit/regress/regress-6941.js
index a8370831f8..aba4d5cdbc 100644
--- a/deps/v8/test/mjsunit/regress/regress-6941.js
+++ b/deps/v8/test/mjsunit/regress/regress-6941.js
@@ -7,6 +7,7 @@
function foo(x) {
return Symbol.iterator == x;
}
+%PrepareFunctionForOptimization(foo);
function main() {
foo(Symbol());
diff --git a/deps/v8/test/mjsunit/regress/regress-6948.js b/deps/v8/test/mjsunit/regress/regress-6948.js
index c7e0fae28f..4ebf6e1010 100644
--- a/deps/v8/test/mjsunit/regress/regress-6948.js
+++ b/deps/v8/test/mjsunit/regress/regress-6948.js
@@ -8,6 +8,7 @@ var o = {};
function foo(s) { return o[s]; }
+%PrepareFunctionForOptimization(foo);
var s = 'c' + 'c';
foo(s);
foo(s);
diff --git a/deps/v8/test/mjsunit/regress/regress-6989.js b/deps/v8/test/mjsunit/regress/regress-6989.js
index b4a33c59c9..26035dd6fd 100644
--- a/deps/v8/test/mjsunit/regress/regress-6989.js
+++ b/deps/v8/test/mjsunit/regress/regress-6989.js
@@ -7,6 +7,7 @@
(function() {
function foo(o) { o["x"] = 1; }
+ %PrepareFunctionForOptimization(foo);
assertThrows(() => foo(undefined));
assertThrows(() => foo(undefined));
%OptimizeFunctionOnNextCall(foo);
@@ -17,6 +18,7 @@
(function() {
function foo(o) { o["x"] = 1; }
+ %PrepareFunctionForOptimization(foo);
assertThrows(() => foo(null));
assertThrows(() => foo(null));
%OptimizeFunctionOnNextCall(foo);
@@ -27,6 +29,7 @@
(function() {
function foo(o) { return o["x"]; }
+ %PrepareFunctionForOptimization(foo);
assertThrows(() => foo(undefined));
assertThrows(() => foo(undefined));
%OptimizeFunctionOnNextCall(foo);
@@ -37,6 +40,7 @@
(function() {
function foo(o) { return o["x"]; }
+ %PrepareFunctionForOptimization(foo);
assertThrows(() => foo(null));
assertThrows(() => foo(null));
%OptimizeFunctionOnNextCall(foo);
@@ -47,6 +51,7 @@
(function() {
function foo(o) { o.x = 1; }
+ %PrepareFunctionForOptimization(foo);
assertThrows(() => foo(undefined));
assertThrows(() => foo(undefined));
%OptimizeFunctionOnNextCall(foo);
@@ -57,6 +62,7 @@
(function() {
function foo(o) { o.x = 1; }
+ %PrepareFunctionForOptimization(foo);
assertThrows(() => foo(null));
assertThrows(() => foo(null));
%OptimizeFunctionOnNextCall(foo);
@@ -67,6 +73,7 @@
(function() {
function foo(o) { return o.x; }
+ %PrepareFunctionForOptimization(foo);
assertThrows(() => foo(undefined));
assertThrows(() => foo(undefined));
%OptimizeFunctionOnNextCall(foo);
@@ -77,6 +84,7 @@
(function() {
function foo(o) { return o.x; }
+ %PrepareFunctionForOptimization(foo);
assertThrows(() => foo(null));
assertThrows(() => foo(null));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-6991.js b/deps/v8/test/mjsunit/regress/regress-6991.js
index 1c6b976977..e88b5e3825 100644
--- a/deps/v8/test/mjsunit/regress/regress-6991.js
+++ b/deps/v8/test/mjsunit/regress/regress-6991.js
@@ -6,6 +6,7 @@
function foo(o) { return o.x; }
+%PrepareFunctionForOptimization(foo);
assertEquals(undefined, foo({}));
assertEquals(undefined, foo(1));
assertEquals(undefined, foo({}));
diff --git a/deps/v8/test/mjsunit/regress/regress-7014-1.js b/deps/v8/test/mjsunit/regress/regress-7014-1.js
index 6aadf91aa2..c678087738 100644
--- a/deps/v8/test/mjsunit/regress/regress-7014-1.js
+++ b/deps/v8/test/mjsunit/regress/regress-7014-1.js
@@ -8,6 +8,7 @@ function foo(s) {
return s[5];
}
+%PrepareFunctionForOptimization(foo);
assertEquals("f", foo("abcdef"));
assertEquals(undefined, foo("a"));
%OptimizeFunctionOnNextCall(foo);
@@ -19,6 +20,7 @@ assertOptimized(foo);
String.prototype[5] = "5";
assertEquals("f", foo("abcdef"));
+%PrepareFunctionForOptimization(foo);
assertEquals("5", foo("a"));
%OptimizeFunctionOnNextCall(foo);
assertEquals("f", foo("abcdef"));
diff --git a/deps/v8/test/mjsunit/regress/regress-7014-2.js b/deps/v8/test/mjsunit/regress/regress-7014-2.js
index 057e170d90..6ec6df6b3f 100644
--- a/deps/v8/test/mjsunit/regress/regress-7014-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-7014-2.js
@@ -8,6 +8,7 @@ function foo(s) {
return s[5];
}
+%PrepareFunctionForOptimization(foo);
assertEquals("f", foo("abcdef"));
assertEquals(undefined, foo("a"));
%OptimizeFunctionOnNextCall(foo);
@@ -23,6 +24,7 @@ String.prototype.__proto__ = new Proxy(String.prototype.__proto__, {
});
assertEquals("f", foo("abcdef"));
+%PrepareFunctionForOptimization(foo);
assertEquals("5", foo("a"));
%OptimizeFunctionOnNextCall(foo);
assertEquals("f", foo("abcdef"));
diff --git a/deps/v8/test/mjsunit/regress/regress-7135.js b/deps/v8/test/mjsunit/regress/regress-7135.js
index 2387241eee..3621dec5ce 100644
--- a/deps/v8/test/mjsunit/regress/regress-7135.js
+++ b/deps/v8/test/mjsunit/regress/regress-7135.js
@@ -5,12 +5,14 @@
// Flags: --allow-natives-syntax --opt
function foo() { return -"0" }
+%PrepareFunctionForOptimization(foo);
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
assertOptimized(foo);
function bar() { return -"1" }
+%PrepareFunctionForOptimization(bar);
bar();
%OptimizeFunctionOnNextCall(bar);
bar();
diff --git a/deps/v8/test/mjsunit/regress/regress-852765.js b/deps/v8/test/mjsunit/regress/regress-852765.js
index 393adf2079..13fbf4653e 100644
--- a/deps/v8/test/mjsunit/regress/regress-852765.js
+++ b/deps/v8/test/mjsunit/regress/regress-852765.js
@@ -3,15 +3,15 @@
// found in the LICENSE file.
// The actual regression test
-assertThrows("(import(foo)) =>", undefined, "Invalid destructuring assignment target");
+assertThrows("(import(foo)) =>", SyntaxError, "Invalid destructuring assignment target");
// Other related tests
-assertThrows("import(foo) =>", undefined, "Malformed arrow function parameter list");
-assertThrows("(a, import(foo)) =>", undefined, "Invalid destructuring assignment target");
-assertThrows("(1, import(foo)) =>", undefined, "Invalid destructuring assignment target");
-assertThrows("(super(foo)) =>", undefined, "'super' keyword unexpected here");
-assertThrows("(bar(foo)) =>", undefined, "Invalid destructuring assignment target");
+assertThrows("import(foo) =>", SyntaxError, "Malformed arrow function parameter list");
+assertThrows("(a, import(foo)) =>", SyntaxError, "Invalid destructuring assignment target");
+assertThrows("(1, import(foo)) =>", SyntaxError, "Invalid destructuring assignment target");
+assertThrows("(super(foo)) =>", SyntaxError, "'super' keyword unexpected here");
+assertThrows("(bar(foo)) =>", SyntaxError, "Invalid destructuring assignment target");
// No syntax errors
-assertThrows("[import(foo).then] = [1];", undefined, "foo is not defined");
-assertThrows("[[import(foo).then]] = [[1]];", undefined, "foo is not defined");
+assertThrows("[import(foo).then] = [1];", ReferenceError, "foo is not defined");
+assertThrows("[[import(foo).then]] = [[1]];", ReferenceError, "foo is not defined");
diff --git a/deps/v8/test/mjsunit/regress/regress-8913.js b/deps/v8/test/mjsunit/regress/regress-8913.js
index 9403334d72..7ebdd063f5 100644
--- a/deps/v8/test/mjsunit/regress/regress-8913.js
+++ b/deps/v8/test/mjsunit/regress/regress-8913.js
@@ -6,6 +6,7 @@
function foo(t) { return 'a'.concat(t); }
+%PrepareFunctionForOptimization(foo);
foo(1);
foo(1);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-9017.js b/deps/v8/test/mjsunit/regress/regress-9017.js
new file mode 100644
index 0000000000..1b9b9e7101
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-9017.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Call a recursive function that uses large numbers of bound arguments. If we
+// are failing to touch consecutive guard pages on Windows when extending the
+// stack for bound arguments, then this would crash.
+
+const frameSize = 4096 * 5;
+const numValues = frameSize / 4;
+const arr = new Array(numValues);
+let counter = 10;
+function f() { --counter; return 1 + (counter > 0 ? bound() : 0); }
+const bound = f.bind.apply(f, arr);
+bound();
diff --git a/deps/v8/test/mjsunit/regress/regress-902552.js b/deps/v8/test/mjsunit/regress/regress-902552.js
index 081df058e2..41a6ea86e7 100644
--- a/deps/v8/test/mjsunit/regress/regress-902552.js
+++ b/deps/v8/test/mjsunit/regress/regress-902552.js
@@ -4,8 +4,12 @@
// Flags: --allow-natives-syntax
-var C = class {};
-for (var i = 0; i < 4; ++i) {
- if (i == 2) %OptimizeOsr();
- C.prototype.foo = 42;
+function f() {
+ var C = class {};
+ for (var i = 0; i < 4; ++i) {
+ if (i == 2) %OptimizeOsr();
+ C.prototype.foo = 42;
+ }
}
+%PrepareFunctionForOptimization(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-903697.js b/deps/v8/test/mjsunit/regress/regress-903697.js
index 85f970fe8e..ad2f56058d 100644
--- a/deps/v8/test/mjsunit/regress/regress-903697.js
+++ b/deps/v8/test/mjsunit/regress/regress-903697.js
@@ -4,9 +4,13 @@
// Flags: --allow-natives-syntax --expose-gc --verify-heap
-C = class {};
-for (var i = 0; i < 5; ++i) {
- gc();
- if (i == 2) %OptimizeOsr();
- C.prototype.foo = i + 9000000000000000;
+function f() {
+ C = class {};
+ for (var i = 0; i < 5; ++i) {
+ gc();
+ if (i == 2) %OptimizeOsr();
+ C.prototype.foo = i + 9000000000000000;
+ }
}
+%PrepareFunctionForOptimization(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-9105.js b/deps/v8/test/mjsunit/regress/regress-9105.js
new file mode 100644
index 0000000000..bd73af09e1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-9105.js
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let array = new Uint32Array(32);
+array[10] = 10; array[20] = 20;
+
+Array.prototype.sort.call(array);
+assertEquals(32, array.length);
+assertEquals(10, array[30]);
+assertEquals(20, array[31]);
diff --git a/deps/v8/test/mjsunit/regress/regress-9165.js b/deps/v8/test/mjsunit/regress/regress-9165.js
new file mode 100644
index 0000000000..1de6e9db2a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-9165.js
@@ -0,0 +1,47 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-anyref
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+let kSig_r_i = makeSig([kWasmI32], [kWasmAnyRef]);
+
+(function TestMergeOfAnyFuncIntoAnyRef() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("merge", kSig_r_i)
+ .addLocals({anyref_count: 1, anyfunc_count: 1})
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI32Eqz,
+ kExprIf, kWasmAnyRef,
+ kExprGetLocal, 1,
+ kExprElse,
+ kExprGetLocal, 2,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ assertEquals(null, instance.exports.merge(0));
+ assertEquals(null, instance.exports.merge(1));
+})();
+
+(function TestMergeOfAnyFuncIntoNullRef() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("merge", kSig_r_i)
+ .addLocals({anyfunc_count: 1})
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI32Eqz,
+ kExprIf, kWasmAnyRef,
+ kExprRefNull,
+ kExprElse,
+ kExprGetLocal, 1,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ assertEquals(null, instance.exports.merge(0));
+ assertEquals(null, instance.exports.merge(1));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-961237.js b/deps/v8/test/mjsunit/regress/regress-961237.js
new file mode 100644
index 0000000000..a1e57b7662
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-961237.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+const a = 1.1;
+const b = null;
+
+function f(x) { return -0 == (x ? a : b); }
+%PrepareFunctionForOptimization(f);
+assertEquals(false, f(true));
+assertEquals(false, f(true));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(false, f(false));
diff --git a/deps/v8/test/mjsunit/regress/regress-961508.js b/deps/v8/test/mjsunit/regress/regress-961508.js
new file mode 100644
index 0000000000..45ab0f8d2b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-961508.js
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --print-ast
+
+const foo = new class bar extends async function () {}.constructor {}();
diff --git a/deps/v8/test/mjsunit/regress/regress-963891.js b/deps/v8/test/mjsunit/regress/regress-963891.js
new file mode 100644
index 0000000000..28bf920d1d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-963891.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var bar = true;
+bar = false;
+function foo() {
+ return !bar;
+}
+assertEquals(foo(), true);
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(foo(), true);
diff --git a/deps/v8/test/mjsunit/regress/regress-976627.js b/deps/v8/test/mjsunit/regress/regress-976627.js
new file mode 100644
index 0000000000..1dde87e196
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-976627.js
@@ -0,0 +1,40 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --young-generation-large-objects
+
+function v2() {
+ const v8 = Symbol || 9007199254740991;
+ function v9(v10,v11,v12) {
+ }
+ const v16 = String();
+ const v100 = String();//add
+ const v106 = String();// add
+ const v116 = String();// add
+ const v17 = Int32Array();
+ const v18 = Map();
+ const v19 = [];
+ const v20 = v18.values();
+ function v21(v22,v23,v24,v25,v26) {
+ }
+ function v28(v29,v30,v31) {
+ function v32(v33,v34,v35,v36) {
+ }
+ let v39 = 0;
+ do {
+ const v40 = v32();
+ function v99() {
+ }
+ } while (v39 < 8);
+ }
+ const v41 = Promise();
+}
+const v46 = ["has",13.37,-9007199254740991,Reflect];
+for (let v50 = 64; v50 <= 2000; v50++) {
+ v46.push(v50,v2);
+}
+const v54 = RegExp(v46);
+const v55 = v54.exec();
+
+assertTrue(%HasElementsInALargeObjectSpace(v55));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-150545.js b/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
index cfee0618ec..fb21f3d57d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-150545.js
@@ -42,11 +42,13 @@
assertSame(2, arguments[1]);
assertSame(3, arguments[2]);
}
+ %EnsureFeedbackVectorForFunction(inner);
function outer() {
inner(1,2,3);
for (var i = 0; i < 3; i++) %OptimizeOsr();
}
+ %PrepareFunctionForOptimization(outer);
outer();
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-319860.js b/deps/v8/test/mjsunit/regress/regress-crbug-319860.js
index b81fb85ba7..e0fd4812d0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-319860.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-319860.js
@@ -36,6 +36,7 @@ function read(a, index) {
return result;
}
+%PrepareFunctionForOptimization(read);
var a = new Int8Array(0x2000001);
read(a, 0);
read(a, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-480807.js b/deps/v8/test/mjsunit/regress/regress-crbug-480807.js
index a1448d6de6..7913a3a8bf 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-480807.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-480807.js
@@ -16,6 +16,8 @@ function foo() {
}
return c;
}
+%PrepareFunctionForOptimization(foo);
+
try {
foo();
} catch (e) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-513507.js b/deps/v8/test/mjsunit/regress/regress-crbug-513507.js
index ae321ba906..87ba8b9606 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-513507.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-513507.js
@@ -16,8 +16,10 @@ function makeFun() {
if (i == osr_fuse) %OptimizeOsr();
}
}
+ %PrepareFunctionForOptimization(fun);
return fun;
}
+%PrepareFunctionForOptimization(makeFun);
makeFun()(7); // Warm up.
makeFun()(4); // Optimize once.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-522895.js b/deps/v8/test/mjsunit/regress/regress-crbug-522895.js
index f28f3a1cb9..b2c9dc929c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-522895.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-522895.js
@@ -19,4 +19,6 @@ function gen() {
return eval("(" + body + ")");
}
-gen()();
+var f = gen();
+%PrepareFunctionForOptimization(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-554831.js b/deps/v8/test/mjsunit/regress/regress-crbug-554831.js
index 3d022b257b..27106d5efa 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-554831.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-554831.js
@@ -7,6 +7,7 @@
(function() {
var key = "s";
function f(object) { return object[key]; };
+ %PrepareFunctionForOptimization(f);
f("");
f("");
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-587068.js b/deps/v8/test/mjsunit/regress/regress-crbug-587068.js
index 864f8ce7d2..8704d7230a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-587068.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-587068.js
@@ -7,6 +7,7 @@
// The Crankshaft fast case for String.fromCharCode used to unconditionally
// deoptimize on non int32 indices.
function foo(i) { return String.fromCharCode(i); }
+%PrepareFunctionForOptimization(foo);
foo(33);
foo(33);
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-594183.js b/deps/v8/test/mjsunit/regress/regress-crbug-594183.js
index cb8003404d..ddf515907e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-594183.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-594183.js
@@ -24,6 +24,7 @@ function load() {
return sum;
}
+%PrepareFunctionForOptimization(load);
load();
load();
%OptimizeFunctionOnNextCall(load);
@@ -37,6 +38,7 @@ function store() {
}
}
+%PrepareFunctionForOptimization(store);
store();
store();
%OptimizeFunctionOnNextCall(store);
@@ -70,6 +72,7 @@ function inferrable_store(key) {
store_element(o5, key);
}
+%PrepareFunctionForOptimization(inferrable_store);
inferrable_store(0);
inferrable_store(0);
%OptimizeFunctionOnNextCall(inferrable_store);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-638551.js b/deps/v8/test/mjsunit/regress/regress-crbug-638551.js
index f812359d8d..46f307e559 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-638551.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-638551.js
@@ -7,9 +7,11 @@
function f() {
for (var i = 0; i < 10; i++) if (i == 5) %OptimizeOsr();
function g() {}
+ %PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
}
+%PrepareFunctionForOptimization(f);
f();
gc(); // Make sure that ...
gc(); // ... code flushing ...
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-640369.js b/deps/v8/test/mjsunit/regress/regress-crbug-640369.js
index 97982d1224..3ca396bb93 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-640369.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-640369.js
@@ -8,8 +8,10 @@ function A() {
this.x = 0;
for (var i = 0; i < max; ) {}
}
+%EnsureFeedbackVectorForFunction(A);
function foo() {
for (var i = 0; i < 1; i = 2) %OptimizeOsr();
return new A();
}
+%PrepareFunctionForOptimization(foo);
try { foo(); } catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-645888.js b/deps/v8/test/mjsunit/regress/regress-crbug-645888.js
index 06879723ea..f29dbebcc0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-645888.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-645888.js
@@ -15,4 +15,5 @@ function f() {
throw "no loop, thank you";
}
}
+%PrepareFunctionForOptimization(f);
assertThrows(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-662830.js b/deps/v8/test/mjsunit/regress/regress-crbug-662830.js
index 3126978d7d..eec1da2193 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-662830.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-662830.js
@@ -14,6 +14,7 @@ function g() {
for (var i = 0; i < 3; ++i) if (i === 1) %OptimizeOsr();
%_DeoptimizeNow();
}
+%PrepareFunctionForOptimization(g);
%OptimizeFunctionOnNextCall(g);
g();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-665587.js b/deps/v8/test/mjsunit/regress/regress-crbug-665587.js
index a9b3841218..48f31c576c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-665587.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-665587.js
@@ -13,4 +13,5 @@ function f() {
gc();
}
}
+%PrepareFunctionForOptimization(f);
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-668795.js b/deps/v8/test/mjsunit/regress/regress-crbug-668795.js
index b85d222080..42c2d5243b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-668795.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-668795.js
@@ -17,5 +17,6 @@ function f() {
}
return result;
}
+%PrepareFunctionForOptimization(f);
assertEquals("R:121212", f());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-827013.js b/deps/v8/test/mjsunit/regress/regress-crbug-827013.js
index 83ace68d8d..9281fbf600 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-827013.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-827013.js
@@ -6,13 +6,16 @@
(function Test() {
var f = () => 42;
- delete f.length;
- delete f.name;
+ function modify_f() {
+ delete f.length;
+ delete f.name;
- var g = Object.create(f);
- for (var i = 0; i < 5; i++) {
- g.dummy;
+ var g = Object.create(f);
+ for (var i = 0; i < 5; i++) {
+ g.dummy;
+ }
}
+ %EnsureFeedbackVectorForFunction(f);
assertTrue(%HasFastProperties(f));
var h = f.bind(this);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js
index 565e5fbc23..b79ac2ab29 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-882233-2.js
@@ -12,6 +12,7 @@ function shift_array() {
return array.shift();
}
+%PrepareFunctionForOptimization(shift_array);
assertThrows(shift_array);
assertThrows(shift_array);
%OptimizeFunctionOnNextCall(shift_array);
@@ -25,6 +26,7 @@ function shift_object() {
return object.shift();
}
+%PrepareFunctionForOptimization(shift_object);
assertThrows(shift_object);
assertThrows(shift_object);
%OptimizeFunctionOnNextCall(shift_object);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-899464.js b/deps/v8/test/mjsunit/regress/regress-crbug-899464.js
index 1deaa30c2d..b2f594ce80 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-899464.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-899464.js
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-string-matchall
-
''.matchAll(/./u);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-9161.js b/deps/v8/test/mjsunit/regress/regress-crbug-9161.js
new file mode 100644
index 0000000000..a90a8ad6ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-9161.js
@@ -0,0 +1,59 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This test is a reproduction of a crash that happens when a TypedArray
+// backed by a SharedArrayBuffer is concurrently modified while sorting.
+// Segfaults would need a long time to trigger in normal builds, so this
+// reproduction is tailored to trigger on ASAN builds. On ASAN builds,
+// out-of-bounds accesses while sorting would result in an immediate failure.
+
+const lock = new Int32Array(new SharedArrayBuffer(4));
+
+const kIterations = 5000;
+const kLength = 2000;
+
+const kStageIndex = 0;
+const kStageInit = 0;
+const kStageRunning = 1;
+const kStageDone = 2;
+
+Atomics.store(lock, kStageIndex, kStageInit);
+
+function WaitUntil(expected) {
+ while (true) {
+ const value = Atomics.load(lock, kStageIndex);
+ if (value === expected) break;
+ }
+}
+
+const workerScript = `
+ onmessage = function([sab, lock]) {
+ const i32a = new Int32Array(sab);
+ Atomics.store(lock, ${kStageIndex}, ${kStageRunning});
+
+ for (let j = 1; j < ${kIterations}; ++j) {
+ for (let i = 0; i < i32a.length; ++i) {
+ i32a[i] = j;
+ }
+ }
+
+ postMessage("done");
+ Atomics.store(lock, ${kStageIndex}, ${kStageDone});
+ };`;
+
+const worker = new Worker(workerScript, {type: 'string'});
+
+const i32a = new Int32Array(
+ new SharedArrayBuffer(Int32Array.BYTES_PER_ELEMENT * kLength)
+);
+
+worker.postMessage([i32a.buffer, lock]);
+WaitUntil(kStageRunning);
+
+for (let i = 0; i < kIterations; ++i) {
+ i32a.sort();
+}
+
+WaitUntil(kStageDone);
+assertEquals(worker.getMessage(), "done");
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-934166.js b/deps/v8/test/mjsunit/regress/regress-crbug-934166.js
index d6fae7136b..b23026f8f0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-934166.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-934166.js
@@ -5,14 +5,18 @@
// Flags: --allow-natives-syntax
{
- for(let i = 0; i < 10; ++i){
- try{
- // Carefully constructed by a fuzzer to use a new register for s(), whose
- // write is dead due to the unconditional throw after s()=N, but which is
- // read in the ({...g}) call, which therefore must also be marked dead and
- // elided.
- with(f&&g&&(s()=N)({...g})){}
- } catch {}
- %OptimizeOsr();
+ function f() {
+ for(let i = 0; i < 10; ++i){
+ try{
+ // Carefully constructed by a fuzzer to use a new register for s(), whose
+ // write is dead due to the unconditional throw after s()=N, but which is
+ // read in the ({...g}) call, which therefore must also be marked dead and
+ // elided.
+ with(f&&g&&(s()=N)({...g})){}
+ } catch {}
+ %OptimizeOsr();
+ }
}
+ %PrepareFunctionForOptimization(f);
+ f();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-935800.js b/deps/v8/test/mjsunit/regress/regress-crbug-935800.js
new file mode 100644
index 0000000000..18f735d614
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-935800.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function foo() {
+ "use asm";
+ function bar() {}
+ return {bar: bar};
+}
+var module = foo();
+assertTrue(Object.getOwnPropertyNames(module.bar).includes("prototype"));
+assertInstanceof(new module.bar(), module.bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-937734.js b/deps/v8/test/mjsunit/regress/regress-crbug-937734.js
index 26ab8645a6..2972956db3 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-937734.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-937734.js
@@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --opt
function foo()
{
return 1 in [0];
}
+%PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
foo();
-assertEquals(0, %GetDeoptCount(foo));
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-941703.js b/deps/v8/test/mjsunit/regress/regress-crbug-941703.js
new file mode 100644
index 0000000000..ec4847c891
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-941703.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("(this) , this =>", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-951400.js b/deps/v8/test/mjsunit/regress/regress-crbug-951400.js
new file mode 100644
index 0000000000..f43a1be897
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-951400.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+function foo(arr) {
+ gc();
+ eval(arr);
+}
+
+try {
+ foo("tag`Hello${tag}`");
+} catch (e) {}
+
+%OptimizeFunctionOnNextCall(foo);
+
+try {
+ foo("tag.prop`${tag}`");
+} catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-959645-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-959645-1.js
new file mode 100644
index 0000000000..afe9612db4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-959645-1.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --modify-field-representations-inplace
+
+function f(array, x) {
+ array.x = x;
+ array[0] = 1.1;
+ return array;
+}
+
+f([1], 1);
+f([2], 1);
+%HeapObjectVerify(f([3], undefined));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-959645-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-959645-2.js
new file mode 100644
index 0000000000..634bfa9543
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-959645-2.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --modify-field-representations-inplace
+
+function f(array, x) {
+ array.x = x;
+ array[0] = undefined;
+ return array;
+}
+
+f([1.1], 1);
+f([2.2], 1);
+%HeapObjectVerify(f([3.3], undefined));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-959727.js b/deps/v8/test/mjsunit/regress/regress-crbug-959727.js
new file mode 100644
index 0000000000..36bda5b99c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-959727.js
@@ -0,0 +1,11 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+let r = Realm.createAllowCrossRealmAccess();
+Realm.detachGlobal(r);
+try {
+ Realm.global(r)[1] = 0;
+} catch (e) {
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-961522.js b/deps/v8/test/mjsunit/regress/regress-crbug-961522.js
new file mode 100644
index 0000000000..c7e1eb8bb5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-961522.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --nolazy
+
+(function () {
+ let arr = [, 3];
+ function inlined() {
+ }
+ function foo() {
+ arr.reduce(inlined);
+ }
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-961709-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-961709-1.js
new file mode 100644
index 0000000000..4cc40c5127
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-961709-1.js
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ const a = [];
+ a[0] = 1;
+ return a[0];
+}
+
+Object.setPrototypeOf(Array.prototype, new Int8Array());
+assertEquals(undefined, foo());
+assertEquals(undefined, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-961709-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-961709-2.js
new file mode 100644
index 0000000000..dcbf8dcb2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-961709-2.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt
+
+function foo() {
+ const a = [];
+ a[0] = 1;
+ return a[0];
+}
+
+%EnsureFeedbackVectorForFunction(foo);
+Object.setPrototypeOf(Array.prototype, new Int8Array());
+assertEquals(undefined, foo());
+assertEquals(undefined, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(undefined, foo());
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-963568.js b/deps/v8/test/mjsunit/regress/regress-crbug-963568.js
new file mode 100644
index 0000000000..bc902b83ba
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-963568.js
@@ -0,0 +1,6 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+JSON.parse('{"0":true,"1":true,"2":true,"3":true,"4":true,"9":true," ":true,"D":true,"B":true,"-1":true,"A":true,"C":true}');
+JSON.parse('{"0":true,"1":true,"2":true,"3":true,"4":true,"9":true," ":true,"D":true,"B":true,"-1":true,"A":true,"C":true}');
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-964833.js b/deps/v8/test/mjsunit/regress/regress-crbug-964833.js
new file mode 100644
index 0000000000..094f86cefa
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-964833.js
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ var n = 3;
+ var obj = {};
+
+ var m = n;
+ for (;;) {
+ m++;
+
+ if (m == 456) {
+ break;
+ }
+
+ var i = 0;
+ var j = 0;
+ while (i < 1) {
+ j = i;
+ i++;
+ }
+ obj.y = j;
+ }
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-964869.js b/deps/v8/test/mjsunit/regress/regress-crbug-964869.js
new file mode 100644
index 0000000000..d630669bdc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-964869.js
@@ -0,0 +1,5 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const o = {x: JSON.parse('{"x":1.1}').x};
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-967065.js b/deps/v8/test/mjsunit/regress/regress-crbug-967065.js
new file mode 100644
index 0000000000..ca6744c13b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-967065.js
@@ -0,0 +1,16 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests that the receiver {length} property conversion works on 32-bit
+// systems (i.e. it should not crash).
+
+function ThrowingSort() {
+ const __v_3 = new Array(2147549152);
+ Object.defineProperty(__v_3, 0, {
+ get: () => { throw new Error("Do not actually sort!"); }
+ });
+ __v_3.sort();
+}
+
+assertThrows(() => ThrowingSort());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-967151.js b/deps/v8/test/mjsunit/regress/regress-crbug-967151.js
new file mode 100644
index 0000000000..d98c01adf3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-967151.js
@@ -0,0 +1,12 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-externalize-string
+
+__v_3 = "100 external string turned into two byte";
+__v_2 = __v_3.substring(0, 28);
+try {
+ externalizeString(__v_3, true);
+} catch (e) {}
+assertEquals(100, JSON.parse(__v_2));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-967254.js b/deps/v8/test/mjsunit/regress/regress-crbug-967254.js
new file mode 100644
index 0000000000..95333d5b37
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-967254.js
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that fast COW arrays are properly handled by Array#sort.
+
+function COWSort() {
+ const array = ["cc", "c", "aa", "bb", "b", "ab", "ac"];
+ array.sort();
+ return array;
+}
+
+assertArrayEquals(["aa", "ab", "ac", "b", "bb", "c", "cc"], COWSort());
+
+Array.prototype.sort = () => {};
+
+assertArrayEquals(["cc", "c", "aa", "bb", "b", "ab", "ac"], COWSort());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-967434.js b/deps/v8/test/mjsunit/regress/regress-crbug-967434.js
new file mode 100644
index 0000000000..bd7b4073e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-967434.js
@@ -0,0 +1,36 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f1(h_also_eval) {
+ this.x = h_also_eval;
+}
+
+function f2(h, h_eval) {
+ var o = new f1(h());
+ // During the last call to f3 with g2 as an argument, this store is
+ // bi-morphic, including a version that refers to the old map (before
+ // the replacement of f1's prototype). As a result, during load elimination
+ // we see two stores with incompatible representations: One in the
+ // constructor, and one in the impossible branch of the bi-morphic store
+ // site.
+ o.x = h_eval;
+}
+
+function f3(h) {
+ f2(h, h());
+ %OptimizeFunctionOnNextCall(f2);
+ f2(h, h());
+}
+
+function g1() { return {}; };
+function g2() { return 4.2; };
+
+f3(g1);
+f3(g2);
+
+f3(g1);
+f1.prototype = {};
+f3(g2);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-971383.js b/deps/v8/test/mjsunit/regress/regress-crbug-971383.js
new file mode 100644
index 0000000000..0d5595befb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-971383.js
@@ -0,0 +1,7 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --icu-locale=tr
+
+assertEquals(["HIJK"], "HIJK".match(/[a-z]+/gi));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-980529.js b/deps/v8/test/mjsunit/regress/regress-crbug-980529.js
new file mode 100644
index 0000000000..2fdf7ad78f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-980529.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --no-always-opt
+
+const a = {toString: () => {
+ console.log("print arguments", print.arguments);
+}};
+
+function g(x) {
+ print(x);
+}
+
+%PrepareFunctionForOptimization(g);
+g(a);
+g(a);
+%OptimizeFunctionOnNextCall(g);
+g(a);
diff --git a/deps/v8/test/mjsunit/regress/regress-osr-context.js b/deps/v8/test/mjsunit/regress/regress-osr-context.js
index a73954156c..7df16bb5eb 100644
--- a/deps/v8/test/mjsunit/regress/regress-osr-context.js
+++ b/deps/v8/test/mjsunit/regress/regress-osr-context.js
@@ -16,5 +16,6 @@
}
return a;
}
+ %PrepareFunctionForOptimization(f);
assertEquals(18, f());
})();
diff --git a/deps/v8/test/mjsunit/regress/regress-param-local-type.js b/deps/v8/test/mjsunit/regress/regress-param-local-type.js
index 0eaca50af5..99f2b3f520 100644
--- a/deps/v8/test/mjsunit/regress/regress-param-local-type.js
+++ b/deps/v8/test/mjsunit/regress/regress-param-local-type.js
@@ -37,6 +37,7 @@ function f(a) { // First parameter is tagged.
n = i + a;
}
+%PrepareFunctionForOptimization(f);
f(1);
f(1);
%OptimizeFunctionOnNextCall(f);
@@ -51,6 +52,7 @@ function g() { // 0th parameter (receiver) is tagged.
n = i + this;
}
+%PrepareFunctionForOptimization(g);
g.call(1);
g.call(1);
%OptimizeFunctionOnNextCall(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-store-uncacheable.js b/deps/v8/test/mjsunit/regress/regress-store-uncacheable.js
index 4baedbacae..8a31f2c087 100644
--- a/deps/v8/test/mjsunit/regress/regress-store-uncacheable.js
+++ b/deps/v8/test/mjsunit/regress/regress-store-uncacheable.js
@@ -32,6 +32,7 @@ function f() {
o["<abc>"] = 123;
}
+%PrepareFunctionForOptimization(f);
f();
f();
f();
diff --git a/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js b/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js
index 9297c2df54..c51fa8f98b 100644
--- a/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js
+++ b/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js
@@ -19,7 +19,7 @@ function get_closure() {
return x;
}
}
-
+%PrepareFunctionForOptimization(get_closure);
var f1 = get_closure();
f1(new Ctor(), false);
f1(new Ctor(), false);
diff --git a/deps/v8/test/mjsunit/regress/regress-trap-allocation-memento.js b/deps/v8/test/mjsunit/regress/regress-trap-allocation-memento.js
index e31fb88354..185f908586 100644
--- a/deps/v8/test/mjsunit/regress/regress-trap-allocation-memento.js
+++ b/deps/v8/test/mjsunit/regress/regress-trap-allocation-memento.js
@@ -27,6 +27,10 @@ function assertKind(expected, obj, name_opt) {
function make2() { return new Array(); }
function make3() { return new Array(); }
function foo(a, i) { a[0] = i; }
+ %EnsureFeedbackVectorForFunction(make1);
+ %EnsureFeedbackVectorForFunction(make2);
+ %EnsureFeedbackVectorForFunction(make3);
+ %EnsureFeedbackVectorForFunction(foo);
function run_test(maker_function) {
var one = maker_function();
@@ -37,6 +41,7 @@ function assertKind(expected, obj, name_opt) {
var two = maker_function();
assertKind(elements_kind.fast_double, two);
}
+ %EnsureFeedbackVectorForFunction(run_test);
// Initialize the KeyedStoreIC in foo; the actual operation will be done
// in the runtime.
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-4153-1.js b/deps/v8/test/mjsunit/regress/regress-v8-4153-1.js
new file mode 100644
index 0000000000..125b4e2f01
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-4153-1.js
@@ -0,0 +1,14 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap
+
+// Create tiny (on-heap) instances of TypedArrays to make sure
+// that the ByteArrays are properly sized (in new space).
+var arrays = [
+ Int8Array, Uint8Array, Int16Array, Uint16Array, Int32Array, Uint32Array,
+ Float32Array, Float64Array, Uint8ClampedArray, BigInt64Array, BigUint64Array
+].map(C => {
+ new C(1)
+});
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-7682.js b/deps/v8/test/mjsunit/regress/regress-v8-7682.js
index 86f12f5b74..68f9e0b761 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-7682.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-7682.js
@@ -22,5 +22,5 @@ Array.prototype.sort.call(xs);
// the spec:
// - "xs" is sparse and IsExtensible(xs) is false (its frozen).
// - "xs" is sparse and the prototype has properties in the sort range.
-assertEquals(2, xs[0]);
-assertEquals(1, xs[1]);
+assertEquals(1, xs[0]);
+assertEquals(2, xs[1]);
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9243.js b/deps/v8/test/mjsunit/regress/regress-v8-9243.js
new file mode 100644
index 0000000000..23ca935f6b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9243.js
@@ -0,0 +1,26 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// The special IterResultObject map that builtins use should be the same
+// as the one produced by the `{value, done}` object literal.
+const user = {value:undefined, done:true};
+
+// Array iterator.
+const arrayResult = (new Array())[Symbol.iterator]().next();
+assertTrue(%HaveSameMap(user, arrayResult));
+
+// Map iterator.
+const mapResult = (new Map())[Symbol.iterator]().next();
+assertTrue(%HaveSameMap(user, mapResult));
+
+// Set iterator.
+const setResult = (new Set())[Symbol.iterator]().next();
+assertTrue(%HaveSameMap(user, setResult));
+
+// Generator.
+function* generator() {}
+const generatorResult = generator().next();
+assertTrue(%HaveSameMap(user, setResult));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9267-1.js b/deps/v8/test/mjsunit/regress/regress-v8-9267-1.js
new file mode 100644
index 0000000000..fb3abea634
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9267-1.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(a) {
+ return Object.defineProperty(a, 'x', {get() { return 1; }});
+}
+
+function foo() {
+ return Array(1);
+}
+
+%NeverOptimizeFunction(bar);
+%PrepareFunctionForOptimization(foo);
+const o = foo(); // Keep a reference so the GC doesn't kill the map.
+bar(o);
+const a = bar(foo());
+%OptimizeFunctionOnNextCall(foo);
+const b = bar(foo());
+
+assertTrue(%HaveSameMap(a, b));
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-9267-2.js b/deps/v8/test/mjsunit/regress/regress-v8-9267-2.js
new file mode 100644
index 0000000000..cfe1b50ab7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-9267-2.js
@@ -0,0 +1,23 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(a) {
+ return Object.defineProperty(a, 'x', {get() { return 1; }});
+}
+
+function foo() {
+ return {};
+}
+
+%NeverOptimizeFunction(bar);
+%PrepareFunctionForOptimization(foo);
+const o = foo(); // Keep a reference so the GC doesn't kill the map.
+bar(o);
+const a = bar(foo());
+%OptimizeFunctionOnNextCall(foo);
+const b = bar(foo());
+
+assertTrue(%HaveSameMap(a, b));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-834619.js b/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
index cfa6e7bb3b..1062d5547a 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-834619.js
@@ -26,6 +26,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
let builder = new WasmModuleBuilder();
builder.addImport("q", "f2", kSig_i_v);
builder.addImport("q", "f1", kSig_i_v);
+ builder.addTable(kWasmAnyFunc, 4);
builder.addFunction("main", kSig_i_i)
.addBody([
kExprGetLocal, 0,
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-9017.js b/deps/v8/test/mjsunit/regress/wasm/regress-9017.js
new file mode 100644
index 0000000000..7a8930a146
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-9017.js
@@ -0,0 +1,38 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --liftoff --nowasm-tier-up
+//
+// This test is intended to make Liftoff generate code that uses a very large
+// stack frame, and then try to call another function (which would write to the
+// stack pointer location). On Windows, large frames need extra code to touch
+// every page in order, because the OS only leaves a small guard area for the
+// stack, and trying to access past that area, even into memory that was
+// intentionally reserved for this thread's stack, will crash the program.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+var func_idx = builder.addFunction('helper', kSig_i_v)
+ .addLocals({i32_count: 1})
+ .addBody([
+ kExprI32Const, 0x01,
+ ]).index;
+
+var large_function_body = [];
+const num_temporaries = 16 * 1024;
+for (let i = 0; i < num_temporaries; ++i) {
+ large_function_body.push(kExprCallFunction, func_idx);
+}
+for (let i = 1; i < num_temporaries; ++i) {
+ large_function_body.push(kExprI32Add);
+}
+
+builder.addFunction('test', kSig_i_v)
+ .addBody(large_function_body)
+ .exportFunc();
+var module = builder.instantiate();
+
+assertEquals(num_temporaries, module.exports.test());
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-952342.js b/deps/v8/test/mjsunit/regress/wasm/regress-952342.js
new file mode 100644
index 0000000000..eb81f5a9c6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-952342.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const memory = new WebAssembly.Memory({initial: 1});
+
+let builder = new WasmModuleBuilder();
+builder.addImportedMemory("imports", "mem", 1);
+builder.addFunction("copy", kSig_v_iii)
+ .addBody([kExprGetLocal, 0, // dst
+ kExprGetLocal, 1, // src
+ kExprGetLocal, 2, // size
+ kNumericPrefix, kExprMemoryCopy, 0, 0]).exportAs("copy");
+let instance = builder.instantiate({imports: {mem: memory}});
+memory.grow(1);
+instance.exports.copy(0, kPageSize, 11);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-956771.js b/deps/v8/test/mjsunit/regress/wasm/regress-956771.js
new file mode 100644
index 0000000000..3fac6c871b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-956771.js
@@ -0,0 +1,28 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function testLazyModuleAsyncCompilation() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("some", kSig_i_ii)
+ assertPromiseResult(WebAssembly.compile(builder.toBuffer())
+ .then(assertUnreachable,
+ error => assertEquals("WebAssembly.compile(): function body must " +
+ "end with \"end\" opcode @+26",
+ error.message)));
+})();
+
+(function testLazyModuleSyncCompilation() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("some", kSig_i_ii)
+ assertThrows(() => builder.toModule(),
+ WebAssembly.CompileError,
+ "WebAssembly.Module(): Compiling function #0:\"some\" failed: " +
+ "function body must end with \"end\" opcode @+26");
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-956771b.js b/deps/v8/test/mjsunit/regress/wasm/regress-956771b.js
new file mode 100644
index 0000000000..1c819cb49d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-956771b.js
@@ -0,0 +1,19 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-lazy-compilation --wasm-test-streaming
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function testLazyModuleStreamingCompilation() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("some", kSig_i_ii);
+ let bytes = builder.toBuffer();
+ assertPromiseResult(WebAssembly.compileStreaming(Promise.resolve(bytes))
+ .then(assertUnreachable,
+ error => assertEquals("WebAssembly.compileStreaming(): function " +
+ "body must end with \"end\" opcode @+26",
+ error.message)));
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-957405.js b/deps/v8/test/mjsunit/regress/wasm/regress-957405.js
new file mode 100644
index 0000000000..a83104297e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-957405.js
@@ -0,0 +1,20 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const memory = new WebAssembly.Memory({initial: 1});
+
+let builder = new WasmModuleBuilder();
+builder.addImportedMemory("imports", "mem");
+builder.addFunction("fill", kSig_v_iii)
+ .addBody([kExprGetLocal, 0, // dst
+ kExprGetLocal, 1, // value
+ kExprGetLocal, 2, // size
+ kNumericPrefix, kExprMemoryFill, 0]).exportAs("fill");
+let instance = builder.instantiate({imports: {mem: memory}});
+memory.grow(1);
+assertTraps(
+ kTrapMemOutOfBounds,
+ () => instance.exports.fill(kPageSize + 1, 123, kPageSize));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-961129.js b/deps/v8/test/mjsunit/regress/wasm/regress-961129.js
new file mode 100644
index 0000000000..7a4903a66a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-961129.js
@@ -0,0 +1,18 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+%EnableCodeLoggingForTesting();
+
+function module() {
+ "use asm";
+ function f() {
+ var i = 4;
+ return i | 0;
+ }
+ return {f: f};
+}
+
+module().f();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-964607.js b/deps/v8/test/mjsunit/regress/wasm/regress-964607.js
new file mode 100644
index 0000000000..0a6d7628c6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-964607.js
@@ -0,0 +1,29 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-anyref
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+let builder = new WasmModuleBuilder();
+
+builder.addImportedTable('ffi', 't1', 5, 5, kWasmAnyFunc);
+builder.addImportedTable('ffi', 't2', 9, 9, kWasmAnyFunc);
+
+builder.addFunction('foo', kSig_v_v).addBody([]).exportFunc();
+
+let module = builder.toModule();
+let table1 =
+ new WebAssembly.Table({element: 'anyfunc', initial: 5, maximum: 5});
+
+let table2 =
+ new WebAssembly.Table({element: 'anyfunc', initial: 9, maximum: 9});
+
+let instance =
+ new WebAssembly.Instance(module, {ffi: {t1: table1, t2: table2}});
+let table3 =
+ new WebAssembly.Table({element: 'anyfunc', initial: 9, maximum: 9});
+
+table3.set(8, instance.exports.foo);
+new WebAssembly.Instance(module, {ffi: {t1: table1, t2: table3}});
diff --git a/deps/v8/test/mjsunit/reindexing-in-classes.js b/deps/v8/test/mjsunit/reindexing-in-classes.js
new file mode 100644
index 0000000000..9bb9ae4945
--- /dev/null
+++ b/deps/v8/test/mjsunit/reindexing-in-classes.js
@@ -0,0 +1,72 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test the reindexer visiting classes, avoiding repeat visits of the same
+// function.
+//
+// For each test, create function literals inside a class, where the functions
+// have to be reindexed due to the whole thing being inside an arrow head scope.
+
+((arg = (function wrapper() {
+ // Class with field that has computed property name with a function in the
+ // computation.
+ class g {
+ [{b: function in_computed_field_name() {}}]
+ }
+})) => {})();
+
+((arg = (function wrapper() {
+ // Class with initialized field that has computed property name with a
+ // function in the computation.
+ class g {
+ [{b: function in_computed_field_name_with_init() {}}] = ""
+ }
+})) => {})();
+
+((arg = (function wrapper() {
+ // Class with initialized field that has literal property name with a function
+ // in the initializer value.
+ class g {
+ b = (function in_init_value_of_field(){})()
+ }
+})) => {})();
+
+((arg = (function wrapper() {
+ // Class with initialized field that has private property name with a function
+ // in the initializer value.
+ class g {
+ #b = (function in_init_value_of_private_field(){})()
+ }
+})) => {})();
+
+((arg = (function wrapper() {
+ // Class with initialized field that has computed property name with a
+ // function in the initializer value.
+ class g {
+ ["b"] = (function in_init_value_of_computed_field_name(){})()
+ }
+})) => {})();
+
+((arg = (function wrapper() {
+ // Class with method that has computed property name with a function in the
+ // computation.
+ class g {
+ [{b: function in_computed_method_name() {}}] () {}
+ }
+})) => {})();
+
+((arg = (function wrapper() {
+ // Class with method that has an argument with a default function init.
+ class g {
+ b(arg = function in_method_arg_default_init() {}) {}
+ }
+})) => {})();
+
+((arg = (function wrapper() {
+ // Class with method that has a computed property name and an argument with a
+ // default function init.
+ class g {
+ ["b"] (arg = function in_computed_method_arg_default_init() {}) {}
+ }
+})) => {})();
diff --git a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
index abce803fb0..de53699570 100644
--- a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
+++ b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
@@ -19,7 +19,8 @@ assertFalse(isNeverOptimize());
for (var i = 0; i < 3; ++i) {
var f = function(x) {
return 2 * x;
- }
+ };
+ %PrepareFunctionForOptimization(f);
sum += f(i);
if (i == 1) {
diff --git a/deps/v8/test/mjsunit/shift-for-integer-div.js b/deps/v8/test/mjsunit/shift-for-integer-div.js
index 9264242879..353f79bec8 100644
--- a/deps/v8/test/mjsunit/shift-for-integer-div.js
+++ b/deps/v8/test/mjsunit/shift-for-integer-div.js
@@ -31,6 +31,7 @@ function divp4(x) {
return x / 4;
}
+%PrepareFunctionForOptimization(divp4);
divp4(8);
divp4(8);
%OptimizeFunctionOnNextCall(divp4);
@@ -42,6 +43,7 @@ function divn4(x) {
return x / (-4);
}
+%PrepareFunctionForOptimization(divn4);
divn4(8);
divn4(8);
%OptimizeFunctionOnNextCall(divn4);
@@ -55,6 +57,7 @@ function divn1(x) {
return x / (-1);
}
+%PrepareFunctionForOptimization(divn1);
var two_31 = 1 << 31;
divn1(2);
divn1(2);
@@ -68,6 +71,7 @@ function divp4t(x) {
return (x / 4) | 0;
}
+%PrepareFunctionForOptimization(divp4t);
divp4t(8);
divp4t(8);
%OptimizeFunctionOnNextCall(divp4t);
@@ -79,6 +83,7 @@ function divn4t(x) {
return (x / -4) | 0;
}
+%PrepareFunctionForOptimization(divn4t);
divn4t(8);
divn4t(8);
%OptimizeFunctionOnNextCall(divn4t);
@@ -91,6 +96,7 @@ function div_by_two(x) {
return (x / 2) | 0;
}
+%PrepareFunctionForOptimization(div_by_two);
div_by_two(12);
div_by_two(34);
%OptimizeFunctionOnNextCall(div_by_two);
diff --git a/deps/v8/test/mjsunit/sin-cos.js b/deps/v8/test/mjsunit/sin-cos.js
index 7af471d3c0..17d48ce068 100644
--- a/deps/v8/test/mjsunit/sin-cos.js
+++ b/deps/v8/test/mjsunit/sin-cos.js
@@ -38,6 +38,7 @@ function no_deopt_on_minus_zero(x) {
return Math.sin(x) + Math.cos(x) + Math.tan(x);
}
+%PrepareFunctionForOptimization(no_deopt_on_minus_zero);
no_deopt_on_minus_zero(1);
no_deopt_on_minus_zero(1);
%OptimizeFunctionOnNextCall(no_deopt_on_minus_zero);
diff --git a/deps/v8/test/mjsunit/smi-mul-const.js b/deps/v8/test/mjsunit/smi-mul-const.js
index e5255014dc..1501231ff5 100644
--- a/deps/v8/test/mjsunit/smi-mul-const.js
+++ b/deps/v8/test/mjsunit/smi-mul-const.js
@@ -28,6 +28,7 @@
// Flags: --allow-natives-syntax --opt --noalways-opt
function check(func, input, expected) {
+ %PrepareFunctionForOptimization(func);
func(-1);
func(-1);
%OptimizeFunctionOnNextCall(func);
diff --git a/deps/v8/test/mjsunit/smi-mul.js b/deps/v8/test/mjsunit/smi-mul.js
index 12d206abec..a99b27af13 100644
--- a/deps/v8/test/mjsunit/smi-mul.js
+++ b/deps/v8/test/mjsunit/smi-mul.js
@@ -32,6 +32,7 @@ function mul(a, b) {
}
+%PrepareFunctionForOptimization(mul);
mul(-1, 2);
mul(-1, 2);
%OptimizeFunctionOnNextCall(mul);
@@ -47,6 +48,7 @@ function mul2(a, b) {
return a * b;
}
+%PrepareFunctionForOptimization(mul2);
mul2(-1, 2);
mul2(-1, 2);
%OptimizeFunctionOnNextCall(mul2);
diff --git a/deps/v8/test/mjsunit/smi-representation.js b/deps/v8/test/mjsunit/smi-representation.js
index 10545c7680..fdb41db1c5 100644
--- a/deps/v8/test/mjsunit/smi-representation.js
+++ b/deps/v8/test/mjsunit/smi-representation.js
@@ -29,7 +29,6 @@
function smi_field() {
// Assign twice to make the field non-constant.
- // TODO(ishell): update test once constant field tracking is done.
var o = {smi: 1};
o.smi = 0;
return o;
diff --git a/deps/v8/test/mjsunit/stack-trace-cpp-function-template-1.js b/deps/v8/test/mjsunit/stack-trace-cpp-function-template-1.js
new file mode 100644
index 0000000000..c3b7cc064c
--- /dev/null
+++ b/deps/v8/test/mjsunit/stack-trace-cpp-function-template-1.js
@@ -0,0 +1,37 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-stack-trace-frames
+
+// Verifies that "print" shows up in Error.stack:
+// Error
+// at foo (...)
+// at Object.toString (...)
+// at print (<anonymous>)
+// at bar (...)
+// at (...)
+let prepareStackTraceCalled = false;
+Error.prepareStackTrace = (e, frames) => {
+ prepareStackTraceCalled = true;
+ assertEquals(5, frames.length);
+
+ assertEquals(foo, frames[0].getFunction());
+ assertEquals(object.toString, frames[1].getFunction());
+ assertEquals("print", frames[2].getFunctionName());
+ assertEquals(bar, frames[3].getFunction());
+ return frames;
+};
+
+function foo() { throw new Error(); }
+const object = { toString: () => { return foo(); } };
+
+function bar() {
+ print(object);
+}
+
+try { bar(); } catch(e) {
+ // Trigger prepareStackTrace.
+ e.stack;
+}
+assertTrue(prepareStackTraceCalled);
diff --git a/deps/v8/test/mjsunit/stack-trace-cpp-function-template-2.js b/deps/v8/test/mjsunit/stack-trace-cpp-function-template-2.js
new file mode 100644
index 0000000000..14fb85164e
--- /dev/null
+++ b/deps/v8/test/mjsunit/stack-trace-cpp-function-template-2.js
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --opt --experimental-stack-trace-frames
+
+// Verifies that "print" shows up in Error.stack when "bar" is optimized
+// by Turbofan:
+// Error
+// at foo (...)
+// at Object.toString (...)
+// at print (<anonymous>)
+// at bar (...)
+// at (...)
+let prepareStackTraceCalled = false;
+Error.prepareStackTrace = (e, frames) => {
+ prepareStackTraceCalled = true;
+ assertEquals(5, frames.length);
+
+ assertEquals(foo, frames[0].getFunction());
+ assertEquals(object.toString, frames[1].getFunction());
+ assertEquals("print", frames[2].getFunctionName());
+ assertEquals(bar, frames[3].getFunction());
+ return frames;
+};
+
+function foo() { throw new Error(); }
+const object = { toString: () => { return foo(); } };
+
+function bar() {
+ print(object);
+}
+
+%PrepareFunctionForOptimization(bar);
+try { bar(); } catch (e) {}
+try { bar(); } catch (e) {}
+%OptimizeFunctionOnNextCall(bar);
+
+try { bar(); } catch(e) {
+ // Trigger prepareStackTrace.
+ e.stack;
+}
+
+assertOptimized(bar);
+assertTrue(prepareStackTraceCalled);
diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js
index 736ff1b9f1..27b89ec746 100644
--- a/deps/v8/test/mjsunit/strict-mode.js
+++ b/deps/v8/test/mjsunit/strict-mode.js
@@ -147,11 +147,11 @@ function foo(eval) {\
})();
// Octal literal
-CheckStrictMode("var x = 012");
-CheckStrictMode("012");
-CheckStrictMode("'Hello octal\\032'");
-CheckStrictMode("function octal() { return 012; }");
-CheckStrictMode("function octal() { return '\\032'; }");
+CheckStrictMode("var x = 012", SyntaxError);
+CheckStrictMode("012", SyntaxError);
+CheckStrictMode("'Hello octal\\032'", SyntaxError);
+CheckStrictMode("function octal() { return 012; }", SyntaxError);
+CheckStrictMode("function octal() { return '\\032'; }", SyntaxError);
(function ValidEscape() {
"use strict";
diff --git a/deps/v8/test/mjsunit/string-charcodeat-external.js b/deps/v8/test/mjsunit/string-charcodeat-external.js
index 8b291dad33..be74650267 100644
--- a/deps/v8/test/mjsunit/string-charcodeat-external.js
+++ b/deps/v8/test/mjsunit/string-charcodeat-external.js
@@ -11,6 +11,7 @@ function foo(s) {
var extern = "internalized dummy";
extern = "1234567890qiaipppiúöäöáœba"+"jalsdjasldjasdlasjdalsdjasldk";
externalizeString(extern, true /* force two-byte */);
+%PrepareFunctionForOptimization(foo);
assertEquals(97, foo(extern));
assertEquals(97, foo(extern));
%OptimizeFunctionOnNextCall(foo);
diff --git a/deps/v8/test/mjsunit/string-deopt.js b/deps/v8/test/mjsunit/string-deopt.js
index 15549186ea..2574484a03 100644
--- a/deps/v8/test/mjsunit/string-deopt.js
+++ b/deps/v8/test/mjsunit/string-deopt.js
@@ -7,11 +7,13 @@
(()=> {
function f(a, b, c) {
return a.indexOf(b, c);
- }
+ };
+ %PrepareFunctionForOptimization(f);
f("abc", "de", 1);
f("abc", "de", 1);
%OptimizeFunctionOnNextCall(f);
f("abc", "de", {});
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("abc", "de", {});
assertOptimized(f);
@@ -20,11 +22,13 @@
(()=> {
function f(a, b, c) {
return a.indexOf(b, c);
- }
+ };
+ %PrepareFunctionForOptimization(f);
f("abc", "de", 1);
f("abc", "de", 1);
%OptimizeFunctionOnNextCall(f);
f("abc", {}, 1);
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("abc", {}, 1);
assertOptimized(f);
@@ -33,11 +37,13 @@
(()=> {
function f(a, b, c) {
return a.substring(b, c);
- }
+ };
+ %PrepareFunctionForOptimization(f);
f("abcde", 1, 4);
f("abcde", 1, 4);
%OptimizeFunctionOnNextCall(f);
f("abcde", 1, {});
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("abcde", 1, {});
assertOptimized(f);
@@ -46,11 +52,13 @@
(()=> {
function f(a, b, c) {
return a.substring(b, c);
- }
+ };
+ %PrepareFunctionForOptimization(f);
f("abcde", 1, 4);
f("abcde", 1, 4);
%OptimizeFunctionOnNextCall(f);
f("abcde", {}, 4);
+ %PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
f("abcde", {}, 4);
assertOptimized(f);
diff --git a/deps/v8/test/mjsunit/strong-rooted-literals.js b/deps/v8/test/mjsunit/strong-rooted-literals.js
index 68804e5a32..92dfb0e9c4 100644
--- a/deps/v8/test/mjsunit/strong-rooted-literals.js
+++ b/deps/v8/test/mjsunit/strong-rooted-literals.js
@@ -13,6 +13,7 @@
return a;
}
+ %PrepareFunctionForOptimization(foo);
foo();
foo();
%OptimizeFunctionOnNextCall(foo);
@@ -26,9 +27,13 @@
return o.x + o.y;
}
function mapPlus(a, y) {
- return a.map(x => hot({x, y}));
+ var f = (x => hot({x, y}));
+ %EnsureFeedbackVectorForFunction(f);
+ return a.map(f);
}
+ %EnsureFeedbackVectorForFunction(mapPlus);
+ %PrepareFunctionForOptimization(hot);
var a = [1, 2, 3];
print(mapPlus(a, 1));
print(mapPlus(a, 2));
@@ -44,10 +49,12 @@
(function() {
var sopen = 'function wrapper() { ';
var s1 = 'function foo() { return bar(5); } ';
- var s2 = 'foo(); foo(); %OptimizeFunctionOnNextCall(foo); foo(); ';
+ var s2 = '%PrepareFunctionForOptimization(foo); ';
+ var s3 = 'foo(); foo(); %OptimizeFunctionOnNextCall(foo); foo(); ';
var sclose = '} wrapper(); ';
- var s = sopen + s1 + s2 + sclose;
+ var s = sopen + s1 + s2 + s3 + sclose;
function bar(i){return i + 3};
+ %EnsureFeedbackVectorForFunction(bar);
for (var i = 0; i < 4; i++) {
eval(s);
diff --git a/deps/v8/test/mjsunit/switch.js b/deps/v8/test/mjsunit/switch.js
index 4b27789ad9..4096f416e8 100644
--- a/deps/v8/test/mjsunit/switch.js
+++ b/deps/v8/test/mjsunit/switch.js
@@ -350,6 +350,7 @@ function switch_gen(clause_type, feedback, optimize) {
var values = clause_values[clause_type];
function opt(fn) {
+ if (optimize) %PrepareFunctionForOptimization(fn);
if (feedback === 'all') {
values.forEach(fn);
} else if (Array.isArray(feedback)) {
diff --git a/deps/v8/test/mjsunit/tools/trace-ic.js b/deps/v8/test/mjsunit/tools/trace-ic.js
new file mode 100644
index 0000000000..35fe209c17
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/trace-ic.js
@@ -0,0 +1,62 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --trace-ic --logfile=test/mjsunit/tools/trace-ic-test.log
+// Flags: --allow-natives-syntax
+
+// The idea behind this test is to make sure we do not crash when using the
+// --trace-ic flag.
+
+
+(function testLoadIC() {
+ function loadIC(obj) {
+ return obj.field;
+ }
+
+ %EnsureFeedbackVectorForFunction(loadIC);
+
+ var obj = {field: 'hello'};
+ loadIC(obj);
+ loadIC(obj);
+ loadIC(obj);
+})();
+
+(function testStoreIC() {
+ function storeIC(obj, value) {
+ return obj.field = value;
+ }
+
+ %EnsureFeedbackVectorForFunction(storeIC);
+
+ var obj = {field: 'hello'};
+ storeIC(obj, 'world');
+ storeIC(obj, 'world');
+ storeIC(obj, 'world');
+})();
+
+(function testKeyedLoadIC() {
+ function keyedLoadIC(obj, field) {
+ return obj[field];
+ }
+
+ %EnsureFeedbackVectorForFunction(keyedLoadIC);
+
+ var obj = {field: 'hello'};
+ keyedLoadIC(obj, 'field');
+ keyedLoadIC(obj, 'field');
+ keyedLoadIC(obj, 'field');
+})();
+
+(function testKeyedStoreIC() {
+ function keyedStoreIC(obj, field, value) {
+ return obj[field] = value;
+ }
+
+ %EnsureFeedbackVectorForFunction(keyedStoreIC);
+
+ var obj = {field: 'hello'};
+ keyedStoreIC(obj, 'field', 'world');
+ keyedStoreIC(obj, 'field', 'world');
+ keyedStoreIC(obj, 'field', 'world');
+})();
diff --git a/deps/v8/test/mjsunit/track-fields.js b/deps/v8/test/mjsunit/track-fields.js
index 566171fed0..9edd260d90 100644
--- a/deps/v8/test/mjsunit/track-fields.js
+++ b/deps/v8/test/mjsunit/track-fields.js
@@ -114,6 +114,7 @@ of1.field = {};
var of2 = {b:0};
of2.field = 10;
+%PrepareFunctionForOptimization(poly_load);
poly_load(of1, false);
poly_load(of1, false);
poly_load(of2, true);
@@ -132,6 +133,7 @@ function load_poly(o) {
return o.a;
}
+%PrepareFunctionForOptimization(load_poly);
var o10 = { "a": 1.6 };
var o11 = { "b": 1, "a": 1.7 };
load_poly(o10);
@@ -150,6 +152,7 @@ function load_mono(o) {
return o.a1;
}
+%PrepareFunctionForOptimization(load_mono);
var object = {"x": 1};
var o10 = { "a1": 1.6 };
var o11 = { "a1": object, "b": 1 };
@@ -164,6 +167,7 @@ function load_mono2(o) {
return o.a2;
}
+%PrepareFunctionForOptimization(load_mono2);
var o12 = { "a2": 5 };
var o13 = { "a2": object, "b": 1 };
load_mono2(o12);
@@ -177,6 +181,7 @@ function load_mono3(o) {
return o.a3;
}
+%PrepareFunctionForOptimization(load_mono3);
var o14 = { "a3": 1.6 };
var o15 = { "a3": 1.8, "b": 1 };
load_mono3(o14);
@@ -265,7 +270,6 @@ assertEquals(100, o20.dbl);
function attr_mismatch_obj(v, writable) {
var o = {};
// Assign twice to make the field non-constant.
- // TODO(ishell): update test once constant field tracking is done.
o.some_value = 0;
o.some_value = v;
Object.defineProperty(o, "second_value", {value:10, writable:writable});
@@ -332,7 +336,8 @@ read_first_double(df1);
// Test boilerplates with computed values.
function none_boilerplate(a) {
return {"a_none":a};
-}
+};
+%PrepareFunctionForOptimization(none_boilerplate);
%OptimizeFunctionOnNextCall(none_boilerplate);
var none_double1 = none_boilerplate(1.7);
var none_double2 = none_boilerplate(1.9);
@@ -347,6 +352,7 @@ function none_to_smi(a) {
return {"a_smi":a};
}
+%PrepareFunctionForOptimization(none_to_smi);
var none_smi1 = none_to_smi(1);
var none_smi2 = none_to_smi(2);
%OptimizeFunctionOnNextCall(none_to_smi);
@@ -361,6 +367,7 @@ function none_to_double(a) {
return {"a_double":a};
}
+%PrepareFunctionForOptimization(none_to_double);
var none_to_double1 = none_to_double(1.5);
var none_to_double2 = none_to_double(2.8);
%OptimizeFunctionOnNextCall(none_to_double);
@@ -375,6 +382,7 @@ function none_to_object(a) {
return {"an_object":a};
}
+%PrepareFunctionForOptimization(none_to_object);
var none_to_object1 = none_to_object(true);
var none_to_object2 = none_to_object(false);
%OptimizeFunctionOnNextCall(none_to_object);
diff --git a/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js b/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js
index 5a7594de13..ae590b6676 100644
--- a/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js
+++ b/deps/v8/test/mjsunit/ubsan-fuzzerbugs.js
@@ -72,21 +72,3 @@ float_array[0] = 1e51;
%OptimizeFunctionOnNextCall(f);
f();
})();
-
-// crbug.com/935133
-(function() {
- var called_has = false;
- var proxy = new Proxy({}, {
- has: function(x, p) {
- called_has = true;
- throw "The test may finish now";
- },
- });
- proxy.length = 2147483648;
- try {
- Array.prototype.sort.call(proxy);
- } catch(e) {
- assertTrue(e === "The test may finish now");
- }
- assertTrue(called_has);
-})();
diff --git a/deps/v8/test/mjsunit/unary-minus-deopt.js b/deps/v8/test/mjsunit/unary-minus-deopt.js
index 07f7e0e497..dc60d07375 100644
--- a/deps/v8/test/mjsunit/unary-minus-deopt.js
+++ b/deps/v8/test/mjsunit/unary-minus-deopt.js
@@ -37,6 +37,7 @@ function unaryMinusTest(x) {
return (g & -g) - 1 | 0;
}
+%PrepareFunctionForOptimization(unaryMinusTest);
unaryMinusTest(3);
unaryMinusTest(3);
%OptimizeFunctionOnNextCall(unaryMinusTest);
@@ -47,6 +48,7 @@ assertOptimized(unaryMinusTest);
unaryMinusTest(31);
// The following is normally true, but not with --stress-opt. :-/
// assertUnoptimized(unaryMinusTest);
+%PrepareFunctionForOptimization(unaryMinusTest);
// We should have learned something from the deopt.
unaryMinusTest(31);
diff --git a/deps/v8/test/mjsunit/unbox-double-arrays.js b/deps/v8/test/mjsunit/unbox-double-arrays.js
index d6fc0938f9..ee08cdd3f0 100644
--- a/deps/v8/test/mjsunit/unbox-double-arrays.js
+++ b/deps/v8/test/mjsunit/unbox-double-arrays.js
@@ -149,6 +149,13 @@ function testOneArrayType(allocator) {
assertTrue(%HasDoubleElements(a));
}
+ %PrepareFunctionForOptimization(test_various_loads);
+ %PrepareFunctionForOptimization(test_various_loads2);
+ %PrepareFunctionForOptimization(test_various_loads3);
+ %PrepareFunctionForOptimization(test_various_loads6);
+ %PrepareFunctionForOptimization(test_various_loads7);
+ %PrepareFunctionForOptimization(test_various_stores);
+
// Test double and integer values
test_various_loads(large_array,
expected_array_value(5),
@@ -463,6 +470,7 @@ function call_apply() {
called_by_apply.apply({}, large_array3);
}
+%PrepareFunctionForOptimization(call_apply);
call_apply();
call_apply();
call_apply();
@@ -483,6 +491,7 @@ function test_for_in() {
assertTrue(next_expected == 96);
}
+%PrepareFunctionForOptimization(test_for_in);
test_for_in();
test_for_in();
test_for_in();
@@ -503,6 +512,7 @@ function test_getter() {
assertEquals(expected_array_value(10), large_array3[2]);
}
+%PrepareFunctionForOptimization(test_getter);
test_getter();
test_getter();
test_getter();
@@ -531,6 +541,7 @@ function test_setter() {
assertEquals(expected_array_value(2), large_array4[2]);
}
+%PrepareFunctionForOptimization(test_setter);
test_setter();
test_setter();
test_setter();
diff --git a/deps/v8/test/mjsunit/unbox-smi-field.js b/deps/v8/test/mjsunit/unbox-smi-field.js
index 361911800b..e0561f1e3e 100644
--- a/deps/v8/test/mjsunit/unbox-smi-field.js
+++ b/deps/v8/test/mjsunit/unbox-smi-field.js
@@ -15,6 +15,7 @@ function add(a, b) {
return a.x + b.x;
}
+%PrepareFunctionForOptimization(add);
assertEquals(3, add(f, g));
assertEquals(3, add(g, f));
%OptimizeFunctionOnNextCall(add);
diff --git a/deps/v8/test/mjsunit/undetectable.js b/deps/v8/test/mjsunit/undetectable.js
index 69a370a00a..873aa307b1 100644
--- a/deps/v8/test/mjsunit/undetectable.js
+++ b/deps/v8/test/mjsunit/undetectable.js
@@ -76,6 +76,11 @@ function testCall() {
obj();
}
+%PrepareFunctionForOptimization(testCompares);
+%PrepareFunctionForOptimization(testIfs);
+%PrepareFunctionForOptimization(testWhiles);
+%PrepareFunctionForOptimization(testFors);
+%PrepareFunctionForOptimization(testCall);
for (var j = 0; j < 5; j++) {
testCompares();
testIfs();
diff --git a/deps/v8/test/mjsunit/value-wrapper-accessor.js b/deps/v8/test/mjsunit/value-wrapper-accessor.js
index 79db407121..e4fd83ac9f 100644
--- a/deps/v8/test/mjsunit/value-wrapper-accessor.js
+++ b/deps/v8/test/mjsunit/value-wrapper-accessor.js
@@ -49,6 +49,7 @@ function test(object, prototype) {
return s.strict;
}
+ %PrepareFunctionForOptimization(nonstrict);
nonstrict(object);
nonstrict(object);
%OptimizeFunctionOnNextCall(nonstrict);
@@ -56,6 +57,7 @@ function test(object, prototype) {
nonstrict(object);
assertEquals("object", typeof result);
+ %PrepareFunctionForOptimization(strict);
strict(object);
strict(object);
%OptimizeFunctionOnNextCall(strict);
@@ -72,6 +74,7 @@ function test(object, prototype) {
return s.strict = 10;
}
+ %PrepareFunctionForOptimization(nonstrict);
nonstrict(object);
nonstrict(object);
%OptimizeFunctionOnNextCall(nonstrict);
@@ -79,6 +82,7 @@ function test(object, prototype) {
nonstrict(object);
assertEquals("object", typeof result);
+ %PrepareFunctionForOptimization(strict);
strict(object);
strict(object);
%OptimizeFunctionOnNextCall(strict);
diff --git a/deps/v8/test/mjsunit/wasm/anyfunc.js b/deps/v8/test/mjsunit/wasm/anyfunc.js
index 19415fe2b8..f0d587b25a 100644
--- a/deps/v8/test/mjsunit/wasm/anyfunc.js
+++ b/deps/v8/test/mjsunit/wasm/anyfunc.js
@@ -4,7 +4,7 @@
// Flags: --expose-wasm --experimental-wasm-anyref --expose-gc
-load("test/mjsunit/wasm/wasm-module-builder.js");
+load('test/mjsunit/wasm/wasm-module-builder.js');
(function testAnyFuncIdentityFunction() {
print(arguments.callee.name);
@@ -13,11 +13,10 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.addBody([kExprGetLocal, 0])
.exportFunc();
-
const instance = builder.instantiate();
assertThrows(() => instance.exports.main(print), TypeError);
- assertThrows(() => instance.exports.main({'hello' : 'world'}), TypeError);
+ assertThrows(() => instance.exports.main({'hello': 'world'}), TypeError);
assertSame(
instance.exports.main, instance.exports.main(instance.exports.main));
})();
@@ -26,10 +25,9 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
const sig_index = builder.addType(kSig_v_a);
- const imp_index = builder.addImport("q", "func", sig_index);
+ const imp_index = builder.addImport('q', 'func', sig_index);
builder.addFunction('main', sig_index)
- .addBody([kExprGetLocal, 0,
- kExprCallFunction, imp_index])
+ .addBody([kExprGetLocal, 0, kExprCallFunction, imp_index])
.exportFunc();
const main = builder.instantiate({q: {func: checkFunction}}).exports.main;
@@ -46,24 +44,35 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
const ref_sig = builder.addType(kSig_v_a);
const void_sig = builder.addType(kSig_v_v);
- const imp_index = builder.addImport("q", "func", ref_sig);
- const gc_index = builder.addImport("q", "gc", void_sig);
+ const imp_index = builder.addImport('q', 'func', ref_sig);
+ const gc_index = builder.addImport('q', 'gc', void_sig);
// First call the gc, then check if the object still exists.
builder.addFunction('main', ref_sig)
.addLocals({anyfunc_count: 10})
.addBody([
- kExprGetLocal, 0, kExprSetLocal, 1, // Set local
- kExprGetLocal, 0, kExprSetLocal, 2, // Set local
- kExprGetLocal, 0, kExprSetLocal, 3, // Set local
- kExprGetLocal, 0, kExprSetLocal, 4, // Set local
- kExprGetLocal, 0, kExprSetLocal, 5, // Set local
- kExprGetLocal, 0, kExprSetLocal, 6, // Set local
- kExprGetLocal, 0, kExprSetLocal, 7, // Set local
- kExprGetLocal, 0, kExprSetLocal, 8, // Set local
- kExprGetLocal, 0, kExprSetLocal, 9, // Set local
- kExprGetLocal, 0, kExprSetLocal, 10, // Set local
- kExprCallFunction, gc_index, // call gc
- kExprGetLocal, 9, kExprCallFunction, imp_index // call import
+ kExprGetLocal, 0,
+ kExprSetLocal, 1, // Set local
+ kExprGetLocal, 0,
+ kExprSetLocal, 2, // Set local
+ kExprGetLocal, 0,
+ kExprSetLocal, 3, // Set local
+ kExprGetLocal, 0,
+ kExprSetLocal, 4, // Set local
+ kExprGetLocal, 0,
+ kExprSetLocal, 5, // Set local
+ kExprGetLocal, 0,
+ kExprSetLocal, 6, // Set local
+ kExprGetLocal, 0,
+ kExprSetLocal, 7, // Set local
+ kExprGetLocal, 0,
+ kExprSetLocal, 8, // Set local
+ kExprGetLocal, 0,
+ kExprSetLocal, 9, // Set local
+ kExprGetLocal, 0,
+ kExprSetLocal, 10, // Set local
+ kExprCallFunction, gc_index, // call gc
+ kExprGetLocal, 9,
+ kExprCallFunction, imp_index // call import
])
.exportFunc();
@@ -82,8 +91,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const builder = new WasmModuleBuilder();
const ref_sig = builder.addType(kSig_v_a);
const void_sig = builder.addType(kSig_v_v);
- const imp_index = builder.addImport("q", "func", ref_sig);
- const gc_index = builder.addImport("q", "gc", void_sig);
+ const imp_index = builder.addImport('q', 'func', ref_sig);
+ const gc_index = builder.addImport('q', 'gc', void_sig);
// First call the gc, then check if the object still exists.
builder.addFunction('main', ref_sig)
.addBody([
@@ -96,7 +105,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertSame(main, value);
}
- const main = builder.instantiate({q: {func: checkFunction, gc: gc}}).exports.main;
+ const main =
+ builder.instantiate({q: {func: checkFunction, gc: gc}}).exports.main;
main(main);
})();
@@ -104,7 +114,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function testPassAnyFuncWithGCInWrapper() {
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
- const kSig_a_iai = makeSig([kWasmI32, kWasmAnyFunc, kWasmI32], [kWasmAnyFunc]);
+ const kSig_a_iai =
+ makeSig([kWasmI32, kWasmAnyFunc, kWasmI32], [kWasmAnyFunc]);
const sig_index = builder.addType(kSig_a_iai);
builder.addFunction('main', sig_index)
.addBody([kExprGetLocal, 1])
@@ -152,9 +163,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
const builder = new WasmModuleBuilder();
const sig_index = builder.addType(kSig_a_v);
- builder.addFunction('main', sig_index)
- .addBody([kExprRefNull])
- .exportFunc();
+ builder.addFunction('main', sig_index).addBody([kExprRefNull]).exportFunc();
const main = builder.instantiate().exports.main;
assertEquals(null, main());
@@ -197,3 +206,41 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const main = builder.instantiate().exports.main;
assertEquals(null, main());
})();
+
+(function testRefFuncOutOfBounds() {
+ print(arguments.callee.name);
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('main', kSig_a_v).addBody([kExprRefFunc, 10]);
+
+ assertThrows(() => builder.toModule(), WebAssembly.CompileError);
+})();
+
+(function testRefFuncIsCallable() {
+ print(arguments.callee.name);
+ const expected = 54;
+ const builder = new WasmModuleBuilder();
+ const function_index = builder.addFunction('hidden', kSig_i_v)
+ .addBody([kExprI32Const, expected])
+ .index;
+ builder.addFunction('main', kSig_a_v)
+ .addBody([kExprRefFunc, function_index])
+ .exportFunc();
+
+ const instance = builder.instantiate();
+ assertEquals(expected, instance.exports.main()());
+})();
+
+(function testRefFuncPreservesIdentity() {
+ print(arguments.callee.name);
+ const expected = 54;
+ const builder = new WasmModuleBuilder();
+ const foo = builder.addFunction('foo', kSig_i_v)
+ .addBody([kExprI32Const, expected])
+ .exportFunc();
+ builder.addFunction('main', kSig_a_v)
+ .addBody([kExprRefFunc, foo.index])
+ .exportFunc();
+
+ const instance = builder.instantiate();
+ assertSame(instance.exports.foo, instance.exports.main());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/anyref-globals.js b/deps/v8/test/mjsunit/wasm/anyref-globals.js
index c0d9de4e69..39d3bcb147 100644
--- a/deps/v8/test/mjsunit/wasm/anyref-globals.js
+++ b/deps/v8/test/mjsunit/wasm/anyref-globals.js
@@ -528,3 +528,89 @@ function dummy_func() {
assertEquals(obj2, instance2.exports.reexport2.value);
assertEquals(obj3, instance2.exports.reexport3.value);
})();
+
+(function TestImportImmutableAnyFuncGlobalAsAnyRef() {
+ print(arguments.callee.name);
+ let builder1 = new WasmModuleBuilder();
+ const g3 = builder1.addGlobal(kWasmAnyFunc, true).exportAs("e3");
+ builder1.addGlobal(kWasmAnyRef, false).exportAs("e1"); // Dummy.
+ builder1.addGlobal(kWasmAnyFunc, false).exportAs("e2"); // Dummy.
+ const instance1 = builder1.instantiate();
+
+ let builder2 = new WasmModuleBuilder();
+ const i1 = builder2.addImportedGlobal('exports', 'e1', kWasmAnyRef, false);
+ const i2 = builder2.addImportedGlobal('exports', 'e2', kWasmAnyRef, false);
+ builder2.instantiate(instance1);
+})();
+
+(function TestImportMutableAnyFuncGlobalAsAnyRefFails() {
+ print(arguments.callee.name);
+ let builder1 = new WasmModuleBuilder();
+ const g3 = builder1.addGlobal(kWasmAnyFunc, true).exportAs("e3");
+ builder1.addGlobal(kWasmAnyRef, true).exportAs("e1"); // Dummy.
+ builder1.addGlobal(kWasmAnyFunc, true).exportAs("e2"); // Dummy.
+ const instance1 = builder1.instantiate();
+
+ let builder2 = new WasmModuleBuilder();
+ const i1 = builder2.addImportedGlobal('exports', 'e1', kWasmAnyRef, true);
+ const i2 = builder2.addImportedGlobal('exports', 'e2', kWasmAnyRef, true);
+ assertThrows(() => builder2.instantiate(instance1), WebAssembly.LinkError);
+})();
+
+(function TestRefFuncGlobalInit() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const g_ref = builder.addGlobal(kWasmAnyRef, true);
+ const g_func = builder.addGlobal(kWasmAnyFunc, true);
+ const f_ref = builder.addFunction('get_anyref_global', kSig_r_v)
+ .addBody([kExprGetGlobal, g_ref.index])
+ .exportAs('get_anyref_global');
+ const f_func = builder.addFunction('get_anyfunc_global', kSig_a_v)
+ .addBody([kExprGetGlobal, g_func.index])
+ .exportAs('get_anyfunc_global');
+
+ g_ref.function_index = f_ref.index;
+ g_func.function_index = f_func.index;
+
+ const instance = builder.instantiate();
+ assertEquals(
+ instance.exports.get_anyref_global, instance.exports.get_anyref_global());
+ assertEquals(
+ instance.exports.get_anyfunc_global,
+ instance.exports.get_anyfunc_global());
+})();
+
+(function TestRefFuncGlobalInitWithImport() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const sig_index = builder.addType(kSig_i_v);
+ const import_wasm = builder.addImport('m', 'wasm', sig_index);
+ const import_js = builder.addImport('m', 'js', sig_index);
+ const g_wasm = builder.addGlobal(kWasmAnyFunc, true);
+ const g_js = builder.addGlobal(kWasmAnyFunc, true);
+ g_wasm.function_index = import_wasm;
+ g_js.function_index = import_js;
+ builder.addFunction('get_global_wasm', kSig_a_v)
+ .addBody([kExprGetGlobal, g_wasm.index])
+ .exportFunc();
+ builder.addFunction('get_global_js', kSig_a_v)
+ .addBody([kExprGetGlobal, g_js.index])
+ .exportFunc();
+
+ const expected_wasm = dummy_func();
+ const expected_val = 27;
+ // I want to test here that imported JS functions get wrapped by wasm-to-js
+ // and js-to-wasm wrappers. That's why {expected_js} does not return an
+ // integer directly but an object with a {valueOf} function.
+ function expected_js() {
+ const result = {};
+ result.valueOf = () => expected_val;
+ return result;
+ };
+
+ const instance =
+ builder.instantiate({m: {wasm: expected_wasm, js: expected_js}});
+
+ assertSame(expected_wasm, instance.exports.get_global_wasm());
+ assertSame(expected_val, instance.exports.get_global_js()());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/anyref-table.js b/deps/v8/test/mjsunit/wasm/anyref-table.js
index f9248199a2..f4e82d32c8 100644
--- a/deps/v8/test/mjsunit/wasm/anyref-table.js
+++ b/deps/v8/test/mjsunit/wasm/anyref-table.js
@@ -29,3 +29,19 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertThrows(() => table.set(12), RangeError);
})();
+
+(function TestImportAnyRefTable() {
+ print(arguments.callee.name);
+
+ const builder = new WasmModuleBuilder();
+ const table_index = builder.addImportedTable("imp", "table", 3, 10, kWasmAnyRef);
+ builder.addFunction('get', kSig_r_v)
+ .addBody([kExprI32Const, 0, kExprGetTable, table_index]);
+
+ let table_ref = new WebAssembly.Table({element: "anyref", initial: 3, maximum: 10});
+ builder.instantiate({imp:{table: table_ref}});
+
+ let table_func = new WebAssembly.Table({ element: "anyfunc", initial: 3, maximum: 10 });
+ assertThrows(() => builder.instantiate({ imp: { table: table_func } }),
+ WebAssembly.LinkError, /imported table does not match the expected type/);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/README b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/README
new file mode 100644
index 0000000000..8fd8d3498b
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/README
@@ -0,0 +1,19 @@
+This directory contains the bulk-memory proposal tests, converted to JS using
+the reference interpreter using the following shell command:
+
+```
+for f in *.wast; do wasm $f -o $f.js; done
+```
+
+Where `wasm` is the reference interpreter compiled from the bulk memory
+proposal (https://github.com/WebAssembly/bulk-memory-operations).
+
+This only includes the tests that are different than the spec repo. The
+testsuite repo (https://github.com/WebAssembly/testsuite) has a tool which
+calculates this, see
+https://github.com/WebAssembly/testsuite/tree/master/proposals/bulk-memory-operations
+
+The contents are copied from the following revisions:
+
+WebAssembly/testsuite: 2a2099d52103215962707fbe9f44cd51fd146636
+WebAssembly/bulk-memory-operations: 47b4ae718b42081a220ac7f405bed1391661a635
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast
new file mode 100644
index 0000000000..e88c72ca7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast
@@ -0,0 +1,1047 @@
+(module binary "\00asm\01\00\00\00")
+(module binary "\00asm" "\01\00\00\00")
+(module $M1 binary "\00asm\01\00\00\00")
+(module $M2 binary "\00asm" "\01\00\00\00")
+
+(assert_malformed (module binary "") "unexpected end")
+(assert_malformed (module binary "\01") "unexpected end")
+(assert_malformed (module binary "\00as") "unexpected end")
+(assert_malformed (module binary "asm\00") "magic header not detected")
+(assert_malformed (module binary "msa\00") "magic header not detected")
+(assert_malformed (module binary "msa\00\01\00\00\00") "magic header not detected")
+(assert_malformed (module binary "msa\00\00\00\00\01") "magic header not detected")
+(assert_malformed (module binary "asm\01\00\00\00\00") "magic header not detected")
+(assert_malformed (module binary "wasm\01\00\00\00") "magic header not detected")
+(assert_malformed (module binary "\7fasm\01\00\00\00") "magic header not detected")
+(assert_malformed (module binary "\80asm\01\00\00\00") "magic header not detected")
+(assert_malformed (module binary "\82asm\01\00\00\00") "magic header not detected")
+(assert_malformed (module binary "\ffasm\01\00\00\00") "magic header not detected")
+
+;; 8-byte endian-reversed.
+(assert_malformed (module binary "\00\00\00\01msa\00") "magic header not detected")
+
+;; Middle-endian byte orderings.
+(assert_malformed (module binary "a\00ms\00\01\00\00") "magic header not detected")
+(assert_malformed (module binary "sm\00a\00\00\01\00") "magic header not detected")
+
+;; Upper-cased.
+(assert_malformed (module binary "\00ASM\01\00\00\00") "magic header not detected")
+
+;; EBCDIC-encoded magic.
+(assert_malformed (module binary "\00\81\a2\94\01\00\00\00") "magic header not detected")
+
+;; Leading UTF-8 BOM.
+(assert_malformed (module binary "\ef\bb\bf\00asm\01\00\00\00") "magic header not detected")
+
+(assert_malformed (module binary "\00asm") "unexpected end")
+(assert_malformed (module binary "\00asm\01") "unexpected end")
+(assert_malformed (module binary "\00asm\01\00\00") "unexpected end")
+(assert_malformed (module binary "\00asm\00\00\00\00") "unknown binary version")
+(assert_malformed (module binary "\00asm\0d\00\00\00") "unknown binary version")
+(assert_malformed (module binary "\00asm\0e\00\00\00") "unknown binary version")
+(assert_malformed (module binary "\00asm\00\01\00\00") "unknown binary version")
+(assert_malformed (module binary "\00asm\00\00\01\00") "unknown binary version")
+(assert_malformed (module binary "\00asm\00\00\00\01") "unknown binary version")
+
+;; Unsigned LEB128 can have non-minimal length
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\05\04\01" ;; Memory section with 1 entry
+ "\00\82\00" ;; no max, minimum 2
+)
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\05\07\01" ;; Memory section with 1 entry
+ "\00\82\80\80\80\00" ;; no max, minimum 2
+)
+
+;; Signed LEB128 can have non-minimal length
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\06\07\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\80\00" ;; i32.const 0
+ "\0b" ;; end
+)
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\06\07\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\ff\7f" ;; i32.const -1
+ "\0b" ;; end
+)
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0a\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\80\80\80\80\00" ;; i32.const 0
+ "\0b" ;; end
+)
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0a\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\ff\ff\ff\ff\7f" ;; i32.const -1
+ "\0b" ;; end
+)
+
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\06\07\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\80\00" ;; i64.const 0 with unused bits set
+ "\0b" ;; end
+)
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\06\07\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\ff\7f" ;; i64.const -1 with unused bits unset
+ "\0b" ;; end
+)
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0f\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\80\80\80\80\80\80\80\80\80\00" ;; i64.const 0 with unused bits set
+ "\0b" ;; end
+)
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0f\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\ff\ff\ff\ff\ff\ff\ff\ff\ff\7f" ;; i64.const -1 with unused bits unset
+ "\0b" ;; end
+)
+
+;; Data segment memory index can have non-minimal length
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\05\03\01" ;; Memory section with 1 entry
+ "\00\00" ;; no max, minimum 0
+ "\0b\07\01" ;; Data section with 1 entry
+ "\80\00" ;; Memory index 0, encoded with 2 bytes
+ "\41\00\0b\00" ;; (i32.const 0) with contents ""
+)
+
+;; Element segment table index can have non-minimal length
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\04\04\01" ;; Table section with 1 entry
+ "\70\00\00" ;; no max, minimum 0, funcref
+ "\09\07\01" ;; Element section with 1 entry
+ "\80\00" ;; Table index 0, encoded with 2 bytes
+ "\41\00\0b\00" ;; (i32.const 0) with no elements
+)
+
+;; Unsigned LEB128 must not be overlong
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\05\08\01" ;; Memory section with 1 entry
+ "\00\82\80\80\80\80\00" ;; no max, minimum 2 with one byte too many
+ )
+ "integer representation too long"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\11\01" ;; Code section
+ ;; function 0
+ "\0f\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\28" ;; i32.load
+ "\02" ;; alignment 2
+ "\82\80\80\80\80\00" ;; offset 2 with one byte too many
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "integer representation too long"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\11\01" ;; Code section
+ ;; function 0
+ "\0f\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\28" ;; i32.load
+ "\82\80\80\80\80\00" ;; alignment 2 with one byte too many
+ "\00" ;; offset 0
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "integer representation too long"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\12\01" ;; Code section
+ ;; function 0
+ "\10\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\41\03" ;; i32.const 3
+ "\36" ;; i32.store
+ "\82\80\80\80\80\00" ;; alignment 2 with one byte too many
+ "\03" ;; offset 3
+ "\0b" ;; end
+ )
+ "integer representation too long"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\12\01" ;; Code section
+ ;; function 0
+ "\10\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\41\03" ;; i32.const 3
+ "\36" ;; i32.store
+ "\02" ;; alignment 2
+ "\82\80\80\80\80\00" ;; offset 2 with one byte too many
+ "\0b" ;; end
+ )
+ "integer representation too long"
+)
+
+;; Signed LEB128 must not be overlong
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0b\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\80\80\80\80\80\00" ;; i32.const 0 with one byte too many
+ "\0b" ;; end
+ )
+ "integer representation too long"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0b\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\ff\ff\ff\ff\ff\7f" ;; i32.const -1 with one byte too many
+ "\0b" ;; end
+ )
+ "integer representation too long"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\10\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\80\80\80\80\80\80\80\80\80\80\00" ;; i64.const 0 with one byte too many
+ "\0b" ;; end
+ )
+ "integer representation too long"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\10\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\ff\ff\ff\ff\ff\ff\ff\ff\ff\ff\7f" ;; i64.const -1 with one byte too many
+ "\0b" ;; end
+ )
+ "integer representation too long"
+)
+
+;; Unsigned LEB128s zero-extend
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\05\07\01" ;; Memory section with 1 entry
+ "\00\82\80\80\80\70" ;; no max, minimum 2 with unused bits set
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\05\07\01" ;; Memory section with 1 entry
+ "\00\82\80\80\80\40" ;; no max, minimum 2 with some unused bits set
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\10\01" ;; Code section
+ ;; function 0
+ "\0e\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\28" ;; i32.load
+ "\02" ;; alignment 2
+ "\82\80\80\80\10" ;; offset 2 with unused bits set
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\10\01" ;; Code section
+ ;; function 0
+ "\0e\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\28" ;; i32.load
+ "\02" ;; alignment 2
+ "\82\80\80\80\40" ;; offset 2 with some unused bits set
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\10\01" ;; Code section
+ "\0e\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\28" ;; i32.load
+ "\82\80\80\80\10" ;; alignment 2 with unused bits set
+ "\00" ;; offset 0
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\10\01" ;; Code section
+ ;; function 0
+ "\0e\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\28" ;; i32.load
+ "\82\80\80\80\40" ;; alignment 2 with some unused bits set
+ "\00" ;; offset 0
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\11\01" ;; Code section
+ ;; function 0
+ "\0f\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\41\03" ;; i32.const 3
+ "\36" ;; i32.store
+ "\82\80\80\80\10" ;; alignment 2 with unused bits set
+ "\03" ;; offset 3
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\11\01" ;; Code section
+ ;; function 0
+ "\0f\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\41\03" ;; i32.const 3
+ "\36" ;; i32.store
+ "\82\80\80\80\40" ;; alignment 2 with some unused bits set
+ "\03" ;; offset 3
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\11\01" ;; Code section
+ ;; function 0
+ "\0f\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\41\03" ;; i32.const 3
+ "\36" ;; i32.store
+ "\03" ;; alignment 2
+ "\82\80\80\80\10" ;; offset 2 with unused bits set
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\01" ;; Memory section
+ "\0a\11\01" ;; Code section
+
+ ;; function 0
+ "\0f\01\01" ;; local type count
+ "\7f" ;; i32
+ "\41\00" ;; i32.const 0
+ "\41\03" ;; i32.const 3
+ "\36" ;; i32.store
+ "\02" ;; alignment 2
+ "\82\80\80\80\40" ;; offset 2 with some unused bits set
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+
+;; Signed LEB128s sign-extend
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0a\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\80\80\80\80\70" ;; i32.const 0 with unused bits set
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0a\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\ff\ff\ff\ff\0f" ;; i32.const -1 with unused bits unset
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0a\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\80\80\80\80\1f" ;; i32.const 0 with some unused bits set
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0a\01" ;; Global section with 1 entry
+ "\7f\00" ;; i32, immutable
+ "\41\ff\ff\ff\ff\4f" ;; i32.const -1 with some unused bits unset
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0f\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\80\80\80\80\80\80\80\80\80\7e" ;; i64.const 0 with unused bits set
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0f\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\ff\ff\ff\ff\ff\ff\ff\ff\ff\01" ;; i64.const -1 with unused bits unset
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0f\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\80\80\80\80\80\80\80\80\80\02" ;; i64.const 0 with some unused bits set
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\06\0f\01" ;; Global section with 1 entry
+ "\7e\00" ;; i64, immutable
+ "\42\ff\ff\ff\ff\ff\ff\ff\ff\ff\41" ;; i64.const -1 with some unused bits unset
+ "\0b" ;; end
+ )
+ "integer too large"
+)
+
+;; call_indirect reserved byte equal to zero.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\04\04\01\70\00\00" ;; Table section
+ "\0a\09\01" ;; Code section
+
+ ;; function 0
+ "\07\00"
+ "\41\00" ;; i32.const 0
+ "\11\00" ;; call_indirect (type 0)
+ "\01" ;; call_indirect reserved byte is not equal to zero!
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+;; call_indirect reserved byte should not be a "long" LEB128 zero.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\04\04\01\70\00\00" ;; Table section
+ "\0a\0a\01" ;; Code section
+
+ ;; function 0
+ "\07\00"
+ "\41\00" ;; i32.const 0
+ "\11\00" ;; call_indirect (type 0)
+ "\80\00" ;; call_indirect reserved byte
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+;; Same as above for 3, 4, and 5-byte zero encodings.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\04\04\01\70\00\00" ;; Table section
+ "\0a\0b\01" ;; Code section
+
+ ;; function 0
+ "\08\00"
+ "\41\00" ;; i32.const 0
+ "\11\00" ;; call_indirect (type 0)
+ "\80\80\00" ;; call_indirect reserved byte
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\04\04\01\70\00\00" ;; Table section
+ "\0a\0c\01" ;; Code section
+
+ ;; function 0
+ "\09\00"
+ "\41\00" ;; i32.const 0
+ "\11\00" ;; call_indirect (type 0)
+ "\80\80\80\00" ;; call_indirect reserved byte
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\04\04\01\70\00\00" ;; Table section
+ "\0a\0d\01" ;; Code section
+
+ ;; function 0
+ "\0a\00"
+ "\41\00" ;; i32.const 0
+ "\11\00" ;; call_indirect (type 0)
+ "\80\80\80\80\00" ;; call_indirect reserved byte
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+;; memory.grow reserved byte equal to zero.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\09\01" ;; Code section
+
+ ;; function 0
+ "\07\00"
+ "\41\00" ;; i32.const 0
+ "\40" ;; memory.grow
+ "\01" ;; memory.grow reserved byte is not equal to zero!
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+;; memory.grow reserved byte should not be a "long" LEB128 zero.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\0a\01" ;; Code section
+
+ ;; function 0
+ "\08\00"
+ "\41\00" ;; i32.const 0
+ "\40" ;; memory.grow
+ "\80\00" ;; memory.grow reserved byte
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+;; Same as above for 3, 4, and 5-byte zero encodings.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\0b\01" ;; Code section
+
+ ;; function 0
+ "\09\00"
+ "\41\00" ;; i32.const 0
+ "\40" ;; memory.grow
+ "\80\80\00" ;; memory.grow reserved byte
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\0c\01" ;; Code section
+
+ ;; function 0
+ "\0a\00"
+ "\41\00" ;; i32.const 0
+ "\40" ;; memory.grow
+ "\80\80\80\00" ;; memory.grow reserved byte
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\0d\01" ;; Code section
+
+ ;; function 0
+ "\0b\00"
+ "\41\00" ;; i32.const 0
+ "\40" ;; memory.grow
+ "\80\80\80\80\00" ;; memory.grow reserved byte
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+;; memory.size reserved byte equal to zero.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\07\01" ;; Code section
+
+ ;; function 0
+ "\05\00"
+ "\3f" ;; memory.size
+ "\01" ;; memory.size reserved byte is not equal to zero!
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+;; memory.size reserved byte should not be a "long" LEB128 zero.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\08\01" ;; Code section
+
+ ;; function 0
+ "\06\00"
+ "\3f" ;; memory.size
+ "\80\00" ;; memory.size reserved byte
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+;; Same as above for 3, 4, and 5-byte zero encodings.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\09\01" ;; Code section
+
+ ;; function 0
+ "\07\00"
+ "\3f" ;; memory.size
+ "\80\80\00" ;; memory.size reserved byte
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\0a\01" ;; Code section
+
+ ;; function 0
+ "\08\00"
+ "\3f" ;; memory.size
+ "\80\80\80\00" ;; memory.size reserved byte
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\0b\01" ;; Code section
+
+ ;; function 0
+ "\09\00"
+ "\3f" ;; memory.size
+ "\80\80\80\80\00" ;; memory.size reserved byte
+ "\1a" ;; drop
+ "\0b" ;; end
+ )
+ "zero flag expected"
+)
+
+;; No more than 2^32 locals.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\0a\0c\01" ;; Code section
+
+ ;; function 0
+ "\0a\02"
+ "\ff\ff\ff\ff\0f\7f" ;; 0xFFFFFFFF i32
+ "\02\7e" ;; 0x00000002 i64
+ "\0b" ;; end
+ )
+ "too many locals"
+)
+
+;; Local count can be 0.
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\0a\0a\01" ;; Code section
+
+ ;; function 0
+ "\08\03"
+ "\00\7f" ;; 0 i32
+ "\00\7e" ;; 0 i64
+ "\02\7d" ;; 2 f32
+ "\0b" ;; end
+)
+
+;; Function section has non-zero count, but code section is absent.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\03\02\00\00" ;; Function section with 2 functions
+ )
+ "function and code section have inconsistent lengths"
+)
+
+;; Code section has non-zero count, but function section is absent.
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\0a\04\01\02\00\0b" ;; Code section with 1 empty function
+ )
+ "function and code section have inconsistent lengths"
+)
+
+;; Function section count > code section count
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\03\02\00\00" ;; Function section with 2 functions
+ "\0a\04\01\02\00\0b" ;; Code section with 1 empty function
+ )
+ "function and code section have inconsistent lengths"
+)
+
+;; Function section count < code section count
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section with 1 function
+ "\0a\07\02\02\00\0b\02\00\0b" ;; Code section with 2 empty functions
+ )
+ "function and code section have inconsistent lengths"
+)
+
+;; Function section has zero count, and code section is absent.
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\03\01\00" ;; Function section with 0 functions
+)
+
+;; Code section has zero count, and function section is absent.
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\0a\01\00" ;; Code section with 0 functions
+)
+
+;; Fewer passive segments than datacount
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\0c\01\03" ;; Datacount section with value "3"
+ "\0b\05\02" ;; Data section with two entries
+ "\01\00" ;; Passive data section
+ "\01\00") ;; Passive data section
+ "data count and data section have inconsistent lengths")
+
+;; More passive segments than datacount
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\0c\01\01" ;; Datacount section with value "1"
+ "\0b\05\02" ;; Data section with two entries
+ "\01\00" ;; Passive data section
+ "\01\00") ;; Passive data section
+ "data count and data section have inconsistent lengths")
+
+;; memory.init requires a datacount section
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\0e\01" ;; Code section
+
+ ;; function 0
+ "\0c\00"
+ "\41\00" ;; zero args
+ "\41\00"
+ "\41\00"
+ "\fc\08\00\00" ;; memory.init
+ "\0b"
+
+ "\0b\03\01\01\00" ;; Data section
+ ) ;; end
+ "data count section required")
+
+;; data.drop requires a datacount section
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+
+ "\01\04\01\60\00\00" ;; Type section
+ "\03\02\01\00" ;; Function section
+ "\05\03\01\00\00" ;; Memory section
+ "\0a\07\01" ;; Code section
+
+ ;; function 0
+ "\05\00"
+ "\fc\09\00" ;; data.drop
+ "\0b"
+
+ "\0b\03\01\01\00" ;; Data section
+ ) ;; end
+ "data count section required")
+
+;; passive element segment containing opcode other than ref.func or ref.null
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+
+ "\01\04\01\60\00\00" ;; Type section
+
+ "\03\02\01\00" ;; Function section
+
+ "\04\04\01" ;; Table section with 1 entry
+ "\70\00\00" ;; no max, minimum 0, funcref
+
+ "\05\03\01\00\00" ;; Memory section
+
+ "\09\07\01" ;; Element section with one segment
+ "\01\70" ;; Passive, funcref
+ "\01" ;; 1 element
+ "\d3\00\0b" ;; bad opcode, index 0, end
+
+ "\0a\04\01" ;; Code section
+
+ ;; function 0
+ "\02\00"
+ "\0b") ;; end
+ "invalid elem")
+
+;; passive element segment containing type other than funcref
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+
+ "\01\04\01\60\00\00" ;; Type section
+
+ "\03\02\01\00" ;; Function section
+
+ "\04\04\01" ;; Table section with 1 entry
+ "\70\00\00" ;; no max, minimum 0, funcref
+
+ "\05\03\01\00\00" ;; Memory section
+
+ "\09\07\01" ;; Element section with one segment
+ "\01\7f" ;; Passive, i32
+ "\01" ;; 1 element
+ "\d2\00\0b" ;; ref.func, index 0, end
+
+ "\0a\04\01" ;; Code section
+
+ ;; function 0
+ "\02\00"
+ "\0b") ;; end
+ "invalid element type")
+
+;; passive element segment containing opcode ref.func
+(module binary
+ "\00asm" "\01\00\00\00"
+
+ "\01\04\01\60\00\00" ;; Type section
+
+ "\03\02\01\00" ;; Function section
+
+ "\04\04\01" ;; Table section with 1 entry
+ "\70\00\00" ;; no max, minimum 0, funcref
+
+ "\05\03\01\00\00" ;; Memory section
+
+ "\09\07\01" ;; Element section with one segment
+ "\01\70" ;; Passive, funcref
+ "\01" ;; 1 element
+ "\d2\00\0b" ;; ref.func, index 0, end
+
+ "\0a\04\01" ;; Code section
+
+ ;; function 0
+ "\02\00"
+ "\0b") ;; end
+
+;; passive element segment containing opcode ref.null
+(module binary
+ "\00asm" "\01\00\00\00"
+
+ "\01\04\01\60\00\00" ;; Type section
+
+ "\03\02\01\00" ;; Function section
+
+ "\04\04\01" ;; Table section with 1 entry
+ "\70\00\00" ;; no max, minimum 0, funcref
+
+ "\05\03\01\00\00" ;; Memory section
+
+ "\09\06\01" ;; Element section with one segment
+ "\01\70" ;; Passive, funcref
+ "\01" ;; 1 element
+ "\d0\0b" ;; ref.null, end
+
+ "\0a\04\01" ;; Code section
+
+ ;; function 0
+ "\02\00"
+ "\0b") ;; end
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast.js
new file mode 100644
index 0000000000..134a2a339b
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/binary.wast.js
@@ -0,0 +1,445 @@
+
+'use strict';
+
+let spectest = {
+ print: console.log.bind(console),
+ print_i32: console.log.bind(console),
+ print_i32_f32: console.log.bind(console),
+ print_f64_f64: console.log.bind(console),
+ print_f32: console.log.bind(console),
+ print_f64: console.log.bind(console),
+ global_i32: 666,
+ global_f32: 666,
+ global_f64: 666,
+ table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
+ memory: new WebAssembly.Memory({initial: 1, maximum: 2})
+};
+let handler = {
+ get(target, prop) {
+ return (prop in target) ? target[prop] : {};
+ }
+};
+let registry = new Proxy({spectest}, handler);
+
+function register(name, instance) {
+ registry[name] = instance.exports;
+}
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function get(instance, name) {
+ let v = instance.exports[name];
+ return (v instanceof WebAssembly.Global) ? v.value : v;
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function run(action) {
+ action();
+}
+
+function assert_malformed(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm decoding failure expected");
+}
+
+function assert_invalid(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm validation failure expected");
+}
+
+function assert_unlinkable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.LinkError) return;
+ }
+ throw new Error("Wasm linking failure expected");
+}
+
+function assert_uninstantiable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+function assert_trap(action) {
+ try { action() } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+let StackOverflow;
+try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
+
+function assert_exhaustion(action) {
+ try { action() } catch (e) {
+ if (e instanceof StackOverflow) return;
+ }
+ throw new Error("Wasm resource exhaustion expected");
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+function assert_return_canonical_nan(action) {
+ let actual = action();
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test that it's a canonical NaN.
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+function assert_return_arithmetic_nan(action) {
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test for specific bitpatterns here.
+ let actual = action();
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+// binary.wast:1
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00");
+
+// binary.wast:2
+let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00");
+
+// binary.wast:3
+let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00");
+let $M1 = $3;
+
+// binary.wast:4
+let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00");
+let $M2 = $4;
+
+// binary.wast:6
+assert_malformed("");
+
+// binary.wast:7
+assert_malformed("\x01");
+
+// binary.wast:8
+assert_malformed("\x00\x61\x73");
+
+// binary.wast:9
+assert_malformed("\x61\x73\x6d\x00");
+
+// binary.wast:10
+assert_malformed("\x6d\x73\x61\x00");
+
+// binary.wast:11
+assert_malformed("\x6d\x73\x61\x00\x01\x00\x00\x00");
+
+// binary.wast:12
+assert_malformed("\x6d\x73\x61\x00\x00\x00\x00\x01");
+
+// binary.wast:13
+assert_malformed("\x61\x73\x6d\x01\x00\x00\x00\x00");
+
+// binary.wast:14
+assert_malformed("\x77\x61\x73\x6d\x01\x00\x00\x00");
+
+// binary.wast:15
+assert_malformed("\x7f\x61\x73\x6d\x01\x00\x00\x00");
+
+// binary.wast:16
+assert_malformed("\x80\x61\x73\x6d\x01\x00\x00\x00");
+
+// binary.wast:17
+assert_malformed("\x82\x61\x73\x6d\x01\x00\x00\x00");
+
+// binary.wast:18
+assert_malformed("\xff\x61\x73\x6d\x01\x00\x00\x00");
+
+// binary.wast:21
+assert_malformed("\x00\x00\x00\x01\x6d\x73\x61\x00");
+
+// binary.wast:24
+assert_malformed("\x61\x00\x6d\x73\x00\x01\x00\x00");
+
+// binary.wast:25
+assert_malformed("\x73\x6d\x00\x61\x00\x00\x01\x00");
+
+// binary.wast:28
+assert_malformed("\x00\x41\x53\x4d\x01\x00\x00\x00");
+
+// binary.wast:31
+assert_malformed("\x00\x81\xa2\x94\x01\x00\x00\x00");
+
+// binary.wast:34
+assert_malformed("\xef\xbb\xbf\x00\x61\x73\x6d\x01\x00\x00\x00");
+
+// binary.wast:36
+assert_malformed("\x00\x61\x73\x6d");
+
+// binary.wast:37
+assert_malformed("\x00\x61\x73\x6d\x01");
+
+// binary.wast:38
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00");
+
+// binary.wast:39
+assert_malformed("\x00\x61\x73\x6d\x00\x00\x00\x00");
+
+// binary.wast:40
+assert_malformed("\x00\x61\x73\x6d\x0d\x00\x00\x00");
+
+// binary.wast:41
+assert_malformed("\x00\x61\x73\x6d\x0e\x00\x00\x00");
+
+// binary.wast:42
+assert_malformed("\x00\x61\x73\x6d\x00\x01\x00\x00");
+
+// binary.wast:43
+assert_malformed("\x00\x61\x73\x6d\x00\x00\x01\x00");
+
+// binary.wast:44
+assert_malformed("\x00\x61\x73\x6d\x00\x00\x00\x01");
+
+// binary.wast:47
+let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x04\x01\x00\x82\x00");
+
+// binary.wast:52
+let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x07\x01\x00\x82\x80\x80\x80\x00");
+
+// binary.wast:59
+let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x07\x01\x7f\x00\x41\x80\x00\x0b");
+
+// binary.wast:66
+let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x07\x01\x7f\x00\x41\xff\x7f\x0b");
+
+// binary.wast:73
+let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\x80\x80\x80\x80\x00\x0b");
+
+// binary.wast:80
+let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\xff\xff\xff\xff\x7f\x0b");
+
+// binary.wast:88
+let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x07\x01\x7e\x00\x42\x80\x00\x0b");
+
+// binary.wast:95
+let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x07\x01\x7e\x00\x42\xff\x7f\x0b");
+
+// binary.wast:102
+let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\x80\x80\x80\x80\x80\x80\x80\x80\x80\x00\x0b");
+
+// binary.wast:109
+let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\xff\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x0b");
+
+// binary.wast:118
+let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x03\x01\x00\x00\x0b\x07\x01\x80\x00\x41\x00\x0b\x00");
+
+// binary.wast:128
+let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x04\x04\x01\x70\x00\x00\x09\x07\x01\x80\x00\x41\x00\x0b\x00");
+
+// binary.wast:138
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x08\x01\x00\x82\x80\x80\x80\x80\x00");
+
+// binary.wast:146
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x28\x02\x82\x80\x80\x80\x80\x00\x1a\x0b");
+
+// binary.wast:165
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x28\x82\x80\x80\x80\x80\x00\x00\x1a\x0b");
+
+// binary.wast:184
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x12\x01\x10\x01\x01\x7f\x41\x00\x41\x03\x36\x82\x80\x80\x80\x80\x00\x03\x0b");
+
+// binary.wast:203
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x12\x01\x10\x01\x01\x7f\x41\x00\x41\x03\x36\x02\x82\x80\x80\x80\x80\x00\x0b");
+
+// binary.wast:224
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0b\x01\x7f\x00\x41\x80\x80\x80\x80\x80\x00\x0b");
+
+// binary.wast:234
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0b\x01\x7f\x00\x41\xff\xff\xff\xff\xff\x7f\x0b");
+
+// binary.wast:245
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x10\x01\x7e\x00\x42\x80\x80\x80\x80\x80\x80\x80\x80\x80\x80\x00\x0b");
+
+// binary.wast:255
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x10\x01\x7e\x00\x42\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x0b");
+
+// binary.wast:267
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x07\x01\x00\x82\x80\x80\x80\x70");
+
+// binary.wast:275
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x07\x01\x00\x82\x80\x80\x80\x40");
+
+// binary.wast:283
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x10\x01\x0e\x01\x01\x7f\x41\x00\x28\x02\x82\x80\x80\x80\x10\x1a\x0b");
+
+// binary.wast:302
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x10\x01\x0e\x01\x01\x7f\x41\x00\x28\x02\x82\x80\x80\x80\x40\x1a\x0b");
+
+// binary.wast:321
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x10\x01\x0e\x01\x01\x7f\x41\x00\x28\x82\x80\x80\x80\x10\x00\x1a\x0b");
+
+// binary.wast:339
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x10\x01\x0e\x01\x01\x7f\x41\x00\x28\x82\x80\x80\x80\x40\x00\x1a\x0b");
+
+// binary.wast:358
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x41\x03\x36\x82\x80\x80\x80\x10\x03\x0b");
+
+// binary.wast:377
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x41\x03\x36\x82\x80\x80\x80\x40\x03\x0b");
+
+// binary.wast:396
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x41\x03\x36\x03\x82\x80\x80\x80\x10\x0b");
+
+// binary.wast:415
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x01\x0a\x11\x01\x0f\x01\x01\x7f\x41\x00\x41\x03\x36\x02\x82\x80\x80\x80\x40\x0b");
+
+// binary.wast:437
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\x80\x80\x80\x80\x70\x0b");
+
+// binary.wast:447
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\xff\xff\xff\xff\x0f\x0b");
+
+// binary.wast:457
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\x80\x80\x80\x80\x1f\x0b");
+
+// binary.wast:467
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0a\x01\x7f\x00\x41\xff\xff\xff\xff\x4f\x0b");
+
+// binary.wast:478
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7e\x0b");
+
+// binary.wast:488
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x0b");
+
+// binary.wast:498
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\x80\x80\x80\x80\x80\x80\x80\x80\x80\x02\x0b");
+
+// binary.wast:508
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x0f\x01\x7e\x00\x42\xff\xff\xff\xff\xff\xff\xff\xff\xff\x41\x0b");
+
+// binary.wast:520
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x09\x01\x07\x00\x41\x00\x11\x00\x01\x0b");
+
+// binary.wast:539
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x0a\x01\x07\x00\x41\x00\x11\x00\x80\x00\x0b");
+
+// binary.wast:558
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x0b\x01\x08\x00\x41\x00\x11\x00\x80\x80\x00\x0b");
+
+// binary.wast:576
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x0c\x01\x09\x00\x41\x00\x11\x00\x80\x80\x80\x00\x0b");
+
+// binary.wast:594
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x0a\x0d\x01\x0a\x00\x41\x00\x11\x00\x80\x80\x80\x80\x00\x0b");
+
+// binary.wast:613
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x09\x01\x07\x00\x41\x00\x40\x01\x1a\x0b");
+
+// binary.wast:633
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0a\x01\x08\x00\x41\x00\x40\x80\x00\x1a\x0b");
+
+// binary.wast:653
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0b\x01\x09\x00\x41\x00\x40\x80\x80\x00\x1a\x0b");
+
+// binary.wast:672
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0c\x01\x0a\x00\x41\x00\x40\x80\x80\x80\x00\x1a\x0b");
+
+// binary.wast:691
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0d\x01\x0b\x00\x41\x00\x40\x80\x80\x80\x80\x00\x1a\x0b");
+
+// binary.wast:711
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x07\x01\x05\x00\x3f\x01\x1a\x0b");
+
+// binary.wast:730
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x08\x01\x06\x00\x3f\x80\x00\x1a\x0b");
+
+// binary.wast:749
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x09\x01\x07\x00\x3f\x80\x80\x00\x1a\x0b");
+
+// binary.wast:767
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0a\x01\x08\x00\x3f\x80\x80\x80\x00\x1a\x0b");
+
+// binary.wast:785
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0b\x01\x09\x00\x3f\x80\x80\x80\x80\x00\x1a\x0b");
+
+// binary.wast:804
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x0a\x0c\x01\x0a\x02\xff\xff\xff\xff\x0f\x7f\x02\x7e\x0b");
+
+// binary.wast:821
+let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x0a\x0a\x01\x08\x03\x00\x7f\x00\x7e\x02\x7d\x0b");
+
+// binary.wast:836
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x03\x02\x00\x00");
+
+// binary.wast:846
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x0a\x04\x01\x02\x00\x0b");
+
+// binary.wast:855
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x03\x02\x00\x00\x0a\x04\x01\x02\x00\x0b");
+
+// binary.wast:866
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x0a\x07\x02\x02\x00\x0b\x02\x00\x0b");
+
+// binary.wast:877
+let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x03\x01\x00");
+
+// binary.wast:883
+let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x0a\x01\x00");
+
+// binary.wast:889
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x0c\x01\x03\x0b\x05\x02\x01\x00\x01\x00");
+
+// binary.wast:899
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x0c\x01\x01\x0b\x05\x02\x01\x00\x01\x00");
+
+// binary.wast:909
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x0e\x01\x0c\x00\x41\x00\x41\x00\x41\x00\xfc\x08\x00\x00\x0b\x0b\x03\x01\x01\x00");
+
+// binary.wast:931
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x05\x03\x01\x00\x00\x0a\x07\x01\x05\x00\xfc\x09\x00\x0b\x0b\x03\x01\x01\x00");
+
+// binary.wast:950
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x05\x03\x01\x00\x00\x09\x07\x01\x01\x70\x01\xd3\x00\x0b\x0a\x04\x01\x02\x00\x0b");
+
+// binary.wast:976
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x05\x03\x01\x00\x00\x09\x07\x01\x01\x7f\x01\xd2\x00\x0b\x0a\x04\x01\x02\x00\x0b");
+
+// binary.wast:1002
+let $20 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x05\x03\x01\x00\x00\x09\x07\x01\x01\x70\x01\xd2\x00\x0b\x0a\x04\x01\x02\x00\x0b");
+
+// binary.wast:1026
+let $21 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x04\x01\x60\x00\x00\x03\x02\x01\x00\x04\x04\x01\x70\x00\x00\x05\x03\x01\x00\x00\x09\x06\x01\x01\x70\x01\xd0\x0b\x0a\x04\x01\x02\x00\x0b");
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast
new file mode 100644
index 0000000000..bb71f493d6
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast
@@ -0,0 +1,308 @@
+;; Passive segment syntax
+(module
+ (memory 1)
+ (data passive "foo"))
+
+(module
+ (table 3 funcref)
+ (elem passive funcref (ref.func 0) (ref.null) (ref.func 1))
+ (func)
+ (func))
+
+;; memory.fill
+(module
+ (memory 1)
+
+ (func (export "fill") (param i32 i32 i32)
+ (memory.fill
+ (local.get 0)
+ (local.get 1)
+ (local.get 2)))
+
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0)))
+)
+
+;; Basic fill test.
+(invoke "fill" (i32.const 1) (i32.const 0xff) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0xff))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 0xff))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 0xff))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 0))
+
+;; Fill value is stored as a byte.
+(invoke "fill" (i32.const 0) (i32.const 0xbbaa) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0xaa))
+
+;; Fill all of memory
+(invoke "fill" (i32.const 0) (i32.const 0) (i32.const 0x10000))
+
+;; Out-of-bounds writes trap, but all previous writes succeed.
+(assert_trap (invoke "fill" (i32.const 0xff00) (i32.const 1) (i32.const 0x101))
+ "out of bounds memory access")
+(assert_return (invoke "load8_u" (i32.const 0xff00)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 0xffff)) (i32.const 1))
+
+;; Succeed when writing 0 bytes at the end of the region.
+(invoke "fill" (i32.const 0x10000) (i32.const 0) (i32.const 0))
+
+;; Fail on out-of-bounds when writing 0 bytes outside of memory.
+(assert_trap (invoke "fill" (i32.const 0x10001) (i32.const 0) (i32.const 0))
+ "out of bounds memory access")
+
+
+;; memory.copy
+(module
+ (memory (data "\aa\bb\cc\dd"))
+
+ (func (export "copy") (param i32 i32 i32)
+ (memory.copy
+ (local.get 0)
+ (local.get 1)
+ (local.get 2)))
+
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0)))
+)
+
+;; Non-overlapping copy.
+(invoke "copy" (i32.const 10) (i32.const 0) (i32.const 4))
+
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xbb))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xdd))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 0))
+
+;; Overlap, source > dest
+(invoke "copy" (i32.const 8) (i32.const 10) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0xbb))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xdd))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xdd))
+
+;; Overlap, source < dest
+(invoke "copy" (i32.const 10) (i32.const 7) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0xbb))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 0xdd))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 0))
+
+;; Copy ending at memory limit is ok.
+(invoke "copy" (i32.const 0xff00) (i32.const 0) (i32.const 0x100))
+(invoke "copy" (i32.const 0xfe00) (i32.const 0xff00) (i32.const 0x100))
+
+;; Out-of-bounds writes trap, but all previous writes succeed.
+(assert_trap (invoke "copy" (i32.const 0xfffe) (i32.const 0) (i32.const 3))
+ "out of bounds memory access")
+(assert_return (invoke "load8_u" (i32.const 0xfffe)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 0xffff)) (i32.const 0xbb))
+
+;; Succeed when copying 0 bytes at the end of the region.
+(invoke "copy" (i32.const 0x10000) (i32.const 0) (i32.const 0))
+(invoke "copy" (i32.const 0) (i32.const 0x10000) (i32.const 0))
+
+;; Fail on out-of-bounds when copying 0 bytes outside of memory.
+(assert_trap (invoke "copy" (i32.const 0x10001) (i32.const 0) (i32.const 0))
+ "out of bounds memory access")
+(assert_trap (invoke "copy" (i32.const 0) (i32.const 0x10001) (i32.const 0))
+ "out of bounds memory access")
+
+
+;; memory.init
+(module
+ (memory 1)
+ (data passive "\aa\bb\cc\dd")
+
+ (func (export "init") (param i32 i32 i32)
+ (memory.init 0
+ (local.get 0)
+ (local.get 1)
+ (local.get 2)))
+
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0)))
+)
+
+(invoke "init" (i32.const 0) (i32.const 1) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0xbb))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0xcc))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 0))
+
+;; Init ending at memory limit and segment limit is ok.
+(invoke "init" (i32.const 0xfffc) (i32.const 0) (i32.const 4))
+
+;; Out-of-bounds writes trap, but all previous writes succeed.
+(assert_trap (invoke "init" (i32.const 0xfffe) (i32.const 0) (i32.const 3))
+ "out of bounds memory access")
+(assert_return (invoke "load8_u" (i32.const 0xfffe)) (i32.const 0xaa))
+(assert_return (invoke "load8_u" (i32.const 0xffff)) (i32.const 0xbb))
+
+;; Succeed when writing 0 bytes at the end of either region.
+(invoke "init" (i32.const 0x10000) (i32.const 0) (i32.const 0))
+(invoke "init" (i32.const 0) (i32.const 4) (i32.const 0))
+
+;; Fail on out-of-bounds when writing 0 bytes outside of memory or segment.
+(assert_trap (invoke "init" (i32.const 0x10001) (i32.const 0) (i32.const 0))
+ "out of bounds memory access")
+(assert_trap (invoke "init" (i32.const 0) (i32.const 5) (i32.const 0))
+ "out of bounds memory access")
+
+;; data.drop
+(module
+ (memory 1)
+ (data $p passive "")
+ (data $a 0 (i32.const 0) "")
+
+ (func (export "drop_passive") (data.drop $p))
+ (func (export "init_passive")
+ (memory.init $p (i32.const 0) (i32.const 0) (i32.const 0)))
+
+ (func (export "drop_active") (data.drop $a))
+ (func (export "init_active")
+ (memory.init $a (i32.const 0) (i32.const 0) (i32.const 0)))
+)
+
+(invoke "init_passive")
+(invoke "drop_passive")
+(assert_trap (invoke "drop_passive") "data segment dropped")
+(assert_trap (invoke "init_passive") "data segment dropped")
+(assert_trap (invoke "drop_active") "data segment dropped")
+(assert_trap (invoke "init_active") "data segment dropped")
+
+
+;; table.init
+(module
+ (table 3 funcref)
+ (elem passive funcref
+ (ref.func $zero) (ref.func $one) (ref.func $zero) (ref.func $one))
+
+ (func $zero (result i32) (i32.const 0))
+ (func $one (result i32) (i32.const 1))
+
+ (func (export "init") (param i32 i32 i32)
+ (table.init 0
+ (local.get 0)
+ (local.get 1)
+ (local.get 2)))
+
+ (func (export "call") (param i32) (result i32)
+ (call_indirect (result i32)
+ (local.get 0)))
+)
+
+(invoke "init" (i32.const 0) (i32.const 1) (i32.const 2))
+(assert_return (invoke "call" (i32.const 0)) (i32.const 1))
+(assert_return (invoke "call" (i32.const 1)) (i32.const 0))
+(assert_trap (invoke "call" (i32.const 2)) "uninitialized element")
+
+;; Init ending at table limit and segment limit is ok.
+(invoke "init" (i32.const 1) (i32.const 2) (i32.const 2))
+
+;; Out-of-bounds stores trap, but all previous stores succeed.
+(assert_trap (invoke "init" (i32.const 2) (i32.const 0) (i32.const 2))
+ "out of bounds table access")
+(assert_return (invoke "call" (i32.const 2)) (i32.const 0))
+
+;; Succeed when storing 0 elements at the end of either region.
+(invoke "init" (i32.const 3) (i32.const 0) (i32.const 0))
+(invoke "init" (i32.const 0) (i32.const 4) (i32.const 0))
+
+;; Fail on out-of-bounds when storing 0 elements outside of table or segment.
+(assert_trap (invoke "init" (i32.const 4) (i32.const 0) (i32.const 0))
+ "out of bounds table access")
+(assert_trap (invoke "init" (i32.const 0) (i32.const 5) (i32.const 0))
+ "out of bounds table access")
+
+
+;; elem.drop
+(module
+ (table 1 funcref)
+ (func $f)
+ (elem $p passive funcref (ref.func $f))
+ (elem $a 0 (i32.const 0) $f)
+
+ (func (export "drop_passive") (elem.drop $p))
+ (func (export "init_passive")
+ (table.init $p (i32.const 0) (i32.const 0) (i32.const 0)))
+
+ (func (export "drop_active") (elem.drop $a))
+ (func (export "init_active")
+ (table.init $a (i32.const 0) (i32.const 0) (i32.const 0)))
+)
+
+(invoke "init_passive")
+(invoke "drop_passive")
+(assert_trap (invoke "drop_passive") "element segment dropped")
+(assert_trap (invoke "init_passive") "element segment dropped")
+(assert_trap (invoke "drop_active") "element segment dropped")
+(assert_trap (invoke "init_active") "element segment dropped")
+
+
+;; table.copy
+(module
+ (table 10 funcref)
+ (elem (i32.const 0) $zero $one $two)
+ (func $zero (result i32) (i32.const 0))
+ (func $one (result i32) (i32.const 1))
+ (func $two (result i32) (i32.const 2))
+
+ (func (export "copy") (param i32 i32 i32)
+ (table.copy
+ (local.get 0)
+ (local.get 1)
+ (local.get 2)))
+
+ (func (export "call") (param i32) (result i32)
+ (call_indirect (result i32)
+ (local.get 0)))
+)
+
+;; Non-overlapping copy.
+(invoke "copy" (i32.const 3) (i32.const 0) (i32.const 3))
+;; Now [$zero, $one, $two, $zero, $one, $two, ...]
+(assert_return (invoke "call" (i32.const 3)) (i32.const 0))
+(assert_return (invoke "call" (i32.const 4)) (i32.const 1))
+(assert_return (invoke "call" (i32.const 5)) (i32.const 2))
+
+;; Overlap, source > dest
+(invoke "copy" (i32.const 0) (i32.const 1) (i32.const 3))
+;; Now [$one, $two, $zero, $zero, $one, $two, ...]
+(assert_return (invoke "call" (i32.const 0)) (i32.const 1))
+(assert_return (invoke "call" (i32.const 1)) (i32.const 2))
+(assert_return (invoke "call" (i32.const 2)) (i32.const 0))
+
+;; Overlap, source < dest
+(invoke "copy" (i32.const 2) (i32.const 0) (i32.const 3))
+;; Now [$one, $two, $one, $two, $zero, $two, ...]
+(assert_return (invoke "call" (i32.const 2)) (i32.const 1))
+(assert_return (invoke "call" (i32.const 3)) (i32.const 2))
+(assert_return (invoke "call" (i32.const 4)) (i32.const 0))
+
+;; Copy ending at table limit is ok.
+(invoke "copy" (i32.const 6) (i32.const 8) (i32.const 2))
+(invoke "copy" (i32.const 8) (i32.const 6) (i32.const 2))
+
+;; Out-of-bounds writes trap, but all previous writes succeed.
+(assert_trap (invoke "call" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "copy" (i32.const 9) (i32.const 0) (i32.const 2))
+ "out of bounds table access")
+(assert_return (invoke "call" (i32.const 9)) (i32.const 1))
+
+;; Succeed when copying 0 elements at the end of the region.
+(invoke "copy" (i32.const 10) (i32.const 0) (i32.const 0))
+(invoke "copy" (i32.const 0) (i32.const 10) (i32.const 0))
+
+;; Fail on out-of-bounds when copying 0 elements outside of table.
+(assert_trap (invoke "copy" (i32.const 11) (i32.const 0) (i32.const 0))
+ "out of bounds table access")
+(assert_trap (invoke "copy" (i32.const 0) (i32.const 11) (i32.const 0))
+ "out of bounds table access")
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast.js
new file mode 100644
index 0000000000..294aca0c5c
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/bulk.wast.js
@@ -0,0 +1,470 @@
+
+'use strict';
+
+let spectest = {
+ print: console.log.bind(console),
+ print_i32: console.log.bind(console),
+ print_i32_f32: console.log.bind(console),
+ print_f64_f64: console.log.bind(console),
+ print_f32: console.log.bind(console),
+ print_f64: console.log.bind(console),
+ global_i32: 666,
+ global_f32: 666,
+ global_f64: 666,
+ table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
+ memory: new WebAssembly.Memory({initial: 1, maximum: 2})
+};
+let handler = {
+ get(target, prop) {
+ return (prop in target) ? target[prop] : {};
+ }
+};
+let registry = new Proxy({spectest}, handler);
+
+function register(name, instance) {
+ registry[name] = instance.exports;
+}
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function get(instance, name) {
+ let v = instance.exports[name];
+ return (v instanceof WebAssembly.Global) ? v.value : v;
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function run(action) {
+ action();
+}
+
+function assert_malformed(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm decoding failure expected");
+}
+
+function assert_invalid(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm validation failure expected");
+}
+
+function assert_unlinkable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.LinkError) return;
+ }
+ throw new Error("Wasm linking failure expected");
+}
+
+function assert_uninstantiable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+function assert_trap(action) {
+ try { action() } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+let StackOverflow;
+try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
+
+function assert_exhaustion(action) {
+ try { action() } catch (e) {
+ if (e instanceof StackOverflow) return;
+ }
+ throw new Error("Wasm resource exhaustion expected");
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+function assert_return_canonical_nan(action) {
+ let actual = action();
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test that it's a canonical NaN.
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+function assert_return_arithmetic_nan(action) {
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test for specific bitpatterns here.
+ let actual = action();
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+// bulk.wast:2
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x0b\x86\x80\x80\x80\x00\x01\x01\x03\x66\x6f\x6f");
+
+// bulk.wast:6
+let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x03\x09\x8c\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd0\x0b\xd2\x01\x0b\x0a\x8f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x82\x80\x80\x80\x00\x00\x0b");
+
+// bulk.wast:13
+let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x92\x80\x80\x80\x00\x02\x04\x66\x69\x6c\x6c\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9d\x80\x80\x80\x00\x02\x8b\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0b\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b");
+
+// bulk.wast:27
+run(() => call($3, "fill", [1, 255, 3]));
+
+// bulk.wast:28
+assert_return(() => call($3, "load8_u", [0]), 0);
+
+// bulk.wast:29
+assert_return(() => call($3, "load8_u", [1]), 255);
+
+// bulk.wast:30
+assert_return(() => call($3, "load8_u", [2]), 255);
+
+// bulk.wast:31
+assert_return(() => call($3, "load8_u", [3]), 255);
+
+// bulk.wast:32
+assert_return(() => call($3, "load8_u", [4]), 0);
+
+// bulk.wast:35
+run(() => call($3, "fill", [0, 48042, 2]));
+
+// bulk.wast:36
+assert_return(() => call($3, "load8_u", [0]), 170);
+
+// bulk.wast:37
+assert_return(() => call($3, "load8_u", [1]), 170);
+
+// bulk.wast:40
+run(() => call($3, "fill", [0, 0, 65536]));
+
+// bulk.wast:43
+assert_trap(() => call($3, "fill", [65280, 1, 257]));
+
+// bulk.wast:45
+assert_return(() => call($3, "load8_u", [65280]), 1);
+
+// bulk.wast:46
+assert_return(() => call($3, "load8_u", [65535]), 1);
+
+// bulk.wast:49
+run(() => call($3, "fill", [65536, 0, 0]));
+
+// bulk.wast:52
+assert_trap(() => call($3, "fill", [65537, 0, 0]));
+
+// bulk.wast:57
+let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x92\x80\x80\x80\x00\x02\x04\x63\x6f\x70\x79\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x8a\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x04\xaa\xbb\xcc\xdd");
+
+// bulk.wast:71
+run(() => call($4, "copy", [10, 0, 4]));
+
+// bulk.wast:73
+assert_return(() => call($4, "load8_u", [9]), 0);
+
+// bulk.wast:74
+assert_return(() => call($4, "load8_u", [10]), 170);
+
+// bulk.wast:75
+assert_return(() => call($4, "load8_u", [11]), 187);
+
+// bulk.wast:76
+assert_return(() => call($4, "load8_u", [12]), 204);
+
+// bulk.wast:77
+assert_return(() => call($4, "load8_u", [13]), 221);
+
+// bulk.wast:78
+assert_return(() => call($4, "load8_u", [14]), 0);
+
+// bulk.wast:81
+run(() => call($4, "copy", [8, 10, 4]));
+
+// bulk.wast:82
+assert_return(() => call($4, "load8_u", [8]), 170);
+
+// bulk.wast:83
+assert_return(() => call($4, "load8_u", [9]), 187);
+
+// bulk.wast:84
+assert_return(() => call($4, "load8_u", [10]), 204);
+
+// bulk.wast:85
+assert_return(() => call($4, "load8_u", [11]), 221);
+
+// bulk.wast:86
+assert_return(() => call($4, "load8_u", [12]), 204);
+
+// bulk.wast:87
+assert_return(() => call($4, "load8_u", [13]), 221);
+
+// bulk.wast:90
+run(() => call($4, "copy", [10, 7, 6]));
+
+// bulk.wast:91
+assert_return(() => call($4, "load8_u", [10]), 0);
+
+// bulk.wast:92
+assert_return(() => call($4, "load8_u", [11]), 170);
+
+// bulk.wast:93
+assert_return(() => call($4, "load8_u", [12]), 187);
+
+// bulk.wast:94
+assert_return(() => call($4, "load8_u", [13]), 204);
+
+// bulk.wast:95
+assert_return(() => call($4, "load8_u", [14]), 221);
+
+// bulk.wast:96
+assert_return(() => call($4, "load8_u", [15]), 204);
+
+// bulk.wast:97
+assert_return(() => call($4, "load8_u", [16]), 0);
+
+// bulk.wast:100
+run(() => call($4, "copy", [65280, 0, 256]));
+
+// bulk.wast:101
+run(() => call($4, "copy", [65024, 65280, 256]));
+
+// bulk.wast:104
+assert_trap(() => call($4, "copy", [65534, 0, 3]));
+
+// bulk.wast:106
+assert_return(() => call($4, "load8_u", [65534]), 170);
+
+// bulk.wast:107
+assert_return(() => call($4, "load8_u", [65535]), 187);
+
+// bulk.wast:110
+run(() => call($4, "copy", [65536, 0, 0]));
+
+// bulk.wast:111
+run(() => call($4, "copy", [0, 65536, 0]));
+
+// bulk.wast:114
+assert_trap(() => call($4, "copy", [65537, 0, 0]));
+
+// bulk.wast:116
+assert_trap(() => call($4, "copy", [0, 65537, 0]));
+
+// bulk.wast:121
+let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x92\x80\x80\x80\x00\x02\x04\x69\x6e\x69\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x08\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x87\x80\x80\x80\x00\x01\x01\x04\xaa\xbb\xcc\xdd");
+
+// bulk.wast:135
+run(() => call($5, "init", [0, 1, 2]));
+
+// bulk.wast:136
+assert_return(() => call($5, "load8_u", [0]), 187);
+
+// bulk.wast:137
+assert_return(() => call($5, "load8_u", [1]), 204);
+
+// bulk.wast:138
+assert_return(() => call($5, "load8_u", [2]), 0);
+
+// bulk.wast:141
+run(() => call($5, "init", [65532, 0, 4]));
+
+// bulk.wast:144
+assert_trap(() => call($5, "init", [65534, 0, 3]));
+
+// bulk.wast:146
+assert_return(() => call($5, "load8_u", [65534]), 170);
+
+// bulk.wast:147
+assert_return(() => call($5, "load8_u", [65535]), 187);
+
+// bulk.wast:150
+run(() => call($5, "init", [65536, 0, 0]));
+
+// bulk.wast:151
+run(() => call($5, "init", [0, 4, 0]));
+
+// bulk.wast:154
+assert_trap(() => call($5, "init", [65537, 0, 0]));
+
+// bulk.wast:156
+assert_trap(() => call($5, "init", [0, 5, 0]));
+
+// bulk.wast:160
+let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x85\x80\x80\x80\x00\x04\x00\x00\x00\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\xbb\x80\x80\x80\x00\x04\x0c\x64\x72\x6f\x70\x5f\x70\x61\x73\x73\x69\x76\x65\x00\x00\x0c\x69\x6e\x69\x74\x5f\x70\x61\x73\x73\x69\x76\x65\x00\x01\x0b\x64\x72\x6f\x70\x5f\x61\x63\x74\x69\x76\x65\x00\x02\x0b\x69\x6e\x69\x74\x5f\x61\x63\x74\x69\x76\x65\x00\x03\x0c\x81\x80\x80\x80\x00\x02\x0a\xb7\x80\x80\x80\x00\x04\x85\x80\x80\x80\x00\x00\xfc\x09\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x00\x41\x00\x41\x00\xfc\x08\x00\x00\x0b\x85\x80\x80\x80\x00\x00\xfc\x09\x01\x0b\x8c\x80\x80\x80\x00\x00\x41\x00\x41\x00\x41\x00\xfc\x08\x01\x00\x0b\x0b\x88\x80\x80\x80\x00\x02\x01\x00\x00\x41\x00\x0b\x00");
+
+// bulk.wast:174
+run(() => call($6, "init_passive", []));
+
+// bulk.wast:175
+run(() => call($6, "drop_passive", []));
+
+// bulk.wast:176
+assert_trap(() => call($6, "drop_passive", []));
+
+// bulk.wast:177
+assert_trap(() => call($6, "init_passive", []));
+
+// bulk.wast:178
+assert_trap(() => call($6, "drop_active", []));
+
+// bulk.wast:179
+assert_trap(() => call($6, "init_active", []));
+
+// bulk.wast:183
+let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x85\x80\x80\x80\x00\x04\x00\x00\x01\x02\x04\x84\x80\x80\x80\x00\x01\x70\x00\x03\x07\x8f\x80\x80\x80\x00\x02\x04\x69\x6e\x69\x74\x00\x02\x04\x63\x61\x6c\x6c\x00\x03\x09\x90\x80\x80\x80\x00\x01\x01\x70\x04\xd2\x00\x0b\xd2\x01\x0b\xd2\x00\x0b\xd2\x01\x0b\x0a\xb0\x80\x80\x80\x00\x04\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0c\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// bulk.wast:202
+run(() => call($7, "init", [0, 1, 2]));
+
+// bulk.wast:203
+assert_return(() => call($7, "call", [0]), 1);
+
+// bulk.wast:204
+assert_return(() => call($7, "call", [1]), 0);
+
+// bulk.wast:205
+assert_trap(() => call($7, "call", [2]));
+
+// bulk.wast:208
+run(() => call($7, "init", [1, 2, 2]));
+
+// bulk.wast:211
+assert_trap(() => call($7, "init", [2, 0, 2]));
+
+// bulk.wast:213
+assert_return(() => call($7, "call", [2]), 0);
+
+// bulk.wast:216
+run(() => call($7, "init", [3, 0, 0]));
+
+// bulk.wast:217
+run(() => call($7, "init", [0, 4, 0]));
+
+// bulk.wast:220
+assert_trap(() => call($7, "init", [4, 0, 0]));
+
+// bulk.wast:222
+assert_trap(() => call($7, "init", [0, 5, 0]));
+
+// bulk.wast:227
+let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x86\x80\x80\x80\x00\x05\x00\x00\x00\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x01\x07\xbb\x80\x80\x80\x00\x04\x0c\x64\x72\x6f\x70\x5f\x70\x61\x73\x73\x69\x76\x65\x00\x01\x0c\x69\x6e\x69\x74\x5f\x70\x61\x73\x73\x69\x76\x65\x00\x02\x0b\x64\x72\x6f\x70\x5f\x61\x63\x74\x69\x76\x65\x00\x03\x0b\x69\x6e\x69\x74\x5f\x61\x63\x74\x69\x76\x65\x00\x04\x09\x8d\x80\x80\x80\x00\x02\x01\x70\x01\xd2\x00\x0b\x00\x41\x00\x0b\x01\x00\x0a\xbe\x80\x80\x80\x00\x05\x82\x80\x80\x80\x00\x00\x0b\x85\x80\x80\x80\x00\x00\xfc\x0d\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x00\x41\x00\x41\x00\xfc\x0c\x00\x00\x0b\x85\x80\x80\x80\x00\x00\xfc\x0d\x01\x0b\x8c\x80\x80\x80\x00\x00\x41\x00\x41\x00\x41\x00\xfc\x0c\x01\x00\x0b");
+
+// bulk.wast:242
+run(() => call($8, "init_passive", []));
+
+// bulk.wast:243
+run(() => call($8, "drop_passive", []));
+
+// bulk.wast:244
+assert_trap(() => call($8, "drop_passive", []));
+
+// bulk.wast:245
+assert_trap(() => call($8, "init_passive", []));
+
+// bulk.wast:246
+assert_trap(() => call($8, "drop_active", []));
+
+// bulk.wast:247
+assert_trap(() => call($8, "init_active", []));
+
+// bulk.wast:251
+let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x86\x80\x80\x80\x00\x05\x00\x00\x00\x01\x02\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x8f\x80\x80\x80\x00\x02\x04\x63\x6f\x70\x79\x00\x03\x04\x63\x61\x6c\x6c\x00\x04\x09\x89\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x03\x00\x01\x02\x0a\xb9\x80\x80\x80\x00\x05\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// bulk.wast:270
+run(() => call($9, "copy", [3, 0, 3]));
+
+// bulk.wast:272
+assert_return(() => call($9, "call", [3]), 0);
+
+// bulk.wast:273
+assert_return(() => call($9, "call", [4]), 1);
+
+// bulk.wast:274
+assert_return(() => call($9, "call", [5]), 2);
+
+// bulk.wast:277
+run(() => call($9, "copy", [0, 1, 3]));
+
+// bulk.wast:279
+assert_return(() => call($9, "call", [0]), 1);
+
+// bulk.wast:280
+assert_return(() => call($9, "call", [1]), 2);
+
+// bulk.wast:281
+assert_return(() => call($9, "call", [2]), 0);
+
+// bulk.wast:284
+run(() => call($9, "copy", [2, 0, 3]));
+
+// bulk.wast:286
+assert_return(() => call($9, "call", [2]), 1);
+
+// bulk.wast:287
+assert_return(() => call($9, "call", [3]), 2);
+
+// bulk.wast:288
+assert_return(() => call($9, "call", [4]), 0);
+
+// bulk.wast:291
+run(() => call($9, "copy", [6, 8, 2]));
+
+// bulk.wast:292
+run(() => call($9, "copy", [8, 6, 2]));
+
+// bulk.wast:295
+assert_trap(() => call($9, "call", [9]));
+
+// bulk.wast:296
+assert_trap(() => call($9, "copy", [9, 0, 2]));
+
+// bulk.wast:298
+assert_return(() => call($9, "call", [9]), 1);
+
+// bulk.wast:301
+run(() => call($9, "copy", [10, 0, 0]));
+
+// bulk.wast:302
+run(() => call($9, "copy", [0, 10, 0]));
+
+// bulk.wast:305
+assert_trap(() => call($9, "copy", [11, 0, 0]));
+
+// bulk.wast:307
+assert_trap(() => call($9, "copy", [0, 11, 0]));
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast
new file mode 100644
index 0000000000..0310f76b54
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast
@@ -0,0 +1,130 @@
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\00\24\10" "a custom section" "this is the payload"
+ "\00\20\10" "a custom section" "this is payload"
+ "\00\11\10" "a custom section" ""
+ "\00\10\00" "" "this is payload"
+ "\00\01\00" "" ""
+ "\00\24\10" "\00\00custom sectio\00" "this is the payload"
+ "\00\24\10" "\ef\bb\bfa custom sect" "this is the payload"
+ "\00\24\10" "a custom sect\e2\8c\a3" "this is the payload"
+ "\00\1f\16" "module within a module" "\00asm" "\01\00\00\00"
+)
+
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\01\01\00" ;; type section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\02\01\00" ;; import section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\03\01\00" ;; function section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\04\01\00" ;; table section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\05\01\00" ;; memory section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\06\01\00" ;; global section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\07\01\00" ;; export section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\09\01\00" ;; element section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\0a\01\00" ;; code section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+ "\0b\01\00" ;; data section
+ "\00\0e\06" "custom" "payload"
+ "\00\0e\06" "custom" "payload"
+)
+
+(module binary
+ "\00asm" "\01\00\00\00"
+ "\01\07\01\60\02\7f\7f\01\7f" ;; type section
+ "\00\1a\06" "custom" "this is the payload" ;; custom section
+ "\03\02\01\00" ;; function section
+ "\07\0a\01\06\61\64\64\54\77\6f\00\00" ;; export section
+ "\0a\09\01\07\00\20\00\20\01\6a\0b" ;; code section
+ "\00\1b\07" "custom2" "this is the payload" ;; custom section
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\00"
+ )
+ "unexpected end"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\00\00"
+ )
+ "unexpected end"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\00\00\00\05\01\00\07\00\00"
+ )
+ "unexpected end"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\00\26\10" "a custom section" "this is the payload"
+ )
+ "unexpected end"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\00\25\10" "a custom section" "this is the payload"
+ "\00\24\10" "a custom section" "this is the payload"
+ )
+ "invalid section id"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\01\07\01\60\02\7f\7f\01\7f" ;; type section
+ "\00\25\10" "a custom section" "this is the payload" ;; invalid length!
+ "\03\02\01\00" ;; function section
+ "\0a\09\01\07\00\20\00\20\01\6a\0b" ;; code section
+ "\00\1b\07" "custom2" "this is the payload" ;; custom section
+ )
+ "function and code section have inconsistent lengths"
+)
+
+;; Test concatenated modules.
+(assert_malformed
+ (module binary
+ "\00asm\01\00\00\00"
+ "\00asm\01\00\00\00"
+ )
+ "length out of bounds"
+)
+
+(assert_malformed
+ (module binary
+ "\00asm" "\01\00\00\00"
+ "\05\03\01\00\01" ;; memory section
+ "\0c\01\02" ;; data count section (2 segments)
+ "\0b\06\01\00\41\00\0b\00" ;; data section (1 segment)
+ )
+ "data count and data section have inconsistent lengths"
+)
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast.js
new file mode 100644
index 0000000000..d078b10fee
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/custom.wast.js
@@ -0,0 +1,170 @@
+
+'use strict';
+
+let spectest = {
+ print: console.log.bind(console),
+ print_i32: console.log.bind(console),
+ print_i32_f32: console.log.bind(console),
+ print_f64_f64: console.log.bind(console),
+ print_f32: console.log.bind(console),
+ print_f64: console.log.bind(console),
+ global_i32: 666,
+ global_f32: 666,
+ global_f64: 666,
+ table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
+ memory: new WebAssembly.Memory({initial: 1, maximum: 2})
+};
+let handler = {
+ get(target, prop) {
+ return (prop in target) ? target[prop] : {};
+ }
+};
+let registry = new Proxy({spectest}, handler);
+
+function register(name, instance) {
+ registry[name] = instance.exports;
+}
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function get(instance, name) {
+ let v = instance.exports[name];
+ return (v instanceof WebAssembly.Global) ? v.value : v;
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function run(action) {
+ action();
+}
+
+function assert_malformed(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm decoding failure expected");
+}
+
+function assert_invalid(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm validation failure expected");
+}
+
+function assert_unlinkable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.LinkError) return;
+ }
+ throw new Error("Wasm linking failure expected");
+}
+
+function assert_uninstantiable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+function assert_trap(action) {
+ try { action() } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+let StackOverflow;
+try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
+
+function assert_exhaustion(action) {
+ try { action() } catch (e) {
+ if (e instanceof StackOverflow) return;
+ }
+ throw new Error("Wasm resource exhaustion expected");
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+function assert_return_canonical_nan(action) {
+ let actual = action();
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test that it's a canonical NaN.
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+function assert_return_arithmetic_nan(action) {
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test for specific bitpatterns here.
+ let actual = action();
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+// custom.wast:1
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x24\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x20\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x11\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x00\x10\x00\x74\x68\x69\x73\x20\x69\x73\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x01\x00\x00\x24\x10\x00\x00\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x00\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x24\x10\xef\xbb\xbf\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x24\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\xe2\x8c\xa3\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x1f\x16\x6d\x6f\x64\x75\x6c\x65\x20\x77\x69\x74\x68\x69\x6e\x20\x61\x20\x6d\x6f\x64\x75\x6c\x65\x00\x61\x73\x6d\x01\x00\x00\x00");
+
+// custom.wast:14
+let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x01\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x02\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x03\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x04\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x05\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x06\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x07\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x09\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x0a\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x0b\x01\x00\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64\x00\x0e\x06\x63\x75\x73\x74\x6f\x6d\x70\x61\x79\x6c\x6f\x61\x64");
+
+// custom.wast:50
+let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x07\x01\x60\x02\x7f\x7f\x01\x7f\x00\x1a\x06\x63\x75\x73\x74\x6f\x6d\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x03\x02\x01\x00\x07\x0a\x01\x06\x61\x64\x64\x54\x77\x6f\x00\x00\x0a\x09\x01\x07\x00\x20\x00\x20\x01\x6a\x0b\x00\x1b\x07\x63\x75\x73\x74\x6f\x6d\x32\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64");
+
+// custom.wast:60
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00");
+
+// custom.wast:68
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x00");
+
+// custom.wast:76
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x00\x00\x05\x01\x00\x07\x00\x00");
+
+// custom.wast:84
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x26\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64");
+
+// custom.wast:92
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x25\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x00\x24\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64");
+
+// custom.wast:101
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x07\x01\x60\x02\x7f\x7f\x01\x7f\x00\x25\x10\x61\x20\x63\x75\x73\x74\x6f\x6d\x20\x73\x65\x63\x74\x69\x6f\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64\x03\x02\x01\x00\x0a\x09\x01\x07\x00\x20\x00\x20\x01\x6a\x0b\x00\x1b\x07\x63\x75\x73\x74\x6f\x6d\x32\x74\x68\x69\x73\x20\x69\x73\x20\x74\x68\x65\x20\x70\x61\x79\x6c\x6f\x61\x64");
+
+// custom.wast:114
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x00\x61\x73\x6d\x01\x00\x00\x00");
+
+// custom.wast:122
+assert_malformed("\x00\x61\x73\x6d\x01\x00\x00\x00\x05\x03\x01\x00\x01\x0c\x01\x02\x0b\x06\x01\x00\x41\x00\x0b\x00");
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast
new file mode 100644
index 0000000000..5edb6eb87d
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast
@@ -0,0 +1,392 @@
+;; Functions
+
+(module $Mf
+ (func (export "call") (result i32) (call $g))
+ (func $g (result i32) (i32.const 2))
+)
+(register "Mf" $Mf)
+
+(module $Nf
+ (func $f (import "Mf" "call") (result i32))
+ (export "Mf.call" (func $f))
+ (func (export "call Mf.call") (result i32) (call $f))
+ (func (export "call") (result i32) (call $g))
+ (func $g (result i32) (i32.const 3))
+)
+
+(assert_return (invoke $Mf "call") (i32.const 2))
+(assert_return (invoke $Nf "Mf.call") (i32.const 2))
+(assert_return (invoke $Nf "call") (i32.const 3))
+(assert_return (invoke $Nf "call Mf.call") (i32.const 2))
+
+(module
+ (import "spectest" "print_i32" (func $f (param i32)))
+ (export "print" (func $f))
+)
+(register "reexport_f")
+(assert_unlinkable
+ (module (import "reexport_f" "print" (func (param i64))))
+ "incompatible import type"
+)
+(assert_unlinkable
+ (module (import "reexport_f" "print" (func (param i32) (result i32))))
+ "incompatible import type"
+)
+
+
+;; Globals
+
+(module $Mg
+ (global $glob (export "glob") i32 (i32.const 42))
+ (func (export "get") (result i32) (global.get $glob))
+
+ ;; export mutable globals
+ (global $mut_glob (export "mut_glob") (mut i32) (i32.const 142))
+ (func (export "get_mut") (result i32) (global.get $mut_glob))
+ (func (export "set_mut") (param i32) (global.set $mut_glob (local.get 0)))
+)
+(register "Mg" $Mg)
+
+(module $Ng
+ (global $x (import "Mg" "glob") i32)
+ (global $mut_glob (import "Mg" "mut_glob") (mut i32))
+ (func $f (import "Mg" "get") (result i32))
+ (func $get_mut (import "Mg" "get_mut") (result i32))
+ (func $set_mut (import "Mg" "set_mut") (param i32))
+
+ (export "Mg.glob" (global $x))
+ (export "Mg.get" (func $f))
+ (global $glob (export "glob") i32 (i32.const 43))
+ (func (export "get") (result i32) (global.get $glob))
+
+ (export "Mg.mut_glob" (global $mut_glob))
+ (export "Mg.get_mut" (func $get_mut))
+ (export "Mg.set_mut" (func $set_mut))
+)
+
+(assert_return (get $Mg "glob") (i32.const 42))
+(assert_return (get $Ng "Mg.glob") (i32.const 42))
+(assert_return (get $Ng "glob") (i32.const 43))
+(assert_return (invoke $Mg "get") (i32.const 42))
+(assert_return (invoke $Ng "Mg.get") (i32.const 42))
+(assert_return (invoke $Ng "get") (i32.const 43))
+
+(assert_return (get $Mg "mut_glob") (i32.const 142))
+(assert_return (get $Ng "Mg.mut_glob") (i32.const 142))
+(assert_return (invoke $Mg "get_mut") (i32.const 142))
+(assert_return (invoke $Ng "Mg.get_mut") (i32.const 142))
+
+(assert_return (invoke $Mg "set_mut" (i32.const 241)))
+(assert_return (get $Mg "mut_glob") (i32.const 241))
+(assert_return (get $Ng "Mg.mut_glob") (i32.const 241))
+(assert_return (invoke $Mg "get_mut") (i32.const 241))
+(assert_return (invoke $Ng "Mg.get_mut") (i32.const 241))
+
+
+(assert_unlinkable
+ (module (import "Mg" "mut_glob" (global i32)))
+ "incompatible import type"
+)
+(assert_unlinkable
+ (module (import "Mg" "glob" (global (mut i32))))
+ "incompatible import type"
+)
+
+;; Tables
+
+(module $Mt
+ (type (func (result i32)))
+ (type (func))
+
+ (table (export "tab") 10 funcref)
+ (elem (i32.const 2) $g $g $g $g)
+ (func $g (result i32) (i32.const 4))
+ (func (export "h") (result i32) (i32.const -4))
+
+ (func (export "call") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0))
+ )
+)
+(register "Mt" $Mt)
+
+(module $Nt
+ (type (func))
+ (type (func (result i32)))
+
+ (func $f (import "Mt" "call") (param i32) (result i32))
+ (func $h (import "Mt" "h") (result i32))
+
+ (table funcref (elem $g $g $g $h $f))
+ (func $g (result i32) (i32.const 5))
+
+ (export "Mt.call" (func $f))
+ (func (export "call Mt.call") (param i32) (result i32)
+ (call $f (local.get 0))
+ )
+ (func (export "call") (param i32) (result i32)
+ (call_indirect (type 1) (local.get 0))
+ )
+)
+
+(assert_return (invoke $Mt "call" (i32.const 2)) (i32.const 4))
+(assert_return (invoke $Nt "Mt.call" (i32.const 2)) (i32.const 4))
+(assert_return (invoke $Nt "call" (i32.const 2)) (i32.const 5))
+(assert_return (invoke $Nt "call Mt.call" (i32.const 2)) (i32.const 4))
+
+(assert_trap (invoke $Mt "call" (i32.const 1)) "uninitialized")
+(assert_trap (invoke $Nt "Mt.call" (i32.const 1)) "uninitialized")
+(assert_return (invoke $Nt "call" (i32.const 1)) (i32.const 5))
+(assert_trap (invoke $Nt "call Mt.call" (i32.const 1)) "uninitialized")
+
+(assert_trap (invoke $Mt "call" (i32.const 0)) "uninitialized")
+(assert_trap (invoke $Nt "Mt.call" (i32.const 0)) "uninitialized")
+(assert_return (invoke $Nt "call" (i32.const 0)) (i32.const 5))
+(assert_trap (invoke $Nt "call Mt.call" (i32.const 0)) "uninitialized")
+
+(assert_trap (invoke $Mt "call" (i32.const 20)) "undefined")
+(assert_trap (invoke $Nt "Mt.call" (i32.const 20)) "undefined")
+(assert_trap (invoke $Nt "call" (i32.const 7)) "undefined")
+(assert_trap (invoke $Nt "call Mt.call" (i32.const 20)) "undefined")
+
+(assert_return (invoke $Nt "call" (i32.const 3)) (i32.const -4))
+(assert_trap (invoke $Nt "call" (i32.const 4)) "indirect call")
+
+(module $Ot
+ (type (func (result i32)))
+
+ (func $h (import "Mt" "h") (result i32))
+ (table (import "Mt" "tab") 5 funcref)
+ (elem (i32.const 1) $i $h)
+ (func $i (result i32) (i32.const 6))
+
+ (func (export "call") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0))
+ )
+)
+
+(assert_return (invoke $Mt "call" (i32.const 3)) (i32.const 4))
+(assert_return (invoke $Nt "Mt.call" (i32.const 3)) (i32.const 4))
+(assert_return (invoke $Nt "call Mt.call" (i32.const 3)) (i32.const 4))
+(assert_return (invoke $Ot "call" (i32.const 3)) (i32.const 4))
+
+(assert_return (invoke $Mt "call" (i32.const 2)) (i32.const -4))
+(assert_return (invoke $Nt "Mt.call" (i32.const 2)) (i32.const -4))
+(assert_return (invoke $Nt "call" (i32.const 2)) (i32.const 5))
+(assert_return (invoke $Nt "call Mt.call" (i32.const 2)) (i32.const -4))
+(assert_return (invoke $Ot "call" (i32.const 2)) (i32.const -4))
+
+(assert_return (invoke $Mt "call" (i32.const 1)) (i32.const 6))
+(assert_return (invoke $Nt "Mt.call" (i32.const 1)) (i32.const 6))
+(assert_return (invoke $Nt "call" (i32.const 1)) (i32.const 5))
+(assert_return (invoke $Nt "call Mt.call" (i32.const 1)) (i32.const 6))
+(assert_return (invoke $Ot "call" (i32.const 1)) (i32.const 6))
+
+(assert_trap (invoke $Mt "call" (i32.const 0)) "uninitialized")
+(assert_trap (invoke $Nt "Mt.call" (i32.const 0)) "uninitialized")
+(assert_return (invoke $Nt "call" (i32.const 0)) (i32.const 5))
+(assert_trap (invoke $Nt "call Mt.call" (i32.const 0)) "uninitialized")
+(assert_trap (invoke $Ot "call" (i32.const 0)) "uninitialized")
+
+(assert_trap (invoke $Ot "call" (i32.const 20)) "undefined")
+
+(module
+ (table (import "Mt" "tab") 0 funcref)
+ (elem (i32.const 9) $f)
+ (func $f)
+)
+
+(module $G1 (global (export "g") i32 (i32.const 5)))
+(register "G1" $G1)
+(module $G2
+ (global (import "G1" "g") i32)
+ (global (export "g") i32 (global.get 0))
+)
+(assert_return (get $G2 "g") (i32.const 5))
+
+(assert_unlinkable
+ (module
+ (table (import "Mt" "tab") 0 funcref)
+ (elem (i32.const 10) $f)
+ (func $f)
+ )
+ "elements segment does not fit"
+)
+
+(assert_unlinkable
+ (module
+ (table (import "Mt" "tab") 10 funcref)
+ (memory (import "Mt" "mem") 1) ;; does not exist
+ (func $f (result i32) (i32.const 0))
+ (elem (i32.const 7) $f)
+ (elem (i32.const 9) $f)
+ )
+ "unknown import"
+)
+(assert_trap (invoke $Mt "call" (i32.const 7)) "uninitialized")
+
+;; Unlike in the v1 spec, the elements stored before an out-of-bounds access
+;; persist after the instantiation failure.
+(assert_unlinkable
+ (module
+ (table (import "Mt" "tab") 10 funcref)
+ (func $f (result i32) (i32.const 0))
+ (elem (i32.const 7) $f)
+ (elem (i32.const 12) $f) ;; out of bounds
+ )
+ "elements segment does not fit"
+)
+(assert_return (invoke $Mt "call" (i32.const 7)) (i32.const 0))
+
+(assert_unlinkable
+ (module
+ (table (import "Mt" "tab") 10 funcref)
+ (func $f (result i32) (i32.const 0))
+ (elem (i32.const 7) $f)
+ (memory 1)
+ (data (i32.const 0x10000) "d") ;; out of bounds
+ )
+ "data segment does not fit"
+)
+(assert_return (invoke $Mt "call" (i32.const 7)) (i32.const 0))
+
+
+;; Memories
+
+(module $Mm
+ (memory (export "mem") 1 5)
+ (data (i32.const 10) "\00\01\02\03\04\05\06\07\08\09")
+
+ (func (export "load") (param $a i32) (result i32)
+ (i32.load8_u (local.get 0))
+ )
+)
+(register "Mm" $Mm)
+
+(module $Nm
+ (func $loadM (import "Mm" "load") (param i32) (result i32))
+
+ (memory 1)
+ (data (i32.const 10) "\f0\f1\f2\f3\f4\f5")
+
+ (export "Mm.load" (func $loadM))
+ (func (export "load") (param $a i32) (result i32)
+ (i32.load8_u (local.get 0))
+ )
+)
+
+(assert_return (invoke $Mm "load" (i32.const 12)) (i32.const 2))
+(assert_return (invoke $Nm "Mm.load" (i32.const 12)) (i32.const 2))
+(assert_return (invoke $Nm "load" (i32.const 12)) (i32.const 0xf2))
+
+(module $Om
+ (memory (import "Mm" "mem") 1)
+ (data (i32.const 5) "\a0\a1\a2\a3\a4\a5\a6\a7")
+
+ (func (export "load") (param $a i32) (result i32)
+ (i32.load8_u (local.get 0))
+ )
+)
+
+(assert_return (invoke $Mm "load" (i32.const 12)) (i32.const 0xa7))
+(assert_return (invoke $Nm "Mm.load" (i32.const 12)) (i32.const 0xa7))
+(assert_return (invoke $Nm "load" (i32.const 12)) (i32.const 0xf2))
+(assert_return (invoke $Om "load" (i32.const 12)) (i32.const 0xa7))
+
+(module
+ (memory (import "Mm" "mem") 0)
+ (data (i32.const 0xffff) "a")
+)
+
+(assert_unlinkable
+ (module
+ (memory (import "Mm" "mem") 0)
+ (data (i32.const 0x10000) "a")
+ )
+ "data segment does not fit"
+)
+
+(module $Pm
+ (memory (import "Mm" "mem") 1 8)
+
+ (func (export "grow") (param $a i32) (result i32)
+ (memory.grow (local.get 0))
+ )
+)
+
+(assert_return (invoke $Pm "grow" (i32.const 0)) (i32.const 1))
+(assert_return (invoke $Pm "grow" (i32.const 2)) (i32.const 1))
+(assert_return (invoke $Pm "grow" (i32.const 0)) (i32.const 3))
+(assert_return (invoke $Pm "grow" (i32.const 1)) (i32.const 3))
+(assert_return (invoke $Pm "grow" (i32.const 1)) (i32.const 4))
+(assert_return (invoke $Pm "grow" (i32.const 0)) (i32.const 5))
+(assert_return (invoke $Pm "grow" (i32.const 1)) (i32.const -1))
+(assert_return (invoke $Pm "grow" (i32.const 0)) (i32.const 5))
+
+(assert_unlinkable
+ (module
+ (func $host (import "spectest" "print"))
+ (memory (import "Mm" "mem") 1)
+ (table (import "Mm" "tab") 0 funcref) ;; does not exist
+ (data (i32.const 0) "abc")
+ )
+ "unknown import"
+)
+(assert_return (invoke $Mm "load" (i32.const 0)) (i32.const 0))
+
+;; Unlike in v1 spec, bytes written before an out-of-bounds access persist
+;; after the instantiation failure.
+(assert_unlinkable
+ (module
+ (memory (import "Mm" "mem") 1)
+ (data (i32.const 0) "abc")
+ (data (i32.const 0x50000) "d") ;; out of bounds
+ )
+ "data segment does not fit"
+)
+(assert_return (invoke $Mm "load" (i32.const 0)) (i32.const 97))
+
+(assert_unlinkable
+ (module
+ (memory (import "Mm" "mem") 1)
+ (data (i32.const 0) "abc")
+ (table 0 funcref)
+ (func)
+ (elem (i32.const 0) 0) ;; out of bounds
+ )
+ "elements segment does not fit"
+)
+(assert_return (invoke $Mm "load" (i32.const 0)) (i32.const 97))
+
+;; Store is modified if the start function traps.
+(module $Ms
+ (type $t (func (result i32)))
+ (memory (export "memory") 1)
+ (table (export "table") 1 funcref)
+ (func (export "get memory[0]") (type $t)
+ (i32.load8_u (i32.const 0))
+ )
+ (func (export "get table[0]") (type $t)
+ (call_indirect (type $t) (i32.const 0))
+ )
+)
+(register "Ms" $Ms)
+
+(assert_trap
+ (module
+ (import "Ms" "memory" (memory 1))
+ (import "Ms" "table" (table 1 funcref))
+ (data (i32.const 0) "hello")
+ (elem (i32.const 0) $f)
+ (func $f (result i32)
+ (i32.const 0xdead)
+ )
+ (func $main
+ (unreachable)
+ )
+ (start $main)
+ )
+ "unreachable"
+)
+
+(assert_return (invoke $Ms "get memory[0]") (i32.const 104)) ;; 'h'
+(assert_return (invoke $Ms "get table[0]") (i32.const 0xdead))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast.js
new file mode 100644
index 0000000000..729b41d5ca
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/linking.wast.js
@@ -0,0 +1,505 @@
+
+'use strict';
+
+let spectest = {
+ print: console.log.bind(console),
+ print_i32: console.log.bind(console),
+ print_i32_f32: console.log.bind(console),
+ print_f64_f64: console.log.bind(console),
+ print_f32: console.log.bind(console),
+ print_f64: console.log.bind(console),
+ global_i32: 666,
+ global_f32: 666,
+ global_f64: 666,
+ table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
+ memory: new WebAssembly.Memory({initial: 1, maximum: 2})
+};
+let handler = {
+ get(target, prop) {
+ return (prop in target) ? target[prop] : {};
+ }
+};
+let registry = new Proxy({spectest}, handler);
+
+function register(name, instance) {
+ registry[name] = instance.exports;
+}
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function get(instance, name) {
+ let v = instance.exports[name];
+ return (v instanceof WebAssembly.Global) ? v.value : v;
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function run(action) {
+ action();
+}
+
+function assert_malformed(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm decoding failure expected");
+}
+
+function assert_invalid(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm validation failure expected");
+}
+
+function assert_unlinkable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.LinkError) return;
+ }
+ throw new Error("Wasm linking failure expected");
+}
+
+function assert_uninstantiable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+function assert_trap(action) {
+ try { action() } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+let StackOverflow;
+try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
+
+function assert_exhaustion(action) {
+ try { action() } catch (e) {
+ if (e instanceof StackOverflow) return;
+ }
+ throw new Error("Wasm resource exhaustion expected");
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+function assert_return_canonical_nan(action) {
+ let actual = action();
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test that it's a canonical NaN.
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+function assert_return_arithmetic_nan(action) {
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test for specific bitpatterns here.
+ let actual = action();
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+// linking.wast:3
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x00\x07\x88\x80\x80\x80\x00\x01\x04\x63\x61\x6c\x6c\x00\x00\x0a\x93\x80\x80\x80\x00\x02\x84\x80\x80\x80\x00\x00\x10\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b");
+let $Mf = $1;
+
+// linking.wast:7
+register("Mf", $Mf)
+
+// linking.wast:9
+let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x66\x04\x63\x61\x6c\x6c\x00\x00\x03\x84\x80\x80\x80\x00\x03\x00\x00\x00\x07\xa1\x80\x80\x80\x00\x03\x07\x4d\x66\x2e\x63\x61\x6c\x6c\x00\x00\x0c\x63\x61\x6c\x6c\x20\x4d\x66\x2e\x63\x61\x6c\x6c\x00\x01\x04\x63\x61\x6c\x6c\x00\x02\x0a\x9c\x80\x80\x80\x00\x03\x84\x80\x80\x80\x00\x00\x10\x00\x0b\x84\x80\x80\x80\x00\x00\x10\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b");
+let $Nf = $2;
+
+// linking.wast:17
+assert_return(() => call($Mf, "call", []), 2);
+
+// linking.wast:18
+assert_return(() => call($Nf, "Mf.call", []), 2);
+
+// linking.wast:19
+assert_return(() => call($Nf, "call", []), 3);
+
+// linking.wast:20
+assert_return(() => call($Nf, "call Mf.call", []), 2);
+
+// linking.wast:22
+let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x01\x7f\x00\x02\x96\x80\x80\x80\x00\x01\x08\x73\x70\x65\x63\x74\x65\x73\x74\x09\x70\x72\x69\x6e\x74\x5f\x69\x33\x32\x00\x00\x07\x89\x80\x80\x80\x00\x01\x05\x70\x72\x69\x6e\x74\x00\x00");
+
+// linking.wast:26
+register("reexport_f", $3)
+
+// linking.wast:27
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x01\x7e\x00\x02\x94\x80\x80\x80\x00\x01\x0a\x72\x65\x65\x78\x70\x6f\x72\x74\x5f\x66\x05\x70\x72\x69\x6e\x74\x00\x00");
+
+// linking.wast:31
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x02\x94\x80\x80\x80\x00\x01\x0a\x72\x65\x65\x78\x70\x6f\x72\x74\x5f\x66\x05\x70\x72\x69\x6e\x74\x00\x00");
+
+// linking.wast:39
+let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x01\x7f\x00\x03\x84\x80\x80\x80\x00\x03\x00\x00\x01\x06\x8c\x80\x80\x80\x00\x02\x7f\x00\x41\x2a\x0b\x7f\x01\x41\x8e\x01\x0b\x07\xad\x80\x80\x80\x00\x05\x04\x67\x6c\x6f\x62\x03\x00\x03\x67\x65\x74\x00\x00\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x01\x07\x67\x65\x74\x5f\x6d\x75\x74\x00\x01\x07\x73\x65\x74\x5f\x6d\x75\x74\x00\x02\x0a\x9e\x80\x80\x80\x00\x03\x84\x80\x80\x80\x00\x00\x23\x00\x0b\x84\x80\x80\x80\x00\x00\x23\x01\x0b\x86\x80\x80\x80\x00\x00\x20\x00\x24\x01\x0b");
+let $Mg = $4;
+
+// linking.wast:48
+register("Mg", $Mg)
+
+// linking.wast:50
+let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x01\x7f\x00\x02\xbe\x80\x80\x80\x00\x05\x02\x4d\x67\x04\x67\x6c\x6f\x62\x03\x7f\x00\x02\x4d\x67\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x02\x4d\x67\x03\x67\x65\x74\x00\x00\x02\x4d\x67\x07\x67\x65\x74\x5f\x6d\x75\x74\x00\x00\x02\x4d\x67\x07\x73\x65\x74\x5f\x6d\x75\x74\x00\x01\x03\x82\x80\x80\x80\x00\x01\x00\x06\x86\x80\x80\x80\x00\x01\x7f\x00\x41\x2b\x0b\x07\xc9\x80\x80\x80\x00\x07\x07\x4d\x67\x2e\x67\x6c\x6f\x62\x03\x00\x06\x4d\x67\x2e\x67\x65\x74\x00\x00\x04\x67\x6c\x6f\x62\x03\x02\x03\x67\x65\x74\x00\x03\x0b\x4d\x67\x2e\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x01\x0a\x4d\x67\x2e\x67\x65\x74\x5f\x6d\x75\x74\x00\x01\x0a\x4d\x67\x2e\x73\x65\x74\x5f\x6d\x75\x74\x00\x02\x0a\x8a\x80\x80\x80\x00\x01\x84\x80\x80\x80\x00\x00\x23\x02\x0b");
+let $Ng = $5;
+
+// linking.wast:67
+assert_return(() => get($Mg, "glob"), 42);
+
+// linking.wast:68
+assert_return(() => get($Ng, "Mg.glob"), 42);
+
+// linking.wast:69
+assert_return(() => get($Ng, "glob"), 43);
+
+// linking.wast:70
+assert_return(() => call($Mg, "get", []), 42);
+
+// linking.wast:71
+assert_return(() => call($Ng, "Mg.get", []), 42);
+
+// linking.wast:72
+assert_return(() => call($Ng, "get", []), 43);
+
+// linking.wast:74
+run(() => call(instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x91\x80\x80\x80\x00\x01\x03\x24\x4d\x67\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x87\x80\x80\x80\x00\x01\x03\x72\x75\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x02\x40\x23\x00\x01\x41\x8e\x01\x01\x46\x45\x0d\x00\x0f\x0b\x00\x0b", exports("$Mg", $Mg)), "run", [])); // assert_return(() => get($Mg, "mut_glob"), 142)
+
+// linking.wast:75
+run(() => call(instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x94\x80\x80\x80\x00\x01\x03\x24\x4e\x67\x0b\x4d\x67\x2e\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x87\x80\x80\x80\x00\x01\x03\x72\x75\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x02\x40\x23\x00\x01\x41\x8e\x01\x01\x46\x45\x0d\x00\x0f\x0b\x00\x0b", exports("$Ng", $Ng)), "run", [])); // assert_return(() => get($Ng, "Mg.mut_glob"), 142)
+
+// linking.wast:76
+assert_return(() => call($Mg, "get_mut", []), 142);
+
+// linking.wast:77
+assert_return(() => call($Ng, "Mg.get_mut", []), 142);
+
+// linking.wast:79
+assert_return(() => call($Mg, "set_mut", [241]));
+
+// linking.wast:80
+run(() => call(instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x91\x80\x80\x80\x00\x01\x03\x24\x4d\x67\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x87\x80\x80\x80\x00\x01\x03\x72\x75\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x02\x40\x23\x00\x01\x41\xf1\x01\x01\x46\x45\x0d\x00\x0f\x0b\x00\x0b", exports("$Mg", $Mg)), "run", [])); // assert_return(() => get($Mg, "mut_glob"), 241)
+
+// linking.wast:81
+run(() => call(instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x94\x80\x80\x80\x00\x01\x03\x24\x4e\x67\x0b\x4d\x67\x2e\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x87\x80\x80\x80\x00\x01\x03\x72\x75\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x02\x40\x23\x00\x01\x41\xf1\x01\x01\x46\x45\x0d\x00\x0f\x0b\x00\x0b", exports("$Ng", $Ng)), "run", [])); // assert_return(() => get($Ng, "Mg.mut_glob"), 241)
+
+// linking.wast:82
+assert_return(() => call($Mg, "get_mut", []), 241);
+
+// linking.wast:83
+assert_return(() => call($Ng, "Mg.get_mut", []), 241);
+
+// linking.wast:86
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x90\x80\x80\x80\x00\x01\x02\x4d\x67\x08\x6d\x75\x74\x5f\x67\x6c\x6f\x62\x03\x7f\x00");
+
+// linking.wast:90
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x67\x04\x67\x6c\x6f\x62\x03\x7f\x01");
+
+// linking.wast:97
+let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x84\x80\x80\x80\x00\x03\x00\x00\x02\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x92\x80\x80\x80\x00\x03\x03\x74\x61\x62\x01\x00\x01\x68\x00\x01\x04\x63\x61\x6c\x6c\x00\x02\x09\x8a\x80\x80\x80\x00\x01\x00\x41\x02\x0b\x04\x00\x00\x00\x00\x0a\x9f\x80\x80\x80\x00\x03\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x7c\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+let $Mt = $6;
+
+// linking.wast:110
+register("Mt", $Mt)
+
+// linking.wast:112
+let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x00\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x02\x92\x80\x80\x80\x00\x02\x02\x4d\x74\x04\x63\x61\x6c\x6c\x00\x02\x02\x4d\x74\x01\x68\x00\x01\x03\x84\x80\x80\x80\x00\x03\x01\x02\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x05\x05\x07\xa1\x80\x80\x80\x00\x03\x07\x4d\x74\x2e\x63\x61\x6c\x6c\x00\x00\x0c\x63\x61\x6c\x6c\x20\x4d\x74\x2e\x63\x61\x6c\x6c\x00\x03\x04\x63\x61\x6c\x6c\x00\x04\x09\x8b\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x05\x02\x02\x02\x01\x00\x0a\xa1\x80\x80\x80\x00\x03\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x86\x80\x80\x80\x00\x00\x20\x00\x10\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x01\x00\x0b");
+let $Nt = $7;
+
+// linking.wast:131
+assert_return(() => call($Mt, "call", [2]), 4);
+
+// linking.wast:132
+assert_return(() => call($Nt, "Mt.call", [2]), 4);
+
+// linking.wast:133
+assert_return(() => call($Nt, "call", [2]), 5);
+
+// linking.wast:134
+assert_return(() => call($Nt, "call Mt.call", [2]), 4);
+
+// linking.wast:136
+assert_trap(() => call($Mt, "call", [1]));
+
+// linking.wast:137
+assert_trap(() => call($Nt, "Mt.call", [1]));
+
+// linking.wast:138
+assert_return(() => call($Nt, "call", [1]), 5);
+
+// linking.wast:139
+assert_trap(() => call($Nt, "call Mt.call", [1]));
+
+// linking.wast:141
+assert_trap(() => call($Mt, "call", [0]));
+
+// linking.wast:142
+assert_trap(() => call($Nt, "Mt.call", [0]));
+
+// linking.wast:143
+assert_return(() => call($Nt, "call", [0]), 5);
+
+// linking.wast:144
+assert_trap(() => call($Nt, "call Mt.call", [0]));
+
+// linking.wast:146
+assert_trap(() => call($Mt, "call", [20]));
+
+// linking.wast:147
+assert_trap(() => call($Nt, "Mt.call", [20]));
+
+// linking.wast:148
+assert_trap(() => call($Nt, "call", [7]));
+
+// linking.wast:149
+assert_trap(() => call($Nt, "call Mt.call", [20]));
+
+// linking.wast:151
+assert_return(() => call($Nt, "call", [3]), -4);
+
+// linking.wast:152
+assert_trap(() => call($Nt, "call", [4]));
+
+// linking.wast:154
+let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8a\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x02\x93\x80\x80\x80\x00\x02\x02\x4d\x74\x01\x68\x00\x00\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x05\x03\x83\x80\x80\x80\x00\x02\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x63\x61\x6c\x6c\x00\x02\x09\x88\x80\x80\x80\x00\x01\x00\x41\x01\x0b\x02\x01\x00\x0a\x96\x80\x80\x80\x00\x02\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+let $Ot = $8;
+
+// linking.wast:167
+assert_return(() => call($Mt, "call", [3]), 4);
+
+// linking.wast:168
+assert_return(() => call($Nt, "Mt.call", [3]), 4);
+
+// linking.wast:169
+assert_return(() => call($Nt, "call Mt.call", [3]), 4);
+
+// linking.wast:170
+assert_return(() => call($Ot, "call", [3]), 4);
+
+// linking.wast:172
+assert_return(() => call($Mt, "call", [2]), -4);
+
+// linking.wast:173
+assert_return(() => call($Nt, "Mt.call", [2]), -4);
+
+// linking.wast:174
+assert_return(() => call($Nt, "call", [2]), 5);
+
+// linking.wast:175
+assert_return(() => call($Nt, "call Mt.call", [2]), -4);
+
+// linking.wast:176
+assert_return(() => call($Ot, "call", [2]), -4);
+
+// linking.wast:178
+assert_return(() => call($Mt, "call", [1]), 6);
+
+// linking.wast:179
+assert_return(() => call($Nt, "Mt.call", [1]), 6);
+
+// linking.wast:180
+assert_return(() => call($Nt, "call", [1]), 5);
+
+// linking.wast:181
+assert_return(() => call($Nt, "call Mt.call", [1]), 6);
+
+// linking.wast:182
+assert_return(() => call($Ot, "call", [1]), 6);
+
+// linking.wast:184
+assert_trap(() => call($Mt, "call", [0]));
+
+// linking.wast:185
+assert_trap(() => call($Nt, "Mt.call", [0]));
+
+// linking.wast:186
+assert_return(() => call($Nt, "call", [0]), 5);
+
+// linking.wast:187
+assert_trap(() => call($Nt, "call Mt.call", [0]));
+
+// linking.wast:188
+assert_trap(() => call($Ot, "call", [0]));
+
+// linking.wast:190
+assert_trap(() => call($Ot, "call", [20]));
+
+// linking.wast:192
+let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x09\x87\x80\x80\x80\x00\x01\x00\x41\x09\x0b\x01\x00\x0a\x88\x80\x80\x80\x00\x01\x82\x80\x80\x80\x00\x00\x0b");
+
+// linking.wast:198
+let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x06\x86\x80\x80\x80\x00\x01\x7f\x00\x41\x05\x0b\x07\x85\x80\x80\x80\x00\x01\x01\x67\x03\x00");
+let $G1 = $10;
+
+// linking.wast:199
+register("G1", $G1)
+
+// linking.wast:200
+let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x89\x80\x80\x80\x00\x01\x02\x47\x31\x01\x67\x03\x7f\x00\x06\x86\x80\x80\x80\x00\x01\x7f\x00\x23\x00\x0b\x07\x85\x80\x80\x80\x00\x01\x01\x67\x03\x01");
+let $G2 = $11;
+
+// linking.wast:204
+assert_return(() => get($G2, "g"), 5);
+
+// linking.wast:206
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x09\x87\x80\x80\x80\x00\x01\x00\x41\x0a\x0b\x01\x00\x0a\x88\x80\x80\x80\x00\x01\x82\x80\x80\x80\x00\x00\x0b");
+
+// linking.wast:215
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x02\x96\x80\x80\x80\x00\x02\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x0a\x02\x4d\x74\x03\x6d\x65\x6d\x02\x00\x01\x03\x82\x80\x80\x80\x00\x01\x00\x09\x8d\x80\x80\x80\x00\x02\x00\x41\x07\x0b\x01\x00\x00\x41\x09\x0b\x01\x00\x0a\x8a\x80\x80\x80\x00\x01\x84\x80\x80\x80\x00\x00\x41\x00\x0b");
+
+// linking.wast:225
+assert_trap(() => call($Mt, "call", [7]));
+
+// linking.wast:229
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x0a\x03\x82\x80\x80\x80\x00\x01\x00\x09\x8d\x80\x80\x80\x00\x02\x00\x41\x07\x0b\x01\x00\x00\x41\x0c\x0b\x01\x00\x0a\x8a\x80\x80\x80\x00\x01\x84\x80\x80\x80\x00\x00\x41\x00\x0b");
+
+// linking.wast:238
+assert_return(() => call($Mt, "call", [7]), 0);
+
+// linking.wast:240
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x74\x03\x74\x61\x62\x01\x70\x00\x0a\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x09\x87\x80\x80\x80\x00\x01\x00\x41\x07\x0b\x01\x00\x0a\x8a\x80\x80\x80\x00\x01\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x0b\x89\x80\x80\x80\x00\x01\x00\x41\x80\x80\x04\x0b\x01\x64");
+
+// linking.wast:250
+assert_return(() => call($Mt, "call", [7]), 0);
+
+// linking.wast:255
+let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x05\x07\x8e\x80\x80\x80\x00\x02\x03\x6d\x65\x6d\x02\x00\x04\x6c\x6f\x61\x64\x00\x00\x0a\x8d\x80\x80\x80\x00\x01\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x90\x80\x80\x80\x00\x01\x00\x41\x0a\x0b\x0a\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09");
+let $Mm = $12;
+
+// linking.wast:263
+register("Mm", $Mm)
+
+// linking.wast:265
+let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x04\x6c\x6f\x61\x64\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x92\x80\x80\x80\x00\x02\x07\x4d\x6d\x2e\x6c\x6f\x61\x64\x00\x00\x04\x6c\x6f\x61\x64\x00\x01\x0a\x8d\x80\x80\x80\x00\x01\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x8c\x80\x80\x80\x00\x01\x00\x41\x0a\x0b\x06\xf0\xf1\xf2\xf3\xf4\xf5");
+let $Nm = $13;
+
+// linking.wast:277
+assert_return(() => call($Mm, "load", [12]), 2);
+
+// linking.wast:278
+assert_return(() => call($Nm, "Mm.load", [12]), 2);
+
+// linking.wast:279
+assert_return(() => call($Nm, "load", [12]), 242);
+
+// linking.wast:281
+let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x01\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x6c\x6f\x61\x64\x00\x00\x0a\x8d\x80\x80\x80\x00\x01\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x8e\x80\x80\x80\x00\x01\x00\x41\x05\x0b\x08\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7");
+let $Om = $14;
+
+// linking.wast:290
+assert_return(() => call($Mm, "load", [12]), 167);
+
+// linking.wast:291
+assert_return(() => call($Nm, "Mm.load", [12]), 167);
+
+// linking.wast:292
+assert_return(() => call($Nm, "load", [12]), 242);
+
+// linking.wast:293
+assert_return(() => call($Om, "load", [12]), 167);
+
+// linking.wast:295
+let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x00\x0b\x89\x80\x80\x80\x00\x01\x00\x41\xff\xff\x03\x0b\x01\x61");
+
+// linking.wast:300
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x00\x0b\x89\x80\x80\x80\x00\x01\x00\x41\x80\x80\x04\x0b\x01\x61");
+
+// linking.wast:308
+let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x86\x80\x80\x80\x00\x01\x60\x01\x7f\x01\x7f\x02\x8c\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x01\x01\x08\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x67\x72\x6f\x77\x00\x00\x0a\x8c\x80\x80\x80\x00\x01\x86\x80\x80\x80\x00\x00\x20\x00\x40\x00\x0b");
+let $Pm = $16;
+
+// linking.wast:316
+assert_return(() => call($Pm, "grow", [0]), 1);
+
+// linking.wast:317
+assert_return(() => call($Pm, "grow", [2]), 1);
+
+// linking.wast:318
+assert_return(() => call($Pm, "grow", [0]), 3);
+
+// linking.wast:319
+assert_return(() => call($Pm, "grow", [1]), 3);
+
+// linking.wast:320
+assert_return(() => call($Pm, "grow", [1]), 4);
+
+// linking.wast:321
+assert_return(() => call($Pm, "grow", [0]), 5);
+
+// linking.wast:322
+assert_return(() => call($Pm, "grow", [1]), -1);
+
+// linking.wast:323
+assert_return(() => call($Pm, "grow", [0]), 5);
+
+// linking.wast:325
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\xa7\x80\x80\x80\x00\x03\x08\x73\x70\x65\x63\x74\x65\x73\x74\x05\x70\x72\x69\x6e\x74\x00\x00\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x01\x02\x4d\x6d\x03\x74\x61\x62\x01\x70\x00\x00\x0b\x89\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x03\x61\x62\x63");
+
+// linking.wast:334
+assert_return(() => call($Mm, "load", [0]), 0);
+
+// linking.wast:338
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x01\x0b\x91\x80\x80\x80\x00\x02\x00\x41\x00\x0b\x03\x61\x62\x63\x00\x41\x80\x80\x14\x0b\x01\x64");
+
+// linking.wast:346
+assert_return(() => call($Mm, "load", [0]), 97);
+
+// linking.wast:348
+assert_unlinkable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x02\x8b\x80\x80\x80\x00\x01\x02\x4d\x6d\x03\x6d\x65\x6d\x02\x00\x01\x03\x82\x80\x80\x80\x00\x01\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x00\x09\x87\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x01\x00\x0a\x88\x80\x80\x80\x00\x01\x82\x80\x80\x80\x00\x00\x0b\x0b\x89\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x03\x61\x62\x63");
+
+// linking.wast:358
+assert_return(() => call($Mm, "load", [0]), 97);
+
+// linking.wast:361
+let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\xb1\x80\x80\x80\x00\x04\x06\x6d\x65\x6d\x6f\x72\x79\x02\x00\x05\x74\x61\x62\x6c\x65\x01\x00\x0d\x67\x65\x74\x20\x6d\x65\x6d\x6f\x72\x79\x5b\x30\x5d\x00\x00\x0c\x67\x65\x74\x20\x74\x61\x62\x6c\x65\x5b\x30\x5d\x00\x01\x0a\x99\x80\x80\x80\x00\x02\x87\x80\x80\x80\x00\x00\x41\x00\x2d\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x41\x00\x11\x00\x00\x0b");
+let $Ms = $17;
+
+// linking.wast:372
+register("Ms", $Ms)
+
+// linking.wast:374
+assert_uninstantiable("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x02\x9b\x80\x80\x80\x00\x02\x02\x4d\x73\x06\x6d\x65\x6d\x6f\x72\x79\x02\x00\x01\x02\x4d\x73\x05\x74\x61\x62\x6c\x65\x01\x70\x00\x01\x03\x83\x80\x80\x80\x00\x02\x00\x01\x08\x81\x80\x80\x80\x00\x01\x09\x87\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x01\x00\x0a\x94\x80\x80\x80\x00\x02\x86\x80\x80\x80\x00\x00\x41\xad\xbd\x03\x0b\x83\x80\x80\x80\x00\x00\x00\x0b\x0b\x8b\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x05\x68\x65\x6c\x6c\x6f");
+
+// linking.wast:391
+assert_return(() => call($Ms, "get memory[0]", []), 104);
+
+// linking.wast:392
+assert_return(() => call($Ms, "get table[0]", []), 57005);
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast
new file mode 100644
index 0000000000..b5f25c009b
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast
@@ -0,0 +1,5685 @@
+;;
+;; Generated by ../meta/generate_memory_copy.js
+;;
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (func (export "test")
+ (nop))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (func (export "test")
+ (memory.copy (i32.const 13) (i32.const 2) (i32.const 3)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (func (export "test")
+ (memory.copy (i32.const 25) (i32.const 15) (i32.const 2)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (func (export "test")
+ (memory.copy (i32.const 13) (i32.const 25) (i32.const 3)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (func (export "test")
+ (memory.copy (i32.const 20) (i32.const 22) (i32.const 4)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (func (export "test")
+ (memory.copy (i32.const 25) (i32.const 1) (i32.const 3)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (func (export "test")
+ (memory.copy (i32.const 10) (i32.const 12) (i32.const 7)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (func (export "test")
+ (memory.copy (i32.const 12) (i32.const 10) (i32.const 7)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 0) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 65516) (i32.const 0) (i32.const 40))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 218)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 417)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 616)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 815)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1014)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1213)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1412)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1611)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1810)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2009)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2208)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2407)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2606)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2805)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3004)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3203)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3402)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3601)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3800)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3999)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25690)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25889)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26088)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26287)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26685)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26884)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27083)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27282)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27481)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27680)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27879)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28078)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28277)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28476)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28675)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28874)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29073)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29272)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29471)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29670)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29869)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30068)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30267)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30466)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30665)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30864)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31063)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31262)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31461)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31660)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31859)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32058)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32257)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32456)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32655)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32854)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33053)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33252)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33451)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33650)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33849)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34048)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34247)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34446)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34645)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34844)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35043)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35242)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35441)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35640)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35839)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36038)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36237)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36436)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36635)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36834)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37033)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37232)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37431)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37630)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37829)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38028)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38227)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38426)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38625)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38824)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39023)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39222)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39421)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39620)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39819)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40018)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40217)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40416)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40615)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40814)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41013)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41212)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41411)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41610)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41809)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42008)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42207)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42406)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42605)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42804)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43003)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43202)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43401)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43600)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43799)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43998)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44197)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44396)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44595)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44794)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44993)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45192)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45391)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45590)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45789)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45988)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46187)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46386)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46585)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46784)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46983)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47182)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47381)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47580)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47779)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47978)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48177)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48376)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48575)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48774)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48973)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49172)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49371)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49570)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49769)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49968)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50167)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50366)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50565)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50764)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50963)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51162)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51361)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51560)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51759)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51958)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52157)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52356)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52555)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52754)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52953)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53152)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53351)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53550)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53749)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53948)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54147)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54346)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54545)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54744)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54943)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55142)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55341)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55540)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55739)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55938)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56137)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56336)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56535)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56734)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56933)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57132)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57331)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57530)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57729)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57928)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58127)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58326)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58525)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58724)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58923)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59122)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59321)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59520)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59719)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59918)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60117)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60316)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60714)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60913)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61112)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61311)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 0) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13\14")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 65515) (i32.const 0) (i32.const 39))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 20))
+(assert_return (invoke "load8_u" (i32.const 219)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 418)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 617)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 816)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1015)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1214)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1413)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1612)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1811)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2010)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2209)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2408)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2607)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2806)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3005)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3204)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3403)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3602)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3801)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4000)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4199)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4398)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4597)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4796)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4995)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5194)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5393)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5592)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5791)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5990)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6189)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6388)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6587)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6786)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6985)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7184)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7383)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7582)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7781)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7980)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8179)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8378)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8577)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8776)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8975)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9174)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9373)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9572)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9771)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9970)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10169)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10368)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10567)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10766)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10965)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11164)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11363)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11562)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11761)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11960)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12159)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12358)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12557)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12756)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12955)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13154)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13353)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13552)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13751)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13950)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14149)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14348)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14547)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14746)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14945)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15144)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15343)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15542)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15741)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15940)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16139)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16338)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16537)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16736)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16935)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17134)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17333)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17532)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17731)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17930)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18129)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18328)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18527)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18726)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18925)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19124)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19323)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19522)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19721)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19920)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20119)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20318)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20517)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20716)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20915)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21114)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21313)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21512)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21711)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21910)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22109)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22308)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22507)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22706)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22905)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23104)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23303)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23502)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23701)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23900)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24099)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24298)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24497)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24696)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24895)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25094)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25293)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25492)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25691)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25890)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26089)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26288)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26487)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26686)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26885)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27084)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27283)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27482)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27681)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27880)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28079)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28278)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28477)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28676)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28875)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29074)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29273)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29472)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29671)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29870)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30069)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30268)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30467)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30666)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30865)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31064)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31263)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31462)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31661)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31860)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32059)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32258)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32457)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32656)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32855)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33054)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33253)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33452)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33651)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33850)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34049)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34248)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34447)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34646)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34845)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35044)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35243)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35442)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35641)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35840)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36039)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36238)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36437)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36636)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36835)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37034)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37233)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37432)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37631)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37830)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38029)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38228)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38427)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38626)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38825)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39024)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39223)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39422)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39621)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39820)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40019)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40218)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40417)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40616)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40815)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41014)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41213)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41412)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41611)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41810)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42009)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42208)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42407)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42606)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42805)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43004)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43203)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43402)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43601)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43800)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43999)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 20))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 0) (i32.const 65516) (i32.const 40))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 218)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 417)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 616)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 815)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1014)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1213)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1412)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1611)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1810)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2009)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2208)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2407)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2606)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2805)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3004)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3203)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3402)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3601)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3800)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3999)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25690)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25889)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26088)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26287)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26685)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26884)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27083)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27282)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27481)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27680)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27879)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28078)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28277)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28476)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28675)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28874)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29073)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29272)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29471)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29670)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29869)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30068)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30267)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30466)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30665)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30864)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31063)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31262)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31461)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31660)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31859)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32058)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32257)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32456)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32655)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32854)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33053)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33252)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33451)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33650)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33849)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34048)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34247)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34446)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34645)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34844)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35043)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35242)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35441)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35640)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35839)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36038)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36237)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36436)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36635)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36834)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37033)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37232)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37431)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37630)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37829)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38028)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38227)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38426)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38625)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38824)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39023)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39222)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39421)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39620)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39819)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40018)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40217)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40416)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40615)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40814)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41013)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41212)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41411)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41610)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41809)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42008)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42207)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42406)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42605)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42804)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43003)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43202)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43401)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43600)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43799)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43998)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44197)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44396)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44595)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44794)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44993)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45192)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45391)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45590)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45789)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45988)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46187)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46386)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46585)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46784)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46983)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47182)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47381)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47580)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47779)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47978)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48177)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48376)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48575)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48774)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48973)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49172)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49371)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49570)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49769)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49968)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50167)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50366)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50565)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50764)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50963)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51162)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51361)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51560)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51759)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51958)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52157)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52356)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52555)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52754)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52953)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53152)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53351)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53550)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53749)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53948)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54147)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54346)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54545)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54744)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54943)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55142)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55341)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55540)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55739)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55938)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56137)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56336)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56535)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56734)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56933)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57132)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57331)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57530)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57729)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57928)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58127)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58326)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58525)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58724)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58923)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59122)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59321)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59520)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59719)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59918)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60117)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60316)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60714)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60913)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61112)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61311)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 65515) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13\14")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 0) (i32.const 65515) (i32.const 39))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 20))
+(assert_return (invoke "load8_u" (i32.const 219)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 418)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 617)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 816)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1015)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1214)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1413)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1612)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1811)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2010)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2209)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2408)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2607)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2806)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3005)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3204)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3403)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3602)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3801)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4000)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4199)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4398)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4597)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4796)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4995)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5194)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5393)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5592)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5791)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5990)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6189)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6388)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6587)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6786)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6985)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7184)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7383)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7582)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7781)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7980)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8179)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8378)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8577)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8776)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8975)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9174)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9373)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9572)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9771)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9970)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10169)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10368)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10567)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10766)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10965)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11164)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11363)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11562)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11761)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11960)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12159)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12358)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12557)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12756)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12955)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13154)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13353)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13552)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13751)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13950)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14149)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14348)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14547)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14746)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14945)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15144)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15343)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15542)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15741)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15940)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16139)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16338)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16537)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16736)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16935)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17134)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17333)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17532)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17731)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17930)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18129)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18328)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18527)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18726)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18925)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19124)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19323)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19522)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19721)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19920)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20119)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20318)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20517)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20716)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20915)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21114)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21313)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21512)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21711)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21910)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22109)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22308)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22507)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22706)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22905)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23104)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23303)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23502)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23701)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23900)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24099)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24298)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24497)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24696)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24895)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25094)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25293)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25492)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25691)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25890)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26089)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26288)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26487)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26686)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26885)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27084)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27283)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27482)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27681)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27880)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28079)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28278)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28477)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28676)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28875)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29074)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29273)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29472)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29671)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29870)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30069)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30268)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30467)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30666)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30865)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31064)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31263)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31462)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31661)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31860)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32059)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32258)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32457)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32656)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32855)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33054)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33253)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33452)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33651)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33850)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34049)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34248)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34447)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34646)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34845)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35044)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35243)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35442)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35641)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35840)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36039)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36238)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36437)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36636)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36835)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37034)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37233)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37432)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37631)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37830)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38029)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38228)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38427)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38626)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38825)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39024)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39223)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39422)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39621)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39820)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40019)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40218)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40417)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40616)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40815)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41014)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41213)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41412)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41611)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41810)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42009)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42208)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42407)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42606)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42805)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43004)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43203)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43402)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43601)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43800)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43999)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 20))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 65486) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 65516) (i32.const 65486) (i32.const 40))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65487)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65488)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65489)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65491)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65492)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65493)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65494)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65495)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65496)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65497)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65498)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65499)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65500)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65501)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65502)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65503)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65504)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65505)) (i32.const 19))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 65486) (i32.const 65516) (i32.const 40))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65487)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65488)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65489)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65491)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65492)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65493)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65494)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65495)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65496)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65497)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65498)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65499)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65500)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65501)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65502)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65503)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65504)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65505)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 65506) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 65516) (i32.const 65506) (i32.const 40))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65507)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65508)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65509)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65510)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65511)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65512)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65513)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65514)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65515)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 19))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 65506) (i32.const 65516) (i32.const 40))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65507)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65508)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65509)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65510)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65511)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65512)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65513)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65514)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65515)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 65516) (i32.const 65516) (i32.const 40))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61490)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61689)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61888)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62087)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62286)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62485)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62684)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62883)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63082)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63281)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63480)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63679)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63878)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64077)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64276)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64475)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64674)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64873)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65072)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65271)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65470)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
+
+(module
+ (memory (export "mem") 1 )
+ (data (i32.const 65516) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 0) (i32.const 65516) (i32.const 4294963200))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 218)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 417)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 616)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 815)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1014)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1213)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1412)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1611)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1810)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2009)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2208)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2407)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2606)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2805)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3004)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3203)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3402)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3601)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3800)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3999)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25690)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25889)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26088)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26287)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26685)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26884)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27083)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27282)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27481)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27680)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27879)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28078)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28277)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28476)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28675)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28874)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29073)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29272)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29471)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29670)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29869)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30068)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30267)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30466)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30665)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30864)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31063)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31262)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31461)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31660)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31859)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32058)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32257)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32456)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32655)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32854)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33053)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33252)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33451)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33650)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33849)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34048)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34247)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34446)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34645)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34844)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35043)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35242)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35441)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35640)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35839)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36038)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36237)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36436)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36635)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36834)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37033)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37232)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37431)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37630)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37829)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38028)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38227)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38426)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38625)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38824)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39023)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39222)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39421)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39620)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39819)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40018)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40217)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40416)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40615)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40814)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41013)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41212)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41411)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41610)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41809)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42008)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42207)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42406)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42605)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42804)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43003)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43202)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43401)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43600)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43799)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43998)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44197)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44396)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44595)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44794)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44993)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45192)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45391)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45590)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45789)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45988)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46187)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46386)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46585)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46784)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46983)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47182)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47381)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47580)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47779)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47978)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48177)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48376)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48575)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48774)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48973)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49172)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49371)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49570)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49769)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49968)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50167)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50366)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50565)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50764)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50963)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51162)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51361)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51560)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51759)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51958)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52157)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52356)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52555)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52754)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52953)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53152)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53351)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53550)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53749)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53948)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54147)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54346)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54545)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54744)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54943)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55142)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55341)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55540)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55739)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55938)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56137)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56336)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56535)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56734)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56933)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57132)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57331)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57530)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57729)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57928)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58127)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58326)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58525)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58724)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58923)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59122)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59321)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59520)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59719)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59918)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60117)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60316)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60714)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60913)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61112)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61311)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65517)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 65518)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 65519)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 65520)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 65521)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 65522)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 65523)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 65524)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 65525)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 65526)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 65527)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 65528)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 65529)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 65530)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 65531)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 65532)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 65533)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 65534)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 65535)) (i32.const 19))
+
+(module
+ (memory (export "mem") 1 1 )
+ (data (i32.const 61440) "\00\01\02\03\04\05\06\07\08\09\0a\0b\0c\0d\0e\0f\10\11\12\13")
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (memory.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(assert_trap (invoke "run" (i32.const 65516) (i32.const 61440) (i32.const 4294967040))
+ "out of bounds")
+
+(assert_return (invoke "load8_u" (i32.const 198)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 397)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 596)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 795)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 994)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1193)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1392)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1591)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1790)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1989)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2188)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2387)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2586)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2785)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2984)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3183)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3382)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3581)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3780)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 3979)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4178)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4377)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4576)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4775)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 4974)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5173)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5372)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5571)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5770)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 5969)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6168)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6367)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6566)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6765)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 6964)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7163)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7362)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7561)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7760)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7959)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8158)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8357)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8556)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8755)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8954)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9153)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9352)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9551)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9750)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9949)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10148)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10347)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10546)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10745)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10944)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11143)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11342)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11541)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11740)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11939)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12138)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12337)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12536)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12735)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12934)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13133)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13332)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13531)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13730)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 13929)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14128)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14327)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14526)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14725)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14924)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15123)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15322)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15521)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15720)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 15919)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16118)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16317)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16516)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16715)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 16914)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17113)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17312)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17511)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17710)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 17909)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18108)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18307)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18506)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18705)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18904)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19103)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19302)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19501)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19700)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19899)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20098)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20297)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20496)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20695)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20894)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21093)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21292)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21491)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21690)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21889)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22088)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22287)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22486)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22685)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22884)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23083)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23282)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23481)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23680)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23879)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24078)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24277)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24476)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24675)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24874)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25073)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25272)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25471)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25670)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25869)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26068)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26267)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26466)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26665)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26864)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27063)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27262)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27461)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27660)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27859)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28058)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28257)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28456)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28655)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28854)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29053)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29252)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29451)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29650)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29849)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30048)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30247)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30446)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30645)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 30844)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31043)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31242)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31441)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31640)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 31839)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32038)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32237)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32436)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32635)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 32834)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33033)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33232)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33431)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33630)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 33829)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34028)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34227)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34426)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34625)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 34824)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35023)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35222)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35421)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35620)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 35819)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36018)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36217)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36416)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36615)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 36814)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37013)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37212)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37411)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37610)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 37809)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38008)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38207)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38406)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38605)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 38804)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39003)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39202)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39401)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39600)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39799)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 39998)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40197)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40396)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40595)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40794)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 40993)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41192)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41391)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41590)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41789)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 41988)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42187)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42386)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42585)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42784)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 42983)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43182)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43381)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43580)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43779)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 43978)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44177)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44376)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44575)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44774)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 44973)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45172)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45371)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45570)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45769)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 45968)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46167)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46366)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46565)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46764)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 46963)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47162)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47361)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47560)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47759)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 47958)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48157)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48356)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48555)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48754)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 48953)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49152)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49351)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49550)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49749)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 49948)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50147)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50346)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50545)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50744)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 50943)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51142)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51341)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51540)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51739)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 51938)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52137)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52336)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52535)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52734)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 52933)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53132)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53331)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53530)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53729)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 53928)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54127)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54326)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54525)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54724)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 54923)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55122)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55321)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55520)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55719)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 55918)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56117)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56316)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56515)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56714)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 56913)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57112)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57311)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 57908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 58903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 59898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 60893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61440)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61441)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 61442)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 61443)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 61444)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 61445)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 61446)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 61447)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 61448)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 61449)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 61450)) (i32.const 10))
+(assert_return (invoke "load8_u" (i32.const 61451)) (i32.const 11))
+(assert_return (invoke "load8_u" (i32.const 61452)) (i32.const 12))
+(assert_return (invoke "load8_u" (i32.const 61453)) (i32.const 13))
+(assert_return (invoke "load8_u" (i32.const 61454)) (i32.const 14))
+(assert_return (invoke "load8_u" (i32.const 61455)) (i32.const 15))
+(assert_return (invoke "load8_u" (i32.const 61456)) (i32.const 16))
+(assert_return (invoke "load8_u" (i32.const 61457)) (i32.const 17))
+(assert_return (invoke "load8_u" (i32.const 61458)) (i32.const 18))
+(assert_return (invoke "load8_u" (i32.const 61459)) (i32.const 19))
+(assert_return (invoke "load8_u" (i32.const 61510)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61709)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 61908)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62107)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62306)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62505)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62704)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 62903)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63102)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63301)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63500)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63699)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 63898)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64097)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64296)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64495)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64694)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 64893)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65092)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65291)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 65490)) (i32.const 0))
+
+(assert_invalid
+ (module
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (i32.const 20) (i32.const 30))))
+ "unknown memory 0")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (i32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (i32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (i32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (f32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (f32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (f32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (f32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (i64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (i64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (i64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (i64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (f64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (f64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (f64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i32.const 10) (f64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (i32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (i32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (i32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (i32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (f32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (f32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (f32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (f32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (i64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (i64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (i64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (i64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (f64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (f64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (f64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f32.const 10) (f64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (i32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (i32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (i32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (i32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (f32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (f32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (f32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (f32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (i64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (i64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (i64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (i64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (f64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (f64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (f64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (i64.const 10) (f64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (i32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (i32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (i32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (i32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (f32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (f32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (f32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (f32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (i64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (i64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (i64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (i64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (f64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (f64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (f64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.copy (f64.const 10) (f64.const 20) (f64.const 30))))
+ "type mismatch")
+
+
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.fill (i32.const 10) (i32.const 0x55) (i32.const 10))
+ (memory.copy (i32.const 9) (i32.const 10) (i32.const 5)))
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+)
+(invoke "test")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 9) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 9) (i32.const 20) (i32.const 85))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 20) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.fill (i32.const 10) (i32.const 0x55) (i32.const 10))
+ (memory.copy (i32.const 16) (i32.const 15) (i32.const 5)))
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+)
+(invoke "test")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 10) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 10) (i32.const 21) (i32.const 85))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 21) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.copy (i32.const 0xFF00) (i32.const 0x8000) (i32.const 257))))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.copy (i32.const 0xFFFFFF00) (i32.const 0x4000) (i32.const 257))))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.copy (i32.const 0x8000) (i32.const 0xFF00) (i32.const 257))))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.copy (i32.const 0x4000) (i32.const 0xFFFFFF00) (i32.const 257))))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.fill (i32.const 0x0000) (i32.const 0x55) (i32.const 0x8000))
+ (memory.fill (i32.const 0x8000) (i32.const 0xAA) (i32.const 0x8000))
+ (memory.copy (i32.const 0x9000) (i32.const 0x7000) (i32.const 0)))
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+)
+(invoke "test")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 32768) (i32.const 85))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 32768) (i32.const 65536) (i32.const 170))
+ (i32.const -1))
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.copy (i32.const 0x10000) (i32.const 0x7000) (i32.const 0))))
+(invoke "test")
+
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.copy (i32.const 0x9000) (i32.const 0x10000) (i32.const 0))))
+(invoke "test")
+
+(module
+ (memory 1 1)
+ (func (export "test")
+ (memory.fill (i32.const 17767) (i32.const 1) (i32.const 1344))
+ (memory.fill (i32.const 39017) (i32.const 2) (i32.const 1055))
+ (memory.fill (i32.const 56401) (i32.const 3) (i32.const 988))
+ (memory.fill (i32.const 37962) (i32.const 4) (i32.const 322))
+ (memory.fill (i32.const 7977) (i32.const 5) (i32.const 1994))
+ (memory.fill (i32.const 22714) (i32.const 6) (i32.const 3036))
+ (memory.fill (i32.const 16882) (i32.const 7) (i32.const 2372))
+ (memory.fill (i32.const 43491) (i32.const 8) (i32.const 835))
+ (memory.fill (i32.const 124) (i32.const 9) (i32.const 1393))
+ (memory.fill (i32.const 2132) (i32.const 10) (i32.const 2758))
+ (memory.fill (i32.const 8987) (i32.const 11) (i32.const 3098))
+ (memory.fill (i32.const 52711) (i32.const 12) (i32.const 741))
+ (memory.fill (i32.const 3958) (i32.const 13) (i32.const 2823))
+ (memory.fill (i32.const 49715) (i32.const 14) (i32.const 1280))
+ (memory.fill (i32.const 50377) (i32.const 15) (i32.const 1466))
+ (memory.fill (i32.const 20493) (i32.const 16) (i32.const 3158))
+ (memory.fill (i32.const 47665) (i32.const 17) (i32.const 544))
+ (memory.fill (i32.const 12451) (i32.const 18) (i32.const 2669))
+ (memory.fill (i32.const 24869) (i32.const 19) (i32.const 2651))
+ (memory.fill (i32.const 45317) (i32.const 20) (i32.const 1570))
+ (memory.fill (i32.const 43096) (i32.const 21) (i32.const 1691))
+ (memory.fill (i32.const 33886) (i32.const 22) (i32.const 646))
+ (memory.fill (i32.const 48555) (i32.const 23) (i32.const 1858))
+ (memory.fill (i32.const 53453) (i32.const 24) (i32.const 2657))
+ (memory.fill (i32.const 30363) (i32.const 25) (i32.const 981))
+ (memory.fill (i32.const 9300) (i32.const 26) (i32.const 1807))
+ (memory.fill (i32.const 50190) (i32.const 27) (i32.const 487))
+ (memory.fill (i32.const 62753) (i32.const 28) (i32.const 530))
+ (memory.fill (i32.const 36316) (i32.const 29) (i32.const 943))
+ (memory.fill (i32.const 6768) (i32.const 30) (i32.const 381))
+ (memory.fill (i32.const 51262) (i32.const 31) (i32.const 3089))
+ (memory.fill (i32.const 49729) (i32.const 32) (i32.const 658))
+ (memory.fill (i32.const 44540) (i32.const 33) (i32.const 1702))
+ (memory.fill (i32.const 33342) (i32.const 34) (i32.const 1092))
+ (memory.fill (i32.const 50814) (i32.const 35) (i32.const 1410))
+ (memory.fill (i32.const 47594) (i32.const 36) (i32.const 2204))
+ (memory.fill (i32.const 54123) (i32.const 37) (i32.const 2394))
+ (memory.fill (i32.const 55183) (i32.const 38) (i32.const 250))
+ (memory.fill (i32.const 22620) (i32.const 39) (i32.const 2097))
+ (memory.fill (i32.const 17132) (i32.const 40) (i32.const 3264))
+ (memory.fill (i32.const 54331) (i32.const 41) (i32.const 3299))
+ (memory.fill (i32.const 39474) (i32.const 42) (i32.const 2796))
+ (memory.fill (i32.const 36156) (i32.const 43) (i32.const 2070))
+ (memory.fill (i32.const 35308) (i32.const 44) (i32.const 2763))
+ (memory.fill (i32.const 32731) (i32.const 45) (i32.const 312))
+ (memory.fill (i32.const 63746) (i32.const 46) (i32.const 192))
+ (memory.fill (i32.const 30974) (i32.const 47) (i32.const 596))
+ (memory.fill (i32.const 16635) (i32.const 48) (i32.const 501))
+ (memory.fill (i32.const 57002) (i32.const 49) (i32.const 686))
+ (memory.fill (i32.const 34299) (i32.const 50) (i32.const 385))
+ (memory.fill (i32.const 60881) (i32.const 51) (i32.const 903))
+ (memory.fill (i32.const 61445) (i32.const 52) (i32.const 2390))
+ (memory.fill (i32.const 46972) (i32.const 53) (i32.const 1441))
+ (memory.fill (i32.const 25973) (i32.const 54) (i32.const 3162))
+ (memory.fill (i32.const 5566) (i32.const 55) (i32.const 2135))
+ (memory.fill (i32.const 35977) (i32.const 56) (i32.const 519))
+ (memory.fill (i32.const 44892) (i32.const 57) (i32.const 3280))
+ (memory.fill (i32.const 46760) (i32.const 58) (i32.const 1678))
+ (memory.fill (i32.const 46607) (i32.const 59) (i32.const 3168))
+ (memory.fill (i32.const 22449) (i32.const 60) (i32.const 1441))
+ (memory.fill (i32.const 58609) (i32.const 61) (i32.const 663))
+ (memory.fill (i32.const 32261) (i32.const 62) (i32.const 1671))
+ (memory.fill (i32.const 3063) (i32.const 63) (i32.const 721))
+ (memory.fill (i32.const 34025) (i32.const 64) (i32.const 84))
+ (memory.fill (i32.const 33338) (i32.const 65) (i32.const 2029))
+ (memory.fill (i32.const 36810) (i32.const 66) (i32.const 29))
+ (memory.fill (i32.const 19147) (i32.const 67) (i32.const 3034))
+ (memory.fill (i32.const 12616) (i32.const 68) (i32.const 1043))
+ (memory.fill (i32.const 18276) (i32.const 69) (i32.const 3324))
+ (memory.fill (i32.const 4639) (i32.const 70) (i32.const 1091))
+ (memory.fill (i32.const 16158) (i32.const 71) (i32.const 1997))
+ (memory.fill (i32.const 18204) (i32.const 72) (i32.const 2259))
+ (memory.fill (i32.const 50532) (i32.const 73) (i32.const 3189))
+ (memory.fill (i32.const 11028) (i32.const 74) (i32.const 1968))
+ (memory.fill (i32.const 15962) (i32.const 75) (i32.const 1455))
+ (memory.fill (i32.const 45406) (i32.const 76) (i32.const 1177))
+ (memory.fill (i32.const 54137) (i32.const 77) (i32.const 1568))
+ (memory.fill (i32.const 33083) (i32.const 78) (i32.const 1642))
+ (memory.fill (i32.const 61028) (i32.const 79) (i32.const 3284))
+ (memory.fill (i32.const 51729) (i32.const 80) (i32.const 223))
+ (memory.fill (i32.const 4361) (i32.const 81) (i32.const 2171))
+ (memory.fill (i32.const 57514) (i32.const 82) (i32.const 1322))
+ (memory.fill (i32.const 55724) (i32.const 83) (i32.const 2648))
+ (memory.fill (i32.const 24091) (i32.const 84) (i32.const 1045))
+ (memory.fill (i32.const 43183) (i32.const 85) (i32.const 3097))
+ (memory.fill (i32.const 32307) (i32.const 86) (i32.const 2796))
+ (memory.fill (i32.const 3811) (i32.const 87) (i32.const 2010))
+ (memory.fill (i32.const 54856) (i32.const 88) (i32.const 0))
+ (memory.fill (i32.const 49941) (i32.const 89) (i32.const 2069))
+ (memory.fill (i32.const 20411) (i32.const 90) (i32.const 2896))
+ (memory.fill (i32.const 33826) (i32.const 91) (i32.const 192))
+ (memory.fill (i32.const 9402) (i32.const 92) (i32.const 2195))
+ (memory.fill (i32.const 12413) (i32.const 93) (i32.const 24))
+ (memory.fill (i32.const 14091) (i32.const 94) (i32.const 577))
+ (memory.fill (i32.const 44058) (i32.const 95) (i32.const 2089))
+ (memory.fill (i32.const 36735) (i32.const 96) (i32.const 3436))
+ (memory.fill (i32.const 23288) (i32.const 97) (i32.const 2765))
+ (memory.fill (i32.const 6392) (i32.const 98) (i32.const 830))
+ (memory.fill (i32.const 33307) (i32.const 99) (i32.const 1938))
+ (memory.fill (i32.const 21941) (i32.const 100) (i32.const 2750))
+ (memory.copy (i32.const 59214) (i32.const 54248) (i32.const 2098))
+ (memory.copy (i32.const 63026) (i32.const 39224) (i32.const 230))
+ (memory.copy (i32.const 51833) (i32.const 23629) (i32.const 2300))
+ (memory.copy (i32.const 6708) (i32.const 23996) (i32.const 639))
+ (memory.copy (i32.const 6990) (i32.const 33399) (i32.const 1097))
+ (memory.copy (i32.const 19403) (i32.const 10348) (i32.const 3197))
+ (memory.copy (i32.const 27308) (i32.const 54406) (i32.const 100))
+ (memory.copy (i32.const 27221) (i32.const 43682) (i32.const 1717))
+ (memory.copy (i32.const 60528) (i32.const 8629) (i32.const 119))
+ (memory.copy (i32.const 5947) (i32.const 2308) (i32.const 658))
+ (memory.copy (i32.const 4787) (i32.const 51631) (i32.const 2269))
+ (memory.copy (i32.const 12617) (i32.const 19197) (i32.const 833))
+ (memory.copy (i32.const 11854) (i32.const 46505) (i32.const 3300))
+ (memory.copy (i32.const 11376) (i32.const 45012) (i32.const 2281))
+ (memory.copy (i32.const 34186) (i32.const 6697) (i32.const 2572))
+ (memory.copy (i32.const 4936) (i32.const 1690) (i32.const 1328))
+ (memory.copy (i32.const 63164) (i32.const 7637) (i32.const 1670))
+ (memory.copy (i32.const 44568) (i32.const 18344) (i32.const 33))
+ (memory.copy (i32.const 43918) (i32.const 22348) (i32.const 1427))
+ (memory.copy (i32.const 46637) (i32.const 49819) (i32.const 1434))
+ (memory.copy (i32.const 63684) (i32.const 8755) (i32.const 834))
+ (memory.copy (i32.const 33485) (i32.const 20131) (i32.const 3317))
+ (memory.copy (i32.const 40575) (i32.const 54317) (i32.const 3201))
+ (memory.copy (i32.const 25812) (i32.const 59254) (i32.const 2452))
+ (memory.copy (i32.const 19678) (i32.const 56882) (i32.const 346))
+ (memory.copy (i32.const 15852) (i32.const 35914) (i32.const 2430))
+ (memory.copy (i32.const 11824) (i32.const 35574) (i32.const 300))
+ (memory.copy (i32.const 59427) (i32.const 13957) (i32.const 3153))
+ (memory.copy (i32.const 34299) (i32.const 60594) (i32.const 1281))
+ (memory.copy (i32.const 8964) (i32.const 12276) (i32.const 943))
+ (memory.copy (i32.const 2827) (i32.const 10425) (i32.const 1887))
+ (memory.copy (i32.const 43194) (i32.const 43910) (i32.const 738))
+ (memory.copy (i32.const 63038) (i32.const 18949) (i32.const 122))
+ (memory.copy (i32.const 24044) (i32.const 44761) (i32.const 1755))
+ (memory.copy (i32.const 22608) (i32.const 14755) (i32.const 702))
+ (memory.copy (i32.const 11284) (i32.const 26579) (i32.const 1830))
+ (memory.copy (i32.const 23092) (i32.const 20471) (i32.const 1064))
+ (memory.copy (i32.const 57248) (i32.const 54770) (i32.const 2631))
+ (memory.copy (i32.const 25492) (i32.const 1025) (i32.const 3113))
+ (memory.copy (i32.const 49588) (i32.const 44220) (i32.const 975))
+ (memory.copy (i32.const 28280) (i32.const 41722) (i32.const 2336))
+ (memory.copy (i32.const 61289) (i32.const 230) (i32.const 2872))
+ (memory.copy (i32.const 22480) (i32.const 52506) (i32.const 2197))
+ (memory.copy (i32.const 40553) (i32.const 9578) (i32.const 1958))
+ (memory.copy (i32.const 29004) (i32.const 20862) (i32.const 2186))
+ (memory.copy (i32.const 53029) (i32.const 43955) (i32.const 1037))
+ (memory.copy (i32.const 25476) (i32.const 35667) (i32.const 1650))
+ (memory.copy (i32.const 58516) (i32.const 45819) (i32.const 1986))
+ (memory.copy (i32.const 38297) (i32.const 5776) (i32.const 1955))
+ (memory.copy (i32.const 28503) (i32.const 55364) (i32.const 2368))
+ (memory.copy (i32.const 62619) (i32.const 18108) (i32.const 1356))
+ (memory.copy (i32.const 50149) (i32.const 13861) (i32.const 382))
+ (memory.copy (i32.const 16904) (i32.const 36341) (i32.const 1900))
+ (memory.copy (i32.const 48098) (i32.const 11358) (i32.const 2807))
+ (memory.copy (i32.const 28512) (i32.const 40362) (i32.const 323))
+ (memory.copy (i32.const 35506) (i32.const 27856) (i32.const 1670))
+ (memory.copy (i32.const 62970) (i32.const 53332) (i32.const 1341))
+ (memory.copy (i32.const 14133) (i32.const 46312) (i32.const 644))
+ (memory.copy (i32.const 29030) (i32.const 19074) (i32.const 496))
+ (memory.copy (i32.const 44952) (i32.const 47577) (i32.const 2784))
+ (memory.copy (i32.const 39559) (i32.const 44661) (i32.const 1350))
+ (memory.copy (i32.const 10352) (i32.const 29274) (i32.const 1475))
+ (memory.copy (i32.const 46911) (i32.const 46178) (i32.const 1467))
+ (memory.copy (i32.const 4905) (i32.const 28740) (i32.const 1895))
+ (memory.copy (i32.const 38012) (i32.const 57253) (i32.const 1751))
+ (memory.copy (i32.const 26446) (i32.const 27223) (i32.const 1127))
+ (memory.copy (i32.const 58835) (i32.const 24657) (i32.const 1063))
+ (memory.copy (i32.const 61356) (i32.const 38790) (i32.const 766))
+ (memory.copy (i32.const 44160) (i32.const 2284) (i32.const 1520))
+ (memory.copy (i32.const 32740) (i32.const 47237) (i32.const 3014))
+ (memory.copy (i32.const 11148) (i32.const 21260) (i32.const 1011))
+ (memory.copy (i32.const 7665) (i32.const 31612) (i32.const 3034))
+ (memory.copy (i32.const 18044) (i32.const 12987) (i32.const 3320))
+ (memory.copy (i32.const 57306) (i32.const 55905) (i32.const 308))
+ (memory.copy (i32.const 24675) (i32.const 16815) (i32.const 1155))
+ (memory.copy (i32.const 19900) (i32.const 10115) (i32.const 722))
+ (memory.copy (i32.const 2921) (i32.const 5935) (i32.const 2370))
+ (memory.copy (i32.const 32255) (i32.const 50095) (i32.const 2926))
+ (memory.copy (i32.const 15126) (i32.const 17299) (i32.const 2607))
+ (memory.copy (i32.const 45575) (i32.const 28447) (i32.const 2045))
+ (memory.copy (i32.const 55149) (i32.const 36113) (i32.const 2596))
+ (memory.copy (i32.const 28461) (i32.const 54157) (i32.const 1168))
+ (memory.copy (i32.const 47951) (i32.const 53385) (i32.const 3137))
+ (memory.copy (i32.const 30646) (i32.const 45155) (i32.const 2649))
+ (memory.copy (i32.const 5057) (i32.const 4295) (i32.const 52))
+ (memory.copy (i32.const 6692) (i32.const 24195) (i32.const 441))
+ (memory.copy (i32.const 32984) (i32.const 27117) (i32.const 3445))
+ (memory.copy (i32.const 32530) (i32.const 59372) (i32.const 2785))
+ (memory.copy (i32.const 34361) (i32.const 8962) (i32.const 2406))
+ (memory.copy (i32.const 17893) (i32.const 54538) (i32.const 3381))
+ (memory.copy (i32.const 22685) (i32.const 44151) (i32.const 136))
+ (memory.copy (i32.const 59089) (i32.const 7077) (i32.const 1045))
+ (memory.copy (i32.const 42945) (i32.const 55028) (i32.const 2389))
+ (memory.copy (i32.const 44693) (i32.const 20138) (i32.const 877))
+ (memory.copy (i32.const 36810) (i32.const 25196) (i32.const 3447))
+ (memory.copy (i32.const 45742) (i32.const 31888) (i32.const 854))
+ (memory.copy (i32.const 24236) (i32.const 31866) (i32.const 1377))
+ (memory.copy (i32.const 33778) (i32.const 692) (i32.const 1594))
+ (memory.copy (i32.const 60618) (i32.const 18585) (i32.const 2987))
+ (memory.copy (i32.const 50370) (i32.const 41271) (i32.const 1406))
+ )
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+)
+(invoke "test")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 124) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 124) (i32.const 1517) (i32.const 9))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 1517) (i32.const 2132) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 2132) (i32.const 2827) (i32.const 10))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 2827) (i32.const 2921) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 2921) (i32.const 3538) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 3538) (i32.const 3786) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 3786) (i32.const 4042) (i32.const 97))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 4042) (i32.const 4651) (i32.const 99))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 4651) (i32.const 5057) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 5057) (i32.const 5109) (i32.const 99))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 5109) (i32.const 5291) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 5291) (i32.const 5524) (i32.const 72))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 5524) (i32.const 5691) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 5691) (i32.const 6552) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 6552) (i32.const 7133) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 7133) (i32.const 7665) (i32.const 99))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 7665) (i32.const 8314) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 8314) (i32.const 8360) (i32.const 62))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 8360) (i32.const 8793) (i32.const 86))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 8793) (i32.const 8979) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 8979) (i32.const 9373) (i32.const 79))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 9373) (i32.const 9518) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 9518) (i32.const 9934) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 9934) (i32.const 10087) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 10087) (i32.const 10206) (i32.const 5))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 10206) (i32.const 10230) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 10230) (i32.const 10249) (i32.const 41))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 10249) (i32.const 11148) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 11148) (i32.const 11356) (i32.const 74))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 11356) (i32.const 11380) (i32.const 93))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 11380) (i32.const 11939) (i32.const 74))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 11939) (i32.const 12159) (i32.const 68))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 12159) (i32.const 12575) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 12575) (i32.const 12969) (i32.const 79))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 12969) (i32.const 13114) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 13114) (i32.const 14133) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 14133) (i32.const 14404) (i32.const 76))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 14404) (i32.const 14428) (i32.const 57))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 14428) (i32.const 14458) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 14458) (i32.const 14580) (i32.const 32))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 14580) (i32.const 14777) (i32.const 89))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 14777) (i32.const 15124) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 15124) (i32.const 15126) (i32.const 36))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 15126) (i32.const 15192) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 15192) (i32.const 15871) (i32.const 96))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 15871) (i32.const 15998) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 15998) (i32.const 17017) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 17017) (i32.const 17288) (i32.const 76))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 17288) (i32.const 17312) (i32.const 57))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 17312) (i32.const 17342) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 17342) (i32.const 17464) (i32.const 32))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 17464) (i32.const 17661) (i32.const 89))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 17661) (i32.const 17727) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 17727) (i32.const 17733) (i32.const 5))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 17733) (i32.const 17893) (i32.const 96))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 17893) (i32.const 18553) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 18553) (i32.const 18744) (i32.const 42))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 18744) (i32.const 18801) (i32.const 76))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 18801) (i32.const 18825) (i32.const 57))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 18825) (i32.const 18876) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 18876) (i32.const 18885) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 18885) (i32.const 18904) (i32.const 41))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 18904) (i32.const 19567) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 19567) (i32.const 20403) (i32.const 96))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 20403) (i32.const 21274) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 21274) (i32.const 21364) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 21364) (i32.const 21468) (i32.const 74))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 21468) (i32.const 21492) (i32.const 93))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 21492) (i32.const 22051) (i32.const 74))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 22051) (i32.const 22480) (i32.const 68))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 22480) (i32.const 22685) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 22685) (i32.const 22694) (i32.const 68))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 22694) (i32.const 22821) (i32.const 10))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 22821) (i32.const 22869) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 22869) (i32.const 24107) (i32.const 97))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 24107) (i32.const 24111) (i32.const 37))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 24111) (i32.const 24236) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 24236) (i32.const 24348) (i32.const 72))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 24348) (i32.const 24515) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 24515) (i32.const 24900) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 24900) (i32.const 25136) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 25136) (i32.const 25182) (i32.const 85))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 25182) (i32.const 25426) (i32.const 68))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 25426) (i32.const 25613) (i32.const 89))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 25613) (i32.const 25830) (i32.const 96))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 25830) (i32.const 26446) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 26446) (i32.const 26517) (i32.const 10))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 26517) (i32.const 27468) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 27468) (i32.const 27503) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 27503) (i32.const 27573) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 27573) (i32.const 28245) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 28245) (i32.const 28280) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 28280) (i32.const 29502) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 29502) (i32.const 29629) (i32.const 42))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 29629) (i32.const 30387) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 30387) (i32.const 30646) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 30646) (i32.const 31066) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31066) (i32.const 31131) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31131) (i32.const 31322) (i32.const 42))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31322) (i32.const 31379) (i32.const 76))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31379) (i32.const 31403) (i32.const 57))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31403) (i32.const 31454) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31454) (i32.const 31463) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31463) (i32.const 31482) (i32.const 41))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31482) (i32.const 31649) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31649) (i32.const 31978) (i32.const 72))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 31978) (i32.const 32145) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 32145) (i32.const 32530) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 32530) (i32.const 32766) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 32766) (i32.const 32812) (i32.const 85))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 32812) (i32.const 33056) (i32.const 68))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 33056) (i32.const 33660) (i32.const 89))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 33660) (i32.const 33752) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 33752) (i32.const 33775) (i32.const 36))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 33775) (i32.const 33778) (i32.const 32))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 33778) (i32.const 34603) (i32.const 9))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 34603) (i32.const 35218) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 35218) (i32.const 35372) (i32.const 10))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 35372) (i32.const 35486) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 35486) (i32.const 35605) (i32.const 5))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 35605) (i32.const 35629) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 35629) (i32.const 35648) (i32.const 41))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 35648) (i32.const 36547) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 36547) (i32.const 36755) (i32.const 74))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 36755) (i32.const 36767) (i32.const 93))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 36767) (i32.const 36810) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 36810) (i32.const 36839) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 36839) (i32.const 37444) (i32.const 96))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 37444) (i32.const 38060) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 38060) (i32.const 38131) (i32.const 10))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 38131) (i32.const 39082) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 39082) (i32.const 39117) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 39117) (i32.const 39187) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 39187) (i32.const 39859) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 39859) (i32.const 39894) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 39894) (i32.const 40257) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 40257) (i32.const 40344) (i32.const 89))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 40344) (i32.const 40371) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 40371) (i32.const 40804) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 40804) (i32.const 40909) (i32.const 5))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 40909) (i32.const 42259) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 42259) (i32.const 42511) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 42511) (i32.const 42945) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 42945) (i32.const 43115) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 43115) (i32.const 43306) (i32.const 42))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 43306) (i32.const 43363) (i32.const 76))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 43363) (i32.const 43387) (i32.const 57))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 43387) (i32.const 43438) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 43438) (i32.const 43447) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 43447) (i32.const 43466) (i32.const 41))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 43466) (i32.const 44129) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 44129) (i32.const 44958) (i32.const 96))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 44958) (i32.const 45570) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 45570) (i32.const 45575) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 45575) (i32.const 45640) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 45640) (i32.const 45742) (i32.const 42))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 45742) (i32.const 45832) (i32.const 72))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 45832) (i32.const 45999) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 45999) (i32.const 46384) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 46384) (i32.const 46596) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 46596) (i32.const 46654) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 46654) (i32.const 47515) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 47515) (i32.const 47620) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 47620) (i32.const 47817) (i32.const 79))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 47817) (i32.const 47951) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 47951) (i32.const 48632) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 48632) (i32.const 48699) (i32.const 97))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 48699) (i32.const 48703) (i32.const 37))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 48703) (i32.const 49764) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 49764) (i32.const 49955) (i32.const 42))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 49955) (i32.const 50012) (i32.const 76))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 50012) (i32.const 50036) (i32.const 57))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 50036) (i32.const 50087) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 50087) (i32.const 50096) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 50096) (i32.const 50115) (i32.const 41))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 50115) (i32.const 50370) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 50370) (i32.const 51358) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 51358) (i32.const 51610) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 51610) (i32.const 51776) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 51776) (i32.const 51833) (i32.const 89))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 51833) (i32.const 52895) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 52895) (i32.const 53029) (i32.const 97))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 53029) (i32.const 53244) (i32.const 68))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 53244) (i32.const 54066) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 54066) (i32.const 54133) (i32.const 97))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 54133) (i32.const 54137) (i32.const 37))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 54137) (i32.const 55198) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 55198) (i32.const 55389) (i32.const 42))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 55389) (i32.const 55446) (i32.const 76))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 55446) (i32.const 55470) (i32.const 57))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 55470) (i32.const 55521) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 55521) (i32.const 55530) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 55530) (i32.const 55549) (i32.const 41))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 55549) (i32.const 56212) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 56212) (i32.const 57048) (i32.const 96))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 57048) (i32.const 58183) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 58183) (i32.const 58202) (i32.const 41))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 58202) (i32.const 58516) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 58516) (i32.const 58835) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 58835) (i32.const 58855) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 58855) (i32.const 59089) (i32.const 95))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 59089) (i32.const 59145) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 59145) (i32.const 59677) (i32.const 99))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 59677) (i32.const 60134) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60134) (i32.const 60502) (i32.const 89))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60502) (i32.const 60594) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60594) (i32.const 60617) (i32.const 36))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60617) (i32.const 60618) (i32.const 32))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60618) (i32.const 60777) (i32.const 42))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60777) (i32.const 60834) (i32.const 76))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60834) (i32.const 60858) (i32.const 57))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60858) (i32.const 60909) (i32.const 59))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60909) (i32.const 60918) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60918) (i32.const 60937) (i32.const 41))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 60937) (i32.const 61600) (i32.const 83))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 61600) (i32.const 62436) (i32.const 96))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 62436) (i32.const 63307) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 63307) (i32.const 63397) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 63397) (i32.const 63501) (i32.const 74))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 63501) (i32.const 63525) (i32.const 93))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 63525) (i32.const 63605) (i32.const 74))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 63605) (i32.const 63704) (i32.const 100))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 63704) (i32.const 63771) (i32.const 97))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 63771) (i32.const 63775) (i32.const 37))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 63775) (i32.const 64311) (i32.const 77))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 64311) (i32.const 64331) (i32.const 26))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 64331) (i32.const 64518) (i32.const 92))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 64518) (i32.const 64827) (i32.const 11))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 64827) (i32.const 64834) (i32.const 26))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 64834) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast.js
new file mode 100644
index 0000000000..3db49b5b24
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_copy.wast.js
@@ -0,0 +1,13859 @@
+
+'use strict';
+
+let spectest = {
+ print: console.log.bind(console),
+ print_i32: console.log.bind(console),
+ print_i32_f32: console.log.bind(console),
+ print_f64_f64: console.log.bind(console),
+ print_f32: console.log.bind(console),
+ print_f64: console.log.bind(console),
+ global_i32: 666,
+ global_f32: 666,
+ global_f64: 666,
+ table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
+ memory: new WebAssembly.Memory({initial: 1, maximum: 2})
+};
+let handler = {
+ get(target, prop) {
+ return (prop in target) ? target[prop] : {};
+ }
+};
+let registry = new Proxy({spectest}, handler);
+
+function register(name, instance) {
+ registry[name] = instance.exports;
+}
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function get(instance, name) {
+ let v = instance.exports[name];
+ return (v instanceof WebAssembly.Global) ? v.value : v;
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function run(action) {
+ action();
+}
+
+function assert_malformed(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm decoding failure expected");
+}
+
+function assert_invalid(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm validation failure expected");
+}
+
+function assert_unlinkable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.LinkError) return;
+ }
+ throw new Error("Wasm linking failure expected");
+}
+
+function assert_uninstantiable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+function assert_trap(action) {
+ try { action() } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+let StackOverflow;
+try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
+
+function assert_exhaustion(action) {
+ try { action() } catch (e) {
+ if (e instanceof StackOverflow) return;
+ }
+ throw new Error("Wasm resource exhaustion expected");
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+function assert_return_canonical_nan(action) {
+ let actual = action();
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test that it's a canonical NaN.
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+function assert_return_arithmetic_nan(action) {
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test for specific bitpatterns here.
+ let actual = action();
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+// memory_copy.wast:5
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x95\x80\x80\x80\x00\x02\x83\x80\x80\x80\x00\x00\x01\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
+
+// memory_copy.wast:14
+run(() => call($1, "test", []));
+
+// memory_copy.wast:16
+assert_return(() => call($1, "load8_u", [0]), 0);
+
+// memory_copy.wast:17
+assert_return(() => call($1, "load8_u", [1]), 0);
+
+// memory_copy.wast:18
+assert_return(() => call($1, "load8_u", [2]), 3);
+
+// memory_copy.wast:19
+assert_return(() => call($1, "load8_u", [3]), 1);
+
+// memory_copy.wast:20
+assert_return(() => call($1, "load8_u", [4]), 4);
+
+// memory_copy.wast:21
+assert_return(() => call($1, "load8_u", [5]), 1);
+
+// memory_copy.wast:22
+assert_return(() => call($1, "load8_u", [6]), 0);
+
+// memory_copy.wast:23
+assert_return(() => call($1, "load8_u", [7]), 0);
+
+// memory_copy.wast:24
+assert_return(() => call($1, "load8_u", [8]), 0);
+
+// memory_copy.wast:25
+assert_return(() => call($1, "load8_u", [9]), 0);
+
+// memory_copy.wast:26
+assert_return(() => call($1, "load8_u", [10]), 0);
+
+// memory_copy.wast:27
+assert_return(() => call($1, "load8_u", [11]), 0);
+
+// memory_copy.wast:28
+assert_return(() => call($1, "load8_u", [12]), 7);
+
+// memory_copy.wast:29
+assert_return(() => call($1, "load8_u", [13]), 5);
+
+// memory_copy.wast:30
+assert_return(() => call($1, "load8_u", [14]), 2);
+
+// memory_copy.wast:31
+assert_return(() => call($1, "load8_u", [15]), 3);
+
+// memory_copy.wast:32
+assert_return(() => call($1, "load8_u", [16]), 6);
+
+// memory_copy.wast:33
+assert_return(() => call($1, "load8_u", [17]), 0);
+
+// memory_copy.wast:34
+assert_return(() => call($1, "load8_u", [18]), 0);
+
+// memory_copy.wast:35
+assert_return(() => call($1, "load8_u", [19]), 0);
+
+// memory_copy.wast:36
+assert_return(() => call($1, "load8_u", [20]), 0);
+
+// memory_copy.wast:37
+assert_return(() => call($1, "load8_u", [21]), 0);
+
+// memory_copy.wast:38
+assert_return(() => call($1, "load8_u", [22]), 0);
+
+// memory_copy.wast:39
+assert_return(() => call($1, "load8_u", [23]), 0);
+
+// memory_copy.wast:40
+assert_return(() => call($1, "load8_u", [24]), 0);
+
+// memory_copy.wast:41
+assert_return(() => call($1, "load8_u", [25]), 0);
+
+// memory_copy.wast:42
+assert_return(() => call($1, "load8_u", [26]), 0);
+
+// memory_copy.wast:43
+assert_return(() => call($1, "load8_u", [27]), 0);
+
+// memory_copy.wast:44
+assert_return(() => call($1, "load8_u", [28]), 0);
+
+// memory_copy.wast:45
+assert_return(() => call($1, "load8_u", [29]), 0);
+
+// memory_copy.wast:47
+let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0d\x41\x02\x41\x03\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
+
+// memory_copy.wast:56
+run(() => call($2, "test", []));
+
+// memory_copy.wast:58
+assert_return(() => call($2, "load8_u", [0]), 0);
+
+// memory_copy.wast:59
+assert_return(() => call($2, "load8_u", [1]), 0);
+
+// memory_copy.wast:60
+assert_return(() => call($2, "load8_u", [2]), 3);
+
+// memory_copy.wast:61
+assert_return(() => call($2, "load8_u", [3]), 1);
+
+// memory_copy.wast:62
+assert_return(() => call($2, "load8_u", [4]), 4);
+
+// memory_copy.wast:63
+assert_return(() => call($2, "load8_u", [5]), 1);
+
+// memory_copy.wast:64
+assert_return(() => call($2, "load8_u", [6]), 0);
+
+// memory_copy.wast:65
+assert_return(() => call($2, "load8_u", [7]), 0);
+
+// memory_copy.wast:66
+assert_return(() => call($2, "load8_u", [8]), 0);
+
+// memory_copy.wast:67
+assert_return(() => call($2, "load8_u", [9]), 0);
+
+// memory_copy.wast:68
+assert_return(() => call($2, "load8_u", [10]), 0);
+
+// memory_copy.wast:69
+assert_return(() => call($2, "load8_u", [11]), 0);
+
+// memory_copy.wast:70
+assert_return(() => call($2, "load8_u", [12]), 7);
+
+// memory_copy.wast:71
+assert_return(() => call($2, "load8_u", [13]), 3);
+
+// memory_copy.wast:72
+assert_return(() => call($2, "load8_u", [14]), 1);
+
+// memory_copy.wast:73
+assert_return(() => call($2, "load8_u", [15]), 4);
+
+// memory_copy.wast:74
+assert_return(() => call($2, "load8_u", [16]), 6);
+
+// memory_copy.wast:75
+assert_return(() => call($2, "load8_u", [17]), 0);
+
+// memory_copy.wast:76
+assert_return(() => call($2, "load8_u", [18]), 0);
+
+// memory_copy.wast:77
+assert_return(() => call($2, "load8_u", [19]), 0);
+
+// memory_copy.wast:78
+assert_return(() => call($2, "load8_u", [20]), 0);
+
+// memory_copy.wast:79
+assert_return(() => call($2, "load8_u", [21]), 0);
+
+// memory_copy.wast:80
+assert_return(() => call($2, "load8_u", [22]), 0);
+
+// memory_copy.wast:81
+assert_return(() => call($2, "load8_u", [23]), 0);
+
+// memory_copy.wast:82
+assert_return(() => call($2, "load8_u", [24]), 0);
+
+// memory_copy.wast:83
+assert_return(() => call($2, "load8_u", [25]), 0);
+
+// memory_copy.wast:84
+assert_return(() => call($2, "load8_u", [26]), 0);
+
+// memory_copy.wast:85
+assert_return(() => call($2, "load8_u", [27]), 0);
+
+// memory_copy.wast:86
+assert_return(() => call($2, "load8_u", [28]), 0);
+
+// memory_copy.wast:87
+assert_return(() => call($2, "load8_u", [29]), 0);
+
+// memory_copy.wast:89
+let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x19\x41\x0f\x41\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
+
+// memory_copy.wast:98
+run(() => call($3, "test", []));
+
+// memory_copy.wast:100
+assert_return(() => call($3, "load8_u", [0]), 0);
+
+// memory_copy.wast:101
+assert_return(() => call($3, "load8_u", [1]), 0);
+
+// memory_copy.wast:102
+assert_return(() => call($3, "load8_u", [2]), 3);
+
+// memory_copy.wast:103
+assert_return(() => call($3, "load8_u", [3]), 1);
+
+// memory_copy.wast:104
+assert_return(() => call($3, "load8_u", [4]), 4);
+
+// memory_copy.wast:105
+assert_return(() => call($3, "load8_u", [5]), 1);
+
+// memory_copy.wast:106
+assert_return(() => call($3, "load8_u", [6]), 0);
+
+// memory_copy.wast:107
+assert_return(() => call($3, "load8_u", [7]), 0);
+
+// memory_copy.wast:108
+assert_return(() => call($3, "load8_u", [8]), 0);
+
+// memory_copy.wast:109
+assert_return(() => call($3, "load8_u", [9]), 0);
+
+// memory_copy.wast:110
+assert_return(() => call($3, "load8_u", [10]), 0);
+
+// memory_copy.wast:111
+assert_return(() => call($3, "load8_u", [11]), 0);
+
+// memory_copy.wast:112
+assert_return(() => call($3, "load8_u", [12]), 7);
+
+// memory_copy.wast:113
+assert_return(() => call($3, "load8_u", [13]), 5);
+
+// memory_copy.wast:114
+assert_return(() => call($3, "load8_u", [14]), 2);
+
+// memory_copy.wast:115
+assert_return(() => call($3, "load8_u", [15]), 3);
+
+// memory_copy.wast:116
+assert_return(() => call($3, "load8_u", [16]), 6);
+
+// memory_copy.wast:117
+assert_return(() => call($3, "load8_u", [17]), 0);
+
+// memory_copy.wast:118
+assert_return(() => call($3, "load8_u", [18]), 0);
+
+// memory_copy.wast:119
+assert_return(() => call($3, "load8_u", [19]), 0);
+
+// memory_copy.wast:120
+assert_return(() => call($3, "load8_u", [20]), 0);
+
+// memory_copy.wast:121
+assert_return(() => call($3, "load8_u", [21]), 0);
+
+// memory_copy.wast:122
+assert_return(() => call($3, "load8_u", [22]), 0);
+
+// memory_copy.wast:123
+assert_return(() => call($3, "load8_u", [23]), 0);
+
+// memory_copy.wast:124
+assert_return(() => call($3, "load8_u", [24]), 0);
+
+// memory_copy.wast:125
+assert_return(() => call($3, "load8_u", [25]), 3);
+
+// memory_copy.wast:126
+assert_return(() => call($3, "load8_u", [26]), 6);
+
+// memory_copy.wast:127
+assert_return(() => call($3, "load8_u", [27]), 0);
+
+// memory_copy.wast:128
+assert_return(() => call($3, "load8_u", [28]), 0);
+
+// memory_copy.wast:129
+assert_return(() => call($3, "load8_u", [29]), 0);
+
+// memory_copy.wast:131
+let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0d\x41\x19\x41\x03\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
+
+// memory_copy.wast:140
+run(() => call($4, "test", []));
+
+// memory_copy.wast:142
+assert_return(() => call($4, "load8_u", [0]), 0);
+
+// memory_copy.wast:143
+assert_return(() => call($4, "load8_u", [1]), 0);
+
+// memory_copy.wast:144
+assert_return(() => call($4, "load8_u", [2]), 3);
+
+// memory_copy.wast:145
+assert_return(() => call($4, "load8_u", [3]), 1);
+
+// memory_copy.wast:146
+assert_return(() => call($4, "load8_u", [4]), 4);
+
+// memory_copy.wast:147
+assert_return(() => call($4, "load8_u", [5]), 1);
+
+// memory_copy.wast:148
+assert_return(() => call($4, "load8_u", [6]), 0);
+
+// memory_copy.wast:149
+assert_return(() => call($4, "load8_u", [7]), 0);
+
+// memory_copy.wast:150
+assert_return(() => call($4, "load8_u", [8]), 0);
+
+// memory_copy.wast:151
+assert_return(() => call($4, "load8_u", [9]), 0);
+
+// memory_copy.wast:152
+assert_return(() => call($4, "load8_u", [10]), 0);
+
+// memory_copy.wast:153
+assert_return(() => call($4, "load8_u", [11]), 0);
+
+// memory_copy.wast:154
+assert_return(() => call($4, "load8_u", [12]), 7);
+
+// memory_copy.wast:155
+assert_return(() => call($4, "load8_u", [13]), 0);
+
+// memory_copy.wast:156
+assert_return(() => call($4, "load8_u", [14]), 0);
+
+// memory_copy.wast:157
+assert_return(() => call($4, "load8_u", [15]), 0);
+
+// memory_copy.wast:158
+assert_return(() => call($4, "load8_u", [16]), 6);
+
+// memory_copy.wast:159
+assert_return(() => call($4, "load8_u", [17]), 0);
+
+// memory_copy.wast:160
+assert_return(() => call($4, "load8_u", [18]), 0);
+
+// memory_copy.wast:161
+assert_return(() => call($4, "load8_u", [19]), 0);
+
+// memory_copy.wast:162
+assert_return(() => call($4, "load8_u", [20]), 0);
+
+// memory_copy.wast:163
+assert_return(() => call($4, "load8_u", [21]), 0);
+
+// memory_copy.wast:164
+assert_return(() => call($4, "load8_u", [22]), 0);
+
+// memory_copy.wast:165
+assert_return(() => call($4, "load8_u", [23]), 0);
+
+// memory_copy.wast:166
+assert_return(() => call($4, "load8_u", [24]), 0);
+
+// memory_copy.wast:167
+assert_return(() => call($4, "load8_u", [25]), 0);
+
+// memory_copy.wast:168
+assert_return(() => call($4, "load8_u", [26]), 0);
+
+// memory_copy.wast:169
+assert_return(() => call($4, "load8_u", [27]), 0);
+
+// memory_copy.wast:170
+assert_return(() => call($4, "load8_u", [28]), 0);
+
+// memory_copy.wast:171
+assert_return(() => call($4, "load8_u", [29]), 0);
+
+// memory_copy.wast:173
+let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x14\x41\x16\x41\x04\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
+
+// memory_copy.wast:182
+run(() => call($5, "test", []));
+
+// memory_copy.wast:184
+assert_return(() => call($5, "load8_u", [0]), 0);
+
+// memory_copy.wast:185
+assert_return(() => call($5, "load8_u", [1]), 0);
+
+// memory_copy.wast:186
+assert_return(() => call($5, "load8_u", [2]), 3);
+
+// memory_copy.wast:187
+assert_return(() => call($5, "load8_u", [3]), 1);
+
+// memory_copy.wast:188
+assert_return(() => call($5, "load8_u", [4]), 4);
+
+// memory_copy.wast:189
+assert_return(() => call($5, "load8_u", [5]), 1);
+
+// memory_copy.wast:190
+assert_return(() => call($5, "load8_u", [6]), 0);
+
+// memory_copy.wast:191
+assert_return(() => call($5, "load8_u", [7]), 0);
+
+// memory_copy.wast:192
+assert_return(() => call($5, "load8_u", [8]), 0);
+
+// memory_copy.wast:193
+assert_return(() => call($5, "load8_u", [9]), 0);
+
+// memory_copy.wast:194
+assert_return(() => call($5, "load8_u", [10]), 0);
+
+// memory_copy.wast:195
+assert_return(() => call($5, "load8_u", [11]), 0);
+
+// memory_copy.wast:196
+assert_return(() => call($5, "load8_u", [12]), 7);
+
+// memory_copy.wast:197
+assert_return(() => call($5, "load8_u", [13]), 5);
+
+// memory_copy.wast:198
+assert_return(() => call($5, "load8_u", [14]), 2);
+
+// memory_copy.wast:199
+assert_return(() => call($5, "load8_u", [15]), 3);
+
+// memory_copy.wast:200
+assert_return(() => call($5, "load8_u", [16]), 6);
+
+// memory_copy.wast:201
+assert_return(() => call($5, "load8_u", [17]), 0);
+
+// memory_copy.wast:202
+assert_return(() => call($5, "load8_u", [18]), 0);
+
+// memory_copy.wast:203
+assert_return(() => call($5, "load8_u", [19]), 0);
+
+// memory_copy.wast:204
+assert_return(() => call($5, "load8_u", [20]), 0);
+
+// memory_copy.wast:205
+assert_return(() => call($5, "load8_u", [21]), 0);
+
+// memory_copy.wast:206
+assert_return(() => call($5, "load8_u", [22]), 0);
+
+// memory_copy.wast:207
+assert_return(() => call($5, "load8_u", [23]), 0);
+
+// memory_copy.wast:208
+assert_return(() => call($5, "load8_u", [24]), 0);
+
+// memory_copy.wast:209
+assert_return(() => call($5, "load8_u", [25]), 0);
+
+// memory_copy.wast:210
+assert_return(() => call($5, "load8_u", [26]), 0);
+
+// memory_copy.wast:211
+assert_return(() => call($5, "load8_u", [27]), 0);
+
+// memory_copy.wast:212
+assert_return(() => call($5, "load8_u", [28]), 0);
+
+// memory_copy.wast:213
+assert_return(() => call($5, "load8_u", [29]), 0);
+
+// memory_copy.wast:215
+let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x19\x41\x01\x41\x03\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
+
+// memory_copy.wast:224
+run(() => call($6, "test", []));
+
+// memory_copy.wast:226
+assert_return(() => call($6, "load8_u", [0]), 0);
+
+// memory_copy.wast:227
+assert_return(() => call($6, "load8_u", [1]), 0);
+
+// memory_copy.wast:228
+assert_return(() => call($6, "load8_u", [2]), 3);
+
+// memory_copy.wast:229
+assert_return(() => call($6, "load8_u", [3]), 1);
+
+// memory_copy.wast:230
+assert_return(() => call($6, "load8_u", [4]), 4);
+
+// memory_copy.wast:231
+assert_return(() => call($6, "load8_u", [5]), 1);
+
+// memory_copy.wast:232
+assert_return(() => call($6, "load8_u", [6]), 0);
+
+// memory_copy.wast:233
+assert_return(() => call($6, "load8_u", [7]), 0);
+
+// memory_copy.wast:234
+assert_return(() => call($6, "load8_u", [8]), 0);
+
+// memory_copy.wast:235
+assert_return(() => call($6, "load8_u", [9]), 0);
+
+// memory_copy.wast:236
+assert_return(() => call($6, "load8_u", [10]), 0);
+
+// memory_copy.wast:237
+assert_return(() => call($6, "load8_u", [11]), 0);
+
+// memory_copy.wast:238
+assert_return(() => call($6, "load8_u", [12]), 7);
+
+// memory_copy.wast:239
+assert_return(() => call($6, "load8_u", [13]), 5);
+
+// memory_copy.wast:240
+assert_return(() => call($6, "load8_u", [14]), 2);
+
+// memory_copy.wast:241
+assert_return(() => call($6, "load8_u", [15]), 3);
+
+// memory_copy.wast:242
+assert_return(() => call($6, "load8_u", [16]), 6);
+
+// memory_copy.wast:243
+assert_return(() => call($6, "load8_u", [17]), 0);
+
+// memory_copy.wast:244
+assert_return(() => call($6, "load8_u", [18]), 0);
+
+// memory_copy.wast:245
+assert_return(() => call($6, "load8_u", [19]), 0);
+
+// memory_copy.wast:246
+assert_return(() => call($6, "load8_u", [20]), 0);
+
+// memory_copy.wast:247
+assert_return(() => call($6, "load8_u", [21]), 0);
+
+// memory_copy.wast:248
+assert_return(() => call($6, "load8_u", [22]), 0);
+
+// memory_copy.wast:249
+assert_return(() => call($6, "load8_u", [23]), 0);
+
+// memory_copy.wast:250
+assert_return(() => call($6, "load8_u", [24]), 0);
+
+// memory_copy.wast:251
+assert_return(() => call($6, "load8_u", [25]), 0);
+
+// memory_copy.wast:252
+assert_return(() => call($6, "load8_u", [26]), 3);
+
+// memory_copy.wast:253
+assert_return(() => call($6, "load8_u", [27]), 1);
+
+// memory_copy.wast:254
+assert_return(() => call($6, "load8_u", [28]), 0);
+
+// memory_copy.wast:255
+assert_return(() => call($6, "load8_u", [29]), 0);
+
+// memory_copy.wast:257
+let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0a\x41\x0c\x41\x07\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
+
+// memory_copy.wast:266
+run(() => call($7, "test", []));
+
+// memory_copy.wast:268
+assert_return(() => call($7, "load8_u", [0]), 0);
+
+// memory_copy.wast:269
+assert_return(() => call($7, "load8_u", [1]), 0);
+
+// memory_copy.wast:270
+assert_return(() => call($7, "load8_u", [2]), 3);
+
+// memory_copy.wast:271
+assert_return(() => call($7, "load8_u", [3]), 1);
+
+// memory_copy.wast:272
+assert_return(() => call($7, "load8_u", [4]), 4);
+
+// memory_copy.wast:273
+assert_return(() => call($7, "load8_u", [5]), 1);
+
+// memory_copy.wast:274
+assert_return(() => call($7, "load8_u", [6]), 0);
+
+// memory_copy.wast:275
+assert_return(() => call($7, "load8_u", [7]), 0);
+
+// memory_copy.wast:276
+assert_return(() => call($7, "load8_u", [8]), 0);
+
+// memory_copy.wast:277
+assert_return(() => call($7, "load8_u", [9]), 0);
+
+// memory_copy.wast:278
+assert_return(() => call($7, "load8_u", [10]), 7);
+
+// memory_copy.wast:279
+assert_return(() => call($7, "load8_u", [11]), 5);
+
+// memory_copy.wast:280
+assert_return(() => call($7, "load8_u", [12]), 2);
+
+// memory_copy.wast:281
+assert_return(() => call($7, "load8_u", [13]), 3);
+
+// memory_copy.wast:282
+assert_return(() => call($7, "load8_u", [14]), 6);
+
+// memory_copy.wast:283
+assert_return(() => call($7, "load8_u", [15]), 0);
+
+// memory_copy.wast:284
+assert_return(() => call($7, "load8_u", [16]), 0);
+
+// memory_copy.wast:285
+assert_return(() => call($7, "load8_u", [17]), 0);
+
+// memory_copy.wast:286
+assert_return(() => call($7, "load8_u", [18]), 0);
+
+// memory_copy.wast:287
+assert_return(() => call($7, "load8_u", [19]), 0);
+
+// memory_copy.wast:288
+assert_return(() => call($7, "load8_u", [20]), 0);
+
+// memory_copy.wast:289
+assert_return(() => call($7, "load8_u", [21]), 0);
+
+// memory_copy.wast:290
+assert_return(() => call($7, "load8_u", [22]), 0);
+
+// memory_copy.wast:291
+assert_return(() => call($7, "load8_u", [23]), 0);
+
+// memory_copy.wast:292
+assert_return(() => call($7, "load8_u", [24]), 0);
+
+// memory_copy.wast:293
+assert_return(() => call($7, "load8_u", [25]), 0);
+
+// memory_copy.wast:294
+assert_return(() => call($7, "load8_u", [26]), 0);
+
+// memory_copy.wast:295
+assert_return(() => call($7, "load8_u", [27]), 0);
+
+// memory_copy.wast:296
+assert_return(() => call($7, "load8_u", [28]), 0);
+
+// memory_copy.wast:297
+assert_return(() => call($7, "load8_u", [29]), 0);
+
+// memory_copy.wast:299
+let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x0a\x41\x07\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x94\x80\x80\x80\x00\x02\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06");
+
+// memory_copy.wast:308
+run(() => call($8, "test", []));
+
+// memory_copy.wast:310
+assert_return(() => call($8, "load8_u", [0]), 0);
+
+// memory_copy.wast:311
+assert_return(() => call($8, "load8_u", [1]), 0);
+
+// memory_copy.wast:312
+assert_return(() => call($8, "load8_u", [2]), 3);
+
+// memory_copy.wast:313
+assert_return(() => call($8, "load8_u", [3]), 1);
+
+// memory_copy.wast:314
+assert_return(() => call($8, "load8_u", [4]), 4);
+
+// memory_copy.wast:315
+assert_return(() => call($8, "load8_u", [5]), 1);
+
+// memory_copy.wast:316
+assert_return(() => call($8, "load8_u", [6]), 0);
+
+// memory_copy.wast:317
+assert_return(() => call($8, "load8_u", [7]), 0);
+
+// memory_copy.wast:318
+assert_return(() => call($8, "load8_u", [8]), 0);
+
+// memory_copy.wast:319
+assert_return(() => call($8, "load8_u", [9]), 0);
+
+// memory_copy.wast:320
+assert_return(() => call($8, "load8_u", [10]), 0);
+
+// memory_copy.wast:321
+assert_return(() => call($8, "load8_u", [11]), 0);
+
+// memory_copy.wast:322
+assert_return(() => call($8, "load8_u", [12]), 0);
+
+// memory_copy.wast:323
+assert_return(() => call($8, "load8_u", [13]), 0);
+
+// memory_copy.wast:324
+assert_return(() => call($8, "load8_u", [14]), 7);
+
+// memory_copy.wast:325
+assert_return(() => call($8, "load8_u", [15]), 5);
+
+// memory_copy.wast:326
+assert_return(() => call($8, "load8_u", [16]), 2);
+
+// memory_copy.wast:327
+assert_return(() => call($8, "load8_u", [17]), 3);
+
+// memory_copy.wast:328
+assert_return(() => call($8, "load8_u", [18]), 6);
+
+// memory_copy.wast:329
+assert_return(() => call($8, "load8_u", [19]), 0);
+
+// memory_copy.wast:330
+assert_return(() => call($8, "load8_u", [20]), 0);
+
+// memory_copy.wast:331
+assert_return(() => call($8, "load8_u", [21]), 0);
+
+// memory_copy.wast:332
+assert_return(() => call($8, "load8_u", [22]), 0);
+
+// memory_copy.wast:333
+assert_return(() => call($8, "load8_u", [23]), 0);
+
+// memory_copy.wast:334
+assert_return(() => call($8, "load8_u", [24]), 0);
+
+// memory_copy.wast:335
+assert_return(() => call($8, "load8_u", [25]), 0);
+
+// memory_copy.wast:336
+assert_return(() => call($8, "load8_u", [26]), 0);
+
+// memory_copy.wast:337
+assert_return(() => call($8, "load8_u", [27]), 0);
+
+// memory_copy.wast:338
+assert_return(() => call($8, "load8_u", [28]), 0);
+
+// memory_copy.wast:339
+assert_return(() => call($8, "load8_u", [29]), 0);
+
+// memory_copy.wast:341
+let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9a\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
+
+// memory_copy.wast:349
+assert_trap(() => call($9, "run", [65516, 0, 40]));
+
+// memory_copy.wast:352
+assert_return(() => call($9, "load8_u", [0]), 0);
+
+// memory_copy.wast:353
+assert_return(() => call($9, "load8_u", [1]), 1);
+
+// memory_copy.wast:354
+assert_return(() => call($9, "load8_u", [2]), 2);
+
+// memory_copy.wast:355
+assert_return(() => call($9, "load8_u", [3]), 3);
+
+// memory_copy.wast:356
+assert_return(() => call($9, "load8_u", [4]), 4);
+
+// memory_copy.wast:357
+assert_return(() => call($9, "load8_u", [5]), 5);
+
+// memory_copy.wast:358
+assert_return(() => call($9, "load8_u", [6]), 6);
+
+// memory_copy.wast:359
+assert_return(() => call($9, "load8_u", [7]), 7);
+
+// memory_copy.wast:360
+assert_return(() => call($9, "load8_u", [8]), 8);
+
+// memory_copy.wast:361
+assert_return(() => call($9, "load8_u", [9]), 9);
+
+// memory_copy.wast:362
+assert_return(() => call($9, "load8_u", [10]), 10);
+
+// memory_copy.wast:363
+assert_return(() => call($9, "load8_u", [11]), 11);
+
+// memory_copy.wast:364
+assert_return(() => call($9, "load8_u", [12]), 12);
+
+// memory_copy.wast:365
+assert_return(() => call($9, "load8_u", [13]), 13);
+
+// memory_copy.wast:366
+assert_return(() => call($9, "load8_u", [14]), 14);
+
+// memory_copy.wast:367
+assert_return(() => call($9, "load8_u", [15]), 15);
+
+// memory_copy.wast:368
+assert_return(() => call($9, "load8_u", [16]), 16);
+
+// memory_copy.wast:369
+assert_return(() => call($9, "load8_u", [17]), 17);
+
+// memory_copy.wast:370
+assert_return(() => call($9, "load8_u", [18]), 18);
+
+// memory_copy.wast:371
+assert_return(() => call($9, "load8_u", [19]), 19);
+
+// memory_copy.wast:372
+assert_return(() => call($9, "load8_u", [218]), 0);
+
+// memory_copy.wast:373
+assert_return(() => call($9, "load8_u", [417]), 0);
+
+// memory_copy.wast:374
+assert_return(() => call($9, "load8_u", [616]), 0);
+
+// memory_copy.wast:375
+assert_return(() => call($9, "load8_u", [815]), 0);
+
+// memory_copy.wast:376
+assert_return(() => call($9, "load8_u", [1014]), 0);
+
+// memory_copy.wast:377
+assert_return(() => call($9, "load8_u", [1213]), 0);
+
+// memory_copy.wast:378
+assert_return(() => call($9, "load8_u", [1412]), 0);
+
+// memory_copy.wast:379
+assert_return(() => call($9, "load8_u", [1611]), 0);
+
+// memory_copy.wast:380
+assert_return(() => call($9, "load8_u", [1810]), 0);
+
+// memory_copy.wast:381
+assert_return(() => call($9, "load8_u", [2009]), 0);
+
+// memory_copy.wast:382
+assert_return(() => call($9, "load8_u", [2208]), 0);
+
+// memory_copy.wast:383
+assert_return(() => call($9, "load8_u", [2407]), 0);
+
+// memory_copy.wast:384
+assert_return(() => call($9, "load8_u", [2606]), 0);
+
+// memory_copy.wast:385
+assert_return(() => call($9, "load8_u", [2805]), 0);
+
+// memory_copy.wast:386
+assert_return(() => call($9, "load8_u", [3004]), 0);
+
+// memory_copy.wast:387
+assert_return(() => call($9, "load8_u", [3203]), 0);
+
+// memory_copy.wast:388
+assert_return(() => call($9, "load8_u", [3402]), 0);
+
+// memory_copy.wast:389
+assert_return(() => call($9, "load8_u", [3601]), 0);
+
+// memory_copy.wast:390
+assert_return(() => call($9, "load8_u", [3800]), 0);
+
+// memory_copy.wast:391
+assert_return(() => call($9, "load8_u", [3999]), 0);
+
+// memory_copy.wast:392
+assert_return(() => call($9, "load8_u", [4198]), 0);
+
+// memory_copy.wast:393
+assert_return(() => call($9, "load8_u", [4397]), 0);
+
+// memory_copy.wast:394
+assert_return(() => call($9, "load8_u", [4596]), 0);
+
+// memory_copy.wast:395
+assert_return(() => call($9, "load8_u", [4795]), 0);
+
+// memory_copy.wast:396
+assert_return(() => call($9, "load8_u", [4994]), 0);
+
+// memory_copy.wast:397
+assert_return(() => call($9, "load8_u", [5193]), 0);
+
+// memory_copy.wast:398
+assert_return(() => call($9, "load8_u", [5392]), 0);
+
+// memory_copy.wast:399
+assert_return(() => call($9, "load8_u", [5591]), 0);
+
+// memory_copy.wast:400
+assert_return(() => call($9, "load8_u", [5790]), 0);
+
+// memory_copy.wast:401
+assert_return(() => call($9, "load8_u", [5989]), 0);
+
+// memory_copy.wast:402
+assert_return(() => call($9, "load8_u", [6188]), 0);
+
+// memory_copy.wast:403
+assert_return(() => call($9, "load8_u", [6387]), 0);
+
+// memory_copy.wast:404
+assert_return(() => call($9, "load8_u", [6586]), 0);
+
+// memory_copy.wast:405
+assert_return(() => call($9, "load8_u", [6785]), 0);
+
+// memory_copy.wast:406
+assert_return(() => call($9, "load8_u", [6984]), 0);
+
+// memory_copy.wast:407
+assert_return(() => call($9, "load8_u", [7183]), 0);
+
+// memory_copy.wast:408
+assert_return(() => call($9, "load8_u", [7382]), 0);
+
+// memory_copy.wast:409
+assert_return(() => call($9, "load8_u", [7581]), 0);
+
+// memory_copy.wast:410
+assert_return(() => call($9, "load8_u", [7780]), 0);
+
+// memory_copy.wast:411
+assert_return(() => call($9, "load8_u", [7979]), 0);
+
+// memory_copy.wast:412
+assert_return(() => call($9, "load8_u", [8178]), 0);
+
+// memory_copy.wast:413
+assert_return(() => call($9, "load8_u", [8377]), 0);
+
+// memory_copy.wast:414
+assert_return(() => call($9, "load8_u", [8576]), 0);
+
+// memory_copy.wast:415
+assert_return(() => call($9, "load8_u", [8775]), 0);
+
+// memory_copy.wast:416
+assert_return(() => call($9, "load8_u", [8974]), 0);
+
+// memory_copy.wast:417
+assert_return(() => call($9, "load8_u", [9173]), 0);
+
+// memory_copy.wast:418
+assert_return(() => call($9, "load8_u", [9372]), 0);
+
+// memory_copy.wast:419
+assert_return(() => call($9, "load8_u", [9571]), 0);
+
+// memory_copy.wast:420
+assert_return(() => call($9, "load8_u", [9770]), 0);
+
+// memory_copy.wast:421
+assert_return(() => call($9, "load8_u", [9969]), 0);
+
+// memory_copy.wast:422
+assert_return(() => call($9, "load8_u", [10168]), 0);
+
+// memory_copy.wast:423
+assert_return(() => call($9, "load8_u", [10367]), 0);
+
+// memory_copy.wast:424
+assert_return(() => call($9, "load8_u", [10566]), 0);
+
+// memory_copy.wast:425
+assert_return(() => call($9, "load8_u", [10765]), 0);
+
+// memory_copy.wast:426
+assert_return(() => call($9, "load8_u", [10964]), 0);
+
+// memory_copy.wast:427
+assert_return(() => call($9, "load8_u", [11163]), 0);
+
+// memory_copy.wast:428
+assert_return(() => call($9, "load8_u", [11362]), 0);
+
+// memory_copy.wast:429
+assert_return(() => call($9, "load8_u", [11561]), 0);
+
+// memory_copy.wast:430
+assert_return(() => call($9, "load8_u", [11760]), 0);
+
+// memory_copy.wast:431
+assert_return(() => call($9, "load8_u", [11959]), 0);
+
+// memory_copy.wast:432
+assert_return(() => call($9, "load8_u", [12158]), 0);
+
+// memory_copy.wast:433
+assert_return(() => call($9, "load8_u", [12357]), 0);
+
+// memory_copy.wast:434
+assert_return(() => call($9, "load8_u", [12556]), 0);
+
+// memory_copy.wast:435
+assert_return(() => call($9, "load8_u", [12755]), 0);
+
+// memory_copy.wast:436
+assert_return(() => call($9, "load8_u", [12954]), 0);
+
+// memory_copy.wast:437
+assert_return(() => call($9, "load8_u", [13153]), 0);
+
+// memory_copy.wast:438
+assert_return(() => call($9, "load8_u", [13352]), 0);
+
+// memory_copy.wast:439
+assert_return(() => call($9, "load8_u", [13551]), 0);
+
+// memory_copy.wast:440
+assert_return(() => call($9, "load8_u", [13750]), 0);
+
+// memory_copy.wast:441
+assert_return(() => call($9, "load8_u", [13949]), 0);
+
+// memory_copy.wast:442
+assert_return(() => call($9, "load8_u", [14148]), 0);
+
+// memory_copy.wast:443
+assert_return(() => call($9, "load8_u", [14347]), 0);
+
+// memory_copy.wast:444
+assert_return(() => call($9, "load8_u", [14546]), 0);
+
+// memory_copy.wast:445
+assert_return(() => call($9, "load8_u", [14745]), 0);
+
+// memory_copy.wast:446
+assert_return(() => call($9, "load8_u", [14944]), 0);
+
+// memory_copy.wast:447
+assert_return(() => call($9, "load8_u", [15143]), 0);
+
+// memory_copy.wast:448
+assert_return(() => call($9, "load8_u", [15342]), 0);
+
+// memory_copy.wast:449
+assert_return(() => call($9, "load8_u", [15541]), 0);
+
+// memory_copy.wast:450
+assert_return(() => call($9, "load8_u", [15740]), 0);
+
+// memory_copy.wast:451
+assert_return(() => call($9, "load8_u", [15939]), 0);
+
+// memory_copy.wast:452
+assert_return(() => call($9, "load8_u", [16138]), 0);
+
+// memory_copy.wast:453
+assert_return(() => call($9, "load8_u", [16337]), 0);
+
+// memory_copy.wast:454
+assert_return(() => call($9, "load8_u", [16536]), 0);
+
+// memory_copy.wast:455
+assert_return(() => call($9, "load8_u", [16735]), 0);
+
+// memory_copy.wast:456
+assert_return(() => call($9, "load8_u", [16934]), 0);
+
+// memory_copy.wast:457
+assert_return(() => call($9, "load8_u", [17133]), 0);
+
+// memory_copy.wast:458
+assert_return(() => call($9, "load8_u", [17332]), 0);
+
+// memory_copy.wast:459
+assert_return(() => call($9, "load8_u", [17531]), 0);
+
+// memory_copy.wast:460
+assert_return(() => call($9, "load8_u", [17730]), 0);
+
+// memory_copy.wast:461
+assert_return(() => call($9, "load8_u", [17929]), 0);
+
+// memory_copy.wast:462
+assert_return(() => call($9, "load8_u", [18128]), 0);
+
+// memory_copy.wast:463
+assert_return(() => call($9, "load8_u", [18327]), 0);
+
+// memory_copy.wast:464
+assert_return(() => call($9, "load8_u", [18526]), 0);
+
+// memory_copy.wast:465
+assert_return(() => call($9, "load8_u", [18725]), 0);
+
+// memory_copy.wast:466
+assert_return(() => call($9, "load8_u", [18924]), 0);
+
+// memory_copy.wast:467
+assert_return(() => call($9, "load8_u", [19123]), 0);
+
+// memory_copy.wast:468
+assert_return(() => call($9, "load8_u", [19322]), 0);
+
+// memory_copy.wast:469
+assert_return(() => call($9, "load8_u", [19521]), 0);
+
+// memory_copy.wast:470
+assert_return(() => call($9, "load8_u", [19720]), 0);
+
+// memory_copy.wast:471
+assert_return(() => call($9, "load8_u", [19919]), 0);
+
+// memory_copy.wast:472
+assert_return(() => call($9, "load8_u", [20118]), 0);
+
+// memory_copy.wast:473
+assert_return(() => call($9, "load8_u", [20317]), 0);
+
+// memory_copy.wast:474
+assert_return(() => call($9, "load8_u", [20516]), 0);
+
+// memory_copy.wast:475
+assert_return(() => call($9, "load8_u", [20715]), 0);
+
+// memory_copy.wast:476
+assert_return(() => call($9, "load8_u", [20914]), 0);
+
+// memory_copy.wast:477
+assert_return(() => call($9, "load8_u", [21113]), 0);
+
+// memory_copy.wast:478
+assert_return(() => call($9, "load8_u", [21312]), 0);
+
+// memory_copy.wast:479
+assert_return(() => call($9, "load8_u", [21511]), 0);
+
+// memory_copy.wast:480
+assert_return(() => call($9, "load8_u", [21710]), 0);
+
+// memory_copy.wast:481
+assert_return(() => call($9, "load8_u", [21909]), 0);
+
+// memory_copy.wast:482
+assert_return(() => call($9, "load8_u", [22108]), 0);
+
+// memory_copy.wast:483
+assert_return(() => call($9, "load8_u", [22307]), 0);
+
+// memory_copy.wast:484
+assert_return(() => call($9, "load8_u", [22506]), 0);
+
+// memory_copy.wast:485
+assert_return(() => call($9, "load8_u", [22705]), 0);
+
+// memory_copy.wast:486
+assert_return(() => call($9, "load8_u", [22904]), 0);
+
+// memory_copy.wast:487
+assert_return(() => call($9, "load8_u", [23103]), 0);
+
+// memory_copy.wast:488
+assert_return(() => call($9, "load8_u", [23302]), 0);
+
+// memory_copy.wast:489
+assert_return(() => call($9, "load8_u", [23501]), 0);
+
+// memory_copy.wast:490
+assert_return(() => call($9, "load8_u", [23700]), 0);
+
+// memory_copy.wast:491
+assert_return(() => call($9, "load8_u", [23899]), 0);
+
+// memory_copy.wast:492
+assert_return(() => call($9, "load8_u", [24098]), 0);
+
+// memory_copy.wast:493
+assert_return(() => call($9, "load8_u", [24297]), 0);
+
+// memory_copy.wast:494
+assert_return(() => call($9, "load8_u", [24496]), 0);
+
+// memory_copy.wast:495
+assert_return(() => call($9, "load8_u", [24695]), 0);
+
+// memory_copy.wast:496
+assert_return(() => call($9, "load8_u", [24894]), 0);
+
+// memory_copy.wast:497
+assert_return(() => call($9, "load8_u", [25093]), 0);
+
+// memory_copy.wast:498
+assert_return(() => call($9, "load8_u", [25292]), 0);
+
+// memory_copy.wast:499
+assert_return(() => call($9, "load8_u", [25491]), 0);
+
+// memory_copy.wast:500
+assert_return(() => call($9, "load8_u", [25690]), 0);
+
+// memory_copy.wast:501
+assert_return(() => call($9, "load8_u", [25889]), 0);
+
+// memory_copy.wast:502
+assert_return(() => call($9, "load8_u", [26088]), 0);
+
+// memory_copy.wast:503
+assert_return(() => call($9, "load8_u", [26287]), 0);
+
+// memory_copy.wast:504
+assert_return(() => call($9, "load8_u", [26486]), 0);
+
+// memory_copy.wast:505
+assert_return(() => call($9, "load8_u", [26685]), 0);
+
+// memory_copy.wast:506
+assert_return(() => call($9, "load8_u", [26884]), 0);
+
+// memory_copy.wast:507
+assert_return(() => call($9, "load8_u", [27083]), 0);
+
+// memory_copy.wast:508
+assert_return(() => call($9, "load8_u", [27282]), 0);
+
+// memory_copy.wast:509
+assert_return(() => call($9, "load8_u", [27481]), 0);
+
+// memory_copy.wast:510
+assert_return(() => call($9, "load8_u", [27680]), 0);
+
+// memory_copy.wast:511
+assert_return(() => call($9, "load8_u", [27879]), 0);
+
+// memory_copy.wast:512
+assert_return(() => call($9, "load8_u", [28078]), 0);
+
+// memory_copy.wast:513
+assert_return(() => call($9, "load8_u", [28277]), 0);
+
+// memory_copy.wast:514
+assert_return(() => call($9, "load8_u", [28476]), 0);
+
+// memory_copy.wast:515
+assert_return(() => call($9, "load8_u", [28675]), 0);
+
+// memory_copy.wast:516
+assert_return(() => call($9, "load8_u", [28874]), 0);
+
+// memory_copy.wast:517
+assert_return(() => call($9, "load8_u", [29073]), 0);
+
+// memory_copy.wast:518
+assert_return(() => call($9, "load8_u", [29272]), 0);
+
+// memory_copy.wast:519
+assert_return(() => call($9, "load8_u", [29471]), 0);
+
+// memory_copy.wast:520
+assert_return(() => call($9, "load8_u", [29670]), 0);
+
+// memory_copy.wast:521
+assert_return(() => call($9, "load8_u", [29869]), 0);
+
+// memory_copy.wast:522
+assert_return(() => call($9, "load8_u", [30068]), 0);
+
+// memory_copy.wast:523
+assert_return(() => call($9, "load8_u", [30267]), 0);
+
+// memory_copy.wast:524
+assert_return(() => call($9, "load8_u", [30466]), 0);
+
+// memory_copy.wast:525
+assert_return(() => call($9, "load8_u", [30665]), 0);
+
+// memory_copy.wast:526
+assert_return(() => call($9, "load8_u", [30864]), 0);
+
+// memory_copy.wast:527
+assert_return(() => call($9, "load8_u", [31063]), 0);
+
+// memory_copy.wast:528
+assert_return(() => call($9, "load8_u", [31262]), 0);
+
+// memory_copy.wast:529
+assert_return(() => call($9, "load8_u", [31461]), 0);
+
+// memory_copy.wast:530
+assert_return(() => call($9, "load8_u", [31660]), 0);
+
+// memory_copy.wast:531
+assert_return(() => call($9, "load8_u", [31859]), 0);
+
+// memory_copy.wast:532
+assert_return(() => call($9, "load8_u", [32058]), 0);
+
+// memory_copy.wast:533
+assert_return(() => call($9, "load8_u", [32257]), 0);
+
+// memory_copy.wast:534
+assert_return(() => call($9, "load8_u", [32456]), 0);
+
+// memory_copy.wast:535
+assert_return(() => call($9, "load8_u", [32655]), 0);
+
+// memory_copy.wast:536
+assert_return(() => call($9, "load8_u", [32854]), 0);
+
+// memory_copy.wast:537
+assert_return(() => call($9, "load8_u", [33053]), 0);
+
+// memory_copy.wast:538
+assert_return(() => call($9, "load8_u", [33252]), 0);
+
+// memory_copy.wast:539
+assert_return(() => call($9, "load8_u", [33451]), 0);
+
+// memory_copy.wast:540
+assert_return(() => call($9, "load8_u", [33650]), 0);
+
+// memory_copy.wast:541
+assert_return(() => call($9, "load8_u", [33849]), 0);
+
+// memory_copy.wast:542
+assert_return(() => call($9, "load8_u", [34048]), 0);
+
+// memory_copy.wast:543
+assert_return(() => call($9, "load8_u", [34247]), 0);
+
+// memory_copy.wast:544
+assert_return(() => call($9, "load8_u", [34446]), 0);
+
+// memory_copy.wast:545
+assert_return(() => call($9, "load8_u", [34645]), 0);
+
+// memory_copy.wast:546
+assert_return(() => call($9, "load8_u", [34844]), 0);
+
+// memory_copy.wast:547
+assert_return(() => call($9, "load8_u", [35043]), 0);
+
+// memory_copy.wast:548
+assert_return(() => call($9, "load8_u", [35242]), 0);
+
+// memory_copy.wast:549
+assert_return(() => call($9, "load8_u", [35441]), 0);
+
+// memory_copy.wast:550
+assert_return(() => call($9, "load8_u", [35640]), 0);
+
+// memory_copy.wast:551
+assert_return(() => call($9, "load8_u", [35839]), 0);
+
+// memory_copy.wast:552
+assert_return(() => call($9, "load8_u", [36038]), 0);
+
+// memory_copy.wast:553
+assert_return(() => call($9, "load8_u", [36237]), 0);
+
+// memory_copy.wast:554
+assert_return(() => call($9, "load8_u", [36436]), 0);
+
+// memory_copy.wast:555
+assert_return(() => call($9, "load8_u", [36635]), 0);
+
+// memory_copy.wast:556
+assert_return(() => call($9, "load8_u", [36834]), 0);
+
+// memory_copy.wast:557
+assert_return(() => call($9, "load8_u", [37033]), 0);
+
+// memory_copy.wast:558
+assert_return(() => call($9, "load8_u", [37232]), 0);
+
+// memory_copy.wast:559
+assert_return(() => call($9, "load8_u", [37431]), 0);
+
+// memory_copy.wast:560
+assert_return(() => call($9, "load8_u", [37630]), 0);
+
+// memory_copy.wast:561
+assert_return(() => call($9, "load8_u", [37829]), 0);
+
+// memory_copy.wast:562
+assert_return(() => call($9, "load8_u", [38028]), 0);
+
+// memory_copy.wast:563
+assert_return(() => call($9, "load8_u", [38227]), 0);
+
+// memory_copy.wast:564
+assert_return(() => call($9, "load8_u", [38426]), 0);
+
+// memory_copy.wast:565
+assert_return(() => call($9, "load8_u", [38625]), 0);
+
+// memory_copy.wast:566
+assert_return(() => call($9, "load8_u", [38824]), 0);
+
+// memory_copy.wast:567
+assert_return(() => call($9, "load8_u", [39023]), 0);
+
+// memory_copy.wast:568
+assert_return(() => call($9, "load8_u", [39222]), 0);
+
+// memory_copy.wast:569
+assert_return(() => call($9, "load8_u", [39421]), 0);
+
+// memory_copy.wast:570
+assert_return(() => call($9, "load8_u", [39620]), 0);
+
+// memory_copy.wast:571
+assert_return(() => call($9, "load8_u", [39819]), 0);
+
+// memory_copy.wast:572
+assert_return(() => call($9, "load8_u", [40018]), 0);
+
+// memory_copy.wast:573
+assert_return(() => call($9, "load8_u", [40217]), 0);
+
+// memory_copy.wast:574
+assert_return(() => call($9, "load8_u", [40416]), 0);
+
+// memory_copy.wast:575
+assert_return(() => call($9, "load8_u", [40615]), 0);
+
+// memory_copy.wast:576
+assert_return(() => call($9, "load8_u", [40814]), 0);
+
+// memory_copy.wast:577
+assert_return(() => call($9, "load8_u", [41013]), 0);
+
+// memory_copy.wast:578
+assert_return(() => call($9, "load8_u", [41212]), 0);
+
+// memory_copy.wast:579
+assert_return(() => call($9, "load8_u", [41411]), 0);
+
+// memory_copy.wast:580
+assert_return(() => call($9, "load8_u", [41610]), 0);
+
+// memory_copy.wast:581
+assert_return(() => call($9, "load8_u", [41809]), 0);
+
+// memory_copy.wast:582
+assert_return(() => call($9, "load8_u", [42008]), 0);
+
+// memory_copy.wast:583
+assert_return(() => call($9, "load8_u", [42207]), 0);
+
+// memory_copy.wast:584
+assert_return(() => call($9, "load8_u", [42406]), 0);
+
+// memory_copy.wast:585
+assert_return(() => call($9, "load8_u", [42605]), 0);
+
+// memory_copy.wast:586
+assert_return(() => call($9, "load8_u", [42804]), 0);
+
+// memory_copy.wast:587
+assert_return(() => call($9, "load8_u", [43003]), 0);
+
+// memory_copy.wast:588
+assert_return(() => call($9, "load8_u", [43202]), 0);
+
+// memory_copy.wast:589
+assert_return(() => call($9, "load8_u", [43401]), 0);
+
+// memory_copy.wast:590
+assert_return(() => call($9, "load8_u", [43600]), 0);
+
+// memory_copy.wast:591
+assert_return(() => call($9, "load8_u", [43799]), 0);
+
+// memory_copy.wast:592
+assert_return(() => call($9, "load8_u", [43998]), 0);
+
+// memory_copy.wast:593
+assert_return(() => call($9, "load8_u", [44197]), 0);
+
+// memory_copy.wast:594
+assert_return(() => call($9, "load8_u", [44396]), 0);
+
+// memory_copy.wast:595
+assert_return(() => call($9, "load8_u", [44595]), 0);
+
+// memory_copy.wast:596
+assert_return(() => call($9, "load8_u", [44794]), 0);
+
+// memory_copy.wast:597
+assert_return(() => call($9, "load8_u", [44993]), 0);
+
+// memory_copy.wast:598
+assert_return(() => call($9, "load8_u", [45192]), 0);
+
+// memory_copy.wast:599
+assert_return(() => call($9, "load8_u", [45391]), 0);
+
+// memory_copy.wast:600
+assert_return(() => call($9, "load8_u", [45590]), 0);
+
+// memory_copy.wast:601
+assert_return(() => call($9, "load8_u", [45789]), 0);
+
+// memory_copy.wast:602
+assert_return(() => call($9, "load8_u", [45988]), 0);
+
+// memory_copy.wast:603
+assert_return(() => call($9, "load8_u", [46187]), 0);
+
+// memory_copy.wast:604
+assert_return(() => call($9, "load8_u", [46386]), 0);
+
+// memory_copy.wast:605
+assert_return(() => call($9, "load8_u", [46585]), 0);
+
+// memory_copy.wast:606
+assert_return(() => call($9, "load8_u", [46784]), 0);
+
+// memory_copy.wast:607
+assert_return(() => call($9, "load8_u", [46983]), 0);
+
+// memory_copy.wast:608
+assert_return(() => call($9, "load8_u", [47182]), 0);
+
+// memory_copy.wast:609
+assert_return(() => call($9, "load8_u", [47381]), 0);
+
+// memory_copy.wast:610
+assert_return(() => call($9, "load8_u", [47580]), 0);
+
+// memory_copy.wast:611
+assert_return(() => call($9, "load8_u", [47779]), 0);
+
+// memory_copy.wast:612
+assert_return(() => call($9, "load8_u", [47978]), 0);
+
+// memory_copy.wast:613
+assert_return(() => call($9, "load8_u", [48177]), 0);
+
+// memory_copy.wast:614
+assert_return(() => call($9, "load8_u", [48376]), 0);
+
+// memory_copy.wast:615
+assert_return(() => call($9, "load8_u", [48575]), 0);
+
+// memory_copy.wast:616
+assert_return(() => call($9, "load8_u", [48774]), 0);
+
+// memory_copy.wast:617
+assert_return(() => call($9, "load8_u", [48973]), 0);
+
+// memory_copy.wast:618
+assert_return(() => call($9, "load8_u", [49172]), 0);
+
+// memory_copy.wast:619
+assert_return(() => call($9, "load8_u", [49371]), 0);
+
+// memory_copy.wast:620
+assert_return(() => call($9, "load8_u", [49570]), 0);
+
+// memory_copy.wast:621
+assert_return(() => call($9, "load8_u", [49769]), 0);
+
+// memory_copy.wast:622
+assert_return(() => call($9, "load8_u", [49968]), 0);
+
+// memory_copy.wast:623
+assert_return(() => call($9, "load8_u", [50167]), 0);
+
+// memory_copy.wast:624
+assert_return(() => call($9, "load8_u", [50366]), 0);
+
+// memory_copy.wast:625
+assert_return(() => call($9, "load8_u", [50565]), 0);
+
+// memory_copy.wast:626
+assert_return(() => call($9, "load8_u", [50764]), 0);
+
+// memory_copy.wast:627
+assert_return(() => call($9, "load8_u", [50963]), 0);
+
+// memory_copy.wast:628
+assert_return(() => call($9, "load8_u", [51162]), 0);
+
+// memory_copy.wast:629
+assert_return(() => call($9, "load8_u", [51361]), 0);
+
+// memory_copy.wast:630
+assert_return(() => call($9, "load8_u", [51560]), 0);
+
+// memory_copy.wast:631
+assert_return(() => call($9, "load8_u", [51759]), 0);
+
+// memory_copy.wast:632
+assert_return(() => call($9, "load8_u", [51958]), 0);
+
+// memory_copy.wast:633
+assert_return(() => call($9, "load8_u", [52157]), 0);
+
+// memory_copy.wast:634
+assert_return(() => call($9, "load8_u", [52356]), 0);
+
+// memory_copy.wast:635
+assert_return(() => call($9, "load8_u", [52555]), 0);
+
+// memory_copy.wast:636
+assert_return(() => call($9, "load8_u", [52754]), 0);
+
+// memory_copy.wast:637
+assert_return(() => call($9, "load8_u", [52953]), 0);
+
+// memory_copy.wast:638
+assert_return(() => call($9, "load8_u", [53152]), 0);
+
+// memory_copy.wast:639
+assert_return(() => call($9, "load8_u", [53351]), 0);
+
+// memory_copy.wast:640
+assert_return(() => call($9, "load8_u", [53550]), 0);
+
+// memory_copy.wast:641
+assert_return(() => call($9, "load8_u", [53749]), 0);
+
+// memory_copy.wast:642
+assert_return(() => call($9, "load8_u", [53948]), 0);
+
+// memory_copy.wast:643
+assert_return(() => call($9, "load8_u", [54147]), 0);
+
+// memory_copy.wast:644
+assert_return(() => call($9, "load8_u", [54346]), 0);
+
+// memory_copy.wast:645
+assert_return(() => call($9, "load8_u", [54545]), 0);
+
+// memory_copy.wast:646
+assert_return(() => call($9, "load8_u", [54744]), 0);
+
+// memory_copy.wast:647
+assert_return(() => call($9, "load8_u", [54943]), 0);
+
+// memory_copy.wast:648
+assert_return(() => call($9, "load8_u", [55142]), 0);
+
+// memory_copy.wast:649
+assert_return(() => call($9, "load8_u", [55341]), 0);
+
+// memory_copy.wast:650
+assert_return(() => call($9, "load8_u", [55540]), 0);
+
+// memory_copy.wast:651
+assert_return(() => call($9, "load8_u", [55739]), 0);
+
+// memory_copy.wast:652
+assert_return(() => call($9, "load8_u", [55938]), 0);
+
+// memory_copy.wast:653
+assert_return(() => call($9, "load8_u", [56137]), 0);
+
+// memory_copy.wast:654
+assert_return(() => call($9, "load8_u", [56336]), 0);
+
+// memory_copy.wast:655
+assert_return(() => call($9, "load8_u", [56535]), 0);
+
+// memory_copy.wast:656
+assert_return(() => call($9, "load8_u", [56734]), 0);
+
+// memory_copy.wast:657
+assert_return(() => call($9, "load8_u", [56933]), 0);
+
+// memory_copy.wast:658
+assert_return(() => call($9, "load8_u", [57132]), 0);
+
+// memory_copy.wast:659
+assert_return(() => call($9, "load8_u", [57331]), 0);
+
+// memory_copy.wast:660
+assert_return(() => call($9, "load8_u", [57530]), 0);
+
+// memory_copy.wast:661
+assert_return(() => call($9, "load8_u", [57729]), 0);
+
+// memory_copy.wast:662
+assert_return(() => call($9, "load8_u", [57928]), 0);
+
+// memory_copy.wast:663
+assert_return(() => call($9, "load8_u", [58127]), 0);
+
+// memory_copy.wast:664
+assert_return(() => call($9, "load8_u", [58326]), 0);
+
+// memory_copy.wast:665
+assert_return(() => call($9, "load8_u", [58525]), 0);
+
+// memory_copy.wast:666
+assert_return(() => call($9, "load8_u", [58724]), 0);
+
+// memory_copy.wast:667
+assert_return(() => call($9, "load8_u", [58923]), 0);
+
+// memory_copy.wast:668
+assert_return(() => call($9, "load8_u", [59122]), 0);
+
+// memory_copy.wast:669
+assert_return(() => call($9, "load8_u", [59321]), 0);
+
+// memory_copy.wast:670
+assert_return(() => call($9, "load8_u", [59520]), 0);
+
+// memory_copy.wast:671
+assert_return(() => call($9, "load8_u", [59719]), 0);
+
+// memory_copy.wast:672
+assert_return(() => call($9, "load8_u", [59918]), 0);
+
+// memory_copy.wast:673
+assert_return(() => call($9, "load8_u", [60117]), 0);
+
+// memory_copy.wast:674
+assert_return(() => call($9, "load8_u", [60316]), 0);
+
+// memory_copy.wast:675
+assert_return(() => call($9, "load8_u", [60515]), 0);
+
+// memory_copy.wast:676
+assert_return(() => call($9, "load8_u", [60714]), 0);
+
+// memory_copy.wast:677
+assert_return(() => call($9, "load8_u", [60913]), 0);
+
+// memory_copy.wast:678
+assert_return(() => call($9, "load8_u", [61112]), 0);
+
+// memory_copy.wast:679
+assert_return(() => call($9, "load8_u", [61311]), 0);
+
+// memory_copy.wast:680
+assert_return(() => call($9, "load8_u", [61510]), 0);
+
+// memory_copy.wast:681
+assert_return(() => call($9, "load8_u", [61709]), 0);
+
+// memory_copy.wast:682
+assert_return(() => call($9, "load8_u", [61908]), 0);
+
+// memory_copy.wast:683
+assert_return(() => call($9, "load8_u", [62107]), 0);
+
+// memory_copy.wast:684
+assert_return(() => call($9, "load8_u", [62306]), 0);
+
+// memory_copy.wast:685
+assert_return(() => call($9, "load8_u", [62505]), 0);
+
+// memory_copy.wast:686
+assert_return(() => call($9, "load8_u", [62704]), 0);
+
+// memory_copy.wast:687
+assert_return(() => call($9, "load8_u", [62903]), 0);
+
+// memory_copy.wast:688
+assert_return(() => call($9, "load8_u", [63102]), 0);
+
+// memory_copy.wast:689
+assert_return(() => call($9, "load8_u", [63301]), 0);
+
+// memory_copy.wast:690
+assert_return(() => call($9, "load8_u", [63500]), 0);
+
+// memory_copy.wast:691
+assert_return(() => call($9, "load8_u", [63699]), 0);
+
+// memory_copy.wast:692
+assert_return(() => call($9, "load8_u", [63898]), 0);
+
+// memory_copy.wast:693
+assert_return(() => call($9, "load8_u", [64097]), 0);
+
+// memory_copy.wast:694
+assert_return(() => call($9, "load8_u", [64296]), 0);
+
+// memory_copy.wast:695
+assert_return(() => call($9, "load8_u", [64495]), 0);
+
+// memory_copy.wast:696
+assert_return(() => call($9, "load8_u", [64694]), 0);
+
+// memory_copy.wast:697
+assert_return(() => call($9, "load8_u", [64893]), 0);
+
+// memory_copy.wast:698
+assert_return(() => call($9, "load8_u", [65092]), 0);
+
+// memory_copy.wast:699
+assert_return(() => call($9, "load8_u", [65291]), 0);
+
+// memory_copy.wast:700
+assert_return(() => call($9, "load8_u", [65490]), 0);
+
+// memory_copy.wast:701
+assert_return(() => call($9, "load8_u", [65516]), 0);
+
+// memory_copy.wast:702
+assert_return(() => call($9, "load8_u", [65517]), 1);
+
+// memory_copy.wast:703
+assert_return(() => call($9, "load8_u", [65518]), 2);
+
+// memory_copy.wast:704
+assert_return(() => call($9, "load8_u", [65519]), 3);
+
+// memory_copy.wast:705
+assert_return(() => call($9, "load8_u", [65520]), 4);
+
+// memory_copy.wast:706
+assert_return(() => call($9, "load8_u", [65521]), 5);
+
+// memory_copy.wast:707
+assert_return(() => call($9, "load8_u", [65522]), 6);
+
+// memory_copy.wast:708
+assert_return(() => call($9, "load8_u", [65523]), 7);
+
+// memory_copy.wast:709
+assert_return(() => call($9, "load8_u", [65524]), 8);
+
+// memory_copy.wast:710
+assert_return(() => call($9, "load8_u", [65525]), 9);
+
+// memory_copy.wast:711
+assert_return(() => call($9, "load8_u", [65526]), 10);
+
+// memory_copy.wast:712
+assert_return(() => call($9, "load8_u", [65527]), 11);
+
+// memory_copy.wast:713
+assert_return(() => call($9, "load8_u", [65528]), 12);
+
+// memory_copy.wast:714
+assert_return(() => call($9, "load8_u", [65529]), 13);
+
+// memory_copy.wast:715
+assert_return(() => call($9, "load8_u", [65530]), 14);
+
+// memory_copy.wast:716
+assert_return(() => call($9, "load8_u", [65531]), 15);
+
+// memory_copy.wast:717
+assert_return(() => call($9, "load8_u", [65532]), 16);
+
+// memory_copy.wast:718
+assert_return(() => call($9, "load8_u", [65533]), 17);
+
+// memory_copy.wast:719
+assert_return(() => call($9, "load8_u", [65534]), 18);
+
+// memory_copy.wast:720
+assert_return(() => call($9, "load8_u", [65535]), 19);
+
+// memory_copy.wast:722
+let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9b\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x15\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14");
+
+// memory_copy.wast:730
+assert_trap(() => call($10, "run", [65515, 0, 39]));
+
+// memory_copy.wast:733
+assert_return(() => call($10, "load8_u", [0]), 0);
+
+// memory_copy.wast:734
+assert_return(() => call($10, "load8_u", [1]), 1);
+
+// memory_copy.wast:735
+assert_return(() => call($10, "load8_u", [2]), 2);
+
+// memory_copy.wast:736
+assert_return(() => call($10, "load8_u", [3]), 3);
+
+// memory_copy.wast:737
+assert_return(() => call($10, "load8_u", [4]), 4);
+
+// memory_copy.wast:738
+assert_return(() => call($10, "load8_u", [5]), 5);
+
+// memory_copy.wast:739
+assert_return(() => call($10, "load8_u", [6]), 6);
+
+// memory_copy.wast:740
+assert_return(() => call($10, "load8_u", [7]), 7);
+
+// memory_copy.wast:741
+assert_return(() => call($10, "load8_u", [8]), 8);
+
+// memory_copy.wast:742
+assert_return(() => call($10, "load8_u", [9]), 9);
+
+// memory_copy.wast:743
+assert_return(() => call($10, "load8_u", [10]), 10);
+
+// memory_copy.wast:744
+assert_return(() => call($10, "load8_u", [11]), 11);
+
+// memory_copy.wast:745
+assert_return(() => call($10, "load8_u", [12]), 12);
+
+// memory_copy.wast:746
+assert_return(() => call($10, "load8_u", [13]), 13);
+
+// memory_copy.wast:747
+assert_return(() => call($10, "load8_u", [14]), 14);
+
+// memory_copy.wast:748
+assert_return(() => call($10, "load8_u", [15]), 15);
+
+// memory_copy.wast:749
+assert_return(() => call($10, "load8_u", [16]), 16);
+
+// memory_copy.wast:750
+assert_return(() => call($10, "load8_u", [17]), 17);
+
+// memory_copy.wast:751
+assert_return(() => call($10, "load8_u", [18]), 18);
+
+// memory_copy.wast:752
+assert_return(() => call($10, "load8_u", [19]), 19);
+
+// memory_copy.wast:753
+assert_return(() => call($10, "load8_u", [20]), 20);
+
+// memory_copy.wast:754
+assert_return(() => call($10, "load8_u", [219]), 0);
+
+// memory_copy.wast:755
+assert_return(() => call($10, "load8_u", [418]), 0);
+
+// memory_copy.wast:756
+assert_return(() => call($10, "load8_u", [617]), 0);
+
+// memory_copy.wast:757
+assert_return(() => call($10, "load8_u", [816]), 0);
+
+// memory_copy.wast:758
+assert_return(() => call($10, "load8_u", [1015]), 0);
+
+// memory_copy.wast:759
+assert_return(() => call($10, "load8_u", [1214]), 0);
+
+// memory_copy.wast:760
+assert_return(() => call($10, "load8_u", [1413]), 0);
+
+// memory_copy.wast:761
+assert_return(() => call($10, "load8_u", [1612]), 0);
+
+// memory_copy.wast:762
+assert_return(() => call($10, "load8_u", [1811]), 0);
+
+// memory_copy.wast:763
+assert_return(() => call($10, "load8_u", [2010]), 0);
+
+// memory_copy.wast:764
+assert_return(() => call($10, "load8_u", [2209]), 0);
+
+// memory_copy.wast:765
+assert_return(() => call($10, "load8_u", [2408]), 0);
+
+// memory_copy.wast:766
+assert_return(() => call($10, "load8_u", [2607]), 0);
+
+// memory_copy.wast:767
+assert_return(() => call($10, "load8_u", [2806]), 0);
+
+// memory_copy.wast:768
+assert_return(() => call($10, "load8_u", [3005]), 0);
+
+// memory_copy.wast:769
+assert_return(() => call($10, "load8_u", [3204]), 0);
+
+// memory_copy.wast:770
+assert_return(() => call($10, "load8_u", [3403]), 0);
+
+// memory_copy.wast:771
+assert_return(() => call($10, "load8_u", [3602]), 0);
+
+// memory_copy.wast:772
+assert_return(() => call($10, "load8_u", [3801]), 0);
+
+// memory_copy.wast:773
+assert_return(() => call($10, "load8_u", [4000]), 0);
+
+// memory_copy.wast:774
+assert_return(() => call($10, "load8_u", [4199]), 0);
+
+// memory_copy.wast:775
+assert_return(() => call($10, "load8_u", [4398]), 0);
+
+// memory_copy.wast:776
+assert_return(() => call($10, "load8_u", [4597]), 0);
+
+// memory_copy.wast:777
+assert_return(() => call($10, "load8_u", [4796]), 0);
+
+// memory_copy.wast:778
+assert_return(() => call($10, "load8_u", [4995]), 0);
+
+// memory_copy.wast:779
+assert_return(() => call($10, "load8_u", [5194]), 0);
+
+// memory_copy.wast:780
+assert_return(() => call($10, "load8_u", [5393]), 0);
+
+// memory_copy.wast:781
+assert_return(() => call($10, "load8_u", [5592]), 0);
+
+// memory_copy.wast:782
+assert_return(() => call($10, "load8_u", [5791]), 0);
+
+// memory_copy.wast:783
+assert_return(() => call($10, "load8_u", [5990]), 0);
+
+// memory_copy.wast:784
+assert_return(() => call($10, "load8_u", [6189]), 0);
+
+// memory_copy.wast:785
+assert_return(() => call($10, "load8_u", [6388]), 0);
+
+// memory_copy.wast:786
+assert_return(() => call($10, "load8_u", [6587]), 0);
+
+// memory_copy.wast:787
+assert_return(() => call($10, "load8_u", [6786]), 0);
+
+// memory_copy.wast:788
+assert_return(() => call($10, "load8_u", [6985]), 0);
+
+// memory_copy.wast:789
+assert_return(() => call($10, "load8_u", [7184]), 0);
+
+// memory_copy.wast:790
+assert_return(() => call($10, "load8_u", [7383]), 0);
+
+// memory_copy.wast:791
+assert_return(() => call($10, "load8_u", [7582]), 0);
+
+// memory_copy.wast:792
+assert_return(() => call($10, "load8_u", [7781]), 0);
+
+// memory_copy.wast:793
+assert_return(() => call($10, "load8_u", [7980]), 0);
+
+// memory_copy.wast:794
+assert_return(() => call($10, "load8_u", [8179]), 0);
+
+// memory_copy.wast:795
+assert_return(() => call($10, "load8_u", [8378]), 0);
+
+// memory_copy.wast:796
+assert_return(() => call($10, "load8_u", [8577]), 0);
+
+// memory_copy.wast:797
+assert_return(() => call($10, "load8_u", [8776]), 0);
+
+// memory_copy.wast:798
+assert_return(() => call($10, "load8_u", [8975]), 0);
+
+// memory_copy.wast:799
+assert_return(() => call($10, "load8_u", [9174]), 0);
+
+// memory_copy.wast:800
+assert_return(() => call($10, "load8_u", [9373]), 0);
+
+// memory_copy.wast:801
+assert_return(() => call($10, "load8_u", [9572]), 0);
+
+// memory_copy.wast:802
+assert_return(() => call($10, "load8_u", [9771]), 0);
+
+// memory_copy.wast:803
+assert_return(() => call($10, "load8_u", [9970]), 0);
+
+// memory_copy.wast:804
+assert_return(() => call($10, "load8_u", [10169]), 0);
+
+// memory_copy.wast:805
+assert_return(() => call($10, "load8_u", [10368]), 0);
+
+// memory_copy.wast:806
+assert_return(() => call($10, "load8_u", [10567]), 0);
+
+// memory_copy.wast:807
+assert_return(() => call($10, "load8_u", [10766]), 0);
+
+// memory_copy.wast:808
+assert_return(() => call($10, "load8_u", [10965]), 0);
+
+// memory_copy.wast:809
+assert_return(() => call($10, "load8_u", [11164]), 0);
+
+// memory_copy.wast:810
+assert_return(() => call($10, "load8_u", [11363]), 0);
+
+// memory_copy.wast:811
+assert_return(() => call($10, "load8_u", [11562]), 0);
+
+// memory_copy.wast:812
+assert_return(() => call($10, "load8_u", [11761]), 0);
+
+// memory_copy.wast:813
+assert_return(() => call($10, "load8_u", [11960]), 0);
+
+// memory_copy.wast:814
+assert_return(() => call($10, "load8_u", [12159]), 0);
+
+// memory_copy.wast:815
+assert_return(() => call($10, "load8_u", [12358]), 0);
+
+// memory_copy.wast:816
+assert_return(() => call($10, "load8_u", [12557]), 0);
+
+// memory_copy.wast:817
+assert_return(() => call($10, "load8_u", [12756]), 0);
+
+// memory_copy.wast:818
+assert_return(() => call($10, "load8_u", [12955]), 0);
+
+// memory_copy.wast:819
+assert_return(() => call($10, "load8_u", [13154]), 0);
+
+// memory_copy.wast:820
+assert_return(() => call($10, "load8_u", [13353]), 0);
+
+// memory_copy.wast:821
+assert_return(() => call($10, "load8_u", [13552]), 0);
+
+// memory_copy.wast:822
+assert_return(() => call($10, "load8_u", [13751]), 0);
+
+// memory_copy.wast:823
+assert_return(() => call($10, "load8_u", [13950]), 0);
+
+// memory_copy.wast:824
+assert_return(() => call($10, "load8_u", [14149]), 0);
+
+// memory_copy.wast:825
+assert_return(() => call($10, "load8_u", [14348]), 0);
+
+// memory_copy.wast:826
+assert_return(() => call($10, "load8_u", [14547]), 0);
+
+// memory_copy.wast:827
+assert_return(() => call($10, "load8_u", [14746]), 0);
+
+// memory_copy.wast:828
+assert_return(() => call($10, "load8_u", [14945]), 0);
+
+// memory_copy.wast:829
+assert_return(() => call($10, "load8_u", [15144]), 0);
+
+// memory_copy.wast:830
+assert_return(() => call($10, "load8_u", [15343]), 0);
+
+// memory_copy.wast:831
+assert_return(() => call($10, "load8_u", [15542]), 0);
+
+// memory_copy.wast:832
+assert_return(() => call($10, "load8_u", [15741]), 0);
+
+// memory_copy.wast:833
+assert_return(() => call($10, "load8_u", [15940]), 0);
+
+// memory_copy.wast:834
+assert_return(() => call($10, "load8_u", [16139]), 0);
+
+// memory_copy.wast:835
+assert_return(() => call($10, "load8_u", [16338]), 0);
+
+// memory_copy.wast:836
+assert_return(() => call($10, "load8_u", [16537]), 0);
+
+// memory_copy.wast:837
+assert_return(() => call($10, "load8_u", [16736]), 0);
+
+// memory_copy.wast:838
+assert_return(() => call($10, "load8_u", [16935]), 0);
+
+// memory_copy.wast:839
+assert_return(() => call($10, "load8_u", [17134]), 0);
+
+// memory_copy.wast:840
+assert_return(() => call($10, "load8_u", [17333]), 0);
+
+// memory_copy.wast:841
+assert_return(() => call($10, "load8_u", [17532]), 0);
+
+// memory_copy.wast:842
+assert_return(() => call($10, "load8_u", [17731]), 0);
+
+// memory_copy.wast:843
+assert_return(() => call($10, "load8_u", [17930]), 0);
+
+// memory_copy.wast:844
+assert_return(() => call($10, "load8_u", [18129]), 0);
+
+// memory_copy.wast:845
+assert_return(() => call($10, "load8_u", [18328]), 0);
+
+// memory_copy.wast:846
+assert_return(() => call($10, "load8_u", [18527]), 0);
+
+// memory_copy.wast:847
+assert_return(() => call($10, "load8_u", [18726]), 0);
+
+// memory_copy.wast:848
+assert_return(() => call($10, "load8_u", [18925]), 0);
+
+// memory_copy.wast:849
+assert_return(() => call($10, "load8_u", [19124]), 0);
+
+// memory_copy.wast:850
+assert_return(() => call($10, "load8_u", [19323]), 0);
+
+// memory_copy.wast:851
+assert_return(() => call($10, "load8_u", [19522]), 0);
+
+// memory_copy.wast:852
+assert_return(() => call($10, "load8_u", [19721]), 0);
+
+// memory_copy.wast:853
+assert_return(() => call($10, "load8_u", [19920]), 0);
+
+// memory_copy.wast:854
+assert_return(() => call($10, "load8_u", [20119]), 0);
+
+// memory_copy.wast:855
+assert_return(() => call($10, "load8_u", [20318]), 0);
+
+// memory_copy.wast:856
+assert_return(() => call($10, "load8_u", [20517]), 0);
+
+// memory_copy.wast:857
+assert_return(() => call($10, "load8_u", [20716]), 0);
+
+// memory_copy.wast:858
+assert_return(() => call($10, "load8_u", [20915]), 0);
+
+// memory_copy.wast:859
+assert_return(() => call($10, "load8_u", [21114]), 0);
+
+// memory_copy.wast:860
+assert_return(() => call($10, "load8_u", [21313]), 0);
+
+// memory_copy.wast:861
+assert_return(() => call($10, "load8_u", [21512]), 0);
+
+// memory_copy.wast:862
+assert_return(() => call($10, "load8_u", [21711]), 0);
+
+// memory_copy.wast:863
+assert_return(() => call($10, "load8_u", [21910]), 0);
+
+// memory_copy.wast:864
+assert_return(() => call($10, "load8_u", [22109]), 0);
+
+// memory_copy.wast:865
+assert_return(() => call($10, "load8_u", [22308]), 0);
+
+// memory_copy.wast:866
+assert_return(() => call($10, "load8_u", [22507]), 0);
+
+// memory_copy.wast:867
+assert_return(() => call($10, "load8_u", [22706]), 0);
+
+// memory_copy.wast:868
+assert_return(() => call($10, "load8_u", [22905]), 0);
+
+// memory_copy.wast:869
+assert_return(() => call($10, "load8_u", [23104]), 0);
+
+// memory_copy.wast:870
+assert_return(() => call($10, "load8_u", [23303]), 0);
+
+// memory_copy.wast:871
+assert_return(() => call($10, "load8_u", [23502]), 0);
+
+// memory_copy.wast:872
+assert_return(() => call($10, "load8_u", [23701]), 0);
+
+// memory_copy.wast:873
+assert_return(() => call($10, "load8_u", [23900]), 0);
+
+// memory_copy.wast:874
+assert_return(() => call($10, "load8_u", [24099]), 0);
+
+// memory_copy.wast:875
+assert_return(() => call($10, "load8_u", [24298]), 0);
+
+// memory_copy.wast:876
+assert_return(() => call($10, "load8_u", [24497]), 0);
+
+// memory_copy.wast:877
+assert_return(() => call($10, "load8_u", [24696]), 0);
+
+// memory_copy.wast:878
+assert_return(() => call($10, "load8_u", [24895]), 0);
+
+// memory_copy.wast:879
+assert_return(() => call($10, "load8_u", [25094]), 0);
+
+// memory_copy.wast:880
+assert_return(() => call($10, "load8_u", [25293]), 0);
+
+// memory_copy.wast:881
+assert_return(() => call($10, "load8_u", [25492]), 0);
+
+// memory_copy.wast:882
+assert_return(() => call($10, "load8_u", [25691]), 0);
+
+// memory_copy.wast:883
+assert_return(() => call($10, "load8_u", [25890]), 0);
+
+// memory_copy.wast:884
+assert_return(() => call($10, "load8_u", [26089]), 0);
+
+// memory_copy.wast:885
+assert_return(() => call($10, "load8_u", [26288]), 0);
+
+// memory_copy.wast:886
+assert_return(() => call($10, "load8_u", [26487]), 0);
+
+// memory_copy.wast:887
+assert_return(() => call($10, "load8_u", [26686]), 0);
+
+// memory_copy.wast:888
+assert_return(() => call($10, "load8_u", [26885]), 0);
+
+// memory_copy.wast:889
+assert_return(() => call($10, "load8_u", [27084]), 0);
+
+// memory_copy.wast:890
+assert_return(() => call($10, "load8_u", [27283]), 0);
+
+// memory_copy.wast:891
+assert_return(() => call($10, "load8_u", [27482]), 0);
+
+// memory_copy.wast:892
+assert_return(() => call($10, "load8_u", [27681]), 0);
+
+// memory_copy.wast:893
+assert_return(() => call($10, "load8_u", [27880]), 0);
+
+// memory_copy.wast:894
+assert_return(() => call($10, "load8_u", [28079]), 0);
+
+// memory_copy.wast:895
+assert_return(() => call($10, "load8_u", [28278]), 0);
+
+// memory_copy.wast:896
+assert_return(() => call($10, "load8_u", [28477]), 0);
+
+// memory_copy.wast:897
+assert_return(() => call($10, "load8_u", [28676]), 0);
+
+// memory_copy.wast:898
+assert_return(() => call($10, "load8_u", [28875]), 0);
+
+// memory_copy.wast:899
+assert_return(() => call($10, "load8_u", [29074]), 0);
+
+// memory_copy.wast:900
+assert_return(() => call($10, "load8_u", [29273]), 0);
+
+// memory_copy.wast:901
+assert_return(() => call($10, "load8_u", [29472]), 0);
+
+// memory_copy.wast:902
+assert_return(() => call($10, "load8_u", [29671]), 0);
+
+// memory_copy.wast:903
+assert_return(() => call($10, "load8_u", [29870]), 0);
+
+// memory_copy.wast:904
+assert_return(() => call($10, "load8_u", [30069]), 0);
+
+// memory_copy.wast:905
+assert_return(() => call($10, "load8_u", [30268]), 0);
+
+// memory_copy.wast:906
+assert_return(() => call($10, "load8_u", [30467]), 0);
+
+// memory_copy.wast:907
+assert_return(() => call($10, "load8_u", [30666]), 0);
+
+// memory_copy.wast:908
+assert_return(() => call($10, "load8_u", [30865]), 0);
+
+// memory_copy.wast:909
+assert_return(() => call($10, "load8_u", [31064]), 0);
+
+// memory_copy.wast:910
+assert_return(() => call($10, "load8_u", [31263]), 0);
+
+// memory_copy.wast:911
+assert_return(() => call($10, "load8_u", [31462]), 0);
+
+// memory_copy.wast:912
+assert_return(() => call($10, "load8_u", [31661]), 0);
+
+// memory_copy.wast:913
+assert_return(() => call($10, "load8_u", [31860]), 0);
+
+// memory_copy.wast:914
+assert_return(() => call($10, "load8_u", [32059]), 0);
+
+// memory_copy.wast:915
+assert_return(() => call($10, "load8_u", [32258]), 0);
+
+// memory_copy.wast:916
+assert_return(() => call($10, "load8_u", [32457]), 0);
+
+// memory_copy.wast:917
+assert_return(() => call($10, "load8_u", [32656]), 0);
+
+// memory_copy.wast:918
+assert_return(() => call($10, "load8_u", [32855]), 0);
+
+// memory_copy.wast:919
+assert_return(() => call($10, "load8_u", [33054]), 0);
+
+// memory_copy.wast:920
+assert_return(() => call($10, "load8_u", [33253]), 0);
+
+// memory_copy.wast:921
+assert_return(() => call($10, "load8_u", [33452]), 0);
+
+// memory_copy.wast:922
+assert_return(() => call($10, "load8_u", [33651]), 0);
+
+// memory_copy.wast:923
+assert_return(() => call($10, "load8_u", [33850]), 0);
+
+// memory_copy.wast:924
+assert_return(() => call($10, "load8_u", [34049]), 0);
+
+// memory_copy.wast:925
+assert_return(() => call($10, "load8_u", [34248]), 0);
+
+// memory_copy.wast:926
+assert_return(() => call($10, "load8_u", [34447]), 0);
+
+// memory_copy.wast:927
+assert_return(() => call($10, "load8_u", [34646]), 0);
+
+// memory_copy.wast:928
+assert_return(() => call($10, "load8_u", [34845]), 0);
+
+// memory_copy.wast:929
+assert_return(() => call($10, "load8_u", [35044]), 0);
+
+// memory_copy.wast:930
+assert_return(() => call($10, "load8_u", [35243]), 0);
+
+// memory_copy.wast:931
+assert_return(() => call($10, "load8_u", [35442]), 0);
+
+// memory_copy.wast:932
+assert_return(() => call($10, "load8_u", [35641]), 0);
+
+// memory_copy.wast:933
+assert_return(() => call($10, "load8_u", [35840]), 0);
+
+// memory_copy.wast:934
+assert_return(() => call($10, "load8_u", [36039]), 0);
+
+// memory_copy.wast:935
+assert_return(() => call($10, "load8_u", [36238]), 0);
+
+// memory_copy.wast:936
+assert_return(() => call($10, "load8_u", [36437]), 0);
+
+// memory_copy.wast:937
+assert_return(() => call($10, "load8_u", [36636]), 0);
+
+// memory_copy.wast:938
+assert_return(() => call($10, "load8_u", [36835]), 0);
+
+// memory_copy.wast:939
+assert_return(() => call($10, "load8_u", [37034]), 0);
+
+// memory_copy.wast:940
+assert_return(() => call($10, "load8_u", [37233]), 0);
+
+// memory_copy.wast:941
+assert_return(() => call($10, "load8_u", [37432]), 0);
+
+// memory_copy.wast:942
+assert_return(() => call($10, "load8_u", [37631]), 0);
+
+// memory_copy.wast:943
+assert_return(() => call($10, "load8_u", [37830]), 0);
+
+// memory_copy.wast:944
+assert_return(() => call($10, "load8_u", [38029]), 0);
+
+// memory_copy.wast:945
+assert_return(() => call($10, "load8_u", [38228]), 0);
+
+// memory_copy.wast:946
+assert_return(() => call($10, "load8_u", [38427]), 0);
+
+// memory_copy.wast:947
+assert_return(() => call($10, "load8_u", [38626]), 0);
+
+// memory_copy.wast:948
+assert_return(() => call($10, "load8_u", [38825]), 0);
+
+// memory_copy.wast:949
+assert_return(() => call($10, "load8_u", [39024]), 0);
+
+// memory_copy.wast:950
+assert_return(() => call($10, "load8_u", [39223]), 0);
+
+// memory_copy.wast:951
+assert_return(() => call($10, "load8_u", [39422]), 0);
+
+// memory_copy.wast:952
+assert_return(() => call($10, "load8_u", [39621]), 0);
+
+// memory_copy.wast:953
+assert_return(() => call($10, "load8_u", [39820]), 0);
+
+// memory_copy.wast:954
+assert_return(() => call($10, "load8_u", [40019]), 0);
+
+// memory_copy.wast:955
+assert_return(() => call($10, "load8_u", [40218]), 0);
+
+// memory_copy.wast:956
+assert_return(() => call($10, "load8_u", [40417]), 0);
+
+// memory_copy.wast:957
+assert_return(() => call($10, "load8_u", [40616]), 0);
+
+// memory_copy.wast:958
+assert_return(() => call($10, "load8_u", [40815]), 0);
+
+// memory_copy.wast:959
+assert_return(() => call($10, "load8_u", [41014]), 0);
+
+// memory_copy.wast:960
+assert_return(() => call($10, "load8_u", [41213]), 0);
+
+// memory_copy.wast:961
+assert_return(() => call($10, "load8_u", [41412]), 0);
+
+// memory_copy.wast:962
+assert_return(() => call($10, "load8_u", [41611]), 0);
+
+// memory_copy.wast:963
+assert_return(() => call($10, "load8_u", [41810]), 0);
+
+// memory_copy.wast:964
+assert_return(() => call($10, "load8_u", [42009]), 0);
+
+// memory_copy.wast:965
+assert_return(() => call($10, "load8_u", [42208]), 0);
+
+// memory_copy.wast:966
+assert_return(() => call($10, "load8_u", [42407]), 0);
+
+// memory_copy.wast:967
+assert_return(() => call($10, "load8_u", [42606]), 0);
+
+// memory_copy.wast:968
+assert_return(() => call($10, "load8_u", [42805]), 0);
+
+// memory_copy.wast:969
+assert_return(() => call($10, "load8_u", [43004]), 0);
+
+// memory_copy.wast:970
+assert_return(() => call($10, "load8_u", [43203]), 0);
+
+// memory_copy.wast:971
+assert_return(() => call($10, "load8_u", [43402]), 0);
+
+// memory_copy.wast:972
+assert_return(() => call($10, "load8_u", [43601]), 0);
+
+// memory_copy.wast:973
+assert_return(() => call($10, "load8_u", [43800]), 0);
+
+// memory_copy.wast:974
+assert_return(() => call($10, "load8_u", [43999]), 0);
+
+// memory_copy.wast:975
+assert_return(() => call($10, "load8_u", [44198]), 0);
+
+// memory_copy.wast:976
+assert_return(() => call($10, "load8_u", [44397]), 0);
+
+// memory_copy.wast:977
+assert_return(() => call($10, "load8_u", [44596]), 0);
+
+// memory_copy.wast:978
+assert_return(() => call($10, "load8_u", [44795]), 0);
+
+// memory_copy.wast:979
+assert_return(() => call($10, "load8_u", [44994]), 0);
+
+// memory_copy.wast:980
+assert_return(() => call($10, "load8_u", [45193]), 0);
+
+// memory_copy.wast:981
+assert_return(() => call($10, "load8_u", [45392]), 0);
+
+// memory_copy.wast:982
+assert_return(() => call($10, "load8_u", [45591]), 0);
+
+// memory_copy.wast:983
+assert_return(() => call($10, "load8_u", [45790]), 0);
+
+// memory_copy.wast:984
+assert_return(() => call($10, "load8_u", [45989]), 0);
+
+// memory_copy.wast:985
+assert_return(() => call($10, "load8_u", [46188]), 0);
+
+// memory_copy.wast:986
+assert_return(() => call($10, "load8_u", [46387]), 0);
+
+// memory_copy.wast:987
+assert_return(() => call($10, "load8_u", [46586]), 0);
+
+// memory_copy.wast:988
+assert_return(() => call($10, "load8_u", [46785]), 0);
+
+// memory_copy.wast:989
+assert_return(() => call($10, "load8_u", [46984]), 0);
+
+// memory_copy.wast:990
+assert_return(() => call($10, "load8_u", [47183]), 0);
+
+// memory_copy.wast:991
+assert_return(() => call($10, "load8_u", [47382]), 0);
+
+// memory_copy.wast:992
+assert_return(() => call($10, "load8_u", [47581]), 0);
+
+// memory_copy.wast:993
+assert_return(() => call($10, "load8_u", [47780]), 0);
+
+// memory_copy.wast:994
+assert_return(() => call($10, "load8_u", [47979]), 0);
+
+// memory_copy.wast:995
+assert_return(() => call($10, "load8_u", [48178]), 0);
+
+// memory_copy.wast:996
+assert_return(() => call($10, "load8_u", [48377]), 0);
+
+// memory_copy.wast:997
+assert_return(() => call($10, "load8_u", [48576]), 0);
+
+// memory_copy.wast:998
+assert_return(() => call($10, "load8_u", [48775]), 0);
+
+// memory_copy.wast:999
+assert_return(() => call($10, "load8_u", [48974]), 0);
+
+// memory_copy.wast:1000
+assert_return(() => call($10, "load8_u", [49173]), 0);
+
+// memory_copy.wast:1001
+assert_return(() => call($10, "load8_u", [49372]), 0);
+
+// memory_copy.wast:1002
+assert_return(() => call($10, "load8_u", [49571]), 0);
+
+// memory_copy.wast:1003
+assert_return(() => call($10, "load8_u", [49770]), 0);
+
+// memory_copy.wast:1004
+assert_return(() => call($10, "load8_u", [49969]), 0);
+
+// memory_copy.wast:1005
+assert_return(() => call($10, "load8_u", [50168]), 0);
+
+// memory_copy.wast:1006
+assert_return(() => call($10, "load8_u", [50367]), 0);
+
+// memory_copy.wast:1007
+assert_return(() => call($10, "load8_u", [50566]), 0);
+
+// memory_copy.wast:1008
+assert_return(() => call($10, "load8_u", [50765]), 0);
+
+// memory_copy.wast:1009
+assert_return(() => call($10, "load8_u", [50964]), 0);
+
+// memory_copy.wast:1010
+assert_return(() => call($10, "load8_u", [51163]), 0);
+
+// memory_copy.wast:1011
+assert_return(() => call($10, "load8_u", [51362]), 0);
+
+// memory_copy.wast:1012
+assert_return(() => call($10, "load8_u", [51561]), 0);
+
+// memory_copy.wast:1013
+assert_return(() => call($10, "load8_u", [51760]), 0);
+
+// memory_copy.wast:1014
+assert_return(() => call($10, "load8_u", [51959]), 0);
+
+// memory_copy.wast:1015
+assert_return(() => call($10, "load8_u", [52158]), 0);
+
+// memory_copy.wast:1016
+assert_return(() => call($10, "load8_u", [52357]), 0);
+
+// memory_copy.wast:1017
+assert_return(() => call($10, "load8_u", [52556]), 0);
+
+// memory_copy.wast:1018
+assert_return(() => call($10, "load8_u", [52755]), 0);
+
+// memory_copy.wast:1019
+assert_return(() => call($10, "load8_u", [52954]), 0);
+
+// memory_copy.wast:1020
+assert_return(() => call($10, "load8_u", [53153]), 0);
+
+// memory_copy.wast:1021
+assert_return(() => call($10, "load8_u", [53352]), 0);
+
+// memory_copy.wast:1022
+assert_return(() => call($10, "load8_u", [53551]), 0);
+
+// memory_copy.wast:1023
+assert_return(() => call($10, "load8_u", [53750]), 0);
+
+// memory_copy.wast:1024
+assert_return(() => call($10, "load8_u", [53949]), 0);
+
+// memory_copy.wast:1025
+assert_return(() => call($10, "load8_u", [54148]), 0);
+
+// memory_copy.wast:1026
+assert_return(() => call($10, "load8_u", [54347]), 0);
+
+// memory_copy.wast:1027
+assert_return(() => call($10, "load8_u", [54546]), 0);
+
+// memory_copy.wast:1028
+assert_return(() => call($10, "load8_u", [54745]), 0);
+
+// memory_copy.wast:1029
+assert_return(() => call($10, "load8_u", [54944]), 0);
+
+// memory_copy.wast:1030
+assert_return(() => call($10, "load8_u", [55143]), 0);
+
+// memory_copy.wast:1031
+assert_return(() => call($10, "load8_u", [55342]), 0);
+
+// memory_copy.wast:1032
+assert_return(() => call($10, "load8_u", [55541]), 0);
+
+// memory_copy.wast:1033
+assert_return(() => call($10, "load8_u", [55740]), 0);
+
+// memory_copy.wast:1034
+assert_return(() => call($10, "load8_u", [55939]), 0);
+
+// memory_copy.wast:1035
+assert_return(() => call($10, "load8_u", [56138]), 0);
+
+// memory_copy.wast:1036
+assert_return(() => call($10, "load8_u", [56337]), 0);
+
+// memory_copy.wast:1037
+assert_return(() => call($10, "load8_u", [56536]), 0);
+
+// memory_copy.wast:1038
+assert_return(() => call($10, "load8_u", [56735]), 0);
+
+// memory_copy.wast:1039
+assert_return(() => call($10, "load8_u", [56934]), 0);
+
+// memory_copy.wast:1040
+assert_return(() => call($10, "load8_u", [57133]), 0);
+
+// memory_copy.wast:1041
+assert_return(() => call($10, "load8_u", [57332]), 0);
+
+// memory_copy.wast:1042
+assert_return(() => call($10, "load8_u", [57531]), 0);
+
+// memory_copy.wast:1043
+assert_return(() => call($10, "load8_u", [57730]), 0);
+
+// memory_copy.wast:1044
+assert_return(() => call($10, "load8_u", [57929]), 0);
+
+// memory_copy.wast:1045
+assert_return(() => call($10, "load8_u", [58128]), 0);
+
+// memory_copy.wast:1046
+assert_return(() => call($10, "load8_u", [58327]), 0);
+
+// memory_copy.wast:1047
+assert_return(() => call($10, "load8_u", [58526]), 0);
+
+// memory_copy.wast:1048
+assert_return(() => call($10, "load8_u", [58725]), 0);
+
+// memory_copy.wast:1049
+assert_return(() => call($10, "load8_u", [58924]), 0);
+
+// memory_copy.wast:1050
+assert_return(() => call($10, "load8_u", [59123]), 0);
+
+// memory_copy.wast:1051
+assert_return(() => call($10, "load8_u", [59322]), 0);
+
+// memory_copy.wast:1052
+assert_return(() => call($10, "load8_u", [59521]), 0);
+
+// memory_copy.wast:1053
+assert_return(() => call($10, "load8_u", [59720]), 0);
+
+// memory_copy.wast:1054
+assert_return(() => call($10, "load8_u", [59919]), 0);
+
+// memory_copy.wast:1055
+assert_return(() => call($10, "load8_u", [60118]), 0);
+
+// memory_copy.wast:1056
+assert_return(() => call($10, "load8_u", [60317]), 0);
+
+// memory_copy.wast:1057
+assert_return(() => call($10, "load8_u", [60516]), 0);
+
+// memory_copy.wast:1058
+assert_return(() => call($10, "load8_u", [60715]), 0);
+
+// memory_copy.wast:1059
+assert_return(() => call($10, "load8_u", [60914]), 0);
+
+// memory_copy.wast:1060
+assert_return(() => call($10, "load8_u", [61113]), 0);
+
+// memory_copy.wast:1061
+assert_return(() => call($10, "load8_u", [61312]), 0);
+
+// memory_copy.wast:1062
+assert_return(() => call($10, "load8_u", [61511]), 0);
+
+// memory_copy.wast:1063
+assert_return(() => call($10, "load8_u", [61710]), 0);
+
+// memory_copy.wast:1064
+assert_return(() => call($10, "load8_u", [61909]), 0);
+
+// memory_copy.wast:1065
+assert_return(() => call($10, "load8_u", [62108]), 0);
+
+// memory_copy.wast:1066
+assert_return(() => call($10, "load8_u", [62307]), 0);
+
+// memory_copy.wast:1067
+assert_return(() => call($10, "load8_u", [62506]), 0);
+
+// memory_copy.wast:1068
+assert_return(() => call($10, "load8_u", [62705]), 0);
+
+// memory_copy.wast:1069
+assert_return(() => call($10, "load8_u", [62904]), 0);
+
+// memory_copy.wast:1070
+assert_return(() => call($10, "load8_u", [63103]), 0);
+
+// memory_copy.wast:1071
+assert_return(() => call($10, "load8_u", [63302]), 0);
+
+// memory_copy.wast:1072
+assert_return(() => call($10, "load8_u", [63501]), 0);
+
+// memory_copy.wast:1073
+assert_return(() => call($10, "load8_u", [63700]), 0);
+
+// memory_copy.wast:1074
+assert_return(() => call($10, "load8_u", [63899]), 0);
+
+// memory_copy.wast:1075
+assert_return(() => call($10, "load8_u", [64098]), 0);
+
+// memory_copy.wast:1076
+assert_return(() => call($10, "load8_u", [64297]), 0);
+
+// memory_copy.wast:1077
+assert_return(() => call($10, "load8_u", [64496]), 0);
+
+// memory_copy.wast:1078
+assert_return(() => call($10, "load8_u", [64695]), 0);
+
+// memory_copy.wast:1079
+assert_return(() => call($10, "load8_u", [64894]), 0);
+
+// memory_copy.wast:1080
+assert_return(() => call($10, "load8_u", [65093]), 0);
+
+// memory_copy.wast:1081
+assert_return(() => call($10, "load8_u", [65292]), 0);
+
+// memory_copy.wast:1082
+assert_return(() => call($10, "load8_u", [65491]), 0);
+
+// memory_copy.wast:1083
+assert_return(() => call($10, "load8_u", [65515]), 0);
+
+// memory_copy.wast:1084
+assert_return(() => call($10, "load8_u", [65516]), 1);
+
+// memory_copy.wast:1085
+assert_return(() => call($10, "load8_u", [65517]), 2);
+
+// memory_copy.wast:1086
+assert_return(() => call($10, "load8_u", [65518]), 3);
+
+// memory_copy.wast:1087
+assert_return(() => call($10, "load8_u", [65519]), 4);
+
+// memory_copy.wast:1088
+assert_return(() => call($10, "load8_u", [65520]), 5);
+
+// memory_copy.wast:1089
+assert_return(() => call($10, "load8_u", [65521]), 6);
+
+// memory_copy.wast:1090
+assert_return(() => call($10, "load8_u", [65522]), 7);
+
+// memory_copy.wast:1091
+assert_return(() => call($10, "load8_u", [65523]), 8);
+
+// memory_copy.wast:1092
+assert_return(() => call($10, "load8_u", [65524]), 9);
+
+// memory_copy.wast:1093
+assert_return(() => call($10, "load8_u", [65525]), 10);
+
+// memory_copy.wast:1094
+assert_return(() => call($10, "load8_u", [65526]), 11);
+
+// memory_copy.wast:1095
+assert_return(() => call($10, "load8_u", [65527]), 12);
+
+// memory_copy.wast:1096
+assert_return(() => call($10, "load8_u", [65528]), 13);
+
+// memory_copy.wast:1097
+assert_return(() => call($10, "load8_u", [65529]), 14);
+
+// memory_copy.wast:1098
+assert_return(() => call($10, "load8_u", [65530]), 15);
+
+// memory_copy.wast:1099
+assert_return(() => call($10, "load8_u", [65531]), 16);
+
+// memory_copy.wast:1100
+assert_return(() => call($10, "load8_u", [65532]), 17);
+
+// memory_copy.wast:1101
+assert_return(() => call($10, "load8_u", [65533]), 18);
+
+// memory_copy.wast:1102
+assert_return(() => call($10, "load8_u", [65534]), 19);
+
+// memory_copy.wast:1103
+assert_return(() => call($10, "load8_u", [65535]), 20);
+
+// memory_copy.wast:1105
+let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
+
+// memory_copy.wast:1113
+assert_trap(() => call($11, "run", [0, 65516, 40]));
+
+// memory_copy.wast:1116
+assert_return(() => call($11, "load8_u", [0]), 0);
+
+// memory_copy.wast:1117
+assert_return(() => call($11, "load8_u", [1]), 1);
+
+// memory_copy.wast:1118
+assert_return(() => call($11, "load8_u", [2]), 2);
+
+// memory_copy.wast:1119
+assert_return(() => call($11, "load8_u", [3]), 3);
+
+// memory_copy.wast:1120
+assert_return(() => call($11, "load8_u", [4]), 4);
+
+// memory_copy.wast:1121
+assert_return(() => call($11, "load8_u", [5]), 5);
+
+// memory_copy.wast:1122
+assert_return(() => call($11, "load8_u", [6]), 6);
+
+// memory_copy.wast:1123
+assert_return(() => call($11, "load8_u", [7]), 7);
+
+// memory_copy.wast:1124
+assert_return(() => call($11, "load8_u", [8]), 8);
+
+// memory_copy.wast:1125
+assert_return(() => call($11, "load8_u", [9]), 9);
+
+// memory_copy.wast:1126
+assert_return(() => call($11, "load8_u", [10]), 10);
+
+// memory_copy.wast:1127
+assert_return(() => call($11, "load8_u", [11]), 11);
+
+// memory_copy.wast:1128
+assert_return(() => call($11, "load8_u", [12]), 12);
+
+// memory_copy.wast:1129
+assert_return(() => call($11, "load8_u", [13]), 13);
+
+// memory_copy.wast:1130
+assert_return(() => call($11, "load8_u", [14]), 14);
+
+// memory_copy.wast:1131
+assert_return(() => call($11, "load8_u", [15]), 15);
+
+// memory_copy.wast:1132
+assert_return(() => call($11, "load8_u", [16]), 16);
+
+// memory_copy.wast:1133
+assert_return(() => call($11, "load8_u", [17]), 17);
+
+// memory_copy.wast:1134
+assert_return(() => call($11, "load8_u", [18]), 18);
+
+// memory_copy.wast:1135
+assert_return(() => call($11, "load8_u", [19]), 19);
+
+// memory_copy.wast:1136
+assert_return(() => call($11, "load8_u", [218]), 0);
+
+// memory_copy.wast:1137
+assert_return(() => call($11, "load8_u", [417]), 0);
+
+// memory_copy.wast:1138
+assert_return(() => call($11, "load8_u", [616]), 0);
+
+// memory_copy.wast:1139
+assert_return(() => call($11, "load8_u", [815]), 0);
+
+// memory_copy.wast:1140
+assert_return(() => call($11, "load8_u", [1014]), 0);
+
+// memory_copy.wast:1141
+assert_return(() => call($11, "load8_u", [1213]), 0);
+
+// memory_copy.wast:1142
+assert_return(() => call($11, "load8_u", [1412]), 0);
+
+// memory_copy.wast:1143
+assert_return(() => call($11, "load8_u", [1611]), 0);
+
+// memory_copy.wast:1144
+assert_return(() => call($11, "load8_u", [1810]), 0);
+
+// memory_copy.wast:1145
+assert_return(() => call($11, "load8_u", [2009]), 0);
+
+// memory_copy.wast:1146
+assert_return(() => call($11, "load8_u", [2208]), 0);
+
+// memory_copy.wast:1147
+assert_return(() => call($11, "load8_u", [2407]), 0);
+
+// memory_copy.wast:1148
+assert_return(() => call($11, "load8_u", [2606]), 0);
+
+// memory_copy.wast:1149
+assert_return(() => call($11, "load8_u", [2805]), 0);
+
+// memory_copy.wast:1150
+assert_return(() => call($11, "load8_u", [3004]), 0);
+
+// memory_copy.wast:1151
+assert_return(() => call($11, "load8_u", [3203]), 0);
+
+// memory_copy.wast:1152
+assert_return(() => call($11, "load8_u", [3402]), 0);
+
+// memory_copy.wast:1153
+assert_return(() => call($11, "load8_u", [3601]), 0);
+
+// memory_copy.wast:1154
+assert_return(() => call($11, "load8_u", [3800]), 0);
+
+// memory_copy.wast:1155
+assert_return(() => call($11, "load8_u", [3999]), 0);
+
+// memory_copy.wast:1156
+assert_return(() => call($11, "load8_u", [4198]), 0);
+
+// memory_copy.wast:1157
+assert_return(() => call($11, "load8_u", [4397]), 0);
+
+// memory_copy.wast:1158
+assert_return(() => call($11, "load8_u", [4596]), 0);
+
+// memory_copy.wast:1159
+assert_return(() => call($11, "load8_u", [4795]), 0);
+
+// memory_copy.wast:1160
+assert_return(() => call($11, "load8_u", [4994]), 0);
+
+// memory_copy.wast:1161
+assert_return(() => call($11, "load8_u", [5193]), 0);
+
+// memory_copy.wast:1162
+assert_return(() => call($11, "load8_u", [5392]), 0);
+
+// memory_copy.wast:1163
+assert_return(() => call($11, "load8_u", [5591]), 0);
+
+// memory_copy.wast:1164
+assert_return(() => call($11, "load8_u", [5790]), 0);
+
+// memory_copy.wast:1165
+assert_return(() => call($11, "load8_u", [5989]), 0);
+
+// memory_copy.wast:1166
+assert_return(() => call($11, "load8_u", [6188]), 0);
+
+// memory_copy.wast:1167
+assert_return(() => call($11, "load8_u", [6387]), 0);
+
+// memory_copy.wast:1168
+assert_return(() => call($11, "load8_u", [6586]), 0);
+
+// memory_copy.wast:1169
+assert_return(() => call($11, "load8_u", [6785]), 0);
+
+// memory_copy.wast:1170
+assert_return(() => call($11, "load8_u", [6984]), 0);
+
+// memory_copy.wast:1171
+assert_return(() => call($11, "load8_u", [7183]), 0);
+
+// memory_copy.wast:1172
+assert_return(() => call($11, "load8_u", [7382]), 0);
+
+// memory_copy.wast:1173
+assert_return(() => call($11, "load8_u", [7581]), 0);
+
+// memory_copy.wast:1174
+assert_return(() => call($11, "load8_u", [7780]), 0);
+
+// memory_copy.wast:1175
+assert_return(() => call($11, "load8_u", [7979]), 0);
+
+// memory_copy.wast:1176
+assert_return(() => call($11, "load8_u", [8178]), 0);
+
+// memory_copy.wast:1177
+assert_return(() => call($11, "load8_u", [8377]), 0);
+
+// memory_copy.wast:1178
+assert_return(() => call($11, "load8_u", [8576]), 0);
+
+// memory_copy.wast:1179
+assert_return(() => call($11, "load8_u", [8775]), 0);
+
+// memory_copy.wast:1180
+assert_return(() => call($11, "load8_u", [8974]), 0);
+
+// memory_copy.wast:1181
+assert_return(() => call($11, "load8_u", [9173]), 0);
+
+// memory_copy.wast:1182
+assert_return(() => call($11, "load8_u", [9372]), 0);
+
+// memory_copy.wast:1183
+assert_return(() => call($11, "load8_u", [9571]), 0);
+
+// memory_copy.wast:1184
+assert_return(() => call($11, "load8_u", [9770]), 0);
+
+// memory_copy.wast:1185
+assert_return(() => call($11, "load8_u", [9969]), 0);
+
+// memory_copy.wast:1186
+assert_return(() => call($11, "load8_u", [10168]), 0);
+
+// memory_copy.wast:1187
+assert_return(() => call($11, "load8_u", [10367]), 0);
+
+// memory_copy.wast:1188
+assert_return(() => call($11, "load8_u", [10566]), 0);
+
+// memory_copy.wast:1189
+assert_return(() => call($11, "load8_u", [10765]), 0);
+
+// memory_copy.wast:1190
+assert_return(() => call($11, "load8_u", [10964]), 0);
+
+// memory_copy.wast:1191
+assert_return(() => call($11, "load8_u", [11163]), 0);
+
+// memory_copy.wast:1192
+assert_return(() => call($11, "load8_u", [11362]), 0);
+
+// memory_copy.wast:1193
+assert_return(() => call($11, "load8_u", [11561]), 0);
+
+// memory_copy.wast:1194
+assert_return(() => call($11, "load8_u", [11760]), 0);
+
+// memory_copy.wast:1195
+assert_return(() => call($11, "load8_u", [11959]), 0);
+
+// memory_copy.wast:1196
+assert_return(() => call($11, "load8_u", [12158]), 0);
+
+// memory_copy.wast:1197
+assert_return(() => call($11, "load8_u", [12357]), 0);
+
+// memory_copy.wast:1198
+assert_return(() => call($11, "load8_u", [12556]), 0);
+
+// memory_copy.wast:1199
+assert_return(() => call($11, "load8_u", [12755]), 0);
+
+// memory_copy.wast:1200
+assert_return(() => call($11, "load8_u", [12954]), 0);
+
+// memory_copy.wast:1201
+assert_return(() => call($11, "load8_u", [13153]), 0);
+
+// memory_copy.wast:1202
+assert_return(() => call($11, "load8_u", [13352]), 0);
+
+// memory_copy.wast:1203
+assert_return(() => call($11, "load8_u", [13551]), 0);
+
+// memory_copy.wast:1204
+assert_return(() => call($11, "load8_u", [13750]), 0);
+
+// memory_copy.wast:1205
+assert_return(() => call($11, "load8_u", [13949]), 0);
+
+// memory_copy.wast:1206
+assert_return(() => call($11, "load8_u", [14148]), 0);
+
+// memory_copy.wast:1207
+assert_return(() => call($11, "load8_u", [14347]), 0);
+
+// memory_copy.wast:1208
+assert_return(() => call($11, "load8_u", [14546]), 0);
+
+// memory_copy.wast:1209
+assert_return(() => call($11, "load8_u", [14745]), 0);
+
+// memory_copy.wast:1210
+assert_return(() => call($11, "load8_u", [14944]), 0);
+
+// memory_copy.wast:1211
+assert_return(() => call($11, "load8_u", [15143]), 0);
+
+// memory_copy.wast:1212
+assert_return(() => call($11, "load8_u", [15342]), 0);
+
+// memory_copy.wast:1213
+assert_return(() => call($11, "load8_u", [15541]), 0);
+
+// memory_copy.wast:1214
+assert_return(() => call($11, "load8_u", [15740]), 0);
+
+// memory_copy.wast:1215
+assert_return(() => call($11, "load8_u", [15939]), 0);
+
+// memory_copy.wast:1216
+assert_return(() => call($11, "load8_u", [16138]), 0);
+
+// memory_copy.wast:1217
+assert_return(() => call($11, "load8_u", [16337]), 0);
+
+// memory_copy.wast:1218
+assert_return(() => call($11, "load8_u", [16536]), 0);
+
+// memory_copy.wast:1219
+assert_return(() => call($11, "load8_u", [16735]), 0);
+
+// memory_copy.wast:1220
+assert_return(() => call($11, "load8_u", [16934]), 0);
+
+// memory_copy.wast:1221
+assert_return(() => call($11, "load8_u", [17133]), 0);
+
+// memory_copy.wast:1222
+assert_return(() => call($11, "load8_u", [17332]), 0);
+
+// memory_copy.wast:1223
+assert_return(() => call($11, "load8_u", [17531]), 0);
+
+// memory_copy.wast:1224
+assert_return(() => call($11, "load8_u", [17730]), 0);
+
+// memory_copy.wast:1225
+assert_return(() => call($11, "load8_u", [17929]), 0);
+
+// memory_copy.wast:1226
+assert_return(() => call($11, "load8_u", [18128]), 0);
+
+// memory_copy.wast:1227
+assert_return(() => call($11, "load8_u", [18327]), 0);
+
+// memory_copy.wast:1228
+assert_return(() => call($11, "load8_u", [18526]), 0);
+
+// memory_copy.wast:1229
+assert_return(() => call($11, "load8_u", [18725]), 0);
+
+// memory_copy.wast:1230
+assert_return(() => call($11, "load8_u", [18924]), 0);
+
+// memory_copy.wast:1231
+assert_return(() => call($11, "load8_u", [19123]), 0);
+
+// memory_copy.wast:1232
+assert_return(() => call($11, "load8_u", [19322]), 0);
+
+// memory_copy.wast:1233
+assert_return(() => call($11, "load8_u", [19521]), 0);
+
+// memory_copy.wast:1234
+assert_return(() => call($11, "load8_u", [19720]), 0);
+
+// memory_copy.wast:1235
+assert_return(() => call($11, "load8_u", [19919]), 0);
+
+// memory_copy.wast:1236
+assert_return(() => call($11, "load8_u", [20118]), 0);
+
+// memory_copy.wast:1237
+assert_return(() => call($11, "load8_u", [20317]), 0);
+
+// memory_copy.wast:1238
+assert_return(() => call($11, "load8_u", [20516]), 0);
+
+// memory_copy.wast:1239
+assert_return(() => call($11, "load8_u", [20715]), 0);
+
+// memory_copy.wast:1240
+assert_return(() => call($11, "load8_u", [20914]), 0);
+
+// memory_copy.wast:1241
+assert_return(() => call($11, "load8_u", [21113]), 0);
+
+// memory_copy.wast:1242
+assert_return(() => call($11, "load8_u", [21312]), 0);
+
+// memory_copy.wast:1243
+assert_return(() => call($11, "load8_u", [21511]), 0);
+
+// memory_copy.wast:1244
+assert_return(() => call($11, "load8_u", [21710]), 0);
+
+// memory_copy.wast:1245
+assert_return(() => call($11, "load8_u", [21909]), 0);
+
+// memory_copy.wast:1246
+assert_return(() => call($11, "load8_u", [22108]), 0);
+
+// memory_copy.wast:1247
+assert_return(() => call($11, "load8_u", [22307]), 0);
+
+// memory_copy.wast:1248
+assert_return(() => call($11, "load8_u", [22506]), 0);
+
+// memory_copy.wast:1249
+assert_return(() => call($11, "load8_u", [22705]), 0);
+
+// memory_copy.wast:1250
+assert_return(() => call($11, "load8_u", [22904]), 0);
+
+// memory_copy.wast:1251
+assert_return(() => call($11, "load8_u", [23103]), 0);
+
+// memory_copy.wast:1252
+assert_return(() => call($11, "load8_u", [23302]), 0);
+
+// memory_copy.wast:1253
+assert_return(() => call($11, "load8_u", [23501]), 0);
+
+// memory_copy.wast:1254
+assert_return(() => call($11, "load8_u", [23700]), 0);
+
+// memory_copy.wast:1255
+assert_return(() => call($11, "load8_u", [23899]), 0);
+
+// memory_copy.wast:1256
+assert_return(() => call($11, "load8_u", [24098]), 0);
+
+// memory_copy.wast:1257
+assert_return(() => call($11, "load8_u", [24297]), 0);
+
+// memory_copy.wast:1258
+assert_return(() => call($11, "load8_u", [24496]), 0);
+
+// memory_copy.wast:1259
+assert_return(() => call($11, "load8_u", [24695]), 0);
+
+// memory_copy.wast:1260
+assert_return(() => call($11, "load8_u", [24894]), 0);
+
+// memory_copy.wast:1261
+assert_return(() => call($11, "load8_u", [25093]), 0);
+
+// memory_copy.wast:1262
+assert_return(() => call($11, "load8_u", [25292]), 0);
+
+// memory_copy.wast:1263
+assert_return(() => call($11, "load8_u", [25491]), 0);
+
+// memory_copy.wast:1264
+assert_return(() => call($11, "load8_u", [25690]), 0);
+
+// memory_copy.wast:1265
+assert_return(() => call($11, "load8_u", [25889]), 0);
+
+// memory_copy.wast:1266
+assert_return(() => call($11, "load8_u", [26088]), 0);
+
+// memory_copy.wast:1267
+assert_return(() => call($11, "load8_u", [26287]), 0);
+
+// memory_copy.wast:1268
+assert_return(() => call($11, "load8_u", [26486]), 0);
+
+// memory_copy.wast:1269
+assert_return(() => call($11, "load8_u", [26685]), 0);
+
+// memory_copy.wast:1270
+assert_return(() => call($11, "load8_u", [26884]), 0);
+
+// memory_copy.wast:1271
+assert_return(() => call($11, "load8_u", [27083]), 0);
+
+// memory_copy.wast:1272
+assert_return(() => call($11, "load8_u", [27282]), 0);
+
+// memory_copy.wast:1273
+assert_return(() => call($11, "load8_u", [27481]), 0);
+
+// memory_copy.wast:1274
+assert_return(() => call($11, "load8_u", [27680]), 0);
+
+// memory_copy.wast:1275
+assert_return(() => call($11, "load8_u", [27879]), 0);
+
+// memory_copy.wast:1276
+assert_return(() => call($11, "load8_u", [28078]), 0);
+
+// memory_copy.wast:1277
+assert_return(() => call($11, "load8_u", [28277]), 0);
+
+// memory_copy.wast:1278
+assert_return(() => call($11, "load8_u", [28476]), 0);
+
+// memory_copy.wast:1279
+assert_return(() => call($11, "load8_u", [28675]), 0);
+
+// memory_copy.wast:1280
+assert_return(() => call($11, "load8_u", [28874]), 0);
+
+// memory_copy.wast:1281
+assert_return(() => call($11, "load8_u", [29073]), 0);
+
+// memory_copy.wast:1282
+assert_return(() => call($11, "load8_u", [29272]), 0);
+
+// memory_copy.wast:1283
+assert_return(() => call($11, "load8_u", [29471]), 0);
+
+// memory_copy.wast:1284
+assert_return(() => call($11, "load8_u", [29670]), 0);
+
+// memory_copy.wast:1285
+assert_return(() => call($11, "load8_u", [29869]), 0);
+
+// memory_copy.wast:1286
+assert_return(() => call($11, "load8_u", [30068]), 0);
+
+// memory_copy.wast:1287
+assert_return(() => call($11, "load8_u", [30267]), 0);
+
+// memory_copy.wast:1288
+assert_return(() => call($11, "load8_u", [30466]), 0);
+
+// memory_copy.wast:1289
+assert_return(() => call($11, "load8_u", [30665]), 0);
+
+// memory_copy.wast:1290
+assert_return(() => call($11, "load8_u", [30864]), 0);
+
+// memory_copy.wast:1291
+assert_return(() => call($11, "load8_u", [31063]), 0);
+
+// memory_copy.wast:1292
+assert_return(() => call($11, "load8_u", [31262]), 0);
+
+// memory_copy.wast:1293
+assert_return(() => call($11, "load8_u", [31461]), 0);
+
+// memory_copy.wast:1294
+assert_return(() => call($11, "load8_u", [31660]), 0);
+
+// memory_copy.wast:1295
+assert_return(() => call($11, "load8_u", [31859]), 0);
+
+// memory_copy.wast:1296
+assert_return(() => call($11, "load8_u", [32058]), 0);
+
+// memory_copy.wast:1297
+assert_return(() => call($11, "load8_u", [32257]), 0);
+
+// memory_copy.wast:1298
+assert_return(() => call($11, "load8_u", [32456]), 0);
+
+// memory_copy.wast:1299
+assert_return(() => call($11, "load8_u", [32655]), 0);
+
+// memory_copy.wast:1300
+assert_return(() => call($11, "load8_u", [32854]), 0);
+
+// memory_copy.wast:1301
+assert_return(() => call($11, "load8_u", [33053]), 0);
+
+// memory_copy.wast:1302
+assert_return(() => call($11, "load8_u", [33252]), 0);
+
+// memory_copy.wast:1303
+assert_return(() => call($11, "load8_u", [33451]), 0);
+
+// memory_copy.wast:1304
+assert_return(() => call($11, "load8_u", [33650]), 0);
+
+// memory_copy.wast:1305
+assert_return(() => call($11, "load8_u", [33849]), 0);
+
+// memory_copy.wast:1306
+assert_return(() => call($11, "load8_u", [34048]), 0);
+
+// memory_copy.wast:1307
+assert_return(() => call($11, "load8_u", [34247]), 0);
+
+// memory_copy.wast:1308
+assert_return(() => call($11, "load8_u", [34446]), 0);
+
+// memory_copy.wast:1309
+assert_return(() => call($11, "load8_u", [34645]), 0);
+
+// memory_copy.wast:1310
+assert_return(() => call($11, "load8_u", [34844]), 0);
+
+// memory_copy.wast:1311
+assert_return(() => call($11, "load8_u", [35043]), 0);
+
+// memory_copy.wast:1312
+assert_return(() => call($11, "load8_u", [35242]), 0);
+
+// memory_copy.wast:1313
+assert_return(() => call($11, "load8_u", [35441]), 0);
+
+// memory_copy.wast:1314
+assert_return(() => call($11, "load8_u", [35640]), 0);
+
+// memory_copy.wast:1315
+assert_return(() => call($11, "load8_u", [35839]), 0);
+
+// memory_copy.wast:1316
+assert_return(() => call($11, "load8_u", [36038]), 0);
+
+// memory_copy.wast:1317
+assert_return(() => call($11, "load8_u", [36237]), 0);
+
+// memory_copy.wast:1318
+assert_return(() => call($11, "load8_u", [36436]), 0);
+
+// memory_copy.wast:1319
+assert_return(() => call($11, "load8_u", [36635]), 0);
+
+// memory_copy.wast:1320
+assert_return(() => call($11, "load8_u", [36834]), 0);
+
+// memory_copy.wast:1321
+assert_return(() => call($11, "load8_u", [37033]), 0);
+
+// memory_copy.wast:1322
+assert_return(() => call($11, "load8_u", [37232]), 0);
+
+// memory_copy.wast:1323
+assert_return(() => call($11, "load8_u", [37431]), 0);
+
+// memory_copy.wast:1324
+assert_return(() => call($11, "load8_u", [37630]), 0);
+
+// memory_copy.wast:1325
+assert_return(() => call($11, "load8_u", [37829]), 0);
+
+// memory_copy.wast:1326
+assert_return(() => call($11, "load8_u", [38028]), 0);
+
+// memory_copy.wast:1327
+assert_return(() => call($11, "load8_u", [38227]), 0);
+
+// memory_copy.wast:1328
+assert_return(() => call($11, "load8_u", [38426]), 0);
+
+// memory_copy.wast:1329
+assert_return(() => call($11, "load8_u", [38625]), 0);
+
+// memory_copy.wast:1330
+assert_return(() => call($11, "load8_u", [38824]), 0);
+
+// memory_copy.wast:1331
+assert_return(() => call($11, "load8_u", [39023]), 0);
+
+// memory_copy.wast:1332
+assert_return(() => call($11, "load8_u", [39222]), 0);
+
+// memory_copy.wast:1333
+assert_return(() => call($11, "load8_u", [39421]), 0);
+
+// memory_copy.wast:1334
+assert_return(() => call($11, "load8_u", [39620]), 0);
+
+// memory_copy.wast:1335
+assert_return(() => call($11, "load8_u", [39819]), 0);
+
+// memory_copy.wast:1336
+assert_return(() => call($11, "load8_u", [40018]), 0);
+
+// memory_copy.wast:1337
+assert_return(() => call($11, "load8_u", [40217]), 0);
+
+// memory_copy.wast:1338
+assert_return(() => call($11, "load8_u", [40416]), 0);
+
+// memory_copy.wast:1339
+assert_return(() => call($11, "load8_u", [40615]), 0);
+
+// memory_copy.wast:1340
+assert_return(() => call($11, "load8_u", [40814]), 0);
+
+// memory_copy.wast:1341
+assert_return(() => call($11, "load8_u", [41013]), 0);
+
+// memory_copy.wast:1342
+assert_return(() => call($11, "load8_u", [41212]), 0);
+
+// memory_copy.wast:1343
+assert_return(() => call($11, "load8_u", [41411]), 0);
+
+// memory_copy.wast:1344
+assert_return(() => call($11, "load8_u", [41610]), 0);
+
+// memory_copy.wast:1345
+assert_return(() => call($11, "load8_u", [41809]), 0);
+
+// memory_copy.wast:1346
+assert_return(() => call($11, "load8_u", [42008]), 0);
+
+// memory_copy.wast:1347
+assert_return(() => call($11, "load8_u", [42207]), 0);
+
+// memory_copy.wast:1348
+assert_return(() => call($11, "load8_u", [42406]), 0);
+
+// memory_copy.wast:1349
+assert_return(() => call($11, "load8_u", [42605]), 0);
+
+// memory_copy.wast:1350
+assert_return(() => call($11, "load8_u", [42804]), 0);
+
+// memory_copy.wast:1351
+assert_return(() => call($11, "load8_u", [43003]), 0);
+
+// memory_copy.wast:1352
+assert_return(() => call($11, "load8_u", [43202]), 0);
+
+// memory_copy.wast:1353
+assert_return(() => call($11, "load8_u", [43401]), 0);
+
+// memory_copy.wast:1354
+assert_return(() => call($11, "load8_u", [43600]), 0);
+
+// memory_copy.wast:1355
+assert_return(() => call($11, "load8_u", [43799]), 0);
+
+// memory_copy.wast:1356
+assert_return(() => call($11, "load8_u", [43998]), 0);
+
+// memory_copy.wast:1357
+assert_return(() => call($11, "load8_u", [44197]), 0);
+
+// memory_copy.wast:1358
+assert_return(() => call($11, "load8_u", [44396]), 0);
+
+// memory_copy.wast:1359
+assert_return(() => call($11, "load8_u", [44595]), 0);
+
+// memory_copy.wast:1360
+assert_return(() => call($11, "load8_u", [44794]), 0);
+
+// memory_copy.wast:1361
+assert_return(() => call($11, "load8_u", [44993]), 0);
+
+// memory_copy.wast:1362
+assert_return(() => call($11, "load8_u", [45192]), 0);
+
+// memory_copy.wast:1363
+assert_return(() => call($11, "load8_u", [45391]), 0);
+
+// memory_copy.wast:1364
+assert_return(() => call($11, "load8_u", [45590]), 0);
+
+// memory_copy.wast:1365
+assert_return(() => call($11, "load8_u", [45789]), 0);
+
+// memory_copy.wast:1366
+assert_return(() => call($11, "load8_u", [45988]), 0);
+
+// memory_copy.wast:1367
+assert_return(() => call($11, "load8_u", [46187]), 0);
+
+// memory_copy.wast:1368
+assert_return(() => call($11, "load8_u", [46386]), 0);
+
+// memory_copy.wast:1369
+assert_return(() => call($11, "load8_u", [46585]), 0);
+
+// memory_copy.wast:1370
+assert_return(() => call($11, "load8_u", [46784]), 0);
+
+// memory_copy.wast:1371
+assert_return(() => call($11, "load8_u", [46983]), 0);
+
+// memory_copy.wast:1372
+assert_return(() => call($11, "load8_u", [47182]), 0);
+
+// memory_copy.wast:1373
+assert_return(() => call($11, "load8_u", [47381]), 0);
+
+// memory_copy.wast:1374
+assert_return(() => call($11, "load8_u", [47580]), 0);
+
+// memory_copy.wast:1375
+assert_return(() => call($11, "load8_u", [47779]), 0);
+
+// memory_copy.wast:1376
+assert_return(() => call($11, "load8_u", [47978]), 0);
+
+// memory_copy.wast:1377
+assert_return(() => call($11, "load8_u", [48177]), 0);
+
+// memory_copy.wast:1378
+assert_return(() => call($11, "load8_u", [48376]), 0);
+
+// memory_copy.wast:1379
+assert_return(() => call($11, "load8_u", [48575]), 0);
+
+// memory_copy.wast:1380
+assert_return(() => call($11, "load8_u", [48774]), 0);
+
+// memory_copy.wast:1381
+assert_return(() => call($11, "load8_u", [48973]), 0);
+
+// memory_copy.wast:1382
+assert_return(() => call($11, "load8_u", [49172]), 0);
+
+// memory_copy.wast:1383
+assert_return(() => call($11, "load8_u", [49371]), 0);
+
+// memory_copy.wast:1384
+assert_return(() => call($11, "load8_u", [49570]), 0);
+
+// memory_copy.wast:1385
+assert_return(() => call($11, "load8_u", [49769]), 0);
+
+// memory_copy.wast:1386
+assert_return(() => call($11, "load8_u", [49968]), 0);
+
+// memory_copy.wast:1387
+assert_return(() => call($11, "load8_u", [50167]), 0);
+
+// memory_copy.wast:1388
+assert_return(() => call($11, "load8_u", [50366]), 0);
+
+// memory_copy.wast:1389
+assert_return(() => call($11, "load8_u", [50565]), 0);
+
+// memory_copy.wast:1390
+assert_return(() => call($11, "load8_u", [50764]), 0);
+
+// memory_copy.wast:1391
+assert_return(() => call($11, "load8_u", [50963]), 0);
+
+// memory_copy.wast:1392
+assert_return(() => call($11, "load8_u", [51162]), 0);
+
+// memory_copy.wast:1393
+assert_return(() => call($11, "load8_u", [51361]), 0);
+
+// memory_copy.wast:1394
+assert_return(() => call($11, "load8_u", [51560]), 0);
+
+// memory_copy.wast:1395
+assert_return(() => call($11, "load8_u", [51759]), 0);
+
+// memory_copy.wast:1396
+assert_return(() => call($11, "load8_u", [51958]), 0);
+
+// memory_copy.wast:1397
+assert_return(() => call($11, "load8_u", [52157]), 0);
+
+// memory_copy.wast:1398
+assert_return(() => call($11, "load8_u", [52356]), 0);
+
+// memory_copy.wast:1399
+assert_return(() => call($11, "load8_u", [52555]), 0);
+
+// memory_copy.wast:1400
+assert_return(() => call($11, "load8_u", [52754]), 0);
+
+// memory_copy.wast:1401
+assert_return(() => call($11, "load8_u", [52953]), 0);
+
+// memory_copy.wast:1402
+assert_return(() => call($11, "load8_u", [53152]), 0);
+
+// memory_copy.wast:1403
+assert_return(() => call($11, "load8_u", [53351]), 0);
+
+// memory_copy.wast:1404
+assert_return(() => call($11, "load8_u", [53550]), 0);
+
+// memory_copy.wast:1405
+assert_return(() => call($11, "load8_u", [53749]), 0);
+
+// memory_copy.wast:1406
+assert_return(() => call($11, "load8_u", [53948]), 0);
+
+// memory_copy.wast:1407
+assert_return(() => call($11, "load8_u", [54147]), 0);
+
+// memory_copy.wast:1408
+assert_return(() => call($11, "load8_u", [54346]), 0);
+
+// memory_copy.wast:1409
+assert_return(() => call($11, "load8_u", [54545]), 0);
+
+// memory_copy.wast:1410
+assert_return(() => call($11, "load8_u", [54744]), 0);
+
+// memory_copy.wast:1411
+assert_return(() => call($11, "load8_u", [54943]), 0);
+
+// memory_copy.wast:1412
+assert_return(() => call($11, "load8_u", [55142]), 0);
+
+// memory_copy.wast:1413
+assert_return(() => call($11, "load8_u", [55341]), 0);
+
+// memory_copy.wast:1414
+assert_return(() => call($11, "load8_u", [55540]), 0);
+
+// memory_copy.wast:1415
+assert_return(() => call($11, "load8_u", [55739]), 0);
+
+// memory_copy.wast:1416
+assert_return(() => call($11, "load8_u", [55938]), 0);
+
+// memory_copy.wast:1417
+assert_return(() => call($11, "load8_u", [56137]), 0);
+
+// memory_copy.wast:1418
+assert_return(() => call($11, "load8_u", [56336]), 0);
+
+// memory_copy.wast:1419
+assert_return(() => call($11, "load8_u", [56535]), 0);
+
+// memory_copy.wast:1420
+assert_return(() => call($11, "load8_u", [56734]), 0);
+
+// memory_copy.wast:1421
+assert_return(() => call($11, "load8_u", [56933]), 0);
+
+// memory_copy.wast:1422
+assert_return(() => call($11, "load8_u", [57132]), 0);
+
+// memory_copy.wast:1423
+assert_return(() => call($11, "load8_u", [57331]), 0);
+
+// memory_copy.wast:1424
+assert_return(() => call($11, "load8_u", [57530]), 0);
+
+// memory_copy.wast:1425
+assert_return(() => call($11, "load8_u", [57729]), 0);
+
+// memory_copy.wast:1426
+assert_return(() => call($11, "load8_u", [57928]), 0);
+
+// memory_copy.wast:1427
+assert_return(() => call($11, "load8_u", [58127]), 0);
+
+// memory_copy.wast:1428
+assert_return(() => call($11, "load8_u", [58326]), 0);
+
+// memory_copy.wast:1429
+assert_return(() => call($11, "load8_u", [58525]), 0);
+
+// memory_copy.wast:1430
+assert_return(() => call($11, "load8_u", [58724]), 0);
+
+// memory_copy.wast:1431
+assert_return(() => call($11, "load8_u", [58923]), 0);
+
+// memory_copy.wast:1432
+assert_return(() => call($11, "load8_u", [59122]), 0);
+
+// memory_copy.wast:1433
+assert_return(() => call($11, "load8_u", [59321]), 0);
+
+// memory_copy.wast:1434
+assert_return(() => call($11, "load8_u", [59520]), 0);
+
+// memory_copy.wast:1435
+assert_return(() => call($11, "load8_u", [59719]), 0);
+
+// memory_copy.wast:1436
+assert_return(() => call($11, "load8_u", [59918]), 0);
+
+// memory_copy.wast:1437
+assert_return(() => call($11, "load8_u", [60117]), 0);
+
+// memory_copy.wast:1438
+assert_return(() => call($11, "load8_u", [60316]), 0);
+
+// memory_copy.wast:1439
+assert_return(() => call($11, "load8_u", [60515]), 0);
+
+// memory_copy.wast:1440
+assert_return(() => call($11, "load8_u", [60714]), 0);
+
+// memory_copy.wast:1441
+assert_return(() => call($11, "load8_u", [60913]), 0);
+
+// memory_copy.wast:1442
+assert_return(() => call($11, "load8_u", [61112]), 0);
+
+// memory_copy.wast:1443
+assert_return(() => call($11, "load8_u", [61311]), 0);
+
+// memory_copy.wast:1444
+assert_return(() => call($11, "load8_u", [61510]), 0);
+
+// memory_copy.wast:1445
+assert_return(() => call($11, "load8_u", [61709]), 0);
+
+// memory_copy.wast:1446
+assert_return(() => call($11, "load8_u", [61908]), 0);
+
+// memory_copy.wast:1447
+assert_return(() => call($11, "load8_u", [62107]), 0);
+
+// memory_copy.wast:1448
+assert_return(() => call($11, "load8_u", [62306]), 0);
+
+// memory_copy.wast:1449
+assert_return(() => call($11, "load8_u", [62505]), 0);
+
+// memory_copy.wast:1450
+assert_return(() => call($11, "load8_u", [62704]), 0);
+
+// memory_copy.wast:1451
+assert_return(() => call($11, "load8_u", [62903]), 0);
+
+// memory_copy.wast:1452
+assert_return(() => call($11, "load8_u", [63102]), 0);
+
+// memory_copy.wast:1453
+assert_return(() => call($11, "load8_u", [63301]), 0);
+
+// memory_copy.wast:1454
+assert_return(() => call($11, "load8_u", [63500]), 0);
+
+// memory_copy.wast:1455
+assert_return(() => call($11, "load8_u", [63699]), 0);
+
+// memory_copy.wast:1456
+assert_return(() => call($11, "load8_u", [63898]), 0);
+
+// memory_copy.wast:1457
+assert_return(() => call($11, "load8_u", [64097]), 0);
+
+// memory_copy.wast:1458
+assert_return(() => call($11, "load8_u", [64296]), 0);
+
+// memory_copy.wast:1459
+assert_return(() => call($11, "load8_u", [64495]), 0);
+
+// memory_copy.wast:1460
+assert_return(() => call($11, "load8_u", [64694]), 0);
+
+// memory_copy.wast:1461
+assert_return(() => call($11, "load8_u", [64893]), 0);
+
+// memory_copy.wast:1462
+assert_return(() => call($11, "load8_u", [65092]), 0);
+
+// memory_copy.wast:1463
+assert_return(() => call($11, "load8_u", [65291]), 0);
+
+// memory_copy.wast:1464
+assert_return(() => call($11, "load8_u", [65490]), 0);
+
+// memory_copy.wast:1465
+assert_return(() => call($11, "load8_u", [65516]), 0);
+
+// memory_copy.wast:1466
+assert_return(() => call($11, "load8_u", [65517]), 1);
+
+// memory_copy.wast:1467
+assert_return(() => call($11, "load8_u", [65518]), 2);
+
+// memory_copy.wast:1468
+assert_return(() => call($11, "load8_u", [65519]), 3);
+
+// memory_copy.wast:1469
+assert_return(() => call($11, "load8_u", [65520]), 4);
+
+// memory_copy.wast:1470
+assert_return(() => call($11, "load8_u", [65521]), 5);
+
+// memory_copy.wast:1471
+assert_return(() => call($11, "load8_u", [65522]), 6);
+
+// memory_copy.wast:1472
+assert_return(() => call($11, "load8_u", [65523]), 7);
+
+// memory_copy.wast:1473
+assert_return(() => call($11, "load8_u", [65524]), 8);
+
+// memory_copy.wast:1474
+assert_return(() => call($11, "load8_u", [65525]), 9);
+
+// memory_copy.wast:1475
+assert_return(() => call($11, "load8_u", [65526]), 10);
+
+// memory_copy.wast:1476
+assert_return(() => call($11, "load8_u", [65527]), 11);
+
+// memory_copy.wast:1477
+assert_return(() => call($11, "load8_u", [65528]), 12);
+
+// memory_copy.wast:1478
+assert_return(() => call($11, "load8_u", [65529]), 13);
+
+// memory_copy.wast:1479
+assert_return(() => call($11, "load8_u", [65530]), 14);
+
+// memory_copy.wast:1480
+assert_return(() => call($11, "load8_u", [65531]), 15);
+
+// memory_copy.wast:1481
+assert_return(() => call($11, "load8_u", [65532]), 16);
+
+// memory_copy.wast:1482
+assert_return(() => call($11, "load8_u", [65533]), 17);
+
+// memory_copy.wast:1483
+assert_return(() => call($11, "load8_u", [65534]), 18);
+
+// memory_copy.wast:1484
+assert_return(() => call($11, "load8_u", [65535]), 19);
+
+// memory_copy.wast:1486
+let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9d\x80\x80\x80\x00\x01\x00\x41\xeb\xff\x03\x0b\x15\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14");
+
+// memory_copy.wast:1494
+assert_trap(() => call($12, "run", [0, 65515, 39]));
+
+// memory_copy.wast:1497
+assert_return(() => call($12, "load8_u", [0]), 0);
+
+// memory_copy.wast:1498
+assert_return(() => call($12, "load8_u", [1]), 1);
+
+// memory_copy.wast:1499
+assert_return(() => call($12, "load8_u", [2]), 2);
+
+// memory_copy.wast:1500
+assert_return(() => call($12, "load8_u", [3]), 3);
+
+// memory_copy.wast:1501
+assert_return(() => call($12, "load8_u", [4]), 4);
+
+// memory_copy.wast:1502
+assert_return(() => call($12, "load8_u", [5]), 5);
+
+// memory_copy.wast:1503
+assert_return(() => call($12, "load8_u", [6]), 6);
+
+// memory_copy.wast:1504
+assert_return(() => call($12, "load8_u", [7]), 7);
+
+// memory_copy.wast:1505
+assert_return(() => call($12, "load8_u", [8]), 8);
+
+// memory_copy.wast:1506
+assert_return(() => call($12, "load8_u", [9]), 9);
+
+// memory_copy.wast:1507
+assert_return(() => call($12, "load8_u", [10]), 10);
+
+// memory_copy.wast:1508
+assert_return(() => call($12, "load8_u", [11]), 11);
+
+// memory_copy.wast:1509
+assert_return(() => call($12, "load8_u", [12]), 12);
+
+// memory_copy.wast:1510
+assert_return(() => call($12, "load8_u", [13]), 13);
+
+// memory_copy.wast:1511
+assert_return(() => call($12, "load8_u", [14]), 14);
+
+// memory_copy.wast:1512
+assert_return(() => call($12, "load8_u", [15]), 15);
+
+// memory_copy.wast:1513
+assert_return(() => call($12, "load8_u", [16]), 16);
+
+// memory_copy.wast:1514
+assert_return(() => call($12, "load8_u", [17]), 17);
+
+// memory_copy.wast:1515
+assert_return(() => call($12, "load8_u", [18]), 18);
+
+// memory_copy.wast:1516
+assert_return(() => call($12, "load8_u", [19]), 19);
+
+// memory_copy.wast:1517
+assert_return(() => call($12, "load8_u", [20]), 20);
+
+// memory_copy.wast:1518
+assert_return(() => call($12, "load8_u", [219]), 0);
+
+// memory_copy.wast:1519
+assert_return(() => call($12, "load8_u", [418]), 0);
+
+// memory_copy.wast:1520
+assert_return(() => call($12, "load8_u", [617]), 0);
+
+// memory_copy.wast:1521
+assert_return(() => call($12, "load8_u", [816]), 0);
+
+// memory_copy.wast:1522
+assert_return(() => call($12, "load8_u", [1015]), 0);
+
+// memory_copy.wast:1523
+assert_return(() => call($12, "load8_u", [1214]), 0);
+
+// memory_copy.wast:1524
+assert_return(() => call($12, "load8_u", [1413]), 0);
+
+// memory_copy.wast:1525
+assert_return(() => call($12, "load8_u", [1612]), 0);
+
+// memory_copy.wast:1526
+assert_return(() => call($12, "load8_u", [1811]), 0);
+
+// memory_copy.wast:1527
+assert_return(() => call($12, "load8_u", [2010]), 0);
+
+// memory_copy.wast:1528
+assert_return(() => call($12, "load8_u", [2209]), 0);
+
+// memory_copy.wast:1529
+assert_return(() => call($12, "load8_u", [2408]), 0);
+
+// memory_copy.wast:1530
+assert_return(() => call($12, "load8_u", [2607]), 0);
+
+// memory_copy.wast:1531
+assert_return(() => call($12, "load8_u", [2806]), 0);
+
+// memory_copy.wast:1532
+assert_return(() => call($12, "load8_u", [3005]), 0);
+
+// memory_copy.wast:1533
+assert_return(() => call($12, "load8_u", [3204]), 0);
+
+// memory_copy.wast:1534
+assert_return(() => call($12, "load8_u", [3403]), 0);
+
+// memory_copy.wast:1535
+assert_return(() => call($12, "load8_u", [3602]), 0);
+
+// memory_copy.wast:1536
+assert_return(() => call($12, "load8_u", [3801]), 0);
+
+// memory_copy.wast:1537
+assert_return(() => call($12, "load8_u", [4000]), 0);
+
+// memory_copy.wast:1538
+assert_return(() => call($12, "load8_u", [4199]), 0);
+
+// memory_copy.wast:1539
+assert_return(() => call($12, "load8_u", [4398]), 0);
+
+// memory_copy.wast:1540
+assert_return(() => call($12, "load8_u", [4597]), 0);
+
+// memory_copy.wast:1541
+assert_return(() => call($12, "load8_u", [4796]), 0);
+
+// memory_copy.wast:1542
+assert_return(() => call($12, "load8_u", [4995]), 0);
+
+// memory_copy.wast:1543
+assert_return(() => call($12, "load8_u", [5194]), 0);
+
+// memory_copy.wast:1544
+assert_return(() => call($12, "load8_u", [5393]), 0);
+
+// memory_copy.wast:1545
+assert_return(() => call($12, "load8_u", [5592]), 0);
+
+// memory_copy.wast:1546
+assert_return(() => call($12, "load8_u", [5791]), 0);
+
+// memory_copy.wast:1547
+assert_return(() => call($12, "load8_u", [5990]), 0);
+
+// memory_copy.wast:1548
+assert_return(() => call($12, "load8_u", [6189]), 0);
+
+// memory_copy.wast:1549
+assert_return(() => call($12, "load8_u", [6388]), 0);
+
+// memory_copy.wast:1550
+assert_return(() => call($12, "load8_u", [6587]), 0);
+
+// memory_copy.wast:1551
+assert_return(() => call($12, "load8_u", [6786]), 0);
+
+// memory_copy.wast:1552
+assert_return(() => call($12, "load8_u", [6985]), 0);
+
+// memory_copy.wast:1553
+assert_return(() => call($12, "load8_u", [7184]), 0);
+
+// memory_copy.wast:1554
+assert_return(() => call($12, "load8_u", [7383]), 0);
+
+// memory_copy.wast:1555
+assert_return(() => call($12, "load8_u", [7582]), 0);
+
+// memory_copy.wast:1556
+assert_return(() => call($12, "load8_u", [7781]), 0);
+
+// memory_copy.wast:1557
+assert_return(() => call($12, "load8_u", [7980]), 0);
+
+// memory_copy.wast:1558
+assert_return(() => call($12, "load8_u", [8179]), 0);
+
+// memory_copy.wast:1559
+assert_return(() => call($12, "load8_u", [8378]), 0);
+
+// memory_copy.wast:1560
+assert_return(() => call($12, "load8_u", [8577]), 0);
+
+// memory_copy.wast:1561
+assert_return(() => call($12, "load8_u", [8776]), 0);
+
+// memory_copy.wast:1562
+assert_return(() => call($12, "load8_u", [8975]), 0);
+
+// memory_copy.wast:1563
+assert_return(() => call($12, "load8_u", [9174]), 0);
+
+// memory_copy.wast:1564
+assert_return(() => call($12, "load8_u", [9373]), 0);
+
+// memory_copy.wast:1565
+assert_return(() => call($12, "load8_u", [9572]), 0);
+
+// memory_copy.wast:1566
+assert_return(() => call($12, "load8_u", [9771]), 0);
+
+// memory_copy.wast:1567
+assert_return(() => call($12, "load8_u", [9970]), 0);
+
+// memory_copy.wast:1568
+assert_return(() => call($12, "load8_u", [10169]), 0);
+
+// memory_copy.wast:1569
+assert_return(() => call($12, "load8_u", [10368]), 0);
+
+// memory_copy.wast:1570
+assert_return(() => call($12, "load8_u", [10567]), 0);
+
+// memory_copy.wast:1571
+assert_return(() => call($12, "load8_u", [10766]), 0);
+
+// memory_copy.wast:1572
+assert_return(() => call($12, "load8_u", [10965]), 0);
+
+// memory_copy.wast:1573
+assert_return(() => call($12, "load8_u", [11164]), 0);
+
+// memory_copy.wast:1574
+assert_return(() => call($12, "load8_u", [11363]), 0);
+
+// memory_copy.wast:1575
+assert_return(() => call($12, "load8_u", [11562]), 0);
+
+// memory_copy.wast:1576
+assert_return(() => call($12, "load8_u", [11761]), 0);
+
+// memory_copy.wast:1577
+assert_return(() => call($12, "load8_u", [11960]), 0);
+
+// memory_copy.wast:1578
+assert_return(() => call($12, "load8_u", [12159]), 0);
+
+// memory_copy.wast:1579
+assert_return(() => call($12, "load8_u", [12358]), 0);
+
+// memory_copy.wast:1580
+assert_return(() => call($12, "load8_u", [12557]), 0);
+
+// memory_copy.wast:1581
+assert_return(() => call($12, "load8_u", [12756]), 0);
+
+// memory_copy.wast:1582
+assert_return(() => call($12, "load8_u", [12955]), 0);
+
+// memory_copy.wast:1583
+assert_return(() => call($12, "load8_u", [13154]), 0);
+
+// memory_copy.wast:1584
+assert_return(() => call($12, "load8_u", [13353]), 0);
+
+// memory_copy.wast:1585
+assert_return(() => call($12, "load8_u", [13552]), 0);
+
+// memory_copy.wast:1586
+assert_return(() => call($12, "load8_u", [13751]), 0);
+
+// memory_copy.wast:1587
+assert_return(() => call($12, "load8_u", [13950]), 0);
+
+// memory_copy.wast:1588
+assert_return(() => call($12, "load8_u", [14149]), 0);
+
+// memory_copy.wast:1589
+assert_return(() => call($12, "load8_u", [14348]), 0);
+
+// memory_copy.wast:1590
+assert_return(() => call($12, "load8_u", [14547]), 0);
+
+// memory_copy.wast:1591
+assert_return(() => call($12, "load8_u", [14746]), 0);
+
+// memory_copy.wast:1592
+assert_return(() => call($12, "load8_u", [14945]), 0);
+
+// memory_copy.wast:1593
+assert_return(() => call($12, "load8_u", [15144]), 0);
+
+// memory_copy.wast:1594
+assert_return(() => call($12, "load8_u", [15343]), 0);
+
+// memory_copy.wast:1595
+assert_return(() => call($12, "load8_u", [15542]), 0);
+
+// memory_copy.wast:1596
+assert_return(() => call($12, "load8_u", [15741]), 0);
+
+// memory_copy.wast:1597
+assert_return(() => call($12, "load8_u", [15940]), 0);
+
+// memory_copy.wast:1598
+assert_return(() => call($12, "load8_u", [16139]), 0);
+
+// memory_copy.wast:1599
+assert_return(() => call($12, "load8_u", [16338]), 0);
+
+// memory_copy.wast:1600
+assert_return(() => call($12, "load8_u", [16537]), 0);
+
+// memory_copy.wast:1601
+assert_return(() => call($12, "load8_u", [16736]), 0);
+
+// memory_copy.wast:1602
+assert_return(() => call($12, "load8_u", [16935]), 0);
+
+// memory_copy.wast:1603
+assert_return(() => call($12, "load8_u", [17134]), 0);
+
+// memory_copy.wast:1604
+assert_return(() => call($12, "load8_u", [17333]), 0);
+
+// memory_copy.wast:1605
+assert_return(() => call($12, "load8_u", [17532]), 0);
+
+// memory_copy.wast:1606
+assert_return(() => call($12, "load8_u", [17731]), 0);
+
+// memory_copy.wast:1607
+assert_return(() => call($12, "load8_u", [17930]), 0);
+
+// memory_copy.wast:1608
+assert_return(() => call($12, "load8_u", [18129]), 0);
+
+// memory_copy.wast:1609
+assert_return(() => call($12, "load8_u", [18328]), 0);
+
+// memory_copy.wast:1610
+assert_return(() => call($12, "load8_u", [18527]), 0);
+
+// memory_copy.wast:1611
+assert_return(() => call($12, "load8_u", [18726]), 0);
+
+// memory_copy.wast:1612
+assert_return(() => call($12, "load8_u", [18925]), 0);
+
+// memory_copy.wast:1613
+assert_return(() => call($12, "load8_u", [19124]), 0);
+
+// memory_copy.wast:1614
+assert_return(() => call($12, "load8_u", [19323]), 0);
+
+// memory_copy.wast:1615
+assert_return(() => call($12, "load8_u", [19522]), 0);
+
+// memory_copy.wast:1616
+assert_return(() => call($12, "load8_u", [19721]), 0);
+
+// memory_copy.wast:1617
+assert_return(() => call($12, "load8_u", [19920]), 0);
+
+// memory_copy.wast:1618
+assert_return(() => call($12, "load8_u", [20119]), 0);
+
+// memory_copy.wast:1619
+assert_return(() => call($12, "load8_u", [20318]), 0);
+
+// memory_copy.wast:1620
+assert_return(() => call($12, "load8_u", [20517]), 0);
+
+// memory_copy.wast:1621
+assert_return(() => call($12, "load8_u", [20716]), 0);
+
+// memory_copy.wast:1622
+assert_return(() => call($12, "load8_u", [20915]), 0);
+
+// memory_copy.wast:1623
+assert_return(() => call($12, "load8_u", [21114]), 0);
+
+// memory_copy.wast:1624
+assert_return(() => call($12, "load8_u", [21313]), 0);
+
+// memory_copy.wast:1625
+assert_return(() => call($12, "load8_u", [21512]), 0);
+
+// memory_copy.wast:1626
+assert_return(() => call($12, "load8_u", [21711]), 0);
+
+// memory_copy.wast:1627
+assert_return(() => call($12, "load8_u", [21910]), 0);
+
+// memory_copy.wast:1628
+assert_return(() => call($12, "load8_u", [22109]), 0);
+
+// memory_copy.wast:1629
+assert_return(() => call($12, "load8_u", [22308]), 0);
+
+// memory_copy.wast:1630
+assert_return(() => call($12, "load8_u", [22507]), 0);
+
+// memory_copy.wast:1631
+assert_return(() => call($12, "load8_u", [22706]), 0);
+
+// memory_copy.wast:1632
+assert_return(() => call($12, "load8_u", [22905]), 0);
+
+// memory_copy.wast:1633
+assert_return(() => call($12, "load8_u", [23104]), 0);
+
+// memory_copy.wast:1634
+assert_return(() => call($12, "load8_u", [23303]), 0);
+
+// memory_copy.wast:1635
+assert_return(() => call($12, "load8_u", [23502]), 0);
+
+// memory_copy.wast:1636
+assert_return(() => call($12, "load8_u", [23701]), 0);
+
+// memory_copy.wast:1637
+assert_return(() => call($12, "load8_u", [23900]), 0);
+
+// memory_copy.wast:1638
+assert_return(() => call($12, "load8_u", [24099]), 0);
+
+// memory_copy.wast:1639
+assert_return(() => call($12, "load8_u", [24298]), 0);
+
+// memory_copy.wast:1640
+assert_return(() => call($12, "load8_u", [24497]), 0);
+
+// memory_copy.wast:1641
+assert_return(() => call($12, "load8_u", [24696]), 0);
+
+// memory_copy.wast:1642
+assert_return(() => call($12, "load8_u", [24895]), 0);
+
+// memory_copy.wast:1643
+assert_return(() => call($12, "load8_u", [25094]), 0);
+
+// memory_copy.wast:1644
+assert_return(() => call($12, "load8_u", [25293]), 0);
+
+// memory_copy.wast:1645
+assert_return(() => call($12, "load8_u", [25492]), 0);
+
+// memory_copy.wast:1646
+assert_return(() => call($12, "load8_u", [25691]), 0);
+
+// memory_copy.wast:1647
+assert_return(() => call($12, "load8_u", [25890]), 0);
+
+// memory_copy.wast:1648
+assert_return(() => call($12, "load8_u", [26089]), 0);
+
+// memory_copy.wast:1649
+assert_return(() => call($12, "load8_u", [26288]), 0);
+
+// memory_copy.wast:1650
+assert_return(() => call($12, "load8_u", [26487]), 0);
+
+// memory_copy.wast:1651
+assert_return(() => call($12, "load8_u", [26686]), 0);
+
+// memory_copy.wast:1652
+assert_return(() => call($12, "load8_u", [26885]), 0);
+
+// memory_copy.wast:1653
+assert_return(() => call($12, "load8_u", [27084]), 0);
+
+// memory_copy.wast:1654
+assert_return(() => call($12, "load8_u", [27283]), 0);
+
+// memory_copy.wast:1655
+assert_return(() => call($12, "load8_u", [27482]), 0);
+
+// memory_copy.wast:1656
+assert_return(() => call($12, "load8_u", [27681]), 0);
+
+// memory_copy.wast:1657
+assert_return(() => call($12, "load8_u", [27880]), 0);
+
+// memory_copy.wast:1658
+assert_return(() => call($12, "load8_u", [28079]), 0);
+
+// memory_copy.wast:1659
+assert_return(() => call($12, "load8_u", [28278]), 0);
+
+// memory_copy.wast:1660
+assert_return(() => call($12, "load8_u", [28477]), 0);
+
+// memory_copy.wast:1661
+assert_return(() => call($12, "load8_u", [28676]), 0);
+
+// memory_copy.wast:1662
+assert_return(() => call($12, "load8_u", [28875]), 0);
+
+// memory_copy.wast:1663
+assert_return(() => call($12, "load8_u", [29074]), 0);
+
+// memory_copy.wast:1664
+assert_return(() => call($12, "load8_u", [29273]), 0);
+
+// memory_copy.wast:1665
+assert_return(() => call($12, "load8_u", [29472]), 0);
+
+// memory_copy.wast:1666
+assert_return(() => call($12, "load8_u", [29671]), 0);
+
+// memory_copy.wast:1667
+assert_return(() => call($12, "load8_u", [29870]), 0);
+
+// memory_copy.wast:1668
+assert_return(() => call($12, "load8_u", [30069]), 0);
+
+// memory_copy.wast:1669
+assert_return(() => call($12, "load8_u", [30268]), 0);
+
+// memory_copy.wast:1670
+assert_return(() => call($12, "load8_u", [30467]), 0);
+
+// memory_copy.wast:1671
+assert_return(() => call($12, "load8_u", [30666]), 0);
+
+// memory_copy.wast:1672
+assert_return(() => call($12, "load8_u", [30865]), 0);
+
+// memory_copy.wast:1673
+assert_return(() => call($12, "load8_u", [31064]), 0);
+
+// memory_copy.wast:1674
+assert_return(() => call($12, "load8_u", [31263]), 0);
+
+// memory_copy.wast:1675
+assert_return(() => call($12, "load8_u", [31462]), 0);
+
+// memory_copy.wast:1676
+assert_return(() => call($12, "load8_u", [31661]), 0);
+
+// memory_copy.wast:1677
+assert_return(() => call($12, "load8_u", [31860]), 0);
+
+// memory_copy.wast:1678
+assert_return(() => call($12, "load8_u", [32059]), 0);
+
+// memory_copy.wast:1679
+assert_return(() => call($12, "load8_u", [32258]), 0);
+
+// memory_copy.wast:1680
+assert_return(() => call($12, "load8_u", [32457]), 0);
+
+// memory_copy.wast:1681
+assert_return(() => call($12, "load8_u", [32656]), 0);
+
+// memory_copy.wast:1682
+assert_return(() => call($12, "load8_u", [32855]), 0);
+
+// memory_copy.wast:1683
+assert_return(() => call($12, "load8_u", [33054]), 0);
+
+// memory_copy.wast:1684
+assert_return(() => call($12, "load8_u", [33253]), 0);
+
+// memory_copy.wast:1685
+assert_return(() => call($12, "load8_u", [33452]), 0);
+
+// memory_copy.wast:1686
+assert_return(() => call($12, "load8_u", [33651]), 0);
+
+// memory_copy.wast:1687
+assert_return(() => call($12, "load8_u", [33850]), 0);
+
+// memory_copy.wast:1688
+assert_return(() => call($12, "load8_u", [34049]), 0);
+
+// memory_copy.wast:1689
+assert_return(() => call($12, "load8_u", [34248]), 0);
+
+// memory_copy.wast:1690
+assert_return(() => call($12, "load8_u", [34447]), 0);
+
+// memory_copy.wast:1691
+assert_return(() => call($12, "load8_u", [34646]), 0);
+
+// memory_copy.wast:1692
+assert_return(() => call($12, "load8_u", [34845]), 0);
+
+// memory_copy.wast:1693
+assert_return(() => call($12, "load8_u", [35044]), 0);
+
+// memory_copy.wast:1694
+assert_return(() => call($12, "load8_u", [35243]), 0);
+
+// memory_copy.wast:1695
+assert_return(() => call($12, "load8_u", [35442]), 0);
+
+// memory_copy.wast:1696
+assert_return(() => call($12, "load8_u", [35641]), 0);
+
+// memory_copy.wast:1697
+assert_return(() => call($12, "load8_u", [35840]), 0);
+
+// memory_copy.wast:1698
+assert_return(() => call($12, "load8_u", [36039]), 0);
+
+// memory_copy.wast:1699
+assert_return(() => call($12, "load8_u", [36238]), 0);
+
+// memory_copy.wast:1700
+assert_return(() => call($12, "load8_u", [36437]), 0);
+
+// memory_copy.wast:1701
+assert_return(() => call($12, "load8_u", [36636]), 0);
+
+// memory_copy.wast:1702
+assert_return(() => call($12, "load8_u", [36835]), 0);
+
+// memory_copy.wast:1703
+assert_return(() => call($12, "load8_u", [37034]), 0);
+
+// memory_copy.wast:1704
+assert_return(() => call($12, "load8_u", [37233]), 0);
+
+// memory_copy.wast:1705
+assert_return(() => call($12, "load8_u", [37432]), 0);
+
+// memory_copy.wast:1706
+assert_return(() => call($12, "load8_u", [37631]), 0);
+
+// memory_copy.wast:1707
+assert_return(() => call($12, "load8_u", [37830]), 0);
+
+// memory_copy.wast:1708
+assert_return(() => call($12, "load8_u", [38029]), 0);
+
+// memory_copy.wast:1709
+assert_return(() => call($12, "load8_u", [38228]), 0);
+
+// memory_copy.wast:1710
+assert_return(() => call($12, "load8_u", [38427]), 0);
+
+// memory_copy.wast:1711
+assert_return(() => call($12, "load8_u", [38626]), 0);
+
+// memory_copy.wast:1712
+assert_return(() => call($12, "load8_u", [38825]), 0);
+
+// memory_copy.wast:1713
+assert_return(() => call($12, "load8_u", [39024]), 0);
+
+// memory_copy.wast:1714
+assert_return(() => call($12, "load8_u", [39223]), 0);
+
+// memory_copy.wast:1715
+assert_return(() => call($12, "load8_u", [39422]), 0);
+
+// memory_copy.wast:1716
+assert_return(() => call($12, "load8_u", [39621]), 0);
+
+// memory_copy.wast:1717
+assert_return(() => call($12, "load8_u", [39820]), 0);
+
+// memory_copy.wast:1718
+assert_return(() => call($12, "load8_u", [40019]), 0);
+
+// memory_copy.wast:1719
+assert_return(() => call($12, "load8_u", [40218]), 0);
+
+// memory_copy.wast:1720
+assert_return(() => call($12, "load8_u", [40417]), 0);
+
+// memory_copy.wast:1721
+assert_return(() => call($12, "load8_u", [40616]), 0);
+
+// memory_copy.wast:1722
+assert_return(() => call($12, "load8_u", [40815]), 0);
+
+// memory_copy.wast:1723
+assert_return(() => call($12, "load8_u", [41014]), 0);
+
+// memory_copy.wast:1724
+assert_return(() => call($12, "load8_u", [41213]), 0);
+
+// memory_copy.wast:1725
+assert_return(() => call($12, "load8_u", [41412]), 0);
+
+// memory_copy.wast:1726
+assert_return(() => call($12, "load8_u", [41611]), 0);
+
+// memory_copy.wast:1727
+assert_return(() => call($12, "load8_u", [41810]), 0);
+
+// memory_copy.wast:1728
+assert_return(() => call($12, "load8_u", [42009]), 0);
+
+// memory_copy.wast:1729
+assert_return(() => call($12, "load8_u", [42208]), 0);
+
+// memory_copy.wast:1730
+assert_return(() => call($12, "load8_u", [42407]), 0);
+
+// memory_copy.wast:1731
+assert_return(() => call($12, "load8_u", [42606]), 0);
+
+// memory_copy.wast:1732
+assert_return(() => call($12, "load8_u", [42805]), 0);
+
+// memory_copy.wast:1733
+assert_return(() => call($12, "load8_u", [43004]), 0);
+
+// memory_copy.wast:1734
+assert_return(() => call($12, "load8_u", [43203]), 0);
+
+// memory_copy.wast:1735
+assert_return(() => call($12, "load8_u", [43402]), 0);
+
+// memory_copy.wast:1736
+assert_return(() => call($12, "load8_u", [43601]), 0);
+
+// memory_copy.wast:1737
+assert_return(() => call($12, "load8_u", [43800]), 0);
+
+// memory_copy.wast:1738
+assert_return(() => call($12, "load8_u", [43999]), 0);
+
+// memory_copy.wast:1739
+assert_return(() => call($12, "load8_u", [44198]), 0);
+
+// memory_copy.wast:1740
+assert_return(() => call($12, "load8_u", [44397]), 0);
+
+// memory_copy.wast:1741
+assert_return(() => call($12, "load8_u", [44596]), 0);
+
+// memory_copy.wast:1742
+assert_return(() => call($12, "load8_u", [44795]), 0);
+
+// memory_copy.wast:1743
+assert_return(() => call($12, "load8_u", [44994]), 0);
+
+// memory_copy.wast:1744
+assert_return(() => call($12, "load8_u", [45193]), 0);
+
+// memory_copy.wast:1745
+assert_return(() => call($12, "load8_u", [45392]), 0);
+
+// memory_copy.wast:1746
+assert_return(() => call($12, "load8_u", [45591]), 0);
+
+// memory_copy.wast:1747
+assert_return(() => call($12, "load8_u", [45790]), 0);
+
+// memory_copy.wast:1748
+assert_return(() => call($12, "load8_u", [45989]), 0);
+
+// memory_copy.wast:1749
+assert_return(() => call($12, "load8_u", [46188]), 0);
+
+// memory_copy.wast:1750
+assert_return(() => call($12, "load8_u", [46387]), 0);
+
+// memory_copy.wast:1751
+assert_return(() => call($12, "load8_u", [46586]), 0);
+
+// memory_copy.wast:1752
+assert_return(() => call($12, "load8_u", [46785]), 0);
+
+// memory_copy.wast:1753
+assert_return(() => call($12, "load8_u", [46984]), 0);
+
+// memory_copy.wast:1754
+assert_return(() => call($12, "load8_u", [47183]), 0);
+
+// memory_copy.wast:1755
+assert_return(() => call($12, "load8_u", [47382]), 0);
+
+// memory_copy.wast:1756
+assert_return(() => call($12, "load8_u", [47581]), 0);
+
+// memory_copy.wast:1757
+assert_return(() => call($12, "load8_u", [47780]), 0);
+
+// memory_copy.wast:1758
+assert_return(() => call($12, "load8_u", [47979]), 0);
+
+// memory_copy.wast:1759
+assert_return(() => call($12, "load8_u", [48178]), 0);
+
+// memory_copy.wast:1760
+assert_return(() => call($12, "load8_u", [48377]), 0);
+
+// memory_copy.wast:1761
+assert_return(() => call($12, "load8_u", [48576]), 0);
+
+// memory_copy.wast:1762
+assert_return(() => call($12, "load8_u", [48775]), 0);
+
+// memory_copy.wast:1763
+assert_return(() => call($12, "load8_u", [48974]), 0);
+
+// memory_copy.wast:1764
+assert_return(() => call($12, "load8_u", [49173]), 0);
+
+// memory_copy.wast:1765
+assert_return(() => call($12, "load8_u", [49372]), 0);
+
+// memory_copy.wast:1766
+assert_return(() => call($12, "load8_u", [49571]), 0);
+
+// memory_copy.wast:1767
+assert_return(() => call($12, "load8_u", [49770]), 0);
+
+// memory_copy.wast:1768
+assert_return(() => call($12, "load8_u", [49969]), 0);
+
+// memory_copy.wast:1769
+assert_return(() => call($12, "load8_u", [50168]), 0);
+
+// memory_copy.wast:1770
+assert_return(() => call($12, "load8_u", [50367]), 0);
+
+// memory_copy.wast:1771
+assert_return(() => call($12, "load8_u", [50566]), 0);
+
+// memory_copy.wast:1772
+assert_return(() => call($12, "load8_u", [50765]), 0);
+
+// memory_copy.wast:1773
+assert_return(() => call($12, "load8_u", [50964]), 0);
+
+// memory_copy.wast:1774
+assert_return(() => call($12, "load8_u", [51163]), 0);
+
+// memory_copy.wast:1775
+assert_return(() => call($12, "load8_u", [51362]), 0);
+
+// memory_copy.wast:1776
+assert_return(() => call($12, "load8_u", [51561]), 0);
+
+// memory_copy.wast:1777
+assert_return(() => call($12, "load8_u", [51760]), 0);
+
+// memory_copy.wast:1778
+assert_return(() => call($12, "load8_u", [51959]), 0);
+
+// memory_copy.wast:1779
+assert_return(() => call($12, "load8_u", [52158]), 0);
+
+// memory_copy.wast:1780
+assert_return(() => call($12, "load8_u", [52357]), 0);
+
+// memory_copy.wast:1781
+assert_return(() => call($12, "load8_u", [52556]), 0);
+
+// memory_copy.wast:1782
+assert_return(() => call($12, "load8_u", [52755]), 0);
+
+// memory_copy.wast:1783
+assert_return(() => call($12, "load8_u", [52954]), 0);
+
+// memory_copy.wast:1784
+assert_return(() => call($12, "load8_u", [53153]), 0);
+
+// memory_copy.wast:1785
+assert_return(() => call($12, "load8_u", [53352]), 0);
+
+// memory_copy.wast:1786
+assert_return(() => call($12, "load8_u", [53551]), 0);
+
+// memory_copy.wast:1787
+assert_return(() => call($12, "load8_u", [53750]), 0);
+
+// memory_copy.wast:1788
+assert_return(() => call($12, "load8_u", [53949]), 0);
+
+// memory_copy.wast:1789
+assert_return(() => call($12, "load8_u", [54148]), 0);
+
+// memory_copy.wast:1790
+assert_return(() => call($12, "load8_u", [54347]), 0);
+
+// memory_copy.wast:1791
+assert_return(() => call($12, "load8_u", [54546]), 0);
+
+// memory_copy.wast:1792
+assert_return(() => call($12, "load8_u", [54745]), 0);
+
+// memory_copy.wast:1793
+assert_return(() => call($12, "load8_u", [54944]), 0);
+
+// memory_copy.wast:1794
+assert_return(() => call($12, "load8_u", [55143]), 0);
+
+// memory_copy.wast:1795
+assert_return(() => call($12, "load8_u", [55342]), 0);
+
+// memory_copy.wast:1796
+assert_return(() => call($12, "load8_u", [55541]), 0);
+
+// memory_copy.wast:1797
+assert_return(() => call($12, "load8_u", [55740]), 0);
+
+// memory_copy.wast:1798
+assert_return(() => call($12, "load8_u", [55939]), 0);
+
+// memory_copy.wast:1799
+assert_return(() => call($12, "load8_u", [56138]), 0);
+
+// memory_copy.wast:1800
+assert_return(() => call($12, "load8_u", [56337]), 0);
+
+// memory_copy.wast:1801
+assert_return(() => call($12, "load8_u", [56536]), 0);
+
+// memory_copy.wast:1802
+assert_return(() => call($12, "load8_u", [56735]), 0);
+
+// memory_copy.wast:1803
+assert_return(() => call($12, "load8_u", [56934]), 0);
+
+// memory_copy.wast:1804
+assert_return(() => call($12, "load8_u", [57133]), 0);
+
+// memory_copy.wast:1805
+assert_return(() => call($12, "load8_u", [57332]), 0);
+
+// memory_copy.wast:1806
+assert_return(() => call($12, "load8_u", [57531]), 0);
+
+// memory_copy.wast:1807
+assert_return(() => call($12, "load8_u", [57730]), 0);
+
+// memory_copy.wast:1808
+assert_return(() => call($12, "load8_u", [57929]), 0);
+
+// memory_copy.wast:1809
+assert_return(() => call($12, "load8_u", [58128]), 0);
+
+// memory_copy.wast:1810
+assert_return(() => call($12, "load8_u", [58327]), 0);
+
+// memory_copy.wast:1811
+assert_return(() => call($12, "load8_u", [58526]), 0);
+
+// memory_copy.wast:1812
+assert_return(() => call($12, "load8_u", [58725]), 0);
+
+// memory_copy.wast:1813
+assert_return(() => call($12, "load8_u", [58924]), 0);
+
+// memory_copy.wast:1814
+assert_return(() => call($12, "load8_u", [59123]), 0);
+
+// memory_copy.wast:1815
+assert_return(() => call($12, "load8_u", [59322]), 0);
+
+// memory_copy.wast:1816
+assert_return(() => call($12, "load8_u", [59521]), 0);
+
+// memory_copy.wast:1817
+assert_return(() => call($12, "load8_u", [59720]), 0);
+
+// memory_copy.wast:1818
+assert_return(() => call($12, "load8_u", [59919]), 0);
+
+// memory_copy.wast:1819
+assert_return(() => call($12, "load8_u", [60118]), 0);
+
+// memory_copy.wast:1820
+assert_return(() => call($12, "load8_u", [60317]), 0);
+
+// memory_copy.wast:1821
+assert_return(() => call($12, "load8_u", [60516]), 0);
+
+// memory_copy.wast:1822
+assert_return(() => call($12, "load8_u", [60715]), 0);
+
+// memory_copy.wast:1823
+assert_return(() => call($12, "load8_u", [60914]), 0);
+
+// memory_copy.wast:1824
+assert_return(() => call($12, "load8_u", [61113]), 0);
+
+// memory_copy.wast:1825
+assert_return(() => call($12, "load8_u", [61312]), 0);
+
+// memory_copy.wast:1826
+assert_return(() => call($12, "load8_u", [61511]), 0);
+
+// memory_copy.wast:1827
+assert_return(() => call($12, "load8_u", [61710]), 0);
+
+// memory_copy.wast:1828
+assert_return(() => call($12, "load8_u", [61909]), 0);
+
+// memory_copy.wast:1829
+assert_return(() => call($12, "load8_u", [62108]), 0);
+
+// memory_copy.wast:1830
+assert_return(() => call($12, "load8_u", [62307]), 0);
+
+// memory_copy.wast:1831
+assert_return(() => call($12, "load8_u", [62506]), 0);
+
+// memory_copy.wast:1832
+assert_return(() => call($12, "load8_u", [62705]), 0);
+
+// memory_copy.wast:1833
+assert_return(() => call($12, "load8_u", [62904]), 0);
+
+// memory_copy.wast:1834
+assert_return(() => call($12, "load8_u", [63103]), 0);
+
+// memory_copy.wast:1835
+assert_return(() => call($12, "load8_u", [63302]), 0);
+
+// memory_copy.wast:1836
+assert_return(() => call($12, "load8_u", [63501]), 0);
+
+// memory_copy.wast:1837
+assert_return(() => call($12, "load8_u", [63700]), 0);
+
+// memory_copy.wast:1838
+assert_return(() => call($12, "load8_u", [63899]), 0);
+
+// memory_copy.wast:1839
+assert_return(() => call($12, "load8_u", [64098]), 0);
+
+// memory_copy.wast:1840
+assert_return(() => call($12, "load8_u", [64297]), 0);
+
+// memory_copy.wast:1841
+assert_return(() => call($12, "load8_u", [64496]), 0);
+
+// memory_copy.wast:1842
+assert_return(() => call($12, "load8_u", [64695]), 0);
+
+// memory_copy.wast:1843
+assert_return(() => call($12, "load8_u", [64894]), 0);
+
+// memory_copy.wast:1844
+assert_return(() => call($12, "load8_u", [65093]), 0);
+
+// memory_copy.wast:1845
+assert_return(() => call($12, "load8_u", [65292]), 0);
+
+// memory_copy.wast:1846
+assert_return(() => call($12, "load8_u", [65491]), 0);
+
+// memory_copy.wast:1847
+assert_return(() => call($12, "load8_u", [65515]), 0);
+
+// memory_copy.wast:1848
+assert_return(() => call($12, "load8_u", [65516]), 1);
+
+// memory_copy.wast:1849
+assert_return(() => call($12, "load8_u", [65517]), 2);
+
+// memory_copy.wast:1850
+assert_return(() => call($12, "load8_u", [65518]), 3);
+
+// memory_copy.wast:1851
+assert_return(() => call($12, "load8_u", [65519]), 4);
+
+// memory_copy.wast:1852
+assert_return(() => call($12, "load8_u", [65520]), 5);
+
+// memory_copy.wast:1853
+assert_return(() => call($12, "load8_u", [65521]), 6);
+
+// memory_copy.wast:1854
+assert_return(() => call($12, "load8_u", [65522]), 7);
+
+// memory_copy.wast:1855
+assert_return(() => call($12, "load8_u", [65523]), 8);
+
+// memory_copy.wast:1856
+assert_return(() => call($12, "load8_u", [65524]), 9);
+
+// memory_copy.wast:1857
+assert_return(() => call($12, "load8_u", [65525]), 10);
+
+// memory_copy.wast:1858
+assert_return(() => call($12, "load8_u", [65526]), 11);
+
+// memory_copy.wast:1859
+assert_return(() => call($12, "load8_u", [65527]), 12);
+
+// memory_copy.wast:1860
+assert_return(() => call($12, "load8_u", [65528]), 13);
+
+// memory_copy.wast:1861
+assert_return(() => call($12, "load8_u", [65529]), 14);
+
+// memory_copy.wast:1862
+assert_return(() => call($12, "load8_u", [65530]), 15);
+
+// memory_copy.wast:1863
+assert_return(() => call($12, "load8_u", [65531]), 16);
+
+// memory_copy.wast:1864
+assert_return(() => call($12, "load8_u", [65532]), 17);
+
+// memory_copy.wast:1865
+assert_return(() => call($12, "load8_u", [65533]), 18);
+
+// memory_copy.wast:1866
+assert_return(() => call($12, "load8_u", [65534]), 19);
+
+// memory_copy.wast:1867
+assert_return(() => call($12, "load8_u", [65535]), 20);
+
+// memory_copy.wast:1869
+let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xce\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
+
+// memory_copy.wast:1877
+assert_trap(() => call($13, "run", [65516, 65486, 40]));
+
+// memory_copy.wast:1880
+assert_return(() => call($13, "load8_u", [198]), 0);
+
+// memory_copy.wast:1881
+assert_return(() => call($13, "load8_u", [397]), 0);
+
+// memory_copy.wast:1882
+assert_return(() => call($13, "load8_u", [596]), 0);
+
+// memory_copy.wast:1883
+assert_return(() => call($13, "load8_u", [795]), 0);
+
+// memory_copy.wast:1884
+assert_return(() => call($13, "load8_u", [994]), 0);
+
+// memory_copy.wast:1885
+assert_return(() => call($13, "load8_u", [1193]), 0);
+
+// memory_copy.wast:1886
+assert_return(() => call($13, "load8_u", [1392]), 0);
+
+// memory_copy.wast:1887
+assert_return(() => call($13, "load8_u", [1591]), 0);
+
+// memory_copy.wast:1888
+assert_return(() => call($13, "load8_u", [1790]), 0);
+
+// memory_copy.wast:1889
+assert_return(() => call($13, "load8_u", [1989]), 0);
+
+// memory_copy.wast:1890
+assert_return(() => call($13, "load8_u", [2188]), 0);
+
+// memory_copy.wast:1891
+assert_return(() => call($13, "load8_u", [2387]), 0);
+
+// memory_copy.wast:1892
+assert_return(() => call($13, "load8_u", [2586]), 0);
+
+// memory_copy.wast:1893
+assert_return(() => call($13, "load8_u", [2785]), 0);
+
+// memory_copy.wast:1894
+assert_return(() => call($13, "load8_u", [2984]), 0);
+
+// memory_copy.wast:1895
+assert_return(() => call($13, "load8_u", [3183]), 0);
+
+// memory_copy.wast:1896
+assert_return(() => call($13, "load8_u", [3382]), 0);
+
+// memory_copy.wast:1897
+assert_return(() => call($13, "load8_u", [3581]), 0);
+
+// memory_copy.wast:1898
+assert_return(() => call($13, "load8_u", [3780]), 0);
+
+// memory_copy.wast:1899
+assert_return(() => call($13, "load8_u", [3979]), 0);
+
+// memory_copy.wast:1900
+assert_return(() => call($13, "load8_u", [4178]), 0);
+
+// memory_copy.wast:1901
+assert_return(() => call($13, "load8_u", [4377]), 0);
+
+// memory_copy.wast:1902
+assert_return(() => call($13, "load8_u", [4576]), 0);
+
+// memory_copy.wast:1903
+assert_return(() => call($13, "load8_u", [4775]), 0);
+
+// memory_copy.wast:1904
+assert_return(() => call($13, "load8_u", [4974]), 0);
+
+// memory_copy.wast:1905
+assert_return(() => call($13, "load8_u", [5173]), 0);
+
+// memory_copy.wast:1906
+assert_return(() => call($13, "load8_u", [5372]), 0);
+
+// memory_copy.wast:1907
+assert_return(() => call($13, "load8_u", [5571]), 0);
+
+// memory_copy.wast:1908
+assert_return(() => call($13, "load8_u", [5770]), 0);
+
+// memory_copy.wast:1909
+assert_return(() => call($13, "load8_u", [5969]), 0);
+
+// memory_copy.wast:1910
+assert_return(() => call($13, "load8_u", [6168]), 0);
+
+// memory_copy.wast:1911
+assert_return(() => call($13, "load8_u", [6367]), 0);
+
+// memory_copy.wast:1912
+assert_return(() => call($13, "load8_u", [6566]), 0);
+
+// memory_copy.wast:1913
+assert_return(() => call($13, "load8_u", [6765]), 0);
+
+// memory_copy.wast:1914
+assert_return(() => call($13, "load8_u", [6964]), 0);
+
+// memory_copy.wast:1915
+assert_return(() => call($13, "load8_u", [7163]), 0);
+
+// memory_copy.wast:1916
+assert_return(() => call($13, "load8_u", [7362]), 0);
+
+// memory_copy.wast:1917
+assert_return(() => call($13, "load8_u", [7561]), 0);
+
+// memory_copy.wast:1918
+assert_return(() => call($13, "load8_u", [7760]), 0);
+
+// memory_copy.wast:1919
+assert_return(() => call($13, "load8_u", [7959]), 0);
+
+// memory_copy.wast:1920
+assert_return(() => call($13, "load8_u", [8158]), 0);
+
+// memory_copy.wast:1921
+assert_return(() => call($13, "load8_u", [8357]), 0);
+
+// memory_copy.wast:1922
+assert_return(() => call($13, "load8_u", [8556]), 0);
+
+// memory_copy.wast:1923
+assert_return(() => call($13, "load8_u", [8755]), 0);
+
+// memory_copy.wast:1924
+assert_return(() => call($13, "load8_u", [8954]), 0);
+
+// memory_copy.wast:1925
+assert_return(() => call($13, "load8_u", [9153]), 0);
+
+// memory_copy.wast:1926
+assert_return(() => call($13, "load8_u", [9352]), 0);
+
+// memory_copy.wast:1927
+assert_return(() => call($13, "load8_u", [9551]), 0);
+
+// memory_copy.wast:1928
+assert_return(() => call($13, "load8_u", [9750]), 0);
+
+// memory_copy.wast:1929
+assert_return(() => call($13, "load8_u", [9949]), 0);
+
+// memory_copy.wast:1930
+assert_return(() => call($13, "load8_u", [10148]), 0);
+
+// memory_copy.wast:1931
+assert_return(() => call($13, "load8_u", [10347]), 0);
+
+// memory_copy.wast:1932
+assert_return(() => call($13, "load8_u", [10546]), 0);
+
+// memory_copy.wast:1933
+assert_return(() => call($13, "load8_u", [10745]), 0);
+
+// memory_copy.wast:1934
+assert_return(() => call($13, "load8_u", [10944]), 0);
+
+// memory_copy.wast:1935
+assert_return(() => call($13, "load8_u", [11143]), 0);
+
+// memory_copy.wast:1936
+assert_return(() => call($13, "load8_u", [11342]), 0);
+
+// memory_copy.wast:1937
+assert_return(() => call($13, "load8_u", [11541]), 0);
+
+// memory_copy.wast:1938
+assert_return(() => call($13, "load8_u", [11740]), 0);
+
+// memory_copy.wast:1939
+assert_return(() => call($13, "load8_u", [11939]), 0);
+
+// memory_copy.wast:1940
+assert_return(() => call($13, "load8_u", [12138]), 0);
+
+// memory_copy.wast:1941
+assert_return(() => call($13, "load8_u", [12337]), 0);
+
+// memory_copy.wast:1942
+assert_return(() => call($13, "load8_u", [12536]), 0);
+
+// memory_copy.wast:1943
+assert_return(() => call($13, "load8_u", [12735]), 0);
+
+// memory_copy.wast:1944
+assert_return(() => call($13, "load8_u", [12934]), 0);
+
+// memory_copy.wast:1945
+assert_return(() => call($13, "load8_u", [13133]), 0);
+
+// memory_copy.wast:1946
+assert_return(() => call($13, "load8_u", [13332]), 0);
+
+// memory_copy.wast:1947
+assert_return(() => call($13, "load8_u", [13531]), 0);
+
+// memory_copy.wast:1948
+assert_return(() => call($13, "load8_u", [13730]), 0);
+
+// memory_copy.wast:1949
+assert_return(() => call($13, "load8_u", [13929]), 0);
+
+// memory_copy.wast:1950
+assert_return(() => call($13, "load8_u", [14128]), 0);
+
+// memory_copy.wast:1951
+assert_return(() => call($13, "load8_u", [14327]), 0);
+
+// memory_copy.wast:1952
+assert_return(() => call($13, "load8_u", [14526]), 0);
+
+// memory_copy.wast:1953
+assert_return(() => call($13, "load8_u", [14725]), 0);
+
+// memory_copy.wast:1954
+assert_return(() => call($13, "load8_u", [14924]), 0);
+
+// memory_copy.wast:1955
+assert_return(() => call($13, "load8_u", [15123]), 0);
+
+// memory_copy.wast:1956
+assert_return(() => call($13, "load8_u", [15322]), 0);
+
+// memory_copy.wast:1957
+assert_return(() => call($13, "load8_u", [15521]), 0);
+
+// memory_copy.wast:1958
+assert_return(() => call($13, "load8_u", [15720]), 0);
+
+// memory_copy.wast:1959
+assert_return(() => call($13, "load8_u", [15919]), 0);
+
+// memory_copy.wast:1960
+assert_return(() => call($13, "load8_u", [16118]), 0);
+
+// memory_copy.wast:1961
+assert_return(() => call($13, "load8_u", [16317]), 0);
+
+// memory_copy.wast:1962
+assert_return(() => call($13, "load8_u", [16516]), 0);
+
+// memory_copy.wast:1963
+assert_return(() => call($13, "load8_u", [16715]), 0);
+
+// memory_copy.wast:1964
+assert_return(() => call($13, "load8_u", [16914]), 0);
+
+// memory_copy.wast:1965
+assert_return(() => call($13, "load8_u", [17113]), 0);
+
+// memory_copy.wast:1966
+assert_return(() => call($13, "load8_u", [17312]), 0);
+
+// memory_copy.wast:1967
+assert_return(() => call($13, "load8_u", [17511]), 0);
+
+// memory_copy.wast:1968
+assert_return(() => call($13, "load8_u", [17710]), 0);
+
+// memory_copy.wast:1969
+assert_return(() => call($13, "load8_u", [17909]), 0);
+
+// memory_copy.wast:1970
+assert_return(() => call($13, "load8_u", [18108]), 0);
+
+// memory_copy.wast:1971
+assert_return(() => call($13, "load8_u", [18307]), 0);
+
+// memory_copy.wast:1972
+assert_return(() => call($13, "load8_u", [18506]), 0);
+
+// memory_copy.wast:1973
+assert_return(() => call($13, "load8_u", [18705]), 0);
+
+// memory_copy.wast:1974
+assert_return(() => call($13, "load8_u", [18904]), 0);
+
+// memory_copy.wast:1975
+assert_return(() => call($13, "load8_u", [19103]), 0);
+
+// memory_copy.wast:1976
+assert_return(() => call($13, "load8_u", [19302]), 0);
+
+// memory_copy.wast:1977
+assert_return(() => call($13, "load8_u", [19501]), 0);
+
+// memory_copy.wast:1978
+assert_return(() => call($13, "load8_u", [19700]), 0);
+
+// memory_copy.wast:1979
+assert_return(() => call($13, "load8_u", [19899]), 0);
+
+// memory_copy.wast:1980
+assert_return(() => call($13, "load8_u", [20098]), 0);
+
+// memory_copy.wast:1981
+assert_return(() => call($13, "load8_u", [20297]), 0);
+
+// memory_copy.wast:1982
+assert_return(() => call($13, "load8_u", [20496]), 0);
+
+// memory_copy.wast:1983
+assert_return(() => call($13, "load8_u", [20695]), 0);
+
+// memory_copy.wast:1984
+assert_return(() => call($13, "load8_u", [20894]), 0);
+
+// memory_copy.wast:1985
+assert_return(() => call($13, "load8_u", [21093]), 0);
+
+// memory_copy.wast:1986
+assert_return(() => call($13, "load8_u", [21292]), 0);
+
+// memory_copy.wast:1987
+assert_return(() => call($13, "load8_u", [21491]), 0);
+
+// memory_copy.wast:1988
+assert_return(() => call($13, "load8_u", [21690]), 0);
+
+// memory_copy.wast:1989
+assert_return(() => call($13, "load8_u", [21889]), 0);
+
+// memory_copy.wast:1990
+assert_return(() => call($13, "load8_u", [22088]), 0);
+
+// memory_copy.wast:1991
+assert_return(() => call($13, "load8_u", [22287]), 0);
+
+// memory_copy.wast:1992
+assert_return(() => call($13, "load8_u", [22486]), 0);
+
+// memory_copy.wast:1993
+assert_return(() => call($13, "load8_u", [22685]), 0);
+
+// memory_copy.wast:1994
+assert_return(() => call($13, "load8_u", [22884]), 0);
+
+// memory_copy.wast:1995
+assert_return(() => call($13, "load8_u", [23083]), 0);
+
+// memory_copy.wast:1996
+assert_return(() => call($13, "load8_u", [23282]), 0);
+
+// memory_copy.wast:1997
+assert_return(() => call($13, "load8_u", [23481]), 0);
+
+// memory_copy.wast:1998
+assert_return(() => call($13, "load8_u", [23680]), 0);
+
+// memory_copy.wast:1999
+assert_return(() => call($13, "load8_u", [23879]), 0);
+
+// memory_copy.wast:2000
+assert_return(() => call($13, "load8_u", [24078]), 0);
+
+// memory_copy.wast:2001
+assert_return(() => call($13, "load8_u", [24277]), 0);
+
+// memory_copy.wast:2002
+assert_return(() => call($13, "load8_u", [24476]), 0);
+
+// memory_copy.wast:2003
+assert_return(() => call($13, "load8_u", [24675]), 0);
+
+// memory_copy.wast:2004
+assert_return(() => call($13, "load8_u", [24874]), 0);
+
+// memory_copy.wast:2005
+assert_return(() => call($13, "load8_u", [25073]), 0);
+
+// memory_copy.wast:2006
+assert_return(() => call($13, "load8_u", [25272]), 0);
+
+// memory_copy.wast:2007
+assert_return(() => call($13, "load8_u", [25471]), 0);
+
+// memory_copy.wast:2008
+assert_return(() => call($13, "load8_u", [25670]), 0);
+
+// memory_copy.wast:2009
+assert_return(() => call($13, "load8_u", [25869]), 0);
+
+// memory_copy.wast:2010
+assert_return(() => call($13, "load8_u", [26068]), 0);
+
+// memory_copy.wast:2011
+assert_return(() => call($13, "load8_u", [26267]), 0);
+
+// memory_copy.wast:2012
+assert_return(() => call($13, "load8_u", [26466]), 0);
+
+// memory_copy.wast:2013
+assert_return(() => call($13, "load8_u", [26665]), 0);
+
+// memory_copy.wast:2014
+assert_return(() => call($13, "load8_u", [26864]), 0);
+
+// memory_copy.wast:2015
+assert_return(() => call($13, "load8_u", [27063]), 0);
+
+// memory_copy.wast:2016
+assert_return(() => call($13, "load8_u", [27262]), 0);
+
+// memory_copy.wast:2017
+assert_return(() => call($13, "load8_u", [27461]), 0);
+
+// memory_copy.wast:2018
+assert_return(() => call($13, "load8_u", [27660]), 0);
+
+// memory_copy.wast:2019
+assert_return(() => call($13, "load8_u", [27859]), 0);
+
+// memory_copy.wast:2020
+assert_return(() => call($13, "load8_u", [28058]), 0);
+
+// memory_copy.wast:2021
+assert_return(() => call($13, "load8_u", [28257]), 0);
+
+// memory_copy.wast:2022
+assert_return(() => call($13, "load8_u", [28456]), 0);
+
+// memory_copy.wast:2023
+assert_return(() => call($13, "load8_u", [28655]), 0);
+
+// memory_copy.wast:2024
+assert_return(() => call($13, "load8_u", [28854]), 0);
+
+// memory_copy.wast:2025
+assert_return(() => call($13, "load8_u", [29053]), 0);
+
+// memory_copy.wast:2026
+assert_return(() => call($13, "load8_u", [29252]), 0);
+
+// memory_copy.wast:2027
+assert_return(() => call($13, "load8_u", [29451]), 0);
+
+// memory_copy.wast:2028
+assert_return(() => call($13, "load8_u", [29650]), 0);
+
+// memory_copy.wast:2029
+assert_return(() => call($13, "load8_u", [29849]), 0);
+
+// memory_copy.wast:2030
+assert_return(() => call($13, "load8_u", [30048]), 0);
+
+// memory_copy.wast:2031
+assert_return(() => call($13, "load8_u", [30247]), 0);
+
+// memory_copy.wast:2032
+assert_return(() => call($13, "load8_u", [30446]), 0);
+
+// memory_copy.wast:2033
+assert_return(() => call($13, "load8_u", [30645]), 0);
+
+// memory_copy.wast:2034
+assert_return(() => call($13, "load8_u", [30844]), 0);
+
+// memory_copy.wast:2035
+assert_return(() => call($13, "load8_u", [31043]), 0);
+
+// memory_copy.wast:2036
+assert_return(() => call($13, "load8_u", [31242]), 0);
+
+// memory_copy.wast:2037
+assert_return(() => call($13, "load8_u", [31441]), 0);
+
+// memory_copy.wast:2038
+assert_return(() => call($13, "load8_u", [31640]), 0);
+
+// memory_copy.wast:2039
+assert_return(() => call($13, "load8_u", [31839]), 0);
+
+// memory_copy.wast:2040
+assert_return(() => call($13, "load8_u", [32038]), 0);
+
+// memory_copy.wast:2041
+assert_return(() => call($13, "load8_u", [32237]), 0);
+
+// memory_copy.wast:2042
+assert_return(() => call($13, "load8_u", [32436]), 0);
+
+// memory_copy.wast:2043
+assert_return(() => call($13, "load8_u", [32635]), 0);
+
+// memory_copy.wast:2044
+assert_return(() => call($13, "load8_u", [32834]), 0);
+
+// memory_copy.wast:2045
+assert_return(() => call($13, "load8_u", [33033]), 0);
+
+// memory_copy.wast:2046
+assert_return(() => call($13, "load8_u", [33232]), 0);
+
+// memory_copy.wast:2047
+assert_return(() => call($13, "load8_u", [33431]), 0);
+
+// memory_copy.wast:2048
+assert_return(() => call($13, "load8_u", [33630]), 0);
+
+// memory_copy.wast:2049
+assert_return(() => call($13, "load8_u", [33829]), 0);
+
+// memory_copy.wast:2050
+assert_return(() => call($13, "load8_u", [34028]), 0);
+
+// memory_copy.wast:2051
+assert_return(() => call($13, "load8_u", [34227]), 0);
+
+// memory_copy.wast:2052
+assert_return(() => call($13, "load8_u", [34426]), 0);
+
+// memory_copy.wast:2053
+assert_return(() => call($13, "load8_u", [34625]), 0);
+
+// memory_copy.wast:2054
+assert_return(() => call($13, "load8_u", [34824]), 0);
+
+// memory_copy.wast:2055
+assert_return(() => call($13, "load8_u", [35023]), 0);
+
+// memory_copy.wast:2056
+assert_return(() => call($13, "load8_u", [35222]), 0);
+
+// memory_copy.wast:2057
+assert_return(() => call($13, "load8_u", [35421]), 0);
+
+// memory_copy.wast:2058
+assert_return(() => call($13, "load8_u", [35620]), 0);
+
+// memory_copy.wast:2059
+assert_return(() => call($13, "load8_u", [35819]), 0);
+
+// memory_copy.wast:2060
+assert_return(() => call($13, "load8_u", [36018]), 0);
+
+// memory_copy.wast:2061
+assert_return(() => call($13, "load8_u", [36217]), 0);
+
+// memory_copy.wast:2062
+assert_return(() => call($13, "load8_u", [36416]), 0);
+
+// memory_copy.wast:2063
+assert_return(() => call($13, "load8_u", [36615]), 0);
+
+// memory_copy.wast:2064
+assert_return(() => call($13, "load8_u", [36814]), 0);
+
+// memory_copy.wast:2065
+assert_return(() => call($13, "load8_u", [37013]), 0);
+
+// memory_copy.wast:2066
+assert_return(() => call($13, "load8_u", [37212]), 0);
+
+// memory_copy.wast:2067
+assert_return(() => call($13, "load8_u", [37411]), 0);
+
+// memory_copy.wast:2068
+assert_return(() => call($13, "load8_u", [37610]), 0);
+
+// memory_copy.wast:2069
+assert_return(() => call($13, "load8_u", [37809]), 0);
+
+// memory_copy.wast:2070
+assert_return(() => call($13, "load8_u", [38008]), 0);
+
+// memory_copy.wast:2071
+assert_return(() => call($13, "load8_u", [38207]), 0);
+
+// memory_copy.wast:2072
+assert_return(() => call($13, "load8_u", [38406]), 0);
+
+// memory_copy.wast:2073
+assert_return(() => call($13, "load8_u", [38605]), 0);
+
+// memory_copy.wast:2074
+assert_return(() => call($13, "load8_u", [38804]), 0);
+
+// memory_copy.wast:2075
+assert_return(() => call($13, "load8_u", [39003]), 0);
+
+// memory_copy.wast:2076
+assert_return(() => call($13, "load8_u", [39202]), 0);
+
+// memory_copy.wast:2077
+assert_return(() => call($13, "load8_u", [39401]), 0);
+
+// memory_copy.wast:2078
+assert_return(() => call($13, "load8_u", [39600]), 0);
+
+// memory_copy.wast:2079
+assert_return(() => call($13, "load8_u", [39799]), 0);
+
+// memory_copy.wast:2080
+assert_return(() => call($13, "load8_u", [39998]), 0);
+
+// memory_copy.wast:2081
+assert_return(() => call($13, "load8_u", [40197]), 0);
+
+// memory_copy.wast:2082
+assert_return(() => call($13, "load8_u", [40396]), 0);
+
+// memory_copy.wast:2083
+assert_return(() => call($13, "load8_u", [40595]), 0);
+
+// memory_copy.wast:2084
+assert_return(() => call($13, "load8_u", [40794]), 0);
+
+// memory_copy.wast:2085
+assert_return(() => call($13, "load8_u", [40993]), 0);
+
+// memory_copy.wast:2086
+assert_return(() => call($13, "load8_u", [41192]), 0);
+
+// memory_copy.wast:2087
+assert_return(() => call($13, "load8_u", [41391]), 0);
+
+// memory_copy.wast:2088
+assert_return(() => call($13, "load8_u", [41590]), 0);
+
+// memory_copy.wast:2089
+assert_return(() => call($13, "load8_u", [41789]), 0);
+
+// memory_copy.wast:2090
+assert_return(() => call($13, "load8_u", [41988]), 0);
+
+// memory_copy.wast:2091
+assert_return(() => call($13, "load8_u", [42187]), 0);
+
+// memory_copy.wast:2092
+assert_return(() => call($13, "load8_u", [42386]), 0);
+
+// memory_copy.wast:2093
+assert_return(() => call($13, "load8_u", [42585]), 0);
+
+// memory_copy.wast:2094
+assert_return(() => call($13, "load8_u", [42784]), 0);
+
+// memory_copy.wast:2095
+assert_return(() => call($13, "load8_u", [42983]), 0);
+
+// memory_copy.wast:2096
+assert_return(() => call($13, "load8_u", [43182]), 0);
+
+// memory_copy.wast:2097
+assert_return(() => call($13, "load8_u", [43381]), 0);
+
+// memory_copy.wast:2098
+assert_return(() => call($13, "load8_u", [43580]), 0);
+
+// memory_copy.wast:2099
+assert_return(() => call($13, "load8_u", [43779]), 0);
+
+// memory_copy.wast:2100
+assert_return(() => call($13, "load8_u", [43978]), 0);
+
+// memory_copy.wast:2101
+assert_return(() => call($13, "load8_u", [44177]), 0);
+
+// memory_copy.wast:2102
+assert_return(() => call($13, "load8_u", [44376]), 0);
+
+// memory_copy.wast:2103
+assert_return(() => call($13, "load8_u", [44575]), 0);
+
+// memory_copy.wast:2104
+assert_return(() => call($13, "load8_u", [44774]), 0);
+
+// memory_copy.wast:2105
+assert_return(() => call($13, "load8_u", [44973]), 0);
+
+// memory_copy.wast:2106
+assert_return(() => call($13, "load8_u", [45172]), 0);
+
+// memory_copy.wast:2107
+assert_return(() => call($13, "load8_u", [45371]), 0);
+
+// memory_copy.wast:2108
+assert_return(() => call($13, "load8_u", [45570]), 0);
+
+// memory_copy.wast:2109
+assert_return(() => call($13, "load8_u", [45769]), 0);
+
+// memory_copy.wast:2110
+assert_return(() => call($13, "load8_u", [45968]), 0);
+
+// memory_copy.wast:2111
+assert_return(() => call($13, "load8_u", [46167]), 0);
+
+// memory_copy.wast:2112
+assert_return(() => call($13, "load8_u", [46366]), 0);
+
+// memory_copy.wast:2113
+assert_return(() => call($13, "load8_u", [46565]), 0);
+
+// memory_copy.wast:2114
+assert_return(() => call($13, "load8_u", [46764]), 0);
+
+// memory_copy.wast:2115
+assert_return(() => call($13, "load8_u", [46963]), 0);
+
+// memory_copy.wast:2116
+assert_return(() => call($13, "load8_u", [47162]), 0);
+
+// memory_copy.wast:2117
+assert_return(() => call($13, "load8_u", [47361]), 0);
+
+// memory_copy.wast:2118
+assert_return(() => call($13, "load8_u", [47560]), 0);
+
+// memory_copy.wast:2119
+assert_return(() => call($13, "load8_u", [47759]), 0);
+
+// memory_copy.wast:2120
+assert_return(() => call($13, "load8_u", [47958]), 0);
+
+// memory_copy.wast:2121
+assert_return(() => call($13, "load8_u", [48157]), 0);
+
+// memory_copy.wast:2122
+assert_return(() => call($13, "load8_u", [48356]), 0);
+
+// memory_copy.wast:2123
+assert_return(() => call($13, "load8_u", [48555]), 0);
+
+// memory_copy.wast:2124
+assert_return(() => call($13, "load8_u", [48754]), 0);
+
+// memory_copy.wast:2125
+assert_return(() => call($13, "load8_u", [48953]), 0);
+
+// memory_copy.wast:2126
+assert_return(() => call($13, "load8_u", [49152]), 0);
+
+// memory_copy.wast:2127
+assert_return(() => call($13, "load8_u", [49351]), 0);
+
+// memory_copy.wast:2128
+assert_return(() => call($13, "load8_u", [49550]), 0);
+
+// memory_copy.wast:2129
+assert_return(() => call($13, "load8_u", [49749]), 0);
+
+// memory_copy.wast:2130
+assert_return(() => call($13, "load8_u", [49948]), 0);
+
+// memory_copy.wast:2131
+assert_return(() => call($13, "load8_u", [50147]), 0);
+
+// memory_copy.wast:2132
+assert_return(() => call($13, "load8_u", [50346]), 0);
+
+// memory_copy.wast:2133
+assert_return(() => call($13, "load8_u", [50545]), 0);
+
+// memory_copy.wast:2134
+assert_return(() => call($13, "load8_u", [50744]), 0);
+
+// memory_copy.wast:2135
+assert_return(() => call($13, "load8_u", [50943]), 0);
+
+// memory_copy.wast:2136
+assert_return(() => call($13, "load8_u", [51142]), 0);
+
+// memory_copy.wast:2137
+assert_return(() => call($13, "load8_u", [51341]), 0);
+
+// memory_copy.wast:2138
+assert_return(() => call($13, "load8_u", [51540]), 0);
+
+// memory_copy.wast:2139
+assert_return(() => call($13, "load8_u", [51739]), 0);
+
+// memory_copy.wast:2140
+assert_return(() => call($13, "load8_u", [51938]), 0);
+
+// memory_copy.wast:2141
+assert_return(() => call($13, "load8_u", [52137]), 0);
+
+// memory_copy.wast:2142
+assert_return(() => call($13, "load8_u", [52336]), 0);
+
+// memory_copy.wast:2143
+assert_return(() => call($13, "load8_u", [52535]), 0);
+
+// memory_copy.wast:2144
+assert_return(() => call($13, "load8_u", [52734]), 0);
+
+// memory_copy.wast:2145
+assert_return(() => call($13, "load8_u", [52933]), 0);
+
+// memory_copy.wast:2146
+assert_return(() => call($13, "load8_u", [53132]), 0);
+
+// memory_copy.wast:2147
+assert_return(() => call($13, "load8_u", [53331]), 0);
+
+// memory_copy.wast:2148
+assert_return(() => call($13, "load8_u", [53530]), 0);
+
+// memory_copy.wast:2149
+assert_return(() => call($13, "load8_u", [53729]), 0);
+
+// memory_copy.wast:2150
+assert_return(() => call($13, "load8_u", [53928]), 0);
+
+// memory_copy.wast:2151
+assert_return(() => call($13, "load8_u", [54127]), 0);
+
+// memory_copy.wast:2152
+assert_return(() => call($13, "load8_u", [54326]), 0);
+
+// memory_copy.wast:2153
+assert_return(() => call($13, "load8_u", [54525]), 0);
+
+// memory_copy.wast:2154
+assert_return(() => call($13, "load8_u", [54724]), 0);
+
+// memory_copy.wast:2155
+assert_return(() => call($13, "load8_u", [54923]), 0);
+
+// memory_copy.wast:2156
+assert_return(() => call($13, "load8_u", [55122]), 0);
+
+// memory_copy.wast:2157
+assert_return(() => call($13, "load8_u", [55321]), 0);
+
+// memory_copy.wast:2158
+assert_return(() => call($13, "load8_u", [55520]), 0);
+
+// memory_copy.wast:2159
+assert_return(() => call($13, "load8_u", [55719]), 0);
+
+// memory_copy.wast:2160
+assert_return(() => call($13, "load8_u", [55918]), 0);
+
+// memory_copy.wast:2161
+assert_return(() => call($13, "load8_u", [56117]), 0);
+
+// memory_copy.wast:2162
+assert_return(() => call($13, "load8_u", [56316]), 0);
+
+// memory_copy.wast:2163
+assert_return(() => call($13, "load8_u", [56515]), 0);
+
+// memory_copy.wast:2164
+assert_return(() => call($13, "load8_u", [56714]), 0);
+
+// memory_copy.wast:2165
+assert_return(() => call($13, "load8_u", [56913]), 0);
+
+// memory_copy.wast:2166
+assert_return(() => call($13, "load8_u", [57112]), 0);
+
+// memory_copy.wast:2167
+assert_return(() => call($13, "load8_u", [57311]), 0);
+
+// memory_copy.wast:2168
+assert_return(() => call($13, "load8_u", [57510]), 0);
+
+// memory_copy.wast:2169
+assert_return(() => call($13, "load8_u", [57709]), 0);
+
+// memory_copy.wast:2170
+assert_return(() => call($13, "load8_u", [57908]), 0);
+
+// memory_copy.wast:2171
+assert_return(() => call($13, "load8_u", [58107]), 0);
+
+// memory_copy.wast:2172
+assert_return(() => call($13, "load8_u", [58306]), 0);
+
+// memory_copy.wast:2173
+assert_return(() => call($13, "load8_u", [58505]), 0);
+
+// memory_copy.wast:2174
+assert_return(() => call($13, "load8_u", [58704]), 0);
+
+// memory_copy.wast:2175
+assert_return(() => call($13, "load8_u", [58903]), 0);
+
+// memory_copy.wast:2176
+assert_return(() => call($13, "load8_u", [59102]), 0);
+
+// memory_copy.wast:2177
+assert_return(() => call($13, "load8_u", [59301]), 0);
+
+// memory_copy.wast:2178
+assert_return(() => call($13, "load8_u", [59500]), 0);
+
+// memory_copy.wast:2179
+assert_return(() => call($13, "load8_u", [59699]), 0);
+
+// memory_copy.wast:2180
+assert_return(() => call($13, "load8_u", [59898]), 0);
+
+// memory_copy.wast:2181
+assert_return(() => call($13, "load8_u", [60097]), 0);
+
+// memory_copy.wast:2182
+assert_return(() => call($13, "load8_u", [60296]), 0);
+
+// memory_copy.wast:2183
+assert_return(() => call($13, "load8_u", [60495]), 0);
+
+// memory_copy.wast:2184
+assert_return(() => call($13, "load8_u", [60694]), 0);
+
+// memory_copy.wast:2185
+assert_return(() => call($13, "load8_u", [60893]), 0);
+
+// memory_copy.wast:2186
+assert_return(() => call($13, "load8_u", [61092]), 0);
+
+// memory_copy.wast:2187
+assert_return(() => call($13, "load8_u", [61291]), 0);
+
+// memory_copy.wast:2188
+assert_return(() => call($13, "load8_u", [61490]), 0);
+
+// memory_copy.wast:2189
+assert_return(() => call($13, "load8_u", [61689]), 0);
+
+// memory_copy.wast:2190
+assert_return(() => call($13, "load8_u", [61888]), 0);
+
+// memory_copy.wast:2191
+assert_return(() => call($13, "load8_u", [62087]), 0);
+
+// memory_copy.wast:2192
+assert_return(() => call($13, "load8_u", [62286]), 0);
+
+// memory_copy.wast:2193
+assert_return(() => call($13, "load8_u", [62485]), 0);
+
+// memory_copy.wast:2194
+assert_return(() => call($13, "load8_u", [62684]), 0);
+
+// memory_copy.wast:2195
+assert_return(() => call($13, "load8_u", [62883]), 0);
+
+// memory_copy.wast:2196
+assert_return(() => call($13, "load8_u", [63082]), 0);
+
+// memory_copy.wast:2197
+assert_return(() => call($13, "load8_u", [63281]), 0);
+
+// memory_copy.wast:2198
+assert_return(() => call($13, "load8_u", [63480]), 0);
+
+// memory_copy.wast:2199
+assert_return(() => call($13, "load8_u", [63679]), 0);
+
+// memory_copy.wast:2200
+assert_return(() => call($13, "load8_u", [63878]), 0);
+
+// memory_copy.wast:2201
+assert_return(() => call($13, "load8_u", [64077]), 0);
+
+// memory_copy.wast:2202
+assert_return(() => call($13, "load8_u", [64276]), 0);
+
+// memory_copy.wast:2203
+assert_return(() => call($13, "load8_u", [64475]), 0);
+
+// memory_copy.wast:2204
+assert_return(() => call($13, "load8_u", [64674]), 0);
+
+// memory_copy.wast:2205
+assert_return(() => call($13, "load8_u", [64873]), 0);
+
+// memory_copy.wast:2206
+assert_return(() => call($13, "load8_u", [65072]), 0);
+
+// memory_copy.wast:2207
+assert_return(() => call($13, "load8_u", [65271]), 0);
+
+// memory_copy.wast:2208
+assert_return(() => call($13, "load8_u", [65470]), 0);
+
+// memory_copy.wast:2209
+assert_return(() => call($13, "load8_u", [65486]), 0);
+
+// memory_copy.wast:2210
+assert_return(() => call($13, "load8_u", [65487]), 1);
+
+// memory_copy.wast:2211
+assert_return(() => call($13, "load8_u", [65488]), 2);
+
+// memory_copy.wast:2212
+assert_return(() => call($13, "load8_u", [65489]), 3);
+
+// memory_copy.wast:2213
+assert_return(() => call($13, "load8_u", [65490]), 4);
+
+// memory_copy.wast:2214
+assert_return(() => call($13, "load8_u", [65491]), 5);
+
+// memory_copy.wast:2215
+assert_return(() => call($13, "load8_u", [65492]), 6);
+
+// memory_copy.wast:2216
+assert_return(() => call($13, "load8_u", [65493]), 7);
+
+// memory_copy.wast:2217
+assert_return(() => call($13, "load8_u", [65494]), 8);
+
+// memory_copy.wast:2218
+assert_return(() => call($13, "load8_u", [65495]), 9);
+
+// memory_copy.wast:2219
+assert_return(() => call($13, "load8_u", [65496]), 10);
+
+// memory_copy.wast:2220
+assert_return(() => call($13, "load8_u", [65497]), 11);
+
+// memory_copy.wast:2221
+assert_return(() => call($13, "load8_u", [65498]), 12);
+
+// memory_copy.wast:2222
+assert_return(() => call($13, "load8_u", [65499]), 13);
+
+// memory_copy.wast:2223
+assert_return(() => call($13, "load8_u", [65500]), 14);
+
+// memory_copy.wast:2224
+assert_return(() => call($13, "load8_u", [65501]), 15);
+
+// memory_copy.wast:2225
+assert_return(() => call($13, "load8_u", [65502]), 16);
+
+// memory_copy.wast:2226
+assert_return(() => call($13, "load8_u", [65503]), 17);
+
+// memory_copy.wast:2227
+assert_return(() => call($13, "load8_u", [65504]), 18);
+
+// memory_copy.wast:2228
+assert_return(() => call($13, "load8_u", [65505]), 19);
+
+// memory_copy.wast:2230
+let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
+
+// memory_copy.wast:2238
+assert_trap(() => call($14, "run", [65486, 65516, 40]));
+
+// memory_copy.wast:2241
+assert_return(() => call($14, "load8_u", [198]), 0);
+
+// memory_copy.wast:2242
+assert_return(() => call($14, "load8_u", [397]), 0);
+
+// memory_copy.wast:2243
+assert_return(() => call($14, "load8_u", [596]), 0);
+
+// memory_copy.wast:2244
+assert_return(() => call($14, "load8_u", [795]), 0);
+
+// memory_copy.wast:2245
+assert_return(() => call($14, "load8_u", [994]), 0);
+
+// memory_copy.wast:2246
+assert_return(() => call($14, "load8_u", [1193]), 0);
+
+// memory_copy.wast:2247
+assert_return(() => call($14, "load8_u", [1392]), 0);
+
+// memory_copy.wast:2248
+assert_return(() => call($14, "load8_u", [1591]), 0);
+
+// memory_copy.wast:2249
+assert_return(() => call($14, "load8_u", [1790]), 0);
+
+// memory_copy.wast:2250
+assert_return(() => call($14, "load8_u", [1989]), 0);
+
+// memory_copy.wast:2251
+assert_return(() => call($14, "load8_u", [2188]), 0);
+
+// memory_copy.wast:2252
+assert_return(() => call($14, "load8_u", [2387]), 0);
+
+// memory_copy.wast:2253
+assert_return(() => call($14, "load8_u", [2586]), 0);
+
+// memory_copy.wast:2254
+assert_return(() => call($14, "load8_u", [2785]), 0);
+
+// memory_copy.wast:2255
+assert_return(() => call($14, "load8_u", [2984]), 0);
+
+// memory_copy.wast:2256
+assert_return(() => call($14, "load8_u", [3183]), 0);
+
+// memory_copy.wast:2257
+assert_return(() => call($14, "load8_u", [3382]), 0);
+
+// memory_copy.wast:2258
+assert_return(() => call($14, "load8_u", [3581]), 0);
+
+// memory_copy.wast:2259
+assert_return(() => call($14, "load8_u", [3780]), 0);
+
+// memory_copy.wast:2260
+assert_return(() => call($14, "load8_u", [3979]), 0);
+
+// memory_copy.wast:2261
+assert_return(() => call($14, "load8_u", [4178]), 0);
+
+// memory_copy.wast:2262
+assert_return(() => call($14, "load8_u", [4377]), 0);
+
+// memory_copy.wast:2263
+assert_return(() => call($14, "load8_u", [4576]), 0);
+
+// memory_copy.wast:2264
+assert_return(() => call($14, "load8_u", [4775]), 0);
+
+// memory_copy.wast:2265
+assert_return(() => call($14, "load8_u", [4974]), 0);
+
+// memory_copy.wast:2266
+assert_return(() => call($14, "load8_u", [5173]), 0);
+
+// memory_copy.wast:2267
+assert_return(() => call($14, "load8_u", [5372]), 0);
+
+// memory_copy.wast:2268
+assert_return(() => call($14, "load8_u", [5571]), 0);
+
+// memory_copy.wast:2269
+assert_return(() => call($14, "load8_u", [5770]), 0);
+
+// memory_copy.wast:2270
+assert_return(() => call($14, "load8_u", [5969]), 0);
+
+// memory_copy.wast:2271
+assert_return(() => call($14, "load8_u", [6168]), 0);
+
+// memory_copy.wast:2272
+assert_return(() => call($14, "load8_u", [6367]), 0);
+
+// memory_copy.wast:2273
+assert_return(() => call($14, "load8_u", [6566]), 0);
+
+// memory_copy.wast:2274
+assert_return(() => call($14, "load8_u", [6765]), 0);
+
+// memory_copy.wast:2275
+assert_return(() => call($14, "load8_u", [6964]), 0);
+
+// memory_copy.wast:2276
+assert_return(() => call($14, "load8_u", [7163]), 0);
+
+// memory_copy.wast:2277
+assert_return(() => call($14, "load8_u", [7362]), 0);
+
+// memory_copy.wast:2278
+assert_return(() => call($14, "load8_u", [7561]), 0);
+
+// memory_copy.wast:2279
+assert_return(() => call($14, "load8_u", [7760]), 0);
+
+// memory_copy.wast:2280
+assert_return(() => call($14, "load8_u", [7959]), 0);
+
+// memory_copy.wast:2281
+assert_return(() => call($14, "load8_u", [8158]), 0);
+
+// memory_copy.wast:2282
+assert_return(() => call($14, "load8_u", [8357]), 0);
+
+// memory_copy.wast:2283
+assert_return(() => call($14, "load8_u", [8556]), 0);
+
+// memory_copy.wast:2284
+assert_return(() => call($14, "load8_u", [8755]), 0);
+
+// memory_copy.wast:2285
+assert_return(() => call($14, "load8_u", [8954]), 0);
+
+// memory_copy.wast:2286
+assert_return(() => call($14, "load8_u", [9153]), 0);
+
+// memory_copy.wast:2287
+assert_return(() => call($14, "load8_u", [9352]), 0);
+
+// memory_copy.wast:2288
+assert_return(() => call($14, "load8_u", [9551]), 0);
+
+// memory_copy.wast:2289
+assert_return(() => call($14, "load8_u", [9750]), 0);
+
+// memory_copy.wast:2290
+assert_return(() => call($14, "load8_u", [9949]), 0);
+
+// memory_copy.wast:2291
+assert_return(() => call($14, "load8_u", [10148]), 0);
+
+// memory_copy.wast:2292
+assert_return(() => call($14, "load8_u", [10347]), 0);
+
+// memory_copy.wast:2293
+assert_return(() => call($14, "load8_u", [10546]), 0);
+
+// memory_copy.wast:2294
+assert_return(() => call($14, "load8_u", [10745]), 0);
+
+// memory_copy.wast:2295
+assert_return(() => call($14, "load8_u", [10944]), 0);
+
+// memory_copy.wast:2296
+assert_return(() => call($14, "load8_u", [11143]), 0);
+
+// memory_copy.wast:2297
+assert_return(() => call($14, "load8_u", [11342]), 0);
+
+// memory_copy.wast:2298
+assert_return(() => call($14, "load8_u", [11541]), 0);
+
+// memory_copy.wast:2299
+assert_return(() => call($14, "load8_u", [11740]), 0);
+
+// memory_copy.wast:2300
+assert_return(() => call($14, "load8_u", [11939]), 0);
+
+// memory_copy.wast:2301
+assert_return(() => call($14, "load8_u", [12138]), 0);
+
+// memory_copy.wast:2302
+assert_return(() => call($14, "load8_u", [12337]), 0);
+
+// memory_copy.wast:2303
+assert_return(() => call($14, "load8_u", [12536]), 0);
+
+// memory_copy.wast:2304
+assert_return(() => call($14, "load8_u", [12735]), 0);
+
+// memory_copy.wast:2305
+assert_return(() => call($14, "load8_u", [12934]), 0);
+
+// memory_copy.wast:2306
+assert_return(() => call($14, "load8_u", [13133]), 0);
+
+// memory_copy.wast:2307
+assert_return(() => call($14, "load8_u", [13332]), 0);
+
+// memory_copy.wast:2308
+assert_return(() => call($14, "load8_u", [13531]), 0);
+
+// memory_copy.wast:2309
+assert_return(() => call($14, "load8_u", [13730]), 0);
+
+// memory_copy.wast:2310
+assert_return(() => call($14, "load8_u", [13929]), 0);
+
+// memory_copy.wast:2311
+assert_return(() => call($14, "load8_u", [14128]), 0);
+
+// memory_copy.wast:2312
+assert_return(() => call($14, "load8_u", [14327]), 0);
+
+// memory_copy.wast:2313
+assert_return(() => call($14, "load8_u", [14526]), 0);
+
+// memory_copy.wast:2314
+assert_return(() => call($14, "load8_u", [14725]), 0);
+
+// memory_copy.wast:2315
+assert_return(() => call($14, "load8_u", [14924]), 0);
+
+// memory_copy.wast:2316
+assert_return(() => call($14, "load8_u", [15123]), 0);
+
+// memory_copy.wast:2317
+assert_return(() => call($14, "load8_u", [15322]), 0);
+
+// memory_copy.wast:2318
+assert_return(() => call($14, "load8_u", [15521]), 0);
+
+// memory_copy.wast:2319
+assert_return(() => call($14, "load8_u", [15720]), 0);
+
+// memory_copy.wast:2320
+assert_return(() => call($14, "load8_u", [15919]), 0);
+
+// memory_copy.wast:2321
+assert_return(() => call($14, "load8_u", [16118]), 0);
+
+// memory_copy.wast:2322
+assert_return(() => call($14, "load8_u", [16317]), 0);
+
+// memory_copy.wast:2323
+assert_return(() => call($14, "load8_u", [16516]), 0);
+
+// memory_copy.wast:2324
+assert_return(() => call($14, "load8_u", [16715]), 0);
+
+// memory_copy.wast:2325
+assert_return(() => call($14, "load8_u", [16914]), 0);
+
+// memory_copy.wast:2326
+assert_return(() => call($14, "load8_u", [17113]), 0);
+
+// memory_copy.wast:2327
+assert_return(() => call($14, "load8_u", [17312]), 0);
+
+// memory_copy.wast:2328
+assert_return(() => call($14, "load8_u", [17511]), 0);
+
+// memory_copy.wast:2329
+assert_return(() => call($14, "load8_u", [17710]), 0);
+
+// memory_copy.wast:2330
+assert_return(() => call($14, "load8_u", [17909]), 0);
+
+// memory_copy.wast:2331
+assert_return(() => call($14, "load8_u", [18108]), 0);
+
+// memory_copy.wast:2332
+assert_return(() => call($14, "load8_u", [18307]), 0);
+
+// memory_copy.wast:2333
+assert_return(() => call($14, "load8_u", [18506]), 0);
+
+// memory_copy.wast:2334
+assert_return(() => call($14, "load8_u", [18705]), 0);
+
+// memory_copy.wast:2335
+assert_return(() => call($14, "load8_u", [18904]), 0);
+
+// memory_copy.wast:2336
+assert_return(() => call($14, "load8_u", [19103]), 0);
+
+// memory_copy.wast:2337
+assert_return(() => call($14, "load8_u", [19302]), 0);
+
+// memory_copy.wast:2338
+assert_return(() => call($14, "load8_u", [19501]), 0);
+
+// memory_copy.wast:2339
+assert_return(() => call($14, "load8_u", [19700]), 0);
+
+// memory_copy.wast:2340
+assert_return(() => call($14, "load8_u", [19899]), 0);
+
+// memory_copy.wast:2341
+assert_return(() => call($14, "load8_u", [20098]), 0);
+
+// memory_copy.wast:2342
+assert_return(() => call($14, "load8_u", [20297]), 0);
+
+// memory_copy.wast:2343
+assert_return(() => call($14, "load8_u", [20496]), 0);
+
+// memory_copy.wast:2344
+assert_return(() => call($14, "load8_u", [20695]), 0);
+
+// memory_copy.wast:2345
+assert_return(() => call($14, "load8_u", [20894]), 0);
+
+// memory_copy.wast:2346
+assert_return(() => call($14, "load8_u", [21093]), 0);
+
+// memory_copy.wast:2347
+assert_return(() => call($14, "load8_u", [21292]), 0);
+
+// memory_copy.wast:2348
+assert_return(() => call($14, "load8_u", [21491]), 0);
+
+// memory_copy.wast:2349
+assert_return(() => call($14, "load8_u", [21690]), 0);
+
+// memory_copy.wast:2350
+assert_return(() => call($14, "load8_u", [21889]), 0);
+
+// memory_copy.wast:2351
+assert_return(() => call($14, "load8_u", [22088]), 0);
+
+// memory_copy.wast:2352
+assert_return(() => call($14, "load8_u", [22287]), 0);
+
+// memory_copy.wast:2353
+assert_return(() => call($14, "load8_u", [22486]), 0);
+
+// memory_copy.wast:2354
+assert_return(() => call($14, "load8_u", [22685]), 0);
+
+// memory_copy.wast:2355
+assert_return(() => call($14, "load8_u", [22884]), 0);
+
+// memory_copy.wast:2356
+assert_return(() => call($14, "load8_u", [23083]), 0);
+
+// memory_copy.wast:2357
+assert_return(() => call($14, "load8_u", [23282]), 0);
+
+// memory_copy.wast:2358
+assert_return(() => call($14, "load8_u", [23481]), 0);
+
+// memory_copy.wast:2359
+assert_return(() => call($14, "load8_u", [23680]), 0);
+
+// memory_copy.wast:2360
+assert_return(() => call($14, "load8_u", [23879]), 0);
+
+// memory_copy.wast:2361
+assert_return(() => call($14, "load8_u", [24078]), 0);
+
+// memory_copy.wast:2362
+assert_return(() => call($14, "load8_u", [24277]), 0);
+
+// memory_copy.wast:2363
+assert_return(() => call($14, "load8_u", [24476]), 0);
+
+// memory_copy.wast:2364
+assert_return(() => call($14, "load8_u", [24675]), 0);
+
+// memory_copy.wast:2365
+assert_return(() => call($14, "load8_u", [24874]), 0);
+
+// memory_copy.wast:2366
+assert_return(() => call($14, "load8_u", [25073]), 0);
+
+// memory_copy.wast:2367
+assert_return(() => call($14, "load8_u", [25272]), 0);
+
+// memory_copy.wast:2368
+assert_return(() => call($14, "load8_u", [25471]), 0);
+
+// memory_copy.wast:2369
+assert_return(() => call($14, "load8_u", [25670]), 0);
+
+// memory_copy.wast:2370
+assert_return(() => call($14, "load8_u", [25869]), 0);
+
+// memory_copy.wast:2371
+assert_return(() => call($14, "load8_u", [26068]), 0);
+
+// memory_copy.wast:2372
+assert_return(() => call($14, "load8_u", [26267]), 0);
+
+// memory_copy.wast:2373
+assert_return(() => call($14, "load8_u", [26466]), 0);
+
+// memory_copy.wast:2374
+assert_return(() => call($14, "load8_u", [26665]), 0);
+
+// memory_copy.wast:2375
+assert_return(() => call($14, "load8_u", [26864]), 0);
+
+// memory_copy.wast:2376
+assert_return(() => call($14, "load8_u", [27063]), 0);
+
+// memory_copy.wast:2377
+assert_return(() => call($14, "load8_u", [27262]), 0);
+
+// memory_copy.wast:2378
+assert_return(() => call($14, "load8_u", [27461]), 0);
+
+// memory_copy.wast:2379
+assert_return(() => call($14, "load8_u", [27660]), 0);
+
+// memory_copy.wast:2380
+assert_return(() => call($14, "load8_u", [27859]), 0);
+
+// memory_copy.wast:2381
+assert_return(() => call($14, "load8_u", [28058]), 0);
+
+// memory_copy.wast:2382
+assert_return(() => call($14, "load8_u", [28257]), 0);
+
+// memory_copy.wast:2383
+assert_return(() => call($14, "load8_u", [28456]), 0);
+
+// memory_copy.wast:2384
+assert_return(() => call($14, "load8_u", [28655]), 0);
+
+// memory_copy.wast:2385
+assert_return(() => call($14, "load8_u", [28854]), 0);
+
+// memory_copy.wast:2386
+assert_return(() => call($14, "load8_u", [29053]), 0);
+
+// memory_copy.wast:2387
+assert_return(() => call($14, "load8_u", [29252]), 0);
+
+// memory_copy.wast:2388
+assert_return(() => call($14, "load8_u", [29451]), 0);
+
+// memory_copy.wast:2389
+assert_return(() => call($14, "load8_u", [29650]), 0);
+
+// memory_copy.wast:2390
+assert_return(() => call($14, "load8_u", [29849]), 0);
+
+// memory_copy.wast:2391
+assert_return(() => call($14, "load8_u", [30048]), 0);
+
+// memory_copy.wast:2392
+assert_return(() => call($14, "load8_u", [30247]), 0);
+
+// memory_copy.wast:2393
+assert_return(() => call($14, "load8_u", [30446]), 0);
+
+// memory_copy.wast:2394
+assert_return(() => call($14, "load8_u", [30645]), 0);
+
+// memory_copy.wast:2395
+assert_return(() => call($14, "load8_u", [30844]), 0);
+
+// memory_copy.wast:2396
+assert_return(() => call($14, "load8_u", [31043]), 0);
+
+// memory_copy.wast:2397
+assert_return(() => call($14, "load8_u", [31242]), 0);
+
+// memory_copy.wast:2398
+assert_return(() => call($14, "load8_u", [31441]), 0);
+
+// memory_copy.wast:2399
+assert_return(() => call($14, "load8_u", [31640]), 0);
+
+// memory_copy.wast:2400
+assert_return(() => call($14, "load8_u", [31839]), 0);
+
+// memory_copy.wast:2401
+assert_return(() => call($14, "load8_u", [32038]), 0);
+
+// memory_copy.wast:2402
+assert_return(() => call($14, "load8_u", [32237]), 0);
+
+// memory_copy.wast:2403
+assert_return(() => call($14, "load8_u", [32436]), 0);
+
+// memory_copy.wast:2404
+assert_return(() => call($14, "load8_u", [32635]), 0);
+
+// memory_copy.wast:2405
+assert_return(() => call($14, "load8_u", [32834]), 0);
+
+// memory_copy.wast:2406
+assert_return(() => call($14, "load8_u", [33033]), 0);
+
+// memory_copy.wast:2407
+assert_return(() => call($14, "load8_u", [33232]), 0);
+
+// memory_copy.wast:2408
+assert_return(() => call($14, "load8_u", [33431]), 0);
+
+// memory_copy.wast:2409
+assert_return(() => call($14, "load8_u", [33630]), 0);
+
+// memory_copy.wast:2410
+assert_return(() => call($14, "load8_u", [33829]), 0);
+
+// memory_copy.wast:2411
+assert_return(() => call($14, "load8_u", [34028]), 0);
+
+// memory_copy.wast:2412
+assert_return(() => call($14, "load8_u", [34227]), 0);
+
+// memory_copy.wast:2413
+assert_return(() => call($14, "load8_u", [34426]), 0);
+
+// memory_copy.wast:2414
+assert_return(() => call($14, "load8_u", [34625]), 0);
+
+// memory_copy.wast:2415
+assert_return(() => call($14, "load8_u", [34824]), 0);
+
+// memory_copy.wast:2416
+assert_return(() => call($14, "load8_u", [35023]), 0);
+
+// memory_copy.wast:2417
+assert_return(() => call($14, "load8_u", [35222]), 0);
+
+// memory_copy.wast:2418
+assert_return(() => call($14, "load8_u", [35421]), 0);
+
+// memory_copy.wast:2419
+assert_return(() => call($14, "load8_u", [35620]), 0);
+
+// memory_copy.wast:2420
+assert_return(() => call($14, "load8_u", [35819]), 0);
+
+// memory_copy.wast:2421
+assert_return(() => call($14, "load8_u", [36018]), 0);
+
+// memory_copy.wast:2422
+assert_return(() => call($14, "load8_u", [36217]), 0);
+
+// memory_copy.wast:2423
+assert_return(() => call($14, "load8_u", [36416]), 0);
+
+// memory_copy.wast:2424
+assert_return(() => call($14, "load8_u", [36615]), 0);
+
+// memory_copy.wast:2425
+assert_return(() => call($14, "load8_u", [36814]), 0);
+
+// memory_copy.wast:2426
+assert_return(() => call($14, "load8_u", [37013]), 0);
+
+// memory_copy.wast:2427
+assert_return(() => call($14, "load8_u", [37212]), 0);
+
+// memory_copy.wast:2428
+assert_return(() => call($14, "load8_u", [37411]), 0);
+
+// memory_copy.wast:2429
+assert_return(() => call($14, "load8_u", [37610]), 0);
+
+// memory_copy.wast:2430
+assert_return(() => call($14, "load8_u", [37809]), 0);
+
+// memory_copy.wast:2431
+assert_return(() => call($14, "load8_u", [38008]), 0);
+
+// memory_copy.wast:2432
+assert_return(() => call($14, "load8_u", [38207]), 0);
+
+// memory_copy.wast:2433
+assert_return(() => call($14, "load8_u", [38406]), 0);
+
+// memory_copy.wast:2434
+assert_return(() => call($14, "load8_u", [38605]), 0);
+
+// memory_copy.wast:2435
+assert_return(() => call($14, "load8_u", [38804]), 0);
+
+// memory_copy.wast:2436
+assert_return(() => call($14, "load8_u", [39003]), 0);
+
+// memory_copy.wast:2437
+assert_return(() => call($14, "load8_u", [39202]), 0);
+
+// memory_copy.wast:2438
+assert_return(() => call($14, "load8_u", [39401]), 0);
+
+// memory_copy.wast:2439
+assert_return(() => call($14, "load8_u", [39600]), 0);
+
+// memory_copy.wast:2440
+assert_return(() => call($14, "load8_u", [39799]), 0);
+
+// memory_copy.wast:2441
+assert_return(() => call($14, "load8_u", [39998]), 0);
+
+// memory_copy.wast:2442
+assert_return(() => call($14, "load8_u", [40197]), 0);
+
+// memory_copy.wast:2443
+assert_return(() => call($14, "load8_u", [40396]), 0);
+
+// memory_copy.wast:2444
+assert_return(() => call($14, "load8_u", [40595]), 0);
+
+// memory_copy.wast:2445
+assert_return(() => call($14, "load8_u", [40794]), 0);
+
+// memory_copy.wast:2446
+assert_return(() => call($14, "load8_u", [40993]), 0);
+
+// memory_copy.wast:2447
+assert_return(() => call($14, "load8_u", [41192]), 0);
+
+// memory_copy.wast:2448
+assert_return(() => call($14, "load8_u", [41391]), 0);
+
+// memory_copy.wast:2449
+assert_return(() => call($14, "load8_u", [41590]), 0);
+
+// memory_copy.wast:2450
+assert_return(() => call($14, "load8_u", [41789]), 0);
+
+// memory_copy.wast:2451
+assert_return(() => call($14, "load8_u", [41988]), 0);
+
+// memory_copy.wast:2452
+assert_return(() => call($14, "load8_u", [42187]), 0);
+
+// memory_copy.wast:2453
+assert_return(() => call($14, "load8_u", [42386]), 0);
+
+// memory_copy.wast:2454
+assert_return(() => call($14, "load8_u", [42585]), 0);
+
+// memory_copy.wast:2455
+assert_return(() => call($14, "load8_u", [42784]), 0);
+
+// memory_copy.wast:2456
+assert_return(() => call($14, "load8_u", [42983]), 0);
+
+// memory_copy.wast:2457
+assert_return(() => call($14, "load8_u", [43182]), 0);
+
+// memory_copy.wast:2458
+assert_return(() => call($14, "load8_u", [43381]), 0);
+
+// memory_copy.wast:2459
+assert_return(() => call($14, "load8_u", [43580]), 0);
+
+// memory_copy.wast:2460
+assert_return(() => call($14, "load8_u", [43779]), 0);
+
+// memory_copy.wast:2461
+assert_return(() => call($14, "load8_u", [43978]), 0);
+
+// memory_copy.wast:2462
+assert_return(() => call($14, "load8_u", [44177]), 0);
+
+// memory_copy.wast:2463
+assert_return(() => call($14, "load8_u", [44376]), 0);
+
+// memory_copy.wast:2464
+assert_return(() => call($14, "load8_u", [44575]), 0);
+
+// memory_copy.wast:2465
+assert_return(() => call($14, "load8_u", [44774]), 0);
+
+// memory_copy.wast:2466
+assert_return(() => call($14, "load8_u", [44973]), 0);
+
+// memory_copy.wast:2467
+assert_return(() => call($14, "load8_u", [45172]), 0);
+
+// memory_copy.wast:2468
+assert_return(() => call($14, "load8_u", [45371]), 0);
+
+// memory_copy.wast:2469
+assert_return(() => call($14, "load8_u", [45570]), 0);
+
+// memory_copy.wast:2470
+assert_return(() => call($14, "load8_u", [45769]), 0);
+
+// memory_copy.wast:2471
+assert_return(() => call($14, "load8_u", [45968]), 0);
+
+// memory_copy.wast:2472
+assert_return(() => call($14, "load8_u", [46167]), 0);
+
+// memory_copy.wast:2473
+assert_return(() => call($14, "load8_u", [46366]), 0);
+
+// memory_copy.wast:2474
+assert_return(() => call($14, "load8_u", [46565]), 0);
+
+// memory_copy.wast:2475
+assert_return(() => call($14, "load8_u", [46764]), 0);
+
+// memory_copy.wast:2476
+assert_return(() => call($14, "load8_u", [46963]), 0);
+
+// memory_copy.wast:2477
+assert_return(() => call($14, "load8_u", [47162]), 0);
+
+// memory_copy.wast:2478
+assert_return(() => call($14, "load8_u", [47361]), 0);
+
+// memory_copy.wast:2479
+assert_return(() => call($14, "load8_u", [47560]), 0);
+
+// memory_copy.wast:2480
+assert_return(() => call($14, "load8_u", [47759]), 0);
+
+// memory_copy.wast:2481
+assert_return(() => call($14, "load8_u", [47958]), 0);
+
+// memory_copy.wast:2482
+assert_return(() => call($14, "load8_u", [48157]), 0);
+
+// memory_copy.wast:2483
+assert_return(() => call($14, "load8_u", [48356]), 0);
+
+// memory_copy.wast:2484
+assert_return(() => call($14, "load8_u", [48555]), 0);
+
+// memory_copy.wast:2485
+assert_return(() => call($14, "load8_u", [48754]), 0);
+
+// memory_copy.wast:2486
+assert_return(() => call($14, "load8_u", [48953]), 0);
+
+// memory_copy.wast:2487
+assert_return(() => call($14, "load8_u", [49152]), 0);
+
+// memory_copy.wast:2488
+assert_return(() => call($14, "load8_u", [49351]), 0);
+
+// memory_copy.wast:2489
+assert_return(() => call($14, "load8_u", [49550]), 0);
+
+// memory_copy.wast:2490
+assert_return(() => call($14, "load8_u", [49749]), 0);
+
+// memory_copy.wast:2491
+assert_return(() => call($14, "load8_u", [49948]), 0);
+
+// memory_copy.wast:2492
+assert_return(() => call($14, "load8_u", [50147]), 0);
+
+// memory_copy.wast:2493
+assert_return(() => call($14, "load8_u", [50346]), 0);
+
+// memory_copy.wast:2494
+assert_return(() => call($14, "load8_u", [50545]), 0);
+
+// memory_copy.wast:2495
+assert_return(() => call($14, "load8_u", [50744]), 0);
+
+// memory_copy.wast:2496
+assert_return(() => call($14, "load8_u", [50943]), 0);
+
+// memory_copy.wast:2497
+assert_return(() => call($14, "load8_u", [51142]), 0);
+
+// memory_copy.wast:2498
+assert_return(() => call($14, "load8_u", [51341]), 0);
+
+// memory_copy.wast:2499
+assert_return(() => call($14, "load8_u", [51540]), 0);
+
+// memory_copy.wast:2500
+assert_return(() => call($14, "load8_u", [51739]), 0);
+
+// memory_copy.wast:2501
+assert_return(() => call($14, "load8_u", [51938]), 0);
+
+// memory_copy.wast:2502
+assert_return(() => call($14, "load8_u", [52137]), 0);
+
+// memory_copy.wast:2503
+assert_return(() => call($14, "load8_u", [52336]), 0);
+
+// memory_copy.wast:2504
+assert_return(() => call($14, "load8_u", [52535]), 0);
+
+// memory_copy.wast:2505
+assert_return(() => call($14, "load8_u", [52734]), 0);
+
+// memory_copy.wast:2506
+assert_return(() => call($14, "load8_u", [52933]), 0);
+
+// memory_copy.wast:2507
+assert_return(() => call($14, "load8_u", [53132]), 0);
+
+// memory_copy.wast:2508
+assert_return(() => call($14, "load8_u", [53331]), 0);
+
+// memory_copy.wast:2509
+assert_return(() => call($14, "load8_u", [53530]), 0);
+
+// memory_copy.wast:2510
+assert_return(() => call($14, "load8_u", [53729]), 0);
+
+// memory_copy.wast:2511
+assert_return(() => call($14, "load8_u", [53928]), 0);
+
+// memory_copy.wast:2512
+assert_return(() => call($14, "load8_u", [54127]), 0);
+
+// memory_copy.wast:2513
+assert_return(() => call($14, "load8_u", [54326]), 0);
+
+// memory_copy.wast:2514
+assert_return(() => call($14, "load8_u", [54525]), 0);
+
+// memory_copy.wast:2515
+assert_return(() => call($14, "load8_u", [54724]), 0);
+
+// memory_copy.wast:2516
+assert_return(() => call($14, "load8_u", [54923]), 0);
+
+// memory_copy.wast:2517
+assert_return(() => call($14, "load8_u", [55122]), 0);
+
+// memory_copy.wast:2518
+assert_return(() => call($14, "load8_u", [55321]), 0);
+
+// memory_copy.wast:2519
+assert_return(() => call($14, "load8_u", [55520]), 0);
+
+// memory_copy.wast:2520
+assert_return(() => call($14, "load8_u", [55719]), 0);
+
+// memory_copy.wast:2521
+assert_return(() => call($14, "load8_u", [55918]), 0);
+
+// memory_copy.wast:2522
+assert_return(() => call($14, "load8_u", [56117]), 0);
+
+// memory_copy.wast:2523
+assert_return(() => call($14, "load8_u", [56316]), 0);
+
+// memory_copy.wast:2524
+assert_return(() => call($14, "load8_u", [56515]), 0);
+
+// memory_copy.wast:2525
+assert_return(() => call($14, "load8_u", [56714]), 0);
+
+// memory_copy.wast:2526
+assert_return(() => call($14, "load8_u", [56913]), 0);
+
+// memory_copy.wast:2527
+assert_return(() => call($14, "load8_u", [57112]), 0);
+
+// memory_copy.wast:2528
+assert_return(() => call($14, "load8_u", [57311]), 0);
+
+// memory_copy.wast:2529
+assert_return(() => call($14, "load8_u", [57510]), 0);
+
+// memory_copy.wast:2530
+assert_return(() => call($14, "load8_u", [57709]), 0);
+
+// memory_copy.wast:2531
+assert_return(() => call($14, "load8_u", [57908]), 0);
+
+// memory_copy.wast:2532
+assert_return(() => call($14, "load8_u", [58107]), 0);
+
+// memory_copy.wast:2533
+assert_return(() => call($14, "load8_u", [58306]), 0);
+
+// memory_copy.wast:2534
+assert_return(() => call($14, "load8_u", [58505]), 0);
+
+// memory_copy.wast:2535
+assert_return(() => call($14, "load8_u", [58704]), 0);
+
+// memory_copy.wast:2536
+assert_return(() => call($14, "load8_u", [58903]), 0);
+
+// memory_copy.wast:2537
+assert_return(() => call($14, "load8_u", [59102]), 0);
+
+// memory_copy.wast:2538
+assert_return(() => call($14, "load8_u", [59301]), 0);
+
+// memory_copy.wast:2539
+assert_return(() => call($14, "load8_u", [59500]), 0);
+
+// memory_copy.wast:2540
+assert_return(() => call($14, "load8_u", [59699]), 0);
+
+// memory_copy.wast:2541
+assert_return(() => call($14, "load8_u", [59898]), 0);
+
+// memory_copy.wast:2542
+assert_return(() => call($14, "load8_u", [60097]), 0);
+
+// memory_copy.wast:2543
+assert_return(() => call($14, "load8_u", [60296]), 0);
+
+// memory_copy.wast:2544
+assert_return(() => call($14, "load8_u", [60495]), 0);
+
+// memory_copy.wast:2545
+assert_return(() => call($14, "load8_u", [60694]), 0);
+
+// memory_copy.wast:2546
+assert_return(() => call($14, "load8_u", [60893]), 0);
+
+// memory_copy.wast:2547
+assert_return(() => call($14, "load8_u", [61092]), 0);
+
+// memory_copy.wast:2548
+assert_return(() => call($14, "load8_u", [61291]), 0);
+
+// memory_copy.wast:2549
+assert_return(() => call($14, "load8_u", [61490]), 0);
+
+// memory_copy.wast:2550
+assert_return(() => call($14, "load8_u", [61689]), 0);
+
+// memory_copy.wast:2551
+assert_return(() => call($14, "load8_u", [61888]), 0);
+
+// memory_copy.wast:2552
+assert_return(() => call($14, "load8_u", [62087]), 0);
+
+// memory_copy.wast:2553
+assert_return(() => call($14, "load8_u", [62286]), 0);
+
+// memory_copy.wast:2554
+assert_return(() => call($14, "load8_u", [62485]), 0);
+
+// memory_copy.wast:2555
+assert_return(() => call($14, "load8_u", [62684]), 0);
+
+// memory_copy.wast:2556
+assert_return(() => call($14, "load8_u", [62883]), 0);
+
+// memory_copy.wast:2557
+assert_return(() => call($14, "load8_u", [63082]), 0);
+
+// memory_copy.wast:2558
+assert_return(() => call($14, "load8_u", [63281]), 0);
+
+// memory_copy.wast:2559
+assert_return(() => call($14, "load8_u", [63480]), 0);
+
+// memory_copy.wast:2560
+assert_return(() => call($14, "load8_u", [63679]), 0);
+
+// memory_copy.wast:2561
+assert_return(() => call($14, "load8_u", [63878]), 0);
+
+// memory_copy.wast:2562
+assert_return(() => call($14, "load8_u", [64077]), 0);
+
+// memory_copy.wast:2563
+assert_return(() => call($14, "load8_u", [64276]), 0);
+
+// memory_copy.wast:2564
+assert_return(() => call($14, "load8_u", [64475]), 0);
+
+// memory_copy.wast:2565
+assert_return(() => call($14, "load8_u", [64674]), 0);
+
+// memory_copy.wast:2566
+assert_return(() => call($14, "load8_u", [64873]), 0);
+
+// memory_copy.wast:2567
+assert_return(() => call($14, "load8_u", [65072]), 0);
+
+// memory_copy.wast:2568
+assert_return(() => call($14, "load8_u", [65271]), 0);
+
+// memory_copy.wast:2569
+assert_return(() => call($14, "load8_u", [65470]), 0);
+
+// memory_copy.wast:2570
+assert_return(() => call($14, "load8_u", [65486]), 0);
+
+// memory_copy.wast:2571
+assert_return(() => call($14, "load8_u", [65487]), 1);
+
+// memory_copy.wast:2572
+assert_return(() => call($14, "load8_u", [65488]), 2);
+
+// memory_copy.wast:2573
+assert_return(() => call($14, "load8_u", [65489]), 3);
+
+// memory_copy.wast:2574
+assert_return(() => call($14, "load8_u", [65490]), 4);
+
+// memory_copy.wast:2575
+assert_return(() => call($14, "load8_u", [65491]), 5);
+
+// memory_copy.wast:2576
+assert_return(() => call($14, "load8_u", [65492]), 6);
+
+// memory_copy.wast:2577
+assert_return(() => call($14, "load8_u", [65493]), 7);
+
+// memory_copy.wast:2578
+assert_return(() => call($14, "load8_u", [65494]), 8);
+
+// memory_copy.wast:2579
+assert_return(() => call($14, "load8_u", [65495]), 9);
+
+// memory_copy.wast:2580
+assert_return(() => call($14, "load8_u", [65496]), 10);
+
+// memory_copy.wast:2581
+assert_return(() => call($14, "load8_u", [65497]), 11);
+
+// memory_copy.wast:2582
+assert_return(() => call($14, "load8_u", [65498]), 12);
+
+// memory_copy.wast:2583
+assert_return(() => call($14, "load8_u", [65499]), 13);
+
+// memory_copy.wast:2584
+assert_return(() => call($14, "load8_u", [65500]), 14);
+
+// memory_copy.wast:2585
+assert_return(() => call($14, "load8_u", [65501]), 15);
+
+// memory_copy.wast:2586
+assert_return(() => call($14, "load8_u", [65502]), 16);
+
+// memory_copy.wast:2587
+assert_return(() => call($14, "load8_u", [65503]), 17);
+
+// memory_copy.wast:2588
+assert_return(() => call($14, "load8_u", [65504]), 18);
+
+// memory_copy.wast:2589
+assert_return(() => call($14, "load8_u", [65505]), 19);
+
+// memory_copy.wast:2590
+assert_return(() => call($14, "load8_u", [65516]), 0);
+
+// memory_copy.wast:2591
+assert_return(() => call($14, "load8_u", [65517]), 1);
+
+// memory_copy.wast:2592
+assert_return(() => call($14, "load8_u", [65518]), 2);
+
+// memory_copy.wast:2593
+assert_return(() => call($14, "load8_u", [65519]), 3);
+
+// memory_copy.wast:2594
+assert_return(() => call($14, "load8_u", [65520]), 4);
+
+// memory_copy.wast:2595
+assert_return(() => call($14, "load8_u", [65521]), 5);
+
+// memory_copy.wast:2596
+assert_return(() => call($14, "load8_u", [65522]), 6);
+
+// memory_copy.wast:2597
+assert_return(() => call($14, "load8_u", [65523]), 7);
+
+// memory_copy.wast:2598
+assert_return(() => call($14, "load8_u", [65524]), 8);
+
+// memory_copy.wast:2599
+assert_return(() => call($14, "load8_u", [65525]), 9);
+
+// memory_copy.wast:2600
+assert_return(() => call($14, "load8_u", [65526]), 10);
+
+// memory_copy.wast:2601
+assert_return(() => call($14, "load8_u", [65527]), 11);
+
+// memory_copy.wast:2602
+assert_return(() => call($14, "load8_u", [65528]), 12);
+
+// memory_copy.wast:2603
+assert_return(() => call($14, "load8_u", [65529]), 13);
+
+// memory_copy.wast:2604
+assert_return(() => call($14, "load8_u", [65530]), 14);
+
+// memory_copy.wast:2605
+assert_return(() => call($14, "load8_u", [65531]), 15);
+
+// memory_copy.wast:2606
+assert_return(() => call($14, "load8_u", [65532]), 16);
+
+// memory_copy.wast:2607
+assert_return(() => call($14, "load8_u", [65533]), 17);
+
+// memory_copy.wast:2608
+assert_return(() => call($14, "load8_u", [65534]), 18);
+
+// memory_copy.wast:2609
+assert_return(() => call($14, "load8_u", [65535]), 19);
+
+// memory_copy.wast:2611
+let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xe2\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
+
+// memory_copy.wast:2619
+assert_trap(() => call($15, "run", [65516, 65506, 40]));
+
+// memory_copy.wast:2622
+assert_return(() => call($15, "load8_u", [198]), 0);
+
+// memory_copy.wast:2623
+assert_return(() => call($15, "load8_u", [397]), 0);
+
+// memory_copy.wast:2624
+assert_return(() => call($15, "load8_u", [596]), 0);
+
+// memory_copy.wast:2625
+assert_return(() => call($15, "load8_u", [795]), 0);
+
+// memory_copy.wast:2626
+assert_return(() => call($15, "load8_u", [994]), 0);
+
+// memory_copy.wast:2627
+assert_return(() => call($15, "load8_u", [1193]), 0);
+
+// memory_copy.wast:2628
+assert_return(() => call($15, "load8_u", [1392]), 0);
+
+// memory_copy.wast:2629
+assert_return(() => call($15, "load8_u", [1591]), 0);
+
+// memory_copy.wast:2630
+assert_return(() => call($15, "load8_u", [1790]), 0);
+
+// memory_copy.wast:2631
+assert_return(() => call($15, "load8_u", [1989]), 0);
+
+// memory_copy.wast:2632
+assert_return(() => call($15, "load8_u", [2188]), 0);
+
+// memory_copy.wast:2633
+assert_return(() => call($15, "load8_u", [2387]), 0);
+
+// memory_copy.wast:2634
+assert_return(() => call($15, "load8_u", [2586]), 0);
+
+// memory_copy.wast:2635
+assert_return(() => call($15, "load8_u", [2785]), 0);
+
+// memory_copy.wast:2636
+assert_return(() => call($15, "load8_u", [2984]), 0);
+
+// memory_copy.wast:2637
+assert_return(() => call($15, "load8_u", [3183]), 0);
+
+// memory_copy.wast:2638
+assert_return(() => call($15, "load8_u", [3382]), 0);
+
+// memory_copy.wast:2639
+assert_return(() => call($15, "load8_u", [3581]), 0);
+
+// memory_copy.wast:2640
+assert_return(() => call($15, "load8_u", [3780]), 0);
+
+// memory_copy.wast:2641
+assert_return(() => call($15, "load8_u", [3979]), 0);
+
+// memory_copy.wast:2642
+assert_return(() => call($15, "load8_u", [4178]), 0);
+
+// memory_copy.wast:2643
+assert_return(() => call($15, "load8_u", [4377]), 0);
+
+// memory_copy.wast:2644
+assert_return(() => call($15, "load8_u", [4576]), 0);
+
+// memory_copy.wast:2645
+assert_return(() => call($15, "load8_u", [4775]), 0);
+
+// memory_copy.wast:2646
+assert_return(() => call($15, "load8_u", [4974]), 0);
+
+// memory_copy.wast:2647
+assert_return(() => call($15, "load8_u", [5173]), 0);
+
+// memory_copy.wast:2648
+assert_return(() => call($15, "load8_u", [5372]), 0);
+
+// memory_copy.wast:2649
+assert_return(() => call($15, "load8_u", [5571]), 0);
+
+// memory_copy.wast:2650
+assert_return(() => call($15, "load8_u", [5770]), 0);
+
+// memory_copy.wast:2651
+assert_return(() => call($15, "load8_u", [5969]), 0);
+
+// memory_copy.wast:2652
+assert_return(() => call($15, "load8_u", [6168]), 0);
+
+// memory_copy.wast:2653
+assert_return(() => call($15, "load8_u", [6367]), 0);
+
+// memory_copy.wast:2654
+assert_return(() => call($15, "load8_u", [6566]), 0);
+
+// memory_copy.wast:2655
+assert_return(() => call($15, "load8_u", [6765]), 0);
+
+// memory_copy.wast:2656
+assert_return(() => call($15, "load8_u", [6964]), 0);
+
+// memory_copy.wast:2657
+assert_return(() => call($15, "load8_u", [7163]), 0);
+
+// memory_copy.wast:2658
+assert_return(() => call($15, "load8_u", [7362]), 0);
+
+// memory_copy.wast:2659
+assert_return(() => call($15, "load8_u", [7561]), 0);
+
+// memory_copy.wast:2660
+assert_return(() => call($15, "load8_u", [7760]), 0);
+
+// memory_copy.wast:2661
+assert_return(() => call($15, "load8_u", [7959]), 0);
+
+// memory_copy.wast:2662
+assert_return(() => call($15, "load8_u", [8158]), 0);
+
+// memory_copy.wast:2663
+assert_return(() => call($15, "load8_u", [8357]), 0);
+
+// memory_copy.wast:2664
+assert_return(() => call($15, "load8_u", [8556]), 0);
+
+// memory_copy.wast:2665
+assert_return(() => call($15, "load8_u", [8755]), 0);
+
+// memory_copy.wast:2666
+assert_return(() => call($15, "load8_u", [8954]), 0);
+
+// memory_copy.wast:2667
+assert_return(() => call($15, "load8_u", [9153]), 0);
+
+// memory_copy.wast:2668
+assert_return(() => call($15, "load8_u", [9352]), 0);
+
+// memory_copy.wast:2669
+assert_return(() => call($15, "load8_u", [9551]), 0);
+
+// memory_copy.wast:2670
+assert_return(() => call($15, "load8_u", [9750]), 0);
+
+// memory_copy.wast:2671
+assert_return(() => call($15, "load8_u", [9949]), 0);
+
+// memory_copy.wast:2672
+assert_return(() => call($15, "load8_u", [10148]), 0);
+
+// memory_copy.wast:2673
+assert_return(() => call($15, "load8_u", [10347]), 0);
+
+// memory_copy.wast:2674
+assert_return(() => call($15, "load8_u", [10546]), 0);
+
+// memory_copy.wast:2675
+assert_return(() => call($15, "load8_u", [10745]), 0);
+
+// memory_copy.wast:2676
+assert_return(() => call($15, "load8_u", [10944]), 0);
+
+// memory_copy.wast:2677
+assert_return(() => call($15, "load8_u", [11143]), 0);
+
+// memory_copy.wast:2678
+assert_return(() => call($15, "load8_u", [11342]), 0);
+
+// memory_copy.wast:2679
+assert_return(() => call($15, "load8_u", [11541]), 0);
+
+// memory_copy.wast:2680
+assert_return(() => call($15, "load8_u", [11740]), 0);
+
+// memory_copy.wast:2681
+assert_return(() => call($15, "load8_u", [11939]), 0);
+
+// memory_copy.wast:2682
+assert_return(() => call($15, "load8_u", [12138]), 0);
+
+// memory_copy.wast:2683
+assert_return(() => call($15, "load8_u", [12337]), 0);
+
+// memory_copy.wast:2684
+assert_return(() => call($15, "load8_u", [12536]), 0);
+
+// memory_copy.wast:2685
+assert_return(() => call($15, "load8_u", [12735]), 0);
+
+// memory_copy.wast:2686
+assert_return(() => call($15, "load8_u", [12934]), 0);
+
+// memory_copy.wast:2687
+assert_return(() => call($15, "load8_u", [13133]), 0);
+
+// memory_copy.wast:2688
+assert_return(() => call($15, "load8_u", [13332]), 0);
+
+// memory_copy.wast:2689
+assert_return(() => call($15, "load8_u", [13531]), 0);
+
+// memory_copy.wast:2690
+assert_return(() => call($15, "load8_u", [13730]), 0);
+
+// memory_copy.wast:2691
+assert_return(() => call($15, "load8_u", [13929]), 0);
+
+// memory_copy.wast:2692
+assert_return(() => call($15, "load8_u", [14128]), 0);
+
+// memory_copy.wast:2693
+assert_return(() => call($15, "load8_u", [14327]), 0);
+
+// memory_copy.wast:2694
+assert_return(() => call($15, "load8_u", [14526]), 0);
+
+// memory_copy.wast:2695
+assert_return(() => call($15, "load8_u", [14725]), 0);
+
+// memory_copy.wast:2696
+assert_return(() => call($15, "load8_u", [14924]), 0);
+
+// memory_copy.wast:2697
+assert_return(() => call($15, "load8_u", [15123]), 0);
+
+// memory_copy.wast:2698
+assert_return(() => call($15, "load8_u", [15322]), 0);
+
+// memory_copy.wast:2699
+assert_return(() => call($15, "load8_u", [15521]), 0);
+
+// memory_copy.wast:2700
+assert_return(() => call($15, "load8_u", [15720]), 0);
+
+// memory_copy.wast:2701
+assert_return(() => call($15, "load8_u", [15919]), 0);
+
+// memory_copy.wast:2702
+assert_return(() => call($15, "load8_u", [16118]), 0);
+
+// memory_copy.wast:2703
+assert_return(() => call($15, "load8_u", [16317]), 0);
+
+// memory_copy.wast:2704
+assert_return(() => call($15, "load8_u", [16516]), 0);
+
+// memory_copy.wast:2705
+assert_return(() => call($15, "load8_u", [16715]), 0);
+
+// memory_copy.wast:2706
+assert_return(() => call($15, "load8_u", [16914]), 0);
+
+// memory_copy.wast:2707
+assert_return(() => call($15, "load8_u", [17113]), 0);
+
+// memory_copy.wast:2708
+assert_return(() => call($15, "load8_u", [17312]), 0);
+
+// memory_copy.wast:2709
+assert_return(() => call($15, "load8_u", [17511]), 0);
+
+// memory_copy.wast:2710
+assert_return(() => call($15, "load8_u", [17710]), 0);
+
+// memory_copy.wast:2711
+assert_return(() => call($15, "load8_u", [17909]), 0);
+
+// memory_copy.wast:2712
+assert_return(() => call($15, "load8_u", [18108]), 0);
+
+// memory_copy.wast:2713
+assert_return(() => call($15, "load8_u", [18307]), 0);
+
+// memory_copy.wast:2714
+assert_return(() => call($15, "load8_u", [18506]), 0);
+
+// memory_copy.wast:2715
+assert_return(() => call($15, "load8_u", [18705]), 0);
+
+// memory_copy.wast:2716
+assert_return(() => call($15, "load8_u", [18904]), 0);
+
+// memory_copy.wast:2717
+assert_return(() => call($15, "load8_u", [19103]), 0);
+
+// memory_copy.wast:2718
+assert_return(() => call($15, "load8_u", [19302]), 0);
+
+// memory_copy.wast:2719
+assert_return(() => call($15, "load8_u", [19501]), 0);
+
+// memory_copy.wast:2720
+assert_return(() => call($15, "load8_u", [19700]), 0);
+
+// memory_copy.wast:2721
+assert_return(() => call($15, "load8_u", [19899]), 0);
+
+// memory_copy.wast:2722
+assert_return(() => call($15, "load8_u", [20098]), 0);
+
+// memory_copy.wast:2723
+assert_return(() => call($15, "load8_u", [20297]), 0);
+
+// memory_copy.wast:2724
+assert_return(() => call($15, "load8_u", [20496]), 0);
+
+// memory_copy.wast:2725
+assert_return(() => call($15, "load8_u", [20695]), 0);
+
+// memory_copy.wast:2726
+assert_return(() => call($15, "load8_u", [20894]), 0);
+
+// memory_copy.wast:2727
+assert_return(() => call($15, "load8_u", [21093]), 0);
+
+// memory_copy.wast:2728
+assert_return(() => call($15, "load8_u", [21292]), 0);
+
+// memory_copy.wast:2729
+assert_return(() => call($15, "load8_u", [21491]), 0);
+
+// memory_copy.wast:2730
+assert_return(() => call($15, "load8_u", [21690]), 0);
+
+// memory_copy.wast:2731
+assert_return(() => call($15, "load8_u", [21889]), 0);
+
+// memory_copy.wast:2732
+assert_return(() => call($15, "load8_u", [22088]), 0);
+
+// memory_copy.wast:2733
+assert_return(() => call($15, "load8_u", [22287]), 0);
+
+// memory_copy.wast:2734
+assert_return(() => call($15, "load8_u", [22486]), 0);
+
+// memory_copy.wast:2735
+assert_return(() => call($15, "load8_u", [22685]), 0);
+
+// memory_copy.wast:2736
+assert_return(() => call($15, "load8_u", [22884]), 0);
+
+// memory_copy.wast:2737
+assert_return(() => call($15, "load8_u", [23083]), 0);
+
+// memory_copy.wast:2738
+assert_return(() => call($15, "load8_u", [23282]), 0);
+
+// memory_copy.wast:2739
+assert_return(() => call($15, "load8_u", [23481]), 0);
+
+// memory_copy.wast:2740
+assert_return(() => call($15, "load8_u", [23680]), 0);
+
+// memory_copy.wast:2741
+assert_return(() => call($15, "load8_u", [23879]), 0);
+
+// memory_copy.wast:2742
+assert_return(() => call($15, "load8_u", [24078]), 0);
+
+// memory_copy.wast:2743
+assert_return(() => call($15, "load8_u", [24277]), 0);
+
+// memory_copy.wast:2744
+assert_return(() => call($15, "load8_u", [24476]), 0);
+
+// memory_copy.wast:2745
+assert_return(() => call($15, "load8_u", [24675]), 0);
+
+// memory_copy.wast:2746
+assert_return(() => call($15, "load8_u", [24874]), 0);
+
+// memory_copy.wast:2747
+assert_return(() => call($15, "load8_u", [25073]), 0);
+
+// memory_copy.wast:2748
+assert_return(() => call($15, "load8_u", [25272]), 0);
+
+// memory_copy.wast:2749
+assert_return(() => call($15, "load8_u", [25471]), 0);
+
+// memory_copy.wast:2750
+assert_return(() => call($15, "load8_u", [25670]), 0);
+
+// memory_copy.wast:2751
+assert_return(() => call($15, "load8_u", [25869]), 0);
+
+// memory_copy.wast:2752
+assert_return(() => call($15, "load8_u", [26068]), 0);
+
+// memory_copy.wast:2753
+assert_return(() => call($15, "load8_u", [26267]), 0);
+
+// memory_copy.wast:2754
+assert_return(() => call($15, "load8_u", [26466]), 0);
+
+// memory_copy.wast:2755
+assert_return(() => call($15, "load8_u", [26665]), 0);
+
+// memory_copy.wast:2756
+assert_return(() => call($15, "load8_u", [26864]), 0);
+
+// memory_copy.wast:2757
+assert_return(() => call($15, "load8_u", [27063]), 0);
+
+// memory_copy.wast:2758
+assert_return(() => call($15, "load8_u", [27262]), 0);
+
+// memory_copy.wast:2759
+assert_return(() => call($15, "load8_u", [27461]), 0);
+
+// memory_copy.wast:2760
+assert_return(() => call($15, "load8_u", [27660]), 0);
+
+// memory_copy.wast:2761
+assert_return(() => call($15, "load8_u", [27859]), 0);
+
+// memory_copy.wast:2762
+assert_return(() => call($15, "load8_u", [28058]), 0);
+
+// memory_copy.wast:2763
+assert_return(() => call($15, "load8_u", [28257]), 0);
+
+// memory_copy.wast:2764
+assert_return(() => call($15, "load8_u", [28456]), 0);
+
+// memory_copy.wast:2765
+assert_return(() => call($15, "load8_u", [28655]), 0);
+
+// memory_copy.wast:2766
+assert_return(() => call($15, "load8_u", [28854]), 0);
+
+// memory_copy.wast:2767
+assert_return(() => call($15, "load8_u", [29053]), 0);
+
+// memory_copy.wast:2768
+assert_return(() => call($15, "load8_u", [29252]), 0);
+
+// memory_copy.wast:2769
+assert_return(() => call($15, "load8_u", [29451]), 0);
+
+// memory_copy.wast:2770
+assert_return(() => call($15, "load8_u", [29650]), 0);
+
+// memory_copy.wast:2771
+assert_return(() => call($15, "load8_u", [29849]), 0);
+
+// memory_copy.wast:2772
+assert_return(() => call($15, "load8_u", [30048]), 0);
+
+// memory_copy.wast:2773
+assert_return(() => call($15, "load8_u", [30247]), 0);
+
+// memory_copy.wast:2774
+assert_return(() => call($15, "load8_u", [30446]), 0);
+
+// memory_copy.wast:2775
+assert_return(() => call($15, "load8_u", [30645]), 0);
+
+// memory_copy.wast:2776
+assert_return(() => call($15, "load8_u", [30844]), 0);
+
+// memory_copy.wast:2777
+assert_return(() => call($15, "load8_u", [31043]), 0);
+
+// memory_copy.wast:2778
+assert_return(() => call($15, "load8_u", [31242]), 0);
+
+// memory_copy.wast:2779
+assert_return(() => call($15, "load8_u", [31441]), 0);
+
+// memory_copy.wast:2780
+assert_return(() => call($15, "load8_u", [31640]), 0);
+
+// memory_copy.wast:2781
+assert_return(() => call($15, "load8_u", [31839]), 0);
+
+// memory_copy.wast:2782
+assert_return(() => call($15, "load8_u", [32038]), 0);
+
+// memory_copy.wast:2783
+assert_return(() => call($15, "load8_u", [32237]), 0);
+
+// memory_copy.wast:2784
+assert_return(() => call($15, "load8_u", [32436]), 0);
+
+// memory_copy.wast:2785
+assert_return(() => call($15, "load8_u", [32635]), 0);
+
+// memory_copy.wast:2786
+assert_return(() => call($15, "load8_u", [32834]), 0);
+
+// memory_copy.wast:2787
+assert_return(() => call($15, "load8_u", [33033]), 0);
+
+// memory_copy.wast:2788
+assert_return(() => call($15, "load8_u", [33232]), 0);
+
+// memory_copy.wast:2789
+assert_return(() => call($15, "load8_u", [33431]), 0);
+
+// memory_copy.wast:2790
+assert_return(() => call($15, "load8_u", [33630]), 0);
+
+// memory_copy.wast:2791
+assert_return(() => call($15, "load8_u", [33829]), 0);
+
+// memory_copy.wast:2792
+assert_return(() => call($15, "load8_u", [34028]), 0);
+
+// memory_copy.wast:2793
+assert_return(() => call($15, "load8_u", [34227]), 0);
+
+// memory_copy.wast:2794
+assert_return(() => call($15, "load8_u", [34426]), 0);
+
+// memory_copy.wast:2795
+assert_return(() => call($15, "load8_u", [34625]), 0);
+
+// memory_copy.wast:2796
+assert_return(() => call($15, "load8_u", [34824]), 0);
+
+// memory_copy.wast:2797
+assert_return(() => call($15, "load8_u", [35023]), 0);
+
+// memory_copy.wast:2798
+assert_return(() => call($15, "load8_u", [35222]), 0);
+
+// memory_copy.wast:2799
+assert_return(() => call($15, "load8_u", [35421]), 0);
+
+// memory_copy.wast:2800
+assert_return(() => call($15, "load8_u", [35620]), 0);
+
+// memory_copy.wast:2801
+assert_return(() => call($15, "load8_u", [35819]), 0);
+
+// memory_copy.wast:2802
+assert_return(() => call($15, "load8_u", [36018]), 0);
+
+// memory_copy.wast:2803
+assert_return(() => call($15, "load8_u", [36217]), 0);
+
+// memory_copy.wast:2804
+assert_return(() => call($15, "load8_u", [36416]), 0);
+
+// memory_copy.wast:2805
+assert_return(() => call($15, "load8_u", [36615]), 0);
+
+// memory_copy.wast:2806
+assert_return(() => call($15, "load8_u", [36814]), 0);
+
+// memory_copy.wast:2807
+assert_return(() => call($15, "load8_u", [37013]), 0);
+
+// memory_copy.wast:2808
+assert_return(() => call($15, "load8_u", [37212]), 0);
+
+// memory_copy.wast:2809
+assert_return(() => call($15, "load8_u", [37411]), 0);
+
+// memory_copy.wast:2810
+assert_return(() => call($15, "load8_u", [37610]), 0);
+
+// memory_copy.wast:2811
+assert_return(() => call($15, "load8_u", [37809]), 0);
+
+// memory_copy.wast:2812
+assert_return(() => call($15, "load8_u", [38008]), 0);
+
+// memory_copy.wast:2813
+assert_return(() => call($15, "load8_u", [38207]), 0);
+
+// memory_copy.wast:2814
+assert_return(() => call($15, "load8_u", [38406]), 0);
+
+// memory_copy.wast:2815
+assert_return(() => call($15, "load8_u", [38605]), 0);
+
+// memory_copy.wast:2816
+assert_return(() => call($15, "load8_u", [38804]), 0);
+
+// memory_copy.wast:2817
+assert_return(() => call($15, "load8_u", [39003]), 0);
+
+// memory_copy.wast:2818
+assert_return(() => call($15, "load8_u", [39202]), 0);
+
+// memory_copy.wast:2819
+assert_return(() => call($15, "load8_u", [39401]), 0);
+
+// memory_copy.wast:2820
+assert_return(() => call($15, "load8_u", [39600]), 0);
+
+// memory_copy.wast:2821
+assert_return(() => call($15, "load8_u", [39799]), 0);
+
+// memory_copy.wast:2822
+assert_return(() => call($15, "load8_u", [39998]), 0);
+
+// memory_copy.wast:2823
+assert_return(() => call($15, "load8_u", [40197]), 0);
+
+// memory_copy.wast:2824
+assert_return(() => call($15, "load8_u", [40396]), 0);
+
+// memory_copy.wast:2825
+assert_return(() => call($15, "load8_u", [40595]), 0);
+
+// memory_copy.wast:2826
+assert_return(() => call($15, "load8_u", [40794]), 0);
+
+// memory_copy.wast:2827
+assert_return(() => call($15, "load8_u", [40993]), 0);
+
+// memory_copy.wast:2828
+assert_return(() => call($15, "load8_u", [41192]), 0);
+
+// memory_copy.wast:2829
+assert_return(() => call($15, "load8_u", [41391]), 0);
+
+// memory_copy.wast:2830
+assert_return(() => call($15, "load8_u", [41590]), 0);
+
+// memory_copy.wast:2831
+assert_return(() => call($15, "load8_u", [41789]), 0);
+
+// memory_copy.wast:2832
+assert_return(() => call($15, "load8_u", [41988]), 0);
+
+// memory_copy.wast:2833
+assert_return(() => call($15, "load8_u", [42187]), 0);
+
+// memory_copy.wast:2834
+assert_return(() => call($15, "load8_u", [42386]), 0);
+
+// memory_copy.wast:2835
+assert_return(() => call($15, "load8_u", [42585]), 0);
+
+// memory_copy.wast:2836
+assert_return(() => call($15, "load8_u", [42784]), 0);
+
+// memory_copy.wast:2837
+assert_return(() => call($15, "load8_u", [42983]), 0);
+
+// memory_copy.wast:2838
+assert_return(() => call($15, "load8_u", [43182]), 0);
+
+// memory_copy.wast:2839
+assert_return(() => call($15, "load8_u", [43381]), 0);
+
+// memory_copy.wast:2840
+assert_return(() => call($15, "load8_u", [43580]), 0);
+
+// memory_copy.wast:2841
+assert_return(() => call($15, "load8_u", [43779]), 0);
+
+// memory_copy.wast:2842
+assert_return(() => call($15, "load8_u", [43978]), 0);
+
+// memory_copy.wast:2843
+assert_return(() => call($15, "load8_u", [44177]), 0);
+
+// memory_copy.wast:2844
+assert_return(() => call($15, "load8_u", [44376]), 0);
+
+// memory_copy.wast:2845
+assert_return(() => call($15, "load8_u", [44575]), 0);
+
+// memory_copy.wast:2846
+assert_return(() => call($15, "load8_u", [44774]), 0);
+
+// memory_copy.wast:2847
+assert_return(() => call($15, "load8_u", [44973]), 0);
+
+// memory_copy.wast:2848
+assert_return(() => call($15, "load8_u", [45172]), 0);
+
+// memory_copy.wast:2849
+assert_return(() => call($15, "load8_u", [45371]), 0);
+
+// memory_copy.wast:2850
+assert_return(() => call($15, "load8_u", [45570]), 0);
+
+// memory_copy.wast:2851
+assert_return(() => call($15, "load8_u", [45769]), 0);
+
+// memory_copy.wast:2852
+assert_return(() => call($15, "load8_u", [45968]), 0);
+
+// memory_copy.wast:2853
+assert_return(() => call($15, "load8_u", [46167]), 0);
+
+// memory_copy.wast:2854
+assert_return(() => call($15, "load8_u", [46366]), 0);
+
+// memory_copy.wast:2855
+assert_return(() => call($15, "load8_u", [46565]), 0);
+
+// memory_copy.wast:2856
+assert_return(() => call($15, "load8_u", [46764]), 0);
+
+// memory_copy.wast:2857
+assert_return(() => call($15, "load8_u", [46963]), 0);
+
+// memory_copy.wast:2858
+assert_return(() => call($15, "load8_u", [47162]), 0);
+
+// memory_copy.wast:2859
+assert_return(() => call($15, "load8_u", [47361]), 0);
+
+// memory_copy.wast:2860
+assert_return(() => call($15, "load8_u", [47560]), 0);
+
+// memory_copy.wast:2861
+assert_return(() => call($15, "load8_u", [47759]), 0);
+
+// memory_copy.wast:2862
+assert_return(() => call($15, "load8_u", [47958]), 0);
+
+// memory_copy.wast:2863
+assert_return(() => call($15, "load8_u", [48157]), 0);
+
+// memory_copy.wast:2864
+assert_return(() => call($15, "load8_u", [48356]), 0);
+
+// memory_copy.wast:2865
+assert_return(() => call($15, "load8_u", [48555]), 0);
+
+// memory_copy.wast:2866
+assert_return(() => call($15, "load8_u", [48754]), 0);
+
+// memory_copy.wast:2867
+assert_return(() => call($15, "load8_u", [48953]), 0);
+
+// memory_copy.wast:2868
+assert_return(() => call($15, "load8_u", [49152]), 0);
+
+// memory_copy.wast:2869
+assert_return(() => call($15, "load8_u", [49351]), 0);
+
+// memory_copy.wast:2870
+assert_return(() => call($15, "load8_u", [49550]), 0);
+
+// memory_copy.wast:2871
+assert_return(() => call($15, "load8_u", [49749]), 0);
+
+// memory_copy.wast:2872
+assert_return(() => call($15, "load8_u", [49948]), 0);
+
+// memory_copy.wast:2873
+assert_return(() => call($15, "load8_u", [50147]), 0);
+
+// memory_copy.wast:2874
+assert_return(() => call($15, "load8_u", [50346]), 0);
+
+// memory_copy.wast:2875
+assert_return(() => call($15, "load8_u", [50545]), 0);
+
+// memory_copy.wast:2876
+assert_return(() => call($15, "load8_u", [50744]), 0);
+
+// memory_copy.wast:2877
+assert_return(() => call($15, "load8_u", [50943]), 0);
+
+// memory_copy.wast:2878
+assert_return(() => call($15, "load8_u", [51142]), 0);
+
+// memory_copy.wast:2879
+assert_return(() => call($15, "load8_u", [51341]), 0);
+
+// memory_copy.wast:2880
+assert_return(() => call($15, "load8_u", [51540]), 0);
+
+// memory_copy.wast:2881
+assert_return(() => call($15, "load8_u", [51739]), 0);
+
+// memory_copy.wast:2882
+assert_return(() => call($15, "load8_u", [51938]), 0);
+
+// memory_copy.wast:2883
+assert_return(() => call($15, "load8_u", [52137]), 0);
+
+// memory_copy.wast:2884
+assert_return(() => call($15, "load8_u", [52336]), 0);
+
+// memory_copy.wast:2885
+assert_return(() => call($15, "load8_u", [52535]), 0);
+
+// memory_copy.wast:2886
+assert_return(() => call($15, "load8_u", [52734]), 0);
+
+// memory_copy.wast:2887
+assert_return(() => call($15, "load8_u", [52933]), 0);
+
+// memory_copy.wast:2888
+assert_return(() => call($15, "load8_u", [53132]), 0);
+
+// memory_copy.wast:2889
+assert_return(() => call($15, "load8_u", [53331]), 0);
+
+// memory_copy.wast:2890
+assert_return(() => call($15, "load8_u", [53530]), 0);
+
+// memory_copy.wast:2891
+assert_return(() => call($15, "load8_u", [53729]), 0);
+
+// memory_copy.wast:2892
+assert_return(() => call($15, "load8_u", [53928]), 0);
+
+// memory_copy.wast:2893
+assert_return(() => call($15, "load8_u", [54127]), 0);
+
+// memory_copy.wast:2894
+assert_return(() => call($15, "load8_u", [54326]), 0);
+
+// memory_copy.wast:2895
+assert_return(() => call($15, "load8_u", [54525]), 0);
+
+// memory_copy.wast:2896
+assert_return(() => call($15, "load8_u", [54724]), 0);
+
+// memory_copy.wast:2897
+assert_return(() => call($15, "load8_u", [54923]), 0);
+
+// memory_copy.wast:2898
+assert_return(() => call($15, "load8_u", [55122]), 0);
+
+// memory_copy.wast:2899
+assert_return(() => call($15, "load8_u", [55321]), 0);
+
+// memory_copy.wast:2900
+assert_return(() => call($15, "load8_u", [55520]), 0);
+
+// memory_copy.wast:2901
+assert_return(() => call($15, "load8_u", [55719]), 0);
+
+// memory_copy.wast:2902
+assert_return(() => call($15, "load8_u", [55918]), 0);
+
+// memory_copy.wast:2903
+assert_return(() => call($15, "load8_u", [56117]), 0);
+
+// memory_copy.wast:2904
+assert_return(() => call($15, "load8_u", [56316]), 0);
+
+// memory_copy.wast:2905
+assert_return(() => call($15, "load8_u", [56515]), 0);
+
+// memory_copy.wast:2906
+assert_return(() => call($15, "load8_u", [56714]), 0);
+
+// memory_copy.wast:2907
+assert_return(() => call($15, "load8_u", [56913]), 0);
+
+// memory_copy.wast:2908
+assert_return(() => call($15, "load8_u", [57112]), 0);
+
+// memory_copy.wast:2909
+assert_return(() => call($15, "load8_u", [57311]), 0);
+
+// memory_copy.wast:2910
+assert_return(() => call($15, "load8_u", [57510]), 0);
+
+// memory_copy.wast:2911
+assert_return(() => call($15, "load8_u", [57709]), 0);
+
+// memory_copy.wast:2912
+assert_return(() => call($15, "load8_u", [57908]), 0);
+
+// memory_copy.wast:2913
+assert_return(() => call($15, "load8_u", [58107]), 0);
+
+// memory_copy.wast:2914
+assert_return(() => call($15, "load8_u", [58306]), 0);
+
+// memory_copy.wast:2915
+assert_return(() => call($15, "load8_u", [58505]), 0);
+
+// memory_copy.wast:2916
+assert_return(() => call($15, "load8_u", [58704]), 0);
+
+// memory_copy.wast:2917
+assert_return(() => call($15, "load8_u", [58903]), 0);
+
+// memory_copy.wast:2918
+assert_return(() => call($15, "load8_u", [59102]), 0);
+
+// memory_copy.wast:2919
+assert_return(() => call($15, "load8_u", [59301]), 0);
+
+// memory_copy.wast:2920
+assert_return(() => call($15, "load8_u", [59500]), 0);
+
+// memory_copy.wast:2921
+assert_return(() => call($15, "load8_u", [59699]), 0);
+
+// memory_copy.wast:2922
+assert_return(() => call($15, "load8_u", [59898]), 0);
+
+// memory_copy.wast:2923
+assert_return(() => call($15, "load8_u", [60097]), 0);
+
+// memory_copy.wast:2924
+assert_return(() => call($15, "load8_u", [60296]), 0);
+
+// memory_copy.wast:2925
+assert_return(() => call($15, "load8_u", [60495]), 0);
+
+// memory_copy.wast:2926
+assert_return(() => call($15, "load8_u", [60694]), 0);
+
+// memory_copy.wast:2927
+assert_return(() => call($15, "load8_u", [60893]), 0);
+
+// memory_copy.wast:2928
+assert_return(() => call($15, "load8_u", [61092]), 0);
+
+// memory_copy.wast:2929
+assert_return(() => call($15, "load8_u", [61291]), 0);
+
+// memory_copy.wast:2930
+assert_return(() => call($15, "load8_u", [61490]), 0);
+
+// memory_copy.wast:2931
+assert_return(() => call($15, "load8_u", [61689]), 0);
+
+// memory_copy.wast:2932
+assert_return(() => call($15, "load8_u", [61888]), 0);
+
+// memory_copy.wast:2933
+assert_return(() => call($15, "load8_u", [62087]), 0);
+
+// memory_copy.wast:2934
+assert_return(() => call($15, "load8_u", [62286]), 0);
+
+// memory_copy.wast:2935
+assert_return(() => call($15, "load8_u", [62485]), 0);
+
+// memory_copy.wast:2936
+assert_return(() => call($15, "load8_u", [62684]), 0);
+
+// memory_copy.wast:2937
+assert_return(() => call($15, "load8_u", [62883]), 0);
+
+// memory_copy.wast:2938
+assert_return(() => call($15, "load8_u", [63082]), 0);
+
+// memory_copy.wast:2939
+assert_return(() => call($15, "load8_u", [63281]), 0);
+
+// memory_copy.wast:2940
+assert_return(() => call($15, "load8_u", [63480]), 0);
+
+// memory_copy.wast:2941
+assert_return(() => call($15, "load8_u", [63679]), 0);
+
+// memory_copy.wast:2942
+assert_return(() => call($15, "load8_u", [63878]), 0);
+
+// memory_copy.wast:2943
+assert_return(() => call($15, "load8_u", [64077]), 0);
+
+// memory_copy.wast:2944
+assert_return(() => call($15, "load8_u", [64276]), 0);
+
+// memory_copy.wast:2945
+assert_return(() => call($15, "load8_u", [64475]), 0);
+
+// memory_copy.wast:2946
+assert_return(() => call($15, "load8_u", [64674]), 0);
+
+// memory_copy.wast:2947
+assert_return(() => call($15, "load8_u", [64873]), 0);
+
+// memory_copy.wast:2948
+assert_return(() => call($15, "load8_u", [65072]), 0);
+
+// memory_copy.wast:2949
+assert_return(() => call($15, "load8_u", [65271]), 0);
+
+// memory_copy.wast:2950
+assert_return(() => call($15, "load8_u", [65470]), 0);
+
+// memory_copy.wast:2951
+assert_return(() => call($15, "load8_u", [65506]), 0);
+
+// memory_copy.wast:2952
+assert_return(() => call($15, "load8_u", [65507]), 1);
+
+// memory_copy.wast:2953
+assert_return(() => call($15, "load8_u", [65508]), 2);
+
+// memory_copy.wast:2954
+assert_return(() => call($15, "load8_u", [65509]), 3);
+
+// memory_copy.wast:2955
+assert_return(() => call($15, "load8_u", [65510]), 4);
+
+// memory_copy.wast:2956
+assert_return(() => call($15, "load8_u", [65511]), 5);
+
+// memory_copy.wast:2957
+assert_return(() => call($15, "load8_u", [65512]), 6);
+
+// memory_copy.wast:2958
+assert_return(() => call($15, "load8_u", [65513]), 7);
+
+// memory_copy.wast:2959
+assert_return(() => call($15, "load8_u", [65514]), 8);
+
+// memory_copy.wast:2960
+assert_return(() => call($15, "load8_u", [65515]), 9);
+
+// memory_copy.wast:2961
+assert_return(() => call($15, "load8_u", [65516]), 10);
+
+// memory_copy.wast:2962
+assert_return(() => call($15, "load8_u", [65517]), 11);
+
+// memory_copy.wast:2963
+assert_return(() => call($15, "load8_u", [65518]), 12);
+
+// memory_copy.wast:2964
+assert_return(() => call($15, "load8_u", [65519]), 13);
+
+// memory_copy.wast:2965
+assert_return(() => call($15, "load8_u", [65520]), 14);
+
+// memory_copy.wast:2966
+assert_return(() => call($15, "load8_u", [65521]), 15);
+
+// memory_copy.wast:2967
+assert_return(() => call($15, "load8_u", [65522]), 16);
+
+// memory_copy.wast:2968
+assert_return(() => call($15, "load8_u", [65523]), 17);
+
+// memory_copy.wast:2969
+assert_return(() => call($15, "load8_u", [65524]), 18);
+
+// memory_copy.wast:2970
+assert_return(() => call($15, "load8_u", [65525]), 19);
+
+// memory_copy.wast:2972
+let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
+
+// memory_copy.wast:2980
+assert_trap(() => call($16, "run", [65506, 65516, 40]));
+
+// memory_copy.wast:2983
+assert_return(() => call($16, "load8_u", [198]), 0);
+
+// memory_copy.wast:2984
+assert_return(() => call($16, "load8_u", [397]), 0);
+
+// memory_copy.wast:2985
+assert_return(() => call($16, "load8_u", [596]), 0);
+
+// memory_copy.wast:2986
+assert_return(() => call($16, "load8_u", [795]), 0);
+
+// memory_copy.wast:2987
+assert_return(() => call($16, "load8_u", [994]), 0);
+
+// memory_copy.wast:2988
+assert_return(() => call($16, "load8_u", [1193]), 0);
+
+// memory_copy.wast:2989
+assert_return(() => call($16, "load8_u", [1392]), 0);
+
+// memory_copy.wast:2990
+assert_return(() => call($16, "load8_u", [1591]), 0);
+
+// memory_copy.wast:2991
+assert_return(() => call($16, "load8_u", [1790]), 0);
+
+// memory_copy.wast:2992
+assert_return(() => call($16, "load8_u", [1989]), 0);
+
+// memory_copy.wast:2993
+assert_return(() => call($16, "load8_u", [2188]), 0);
+
+// memory_copy.wast:2994
+assert_return(() => call($16, "load8_u", [2387]), 0);
+
+// memory_copy.wast:2995
+assert_return(() => call($16, "load8_u", [2586]), 0);
+
+// memory_copy.wast:2996
+assert_return(() => call($16, "load8_u", [2785]), 0);
+
+// memory_copy.wast:2997
+assert_return(() => call($16, "load8_u", [2984]), 0);
+
+// memory_copy.wast:2998
+assert_return(() => call($16, "load8_u", [3183]), 0);
+
+// memory_copy.wast:2999
+assert_return(() => call($16, "load8_u", [3382]), 0);
+
+// memory_copy.wast:3000
+assert_return(() => call($16, "load8_u", [3581]), 0);
+
+// memory_copy.wast:3001
+assert_return(() => call($16, "load8_u", [3780]), 0);
+
+// memory_copy.wast:3002
+assert_return(() => call($16, "load8_u", [3979]), 0);
+
+// memory_copy.wast:3003
+assert_return(() => call($16, "load8_u", [4178]), 0);
+
+// memory_copy.wast:3004
+assert_return(() => call($16, "load8_u", [4377]), 0);
+
+// memory_copy.wast:3005
+assert_return(() => call($16, "load8_u", [4576]), 0);
+
+// memory_copy.wast:3006
+assert_return(() => call($16, "load8_u", [4775]), 0);
+
+// memory_copy.wast:3007
+assert_return(() => call($16, "load8_u", [4974]), 0);
+
+// memory_copy.wast:3008
+assert_return(() => call($16, "load8_u", [5173]), 0);
+
+// memory_copy.wast:3009
+assert_return(() => call($16, "load8_u", [5372]), 0);
+
+// memory_copy.wast:3010
+assert_return(() => call($16, "load8_u", [5571]), 0);
+
+// memory_copy.wast:3011
+assert_return(() => call($16, "load8_u", [5770]), 0);
+
+// memory_copy.wast:3012
+assert_return(() => call($16, "load8_u", [5969]), 0);
+
+// memory_copy.wast:3013
+assert_return(() => call($16, "load8_u", [6168]), 0);
+
+// memory_copy.wast:3014
+assert_return(() => call($16, "load8_u", [6367]), 0);
+
+// memory_copy.wast:3015
+assert_return(() => call($16, "load8_u", [6566]), 0);
+
+// memory_copy.wast:3016
+assert_return(() => call($16, "load8_u", [6765]), 0);
+
+// memory_copy.wast:3017
+assert_return(() => call($16, "load8_u", [6964]), 0);
+
+// memory_copy.wast:3018
+assert_return(() => call($16, "load8_u", [7163]), 0);
+
+// memory_copy.wast:3019
+assert_return(() => call($16, "load8_u", [7362]), 0);
+
+// memory_copy.wast:3020
+assert_return(() => call($16, "load8_u", [7561]), 0);
+
+// memory_copy.wast:3021
+assert_return(() => call($16, "load8_u", [7760]), 0);
+
+// memory_copy.wast:3022
+assert_return(() => call($16, "load8_u", [7959]), 0);
+
+// memory_copy.wast:3023
+assert_return(() => call($16, "load8_u", [8158]), 0);
+
+// memory_copy.wast:3024
+assert_return(() => call($16, "load8_u", [8357]), 0);
+
+// memory_copy.wast:3025
+assert_return(() => call($16, "load8_u", [8556]), 0);
+
+// memory_copy.wast:3026
+assert_return(() => call($16, "load8_u", [8755]), 0);
+
+// memory_copy.wast:3027
+assert_return(() => call($16, "load8_u", [8954]), 0);
+
+// memory_copy.wast:3028
+assert_return(() => call($16, "load8_u", [9153]), 0);
+
+// memory_copy.wast:3029
+assert_return(() => call($16, "load8_u", [9352]), 0);
+
+// memory_copy.wast:3030
+assert_return(() => call($16, "load8_u", [9551]), 0);
+
+// memory_copy.wast:3031
+assert_return(() => call($16, "load8_u", [9750]), 0);
+
+// memory_copy.wast:3032
+assert_return(() => call($16, "load8_u", [9949]), 0);
+
+// memory_copy.wast:3033
+assert_return(() => call($16, "load8_u", [10148]), 0);
+
+// memory_copy.wast:3034
+assert_return(() => call($16, "load8_u", [10347]), 0);
+
+// memory_copy.wast:3035
+assert_return(() => call($16, "load8_u", [10546]), 0);
+
+// memory_copy.wast:3036
+assert_return(() => call($16, "load8_u", [10745]), 0);
+
+// memory_copy.wast:3037
+assert_return(() => call($16, "load8_u", [10944]), 0);
+
+// memory_copy.wast:3038
+assert_return(() => call($16, "load8_u", [11143]), 0);
+
+// memory_copy.wast:3039
+assert_return(() => call($16, "load8_u", [11342]), 0);
+
+// memory_copy.wast:3040
+assert_return(() => call($16, "load8_u", [11541]), 0);
+
+// memory_copy.wast:3041
+assert_return(() => call($16, "load8_u", [11740]), 0);
+
+// memory_copy.wast:3042
+assert_return(() => call($16, "load8_u", [11939]), 0);
+
+// memory_copy.wast:3043
+assert_return(() => call($16, "load8_u", [12138]), 0);
+
+// memory_copy.wast:3044
+assert_return(() => call($16, "load8_u", [12337]), 0);
+
+// memory_copy.wast:3045
+assert_return(() => call($16, "load8_u", [12536]), 0);
+
+// memory_copy.wast:3046
+assert_return(() => call($16, "load8_u", [12735]), 0);
+
+// memory_copy.wast:3047
+assert_return(() => call($16, "load8_u", [12934]), 0);
+
+// memory_copy.wast:3048
+assert_return(() => call($16, "load8_u", [13133]), 0);
+
+// memory_copy.wast:3049
+assert_return(() => call($16, "load8_u", [13332]), 0);
+
+// memory_copy.wast:3050
+assert_return(() => call($16, "load8_u", [13531]), 0);
+
+// memory_copy.wast:3051
+assert_return(() => call($16, "load8_u", [13730]), 0);
+
+// memory_copy.wast:3052
+assert_return(() => call($16, "load8_u", [13929]), 0);
+
+// memory_copy.wast:3053
+assert_return(() => call($16, "load8_u", [14128]), 0);
+
+// memory_copy.wast:3054
+assert_return(() => call($16, "load8_u", [14327]), 0);
+
+// memory_copy.wast:3055
+assert_return(() => call($16, "load8_u", [14526]), 0);
+
+// memory_copy.wast:3056
+assert_return(() => call($16, "load8_u", [14725]), 0);
+
+// memory_copy.wast:3057
+assert_return(() => call($16, "load8_u", [14924]), 0);
+
+// memory_copy.wast:3058
+assert_return(() => call($16, "load8_u", [15123]), 0);
+
+// memory_copy.wast:3059
+assert_return(() => call($16, "load8_u", [15322]), 0);
+
+// memory_copy.wast:3060
+assert_return(() => call($16, "load8_u", [15521]), 0);
+
+// memory_copy.wast:3061
+assert_return(() => call($16, "load8_u", [15720]), 0);
+
+// memory_copy.wast:3062
+assert_return(() => call($16, "load8_u", [15919]), 0);
+
+// memory_copy.wast:3063
+assert_return(() => call($16, "load8_u", [16118]), 0);
+
+// memory_copy.wast:3064
+assert_return(() => call($16, "load8_u", [16317]), 0);
+
+// memory_copy.wast:3065
+assert_return(() => call($16, "load8_u", [16516]), 0);
+
+// memory_copy.wast:3066
+assert_return(() => call($16, "load8_u", [16715]), 0);
+
+// memory_copy.wast:3067
+assert_return(() => call($16, "load8_u", [16914]), 0);
+
+// memory_copy.wast:3068
+assert_return(() => call($16, "load8_u", [17113]), 0);
+
+// memory_copy.wast:3069
+assert_return(() => call($16, "load8_u", [17312]), 0);
+
+// memory_copy.wast:3070
+assert_return(() => call($16, "load8_u", [17511]), 0);
+
+// memory_copy.wast:3071
+assert_return(() => call($16, "load8_u", [17710]), 0);
+
+// memory_copy.wast:3072
+assert_return(() => call($16, "load8_u", [17909]), 0);
+
+// memory_copy.wast:3073
+assert_return(() => call($16, "load8_u", [18108]), 0);
+
+// memory_copy.wast:3074
+assert_return(() => call($16, "load8_u", [18307]), 0);
+
+// memory_copy.wast:3075
+assert_return(() => call($16, "load8_u", [18506]), 0);
+
+// memory_copy.wast:3076
+assert_return(() => call($16, "load8_u", [18705]), 0);
+
+// memory_copy.wast:3077
+assert_return(() => call($16, "load8_u", [18904]), 0);
+
+// memory_copy.wast:3078
+assert_return(() => call($16, "load8_u", [19103]), 0);
+
+// memory_copy.wast:3079
+assert_return(() => call($16, "load8_u", [19302]), 0);
+
+// memory_copy.wast:3080
+assert_return(() => call($16, "load8_u", [19501]), 0);
+
+// memory_copy.wast:3081
+assert_return(() => call($16, "load8_u", [19700]), 0);
+
+// memory_copy.wast:3082
+assert_return(() => call($16, "load8_u", [19899]), 0);
+
+// memory_copy.wast:3083
+assert_return(() => call($16, "load8_u", [20098]), 0);
+
+// memory_copy.wast:3084
+assert_return(() => call($16, "load8_u", [20297]), 0);
+
+// memory_copy.wast:3085
+assert_return(() => call($16, "load8_u", [20496]), 0);
+
+// memory_copy.wast:3086
+assert_return(() => call($16, "load8_u", [20695]), 0);
+
+// memory_copy.wast:3087
+assert_return(() => call($16, "load8_u", [20894]), 0);
+
+// memory_copy.wast:3088
+assert_return(() => call($16, "load8_u", [21093]), 0);
+
+// memory_copy.wast:3089
+assert_return(() => call($16, "load8_u", [21292]), 0);
+
+// memory_copy.wast:3090
+assert_return(() => call($16, "load8_u", [21491]), 0);
+
+// memory_copy.wast:3091
+assert_return(() => call($16, "load8_u", [21690]), 0);
+
+// memory_copy.wast:3092
+assert_return(() => call($16, "load8_u", [21889]), 0);
+
+// memory_copy.wast:3093
+assert_return(() => call($16, "load8_u", [22088]), 0);
+
+// memory_copy.wast:3094
+assert_return(() => call($16, "load8_u", [22287]), 0);
+
+// memory_copy.wast:3095
+assert_return(() => call($16, "load8_u", [22486]), 0);
+
+// memory_copy.wast:3096
+assert_return(() => call($16, "load8_u", [22685]), 0);
+
+// memory_copy.wast:3097
+assert_return(() => call($16, "load8_u", [22884]), 0);
+
+// memory_copy.wast:3098
+assert_return(() => call($16, "load8_u", [23083]), 0);
+
+// memory_copy.wast:3099
+assert_return(() => call($16, "load8_u", [23282]), 0);
+
+// memory_copy.wast:3100
+assert_return(() => call($16, "load8_u", [23481]), 0);
+
+// memory_copy.wast:3101
+assert_return(() => call($16, "load8_u", [23680]), 0);
+
+// memory_copy.wast:3102
+assert_return(() => call($16, "load8_u", [23879]), 0);
+
+// memory_copy.wast:3103
+assert_return(() => call($16, "load8_u", [24078]), 0);
+
+// memory_copy.wast:3104
+assert_return(() => call($16, "load8_u", [24277]), 0);
+
+// memory_copy.wast:3105
+assert_return(() => call($16, "load8_u", [24476]), 0);
+
+// memory_copy.wast:3106
+assert_return(() => call($16, "load8_u", [24675]), 0);
+
+// memory_copy.wast:3107
+assert_return(() => call($16, "load8_u", [24874]), 0);
+
+// memory_copy.wast:3108
+assert_return(() => call($16, "load8_u", [25073]), 0);
+
+// memory_copy.wast:3109
+assert_return(() => call($16, "load8_u", [25272]), 0);
+
+// memory_copy.wast:3110
+assert_return(() => call($16, "load8_u", [25471]), 0);
+
+// memory_copy.wast:3111
+assert_return(() => call($16, "load8_u", [25670]), 0);
+
+// memory_copy.wast:3112
+assert_return(() => call($16, "load8_u", [25869]), 0);
+
+// memory_copy.wast:3113
+assert_return(() => call($16, "load8_u", [26068]), 0);
+
+// memory_copy.wast:3114
+assert_return(() => call($16, "load8_u", [26267]), 0);
+
+// memory_copy.wast:3115
+assert_return(() => call($16, "load8_u", [26466]), 0);
+
+// memory_copy.wast:3116
+assert_return(() => call($16, "load8_u", [26665]), 0);
+
+// memory_copy.wast:3117
+assert_return(() => call($16, "load8_u", [26864]), 0);
+
+// memory_copy.wast:3118
+assert_return(() => call($16, "load8_u", [27063]), 0);
+
+// memory_copy.wast:3119
+assert_return(() => call($16, "load8_u", [27262]), 0);
+
+// memory_copy.wast:3120
+assert_return(() => call($16, "load8_u", [27461]), 0);
+
+// memory_copy.wast:3121
+assert_return(() => call($16, "load8_u", [27660]), 0);
+
+// memory_copy.wast:3122
+assert_return(() => call($16, "load8_u", [27859]), 0);
+
+// memory_copy.wast:3123
+assert_return(() => call($16, "load8_u", [28058]), 0);
+
+// memory_copy.wast:3124
+assert_return(() => call($16, "load8_u", [28257]), 0);
+
+// memory_copy.wast:3125
+assert_return(() => call($16, "load8_u", [28456]), 0);
+
+// memory_copy.wast:3126
+assert_return(() => call($16, "load8_u", [28655]), 0);
+
+// memory_copy.wast:3127
+assert_return(() => call($16, "load8_u", [28854]), 0);
+
+// memory_copy.wast:3128
+assert_return(() => call($16, "load8_u", [29053]), 0);
+
+// memory_copy.wast:3129
+assert_return(() => call($16, "load8_u", [29252]), 0);
+
+// memory_copy.wast:3130
+assert_return(() => call($16, "load8_u", [29451]), 0);
+
+// memory_copy.wast:3131
+assert_return(() => call($16, "load8_u", [29650]), 0);
+
+// memory_copy.wast:3132
+assert_return(() => call($16, "load8_u", [29849]), 0);
+
+// memory_copy.wast:3133
+assert_return(() => call($16, "load8_u", [30048]), 0);
+
+// memory_copy.wast:3134
+assert_return(() => call($16, "load8_u", [30247]), 0);
+
+// memory_copy.wast:3135
+assert_return(() => call($16, "load8_u", [30446]), 0);
+
+// memory_copy.wast:3136
+assert_return(() => call($16, "load8_u", [30645]), 0);
+
+// memory_copy.wast:3137
+assert_return(() => call($16, "load8_u", [30844]), 0);
+
+// memory_copy.wast:3138
+assert_return(() => call($16, "load8_u", [31043]), 0);
+
+// memory_copy.wast:3139
+assert_return(() => call($16, "load8_u", [31242]), 0);
+
+// memory_copy.wast:3140
+assert_return(() => call($16, "load8_u", [31441]), 0);
+
+// memory_copy.wast:3141
+assert_return(() => call($16, "load8_u", [31640]), 0);
+
+// memory_copy.wast:3142
+assert_return(() => call($16, "load8_u", [31839]), 0);
+
+// memory_copy.wast:3143
+assert_return(() => call($16, "load8_u", [32038]), 0);
+
+// memory_copy.wast:3144
+assert_return(() => call($16, "load8_u", [32237]), 0);
+
+// memory_copy.wast:3145
+assert_return(() => call($16, "load8_u", [32436]), 0);
+
+// memory_copy.wast:3146
+assert_return(() => call($16, "load8_u", [32635]), 0);
+
+// memory_copy.wast:3147
+assert_return(() => call($16, "load8_u", [32834]), 0);
+
+// memory_copy.wast:3148
+assert_return(() => call($16, "load8_u", [33033]), 0);
+
+// memory_copy.wast:3149
+assert_return(() => call($16, "load8_u", [33232]), 0);
+
+// memory_copy.wast:3150
+assert_return(() => call($16, "load8_u", [33431]), 0);
+
+// memory_copy.wast:3151
+assert_return(() => call($16, "load8_u", [33630]), 0);
+
+// memory_copy.wast:3152
+assert_return(() => call($16, "load8_u", [33829]), 0);
+
+// memory_copy.wast:3153
+assert_return(() => call($16, "load8_u", [34028]), 0);
+
+// memory_copy.wast:3154
+assert_return(() => call($16, "load8_u", [34227]), 0);
+
+// memory_copy.wast:3155
+assert_return(() => call($16, "load8_u", [34426]), 0);
+
+// memory_copy.wast:3156
+assert_return(() => call($16, "load8_u", [34625]), 0);
+
+// memory_copy.wast:3157
+assert_return(() => call($16, "load8_u", [34824]), 0);
+
+// memory_copy.wast:3158
+assert_return(() => call($16, "load8_u", [35023]), 0);
+
+// memory_copy.wast:3159
+assert_return(() => call($16, "load8_u", [35222]), 0);
+
+// memory_copy.wast:3160
+assert_return(() => call($16, "load8_u", [35421]), 0);
+
+// memory_copy.wast:3161
+assert_return(() => call($16, "load8_u", [35620]), 0);
+
+// memory_copy.wast:3162
+assert_return(() => call($16, "load8_u", [35819]), 0);
+
+// memory_copy.wast:3163
+assert_return(() => call($16, "load8_u", [36018]), 0);
+
+// memory_copy.wast:3164
+assert_return(() => call($16, "load8_u", [36217]), 0);
+
+// memory_copy.wast:3165
+assert_return(() => call($16, "load8_u", [36416]), 0);
+
+// memory_copy.wast:3166
+assert_return(() => call($16, "load8_u", [36615]), 0);
+
+// memory_copy.wast:3167
+assert_return(() => call($16, "load8_u", [36814]), 0);
+
+// memory_copy.wast:3168
+assert_return(() => call($16, "load8_u", [37013]), 0);
+
+// memory_copy.wast:3169
+assert_return(() => call($16, "load8_u", [37212]), 0);
+
+// memory_copy.wast:3170
+assert_return(() => call($16, "load8_u", [37411]), 0);
+
+// memory_copy.wast:3171
+assert_return(() => call($16, "load8_u", [37610]), 0);
+
+// memory_copy.wast:3172
+assert_return(() => call($16, "load8_u", [37809]), 0);
+
+// memory_copy.wast:3173
+assert_return(() => call($16, "load8_u", [38008]), 0);
+
+// memory_copy.wast:3174
+assert_return(() => call($16, "load8_u", [38207]), 0);
+
+// memory_copy.wast:3175
+assert_return(() => call($16, "load8_u", [38406]), 0);
+
+// memory_copy.wast:3176
+assert_return(() => call($16, "load8_u", [38605]), 0);
+
+// memory_copy.wast:3177
+assert_return(() => call($16, "load8_u", [38804]), 0);
+
+// memory_copy.wast:3178
+assert_return(() => call($16, "load8_u", [39003]), 0);
+
+// memory_copy.wast:3179
+assert_return(() => call($16, "load8_u", [39202]), 0);
+
+// memory_copy.wast:3180
+assert_return(() => call($16, "load8_u", [39401]), 0);
+
+// memory_copy.wast:3181
+assert_return(() => call($16, "load8_u", [39600]), 0);
+
+// memory_copy.wast:3182
+assert_return(() => call($16, "load8_u", [39799]), 0);
+
+// memory_copy.wast:3183
+assert_return(() => call($16, "load8_u", [39998]), 0);
+
+// memory_copy.wast:3184
+assert_return(() => call($16, "load8_u", [40197]), 0);
+
+// memory_copy.wast:3185
+assert_return(() => call($16, "load8_u", [40396]), 0);
+
+// memory_copy.wast:3186
+assert_return(() => call($16, "load8_u", [40595]), 0);
+
+// memory_copy.wast:3187
+assert_return(() => call($16, "load8_u", [40794]), 0);
+
+// memory_copy.wast:3188
+assert_return(() => call($16, "load8_u", [40993]), 0);
+
+// memory_copy.wast:3189
+assert_return(() => call($16, "load8_u", [41192]), 0);
+
+// memory_copy.wast:3190
+assert_return(() => call($16, "load8_u", [41391]), 0);
+
+// memory_copy.wast:3191
+assert_return(() => call($16, "load8_u", [41590]), 0);
+
+// memory_copy.wast:3192
+assert_return(() => call($16, "load8_u", [41789]), 0);
+
+// memory_copy.wast:3193
+assert_return(() => call($16, "load8_u", [41988]), 0);
+
+// memory_copy.wast:3194
+assert_return(() => call($16, "load8_u", [42187]), 0);
+
+// memory_copy.wast:3195
+assert_return(() => call($16, "load8_u", [42386]), 0);
+
+// memory_copy.wast:3196
+assert_return(() => call($16, "load8_u", [42585]), 0);
+
+// memory_copy.wast:3197
+assert_return(() => call($16, "load8_u", [42784]), 0);
+
+// memory_copy.wast:3198
+assert_return(() => call($16, "load8_u", [42983]), 0);
+
+// memory_copy.wast:3199
+assert_return(() => call($16, "load8_u", [43182]), 0);
+
+// memory_copy.wast:3200
+assert_return(() => call($16, "load8_u", [43381]), 0);
+
+// memory_copy.wast:3201
+assert_return(() => call($16, "load8_u", [43580]), 0);
+
+// memory_copy.wast:3202
+assert_return(() => call($16, "load8_u", [43779]), 0);
+
+// memory_copy.wast:3203
+assert_return(() => call($16, "load8_u", [43978]), 0);
+
+// memory_copy.wast:3204
+assert_return(() => call($16, "load8_u", [44177]), 0);
+
+// memory_copy.wast:3205
+assert_return(() => call($16, "load8_u", [44376]), 0);
+
+// memory_copy.wast:3206
+assert_return(() => call($16, "load8_u", [44575]), 0);
+
+// memory_copy.wast:3207
+assert_return(() => call($16, "load8_u", [44774]), 0);
+
+// memory_copy.wast:3208
+assert_return(() => call($16, "load8_u", [44973]), 0);
+
+// memory_copy.wast:3209
+assert_return(() => call($16, "load8_u", [45172]), 0);
+
+// memory_copy.wast:3210
+assert_return(() => call($16, "load8_u", [45371]), 0);
+
+// memory_copy.wast:3211
+assert_return(() => call($16, "load8_u", [45570]), 0);
+
+// memory_copy.wast:3212
+assert_return(() => call($16, "load8_u", [45769]), 0);
+
+// memory_copy.wast:3213
+assert_return(() => call($16, "load8_u", [45968]), 0);
+
+// memory_copy.wast:3214
+assert_return(() => call($16, "load8_u", [46167]), 0);
+
+// memory_copy.wast:3215
+assert_return(() => call($16, "load8_u", [46366]), 0);
+
+// memory_copy.wast:3216
+assert_return(() => call($16, "load8_u", [46565]), 0);
+
+// memory_copy.wast:3217
+assert_return(() => call($16, "load8_u", [46764]), 0);
+
+// memory_copy.wast:3218
+assert_return(() => call($16, "load8_u", [46963]), 0);
+
+// memory_copy.wast:3219
+assert_return(() => call($16, "load8_u", [47162]), 0);
+
+// memory_copy.wast:3220
+assert_return(() => call($16, "load8_u", [47361]), 0);
+
+// memory_copy.wast:3221
+assert_return(() => call($16, "load8_u", [47560]), 0);
+
+// memory_copy.wast:3222
+assert_return(() => call($16, "load8_u", [47759]), 0);
+
+// memory_copy.wast:3223
+assert_return(() => call($16, "load8_u", [47958]), 0);
+
+// memory_copy.wast:3224
+assert_return(() => call($16, "load8_u", [48157]), 0);
+
+// memory_copy.wast:3225
+assert_return(() => call($16, "load8_u", [48356]), 0);
+
+// memory_copy.wast:3226
+assert_return(() => call($16, "load8_u", [48555]), 0);
+
+// memory_copy.wast:3227
+assert_return(() => call($16, "load8_u", [48754]), 0);
+
+// memory_copy.wast:3228
+assert_return(() => call($16, "load8_u", [48953]), 0);
+
+// memory_copy.wast:3229
+assert_return(() => call($16, "load8_u", [49152]), 0);
+
+// memory_copy.wast:3230
+assert_return(() => call($16, "load8_u", [49351]), 0);
+
+// memory_copy.wast:3231
+assert_return(() => call($16, "load8_u", [49550]), 0);
+
+// memory_copy.wast:3232
+assert_return(() => call($16, "load8_u", [49749]), 0);
+
+// memory_copy.wast:3233
+assert_return(() => call($16, "load8_u", [49948]), 0);
+
+// memory_copy.wast:3234
+assert_return(() => call($16, "load8_u", [50147]), 0);
+
+// memory_copy.wast:3235
+assert_return(() => call($16, "load8_u", [50346]), 0);
+
+// memory_copy.wast:3236
+assert_return(() => call($16, "load8_u", [50545]), 0);
+
+// memory_copy.wast:3237
+assert_return(() => call($16, "load8_u", [50744]), 0);
+
+// memory_copy.wast:3238
+assert_return(() => call($16, "load8_u", [50943]), 0);
+
+// memory_copy.wast:3239
+assert_return(() => call($16, "load8_u", [51142]), 0);
+
+// memory_copy.wast:3240
+assert_return(() => call($16, "load8_u", [51341]), 0);
+
+// memory_copy.wast:3241
+assert_return(() => call($16, "load8_u", [51540]), 0);
+
+// memory_copy.wast:3242
+assert_return(() => call($16, "load8_u", [51739]), 0);
+
+// memory_copy.wast:3243
+assert_return(() => call($16, "load8_u", [51938]), 0);
+
+// memory_copy.wast:3244
+assert_return(() => call($16, "load8_u", [52137]), 0);
+
+// memory_copy.wast:3245
+assert_return(() => call($16, "load8_u", [52336]), 0);
+
+// memory_copy.wast:3246
+assert_return(() => call($16, "load8_u", [52535]), 0);
+
+// memory_copy.wast:3247
+assert_return(() => call($16, "load8_u", [52734]), 0);
+
+// memory_copy.wast:3248
+assert_return(() => call($16, "load8_u", [52933]), 0);
+
+// memory_copy.wast:3249
+assert_return(() => call($16, "load8_u", [53132]), 0);
+
+// memory_copy.wast:3250
+assert_return(() => call($16, "load8_u", [53331]), 0);
+
+// memory_copy.wast:3251
+assert_return(() => call($16, "load8_u", [53530]), 0);
+
+// memory_copy.wast:3252
+assert_return(() => call($16, "load8_u", [53729]), 0);
+
+// memory_copy.wast:3253
+assert_return(() => call($16, "load8_u", [53928]), 0);
+
+// memory_copy.wast:3254
+assert_return(() => call($16, "load8_u", [54127]), 0);
+
+// memory_copy.wast:3255
+assert_return(() => call($16, "load8_u", [54326]), 0);
+
+// memory_copy.wast:3256
+assert_return(() => call($16, "load8_u", [54525]), 0);
+
+// memory_copy.wast:3257
+assert_return(() => call($16, "load8_u", [54724]), 0);
+
+// memory_copy.wast:3258
+assert_return(() => call($16, "load8_u", [54923]), 0);
+
+// memory_copy.wast:3259
+assert_return(() => call($16, "load8_u", [55122]), 0);
+
+// memory_copy.wast:3260
+assert_return(() => call($16, "load8_u", [55321]), 0);
+
+// memory_copy.wast:3261
+assert_return(() => call($16, "load8_u", [55520]), 0);
+
+// memory_copy.wast:3262
+assert_return(() => call($16, "load8_u", [55719]), 0);
+
+// memory_copy.wast:3263
+assert_return(() => call($16, "load8_u", [55918]), 0);
+
+// memory_copy.wast:3264
+assert_return(() => call($16, "load8_u", [56117]), 0);
+
+// memory_copy.wast:3265
+assert_return(() => call($16, "load8_u", [56316]), 0);
+
+// memory_copy.wast:3266
+assert_return(() => call($16, "load8_u", [56515]), 0);
+
+// memory_copy.wast:3267
+assert_return(() => call($16, "load8_u", [56714]), 0);
+
+// memory_copy.wast:3268
+assert_return(() => call($16, "load8_u", [56913]), 0);
+
+// memory_copy.wast:3269
+assert_return(() => call($16, "load8_u", [57112]), 0);
+
+// memory_copy.wast:3270
+assert_return(() => call($16, "load8_u", [57311]), 0);
+
+// memory_copy.wast:3271
+assert_return(() => call($16, "load8_u", [57510]), 0);
+
+// memory_copy.wast:3272
+assert_return(() => call($16, "load8_u", [57709]), 0);
+
+// memory_copy.wast:3273
+assert_return(() => call($16, "load8_u", [57908]), 0);
+
+// memory_copy.wast:3274
+assert_return(() => call($16, "load8_u", [58107]), 0);
+
+// memory_copy.wast:3275
+assert_return(() => call($16, "load8_u", [58306]), 0);
+
+// memory_copy.wast:3276
+assert_return(() => call($16, "load8_u", [58505]), 0);
+
+// memory_copy.wast:3277
+assert_return(() => call($16, "load8_u", [58704]), 0);
+
+// memory_copy.wast:3278
+assert_return(() => call($16, "load8_u", [58903]), 0);
+
+// memory_copy.wast:3279
+assert_return(() => call($16, "load8_u", [59102]), 0);
+
+// memory_copy.wast:3280
+assert_return(() => call($16, "load8_u", [59301]), 0);
+
+// memory_copy.wast:3281
+assert_return(() => call($16, "load8_u", [59500]), 0);
+
+// memory_copy.wast:3282
+assert_return(() => call($16, "load8_u", [59699]), 0);
+
+// memory_copy.wast:3283
+assert_return(() => call($16, "load8_u", [59898]), 0);
+
+// memory_copy.wast:3284
+assert_return(() => call($16, "load8_u", [60097]), 0);
+
+// memory_copy.wast:3285
+assert_return(() => call($16, "load8_u", [60296]), 0);
+
+// memory_copy.wast:3286
+assert_return(() => call($16, "load8_u", [60495]), 0);
+
+// memory_copy.wast:3287
+assert_return(() => call($16, "load8_u", [60694]), 0);
+
+// memory_copy.wast:3288
+assert_return(() => call($16, "load8_u", [60893]), 0);
+
+// memory_copy.wast:3289
+assert_return(() => call($16, "load8_u", [61092]), 0);
+
+// memory_copy.wast:3290
+assert_return(() => call($16, "load8_u", [61291]), 0);
+
+// memory_copy.wast:3291
+assert_return(() => call($16, "load8_u", [61490]), 0);
+
+// memory_copy.wast:3292
+assert_return(() => call($16, "load8_u", [61689]), 0);
+
+// memory_copy.wast:3293
+assert_return(() => call($16, "load8_u", [61888]), 0);
+
+// memory_copy.wast:3294
+assert_return(() => call($16, "load8_u", [62087]), 0);
+
+// memory_copy.wast:3295
+assert_return(() => call($16, "load8_u", [62286]), 0);
+
+// memory_copy.wast:3296
+assert_return(() => call($16, "load8_u", [62485]), 0);
+
+// memory_copy.wast:3297
+assert_return(() => call($16, "load8_u", [62684]), 0);
+
+// memory_copy.wast:3298
+assert_return(() => call($16, "load8_u", [62883]), 0);
+
+// memory_copy.wast:3299
+assert_return(() => call($16, "load8_u", [63082]), 0);
+
+// memory_copy.wast:3300
+assert_return(() => call($16, "load8_u", [63281]), 0);
+
+// memory_copy.wast:3301
+assert_return(() => call($16, "load8_u", [63480]), 0);
+
+// memory_copy.wast:3302
+assert_return(() => call($16, "load8_u", [63679]), 0);
+
+// memory_copy.wast:3303
+assert_return(() => call($16, "load8_u", [63878]), 0);
+
+// memory_copy.wast:3304
+assert_return(() => call($16, "load8_u", [64077]), 0);
+
+// memory_copy.wast:3305
+assert_return(() => call($16, "load8_u", [64276]), 0);
+
+// memory_copy.wast:3306
+assert_return(() => call($16, "load8_u", [64475]), 0);
+
+// memory_copy.wast:3307
+assert_return(() => call($16, "load8_u", [64674]), 0);
+
+// memory_copy.wast:3308
+assert_return(() => call($16, "load8_u", [64873]), 0);
+
+// memory_copy.wast:3309
+assert_return(() => call($16, "load8_u", [65072]), 0);
+
+// memory_copy.wast:3310
+assert_return(() => call($16, "load8_u", [65271]), 0);
+
+// memory_copy.wast:3311
+assert_return(() => call($16, "load8_u", [65470]), 0);
+
+// memory_copy.wast:3312
+assert_return(() => call($16, "load8_u", [65506]), 0);
+
+// memory_copy.wast:3313
+assert_return(() => call($16, "load8_u", [65507]), 1);
+
+// memory_copy.wast:3314
+assert_return(() => call($16, "load8_u", [65508]), 2);
+
+// memory_copy.wast:3315
+assert_return(() => call($16, "load8_u", [65509]), 3);
+
+// memory_copy.wast:3316
+assert_return(() => call($16, "load8_u", [65510]), 4);
+
+// memory_copy.wast:3317
+assert_return(() => call($16, "load8_u", [65511]), 5);
+
+// memory_copy.wast:3318
+assert_return(() => call($16, "load8_u", [65512]), 6);
+
+// memory_copy.wast:3319
+assert_return(() => call($16, "load8_u", [65513]), 7);
+
+// memory_copy.wast:3320
+assert_return(() => call($16, "load8_u", [65514]), 8);
+
+// memory_copy.wast:3321
+assert_return(() => call($16, "load8_u", [65515]), 9);
+
+// memory_copy.wast:3322
+assert_return(() => call($16, "load8_u", [65516]), 10);
+
+// memory_copy.wast:3323
+assert_return(() => call($16, "load8_u", [65517]), 11);
+
+// memory_copy.wast:3324
+assert_return(() => call($16, "load8_u", [65518]), 12);
+
+// memory_copy.wast:3325
+assert_return(() => call($16, "load8_u", [65519]), 13);
+
+// memory_copy.wast:3326
+assert_return(() => call($16, "load8_u", [65520]), 14);
+
+// memory_copy.wast:3327
+assert_return(() => call($16, "load8_u", [65521]), 15);
+
+// memory_copy.wast:3328
+assert_return(() => call($16, "load8_u", [65522]), 16);
+
+// memory_copy.wast:3329
+assert_return(() => call($16, "load8_u", [65523]), 17);
+
+// memory_copy.wast:3330
+assert_return(() => call($16, "load8_u", [65524]), 18);
+
+// memory_copy.wast:3331
+assert_return(() => call($16, "load8_u", [65525]), 19);
+
+// memory_copy.wast:3332
+assert_return(() => call($16, "load8_u", [65526]), 10);
+
+// memory_copy.wast:3333
+assert_return(() => call($16, "load8_u", [65527]), 11);
+
+// memory_copy.wast:3334
+assert_return(() => call($16, "load8_u", [65528]), 12);
+
+// memory_copy.wast:3335
+assert_return(() => call($16, "load8_u", [65529]), 13);
+
+// memory_copy.wast:3336
+assert_return(() => call($16, "load8_u", [65530]), 14);
+
+// memory_copy.wast:3337
+assert_return(() => call($16, "load8_u", [65531]), 15);
+
+// memory_copy.wast:3338
+assert_return(() => call($16, "load8_u", [65532]), 16);
+
+// memory_copy.wast:3339
+assert_return(() => call($16, "load8_u", [65533]), 17);
+
+// memory_copy.wast:3340
+assert_return(() => call($16, "load8_u", [65534]), 18);
+
+// memory_copy.wast:3341
+assert_return(() => call($16, "load8_u", [65535]), 19);
+
+// memory_copy.wast:3343
+let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
+
+// memory_copy.wast:3351
+assert_trap(() => call($17, "run", [65516, 65516, 40]));
+
+// memory_copy.wast:3354
+assert_return(() => call($17, "load8_u", [198]), 0);
+
+// memory_copy.wast:3355
+assert_return(() => call($17, "load8_u", [397]), 0);
+
+// memory_copy.wast:3356
+assert_return(() => call($17, "load8_u", [596]), 0);
+
+// memory_copy.wast:3357
+assert_return(() => call($17, "load8_u", [795]), 0);
+
+// memory_copy.wast:3358
+assert_return(() => call($17, "load8_u", [994]), 0);
+
+// memory_copy.wast:3359
+assert_return(() => call($17, "load8_u", [1193]), 0);
+
+// memory_copy.wast:3360
+assert_return(() => call($17, "load8_u", [1392]), 0);
+
+// memory_copy.wast:3361
+assert_return(() => call($17, "load8_u", [1591]), 0);
+
+// memory_copy.wast:3362
+assert_return(() => call($17, "load8_u", [1790]), 0);
+
+// memory_copy.wast:3363
+assert_return(() => call($17, "load8_u", [1989]), 0);
+
+// memory_copy.wast:3364
+assert_return(() => call($17, "load8_u", [2188]), 0);
+
+// memory_copy.wast:3365
+assert_return(() => call($17, "load8_u", [2387]), 0);
+
+// memory_copy.wast:3366
+assert_return(() => call($17, "load8_u", [2586]), 0);
+
+// memory_copy.wast:3367
+assert_return(() => call($17, "load8_u", [2785]), 0);
+
+// memory_copy.wast:3368
+assert_return(() => call($17, "load8_u", [2984]), 0);
+
+// memory_copy.wast:3369
+assert_return(() => call($17, "load8_u", [3183]), 0);
+
+// memory_copy.wast:3370
+assert_return(() => call($17, "load8_u", [3382]), 0);
+
+// memory_copy.wast:3371
+assert_return(() => call($17, "load8_u", [3581]), 0);
+
+// memory_copy.wast:3372
+assert_return(() => call($17, "load8_u", [3780]), 0);
+
+// memory_copy.wast:3373
+assert_return(() => call($17, "load8_u", [3979]), 0);
+
+// memory_copy.wast:3374
+assert_return(() => call($17, "load8_u", [4178]), 0);
+
+// memory_copy.wast:3375
+assert_return(() => call($17, "load8_u", [4377]), 0);
+
+// memory_copy.wast:3376
+assert_return(() => call($17, "load8_u", [4576]), 0);
+
+// memory_copy.wast:3377
+assert_return(() => call($17, "load8_u", [4775]), 0);
+
+// memory_copy.wast:3378
+assert_return(() => call($17, "load8_u", [4974]), 0);
+
+// memory_copy.wast:3379
+assert_return(() => call($17, "load8_u", [5173]), 0);
+
+// memory_copy.wast:3380
+assert_return(() => call($17, "load8_u", [5372]), 0);
+
+// memory_copy.wast:3381
+assert_return(() => call($17, "load8_u", [5571]), 0);
+
+// memory_copy.wast:3382
+assert_return(() => call($17, "load8_u", [5770]), 0);
+
+// memory_copy.wast:3383
+assert_return(() => call($17, "load8_u", [5969]), 0);
+
+// memory_copy.wast:3384
+assert_return(() => call($17, "load8_u", [6168]), 0);
+
+// memory_copy.wast:3385
+assert_return(() => call($17, "load8_u", [6367]), 0);
+
+// memory_copy.wast:3386
+assert_return(() => call($17, "load8_u", [6566]), 0);
+
+// memory_copy.wast:3387
+assert_return(() => call($17, "load8_u", [6765]), 0);
+
+// memory_copy.wast:3388
+assert_return(() => call($17, "load8_u", [6964]), 0);
+
+// memory_copy.wast:3389
+assert_return(() => call($17, "load8_u", [7163]), 0);
+
+// memory_copy.wast:3390
+assert_return(() => call($17, "load8_u", [7362]), 0);
+
+// memory_copy.wast:3391
+assert_return(() => call($17, "load8_u", [7561]), 0);
+
+// memory_copy.wast:3392
+assert_return(() => call($17, "load8_u", [7760]), 0);
+
+// memory_copy.wast:3393
+assert_return(() => call($17, "load8_u", [7959]), 0);
+
+// memory_copy.wast:3394
+assert_return(() => call($17, "load8_u", [8158]), 0);
+
+// memory_copy.wast:3395
+assert_return(() => call($17, "load8_u", [8357]), 0);
+
+// memory_copy.wast:3396
+assert_return(() => call($17, "load8_u", [8556]), 0);
+
+// memory_copy.wast:3397
+assert_return(() => call($17, "load8_u", [8755]), 0);
+
+// memory_copy.wast:3398
+assert_return(() => call($17, "load8_u", [8954]), 0);
+
+// memory_copy.wast:3399
+assert_return(() => call($17, "load8_u", [9153]), 0);
+
+// memory_copy.wast:3400
+assert_return(() => call($17, "load8_u", [9352]), 0);
+
+// memory_copy.wast:3401
+assert_return(() => call($17, "load8_u", [9551]), 0);
+
+// memory_copy.wast:3402
+assert_return(() => call($17, "load8_u", [9750]), 0);
+
+// memory_copy.wast:3403
+assert_return(() => call($17, "load8_u", [9949]), 0);
+
+// memory_copy.wast:3404
+assert_return(() => call($17, "load8_u", [10148]), 0);
+
+// memory_copy.wast:3405
+assert_return(() => call($17, "load8_u", [10347]), 0);
+
+// memory_copy.wast:3406
+assert_return(() => call($17, "load8_u", [10546]), 0);
+
+// memory_copy.wast:3407
+assert_return(() => call($17, "load8_u", [10745]), 0);
+
+// memory_copy.wast:3408
+assert_return(() => call($17, "load8_u", [10944]), 0);
+
+// memory_copy.wast:3409
+assert_return(() => call($17, "load8_u", [11143]), 0);
+
+// memory_copy.wast:3410
+assert_return(() => call($17, "load8_u", [11342]), 0);
+
+// memory_copy.wast:3411
+assert_return(() => call($17, "load8_u", [11541]), 0);
+
+// memory_copy.wast:3412
+assert_return(() => call($17, "load8_u", [11740]), 0);
+
+// memory_copy.wast:3413
+assert_return(() => call($17, "load8_u", [11939]), 0);
+
+// memory_copy.wast:3414
+assert_return(() => call($17, "load8_u", [12138]), 0);
+
+// memory_copy.wast:3415
+assert_return(() => call($17, "load8_u", [12337]), 0);
+
+// memory_copy.wast:3416
+assert_return(() => call($17, "load8_u", [12536]), 0);
+
+// memory_copy.wast:3417
+assert_return(() => call($17, "load8_u", [12735]), 0);
+
+// memory_copy.wast:3418
+assert_return(() => call($17, "load8_u", [12934]), 0);
+
+// memory_copy.wast:3419
+assert_return(() => call($17, "load8_u", [13133]), 0);
+
+// memory_copy.wast:3420
+assert_return(() => call($17, "load8_u", [13332]), 0);
+
+// memory_copy.wast:3421
+assert_return(() => call($17, "load8_u", [13531]), 0);
+
+// memory_copy.wast:3422
+assert_return(() => call($17, "load8_u", [13730]), 0);
+
+// memory_copy.wast:3423
+assert_return(() => call($17, "load8_u", [13929]), 0);
+
+// memory_copy.wast:3424
+assert_return(() => call($17, "load8_u", [14128]), 0);
+
+// memory_copy.wast:3425
+assert_return(() => call($17, "load8_u", [14327]), 0);
+
+// memory_copy.wast:3426
+assert_return(() => call($17, "load8_u", [14526]), 0);
+
+// memory_copy.wast:3427
+assert_return(() => call($17, "load8_u", [14725]), 0);
+
+// memory_copy.wast:3428
+assert_return(() => call($17, "load8_u", [14924]), 0);
+
+// memory_copy.wast:3429
+assert_return(() => call($17, "load8_u", [15123]), 0);
+
+// memory_copy.wast:3430
+assert_return(() => call($17, "load8_u", [15322]), 0);
+
+// memory_copy.wast:3431
+assert_return(() => call($17, "load8_u", [15521]), 0);
+
+// memory_copy.wast:3432
+assert_return(() => call($17, "load8_u", [15720]), 0);
+
+// memory_copy.wast:3433
+assert_return(() => call($17, "load8_u", [15919]), 0);
+
+// memory_copy.wast:3434
+assert_return(() => call($17, "load8_u", [16118]), 0);
+
+// memory_copy.wast:3435
+assert_return(() => call($17, "load8_u", [16317]), 0);
+
+// memory_copy.wast:3436
+assert_return(() => call($17, "load8_u", [16516]), 0);
+
+// memory_copy.wast:3437
+assert_return(() => call($17, "load8_u", [16715]), 0);
+
+// memory_copy.wast:3438
+assert_return(() => call($17, "load8_u", [16914]), 0);
+
+// memory_copy.wast:3439
+assert_return(() => call($17, "load8_u", [17113]), 0);
+
+// memory_copy.wast:3440
+assert_return(() => call($17, "load8_u", [17312]), 0);
+
+// memory_copy.wast:3441
+assert_return(() => call($17, "load8_u", [17511]), 0);
+
+// memory_copy.wast:3442
+assert_return(() => call($17, "load8_u", [17710]), 0);
+
+// memory_copy.wast:3443
+assert_return(() => call($17, "load8_u", [17909]), 0);
+
+// memory_copy.wast:3444
+assert_return(() => call($17, "load8_u", [18108]), 0);
+
+// memory_copy.wast:3445
+assert_return(() => call($17, "load8_u", [18307]), 0);
+
+// memory_copy.wast:3446
+assert_return(() => call($17, "load8_u", [18506]), 0);
+
+// memory_copy.wast:3447
+assert_return(() => call($17, "load8_u", [18705]), 0);
+
+// memory_copy.wast:3448
+assert_return(() => call($17, "load8_u", [18904]), 0);
+
+// memory_copy.wast:3449
+assert_return(() => call($17, "load8_u", [19103]), 0);
+
+// memory_copy.wast:3450
+assert_return(() => call($17, "load8_u", [19302]), 0);
+
+// memory_copy.wast:3451
+assert_return(() => call($17, "load8_u", [19501]), 0);
+
+// memory_copy.wast:3452
+assert_return(() => call($17, "load8_u", [19700]), 0);
+
+// memory_copy.wast:3453
+assert_return(() => call($17, "load8_u", [19899]), 0);
+
+// memory_copy.wast:3454
+assert_return(() => call($17, "load8_u", [20098]), 0);
+
+// memory_copy.wast:3455
+assert_return(() => call($17, "load8_u", [20297]), 0);
+
+// memory_copy.wast:3456
+assert_return(() => call($17, "load8_u", [20496]), 0);
+
+// memory_copy.wast:3457
+assert_return(() => call($17, "load8_u", [20695]), 0);
+
+// memory_copy.wast:3458
+assert_return(() => call($17, "load8_u", [20894]), 0);
+
+// memory_copy.wast:3459
+assert_return(() => call($17, "load8_u", [21093]), 0);
+
+// memory_copy.wast:3460
+assert_return(() => call($17, "load8_u", [21292]), 0);
+
+// memory_copy.wast:3461
+assert_return(() => call($17, "load8_u", [21491]), 0);
+
+// memory_copy.wast:3462
+assert_return(() => call($17, "load8_u", [21690]), 0);
+
+// memory_copy.wast:3463
+assert_return(() => call($17, "load8_u", [21889]), 0);
+
+// memory_copy.wast:3464
+assert_return(() => call($17, "load8_u", [22088]), 0);
+
+// memory_copy.wast:3465
+assert_return(() => call($17, "load8_u", [22287]), 0);
+
+// memory_copy.wast:3466
+assert_return(() => call($17, "load8_u", [22486]), 0);
+
+// memory_copy.wast:3467
+assert_return(() => call($17, "load8_u", [22685]), 0);
+
+// memory_copy.wast:3468
+assert_return(() => call($17, "load8_u", [22884]), 0);
+
+// memory_copy.wast:3469
+assert_return(() => call($17, "load8_u", [23083]), 0);
+
+// memory_copy.wast:3470
+assert_return(() => call($17, "load8_u", [23282]), 0);
+
+// memory_copy.wast:3471
+assert_return(() => call($17, "load8_u", [23481]), 0);
+
+// memory_copy.wast:3472
+assert_return(() => call($17, "load8_u", [23680]), 0);
+
+// memory_copy.wast:3473
+assert_return(() => call($17, "load8_u", [23879]), 0);
+
+// memory_copy.wast:3474
+assert_return(() => call($17, "load8_u", [24078]), 0);
+
+// memory_copy.wast:3475
+assert_return(() => call($17, "load8_u", [24277]), 0);
+
+// memory_copy.wast:3476
+assert_return(() => call($17, "load8_u", [24476]), 0);
+
+// memory_copy.wast:3477
+assert_return(() => call($17, "load8_u", [24675]), 0);
+
+// memory_copy.wast:3478
+assert_return(() => call($17, "load8_u", [24874]), 0);
+
+// memory_copy.wast:3479
+assert_return(() => call($17, "load8_u", [25073]), 0);
+
+// memory_copy.wast:3480
+assert_return(() => call($17, "load8_u", [25272]), 0);
+
+// memory_copy.wast:3481
+assert_return(() => call($17, "load8_u", [25471]), 0);
+
+// memory_copy.wast:3482
+assert_return(() => call($17, "load8_u", [25670]), 0);
+
+// memory_copy.wast:3483
+assert_return(() => call($17, "load8_u", [25869]), 0);
+
+// memory_copy.wast:3484
+assert_return(() => call($17, "load8_u", [26068]), 0);
+
+// memory_copy.wast:3485
+assert_return(() => call($17, "load8_u", [26267]), 0);
+
+// memory_copy.wast:3486
+assert_return(() => call($17, "load8_u", [26466]), 0);
+
+// memory_copy.wast:3487
+assert_return(() => call($17, "load8_u", [26665]), 0);
+
+// memory_copy.wast:3488
+assert_return(() => call($17, "load8_u", [26864]), 0);
+
+// memory_copy.wast:3489
+assert_return(() => call($17, "load8_u", [27063]), 0);
+
+// memory_copy.wast:3490
+assert_return(() => call($17, "load8_u", [27262]), 0);
+
+// memory_copy.wast:3491
+assert_return(() => call($17, "load8_u", [27461]), 0);
+
+// memory_copy.wast:3492
+assert_return(() => call($17, "load8_u", [27660]), 0);
+
+// memory_copy.wast:3493
+assert_return(() => call($17, "load8_u", [27859]), 0);
+
+// memory_copy.wast:3494
+assert_return(() => call($17, "load8_u", [28058]), 0);
+
+// memory_copy.wast:3495
+assert_return(() => call($17, "load8_u", [28257]), 0);
+
+// memory_copy.wast:3496
+assert_return(() => call($17, "load8_u", [28456]), 0);
+
+// memory_copy.wast:3497
+assert_return(() => call($17, "load8_u", [28655]), 0);
+
+// memory_copy.wast:3498
+assert_return(() => call($17, "load8_u", [28854]), 0);
+
+// memory_copy.wast:3499
+assert_return(() => call($17, "load8_u", [29053]), 0);
+
+// memory_copy.wast:3500
+assert_return(() => call($17, "load8_u", [29252]), 0);
+
+// memory_copy.wast:3501
+assert_return(() => call($17, "load8_u", [29451]), 0);
+
+// memory_copy.wast:3502
+assert_return(() => call($17, "load8_u", [29650]), 0);
+
+// memory_copy.wast:3503
+assert_return(() => call($17, "load8_u", [29849]), 0);
+
+// memory_copy.wast:3504
+assert_return(() => call($17, "load8_u", [30048]), 0);
+
+// memory_copy.wast:3505
+assert_return(() => call($17, "load8_u", [30247]), 0);
+
+// memory_copy.wast:3506
+assert_return(() => call($17, "load8_u", [30446]), 0);
+
+// memory_copy.wast:3507
+assert_return(() => call($17, "load8_u", [30645]), 0);
+
+// memory_copy.wast:3508
+assert_return(() => call($17, "load8_u", [30844]), 0);
+
+// memory_copy.wast:3509
+assert_return(() => call($17, "load8_u", [31043]), 0);
+
+// memory_copy.wast:3510
+assert_return(() => call($17, "load8_u", [31242]), 0);
+
+// memory_copy.wast:3511
+assert_return(() => call($17, "load8_u", [31441]), 0);
+
+// memory_copy.wast:3512
+assert_return(() => call($17, "load8_u", [31640]), 0);
+
+// memory_copy.wast:3513
+assert_return(() => call($17, "load8_u", [31839]), 0);
+
+// memory_copy.wast:3514
+assert_return(() => call($17, "load8_u", [32038]), 0);
+
+// memory_copy.wast:3515
+assert_return(() => call($17, "load8_u", [32237]), 0);
+
+// memory_copy.wast:3516
+assert_return(() => call($17, "load8_u", [32436]), 0);
+
+// memory_copy.wast:3517
+assert_return(() => call($17, "load8_u", [32635]), 0);
+
+// memory_copy.wast:3518
+assert_return(() => call($17, "load8_u", [32834]), 0);
+
+// memory_copy.wast:3519
+assert_return(() => call($17, "load8_u", [33033]), 0);
+
+// memory_copy.wast:3520
+assert_return(() => call($17, "load8_u", [33232]), 0);
+
+// memory_copy.wast:3521
+assert_return(() => call($17, "load8_u", [33431]), 0);
+
+// memory_copy.wast:3522
+assert_return(() => call($17, "load8_u", [33630]), 0);
+
+// memory_copy.wast:3523
+assert_return(() => call($17, "load8_u", [33829]), 0);
+
+// memory_copy.wast:3524
+assert_return(() => call($17, "load8_u", [34028]), 0);
+
+// memory_copy.wast:3525
+assert_return(() => call($17, "load8_u", [34227]), 0);
+
+// memory_copy.wast:3526
+assert_return(() => call($17, "load8_u", [34426]), 0);
+
+// memory_copy.wast:3527
+assert_return(() => call($17, "load8_u", [34625]), 0);
+
+// memory_copy.wast:3528
+assert_return(() => call($17, "load8_u", [34824]), 0);
+
+// memory_copy.wast:3529
+assert_return(() => call($17, "load8_u", [35023]), 0);
+
+// memory_copy.wast:3530
+assert_return(() => call($17, "load8_u", [35222]), 0);
+
+// memory_copy.wast:3531
+assert_return(() => call($17, "load8_u", [35421]), 0);
+
+// memory_copy.wast:3532
+assert_return(() => call($17, "load8_u", [35620]), 0);
+
+// memory_copy.wast:3533
+assert_return(() => call($17, "load8_u", [35819]), 0);
+
+// memory_copy.wast:3534
+assert_return(() => call($17, "load8_u", [36018]), 0);
+
+// memory_copy.wast:3535
+assert_return(() => call($17, "load8_u", [36217]), 0);
+
+// memory_copy.wast:3536
+assert_return(() => call($17, "load8_u", [36416]), 0);
+
+// memory_copy.wast:3537
+assert_return(() => call($17, "load8_u", [36615]), 0);
+
+// memory_copy.wast:3538
+assert_return(() => call($17, "load8_u", [36814]), 0);
+
+// memory_copy.wast:3539
+assert_return(() => call($17, "load8_u", [37013]), 0);
+
+// memory_copy.wast:3540
+assert_return(() => call($17, "load8_u", [37212]), 0);
+
+// memory_copy.wast:3541
+assert_return(() => call($17, "load8_u", [37411]), 0);
+
+// memory_copy.wast:3542
+assert_return(() => call($17, "load8_u", [37610]), 0);
+
+// memory_copy.wast:3543
+assert_return(() => call($17, "load8_u", [37809]), 0);
+
+// memory_copy.wast:3544
+assert_return(() => call($17, "load8_u", [38008]), 0);
+
+// memory_copy.wast:3545
+assert_return(() => call($17, "load8_u", [38207]), 0);
+
+// memory_copy.wast:3546
+assert_return(() => call($17, "load8_u", [38406]), 0);
+
+// memory_copy.wast:3547
+assert_return(() => call($17, "load8_u", [38605]), 0);
+
+// memory_copy.wast:3548
+assert_return(() => call($17, "load8_u", [38804]), 0);
+
+// memory_copy.wast:3549
+assert_return(() => call($17, "load8_u", [39003]), 0);
+
+// memory_copy.wast:3550
+assert_return(() => call($17, "load8_u", [39202]), 0);
+
+// memory_copy.wast:3551
+assert_return(() => call($17, "load8_u", [39401]), 0);
+
+// memory_copy.wast:3552
+assert_return(() => call($17, "load8_u", [39600]), 0);
+
+// memory_copy.wast:3553
+assert_return(() => call($17, "load8_u", [39799]), 0);
+
+// memory_copy.wast:3554
+assert_return(() => call($17, "load8_u", [39998]), 0);
+
+// memory_copy.wast:3555
+assert_return(() => call($17, "load8_u", [40197]), 0);
+
+// memory_copy.wast:3556
+assert_return(() => call($17, "load8_u", [40396]), 0);
+
+// memory_copy.wast:3557
+assert_return(() => call($17, "load8_u", [40595]), 0);
+
+// memory_copy.wast:3558
+assert_return(() => call($17, "load8_u", [40794]), 0);
+
+// memory_copy.wast:3559
+assert_return(() => call($17, "load8_u", [40993]), 0);
+
+// memory_copy.wast:3560
+assert_return(() => call($17, "load8_u", [41192]), 0);
+
+// memory_copy.wast:3561
+assert_return(() => call($17, "load8_u", [41391]), 0);
+
+// memory_copy.wast:3562
+assert_return(() => call($17, "load8_u", [41590]), 0);
+
+// memory_copy.wast:3563
+assert_return(() => call($17, "load8_u", [41789]), 0);
+
+// memory_copy.wast:3564
+assert_return(() => call($17, "load8_u", [41988]), 0);
+
+// memory_copy.wast:3565
+assert_return(() => call($17, "load8_u", [42187]), 0);
+
+// memory_copy.wast:3566
+assert_return(() => call($17, "load8_u", [42386]), 0);
+
+// memory_copy.wast:3567
+assert_return(() => call($17, "load8_u", [42585]), 0);
+
+// memory_copy.wast:3568
+assert_return(() => call($17, "load8_u", [42784]), 0);
+
+// memory_copy.wast:3569
+assert_return(() => call($17, "load8_u", [42983]), 0);
+
+// memory_copy.wast:3570
+assert_return(() => call($17, "load8_u", [43182]), 0);
+
+// memory_copy.wast:3571
+assert_return(() => call($17, "load8_u", [43381]), 0);
+
+// memory_copy.wast:3572
+assert_return(() => call($17, "load8_u", [43580]), 0);
+
+// memory_copy.wast:3573
+assert_return(() => call($17, "load8_u", [43779]), 0);
+
+// memory_copy.wast:3574
+assert_return(() => call($17, "load8_u", [43978]), 0);
+
+// memory_copy.wast:3575
+assert_return(() => call($17, "load8_u", [44177]), 0);
+
+// memory_copy.wast:3576
+assert_return(() => call($17, "load8_u", [44376]), 0);
+
+// memory_copy.wast:3577
+assert_return(() => call($17, "load8_u", [44575]), 0);
+
+// memory_copy.wast:3578
+assert_return(() => call($17, "load8_u", [44774]), 0);
+
+// memory_copy.wast:3579
+assert_return(() => call($17, "load8_u", [44973]), 0);
+
+// memory_copy.wast:3580
+assert_return(() => call($17, "load8_u", [45172]), 0);
+
+// memory_copy.wast:3581
+assert_return(() => call($17, "load8_u", [45371]), 0);
+
+// memory_copy.wast:3582
+assert_return(() => call($17, "load8_u", [45570]), 0);
+
+// memory_copy.wast:3583
+assert_return(() => call($17, "load8_u", [45769]), 0);
+
+// memory_copy.wast:3584
+assert_return(() => call($17, "load8_u", [45968]), 0);
+
+// memory_copy.wast:3585
+assert_return(() => call($17, "load8_u", [46167]), 0);
+
+// memory_copy.wast:3586
+assert_return(() => call($17, "load8_u", [46366]), 0);
+
+// memory_copy.wast:3587
+assert_return(() => call($17, "load8_u", [46565]), 0);
+
+// memory_copy.wast:3588
+assert_return(() => call($17, "load8_u", [46764]), 0);
+
+// memory_copy.wast:3589
+assert_return(() => call($17, "load8_u", [46963]), 0);
+
+// memory_copy.wast:3590
+assert_return(() => call($17, "load8_u", [47162]), 0);
+
+// memory_copy.wast:3591
+assert_return(() => call($17, "load8_u", [47361]), 0);
+
+// memory_copy.wast:3592
+assert_return(() => call($17, "load8_u", [47560]), 0);
+
+// memory_copy.wast:3593
+assert_return(() => call($17, "load8_u", [47759]), 0);
+
+// memory_copy.wast:3594
+assert_return(() => call($17, "load8_u", [47958]), 0);
+
+// memory_copy.wast:3595
+assert_return(() => call($17, "load8_u", [48157]), 0);
+
+// memory_copy.wast:3596
+assert_return(() => call($17, "load8_u", [48356]), 0);
+
+// memory_copy.wast:3597
+assert_return(() => call($17, "load8_u", [48555]), 0);
+
+// memory_copy.wast:3598
+assert_return(() => call($17, "load8_u", [48754]), 0);
+
+// memory_copy.wast:3599
+assert_return(() => call($17, "load8_u", [48953]), 0);
+
+// memory_copy.wast:3600
+assert_return(() => call($17, "load8_u", [49152]), 0);
+
+// memory_copy.wast:3601
+assert_return(() => call($17, "load8_u", [49351]), 0);
+
+// memory_copy.wast:3602
+assert_return(() => call($17, "load8_u", [49550]), 0);
+
+// memory_copy.wast:3603
+assert_return(() => call($17, "load8_u", [49749]), 0);
+
+// memory_copy.wast:3604
+assert_return(() => call($17, "load8_u", [49948]), 0);
+
+// memory_copy.wast:3605
+assert_return(() => call($17, "load8_u", [50147]), 0);
+
+// memory_copy.wast:3606
+assert_return(() => call($17, "load8_u", [50346]), 0);
+
+// memory_copy.wast:3607
+assert_return(() => call($17, "load8_u", [50545]), 0);
+
+// memory_copy.wast:3608
+assert_return(() => call($17, "load8_u", [50744]), 0);
+
+// memory_copy.wast:3609
+assert_return(() => call($17, "load8_u", [50943]), 0);
+
+// memory_copy.wast:3610
+assert_return(() => call($17, "load8_u", [51142]), 0);
+
+// memory_copy.wast:3611
+assert_return(() => call($17, "load8_u", [51341]), 0);
+
+// memory_copy.wast:3612
+assert_return(() => call($17, "load8_u", [51540]), 0);
+
+// memory_copy.wast:3613
+assert_return(() => call($17, "load8_u", [51739]), 0);
+
+// memory_copy.wast:3614
+assert_return(() => call($17, "load8_u", [51938]), 0);
+
+// memory_copy.wast:3615
+assert_return(() => call($17, "load8_u", [52137]), 0);
+
+// memory_copy.wast:3616
+assert_return(() => call($17, "load8_u", [52336]), 0);
+
+// memory_copy.wast:3617
+assert_return(() => call($17, "load8_u", [52535]), 0);
+
+// memory_copy.wast:3618
+assert_return(() => call($17, "load8_u", [52734]), 0);
+
+// memory_copy.wast:3619
+assert_return(() => call($17, "load8_u", [52933]), 0);
+
+// memory_copy.wast:3620
+assert_return(() => call($17, "load8_u", [53132]), 0);
+
+// memory_copy.wast:3621
+assert_return(() => call($17, "load8_u", [53331]), 0);
+
+// memory_copy.wast:3622
+assert_return(() => call($17, "load8_u", [53530]), 0);
+
+// memory_copy.wast:3623
+assert_return(() => call($17, "load8_u", [53729]), 0);
+
+// memory_copy.wast:3624
+assert_return(() => call($17, "load8_u", [53928]), 0);
+
+// memory_copy.wast:3625
+assert_return(() => call($17, "load8_u", [54127]), 0);
+
+// memory_copy.wast:3626
+assert_return(() => call($17, "load8_u", [54326]), 0);
+
+// memory_copy.wast:3627
+assert_return(() => call($17, "load8_u", [54525]), 0);
+
+// memory_copy.wast:3628
+assert_return(() => call($17, "load8_u", [54724]), 0);
+
+// memory_copy.wast:3629
+assert_return(() => call($17, "load8_u", [54923]), 0);
+
+// memory_copy.wast:3630
+assert_return(() => call($17, "load8_u", [55122]), 0);
+
+// memory_copy.wast:3631
+assert_return(() => call($17, "load8_u", [55321]), 0);
+
+// memory_copy.wast:3632
+assert_return(() => call($17, "load8_u", [55520]), 0);
+
+// memory_copy.wast:3633
+assert_return(() => call($17, "load8_u", [55719]), 0);
+
+// memory_copy.wast:3634
+assert_return(() => call($17, "load8_u", [55918]), 0);
+
+// memory_copy.wast:3635
+assert_return(() => call($17, "load8_u", [56117]), 0);
+
+// memory_copy.wast:3636
+assert_return(() => call($17, "load8_u", [56316]), 0);
+
+// memory_copy.wast:3637
+assert_return(() => call($17, "load8_u", [56515]), 0);
+
+// memory_copy.wast:3638
+assert_return(() => call($17, "load8_u", [56714]), 0);
+
+// memory_copy.wast:3639
+assert_return(() => call($17, "load8_u", [56913]), 0);
+
+// memory_copy.wast:3640
+assert_return(() => call($17, "load8_u", [57112]), 0);
+
+// memory_copy.wast:3641
+assert_return(() => call($17, "load8_u", [57311]), 0);
+
+// memory_copy.wast:3642
+assert_return(() => call($17, "load8_u", [57510]), 0);
+
+// memory_copy.wast:3643
+assert_return(() => call($17, "load8_u", [57709]), 0);
+
+// memory_copy.wast:3644
+assert_return(() => call($17, "load8_u", [57908]), 0);
+
+// memory_copy.wast:3645
+assert_return(() => call($17, "load8_u", [58107]), 0);
+
+// memory_copy.wast:3646
+assert_return(() => call($17, "load8_u", [58306]), 0);
+
+// memory_copy.wast:3647
+assert_return(() => call($17, "load8_u", [58505]), 0);
+
+// memory_copy.wast:3648
+assert_return(() => call($17, "load8_u", [58704]), 0);
+
+// memory_copy.wast:3649
+assert_return(() => call($17, "load8_u", [58903]), 0);
+
+// memory_copy.wast:3650
+assert_return(() => call($17, "load8_u", [59102]), 0);
+
+// memory_copy.wast:3651
+assert_return(() => call($17, "load8_u", [59301]), 0);
+
+// memory_copy.wast:3652
+assert_return(() => call($17, "load8_u", [59500]), 0);
+
+// memory_copy.wast:3653
+assert_return(() => call($17, "load8_u", [59699]), 0);
+
+// memory_copy.wast:3654
+assert_return(() => call($17, "load8_u", [59898]), 0);
+
+// memory_copy.wast:3655
+assert_return(() => call($17, "load8_u", [60097]), 0);
+
+// memory_copy.wast:3656
+assert_return(() => call($17, "load8_u", [60296]), 0);
+
+// memory_copy.wast:3657
+assert_return(() => call($17, "load8_u", [60495]), 0);
+
+// memory_copy.wast:3658
+assert_return(() => call($17, "load8_u", [60694]), 0);
+
+// memory_copy.wast:3659
+assert_return(() => call($17, "load8_u", [60893]), 0);
+
+// memory_copy.wast:3660
+assert_return(() => call($17, "load8_u", [61092]), 0);
+
+// memory_copy.wast:3661
+assert_return(() => call($17, "load8_u", [61291]), 0);
+
+// memory_copy.wast:3662
+assert_return(() => call($17, "load8_u", [61490]), 0);
+
+// memory_copy.wast:3663
+assert_return(() => call($17, "load8_u", [61689]), 0);
+
+// memory_copy.wast:3664
+assert_return(() => call($17, "load8_u", [61888]), 0);
+
+// memory_copy.wast:3665
+assert_return(() => call($17, "load8_u", [62087]), 0);
+
+// memory_copy.wast:3666
+assert_return(() => call($17, "load8_u", [62286]), 0);
+
+// memory_copy.wast:3667
+assert_return(() => call($17, "load8_u", [62485]), 0);
+
+// memory_copy.wast:3668
+assert_return(() => call($17, "load8_u", [62684]), 0);
+
+// memory_copy.wast:3669
+assert_return(() => call($17, "load8_u", [62883]), 0);
+
+// memory_copy.wast:3670
+assert_return(() => call($17, "load8_u", [63082]), 0);
+
+// memory_copy.wast:3671
+assert_return(() => call($17, "load8_u", [63281]), 0);
+
+// memory_copy.wast:3672
+assert_return(() => call($17, "load8_u", [63480]), 0);
+
+// memory_copy.wast:3673
+assert_return(() => call($17, "load8_u", [63679]), 0);
+
+// memory_copy.wast:3674
+assert_return(() => call($17, "load8_u", [63878]), 0);
+
+// memory_copy.wast:3675
+assert_return(() => call($17, "load8_u", [64077]), 0);
+
+// memory_copy.wast:3676
+assert_return(() => call($17, "load8_u", [64276]), 0);
+
+// memory_copy.wast:3677
+assert_return(() => call($17, "load8_u", [64475]), 0);
+
+// memory_copy.wast:3678
+assert_return(() => call($17, "load8_u", [64674]), 0);
+
+// memory_copy.wast:3679
+assert_return(() => call($17, "load8_u", [64873]), 0);
+
+// memory_copy.wast:3680
+assert_return(() => call($17, "load8_u", [65072]), 0);
+
+// memory_copy.wast:3681
+assert_return(() => call($17, "load8_u", [65271]), 0);
+
+// memory_copy.wast:3682
+assert_return(() => call($17, "load8_u", [65470]), 0);
+
+// memory_copy.wast:3683
+assert_return(() => call($17, "load8_u", [65516]), 0);
+
+// memory_copy.wast:3684
+assert_return(() => call($17, "load8_u", [65517]), 1);
+
+// memory_copy.wast:3685
+assert_return(() => call($17, "load8_u", [65518]), 2);
+
+// memory_copy.wast:3686
+assert_return(() => call($17, "load8_u", [65519]), 3);
+
+// memory_copy.wast:3687
+assert_return(() => call($17, "load8_u", [65520]), 4);
+
+// memory_copy.wast:3688
+assert_return(() => call($17, "load8_u", [65521]), 5);
+
+// memory_copy.wast:3689
+assert_return(() => call($17, "load8_u", [65522]), 6);
+
+// memory_copy.wast:3690
+assert_return(() => call($17, "load8_u", [65523]), 7);
+
+// memory_copy.wast:3691
+assert_return(() => call($17, "load8_u", [65524]), 8);
+
+// memory_copy.wast:3692
+assert_return(() => call($17, "load8_u", [65525]), 9);
+
+// memory_copy.wast:3693
+assert_return(() => call($17, "load8_u", [65526]), 10);
+
+// memory_copy.wast:3694
+assert_return(() => call($17, "load8_u", [65527]), 11);
+
+// memory_copy.wast:3695
+assert_return(() => call($17, "load8_u", [65528]), 12);
+
+// memory_copy.wast:3696
+assert_return(() => call($17, "load8_u", [65529]), 13);
+
+// memory_copy.wast:3697
+assert_return(() => call($17, "load8_u", [65530]), 14);
+
+// memory_copy.wast:3698
+assert_return(() => call($17, "load8_u", [65531]), 15);
+
+// memory_copy.wast:3699
+assert_return(() => call($17, "load8_u", [65532]), 16);
+
+// memory_copy.wast:3700
+assert_return(() => call($17, "load8_u", [65533]), 17);
+
+// memory_copy.wast:3701
+assert_return(() => call($17, "load8_u", [65534]), 18);
+
+// memory_copy.wast:3702
+assert_return(() => call($17, "load8_u", [65535]), 19);
+
+// memory_copy.wast:3704
+let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\xec\xff\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
+
+// memory_copy.wast:3712
+assert_trap(() => call($18, "run", [0, 65516, -4096]));
+
+// memory_copy.wast:3715
+assert_return(() => call($18, "load8_u", [0]), 0);
+
+// memory_copy.wast:3716
+assert_return(() => call($18, "load8_u", [1]), 1);
+
+// memory_copy.wast:3717
+assert_return(() => call($18, "load8_u", [2]), 2);
+
+// memory_copy.wast:3718
+assert_return(() => call($18, "load8_u", [3]), 3);
+
+// memory_copy.wast:3719
+assert_return(() => call($18, "load8_u", [4]), 4);
+
+// memory_copy.wast:3720
+assert_return(() => call($18, "load8_u", [5]), 5);
+
+// memory_copy.wast:3721
+assert_return(() => call($18, "load8_u", [6]), 6);
+
+// memory_copy.wast:3722
+assert_return(() => call($18, "load8_u", [7]), 7);
+
+// memory_copy.wast:3723
+assert_return(() => call($18, "load8_u", [8]), 8);
+
+// memory_copy.wast:3724
+assert_return(() => call($18, "load8_u", [9]), 9);
+
+// memory_copy.wast:3725
+assert_return(() => call($18, "load8_u", [10]), 10);
+
+// memory_copy.wast:3726
+assert_return(() => call($18, "load8_u", [11]), 11);
+
+// memory_copy.wast:3727
+assert_return(() => call($18, "load8_u", [12]), 12);
+
+// memory_copy.wast:3728
+assert_return(() => call($18, "load8_u", [13]), 13);
+
+// memory_copy.wast:3729
+assert_return(() => call($18, "load8_u", [14]), 14);
+
+// memory_copy.wast:3730
+assert_return(() => call($18, "load8_u", [15]), 15);
+
+// memory_copy.wast:3731
+assert_return(() => call($18, "load8_u", [16]), 16);
+
+// memory_copy.wast:3732
+assert_return(() => call($18, "load8_u", [17]), 17);
+
+// memory_copy.wast:3733
+assert_return(() => call($18, "load8_u", [18]), 18);
+
+// memory_copy.wast:3734
+assert_return(() => call($18, "load8_u", [19]), 19);
+
+// memory_copy.wast:3735
+assert_return(() => call($18, "load8_u", [218]), 0);
+
+// memory_copy.wast:3736
+assert_return(() => call($18, "load8_u", [417]), 0);
+
+// memory_copy.wast:3737
+assert_return(() => call($18, "load8_u", [616]), 0);
+
+// memory_copy.wast:3738
+assert_return(() => call($18, "load8_u", [815]), 0);
+
+// memory_copy.wast:3739
+assert_return(() => call($18, "load8_u", [1014]), 0);
+
+// memory_copy.wast:3740
+assert_return(() => call($18, "load8_u", [1213]), 0);
+
+// memory_copy.wast:3741
+assert_return(() => call($18, "load8_u", [1412]), 0);
+
+// memory_copy.wast:3742
+assert_return(() => call($18, "load8_u", [1611]), 0);
+
+// memory_copy.wast:3743
+assert_return(() => call($18, "load8_u", [1810]), 0);
+
+// memory_copy.wast:3744
+assert_return(() => call($18, "load8_u", [2009]), 0);
+
+// memory_copy.wast:3745
+assert_return(() => call($18, "load8_u", [2208]), 0);
+
+// memory_copy.wast:3746
+assert_return(() => call($18, "load8_u", [2407]), 0);
+
+// memory_copy.wast:3747
+assert_return(() => call($18, "load8_u", [2606]), 0);
+
+// memory_copy.wast:3748
+assert_return(() => call($18, "load8_u", [2805]), 0);
+
+// memory_copy.wast:3749
+assert_return(() => call($18, "load8_u", [3004]), 0);
+
+// memory_copy.wast:3750
+assert_return(() => call($18, "load8_u", [3203]), 0);
+
+// memory_copy.wast:3751
+assert_return(() => call($18, "load8_u", [3402]), 0);
+
+// memory_copy.wast:3752
+assert_return(() => call($18, "load8_u", [3601]), 0);
+
+// memory_copy.wast:3753
+assert_return(() => call($18, "load8_u", [3800]), 0);
+
+// memory_copy.wast:3754
+assert_return(() => call($18, "load8_u", [3999]), 0);
+
+// memory_copy.wast:3755
+assert_return(() => call($18, "load8_u", [4198]), 0);
+
+// memory_copy.wast:3756
+assert_return(() => call($18, "load8_u", [4397]), 0);
+
+// memory_copy.wast:3757
+assert_return(() => call($18, "load8_u", [4596]), 0);
+
+// memory_copy.wast:3758
+assert_return(() => call($18, "load8_u", [4795]), 0);
+
+// memory_copy.wast:3759
+assert_return(() => call($18, "load8_u", [4994]), 0);
+
+// memory_copy.wast:3760
+assert_return(() => call($18, "load8_u", [5193]), 0);
+
+// memory_copy.wast:3761
+assert_return(() => call($18, "load8_u", [5392]), 0);
+
+// memory_copy.wast:3762
+assert_return(() => call($18, "load8_u", [5591]), 0);
+
+// memory_copy.wast:3763
+assert_return(() => call($18, "load8_u", [5790]), 0);
+
+// memory_copy.wast:3764
+assert_return(() => call($18, "load8_u", [5989]), 0);
+
+// memory_copy.wast:3765
+assert_return(() => call($18, "load8_u", [6188]), 0);
+
+// memory_copy.wast:3766
+assert_return(() => call($18, "load8_u", [6387]), 0);
+
+// memory_copy.wast:3767
+assert_return(() => call($18, "load8_u", [6586]), 0);
+
+// memory_copy.wast:3768
+assert_return(() => call($18, "load8_u", [6785]), 0);
+
+// memory_copy.wast:3769
+assert_return(() => call($18, "load8_u", [6984]), 0);
+
+// memory_copy.wast:3770
+assert_return(() => call($18, "load8_u", [7183]), 0);
+
+// memory_copy.wast:3771
+assert_return(() => call($18, "load8_u", [7382]), 0);
+
+// memory_copy.wast:3772
+assert_return(() => call($18, "load8_u", [7581]), 0);
+
+// memory_copy.wast:3773
+assert_return(() => call($18, "load8_u", [7780]), 0);
+
+// memory_copy.wast:3774
+assert_return(() => call($18, "load8_u", [7979]), 0);
+
+// memory_copy.wast:3775
+assert_return(() => call($18, "load8_u", [8178]), 0);
+
+// memory_copy.wast:3776
+assert_return(() => call($18, "load8_u", [8377]), 0);
+
+// memory_copy.wast:3777
+assert_return(() => call($18, "load8_u", [8576]), 0);
+
+// memory_copy.wast:3778
+assert_return(() => call($18, "load8_u", [8775]), 0);
+
+// memory_copy.wast:3779
+assert_return(() => call($18, "load8_u", [8974]), 0);
+
+// memory_copy.wast:3780
+assert_return(() => call($18, "load8_u", [9173]), 0);
+
+// memory_copy.wast:3781
+assert_return(() => call($18, "load8_u", [9372]), 0);
+
+// memory_copy.wast:3782
+assert_return(() => call($18, "load8_u", [9571]), 0);
+
+// memory_copy.wast:3783
+assert_return(() => call($18, "load8_u", [9770]), 0);
+
+// memory_copy.wast:3784
+assert_return(() => call($18, "load8_u", [9969]), 0);
+
+// memory_copy.wast:3785
+assert_return(() => call($18, "load8_u", [10168]), 0);
+
+// memory_copy.wast:3786
+assert_return(() => call($18, "load8_u", [10367]), 0);
+
+// memory_copy.wast:3787
+assert_return(() => call($18, "load8_u", [10566]), 0);
+
+// memory_copy.wast:3788
+assert_return(() => call($18, "load8_u", [10765]), 0);
+
+// memory_copy.wast:3789
+assert_return(() => call($18, "load8_u", [10964]), 0);
+
+// memory_copy.wast:3790
+assert_return(() => call($18, "load8_u", [11163]), 0);
+
+// memory_copy.wast:3791
+assert_return(() => call($18, "load8_u", [11362]), 0);
+
+// memory_copy.wast:3792
+assert_return(() => call($18, "load8_u", [11561]), 0);
+
+// memory_copy.wast:3793
+assert_return(() => call($18, "load8_u", [11760]), 0);
+
+// memory_copy.wast:3794
+assert_return(() => call($18, "load8_u", [11959]), 0);
+
+// memory_copy.wast:3795
+assert_return(() => call($18, "load8_u", [12158]), 0);
+
+// memory_copy.wast:3796
+assert_return(() => call($18, "load8_u", [12357]), 0);
+
+// memory_copy.wast:3797
+assert_return(() => call($18, "load8_u", [12556]), 0);
+
+// memory_copy.wast:3798
+assert_return(() => call($18, "load8_u", [12755]), 0);
+
+// memory_copy.wast:3799
+assert_return(() => call($18, "load8_u", [12954]), 0);
+
+// memory_copy.wast:3800
+assert_return(() => call($18, "load8_u", [13153]), 0);
+
+// memory_copy.wast:3801
+assert_return(() => call($18, "load8_u", [13352]), 0);
+
+// memory_copy.wast:3802
+assert_return(() => call($18, "load8_u", [13551]), 0);
+
+// memory_copy.wast:3803
+assert_return(() => call($18, "load8_u", [13750]), 0);
+
+// memory_copy.wast:3804
+assert_return(() => call($18, "load8_u", [13949]), 0);
+
+// memory_copy.wast:3805
+assert_return(() => call($18, "load8_u", [14148]), 0);
+
+// memory_copy.wast:3806
+assert_return(() => call($18, "load8_u", [14347]), 0);
+
+// memory_copy.wast:3807
+assert_return(() => call($18, "load8_u", [14546]), 0);
+
+// memory_copy.wast:3808
+assert_return(() => call($18, "load8_u", [14745]), 0);
+
+// memory_copy.wast:3809
+assert_return(() => call($18, "load8_u", [14944]), 0);
+
+// memory_copy.wast:3810
+assert_return(() => call($18, "load8_u", [15143]), 0);
+
+// memory_copy.wast:3811
+assert_return(() => call($18, "load8_u", [15342]), 0);
+
+// memory_copy.wast:3812
+assert_return(() => call($18, "load8_u", [15541]), 0);
+
+// memory_copy.wast:3813
+assert_return(() => call($18, "load8_u", [15740]), 0);
+
+// memory_copy.wast:3814
+assert_return(() => call($18, "load8_u", [15939]), 0);
+
+// memory_copy.wast:3815
+assert_return(() => call($18, "load8_u", [16138]), 0);
+
+// memory_copy.wast:3816
+assert_return(() => call($18, "load8_u", [16337]), 0);
+
+// memory_copy.wast:3817
+assert_return(() => call($18, "load8_u", [16536]), 0);
+
+// memory_copy.wast:3818
+assert_return(() => call($18, "load8_u", [16735]), 0);
+
+// memory_copy.wast:3819
+assert_return(() => call($18, "load8_u", [16934]), 0);
+
+// memory_copy.wast:3820
+assert_return(() => call($18, "load8_u", [17133]), 0);
+
+// memory_copy.wast:3821
+assert_return(() => call($18, "load8_u", [17332]), 0);
+
+// memory_copy.wast:3822
+assert_return(() => call($18, "load8_u", [17531]), 0);
+
+// memory_copy.wast:3823
+assert_return(() => call($18, "load8_u", [17730]), 0);
+
+// memory_copy.wast:3824
+assert_return(() => call($18, "load8_u", [17929]), 0);
+
+// memory_copy.wast:3825
+assert_return(() => call($18, "load8_u", [18128]), 0);
+
+// memory_copy.wast:3826
+assert_return(() => call($18, "load8_u", [18327]), 0);
+
+// memory_copy.wast:3827
+assert_return(() => call($18, "load8_u", [18526]), 0);
+
+// memory_copy.wast:3828
+assert_return(() => call($18, "load8_u", [18725]), 0);
+
+// memory_copy.wast:3829
+assert_return(() => call($18, "load8_u", [18924]), 0);
+
+// memory_copy.wast:3830
+assert_return(() => call($18, "load8_u", [19123]), 0);
+
+// memory_copy.wast:3831
+assert_return(() => call($18, "load8_u", [19322]), 0);
+
+// memory_copy.wast:3832
+assert_return(() => call($18, "load8_u", [19521]), 0);
+
+// memory_copy.wast:3833
+assert_return(() => call($18, "load8_u", [19720]), 0);
+
+// memory_copy.wast:3834
+assert_return(() => call($18, "load8_u", [19919]), 0);
+
+// memory_copy.wast:3835
+assert_return(() => call($18, "load8_u", [20118]), 0);
+
+// memory_copy.wast:3836
+assert_return(() => call($18, "load8_u", [20317]), 0);
+
+// memory_copy.wast:3837
+assert_return(() => call($18, "load8_u", [20516]), 0);
+
+// memory_copy.wast:3838
+assert_return(() => call($18, "load8_u", [20715]), 0);
+
+// memory_copy.wast:3839
+assert_return(() => call($18, "load8_u", [20914]), 0);
+
+// memory_copy.wast:3840
+assert_return(() => call($18, "load8_u", [21113]), 0);
+
+// memory_copy.wast:3841
+assert_return(() => call($18, "load8_u", [21312]), 0);
+
+// memory_copy.wast:3842
+assert_return(() => call($18, "load8_u", [21511]), 0);
+
+// memory_copy.wast:3843
+assert_return(() => call($18, "load8_u", [21710]), 0);
+
+// memory_copy.wast:3844
+assert_return(() => call($18, "load8_u", [21909]), 0);
+
+// memory_copy.wast:3845
+assert_return(() => call($18, "load8_u", [22108]), 0);
+
+// memory_copy.wast:3846
+assert_return(() => call($18, "load8_u", [22307]), 0);
+
+// memory_copy.wast:3847
+assert_return(() => call($18, "load8_u", [22506]), 0);
+
+// memory_copy.wast:3848
+assert_return(() => call($18, "load8_u", [22705]), 0);
+
+// memory_copy.wast:3849
+assert_return(() => call($18, "load8_u", [22904]), 0);
+
+// memory_copy.wast:3850
+assert_return(() => call($18, "load8_u", [23103]), 0);
+
+// memory_copy.wast:3851
+assert_return(() => call($18, "load8_u", [23302]), 0);
+
+// memory_copy.wast:3852
+assert_return(() => call($18, "load8_u", [23501]), 0);
+
+// memory_copy.wast:3853
+assert_return(() => call($18, "load8_u", [23700]), 0);
+
+// memory_copy.wast:3854
+assert_return(() => call($18, "load8_u", [23899]), 0);
+
+// memory_copy.wast:3855
+assert_return(() => call($18, "load8_u", [24098]), 0);
+
+// memory_copy.wast:3856
+assert_return(() => call($18, "load8_u", [24297]), 0);
+
+// memory_copy.wast:3857
+assert_return(() => call($18, "load8_u", [24496]), 0);
+
+// memory_copy.wast:3858
+assert_return(() => call($18, "load8_u", [24695]), 0);
+
+// memory_copy.wast:3859
+assert_return(() => call($18, "load8_u", [24894]), 0);
+
+// memory_copy.wast:3860
+assert_return(() => call($18, "load8_u", [25093]), 0);
+
+// memory_copy.wast:3861
+assert_return(() => call($18, "load8_u", [25292]), 0);
+
+// memory_copy.wast:3862
+assert_return(() => call($18, "load8_u", [25491]), 0);
+
+// memory_copy.wast:3863
+assert_return(() => call($18, "load8_u", [25690]), 0);
+
+// memory_copy.wast:3864
+assert_return(() => call($18, "load8_u", [25889]), 0);
+
+// memory_copy.wast:3865
+assert_return(() => call($18, "load8_u", [26088]), 0);
+
+// memory_copy.wast:3866
+assert_return(() => call($18, "load8_u", [26287]), 0);
+
+// memory_copy.wast:3867
+assert_return(() => call($18, "load8_u", [26486]), 0);
+
+// memory_copy.wast:3868
+assert_return(() => call($18, "load8_u", [26685]), 0);
+
+// memory_copy.wast:3869
+assert_return(() => call($18, "load8_u", [26884]), 0);
+
+// memory_copy.wast:3870
+assert_return(() => call($18, "load8_u", [27083]), 0);
+
+// memory_copy.wast:3871
+assert_return(() => call($18, "load8_u", [27282]), 0);
+
+// memory_copy.wast:3872
+assert_return(() => call($18, "load8_u", [27481]), 0);
+
+// memory_copy.wast:3873
+assert_return(() => call($18, "load8_u", [27680]), 0);
+
+// memory_copy.wast:3874
+assert_return(() => call($18, "load8_u", [27879]), 0);
+
+// memory_copy.wast:3875
+assert_return(() => call($18, "load8_u", [28078]), 0);
+
+// memory_copy.wast:3876
+assert_return(() => call($18, "load8_u", [28277]), 0);
+
+// memory_copy.wast:3877
+assert_return(() => call($18, "load8_u", [28476]), 0);
+
+// memory_copy.wast:3878
+assert_return(() => call($18, "load8_u", [28675]), 0);
+
+// memory_copy.wast:3879
+assert_return(() => call($18, "load8_u", [28874]), 0);
+
+// memory_copy.wast:3880
+assert_return(() => call($18, "load8_u", [29073]), 0);
+
+// memory_copy.wast:3881
+assert_return(() => call($18, "load8_u", [29272]), 0);
+
+// memory_copy.wast:3882
+assert_return(() => call($18, "load8_u", [29471]), 0);
+
+// memory_copy.wast:3883
+assert_return(() => call($18, "load8_u", [29670]), 0);
+
+// memory_copy.wast:3884
+assert_return(() => call($18, "load8_u", [29869]), 0);
+
+// memory_copy.wast:3885
+assert_return(() => call($18, "load8_u", [30068]), 0);
+
+// memory_copy.wast:3886
+assert_return(() => call($18, "load8_u", [30267]), 0);
+
+// memory_copy.wast:3887
+assert_return(() => call($18, "load8_u", [30466]), 0);
+
+// memory_copy.wast:3888
+assert_return(() => call($18, "load8_u", [30665]), 0);
+
+// memory_copy.wast:3889
+assert_return(() => call($18, "load8_u", [30864]), 0);
+
+// memory_copy.wast:3890
+assert_return(() => call($18, "load8_u", [31063]), 0);
+
+// memory_copy.wast:3891
+assert_return(() => call($18, "load8_u", [31262]), 0);
+
+// memory_copy.wast:3892
+assert_return(() => call($18, "load8_u", [31461]), 0);
+
+// memory_copy.wast:3893
+assert_return(() => call($18, "load8_u", [31660]), 0);
+
+// memory_copy.wast:3894
+assert_return(() => call($18, "load8_u", [31859]), 0);
+
+// memory_copy.wast:3895
+assert_return(() => call($18, "load8_u", [32058]), 0);
+
+// memory_copy.wast:3896
+assert_return(() => call($18, "load8_u", [32257]), 0);
+
+// memory_copy.wast:3897
+assert_return(() => call($18, "load8_u", [32456]), 0);
+
+// memory_copy.wast:3898
+assert_return(() => call($18, "load8_u", [32655]), 0);
+
+// memory_copy.wast:3899
+assert_return(() => call($18, "load8_u", [32854]), 0);
+
+// memory_copy.wast:3900
+assert_return(() => call($18, "load8_u", [33053]), 0);
+
+// memory_copy.wast:3901
+assert_return(() => call($18, "load8_u", [33252]), 0);
+
+// memory_copy.wast:3902
+assert_return(() => call($18, "load8_u", [33451]), 0);
+
+// memory_copy.wast:3903
+assert_return(() => call($18, "load8_u", [33650]), 0);
+
+// memory_copy.wast:3904
+assert_return(() => call($18, "load8_u", [33849]), 0);
+
+// memory_copy.wast:3905
+assert_return(() => call($18, "load8_u", [34048]), 0);
+
+// memory_copy.wast:3906
+assert_return(() => call($18, "load8_u", [34247]), 0);
+
+// memory_copy.wast:3907
+assert_return(() => call($18, "load8_u", [34446]), 0);
+
+// memory_copy.wast:3908
+assert_return(() => call($18, "load8_u", [34645]), 0);
+
+// memory_copy.wast:3909
+assert_return(() => call($18, "load8_u", [34844]), 0);
+
+// memory_copy.wast:3910
+assert_return(() => call($18, "load8_u", [35043]), 0);
+
+// memory_copy.wast:3911
+assert_return(() => call($18, "load8_u", [35242]), 0);
+
+// memory_copy.wast:3912
+assert_return(() => call($18, "load8_u", [35441]), 0);
+
+// memory_copy.wast:3913
+assert_return(() => call($18, "load8_u", [35640]), 0);
+
+// memory_copy.wast:3914
+assert_return(() => call($18, "load8_u", [35839]), 0);
+
+// memory_copy.wast:3915
+assert_return(() => call($18, "load8_u", [36038]), 0);
+
+// memory_copy.wast:3916
+assert_return(() => call($18, "load8_u", [36237]), 0);
+
+// memory_copy.wast:3917
+assert_return(() => call($18, "load8_u", [36436]), 0);
+
+// memory_copy.wast:3918
+assert_return(() => call($18, "load8_u", [36635]), 0);
+
+// memory_copy.wast:3919
+assert_return(() => call($18, "load8_u", [36834]), 0);
+
+// memory_copy.wast:3920
+assert_return(() => call($18, "load8_u", [37033]), 0);
+
+// memory_copy.wast:3921
+assert_return(() => call($18, "load8_u", [37232]), 0);
+
+// memory_copy.wast:3922
+assert_return(() => call($18, "load8_u", [37431]), 0);
+
+// memory_copy.wast:3923
+assert_return(() => call($18, "load8_u", [37630]), 0);
+
+// memory_copy.wast:3924
+assert_return(() => call($18, "load8_u", [37829]), 0);
+
+// memory_copy.wast:3925
+assert_return(() => call($18, "load8_u", [38028]), 0);
+
+// memory_copy.wast:3926
+assert_return(() => call($18, "load8_u", [38227]), 0);
+
+// memory_copy.wast:3927
+assert_return(() => call($18, "load8_u", [38426]), 0);
+
+// memory_copy.wast:3928
+assert_return(() => call($18, "load8_u", [38625]), 0);
+
+// memory_copy.wast:3929
+assert_return(() => call($18, "load8_u", [38824]), 0);
+
+// memory_copy.wast:3930
+assert_return(() => call($18, "load8_u", [39023]), 0);
+
+// memory_copy.wast:3931
+assert_return(() => call($18, "load8_u", [39222]), 0);
+
+// memory_copy.wast:3932
+assert_return(() => call($18, "load8_u", [39421]), 0);
+
+// memory_copy.wast:3933
+assert_return(() => call($18, "load8_u", [39620]), 0);
+
+// memory_copy.wast:3934
+assert_return(() => call($18, "load8_u", [39819]), 0);
+
+// memory_copy.wast:3935
+assert_return(() => call($18, "load8_u", [40018]), 0);
+
+// memory_copy.wast:3936
+assert_return(() => call($18, "load8_u", [40217]), 0);
+
+// memory_copy.wast:3937
+assert_return(() => call($18, "load8_u", [40416]), 0);
+
+// memory_copy.wast:3938
+assert_return(() => call($18, "load8_u", [40615]), 0);
+
+// memory_copy.wast:3939
+assert_return(() => call($18, "load8_u", [40814]), 0);
+
+// memory_copy.wast:3940
+assert_return(() => call($18, "load8_u", [41013]), 0);
+
+// memory_copy.wast:3941
+assert_return(() => call($18, "load8_u", [41212]), 0);
+
+// memory_copy.wast:3942
+assert_return(() => call($18, "load8_u", [41411]), 0);
+
+// memory_copy.wast:3943
+assert_return(() => call($18, "load8_u", [41610]), 0);
+
+// memory_copy.wast:3944
+assert_return(() => call($18, "load8_u", [41809]), 0);
+
+// memory_copy.wast:3945
+assert_return(() => call($18, "load8_u", [42008]), 0);
+
+// memory_copy.wast:3946
+assert_return(() => call($18, "load8_u", [42207]), 0);
+
+// memory_copy.wast:3947
+assert_return(() => call($18, "load8_u", [42406]), 0);
+
+// memory_copy.wast:3948
+assert_return(() => call($18, "load8_u", [42605]), 0);
+
+// memory_copy.wast:3949
+assert_return(() => call($18, "load8_u", [42804]), 0);
+
+// memory_copy.wast:3950
+assert_return(() => call($18, "load8_u", [43003]), 0);
+
+// memory_copy.wast:3951
+assert_return(() => call($18, "load8_u", [43202]), 0);
+
+// memory_copy.wast:3952
+assert_return(() => call($18, "load8_u", [43401]), 0);
+
+// memory_copy.wast:3953
+assert_return(() => call($18, "load8_u", [43600]), 0);
+
+// memory_copy.wast:3954
+assert_return(() => call($18, "load8_u", [43799]), 0);
+
+// memory_copy.wast:3955
+assert_return(() => call($18, "load8_u", [43998]), 0);
+
+// memory_copy.wast:3956
+assert_return(() => call($18, "load8_u", [44197]), 0);
+
+// memory_copy.wast:3957
+assert_return(() => call($18, "load8_u", [44396]), 0);
+
+// memory_copy.wast:3958
+assert_return(() => call($18, "load8_u", [44595]), 0);
+
+// memory_copy.wast:3959
+assert_return(() => call($18, "load8_u", [44794]), 0);
+
+// memory_copy.wast:3960
+assert_return(() => call($18, "load8_u", [44993]), 0);
+
+// memory_copy.wast:3961
+assert_return(() => call($18, "load8_u", [45192]), 0);
+
+// memory_copy.wast:3962
+assert_return(() => call($18, "load8_u", [45391]), 0);
+
+// memory_copy.wast:3963
+assert_return(() => call($18, "load8_u", [45590]), 0);
+
+// memory_copy.wast:3964
+assert_return(() => call($18, "load8_u", [45789]), 0);
+
+// memory_copy.wast:3965
+assert_return(() => call($18, "load8_u", [45988]), 0);
+
+// memory_copy.wast:3966
+assert_return(() => call($18, "load8_u", [46187]), 0);
+
+// memory_copy.wast:3967
+assert_return(() => call($18, "load8_u", [46386]), 0);
+
+// memory_copy.wast:3968
+assert_return(() => call($18, "load8_u", [46585]), 0);
+
+// memory_copy.wast:3969
+assert_return(() => call($18, "load8_u", [46784]), 0);
+
+// memory_copy.wast:3970
+assert_return(() => call($18, "load8_u", [46983]), 0);
+
+// memory_copy.wast:3971
+assert_return(() => call($18, "load8_u", [47182]), 0);
+
+// memory_copy.wast:3972
+assert_return(() => call($18, "load8_u", [47381]), 0);
+
+// memory_copy.wast:3973
+assert_return(() => call($18, "load8_u", [47580]), 0);
+
+// memory_copy.wast:3974
+assert_return(() => call($18, "load8_u", [47779]), 0);
+
+// memory_copy.wast:3975
+assert_return(() => call($18, "load8_u", [47978]), 0);
+
+// memory_copy.wast:3976
+assert_return(() => call($18, "load8_u", [48177]), 0);
+
+// memory_copy.wast:3977
+assert_return(() => call($18, "load8_u", [48376]), 0);
+
+// memory_copy.wast:3978
+assert_return(() => call($18, "load8_u", [48575]), 0);
+
+// memory_copy.wast:3979
+assert_return(() => call($18, "load8_u", [48774]), 0);
+
+// memory_copy.wast:3980
+assert_return(() => call($18, "load8_u", [48973]), 0);
+
+// memory_copy.wast:3981
+assert_return(() => call($18, "load8_u", [49172]), 0);
+
+// memory_copy.wast:3982
+assert_return(() => call($18, "load8_u", [49371]), 0);
+
+// memory_copy.wast:3983
+assert_return(() => call($18, "load8_u", [49570]), 0);
+
+// memory_copy.wast:3984
+assert_return(() => call($18, "load8_u", [49769]), 0);
+
+// memory_copy.wast:3985
+assert_return(() => call($18, "load8_u", [49968]), 0);
+
+// memory_copy.wast:3986
+assert_return(() => call($18, "load8_u", [50167]), 0);
+
+// memory_copy.wast:3987
+assert_return(() => call($18, "load8_u", [50366]), 0);
+
+// memory_copy.wast:3988
+assert_return(() => call($18, "load8_u", [50565]), 0);
+
+// memory_copy.wast:3989
+assert_return(() => call($18, "load8_u", [50764]), 0);
+
+// memory_copy.wast:3990
+assert_return(() => call($18, "load8_u", [50963]), 0);
+
+// memory_copy.wast:3991
+assert_return(() => call($18, "load8_u", [51162]), 0);
+
+// memory_copy.wast:3992
+assert_return(() => call($18, "load8_u", [51361]), 0);
+
+// memory_copy.wast:3993
+assert_return(() => call($18, "load8_u", [51560]), 0);
+
+// memory_copy.wast:3994
+assert_return(() => call($18, "load8_u", [51759]), 0);
+
+// memory_copy.wast:3995
+assert_return(() => call($18, "load8_u", [51958]), 0);
+
+// memory_copy.wast:3996
+assert_return(() => call($18, "load8_u", [52157]), 0);
+
+// memory_copy.wast:3997
+assert_return(() => call($18, "load8_u", [52356]), 0);
+
+// memory_copy.wast:3998
+assert_return(() => call($18, "load8_u", [52555]), 0);
+
+// memory_copy.wast:3999
+assert_return(() => call($18, "load8_u", [52754]), 0);
+
+// memory_copy.wast:4000
+assert_return(() => call($18, "load8_u", [52953]), 0);
+
+// memory_copy.wast:4001
+assert_return(() => call($18, "load8_u", [53152]), 0);
+
+// memory_copy.wast:4002
+assert_return(() => call($18, "load8_u", [53351]), 0);
+
+// memory_copy.wast:4003
+assert_return(() => call($18, "load8_u", [53550]), 0);
+
+// memory_copy.wast:4004
+assert_return(() => call($18, "load8_u", [53749]), 0);
+
+// memory_copy.wast:4005
+assert_return(() => call($18, "load8_u", [53948]), 0);
+
+// memory_copy.wast:4006
+assert_return(() => call($18, "load8_u", [54147]), 0);
+
+// memory_copy.wast:4007
+assert_return(() => call($18, "load8_u", [54346]), 0);
+
+// memory_copy.wast:4008
+assert_return(() => call($18, "load8_u", [54545]), 0);
+
+// memory_copy.wast:4009
+assert_return(() => call($18, "load8_u", [54744]), 0);
+
+// memory_copy.wast:4010
+assert_return(() => call($18, "load8_u", [54943]), 0);
+
+// memory_copy.wast:4011
+assert_return(() => call($18, "load8_u", [55142]), 0);
+
+// memory_copy.wast:4012
+assert_return(() => call($18, "load8_u", [55341]), 0);
+
+// memory_copy.wast:4013
+assert_return(() => call($18, "load8_u", [55540]), 0);
+
+// memory_copy.wast:4014
+assert_return(() => call($18, "load8_u", [55739]), 0);
+
+// memory_copy.wast:4015
+assert_return(() => call($18, "load8_u", [55938]), 0);
+
+// memory_copy.wast:4016
+assert_return(() => call($18, "load8_u", [56137]), 0);
+
+// memory_copy.wast:4017
+assert_return(() => call($18, "load8_u", [56336]), 0);
+
+// memory_copy.wast:4018
+assert_return(() => call($18, "load8_u", [56535]), 0);
+
+// memory_copy.wast:4019
+assert_return(() => call($18, "load8_u", [56734]), 0);
+
+// memory_copy.wast:4020
+assert_return(() => call($18, "load8_u", [56933]), 0);
+
+// memory_copy.wast:4021
+assert_return(() => call($18, "load8_u", [57132]), 0);
+
+// memory_copy.wast:4022
+assert_return(() => call($18, "load8_u", [57331]), 0);
+
+// memory_copy.wast:4023
+assert_return(() => call($18, "load8_u", [57530]), 0);
+
+// memory_copy.wast:4024
+assert_return(() => call($18, "load8_u", [57729]), 0);
+
+// memory_copy.wast:4025
+assert_return(() => call($18, "load8_u", [57928]), 0);
+
+// memory_copy.wast:4026
+assert_return(() => call($18, "load8_u", [58127]), 0);
+
+// memory_copy.wast:4027
+assert_return(() => call($18, "load8_u", [58326]), 0);
+
+// memory_copy.wast:4028
+assert_return(() => call($18, "load8_u", [58525]), 0);
+
+// memory_copy.wast:4029
+assert_return(() => call($18, "load8_u", [58724]), 0);
+
+// memory_copy.wast:4030
+assert_return(() => call($18, "load8_u", [58923]), 0);
+
+// memory_copy.wast:4031
+assert_return(() => call($18, "load8_u", [59122]), 0);
+
+// memory_copy.wast:4032
+assert_return(() => call($18, "load8_u", [59321]), 0);
+
+// memory_copy.wast:4033
+assert_return(() => call($18, "load8_u", [59520]), 0);
+
+// memory_copy.wast:4034
+assert_return(() => call($18, "load8_u", [59719]), 0);
+
+// memory_copy.wast:4035
+assert_return(() => call($18, "load8_u", [59918]), 0);
+
+// memory_copy.wast:4036
+assert_return(() => call($18, "load8_u", [60117]), 0);
+
+// memory_copy.wast:4037
+assert_return(() => call($18, "load8_u", [60316]), 0);
+
+// memory_copy.wast:4038
+assert_return(() => call($18, "load8_u", [60515]), 0);
+
+// memory_copy.wast:4039
+assert_return(() => call($18, "load8_u", [60714]), 0);
+
+// memory_copy.wast:4040
+assert_return(() => call($18, "load8_u", [60913]), 0);
+
+// memory_copy.wast:4041
+assert_return(() => call($18, "load8_u", [61112]), 0);
+
+// memory_copy.wast:4042
+assert_return(() => call($18, "load8_u", [61311]), 0);
+
+// memory_copy.wast:4043
+assert_return(() => call($18, "load8_u", [61510]), 0);
+
+// memory_copy.wast:4044
+assert_return(() => call($18, "load8_u", [61709]), 0);
+
+// memory_copy.wast:4045
+assert_return(() => call($18, "load8_u", [61908]), 0);
+
+// memory_copy.wast:4046
+assert_return(() => call($18, "load8_u", [62107]), 0);
+
+// memory_copy.wast:4047
+assert_return(() => call($18, "load8_u", [62306]), 0);
+
+// memory_copy.wast:4048
+assert_return(() => call($18, "load8_u", [62505]), 0);
+
+// memory_copy.wast:4049
+assert_return(() => call($18, "load8_u", [62704]), 0);
+
+// memory_copy.wast:4050
+assert_return(() => call($18, "load8_u", [62903]), 0);
+
+// memory_copy.wast:4051
+assert_return(() => call($18, "load8_u", [63102]), 0);
+
+// memory_copy.wast:4052
+assert_return(() => call($18, "load8_u", [63301]), 0);
+
+// memory_copy.wast:4053
+assert_return(() => call($18, "load8_u", [63500]), 0);
+
+// memory_copy.wast:4054
+assert_return(() => call($18, "load8_u", [63699]), 0);
+
+// memory_copy.wast:4055
+assert_return(() => call($18, "load8_u", [63898]), 0);
+
+// memory_copy.wast:4056
+assert_return(() => call($18, "load8_u", [64097]), 0);
+
+// memory_copy.wast:4057
+assert_return(() => call($18, "load8_u", [64296]), 0);
+
+// memory_copy.wast:4058
+assert_return(() => call($18, "load8_u", [64495]), 0);
+
+// memory_copy.wast:4059
+assert_return(() => call($18, "load8_u", [64694]), 0);
+
+// memory_copy.wast:4060
+assert_return(() => call($18, "load8_u", [64893]), 0);
+
+// memory_copy.wast:4061
+assert_return(() => call($18, "load8_u", [65092]), 0);
+
+// memory_copy.wast:4062
+assert_return(() => call($18, "load8_u", [65291]), 0);
+
+// memory_copy.wast:4063
+assert_return(() => call($18, "load8_u", [65490]), 0);
+
+// memory_copy.wast:4064
+assert_return(() => call($18, "load8_u", [65516]), 0);
+
+// memory_copy.wast:4065
+assert_return(() => call($18, "load8_u", [65517]), 1);
+
+// memory_copy.wast:4066
+assert_return(() => call($18, "load8_u", [65518]), 2);
+
+// memory_copy.wast:4067
+assert_return(() => call($18, "load8_u", [65519]), 3);
+
+// memory_copy.wast:4068
+assert_return(() => call($18, "load8_u", [65520]), 4);
+
+// memory_copy.wast:4069
+assert_return(() => call($18, "load8_u", [65521]), 5);
+
+// memory_copy.wast:4070
+assert_return(() => call($18, "load8_u", [65522]), 6);
+
+// memory_copy.wast:4071
+assert_return(() => call($18, "load8_u", [65523]), 7);
+
+// memory_copy.wast:4072
+assert_return(() => call($18, "load8_u", [65524]), 8);
+
+// memory_copy.wast:4073
+assert_return(() => call($18, "load8_u", [65525]), 9);
+
+// memory_copy.wast:4074
+assert_return(() => call($18, "load8_u", [65526]), 10);
+
+// memory_copy.wast:4075
+assert_return(() => call($18, "load8_u", [65527]), 11);
+
+// memory_copy.wast:4076
+assert_return(() => call($18, "load8_u", [65528]), 12);
+
+// memory_copy.wast:4077
+assert_return(() => call($18, "load8_u", [65529]), 13);
+
+// memory_copy.wast:4078
+assert_return(() => call($18, "load8_u", [65530]), 14);
+
+// memory_copy.wast:4079
+assert_return(() => call($18, "load8_u", [65531]), 15);
+
+// memory_copy.wast:4080
+assert_return(() => call($18, "load8_u", [65532]), 16);
+
+// memory_copy.wast:4081
+assert_return(() => call($18, "load8_u", [65533]), 17);
+
+// memory_copy.wast:4082
+assert_return(() => call($18, "load8_u", [65534]), 18);
+
+// memory_copy.wast:4083
+assert_return(() => call($18, "load8_u", [65535]), 19);
+
+// memory_copy.wast:4085
+let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8c\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x97\x80\x80\x80\x00\x03\x03\x6d\x65\x6d\x02\x00\x03\x72\x75\x6e\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\x9c\x80\x80\x80\x00\x01\x00\x41\x80\xe0\x03\x0b\x14\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13");
+
+// memory_copy.wast:4093
+assert_trap(() => call($19, "run", [65516, 61440, -256]));
+
+// memory_copy.wast:4096
+assert_return(() => call($19, "load8_u", [198]), 0);
+
+// memory_copy.wast:4097
+assert_return(() => call($19, "load8_u", [397]), 0);
+
+// memory_copy.wast:4098
+assert_return(() => call($19, "load8_u", [596]), 0);
+
+// memory_copy.wast:4099
+assert_return(() => call($19, "load8_u", [795]), 0);
+
+// memory_copy.wast:4100
+assert_return(() => call($19, "load8_u", [994]), 0);
+
+// memory_copy.wast:4101
+assert_return(() => call($19, "load8_u", [1193]), 0);
+
+// memory_copy.wast:4102
+assert_return(() => call($19, "load8_u", [1392]), 0);
+
+// memory_copy.wast:4103
+assert_return(() => call($19, "load8_u", [1591]), 0);
+
+// memory_copy.wast:4104
+assert_return(() => call($19, "load8_u", [1790]), 0);
+
+// memory_copy.wast:4105
+assert_return(() => call($19, "load8_u", [1989]), 0);
+
+// memory_copy.wast:4106
+assert_return(() => call($19, "load8_u", [2188]), 0);
+
+// memory_copy.wast:4107
+assert_return(() => call($19, "load8_u", [2387]), 0);
+
+// memory_copy.wast:4108
+assert_return(() => call($19, "load8_u", [2586]), 0);
+
+// memory_copy.wast:4109
+assert_return(() => call($19, "load8_u", [2785]), 0);
+
+// memory_copy.wast:4110
+assert_return(() => call($19, "load8_u", [2984]), 0);
+
+// memory_copy.wast:4111
+assert_return(() => call($19, "load8_u", [3183]), 0);
+
+// memory_copy.wast:4112
+assert_return(() => call($19, "load8_u", [3382]), 0);
+
+// memory_copy.wast:4113
+assert_return(() => call($19, "load8_u", [3581]), 0);
+
+// memory_copy.wast:4114
+assert_return(() => call($19, "load8_u", [3780]), 0);
+
+// memory_copy.wast:4115
+assert_return(() => call($19, "load8_u", [3979]), 0);
+
+// memory_copy.wast:4116
+assert_return(() => call($19, "load8_u", [4178]), 0);
+
+// memory_copy.wast:4117
+assert_return(() => call($19, "load8_u", [4377]), 0);
+
+// memory_copy.wast:4118
+assert_return(() => call($19, "load8_u", [4576]), 0);
+
+// memory_copy.wast:4119
+assert_return(() => call($19, "load8_u", [4775]), 0);
+
+// memory_copy.wast:4120
+assert_return(() => call($19, "load8_u", [4974]), 0);
+
+// memory_copy.wast:4121
+assert_return(() => call($19, "load8_u", [5173]), 0);
+
+// memory_copy.wast:4122
+assert_return(() => call($19, "load8_u", [5372]), 0);
+
+// memory_copy.wast:4123
+assert_return(() => call($19, "load8_u", [5571]), 0);
+
+// memory_copy.wast:4124
+assert_return(() => call($19, "load8_u", [5770]), 0);
+
+// memory_copy.wast:4125
+assert_return(() => call($19, "load8_u", [5969]), 0);
+
+// memory_copy.wast:4126
+assert_return(() => call($19, "load8_u", [6168]), 0);
+
+// memory_copy.wast:4127
+assert_return(() => call($19, "load8_u", [6367]), 0);
+
+// memory_copy.wast:4128
+assert_return(() => call($19, "load8_u", [6566]), 0);
+
+// memory_copy.wast:4129
+assert_return(() => call($19, "load8_u", [6765]), 0);
+
+// memory_copy.wast:4130
+assert_return(() => call($19, "load8_u", [6964]), 0);
+
+// memory_copy.wast:4131
+assert_return(() => call($19, "load8_u", [7163]), 0);
+
+// memory_copy.wast:4132
+assert_return(() => call($19, "load8_u", [7362]), 0);
+
+// memory_copy.wast:4133
+assert_return(() => call($19, "load8_u", [7561]), 0);
+
+// memory_copy.wast:4134
+assert_return(() => call($19, "load8_u", [7760]), 0);
+
+// memory_copy.wast:4135
+assert_return(() => call($19, "load8_u", [7959]), 0);
+
+// memory_copy.wast:4136
+assert_return(() => call($19, "load8_u", [8158]), 0);
+
+// memory_copy.wast:4137
+assert_return(() => call($19, "load8_u", [8357]), 0);
+
+// memory_copy.wast:4138
+assert_return(() => call($19, "load8_u", [8556]), 0);
+
+// memory_copy.wast:4139
+assert_return(() => call($19, "load8_u", [8755]), 0);
+
+// memory_copy.wast:4140
+assert_return(() => call($19, "load8_u", [8954]), 0);
+
+// memory_copy.wast:4141
+assert_return(() => call($19, "load8_u", [9153]), 0);
+
+// memory_copy.wast:4142
+assert_return(() => call($19, "load8_u", [9352]), 0);
+
+// memory_copy.wast:4143
+assert_return(() => call($19, "load8_u", [9551]), 0);
+
+// memory_copy.wast:4144
+assert_return(() => call($19, "load8_u", [9750]), 0);
+
+// memory_copy.wast:4145
+assert_return(() => call($19, "load8_u", [9949]), 0);
+
+// memory_copy.wast:4146
+assert_return(() => call($19, "load8_u", [10148]), 0);
+
+// memory_copy.wast:4147
+assert_return(() => call($19, "load8_u", [10347]), 0);
+
+// memory_copy.wast:4148
+assert_return(() => call($19, "load8_u", [10546]), 0);
+
+// memory_copy.wast:4149
+assert_return(() => call($19, "load8_u", [10745]), 0);
+
+// memory_copy.wast:4150
+assert_return(() => call($19, "load8_u", [10944]), 0);
+
+// memory_copy.wast:4151
+assert_return(() => call($19, "load8_u", [11143]), 0);
+
+// memory_copy.wast:4152
+assert_return(() => call($19, "load8_u", [11342]), 0);
+
+// memory_copy.wast:4153
+assert_return(() => call($19, "load8_u", [11541]), 0);
+
+// memory_copy.wast:4154
+assert_return(() => call($19, "load8_u", [11740]), 0);
+
+// memory_copy.wast:4155
+assert_return(() => call($19, "load8_u", [11939]), 0);
+
+// memory_copy.wast:4156
+assert_return(() => call($19, "load8_u", [12138]), 0);
+
+// memory_copy.wast:4157
+assert_return(() => call($19, "load8_u", [12337]), 0);
+
+// memory_copy.wast:4158
+assert_return(() => call($19, "load8_u", [12536]), 0);
+
+// memory_copy.wast:4159
+assert_return(() => call($19, "load8_u", [12735]), 0);
+
+// memory_copy.wast:4160
+assert_return(() => call($19, "load8_u", [12934]), 0);
+
+// memory_copy.wast:4161
+assert_return(() => call($19, "load8_u", [13133]), 0);
+
+// memory_copy.wast:4162
+assert_return(() => call($19, "load8_u", [13332]), 0);
+
+// memory_copy.wast:4163
+assert_return(() => call($19, "load8_u", [13531]), 0);
+
+// memory_copy.wast:4164
+assert_return(() => call($19, "load8_u", [13730]), 0);
+
+// memory_copy.wast:4165
+assert_return(() => call($19, "load8_u", [13929]), 0);
+
+// memory_copy.wast:4166
+assert_return(() => call($19, "load8_u", [14128]), 0);
+
+// memory_copy.wast:4167
+assert_return(() => call($19, "load8_u", [14327]), 0);
+
+// memory_copy.wast:4168
+assert_return(() => call($19, "load8_u", [14526]), 0);
+
+// memory_copy.wast:4169
+assert_return(() => call($19, "load8_u", [14725]), 0);
+
+// memory_copy.wast:4170
+assert_return(() => call($19, "load8_u", [14924]), 0);
+
+// memory_copy.wast:4171
+assert_return(() => call($19, "load8_u", [15123]), 0);
+
+// memory_copy.wast:4172
+assert_return(() => call($19, "load8_u", [15322]), 0);
+
+// memory_copy.wast:4173
+assert_return(() => call($19, "load8_u", [15521]), 0);
+
+// memory_copy.wast:4174
+assert_return(() => call($19, "load8_u", [15720]), 0);
+
+// memory_copy.wast:4175
+assert_return(() => call($19, "load8_u", [15919]), 0);
+
+// memory_copy.wast:4176
+assert_return(() => call($19, "load8_u", [16118]), 0);
+
+// memory_copy.wast:4177
+assert_return(() => call($19, "load8_u", [16317]), 0);
+
+// memory_copy.wast:4178
+assert_return(() => call($19, "load8_u", [16516]), 0);
+
+// memory_copy.wast:4179
+assert_return(() => call($19, "load8_u", [16715]), 0);
+
+// memory_copy.wast:4180
+assert_return(() => call($19, "load8_u", [16914]), 0);
+
+// memory_copy.wast:4181
+assert_return(() => call($19, "load8_u", [17113]), 0);
+
+// memory_copy.wast:4182
+assert_return(() => call($19, "load8_u", [17312]), 0);
+
+// memory_copy.wast:4183
+assert_return(() => call($19, "load8_u", [17511]), 0);
+
+// memory_copy.wast:4184
+assert_return(() => call($19, "load8_u", [17710]), 0);
+
+// memory_copy.wast:4185
+assert_return(() => call($19, "load8_u", [17909]), 0);
+
+// memory_copy.wast:4186
+assert_return(() => call($19, "load8_u", [18108]), 0);
+
+// memory_copy.wast:4187
+assert_return(() => call($19, "load8_u", [18307]), 0);
+
+// memory_copy.wast:4188
+assert_return(() => call($19, "load8_u", [18506]), 0);
+
+// memory_copy.wast:4189
+assert_return(() => call($19, "load8_u", [18705]), 0);
+
+// memory_copy.wast:4190
+assert_return(() => call($19, "load8_u", [18904]), 0);
+
+// memory_copy.wast:4191
+assert_return(() => call($19, "load8_u", [19103]), 0);
+
+// memory_copy.wast:4192
+assert_return(() => call($19, "load8_u", [19302]), 0);
+
+// memory_copy.wast:4193
+assert_return(() => call($19, "load8_u", [19501]), 0);
+
+// memory_copy.wast:4194
+assert_return(() => call($19, "load8_u", [19700]), 0);
+
+// memory_copy.wast:4195
+assert_return(() => call($19, "load8_u", [19899]), 0);
+
+// memory_copy.wast:4196
+assert_return(() => call($19, "load8_u", [20098]), 0);
+
+// memory_copy.wast:4197
+assert_return(() => call($19, "load8_u", [20297]), 0);
+
+// memory_copy.wast:4198
+assert_return(() => call($19, "load8_u", [20496]), 0);
+
+// memory_copy.wast:4199
+assert_return(() => call($19, "load8_u", [20695]), 0);
+
+// memory_copy.wast:4200
+assert_return(() => call($19, "load8_u", [20894]), 0);
+
+// memory_copy.wast:4201
+assert_return(() => call($19, "load8_u", [21093]), 0);
+
+// memory_copy.wast:4202
+assert_return(() => call($19, "load8_u", [21292]), 0);
+
+// memory_copy.wast:4203
+assert_return(() => call($19, "load8_u", [21491]), 0);
+
+// memory_copy.wast:4204
+assert_return(() => call($19, "load8_u", [21690]), 0);
+
+// memory_copy.wast:4205
+assert_return(() => call($19, "load8_u", [21889]), 0);
+
+// memory_copy.wast:4206
+assert_return(() => call($19, "load8_u", [22088]), 0);
+
+// memory_copy.wast:4207
+assert_return(() => call($19, "load8_u", [22287]), 0);
+
+// memory_copy.wast:4208
+assert_return(() => call($19, "load8_u", [22486]), 0);
+
+// memory_copy.wast:4209
+assert_return(() => call($19, "load8_u", [22685]), 0);
+
+// memory_copy.wast:4210
+assert_return(() => call($19, "load8_u", [22884]), 0);
+
+// memory_copy.wast:4211
+assert_return(() => call($19, "load8_u", [23083]), 0);
+
+// memory_copy.wast:4212
+assert_return(() => call($19, "load8_u", [23282]), 0);
+
+// memory_copy.wast:4213
+assert_return(() => call($19, "load8_u", [23481]), 0);
+
+// memory_copy.wast:4214
+assert_return(() => call($19, "load8_u", [23680]), 0);
+
+// memory_copy.wast:4215
+assert_return(() => call($19, "load8_u", [23879]), 0);
+
+// memory_copy.wast:4216
+assert_return(() => call($19, "load8_u", [24078]), 0);
+
+// memory_copy.wast:4217
+assert_return(() => call($19, "load8_u", [24277]), 0);
+
+// memory_copy.wast:4218
+assert_return(() => call($19, "load8_u", [24476]), 0);
+
+// memory_copy.wast:4219
+assert_return(() => call($19, "load8_u", [24675]), 0);
+
+// memory_copy.wast:4220
+assert_return(() => call($19, "load8_u", [24874]), 0);
+
+// memory_copy.wast:4221
+assert_return(() => call($19, "load8_u", [25073]), 0);
+
+// memory_copy.wast:4222
+assert_return(() => call($19, "load8_u", [25272]), 0);
+
+// memory_copy.wast:4223
+assert_return(() => call($19, "load8_u", [25471]), 0);
+
+// memory_copy.wast:4224
+assert_return(() => call($19, "load8_u", [25670]), 0);
+
+// memory_copy.wast:4225
+assert_return(() => call($19, "load8_u", [25869]), 0);
+
+// memory_copy.wast:4226
+assert_return(() => call($19, "load8_u", [26068]), 0);
+
+// memory_copy.wast:4227
+assert_return(() => call($19, "load8_u", [26267]), 0);
+
+// memory_copy.wast:4228
+assert_return(() => call($19, "load8_u", [26466]), 0);
+
+// memory_copy.wast:4229
+assert_return(() => call($19, "load8_u", [26665]), 0);
+
+// memory_copy.wast:4230
+assert_return(() => call($19, "load8_u", [26864]), 0);
+
+// memory_copy.wast:4231
+assert_return(() => call($19, "load8_u", [27063]), 0);
+
+// memory_copy.wast:4232
+assert_return(() => call($19, "load8_u", [27262]), 0);
+
+// memory_copy.wast:4233
+assert_return(() => call($19, "load8_u", [27461]), 0);
+
+// memory_copy.wast:4234
+assert_return(() => call($19, "load8_u", [27660]), 0);
+
+// memory_copy.wast:4235
+assert_return(() => call($19, "load8_u", [27859]), 0);
+
+// memory_copy.wast:4236
+assert_return(() => call($19, "load8_u", [28058]), 0);
+
+// memory_copy.wast:4237
+assert_return(() => call($19, "load8_u", [28257]), 0);
+
+// memory_copy.wast:4238
+assert_return(() => call($19, "load8_u", [28456]), 0);
+
+// memory_copy.wast:4239
+assert_return(() => call($19, "load8_u", [28655]), 0);
+
+// memory_copy.wast:4240
+assert_return(() => call($19, "load8_u", [28854]), 0);
+
+// memory_copy.wast:4241
+assert_return(() => call($19, "load8_u", [29053]), 0);
+
+// memory_copy.wast:4242
+assert_return(() => call($19, "load8_u", [29252]), 0);
+
+// memory_copy.wast:4243
+assert_return(() => call($19, "load8_u", [29451]), 0);
+
+// memory_copy.wast:4244
+assert_return(() => call($19, "load8_u", [29650]), 0);
+
+// memory_copy.wast:4245
+assert_return(() => call($19, "load8_u", [29849]), 0);
+
+// memory_copy.wast:4246
+assert_return(() => call($19, "load8_u", [30048]), 0);
+
+// memory_copy.wast:4247
+assert_return(() => call($19, "load8_u", [30247]), 0);
+
+// memory_copy.wast:4248
+assert_return(() => call($19, "load8_u", [30446]), 0);
+
+// memory_copy.wast:4249
+assert_return(() => call($19, "load8_u", [30645]), 0);
+
+// memory_copy.wast:4250
+assert_return(() => call($19, "load8_u", [30844]), 0);
+
+// memory_copy.wast:4251
+assert_return(() => call($19, "load8_u", [31043]), 0);
+
+// memory_copy.wast:4252
+assert_return(() => call($19, "load8_u", [31242]), 0);
+
+// memory_copy.wast:4253
+assert_return(() => call($19, "load8_u", [31441]), 0);
+
+// memory_copy.wast:4254
+assert_return(() => call($19, "load8_u", [31640]), 0);
+
+// memory_copy.wast:4255
+assert_return(() => call($19, "load8_u", [31839]), 0);
+
+// memory_copy.wast:4256
+assert_return(() => call($19, "load8_u", [32038]), 0);
+
+// memory_copy.wast:4257
+assert_return(() => call($19, "load8_u", [32237]), 0);
+
+// memory_copy.wast:4258
+assert_return(() => call($19, "load8_u", [32436]), 0);
+
+// memory_copy.wast:4259
+assert_return(() => call($19, "load8_u", [32635]), 0);
+
+// memory_copy.wast:4260
+assert_return(() => call($19, "load8_u", [32834]), 0);
+
+// memory_copy.wast:4261
+assert_return(() => call($19, "load8_u", [33033]), 0);
+
+// memory_copy.wast:4262
+assert_return(() => call($19, "load8_u", [33232]), 0);
+
+// memory_copy.wast:4263
+assert_return(() => call($19, "load8_u", [33431]), 0);
+
+// memory_copy.wast:4264
+assert_return(() => call($19, "load8_u", [33630]), 0);
+
+// memory_copy.wast:4265
+assert_return(() => call($19, "load8_u", [33829]), 0);
+
+// memory_copy.wast:4266
+assert_return(() => call($19, "load8_u", [34028]), 0);
+
+// memory_copy.wast:4267
+assert_return(() => call($19, "load8_u", [34227]), 0);
+
+// memory_copy.wast:4268
+assert_return(() => call($19, "load8_u", [34426]), 0);
+
+// memory_copy.wast:4269
+assert_return(() => call($19, "load8_u", [34625]), 0);
+
+// memory_copy.wast:4270
+assert_return(() => call($19, "load8_u", [34824]), 0);
+
+// memory_copy.wast:4271
+assert_return(() => call($19, "load8_u", [35023]), 0);
+
+// memory_copy.wast:4272
+assert_return(() => call($19, "load8_u", [35222]), 0);
+
+// memory_copy.wast:4273
+assert_return(() => call($19, "load8_u", [35421]), 0);
+
+// memory_copy.wast:4274
+assert_return(() => call($19, "load8_u", [35620]), 0);
+
+// memory_copy.wast:4275
+assert_return(() => call($19, "load8_u", [35819]), 0);
+
+// memory_copy.wast:4276
+assert_return(() => call($19, "load8_u", [36018]), 0);
+
+// memory_copy.wast:4277
+assert_return(() => call($19, "load8_u", [36217]), 0);
+
+// memory_copy.wast:4278
+assert_return(() => call($19, "load8_u", [36416]), 0);
+
+// memory_copy.wast:4279
+assert_return(() => call($19, "load8_u", [36615]), 0);
+
+// memory_copy.wast:4280
+assert_return(() => call($19, "load8_u", [36814]), 0);
+
+// memory_copy.wast:4281
+assert_return(() => call($19, "load8_u", [37013]), 0);
+
+// memory_copy.wast:4282
+assert_return(() => call($19, "load8_u", [37212]), 0);
+
+// memory_copy.wast:4283
+assert_return(() => call($19, "load8_u", [37411]), 0);
+
+// memory_copy.wast:4284
+assert_return(() => call($19, "load8_u", [37610]), 0);
+
+// memory_copy.wast:4285
+assert_return(() => call($19, "load8_u", [37809]), 0);
+
+// memory_copy.wast:4286
+assert_return(() => call($19, "load8_u", [38008]), 0);
+
+// memory_copy.wast:4287
+assert_return(() => call($19, "load8_u", [38207]), 0);
+
+// memory_copy.wast:4288
+assert_return(() => call($19, "load8_u", [38406]), 0);
+
+// memory_copy.wast:4289
+assert_return(() => call($19, "load8_u", [38605]), 0);
+
+// memory_copy.wast:4290
+assert_return(() => call($19, "load8_u", [38804]), 0);
+
+// memory_copy.wast:4291
+assert_return(() => call($19, "load8_u", [39003]), 0);
+
+// memory_copy.wast:4292
+assert_return(() => call($19, "load8_u", [39202]), 0);
+
+// memory_copy.wast:4293
+assert_return(() => call($19, "load8_u", [39401]), 0);
+
+// memory_copy.wast:4294
+assert_return(() => call($19, "load8_u", [39600]), 0);
+
+// memory_copy.wast:4295
+assert_return(() => call($19, "load8_u", [39799]), 0);
+
+// memory_copy.wast:4296
+assert_return(() => call($19, "load8_u", [39998]), 0);
+
+// memory_copy.wast:4297
+assert_return(() => call($19, "load8_u", [40197]), 0);
+
+// memory_copy.wast:4298
+assert_return(() => call($19, "load8_u", [40396]), 0);
+
+// memory_copy.wast:4299
+assert_return(() => call($19, "load8_u", [40595]), 0);
+
+// memory_copy.wast:4300
+assert_return(() => call($19, "load8_u", [40794]), 0);
+
+// memory_copy.wast:4301
+assert_return(() => call($19, "load8_u", [40993]), 0);
+
+// memory_copy.wast:4302
+assert_return(() => call($19, "load8_u", [41192]), 0);
+
+// memory_copy.wast:4303
+assert_return(() => call($19, "load8_u", [41391]), 0);
+
+// memory_copy.wast:4304
+assert_return(() => call($19, "load8_u", [41590]), 0);
+
+// memory_copy.wast:4305
+assert_return(() => call($19, "load8_u", [41789]), 0);
+
+// memory_copy.wast:4306
+assert_return(() => call($19, "load8_u", [41988]), 0);
+
+// memory_copy.wast:4307
+assert_return(() => call($19, "load8_u", [42187]), 0);
+
+// memory_copy.wast:4308
+assert_return(() => call($19, "load8_u", [42386]), 0);
+
+// memory_copy.wast:4309
+assert_return(() => call($19, "load8_u", [42585]), 0);
+
+// memory_copy.wast:4310
+assert_return(() => call($19, "load8_u", [42784]), 0);
+
+// memory_copy.wast:4311
+assert_return(() => call($19, "load8_u", [42983]), 0);
+
+// memory_copy.wast:4312
+assert_return(() => call($19, "load8_u", [43182]), 0);
+
+// memory_copy.wast:4313
+assert_return(() => call($19, "load8_u", [43381]), 0);
+
+// memory_copy.wast:4314
+assert_return(() => call($19, "load8_u", [43580]), 0);
+
+// memory_copy.wast:4315
+assert_return(() => call($19, "load8_u", [43779]), 0);
+
+// memory_copy.wast:4316
+assert_return(() => call($19, "load8_u", [43978]), 0);
+
+// memory_copy.wast:4317
+assert_return(() => call($19, "load8_u", [44177]), 0);
+
+// memory_copy.wast:4318
+assert_return(() => call($19, "load8_u", [44376]), 0);
+
+// memory_copy.wast:4319
+assert_return(() => call($19, "load8_u", [44575]), 0);
+
+// memory_copy.wast:4320
+assert_return(() => call($19, "load8_u", [44774]), 0);
+
+// memory_copy.wast:4321
+assert_return(() => call($19, "load8_u", [44973]), 0);
+
+// memory_copy.wast:4322
+assert_return(() => call($19, "load8_u", [45172]), 0);
+
+// memory_copy.wast:4323
+assert_return(() => call($19, "load8_u", [45371]), 0);
+
+// memory_copy.wast:4324
+assert_return(() => call($19, "load8_u", [45570]), 0);
+
+// memory_copy.wast:4325
+assert_return(() => call($19, "load8_u", [45769]), 0);
+
+// memory_copy.wast:4326
+assert_return(() => call($19, "load8_u", [45968]), 0);
+
+// memory_copy.wast:4327
+assert_return(() => call($19, "load8_u", [46167]), 0);
+
+// memory_copy.wast:4328
+assert_return(() => call($19, "load8_u", [46366]), 0);
+
+// memory_copy.wast:4329
+assert_return(() => call($19, "load8_u", [46565]), 0);
+
+// memory_copy.wast:4330
+assert_return(() => call($19, "load8_u", [46764]), 0);
+
+// memory_copy.wast:4331
+assert_return(() => call($19, "load8_u", [46963]), 0);
+
+// memory_copy.wast:4332
+assert_return(() => call($19, "load8_u", [47162]), 0);
+
+// memory_copy.wast:4333
+assert_return(() => call($19, "load8_u", [47361]), 0);
+
+// memory_copy.wast:4334
+assert_return(() => call($19, "load8_u", [47560]), 0);
+
+// memory_copy.wast:4335
+assert_return(() => call($19, "load8_u", [47759]), 0);
+
+// memory_copy.wast:4336
+assert_return(() => call($19, "load8_u", [47958]), 0);
+
+// memory_copy.wast:4337
+assert_return(() => call($19, "load8_u", [48157]), 0);
+
+// memory_copy.wast:4338
+assert_return(() => call($19, "load8_u", [48356]), 0);
+
+// memory_copy.wast:4339
+assert_return(() => call($19, "load8_u", [48555]), 0);
+
+// memory_copy.wast:4340
+assert_return(() => call($19, "load8_u", [48754]), 0);
+
+// memory_copy.wast:4341
+assert_return(() => call($19, "load8_u", [48953]), 0);
+
+// memory_copy.wast:4342
+assert_return(() => call($19, "load8_u", [49152]), 0);
+
+// memory_copy.wast:4343
+assert_return(() => call($19, "load8_u", [49351]), 0);
+
+// memory_copy.wast:4344
+assert_return(() => call($19, "load8_u", [49550]), 0);
+
+// memory_copy.wast:4345
+assert_return(() => call($19, "load8_u", [49749]), 0);
+
+// memory_copy.wast:4346
+assert_return(() => call($19, "load8_u", [49948]), 0);
+
+// memory_copy.wast:4347
+assert_return(() => call($19, "load8_u", [50147]), 0);
+
+// memory_copy.wast:4348
+assert_return(() => call($19, "load8_u", [50346]), 0);
+
+// memory_copy.wast:4349
+assert_return(() => call($19, "load8_u", [50545]), 0);
+
+// memory_copy.wast:4350
+assert_return(() => call($19, "load8_u", [50744]), 0);
+
+// memory_copy.wast:4351
+assert_return(() => call($19, "load8_u", [50943]), 0);
+
+// memory_copy.wast:4352
+assert_return(() => call($19, "load8_u", [51142]), 0);
+
+// memory_copy.wast:4353
+assert_return(() => call($19, "load8_u", [51341]), 0);
+
+// memory_copy.wast:4354
+assert_return(() => call($19, "load8_u", [51540]), 0);
+
+// memory_copy.wast:4355
+assert_return(() => call($19, "load8_u", [51739]), 0);
+
+// memory_copy.wast:4356
+assert_return(() => call($19, "load8_u", [51938]), 0);
+
+// memory_copy.wast:4357
+assert_return(() => call($19, "load8_u", [52137]), 0);
+
+// memory_copy.wast:4358
+assert_return(() => call($19, "load8_u", [52336]), 0);
+
+// memory_copy.wast:4359
+assert_return(() => call($19, "load8_u", [52535]), 0);
+
+// memory_copy.wast:4360
+assert_return(() => call($19, "load8_u", [52734]), 0);
+
+// memory_copy.wast:4361
+assert_return(() => call($19, "load8_u", [52933]), 0);
+
+// memory_copy.wast:4362
+assert_return(() => call($19, "load8_u", [53132]), 0);
+
+// memory_copy.wast:4363
+assert_return(() => call($19, "load8_u", [53331]), 0);
+
+// memory_copy.wast:4364
+assert_return(() => call($19, "load8_u", [53530]), 0);
+
+// memory_copy.wast:4365
+assert_return(() => call($19, "load8_u", [53729]), 0);
+
+// memory_copy.wast:4366
+assert_return(() => call($19, "load8_u", [53928]), 0);
+
+// memory_copy.wast:4367
+assert_return(() => call($19, "load8_u", [54127]), 0);
+
+// memory_copy.wast:4368
+assert_return(() => call($19, "load8_u", [54326]), 0);
+
+// memory_copy.wast:4369
+assert_return(() => call($19, "load8_u", [54525]), 0);
+
+// memory_copy.wast:4370
+assert_return(() => call($19, "load8_u", [54724]), 0);
+
+// memory_copy.wast:4371
+assert_return(() => call($19, "load8_u", [54923]), 0);
+
+// memory_copy.wast:4372
+assert_return(() => call($19, "load8_u", [55122]), 0);
+
+// memory_copy.wast:4373
+assert_return(() => call($19, "load8_u", [55321]), 0);
+
+// memory_copy.wast:4374
+assert_return(() => call($19, "load8_u", [55520]), 0);
+
+// memory_copy.wast:4375
+assert_return(() => call($19, "load8_u", [55719]), 0);
+
+// memory_copy.wast:4376
+assert_return(() => call($19, "load8_u", [55918]), 0);
+
+// memory_copy.wast:4377
+assert_return(() => call($19, "load8_u", [56117]), 0);
+
+// memory_copy.wast:4378
+assert_return(() => call($19, "load8_u", [56316]), 0);
+
+// memory_copy.wast:4379
+assert_return(() => call($19, "load8_u", [56515]), 0);
+
+// memory_copy.wast:4380
+assert_return(() => call($19, "load8_u", [56714]), 0);
+
+// memory_copy.wast:4381
+assert_return(() => call($19, "load8_u", [56913]), 0);
+
+// memory_copy.wast:4382
+assert_return(() => call($19, "load8_u", [57112]), 0);
+
+// memory_copy.wast:4383
+assert_return(() => call($19, "load8_u", [57311]), 0);
+
+// memory_copy.wast:4384
+assert_return(() => call($19, "load8_u", [57510]), 0);
+
+// memory_copy.wast:4385
+assert_return(() => call($19, "load8_u", [57709]), 0);
+
+// memory_copy.wast:4386
+assert_return(() => call($19, "load8_u", [57908]), 0);
+
+// memory_copy.wast:4387
+assert_return(() => call($19, "load8_u", [58107]), 0);
+
+// memory_copy.wast:4388
+assert_return(() => call($19, "load8_u", [58306]), 0);
+
+// memory_copy.wast:4389
+assert_return(() => call($19, "load8_u", [58505]), 0);
+
+// memory_copy.wast:4390
+assert_return(() => call($19, "load8_u", [58704]), 0);
+
+// memory_copy.wast:4391
+assert_return(() => call($19, "load8_u", [58903]), 0);
+
+// memory_copy.wast:4392
+assert_return(() => call($19, "load8_u", [59102]), 0);
+
+// memory_copy.wast:4393
+assert_return(() => call($19, "load8_u", [59301]), 0);
+
+// memory_copy.wast:4394
+assert_return(() => call($19, "load8_u", [59500]), 0);
+
+// memory_copy.wast:4395
+assert_return(() => call($19, "load8_u", [59699]), 0);
+
+// memory_copy.wast:4396
+assert_return(() => call($19, "load8_u", [59898]), 0);
+
+// memory_copy.wast:4397
+assert_return(() => call($19, "load8_u", [60097]), 0);
+
+// memory_copy.wast:4398
+assert_return(() => call($19, "load8_u", [60296]), 0);
+
+// memory_copy.wast:4399
+assert_return(() => call($19, "load8_u", [60495]), 0);
+
+// memory_copy.wast:4400
+assert_return(() => call($19, "load8_u", [60694]), 0);
+
+// memory_copy.wast:4401
+assert_return(() => call($19, "load8_u", [60893]), 0);
+
+// memory_copy.wast:4402
+assert_return(() => call($19, "load8_u", [61092]), 0);
+
+// memory_copy.wast:4403
+assert_return(() => call($19, "load8_u", [61291]), 0);
+
+// memory_copy.wast:4404
+assert_return(() => call($19, "load8_u", [61440]), 0);
+
+// memory_copy.wast:4405
+assert_return(() => call($19, "load8_u", [61441]), 1);
+
+// memory_copy.wast:4406
+assert_return(() => call($19, "load8_u", [61442]), 2);
+
+// memory_copy.wast:4407
+assert_return(() => call($19, "load8_u", [61443]), 3);
+
+// memory_copy.wast:4408
+assert_return(() => call($19, "load8_u", [61444]), 4);
+
+// memory_copy.wast:4409
+assert_return(() => call($19, "load8_u", [61445]), 5);
+
+// memory_copy.wast:4410
+assert_return(() => call($19, "load8_u", [61446]), 6);
+
+// memory_copy.wast:4411
+assert_return(() => call($19, "load8_u", [61447]), 7);
+
+// memory_copy.wast:4412
+assert_return(() => call($19, "load8_u", [61448]), 8);
+
+// memory_copy.wast:4413
+assert_return(() => call($19, "load8_u", [61449]), 9);
+
+// memory_copy.wast:4414
+assert_return(() => call($19, "load8_u", [61450]), 10);
+
+// memory_copy.wast:4415
+assert_return(() => call($19, "load8_u", [61451]), 11);
+
+// memory_copy.wast:4416
+assert_return(() => call($19, "load8_u", [61452]), 12);
+
+// memory_copy.wast:4417
+assert_return(() => call($19, "load8_u", [61453]), 13);
+
+// memory_copy.wast:4418
+assert_return(() => call($19, "load8_u", [61454]), 14);
+
+// memory_copy.wast:4419
+assert_return(() => call($19, "load8_u", [61455]), 15);
+
+// memory_copy.wast:4420
+assert_return(() => call($19, "load8_u", [61456]), 16);
+
+// memory_copy.wast:4421
+assert_return(() => call($19, "load8_u", [61457]), 17);
+
+// memory_copy.wast:4422
+assert_return(() => call($19, "load8_u", [61458]), 18);
+
+// memory_copy.wast:4423
+assert_return(() => call($19, "load8_u", [61459]), 19);
+
+// memory_copy.wast:4424
+assert_return(() => call($19, "load8_u", [61510]), 0);
+
+// memory_copy.wast:4425
+assert_return(() => call($19, "load8_u", [61709]), 0);
+
+// memory_copy.wast:4426
+assert_return(() => call($19, "load8_u", [61908]), 0);
+
+// memory_copy.wast:4427
+assert_return(() => call($19, "load8_u", [62107]), 0);
+
+// memory_copy.wast:4428
+assert_return(() => call($19, "load8_u", [62306]), 0);
+
+// memory_copy.wast:4429
+assert_return(() => call($19, "load8_u", [62505]), 0);
+
+// memory_copy.wast:4430
+assert_return(() => call($19, "load8_u", [62704]), 0);
+
+// memory_copy.wast:4431
+assert_return(() => call($19, "load8_u", [62903]), 0);
+
+// memory_copy.wast:4432
+assert_return(() => call($19, "load8_u", [63102]), 0);
+
+// memory_copy.wast:4433
+assert_return(() => call($19, "load8_u", [63301]), 0);
+
+// memory_copy.wast:4434
+assert_return(() => call($19, "load8_u", [63500]), 0);
+
+// memory_copy.wast:4435
+assert_return(() => call($19, "load8_u", [63699]), 0);
+
+// memory_copy.wast:4436
+assert_return(() => call($19, "load8_u", [63898]), 0);
+
+// memory_copy.wast:4437
+assert_return(() => call($19, "load8_u", [64097]), 0);
+
+// memory_copy.wast:4438
+assert_return(() => call($19, "load8_u", [64296]), 0);
+
+// memory_copy.wast:4439
+assert_return(() => call($19, "load8_u", [64495]), 0);
+
+// memory_copy.wast:4440
+assert_return(() => call($19, "load8_u", [64694]), 0);
+
+// memory_copy.wast:4441
+assert_return(() => call($19, "load8_u", [64893]), 0);
+
+// memory_copy.wast:4442
+assert_return(() => call($19, "load8_u", [65092]), 0);
+
+// memory_copy.wast:4443
+assert_return(() => call($19, "load8_u", [65291]), 0);
+
+// memory_copy.wast:4444
+assert_return(() => call($19, "load8_u", [65490]), 0);
+
+// memory_copy.wast:4446
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4452
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4459
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4466
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4473
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4480
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4487
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4494
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4501
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4508
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4515
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4522
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4529
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4536
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4543
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4550
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4557
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4564
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4571
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4578
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4585
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4592
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4599
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4606
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4613
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4620
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4627
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4634
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4641
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4648
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4655
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4662
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4669
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4676
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4683
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4690
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4697
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4704
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4711
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4718
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4725
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4732
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4739
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4746
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4753
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4760
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4767
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4774
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4781
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4788
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4795
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4802
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4809
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4816
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4823
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4830
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4837
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4844
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4851
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4858
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4865
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4872
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4879
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4886
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa7\x80\x80\x80\x00\x01\xa1\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4894
+let $20 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x00\x00\x60\x03\x7f\x7f\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x00\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x01\x0a\xc8\x80\x80\x80\x00\x02\x96\x80\x80\x80\x00\x00\x41\x0a\x41\xd5\x00\x41\x0a\xfc\x0b\x00\x41\x09\x41\x0a\x41\x05\xfc\x0a\x00\x00\x0b\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b");
+
+// memory_copy.wast:4911
+run(() => call($20, "test", []));
+
+// memory_copy.wast:4913
+assert_return(() => call($20, "checkRange", [0, 9, 0]), -1);
+
+// memory_copy.wast:4915
+assert_return(() => call($20, "checkRange", [9, 20, 85]), -1);
+
+// memory_copy.wast:4917
+assert_return(() => call($20, "checkRange", [20, 65536, 0]), -1);
+
+// memory_copy.wast:4920
+let $21 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x00\x00\x60\x03\x7f\x7f\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x00\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x01\x0a\xc8\x80\x80\x80\x00\x02\x96\x80\x80\x80\x00\x00\x41\x0a\x41\xd5\x00\x41\x0a\xfc\x0b\x00\x41\x10\x41\x0f\x41\x05\xfc\x0a\x00\x00\x0b\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b");
+
+// memory_copy.wast:4937
+run(() => call($21, "test", []));
+
+// memory_copy.wast:4939
+assert_return(() => call($21, "checkRange", [0, 10, 0]), -1);
+
+// memory_copy.wast:4941
+assert_return(() => call($21, "checkRange", [10, 21, 85]), -1);
+
+// memory_copy.wast:4943
+assert_return(() => call($21, "checkRange", [21, 65536, 0]), -1);
+
+// memory_copy.wast:4946
+let $22 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x41\x80\xfe\x03\x41\x80\x80\x02\x41\x81\x02\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4950
+assert_trap(() => call($22, "test", []));
+
+// memory_copy.wast:4952
+let $23 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\x41\x80\x7e\x41\x80\x80\x01\x41\x81\x02\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4956
+assert_trap(() => call($23, "test", []));
+
+// memory_copy.wast:4958
+let $24 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x41\x80\x80\x02\x41\x80\xfe\x03\x41\x81\x02\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4962
+assert_trap(() => call($24, "test", []));
+
+// memory_copy.wast:4964
+let $25 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\x41\x80\x80\x01\x41\x80\x7e\x41\x81\x02\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4968
+assert_trap(() => call($25, "test", []));
+
+// memory_copy.wast:4970
+let $26 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x00\x00\x60\x03\x7f\x7f\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x00\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x01\x0a\xdc\x80\x80\x80\x00\x02\xaa\x80\x80\x80\x00\x00\x41\x00\x41\xd5\x00\x41\x80\x80\x02\xfc\x0b\x00\x41\x80\x80\x02\x41\xaa\x01\x41\x80\x80\x02\xfc\x0b\x00\x41\x80\xa0\x02\x41\x80\xe0\x01\x41\x00\xfc\x0a\x00\x00\x0b\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b");
+
+// memory_copy.wast:4988
+run(() => call($26, "test", []));
+
+// memory_copy.wast:4990
+assert_return(() => call($26, "checkRange", [0, 32768, 85]), -1);
+
+// memory_copy.wast:4992
+assert_return(() => call($26, "checkRange", [32768, 65536, 170]), -1);
+
+// memory_copy.wast:4994
+let $27 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\x41\x80\x80\x04\x41\x80\xe0\x01\x41\x00\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:4998
+run(() => call($27, "test", []));
+
+// memory_copy.wast:5000
+let $28 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\x41\x80\xa0\x02\x41\x80\x80\x04\x41\x00\xfc\x0a\x00\x00\x0b");
+
+// memory_copy.wast:5004
+run(() => call($28, "test", []));
+
+// memory_copy.wast:5006
+let $29 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x00\x00\x60\x03\x7f\x7f\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x00\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x01\x0a\xbe\x95\x80\x80\x00\x02\x8c\x95\x80\x80\x00\x00\x41\xe7\x8a\x01\x41\x01\x41\xc0\x0a\xfc\x0b\x00\x41\xe9\xb0\x02\x41\x02\x41\x9f\x08\xfc\x0b\x00\x41\xd1\xb8\x03\x41\x03\x41\xdc\x07\xfc\x0b\x00\x41\xca\xa8\x02\x41\x04\x41\xc2\x02\xfc\x0b\x00\x41\xa9\x3e\x41\x05\x41\xca\x0f\xfc\x0b\x00\x41\xba\xb1\x01\x41\x06\x41\xdc\x17\xfc\x0b\x00\x41\xf2\x83\x01\x41\x07\x41\xc4\x12\xfc\x0b\x00\x41\xe3\xd3\x02\x41\x08\x41\xc3\x06\xfc\x0b\x00\x41\xfc\x00\x41\x09\x41\xf1\x0a\xfc\x0b\x00\x41\xd4\x10\x41\x0a\x41\xc6\x15\xfc\x0b\x00\x41\x9b\xc6\x00\x41\x0b\x41\x9a\x18\xfc\x0b\x00\x41\xe7\x9b\x03\x41\x0c\x41\xe5\x05\xfc\x0b\x00\x41\xf6\x1e\x41\x0d\x41\x87\x16\xfc\x0b\x00\x41\xb3\x84\x03\x41\x0e\x41\x80\x0a\xfc\x0b\x00\x41\xc9\x89\x03\x41\x0f\x41\xba\x0b\xfc\x0b\x00\x41\x8d\xa0\x01\x41\x10\x41\xd6\x18\xfc\x0b\x00\x41\xb1\xf4\x02\x41\x11\x41\xa0\x04\xfc\x0b\x00\x41\xa3\xe1\x00\x41\x12\x41\xed\x14\xfc\x0b\x00\x41\xa5\xc2\x01\x41\x13\x41\xdb\x14\xfc\x0b\x00\x41\x85\xe2\x02\x41\x14\x41\xa2\x0c\xfc\x0b\x00\x41\xd8\xd0\x02\x41\x15\x41\x9b\x0d\xfc\x0b\x00\x41\xde\x88\x02\x41\x16\x41\x86\x05\xfc\x0b\x00\x41\xab\xfb\x02\x41\x17\x41\xc2\x0e\xfc\x0b\x00\x41\xcd\xa1\x03\x41\x18\x41\xe1\x14\xfc\x0b\x00\x41\x9b\xed\x01\x41\x19\x41\xd5\x07\xfc\x0b\x00\x41\xd4\xc8\x00\x41\x1a\x41\x8f\x0e\xfc\x0b\x00\x41\x8e\x88\x03\x41\x1b\x41\xe7\x03\xfc\x0b\x00\x41\xa1\xea\x03\x41\x1c\x41\x92\x04\xfc\x0b\x00\x41\xdc\x9b\x02\x41\x1d\x41\xaf\x07\xfc\x0b\x00\x41\xf0\x34\x41\x1e\x41\xfd\x02\xfc\x0b\x00\x41\xbe\x90\x03\x41\x1f\x41\x91\x18\xfc\x0b\x00\x41\xc1\x84\x03\x41\x20\x41\x92\x05\xfc\x0b\x00\x41\xfc\xdb\x02\x41\x21\x41\xa6\x0d\xfc\x0b\x00\x41\xbe\x84\x02\x41\x22\x41\xc4\x08\xfc\x0b\x00\x41\xfe\x8c\x03\x41\x23\x41\x8
2\x0b\xfc\x0b\x00\x41\xea\xf3\x02\x41\x24\x41\x9c\x11\xfc\x0b\x00\x41\xeb\xa6\x03\x41\x25\x41\xda\x12\xfc\x0b\x00\x41\x8f\xaf\x03\x41\x26\x41\xfa\x01\xfc\x0b\x00\x41\xdc\xb0\x01\x41\x27\x41\xb1\x10\xfc\x0b\x00\x41\xec\x85\x01\x41\x28\x41\xc0\x19\xfc\x0b\x00\x41\xbb\xa8\x03\x41\x29\x41\xe3\x19\xfc\x0b\x00\x41\xb2\xb4\x02\x41\x2a\x41\xec\x15\xfc\x0b\x00\x41\xbc\x9a\x02\x41\x2b\x41\x96\x10\xfc\x0b\x00\x41\xec\x93\x02\x41\x2c\x41\xcb\x15\xfc\x0b\x00\x41\xdb\xff\x01\x41\x2d\x41\xb8\x02\xfc\x0b\x00\x41\x82\xf2\x03\x41\x2e\x41\xc0\x01\xfc\x0b\x00\x41\xfe\xf1\x01\x41\x2f\x41\xd4\x04\xfc\x0b\x00\x41\xfb\x81\x01\x41\x30\x41\xf5\x03\xfc\x0b\x00\x41\xaa\xbd\x03\x41\x31\x41\xae\x05\xfc\x0b\x00\x41\xfb\x8b\x02\x41\x32\x41\x81\x03\xfc\x0b\x00\x41\xd1\xdb\x03\x41\x33\x41\x87\x07\xfc\x0b\x00\x41\x85\xe0\x03\x41\x34\x41\xd6\x12\xfc\x0b\x00\x41\xfc\xee\x02\x41\x35\x41\xa1\x0b\xfc\x0b\x00\x41\xf5\xca\x01\x41\x36\x41\xda\x18\xfc\x0b\x00\x41\xbe\x2b\x41\x37\x41\xd7\x10\xfc\x0b\x00\x41\x89\x99\x02\x41\x38\x41\x87\x04\xfc\x0b\x00\x41\xdc\xde\x02\x41\x39\x41\xd0\x19\xfc\x0b\x00\x41\xa8\xed\x02\x41\x3a\x41\x8e\x0d\xfc\x0b\x00\x41\x8f\xec\x02\x41\x3b\x41\xe0\x18\xfc\x0b\x00\x41\xb1\xaf\x01\x41\x3c\x41\xa1\x0b\xfc\x0b\x00\x41\xf1\xc9\x03\x41\x3d\x41\x97\x05\xfc\x0b\x00\x41\x85\xfc\x01\x41\x3e\x41\x87\x0d\xfc\x0b\x00\x41\xf7\x17\x41\x3f\x41\xd1\x05\xfc\x0b\x00\x41\xe9\x89\x02\x41\xc0\x00\x41\xd4\x00\xfc\x0b\x00\x41\xba\x84\x02\x41\xc1\x00\x41\xed\x0f\xfc\x0b\x00\x41\xca\x9f\x02\x41\xc2\x00\x41\x1d\xfc\x0b\x00\x41\xcb\x95\x01\x41\xc3\x00\x41\xda\x17\xfc\x0b\x00\x41\xc8\xe2\x00\x41\xc4\x00\x41\x93\x08\xfc\x0b\x00\x41\xe4\x8e\x01\x41\xc5\x00\x41\xfc\x19\xfc\x0b\x00\x41\x9f\x24\x41\xc6\x00\x41\xc3\x08\xfc\x0b\x00\x41\x9e\xfe\x00\x41\xc7\x00\x41\xcd\x0f\xfc\x0b\x00\x41\x9c\x8e\x01\x41\xc8\x00\x41\xd3\x11\xfc\x0b\x00\x41\xe4\x8a\x03\x41\xc9\x00\x41\xf5\x18\xfc\x0b\x00\x41\x94\xd6\x00\x41\xca\x00\x41\xb0\x0f\xfc\x0b\x00\x41\xda\xfc\x00\x41\xcb\x00\x41\xaf\x0b\xfc\x0b\x00\x41\xde\xe2\x02\x41\xcc\x00\x4
1\x99\x09\xfc\x0b\x00\x41\xf9\xa6\x03\x41\xcd\x00\x41\xa0\x0c\xfc\x0b\x00\x41\xbb\x82\x02\x41\xce\x00\x41\xea\x0c\xfc\x0b\x00\x41\xe4\xdc\x03\x41\xcf\x00\x41\xd4\x19\xfc\x0b\x00\x41\x91\x94\x03\x41\xd0\x00\x41\xdf\x01\xfc\x0b\x00\x41\x89\x22\x41\xd1\x00\x41\xfb\x10\xfc\x0b\x00\x41\xaa\xc1\x03\x41\xd2\x00\x41\xaa\x0a\xfc\x0b\x00\x41\xac\xb3\x03\x41\xd3\x00\x41\xd8\x14\xfc\x0b\x00\x41\x9b\xbc\x01\x41\xd4\x00\x41\x95\x08\xfc\x0b\x00\x41\xaf\xd1\x02\x41\xd5\x00\x41\x99\x18\xfc\x0b\x00\x41\xb3\xfc\x01\x41\xd6\x00\x41\xec\x15\xfc\x0b\x00\x41\xe3\x1d\x41\xd7\x00\x41\xda\x0f\xfc\x0b\x00\x41\xc8\xac\x03\x41\xd8\x00\x41\x00\xfc\x0b\x00\x41\x95\x86\x03\x41\xd9\x00\x41\x95\x10\xfc\x0b\x00\x41\xbb\x9f\x01\x41\xda\x00\x41\xd0\x16\xfc\x0b\x00\x41\xa2\x88\x02\x41\xdb\x00\x41\xc0\x01\xfc\x0b\x00\x41\xba\xc9\x00\x41\xdc\x00\x41\x93\x11\xfc\x0b\x00\x41\xfd\xe0\x00\x41\xdd\x00\x41\x18\xfc\x0b\x00\x41\x8b\xee\x00\x41\xde\x00\x41\xc1\x04\xfc\x0b\x00\x41\x9a\xd8\x02\x41\xdf\x00\x41\xa9\x10\xfc\x0b\x00\x41\xff\x9e\x02\x41\xe0\x00\x41\xec\x1a\xfc\x0b\x00\x41\xf8\xb5\x01\x41\xe1\x00\x41\xcd\x15\xfc\x0b\x00\x41\xf8\x31\x41\xe2\x00\x41\xbe\x06\xfc\x0b\x00\x41\x9b\x84\x02\x41\xe3\x00\x41\x92\x0f\xfc\x0b\x00\x41\xb5\xab\x01\x41\xe4\x00\x41\xbe\x15\xfc\x0b\x00\x41\xce\xce\x03\x41\xe8\xa7\x03\x41\xb2\x10\xfc\x0a\x00\x00\x41\xb2\xec\x03\x41\xb8\xb2\x02\x41\xe6\x01\xfc\x0a\x00\x00\x41\xf9\x94\x03\x41\xcd\xb8\x01\x41\xfc\x11\xfc\x0a\x00\x00\x41\xb4\x34\x41\xbc\xbb\x01\x41\xff\x04\xfc\x0a\x00\x00\x41\xce\x36\x41\xf7\x84\x02\x41\xc9\x08\xfc\x0a\x00\x00\x41\xcb\x97\x01\x41\xec\xd0\x00\x41\xfd\x18\xfc\x0a\x00\x00\x41\xac\xd5\x01\x41\x86\xa9\x03\x41\xe4\x00\xfc\x0a\x00\x00\x41\xd5\xd4\x01\x41\xa2\xd5\x02\x41\xb5\x0d\xfc\x0a\x00\x00\x41\xf0\xd8\x03\x41\xb5\xc3\x00\x41\xf7\x00\xfc\x0a\x00\x00\x41\xbb\x2e\x41\x84\x12\x41\x92\x05\xfc\x0a\x00\x00\x41\xb3\x25\x41\xaf\x93\x03\x41\xdd\x11\xfc\x0a\x00\x00\x41\xc9\xe2\x00\x41\xfd\x95\x01\x41\xc1\x06\xfc\x0a\x00\x00\x41\xce\xdc\x00\x41\xa9\xeb\x02\x41\xe4\x19\xfc\x0
a\x00\x00\x41\xf0\xd8\x00\x41\xd4\xdf\x02\x41\xe9\x11\xfc\x0a\x00\x00\x41\x8a\x8b\x02\x41\xa9\x34\x41\x8c\x14\xfc\x0a\x00\x00\x41\xc8\x26\x41\x9a\x0d\x41\xb0\x0a\xfc\x0a\x00\x00\x41\xbc\xed\x03\x41\xd5\x3b\x41\x86\x0d\xfc\x0a\x00\x00\x41\x98\xdc\x02\x41\xa8\x8f\x01\x41\x21\xfc\x0a\x00\x00\x41\x8e\xd7\x02\x41\xcc\xae\x01\x41\x93\x0b\xfc\x0a\x00\x00\x41\xad\xec\x02\x41\x9b\x85\x03\x41\x9a\x0b\xfc\x0a\x00\x00\x41\xc4\xf1\x03\x41\xb3\xc4\x00\x41\xc2\x06\xfc\x0a\x00\x00\x41\xcd\x85\x02\x41\xa3\x9d\x01\x41\xf5\x19\xfc\x0a\x00\x00\x41\xff\xbc\x02\x41\xad\xa8\x03\x41\x81\x19\xfc\x0a\x00\x00\x41\xd4\xc9\x01\x41\xf6\xce\x03\x41\x94\x13\xfc\x0a\x00\x00\x41\xde\x99\x01\x41\xb2\xbc\x03\x41\xda\x02\xfc\x0a\x00\x00\x41\xec\xfb\x00\x41\xca\x98\x02\x41\xfe\x12\xfc\x0a\x00\x00\x41\xb0\xdc\x00\x41\xf6\x95\x02\x41\xac\x02\xfc\x0a\x00\x00\x41\xa3\xd0\x03\x41\x85\xed\x00\x41\xd1\x18\xfc\x0a\x00\x00\x41\xfb\x8b\x02\x41\xb2\xd9\x03\x41\x81\x0a\xfc\x0a\x00\x00\x41\x84\xc6\x00\x41\xf4\xdf\x00\x41\xaf\x07\xfc\x0a\x00\x00\x41\x8b\x16\x41\xb9\xd1\x00\x41\xdf\x0e\xfc\x0a\x00\x00\x41\xba\xd1\x02\x41\x86\xd7\x02\x41\xe2\x05\xfc\x0a\x00\x00\x41\xbe\xec\x03\x41\x85\x94\x01\x41\xfa\x00\xfc\x0a\x00\x00\x41\xec\xbb\x01\x41\xd9\xdd\x02\x41\xdb\x0d\xfc\x0a\x00\x00\x41\xd0\xb0\x01\x41\xa3\xf3\x00\x41\xbe\x05\xfc\x0a\x00\x00\x41\x94\xd8\x00\x41\xd3\xcf\x01\x41\xa6\x0e\xfc\x0a\x00\x00\x41\xb4\xb4\x01\x41\xf7\x9f\x01\x41\xa8\x08\xfc\x0a\x00\x00\x41\xa0\xbf\x03\x41\xf2\xab\x03\x41\xc7\x14\xfc\x0a\x00\x00\x41\x94\xc7\x01\x41\x81\x08\x41\xa9\x18\xfc\x0a\x00\x00\x41\xb4\x83\x03\x41\xbc\xd9\x02\x41\xcf\x07\xfc\x0a\x00\x00\x41\xf8\xdc\x01\x41\xfa\xc5\x02\x41\xa0\x12\xfc\x0a\x00\x00\x41\xe9\xde\x03\x41\xe6\x01\x41\xb8\x16\xfc\x0a\x00\x00\x41\xd0\xaf\x01\x41\x9a\x9a\x03\x41\x95\x11\xfc\x0a\x00\x00\x41\xe9\xbc\x02\x41\xea\xca\x00\x41\xa6\x0f\xfc\x0a\x00\x00\x41\xcc\xe2\x01\x41\xfe\xa2\x01\x41\x8a\x11\xfc\x0a\x00\x00\x41\xa5\x9e\x03\x41\xb3\xd7\x02\x41\x8d\x08\xfc\x0a\x00\x00\x41\x84\xc7\x01\x41\xd3\x96\x02\x41\xf2\x0
c\xfc\x0a\x00\x00\x41\x94\xc9\x03\x41\xfb\xe5\x02\x41\xc2\x0f\xfc\x0a\x00\x00\x41\x99\xab\x02\x41\x90\x2d\x41\xa3\x0f\xfc\x0a\x00\x00\x41\xd7\xde\x01\x41\xc4\xb0\x03\x41\xc0\x12\xfc\x0a\x00\x00\x41\x9b\xe9\x03\x41\xbc\x8d\x01\x41\xcc\x0a\xfc\x0a\x00\x00\x41\xe5\x87\x03\x41\xa5\xec\x00\x41\xfe\x02\xfc\x0a\x00\x00\x41\x88\x84\x01\x41\xf5\x9b\x02\x41\xec\x0e\xfc\x0a\x00\x00\x41\xe2\xf7\x02\x41\xde\xd8\x00\x41\xf7\x15\xfc\x0a\x00\x00\x41\xe0\xde\x01\x41\xaa\xbb\x02\x41\xc3\x02\xfc\x0a\x00\x00\x41\xb2\x95\x02\x41\xd0\xd9\x01\x41\x86\x0d\xfc\x0a\x00\x00\x41\xfa\xeb\x03\x41\xd4\xa0\x03\x41\xbd\x0a\xfc\x0a\x00\x00\x41\xb5\xee\x00\x41\xe8\xe9\x02\x41\x84\x05\xfc\x0a\x00\x00\x41\xe6\xe2\x01\x41\x82\x95\x01\x41\xf0\x03\xfc\x0a\x00\x00\x41\x98\xdf\x02\x41\xd9\xf3\x02\x41\xe0\x15\xfc\x0a\x00\x00\x41\x87\xb5\x02\x41\xf5\xdc\x02\x41\xc6\x0a\xfc\x0a\x00\x00\x41\xf0\xd0\x00\x41\xda\xe4\x01\x41\xc3\x0b\xfc\x0a\x00\x00\x41\xbf\xee\x02\x41\xe2\xe8\x02\x41\xbb\x0b\xfc\x0a\x00\x00\x41\xa9\x26\x41\xc4\xe0\x01\x41\xe7\x0e\xfc\x0a\x00\x00\x41\xfc\xa8\x02\x41\xa5\xbf\x03\x41\xd7\x0d\xfc\x0a\x00\x00\x41\xce\xce\x01\x41\xd7\xd4\x01\x41\xe7\x08\xfc\x0a\x00\x00\x41\xd3\xcb\x03\x41\xd1\xc0\x01\x41\xa7\x08\xfc\x0a\x00\x00\x41\xac\xdf\x03\x41\x86\xaf\x02\x41\xfe\x05\xfc\x0a\x00\x00\x41\x80\xd9\x02\x41\xec\x11\x41\xf0\x0b\xfc\x0a\x00\x00\x41\xe4\xff\x01\x41\x85\xf1\x02\x41\xc6\x17\xfc\x0a\x00\x00\x41\x8c\xd7\x00\x41\x8c\xa6\x01\x41\xf3\x07\xfc\x0a\x00\x00\x41\xf1\x3b\x41\xfc\xf6\x01\x41\xda\x17\xfc\x0a\x00\x00\x41\xfc\x8c\x01\x41\xbb\xe5\x00\x41\xf8\x19\xfc\x0a\x00\x00\x41\xda\xbf\x03\x41\xe1\xb4\x03\x41\xb4\x02\xfc\x0a\x00\x00\x41\xe3\xc0\x01\x41\xaf\x83\x01\x41\x83\x09\xfc\x0a\x00\x00\x41\xbc\x9b\x01\x41\x83\xcf\x00\x41\xd2\x05\xfc\x0a\x00\x00\x41\xe9\x16\x41\xaf\x2e\x41\xc2\x12\xfc\x0a\x00\x00\x41\xff\xfb\x01\x41\xaf\x87\x03\x41\xee\x16\xfc\x0a\x00\x00\x41\x96\xf6\x00\x41\x93\x87\x01\x41\xaf\x14\xfc\x0a\x00\x00\x41\x87\xe4\x02\x41\x9f\xde\x01\x41\xfd\x0f\xfc\x0a\x00\x00\x41\xed\xae\x03\x41\x91\x9
a\x02\x41\xa4\x14\xfc\x0a\x00\x00\x41\xad\xde\x01\x41\x8d\xa7\x03\x41\x90\x09\xfc\x0a\x00\x00\x41\xcf\xf6\x02\x41\x89\xa1\x03\x41\xc1\x18\xfc\x0a\x00\x00\x41\xb6\xef\x01\x41\xe3\xe0\x02\x41\xd9\x14\xfc\x0a\x00\x00\x41\xc1\x27\x41\xc7\x21\x41\x34\xfc\x0a\x00\x00\x41\xa4\x34\x41\x83\xbd\x01\x41\xb9\x03\xfc\x0a\x00\x00\x41\xd8\x81\x02\x41\xed\xd3\x01\x41\xf5\x1a\xfc\x0a\x00\x00\x41\x92\xfe\x01\x41\xec\xcf\x03\x41\xe1\x15\xfc\x0a\x00\x00\x41\xb9\x8c\x02\x41\x82\xc6\x00\x41\xe6\x12\xfc\x0a\x00\x00\x41\xe5\x8b\x01\x41\x8a\xaa\x03\x41\xb5\x1a\xfc\x0a\x00\x00\x41\x9d\xb1\x01\x41\xf7\xd8\x02\x41\x88\x01\xfc\x0a\x00\x00\x41\xd1\xcd\x03\x41\xa5\x37\x41\x95\x08\xfc\x0a\x00\x00\x41\xc1\xcf\x02\x41\xf4\xad\x03\x41\xd5\x12\xfc\x0a\x00\x00\x41\x95\xdd\x02\x41\xaa\x9d\x01\x41\xed\x06\xfc\x0a\x00\x00\x41\xca\x9f\x02\x41\xec\xc4\x01\x41\xf7\x1a\xfc\x0a\x00\x00\x41\xae\xe5\x02\x41\x90\xf9\x01\x41\xd6\x06\xfc\x0a\x00\x00\x41\xac\xbd\x01\x41\xfa\xf8\x01\x41\xe1\x0a\xfc\x0a\x00\x00\x41\xf2\x87\x02\x41\xb4\x05\x41\xba\x0c\xfc\x0a\x00\x00\x41\xca\xd9\x03\x41\x99\x91\x01\x41\xab\x17\xfc\x0a\x00\x00\x41\xc2\x89\x03\x41\xb7\xc2\x02\x41\xfe\x0a\xfc\x0a\x00\x00\x0b\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b");
+
+// memory_copy.wast:5222
+run(() => call($29, "test", []));
+
+// memory_copy.wast:5224
+assert_return(() => call($29, "checkRange", [0, 124, 0]), -1);
+
+// memory_copy.wast:5226
+assert_return(() => call($29, "checkRange", [124, 1517, 9]), -1);
+
+// memory_copy.wast:5228
+assert_return(() => call($29, "checkRange", [1517, 2132, 0]), -1);
+
+// memory_copy.wast:5230
+assert_return(() => call($29, "checkRange", [2132, 2827, 10]), -1);
+
+// memory_copy.wast:5232
+assert_return(() => call($29, "checkRange", [2827, 2921, 92]), -1);
+
+// memory_copy.wast:5234
+assert_return(() => call($29, "checkRange", [2921, 3538, 83]), -1);
+
+// memory_copy.wast:5236
+assert_return(() => call($29, "checkRange", [3538, 3786, 77]), -1);
+
+// memory_copy.wast:5238
+assert_return(() => call($29, "checkRange", [3786, 4042, 97]), -1);
+
+// memory_copy.wast:5240
+assert_return(() => call($29, "checkRange", [4042, 4651, 99]), -1);
+
+// memory_copy.wast:5242
+assert_return(() => call($29, "checkRange", [4651, 5057, 0]), -1);
+
+// memory_copy.wast:5244
+assert_return(() => call($29, "checkRange", [5057, 5109, 99]), -1);
+
+// memory_copy.wast:5246
+assert_return(() => call($29, "checkRange", [5109, 5291, 0]), -1);
+
+// memory_copy.wast:5248
+assert_return(() => call($29, "checkRange", [5291, 5524, 72]), -1);
+
+// memory_copy.wast:5250
+assert_return(() => call($29, "checkRange", [5524, 5691, 92]), -1);
+
+// memory_copy.wast:5252
+assert_return(() => call($29, "checkRange", [5691, 6552, 83]), -1);
+
+// memory_copy.wast:5254
+assert_return(() => call($29, "checkRange", [6552, 7133, 77]), -1);
+
+// memory_copy.wast:5256
+assert_return(() => call($29, "checkRange", [7133, 7665, 99]), -1);
+
+// memory_copy.wast:5258
+assert_return(() => call($29, "checkRange", [7665, 8314, 0]), -1);
+
+// memory_copy.wast:5260
+assert_return(() => call($29, "checkRange", [8314, 8360, 62]), -1);
+
+// memory_copy.wast:5262
+assert_return(() => call($29, "checkRange", [8360, 8793, 86]), -1);
+
+// memory_copy.wast:5264
+assert_return(() => call($29, "checkRange", [8793, 8979, 83]), -1);
+
+// memory_copy.wast:5266
+assert_return(() => call($29, "checkRange", [8979, 9373, 79]), -1);
+
+// memory_copy.wast:5268
+assert_return(() => call($29, "checkRange", [9373, 9518, 95]), -1);
+
+// memory_copy.wast:5270
+assert_return(() => call($29, "checkRange", [9518, 9934, 59]), -1);
+
+// memory_copy.wast:5272
+assert_return(() => call($29, "checkRange", [9934, 10087, 77]), -1);
+
+// memory_copy.wast:5274
+assert_return(() => call($29, "checkRange", [10087, 10206, 5]), -1);
+
+// memory_copy.wast:5276
+assert_return(() => call($29, "checkRange", [10206, 10230, 77]), -1);
+
+// memory_copy.wast:5278
+assert_return(() => call($29, "checkRange", [10230, 10249, 41]), -1);
+
+// memory_copy.wast:5280
+assert_return(() => call($29, "checkRange", [10249, 11148, 83]), -1);
+
+// memory_copy.wast:5282
+assert_return(() => call($29, "checkRange", [11148, 11356, 74]), -1);
+
+// memory_copy.wast:5284
+assert_return(() => call($29, "checkRange", [11356, 11380, 93]), -1);
+
+// memory_copy.wast:5286
+assert_return(() => call($29, "checkRange", [11380, 11939, 74]), -1);
+
+// memory_copy.wast:5288
+assert_return(() => call($29, "checkRange", [11939, 12159, 68]), -1);
+
+// memory_copy.wast:5290
+assert_return(() => call($29, "checkRange", [12159, 12575, 83]), -1);
+
+// memory_copy.wast:5292
+assert_return(() => call($29, "checkRange", [12575, 12969, 79]), -1);
+
+// memory_copy.wast:5294
+assert_return(() => call($29, "checkRange", [12969, 13114, 95]), -1);
+
+// memory_copy.wast:5296
+assert_return(() => call($29, "checkRange", [13114, 14133, 59]), -1);
+
+// memory_copy.wast:5298
+assert_return(() => call($29, "checkRange", [14133, 14404, 76]), -1);
+
+// memory_copy.wast:5300
+assert_return(() => call($29, "checkRange", [14404, 14428, 57]), -1);
+
+// memory_copy.wast:5302
+assert_return(() => call($29, "checkRange", [14428, 14458, 59]), -1);
+
+// memory_copy.wast:5304
+assert_return(() => call($29, "checkRange", [14458, 14580, 32]), -1);
+
+// memory_copy.wast:5306
+assert_return(() => call($29, "checkRange", [14580, 14777, 89]), -1);
+
+// memory_copy.wast:5308
+assert_return(() => call($29, "checkRange", [14777, 15124, 59]), -1);
+
+// memory_copy.wast:5310
+assert_return(() => call($29, "checkRange", [15124, 15126, 36]), -1);
+
+// memory_copy.wast:5312
+assert_return(() => call($29, "checkRange", [15126, 15192, 100]), -1);
+
+// memory_copy.wast:5314
+assert_return(() => call($29, "checkRange", [15192, 15871, 96]), -1);
+
+// memory_copy.wast:5316
+assert_return(() => call($29, "checkRange", [15871, 15998, 95]), -1);
+
+// memory_copy.wast:5318
+assert_return(() => call($29, "checkRange", [15998, 17017, 59]), -1);
+
+// memory_copy.wast:5320
+assert_return(() => call($29, "checkRange", [17017, 17288, 76]), -1);
+
+// memory_copy.wast:5322
+assert_return(() => call($29, "checkRange", [17288, 17312, 57]), -1);
+
+// memory_copy.wast:5324
+assert_return(() => call($29, "checkRange", [17312, 17342, 59]), -1);
+
+// memory_copy.wast:5326
+assert_return(() => call($29, "checkRange", [17342, 17464, 32]), -1);
+
+// memory_copy.wast:5328
+assert_return(() => call($29, "checkRange", [17464, 17661, 89]), -1);
+
+// memory_copy.wast:5330
+assert_return(() => call($29, "checkRange", [17661, 17727, 59]), -1);
+
+// memory_copy.wast:5332
+assert_return(() => call($29, "checkRange", [17727, 17733, 5]), -1);
+
+// memory_copy.wast:5334
+assert_return(() => call($29, "checkRange", [17733, 17893, 96]), -1);
+
+// memory_copy.wast:5336
+assert_return(() => call($29, "checkRange", [17893, 18553, 77]), -1);
+
+// memory_copy.wast:5338
+assert_return(() => call($29, "checkRange", [18553, 18744, 42]), -1);
+
+// memory_copy.wast:5340
+assert_return(() => call($29, "checkRange", [18744, 18801, 76]), -1);
+
+// memory_copy.wast:5342
+assert_return(() => call($29, "checkRange", [18801, 18825, 57]), -1);
+
+// memory_copy.wast:5344
+assert_return(() => call($29, "checkRange", [18825, 18876, 59]), -1);
+
+// memory_copy.wast:5346
+assert_return(() => call($29, "checkRange", [18876, 18885, 77]), -1);
+
+// memory_copy.wast:5348
+assert_return(() => call($29, "checkRange", [18885, 18904, 41]), -1);
+
+// memory_copy.wast:5350
+assert_return(() => call($29, "checkRange", [18904, 19567, 83]), -1);
+
+// memory_copy.wast:5352
+assert_return(() => call($29, "checkRange", [19567, 20403, 96]), -1);
+
+// memory_copy.wast:5354
+assert_return(() => call($29, "checkRange", [20403, 21274, 77]), -1);
+
+// memory_copy.wast:5356
+assert_return(() => call($29, "checkRange", [21274, 21364, 100]), -1);
+
+// memory_copy.wast:5358
+assert_return(() => call($29, "checkRange", [21364, 21468, 74]), -1);
+
+// memory_copy.wast:5360
+assert_return(() => call($29, "checkRange", [21468, 21492, 93]), -1);
+
+// memory_copy.wast:5362
+assert_return(() => call($29, "checkRange", [21492, 22051, 74]), -1);
+
+// memory_copy.wast:5364
+assert_return(() => call($29, "checkRange", [22051, 22480, 68]), -1);
+
+// memory_copy.wast:5366
+assert_return(() => call($29, "checkRange", [22480, 22685, 100]), -1);
+
+// memory_copy.wast:5368
+assert_return(() => call($29, "checkRange", [22685, 22694, 68]), -1);
+
+// memory_copy.wast:5370
+assert_return(() => call($29, "checkRange", [22694, 22821, 10]), -1);
+
+// memory_copy.wast:5372
+assert_return(() => call($29, "checkRange", [22821, 22869, 100]), -1);
+
+// memory_copy.wast:5374
+assert_return(() => call($29, "checkRange", [22869, 24107, 97]), -1);
+
+// memory_copy.wast:5376
+assert_return(() => call($29, "checkRange", [24107, 24111, 37]), -1);
+
+// memory_copy.wast:5378
+assert_return(() => call($29, "checkRange", [24111, 24236, 77]), -1);
+
+// memory_copy.wast:5380
+assert_return(() => call($29, "checkRange", [24236, 24348, 72]), -1);
+
+// memory_copy.wast:5382
+assert_return(() => call($29, "checkRange", [24348, 24515, 92]), -1);
+
+// memory_copy.wast:5384
+assert_return(() => call($29, "checkRange", [24515, 24900, 83]), -1);
+
+// memory_copy.wast:5386
+assert_return(() => call($29, "checkRange", [24900, 25136, 95]), -1);
+
+// memory_copy.wast:5388
+assert_return(() => call($29, "checkRange", [25136, 25182, 85]), -1);
+
+// memory_copy.wast:5390
+assert_return(() => call($29, "checkRange", [25182, 25426, 68]), -1);
+
+// memory_copy.wast:5392
+assert_return(() => call($29, "checkRange", [25426, 25613, 89]), -1);
+
+// memory_copy.wast:5394
+assert_return(() => call($29, "checkRange", [25613, 25830, 96]), -1);
+
+// memory_copy.wast:5396
+assert_return(() => call($29, "checkRange", [25830, 26446, 100]), -1);
+
+// memory_copy.wast:5398
+assert_return(() => call($29, "checkRange", [26446, 26517, 10]), -1);
+
+// memory_copy.wast:5400
+assert_return(() => call($29, "checkRange", [26517, 27468, 92]), -1);
+
+// memory_copy.wast:5402
+assert_return(() => call($29, "checkRange", [27468, 27503, 95]), -1);
+
+// memory_copy.wast:5404
+assert_return(() => call($29, "checkRange", [27503, 27573, 77]), -1);
+
+// memory_copy.wast:5406
+assert_return(() => call($29, "checkRange", [27573, 28245, 92]), -1);
+
+// memory_copy.wast:5408
+assert_return(() => call($29, "checkRange", [28245, 28280, 95]), -1);
+
+// memory_copy.wast:5410
+assert_return(() => call($29, "checkRange", [28280, 29502, 77]), -1);
+
+// memory_copy.wast:5412
+assert_return(() => call($29, "checkRange", [29502, 29629, 42]), -1);
+
+// memory_copy.wast:5414
+assert_return(() => call($29, "checkRange", [29629, 30387, 83]), -1);
+
+// memory_copy.wast:5416
+assert_return(() => call($29, "checkRange", [30387, 30646, 77]), -1);
+
+// memory_copy.wast:5418
+assert_return(() => call($29, "checkRange", [30646, 31066, 92]), -1);
+
+// memory_copy.wast:5420
+assert_return(() => call($29, "checkRange", [31066, 31131, 77]), -1);
+
+// memory_copy.wast:5422
+assert_return(() => call($29, "checkRange", [31131, 31322, 42]), -1);
+
+// memory_copy.wast:5424
+assert_return(() => call($29, "checkRange", [31322, 31379, 76]), -1);
+
+// memory_copy.wast:5426
+assert_return(() => call($29, "checkRange", [31379, 31403, 57]), -1);
+
+// memory_copy.wast:5428
+assert_return(() => call($29, "checkRange", [31403, 31454, 59]), -1);
+
+// memory_copy.wast:5430
+assert_return(() => call($29, "checkRange", [31454, 31463, 77]), -1);
+
+// memory_copy.wast:5432
+assert_return(() => call($29, "checkRange", [31463, 31482, 41]), -1);
+
+// memory_copy.wast:5434
+assert_return(() => call($29, "checkRange", [31482, 31649, 83]), -1);
+
+// memory_copy.wast:5436
+assert_return(() => call($29, "checkRange", [31649, 31978, 72]), -1);
+
+// memory_copy.wast:5438
+assert_return(() => call($29, "checkRange", [31978, 32145, 92]), -1);
+
+// memory_copy.wast:5440
+assert_return(() => call($29, "checkRange", [32145, 32530, 83]), -1);
+
+// memory_copy.wast:5442
+assert_return(() => call($29, "checkRange", [32530, 32766, 95]), -1);
+
+// memory_copy.wast:5444
+assert_return(() => call($29, "checkRange", [32766, 32812, 85]), -1);
+
+// memory_copy.wast:5446
+assert_return(() => call($29, "checkRange", [32812, 33056, 68]), -1);
+
+// memory_copy.wast:5448
+assert_return(() => call($29, "checkRange", [33056, 33660, 89]), -1);
+
+// memory_copy.wast:5450
+assert_return(() => call($29, "checkRange", [33660, 33752, 59]), -1);
+
+// memory_copy.wast:5452
+assert_return(() => call($29, "checkRange", [33752, 33775, 36]), -1);
+
+// memory_copy.wast:5454
+assert_return(() => call($29, "checkRange", [33775, 33778, 32]), -1);
+
+// memory_copy.wast:5456
+assert_return(() => call($29, "checkRange", [33778, 34603, 9]), -1);
+
+// memory_copy.wast:5458
+assert_return(() => call($29, "checkRange", [34603, 35218, 0]), -1);
+
+// memory_copy.wast:5460
+assert_return(() => call($29, "checkRange", [35218, 35372, 10]), -1);
+
+// memory_copy.wast:5462
+assert_return(() => call($29, "checkRange", [35372, 35486, 77]), -1);
+
+// memory_copy.wast:5464
+assert_return(() => call($29, "checkRange", [35486, 35605, 5]), -1);
+
+// memory_copy.wast:5466
+assert_return(() => call($29, "checkRange", [35605, 35629, 77]), -1);
+
+// memory_copy.wast:5468
+assert_return(() => call($29, "checkRange", [35629, 35648, 41]), -1);
+
+// memory_copy.wast:5470
+assert_return(() => call($29, "checkRange", [35648, 36547, 83]), -1);
+
+// memory_copy.wast:5472
+assert_return(() => call($29, "checkRange", [36547, 36755, 74]), -1);
+
+// memory_copy.wast:5474
+assert_return(() => call($29, "checkRange", [36755, 36767, 93]), -1);
+
+// memory_copy.wast:5476
+assert_return(() => call($29, "checkRange", [36767, 36810, 83]), -1);
+
+// memory_copy.wast:5478
+assert_return(() => call($29, "checkRange", [36810, 36839, 100]), -1);
+
+// memory_copy.wast:5480
+assert_return(() => call($29, "checkRange", [36839, 37444, 96]), -1);
+
+// memory_copy.wast:5482
+assert_return(() => call($29, "checkRange", [37444, 38060, 100]), -1);
+
+// memory_copy.wast:5484
+assert_return(() => call($29, "checkRange", [38060, 38131, 10]), -1);
+
+// memory_copy.wast:5486
+assert_return(() => call($29, "checkRange", [38131, 39082, 92]), -1);
+
+// memory_copy.wast:5488
+assert_return(() => call($29, "checkRange", [39082, 39117, 95]), -1);
+
+// memory_copy.wast:5490
+assert_return(() => call($29, "checkRange", [39117, 39187, 77]), -1);
+
+// memory_copy.wast:5492
+assert_return(() => call($29, "checkRange", [39187, 39859, 92]), -1);
+
+// memory_copy.wast:5494
+assert_return(() => call($29, "checkRange", [39859, 39894, 95]), -1);
+
+// memory_copy.wast:5496
+assert_return(() => call($29, "checkRange", [39894, 40257, 77]), -1);
+
+// memory_copy.wast:5498
+assert_return(() => call($29, "checkRange", [40257, 40344, 89]), -1);
+
+// memory_copy.wast:5500
+assert_return(() => call($29, "checkRange", [40344, 40371, 59]), -1);
+
+// memory_copy.wast:5502
+assert_return(() => call($29, "checkRange", [40371, 40804, 77]), -1);
+
+// memory_copy.wast:5504
+assert_return(() => call($29, "checkRange", [40804, 40909, 5]), -1);
+
+// memory_copy.wast:5506
+assert_return(() => call($29, "checkRange", [40909, 42259, 92]), -1);
+
+// memory_copy.wast:5508
+assert_return(() => call($29, "checkRange", [42259, 42511, 77]), -1);
+
+// memory_copy.wast:5510
+assert_return(() => call($29, "checkRange", [42511, 42945, 83]), -1);
+
+// memory_copy.wast:5512
+assert_return(() => call($29, "checkRange", [42945, 43115, 77]), -1);
+
+// memory_copy.wast:5514
+assert_return(() => call($29, "checkRange", [43115, 43306, 42]), -1);
+
+// memory_copy.wast:5516
+assert_return(() => call($29, "checkRange", [43306, 43363, 76]), -1);
+
+// memory_copy.wast:5518
+assert_return(() => call($29, "checkRange", [43363, 43387, 57]), -1);
+
+// memory_copy.wast:5520
+assert_return(() => call($29, "checkRange", [43387, 43438, 59]), -1);
+
+// memory_copy.wast:5522
+assert_return(() => call($29, "checkRange", [43438, 43447, 77]), -1);
+
+// memory_copy.wast:5524
+assert_return(() => call($29, "checkRange", [43447, 43466, 41]), -1);
+
+// memory_copy.wast:5526
+assert_return(() => call($29, "checkRange", [43466, 44129, 83]), -1);
+
+// memory_copy.wast:5528
+assert_return(() => call($29, "checkRange", [44129, 44958, 96]), -1);
+
+// memory_copy.wast:5530
+assert_return(() => call($29, "checkRange", [44958, 45570, 77]), -1);
+
+// memory_copy.wast:5532
+assert_return(() => call($29, "checkRange", [45570, 45575, 92]), -1);
+
+// memory_copy.wast:5534
+assert_return(() => call($29, "checkRange", [45575, 45640, 77]), -1);
+
+// memory_copy.wast:5536
+assert_return(() => call($29, "checkRange", [45640, 45742, 42]), -1);
+
+// memory_copy.wast:5538
+assert_return(() => call($29, "checkRange", [45742, 45832, 72]), -1);
+
+// memory_copy.wast:5540
+assert_return(() => call($29, "checkRange", [45832, 45999, 92]), -1);
+
+// memory_copy.wast:5542
+assert_return(() => call($29, "checkRange", [45999, 46384, 83]), -1);
+
+// memory_copy.wast:5544
+assert_return(() => call($29, "checkRange", [46384, 46596, 95]), -1);
+
+// memory_copy.wast:5546
+assert_return(() => call($29, "checkRange", [46596, 46654, 92]), -1);
+
+// memory_copy.wast:5548
+assert_return(() => call($29, "checkRange", [46654, 47515, 83]), -1);
+
+// memory_copy.wast:5550
+assert_return(() => call($29, "checkRange", [47515, 47620, 77]), -1);
+
+// memory_copy.wast:5552
+assert_return(() => call($29, "checkRange", [47620, 47817, 79]), -1);
+
+// memory_copy.wast:5554
+assert_return(() => call($29, "checkRange", [47817, 47951, 95]), -1);
+
+// memory_copy.wast:5556
+assert_return(() => call($29, "checkRange", [47951, 48632, 100]), -1);
+
+// memory_copy.wast:5558
+assert_return(() => call($29, "checkRange", [48632, 48699, 97]), -1);
+
+// memory_copy.wast:5560
+assert_return(() => call($29, "checkRange", [48699, 48703, 37]), -1);
+
+// memory_copy.wast:5562
+assert_return(() => call($29, "checkRange", [48703, 49764, 77]), -1);
+
+// memory_copy.wast:5564
+assert_return(() => call($29, "checkRange", [49764, 49955, 42]), -1);
+
+// memory_copy.wast:5566
+assert_return(() => call($29, "checkRange", [49955, 50012, 76]), -1);
+
+// memory_copy.wast:5568
+assert_return(() => call($29, "checkRange", [50012, 50036, 57]), -1);
+
+// memory_copy.wast:5570
+assert_return(() => call($29, "checkRange", [50036, 50087, 59]), -1);
+
+// memory_copy.wast:5572
+assert_return(() => call($29, "checkRange", [50087, 50096, 77]), -1);
+
+// memory_copy.wast:5574
+assert_return(() => call($29, "checkRange", [50096, 50115, 41]), -1);
+
+// memory_copy.wast:5576
+assert_return(() => call($29, "checkRange", [50115, 50370, 83]), -1);
+
+// memory_copy.wast:5578
+assert_return(() => call($29, "checkRange", [50370, 51358, 92]), -1);
+
+// memory_copy.wast:5580
+assert_return(() => call($29, "checkRange", [51358, 51610, 77]), -1);
+
+// memory_copy.wast:5582
+assert_return(() => call($29, "checkRange", [51610, 51776, 83]), -1);
+
+// memory_copy.wast:5584
+assert_return(() => call($29, "checkRange", [51776, 51833, 89]), -1);
+
+// memory_copy.wast:5586
+assert_return(() => call($29, "checkRange", [51833, 52895, 100]), -1);
+
+// memory_copy.wast:5588
+assert_return(() => call($29, "checkRange", [52895, 53029, 97]), -1);
+
+// memory_copy.wast:5590
+assert_return(() => call($29, "checkRange", [53029, 53244, 68]), -1);
+
+// memory_copy.wast:5592
+assert_return(() => call($29, "checkRange", [53244, 54066, 100]), -1);
+
+// memory_copy.wast:5594
+assert_return(() => call($29, "checkRange", [54066, 54133, 97]), -1);
+
+// memory_copy.wast:5596
+assert_return(() => call($29, "checkRange", [54133, 54137, 37]), -1);
+
+// memory_copy.wast:5598
+assert_return(() => call($29, "checkRange", [54137, 55198, 77]), -1);
+
+// memory_copy.wast:5600
+assert_return(() => call($29, "checkRange", [55198, 55389, 42]), -1);
+
+// memory_copy.wast:5602
+assert_return(() => call($29, "checkRange", [55389, 55446, 76]), -1);
+
+// memory_copy.wast:5604
+assert_return(() => call($29, "checkRange", [55446, 55470, 57]), -1);
+
+// memory_copy.wast:5606
+assert_return(() => call($29, "checkRange", [55470, 55521, 59]), -1);
+
+// memory_copy.wast:5608
+assert_return(() => call($29, "checkRange", [55521, 55530, 77]), -1);
+
+// memory_copy.wast:5610
+assert_return(() => call($29, "checkRange", [55530, 55549, 41]), -1);
+
+// memory_copy.wast:5612
+assert_return(() => call($29, "checkRange", [55549, 56212, 83]), -1);
+
+// memory_copy.wast:5614
+assert_return(() => call($29, "checkRange", [56212, 57048, 96]), -1);
+
+// memory_copy.wast:5616
+assert_return(() => call($29, "checkRange", [57048, 58183, 77]), -1);
+
+// memory_copy.wast:5618
+assert_return(() => call($29, "checkRange", [58183, 58202, 41]), -1);
+
+// memory_copy.wast:5620
+assert_return(() => call($29, "checkRange", [58202, 58516, 83]), -1);
+
+// memory_copy.wast:5622
+assert_return(() => call($29, "checkRange", [58516, 58835, 95]), -1);
+
+// memory_copy.wast:5624
+assert_return(() => call($29, "checkRange", [58835, 58855, 77]), -1);
+
+// memory_copy.wast:5626
+assert_return(() => call($29, "checkRange", [58855, 59089, 95]), -1);
+
+// memory_copy.wast:5628
+assert_return(() => call($29, "checkRange", [59089, 59145, 77]), -1);
+
+// memory_copy.wast:5630
+assert_return(() => call($29, "checkRange", [59145, 59677, 99]), -1);
+
+// memory_copy.wast:5632
+assert_return(() => call($29, "checkRange", [59677, 60134, 0]), -1);
+
+// memory_copy.wast:5634
+assert_return(() => call($29, "checkRange", [60134, 60502, 89]), -1);
+
+// memory_copy.wast:5636
+assert_return(() => call($29, "checkRange", [60502, 60594, 59]), -1);
+
+// memory_copy.wast:5638
+assert_return(() => call($29, "checkRange", [60594, 60617, 36]), -1);
+
+// memory_copy.wast:5640
+assert_return(() => call($29, "checkRange", [60617, 60618, 32]), -1);
+
+// memory_copy.wast:5642
+assert_return(() => call($29, "checkRange", [60618, 60777, 42]), -1);
+
+// memory_copy.wast:5644
+assert_return(() => call($29, "checkRange", [60777, 60834, 76]), -1);
+
+// memory_copy.wast:5646
+assert_return(() => call($29, "checkRange", [60834, 60858, 57]), -1);
+
+// memory_copy.wast:5648
+assert_return(() => call($29, "checkRange", [60858, 60909, 59]), -1);
+
+// memory_copy.wast:5650
+assert_return(() => call($29, "checkRange", [60909, 60918, 77]), -1);
+
+// memory_copy.wast:5652
+assert_return(() => call($29, "checkRange", [60918, 60937, 41]), -1);
+
+// memory_copy.wast:5654
+assert_return(() => call($29, "checkRange", [60937, 61600, 83]), -1);
+
+// memory_copy.wast:5656
+assert_return(() => call($29, "checkRange", [61600, 62436, 96]), -1);
+
+// memory_copy.wast:5658
+assert_return(() => call($29, "checkRange", [62436, 63307, 77]), -1);
+
+// memory_copy.wast:5660
+assert_return(() => call($29, "checkRange", [63307, 63397, 100]), -1);
+
+// memory_copy.wast:5662
+assert_return(() => call($29, "checkRange", [63397, 63501, 74]), -1);
+
+// memory_copy.wast:5664
+assert_return(() => call($29, "checkRange", [63501, 63525, 93]), -1);
+
+// memory_copy.wast:5666
+assert_return(() => call($29, "checkRange", [63525, 63605, 74]), -1);
+
+// memory_copy.wast:5668
+assert_return(() => call($29, "checkRange", [63605, 63704, 100]), -1);
+
+// memory_copy.wast:5670
+assert_return(() => call($29, "checkRange", [63704, 63771, 97]), -1);
+
+// memory_copy.wast:5672
+assert_return(() => call($29, "checkRange", [63771, 63775, 37]), -1);
+
+// memory_copy.wast:5674
+assert_return(() => call($29, "checkRange", [63775, 64311, 77]), -1);
+
+// memory_copy.wast:5676
+assert_return(() => call($29, "checkRange", [64311, 64331, 26]), -1);
+
+// memory_copy.wast:5678
+assert_return(() => call($29, "checkRange", [64331, 64518, 92]), -1);
+
+// memory_copy.wast:5680
+assert_return(() => call($29, "checkRange", [64518, 64827, 11]), -1);
+
+// memory_copy.wast:5682
+assert_return(() => call($29, "checkRange", [64827, 64834, 26]), -1);
+
+// memory_copy.wast:5684
+assert_return(() => call($29, "checkRange", [64834, 65536, 0]), -1);
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast
new file mode 100644
index 0000000000..8cc21af317
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast
@@ -0,0 +1,673 @@
+;;
+;; Generated by ../meta/generate_memory_fill.js
+;;
+
+(module
+ (memory 1 1)
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "test")
+ (memory.fill (i32.const 0xFF00) (i32.const 0x55) (i32.const 256))))
+(invoke "test")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65280) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65280) (i32.const 65536) (i32.const 85))
+ (i32.const -1))
+(module
+ (memory 1 1)
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "test")
+ (memory.fill (i32.const 0xFF00) (i32.const 0x55) (i32.const 257))))
+(assert_trap (invoke "test") "out of bounds memory access")
+
+(module
+ (memory 1 1)
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "test")
+ (memory.fill (i32.const 0xFFFFFF00) (i32.const 0x55) (i32.const 257))))
+(assert_trap (invoke "test") "out of bounds memory access")
+
+(module
+ (memory 1 1)
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "test")
+ (memory.fill (i32.const 0x12) (i32.const 0x55) (i32.const 0))))
+(invoke "test")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+(module
+ (memory 1 1)
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "test")
+ (memory.fill (i32.const 0x10000) (i32.const 0x55) (i32.const 0))))
+(invoke "test")
+
+(module
+ (memory 1 1)
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "test")
+ (memory.fill (i32.const 0x1) (i32.const 0xAA) (i32.const 0xFFFE))))
+(invoke "test")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 1) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 1) (i32.const 65535) (i32.const 170))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65535) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+
+(module
+ (memory 1 1)
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "test")
+ (memory.fill (i32.const 0x12) (i32.const 0x55) (i32.const 10))
+ (memory.fill (i32.const 0x15) (i32.const 0xAA) (i32.const 4))))
+(invoke "test")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 18) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 18) (i32.const 21) (i32.const 85))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 21) (i32.const 25) (i32.const 170))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 25) (i32.const 28) (i32.const 85))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 28) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+(assert_invalid
+ (module
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (i32.const 20) (i32.const 30))))
+ "unknown memory 0")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (i32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (i32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (i32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (f32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (f32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (f32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (f32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (i64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (i64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (i64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (i64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (f64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (f64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (f64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i32.const 10) (f64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (i32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (i32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (i32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (i32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (f32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (f32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (f32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (f32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (i64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (i64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (i64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (i64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (f64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (f64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (f64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f32.const 10) (f64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (i32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (i32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (i32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (i32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (f32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (f32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (f32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (f32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (i64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (i64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (i64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (i64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (f64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (f64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (f64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (i64.const 10) (f64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (i32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (i32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (i32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (i32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (f32.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (f32.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (f32.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (f32.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (i64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (i64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (i64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (i64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (f64.const 20) (i32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (f64.const 20) (f32.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (f64.const 20) (i64.const 30))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1 1)
+ (func (export "testfn")
+ (memory.fill (f64.const 10) (f64.const 20) (f64.const 30))))
+ "type mismatch")
+
+(module
+ (memory 1 1 )
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "run") (param $offs i32) (param $val i32) (param $len i32)
+ (memory.fill (local.get $offs) (local.get $val) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 65280) (i32.const 37) (i32.const 512))
+ "out of bounds")
+
+(assert_return (invoke "checkRange" (i32.const 65280) (i32.const 65536) (i32.const 37))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65280) (i32.const 0))
+ (i32.const -1))
+(module
+ (memory 1 1 )
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "run") (param $offs i32) (param $val i32) (param $len i32)
+ (memory.fill (local.get $offs) (local.get $val) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 65279) (i32.const 37) (i32.const 514))
+ "out of bounds")
+
+(assert_return (invoke "checkRange" (i32.const 65279) (i32.const 65536) (i32.const 37))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65279) (i32.const 0))
+ (i32.const -1))
+(module
+ (memory 1 1 )
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "run") (param $offs i32) (param $val i32) (param $len i32)
+ (memory.fill (local.get $offs) (local.get $val) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 65279) (i32.const 37) (i32.const 4294967295))
+ "out of bounds")
+
+(assert_return (invoke "checkRange" (i32.const 65279) (i32.const 65536) (i32.const 37))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65279) (i32.const 0))
+ (i32.const -1))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast.js
new file mode 100644
index 0000000000..5277588dcd
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_fill.wast.js
@@ -0,0 +1,440 @@
+
+'use strict';
+
+let spectest = {
+ print: console.log.bind(console),
+ print_i32: console.log.bind(console),
+ print_i32_f32: console.log.bind(console),
+ print_f64_f64: console.log.bind(console),
+ print_f32: console.log.bind(console),
+ print_f64: console.log.bind(console),
+ global_i32: 666,
+ global_f32: 666,
+ global_f64: 666,
+ table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
+ memory: new WebAssembly.Memory({initial: 1, maximum: 2})
+};
+let handler = {
+ get(target, prop) {
+ return (prop in target) ? target[prop] : {};
+ }
+};
+let registry = new Proxy({spectest}, handler);
+
+function register(name, instance) {
+ registry[name] = instance.exports;
+}
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function get(instance, name) {
+ let v = instance.exports[name];
+ return (v instanceof WebAssembly.Global) ? v.value : v;
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function run(action) {
+ action();
+}
+
+function assert_malformed(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm decoding failure expected");
+}
+
+function assert_invalid(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm validation failure expected");
+}
+
+function assert_unlinkable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.LinkError) return;
+ }
+ throw new Error("Wasm linking failure expected");
+}
+
+function assert_uninstantiable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+function assert_trap(action) {
+ try { action() } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+let StackOverflow;
+try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
+
+function assert_exhaustion(action) {
+ try { action() } catch (e) {
+ if (e instanceof StackOverflow) return;
+ }
+ throw new Error("Wasm resource exhaustion expected");
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+function assert_return_canonical_nan(action) {
+ let actual = action();
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test that it's a canonical NaN.
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+function assert_return_arithmetic_nan(action) {
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test for specific bitpatterns here.
+ let actual = action();
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+// memory_fill.wast:5
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc1\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8f\x80\x80\x80\x00\x00\x41\x80\xfe\x03\x41\xd5\x00\x41\x80\x02\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:21
+run(() => call($1, "test", []));
+
+// memory_fill.wast:23
+assert_return(() => call($1, "checkRange", [0, 65280, 0]), -1);
+
+// memory_fill.wast:25
+assert_return(() => call($1, "checkRange", [65280, 65536, 85]), -1);
+
+// memory_fill.wast:27
+let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc1\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8f\x80\x80\x80\x00\x00\x41\x80\xfe\x03\x41\xd5\x00\x41\x81\x02\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:43
+assert_trap(() => call($2, "test", []));
+
+// memory_fill.wast:45
+let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc0\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8e\x80\x80\x80\x00\x00\x41\x80\x7e\x41\xd5\x00\x41\x81\x02\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:61
+assert_trap(() => call($3, "test", []));
+
+// memory_fill.wast:63
+let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x41\x12\x41\xd5\x00\x41\x00\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:79
+run(() => call($4, "test", []));
+
+// memory_fill.wast:81
+assert_return(() => call($4, "checkRange", [0, 65536, 0]), -1);
+
+// memory_fill.wast:83
+let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc0\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8e\x80\x80\x80\x00\x00\x41\x80\x80\x04\x41\xd5\x00\x41\x00\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:99
+run(() => call($5, "test", []));
+
+// memory_fill.wast:101
+let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc0\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8e\x80\x80\x80\x00\x00\x41\x01\x41\xaa\x01\x41\xfe\xff\x03\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:117
+run(() => call($6, "test", []));
+
+// memory_fill.wast:119
+assert_return(() => call($6, "checkRange", [0, 1, 0]), -1);
+
+// memory_fill.wast:121
+assert_return(() => call($6, "checkRange", [1, 65535, 170]), -1);
+
+// memory_fill.wast:123
+assert_return(() => call($6, "checkRange", [65535, 65536, 0]), -1);
+
+// memory_fill.wast:126
+let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8b\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x95\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x04\x74\x65\x73\x74\x00\x01\x0a\xc8\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x96\x80\x80\x80\x00\x00\x41\x12\x41\xd5\x00\x41\x0a\xfc\x0b\x00\x41\x15\x41\xaa\x01\x41\x04\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:143
+run(() => call($7, "test", []));
+
+// memory_fill.wast:145
+assert_return(() => call($7, "checkRange", [0, 18, 0]), -1);
+
+// memory_fill.wast:147
+assert_return(() => call($7, "checkRange", [18, 21, 85]), -1);
+
+// memory_fill.wast:149
+assert_return(() => call($7, "checkRange", [21, 25, 170]), -1);
+
+// memory_fill.wast:151
+assert_return(() => call($7, "checkRange", [25, 28, 85]), -1);
+
+// memory_fill.wast:153
+assert_return(() => call($7, "checkRange", [28, 65536, 0]), -1);
+
+// memory_fill.wast:155
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:161
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:168
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:175
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:182
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:189
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:196
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:203
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x41\x0a\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:210
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:217
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:224
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:231
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:238
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:245
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:252
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:259
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x41\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:266
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:273
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:280
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:287
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:294
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:301
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9a\x80\x80\x80\x00\x01\x94\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:308
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:315
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9e\x80\x80\x80\x00\x01\x98\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:322
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:329
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:336
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:343
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:350
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:357
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9e\x80\x80\x80\x00\x01\x98\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:364
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:371
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa2\x80\x80\x80\x00\x01\x9c\x80\x80\x80\x00\x00\x43\x00\x00\x20\x41\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:378
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:385
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:392
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:399
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:406
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:413
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x97\x80\x80\x80\x00\x01\x91\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:420
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:427
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x42\x0a\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:434
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:441
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:448
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x91\x80\x80\x80\x00\x01\x8b\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:455
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:462
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:469
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:476
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:483
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x42\x0a\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:490
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:497
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:504
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:511
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x41\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:518
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:525
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9e\x80\x80\x80\x00\x01\x98\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:532
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:539
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa2\x80\x80\x80\x00\x01\x9c\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x43\x00\x00\xa0\x41\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:546
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:553
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:560
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:567
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x42\x14\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:574
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x41\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:581
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa2\x80\x80\x80\x00\x01\x9c\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x43\x00\x00\xf0\x41\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:588
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x42\x1e\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:595
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x8a\x80\x80\x80\x00\x01\x06\x74\x65\x73\x74\x66\x6e\x00\x00\x0a\xa6\x80\x80\x80\x00\x01\xa0\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\x24\x40\x44\x00\x00\x00\x00\x00\x00\x34\x40\x44\x00\x00\x00\x00\x00\x00\x3e\x40\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:602
+let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8e\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0a\xbd\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8b\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:619
+assert_trap(() => call($8, "run", [65280, 37, 512]));
+
+// memory_fill.wast:622
+assert_return(() => call($8, "checkRange", [65280, 65536, 37]), -1);
+
+// memory_fill.wast:624
+assert_return(() => call($8, "checkRange", [0, 65280, 0]), -1);
+
+// memory_fill.wast:626
+let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8e\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0a\xbd\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8b\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:643
+assert_trap(() => call($9, "run", [65279, 37, 514]));
+
+// memory_fill.wast:646
+assert_return(() => call($9, "checkRange", [65279, 65536, 37]), -1);
+
+// memory_fill.wast:648
+assert_return(() => call($9, "checkRange", [0, 65279, 0]), -1);
+
+// memory_fill.wast:650
+let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8e\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0a\xbd\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8b\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0b\x00\x0b");
+
+// memory_fill.wast:667
+assert_trap(() => call($10, "run", [65279, 37, -1]));
+
+// memory_fill.wast:670
+assert_return(() => call($10, "checkRange", [65279, 65536, 37]), -1);
+
+// memory_fill.wast:672
+assert_return(() => call($10, "checkRange", [0, 65279, 0]), -1);
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast
new file mode 100644
index 0000000000..59c9fe8606
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast
@@ -0,0 +1,947 @@
+;;
+;; Generated by ../meta/generate_memory_init.js
+;;
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data passive "\02\07\01\08")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (data passive "\05\09\02\07\06")
+ (func (export "test")
+ (nop))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data passive "\02\07\01\08")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (data passive "\05\09\02\07\06")
+ (func (export "test")
+ (memory.init 1 (i32.const 7) (i32.const 0) (i32.const 4)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 6))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data passive "\02\07\01\08")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (data passive "\05\09\02\07\06")
+ (func (export "test")
+ (memory.init 3 (i32.const 15) (i32.const 1) (i32.const 3)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+
+(module
+ (memory (export "memory0") 1 1)
+ (data (i32.const 2) "\03\01\04\01")
+ (data passive "\02\07\01\08")
+ (data (i32.const 12) "\07\05\02\03\06")
+ (data passive "\05\09\02\07\06")
+ (func (export "test")
+ (memory.init 1 (i32.const 7) (i32.const 0) (i32.const 4))
+ (data.drop 1)
+ (memory.init 3 (i32.const 15) (i32.const 1) (i32.const 3))
+ (data.drop 3)
+ (memory.copy (i32.const 20) (i32.const 15) (i32.const 5))
+ (memory.copy (i32.const 21) (i32.const 29) (i32.const 1))
+ (memory.copy (i32.const 24) (i32.const 10) (i32.const 1))
+ (memory.copy (i32.const 13) (i32.const 11) (i32.const 4))
+ (memory.copy (i32.const 19) (i32.const 20) (i32.const 5)))
+ (func (export "load8_u") (param i32) (result i32)
+ (i32.load8_u (local.get 0))))
+
+(invoke "test")
+
+(assert_return (invoke "load8_u" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 1)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "load8_u" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "load8_u" (i32.const 5)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 6)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 7)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 8)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 9)) (i32.const 1))
+(assert_return (invoke "load8_u" (i32.const 10)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 13)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 14)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 15)) (i32.const 5))
+(assert_return (invoke "load8_u" (i32.const 16)) (i32.const 2))
+(assert_return (invoke "load8_u" (i32.const 17)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 18)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 19)) (i32.const 9))
+(assert_return (invoke "load8_u" (i32.const 20)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 21)) (i32.const 7))
+(assert_return (invoke "load8_u" (i32.const 22)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 23)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 24)) (i32.const 8))
+(assert_return (invoke "load8_u" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 26)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 27)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 28)) (i32.const 0))
+(assert_return (invoke "load8_u" (i32.const 29)) (i32.const 0))
+(assert_invalid
+ (module
+ (func (export "test")
+ (data.drop 0)))
+ "unknown memory 0")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (data.drop 4)))
+ "unknown data segment")
+
+(module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (data.drop 0)
+ (data.drop 0)))
+(assert_trap (invoke "test") "data segment dropped")
+
+(module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (data.drop 0)
+ (memory.init 0 (i32.const 1234) (i32.const 1) (i32.const 1))))
+(assert_trap (invoke "test") "data segment dropped")
+
+(module
+ (memory 1)
+ (data (i32.const 0) "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1234) (i32.const 1) (i32.const 1))))
+(assert_trap (invoke "test") "data segment dropped")
+
+(assert_invalid
+ (module
+ (func (export "test")
+ (memory.init 1 (i32.const 1234) (i32.const 1) (i32.const 1))))
+ "unknown memory 0")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 1 (i32.const 1234) (i32.const 1) (i32.const 1))))
+ "unknown data segment 1")
+
+(module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (i32.const 0) (i32.const 1))
+ (memory.init 0 (i32.const 1) (i32.const 0) (i32.const 1))))
+(invoke "test")
+
+(module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1234) (i32.const 0) (i32.const 5))))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1234) (i32.const 2) (i32.const 3))))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 0xFFFE) (i32.const 1) (i32.const 3))))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1234) (i32.const 4) (i32.const 0))))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 0x10000) (i32.const 2) (i32.const 0))))
+(assert_trap (invoke "test") "out of bounds")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (i32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (i32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (i32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (f32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (f32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (f32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (f32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (i64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (i64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (i64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (i64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (f64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (f64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (f64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i32.const 1) (f64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (i32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (i32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (i32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (i32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (f32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (f32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (f32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (f32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (i64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (i64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (i64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (i64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (f64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (f64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (f64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f32.const 1) (f64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (i32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (i32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (i32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (i32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (f32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (f32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (f32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (f32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (i64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (i64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (i64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (i64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (f64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (f64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (f64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (i64.const 1) (f64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (i32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (i32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (i32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (i32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (f32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (f32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (f32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (f32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (i64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (i64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (i64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (i64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (f64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (f64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (f64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (memory 1)
+ (data passive "\37")
+ (func (export "test")
+ (memory.init 0 (f64.const 1) (f64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(module
+ (memory 1 1 )
+ (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "run") (param $offs i32) (param $len i32)
+ (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 65528) (i32.const 16))
+ "out of bounds")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65528) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65528) (i32.const 65536) (i32.const 66))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65536) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+(module
+ (memory 1 1 )
+ (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "run") (param $offs i32) (param $len i32)
+ (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 65527) (i32.const 16))
+ "out of bounds")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65527) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65527) (i32.const 65536) (i32.const 66))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65536) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+(module
+ (memory 1 1 )
+ (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "run") (param $offs i32) (param $len i32)
+ (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 65472) (i32.const 30))
+ "out of bounds")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65472) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65472) (i32.const 65488) (i32.const 66))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65488) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+(module
+ (memory 1 1 )
+ (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "run") (param $offs i32) (param $len i32)
+ (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 65473) (i32.const 31))
+ "out of bounds")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65473) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65473) (i32.const 65489) (i32.const 66))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65489) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+(module
+ (memory 1 )
+ (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "run") (param $offs i32) (param $len i32)
+ (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 65528) (i32.const 4294967040))
+ "out of bounds")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 65528) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65528) (i32.const 65536) (i32.const 66))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 65536) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
+(module
+ (memory 1 )
+ (data passive "\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42\42")
+
+ (func (export "checkRange") (param $from i32) (param $to i32) (param $expected i32) (result i32)
+ (loop $cont
+ (if (i32.eq (local.get $from) (local.get $to))
+ (then
+ (return (i32.const -1))))
+ (if (i32.eq (i32.load8_u (local.get $from)) (local.get $expected))
+ (then
+ (local.set $from (i32.add (local.get $from) (i32.const 1)))
+ (br $cont))))
+ (return (local.get $from)))
+
+ (func (export "run") (param $offs i32) (param $len i32)
+ (memory.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 0) (i32.const 4294967292))
+ "out of bounds")
+
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 0) (i32.const 0))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 0) (i32.const 16) (i32.const 66))
+ (i32.const -1))
+(assert_return (invoke "checkRange" (i32.const 16) (i32.const 65536) (i32.const 0))
+ (i32.const -1))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast.js
new file mode 100644
index 0000000000..04b43c73ff
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/memory_init.wast.js
@@ -0,0 +1,866 @@
+
+'use strict';
+
+let spectest = {
+ print: console.log.bind(console),
+ print_i32: console.log.bind(console),
+ print_i32_f32: console.log.bind(console),
+ print_f64_f64: console.log.bind(console),
+ print_f32: console.log.bind(console),
+ print_f64: console.log.bind(console),
+ global_i32: 666,
+ global_f32: 666,
+ global_f64: 666,
+ table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
+ memory: new WebAssembly.Memory({initial: 1, maximum: 2})
+};
+let handler = {
+ get(target, prop) {
+ return (prop in target) ? target[prop] : {};
+ }
+};
+let registry = new Proxy({spectest}, handler);
+
+function register(name, instance) {
+ registry[name] = instance.exports;
+}
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function get(instance, name) {
+ let v = instance.exports[name];
+ return (v instanceof WebAssembly.Global) ? v.value : v;
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function run(action) {
+ action();
+}
+
+function assert_malformed(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm decoding failure expected");
+}
+
+function assert_invalid(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm validation failure expected");
+}
+
+function assert_unlinkable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.LinkError) return;
+ }
+ throw new Error("Wasm linking failure expected");
+}
+
+function assert_uninstantiable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+function assert_trap(action) {
+ try { action() } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+let StackOverflow;
+try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
+
+function assert_exhaustion(action) {
+ try { action() } catch (e) {
+ if (e instanceof StackOverflow) return;
+ }
+ throw new Error("Wasm resource exhaustion expected");
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+function assert_return_canonical_nan(action) {
+ let actual = action();
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test that it's a canonical NaN.
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+function assert_return_arithmetic_nan(action) {
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test for specific bitpatterns here.
+ let actual = action();
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+// memory_init.wast:5
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0a\x95\x80\x80\x80\x00\x02\x83\x80\x80\x80\x00\x00\x01\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\xa1\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x04\x02\x07\x01\x08\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x05\x05\x09\x02\x07\x06");
+
+// memory_init.wast:16
+run(() => call($1, "test", []));
+
+// memory_init.wast:18
+assert_return(() => call($1, "load8_u", [0]), 0);
+
+// memory_init.wast:19
+assert_return(() => call($1, "load8_u", [1]), 0);
+
+// memory_init.wast:20
+assert_return(() => call($1, "load8_u", [2]), 3);
+
+// memory_init.wast:21
+assert_return(() => call($1, "load8_u", [3]), 1);
+
+// memory_init.wast:22
+assert_return(() => call($1, "load8_u", [4]), 4);
+
+// memory_init.wast:23
+assert_return(() => call($1, "load8_u", [5]), 1);
+
+// memory_init.wast:24
+assert_return(() => call($1, "load8_u", [6]), 0);
+
+// memory_init.wast:25
+assert_return(() => call($1, "load8_u", [7]), 0);
+
+// memory_init.wast:26
+assert_return(() => call($1, "load8_u", [8]), 0);
+
+// memory_init.wast:27
+assert_return(() => call($1, "load8_u", [9]), 0);
+
+// memory_init.wast:28
+assert_return(() => call($1, "load8_u", [10]), 0);
+
+// memory_init.wast:29
+assert_return(() => call($1, "load8_u", [11]), 0);
+
+// memory_init.wast:30
+assert_return(() => call($1, "load8_u", [12]), 7);
+
+// memory_init.wast:31
+assert_return(() => call($1, "load8_u", [13]), 5);
+
+// memory_init.wast:32
+assert_return(() => call($1, "load8_u", [14]), 2);
+
+// memory_init.wast:33
+assert_return(() => call($1, "load8_u", [15]), 3);
+
+// memory_init.wast:34
+assert_return(() => call($1, "load8_u", [16]), 6);
+
+// memory_init.wast:35
+assert_return(() => call($1, "load8_u", [17]), 0);
+
+// memory_init.wast:36
+assert_return(() => call($1, "load8_u", [18]), 0);
+
+// memory_init.wast:37
+assert_return(() => call($1, "load8_u", [19]), 0);
+
+// memory_init.wast:38
+assert_return(() => call($1, "load8_u", [20]), 0);
+
+// memory_init.wast:39
+assert_return(() => call($1, "load8_u", [21]), 0);
+
+// memory_init.wast:40
+assert_return(() => call($1, "load8_u", [22]), 0);
+
+// memory_init.wast:41
+assert_return(() => call($1, "load8_u", [23]), 0);
+
+// memory_init.wast:42
+assert_return(() => call($1, "load8_u", [24]), 0);
+
+// memory_init.wast:43
+assert_return(() => call($1, "load8_u", [25]), 0);
+
+// memory_init.wast:44
+assert_return(() => call($1, "load8_u", [26]), 0);
+
+// memory_init.wast:45
+assert_return(() => call($1, "load8_u", [27]), 0);
+
+// memory_init.wast:46
+assert_return(() => call($1, "load8_u", [28]), 0);
+
+// memory_init.wast:47
+assert_return(() => call($1, "load8_u", [29]), 0);
+
+// memory_init.wast:49
+let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0c\x81\x80\x80\x80\x00\x04\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x07\x41\x00\x41\x04\xfc\x08\x01\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\xa1\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x04\x02\x07\x01\x08\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x05\x05\x09\x02\x07\x06");
+
+// memory_init.wast:60
+run(() => call($2, "test", []));
+
+// memory_init.wast:62
+assert_return(() => call($2, "load8_u", [0]), 0);
+
+// memory_init.wast:63
+assert_return(() => call($2, "load8_u", [1]), 0);
+
+// memory_init.wast:64
+assert_return(() => call($2, "load8_u", [2]), 3);
+
+// memory_init.wast:65
+assert_return(() => call($2, "load8_u", [3]), 1);
+
+// memory_init.wast:66
+assert_return(() => call($2, "load8_u", [4]), 4);
+
+// memory_init.wast:67
+assert_return(() => call($2, "load8_u", [5]), 1);
+
+// memory_init.wast:68
+assert_return(() => call($2, "load8_u", [6]), 0);
+
+// memory_init.wast:69
+assert_return(() => call($2, "load8_u", [7]), 2);
+
+// memory_init.wast:70
+assert_return(() => call($2, "load8_u", [8]), 7);
+
+// memory_init.wast:71
+assert_return(() => call($2, "load8_u", [9]), 1);
+
+// memory_init.wast:72
+assert_return(() => call($2, "load8_u", [10]), 8);
+
+// memory_init.wast:73
+assert_return(() => call($2, "load8_u", [11]), 0);
+
+// memory_init.wast:74
+assert_return(() => call($2, "load8_u", [12]), 7);
+
+// memory_init.wast:75
+assert_return(() => call($2, "load8_u", [13]), 5);
+
+// memory_init.wast:76
+assert_return(() => call($2, "load8_u", [14]), 2);
+
+// memory_init.wast:77
+assert_return(() => call($2, "load8_u", [15]), 3);
+
+// memory_init.wast:78
+assert_return(() => call($2, "load8_u", [16]), 6);
+
+// memory_init.wast:79
+assert_return(() => call($2, "load8_u", [17]), 0);
+
+// memory_init.wast:80
+assert_return(() => call($2, "load8_u", [18]), 0);
+
+// memory_init.wast:81
+assert_return(() => call($2, "load8_u", [19]), 0);
+
+// memory_init.wast:82
+assert_return(() => call($2, "load8_u", [20]), 0);
+
+// memory_init.wast:83
+assert_return(() => call($2, "load8_u", [21]), 0);
+
+// memory_init.wast:84
+assert_return(() => call($2, "load8_u", [22]), 0);
+
+// memory_init.wast:85
+assert_return(() => call($2, "load8_u", [23]), 0);
+
+// memory_init.wast:86
+assert_return(() => call($2, "load8_u", [24]), 0);
+
+// memory_init.wast:87
+assert_return(() => call($2, "load8_u", [25]), 0);
+
+// memory_init.wast:88
+assert_return(() => call($2, "load8_u", [26]), 0);
+
+// memory_init.wast:89
+assert_return(() => call($2, "load8_u", [27]), 0);
+
+// memory_init.wast:90
+assert_return(() => call($2, "load8_u", [28]), 0);
+
+// memory_init.wast:91
+assert_return(() => call($2, "load8_u", [29]), 0);
+
+// memory_init.wast:93
+let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0c\x81\x80\x80\x80\x00\x04\x0a\x9e\x80\x80\x80\x00\x02\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x01\x41\x03\xfc\x08\x03\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\xa1\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x04\x02\x07\x01\x08\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x05\x05\x09\x02\x07\x06");
+
+// memory_init.wast:104
+run(() => call($3, "test", []));
+
+// memory_init.wast:106
+assert_return(() => call($3, "load8_u", [0]), 0);
+
+// memory_init.wast:107
+assert_return(() => call($3, "load8_u", [1]), 0);
+
+// memory_init.wast:108
+assert_return(() => call($3, "load8_u", [2]), 3);
+
+// memory_init.wast:109
+assert_return(() => call($3, "load8_u", [3]), 1);
+
+// memory_init.wast:110
+assert_return(() => call($3, "load8_u", [4]), 4);
+
+// memory_init.wast:111
+assert_return(() => call($3, "load8_u", [5]), 1);
+
+// memory_init.wast:112
+assert_return(() => call($3, "load8_u", [6]), 0);
+
+// memory_init.wast:113
+assert_return(() => call($3, "load8_u", [7]), 0);
+
+// memory_init.wast:114
+assert_return(() => call($3, "load8_u", [8]), 0);
+
+// memory_init.wast:115
+assert_return(() => call($3, "load8_u", [9]), 0);
+
+// memory_init.wast:116
+assert_return(() => call($3, "load8_u", [10]), 0);
+
+// memory_init.wast:117
+assert_return(() => call($3, "load8_u", [11]), 0);
+
+// memory_init.wast:118
+assert_return(() => call($3, "load8_u", [12]), 7);
+
+// memory_init.wast:119
+assert_return(() => call($3, "load8_u", [13]), 5);
+
+// memory_init.wast:120
+assert_return(() => call($3, "load8_u", [14]), 2);
+
+// memory_init.wast:121
+assert_return(() => call($3, "load8_u", [15]), 9);
+
+// memory_init.wast:122
+assert_return(() => call($3, "load8_u", [16]), 2);
+
+// memory_init.wast:123
+assert_return(() => call($3, "load8_u", [17]), 7);
+
+// memory_init.wast:124
+assert_return(() => call($3, "load8_u", [18]), 0);
+
+// memory_init.wast:125
+assert_return(() => call($3, "load8_u", [19]), 0);
+
+// memory_init.wast:126
+assert_return(() => call($3, "load8_u", [20]), 0);
+
+// memory_init.wast:127
+assert_return(() => call($3, "load8_u", [21]), 0);
+
+// memory_init.wast:128
+assert_return(() => call($3, "load8_u", [22]), 0);
+
+// memory_init.wast:129
+assert_return(() => call($3, "load8_u", [23]), 0);
+
+// memory_init.wast:130
+assert_return(() => call($3, "load8_u", [24]), 0);
+
+// memory_init.wast:131
+assert_return(() => call($3, "load8_u", [25]), 0);
+
+// memory_init.wast:132
+assert_return(() => call($3, "load8_u", [26]), 0);
+
+// memory_init.wast:133
+assert_return(() => call($3, "load8_u", [27]), 0);
+
+// memory_init.wast:134
+assert_return(() => call($3, "load8_u", [28]), 0);
+
+// memory_init.wast:135
+assert_return(() => call($3, "load8_u", [29]), 0);
+
+// memory_init.wast:137
+let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x89\x80\x80\x80\x00\x02\x60\x00\x00\x60\x01\x7f\x01\x7f\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x9c\x80\x80\x80\x00\x03\x07\x6d\x65\x6d\x6f\x72\x79\x30\x02\x00\x04\x74\x65\x73\x74\x00\x00\x07\x6c\x6f\x61\x64\x38\x5f\x75\x00\x01\x0c\x81\x80\x80\x80\x00\x04\x0a\xe0\x80\x80\x80\x00\x02\xce\x80\x80\x80\x00\x00\x41\x07\x41\x00\x41\x04\xfc\x08\x01\x00\xfc\x09\x01\x41\x0f\x41\x01\x41\x03\xfc\x08\x03\x00\xfc\x09\x03\x41\x14\x41\x0f\x41\x05\xfc\x0a\x00\x00\x41\x15\x41\x1d\x41\x01\xfc\x0a\x00\x00\x41\x18\x41\x0a\x41\x01\xfc\x0a\x00\x00\x41\x0d\x41\x0b\x41\x04\xfc\x0a\x00\x00\x41\x13\x41\x14\x41\x05\xfc\x0a\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x2d\x00\x00\x0b\x0b\xa1\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x04\x02\x07\x01\x08\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x05\x05\x09\x02\x07\x06");
+
+// memory_init.wast:156
+run(() => call($4, "test", []));
+
+// memory_init.wast:158
+assert_return(() => call($4, "load8_u", [0]), 0);
+
+// memory_init.wast:159
+assert_return(() => call($4, "load8_u", [1]), 0);
+
+// memory_init.wast:160
+assert_return(() => call($4, "load8_u", [2]), 3);
+
+// memory_init.wast:161
+assert_return(() => call($4, "load8_u", [3]), 1);
+
+// memory_init.wast:162
+assert_return(() => call($4, "load8_u", [4]), 4);
+
+// memory_init.wast:163
+assert_return(() => call($4, "load8_u", [5]), 1);
+
+// memory_init.wast:164
+assert_return(() => call($4, "load8_u", [6]), 0);
+
+// memory_init.wast:165
+assert_return(() => call($4, "load8_u", [7]), 2);
+
+// memory_init.wast:166
+assert_return(() => call($4, "load8_u", [8]), 7);
+
+// memory_init.wast:167
+assert_return(() => call($4, "load8_u", [9]), 1);
+
+// memory_init.wast:168
+assert_return(() => call($4, "load8_u", [10]), 8);
+
+// memory_init.wast:169
+assert_return(() => call($4, "load8_u", [11]), 0);
+
+// memory_init.wast:170
+assert_return(() => call($4, "load8_u", [12]), 7);
+
+// memory_init.wast:171
+assert_return(() => call($4, "load8_u", [13]), 0);
+
+// memory_init.wast:172
+assert_return(() => call($4, "load8_u", [14]), 7);
+
+// memory_init.wast:173
+assert_return(() => call($4, "load8_u", [15]), 5);
+
+// memory_init.wast:174
+assert_return(() => call($4, "load8_u", [16]), 2);
+
+// memory_init.wast:175
+assert_return(() => call($4, "load8_u", [17]), 7);
+
+// memory_init.wast:176
+assert_return(() => call($4, "load8_u", [18]), 0);
+
+// memory_init.wast:177
+assert_return(() => call($4, "load8_u", [19]), 9);
+
+// memory_init.wast:178
+assert_return(() => call($4, "load8_u", [20]), 0);
+
+// memory_init.wast:179
+assert_return(() => call($4, "load8_u", [21]), 7);
+
+// memory_init.wast:180
+assert_return(() => call($4, "load8_u", [22]), 0);
+
+// memory_init.wast:181
+assert_return(() => call($4, "load8_u", [23]), 8);
+
+// memory_init.wast:182
+assert_return(() => call($4, "load8_u", [24]), 8);
+
+// memory_init.wast:183
+assert_return(() => call($4, "load8_u", [25]), 0);
+
+// memory_init.wast:184
+assert_return(() => call($4, "load8_u", [26]), 0);
+
+// memory_init.wast:185
+assert_return(() => call($4, "load8_u", [27]), 0);
+
+// memory_init.wast:186
+assert_return(() => call($4, "load8_u", [28]), 0);
+
+// memory_init.wast:187
+assert_return(() => call($4, "load8_u", [29]), 0);
+
+// memory_init.wast:188
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x00\x0a\x8b\x80\x80\x80\x00\x01\x85\x80\x80\x80\x00\x00\xfc\x09\x00\x0b");
+
+// memory_init.wast:194
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x8b\x80\x80\x80\x00\x01\x85\x80\x80\x80\x00\x00\xfc\x09\x04\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:202
+let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x8e\x80\x80\x80\x00\x01\x88\x80\x80\x80\x00\x00\xfc\x09\x00\xfc\x09\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:208
+assert_trap(() => call($5, "test", []));
+
+// memory_init.wast:210
+let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x96\x80\x80\x80\x00\x01\x90\x80\x80\x80\x00\x00\xfc\x09\x00\x41\xd2\x09\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:216
+assert_trap(() => call($6, "test", []));
+
+// memory_init.wast:218
+let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x87\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x01\x37");
+
+// memory_init.wast:223
+assert_trap(() => call($7, "test", []));
+
+// memory_init.wast:225
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x00\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x01\x41\x01\xfc\x08\x01\x00\x0b");
+
+// memory_init.wast:231
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x01\x41\x01\xfc\x08\x01\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:239
+let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x01\x41\x00\x41\x01\xfc\x08\x00\x00\x41\x01\x41\x00\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:245
+run(() => call($8, "test", []));
+
+// memory_init.wast:247
+let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x00\x41\x05\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:252
+assert_trap(() => call($9, "test", []));
+
+// memory_init.wast:254
+let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x02\x41\x03\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:259
+assert_trap(() => call($10, "test", []));
+
+// memory_init.wast:261
+let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\xfe\xff\x03\x41\x01\x41\x03\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:266
+assert_trap(() => call($11, "test", []));
+
+// memory_init.wast:268
+let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x93\x80\x80\x80\x00\x01\x8d\x80\x80\x80\x00\x00\x41\xd2\x09\x41\x04\x41\x00\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:273
+assert_trap(() => call($12, "test", []));
+
+// memory_init.wast:275
+let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x94\x80\x80\x80\x00\x01\x8e\x80\x80\x80\x00\x00\x41\x80\x80\x04\x41\x02\x41\x00\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:280
+assert_trap(() => call($13, "test", []));
+
+// memory_init.wast:282
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x01\x41\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:290
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x01\x41\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:298
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x01\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:306
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:314
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:322
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:330
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:338
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x01\x42\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:346
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x41\x01\x42\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:354
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x01\x42\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:362
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x01\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:370
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:378
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:386
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:394
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:402
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:410
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:418
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:426
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:434
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:442
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9b\x80\x80\x80\x00\x01\x95\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:450
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:458
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:466
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:474
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:482
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:490
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:498
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:506
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:514
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:522
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:530
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x01\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:538
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x01\x41\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:546
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x01\x41\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:554
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x01\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:562
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:570
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x98\x80\x80\x80\x00\x01\x92\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:578
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:586
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:594
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x01\x42\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:602
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x95\x80\x80\x80\x00\x01\x8f\x80\x80\x80\x00\x00\x42\x01\x42\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:610
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x42\x01\x42\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:618
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x01\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:626
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:634
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:642
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:650
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:658
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:666
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:674
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:682
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:690
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:698
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9f\x80\x80\x80\x00\x01\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:706
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:714
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:722
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:730
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x9c\x80\x80\x80\x00\x01\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:738
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\x99\x80\x80\x80\x00\x01\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:746
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:754
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:762
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa3\x80\x80\x80\x00\x01\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:770
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa0\x80\x80\x80\x00\x01\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:778
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0c\x81\x80\x80\x80\x00\x01\x0a\xa7\x80\x80\x80\x00\x01\xa1\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x08\x00\x00\x0b\x0b\x84\x80\x80\x80\x00\x01\x01\x01\x37");
+
+// memory_init.wast:786
+let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
+
+// memory_init.wast:804
+assert_trap(() => call($14, "run", [65528, 16]));
+
+// memory_init.wast:807
+assert_return(() => call($14, "checkRange", [0, 65528, 0]), -1);
+
+// memory_init.wast:809
+assert_return(() => call($14, "checkRange", [65528, 65536, 66]), -1);
+
+// memory_init.wast:811
+assert_return(() => call($14, "checkRange", [65536, 65536, 0]), -1);
+
+// memory_init.wast:813
+let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
+
+// memory_init.wast:831
+assert_trap(() => call($15, "run", [65527, 16]));
+
+// memory_init.wast:834
+assert_return(() => call($15, "checkRange", [0, 65527, 0]), -1);
+
+// memory_init.wast:836
+assert_return(() => call($15, "checkRange", [65527, 65536, 66]), -1);
+
+// memory_init.wast:838
+assert_return(() => call($15, "checkRange", [65536, 65536, 0]), -1);
+
+// memory_init.wast:840
+let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
+
+// memory_init.wast:858
+assert_trap(() => call($16, "run", [65472, 30]));
+
+// memory_init.wast:861
+assert_return(() => call($16, "checkRange", [0, 65472, 0]), -1);
+
+// memory_init.wast:863
+assert_return(() => call($16, "checkRange", [65472, 65488, 66]), -1);
+
+// memory_init.wast:865
+assert_return(() => call($16, "checkRange", [65488, 65536, 0]), -1);
+
+// memory_init.wast:867
+let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x84\x80\x80\x80\x00\x01\x01\x01\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
+
+// memory_init.wast:885
+assert_trap(() => call($17, "run", [65473, 31]));
+
+// memory_init.wast:888
+assert_return(() => call($17, "checkRange", [0, 65473, 0]), -1);
+
+// memory_init.wast:890
+assert_return(() => call($17, "checkRange", [65473, 65489, 66]), -1);
+
+// memory_init.wast:892
+assert_return(() => call($17, "checkRange", [65489, 65536, 0]), -1);
+
+// memory_init.wast:894
+let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
+
+// memory_init.wast:912
+assert_trap(() => call($18, "run", [65528, -256]));
+
+// memory_init.wast:915
+assert_return(() => call($18, "checkRange", [0, 65528, 0]), -1);
+
+// memory_init.wast:917
+assert_return(() => call($18, "checkRange", [65528, 65536, 66]), -1);
+
+// memory_init.wast:919
+assert_return(() => call($18, "checkRange", [65536, 65536, 0]), -1);
+
+// memory_init.wast:921
+let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x02\x60\x03\x7f\x7f\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x05\x83\x80\x80\x80\x00\x01\x00\x01\x07\x94\x80\x80\x80\x00\x02\x0a\x63\x68\x65\x63\x6b\x52\x61\x6e\x67\x65\x00\x00\x03\x72\x75\x6e\x00\x01\x0c\x81\x80\x80\x80\x00\x01\x0a\xbe\x80\x80\x80\x00\x02\xa7\x80\x80\x80\x00\x00\x03\x40\x20\x00\x20\x01\x46\x04\x40\x41\x7f\x0f\x0b\x20\x00\x2d\x00\x00\x20\x02\x46\x04\x40\x20\x00\x41\x01\x6a\x21\x00\x0c\x01\x0b\x0b\x20\x00\x0f\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x08\x00\x00\x0b\x0b\x93\x80\x80\x80\x00\x01\x01\x10\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42");
+
+// memory_init.wast:939
+assert_trap(() => call($19, "run", [0, -4]));
+
+// memory_init.wast:942
+assert_return(() => call($19, "checkRange", [0, 0, 0]), -1);
+
+// memory_init.wast:944
+assert_return(() => call($19, "checkRange", [0, 16, 66]), -1);
+
+// memory_init.wast:946
+assert_return(() => call($19, "checkRange", [16, 65536, 0]), -1);
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast
new file mode 100644
index 0000000000..51c4ae148b
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast
@@ -0,0 +1,1469 @@
+;;
+;; Generated by ../meta/generate_table_copy.js
+;;
+
+(module
+ (func (export "ef0") (result i32) (i32.const 0))
+ (func (export "ef1") (result i32) (i32.const 1))
+ (func (export "ef2") (result i32) (i32.const 2))
+ (func (export "ef3") (result i32) (i32.const 3))
+ (func (export "ef4") (result i32) (i32.const 4))
+)
+(register "a")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (nop))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.copy (i32.const 13) (i32.const 2) (i32.const 3)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 13)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 14)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 15)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.copy (i32.const 25) (i32.const 15) (i32.const 2)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 25)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 26)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.copy (i32.const 13) (i32.const 25) (i32.const 3)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
+(assert_trap (invoke "check" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 15)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.copy (i32.const 20) (i32.const 22) (i32.const 4)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.copy (i32.const 25) (i32.const 1) (i32.const 3)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 26)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 27)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.copy (i32.const 10) (i32.const 12) (i32.const 7)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 10)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 11)) (i32.const 5))
+(assert_return (invoke "check" (i32.const 12)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 13)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 14)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.copy (i32.const 12) (i32.const 10) (i32.const 7)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 13)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 14)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 15)) (i32.const 5))
+(assert_return (invoke "check" (i32.const 16)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 17)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 18)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.copy (i32.const 28) (i32.const 1) (i32.const 3))
+ ))
+
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.copy (i32.const 0xFFFFFFFE) (i32.const 1) (i32.const 2))
+ ))
+
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.copy (i32.const 15) (i32.const 25) (i32.const 6))
+ ))
+
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.copy (i32.const 15) (i32.const 0xFFFFFFFE) (i32.const 2))
+ ))
+
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.copy (i32.const 15) (i32.const 25) (i32.const 0))
+ ))
+
+(invoke "test")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.copy (i32.const 30) (i32.const 15) (i32.const 0))
+ ))
+
+(invoke "test")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.copy (i32.const 15) (i32.const 30) (i32.const 0))
+ ))
+
+(invoke "test")
+
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem (i32.const 0)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 24) (i32.const 0) (i32.const 16))
+ "out of bounds")
+(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 25)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
+
+;; table.copy OOB test: 32-entry table (max 64) with an active elem segment
+;; {$f0..$f8} at offset 0. "run" performs table.copy(dest, src, len); "test"
+;; call_indirects entry $n so the asserts can observe the table afterwards.
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem (i32.const 0)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+;; dest=23, src=0, len=15: 23+15 = 38 > 32 so the copy traps, yet the asserts
+;; below expect entries 23..31 to hold src entries 0..8 — elements were
+;; written up to the out-of-bounds index before the trap.
+;; NOTE(review): partial writes before the trap match the 2019 bulk-memory
+;; draft semantics; the finalized spec bounds-checks up front and copies
+;; nothing — confirm which spec revision this suite tracks before editing.
+(assert_trap (invoke "run" (i32.const 23) (i32.const 0) (i32.const 15))
+ "out of bounds")
+(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 8)) (i32.const 8))
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 24)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 25)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 29)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 30)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 31)) (i32.const 8))
+
+;; table.copy OOB test: 32-entry table (max 64), elem {$f0..$f7} at offset 24.
+;; "run" performs table.copy(dest, src, len); "test" call_indirects entry $n.
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem (i32.const 24)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+;; dest=0, src=24, len=16: src+len = 40 > 32 traps, but the asserts expect
+;; src 24..31 already copied into dest 0..7 before the trap (partial copy);
+;; the source segment at 24..31 is left intact.
+;; NOTE(review): partial-copy-then-trap matches the 2019 bulk-memory draft,
+;; not the finalized spec — confirm the tracked revision.
+(assert_trap (invoke "run" (i32.const 0) (i32.const 24) (i32.const 16))
+ "out of bounds")
+(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 25)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
+
+;; table.copy OOB test: 32-entry table (max 64), elem {$f0..$f8} at offset 23.
+;; "run" performs table.copy(dest, src, len); "test" call_indirects entry $n.
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem (i32.const 23)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+;; dest=0, src=23, len=15: src+len = 38 > 32 traps; asserts expect src 23..31
+;; already copied into dest 0..8 before the trap, and the source segment at
+;; 23..31 intact. NOTE(review): partial-copy-then-trap matches the 2019
+;; bulk-memory draft, not the finalized spec — confirm the tracked revision.
+(assert_trap (invoke "run" (i32.const 0) (i32.const 23) (i32.const 15))
+ "out of bounds")
+(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 8)) (i32.const 8))
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 23)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 24)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 25)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 29)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 30)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 31)) (i32.const 8))
+
+;; table.copy OOB test: 32-entry table (max 64), elem {$f0..$f7} at offset 11.
+;; "run" performs table.copy(dest, src, len); "test" call_indirects entry $n.
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem (i32.const 11)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+;; dest=24, src=11, len=16: dest+len = 40 > 32 traps and the asserts expect
+;; NO entries written — only the original segment at 11..18 survives.
+;; NOTE(review): the expectations are consistent with an overlapping
+;; dest>src copy proceeding top-down and hitting the OOB index first
+;; (2019 bulk-memory draft behavior) — confirm the tracked spec revision.
+(assert_trap (invoke "run" (i32.const 24) (i32.const 11) (i32.const 16))
+ "out of bounds")
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 12)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 13)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 14)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 15)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 16)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 17)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 18)) (i32.const 7))
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
+
+;; table.copy OOB test: 32-entry table (max 64), elem {$f0..$f7} at offset 24.
+;; "run" performs table.copy(dest, src, len); "test" call_indirects entry $n.
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem (i32.const 24)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+;; dest=11, src=24, len=16: src+len = 40 > 32 traps; asserts expect src 24..31
+;; already copied into dest 11..18 before the trap, with the source segment at
+;; 24..31 intact. NOTE(review): partial-copy-then-trap matches the 2019
+;; bulk-memory draft, not the finalized spec — confirm the tracked revision.
+(assert_trap (invoke "run" (i32.const 11) (i32.const 24) (i32.const 16))
+ "out of bounds")
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 11)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 12)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 13)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 14)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 15)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 16)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 17)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 18)) (i32.const 7))
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 25)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
+
+;; table.copy OOB test: 32-entry table (max 64), elem {$f0..$f7} at offset 21.
+;; "run" performs table.copy(dest, src, len); "test" call_indirects entry $n.
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem (i32.const 21)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+;; dest=24, src=21, len=16 (overlapping, dest > src): dest+len = 40 > 32 traps
+;; and the asserts expect NO entries written — only the original segment at
+;; 21..28 survives. NOTE(review): consistent with a top-down overlapping copy
+;; hitting the OOB index first (2019 bulk-memory draft behavior) — confirm
+;; the tracked spec revision.
+(assert_trap (invoke "run" (i32.const 24) (i32.const 21) (i32.const 16))
+ "out of bounds")
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 22)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 23)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 24)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 25)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 7))
+(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
+
+;; table.copy OOB test: 32-entry table (max 64), elem {$f0..$f7} at offset 24.
+;; "run" performs table.copy(dest, src, len); "test" call_indirects entry $n.
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem (i32.const 24)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+;; dest=21, src=24, len=16 (overlapping, dest < src): src+len = 40 > 32 traps;
+;; asserts expect src 24..31 already copied into dest 21..28 before the trap,
+;; while 29..31 keep their original segment values 5..7 (entries 29..31 of
+;; the elem at 24 are $f5..$f7 — the 5/6/7 expectations below are correct,
+;; not a typo). NOTE(review): partial-copy-then-trap matches the 2019
+;; bulk-memory draft, not the finalized spec — confirm the tracked revision.
+(assert_trap (invoke "run" (i32.const 21) (i32.const 24) (i32.const 16))
+ "out of bounds")
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 22)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 23)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 24)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 25)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
+
+;; table.copy OOB test: 32-entry table (max 64), elem {$f0..$f10} at offset 21.
+;; "run" performs table.copy(dest, src, len); "test" call_indirects entry $n.
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem (i32.const 21)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+;; dest=src=21, len=16: 21+16 = 37 > 32 traps. A self-copy cannot change any
+;; entry it does write, so regardless of how much was copied before the trap
+;; the table must look untouched: 21..31 still hold the elem values 0..10 and
+;; everything else stays uninitialized.
+(assert_trap (invoke "run" (i32.const 21) (i32.const 21) (i32.const 16))
+ "out of bounds")
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 21)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 22)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 23)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 24)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 25)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 29)) (i32.const 8))
+(assert_return (invoke "test" (i32.const 30)) (i32.const 9))
+(assert_return (invoke "test" (i32.const 31)) (i32.const 10))
+
+;; table.copy huge-length test harness: 128-entry table (min = max = 128) with
+;; elem {$f0..$f15} at offset 112. The assert run that follows (it continues
+;; past this chunk) invokes "run" with len = 4294967264 (0xffffffe0) to check
+;; trapping on a length whose dest+len wraps/overflows u32 bounds.
+(module
+ (type (func (result i32)))
+ (table 128 128 funcref)
+ (elem (i32.const 112)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 0) (i32.const 112) (i32.const 4294967264))
+ "out of bounds")
+(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 8)) (i32.const 8))
+(assert_return (invoke "test" (i32.const 9)) (i32.const 9))
+(assert_return (invoke "test" (i32.const 10)) (i32.const 10))
+(assert_return (invoke "test" (i32.const 11)) (i32.const 11))
+(assert_return (invoke "test" (i32.const 12)) (i32.const 12))
+(assert_return (invoke "test" (i32.const 13)) (i32.const 13))
+(assert_return (invoke "test" (i32.const 14)) (i32.const 14))
+(assert_return (invoke "test" (i32.const 15)) (i32.const 15))
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 48)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 49)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 50)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 51)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 52)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 53)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 54)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 55)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 56)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 57)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 58)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 59)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 60)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 61)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 62)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 63)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 64)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 65)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 66)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 67)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 68)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 69)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 70)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 71)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 72)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 73)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 74)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 75)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 76)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 77)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 78)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 79)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 80)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 81)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 82)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 83)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 84)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 85)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 86)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 87)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 88)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 89)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 90)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 91)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 92)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 93)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 94)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 95)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 96)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 97)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 98)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 99)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 100)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 101)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 102)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 103)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 104)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 105)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 106)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 107)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 108)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 109)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 110)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 111)) "uninitialized element")
+(assert_return (invoke "test" (i32.const 112)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 113)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 114)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 115)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 116)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 117)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 118)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 119)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 120)) (i32.const 8))
+(assert_return (invoke "test" (i32.const 121)) (i32.const 9))
+(assert_return (invoke "test" (i32.const 122)) (i32.const 10))
+(assert_return (invoke "test" (i32.const 123)) (i32.const 11))
+(assert_return (invoke "test" (i32.const 124)) (i32.const 12))
+(assert_return (invoke "test" (i32.const 125)) (i32.const 13))
+(assert_return (invoke "test" (i32.const 126)) (i32.const 14))
+(assert_return (invoke "test" (i32.const 127)) (i32.const 15))
+
+(module
+ (type (func (result i32)))
+ (table 128 128 funcref)
+ (elem (i32.const 0)
+ $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $targetOffs i32) (param $srcOffs i32) (param $len i32)
+ (table.copy (local.get $targetOffs) (local.get $srcOffs) (local.get $len))))
+
+(assert_trap (invoke "run" (i32.const 112) (i32.const 0) (i32.const 4294967264))
+ "out of bounds")
+(assert_return (invoke "test" (i32.const 0)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 1)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 2)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 3)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 5)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 6)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 7)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 8)) (i32.const 8))
+(assert_return (invoke "test" (i32.const 9)) (i32.const 9))
+(assert_return (invoke "test" (i32.const 10)) (i32.const 10))
+(assert_return (invoke "test" (i32.const 11)) (i32.const 11))
+(assert_return (invoke "test" (i32.const 12)) (i32.const 12))
+(assert_return (invoke "test" (i32.const 13)) (i32.const 13))
+(assert_return (invoke "test" (i32.const 14)) (i32.const 14))
+(assert_return (invoke "test" (i32.const 15)) (i32.const 15))
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 48)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 49)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 50)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 51)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 52)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 53)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 54)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 55)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 56)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 57)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 58)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 59)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 60)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 61)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 62)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 63)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 64)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 65)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 66)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 67)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 68)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 69)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 70)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 71)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 72)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 73)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 74)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 75)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 76)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 77)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 78)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 79)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 80)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 81)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 82)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 83)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 84)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 85)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 86)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 87)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 88)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 89)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 90)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 91)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 92)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 93)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 94)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 95)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 96)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 97)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 98)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 99)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 100)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 101)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 102)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 103)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 104)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 105)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 106)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 107)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 108)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 109)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 110)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 111)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 112)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 113)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 114)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 115)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 116)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 117)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 118)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 119)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 120)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 121)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 122)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 123)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 124)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 125)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 126)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 127)) "uninitialized element")
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast.js
new file mode 100644
index 0000000000..67c1e94e0e
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_copy.wast.js
@@ -0,0 +1,2651 @@
+
+'use strict';
+
+let spectest = {
+ print: console.log.bind(console),
+ print_i32: console.log.bind(console),
+ print_i32_f32: console.log.bind(console),
+ print_f64_f64: console.log.bind(console),
+ print_f32: console.log.bind(console),
+ print_f64: console.log.bind(console),
+ global_i32: 666,
+ global_f32: 666,
+ global_f64: 666,
+ table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
+ memory: new WebAssembly.Memory({initial: 1, maximum: 2})
+};
+let handler = {
+ get(target, prop) {
+ return (prop in target) ? target[prop] : {};
+ }
+};
+let registry = new Proxy({spectest}, handler);
+
+function register(name, instance) {
+ registry[name] = instance.exports;
+}
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function get(instance, name) {
+ let v = instance.exports[name];
+ return (v instanceof WebAssembly.Global) ? v.value : v;
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function run(action) {
+ action();
+}
+
+function assert_malformed(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm decoding failure expected");
+}
+
+function assert_invalid(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm validation failure expected");
+}
+
+function assert_unlinkable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.LinkError) return;
+ }
+ throw new Error("Wasm linking failure expected");
+}
+
+function assert_uninstantiable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+function assert_trap(action) {
+ try { action() } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+let StackOverflow;
+try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
+
+function assert_exhaustion(action) {
+ try { action() } catch (e) {
+ if (e instanceof StackOverflow) return;
+ }
+ throw new Error("Wasm resource exhaustion expected");
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+function assert_return_canonical_nan(action) {
+ let actual = action();
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test that it's a canonical NaN.
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+function assert_return_arithmetic_nan(action) {
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test for specific bitpatterns here.
+ let actual = action();
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+// table_copy.wast:5
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x03\x86\x80\x80\x80\x00\x05\x00\x00\x00\x00\x00\x07\x9f\x80\x80\x80\x00\x05\x03\x65\x66\x30\x00\x00\x03\x65\x66\x31\x00\x01\x03\x65\x66\x32\x00\x02\x03\x65\x66\x33\x00\x03\x03\x65\x66\x34\x00\x04\x0a\xae\x80\x80\x80\x00\x05\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b");
+
+// table_copy.wast:12
+register("a", $1)
+
+// table_copy.wast:14
+let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xc2\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x83\x80\x80\x80\x00\x00\x01\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_copy.wast:37
+run(() => call($2, "test", []));
+
+// table_copy.wast:38
+assert_trap(() => call($2, "check", [0]));
+
+// table_copy.wast:39
+assert_trap(() => call($2, "check", [1]));
+
+// table_copy.wast:40
+assert_return(() => call($2, "check", [2]), 3);
+
+// table_copy.wast:41
+assert_return(() => call($2, "check", [3]), 1);
+
+// table_copy.wast:42
+assert_return(() => call($2, "check", [4]), 4);
+
+// table_copy.wast:43
+assert_return(() => call($2, "check", [5]), 1);
+
+// table_copy.wast:44
+assert_trap(() => call($2, "check", [6]));
+
+// table_copy.wast:45
+assert_trap(() => call($2, "check", [7]));
+
+// table_copy.wast:46
+assert_trap(() => call($2, "check", [8]));
+
+// table_copy.wast:47
+assert_trap(() => call($2, "check", [9]));
+
+// table_copy.wast:48
+assert_trap(() => call($2, "check", [10]));
+
+// table_copy.wast:49
+assert_trap(() => call($2, "check", [11]));
+
+// table_copy.wast:50
+assert_return(() => call($2, "check", [12]), 7);
+
+// table_copy.wast:51
+assert_return(() => call($2, "check", [13]), 5);
+
+// table_copy.wast:52
+assert_return(() => call($2, "check", [14]), 2);
+
+// table_copy.wast:53
+assert_return(() => call($2, "check", [15]), 3);
+
+// table_copy.wast:54
+assert_return(() => call($2, "check", [16]), 6);
+
+// table_copy.wast:55
+assert_trap(() => call($2, "check", [17]));
+
+// table_copy.wast:56
+assert_trap(() => call($2, "check", [18]));
+
+// table_copy.wast:57
+assert_trap(() => call($2, "check", [19]));
+
+// table_copy.wast:58
+assert_trap(() => call($2, "check", [20]));
+
+// table_copy.wast:59
+assert_trap(() => call($2, "check", [21]));
+
+// table_copy.wast:60
+assert_trap(() => call($2, "check", [22]));
+
+// table_copy.wast:61
+assert_trap(() => call($2, "check", [23]));
+
+// table_copy.wast:62
+assert_trap(() => call($2, "check", [24]));
+
+// table_copy.wast:63
+assert_trap(() => call($2, "check", [25]));
+
+// table_copy.wast:64
+assert_trap(() => call($2, "check", [26]));
+
+// table_copy.wast:65
+assert_trap(() => call($2, "check", [27]));
+
+// table_copy.wast:66
+assert_trap(() => call($2, "check", [28]));
+
+// table_copy.wast:67
+assert_trap(() => call($2, "check", [29]));
+
+// table_copy.wast:69
+let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0d\x41\x02\x41\x03\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_copy.wast:92
+run(() => call($3, "test", []));
+
+// table_copy.wast:93
+assert_trap(() => call($3, "check", [0]));
+
+// table_copy.wast:94
+assert_trap(() => call($3, "check", [1]));
+
+// table_copy.wast:95
+assert_return(() => call($3, "check", [2]), 3);
+
+// table_copy.wast:96
+assert_return(() => call($3, "check", [3]), 1);
+
+// table_copy.wast:97
+assert_return(() => call($3, "check", [4]), 4);
+
+// table_copy.wast:98
+assert_return(() => call($3, "check", [5]), 1);
+
+// table_copy.wast:99
+assert_trap(() => call($3, "check", [6]));
+
+// table_copy.wast:100
+assert_trap(() => call($3, "check", [7]));
+
+// table_copy.wast:101
+assert_trap(() => call($3, "check", [8]));
+
+// table_copy.wast:102
+assert_trap(() => call($3, "check", [9]));
+
+// table_copy.wast:103
+assert_trap(() => call($3, "check", [10]));
+
+// table_copy.wast:104
+assert_trap(() => call($3, "check", [11]));
+
+// table_copy.wast:105
+assert_return(() => call($3, "check", [12]), 7);
+
+// table_copy.wast:106
+assert_return(() => call($3, "check", [13]), 3);
+
+// table_copy.wast:107
+assert_return(() => call($3, "check", [14]), 1);
+
+// table_copy.wast:108
+assert_return(() => call($3, "check", [15]), 4);
+
+// table_copy.wast:109
+assert_return(() => call($3, "check", [16]), 6);
+
+// table_copy.wast:110
+assert_trap(() => call($3, "check", [17]));
+
+// table_copy.wast:111
+assert_trap(() => call($3, "check", [18]));
+
+// table_copy.wast:112
+assert_trap(() => call($3, "check", [19]));
+
+// table_copy.wast:113
+assert_trap(() => call($3, "check", [20]));
+
+// table_copy.wast:114
+assert_trap(() => call($3, "check", [21]));
+
+// table_copy.wast:115
+assert_trap(() => call($3, "check", [22]));
+
+// table_copy.wast:116
+assert_trap(() => call($3, "check", [23]));
+
+// table_copy.wast:117
+assert_trap(() => call($3, "check", [24]));
+
+// table_copy.wast:118
+assert_trap(() => call($3, "check", [25]));
+
+// table_copy.wast:119
+assert_trap(() => call($3, "check", [26]));
+
+// table_copy.wast:120
+assert_trap(() => call($3, "check", [27]));
+
+// table_copy.wast:121
+assert_trap(() => call($3, "check", [28]));
+
+// table_copy.wast:122
+assert_trap(() => call($3, "check", [29]));
+
+// table_copy.wast:124
+let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x19\x41\x0f\x41\x02\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_copy.wast:147
+run(() => call($4, "test", []));
+
+// table_copy.wast:148
+assert_trap(() => call($4, "check", [0]));
+
+// table_copy.wast:149
+assert_trap(() => call($4, "check", [1]));
+
+// table_copy.wast:150
+assert_return(() => call($4, "check", [2]), 3);
+
+// table_copy.wast:151
+assert_return(() => call($4, "check", [3]), 1);
+
+// table_copy.wast:152
+assert_return(() => call($4, "check", [4]), 4);
+
+// table_copy.wast:153
+assert_return(() => call($4, "check", [5]), 1);
+
+// table_copy.wast:154
+assert_trap(() => call($4, "check", [6]));
+
+// table_copy.wast:155
+assert_trap(() => call($4, "check", [7]));
+
+// table_copy.wast:156
+assert_trap(() => call($4, "check", [8]));
+
+// table_copy.wast:157
+assert_trap(() => call($4, "check", [9]));
+
+// table_copy.wast:158
+assert_trap(() => call($4, "check", [10]));
+
+// table_copy.wast:159
+assert_trap(() => call($4, "check", [11]));
+
+// table_copy.wast:160
+assert_return(() => call($4, "check", [12]), 7);
+
+// table_copy.wast:161
+assert_return(() => call($4, "check", [13]), 5);
+
+// table_copy.wast:162
+assert_return(() => call($4, "check", [14]), 2);
+
+// table_copy.wast:163
+assert_return(() => call($4, "check", [15]), 3);
+
+// table_copy.wast:164
+assert_return(() => call($4, "check", [16]), 6);
+
+// table_copy.wast:165
+assert_trap(() => call($4, "check", [17]));
+
+// table_copy.wast:166
+assert_trap(() => call($4, "check", [18]));
+
+// table_copy.wast:167
+assert_trap(() => call($4, "check", [19]));
+
+// table_copy.wast:168
+assert_trap(() => call($4, "check", [20]));
+
+// table_copy.wast:169
+assert_trap(() => call($4, "check", [21]));
+
+// table_copy.wast:170
+assert_trap(() => call($4, "check", [22]));
+
+// table_copy.wast:171
+assert_trap(() => call($4, "check", [23]));
+
+// table_copy.wast:172
+assert_trap(() => call($4, "check", [24]));
+
+// table_copy.wast:173
+assert_return(() => call($4, "check", [25]), 3);
+
+// table_copy.wast:174
+assert_return(() => call($4, "check", [26]), 6);
+
+// table_copy.wast:175
+assert_trap(() => call($4, "check", [27]));
+
+// table_copy.wast:176
+assert_trap(() => call($4, "check", [28]));
+
+// table_copy.wast:177
+assert_trap(() => call($4, "check", [29]));
+
+// table_copy.wast:179
+let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0d\x41\x19\x41\x03\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_copy.wast:202
+run(() => call($5, "test", []));
+
+// table_copy.wast:203
+assert_trap(() => call($5, "check", [0]));
+
+// table_copy.wast:204
+assert_trap(() => call($5, "check", [1]));
+
+// table_copy.wast:205
+assert_return(() => call($5, "check", [2]), 3);
+
+// table_copy.wast:206
+assert_return(() => call($5, "check", [3]), 1);
+
+// table_copy.wast:207
+assert_return(() => call($5, "check", [4]), 4);
+
+// table_copy.wast:208
+assert_return(() => call($5, "check", [5]), 1);
+
+// table_copy.wast:209
+assert_trap(() => call($5, "check", [6]));
+
+// table_copy.wast:210
+assert_trap(() => call($5, "check", [7]));
+
+// table_copy.wast:211
+assert_trap(() => call($5, "check", [8]));
+
+// table_copy.wast:212
+assert_trap(() => call($5, "check", [9]));
+
+// table_copy.wast:213
+assert_trap(() => call($5, "check", [10]));
+
+// table_copy.wast:214
+assert_trap(() => call($5, "check", [11]));
+
+// table_copy.wast:215
+assert_return(() => call($5, "check", [12]), 7);
+
+// table_copy.wast:216
+assert_trap(() => call($5, "check", [13]));
+
+// table_copy.wast:217
+assert_trap(() => call($5, "check", [14]));
+
+// table_copy.wast:218
+assert_trap(() => call($5, "check", [15]));
+
+// table_copy.wast:219
+assert_return(() => call($5, "check", [16]), 6);
+
+// table_copy.wast:220
+assert_trap(() => call($5, "check", [17]));
+
+// table_copy.wast:221
+assert_trap(() => call($5, "check", [18]));
+
+// table_copy.wast:222
+assert_trap(() => call($5, "check", [19]));
+
+// table_copy.wast:223
+assert_trap(() => call($5, "check", [20]));
+
+// table_copy.wast:224
+assert_trap(() => call($5, "check", [21]));
+
+// table_copy.wast:225
+assert_trap(() => call($5, "check", [22]));
+
+// table_copy.wast:226
+assert_trap(() => call($5, "check", [23]));
+
+// table_copy.wast:227
+assert_trap(() => call($5, "check", [24]));
+
+// table_copy.wast:228
+assert_trap(() => call($5, "check", [25]));
+
+// table_copy.wast:229
+assert_trap(() => call($5, "check", [26]));
+
+// table_copy.wast:230
+assert_trap(() => call($5, "check", [27]));
+
+// table_copy.wast:231
+assert_trap(() => call($5, "check", [28]));
+
+// table_copy.wast:232
+assert_trap(() => call($5, "check", [29]));
+
+// table_copy.wast:234
+let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x14\x41\x16\x41\x04\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_copy.wast:257
+run(() => call($6, "test", []));
+
+// table_copy.wast:258
+assert_trap(() => call($6, "check", [0]));
+
+// table_copy.wast:259
+assert_trap(() => call($6, "check", [1]));
+
+// table_copy.wast:260
+assert_return(() => call($6, "check", [2]), 3);
+
+// table_copy.wast:261
+assert_return(() => call($6, "check", [3]), 1);
+
+// table_copy.wast:262
+assert_return(() => call($6, "check", [4]), 4);
+
+// table_copy.wast:263
+assert_return(() => call($6, "check", [5]), 1);
+
+// table_copy.wast:264
+assert_trap(() => call($6, "check", [6]));
+
+// table_copy.wast:265
+assert_trap(() => call($6, "check", [7]));
+
+// table_copy.wast:266
+assert_trap(() => call($6, "check", [8]));
+
+// table_copy.wast:267
+assert_trap(() => call($6, "check", [9]));
+
+// table_copy.wast:268
+assert_trap(() => call($6, "check", [10]));
+
+// table_copy.wast:269
+assert_trap(() => call($6, "check", [11]));
+
+// table_copy.wast:270
+assert_return(() => call($6, "check", [12]), 7);
+
+// table_copy.wast:271
+assert_return(() => call($6, "check", [13]), 5);
+
+// table_copy.wast:272
+assert_return(() => call($6, "check", [14]), 2);
+
+// table_copy.wast:273
+assert_return(() => call($6, "check", [15]), 3);
+
+// table_copy.wast:274
+assert_return(() => call($6, "check", [16]), 6);
+
+// table_copy.wast:275
+assert_trap(() => call($6, "check", [17]));
+
+// table_copy.wast:276
+assert_trap(() => call($6, "check", [18]));
+
+// table_copy.wast:277
+assert_trap(() => call($6, "check", [19]));
+
+// table_copy.wast:278
+assert_trap(() => call($6, "check", [20]));
+
+// table_copy.wast:279
+assert_trap(() => call($6, "check", [21]));
+
+// table_copy.wast:280
+assert_trap(() => call($6, "check", [22]));
+
+// table_copy.wast:281
+assert_trap(() => call($6, "check", [23]));
+
+// table_copy.wast:282
+assert_trap(() => call($6, "check", [24]));
+
+// table_copy.wast:283
+assert_trap(() => call($6, "check", [25]));
+
+// table_copy.wast:284
+assert_trap(() => call($6, "check", [26]));
+
+// table_copy.wast:285
+assert_trap(() => call($6, "check", [27]));
+
+// table_copy.wast:286
+assert_trap(() => call($6, "check", [28]));
+
+// table_copy.wast:287
+assert_trap(() => call($6, "check", [29]));
+
+// table_copy.wast:289
+let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x19\x41\x01\x41\x03\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_copy.wast:312
+run(() => call($7, "test", []));
+
+// table_copy.wast:313
+assert_trap(() => call($7, "check", [0]));
+
+// table_copy.wast:314
+assert_trap(() => call($7, "check", [1]));
+
+// table_copy.wast:315
+assert_return(() => call($7, "check", [2]), 3);
+
+// table_copy.wast:316
+assert_return(() => call($7, "check", [3]), 1);
+
+// table_copy.wast:317
+assert_return(() => call($7, "check", [4]), 4);
+
+// table_copy.wast:318
+assert_return(() => call($7, "check", [5]), 1);
+
+// table_copy.wast:319
+assert_trap(() => call($7, "check", [6]));
+
+// table_copy.wast:320
+assert_trap(() => call($7, "check", [7]));
+
+// table_copy.wast:321
+assert_trap(() => call($7, "check", [8]));
+
+// table_copy.wast:322
+assert_trap(() => call($7, "check", [9]));
+
+// table_copy.wast:323
+assert_trap(() => call($7, "check", [10]));
+
+// table_copy.wast:324
+assert_trap(() => call($7, "check", [11]));
+
+// table_copy.wast:325
+assert_return(() => call($7, "check", [12]), 7);
+
+// table_copy.wast:326
+assert_return(() => call($7, "check", [13]), 5);
+
+// table_copy.wast:327
+assert_return(() => call($7, "check", [14]), 2);
+
+// table_copy.wast:328
+assert_return(() => call($7, "check", [15]), 3);
+
+// table_copy.wast:329
+assert_return(() => call($7, "check", [16]), 6);
+
+// table_copy.wast:330
+assert_trap(() => call($7, "check", [17]));
+
+// table_copy.wast:331
+assert_trap(() => call($7, "check", [18]));
+
+// table_copy.wast:332
+assert_trap(() => call($7, "check", [19]));
+
+// table_copy.wast:333
+assert_trap(() => call($7, "check", [20]));
+
+// table_copy.wast:334
+assert_trap(() => call($7, "check", [21]));
+
+// table_copy.wast:335
+assert_trap(() => call($7, "check", [22]));
+
+// table_copy.wast:336
+assert_trap(() => call($7, "check", [23]));
+
+// table_copy.wast:337
+assert_trap(() => call($7, "check", [24]));
+
+// table_copy.wast:338
+assert_trap(() => call($7, "check", [25]));
+
+// table_copy.wast:339
+assert_return(() => call($7, "check", [26]), 3);
+
+// table_copy.wast:340
+assert_return(() => call($7, "check", [27]), 1);
+
+// table_copy.wast:341
+assert_trap(() => call($7, "check", [28]));
+
+// table_copy.wast:342
+assert_trap(() => call($7, "check", [29]));
+
+// table_copy.wast:344
+let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0a\x41\x0c\x41\x07\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_copy.wast:367
+run(() => call($8, "test", []));
+
+// table_copy.wast:368
+assert_trap(() => call($8, "check", [0]));
+
+// table_copy.wast:369
+assert_trap(() => call($8, "check", [1]));
+
+// table_copy.wast:370
+assert_return(() => call($8, "check", [2]), 3);
+
+// table_copy.wast:371
+assert_return(() => call($8, "check", [3]), 1);
+
+// table_copy.wast:372
+assert_return(() => call($8, "check", [4]), 4);
+
+// table_copy.wast:373
+assert_return(() => call($8, "check", [5]), 1);
+
+// table_copy.wast:374
+assert_trap(() => call($8, "check", [6]));
+
+// table_copy.wast:375
+assert_trap(() => call($8, "check", [7]));
+
+// table_copy.wast:376
+assert_trap(() => call($8, "check", [8]));
+
+// table_copy.wast:377
+assert_trap(() => call($8, "check", [9]));
+
+// table_copy.wast:378
+assert_return(() => call($8, "check", [10]), 7);
+
+// table_copy.wast:379
+assert_return(() => call($8, "check", [11]), 5);
+
+// table_copy.wast:380
+assert_return(() => call($8, "check", [12]), 2);
+
+// table_copy.wast:381
+assert_return(() => call($8, "check", [13]), 3);
+
+// table_copy.wast:382
+assert_return(() => call($8, "check", [14]), 6);
+
+// table_copy.wast:383
+assert_trap(() => call($8, "check", [15]));
+
+// table_copy.wast:384
+assert_trap(() => call($8, "check", [16]));
+
+// table_copy.wast:385
+assert_trap(() => call($8, "check", [17]));
+
+// table_copy.wast:386
+assert_trap(() => call($8, "check", [18]));
+
+// table_copy.wast:387
+assert_trap(() => call($8, "check", [19]));
+
+// table_copy.wast:388
+assert_trap(() => call($8, "check", [20]));
+
+// table_copy.wast:389
+assert_trap(() => call($8, "check", [21]));
+
+// table_copy.wast:390
+assert_trap(() => call($8, "check", [22]));
+
+// table_copy.wast:391
+assert_trap(() => call($8, "check", [23]));
+
+// table_copy.wast:392
+assert_trap(() => call($8, "check", [24]));
+
+// table_copy.wast:393
+assert_trap(() => call($8, "check", [25]));
+
+// table_copy.wast:394
+assert_trap(() => call($8, "check", [26]));
+
+// table_copy.wast:395
+assert_trap(() => call($8, "check", [27]));
+
+// table_copy.wast:396
+assert_trap(() => call($8, "check", [28]));
+
+// table_copy.wast:397
+assert_trap(() => call($8, "check", [29]));
+
+// table_copy.wast:399
+let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x0a\x41\x07\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_copy.wast:422
+run(() => call($9, "test", []));
+
+// table_copy.wast:423
+assert_trap(() => call($9, "check", [0]));
+
+// table_copy.wast:424
+assert_trap(() => call($9, "check", [1]));
+
+// table_copy.wast:425
+assert_return(() => call($9, "check", [2]), 3);
+
+// table_copy.wast:426
+assert_return(() => call($9, "check", [3]), 1);
+
+// table_copy.wast:427
+assert_return(() => call($9, "check", [4]), 4);
+
+// table_copy.wast:428
+assert_return(() => call($9, "check", [5]), 1);
+
+// table_copy.wast:429
+assert_trap(() => call($9, "check", [6]));
+
+// table_copy.wast:430
+assert_trap(() => call($9, "check", [7]));
+
+// table_copy.wast:431
+assert_trap(() => call($9, "check", [8]));
+
+// table_copy.wast:432
+assert_trap(() => call($9, "check", [9]));
+
+// table_copy.wast:433
+assert_trap(() => call($9, "check", [10]));
+
+// table_copy.wast:434
+assert_trap(() => call($9, "check", [11]));
+
+// table_copy.wast:435
+assert_trap(() => call($9, "check", [12]));
+
+// table_copy.wast:436
+assert_trap(() => call($9, "check", [13]));
+
+// table_copy.wast:437
+assert_return(() => call($9, "check", [14]), 7);
+
+// table_copy.wast:438
+assert_return(() => call($9, "check", [15]), 5);
+
+// table_copy.wast:439
+assert_return(() => call($9, "check", [16]), 2);
+
+// table_copy.wast:440
+assert_return(() => call($9, "check", [17]), 3);
+
+// table_copy.wast:441
+assert_return(() => call($9, "check", [18]), 6);
+
+// table_copy.wast:442
+assert_trap(() => call($9, "check", [19]));
+
+// table_copy.wast:443
+assert_trap(() => call($9, "check", [20]));
+
+// table_copy.wast:444
+assert_trap(() => call($9, "check", [21]));
+
+// table_copy.wast:445
+assert_trap(() => call($9, "check", [22]));
+
+// table_copy.wast:446
+assert_trap(() => call($9, "check", [23]));
+
+// table_copy.wast:447
+assert_trap(() => call($9, "check", [24]));
+
+// table_copy.wast:448
+assert_trap(() => call($9, "check", [25]));
+
+// table_copy.wast:449
+assert_trap(() => call($9, "check", [26]));
+
+// table_copy.wast:450
+assert_trap(() => call($9, "check", [27]));
+
+// table_copy.wast:451
+assert_trap(() => call($9, "check", [28]));
+
+// table_copy.wast:452
+assert_trap(() => call($9, "check", [29]));
+
+// table_copy.wast:454
+let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x1c\x41\x01\x41\x03\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:474
+assert_trap(() => call($10, "test", []));
+
+// table_copy.wast:476
+let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x7e\x41\x01\x41\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:496
+assert_trap(() => call($11, "test", []));
+
+// table_copy.wast:498
+let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x19\x41\x06\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:518
+assert_trap(() => call($12, "test", []));
+
+// table_copy.wast:520
+let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x7e\x41\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:540
+assert_trap(() => call($13, "test", []));
+
+// table_copy.wast:542
+let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x19\x41\x00\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:562
+run(() => call($14, "test", []));
+
+// table_copy.wast:564
+let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x1e\x41\x0f\x41\x00\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:584
+run(() => call($15, "test", []));
+
+// table_copy.wast:586
+let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x1e\x41\x00\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:606
+run(() => call($16, "test", []));
+
+// table_copy.wast:608
+let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:634
+assert_trap(() => call($17, "run", [24, 0, 16]));
+
+// table_copy.wast:636
+assert_return(() => call($17, "test", [0]), 0);
+
+// table_copy.wast:637
+assert_return(() => call($17, "test", [1]), 1);
+
+// table_copy.wast:638
+assert_return(() => call($17, "test", [2]), 2);
+
+// table_copy.wast:639
+assert_return(() => call($17, "test", [3]), 3);
+
+// table_copy.wast:640
+assert_return(() => call($17, "test", [4]), 4);
+
+// table_copy.wast:641
+assert_return(() => call($17, "test", [5]), 5);
+
+// table_copy.wast:642
+assert_return(() => call($17, "test", [6]), 6);
+
+// table_copy.wast:643
+assert_return(() => call($17, "test", [7]), 7);
+
+// table_copy.wast:644
+assert_trap(() => call($17, "test", [8]));
+
+// table_copy.wast:645
+assert_trap(() => call($17, "test", [9]));
+
+// table_copy.wast:646
+assert_trap(() => call($17, "test", [10]));
+
+// table_copy.wast:647
+assert_trap(() => call($17, "test", [11]));
+
+// table_copy.wast:648
+assert_trap(() => call($17, "test", [12]));
+
+// table_copy.wast:649
+assert_trap(() => call($17, "test", [13]));
+
+// table_copy.wast:650
+assert_trap(() => call($17, "test", [14]));
+
+// table_copy.wast:651
+assert_trap(() => call($17, "test", [15]));
+
+// table_copy.wast:652
+assert_trap(() => call($17, "test", [16]));
+
+// table_copy.wast:653
+assert_trap(() => call($17, "test", [17]));
+
+// table_copy.wast:654
+assert_trap(() => call($17, "test", [18]));
+
+// table_copy.wast:655
+assert_trap(() => call($17, "test", [19]));
+
+// table_copy.wast:656
+assert_trap(() => call($17, "test", [20]));
+
+// table_copy.wast:657
+assert_trap(() => call($17, "test", [21]));
+
+// table_copy.wast:658
+assert_trap(() => call($17, "test", [22]));
+
+// table_copy.wast:659
+assert_trap(() => call($17, "test", [23]));
+
+// table_copy.wast:660
+assert_return(() => call($17, "test", [24]), 0);
+
+// table_copy.wast:661
+assert_return(() => call($17, "test", [25]), 1);
+
+// table_copy.wast:662
+assert_return(() => call($17, "test", [26]), 2);
+
+// table_copy.wast:663
+assert_return(() => call($17, "test", [27]), 3);
+
+// table_copy.wast:664
+assert_return(() => call($17, "test", [28]), 4);
+
+// table_copy.wast:665
+assert_return(() => call($17, "test", [29]), 5);
+
+// table_copy.wast:666
+assert_return(() => call($17, "test", [30]), 6);
+
+// table_copy.wast:667
+assert_return(() => call($17, "test", [31]), 7);
+
+// table_copy.wast:669
+let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8f\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x09\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:695
+assert_trap(() => call($18, "run", [23, 0, 15]));
+
+// table_copy.wast:697
+assert_return(() => call($18, "test", [0]), 0);
+
+// table_copy.wast:698
+assert_return(() => call($18, "test", [1]), 1);
+
+// table_copy.wast:699
+assert_return(() => call($18, "test", [2]), 2);
+
+// table_copy.wast:700
+assert_return(() => call($18, "test", [3]), 3);
+
+// table_copy.wast:701
+assert_return(() => call($18, "test", [4]), 4);
+
+// table_copy.wast:702
+assert_return(() => call($18, "test", [5]), 5);
+
+// table_copy.wast:703
+assert_return(() => call($18, "test", [6]), 6);
+
+// table_copy.wast:704
+assert_return(() => call($18, "test", [7]), 7);
+
+// table_copy.wast:705
+assert_return(() => call($18, "test", [8]), 8);
+
+// table_copy.wast:706
+assert_trap(() => call($18, "test", [9]));
+
+// table_copy.wast:707
+assert_trap(() => call($18, "test", [10]));
+
+// table_copy.wast:708
+assert_trap(() => call($18, "test", [11]));
+
+// table_copy.wast:709
+assert_trap(() => call($18, "test", [12]));
+
+// table_copy.wast:710
+assert_trap(() => call($18, "test", [13]));
+
+// table_copy.wast:711
+assert_trap(() => call($18, "test", [14]));
+
+// table_copy.wast:712
+assert_trap(() => call($18, "test", [15]));
+
+// table_copy.wast:713
+assert_trap(() => call($18, "test", [16]));
+
+// table_copy.wast:714
+assert_trap(() => call($18, "test", [17]));
+
+// table_copy.wast:715
+assert_trap(() => call($18, "test", [18]));
+
+// table_copy.wast:716
+assert_trap(() => call($18, "test", [19]));
+
+// table_copy.wast:717
+assert_trap(() => call($18, "test", [20]));
+
+// table_copy.wast:718
+assert_trap(() => call($18, "test", [21]));
+
+// table_copy.wast:719
+assert_trap(() => call($18, "test", [22]));
+
+// table_copy.wast:720
+assert_return(() => call($18, "test", [23]), 0);
+
+// table_copy.wast:721
+assert_return(() => call($18, "test", [24]), 1);
+
+// table_copy.wast:722
+assert_return(() => call($18, "test", [25]), 2);
+
+// table_copy.wast:723
+assert_return(() => call($18, "test", [26]), 3);
+
+// table_copy.wast:724
+assert_return(() => call($18, "test", [27]), 4);
+
+// table_copy.wast:725
+assert_return(() => call($18, "test", [28]), 5);
+
+// table_copy.wast:726
+assert_return(() => call($18, "test", [29]), 6);
+
+// table_copy.wast:727
+assert_return(() => call($18, "test", [30]), 7);
+
+// table_copy.wast:728
+assert_return(() => call($18, "test", [31]), 8);
+
+// table_copy.wast:730
+let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x18\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:756
+assert_trap(() => call($19, "run", [0, 24, 16]));
+
+// table_copy.wast:758
+assert_return(() => call($19, "test", [0]), 0);
+
+// table_copy.wast:759
+assert_return(() => call($19, "test", [1]), 1);
+
+// table_copy.wast:760
+assert_return(() => call($19, "test", [2]), 2);
+
+// table_copy.wast:761
+assert_return(() => call($19, "test", [3]), 3);
+
+// table_copy.wast:762
+assert_return(() => call($19, "test", [4]), 4);
+
+// table_copy.wast:763
+assert_return(() => call($19, "test", [5]), 5);
+
+// table_copy.wast:764
+assert_return(() => call($19, "test", [6]), 6);
+
+// table_copy.wast:765
+assert_return(() => call($19, "test", [7]), 7);
+
+// table_copy.wast:766
+assert_trap(() => call($19, "test", [8]));
+
+// table_copy.wast:767
+assert_trap(() => call($19, "test", [9]));
+
+// table_copy.wast:768
+assert_trap(() => call($19, "test", [10]));
+
+// table_copy.wast:769
+assert_trap(() => call($19, "test", [11]));
+
+// table_copy.wast:770
+assert_trap(() => call($19, "test", [12]));
+
+// table_copy.wast:771
+assert_trap(() => call($19, "test", [13]));
+
+// table_copy.wast:772
+assert_trap(() => call($19, "test", [14]));
+
+// table_copy.wast:773
+assert_trap(() => call($19, "test", [15]));
+
+// table_copy.wast:774
+assert_trap(() => call($19, "test", [16]));
+
+// table_copy.wast:775
+assert_trap(() => call($19, "test", [17]));
+
+// table_copy.wast:776
+assert_trap(() => call($19, "test", [18]));
+
+// table_copy.wast:777
+assert_trap(() => call($19, "test", [19]));
+
+// table_copy.wast:778
+assert_trap(() => call($19, "test", [20]));
+
+// table_copy.wast:779
+assert_trap(() => call($19, "test", [21]));
+
+// table_copy.wast:780
+assert_trap(() => call($19, "test", [22]));
+
+// table_copy.wast:781
+assert_trap(() => call($19, "test", [23]));
+
+// table_copy.wast:782
+assert_return(() => call($19, "test", [24]), 0);
+
+// table_copy.wast:783
+assert_return(() => call($19, "test", [25]), 1);
+
+// table_copy.wast:784
+assert_return(() => call($19, "test", [26]), 2);
+
+// table_copy.wast:785
+assert_return(() => call($19, "test", [27]), 3);
+
+// table_copy.wast:786
+assert_return(() => call($19, "test", [28]), 4);
+
+// table_copy.wast:787
+assert_return(() => call($19, "test", [29]), 5);
+
+// table_copy.wast:788
+assert_return(() => call($19, "test", [30]), 6);
+
+// table_copy.wast:789
+assert_return(() => call($19, "test", [31]), 7);
+
+// table_copy.wast:791
+let $20 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8f\x80\x80\x80\x00\x01\x00\x41\x17\x0b\x09\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:817
+assert_trap(() => call($20, "run", [0, 23, 15]));
+
+// table_copy.wast:819
+assert_return(() => call($20, "test", [0]), 0);
+
+// table_copy.wast:820
+assert_return(() => call($20, "test", [1]), 1);
+
+// table_copy.wast:821
+assert_return(() => call($20, "test", [2]), 2);
+
+// table_copy.wast:822
+assert_return(() => call($20, "test", [3]), 3);
+
+// table_copy.wast:823
+assert_return(() => call($20, "test", [4]), 4);
+
+// table_copy.wast:824
+assert_return(() => call($20, "test", [5]), 5);
+
+// table_copy.wast:825
+assert_return(() => call($20, "test", [6]), 6);
+
+// table_copy.wast:826
+assert_return(() => call($20, "test", [7]), 7);
+
+// table_copy.wast:827
+assert_return(() => call($20, "test", [8]), 8);
+
+// table_copy.wast:828
+assert_trap(() => call($20, "test", [9]));
+
+// table_copy.wast:829
+assert_trap(() => call($20, "test", [10]));
+
+// table_copy.wast:830
+assert_trap(() => call($20, "test", [11]));
+
+// table_copy.wast:831
+assert_trap(() => call($20, "test", [12]));
+
+// table_copy.wast:832
+assert_trap(() => call($20, "test", [13]));
+
+// table_copy.wast:833
+assert_trap(() => call($20, "test", [14]));
+
+// table_copy.wast:834
+assert_trap(() => call($20, "test", [15]));
+
+// table_copy.wast:835
+assert_trap(() => call($20, "test", [16]));
+
+// table_copy.wast:836
+assert_trap(() => call($20, "test", [17]));
+
+// table_copy.wast:837
+assert_trap(() => call($20, "test", [18]));
+
+// table_copy.wast:838
+assert_trap(() => call($20, "test", [19]));
+
+// table_copy.wast:839
+assert_trap(() => call($20, "test", [20]));
+
+// table_copy.wast:840
+assert_trap(() => call($20, "test", [21]));
+
+// table_copy.wast:841
+assert_trap(() => call($20, "test", [22]));
+
+// table_copy.wast:842
+assert_return(() => call($20, "test", [23]), 0);
+
+// table_copy.wast:843
+assert_return(() => call($20, "test", [24]), 1);
+
+// table_copy.wast:844
+assert_return(() => call($20, "test", [25]), 2);
+
+// table_copy.wast:845
+assert_return(() => call($20, "test", [26]), 3);
+
+// table_copy.wast:846
+assert_return(() => call($20, "test", [27]), 4);
+
+// table_copy.wast:847
+assert_return(() => call($20, "test", [28]), 5);
+
+// table_copy.wast:848
+assert_return(() => call($20, "test", [29]), 6);
+
+// table_copy.wast:849
+assert_return(() => call($20, "test", [30]), 7);
+
+// table_copy.wast:850
+assert_return(() => call($20, "test", [31]), 8);
+
+// table_copy.wast:852
+let $21 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x0b\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:878
+assert_trap(() => call($21, "run", [24, 11, 16]));
+
+// table_copy.wast:880
+assert_trap(() => call($21, "test", [0]));
+
+// table_copy.wast:881
+assert_trap(() => call($21, "test", [1]));
+
+// table_copy.wast:882
+assert_trap(() => call($21, "test", [2]));
+
+// table_copy.wast:883
+assert_trap(() => call($21, "test", [3]));
+
+// table_copy.wast:884
+assert_trap(() => call($21, "test", [4]));
+
+// table_copy.wast:885
+assert_trap(() => call($21, "test", [5]));
+
+// table_copy.wast:886
+assert_trap(() => call($21, "test", [6]));
+
+// table_copy.wast:887
+assert_trap(() => call($21, "test", [7]));
+
+// table_copy.wast:888
+assert_trap(() => call($21, "test", [8]));
+
+// table_copy.wast:889
+assert_trap(() => call($21, "test", [9]));
+
+// table_copy.wast:890
+assert_trap(() => call($21, "test", [10]));
+
+// table_copy.wast:891
+assert_return(() => call($21, "test", [11]), 0);
+
+// table_copy.wast:892
+assert_return(() => call($21, "test", [12]), 1);
+
+// table_copy.wast:893
+assert_return(() => call($21, "test", [13]), 2);
+
+// table_copy.wast:894
+assert_return(() => call($21, "test", [14]), 3);
+
+// table_copy.wast:895
+assert_return(() => call($21, "test", [15]), 4);
+
+// table_copy.wast:896
+assert_return(() => call($21, "test", [16]), 5);
+
+// table_copy.wast:897
+assert_return(() => call($21, "test", [17]), 6);
+
+// table_copy.wast:898
+assert_return(() => call($21, "test", [18]), 7);
+
+// table_copy.wast:899
+assert_trap(() => call($21, "test", [19]));
+
+// table_copy.wast:900
+assert_trap(() => call($21, "test", [20]));
+
+// table_copy.wast:901
+assert_trap(() => call($21, "test", [21]));
+
+// table_copy.wast:902
+assert_trap(() => call($21, "test", [22]));
+
+// table_copy.wast:903
+assert_trap(() => call($21, "test", [23]));
+
+// table_copy.wast:904
+assert_trap(() => call($21, "test", [24]));
+
+// table_copy.wast:905
+assert_trap(() => call($21, "test", [25]));
+
+// table_copy.wast:906
+assert_trap(() => call($21, "test", [26]));
+
+// table_copy.wast:907
+assert_trap(() => call($21, "test", [27]));
+
+// table_copy.wast:908
+assert_trap(() => call($21, "test", [28]));
+
+// table_copy.wast:909
+assert_trap(() => call($21, "test", [29]));
+
+// table_copy.wast:910
+assert_trap(() => call($21, "test", [30]));
+
+// table_copy.wast:911
+assert_trap(() => call($21, "test", [31]));
+
+// table_copy.wast:913
+let $22 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x18\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:939
+assert_trap(() => call($22, "run", [11, 24, 16]));
+
+// table_copy.wast:941
+assert_trap(() => call($22, "test", [0]));
+
+// table_copy.wast:942
+assert_trap(() => call($22, "test", [1]));
+
+// table_copy.wast:943
+assert_trap(() => call($22, "test", [2]));
+
+// table_copy.wast:944
+assert_trap(() => call($22, "test", [3]));
+
+// table_copy.wast:945
+assert_trap(() => call($22, "test", [4]));
+
+// table_copy.wast:946
+assert_trap(() => call($22, "test", [5]));
+
+// table_copy.wast:947
+assert_trap(() => call($22, "test", [6]));
+
+// table_copy.wast:948
+assert_trap(() => call($22, "test", [7]));
+
+// table_copy.wast:949
+assert_trap(() => call($22, "test", [8]));
+
+// table_copy.wast:950
+assert_trap(() => call($22, "test", [9]));
+
+// table_copy.wast:951
+assert_trap(() => call($22, "test", [10]));
+
+// table_copy.wast:952
+assert_return(() => call($22, "test", [11]), 0);
+
+// table_copy.wast:953
+assert_return(() => call($22, "test", [12]), 1);
+
+// table_copy.wast:954
+assert_return(() => call($22, "test", [13]), 2);
+
+// table_copy.wast:955
+assert_return(() => call($22, "test", [14]), 3);
+
+// table_copy.wast:956
+assert_return(() => call($22, "test", [15]), 4);
+
+// table_copy.wast:957
+assert_return(() => call($22, "test", [16]), 5);
+
+// table_copy.wast:958
+assert_return(() => call($22, "test", [17]), 6);
+
+// table_copy.wast:959
+assert_return(() => call($22, "test", [18]), 7);
+
+// table_copy.wast:960
+assert_trap(() => call($22, "test", [19]));
+
+// table_copy.wast:961
+assert_trap(() => call($22, "test", [20]));
+
+// table_copy.wast:962
+assert_trap(() => call($22, "test", [21]));
+
+// table_copy.wast:963
+assert_trap(() => call($22, "test", [22]));
+
+// table_copy.wast:964
+assert_trap(() => call($22, "test", [23]));
+
+// table_copy.wast:965
+assert_return(() => call($22, "test", [24]), 0);
+
+// table_copy.wast:966
+assert_return(() => call($22, "test", [25]), 1);
+
+// table_copy.wast:967
+assert_return(() => call($22, "test", [26]), 2);
+
+// table_copy.wast:968
+assert_return(() => call($22, "test", [27]), 3);
+
+// table_copy.wast:969
+assert_return(() => call($22, "test", [28]), 4);
+
+// table_copy.wast:970
+assert_return(() => call($22, "test", [29]), 5);
+
+// table_copy.wast:971
+assert_return(() => call($22, "test", [30]), 6);
+
+// table_copy.wast:972
+assert_return(() => call($22, "test", [31]), 7);
+
+// table_copy.wast:974
+let $23 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x15\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:1000
+assert_trap(() => call($23, "run", [24, 21, 16]));
+
+// table_copy.wast:1002
+assert_trap(() => call($23, "test", [0]));
+
+// table_copy.wast:1003
+assert_trap(() => call($23, "test", [1]));
+
+// table_copy.wast:1004
+assert_trap(() => call($23, "test", [2]));
+
+// table_copy.wast:1005
+assert_trap(() => call($23, "test", [3]));
+
+// table_copy.wast:1006
+assert_trap(() => call($23, "test", [4]));
+
+// table_copy.wast:1007
+assert_trap(() => call($23, "test", [5]));
+
+// table_copy.wast:1008
+assert_trap(() => call($23, "test", [6]));
+
+// table_copy.wast:1009
+assert_trap(() => call($23, "test", [7]));
+
+// table_copy.wast:1010
+assert_trap(() => call($23, "test", [8]));
+
+// table_copy.wast:1011
+assert_trap(() => call($23, "test", [9]));
+
+// table_copy.wast:1012
+assert_trap(() => call($23, "test", [10]));
+
+// table_copy.wast:1013
+assert_trap(() => call($23, "test", [11]));
+
+// table_copy.wast:1014
+assert_trap(() => call($23, "test", [12]));
+
+// table_copy.wast:1015
+assert_trap(() => call($23, "test", [13]));
+
+// table_copy.wast:1016
+assert_trap(() => call($23, "test", [14]));
+
+// table_copy.wast:1017
+assert_trap(() => call($23, "test", [15]));
+
+// table_copy.wast:1018
+assert_trap(() => call($23, "test", [16]));
+
+// table_copy.wast:1019
+assert_trap(() => call($23, "test", [17]));
+
+// table_copy.wast:1020
+assert_trap(() => call($23, "test", [18]));
+
+// table_copy.wast:1021
+assert_trap(() => call($23, "test", [19]));
+
+// table_copy.wast:1022
+assert_trap(() => call($23, "test", [20]));
+
+// table_copy.wast:1023
+assert_return(() => call($23, "test", [21]), 0);
+
+// table_copy.wast:1024
+assert_return(() => call($23, "test", [22]), 1);
+
+// table_copy.wast:1025
+assert_return(() => call($23, "test", [23]), 2);
+
+// table_copy.wast:1026
+assert_return(() => call($23, "test", [24]), 3);
+
+// table_copy.wast:1027
+assert_return(() => call($23, "test", [25]), 4);
+
+// table_copy.wast:1028
+assert_return(() => call($23, "test", [26]), 5);
+
+// table_copy.wast:1029
+assert_return(() => call($23, "test", [27]), 6);
+
+// table_copy.wast:1030
+assert_return(() => call($23, "test", [28]), 7);
+
+// table_copy.wast:1031
+assert_trap(() => call($23, "test", [29]));
+
+// table_copy.wast:1032
+assert_trap(() => call($23, "test", [30]));
+
+// table_copy.wast:1033
+assert_trap(() => call($23, "test", [31]));
+
+// table_copy.wast:1035
+let $24 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x8e\x80\x80\x80\x00\x01\x00\x41\x18\x0b\x08\x00\x01\x02\x03\x04\x05\x06\x07\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:1061
+assert_trap(() => call($24, "run", [21, 24, 16]));
+
+// table_copy.wast:1063
+assert_trap(() => call($24, "test", [0]));
+
+// table_copy.wast:1064
+assert_trap(() => call($24, "test", [1]));
+
+// table_copy.wast:1065
+assert_trap(() => call($24, "test", [2]));
+
+// table_copy.wast:1066
+assert_trap(() => call($24, "test", [3]));
+
+// table_copy.wast:1067
+assert_trap(() => call($24, "test", [4]));
+
+// table_copy.wast:1068
+assert_trap(() => call($24, "test", [5]));
+
+// table_copy.wast:1069
+assert_trap(() => call($24, "test", [6]));
+
+// table_copy.wast:1070
+assert_trap(() => call($24, "test", [7]));
+
+// table_copy.wast:1071
+assert_trap(() => call($24, "test", [8]));
+
+// table_copy.wast:1072
+assert_trap(() => call($24, "test", [9]));
+
+// table_copy.wast:1073
+assert_trap(() => call($24, "test", [10]));
+
+// table_copy.wast:1074
+assert_trap(() => call($24, "test", [11]));
+
+// table_copy.wast:1075
+assert_trap(() => call($24, "test", [12]));
+
+// table_copy.wast:1076
+assert_trap(() => call($24, "test", [13]));
+
+// table_copy.wast:1077
+assert_trap(() => call($24, "test", [14]));
+
+// table_copy.wast:1078
+assert_trap(() => call($24, "test", [15]));
+
+// table_copy.wast:1079
+assert_trap(() => call($24, "test", [16]));
+
+// table_copy.wast:1080
+assert_trap(() => call($24, "test", [17]));
+
+// table_copy.wast:1081
+assert_trap(() => call($24, "test", [18]));
+
+// table_copy.wast:1082
+assert_trap(() => call($24, "test", [19]));
+
+// table_copy.wast:1083
+assert_trap(() => call($24, "test", [20]));
+
+// table_copy.wast:1084
+assert_return(() => call($24, "test", [21]), 0);
+
+// table_copy.wast:1085
+assert_return(() => call($24, "test", [22]), 1);
+
+// table_copy.wast:1086
+assert_return(() => call($24, "test", [23]), 2);
+
+// table_copy.wast:1087
+assert_return(() => call($24, "test", [24]), 3);
+
+// table_copy.wast:1088
+assert_return(() => call($24, "test", [25]), 4);
+
+// table_copy.wast:1089
+assert_return(() => call($24, "test", [26]), 5);
+
+// table_copy.wast:1090
+assert_return(() => call($24, "test", [27]), 6);
+
+// table_copy.wast:1091
+assert_return(() => call($24, "test", [28]), 7);
+
+// table_copy.wast:1092
+assert_return(() => call($24, "test", [29]), 5);
+
+// table_copy.wast:1093
+assert_return(() => call($24, "test", [30]), 6);
+
+// table_copy.wast:1094
+assert_return(() => call($24, "test", [31]), 7);
+
+// table_copy.wast:1096
+let $25 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x91\x80\x80\x80\x00\x01\x00\x41\x15\x0b\x0b\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:1122
+assert_trap(() => call($25, "run", [21, 21, 16]));
+
+// table_copy.wast:1124
+assert_trap(() => call($25, "test", [0]));
+
+// table_copy.wast:1125
+assert_trap(() => call($25, "test", [1]));
+
+// table_copy.wast:1126
+assert_trap(() => call($25, "test", [2]));
+
+// table_copy.wast:1127
+assert_trap(() => call($25, "test", [3]));
+
+// table_copy.wast:1128
+assert_trap(() => call($25, "test", [4]));
+
+// table_copy.wast:1129
+assert_trap(() => call($25, "test", [5]));
+
+// table_copy.wast:1130
+assert_trap(() => call($25, "test", [6]));
+
+// table_copy.wast:1131
+assert_trap(() => call($25, "test", [7]));
+
+// table_copy.wast:1132
+assert_trap(() => call($25, "test", [8]));
+
+// table_copy.wast:1133
+assert_trap(() => call($25, "test", [9]));
+
+// table_copy.wast:1134
+assert_trap(() => call($25, "test", [10]));
+
+// table_copy.wast:1135
+assert_trap(() => call($25, "test", [11]));
+
+// table_copy.wast:1136
+assert_trap(() => call($25, "test", [12]));
+
+// table_copy.wast:1137
+assert_trap(() => call($25, "test", [13]));
+
+// table_copy.wast:1138
+assert_trap(() => call($25, "test", [14]));
+
+// table_copy.wast:1139
+assert_trap(() => call($25, "test", [15]));
+
+// table_copy.wast:1140
+assert_trap(() => call($25, "test", [16]));
+
+// table_copy.wast:1141
+assert_trap(() => call($25, "test", [17]));
+
+// table_copy.wast:1142
+assert_trap(() => call($25, "test", [18]));
+
+// table_copy.wast:1143
+assert_trap(() => call($25, "test", [19]));
+
+// table_copy.wast:1144
+assert_trap(() => call($25, "test", [20]));
+
+// table_copy.wast:1145
+assert_return(() => call($25, "test", [21]), 0);
+
+// table_copy.wast:1146
+assert_return(() => call($25, "test", [22]), 1);
+
+// table_copy.wast:1147
+assert_return(() => call($25, "test", [23]), 2);
+
+// table_copy.wast:1148
+assert_return(() => call($25, "test", [24]), 3);
+
+// table_copy.wast:1149
+assert_return(() => call($25, "test", [25]), 4);
+
+// table_copy.wast:1150
+assert_return(() => call($25, "test", [26]), 5);
+
+// table_copy.wast:1151
+assert_return(() => call($25, "test", [27]), 6);
+
+// table_copy.wast:1152
+assert_return(() => call($25, "test", [28]), 7);
+
+// table_copy.wast:1153
+assert_return(() => call($25, "test", [29]), 8);
+
+// table_copy.wast:1154
+assert_return(() => call($25, "test", [30]), 9);
+
+// table_copy.wast:1155
+assert_return(() => call($25, "test", [31]), 10);
+
+// table_copy.wast:1157
+let $26 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x87\x80\x80\x80\x00\x01\x70\x01\x80\x01\x80\x01\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x97\x80\x80\x80\x00\x01\x00\x41\xf0\x00\x0b\x10\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:1183
+assert_trap(() => call($26, "run", [0, 112, -32]));
+
+// table_copy.wast:1185
+assert_return(() => call($26, "test", [0]), 0);
+
+// table_copy.wast:1186
+assert_return(() => call($26, "test", [1]), 1);
+
+// table_copy.wast:1187
+assert_return(() => call($26, "test", [2]), 2);
+
+// table_copy.wast:1188
+assert_return(() => call($26, "test", [3]), 3);
+
+// table_copy.wast:1189
+assert_return(() => call($26, "test", [4]), 4);
+
+// table_copy.wast:1190
+assert_return(() => call($26, "test", [5]), 5);
+
+// table_copy.wast:1191
+assert_return(() => call($26, "test", [6]), 6);
+
+// table_copy.wast:1192
+assert_return(() => call($26, "test", [7]), 7);
+
+// table_copy.wast:1193
+assert_return(() => call($26, "test", [8]), 8);
+
+// table_copy.wast:1194
+assert_return(() => call($26, "test", [9]), 9);
+
+// table_copy.wast:1195
+assert_return(() => call($26, "test", [10]), 10);
+
+// table_copy.wast:1196
+assert_return(() => call($26, "test", [11]), 11);
+
+// table_copy.wast:1197
+assert_return(() => call($26, "test", [12]), 12);
+
+// table_copy.wast:1198
+assert_return(() => call($26, "test", [13]), 13);
+
+// table_copy.wast:1199
+assert_return(() => call($26, "test", [14]), 14);
+
+// table_copy.wast:1200
+assert_return(() => call($26, "test", [15]), 15);
+
+// table_copy.wast:1201
+assert_trap(() => call($26, "test", [16]));
+
+// table_copy.wast:1202
+assert_trap(() => call($26, "test", [17]));
+
+// table_copy.wast:1203
+assert_trap(() => call($26, "test", [18]));
+
+// table_copy.wast:1204
+assert_trap(() => call($26, "test", [19]));
+
+// table_copy.wast:1205
+assert_trap(() => call($26, "test", [20]));
+
+// table_copy.wast:1206
+assert_trap(() => call($26, "test", [21]));
+
+// table_copy.wast:1207
+assert_trap(() => call($26, "test", [22]));
+
+// table_copy.wast:1208
+assert_trap(() => call($26, "test", [23]));
+
+// table_copy.wast:1209
+assert_trap(() => call($26, "test", [24]));
+
+// table_copy.wast:1210
+assert_trap(() => call($26, "test", [25]));
+
+// table_copy.wast:1211
+assert_trap(() => call($26, "test", [26]));
+
+// table_copy.wast:1212
+assert_trap(() => call($26, "test", [27]));
+
+// table_copy.wast:1213
+assert_trap(() => call($26, "test", [28]));
+
+// table_copy.wast:1214
+assert_trap(() => call($26, "test", [29]));
+
+// table_copy.wast:1215
+assert_trap(() => call($26, "test", [30]));
+
+// table_copy.wast:1216
+assert_trap(() => call($26, "test", [31]));
+
+// table_copy.wast:1217
+assert_trap(() => call($26, "test", [32]));
+
+// table_copy.wast:1218
+assert_trap(() => call($26, "test", [33]));
+
+// table_copy.wast:1219
+assert_trap(() => call($26, "test", [34]));
+
+// table_copy.wast:1220
+assert_trap(() => call($26, "test", [35]));
+
+// table_copy.wast:1221
+assert_trap(() => call($26, "test", [36]));
+
+// table_copy.wast:1222
+assert_trap(() => call($26, "test", [37]));
+
+// table_copy.wast:1223
+assert_trap(() => call($26, "test", [38]));
+
+// table_copy.wast:1224
+assert_trap(() => call($26, "test", [39]));
+
+// table_copy.wast:1225
+assert_trap(() => call($26, "test", [40]));
+
+// table_copy.wast:1226
+assert_trap(() => call($26, "test", [41]));
+
+// table_copy.wast:1227
+assert_trap(() => call($26, "test", [42]));
+
+// table_copy.wast:1228
+assert_trap(() => call($26, "test", [43]));
+
+// table_copy.wast:1229
+assert_trap(() => call($26, "test", [44]));
+
+// table_copy.wast:1230
+assert_trap(() => call($26, "test", [45]));
+
+// table_copy.wast:1231
+assert_trap(() => call($26, "test", [46]));
+
+// table_copy.wast:1232
+assert_trap(() => call($26, "test", [47]));
+
+// table_copy.wast:1233
+assert_trap(() => call($26, "test", [48]));
+
+// table_copy.wast:1234
+assert_trap(() => call($26, "test", [49]));
+
+// table_copy.wast:1235
+assert_trap(() => call($26, "test", [50]));
+
+// table_copy.wast:1236
+assert_trap(() => call($26, "test", [51]));
+
+// table_copy.wast:1237
+assert_trap(() => call($26, "test", [52]));
+
+// table_copy.wast:1238
+assert_trap(() => call($26, "test", [53]));
+
+// table_copy.wast:1239
+assert_trap(() => call($26, "test", [54]));
+
+// table_copy.wast:1240
+assert_trap(() => call($26, "test", [55]));
+
+// table_copy.wast:1241
+assert_trap(() => call($26, "test", [56]));
+
+// table_copy.wast:1242
+assert_trap(() => call($26, "test", [57]));
+
+// table_copy.wast:1243
+assert_trap(() => call($26, "test", [58]));
+
+// table_copy.wast:1244
+assert_trap(() => call($26, "test", [59]));
+
+// table_copy.wast:1245
+assert_trap(() => call($26, "test", [60]));
+
+// table_copy.wast:1246
+assert_trap(() => call($26, "test", [61]));
+
+// table_copy.wast:1247
+assert_trap(() => call($26, "test", [62]));
+
+// table_copy.wast:1248
+assert_trap(() => call($26, "test", [63]));
+
+// table_copy.wast:1249
+assert_trap(() => call($26, "test", [64]));
+
+// table_copy.wast:1250
+assert_trap(() => call($26, "test", [65]));
+
+// table_copy.wast:1251
+assert_trap(() => call($26, "test", [66]));
+
+// table_copy.wast:1252
+assert_trap(() => call($26, "test", [67]));
+
+// table_copy.wast:1253
+assert_trap(() => call($26, "test", [68]));
+
+// table_copy.wast:1254
+assert_trap(() => call($26, "test", [69]));
+
+// table_copy.wast:1255
+assert_trap(() => call($26, "test", [70]));
+
+// table_copy.wast:1256
+assert_trap(() => call($26, "test", [71]));
+
+// table_copy.wast:1257
+assert_trap(() => call($26, "test", [72]));
+
+// table_copy.wast:1258
+assert_trap(() => call($26, "test", [73]));
+
+// table_copy.wast:1259
+assert_trap(() => call($26, "test", [74]));
+
+// table_copy.wast:1260
+assert_trap(() => call($26, "test", [75]));
+
+// table_copy.wast:1261
+assert_trap(() => call($26, "test", [76]));
+
+// table_copy.wast:1262
+assert_trap(() => call($26, "test", [77]));
+
+// table_copy.wast:1263
+assert_trap(() => call($26, "test", [78]));
+
+// table_copy.wast:1264
+assert_trap(() => call($26, "test", [79]));
+
+// table_copy.wast:1265
+assert_trap(() => call($26, "test", [80]));
+
+// table_copy.wast:1266
+assert_trap(() => call($26, "test", [81]));
+
+// table_copy.wast:1267
+assert_trap(() => call($26, "test", [82]));
+
+// table_copy.wast:1268
+assert_trap(() => call($26, "test", [83]));
+
+// table_copy.wast:1269
+assert_trap(() => call($26, "test", [84]));
+
+// table_copy.wast:1270
+assert_trap(() => call($26, "test", [85]));
+
+// table_copy.wast:1271
+assert_trap(() => call($26, "test", [86]));
+
+// table_copy.wast:1272
+assert_trap(() => call($26, "test", [87]));
+
+// table_copy.wast:1273
+assert_trap(() => call($26, "test", [88]));
+
+// table_copy.wast:1274
+assert_trap(() => call($26, "test", [89]));
+
+// table_copy.wast:1275
+assert_trap(() => call($26, "test", [90]));
+
+// table_copy.wast:1276
+assert_trap(() => call($26, "test", [91]));
+
+// table_copy.wast:1277
+assert_trap(() => call($26, "test", [92]));
+
+// table_copy.wast:1278
+assert_trap(() => call($26, "test", [93]));
+
+// table_copy.wast:1279
+assert_trap(() => call($26, "test", [94]));
+
+// table_copy.wast:1280
+assert_trap(() => call($26, "test", [95]));
+
+// table_copy.wast:1281
+assert_trap(() => call($26, "test", [96]));
+
+// table_copy.wast:1282
+assert_trap(() => call($26, "test", [97]));
+
+// table_copy.wast:1283
+assert_trap(() => call($26, "test", [98]));
+
+// table_copy.wast:1284
+assert_trap(() => call($26, "test", [99]));
+
+// table_copy.wast:1285
+assert_trap(() => call($26, "test", [100]));
+
+// table_copy.wast:1286
+assert_trap(() => call($26, "test", [101]));
+
+// table_copy.wast:1287
+assert_trap(() => call($26, "test", [102]));
+
+// table_copy.wast:1288
+assert_trap(() => call($26, "test", [103]));
+
+// table_copy.wast:1289
+assert_trap(() => call($26, "test", [104]));
+
+// table_copy.wast:1290
+assert_trap(() => call($26, "test", [105]));
+
+// table_copy.wast:1291
+assert_trap(() => call($26, "test", [106]));
+
+// table_copy.wast:1292
+assert_trap(() => call($26, "test", [107]));
+
+// table_copy.wast:1293
+assert_trap(() => call($26, "test", [108]));
+
+// table_copy.wast:1294
+assert_trap(() => call($26, "test", [109]));
+
+// table_copy.wast:1295
+assert_trap(() => call($26, "test", [110]));
+
+// table_copy.wast:1296
+assert_trap(() => call($26, "test", [111]));
+
+// table_copy.wast:1297
+assert_return(() => call($26, "test", [112]), 0);
+
+// table_copy.wast:1298
+assert_return(() => call($26, "test", [113]), 1);
+
+// table_copy.wast:1299
+assert_return(() => call($26, "test", [114]), 2);
+
+// table_copy.wast:1300
+assert_return(() => call($26, "test", [115]), 3);
+
+// table_copy.wast:1301
+assert_return(() => call($26, "test", [116]), 4);
+
+// table_copy.wast:1302
+assert_return(() => call($26, "test", [117]), 5);
+
+// table_copy.wast:1303
+assert_return(() => call($26, "test", [118]), 6);
+
+// table_copy.wast:1304
+assert_return(() => call($26, "test", [119]), 7);
+
+// table_copy.wast:1305
+assert_return(() => call($26, "test", [120]), 8);
+
+// table_copy.wast:1306
+assert_return(() => call($26, "test", [121]), 9);
+
+// table_copy.wast:1307
+assert_return(() => call($26, "test", [122]), 10);
+
+// table_copy.wast:1308
+assert_return(() => call($26, "test", [123]), 11);
+
+// table_copy.wast:1309
+assert_return(() => call($26, "test", [124]), 12);
+
+// table_copy.wast:1310
+assert_return(() => call($26, "test", [125]), 13);
+
+// table_copy.wast:1311
+assert_return(() => call($26, "test", [126]), 14);
+
+// table_copy.wast:1312
+assert_return(() => call($26, "test", [127]), 15);
+
+// table_copy.wast:1314
+let $27 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x90\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x03\x7f\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x87\x80\x80\x80\x00\x01\x70\x01\x80\x01\x80\x01\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\x96\x80\x80\x80\x00\x01\x00\x41\x00\x0b\x10\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x20\x01\x20\x02\xfc\x0e\x00\x00\x0b");
+
+// table_copy.wast:1340
+assert_trap(() => call($27, "run", [112, 0, -32]));
+
+// table_copy.wast:1342
+assert_return(() => call($27, "test", [0]), 0);
+
+// table_copy.wast:1343
+assert_return(() => call($27, "test", [1]), 1);
+
+// table_copy.wast:1344
+assert_return(() => call($27, "test", [2]), 2);
+
+// table_copy.wast:1345
+assert_return(() => call($27, "test", [3]), 3);
+
+// table_copy.wast:1346
+assert_return(() => call($27, "test", [4]), 4);
+
+// table_copy.wast:1347
+assert_return(() => call($27, "test", [5]), 5);
+
+// table_copy.wast:1348
+assert_return(() => call($27, "test", [6]), 6);
+
+// table_copy.wast:1349
+assert_return(() => call($27, "test", [7]), 7);
+
+// table_copy.wast:1350
+assert_return(() => call($27, "test", [8]), 8);
+
+// table_copy.wast:1351
+assert_return(() => call($27, "test", [9]), 9);
+
+// table_copy.wast:1352
+assert_return(() => call($27, "test", [10]), 10);
+
+// table_copy.wast:1353
+assert_return(() => call($27, "test", [11]), 11);
+
+// table_copy.wast:1354
+assert_return(() => call($27, "test", [12]), 12);
+
+// table_copy.wast:1355
+assert_return(() => call($27, "test", [13]), 13);
+
+// table_copy.wast:1356
+assert_return(() => call($27, "test", [14]), 14);
+
+// table_copy.wast:1357
+assert_return(() => call($27, "test", [15]), 15);
+
+// table_copy.wast:1358
+assert_trap(() => call($27, "test", [16]));
+
+// table_copy.wast:1359
+assert_trap(() => call($27, "test", [17]));
+
+// table_copy.wast:1360
+assert_trap(() => call($27, "test", [18]));
+
+// table_copy.wast:1361
+assert_trap(() => call($27, "test", [19]));
+
+// table_copy.wast:1362
+assert_trap(() => call($27, "test", [20]));
+
+// table_copy.wast:1363
+assert_trap(() => call($27, "test", [21]));
+
+// table_copy.wast:1364
+assert_trap(() => call($27, "test", [22]));
+
+// table_copy.wast:1365
+assert_trap(() => call($27, "test", [23]));
+
+// table_copy.wast:1366
+assert_trap(() => call($27, "test", [24]));
+
+// table_copy.wast:1367
+assert_trap(() => call($27, "test", [25]));
+
+// table_copy.wast:1368
+assert_trap(() => call($27, "test", [26]));
+
+// table_copy.wast:1369
+assert_trap(() => call($27, "test", [27]));
+
+// table_copy.wast:1370
+assert_trap(() => call($27, "test", [28]));
+
+// table_copy.wast:1371
+assert_trap(() => call($27, "test", [29]));
+
+// table_copy.wast:1372
+assert_trap(() => call($27, "test", [30]));
+
+// table_copy.wast:1373
+assert_trap(() => call($27, "test", [31]));
+
+// table_copy.wast:1374
+assert_trap(() => call($27, "test", [32]));
+
+// table_copy.wast:1375
+assert_trap(() => call($27, "test", [33]));
+
+// table_copy.wast:1376
+assert_trap(() => call($27, "test", [34]));
+
+// table_copy.wast:1377
+assert_trap(() => call($27, "test", [35]));
+
+// table_copy.wast:1378
+assert_trap(() => call($27, "test", [36]));
+
+// table_copy.wast:1379
+assert_trap(() => call($27, "test", [37]));
+
+// table_copy.wast:1380
+assert_trap(() => call($27, "test", [38]));
+
+// table_copy.wast:1381
+assert_trap(() => call($27, "test", [39]));
+
+// table_copy.wast:1382
+assert_trap(() => call($27, "test", [40]));
+
+// table_copy.wast:1383
+assert_trap(() => call($27, "test", [41]));
+
+// table_copy.wast:1384
+assert_trap(() => call($27, "test", [42]));
+
+// table_copy.wast:1385
+assert_trap(() => call($27, "test", [43]));
+
+// table_copy.wast:1386
+assert_trap(() => call($27, "test", [44]));
+
+// table_copy.wast:1387
+assert_trap(() => call($27, "test", [45]));
+
+// table_copy.wast:1388
+assert_trap(() => call($27, "test", [46]));
+
+// table_copy.wast:1389
+assert_trap(() => call($27, "test", [47]));
+
+// table_copy.wast:1390
+assert_trap(() => call($27, "test", [48]));
+
+// table_copy.wast:1391
+assert_trap(() => call($27, "test", [49]));
+
+// table_copy.wast:1392
+assert_trap(() => call($27, "test", [50]));
+
+// table_copy.wast:1393
+assert_trap(() => call($27, "test", [51]));
+
+// table_copy.wast:1394
+assert_trap(() => call($27, "test", [52]));
+
+// table_copy.wast:1395
+assert_trap(() => call($27, "test", [53]));
+
+// table_copy.wast:1396
+assert_trap(() => call($27, "test", [54]));
+
+// table_copy.wast:1397
+assert_trap(() => call($27, "test", [55]));
+
+// table_copy.wast:1398
+assert_trap(() => call($27, "test", [56]));
+
+// table_copy.wast:1399
+assert_trap(() => call($27, "test", [57]));
+
+// table_copy.wast:1400
+assert_trap(() => call($27, "test", [58]));
+
+// table_copy.wast:1401
+assert_trap(() => call($27, "test", [59]));
+
+// table_copy.wast:1402
+assert_trap(() => call($27, "test", [60]));
+
+// table_copy.wast:1403
+assert_trap(() => call($27, "test", [61]));
+
+// table_copy.wast:1404
+assert_trap(() => call($27, "test", [62]));
+
+// table_copy.wast:1405
+assert_trap(() => call($27, "test", [63]));
+
+// table_copy.wast:1406
+assert_trap(() => call($27, "test", [64]));
+
+// table_copy.wast:1407
+assert_trap(() => call($27, "test", [65]));
+
+// table_copy.wast:1408
+assert_trap(() => call($27, "test", [66]));
+
+// table_copy.wast:1409
+assert_trap(() => call($27, "test", [67]));
+
+// table_copy.wast:1410
+assert_trap(() => call($27, "test", [68]));
+
+// table_copy.wast:1411
+assert_trap(() => call($27, "test", [69]));
+
+// table_copy.wast:1412
+assert_trap(() => call($27, "test", [70]));
+
+// table_copy.wast:1413
+assert_trap(() => call($27, "test", [71]));
+
+// table_copy.wast:1414
+assert_trap(() => call($27, "test", [72]));
+
+// table_copy.wast:1415
+assert_trap(() => call($27, "test", [73]));
+
+// table_copy.wast:1416
+assert_trap(() => call($27, "test", [74]));
+
+// table_copy.wast:1417
+assert_trap(() => call($27, "test", [75]));
+
+// table_copy.wast:1418
+assert_trap(() => call($27, "test", [76]));
+
+// table_copy.wast:1419
+assert_trap(() => call($27, "test", [77]));
+
+// table_copy.wast:1420
+assert_trap(() => call($27, "test", [78]));
+
+// table_copy.wast:1421
+assert_trap(() => call($27, "test", [79]));
+
+// table_copy.wast:1422
+assert_trap(() => call($27, "test", [80]));
+
+// table_copy.wast:1423
+assert_trap(() => call($27, "test", [81]));
+
+// table_copy.wast:1424
+assert_trap(() => call($27, "test", [82]));
+
+// table_copy.wast:1425
+assert_trap(() => call($27, "test", [83]));
+
+// table_copy.wast:1426
+assert_trap(() => call($27, "test", [84]));
+
+// table_copy.wast:1427
+assert_trap(() => call($27, "test", [85]));
+
+// table_copy.wast:1428
+assert_trap(() => call($27, "test", [86]));
+
+// table_copy.wast:1429
+assert_trap(() => call($27, "test", [87]));
+
+// table_copy.wast:1430
+assert_trap(() => call($27, "test", [88]));
+
+// table_copy.wast:1431
+assert_trap(() => call($27, "test", [89]));
+
+// table_copy.wast:1432
+assert_trap(() => call($27, "test", [90]));
+
+// table_copy.wast:1433
+assert_trap(() => call($27, "test", [91]));
+
+// table_copy.wast:1434
+assert_trap(() => call($27, "test", [92]));
+
+// table_copy.wast:1435
+assert_trap(() => call($27, "test", [93]));
+
+// table_copy.wast:1436
+assert_trap(() => call($27, "test", [94]));
+
+// table_copy.wast:1437
+assert_trap(() => call($27, "test", [95]));
+
+// table_copy.wast:1438
+assert_trap(() => call($27, "test", [96]));
+
+// table_copy.wast:1439
+assert_trap(() => call($27, "test", [97]));
+
+// table_copy.wast:1440
+assert_trap(() => call($27, "test", [98]));
+
+// table_copy.wast:1441
+assert_trap(() => call($27, "test", [99]));
+
+// table_copy.wast:1442
+assert_trap(() => call($27, "test", [100]));
+
+// table_copy.wast:1443
+assert_trap(() => call($27, "test", [101]));
+
+// table_copy.wast:1444
+assert_trap(() => call($27, "test", [102]));
+
+// table_copy.wast:1445
+assert_trap(() => call($27, "test", [103]));
+
+// table_copy.wast:1446
+assert_trap(() => call($27, "test", [104]));
+
+// table_copy.wast:1447
+assert_trap(() => call($27, "test", [105]));
+
+// table_copy.wast:1448
+assert_trap(() => call($27, "test", [106]));
+
+// table_copy.wast:1449
+assert_trap(() => call($27, "test", [107]));
+
+// table_copy.wast:1450
+assert_trap(() => call($27, "test", [108]));
+
+// table_copy.wast:1451
+assert_trap(() => call($27, "test", [109]));
+
+// table_copy.wast:1452
+assert_trap(() => call($27, "test", [110]));
+
+// table_copy.wast:1453
+assert_trap(() => call($27, "test", [111]));
+
+// table_copy.wast:1454
+assert_trap(() => call($27, "test", [112]));
+
+// table_copy.wast:1455
+assert_trap(() => call($27, "test", [113]));
+
+// table_copy.wast:1456
+assert_trap(() => call($27, "test", [114]));
+
+// table_copy.wast:1457
+assert_trap(() => call($27, "test", [115]));
+
+// table_copy.wast:1458
+assert_trap(() => call($27, "test", [116]));
+
+// table_copy.wast:1459
+assert_trap(() => call($27, "test", [117]));
+
+// table_copy.wast:1460
+assert_trap(() => call($27, "test", [118]));
+
+// table_copy.wast:1461
+assert_trap(() => call($27, "test", [119]));
+
+// table_copy.wast:1462
+assert_trap(() => call($27, "test", [120]));
+
+// table_copy.wast:1463
+assert_trap(() => call($27, "test", [121]));
+
+// table_copy.wast:1464
+assert_trap(() => call($27, "test", [122]));
+
+// table_copy.wast:1465
+assert_trap(() => call($27, "test", [123]));
+
+// table_copy.wast:1466
+assert_trap(() => call($27, "test", [124]));
+
+// table_copy.wast:1467
+assert_trap(() => call($27, "test", [125]));
+
+// table_copy.wast:1468
+assert_trap(() => call($27, "test", [126]));
+
+// table_copy.wast:1469
+assert_trap(() => call($27, "test", [127]));
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast
new file mode 100644
index 0000000000..11012a317a
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast
@@ -0,0 +1,1602 @@
+;;
+;; Generated by ../meta/generate_table_init.js
+;;
+
+(module
+ (func (export "ef0") (result i32) (i32.const 0))
+ (func (export "ef1") (result i32) (i32.const 1))
+ (func (export "ef2") (result i32) (i32.const 2))
+ (func (export "ef3") (result i32) (i32.const 3))
+ (func (export "ef4") (result i32) (i32.const 4))
+)
+(register "a")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.init 1 (i32.const 7) (i32.const 0) (i32.const 4)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 7)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 8)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 9)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 10)) (i32.const 8))
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 15)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 16)) (i32.const 6))
+(assert_trap (invoke "check" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.init 3 (i32.const 15) (i32.const 1) (i32.const 3)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 13)) (i32.const 5))
+(assert_return (invoke "check" (i32.const 14)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 15)) (i32.const 9))
+(assert_return (invoke "check" (i32.const 16)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 17)) (i32.const 7))
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+
+(module
+ (type (func (result i32))) ;; type #0
+ (import "a" "ef0" (func (result i32))) ;; index 0
+ (import "a" "ef1" (func (result i32)))
+ (import "a" "ef2" (func (result i32)))
+ (import "a" "ef3" (func (result i32)))
+ (import "a" "ef4" (func (result i32))) ;; index 4
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 5)) ;; index 5
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9)) ;; index 9
+ (func (export "test")
+ (table.init 1 (i32.const 7) (i32.const 0) (i32.const 4))
+ (elem.drop 1)
+ (table.init 3 (i32.const 15) (i32.const 1) (i32.const 3))
+ (elem.drop 3)
+ (table.copy (i32.const 20) (i32.const 15) (i32.const 5))
+ (table.copy (i32.const 21) (i32.const 29) (i32.const 1))
+ (table.copy (i32.const 24) (i32.const 10) (i32.const 1))
+ (table.copy (i32.const 13) (i32.const 11) (i32.const 4))
+ (table.copy (i32.const 19) (i32.const 20) (i32.const 5)))
+ (func (export "check") (param i32) (result i32)
+ (call_indirect (type 0) (local.get 0)))
+)
+
+(invoke "test")
+(assert_trap (invoke "check" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 1)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 2)) (i32.const 3))
+(assert_return (invoke "check" (i32.const 3)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 4)) (i32.const 4))
+(assert_return (invoke "check" (i32.const 5)) (i32.const 1))
+(assert_trap (invoke "check" (i32.const 6)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 7)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 8)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 9)) (i32.const 1))
+(assert_return (invoke "check" (i32.const 10)) (i32.const 8))
+(assert_trap (invoke "check" (i32.const 11)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 12)) (i32.const 7))
+(assert_trap (invoke "check" (i32.const 13)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 14)) (i32.const 7))
+(assert_return (invoke "check" (i32.const 15)) (i32.const 5))
+(assert_return (invoke "check" (i32.const 16)) (i32.const 2))
+(assert_return (invoke "check" (i32.const 17)) (i32.const 7))
+(assert_trap (invoke "check" (i32.const 18)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 19)) (i32.const 9))
+(assert_trap (invoke "check" (i32.const 20)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 21)) (i32.const 7))
+(assert_trap (invoke "check" (i32.const 22)) "uninitialized element")
+(assert_return (invoke "check" (i32.const 23)) (i32.const 8))
+(assert_return (invoke "check" (i32.const 24)) (i32.const 8))
+(assert_trap (invoke "check" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "check" (i32.const 29)) "uninitialized element")
+(assert_invalid
+ (module
+ (func (export "test")
+ (elem.drop 0)))
+ "unknown table 0")
+
+(assert_invalid
+ (module
+ (func (export "test")
+ (table.init 0 (i32.const 12) (i32.const 1) (i32.const 1))))
+ "unknown table 0")
+
+(assert_invalid
+ (module
+ (elem passive funcref 0)
+ (func (result i32) (i32.const 0))
+ (func (export "test")
+ (elem.drop 4)))
+ "unknown table 0")
+
+(assert_invalid
+ (module
+ (elem passive funcref 0)
+ (func (result i32) (i32.const 0))
+ (func (export "test")
+ (table.init 4 (i32.const 12) (i32.const 1) (i32.const 1))))
+ "unknown table 0")
+
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (elem.drop 2)
+ ))
+(assert_trap (invoke "test") "element segment dropped")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.init 2 (i32.const 12) (i32.const 1) (i32.const 1))
+ ))
+(assert_trap (invoke "test") "element segment dropped")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.init 1 (i32.const 12) (i32.const 1) (i32.const 1))
+ (table.init 1 (i32.const 21) (i32.const 1) (i32.const 1))))
+(invoke "test")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (elem.drop 1)
+ (elem.drop 1)))
+(assert_trap (invoke "test") "element segment dropped")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (elem.drop 1)
+ (table.init 1 (i32.const 12) (i32.const 1) (i32.const 1))))
+(assert_trap (invoke "test") "element segment dropped")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.init 1 (i32.const 12) (i32.const 0) (i32.const 5))
+ ))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.init 1 (i32.const 12) (i32.const 2) (i32.const 3))
+ ))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.init 1 (i32.const 28) (i32.const 1) (i32.const 3))
+ ))
+(assert_trap (invoke "test") "out of bounds")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.init 1 (i32.const 12) (i32.const 4) (i32.const 0))
+ ))
+(invoke "test")
+
+(module
+ (table 30 30 funcref)
+ (elem (i32.const 2) 3 1 4 1)
+ (elem passive funcref 2 7 1 8)
+ (elem (i32.const 12) 7 5 2 3 6)
+ (elem passive funcref 5 9 2 7 6)
+ (func (result i32) (i32.const 0))
+ (func (result i32) (i32.const 1))
+ (func (result i32) (i32.const 2))
+ (func (result i32) (i32.const 3))
+ (func (result i32) (i32.const 4))
+ (func (result i32) (i32.const 5))
+ (func (result i32) (i32.const 6))
+ (func (result i32) (i32.const 7))
+ (func (result i32) (i32.const 8))
+ (func (result i32) (i32.const 9))
+ (func (export "test")
+ (table.init 1 (i32.const 30) (i32.const 2) (i32.const 0))
+ ))
+(invoke "test")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (i32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (i32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (i32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (f32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (f32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (f32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (f32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (i64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (i64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (i64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (i64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (f64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (f64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (f64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i32.const 1) (f64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (i32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (i32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (i32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (i32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (f32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (f32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (f32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (f32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (i64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (i64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (i64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (i64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (f64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (f64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (f64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f32.const 1) (f64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (i32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (i32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (i32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (i32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (f32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (f32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (f32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (f32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (i64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (i64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (i64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (i64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (f64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (f64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (f64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (i64.const 1) (f64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (i32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (i32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (i32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (i32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (f32.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (f32.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (f32.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (f32.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (i64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (i64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (i64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (i64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (f64.const 1) (i32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (f64.const 1) (f32.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (f64.const 1) (i64.const 1))))
+ "type mismatch")
+
+(assert_invalid
+ (module
+ (table 10 funcref)
+ (elem passive funcref $f0 $f0 $f0)
+ (func $f0)
+ (func (export "test")
+ (table.init 0 (f64.const 1) (f64.const 1) (f64.const 1))))
+ "type mismatch")
+
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $offs i32) (param $len i32)
+ (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+(assert_trap (invoke "run" (i32.const 24) (i32.const 16)) "out of bounds")
+(assert_return (invoke "test" (i32.const 24)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 25)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 29)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 30)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 31)) (i32.const 7))
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+
+(module
+ (type (func (result i32)))
+ (table 32 64 funcref)
+ (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $offs i32) (param $len i32)
+ (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+(assert_trap (invoke "run" (i32.const 25) (i32.const 16)) "out of bounds")
+(assert_return (invoke "test" (i32.const 25)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 26)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 27)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 28)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 29)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 30)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 31)) (i32.const 6))
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
+
+(module
+ (type (func (result i32)))
+ (table 160 320 funcref)
+ (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $offs i32) (param $len i32)
+ (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+(assert_trap (invoke "run" (i32.const 96) (i32.const 32)) "out of bounds")
+(assert_return (invoke "test" (i32.const 96)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 97)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 98)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 99)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 100)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 101)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 102)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 103)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 104)) (i32.const 8))
+(assert_return (invoke "test" (i32.const 105)) (i32.const 9))
+(assert_return (invoke "test" (i32.const 106)) (i32.const 10))
+(assert_return (invoke "test" (i32.const 107)) (i32.const 11))
+(assert_return (invoke "test" (i32.const 108)) (i32.const 12))
+(assert_return (invoke "test" (i32.const 109)) (i32.const 13))
+(assert_return (invoke "test" (i32.const 110)) (i32.const 14))
+(assert_return (invoke "test" (i32.const 111)) (i32.const 15))
+(assert_trap (invoke "test" (i32.const 112)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 113)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 114)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 115)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 116)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 117)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 118)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 119)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 120)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 121)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 122)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 123)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 124)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 125)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 126)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 127)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 128)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 129)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 130)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 131)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 132)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 133)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 134)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 135)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 136)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 137)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 138)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 139)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 140)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 141)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 142)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 143)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 144)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 145)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 146)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 147)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 148)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 149)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 150)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 151)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 152)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 153)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 154)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 155)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 156)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 157)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 158)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 159)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 48)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 49)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 50)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 51)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 52)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 53)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 54)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 55)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 56)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 57)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 58)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 59)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 60)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 61)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 62)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 63)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 64)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 65)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 66)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 67)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 68)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 69)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 70)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 71)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 72)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 73)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 74)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 75)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 76)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 77)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 78)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 79)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 80)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 81)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 82)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 83)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 84)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 85)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 86)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 87)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 88)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 89)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 90)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 91)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 92)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 93)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 94)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 95)) "uninitialized element")
+
+(module
+ (type (func (result i32)))
+ (table 160 320 funcref)
+ (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $offs i32) (param $len i32)
+ (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+(assert_trap (invoke "run" (i32.const 97) (i32.const 31)) "out of bounds")
+(assert_return (invoke "test" (i32.const 97)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 98)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 99)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 100)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 101)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 102)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 103)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 104)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 105)) (i32.const 8))
+(assert_return (invoke "test" (i32.const 106)) (i32.const 9))
+(assert_return (invoke "test" (i32.const 107)) (i32.const 10))
+(assert_return (invoke "test" (i32.const 108)) (i32.const 11))
+(assert_return (invoke "test" (i32.const 109)) (i32.const 12))
+(assert_return (invoke "test" (i32.const 110)) (i32.const 13))
+(assert_return (invoke "test" (i32.const 111)) (i32.const 14))
+(assert_return (invoke "test" (i32.const 112)) (i32.const 15))
+(assert_trap (invoke "test" (i32.const 113)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 114)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 115)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 116)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 117)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 118)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 119)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 120)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 121)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 122)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 123)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 124)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 125)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 126)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 127)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 128)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 129)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 130)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 131)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 132)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 133)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 134)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 135)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 136)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 137)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 138)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 139)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 140)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 141)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 142)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 143)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 144)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 145)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 146)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 147)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 148)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 149)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 150)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 151)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 152)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 153)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 154)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 155)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 156)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 157)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 158)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 159)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 48)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 49)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 50)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 51)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 52)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 53)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 54)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 55)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 56)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 57)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 58)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 59)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 60)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 61)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 62)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 63)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 64)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 65)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 66)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 67)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 68)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 69)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 70)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 71)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 72)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 73)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 74)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 75)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 76)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 77)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 78)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 79)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 80)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 81)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 82)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 83)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 84)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 85)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 86)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 87)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 88)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 89)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 90)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 91)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 92)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 93)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 94)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 95)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 96)) "uninitialized element")
+
+(module
+ (type (func (result i32)))
+ (table 64 64 funcref)
+ (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $offs i32) (param $len i32)
+ (table.init 0 (local.get $offs) (i32.const 0) (local.get $len))))
+(assert_trap (invoke "run" (i32.const 48) (i32.const 4294967280)) "out of bounds")
+(assert_return (invoke "test" (i32.const 48)) (i32.const 0))
+(assert_return (invoke "test" (i32.const 49)) (i32.const 1))
+(assert_return (invoke "test" (i32.const 50)) (i32.const 2))
+(assert_return (invoke "test" (i32.const 51)) (i32.const 3))
+(assert_return (invoke "test" (i32.const 52)) (i32.const 4))
+(assert_return (invoke "test" (i32.const 53)) (i32.const 5))
+(assert_return (invoke "test" (i32.const 54)) (i32.const 6))
+(assert_return (invoke "test" (i32.const 55)) (i32.const 7))
+(assert_return (invoke "test" (i32.const 56)) (i32.const 8))
+(assert_return (invoke "test" (i32.const 57)) (i32.const 9))
+(assert_return (invoke "test" (i32.const 58)) (i32.const 10))
+(assert_return (invoke "test" (i32.const 59)) (i32.const 11))
+(assert_return (invoke "test" (i32.const 60)) (i32.const 12))
+(assert_return (invoke "test" (i32.const 61)) (i32.const 13))
+(assert_return (invoke "test" (i32.const 62)) (i32.const 14))
+(assert_return (invoke "test" (i32.const 63)) (i32.const 15))
+(assert_trap (invoke "test" (i32.const 0)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 1)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 2)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 3)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 4)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 5)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 6)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 7)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 8)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 9)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 10)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 11)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 12)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 13)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 14)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 15)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 16)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 17)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 18)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 19)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 20)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 21)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 22)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 23)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 24)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 25)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 26)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 27)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 28)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 29)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 30)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 31)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 32)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 33)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 34)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 35)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 36)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 37)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 38)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 39)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 40)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 41)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 42)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 43)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 44)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 45)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 46)) "uninitialized element")
+(assert_trap (invoke "test" (i32.const 47)) "uninitialized element")
+
+(module
+ (type (func (result i32)))
+ (table 16 16 funcref)
+ (elem passive funcref $f0 $f1 $f2 $f3 $f4 $f5 $f6 $f7 $f8 $f9 $f10 $f11 $f12 $f13 $f14 $f15)
+ (func $f0 (export "f0") (result i32) (i32.const 0))
+ (func $f1 (export "f1") (result i32) (i32.const 1))
+ (func $f2 (export "f2") (result i32) (i32.const 2))
+ (func $f3 (export "f3") (result i32) (i32.const 3))
+ (func $f4 (export "f4") (result i32) (i32.const 4))
+ (func $f5 (export "f5") (result i32) (i32.const 5))
+ (func $f6 (export "f6") (result i32) (i32.const 6))
+ (func $f7 (export "f7") (result i32) (i32.const 7))
+ (func $f8 (export "f8") (result i32) (i32.const 8))
+ (func $f9 (export "f9") (result i32) (i32.const 9))
+ (func $f10 (export "f10") (result i32) (i32.const 10))
+ (func $f11 (export "f11") (result i32) (i32.const 11))
+ (func $f12 (export "f12") (result i32) (i32.const 12))
+ (func $f13 (export "f13") (result i32) (i32.const 13))
+ (func $f14 (export "f14") (result i32) (i32.const 14))
+ (func $f15 (export "f15") (result i32) (i32.const 15))
+ (func (export "test") (param $n i32) (result i32)
+ (call_indirect (type 0) (local.get $n)))
+ (func (export "run") (param $offs i32) (param $len i32)
+ (table.init 0 (local.get $offs) (i32.const 8) (local.get $len))))
+(assert_trap (invoke "run" (i32.const 0) (i32.const 4294967292)) "out of bounds")
+(assert_return (invoke "test" (i32.const 0)) (i32.const 8))
+(assert_return (invoke "test" (i32.const 1)) (i32.const 9))
+(assert_return (invoke "test" (i32.const 2)) (i32.const 10))
+(assert_return (invoke "test" (i32.const 3)) (i32.const 11))
+(assert_return (invoke "test" (i32.const 4)) (i32.const 12))
+(assert_return (invoke "test" (i32.const 5)) (i32.const 13))
+(assert_return (invoke "test" (i32.const 6)) (i32.const 14))
+(assert_return (invoke "test" (i32.const 7)) (i32.const 15))
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast.js b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast.js
new file mode 100644
index 0000000000..bbbc512f8e
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory-spec/table_init.wast.js
@@ -0,0 +1,2096 @@
+
+'use strict';
+
+let spectest = {
+ print: console.log.bind(console),
+ print_i32: console.log.bind(console),
+ print_i32_f32: console.log.bind(console),
+ print_f64_f64: console.log.bind(console),
+ print_f32: console.log.bind(console),
+ print_f64: console.log.bind(console),
+ global_i32: 666,
+ global_f32: 666,
+ global_f64: 666,
+ table: new WebAssembly.Table({initial: 10, maximum: 20, element: 'anyfunc'}),
+ memory: new WebAssembly.Memory({initial: 1, maximum: 2})
+};
+let handler = {
+ get(target, prop) {
+ return (prop in target) ? target[prop] : {};
+ }
+};
+let registry = new Proxy({spectest}, handler);
+
+function register(name, instance) {
+ registry[name] = instance.exports;
+}
+
+function module(bytes, valid = true) {
+ let buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ let validated;
+ try {
+ validated = WebAssembly.validate(buffer);
+ } catch (e) {
+ throw new Error("Wasm validate throws");
+ }
+ if (validated !== valid) {
+ throw new Error("Wasm validate failure" + (valid ? "" : " expected"));
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = registry) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function call(instance, name, args) {
+ return instance.exports[name](...args);
+}
+
+function get(instance, name) {
+ let v = instance.exports[name];
+ return (v instanceof WebAssembly.Global) ? v.value : v;
+}
+
+function exports(name, instance) {
+ return {[name]: instance.exports};
+}
+
+function run(action) {
+ action();
+}
+
+function assert_malformed(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm decoding failure expected");
+}
+
+function assert_invalid(bytes) {
+ try { module(bytes, false) } catch (e) {
+ if (e instanceof WebAssembly.CompileError) return;
+ }
+ throw new Error("Wasm validation failure expected");
+}
+
+function assert_unlinkable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.LinkError) return;
+ }
+ throw new Error("Wasm linking failure expected");
+}
+
+function assert_uninstantiable(bytes) {
+ let mod = module(bytes);
+ try { new WebAssembly.Instance(mod, registry) } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+function assert_trap(action) {
+ try { action() } catch (e) {
+ if (e instanceof WebAssembly.RuntimeError) return;
+ }
+ throw new Error("Wasm trap expected");
+}
+
+let StackOverflow;
+try { (function f() { 1 + f() })() } catch (e) { StackOverflow = e.constructor }
+
+function assert_exhaustion(action) {
+ try { action() } catch (e) {
+ if (e instanceof StackOverflow) return;
+ }
+ throw new Error("Wasm resource exhaustion expected");
+}
+
+function assert_return(action, expected) {
+ let actual = action();
+ if (!Object.is(actual, expected)) {
+ throw new Error("Wasm return value " + expected + " expected, got " + actual);
+ };
+}
+
+function assert_return_canonical_nan(action) {
+ let actual = action();
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test that it's a canonical NaN.
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+function assert_return_arithmetic_nan(action) {
+ // Note that JS can't reliably distinguish different NaN values,
+ // so there's no good way to test for specific bitpatterns here.
+ let actual = action();
+ if (!Number.isNaN(actual)) {
+ throw new Error("Wasm return value NaN expected, got " + actual);
+ };
+}
+
+// table_init.wast:5
+let $1 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x85\x80\x80\x80\x00\x01\x60\x00\x01\x7f\x03\x86\x80\x80\x80\x00\x05\x00\x00\x00\x00\x00\x07\x9f\x80\x80\x80\x00\x05\x03\x65\x66\x30\x00\x00\x03\x65\x66\x31\x00\x01\x03\x65\x66\x32\x00\x02\x03\x65\x66\x33\x00\x03\x03\x65\x66\x34\x00\x04\x0a\xae\x80\x80\x80\x00\x05\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b");
+
+// table_init.wast:12
+register("a", $1)
+
+// table_init.wast:14
+let $2 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x07\x41\x00\x41\x04\xfc\x0c\x01\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_init.wast:37
+run(() => call($2, "test", []));
+
+// table_init.wast:38
+assert_trap(() => call($2, "check", [0]));
+
+// table_init.wast:39
+assert_trap(() => call($2, "check", [1]));
+
+// table_init.wast:40
+assert_return(() => call($2, "check", [2]), 3);
+
+// table_init.wast:41
+assert_return(() => call($2, "check", [3]), 1);
+
+// table_init.wast:42
+assert_return(() => call($2, "check", [4]), 4);
+
+// table_init.wast:43
+assert_return(() => call($2, "check", [5]), 1);
+
+// table_init.wast:44
+assert_trap(() => call($2, "check", [6]));
+
+// table_init.wast:45
+assert_return(() => call($2, "check", [7]), 2);
+
+// table_init.wast:46
+assert_return(() => call($2, "check", [8]), 7);
+
+// table_init.wast:47
+assert_return(() => call($2, "check", [9]), 1);
+
+// table_init.wast:48
+assert_return(() => call($2, "check", [10]), 8);
+
+// table_init.wast:49
+assert_trap(() => call($2, "check", [11]));
+
+// table_init.wast:50
+assert_return(() => call($2, "check", [12]), 7);
+
+// table_init.wast:51
+assert_return(() => call($2, "check", [13]), 5);
+
+// table_init.wast:52
+assert_return(() => call($2, "check", [14]), 2);
+
+// table_init.wast:53
+assert_return(() => call($2, "check", [15]), 3);
+
+// table_init.wast:54
+assert_return(() => call($2, "check", [16]), 6);
+
+// table_init.wast:55
+assert_trap(() => call($2, "check", [17]));
+
+// table_init.wast:56
+assert_trap(() => call($2, "check", [18]));
+
+// table_init.wast:57
+assert_trap(() => call($2, "check", [19]));
+
+// table_init.wast:58
+assert_trap(() => call($2, "check", [20]));
+
+// table_init.wast:59
+assert_trap(() => call($2, "check", [21]));
+
+// table_init.wast:60
+assert_trap(() => call($2, "check", [22]));
+
+// table_init.wast:61
+assert_trap(() => call($2, "check", [23]));
+
+// table_init.wast:62
+assert_trap(() => call($2, "check", [24]));
+
+// table_init.wast:63
+assert_trap(() => call($2, "check", [25]));
+
+// table_init.wast:64
+assert_trap(() => call($2, "check", [26]));
+
+// table_init.wast:65
+assert_trap(() => call($2, "check", [27]));
+
+// table_init.wast:66
+assert_trap(() => call($2, "check", [28]));
+
+// table_init.wast:67
+assert_trap(() => call($2, "check", [29]));
+
+// table_init.wast:69
+let $3 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xcb\x80\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0f\x41\x01\x41\x03\xfc\x0c\x03\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_init.wast:92
+run(() => call($3, "test", []));
+
+// table_init.wast:93
+assert_trap(() => call($3, "check", [0]));
+
+// table_init.wast:94
+assert_trap(() => call($3, "check", [1]));
+
+// table_init.wast:95
+assert_return(() => call($3, "check", [2]), 3);
+
+// table_init.wast:96
+assert_return(() => call($3, "check", [3]), 1);
+
+// table_init.wast:97
+assert_return(() => call($3, "check", [4]), 4);
+
+// table_init.wast:98
+assert_return(() => call($3, "check", [5]), 1);
+
+// table_init.wast:99
+assert_trap(() => call($3, "check", [6]));
+
+// table_init.wast:100
+assert_trap(() => call($3, "check", [7]));
+
+// table_init.wast:101
+assert_trap(() => call($3, "check", [8]));
+
+// table_init.wast:102
+assert_trap(() => call($3, "check", [9]));
+
+// table_init.wast:103
+assert_trap(() => call($3, "check", [10]));
+
+// table_init.wast:104
+assert_trap(() => call($3, "check", [11]));
+
+// table_init.wast:105
+assert_return(() => call($3, "check", [12]), 7);
+
+// table_init.wast:106
+assert_return(() => call($3, "check", [13]), 5);
+
+// table_init.wast:107
+assert_return(() => call($3, "check", [14]), 2);
+
+// table_init.wast:108
+assert_return(() => call($3, "check", [15]), 9);
+
+// table_init.wast:109
+assert_return(() => call($3, "check", [16]), 2);
+
+// table_init.wast:110
+assert_return(() => call($3, "check", [17]), 7);
+
+// table_init.wast:111
+assert_trap(() => call($3, "check", [18]));
+
+// table_init.wast:112
+assert_trap(() => call($3, "check", [19]));
+
+// table_init.wast:113
+assert_trap(() => call($3, "check", [20]));
+
+// table_init.wast:114
+assert_trap(() => call($3, "check", [21]));
+
+// table_init.wast:115
+assert_trap(() => call($3, "check", [22]));
+
+// table_init.wast:116
+assert_trap(() => call($3, "check", [23]));
+
+// table_init.wast:117
+assert_trap(() => call($3, "check", [24]));
+
+// table_init.wast:118
+assert_trap(() => call($3, "check", [25]));
+
+// table_init.wast:119
+assert_trap(() => call($3, "check", [26]));
+
+// table_init.wast:120
+assert_trap(() => call($3, "check", [27]));
+
+// table_init.wast:121
+assert_trap(() => call($3, "check", [28]));
+
+// table_init.wast:122
+assert_trap(() => call($3, "check", [29]));
+
+// table_init.wast:124
+let $4 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8d\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x00\x00\x60\x01\x7f\x01\x7f\x02\xa9\x80\x80\x80\x00\x05\x01\x61\x03\x65\x66\x30\x00\x00\x01\x61\x03\x65\x66\x31\x00\x00\x01\x61\x03\x65\x66\x32\x00\x00\x01\x61\x03\x65\x66\x33\x00\x00\x01\x61\x03\x65\x66\x34\x00\x00\x03\x88\x80\x80\x80\x00\x07\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x90\x80\x80\x80\x00\x02\x04\x74\x65\x73\x74\x00\x0a\x05\x63\x68\x65\x63\x6b\x00\x0b\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\x8d\x81\x80\x80\x00\x07\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\xce\x80\x80\x80\x00\x00\x41\x07\x41\x00\x41\x04\xfc\x0c\x01\x00\xfc\x0d\x01\x41\x0f\x41\x01\x41\x03\xfc\x0c\x03\x00\xfc\x0d\x03\x41\x14\x41\x0f\x41\x05\xfc\x0e\x00\x00\x41\x15\x41\x1d\x41\x01\xfc\x0e\x00\x00\x41\x18\x41\x0a\x41\x01\xfc\x0e\x00\x00\x41\x0d\x41\x0b\x41\x04\xfc\x0e\x00\x00\x41\x13\x41\x14\x41\x05\xfc\x0e\x00\x00\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b");
+
+// table_init.wast:155
+run(() => call($4, "test", []));
+
+// table_init.wast:156
+assert_trap(() => call($4, "check", [0]));
+
+// table_init.wast:157
+assert_trap(() => call($4, "check", [1]));
+
+// table_init.wast:158
+assert_return(() => call($4, "check", [2]), 3);
+
+// table_init.wast:159
+assert_return(() => call($4, "check", [3]), 1);
+
+// table_init.wast:160
+assert_return(() => call($4, "check", [4]), 4);
+
+// table_init.wast:161
+assert_return(() => call($4, "check", [5]), 1);
+
+// table_init.wast:162
+assert_trap(() => call($4, "check", [6]));
+
+// table_init.wast:163
+assert_return(() => call($4, "check", [7]), 2);
+
+// table_init.wast:164
+assert_return(() => call($4, "check", [8]), 7);
+
+// table_init.wast:165
+assert_return(() => call($4, "check", [9]), 1);
+
+// table_init.wast:166
+assert_return(() => call($4, "check", [10]), 8);
+
+// table_init.wast:167
+assert_trap(() => call($4, "check", [11]));
+
+// table_init.wast:168
+assert_return(() => call($4, "check", [12]), 7);
+
+// table_init.wast:169
+assert_trap(() => call($4, "check", [13]));
+
+// table_init.wast:170
+assert_return(() => call($4, "check", [14]), 7);
+
+// table_init.wast:171
+assert_return(() => call($4, "check", [15]), 5);
+
+// table_init.wast:172
+assert_return(() => call($4, "check", [16]), 2);
+
+// table_init.wast:173
+assert_return(() => call($4, "check", [17]), 7);
+
+// table_init.wast:174
+assert_trap(() => call($4, "check", [18]));
+
+// table_init.wast:175
+assert_return(() => call($4, "check", [19]), 9);
+
+// table_init.wast:176
+assert_trap(() => call($4, "check", [20]));
+
+// table_init.wast:177
+assert_return(() => call($4, "check", [21]), 7);
+
+// table_init.wast:178
+assert_trap(() => call($4, "check", [22]));
+
+// table_init.wast:179
+assert_return(() => call($4, "check", [23]), 8);
+
+// table_init.wast:180
+assert_return(() => call($4, "check", [24]), 8);
+
+// table_init.wast:181
+assert_trap(() => call($4, "check", [25]));
+
+// table_init.wast:182
+assert_trap(() => call($4, "check", [26]));
+
+// table_init.wast:183
+assert_trap(() => call($4, "check", [27]));
+
+// table_init.wast:184
+assert_trap(() => call($4, "check", [28]));
+
+// table_init.wast:185
+assert_trap(() => call($4, "check", [29]));
+
+// table_init.wast:186
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x8b\x80\x80\x80\x00\x01\x85\x80\x80\x80\x00\x00\xfc\x0d\x00\x0b");
+
+// table_init.wast:192
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x82\x80\x80\x80\x00\x01\x00\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x00\x0a\x92\x80\x80\x80\x00\x01\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x01\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:198
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x87\x80\x80\x80\x00\x01\x01\x70\x01\xd2\x00\x0b\x0a\x94\x80\x80\x80\x00\x02\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x85\x80\x80\x80\x00\x00\xfc\x0d\x04\x0b");
+
+// table_init.wast:206
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x01\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x87\x80\x80\x80\x00\x01\x01\x70\x01\xd2\x00\x0b\x0a\x9b\x80\x80\x80\x00\x02\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x01\x41\x01\xfc\x0c\x04\x00\x0b");
+
+// table_init.wast:215
+let $5 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xe5\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x85\x80\x80\x80\x00\x00\xfc\x0d\x02\x0b");
+
+// table_init.wast:234
+assert_trap(() => call($5, "test", []));
+
+// table_init.wast:236
+let $6 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x01\x41\x01\xfc\x0c\x02\x00\x0b");
+
+// table_init.wast:255
+assert_trap(() => call($6, "test", []));
+
+// table_init.wast:257
+let $7 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xf6\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x96\x80\x80\x80\x00\x00\x41\x0c\x41\x01\x41\x01\xfc\x0c\x01\x00\x41\x15\x41\x01\x41\x01\xfc\x0c\x01\x00\x0b");
+
+// table_init.wast:276
+run(() => call($7, "test", []));
+
+// table_init.wast:278
+let $8 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xe8\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x88\x80\x80\x80\x00\x00\xfc\x0d\x01\xfc\x0d\x01\x0b");
+
+// table_init.wast:297
+assert_trap(() => call($8, "test", []));
+
+// table_init.wast:299
+let $9 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xef\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8f\x80\x80\x80\x00\x00\xfc\x0d\x01\x41\x0c\x41\x01\x41\x01\xfc\x0c\x01\x00\x0b");
+
+// table_init.wast:318
+assert_trap(() => call($9, "test", []));
+
+// table_init.wast:320
+let $10 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x00\x41\x05\xfc\x0c\x01\x00\x0b");
+
+// table_init.wast:339
+assert_trap(() => call($10, "test", []));
+
+// table_init.wast:341
+let $11 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x02\x41\x03\xfc\x0c\x01\x00\x0b");
+
+// table_init.wast:360
+assert_trap(() => call($11, "test", []));
+
+// table_init.wast:362
+let $12 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x1c\x41\x01\x41\x03\xfc\x0c\x01\x00\x0b");
+
+// table_init.wast:381
+assert_trap(() => call($12, "test", []));
+
+// table_init.wast:383
+let $13 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x0c\x41\x04\x41\x00\xfc\x0c\x01\x00\x0b");
+
+// table_init.wast:402
+run(() => call($13, "test", []));
+
+// table_init.wast:404
+let $14 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x88\x80\x80\x80\x00\x02\x60\x00\x01\x7f\x60\x00\x00\x03\x8c\x80\x80\x80\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x04\x85\x80\x80\x80\x00\x01\x70\x01\x1e\x1e\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x0a\x09\xb5\x80\x80\x80\x00\x04\x00\x41\x02\x0b\x04\x03\x01\x04\x01\x01\x70\x04\xd2\x02\x0b\xd2\x07\x0b\xd2\x01\x0b\xd2\x08\x0b\x00\x41\x0c\x0b\x05\x07\x05\x02\x03\x06\x01\x70\x05\xd2\x05\x0b\xd2\x09\x0b\xd2\x02\x0b\xd2\x07\x0b\xd2\x06\x0b\x0a\xec\x80\x80\x80\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x8c\x80\x80\x80\x00\x00\x41\x1e\x41\x02\x41\x00\xfc\x0c\x01\x00\x0b");
+
+// table_init.wast:423
+run(() => call($14, "test", []));
+
+// table_init.wast:425
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x41\x01\x41\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:434
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x01\x41\x01\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:443
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x41\x01\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:452
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:461
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:470
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:479
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x41\x01\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:488
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x01\x42\x01\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:497
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x41\x01\x42\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:506
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x41\x01\x42\x01\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:515
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x41\x01\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:524
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:533
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:542
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:551
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:560
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:569
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:578
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:587
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:596
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:605
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa2\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x95\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:614
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:623
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa6\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x99\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:632
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:641
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:650
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:659
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:668
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:677
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa6\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x99\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:686
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:695
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xaa\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9d\x80\x80\x80\x00\x00\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:704
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x42\x01\x41\x01\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:713
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x42\x01\x41\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:722
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x42\x01\x41\x01\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:731
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x42\x01\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:740
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:749
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9f\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x92\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:758
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:767
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x42\x01\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:776
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x42\x01\x42\x01\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:785
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x9c\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8f\x80\x80\x80\x00\x00\x42\x01\x42\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:794
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\x99\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x42\x01\x42\x01\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:803
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x42\x01\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:812
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:821
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:830
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:839
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:848
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:857
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:866
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:875
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:884
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:893
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa6\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x99\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:902
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:911
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xaa\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:920
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:929
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa3\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x96\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:938
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa0\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x93\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:947
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:956
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x41\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:965
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xaa\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9d\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x43\x00\x00\x80\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:974
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xa7\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\x9a\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x42\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:983
+assert_invalid("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x84\x80\x80\x80\x00\x01\x60\x00\x00\x03\x83\x80\x80\x80\x00\x02\x00\x00\x04\x84\x80\x80\x80\x00\x01\x70\x00\x0a\x07\x88\x80\x80\x80\x00\x01\x04\x74\x65\x73\x74\x00\x01\x09\x8d\x80\x80\x80\x00\x01\x01\x70\x03\xd2\x00\x0b\xd2\x00\x0b\xd2\x00\x0b\x0a\xae\x80\x80\x80\x00\x02\x82\x80\x80\x80\x00\x00\x0b\xa1\x80\x80\x80\x00\x00\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\x44\x00\x00\x00\x00\x00\x00\xf0\x3f\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:992
+let $15 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:1016
+assert_trap(() => call($15, "run", [24, 16]));
+
+// table_init.wast:1017
+assert_return(() => call($15, "test", [24]), 0);
+
+// table_init.wast:1018
+assert_return(() => call($15, "test", [25]), 1);
+
+// table_init.wast:1019
+assert_return(() => call($15, "test", [26]), 2);
+
+// table_init.wast:1020
+assert_return(() => call($15, "test", [27]), 3);
+
+// table_init.wast:1021
+assert_return(() => call($15, "test", [28]), 4);
+
+// table_init.wast:1022
+assert_return(() => call($15, "test", [29]), 5);
+
+// table_init.wast:1023
+assert_return(() => call($15, "test", [30]), 6);
+
+// table_init.wast:1024
+assert_return(() => call($15, "test", [31]), 7);
+
+// table_init.wast:1025
+assert_trap(() => call($15, "test", [0]));
+
+// table_init.wast:1026
+assert_trap(() => call($15, "test", [1]));
+
+// table_init.wast:1027
+assert_trap(() => call($15, "test", [2]));
+
+// table_init.wast:1028
+assert_trap(() => call($15, "test", [3]));
+
+// table_init.wast:1029
+assert_trap(() => call($15, "test", [4]));
+
+// table_init.wast:1030
+assert_trap(() => call($15, "test", [5]));
+
+// table_init.wast:1031
+assert_trap(() => call($15, "test", [6]));
+
+// table_init.wast:1032
+assert_trap(() => call($15, "test", [7]));
+
+// table_init.wast:1033
+assert_trap(() => call($15, "test", [8]));
+
+// table_init.wast:1034
+assert_trap(() => call($15, "test", [9]));
+
+// table_init.wast:1035
+assert_trap(() => call($15, "test", [10]));
+
+// table_init.wast:1036
+assert_trap(() => call($15, "test", [11]));
+
+// table_init.wast:1037
+assert_trap(() => call($15, "test", [12]));
+
+// table_init.wast:1038
+assert_trap(() => call($15, "test", [13]));
+
+// table_init.wast:1039
+assert_trap(() => call($15, "test", [14]));
+
+// table_init.wast:1040
+assert_trap(() => call($15, "test", [15]));
+
+// table_init.wast:1041
+assert_trap(() => call($15, "test", [16]));
+
+// table_init.wast:1042
+assert_trap(() => call($15, "test", [17]));
+
+// table_init.wast:1043
+assert_trap(() => call($15, "test", [18]));
+
+// table_init.wast:1044
+assert_trap(() => call($15, "test", [19]));
+
+// table_init.wast:1045
+assert_trap(() => call($15, "test", [20]));
+
+// table_init.wast:1046
+assert_trap(() => call($15, "test", [21]));
+
+// table_init.wast:1047
+assert_trap(() => call($15, "test", [22]));
+
+// table_init.wast:1048
+assert_trap(() => call($15, "test", [23]));
+
+// table_init.wast:1050
+let $16 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x20\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:1074
+assert_trap(() => call($16, "run", [25, 16]));
+
+// table_init.wast:1075
+assert_return(() => call($16, "test", [25]), 0);
+
+// table_init.wast:1076
+assert_return(() => call($16, "test", [26]), 1);
+
+// table_init.wast:1077
+assert_return(() => call($16, "test", [27]), 2);
+
+// table_init.wast:1078
+assert_return(() => call($16, "test", [28]), 3);
+
+// table_init.wast:1079
+assert_return(() => call($16, "test", [29]), 4);
+
+// table_init.wast:1080
+assert_return(() => call($16, "test", [30]), 5);
+
+// table_init.wast:1081
+assert_return(() => call($16, "test", [31]), 6);
+
+// table_init.wast:1082
+assert_trap(() => call($16, "test", [0]));
+
+// table_init.wast:1083
+assert_trap(() => call($16, "test", [1]));
+
+// table_init.wast:1084
+assert_trap(() => call($16, "test", [2]));
+
+// table_init.wast:1085
+assert_trap(() => call($16, "test", [3]));
+
+// table_init.wast:1086
+assert_trap(() => call($16, "test", [4]));
+
+// table_init.wast:1087
+assert_trap(() => call($16, "test", [5]));
+
+// table_init.wast:1088
+assert_trap(() => call($16, "test", [6]));
+
+// table_init.wast:1089
+assert_trap(() => call($16, "test", [7]));
+
+// table_init.wast:1090
+assert_trap(() => call($16, "test", [8]));
+
+// table_init.wast:1091
+assert_trap(() => call($16, "test", [9]));
+
+// table_init.wast:1092
+assert_trap(() => call($16, "test", [10]));
+
+// table_init.wast:1093
+assert_trap(() => call($16, "test", [11]));
+
+// table_init.wast:1094
+assert_trap(() => call($16, "test", [12]));
+
+// table_init.wast:1095
+assert_trap(() => call($16, "test", [13]));
+
+// table_init.wast:1096
+assert_trap(() => call($16, "test", [14]));
+
+// table_init.wast:1097
+assert_trap(() => call($16, "test", [15]));
+
+// table_init.wast:1098
+assert_trap(() => call($16, "test", [16]));
+
+// table_init.wast:1099
+assert_trap(() => call($16, "test", [17]));
+
+// table_init.wast:1100
+assert_trap(() => call($16, "test", [18]));
+
+// table_init.wast:1101
+assert_trap(() => call($16, "test", [19]));
+
+// table_init.wast:1102
+assert_trap(() => call($16, "test", [20]));
+
+// table_init.wast:1103
+assert_trap(() => call($16, "test", [21]));
+
+// table_init.wast:1104
+assert_trap(() => call($16, "test", [22]));
+
+// table_init.wast:1105
+assert_trap(() => call($16, "test", [23]));
+
+// table_init.wast:1106
+assert_trap(() => call($16, "test", [24]));
+
+// table_init.wast:1108
+let $17 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x87\x80\x80\x80\x00\x01\x70\x01\xa0\x01\xc0\x02\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:1132
+assert_trap(() => call($17, "run", [96, 32]));
+
+// table_init.wast:1133
+assert_return(() => call($17, "test", [96]), 0);
+
+// table_init.wast:1134
+assert_return(() => call($17, "test", [97]), 1);
+
+// table_init.wast:1135
+assert_return(() => call($17, "test", [98]), 2);
+
+// table_init.wast:1136
+assert_return(() => call($17, "test", [99]), 3);
+
+// table_init.wast:1137
+assert_return(() => call($17, "test", [100]), 4);
+
+// table_init.wast:1138
+assert_return(() => call($17, "test", [101]), 5);
+
+// table_init.wast:1139
+assert_return(() => call($17, "test", [102]), 6);
+
+// table_init.wast:1140
+assert_return(() => call($17, "test", [103]), 7);
+
+// table_init.wast:1141
+assert_return(() => call($17, "test", [104]), 8);
+
+// table_init.wast:1142
+assert_return(() => call($17, "test", [105]), 9);
+
+// table_init.wast:1143
+assert_return(() => call($17, "test", [106]), 10);
+
+// table_init.wast:1144
+assert_return(() => call($17, "test", [107]), 11);
+
+// table_init.wast:1145
+assert_return(() => call($17, "test", [108]), 12);
+
+// table_init.wast:1146
+assert_return(() => call($17, "test", [109]), 13);
+
+// table_init.wast:1147
+assert_return(() => call($17, "test", [110]), 14);
+
+// table_init.wast:1148
+assert_return(() => call($17, "test", [111]), 15);
+
+// table_init.wast:1149
+assert_trap(() => call($17, "test", [112]));
+
+// table_init.wast:1150
+assert_trap(() => call($17, "test", [113]));
+
+// table_init.wast:1151
+assert_trap(() => call($17, "test", [114]));
+
+// table_init.wast:1152
+assert_trap(() => call($17, "test", [115]));
+
+// table_init.wast:1153
+assert_trap(() => call($17, "test", [116]));
+
+// table_init.wast:1154
+assert_trap(() => call($17, "test", [117]));
+
+// table_init.wast:1155
+assert_trap(() => call($17, "test", [118]));
+
+// table_init.wast:1156
+assert_trap(() => call($17, "test", [119]));
+
+// table_init.wast:1157
+assert_trap(() => call($17, "test", [120]));
+
+// table_init.wast:1158
+assert_trap(() => call($17, "test", [121]));
+
+// table_init.wast:1159
+assert_trap(() => call($17, "test", [122]));
+
+// table_init.wast:1160
+assert_trap(() => call($17, "test", [123]));
+
+// table_init.wast:1161
+assert_trap(() => call($17, "test", [124]));
+
+// table_init.wast:1162
+assert_trap(() => call($17, "test", [125]));
+
+// table_init.wast:1163
+assert_trap(() => call($17, "test", [126]));
+
+// table_init.wast:1164
+assert_trap(() => call($17, "test", [127]));
+
+// table_init.wast:1165
+assert_trap(() => call($17, "test", [128]));
+
+// table_init.wast:1166
+assert_trap(() => call($17, "test", [129]));
+
+// table_init.wast:1167
+assert_trap(() => call($17, "test", [130]));
+
+// table_init.wast:1168
+assert_trap(() => call($17, "test", [131]));
+
+// table_init.wast:1169
+assert_trap(() => call($17, "test", [132]));
+
+// table_init.wast:1170
+assert_trap(() => call($17, "test", [133]));
+
+// table_init.wast:1171
+assert_trap(() => call($17, "test", [134]));
+
+// table_init.wast:1172
+assert_trap(() => call($17, "test", [135]));
+
+// table_init.wast:1173
+assert_trap(() => call($17, "test", [136]));
+
+// table_init.wast:1174
+assert_trap(() => call($17, "test", [137]));
+
+// table_init.wast:1175
+assert_trap(() => call($17, "test", [138]));
+
+// table_init.wast:1176
+assert_trap(() => call($17, "test", [139]));
+
+// table_init.wast:1177
+assert_trap(() => call($17, "test", [140]));
+
+// table_init.wast:1178
+assert_trap(() => call($17, "test", [141]));
+
+// table_init.wast:1179
+assert_trap(() => call($17, "test", [142]));
+
+// table_init.wast:1180
+assert_trap(() => call($17, "test", [143]));
+
+// table_init.wast:1181
+assert_trap(() => call($17, "test", [144]));
+
+// table_init.wast:1182
+assert_trap(() => call($17, "test", [145]));
+
+// table_init.wast:1183
+assert_trap(() => call($17, "test", [146]));
+
+// table_init.wast:1184
+assert_trap(() => call($17, "test", [147]));
+
+// table_init.wast:1185
+assert_trap(() => call($17, "test", [148]));
+
+// table_init.wast:1186
+assert_trap(() => call($17, "test", [149]));
+
+// table_init.wast:1187
+assert_trap(() => call($17, "test", [150]));
+
+// table_init.wast:1188
+assert_trap(() => call($17, "test", [151]));
+
+// table_init.wast:1189
+assert_trap(() => call($17, "test", [152]));
+
+// table_init.wast:1190
+assert_trap(() => call($17, "test", [153]));
+
+// table_init.wast:1191
+assert_trap(() => call($17, "test", [154]));
+
+// table_init.wast:1192
+assert_trap(() => call($17, "test", [155]));
+
+// table_init.wast:1193
+assert_trap(() => call($17, "test", [156]));
+
+// table_init.wast:1194
+assert_trap(() => call($17, "test", [157]));
+
+// table_init.wast:1195
+assert_trap(() => call($17, "test", [158]));
+
+// table_init.wast:1196
+assert_trap(() => call($17, "test", [159]));
+
+// table_init.wast:1197
+assert_trap(() => call($17, "test", [0]));
+
+// table_init.wast:1198
+assert_trap(() => call($17, "test", [1]));
+
+// table_init.wast:1199
+assert_trap(() => call($17, "test", [2]));
+
+// table_init.wast:1200
+assert_trap(() => call($17, "test", [3]));
+
+// table_init.wast:1201
+assert_trap(() => call($17, "test", [4]));
+
+// table_init.wast:1202
+assert_trap(() => call($17, "test", [5]));
+
+// table_init.wast:1203
+assert_trap(() => call($17, "test", [6]));
+
+// table_init.wast:1204
+assert_trap(() => call($17, "test", [7]));
+
+// table_init.wast:1205
+assert_trap(() => call($17, "test", [8]));
+
+// table_init.wast:1206
+assert_trap(() => call($17, "test", [9]));
+
+// table_init.wast:1207
+assert_trap(() => call($17, "test", [10]));
+
+// table_init.wast:1208
+assert_trap(() => call($17, "test", [11]));
+
+// table_init.wast:1209
+assert_trap(() => call($17, "test", [12]));
+
+// table_init.wast:1210
+assert_trap(() => call($17, "test", [13]));
+
+// table_init.wast:1211
+assert_trap(() => call($17, "test", [14]));
+
+// table_init.wast:1212
+assert_trap(() => call($17, "test", [15]));
+
+// table_init.wast:1213
+assert_trap(() => call($17, "test", [16]));
+
+// table_init.wast:1214
+assert_trap(() => call($17, "test", [17]));
+
+// table_init.wast:1215
+assert_trap(() => call($17, "test", [18]));
+
+// table_init.wast:1216
+assert_trap(() => call($17, "test", [19]));
+
+// table_init.wast:1217
+assert_trap(() => call($17, "test", [20]));
+
+// table_init.wast:1218
+assert_trap(() => call($17, "test", [21]));
+
+// table_init.wast:1219
+assert_trap(() => call($17, "test", [22]));
+
+// table_init.wast:1220
+assert_trap(() => call($17, "test", [23]));
+
+// table_init.wast:1221
+assert_trap(() => call($17, "test", [24]));
+
+// table_init.wast:1222
+assert_trap(() => call($17, "test", [25]));
+
+// table_init.wast:1223
+assert_trap(() => call($17, "test", [26]));
+
+// table_init.wast:1224
+assert_trap(() => call($17, "test", [27]));
+
+// table_init.wast:1225
+assert_trap(() => call($17, "test", [28]));
+
+// table_init.wast:1226
+assert_trap(() => call($17, "test", [29]));
+
+// table_init.wast:1227
+assert_trap(() => call($17, "test", [30]));
+
+// table_init.wast:1228
+assert_trap(() => call($17, "test", [31]));
+
+// table_init.wast:1229
+assert_trap(() => call($17, "test", [32]));
+
+// table_init.wast:1230
+assert_trap(() => call($17, "test", [33]));
+
+// table_init.wast:1231
+assert_trap(() => call($17, "test", [34]));
+
+// table_init.wast:1232
+assert_trap(() => call($17, "test", [35]));
+
+// table_init.wast:1233
+assert_trap(() => call($17, "test", [36]));
+
+// table_init.wast:1234
+assert_trap(() => call($17, "test", [37]));
+
+// table_init.wast:1235
+assert_trap(() => call($17, "test", [38]));
+
+// table_init.wast:1236
+assert_trap(() => call($17, "test", [39]));
+
+// table_init.wast:1237
+assert_trap(() => call($17, "test", [40]));
+
+// table_init.wast:1238
+assert_trap(() => call($17, "test", [41]));
+
+// table_init.wast:1239
+assert_trap(() => call($17, "test", [42]));
+
+// table_init.wast:1240
+assert_trap(() => call($17, "test", [43]));
+
+// table_init.wast:1241
+assert_trap(() => call($17, "test", [44]));
+
+// table_init.wast:1242
+assert_trap(() => call($17, "test", [45]));
+
+// table_init.wast:1243
+assert_trap(() => call($17, "test", [46]));
+
+// table_init.wast:1244
+assert_trap(() => call($17, "test", [47]));
+
+// table_init.wast:1245
+assert_trap(() => call($17, "test", [48]));
+
+// table_init.wast:1246
+assert_trap(() => call($17, "test", [49]));
+
+// table_init.wast:1247
+assert_trap(() => call($17, "test", [50]));
+
+// table_init.wast:1248
+assert_trap(() => call($17, "test", [51]));
+
+// table_init.wast:1249
+assert_trap(() => call($17, "test", [52]));
+
+// table_init.wast:1250
+assert_trap(() => call($17, "test", [53]));
+
+// table_init.wast:1251
+assert_trap(() => call($17, "test", [54]));
+
+// table_init.wast:1252
+assert_trap(() => call($17, "test", [55]));
+
+// table_init.wast:1253
+assert_trap(() => call($17, "test", [56]));
+
+// table_init.wast:1254
+assert_trap(() => call($17, "test", [57]));
+
+// table_init.wast:1255
+assert_trap(() => call($17, "test", [58]));
+
+// table_init.wast:1256
+assert_trap(() => call($17, "test", [59]));
+
+// table_init.wast:1257
+assert_trap(() => call($17, "test", [60]));
+
+// table_init.wast:1258
+assert_trap(() => call($17, "test", [61]));
+
+// table_init.wast:1259
+assert_trap(() => call($17, "test", [62]));
+
+// table_init.wast:1260
+assert_trap(() => call($17, "test", [63]));
+
+// table_init.wast:1261
+assert_trap(() => call($17, "test", [64]));
+
+// table_init.wast:1262
+assert_trap(() => call($17, "test", [65]));
+
+// table_init.wast:1263
+assert_trap(() => call($17, "test", [66]));
+
+// table_init.wast:1264
+assert_trap(() => call($17, "test", [67]));
+
+// table_init.wast:1265
+assert_trap(() => call($17, "test", [68]));
+
+// table_init.wast:1266
+assert_trap(() => call($17, "test", [69]));
+
+// table_init.wast:1267
+assert_trap(() => call($17, "test", [70]));
+
+// table_init.wast:1268
+assert_trap(() => call($17, "test", [71]));
+
+// table_init.wast:1269
+assert_trap(() => call($17, "test", [72]));
+
+// table_init.wast:1270
+assert_trap(() => call($17, "test", [73]));
+
+// table_init.wast:1271
+assert_trap(() => call($17, "test", [74]));
+
+// table_init.wast:1272
+assert_trap(() => call($17, "test", [75]));
+
+// table_init.wast:1273
+assert_trap(() => call($17, "test", [76]));
+
+// table_init.wast:1274
+assert_trap(() => call($17, "test", [77]));
+
+// table_init.wast:1275
+assert_trap(() => call($17, "test", [78]));
+
+// table_init.wast:1276
+assert_trap(() => call($17, "test", [79]));
+
+// table_init.wast:1277
+assert_trap(() => call($17, "test", [80]));
+
+// table_init.wast:1278
+assert_trap(() => call($17, "test", [81]));
+
+// table_init.wast:1279
+assert_trap(() => call($17, "test", [82]));
+
+// table_init.wast:1280
+assert_trap(() => call($17, "test", [83]));
+
+// table_init.wast:1281
+assert_trap(() => call($17, "test", [84]));
+
+// table_init.wast:1282
+assert_trap(() => call($17, "test", [85]));
+
+// table_init.wast:1283
+assert_trap(() => call($17, "test", [86]));
+
+// table_init.wast:1284
+assert_trap(() => call($17, "test", [87]));
+
+// table_init.wast:1285
+assert_trap(() => call($17, "test", [88]));
+
+// table_init.wast:1286
+assert_trap(() => call($17, "test", [89]));
+
+// table_init.wast:1287
+assert_trap(() => call($17, "test", [90]));
+
+// table_init.wast:1288
+assert_trap(() => call($17, "test", [91]));
+
+// table_init.wast:1289
+assert_trap(() => call($17, "test", [92]));
+
+// table_init.wast:1290
+assert_trap(() => call($17, "test", [93]));
+
+// table_init.wast:1291
+assert_trap(() => call($17, "test", [94]));
+
+// table_init.wast:1292
+assert_trap(() => call($17, "test", [95]));
+
+// table_init.wast:1294
+let $18 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x87\x80\x80\x80\x00\x01\x70\x01\xa0\x01\xc0\x02\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:1318
+assert_trap(() => call($18, "run", [97, 31]));
+
+// table_init.wast:1319
+assert_return(() => call($18, "test", [97]), 0);
+
+// table_init.wast:1320
+assert_return(() => call($18, "test", [98]), 1);
+
+// table_init.wast:1321
+assert_return(() => call($18, "test", [99]), 2);
+
+// table_init.wast:1322
+assert_return(() => call($18, "test", [100]), 3);
+
+// table_init.wast:1323
+assert_return(() => call($18, "test", [101]), 4);
+
+// table_init.wast:1324
+assert_return(() => call($18, "test", [102]), 5);
+
+// table_init.wast:1325
+assert_return(() => call($18, "test", [103]), 6);
+
+// table_init.wast:1326
+assert_return(() => call($18, "test", [104]), 7);
+
+// table_init.wast:1327
+assert_return(() => call($18, "test", [105]), 8);
+
+// table_init.wast:1328
+assert_return(() => call($18, "test", [106]), 9);
+
+// table_init.wast:1329
+assert_return(() => call($18, "test", [107]), 10);
+
+// table_init.wast:1330
+assert_return(() => call($18, "test", [108]), 11);
+
+// table_init.wast:1331
+assert_return(() => call($18, "test", [109]), 12);
+
+// table_init.wast:1332
+assert_return(() => call($18, "test", [110]), 13);
+
+// table_init.wast:1333
+assert_return(() => call($18, "test", [111]), 14);
+
+// table_init.wast:1334
+assert_return(() => call($18, "test", [112]), 15);
+
+// table_init.wast:1335
+assert_trap(() => call($18, "test", [113]));
+
+// table_init.wast:1336
+assert_trap(() => call($18, "test", [114]));
+
+// table_init.wast:1337
+assert_trap(() => call($18, "test", [115]));
+
+// table_init.wast:1338
+assert_trap(() => call($18, "test", [116]));
+
+// table_init.wast:1339
+assert_trap(() => call($18, "test", [117]));
+
+// table_init.wast:1340
+assert_trap(() => call($18, "test", [118]));
+
+// table_init.wast:1341
+assert_trap(() => call($18, "test", [119]));
+
+// table_init.wast:1342
+assert_trap(() => call($18, "test", [120]));
+
+// table_init.wast:1343
+assert_trap(() => call($18, "test", [121]));
+
+// table_init.wast:1344
+assert_trap(() => call($18, "test", [122]));
+
+// table_init.wast:1345
+assert_trap(() => call($18, "test", [123]));
+
+// table_init.wast:1346
+assert_trap(() => call($18, "test", [124]));
+
+// table_init.wast:1347
+assert_trap(() => call($18, "test", [125]));
+
+// table_init.wast:1348
+assert_trap(() => call($18, "test", [126]));
+
+// table_init.wast:1349
+assert_trap(() => call($18, "test", [127]));
+
+// table_init.wast:1350
+assert_trap(() => call($18, "test", [128]));
+
+// table_init.wast:1351
+assert_trap(() => call($18, "test", [129]));
+
+// table_init.wast:1352
+assert_trap(() => call($18, "test", [130]));
+
+// table_init.wast:1353
+assert_trap(() => call($18, "test", [131]));
+
+// table_init.wast:1354
+assert_trap(() => call($18, "test", [132]));
+
+// table_init.wast:1355
+assert_trap(() => call($18, "test", [133]));
+
+// table_init.wast:1356
+assert_trap(() => call($18, "test", [134]));
+
+// table_init.wast:1357
+assert_trap(() => call($18, "test", [135]));
+
+// table_init.wast:1358
+assert_trap(() => call($18, "test", [136]));
+
+// table_init.wast:1359
+assert_trap(() => call($18, "test", [137]));
+
+// table_init.wast:1360
+assert_trap(() => call($18, "test", [138]));
+
+// table_init.wast:1361
+assert_trap(() => call($18, "test", [139]));
+
+// table_init.wast:1362
+assert_trap(() => call($18, "test", [140]));
+
+// table_init.wast:1363
+assert_trap(() => call($18, "test", [141]));
+
+// table_init.wast:1364
+assert_trap(() => call($18, "test", [142]));
+
+// table_init.wast:1365
+assert_trap(() => call($18, "test", [143]));
+
+// table_init.wast:1366
+assert_trap(() => call($18, "test", [144]));
+
+// table_init.wast:1367
+assert_trap(() => call($18, "test", [145]));
+
+// table_init.wast:1368
+assert_trap(() => call($18, "test", [146]));
+
+// table_init.wast:1369
+assert_trap(() => call($18, "test", [147]));
+
+// table_init.wast:1370
+assert_trap(() => call($18, "test", [148]));
+
+// table_init.wast:1371
+assert_trap(() => call($18, "test", [149]));
+
+// table_init.wast:1372
+assert_trap(() => call($18, "test", [150]));
+
+// table_init.wast:1373
+assert_trap(() => call($18, "test", [151]));
+
+// table_init.wast:1374
+assert_trap(() => call($18, "test", [152]));
+
+// table_init.wast:1375
+assert_trap(() => call($18, "test", [153]));
+
+// table_init.wast:1376
+assert_trap(() => call($18, "test", [154]));
+
+// table_init.wast:1377
+assert_trap(() => call($18, "test", [155]));
+
+// table_init.wast:1378
+assert_trap(() => call($18, "test", [156]));
+
+// table_init.wast:1379
+assert_trap(() => call($18, "test", [157]));
+
+// table_init.wast:1380
+assert_trap(() => call($18, "test", [158]));
+
+// table_init.wast:1381
+assert_trap(() => call($18, "test", [159]));
+
+// table_init.wast:1382
+assert_trap(() => call($18, "test", [0]));
+
+// table_init.wast:1383
+assert_trap(() => call($18, "test", [1]));
+
+// table_init.wast:1384
+assert_trap(() => call($18, "test", [2]));
+
+// table_init.wast:1385
+assert_trap(() => call($18, "test", [3]));
+
+// table_init.wast:1386
+assert_trap(() => call($18, "test", [4]));
+
+// table_init.wast:1387
+assert_trap(() => call($18, "test", [5]));
+
+// table_init.wast:1388
+assert_trap(() => call($18, "test", [6]));
+
+// table_init.wast:1389
+assert_trap(() => call($18, "test", [7]));
+
+// table_init.wast:1390
+assert_trap(() => call($18, "test", [8]));
+
+// table_init.wast:1391
+assert_trap(() => call($18, "test", [9]));
+
+// table_init.wast:1392
+assert_trap(() => call($18, "test", [10]));
+
+// table_init.wast:1393
+assert_trap(() => call($18, "test", [11]));
+
+// table_init.wast:1394
+assert_trap(() => call($18, "test", [12]));
+
+// table_init.wast:1395
+assert_trap(() => call($18, "test", [13]));
+
+// table_init.wast:1396
+assert_trap(() => call($18, "test", [14]));
+
+// table_init.wast:1397
+assert_trap(() => call($18, "test", [15]));
+
+// table_init.wast:1398
+assert_trap(() => call($18, "test", [16]));
+
+// table_init.wast:1399
+assert_trap(() => call($18, "test", [17]));
+
+// table_init.wast:1400
+assert_trap(() => call($18, "test", [18]));
+
+// table_init.wast:1401
+assert_trap(() => call($18, "test", [19]));
+
+// table_init.wast:1402
+assert_trap(() => call($18, "test", [20]));
+
+// table_init.wast:1403
+assert_trap(() => call($18, "test", [21]));
+
+// table_init.wast:1404
+assert_trap(() => call($18, "test", [22]));
+
+// table_init.wast:1405
+assert_trap(() => call($18, "test", [23]));
+
+// table_init.wast:1406
+assert_trap(() => call($18, "test", [24]));
+
+// table_init.wast:1407
+assert_trap(() => call($18, "test", [25]));
+
+// table_init.wast:1408
+assert_trap(() => call($18, "test", [26]));
+
+// table_init.wast:1409
+assert_trap(() => call($18, "test", [27]));
+
+// table_init.wast:1410
+assert_trap(() => call($18, "test", [28]));
+
+// table_init.wast:1411
+assert_trap(() => call($18, "test", [29]));
+
+// table_init.wast:1412
+assert_trap(() => call($18, "test", [30]));
+
+// table_init.wast:1413
+assert_trap(() => call($18, "test", [31]));
+
+// table_init.wast:1414
+assert_trap(() => call($18, "test", [32]));
+
+// table_init.wast:1415
+assert_trap(() => call($18, "test", [33]));
+
+// table_init.wast:1416
+assert_trap(() => call($18, "test", [34]));
+
+// table_init.wast:1417
+assert_trap(() => call($18, "test", [35]));
+
+// table_init.wast:1418
+assert_trap(() => call($18, "test", [36]));
+
+// table_init.wast:1419
+assert_trap(() => call($18, "test", [37]));
+
+// table_init.wast:1420
+assert_trap(() => call($18, "test", [38]));
+
+// table_init.wast:1421
+assert_trap(() => call($18, "test", [39]));
+
+// table_init.wast:1422
+assert_trap(() => call($18, "test", [40]));
+
+// table_init.wast:1423
+assert_trap(() => call($18, "test", [41]));
+
+// table_init.wast:1424
+assert_trap(() => call($18, "test", [42]));
+
+// table_init.wast:1425
+assert_trap(() => call($18, "test", [43]));
+
+// table_init.wast:1426
+assert_trap(() => call($18, "test", [44]));
+
+// table_init.wast:1427
+assert_trap(() => call($18, "test", [45]));
+
+// table_init.wast:1428
+assert_trap(() => call($18, "test", [46]));
+
+// table_init.wast:1429
+assert_trap(() => call($18, "test", [47]));
+
+// table_init.wast:1430
+assert_trap(() => call($18, "test", [48]));
+
+// table_init.wast:1431
+assert_trap(() => call($18, "test", [49]));
+
+// table_init.wast:1432
+assert_trap(() => call($18, "test", [50]));
+
+// table_init.wast:1433
+assert_trap(() => call($18, "test", [51]));
+
+// table_init.wast:1434
+assert_trap(() => call($18, "test", [52]));
+
+// table_init.wast:1435
+assert_trap(() => call($18, "test", [53]));
+
+// table_init.wast:1436
+assert_trap(() => call($18, "test", [54]));
+
+// table_init.wast:1437
+assert_trap(() => call($18, "test", [55]));
+
+// table_init.wast:1438
+assert_trap(() => call($18, "test", [56]));
+
+// table_init.wast:1439
+assert_trap(() => call($18, "test", [57]));
+
+// table_init.wast:1440
+assert_trap(() => call($18, "test", [58]));
+
+// table_init.wast:1441
+assert_trap(() => call($18, "test", [59]));
+
+// table_init.wast:1442
+assert_trap(() => call($18, "test", [60]));
+
+// table_init.wast:1443
+assert_trap(() => call($18, "test", [61]));
+
+// table_init.wast:1444
+assert_trap(() => call($18, "test", [62]));
+
+// table_init.wast:1445
+assert_trap(() => call($18, "test", [63]));
+
+// table_init.wast:1446
+assert_trap(() => call($18, "test", [64]));
+
+// table_init.wast:1447
+assert_trap(() => call($18, "test", [65]));
+
+// table_init.wast:1448
+assert_trap(() => call($18, "test", [66]));
+
+// table_init.wast:1449
+assert_trap(() => call($18, "test", [67]));
+
+// table_init.wast:1450
+assert_trap(() => call($18, "test", [68]));
+
+// table_init.wast:1451
+assert_trap(() => call($18, "test", [69]));
+
+// table_init.wast:1452
+assert_trap(() => call($18, "test", [70]));
+
+// table_init.wast:1453
+assert_trap(() => call($18, "test", [71]));
+
+// table_init.wast:1454
+assert_trap(() => call($18, "test", [72]));
+
+// table_init.wast:1455
+assert_trap(() => call($18, "test", [73]));
+
+// table_init.wast:1456
+assert_trap(() => call($18, "test", [74]));
+
+// table_init.wast:1457
+assert_trap(() => call($18, "test", [75]));
+
+// table_init.wast:1458
+assert_trap(() => call($18, "test", [76]));
+
+// table_init.wast:1459
+assert_trap(() => call($18, "test", [77]));
+
+// table_init.wast:1460
+assert_trap(() => call($18, "test", [78]));
+
+// table_init.wast:1461
+assert_trap(() => call($18, "test", [79]));
+
+// table_init.wast:1462
+assert_trap(() => call($18, "test", [80]));
+
+// table_init.wast:1463
+assert_trap(() => call($18, "test", [81]));
+
+// table_init.wast:1464
+assert_trap(() => call($18, "test", [82]));
+
+// table_init.wast:1465
+assert_trap(() => call($18, "test", [83]));
+
+// table_init.wast:1466
+assert_trap(() => call($18, "test", [84]));
+
+// table_init.wast:1467
+assert_trap(() => call($18, "test", [85]));
+
+// table_init.wast:1468
+assert_trap(() => call($18, "test", [86]));
+
+// table_init.wast:1469
+assert_trap(() => call($18, "test", [87]));
+
+// table_init.wast:1470
+assert_trap(() => call($18, "test", [88]));
+
+// table_init.wast:1471
+assert_trap(() => call($18, "test", [89]));
+
+// table_init.wast:1472
+assert_trap(() => call($18, "test", [90]));
+
+// table_init.wast:1473
+assert_trap(() => call($18, "test", [91]));
+
+// table_init.wast:1474
+assert_trap(() => call($18, "test", [92]));
+
+// table_init.wast:1475
+assert_trap(() => call($18, "test", [93]));
+
+// table_init.wast:1476
+assert_trap(() => call($18, "test", [94]));
+
+// table_init.wast:1477
+assert_trap(() => call($18, "test", [95]));
+
+// table_init.wast:1478
+assert_trap(() => call($18, "test", [96]));
+
+// table_init.wast:1480
+let $19 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x40\x40\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x00\x20\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:1504
+assert_trap(() => call($19, "run", [48, -16]));
+
+// table_init.wast:1505
+assert_return(() => call($19, "test", [48]), 0);
+
+// table_init.wast:1506
+assert_return(() => call($19, "test", [49]), 1);
+
+// table_init.wast:1507
+assert_return(() => call($19, "test", [50]), 2);
+
+// table_init.wast:1508
+assert_return(() => call($19, "test", [51]), 3);
+
+// table_init.wast:1509
+assert_return(() => call($19, "test", [52]), 4);
+
+// table_init.wast:1510
+assert_return(() => call($19, "test", [53]), 5);
+
+// table_init.wast:1511
+assert_return(() => call($19, "test", [54]), 6);
+
+// table_init.wast:1512
+assert_return(() => call($19, "test", [55]), 7);
+
+// table_init.wast:1513
+assert_return(() => call($19, "test", [56]), 8);
+
+// table_init.wast:1514
+assert_return(() => call($19, "test", [57]), 9);
+
+// table_init.wast:1515
+assert_return(() => call($19, "test", [58]), 10);
+
+// table_init.wast:1516
+assert_return(() => call($19, "test", [59]), 11);
+
+// table_init.wast:1517
+assert_return(() => call($19, "test", [60]), 12);
+
+// table_init.wast:1518
+assert_return(() => call($19, "test", [61]), 13);
+
+// table_init.wast:1519
+assert_return(() => call($19, "test", [62]), 14);
+
+// table_init.wast:1520
+assert_return(() => call($19, "test", [63]), 15);
+
+// table_init.wast:1521
+assert_trap(() => call($19, "test", [0]));
+
+// table_init.wast:1522
+assert_trap(() => call($19, "test", [1]));
+
+// table_init.wast:1523
+assert_trap(() => call($19, "test", [2]));
+
+// table_init.wast:1524
+assert_trap(() => call($19, "test", [3]));
+
+// table_init.wast:1525
+assert_trap(() => call($19, "test", [4]));
+
+// table_init.wast:1526
+assert_trap(() => call($19, "test", [5]));
+
+// table_init.wast:1527
+assert_trap(() => call($19, "test", [6]));
+
+// table_init.wast:1528
+assert_trap(() => call($19, "test", [7]));
+
+// table_init.wast:1529
+assert_trap(() => call($19, "test", [8]));
+
+// table_init.wast:1530
+assert_trap(() => call($19, "test", [9]));
+
+// table_init.wast:1531
+assert_trap(() => call($19, "test", [10]));
+
+// table_init.wast:1532
+assert_trap(() => call($19, "test", [11]));
+
+// table_init.wast:1533
+assert_trap(() => call($19, "test", [12]));
+
+// table_init.wast:1534
+assert_trap(() => call($19, "test", [13]));
+
+// table_init.wast:1535
+assert_trap(() => call($19, "test", [14]));
+
+// table_init.wast:1536
+assert_trap(() => call($19, "test", [15]));
+
+// table_init.wast:1537
+assert_trap(() => call($19, "test", [16]));
+
+// table_init.wast:1538
+assert_trap(() => call($19, "test", [17]));
+
+// table_init.wast:1539
+assert_trap(() => call($19, "test", [18]));
+
+// table_init.wast:1540
+assert_trap(() => call($19, "test", [19]));
+
+// table_init.wast:1541
+assert_trap(() => call($19, "test", [20]));
+
+// table_init.wast:1542
+assert_trap(() => call($19, "test", [21]));
+
+// table_init.wast:1543
+assert_trap(() => call($19, "test", [22]));
+
+// table_init.wast:1544
+assert_trap(() => call($19, "test", [23]));
+
+// table_init.wast:1545
+assert_trap(() => call($19, "test", [24]));
+
+// table_init.wast:1546
+assert_trap(() => call($19, "test", [25]));
+
+// table_init.wast:1547
+assert_trap(() => call($19, "test", [26]));
+
+// table_init.wast:1548
+assert_trap(() => call($19, "test", [27]));
+
+// table_init.wast:1549
+assert_trap(() => call($19, "test", [28]));
+
+// table_init.wast:1550
+assert_trap(() => call($19, "test", [29]));
+
+// table_init.wast:1551
+assert_trap(() => call($19, "test", [30]));
+
+// table_init.wast:1552
+assert_trap(() => call($19, "test", [31]));
+
+// table_init.wast:1553
+assert_trap(() => call($19, "test", [32]));
+
+// table_init.wast:1554
+assert_trap(() => call($19, "test", [33]));
+
+// table_init.wast:1555
+assert_trap(() => call($19, "test", [34]));
+
+// table_init.wast:1556
+assert_trap(() => call($19, "test", [35]));
+
+// table_init.wast:1557
+assert_trap(() => call($19, "test", [36]));
+
+// table_init.wast:1558
+assert_trap(() => call($19, "test", [37]));
+
+// table_init.wast:1559
+assert_trap(() => call($19, "test", [38]));
+
+// table_init.wast:1560
+assert_trap(() => call($19, "test", [39]));
+
+// table_init.wast:1561
+assert_trap(() => call($19, "test", [40]));
+
+// table_init.wast:1562
+assert_trap(() => call($19, "test", [41]));
+
+// table_init.wast:1563
+assert_trap(() => call($19, "test", [42]));
+
+// table_init.wast:1564
+assert_trap(() => call($19, "test", [43]));
+
+// table_init.wast:1565
+assert_trap(() => call($19, "test", [44]));
+
+// table_init.wast:1566
+assert_trap(() => call($19, "test", [45]));
+
+// table_init.wast:1567
+assert_trap(() => call($19, "test", [46]));
+
+// table_init.wast:1568
+assert_trap(() => call($19, "test", [47]));
+
+// table_init.wast:1570
+let $20 = instance("\x00\x61\x73\x6d\x01\x00\x00\x00\x01\x8f\x80\x80\x80\x00\x03\x60\x00\x01\x7f\x60\x01\x7f\x01\x7f\x60\x02\x7f\x7f\x00\x03\x93\x80\x80\x80\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x04\x85\x80\x80\x80\x00\x01\x70\x01\x10\x10\x07\xe4\x80\x80\x80\x00\x12\x02\x66\x30\x00\x00\x02\x66\x31\x00\x01\x02\x66\x32\x00\x02\x02\x66\x33\x00\x03\x02\x66\x34\x00\x04\x02\x66\x35\x00\x05\x02\x66\x36\x00\x06\x02\x66\x37\x00\x07\x02\x66\x38\x00\x08\x02\x66\x39\x00\x09\x03\x66\x31\x30\x00\x0a\x03\x66\x31\x31\x00\x0b\x03\x66\x31\x32\x00\x0c\x03\x66\x31\x33\x00\x0d\x03\x66\x31\x34\x00\x0e\x03\x66\x31\x35\x00\x0f\x04\x74\x65\x73\x74\x00\x10\x03\x72\x75\x6e\x00\x11\x09\xb4\x80\x80\x80\x00\x01\x01\x70\x10\xd2\x00\x0b\xd2\x01\x0b\xd2\x02\x0b\xd2\x03\x0b\xd2\x04\x0b\xd2\x05\x0b\xd2\x06\x0b\xd2\x07\x0b\xd2\x08\x0b\xd2\x09\x0b\xd2\x0a\x0b\xd2\x0b\x0b\xd2\x0c\x0b\xd2\x0d\x0b\xd2\x0e\x0b\xd2\x0f\x0b\x0a\xae\x81\x80\x80\x00\x12\x84\x80\x80\x80\x00\x00\x41\x00\x0b\x84\x80\x80\x80\x00\x00\x41\x01\x0b\x84\x80\x80\x80\x00\x00\x41\x02\x0b\x84\x80\x80\x80\x00\x00\x41\x03\x0b\x84\x80\x80\x80\x00\x00\x41\x04\x0b\x84\x80\x80\x80\x00\x00\x41\x05\x0b\x84\x80\x80\x80\x00\x00\x41\x06\x0b\x84\x80\x80\x80\x00\x00\x41\x07\x0b\x84\x80\x80\x80\x00\x00\x41\x08\x0b\x84\x80\x80\x80\x00\x00\x41\x09\x0b\x84\x80\x80\x80\x00\x00\x41\x0a\x0b\x84\x80\x80\x80\x00\x00\x41\x0b\x0b\x84\x80\x80\x80\x00\x00\x41\x0c\x0b\x84\x80\x80\x80\x00\x00\x41\x0d\x0b\x84\x80\x80\x80\x00\x00\x41\x0e\x0b\x84\x80\x80\x80\x00\x00\x41\x0f\x0b\x87\x80\x80\x80\x00\x00\x20\x00\x11\x00\x00\x0b\x8c\x80\x80\x80\x00\x00\x20\x00\x41\x08\x20\x01\xfc\x0c\x00\x00\x0b");
+
+// table_init.wast:1594
+assert_trap(() => call($20, "run", [0, -4]));
+
+// table_init.wast:1595
+assert_return(() => call($20, "test", [0]), 8);
+
+// table_init.wast:1596
+assert_return(() => call($20, "test", [1]), 9);
+
+// table_init.wast:1597
+assert_return(() => call($20, "test", [2]), 10);
+
+// table_init.wast:1598
+assert_return(() => call($20, "test", [3]), 11);
+
+// table_init.wast:1599
+assert_return(() => call($20, "test", [4]), 12);
+
+// table_init.wast:1600
+assert_return(() => call($20, "test", [5]), 13);
+
+// table_init.wast:1601
+assert_return(() => call($20, "test", [6]), 14);
+
+// table_init.wast:1602
+assert_return(() => call($20, "test", [7]), 15);
diff --git a/deps/v8/test/mjsunit/wasm/bulk-memory.js b/deps/v8/test/mjsunit/wasm/bulk-memory.js
index b02f9ea56c..d783c6bf59 100644
--- a/deps/v8/test/mjsunit/wasm/bulk-memory.js
+++ b/deps/v8/test/mjsunit/wasm/bulk-memory.js
@@ -192,11 +192,8 @@ function getMemoryFill(mem) {
const tableIndex = 0;
const isGlobal = false;
- const isImport = true;
- builder.addElementSegment(
- tableIndex, 2, isGlobal, [f.index, f.index], isImport);
- builder.addElementSegment(
- tableIndex, 0, isGlobal, [f.index, f.index], isImport);
+ builder.addElementSegment(tableIndex, 2, isGlobal, [f.index, f.index]);
+ builder.addElementSegment(tableIndex, 0, isGlobal, [f.index, f.index]);
assertEquals(null, table.get(0));
assertEquals(null, table.get(1));
@@ -223,9 +220,7 @@ function getMemoryFill(mem) {
const tableIndex = 0;
const isGlobal = false;
- const isImport = true;
- builder.addElementSegment(
- tableIndex, 0, isGlobal, [f.index, f.index], isImport);
+ builder.addElementSegment(tableIndex, 0, isGlobal, [f.index, f.index]);
builder.addDataSegment(0, [42]);
// Instantiation fails, but still modifies the table. The memory is not
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js
index 6778fc9c4c..5ca20cbb95 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-async-compilation.js
@@ -11,9 +11,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
.addBody([kExprGetLocal, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierOptimized,
- kCompilationHintTierBaseline)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierOptimized,
+ kCompilationHintTierBaseline)
.exportFunc();
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.compile(bytes)
@@ -27,9 +27,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_l)
.addBody([kExprGetLocal, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierDefault,
- kCompilationHintTierDefault)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
.exportFunc();
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.compile(bytes)
@@ -50,9 +50,23 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
.addBody([kExprGetLocal, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierDefault,
- kCompilationHintTierDefault)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
+ .exportFunc();
+ let bytes = builder.toBuffer();
+ assertPromiseResult(WebAssembly.instantiate(bytes)
+ .then(({module, instance}) => assertEquals(42, instance.exports.id(42))));
+})();
+
+(function testCompileLazyBaselineEagerTopTierModule() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('id', kSig_i_i)
+ .addBody([kExprGetLocal, 0])
+ .setCompilationHint(kCompilationHintStrategyLazyBaselineEagerTopTier,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
.exportFunc();
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.instantiate(bytes)
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js b/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
index dfb6e49ba1..e39e15feeb 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-decoder.js
@@ -7,15 +7,16 @@
load('test/mjsunit/wasm/wasm-module-builder.js');
(function testDecodeCompilationHintsSectionNoDowngrade() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow', kSig_i_i)
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprCallFunction, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierOptimized,
- kCompilationHintTierBaseline)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierOptimized,
+ kCompilationHintTierBaseline)
.exportFunc();
assertThrows(() => builder.instantiate({mod: {pow: Math.pow}}),
WebAssembly.CompileError,
@@ -24,15 +25,16 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
})();
(function testDecodeCompilationHintsSectionNoTiering() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow', kSig_i_i)
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprCallFunction, 0])
- .giveCompilationHint(kCompilationHintStrategyDefault,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter)
+ .setCompilationHint(kCompilationHintStrategyDefault,
+ kCompilationHintTierInterpreter,
+ kCompilationHintTierInterpreter)
.exportFunc();
builder.addFunction('upow2', kSig_i_i)
.addBody([kExprGetLocal, 0,
@@ -47,6 +49,7 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
})();
(function testDecodeCompilationHintsSectionUpgrade() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addImport('mod', 'pow', kSig_i_ii);
builder.addFunction('upow2', kSig_i_i)
@@ -61,66 +64,67 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprCallFunction, 0])
- .giveCompilationHint(kCompilationHintStrategyEager,
- kCompilationHintTierBaseline,
- kCompilationHintTierOptimized)
+ .setCompilationHint(kCompilationHintStrategyEager,
+ kCompilationHintTierBaseline,
+ kCompilationHintTierOptimized)
.exportFunc();
let instance = builder.instantiate({mod: {pow: Math.pow}});
assertEquals(27, instance.exports.upow(3))
})();
(function testDecodeCompilationHintsSectionNoImport() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('sq', kSig_i_i)
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprI32Mul])
- .giveCompilationHint(kCompilationHintStrategyEager,
- kCompilationHintTierDefault,
- kCompilationHintTierOptimized)
+ .setCompilationHint(kCompilationHintStrategyEager,
+ kCompilationHintTierDefault,
+ kCompilationHintTierOptimized)
.exportFunc();
let instance = builder.instantiate();
assertEquals(9, instance.exports.sq(3))
})();
(function testDecodeCompilationHintsSectionNoExport() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('sq', kSig_i_i)
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprI32Mul])
- .giveCompilationHint(kCompilationHintStrategyEager,
- kCompilationHintTierDefault,
- kCompilationHintTierOptimized)
+ .setCompilationHint(kCompilationHintStrategyEager,
+ kCompilationHintTierDefault,
+ kCompilationHintTierOptimized)
builder.instantiate();
})();
(function testDecodeCompilationHintsSectionTopTierDefault() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('sq', kSig_i_i)
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprI32Mul])
- .giveCompilationHint(kCompilationHintStrategyEager,
- kCompilationHintTierOptimized,
- kCompilationHintTierDefault)
+ .setCompilationHint(kCompilationHintStrategyEager,
+ kCompilationHintTierOptimized,
+ kCompilationHintTierDefault)
.exportFunc();
let instance = builder.instantiate();
assertEquals(9, instance.exports.sq(3))
})();
-(function testDecodeCompilationHintsInvalidStrategy() {
+(function testDecodeCompilationHintsLazyBaselineEagerTopTier() {
+ print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction('sq', kSig_i_i)
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprI32Mul])
- .giveCompilationHint(0x3,
- kCompilationHintTierOptimized,
- kCompilationHintTierDefault)
+ .setCompilationHint(kCompilationHintStrategyLazyBaselineEagerTopTier,
+ kCompilationHintTierOptimized,
+ kCompilationHintTierDefault)
.exportFunc();
- assertThrows(() => builder.instantiate(),
- WebAssembly.CompileError,
- "WebAssembly.Module(): Invalid compilation hint 0xf " +
- "(unknown strategy) @+49");
+ builder.instantiate();
})();
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js b/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js
index 76dad56a60..553426db08 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-ignored.js
@@ -12,9 +12,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprCallFunction, 0])
- .giveCompilationHint(kCompilationHintStrategyDefault,
- kCompilationHintTierInterpreter,
- kCompilationHintTierInterpreter)
+ .setCompilationHint(kCompilationHintStrategyDefault,
+ kCompilationHintTierInterpreter,
+ kCompilationHintTierInterpreter)
.exportFunc();
let instance = builder.instantiate({mod: {pow: Math.pow}});
assertEquals(27, instance.exports.upow(3))
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js b/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js
new file mode 100644
index 0000000000..f0a46b9ec7
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-interpreter.js
@@ -0,0 +1,113 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Flags: --experimental-wasm-compilation-hints --wasm-lazy-validation
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function testInterpreterCallsLazyFunctionInOtherInstance() {
+ print(arguments.callee.name);
+ let builder0 = new WasmModuleBuilder();
+ builder0.addFunction("getX", kSig_i_v)
+ .addBody([kExprI32Const, 42])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierBaseline,
+ kCompilationHintTierBaseline)
+ .exportFunc();
+ let builder1 = new WasmModuleBuilder();
+ builder1.addImport("otherModule", "getX", kSig_i_v);
+ builder1.addFunction("plusX", kSig_i_i)
+ .addBody([kExprCallFunction, 0,
+ kExprGetLocal, 0,
+ kExprI32Add])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierInterpreter,
+ kCompilationHintTierInterpreter)
+ .exportFunc();
+ let instance0 = builder0.instantiate();
+ let instance1 = builder1.instantiate(
+ {otherModule: {getX: instance0.exports.getX}});
+ assertEquals(46, instance1.exports.plusX(4));
+})();
+
+(function testInterpreterCallsLazyBadFunctionInOtherInstance() {
+ print(arguments.callee.name);
+ let builder0 = new WasmModuleBuilder();
+ builder0.addFunction("getX", kSig_i_v)
+ .addBody([kExprI64Const, 42])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierBaseline,
+ kCompilationHintTierBaseline)
+ .exportFunc();
+ let builder1 = new WasmModuleBuilder();
+ builder1.addImport("otherModule", "getX", kSig_i_v);
+ builder1.addFunction("plusX", kSig_i_i)
+ .addBody([kExprCallFunction, 0,
+ kExprGetLocal, 0,
+ kExprI32Add])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierInterpreter,
+ kCompilationHintTierInterpreter)
+ .exportFunc();
+ let instance0 = builder0.instantiate();
+ let instance1 = builder1.instantiate(
+ {otherModule: {getX: instance0.exports.getX}});
+ assertThrows(() => instance1.exports.plusX(4),
+ WebAssembly.CompileError,
+ "Compiling function #0:\"getX\" failed: type error in " +
+ "merge[0] (expected i32, got i64) @+57");
+})();
+
+(function testInterpreterCallsLazyFunctionThroughIndirection() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+ let add = builder.addFunction('add', sig_i_ii)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprI32Add])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierInterpreter,
+ kCompilationHintTierInterpreter);
+ builder.appendToTable([add.index]);
+ builder.addFunction('main', kSig_i_iii)
+ .addBody([// Call indirect #0 with args <#1, #2>.
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_i_ii, kTableZero])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierInterpreter,
+ kCompilationHintTierInterpreter)
+ .exportFunc();
+ assertEquals(99, builder.instantiate().exports.main(0, 22, 77));
+})();
+
+(function testInterpreterCallsLazyBadFunctionThroughIndirection() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let sig_i_ii = builder.addType(kSig_i_ii);
+ let add = builder.addFunction('add', sig_i_ii)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprI64Add])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierInterpreter,
+ kCompilationHintTierInterpreter);
+ builder.appendToTable([add.index]);
+ builder.addFunction('main', kSig_i_iii)
+ .addBody([// Call indirect #0 with args <#1, #2>.
+ kExprGetLocal, 1,
+ kExprGetLocal, 2,
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_i_ii, kTableZero])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierInterpreter,
+ kCompilationHintTierInterpreter)
+ .exportFunc();
+ assertThrows(() => builder.instantiate().exports.main(0, 22, 77),
+ WebAssembly.CompileError,
+ "Compiling function #0:\"add\" failed: i64.add[1] expected " +
+ "type i64, found local.get of type i32 @+83");
+})();
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-lazy-validation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-lazy-validation.js
new file mode 100644
index 0000000000..e6958cb554
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-lazy-validation.js
@@ -0,0 +1,38 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Flags: --experimental-wasm-compilation-hints --wasm-lazy-validation
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function testInstantiateLazyValidation() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('id', kSig_i_i)
+ .addBody([kExprGetLocal, 0,
+ kExprI64Const, 1,
+ kExprI32Mul])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierBaseline,
+ kCompilationHintTierBaseline)
+ .exportFunc();
+
+ let expected_error_msg = "Compiling function #0:\"id\" failed: i32.mul[1] " +
+ "expected type i32, found i64.const of type i64 " +
+ "@+56";
+ let assertCompileErrorOnInvocation = function(instance) {
+ assertThrows(() => instance.exports.id(3),
+ WebAssembly.CompileError,
+ expected_error_msg)
+ };
+
+ // Synchronous case.
+ let instance = builder.instantiate();
+ assertCompileErrorOnInvocation(instance);
+
+ // Asynchronous case.
+ let bytes = builder.toBuffer();
+ assertPromiseResult(WebAssembly.instantiate(bytes)
+ .then(p => assertCompileErrorOnInvocation(p.instance)));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
index dcc795e54e..f48169fa0a 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-compilation.js
@@ -18,9 +18,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprCallFunction, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierDefault,
- kCompilationHintTierDefault)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
.exportFunc();
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes),
@@ -40,16 +40,17 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprCallFunction, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierOptimized,
- kCompilationHintTierBaseline)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierOptimized,
+ kCompilationHintTierBaseline)
.exportFunc();
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes),
{mod: {pow: Math.pow}})
.then(assertUnreachable,
- error => assertEquals("WebAssembly.compile(): Invalid compilation " +
- "hint 0x2d (forbidden downgrade) @+78",
+ error => assertEquals("WebAssembly.instantiateStreaming(): Invalid " +
+ "compilation hint 0x2d (forbidden downgrade) " +
+ "@+78",
error.message)));
})();
@@ -65,16 +66,18 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprCallFunction, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierDefault,
- kCompilationHintTierDefault)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
.exportFunc();
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes),
{mod: {pow: Math.pow}})
.then(assertUnreachable,
- error => assertEquals("WebAssembly.compile(): call[1] expected " +
- "type f32, found get_local of type i32 @+94", error.message)));
+ error => assertEquals("WebAssembly.instantiateStreaming(): call[1] " +
+ "expected type f32, found local.get of type " +
+ "i32 @+94",
+ error.message)));
})();
(function testInstantiateStreamingEmptyModule() {
@@ -94,9 +97,27 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
.addBody([kExprGetLocal, 0,
kExprGetLocal, 0,
kExprCallFunction, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierDefault,
- kCompilationHintTierDefault)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
+ .exportFunc();
+ let bytes = builder.toBuffer();
+ assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes),
+ {mod: {pow: Math.pow}})
+ .then(({module, instance}) => assertEquals(27, instance.exports.upow(3))));
+})();
+
+(function testInstantiateStreamingLazyBaselineModule() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addImport('mod', 'pow', kSig_i_ii);
+ builder.addFunction('upow', kSig_i_i)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprCallFunction, 0])
+ .setCompilationHint(kCompilationHintStrategyLazyBaselineEagerTopTier,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
.exportFunc();
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes),
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-lazy-validation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-lazy-validation.js
new file mode 100644
index 0000000000..6db4c0e328
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-streaming-lazy-validation.js
@@ -0,0 +1,33 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Flags: --experimental-wasm-compilation-hints --wasm-test-streaming --wasm-lazy-validation
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+(function testInstantiateStreamingLazyValidation() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('id', kSig_i_i)
+ .addBody([kExprGetLocal, 0,
+ kExprI64Const, 1,
+ kExprI32Mul])
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
+ .exportFunc();
+
+ let expected_error_msg = "Compiling function #0:\"id\" failed: i32.mul[1] " +
+ "expected type i32, found i64.const of type i64 " +
+ "@+56";
+ let assertCompileErrorOnInvocation = function(instance) {
+ assertThrows(() => instance.exports.id(3),
+ WebAssembly.CompileError,
+ expected_error_msg)
+ };
+
+ let bytes = builder.toBuffer();
+ assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes))
+ .then(({module, instance}) => assertCompileErrorOnInvocation(instance)));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js b/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js
index 0212ae0d66..6c4364b6d3 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-hints-sync-compilation.js
@@ -11,9 +11,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
.addBody([kExprGetLocal, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierOptimized,
- kCompilationHintTierBaseline)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierOptimized,
+ kCompilationHintTierBaseline)
.exportFunc();
assertThrows(() => builder.toModule(),
WebAssembly.CompileError,
@@ -26,9 +26,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_l)
.addBody([kExprGetLocal, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierDefault,
- kCompilationHintTierDefault)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
.exportFunc();
assertThrows(() => builder.toModule(),
WebAssembly.CompileError,
@@ -47,9 +47,21 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
let builder = new WasmModuleBuilder();
builder.addFunction('id', kSig_i_i)
.addBody([kExprGetLocal, 0])
- .giveCompilationHint(kCompilationHintStrategyLazy,
- kCompilationHintTierDefault,
- kCompilationHintTierDefault)
+ .setCompilationHint(kCompilationHintStrategyLazy,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
+ .exportFunc();
+ assertEquals(42, builder.instantiate().exports.id(42));
+})();
+
+(function testCompileLazyBaselineEagerTopTierModule() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('id', kSig_i_i)
+ .addBody([kExprGetLocal, 0])
+ .setCompilationHint(kCompilationHintStrategyLazyBaselineEagerTopTier,
+ kCompilationHintTierDefault,
+ kCompilationHintTierDefault)
.exportFunc();
assertEquals(42, builder.instantiate().exports.id(42));
})();
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index eceb5b00fb..c95e4d05b7 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -239,7 +239,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
.exportAs("main");
builder.addImportedTable("z", "table", kTableSize, kTableSize);
- builder.addElementSegment(0, 1, false, [f2.index], true);
+ builder.addElementSegment(0, 1, false, [f2.index]);
var m2_bytes = builder.toBuffer();
var m2 = new WebAssembly.Module(m2_bytes);
diff --git a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
index 48e3c85127..65e7a84c45 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions-anyref.js
@@ -98,3 +98,48 @@ load("test/mjsunit/wasm/exceptions-utils.js");
assertEquals(2.3, instance.exports.throw_catch_param(2.3));
assertEquals("str", instance.exports.throw_catch_param("str"));
})();
+
+// Test throwing/catching a function reference type value.
+(function TestThrowCatchAnyFunc() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_a);
+ builder.addFunction("throw_catch_local", kSig_r_v)
+ .addLocals({anyfunc_count: 1})
+ .addBody([
+ kExprTry, kWasmAnyFunc,
+ kExprGetLocal, 0,
+ kExprThrow, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+
+ assertEquals(null, instance.exports.throw_catch_local());
+})();
+
+// Test throwing/catching an encapsulated exception type value.
+(function TestThrowCatchExceptRef() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addException(kSig_v_e);
+ builder.addFunction("throw_catch_param", kSig_e_e)
+ .addBody([
+ kExprTry, kWasmExceptRef,
+ kExprGetLocal, 0,
+ kExprThrow, except,
+ kExprCatch,
+ kExprBrOnExn, 0, except,
+ kExprRethrow,
+ kExprEnd,
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ let e = new Error("my encapsulated error");
+
+ assertEquals(e, instance.exports.throw_catch_param(e));
+ assertEquals(1, instance.exports.throw_catch_param(1));
+ assertEquals(2.3, instance.exports.throw_catch_param(2.3));
+ assertEquals("str", instance.exports.throw_catch_param("str"));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-table.js
index a7e347b37c..098d03d4d6 100644
--- a/deps/v8/test/mjsunit/wasm/import-table.js
+++ b/deps/v8/test/mjsunit/wasm/import-table.js
@@ -41,8 +41,8 @@ let kTableSize = 50;
let f17 = addConstFunc(builder, 17);
builder.addExport("f15", f15);
builder.addExport("f17", f17);
- builder.addElementSegment(0, 15, false, [f15], true);
- builder.addElementSegment(0, 1, false, [call.index], true);
+ builder.addElementSegment(0, 15, false, [f15]);
+ builder.addElementSegment(0, 1, false, [call.index]);
var mod1 = builder.toModule();
}
@@ -62,10 +62,10 @@ let kTableSize = 50;
])
.exportAs("call");
let f26 = addConstFunc(builder, 26);
- builder.addElementSegment(0, 17, false, [f17], true);
- builder.addElementSegment(0, 21, false, [f21], true);
- builder.addElementSegment(0, 26, false, [f26], true);
- builder.addElementSegment(0, 5, false, [call.index], true);
+ builder.addElementSegment(0, 17, false, [f17]);
+ builder.addElementSegment(0, 21, false, [f21]);
+ builder.addElementSegment(0, 26, false, [f26]);
+ builder.addElementSegment(0, 5, false, [call.index]);
var mod2 = builder.toModule();
}
@@ -113,8 +113,8 @@ function addConstFuncUsingGlobal(builder, val) {
let f18 = addConstFuncUsingGlobal(builder, 18);
builder.addExport("f14", f14);
builder.addExport("f18", f18);
- builder.addElementSegment(0, 14, false, [f14], true);
- builder.addElementSegment(0, 1, false, [call.index], true);
+ builder.addElementSegment(0, 14, false, [f14]);
+ builder.addElementSegment(0, 1, false, [call.index]);
var mod1 = builder.toModule();
}
@@ -134,10 +134,10 @@ function addConstFuncUsingGlobal(builder, val) {
])
.exportAs("call");
let f28 = addConstFuncUsingGlobal(builder, 28);
- builder.addElementSegment(0, 18, false, [f18], true);
- builder.addElementSegment(0, 22, false, [f22], true);
- builder.addElementSegment(0, 28, false, [f28], true);
- builder.addElementSegment(0, 5, false, [call.index], true);
+ builder.addElementSegment(0, 18, false, [f18]);
+ builder.addElementSegment(0, 22, false, [f22]);
+ builder.addElementSegment(0, 28, false, [f28]);
+ builder.addElementSegment(0, 5, false, [call.index]);
var mod2 = builder.toModule();
}
@@ -193,8 +193,8 @@ function addConstFuncUsingMemory(builder, val) {
let f19 = addConstFuncUsingMemory(builder, 19);
builder.addExport("f13", f13);
builder.addExport("f19", f19);
- builder.addElementSegment(0, 13, false, [f13], true);
- builder.addElementSegment(0, 1, false, [call.index], true);
+ builder.addElementSegment(0, 13, false, [f13]);
+ builder.addElementSegment(0, 1, false, [call.index]);
var mod1 = builder.toModule();
}
@@ -216,10 +216,10 @@ function addConstFuncUsingMemory(builder, val) {
])
.exportAs("call");
let f29 = addConstFuncUsingMemory(builder, 29);
- builder.addElementSegment(0, 19, false, [f19], true);
- builder.addElementSegment(0, 23, false, [f23], true);
- builder.addElementSegment(0, 29, false, [f29], true);
- builder.addElementSegment(0, 5, false, [call.index], true);
+ builder.addElementSegment(0, 19, false, [f19]);
+ builder.addElementSegment(0, 23, false, [f23]);
+ builder.addElementSegment(0, 29, false, [f29]);
+ builder.addElementSegment(0, 5, false, [call.index]);
var mod2 = builder.toModule();
}
diff --git a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
index d4947313b3..414ca19c99 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-call-non-zero-table.js
@@ -68,11 +68,11 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
// We want to crash if we call through the table with index 0.
builder.addElementSegment(placeholder, 0, false,
- [f_unreachable, f_unreachable, f_unreachable], false);
- builder.addElementSegment(table1, 0, false, [f1, f2, f3], false);
+ [f_unreachable, f_unreachable, f_unreachable]);
+ builder.addElementSegment(table1, 0, false, [f1, f2, f3]);
// Keep one slot in table2 uninitialized. We should trap if we call it.
builder.addElementSegment(table2, 1, false,
- [f_unreachable, f_unreachable, f4, f5], false);
+ [f_unreachable, f_unreachable, f4, f5]);
const instance = builder.instantiate();
@@ -126,7 +126,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
kExprCallIndirect, sig_index, t1])
.exportAs('call');
- builder.addElementSegment(t1, g, true, [f1.index], true);
+ builder.addElementSegment(t1, g, true, [f1.index]);
const base1 = 3;
const base2 = 5;
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index b07f09e108..58df978859 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -316,7 +316,7 @@ function js_div(a, b) { return (a / b) | 0; }
.exportAs("main");
builder.addImportedTable("z", "table", kTableSize, kTableSize);
- builder.addElementSegment(0, 1, false, [f2.index], true);
+ builder.addElementSegment(0, 1, false, [f2.index]);
var m2 = new WebAssembly.Module(builder.toBuffer());
@@ -586,7 +586,7 @@ function js_div(a, b) { return (a / b) | 0; }
builder1.setName('module_1');
builder1.addFunction('f', kSig_i_i).addBody([kExprGetLocal, 0]);
builder1.addImportedTable('z', 'table');
- builder1.addElementSegment(0, 0, false, [0], true);
+ builder1.addElementSegment(0, 0, false, [0]);
let module1 = new WebAssembly.Module(builder1.toBuffer());
let instance1 =
new WebAssembly.Instance(module1, {z: {table: instance0.exports.table}});
@@ -624,7 +624,7 @@ function js_div(a, b) { return (a / b) | 0; }
builder = new WasmModuleBuilder();
builder.addFunction('main', kSig_i_v).addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0]);
builder.addImportedTable('', 'table');
- builder.addElementSegment(0, 0, false, [0], true);
+ builder.addElementSegment(0, 0, false, [0]);
builder.addImportedMemory('', 'memory', 1);
@@ -812,6 +812,7 @@ function js_div(a, b) { return (a / b) | 0; }
let i2 = (() => {
let builder = new WasmModuleBuilder();
+ builder.addTable(kWasmAnyFunc, 4);
builder.addImport("q", "f2", kSig_i_v);
builder.addImport("q", "f1", kSig_i_v);
builder.addFunction("main", kSig_i_i)
@@ -867,6 +868,7 @@ function js_div(a, b) { return (a / b) | 0; }
let main = (() => {
let builder = new WasmModuleBuilder();
builder.addMemory(1, 1, false);
+ builder.addTable(kWasmAnyFunc, 4);
builder.addImport("q", "f1", kSig_i_v);
builder.addImport("q", "f2", kSig_i_v);
builder.addImport("q", "f3", kSig_i_v);
diff --git a/deps/v8/test/mjsunit/wasm/interpreter.js b/deps/v8/test/mjsunit/wasm/interpreter.js
index aa479ac0b0..970e71a646 100644
--- a/deps/v8/test/mjsunit/wasm/interpreter.js
+++ b/deps/v8/test/mjsunit/wasm/interpreter.js
@@ -537,7 +537,7 @@ function checkStack(stack, expected_lines) {
const builder1 = new WasmModuleBuilder();
builder1.addFunction('main', kSig_i_v).addBody([kExprUnreachable]);
builder1.addImportedTable('z', 'table');
- builder1.addElementSegment(0, 0, false, [0], true);
+ builder1.addElementSegment(0, 0, false, [0]);
const module1 = new WebAssembly.Module(builder1.toBuffer());
const instance1 =
new WebAssembly.Instance(module1, {z: {table: instance0.exports.table}});
diff --git a/deps/v8/test/mjsunit/wasm/js-api.js b/deps/v8/test/mjsunit/wasm/js-api.js
index 6b7db744d3..031f2e07ec 100644
--- a/deps/v8/test/mjsunit/wasm/js-api.js
+++ b/deps/v8/test/mjsunit/wasm/js-api.js
@@ -734,7 +734,7 @@ assertEq(tbl.length, 1);
assertEq(tbl.grow(1, 4), 1);
assertEq(tbl.length, 2);
assertEq(tbl.length, 2);
-assertThrows(() => tbl.grow(1), Error, /maximum table size exceeded/);
+assertThrows(() => tbl.grow(1), Error, /failed to grow table by \d+/);
assertThrows(
() => tbl.grow(Infinity), TypeError, /must be convertible to a valid number/);
assertThrows(
diff --git a/deps/v8/test/mjsunit/wasm/multi-table-element-section.js b/deps/v8/test/mjsunit/wasm/multi-table-element-section.js
index 0c12c98bdc..59a21efc18 100644
--- a/deps/v8/test/mjsunit/wasm/multi-table-element-section.js
+++ b/deps/v8/test/mjsunit/wasm/multi-table-element-section.js
@@ -6,71 +6,93 @@
load("test/mjsunit/wasm/wasm-module-builder.js");
-const value1 = 11;
-const value2 = 22;
-const value3 = 46;
-const value4 = 57;
-const value5 = 13;
-
-// The offsets for the initialization of tables. The segement for table2 should
-// overlap with the segment of table1, because table2 is actually the imported
-// table1.
-const offset1 = 2;
-const offset2 = offset1 + 1;
-const offset3 = 4;
-const offset4 = 1;
-
-const instance_for_import = (function() {
+(function TestInitMultipleTables() {
+ print(arguments.callee.name);
+
+ const value1 = 11;
+ const value2 = 22;
+ const value3 = 46;
+ const value4 = 57;
+ const value5 = 13;
+
+ // The offsets for the initialization of tables. The segement for table2 should
+ // overlap with the segment of table1, because table2 is actually the imported
+ // table1.
+ const offset1 = 2;
+ const offset2 = offset1 + 1;
+ const offset3 = 4;
+ const offset4 = 1;
+
+ const instance_for_import = (function () {
const builder_for_import = new WasmModuleBuilder();
const t1 = builder_for_import.addTable(kWasmAnyFunc, 15, 15)
- .exportAs("table").index;
+ .exportAs("table").index;
const f1 = builder_for_import.addFunction('f1', kSig_i_v)
- .addBody([kExprI32Const, value1]).index;
+ .addBody([kExprI32Const, value1]).index;
const f2 = builder_for_import.addFunction('f2', kSig_i_v)
- .addBody([kExprI32Const, value2]).index;
+ .addBody([kExprI32Const, value2]).index;
- builder_for_import.addElementSegment(t1, offset1, false, [f1, f2], false);
+ builder_for_import.addElementSegment(t1, offset1, false, [f1, f2]);
const instance_for_import = builder_for_import.instantiate();
const table1 = instance_for_import.exports.table;
assertEquals(value1, table1.get(offset1)());
assertEquals(value2, table1.get(offset1 + 1)());
return instance_for_import;
+ })();
+
+ const builder = new WasmModuleBuilder();
+
+ const t2 = builder.addImportedTable("exports", "table", 15, 15);
+ builder.addExportOfKind("table2", kExternalTable, t2);
+ const t3 = builder.addTable(kWasmAnyFunc, 10).exportAs("table3").index;
+ const t4 = builder.addTable(kWasmAnyFunc, 12).exportAs("table4").index;
+
+ const f3 = builder.addFunction('f3', kSig_i_v)
+ .addBody([kExprI32Const, value3]).index;
+ const f4 = builder.addFunction('f4', kSig_i_v)
+ .addBody([kExprI32Const, value4]).index;
+ const f5 = builder.addFunction('f5', kSig_i_v)
+ .addBody([kExprI32Const, value5]).index;
+
+
+ builder.addElementSegment(t2, offset2, false, [f3, f4]);
+ builder.addElementSegment(t3, offset3, false, [f5, f4]);
+ builder.addElementSegment(t4, offset4, false, [f3, f5]);
+ // Add one more overlapping offset
+ builder.addElementSegment(t4, offset4 + 1, false, [f4, f3]);
+
+ const instance = builder.instantiate(instance_for_import);
+ // table2 == table1
+ const table2 = instance.exports.table2;
+ const table3 = instance.exports.table3;
+ const table4 = instance.exports.table4;
+ // table1 == table2
+ assertEquals(value1, table2.get(offset1)());
+ assertEquals(value3, table2.get(offset2)());
+ assertEquals(value4, table2.get(offset2 + 1)());
+
+ assertEquals(value5, table3.get(offset3)());
+ assertEquals(value4, table3.get(offset3 + 1)());
+
+ assertEquals(value3, table4.get(offset4)());
+ assertEquals(value4, table4.get(offset4 + 1)());
+ assertEquals(value3, table4.get(offset4 + 2)());
})();
-const builder = new WasmModuleBuilder();
-
-const t2 = builder.addImportedTable("exports", "table", 15, 15);
-builder.addExportOfKind("table2", kExternalTable, t2);
-const t3 = builder.addTable(kWasmAnyFunc, 10).exportAs("table3").index;
-const t4 = builder.addTable(kWasmAnyFunc, 12).exportAs("table4").index;
-
-const f3 = builder.addFunction('f3', kSig_i_v)
- .addBody([kExprI32Const, value3]).index;
-const f4 = builder.addFunction('f4', kSig_i_v)
- .addBody([kExprI32Const, value4]).index;
-const f5 = builder.addFunction('f5', kSig_i_v)
- .addBody([kExprI32Const, value5]).index;
-
-
-builder.addElementSegment(t2, offset2, false, [f3, f4], false);
-builder.addElementSegment(t3, offset3, false, [f5, f4], false);
-builder.addElementSegment(t4, offset4, false, [f3, f5], false);
-// Add one more overlapping offset
-builder.addElementSegment(t4, offset4 + 1, false, [f4, f3], false);
-
-const instance = builder.instantiate(instance_for_import);
-// table2 == table1
-const table2 = instance.exports.table2;
-const table3 = instance.exports.table3;
-const table4 = instance.exports.table4;
-// table1 == table2
-assertEquals(value1, table2.get(offset1)());
-assertEquals(value3, table2.get(offset2)());
-assertEquals(value4, table2.get(offset2 + 1)());
-
-assertEquals(value5, table3.get(offset3)());
-assertEquals(value4, table3.get(offset3 + 1)());
-
-assertEquals(value3, table4.get(offset4)());
-assertEquals(value4, table4.get(offset4 + 1)());
-assertEquals(value3, table4.get(offset4 + 2)());
+(function TestAnyRefTableWithAnyFuncInit() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ const table = builder.addTable(kWasmAnyRef, 5).index;
+ builder.addExportOfKind("table", kExternalTable, table);
+ const f1 = builder.addFunction('f1', kSig_i_v)
+ .addBody([kExprI32Const, 11])
+ .exportFunc().index;
+ const f2 = builder.addFunction('f2', kSig_i_v)
+ .addBody([kExprI32Const, 22])
+ .exportFunc().index;
+
+ builder.addElementSegment(table, 1, false, [f1, f2]);
+ const instance = builder.instantiate();
+ assertEquals(instance.exports.table.get(1)(), 11);
+ assertEquals(instance.exports.table.get(2)(), 22);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/return-calls.js b/deps/v8/test/mjsunit/wasm/return-calls.js
index 55b295d7a1..22d2860df1 100644
--- a/deps/v8/test/mjsunit/wasm/return-calls.js
+++ b/deps/v8/test/mjsunit/wasm/return-calls.js
@@ -133,7 +133,9 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const sig_i_iii = builder.addType(kSig_i_iii);
let pick = builder.addImport("q", "pick", sig_i_iii);
- const tableIndex = 3; // Arbitrary location of import
+ builder.addTable(kWasmAnyFunc, 4);
+ // Arbitrary location in the table.
+ const tableIndex = 3;
builder.addElementSegment(0, tableIndex,false,[pick]);
diff --git a/deps/v8/test/mjsunit/wasm/streaming-api.js b/deps/v8/test/mjsunit/wasm/streaming-api.js
index e7ad3d6bfa..3decc1a70c 100644
--- a/deps/v8/test/mjsunit/wasm/streaming-api.js
+++ b/deps/v8/test/mjsunit/wasm/streaming-api.js
@@ -7,11 +7,11 @@
load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestCompileStreaming() {
- print(arguments.callee.name);
- let builder = new WasmModuleBuilder();
- builder.addFunction("main", kSig_i_i)
- .addBody([kExprGetLocal, 0])
- .exportAs("main");
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprGetLocal, 0])
+ .exportAs("main");
let bytes = builder.toBuffer();
assertPromiseResult(WebAssembly.compileStreaming(Promise.resolve(bytes)).then(
module => WebAssembly.instantiate(module)).then(
@@ -22,11 +22,11 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print(arguments.callee.name);
let builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_i_i)
- .addBody([kExprGetLocal, 0])
- .exportAs("main");
-let bytes = builder.toBuffer();
-assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes)).then(
- ({module, instance}) => assertEquals(5, instance.exports.main(5))));
+ .addBody([kExprGetLocal, 0])
+ .exportAs("main");
+ let bytes = builder.toBuffer();
+ assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes)).then(
+ ({module, instance}) => assertEquals(5, instance.exports.main(5))));
})();
(function TestCompileStreamingRejectedInputPromise() {
@@ -42,3 +42,26 @@ assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes)).the
assertUnreachable,
error => assertEquals(error, "myError"));
})();
+
+(function TestStreamingErrorMessage() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 0,
+ kExprF32Mul])
+ .exportAs("main");
+ let bytes = builder.toBuffer();
+ assertPromiseResult(WebAssembly.compileStreaming(Promise.resolve(bytes)),
+ assertUnreachable,
+ error => assertEquals("WebAssembly.compileStreaming(): Compiling " +
+ "function #0:\"main\" failed: f32.mul[1] expected " +
+ "type f32, found local.get of type i32 @+37",
+ error.message));
+ assertPromiseResult(WebAssembly.instantiateStreaming(Promise.resolve(bytes)),
+ assertUnreachable,
+ error => assertEquals("WebAssembly.instantiateStreaming(): Compiling " +
+ "function #0:\"main\" failed: f32.mul[1] expected " +
+ "type f32, found local.get of type i32 @+37",
+ error.message));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/table-access.js b/deps/v8/test/mjsunit/wasm/table-access.js
index 9a6f0ebc3a..3203b76d0b 100644
--- a/deps/v8/test/mjsunit/wasm/table-access.js
+++ b/deps/v8/test/mjsunit/wasm/table-access.js
@@ -117,8 +117,8 @@ const dummy_func = exports.set_table_func1;
const offset1 = 3;
const offset2 = 9;
- builder.addElementSegment(t1, offset1, false, [f1.index, f2.index], false);
- builder.addElementSegment(t2, offset2, false, [f3.index, f1.index], false);
+ builder.addElementSegment(t1, offset1, false, [f1.index, f2.index]);
+ builder.addElementSegment(t2, offset2, false, [f3.index, f1.index]);
const instance = builder.instantiate();
@@ -127,3 +127,29 @@ const dummy_func = exports.set_table_func1;
assertEquals(value3, instance.exports.get_t2(offset2)());
assertEquals(value1, instance.exports.get_t2(offset2 + 1)());
})();
+
+(function testRefFuncInTableIsCallable() {
+ print(arguments.callee.name);
+ const expected = 54;
+ const index = 3;
+ const builder = new WasmModuleBuilder();
+ const table_index = builder.addTable(kWasmAnyFunc, 15, 15).index;
+ const sig_index = builder.addType(kSig_i_v);
+ const function_index = builder.addFunction('hidden', sig_index)
+ .addBody([kExprI32Const, expected])
+ .index;
+
+ builder.addFunction('main', kSig_i_v)
+ .addBody([
+ kExprI32Const, index, // entry index
+ kExprRefFunc, function_index, // function reference
+ kExprSetTable, table_index, // --
+ kExprI32Const, index, // entry index
+ kExprCallIndirect, sig_index, table_index // --
+
+ ])
+ .exportFunc();
+
+ const instance = builder.instantiate();
+ assertEquals(expected, instance.exports.main());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/table-fill.js b/deps/v8/test/mjsunit/wasm/table-fill.js
new file mode 100644
index 0000000000..4f61eee4d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-fill.js
@@ -0,0 +1,200 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-anyref
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+function dummy_func(val) {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('dummy', kSig_i_v)
+ .addBody([kExprI32Const, val])
+ .exportAs('dummy');
+ return builder.instantiate().exports.dummy;
+}
+
+let kSig_v_iri = makeSig([kWasmI32, kWasmAnyRef, kWasmI32], []);
+let kSig_v_iai = makeSig([kWasmI32, kWasmAnyFunc, kWasmI32], []);
+let kSig_r_i = makeSig([kWasmI32], [kWasmAnyRef]);
+
+const builder = new WasmModuleBuilder();
+const size = 10;
+const maximum = size;
+const import_ref =
+ builder.addImportedTable('imp', 'table_ref', size, maximum, kWasmAnyRef);
+const import_func =
+ builder.addImportedTable('imp', 'table_func', size, maximum, kWasmAnyFunc);
+const internal_ref = builder.addTable(kWasmAnyRef, size, maximum).index;
+const internal_func = builder.addTable(kWasmAnyFunc, size, maximum).index;
+
+// Add fill and get functions for the anyref tables.
+for (index of [import_ref, internal_ref]) {
+ builder.addFunction(`fill${index}`, kSig_v_iri)
+ .addBody([
+ kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2, kNumericPrefix,
+ kExprTableFill, index
+ ])
+ .exportFunc();
+
+ builder.addFunction(`get${index}`, kSig_r_i)
+ .addBody([kExprGetLocal, 0, kExprGetTable, index])
+ .exportFunc();
+}
+
+// Add fill and call functions for the anyfunc tables.
+const sig_index = builder.addType(kSig_i_v);
+for (index of [import_func, internal_func]) {
+ builder.addFunction(`fill${index}`, kSig_v_iai)
+ .addBody([
+ kExprGetLocal, 0, kExprGetLocal, 1, kExprGetLocal, 2, kNumericPrefix,
+ kExprTableFill, index
+ ])
+ .exportFunc();
+
+ builder.addFunction(`call${index}`, kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprCallIndirect, sig_index, index])
+ .exportFunc();
+}
+
+const table_ref =
+ new WebAssembly.Table({element: 'anyref', initial: size, maximum: maximum});
+const table_func = new WebAssembly.Table(
+ {element: 'anyfunc', initial: size, maximum: maximum});
+
+const instance =
+ builder.instantiate({imp: {table_ref: table_ref, table_func: table_func}});
+
+function checkAnyRefTable(getter, start, count, value) {
+ for (i = 0; i < count; ++i) {
+ assertEquals(value, getter(start + i));
+ }
+}
+
+(function testAnyRefTableIsUninitialized() {
+ print(arguments.callee.name);
+
+ checkAnyRefTable(instance.exports[`get${import_ref}`], 0, size, null);
+ checkAnyRefTable(instance.exports[`get${internal_ref}`], 0, size, null);
+})();
+
+(function testAnyRefTableFill() {
+ print(arguments.callee.name);
+ // Fill table and check the content.
+ let start = 1;
+ let value = {foo: 23};
+ let count = 3;
+ instance.exports[`fill${import_ref}`](start, value, count);
+ checkAnyRefTable(instance.exports[`get${import_ref}`], start, count, value);
+ value = 'foo';
+ instance.exports[`fill${internal_ref}`](start, value, count);
+ checkAnyRefTable(instance.exports[`get${internal_ref}`], start, count, value);
+})();
+
+(function testAnyRefTableFillOOB() {
+ print(arguments.callee.name);
+ // Fill table out-of-bounds, check if the table got filled as much as
+ // possible.
+ let start = 7;
+ let value = {foo: 27};
+ // {maximum + 4} elements definitely don't fit into the table.
+ let count = maximum + 4;
+ assertTraps(
+ kTrapTableOutOfBounds,
+ () => instance.exports[`fill${import_ref}`](start, value, count));
+ checkAnyRefTable(
+ instance.exports[`get${import_ref}`], start, size - start, value);
+
+ value = 45;
+ assertTraps(
+ kTrapTableOutOfBounds,
+ () => instance.exports[`fill${internal_ref}`](start, value, count));
+ checkAnyRefTable(
+ instance.exports[`get${internal_ref}`], start, size - start, value);
+})();
+
+(function testAnyRefTableFillOOBCountZero() {
+ print(arguments.callee.name);
+ // Fill 0 elements at an oob position. This should trap.
+ let start = size + 32;
+ let value = 'bar';
+ assertTraps(
+ kTrapTableOutOfBounds,
+ () => instance.exports[`fill${import_ref}`](start, value, 0));
+ assertTraps(
+ kTrapTableOutOfBounds,
+ () => instance.exports[`fill${internal_ref}`](start, value, 0));
+})();
+
+function checkAnyFuncTable(call, start, count, value) {
+ for (i = 0; i < count; ++i) {
+ if (value) {
+ assertEquals(value, call(start + i));
+ } else {
+ assertTraps(kTrapFuncSigMismatch, () => call(start + i));
+ }
+ }
+}
+
+(function testAnyFuncTableIsUninitialized() {
+ print(arguments.callee.name);
+ // Check that the table is uninitialized.
+ checkAnyFuncTable(instance.exports[`call${import_func}`], 0, size);
+ checkAnyFuncTable(instance.exports[`call${internal_func}`], 0, size);
+})();
+
+(function testAnyFuncTableFill() {
+ print(arguments.callee.name);
+ // Fill and check the result.
+ let start = 1;
+ let value = 44;
+ let count = 3;
+ instance.exports[`fill${import_func}`](start, dummy_func(value), count);
+ checkAnyFuncTable(
+ instance.exports[`call${import_func}`], start, count, value);
+ value = 21;
+ instance.exports[`fill${internal_func}`](start, dummy_func(value), count);
+ checkAnyFuncTable(
+ instance.exports[`call${internal_func}`], start, count, value);
+})();
+
+(function testAnyFuncTableFillOOB() {
+ print(arguments.callee.name);
+ // Fill table out-of-bounds, check if the table got filled as much as
+ // possible.
+ let start = 7;
+ let value = 38;
+ // {maximum + 4} elements definitely don't fit into the table.
+ let count = maximum + 4;
+ assertTraps(
+ kTrapTableOutOfBounds,
+ () => instance.exports[`fill${import_func}`](
+ start, dummy_func(value), count));
+ checkAnyFuncTable(
+ instance.exports[`call${import_func}`], start, size - start, value);
+
+ value = 46;
+ assertTraps(
+ kTrapTableOutOfBounds,
+ () => instance.exports[`fill${internal_func}`](
+ start, dummy_func(value), count));
+ checkAnyFuncTable(
+ instance.exports[`call${internal_func}`], start, size - start, value);
+})();
+
+(function testAnyFuncTableFillOOBCountZero() {
+ print(arguments.callee.name);
+ // Fill 0 elements at an oob position. This should trap.
+ let start = size + 32;
+ let value = dummy_func(33);
+ assertTraps(
+ kTrapTableOutOfBounds,
+ () => instance.exports[`fill${import_func}`](start, null, 0));
+ assertTraps(
+ kTrapTableOutOfBounds,
+ () => instance.exports[`fill${internal_func}`](start, null, 0));
+
+ // Check that table.fill at position `size` is still valid.
+ instance.exports[`fill${import_func}`](size, null, 0);
+ instance.exports[`fill${internal_func}`](size, null, 0);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/table-get.js b/deps/v8/test/mjsunit/wasm/table-get.js
index c159735cc5..9ec0667dbc 100644
--- a/deps/v8/test/mjsunit/wasm/table-get.js
+++ b/deps/v8/test/mjsunit/wasm/table-get.js
@@ -11,7 +11,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const f1 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 11]);
const f2 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 22]);
const offset = 3;
- builder.addElementSegment(0, offset, false, [f1.index, f2.index], false);
+ builder.addElementSegment(0, offset, false, [f1.index, f2.index]);
const instance = builder.instantiate();
@@ -27,7 +27,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const f2 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 22])
.exportAs("f2");
const offset = 3;
- builder.addElementSegment(0, offset, false, [f1.index, f2.index], false);
+ builder.addElementSegment(0, offset, false, [f1.index, f2.index]);
const instance = builder.instantiate();
@@ -42,8 +42,8 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const f1 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 11]);
const f2 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 22]);
const offset = 3;
- builder.addElementSegment(0, offset, false, [f1.index, f2.index], false);
- builder.addElementSegment(0, offset + 1, false, [f1.index, f2.index], false);
+ builder.addElementSegment(0, offset, false, [f1.index, f2.index]);
+ builder.addElementSegment(0, offset + 1, false, [f1.index, f2.index]);
const instance = builder.instantiate();
@@ -58,7 +58,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const f1 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 11]).exportAs("f1");
const f2 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 22]);
const offset = 3;
- builder.addElementSegment(0, offset, false, [f1.index, f1.index, f1.index], false);
+ builder.addElementSegment(0, offset, false, [f1.index, f1.index, f1.index]);
const instance = builder.instantiate();
@@ -78,7 +78,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const f1 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 11]);
const f2 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 22]);
const offset = 3;
- builder.addElementSegment(0, offset, false, [f1.index, f1.index, f1.index], false);
+ builder.addElementSegment(0, offset, false, [f1.index, f1.index, f1.index]);
const instance = builder.instantiate();
@@ -97,7 +97,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const f1 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 11]);
const f2 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 22]);
const offset = 3;
- builder.addElementSegment(0, offset, false, [f1.index, f1.index, f1.index], false);
+ builder.addElementSegment(0, offset, false, [f1.index, f1.index, f1.index]);
const instance = builder.instantiate();
assertEquals(null, instance.exports.table.get(offset - 1));
@@ -111,7 +111,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const f1 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 11]);
const f2 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 22]);
const offset = 3;
- builder.addElementSegment(0, offset, false, [f1.index, f1.index, f1.index], false);
+ builder.addElementSegment(0, offset, false, [f1.index, f1.index, f1.index]);
const instance = builder.instantiate();
assertThrows(() => instance.exports.table.get(size + 3), RangeError);
@@ -125,7 +125,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
const import1 = builder.addImport("q", "fun", kSig_i_ii);
const f1 = builder.addFunction('f', kSig_i_v).addBody([kExprI32Const, 11]);
const offset = 3;
- builder.addElementSegment(0, offset, false, [f1.index, import1], false);
+ builder.addElementSegment(0, offset, false, [f1.index, import1]);
const instance = builder.instantiate({q: {fun: () => 33}});
assertEquals(33, instance.exports.table.get(offset + 1)());
diff --git a/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
new file mode 100644
index 0000000000..7a7d916dea
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/table-grow-from-wasm.js
@@ -0,0 +1,240 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --experimental-wasm-anyref
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function dummy_func(val) {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("dummy", kSig_i_v)
+ .addBody([kExprI32Const, val])
+ .exportAs("dummy");
+ return builder.instantiate().exports.dummy;
+}
+
+let kSig_i_ri = makeSig([kWasmAnyRef, kWasmI32], [kWasmI32]);
+let kSig_r_i = makeSig([kWasmI32], [kWasmAnyRef]);
+let kSig_i_ai = makeSig([kWasmAnyFunc, kWasmI32], [kWasmI32]);
+
+function testGrowInternalAnyRefTable(table_index) {
+ print(arguments.callee.name, table_index);
+
+ const builder = new WasmModuleBuilder();
+ const initial_size = 5;
+ // Add 10 tables, we only test one.
+ for (let i = 0; i < 10; ++i) {
+ builder.addTable(kWasmAnyRef, initial_size).index;
+ }
+ builder.addFunction('grow', kSig_i_ri)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kNumericPrefix, kExprTableGrow, table_index])
+ .exportFunc();
+
+ builder.addFunction('size', kSig_i_v)
+ .addBody([kNumericPrefix, kExprTableSize, table_index])
+ .exportFunc();
+
+ builder.addFunction('get', kSig_r_i)
+ .addBody([kExprGetLocal, 0, kExprGetTable, table_index])
+ .exportFunc();
+
+ const instance = builder.instantiate();
+
+ let size = initial_size;
+ assertEquals(null, instance.exports.get(size - 2));
+
+ function growAndCheck(element, grow_by) {
+ assertEquals(size, instance.exports.size());
+ assertTraps(kTrapTableOutOfBounds, () => instance.exports.get(size));
+ assertEquals(size, instance.exports.grow(element, grow_by));
+ for (let i = 0; i < grow_by; ++i) {
+ assertEquals(element, instance.exports.get(size + i));
+ }
+ size += grow_by;
+ }
+ growAndCheck("Hello", 3);
+ growAndCheck(undefined, 4);
+ growAndCheck(4, 2);
+ growAndCheck({Hello: "World"}, 3);
+ growAndCheck(null, 2);
+}
+
+testGrowInternalAnyRefTable(0);
+testGrowInternalAnyRefTable(7);
+testGrowInternalAnyRefTable(9);
+
+function testGrowInternalAnyFuncTable(table_index) {
+ print(arguments.callee.name, table_index);
+
+ const builder = new WasmModuleBuilder();
+ let size = 5;
+ for (let i = 0; i < 10; ++i) {
+ builder.addTable(kWasmAnyFunc, size).index;
+ }
+ builder.addFunction('grow', kSig_i_ai)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kNumericPrefix, kExprTableGrow, table_index])
+ .exportFunc();
+
+ builder.addFunction('size', kSig_i_v)
+ .addBody([kNumericPrefix, kExprTableSize, table_index])
+ .exportFunc();
+
+ const sig_index = builder.addType(kSig_i_v);
+ builder.addFunction('call', kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprCallIndirect, sig_index, table_index])
+ .exportFunc();
+
+ const instance = builder.instantiate();
+ assertTraps(kTrapFuncSigMismatch, () => instance.exports.call(size - 2));
+ function growAndCheck(element, grow_by) {
+ assertEquals(size, instance.exports.size());
+ assertTraps(kTrapFuncInvalid, () => instance.exports.call(size));
+ assertEquals(size, instance.exports.grow(dummy_func(element), grow_by));
+ for (let i = 0; i < grow_by; ++i) {
+ assertEquals(element, instance.exports.call(size + i));
+ }
+ size += grow_by;
+ }
+ growAndCheck(56, 3);
+ growAndCheck(12, 4);
+
+ assertEquals(size, instance.exports.grow(null, 1));
+ assertTraps(kTrapFuncSigMismatch, () => instance.exports.call(size));
+}
+
+testGrowInternalAnyFuncTable(0);
+testGrowInternalAnyFuncTable(7);
+testGrowInternalAnyFuncTable(9);
+
+(function testGrowImportedTable() {
+ print(arguments.callee.name);
+
+ let size = 3;
+ const builder = new WasmModuleBuilder();
+ const table_index = builder.addImportedTable("imp", "table", size, undefined, kWasmAnyRef);
+ builder.addFunction('grow', kSig_i_ri)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kNumericPrefix, kExprTableGrow, table_index])
+ .exportFunc();
+
+ builder.addFunction('size', kSig_i_v)
+ .addBody([kNumericPrefix, kExprTableSize, table_index])
+ .exportFunc();
+
+ const table = new WebAssembly.Table({element: "anyref", initial: size});
+
+ const instance = builder.instantiate({imp: {table: table}});
+ assertEquals(null, table.get(size - 2));
+
+ function growAndCheck(element, grow_by) {
+ assertEquals(size, instance.exports.size());
+ assertEquals(size, instance.exports.grow(element, grow_by));
+ for (let i = 0; i < grow_by; ++i) {
+ assertEquals(element, table.get(size + i));
+ }
+ size += grow_by;
+ }
+ growAndCheck("Hello", 3);
+ growAndCheck(undefined, 4);
+ growAndCheck(4, 2);
+ growAndCheck({ Hello: "World" }, 3);
+ growAndCheck(null, 2);
+})();
+
+(function testGrowTableOutOfBounds() {
+ print(arguments.callee.name);
+
+ const initial = 3;
+ const maximum = 10;
+ const max_delta = maximum - initial;
+ const invalid_delta = max_delta + 1;
+
+ const builder = new WasmModuleBuilder();
+ const import_ref = builder.addImportedTable(
+ "imp", "table_ref", initial, maximum, kWasmAnyRef);
+ const import_func = builder.addImportedTable(
+ "imp", "table_func", initial, maximum, kWasmAnyFunc);
+ const internal_ref = builder.addTable(kWasmAnyRef, initial, maximum).index;
+ const internal_func = builder.addTable(kWasmAnyFunc, initial, maximum).index;
+
+ builder.addFunction('grow_imported_ref', kSig_i_ri)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kNumericPrefix, kExprTableGrow, import_ref])
+ .exportFunc();
+
+ builder.addFunction('grow_imported_func', kSig_i_ai)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kNumericPrefix, kExprTableGrow, import_func])
+ .exportFunc();
+
+ builder.addFunction('grow_internal_ref', kSig_i_ri)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kNumericPrefix, kExprTableGrow, internal_ref])
+ .exportFunc();
+
+ builder.addFunction('grow_internal_func', kSig_i_ai)
+ .addBody([kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kNumericPrefix, kExprTableGrow, internal_func])
+ .exportFunc();
+
+ builder.addFunction('size_imported_ref', kSig_i_v)
+ .addBody([kNumericPrefix, kExprTableSize, import_ref])
+ .exportFunc();
+
+ builder.addFunction('size_imported_func', kSig_i_v)
+ .addBody([kNumericPrefix, kExprTableSize, import_func])
+ .exportFunc();
+
+ builder.addFunction('size_internal_ref', kSig_i_v)
+ .addBody([kNumericPrefix, kExprTableSize, internal_ref])
+ .exportFunc();
+
+ builder.addFunction('size_internal_func', kSig_i_v)
+ .addBody([kNumericPrefix, kExprTableSize, internal_func])
+ .exportFunc();
+
+ const table_ref = new WebAssembly.Table(
+ { element: "anyref", initial: initial, maximum: maximum });
+ const table_func = new WebAssembly.Table(
+ {element: "anyfunc", initial: initial, maximum: maximum});
+
+ const instance = builder.instantiate(
+ {imp: {table_ref: table_ref, table_func: table_func}});
+
+ const ref = { foo: "bar" };
+ const func = dummy_func(17);
+
+ // First check that growing out-of-bounds is not possible.
+ assertEquals(-1, instance.exports.grow_imported_ref(ref, invalid_delta));
+ assertEquals(initial, table_ref.length);
+ assertEquals(initial, instance.exports.size_imported_ref());
+ assertEquals(-1, instance.exports.grow_imported_func(func, invalid_delta));
+ assertEquals(initial, table_func.length);
+ assertEquals(initial, instance.exports.size_imported_func());
+ assertEquals(-1, instance.exports.grow_internal_ref(ref, invalid_delta));
+ assertEquals(initial, instance.exports.size_internal_ref());
+ assertEquals(-1, instance.exports.grow_internal_func(func, invalid_delta));
+ assertEquals(initial, instance.exports.size_internal_func());
+
+ // Check that we can grow to the maximum size.
+ assertEquals(initial, instance.exports.grow_imported_ref(ref, max_delta));
+ assertEquals(maximum, table_ref.length);
+ assertEquals(maximum, instance.exports.size_imported_ref());
+ assertEquals(initial, instance.exports.grow_imported_func(func, max_delta));
+ assertEquals(maximum, table_func.length);
+ assertEquals(maximum, instance.exports.size_imported_func());
+ assertEquals(initial, instance.exports.grow_internal_ref(ref, max_delta));
+ assertEquals(maximum, instance.exports.size_internal_ref());
+ assertEquals(initial, instance.exports.grow_internal_func(func, max_delta));
+ assertEquals(maximum, instance.exports.size_internal_func());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/table-grow.js b/deps/v8/test/mjsunit/wasm/table-grow.js
index 7defd42c2e..a8508b4bdd 100644
--- a/deps/v8/test/mjsunit/wasm/table-grow.js
+++ b/deps/v8/test/mjsunit/wasm/table-grow.js
@@ -179,8 +179,6 @@ let id = (() => { // identity exported function
let t = builder.addImport("q", "exp_ten", sig_i_v);
builder.setTableBounds(7, 35);
- // builder.addElementSegment(0, g1, true,
- // [funcs.mul.index, funcs.add.index, funcs.sub.index]);
builder.addElementSegment(0, g1, true, [a, i, t]);
builder.addExportOfKind("table", kExternalTable, 0);
@@ -266,7 +264,7 @@ let id = (() => { // identity exported function
kExprGetLocal, 0,
kExprCallIndirect, index_i_ii, kTableZero])
.exportAs("main");
- builder.addElementSegment(0, 0, false, [0], true);
+ builder.addElementSegment(0, 0, false, [0]);
return new WebAssembly.Module(builder.toBuffer());
}
diff --git a/deps/v8/test/mjsunit/wasm/type-reflection.js b/deps/v8/test/mjsunit/wasm/type-reflection.js
index 2e992edeb3..77a58bc261 100644
--- a/deps/v8/test/mjsunit/wasm/type-reflection.js
+++ b/deps/v8/test/mjsunit/wasm/type-reflection.js
@@ -7,7 +7,7 @@
load('test/mjsunit/wasm/wasm-module-builder.js');
(function TestInvalidArgumentToType() {
- ["abc", 123, {}].forEach(function(invalidInput) {
+ ["abc", 123, {}, _ => 0].forEach(function(invalidInput) {
assertThrows(
() => WebAssembly.Memory.type(invalidInput), TypeError,
"WebAssembly.Memory.type(): Argument 0 must be a WebAssembly.Memory");
@@ -17,6 +17,9 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
assertThrows(
() => WebAssembly.Global.type(invalidInput), TypeError,
"WebAssembly.Global.type(): Argument 0 must be a WebAssembly.Global");
+ assertThrows(
+ () => WebAssembly.Function.type(invalidInput), TypeError,
+ "WebAssembly.Function.type(): Argument 0 must be a WebAssembly.Function");
});
assertThrows(
@@ -34,6 +37,11 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
() => WebAssembly.Global.type(
new WebAssembly.Memory({initial:1})), TypeError,
"WebAssembly.Global.type(): Argument 0 must be a WebAssembly.Global");
+
+ assertThrows(
+ () => WebAssembly.Function.type(
+ new WebAssembly.Memory({initial:1})), TypeError,
+ "WebAssembly.Function.type(): Argument 0 must be a WebAssembly.Function");
})();
(function TestMemoryType() {
@@ -157,3 +165,93 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
assertEquals('anyfunc', type.element);
assertEquals(3, Object.getOwnPropertyNames(type).length);
})();
+
+(function TestFunctionConstructor() {
+ let toolong = new Array(1000 + 1);
+ let desc = Object.getOwnPropertyDescriptor(WebAssembly, 'Function');
+ assertEquals(typeof desc.value, 'function');
+ assertTrue(desc.writable);
+ assertFalse(desc.enumerable);
+ assertTrue(desc.configurable);
+ // TODO(7742): The length should probably be 2 instead.
+ assertEquals(WebAssembly.Function.length, 1);
+ assertEquals(WebAssembly.Function.name, 'Function');
+ assertThrows(
+ () => WebAssembly.Function(), TypeError, /must be invoked with 'new'/);
+ assertThrows(
+ () => new WebAssembly.Function(), TypeError,
+ /Argument 0 must be a function type/);
+ assertThrows(
+ () => new WebAssembly.Function({}), TypeError,
+ /Argument 0 must be a function type with 'parameters'/);
+ assertThrows(
+ () => new WebAssembly.Function({parameters:[]}), TypeError,
+ /Argument 0 must be a function type with 'results'/);
+ assertThrows(
+ () => new WebAssembly.Function({parameters:['foo'], results:[]}), TypeError,
+ /Argument 0 parameter type at index #0 must be a value type/);
+ assertThrows(
+ () => new WebAssembly.Function({parameters:[], results:['foo']}), TypeError,
+ /Argument 0 result type at index #0 must be a value type/);
+ assertThrows(
+ () => new WebAssembly.Function({parameters:toolong, results:[]}), TypeError,
+ /Argument 0 contains too many parameters/);
+ assertThrows(
+ () => new WebAssembly.Function({parameters:[], results:toolong}), TypeError,
+ /Argument 0 contains too many results/);
+ assertThrows(
+ () => new WebAssembly.Function({parameters:[], results:[]}), TypeError,
+ /Argument 1 must be a function/);
+ assertThrows(
+ () => new WebAssembly.Function({parameters:[], results:[]}, {}), TypeError,
+ /Argument 1 must be a function/);
+ assertDoesNotThrow(
+ () => new WebAssembly.Function({parameters:[], results:[]}, _ => 0));
+})();
+
+(function TestFunctionConstructedFunction() {
+ let fun = new WebAssembly.Function({parameters:[], results:[]}, _ => 0);
+ assertTrue(fun instanceof WebAssembly.Function);
+ assertTrue(fun instanceof Function);
+ assertTrue(fun instanceof Object);
+ assertSame(fun.__proto__, WebAssembly.Function.prototype);
+ assertSame(fun.__proto__.__proto__, Function.prototype);
+ assertSame(fun.__proto__.__proto__.__proto__, Object.prototype);
+ assertSame(fun.constructor, WebAssembly.Function);
+ assertEquals(typeof fun, 'function');
+ // TODO(7742): Enable once it is callable.
+ // assertDoesNotThrow(() => fun());
+})();
+
+(function TestFunctionExportedFunction() {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("fun", kSig_v_v).addBody([]).exportFunc();
+ let instance = builder.instantiate();
+ let fun = instance.exports.fun;
+ assertTrue(fun instanceof WebAssembly.Function);
+ assertTrue(fun instanceof Function);
+ assertTrue(fun instanceof Object);
+ assertSame(fun.__proto__, WebAssembly.Function.prototype);
+ assertSame(fun.__proto__.__proto__, Function.prototype);
+ assertSame(fun.__proto__.__proto__.__proto__, Object.prototype);
+ assertSame(fun.constructor, WebAssembly.Function);
+ assertEquals(typeof fun, 'function');
+ assertDoesNotThrow(() => fun());
+})();
+
+(function TestFunctionTypeOfExportedFunction() {
+ let testcases = [
+ [kSig_v_v, {parameters:[], results:[]}],
+ [kSig_v_i, {parameters:["i32"], results:[]}],
+ [kSig_i_l, {parameters:["i64"], results:["i32"]}],
+ [kSig_v_ddi, {parameters:["f64", "f64", "i32"], results:[]}],
+ [kSig_f_f, {parameters:["f32"], results:["f32"]}],
+ ];
+ testcases.forEach(function([sig, expected]) {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("fun", sig).addBody([kExprUnreachable]).exportFunc();
+ let instance = builder.instantiate();
+ let type = WebAssembly.Function.type(instance.exports.fun);
+ assertEquals(expected, type)
+ });
+})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 82420d6692..3f2f80ee2f 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -150,6 +150,7 @@ let kSig_f_d = makeSig([kWasmF64], [kWasmF32]);
let kSig_d_d = makeSig([kWasmF64], [kWasmF64]);
let kSig_r_r = makeSig([kWasmAnyRef], [kWasmAnyRef]);
let kSig_a_a = makeSig([kWasmAnyFunc], [kWasmAnyFunc]);
+let kSig_e_e = makeSig([kWasmExceptRef], [kWasmExceptRef]);
let kSig_i_r = makeSig([kWasmAnyRef], [kWasmI32]);
let kSig_v_r = makeSig([kWasmAnyRef], []);
let kSig_v_a = makeSig([kWasmAnyFunc], []);
@@ -389,6 +390,9 @@ let kExprMemoryFill = 0x0b;
let kExprTableInit = 0x0c;
let kExprElemDrop = 0x0d;
let kExprTableCopy = 0x0e;
+let kExprTableGrow = 0x0f;
+let kExprTableSize = 0x10;
+let kExprTableFill = 0x11;
// Atomic opcodes.
let kExprAtomicNotify = 0x00;
@@ -466,6 +470,7 @@ let kExprF32x4Min = 0x9e;
let kCompilationHintStrategyDefault = 0x00;
let kCompilationHintStrategyLazy = 0x01;
let kCompilationHintStrategyEager = 0x02;
+let kCompilationHintStrategyLazyBaselineEagerTopTier = 0x03;
let kCompilationHintTierDefault = 0x00;
let kCompilationHintTierInterpreter = 0x01;
let kCompilationHintTierBaseline = 0x02;
@@ -640,8 +645,8 @@ class WasmFunctionBuilder {
return this;
}
- giveCompilationHint(strategy, baselineTier, topTier) {
- this.module.giveCompilationHint(strategy, baselineTier, topTier, this.index);
+ setCompilationHint(strategy, baselineTier, topTier) {
+ this.module.setCompilationHint(strategy, baselineTier, topTier, this.index);
return this;
}
@@ -841,12 +846,12 @@ class WasmModuleBuilder {
return this;
}
- addImportedTable(module, name, initial, maximum) {
+ addImportedTable(module, name, initial, maximum, type) {
if (this.tables.length != 0) {
throw new Error('Imported tables must be declared before local ones');
}
let o = {module: module, name: name, kind: kExternalTable, initial: initial,
- maximum: maximum};
+ maximum: maximum, type: type || kWasmAnyFunctionTypeForm};
this.imports.push(o);
return this.num_imported_tables++;
}
@@ -871,7 +876,7 @@ class WasmModuleBuilder {
return this;
}
- giveCompilationHint(strategy, baselineTier, topTier, index) {
+ setCompilationHint(strategy, baselineTier, topTier, index) {
this.compilation_hints[index] = {strategy: strategy, baselineTier:
baselineTier, topTier: topTier};
return this;
@@ -892,28 +897,9 @@ class WasmModuleBuilder {
this.exports.push({name: name, kind: kExternalMemory, index: 0});
}
- addElementSegment(table, base, is_global, array, is_import = false) {
- if (this.tables.length + this.num_imported_tables == 0) {
- this.addTable(kWasmAnyFunc, 0);
- }
+ addElementSegment(table, base, is_global, array) {
this.element_segments.push({table: table, base: base, is_global: is_global,
array: array, is_active: true});
-
- // As a testing convenience, update the table length when adding an element
- // segment. If the table is imported, we can't do this because we don't
- // know how long the table actually is. If |is_global| is true, then the
- // base is a global index, instead of an integer offset, so we can't update
- // the table then either.
- if (!(is_import || is_global)) {
- var length = base + array.length;
- if (length > this.tables[0].initial_size) {
- this.tables[0].initial_size = length;
- }
- if (this.tables[0].has_max &&
- length > this.tables[0].max_size) {
- this.tables[0].max_size = length;
- }
- }
return this;
}
@@ -930,7 +916,15 @@ class WasmModuleBuilder {
if (this.tables.length == 0) {
this.addTable(kWasmAnyFunc, 0);
}
- return this.addElementSegment(0, this.tables[0].initial_size, false, array);
+ // Adjust the table to the correct size.
+ let table = this.tables[0];
+ const base = table.initial_size;
+ const table_size = base + array.length;
+ table.initial_size = table_size;
+ if (table.has_max && table_size > table.max_size) {
+ table.max_size = table_size;
+ }
+ return this.addElementSegment(0, base, false, array);
}
setTableBounds(min, max = undefined) {
@@ -997,7 +991,7 @@ class WasmModuleBuilder {
section.emit_u32v(imp.initial); // initial
if (has_max) section.emit_u32v(imp.maximum); // maximum
} else if (imp.kind == kExternalTable) {
- section.emit_u8(kWasmAnyFunctionTypeForm);
+ section.emit_u8(imp.type);
var has_max = (typeof imp.maximum) != "undefined";
section.emit_u8(has_max ? 1 : 0); // flags
section.emit_u32v(imp.initial); // initial
@@ -1084,8 +1078,15 @@ class WasmModuleBuilder {
f64_view[0] = global.init;
section.emit_bytes(f64_bytes_view);
break;
- case kWasmAnyRef:
case kWasmAnyFunc:
+ case kWasmAnyRef:
+ if (global.function_index !== undefined) {
+ section.emit_u8(kExprRefFunc);
+ section.emit_u32v(global.function_index);
+ } else {
+ section.emit_u8(kExprRefNull);
+ }
+ break;
case kWasmExceptRef:
section.emit_u8(kExprRefNull);
break;
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index 0cd6b7d7f6..611238d951 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -7,11 +7,12 @@
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
-#include "src/frames.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
+#include "src/heap/read-only-heap.h"
#include "src/heap/spaces.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
@@ -41,29 +42,60 @@ class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
void Free(void* p, size_t) override {}
};
+static void DumpKnownMap(i::Heap* heap, const char* space_name,
+ i::HeapObject object) {
#define RO_ROOT_LIST_CASE(type, name, CamelName) \
- if (n == NULL && o == roots.name()) n = #CamelName;
+ if (root_name == nullptr && object == roots.name()) root_name = #CamelName;
#define MUTABLE_ROOT_LIST_CASE(type, name, CamelName) \
- if (n == NULL && o == space->heap()->name()) n = #CamelName;
-static void DumpMaps(i::PagedSpace* space) {
- i::HeapObjectIterator it(space);
- i::ReadOnlyRoots roots(space->heap());
- for (i::HeapObject o = it.Next(); !o.is_null(); o = it.Next()) {
- if (!o->IsMap()) continue;
- i::Map m = i::Map::cast(o);
- const char* n = nullptr;
- intptr_t p = static_cast<intptr_t>(m.ptr()) & (i::Page::kPageSize - 1);
- int t = m->instance_type();
- READ_ONLY_ROOT_LIST(RO_ROOT_LIST_CASE)
- MUTABLE_ROOT_LIST(MUTABLE_ROOT_LIST_CASE)
- if (n == nullptr) continue;
- const char* sname = space->name();
- i::PrintF(" (\"%s\", 0x%05" V8PRIxPTR "): (%d, \"%s\"),\n", sname, p, t,
- n);
- }
-}
+ if (root_name == nullptr && object == heap->name()) root_name = #CamelName;
+
+ i::ReadOnlyRoots roots(heap);
+ const char* root_name = nullptr;
+ i::Map map = i::Map::cast(object);
+ intptr_t root_ptr =
+ static_cast<intptr_t>(map.ptr()) & (i::Page::kPageSize - 1);
+
+ READ_ONLY_ROOT_LIST(RO_ROOT_LIST_CASE)
+ MUTABLE_ROOT_LIST(MUTABLE_ROOT_LIST_CASE)
+
+ if (root_name == nullptr) return;
+ i::PrintF(" (\"%s\", 0x%05" V8PRIxPTR "): (%d, \"%s\"),\n", space_name,
+ root_ptr, map.instance_type(), root_name);
+
#undef MUTABLE_ROOT_LIST_CASE
#undef RO_ROOT_LIST_CASE
+}
+
+static void DumpKnownObject(i::Heap* heap, const char* space_name,
+ i::HeapObject object) {
+#define RO_ROOT_LIST_CASE(type, name, CamelName) \
+ if (root_name == nullptr && object == roots.name()) { \
+ root_name = #CamelName; \
+ root_index = i::RootIndex::k##CamelName; \
+ }
+#define ROOT_LIST_CASE(type, name, CamelName) \
+ if (root_name == nullptr && object == heap->name()) { \
+ root_name = #CamelName; \
+ root_index = i::RootIndex::k##CamelName; \
+ }
+
+ i::ReadOnlyRoots roots(heap);
+ const char* root_name = nullptr;
+ i::RootIndex root_index = i::RootIndex::kFirstSmiRoot;
+ intptr_t root_ptr = object.ptr() & (i::Page::kPageSize - 1);
+
+ STRONG_READ_ONLY_ROOT_LIST(RO_ROOT_LIST_CASE)
+ MUTABLE_ROOT_LIST(ROOT_LIST_CASE)
+
+ if (root_name == nullptr) return;
+ if (!i::RootsTable::IsImmortalImmovable(root_index)) return;
+
+ i::PrintF(" (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", space_name, root_ptr,
+ root_name);
+
+#undef ROOT_LIST_CASE
+#undef RO_ROOT_LIST_CASE
+}
static int DumpHeapConstants(const char* argv0) {
// Start up V8.
@@ -78,7 +110,6 @@ static int DumpHeapConstants(const char* argv0) {
{
Isolate::Scope scope(isolate);
i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
- i::ReadOnlyRoots roots(heap);
i::PrintF("%s", kHeader);
#define DUMP_TYPE(T) i::PrintF(" %d: \"%s\",\n", i::T, #T);
i::PrintF("INSTANCE_TYPES = {\n");
@@ -86,49 +117,50 @@ static int DumpHeapConstants(const char* argv0) {
i::PrintF("}\n");
#undef DUMP_TYPE
- // Dump the KNOWN_MAP table to the console.
- i::PrintF("\n# List of known V8 maps.\n");
- i::PrintF("KNOWN_MAPS = {\n");
- DumpMaps(heap->read_only_space());
- DumpMaps(heap->map_space());
- i::PrintF("}\n");
+ {
+ // Dump the KNOWN_MAP table to the console.
+ i::PrintF("\n# List of known V8 maps.\n");
+ i::PrintF("KNOWN_MAPS = {\n");
+ i::ReadOnlyHeapIterator ro_iterator(heap->read_only_heap());
+ for (i::HeapObject object = ro_iterator.Next(); !object.is_null();
+ object = ro_iterator.Next()) {
+ if (!object.IsMap()) continue;
+ DumpKnownMap(heap, i::Heap::GetSpaceName(i::RO_SPACE), object);
+ }
+ i::HeapObjectIterator iterator(heap->map_space());
+ for (i::HeapObject object = iterator.Next(); !object.is_null();
+ object = iterator.Next()) {
+ if (!object.IsMap()) continue;
+ DumpKnownMap(heap, i::Heap::GetSpaceName(i::MAP_SPACE), object);
+ }
+ i::PrintF("}\n");
+ }
- // Dump the KNOWN_OBJECTS table to the console.
- i::PrintF("\n# List of known V8 objects.\n");
-#define RO_ROOT_LIST_CASE(type, name, CamelName) \
- if (n == NULL && o == roots.name()) { \
- n = #CamelName; \
- i = i::RootIndex::k##CamelName; \
- }
-#define ROOT_LIST_CASE(type, name, CamelName) \
- if (n == NULL && o == heap->name()) { \
- n = #CamelName; \
- i = i::RootIndex::k##CamelName; \
- }
- i::PagedSpaces spit(heap, i::PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
- i::PrintF("KNOWN_OBJECTS = {\n");
- for (i::PagedSpace* s = spit.next(); s != nullptr; s = spit.next()) {
- i::HeapObjectIterator it(s);
- // Code objects are generally platform-dependent.
- if (s->identity() == i::CODE_SPACE || s->identity() == i::MAP_SPACE)
- continue;
- const char* sname = s->name();
- for (i::HeapObject o = it.Next(); !o.is_null(); o = it.Next()) {
- // Skip maps in RO_SPACE since they will be reported elsewhere.
- if (o->IsMap()) continue;
- const char* n = nullptr;
- i::RootIndex i = i::RootIndex::kFirstSmiRoot;
- intptr_t p = o.ptr() & (i::Page::kPageSize - 1);
- STRONG_READ_ONLY_ROOT_LIST(RO_ROOT_LIST_CASE)
- MUTABLE_ROOT_LIST(ROOT_LIST_CASE)
- if (n == nullptr) continue;
- if (!i::RootsTable::IsImmortalImmovable(i)) continue;
- i::PrintF(" (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", sname, p, n);
+ {
+ // Dump the KNOWN_OBJECTS table to the console.
+ i::PrintF("\n# List of known V8 objects.\n");
+ i::PrintF("KNOWN_OBJECTS = {\n");
+ i::ReadOnlyHeapIterator ro_iterator(heap->read_only_heap());
+ for (i::HeapObject object = ro_iterator.Next(); !object.is_null();
+ object = ro_iterator.Next()) {
+ // Skip read-only heap maps, they will be reported elsewhere.
+ if (object.IsMap()) continue;
+ DumpKnownObject(heap, i::Heap::GetSpaceName(i::RO_SPACE), object);
+ }
+
+ i::PagedSpaces spit(heap);
+ for (i::PagedSpace* s = spit.next(); s != nullptr; s = spit.next()) {
+ i::HeapObjectIterator it(s);
+ // Code objects are generally platform-dependent.
+ if (s->identity() == i::CODE_SPACE || s->identity() == i::MAP_SPACE)
+ continue;
+ const char* sname = s->name();
+ for (i::HeapObject o = it.Next(); !o.is_null(); o = it.Next()) {
+ DumpKnownObject(heap, sname, o);
+ }
}
+ i::PrintF("}\n");
}
- i::PrintF("}\n");
-#undef ROOT_LIST_CASE
-#undef RO_ROOT_LIST_CASE
// Dump frame markers
i::PrintF("\n# List of known V8 Frame Markers.\n");
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 43728649b8..216d962ff3 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -150,7 +150,7 @@
'js1_2/function/Number': [SKIP],
# TODO(2018): Might timeout in debug mode.
- 'js1_5/GC/regress-203278-2': [PASS, ['mode == debug', NO_VARIANTS, FAIL]],
+ 'js1_5/GC/regress-203278-2': [PASS, SLOW, ['mode == debug', NO_VARIANTS, FAIL]],
# These tests use invalid LHS expressions in assignments.
'js1_5/Regress/regress-319391': [SKIP],
@@ -163,6 +163,13 @@
# https://crbug.com/v8/8120
'ecma_3/Array/regress-322135-04': [SKIP],
+ # These tests try to sort very large arrays. Array#sort pre-processing does
+ # not support huge sparse Arrays, so these tests run a very long time.
+ # https://crbug.com/v8/8714
+ 'js1_5/Array/regress-330812': [SKIP],
+ 'js1_5/Regress/regress-422348': [SKIP],
+ 'js1_5/Array/regress-157652': [SKIP],
+
##################### SLOW TESTS #####################
# Compiles a long chain of && or || operations, can time out under slower
@@ -202,18 +209,22 @@
# Slow with arm64 simulator in debug.
'ecma_3/Statements/regress-302439': [PASS, ['mode == debug', SLOW]],
+ # More slow tests.
+ 'ecma/Date/15.9.5.9': [PASS, SLOW],
+ 'ecma/Expressions/11.7.2': [PASS, SLOW],
+
##################### FLAKY TESTS #####################
# These tests time out in debug mode but pass in product mode
- 'js1_5/Regress/regress-360969-05': [PASS, ['mode == debug', NO_VARIANTS]],
- 'js1_5/Regress/regress-360969-06': [PASS, ['mode == debug', NO_VARIANTS]],
+ 'js1_5/Regress/regress-360969-05': [PASS, SLOW, ['mode == debug', NO_VARIANTS]],
+ 'js1_5/Regress/regress-360969-06': [PASS, SLOW, ['mode == debug', NO_VARIANTS]],
'js1_5/extensions/regress-365527': [PASS, SLOW, ['mode == debug', NO_VARIANTS]],
'js1_5/Regress/regress-280769-3': [PASS, ['mode == debug', FAIL]],
'js1_5/Regress/regress-203278-1': [PASS, ['mode == debug', FAIL]],
'js1_5/Regress/regress-244470': [PASS, ['mode == debug', FAIL]],
'ecma_3/RegExp/regress-209067': [PASS, ['mode == debug', FAIL]],
- 'js1_5/GC/regress-278725': [PASS, ['mode == debug', FAIL]],
+ 'js1_5/GC/regress-278725': [PASS, SLOW, ['mode == debug', FAIL]],
# http://b/issue?id=1206983
'js1_5/Regress/regress-367561-03': [PASS, ['mode == debug', FAIL], NO_VARIANTS],
'ecma/FunctionObjects/15.3.1.1-3': [PASS, FAIL, ['mode == debug', NO_VARIANTS]],
@@ -505,7 +516,6 @@
'ecma_3/extensions/regress-274152': [FAIL_OK],
'js1_5/Regress/regress-372364': [FAIL_OK],
'js1_5/Regress/regress-420919': [FAIL_OK],
- 'js1_5/Regress/regress-422348': [FAIL_OK],
'js1_5/Regress/regress-410852': [FAIL_OK],
'ecma_3/RegExp/regress-375715-04': [FAIL_OK],
'js1_5/decompilation/regress-456964-01': [FAIL_OK],
@@ -708,7 +718,7 @@
# This test seems designed to fail (it produces a 700Mbyte string).
# We fail on out of memory. The important thing is not to crash.
- 'js1_5/Regress/regress-303213': [FAIL, ['mode == debug', NO_VARIANTS]],
+ 'js1_5/Regress/regress-303213': [FAIL, SLOW, ['mode == debug', NO_VARIANTS]],
# This test fails since we now throw in String.prototype.match when apply
# is given null or undefined as this argument (and so does firefox nightly).
@@ -1033,6 +1043,14 @@
'ecma_3/RegExp/regress-85721': [SKIP],
}], # tsan
+##############################################################################
+['variant == stress', {
+ # Slow tests.
+ 'js1_5/Regress/regress-360969-05': [SKIP],
+ 'js1_5/Regress/regress-360969-06': [SKIP],
+}], # variant == stress
+
+##############################################################################
['variant == no_wasm_traps', {
'*': [SKIP],
}], # variant == no_wasm_traps
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 5b45a6eced..0a231ef11d 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -60,10 +60,12 @@
'language/expressions/assignment/S11.13.1_A6*': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4709
- 'built-ins/Promise/reject-function-name': [FAIL],
- 'built-ins/Promise/resolve-function-name': [FAIL],
'built-ins/Promise/all/resolve-element-function-name': [FAIL],
+ 'built-ins/Promise/allSettled/reject-element-function-name': [FAIL],
+ 'built-ins/Promise/allSettled/resolve-element-function-name': [FAIL],
'built-ins/Promise/executor-function-name': [FAIL],
+ 'built-ins/Promise/reject-function-name': [FAIL],
+ 'built-ins/Promise/resolve-function-name': [FAIL],
'built-ins/Proxy/revocable/revocation-function-name': [FAIL],
'language/expressions/assignment/fn-name-lhs-cover': [FAIL],
'language/expressions/assignment/fn-name-lhs-member': [FAIL],
@@ -73,6 +75,10 @@
'intl402/DateTimeFormat/prototype/format/format-function-name': [FAIL],
'intl402/Collator/prototype/compare/compare-function-name': [FAIL],
+ # intl tests which require flags. https://bugs.chromium.org/p/v8/issues/detail?id=9154
+ 'intl402/NumberFormat/numbering-system-options': ['--harmony-intl-add-calendar-numbering-system'],
+ 'intl402/DateTimeFormat/numbering-system-calendar-options': ['--harmony-intl-add-calendar-numbering-system'],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=9084
'intl402/supportedLocalesOf-consistent-with-resolvedOptions': [FAIL],
'intl402/fallback-locales-are-supported': [FAIL],
@@ -147,17 +153,6 @@
'built-ins/DataView/prototype/byteLength/detached-buffer': [FAIL],
'built-ins/DataView/prototype/byteOffset/detached-buffer': [FAIL],
- # https://crbug.com/v8/9131
- 'built-ins/RegExp/property-escapes/generated/Assigned': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/Changes_When_NFKC_Casefolded': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Other': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Other_Symbol': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Symbol': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/General_Category_-_Unassigned': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/Grapheme_Base': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/Script_-_Common': [SKIP],
- 'built-ins/RegExp/property-escapes/generated/Script_Extensions_-_Han': [SKIP],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4951
'language/expressions/assignment/destructuring/iterator-destructuring-property-reference-target-evaluation-order': [FAIL],
'language/expressions/assignment/destructuring/keyed-destructuring-property-reference-target-evaluation-order': [FAIL],
@@ -519,17 +514,18 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=9047
'annexB/built-ins/Function/createdynfn-no-line-terminator-html-close-comment-body': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=9048
- 'built-ins/JSON/stringify/bigint-tojson-receiver': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=9049
'language/comments/hashbang/use-strict': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=9050
- 'language/statements/async-generator/return-undefined-implicit-and-explicit': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=9051
- 'language/statements/async-generator/yield-star-return-then-getter-ticks': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=9229
+ 'language/expressions/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage': [FAIL],
+ 'language/expressions/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-chained-usage': [FAIL],
+ 'language/expressions/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-function-expression': [FAIL],
+ 'language/expressions/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-recursive': [FAIL],
+ 'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage': [FAIL],
+ 'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-chained-usage': [FAIL],
+ 'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-function-expression': [FAIL],
+ 'language/statements/class/elements/syntax/early-errors/grammar-private-environment-on-class-heritage-recursive': [FAIL],
######################## NEEDS INVESTIGATION ###########################
@@ -654,6 +650,10 @@
['asan == True', {
# BUG(v8:4653): Test262 tests which rely on quit() are not compatible with
# asan's --omit-quit flag.
+ 'built-ins/Promise/allSettled/reject-deferred': [FAIL],
+ 'built-ins/Promise/allSettled/reject-ignored-deferred': [FAIL],
+ 'built-ins/Promise/allSettled/reject-ignored-immed': [FAIL],
+ 'built-ins/Promise/allSettled/reject-immed': [FAIL],
'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
'language/expressions/dynamic-import/always-create-new-promise': [SKIP],
'language/expressions/dynamic-import/assign-expr-get-value-abrupt-throws': [SKIP],
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index da35224687..6ce0834cb1 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -44,28 +44,22 @@ from testrunner.outproc import test262
# TODO(littledan): move the flag mapping into the status file
FEATURE_FLAGS = {
- 'class-fields-public': '--harmony-public-fields',
- 'class-static-fields-public': '--harmony-class-fields',
- 'class-fields-private': '--harmony-private-fields',
- 'class-static-fields-private': '--harmony-private-fields',
- 'String.prototype.matchAll': '--harmony-string-matchall',
- 'Symbol.matchAll': '--harmony-string-matchall',
'numeric-separator-literal': '--harmony-numeric-separator',
'Intl.DateTimeFormat-datetimestyle': '--harmony-intl-datetime-style',
- 'Intl.Locale': '--harmony-locale',
+ 'Intl.DateTimeFormat-formatRange': '--harmony-intl-date-format-range',
+ 'Intl.NumberFormat-unified': '--harmony-intl-numberformat-unified',
'Intl.Segmenter': '--harmony-intl-segmenter',
'Symbol.prototype.description': '--harmony-symbol-description',
'globalThis': '--harmony-global',
- 'well-formed-json-stringify': '--harmony-json-stringify',
'export-star-as-namespace-from-module': '--harmony-namespace-exports',
'Object.fromEntries': '--harmony-object-from-entries',
'hashbang': '--harmony-hashbang',
'BigInt': '--harmony-intl-bigint',
+ 'Promise.allSettled': '--harmony-promise-all-settled',
}
SKIPPED_FEATURES = set(['class-methods-private',
- 'class-static-methods-private',
- 'Intl.NumberFormat-unified'])
+ 'class-static-methods-private'])
DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
diff --git a/deps/v8/test/torque/test-torque.tq b/deps/v8/test/torque/test-torque.tq
index 8221f72d86..8f6635a459 100644
--- a/deps/v8/test/torque/test-torque.tq
+++ b/deps/v8/test/torque/test-torque.tq
@@ -34,16 +34,19 @@ namespace test {
goto Label3(Null, 7);
}
+ @export
macro TestConstexpr1() {
check(FromConstexpr<bool>(IsFastElementsKind(PACKED_SMI_ELEMENTS)));
}
+ @export
macro TestConstexprIf() {
check(ElementsKindTestHelper1(UINT8_ELEMENTS));
check(ElementsKindTestHelper1(UINT16_ELEMENTS));
check(!ElementsKindTestHelper1(UINT32_ELEMENTS));
}
+ @export
macro TestConstexprReturn() {
check(FromConstexpr<bool>(ElementsKindTestHelper3(UINT8_ELEMENTS)));
check(FromConstexpr<bool>(ElementsKindTestHelper3(UINT16_ELEMENTS)));
@@ -51,6 +54,7 @@ namespace test {
check(FromConstexpr<bool>(!ElementsKindTestHelper3(UINT32_ELEMENTS)));
}
+ @export
macro TestGotoLabel(): Boolean {
try {
LabelTestHelper1() otherwise Label1;
@@ -60,6 +64,7 @@ namespace test {
}
}
+ @export
macro TestGotoLabelWithOneParameter(): Boolean {
try {
LabelTestHelper2() otherwise Label2;
@@ -70,6 +75,7 @@ namespace test {
}
}
+ @export
macro TestGotoLabelWithTwoParameters(): Boolean {
try {
LabelTestHelper3() otherwise Label3;
@@ -89,6 +95,7 @@ namespace test {
return param;
}
+ @export
macro TestBuiltinSpecialization(c: Context) {
check(GenericBuiltinTest<Smi>(c, 0) == Null);
check(GenericBuiltinTest<Smi>(c, 1) == Null);
@@ -117,6 +124,7 @@ namespace test {
}
}
+ @export
macro TestPartiallyUnusedLabel(): Boolean {
let r1: bool = CallLabelTestHelper4(true);
let r2: bool = CallLabelTestHelper4(false);
@@ -146,6 +154,7 @@ namespace test {
return Cast<Smi>(param2) otherwise Y;
}
+ @export
macro TestMacroSpecialization() {
try {
const smi0: Smi = 0;
@@ -173,6 +182,7 @@ namespace test {
return x + 2;
}
+ @export
macro TestFunctionPointers(implicit context: Context)(): Boolean {
let fptr: builtin(Context, Smi) => Smi = TestHelperPlus1;
check(fptr(context, 42) == 43);
@@ -181,17 +191,20 @@ namespace test {
return True;
}
+ @export
macro TestVariableRedeclaration(implicit context: Context)(): Boolean {
let var1: int31 = FromConstexpr<bool>(42 == 0) ? 0 : 1;
let var2: int31 = FromConstexpr<bool>(42 == 0) ? 1 : 0;
return True;
}
+ @export
macro TestTernaryOperator(x: Smi): Smi {
let b: bool = x < 0 ? true : false;
return b ? x - 10 : x + 100;
}
+ @export
macro TestFunctionPointerToGeneric(c: Context) {
let fptr1: builtin(Context, Smi) => Object = GenericBuiltinTest<Smi>;
let fptr2: builtin(Context, Object) => Object = GenericBuiltinTest<Object>;
@@ -203,10 +216,12 @@ namespace test {
}
type ObjectToObject = builtin(Context, Object) => Object;
+ @export
macro TestTypeAlias(x: ObjectToObject): BuiltinPtr {
return x;
}
+ @export
macro TestUnsafeCast(implicit context: Context)(n: Number): Boolean {
if (TaggedIsSmi(n)) {
let m: Smi = UnsafeCast<Smi>(n);
@@ -217,16 +232,19 @@ namespace test {
return False;
}
+ @export
macro TestHexLiteral() {
check(Convert<intptr>(0xffff) + 1 == 0x10000);
check(Convert<intptr>(-0xffff) == -65535);
}
+ @export
macro TestLargeIntegerLiterals(implicit c: Context)() {
let x: int32 = 0x40000000;
let y: int32 = 0x7fffffff;
}
+ @export
macro TestMultilineAssert() {
let someVeryLongVariableNameThatWillCauseLineBreaks: Smi = 5;
check(
@@ -234,6 +252,7 @@ namespace test {
someVeryLongVariableNameThatWillCauseLineBreaks < 10);
}
+ @export
macro TestNewlineInString() {
Print('Hello, World!\n');
}
@@ -242,12 +261,14 @@ namespace test {
const kIntptrConst: intptr = 4;
const kSmiConst: Smi = 3;
+ @export
macro TestModuleConstBindings() {
check(kConstexprConst == Int32Constant(5));
check(kIntptrConst == 4);
check(kSmiConst == 3);
}
+ @export
macro TestLocalConstBindings() {
const x: constexpr int31 = 3;
const xSmi: Smi = x;
@@ -273,10 +294,12 @@ namespace test {
y: Smi;
}
+ @export
macro TestStruct1(i: TestStructA): Smi {
return i.i;
}
+ @export
macro TestStruct2(implicit context: Context)(): TestStructA {
return TestStructA{
indexes: UnsafeCast<FixedArray>(kEmptyFixedArray),
@@ -285,6 +308,7 @@ namespace test {
};
}
+ @export
macro TestStruct3(implicit context: Context)(): TestStructA {
let a: TestStructA =
TestStructA{indexes: UnsafeCast<FixedArray>(kEmptyFixedArray), i: 13, k: 5};
@@ -310,12 +334,13 @@ namespace test {
y: TestStructA;
}
+ @export
macro TestStruct4(implicit context: Context)(): TestStructC {
return TestStructC{x: TestStruct2(), y: TestStruct2()};
}
- macro TestStructInLabel(implicit context: Context)(): never
- labels Foo(TestStructA) {
+ macro TestStructInLabel(implicit context: Context)(): never labels
+ Foo(TestStructA) {
goto Foo(TestStruct2());
}
macro CallTestStructInLabel(implicit context: Context)() {
@@ -327,6 +352,7 @@ namespace test {
// This macro tests different versions of the for-loop where some parts
// are (not) present.
+ @export
macro TestForLoop() {
let sum: Smi = 0;
for (let i: Smi = 0; i < 5; ++i) sum += i;
@@ -426,6 +452,7 @@ namespace test {
}
}
+ @export
macro TestSubtyping(x: Smi) {
const foo: Object = x;
}
@@ -471,6 +498,7 @@ namespace test {
return result;
}
+ @export
macro TestTypeswitch(implicit context: Context)() {
check(TypeswitchExample(FromConstexpr<Smi>(5)) == 26);
const a: FixedArray = AllocateZeroedFixedArray(3);
@@ -478,6 +506,7 @@ namespace test {
check(TypeswitchExample(FromConstexpr<Number>(0.5)) == 27);
}
+ @export
macro TestTypeswitchAsanLsanFailure(implicit context: Context)(obj: Object) {
typeswitch (obj) {
case (o: Smi): {
@@ -498,6 +527,7 @@ namespace test {
return o + 1;
}
+ @export
macro TestGenericOverload(implicit context: Context)() {
const xSmi: Smi = 5;
const xObject: Object = xSmi;
@@ -505,6 +535,7 @@ namespace test {
check(UnsafeCast<Smi>(ExampleGenericOverload<Object>(xObject)) == 5);
}
+ @export
macro TestEquality(implicit context: Context)() {
const notEqual: bool =
AllocateHeapNumberWithValue(0.5) != AllocateHeapNumberWithValue(0.5);
@@ -523,30 +554,37 @@ namespace test {
}
}
+ @export
macro TestOrAnd1(x: bool, y: bool, z: bool): bool {
return BoolToBranch(x) || y && z ? true : false;
}
+ @export
macro TestOrAnd2(x: bool, y: bool, z: bool): bool {
return x || BoolToBranch(y) && z ? true : false;
}
+ @export
macro TestOrAnd3(x: bool, y: bool, z: bool): bool {
return x || y && BoolToBranch(z) ? true : false;
}
+ @export
macro TestAndOr1(x: bool, y: bool, z: bool): bool {
return BoolToBranch(x) && y || z ? true : false;
}
+ @export
macro TestAndOr2(x: bool, y: bool, z: bool): bool {
return x && BoolToBranch(y) || z ? true : false;
}
+ @export
macro TestAndOr3(x: bool, y: bool, z: bool): bool {
return x && y || BoolToBranch(z) ? true : false;
}
+ @export
macro TestLogicalOperators() {
check(TestAndOr1(true, true, true));
check(TestAndOr2(true, true, true));
@@ -598,12 +636,13 @@ namespace test {
check(!TestOrAnd3(false, false, false));
}
- macro TestCall(i: Smi): Smi
- labels A {
+ @export
+ macro TestCall(i: Smi): Smi labels A {
if (i < 5) return i;
goto A;
}
+ @export
macro TestOtherwiseWithCode1() {
let v: Smi = 0;
let s: Smi = 1;
@@ -616,6 +655,7 @@ namespace test {
assert(v == 2);
}
+ @export
macro TestOtherwiseWithCode2() {
let s: Smi = 0;
for (let i: Smi = 0; i < 10; ++i) {
@@ -625,6 +665,7 @@ namespace test {
assert(s == 5);
}
+ @export
macro TestOtherwiseWithCode3() {
let s: Smi = 0;
for (let i: Smi = 0; i < 10; ++i) {
@@ -633,6 +674,7 @@ namespace test {
assert(s == 10);
}
+ @export
macro TestForwardLabel() {
try {
goto A;
@@ -645,11 +687,13 @@ namespace test {
}
}
+ @export
macro TestQualifiedAccess(implicit context: Context)() {
let s: Smi = 0;
check(!array::IsJSArray(s));
}
+ @export
macro TestCatch1(implicit context: Context)(): Smi {
let r: Smi = 0;
try {
@@ -660,10 +704,12 @@ namespace test {
}
}
+ @export
macro TestCatch2Wrapper(implicit context: Context)(): never {
ThrowTypeError(kInvalidArrayLength);
}
+ @export
macro TestCatch2(implicit context: Context)(): Smi {
let r: Smi = 0;
try {
@@ -674,11 +720,13 @@ namespace test {
}
}
- macro TestCatch3WrapperWithLabel(implicit context: Context)(): never
- labels Abort {
+ @export
+ macro TestCatch3WrapperWithLabel(implicit context: Context)():
+ never labels Abort {
ThrowTypeError(kInvalidArrayLength);
}
+ @export
macro TestCatch3(implicit context: Context)(): Smi {
let r: Smi = 0;
try {
@@ -697,6 +745,7 @@ namespace test {
// it's only purpose is to make sure tha the CSA macros in the
// IteratorBuiltinsAssembler match the signatures provided in
// iterator.tq.
+ @export
macro TestIterator(implicit context: Context)(o: Object, map: Map) {
try {
const t1: Object = iterator::GetIteratorMethod(o);
@@ -715,6 +764,7 @@ namespace test {
label Fail {}
}
+ @export
macro TestFrame1(implicit context: Context)() {
const f: Frame = LoadFramePointer();
const frameType: FrameType =
@@ -733,6 +783,7 @@ namespace test {
}
}
+ @export
macro TestNew(implicit context: Context)() {
const f: JSArray = NewJSArray();
assert(f.IsEmpty());
@@ -756,6 +807,7 @@ namespace test {
c: int32;
}
+ @export
macro TestStructConstructor(implicit context: Context)() {
// Test default constructor
let a: TestOuter = TestOuter{a: 5, b: TestInner{x: 6, y: 7}, c: 8};
@@ -770,43 +822,6 @@ namespace test {
assert(a.b.GetX() == 2);
}
- extern class TestClassWithAllTypes extends JSObject {
- a: int8;
- b: uint8;
- b2: uint8;
- b3: uint8;
- c: int16;
- d: uint16;
- e: int32;
- f: uint32;
- g: RawPtr;
- h: intptr;
- i: uintptr;
- }
-
- macro TestClassWithAllTypesLoadsAndStores(
- t: TestClassWithAllTypes, r: RawPtr, v1: int8, v2: uint8, v3: int16,
- v4: uint16) {
- t.a = v1;
- t.b = v2;
- t.c = v3;
- t.d = v4;
- t.e = 0;
- t.f = 0;
- t.g = r;
- t.h = 0;
- t.i = 0;
- t.a = t.a;
- t.b = t.b;
- t.c = t.c;
- t.d = t.d;
- t.e = t.e;
- t.f = t.f;
- t.g = t.g;
- t.h = t.h;
- t.i = t.i;
- }
-
class InternalClass {
Flip() labels NotASmi {
const tmp = Cast<Smi>(this.b) otherwise NotASmi;
@@ -821,6 +836,7 @@ namespace test {
return new InternalClass{a: x, b: x + 1};
}
+ @export
macro TestInternalClass(implicit context: Context)() {
const o = NewInternalClass(5);
o.Flip() otherwise unreachable;
@@ -839,6 +855,7 @@ namespace test {
const b: int32;
}
+ @export
macro TestConstInStructs() {
const x = StructWithConst{a: Null, b: 1};
let y = StructWithConst{a: Null, b: 1};
@@ -854,6 +871,7 @@ namespace test {
count: Smi;
}
+ @export
macro TestNewFixedArrayFromSpread(implicit context: Context)(): Object {
const i = TestIterator{count: 5};
return new FixedArray{map: kFixedArrayMap, length: 5, objects: ...i};
@@ -873,6 +891,7 @@ namespace test {
* b = tmp;
}
+ @export
macro TestReferences() {
const array = new SmiPair{a: 7, b: 2};
const ref:&Smi = & array.a;
@@ -882,4 +901,10 @@ namespace test {
check(array.a == 2);
check(array.b == 9);
}
+
+ @export
+ macro TestStaticAssert() {
+ StaticAssert(1 + 2 == 3);
+ }
+
}
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 8cb64c3865..39af3fbc06 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -45,7 +45,6 @@ v8_source_set("unittests_sources") {
"../../test/common/wasm/wasm-macro-gen.h",
"../../testing/gmock-support.h",
"../../testing/gtest-support.h",
- "allocation-unittest.cc",
"api/access-check-unittest.cc",
"api/exception-unittest.cc",
"api/interceptor-unittest.cc",
@@ -54,7 +53,6 @@ v8_source_set("unittests_sources") {
"api/v8-object-unittest.cc",
"asmjs/asm-scanner-unittest.cc",
"asmjs/asm-types-unittest.cc",
- "background-compile-task-unittest.cc",
"base/address-region-unittest.cc",
"base/atomic-utils-unittest.cc",
"base/bits-unittest.cc",
@@ -78,11 +76,10 @@ v8_source_set("unittests_sources") {
"base/template-utils-unittest.cc",
"base/threaded-list-unittest.cc",
"base/utils/random-number-generator-unittest.cc",
- "bigint-unittest.cc",
- "cancelable-tasks-unittest.cc",
- "char-predicates-unittest.cc",
- "code-stub-assembler-unittest.cc",
- "code-stub-assembler-unittest.h",
+ "codegen/code-stub-assembler-unittest.cc",
+ "codegen/code-stub-assembler-unittest.h",
+ "codegen/register-configuration-unittest.cc",
+ "codegen/source-position-table-unittest.cc",
"compiler-dispatcher/compiler-dispatcher-unittest.cc",
"compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc",
"compiler/backend/instruction-selector-unittest.cc",
@@ -102,6 +99,7 @@ v8_source_set("unittests_sources") {
"compiler/control-equivalence-unittest.cc",
"compiler/control-flow-optimizer-unittest.cc",
"compiler/dead-code-elimination-unittest.cc",
+ "compiler/decompression-elimination-unittest.cc",
"compiler/diamond-unittest.cc",
"compiler/effect-control-linearizer-unittest.cc",
"compiler/graph-reducer-unittest.cc",
@@ -144,14 +142,14 @@ v8_source_set("unittests_sources") {
"compiler/typer-unittest.cc",
"compiler/value-numbering-reducer-unittest.cc",
"compiler/zone-stats-unittest.cc",
- "conversions-unittest.cc",
- "counters-unittest.cc",
- "detachable-vector-unittest.cc",
- "eh-frame-iterator-unittest.cc",
- "eh-frame-writer-unittest.cc",
+ "date/date-cache-unittest.cc",
+ "diagnostics/eh-frame-iterator-unittest.cc",
+ "diagnostics/eh-frame-writer-unittest.cc",
+ "execution/microtask-queue-unittest.cc",
"heap/barrier-unittest.cc",
"heap/bitmap-test-utils.h",
"heap/bitmap-unittest.cc",
+ "heap/code-object-registry-unittest.cc",
"heap/embedder-tracing-unittest.cc",
"heap/gc-idle-time-handler-unittest.cc",
"heap/gc-tracer-unittest.cc",
@@ -185,16 +183,20 @@ v8_source_set("unittests_sources") {
"libplatform/default-worker-threads-task-runner-unittest.cc",
"libplatform/task-queue-unittest.cc",
"libplatform/worker-thread-unittest.cc",
- "locked-queue-unittest.cc",
- "microtask-queue-unittest.cc",
- "object-unittest.cc",
+ "logging/counters-unittest.cc",
+ "numbers/bigint-unittest.cc",
+ "numbers/conversions-unittest.cc",
+ "objects/object-unittest.cc",
+ "objects/value-serializer-unittest.cc",
"parser/ast-value-unittest.cc",
"parser/preparser-unittest.cc",
- "register-configuration-unittest.cc",
+ "profiler/strings-storage-unittest.cc",
"regress/regress-crbug-938251-unittest.cc",
"run-all-unittests.cc",
- "source-position-table-unittest.cc",
- "strings-storage-unittest.cc",
+ "strings/char-predicates-unittest.cc",
+ "strings/unicode-unittest.cc",
+ "tasks/background-compile-task-unittest.cc",
+ "tasks/cancelable-tasks-unittest.cc",
"test-helpers.cc",
"test-helpers.h",
"test-utils.cc",
@@ -205,9 +207,10 @@ v8_source_set("unittests_sources") {
"torque/ls-server-data-unittest.cc",
"torque/torque-unittest.cc",
"torque/torque-utils-unittest.cc",
- "unicode-unittest.cc",
- "utils-unittest.cc",
- "value-serializer-unittest.cc",
+ "utils/allocation-unittest.cc",
+ "utils/detachable-vector-unittest.cc",
+ "utils/locked-queue-unittest.cc",
+ "utils/utils-unittest.cc",
"wasm/control-transfer-unittest.cc",
"wasm/decoder-unittest.cc",
"wasm/function-body-decoder-unittest.cc",
@@ -295,6 +298,7 @@ v8_source_set("unittests_sources") {
"../..:v8_for_testing",
"../..:v8_libbase",
"../..:v8_libplatform",
+ "../../third_party/inspector_protocol:encoding_test",
"//build/win:default_exe_manifest",
"//testing/gmock",
"//testing/gtest",
diff --git a/deps/v8/test/unittests/api/exception-unittest.cc b/deps/v8/test/unittests/api/exception-unittest.cc
index b8f21cc4e2..36b87d00f7 100644
--- a/deps/v8/test/unittests/api/exception-unittest.cc
+++ b/deps/v8/test/unittests/api/exception-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "include/v8.h"
-#include "src/flags.h"
+#include "src/flags/flags.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/api/isolate-unittest.cc b/deps/v8/test/unittests/api/isolate-unittest.cc
index 10fa7bba22..8d1a5dd84f 100644
--- a/deps/v8/test/unittests/api/isolate-unittest.cc
+++ b/deps/v8/test/unittests/api/isolate-unittest.cc
@@ -10,14 +10,14 @@
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
-#include "src/execution.h"
-#include "src/isolate.h"
-#include "src/v8.h"
+#include "src/execution/execution.h"
+#include "src/execution/isolate.h"
+#include "src/init/v8.h"
#include "test/unittests/test-utils.h"
namespace v8 {
-typedef TestWithIsolate IsolateTest;
+using IsolateTest = TestWithIsolate;
namespace {
diff --git a/deps/v8/test/unittests/api/remote-object-unittest.cc b/deps/v8/test/unittests/api/remote-object-unittest.cc
index 5fa0646425..39434a8f9a 100644
--- a/deps/v8/test/unittests/api/remote-object-unittest.cc
+++ b/deps/v8/test/unittests/api/remote-object-unittest.cc
@@ -5,15 +5,15 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "include/v8.h"
-#include "src/api-inl.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace remote_object_unittest {
-typedef TestWithIsolate RemoteObjectTest;
+using RemoteObjectTest = TestWithIsolate;
namespace {
diff --git a/deps/v8/test/unittests/api/v8-object-unittest.cc b/deps/v8/test/unittests/api/v8-object-unittest.cc
index d11dba69cf..6e5c9131fd 100644
--- a/deps/v8/test/unittests/api/v8-object-unittest.cc
+++ b/deps/v8/test/unittests/api/v8-object-unittest.cc
@@ -3,8 +3,8 @@
// found in the LICENSE file.
#include "include/v8.h"
-#include "src/api.h"
-#include "src/objects-inl.h"
+#include "src/api/api.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -97,6 +97,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPrototype) {
caller_context->Global()->Set(caller_context, object_key, object).ToChecked();
const char script[] =
"function f() { object.property; object.property = 0; } "
+ "%PrepareFunctionForOptimization(f); "
"f(); f(); "
"%OptimizeFunctionOnNextCall(f); "
"f();";
diff --git a/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc b/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
index fe061f8e2a..5ec85a9ada 100644
--- a/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/asmjs/asm-scanner.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
index 63c68ff48f..76dd04d77c 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm-unittest.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm/assembler-arm-inl.h"
-#include "src/macro-assembler.h"
-#include "src/simulator.h"
+#include "src/codegen/arm/assembler-arm-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
+#include "src/utils/ostreams.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
@@ -69,6 +70,117 @@ TEST_F(TurboAssemblerTest, TestCheck) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
}
+struct MoveObjectAndSlotTestCase {
+ const char* comment;
+ Register dst_object;
+ Register dst_slot;
+ Register object;
+ Register offset_register = no_reg;
+};
+
+const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
+ {"no overlap", r0, r1, r2},
+ {"no overlap", r0, r1, r2, r3},
+
+ {"object == dst_object", r2, r1, r2},
+ {"object == dst_object", r2, r1, r2, r3},
+
+ {"object == dst_slot", r1, r2, r2},
+ {"object == dst_slot", r1, r2, r2, r3},
+
+ {"offset == dst_object", r0, r1, r2, r0},
+
+ {"offset == dst_object && object == dst_slot", r0, r1, r1, r0},
+
+ {"offset == dst_slot", r0, r1, r2, r1},
+
+ {"offset == dst_slot && object == dst_object", r0, r1, r0, r1}};
+
+// Make sure we include offsets that cannot be encoded in an add instruction.
+const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
+
+template <typename T>
+class TurboAssemblerTestWithParam : public TurboAssemblerTest,
+ public ::testing::WithParamInterface<T> {};
+
+using TurboAssemblerTestMoveObjectAndSlot =
+ TurboAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
+
+TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
+ const MoveObjectAndSlotTestCase test_case = GetParam();
+ TRACED_FOREACH(int32_t, offset, kOffsets) {
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ __ Push(r0);
+ __ Move(test_case.object, r1);
+
+ Register src_object = test_case.object;
+ Register dst_object = test_case.dst_object;
+ Register dst_slot = test_case.dst_slot;
+
+ Operand offset_operand(0);
+ if (test_case.offset_register == no_reg) {
+ offset_operand = Operand(offset);
+ } else {
+ __ mov(test_case.offset_register, Operand(offset));
+ offset_operand = Operand(test_case.offset_register);
+ }
+
+ std::stringstream comment;
+ comment << "-- " << test_case.comment << ": MoveObjectAndSlot("
+ << dst_object << ", " << dst_slot << ", " << src_object << ", ";
+ if (test_case.offset_register == no_reg) {
+ comment << "#" << offset;
+ } else {
+ comment << test_case.offset_register;
+ }
+ comment << ") --";
+ __ RecordComment(comment.str().c_str());
+ __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand);
+ __ RecordComment("--");
+
+ // The `result` pointer was saved on the stack.
+ UseScratchRegisterScope temps(&tasm);
+ Register scratch = temps.Acquire();
+ __ Pop(scratch);
+ __ str(dst_object, MemOperand(scratch));
+ __ str(dst_slot, MemOperand(scratch, kSystemPointerSize));
+
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ if (FLAG_print_code) {
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate(), desc, Code::STUB).Build();
+ StdoutStream os;
+ code->Print(os);
+ }
+
+ buffer->MakeExecutable();
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, byte**, byte*>::FromBuffer(isolate(),
+ buffer->start());
+
+ byte* object = new byte[offset];
+ byte* result[] = {nullptr, nullptr};
+
+ f.Call(result, object);
+
+ // The first element must be the address of the object, and the second the
+ // slot addressed by `offset`.
+ EXPECT_EQ(result[0], &object[0]);
+ EXPECT_EQ(result[1], &object[offset]);
+
+ delete[] object;
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest,
+ TurboAssemblerTestMoveObjectAndSlot,
+ ::testing::ValuesIn(kMoveObjectAndSlotTestCases));
+
#undef __
#undef ERROR_MESSAGE
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
index 57e82ecde3..b78cc71037 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-arm64-unittest.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/arm64/macro-assembler-arm64-inl.h"
-#include "src/macro-assembler.h"
-#include "src/simulator.h"
+#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
+#include "src/utils/ostreams.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
@@ -69,6 +70,118 @@ TEST_F(TurboAssemblerTest, TestCheck) {
ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason"));
}
+struct MoveObjectAndSlotTestCase {
+ const char* comment;
+ Register dst_object;
+ Register dst_slot;
+ Register object;
+ Register offset_register = no_reg;
+};
+
+const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = {
+ {"no overlap", x0, x1, x2},
+ {"no overlap", x0, x1, x2, x3},
+
+ {"object == dst_object", x2, x1, x2},
+ {"object == dst_object", x2, x1, x2, x3},
+
+ {"object == dst_slot", x1, x2, x2},
+ {"object == dst_slot", x1, x2, x2, x3},
+
+ {"offset == dst_object", x0, x1, x2, x0},
+
+ {"offset == dst_object && object == dst_slot", x0, x1, x1, x0},
+
+ {"offset == dst_slot", x0, x1, x2, x1},
+
+ {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}};
+
+// Make sure we include offsets that cannot be encoded in an add instruction.
+const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001};
+
+template <typename T>
+class TurboAssemblerTestWithParam : public TurboAssemblerTest,
+ public ::testing::WithParamInterface<T> {};
+
+using TurboAssemblerTestMoveObjectAndSlot =
+ TurboAssemblerTestWithParam<MoveObjectAndSlotTestCase>;
+
+TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) {
+ const MoveObjectAndSlotTestCase test_case = GetParam();
+ TRACED_FOREACH(int32_t, offset, kOffsets) {
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+
+ __ Push(x0, padreg);
+ __ Mov(test_case.object, x1);
+
+ Register src_object = test_case.object;
+ Register dst_object = test_case.dst_object;
+ Register dst_slot = test_case.dst_slot;
+
+ Operand offset_operand(0);
+ if (test_case.offset_register.Is(no_reg)) {
+ offset_operand = Operand(offset);
+ } else {
+ __ Mov(test_case.offset_register, Operand(offset));
+ offset_operand = Operand(test_case.offset_register);
+ }
+
+ std::stringstream comment;
+ comment << "-- " << test_case.comment << ": MoveObjectAndSlot("
+ << dst_object << ", " << dst_slot << ", " << src_object << ", ";
+ if (test_case.offset_register.Is(no_reg)) {
+ comment << "#" << offset;
+ } else {
+ comment << test_case.offset_register;
+ }
+ comment << ") --";
+ __ RecordComment(comment.str().c_str());
+ __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand);
+ __ RecordComment("--");
+
+ // The `result` pointer was saved on the stack.
+ UseScratchRegisterScope temps(&tasm);
+ Register scratch = temps.AcquireX();
+ __ Pop(padreg, scratch);
+ __ Str(dst_object, MemOperand(scratch));
+ __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize));
+
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(nullptr, &desc);
+ if (FLAG_print_code) {
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate(), desc, Code::STUB).Build();
+ StdoutStream os;
+ code->Print(os);
+ }
+
+ buffer->MakeExecutable();
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, byte**, byte*>::FromBuffer(isolate(),
+ buffer->start());
+
+ byte* object = new byte[offset];
+ byte* result[] = {nullptr, nullptr};
+
+ f.Call(result, object);
+
+ // The first element must be the address of the object, and the second the
+ // slot addressed by `offset`.
+ EXPECT_EQ(result[0], &object[0]);
+ EXPECT_EQ(result[1], &object[offset]);
+
+ delete[] object;
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest,
+ TurboAssemblerTestMoveObjectAndSlot,
+ ::testing::ValuesIn(kMoveObjectAndSlotTestCases));
+
#undef __
#undef ERROR_MESSAGE
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
index 3ef812e07a..548cb34fc7 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-ia32-unittest.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/macro-assembler.h"
-#include "src/simulator.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
#include "test/common/assembler-tester.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
index 6da112c5dd..5ea6b2f3f8 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-mips-unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/macro-assembler.h"
-#include "src/mips/assembler-mips-inl.h"
-#include "src/simulator.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips/assembler-mips-inl.h"
+#include "src/execution/simulator.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
index 5b798b8e02..fe9e815981 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-mips64-unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/macro-assembler.h"
-#include "src/mips64/assembler-mips64-inl.h"
-#include "src/simulator.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/mips64/assembler-mips64-inl.h"
+#include "src/execution/simulator.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
index 24e2e71fd8..51744bd92d 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-ppc-unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/macro-assembler.h"
-#include "src/ppc/assembler-ppc-inl.h"
-#include "src/simulator.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/ppc/assembler-ppc-inl.h"
+#include "src/execution/simulator.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
index f3f0a532d6..959ec03157 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-s390-unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/macro-assembler.h"
-#include "src/s390/assembler-s390-inl.h"
-#include "src/simulator.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/s390/assembler-s390-inl.h"
+#include "src/execution/simulator.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
index 8142cbc274..621f598f75 100644
--- a/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-x64-unittest.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/macro-assembler.h"
-#include "src/simulator.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
#include "test/common/assembler-tester.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/base/division-by-constant-unittest.cc b/deps/v8/test/unittests/base/division-by-constant-unittest.cc
index 58816db79e..19a084c5de 100644
--- a/deps/v8/test/unittests/base/division-by-constant-unittest.cc
+++ b/deps/v8/test/unittests/base/division-by-constant-unittest.cc
@@ -25,9 +25,8 @@ std::ostream& operator<<(std::ostream& os,
// Some abbreviations...
-typedef MagicNumbersForDivision<uint32_t> M32;
-typedef MagicNumbersForDivision<uint64_t> M64;
-
+using M32 = MagicNumbersForDivision<uint32_t>;
+using M64 = MagicNumbersForDivision<uint64_t>;
static M32 s32(int32_t d) {
return SignedDivisionByConstant<uint32_t>(static_cast<uint32_t>(d));
diff --git a/deps/v8/test/unittests/base/flags-unittest.cc b/deps/v8/test/unittests/base/flags-unittest.cc
index 6f19399dc2..826234e2d6 100644
--- a/deps/v8/test/unittests/base/flags-unittest.cc
+++ b/deps/v8/test/unittests/base/flags-unittest.cc
@@ -17,8 +17,7 @@ enum Flag1 {
kFlag1Second = 1u << 2,
kFlag1All = kFlag1None | kFlag1First | kFlag1Second
};
-typedef Flags<Flag1> Flags1;
-
+using Flags1 = Flags<Flag1>;
DEFINE_OPERATORS_FOR_FLAGS(Flags1)
@@ -61,7 +60,7 @@ enum Option {
kOption2 = 2,
kAllOptions = kNoOptions | kOption1 | kOption2
};
-typedef Flags<Option> Options;
+using Options = Flags<Option>;
} // namespace foo
@@ -83,7 +82,7 @@ namespace {
struct Foo {
enum Enum { kEnum1 = 1, kEnum2 = 2 };
- typedef Flags<Enum, uint32_t> Enums;
+ using Enums = Flags<Enum, uint32_t>;
};
diff --git a/deps/v8/test/unittests/base/functional-unittest.cc b/deps/v8/test/unittests/base/functional-unittest.cc
index 857a9de5de..43b3fe6ebb 100644
--- a/deps/v8/test/unittests/base/functional-unittest.cc
+++ b/deps/v8/test/unittests/base/functional-unittest.cc
@@ -54,15 +54,16 @@ class FunctionalTest : public ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(FunctionalTest);
};
-typedef ::testing::Types<signed char, unsigned char,
- short, // NOLINT(runtime/int)
- unsigned short, // NOLINT(runtime/int)
- int, unsigned int, long, // NOLINT(runtime/int)
- unsigned long, // NOLINT(runtime/int)
- long long, // NOLINT(runtime/int)
- unsigned long long, // NOLINT(runtime/int)
- int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
- int64_t, uint64_t, float, double> FunctionalTypes;
+using FunctionalTypes =
+ ::testing::Types<signed char, unsigned char,
+ short, // NOLINT(runtime/int)
+ unsigned short, // NOLINT(runtime/int)
+ int, unsigned int, long, // NOLINT(runtime/int)
+ unsigned long, // NOLINT(runtime/int)
+ long long, // NOLINT(runtime/int)
+ unsigned long long, // NOLINT(runtime/int)
+ int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
+ int64_t, uint64_t, float, double>;
TYPED_TEST_SUITE(FunctionalTest, FunctionalTypes);
diff --git a/deps/v8/test/unittests/base/iterator-unittest.cc b/deps/v8/test/unittests/base/iterator-unittest.cc
index c5fe7bc505..955828e028 100644
--- a/deps/v8/test/unittests/base/iterator-unittest.cc
+++ b/deps/v8/test/unittests/base/iterator-unittest.cc
@@ -42,7 +42,7 @@ TEST(IteratorTest, IteratorRangeArray) {
TEST(IteratorTest, IteratorRangeDeque) {
- typedef std::deque<int> C;
+ using C = std::deque<int>;
C c;
c.push_back(1);
c.push_back(2);
diff --git a/deps/v8/test/unittests/base/logging-unittest.cc b/deps/v8/test/unittests/base/logging-unittest.cc
index b720331c9e..762d6762cd 100644
--- a/deps/v8/test/unittests/base/logging-unittest.cc
+++ b/deps/v8/test/unittests/base/logging-unittest.cc
@@ -5,7 +5,7 @@
#include <cstdint>
#include "src/base/logging.h"
-#include "src/objects.h"
+#include "src/objects/objects.h"
#include "src/objects/smi.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/base/ostreams-unittest.cc b/deps/v8/test/unittests/base/ostreams-unittest.cc
index 1444eb7a5c..5d29b9a223 100644
--- a/deps/v8/test/unittests/base/ostreams-unittest.cc
+++ b/deps/v8/test/unittests/base/ostreams-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/base/platform/time-unittest.cc b/deps/v8/test/unittests/base/platform/time-unittest.cc
index d357a8aa62..eedabed934 100644
--- a/deps/v8/test/unittests/base/platform/time-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/time-unittest.cc
@@ -426,7 +426,7 @@ TEST(TimeTicks, TimerPerformance) {
// Note: This is a somewhat arbitrary test.
const int kLoops = 10000;
- typedef TimeTicks (*TestFunc)();
+ using TestFunc = TimeTicks (*)();
struct TestCase {
TestFunc func;
const char *description;
diff --git a/deps/v8/test/unittests/base/threaded-list-unittest.cc b/deps/v8/test/unittests/base/threaded-list-unittest.cc
index effe9b08f7..2af95c93f6 100644
--- a/deps/v8/test/unittests/base/threaded-list-unittest.cc
+++ b/deps/v8/test/unittests/base/threaded-list-unittest.cc
@@ -4,7 +4,7 @@
#include <iterator>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/base/threaded-list.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
index 836a18e2e6..287a11442b 100644
--- a/deps/v8/test/unittests/code-stub-assembler-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/code-stub-assembler-unittest.h"
+#include "test/unittests/codegen/code-stub-assembler-unittest.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors.h"
#include "src/compiler/node.h"
-#include "src/interface-descriptors.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/node-test-utils.h"
diff --git a/deps/v8/test/unittests/code-stub-assembler-unittest.h b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.h
index c48eb772c0..c71090f7ad 100644
--- a/deps/v8/test/unittests/code-stub-assembler-unittest.h
+++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.h
@@ -5,7 +5,7 @@
#ifndef V8_UNITTESTS_CODE_STUB_ASSEMBLER_UNITTEST_H_
#define V8_UNITTESTS_CODE_STUB_ASSEMBLER_UNITTEST_H_
-#include "src/code-stub-assembler.h"
+#include "src/codegen/code-stub-assembler.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock-support.h"
diff --git a/deps/v8/test/unittests/register-configuration-unittest.cc b/deps/v8/test/unittests/codegen/register-configuration-unittest.cc
index 15873dd69c..060370b156 100644
--- a/deps/v8/test/unittests/register-configuration-unittest.cc
+++ b/deps/v8/test/unittests/codegen/register-configuration-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/register-configuration.h"
+#include "src/codegen/register-configuration.h"
#include "testing/gtest-support.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/source-position-table-unittest.cc b/deps/v8/test/unittests/codegen/source-position-table-unittest.cc
index 23fd1a95d2..c2a581674b 100644
--- a/deps/v8/test/unittests/source-position-table-unittest.cc
+++ b/deps/v8/test/unittests/codegen/source-position-table-unittest.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/objects.h"
-#include "src/source-position-table.h"
+#include "src/codegen/source-position-table.h"
+#include "src/objects/objects.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index 312b5cdb25..fb6dc163b2 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -7,19 +7,19 @@
#include <sstream>
#include "include/v8-platform.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
-#include "src/compiler.h"
-#include "src/flags.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/flags/flags.h"
+#include "src/handles/handles.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
-#include "src/v8.h"
#include "src/zone/zone-list-inl.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index d9e1731dc1..ae2e42b61f 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -4,14 +4,14 @@
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/semaphore.h"
-#include "src/compiler.h"
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/optimized-compilation-info.h"
+#include "src/codegen/compiler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
@@ -20,7 +20,7 @@
namespace v8 {
namespace internal {
-typedef TestWithNativeContext OptimizingCompileDispatcherTest;
+using OptimizingCompileDispatcherTest = TestWithNativeContext;
namespace {
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index af2e6c68ca..a26a8d9192 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -6,7 +6,7 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -14,8 +14,7 @@ namespace compiler {
namespace {
-typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
-
+using Constructor = Node* (RawMachineAssembler::*)(Node*, Node*);
// Data processing instructions.
struct DPI {
@@ -143,9 +142,7 @@ const int32_t kImmediates[] = {
// -----------------------------------------------------------------------------
// Data processing instructions.
-
-typedef InstructionSelectorTestWithParam<DPI> InstructionSelectorDPITest;
-
+using InstructionSelectorDPITest = InstructionSelectorTestWithParam<DPI>;
TEST_P(InstructionSelectorDPITest, Parameters) {
const DPI dpi = GetParam();
@@ -536,9 +533,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorDPITest,
// -----------------------------------------------------------------------------
// Data processing instructions with overflow.
-
-typedef InstructionSelectorTestWithParam<ODPI> InstructionSelectorODPITest;
-
+using InstructionSelectorODPITest = InstructionSelectorTestWithParam<ODPI>;
TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
const ODPI odpi = GetParam();
@@ -1035,9 +1030,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorODPITest,
// -----------------------------------------------------------------------------
// Shifts.
-
-typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
-
+using InstructionSelectorShiftTest = InstructionSelectorTestWithParam<Shift>;
TEST_P(InstructionSelectorShiftTest, Parameters) {
const Shift shift = GetParam();
@@ -1321,10 +1314,8 @@ const MemoryAccess kMemoryAccesses[] = {
} // namespace
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
- InstructionSelectorMemoryAccessTest;
-
+using InstructionSelectorMemoryAccessTest =
+ InstructionSelectorTestWithParam<MemoryAccess>;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
@@ -1493,10 +1484,8 @@ const Comparison kComparisons[] = {
} // namespace
-
-typedef InstructionSelectorTestWithParam<Comparison>
- InstructionSelectorComparisonTest;
-
+using InstructionSelectorComparisonTest =
+ InstructionSelectorTestWithParam<Comparison>;
TEST_P(InstructionSelectorComparisonTest, Parameters) {
const Comparison& cmp = GetParam();
@@ -1586,9 +1575,8 @@ const Comparison kF32Comparisons[] = {
} // namespace
-typedef InstructionSelectorTestWithParam<Comparison>
- InstructionSelectorF32ComparisonTest;
-
+using InstructionSelectorF32ComparisonTest =
+ InstructionSelectorTestWithParam<Comparison>;
TEST_P(InstructionSelectorF32ComparisonTest, WithParameters) {
const Comparison& cmp = GetParam();
@@ -1667,9 +1655,8 @@ const Comparison kF64Comparisons[] = {
} // namespace
-typedef InstructionSelectorTestWithParam<Comparison>
- InstructionSelectorF64ComparisonTest;
-
+using InstructionSelectorF64ComparisonTest =
+ InstructionSelectorTestWithParam<Comparison>;
TEST_P(InstructionSelectorF64ComparisonTest, WithParameters) {
const Comparison& cmp = GetParam();
@@ -1738,9 +1725,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// -----------------------------------------------------------------------------
// Floating point arithmetic.
-
-typedef InstructionSelectorTestWithParam<FAI> InstructionSelectorFAITest;
-
+using InstructionSelectorFAITest = InstructionSelectorTestWithParam<FAI>;
TEST_P(InstructionSelectorFAITest, Parameters) {
const FAI& fai = GetParam();
@@ -2009,8 +1994,8 @@ const FlagSettingInst kFlagSettingInstructions[] = {
{&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr},
{&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmTeq}};
-typedef InstructionSelectorTestWithParam<FlagSettingInst>
- InstructionSelectorFlagSettingTest;
+using InstructionSelectorFlagSettingTest =
+ InstructionSelectorTestWithParam<FlagSettingInst>;
TEST_P(InstructionSelectorFlagSettingTest, CmpZeroRight) {
const FlagSettingInst inst = GetParam();
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index aa14a81d7b..78663c52a5 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
namespace v8 {
@@ -19,9 +19,8 @@ struct MachInst {
MachineType machine_type;
};
-typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
-typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
-
+using MachInst1 = MachInst<Node* (RawMachineAssembler::*)(Node*)>;
+using MachInst2 = MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)>;
template <typename T>
std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
@@ -380,10 +379,8 @@ const MachInst2 kCanElideChangeUint32ToUint64[] = {
// -----------------------------------------------------------------------------
// Logical instructions.
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorLogicalTest;
-
+using InstructionSelectorLogicalTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorLogicalTest, Parameter) {
const MachInst2 dpi = GetParam();
@@ -503,8 +500,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// -----------------------------------------------------------------------------
// Add and Sub instructions.
-typedef InstructionSelectorTestWithParam<AddSub> InstructionSelectorAddSubTest;
-
+using InstructionSelectorAddSubTest = InstructionSelectorTestWithParam<AddSub>;
TEST_P(InstructionSelectorAddSubTest, Parameter) {
const AddSub dpi = GetParam();
@@ -985,10 +981,8 @@ TEST_F(InstructionSelectorTest, AddSignedExtendHalfwordOnLeft) {
// -----------------------------------------------------------------------------
// Data processing controlled branches.
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorDPFlagSetTest;
-
+using InstructionSelectorDPFlagSetTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorDPFlagSetTest, BranchWithParameters) {
const MachInst2 dpi = GetParam();
@@ -1253,8 +1247,8 @@ const TestAndBranch kTestAndBranchMatchers32[] = {
"if (mask != (mask and x))", kArm64TestAndBranch32, MachineType::Int32()},
kEqual}};
-typedef InstructionSelectorTestWithParam<TestAndBranch>
- InstructionSelectorTestAndBranchTest;
+using InstructionSelectorTestAndBranchTest =
+ InstructionSelectorTestWithParam<TestAndBranch>;
TEST_P(InstructionSelectorTestAndBranchTest, TestAndBranch32) {
const TestAndBranch inst = GetParam();
@@ -1581,10 +1575,8 @@ TEST_F(InstructionSelectorTest, EqualZeroAndBranch) {
// -----------------------------------------------------------------------------
// Add and subtract instructions with overflow.
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorOvfAddSubTest;
-
+using InstructionSelectorOvfAddSubTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorOvfAddSubTest, OvfParameter) {
const MachInst2 dpi = GetParam();
@@ -1839,9 +1831,7 @@ TEST_F(InstructionSelectorTest, OvfBranchWithImmediateOnLeft) {
// -----------------------------------------------------------------------------
// Shift instructions.
-
-typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
-
+using InstructionSelectorShiftTest = InstructionSelectorTestWithParam<Shift>;
TEST_P(InstructionSelectorShiftTest, Parameter) {
const Shift shift = GetParam();
@@ -1947,10 +1937,8 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
// -----------------------------------------------------------------------------
// Mul and Div instructions.
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorMulDivTest;
-
+using InstructionSelectorMulDivTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorMulDivTest, Parameter) {
const MachInst2 dpi = GetParam();
@@ -1996,10 +1984,8 @@ static const MulDPInst kMulDPInstructions[] = {
&RawMachineAssembler::Int64Sub, kArm64Madd, kArm64Msub, kArm64Mneg,
MachineType::Int64()}};
-
-typedef InstructionSelectorTestWithParam<MulDPInst>
- InstructionSelectorIntDPWithIntMulTest;
-
+using InstructionSelectorIntDPWithIntMulTest =
+ InstructionSelectorTestWithParam<MulDPInst>;
TEST_P(InstructionSelectorIntDPWithIntMulTest, AddWithMul) {
const MulDPInst mdpi = GetParam();
@@ -2341,9 +2327,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
// -----------------------------------------------------------------------------
// Floating point instructions.
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorFPArithTest;
-
+using InstructionSelectorFPArithTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorFPArithTest, Parameter) {
const MachInst2 fpa = GetParam();
@@ -2360,8 +2345,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorFPArithTest,
::testing::ValuesIn(kFPArithInstructions));
-typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
-
+using InstructionSelectorFPCmpTest = InstructionSelectorTestWithParam<FPCmp>;
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
@@ -2421,9 +2405,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
// -----------------------------------------------------------------------------
// Conversions.
-typedef InstructionSelectorTestWithParam<Conversion>
- InstructionSelectorConversionTest;
-
+using InstructionSelectorConversionTest =
+ InstructionSelectorTestWithParam<Conversion>;
TEST_P(InstructionSelectorConversionTest, Parameter) {
const Conversion conv = GetParam();
@@ -2444,8 +2427,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorConversionTest,
::testing::ValuesIn(kConversionInstructions));
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorElidedChangeUint32ToUint64Test;
+using InstructionSelectorElidedChangeUint32ToUint64Test =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
const MachInst2 binop = GetParam();
@@ -2666,10 +2649,8 @@ static const MemoryAccess kMemoryAccesses[] = {
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
16384, 16392, 32752, 32760}}};
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
- InstructionSelectorMemoryAccessTest;
-
+using InstructionSelectorMemoryAccessTest =
+ InstructionSelectorTestWithParam<MemoryAccess>;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
@@ -2871,10 +2852,8 @@ static const MachInst2 kComparisonInstructions[] = {
MachineType::Int64()},
};
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorComparisonTest;
-
+using InstructionSelectorComparisonTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorComparisonTest, WithParameters) {
const MachInst2 cmp = GetParam();
@@ -3510,8 +3489,8 @@ const FlagSettingInst kFlagSettingInstructions[] = {
MachineType::Int32()},
kArm64Tst32}};
-typedef InstructionSelectorTestWithParam<FlagSettingInst>
- InstructionSelectorFlagSettingTest;
+using InstructionSelectorFlagSettingTest =
+ InstructionSelectorTestWithParam<FlagSettingInst>;
TEST_P(InstructionSelectorFlagSettingTest, CmpZeroRight) {
const FlagSettingInst inst = GetParam();
@@ -3747,10 +3726,8 @@ static const MachInst2 kLogicalWithNotRHSs[] = {
{&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eon,
MachineType::Int64()}};
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorLogicalWithNotRHSTest;
-
+using InstructionSelectorLogicalWithNotRHSTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorLogicalWithNotRHSTest, Parameter) {
const MachInst2 inst = GetParam();
@@ -4368,6 +4345,78 @@ TEST_F(InstructionSelectorTest, Float64Neg) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Float32NegWithMul) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n1 = m.AddNode(m.machine()->Float32Mul(), p0, p1);
+ Node* const n2 = m.AddNode(m.machine()->Float32Neg(), n1);
+ m.Return(n2);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float32Fnmul, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n2), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64NegWithMul) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n1 = m.AddNode(m.machine()->Float64Mul(), p0, p1);
+ Node* const n2 = m.AddNode(m.machine()->Float64Neg(), n1);
+ m.Return(n2);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Fnmul, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n2), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float32MulWithNeg) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n1 = m.AddNode(m.machine()->Float32Neg(), p0);
+ Node* const n2 = m.AddNode(m.machine()->Float32Mul(), n1, p1);
+ m.Return(n2);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float32Fnmul, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n2), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64MulWithNeg) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n1 = m.AddNode(m.machine()->Float64Neg(), p0);
+ Node* const n2 = m.AddNode(m.machine()->Float64Mul(), n1, p1);
+ m.Return(n2);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Fnmul, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n2), s.ToVreg(s[0]->Output()));
+}
+
TEST_F(InstructionSelectorTest, LoadAndShiftRight) {
{
int32_t immediates[] = {-256, -255, -3, -2, -1, 0, 1,
@@ -4548,6 +4597,209 @@ TEST_F(InstructionSelectorTest, ExternalReferenceLoad2) {
EXPECT_NE(kMode_Root, s[0]->addressing_mode());
}
+namespace {
+// Builds a call with the specified signature and nodes as arguments.
+// Then checks that the correct number of kArm64Poke and kArm64PokePair were
+// generated.
+void TestPokePair(InstructionSelectorTest::StreamBuilder& m, Zone* zone,
+ MachineSignature::Builder& builder, Node* nodes[],
+ int num_nodes, int expected_poke_pair, int expected_poke) {
+ auto call_descriptor =
+ InstructionSelectorTest::StreamBuilder::MakeSimpleCallDescriptor(
+ zone, builder.Build());
+
+ m.CallN(call_descriptor, num_nodes, nodes);
+ m.Return(m.UndefinedConstant());
+
+ auto s = m.Build();
+ int num_poke_pair = 0;
+ int num_poke = 0;
+ for (size_t i = 0; i < s.size(); ++i) {
+ if (s[i]->arch_opcode() == kArm64PokePair) {
+ num_poke_pair++;
+ }
+
+ if (s[i]->arch_opcode() == kArm64Poke) {
+ num_poke++;
+ }
+ }
+
+ EXPECT_EQ(expected_poke_pair, num_poke_pair);
+ EXPECT_EQ(expected_poke, num_poke);
+}
+} // namespace
+
+TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsInt32) {
+ {
+ MachineSignature::Builder builder(zone(), 0, 3);
+ builder.AddParam(MachineType::Int32());
+ builder.AddParam(MachineType::Int32());
+ builder.AddParam(MachineType::Int32());
+
+ StreamBuilder m(this, MachineType::AnyTagged());
+ Node* nodes[] = {
+ m.UndefinedConstant(),
+ m.Int32Constant(0),
+ m.Int32Constant(0),
+ m.Int32Constant(0),
+ };
+
+ const int expected_poke_pair = 1;
+ // Note: The `+ 1` here comes from the padding Poke in
+ // EmitPrepareArguments.
+ const int expected_poke = 1 + 1;
+
+ TestPokePair(m, zone(), builder, nodes, arraysize(nodes),
+ expected_poke_pair, expected_poke);
+ }
+
+ {
+ MachineSignature::Builder builder(zone(), 0, 4);
+ builder.AddParam(MachineType::Int32());
+ builder.AddParam(MachineType::Int32());
+ builder.AddParam(MachineType::Int32());
+ builder.AddParam(MachineType::Int32());
+
+ StreamBuilder m(this, MachineType::AnyTagged());
+ Node* nodes[] = {
+ m.UndefinedConstant(), m.Int32Constant(0), m.Int32Constant(0),
+ m.Int32Constant(0), m.Int32Constant(0),
+ };
+
+ const int expected_poke_pair = 2;
+ const int expected_poke = 0;
+
+ TestPokePair(m, zone(), builder, nodes, arraysize(nodes),
+ expected_poke_pair, expected_poke);
+ }
+}
+
+TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsInt64) {
+ MachineSignature::Builder builder(zone(), 0, 4);
+ builder.AddParam(MachineType::Int64());
+ builder.AddParam(MachineType::Int64());
+ builder.AddParam(MachineType::Int64());
+ builder.AddParam(MachineType::Int64());
+
+ StreamBuilder m(this, MachineType::AnyTagged());
+ Node* nodes[] = {
+ m.UndefinedConstant(), m.Int64Constant(0), m.Int64Constant(0),
+ m.Int64Constant(0), m.Int64Constant(0),
+ };
+
+ const int expected_poke_pair = 2;
+ const int expected_poke = 0;
+
+ TestPokePair(m, zone(), builder, nodes, arraysize(nodes), expected_poke_pair,
+ expected_poke);
+}
+
+TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsFloat32) {
+ MachineSignature::Builder builder(zone(), 0, 4);
+ builder.AddParam(MachineType::Float32());
+ builder.AddParam(MachineType::Float32());
+ builder.AddParam(MachineType::Float32());
+ builder.AddParam(MachineType::Float32());
+
+ StreamBuilder m(this, MachineType::AnyTagged());
+ Node* nodes[] = {
+ m.UndefinedConstant(), m.Float32Constant(0.0f), m.Float32Constant(0.0f),
+ m.Float32Constant(0.0f), m.Float32Constant(0.0f),
+ };
+
+ const int expected_poke_pair = 2;
+ const int expected_poke = 0;
+
+ TestPokePair(m, zone(), builder, nodes, arraysize(nodes), expected_poke_pair,
+ expected_poke);
+}
+
+TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsFloat64) {
+ MachineSignature::Builder builder(zone(), 0, 4);
+ builder.AddParam(MachineType::Float64());
+ builder.AddParam(MachineType::Float64());
+ builder.AddParam(MachineType::Float64());
+ builder.AddParam(MachineType::Float64());
+
+ StreamBuilder m(this, MachineType::AnyTagged());
+ Node* nodes[] = {
+ m.UndefinedConstant(), m.Float64Constant(0.0f), m.Float64Constant(0.0f),
+ m.Float64Constant(0.0f), m.Float64Constant(0.0f),
+ };
+
+ const int expected_poke_pair = 2;
+ const int expected_poke = 0;
+
+ TestPokePair(m, zone(), builder, nodes, arraysize(nodes), expected_poke_pair,
+ expected_poke);
+}
+
+TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsIntFloatMixed) {
+ {
+ MachineSignature::Builder builder(zone(), 0, 4);
+ builder.AddParam(MachineType::Int32());
+ builder.AddParam(MachineType::Float32());
+ builder.AddParam(MachineType::Int32());
+ builder.AddParam(MachineType::Float32());
+
+ StreamBuilder m(this, MachineType::AnyTagged());
+ Node* nodes[] = {
+ m.UndefinedConstant(), m.Int32Constant(0), m.Float32Constant(0.0f),
+ m.Int32Constant(0), m.Float32Constant(0.0f),
+ };
+
+ const int expected_poke_pair = 0;
+ const int expected_poke = 4;
+
+ TestPokePair(m, zone(), builder, nodes, arraysize(nodes),
+ expected_poke_pair, expected_poke);
+ }
+
+ {
+ MachineSignature::Builder builder(zone(), 0, 7);
+ builder.AddParam(MachineType::Float32());
+ builder.AddParam(MachineType::Int32());
+ builder.AddParam(MachineType::Int32());
+ builder.AddParam(MachineType::Float64());
+ builder.AddParam(MachineType::Int64());
+ builder.AddParam(MachineType::Float64());
+ builder.AddParam(MachineType::Float64());
+
+ StreamBuilder m(this, MachineType::AnyTagged());
+ Node* nodes[] = {m.UndefinedConstant(), m.Float32Constant(0.0f),
+ m.Int32Constant(0), m.Int32Constant(0),
+ m.Float64Constant(0.0f), m.Int64Constant(0),
+ m.Float64Constant(0.0f), m.Float64Constant(0.0f)};
+
+ const int expected_poke_pair = 2;
+
+ // Note: The `+ 1` here comes from the padding Poke in
+ // EmitPrepareArguments.
+ const int expected_poke = 3 + 1;
+
+ TestPokePair(m, zone(), builder, nodes, arraysize(nodes),
+ expected_poke_pair, expected_poke);
+ }
+}
+
+TEST_F(InstructionSelectorTest, PokePairPrepareArgumentsSimd128) {
+ MachineSignature::Builder builder(zone(), 0, 2);
+ builder.AddParam(MachineType::Simd128());
+ builder.AddParam(MachineType::Simd128());
+
+ StreamBuilder m(this, MachineType::AnyTagged());
+ Node* nodes[] = {m.UndefinedConstant(),
+ m.AddNode(m.machine()->I32x4Splat(), m.Int32Constant(0)),
+ m.AddNode(m.machine()->I32x4Splat(), m.Int32Constant(0))};
+
+ const int expected_poke_pair = 0;
+ const int expected_poke = 2;
+
+ // Using kArm64PokePair is not currently supported for Simd128.
+ TestPokePair(m, zone(), builder, nodes, arraysize(nodes), expected_poke_pair,
+ expected_poke);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index 01400041a8..ecc1712e3d 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -4,12 +4,12 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/schedule.h"
-#include "src/flags.h"
-#include "src/objects-inl.h"
+#include "src/flags/flags.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/compiler-test-utils.h"
namespace v8 {
@@ -243,8 +243,8 @@ TARGET_TEST_F(InstructionSelectorTest, FinishRegion) {
// -----------------------------------------------------------------------------
// Phi.
-typedef InstructionSelectorTestWithParam<MachineType>
- InstructionSelectorPhiTest;
+using InstructionSelectorPhiTest =
+ InstructionSelectorTestWithParam<MachineType>;
TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
const MachineType type = GetParam();
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
index 3c4374101c..0ffe182e04 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.h
@@ -9,9 +9,9 @@
#include <set>
#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/macro-assembler.h"
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/raw-machine-assembler.h"
-#include "src/macro-assembler.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -88,6 +88,52 @@ class InstructionSelectorTest : public TestWithNativeContextAndZone {
const FrameStateFunctionInfo* GetFrameStateFunctionInfo(int parameter_count,
int local_count);
+ // Create a simple call descriptor for testing.
+ static CallDescriptor* MakeSimpleCallDescriptor(Zone* zone,
+ MachineSignature* msig) {
+ LocationSignature::Builder locations(zone, msig->return_count(),
+ msig->parameter_count());
+
+ // Add return location(s).
+ const int return_count = static_cast<int>(msig->return_count());
+ for (int i = 0; i < return_count; i++) {
+ locations.AddReturn(
+ LinkageLocation::ForCallerFrameSlot(-1 - i, msig->GetReturn(i)));
+ }
+
+ // Just put all parameters on the stack.
+ const int parameter_count = static_cast<int>(msig->parameter_count());
+ unsigned slot_index = -1;
+ for (int i = 0; i < parameter_count; i++) {
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(slot_index, msig->GetParam(i)));
+
+ // Slots are kSystemPointerSize sized. This reserves enough for space
+ // for types that might be bigger, eg. Simd128.
+ slot_index -=
+ std::max(1, ElementSizeInBytes(msig->GetParam(i).representation()) /
+ kSystemPointerSize);
+ }
+
+ const RegList kCalleeSaveRegisters = 0;
+ const RegList kCalleeSaveFPRegisters = 0;
+
+ MachineType target_type = MachineType::Pointer();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallAddress, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kCanUseRoots, // flags
+ "iselect-test-call");
+ }
+
private:
CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type) {
MachineSignature::Builder builder(zone, 1, 0);
@@ -125,46 +171,7 @@ class InstructionSelectorTest : public TestWithNativeContextAndZone {
return MakeSimpleCallDescriptor(zone, builder.Build());
}
- private:
InstructionSelectorTest* test_;
-
- // Create a simple call descriptor for testing.
- CallDescriptor* MakeSimpleCallDescriptor(Zone* zone,
- MachineSignature* msig) {
- LocationSignature::Builder locations(zone, msig->return_count(),
- msig->parameter_count());
-
- // Add return location(s).
- const int return_count = static_cast<int>(msig->return_count());
- for (int i = 0; i < return_count; i++) {
- locations.AddReturn(
- LinkageLocation::ForCallerFrameSlot(-1 - i, msig->GetReturn(i)));
- }
-
- // Just put all parameters on the stack.
- const int parameter_count = static_cast<int>(msig->parameter_count());
- for (int i = 0; i < parameter_count; i++) {
- locations.AddParam(
- LinkageLocation::ForCallerFrameSlot(-1 - i, msig->GetParam(i)));
- }
-
- const RegList kCalleeSaveRegisters = 0;
- const RegList kCalleeSaveFPRegisters = 0;
-
- MachineType target_type = MachineType::Pointer();
- LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallAddress, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- 0, // stack_parameter_count
- Operator::kNoProperties, // properties
- kCalleeSaveRegisters, // callee-saved registers
- kCalleeSaveFPRegisters, // callee-saved fp regs
- CallDescriptor::kCanUseRoots, // flags
- "iselect-test-call");
- }
};
class Stream final {
@@ -272,8 +279,8 @@ class InstructionSelectorTest : public TestWithNativeContextAndZone {
friend class StreamBuilder;
- typedef std::map<int, Constant> ConstantMap;
- typedef std::map<NodeId, int> VirtualRegisters;
+ using ConstantMap = std::map<int, Constant>;
+ using VirtualRegisters = std::map<NodeId, int>;
ConstantMap constants_;
ConstantMap immediates_;
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
index 55dbe167c1..82a8b3019d 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/backend/instruction-sequence-unittest.h
@@ -7,8 +7,8 @@
#include <memory>
+#include "src/codegen/register-configuration.h"
#include "src/compiler/backend/instruction.h"
-#include "src/register-configuration.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -27,7 +27,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
static constexpr MachineRepresentation kSimd128 =
MachineRepresentation::kSimd128;
- typedef RpoNumber Rpo;
+ using Rpo = RpoNumber;
struct VReg {
VReg() : value_(kNoValue) {}
@@ -38,7 +38,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
MachineRepresentation rep_ = kNoRep;
};
- typedef std::pair<VReg, VReg> VRegPair;
+ using VRegPair = std::pair<VReg, VReg>;
enum TestOperandType {
kInvalid,
@@ -272,9 +272,9 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
int expected_blocks_;
};
- typedef std::vector<LoopData> LoopBlocks;
- typedef std::map<int, const Instruction*> Instructions;
- typedef std::vector<BlockCompletion> Completions;
+ using LoopBlocks = std::vector<LoopData>;
+ using Instructions = std::map<int, const Instruction*>;
+ using Completions = std::vector<BlockCompletion>;
std::unique_ptr<RegisterConfiguration> config_;
InstructionSequence* sequence_;
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
index 09b4ea9295..93e7244f05 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/backend/instruction.h"
-#include "src/register-configuration.h"
+#include "src/codegen/register-configuration.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest-support.h"
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 53b9c6a241..fa779891ac 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/interpreter/bytecode-array-builder.h"
@@ -10,7 +10,7 @@
#include "src/interpreter/bytecode-decoder.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/control-flow-builders.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
index 68a701136f..0541f68440 100644
--- a/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/code-assembler-unittest.cc
@@ -4,11 +4,11 @@
#include "test/unittests/compiler/code-assembler-unittest.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors.h"
#include "src/compiler/node.h"
-#include "src/interface-descriptors.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/node-test-utils.h"
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index f40cab2758..c97bb96b49 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/common-operator.h"
#include "src/compiler/common-operator-reducer.h"
+#include "src/codegen/machine-type.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/operator.h"
#include "src/compiler/simplified-operator.h"
-#include "src/machine-type.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index 6ee11be686..d30449daa7 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/constant-folding-reducer.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
@@ -11,7 +11,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
diff --git a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
index 368c297a38..c37aeeb839 100644
--- a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
@@ -3,11 +3,11 @@
// found in the LICENSE file.
#include "src/compiler/control-equivalence.h"
-#include "src/bit-vector.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/node-properties.h"
+#include "src/utils/bit-vector.h"
#include "src/zone/zone-containers.h"
#include "test/unittests/compiler/graph-unittest.h"
diff --git a/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc b/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc
new file mode 100644
index 0000000000..65bdb4c46e
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc
@@ -0,0 +1,1094 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/decompression-elimination.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Test fixture: wires a MachineOperatorBuilder and a SimplifiedOperatorBuilder
+// into GraphTest so each test can build a small graph and run the
+// DecompressionElimination reducer on a single node.
+class DecompressionEliminationTest : public GraphTest {
+ public:
+ DecompressionEliminationTest()
+ : GraphTest(),
+ machine_(zone(), MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kNoFlags),
+ simplified_(zone()) {}
+ ~DecompressionEliminationTest() override = default;
+
+ protected:
+ // Runs DecompressionElimination on |node|. The editor is a StrictMock, so
+ // any editor callback a test did not explicitly expect fails the test.
+ Reduction Reduce(Node* node) {
+ StrictMock<MockAdvancedReducerEditor> editor;
+ DecompressionElimination decompression_elimination(&editor, graph(),
+ machine(), common());
+ return decompression_elimination.Reduce(node);
+ }
+ MachineOperatorBuilder* machine() { return &machine_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+ SimplifiedOperatorBuilder simplified_;
+};
+
+// -----------------------------------------------------------------------------
+// Direct Decompression & Compression
+
+// compress(decompress(load)) round-trip: the reducer replaces the
+// ChangeTaggedToCompressed node with the original (still compressed) load.
+TEST_F(DecompressionEliminationTest, BasicDecompressionCompression) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ // Create the graph
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
+ index, effect, control);
+ Node* changeToTagged =
+ graph()->NewNode(machine()->ChangeCompressedToTagged(), load);
+ Node* changeToCompressed =
+ graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
+ effect = graph()->NewNode(simplified()->StoreElement(access), object, index,
+ changeToCompressed, effect, control);
+
+ // Reduce
+ Reduction r = Reduce(changeToCompressed);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+// Same round-trip as above, but with the Signed-specialized conversion pair.
+TEST_F(DecompressionEliminationTest, BasicDecompressionCompressionSigned) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::TaggedSigned(), kNoWriteBarrier};
+
+ // Create the graph
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
+ index, effect, control);
+ Node* changeToTagged =
+ graph()->NewNode(machine()->ChangeCompressedSignedToTaggedSigned(), load);
+ Node* changeToCompressed = graph()->NewNode(
+ machine()->ChangeTaggedSignedToCompressedSigned(), changeToTagged);
+ effect = graph()->NewNode(simplified()->StoreElement(access), object, index,
+ changeToCompressed, effect, control);
+
+ // Reduce
+ Reduction r = Reduce(changeToCompressed);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+// Same round-trip as above, but with the Pointer-specialized conversion pair.
+TEST_F(DecompressionEliminationTest, BasicDecompressionCompressionPointer) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::TaggedPointer(), kNoWriteBarrier};
+
+ // Create the graph
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
+ index, effect, control);
+ Node* changeToTagged = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load);
+ Node* changeToCompressed = graph()->NewNode(
+ machine()->ChangeTaggedPointerToCompressedPointer(), changeToTagged);
+ effect = graph()->NewNode(simplified()->StoreElement(access), object, index,
+ changeToCompressed, effect, control);
+
+ // Reduce
+ Reduction r = Reduce(changeToCompressed);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+// -----------------------------------------------------------------------------
+// Direct Decompression & Compression - border cases
+
+// For example, if we are lowering a CheckedCompressedToTaggedPointer in the
+// effect linearization phase we will change that to
+// ChangeCompressedPointerToTaggedPointer. Then, we might end up with a chain of
+// Parent <- ChangeCompressedPointerToTaggedPointer <- ChangeTaggedToCompressed
+// <- Child.
+// Similarly, we have cases with Signed instead of Pointer.
+// The following border case tests will test that the functionality is robust
+// enough to handle that.
+
+// Border case: a Signed-specialized decompress followed by the generic
+// ChangeTaggedToCompressed must still fold back to the load.
+TEST_F(DecompressionEliminationTest,
+ BasicDecompressionCompressionBorderCaseSigned) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ ElementAccess const loadAccess = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+ ElementAccess const storeAccess = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+
+ // Create the graph
+ Node* load = graph()->NewNode(simplified()->LoadElement(loadAccess), object,
+ index, effect, control);
+ Node* changeToTagged =
+ graph()->NewNode(machine()->ChangeCompressedSignedToTaggedSigned(), load);
+ Node* changeToCompressed =
+ graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
+ effect = graph()->NewNode(simplified()->StoreElement(storeAccess), object,
+ index, changeToCompressed, effect, control);
+
+ // Reduce
+ Reduction r = Reduce(changeToCompressed);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+// Border case: a Pointer-specialized decompress followed by the generic
+// ChangeTaggedToCompressed must still fold back to the load.
+TEST_F(DecompressionEliminationTest,
+ BasicDecompressionCompressionBorderCasePointer) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ ElementAccess const loadAccess = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+ ElementAccess const storeAccess = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::TaggedPointer(),
+ kNoWriteBarrier};
+
+ // Create the graph
+ Node* load = graph()->NewNode(simplified()->LoadElement(loadAccess), object,
+ index, effect, control);
+ Node* changeToTagged = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load);
+ Node* changeToCompressed =
+ graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
+ effect = graph()->NewNode(simplified()->StoreElement(storeAccess), object,
+ index, changeToCompressed, effect, control);
+
+ // Reduce
+ Reduction r = Reduce(changeToCompressed);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+// We also have cases of ChangeCompressedToTagged <-
+// ChangeTaggedPointerToCompressedPointer, where the
+// ChangeTaggedPointerToCompressedPointer was introduced while lowering a
+// NewConsString in the effect control linearizer.
+
+// Border case: Pointer-specialized decompress of a TaggedPointer load,
+// recompressed via the generic op, still folds back to the load.
+TEST_F(DecompressionEliminationTest,
+ BasicDecompressionCompressionBorderCasePointerDecompression) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ ElementAccess const loadAccess = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::TaggedPointer(),
+ kNoWriteBarrier};
+ ElementAccess const storeAccess = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::AnyTagged(), kNoWriteBarrier};
+
+ // Create the graph
+ Node* load = graph()->NewNode(simplified()->LoadElement(loadAccess), object,
+ index, effect, control);
+ Node* changeToTagged = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load);
+ Node* changeToCompressed =
+ graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
+ effect = graph()->NewNode(simplified()->StoreElement(storeAccess), object,
+ index, changeToCompressed, effect, control);
+
+ // Reduce
+ Reduction r = Reduce(changeToCompressed);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
+}
+
+// -----------------------------------------------------------------------------
+// Compress after constant
+
+// Compressing an Int64Constant that is only stored: the reducer replaces the
+// compression with a (truncated) Int32Constant for every compression op.
+TEST_F(DecompressionEliminationTest,
+ DecompressionConstantStoreElementInt64Constant) {
+ // Skip test if pointer compression is not enabled.
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables.
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+
+ const ElementAccess element_accesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
+ kNoWriteBarrier}};
+
+ // compression_ops[i] pairs with element_accesses[i] (Any/Signed/Pointer).
+ const Operator* compression_ops[] = {
+ machine()->ChangeTaggedToCompressed(),
+ machine()->ChangeTaggedSignedToCompressedSigned(),
+ machine()->ChangeTaggedPointerToCompressedPointer()};
+
+ ASSERT_EQ(arraysize(compression_ops), arraysize(element_accesses));
+
+ // Covers zero, small, high-half-only, and sign-bit boundary patterns.
+ const int64_t constants[] = {static_cast<int64_t>(0x0000000000000000),
+ static_cast<int64_t>(0x0000000000000001),
+ static_cast<int64_t>(0x0000FFFFFFFF0000),
+ static_cast<int64_t>(0x7FFFFFFFFFFFFFFF),
+ static_cast<int64_t>(0x8000000000000000),
+ static_cast<int64_t>(0x8000000000000001),
+ static_cast<int64_t>(0x8000FFFFFFFF0000),
+ static_cast<int64_t>(0x8FFFFFFFFFFFFFFF),
+ static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
+
+ // For every compression.
+ for (size_t i = 0; i < arraysize(compression_ops); ++i) {
+ // For every Int64Constant.
+ for (size_t j = 0; j < arraysize(constants); ++j) {
+ // Create the graph.
+ Node* constant = graph()->NewNode(common()->Int64Constant(constants[j]));
+ Node* changeToCompressed = graph()->NewNode(compression_ops[i], constant);
+ effect =
+ graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
+ object, index, changeToCompressed, effect, control);
+ // Reduce.
+ Reduction r = Reduce(changeToCompressed);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kInt32Constant);
+ }
+ }
+}
+
+// Compressing a HeapConstant that is only stored: the reducer is expected to
+// replace the compression with the constant itself.
+TEST_F(DecompressionEliminationTest,
+ DecompressionConstantStoreElementHeapConstant) {
+ // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
+ // exists, since it breaks with verify CSA on.
+ if (COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+ // NOTE(review): dead code — the TODO guard above returns when
+ // COMPRESS_POINTERS_BOOL is true, and this guard returns when it is false,
+ // so the body below is never executed under either flag value. Remove the
+ // TODO guard (not this one) once CompressedHeapConstant exists.
+ // Skip test if pointer compression is not enabled.
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables.
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+
+ const ElementAccess element_accesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
+ kNoWriteBarrier}};
+
+ // compression_ops[i] pairs with element_accesses[i] (Any/Signed/Pointer).
+ const Operator* compression_ops[] = {
+ machine()->ChangeTaggedToCompressed(),
+ machine()->ChangeTaggedSignedToCompressedSigned(),
+ machine()->ChangeTaggedPointerToCompressedPointer()};
+
+ ASSERT_EQ(arraysize(compression_ops), arraysize(element_accesses));
+
+ // NOTE(review): the hex literals implicitly convert to double and large
+ // ones lose precision — presumably intentional, as only opcode identity is
+ // asserted below; confirm if exact values ever matter.
+ const Handle<HeapNumber> heap_constants[] = {
+ factory()->NewHeapNumber(0.0),
+ factory()->NewHeapNumber(-0.0),
+ factory()->NewHeapNumber(11.2),
+ factory()->NewHeapNumber(-11.2),
+ factory()->NewHeapNumber(3.1415 + 1.4142),
+ factory()->NewHeapNumber(3.1415 - 1.4142),
+ factory()->NewHeapNumber(0x0000000000000000),
+ factory()->NewHeapNumber(0x0000000000000001),
+ factory()->NewHeapNumber(0x0000FFFFFFFF0000),
+ factory()->NewHeapNumber(0x7FFFFFFFFFFFFFFF),
+ factory()->NewHeapNumber(0x8000000000000000),
+ factory()->NewHeapNumber(0x8000000000000001),
+ factory()->NewHeapNumber(0x8000FFFFFFFF0000),
+ factory()->NewHeapNumber(0x8FFFFFFFFFFFFFFF),
+ factory()->NewHeapNumber(0xFFFFFFFFFFFFFFFF)};
+
+ // For every compression.
+ for (size_t i = 0; i < arraysize(compression_ops); ++i) {
+ // For every HeapNumber.
+ for (size_t j = 0; j < arraysize(heap_constants); ++j) {
+ // Create the graph.
+ Node* constant =
+ graph()->NewNode(common()->HeapConstant(heap_constants[j]));
+ Node* changeToCompressed = graph()->NewNode(compression_ops[i], constant);
+ effect =
+ graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
+ object, index, changeToCompressed, effect, control);
+ // Reduce.
+ Reduction r = Reduce(changeToCompressed);
+ ASSERT_TRUE(r.Changed());
+ // TODO(v8:8977): Change the IrOpcode here to kCompressedHeapConstant when
+ // that is in place.
+ EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kHeapConstant);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Phi
+
+// A single-input Phi fed by a decompress: the decompress is pushed past the
+// Phi, so the reduction yields a node with the matching decompress opcode.
+TEST_F(DecompressionEliminationTest, PhiOneDecompress) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ const int number_of_inputs = 1;
+
+ const Operator* decompression_ops[] = {
+ machine()->ChangeCompressedToTagged(),
+ machine()->ChangeCompressedSignedToTaggedSigned(),
+ machine()->ChangeCompressedPointerToTaggedPointer()};
+
+ const ElementAccess element_accesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
+ kNoWriteBarrier}};
+
+ // opcodes[i] is the expected replacement opcode for decompression_ops[i].
+ const IrOpcode::Value opcodes[] = {
+ IrOpcode::kChangeCompressedToTagged,
+ IrOpcode::kChangeCompressedSignedToTaggedSigned,
+ IrOpcode::kChangeCompressedPointerToTaggedPointer};
+
+ ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
+ ASSERT_EQ(arraysize(opcodes), arraysize(element_accesses));
+
+ // For every access
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph
+ Node* load =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* change_to_tagged = graph()->NewNode(decompression_ops[i], load);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
+ change_to_tagged, control);
+
+ // Reduce
+ Reduction r = Reduce(phi);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(opcodes[i], r.replacement()->opcode());
+ }
+}
+
+// A three-input Phi whose inputs all use the same decompress op: the shared
+// decompress is hoisted after the Phi, keeping its specialized opcode.
+TEST_F(DecompressionEliminationTest, PhiThreeDecompressSameRepresentation) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ const int number_of_inputs = 3;
+
+ const Operator* decompression_ops[] = {
+ machine()->ChangeCompressedToTagged(),
+ machine()->ChangeCompressedSignedToTaggedSigned(),
+ machine()->ChangeCompressedPointerToTaggedPointer()};
+
+ const ElementAccess element_accesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
+ kNoWriteBarrier}};
+
+ // opcodes[i] is the expected replacement opcode for decompression_ops[i].
+ const IrOpcode::Value opcodes[] = {
+ IrOpcode::kChangeCompressedToTagged,
+ IrOpcode::kChangeCompressedSignedToTaggedSigned,
+ IrOpcode::kChangeCompressedPointerToTaggedPointer};
+
+ ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
+ ASSERT_EQ(arraysize(opcodes), arraysize(element_accesses));
+
+ // For every access
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph
+ Node* load1 =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* load2 =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* load3 =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* change_to_tagged1 = graph()->NewNode(decompression_ops[i], load1);
+ Node* change_to_tagged2 = graph()->NewNode(decompression_ops[i], load2);
+ Node* change_to_tagged3 = graph()->NewNode(decompression_ops[i], load3);
+
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
+ change_to_tagged1, change_to_tagged2, change_to_tagged3, control);
+
+ // Reduce
+ Reduction r = Reduce(phi);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(opcodes[i], r.replacement()->opcode());
+ }
+}
+
+// A three-input Phi where one input uses the generic (Any) decompress: the
+// hoisted decompress must fall back to the conservative generic opcode.
+TEST_F(DecompressionEliminationTest, PhiThreeDecompressOneAnyRepresentation) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ const int number_of_inputs = 3;
+
+ const Operator* decompression_ops[] = {
+ machine()->ChangeCompressedSignedToTaggedSigned(),
+ machine()->ChangeCompressedPointerToTaggedPointer()};
+
+ const ElementAccess element_accesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
+ kNoWriteBarrier}};
+
+ const ElementAccess any_access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::AnyCompressed(),
+ kNoWriteBarrier};
+
+ ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
+
+ // For every access
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph
+ Node* load1 =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* load2 =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ // Note that load3 loads a CompressedAny instead of element_accesses[i]
+ Node* load3 = graph()->NewNode(simplified()->LoadElement(any_access),
+ object, index, effect, control);
+ Node* change_to_tagged1 = graph()->NewNode(decompression_ops[i], load1);
+ Node* change_to_tagged2 = graph()->NewNode(decompression_ops[i], load2);
+ Node* change_to_tagged3 =
+ graph()->NewNode(machine()->ChangeCompressedToTagged(), load3);
+
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
+ change_to_tagged1, change_to_tagged2, change_to_tagged3, control);
+
+ // Reduce
+ Reduction r = Reduce(phi);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(IrOpcode::kChangeCompressedToTagged, r.replacement()->opcode());
+ }
+}
+
+// Negative case: if one Phi input is a raw (not decompressed) load, the
+// decompress cannot be hoisted and the Phi must be left unchanged.
+TEST_F(DecompressionEliminationTest, PhiThreeInputsOneNotDecompressed) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ const int number_of_inputs = 3;
+
+ const Operator* decompression_ops[] = {
+ machine()->ChangeCompressedToTagged(),
+ machine()->ChangeCompressedSignedToTaggedSigned(),
+ machine()->ChangeCompressedPointerToTaggedPointer()};
+
+ const ElementAccess element_accesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
+ kNoWriteBarrier}};
+
+ const IrOpcode::Value opcodes[] = {
+ IrOpcode::kChangeCompressedToTagged,
+ IrOpcode::kChangeCompressedSignedToTaggedSigned,
+ IrOpcode::kChangeCompressedPointerToTaggedPointer};
+
+ ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
+ ASSERT_EQ(arraysize(opcodes), arraysize(element_accesses));
+
+ // For every access
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph
+ Node* load1 =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* load2 =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* load3 =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* change_to_tagged1 = graph()->NewNode(decompression_ops[i], load1);
+ Node* change_to_tagged2 = graph()->NewNode(decompression_ops[i], load2);
+
+ // load3 feeds the Phi directly, without a decompress in between.
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
+ change_to_tagged1, change_to_tagged2, load3, control);
+
+ // Reduce
+ Reduction r = Reduce(phi);
+ ASSERT_FALSE(r.Changed());
+ }
+}
+
+// In the case of having one decompress Signed and one Pointer, we have to
+// generate the conservative decompress any after the Phi.
+// Mixed Signed + Pointer decompress inputs: the hoisted decompress must be
+// the conservative generic ChangeCompressedToTagged.
+TEST_F(DecompressionEliminationTest, PhiTwoDecompressesOneSignedOnePointer) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ const int number_of_inputs = 2;
+ const ElementAccess signed_access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::CompressedSigned(),
+ kNoWriteBarrier};
+ const ElementAccess pointer_access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::CompressedPointer(),
+ kNoWriteBarrier};
+
+ // Create the graph
+ Node* load1 = graph()->NewNode(simplified()->LoadElement(signed_access),
+ object, index, effect, control);
+ Node* load2 = graph()->NewNode(simplified()->LoadElement(pointer_access),
+ object, index, effect, control);
+ Node* change_to_tagged1 = graph()->NewNode(
+ machine()->ChangeCompressedSignedToTaggedSigned(), load1);
+ Node* change_to_tagged2 = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load2);
+
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
+ change_to_tagged1, change_to_tagged2, control);
+
+ // Reduce
+ Reduction r = Reduce(phi);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(IrOpcode::kChangeCompressedToTagged, r.replacement()->opcode());
+}
+
+// -----------------------------------------------------------------------------
+// TypedStateValues
+
+// TypedStateValues with a single decompressed input: the decompress is
+// elided, since deopt state can consume the compressed value directly.
+TEST_F(DecompressionEliminationTest, TypedStateValuesOneDecompress) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ const int numberOfInputs = 1;
+ // Zone-allocated so the vector outlives this scope with the graph.
+ const ZoneVector<MachineType>* types =
+ new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ SparseInputMask dense = SparseInputMask::Dense();
+
+ const ElementAccess ElementAccesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
+ kNoWriteBarrier}};
+
+ // For every access
+ for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
+ // Create the graph
+ Node* load = graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]),
+ object, index, effect, control);
+ Node* changeToTagged = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load);
+ Node* typedStateValuesOneDecompress = graph()->NewNode(
+ common()->TypedStateValues(types, dense), changeToTagged);
+
+ // Reduce
+ StrictMock<MockAdvancedReducerEditor> editor;
+ DecompressionElimination decompression_elimination(&editor, graph(),
+ machine(), common());
+ Reduction r =
+ decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ ASSERT_TRUE(r.Changed());
+ }
+}
+
+// TypedStateValues with two decompressed inputs and one raw load mixed in:
+// the reduction still fires (the decompresses are elided).
+TEST_F(DecompressionEliminationTest, TypedStateValuesTwoDecompresses) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ const int numberOfInputs = 3;
+ // Zone-allocated so the vector outlives this scope with the graph.
+ const ZoneVector<MachineType>* types =
+ new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(numberOfInputs, graph()->zone())
+ SparseInputMask dense = SparseInputMask::Dense();
+ const ElementAccess ElementAccesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
+ kNoWriteBarrier}};
+
+ // For every access
+ for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
+ // Create the graph
+ Node* load1 =
+ graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ index, effect, control);
+ Node* changeToTagged1 = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load1);
+ Node* load2 =
+ graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ index, effect, control);
+ Node* changeToTagged2 = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load2);
+ Node* typedStateValuesOneDecompress =
+ graph()->NewNode(common()->TypedStateValues(types, dense),
+ changeToTagged1, load1, changeToTagged2);
+
+ // Reduce
+ StrictMock<MockAdvancedReducerEditor> editor;
+ DecompressionElimination decompression_elimination(&editor, graph(),
+ machine(), common());
+ Reduction r =
+ decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ ASSERT_TRUE(r.Changed());
+ }
+}
+
+// TypedStateValues where all three inputs are decompressed loads: the
+// reduction fires and elides every decompress.
+TEST_F(DecompressionEliminationTest, TypedStateValuesAllDecompresses) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ const int numberOfInputs = 3;
+ // Zone-allocated so the vector outlives this scope with the graph.
+ const ZoneVector<MachineType>* types =
+ new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ SparseInputMask dense = SparseInputMask::Dense();
+ const ElementAccess ElementAccesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
+ kNoWriteBarrier}};
+
+ // For every access
+ for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
+ // Create the graph
+ Node* load1 =
+ graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ index, effect, control);
+ Node* changeToTagged1 = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load1);
+ Node* load2 =
+ graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ index, effect, control);
+ Node* changeToTagged2 = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load2);
+ Node* load3 =
+ graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ index, effect, control);
+ Node* changeToTagged3 = graph()->NewNode(
+ machine()->ChangeCompressedPointerToTaggedPointer(), load3);
+ Node* typedStateValuesOneDecompress =
+ graph()->NewNode(common()->TypedStateValues(types, dense),
+ changeToTagged1, changeToTagged2, changeToTagged3);
+
+ // Reduce
+ StrictMock<MockAdvancedReducerEditor> editor;
+ DecompressionElimination decompression_elimination(&editor, graph(),
+ machine(), common());
+ Reduction r =
+ decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ ASSERT_TRUE(r.Changed());
+ }
+}
+
+// Negative case: TypedStateValues whose inputs are plain loads (no
+// decompress nodes) must not be reduced.
+TEST_F(DecompressionEliminationTest, TypedStateValuesNoDecompresses) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+ const int numberOfInputs = 3;
+ // Zone-allocated so the vector outlives this scope with the graph.
+ const ZoneVector<MachineType>* types =
+ new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ SparseInputMask dense = SparseInputMask::Dense();
+ const ElementAccess ElementAccesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
+ kNoWriteBarrier}};
+
+ // For every access
+ for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
+ // Create the graph
+ Node* load = graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]),
+ object, index, effect, control);
+ Node* typedStateValuesOneDecompress = graph()->NewNode(
+ common()->TypedStateValues(types, dense), load, load, load);
+
+ // Reduce
+ StrictMock<MockAdvancedReducerEditor> editor;
+ DecompressionElimination decompression_elimination(&editor, graph(),
+ machine(), common());
+ Reduction r =
+ decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ ASSERT_FALSE(r.Changed());
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Word64Equal comparison of two decompressions
+
+TEST_F(DecompressionEliminationTest, TwoDecompressionWord64Equal) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+
+ const Operator* DecompressionOps[] = {
+ machine()->ChangeCompressedToTagged(),
+ machine()->ChangeCompressedSignedToTaggedSigned(),
+ machine()->ChangeCompressedPointerToTaggedPointer()};
+
+ const ElementAccess ElementAccesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
+ kNoWriteBarrier}};
+
+ ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
+
+ // For every decompression (lhs)
+ for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
+ // For every decompression (rhs)
+ for (size_t k = 0; k < arraysize(DecompressionOps); ++k) {
+ // Create the graph
+ Node* load1 =
+ graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ object, index, effect, control);
+ Node* changeToTagged1 = graph()->NewNode(DecompressionOps[j], load1);
+ Node* load2 =
+ graph()->NewNode(simplified()->LoadElement(ElementAccesses[k]),
+ object, index, effect, control);
+ Node* changeToTagged2 = graph()->NewNode(DecompressionOps[j], load2);
+ Node* comparison = graph()->NewNode(machine()->Word64Equal(),
+ changeToTagged1, changeToTagged2);
+ // Reduce
+ Reduction r = Reduce(comparison);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Word64Equal comparison of two decompressions, where lhs == rhs
+
+TEST_F(DecompressionEliminationTest, TwoDecompressionWord64EqualSameInput) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+
+ const Operator* DecompressionOps[] = {
+ machine()->ChangeCompressedToTagged(),
+ machine()->ChangeCompressedSignedToTaggedSigned(),
+ machine()->ChangeCompressedPointerToTaggedPointer()};
+
+ const ElementAccess ElementAccesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
+ kNoWriteBarrier}};
+
+ ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
+
+ // For every decompression (same for lhs and rhs)
+ for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
+ // Create the graph
+ Node* load = graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ object, index, effect, control);
+ Node* changeToTagged = graph()->NewNode(DecompressionOps[j], load);
+ Node* comparison = graph()->NewNode(machine()->Word64Equal(),
+ changeToTagged, changeToTagged);
+ // Reduce
+ Reduction r = Reduce(comparison);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Word64Equal comparison of decompress and a constant
+
+TEST_F(DecompressionEliminationTest, DecompressionConstantWord64Equal) {
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+
+ const Operator* DecompressionOps[] = {
+ machine()->ChangeCompressedToTagged(),
+ machine()->ChangeCompressedSignedToTaggedSigned(),
+ machine()->ChangeCompressedPointerToTaggedPointer()};
+
+ const ElementAccess ElementAccesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
+ kNoWriteBarrier}};
+
+ ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
+
+ const int64_t constants[] = {static_cast<int64_t>(0x0000000000000000),
+ static_cast<int64_t>(0x0000000000000001),
+ static_cast<int64_t>(0x0000FFFFFFFF0000),
+ static_cast<int64_t>(0x7FFFFFFFFFFFFFFF),
+ static_cast<int64_t>(0x8000000000000000),
+ static_cast<int64_t>(0x8000000000000001),
+ static_cast<int64_t>(0x8000FFFFFFFF0000),
+ static_cast<int64_t>(0x8FFFFFFFFFFFFFFF),
+ static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
+
+ // For every decompression (lhs)
+ for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
+ // For every constant (rhs)
+ for (size_t k = 0; k < arraysize(constants); ++k) {
+ // Test with both (lhs, rhs) combinations
+ for (bool lhsIsDecompression : {false, true}) {
+ // Create the graph
+ Node* load =
+ graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ object, index, effect, control);
+ Node* changeToTagged = graph()->NewNode(DecompressionOps[j], load);
+ Node* constant =
+ graph()->NewNode(common()->Int64Constant(constants[k]));
+
+ Node* lhs = lhsIsDecompression ? changeToTagged : constant;
+ Node* rhs = lhsIsDecompression ? constant : changeToTagged;
+ Node* comparison = graph()->NewNode(machine()->Word64Equal(), lhs, rhs);
+ // Reduce
+ Reduction r = Reduce(comparison);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
+ }
+ }
+ }
+}
+
+TEST_F(DecompressionEliminationTest, DecompressionHeapConstantWord64Equal) {
+ // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
+ // exists, since it breaks with verify CSA on.
+ if (COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+ // Skip test if pointer compression is not enabled
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+
+ // Define variables
+ Node* const control = graph()->start();
+ Node* object = Parameter(Type::Any(), 0);
+ Node* effect = graph()->start();
+ Node* index = Parameter(Type::UnsignedSmall(), 1);
+
+ const Operator* DecompressionOps[] = {
+ machine()->ChangeCompressedToTagged(),
+ machine()->ChangeCompressedSignedToTaggedSigned(),
+ machine()->ChangeCompressedPointerToTaggedPointer()};
+
+ const ElementAccess ElementAccesses[] = {
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
+ kNoWriteBarrier},
+ {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
+ kNoWriteBarrier}};
+
+ ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
+
+ const Handle<HeapNumber> heapConstants[] = {
+ factory()->NewHeapNumber(0.0),
+ factory()->NewHeapNumber(-0.0),
+ factory()->NewHeapNumber(11.2),
+ factory()->NewHeapNumber(-11.2),
+ factory()->NewHeapNumber(3.1415 + 1.4142),
+ factory()->NewHeapNumber(3.1415 - 1.4142),
+ factory()->NewHeapNumber(0x0000000000000000),
+ factory()->NewHeapNumber(0x0000000000000001),
+ factory()->NewHeapNumber(0x0000FFFFFFFF0000),
+ factory()->NewHeapNumber(0x7FFFFFFFFFFFFFFF),
+ factory()->NewHeapNumber(0x8000000000000000),
+ factory()->NewHeapNumber(0x8000000000000001),
+ factory()->NewHeapNumber(0x8000FFFFFFFF0000),
+ factory()->NewHeapNumber(0x8FFFFFFFFFFFFFFF),
+ factory()->NewHeapNumber(0xFFFFFFFFFFFFFFFF)};
+
+ // For every decompression (lhs)
+ for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
+ // For every constant (rhs)
+ for (size_t k = 0; k < arraysize(heapConstants); ++k) {
+ // Test with both (lhs, rhs) combinations
+ for (bool lhsIsDecompression : {false, true}) {
+ // Create the graph
+ Node* load =
+ graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ object, index, effect, control);
+ Node* changeToTagged = graph()->NewNode(DecompressionOps[j], load);
+ Node* constant =
+ graph()->NewNode(common()->HeapConstant(heapConstants[k]));
+
+ Node* lhs = lhsIsDecompression ? changeToTagged : constant;
+ Node* rhs = lhsIsDecompression ? constant : changeToTagged;
+ Node* comparison = graph()->NewNode(machine()->Word64Equal(), lhs, rhs);
+ // Reduce
+ Reduction r = Reduce(comparison);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
+ }
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
index 057d164d63..e658f281d7 100644
--- a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -40,7 +40,6 @@ class EffectControlLinearizerTest : public GraphTest {
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
SourcePositionTable* source_positions() { return source_positions_; }
NodeOriginTable* node_origins() { return node_origins_; }
- std::vector<Handle<Map>>* maps() { return &maps_; }
private:
MachineOperatorBuilder machine_;
@@ -49,7 +48,6 @@ class EffectControlLinearizerTest : public GraphTest {
JSGraph jsgraph_;
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
- std::vector<Handle<Map>> maps_;
};
namespace {
@@ -87,10 +85,9 @@ TEST_F(EffectControlLinearizerTest, SimpleLoad) {
schedule.AddReturn(start, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- EffectControlLinearizer::kDoNotMaskArrayIndex, maps());
- introducer.Run();
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex);
EXPECT_THAT(load,
IsLoadField(AccessBuilder::ForHeapNumberValue(), heap_number,
@@ -150,10 +147,9 @@ TEST_F(EffectControlLinearizerTest, DiamondLoad) {
schedule.AddReturn(mblock, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- EffectControlLinearizer::kDoNotMaskArrayIndex, maps());
- introducer.Run();
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex);
// The effect input to the return should be an effect phi with the
// newly introduced effectful change operators.
@@ -218,10 +214,9 @@ TEST_F(EffectControlLinearizerTest, LoopLoad) {
schedule.AddReturn(rblock, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- EffectControlLinearizer::kDoNotMaskArrayIndex, maps());
- introducer.Run();
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex);
ASSERT_THAT(ret, IsReturn(load, load, if_true));
EXPECT_THAT(load, IsLoadField(AccessBuilder::ForHeapNumberValue(),
@@ -282,10 +277,9 @@ TEST_F(EffectControlLinearizerTest, CloneBranch) {
schedule.AddNode(mblock, merge);
schedule.AddNode(mblock, graph()->end());
- EffectControlLinearizer introducer(
- jsgraph(), &schedule, zone(), source_positions(), node_origins(),
- EffectControlLinearizer::kDoNotMaskArrayIndex, maps());
- introducer.Run();
+ LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
+ node_origins(),
+ MaskArrayIndexEnable::kDoNotMaskArrayIndex);
Capture<Node *> branch1_capture, branch2_capture;
EXPECT_THAT(
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 051aa68e64..f433dda42e 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -7,7 +7,7 @@
#include "src/compiler/js-heap-copy-reducer.h"
#include "src/compiler/node-properties.h"
#include "src/heap/factory.h"
-#include "src/objects-inl.h" // TODO(everyone): Make typer.h IWYU compliant.
+#include "src/objects/objects-inl.h" // TODO(everyone): Make typer.h IWYU compliant.
#include "test/unittests/compiler/node-test-utils.h"
namespace v8 {
@@ -26,7 +26,6 @@ GraphTest::GraphTest(int num_parameters)
broker()->SetNativeContextRef();
}
-
GraphTest::~GraphTest() = default;
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index a4b719fe6b..fa42294a65 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -10,7 +10,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/typer.h"
-#include "src/handles.h"
+#include "src/handles/handles.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index 7b2c2c27f0..8851a6a2df 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -4,7 +4,7 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -195,10 +195,8 @@ static const MemoryAccess kMemoryAccesses[] = {
} // namespace
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
- InstructionSelectorMemoryAccessTest;
-
+using InstructionSelectorMemoryAccessTest =
+ InstructionSelectorTestWithParam<MemoryAccess>;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
@@ -508,9 +506,7 @@ const MultParam kMultParams[] = {{-1, false, kMode_None},
} // namespace
-
-typedef InstructionSelectorTestWithParam<MultParam> InstructionSelectorMultTest;
-
+using InstructionSelectorMultTest = InstructionSelectorTestWithParam<MultParam>;
static unsigned InputCountForLea(AddressingMode mode) {
switch (mode) {
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 917ed347bc..84d42b31d0 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -3,14 +3,14 @@
// found in the LICENSE file.
#include "src/compiler/int64-lowering.h"
+#include "src/codegen/signature.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/objects-inl.h"
-#include "src/signature.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-module.h"
#include "test/unittests/compiler/graph-unittest.h"
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index 495cb6db5c..b9f3ff8056 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -8,9 +8,9 @@
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/simplified-operator.h"
-#include "src/feedback-vector.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
+#include "src/objects/feedback-vector.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 1647a9dad7..5a0d54e861 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/js-create-lowering.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
@@ -11,9 +11,9 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/feedback-vector.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/objects/arguments.h"
+#include "src/objects/feedback-vector.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -85,7 +85,7 @@ TEST_F(JSCreateLoweringTest, JSCreate) {
EXPECT_THAT(
r.replacement(),
IsFinishRegion(
- IsAllocate(IsNumberConstant(function->initial_map()->instance_size()),
+ IsAllocate(IsNumberConstant(function->initial_map().instance_size()),
IsBeginRegion(effect), control),
_));
}
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 234fe940eb..d6e9876e64 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -97,37 +97,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsArray) {
// -----------------------------------------------------------------------------
-// %_IsTypedArray
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineIsTypedArray) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineIsTypedArray, 1), input,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
-
- Node* phi = r.replacement();
- Capture<Node*> branch, if_false;
- EXPECT_THAT(
- phi,
- IsPhi(
- MachineRepresentation::kTagged, IsFalseConstant(),
- IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
- IsLoadField(AccessBuilder::ForMap(), input,
- effect, CaptureEq(&if_false)),
- _, _),
- IsNumberConstant(JS_TYPED_ARRAY_TYPE)),
- IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
- IsBranch(IsObjectIsSmi(input), control))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-
-// -----------------------------------------------------------------------------
// %_IsJSReceiver
diff --git a/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc b/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
index bf9a144fab..db11bd28ca 100644
--- a/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-native-context-specialization-unittest.cc
@@ -8,7 +8,7 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/simplified-operator.h"
-#include "src/dtoa.h"
+#include "src/numbers/dtoa.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index b3326b0ad4..765a79db40 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -3,14 +3,14 @@
// found in the LICENSE file.
#include "src/compiler/js-typed-lowering.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index e7ff126702..e85bc09e1e 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -9,7 +9,7 @@
#include "src/base/overflowing-math.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/typer.h"
-#include "src/conversions-inl.h"
+#include "src/numbers/conversions-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
@@ -1414,6 +1414,39 @@ TEST_F(MachineOperatorReducerTest, Int32AddWithInt32SubWithConstantZero) {
EXPECT_THAT(r2.replacement(), IsInt32Sub(p0, p1));
}
+TEST_F(MachineOperatorReducerTest, Int32AddMergeConstants) {
+ Node* const p0 = Parameter(0);
+
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Int32Add(),
+ graph()->NewNode(machine()->Int32Add(), p0, Int32Constant(1)),
+ Int32Constant(2)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsInt32Add(p0, IsInt32Constant(3)));
+
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Int32Add(), Int32Constant(2),
+ graph()->NewNode(machine()->Int32Add(), p0, Int32Constant(1))));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsInt32Add(p0, IsInt32Constant(3)));
+}
+
+TEST_F(MachineOperatorReducerTest, Int64AddMergeConstants) {
+ Node* const p0 = Parameter(0);
+
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Int64Add(),
+ graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(1)),
+ Int64Constant(2)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsInt64Add(p0, IsInt64Constant(3)));
+
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Int64Add(), Int64Constant(2),
+ graph()->NewNode(machine()->Int64Add(), p0, Int64Constant(1))));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsInt64Add(p0, IsInt64Constant(3)));
+}
// -----------------------------------------------------------------------------
// Int32AddWithOverflow
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index c4a86afffd..24fc6a31c7 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -25,8 +25,8 @@ class MachineOperatorTestWithParam
const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
private:
- typedef ::testing::WithParamInterface<
- ::testing::tuple<MachineRepresentation, T> > B;
+ using B = ::testing::WithParamInterface<
+ ::testing::tuple<MachineRepresentation, T> >;
};
@@ -51,10 +51,8 @@ const MachineRepresentation kRepresentationsForStore[] = {
// -----------------------------------------------------------------------------
// Load operator.
-
-typedef MachineOperatorTestWithParam<LoadRepresentation>
- MachineLoadOperatorTest;
-
+using MachineLoadOperatorTest =
+ MachineOperatorTestWithParam<LoadRepresentation>;
TEST_P(MachineLoadOperatorTest, InstancesAreGloballyShared) {
MachineOperatorBuilder machine1(zone(), representation());
@@ -347,9 +345,7 @@ TEST_F(MachineOptionalOperatorTest, OptionalOperators) {
// -----------------------------------------------------------------------------
// Pseudo operators.
-
-typedef TestWithZone MachineOperatorTest;
-
+using MachineOperatorTest = TestWithZone;
TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
MachineOperatorBuilder machine(zone(), MachineRepresentation::kWord32);
diff --git a/deps/v8/test/unittests/compiler/mips/OWNERS b/deps/v8/test/unittests/compiler/mips/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/test/unittests/compiler/mips/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 09a897a54e..0728d32304 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -4,7 +4,7 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -25,8 +25,8 @@ std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
return os << mi.constructor_name;
}
-typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
-typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+using MachInst1 = MachInst<Node* (RawMachineAssembler::*)(Node*)>;
+using MachInst2 = MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)>;
// To avoid duplicated code IntCmp helper structure
// is created. It contains MachInst2 with two nodes and expected_size
@@ -274,9 +274,7 @@ const Conversion kFloat32RoundInstructions[] = {
} // namespace
-
-typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
-
+using InstructionSelectorFPCmpTest = InstructionSelectorTestWithParam<FPCmp>;
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
@@ -299,9 +297,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
// Arithmetic compare instructions integers.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<IntCmp> InstructionSelectorCmpTest;
-
+using InstructionSelectorCmpTest = InstructionSelectorTestWithParam<IntCmp>;
TEST_P(InstructionSelectorCmpTest, Parameter) {
const IntCmp cmp = GetParam();
@@ -322,10 +318,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
// Shift instructions.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorShiftTest;
-
+using InstructionSelectorShiftTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorShiftTest, Immediate) {
const MachInst2 dpi = GetParam();
@@ -442,10 +436,8 @@ TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
// Logical instructions.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorLogicalTest;
-
+using InstructionSelectorLogicalTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorLogicalTest, Parameter) {
const MachInst2 dpi = GetParam();
@@ -578,10 +570,8 @@ TEST_F(InstructionSelectorTest, Word32AndToClearBits) {
// MUL/DIV instructions.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorMulDivTest;
-
+using InstructionSelectorMulDivTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorMulDivTest, Parameter) {
const MachInst2 dpi = GetParam();
@@ -602,9 +592,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
// MOD instructions.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MachInst2> InstructionSelectorModTest;
-
+using InstructionSelectorModTest = InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorModTest, Parameter) {
const MachInst2 dpi = GetParam();
@@ -625,10 +613,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorModTest,
// Floating point instructions.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorFPArithTest;
-
+using InstructionSelectorFPArithTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorFPArithTest, Parameter) {
const MachInst2 fpa = GetParam();
@@ -649,10 +635,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// Integer arithmetic.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorIntArithTwoTest;
-
+using InstructionSelectorIntArithTwoTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
const MachInst2 intpa = GetParam();
@@ -674,10 +658,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// One node.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MachInst1>
- InstructionSelectorIntArithOneTest;
-
+using InstructionSelectorIntArithOneTest =
+ InstructionSelectorTestWithParam<MachInst1>;
TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
const MachInst1 intpa = GetParam();
@@ -699,10 +681,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// Conversions.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<Conversion>
- InstructionSelectorConversionTest;
-
+using InstructionSelectorConversionTest =
+ InstructionSelectorTestWithParam<Conversion>;
TEST_P(InstructionSelectorConversionTest, Parameter) {
const Conversion conv = GetParam();
@@ -719,8 +699,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorConversionTest,
::testing::ValuesIn(kConversionInstructions));
-typedef InstructionSelectorTestWithParam<Conversion>
- CombineChangeFloat64ToInt32WithRoundFloat64;
+using CombineChangeFloat64ToInt32WithRoundFloat64 =
+ InstructionSelectorTestWithParam<Conversion>;
TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) {
{
@@ -740,8 +720,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
CombineChangeFloat64ToInt32WithRoundFloat64,
::testing::ValuesIn(kFloat64RoundInstructions));
-typedef InstructionSelectorTestWithParam<Conversion>
- CombineChangeFloat32ToInt32WithRoundFloat32;
+using CombineChangeFloat32ToInt32WithRoundFloat32 =
+ InstructionSelectorTestWithParam<Conversion>;
TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) {
{
@@ -986,10 +966,8 @@ const MemoryAccessImm2 kMemoryAccessesImmUnaligned[] = {
} // namespace
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
- InstructionSelectorMemoryAccessTest;
-
+using InstructionSelectorMemoryAccessTest =
+ InstructionSelectorTestWithParam<MemoryAccess>;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
@@ -1024,10 +1002,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// Load immediate.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MemoryAccessImm>
- InstructionSelectorMemoryAccessImmTest;
-
+using InstructionSelectorMemoryAccessImmTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm>;
TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
@@ -1095,8 +1071,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessImmTest,
::testing::ValuesIn(kMemoryAccessesImm));
-typedef InstructionSelectorTestWithParam<MemoryAccessImm2>
- InstructionSelectorMemoryAccessUnalignedImmTest;
+using InstructionSelectorMemoryAccessUnalignedImmTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm2>;
TEST_P(InstructionSelectorMemoryAccessUnalignedImmTest, StoreZero) {
const MemoryAccessImm2 memacc = GetParam();
@@ -1130,10 +1106,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// Load/store offsets more than 16 bits.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MemoryAccessImm1>
- InstructionSelectorMemoryAccessImmMoreThan16bitTest;
-
+using InstructionSelectorMemoryAccessImmMoreThan16bitTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm1>;
TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
LoadWithImmediateIndex) {
diff --git a/deps/v8/test/unittests/compiler/mips64/OWNERS b/deps/v8/test/unittests/compiler/mips64/OWNERS
deleted file mode 100644
index cab3679d65..0000000000
--- a/deps/v8/test/unittests/compiler/mips64/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-xwafish@gmail.com
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index cd73fe3c9b..0b8c75cc44 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -4,7 +4,7 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -24,9 +24,8 @@ std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
return os << mi.constructor_name;
}
-typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
-typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
-
+using MachInst1 = MachInst<Node* (RawMachineAssembler::*)(Node*)>;
+using MachInst2 = MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)>;
// To avoid duplicated code IntCmp helper structure
// is created. It contains MachInst2 with two nodes and expected_size
@@ -299,8 +298,7 @@ const MachInst2 kCanElideChangeUint32ToUint64[] = {
} // namespace
-
-typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
+using InstructionSelectorFPCmpTest = InstructionSelectorTestWithParam<FPCmp>;
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
@@ -322,8 +320,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
// ----------------------------------------------------------------------------
// Arithmetic compare instructions integers
// ----------------------------------------------------------------------------
-typedef InstructionSelectorTestWithParam<IntCmp> InstructionSelectorCmpTest;
-
+using InstructionSelectorCmpTest = InstructionSelectorTestWithParam<IntCmp>;
TEST_P(InstructionSelectorCmpTest, Parameter) {
const IntCmp cmp = GetParam();
@@ -373,8 +370,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
// ----------------------------------------------------------------------------
// Shift instructions.
// ----------------------------------------------------------------------------
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorShiftTest;
+using InstructionSelectorShiftTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorShiftTest, Immediate) {
const MachInst2 dpi = GetParam();
@@ -533,9 +530,8 @@ TEST_F(InstructionSelectorTest, Word64AndToClearBits) {
// ----------------------------------------------------------------------------
// Logical instructions.
// ----------------------------------------------------------------------------
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorLogicalTest;
-
+using InstructionSelectorLogicalTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorLogicalTest, Parameter) {
const MachInst2 dpi = GetParam();
@@ -809,8 +805,8 @@ TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
// ----------------------------------------------------------------------------
// MUL/DIV instructions.
// ----------------------------------------------------------------------------
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorMulDivTest;
+using InstructionSelectorMulDivTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorMulDivTest, Parameter) {
const MachInst2 dpi = GetParam();
@@ -830,7 +826,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
// ----------------------------------------------------------------------------
// MOD instructions.
// ----------------------------------------------------------------------------
-typedef InstructionSelectorTestWithParam<MachInst2> InstructionSelectorModTest;
+using InstructionSelectorModTest = InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorModTest, Parameter) {
const MachInst2 dpi = GetParam();
@@ -850,8 +846,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorModTest,
// ----------------------------------------------------------------------------
// Floating point instructions.
// ----------------------------------------------------------------------------
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorFPArithTest;
+using InstructionSelectorFPArithTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorFPArithTest, Parameter) {
const MachInst2 fpa = GetParam();
@@ -870,8 +866,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// ----------------------------------------------------------------------------
// Integer arithmetic
// ----------------------------------------------------------------------------
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorIntArithTwoTest;
+using InstructionSelectorIntArithTwoTest =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
const MachInst2 intpa = GetParam();
@@ -893,9 +889,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// One node.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MachInst1>
- InstructionSelectorIntArithOneTest;
+using InstructionSelectorIntArithOneTest =
+ InstructionSelectorTestWithParam<MachInst1>;
TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
const MachInst1 intpa = GetParam();
@@ -915,8 +910,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// ----------------------------------------------------------------------------
// Conversions.
// ----------------------------------------------------------------------------
-typedef InstructionSelectorTestWithParam<Conversion>
- InstructionSelectorConversionTest;
+using InstructionSelectorConversionTest =
+ InstructionSelectorTestWithParam<Conversion>;
TEST_P(InstructionSelectorConversionTest, Parameter) {
const Conversion conv = GetParam();
@@ -957,9 +952,8 @@ TEST_F(InstructionSelectorTest, ChangesFromToSmi) {
}
}
-
-typedef InstructionSelectorTestWithParam<Conversion>
- CombineChangeFloat64ToInt32WithRoundFloat64;
+using CombineChangeFloat64ToInt32WithRoundFloat64 =
+ InstructionSelectorTestWithParam<Conversion>;
TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) {
{
@@ -979,8 +973,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
CombineChangeFloat64ToInt32WithRoundFloat64,
::testing::ValuesIn(kFloat64RoundInstructions));
-typedef InstructionSelectorTestWithParam<Conversion>
- CombineChangeFloat32ToInt32WithRoundFloat32;
+using CombineChangeFloat32ToInt32WithRoundFloat32 =
+ InstructionSelectorTestWithParam<Conversion>;
TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) {
{
@@ -1154,8 +1148,8 @@ TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
}
}
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorElidedChangeUint32ToUint64Test;
+using InstructionSelectorElidedChangeUint32ToUint64Test =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
const MachInst2 binop = GetParam();
@@ -1447,9 +1441,8 @@ const MemoryAccessImm2 kMemoryAccessesImmUnaligned[] = {
} // namespace
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
- InstructionSelectorMemoryAccessTest;
+using InstructionSelectorMemoryAccessTest =
+ InstructionSelectorTestWithParam<MemoryAccess>;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
@@ -1484,9 +1477,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// Load immediate.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MemoryAccessImm>
- InstructionSelectorMemoryAccessImmTest;
+using InstructionSelectorMemoryAccessImmTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm>;
TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
@@ -1554,8 +1546,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessImmTest,
::testing::ValuesIn(kMemoryAccessesImm));
-typedef InstructionSelectorTestWithParam<MemoryAccessImm2>
- InstructionSelectorMemoryAccessUnalignedImmTest;
+using InstructionSelectorMemoryAccessUnalignedImmTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm2>;
TEST_P(InstructionSelectorMemoryAccessUnalignedImmTest, StoreZero) {
const MemoryAccessImm2 memacc = GetParam();
@@ -1589,9 +1581,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
// Load/store offsets more than 16 bits.
// ----------------------------------------------------------------------------
-
-typedef InstructionSelectorTestWithParam<MemoryAccessImm1>
- InstructionSelectorMemoryAccessImmMoreThan16bitTest;
+using InstructionSelectorMemoryAccessImmMoreThan16bitTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm1>;
TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
LoadWithImmediateIndex) {
diff --git a/deps/v8/test/unittests/compiler/node-cache-unittest.cc b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
index b699fb38ca..10118c3a41 100644
--- a/deps/v8/test/unittests/compiler/node-cache-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-cache-unittest.cc
@@ -14,7 +14,7 @@ namespace internal {
namespace compiler {
namespace node_cache_unittest {
-typedef GraphTest NodeCacheTest;
+using NodeCacheTest = GraphTest;
TEST_F(NodeCacheTest, Int32Constant_back_to_back) {
Int32NodeCache cache;
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index ac93f46274..8ffdaf27d0 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -10,9 +10,9 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/handles/handles-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
using testing::_;
using testing::MakeMatcher;
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 72deb73057..a71f05964f 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -5,11 +5,11 @@
#ifndef V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
#define V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
+#include "src/codegen/machine-type.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/simplified-operator.h"
-#include "src/machine-type.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/compiler/node-unittest.cc b/deps/v8/test/unittests/compiler/node-unittest.cc
index b333c20cd1..1e23b1faa6 100644
--- a/deps/v8/test/unittests/compiler/node-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-unittest.cc
@@ -17,8 +17,7 @@ namespace internal {
namespace compiler {
namespace node_unittest {
-typedef TestWithZone NodeTest;
-
+using NodeTest = TestWithZone;
const IrOpcode::Value kOpcode0 = static_cast<IrOpcode::Value>(0);
const IrOpcode::Value kOpcode1 = static_cast<IrOpcode::Value>(1);
diff --git a/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc b/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
index 611e766edb..b602e62c35 100644
--- a/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
@@ -4,7 +4,7 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
index 9ac6ca8810..a71a5315f1 100644
--- a/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
@@ -70,8 +70,8 @@ class TestRangeBuilder {
}
private:
- typedef std::pair<int, int> Interval;
- typedef std::vector<Interval> IntervalList;
+ using Interval = std::pair<int, int>;
+ using IntervalList = std::vector<Interval>;
int id_;
IntervalList pairs_;
std::set<int> uses_;
diff --git a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
index 23f17b2b6c..e72afd5601 100644
--- a/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/backend/move-optimizer.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
#include "test/unittests/compiler/backend/instruction-sequence-unittest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
index 76fc39e2f6..262c51d31e 100644
--- a/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
#include "src/compiler/pipeline.h"
#include "test/unittests/compiler/backend/instruction-sequence-unittest.h"
@@ -625,22 +625,18 @@ TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
const int var_def_index = 1;
const int call_index = 3;
- const bool spill_in_deferred =
- FLAG_turbo_preprocess_ranges || FLAG_turbo_control_flow_aware_allocation;
- int expect_no_moves = spill_in_deferred ? var_def_index : call_index;
- int expect_spill_move = spill_in_deferred ? call_index : var_def_index;
- // We should have no parallel moves at the "expect_no_moves" position.
+ // We should have no parallel moves at the "var_def_index" position.
EXPECT_EQ(
- 0, GetParallelMoveCount(expect_no_moves, Instruction::START, sequence()));
+ 0, GetParallelMoveCount(var_def_index, Instruction::START, sequence()));
- // The spill should be performed at the position expect_spill_move.
- EXPECT_TRUE(IsParallelMovePresent(expect_spill_move, Instruction::START,
- sequence(), Reg(0), Slot(0)));
+ // The spill should be performed at the position "call_index".
+ EXPECT_TRUE(IsParallelMovePresent(call_index, Instruction::START, sequence(),
+ Reg(0), Slot(0)));
}
TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
- if (!FLAG_turbo_preprocess_ranges) return;
+ if (FLAG_turbo_control_flow_aware_allocation) return;
StartBlock(); // B0
auto var1 = EmitOI(Reg(0));
@@ -775,7 +771,7 @@ class SlotConstraintTest : public RegisterAllocatorTest,
int variant() const { return ::testing::get<1>(B::GetParam()); }
private:
- typedef ::testing::WithParamInterface<::testing::tuple<ParameterType, int>> B;
+ using B = ::testing::WithParamInterface<::testing::tuple<ParameterType, int>>;
};
} // namespace
@@ -817,7 +813,6 @@ TEST_P(SlotConstraintTest, SlotConstraint) {
break;
default:
UNREACHABLE();
- break;
}
EndBlock(Last());
diff --git a/deps/v8/test/unittests/compiler/s390/OWNERS b/deps/v8/test/unittests/compiler/s390/OWNERS
deleted file mode 100644
index 6d1a8fc472..0000000000
--- a/deps/v8/test/unittests/compiler/s390/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-jyan@ca.ibm.com
-joransiu@ca.ibm.com
-michael_dawson@ca.ibm.com
-miladfar@ca.ibm.com \ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc b/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
index 611e766edb..b602e62c35 100644
--- a/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
+++ b/deps/v8/test/unittests/compiler/s390/instruction-selector-s390-unittest.cc
@@ -4,7 +4,7 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
-#include "src/assembler-inl.h"
+#include "src/codegen/assembler-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/unittests/compiler/schedule-unittest.cc b/deps/v8/test/unittests/compiler/schedule-unittest.cc
index ab2f384db4..80a7cf003f 100644
--- a/deps/v8/test/unittests/compiler/schedule-unittest.cc
+++ b/deps/v8/test/unittests/compiler/schedule-unittest.cc
@@ -14,8 +14,7 @@ namespace internal {
namespace compiler {
namespace schedule_unittest {
-typedef TestWithIsolateAndZone BasicBlockTest;
-
+using BasicBlockTest = TestWithIsolateAndZone;
TEST_F(BasicBlockTest, Constructor) {
int const id = random_number_generator()->NextInt();
@@ -68,9 +67,7 @@ TEST_F(BasicBlockTest, GetCommonDominator3) {
EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b3, &b1));
}
-
-typedef TestWithZone ScheduleTest;
-
+using ScheduleTest = TestWithZone;
const Operator kCallOperator(IrOpcode::kCall, Operator::kNoProperties,
"MockCall", 0, 0, 0, 0, 0, 0);
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index f3573d6379..1f44eb088b 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/simplified-operator.h"
+#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/types.h"
-#include "src/conversions-inl.h"
+#include "src/numbers/conversions-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index 280afef4c9..1b416628fc 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -146,26 +146,25 @@ const ElementAccess kElementAccesses[] = {
{kUntaggedBase, 0, Type::Number(),
MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone),
kNoWriteBarrier},
- {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- MachineType::Int8(), kNoWriteBarrier},
- {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+ {kTaggedBase, ByteArray::kHeaderSize, Type::Signed32(), MachineType::Int8(),
+ kNoWriteBarrier},
+ {kTaggedBase, ByteArray::kHeaderSize, Type::Unsigned32(),
MachineType::Uint8(), kNoWriteBarrier},
- {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+ {kTaggedBase, ByteArray::kHeaderSize, Type::Signed32(),
MachineType::Int16(), kNoWriteBarrier},
- {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+ {kTaggedBase, ByteArray::kHeaderSize, Type::Unsigned32(),
MachineType::Uint16(), kNoWriteBarrier},
- {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+ {kTaggedBase, ByteArray::kHeaderSize, Type::Signed32(),
MachineType::Int32(), kNoWriteBarrier},
- {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+ {kTaggedBase, ByteArray::kHeaderSize, Type::Unsigned32(),
MachineType::Uint32(), kNoWriteBarrier},
- {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+ {kTaggedBase, ByteArray::kHeaderSize, Type::Number(),
MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone),
kNoWriteBarrier},
- {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+ {kTaggedBase, ByteArray::kHeaderSize, Type::Number(),
MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone),
kNoWriteBarrier}};
-
class SimplifiedElementAccessOperatorTest
: public TestWithZone,
public ::testing::WithParamInterface<ElementAccess> {};
diff --git a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
index d53e7d9462..e6ba7696c5 100644
--- a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
+++ b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/state-values-utils.h"
-#include "src/bit-vector.h"
+#include "src/utils/bit-vector.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index c8aaafb6dc..70c0b69047 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/typed-optimization.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
@@ -11,7 +11,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/isolate-inl.h"
+#include "src/execution/isolate-inl.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 5d712bd220..2eaa379f30 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -9,7 +9,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/common/types-fuzz.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -253,8 +253,8 @@ class TyperTest : public TypedGraphTest {
}
}
- typedef std::function<Type(Type)> UnaryTyper;
- typedef std::function<Type(Type, Type)> BinaryTyper;
+ using UnaryTyper = std::function<Type(Type)>;
+ using BinaryTyper = std::function<Type(Type, Type)>;
void TestUnaryMonotonicity(UnaryTyper typer, Type upper1 = Type::Any()) {
Type type1 = Type::Intersect(types_.Fuzz(), upper1, zone());
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index 0aa5c389bf..f8e3e26aa9 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -5,7 +5,7 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/compiler/node-matchers.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
@@ -91,8 +91,8 @@ static const LoadWithToInt64Extension kLoadWithToInt64Extensions[] = {
} // namespace
-typedef InstructionSelectorTestWithParam<LoadWithToInt64Extension>
- InstructionSelectorChangeInt32ToInt64Test;
+using InstructionSelectorChangeInt32ToInt64Test =
+ InstructionSelectorTestWithParam<LoadWithToInt64Extension>;
TEST_P(InstructionSelectorChangeInt32ToInt64Test, ChangeInt32ToInt64WithLoad) {
const LoadWithToInt64Extension extension = GetParam();
@@ -139,10 +139,8 @@ static const MemoryAccess kMemoryAccesses[] = {
} // namespace
-
-typedef InstructionSelectorTestWithParam<MemoryAccess>
- InstructionSelectorMemoryAccessTest;
-
+using InstructionSelectorMemoryAccessTest =
+ InstructionSelectorTestWithParam<MemoryAccess>;
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
@@ -181,8 +179,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
namespace {
-typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
-
+using Constructor = Node* (RawMachineAssembler::*)(Node*, Node*);
struct BinaryOperation {
Constructor constructor;
@@ -219,10 +216,8 @@ const BinaryOperation kWord32BinaryOperations[] = {
} // namespace
-
-typedef InstructionSelectorTestWithParam<BinaryOperation>
- InstructionSelectorChangeUint32ToUint64Test;
-
+using InstructionSelectorChangeUint32ToUint64Test =
+ InstructionSelectorTestWithParam<BinaryOperation>;
TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
const BinaryOperation& bop = GetParam();
@@ -252,7 +247,7 @@ struct MachInst {
MachineType machine_type;
};
-typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+using MachInst2 = MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)>;
// X64 instructions that clear the top 32 bits of the destination.
const MachInst2 kCanElideChangeUint32ToUint64[] = {
@@ -300,8 +295,8 @@ const MachInst2 kCanElideChangeUint32ToUint64[] = {
} // namespace
-typedef InstructionSelectorTestWithParam<MachInst2>
- InstructionSelectorElidedChangeUint32ToUint64Test;
+using InstructionSelectorElidedChangeUint32ToUint64Test =
+ InstructionSelectorTestWithParam<MachInst2>;
TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
const MachInst2 binop = GetParam();
diff --git a/deps/v8/test/unittests/date/date-cache-unittest.cc b/deps/v8/test/unittests/date/date-cache-unittest.cc
new file mode 100644
index 0000000000..2ceaaebabb
--- /dev/null
+++ b/deps/v8/test/unittests/date/date-cache-unittest.cc
@@ -0,0 +1,109 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifdef V8_INTL_SUPPORT
+#include "src/base/platform/platform.h"
+#include "src/date/date.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "unicode/strenum.h"
+#include "unicode/timezone.h"
+
+namespace v8 {
+namespace internal {
+
+// A recent time for the test.
+// 2019-05-08T04:16:04.845Z
+static const int64_t kStartTime = 1557288964845;
+
+class AdoptDefaultThread final : public base::Thread {
+ public:
+ AdoptDefaultThread() : base::Thread(Options("AdoptDefault")) {}
+
+ void Run() override {
+ printf("AdoptDefaultThread Start\n");
+ std::unique_ptr<icu::StringEnumeration> timezones(
+ icu::TimeZone::createEnumeration());
+ UErrorCode status = U_ZERO_ERROR;
+ const icu::UnicodeString* timezone = timezones->snext(status);
+ icu::TimeZone::adoptDefault(icu::TimeZone::createTimeZone(*timezone));
+ printf("AdoptDefaultThread End\n");
+ }
+};
+
+class GetLocalOffsetFromOSThread final : public base::Thread {
+ public:
+ explicit GetLocalOffsetFromOSThread(bool utc)
+ : base::Thread(Options("GetLocalOffsetFromOS")), utc_(utc) {}
+
+ void Run() override {
+ printf("GetLocalOffsetFromOSThread Start\n");
+ DateCache date_cache;
+ date_cache.GetLocalOffsetFromOS(kStartTime, utc_);
+ printf("GetLocalOffsetFromOSThread End\n");
+ }
+
+ private:
+ bool utc_;
+};
+
+class LocalTimezoneThread final : public base::Thread {
+ public:
+ LocalTimezoneThread() : base::Thread(Options("LocalTimezone")) {}
+
+ void Run() override {
+ printf("LocalTimezoneThread Start\n");
+ DateCache date_cache;
+ date_cache.LocalTimezone(kStartTime);
+ printf("LocalTimezoneThread End\n");
+ }
+};
+
+TEST(DateCache, AdoptDefaultFirst) {
+ AdoptDefaultThread t1;
+ GetLocalOffsetFromOSThread t2(true);
+ GetLocalOffsetFromOSThread t3(false);
+ LocalTimezoneThread t4;
+
+ // The AdoptDefaultFirst will always pass. Just a test to ensure
+ // our testing code itself is correct.
+ // We finish all the operation AdoptDefaultThread before
+ // running all other thread so it won't show the problem of
+ // AdoptDefault trashing newly create default.
+ t1.Start();
+ t1.Join();
+
+ t2.Start();
+ t3.Start();
+ t4.Start();
+
+ t2.Join();
+ t3.Join();
+ t4.Join();
+}
+
+TEST(DateCache, AdoptDefaultMixed) {
+ AdoptDefaultThread t1;
+ GetLocalOffsetFromOSThread t2(true);
+ GetLocalOffsetFromOSThread t3(false);
+ LocalTimezoneThread t4;
+
+ // The AdoptDefaultMixed run AdoptDefaultThread concurrently
+ // with other thread so if the AdoptDefault is not thread safe
+ // it will cause crash in other thread because the TimeZone
+ // newly created by createDefault could be trashed by AdoptDefault
+ // while a deleted DEFAULT_ZONE got cloned.
+ t1.Start();
+ t2.Start();
+ t3.Start();
+ t4.Start();
+
+ t1.Join();
+ t2.Join();
+ t3.Join();
+ t4.Join();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTL_SUPPORT
diff --git a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc b/deps/v8/test/unittests/diagnostics/eh-frame-iterator-unittest.cc
index fff38209c5..3a97c63553 100644
--- a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
+++ b/deps/v8/test/unittests/diagnostics/eh-frame-iterator-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/eh-frame.h"
+#include "src/diagnostics/eh-frame.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/eh-frame-writer-unittest.cc b/deps/v8/test/unittests/diagnostics/eh-frame-writer-unittest.cc
index 52501b462e..25a54b3849 100644
--- a/deps/v8/test/unittests/eh-frame-writer-unittest.cc
+++ b/deps/v8/test/unittests/diagnostics/eh-frame-writer-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/eh-frame.h"
+#include "src/diagnostics/eh-frame.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index 36c38ecb32..37b037147b 100644
--- a/deps/v8/test/unittests/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/microtask-queue.h"
+#include "src/execution/microtask-queue.h"
#include <algorithm>
#include <functional>
@@ -10,12 +10,12 @@
#include <vector>
#include "src/heap/factory.h"
-#include "src/objects-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/js-objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/promise-inl.h"
-#include "src/visitors.h"
+#include "src/objects/visitors.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -240,9 +240,9 @@ TEST_F(MicrotaskQueueTest, PromiseHandlerContext) {
Handle<Context> context2 = Utils::OpenHandle(*v8_context2, isolate());
Handle<Context> context3 = Utils::OpenHandle(*v8_context3, isolate());
Handle<Context> context4 = Utils::OpenHandle(*v8_context3, isolate());
- context2->native_context()->set_microtask_queue(microtask_queue());
- context3->native_context()->set_microtask_queue(microtask_queue());
- context4->native_context()->set_microtask_queue(microtask_queue());
+ context2->native_context().set_microtask_queue(microtask_queue());
+ context3->native_context().set_microtask_queue(microtask_queue());
+ context4->native_context().set_microtask_queue(microtask_queue());
Handle<JSFunction> handler;
Handle<JSProxy> proxy;
@@ -546,7 +546,7 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_InactiveHandler) {
Local<v8::Context> sub_context = v8::Context::New(v8_isolate());
Utils::OpenHandle(*sub_context)
->native_context()
- ->set_microtask_queue(microtask_queue());
+ .set_microtask_queue(microtask_queue());
Handle<JSArray> result;
Handle<JSFunction> stale_handler;
diff --git a/deps/v8/test/unittests/heap/code-object-registry-unittest.cc b/deps/v8/test/unittests/heap/code-object-registry-unittest.cc
new file mode 100644
index 0000000000..9b7ac1853e
--- /dev/null
+++ b/deps/v8/test/unittests/heap/code-object-registry-unittest.cc
@@ -0,0 +1,92 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/spaces.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(CodeObjectRegistry, RegisterAlreadyExistingObjectsAndContains) {
+ CodeObjectRegistry registry;
+ const int elements = 10;
+ const int offset = 100;
+ for (int i = 0; i < elements; i++) {
+ registry.RegisterAlreadyExistingCodeObject(i * offset);
+ }
+
+ for (int i = 0; i < elements; i++) {
+ CHECK(registry.Contains(i * offset));
+ }
+}
+
+TEST(CodeObjectRegistry, RegisterNewlyAllocatedObjectsAndContains) {
+ CodeObjectRegistry registry;
+ const int elements = 10;
+ const int offset = 100;
+ for (int i = 0; i < elements; i++) {
+ registry.RegisterNewlyAllocatedCodeObject(i * offset);
+ }
+
+ for (int i = 0; i < elements; i++) {
+ CHECK(registry.Contains(i * offset));
+ }
+}
+
+TEST(CodeObjectRegistry, FindAlreadyExistingObjects) {
+ CodeObjectRegistry registry;
+ const int elements = 10;
+ const int offset = 100;
+ const int inner = 2;
+ for (int i = 1; i <= elements; i++) {
+ registry.RegisterAlreadyExistingCodeObject(i * offset);
+ }
+
+ for (int i = 1; i <= elements; i++) {
+ for (int j = 0; j < inner; j++) {
+ CHECK_EQ(registry.GetCodeObjectStartFromInnerAddress(i * offset + j),
+ i * offset);
+ }
+ }
+}
+
+TEST(CodeObjectRegistry, FindNewlyAllocatedObjects) {
+ CodeObjectRegistry registry;
+ const int elements = 10;
+ const int offset = 100;
+ const int inner = 2;
+ for (int i = 1; i <= elements; i++) {
+ registry.RegisterNewlyAllocatedCodeObject(i * offset);
+ }
+
+ for (int i = 1; i <= elements; i++) {
+ for (int j = 0; j < inner; j++) {
+ CHECK_EQ(registry.GetCodeObjectStartFromInnerAddress(i * offset + j),
+ i * offset);
+ }
+ }
+}
+
+TEST(CodeObjectRegistry, FindAlternatingObjects) {
+ CodeObjectRegistry registry;
+ const int elements = 10;
+ const int offset = 100;
+ const int inner = 2;
+ for (int i = 1; i <= elements; i++) {
+ if (i % 2 == 0) {
+ registry.RegisterAlreadyExistingCodeObject(i * offset);
+ } else {
+ registry.RegisterNewlyAllocatedCodeObject(i * offset);
+ }
+ }
+
+ for (int i = 1; i <= elements; i++) {
+ for (int j = 0; j < inner; j++) {
+ CHECK_EQ(registry.GetCodeObjectStartFromInnerAddress(i * offset + j),
+ i * offset);
+ }
+ }
+}
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
index 5bbbaceb3c..ba08701b59 100644
--- a/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
+++ b/deps/v8/test/unittests/heap/embedder-tracing-unittest.cc
@@ -31,8 +31,8 @@ LocalEmbedderHeapTracer::WrapperInfo CreateWrapperInfo() {
class MockEmbedderHeapTracer : public EmbedderHeapTracer {
public:
- MOCK_METHOD0(TracePrologue, void());
- MOCK_METHOD0(TraceEpilogue, void());
+ MOCK_METHOD1(TracePrologue, void(EmbedderHeapTracer::TraceFlags));
+ MOCK_METHOD1(TraceEpilogue, void(EmbedderHeapTracer::TraceSummary*));
MOCK_METHOD1(EnterFinalPause, void(EmbedderHeapTracer::EmbedderStackState));
MOCK_METHOD0(IsTracingDone, bool());
MOCK_METHOD1(RegisterV8References,
@@ -52,7 +52,7 @@ TEST(LocalEmbedderHeapTracer, NoRemoteTracer) {
// We should be able to call all functions without a remote tracer being
// attached.
EXPECT_FALSE(local_tracer.InUse());
- local_tracer.TracePrologue();
+ local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kNoFlags);
local_tracer.EnterFinalPause();
bool done = local_tracer.Trace(std::numeric_limits<double>::infinity());
EXPECT_TRUE(done);
@@ -63,15 +63,24 @@ TEST(LocalEmbedderHeapTracer, TracePrologueForwards) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(nullptr);
local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, TracePrologue());
- local_tracer.TracePrologue();
+ EXPECT_CALL(remote_tracer, TracePrologue(_));
+ local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kNoFlags);
+}
+
+TEST(LocalEmbedderHeapTracer, TracePrologueForwardsMemoryReducingFlag) {
+ StrictMock<MockEmbedderHeapTracer> remote_tracer;
+ LocalEmbedderHeapTracer local_tracer(nullptr);
+ local_tracer.SetRemoteTracer(&remote_tracer);
+ EXPECT_CALL(remote_tracer,
+ TracePrologue(EmbedderHeapTracer::TraceFlags::kReduceMemory));
+ local_tracer.TracePrologue(EmbedderHeapTracer::TraceFlags::kReduceMemory);
}
TEST(LocalEmbedderHeapTracer, TraceEpilogueForwards) {
StrictMock<MockEmbedderHeapTracer> remote_tracer;
LocalEmbedderHeapTracer local_tracer(nullptr);
local_tracer.SetRemoteTracer(&remote_tracer);
- EXPECT_CALL(remote_tracer, TraceEpilogue());
+ EXPECT_CALL(remote_tracer, TraceEpilogue(_));
local_tracer.TraceEpilogue();
}
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index eeec78759e..53b919a860 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -6,9 +6,9 @@
#include <limits>
#include "src/base/platform/platform.h"
-#include "src/globals.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
-#include "src/isolate.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -54,104 +54,122 @@ TEST(GCTracer, AverageSpeed) {
namespace {
void SampleAndAddAllocaton(v8::internal::GCTracer* tracer, double time_ms,
- size_t new_space_counter_bytes,
- size_t old_generation_counter_bytes) {
- tracer->SampleAllocation(time_ms, new_space_counter_bytes,
- old_generation_counter_bytes);
+ size_t per_space_counter_bytes) {
+ // Increment counters of all spaces.
+ tracer->SampleAllocation(time_ms, per_space_counter_bytes,
+ per_space_counter_bytes, per_space_counter_bytes);
tracer->AddAllocation(time_ms);
}
} // namespace
TEST_F(GCTracerTest, AllocationThroughput) {
+ // GCTracer::AllocationThroughputInBytesPerMillisecond ignores global memory.
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
- int time1 = 100;
- size_t counter1 = 1000;
- // First sample creates baseline but is not part of the recorded samples.
- tracer->SampleAllocation(time1, counter1, counter1);
- SampleAndAddAllocaton(tracer, time1, counter1, counter1);
- int time2 = 200;
- size_t counter2 = 2000;
- SampleAndAddAllocaton(tracer, time2, counter2, counter2);
+ const int time1 = 100;
+ const size_t counter1 = 1000;
+ SampleAndAddAllocaton(tracer, time1, counter1);
+ const int time2 = 200;
+ const size_t counter2 = 2000;
+ SampleAndAddAllocaton(tracer, time2, counter2);
// Will only consider the current sample.
- size_t throughput = static_cast<size_t>(
- tracer->AllocationThroughputInBytesPerMillisecond(100));
- EXPECT_EQ(2 * (counter2 - counter1) / (time2 - time1), throughput);
- int time3 = 1000;
- size_t counter3 = 30000;
- SampleAndAddAllocaton(tracer, time3, counter3, counter3);
+ EXPECT_EQ(2 * (counter2 - counter1) / (time2 - time1),
+ static_cast<size_t>(
+ tracer->AllocationThroughputInBytesPerMillisecond(100)));
+ const int time3 = 1000;
+ const size_t counter3 = 30000;
+ SampleAndAddAllocaton(tracer, time3, counter3);
+ // Only consider last sample.
+ EXPECT_EQ(2 * (counter3 - counter2) / (time3 - time2),
+ static_cast<size_t>(
+ tracer->AllocationThroughputInBytesPerMillisecond(800)));
// Considers last 2 samples.
- throughput = tracer->AllocationThroughputInBytesPerMillisecond(801);
- EXPECT_EQ(2 * (counter3 - counter1) / (time3 - time1), throughput);
+ EXPECT_EQ(2 * (counter3 - counter1) / (time3 - time1),
+ static_cast<size_t>(
+ tracer->AllocationThroughputInBytesPerMillisecond(801)));
}
-TEST_F(GCTracerTest, NewSpaceAllocationThroughput) {
+TEST_F(GCTracerTest, PerGenerationAllocationThroughput) {
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
- int time1 = 100;
- size_t counter1 = 1000;
- SampleAndAddAllocaton(tracer, time1, counter1, 0);
- int time2 = 200;
- size_t counter2 = 2000;
- SampleAndAddAllocaton(tracer, time2, counter2, 0);
- size_t throughput =
- tracer->NewSpaceAllocationThroughputInBytesPerMillisecond();
- EXPECT_EQ((counter2 - counter1) / (time2 - time1), throughput);
- int time3 = 1000;
- size_t counter3 = 30000;
- SampleAndAddAllocaton(tracer, time3, counter3, 0);
- throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond();
- EXPECT_EQ((counter3 - counter1) / (time3 - time1), throughput);
-}
-
-TEST_F(GCTracerTest, NewSpaceAllocationThroughputWithProvidedTime) {
- GCTracer* tracer = i_isolate()->heap()->tracer();
- tracer->ResetForTesting();
-
- int time1 = 100;
- size_t counter1 = 1000;
- // First sample creates baseline but is not part of the recorded samples.
- SampleAndAddAllocaton(tracer, time1, counter1, 0);
- int time2 = 200;
- size_t counter2 = 2000;
- SampleAndAddAllocaton(tracer, time2, counter2, 0);
- // Will only consider the current sample.
- size_t throughput =
- tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
- EXPECT_EQ((counter2 - counter1) / (time2 - time1), throughput);
- int time3 = 1000;
- size_t counter3 = 30000;
- SampleAndAddAllocaton(tracer, time3, counter3, 0);
- // Considers last 2 samples.
- throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(801);
- EXPECT_EQ((counter3 - counter1) / (time3 - time1), throughput);
+ const int time1 = 100;
+ const size_t counter1 = 1000;
+ SampleAndAddAllocaton(tracer, time1, counter1);
+ const int time2 = 200;
+ const size_t counter2 = 2000;
+ SampleAndAddAllocaton(tracer, time2, counter2);
+ const size_t expected_throughput1 = (counter2 - counter1) / (time2 - time1);
+ EXPECT_EQ(expected_throughput1,
+ static_cast<size_t>(
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond()));
+ EXPECT_EQ(
+ expected_throughput1,
+ static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond()));
+ EXPECT_EQ(expected_throughput1,
+ static_cast<size_t>(
+ tracer->EmbedderAllocationThroughputInBytesPerMillisecond()));
+ const int time3 = 1000;
+ const size_t counter3 = 30000;
+ SampleAndAddAllocaton(tracer, time3, counter3);
+ const size_t expected_throughput2 = (counter3 - counter1) / (time3 - time1);
+ EXPECT_EQ(expected_throughput2,
+ static_cast<size_t>(
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond()));
+ EXPECT_EQ(
+ expected_throughput2,
+ static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond()));
+ EXPECT_EQ(expected_throughput2,
+ static_cast<size_t>(
+ tracer->EmbedderAllocationThroughputInBytesPerMillisecond()));
}
-TEST_F(GCTracerTest, OldGenerationAllocationThroughputWithProvidedTime) {
+TEST_F(GCTracerTest, PerGenerationAllocationThroughputWithProvidedTime) {
GCTracer* tracer = i_isolate()->heap()->tracer();
tracer->ResetForTesting();
- int time1 = 100;
- size_t counter1 = 1000;
- // First sample creates baseline but is not part of the recorded samples.
- SampleAndAddAllocaton(tracer, time1, 0, counter1);
- int time2 = 200;
- size_t counter2 = 2000;
- SampleAndAddAllocaton(tracer, time2, 0, counter2);
- // Will only consider the current sample.
- size_t throughput = static_cast<size_t>(
- tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100));
- EXPECT_EQ((counter2 - counter1) / (time2 - time1), throughput);
- int time3 = 1000;
- size_t counter3 = 30000;
- SampleAndAddAllocaton(tracer, time3, 0, counter3);
- // Considers last 2 samples.
- throughput = static_cast<size_t>(
- tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(801));
- EXPECT_EQ((counter3 - counter1) / (time3 - time1), throughput);
+ const int time1 = 100;
+ const size_t counter1 = 1000;
+ SampleAndAddAllocaton(tracer, time1, counter1);
+ const int time2 = 200;
+ const size_t counter2 = 2000;
+ SampleAndAddAllocaton(tracer, time2, counter2);
+ const size_t expected_throughput1 = (counter2 - counter1) / (time2 - time1);
+ EXPECT_EQ(
+ expected_throughput1,
+ static_cast<size_t>(
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100)));
+ EXPECT_EQ(
+ expected_throughput1,
+ static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100)));
+ const int time3 = 1000;
+ const size_t counter3 = 30000;
+ SampleAndAddAllocaton(tracer, time3, counter3);
+ const size_t expected_throughput2 = (counter3 - counter2) / (time3 - time2);
+ // Only consider last sample.
+ EXPECT_EQ(
+ expected_throughput2,
+ static_cast<size_t>(
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(800)));
+ EXPECT_EQ(
+ expected_throughput2,
+ static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(800)));
+ const size_t expected_throughput3 = (counter3 - counter1) / (time3 - time1);
+ // Consider last two samples.
+ EXPECT_EQ(
+ expected_throughput3,
+ static_cast<size_t>(
+ tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(801)));
+ EXPECT_EQ(
+ expected_throughput3,
+ static_cast<size_t>(
+ tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(801)));
}
TEST_F(GCTracerTest, RegularScope) {
diff --git a/deps/v8/test/unittests/heap/heap-controller-unittest.cc b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
index d3bb177aa3..445c49052c 100644
--- a/deps/v8/test/unittests/heap/heap-controller-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
@@ -6,11 +6,11 @@
#include <iostream>
#include <limits>
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
-#include "src/handles-inl.h"
-#include "src/handles.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/handles.h"
#include "src/heap/heap-controller.h"
#include "test/unittests/test-utils.h"
@@ -19,7 +19,7 @@
namespace v8 {
namespace internal {
-using HeapControllerTest = TestWithIsolate;
+using MemoryControllerTest = TestWithIsolate;
double Round(double x) {
// Round to three digits.
@@ -32,86 +32,81 @@ void CheckEqualRounded(double expected, double actual) {
EXPECT_DOUBLE_EQ(expected, actual);
}
-TEST_F(HeapControllerTest, HeapGrowingFactor) {
- HeapController heap_controller(i_isolate()->heap());
- double min_factor = heap_controller.min_growing_factor_;
- double max_factor = heap_controller.max_growing_factor_;
-
- CheckEqualRounded(max_factor, heap_controller.GrowingFactor(34, 1, 4.0));
- CheckEqualRounded(3.553, heap_controller.GrowingFactor(45, 1, 4.0));
- CheckEqualRounded(2.830, heap_controller.GrowingFactor(50, 1, 4.0));
- CheckEqualRounded(1.478, heap_controller.GrowingFactor(100, 1, 4.0));
- CheckEqualRounded(1.193, heap_controller.GrowingFactor(200, 1, 4.0));
- CheckEqualRounded(1.121, heap_controller.GrowingFactor(300, 1, 4.0));
- CheckEqualRounded(heap_controller.GrowingFactor(300, 1, 4.0),
- heap_controller.GrowingFactor(600, 2, 4.0));
- CheckEqualRounded(min_factor, heap_controller.GrowingFactor(400, 1, 4.0));
+namespace {
+
+using V8Controller = MemoryController<V8HeapTrait>;
+
+} // namespace
+
+TEST_F(MemoryControllerTest, HeapGrowingFactor) {
+ CheckEqualRounded(V8HeapTrait::kMaxGrowingFactor,
+ V8Controller::DynamicGrowingFactor(34, 1, 4.0));
+ CheckEqualRounded(3.553, V8Controller::DynamicGrowingFactor(45, 1, 4.0));
+ CheckEqualRounded(2.830, V8Controller::DynamicGrowingFactor(50, 1, 4.0));
+ CheckEqualRounded(1.478, V8Controller::DynamicGrowingFactor(100, 1, 4.0));
+ CheckEqualRounded(1.193, V8Controller::DynamicGrowingFactor(200, 1, 4.0));
+ CheckEqualRounded(1.121, V8Controller::DynamicGrowingFactor(300, 1, 4.0));
+ CheckEqualRounded(V8Controller::DynamicGrowingFactor(300, 1, 4.0),
+ V8Controller::DynamicGrowingFactor(600, 2, 4.0));
+ CheckEqualRounded(V8HeapTrait::kMinGrowingFactor,
+ V8Controller::DynamicGrowingFactor(400, 1, 4.0));
}
-TEST_F(HeapControllerTest, MaxHeapGrowingFactor) {
- HeapController heap_controller(i_isolate()->heap());
+TEST_F(MemoryControllerTest, MaxHeapGrowingFactor) {
+ CheckEqualRounded(1.3,
+ V8Controller::MaxGrowingFactor(V8HeapTrait::kMinSize * MB));
CheckEqualRounded(
- 1.3, heap_controller.MaxGrowingFactor(HeapController::kMinSize * MB));
- CheckEqualRounded(1.600, heap_controller.MaxGrowingFactor(
- HeapController::kMaxSize / 2 * MB));
+ 1.600, V8Controller::MaxGrowingFactor(V8HeapTrait::kMaxSize / 2 * MB));
CheckEqualRounded(
- 1.999, heap_controller.MaxGrowingFactor(
- (HeapController::kMaxSize - Heap::kPointerMultiplier) * MB));
- CheckEqualRounded(4.0,
- heap_controller.MaxGrowingFactor(
- static_cast<size_t>(HeapController::kMaxSize) * MB));
+ 1.999, V8Controller::MaxGrowingFactor(
+ (V8HeapTrait::kMaxSize - Heap::kPointerMultiplier) * MB));
+ CheckEqualRounded(4.0, V8Controller::MaxGrowingFactor(
+ static_cast<size_t>(V8HeapTrait::kMaxSize) * MB));
}
-TEST_F(HeapControllerTest, OldGenerationAllocationLimit) {
+TEST_F(MemoryControllerTest, OldGenerationAllocationLimit) {
Heap* heap = i_isolate()->heap();
- HeapController heap_controller(heap);
size_t old_gen_size = 128 * MB;
size_t max_old_generation_size = 512 * MB;
double gc_speed = 100;
double mutator_speed = 1;
size_t new_space_capacity = 16 * MB;
- double max_factor = heap_controller.MaxGrowingFactor(max_old_generation_size);
- double factor =
- heap_controller.GrowingFactor(gc_speed, mutator_speed, max_factor);
+ double factor = V8Controller::GrowingFactor(heap, max_old_generation_size,
+ gc_speed, mutator_speed);
- EXPECT_EQ(
- static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, max_factor, gc_speed,
- mutator_speed, new_space_capacity, Heap::HeapGrowingMode::kDefault));
+ EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ V8Controller::CalculateAllocationLimit(
+ heap, old_gen_size, max_old_generation_size, new_space_capacity,
+ factor, Heap::HeapGrowingMode::kDefault));
- factor = Min(factor, heap_controller.conservative_growing_factor_);
- EXPECT_EQ(
- static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, max_factor, gc_speed,
- mutator_speed, new_space_capacity, Heap::HeapGrowingMode::kSlow));
+ factor = Min(factor, V8HeapTrait::kConservativeGrowingFactor);
+ EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ V8Controller::CalculateAllocationLimit(
+ heap, old_gen_size, max_old_generation_size, new_space_capacity,
+ factor, Heap::HeapGrowingMode::kSlow));
+
+ factor = Min(factor, V8HeapTrait::kConservativeGrowingFactor);
+ EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ V8Controller::CalculateAllocationLimit(
+ heap, old_gen_size, max_old_generation_size, new_space_capacity,
+ factor, Heap::HeapGrowingMode::kConservative));
- factor = Min(factor, heap_controller.conservative_growing_factor_);
+ factor = V8HeapTrait::kMinGrowingFactor;
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, max_factor, gc_speed,
- mutator_speed, new_space_capacity,
- Heap::HeapGrowingMode::kConservative));
-
- factor = heap_controller.min_growing_factor_;
- EXPECT_EQ(
- static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- heap->heap_controller()->CalculateAllocationLimit(
- old_gen_size, max_old_generation_size, max_factor, gc_speed,
- mutator_speed, new_space_capacity, Heap::HeapGrowingMode::kMinimal));
+ V8Controller::CalculateAllocationLimit(
+ heap, old_gen_size, max_old_generation_size, new_space_capacity,
+ factor, Heap::HeapGrowingMode::kMinimal));
}
-TEST_F(HeapControllerTest, MaxOldGenerationSize) {
- HeapController heap_controller(i_isolate()->heap());
+TEST_F(MemoryControllerTest, MaxOldGenerationSize) {
uint64_t configurations[][2] = {
- {0, HeapController::kMinSize},
- {512, HeapController::kMinSize},
+ {0, V8HeapTrait::kMinSize},
+ {512, V8HeapTrait::kMinSize},
{1 * GB, 256 * Heap::kPointerMultiplier},
{2 * static_cast<uint64_t>(GB), 512 * Heap::kPointerMultiplier},
- {4 * static_cast<uint64_t>(GB), HeapController::kMaxSize},
- {8 * static_cast<uint64_t>(GB), HeapController::kMaxSize}};
+ {4 * static_cast<uint64_t>(GB), V8HeapTrait::kMaxSize},
+ {8 * static_cast<uint64_t>(GB), V8HeapTrait::kMaxSize}};
for (auto configuration : configurations) {
ASSERT_EQ(configuration[1],
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index 81b8857d35..fbc384ef1d 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -6,10 +6,10 @@
#include <iostream>
#include <limits>
-#include "src/handles-inl.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/heap.h"
#include "src/heap/spaces-inl.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
index e42f22c4e1..7c88f58521 100644
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
@@ -4,7 +4,7 @@
#include "src/heap/item-parallel-job.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/heap/marking-unittest.cc b/deps/v8/test/unittests/heap/marking-unittest.cc
index 60aa28c4a9..63923c0fa8 100644
--- a/deps/v8/test/unittests/heap/marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-unittest.cc
@@ -4,7 +4,7 @@
#include <stdlib.h>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/heap/marking.h"
#include "test/unittests/heap/bitmap-test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
index 27585dc78d..d787f0348d 100644
--- a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
+++ b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
@@ -4,7 +4,7 @@
#include <limits>
-#include "src/flags.h"
+#include "src/flags/flags.h"
#include "src/heap/memory-reducer.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/heap/object-stats-unittest.cc b/deps/v8/test/unittests/heap/object-stats-unittest.cc
index 678fdd2a05..912bcafcb5 100644
--- a/deps/v8/test/unittests/heap/object-stats-unittest.cc
+++ b/deps/v8/test/unittests/heap/object-stats-unittest.cc
@@ -5,8 +5,8 @@
#include <unordered_set>
#include "src/heap/object-stats.h"
-#include "src/objects-inl.h"
#include "src/objects/fixed-array-inl.h"
+#include "src/objects/objects-inl.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/heap/scavenge-job-unittest.cc b/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
index 43386ac385..36d089f03b 100644
--- a/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
@@ -4,9 +4,9 @@
#include <limits>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/heap/scavenge-job.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index 168bc9c7bc..54b60f55e8 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -5,7 +5,7 @@
#include <limits>
#include <map>
-#include "src/globals.h"
+#include "src/common/globals.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces.h"
#include "src/objects/slots.h"
@@ -216,8 +216,8 @@ TEST(TypedSlotSet, Merge) {
TypedSlotSet set0(0), set1(0);
static const uint32_t kEntries = 10000;
for (uint32_t i = 0; i < kEntries; i++) {
- set0.Insert(EMBEDDED_OBJECT_SLOT, 2 * i);
- set1.Insert(EMBEDDED_OBJECT_SLOT, 2 * i + 1);
+ set0.Insert(FULL_EMBEDDED_OBJECT_SLOT, 2 * i);
+ set1.Insert(FULL_EMBEDDED_OBJECT_SLOT, 2 * i + 1);
}
uint32_t count = 0;
set0.Merge(&set1);
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index eed1275954..140d3d45b3 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/isolate.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -39,7 +39,7 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
HeapObject object =
compaction_space->AllocateRawUnaligned(kMaxRegularHeapObjectSize)
.ToObjectChecked();
- heap->CreateFillerObjectAt(object->address(), kMaxRegularHeapObjectSize,
+ heap->CreateFillerObjectAt(object.address(), kMaxRegularHeapObjectSize,
ClearRecordedSlots::kNo);
}
int pages_in_old_space = old_space->CountTotalPages();
diff --git a/deps/v8/test/unittests/heap/unmapper-unittest.cc b/deps/v8/test/unittests/heap/unmapper-unittest.cc
index 703c41c79d..6c3407b535 100644
--- a/deps/v8/test/unittests/heap/unmapper-unittest.cc
+++ b/deps/v8/test/unittests/heap/unmapper-unittest.cc
@@ -5,10 +5,10 @@
#include <map>
#include "src/base/region-allocator.h"
+#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
-#include "src/isolate.h"
-#include "src/ostreams.h"
+#include "src/utils/ostreams.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -267,6 +267,9 @@ TrackingPageAllocator* SequentialUnmapperTest::tracking_page_allocator_ =
v8::PageAllocator* SequentialUnmapperTest::old_page_allocator_ = nullptr;
bool SequentialUnmapperTest::old_flag_;
+// TODO(v8:7464): Enable these once there is a good way to free the shared
+// read-only space.
+#ifndef V8_SHARED_RO_HEAP
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
Page* page = allocator()->AllocatePage(
@@ -326,6 +329,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
tracking_page_allocator()->CheckIsFree(page->address(), page_size);
}
}
+#endif // V8_SHARED_RO_HEAP
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index a77e074411..c3aa7de234 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -4,16 +4,16 @@
#include <limits>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/ast/scopes.h"
-#include "src/hash-seed-inl.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
-#include "src/objects-inl.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
@@ -560,7 +560,7 @@ TEST_F(BytecodeArrayBuilderTest, Constants) {
ast_factory.Internalize(isolate());
Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
// Should only have one entry for each identical constant.
- EXPECT_EQ(4, array->constant_pool()->length());
+ EXPECT_EQ(4, array->constant_pool().length());
}
TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 0e72e2ec8d..a8907ba62a 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/hash-seed-inl.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
@@ -81,7 +81,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
@@ -98,7 +98,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 6ec19fb726..0f6f0e99b0 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/hash-seed-inl.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-random-iterator.h"
-#include "src/objects-inl.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
@@ -184,7 +184,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), 0);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
ASSERT_TRUE(iterator.IsValid());
}
@@ -331,7 +331,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
ASSERT_TRUE(iterator.IsValid());
iterator.GoToIndex(18);
@@ -488,7 +488,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
@@ -507,7 +507,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
@@ -968,7 +968,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
@@ -987,7 +987,7 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0)->Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
ASSERT_TRUE(iterator.IsValid());
--iterator;
ASSERT_FALSE(iterator.IsValid());
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 35cc3b3c28..6e7b945231 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/api.h"
+#include "src/api/api.h"
+#include "src/codegen/source-position-table.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-label.h"
@@ -12,10 +14,8 @@
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/source-position-table.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
index 9c010f25e3..35a63cf1bc 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-decoder-unittest.cc
@@ -4,10 +4,10 @@
#include <vector>
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/contexts.h"
#include "src/interpreter/bytecode-decoder.h"
+#include "src/objects/contexts.h"
#include "src/runtime/runtime.h"
#include "test/unittests/interpreter/bytecode-utils.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
index 8b8cae50ea..d789412760 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-node.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/interpreter/bytecode-operands-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-operands-unittest.cc
index a02d7f01c6..02db7a6b93 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-operands-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-operands-unittest.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/execution/isolate.h"
#include "src/interpreter/bytecode-operands.h"
-#include "src/isolate.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
index 2ba28b2306..e5866a789e 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-register-allocator.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
index 9879b2a84a..fd3de90604 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-optimizer.h"
diff --git a/deps/v8/test/unittests/interpreter/bytecode-source-info-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-source-info-unittest.cc
index f08bfe307f..6fff3d53b2 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-source-info-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-source-info-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-source-info.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/interpreter/bytecode-utils.h b/deps/v8/test/unittests/interpreter/bytecode-utils.h
index 912e9dcb7b..6e6423a784 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-utils.h
+++ b/deps/v8/test/unittests/interpreter/bytecode-utils.h
@@ -5,7 +5,7 @@
#ifndef V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
#define V8_UNITTESTS_INTERPRETER_BYTECODE_UTILS_H_
-#include "src/frames.h"
+#include "src/execution/frames.h"
#include "src/interpreter/bytecode-register.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index 6f5a11c0c7..f390631e9f 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -4,7 +4,7 @@
#include <vector>
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 894aee16a4..bfe83b03ca 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/ast/ast-value-factory.h"
-#include "src/handles-inl.h"
-#include "src/hash-seed-inl.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/interpreter/constant-array-builder.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/numbers/hash-seed-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -158,12 +158,12 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
for (size_t i = 0; i < k8BitCapacity - reserved; i++) {
Object value = constant_array->get(static_cast<int>(i));
Smi smi = Smi::FromInt(static_cast<int>(i));
- CHECK(value->SameValue(smi));
+ CHECK(value.SameValue(smi));
}
for (size_t i = k8BitCapacity; i < 2 * k8BitCapacity + reserved; i++) {
Object value = constant_array->get(static_cast<int>(i));
Smi smi = Smi::FromInt(static_cast<int>(i - reserved));
- CHECK(value->SameValue(smi));
+ CHECK(value.SameValue(smi));
}
}
}
@@ -209,7 +209,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
static_cast<int>(k8BitCapacity + reserved));
for (size_t i = 0; i < k8BitCapacity + reserved; i++) {
Object value = constant_array->get(static_cast<int>(i));
- CHECK(value->SameValue(*isolate()->factory()->NewNumberFromSize(i)));
+ CHECK(value.SameValue(*isolate()->factory()->NewNumberFromSize(i)));
}
}
}
@@ -240,9 +240,9 @@ TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
for (size_t i = 0; i < k8BitCapacity; i++) {
Object original = constant_array->get(static_cast<int>(k8BitCapacity + i));
Object duplicate = constant_array->get(static_cast<int>(i));
- CHECK(original->SameValue(duplicate));
+ CHECK(original.SameValue(duplicate));
Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
- CHECK(original->SameValue(*reference));
+ CHECK(original.SameValue(*reference));
}
}
@@ -304,13 +304,13 @@ TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), k8BitCapacity + 1);
for (int i = kNumberOfHoles; i < k8BitCapacity; i++) {
- CHECK(constant_array->get(i)->SameValue(
+ CHECK(constant_array->get(i).SameValue(
*isolate()->factory()->the_hole_value()));
}
CHECK(!constant_array->get(kNumberOfHoles - 1)
- ->SameValue(*isolate()->factory()->the_hole_value()));
+ .SameValue(*isolate()->factory()->the_hole_value()));
CHECK(!constant_array->get(k8BitCapacity)
- ->SameValue(*isolate()->factory()->the_hole_value()));
+ .SameValue(*isolate()->factory()->the_hole_value()));
}
TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
@@ -354,7 +354,7 @@ TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
} else {
expected = isolate()->factory()->the_hole_value();
}
- CHECK(constant_array->get(i)->SameValue(*expected));
+ CHECK(constant_array->get(i).SameValue(*expected));
}
}
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index fe0f6e521a..1286591752 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -4,12 +4,12 @@
#include "test/unittests/interpreter/interpreter-assembler-unittest.h"
-#include "src/code-factory.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
-#include "src/interface-descriptors.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/execution/isolate.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/node-test-utils.h"
diff --git a/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc b/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc
index e9eaf98851..8d52e80e39 100644
--- a/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc
+++ b/deps/v8/test/unittests/libplatform/default-worker-threads-task-runner-unittest.cc
@@ -274,5 +274,25 @@ TEST(DefaultWorkerThreadsTaskRunnerUnittest, NoIdleTasks) {
runner.Terminate();
}
+TEST(DefaultWorkerThreadsTaskRunnerUnittest, RunsTasksOnCurrentThread) {
+ DefaultWorkerThreadsTaskRunner runner(1, RealTime);
+
+ base::Semaphore semaphore(0);
+
+ EXPECT_FALSE(runner.RunsTasksOnCurrentThread());
+
+ std::unique_ptr<TestTask> task1 = base::make_unique<TestTask>([&] {
+ EXPECT_TRUE(runner.RunsTasksOnCurrentThread());
+ semaphore.Signal();
+ });
+ runner.PostTask(std::move(task1));
+
+ semaphore.Wait();
+ EXPECT_FALSE(runner.RunsTasksOnCurrentThread());
+
+ runner.Terminate();
+ EXPECT_FALSE(runner.RunsTasksOnCurrentThread());
+}
+
} // namespace platform
} // namespace v8
diff --git a/deps/v8/test/unittests/counters-unittest.cc b/deps/v8/test/unittests/logging/counters-unittest.cc
index 0410ef70dc..3dfb0ff92f 100644
--- a/deps/v8/test/unittests/counters-unittest.cc
+++ b/deps/v8/test/unittests/logging/counters-unittest.cc
@@ -4,13 +4,13 @@
#include <vector>
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/time.h"
-#include "src/counters-inl.h"
-#include "src/counters.h"
-#include "src/handles-inl.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/logging/counters-inl.h"
+#include "src/logging/counters.h"
+#include "src/objects/objects-inl.h"
#include "src/tracing/tracing-category-observer.h"
#include "test/unittests/test-utils.h"
@@ -30,7 +30,6 @@ class MockHistogram : public Histogram {
std::vector<int> samples_;
};
-
class AggregatedMemoryHistogramTest : public ::testing::Test {
public:
AggregatedMemoryHistogramTest() : aggregated_(&mock_) {}
@@ -182,7 +181,6 @@ class SnapshotNativeCounterTest : public TestWithNativeContextAndCounters {
} // namespace
-
TEST_F(AggregatedMemoryHistogramTest, OneSample1) {
FLAG_histogram_interval = 10;
AddSample(10, 1000);
@@ -191,7 +189,6 @@ TEST_F(AggregatedMemoryHistogramTest, OneSample1) {
EXPECT_EQ(1000, (*samples())[0]);
}
-
TEST_F(AggregatedMemoryHistogramTest, OneSample2) {
FLAG_histogram_interval = 10;
AddSample(10, 500);
@@ -200,7 +197,6 @@ TEST_F(AggregatedMemoryHistogramTest, OneSample2) {
EXPECT_EQ(750, (*samples())[0]);
}
-
TEST_F(AggregatedMemoryHistogramTest, OneSample3) {
FLAG_histogram_interval = 10;
AddSample(10, 500);
@@ -211,7 +207,6 @@ TEST_F(AggregatedMemoryHistogramTest, OneSample3) {
EXPECT_EQ(750, (*samples())[0]);
}
-
TEST_F(AggregatedMemoryHistogramTest, OneSample4) {
FLAG_histogram_interval = 10;
AddSample(10, 500);
@@ -221,7 +216,6 @@ TEST_F(AggregatedMemoryHistogramTest, OneSample4) {
EXPECT_EQ(750, (*samples())[0]);
}
-
TEST_F(AggregatedMemoryHistogramTest, TwoSamples1) {
FLAG_histogram_interval = 10;
AddSample(10, 1000);
@@ -231,7 +225,6 @@ TEST_F(AggregatedMemoryHistogramTest, TwoSamples1) {
EXPECT_EQ(1000, (*samples())[1]);
}
-
TEST_F(AggregatedMemoryHistogramTest, TwoSamples2) {
FLAG_histogram_interval = 10;
AddSample(10, 1000);
@@ -242,7 +235,6 @@ TEST_F(AggregatedMemoryHistogramTest, TwoSamples2) {
EXPECT_EQ(1000, (*samples())[1]);
}
-
TEST_F(AggregatedMemoryHistogramTest, TwoSamples3) {
FLAG_histogram_interval = 10;
AddSample(10, 1000);
@@ -254,7 +246,6 @@ TEST_F(AggregatedMemoryHistogramTest, TwoSamples3) {
EXPECT_EQ(500, (*samples())[1]);
}
-
TEST_F(AggregatedMemoryHistogramTest, TwoSamples4) {
FLAG_histogram_interval = 10;
AddSample(10, 1000);
@@ -264,7 +255,6 @@ TEST_F(AggregatedMemoryHistogramTest, TwoSamples4) {
EXPECT_EQ(250, (*samples())[1]);
}
-
TEST_F(AggregatedMemoryHistogramTest, TwoSamples5) {
FLAG_histogram_interval = 10;
AddSample(10, 0);
@@ -274,7 +264,6 @@ TEST_F(AggregatedMemoryHistogramTest, TwoSamples5) {
EXPECT_EQ(750, (*samples())[1]);
}
-
TEST_F(AggregatedMemoryHistogramTest, TwoSamples6) {
FLAG_histogram_interval = 10;
AddSample(10, 0);
@@ -285,7 +274,6 @@ TEST_F(AggregatedMemoryHistogramTest, TwoSamples6) {
EXPECT_EQ(1000, (*samples())[1]);
}
-
TEST_F(AggregatedMemoryHistogramTest, TwoSamples7) {
FLAG_histogram_interval = 10;
AddSample(10, 0);
@@ -297,7 +285,6 @@ TEST_F(AggregatedMemoryHistogramTest, TwoSamples7) {
EXPECT_EQ((250 + 500) / 2, (*samples())[1]);
}
-
TEST_F(AggregatedMemoryHistogramTest, TwoSamples8) {
FLAG_histogram_interval = 10;
AddSample(10, 1000);
@@ -309,7 +296,6 @@ TEST_F(AggregatedMemoryHistogramTest, TwoSamples8) {
EXPECT_EQ((750 + 500) / 2, (*samples())[1]);
}
-
TEST_F(AggregatedMemoryHistogramTest, ManySamples1) {
FLAG_histogram_interval = 10;
const int kMaxSamples = 1000;
@@ -321,7 +307,6 @@ TEST_F(AggregatedMemoryHistogramTest, ManySamples1) {
}
}
-
TEST_F(AggregatedMemoryHistogramTest, ManySamples2) {
FLAG_histogram_interval = 10;
const int kMaxSamples = 1000;
diff --git a/deps/v8/test/unittests/bigint-unittest.cc b/deps/v8/test/unittests/numbers/bigint-unittest.cc
index d69c512162..ae76ca1ca1 100644
--- a/deps/v8/test/unittests/bigint-unittest.cc
+++ b/deps/v8/test/unittests/numbers/bigint-unittest.cc
@@ -4,11 +4,11 @@
#include <cmath>
-#include "src/conversions.h"
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
+#include "src/numbers/conversions.h"
#include "src/objects/bigint.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -16,7 +16,7 @@
namespace v8 {
namespace internal {
-typedef TestWithIsolate BigIntWithIsolate;
+using BigIntWithIsolate = TestWithIsolate;
void Compare(Handle<BigInt> x, double value, ComparisonResult expected) {
CHECK_EQ(expected, BigInt::CompareToDouble(x, value));
diff --git a/deps/v8/test/unittests/conversions-unittest.cc b/deps/v8/test/unittests/numbers/conversions-unittest.cc
index 7c4bd96a6f..e0c1c55aae 100644
--- a/deps/v8/test/unittests/conversions-unittest.cc
+++ b/deps/v8/test/unittests/numbers/conversions-unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/conversions.h"
+#include "src/numbers/conversions.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/object-unittest.cc b/deps/v8/test/unittests/objects/object-unittest.cc
index 505d76df8b..67dfc0f9db 100644
--- a/deps/v8/test/unittests/object-unittest.cc
+++ b/deps/v8/test/unittests/objects/object-unittest.cc
@@ -6,11 +6,11 @@
#include <iostream>
#include <limits>
-#include "src/api-inl.h"
-#include "src/compiler.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/api/api-inl.h"
+#include "src/codegen/compiler.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -94,7 +94,7 @@ TEST(Object, StructListOrder) {
#undef TEST_STRUCT
}
-typedef TestWithIsolate ObjectWithIsolate;
+using ObjectWithIsolate = TestWithIsolate;
TEST_F(ObjectWithIsolate, DictionaryGrowth) {
Handle<NumberDictionary> dict = NumberDictionary::New(isolate(), 1);
@@ -156,10 +156,10 @@ TEST_F(TestWithNativeContext, EmptyFunctionScopeInfo) {
// Check that the empty_function has a properly set up ScopeInfo.
Handle<JSFunction> function = RunJS<JSFunction>("(function(){})");
- Handle<ScopeInfo> scope_info(function->shared()->scope_info(),
+ Handle<ScopeInfo> scope_info(function->shared().scope_info(),
function->GetIsolate());
Handle<ScopeInfo> empty_function_scope_info(
- isolate()->empty_function()->shared()->scope_info(),
+ isolate()->empty_function()->shared().scope_info(),
function->GetIsolate());
EXPECT_EQ(scope_info->length(), empty_function_scope_info->length());
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 3e6cac2175..38aae33809 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/value-serializer.h"
+#include "src/objects/value-serializer.h"
#include <algorithm>
#include <string>
#include "include/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/build_config.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
diff --git a/deps/v8/test/unittests/parser/ast-value-unittest.cc b/deps/v8/test/unittests/parser/ast-value-unittest.cc
index c30823b4b1..91efc9e42a 100644
--- a/deps/v8/test/unittests/parser/ast-value-unittest.cc
+++ b/deps/v8/test/unittests/parser/ast-value-unittest.cc
@@ -4,9 +4,9 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
-#include "src/hash-seed-inl.h"
+#include "src/execution/isolate-inl.h"
#include "src/heap/heap-inl.h"
-#include "src/isolate-inl.h"
+#include "src/numbers/hash-seed-inl.h"
#include "src/zone/zone.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/parser/preparser-unittest.cc b/deps/v8/test/unittests/parser/preparser-unittest.cc
index f9f5556892..13676af82b 100644
--- a/deps/v8/test/unittests/parser/preparser-unittest.cc
+++ b/deps/v8/test/unittests/parser/preparser-unittest.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/api-inl.h"
-#include "src/objects-inl.h"
+#include "src/api/api-inl.h"
+#include "src/objects/objects-inl.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/strings-storage-unittest.cc b/deps/v8/test/unittests/profiler/strings-storage-unittest.cc
index b7f95e5db1..31225f46c2 100644
--- a/deps/v8/test/unittests/strings-storage-unittest.cc
+++ b/deps/v8/test/unittests/profiler/strings-storage-unittest.cc
@@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
-typedef TestWithIsolate StringsStorageWithIsolate;
+using StringsStorageWithIsolate = TestWithIsolate;
bool StringEq(const char* left, const char* right) {
return strcmp(left, right) == 0;
diff --git a/deps/v8/test/unittests/regress/regress-crbug-938251-unittest.cc b/deps/v8/test/unittests/regress/regress-crbug-938251-unittest.cc
index 3a04fc46e6..c5109d92c5 100644
--- a/deps/v8/test/unittests/regress/regress-crbug-938251-unittest.cc
+++ b/deps/v8/test/unittests/regress/regress-crbug-938251-unittest.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/isolate.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
index 712770e9dc..ca142b4e46 100644
--- a/deps/v8/test/unittests/run-all-unittests.cc
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -42,5 +42,6 @@ int main(int argc, char** argv) {
testing::AddGlobalTestEnvironment(new DefaultPlatformEnvironment);
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeExternalStartupData(argv[0]);
+ v8::V8::InitializeICUDefaultLocation(argv[0]);
return RUN_ALL_TESTS();
}
diff --git a/deps/v8/test/unittests/char-predicates-unittest.cc b/deps/v8/test/unittests/strings/char-predicates-unittest.cc
index 85c550a7e2..6511c6a42e 100644
--- a/deps/v8/test/unittests/char-predicates-unittest.cc
+++ b/deps/v8/test/unittests/strings/char-predicates-unittest.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/char-predicates.h"
-#include "src/char-predicates-inl.h"
-#include "src/unicode.h"
+#include "src/strings/char-predicates.h"
+#include "src/strings/char-predicates-inl.h"
+#include "src/strings/unicode.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -26,7 +26,6 @@ TEST(CharPredicatesTest, WhiteSpace) {
EXPECT_FALSE(IsWhiteSpace(0x180E));
}
-
TEST(CharPredicatesTest, WhiteSpaceOrLineTerminator) {
EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x0009));
EXPECT_TRUE(IsWhiteSpaceOrLineTerminator(0x000B));
@@ -47,7 +46,6 @@ TEST(CharPredicatesTest, WhiteSpaceOrLineTerminator) {
EXPECT_FALSE(IsWhiteSpaceOrLineTerminator(0x180E));
}
-
TEST(CharPredicatesTest, IdentifierStart) {
EXPECT_TRUE(IsIdentifierStart('$'));
EXPECT_TRUE(IsIdentifierStart('_'));
@@ -89,7 +87,6 @@ TEST(CharPredicatesTest, IdentifierStart) {
#endif
}
-
TEST(CharPredicatesTest, IdentifierPart) {
EXPECT_TRUE(IsIdentifierPart('$'));
EXPECT_TRUE(IsIdentifierPart('_'));
diff --git a/deps/v8/test/unittests/unicode-unittest.cc b/deps/v8/test/unittests/strings/unicode-unittest.cc
index da1383c22c..d401899730 100644
--- a/deps/v8/test/unittests/unicode-unittest.cc
+++ b/deps/v8/test/unittests/strings/unicode-unittest.cc
@@ -6,9 +6,9 @@
#include <string>
#include <vector>
-#include "src/unicode-decoder.h"
-#include "src/unicode-inl.h"
-#include "src/vector.h"
+#include "src/strings/unicode-decoder.h"
+#include "src/strings/unicode-inl.h"
+#include "src/utils/vector.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
@@ -25,15 +25,13 @@ void DecodeNormally(const std::vector<byte>& bytes,
}
}
-template <size_t kBufferSize>
-void DecodeUtf16(unibrow::Utf8Decoder<kBufferSize>* decoder,
- const std::vector<byte>& bytes,
+void DecodeUtf16(const std::vector<uint8_t>& bytes,
std::vector<unibrow::uchar>* output) {
- auto vector = Vector<const char>::cast(VectorOf(bytes));
- decoder->Reset(vector);
+ auto utf8_data = Vector<const uint8_t>::cast(VectorOf(bytes));
+ Utf8Decoder decoder(utf8_data);
- std::vector<uint16_t> utf16(decoder->Utf16Length());
- decoder->WriteUtf16(&(*utf16.begin()), decoder->Utf16Length(), vector);
+ std::vector<uint16_t> utf16(decoder.utf16_length());
+ decoder.Decode(&utf16[0], utf8_data);
// Decode back into code points
for (size_t i = 0; i < utf16.size(); i++) {
@@ -68,13 +66,11 @@ void DecodeIncrementally(const std::vector<byte>& bytes,
} // namespace
TEST(UnicodeTest, Utf16BufferReuse) {
- unibrow::Utf8Decoder<4> utf16_decoder;
-
// Not enough continuation bytes before string ends.
- typedef struct {
+ struct TestCase {
std::vector<byte> bytes;
std::vector<unibrow::uchar> unicode_expected;
- } TestCase;
+ };
TestCase data[] = {
{{0x00}, {0x0}},
@@ -94,7 +90,7 @@ TEST(UnicodeTest, Utf16BufferReuse) {
fprintf(stderr, "\n");
std::vector<unibrow::uchar> output_utf16;
- DecodeUtf16(&utf16_decoder, test.bytes, &output_utf16);
+ DecodeUtf16(test.bytes, &output_utf16);
CHECK_EQ(output_utf16.size(), test.unicode_expected.size());
for (size_t i = 0; i < output_utf16.size(); ++i) {
@@ -104,12 +100,9 @@ TEST(UnicodeTest, Utf16BufferReuse) {
}
TEST(UnicodeTest, SurrogateOverrunsBuffer) {
- unibrow::Utf8Decoder<2> utf16_decoder;
-
std::vector<unibrow::uchar> output_utf16;
// Not enough continuation bytes before string ends.
- DecodeUtf16(&utf16_decoder, {0x00, 0xF0, 0x90, 0x80, 0x80, 0x00},
- &output_utf16);
+ DecodeUtf16({0x00, 0xF0, 0x90, 0x80, 0x80, 0x00}, &output_utf16);
CHECK_EQ(output_utf16[0], 0x00);
CHECK_EQ(output_utf16[1], 0x10000);
CHECK_EQ(output_utf16[0], 0x00);
@@ -119,10 +112,10 @@ TEST(UnicodeTest, IncrementalUTF8DecodingVsNonIncrementalUtf8Decoding) {
// Unfortunately, V8 has two UTF-8 decoders. This test checks that they
// produce the same result. This test was inspired by
// https://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt .
- typedef struct {
+ struct TestCase {
std::vector<byte> bytes;
std::vector<unibrow::uchar> unicode_expected;
- } TestCase;
+ };
TestCase data[] = {
// Correct UTF-8 text.
@@ -466,8 +459,6 @@ TEST(UnicodeTest, IncrementalUTF8DecodingVsNonIncrementalUtf8Decoding) {
0x8FFFF}},
};
- unibrow::Utf8Decoder<50> utf16_decoder;
-
for (auto test : data) {
// For figuring out which test fails:
fprintf(stderr, "test: ");
@@ -493,7 +484,7 @@ TEST(UnicodeTest, IncrementalUTF8DecodingVsNonIncrementalUtf8Decoding) {
}
std::vector<unibrow::uchar> output_utf16;
- DecodeUtf16(&utf16_decoder, test.bytes, &output_utf16);
+ DecodeUtf16(test.bytes, &output_utf16);
CHECK_EQ(output_utf16.size(), test.unicode_expected.size());
for (size_t i = 0; i < output_utf16.size(); ++i) {
diff --git a/deps/v8/test/unittests/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index e1f050c9aa..f85b3bf128 100644
--- a/deps/v8/test/unittests/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -5,19 +5,19 @@
#include <memory>
#include "include/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
-#include "src/compiler.h"
-#include "src/flags.h"
-#include "src/isolate-inl.h"
+#include "src/codegen/compiler.h"
+#include "src/execution/isolate-inl.h"
+#include "src/flags/flags.h"
+#include "src/init/v8.h"
#include "src/objects/smi.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/preparse-data.h"
-#include "src/v8.h"
#include "src/zone/zone-list-inl.h"
#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
@@ -233,7 +233,7 @@ TEST_F(BackgroundCompileTaskTest, EagerInnerFunctions) {
Handle<JSFunction> e = RunJS<JSFunction>("f();");
- ASSERT_TRUE(e->shared()->is_compiled());
+ ASSERT_TRUE(e->shared().is_compiled());
}
TEST_F(BackgroundCompileTaskTest, LazyInnerFunctions) {
@@ -261,7 +261,7 @@ TEST_F(BackgroundCompileTaskTest, LazyInnerFunctions) {
Handle<JSFunction> e = RunJS<JSFunction>("f();");
- ASSERT_FALSE(e->shared()->is_compiled());
+ ASSERT_FALSE(e->shared().is_compiled());
}
} // namespace internal
diff --git a/deps/v8/test/unittests/cancelable-tasks-unittest.cc b/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
index 05048136aa..b3843db46d 100644
--- a/deps/v8/test/unittests/cancelable-tasks-unittest.cc
+++ b/deps/v8/test/unittests/tasks/cancelable-tasks-unittest.cc
@@ -4,7 +4,7 @@
#include "src/base/atomicops.h"
#include "src/base/platform/platform.h"
-#include "src/cancelable-task.h"
+#include "src/tasks/cancelable-task.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index 53b07da23b..94209b8b10 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -5,12 +5,12 @@
#include "test/unittests/test-helpers.h"
#include "include/v8.h"
-#include "src/api.h"
+#include "src/api/api.h"
#include "src/base/template-utils.h"
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 5975091ac1..5ac44b3d57 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -6,12 +6,12 @@
#include "include/libplatform/libplatform.h"
#include "include/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/platform/time.h"
-#include "src/flags.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/v8.h"
+#include "src/execution/isolate.h"
+#include "src/flags/flags.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
namespace v8 {
@@ -64,7 +64,7 @@ namespace internal {
SaveFlags::SaveFlags() {
// For each flag, save the current flag value.
#define FLAG_MODE_APPLY(ftype, ctype, nam, def, cmt) SAVED_##nam = FLAG_##nam;
-#include "src/flag-definitions.h" // NOLINT
+#include "src/flags/flag-definitions.h" // NOLINT
#undef FLAG_MODE_APPLY
}
@@ -75,7 +75,7 @@ SaveFlags::~SaveFlags() {
if (SAVED_##nam != FLAG_##nam) { \
FLAG_##nam = SAVED_##nam; \
}
-#include "src/flag-definitions.h" // NOLINT
+#include "src/flags/flag-definitions.h" // NOLINT
#undef FLAG_MODE_APPLY
}
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 05fad89c7e..3746ee267e 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -8,12 +8,12 @@
#include <vector>
#include "include/v8.h"
-#include "src/api-inl.h"
+#include "src/api/api-inl.h"
#include "src/base/macros.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
#include "src/zone/accounting-allocator.h"
#include "src/zone/zone.h"
#include "testing/gtest-support.h"
@@ -22,7 +22,7 @@ namespace v8 {
class ArrayBufferAllocator;
-typedef std::map<std::string, int> CounterMap;
+using CounterMap = std::map<std::string, int>;
// RAII-like Isolate instance wrapper.
class IsolateWrapper final {
@@ -351,7 +351,7 @@ class SaveFlags {
private:
#define FLAG_MODE_APPLY(ftype, ctype, nam, def, cmt) ctype SAVED_##nam;
-#include "src/flag-definitions.h" // NOLINT
+#include "src/flags/flag-definitions.h" // NOLINT
#undef FLAG_MODE_APPLY
DISALLOW_COPY_AND_ASSIGN(SaveFlags);
diff --git a/deps/v8/test/unittests/torque/ls-message-unittest.cc b/deps/v8/test/unittests/torque/ls-message-unittest.cc
index b8f7cf5864..06346d32bb 100644
--- a/deps/v8/test/unittests/torque/ls-message-unittest.cc
+++ b/deps/v8/test/unittests/torque/ls-message-unittest.cc
@@ -26,7 +26,8 @@ TEST(LanguageServerMessage, InitializeRequest) {
// Check that the response id matches up with the request id, and that
// the language server signals its support for definitions.
EXPECT_EQ(response.id(), 5);
- EXPECT_EQ(response.result().capabilities().definitionProvider(), true);
+ EXPECT_TRUE(response.result().capabilities().definitionProvider());
+ EXPECT_TRUE(response.result().capabilities().documentSymbolProvider());
});
}
@@ -111,6 +112,96 @@ TEST(LanguageServerMessage, GotoDefinition) {
});
}
+TEST(LanguageServerMessage, CompilationErrorSendsDiagnostics) {
+ DiagnosticsFiles::Scope diagnostic_files_scope;
+ LanguageServerData::Scope server_data_scope;
+ TorqueMessages::Scope messages_scope;
+ SourceFileMap::Scope source_file_map_scope;
+
+ TorqueCompilerResult result;
+ { Error("compilation failed somehow"); }
+ result.messages = std::move(TorqueMessages::Get());
+ result.source_file_map = SourceFileMap::Get();
+
+ CompilationFinished(std::move(result), [](JsonValue& raw_response) {
+ PublishDiagnosticsNotification notification(raw_response);
+
+ EXPECT_EQ(notification.method(), "textDocument/publishDiagnostics");
+ ASSERT_FALSE(notification.IsNull("params"));
+ EXPECT_EQ(notification.params().uri(), "<unknown>");
+
+ ASSERT_GT(notification.params().diagnostics_size(), static_cast<size_t>(0));
+ Diagnostic diagnostic = notification.params().diagnostics(0);
+ EXPECT_EQ(diagnostic.severity(), Diagnostic::kError);
+ EXPECT_EQ(diagnostic.message(), "compilation failed somehow");
+ });
+}
+
+TEST(LanguageServerMessage, LintErrorSendsDiagnostics) {
+ DiagnosticsFiles::Scope diagnostic_files_scope;
+ TorqueMessages::Scope messages_scope;
+ LanguageServerData::Scope server_data_scope;
+ SourceFileMap::Scope sourc_file_map_scope;
+ SourceId test_id = SourceFileMap::AddSource("test.tq");
+
+ // No compilation errors but two lint warnings.
+ {
+ SourcePosition pos1{test_id, {0, 0}, {0, 1}};
+ SourcePosition pos2{test_id, {1, 0}, {1, 1}};
+ Lint("lint error 1").Position(pos1);
+ Lint("lint error 2").Position(pos2);
+ }
+
+ TorqueCompilerResult result;
+ result.messages = std::move(TorqueMessages::Get());
+ result.source_file_map = SourceFileMap::Get();
+
+ CompilationFinished(std::move(result), [](JsonValue& raw_response) {
+ PublishDiagnosticsNotification notification(raw_response);
+
+ EXPECT_EQ(notification.method(), "textDocument/publishDiagnostics");
+ ASSERT_FALSE(notification.IsNull("params"));
+ EXPECT_EQ(notification.params().uri(), "test.tq");
+
+ ASSERT_EQ(notification.params().diagnostics_size(), static_cast<size_t>(2));
+ Diagnostic diagnostic1 = notification.params().diagnostics(0);
+ EXPECT_EQ(diagnostic1.severity(), Diagnostic::kWarning);
+ EXPECT_EQ(diagnostic1.message(), "lint error 1");
+
+ Diagnostic diagnostic2 = notification.params().diagnostics(1);
+ EXPECT_EQ(diagnostic2.severity(), Diagnostic::kWarning);
+ EXPECT_EQ(diagnostic2.message(), "lint error 2");
+ });
+}
+
+TEST(LanguageServerMessage, CleanCompileSendsNoDiagnostics) {
+ LanguageServerData::Scope server_data_scope;
+ SourceFileMap::Scope sourc_file_map_scope;
+
+ TorqueCompilerResult result;
+ result.source_file_map = SourceFileMap::Get();
+
+ CompilationFinished(std::move(result), [](JsonValue& raw_response) {
+ FAIL() << "Sending unexpected response!";
+ });
+}
+
+TEST(LanguageServerMessage, NoSymbolsSendsEmptyResponse) {
+ LanguageServerData::Scope server_data_scope;
+ SourceFileMap::Scope sourc_file_map_scope;
+
+ DocumentSymbolRequest request;
+ request.set_id(42);
+ request.set_method("textDocument/documentSymbol");
+ request.params().textDocument().set_uri("test.tq");
+
+ HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
+ DocumentSymbolResponse response(raw_response);
+ EXPECT_EQ(response.id(), 42);
+ EXPECT_EQ(response.result_size(), static_cast<size_t>(0));
+ });
+}
+
} // namespace ls
} // namespace torque
} // namespace internal
diff --git a/deps/v8/test/unittests/torque/ls-server-data-unittest.cc b/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
index 74dbefe80d..ad67bf0f21 100644
--- a/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
+++ b/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
@@ -18,14 +18,13 @@ struct TestCompiler {
void Compile(const std::string& source) {
TorqueCompilerOptions options;
- options.abort_on_lint_errors = false;
options.output_directory = "";
- options.verbose = false;
options.collect_language_server_data = true;
+ options.force_assert_statements = true;
- const TorqueCompilerResult result = CompileTorque(source, options);
+ TorqueCompilerResult result = CompileTorque(source, options);
SourceFileMap::Get() = result.source_file_map;
- LanguageServerData::Get() = result.language_server_data;
+ LanguageServerData::Get() = std::move(result.language_server_data);
}
};
@@ -71,6 +70,155 @@ TEST(LanguageServer, GotoTypeDefinitionExtends) {
EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 7}}));
}
+TEST(LanguageServer, GotoTypeDefinitionNoDataForFile) {
+ LanguageServerData::Scope server_data_scope;
+ SourceFileMap::Scope file_scope;
+ SourceId test_id = SourceFileMap::AddSource("test.tq");
+
+ // Regression test, this step should not crash.
+ EXPECT_FALSE(LanguageServerData::FindDefinition(test_id, {0, 0}));
+}
+
+TEST(LanguageServer, GotoLabelDefinitionInSignature) {
+ const std::string source =
+ "type void;\n"
+ "type never;\n"
+ "macro Foo(): never labels Fail {\n"
+ " goto Fail;\n"
+ "}\n"
+ "macro Bar() labels Bailout {\n"
+ " Foo() otherwise Bailout;\n"
+ "}\n";
+
+ TestCompiler compiler;
+ compiler.Compile(source);
+
+ // Find the definition for 'Bailout' of the otherwise clause on line 6.
+ const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ auto maybe_position = LanguageServerData::FindDefinition(id, {6, 18});
+ ASSERT_TRUE(maybe_position.has_value());
+ EXPECT_EQ(*maybe_position, (SourcePosition{id, {5, 19}, {5, 26}}));
+}
+
+TEST(LanguageServer, GotoLabelDefinitionInTryBlock) {
+ const std::string source =
+ "type void;\n"
+ "type never;\n"
+ "macro Foo(): never labels Fail {\n"
+ " goto Fail;\n"
+ "}\n"
+ "macro Bar() {\n"
+ " try { Foo() otherwise Bailout; }\n"
+ " label Bailout {}\n"
+ "}\n";
+
+ TestCompiler compiler;
+ compiler.Compile(source);
+
+ // Find the definition for 'Bailout' of the otherwise clause on line 6.
+ const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ auto maybe_position = LanguageServerData::FindDefinition(id, {6, 25});
+ ASSERT_TRUE(maybe_position.has_value());
+ EXPECT_EQ(*maybe_position, (SourcePosition{id, {7, 8}, {7, 15}}));
+}
+
+TEST(LanguageServer, GotoDefinitionClassSuperType) {
+ const std::string source =
+ "type void;\n"
+ "type never;\n"
+ "type Tagged generates 'TNode<Object>' constexpr 'ObjectPtr';\n"
+ "extern class HeapObject extends Tagged {}";
+
+ TestCompiler compiler;
+ compiler.Compile(source);
+
+ // Find the definition for 'Tagged' of the 'extends' on line 3.
+ const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ auto maybe_position = LanguageServerData::FindDefinition(id, {3, 33});
+ ASSERT_TRUE(maybe_position.has_value());
+ EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 11}}));
+}
+
+TEST(LanguageServer, GotoLabelDefinitionInSignatureGotoStmt) {
+ const std::string source =
+ "type void;\n"
+ "type never;\n"
+ "macro Foo(): never labels Fail {\n"
+ " goto Fail;\n"
+ "}\n";
+
+ TestCompiler compiler;
+ compiler.Compile(source);
+
+ // Find the definition for 'Fail' of the goto statement on line 3.
+ const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ auto maybe_position = LanguageServerData::FindDefinition(id, {3, 7});
+ ASSERT_TRUE(maybe_position.has_value());
+ EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 26}, {2, 30}}));
+}
+
+TEST(LanguageServer, GotoLabelDefinitionInTryBlockGoto) {
+ const std::string source =
+ "type void;\n"
+ "type never;\n"
+ "macro Bar() {\n"
+ " try { goto Bailout; }\n"
+ " label Bailout {}\n"
+ "}\n";
+
+ TestCompiler compiler;
+ compiler.Compile(source);
+
+ // Find the definition for 'Bailout' of the goto statement on line 3.
+ const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ auto maybe_position = LanguageServerData::FindDefinition(id, {3, 13});
+ ASSERT_TRUE(maybe_position.has_value());
+ EXPECT_EQ(*maybe_position, (SourcePosition{id, {4, 8}, {4, 15}}));
+}
+
+TEST(LanguageServer, GotoLabelDefinitionGotoInOtherwise) {
+ const std::string source =
+ "type void;\n"
+ "type never;\n"
+ "macro Foo(): never labels Fail {\n"
+ " goto Fail;\n"
+ "}\n"
+ "macro Bar() {\n"
+ " try { Foo() otherwise goto Bailout; }\n"
+ " label Bailout {}\n"
+ "}\n";
+
+ TestCompiler compiler;
+ compiler.Compile(source);
+
+ // Find the definition for 'Bailout' of the otherwise clause on line 6.
+ const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ auto maybe_position = LanguageServerData::FindDefinition(id, {6, 30});
+ ASSERT_TRUE(maybe_position.has_value());
+ EXPECT_EQ(*maybe_position, (SourcePosition{id, {7, 8}, {7, 15}}));
+}
+
+TEST(LanguageServer, SymbolsArePopulated) {
+ // Small test to ensure that the GlobalContext is correctly set in
+ // the LanguageServerData class and declarables are sorted into the
+ // SymbolsMap.
+ const std::string source = R"(
+ type void;
+ type never;
+
+ macro Foo(): never labels Fail {
+ goto Fail;
+ }
+ )";
+
+ TestCompiler compiler;
+ compiler.Compile(source);
+
+ const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const auto& symbols = LanguageServerData::SymbolsForSourceId(id);
+ ASSERT_FALSE(symbols.empty());
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index eca4e6fda2..9a82498ee4 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -2,13 +2,94 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/torque/torque-compiler.h"
#include "src/torque/utils.h"
#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
namespace v8 {
namespace internal {
namespace torque {
+namespace {
+
+// This is a simplified version of the basic Torque type definitions.
+// Some class types are replaced by abstact types to keep it self-contained and
+// small.
+constexpr const char* kTestTorquePrelude = R"(
+type void;
+type never;
+
+type Tagged generates 'TNode<Object>' constexpr 'ObjectPtr';
+type Smi extends Tagged generates 'TNode<Smi>' constexpr 'Smi';
+
+@abstract
+extern class HeapObject extends Tagged {
+ map: Map;
+}
+type Map extends HeapObject generates 'TNode<Map>';
+type Object = Smi | HeapObject;
+type JSReceiver extends HeapObject generates 'TNode<JSReceiver>';
+type JSObject extends JSReceiver generates 'TNode<JSObject>';
+type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
+type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
+type int31 extends int32
+ generates 'TNode<Int32T>' constexpr 'int31_t';
+type uint31 extends uint32
+ generates 'TNode<Uint32T>' constexpr 'uint31_t';
+type int16 extends int31
+ generates 'TNode<Int16T>' constexpr 'int16_t';
+type uint16 extends uint31
+ generates 'TNode<Uint16T>' constexpr 'uint16_t';
+type int8 extends int16 generates 'TNode<Int8T>' constexpr 'int8_t';
+type uint8 extends uint16
+ generates 'TNode<Uint8T>' constexpr 'uint8_t';
+type int64 generates 'TNode<Int64T>' constexpr 'int64_t';
+type intptr generates 'TNode<IntPtrT>' constexpr 'intptr_t';
+type uintptr generates 'TNode<UintPtrT>' constexpr 'uintptr_t';
+type float32 generates 'TNode<Float32T>' constexpr 'float';
+type float64 generates 'TNode<Float64T>' constexpr 'double';
+type bool generates 'TNode<BoolT>' constexpr 'bool';
+type bint generates 'TNode<BInt>' constexpr 'BInt';
+type string constexpr 'const char*';
+type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
+type Code extends HeapObject generates 'TNode<Code>';
+type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
+type Context extends HeapObject generates 'TNode<Context>';
+type NativeContext extends Context;
+)";
+
+TorqueCompilerResult TestCompileTorque(std::string source) {
+ TorqueCompilerOptions options;
+ options.output_directory = "";
+ options.collect_language_server_data = false;
+ options.force_assert_statements = false;
+
+ source = kTestTorquePrelude + source;
+ return CompileTorque(source, options);
+}
+
+void ExpectSuccessfulCompilation(std::string source) {
+ TorqueCompilerResult result = TestCompileTorque(std::move(source));
+ std::vector<std::string> messages;
+ for (const auto& message : result.messages) {
+ messages.push_back(message.message);
+ }
+ EXPECT_EQ(messages, std::vector<std::string>{});
+}
+
+template <class T>
+void ExpectFailingCompilation(
+ std::string source, ::testing::PolymorphicMatcher<T> message_pattern) {
+ TorqueCompilerResult result = TestCompileTorque(std::move(source));
+ ASSERT_FALSE(result.messages.empty());
+ EXPECT_THAT(result.messages[0].message, message_pattern);
+}
+
+} // namespace
+
+TEST(Torque, Prelude) { ExpectSuccessfulCompilation(""); }
+
TEST(Torque, StackDeleteRange) {
Stack<int> stack = {1, 2, 3, 4, 5, 6, 7};
stack.DeleteRange(StackRange{BottomOffset{2}, BottomOffset{4}});
@@ -16,6 +97,122 @@ TEST(Torque, StackDeleteRange) {
ASSERT_TRUE(stack == result);
}
+using ::testing::HasSubstr;
+TEST(Torque, TypeNamingConventionLintError) {
+ ExpectFailingCompilation(R"(
+ type foo generates 'TNode<Foo>';
+ )",
+ HasSubstr("\"foo\""));
+}
+
+TEST(Torque, StructNamingConventionLintError) {
+ ExpectFailingCompilation(R"(
+ struct foo {}
+ )",
+ HasSubstr("\"foo\""));
+}
+
+TEST(Torque, ClassDefinition) {
+ ExpectSuccessfulCompilation(R"(
+ extern class TestClassWithAllTypes extends HeapObject {
+ a: int8;
+ b: uint8;
+ b2: uint8;
+ b3: uint8;
+ c: int16;
+ d: uint16;
+ e: int32;
+ f: uint32;
+ g: RawPtr;
+ h: intptr;
+ i: uintptr;
+ }
+
+ macro TestClassWithAllTypesLoadsAndStores(
+ t: TestClassWithAllTypes, r: RawPtr, v1: int8, v2: uint8, v3: int16,
+ v4: uint16, v5: int32, v6: uint32, v7: intptr, v8: uintptr) {
+ t.a = v1;
+ t.b = v2;
+ t.c = v3;
+ t.d = v4;
+ t.e = v5;
+ t.f = v6;
+ t.g = r;
+ t.h = v7;
+ t.i = v8;
+ t.a = t.a;
+ t.b = t.b;
+ t.c = t.c;
+ t.d = t.d;
+ t.e = t.e;
+ t.f = t.f;
+ t.g = t.g;
+ t.h = t.h;
+ t.i = t.i;
+ }
+ )");
+}
+
+TEST(Torque, TypeDeclarationOrder) {
+ ExpectSuccessfulCompilation(R"(
+ type Baztype = Foo | FooType;
+
+ @abstract
+ @noVerifier
+ extern class Foo extends HeapObject {
+ fooField: FooType;
+ }
+
+ @noVerifier
+ extern class Bar extends Foo {
+ barField: Bartype;
+ bazfield: Baztype;
+ }
+
+ type Bartype = FooType;
+
+ type FooType = Smi | Bar;
+ )");
+}
+
+TEST(Torque, ConditionalFields) {
+ // This class should throw alignment errors if @if decorators aren't
+ // working.
+ ExpectSuccessfulCompilation(R"(
+ @noVerifier
+ extern class PreprocessingTest extends HeapObject {
+ @if(FALSE_FOR_TESTING) a: int8;
+ @if(TRUE_FOR_TESTING) a: int16;
+ b: int16;
+ d: int32;
+ @ifnot(TRUE_FOR_TESTING) e: int8;
+ @ifnot(FALSE_FOR_TESTING) f: int16;
+ g: int16;
+ h: int32;
+ }
+ )");
+ ExpectFailingCompilation(R"(
+ @noVerifier
+ extern class PreprocessingTest extends HeapObject {
+ @if(TRUE_FOR_TESTING) a: int8;
+ @if(FALSE_FOR_TESTING) a: int16;
+ b: int16;
+ d: int32;
+ @ifnot(FALSE_FOR_TESTING) e: int8;
+ @ifnot(TRUE_FOR_TESTING) f: int16;
+ g: int16;
+ h: int32;
+ }
+ )",
+ HasSubstr("aligned"));
+}
+
+TEST(Torque, ConstexprLetBindingDoesNotCrash) {
+ ExpectFailingCompilation(
+ R"(macro FooBar() { let foo = 0; check(foo >= 0); })",
+ HasSubstr("Use 'const' instead of 'let' for variable 'foo'"));
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/torque/torque-utils-unittest.cc b/deps/v8/test/unittests/torque/torque-utils-unittest.cc
index ff877a3c42..48c2a90d8f 100644
--- a/deps/v8/test/unittests/torque/torque-utils-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-utils-unittest.cc
@@ -17,12 +17,15 @@ TEST(TorqueUtils, FileUriDecodeIllegal) {
}
TEST(TorqueUtils, FileUriDecode) {
- EXPECT_EQ(FileUriDecode("file:///some/src/file.tq").value(),
- "/some/src/file.tq");
+#ifdef V8_OS_WIN
EXPECT_EQ(FileUriDecode("file:///c%3A/torque/base.tq").value(),
- "/c:/torque/base.tq");
+ "c:/torque/base.tq");
EXPECT_EQ(FileUriDecode("file:///d%3a/lower/hex.txt").value(),
- "/d:/lower/hex.txt");
+ "d:/lower/hex.txt");
+#else
+ EXPECT_EQ(FileUriDecode("file:///some/src/file.tq").value(),
+ "/some/src/file.tq");
+#endif
}
} // namespace torque
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index a105afe987..c36a0b70f8 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -34,4 +34,9 @@
'*': [SKIP],
}], # variant == jitless and not embedded_builtins
+['system == windows and asan', {
+ # BUG(893437).
+ 'Torque*': [SKIP],
+}], # 'system == windows and asan'
+
]
diff --git a/deps/v8/test/unittests/allocation-unittest.cc b/deps/v8/test/unittests/utils/allocation-unittest.cc
index 7b543ece24..d60aeef7b2 100644
--- a/deps/v8/test/unittests/allocation-unittest.cc
+++ b/deps/v8/test/unittests/utils/allocation-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/allocation.h"
+#include "src/utils/allocation.h"
#if V8_OS_POSIX
#include <setjmp.h>
diff --git a/deps/v8/test/unittests/detachable-vector-unittest.cc b/deps/v8/test/unittests/utils/detachable-vector-unittest.cc
index b805352a7e..dd494298c7 100644
--- a/deps/v8/test/unittests/detachable-vector-unittest.cc
+++ b/deps/v8/test/unittests/utils/detachable-vector-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/detachable-vector.h"
+#include "src/utils/detachable-vector.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/locked-queue-unittest.cc b/deps/v8/test/unittests/utils/locked-queue-unittest.cc
index cc176d937f..880080e72e 100644
--- a/deps/v8/test/unittests/locked-queue-unittest.cc
+++ b/deps/v8/test/unittests/utils/locked-queue-unittest.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/locked-queue-inl.h"
+#include "src/utils/locked-queue-inl.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
-typedef int Record;
+using Record = int;
} // namespace
@@ -19,7 +19,6 @@ TEST(LockedQueue, ConstructorEmpty) {
EXPECT_TRUE(queue.IsEmpty());
}
-
TEST(LockedQueue, SingleRecordEnqueueDequeue) {
LockedQueue<Record> queue;
EXPECT_TRUE(queue.IsEmpty());
@@ -32,7 +31,6 @@ TEST(LockedQueue, SingleRecordEnqueueDequeue) {
EXPECT_TRUE(queue.IsEmpty());
}
-
TEST(LockedQueue, Peek) {
LockedQueue<Record> queue;
EXPECT_TRUE(queue.IsEmpty());
@@ -49,7 +47,6 @@ TEST(LockedQueue, Peek) {
EXPECT_TRUE(queue.IsEmpty());
}
-
TEST(LockedQueue, PeekOnEmpty) {
LockedQueue<Record> queue;
EXPECT_TRUE(queue.IsEmpty());
@@ -58,7 +55,6 @@ TEST(LockedQueue, PeekOnEmpty) {
EXPECT_FALSE(success);
}
-
TEST(LockedQueue, MultipleRecords) {
LockedQueue<Record> queue;
EXPECT_TRUE(queue.IsEmpty());
diff --git a/deps/v8/test/unittests/utils-unittest.cc b/deps/v8/test/unittests/utils/utils-unittest.cc
index 614880f2e7..98771da9c9 100644
--- a/deps/v8/test/unittests/utils-unittest.cc
+++ b/deps/v8/test/unittests/utils/utils-unittest.cc
@@ -4,7 +4,7 @@
#include <limits>
-#include "src/utils.h"
+#include "src/utils/utils.h"
#include "testing/gtest-support.h"
namespace v8 {
@@ -13,16 +13,16 @@ namespace internal {
template <typename T>
class UtilsTest : public ::testing::Test {};
-typedef ::testing::Types<signed char, unsigned char,
- short, // NOLINT(runtime/int)
- unsigned short, // NOLINT(runtime/int)
- int, unsigned int, long, // NOLINT(runtime/int)
- unsigned long, // NOLINT(runtime/int)
- long long, // NOLINT(runtime/int)
- unsigned long long, // NOLINT(runtime/int)
- int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
- int64_t, uint64_t>
- IntegerTypes;
+using IntegerTypes =
+ ::testing::Types<signed char, unsigned char,
+ short, // NOLINT(runtime/int)
+ unsigned short, // NOLINT(runtime/int)
+ int, unsigned int, long, // NOLINT(runtime/int)
+ unsigned long, // NOLINT(runtime/int)
+ long long, // NOLINT(runtime/int)
+ unsigned long long, // NOLINT(runtime/int)
+ int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t,
+ int64_t, uint64_t>;
TYPED_TEST_SUITE(UtilsTest, IntegerTypes);
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
index 2b1a034179..938956f07d 100644
--- a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -5,7 +5,7 @@
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/wasm/wasm-interpreter.h"
#include "test/common/wasm/wasm-macro-gen.h"
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index 1d9d0ea15c..c2c0c87aa6 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -5,7 +5,7 @@
#include "test/unittests/test-utils.h"
#include "src/base/overflowing-math.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/decoder.h"
#include "test/common/wasm/wasm-macro-gen.h"
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index ab38ffdb7c..aaf6215a8a 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -4,10 +4,10 @@
#include "test/unittests/test-utils.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/ostreams.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
+#include "src/utils/ostreams.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/local-decl-encoder.h"
@@ -84,7 +84,7 @@ class FunctionBodyDecoderTest : public TestWithZone {
local_decls.Emit(buffer);
// Emit the code.
if (code.size() > 0) {
- memcpy(buffer + locals_size, code.start(), code.size());
+ memcpy(buffer + locals_size, code.begin(), code.size());
}
if (append_end == kAppendEnd) {
// Append an extra end opcode.
@@ -116,7 +116,7 @@ class FunctionBodyDecoderTest : public TestWithZone {
PrepareBytecode(CodeToVector(std::forward<Code>(raw_code)), append_end);
// Validate the code.
- FunctionBody body(sig, 0, code.start(), code.end());
+ FunctionBody body(sig, 0, code.begin(), code.end());
WasmFeatures unused_detected_features;
DecodeResult result =
VerifyWasmCode(zone()->allocator(), enabled_features_, module,
@@ -304,6 +304,16 @@ TEST_F(FunctionBodyDecoderTest, RefNull) {
ExpectValidates(sigs.r_v(), {kExprRefNull});
}
+TEST_F(FunctionBodyDecoderTest, RefFunc) {
+ WASM_FEATURE_SCOPE(anyref);
+ TestModuleBuilder builder;
+ module = builder.module();
+
+ builder.AddFunction(sigs.v_ii());
+ builder.AddFunction(sigs.ii_v());
+ ExpectValidates(sigs.a_v(), {kExprRefFunc, 1});
+}
+
TEST_F(FunctionBodyDecoderTest, EmptyFunction) {
ExpectValidates(sigs.v_v(), {});
ExpectFailure(sigs.i_i(), {});
@@ -984,6 +994,7 @@ TEST_F(FunctionBodyDecoderTest, ReturnVoid3) {
ExpectFailure(sigs.v_v(), {kExprF32Const, 0, 0, 0, 0});
ExpectFailure(sigs.v_v(), {kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0});
ExpectFailure(sigs.v_v(), {kExprRefNull});
+ ExpectFailure(sigs.v_v(), {kExprRefFunc, 0});
ExpectFailure(sigs.v_i(), {kExprGetLocal, 0});
}
@@ -2656,6 +2667,14 @@ TEST_F(FunctionBodyDecoderTest, Select) {
{WASM_SELECT(WASM_I64V_1(0), WASM_I64V_1(0), WASM_ZERO)});
}
+TEST_F(FunctionBodyDecoderTest, Select_needs_value_type) {
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectFailure(sigs.r_r(),
+ {WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_ZERO)});
+ ExpectFailure(sigs.a_a(),
+ {WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_ZERO)});
+}
+
TEST_F(FunctionBodyDecoderTest, Select_fail1) {
ExpectFailure(sigs.i_i(), {WASM_SELECT(WASM_F32(0.0), WASM_GET_LOCAL(0),
WASM_GET_LOCAL(0))});
@@ -2669,6 +2688,8 @@ TEST_F(FunctionBodyDecoderTest, Select_fail2) {
for (size_t i = 0; i < arraysize(kValueTypes); i++) {
ValueType type = kValueTypes[i];
if (type == kWasmI32) continue;
+ // Select without specified type is only allowed for number types.
+ if (type == kWasmAnyRef) continue;
ValueType types[] = {type, kWasmI32, type};
FunctionSig sig(1, 2, types);
@@ -2698,6 +2719,34 @@ TEST_F(FunctionBodyDecoderTest, Select_TypeCheck) {
WASM_I64V_1(0))});
}
+TEST_F(FunctionBodyDecoderTest, SelectWithType) {
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectValidates(sigs.i_i(), {WASM_SELECT_I(WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0), WASM_ZERO)});
+ ExpectValidates(sigs.f_ff(),
+ {WASM_SELECT_F(WASM_F32(0.0), WASM_F32(0.0), WASM_ZERO)});
+ ExpectValidates(sigs.d_dd(),
+ {WASM_SELECT_D(WASM_F64(0.0), WASM_F64(0.0), WASM_ZERO)});
+ ExpectValidates(sigs.l_l(),
+ {WASM_SELECT_L(WASM_I64V_1(0), WASM_I64V_1(0), WASM_ZERO)});
+ ExpectValidates(sigs.r_r(),
+ {WASM_SELECT_R(WASM_REF_NULL, WASM_REF_NULL, WASM_ZERO)});
+ ExpectValidates(sigs.a_a(),
+ {WASM_SELECT_A(WASM_REF_NULL, WASM_REF_NULL, WASM_ZERO)});
+}
+
+TEST_F(FunctionBodyDecoderTest, SelectWithType_fail) {
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectFailure(sigs.i_i(), {WASM_SELECT_F(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0),
+ WASM_ZERO)});
+ ExpectFailure(sigs.f_ff(),
+ {WASM_SELECT_D(WASM_F32(0.0), WASM_F32(0.0), WASM_ZERO)});
+ ExpectFailure(sigs.d_dd(),
+ {WASM_SELECT_L(WASM_F64(0.0), WASM_F64(0.0), WASM_ZERO)});
+ ExpectFailure(sigs.l_l(),
+ {WASM_SELECT_I(WASM_I64V_1(0), WASM_I64V_1(0), WASM_ZERO)});
+}
+
TEST_F(FunctionBodyDecoderTest, Throw) {
WASM_FEATURE_SCOPE(eh);
TestModuleBuilder builder;
@@ -3169,16 +3218,87 @@ TEST_F(FunctionBodyDecoderTest, TableCopy) {
{WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
-TEST_F(FunctionBodyDecoderTest, BulkTableOpsWithoutTable) {
+TEST_F(FunctionBodyDecoderTest, TableGrow) {
TestModuleBuilder builder;
- builder.InitializeTable();
- builder.AddPassiveElementSegment();
+ byte tab_func = builder.AddTable(kWasmAnyFunc, 10, true, 20);
+ byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
- WASM_FEATURE_SCOPE(bulk_memory);
- ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
- ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ module = builder.module();
+
+ ExpectFailure(sigs.i_a(),
+ {WASM_TABLE_GROW(tab_func, WASM_REF_NULL, WASM_ONE)});
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectValidates(sigs.i_a(),
+ {WASM_TABLE_GROW(tab_func, WASM_REF_NULL, WASM_ONE)});
+ ExpectValidates(sigs.i_r(),
+ {WASM_TABLE_GROW(tab_ref, WASM_REF_NULL, WASM_ONE)});
+ // Anyfunc table cannot be initialized with an anyref value.
+ ExpectFailure(sigs.i_r(),
+ {WASM_TABLE_GROW(tab_func, WASM_GET_LOCAL(0), WASM_ONE)});
+ // Anyref table can be initialized with an anyfunc value.
+ ExpectValidates(sigs.i_a(),
+ {WASM_TABLE_GROW(tab_ref, WASM_GET_LOCAL(0), WASM_ONE)});
+ // Check that the table index gets verified.
+ ExpectFailure(sigs.i_r(),
+ {WASM_TABLE_GROW(tab_ref + 2, WASM_REF_NULL, WASM_ONE)});
+}
+
+TEST_F(FunctionBodyDecoderTest, TableSize) {
+ TestModuleBuilder builder;
+ int tab = builder.AddTable(kWasmAnyFunc, 10, true, 20);
+
+ module = builder.module();
+
+ ExpectFailure(sigs.i_v(), {WASM_TABLE_SIZE(tab)});
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectValidates(sigs.i_v(), {WASM_TABLE_SIZE(tab)});
+ ExpectFailure(sigs.i_v(), {WASM_TABLE_SIZE(tab + 2)});
+}
+
+TEST_F(FunctionBodyDecoderTest, TableFill) {
+ TestModuleBuilder builder;
+ byte tab_func = builder.AddTable(kWasmAnyFunc, 10, true, 20);
+ byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
+
+ module = builder.module();
+
+ ExpectFailure(sigs.v_a(),
+ {WASM_TABLE_FILL(tab_func, WASM_ONE, WASM_REF_NULL, WASM_ONE)});
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectValidates(sigs.v_a(), {WASM_TABLE_FILL(tab_func, WASM_ONE,
+ WASM_REF_NULL, WASM_ONE)});
+ ExpectValidates(sigs.v_r(), {WASM_TABLE_FILL(tab_ref, WASM_ONE, WASM_REF_NULL,
+ WASM_ONE)});
+ // Anyfunc table cannot be initialized with an anyref value.
+ ExpectFailure(sigs.v_r(), {WASM_TABLE_FILL(tab_func, WASM_ONE,
+ WASM_GET_LOCAL(0), WASM_ONE)});
+ // Anyref table can be initialized with an anyfunc value.
+ ExpectValidates(sigs.v_a(), {WASM_TABLE_FILL(tab_ref, WASM_ONE,
+ WASM_GET_LOCAL(0), WASM_ONE)});
+ // Check that the table index gets verified.
+ ExpectFailure(sigs.v_r(), {WASM_TABLE_FILL(tab_ref + 2, WASM_ONE,
+ WASM_REF_NULL, WASM_ONE)});
+}
+
+TEST_F(FunctionBodyDecoderTest, TableOpsWithoutTable) {
+ TestModuleBuilder builder;
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ {
+ WASM_FEATURE_SCOPE(anyref);
+ ExpectFailure(sigs.i_v(), {WASM_TABLE_GROW(0, WASM_REF_NULL, WASM_ONE)});
+ ExpectFailure(sigs.i_v(), {WASM_TABLE_SIZE(0)});
+ ExpectFailure(sigs.i_r(),
+ {WASM_TABLE_FILL(0, WASM_ONE, WASM_REF_NULL, WASM_ONE)});
+ }
+ {
+ WASM_FEATURE_SCOPE(bulk_memory);
+ builder.AddPassiveElementSegment();
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ }
}
class BranchTableIteratorTest : public TestWithZone {
@@ -3326,6 +3446,12 @@ TEST_F(WasmOpcodeLengthTest, VariableLength) {
ExpectLength(4, kExprGetGlobal, U32V_3(44));
ExpectLength(5, kExprGetGlobal, U32V_4(66));
ExpectLength(6, kExprGetGlobal, U32V_5(77));
+
+ ExpectLength(2, kExprRefFunc, U32V_1(1));
+ ExpectLength(3, kExprRefFunc, U32V_2(33));
+ ExpectLength(4, kExprRefFunc, U32V_3(44));
+ ExpectLength(5, kExprRefFunc, U32V_4(66));
+ ExpectLength(6, kExprRefFunc, U32V_5(77));
}
TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
index 704703a3ea..601de59c57 100644
--- a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -4,7 +4,7 @@
#include "test/unittests/test-utils.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/decoder.h"
#include "src/wasm/leb-helper.h"
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index 20f3d2bf3b..5f56da3a23 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -4,10 +4,10 @@
#include "test/unittests/test-utils.h"
-#include "src/bit-vector.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/objects.h"
+#include "src/utils/bit-vector.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 6fd4902f78..d63819ba70 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -4,8 +4,8 @@
#include "test/unittests/test-utils.h"
-#include "src/handles.h"
-#include "src/objects-inl.h"
+#include "src/handles/handles.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
@@ -29,7 +29,8 @@ namespace module_decoder_unittest {
#define WASM_INIT_EXPR_F32(val) WASM_F32(val), kExprEnd
#define WASM_INIT_EXPR_I64(val) WASM_I64(val), kExprEnd
#define WASM_INIT_EXPR_F64(val) WASM_F64(val), kExprEnd
-#define WASM_INIT_EXPR_ANYREF WASM_REF_NULL, kExprEnd
+#define WASM_INIT_EXPR_REF_NULL WASM_REF_NULL, kExprEnd
+#define WASM_INIT_EXPR_REF_FUNC(val) WASM_REF_FUNC(val), kExprEnd
#define WASM_INIT_EXPR_GLOBAL(index) WASM_GET_GLOBAL(index), kExprEnd
#define REF_NULL_ELEMENT kExprRefNull, kExprEnd
@@ -270,26 +271,75 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
- SECTION(Global, // --
- ENTRY_COUNT(1), // --
- kLocalAnyRef, // local type
- 0, // immutable
- WASM_INIT_EXPR_ANYREF) // init
- };
+ // sig#0 ---------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs ---------------------------------------------------------------
+ TWO_EMPTY_FUNCTIONS(SIG_INDEX(0)),
+ SECTION(Global, // --
+ ENTRY_COUNT(2), // --
+ kLocalAnyRef, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_REF_NULL, // init
+ kLocalAnyRef, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_REF_FUNC(1)), // init
+ TWO_EMPTY_BODIES};
{
- // Should decode to exactly one global.
+ // Should decode to two globals.
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1u, result.value()->globals.size());
- EXPECT_EQ(0u, result.value()->functions.size());
+ EXPECT_EQ(2u, result.value()->globals.size());
+ EXPECT_EQ(2u, result.value()->functions.size());
EXPECT_EQ(0u, result.value()->data_segments.size());
- const WasmGlobal* global = &result.value()->globals.back();
+ const WasmGlobal* global = &result.value()->globals[0];
+ EXPECT_EQ(kWasmAnyRef, global->type);
+ EXPECT_FALSE(global->mutability);
+ EXPECT_EQ(WasmInitExpr::kRefNullConst, global->init.kind);
+ global = &result.value()->globals[1];
EXPECT_EQ(kWasmAnyRef, global->type);
EXPECT_FALSE(global->mutability);
+ EXPECT_EQ(WasmInitExpr::kRefFuncConst, global->init.kind);
+ EXPECT_EQ(uint32_t{1}, global->init.val.function_index);
+ }
+}
+
+TEST_F(WasmModuleVerifyTest, AnyFuncGlobal) {
+ WASM_FEATURE_SCOPE(anyref);
+ static const byte data[] = {
+ // sig#0 ---------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs ---------------------------------------------------------------
+ TWO_EMPTY_FUNCTIONS(SIG_INDEX(0)),
+ SECTION(Global, // --
+ ENTRY_COUNT(2), // --
+ kLocalAnyFunc, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_REF_NULL, // init
+ kLocalAnyFunc, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_REF_FUNC(1)), // init
+ TWO_EMPTY_BODIES};
+ {
+ // Should decode to two globals.
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ EXPECT_EQ(2u, result.value()->globals.size());
+ EXPECT_EQ(2u, result.value()->functions.size());
+ EXPECT_EQ(0u, result.value()->data_segments.size());
+
+ const WasmGlobal* global = &result.value()->globals[0];
+ EXPECT_EQ(kWasmAnyFunc, global->type);
+ EXPECT_FALSE(global->mutability);
EXPECT_EQ(WasmInitExpr::kRefNullConst, global->init.kind);
+
+ global = &result.value()->globals[1];
+ EXPECT_EQ(kWasmAnyFunc, global->type);
+ EXPECT_FALSE(global->mutability);
+ EXPECT_EQ(WasmInitExpr::kRefFuncConst, global->init.kind);
+ EXPECT_EQ(uint32_t{1}, global->init.val.function_index);
}
}
@@ -1175,9 +1225,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
EXPECT_VERIFIES(data);
}
-TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefTable) {
- // Test that tables of type 'AnyRef' cannot be initialized by the element
- // section.
+TEST_F(WasmModuleVerifyTest, ElementSectionInitAnyRefTableWithAnyFunc) {
WASM_FEATURE_SCOPE(anyref);
WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
@@ -1201,9 +1249,11 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefTable) {
2, // elements count
FUNC_INDEX(0), // entry 0
FUNC_INDEX(0)), // entry 1
+ // code ----------------------------------------------------------------
+ ONE_EMPTY_BODY,
};
- EXPECT_FAILURE(data);
+ EXPECT_VERIFIES(data);
}
TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefImportedTable) {
@@ -2433,7 +2483,8 @@ TEST_F(WasmModuleVerifyTest, DataCountSegmentCount_omitted) {
#undef WASM_INIT_EXPR_F32
#undef WASM_INIT_EXPR_I64
#undef WASM_INIT_EXPR_F64
-#undef WASM_INIT_EXPR_ANYREF
+#undef WASM_INIT_EXPR_REF_NULL
+#undef WASM_INIT_EXPR_REF_FUNC
#undef WASM_INIT_EXPR_GLOBAL
#undef REF_NULL_ELEMENT
#undef REF_FUNC_ELEMENT
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
index 78a9ab3a36..5166b13628 100644
--- a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -4,7 +4,7 @@
#include "test/unittests/test-utils.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/streaming-decoder.h"
diff --git a/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
index 58302bad74..006f1344ba 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
@@ -5,9 +5,9 @@
#include <windows.h>
#include "include/v8.h"
-#include "src/allocation.h"
#include "src/base/page-allocator.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/utils/allocation.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
diff --git a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
index 70d4badc62..1659370999 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-x64-unittest.cc
@@ -21,13 +21,13 @@
#elif V8_OS_WIN
#include "include/v8-wasm-trap-handler-win.h"
#endif
-#include "src/allocation.h"
-#include "src/assembler-inl.h"
#include "src/base/page-allocator.h"
-#include "src/macro-assembler-inl.h"
-#include "src/simulator.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/execution/simulator.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/vector.h"
+#include "src/utils/allocation.h"
+#include "src/utils/vector.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
diff --git a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
index 9c7ed8a702..eea1f8208d 100644
--- a/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-code-manager-unittest.cc
@@ -158,11 +158,17 @@ class WasmCodeManagerTest : public TestWithContext,
static constexpr uint32_t kNumFunctions = 10;
static constexpr uint32_t kJumpTableSize = RoundUp<kCodeAlignment>(
JumpTableAssembler::SizeForNumberOfSlots(kNumFunctions));
- static size_t page_size;
+ static size_t allocate_page_size;
+ static size_t commit_page_size;
WasmCodeManagerTest() {
- if (page_size == 0) page_size = AllocatePageSize();
- DCHECK_NE(0, page_size);
+ CHECK_EQ(allocate_page_size == 0, commit_page_size == 0);
+ if (allocate_page_size == 0) {
+ allocate_page_size = AllocatePageSize();
+ commit_page_size = CommitPageSize();
+ }
+ CHECK_NE(0, allocate_page_size);
+ CHECK_NE(0, commit_page_size);
}
using NativeModulePtr = std::shared_ptr<NativeModule>;
@@ -193,10 +199,17 @@ class WasmCodeManagerTest : public TestWithContext,
void SetMaxCommittedMemory(size_t limit) {
manager()->SetMaxCommittedMemoryForTesting(limit);
}
+
+ void DisableWin64UnwindInfoForTesting() {
+#if defined(V8_OS_WIN_X64)
+ manager()->DisableWin64UnwindInfoForTesting();
+#endif
+ }
};
// static
-size_t WasmCodeManagerTest::page_size = 0;
+size_t WasmCodeManagerTest::allocate_page_size = 0;
+size_t WasmCodeManagerTest::commit_page_size = 0;
INSTANTIATE_TEST_SUITE_P(Parameterized, WasmCodeManagerTest,
::testing::Values(Fixed, Growable),
@@ -206,93 +219,107 @@ TEST_P(WasmCodeManagerTest, EmptyCase) {
SetMaxCommittedMemory(0);
CHECK_EQ(0, manager()->committed_code_space());
- ASSERT_DEATH_IF_SUPPORTED(AllocModule(page_size, GetParam()),
- "OOM in NativeModule::AllocateForCode commit");
+ ASSERT_DEATH_IF_SUPPORTED(AllocModule(allocate_page_size, GetParam()),
+ "OOM in wasm code commit");
}
TEST_P(WasmCodeManagerTest, AllocateAndGoOverLimit) {
- SetMaxCommittedMemory(page_size);
+ SetMaxCommittedMemory(allocate_page_size);
+ DisableWin64UnwindInfoForTesting();
+
CHECK_EQ(0, manager()->committed_code_space());
- NativeModulePtr native_module = AllocModule(page_size, GetParam());
+ NativeModulePtr native_module = AllocModule(allocate_page_size, GetParam());
CHECK(native_module);
- CHECK_EQ(page_size, manager()->committed_code_space());
+ CHECK_EQ(commit_page_size, manager()->committed_code_space());
WasmCodeRefScope code_ref_scope;
uint32_t index = 0;
WasmCode* code = AddCode(native_module.get(), index++, 1 * kCodeAlignment);
CHECK_NOT_NULL(code);
- CHECK_EQ(page_size, manager()->committed_code_space());
+ CHECK_EQ(commit_page_size, manager()->committed_code_space());
code = AddCode(native_module.get(), index++, 3 * kCodeAlignment);
CHECK_NOT_NULL(code);
- CHECK_EQ(page_size, manager()->committed_code_space());
+ CHECK_EQ(commit_page_size, manager()->committed_code_space());
code = AddCode(native_module.get(), index++,
- page_size - 4 * kCodeAlignment - kJumpTableSize);
+ allocate_page_size - 4 * kCodeAlignment - kJumpTableSize);
CHECK_NOT_NULL(code);
- CHECK_EQ(page_size, manager()->committed_code_space());
+ CHECK_EQ(allocate_page_size, manager()->committed_code_space());
// This fails in "reservation" if we cannot extend the code space, or in
// "commit" it we can (since we hit the allocation limit in the
// WasmCodeManager). Hence don't check for that part of the OOM message.
ASSERT_DEATH_IF_SUPPORTED(
AddCode(native_module.get(), index++, 1 * kCodeAlignment),
- "OOM in NativeModule::AllocateForCode");
+ "OOM in wasm code");
}
TEST_P(WasmCodeManagerTest, TotalLimitIrrespectiveOfModuleCount) {
- SetMaxCommittedMemory(3 * page_size);
- NativeModulePtr nm1 = AllocModule(2 * page_size, GetParam());
- NativeModulePtr nm2 = AllocModule(2 * page_size, GetParam());
+ SetMaxCommittedMemory(3 * allocate_page_size);
+ DisableWin64UnwindInfoForTesting();
+
+ NativeModulePtr nm1 = AllocModule(2 * allocate_page_size, GetParam());
+ NativeModulePtr nm2 = AllocModule(2 * allocate_page_size, GetParam());
CHECK(nm1);
CHECK(nm2);
WasmCodeRefScope code_ref_scope;
- WasmCode* code = AddCode(nm1.get(), 0, 2 * page_size - kJumpTableSize);
+ WasmCode* code =
+ AddCode(nm1.get(), 0, 2 * allocate_page_size - kJumpTableSize);
CHECK_NOT_NULL(code);
ASSERT_DEATH_IF_SUPPORTED(
- AddCode(nm2.get(), 0, 2 * page_size - kJumpTableSize),
- "OOM in NativeModule::AllocateForCode commit");
+ AddCode(nm2.get(), 0, 2 * allocate_page_size - kJumpTableSize),
+ "OOM in wasm code commit");
}
TEST_P(WasmCodeManagerTest, GrowingVsFixedModule) {
- SetMaxCommittedMemory(3 * page_size);
- NativeModulePtr nm = AllocModule(page_size, GetParam());
- size_t module_size = GetParam() == Fixed ? kMaxWasmCodeMemory : page_size;
+ SetMaxCommittedMemory(3 * allocate_page_size);
+ DisableWin64UnwindInfoForTesting();
+
+ NativeModulePtr nm = AllocModule(allocate_page_size, GetParam());
+ size_t module_size =
+ GetParam() == Fixed ? kMaxWasmCodeMemory : allocate_page_size;
size_t remaining_space_in_module = module_size - kJumpTableSize;
if (GetParam() == Fixed) {
// Requesting more than the remaining space fails because the module cannot
// grow.
ASSERT_DEATH_IF_SUPPORTED(
AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment),
- "OOM in NativeModule::AllocateForCode");
+ "OOM in wasm code reservation");
} else {
// The module grows by one page. One page remains uncommitted.
WasmCodeRefScope code_ref_scope;
CHECK_NOT_NULL(
AddCode(nm.get(), 0, remaining_space_in_module + kCodeAlignment));
- CHECK_EQ(2 * page_size, manager()->committed_code_space());
+ CHECK_EQ(commit_page_size + allocate_page_size,
+ manager()->committed_code_space());
}
}
TEST_P(WasmCodeManagerTest, CommitIncrements) {
- SetMaxCommittedMemory(10 * page_size);
- NativeModulePtr nm = AllocModule(3 * page_size, GetParam());
+ SetMaxCommittedMemory(10 * allocate_page_size);
+ DisableWin64UnwindInfoForTesting();
+
+ NativeModulePtr nm = AllocModule(3 * allocate_page_size, GetParam());
WasmCodeRefScope code_ref_scope;
WasmCode* code = AddCode(nm.get(), 0, kCodeAlignment);
CHECK_NOT_NULL(code);
- CHECK_EQ(page_size, manager()->committed_code_space());
- code = AddCode(nm.get(), 1, 2 * page_size);
+ CHECK_EQ(commit_page_size, manager()->committed_code_space());
+ code = AddCode(nm.get(), 1, 2 * allocate_page_size);
CHECK_NOT_NULL(code);
- CHECK_EQ(3 * page_size, manager()->committed_code_space());
- code = AddCode(nm.get(), 2, page_size - kCodeAlignment - kJumpTableSize);
+ CHECK_EQ(commit_page_size + 2 * allocate_page_size,
+ manager()->committed_code_space());
+ code = AddCode(nm.get(), 2,
+ allocate_page_size - kCodeAlignment - kJumpTableSize);
CHECK_NOT_NULL(code);
- CHECK_EQ(3 * page_size, manager()->committed_code_space());
+ CHECK_EQ(3 * allocate_page_size, manager()->committed_code_space());
}
TEST_P(WasmCodeManagerTest, Lookup) {
- SetMaxCommittedMemory(2 * page_size);
+ SetMaxCommittedMemory(2 * allocate_page_size);
+ DisableWin64UnwindInfoForTesting();
- NativeModulePtr nm1 = AllocModule(page_size, GetParam());
- NativeModulePtr nm2 = AllocModule(page_size, GetParam());
+ NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
+ NativeModulePtr nm2 = AllocModule(allocate_page_size, GetParam());
Address mid_code1_1;
{
// The {WasmCodeRefScope} needs to die before {nm1} dies.
@@ -334,9 +361,10 @@ TEST_P(WasmCodeManagerTest, Lookup) {
}
TEST_P(WasmCodeManagerTest, LookupWorksAfterRewrite) {
- SetMaxCommittedMemory(2 * page_size);
+ SetMaxCommittedMemory(2 * allocate_page_size);
+ DisableWin64UnwindInfoForTesting();
- NativeModulePtr nm1 = AllocModule(page_size, GetParam());
+ NativeModulePtr nm1 = AllocModule(allocate_page_size, GetParam());
WasmCodeRefScope code_ref_scope;
WasmCode* code0 = AddCode(nm1.get(), 0, kCodeAlignment);
diff --git a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
index 3cca4bc55c..4b9f78dfdc 100644
--- a/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-compiler-unittest.cc
@@ -4,10 +4,10 @@
#include "test/unittests/test-utils.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/signature.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/machine-type.h"
-#include "src/signature.h"
#include "src/wasm/value-type.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc b/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
index 807fc40959..2bfe2b6df5 100644
--- a/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-module-builder-unittest.cc
@@ -4,9 +4,9 @@
#include "test/unittests/test-utils.h"
-#include "src/v8.h"
+#include "src/init/v8.h"
-#include "src/objects-inl.h"
+#include "src/objects/objects-inl.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module-builder.h"
diff --git a/deps/v8/test/wasm-api-tests/BUILD.gn b/deps/v8/test/wasm-api-tests/BUILD.gn
new file mode 100644
index 0000000000..c411cb0eb2
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/BUILD.gn
@@ -0,0 +1,35 @@
+# Copyright 2019 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+v8_executable("wasm_api_tests") {
+ testonly = true
+
+ deps = [
+ "../..:v8_maybe_icu",
+ "../..:wee8",
+ "//build/win:default_exe_manifest",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+
+ data_deps = [
+ "../../tools:v8_testrunner",
+ ]
+
+ data = [
+ "testcfg.py",
+ "wasm-api-tests.status",
+ ]
+
+ configs = [ "../..:internal_config_base" ]
+
+ sources = [
+ "../../testing/gmock-support.h",
+ "../../testing/gtest-support.h",
+ "callbacks.cc",
+ "run-all-wasm-api-tests.cc",
+ ]
+}
diff --git a/deps/v8/test/wasm-api-tests/DEPS b/deps/v8/test/wasm-api-tests/DEPS
new file mode 100644
index 0000000000..0804c0e608
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/DEPS
@@ -0,0 +1,5 @@
+include_rules = [
+ "+src",
+ "+testing",
+ "+third_party/wasm-api"
+]
diff --git a/deps/v8/test/wasm-api-tests/OWNERS b/deps/v8/test/wasm-api-tests/OWNERS
new file mode 100644
index 0000000000..852d438bb0
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/OWNERS
@@ -0,0 +1 @@
+file://COMMON_OWNERS
diff --git a/deps/v8/test/wasm-api-tests/callbacks.cc b/deps/v8/test/wasm-api-tests/callbacks.cc
new file mode 100644
index 0000000000..1c91d9ca54
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/callbacks.cc
@@ -0,0 +1,195 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/isolate.h"
+#include "src/heap/heap.h"
+#include "src/wasm/c-api.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone/accounting-allocator.h"
+#include "src/zone/zone.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/wasm-api/wasm.hh"
+
+namespace wasm {
+
+// TODO(jkummerow): Drop these from the API.
+#ifdef DEBUG
+template <class T>
+void vec<T>::make_data() {}
+
+template <class T>
+void vec<T>::free_data() {}
+#endif
+
+} // namespace wasm
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+
+using ::wasm::Engine;
+using ::wasm::Extern;
+using ::wasm::Func;
+using ::wasm::FuncType;
+using ::wasm::Instance;
+using ::wasm::Module;
+using ::wasm::own;
+using ::wasm::Store;
+using ::wasm::Trap;
+using ::wasm::Val;
+using ::wasm::ValType;
+using ::wasm::vec;
+
+own<Trap*> Stage2(void* env, const Val args[], Val results[]);
+
+class WasmCapiTest : public ::testing::Test {
+ public:
+ WasmCapiTest()
+ : Test(),
+ zone_(&allocator_, ZONE_NAME),
+ builder_(&zone_),
+ exports_(vec<Extern*>::make()),
+ wasm_sig_(1, 1, wasm_sig_types_) {
+ engine_ = Engine::make();
+ store_ = Store::make(engine_.get());
+
+ // Build the following function:
+ // int32 stage1(int32 arg0) { return stage2(arg0); }
+ uint32_t stage2_index =
+ builder_.AddImport(ArrayVector("stage2"), wasm_sig());
+ byte code[] = {WASM_CALL_FUNCTION(stage2_index, WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("stage1"), code, sizeof(code));
+
+ cpp_sig_ = FuncType::make(vec<ValType*>::make(ValType::make(::wasm::I32)),
+ vec<ValType*>::make(ValType::make(::wasm::I32)));
+ stage2_ = Func::make(store(), cpp_sig_.get(), Stage2, this);
+ }
+
+ void Compile() {
+ ZoneBuffer buffer(&zone_);
+ builder_.WriteTo(buffer);
+ size_t size = buffer.end() - buffer.begin();
+ vec<byte_t> binary = vec<byte_t>::make(
+ size, reinterpret_cast<byte_t*>(const_cast<byte*>(buffer.begin())));
+
+ module_ = Module::make(store_.get(), binary);
+ DCHECK_NE(module_.get(), nullptr);
+ }
+
+ own<Trap*> Run(Extern* imports[], Val args[], Val results[]) {
+ instance_ = Instance::make(store_.get(), module_.get(), imports);
+ DCHECK_NE(instance_.get(), nullptr);
+ exports_ = instance_->exports();
+ Func* entry = GetExportedFunction(0);
+ return entry->call(args, results);
+ }
+
+ void AddExportedFunction(Vector<const char> name, byte code[],
+ size_t code_size) {
+ WasmFunctionBuilder* fun = builder()->AddFunction(wasm_sig());
+ fun->EmitCode(code, static_cast<uint32_t>(code_size));
+ fun->Emit(kExprEnd);
+ builder()->AddExport(name, fun);
+ }
+
+ Func* GetExportedFunction(size_t index) {
+ DCHECK_GT(exports_.size(), index);
+ Extern* exported = exports_[index];
+ DCHECK_EQ(exported->kind(), ::wasm::EXTERN_FUNC);
+ Func* func = exported->func();
+ DCHECK_NE(func, nullptr);
+ return func;
+ }
+
+ WasmModuleBuilder* builder() { return &builder_; }
+ Store* store() { return store_.get(); }
+ Func* stage2() { return stage2_.get(); }
+
+ FunctionSig* wasm_sig() { return &wasm_sig_; }
+ FuncType* cpp_sig() { return cpp_sig_.get(); }
+
+ private:
+ AccountingAllocator allocator_;
+ Zone zone_;
+ WasmModuleBuilder builder_;
+ own<Engine*> engine_;
+ own<Store*> store_;
+ own<Module*> module_;
+ own<Instance*> instance_;
+ vec<Extern*> exports_;
+ own<Func*> stage2_;
+ own<FuncType*> cpp_sig_;
+ ValueType wasm_sig_types_[2] = {kWasmI32, kWasmI32};
+ FunctionSig wasm_sig_;
+};
+
+own<Trap*> Stage2(void* env, const Val args[], Val results[]) {
+ printf("Stage2...\n");
+ WasmCapiTest* self = reinterpret_cast<WasmCapiTest*>(env);
+ Func* stage3 = self->GetExportedFunction(1);
+ own<Trap*> result = stage3->call(args, results);
+ if (result) {
+ printf("Stage2: got exception: %s\n", result->message().get());
+ } else {
+ printf("Stage2: call successful\n");
+ }
+ return result;
+}
+
+own<Trap*> Stage4_GC(void* env, const Val args[], Val results[]) {
+ printf("Stage4...\n");
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(env);
+ isolate->heap()->PreciseCollectAllGarbage(
+ i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
+ v8::kGCCallbackFlagForced);
+ results[0] = Val::i32(args[0].i32() + 1);
+ return nullptr;
+}
+
+} // namespace
+
+TEST_F(WasmCapiTest, Trap) {
+ // Build the following function:
+ // int32 stage3_trap(int32 arg0) { unreachable(); }
+ byte code[] = {WASM_UNREACHABLE};
+ AddExportedFunction(CStrVector("stage3_trap"), code, sizeof(code));
+ Compile();
+
+ Extern* imports[] = {stage2()};
+ Val args[] = {Val::i32(42)};
+ Val results[1];
+ own<Trap*> result = Run(imports, args, results);
+ EXPECT_NE(result, nullptr);
+ printf("Stage0: Got trap as expected: %s\n", result->message().get());
+}
+
+TEST_F(WasmCapiTest, GC) {
+ // Build the following function:
+ // int32 stage3_to4(int32 arg0) { return stage4(arg0); }
+ uint32_t stage4_index =
+ builder()->AddImport(ArrayVector("stage4"), wasm_sig());
+ byte code[] = {WASM_CALL_FUNCTION(stage4_index, WASM_GET_LOCAL(0))};
+ AddExportedFunction(CStrVector("stage3_to4"), code, sizeof(code));
+ Compile();
+
+ i::Isolate* isolate =
+ reinterpret_cast<::wasm::StoreImpl*>(store())->i_isolate();
+ own<Func*> stage4 = Func::make(store(), cpp_sig(), Stage4_GC, isolate);
+ EXPECT_EQ(cpp_sig()->params().size(), stage4->type()->params().size());
+ EXPECT_EQ(cpp_sig()->results().size(), stage4->type()->results().size());
+ Extern* imports[] = {stage2(), stage4.get()};
+ Val args[] = {Val::i32(42)};
+ Val results[1];
+ own<Trap*> result = Run(imports, args, results);
+ EXPECT_EQ(result, nullptr);
+ EXPECT_EQ(43, results[0].i32());
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/wasm-api-tests/run-all-wasm-api-tests.cc b/deps/v8/test/wasm-api-tests/run-all-wasm-api-tests.cc
new file mode 100644
index 0000000000..1b1c94923a
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/run-all-wasm-api-tests.cc
@@ -0,0 +1,17 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+int main(int argc, char** argv) {
+ // Don't catch SEH exceptions and continue as the following tests might hang
+ // in a broken environment on Windows.
+ testing::GTEST_FLAG(catch_exceptions) = false;
+ testing::InitGoogleMock(&argc, argv);
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::V8::InitializeExternalStartupData(argv[0]);
+ return RUN_ALL_TESTS();
+}
diff --git a/deps/v8/test/wasm-api-tests/testcfg.py b/deps/v8/test/wasm-api-tests/testcfg.py
new file mode 100644
index 0000000000..12c6d6d6c3
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/testcfg.py
@@ -0,0 +1,85 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+
+from testrunner.local import command
+from testrunner.local import utils
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+
+class VariantsGenerator(testsuite.VariantsGenerator):
+ def _get_variants(self, test):
+ return self._standard_variant
+
+
+class TestLoader(testsuite.TestLoader):
+ def _list_test_filenames(self):
+ shell = os.path.abspath(
+ os.path.join(self.test_config.shell_dir, "wasm_api_tests"))
+ if utils.IsWindows():
+ shell += ".exe"
+
+ output = None
+ for i in range(3): # Try 3 times in case of errors.
+ cmd = command.Command(
+ cmd_prefix=self.test_config.command_prefix,
+ shell=shell,
+ args=['--gtest_list_tests'] + self.test_config.extra_flags)
+ output = cmd.execute()
+ if output.exit_code == 0:
+ break
+
+ print("Test executable failed to list the tests (try %d).\n\nCmd:" % i)
+ print(cmd)
+ print("\nStdout:")
+ print(output.stdout)
+ print("\nStderr:")
+ print(output.stderr)
+ print("\nExit code: %d" % output.exit_code)
+ else:
+ raise Exception("Test executable failed to list the tests.")
+
+  # TODO: create an ExecutableTestLoader to refactor this, similarly to
+  # JSTestLoader.
+ test_names = []
+ for line in output.stdout.splitlines():
+ test_desc = line.strip().split()[0]
+ if test_desc.endswith('.'):
+ test_case = test_desc
+ elif test_case and test_desc:
+ test_names.append(test_case + test_desc)
+
+ return sorted(test_names)
+
+
+class TestSuite(testsuite.TestSuite):
+ def _test_loader_class(self):
+ return TestLoader
+
+ def _test_class(self):
+ return TestCase
+
+ def _variants_gen_class(self):
+ return VariantsGenerator
+
+
+class TestCase(testcase.TestCase):
+ def _get_suite_flags(self):
+ return (
+ ["--gtest_filter=" + self.path] +
+ ["--gtest_random_seed=%s" % self.random_seed] +
+ ["--gtest_print_time=0"]
+ )
+
+ def get_shell(self):
+ return "wasm_api_tests"
+
+
+def GetSuite(*args, **kwargs):
+ return TestSuite(*args, **kwargs)
diff --git a/deps/v8/test/wasm-api-tests/wasm-api-tests.status b/deps/v8/test/wasm-api-tests/wasm-api-tests.status
new file mode 100644
index 0000000000..72aedb7368
--- /dev/null
+++ b/deps/v8/test/wasm-api-tests/wasm-api-tests.status
@@ -0,0 +1,16 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+['lite_mode or variant == jitless', {
+ # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.
+ '*': [SKIP],
+}], # lite_mode or variant == jitless
+
+##############################################################################
+['variant == jitless and not embedded_builtins', {
+ '*': [SKIP],
+}], # variant == jitless and not embedded_builtins
+
+]
diff --git a/deps/v8/test/wasm-spec-tests/OWNERS b/deps/v8/test/wasm-spec-tests/OWNERS
index 88cf9bea30..b347d0ae0c 100644
--- a/deps/v8/test/wasm-spec-tests/OWNERS
+++ b/deps/v8/test/wasm-spec-tests/OWNERS
@@ -1,5 +1,4 @@
ahaas@chromium.org
clemensh@chromium.org
-machenbach@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 0d7cbd316d..f663c38443 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-0f6a6867db9effcfdbbeead8476cfb08dd8ac684 \ No newline at end of file
+e8bdb558198b944ff8a0df43301f1ff4eb3a91fa \ No newline at end of file
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index 82584b56fe..014155e54c 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -36,6 +36,11 @@
# Irregexp interpreter overflows stack. We should just not crash.
'fast/js/regexp-stack-overflow': [PASS, FAIL],
+ # This test tries to sort very large array. Array#sort pre-processing does
+ # not support huge sparse Arrays, so this test runs a very long time.
+ # https://crbug.com/v8/8714
+ 'array-sort-small-sparse-array-with-large-length': [SKIP],
+
# Slow tests.
'dfg-double-vote-fuzz': [PASS, SLOW],
}], # ALWAYS
@@ -109,10 +114,10 @@
}], # arch == arm64 and msan
##############################################################################
-['variant in [nooptimization, stress, stress_background_compile] and (arch == arm or arch == arm64) and simulator_run', {
+['variant in [nooptimization, stress, stress_js_bg_compile_wasm_code_gc] and (arch == arm or arch == arm64) and simulator_run', {
# Slow tests: https://crbug.com/v8/7783
'dfg-double-vote-fuzz': [SKIP],
-}], # variant in [nooptimization, stress, stress_background_compile] and (arch == arm or arch == arm64) and simulator_run
+}], # variant in [nooptimization, stress, stress_js_bg_compile_wasm_code_gc] and (arch == arm or arch == arm64) and simulator_run
##############################################################################
['gcov_coverage', {
diff --git a/deps/v8/third_party/inspector_protocol/.clang-format b/deps/v8/third_party/inspector_protocol/.clang-format
new file mode 100644
index 0000000000..fcbc9c321a
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/.clang-format
@@ -0,0 +1,36 @@
+# Defines the Chromium style for automatic reformatting.
+# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
+BasedOnStyle: Chromium
+# This defaults to 'Auto'. Explicitly set it for a while, so that
+# 'vector<vector<int> >' in existing files gets formatted to
+# 'vector<vector<int>>'. ('Auto' means that clang-format will only use
+# 'int>>' if the file already contains at least one such instance.)
+Standard: Cpp11
+
+# Make sure code like:
+# IPC_BEGIN_MESSAGE_MAP()
+# IPC_MESSAGE_HANDLER(WidgetHostViewHost_Update, OnUpdate)
+# IPC_END_MESSAGE_MAP()
+# gets correctly indented.
+MacroBlockBegin: "^\
+BEGIN_MSG_MAP|\
+BEGIN_MSG_MAP_EX|\
+BEGIN_SAFE_MSG_MAP_EX|\
+CR_BEGIN_MSG_MAP_EX|\
+IPC_BEGIN_MESSAGE_MAP|\
+IPC_BEGIN_MESSAGE_MAP_WITH_PARAM|\
+IPC_PROTOBUF_MESSAGE_TRAITS_BEGIN|\
+IPC_STRUCT_BEGIN|\
+IPC_STRUCT_BEGIN_WITH_PARENT|\
+IPC_STRUCT_TRAITS_BEGIN|\
+POLPARAMS_BEGIN|\
+PPAPI_BEGIN_MESSAGE_MAP$"
+MacroBlockEnd: "^\
+CR_END_MSG_MAP|\
+END_MSG_MAP|\
+IPC_END_MESSAGE_MAP|\
+IPC_PROTOBUF_MESSAGE_TRAITS_END|\
+IPC_STRUCT_END|\
+IPC_STRUCT_TRAITS_END|\
+POLPARAMS_END|\
+PPAPI_END_MESSAGE_MAP$"
diff --git a/deps/v8/third_party/inspector_protocol/BUILD.gn b/deps/v8/third_party/inspector_protocol/BUILD.gn
new file mode 100644
index 0000000000..974471bf27
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/BUILD.gn
@@ -0,0 +1,34 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+static_library("encoding") {
+ sources = [
+ "encoding/encoding.cc",
+ "encoding/encoding.h",
+ ]
+}
+
+# encoding_test is part of the unittests, defined in
+# test/unittests/BUILD.gn.
+
+import("../../gni/v8.gni")
+
+v8_source_set("encoding_test") {
+ sources = [
+ "encoding/encoding_test.cc",
+ "encoding/encoding_test_helper.h",
+ ]
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+ deps = [
+ ":encoding",
+ "../..:v8_libbase",
+ "../../src/inspector:inspector_string_conversions",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+ testonly = true
+}
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
index 3c795662f4..ade9e8e52c 100644
--- a/deps/v8/third_party/inspector_protocol/README.v8
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: 16b370abe6f4b59efea00377473b5dddb438defb
+Revision: fe0467fd105a9ea90fbb091dc2a7b4cdbf539803
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/deps/v8/third_party/inspector_protocol/code_generator.py b/deps/v8/third_party/inspector_protocol/code_generator.py
index 18777d0ff8..1e12343e05 100755
--- a/deps/v8/third_party/inspector_protocol/code_generator.py
+++ b/deps/v8/third_party/inspector_protocol/code_generator.py
@@ -5,7 +5,7 @@
import os.path
import sys
-import optparse
+import argparse
import collections
import functools
import re
@@ -17,6 +17,13 @@ except ImportError:
import pdl
+try:
+ unicode
+except NameError:
+ # Define unicode for Py3
+ def unicode(s, *_):
+ return s
+
# Path handling for libraries and templates
# Paths have to be normalized because Jinja uses the exact template path to
# determine the hash used in the cache filename, and we need a pre-caching step
@@ -53,28 +60,17 @@ def read_config():
return collections.namedtuple('X', keys)(*values)
try:
- cmdline_parser = optparse.OptionParser()
- cmdline_parser.add_option("--output_base")
- cmdline_parser.add_option("--jinja_dir")
- cmdline_parser.add_option("--config")
- cmdline_parser.add_option("--config_value", action="append", type="string")
- arg_options, _ = cmdline_parser.parse_args()
+ cmdline_parser = argparse.ArgumentParser()
+ cmdline_parser.add_argument("--output_base", type=unicode, required=True)
+ cmdline_parser.add_argument("--jinja_dir", type=unicode, required=True)
+ cmdline_parser.add_argument("--config", type=unicode, required=True)
+ cmdline_parser.add_argument("--config_value", default=[], action="append")
+ arg_options = cmdline_parser.parse_args()
jinja_dir = arg_options.jinja_dir
- if not jinja_dir:
- raise Exception("jinja directory must be specified")
- jinja_dir = jinja_dir.decode('utf8')
output_base = arg_options.output_base
- if not output_base:
- raise Exception("Base output directory must be specified")
- output_base = output_base.decode('utf8')
config_file = arg_options.config
- if not config_file:
- raise Exception("Config file name must be specified")
- config_file = config_file.decode('utf8')
config_base = os.path.dirname(config_file)
config_values = arg_options.config_value
- if not config_values:
- config_values = []
except Exception:
# Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
exc = sys.exc_info()[1]
diff --git a/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py b/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py
index 96048f793d..f98bebcd5e 100755
--- a/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py
+++ b/deps/v8/third_party/inspector_protocol/convert_protocol_to_json.py
@@ -4,10 +4,8 @@
# found in the LICENSE file.
import argparse
-import collections
import json
import os.path
-import re
import sys
import pdl
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding.cc b/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
new file mode 100644
index 0000000000..649cc060f5
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding.cc
@@ -0,0 +1,2190 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "encoding.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <cstring>
+#include <limits>
+#include <stack>
+
+namespace v8_inspector_protocol_encoding {
+// =============================================================================
+// Status and Error codes
+// =============================================================================
+
+std::string Status::ToASCIIString() const {
+ switch (error) {
+ case Error::OK:
+ return "OK";
+ case Error::JSON_PARSER_UNPROCESSED_INPUT_REMAINS:
+ return ToASCIIString("JSON: unprocessed input remains");
+ case Error::JSON_PARSER_STACK_LIMIT_EXCEEDED:
+ return ToASCIIString("JSON: stack limit exceeded");
+ case Error::JSON_PARSER_NO_INPUT:
+ return ToASCIIString("JSON: no input");
+ case Error::JSON_PARSER_INVALID_TOKEN:
+ return ToASCIIString("JSON: invalid token");
+ case Error::JSON_PARSER_INVALID_NUMBER:
+ return ToASCIIString("JSON: invalid number");
+ case Error::JSON_PARSER_INVALID_STRING:
+ return ToASCIIString("JSON: invalid string");
+ case Error::JSON_PARSER_UNEXPECTED_ARRAY_END:
+ return ToASCIIString("JSON: unexpected array end");
+ case Error::JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED:
+ return ToASCIIString("JSON: comma or array end expected");
+ case Error::JSON_PARSER_STRING_LITERAL_EXPECTED:
+ return ToASCIIString("JSON: string literal expected");
+ case Error::JSON_PARSER_COLON_EXPECTED:
+ return ToASCIIString("JSON: colon expected");
+ case Error::JSON_PARSER_UNEXPECTED_MAP_END:
+ return ToASCIIString("JSON: unexpected map end");
+ case Error::JSON_PARSER_COMMA_OR_MAP_END_EXPECTED:
+ return ToASCIIString("JSON: comma or map end expected");
+ case Error::JSON_PARSER_VALUE_EXPECTED:
+ return ToASCIIString("JSON: value expected");
+
+ case Error::CBOR_INVALID_INT32:
+ return ToASCIIString("CBOR: invalid int32");
+ case Error::CBOR_INVALID_DOUBLE:
+ return ToASCIIString("CBOR: invalid double");
+ case Error::CBOR_INVALID_ENVELOPE:
+ return ToASCIIString("CBOR: invalid envelope");
+ case Error::CBOR_INVALID_STRING8:
+ return ToASCIIString("CBOR: invalid string8");
+ case Error::CBOR_INVALID_STRING16:
+ return ToASCIIString("CBOR: invalid string16");
+ case Error::CBOR_INVALID_BINARY:
+ return ToASCIIString("CBOR: invalid binary");
+ case Error::CBOR_UNSUPPORTED_VALUE:
+ return ToASCIIString("CBOR: unsupported value");
+ case Error::CBOR_NO_INPUT:
+ return ToASCIIString("CBOR: no input");
+ case Error::CBOR_INVALID_START_BYTE:
+ return ToASCIIString("CBOR: invalid start byte");
+ case Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE:
+ return ToASCIIString("CBOR: unexpected eof expected value");
+ case Error::CBOR_UNEXPECTED_EOF_IN_ARRAY:
+ return ToASCIIString("CBOR: unexpected eof in array");
+ case Error::CBOR_UNEXPECTED_EOF_IN_MAP:
+ return ToASCIIString("CBOR: unexpected eof in map");
+ case Error::CBOR_INVALID_MAP_KEY:
+ return ToASCIIString("CBOR: invalid map key");
+ case Error::CBOR_STACK_LIMIT_EXCEEDED:
+ return ToASCIIString("CBOR: stack limit exceeded");
+ case Error::CBOR_TRAILING_JUNK:
+ return ToASCIIString("CBOR: trailing junk");
+ case Error::CBOR_MAP_START_EXPECTED:
+ return ToASCIIString("CBOR: map start expected");
+ case Error::CBOR_MAP_STOP_EXPECTED:
+ return ToASCIIString("CBOR: map stop expected");
+ case Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED:
+ return ToASCIIString("CBOR: envelope size limit exceeded");
+ }
+ // Some compilers can't figure out that we can't get here.
+ return "INVALID ERROR CODE";
+}
+
+std::string Status::ToASCIIString(const char* msg) const {
+ return std::string(msg) + " at position " + std::to_string(pos);
+}
+
+namespace cbor {
+namespace {
+// Indicates the number of bits the "initial byte" needs to be shifted to the
+// right after applying |kMajorTypeMask| to produce the major type in the
+// lowermost bits.
+static constexpr uint8_t kMajorTypeBitShift = 5u;
+// Mask selecting the low-order 5 bits of the "initial byte", which is where
+// the additional information is encoded.
+static constexpr uint8_t kAdditionalInformationMask = 0x1f;
+// Mask selecting the high-order 3 bits of the "initial byte", which indicates
+// the major type of the encoded value.
+static constexpr uint8_t kMajorTypeMask = 0xe0;
+// Indicates the integer is in the following byte.
+static constexpr uint8_t kAdditionalInformation1Byte = 24u;
+// Indicates the integer is in the next 2 bytes.
+static constexpr uint8_t kAdditionalInformation2Bytes = 25u;
+// Indicates the integer is in the next 4 bytes.
+static constexpr uint8_t kAdditionalInformation4Bytes = 26u;
+// Indicates the integer is in the next 8 bytes.
+static constexpr uint8_t kAdditionalInformation8Bytes = 27u;
+
+// Encodes the initial byte, consisting of the |type| in the first 3 bits
+// followed by 5 bits of |additional_info|.
+constexpr uint8_t EncodeInitialByte(MajorType type, uint8_t additional_info) {
+ return (static_cast<uint8_t>(type) << kMajorTypeBitShift) |
+ (additional_info & kAdditionalInformationMask);
+}
+
+// TAG 24 indicates that what follows is a byte string which is
+// encoded in CBOR format. We use this as a wrapper for
+// maps and arrays, allowing us to skip them, because the
+// byte string carries its size (byte length).
+// https://tools.ietf.org/html/rfc7049#section-2.4.4.1
+static constexpr uint8_t kInitialByteForEnvelope =
+ EncodeInitialByte(MajorType::TAG, 24);
+// The initial byte for a byte string with at most 2^32 bytes
+// of payload. This is used for envelope encoding, even if
+// the byte string is shorter.
+static constexpr uint8_t kInitialByteFor32BitLengthByteString =
+ EncodeInitialByte(MajorType::BYTE_STRING, 26);
+
+// See RFC 7049 Section 2.2.1, indefinite length arrays / maps have additional
+// info = 31.
+static constexpr uint8_t kInitialByteIndefiniteLengthArray =
+ EncodeInitialByte(MajorType::ARRAY, 31);
+static constexpr uint8_t kInitialByteIndefiniteLengthMap =
+ EncodeInitialByte(MajorType::MAP, 31);
+// See RFC 7049 Section 2.3, Table 1; this is used for finishing indefinite
+// length maps / arrays.
+static constexpr uint8_t kStopByte =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 31);
+
+// See RFC 7049 Section 2.3, Table 2.
+static constexpr uint8_t kEncodedTrue =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 21);
+static constexpr uint8_t kEncodedFalse =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 20);
+static constexpr uint8_t kEncodedNull =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 22);
+static constexpr uint8_t kInitialByteForDouble =
+ EncodeInitialByte(MajorType::SIMPLE_VALUE, 27);
+
+// See RFC 7049 Table 3 and Section 2.4.4.2. This is used as a prefix for
+// arbitrary binary data encoded as BYTE_STRING.
+static constexpr uint8_t kExpectedConversionToBase64Tag =
+ EncodeInitialByte(MajorType::TAG, 22);
+
+// Writes the bytes for |v| to |out|, starting with the most significant byte.
+// See also: https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
+template <typename T, class C>
+void WriteBytesMostSignificantByteFirst(T v, C* out) {
+ for (int shift_bytes = sizeof(T) - 1; shift_bytes >= 0; --shift_bytes)
+ out->push_back(0xff & (v >> (shift_bytes * 8)));
+}
+
+// Extracts sizeof(T) bytes from |in| to extract a value of type T
+// (e.g. uint64_t, uint32_t, ...), most significant byte first.
+// See also: https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
+template <typename T>
+T ReadBytesMostSignificantByteFirst(span<uint8_t> in) {
+ assert(in.size() >= sizeof(T));
+ T result = 0;
+ for (size_t shift_bytes = 0; shift_bytes < sizeof(T); ++shift_bytes)
+ result |= T(in[sizeof(T) - 1 - shift_bytes]) << (shift_bytes * 8);
+ return result;
+}
+} // namespace
+
+namespace internals {
+// Reads the start of a token with definitive size from |bytes|.
+// |type| is the major type as specified in RFC 7049 Section 2.1.
+// |value| is the payload (e.g. for MajorType::UNSIGNED) or is the size
+// (e.g. for BYTE_STRING).
+// If successful, returns the number of bytes read. Otherwise returns -1.
+// TODO(johannes): change return type to size_t and use 0 for error.
+int8_t ReadTokenStart(span<uint8_t> bytes, MajorType* type, uint64_t* value) {
+ if (bytes.empty())
+ return -1;
+ uint8_t initial_byte = bytes[0];
+ *type = MajorType((initial_byte & kMajorTypeMask) >> kMajorTypeBitShift);
+
+ uint8_t additional_information = initial_byte & kAdditionalInformationMask;
+ if (additional_information < 24) {
+ // Values 0-23 are encoded directly into the additional info of the
+ // initial byte.
+ *value = additional_information;
+ return 1;
+ }
+ if (additional_information == kAdditionalInformation1Byte) {
+ // Values 24-255 are encoded with one initial byte, followed by the value.
+ if (bytes.size() < 2)
+ return -1;
+ *value = ReadBytesMostSignificantByteFirst<uint8_t>(bytes.subspan(1));
+ return 2;
+ }
+ if (additional_information == kAdditionalInformation2Bytes) {
+ // Values 256-65535: 1 initial byte + 2 bytes payload.
+ if (bytes.size() < 1 + sizeof(uint16_t))
+ return -1;
+ *value = ReadBytesMostSignificantByteFirst<uint16_t>(bytes.subspan(1));
+ return 3;
+ }
+ if (additional_information == kAdditionalInformation4Bytes) {
+ // 32 bit uint: 1 initial byte + 4 bytes payload.
+ if (bytes.size() < 1 + sizeof(uint32_t))
+ return -1;
+ *value = ReadBytesMostSignificantByteFirst<uint32_t>(bytes.subspan(1));
+ return 5;
+ }
+ if (additional_information == kAdditionalInformation8Bytes) {
+ // 64 bit uint: 1 initial byte + 8 bytes payload.
+ if (bytes.size() < 1 + sizeof(uint64_t))
+ return -1;
+ *value = ReadBytesMostSignificantByteFirst<uint64_t>(bytes.subspan(1));
+ return 9;
+ }
+ return -1;
+}
+
+// Writes the start of a token with |type|. The |value| may indicate the size,
+// or it may be the payload if the value is an unsigned integer.
+template <typename C>
+void WriteTokenStartTmpl(MajorType type, uint64_t value, C* encoded) {
+ if (value < 24) {
+ // Values 0-23 are encoded directly into the additional info of the
+ // initial byte.
+ encoded->push_back(EncodeInitialByte(type, /*additional_info=*/value));
+ return;
+ }
+ if (value <= std::numeric_limits<uint8_t>::max()) {
+ // Values 24-255 are encoded with one initial byte, followed by the value.
+ encoded->push_back(EncodeInitialByte(type, kAdditionalInformation1Byte));
+ encoded->push_back(value);
+ return;
+ }
+ if (value <= std::numeric_limits<uint16_t>::max()) {
+ // Values 256-65535: 1 initial byte + 2 bytes payload.
+ encoded->push_back(EncodeInitialByte(type, kAdditionalInformation2Bytes));
+ WriteBytesMostSignificantByteFirst<uint16_t>(value, encoded);
+ return;
+ }
+ if (value <= std::numeric_limits<uint32_t>::max()) {
+ // 32 bit uint: 1 initial byte + 4 bytes payload.
+ encoded->push_back(EncodeInitialByte(type, kAdditionalInformation4Bytes));
+ WriteBytesMostSignificantByteFirst<uint32_t>(static_cast<uint32_t>(value),
+ encoded);
+ return;
+ }
+ // 64 bit uint: 1 initial byte + 8 bytes payload.
+ encoded->push_back(EncodeInitialByte(type, kAdditionalInformation8Bytes));
+ WriteBytesMostSignificantByteFirst<uint64_t>(value, encoded);
+}
+void WriteTokenStart(MajorType type,
+ uint64_t value,
+ std::vector<uint8_t>* encoded) {
+ WriteTokenStartTmpl(type, value, encoded);
+}
+void WriteTokenStart(MajorType type, uint64_t value, std::string* encoded) {
+ WriteTokenStartTmpl(type, value, encoded);
+}
+} // namespace internals
+
+// =============================================================================
+// Detecting CBOR content
+// =============================================================================
+
+uint8_t InitialByteForEnvelope() {
+ return kInitialByteForEnvelope;
+}
+uint8_t InitialByteFor32BitLengthByteString() {
+ return kInitialByteFor32BitLengthByteString;
+}
+bool IsCBORMessage(span<uint8_t> msg) {
+ return msg.size() >= 6 && msg[0] == InitialByteForEnvelope() &&
+ msg[1] == InitialByteFor32BitLengthByteString();
+}
+
+// =============================================================================
+// Encoding invidiual CBOR items
+// =============================================================================
+
+uint8_t EncodeTrue() {
+ return kEncodedTrue;
+}
+uint8_t EncodeFalse() {
+ return kEncodedFalse;
+}
+uint8_t EncodeNull() {
+ return kEncodedNull;
+}
+
+uint8_t EncodeIndefiniteLengthArrayStart() {
+ return kInitialByteIndefiniteLengthArray;
+}
+
+uint8_t EncodeIndefiniteLengthMapStart() {
+ return kInitialByteIndefiniteLengthMap;
+}
+
+uint8_t EncodeStop() {
+ return kStopByte;
+}
+
+template <typename C>
+void EncodeInt32Tmpl(int32_t value, C* out) {
+ if (value >= 0) {
+ internals::WriteTokenStart(MajorType::UNSIGNED, value, out);
+ } else {
+ uint64_t representation = static_cast<uint64_t>(-(value + 1));
+ internals::WriteTokenStart(MajorType::NEGATIVE, representation, out);
+ }
+}
+void EncodeInt32(int32_t value, std::vector<uint8_t>* out) {
+ EncodeInt32Tmpl(value, out);
+}
+void EncodeInt32(int32_t value, std::string* out) {
+ EncodeInt32Tmpl(value, out);
+}
+
+template <typename C>
+void EncodeString16Tmpl(span<uint16_t> in, C* out) {
+ uint64_t byte_length = static_cast<uint64_t>(in.size_bytes());
+ internals::WriteTokenStart(MajorType::BYTE_STRING, byte_length, out);
+ // When emitting UTF16 characters, we always write the least significant byte
+ // first; this is because it's the native representation for X86.
+ // TODO(johannes): Implement a more efficient thing here later, e.g.
+ // casting *iff* the machine has this byte order.
+ // The wire format for UTF16 chars will probably remain the same
+ // (least significant byte first) since this way we can have
+ // golden files, unittests, etc. that port easily and universally.
+ // See also:
+ // https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
+ for (const uint16_t two_bytes : in) {
+ out->push_back(two_bytes);
+ out->push_back(two_bytes >> 8);
+ }
+}
+void EncodeString16(span<uint16_t> in, std::vector<uint8_t>* out) {
+ EncodeString16Tmpl(in, out);
+}
+void EncodeString16(span<uint16_t> in, std::string* out) {
+ EncodeString16Tmpl(in, out);
+}
+
+template <typename C>
+void EncodeString8Tmpl(span<uint8_t> in, C* out) {
+ internals::WriteTokenStart(MajorType::STRING,
+ static_cast<uint64_t>(in.size_bytes()), out);
+ out->insert(out->end(), in.begin(), in.end());
+}
+void EncodeString8(span<uint8_t> in, std::vector<uint8_t>* out) {
+ EncodeString8Tmpl(in, out);
+}
+void EncodeString8(span<uint8_t> in, std::string* out) {
+ EncodeString8Tmpl(in, out);
+}
+
+template <typename C>
+void EncodeFromLatin1Tmpl(span<uint8_t> latin1, C* out) {
+ for (size_t ii = 0; ii < latin1.size(); ++ii) {
+ if (latin1[ii] <= 127)
+ continue;
+ // If there's at least one non-ASCII char, convert to UTF8.
+ std::vector<uint8_t> utf8(latin1.begin(), latin1.begin() + ii);
+ for (; ii < latin1.size(); ++ii) {
+ if (latin1[ii] <= 127) {
+ utf8.push_back(latin1[ii]);
+ } else {
+ // 0xC0 means it's a UTF8 sequence with 2 bytes.
+ utf8.push_back((latin1[ii] >> 6) | 0xc0);
+ utf8.push_back((latin1[ii] | 0x80) & 0xbf);
+ }
+ }
+ EncodeString8(SpanFrom(utf8), out);
+ return;
+ }
+ EncodeString8(latin1, out);
+}
+void EncodeFromLatin1(span<uint8_t> latin1, std::vector<uint8_t>* out) {
+ EncodeFromLatin1Tmpl(latin1, out);
+}
+void EncodeFromLatin1(span<uint8_t> latin1, std::string* out) {
+ EncodeFromLatin1Tmpl(latin1, out);
+}
+
+template <typename C>
+void EncodeFromUTF16Tmpl(span<uint16_t> utf16, C* out) {
+ // If there's at least one non-ASCII char, encode as STRING16 (UTF16).
+ for (uint16_t ch : utf16) {
+ if (ch <= 127)
+ continue;
+ EncodeString16(utf16, out);
+ return;
+ }
+ // It's all US-ASCII, strip out every second byte and encode as UTF8.
+ internals::WriteTokenStart(MajorType::STRING,
+ static_cast<uint64_t>(utf16.size()), out);
+ out->insert(out->end(), utf16.begin(), utf16.end());
+}
+void EncodeFromUTF16(span<uint16_t> utf16, std::vector<uint8_t>* out) {
+ EncodeFromUTF16Tmpl(utf16, out);
+}
+void EncodeFromUTF16(span<uint16_t> utf16, std::string* out) {
+ EncodeFromUTF16Tmpl(utf16, out);
+}
+
+template <typename C>
+void EncodeBinaryTmpl(span<uint8_t> in, C* out) {
+ out->push_back(kExpectedConversionToBase64Tag);
+ uint64_t byte_length = static_cast<uint64_t>(in.size_bytes());
+ internals::WriteTokenStart(MajorType::BYTE_STRING, byte_length, out);
+ out->insert(out->end(), in.begin(), in.end());
+}
+void EncodeBinary(span<uint8_t> in, std::vector<uint8_t>* out) {
+ EncodeBinaryTmpl(in, out);
+}
+void EncodeBinary(span<uint8_t> in, std::string* out) {
+ EncodeBinaryTmpl(in, out);
+}
+
+// A double is encoded with a specific initial byte
+// (kInitialByteForDouble) plus the 64 bits of payload for its value.
+constexpr size_t kEncodedDoubleSize = 1 + sizeof(uint64_t);
+
+// An envelope is encoded with a specific initial byte
+// (kInitialByteForEnvelope), plus the start byte for a BYTE_STRING with a 32
+// bit wide length, plus a 32 bit length for that string.
+constexpr size_t kEncodedEnvelopeHeaderSize = 1 + 1 + sizeof(uint32_t);
+
+template <typename C>
+void EncodeDoubleTmpl(double value, C* out) {
+ // The additional_info=27 indicates 64 bits for the double follow.
+ // See RFC 7049 Section 2.3, Table 1.
+ out->push_back(kInitialByteForDouble);
+ union {
+ double from_double;
+ uint64_t to_uint64;
+ } reinterpret;
+ reinterpret.from_double = value;
+ WriteBytesMostSignificantByteFirst<uint64_t>(reinterpret.to_uint64, out);
+}
+void EncodeDouble(double value, std::vector<uint8_t>* out) {
+ EncodeDoubleTmpl(value, out);
+}
+void EncodeDouble(double value, std::string* out) {
+ EncodeDoubleTmpl(value, out);
+}
+
+// =============================================================================
+// cbor::EnvelopeEncoder - for wrapping submessages
+// =============================================================================
+
+template <typename C>
+void EncodeStartTmpl(C* out, size_t* byte_size_pos) {
+ assert(*byte_size_pos == 0);
+ out->push_back(kInitialByteForEnvelope);
+ out->push_back(kInitialByteFor32BitLengthByteString);
+ *byte_size_pos = out->size();
+ out->resize(out->size() + sizeof(uint32_t));
+}
+
+void EnvelopeEncoder::EncodeStart(std::vector<uint8_t>* out) {
+ EncodeStartTmpl<std::vector<uint8_t>>(out, &byte_size_pos_);
+}
+
+void EnvelopeEncoder::EncodeStart(std::string* out) {
+ EncodeStartTmpl<std::string>(out, &byte_size_pos_);
+}
+
+template <typename C>
+bool EncodeStopTmpl(C* out, size_t* byte_size_pos) {
+ assert(*byte_size_pos != 0);
+ // The byte size is the size of the payload, that is, all the
+ // bytes that were written past the byte size position itself.
+ uint64_t byte_size = out->size() - (*byte_size_pos + sizeof(uint32_t));
+ // We store exactly 4 bytes, so at most INT32MAX, with most significant
+ // byte first.
+ if (byte_size > std::numeric_limits<uint32_t>::max())
+ return false;
+ for (int shift_bytes = sizeof(uint32_t) - 1; shift_bytes >= 0;
+ --shift_bytes) {
+ (*out)[(*byte_size_pos)++] = 0xff & (byte_size >> (shift_bytes * 8));
+ }
+ return true;
+}
+
+bool EnvelopeEncoder::EncodeStop(std::vector<uint8_t>* out) {
+ return EncodeStopTmpl(out, &byte_size_pos_);
+}
+
+bool EnvelopeEncoder::EncodeStop(std::string* out) {
+ return EncodeStopTmpl(out, &byte_size_pos_);
+}
+
+// =============================================================================
+// cbor::NewCBOREncoder - for encoding from a streaming parser
+// =============================================================================
+
+namespace {
+template <typename C>
+class CBOREncoder : public StreamingParserHandler {
+ public:
+ CBOREncoder(C* out, Status* status) : out_(out), status_(status) {
+ *status_ = Status();
+ }
+
+ void HandleMapBegin() override {
+ if (!status_->ok())
+ return;
+ envelopes_.emplace_back();
+ envelopes_.back().EncodeStart(out_);
+ out_->push_back(kInitialByteIndefiniteLengthMap);
+ }
+
+ void HandleMapEnd() override {
+ if (!status_->ok())
+ return;
+ out_->push_back(kStopByte);
+ assert(!envelopes_.empty());
+ if (!envelopes_.back().EncodeStop(out_)) {
+ HandleError(
+ Status(Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED, out_->size()));
+ return;
+ }
+ envelopes_.pop_back();
+ }
+
+ void HandleArrayBegin() override {
+ if (!status_->ok())
+ return;
+ envelopes_.emplace_back();
+ envelopes_.back().EncodeStart(out_);
+ out_->push_back(kInitialByteIndefiniteLengthArray);
+ }
+
+ void HandleArrayEnd() override {
+ if (!status_->ok())
+ return;
+ out_->push_back(kStopByte);
+ assert(!envelopes_.empty());
+ if (!envelopes_.back().EncodeStop(out_)) {
+ HandleError(
+ Status(Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED, out_->size()));
+ return;
+ }
+ envelopes_.pop_back();
+ }
+
+ void HandleString8(span<uint8_t> chars) override {
+ if (!status_->ok())
+ return;
+ EncodeString8(chars, out_);
+ }
+
+ void HandleString16(span<uint16_t> chars) override {
+ if (!status_->ok())
+ return;
+ EncodeFromUTF16(chars, out_);
+ }
+
+ void HandleBinary(span<uint8_t> bytes) override {
+ if (!status_->ok())
+ return;
+ EncodeBinary(bytes, out_);
+ }
+
+ void HandleDouble(double value) override {
+ if (!status_->ok())
+ return;
+ EncodeDouble(value, out_);
+ }
+
+ void HandleInt32(int32_t value) override {
+ if (!status_->ok())
+ return;
+ EncodeInt32(value, out_);
+ }
+
+ void HandleBool(bool value) override {
+ if (!status_->ok())
+ return;
+ // See RFC 7049 Section 2.3, Table 2.
+ out_->push_back(value ? kEncodedTrue : kEncodedFalse);
+ }
+
+ void HandleNull() override {
+ if (!status_->ok())
+ return;
+ // See RFC 7049 Section 2.3, Table 2.
+ out_->push_back(kEncodedNull);
+ }
+
+ void HandleError(Status error) override {
+ if (!status_->ok())
+ return;
+ *status_ = error;
+ out_->clear();
+ }
+
+ private:
+ C* out_;
+ std::vector<EnvelopeEncoder> envelopes_;
+ Status* status_;
+};
+} // namespace
+
+std::unique_ptr<StreamingParserHandler> NewCBOREncoder(
+ std::vector<uint8_t>* out,
+ Status* status) {
+ return std::unique_ptr<StreamingParserHandler>(
+ new CBOREncoder<std::vector<uint8_t>>(out, status));
+}
+std::unique_ptr<StreamingParserHandler> NewCBOREncoder(std::string* out,
+ Status* status) {
+ return std::unique_ptr<StreamingParserHandler>(
+ new CBOREncoder<std::string>(out, status));
+}
+
+// =============================================================================
+// cbor::CBORTokenizer - for parsing individual CBOR items
+// =============================================================================
+
+CBORTokenizer::CBORTokenizer(span<uint8_t> bytes) : bytes_(bytes) {
+ ReadNextToken(/*enter_envelope=*/false);
+}
+CBORTokenizer::~CBORTokenizer() {}
+
+CBORTokenTag CBORTokenizer::TokenTag() const {
+ return token_tag_;
+}
+
+void CBORTokenizer::Next() {
+ if (token_tag_ == CBORTokenTag::ERROR_VALUE ||
+ token_tag_ == CBORTokenTag::DONE)
+ return;
+ ReadNextToken(/*enter_envelope=*/false);
+}
+
+void CBORTokenizer::EnterEnvelope() {
+ assert(token_tag_ == CBORTokenTag::ENVELOPE);
+ ReadNextToken(/*enter_envelope=*/true);
+}
+
+Status CBORTokenizer::Status() const {
+ return status_;
+}
+
// The following accessor functions ::GetInt32, ::GetDouble,
// ::GetString8, ::GetString16WireRep, ::GetBinary, ::GetEnvelopeContents
// assume that a particular token was recognized in ::ReadNextToken.
// That's where all the error checking is done. By design,
// the accessors (assuming the token was recognized) never produce
// an error.

// Returns the INT32 value; CBOR encodes negative numbers as the
// NEGATIVE major type holding n, representing -(n + 1).
int32_t CBORTokenizer::GetInt32() const {
  assert(token_tag_ == CBORTokenTag::INT32);
  // The range checks happen in ::ReadNextToken().
  return static_cast<int32_t>(
      token_start_type_ == MajorType::UNSIGNED
          ? token_start_internal_value_
          : -static_cast<int64_t>(token_start_internal_value_) - 1);
}

// Reinterprets the 8 payload bytes (big-endian, following the initial
// byte) as an IEEE 754 double.
// NOTE(review): union type punning is technically undefined behavior in
// C++ (widely supported in practice); memcpy would be the strictly
// conforming alternative — confirm against project style.
double CBORTokenizer::GetDouble() const {
  assert(token_tag_ == CBORTokenTag::DOUBLE);
  union {
    uint64_t from_uint64;
    double to_double;
  } reinterpret;
  reinterpret.from_uint64 = ReadBytesMostSignificantByteFirst<uint64_t>(
      bytes_.subspan(status_.pos + 1));
  return reinterpret.to_double;
}

// Returns the UTF-8 payload of a STRING8 token; the payload sits at the
// end of the token, so its offset is token length minus payload length.
span<uint8_t> CBORTokenizer::GetString8() const {
  assert(token_tag_ == CBORTokenTag::STRING8);
  auto length = static_cast<size_t>(token_start_internal_value_);
  return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
}

// Returns the raw (wire format) bytes of a STRING16 token's payload.
span<uint8_t> CBORTokenizer::GetString16WireRep() const {
  assert(token_tag_ == CBORTokenTag::STRING16);
  auto length = static_cast<size_t>(token_start_internal_value_);
  return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
}

// Returns the payload of a BINARY token.
span<uint8_t> CBORTokenizer::GetBinary() const {
  assert(token_tag_ == CBORTokenTag::BINARY);
  auto length = static_cast<size_t>(token_start_internal_value_);
  return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
}

// Returns the contents of an ENVELOPE token, i.e. the bytes after its
// fixed-size header.
span<uint8_t> CBORTokenizer::GetEnvelopeContents() const {
  assert(token_tag_ == CBORTokenTag::ENVELOPE);
  auto length = static_cast<size_t>(token_start_internal_value_);
  return bytes_.subspan(status_.pos + kEncodedEnvelopeHeaderSize, length);
}
+
+// All error checking happens in ::ReadNextToken, so that the accessors
+// can avoid having to carry an error return value.
+//
+// With respect to checking the encoded lengths of strings, arrays, etc:
+// On the wire, CBOR uses 1,2,4, and 8 byte unsigned integers, so
+// we initially read them as uint64_t, usually into token_start_internal_value_.
+//
+// However, since these containers have a representation on the machine,
+// we need to do corresponding size computations on the input byte array,
+// output span (e.g. the payload for a string), etc., and size_t is
+// machine specific (in practice either 32 bit or 64 bit).
+//
+// Further, we must avoid overflowing size_t. Therefore, we use this
+// kMaxValidLength constant to:
+// - Reject values that are larger than the architecture specific
+// max size_t (differs between 32 bit and 64 bit arch).
+// - Reserve at least one bit so that we can check against overflows
+// when adding lengths (array / string length / etc.); we do this by
+// ensuring that the inputs to an addition are <= kMaxValidLength,
+// and then checking whether the sum went past it.
+//
+// See also
+// https://chromium.googlesource.com/chromium/src/+/master/docs/security/integer-semantics.md
+static const uint64_t kMaxValidLength =
+ std::min<uint64_t>(std::numeric_limits<uint64_t>::max() >> 2,
+ std::numeric_limits<size_t>::max());
+
// Reads the token at the current position into token_tag_ /
// token_byte_length_ / token_start_*. If |enter_envelope| is true, the
// cursor first moves just past the envelope header (into its contents);
// otherwise it advances past the previous token. All length and range
// validation happens here, so the Get* accessors never fail.
void CBORTokenizer::ReadNextToken(bool enter_envelope) {
  if (enter_envelope) {
    status_.pos += kEncodedEnvelopeHeaderSize;
  } else {
    // npos marks "before the first token"; otherwise skip the previous
    // token's full byte length.
    status_.pos =
        status_.pos == Status::npos() ? 0 : status_.pos + token_byte_length_;
  }
  status_.error = Error::OK;
  if (status_.pos >= bytes_.size()) {
    token_tag_ = CBORTokenTag::DONE;
    return;
  }
  const size_t remaining_bytes = bytes_.size() - status_.pos;
  switch (bytes_[status_.pos]) {
    case kStopByte:
      SetToken(CBORTokenTag::STOP, 1);
      return;
    case kInitialByteIndefiniteLengthMap:
      SetToken(CBORTokenTag::MAP_START, 1);
      return;
    case kInitialByteIndefiniteLengthArray:
      SetToken(CBORTokenTag::ARRAY_START, 1);
      return;
    case kEncodedTrue:
      SetToken(CBORTokenTag::TRUE_VALUE, 1);
      return;
    case kEncodedFalse:
      SetToken(CBORTokenTag::FALSE_VALUE, 1);
      return;
    case kEncodedNull:
      SetToken(CBORTokenTag::NULL_VALUE, 1);
      return;
    case kExpectedConversionToBase64Tag: {  // BINARY
      const int8_t bytes_read = internals::ReadTokenStart(
          bytes_.subspan(status_.pos + 1), &token_start_type_,
          &token_start_internal_value_);
      if (bytes_read < 0 || token_start_type_ != MajorType::BYTE_STRING ||
          token_start_internal_value_ > kMaxValidLength) {
        SetError(Error::CBOR_INVALID_BINARY);
        return;
      }
      const uint64_t token_byte_length = token_start_internal_value_ +
                                         /* tag before token start: */ 1 +
                                         /* token start: */ bytes_read;
      // Reject payloads that claim to extend past the end of the input.
      if (token_byte_length > remaining_bytes) {
        SetError(Error::CBOR_INVALID_BINARY);
        return;
      }
      SetToken(CBORTokenTag::BINARY, static_cast<size_t>(token_byte_length));
      return;
    }
    case kInitialByteForDouble: {  // DOUBLE
      if (kEncodedDoubleSize > remaining_bytes) {
        SetError(Error::CBOR_INVALID_DOUBLE);
        return;
      }
      SetToken(CBORTokenTag::DOUBLE, kEncodedDoubleSize);
      return;
    }
    case kInitialByteForEnvelope: {  // ENVELOPE
      if (kEncodedEnvelopeHeaderSize > remaining_bytes) {
        SetError(Error::CBOR_INVALID_ENVELOPE);
        return;
      }
      // The envelope must be a byte string with 32 bit length.
      if (bytes_[status_.pos + 1] != kInitialByteFor32BitLengthByteString) {
        SetError(Error::CBOR_INVALID_ENVELOPE);
        return;
      }
      // Read the length of the byte string.
      token_start_internal_value_ = ReadBytesMostSignificantByteFirst<uint32_t>(
          bytes_.subspan(status_.pos + 2));
      if (token_start_internal_value_ > kMaxValidLength) {
        SetError(Error::CBOR_INVALID_ENVELOPE);
        return;
      }
      uint64_t token_byte_length =
          token_start_internal_value_ + kEncodedEnvelopeHeaderSize;
      if (token_byte_length > remaining_bytes) {
        SetError(Error::CBOR_INVALID_ENVELOPE);
        return;
      }
      SetToken(CBORTokenTag::ENVELOPE, static_cast<size_t>(token_byte_length));
      return;
    }
    default: {
      const int8_t token_start_length = internals::ReadTokenStart(
          bytes_.subspan(status_.pos), &token_start_type_,
          &token_start_internal_value_);
      const bool success = token_start_length >= 0;
      switch (token_start_type_) {
        case MajorType::UNSIGNED:  // INT32.
          // INT32 is a signed int32 (int32 makes sense for the
          // inspector_protocol, it's not a CBOR limitation), so we check
          // against the signed max, so that the allowable values are
          // 0, 1, 2, ... 2^31 - 1.
          if (!success || std::numeric_limits<int32_t>::max() <
                              token_start_internal_value_) {
            SetError(Error::CBOR_INVALID_INT32);
            return;
          }
          SetToken(CBORTokenTag::INT32, token_start_length);
          return;
        case MajorType::NEGATIVE: {  // INT32.
          // INT32 is a signed int32 (int32 makes sense for the
          // inspector_protocol, it's not a CBOR limitation); in CBOR, the
          // negative values for INT32 are represented as NEGATIVE, that is, -1
          // INT32 is represented as 1 << 5 | 0 (major type 1, additional info
          // value 0). The minimal allowed INT32 value in our protocol is
          // std::numeric_limits<int32_t>::min(). We check for it by directly
          // checking the payload against the maximal allowed signed (!) int32
          // value.
          if (!success || token_start_internal_value_ >
                              std::numeric_limits<int32_t>::max()) {
            SetError(Error::CBOR_INVALID_INT32);
            return;
          }
          SetToken(CBORTokenTag::INT32, token_start_length);
          return;
        }
        case MajorType::STRING: {  // STRING8.
          if (!success || token_start_internal_value_ > kMaxValidLength) {
            SetError(Error::CBOR_INVALID_STRING8);
            return;
          }
          uint64_t token_byte_length =
              token_start_internal_value_ + token_start_length;
          if (token_byte_length > remaining_bytes) {
            SetError(Error::CBOR_INVALID_STRING8);
            return;
          }
          SetToken(CBORTokenTag::STRING8,
                   static_cast<size_t>(token_byte_length));
          return;
        }
        case MajorType::BYTE_STRING: {  // STRING16.
          // Length must be divisible by 2 since UTF16 is 2 bytes per
          // character, hence the &1 check.
          if (!success || token_start_internal_value_ > kMaxValidLength ||
              token_start_internal_value_ & 1) {
            SetError(Error::CBOR_INVALID_STRING16);
            return;
          }
          uint64_t token_byte_length =
              token_start_internal_value_ + token_start_length;
          if (token_byte_length > remaining_bytes) {
            SetError(Error::CBOR_INVALID_STRING16);
            return;
          }
          SetToken(CBORTokenTag::STRING16,
                   static_cast<size_t>(token_byte_length));
          return;
        }
        case MajorType::ARRAY:
        case MajorType::MAP:
        case MajorType::TAG:
        case MajorType::SIMPLE_VALUE:
          SetError(Error::CBOR_UNSUPPORTED_VALUE);
          return;
      }
    }
  }
}
+
// Records a successfully recognized token: its tag and total byte
// length (used by ::Next to advance the cursor).
void CBORTokenizer::SetToken(CBORTokenTag token_tag, size_t token_byte_length) {
  token_tag_ = token_tag;
  token_byte_length_ = token_byte_length;
}

// Records an error; the tokenizer then stays on ERROR_VALUE (::Next
// becomes a no-op).
void CBORTokenizer::SetError(Error error) {
  token_tag_ = CBORTokenTag::ERROR_VALUE;
  status_.error = error;
}
+
+// =============================================================================
+// cbor::ParseCBOR - for receiving streaming parser events for CBOR messages
+// =============================================================================
+
+namespace {
+// When parsing CBOR, we limit recursion depth for objects and arrays
+// to this constant.
+static constexpr int kStackLimit = 300;
+
+// Below are three parsing routines for CBOR, which cover enough
+// to roundtrip JSON messages.
+bool ParseMap(int32_t stack_depth,
+ CBORTokenizer* tokenizer,
+ StreamingParserHandler* out);
+bool ParseArray(int32_t stack_depth,
+ CBORTokenizer* tokenizer,
+ StreamingParserHandler* out);
+bool ParseValue(int32_t stack_depth,
+ CBORTokenizer* tokenizer,
+ StreamingParserHandler* out);
+
// Decodes the STRING16 token under the tokenizer's cursor and forwards
// it to |out|, then advances. The wire representation is little-endian
// UTF-16 (low byte first); ReadNextToken guarantees an even byte count.
void ParseUTF16String(CBORTokenizer* tokenizer, StreamingParserHandler* out) {
  std::vector<uint16_t> value;
  span<uint8_t> rep = tokenizer->GetString16WireRep();
  for (size_t ii = 0; ii < rep.size(); ii += 2)
    value.push_back((rep[ii + 1] << 8) | rep[ii]);
  out->HandleString16(span<uint16_t>(value.data(), value.size()));
  tokenizer->Next();
}
+
// Forwards the STRING8 (UTF-8) token under the cursor to |out| and
// advances. Always returns true; the bool matches the other Parse*
// helpers' signatures.
bool ParseUTF8String(CBORTokenizer* tokenizer, StreamingParserHandler* out) {
  assert(tokenizer->TokenTag() == CBORTokenTag::STRING8);
  out->HandleString8(tokenizer->GetString8());
  tokenizer->Next();
  return true;
}
+
// Parses a single CBOR value (unwrapping an envelope first, if present)
// and forwards it to |out|. Returns false — after reporting via
// out->HandleError() — on malformed input or when |stack_depth| exceeds
// kStackLimit.
bool ParseValue(int32_t stack_depth,
                CBORTokenizer* tokenizer,
                StreamingParserHandler* out) {
  if (stack_depth > kStackLimit) {
    out->HandleError(
        Status{Error::CBOR_STACK_LIMIT_EXCEEDED, tokenizer->Status().pos});
    return false;
  }
  // Skip past the envelope to get to what's inside.
  if (tokenizer->TokenTag() == CBORTokenTag::ENVELOPE)
    tokenizer->EnterEnvelope();
  switch (tokenizer->TokenTag()) {
    case CBORTokenTag::ERROR_VALUE:
      out->HandleError(tokenizer->Status());
      return false;
    case CBORTokenTag::DONE:
      out->HandleError(Status{Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE,
                              tokenizer->Status().pos});
      return false;
    case CBORTokenTag::TRUE_VALUE:
      out->HandleBool(true);
      tokenizer->Next();
      return true;
    case CBORTokenTag::FALSE_VALUE:
      out->HandleBool(false);
      tokenizer->Next();
      return true;
    case CBORTokenTag::NULL_VALUE:
      out->HandleNull();
      tokenizer->Next();
      return true;
    case CBORTokenTag::INT32:
      out->HandleInt32(tokenizer->GetInt32());
      tokenizer->Next();
      return true;
    case CBORTokenTag::DOUBLE:
      out->HandleDouble(tokenizer->GetDouble());
      tokenizer->Next();
      return true;
    case CBORTokenTag::STRING8:
      return ParseUTF8String(tokenizer, out);
    case CBORTokenTag::STRING16:
      ParseUTF16String(tokenizer, out);
      return true;
    case CBORTokenTag::BINARY: {
      out->HandleBinary(tokenizer->GetBinary());
      tokenizer->Next();
      return true;
    }
    case CBORTokenTag::MAP_START:
      // Containers recurse; the depth is bounded by the check above.
      return ParseMap(stack_depth + 1, tokenizer, out);
    case CBORTokenTag::ARRAY_START:
      return ParseArray(stack_depth + 1, tokenizer, out);
    default:
      out->HandleError(
          Status{Error::CBOR_UNSUPPORTED_VALUE, tokenizer->Status().pos});
      return false;
  }
}
+
// The tokenizer must be positioned on an ARRAY_START token (indefinite
// length array); so ParseArray may only be called after such a token
// has been detected. Parses elements until the matching STOP byte,
// forwarding events to |out|.
bool ParseArray(int32_t stack_depth,
                CBORTokenizer* tokenizer,
                StreamingParserHandler* out) {
  assert(tokenizer->TokenTag() == CBORTokenTag::ARRAY_START);
  tokenizer->Next();
  out->HandleArrayBegin();
  while (tokenizer->TokenTag() != CBORTokenTag::STOP) {
    if (tokenizer->TokenTag() == CBORTokenTag::DONE) {
      out->HandleError(
          Status{Error::CBOR_UNEXPECTED_EOF_IN_ARRAY, tokenizer->Status().pos});
      return false;
    }
    if (tokenizer->TokenTag() == CBORTokenTag::ERROR_VALUE) {
      out->HandleError(tokenizer->Status());
      return false;
    }
    // Parse value.
    if (!ParseValue(stack_depth, tokenizer, out))
      return false;
  }
  out->HandleArrayEnd();
  tokenizer->Next();
  return true;
}
+
// The tokenizer must be positioned on a MAP_START token (indefinite
// length map); so ParseMap may only be called after such a token has
// been detected. Parses alternating key/value pairs until the matching
// STOP byte; keys must be STRING8 or STRING16.
bool ParseMap(int32_t stack_depth,
              CBORTokenizer* tokenizer,
              StreamingParserHandler* out) {
  assert(tokenizer->TokenTag() == CBORTokenTag::MAP_START);
  out->HandleMapBegin();
  tokenizer->Next();
  while (tokenizer->TokenTag() != CBORTokenTag::STOP) {
    if (tokenizer->TokenTag() == CBORTokenTag::DONE) {
      out->HandleError(
          Status{Error::CBOR_UNEXPECTED_EOF_IN_MAP, tokenizer->Status().pos});
      return false;
    }
    if (tokenizer->TokenTag() == CBORTokenTag::ERROR_VALUE) {
      out->HandleError(tokenizer->Status());
      return false;
    }
    // Parse key.
    if (tokenizer->TokenTag() == CBORTokenTag::STRING8) {
      if (!ParseUTF8String(tokenizer, out))
        return false;
    } else if (tokenizer->TokenTag() == CBORTokenTag::STRING16) {
      ParseUTF16String(tokenizer, out);
    } else {
      out->HandleError(
          Status{Error::CBOR_INVALID_MAP_KEY, tokenizer->Status().pos});
      return false;
    }
    // Parse value.
    if (!ParseValue(stack_depth, tokenizer, out))
      return false;
  }
  out->HandleMapEnd();
  tokenizer->Next();
  return true;
}
+} // namespace
+
// Parses a complete CBOR message: an envelope that wraps an indefinite
// length map. All errors — including trailing junk after the map — are
// reported through out->HandleError().
void ParseCBOR(span<uint8_t> bytes, StreamingParserHandler* out) {
  if (bytes.empty()) {
    out->HandleError(Status{Error::CBOR_NO_INPUT, 0});
    return;
  }
  if (bytes[0] != kInitialByteForEnvelope) {
    out->HandleError(Status{Error::CBOR_INVALID_START_BYTE, 0});
    return;
  }
  CBORTokenizer tokenizer(bytes);
  if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE) {
    out->HandleError(tokenizer.Status());
    return;
  }
  // We checked for the envelope start byte above, so the tokenizer
  // must agree here, since it's not an error.
  assert(tokenizer.TokenTag() == CBORTokenTag::ENVELOPE);
  tokenizer.EnterEnvelope();
  if (tokenizer.TokenTag() != CBORTokenTag::MAP_START) {
    out->HandleError(
        Status{Error::CBOR_MAP_START_EXPECTED, tokenizer.Status().pos});
    return;
  }
  if (!ParseMap(/*stack_depth=*/1, &tokenizer, out))
    return;
  // A well-formed message is fully consumed by the map.
  if (tokenizer.TokenTag() == CBORTokenTag::DONE)
    return;
  if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE) {
    out->HandleError(tokenizer.Status());
    return;
  }
  out->HandleError(Status{Error::CBOR_TRAILING_JUNK, tokenizer.Status().pos});
}
+
+// =============================================================================
+// cbor::AppendString8EntryToMap - for limited in-place editing of messages
+// =============================================================================
+
// Appends a (string8 key, string8 value) entry to |cbor|, which must be
// an envelope whose sole content is an indefinite length map, then
// patches the envelope's 32 bit size field in place. Returns a non-ok
// Status (with position) if |cbor| does not have the expected shape.
template <typename C>
Status AppendString8EntryToCBORMapTmpl(span<uint8_t> string8_key,
                                       span<uint8_t> string8_value,
                                       C* cbor) {
  // Careful below: Don't compare (*cbor)[idx] with a uint8_t, since
  // it could be a char (signed!). Instead, use bytes.
  span<uint8_t> bytes(reinterpret_cast<const uint8_t*>(cbor->data()),
                      cbor->size());
  CBORTokenizer tokenizer(bytes);
  if (tokenizer.TokenTag() == CBORTokenTag::ERROR_VALUE)
    return tokenizer.Status();
  if (tokenizer.TokenTag() != CBORTokenTag::ENVELOPE)
    return Status(Error::CBOR_INVALID_ENVELOPE, 0);
  size_t envelope_size = tokenizer.GetEnvelopeContents().size();
  size_t old_size = cbor->size();
  // The envelope must span the entire buffer.
  if (old_size != envelope_size + kEncodedEnvelopeHeaderSize)
    return Status(Error::CBOR_INVALID_ENVELOPE, 0);
  if (envelope_size == 0 ||
      (tokenizer.GetEnvelopeContents()[0] != EncodeIndefiniteLengthMapStart()))
    return Status(Error::CBOR_MAP_START_EXPECTED, kEncodedEnvelopeHeaderSize);
  if (bytes[bytes.size() - 1] != EncodeStop())
    return Status(Error::CBOR_MAP_STOP_EXPECTED, cbor->size() - 1);
  // Remove the stop byte, append the new entry, restore the stop byte.
  cbor->pop_back();
  EncodeString8(string8_key, cbor);
  EncodeString8(string8_value, cbor);
  cbor->push_back(EncodeStop());
  size_t new_envelope_size = envelope_size + (cbor->size() - old_size);
  if (new_envelope_size > std::numeric_limits<uint32_t>::max())
    return Status(Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED, 0);
  // Rewrite the envelope's 32 bit length, big-endian (most significant
  // byte first), in place.
  size_t size_pos = cbor->size() - new_envelope_size - sizeof(uint32_t);
  uint8_t* out = reinterpret_cast<uint8_t*>(&cbor->at(size_pos));
  *(out++) = (new_envelope_size >> 24) & 0xff;
  *(out++) = (new_envelope_size >> 16) & 0xff;
  *(out++) = (new_envelope_size >> 8) & 0xff;
  *(out) = new_envelope_size & 0xff;
  return Status();
}
// Public entry point for std::vector<uint8_t> buffers; see
// AppendString8EntryToCBORMapTmpl above.
Status AppendString8EntryToCBORMap(span<uint8_t> string8_key,
                                   span<uint8_t> string8_value,
                                   std::vector<uint8_t>* cbor) {
  return AppendString8EntryToCBORMapTmpl(string8_key, string8_value, cbor);
}
// Public entry point for std::string buffers.
Status AppendString8EntryToCBORMap(span<uint8_t> string8_key,
                                   span<uint8_t> string8_value,
                                   std::string* cbor) {
  return AppendString8EntryToCBORMapTmpl(string8_key, string8_value, cbor);
}
+} // namespace cbor
+
+namespace json {
+
+// =============================================================================
+// json::NewJSONEncoder - for encoding streaming parser events as JSON
+// =============================================================================
+
+namespace {
// Prints |value| to |out| as exactly four lowercase hex digits, most
// significant nibble first.
template <typename C>
void PrintHex(uint16_t value, C* out) {
  const char kHexDigits[] = "0123456789abcdef";
  out->push_back(kHexDigits[(value >> 12) & 0xf]);
  out->push_back(kHexDigits[(value >> 8) & 0xf]);
  out->push_back(kHexDigits[(value >> 4) & 0xf]);
  out->push_back(kHexDigits[value & 0xf]);
}
+
// In the writer below, we maintain a stack of State instances.
// It is just enough to emit the appropriate delimiters and brackets
// in JSON.
enum class Container {
  // Used for the top-level, initial state.
  NONE,
  // Inside a JSON object.
  MAP,
  // Inside a JSON array.
  ARRAY
};
// Tracks how many elements have been emitted into one open container so
// that StartElement can insert the correct separator: ',' between
// elements, ':' between a map key and its value.
class State {
 public:
  explicit State(Container container) : container_(container) {}
  void StartElement(std::vector<uint8_t>* out) { StartElementTmpl(out); }
  void StartElement(std::string* out) { StartElementTmpl(out); }
  Container container() const { return container_; }

 private:
  template <typename C>
  void StartElementTmpl(C* out) {
    assert(container_ != Container::NONE || size_ == 0);
    if (size_ != 0) {
      // In a map, entries alternate key, value, key, value, ...; the
      // element at an odd position is a value, preceded by ':'.
      const bool needs_colon =
          (size_ % 2 != 0) && container_ != Container::ARRAY;
      out->push_back(needs_colon ? ':' : ',');
    }
    ++size_;
  }

  Container container_ = Container::NONE;
  int size_ = 0;
};
+
+constexpr char kBase64Table[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz0123456789+/";
+
+template <typename C>
+void Base64Encode(const span<uint8_t>& in, C* out) {
+ // The following three cases are based on the tables in the example
+ // section in https://en.wikipedia.org/wiki/Base64. We process three
+ // input bytes at a time, emitting 4 output bytes at a time.
+ size_t ii = 0;
+
+ // While possible, process three input bytes.
+ for (; ii + 3 <= in.size(); ii += 3) {
+ uint32_t twentyfour_bits = (in[ii] << 16) | (in[ii + 1] << 8) | in[ii + 2];
+ out->push_back(kBase64Table[(twentyfour_bits >> 18)]);
+ out->push_back(kBase64Table[(twentyfour_bits >> 12) & 0x3f]);
+ out->push_back(kBase64Table[(twentyfour_bits >> 6) & 0x3f]);
+ out->push_back(kBase64Table[twentyfour_bits & 0x3f]);
+ }
+ if (ii + 2 <= in.size()) { // Process two input bytes.
+ uint32_t twentyfour_bits = (in[ii] << 16) | (in[ii + 1] << 8);
+ out->push_back(kBase64Table[(twentyfour_bits >> 18)]);
+ out->push_back(kBase64Table[(twentyfour_bits >> 12) & 0x3f]);
+ out->push_back(kBase64Table[(twentyfour_bits >> 6) & 0x3f]);
+ out->push_back('='); // Emit padding.
+ return;
+ }
+ if (ii + 1 <= in.size()) { // Process a single input byte.
+ uint32_t twentyfour_bits = (in[ii] << 16);
+ out->push_back(kBase64Table[(twentyfour_bits >> 18)]);
+ out->push_back(kBase64Table[(twentyfour_bits >> 12) & 0x3f]);
+ out->push_back('='); // Emit padding.
+ out->push_back('='); // Emit padding.
+ }
+}
+
+// Implements a handler for JSON parser events to emit a JSON string.
+template <typename C>
+class JSONEncoder : public StreamingParserHandler {
+ public:
+ JSONEncoder(const Platform* platform, C* out, Status* status)
+ : platform_(platform), out_(out), status_(status) {
+ *status_ = Status();
+ state_.emplace(Container::NONE);
+ }
+
+ void HandleMapBegin() override {
+ if (!status_->ok())
+ return;
+ assert(!state_.empty());
+ state_.top().StartElement(out_);
+ state_.emplace(Container::MAP);
+ Emit('{');
+ }
+
+ void HandleMapEnd() override {
+ if (!status_->ok())
+ return;
+ assert(state_.size() >= 2 && state_.top().container() == Container::MAP);
+ state_.pop();
+ Emit('}');
+ }
+
+ void HandleArrayBegin() override {
+ if (!status_->ok())
+ return;
+ state_.top().StartElement(out_);
+ state_.emplace(Container::ARRAY);
+ Emit('[');
+ }
+
+ void HandleArrayEnd() override {
+ if (!status_->ok())
+ return;
+ assert(state_.size() >= 2 && state_.top().container() == Container::ARRAY);
+ state_.pop();
+ Emit(']');
+ }
+
+ void HandleString16(span<uint16_t> chars) override {
+ if (!status_->ok())
+ return;
+ state_.top().StartElement(out_);
+ Emit('"');
+ for (const uint16_t ch : chars) {
+ if (ch == '"') {
+ Emit("\\\"");
+ } else if (ch == '\\') {
+ Emit("\\\\");
+ } else if (ch == '\b') {
+ Emit("\\b");
+ } else if (ch == '\f') {
+ Emit("\\f");
+ } else if (ch == '\n') {
+ Emit("\\n");
+ } else if (ch == '\r') {
+ Emit("\\r");
+ } else if (ch == '\t') {
+ Emit("\\t");
+ } else if (ch >= 32 && ch <= 126) {
+ Emit(ch);
+ } else {
+ Emit("\\u");
+ PrintHex(ch, out_);
+ }
+ }
+ Emit('"');
+ }
+
+ void HandleString8(span<uint8_t> chars) override {
+ if (!status_->ok())
+ return;
+ state_.top().StartElement(out_);
+ Emit('"');
+ for (size_t ii = 0; ii < chars.size(); ++ii) {
+ uint8_t c = chars[ii];
+ if (c == '"') {
+ Emit("\\\"");
+ } else if (c == '\\') {
+ Emit("\\\\");
+ } else if (c == '\b') {
+ Emit("\\b");
+ } else if (c == '\f') {
+ Emit("\\f");
+ } else if (c == '\n') {
+ Emit("\\n");
+ } else if (c == '\r') {
+ Emit("\\r");
+ } else if (c == '\t') {
+ Emit("\\t");
+ } else if (c >= 32 && c <= 126) {
+ Emit(c);
+ } else if (c < 32) {
+ Emit("\\u");
+ PrintHex(static_cast<uint16_t>(c), out_);
+ } else {
+ // Inspect the leading byte to figure out how long the utf8
+ // byte sequence is; while doing this initialize |codepoint|
+ // with the first few bits.
+ // See table in: https://en.wikipedia.org/wiki/UTF-8
+ // byte one is 110x xxxx -> 2 byte utf8 sequence
+ // byte one is 1110 xxxx -> 3 byte utf8 sequence
+ // byte one is 1111 0xxx -> 4 byte utf8 sequence
+ uint32_t codepoint;
+ int num_bytes_left;
+ if ((c & 0xe0) == 0xc0) { // 2 byte utf8 sequence
+ num_bytes_left = 1;
+ codepoint = c & 0x1f;
+ } else if ((c & 0xf0) == 0xe0) { // 3 byte utf8 sequence
+ num_bytes_left = 2;
+ codepoint = c & 0x0f;
+ } else if ((c & 0xf8) == 0xf0) { // 4 byte utf8 sequence
+ codepoint = c & 0x07;
+ num_bytes_left = 3;
+ } else {
+ continue; // invalid leading byte
+ }
+
+ // If we have enough bytes in our input, decode the remaining ones
+ // belonging to this Unicode character into |codepoint|.
+ if (ii + num_bytes_left > chars.size())
+ continue;
+ while (num_bytes_left > 0) {
+ c = chars[++ii];
+ --num_bytes_left;
+ // Check the next byte is a continuation byte, that is 10xx xxxx.
+ if ((c & 0xc0) != 0x80)
+ continue;
+ codepoint = (codepoint << 6) | (c & 0x3f);
+ }
+
+ // Disallow overlong encodings for ascii characters, as these
+ // would include " and other characters significant to JSON
+ // string termination / control.
+ if (codepoint < 0x7f)
+ continue;
+ // Invalid in UTF8, and can't be represented in UTF16 anyway.
+ if (codepoint > 0x10ffff)
+ continue;
+
+ // So, now we transcode to UTF16,
+ // using the math described at https://en.wikipedia.org/wiki/UTF-16,
+ // for either one or two 16 bit characters.
+ if (codepoint < 0xffff) {
+ Emit("\\u");
+ PrintHex(static_cast<uint16_t>(codepoint), out_);
+ continue;
+ }
+ codepoint -= 0x10000;
+ // high surrogate
+ Emit("\\u");
+ PrintHex(static_cast<uint16_t>((codepoint >> 10) + 0xd800), out_);
+ // low surrogate
+ Emit("\\u");
+ PrintHex(static_cast<uint16_t>((codepoint & 0x3ff) + 0xdc00), out_);
+ }
+ }
+ Emit('"');
+ }
+
+ void HandleBinary(span<uint8_t> bytes) override {
+ if (!status_->ok())
+ return;
+ state_.top().StartElement(out_);
+ Emit('"');
+ Base64Encode(bytes, out_);
+ Emit('"');
+ }
+
+ void HandleDouble(double value) override {
+ if (!status_->ok())
+ return;
+ state_.top().StartElement(out_);
+ // JSON cannot represent NaN or Infinity. So, for compatibility,
+ // we behave like the JSON object in web browsers: emit 'null'.
+ if (!std::isfinite(value)) {
+ Emit("null");
+ return;
+ }
+ std::unique_ptr<char[]> str_value = platform_->DToStr(value);
+
+ // DToStr may fail to emit a 0 before the decimal dot. E.g. this is
+ // the case in base::NumberToString in Chromium (which is based on
+ // dmg_fp). So, much like
+ // https://cs.chromium.org/chromium/src/base/json/json_writer.cc
+ // we probe for this and emit the leading 0 anyway if necessary.
+ const char* chars = str_value.get();
+ if (chars[0] == '.') {
+ Emit('0');
+ } else if (chars[0] == '-' && chars[1] == '.') {
+ Emit("-0");
+ ++chars;
+ }
+ Emit(chars);
+ }
+
+ void HandleInt32(int32_t value) override {
+ if (!status_->ok())
+ return;
+ state_.top().StartElement(out_);
+ Emit(std::to_string(value));
+ }
+
+ void HandleBool(bool value) override {
+ if (!status_->ok())
+ return;
+ state_.top().StartElement(out_);
+ Emit(value ? "true" : "false");
+ }
+
+ void HandleNull() override {
+ if (!status_->ok())
+ return;
+ state_.top().StartElement(out_);
+ Emit("null");
+ }
+
+ void HandleError(Status error) override {
+ assert(!error.ok());
+ *status_ = error;
+ out_->clear();
+ }
+
+ private:
+ void Emit(char c) { out_->push_back(c); }
+ void Emit(const char* str) {
+ out_->insert(out_->end(), str, str + strlen(str));
+ }
+ void Emit(const std::string& str) {
+ out_->insert(out_->end(), str.begin(), str.end());
+ }
+
+ const Platform* platform_;
+ C* out_;
+ Status* status_;
+ std::stack<State> state_;
+};
+} // namespace
+
+std::unique_ptr<StreamingParserHandler> NewJSONEncoder(
+ const Platform* platform,
+ std::vector<uint8_t>* out,
+ Status* status) {
+ return std::unique_ptr<StreamingParserHandler>(
+ new JSONEncoder<std::vector<uint8_t>>(platform, out, status));
+}
+std::unique_ptr<StreamingParserHandler> NewJSONEncoder(const Platform* platform,
+ std::string* out,
+ Status* status) {
+ return std::unique_ptr<StreamingParserHandler>(
+ new JSONEncoder<std::string>(platform, out, status));
+}
+
+// =============================================================================
+// json::ParseJSON - for receiving streaming parser events for JSON.
+// =============================================================================
+
+namespace {
// Maximum nesting depth for objects/arrays while parsing JSON; guards
// against stack exhaustion on deeply nested (possibly hostile) input.
const int kStackLimit = 300;

// Token types recognized by the JsonParser below.
enum Token {
  ObjectBegin,
  ObjectEnd,
  ArrayBegin,
  ArrayEnd,
  StringLiteral,
  Number,
  BoolTrue,
  BoolFalse,
  NullToken,
  ListSeparator,
  ObjectPairSeparator,
  InvalidToken,
  NoInput
};

// Spellings of the JSON literal constants, matched by ParseConstToken.
const char* const kNullString = "null";
const char* const kTrueString = "true";
const char* const kFalseString = "false";
+
+template <typename Char>
+class JsonParser {
+ public:
+ JsonParser(const Platform* platform, StreamingParserHandler* handler)
+ : platform_(platform), handler_(handler) {}
+
+ void Parse(const Char* start, size_t length) {
+ start_pos_ = start;
+ const Char* end = start + length;
+ const Char* tokenEnd = nullptr;
+ ParseValue(start, end, &tokenEnd, 0);
+ if (error_)
+ return;
+ if (tokenEnd != end) {
+ HandleError(Error::JSON_PARSER_UNPROCESSED_INPUT_REMAINS, tokenEnd);
+ }
+ }
+
+ private:
+ bool CharsToDouble(const uint16_t* chars, size_t length, double* result) {
+ std::string buffer;
+ buffer.reserve(length + 1);
+ for (size_t ii = 0; ii < length; ++ii) {
+ bool is_ascii = !(chars[ii] & ~0x7F);
+ if (!is_ascii)
+ return false;
+ buffer.push_back(static_cast<char>(chars[ii]));
+ }
+ return platform_->StrToD(buffer.c_str(), result);
+ }
+
+ bool CharsToDouble(const uint8_t* chars, size_t length, double* result) {
+ std::string buffer(reinterpret_cast<const char*>(chars), length);
+ return platform_->StrToD(buffer.c_str(), result);
+ }
+
+ static bool ParseConstToken(const Char* start,
+ const Char* end,
+ const Char** token_end,
+ const char* token) {
+ // |token| is \0 terminated, it's one of the constants at top of the file.
+ while (start < end && *token != '\0' && *start++ == *token++) {
+ }
+ if (*token != '\0')
+ return false;
+ *token_end = start;
+ return true;
+ }
+
+ static bool ReadInt(const Char* start,
+ const Char* end,
+ const Char** token_end,
+ bool allow_leading_zeros) {
+ if (start == end)
+ return false;
+ bool has_leading_zero = '0' == *start;
+ int length = 0;
+ while (start < end && '0' <= *start && *start <= '9') {
+ ++start;
+ ++length;
+ }
+ if (!length)
+ return false;
+ if (!allow_leading_zeros && length > 1 && has_leading_zero)
+ return false;
+ *token_end = start;
+ return true;
+ }
+
+ static bool ParseNumberToken(const Char* start,
+ const Char* end,
+ const Char** token_end) {
+ // We just grab the number here. We validate the size in DecodeNumber.
+ // According to RFC4627, a valid number is: [minus] int [frac] [exp]
+ if (start == end)
+ return false;
+ Char c = *start;
+ if ('-' == c)
+ ++start;
+
+ if (!ReadInt(start, end, &start, /*allow_leading_zeros=*/false))
+ return false;
+ if (start == end) {
+ *token_end = start;
+ return true;
+ }
+
+ // Optional fraction part
+ c = *start;
+ if ('.' == c) {
+ ++start;
+ if (!ReadInt(start, end, &start, /*allow_leading_zeros=*/true))
+ return false;
+ if (start == end) {
+ *token_end = start;
+ return true;
+ }
+ c = *start;
+ }
+
+ // Optional exponent part
+ if ('e' == c || 'E' == c) {
+ ++start;
+ if (start == end)
+ return false;
+ c = *start;
+ if ('-' == c || '+' == c) {
+ ++start;
+ if (start == end)
+ return false;
+ }
+ if (!ReadInt(start, end, &start, /*allow_leading_zeros=*/true))
+ return false;
+ }
+
+ *token_end = start;
+ return true;
+ }
+
+ static bool ReadHexDigits(const Char* start,
+ const Char* end,
+ const Char** token_end,
+ int digits) {
+ if (end - start < digits)
+ return false;
+ for (int i = 0; i < digits; ++i) {
+ Char c = *start++;
+ if (!(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') ||
+ ('A' <= c && c <= 'F')))
+ return false;
+ }
+ *token_end = start;
+ return true;
+ }
+
+ static bool ParseStringToken(const Char* start,
+ const Char* end,
+ const Char** token_end) {
+ while (start < end) {
+ Char c = *start++;
+ if ('\\' == c) {
+ if (start == end)
+ return false;
+ c = *start++;
+ // Make sure the escaped char is valid.
+ switch (c) {
+ case 'x':
+ if (!ReadHexDigits(start, end, &start, 2))
+ return false;
+ break;
+ case 'u':
+ if (!ReadHexDigits(start, end, &start, 4))
+ return false;
+ break;
+ case '\\':
+ case '/':
+ case 'b':
+ case 'f':
+ case 'n':
+ case 'r':
+ case 't':
+ case 'v':
+ case '"':
+ break;
+ default:
+ return false;
+ }
+ } else if ('"' == c) {
+ *token_end = start;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Recognizes a // line comment (terminated by newline or end-of-input)
+ // or a /* */ block comment (which must be closed before end-of-input).
+ // On success sets |*comment_end| just past the comment and returns true;
+ // returns false when |start| does not begin a comment or a block comment
+ // is unterminated.
+ static bool SkipComment(const Char* start,
+ const Char* end,
+ const Char** comment_end) {
+ if (start == end)
+ return false;
+
+ if (*start != '/' || start + 1 >= end)
+ return false;
+ ++start;
+
+ if (*start == '/') {
+ // Single line comment, read to newline.
+ for (++start; start < end; ++start) {
+ if (*start == '\n' || *start == '\r') {
+ *comment_end = start + 1;
+ return true;
+ }
+ }
+ *comment_end = end;
+ // Comment reaches end-of-input, which is fine.
+ return true;
+ }
+
+ if (*start == '*') {
+ Char previous = '\0';
+ // Block comment, read until end marker.
+ for (++start; start < end; previous = *start++) {
+ if (previous == '*' && *start == '/') {
+ *comment_end = start + 1;
+ return true;
+ }
+ }
+ // Block comment must close before end-of-input.
+ return false;
+ }
+
+ return false;
+ }
+
+ // Returns true for the whitespace characters this parser skips between
+ // tokens.
+ static bool IsSpaceOrNewLine(Char c) {
+ // \v = vertical tab; \f = form feed page break.
+ return c == ' ' || c == '\n' || c == '\v' || c == '\f' || c == '\r' ||
+ c == '\t';
+ }
+
+ // Advances past any run of whitespace and // or /* */ comments, setting
+ // |*whitespace_end| to the first character that is neither. A malformed
+ // comment (SkipComment returning false) simply stops the skipping; the
+ // caller's tokenizer will then report the '/' as an invalid token.
+ static void SkipWhitespaceAndComments(const Char* start,
+ const Char* end,
+ const Char** whitespace_end) {
+ while (start < end) {
+ if (IsSpaceOrNewLine(*start)) {
+ ++start;
+ } else if (*start == '/') {
+ const Char* comment_end = nullptr;
+ if (!SkipComment(start, end, &comment_end))
+ break;
+ start = comment_end;
+ } else {
+ break;
+ }
+ }
+ *whitespace_end = start;
+ }
+
+ // Tokenizer: skips leading whitespace/comments, then classifies the next
+ // token. |*tokenStart| receives the first non-whitespace character (set
+ // even when NoInput/InvalidToken is returned) and |*token_end| the first
+ // character after the token (only set for valid tokens).
+ static Token ParseToken(const Char* start,
+ const Char* end,
+ const Char** tokenStart,
+ const Char** token_end) {
+ SkipWhitespaceAndComments(start, end, tokenStart);
+ start = *tokenStart;
+
+ if (start == end)
+ return NoInput;
+
+ switch (*start) {
+ case 'n':
+ if (ParseConstToken(start, end, token_end, kNullString))
+ return NullToken;
+ break;
+ case 't':
+ if (ParseConstToken(start, end, token_end, kTrueString))
+ return BoolTrue;
+ break;
+ case 'f':
+ if (ParseConstToken(start, end, token_end, kFalseString))
+ return BoolFalse;
+ break;
+ case '[':
+ *token_end = start + 1;
+ return ArrayBegin;
+ case ']':
+ *token_end = start + 1;
+ return ArrayEnd;
+ case ',':
+ *token_end = start + 1;
+ return ListSeparator;
+ case '{':
+ *token_end = start + 1;
+ return ObjectBegin;
+ case '}':
+ *token_end = start + 1;
+ return ObjectEnd;
+ case ':':
+ *token_end = start + 1;
+ return ObjectPairSeparator;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '-':
+ if (ParseNumberToken(start, end, token_end))
+ return Number;
+ break;
+ case '"':
+ // Skip the opening quote; ParseStringToken consumes through the
+ // closing quote.
+ if (ParseStringToken(start + 1, end, token_end))
+ return StringLiteral;
+ break;
+ }
+ return InvalidToken;
+ }
+
+ // Maps a hex digit to its value 0..15. Precondition: |c| is a valid hex
+ // digit (callers only pass characters previously validated by
+ // ReadHexDigits); anything else trips the assert in debug builds.
+ static int HexToInt(Char c) {
+ if ('0' <= c && c <= '9')
+ return c - '0';
+ if ('A' <= c && c <= 'F')
+ return c - 'A' + 10;
+ if ('a' <= c && c <= 'f')
+ return c - 'a' + 10;
+ assert(false); // Unreachable.
+ return 0;
+ }
+
+ // Decodes the contents of a JSON string literal (the characters between
+ // the quotes, as previously validated by ParseStringToken) into UTF-16
+ // code units appended to |output|. When |Char| is a byte, multi-byte
+ // UTF-8 sequences are transcoded to UTF-16 (one or two units). Returns
+ // false on invalid UTF-8 or an unsupported escape.
+ static bool DecodeString(const Char* start,
+ const Char* end,
+ std::vector<uint16_t>* output) {
+ if (start == end)
+ return true;
+ if (start > end)
+ return false;
+ output->reserve(end - start);
+ while (start < end) {
+ uint16_t c = *start++;
+ // If the |Char| we're dealing with is really a byte, then
+ // we have utf8 here, and we need to check for multibyte characters
+ // and transcode them to utf16 (either one or two utf16 chars).
+ if (sizeof(Char) == sizeof(uint8_t) && c > 0x7f) {
+ // Inspect the leading byte to figure out how long the utf8
+ // byte sequence is; while doing this initialize |codepoint|
+ // with the first few bits.
+ // See table in: https://en.wikipedia.org/wiki/UTF-8
+ // byte one is 110x xxxx -> 2 byte utf8 sequence
+ // byte one is 1110 xxxx -> 3 byte utf8 sequence
+ // byte one is 1111 0xxx -> 4 byte utf8 sequence
+ uint32_t codepoint;
+ int num_bytes_left;
+ if ((c & 0xe0) == 0xc0) { // 2 byte utf8 sequence
+ num_bytes_left = 1;
+ codepoint = c & 0x1f;
+ } else if ((c & 0xf0) == 0xe0) { // 3 byte utf8 sequence
+ num_bytes_left = 2;
+ codepoint = c & 0x0f;
+ } else if ((c & 0xf8) == 0xf0) { // 4 byte utf8 sequence
+ codepoint = c & 0x07;
+ num_bytes_left = 3;
+ } else {
+ return false; // invalid leading byte
+ }
+
+ // If we have enough bytes in our input, decode the remaining ones
+ // belonging to this Unicode character into |codepoint|.
+ if (start + num_bytes_left > end)
+ return false;
+ while (num_bytes_left > 0) {
+ c = *start++;
+ --num_bytes_left;
+ // Check the next byte is a continuation byte, that is 10xx xxxx.
+ if ((c & 0xc0) != 0x80)
+ return false;
+ codepoint = (codepoint << 6) | (c & 0x3f);
+ }
+
+ // Disallow overlong encodings for ascii characters, as these
+ // would include " and other characters significant to JSON
+ // string termination / control.
+ if (codepoint <= 0x7f)
+ return false;
+ // Invalid in UTF8, and can't be represented in UTF16 anyway.
+ if (codepoint > 0x10ffff)
+ return false;
+ // NOTE(review): UTF-8-encoded surrogate code points
+ // (0xd800..0xdfff) are not rejected here — confirm whether that
+ // leniency is intended.
+
+ // So, now we transcode to UTF16,
+ // using the math described at https://en.wikipedia.org/wiki/UTF-16,
+ // for either one or two 16 bit characters.
+ // NOTE(review): should this comparison be codepoint <= 0xffff?
+ // As written, U+FFFF falls through to the surrogate path and
+ // "codepoint -= 0x10000" wraps around (codepoint is uint32_t),
+ // producing bogus surrogate values. TODO confirm upstream fix.
+ if (codepoint < 0xffff) {
+ output->push_back(codepoint);
+ continue;
+ }
+ codepoint -= 0x10000;
+ output->push_back((codepoint >> 10) + 0xd800); // high surrogate
+ output->push_back((codepoint & 0x3ff) + 0xdc00); // low surrogate
+ continue;
+ }
+ if ('\\' != c) {
+ output->push_back(c);
+ continue;
+ }
+ if (start == end)
+ return false;
+ c = *start++;
+
+ if (c == 'x') {
+ // \x is not supported.
+ return false;
+ }
+
+ switch (c) {
+ case '"':
+ case '/':
+ case '\\':
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'f':
+ c = '\f';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ case 'r':
+ c = '\r';
+ break;
+ case 't':
+ c = '\t';
+ break;
+ case 'v':
+ c = '\v';
+ break;
+ case 'u':
+ // ParseStringToken already verified that 4 hex digits follow, so
+ // no bounds check is needed before reading them.
+ c = (HexToInt(*start) << 12) + (HexToInt(*(start + 1)) << 8) +
+ (HexToInt(*(start + 2)) << 4) + HexToInt(*(start + 3));
+ start += 4;
+ break;
+ default:
+ return false;
+ }
+ output->push_back(c);
+ }
+ return true;
+ }
+
+ // Recursive-descent parse of a single JSON value starting at |start|.
+ // Emits events to |handler_| as structure is recognized; on failure calls
+ // HandleError (latching |error_|) and returns early. |*value_token_end|
+ // is set to the first character after the value plus any trailing
+ // whitespace/comments. |depth| guards recursion via kStackLimit.
+ void ParseValue(const Char* start,
+ const Char* end,
+ const Char** value_token_end,
+ int depth) {
+ if (depth > kStackLimit) {
+ HandleError(Error::JSON_PARSER_STACK_LIMIT_EXCEEDED, start);
+ return;
+ }
+ const Char* token_start = nullptr;
+ const Char* token_end = nullptr;
+ Token token = ParseToken(start, end, &token_start, &token_end);
+ switch (token) {
+ case NoInput:
+ HandleError(Error::JSON_PARSER_NO_INPUT, token_start);
+ return;
+ case InvalidToken:
+ HandleError(Error::JSON_PARSER_INVALID_TOKEN, token_start);
+ return;
+ case NullToken:
+ handler_->HandleNull();
+ break;
+ case BoolTrue:
+ handler_->HandleBool(true);
+ break;
+ case BoolFalse:
+ handler_->HandleBool(false);
+ break;
+ case Number: {
+ double value;
+ if (!CharsToDouble(token_start, token_end - token_start, &value)) {
+ HandleError(Error::JSON_PARSER_INVALID_NUMBER, token_start);
+ return;
+ }
+ // Emit as int32 when the double is exactly representable as one;
+ // otherwise fall back to a double event.
+ if (value >= std::numeric_limits<int32_t>::min() &&
+ value <= std::numeric_limits<int32_t>::max() &&
+ static_cast<int32_t>(value) == value)
+ handler_->HandleInt32(static_cast<int32_t>(value));
+ else
+ handler_->HandleDouble(value);
+ break;
+ }
+ case StringLiteral: {
+ std::vector<uint16_t> value;
+ // token_start/token_end include the quotes; strip them here.
+ bool ok = DecodeString(token_start + 1, token_end - 1, &value);
+ if (!ok) {
+ HandleError(Error::JSON_PARSER_INVALID_STRING, token_start);
+ return;
+ }
+ handler_->HandleString16(span<uint16_t>(value.data(), value.size()));
+ break;
+ }
+ case ArrayBegin: {
+ handler_->HandleArrayBegin();
+ start = token_end;
+ token = ParseToken(start, end, &token_start, &token_end);
+ while (token != ArrayEnd) {
+ // ParseValue re-tokenizes from |start|; the lookahead above only
+ // decides whether the array is finished.
+ ParseValue(start, end, &token_end, depth + 1);
+ if (error_)
+ return;
+
+ // After a list value, we expect a comma or the end of the list.
+ start = token_end;
+ token = ParseToken(start, end, &token_start, &token_end);
+ if (token == ListSeparator) {
+ start = token_end;
+ token = ParseToken(start, end, &token_start, &token_end);
+ // Reject trailing commas: "[1,]" is an error.
+ if (token == ArrayEnd) {
+ HandleError(Error::JSON_PARSER_UNEXPECTED_ARRAY_END, token_start);
+ return;
+ }
+ } else if (token != ArrayEnd) {
+ // Unexpected value after list value. Bail out.
+ HandleError(Error::JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED,
+ token_start);
+ return;
+ }
+ }
+ handler_->HandleArrayEnd();
+ break;
+ }
+ case ObjectBegin: {
+ handler_->HandleMapBegin();
+ start = token_end;
+ token = ParseToken(start, end, &token_start, &token_end);
+ while (token != ObjectEnd) {
+ // Each entry is: string key, ':', value.
+ if (token != StringLiteral) {
+ HandleError(Error::JSON_PARSER_STRING_LITERAL_EXPECTED,
+ token_start);
+ return;
+ }
+ std::vector<uint16_t> key;
+ if (!DecodeString(token_start + 1, token_end - 1, &key)) {
+ HandleError(Error::JSON_PARSER_INVALID_STRING, token_start);
+ return;
+ }
+ handler_->HandleString16(span<uint16_t>(key.data(), key.size()));
+ start = token_end;
+
+ token = ParseToken(start, end, &token_start, &token_end);
+ if (token != ObjectPairSeparator) {
+ HandleError(Error::JSON_PARSER_COLON_EXPECTED, token_start);
+ return;
+ }
+ start = token_end;
+
+ ParseValue(start, end, &token_end, depth + 1);
+ if (error_)
+ return;
+ start = token_end;
+
+ // After a key/value pair, we expect a comma or the end of the
+ // object.
+ token = ParseToken(start, end, &token_start, &token_end);
+ if (token == ListSeparator) {
+ start = token_end;
+ token = ParseToken(start, end, &token_start, &token_end);
+ // Reject trailing commas: "{\"k\":1,}" is an error.
+ if (token == ObjectEnd) {
+ HandleError(Error::JSON_PARSER_UNEXPECTED_MAP_END, token_start);
+ return;
+ }
+ } else if (token != ObjectEnd) {
+ // Unexpected value after last object value. Bail out.
+ HandleError(Error::JSON_PARSER_COMMA_OR_MAP_END_EXPECTED,
+ token_start);
+ return;
+ }
+ }
+ handler_->HandleMapEnd();
+ break;
+ }
+
+ default:
+ // We got a token that's not a value.
+ HandleError(Error::JSON_PARSER_VALUE_EXPECTED, token_start);
+ return;
+ }
+
+ SkipWhitespaceAndComments(token_end, end, value_token_end);
+ }
+
+ // Reports the first error only: forwards |error| with the absolute offset
+ // of |pos| (relative to |start_pos_|) to the handler and latches |error_|
+ // so subsequent errors are suppressed.
+ void HandleError(Error error, const Char* pos) {
+ assert(error != Error::OK);
+ if (!error_) {
+ handler_->HandleError(
+ Status{error, static_cast<size_t>(pos - start_pos_)});
+ error_ = true;
+ }
+ }
+
+ const Char* start_pos_ = nullptr;
+ bool error_ = false;
+ const Platform* platform_;
+ StreamingParserHandler* handler_;
+};
+} // namespace
+
+// Parses UTF-8 encoded JSON from |chars|, sending events to |handler|.
+void ParseJSON(const Platform& platform,
+ span<uint8_t> chars,
+ StreamingParserHandler* handler) {
+ JsonParser<uint8_t> parser(&platform, handler);
+ parser.Parse(chars.data(), chars.size());
+}
+
+// Parses UTF-16 encoded JSON from |chars|, sending events to |handler|.
+void ParseJSON(const Platform& platform,
+ span<uint16_t> chars,
+ StreamingParserHandler* handler) {
+ JsonParser<uint16_t> parser(&platform, handler);
+ parser.Parse(chars.data(), chars.size());
+}
+
+// =============================================================================
+// json::ConvertCBORToJSON, json::ConvertJSONToCBOR - for transcoding
+// =============================================================================
+// Shared implementation for the ConvertCBORToJSON overloads below: wires a
+// JSON encoder writing into |json| (|C| is std::string or
+// std::vector<uint8_t>) to a CBOR parse of |cbor|. |status| reflects the
+// first error from either side, or OK.
+template <typename C>
+Status ConvertCBORToJSONTmpl(const Platform& platform,
+ span<uint8_t> cbor,
+ C* json) {
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&platform, json, &status);
+ cbor::ParseCBOR(cbor, json_writer.get());
+ return status;
+}
+
+// Public overloads: transcode CBOR to JSON into a byte vector or a string.
+Status ConvertCBORToJSON(const Platform& platform,
+ span<uint8_t> cbor,
+ std::vector<uint8_t>* json) {
+ return ConvertCBORToJSONTmpl(platform, cbor, json);
+}
+Status ConvertCBORToJSON(const Platform& platform,
+ span<uint8_t> cbor,
+ std::string* json) {
+ return ConvertCBORToJSONTmpl(platform, cbor, json);
+}
+
+// Shared implementation for the ConvertJSONToCBOR overloads below: wires a
+// CBOR encoder writing into |cbor| to a JSON parse of |json|. |T| selects
+// the input character width (uint8_t for UTF-8, uint16_t for UTF-16).
+template <typename T, typename C>
+Status ConvertJSONToCBORTmpl(const Platform& platform, span<T> json, C* cbor) {
+ Status status;
+ std::unique_ptr<StreamingParserHandler> encoder =
+ cbor::NewCBOREncoder(cbor, &status);
+ ParseJSON(platform, json, encoder.get());
+ return status;
+}
+// Public overloads: transcode UTF-8 or UTF-16 JSON into a CBOR string or
+// byte vector.
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint8_t> json,
+ std::string* cbor) {
+ return ConvertJSONToCBORTmpl(platform, json, cbor);
+}
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint16_t> json,
+ std::string* cbor) {
+ return ConvertJSONToCBORTmpl(platform, json, cbor);
+}
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint8_t> json,
+ std::vector<uint8_t>* cbor) {
+ return ConvertJSONToCBORTmpl(platform, json, cbor);
+}
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint16_t> json,
+ std::vector<uint8_t>* cbor) {
+ return ConvertJSONToCBORTmpl(platform, json, cbor);
+}
+} // namespace json
+} // namespace v8_inspector_protocol_encoding
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding.h b/deps/v8/third_party/inspector_protocol/encoding/encoding.h
new file mode 100644
index 0000000000..90916d42b3
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding.h
@@ -0,0 +1,510 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_PROTOCOL_ENCODING_ENCODING_H_
+#define V8_INSPECTOR_PROTOCOL_ENCODING_ENCODING_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace v8_inspector_protocol_encoding {
+
+// =============================================================================
+// span - sequence of bytes
+// =============================================================================
+
+// This template is similar to std::span, which will be included in C++20.
+// A non-owning, read-only view over a contiguous sequence of T. The caller
+// must keep the underlying storage alive for the lifetime of the span.
+template <typename T>
+class span {
+ public:
+ using index_type = size_t;
+
+ span() : data_(nullptr), size_(0) {}
+ span(const T* data, index_type size) : data_(data), size_(size) {}
+
+ const T* data() const { return data_; }
+
+ const T* begin() const { return data_; }
+ const T* end() const { return data_ + size_; }
+
+ // No bounds checking is performed here or in subspan(); callers must
+ // stay within [0, size()).
+ const T& operator[](index_type idx) const { return data_[idx]; }
+
+ span<T> subspan(index_type offset, index_type count) const {
+ return span(data_ + offset, count);
+ }
+
+ span<T> subspan(index_type offset) const {
+ return span(data_ + offset, size_ - offset);
+ }
+
+ bool empty() const { return size_ == 0; }
+
+ index_type size() const { return size_; }
+ index_type size_bytes() const { return size_ * sizeof(T); }
+
+ private:
+ const T* data_; // not owned
+ index_type size_;
+};
+
+// Convenience constructors building spans over common containers. All of
+// them view the argument's storage without copying.
+template <typename T>
+span<T> SpanFrom(const std::vector<T>& v) {
+ return span<T>(v.data(), v.size());
+}
+
+// String-literal overload; N includes the terminating NUL, hence N - 1.
+template <size_t N>
+span<uint8_t> SpanFrom(const char (&str)[N]) {
+ return span<uint8_t>(reinterpret_cast<const uint8_t*>(str), N - 1);
+}
+
+// NUL-terminated C string; a null pointer yields an empty span.
+inline span<uint8_t> SpanFrom(const char* str) {
+ return str ? span<uint8_t>(reinterpret_cast<const uint8_t*>(str), strlen(str))
+ : span<uint8_t>();
+}
+
+inline span<uint8_t> SpanFrom(const std::string& v) {
+ return span<uint8_t>(reinterpret_cast<const uint8_t*>(v.data()), v.size());
+}
+
+// =============================================================================
+// Status and Error codes
+// =============================================================================
+enum class Error {
+ OK = 0,
+ // JSON parsing errors - json_parser.{h,cc}.
+ JSON_PARSER_UNPROCESSED_INPUT_REMAINS = 0x01,
+ JSON_PARSER_STACK_LIMIT_EXCEEDED = 0x02,
+ JSON_PARSER_NO_INPUT = 0x03,
+ JSON_PARSER_INVALID_TOKEN = 0x04,
+ JSON_PARSER_INVALID_NUMBER = 0x05,
+ JSON_PARSER_INVALID_STRING = 0x06,
+ JSON_PARSER_UNEXPECTED_ARRAY_END = 0x07,
+ JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED = 0x08,
+ JSON_PARSER_STRING_LITERAL_EXPECTED = 0x09,
+ JSON_PARSER_COLON_EXPECTED = 0x0a,
+ JSON_PARSER_UNEXPECTED_MAP_END = 0x0b,
+ JSON_PARSER_COMMA_OR_MAP_END_EXPECTED = 0x0c,
+ JSON_PARSER_VALUE_EXPECTED = 0x0d,
+
+ CBOR_INVALID_INT32 = 0x0e,
+ CBOR_INVALID_DOUBLE = 0x0f,
+ CBOR_INVALID_ENVELOPE = 0x10,
+ CBOR_INVALID_STRING8 = 0x11,
+ CBOR_INVALID_STRING16 = 0x12,
+ CBOR_INVALID_BINARY = 0x13,
+ CBOR_UNSUPPORTED_VALUE = 0x14,
+ CBOR_NO_INPUT = 0x15,
+ CBOR_INVALID_START_BYTE = 0x16,
+ CBOR_UNEXPECTED_EOF_EXPECTED_VALUE = 0x17,
+ CBOR_UNEXPECTED_EOF_IN_ARRAY = 0x18,
+ CBOR_UNEXPECTED_EOF_IN_MAP = 0x19,
+ CBOR_INVALID_MAP_KEY = 0x1a,
+ CBOR_STACK_LIMIT_EXCEEDED = 0x1b,
+ CBOR_TRAILING_JUNK = 0x1c,
+ CBOR_MAP_START_EXPECTED = 0x1d,
+ CBOR_MAP_STOP_EXPECTED = 0x1e,
+ CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x1f,
+};
+
+// A status value with position that can be copied. The default status
+// is OK. Usually, error status values should come with a valid position.
+struct Status {
+ static constexpr size_t npos() { return std::numeric_limits<size_t>::max(); }
+
+ bool ok() const { return error == Error::OK; }
+
+ Error error = Error::OK;
+ size_t pos = npos();
+ Status(Error error, size_t pos) : error(error), pos(pos) {}
+ Status() = default;
+
+ // Returns a 7 bit US-ASCII string, either "OK" or an error message
+ // that includes the position.
+ std::string ToASCIIString() const;
+
+ private:
+ std::string ToASCIIString(const char* msg) const;
+};
+
+// Handler interface for parser events emitted by a streaming parser.
+// See cbor::NewCBOREncoder, cbor::ParseCBOR, json::NewJSONEncoder,
+// json::ParseJSON.
+class StreamingParserHandler {
+ public:
+ virtual ~StreamingParserHandler() = default;
+ virtual void HandleMapBegin() = 0;
+ virtual void HandleMapEnd() = 0;
+ virtual void HandleArrayBegin() = 0;
+ virtual void HandleArrayEnd() = 0;
+ virtual void HandleString8(span<uint8_t> chars) = 0;
+ virtual void HandleString16(span<uint16_t> chars) = 0;
+ virtual void HandleBinary(span<uint8_t> bytes) = 0;
+ virtual void HandleDouble(double value) = 0;
+ virtual void HandleInt32(int32_t value) = 0;
+ virtual void HandleBool(bool value) = 0;
+ virtual void HandleNull() = 0;
+
+ // The parser may send one error even after other events have already
+ // been received. Client code is responsible to then discard the
+ // already processed events.
+ // |error| must be an error, as in, |error.ok()| can't be true.
+ virtual void HandleError(Status error) = 0;
+};
+
+namespace cbor {
+// The binary encoding for the inspector protocol follows the CBOR specification
+// (RFC 7049). Additional constraints:
+// - Only indefinite length maps and arrays are supported.
+// - Maps and arrays are wrapped with an envelope, that is, a
+// CBOR tag with value 24 followed by a byte string specifying
+// the byte length of the enclosed map / array. The byte string
+// must use a 32 bit wide length.
+// - At the top level, a message must be an indefinite length map
+// wrapped by an envelope.
+// - Maximal size for messages is 2^32 (4 GB).
+// - For scalars, we support only the int32_t range, encoded as
+// UNSIGNED/NEGATIVE (major types 0 / 1).
+// - UTF16 strings, including with unbalanced surrogate pairs, are encoded
+// as CBOR BYTE_STRING (major type 2). For such strings, the number of
+// bytes encoded must be even.
+// - UTF8 strings (major type 3) are supported.
+// - 7 bit US-ASCII strings must always be encoded as UTF8 strings, never
+// as UTF16 strings.
+// - Arbitrary byte arrays, in the inspector protocol called 'binary',
+// are encoded as BYTE_STRING (major type 2), prefixed with a byte
+// indicating base64 when rendered as JSON.
+
+// =============================================================================
+// Detecting CBOR content
+// =============================================================================
+
+// The first byte for an envelope, which we use for wrapping dictionaries
+// and arrays; and the byte that indicates a byte string with 32 bit length.
+// These two bytes start an envelope, and thereby also any CBOR message
+// produced or consumed by this protocol. See also |EnvelopeEncoder| below.
+uint8_t InitialByteForEnvelope();
+uint8_t InitialByteFor32BitLengthByteString();
+
+// Checks whether |msg| is a cbor message.
+bool IsCBORMessage(span<uint8_t> msg);
+
+// =============================================================================
+// Encoding individual CBOR items
+// =============================================================================
+
+// Some constants for CBOR tokens that only take a single byte on the wire.
+uint8_t EncodeTrue();
+uint8_t EncodeFalse();
+uint8_t EncodeNull();
+uint8_t EncodeIndefiniteLengthArrayStart();
+uint8_t EncodeIndefiniteLengthMapStart();
+uint8_t EncodeStop();
+
+// Encodes |value| as |UNSIGNED| (major type 0) iff >= 0, or |NEGATIVE|
+// (major type 1) iff < 0.
+void EncodeInt32(int32_t value, std::vector<uint8_t>* out);
+void EncodeInt32(int32_t value, std::string* out);
+
+// Encodes a UTF16 string as a BYTE_STRING (major type 2). Each utf16
+// character in |in| is emitted with most significant byte first,
+// appending to |out|.
+void EncodeString16(span<uint16_t> in, std::vector<uint8_t>* out);
+void EncodeString16(span<uint16_t> in, std::string* out);
+
+// Encodes a UTF8 string |in| as STRING (major type 3).
+void EncodeString8(span<uint8_t> in, std::vector<uint8_t>* out);
+void EncodeString8(span<uint8_t> in, std::string* out);
+
+// Encodes the given |latin1| string as STRING8.
+// If any non-ASCII character is present, it will be represented
+// as a 2 byte UTF8 sequence.
+void EncodeFromLatin1(span<uint8_t> latin1, std::vector<uint8_t>* out);
+void EncodeFromLatin1(span<uint8_t> latin1, std::string* out);
+
+// Encodes the given |utf16| string as STRING8 if it's entirely US-ASCII.
+// Otherwise, encodes as STRING16.
+void EncodeFromUTF16(span<uint16_t> utf16, std::vector<uint8_t>* out);
+void EncodeFromUTF16(span<uint16_t> utf16, std::string* out);
+
+// Encodes arbitrary binary data in |in| as a BYTE_STRING (major type 2) with
+// definitive length, prefixed with tag 22 indicating expected conversion to
+// base64 (see RFC 7049, Table 3 and Section 2.4.4.2).
+void EncodeBinary(span<uint8_t> in, std::vector<uint8_t>* out);
+void EncodeBinary(span<uint8_t> in, std::string* out);
+
+// Encodes / decodes a double as Major type 7 (SIMPLE_VALUE),
+// with additional info = 27, followed by 8 bytes in big endian.
+void EncodeDouble(double value, std::vector<uint8_t>* out);
+void EncodeDouble(double value, std::string* out);
+
+// =============================================================================
+// cbor::EnvelopeEncoder - for wrapping submessages
+// =============================================================================
+
+// An envelope indicates the byte length of a wrapped item.
+// We use this for maps and array, which allows the decoder
+// to skip such (nested) values whole sale.
+// It's implemented as a CBOR tag (major type 6) with additional
+// info = 24, followed by a byte string with a 32 bit length value;
+// so the maximal structure that we can wrap is 2^32 bits long.
+// See also: https://tools.ietf.org/html/rfc7049#section-2.4.4.1
+class EnvelopeEncoder {
+ public:
+ // Emits the envelope start bytes and records the position for the
+ // byte size in |byte_size_pos_|. Also emits empty bytes for the
+ // byte size so that encoding can continue.
+ void EncodeStart(std::vector<uint8_t>* out);
+ void EncodeStart(std::string* out);
+ // This records the current size in |out| at position byte_size_pos_.
+ // Returns true iff successful.
+ bool EncodeStop(std::vector<uint8_t>* out);
+ bool EncodeStop(std::string* out);
+
+ private:
+ // Offset in the output where the 32 bit length placeholder was written.
+ size_t byte_size_pos_ = 0;
+};
+
+// =============================================================================
+// cbor::NewCBOREncoder - for encoding from a streaming parser
+// =============================================================================
+
+// This can be used to convert to CBOR, by passing the return value to a parser
+// that drives it. The handler will encode into |out|, and iff an error occurs
+// it will set |status| to an error and clear |out|. Otherwise, |status.ok()|
+// will be |true|.
+std::unique_ptr<StreamingParserHandler> NewCBOREncoder(
+ std::vector<uint8_t>* out,
+ Status* status);
+std::unique_ptr<StreamingParserHandler> NewCBOREncoder(std::string* out,
+ Status* status);
+
+// =============================================================================
+// cbor::CBORTokenizer - for parsing individual CBOR items
+// =============================================================================
+
+// Tags for the tokens within a CBOR message that CBORTokenizer understands.
+// Note that this is not the same terminology as the CBOR spec (RFC 7049),
+// but rather, our adaptation. For instance, we lump unsigned and signed
+// major type into INT32 here (and disallow values outside the int32_t range).
+enum class CBORTokenTag {
+ // Encountered an error in the structure of the message. Consult
+ // status() for details.
+ ERROR_VALUE,
+ // Booleans and NULL.
+ TRUE_VALUE,
+ FALSE_VALUE,
+ NULL_VALUE,
+ // An int32_t (signed 32 bit integer).
+ INT32,
+ // A double (64 bit floating point).
+ DOUBLE,
+ // A UTF8 string.
+ STRING8,
+ // A UTF16 string.
+ STRING16,
+ // A binary string.
+ BINARY,
+ // Starts an indefinite length map; after the map start we expect
+ // alternating keys and values, followed by STOP.
+ MAP_START,
+ // Starts an indefinite length array; after the array start we
+ // expect values, followed by STOP.
+ ARRAY_START,
+ // Ends a map or an array.
+ STOP,
+ // An envelope indicator, wrapping a map or array.
+ // Internally this carries the byte length of the wrapped
+ // map or array. While CBORTokenizer::Next() will read / skip the entire
+ // envelope, CBORTokenizer::EnterEnvelope() reads the tokens
+ // inside of it.
+ ENVELOPE,
+ // We've reached the end there is nothing else to read.
+ DONE,
+};
+
+// The major types from RFC 7049 Section 2.1.
+enum class MajorType {
+ UNSIGNED = 0,
+ NEGATIVE = 1,
+ BYTE_STRING = 2,
+ STRING = 3,
+ ARRAY = 4,
+ MAP = 5,
+ TAG = 6,
+ SIMPLE_VALUE = 7
+};
+
+// CBORTokenizer segments a CBOR message, presenting the tokens therein as
+// numbers, strings, etc. This is not a complete CBOR parser, but makes it much
+// easier to implement one (e.g. ParseCBOR, above). It can also be used to parse
+// messages partially.
+class CBORTokenizer {
+ public:
+ explicit CBORTokenizer(span<uint8_t> bytes);
+ ~CBORTokenizer();
+
+ // Identifies the current token that we're looking at,
+ // or ERROR_VALUE (in which case ::Status() has details)
+ // or DONE (if we're past the last token).
+ CBORTokenTag TokenTag() const;
+
+ // Advances to the next token.
+ void Next();
+ // Can only be called if TokenTag() == CBORTokenTag::ENVELOPE.
+ // While Next() would skip past the entire envelope / what it's
+ // wrapping, EnterEnvelope positions the cursor inside of the envelope,
+ // letting the client explore the nested structure.
+ void EnterEnvelope();
+
+ // If TokenTag() is CBORTokenTag::ERROR_VALUE, then Status().error describes
+ // the error more precisely; otherwise it'll be set to Error::OK.
+ // In either case, Status().pos is the current position.
+ struct Status Status() const;
+
+ // The following methods retrieve the token values. They can only
+ // be called if TokenTag() matches.
+
+ // To be called only if ::TokenTag() == CBORTokenTag::INT32.
+ int32_t GetInt32() const;
+
+ // To be called only if ::TokenTag() == CBORTokenTag::DOUBLE.
+ double GetDouble() const;
+
+ // To be called only if ::TokenTag() == CBORTokenTag::STRING8.
+ span<uint8_t> GetString8() const;
+
+ // Wire representation for STRING16 is low byte first (little endian).
+ // To be called only if ::TokenTag() == CBORTokenTag::STRING16.
+ span<uint8_t> GetString16WireRep() const;
+
+ // To be called only if ::TokenTag() == CBORTokenTag::BINARY.
+ span<uint8_t> GetBinary() const;
+
+ // To be called only if ::TokenTag() == CBORTokenTag::ENVELOPE.
+ span<uint8_t> GetEnvelopeContents() const;
+
+ private:
+ void ReadNextToken(bool enter_envelope);
+ void SetToken(CBORTokenTag token, size_t token_byte_length);
+ void SetError(Error error);
+
+ span<uint8_t> bytes_; // the message being tokenized (not owned)
+ CBORTokenTag token_tag_; // classification of the current token
+ struct Status status_; // error details / current position
+ size_t token_byte_length_; // wire length of the current token
+ MajorType token_start_type_; // major type of the current token
+ uint64_t token_start_internal_value_; // raw value from the token header
+};
+
+// =============================================================================
+// cbor::ParseCBOR - for receiving streaming parser events for CBOR messages
+// =============================================================================
+
+// Parses a CBOR encoded message from |bytes|, sending events to
+// |out|. If an error occurs, sends |out->HandleError|, and parsing stops.
+// The client is responsible for discarding the already received information in
+// that case.
+void ParseCBOR(span<uint8_t> bytes, StreamingParserHandler* out);
+
+// =============================================================================
+// cbor::AppendString8EntryToMap - for limited in-place editing of messages
+// =============================================================================
+
+// Modifies the |cbor| message by appending a new key/value entry at the end
+// of the map. Patches up the envelope size; Status.ok() iff successful.
+// If not successful, |cbor| may be corrupted after this call.
+Status AppendString8EntryToCBORMap(span<uint8_t> string8_key,
+ span<uint8_t> string8_value,
+ std::vector<uint8_t>* cbor);
+Status AppendString8EntryToCBORMap(span<uint8_t> string8_key,
+ span<uint8_t> string8_value,
+ std::string* cbor);
+
+namespace internals { // Exposed only for writing tests.
+int8_t ReadTokenStart(span<uint8_t> bytes,
+ cbor::MajorType* type,
+ uint64_t* value);
+
+void WriteTokenStart(cbor::MajorType type,
+ uint64_t value,
+ std::vector<uint8_t>* encoded);
+void WriteTokenStart(cbor::MajorType type,
+ uint64_t value,
+ std::string* encoded);
+} // namespace internals
+} // namespace cbor
+
+namespace json {
+// Client code must provide an instance. Implementation should delegate
+// to whatever is appropriate.
+class Platform {
+ public:
+ virtual ~Platform() = default;
+ // Parses |str| into |result|. Returns false iff there are
+ // leftover characters or parsing errors.
+ virtual bool StrToD(const char* str, double* result) const = 0;
+
+ // Prints |value| in a format suitable for JSON.
+ // Returns a heap-allocated, NUL-terminated string owned by the caller.
+ virtual std::unique_ptr<char[]> DToStr(double value) const = 0;
+};
+
+// =============================================================================
+// json::NewJSONEncoder - for encoding streaming parser events as JSON
+// =============================================================================
+
+// Returns a handler object which will write ascii characters to |out|.
+// |status->ok()| will be false iff the handler routine HandleError() is called.
+// In that case, we'll stop emitting output.
+// Except for calling the HandleError routine at any time, the client
+// code must call the Handle* methods in an order in which they'd occur
+// in valid JSON; otherwise we may crash (the code uses assert).
+std::unique_ptr<StreamingParserHandler> NewJSONEncoder(
+ const Platform* platform,
+ std::vector<uint8_t>* out,
+ Status* status);
+std::unique_ptr<StreamingParserHandler> NewJSONEncoder(const Platform* platform,
+ std::string* out,
+ Status* status);
+
+// =============================================================================
+// json::ParseJSON - for receiving streaming parser events for JSON
+// =============================================================================
+
+void ParseJSON(const Platform& platform,
+ span<uint8_t> chars,
+ StreamingParserHandler* handler);
+void ParseJSON(const Platform& platform,
+ span<uint16_t> chars,
+ StreamingParserHandler* handler);
+
+// =============================================================================
+// json::ConvertCBORToJSON, json::ConvertJSONToCBOR - for transcoding
+// =============================================================================
+Status ConvertCBORToJSON(const Platform& platform,
+ span<uint8_t> cbor,
+ std::string* json);
+Status ConvertCBORToJSON(const Platform& platform,
+ span<uint8_t> cbor,
+ std::vector<uint8_t>* json);
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint8_t> json,
+ std::vector<uint8_t>* cbor);
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint16_t> json,
+ std::vector<uint8_t>* cbor);
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint8_t> json,
+ std::string* cbor);
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint16_t> json,
+ std::string* cbor);
+} // namespace json
+} // namespace v8_inspector_protocol_encoding
+
+#endif // V8_INSPECTOR_PROTOCOL_ENCODING_ENCODING_H_
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc b/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
new file mode 100644
index 0000000000..338d1ece10
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding_test.cc
@@ -0,0 +1,1878 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "encoding.h"
+
+#include <array>
+#include <clocale>
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#include "encoding_test_helper.h"
+
+using testing::ElementsAreArray;
+
+namespace v8_inspector_protocol_encoding {
+
+class TestPlatform : public json::Platform {
+ bool StrToD(const char* str, double* result) const override {
+ // This is not thread-safe
+ // (see https://en.cppreference.com/w/cpp/locale/setlocale)
+ // but good enough for a unittest.
+ const char* saved_locale = std::setlocale(LC_NUMERIC, nullptr);
+ char* end;
+ *result = std::strtod(str, &end);
+ std::setlocale(LC_NUMERIC, saved_locale);
+ if (errno == ERANGE) {
+ // errno must be reset, e.g. see the example here:
+ // https://en.cppreference.com/w/cpp/string/byte/strtof
+ errno = 0;
+ return false;
+ }
+ return end == str + strlen(str);
+ }
+
+ std::unique_ptr<char[]> DToStr(double value) const override {
+ std::stringstream ss;
+ ss.imbue(std::locale("C"));
+ ss << value;
+ std::string str = ss.str();
+ std::unique_ptr<char[]> result(new char[str.size() + 1]);
+ memcpy(result.get(), str.c_str(), str.size() + 1);
+ return result;
+ }
+};
+
+const json::Platform& GetTestPlatform() {
+ static TestPlatform* platform = new TestPlatform;
+ return *platform;
+}
+
+// =============================================================================
+// span - sequence of bytes
+// =============================================================================
+
+template <typename T>
+class SpanTest : public ::testing::Test {};
+
+using TestTypes = ::testing::Types<uint8_t, uint16_t>;
+TYPED_TEST_SUITE(SpanTest, TestTypes);
+
+TYPED_TEST(SpanTest, Empty) {
+ span<TypeParam> empty;
+ EXPECT_TRUE(empty.empty());
+ EXPECT_EQ(0u, empty.size());
+ EXPECT_EQ(0u, empty.size_bytes());
+ EXPECT_EQ(empty.begin(), empty.end());
+}
+
+TYPED_TEST(SpanTest, SingleItem) {
+ TypeParam single_item = 42;
+ span<TypeParam> singular(&single_item, 1);
+ EXPECT_FALSE(singular.empty());
+ EXPECT_EQ(1u, singular.size());
+ EXPECT_EQ(sizeof(TypeParam), singular.size_bytes());
+ EXPECT_EQ(singular.begin() + 1, singular.end());
+ EXPECT_EQ(42, singular[0]);
+}
+
+TYPED_TEST(SpanTest, FiveItems) {
+ std::vector<TypeParam> test_input = {31, 32, 33, 34, 35};
+ span<TypeParam> five_items(test_input.data(), 5);
+ EXPECT_FALSE(five_items.empty());
+ EXPECT_EQ(5u, five_items.size());
+ EXPECT_EQ(sizeof(TypeParam) * 5, five_items.size_bytes());
+ EXPECT_EQ(five_items.begin() + 5, five_items.end());
+ EXPECT_EQ(31, five_items[0]);
+ EXPECT_EQ(32, five_items[1]);
+ EXPECT_EQ(33, five_items[2]);
+ EXPECT_EQ(34, five_items[3]);
+ EXPECT_EQ(35, five_items[4]);
+ span<TypeParam> three_items = five_items.subspan(2);
+ EXPECT_EQ(3u, three_items.size());
+ EXPECT_EQ(33, three_items[0]);
+ EXPECT_EQ(34, three_items[1]);
+ EXPECT_EQ(35, three_items[2]);
+ span<TypeParam> two_items = five_items.subspan(2, 2);
+ EXPECT_EQ(2u, two_items.size());
+ EXPECT_EQ(33, two_items[0]);
+ EXPECT_EQ(34, two_items[1]);
+}
+
+TEST(SpanFromTest, FromConstCharAndLiteral) {
+ // Testing this is useful because strlen(nullptr) is undefined.
+ EXPECT_EQ(nullptr, SpanFrom(nullptr).data());
+ EXPECT_EQ(0u, SpanFrom(nullptr).size());
+
+ const char* kEmpty = "";
+ EXPECT_EQ(kEmpty, reinterpret_cast<const char*>(SpanFrom(kEmpty).data()));
+ EXPECT_EQ(0u, SpanFrom(kEmpty).size());
+
+ const char* kFoo = "foo";
+ EXPECT_EQ(kFoo, reinterpret_cast<const char*>(SpanFrom(kFoo).data()));
+ EXPECT_EQ(3u, SpanFrom(kFoo).size());
+
+ EXPECT_EQ(3u, SpanFrom("foo").size());
+}
+
+// =============================================================================
+// Status and Error codes
+// =============================================================================
+
+TEST(StatusTest, StatusToASCIIString) {
+ Status ok_status;
+ EXPECT_EQ("OK", ok_status.ToASCIIString());
+ Status json_error(Error::JSON_PARSER_COLON_EXPECTED, 42);
+ EXPECT_EQ("JSON: colon expected at position 42", json_error.ToASCIIString());
+ Status cbor_error(Error::CBOR_TRAILING_JUNK, 21);
+ EXPECT_EQ("CBOR: trailing junk at position 21", cbor_error.ToASCIIString());
+}
+
+namespace cbor {
+
+// =============================================================================
+// Detecting CBOR content
+// =============================================================================
+
+TEST(IsCBORMessage, SomeSmokeTests) {
+ std::vector<uint8_t> empty;
+ EXPECT_FALSE(IsCBORMessage(SpanFrom(empty)));
+ std::vector<uint8_t> hello = {'H', 'e', 'l', 'o', ' ', 't',
+ 'h', 'e', 'r', 'e', '!'};
+ EXPECT_FALSE(IsCBORMessage(SpanFrom(hello)));
+ std::vector<uint8_t> example = {0xd8, 0x5a, 0, 0, 0, 0};
+ EXPECT_TRUE(IsCBORMessage(SpanFrom(example)));
+ std::vector<uint8_t> one = {0xd8, 0x5a, 0, 0, 0, 1, 1};
+ EXPECT_TRUE(IsCBORMessage(SpanFrom(one)));
+}
+
+// =============================================================================
+// Encoding individual CBOR items
+// cbor::CBORTokenizer - for parsing individual CBOR items
+// =============================================================================
+
+//
+// EncodeInt32 / CBORTokenTag::INT32
+//
+TEST(EncodeDecodeInt32Test, Roundtrips23) {
+ // This roundtrips the int32_t value 23 through the pair of EncodeInt32 /
+ // CBORTokenizer; this is interesting since 23 is encoded as a single byte.
+ std::vector<uint8_t> encoded;
+ EncodeInt32(23, &encoded);
+ // first three bits: major type = 0; remaining five bits: additional info =
+ // value 23.
+ EXPECT_THAT(encoded, ElementsAreArray(std::array<uint8_t, 1>{{23}}));
+
+ // Reverse direction: decode with CBORTokenizer.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::INT32, tokenizer.TokenTag());
+ EXPECT_EQ(23, tokenizer.GetInt32());
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeInt32Test, RoundtripsUint8) {
+ // This roundtrips the int32_t value 42 through the pair of EncodeInt32 /
+ // CBORTokenizer. This is different from Roundtrips23 because 42 is encoded
+ // in an extra byte after the initial one.
+ std::vector<uint8_t> encoded;
+ EncodeInt32(42, &encoded);
+ // first three bits: major type = 0;
+ // remaining five bits: additional info = 24, indicating payload is uint8.
+ EXPECT_THAT(encoded, ElementsAreArray(std::array<uint8_t, 2>{{24, 42}}));
+
+ // Reverse direction: decode with CBORTokenizer.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::INT32, tokenizer.TokenTag());
+ EXPECT_EQ(42, tokenizer.GetInt32());
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeInt32Test, RoundtripsUint16) {
+ // 500 is encoded as a uint16 after the initial byte.
+ std::vector<uint8_t> encoded;
+ EncodeInt32(500, &encoded);
+ // 1 for initial byte, 2 for uint16.
+ EXPECT_EQ(3u, encoded.size());
+ // first three bits: major type = 0;
+ // remaining five bits: additional info = 25, indicating payload is uint16.
+ EXPECT_EQ(25, encoded[0]);
+ EXPECT_EQ(0x01, encoded[1]);
+ EXPECT_EQ(0xf4, encoded[2]);
+
+ // Reverse direction: decode with CBORTokenizer.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::INT32, tokenizer.TokenTag());
+ EXPECT_EQ(500, tokenizer.GetInt32());
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeInt32Test, RoundtripsInt32Max) {
+ // std::numeric_limits<int32_t>::max() is encoded as a uint32 after the initial byte.
+ std::vector<uint8_t> encoded;
+ EncodeInt32(std::numeric_limits<int32_t>::max(), &encoded);
+ // 1 for initial byte, 4 for the uint32.
+ // first three bits: major type = 0;
+ // remaining five bits: additional info = 26, indicating payload is uint32.
+ EXPECT_THAT(
+ encoded,
+ ElementsAreArray(std::array<uint8_t, 5>{{26, 0x7f, 0xff, 0xff, 0xff}}));
+
+ // Reverse direction: decode with CBORTokenizer.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::INT32, tokenizer.TokenTag());
+ EXPECT_EQ(std::numeric_limits<int32_t>::max(), tokenizer.GetInt32());
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeInt32Test, RoundtripsInt32Min) {
+ // std::numeric_limits<int32_t>::min() is encoded as a uint32 after the initial byte.
+ std::vector<uint8_t> encoded;
+ EncodeInt32(std::numeric_limits<int32_t>::min(), &encoded);
+ // 1 for initial byte, 4 for the uint32.
+ // first three bits: major type = 1;
+ // remaining five bits: additional info = 26, indicating payload is uint32.
+ EXPECT_THAT(encoded, ElementsAreArray(std::array<uint8_t, 5>{
+ {1 << 5 | 26, 0x7f, 0xff, 0xff, 0xff}}));
+
+ // Reverse direction: decode with CBORTokenizer.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::INT32, tokenizer.TokenTag());
+ EXPECT_EQ(std::numeric_limits<int32_t>::min(), tokenizer.GetInt32());
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeInt32Test, CantRoundtripUint32) {
+ // 0xdeadbeef is a value which does not fit below
+ // std::numeric_limits<int32_t>::max(), so we can't encode
+ // it with EncodeInt32. However, CBOR does support this, so we
+ // encode it here manually with the internal routine, just to observe
+ // that it's considered an invalid int32 by CBORTokenizer.
+ std::vector<uint8_t> encoded;
+ internals::WriteTokenStart(MajorType::UNSIGNED, 0xdeadbeef, &encoded);
+ // 1 for initial byte, 4 for the uint32.
+ // first three bits: major type = 0;
+ // remaining five bits: additional info = 26, indicating payload is uint32.
+ EXPECT_THAT(
+ encoded,
+ ElementsAreArray(std::array<uint8_t, 5>{{26, 0xde, 0xad, 0xbe, 0xef}}));
+
+ // Now try to decode; we treat this as an invalid INT32.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ // 0xdeadbeef is > std::numeric_limits<int32_t>::max().
+ EXPECT_EQ(CBORTokenTag::ERROR_VALUE, tokenizer.TokenTag());
+ EXPECT_EQ(Error::CBOR_INVALID_INT32, tokenizer.Status().error);
+}
+
+TEST(EncodeDecodeInt32Test, DecodeErrorCases) {
+ struct TestCase {
+ std::vector<uint8_t> data;
+ std::string msg;
+ };
+ std::vector<TestCase> tests{{
+ TestCase{
+ {24},
+ "additional info = 24 would require 1 byte of payload (but it's 0)"},
+ TestCase{{27, 0xaa, 0xbb, 0xcc},
+ "additional info = 27 would require 8 bytes of payload (but "
+ "it's 3)"},
+ TestCase{{29}, "additional info = 29 isn't recognized"},
+ TestCase{{1 << 5 | 27, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ "Max UINT64 payload is outside the allowed range"},
+ TestCase{{1 << 5 | 26, 0xff, 0xff, 0xff, 0xff},
+ "Max UINT32 payload is outside the allowed range"},
+ TestCase{{1 << 5 | 26, 0x80, 0x00, 0x00, 0x00},
+ "UINT32 payload w/ high bit set is outside the allowed range"},
+ }};
+ for (const TestCase& test : tests) {
+ SCOPED_TRACE(test.msg);
+ CBORTokenizer tokenizer(SpanFrom(test.data));
+ EXPECT_EQ(CBORTokenTag::ERROR_VALUE, tokenizer.TokenTag());
+ EXPECT_EQ(Error::CBOR_INVALID_INT32, tokenizer.Status().error);
+ }
+}
+
+TEST(EncodeDecodeInt32Test, RoundtripsMinus24) {
+ // This roundtrips the int32_t value -24 through the pair of EncodeInt32 /
+ // CBORTokenizer; this is interesting since -24 is encoded as
+ // a single byte as NEGATIVE, and it tests the specific encoding
+ // (note how for unsigned the single byte covers values up to 23).
+ // Additional examples are covered in RoundtripsAdditionalExamples.
+ std::vector<uint8_t> encoded;
+ EncodeInt32(-24, &encoded);
+ // first three bits: major type = 1; remaining five bits: additional info =
+ // value 23.
+ EXPECT_THAT(encoded, ElementsAreArray(std::array<uint8_t, 1>{{1 << 5 | 23}}));
+
+ // Reverse direction: decode with CBORTokenizer.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::INT32, tokenizer.TokenTag());
+ EXPECT_EQ(-24, tokenizer.GetInt32());
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeInt32Test, RoundtripsAdditionalNegativeExamples) {
+ std::vector<int32_t> examples = {-1,
+ -10,
+ -24,
+ -25,
+ -300,
+ -30000,
+ -300 * 1000,
+ -1000 * 1000,
+ -1000 * 1000 * 1000,
+ std::numeric_limits<int32_t>::min()};
+ for (int32_t example : examples) {
+ SCOPED_TRACE(std::string("example ") + std::to_string(example));
+ std::vector<uint8_t> encoded;
+ EncodeInt32(example, &encoded);
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::INT32, tokenizer.TokenTag());
+ EXPECT_EQ(example, tokenizer.GetInt32());
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+ }
+}
+
+//
+// EncodeString16 / CBORTokenTag::STRING16
+//
+TEST(EncodeDecodeString16Test, RoundtripsEmpty) {
+ // This roundtrips the empty utf16 string through the pair of EncodeString16 /
+ // CBORTokenizer.
+ std::vector<uint8_t> encoded;
+ EncodeString16(span<uint16_t>(), &encoded);
+ EXPECT_EQ(1u, encoded.size());
+ // first three bits: major type = 2; remaining five bits: additional info =
+ // size 0.
+ EXPECT_EQ(2 << 5, encoded[0]);
+
+ // Reverse direction: decode with CBORTokenizer.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::STRING16, tokenizer.TokenTag());
+ span<uint8_t> decoded_string16_wirerep = tokenizer.GetString16WireRep();
+ EXPECT_TRUE(decoded_string16_wirerep.empty());
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+// On the wire, STRING16 is encoded as little endian (least
+// significant byte first). The host may or may not be little endian,
+// so this routine follows the advice in
+// https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html.
+std::vector<uint16_t> String16WireRepToHost(span<uint8_t> in) {
+ // must be even number of bytes.
+ CHECK_EQ(in.size() & 1, 0u);
+ std::vector<uint16_t> host_out;
+ for (size_t ii = 0; ii < in.size(); ii += 2)
+ host_out.push_back(in[ii + 1] << 8 | in[ii]);
+ return host_out;
+}
+
+TEST(EncodeDecodeString16Test, RoundtripsHelloWorld) {
+ // This roundtrips the hello world message which is given here in utf16
+ // characters. 0xd83c, 0xdf0e: UTF16 encoding for the "Earth Globe Americas"
+ // character, 🌎.
+ std::array<uint16_t, 10> msg{
+ {'H', 'e', 'l', 'l', 'o', ',', ' ', 0xd83c, 0xdf0e, '.'}};
+ std::vector<uint8_t> encoded;
+ EncodeString16(span<uint16_t>(msg.data(), msg.size()), &encoded);
+ // This will be encoded as BYTE_STRING of length 20, so the 20 is encoded in
+ // the additional info part of the initial byte. Payload is two bytes for each
+ // UTF16 character.
+ uint8_t initial_byte = /*major type=*/2 << 5 | /*additional info=*/20;
+ std::array<uint8_t, 21> encoded_expected = {
+ {initial_byte, 'H', 0, 'e', 0, 'l', 0, 'l', 0, 'o', 0,
+ ',', 0, ' ', 0, 0x3c, 0xd8, 0x0e, 0xdf, '.', 0}};
+ EXPECT_THAT(encoded, ElementsAreArray(encoded_expected));
+
+ // Now decode to complete the roundtrip.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::STRING16, tokenizer.TokenTag());
+ std::vector<uint16_t> decoded =
+ String16WireRepToHost(tokenizer.GetString16WireRep());
+ EXPECT_THAT(decoded, ElementsAreArray(msg));
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+
+ // For bonus points, we look at the decoded message in UTF8 as well so we can
+ // easily see it on the terminal screen.
+ std::string utf8_decoded = UTF16ToUTF8(SpanFrom(decoded));
+ EXPECT_EQ("Hello, 🌎.", utf8_decoded);
+}
+
+TEST(EncodeDecodeString16Test, Roundtrips500) {
+ // We roundtrip a message that has 250 16 bit values. Each of these are just
+ // set to their index. 250 is interesting because the cbor spec uses a
+ // BYTE_STRING of length 500 for one of their examples of how to encode the
+ // start of it (section 2.1) so it's easy for us to look at the first three
+ // bytes closely.
+ std::vector<uint16_t> two_fifty;
+ for (uint16_t ii = 0; ii < 250; ++ii)
+ two_fifty.push_back(ii);
+ std::vector<uint8_t> encoded;
+ EncodeString16(span<uint16_t>(two_fifty.data(), two_fifty.size()), &encoded);
+ EXPECT_EQ(3u + 250u * 2, encoded.size());
+ // Now check the first three bytes:
+ // Major type: 2 (BYTE_STRING)
+ // Additional information: 25, indicating size is represented by 2 bytes.
+ // Bytes 1 and 2 encode 500 (0x01f4).
+ EXPECT_EQ(2 << 5 | 25, encoded[0]);
+ EXPECT_EQ(0x01, encoded[1]);
+ EXPECT_EQ(0xf4, encoded[2]);
+
+ // Now decode to complete the roundtrip.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::STRING16, tokenizer.TokenTag());
+ std::vector<uint16_t> decoded =
+ String16WireRepToHost(tokenizer.GetString16WireRep());
+ EXPECT_THAT(decoded, ElementsAreArray(two_fifty));
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeString16Test, ErrorCases) {
+ struct TestCase {
+ std::vector<uint8_t> data;
+ std::string msg;
+ };
+ std::vector<TestCase> tests{
+ {TestCase{{2 << 5 | 1, 'a'},
+ "length must be divisible by 2 (but it's 1)"},
+ TestCase{{2 << 5 | 29}, "additional info = 29 isn't recognized"},
+ TestCase{{2 << 5 | 9, 1, 2, 3, 4, 5, 6, 7, 8},
+ "length (9) points just past the end of the test case"},
+ TestCase{{2 << 5 | 27, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 'a', 'b', 'c'},
+ "large length pointing past the end of the test case"}}};
+ for (const TestCase& test : tests) {
+ SCOPED_TRACE(test.msg);
+ CBORTokenizer tokenizer(SpanFrom(test.data));
+ EXPECT_EQ(CBORTokenTag::ERROR_VALUE, tokenizer.TokenTag());
+ EXPECT_EQ(Error::CBOR_INVALID_STRING16, tokenizer.Status().error);
+ }
+}
+
+//
+// EncodeString8 / CBORTokenTag::STRING8
+//
+TEST(EncodeDecodeString8Test, RoundtripsHelloWorld) {
+ // This roundtrips the hello world message which is given here in utf8
+ // characters. 🌎 is a four byte utf8 character.
+ std::string utf8_msg = "Hello, 🌎.";
+ std::vector<uint8_t> msg(utf8_msg.begin(), utf8_msg.end());
+ std::vector<uint8_t> encoded;
+ EncodeString8(SpanFrom(utf8_msg), &encoded);
+ // This will be encoded as STRING of length 12, so the 12 is encoded in
+ // the additional info part of the initial byte. Payload is one byte per
+ // utf8 byte.
+ uint8_t initial_byte = /*major type=*/3 << 5 | /*additional info=*/12;
+ std::array<uint8_t, 13> encoded_expected = {{initial_byte, 'H', 'e', 'l', 'l',
+ 'o', ',', ' ', 0xF0, 0x9f, 0x8c,
+ 0x8e, '.'}};
+ EXPECT_THAT(encoded, ElementsAreArray(encoded_expected));
+
+ // Now decode to complete the roundtrip.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::STRING8, tokenizer.TokenTag());
+ std::vector<uint8_t> decoded(tokenizer.GetString8().begin(),
+ tokenizer.GetString8().end());
+ EXPECT_THAT(decoded, ElementsAreArray(msg));
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeString8Test, ErrorCases) {
+ struct TestCase {
+ std::vector<uint8_t> data;
+ std::string msg;
+ };
+ std::vector<TestCase> tests{
+ {TestCase{{3 << 5 | 29}, "additional info = 29 isn't recognized"},
+ TestCase{{3 << 5 | 9, 1, 2, 3, 4, 5, 6, 7, 8},
+ "length (9) points just past the end of the test case"},
+ TestCase{{3 << 5 | 27, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 'a', 'b', 'c'},
+ "large length pointing past the end of the test case"}}};
+ for (const TestCase& test : tests) {
+ SCOPED_TRACE(test.msg);
+ CBORTokenizer tokenizer(SpanFrom(test.data));
+ EXPECT_EQ(CBORTokenTag::ERROR_VALUE, tokenizer.TokenTag());
+ EXPECT_EQ(Error::CBOR_INVALID_STRING8, tokenizer.Status().error);
+ }
+}
+
+TEST(EncodeFromLatin1Test, ConvertsToUTF8IfNeeded) {
+ std::vector<std::pair<std::string, std::string>> examples = {
+ {"Hello, world.", "Hello, world."},
+ {"Above: \xDC"
+ "ber",
+ "Above: Über"},
+ {"\xA5 500 are about \xA3 3.50; a y with umlaut is \xFF",
+ "¥ 500 are about £ 3.50; a y with umlaut is ÿ"}};
+
+ for (const auto& example : examples) {
+ const std::string& latin1 = example.first;
+ const std::string& expected_utf8 = example.second;
+ std::vector<uint8_t> encoded;
+ EncodeFromLatin1(SpanFrom(latin1), &encoded);
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::STRING8, tokenizer.TokenTag());
+ std::vector<uint8_t> decoded(tokenizer.GetString8().begin(),
+ tokenizer.GetString8().end());
+ std::string decoded_str(decoded.begin(), decoded.end());
+ EXPECT_THAT(decoded_str, testing::Eq(expected_utf8));
+ }
+}
+
+TEST(EncodeFromUTF16Test, ConvertsToUTF8IfEasy) {
+ std::vector<uint16_t> ascii = {'e', 'a', 's', 'y'};
+ std::vector<uint8_t> encoded;
+ EncodeFromUTF16(span<uint16_t>(ascii.data(), ascii.size()), &encoded);
+
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::STRING8, tokenizer.TokenTag());
+ std::vector<uint8_t> decoded(tokenizer.GetString8().begin(),
+ tokenizer.GetString8().end());
+ std::string decoded_str(decoded.begin(), decoded.end());
+ EXPECT_THAT(decoded_str, testing::Eq("easy"));
+}
+
+TEST(EncodeFromUTF16Test, EncodesAsString16IfNeeded) {
+ // Since this message contains non-ASCII characters, the routine is
+ // forced to encode as UTF16. We see this below by checking that the
+ // token tag is STRING16.
+ std::vector<uint16_t> msg = {'H', 'e', 'l', 'l', 'o',
+ ',', ' ', 0xd83c, 0xdf0e, '.'};
+ std::vector<uint8_t> encoded;
+ EncodeFromUTF16(span<uint16_t>(msg.data(), msg.size()), &encoded);
+
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::STRING16, tokenizer.TokenTag());
+ std::vector<uint16_t> decoded =
+ String16WireRepToHost(tokenizer.GetString16WireRep());
+ std::string utf8_decoded = UTF16ToUTF8(SpanFrom(decoded));
+ EXPECT_EQ("Hello, 🌎.", utf8_decoded);
+}
+
+//
+// EncodeBinary / CBORTokenTag::BINARY
+//
+TEST(EncodeDecodeBinaryTest, RoundtripsHelloWorld) {
+ std::vector<uint8_t> binary = {'H', 'e', 'l', 'l', 'o', ',', ' ',
+ 'w', 'o', 'r', 'l', 'd', '.'};
+ std::vector<uint8_t> encoded;
+ EncodeBinary(span<uint8_t>(binary.data(), binary.size()), &encoded);
+ // So, on the wire we see that the binary blob travels unmodified.
+ EXPECT_THAT(
+ encoded,
+ ElementsAreArray(std::array<uint8_t, 15>{
+ {(6 << 5 | 22), // tag 22 indicating base64 interpretation in JSON
+ (2 << 5 | 13), // BYTE_STRING (type 2) of length 13
+ 'H', 'e', 'l', 'l', 'o', ',', ' ', 'w', 'o', 'r', 'l', 'd', '.'}}));
+ std::vector<uint8_t> decoded;
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::BINARY, tokenizer.TokenTag());
+ EXPECT_EQ(0, static_cast<int>(tokenizer.Status().error));
+ decoded = std::vector<uint8_t>(tokenizer.GetBinary().begin(),
+ tokenizer.GetBinary().end());
+ EXPECT_THAT(decoded, ElementsAreArray(binary));
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeBinaryTest, ErrorCases) {
+ struct TestCase {
+ std::vector<uint8_t> data;
+ std::string msg;
+ };
+ std::vector<TestCase> tests{{TestCase{
+ {6 << 5 | 22, // tag 22 indicating base64 interpretation in JSON
+ 2 << 5 | 27, // BYTE_STRING (type 2), followed by 8 bytes length
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ "large length pointing past the end of the test case"}}};
+ for (const TestCase& test : tests) {
+ SCOPED_TRACE(test.msg);
+ CBORTokenizer tokenizer(SpanFrom(test.data));
+ EXPECT_EQ(CBORTokenTag::ERROR_VALUE, tokenizer.TokenTag());
+ EXPECT_EQ(Error::CBOR_INVALID_BINARY, tokenizer.Status().error);
+ }
+}
+
+//
+// EncodeDouble / CBORTokenTag::DOUBLE
+//
+TEST(EncodeDecodeDoubleTest, RoundtripsWikipediaExample) {
+ // https://en.wikipedia.org/wiki/Double-precision_floating-point_format
+ // provides the example of a hex representation 3FD5 5555 5555 5555, which
+ // approximates 1/3.
+
+ const double kOriginalValue = 1.0 / 3;
+ std::vector<uint8_t> encoded;
+ EncodeDouble(kOriginalValue, &encoded);
+ // first three bits: major type = 7; remaining five bits: additional info =
+ // value 27. This is followed by 8 bytes of payload (which match Wikipedia).
+ EXPECT_THAT(
+ encoded,
+ ElementsAreArray(std::array<uint8_t, 9>{
+ {7 << 5 | 27, 0x3f, 0xd5, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55}}));
+
+ // Reverse direction: decode and compare with original value.
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::DOUBLE, tokenizer.TokenTag());
+ EXPECT_THAT(tokenizer.GetDouble(), testing::DoubleEq(kOriginalValue));
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+}
+
+TEST(EncodeDecodeDoubleTest, RoundtripsAdditionalExamples) {
+ std::vector<double> examples = {0.0,
+ 1.0,
+ -1.0,
+ 3.1415,
+ std::numeric_limits<double>::min(),
+ std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::infinity(),
+ std::numeric_limits<double>::quiet_NaN()};
+ for (double example : examples) {
+ SCOPED_TRACE(std::string("example ") + std::to_string(example));
+ std::vector<uint8_t> encoded;
+ EncodeDouble(example, &encoded);
+ CBORTokenizer tokenizer(SpanFrom(encoded));
+ EXPECT_EQ(CBORTokenTag::DOUBLE, tokenizer.TokenTag());
+ if (std::isnan(example))
+ EXPECT_TRUE(std::isnan(tokenizer.GetDouble()));
+ else
+ EXPECT_THAT(tokenizer.GetDouble(), testing::DoubleEq(example));
+ tokenizer.Next();
+ EXPECT_EQ(CBORTokenTag::DONE, tokenizer.TokenTag());
+ }
+}
+
+// =============================================================================
+// cbor::NewCBOREncoder - for encoding from a streaming parser
+// =============================================================================
+
+void EncodeUTF8ForTest(const std::string& key, std::vector<uint8_t>* out) {
+ EncodeString8(SpanFrom(key), out);
+}
+TEST(JSONToCBOREncoderTest, SevenBitStrings) {
+ // When a string can be represented as 7 bit ASCII, the encoder will use the
+ // STRING (major Type 3) type, so the actual characters end up as bytes on the
+ // wire.
+ std::vector<uint8_t> encoded;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> encoder =
+ NewCBOREncoder(&encoded, &status);
+ std::vector<uint16_t> utf16 = {'f', 'o', 'o'};
+ encoder->HandleString16(span<uint16_t>(utf16.data(), utf16.size()));
+ EXPECT_EQ(Error::OK, status.error);
+ // Here we assert that indeed, seven bit strings are represented as
+ // bytes on the wire, "foo" is just "foo".
+ EXPECT_THAT(encoded,
+ ElementsAreArray(std::array<uint8_t, 4>{
+ {/*major type 3*/ 3 << 5 | /*length*/ 3, 'f', 'o', 'o'}}));
+}
+
+TEST(JsonCborRoundtrip, EncodingDecoding) {
+ // Hits all the cases except binary and error in StreamingParserHandler, first
+ // parsing a JSON message into CBOR, then parsing it back from CBOR into JSON.
+ std::string json =
+ "{"
+ "\"string\":\"Hello, \\ud83c\\udf0e.\","
+ "\"double\":3.1415,"
+ "\"int\":1,"
+ "\"negative int\":-1,"
+ "\"bool\":true,"
+ "\"null\":null,"
+ "\"array\":[1,2,3]"
+ "}";
+ std::vector<uint8_t> encoded;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> encoder =
+ NewCBOREncoder(&encoded, &status);
+ span<uint8_t> ascii_in = SpanFrom(json);
+ json::ParseJSON(GetTestPlatform(), ascii_in, encoder.get());
+ std::vector<uint8_t> expected = {
+ 0xd8, // envelope
+ 0x5a, // byte string with 32 bit length
+ 0, 0, 0, 94, // length is 94 bytes
+ };
+ expected.push_back(0xbf); // indef length map start
+ EncodeString8(SpanFrom("string"), &expected);
+ // This is followed by the encoded string for "Hello, 🌎."
+ // So, it's the same bytes that we tested above in
+ // EncodeDecodeString16Test.RoundtripsHelloWorld.
+ expected.push_back(/*major type=*/2 << 5 | /*additional info=*/20);
+ for (uint8_t ch : std::array<uint8_t, 20>{
+ {'H', 0, 'e', 0, 'l', 0, 'l', 0, 'o', 0,
+ ',', 0, ' ', 0, 0x3c, 0xd8, 0x0e, 0xdf, '.', 0}})
+ expected.push_back(ch);
+ EncodeString8(SpanFrom("double"), &expected);
+ EncodeDouble(3.1415, &expected);
+ EncodeString8(SpanFrom("int"), &expected);
+ EncodeInt32(1, &expected);
+ EncodeString8(SpanFrom("negative int"), &expected);
+ EncodeInt32(-1, &expected);
+ EncodeString8(SpanFrom("bool"), &expected);
+ expected.push_back(7 << 5 | 21); // RFC 7049 Section 2.3, Table 2: true
+ EncodeString8(SpanFrom("null"), &expected);
+ expected.push_back(7 << 5 | 22); // RFC 7049 Section 2.3, Table 2: null
+ EncodeString8(SpanFrom("array"), &expected);
+ expected.push_back(0xd8); // envelope
+ expected.push_back(0x5a); // byte string with 32 bit length
+ // the length is 5 bytes (that's up to end indef length array below).
+ for (uint8_t ch : std::array<uint8_t, 4>{{0, 0, 0, 5}})
+ expected.push_back(ch);
+ expected.push_back(0x9f); // RFC 7049 Section 2.2.1, indef length array start
+ expected.push_back(1); // Three UNSIGNED values (easy since Major Type 0)
+ expected.push_back(2);
+ expected.push_back(3);
+ expected.push_back(0xff); // End indef length array
+ expected.push_back(0xff); // End indef length map
+ EXPECT_TRUE(status.ok());
+ EXPECT_THAT(encoded, ElementsAreArray(expected));
+
+ // And now we roundtrip, decoding the message we just encoded.
+ std::string decoded;
+ std::unique_ptr<StreamingParserHandler> json_encoder =
+ NewJSONEncoder(&GetTestPlatform(), &decoded, &status);
+ ParseCBOR(span<uint8_t>(encoded.data(), encoded.size()), json_encoder.get());
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(json, decoded);
+}
+
+TEST(JsonCborRoundtrip, MoreRoundtripExamples) {
+ std::vector<std::string> examples = {
+ // Tests that after closing a nested objects, additional key/value pairs
+ // are considered.
+ "{\"foo\":{\"bar\":1},\"baz\":2}", "{\"foo\":[1,2,3],\"baz\":2}"};
+ for (const std::string& json : examples) {
+ SCOPED_TRACE(std::string("example: ") + json);
+ std::vector<uint8_t> encoded;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> encoder =
+ NewCBOREncoder(&encoded, &status);
+ span<uint8_t> ascii_in = SpanFrom(json);
+ ParseJSON(GetTestPlatform(), ascii_in, encoder.get());
+ std::string decoded;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &decoded, &status);
+ ParseCBOR(span<uint8_t>(encoded.data(), encoded.size()), json_writer.get());
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(json, decoded);
+ }
+}
+
+TEST(JSONToCBOREncoderTest, HelloWorldBinary_WithTripToJson) {
+ // The StreamingParserHandler::HandleBinary is a special case: The JSON parser
+ // will never call this method, because JSON does not natively support the
+ // binary type. So, we can't fully roundtrip. However, the other direction
+ // works: binary will be rendered in JSON, as a base64 string. So, we make
+ // calls to the encoder directly here, to construct a message, and one of
+ // these calls is ::HandleBinary, to which we pass a "binary" string
+ // containing "Hello, world.".
+ std::vector<uint8_t> encoded;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> encoder =
+ NewCBOREncoder(&encoded, &status);
+ encoder->HandleMapBegin();
+ // Emit a key.
+ std::vector<uint16_t> key = {'f', 'o', 'o'};
+ encoder->HandleString16(SpanFrom(key));
+ // Emit the binary payload, an arbitrary array of bytes that happens to
+ // be the ascii message "Hello, world.".
+ encoder->HandleBinary(SpanFrom(std::vector<uint8_t>{
+ 'H', 'e', 'l', 'l', 'o', ',', ' ', 'w', 'o', 'r', 'l', 'd', '.'}));
+ encoder->HandleMapEnd();
+ EXPECT_EQ(Error::OK, status.error);
+
+ // Now drive the json writer via the CBOR decoder.
+ std::string decoded;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &decoded, &status);
+ ParseCBOR(SpanFrom(encoded), json_writer.get());
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+ // "Hello, world." in base64 is "SGVsbG8sIHdvcmxkLg==".
+ EXPECT_EQ("{\"foo\":\"SGVsbG8sIHdvcmxkLg==\"}", decoded);
+}
+
+// =============================================================================
+// cbor::ParseCBOR - for receiving streaming parser events for CBOR messages
+// =============================================================================
+
+// The smallest valid message — an envelope wrapping an empty map — must
+// decode to the JSON "{}".
+TEST(ParseCBORTest, ParseEmptyCBORMessage) {
+ // An envelope starting with 0xd8, 0x5a, with the byte length
+ // of 2, containing a map that's empty (0xbf for map
+ // start, and 0xff for map end).
+ std::vector<uint8_t> in = {0xd8, 0x5a, 0, 0, 0, 2, 0xbf, 0xff};
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(in.data(), in.size()), json_writer.get());
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ("{}", out);
+}
+
+// Decodes a hand-built CBOR map {"msg": "Hello, 🌎."} into JSON, with the
+// globe emoji escaped as a UTF-16 surrogate pair in the output.
+TEST(ParseCBORTest, ParseCBORHelloWorld) {
+ // kPayloadLen counts the bytes following the 6-byte envelope header.
+ const uint8_t kPayloadLen = 27;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen};
+ bytes.push_back(0xbf); // start indef length map.
+ EncodeString8(SpanFrom("msg"), &bytes); // key: msg
+ // Now write the value, the familiar "Hello, 🌎." where the globe is expressed
+ // as two utf16 chars.
+ bytes.push_back(/*major type=*/2 << 5 | /*additional info=*/20);
+ for (uint8_t ch : std::array<uint8_t, 20>{
+ {'H', 0, 'e', 0, 'l', 0, 'l', 0, 'o', 0,
+ ',', 0, ' ', 0, 0x3c, 0xd8, 0x0e, 0xdf, '.', 0}})
+ bytes.push_back(ch);
+ bytes.push_back(0xff); // stop byte
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ("{\"msg\":\"Hello, \\ud83c\\udf0e.\"}", out);
+}
+
+// Map keys may be string8 (UTF-8), including non-ASCII characters; they are
+// escaped as \uXXXX sequences in the JSON output.
+TEST(ParseCBORTest, UTF8IsSupportedInKeys) {
+ const uint8_t kPayloadLen = 11;
+ std::vector<uint8_t> bytes = {cbor::InitialByteForEnvelope(),
+ cbor::InitialByteFor32BitLengthByteString(),
+ 0,
+ 0,
+ 0,
+ kPayloadLen};
+ bytes.push_back(cbor::EncodeIndefiniteLengthMapStart());
+ // Two UTF16 chars.
+ EncodeString8(SpanFrom("🌎"), &bytes);
+ // Can be encoded as a single UTF16 char.
+ EncodeString8(SpanFrom("☾"), &bytes);
+ bytes.push_back(cbor::EncodeStop());
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ("{\"\\ud83c\\udf0e\":\"\\u263e\"}", out);
+}
+
+// An empty byte sequence is rejected with CBOR_NO_INPUT and no output.
+TEST(ParseCBORTest, NoInputError) {
+ std::vector<uint8_t> in = {};
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(in.data(), in.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_NO_INPUT, status.error);
+ EXPECT_EQ("", out);
+}
+
+TEST(ParseCBORTest, InvalidStartByteError) {
+ // Here we test that some actual json, which usually starts with {,
+ // is not considered CBOR. CBOR messages must start with 0xd8, the
+ // initial byte of the envelope (which is followed by 0x5a); see the
+ // valid messages in the tests above.
+ std::string json = "{\"msg\": \"Hello, world.\"}";
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(SpanFrom(json), json_writer.get());
+ EXPECT_EQ(Error::CBOR_INVALID_START_BYTE, status.error);
+ EXPECT_EQ("", out);
+}
+
+// The message ends right after a map key, where a value was expected; the
+// error position is the end of the input.
+TEST(ParseCBORTest, UnexpectedEofExpectedValueError) {
+ constexpr uint8_t kPayloadLen = 5;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // map start
+ // A key; so value would be next.
+ EncodeString8(SpanFrom("key"), &bytes);
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE, status.error);
+ EXPECT_EQ(bytes.size(), status.pos);
+ EXPECT_EQ("", out);
+}
+
+// The message ends inside an unterminated indefinite-length array; the error
+// position is the end of the input.
+TEST(ParseCBORTest, UnexpectedEofInArrayError) {
+ constexpr uint8_t kPayloadLen = 8;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // The byte for starting a map.
+ // A key; so value would be next.
+ EncodeString8(SpanFrom("array"), &bytes);
+ bytes.push_back(0x9f); // byte for indefinite length array start.
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_UNEXPECTED_EOF_IN_ARRAY, status.error);
+ EXPECT_EQ(bytes.size(), status.pos);
+ EXPECT_EQ("", out);
+}
+
+// The message ends right after the map-start byte; the error position (7) is
+// the end of the 7-byte input, where a key or stop byte was expected.
+TEST(ParseCBORTest, UnexpectedEofInMapError) {
+ constexpr uint8_t kPayloadLen = 1;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // The byte for starting a map.
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_UNEXPECTED_EOF_IN_MAP, status.error);
+ EXPECT_EQ(7u, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// A CBOR null in key position is rejected; the error position (7) is the
+// offset of the offending null byte, right after the map start.
+TEST(ParseCBORTest, InvalidMapKeyError) {
+ constexpr uint8_t kPayloadLen = 2;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0,
+ 0, 0, kPayloadLen, // envelope
+ 0xbf, // map start
+ 7 << 5 | 22}; // null (not a valid map key)
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_INVALID_MAP_KEY, status.error);
+ EXPECT_EQ(7u, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// Builds a CBOR message with |depth| nested envelope + map layers, each
+// holding the single key "key"; the innermost value is the string8
+// "innermost_value". Counterpart of MakeNestedJson for the CBOR
+// stack-limit tests below.
+std::vector<uint8_t> MakeNestedCBOR(int depth) {
+ std::vector<uint8_t> out;
+ std::vector<EnvelopeEncoder> open_envelopes;
+ for (int level = 0; level < depth; ++level) {
+ open_envelopes.emplace_back();
+ open_envelopes.back().EncodeStart(&out);
+ out.push_back(0xbf); // indefinite length map start
+ EncodeString8(SpanFrom("key"), &out);
+ }
+ EncodeString8(SpanFrom("innermost_value"), &out);
+ while (!open_envelopes.empty()) {
+ out.push_back(0xff); // stop byte, finishes map.
+ open_envelopes.back().EncodeStop(&out);
+ open_envelopes.pop_back();
+ }
+ return out;
+}
+
+// The CBOR parser's nesting limit is 300; exceeding it reports
+// CBOR_STACK_LIMIT_EXCEEDED at the position of the 301st opening segment.
+TEST(ParseCBORTest, StackLimitExceededError) {
+ { // Depth 3: no stack limit exceeded error and is easy to inspect.
+ std::vector<uint8_t> bytes = MakeNestedCBOR(3);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+ EXPECT_EQ("{\"key\":{\"key\":{\"key\":\"innermost_value\"}}}", out);
+ }
+ { // Depth 300: no stack limit exceeded.
+ std::vector<uint8_t> bytes = MakeNestedCBOR(300);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+ }
+
+ // We just want to know the length of one opening map so we can compute
+ // where the error is encountered. So we look at a small example and find
+ // the second envelope start.
+ std::vector<uint8_t> small_example = MakeNestedCBOR(3);
+ size_t opening_segment_size = 1; // Start after the first envelope start.
+ while (opening_segment_size < small_example.size() &&
+ small_example[opening_segment_size] != 0xd8)
+ opening_segment_size++;
+
+ { // Depth 301: limit exceeded.
+ std::vector<uint8_t> bytes = MakeNestedCBOR(301);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_STACK_LIMIT_EXCEEDED, status.error);
+ EXPECT_EQ(opening_segment_size * 301, status.pos);
+ }
+ { // Depth 320: still limit exceeded, and at the same pos as for depth 301
+ std::vector<uint8_t> bytes = MakeNestedCBOR(320);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_STACK_LIMIT_EXCEEDED, status.error);
+ EXPECT_EQ(opening_segment_size * 301, status.pos);
+ }
+}
+
+// A CBOR tag (major type 6) in value position is unsupported; the error
+// position is the offset of the tag's initial byte.
+TEST(ParseCBORTest, UnsupportedValueError) {
+ constexpr uint8_t kPayloadLen = 6;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // map start
+ EncodeString8(SpanFrom("key"), &bytes);
+ size_t error_pos = bytes.size();
+ bytes.push_back(6 << 5 | 5); // tags aren't supported yet.
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_UNSUPPORTED_VALUE, status.error);
+ EXPECT_EQ(error_pos, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// A byte string interpreted as string16 must have an even byte count;
+// an odd-length one is rejected at the position of its initial byte.
+TEST(ParseCBORTest, InvalidString16Error) {
+ constexpr uint8_t kPayloadLen = 11;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // map start
+ EncodeString8(SpanFrom("key"), &bytes);
+ size_t error_pos = bytes.size();
+ // a BYTE_STRING of length 5 as value; since we interpret these as string16,
+ // it's going to be invalid as each character would need two bytes, but
+ // 5 isn't divisible by 2.
+ bytes.push_back(2 << 5 | 5);
+ for (int ii = 0; ii < 5; ++ii)
+ bytes.push_back(' ');
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_INVALID_STRING16, status.error);
+ EXPECT_EQ(error_pos, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// A string8 whose declared length runs past the end of the input is rejected
+// at the position of its initial byte.
+TEST(ParseCBORTest, InvalidString8Error) {
+ constexpr uint8_t kPayloadLen = 6;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // map start
+ EncodeString8(SpanFrom("key"), &bytes);
+ size_t error_pos = bytes.size();
+ // a STRING of length 5 as value, but we're at the end of the bytes array
+ // so it can't be decoded successfully.
+ bytes.push_back(3 << 5 | 5);
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_INVALID_STRING8, status.error);
+ EXPECT_EQ(error_pos, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// A base64-tagged byte string whose declared length (10) exceeds the
+// remaining input (2 bytes) is rejected at the tag's position.
+TEST(ParseCBORTest, InvalidBinaryError) {
+ constexpr uint8_t kPayloadLen = 9;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // map start
+ EncodeString8(SpanFrom("key"), &bytes);
+ size_t error_pos = bytes.size();
+ bytes.push_back(6 << 5 | 22); // base64 hint for JSON; indicates binary
+ bytes.push_back(2 << 5 | 10); // BYTE_STRING (major type 2) of length 10
+ // Just two garbage bytes, not enough for the binary.
+ bytes.push_back(0x31);
+ bytes.push_back(0x23);
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_INVALID_BINARY, status.error);
+ EXPECT_EQ(error_pos, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// A double's initial byte must be followed by 8 bytes; only 2 are provided,
+// so the parse fails at the double's initial byte.
+TEST(ParseCBORTest, InvalidDoubleError) {
+ constexpr uint8_t kPayloadLen = 8;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // map start
+ EncodeString8(SpanFrom("key"), &bytes);
+ size_t error_pos = bytes.size();
+ bytes.push_back(7 << 5 | 27); // initial byte for double
+ // Just two garbage bytes, not enough to represent an actual double.
+ bytes.push_back(0x31);
+ bytes.push_back(0x23);
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_INVALID_DOUBLE, status.error);
+ EXPECT_EQ(error_pos, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// An unsigned value outside the int32_t range is rejected with
+// CBOR_INVALID_INT32 at the position of the token's initial byte.
+TEST(ParseCBORTest, InvalidSignedError) {
+ constexpr uint8_t kPayloadLen = 14;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // map start
+ EncodeString8(SpanFrom("key"), &bytes);
+ size_t error_pos = bytes.size();
+ // uint64_t max is a perfectly fine value to encode as CBOR unsigned,
+ // but we don't support this since we only cover the int32_t range.
+ internals::WriteTokenStart(MajorType::UNSIGNED,
+ std::numeric_limits<uint64_t>::max(), &bytes);
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_INVALID_INT32, status.error);
+ EXPECT_EQ(error_pos, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// Extra bytes after a complete, valid message are an error; the position
+// points at the first trailing byte and the output is discarded.
+TEST(ParseCBORTest, TrailingJunk) {
+ constexpr uint8_t kPayloadLen = 35;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // map start
+ EncodeString8(SpanFrom("key"), &bytes);
+ EncodeString8(SpanFrom("value"), &bytes);
+ bytes.push_back(0xff); // Up to here, it's a perfectly fine msg.
+ size_t error_pos = bytes.size();
+ EncodeString8(SpanFrom("trailing junk"), &bytes);
+
+ internals::WriteTokenStart(MajorType::UNSIGNED,
+ std::numeric_limits<uint64_t>::max(), &bytes);
+ EXPECT_EQ(kPayloadLen, bytes.size() - 6);
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(span<uint8_t>(bytes.data(), bytes.size()), json_writer.get());
+ EXPECT_EQ(Error::CBOR_TRAILING_JUNK, status.error);
+ EXPECT_EQ(error_pos, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// =============================================================================
+// cbor::AppendString8EntryToMap - for limited in-place editing of messages
+// =============================================================================
+
+// Typed test fixture: AppendString8EntryToCBORMap accepts any byte container,
+// so each test below runs once for std::vector<uint8_t> and once for
+// std::string.
+template <typename T>
+class AppendString8EntryToMapTest : public ::testing::Test {};
+
+using ContainerTestTypes = ::testing::Types<std::vector<uint8_t>, std::string>;
+TYPED_TEST_SUITE(AppendString8EntryToMapTest, ContainerTestTypes);
+
+// Appending "foo": "bar" to a valid single-entry map succeeds, and the
+// patched message round-trips through the JSON encoder.
+TYPED_TEST(AppendString8EntryToMapTest, AppendsEntrySuccessfully) {
+ constexpr uint8_t kPayloadLen = 12;
+ std::vector<uint8_t> bytes = {0xd8, 0x5a, 0, 0, 0, kPayloadLen, // envelope
+ 0xbf}; // map start
+ size_t pos_before_payload = bytes.size() - 1;
+ EncodeString8(SpanFrom("key"), &bytes);
+ EncodeString8(SpanFrom("value"), &bytes);
+ bytes.push_back(0xff); // A perfectly fine cbor message.
+ EXPECT_EQ(kPayloadLen, bytes.size() - pos_before_payload);
+
+ TypeParam msg(bytes.begin(), bytes.end());
+
+ Status status =
+ AppendString8EntryToCBORMap(SpanFrom("foo"), SpanFrom("bar"), &msg);
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+ std::string out;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(SpanFrom(msg), json_writer.get());
+ EXPECT_EQ("{\"key\":\"value\",\"foo\":\"bar\"}", out);
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+}
+
+// Appending can be repeated: three successive appends to an initially empty
+// map all succeed and all entries survive the round trip to JSON.
+TYPED_TEST(AppendString8EntryToMapTest, AppendThreeEntries) {
+ std::vector<uint8_t> encoded = {
+ 0xd8, 0x5a, 0, 0, 0, 2, EncodeIndefiniteLengthMapStart(), EncodeStop()};
+ EXPECT_EQ(Error::OK, AppendString8EntryToCBORMap(SpanFrom("key"),
+ SpanFrom("value"), &encoded)
+ .error);
+ EXPECT_EQ(Error::OK, AppendString8EntryToCBORMap(SpanFrom("key1"),
+ SpanFrom("value1"), &encoded)
+ .error);
+ EXPECT_EQ(Error::OK, AppendString8EntryToCBORMap(SpanFrom("key2"),
+ SpanFrom("value2"), &encoded)
+ .error);
+ TypeParam msg(encoded.begin(), encoded.end());
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> json_writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ ParseCBOR(SpanFrom(msg), json_writer.get());
+ EXPECT_EQ("{\"key\":\"value\",\"key1\":\"value1\",\"key2\":\"value2\"}", out);
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+}
+
+// The envelope wraps an array, not a map; appending fails at position 6,
+// the offset of the array-start byte.
+TYPED_TEST(AppendString8EntryToMapTest, MapStartExpected_Error) {
+ std::vector<uint8_t> bytes = {
+ 0xd8, 0x5a, 0, 0, 0, 1, EncodeIndefiniteLengthArrayStart()};
+ TypeParam msg(bytes.begin(), bytes.end());
+ Status status =
+ AppendString8EntryToCBORMap(SpanFrom("key"), SpanFrom("value"), &msg);
+ EXPECT_EQ(Error::CBOR_MAP_START_EXPECTED, status.error);
+ EXPECT_EQ(6u, status.pos);
+}
+
+// The map is not terminated by the stop byte (0xff); appending fails at
+// position 7, where the stop byte was expected.
+TYPED_TEST(AppendString8EntryToMapTest, MapStopExpected_Error) {
+ std::vector<uint8_t> bytes = {
+ 0xd8, 0x5a, 0, 0, 0, 2, EncodeIndefiniteLengthMapStart(), 42};
+ TypeParam msg(bytes.begin(), bytes.end());
+ Status status =
+ AppendString8EntryToCBORMap(SpanFrom("key"), SpanFrom("value"), &msg);
+ EXPECT_EQ(Error::CBOR_MAP_STOP_EXPECTED, status.error);
+ EXPECT_EQ(7u, status.pos);
+}
+
+// Four malformed-envelope variants; each is rejected with
+// CBOR_INVALID_ENVELOPE at position 0 (the envelope itself is broken).
+TYPED_TEST(AppendString8EntryToMapTest, InvalidEnvelope_Error) {
+ { // First byte is wrong: the leading 0xd8 is missing.
+ std::vector<uint8_t> bytes = {
+ 0x5a, 0, 0, 0, 2, EncodeIndefiniteLengthMapStart(), EncodeStop(), 0};
+ TypeParam msg(bytes.begin(), bytes.end());
+ Status status =
+ AppendString8EntryToCBORMap(SpanFrom("key"), SpanFrom("value"), &msg);
+ EXPECT_EQ(Error::CBOR_INVALID_ENVELOPE, status.error);
+ EXPECT_EQ(0u, status.pos);
+ }
+ { // Second byte is wrong: 0x7a instead of 0x5a.
+ std::vector<uint8_t> bytes = {
+ 0xd8, 0x7a, 0, 0, 0, 2, EncodeIndefiniteLengthMapStart(), EncodeStop()};
+ TypeParam msg(bytes.begin(), bytes.end());
+ Status status =
+ AppendString8EntryToCBORMap(SpanFrom("key"), SpanFrom("value"), &msg);
+ EXPECT_EQ(Error::CBOR_INVALID_ENVELOPE, status.error);
+ EXPECT_EQ(0u, status.pos);
+ }
+ { // Invalid envelope size: declares 3 payload bytes but only 2 follow.
+ std::vector<uint8_t> bytes = {
+ 0xd8, 0x5a, 0, 0, 0, 3, EncodeIndefiniteLengthMapStart(), EncodeStop(),
+ };
+ TypeParam msg(bytes.begin(), bytes.end());
+ Status status =
+ AppendString8EntryToCBORMap(SpanFrom("key"), SpanFrom("value"), &msg);
+ EXPECT_EQ(Error::CBOR_INVALID_ENVELOPE, status.error);
+ EXPECT_EQ(0u, status.pos);
+ }
+ { // Invalid envelope size: declares 1 payload byte but 2 follow.
+ std::vector<uint8_t> bytes = {
+ 0xd8, 0x5a, 0, 0, 0, 1, EncodeIndefiniteLengthMapStart(), EncodeStop(),
+ };
+ TypeParam msg(bytes.begin(), bytes.end());
+ Status status =
+ AppendString8EntryToCBORMap(SpanFrom("key"), SpanFrom("value"), &msg);
+ EXPECT_EQ(Error::CBOR_INVALID_ENVELOPE, status.error);
+ EXPECT_EQ(0u, status.pos);
+ }
+}
+} // namespace cbor
+
+namespace json {
+
+// =============================================================================
+// json::NewJSONEncoder - for encoding streaming parser events as JSON
+// =============================================================================
+
+// Test helper: converts |utf8| to UTF-16 and feeds it to the writer as a
+// string16 event.
+void WriteUTF8AsUTF16(StreamingParserHandler* writer, const std::string& utf8) {
+ writer->HandleString16(SpanFrom(UTF8ToUTF16(SpanFrom(utf8))));
+}
+
+// Exercises every handler event of the JSON encoder (maps, arrays, both
+// string widths, double, int, bool, null, JSON escapes) in one document.
+TEST(JsonStdStringWriterTest, HelloWorld) {
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ writer->HandleMapBegin();
+ WriteUTF8AsUTF16(writer.get(), "msg1");
+ WriteUTF8AsUTF16(writer.get(), "Hello, 🌎.");
+ // The same key/value once more, but entering the encoder as string8.
+ std::string key = "msg1-as-utf8";
+ std::string value = "Hello, 🌎.";
+ writer->HandleString8(SpanFrom(key));
+ writer->HandleString8(SpanFrom(value));
+ WriteUTF8AsUTF16(writer.get(), "msg2");
+ WriteUTF8AsUTF16(writer.get(), "\\\b\r\n\t\f\"");
+ WriteUTF8AsUTF16(writer.get(), "nested");
+ writer->HandleMapBegin();
+ WriteUTF8AsUTF16(writer.get(), "double");
+ writer->HandleDouble(3.1415);
+ WriteUTF8AsUTF16(writer.get(), "int");
+ writer->HandleInt32(-42);
+ WriteUTF8AsUTF16(writer.get(), "bool");
+ writer->HandleBool(false);
+ WriteUTF8AsUTF16(writer.get(), "null");
+ writer->HandleNull();
+ writer->HandleMapEnd();
+ WriteUTF8AsUTF16(writer.get(), "array");
+ writer->HandleArrayBegin();
+ writer->HandleInt32(1);
+ writer->HandleInt32(2);
+ writer->HandleInt32(3);
+ writer->HandleArrayEnd();
+ writer->HandleMapEnd();
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(
+ "{\"msg1\":\"Hello, \\ud83c\\udf0e.\","
+ "\"msg1-as-utf8\":\"Hello, \\ud83c\\udf0e.\","
+ "\"msg2\":\"\\\\\\b\\r\\n\\t\\f\\\"\","
+ "\"nested\":{\"double\":3.1415,\"int\":-42,"
+ "\"bool\":false,\"null\":null},\"array\":[1,2,3]}",
+ out);
+}
+
+TEST(JsonStdStringWriterTest, RepresentingNonFiniteValuesAsNull) {
+ // JSON can't represent +Infinity, -Infinity, or NaN.
+ // So in practice it's mapped to null.
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ writer->HandleMapBegin();
+ writer->HandleString8(SpanFrom("Infinity"));
+ writer->HandleDouble(std::numeric_limits<double>::infinity());
+ writer->HandleString8(SpanFrom("-Infinity"));
+ writer->HandleDouble(-std::numeric_limits<double>::infinity());
+ writer->HandleString8(SpanFrom("NaN"));
+ writer->HandleDouble(std::numeric_limits<double>::quiet_NaN());
+ writer->HandleMapEnd();
+ EXPECT_TRUE(status.ok());
+ // All three non-finite doubles come out as the JSON literal null.
+ EXPECT_EQ("{\"Infinity\":null,\"-Infinity\":null,\"NaN\":null}", out);
+}
+
+TEST(JsonStdStringWriterTest, BinaryEncodedAsJsonString) {
+ // The encoder emits binary submitted to StreamingParserHandler::HandleBinary
+ // as base64. The following three examples are taken from
+ // https://en.wikipedia.org/wiki/Base64.
+ { // 3 input bytes -> 4 base64 chars, no padding.
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ writer->HandleBinary(SpanFrom(std::vector<uint8_t>({'M', 'a', 'n'})));
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ("\"TWFu\"", out);
+ }
+ { // 2 input bytes -> one '=' padding char.
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ writer->HandleBinary(SpanFrom(std::vector<uint8_t>({'M', 'a'})));
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ("\"TWE=\"", out);
+ }
+ { // 1 input byte -> two '=' padding chars.
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ writer->HandleBinary(SpanFrom(std::vector<uint8_t>({'M'})));
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ("\"TQ==\"", out);
+ }
+ { // "Hello, world.", verified with base64decode.org.
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ writer->HandleBinary(SpanFrom(std::vector<uint8_t>(
+ {'H', 'e', 'l', 'l', 'o', ',', ' ', 'w', 'o', 'r', 'l', 'd', '.'})));
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ("\"SGVsbG8sIHdvcmxkLg==\"", out);
+ }
+}
+
+TEST(JsonStdStringWriterTest, HandlesErrors) {
+ // When an error is sent via HandleError, it saves it in the provided
+ // status and clears the output.
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&GetTestPlatform(), &out, &status);
+ writer->HandleMapBegin();
+ WriteUTF8AsUTF16(writer.get(), "msg1");
+ writer->HandleError(Status{Error::JSON_PARSER_VALUE_EXPECTED, 42});
+ // The partial output written before the error must be discarded.
+ EXPECT_EQ(Error::JSON_PARSER_VALUE_EXPECTED, status.error);
+ EXPECT_EQ(42u, status.pos);
+ EXPECT_EQ("", out);
+}
+
+// We'd use Gmock but unfortunately it only handles copyable return types.
+// We'd use Gmock but unfortunately it only handles copyable return types.
+class MockPlatform : public Platform {
+ public:
+ // Not implemented.
+ bool StrToD(const char* str, double* result) const override { return false; }
+
+ // A map with pre-registered responses for DToStr.
+ std::map<double, std::string> dtostr_responses_;
+
+ // Returns the canned response for |value|; CHECK-fails on an
+ // unregistered input.
+ std::unique_ptr<char[]> DToStr(double value) const override {
+ auto it = dtostr_responses_.find(value);
+ CHECK(it != dtostr_responses_.end());
+ const std::string& str = it->second;
+ std::unique_ptr<char[]> response(new char[str.size() + 1]);
+ memcpy(response.get(), str.c_str(), str.size() + 1);
+ return response;
+ }
+};
+
+TEST(JsonStdStringWriterTest, DoubleToString) {
+ // This "broken" platform responds without the leading 0 before the
+ // decimal dot, so it'd be invalid JSON.
+ MockPlatform platform;
+ platform.dtostr_responses_[.1] = ".1";
+ platform.dtostr_responses_[-.7] = "-.7";
+
+ std::string out;
+ Status status;
+ std::unique_ptr<StreamingParserHandler> writer =
+ NewJSONEncoder(&platform, &out, &status);
+ writer->HandleArrayBegin();
+ writer->HandleDouble(.1);
+ writer->HandleDouble(-.7);
+ writer->HandleArrayEnd();
+ // The encoder must repair the platform's output by inserting the
+ // leading zero, yielding valid JSON.
+ EXPECT_EQ("[0.1,-0.7]", out);
+}
+
+// =============================================================================
+// json::ParseJSON - for receiving streaming parser events for JSON
+// =============================================================================
+
+// Test handler that records every parser event as a line of text. On error,
+// the status is captured and str() returns "" (mirroring the encoders, which
+// discard partial output on error).
+class Log : public StreamingParserHandler {
+ public:
+ void HandleMapBegin() override { log_ << "map begin\n"; }
+
+ void HandleMapEnd() override { log_ << "map end\n"; }
+
+ void HandleArrayBegin() override { log_ << "array begin\n"; }
+
+ void HandleArrayEnd() override { log_ << "array end\n"; }
+
+ void HandleString8(span<uint8_t> chars) override {
+ log_ << "string8: " << std::string(chars.begin(), chars.end()) << "\n";
+ }
+
+ void HandleString16(span<uint16_t> chars) override {
+ log_ << "string16: " << UTF16ToUTF8(chars) << "\n";
+ }
+
+ void HandleBinary(span<uint8_t> bytes) override {
+ // JSON doesn't have native support for arbitrary bytes, so our parser will
+ // never call this.
+ CHECK(false);
+ }
+
+ void HandleDouble(double value) override {
+ log_ << "double: " << value << "\n";
+ }
+
+ void HandleInt32(int32_t value) override { log_ << "int: " << value << "\n"; }
+
+ void HandleBool(bool value) override { log_ << "bool: " << value << "\n"; }
+
+ void HandleNull() override { log_ << "null\n"; }
+
+ void HandleError(Status status) override { status_ = status; }
+
+ // Returns the accumulated event log, or "" if an error was recorded.
+ std::string str() const { return status_.ok() ? log_.str() : ""; }
+
+ Status status() const { return status_; }
+
+ private:
+ std::ostringstream log_;
+ Status status_;
+};
+
+// Fixture providing a fresh event Log for each JSON parser test.
+class JsonParserTest : public ::testing::Test {
+ protected:
+ Log log_;
+};
+
+// A one-entry object produces the expected map/key/int event sequence.
+TEST_F(JsonParserTest, SimpleDictionary) {
+ std::string json = "{\"foo\": 42}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_TRUE(log_.status().ok());
+ EXPECT_EQ(
+ "map begin\n"
+ "string16: foo\n"
+ "int: 42\n"
+ "map end\n",
+ log_.str());
+}
+
+TEST_F(JsonParserTest, UsAsciiDelCornerCase) {
+ // DEL (0x7f) is a 7 bit US-ASCII character, and while it is a control
+ // character according to Unicode, it's not considered a control
+ // character in https://tools.ietf.org/html/rfc7159#section-7, so
+ // it can be placed directly into the JSON string, without JSON escaping.
+ std::string json = "{\"foo\": \"a\x7f\"}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_TRUE(log_.status().ok());
+ EXPECT_EQ(
+ "map begin\n"
+ "string16: foo\n"
+ "string16: a\x7f\n"
+ "map end\n",
+ log_.str());
+}
+
+// Whitespace between tokens (including \v here — NOTE(review): \v is beyond
+// the RFC 8259 whitespace set; the parser appears to accept it) is skipped.
+TEST_F(JsonParserTest, Whitespace) {
+ std::string json = "\n {\n\"msg\"\n: \v\"Hello, world.\"\t\r}\t";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_TRUE(log_.status().ok());
+ EXPECT_EQ(
+ "map begin\n"
+ "string16: msg\n"
+ "string16: Hello, world.\n"
+ "map end\n",
+ log_.str());
+}
+
+// Nested objects yield correctly nested map begin/end events.
+TEST_F(JsonParserTest, NestedDictionary) {
+ std::string json = "{\"foo\": {\"bar\": {\"baz\": 1}, \"bar2\": 2}}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_TRUE(log_.status().ok());
+ EXPECT_EQ(
+ "map begin\n"
+ "string16: foo\n"
+ "map begin\n"
+ "string16: bar\n"
+ "map begin\n"
+ "string16: baz\n"
+ "int: 1\n"
+ "map end\n"
+ "string16: bar2\n"
+ "int: 2\n"
+ "map end\n"
+ "map end\n",
+ log_.str());
+}
+
+// Doubles parse from both plain decimal and exponent notation;
+// 31415e-4 equals 3.1415.
+TEST_F(JsonParserTest, Doubles) {
+ std::string json = "{\"foo\": 3.1415, \"bar\": 31415e-4}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_TRUE(log_.status().ok());
+ EXPECT_EQ(
+ "map begin\n"
+ "string16: foo\n"
+ "double: 3.1415\n"
+ "string16: bar\n"
+ "double: 3.1415\n"
+ "map end\n",
+ log_.str());
+}
+
+// A \uXXXX surrogate pair in the input decodes to the actual character.
+TEST_F(JsonParserTest, Unicode) {
+ // Globe character. 0xF0 0x9F 0x8C 0x8E in utf8, 0xD83C 0xDF0E in utf16.
+ std::string json = "{\"msg\": \"Hello, \\uD83C\\uDF0E.\"}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_TRUE(log_.status().ok());
+ EXPECT_EQ(
+ "map begin\n"
+ "string16: msg\n"
+ "string16: Hello, 🌎.\n"
+ "map end\n",
+ log_.str());
+}
+
+// Exercises the UTF-16 input overload of ParseJSON.
+TEST_F(JsonParserTest, Unicode_ParseUtf16) {
+ // Globe character. utf8: 0xF0 0x9F 0x8C 0x8E; utf16: 0xD83C 0xDF0E.
+ // Crescent moon character. utf8: 0xF0 0x9F 0x8C 0x99; utf16: 0xD83C 0xDF19.
+
+ // We provide the moon with json escape, but the earth as utf16 input.
+ // Either way they arrive as utf8 (after decoding in log_.str()).
+ std::vector<uint16_t> json =
+ UTF8ToUTF16(SpanFrom("{\"space\": \"🌎 \\uD83C\\uDF19.\"}"));
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_TRUE(log_.status().ok());
+ EXPECT_EQ(
+ "map begin\n"
+ "string16: space\n"
+ "string16: 🌎 🌙.\n"
+ "map end\n",
+ log_.str());
+}
+
+// Exercises 2-, 3- and 4-byte UTF-8 sequences plus a \u escape, all in
+// one document fed through the UTF-8 input overload.
+TEST_F(JsonParserTest, Unicode_ParseUtf8) {
+ // Used below:
+ // гласность - example for 2 byte utf8, Russian word "glasnost"
+ // 屋 - example for 3 byte utf8, Chinese word for "house"
+ // 🌎 - example for 4 byte utf8: 0xF0 0x9F 0x8C 0x8E; utf16: 0xD83C 0xDF0E.
+ // 🌙 - example for escapes: utf8: 0xF0 0x9F 0x8C 0x99; utf16: 0xD83C 0xDF19.
+
+ // We provide the moon with json escape, but the earth as utf8 input.
+ // Either way they arrive as utf8 (after decoding in log_.str()).
+ std::string json =
+ "{"
+ "\"escapes\": \"\\uD83C\\uDF19\","
+ "\"2 byte\":\"гласность\","
+ "\"3 byte\":\"屋\","
+ "\"4 byte\":\"🌎\""
+ "}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_TRUE(log_.status().ok());
+ EXPECT_EQ(
+ "map begin\n"
+ "string16: escapes\n"
+ "string16: 🌙\n"
+ "string16: 2 byte\n"
+ "string16: гласность\n"
+ "string16: 3 byte\n"
+ "string16: 屋\n"
+ "string16: 4 byte\n"
+ "string16: 🌎\n"
+ "map end\n",
+ log_.str());
+}
+
+TEST_F(JsonParserTest, UnprocessedInputRemainsError) {
+ // Trailing junk after the valid JSON.
+ std::string json = "{\"foo\": 3.1415} junk";
+ size_t junk_idx = json.find("junk");
+ EXPECT_NE(junk_idx, std::string::npos);
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ // The error position is the first non-whitespace trailing byte, and the
+ // already-logged events are discarded.
+ EXPECT_EQ(Error::JSON_PARSER_UNPROCESSED_INPUT_REMAINS, log_.status().error);
+ EXPECT_EQ(junk_idx, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+// Builds a JSON document of |depth| nested objects around the value 42,
+// e.g. MakeNestedJson(2) == "{\"foo\":{\"foo\":42}}". Used by the
+// stack-limit tests below.
+std::string MakeNestedJson(int depth) {
+ std::string result;
+ for (int level = 0; level < depth; ++level) result.append("{\"foo\":");
+ result.append("42");
+ result.append(static_cast<size_t>(depth), '}');
+ return result;
+}
+
+TEST_F(JsonParserTest, StackLimitExceededError_BelowLimit) {
+ // kStackLimit is 300 (see json_parser.cc). First let's
+ // try with a small nested example.
+ std::string json_3 = MakeNestedJson(3);
+ ParseJSON(GetTestPlatform(), SpanFrom(json_3), &log_);
+ EXPECT_TRUE(log_.status().ok());
+ EXPECT_EQ(
+ "map begin\n"
+ "string16: foo\n"
+ "map begin\n"
+ "string16: foo\n"
+ "map begin\n"
+ "string16: foo\n"
+ "int: 42\n"
+ "map end\n"
+ "map end\n"
+ "map end\n",
+ log_.str());
+}
+
+TEST_F(JsonParserTest, StackLimitExceededError_AtLimit) {
+ // Now with kStackLimit (300). Nesting exactly at the limit still parses.
+ std::string json_limit = MakeNestedJson(300);
+ ParseJSON(GetTestPlatform(),
+ span<uint8_t>(reinterpret_cast<const uint8_t*>(json_limit.data()),
+ json_limit.size()),
+ &log_);
+ EXPECT_TRUE(log_.status().ok());
+}
+
+TEST_F(JsonParserTest, StackLimitExceededError_AboveLimit) {
+ // Now with kStackLimit + 1 (301) - it exceeds in the innermost instance.
+ std::string exceeded = MakeNestedJson(301);
+ ParseJSON(GetTestPlatform(), SpanFrom(exceeded), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_STACK_LIMIT_EXCEEDED, log_.status().error);
+ EXPECT_EQ(strlen("{\"foo\":") * 301, log_.status().pos);
+}
+
+TEST_F(JsonParserTest, StackLimitExceededError_WayAboveLimit) {
+ // Now way past the limit. Still, the point of exceeding is 301.
+ std::string far_out = MakeNestedJson(320);
+ ParseJSON(GetTestPlatform(), SpanFrom(far_out), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_STACK_LIMIT_EXCEEDED, log_.status().error);
+ EXPECT_EQ(strlen("{\"foo\":") * 301, log_.status().pos);
+}
+
+TEST_F(JsonParserTest, NoInputError) {
+ std::string json = "";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_NO_INPUT, log_.status().error);
+ EXPECT_EQ(0u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, InvalidTokenError) {
+ std::string json = "|";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_INVALID_TOKEN, log_.status().error);
+ EXPECT_EQ(0u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, InvalidNumberError) {
+ // Mantissa exceeds max (the constant used here is int64_t max).
+ std::string json = "1E9223372036854775807";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_INVALID_NUMBER, log_.status().error);
+ EXPECT_EQ(0u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, InvalidStringError) {
+ // \x22 is an unsupported escape sequence
+ std::string json = "\"foo\\x22\"";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_INVALID_STRING, log_.status().error);
+ EXPECT_EQ(0u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, UnexpectedArrayEndError) {
+ std::string json = "[1,2,]";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_UNEXPECTED_ARRAY_END, log_.status().error);
+ EXPECT_EQ(5u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, CommaOrArrayEndExpectedError) {
+ std::string json = "[1,2 2";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED,
+ log_.status().error);
+ EXPECT_EQ(5u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, StringLiteralExpectedError) {
+ // There's an error because the key bar, a string, is not terminated.
+ std::string json = "{\"foo\": 3.1415, \"bar: 31415e-4}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_STRING_LITERAL_EXPECTED, log_.status().error);
+ EXPECT_EQ(16u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, ColonExpectedError) {
+ std::string json = "{\"foo\", 42}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_COLON_EXPECTED, log_.status().error);
+ EXPECT_EQ(6u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, UnexpectedMapEndError) {
+ std::string json = "{\"foo\": 42, }";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_UNEXPECTED_MAP_END, log_.status().error);
+ EXPECT_EQ(12u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, CommaOrMapEndExpectedError) {
+ // The second separator should be a comma.
+ std::string json = "{\"foo\": 3.1415: \"bar\": 0}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_COMMA_OR_MAP_END_EXPECTED, log_.status().error);
+ EXPECT_EQ(14u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+TEST_F(JsonParserTest, ValueExpectedError) {
+ std::string json = "}";
+ ParseJSON(GetTestPlatform(), SpanFrom(json), &log_);
+ EXPECT_EQ(Error::JSON_PARSER_VALUE_EXPECTED, log_.status().error);
+ EXPECT_EQ(0u, log_.status().pos);
+ EXPECT_EQ("", log_.str());
+}
+
+template <typename T>
+class ConvertJSONToCBORTest : public ::testing::Test {};
+
+using ContainerTestTypes = ::testing::Types<std::vector<uint8_t>, std::string>;
+TYPED_TEST_SUITE(ConvertJSONToCBORTest, ContainerTestTypes);
+
+TYPED_TEST(ConvertJSONToCBORTest, RoundTripValidJson) {
+ std::string json_in = "{\"msg\":\"Hello, world.\",\"lst\":[1,2,3]}";
+ TypeParam json(json_in.begin(), json_in.end());
+ TypeParam cbor;
+ {
+ Status status = ConvertJSONToCBOR(GetTestPlatform(), SpanFrom(json), &cbor);
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+ }
+ TypeParam roundtrip_json;
+ {
+ Status status =
+ ConvertCBORToJSON(GetTestPlatform(), SpanFrom(cbor), &roundtrip_json);
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+ }
+ EXPECT_EQ(json, roundtrip_json);
+}
+
+TYPED_TEST(ConvertJSONToCBORTest, RoundTripValidJson16) {
+ std::vector<uint16_t> json16 = {
+ '{', '"', 'm', 's', 'g', '"', ':', '"', 'H', 'e', 'l', 'l',
+ 'o', ',', ' ', 0xd83c, 0xdf0e, '.', '"', ',', '"', 'l', 's', 't',
+ '"', ':', '[', '1', ',', '2', ',', '3', ']', '}'};
+ TypeParam cbor;
+ {
+ Status status = ConvertJSONToCBOR(
+ GetTestPlatform(), span<uint16_t>(json16.data(), json16.size()), &cbor);
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+ }
+ TypeParam roundtrip_json;
+ {
+ Status status =
+ ConvertCBORToJSON(GetTestPlatform(), SpanFrom(cbor), &roundtrip_json);
+ EXPECT_EQ(Error::OK, status.error);
+ EXPECT_EQ(Status::npos(), status.pos);
+ }
+ std::string json = "{\"msg\":\"Hello, \\ud83c\\udf0e.\",\"lst\":[1,2,3]}";
+ TypeParam expected_json(json.begin(), json.end());
+ EXPECT_EQ(expected_json, roundtrip_json);
+}
+} // namespace json
+} // namespace v8_inspector_protocol_encoding
diff --git a/deps/v8/third_party/inspector_protocol/encoding/encoding_test_helper.h b/deps/v8/third_party/inspector_protocol/encoding/encoding_test_helper.h
new file mode 100644
index 0000000000..84da2e72e8
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/encoding/encoding_test_helper.h
@@ -0,0 +1,33 @@
+// Copyright 2019 The V8 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is V8 specific, to make encoding_test.cc work.
+// It is not rolled from the upstream project.
+
+#ifndef V8_INSPECTOR_PROTOCOL_ENCODING_ENCODING_TEST_HELPER_H_
+#define V8_INSPECTOR_PROTOCOL_ENCODING_ENCODING_TEST_HELPER_H_
+
+#include <string>
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/inspector/v8-string-conversions.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8_inspector_protocol_encoding {
+
+std::string UTF16ToUTF8(span<uint16_t> in) {
+ return v8_inspector::UTF16ToUTF8(in.data(), in.size());
+}
+
+std::vector<uint16_t> UTF8ToUTF16(span<uint8_t> in) {
+ std::basic_string<uint16_t> utf16 = v8_inspector::UTF8ToUTF16(
+ reinterpret_cast<const char*>(in.data()), in.size());
+ return std::vector<uint16_t>(utf16.begin(), utf16.end());
+}
+
+} // namespace v8_inspector_protocol_encoding
+
+#endif // V8_INSPECTOR_PROTOCOL_ENCODING_ENCODING_TEST_HELPER_H_
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
index 2d4463e2d6..7d3b907a26 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
@@ -137,7 +137,7 @@ std::unique_ptr<Value> parseValue(
}
case cbor::CBORTokenTag::STRING16: {
span<uint8_t> wire = tokenizer->GetString16WireRep();
- DCHECK_EQ(wire.size() & 1, 0);
+ DCHECK_EQ(wire.size() & 1, 0u);
std::unique_ptr<Value> value = StringValue::create(StringUtil::fromUTF16(
reinterpret_cast<const uint16_t*>(wire.data()), wire.size() / 2));
tokenizer->Next();
@@ -178,7 +178,12 @@ std::unique_ptr<DictionaryValue> parseMap(
key = StringUtil::fromUTF8(key_span.data(), key_span.size());
tokenizer->Next();
} else if (tokenizer->TokenTag() == cbor::CBORTokenTag::STRING16) {
- return nullptr; // STRING16 not supported yet.
+ span<uint8_t> key_span = tokenizer->GetString16WireRep();
+ if (key_span.size() & 1) return nullptr; // UTF16 is 2 byte multiple.
+ key = StringUtil::fromUTF16(
+ reinterpret_cast<const uint16_t*>(key_span.data()),
+ key_span.size() / 2);
+ tokenizer->Next();
} else {
// Error::CBOR_INVALID_MAP_KEY
return nullptr;
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
index 94bcd8891e..639b39bb52 100644
--- a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
@@ -237,75 +237,6 @@ Binary Binary::fromSpan(const uint8_t* data, size_t size) {
new base::RefCountedBytes(data, size)));
}
-namespace {
-int32_t ReadEnvelopeSize(const uint8_t* in) {
- return (in[0] << 24) + (in[1] << 16) + (in[2] << 8) + in[3];
-}
-
-void WriteEnvelopeSize(uint32_t value, uint8_t* out) {
- *(out++) = (value >> 24) & 0xFF;
- *(out++) = (value >> 16) & 0xFF;
- *(out++) = (value >> 8) & 0xFF;
- *(out++) = (value) & 0xFF;
-}
-
-}
-
-bool AppendStringValueToMapBinary(base::StringPiece in,
- base::StringPiece key, base::StringPiece value, std::string* out) {
- if (in.size() < 1 + 1 + 4 + 1 + 1)
- return false;
- const uint8_t* envelope = reinterpret_cast<const uint8_t*>(in.data());
- if (cbor::InitialByteForEnvelope() != envelope[0])
- return false;
- if (cbor::InitialByteFor32BitLengthByteString() != envelope[1])
- return false;
- if (cbor::EncodeIndefiniteLengthMapStart() != envelope[6])
- return false;
-
- uint32_t envelope_size = ReadEnvelopeSize(envelope + 2);
- if (envelope_size + 2 + 4 != in.size())
- return false;
- if (cbor::EncodeStop() != static_cast<uint8_t>(*in.rbegin()))
- return false;
-
- std::vector<uint8_t> encoded_entry;
- encoded_entry.reserve(1 + 4 + key.size() + 1 + 4 + value.size());
- span<uint8_t> key_span(
- reinterpret_cast<const uint8_t*>(key.data()), key.size());
- cbor::EncodeString8(key_span, &encoded_entry);
- span<uint8_t> value_span(
- reinterpret_cast<const uint8_t*>(value.data()), value.size());
- cbor::EncodeString8(value_span, &encoded_entry);
-
- out->clear();
- out->reserve(in.size() + encoded_entry.size());
- out->append(in.begin(), in.end() - 1);
- out->append(reinterpret_cast<const char*>(encoded_entry.data()),
- encoded_entry.size());
- out->append(1, static_cast<char>(cbor::EncodeStop()));
- std::size_t new_size = envelope_size + out->size() - in.size();
- if (new_size > static_cast<std::size_t>(
- std::numeric_limits<uint32_t>::max())) {
- return false;
- }
- WriteEnvelopeSize(new_size, reinterpret_cast<uint8_t*>(&*out->begin() + 2));
- return true;
-}
-
-bool AppendStringValueToMapJSON(base::StringPiece in,
- base::StringPiece key, base::StringPiece value, std::string* out) {
- if (!in.length() || *in.rbegin() != '}')
- return false;
- std::string suffix =
- base::StringPrintf(", \"%s\": \"%s\"}", key.begin(), value.begin());
- out->clear();
- out->reserve(in.length() + suffix.length() - 1);
- out->append(in.data(), in.length() - 1);
- out->append(suffix);
- return true;
-}
-
{% for namespace in config.protocol.namespace %}
} // namespace {{namespace}}
{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
index 082c7c037e..8bf3c355c0 100644
--- a/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/base_string_adapter_h.template
@@ -136,12 +136,6 @@ class {{config.lib.export_macro}} Binary {
std::unique_ptr<Value> toProtocolValue(const base::Value* value, int depth);
std::unique_ptr<base::Value> toBaseValue(Value* value, int depth);
-
-bool AppendStringValueToMapBinary(base::StringPiece in,
- base::StringPiece key, base::StringPiece value, std::string* out);
-bool AppendStringValueToMapJSON(base::StringPiece in,
- base::StringPiece key, base::StringPiece value, std::string* out);
-
{% for namespace in config.protocol.namespace %}
} // namespace {{namespace}}
{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template b/deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template
index 3009e3bccf..e55dffb5fd 100644
--- a/deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/encoding_cpp.template
@@ -6,6 +6,7 @@
// found in the LICENSE file.
+#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstring>
@@ -18,6 +19,86 @@ namespace {{namespace}} {
// ===== encoding/encoding.cc =====
+// =============================================================================
+// Status and Error codes
+// =============================================================================
+
+std::string Status::ToASCIIString() const {
+ switch (error) {
+ case Error::OK:
+ return "OK";
+ case Error::JSON_PARSER_UNPROCESSED_INPUT_REMAINS:
+ return ToASCIIString("JSON: unprocessed input remains");
+ case Error::JSON_PARSER_STACK_LIMIT_EXCEEDED:
+ return ToASCIIString("JSON: stack limit exceeded");
+ case Error::JSON_PARSER_NO_INPUT:
+ return ToASCIIString("JSON: no input");
+ case Error::JSON_PARSER_INVALID_TOKEN:
+ return ToASCIIString("JSON: invalid token");
+ case Error::JSON_PARSER_INVALID_NUMBER:
+ return ToASCIIString("JSON: invalid number");
+ case Error::JSON_PARSER_INVALID_STRING:
+ return ToASCIIString("JSON: invalid string");
+ case Error::JSON_PARSER_UNEXPECTED_ARRAY_END:
+ return ToASCIIString("JSON: unexpected array end");
+ case Error::JSON_PARSER_COMMA_OR_ARRAY_END_EXPECTED:
+ return ToASCIIString("JSON: comma or array end expected");
+ case Error::JSON_PARSER_STRING_LITERAL_EXPECTED:
+ return ToASCIIString("JSON: string literal expected");
+ case Error::JSON_PARSER_COLON_EXPECTED:
+ return ToASCIIString("JSON: colon expected");
+ case Error::JSON_PARSER_UNEXPECTED_MAP_END:
+ return ToASCIIString("JSON: unexpected map end");
+ case Error::JSON_PARSER_COMMA_OR_MAP_END_EXPECTED:
+ return ToASCIIString("JSON: comma or map end expected");
+ case Error::JSON_PARSER_VALUE_EXPECTED:
+ return ToASCIIString("JSON: value expected");
+
+ case Error::CBOR_INVALID_INT32:
+ return ToASCIIString("CBOR: invalid int32");
+ case Error::CBOR_INVALID_DOUBLE:
+ return ToASCIIString("CBOR: invalid double");
+ case Error::CBOR_INVALID_ENVELOPE:
+ return ToASCIIString("CBOR: invalid envelope");
+ case Error::CBOR_INVALID_STRING8:
+ return ToASCIIString("CBOR: invalid string8");
+ case Error::CBOR_INVALID_STRING16:
+ return ToASCIIString("CBOR: invalid string16");
+ case Error::CBOR_INVALID_BINARY:
+ return ToASCIIString("CBOR: invalid binary");
+ case Error::CBOR_UNSUPPORTED_VALUE:
+ return ToASCIIString("CBOR: unsupported value");
+ case Error::CBOR_NO_INPUT:
+ return ToASCIIString("CBOR: no input");
+ case Error::CBOR_INVALID_START_BYTE:
+ return ToASCIIString("CBOR: invalid start byte");
+ case Error::CBOR_UNEXPECTED_EOF_EXPECTED_VALUE:
+ return ToASCIIString("CBOR: unexpected eof expected value");
+ case Error::CBOR_UNEXPECTED_EOF_IN_ARRAY:
+ return ToASCIIString("CBOR: unexpected eof in array");
+ case Error::CBOR_UNEXPECTED_EOF_IN_MAP:
+ return ToASCIIString("CBOR: unexpected eof in map");
+ case Error::CBOR_INVALID_MAP_KEY:
+ return ToASCIIString("CBOR: invalid map key");
+ case Error::CBOR_STACK_LIMIT_EXCEEDED:
+ return ToASCIIString("CBOR: stack limit exceeded");
+ case Error::CBOR_TRAILING_JUNK:
+ return ToASCIIString("CBOR: trailing junk");
+ case Error::CBOR_MAP_START_EXPECTED:
+ return ToASCIIString("CBOR: map start expected");
+ case Error::CBOR_MAP_STOP_EXPECTED:
+ return ToASCIIString("CBOR: map stop expected");
+ case Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED:
+ return ToASCIIString("CBOR: envelope size limit exceeded");
+ }
+ // Some compilers can't figure out that we can't get here.
+ return "INVALID ERROR CODE";
+}
+
+std::string Status::ToASCIIString(const char* msg) const {
+ return std::string(msg) + " at position " + std::to_string(pos);
+}
+
namespace cbor {
namespace {
// Indicates the number of bits the "initial byte" needs to be shifted to the
@@ -98,9 +179,9 @@ void WriteBytesMostSignificantByteFirst(T v, C* out) {
// See also: https://commandcenter.blogspot.com/2012/04/byte-order-fallacy.html
template <typename T>
T ReadBytesMostSignificantByteFirst(span<uint8_t> in) {
- assert(static_cast<std::size_t>(in.size()) >= sizeof(T));
+ assert(in.size() >= sizeof(T));
T result = 0;
- for (std::size_t shift_bytes = 0; shift_bytes < sizeof(T); ++shift_bytes)
+ for (size_t shift_bytes = 0; shift_bytes < sizeof(T); ++shift_bytes)
result |= T(in[sizeof(T) - 1 - shift_bytes]) << (shift_bytes * 8);
return result;
}
@@ -112,6 +193,7 @@ namespace internals {
// |value| is the payload (e.g. for MajorType::UNSIGNED) or is the size
// (e.g. for BYTE_STRING).
// If successful, returns the number of bytes read. Otherwise returns -1.
+// TODO(johannes): change return type to size_t and use 0 for error.
int8_t ReadTokenStart(span<uint8_t> bytes, MajorType* type, uint64_t* value) {
if (bytes.empty())
return -1;
@@ -134,21 +216,21 @@ int8_t ReadTokenStart(span<uint8_t> bytes, MajorType* type, uint64_t* value) {
}
if (additional_information == kAdditionalInformation2Bytes) {
// Values 256-65535: 1 initial byte + 2 bytes payload.
- if (static_cast<std::size_t>(bytes.size()) < 1 + sizeof(uint16_t))
+ if (bytes.size() < 1 + sizeof(uint16_t))
return -1;
*value = ReadBytesMostSignificantByteFirst<uint16_t>(bytes.subspan(1));
return 3;
}
if (additional_information == kAdditionalInformation4Bytes) {
// 32 bit uint: 1 initial byte + 4 bytes payload.
- if (static_cast<std::size_t>(bytes.size()) < 1 + sizeof(uint32_t))
+ if (bytes.size() < 1 + sizeof(uint32_t))
return -1;
*value = ReadBytesMostSignificantByteFirst<uint32_t>(bytes.subspan(1));
return 5;
}
if (additional_information == kAdditionalInformation8Bytes) {
// 64 bit uint: 1 initial byte + 8 bytes payload.
- if (static_cast<std::size_t>(bytes.size()) < 1 + sizeof(uint64_t))
+ if (bytes.size() < 1 + sizeof(uint64_t))
return -1;
*value = ReadBytesMostSignificantByteFirst<uint64_t>(bytes.subspan(1));
return 9;
@@ -296,7 +378,7 @@ void EncodeString8(span<uint8_t> in, std::string* out) {
template <typename C>
void EncodeFromLatin1Tmpl(span<uint8_t> latin1, C* out) {
- for (std::ptrdiff_t ii = 0; ii < latin1.size(); ++ii) {
+ for (size_t ii = 0; ii < latin1.size(); ++ii) {
if (latin1[ii] <= 127)
continue;
// If there's at least one non-ASCII char, convert to UTF8.
@@ -310,7 +392,7 @@ void EncodeFromLatin1Tmpl(span<uint8_t> latin1, C* out) {
utf8.push_back((latin1[ii] | 0x80) & 0xbf);
}
}
- EncodeString8(SpanFromVector(utf8), out);
+ EncodeString8(SpanFrom(utf8), out);
return;
}
EncodeString8(latin1, out);
@@ -359,12 +441,12 @@ void EncodeBinary(span<uint8_t> in, std::string* out) {
// A double is encoded with a specific initial byte
// (kInitialByteForDouble) plus the 64 bits of payload for its value.
-constexpr std::ptrdiff_t kEncodedDoubleSize = 1 + sizeof(uint64_t);
+constexpr size_t kEncodedDoubleSize = 1 + sizeof(uint64_t);
// An envelope is encoded with a specific initial byte
// (kInitialByteForEnvelope), plus the start byte for a BYTE_STRING with a 32
// bit wide length, plus a 32 bit length for that string.
-constexpr std::ptrdiff_t kEncodedEnvelopeHeaderSize = 1 + 1 + sizeof(uint32_t);
+constexpr size_t kEncodedEnvelopeHeaderSize = 1 + 1 + sizeof(uint32_t);
template <typename C>
void EncodeDoubleTmpl(double value, C* out) {
@@ -390,45 +472,45 @@ void EncodeDouble(double value, std::string* out) {
// =============================================================================
template <typename C>
-void EncodeStartTmpl(C* out, std::size_t& byte_size_pos) {
- assert(byte_size_pos == 0);
+void EncodeStartTmpl(C* out, size_t* byte_size_pos) {
+ assert(*byte_size_pos == 0);
out->push_back(kInitialByteForEnvelope);
out->push_back(kInitialByteFor32BitLengthByteString);
- byte_size_pos = out->size();
+ *byte_size_pos = out->size();
out->resize(out->size() + sizeof(uint32_t));
}
void EnvelopeEncoder::EncodeStart(std::vector<uint8_t>* out) {
- EncodeStartTmpl<std::vector<uint8_t>>(out, byte_size_pos_);
+ EncodeStartTmpl<std::vector<uint8_t>>(out, &byte_size_pos_);
}
void EnvelopeEncoder::EncodeStart(std::string* out) {
- EncodeStartTmpl<std::string>(out, byte_size_pos_);
+ EncodeStartTmpl<std::string>(out, &byte_size_pos_);
}
template <typename C>
-bool EncodeStopTmpl(C* out, std::size_t& byte_size_pos) {
- assert(byte_size_pos != 0);
+bool EncodeStopTmpl(C* out, size_t* byte_size_pos) {
+ assert(*byte_size_pos != 0);
// The byte size is the size of the payload, that is, all the
// bytes that were written past the byte size position itself.
- uint64_t byte_size = out->size() - (byte_size_pos + sizeof(uint32_t));
+ uint64_t byte_size = out->size() - (*byte_size_pos + sizeof(uint32_t));
// We store exactly 4 bytes, so at most INT32MAX, with most significant
// byte first.
if (byte_size > std::numeric_limits<uint32_t>::max())
return false;
for (int shift_bytes = sizeof(uint32_t) - 1; shift_bytes >= 0;
--shift_bytes) {
- (*out)[byte_size_pos++] = 0xff & (byte_size >> (shift_bytes * 8));
+ (*out)[(*byte_size_pos)++] = 0xff & (byte_size >> (shift_bytes * 8));
}
return true;
}
bool EnvelopeEncoder::EncodeStop(std::vector<uint8_t>* out) {
- return EncodeStopTmpl(out, byte_size_pos_);
+ return EncodeStopTmpl(out, &byte_size_pos_);
}
bool EnvelopeEncoder::EncodeStop(std::string* out) {
- return EncodeStopTmpl(out, byte_size_pos_);
+ return EncodeStopTmpl(out, &byte_size_pos_);
}
// =============================================================================
@@ -444,57 +526,94 @@ class CBOREncoder : public StreamingParserHandler {
}
void HandleMapBegin() override {
+ if (!status_->ok())
+ return;
envelopes_.emplace_back();
envelopes_.back().EncodeStart(out_);
out_->push_back(kInitialByteIndefiniteLengthMap);
}
void HandleMapEnd() override {
+ if (!status_->ok())
+ return;
out_->push_back(kStopByte);
assert(!envelopes_.empty());
- envelopes_.back().EncodeStop(out_);
+ if (!envelopes_.back().EncodeStop(out_)) {
+ HandleError(
+ Status(Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED, out_->size()));
+ return;
+ }
envelopes_.pop_back();
}
void HandleArrayBegin() override {
+ if (!status_->ok())
+ return;
envelopes_.emplace_back();
envelopes_.back().EncodeStart(out_);
out_->push_back(kInitialByteIndefiniteLengthArray);
}
void HandleArrayEnd() override {
+ if (!status_->ok())
+ return;
out_->push_back(kStopByte);
assert(!envelopes_.empty());
- envelopes_.back().EncodeStop(out_);
+ if (!envelopes_.back().EncodeStop(out_)) {
+ HandleError(
+ Status(Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED, out_->size()));
+ return;
+ }
envelopes_.pop_back();
}
void HandleString8(span<uint8_t> chars) override {
+ if (!status_->ok())
+ return;
EncodeString8(chars, out_);
}
void HandleString16(span<uint16_t> chars) override {
+ if (!status_->ok())
+ return;
EncodeFromUTF16(chars, out_);
}
- void HandleBinary(span<uint8_t> bytes) override { EncodeBinary(bytes, out_); }
+ void HandleBinary(span<uint8_t> bytes) override {
+ if (!status_->ok())
+ return;
+ EncodeBinary(bytes, out_);
+ }
- void HandleDouble(double value) override { EncodeDouble(value, out_); }
+ void HandleDouble(double value) override {
+ if (!status_->ok())
+ return;
+ EncodeDouble(value, out_);
+ }
- void HandleInt32(int32_t value) override { EncodeInt32(value, out_); }
+ void HandleInt32(int32_t value) override {
+ if (!status_->ok())
+ return;
+ EncodeInt32(value, out_);
+ }
void HandleBool(bool value) override {
+ if (!status_->ok())
+ return;
// See RFC 7049 Section 2.3, Table 2.
out_->push_back(value ? kEncodedTrue : kEncodedFalse);
}
void HandleNull() override {
+ if (!status_->ok())
+ return;
// See RFC 7049 Section 2.3, Table 2.
out_->push_back(kEncodedNull);
}
void HandleError(Status error) override {
- assert(!error.ok());
+ if (!status_->ok())
+ return;
*status_ = error;
out_->clear();
}
@@ -547,10 +666,17 @@ Status CBORTokenizer::Status() const {
return status_;
}
+// The following accessor functions ::GetInt32, ::GetDouble,
+// ::GetString8, ::GetString16WireRep, ::GetBinary, ::GetEnvelopeContents
+// assume that a particular token was recognized in ::ReadNextToken.
+// That's where all the error checking is done. By design,
+// the accessors (assuming the token was recognized) never produce
+// an error.
+
int32_t CBORTokenizer::GetInt32() const {
assert(token_tag_ == CBORTokenTag::INT32);
// The range checks happen in ::ReadNextToken().
- return static_cast<uint32_t>(
+ return static_cast<int32_t>(
token_start_type_ == MajorType::UNSIGNED
? token_start_internal_value_
: -static_cast<int64_t>(token_start_internal_value_) - 1);
@@ -569,28 +695,55 @@ double CBORTokenizer::GetDouble() const {
span<uint8_t> CBORTokenizer::GetString8() const {
assert(token_tag_ == CBORTokenTag::STRING8);
- auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ auto length = static_cast<size_t>(token_start_internal_value_);
return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
}
span<uint8_t> CBORTokenizer::GetString16WireRep() const {
assert(token_tag_ == CBORTokenTag::STRING16);
- auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ auto length = static_cast<size_t>(token_start_internal_value_);
return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
}
span<uint8_t> CBORTokenizer::GetBinary() const {
assert(token_tag_ == CBORTokenTag::BINARY);
- auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ auto length = static_cast<size_t>(token_start_internal_value_);
return bytes_.subspan(status_.pos + (token_byte_length_ - length), length);
}
span<uint8_t> CBORTokenizer::GetEnvelopeContents() const {
assert(token_tag_ == CBORTokenTag::ENVELOPE);
- auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
+ auto length = static_cast<size_t>(token_start_internal_value_);
return bytes_.subspan(status_.pos + kEncodedEnvelopeHeaderSize, length);
}
+// All error checking happens in ::ReadNextToken, so that the accessors
+// can avoid having to carry an error return value.
+//
+// With respect to checking the encoded lengths of strings, arrays, etc:
+// On the wire, CBOR uses 1,2,4, and 8 byte unsigned integers, so
+// we initially read them as uint64_t, usually into token_start_internal_value_.
+//
+// However, since these containers have a representation on the machine,
+// we need to do corresponding size computations on the input byte array,
+// output span (e.g. the payload for a string), etc., and size_t is
+// machine specific (in practice either 32 bit or 64 bit).
+//
+// Further, we must avoid overflowing size_t. Therefore, we use this
+// kMaxValidLength constant to:
+// - Reject values that are larger than the architecture specific
+// max size_t (differs between 32 bit and 64 bit arch).
+// - Reserve at least one bit so that we can check against overflows
+// when adding lengths (array / string length / etc.); we do this by
+// ensuring that the inputs to an addition are <= kMaxValidLength,
+// and then checking whether the sum went past it.
+//
+// See also
+// https://chromium.googlesource.com/chromium/src/+/master/docs/security/integer-semantics.md
+static const uint64_t kMaxValidLength =
+ std::min<uint64_t>(std::numeric_limits<uint64_t>::max() >> 2,
+ std::numeric_limits<size_t>::max());
+
void CBORTokenizer::ReadNextToken(bool enter_envelope) {
if (enter_envelope) {
status_.pos += kEncodedEnvelopeHeaderSize;
@@ -603,6 +756,7 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
token_tag_ = CBORTokenTag::DONE;
return;
}
+ const size_t remaining_bytes = bytes_.size() - status_.pos;
switch (bytes_[status_.pos]) {
case kStopByte:
SetToken(CBORTokenTag::STOP, 1);
@@ -623,21 +777,26 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
SetToken(CBORTokenTag::NULL_VALUE, 1);
return;
case kExpectedConversionToBase64Tag: { // BINARY
- int8_t bytes_read = internals::ReadTokenStart(
+ const int8_t bytes_read = internals::ReadTokenStart(
bytes_.subspan(status_.pos + 1), &token_start_type_,
&token_start_internal_value_);
- int64_t token_byte_length = 1 + bytes_read + token_start_internal_value_;
- if (-1 == bytes_read || token_start_type_ != MajorType::BYTE_STRING ||
- status_.pos + token_byte_length > bytes_.size()) {
+ if (bytes_read < 0 || token_start_type_ != MajorType::BYTE_STRING ||
+ token_start_internal_value_ > kMaxValidLength) {
SetError(Error::CBOR_INVALID_BINARY);
return;
}
- SetToken(CBORTokenTag::BINARY,
- static_cast<std::ptrdiff_t>(token_byte_length));
+ const uint64_t token_byte_length = token_start_internal_value_ +
+ /* tag before token start: */ 1 +
+ /* token start: */ bytes_read;
+ if (token_byte_length > remaining_bytes) {
+ SetError(Error::CBOR_INVALID_BINARY);
+ return;
+ }
+ SetToken(CBORTokenTag::BINARY, static_cast<size_t>(token_byte_length));
return;
}
case kInitialByteForDouble: { // DOUBLE
- if (status_.pos + kEncodedDoubleSize > bytes_.size()) {
+ if (kEncodedDoubleSize > remaining_bytes) {
SetError(Error::CBOR_INVALID_DOUBLE);
return;
}
@@ -645,7 +804,7 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
return;
}
case kInitialByteForEnvelope: { // ENVELOPE
- if (status_.pos + kEncodedEnvelopeHeaderSize > bytes_.size()) {
+ if (kEncodedEnvelopeHeaderSize > remaining_bytes) {
SetError(Error::CBOR_INVALID_ENVELOPE);
return;
}
@@ -657,26 +816,30 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
// Read the length of the byte string.
token_start_internal_value_ = ReadBytesMostSignificantByteFirst<uint32_t>(
bytes_.subspan(status_.pos + 2));
- // Make sure the payload is contained within the message.
- if (token_start_internal_value_ + kEncodedEnvelopeHeaderSize +
- status_.pos >
- static_cast<std::size_t>(bytes_.size())) {
+ if (token_start_internal_value_ > kMaxValidLength) {
SetError(Error::CBOR_INVALID_ENVELOPE);
return;
}
- auto length = static_cast<std::ptrdiff_t>(token_start_internal_value_);
- SetToken(CBORTokenTag::ENVELOPE, kEncodedEnvelopeHeaderSize + length);
+ uint64_t token_byte_length =
+ token_start_internal_value_ + kEncodedEnvelopeHeaderSize;
+ if (token_byte_length > remaining_bytes) {
+ SetError(Error::CBOR_INVALID_ENVELOPE);
+ return;
+ }
+ SetToken(CBORTokenTag::ENVELOPE, static_cast<size_t>(token_byte_length));
return;
}
default: {
- span<uint8_t> remainder =
- bytes_.subspan(status_.pos, bytes_.size() - status_.pos);
- assert(!remainder.empty());
- int8_t token_start_length = internals::ReadTokenStart(
- remainder, &token_start_type_, &token_start_internal_value_);
- bool success = token_start_length != -1;
+ const int8_t token_start_length = internals::ReadTokenStart(
+ bytes_.subspan(status_.pos), &token_start_type_,
+ &token_start_internal_value_);
+ const bool success = token_start_length >= 0;
switch (token_start_type_) {
case MajorType::UNSIGNED: // INT32.
+ // INT32 is a signed int32 (int32 makes sense for the
+ // inspector_protocol, it's not a CBOR limitation), so we check
+ // against the signed max, so that the allowable values are
+ // 0, 1, 2, ... 2^31 - 1.
if (!success || std::numeric_limits<int32_t>::max() <
token_start_internal_value_) {
SetError(Error::CBOR_INVALID_INT32);
@@ -684,38 +847,54 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
}
SetToken(CBORTokenTag::INT32, token_start_length);
return;
- case MajorType::NEGATIVE: // INT32.
- if (!success ||
- std::numeric_limits<int32_t>::min() >
- -static_cast<int64_t>(token_start_internal_value_) - 1) {
+ case MajorType::NEGATIVE: { // INT32.
+ // INT32 is a signed int32 (int32 makes sense for the
+ // inspector_protocol, it's not a CBOR limitation); in CBOR, the
+ // negative values for INT32 are represented as NEGATIVE, that is, -1
+ // INT32 is represented as 1 << 5 | 0 (major type 1, additional info
+ // value 0). The minimal allowed INT32 value in our protocol is
+ // std::numeric_limits<int32_t>::min(). We check for it by directly
+ // checking the payload against the maximal allowed signed (!) int32
+ // value.
+ if (!success || token_start_internal_value_ >
+ std::numeric_limits<int32_t>::max()) {
SetError(Error::CBOR_INVALID_INT32);
return;
}
SetToken(CBORTokenTag::INT32, token_start_length);
return;
+ }
case MajorType::STRING: { // STRING8.
- if (!success || remainder.size() < static_cast<int64_t>(
- token_start_internal_value_)) {
+ if (!success || token_start_internal_value_ > kMaxValidLength) {
SetError(Error::CBOR_INVALID_STRING8);
return;
}
- auto length =
- static_cast<std::ptrdiff_t>(token_start_internal_value_);
- SetToken(CBORTokenTag::STRING8, token_start_length + length);
+ uint64_t token_byte_length =
+ token_start_internal_value_ + token_start_length;
+ if (token_byte_length > remaining_bytes) {
+ SetError(Error::CBOR_INVALID_STRING8);
+ return;
+ }
+ SetToken(CBORTokenTag::STRING8,
+ static_cast<size_t>(token_byte_length));
return;
}
case MajorType::BYTE_STRING: { // STRING16.
- if (!success ||
- remainder.size() <
- static_cast<int64_t>(token_start_internal_value_) ||
- // Must be divisible by 2 since UTF16 is 2 bytes per character.
+ // Length must be divisible by 2 since UTF16 is 2 bytes per
+ // character, hence the &1 check.
+ if (!success || token_start_internal_value_ > kMaxValidLength ||
token_start_internal_value_ & 1) {
SetError(Error::CBOR_INVALID_STRING16);
return;
}
- auto length =
- static_cast<std::ptrdiff_t>(token_start_internal_value_);
- SetToken(CBORTokenTag::STRING16, token_start_length + length);
+ uint64_t token_byte_length =
+ token_start_internal_value_ + token_start_length;
+ if (token_byte_length > remaining_bytes) {
+ SetError(Error::CBOR_INVALID_STRING16);
+ return;
+ }
+ SetToken(CBORTokenTag::STRING16,
+ static_cast<size_t>(token_byte_length));
return;
}
case MajorType::ARRAY:
@@ -729,8 +908,7 @@ void CBORTokenizer::ReadNextToken(bool enter_envelope) {
}
}
-void CBORTokenizer::SetToken(CBORTokenTag token_tag,
- std::ptrdiff_t token_byte_length) {
+void CBORTokenizer::SetToken(CBORTokenTag token_tag, size_t token_byte_length) {
token_tag_ = token_tag;
token_byte_length_ = token_byte_length;
}
@@ -764,7 +942,7 @@ bool ParseValue(int32_t stack_depth,
void ParseUTF16String(CBORTokenizer* tokenizer, StreamingParserHandler* out) {
std::vector<uint16_t> value;
span<uint8_t> rep = tokenizer->GetString16WireRep();
- for (std::ptrdiff_t ii = 0; ii < rep.size(); ii += 2)
+ for (size_t ii = 0; ii < rep.size(); ii += 2)
value.push_back((rep[ii + 1] << 8) | rep[ii]);
out->HandleString16(span<uint16_t>(value.data(), value.size()));
tokenizer->Next();
@@ -947,6 +1125,8 @@ template <typename C>
Status AppendString8EntryToCBORMapTmpl(span<uint8_t> string8_key,
span<uint8_t> string8_value,
C* cbor) {
+ // Careful below: Don't compare (*cbor)[idx] with a uint8_t, since
+ // it could be a char (signed!). Instead, use bytes.
span<uint8_t> bytes(reinterpret_cast<const uint8_t*>(cbor->data()),
cbor->size());
CBORTokenizer tokenizer(bytes);
@@ -954,23 +1134,23 @@ Status AppendString8EntryToCBORMapTmpl(span<uint8_t> string8_key,
return tokenizer.Status();
if (tokenizer.TokenTag() != CBORTokenTag::ENVELOPE)
return Status(Error::CBOR_INVALID_ENVELOPE, 0);
- std::ptrdiff_t envelope_size = tokenizer.GetEnvelopeContents().size();
- std::size_t old_size = cbor->size();
- if (old_size != std::size_t(envelope_size) + kEncodedEnvelopeHeaderSize)
+ size_t envelope_size = tokenizer.GetEnvelopeContents().size();
+ size_t old_size = cbor->size();
+ if (old_size != envelope_size + kEncodedEnvelopeHeaderSize)
return Status(Error::CBOR_INVALID_ENVELOPE, 0);
if (envelope_size == 0 ||
(tokenizer.GetEnvelopeContents()[0] != EncodeIndefiniteLengthMapStart()))
return Status(Error::CBOR_MAP_START_EXPECTED, kEncodedEnvelopeHeaderSize);
- if (cbor->back() != EncodeStop())
+ if (bytes[bytes.size() - 1] != EncodeStop())
return Status(Error::CBOR_MAP_STOP_EXPECTED, cbor->size() - 1);
cbor->pop_back();
EncodeString8(string8_key, cbor);
EncodeString8(string8_value, cbor);
cbor->push_back(EncodeStop());
- std::size_t new_envelope_size = envelope_size + (cbor->size() - old_size);
+ size_t new_envelope_size = envelope_size + (cbor->size() - old_size);
if (new_envelope_size > std::numeric_limits<uint32_t>::max())
return Status(Error::CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED, 0);
- std::size_t size_pos = cbor->size() - new_envelope_size - sizeof(uint32_t);
+ size_t size_pos = cbor->size() - new_envelope_size - sizeof(uint32_t);
uint8_t* out = reinterpret_cast<uint8_t*>(&cbor->at(size_pos));
*(out++) = (new_envelope_size >> 24) & 0xff;
*(out++) = (new_envelope_size >> 16) & 0xff;
@@ -1020,20 +1200,21 @@ enum class Container {
class State {
public:
explicit State(Container container) : container_(container) {}
- void StartElement(std::vector<uint8_t>* out) {
- // FIXME!!!
- }
- void StartElement(std::string* out) {
+ void StartElement(std::vector<uint8_t>* out) { StartElementTmpl(out); }
+ void StartElement(std::string* out) { StartElementTmpl(out); }
+ Container container() const { return container_; }
+
+ private:
+ template <typename C>
+ void StartElementTmpl(C* out) {
assert(container_ != Container::NONE || size_ == 0);
if (size_ != 0) {
char delim = (!(size_ & 1) || container_ == Container::ARRAY) ? ',' : ':';
- out->append(1, delim);
+ out->push_back(delim);
}
++size_;
}
- Container container() const { return container_; }
- private:
Container container_ = Container::NONE;
int size_ = 0;
};
@@ -1047,7 +1228,7 @@ void Base64Encode(const span<uint8_t>& in, C* out) {
// The following three cases are based on the tables in the example
// section in https://en.wikipedia.org/wiki/Base64. We process three
// input bytes at a time, emitting 4 output bytes at a time.
- std::ptrdiff_t ii = 0;
+ size_t ii = 0;
// While possible, process three input bytes.
for (; ii + 3 <= in.size(); ii += 3) {
@@ -1152,7 +1333,7 @@ class JSONEncoder : public StreamingParserHandler {
return;
state_.top().StartElement(out_);
Emit('"');
- for (std::ptrdiff_t ii = 0; ii < chars.size(); ++ii) {
+ for (size_t ii = 0; ii < chars.size(); ++ii) {
uint8_t c = chars[ii];
if (c == '"') {
Emit("\\\"");
@@ -1364,23 +1545,23 @@ class JsonParser {
JsonParser(const Platform* platform, StreamingParserHandler* handler)
: platform_(platform), handler_(handler) {}
- void Parse(const Char* start, std::size_t length) {
+ void Parse(const Char* start, size_t length) {
start_pos_ = start;
const Char* end = start + length;
- const Char* tokenEnd;
+ const Char* tokenEnd = nullptr;
ParseValue(start, end, &tokenEnd, 0);
+ if (error_)
+ return;
if (tokenEnd != end) {
HandleError(Error::JSON_PARSER_UNPROCESSED_INPUT_REMAINS, tokenEnd);
}
}
private:
- bool CharsToDouble(const uint16_t* chars,
- std::size_t length,
- double* result) {
+ bool CharsToDouble(const uint16_t* chars, size_t length, double* result) {
std::string buffer;
buffer.reserve(length + 1);
- for (std::size_t ii = 0; ii < length; ++ii) {
+ for (size_t ii = 0; ii < length; ++ii) {
bool is_ascii = !(chars[ii] & ~0x7F);
if (!is_ascii)
return false;
@@ -1389,7 +1570,7 @@ class JsonParser {
return platform_->StrToD(buffer.c_str(), result);
}
- bool CharsToDouble(const uint8_t* chars, std::size_t length, double* result) {
+ bool CharsToDouble(const uint8_t* chars, size_t length, double* result) {
std::string buffer(reinterpret_cast<const char*>(chars), length);
return platform_->StrToD(buffer.c_str(), result);
}
@@ -1585,7 +1766,7 @@ class JsonParser {
if (IsSpaceOrNewLine(*start)) {
++start;
} else if (*start == '/') {
- const Char* comment_end;
+ const Char* comment_end = nullptr;
if (!SkipComment(start, end, &comment_end))
break;
start = comment_end;
@@ -1683,7 +1864,7 @@ class JsonParser {
// If the |Char| we're dealing with is really a byte, then
// we have utf8 here, and we need to check for multibyte characters
// and transcode them to utf16 (either one or two utf16 chars).
- if (sizeof(Char) == sizeof(uint8_t) && c >= 0x7f) {
+ if (sizeof(Char) == sizeof(uint8_t) && c > 0x7f) {
// Inspect the leading byte to figure out how long the utf8
// byte sequence is; while doing this initialize |codepoint|
// with the first few bits.
@@ -1722,7 +1903,7 @@ class JsonParser {
// Disallow overlong encodings for ascii characters, as these
// would include " and other characters significant to JSON
// string termination / control.
- if (codepoint < 0x7f)
+ if (codepoint <= 0x7f)
return false;
// Invalid in UTF8, and can't be represented in UTF16 anyway.
if (codepoint > 0x10ffff)
@@ -1797,8 +1978,8 @@ class JsonParser {
HandleError(Error::JSON_PARSER_STACK_LIMIT_EXCEEDED, start);
return;
}
- const Char* token_start;
- const Char* token_end;
+ const Char* token_start = nullptr;
+ const Char* token_end = nullptr;
Token token = ParseToken(start, end, &token_start, &token_end);
switch (token) {
case NoInput:
@@ -1932,7 +2113,8 @@ class JsonParser {
void HandleError(Error error, const Char* pos) {
assert(error != Error::OK);
if (!error_) {
- handler_->HandleError(Status{error, pos - start_pos_});
+ handler_->HandleError(
+ Status{error, static_cast<size_t>(pos - start_pos_)});
error_ = true;
}
}
@@ -1983,10 +2165,8 @@ Status ConvertCBORToJSON(const Platform& platform,
return ConvertCBORToJSONTmpl(platform, cbor, json);
}
-template <typename C>
-Status ConvertJSONToCBORTmpl(const Platform& platform,
- span<uint8_t> json,
- C* cbor) {
+template <typename T, typename C>
+Status ConvertJSONToCBORTmpl(const Platform& platform, span<T> json, C* cbor) {
Status status;
std::unique_ptr<StreamingParserHandler> encoder =
cbor::NewCBOREncoder(cbor, &status);
@@ -1999,12 +2179,23 @@ Status ConvertJSONToCBOR(const Platform& platform,
return ConvertJSONToCBORTmpl(platform, json, cbor);
}
Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint16_t> json,
+ std::string* cbor) {
+ return ConvertJSONToCBORTmpl(platform, json, cbor);
+}
+Status ConvertJSONToCBOR(const Platform& platform,
span<uint8_t> json,
std::vector<uint8_t>* cbor) {
return ConvertJSONToCBORTmpl(platform, json, cbor);
}
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint16_t> json,
+ std::vector<uint8_t>* cbor) {
+ return ConvertJSONToCBORTmpl(platform, json, cbor);
+}
} // namespace json
{% for namespace in config.protocol.namespace %}
} // namespace {{namespace}}
{% endfor %}
+
diff --git a/deps/v8/third_party/inspector_protocol/lib/encoding_h.template b/deps/v8/third_party/inspector_protocol/lib/encoding_h.template
index bc10ed83d6..f1a52a1958 100644
--- a/deps/v8/third_party/inspector_protocol/lib/encoding_h.template
+++ b/deps/v8/third_party/inspector_protocol/lib/encoding_h.template
@@ -10,6 +10,8 @@
#include <cstddef>
#include <cstdint>
+#include <cstring>
+#include <limits>
#include <memory>
#include <string>
#include <vector>
@@ -25,13 +27,11 @@ namespace {{namespace}} {
// span - sequence of bytes
// =============================================================================
-// This template is similar to std::span, which will be included in C++20. Like
-// std::span it uses ptrdiff_t, which is signed (and thus a bit annoying
-// sometimes when comparing with size_t), but other than this it's much simpler.
+// This template is similar to std::span, which will be included in C++20.
template <typename T>
class span {
public:
- using index_type = std::ptrdiff_t;
+ using index_type = size_t;
span() : data_(nullptr), size_(0) {}
span(const T* data, index_type size) : data_(data), size_(size) {}
@@ -62,15 +62,27 @@ class span {
};
template <typename T>
-span<T> SpanFromVector(const std::vector<T>& v) {
+span<T> SpanFrom(const std::vector<T>& v) {
return span<T>(v.data(), v.size());
}
-inline span<uint8_t> SpanFromStdString(const std::string& v) {
+template <size_t N>
+span<uint8_t> SpanFrom(const char (&str)[N]) {
+ return span<uint8_t>(reinterpret_cast<const uint8_t*>(str), N - 1);
+}
+
+inline span<uint8_t> SpanFrom(const char* str) {
+ return str ? span<uint8_t>(reinterpret_cast<const uint8_t*>(str), strlen(str))
+ : span<uint8_t>();
+}
+
+inline span<uint8_t> SpanFrom(const std::string& v) {
return span<uint8_t>(reinterpret_cast<const uint8_t*>(v.data()), v.size());
}
-// Error codes.
+// =============================================================================
+// Status and Error codes
+// =============================================================================
enum class Error {
OK = 0,
// JSON parsing errors - json_parser.{h,cc}.
@@ -102,24 +114,30 @@ enum class Error {
CBOR_UNEXPECTED_EOF_IN_MAP = 0x19,
CBOR_INVALID_MAP_KEY = 0x1a,
CBOR_STACK_LIMIT_EXCEEDED = 0x1b,
- CBOR_STRING8_MUST_BE_7BIT = 0x1c,
- CBOR_TRAILING_JUNK = 0x1d,
- CBOR_MAP_START_EXPECTED = 0x1e,
- CBOR_MAP_STOP_EXPECTED = 0x1f,
- CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x20,
+ CBOR_TRAILING_JUNK = 0x1c,
+ CBOR_MAP_START_EXPECTED = 0x1d,
+ CBOR_MAP_STOP_EXPECTED = 0x1e,
+ CBOR_ENVELOPE_SIZE_LIMIT_EXCEEDED = 0x1f,
};
// A status value with position that can be copied. The default status
// is OK. Usually, error status values should come with a valid position.
struct Status {
- static constexpr std::ptrdiff_t npos() { return -1; }
+ static constexpr size_t npos() { return std::numeric_limits<size_t>::max(); }
bool ok() const { return error == Error::OK; }
Error error = Error::OK;
- std::ptrdiff_t pos = npos();
- Status(Error error, std::ptrdiff_t pos) : error(error), pos(pos) {}
+ size_t pos = npos();
+ Status(Error error, size_t pos) : error(error), pos(pos) {}
Status() = default;
+
+ // Returns a 7 bit US-ASCII string, either "OK" or an error message
+ // that includes the position.
+ std::string ToASCIIString() const;
+
+ private:
+ std::string ToASCIIString(const char* msg) const;
};
// Handler interface for parser events emitted by a streaming parser.
@@ -257,7 +275,7 @@ class EnvelopeEncoder {
bool EncodeStop(std::string* out);
private:
- std::size_t byte_size_pos_ = 0;
+ size_t byte_size_pos_ = 0;
};
// =============================================================================
@@ -381,13 +399,13 @@ class CBORTokenizer {
private:
void ReadNextToken(bool enter_envelope);
- void SetToken(CBORTokenTag token, std::ptrdiff_t token_byte_length);
+ void SetToken(CBORTokenTag token, size_t token_byte_length);
void SetError(Error error);
span<uint8_t> bytes_;
CBORTokenTag token_tag_;
struct Status status_;
- std::ptrdiff_t token_byte_length_;
+ size_t token_byte_length_;
MajorType token_start_type_;
uint64_t token_start_internal_value_;
};
@@ -486,8 +504,14 @@ Status ConvertJSONToCBOR(const Platform& platform,
span<uint8_t> json,
std::vector<uint8_t>* cbor);
Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint16_t> json,
+ std::vector<uint8_t>* cbor);
+Status ConvertJSONToCBOR(const Platform& platform,
span<uint8_t> json,
std::string* cbor);
+Status ConvertJSONToCBOR(const Platform& platform,
+ span<uint16_t> json,
+ std::string* cbor);
} // namespace json
{% for namespace in config.protocol.namespace %}
diff --git a/deps/v8/third_party/inspector_protocol/pdl.py b/deps/v8/third_party/inspector_protocol/pdl.py
index 43111e944b..03d11b39d6 100644
--- a/deps/v8/third_party/inspector_protocol/pdl.py
+++ b/deps/v8/third_party/inspector_protocol/pdl.py
@@ -74,20 +74,20 @@ def parse(data, file_name, map_binary_to_string=False):
if len(trimLine) == 0:
continue
- match = re.compile('^(experimental )?(deprecated )?domain (.*)').match(line)
+ match = re.compile(r'^(experimental )?(deprecated )?domain (.*)').match(line)
if match:
domain = createItem({'domain' : match.group(3)}, match.group(1), match.group(2))
protocol['domains'].append(domain)
continue
- match = re.compile('^ depends on ([^\s]+)').match(line)
+ match = re.compile(r'^ depends on ([^\s]+)').match(line)
if match:
if 'dependencies' not in domain:
domain['dependencies'] = []
domain['dependencies'].append(match.group(1))
continue
- match = re.compile('^ (experimental )?(deprecated )?type (.*) extends (array of )?([^\s]+)').match(line)
+ match = re.compile(r'^ (experimental )?(deprecated )?type (.*) extends (array of )?([^\s]+)').match(line)
if match:
if 'types' not in domain:
domain['types'] = []
@@ -96,7 +96,7 @@ def parse(data, file_name, map_binary_to_string=False):
domain['types'].append(item)
continue
- match = re.compile('^ (experimental )?(deprecated )?(command|event) (.*)').match(line)
+ match = re.compile(r'^ (experimental )?(deprecated )?(command|event) (.*)').match(line)
if match:
list = []
if match.group(3) == 'command':
@@ -114,7 +114,7 @@ def parse(data, file_name, map_binary_to_string=False):
list.append(item)
continue
- match = re.compile('^ (experimental )?(deprecated )?(optional )?(array of )?([^\s]+) ([^\s]+)').match(line)
+ match = re.compile(r'^ (experimental )?(deprecated )?(optional )?(array of )?([^\s]+) ([^\s]+)').match(line)
if match:
param = createItem({}, match.group(1), match.group(2), match.group(6))
if match.group(3):
@@ -125,36 +125,36 @@ def parse(data, file_name, map_binary_to_string=False):
subitems.append(param)
continue
- match = re.compile('^ (parameters|returns|properties)').match(line)
+ match = re.compile(r'^ (parameters|returns|properties)').match(line)
if match:
subitems = item[match.group(1)] = []
continue
- match = re.compile('^ enum').match(line)
+ match = re.compile(r'^ enum').match(line)
if match:
enumliterals = item['enum'] = []
continue
- match = re.compile('^version').match(line)
+ match = re.compile(r'^version').match(line)
if match:
continue
- match = re.compile('^ major (\d+)').match(line)
+ match = re.compile(r'^ major (\d+)').match(line)
if match:
protocol['version']['major'] = match.group(1)
continue
- match = re.compile('^ minor (\d+)').match(line)
+ match = re.compile(r'^ minor (\d+)').match(line)
if match:
protocol['version']['minor'] = match.group(1)
continue
- match = re.compile('^ redirect ([^\s]+)').match(line)
+ match = re.compile(r'^ redirect ([^\s]+)').match(line)
if match:
item['redirect'] = match.group(1)
continue
- match = re.compile('^ ( )?[^\s]+$').match(line)
+ match = re.compile(r'^ ( )?[^\s]+$').match(line)
if match:
# enum literal
enumliterals.append(trimLine)
diff --git a/deps/v8/third_party/inspector_protocol/roll.py b/deps/v8/third_party/inspector_protocol/roll.py
index ee9c1d099e..abe636e270 100755
--- a/deps/v8/third_party/inspector_protocol/roll.py
+++ b/deps/v8/third_party/inspector_protocol/roll.py
@@ -18,6 +18,9 @@ FILES_TO_SYNC = [
'code_generator.py',
'concatenate_protocols.py',
'convert_protocol_to_json.py',
+ 'encoding/encoding.h',
+ 'encoding/encoding.cc',
+ 'encoding/encoding_test.cc',
'inspector_protocol.gni',
'inspector_protocol.gypi',
'lib/*',
@@ -95,11 +98,6 @@ def main(argv):
parser.add_argument("--v8_src_downstream",
help="The V8 src tree.",
default="~/v8/v8")
- parser.add_argument('--reverse', dest='reverse', action='store_true',
- help=("Whether to roll the opposite direction, from "
- "V8 (downstream) to inspector_protocol "
- "(upstream)."))
- parser.set_defaults(reverse=False)
parser.add_argument('--force', dest='force', action='store_true',
help=("Whether to carry out the modifications "
"in the destination tree."))
@@ -116,14 +114,9 @@ def main(argv):
# Check that the destination Git repo isn't at the master branch - it's
# generally a bad idea to check into the master branch, so we catch this
# common pilot error here early.
- if args.reverse:
- CheckRepoIsNotAtMasterBranch(upstream)
- src_dir = os.path.join(downstream, 'third_party/inspector_protocol')
- dest_dir = upstream
- else:
- CheckRepoIsNotAtMasterBranch(downstream)
- src_dir = upstream
- dest_dir = os.path.join(downstream, 'third_party/inspector_protocol')
+ CheckRepoIsNotAtMasterBranch(downstream)
+ src_dir = upstream
+ dest_dir = os.path.join(downstream, 'third_party/inspector_protocol')
print('Rolling %s into %s ...' % (src_dir, dest_dir))
src_files = set(FindFilesToSyncIn(src_dir))
dest_files = set(FindFilesToSyncIn(dest_dir))
@@ -143,20 +136,26 @@ def main(argv):
sys.exit(1)
print('You said --force ... as you wish, modifying the destination.')
for f in to_add + to_copy:
- shutil.copyfile(os.path.join(src_dir, f), os.path.join(dest_dir, f))
+ contents = open(os.path.join(src_dir, f)).read()
+ contents = contents.replace(
+ 'INSPECTOR_PROTOCOL_ENCODING_ENCODING_H_',
+ 'V8_INSPECTOR_PROTOCOL_ENCODING_ENCODING_H_')
+ contents = contents.replace(
+ 'namespace inspector_protocol_encoding',
+ 'namespace v8_inspector_protocol_encoding')
+ open(os.path.join(dest_dir, f), 'w').write(contents)
shutil.copymode(os.path.join(src_dir, f), os.path.join(dest_dir, f))
for f in to_delete:
os.unlink(os.path.join(dest_dir, f))
- if not args.reverse:
- head_revision = GetHeadRevision(upstream)
- lines = open(os.path.join(dest_dir, 'README.v8')).readlines()
- f = open(os.path.join(dest_dir, 'README.v8'), 'w')
- for line in lines:
- if line.startswith('Revision: '):
- f.write('Revision: %s' % head_revision)
- else:
- f.write(line)
- f.close()
+ head_revision = GetHeadRevision(upstream)
+ lines = open(os.path.join(dest_dir, 'README.v8')).readlines()
+ f = open(os.path.join(dest_dir, 'README.v8'), 'w')
+ for line in lines:
+ if line.startswith('Revision: '):
+ f.write('Revision: %s' % head_revision)
+ else:
+ f.write(line)
+ f.close()
if __name__ == '__main__':
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
index 4ef60a6ea2..982e2c61b8 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
@@ -203,12 +203,12 @@ void Frontend::flush()
m_frontendChannel->flushProtocolNotifications();
}
-void Frontend::sendRawNotification(String notification)
+void Frontend::sendRawJSONNotification(String notification)
{
m_frontendChannel->sendProtocolNotification(InternalRawNotification::fromJSON(std::move(notification)));
}
-void Frontend::sendRawNotification(std::vector<uint8_t> notification)
+void Frontend::sendRawCBORNotification(std::vector<uint8_t> notification)
{
m_frontendChannel->sendProtocolNotification(InternalRawNotification::fromBinary(std::move(notification)));
}
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
index c670d65c46..9d86d7a4ac 100644
--- a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
@@ -269,8 +269,8 @@ public:
{% endfor %}
void flush();
- void sendRawNotification(String);
- void sendRawNotification(std::vector<uint8_t>);
+ void sendRawJSONNotification(String);
+ void sendRawCBORNotification(std::vector<uint8_t>);
private:
FrontendChannel* m_frontendChannel;
};
diff --git a/deps/v8/third_party/v8/builtins/array-sort.tq b/deps/v8/third_party/v8/builtins/array-sort.tq
index e5df6768de..c751e4831d 100644
--- a/deps/v8/third_party/v8/builtins/array-sort.tq
+++ b/deps/v8/third_party/v8/builtins/array-sort.tq
@@ -34,7 +34,7 @@ namespace array {
ResetToGenericAccessor() {
this.loadFn = Load<GenericElementsAccessor>;
this.storeFn = Store<GenericElementsAccessor>;
- this.bailoutStatus = kSuccess;
+ this.deleteFn = Delete<GenericElementsAccessor>;
}
// The receiver of the Array.p.sort call.
@@ -54,17 +54,14 @@ namespace array {
// uses ToString and a lexicographical compare.
sortComparePtr: CompareBuiltinFn;
- // The following three function pointer represent a Accessor/Path.
- // These are used to Load/Store elements and to check whether to bail to the
- // baseline GenericElementsAccessor.
+ // The following four function pointer represent a Accessor/Path.
+ // These are used to Load/Store/Delete elements and to check whether
+ // to bail to the baseline GenericElementsAccessor.
loadFn: LoadFn;
storeFn: StoreFn;
+ deleteFn: DeleteFn;
canUseSameAccessorFn: CanUseSameAccessorFn;
- // If this field has the value kFailure, we need to bail to the baseline
- // GenericElementsAccessor.
- bailoutStatus: Smi;
-
// This controls when we get *into* galloping mode. It's initialized to
// kMinGallop. mergeLow and mergeHigh tend to nudge it higher for random
// data, and lower for highly structured data.
@@ -90,49 +87,96 @@ namespace array {
// Pointer to the temporary array.
tempArray: FixedArray;
+
+ // The initialReceiverLength converted and clamped to Smi.
+ sortLength: Smi;
+
+ // The number of undefined that need to be inserted after sorting
+ // when the elements are copied back from the workArray to the receiver.
+ numberOfUndefined: Smi;
+ }
+
+ type FastSmiElements;
+ type FastObjectElements;
+
+ // With the pre-processing step in Torque, the exact number of elements
+ // to sort is unknown at the time the sort state is created.
+ // The 'length' property is an upper bound (as per spec),
+ // while the actual size of the backing store is a good guess.
+ // After the pre-processing step, the workarray won't change in length.
+ macro CalculateWorkArrayLength(
+ receiver: JSReceiver, initialReceiverLength: Number): intptr {
+ // TODO(szuend): Implement full range sorting, not only up to MaxSmi.
+ // https://crbug.com/v8/7970.
+ let clampedReceiverLength: uintptr =
+ Convert<uintptr>(initialReceiverLength);
+ if (clampedReceiverLength > kSmiMaxValue) {
+ clampedReceiverLength = kSmiMaxValue;
+ }
+
+ let workArrayLength: intptr = Convert<intptr>(clampedReceiverLength);
+ try {
+ const object = Cast<JSObject>(receiver) otherwise NoJsObject;
+ const elementsLength = Convert<intptr>(object.elements.length);
+
+ // In some cases, elements are only on prototypes, but not on the receiver
+ // itself. Do nothing then, as {workArrayLength} got initialized with the
+ // {length} property.
+ if (elementsLength != 0) {
+ workArrayLength = IntPtrMin(workArrayLength, elementsLength);
+ }
+ }
+ label NoJsObject {}
+
+ return workArrayLength;
}
transitioning macro NewSortState(implicit context: Context)(
receiver: JSReceiver, comparefn: Undefined | Callable,
- initialReceiverLength: Number, sortLength: Smi,
- forceGeneric: constexpr bool): SortState {
+ initialReceiverLength: Number): SortState {
const sortComparePtr =
comparefn != Undefined ? SortCompareUserFn : SortCompareDefault;
const map = receiver.map;
- let loadFn = Load<GenericElementsAccessor>;
- let storeFn = Store<GenericElementsAccessor>;
- let canUseSameAccessorFn = CanUseSameAccessor<GenericElementsAccessor>;
+ let loadFn: LoadFn;
+ let storeFn: StoreFn;
+ let deleteFn: DeleteFn;
+ let canUseSameAccessorFn: CanUseSameAccessorFn;
try {
- if constexpr (!forceGeneric) {
- GotoIfForceSlowPath() otherwise Slow;
- let a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
-
- const elementsKind: ElementsKind = map.elements_kind;
- if (IsDoubleElementsKind(elementsKind)) {
- loadFn = Load<FastDoubleElements>;
- storeFn = Store<FastDoubleElements>;
- canUseSameAccessorFn = CanUseSameAccessor<FastDoubleElements>;
- } else if (elementsKind == PACKED_SMI_ELEMENTS) {
- loadFn = Load<FastPackedSmiElements>;
- storeFn = Store<FastPackedSmiElements>;
- canUseSameAccessorFn = CanUseSameAccessor<FastPackedSmiElements>;
- } else {
- loadFn = Load<FastSmiOrObjectElements>;
- storeFn = Store<FastSmiOrObjectElements>;
- canUseSameAccessorFn = CanUseSameAccessor<FastSmiOrObjectElements>;
- }
+ GotoIfForceSlowPath() otherwise Slow;
+ let a: FastJSArray = Cast<FastJSArray>(receiver) otherwise Slow;
+
+ // Copy copy-on-write (COW) arrays.
+ array::EnsureWriteableFastElements(a);
+
+ const elementsKind: ElementsKind = map.elements_kind;
+ if (IsDoubleElementsKind(elementsKind)) {
+ loadFn = Load<FastDoubleElements>;
+ storeFn = Store<FastDoubleElements>;
+ deleteFn = Delete<FastDoubleElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<FastDoubleElements>;
+ } else if (IsFastSmiElementsKind(elementsKind)) {
+ loadFn = Load<FastSmiElements>;
+ storeFn = Store<FastSmiElements>;
+ deleteFn = Delete<FastSmiElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<FastSmiElements>;
+ } else {
+ loadFn = Load<FastObjectElements>;
+ storeFn = Store<FastObjectElements>;
+ deleteFn = Delete<FastObjectElements>;
+ canUseSameAccessorFn = CanUseSameAccessor<FastObjectElements>;
}
}
label Slow {
- if (map.elements_kind == DICTIONARY_ELEMENTS && IsExtensibleMap(map) &&
- !IsCustomElementsReceiverInstanceType(map.instance_type)) {
- loadFn = Load<DictionaryElements>;
- storeFn = Store<DictionaryElements>;
- canUseSameAccessorFn = CanUseSameAccessor<DictionaryElements>;
- }
+ loadFn = Load<GenericElementsAccessor>;
+ storeFn = Store<GenericElementsAccessor>;
+ deleteFn = Delete<GenericElementsAccessor>;
+ canUseSameAccessorFn = CanUseSameAccessor<GenericElementsAccessor>;
}
+ const workArrayLength =
+ CalculateWorkArrayLength(receiver, initialReceiverLength);
+
return new SortState{
receiver,
initialReceiverMap: map,
@@ -141,17 +185,18 @@ namespace array {
sortComparePtr,
loadFn,
storeFn,
+ deleteFn,
canUseSameAccessorFn,
- bailoutStatus: kSuccess,
minGallop: kMinGallopWins,
pendingRunsSize: 0,
pendingRuns: AllocateZeroedFixedArray(Convert<intptr>(kMaxMergePending)),
- workArray: AllocateZeroedFixedArray(Convert<intptr>(sortLength)),
- tempArray: kEmptyFixedArray
+ workArray: AllocateZeroedFixedArray(workArrayLength),
+ tempArray: kEmptyFixedArray,
+ sortLength: 0,
+ numberOfUndefined: 0
};
}
- const kFailure: Smi = -1;
const kSuccess: Smi = 0;
// The maximum number of entries in a SortState's pending-runs stack.
@@ -171,6 +216,7 @@ namespace array {
type LoadFn = builtin(Context, SortState, Smi) => Object;
type StoreFn = builtin(Context, SortState, Smi, Object) => Smi;
+ type DeleteFn = builtin(Context, SortState, Smi) => Smi;
type CanUseSameAccessorFn = builtin(Context, JSReceiver, Object, Number) =>
Boolean;
type CompareBuiltinFn = builtin(Context, Object, Object, Object) => Number;
@@ -183,28 +229,23 @@ namespace array {
transitioning builtin Load<ElementsAccessor: type>(
context: Context, sortState: SortState, index: Smi): Object {
- return GetProperty(sortState.receiver, index);
+ const receiver = sortState.receiver;
+ if (!HasProperty_Inline(receiver, index)) return Hole;
+ return GetProperty(receiver, index);
}
- Load<FastPackedSmiElements>(
- context: Context, sortState: SortState, index: Smi): Object {
+ Load<FastSmiElements>(context: Context, sortState: SortState, index: Smi):
+ Object {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
return elements.objects[index];
}
- Load<FastSmiOrObjectElements>(
- context: Context, sortState: SortState, index: Smi): Object {
+ Load<FastObjectElements>(context: Context, sortState: SortState, index: Smi):
+ Object {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
- const result: Object = elements.objects[index];
- if (IsTheHole(result)) {
- // The pre-processing step removed all holes by compacting all elements
- // at the start of the array. Finding a hole means the cmp function or
- // ToString changes the array.
- return Failure(sortState);
- }
- return result;
+ return elements.objects[index];
}
Load<FastDoubleElements>(context: Context, sortState: SortState, index: Smi):
@@ -212,28 +253,11 @@ namespace array {
try {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedDoubleArray>(object.elements);
- const value = LoadDoubleWithHoleCheck(elements, index) otherwise Bailout;
+ const value = LoadDoubleWithHoleCheck(elements, index) otherwise IfHole;
return AllocateHeapNumberWithValue(value);
}
- label Bailout {
- // The pre-processing step removed all holes by compacting all elements
- // at the start of the array. Finding a hole means the cmp function or
- // ToString changes the array.
- return Failure(sortState);
- }
- }
-
- Load<DictionaryElements>(context: Context, sortState: SortState, index: Smi):
- Object {
- try {
- const object = UnsafeCast<JSObject>(sortState.receiver);
- const dictionary = UnsafeCast<NumberDictionary>(object.elements);
- const intptrIndex = Convert<intptr>(index);
- return BasicLoadNumberDictionaryElement(dictionary, intptrIndex)
- otherwise Bailout, Bailout;
- }
- label Bailout {
- return Failure(sortState);
+ label IfHole {
+ return Hole;
}
}
@@ -243,15 +267,16 @@ namespace array {
return kSuccess;
}
- Store<FastPackedSmiElements>(
+ Store<FastSmiElements>(
context: Context, sortState: SortState, index: Smi, value: Object): Smi {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
- StoreFixedArrayElementSmi(elements, index, value, SKIP_WRITE_BARRIER);
+ const value = UnsafeCast<Smi>(value);
+ StoreFixedArrayElement(elements, index, value, SKIP_WRITE_BARRIER);
return kSuccess;
}
- Store<FastSmiOrObjectElements>(
+ Store<FastObjectElements>(
context: Context, sortState: SortState, index: Smi, value: Object): Smi {
const object = UnsafeCast<JSObject>(sortState.receiver);
const elements = UnsafeCast<FixedArray>(object.elements);
@@ -269,26 +294,42 @@ namespace array {
return kSuccess;
}
- Store<DictionaryElements>(
- context: Context, sortState: SortState, index: Smi, value: Object): Smi {
+ transitioning builtin Delete<ElementsAccessor: type>(
+ context: Context, sortState: SortState, index: Smi): Smi {
+ const receiver = sortState.receiver;
+ if (!HasProperty_Inline(receiver, index)) return kSuccess;
+ DeleteProperty(receiver, index, kSloppy);
+ return kSuccess;
+ }
+
+ Delete<FastSmiElements>(context: Context, sortState: SortState, index: Smi):
+ Smi {
+ assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+
const object = UnsafeCast<JSObject>(sortState.receiver);
- const dictionary = UnsafeCast<NumberDictionary>(object.elements);
- const intptrIndex = Convert<intptr>(index);
- try {
- BasicStoreNumberDictionaryElement(dictionary, intptrIndex, value)
- otherwise Fail, Fail, ReadOnly;
- return kSuccess;
- }
- label ReadOnly {
- // We cannot write to read-only data properties. Throw the same TypeError
- // as SetProperty would.
- const receiver = sortState.receiver;
- ThrowTypeError(
- kStrictReadOnlyProperty, index, Typeof(receiver), receiver);
- }
- label Fail {
- return Failure(sortState);
- }
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ elements.objects[index] = Hole;
+ return kSuccess;
+ }
+
+ Delete<FastObjectElements>(
+ context: Context, sortState: SortState, index: Smi): Smi {
+ assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedArray>(object.elements);
+ elements.objects[index] = Hole;
+ return kSuccess;
+ }
+
+ Delete<FastDoubleElements>(
+ context: Context, sortState: SortState, index: Smi): Smi {
+ assert(IsHoleyFastElementsKind(sortState.receiver.map.elements_kind));
+
+ const object = UnsafeCast<JSObject>(sortState.receiver);
+ const elements = UnsafeCast<FixedDoubleArray>(object.elements);
+ StoreFixedDoubleArrayHoleSmi(elements, index);
+ return kSuccess;
}
transitioning builtin SortCompareDefault(
@@ -354,12 +395,6 @@ namespace array {
return True;
}
- CanUseSameAccessor<DictionaryElements>(
- context: Context, receiver: JSReceiver, initialReceiverMap: Object,
- initialReceiverLength: Number): Boolean {
- return SelectBooleanConstant(receiver.map == initialReceiverMap);
- }
-
// Re-loading the stack-size is done in a few places. The small macro allows
// for easier invariant checks at all use sites.
macro GetPendingRunsSize(implicit context: Context)(sortState: SortState):
@@ -418,36 +453,6 @@ namespace array {
return tempArray;
}
- // This macro jumps to the Bailout label iff kBailoutStatus is kFailure.
- macro EnsureSuccess(implicit context: Context)(sortState:
- SortState) labels Bailout {
- if (sortState.bailoutStatus == kFailure) goto Bailout;
- }
-
- // Sets kBailoutStatus to kFailure and returns kFailure.
- macro Failure(sortState: SortState): Smi {
- sortState.bailoutStatus = kFailure;
- return kFailure;
- }
-
- // The following Call* macros wrap builtin calls, making call sites more
- // readable since we can use labels and do not have to check kBailoutStatus
- // or the return value.
-
- macro CallLoad(implicit context: Context, sortState: SortState)(
- load: LoadFn, index: Smi): Object
- labels Bailout {
- const result: Object = load(context, sortState, index);
- EnsureSuccess(sortState) otherwise Bailout;
- return result;
- }
-
- macro CallStore(implicit context: Context, sortState: SortState)(
- store: StoreFn, index: Smi, value: Object) labels Bailout {
- store(context, sortState, index, value);
- EnsureSuccess(sortState) otherwise Bailout;
- }
-
transitioning builtin
Copy(implicit context: Context)(
source: FixedArray, srcPos: Smi, target: FixedArray, dstPos: Smi,
@@ -1267,49 +1272,87 @@ namespace array {
}
transitioning macro
- CopyReceiverElementsToWorkArray(
- implicit context: Context, sortState: SortState)(length: Smi) {
- // TODO(szuend): Investigate if we can use COW arrays or a memcpy + range
- // barrier to speed this step up.
- let loadFn = sortState.loadFn;
- const workArray = sortState.workArray;
+ CompactReceiverElementsIntoWorkArray(
+ implicit context: Context, sortState: SortState)(): Smi {
+ let growableWorkArray = growable_fixed_array::GrowableFixedArray{
+ array: sortState.workArray,
+ capacity: Convert<intptr>(sortState.workArray.length),
+ length: 0
+ };
- for (let i: Smi = 0; i < length; ++i) {
- try {
- workArray.objects[i] = CallLoad(loadFn, i) otherwise Bailout;
- }
- label Bailout deferred {
- sortState.ResetToGenericAccessor();
- loadFn = sortState.loadFn;
- workArray.objects[i] = CallLoad(loadFn, i) otherwise unreachable;
+ const loadFn = sortState.loadFn;
+
+ // TODO(szuend): Implement full range sorting, not only up to MaxSmi.
+ // https://crbug.com/v8/7970.
+ const receiverLength: Number = sortState.initialReceiverLength;
+ assert(IsNumberNormalized(receiverLength));
+
+ const sortLength: Smi = TaggedIsSmi(receiverLength) ?
+ UnsafeCast<Smi>(receiverLength) :
+ Convert<PositiveSmi>(kSmiMax) otherwise unreachable;
+
+ // Move all non-undefined elements into {sortState.workArray}, holes
+ // are ignored.
+ let numberOfUndefined: Smi = 0;
+ for (let i: Smi = 0; i < receiverLength; ++i) {
+ const element: Object = loadFn(context, sortState, i);
+
+ if (element == Hole) {
+ // Do nothing for holes. The result is that elements are
+ // compacted at the front of the work array.
+ } else if (element == Undefined) {
+ numberOfUndefined++;
+ } else {
+ growableWorkArray.Push(element);
}
}
+
+ // Reset the workArray on the frameState, as it may have grown.
+ sortState.workArray = growableWorkArray.array;
+ sortState.sortLength = sortLength;
+ sortState.numberOfUndefined = numberOfUndefined;
+
+ return Convert<Smi>(growableWorkArray.length);
}
transitioning macro
CopyWorkArrayToReceiver(implicit context: Context, sortState: SortState)(
- length: Smi) {
- // TODO(szuend): Build fast-path that simply installs the work array as the
- // new backing store where applicable.
- let storeFn = sortState.storeFn;
+ numberOfNonUndefined: Smi) {
+ const storeFn = sortState.storeFn;
const workArray = sortState.workArray;
- for (let i: Smi = 0; i < length; ++i) {
- try {
- CallStore(storeFn, i, workArray.objects[i]) otherwise Bailout;
- }
- label Bailout deferred {
- sortState.ResetToGenericAccessor();
- storeFn = sortState.storeFn;
- CallStore(storeFn, i, workArray.objects[i]) otherwise unreachable;
- }
+ assert(numberOfNonUndefined <= workArray.length);
+ assert(
+ numberOfNonUndefined + sortState.numberOfUndefined <=
+ sortState.sortLength);
+
+ // Writing the elements back is a 3 step process:
+ // 1. Copy the sorted elements from the workarray to the receiver.
+ // 2. Add {nOfUndefined} undefineds to the receiver.
+ // 3. Depending on the backing store either delete properties or
+ // set them to the Hole up to {sortState.sortLength}.
+ let index: Smi = 0;
+ for (; index < numberOfNonUndefined; ++index) {
+ storeFn(context, sortState, index, workArray.objects[index]);
+ }
+
+ const numberOfUndefinedEnd: Smi =
+ sortState.numberOfUndefined + numberOfNonUndefined;
+ for (; index < numberOfUndefinedEnd; ++index) {
+ storeFn(context, sortState, index, Undefined);
+ }
+
+ const end: Smi = sortState.sortLength;
+ const deleteFn = sortState.deleteFn;
+ for (; index < end; ++index) {
+ deleteFn(context, sortState, index);
}
}
transitioning builtin
- ArrayTimSort(context: Context, sortState: SortState, length: Smi): Object {
- CopyReceiverElementsToWorkArray(length);
- ArrayTimSortImpl(context, sortState, length);
+ ArrayTimSort(context: Context, sortState: SortState): Object {
+ const numberOfNonUndefined: Smi = CompactReceiverElementsIntoWorkArray();
+ ArrayTimSortImpl(context, sortState, numberOfNonUndefined);
try {
// The comparison function or toString might have changed the
@@ -1320,24 +1363,10 @@ namespace array {
sortState.ResetToGenericAccessor();
}
- CopyWorkArrayToReceiver(length);
+ CopyWorkArrayToReceiver(numberOfNonUndefined);
return kSuccess;
}
- // For compatibility with JSC, we also sort elements inherited from
- // the prototype chain on non-Array objects.
- // We do this by copying them to this object and sorting only
- // own elements. This is not very efficient, but sorting with
- // inherited elements happens very, very rarely, if at all.
- // The specification allows "implementation dependent" behavior
- // if an element on the prototype chain has an element that
- // might interact with sorting.
- //
- // We also move all non-undefined elements to the front of the
- // array and move the undefineds after that. Holes are removed.
- // This happens for Array as well as non-Array objects.
- extern runtime PrepareElementsForSort(Context, Object, Number): Smi;
-
// https://tc39.github.io/ecma262/#sec-array.prototype.sort
transitioning javascript builtin
ArrayPrototypeSort(context: Context, receiver: Object, ...arguments): Object {
@@ -1355,16 +1384,8 @@ namespace array {
if (len < 2) return receiver;
- // TODO(szuend): Investigate performance tradeoff of skipping this step
- // for PACKED_* and handling Undefineds during sorting.
- const nofNonUndefined: Smi = PrepareElementsForSort(context, obj, len);
- assert(nofNonUndefined <= len);
-
- if (nofNonUndefined < 2) return receiver;
-
- const sortState: SortState =
- NewSortState(obj, comparefn, len, nofNonUndefined, false);
- ArrayTimSort(context, sortState, nofNonUndefined);
+ const sortState: SortState = NewSortState(obj, comparefn, len);
+ ArrayTimSort(context, sortState);
return receiver;
}
diff --git a/deps/v8/tools/BUILD.gn b/deps/v8/tools/BUILD.gn
index 7b019ad0b2..e6fd743715 100644
--- a/deps/v8/tools/BUILD.gn
+++ b/deps/v8/tools/BUILD.gn
@@ -43,6 +43,7 @@ group("v8_testrunner") {
testonly = true
data_deps = [
+ "..:v8_python_base",
"..:v8_dump_build_config",
":v8_android_test_runner_deps",
]
diff --git a/deps/v8/tools/OWNERS b/deps/v8/tools/OWNERS
index 85f514c4ab..bdb1d555a4 100644
--- a/deps/v8/tools/OWNERS
+++ b/deps/v8/tools/OWNERS
@@ -1,3 +1 @@
-machenbach@chromium.org
-sergiyb@chromium.org
-tmrts@chromium.org \ No newline at end of file
+file://INFRA_OWNERS
diff --git a/deps/v8/tools/bash-completion.sh b/deps/v8/tools/bash-completion.sh
index 5b9f7f5073..27e73b7ad6 100755
--- a/deps/v8/tools/bash-completion.sh
+++ b/deps/v8/tools/bash-completion.sh
@@ -37,11 +37,11 @@ v8_source=$(readlink -f $(dirname $BASH_SOURCE)/..)
_v8_flag() {
local cur defines targets
cur="${COMP_WORDS[COMP_CWORD]}"
- defines=$(cat $v8_source/src/flag-definitions.h \
+ defines=$(cat $v8_source/src/flags/flag-definitions.h \
| grep "^DEFINE" \
| grep -v "DEFINE_IMPLICATION" \
| sed -e 's/_/-/g'; \
- cat $v8_source/src/flag-definitions.h \
+ cat $v8_source/src/flags/flag-definitions.h \
| grep "^ V(harmony_" \
| sed -e 's/^ V/DEFINE-BOOL/' \
| sed -e 's/_/-/g')
@@ -49,7 +49,7 @@ _v8_flag() {
| sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
echo "$defines" \
| sed -ne 's/^DEFINE-BOOL(\([^,]*\).*/--no\1/p'; \
- cat $v8_source/src/d8.cc \
+ cat $v8_source/src/d8/d8.cc \
| grep "strcmp(argv\[i\]" \
| sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
COMPREPLY=($(compgen -W "$targets" -- "$cur"))
diff --git a/deps/v8/tools/cfi/blacklist.txt b/deps/v8/tools/cfi/blacklist.txt
index c1571b8c65..9886fd37fb 100644
--- a/deps/v8/tools/cfi/blacklist.txt
+++ b/deps/v8/tools/cfi/blacklist.txt
@@ -15,7 +15,7 @@ type:std::*
fun:*LocaleConvertCase*
# PropertyCallbackArguments::Call methods cast function pointers
-src:*src/api-arguments-inl.h
+src:*src/api/api-arguments-inl.h
# v8 callback that casts argument template parameters
fun:*PendingPhantomCallback*Invoke*
diff --git a/deps/v8/tools/check-static-initializers.sh b/deps/v8/tools/check-static-initializers.sh
index da43170f6e..fdd1e8417d 100755
--- a/deps/v8/tools/check-static-initializers.sh
+++ b/deps/v8/tools/check-static-initializers.sh
@@ -30,8 +30,8 @@
# initializer in d8 matches the one defined below.
# Allow:
-# - _GLOBAL__I__ZN2v810LineEditor6first_E
-# - _GLOBAL__I__ZN2v88internal32AtomicOps_Internalx86CPUFeaturesE
+# _GLOBAL__sub_I_d8.cc
+# _GLOBAL__sub_I_iostream.cpp
expected_static_init_count=2
v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
diff --git a/deps/v8/tools/clusterfuzz/OWNERS b/deps/v8/tools/clusterfuzz/OWNERS
index c8693c972c..50b5741785 100644
--- a/deps/v8/tools/clusterfuzz/OWNERS
+++ b/deps/v8/tools/clusterfuzz/OWNERS
@@ -1,5 +1,3 @@
set noparent
-machenbach@chromium.org
-sergiyb@chromium.org
-tmrts@chromium.org \ No newline at end of file
+file://INFRA_OWNERS
diff --git a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
index 27440742e8..dae84cbbb1 100644
--- a/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/failure_output.txt
@@ -9,9 +9,9 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---abort-on-stack-or-string-length-overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---abort-on-stack-or-string-length-overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
#
# Difference:
- unknown
diff --git a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
index 72bf95d0b1..fa3d672f00 100644
--- a/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
+++ b/deps/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
@@ -2,16 +2,16 @@
# V8 correctness failure
# V8 correctness configs: x64,ignition:x64,ignition_turbo
# V8 correctness sources: sanity check failed
-# V8 correctness suppression:
+# V8 correctness suppression:
#
# CHECK
#
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---abort-on-stack-or-string-length-overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
# Flags of x64,ignition_turbo:
---abort-on-stack-or-string-length-overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --stress-scavenge=100
#
# Difference:
- unknown
@@ -44,3 +44,4 @@ not unknown
### End of configuration x64,ignition_turbo
+
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 26b189e27f..159fea9496 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -43,7 +43,17 @@ CONFIGS = dict(
'--no-lazy',
'--no-lazy-inner-functions',
],
+ ignition_no_ic=[
+ '--turbo-filter=~',
+ '--noopt',
+ '--liftoff',
+ '--no-wasm-tier-up',
+ '--no-use-ic',
+ ],
ignition_turbo=[],
+ ignition_turbo_no_ic=[
+ '--no-use-ic',
+ ],
ignition_turbo_opt=[
'--always-opt',
'--no-liftoff',
@@ -86,6 +96,7 @@ ADDITIONAL_FLAGS = [
(0.01, '--thread-pool-size=2'),
(0.01, '--thread-pool-size=4'),
(0.01, '--thread-pool-size=8'),
+ (0.1, '--interrupt-budget=1000'),
]
# Timeout in seconds for one d8 run.
@@ -103,7 +114,7 @@ PREAMBLE = [
ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')
SANITY_CHECKS = os.path.join(BASE_PATH, 'v8_sanity_checks.js')
-FLAGS = ['--abort-on-stack-or-string-length-overflow', '--expose-gc',
+FLAGS = ['--correctness-fuzzer-suppressions', '--expose-gc',
'--allow-natives-syntax', '--invoke-weak-callbacks', '--omit-quit',
'--es-staging', '--no-wasm-async-compilation',
'--suppress-asm-messages']
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
index e9559f6e0c..b13d3d7677 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -39,7 +39,7 @@ class ConfigTest(unittest.TestCase):
self.assertEqual(
[
'--first-config=ignition',
- '--second-config=ignition_turbo',
+ '--second-config=ignition_turbo_no_ic',
'--second-d8=d8',
],
v8_fuzz_config.Config('foo', Rng()).choose_foozzie_flags(),
diff --git a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
index 39e983f74a..1cd353225b 100644
--- a/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
+++ b/deps/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -11,8 +11,11 @@ FOOZZIE_EXPERIMENTS = [
[10, 'ignition', 'jitless', 'd8'],
[10, 'ignition', 'slow_path', 'd8'],
[5, 'ignition', 'slow_path_opt', 'd8'],
- [30, 'ignition', 'ignition_turbo', 'd8'],
- [20, 'ignition', 'ignition_turbo_opt', 'd8'],
+ [10, 'ignition', 'ignition_turbo', 'd8'],
+ [10, 'ignition_no_ic', 'ignition_turbo', 'd8'],
+ [10, 'ignition', 'ignition_turbo_no_ic', 'd8'],
+ [10, 'ignition', 'ignition_turbo_opt', 'd8'],
+ [10, 'ignition_no_ic', 'ignition_turbo_opt', 'd8'],
[5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
[5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
[5, 'ignition', 'ignition', 'clang_x86/d8'],
diff --git a/deps/v8/tools/clusterfuzz/v8_sanity_checks.js b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
index 2b7cb65a1b..1b682432ce 100644
--- a/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
+++ b/deps/v8/tools/clusterfuzz/v8_sanity_checks.js
@@ -17,6 +17,5 @@ print("https://crbug.com/935800");
function baz() {}
return {bar: baz};
}
- // TODO(mstarzinger): Uncomment once https://crbug.com/935800 is resolved.
- // print(Object.getOwnPropertyNames(foo().bar));
+ print(Object.getOwnPropertyNames(foo().bar));
})();
diff --git a/deps/v8/tools/codemap.js b/deps/v8/tools/codemap.js
index 4c185b0464..df6770f9a8 100644
--- a/deps/v8/tools/codemap.js
+++ b/deps/v8/tools/codemap.js
@@ -178,15 +178,6 @@ CodeMap.prototype.findInTree_ = function(tree, addr) {
return node && this.isAddressBelongsTo_(addr, node) ? node : null;
};
-/**
- * Embedded builtins are located in the shared library but should be attributed
- * according to the dynamically generated code-create events.
- *
- * @private
- */
-CodeMap.prototype.isIsolateIndependentBuiltin_ = function(entry) {
- return entry.type == "CPP" && /v8_\w*embedded_blob_/.test(entry.name);
-};
/**
* Finds a code entry that contains the specified address. Both static and
@@ -205,10 +196,7 @@ CodeMap.prototype.findAddress = function(addr) {
result = this.findInTree_(this.libraries_, addr);
if (!result) return null;
}
- if (!this.isIsolateIndependentBuiltin_(result.value)) {
- // Embedded builtins are handled in the following dynamic section.
- return { entry : result.value, offset : addr - result.key };
- }
+ return { entry : result.value, offset : addr - result.key };
}
var min = this.dynamics_.findMin();
var max = this.dynamics_.findMax();
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index bc808c31ae..0e01f4f8d7 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -43,8 +43,8 @@ MODES = ["release", "debug", "optdebug"]
# Modes that get built/run when you don't specify any.
DEFAULT_MODES = ["release", "debug"]
# Build targets that can be manually specified.
-TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "mkgrokdump",
- "generate-bytecode-expectations", "inspector-test"]
+TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "wasm_api_tests", "wee8",
+ "mkgrokdump", "generate-bytecode-expectations", "inspector-test"]
# Build targets that get built when you don't specify any (and specified tests
# don't imply any other targets).
DEFAULT_TARGETS = ["d8"]
@@ -64,13 +64,14 @@ ACTIONS = {
HELP = """<arch> can be any of: %(arches)s
<mode> can be any of: %(modes)s
<target> can be any of:
- - cctest, d8, unittests, v8_fuzzers (build respective binary)
+ - %(targets)s (build respective binary)
- all (build all binaries)
- tests (build test binaries)
- check (build test binaries, run most tests)
- checkall (build all binaries, run more tests)
""" % {"arches": " ".join(ARCHES),
- "modes": " ".join(MODES)}
+ "modes": " ".join(MODES),
+ "targets": ", ".join(TARGETS)}
TESTSUITES_TARGETS = {"benchmarks": "d8",
"cctest": "cctest",
@@ -84,6 +85,7 @@ TESTSUITES_TARGETS = {"benchmarks": "d8",
"preparser": "d8",
"test262": "d8",
"unittests": "unittests",
+ "wasm-api-tests": "wasm_api_tests",
"webkit": "d8"}
OUTDIR = "out"
diff --git a/deps/v8/tools/dumpcpp-driver.js b/deps/v8/tools/dumpcpp-driver.js
index 44527771e4..6073dea738 100644
--- a/deps/v8/tools/dumpcpp-driver.js
+++ b/deps/v8/tools/dumpcpp-driver.js
@@ -39,7 +39,8 @@ if (params.sourceMap) {
}
var cppProcessor = new CppProcessor(
- new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
+ new (entriesProviders[params.platform])(params.nm, params.targetRootFS,
+ params.apkEmbeddedLibrary),
params.timedRange, params.pairwiseTimedRange);
cppProcessor.processLogFile(params.logFileName);
cppProcessor.dumpCppSymbols();
diff --git a/deps/v8/tools/gcmole/BUILD.gn b/deps/v8/tools/gcmole/BUILD.gn
index f10667e6c2..2ef4472207 100644
--- a/deps/v8/tools/gcmole/BUILD.gn
+++ b/deps/v8/tools/gcmole/BUILD.gn
@@ -24,6 +24,8 @@ group("v8_run_gcmole") {
"../../testing/gtest/include/gtest/gtest_prod.h",
"../../third_party/googletest/src/googletest/include/gtest/gtest_prod.h",
"../../third_party/icu/source/",
+ "../../third_party/wasm-api/wasm.h",
+ "../../third_party/wasm-api/wasm.hh",
"$target_gen_dir/../../",
"$target_gen_dir/../../torque-generated/",
]
diff --git a/deps/v8/tools/gcmole/README b/deps/v8/tools/gcmole/README
index 7e25da3aa1..578ea56219 100644
--- a/deps/v8/tools/gcmole/README
+++ b/deps/v8/tools/gcmole/README
@@ -27,7 +27,7 @@ PREREQUISITES -----------------------------------------------------------------
Follow the instructions on http://clang.llvm.org/get_started.html.
- Make sure to pass -DCMAKE_BUILD_TYPE=Release to cmake to get Release build
+ Make sure to pass -DCMAKE_BUILD_TYPE=Release to cmake to get Release build
instead of a Debug one.
(3) Build gcmole Clang plugin (libgcmole.so)
diff --git a/deps/v8/tools/gcmole/gcmole-test.cc b/deps/v8/tools/gcmole/gcmole-test.cc
index b0a341bb55..c00c6e5539 100644
--- a/deps/v8/tools/gcmole/gcmole-test.cc
+++ b/deps/v8/tools/gcmole/gcmole-test.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/handles-inl.h"
-#include "src/handles.h"
-#include "src/isolate.h"
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/handles.h"
#include "src/objects/maybe-object.h"
#include "src/objects/object-macros.h"
diff --git a/deps/v8/tools/gcmole/package.sh b/deps/v8/tools/gcmole/package.sh
index 6206e7bb2e..6206e7bb2e 100644..100755
--- a/deps/v8/tools/gcmole/package.sh
+++ b/deps/v8/tools/gcmole/package.sh
diff --git a/deps/v8/tools/gcmole/run-gcmole.py b/deps/v8/tools/gcmole/run-gcmole.py
index 76a6b55d44..6f2a091c3c 100755
--- a/deps/v8/tools/gcmole/run-gcmole.py
+++ b/deps/v8/tools/gcmole/run-gcmole.py
@@ -21,7 +21,7 @@ BASE_PATH = os.path.dirname(os.path.dirname(GCMOLE_PATH))
assert len(sys.argv) == 2
-if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-from-dsl.h"):
+if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-tq.h"):
print("Expected generated headers in out/Release/gen.")
print("Either build v8 in out/Release or change gcmole.lua:115")
sys.exit(-1)
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index 6c3778fca7..a91554c3fa 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -163,3 +163,88 @@ def dcheck_stop_handler(event):
gdb.events.stop.connect(dcheck_stop_handler)
end
+
+# Code imported from chromium/src/tools/gdb/gdbinit
+python
+
+import os
+import subprocess
+import sys
+
+compile_dirs = set()
+
+
+def get_current_debug_file_directories():
+ dir = gdb.execute("show debug-file-directory", to_string=True)
+ dir = dir[
+ len('The directory where separate debug symbols are searched for is "'
+ ):-len('".') - 1]
+ return set(dir.split(":"))
+
+
+def add_debug_file_directory(dir):
+ # gdb has no function to add debug-file-directory, simulates that by using
+ # `show debug-file-directory` and `set debug-file-directory <directories>`.
+ current_dirs = get_current_debug_file_directories()
+ current_dirs.add(dir)
+ gdb.execute(
+ "set debug-file-directory %s" % ":".join(current_dirs), to_string=True)
+
+
+def load_libcxx_pretty_printers(src_dir):
+ libcxx_pretty_printers = os.path.join(src_dir, 'third_party',
+ 'libcxx-pretty-printers')
+ if not os.path.isdir(libcxx_pretty_printers):
+ return
+ sys.path.insert(1, libcxx_pretty_printers)
+ from printers import register_libcxx_printers
+ register_libcxx_printers(None)
+
+
+def load_gdb_chrome(src_dir):
+ tools_gdb = os.path.join(src_dir, 'tools', 'gdb')
+
+ sys.path.insert(1, tools_gdb)
+ import gdb_chrome
+
+ gdb.execute('source %s' % os.path.join(tools_gdb, 'viewg.gdb'))
+
+
+def newobj_handler(event):
+ global compile_dirs
+ compile_dir = os.path.dirname(event.new_objfile.filename)
+ if not compile_dir:
+ return
+ if compile_dir in compile_dirs:
+ return
+ compile_dirs.add(compile_dir)
+
+ # Add source path
+ gdb.execute("dir %s" % compile_dir)
+
+ # Need to tell the location of .dwo files.
+ # https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html
+ # https://crbug.com/603286#c35
+ add_debug_file_directory(compile_dir)
+
+ git = subprocess.Popen(
+ ['git', '-C', compile_dir, 'rev-parse', '--show-toplevel'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ src_dir, _ = git.communicate()
+ if git.returncode:
+ return
+ src_dir = str(src_dir).rstrip()
+
+ load_libcxx_pretty_printers(src_dir)
+
+ load_gdb_chrome(src_dir)
+
+
+# Event hook for newly loaded objfiles.
+# https://sourceware.org/gdb/onlinedocs/gdb/Events-In-Python.html
+gdb.events.new_objfile.connect(newobj_handler)
+
+gdb.execute("set environment CHROMIUM_GDBINIT_SOURCED=1")
+
+end
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 48265b2418..1c10eb4443 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -85,10 +85,6 @@ consts_misc = [
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
{ 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
- { 'name': 'SystemPointerSize', 'value': 'kSystemPointerSize' },
- { 'name': 'SystemPointerSizeLog2', 'value': 'kSystemPointerSizeLog2' },
- { 'name': 'TaggedSize', 'value': 'kTaggedSize' },
- { 'name': 'TaggedSizeLog2', 'value': 'kTaggedSizeLog2' },
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
@@ -165,8 +161,6 @@ consts_misc = [
'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
{ 'name': 'bit_field3_number_of_own_descriptors_shift',
'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
- { 'name': 'class_Map__instance_descriptors_offset',
- 'value': 'Map::kDescriptorsOffset' },
{ 'name': 'off_fp_context_or_frame_type',
'value': 'CommonFrameConstants::kContextOrFrameTypeOffset'},
@@ -246,7 +240,6 @@ extras_accessors = [
'JSObject, elements, Object, kElementsOffset',
'JSObject, internal_fields, uintptr_t, kHeaderSize',
'FixedArray, data, uintptr_t, kHeaderSize',
- 'FixedTypedArrayBase, external_pointer, uintptr_t, kExternalPointerOffset',
'JSArrayBuffer, backing_store, uintptr_t, kBackingStoreOffset',
'JSArrayBuffer, byte_length, size_t, kByteLengthOffset',
'JSArrayBufferView, byte_length, size_t, kByteLengthOffset',
@@ -304,11 +297,11 @@ header = '''
* This file is generated by %s. Do not edit directly.
*/
-#include "src/v8.h"
-#include "src/frames.h"
-#include "src/frames-inl.h" /* for architecture-specific frame constants */
-#include "src/contexts.h"
-#include "src/objects.h"
+#include "src/init/v8.h"
+#include "src/execution/frames.h"
+#include "src/execution/frames-inl.h" /* for architecture-specific frame constants */
+#include "src/objects/contexts.h"
+#include "src/objects/objects.h"
#include "src/objects/data-handler.h"
#include "src/objects/js-promise.h"
#include "src/objects/js-regexp-string-iterator.h"
@@ -408,7 +401,11 @@ def load_objects_from_file(objfilename, checktypes):
klass = match.group(1).strip();
pklass = match.group(2);
if (pklass):
- pklass = pklass.strip();
+ # Strip potential template arguments from parent
+ # class.
+ match = re.match(r'(\w+)(<.*>)?', pklass.strip());
+ pklass = match.group(1).strip();
+
klasses[klass] = { 'parent': pklass };
#
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index e5ee98794d..fa18d85bf5 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -30,10 +30,12 @@ V8_DIR = os.path.dirname(MY_DIR)
OUT_DIR = os.path.join(V8_DIR, 'check-header-includes')
AUTO_EXCLUDE = [
# flag-definitions.h needs a mode set for being included.
- 'src/flag-definitions.h',
+ 'src/flags/flag-definitions.h',
]
AUTO_EXCLUDE_PATTERNS = [
'src/base/atomicops_internals_.*',
+ # TODO(petermarshall): Enable once Perfetto is built by default.
+ 'src/libplatform/tracing/perfetto*',
] + [
# platform-specific headers
'\\b{}\\b'.format(p) for p in
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index 561e4547e1..b94f3add23 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -105,9 +105,9 @@ HEADER_TEMPLATE = """\
// want to make changes to this file you should either change the
// javascript source files or the GYP script.
-#include "src/v8.h"
+#include "src/init/v8.h"
#include "src/snapshot/natives.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
namespace v8 {
namespace internal {
@@ -245,7 +245,10 @@ def BuildMetadata(sources, source_bytes, native_type):
raw_sources = "".join(sources.modules)
# The sources are expected to be ASCII-only.
- assert not filter(lambda value: ord(value) >= 128, raw_sources)
+ try:
+ raw_sources.encode('ascii')
+ except UnicodeEncodeError:
+ assert False
# Loop over modules and build up indices into the source blob:
get_index_cases = []
@@ -300,8 +303,8 @@ def PutInt(blob_file, value):
def PutStr(blob_file, value):
- PutInt(blob_file, len(value));
- blob_file.write(value);
+ PutInt(blob_file, len(value.encode()))
+ blob_file.write(value.encode())
def WriteStartupBlob(sources, startup_blob):
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index 1466079e26..b79a380796 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -131,6 +131,8 @@ class MetaBuildWrapper(object):
subp.add_argument('output_path', nargs=1,
help='path to a file containing the output arguments '
'as a JSON object.')
+ subp.add_argument('--json-output',
+ help='Write errors to json.output')
subp.set_defaults(func=self.CmdAnalyze)
subp = subps.add_parser('export',
@@ -149,6 +151,8 @@ class MetaBuildWrapper(object):
subp.add_argument('--swarming-targets-file',
help='save runtime dependencies for targets listed '
'in file.')
+ subp.add_argument('--json-output',
+ help='Write errors to json.output')
subp.add_argument('path', nargs=1,
help='path to generate build into')
subp.set_defaults(func=self.CmdGen)
@@ -167,6 +171,12 @@ class MetaBuildWrapper(object):
help='look up the command for a given config or '
'builder')
AddCommonOptions(subp)
+ subp.add_argument('--quiet', default=False, action='store_true',
+ help='Print out just the arguments, '
+ 'do not emulate the output of the gen subcommand.')
+ subp.add_argument('--recursive', default=False, action='store_true',
+ help='Lookup arguments from imported files, '
+ 'implies --quiet')
subp.set_defaults(func=self.CmdLookup)
subp = subps.add_parser(
@@ -307,12 +317,15 @@ class MetaBuildWrapper(object):
def CmdLookup(self):
vals = self.Lookup()
- cmd = self.GNCmd('gen', '_path_')
- gn_args = self.GNArgs(vals)
- self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
- env = None
+ gn_args = self.GNArgs(vals, expand_imports=self.args.recursive)
+ if self.args.quiet or self.args.recursive:
+ self.Print(gn_args, end='')
+ else:
+ cmd = self.GNCmd('gen', '_path_')
+ self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
+ env = None
- self.PrintCmd(cmd, env)
+ self.PrintCmd(cmd, env)
return 0
def CmdRun(self):
@@ -702,8 +715,11 @@ class MetaBuildWrapper(object):
self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n')
cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
- ret, _, _ = self.Run(cmd)
+ ret, output, _ = self.Run(cmd)
if ret:
+ if self.args.json_output:
+ # write errors to json.output
+ self.WriteJSON({'output': output}, self.args.json_output)
# If `gn gen` failed, we should exit early rather than trying to
# generate isolates. Run() will have already logged any error output.
self.Print('GN gen failed: %d' % ret)
@@ -852,7 +868,7 @@ class MetaBuildWrapper(object):
return [gn_path, subcommand, path] + list(args)
- def GNArgs(self, vals):
+ def GNArgs(self, vals, expand_imports=False):
if vals['cros_passthrough']:
if not 'GN_ARGS' in os.environ:
raise MBErr('MB is expecting GN_ARGS to be in the environment')
@@ -874,15 +890,24 @@ class MetaBuildWrapper(object):
if android_version_name:
gn_args += ' android_default_version_name="%s"' % android_version_name
+ args_gn_lines = []
+ parsed_gn_args = {}
+
+ args_file = vals.get('args_file', None)
+ if args_file:
+ if expand_imports:
+ content = self.ReadFile(self.ToAbsPath(args_file))
+ parsed_gn_args = gn_helpers.FromGNArgs(content)
+ else:
+ args_gn_lines.append('import("%s")' % args_file)
+
# Canonicalize the arg string into a sorted, newline-separated list
# of key-value pairs, and de-dup the keys if need be so that only
# the last instance of each arg is listed.
- gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args))
+ parsed_gn_args.update(gn_helpers.FromGNArgs(gn_args))
+ args_gn_lines.append(gn_helpers.ToGNString(parsed_gn_args))
- args_file = vals.get('args_file', None)
- if args_file:
- gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
- return gn_args
+ return '\n'.join(args_gn_lines)
def ToAbsPath(self, build_path, *comps):
return self.PathJoin(self.chromium_src_dir,
@@ -949,8 +974,11 @@ class MetaBuildWrapper(object):
try:
self.WriteJSON(gn_inp, gn_input_path)
cmd = self.GNCmd('analyze', build_path, gn_input_path, gn_output_path)
- ret, _, _ = self.Run(cmd, force_verbose=True)
+ ret, output, _ = self.Run(cmd, force_verbose=True)
if ret:
+ if self.args.json_output:
+ # write errors to json.output
+ self.WriteJSON({'output': output}, self.args.json_output)
return ret
gn_outp_str = self.ReadFile(gn_output_path)
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
index 1889f18a3a..a22686a5ee 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -549,7 +549,18 @@ class UnitTest(unittest.TestCase):
'//out/Default', 'base_unittests'], mbw=mbw, ret=0)
def test_lookup(self):
- self.check(['lookup', '-c', 'debug_goma'], ret=0)
+ self.check(['lookup', '-c', 'debug_goma'], ret=0,
+ out=('\n'
+ 'Writing """\\\n'
+ 'is_debug = true\n'
+ 'use_goma = true\n'
+ '""" to _path_/args.gn.\n\n'
+ '/fake_src/buildtools/linux64/gn gen _path_\n'))
+
+ def test_quiet_lookup(self):
+ self.check(['lookup', '-c', 'debug_goma', '--quiet'], ret=0,
+ out=('is_debug = true\n'
+ 'use_goma = true\n'))
def test_lookup_goma_dir_expansion(self):
self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0,
@@ -597,6 +608,19 @@ class UnitTest(unittest.TestCase):
'--phase', 'phase_2'], ret=0)
self.assertIn('phase = 2', mbw.out)
+ def test_recursive_lookup(self):
+ files = {
+ '/fake_src/build/args/fake.gn': (
+ 'enable_doom_melon = true\n'
+ 'enable_antidoom_banana = true\n'
+ )
+ }
+ self.check(['lookup', '-m', 'fake_master', '-b', 'fake_args_file',
+ '--recursive'], files=files, ret=0,
+ out=('enable_antidoom_banana = true\n'
+ 'enable_doom_melon = true\n'
+ 'use_goma = true\n'))
+
def test_validate(self):
mbw = self.fake_mbw()
self.check(['validate'], mbw=mbw, ret=0)
diff --git a/deps/v8/tools/node/build_gn.py b/deps/v8/tools/node/build_gn.py
deleted file mode 100755
index 83071adbfe..0000000000
--- a/deps/v8/tools/node/build_gn.py
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Use this script to build libv8_monolith.a as dependency for Node.js
-Required dependencies can be fetched with fetch_deps.py.
-
-Usage: build_gn.py <Debug/Release> <v8-path> <build-path> [<build-flags>]...
-
-Build flags are passed either as "strings" or numeric value. True/false
-are represented as 1/0. E.g.
-
- v8_promise_internal_field_count=2
- target_cpu="x64"
- v8_enable_disassembler=0
-"""
-
-import argparse
-import os
-import subprocess
-import sys
-
-import node_common
-
-GN_ARGS = [
- "v8_monolithic=true",
- "is_component_build=false",
- "v8_use_external_startup_data=false",
- "use_custom_libcxx=false",
-]
-
-BUILD_TARGET = "v8_monolith"
-
-def FindTargetOs(flags):
- for flag in flags:
- if flag.startswith("target_os="):
- return flag[len("target_os="):].strip('"')
- raise Exception('No target_os was set.')
-
-def FindGn(options):
- if options.host_os == "linux":
- os_path = "linux64"
- elif options.host_os == "mac":
- os_path = "mac"
- elif options.host_os == "win":
- os_path = "win"
- else:
- raise "Operating system not supported by GN"
- return os.path.join(options.v8_path, "buildtools", os_path, "gn")
-
-def GenerateBuildFiles(options):
- gn = FindGn(options)
- gn_args = list(GN_ARGS)
- target_os = FindTargetOs(options.flag)
- if target_os != "win":
- gn_args.append("use_sysroot=false")
-
- for flag in options.flag:
- flag = flag.replace("=1", "=true")
- flag = flag.replace("=0", "=false")
- flag = flag.replace("target_cpu=ia32", "target_cpu=\"x86\"")
- gn_args.append(flag)
- if options.mode == "Debug":
- gn_args.append("is_debug=true")
- else:
- gn_args.append("is_debug=false")
-
- flattened_args = ' '.join(gn_args)
- if options.extra_gn_args:
- flattened_args += ' ' + options.extra_gn_args
-
- args = [gn, "gen", options.build_path, "-q", "--args=" + flattened_args]
- subprocess.check_call(args)
-
-def Build(options):
- depot_tools = node_common.EnsureDepotTools(options.v8_path, False)
- ninja = os.path.join(depot_tools, "ninja")
- if sys.platform == 'win32':
- # Required because there is an extension-less file called "ninja".
- ninja += ".exe"
- args = [ninja, "-C", options.build_path, BUILD_TARGET]
- if options.max_load:
- args += ["-l" + options.max_load]
- if options.max_jobs:
- args += ["-j" + options.max_jobs]
- else:
- with open(os.path.join(options.build_path, "args.gn")) as f:
- if "use_goma = true" in f.read():
- args += ["-j500"]
- subprocess.check_call(args)
-
-def ParseOptions(args):
- parser = argparse.ArgumentParser(
- description="Build %s with GN" % BUILD_TARGET)
- parser.add_argument("--mode", help="Build mode (Release/Debug)")
- parser.add_argument("--v8_path", help="Path to V8", required=True)
- parser.add_argument("--build_path", help="Path to build result",
- required=True)
- parser.add_argument("--flag", help="Translate GYP flag to GN",
- action="append")
- parser.add_argument("--host_os", help="Current operating system")
- parser.add_argument("--bundled-win-toolchain",
- help="Value for DEPOT_TOOLS_WIN_TOOLCHAIN")
- parser.add_argument("--bundled-win-toolchain-root",
- help="Value for DEPOT_TOOLS_WIN_TOOLCHAIN_ROOT")
- parser.add_argument("--depot-tools", help="Absolute path to depot_tools")
- parser.add_argument("--extra-gn-args", help="Additional GN args")
- parser.add_argument("--build", help="Run ninja as opposed to gn gen.",
- action="store_true")
- parser.add_argument("--max-jobs", help="ninja's -j parameter")
- parser.add_argument("--max-load", help="ninja's -l parameter")
- options = parser.parse_args(args)
-
- options.build_path = os.path.abspath(options.build_path)
-
- if not options.build:
- assert options.host_os
- assert options.mode == "Debug" or options.mode == "Release"
-
- options.v8_path = os.path.abspath(options.v8_path)
- assert os.path.isdir(options.v8_path)
-
- return options
-
-
-if __name__ == "__main__":
- options = ParseOptions(sys.argv[1:])
- # Build can result in running gn gen, so need to set environment variables
- # for build as well as generate.
- if options.bundled_win_toolchain:
- os.environ['DEPOT_TOOLS_WIN_TOOLCHAIN'] = options.bundled_win_toolchain
- if options.bundled_win_toolchain_root:
- os.environ['DEPOT_TOOLS_WIN_TOOLCHAIN_ROOT'] = (
- options.bundled_win_toolchain_root)
- if options.depot_tools:
- os.environ['PATH'] = (
- options.depot_tools + os.path.pathsep + os.environ['PATH'])
- if not options.build:
- GenerateBuildFiles(options)
- else:
- Build(options)
diff --git a/deps/v8/tools/node/test_update_node.py b/deps/v8/tools/node/test_update_node.py
deleted file mode 100755
index 785517b8c8..0000000000
--- a/deps/v8/tools/node/test_update_node.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import os
-import shutil
-import subprocess
-import sys
-import tempfile
-import unittest
-
-import update_node
-
-# Base paths.
-BASE_DIR = os.path.dirname(os.path.abspath(__file__))
-TEST_DATA = os.path.join(BASE_DIR, 'testdata')
-
-# Expectations.
-EXPECTED_GITIGNORE = """
-/third_party/googletest/*
-!/third_party/googletest/src
-/third_party/googletest/src/*
-!/third_party/googletest/src/googletest
-/third_party/googletest/src/googletest/*
-!/third_party/googletest/src/googletest/include
-/third_party/googletest/src/googletest/include/*
-!/third_party/googletest/src/googletest/include/gtest
-/third_party/googletest/src/googletest/include/gtest/*
-!/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
-!/third_party/jinja2
-!/third_party/markupsafe
-/unrelated
-"""
-
-EXPECTED_GIT_DIFF = """
- create mode 100644 deps/v8/base/trace_event/common/common
- rename deps/v8/baz/{delete_me => v8_new} (100%)
- delete mode 100644 deps/v8/include/v8-version.h
- rename deps/v8/{delete_me => new/v8_new} (100%)
- create mode 100644 deps/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
- create mode 100644 deps/v8/third_party/jinja2/jinja2
- create mode 100644 deps/v8/third_party/markupsafe/markupsafe
- create mode 100644 deps/v8/v8_new
-"""
-
-ADDED_FILES = [
- 'v8_new',
- 'new/v8_new',
- 'baz/v8_new',
- '/third_party/googletest/src/googletest/include/gtest/gtest_new',
- '/third_party/googletest/src/googletest/include/gtest/new/gtest_new',
- '/third_party/googletest/src/googletest/include/gtest/baz/gtest_new',
- 'third_party/jinja2/jinja2',
- 'third_party/markupsafe/markupsafe'
-]
-
-REMOVED_FILES = [
- 'delete_me',
- 'baz/delete_me',
- 'testing/gtest/delete_me',
- 'testing/gtest/baz/delete_me',
-]
-
-def gitify(path):
- files = os.listdir(path)
- subprocess.check_call(['git', 'init'], cwd=path)
- subprocess.check_call(['git', 'add'] + files, cwd=path)
- subprocess.check_call(['git', 'commit', '-m', 'Initial'], cwd=path)
-
-
-class TestUpdateNode(unittest.TestCase):
- def setUp(self):
- self.workdir = tempfile.mkdtemp(prefix='tmp_test_node_')
-
- def tearDown(self):
- shutil.rmtree(self.workdir)
-
- def testUpdate(self):
- v8_cwd = os.path.join(self.workdir, 'v8')
- node_cwd = os.path.join(self.workdir, 'node')
-
- # Set up V8 test fixture.
- shutil.copytree(src=os.path.join(TEST_DATA, 'v8'), dst=v8_cwd)
- gitify(v8_cwd)
- for repository in update_node.SUB_REPOSITORIES:
- gitify(os.path.join(v8_cwd, *repository))
-
- # Set up node test fixture.
- shutil.copytree(src=os.path.join(TEST_DATA, 'node'), dst=node_cwd)
- gitify(os.path.join(node_cwd))
-
- # Add a patch.
- with open(os.path.join(v8_cwd, 'v8_foo'), 'w') as f:
- f.write('zonk')
- subprocess.check_call(['git', 'add', 'v8_foo'], cwd=v8_cwd)
-
- # Run update script.
- update_node.Main([v8_cwd, node_cwd, "--commit", "--with-patch"])
-
- # Check expectations.
- with open(os.path.join(node_cwd, 'deps', 'v8', '.gitignore')) as f:
- actual_gitignore = f.read()
- self.assertEquals(EXPECTED_GITIGNORE.strip(), actual_gitignore.strip())
- for f in ADDED_FILES:
- added_file = os.path.join(node_cwd, 'deps', 'v8', *f.split('/'))
- self.assertTrue(os.path.exists(added_file))
- for f in REMOVED_FILES:
- removed_file = os.path.join(node_cwd, 'deps', 'v8', *f.split('/'))
- self.assertFalse(os.path.exists(removed_file))
- gitlog = subprocess.check_output(
- ['git', 'diff', 'master', '--summary'],
- cwd=node_cwd,
- )
- self.assertEquals(EXPECTED_GIT_DIFF.strip(), gitlog.strip())
-
- # Check patch.
- gitlog = subprocess.check_output(
- ['git', 'diff', 'master', '--cached', '--', 'deps/v8/v8_foo'],
- cwd=node_cwd,
- )
- self.assertIn('+zonk', gitlog.strip())
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/.gitignore b/deps/v8/tools/node/testdata/node/deps/v8/.gitignore
deleted file mode 100644
index 23c2024827..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-/unrelated
-/testing/gtest/*
-!/testing/gtest/include
-/testing/gtest/include/*
-!/testing/gtest/include/gtest
-/testing/gtest/include/gtest/*
-!/testing/gtest/include/gtest/gtest_prod.h
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me b/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/baz/delete_me
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo b/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/baz/v8_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/delete_me b/deps/v8/tools/node/testdata/node/deps/v8/delete_me
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/delete_me
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h b/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h
deleted file mode 100644
index fe8b2712e3..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/include/v8-version.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INCLUDE_VERSION_H_ // V8_VERSION_H_ conflicts with src/version.h
-#define V8_INCLUDE_VERSION_H_
-
-// These macros define the version number for the current version.
-// NOTE these macros are used by some of the tool scripts and the build
-// system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 1
-#define V8_MINOR_VERSION 2
-#define V8_BUILD_NUMBER 3
-#define V8_PATCH_LEVEL 4321
-
-// Use 1 for candidates and 0 otherwise.
-// (Boolean macro values are not supported by all preprocessors.)
-#define V8_IS_CANDIDATE_VERSION 0
-
-#endif // V8_INCLUDE_VERSION_H_
diff --git a/deps/v8/tools/node/testdata/node/deps/v8/v8_foo b/deps/v8/tools/node/testdata/node/deps/v8/v8_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/node/deps/v8/v8_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/.gitignore b/deps/v8/tools/node/testdata/v8/.gitignore
deleted file mode 100644
index cc2f1ca202..0000000000
--- a/deps/v8/tools/node/testdata/v8/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-/unrelated
-/third_party/jinja2
-/third_party/markupsafe
diff --git a/deps/v8/tools/node/testdata/v8/base/trace_event/common/common b/deps/v8/tools/node/testdata/v8/base/trace_event/common/common
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/tools/node/testdata/v8/base/trace_event/common/common
+++ /dev/null
diff --git a/deps/v8/tools/node/testdata/v8/baz/v8_foo b/deps/v8/tools/node/testdata/v8/baz/v8_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/baz/v8_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/baz/v8_new b/deps/v8/tools/node/testdata/v8/baz/v8_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/baz/v8_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/new/v8_new b/deps/v8/tools/node/testdata/v8/new/v8_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/new/v8_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo b/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new b/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_bar b/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_bar
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_bar
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_new b/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/gtest_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new b/deps/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/v8_foo b/deps/v8/tools/node/testdata/v8/v8_foo
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/v8_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/testdata/v8/v8_new b/deps/v8/tools/node/testdata/v8/v8_new
deleted file mode 100644
index eb1ae458f8..0000000000
--- a/deps/v8/tools/node/testdata/v8/v8_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/deps/v8/tools/node/update_node.py b/deps/v8/tools/node/update_node.py
deleted file mode 100755
index 2ebf799c5e..0000000000
--- a/deps/v8/tools/node/update_node.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Use this script to update V8 in a Node.js checkout.
-
-Requirements:
- - Node.js checkout in which V8 should be updated.
- - V8 checkout at the commit to which Node.js should be updated.
-
-Usage:
- $ update_node.py <path_to_v8> <path_to_node>
-
- This will synchronize the content of <path_to_node>/deps/v8 with <path_to_v8>,
- and a few V8 dependencies require in Node.js. It will also update .gitignore
- appropriately.
-
-Optional flags:
- --gclient Run `gclient sync` on the V8 checkout before updating.
- --commit Create commit with the updated V8 in the Node.js checkout.
- --with-patch Also include currently staged files in the V8 checkout.
-"""
-
-# for py2/py3 compatibility
-from __future__ import print_function
-
-import argparse
-import os
-import shutil
-import subprocess
-import sys
-import stat
-import node_common
-
-TARGET_SUBDIR = os.path.join("deps", "v8")
-
-SUB_REPOSITORIES = [ ["base", "trace_event", "common"],
- ["third_party", "googletest", "src"] ]
-
-DELETE_FROM_GITIGNORE = [ "/base",
- "/third_party/googletest/src" ]
-
-# Node.js requires only a single header file from gtest to build V8.
-ADD_TO_GITIGNORE = [ "/third_party/googletest/*",
- "!/third_party/googletest/BUILD.gn",
- "!/third_party/googletest/src",
- "/third_party/googletest/src/*",
- "!/third_party/googletest/src/googletest",
- "/third_party/googletest/src/googletest/*",
- "!/third_party/googletest/src/googletest/include",
- "/third_party/googletest/src/googletest/include/*",
- "!/third_party/googletest/src/googletest/include/gtest",
- "/third_party/googletest/src/googletest/include/gtest/*",
- "!/third_party/googletest/src/googletest/include/gtest/gtest_prod.h" ]
-
-# Node.js owns deps/v8/gypfiles in their downstream repository.
-FILES_TO_KEEP = [ "gypfiles" ]
-
-def RunGclient(path):
- assert os.path.isdir(path)
- print(">> Running gclient sync")
- subprocess.check_call(["gclient", "sync", "--nohooks"], cwd=path)
-
-def CommitPatch(options):
- """Makes a dummy commit for the changes in the index.
-
- On trybots, bot_updated applies the patch to the index. We commit it to make
- the fake git clone fetch it into node.js. We can leave the commit, as
- bot_update will ensure a clean state on each run.
- """
- print(">> Committing patch")
- subprocess.check_call(
- ["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
- "commit", "--allow-empty", "-m", "placeholder-commit"],
- cwd=options.v8_path,
- )
-
-def UpdateTarget(repository, options, files_to_keep):
- source = os.path.join(options.v8_path, *repository)
- target = os.path.join(options.node_path, TARGET_SUBDIR, *repository)
- print(">> Updating target directory %s" % target)
- print(">> from active branch at %s" % source)
- if not os.path.exists(target):
- os.makedirs(target)
- # Remove possible remnants of previous incomplete runs.
- node_common.UninitGit(target)
-
- git_args = []
- git_args.append(["init"]) # initialize target repo
-
- if files_to_keep:
- git_args.append(["add"] + files_to_keep) # add and commit
- git_args.append(["commit", "-m", "keep files"]) # files we want to keep
-
- git_args.append(["clean", "-fxd"]) # nuke everything else
- git_args.append(["remote", "add", "source", source]) # point to source repo
- git_args.append(["fetch", "source", "HEAD"]) # sync to current branch
- git_args.append(["checkout", "-f", "FETCH_HEAD"]) # switch to that branch
- git_args.append(["clean", "-fxd"]) # delete removed files
-
- if files_to_keep:
- git_args.append(["cherry-pick", "master"]) # restore kept files
-
- try:
- for args in git_args:
- subprocess.check_call(["git"] + args, cwd=target)
- except:
- raise
- finally:
- node_common.UninitGit(target)
-
-def UpdateGitIgnore(options):
- file_name = os.path.join(options.node_path, TARGET_SUBDIR, ".gitignore")
- assert os.path.isfile(file_name)
- print(">> Updating .gitignore with lines")
- with open(file_name) as gitignore:
- content = gitignore.readlines()
- content = [x.strip() for x in content]
- for x in DELETE_FROM_GITIGNORE:
- if x in content:
- print("- %s" % x)
- content.remove(x)
- for x in ADD_TO_GITIGNORE:
- if x not in content:
- print("+ %s" % x)
- content.append(x)
- content.sort(key=lambda x: x[1:] if x.startswith("!") else x)
- with open(file_name, "w") as gitignore:
- for x in content:
- gitignore.write("%s\n" % x)
-
-def CreateCommit(options):
- print(">> Creating commit.")
- # Find git hash from source.
- githash = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"],
- cwd=options.v8_path).strip()
- # Create commit at target.
- git_commands = [
- ["git", "checkout", "-b", "update_v8_to_%s" % githash], # new branch
- ["git", "add", "."], # add files
- ["git", "commit", "-m", "Update V8 to %s" % githash] # new commit
- ]
- for command in git_commands:
- subprocess.check_call(command, cwd=options.node_path)
-
-def ParseOptions(args):
- parser = argparse.ArgumentParser(description="Update V8 in Node.js")
- parser.add_argument("v8_path", help="Path to V8 checkout")
- parser.add_argument("node_path", help="Path to Node.js checkout")
- parser.add_argument("--gclient", action="store_true", help="Run gclient sync")
- parser.add_argument("--commit", action="store_true", help="Create commit")
- parser.add_argument("--with-patch", action="store_true",
- help="Apply also staged files")
- options = parser.parse_args(args)
- assert os.path.isdir(options.v8_path)
- options.v8_path = os.path.abspath(options.v8_path)
- assert os.path.isdir(options.node_path)
- options.node_path = os.path.abspath(options.node_path)
- return options
-
-def Main(args):
- options = ParseOptions(args)
- if options.gclient:
- RunGclient(options.v8_path)
- # Commit patch on trybots to main V8 repository.
- if options.with_patch:
- CommitPatch(options)
- # Update main V8 repository.
- UpdateTarget([""], options, FILES_TO_KEEP)
- # Patch .gitignore before updating sub-repositories.
- UpdateGitIgnore(options)
- for repo in SUB_REPOSITORIES:
- UpdateTarget(repo, options, None)
- if options.commit:
- CreateCommit(options)
-
-if __name__ == "__main__":
- Main(sys.argv[1:])
diff --git a/deps/v8/tools/profviz/worker.js b/deps/v8/tools/profviz/worker.js
index 7f163088e4..95ed40b89b 100644
--- a/deps/v8/tools/profviz/worker.js
+++ b/deps/v8/tools/profviz/worker.js
@@ -100,7 +100,7 @@ function run(args) {
var profile = "";
print = function(text) { profile += text + "\n"; };
// Dummy entries provider, as we cannot call nm.
- var entriesProvider = new UnixCppEntriesProvider("", "");
+ var entriesProvider = new UnixCppEntriesProvider("", "", "");
var targetRootFS = "";
var separateIc = false;
var callGraphSize = 5;
diff --git a/deps/v8/tools/run-wasm-api-tests.py b/deps/v8/tools/run-wasm-api-tests.py
index 46e13d3255..79f53cb927 100644..100755
--- a/deps/v8/tools/run-wasm-api-tests.py
+++ b/deps/v8/tools/run-wasm-api-tests.py
@@ -30,7 +30,7 @@ import shutil
import subprocess
import sys
-CFLAGS = "-DDEBUG -Wall -Werror -O0 -fsanitize=address"
+CFLAGS = "-DDEBUG -Wall -Werror -O0 -ggdb -fsanitize=address"
CHECKOUT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
WASM_PATH = os.path.join(CHECKOUT_PATH, "third_party", "wasm-api")
@@ -87,12 +87,12 @@ class Runner(object):
dst_wasm_file = self.dst_file_basename + ".wasm"
shutil.copyfile(src_wasm_file, dst_wasm_file)
- def _Error(self, step, lang, compiler):
+ def _Error(self, step, lang, compiler, code):
print("Error: %s failed. To repro: tools/run-wasm-api-tests.py "
"%s %s %s %s %s" %
(step, self.outdir, self.tempdir, self.name, lang,
compiler["name"].lower()))
-
+ return code
def CompileAndRun(self, compiler, language):
print("==== %s %s/%s ====" %
@@ -104,15 +104,15 @@ class Runner(object):
# Compile.
c = _Call([compiler[lang], "-c", language["cflags"], CFLAGS,
"-I", WASM_PATH, "-o", obj_file, src_file])
- if c: return self._Error("compilation", lang, compiler)
+ if c: return self._Error("compilation", lang, compiler, c)
# Link.
c = _Call([compiler["cc"], CFLAGS, compiler["ldflags"], obj_file,
"-o", exe_file, self.lib_file, "-ldl -pthread"])
- if c: return self._Error("linking", lang, compiler)
+ if c: return self._Error("linking", lang, compiler, c)
# Execute.
exe_file = "./%s-%s" % (self.name, lang)
c = _Call(["cd", self.tempdir, ";", exe_file])
- if c: return self._Error("execution", lang, compiler)
+ if c: return self._Error("execution", lang, compiler, c)
return 0
def Main(args):
@@ -157,6 +157,10 @@ def Main(args):
for language in languages:
c = runner.CompileAndRun(compiler, language)
if c: result = c
+ if result:
+ print("\nFinished with errors.")
+ else:
+ print("\nFinished successfully.")
return result
if __name__ == "__main__":
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 9e05be99e5..419cc47847 100755..100644
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -108,27 +107,30 @@ from __future__ import print_function
from functools import reduce
from collections import OrderedDict
-import datetime
+import copy
import json
import logging
import math
-import optparse
+import argparse
import os
import re
import subprocess
import sys
+import time
import traceback
+import numpy
+
from testrunner.local import android
from testrunner.local import command
from testrunner.local import utils
+from testrunner.objects.output import Output, NULL_OUTPUT
try:
basestring # Python 2
except NameError: # Python 3
basestring = str
-ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ['arm',
'ia32',
'mips',
@@ -141,6 +143,7 @@ RESULT_STDDEV_RE = re.compile(r'^\{([^\}]+)\}$')
RESULT_LIST_RE = re.compile(r'^\[([^\]]+)\]$')
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
INFRA_FAILURE_RETCODE = 87
+MIN_RUNS_FOR_CONFIDENCE = 10
def GeometricMean(values):
@@ -149,116 +152,130 @@ def GeometricMean(values):
The mean is calculated using log to avoid overflow.
"""
values = map(float, values)
- return str(math.exp(sum(map(math.log, values)) / len(values)))
-
-
-class TestFailedError(Exception):
- """Error raised when a test has failed due to a non-infra issue."""
- pass
-
-
-class Results(object):
- """Place holder for result traces."""
- def __init__(self, traces=None, errors=None):
- self.traces = traces or []
- self.errors = errors or []
- self.timeouts = []
- self.near_timeouts = [] # > 90% of the max runtime
+ return math.exp(sum(map(math.log, values)) / len(values))
+
+
+class ResultTracker(object):
+ """Class that tracks trace/runnable results and produces script output.
+
+ The output is structured like this:
+ {
+ "traces": [
+ {
+ "graphs": ["path", "to", "trace", "config"],
+ "units": <string describing units, e.g. "ms" or "KB">,
+ "results": [<list of values measured over several runs>],
+ "stddev": <stddev of the value if measure by script or ''>
+ },
+ ...
+ ],
+ "runnables": [
+ {
+ "graphs": ["path", "to", "runnable", "config"],
+ "durations": [<list of durations of each runnable run in seconds>],
+ "timeout": <timeout configured for runnable in seconds>,
+ },
+ ...
+ ],
+ "errors": [<list of strings describing errors>],
+ }
+ """
+ def __init__(self):
+ self.traces = {}
+ self.errors = []
+ self.runnables = {}
+
+ def AddTraceResult(self, trace, result, stddev):
+ if trace.name not in self.traces:
+ self.traces[trace.name] = {
+ 'graphs': trace.graphs,
+ 'units': trace.units,
+ 'results': [result],
+ 'stddev': stddev or '',
+ }
+ else:
+ existing_entry = self.traces[trace.name]
+ assert trace.graphs == existing_entry['graphs']
+ assert trace.units == existing_entry['units']
+ if stddev:
+ existing_entry['stddev'] = stddev
+ existing_entry['results'].append(result)
+
+ def TraceHasStdDev(self, trace):
+ return trace.name in self.traces and self.traces[trace.name]['stddev'] != ''
+
+ def AddError(self, error):
+ self.errors.append(error)
+
+ def AddRunnableDuration(self, runnable, duration):
+ """Records a duration of a specific run of the runnable."""
+ if runnable.name not in self.runnables:
+ self.runnables[runnable.name] = {
+ 'graphs': runnable.graphs,
+ 'durations': [duration],
+ 'timeout': runnable.timeout,
+ }
+ else:
+ existing_entry = self.runnables[runnable.name]
+ assert runnable.timeout == existing_entry['timeout']
+ assert runnable.graphs == existing_entry['graphs']
+ existing_entry['durations'].append(duration)
def ToDict(self):
return {
- 'traces': self.traces,
+ 'traces': self.traces.values(),
'errors': self.errors,
- 'timeouts': self.timeouts,
- 'near_timeouts': self.near_timeouts,
+ 'runnables': self.runnables.values(),
}
def WriteToFile(self, file_name):
with open(file_name, 'w') as f:
f.write(json.dumps(self.ToDict()))
- def __add__(self, other):
- self.traces += other.traces
- self.errors += other.errors
- self.timeouts += other.timeouts
- self.near_timeouts += other.near_timeouts
- return self
-
- def __str__(self): # pragma: no cover
- return str(self.ToDict())
-
-
-class Measurement(object):
- """Represents a series of results of one trace.
-
- The results are from repetitive runs of the same executable. They are
- gathered by repeated calls to ConsumeOutput.
- """
- def __init__(self, graphs, units, results_regexp, stddev_regexp):
- self.name = '/'.join(graphs)
- self.graphs = graphs
- self.units = units
- self.results_regexp = results_regexp
- self.stddev_regexp = stddev_regexp
- self.results = []
- self.errors = []
- self.stddev = ''
- self.process_size = False
-
- def ConsumeOutput(self, stdout):
- try:
- result = re.search(self.results_regexp, stdout, re.M).group(1)
- self.results.append(str(float(result)))
- except ValueError:
- self.errors.append('Regexp "%s" returned a non-numeric for test %s.'
- % (self.results_regexp, self.name))
- except:
- self.errors.append('Regexp "%s" did not match for test %s.'
- % (self.results_regexp, self.name))
-
- try:
- if self.stddev_regexp and self.stddev:
- self.errors.append('Test %s should only run once since a stddev '
- 'is provided by the test.' % self.name)
- if self.stddev_regexp:
- self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
- except:
- self.errors.append('Regexp "%s" did not match for test %s.'
- % (self.stddev_regexp, self.name))
+ def HasEnoughRuns(self, graph_config, confidence_level):
+ """Checks if the mean of the results for a given trace config is within
+ 0.1% of the true value with the specified confidence level.
- def GetResults(self):
- return Results([{
- 'graphs': self.graphs,
- 'units': self.units,
- 'results': self.results,
- 'stddev': self.stddev,
- }], self.errors)
+ This assumes Gaussian distribution of the noise and based on
+ https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule.
+ Args:
+ graph_config: An instance of GraphConfig.
+ confidence_level: Number of standard deviations from the mean that all
+ values must lie within. Typical values are 1, 2 and 3 and correspond
+ to 68%, 95% and 99.7% probability that the measured value is within
+ 0.1% of the true value.
+
+ Returns:
+ True if specified confidence level have been achieved.
+ """
+ if not isinstance(graph_config, TraceConfig):
+ return all(self.HasEnoughRuns(child, confidence_level)
+ for child in graph_config.children)
-class NullMeasurement(object):
- """Null object to avoid having extra logic for configurations that don't
- require secondary run, e.g. CI bots.
- """
- def ConsumeOutput(self, stdout):
- pass
+ trace = self.traces.get(graph_config.name, {})
+ results = trace.get('results', [])
+ logging.debug('HasEnoughRuns for %s', graph_config.name)
- def GetResults(self):
- return Results()
+ if len(results) < MIN_RUNS_FOR_CONFIDENCE:
+ logging.debug(' Ran %d times, need at least %d',
+ len(results), MIN_RUNS_FOR_CONFIDENCE)
+ return False
+ logging.debug(' Results: %d entries', len(results))
+ mean = numpy.mean(results)
+ mean_stderr = numpy.std(results) / numpy.sqrt(len(results))
+ logging.debug(' Mean: %.2f, mean_stderr: %.2f', mean, mean_stderr)
+ return confidence_level * mean_stderr < mean / 1000.0
-def Unzip(iterable):
- left = []
- right = []
- for l, r in iterable:
- left.append(l)
- right.append(r)
- return lambda: iter(left), lambda: iter(right)
+ def __str__(self): # pragma: no cover
+ return json.dumps(self.ToDict(), indent=2, separators=(',', ': '))
-def RunResultsProcessor(results_processor, stdout, count):
+def RunResultsProcessor(results_processor, output, count):
# Dummy pass through for null-runs.
- if stdout is None:
- return None
+ if output.stdout is None:
+ return output
# We assume the results processor is relative to the suite.
assert os.path.exists(results_processor)
@@ -268,112 +285,10 @@ def RunResultsProcessor(results_processor, stdout, count):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
- result, _ = p.communicate(input=stdout)
- logging.info('>>> Processed stdout (#%d):\n%s', count, result)
- return result
-
-
-def AccumulateResults(
- graph_names, trace_configs, iter_output, perform_measurement, calc_total):
- """Iterates over the output of multiple benchmark reruns and accumulates
- results for a configured list of traces.
-
- Args:
- graph_names: List of names that configure the base path of the traces. E.g.
- ['v8', 'Octane'].
- trace_configs: List of 'TraceConfig' instances. Each trace config defines
- how to perform a measurement.
- iter_output: Iterator over the standard output of each test run.
- perform_measurement: Whether to actually run tests and perform measurements.
- This is needed so that we reuse this script for both CI
- and trybot, but want to ignore second run on CI without
- having to spread this logic throughout the script.
- calc_total: Boolean flag to speficy the calculation of a summary trace.
- Returns: A 'Results' object.
- """
- measurements = [
- trace.CreateMeasurement(perform_measurement) for trace in trace_configs]
- for stdout in iter_output():
- for measurement in measurements:
- measurement.ConsumeOutput(stdout)
-
- res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())
-
- if not res.traces or not calc_total:
- return res
-
- # Assume all traces have the same structure.
- if len(set(map(lambda t: len(t['results']), res.traces))) != 1:
- res.errors.append('Not all traces have the same number of results.')
- return res
-
- # Calculate the geometric means for all traces. Above we made sure that
- # there is at least one trace and that the number of results is the same
- # for each trace.
- n_results = len(res.traces[0]['results'])
- total_results = [GeometricMean(t['results'][i] for t in res.traces)
- for i in range(0, n_results)]
- res.traces.append({
- 'graphs': graph_names + ['Total'],
- 'units': res.traces[0]['units'],
- 'results': total_results,
- 'stddev': '',
- })
- return res
-
-
-def AccumulateGenericResults(graph_names, suite_units, iter_output):
- """Iterates over the output of multiple benchmark reruns and accumulates
- generic results.
-
- Args:
- graph_names: List of names that configure the base path of the traces. E.g.
- ['v8', 'Octane'].
- suite_units: Measurement default units as defined by the benchmark suite.
- iter_output: Iterator over the standard output of each test run.
- Returns: A 'Results' object.
- """
- traces = OrderedDict()
- for stdout in iter_output():
- if stdout is None:
- # The None value is used as a null object to simplify logic.
- continue
- for line in stdout.strip().splitlines():
- match = GENERIC_RESULTS_RE.match(line)
- if match:
- stddev = ''
- graph = match.group(1)
- trace = match.group(2)
- body = match.group(3)
- units = match.group(4)
- match_stddev = RESULT_STDDEV_RE.match(body)
- match_list = RESULT_LIST_RE.match(body)
- errors = []
- if match_stddev:
- result, stddev = map(str.strip, match_stddev.group(1).split(','))
- results = [result]
- elif match_list:
- results = map(str.strip, match_list.group(1).split(','))
- else:
- results = [body.strip()]
-
- try:
- results = map(lambda r: str(float(r)), results)
- except ValueError:
- results = []
- errors = ['Found non-numeric in %s' %
- '/'.join(graph_names + [graph, trace])]
-
- trace_result = traces.setdefault(trace, Results([{
- 'graphs': graph_names + [graph, trace],
- 'units': (units or suite_units).strip(),
- 'results': [],
- 'stddev': '',
- }], errors))
- trace_result.traces[0]['results'].extend(results)
- trace_result.traces[0]['stddev'] = stddev
-
- return reduce(lambda r, t: r + t, traces.itervalues(), Results())
+ new_output = copy.copy(output)
+ new_output.stdout, _ = p.communicate(input=output.stdout)
+ logging.info('>>> Processed stdout (#%d):\n%s', count, output.stdout)
+ return new_output
class Node(object):
@@ -384,6 +299,10 @@ class Node(object):
def AppendChild(self, child):
self._children.append(child)
+ @property
+ def children(self):
+ return self._children
+
class DefaultSentinel(Node):
"""Fake parent node with all default values."""
@@ -392,7 +311,7 @@ class DefaultSentinel(Node):
self.binary = binary
self.run_count = 10
self.timeout = 60
- self.retry_count = 0
+ self.retry_count = 4
self.path = []
self.graphs = []
self.flags = []
@@ -465,6 +384,10 @@ class GraphConfig(Node):
stddev_default = None
self.stddev_regexp = suite.get('stddev_regexp', stddev_default)
+ @property
+ def name(self):
+ return '/'.join(self.graphs)
+
class TraceConfig(GraphConfig):
"""Represents a leaf in the suite tree structure."""
@@ -473,16 +396,46 @@ class TraceConfig(GraphConfig):
assert self.results_regexp
assert self.owners
- def CreateMeasurement(self, perform_measurement):
- if not perform_measurement:
- return NullMeasurement()
+ def ConsumeOutput(self, output, result_tracker):
+ """Extracts trace results from the output.
+
+ Args:
+ output: Output object from the test run.
+ result_tracker: Result tracker to be updated.
+
+ Returns:
+ The raw extracted result value or None if an error occurred.
+ """
+ result = None
+ stddev = None
+
+ try:
+ result = float(
+ re.search(self.results_regexp, output.stdout, re.M).group(1))
+ except ValueError:
+ result_tracker.AddError(
+ 'Regexp "%s" returned a non-numeric for test %s.' %
+ (self.results_regexp, self.name))
+ except:
+ result_tracker.AddError(
+ 'Regexp "%s" did not match for test %s.' %
+ (self.results_regexp, self.name))
- return Measurement(
- self.graphs,
- self.units,
- self.results_regexp,
- self.stddev_regexp,
- )
+ try:
+ if self.stddev_regexp:
+ if result_tracker.TraceHasStdDev(self):
+ result_tracker.AddError(
+ 'Test %s should only run once since a stddev is provided by the '
+ 'test.' % self.name)
+ stddev = re.search(self.stddev_regexp, output.stdout, re.M).group(1)
+ except:
+ result_tracker.AddError(
+ 'Regexp "%s" did not match for test %s.' %
+ (self.stddev_regexp, self.name))
+
+ if result:
+ result_tracker.AddTraceResult(self, result, stddev)
+ return result
class RunnableConfig(GraphConfig):
@@ -490,22 +443,12 @@ class RunnableConfig(GraphConfig):
"""
def __init__(self, suite, parent, arch):
super(RunnableConfig, self).__init__(suite, parent, arch)
- self.has_timeouts = False
- self.has_near_timeouts = False
+ self.arch = arch
@property
def main(self):
return self._suite.get('main', '')
- def PostProcess(self, stdouts_iter):
- if self.results_processor:
- def it():
- for i, stdout in enumerate(stdouts_iter()):
- yield RunResultsProcessor(self.results_processor, stdout, i + 1)
- return it
- else:
- return stdouts_iter
-
def ChangeCWD(self, suite_path):
"""Changes the cwd to to path defined in the current graph.
@@ -537,25 +480,36 @@ class RunnableConfig(GraphConfig):
args=self.GetCommandFlags(extra_flags=extra_flags),
timeout=self.timeout or 60)
- def Run(self, runner, trybot):
- """Iterates over several runs and handles the output for all traces."""
- stdout, stdout_secondary = Unzip(runner())
- return (
- AccumulateResults(
- self.graphs,
- self._children,
- iter_output=self.PostProcess(stdout),
- perform_measurement=True,
- calc_total=self.total,
- ),
- AccumulateResults(
- self.graphs,
- self._children,
- iter_output=self.PostProcess(stdout_secondary),
- perform_measurement=trybot, # only run second time on trybots
- calc_total=self.total,
- ),
- )
+ def ProcessOutput(self, output, result_tracker, count):
+ """Processes test run output and updates result tracker.
+
+ Args:
+ output: Output object from the test run.
+ result_tracker: ResultTracker object to be updated.
+ count: Index of the test run (used for better logging).
+ """
+ if self.results_processor:
+ output = RunResultsProcessor(self.results_processor, output, count)
+
+ results_for_total = []
+ for trace in self.children:
+ result = trace.ConsumeOutput(output, result_tracker)
+ if result:
+ results_for_total.append(result)
+
+ if self.total:
+ # Produce total metric only when all traces have produced results.
+ if len(self.children) != len(results_for_total):
+ result_tracker.AddError(
+ 'Not all traces have produced results. Can not compute total for '
+ '%s.' % self.name)
+ return
+
+ # Calculate total as a the geometric mean for results from all traces.
+ total_trace = TraceConfig(
+ {'name': 'Total', 'units': self.children[0].units}, self, self.arch)
+ result_tracker.AddTraceResult(
+ total_trace, GeometricMean(results_for_total), '')
class RunnableTraceConfig(TraceConfig, RunnableConfig):
@@ -563,30 +517,9 @@ class RunnableTraceConfig(TraceConfig, RunnableConfig):
def __init__(self, suite, parent, arch):
super(RunnableTraceConfig, self).__init__(suite, parent, arch)
- def Run(self, runner, trybot):
- """Iterates over several runs and handles the output."""
- measurement = self.CreateMeasurement(perform_measurement=True)
- measurement_secondary = self.CreateMeasurement(perform_measurement=trybot)
- for stdout, stdout_secondary in runner():
- measurement.ConsumeOutput(stdout)
- measurement_secondary.ConsumeOutput(stdout_secondary)
- return (
- measurement.GetResults(),
- measurement_secondary.GetResults(),
- )
-
-
-class RunnableGenericConfig(RunnableConfig):
- """Represents a runnable suite definition with generic traces."""
- def __init__(self, suite, parent, arch):
- super(RunnableGenericConfig, self).__init__(suite, parent, arch)
-
- def Run(self, runner, trybot):
- stdout, stdout_secondary = Unzip(runner())
- return (
- AccumulateGenericResults(self.graphs, self.units, stdout),
- AccumulateGenericResults(self.graphs, self.units, stdout_secondary),
- )
+ def ProcessOutput(self, output, result_tracker, count):
+ result_tracker.AddRunnableDuration(self, output.duration)
+ self.ConsumeOutput(output, result_tracker)
def MakeGraphConfig(suite, arch, parent):
@@ -602,10 +535,6 @@ def MakeGraphConfig(suite, arch, parent):
else:
# This graph has no subgraphs, it's a leaf.
return RunnableTraceConfig(suite, parent, arch)
- elif suite.get('generic'):
- # This is a generic suite definition. It is either a runnable executable
- # or has a main js file.
- return RunnableGenericConfig(suite, parent, arch)
elif suite.get('tests'):
# This is neither a leaf nor a runnable.
return GraphConfig(suite, parent, arch)
@@ -645,74 +574,85 @@ def FlattenRunnables(node, node_cb):
class Platform(object):
- def __init__(self, options):
- self.shell_dir = options.shell_dir
- self.shell_dir_secondary = options.shell_dir_secondary
- self.extra_flags = options.extra_flags.split()
- self.options = options
+ def __init__(self, args):
+ self.shell_dir = args.shell_dir
+ self.shell_dir_secondary = args.shell_dir_secondary
+ self.extra_flags = args.extra_flags.split()
+ self.args = args
@staticmethod
- def ReadBuildConfig(options):
- config_path = os.path.join(options.shell_dir, 'v8_build_config.json')
+ def ReadBuildConfig(args):
+ config_path = os.path.join(args.shell_dir, 'v8_build_config.json')
if not os.path.isfile(config_path):
return {}
with open(config_path) as f:
return json.load(f)
@staticmethod
- def GetPlatform(options):
- if Platform.ReadBuildConfig(options).get('is_android', False):
- return AndroidPlatform(options)
+ def GetPlatform(args):
+ if Platform.ReadBuildConfig(args).get('is_android', False):
+ return AndroidPlatform(args)
else:
- return DesktopPlatform(options)
+ return DesktopPlatform(args)
def _Run(self, runnable, count, secondary=False):
raise NotImplementedError() # pragma: no cover
- def _TimedRun(self, runnable, count, secondary=False):
- runnable_start_time = datetime.datetime.utcnow()
- stdout = self._Run(runnable, count, secondary)
- runnable_duration = datetime.datetime.utcnow() - runnable_start_time
- if runnable_duration.total_seconds() > 0.9 * runnable.timeout:
- runnable.has_near_timeouts = True
- return stdout
+ def _LoggedRun(self, runnable, count, secondary=False):
+ suffix = ' - secondary' if secondary else ''
+ title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
+ try:
+ output = self._Run(runnable, count, secondary)
+ except OSError:
+ logging.exception(title % 'OSError')
+ raise
+ if output.stdout:
+ logging.info(title % 'Stdout' + '\n%s', output.stdout)
+ if output.stderr: # pragma: no cover
+ # Print stderr for debugging.
+ logging.info(title % 'Stderr' + '\n%s', output.stderr)
+ logging.warning('>>> Test timed out after %ss.', runnable.timeout)
+ if output.exit_code != 0:
+ logging.warning('>>> Test crashed with exit code %d.', output.exit_code)
+ return output
- def Run(self, runnable, count):
+ def Run(self, runnable, count, secondary):
"""Execute the benchmark's main file.
- If options.shell_dir_secondary is specified, the benchmark is run twice,
- e.g. with and without patch.
Args:
runnable: A Runnable benchmark instance.
count: The number of this (repeated) run.
- Returns: A tuple with the two benchmark outputs. The latter will be None if
- options.shell_dir_secondary was not specified.
+ secondary: True if secondary run should be executed.
+
+ Returns:
+ A tuple with the two benchmark outputs. The latter will be NULL_OUTPUT if
+ secondary is False.
"""
- stdout = self._TimedRun(runnable, count, secondary=False)
- if self.shell_dir_secondary:
- return stdout, self._TimedRun(runnable, count, secondary=True)
+ output = self._LoggedRun(runnable, count, secondary=False)
+ if secondary:
+ return output, self._LoggedRun(runnable, count, secondary=True)
else:
- return stdout, None
+ return output, NULL_OUTPUT
class DesktopPlatform(Platform):
- def __init__(self, options):
- super(DesktopPlatform, self).__init__(options)
+ def __init__(self, args):
+ super(DesktopPlatform, self).__init__(args)
self.command_prefix = []
# Setup command class to OS specific version.
- command.setup(utils.GuessOS(), options.device)
+ command.setup(utils.GuessOS(), args.device)
- if options.prioritize or options.affinitize != None:
+ if args.prioritize or args.affinitize != None:
self.command_prefix = ['schedtool']
- if options.prioritize:
+ if args.prioritize:
self.command_prefix += ['-n', '-20']
- if options.affinitize != None:
+ if args.affinitize != None:
# schedtool expects a bit pattern when setting affinity, where each
# bit set to '1' corresponds to a core where the process may run on.
# First bit corresponds to CPU 0. Since the 'affinitize' parameter is
# a core number, we need to map to said bit pattern.
- cpu = int(options.affinitize)
+ cpu = int(args.affinitize)
core = 1 << cpu
self.command_prefix += ['-a', ('0x%x' % core)]
self.command_prefix += ['-e']
@@ -728,28 +668,11 @@ class DesktopPlatform(Platform):
node.ChangeCWD(path)
def _Run(self, runnable, count, secondary=False):
- suffix = ' - secondary' if secondary else ''
shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
- title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
- try:
- output = cmd.execute()
- except OSError: # pragma: no cover
- logging.exception(title % 'OSError')
- raise
+ output = cmd.execute()
- logging.info(title % 'Stdout' + '\n%s', output.stdout)
- if output.stderr: # pragma: no cover
- # Print stderr for debugging.
- logging.info(title % 'Stderr' + '\n%s', output.stderr)
- if output.timed_out:
- logging.warning('>>> Test timed out after %ss.', runnable.timeout)
- runnable.has_timeouts = True
- raise TestFailedError()
- if output.exit_code != 0:
- logging.warning('>>> Test crashed.')
- raise TestFailedError()
- if '--prof' in self.extra_flags:
+ if output.IsSuccess() and '--prof' in self.extra_flags:
os_prefix = {'linux': 'linux', 'macos': 'mac'}.get(utils.GuessOS())
if os_prefix:
tick_tools = os.path.join(TOOLS_BASE, '%s-tick-processor' % os_prefix)
@@ -758,17 +681,17 @@ class DesktopPlatform(Platform):
logging.warning(
'Profiler option currently supported on Linux and Mac OS.')
- # time outputs to stderr
+ # /usr/bin/time outputs to stderr
if runnable.process_size:
- return output.stdout + output.stderr
- return output.stdout
+ output.stdout += output.stderr
+ return output
class AndroidPlatform(Platform): # pragma: no cover
- def __init__(self, options):
- super(AndroidPlatform, self).__init__(options)
- self.driver = android.android_driver(options.device)
+ def __init__(self, args):
+ super(AndroidPlatform, self).__init__(args)
+ self.driver = android.android_driver(args.device)
def PreExecution(self):
self.driver.set_high_perf_mode()
@@ -799,9 +722,7 @@ class AndroidPlatform(Platform): # pragma: no cover
self.driver.push_file(bench_abs, resource, bench_rel)
def _Run(self, runnable, count, secondary=False):
- suffix = ' - secondary' if secondary else ''
target_dir = 'bin_secondary' if secondary else 'bin'
- title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
self.driver.drop_ram_caches()
# Relative path to benchmark directory.
@@ -811,15 +732,17 @@ class AndroidPlatform(Platform): # pragma: no cover
bench_rel = '.'
logcat_file = None
- if self.options.dump_logcats_to:
+ if self.args.dump_logcats_to:
runnable_name = '-'.join(runnable.graphs)
logcat_file = os.path.join(
- self.options.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
+ self.args.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
runnable_name, count + 1, '-secondary' if secondary else ''))
logging.debug('Dumping logcat into %s', logcat_file)
+ output = Output()
+ start = time.time()
try:
- stdout = self.driver.run(
+ output.stdout = self.driver.run(
target_dir=target_dir,
binary=runnable.binary,
args=runnable.GetCommandFlags(self.extra_flags),
@@ -827,20 +750,17 @@ class AndroidPlatform(Platform): # pragma: no cover
timeout=runnable.timeout,
logcat_file=logcat_file,
)
- logging.info(title % 'Stdout' + '\n%s', stdout)
except android.CommandFailedException as e:
- logging.info(title % 'Stdout' + '\n%s', e.output)
- logging.warning('>>> Test crashed.')
- raise TestFailedError()
+ output.stdout = e.output
+ output.exit_code = e.status
except android.TimeoutException as e:
- if e.output is not None:
- logging.info(title % 'Stdout' + '\n%s', e.output)
- logging.warning('>>> Test timed out after %ss.', runnable.timeout)
- runnable.has_timeouts = True
- raise TestFailedError()
+ output.stdout = e.output
+ output.timed_out = True
if runnable.process_size:
- return stdout + 'MaxMemory: Unsupported'
- return stdout
+ output.stdout += 'MaxMemory: Unsupported'
+ output.duration = time.time() - start
+ return output
+
class CustomMachineConfiguration:
def __init__(self, disable_aslr = False, governor = None):
@@ -946,146 +866,164 @@ class CustomMachineConfiguration:
raise Exception('Could not set CPU governor. Present value is %s'
% cur_value )
-def Main(args):
- parser = optparse.OptionParser()
- parser.add_option('--android-build-tools', help='Deprecated.')
- parser.add_option('--arch',
- help=('The architecture to run tests for, '
- '"auto" or "native" for auto-detect'),
- default='x64')
- parser.add_option('--buildbot',
- help='Adapt to path structure used on buildbots and adds '
- 'timestamps/level to all logged status messages',
- default=False, action='store_true')
- parser.add_option('-d', '--device',
- help='The device ID to run Android tests on. If not given '
- 'it will be autodetected.')
- parser.add_option('--extra-flags',
- help='Additional flags to pass to the test executable',
- default='')
- parser.add_option('--json-test-results',
- help='Path to a file for storing json results.')
- parser.add_option('--json-test-results-secondary',
- '--json-test-results-no-patch', # TODO(sergiyb): Deprecate.
- help='Path to a file for storing json results from run '
- 'without patch or for reference build run.')
- parser.add_option('--outdir', help='Base directory with compile output',
- default='out')
- parser.add_option('--outdir-secondary',
- '--outdir-no-patch', # TODO(sergiyb): Deprecate.
- help='Base directory with compile output without patch or '
- 'for reference build')
- parser.add_option('--binary-override-path',
- help='JavaScript engine binary. By default, d8 under '
- 'architecture-specific build dir. '
- 'Not supported in conjunction with outdir-secondary.')
- parser.add_option('--prioritize',
- help='Raise the priority to nice -20 for the benchmarking '
- 'process.Requires Linux, schedtool, and sudo privileges.',
- default=False, action='store_true')
- parser.add_option('--affinitize',
- help='Run benchmarking process on the specified core. '
- 'For example: '
- '--affinitize=0 will run the benchmark process on core 0. '
- '--affinitize=3 will run the benchmark process on core 3. '
- 'Requires Linux, schedtool, and sudo privileges.',
- default=None)
- parser.add_option('--noaslr',
- help='Disable ASLR for the duration of the benchmarked '
- 'process. Requires Linux and sudo privileges.',
- default=False, action='store_true')
- parser.add_option('--cpu-governor',
- help='Set cpu governor to specified policy for the '
- 'duration of the benchmarked process. Typical options: '
- '"powersave" for more stable results, or "performance" '
- 'for shorter completion time of suite, with potentially '
- 'more noise in results.')
- parser.add_option('--filter',
- help='Only run the benchmarks beginning with this string. '
- 'For example: '
- '--filter=JSTests/TypedArrays/ will run only TypedArray '
- 'benchmarks from the JSTests suite.',
- default='')
- parser.add_option('--run-count-multiplier', default=1, type='int',
- help='Multipled used to increase number of times each test '
- 'is retried.')
- parser.add_option('--dump-logcats-to',
- help='Writes logcat output from each test into specified '
- 'directory. Only supported for android targets.')
-
- (options, args) = parser.parse_args(args)
- logging.basicConfig(
- level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s')
+class MaxTotalDurationReachedError(Exception):
+ """Exception used to stop running tests when max total duration is reached."""
+ pass
- if len(args) == 0: # pragma: no cover
- parser.print_help()
- return INFRA_FAILURE_RETCODE
- if options.arch in ['auto', 'native']: # pragma: no cover
- options.arch = ARCH_GUESS
+def Main(argv):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--arch',
+ help='The architecture to run tests for. Pass "auto" '
+ 'to auto-detect.', default='x64',
+ choices=SUPPORTED_ARCHS + ['auto'])
+ parser.add_argument('--buildbot',
+ help='Adapt to path structure used on buildbots and adds '
+ 'timestamps/level to all logged status messages',
+ default=False, action='store_true')
+ parser.add_argument('-d', '--device',
+ help='The device ID to run Android tests on. If not '
+ 'given it will be autodetected.')
+ parser.add_argument('--extra-flags',
+ help='Additional flags to pass to the test executable',
+ default='')
+ parser.add_argument('--json-test-results',
+ help='Path to a file for storing json results.')
+ parser.add_argument('--json-test-results-secondary',
+ help='Path to a file for storing json results from run '
+ 'without patch or for reference build run.')
+ parser.add_argument('--outdir', help='Base directory with compile output',
+ default='out')
+ parser.add_argument('--outdir-secondary',
+ help='Base directory with compile output without patch '
+ 'or for reference build')
+ parser.add_argument('--binary-override-path',
+ help='JavaScript engine binary. By default, d8 under '
+ 'architecture-specific build dir. '
+ 'Not supported in conjunction with outdir-secondary.')
+ parser.add_argument('--prioritize',
+ help='Raise the priority to nice -20 for the '
+ 'benchmarking process.Requires Linux, schedtool, and '
+ 'sudo privileges.', default=False, action='store_true')
+ parser.add_argument('--affinitize',
+ help='Run benchmarking process on the specified core. '
+ 'For example: --affinitize=0 will run the benchmark '
+ 'process on core 0. --affinitize=3 will run the '
+ 'benchmark process on core 3. Requires Linux, schedtool, '
+ 'and sudo privileges.', default=None)
+ parser.add_argument('--noaslr',
+ help='Disable ASLR for the duration of the benchmarked '
+ 'process. Requires Linux and sudo privileges.',
+ default=False, action='store_true')
+ parser.add_argument('--cpu-governor',
+ help='Set cpu governor to specified policy for the '
+ 'duration of the benchmarked process. Typical options: '
+ '"powersave" for more stable results, or "performance" '
+ 'for shorter completion time of suite, with potentially '
+ 'more noise in results.')
+ parser.add_argument('--filter',
+ help='Only run the benchmarks beginning with this '
+ 'string. For example: '
+ '--filter=JSTests/TypedArrays/ will run only TypedArray '
+ 'benchmarks from the JSTests suite.',
+ default='')
+ parser.add_argument('--confidence-level', type=int,
+ help='Repeatedly runs each benchmark until specified '
+ 'confidence level is reached. The value is interpreted '
+ 'as the number of standard deviations from the mean that '
+ 'all values must lie within. Typical values are 1, 2 and '
+ '3 and correspond to 68%, 95% and 99.7% probability that '
+ 'the measured value is within 0.1% of the true value. '
+ 'Larger values result in more retries and thus longer '
+ 'runtime, but also provide more reliable results. Also '
+ 'see --max-total-duration flag.')
+ parser.add_argument('--max-total-duration', type=int, default=7140, # 1h 59m
+ help='Max total duration in seconds allowed for retries '
+ 'across all tests. This is especially useful in '
+ 'combination with the --confidence-level flag.')
+ parser.add_argument('--dump-logcats-to',
+ help='Writes logcat output from each test into specified '
+ 'directory. Only supported for android targets.')
+ parser.add_argument('--run-count', type=int, default=0,
+ help='Override the run count specified by the test '
+ 'suite. The default 0 uses the suite\'s config.')
+ parser.add_argument('-v', '--verbose', default=False, action='store_true',
+ help='Be verbose and print debug output.')
+ parser.add_argument('suite', nargs='+', help='Path to the suite config file.')
- if not options.arch in SUPPORTED_ARCHS: # pragma: no cover
- logging.error('Unknown architecture %s', options.arch)
+ try:
+ args = parser.parse_args(argv)
+ except SystemExit:
return INFRA_FAILURE_RETCODE
- if (options.json_test_results_secondary and
- not options.outdir_secondary): # pragma: no cover
+ logging.basicConfig(
+ level=logging.DEBUG if args.verbose else logging.INFO,
+ format='%(asctime)s %(levelname)-8s %(message)s')
+
+ if args.arch == 'auto': # pragma: no cover
+ args.arch = utils.DefaultArch()
+ if args.arch not in SUPPORTED_ARCHS:
+ logging.error(
+ 'Auto-detected architecture "%s" is not supported.', args.arch)
+ return INFRA_FAILURE_RETCODE
+
+ if (args.json_test_results_secondary and
+ not args.outdir_secondary): # pragma: no cover
logging.error('For writing secondary json test results, a secondary outdir '
'patch must be specified.')
return INFRA_FAILURE_RETCODE
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
- if options.buildbot:
+ if args.buildbot:
build_config = 'Release'
else:
- build_config = '%s.release' % options.arch
+ build_config = '%s.release' % args.arch
- if options.binary_override_path == None:
- options.shell_dir = os.path.join(workspace, options.outdir, build_config)
+ if args.binary_override_path == None:
+ args.shell_dir = os.path.join(workspace, args.outdir, build_config)
default_binary_name = 'd8'
else:
- if not os.path.isfile(options.binary_override_path):
+ if not os.path.isfile(args.binary_override_path):
logging.error('binary-override-path must be a file name')
return INFRA_FAILURE_RETCODE
- if options.outdir_secondary:
+ if args.outdir_secondary:
logging.error('specify either binary-override-path or outdir-secondary')
return INFRA_FAILURE_RETCODE
- options.shell_dir = os.path.abspath(
- os.path.dirname(options.binary_override_path))
- default_binary_name = os.path.basename(options.binary_override_path)
+ args.shell_dir = os.path.abspath(
+ os.path.dirname(args.binary_override_path))
+ default_binary_name = os.path.basename(args.binary_override_path)
- if options.outdir_secondary:
- options.shell_dir_secondary = os.path.join(
- workspace, options.outdir_secondary, build_config)
+ if args.outdir_secondary:
+ args.shell_dir_secondary = os.path.join(
+ workspace, args.outdir_secondary, build_config)
else:
- options.shell_dir_secondary = None
+ args.shell_dir_secondary = None
- if options.json_test_results:
- options.json_test_results = os.path.abspath(options.json_test_results)
+ if args.json_test_results:
+ args.json_test_results = os.path.abspath(args.json_test_results)
- if options.json_test_results_secondary:
- options.json_test_results_secondary = os.path.abspath(
- options.json_test_results_secondary)
+ if args.json_test_results_secondary:
+ args.json_test_results_secondary = os.path.abspath(
+ args.json_test_results_secondary)
# Ensure all arguments have absolute path before we start changing current
# directory.
- args = map(os.path.abspath, args)
+ args.suite = map(os.path.abspath, args.suite)
prev_aslr = None
prev_cpu_gov = None
- platform = Platform.GetPlatform(options)
-
- results = Results()
- results_secondary = Results()
- # We use list here to allow modification in nested function below.
- have_failed_tests = [False]
- with CustomMachineConfiguration(governor = options.cpu_governor,
- disable_aslr = options.noaslr) as conf:
- for path in args:
+ platform = Platform.GetPlatform(args)
+
+ result_tracker = ResultTracker()
+ result_tracker_secondary = ResultTracker()
+ have_failed_tests = False
+ with CustomMachineConfiguration(governor = args.cpu_governor,
+ disable_aslr = args.noaslr) as conf:
+ for path in args.suite:
if not os.path.exists(path): # pragma: no cover
- results.errors.append('Configuration file %s does not exist.' % path)
+ result_tracker.AddError('Configuration file %s does not exist.' % path)
continue
with open(path) as f:
@@ -1099,59 +1037,78 @@ def Main(args):
# Build the graph/trace tree structure.
default_parent = DefaultSentinel(default_binary_name)
- root = BuildGraphConfigs(suite, options.arch, default_parent)
+ root = BuildGraphConfigs(suite, args.arch, default_parent)
# Callback to be called on each node on traversal.
def NodeCB(node):
platform.PreTests(node, path)
# Traverse graph/trace tree and iterate over all runnables.
- for runnable in FlattenRunnables(root, NodeCB):
- runnable_name = '/'.join(runnable.graphs)
- if (not runnable_name.startswith(options.filter) and
- runnable_name + '/' != options.filter):
- continue
- logging.info('>>> Running suite: %s', runnable_name)
-
- def Runner():
- """Output generator that reruns several times."""
- total_runs = runnable.run_count * options.run_count_multiplier
- for i in range(0, max(1, total_runs)):
+ start = time.time()
+ try:
+ for runnable in FlattenRunnables(root, NodeCB):
+ runnable_name = '/'.join(runnable.graphs)
+ if (not runnable_name.startswith(args.filter) and
+ runnable_name + '/' != args.filter):
+ continue
+ logging.info('>>> Running suite: %s', runnable_name)
+
+ def RunGenerator(runnable):
+ if args.confidence_level:
+ counter = 0
+ while not result_tracker.HasEnoughRuns(
+ runnable, args.confidence_level):
+ yield counter
+ counter += 1
+ else:
+ for i in range(0, max(1, args.run_count or runnable.run_count)):
+ yield i
+
+ for i in RunGenerator(runnable):
attempts_left = runnable.retry_count + 1
while attempts_left:
- try:
- yield platform.Run(runnable, i)
- except TestFailedError:
- attempts_left -= 1
- if not attempts_left: # ignore failures until last attempt
- have_failed_tests[0] = True
- else:
- logging.info('>>> Retrying suite: %s', runnable_name)
- else:
+ total_duration = time.time() - start
+ if total_duration > args.max_total_duration:
+ logging.info(
+ '>>> Stopping now since running for too long (%ds > %ds)',
+ total_duration, args.max_total_duration)
+ raise MaxTotalDurationReachedError()
+
+ output, output_secondary = platform.Run(
+ runnable, i, secondary=args.shell_dir_secondary)
+ result_tracker.AddRunnableDuration(runnable, output.duration)
+ result_tracker_secondary.AddRunnableDuration(
+ runnable, output_secondary.duration)
+
+ if output.IsSuccess() and output_secondary.IsSuccess():
+ runnable.ProcessOutput(output, result_tracker, i)
+ if output_secondary is not NULL_OUTPUT:
+ runnable.ProcessOutput(
+ output_secondary, result_tracker_secondary, i)
break
- # Let runnable iterate over all runs and handle output.
- result, result_secondary = runnable.Run(
- Runner, trybot=options.shell_dir_secondary)
- results += result
- results_secondary += result_secondary
- if runnable.has_timeouts:
- results.timeouts.append(runnable_name)
- if runnable.has_near_timeouts:
- results.near_timeouts.append(runnable_name)
+ attempts_left -= 1
+ have_failed_tests = True
+ if attempts_left:
+ logging.info('>>> Retrying suite: %s', runnable_name)
+ except MaxTotalDurationReachedError:
+ have_failed_tests = True
+
platform.PostExecution()
- if options.json_test_results:
- results.WriteToFile(options.json_test_results)
+ if args.json_test_results:
+ result_tracker.WriteToFile(args.json_test_results)
else: # pragma: no cover
- print(results)
+ print('Primary results:', result_tracker)
- if options.json_test_results_secondary:
- results_secondary.WriteToFile(options.json_test_results_secondary)
- else: # pragma: no cover
- print(results_secondary)
+ if args.shell_dir_secondary:
+ if args.json_test_results_secondary:
+ result_tracker_secondary.WriteToFile(args.json_test_results_secondary)
+ else: # pragma: no cover
+ print('Secondary results:', result_tracker_secondary)
- if results.errors or have_failed_tests[0]:
+ if (result_tracker.errors or result_tracker_secondary.errors or
+ have_failed_tests):
return 1
return 0
diff --git a/deps/v8/tools/shell-utils.h b/deps/v8/tools/shell-utils.h
index bfd729d9b5..b41d3277aa 100644
--- a/deps/v8/tools/shell-utils.h
+++ b/deps/v8/tools/shell-utils.h
@@ -27,7 +27,7 @@
// Utility functions used by parser-shell.
-#include "src/globals.h"
+#include "src/common/globals.h"
#include <stdio.h>
diff --git a/deps/v8/tools/testrunner/OWNERS b/deps/v8/tools/testrunner/OWNERS
index c8693c972c..50b5741785 100644
--- a/deps/v8/tools/testrunner/OWNERS
+++ b/deps/v8/tools/testrunner/OWNERS
@@ -1,5 +1,3 @@
set noparent
-machenbach@chromium.org
-sergiyb@chromium.org
-tmrts@chromium.org \ No newline at end of file
+file://INFRA_OWNERS
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index 5e6a3c11a5..caed59356e 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -185,11 +185,13 @@ class BuildConfig(object):
self.is_android = build_config['is_android']
self.is_clang = build_config['is_clang']
self.is_debug = build_config['is_debug']
+ self.is_full_debug = build_config['is_full_debug']
self.msan = build_config['is_msan']
self.no_i18n = not build_config['v8_enable_i18n_support']
self.no_snap = not build_config['v8_use_snapshot']
self.predictable = build_config['v8_enable_verify_predictable']
self.tsan = build_config['is_tsan']
+ # TODO(machenbach): We only have ubsan not ubsan_vptr.
self.ubsan_vptr = build_config['is_ubsan_vptr']
self.embedded_builtins = build_config['v8_enable_embedded_builtins']
self.verify_csa = build_config['v8_enable_verify_csa']
@@ -200,6 +202,11 @@ class BuildConfig(object):
self.mips_arch_variant = build_config['mips_arch_variant']
self.mips_use_msa = build_config['mips_use_msa']
+ @property
+ def use_sanitizer(self):
+ return (self.asan or self.cfi_vptr or self.msan or self.tsan or
+ self.ubsan_vptr)
+
def __str__(self):
detected_options = []
@@ -341,9 +348,6 @@ class BaseTestRunner(object):
"color, mono)")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -673,6 +677,7 @@ class BaseTestRunner(object):
"gcov_coverage": self.build_config.gcov_coverage,
"isolates": options.isolates,
"is_clang": self.build_config.is_clang,
+ "is_full_debug": self.build_config.is_full_debug,
"mips_arch_variant": mips_arch_variant,
"mode": self.mode_options.status_mode
if not self.build_config.dcheck_always_on
@@ -712,15 +717,18 @@ class BaseTestRunner(object):
)
def _timeout_scalefactor(self, options):
+ """Increases timeout for slow build configurations."""
factor = self.mode_options.timeout_scalefactor
-
- # Simulators are slow, therefore allow a longer timeout.
if self.build_config.arch in SLOW_ARCHS:
+ factor *= 4
+ if self.build_config.lite_mode:
factor *= 2
-
- # Predictable mode is slower.
if self.build_config.predictable:
- factor *= 2
+ factor *= 4
+ if self.build_config.use_sanitizer:
+ factor *= 1.5
+ if self.build_config.is_full_debug:
+ factor *= 4
return factor
@@ -779,9 +787,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(
self.framework_name,
diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py
index 5eb0d8b20a..b68252c139 100644
--- a/deps/v8/tools/testrunner/local/command.py
+++ b/deps/v8/tools/testrunner/local/command.py
@@ -129,8 +129,12 @@ class BaseCommand(object):
def _abort(self, process, abort_called):
abort_called[0] = True
try:
+ print('Attempting to kill process %s' % process.pid)
+ sys.stdout.flush()
self._kill_process(process)
- except OSError:
+ except OSError as e:
+ print(e)
+ sys.stdout.flush()
pass
def __str__(self):
@@ -207,9 +211,6 @@ class WindowsCommand(BaseCommand):
return subprocess.list2cmdline(self._to_args_list())
def _kill_process(self, process):
- if self.verbose:
- print('Attempting to kill process %d' % process.pid)
- sys.stdout.flush()
tk = subprocess.Popen(
'taskkill /T /F /PID %d' % process.pid,
stdout=subprocess.PIPE,
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index ed9b1b87f5..dc92db6099 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -23,7 +23,9 @@ ALL_VARIANT_FLAGS = {
"slow_path": [["--force-slow-path"]],
"stress": [["--stress-opt", "--always-opt", "--no-liftoff",
"--no-wasm-tier-up"]],
- "stress_background_compile": [["--stress-background-compile"]],
+ "stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
+ "--wasm-code-gc",
+ "--stress-wasm-code-gc"]],
"stress_incremental_marking": [["--stress-incremental-marking"]],
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
diff --git a/deps/v8/tools/testrunner/objects/output.py b/deps/v8/tools/testrunner/objects/output.py
index 74cec56a85..78aa63d4c9 100644
--- a/deps/v8/tools/testrunner/objects/output.py
+++ b/deps/v8/tools/testrunner/objects/output.py
@@ -34,7 +34,8 @@ from ..local import utils
class Output(object):
- def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
+ def __init__(self, exit_code=0, timed_out=False, stdout=None, stderr=None,
+ pid=None, duration=None):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
@@ -61,3 +62,16 @@ class Output(object):
def HasTimedOut(self):
return self.timed_out
+
+ def IsSuccess(self):
+ return not self.HasCrashed() and not self.HasTimedOut()
+
+
+class _NullOutput(Output):
+ """Useful to signal that the binary has not been run."""
+ def __init__(self):
+ super(_NullOutput, self).__init__()
+
+
+# Default instance of the _NullOutput class above.
+NULL_OUTPUT = _NullOutput()
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index 80c7c29ed1..6d4dcd1352 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -244,13 +244,16 @@ class TestCase(object):
timeout = self._test_config.timeout
if "--stress-opt" in params:
timeout *= 4
+ if "--jitless" in params:
+ timeout *= 2
+ if "--no-opt" in params:
+ timeout *= 2
if "--noenable-vfp3" in params:
timeout *= 2
if self._get_timeout_param() == TIMEOUT_LONG:
timeout *= 10
-
- # TODO(majeski): make it slow outcome dependent.
- timeout *= 2
+ if self.is_slow:
+ timeout *= 4
return timeout
def get_shell(self):
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 2a08d2d97e..bc79c015bd 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -34,7 +34,7 @@ VARIANTS = ['default']
MORE_VARIANTS = [
'jitless',
'stress',
- 'stress_background_compile',
+ 'stress_js_bg_compile_wasm_code_gc',
'stress_incremental_marking',
]
@@ -53,7 +53,8 @@ GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
'--concurrent-recompilation-queue-length=64',
'--concurrent-recompilation-delay=500',
'--concurrent-recompilation',
- '--stress-flush-bytecode']
+ '--stress-flush-bytecode',
+ '--wasm-code-gc', '--stress-wasm-code-gc']
RANDOM_GC_STRESS_FLAGS = ['--random-gc-interval=5000',
'--stress-compaction-random']
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index 12d9503088..aad6740c1c 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -7,11 +7,17 @@ from __future__ import print_function
import json
import os
+import platform
+import subprocess
import sys
import time
from . import base
-from ..local import junit_output
+
+
+# Base dir of the build products for Release and Debug.
+OUT_DIR = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out'))
def print_failure_header(test):
@@ -121,11 +127,27 @@ class VerboseProgressIndicator(SimpleProgressIndicator):
self._print('Done running %s %s: %s' % (
test, test.variant or 'default', outcome))
+ # TODO(machenbach): Remove this platform specific hack and implement a proper
+ # feedback channel from the workers, providing which tests are currently run.
+ def _print_processes_linux(self):
+ if platform.system() == 'Linux':
+ try:
+ cmd = 'ps -aux | grep "%s"' % OUT_DIR
+ output = subprocess.check_output(cmd, shell=True)
+ self._print('List of processes:')
+ for line in (output or '').splitlines():
+ # Show command with pid, but other process info cut off.
+ self._print('pid: %s cmd: %s' %
+ (line.split()[1], line[line.index(OUT_DIR):]))
+ except:
+ pass
+
def _on_heartbeat(self):
if time.time() - self._last_printed_time > 30:
# Print something every 30 seconds to not get killed by an output
# timeout.
self._print('Still working...')
+ self._print_processes_linux()
class DotsProgressIndicator(SimpleProgressIndicator):
@@ -259,45 +281,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name, json_test_results, arch, mode):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/tick-processor.html b/deps/v8/tools/tick-processor.html
index bfebfc9e6a..32f8d6608e 100644
--- a/deps/v8/tools/tick-processor.html
+++ b/deps/v8/tools/tick-processor.html
@@ -89,6 +89,7 @@ function start_process() {
ignoreUnknown: false,
separateIc: true,
targetRootFS: '',
+ apkEmbeddedLibrary: '',
nm: 'nm'
};
@@ -100,7 +101,7 @@ function start_process() {
var tickProcessor = new TickProcessor(
new (entriesProviders[DEFAULTS.platform])(
- DEFAULTS.nm, DEFAULTS.targetRootFS),
+ DEFAULTS.nm, DEFAULTS.targetRootFS, DEFAULTS.apkEmbeddedLibrary),
DEFAULTS.separateIc, DEFAULTS.callGraphSize,
DEFAULTS.ignoreUnknown, DEFAULTS.stateFilter);
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
index 58844c127e..93331cfa2d 100644
--- a/deps/v8/tools/tickprocessor-driver.js
+++ b/deps/v8/tools/tickprocessor-driver.js
@@ -62,7 +62,8 @@ if (params.sourceMap) {
sourceMap = SourceMap.load(params.sourceMap);
}
var tickProcessor = new TickProcessor(
- new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
+ new (entriesProviders[params.platform])(params.nm, params.targetRootFS,
+ params.apkEmbeddedLibrary),
params.separateIc,
params.separateBytecodes,
params.separateBuiltins,
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index 31acd3d7be..ddb6d029f6 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -685,11 +685,12 @@ CppEntriesProvider.prototype.parseNextLine = function() {
};
-function UnixCppEntriesProvider(nmExec, targetRootFS) {
+function UnixCppEntriesProvider(nmExec, targetRootFS, apkEmbeddedLibrary) {
this.symbols = [];
this.parsePos = 0;
this.nmExec = nmExec;
this.targetRootFS = targetRootFS;
+ this.apkEmbeddedLibrary = apkEmbeddedLibrary;
this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
};
inherits(UnixCppEntriesProvider, CppEntriesProvider);
@@ -697,6 +698,9 @@ inherits(UnixCppEntriesProvider, CppEntriesProvider);
UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
this.parsePos = 0;
+ if (this.apkEmbeddedLibrary && libName.endsWith('.apk')) {
+ libName = this.apkEmbeddedLibrary;
+ }
libName = this.targetRootFS + libName;
try {
this.symbols = [
@@ -735,8 +739,8 @@ UnixCppEntriesProvider.prototype.parseNextLine = function() {
};
-function MacCppEntriesProvider(nmExec, targetRootFS) {
- UnixCppEntriesProvider.call(this, nmExec, targetRootFS);
+function MacCppEntriesProvider(nmExec, targetRootFS, apkEmbeddedLibrary) {
+ UnixCppEntriesProvider.call(this, nmExec, targetRootFS, apkEmbeddedLibrary);
// Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
this.FUNC_RE = /^([0-9a-fA-F]{8,16})() (.*)$/;
};
@@ -758,7 +762,8 @@ MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
};
-function WindowsCppEntriesProvider(_ignored_nmExec, targetRootFS) {
+function WindowsCppEntriesProvider(_ignored_nmExec, targetRootFS,
+ _ignored_apkEmbeddedLibrary) {
this.targetRootFS = targetRootFS;
this.symbols = '';
this.parsePos = 0;
@@ -882,6 +887,8 @@ class ArgumentsProcessor extends BaseArgumentsProcessor {
'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
'--target': ['targetRootFS', '',
'Specify the target root directory for cross environment'],
+ '--apk-embedded-library': ['apkEmbeddedLibrary', '',
+ 'Specify the path of the embedded library for Android traces'],
'--range': ['range', 'auto,auto',
'Specify the range limit as [start],[end]'],
'--distortion': ['distortion', 0,
diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py
index 761f727e6f..51b588f90b 100755
--- a/deps/v8/tools/torque/format-torque.py
+++ b/deps/v8/tools/torque/format-torque.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# -*- coding: utf-8 -*-
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
@@ -14,6 +15,8 @@ import sys
import re
from subprocess import Popen, PIPE
+kPercentEscape = r'α'; # Unicode alpha
+
def preprocess(input):
input = re.sub(r'(if\s+)constexpr(\s*\()', r'\1/*COxp*/\2', input)
input = re.sub(r'(\s+)operator\s*(\'[^\']+\')', r'\1/*_OPE \2*/', input)
@@ -46,14 +49,17 @@ def preprocess(input):
r'\n otherwise', input)
input = re.sub(r'(\n\s*\S[^\n]*\s)otherwise',
r'\1_OtheSaLi', input)
+ input = re.sub(r'@if\(', r'@iF(', input)
+ input = re.sub(r'@export', r'@eXpOrT', input)
+
+ # Special handing of '%' for intrinsics, turn the percent
+ # into a unicode character so that it gets treated as part of the
+ # intrinsic's name if it's already adjacent to it.
+ input = re.sub(r'%([A-Za-z])', kPercentEscape + r'\1', input)
+
return input
def postprocess(output):
- output = re.sub(r'%\s*RawDownCast', r'%RawDownCast', output)
- output = re.sub(r'%\s*RawConstexprCast', r'%RawConstexprCast', output)
- output = re.sub(r'%\s*FromConstexpr', r'%FromConstexpr', output)
- output = re.sub(r'%\s*Allocate', r'%Allocate', output)
- output = re.sub(r'%\s*GetAllocationBaseSize', r'%GetAllocationBaseSize', output)
output = re.sub(r'\/\*COxp\*\/', r'constexpr', output)
output = re.sub(r'(\S+)\s*: type([,>])', r'\1: type\2', output)
output = re.sub(r'(\n\s*)labels( [A-Z])', r'\1 labels\2', output)
@@ -79,6 +85,9 @@ def postprocess(output):
r"\n\1otherwise", output)
output = re.sub(r'_OtheSaLi',
r"otherwise", output)
+ output = re.sub(r'@iF\(', r'@if(', output)
+ output = re.sub(r'@eXpOrT',
+ r"@export", output)
while True:
old = output
@@ -87,6 +96,8 @@ def postprocess(output):
if old == output:
break;
+ output = re.sub(kPercentEscape, r'%', output)
+
return output
def process(filename, lint, should_format):
diff --git a/deps/v8/tools/torque/make-torque-parser.py b/deps/v8/tools/torque/make-torque-parser.py
deleted file mode 100755
index 807b68bf36..0000000000
--- a/deps/v8/tools/torque/make-torque-parser.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""This program either generates the parser files for Torque, generating
-the source and header files directly in V8's src directory."""
-
-import subprocess
-import sys
-import os
-import ntpath
-import re
-
-cwd = os.getcwd()
-tools = ntpath.dirname(sys.argv[0]);
-grammar = tools + '/../../src/torque/Torque.g4'
-basename = ntpath.basename(grammar)
-dirname = ntpath.dirname(grammar)
-os.chdir(dirname)
-cargs = ['java', '-Xmx500M', 'org.antlr.v4.Tool', '-visitor', basename]
-result = subprocess.call(cargs)
-os.chdir(cwd)
-
-def fix_file(filename):
- is_header = re.search(r'\.h', filename) is not None;
- header_macro = filename.upper();
- header_macro = re.sub('\.', '_', header_macro);
- header_macro = "V8_TORQUE_" + header_macro + '_';
-
- copyright = '// Copyright 2018 the V8 project authors. All rights reserved.\n'
- copyright += '// Use of this source code is governed by a BSD-style license that can be\n'
- copyright += '// found in the LICENSE file.\n'
- file_path = tools + '/../../src/torque/' + filename;
- temp_file_path = file_path + '.tmp'
- output_file = open(temp_file_path, 'w')
- output_file.write(copyright);
- if is_header:
- output_file.write('#ifndef ' + header_macro + '\n');
- output_file.write('#define ' + header_macro + '\n');
-
- with open(file_path) as f:
- content = f.readlines()
- for x in content:
- x = re.sub(';;', ';', x)
- x = re.sub('antlr4-runtime\.h', './antlr4-runtime.h', x)
- x = re.sub(' TorqueParser.antlr4', ' explicit TorqueParser(antlr4', x)
- x = re.sub(' TorqueLexer.antlr4', ' explicit TorqueLexer(antlr4', x)
- if not re.search('= 0', x):
- x = re.sub('virtual', '', x)
- output_file.write(x)
-
- if is_header:
- output_file.write('#endif // ' + header_macro + '\n');
- output_file.close();
-
- subprocess.call(['rm', file_path])
- subprocess.call(['mv', temp_file_path, file_path])
-
-fix_file('TorqueBaseListener.h');
-fix_file('TorqueBaseListener.cpp');
-fix_file('TorqueBaseVisitor.h');
-fix_file('TorqueBaseVisitor.cpp');
-fix_file('TorqueLexer.h');
-fix_file('TorqueLexer.cpp');
-fix_file('TorqueParser.h');
-fix_file('TorqueParser.cpp');
-fix_file('TorqueListener.h');
-fix_file('TorqueListener.cpp');
-fix_file('TorqueVisitor.h');
-fix_file('TorqueVisitor.cpp');
diff --git a/deps/v8/tools/torque/vim-torque/syntax/torque.vim b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
index c2e4ba0f7a..1a4ce987c7 100644
--- a/deps/v8/tools/torque/vim-torque/syntax/torque.vim
+++ b/deps/v8/tools/torque/vim-torque/syntax/torque.vim
@@ -30,7 +30,7 @@ syn keyword torqueFunction macro builtin runtime intrinsic
syn keyword torqueKeyword cast convert from_constexpr min max unsafe_cast
syn keyword torqueLabel case
syn keyword torqueMatching try label catch
-syn keyword torqueModifier extern javascript constexpr transitioning transient weak
+syn keyword torqueModifier extern javascript constexpr transitioning transient weak export
syn match torqueNumber /\v<[0-9]+(\.[0-9]*)?>/
syn match torqueNumber /\v<0x[0-9a-fA-F]+>/
syn keyword torqueOperator operator
diff --git a/deps/v8/tools/torque/vscode-torque/package.json b/deps/v8/tools/torque/vscode-torque/package.json
index 42174a6c9f..16c8095f86 100644
--- a/deps/v8/tools/torque/vscode-torque/package.json
+++ b/deps/v8/tools/torque/vscode-torque/package.json
@@ -41,13 +41,13 @@
"torque.trace.server": {
"type": "string",
"enum": [
- "off",
- "messages",
- "verbose"
+ "off",
+ "messages",
+ "verbose"
],
"default": "off",
"description": "Trace the communication with the Torque language server from VSCode."
- }
+ }
}
},
"languages": [
diff --git a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
index cbbf381da8..dea5be517b 100644
--- a/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
+++ b/deps/v8/tools/torque/vscode-torque/syntaxes/torque.tmLanguage.json
@@ -1,175 +1,177 @@
{
- "$schema": "https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json",
- "name": "Torque",
- "patterns": [
- {
- "name": "comment.line.double-slash.torque",
- "begin": "//",
- "end": "$"
- },
- {
- "name": "comment.block.torque",
- "begin": "/\\*",
- "end": "\\*/"
- },
- {
- "name": "support.function.torque",
- "match": "\\b(assert|check|debug|unreachable|Cast|Convert|FromConstexpr|UnsafeCast)\\b"
- },
- {
- "name": "constant.other.torque",
- "match": "\\b(true|True|false|False|Undefined|Hole|Null|k[A-Z][A-Za-z0-9]+)\\b"
- },
- {
- "begin": "\\b<(?=[A-Za-z][0-9A-Za-z_|, ]*>)",
- "end": ">",
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- },
- {
- "begin": "\\b(?=extern\\b)",
- "end": ";",
- "patterns": [
- {
- "begin": "\\)\\(|(?=(\\b[a-zA-Z0-9_]+)\\((?!\\s*implicit))",
- "end": "\\)",
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- },
- {
- "include": "#common"
- }
- ]
- },
- {
- "begin": "\\b(type)\\b",
- "end": ";",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- }
- },
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "\\b([A-Za-z][0-9A-Za-z_]*)\\b"
- }
- ]
- },
- {
- "name": "keyword.control.torque",
- "match": "#include"
- },
- {
- "include": "#common"
- }
- ],
- "repository": {
- "common": {
- "patterns": [
- {
- "match": "\\b(extends)\\s+([A-Za-z0-9]+)",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- },
- "2": {
- "name": "support.type.torque"
- }
- }
- },
- {
- "name": "keyword.control.torque",
- "match": "\\b(if|else|while|for|return|continue|break|goto|otherwise|try|label|catch)\\b"
- },
- {
- "name": "keyword.other.torque",
- "match": "\\b(constexpr|macro|builtin|runtime|intrinsic|javascript|implicit|deferred|label|labels|tail|let|generates|weak|extern|const|typeswitch|case|transient|transitioning|operator|namespace)\\b"
- },
- {
- "name": "keyword.operator.torque",
- "match": "\\b(=|\\*=)\\b"
- },
- {
- "match": "\\b(class)\\s+([A-Za-z0-9]+)",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- },
- "2": {
- "name": "support.type.torque"
- }
- }
- },
- {
- "match": "\\b(struct)\\s+([A-Za-z0-9]+)",
- "captures": {
- "1": {
- "name": "keyword.other.torque"
- },
- "2": {
- "name": "support.type.torque"
- }
- }
- },
- {
- "name": "string.quoted.double.torque",
- "begin": "\"",
- "end": "\"",
- "patterns": [
- {
- "name": "constant.character.escape.torque",
- "match": "\\\\."
- }
- ]
- },
- {
- "name": "string.quoted.single.torque",
- "begin": "'",
- "end": "'",
- "patterns": [
- {
- "name": "constant.character.escape.torque",
- "match": "\\\\."
- }
- ]
- },
- {
- "begin": ":(\\s*)?",
- "end": "(?=(generates|[^0-9A-Za-z_| ]))",
- "patterns": [
- {
- "include": "#common"
- },
- {
- "name": "support.type.torque",
- "match": "([A-Za-z][0-9A-Za-z_]*)"
- }
- ]
- },
- {
- "name": "support.function.torque",
- "match": "\\b[A-Za-z0-9_]+\\b(?=(<[ ,:A-Za-z0-9_]+>)?\\()"
- }
- ]
- }
- },
- "scopeName": "source.torque"
-} \ No newline at end of file
+ "$schema": "https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json",
+ "name": "Torque",
+ "patterns": [
+ {
+ "name": "comment.line.double-slash.torque",
+ "begin": "//",
+ "end": "$"
+ },
+ {
+ "name": "comment.block.torque",
+ "begin": "/\\*",
+ "end": "\\*/"
+ },
+ {
+ "name": "support.function.torque",
+ "match": "\\b(assert|check|debug|unreachable|Cast|Convert|FromConstexpr|UnsafeCast)\\b"
+ },
+ {
+ "name": "constant.other.torque",
+ "match": "\\b(true|True|false|False|Undefined|Hole|Null|k[A-Z][A-Za-z0-9]+)\\b"
+ },
+ {
+ "begin": "\\b<(?=[A-Za-z][0-9A-Za-z_|, ]*>)",
+ "end": ">",
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "([A-Za-z][0-9A-Za-z_]*)"
+ }
+ ]
+ },
+ {
+ "begin": "\\b(?=(macro|runtime|builtin)\\b)",
+ "end": ";|\\{",
+ "patterns": [
+ {
+ "begin": "\\(",
+ "end": "\\)",
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "match": "(([A-Za-z][0-9A-Za-z_]*):\\s*)?([A-Za-z][0-9A-Za-z_]*)",
+ "captures":{
+ "3": {"name": "support.type.torque"}
+ }
+ }
+ ]
+ },
+ {
+ "include": "#common"
+ }
+ ]
+ },
+ {
+ "begin": "\\b(type)\\b",
+ "end": ";",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ }
+ },
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "\\b([A-Za-z][0-9A-Za-z_]*)\\b"
+ }
+ ]
+ },
+ {
+ "name": "keyword.control.torque",
+ "match": "#include"
+ },
+ {
+ "include": "#common"
+ }
+ ],
+ "repository": {
+ "common": {
+ "patterns": [
+ {
+ "match": "\\b(extends)\\s+([A-Za-z0-9]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ },
+ "2": {
+ "name": "support.type.torque"
+ }
+ }
+ },
+ {
+ "name": "keyword.control.torque",
+ "match": "\\b(if|else|while|for|return|continue|break|goto|otherwise|try|label|catch)\\b"
+ },
+ {
+ "name": "keyword.other.torque",
+ "match": "\\b(constexpr|macro|builtin|runtime|intrinsic|javascript|implicit|deferred|label|labels|tail|let|generates|weak|extern|const|typeswitch|case|transient|transitioning|operator|namespace|export)\\b"
+ },
+ {
+ "name": "keyword.operator.torque",
+ "match": "\\b(=|\\*=)\\b"
+ },
+ {
+ "match": "\\b(class|new)\\s+([A-Za-z0-9]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ },
+ "2": {
+ "name": "support.type.torque"
+ }
+ }
+ },
+ {
+ "match": "\\b(struct)\\s+([A-Za-z0-9]+)",
+ "captures": {
+ "1": {
+ "name": "keyword.other.torque"
+ },
+ "2": {
+ "name": "support.type.torque"
+ }
+ }
+ },
+ {
+ "name": "string.quoted.double.torque",
+ "begin": "\"",
+ "end": "\"",
+ "patterns": [
+ {
+ "name": "constant.character.escape.torque",
+ "match": "\\\\."
+ }
+ ]
+ },
+ {
+ "name": "string.quoted.single.torque",
+ "begin": "'",
+ "end": "'",
+ "patterns": [
+ {
+ "name": "constant.character.escape.torque",
+ "match": "\\\\."
+ }
+ ]
+ },
+ {
+ "begin": ":(\\s*)?",
+ "end": "(?=(generates|[^0-9A-Za-z_| ]))",
+ "patterns": [
+ {
+ "include": "#common"
+ },
+ {
+ "name": "support.type.torque",
+ "match": "([A-Za-z][0-9A-Za-z_]*)"
+ }
+ ]
+ },
+ {
+ "name": "support.function.torque",
+ "match": "\\b[A-Za-z0-9_]+\\b(?=(<[ ,:A-Za-z0-9_]+>)?\\()"
+ }
+ ]
+ }
+ },
+ "scopeName": "source.torque"
+}
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index 5e009ebd6b..083d224b2d 100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -7,9 +7,7 @@
from __future__ import print_function
from collections import namedtuple
-import coverage
import json
-import mock
import os
import platform
import shutil
@@ -18,6 +16,9 @@ import sys
import tempfile
import unittest
+import coverage
+import mock
+
# Requires python-coverage and python-mock. Native python coverage
# version >= 3.7.1 should be installed to get the best speed.
@@ -31,6 +32,7 @@ V8_JSON = {
'path': ['.'],
'owners': ['username@chromium.org'],
'binary': 'd7',
+ 'timeout': 60,
'flags': ['--flag'],
'main': 'run.js',
'run_count': 1,
@@ -88,8 +90,6 @@ V8_GENERIC_JSON = {
'units': 'ms',
}
-Output = namedtuple('Output', 'stdout, stderr, timed_out, exit_code')
-
class PerfTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
@@ -99,8 +99,8 @@ class PerfTest(unittest.TestCase):
cls._cov.start()
import run_perf
from testrunner.local import command
- global command
- global run_perf
+ from testrunner.objects.output import Output, NULL_OUTPUT
+ global command, run_perf, Output, NULL_OUTPUT
@classmethod
def tearDownClass(cls):
@@ -127,9 +127,9 @@ class PerfTest(unittest.TestCase):
def _MockCommand(self, *args, **kwargs):
# Fake output for each test run.
test_outputs = [Output(stdout=arg,
- stderr=None,
timed_out=kwargs.get('timed_out', False),
- exit_code=kwargs.get('exit_code', 0))
+ exit_code=kwargs.get('exit_code', 0),
+ duration=42)
for arg in args[1]]
def create_cmd(*args, **kwargs):
cmd = mock.MagicMock()
@@ -145,7 +145,7 @@ class PerfTest(unittest.TestCase):
# Check that d8 is called from the correct cwd for each test run.
dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
def chdir(*args, **kwargs):
- self.assertEquals(dirs.pop(), args[0])
+ self.assertEqual(dirs.pop(), args[0])
os.chdir = mock.MagicMock(side_effect=chdir)
subprocess.check_call = mock.MagicMock()
@@ -166,15 +166,24 @@ class PerfTest(unittest.TestCase):
return json.load(f)
def _VerifyResults(self, suite, units, traces, file_name=None):
- self.assertEquals([
+ self.assertListEqual(sorted([
{'units': units,
'graphs': [suite, trace['name']],
'results': trace['results'],
- 'stddev': trace['stddev']} for trace in traces],
- self._LoadResults(file_name)['traces'])
+ 'stddev': trace['stddev']} for trace in traces]),
+ sorted(self._LoadResults(file_name)['traces']))
+
+ def _VerifyRunnableDurations(self, runs, timeout, file_name=None):
+ self.assertListEqual([
+ {
+ 'graphs': ['test'],
+ 'durations': [42] * runs,
+ 'timeout': timeout,
+ },
+ ], self._LoadResults(file_name)['runnables'])
def _VerifyErrors(self, errors):
- self.assertEquals(errors, self._LoadResults()['errors'])
+ self.assertListEqual(errors, self._LoadResults()['errors'])
def _VerifyMock(self, binary, *args, **kwargs):
shell = os.path.join(os.path.dirname(BASE_DIR), binary)
@@ -185,7 +194,7 @@ class PerfTest(unittest.TestCase):
timeout=kwargs.get('timeout', 60))
def _VerifyMockMultiple(self, *args, **kwargs):
- self.assertEquals(len(args), len(command.Command.call_args_list))
+ self.assertEqual(len(args), len(command.Command.call_args_list))
for arg, actual in zip(args, command.Command.call_args_list):
expected = {
'cmd_prefix': [],
@@ -193,16 +202,17 @@ class PerfTest(unittest.TestCase):
'args': list(arg[1:]),
'timeout': kwargs.get('timeout', 60)
}
- self.assertEquals((expected, ), actual)
+ self.assertTupleEqual((expected, ), actual)
def testOneRun(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
+ self._VerifyRunnableDurations(1, 60)
self._VerifyErrors([])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
@@ -212,10 +222,10 @@ class PerfTest(unittest.TestCase):
test_input['test_flags'] = ['2', 'test_name']
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join(
@@ -230,10 +240,10 @@ class PerfTest(unittest.TestCase):
self._MockCommand(['.', '.'],
['Richards: 100\nDeltaBlue: 200\n',
'Richards: 50\nDeltaBlue: 300\n'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
self._VerifyResults('v8', 'ms', [
- {'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join(
@@ -249,10 +259,59 @@ class PerfTest(unittest.TestCase):
self._MockCommand(['.', '.'],
['Richards: 100\nDeltaBlue: 200\n',
'Richards: 50\nDeltaBlue: 300\n'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
+ ])
+ self._VerifyErrors([])
+ self._VerifyMock(os.path.join(
+ 'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+ def testPerfectConfidenceRuns(self):
+ self._WriteTestInput(V8_JSON)
+ self._MockCommand(
+ ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'] * 10)
+ self.assertEqual(0, self._CallMain('--confidence-level', '1'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['300.0', '200.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234] * 10, 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0] * 10, 'stddev': ''},
+ ])
+ self._VerifyErrors([])
+ self._VerifyMock(os.path.join(
+ 'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+ def testNoisyConfidenceRuns(self):
+ self._WriteTestInput(V8_JSON)
+ self._MockCommand(
+ ['.'],
+ reversed([
+ # First 10 runs are mandatory. DeltaBlue is slightly noisy.
+ 'x\nRichards: 1.234\nDeltaBlue: 10757567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10557567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ # Need 4 more runs for confidence in DeltaBlue results.
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ 'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+ ]),
+ )
+ self.assertEqual(0, self._CallMain('--confidence-level', '1'))
+ self._VerifyResults('test', 'score', [
+ {'name': 'Richards', 'results': [1.234] * 14, 'stddev': ''},
+ {
+ 'name': 'DeltaBlue',
+ 'results': [10757567.0, 10557567.0] + [10657567.0] * 12,
+ 'stddev': '',
+ },
])
self._VerifyErrors([])
self._VerifyMock(os.path.join(
@@ -267,21 +326,21 @@ class PerfTest(unittest.TestCase):
'Simple: 3 ms.\n',
'Richards: 100\n',
'Richards: 50\n'])
- self.assertEquals(0, self._CallMain())
- self.assertEquals([
+ self.assertEqual(0, self._CallMain())
+ self.assertListEqual(sorted([
{'units': 'score',
'graphs': ['test', 'Richards'],
- 'results': ['50.0', '100.0'],
+ 'results': [50.0, 100.0],
'stddev': ''},
{'units': 'ms',
'graphs': ['test', 'Sub', 'Leaf'],
- 'results': ['3.0', '2.0', '1.0'],
+ 'results': [3.0, 2.0, 1.0],
'stddev': ''},
{'units': 'score',
'graphs': ['test', 'DeltaBlue'],
- 'results': ['200.0'],
+ 'results': [200.0],
'stddev': ''},
- ], self._LoadResults()['traces'])
+ ]), sorted(self._LoadResults()['traces']))
self._VerifyErrors([])
self._VerifyMockMultiple(
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
@@ -298,10 +357,10 @@ class PerfTest(unittest.TestCase):
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['Richards: 1.234\nRichards-stddev: 0.23\n'
'DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n'])
- self.assertEquals(0, self._CallMain())
+ self.assertEqual(0, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': '0.23'},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': '106'},
+ {'name': 'Richards', 'results': [1.234], 'stddev': '0.23'},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': '106'},
])
self._VerifyErrors([])
self._VerifyMock(
@@ -316,10 +375,10 @@ class PerfTest(unittest.TestCase):
'DeltaBlue: 6\nDeltaBlue-boom: 0.9\n',
'Richards: 2\nRichards-stddev: 0.5\n'
'DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n'])
- self.assertEquals(1, self._CallMain())
+ self.assertEqual(1, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['2.0', '3.0'], 'stddev': '0.7'},
- {'name': 'DeltaBlue', 'results': ['5.0', '6.0'], 'stddev': '0.8'},
+ {'name': 'Richards', 'results': [2.0, 3.0], 'stddev': '0.7'},
+ {'name': 'DeltaBlue', 'results': [5.0, 6.0], 'stddev': '0.8'},
])
self._VerifyErrors(
['Test test/Richards should only run once since a stddev is provided '
@@ -337,10 +396,10 @@ class PerfTest(unittest.TestCase):
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
- self.assertEquals(0, self._CallMain('--buildbot'))
+ self.assertEqual(0, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
@@ -353,11 +412,11 @@ class PerfTest(unittest.TestCase):
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
- self.assertEquals(0, self._CallMain('--buildbot'))
+ self.assertEqual(0, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
- {'name': 'Total', 'results': ['3626.49109719'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+ {'name': 'Total', 'results': [3626.491097190233], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
@@ -370,69 +429,38 @@ class PerfTest(unittest.TestCase):
mock.patch.object(
run_perf.Platform, 'ReadBuildConfig',
mock.MagicMock(return_value={'is_android': False})).start()
- self.assertEquals(1, self._CallMain('--buildbot'))
+ self.assertEqual(1, self._CallMain('--buildbot'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': [], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors(
['Regexp "^Richards: (.+)$" '
'returned a non-numeric for test test/Richards.',
- 'Not all traces have the same number of results.'])
+ 'Not all traces have produced results. Can not compute total for '
+ 'test.'])
self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
def testRegexpNoMatch(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(['.'], ['x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n'])
- self.assertEquals(1, self._CallMain())
+ self.assertEqual(1, self._CallMain())
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': [], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors(
['Regexp "^Richards: (.+)$" did not match for test test/Richards.'])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
- def testOneRunGeneric(self):
- test_input = dict(V8_GENERIC_JSON)
- self._WriteTestInput(test_input)
- self._MockCommand(['.'], [
- 'RESULT Infra: Constant1= 11 count\n'
- 'RESULT Infra: Constant2= [10,5,10,15] count\n'
- 'RESULT Infra: Constant3= {12,1.2} count\n'
- 'RESULT Infra: Constant4= [10,5,error,15] count\n'])
- self.assertEquals(1, self._CallMain())
- self.assertEquals([
- {'units': 'count',
- 'graphs': ['test', 'Infra', 'Constant1'],
- 'results': ['11.0'],
- 'stddev': ''},
- {'units': 'count',
- 'graphs': ['test', 'Infra', 'Constant2'],
- 'results': ['10.0', '5.0', '10.0', '15.0'],
- 'stddev': ''},
- {'units': 'count',
- 'graphs': ['test', 'Infra', 'Constant3'],
- 'results': ['12.0'],
- 'stddev': '1.2'},
- {'units': 'count',
- 'graphs': ['test', 'Infra', 'Constant4'],
- 'results': [],
- 'stddev': ''},
- ], self._LoadResults()['traces'])
- self._VerifyErrors(['Found non-numeric in test/Infra/Constant4'])
- self._VerifyMock(os.path.join('out', 'x64.release', 'cc'), '--flag', '')
-
def testOneRunCrashed(self):
- self._WriteTestInput(V8_JSON)
+ test_input = dict(V8_JSON)
+ test_input['retry_count'] = 1
+ self._WriteTestInput(test_input)
self._MockCommand(
- ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'], exit_code=1)
- self.assertEquals(1, self._CallMain())
- self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': [], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': [], 'stddev': ''},
- ])
+ ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n', ''],
+ exit_code=-1)
+ self.assertEqual(1, self._CallMain())
+ self._VerifyResults('test', 'score', [])
self._VerifyErrors([])
self._VerifyMock(
os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
@@ -440,13 +468,11 @@ class PerfTest(unittest.TestCase):
def testOneRunTimingOut(self):
test_input = dict(V8_JSON)
test_input['timeout'] = 70
+ test_input['retry_count'] = 0
self._WriteTestInput(test_input)
self._MockCommand(['.'], [''], timed_out=True)
- self.assertEquals(1, self._CallMain())
- self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': [], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': [], 'stddev': ''},
- ])
+ self.assertEqual(1, self._CallMain())
+ self._VerifyResults('test', 'score', [])
self._VerifyErrors([])
self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
'--flag', 'run.js', timeout=70)
@@ -458,16 +484,16 @@ class PerfTest(unittest.TestCase):
mock.patch('run_perf.AndroidPlatform.PreTests').start()
mock.patch(
'run_perf.AndroidPlatform.Run',
- return_value=(
- 'Richards: 1.234\nDeltaBlue: 10657567\n', None)).start()
+ return_value=(Output(stdout='Richards: 1.234\nDeltaBlue: 10657567\n'),
+ NULL_OUTPUT)).start()
mock.patch('testrunner.local.android._Driver', autospec=True).start()
mock.patch(
'run_perf.Platform.ReadBuildConfig',
return_value={'is_android': True}).start()
- self.assertEquals(0, self._CallMain('--arch', 'arm'))
+ self.assertEqual(0, self._CallMain('--arch', 'arm'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
def testTwoRuns_Trybot(self):
@@ -481,18 +507,19 @@ class PerfTest(unittest.TestCase):
'Richards: 100\nDeltaBlue: 20\n'])
test_output_secondary = os.path.join(
TEST_WORKSPACE, 'results_secondary.json')
- self.assertEquals(0, self._CallMain(
+ self.assertEqual(0, self._CallMain(
'--outdir-secondary', 'out-secondary',
'--json-test-results-secondary', test_output_secondary,
))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['100.0', '200.0'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['20.0', '20.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [100.0, 200.0], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [20.0, 20.0], 'stddev': ''},
])
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['50.0', '100.0'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['200.0', '200.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [200.0, 200.0], 'stddev': ''},
], test_output_secondary)
+ self._VerifyRunnableDurations(2, 60, test_output_secondary)
self._VerifyErrors([])
self._VerifyMockMultiple(
(os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
@@ -507,23 +534,15 @@ class PerfTest(unittest.TestCase):
test_input = dict(V8_JSON)
self._WriteTestInput(test_input)
self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
- self.assertEquals(0, self._CallMain('--extra-flags=--prof'))
+ self.assertEqual(0, self._CallMain('--extra-flags=--prof'))
self._VerifyResults('test', 'score', [
- {'name': 'Richards', 'results': ['1.234'], 'stddev': ''},
- {'name': 'DeltaBlue', 'results': ['10657567.0'], 'stddev': ''},
+ {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
])
self._VerifyErrors([])
self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
'--flag', '--prof', 'run.js')
- def testUnzip(self):
- def Gen():
- for i in [1, 2, 3]:
- yield i, i + 1
- l, r = run_perf.Unzip(Gen())
- self.assertEquals([1, 2, 3], list(l()))
- self.assertEquals([2, 3, 4], list(r()))
-
#############################################################################
### System tests
@@ -540,54 +559,54 @@ class PerfTest(unittest.TestCase):
def testNormal(self):
results = self._RunPerf('d8_mocked1.py', 'test1.json')
- self.assertEquals([], results['errors'])
- self.assertEquals([
+ self.assertListEqual([], results['errors'])
+ self.assertListEqual(sorted([
{
'units': 'score',
'graphs': ['test1', 'Richards'],
- 'results': [u'1.2', u'1.2'],
+ 'results': [1.2, 1.2],
'stddev': '',
},
{
'units': 'score',
'graphs': ['test1', 'DeltaBlue'],
- 'results': [u'2.1', u'2.1'],
+ 'results': [2.1, 2.1],
'stddev': '',
},
- ], results['traces'])
+ ]), sorted(results['traces']))
def testResultsProcessor(self):
results = self._RunPerf('d8_mocked2.py', 'test2.json')
- self.assertEquals([], results['errors'])
- self.assertEquals([
+ self.assertListEqual([], results['errors'])
+ self.assertListEqual([
{
'units': 'score',
'graphs': ['test2', 'Richards'],
- 'results': [u'1.2', u'1.2'],
+ 'results': [1.2, 1.2],
'stddev': '',
},
{
'units': 'score',
'graphs': ['test2', 'DeltaBlue'],
- 'results': [u'2.1', u'2.1'],
+ 'results': [2.1, 2.1],
'stddev': '',
},
], results['traces'])
def testResultsProcessorNested(self):
results = self._RunPerf('d8_mocked2.py', 'test3.json')
- self.assertEquals([], results['errors'])
- self.assertEquals([
+ self.assertListEqual([], results['errors'])
+ self.assertListEqual([
{
'units': 'score',
'graphs': ['test3', 'Octane', 'Richards'],
- 'results': [u'1.2'],
+ 'results': [1.2],
'stddev': '',
},
{
'units': 'score',
'graphs': ['test3', 'Octane', 'DeltaBlue'],
- 'results': [u'2.1'],
+ 'results': [2.1],
'stddev': '',
},
], results['traces'])
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index 39b7cdf87c..0192fd8ee3 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -7,6 +7,7 @@
"is_clang": true,
"is_component_build": false,
"is_debug": false,
+ "is_full_debug": false,
"is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index 73b7a0b7c8..f19c310bf8 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -7,6 +7,7 @@
"is_clang": true,
"is_component_build": false,
"is_debug": false,
+ "is_full_debug": false,
"is_gcov_coverage": false,
"is_ubsan_vptr": false,
"is_msan": false,
diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py
index ff72b62e22..7237000695 100755
--- a/deps/v8/tools/v8_presubmit.py
+++ b/deps/v8/tools/v8_presubmit.py
@@ -476,7 +476,10 @@ class SourceProcessor(SourceFileProcessor):
'zlib.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
- IGNORE_COPYRIGHTS_DIRECTORY = "test/test262/local-tests"
+ IGNORE_COPYRIGHTS_DIRECTORIES = [
+ "test/test262/local-tests",
+ "test/mjsunit/wasm/bulk-memory-spec",
+ ]
def EndOfDeclaration(self, line):
return line == "}" or line == "};"
@@ -494,7 +497,8 @@ class SourceProcessor(SourceFileProcessor):
print("%s contains tabs" % name)
result = False
if not base in SourceProcessor.IGNORE_COPYRIGHTS and \
- not SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORY in name:
+ not any(ignore_dir in name for ignore_dir
+ in SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORIES):
if not COPYRIGHT_HEADER_PATTERN.search(contents):
print("%s is missing a correct copyright header." % name)
result = False
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index b891154e33..0165e0f1dd 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -36,103 +36,96 @@ INSTANCE_TYPES = {
72: "BYTE_ARRAY_TYPE",
73: "BYTECODE_ARRAY_TYPE",
74: "FREE_SPACE_TYPE",
- 75: "FIXED_INT8_ARRAY_TYPE",
- 76: "FIXED_UINT8_ARRAY_TYPE",
- 77: "FIXED_INT16_ARRAY_TYPE",
- 78: "FIXED_UINT16_ARRAY_TYPE",
- 79: "FIXED_INT32_ARRAY_TYPE",
- 80: "FIXED_UINT32_ARRAY_TYPE",
- 81: "FIXED_FLOAT32_ARRAY_TYPE",
- 82: "FIXED_FLOAT64_ARRAY_TYPE",
- 83: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
- 84: "FIXED_BIGINT64_ARRAY_TYPE",
- 85: "FIXED_BIGUINT64_ARRAY_TYPE",
- 86: "FIXED_DOUBLE_ARRAY_TYPE",
- 87: "FEEDBACK_METADATA_TYPE",
- 88: "FILLER_TYPE",
- 89: "ACCESS_CHECK_INFO_TYPE",
- 90: "ACCESSOR_INFO_TYPE",
- 91: "ACCESSOR_PAIR_TYPE",
- 92: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 93: "ALLOCATION_MEMENTO_TYPE",
- 94: "ASM_WASM_DATA_TYPE",
- 95: "ASYNC_GENERATOR_REQUEST_TYPE",
- 96: "CLASS_POSITIONS_TYPE",
- 97: "DEBUG_INFO_TYPE",
- 98: "ENUM_CACHE_TYPE",
- 99: "FUNCTION_TEMPLATE_INFO_TYPE",
- 100: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 101: "INTERCEPTOR_INFO_TYPE",
- 102: "INTERPRETER_DATA_TYPE",
- 103: "MODULE_INFO_ENTRY_TYPE",
- 104: "MODULE_TYPE",
- 105: "OBJECT_TEMPLATE_INFO_TYPE",
- 106: "PROMISE_CAPABILITY_TYPE",
- 107: "PROMISE_REACTION_TYPE",
- 108: "PROTOTYPE_INFO_TYPE",
- 109: "SCRIPT_TYPE",
- 110: "STACK_FRAME_INFO_TYPE",
- 111: "STACK_TRACE_FRAME_TYPE",
- 112: "TUPLE2_TYPE",
- 113: "TUPLE3_TYPE",
- 114: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
- 115: "WASM_DEBUG_INFO_TYPE",
- 116: "WASM_EXCEPTION_TAG_TYPE",
- 117: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 118: "CALLABLE_TASK_TYPE",
- 119: "CALLBACK_TASK_TYPE",
- 120: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
- 121: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
- 122: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
- 123: "FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE",
- 124: "ALLOCATION_SITE_TYPE",
- 125: "EMBEDDER_DATA_ARRAY_TYPE",
- 126: "FIXED_ARRAY_TYPE",
- 127: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 128: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 129: "HASH_TABLE_TYPE",
- 130: "ORDERED_HASH_MAP_TYPE",
- 131: "ORDERED_HASH_SET_TYPE",
- 132: "ORDERED_NAME_DICTIONARY_TYPE",
- 133: "NAME_DICTIONARY_TYPE",
- 134: "GLOBAL_DICTIONARY_TYPE",
- 135: "NUMBER_DICTIONARY_TYPE",
- 136: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 137: "STRING_TABLE_TYPE",
- 138: "EPHEMERON_HASH_TABLE_TYPE",
- 139: "SCOPE_INFO_TYPE",
- 140: "SCRIPT_CONTEXT_TABLE_TYPE",
- 141: "AWAIT_CONTEXT_TYPE",
- 142: "BLOCK_CONTEXT_TYPE",
- 143: "CATCH_CONTEXT_TYPE",
- 144: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 145: "EVAL_CONTEXT_TYPE",
- 146: "FUNCTION_CONTEXT_TYPE",
- 147: "MODULE_CONTEXT_TYPE",
- 148: "NATIVE_CONTEXT_TYPE",
- 149: "SCRIPT_CONTEXT_TYPE",
- 150: "WITH_CONTEXT_TYPE",
- 151: "WEAK_FIXED_ARRAY_TYPE",
- 152: "TRANSITION_ARRAY_TYPE",
- 153: "CALL_HANDLER_INFO_TYPE",
- 154: "CELL_TYPE",
- 155: "CODE_DATA_CONTAINER_TYPE",
- 156: "DESCRIPTOR_ARRAY_TYPE",
- 157: "FEEDBACK_CELL_TYPE",
- 158: "FEEDBACK_VECTOR_TYPE",
- 159: "LOAD_HANDLER_TYPE",
- 160: "PREPARSE_DATA_TYPE",
- 161: "PROPERTY_ARRAY_TYPE",
- 162: "PROPERTY_CELL_TYPE",
- 163: "SHARED_FUNCTION_INFO_TYPE",
- 164: "SMALL_ORDERED_HASH_MAP_TYPE",
- 165: "SMALL_ORDERED_HASH_SET_TYPE",
- 166: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 167: "STORE_HANDLER_TYPE",
- 168: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 169: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 170: "WEAK_ARRAY_LIST_TYPE",
- 171: "WEAK_CELL_TYPE",
+ 75: "FIXED_DOUBLE_ARRAY_TYPE",
+ 76: "FEEDBACK_METADATA_TYPE",
+ 77: "FILLER_TYPE",
+ 78: "ACCESS_CHECK_INFO_TYPE",
+ 79: "ACCESSOR_INFO_TYPE",
+ 80: "ACCESSOR_PAIR_TYPE",
+ 81: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 82: "ALLOCATION_MEMENTO_TYPE",
+ 83: "ASM_WASM_DATA_TYPE",
+ 84: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 85: "CLASS_POSITIONS_TYPE",
+ 86: "DEBUG_INFO_TYPE",
+ 87: "ENUM_CACHE_TYPE",
+ 88: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 89: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 90: "INTERCEPTOR_INFO_TYPE",
+ 91: "INTERPRETER_DATA_TYPE",
+ 92: "MODULE_INFO_ENTRY_TYPE",
+ 93: "MODULE_TYPE",
+ 94: "OBJECT_TEMPLATE_INFO_TYPE",
+ 95: "PROMISE_CAPABILITY_TYPE",
+ 96: "PROMISE_REACTION_TYPE",
+ 97: "PROTOTYPE_INFO_TYPE",
+ 98: "SCRIPT_TYPE",
+ 99: "SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE",
+ 100: "STACK_FRAME_INFO_TYPE",
+ 101: "STACK_TRACE_FRAME_TYPE",
+ 102: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 103: "TUPLE2_TYPE",
+ 104: "TUPLE3_TYPE",
+ 105: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+ 106: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 107: "WASM_DEBUG_INFO_TYPE",
+ 108: "WASM_EXCEPTION_TAG_TYPE",
+ 109: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 110: "WASM_JS_FUNCTION_DATA_TYPE",
+ 111: "CALLABLE_TASK_TYPE",
+ 112: "CALLBACK_TASK_TYPE",
+ 113: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+ 114: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+ 115: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+ 116: "FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE",
+ 117: "ALLOCATION_SITE_TYPE",
+ 118: "EMBEDDER_DATA_ARRAY_TYPE",
+ 119: "FIXED_ARRAY_TYPE",
+ 120: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 121: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 122: "HASH_TABLE_TYPE",
+ 123: "ORDERED_HASH_MAP_TYPE",
+ 124: "ORDERED_HASH_SET_TYPE",
+ 125: "ORDERED_NAME_DICTIONARY_TYPE",
+ 126: "NAME_DICTIONARY_TYPE",
+ 127: "GLOBAL_DICTIONARY_TYPE",
+ 128: "NUMBER_DICTIONARY_TYPE",
+ 129: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 130: "STRING_TABLE_TYPE",
+ 131: "EPHEMERON_HASH_TABLE_TYPE",
+ 132: "SCOPE_INFO_TYPE",
+ 133: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 134: "AWAIT_CONTEXT_TYPE",
+ 135: "BLOCK_CONTEXT_TYPE",
+ 136: "CATCH_CONTEXT_TYPE",
+ 137: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 138: "EVAL_CONTEXT_TYPE",
+ 139: "FUNCTION_CONTEXT_TYPE",
+ 140: "MODULE_CONTEXT_TYPE",
+ 141: "NATIVE_CONTEXT_TYPE",
+ 142: "SCRIPT_CONTEXT_TYPE",
+ 143: "WITH_CONTEXT_TYPE",
+ 144: "WEAK_FIXED_ARRAY_TYPE",
+ 145: "TRANSITION_ARRAY_TYPE",
+ 146: "CALL_HANDLER_INFO_TYPE",
+ 147: "CELL_TYPE",
+ 148: "CODE_DATA_CONTAINER_TYPE",
+ 149: "DESCRIPTOR_ARRAY_TYPE",
+ 150: "FEEDBACK_CELL_TYPE",
+ 151: "FEEDBACK_VECTOR_TYPE",
+ 152: "LOAD_HANDLER_TYPE",
+ 153: "PREPARSE_DATA_TYPE",
+ 154: "PROPERTY_ARRAY_TYPE",
+ 155: "PROPERTY_CELL_TYPE",
+ 156: "SHARED_FUNCTION_INFO_TYPE",
+ 157: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 158: "SMALL_ORDERED_HASH_SET_TYPE",
+ 159: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 160: "STORE_HANDLER_TYPE",
+ 161: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 162: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 163: "WEAK_ARRAY_LIST_TYPE",
+ 164: "WEAK_CELL_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
@@ -196,10 +189,10 @@ KNOWN_MAPS = {
("read_only_space", 0x00139): (74, "FreeSpaceMap"),
("read_only_space", 0x00189): (68, "MetaMap"),
("read_only_space", 0x00209): (67, "NullMap"),
- ("read_only_space", 0x00271): (156, "DescriptorArrayMap"),
- ("read_only_space", 0x002d1): (151, "WeakFixedArrayMap"),
- ("read_only_space", 0x00321): (88, "OnePointerFillerMap"),
- ("read_only_space", 0x00371): (88, "TwoPointerFillerMap"),
+ ("read_only_space", 0x00271): (149, "DescriptorArrayMap"),
+ ("read_only_space", 0x002d1): (144, "WeakFixedArrayMap"),
+ ("read_only_space", 0x00321): (77, "OnePointerFillerMap"),
+ ("read_only_space", 0x00371): (77, "TwoPointerFillerMap"),
("read_only_space", 0x003f1): (67, "UninitializedMap"),
("read_only_space", 0x00461): (8, "OneByteInternalizedStringMap"),
("read_only_space", 0x00501): (67, "UndefinedMap"),
@@ -207,71 +200,71 @@ KNOWN_MAPS = {
("read_only_space", 0x005e1): (67, "TheHoleMap"),
("read_only_space", 0x00689): (67, "BooleanMap"),
("read_only_space", 0x00761): (72, "ByteArrayMap"),
- ("read_only_space", 0x007b1): (126, "FixedArrayMap"),
- ("read_only_space", 0x00801): (126, "FixedCOWArrayMap"),
- ("read_only_space", 0x00851): (129, "HashTableMap"),
+ ("read_only_space", 0x007b1): (119, "FixedArrayMap"),
+ ("read_only_space", 0x00801): (119, "FixedCOWArrayMap"),
+ ("read_only_space", 0x00851): (122, "HashTableMap"),
("read_only_space", 0x008a1): (64, "SymbolMap"),
("read_only_space", 0x008f1): (40, "OneByteStringMap"),
- ("read_only_space", 0x00941): (139, "ScopeInfoMap"),
- ("read_only_space", 0x00991): (163, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x00941): (132, "ScopeInfoMap"),
+ ("read_only_space", 0x00991): (156, "SharedFunctionInfoMap"),
("read_only_space", 0x009e1): (69, "CodeMap"),
- ("read_only_space", 0x00a31): (146, "FunctionContextMap"),
- ("read_only_space", 0x00a81): (154, "CellMap"),
- ("read_only_space", 0x00ad1): (162, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x00a31): (139, "FunctionContextMap"),
+ ("read_only_space", 0x00a81): (147, "CellMap"),
+ ("read_only_space", 0x00ad1): (155, "GlobalPropertyCellMap"),
("read_only_space", 0x00b21): (71, "ForeignMap"),
- ("read_only_space", 0x00b71): (152, "TransitionArrayMap"),
- ("read_only_space", 0x00bc1): (158, "FeedbackVectorMap"),
+ ("read_only_space", 0x00b71): (145, "TransitionArrayMap"),
+ ("read_only_space", 0x00bc1): (151, "FeedbackVectorMap"),
("read_only_space", 0x00c61): (67, "ArgumentsMarkerMap"),
("read_only_space", 0x00d01): (67, "ExceptionMap"),
("read_only_space", 0x00da1): (67, "TerminationExceptionMap"),
("read_only_space", 0x00e49): (67, "OptimizedOutMap"),
("read_only_space", 0x00ee9): (67, "StaleRegisterMap"),
- ("read_only_space", 0x00f59): (148, "NativeContextMap"),
- ("read_only_space", 0x00fa9): (147, "ModuleContextMap"),
- ("read_only_space", 0x00ff9): (145, "EvalContextMap"),
- ("read_only_space", 0x01049): (149, "ScriptContextMap"),
- ("read_only_space", 0x01099): (141, "AwaitContextMap"),
- ("read_only_space", 0x010e9): (142, "BlockContextMap"),
- ("read_only_space", 0x01139): (143, "CatchContextMap"),
- ("read_only_space", 0x01189): (150, "WithContextMap"),
- ("read_only_space", 0x011d9): (144, "DebugEvaluateContextMap"),
- ("read_only_space", 0x01229): (140, "ScriptContextTableMap"),
- ("read_only_space", 0x01279): (128, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x012c9): (87, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x01319): (126, "ArrayListMap"),
+ ("read_only_space", 0x00f59): (141, "NativeContextMap"),
+ ("read_only_space", 0x00fa9): (140, "ModuleContextMap"),
+ ("read_only_space", 0x00ff9): (138, "EvalContextMap"),
+ ("read_only_space", 0x01049): (142, "ScriptContextMap"),
+ ("read_only_space", 0x01099): (134, "AwaitContextMap"),
+ ("read_only_space", 0x010e9): (135, "BlockContextMap"),
+ ("read_only_space", 0x01139): (136, "CatchContextMap"),
+ ("read_only_space", 0x01189): (143, "WithContextMap"),
+ ("read_only_space", 0x011d9): (137, "DebugEvaluateContextMap"),
+ ("read_only_space", 0x01229): (133, "ScriptContextTableMap"),
+ ("read_only_space", 0x01279): (121, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x012c9): (76, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x01319): (119, "ArrayListMap"),
("read_only_space", 0x01369): (66, "BigIntMap"),
- ("read_only_space", 0x013b9): (127, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x013b9): (120, "ObjectBoilerplateDescriptionMap"),
("read_only_space", 0x01409): (73, "BytecodeArrayMap"),
- ("read_only_space", 0x01459): (155, "CodeDataContainerMap"),
- ("read_only_space", 0x014a9): (86, "FixedDoubleArrayMap"),
- ("read_only_space", 0x014f9): (134, "GlobalDictionaryMap"),
- ("read_only_space", 0x01549): (157, "ManyClosuresCellMap"),
- ("read_only_space", 0x01599): (126, "ModuleInfoMap"),
+ ("read_only_space", 0x01459): (148, "CodeDataContainerMap"),
+ ("read_only_space", 0x014a9): (75, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x014f9): (127, "GlobalDictionaryMap"),
+ ("read_only_space", 0x01549): (150, "ManyClosuresCellMap"),
+ ("read_only_space", 0x01599): (119, "ModuleInfoMap"),
("read_only_space", 0x015e9): (70, "MutableHeapNumberMap"),
- ("read_only_space", 0x01639): (133, "NameDictionaryMap"),
- ("read_only_space", 0x01689): (157, "NoClosuresCellMap"),
- ("read_only_space", 0x016d9): (135, "NumberDictionaryMap"),
- ("read_only_space", 0x01729): (157, "OneClosureCellMap"),
- ("read_only_space", 0x01779): (130, "OrderedHashMapMap"),
- ("read_only_space", 0x017c9): (131, "OrderedHashSetMap"),
- ("read_only_space", 0x01819): (132, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x01869): (160, "PreparseDataMap"),
- ("read_only_space", 0x018b9): (161, "PropertyArrayMap"),
- ("read_only_space", 0x01909): (153, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x01959): (153, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x019a9): (153, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x019f9): (136, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x01a49): (126, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x01a99): (164, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x01ae9): (165, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x01b39): (166, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x01b89): (137, "StringTableMap"),
- ("read_only_space", 0x01bd9): (168, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x01c29): (169, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x01c79): (170, "WeakArrayListMap"),
- ("read_only_space", 0x01cc9): (138, "EphemeronHashTableMap"),
- ("read_only_space", 0x01d19): (125, "EmbedderDataArrayMap"),
- ("read_only_space", 0x01d69): (171, "WeakCellMap"),
+ ("read_only_space", 0x01639): (126, "NameDictionaryMap"),
+ ("read_only_space", 0x01689): (150, "NoClosuresCellMap"),
+ ("read_only_space", 0x016d9): (128, "NumberDictionaryMap"),
+ ("read_only_space", 0x01729): (150, "OneClosureCellMap"),
+ ("read_only_space", 0x01779): (123, "OrderedHashMapMap"),
+ ("read_only_space", 0x017c9): (124, "OrderedHashSetMap"),
+ ("read_only_space", 0x01819): (125, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x01869): (153, "PreparseDataMap"),
+ ("read_only_space", 0x018b9): (154, "PropertyArrayMap"),
+ ("read_only_space", 0x01909): (146, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x01959): (146, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x019a9): (146, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x019f9): (129, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x01a49): (119, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x01a99): (157, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x01ae9): (158, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x01b39): (159, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x01b89): (130, "StringTableMap"),
+ ("read_only_space", 0x01bd9): (161, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x01c29): (162, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x01c79): (163, "WeakArrayListMap"),
+ ("read_only_space", 0x01cc9): (131, "EphemeronHashTableMap"),
+ ("read_only_space", 0x01d19): (118, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x01d69): (164, "WeakCellMap"),
("read_only_space", 0x01db9): (58, "NativeSourceStringMap"),
("read_only_space", 0x01e09): (32, "StringMap"),
("read_only_space", 0x01e59): (41, "ConsOneByteStringMap"),
@@ -289,62 +282,55 @@ KNOWN_MAPS = {
("read_only_space", 0x02219): (18, "UncachedExternalInternalizedStringMap"),
("read_only_space", 0x02269): (26, "UncachedExternalOneByteInternalizedStringMap"),
("read_only_space", 0x022b9): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x02309): (76, "FixedUint8ArrayMap"),
- ("read_only_space", 0x02359): (75, "FixedInt8ArrayMap"),
- ("read_only_space", 0x023a9): (78, "FixedUint16ArrayMap"),
- ("read_only_space", 0x023f9): (77, "FixedInt16ArrayMap"),
- ("read_only_space", 0x02449): (80, "FixedUint32ArrayMap"),
- ("read_only_space", 0x02499): (79, "FixedInt32ArrayMap"),
- ("read_only_space", 0x024e9): (81, "FixedFloat32ArrayMap"),
- ("read_only_space", 0x02539): (82, "FixedFloat64ArrayMap"),
- ("read_only_space", 0x02589): (83, "FixedUint8ClampedArrayMap"),
- ("read_only_space", 0x025d9): (85, "FixedBigUint64ArrayMap"),
- ("read_only_space", 0x02629): (84, "FixedBigInt64ArrayMap"),
- ("read_only_space", 0x02679): (67, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x026e1): (98, "EnumCacheMap"),
- ("read_only_space", 0x02781): (114, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x02ad1): (101, "InterceptorInfoMap"),
- ("read_only_space", 0x050b9): (89, "AccessCheckInfoMap"),
- ("read_only_space", 0x05109): (90, "AccessorInfoMap"),
- ("read_only_space", 0x05159): (91, "AccessorPairMap"),
- ("read_only_space", 0x051a9): (92, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x051f9): (93, "AllocationMementoMap"),
- ("read_only_space", 0x05249): (94, "AsmWasmDataMap"),
- ("read_only_space", 0x05299): (95, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x052e9): (96, "ClassPositionsMap"),
- ("read_only_space", 0x05339): (97, "DebugInfoMap"),
- ("read_only_space", 0x05389): (99, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x053d9): (100, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x05429): (102, "InterpreterDataMap"),
- ("read_only_space", 0x05479): (103, "ModuleInfoEntryMap"),
- ("read_only_space", 0x054c9): (104, "ModuleMap"),
- ("read_only_space", 0x05519): (105, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x05569): (106, "PromiseCapabilityMap"),
- ("read_only_space", 0x055b9): (107, "PromiseReactionMap"),
- ("read_only_space", 0x05609): (108, "PrototypeInfoMap"),
- ("read_only_space", 0x05659): (109, "ScriptMap"),
- ("read_only_space", 0x056a9): (110, "StackFrameInfoMap"),
- ("read_only_space", 0x056f9): (111, "StackTraceFrameMap"),
- ("read_only_space", 0x05749): (112, "Tuple2Map"),
- ("read_only_space", 0x05799): (113, "Tuple3Map"),
- ("read_only_space", 0x057e9): (115, "WasmDebugInfoMap"),
- ("read_only_space", 0x05839): (116, "WasmExceptionTagMap"),
- ("read_only_space", 0x05889): (117, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x058d9): (118, "CallableTaskMap"),
- ("read_only_space", 0x05929): (119, "CallbackTaskMap"),
- ("read_only_space", 0x05979): (120, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x059c9): (121, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x05a19): (122, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x05a69): (123, "FinalizationGroupCleanupJobTaskMap"),
- ("read_only_space", 0x05ab9): (124, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05b09): (124, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05b59): (159, "LoadHandler1Map"),
- ("read_only_space", 0x05ba9): (159, "LoadHandler2Map"),
- ("read_only_space", 0x05bf9): (159, "LoadHandler3Map"),
- ("read_only_space", 0x05c49): (167, "StoreHandler0Map"),
- ("read_only_space", 0x05c99): (167, "StoreHandler1Map"),
- ("read_only_space", 0x05ce9): (167, "StoreHandler2Map"),
- ("read_only_space", 0x05d39): (167, "StoreHandler3Map"),
+ ("read_only_space", 0x02309): (67, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x02371): (87, "EnumCacheMap"),
+ ("read_only_space", 0x02411): (105, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02601): (90, "InterceptorInfoMap"),
+ ("read_only_space", 0x04d99): (78, "AccessCheckInfoMap"),
+ ("read_only_space", 0x04de9): (79, "AccessorInfoMap"),
+ ("read_only_space", 0x04e39): (80, "AccessorPairMap"),
+ ("read_only_space", 0x04e89): (81, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x04ed9): (82, "AllocationMementoMap"),
+ ("read_only_space", 0x04f29): (83, "AsmWasmDataMap"),
+ ("read_only_space", 0x04f79): (84, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x04fc9): (85, "ClassPositionsMap"),
+ ("read_only_space", 0x05019): (86, "DebugInfoMap"),
+ ("read_only_space", 0x05069): (88, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x050b9): (89, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x05109): (91, "InterpreterDataMap"),
+ ("read_only_space", 0x05159): (92, "ModuleInfoEntryMap"),
+ ("read_only_space", 0x051a9): (93, "ModuleMap"),
+ ("read_only_space", 0x051f9): (94, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x05249): (95, "PromiseCapabilityMap"),
+ ("read_only_space", 0x05299): (96, "PromiseReactionMap"),
+ ("read_only_space", 0x052e9): (97, "PrototypeInfoMap"),
+ ("read_only_space", 0x05339): (98, "ScriptMap"),
+ ("read_only_space", 0x05389): (99, "SourcePositionTableWithFrameCacheMap"),
+ ("read_only_space", 0x053d9): (100, "StackFrameInfoMap"),
+ ("read_only_space", 0x05429): (101, "StackTraceFrameMap"),
+ ("read_only_space", 0x05479): (102, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x054c9): (103, "Tuple2Map"),
+ ("read_only_space", 0x05519): (104, "Tuple3Map"),
+ ("read_only_space", 0x05569): (106, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x055b9): (107, "WasmDebugInfoMap"),
+ ("read_only_space", 0x05609): (108, "WasmExceptionTagMap"),
+ ("read_only_space", 0x05659): (109, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x056a9): (110, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x056f9): (111, "CallableTaskMap"),
+ ("read_only_space", 0x05749): (112, "CallbackTaskMap"),
+ ("read_only_space", 0x05799): (113, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x057e9): (114, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x05839): (115, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x05889): (116, "FinalizationGroupCleanupJobTaskMap"),
+ ("read_only_space", 0x058d9): (117, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05929): (117, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05979): (152, "LoadHandler1Map"),
+ ("read_only_space", 0x059c9): (152, "LoadHandler2Map"),
+ ("read_only_space", 0x05a19): (152, "LoadHandler3Map"),
+ ("read_only_space", 0x05a69): (160, "StoreHandler0Map"),
+ ("read_only_space", 0x05ab9): (160, "StoreHandler1Map"),
+ ("read_only_space", 0x05b09): (160, "StoreHandler2Map"),
+ ("read_only_space", 0x05b59): (160, "StoreHandler3Map"),
("map_space", 0x00139): (1057, "ExternalMap"),
("map_space", 0x00189): (1073, "JSMessageObjectMap"),
}
@@ -369,38 +355,29 @@ KNOWN_OBJECTS = {
("read_only_space", 0x00d71): "TerminationException",
("read_only_space", 0x00e19): "OptimizedOut",
("read_only_space", 0x00eb9): "StaleRegister",
- ("read_only_space", 0x026c9): "EmptyEnumCache",
- ("read_only_space", 0x02731): "EmptyPropertyArray",
- ("read_only_space", 0x02741): "EmptyByteArray",
- ("read_only_space", 0x02751): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x02769): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x027d1): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x027e1): "EmptyFixedUint8Array",
- ("read_only_space", 0x02801): "EmptyFixedInt8Array",
- ("read_only_space", 0x02821): "EmptyFixedUint16Array",
- ("read_only_space", 0x02841): "EmptyFixedInt16Array",
- ("read_only_space", 0x02861): "EmptyFixedUint32Array",
- ("read_only_space", 0x02881): "EmptyFixedInt32Array",
- ("read_only_space", 0x028a1): "EmptyFixedFloat32Array",
- ("read_only_space", 0x028c1): "EmptyFixedFloat64Array",
- ("read_only_space", 0x028e1): "EmptyFixedUint8ClampedArray",
- ("read_only_space", 0x02901): "EmptyFixedBigUint64Array",
- ("read_only_space", 0x02921): "EmptyFixedBigInt64Array",
- ("read_only_space", 0x02941): "EmptySloppyArgumentsElements",
- ("read_only_space", 0x02961): "EmptySlowElementDictionary",
- ("read_only_space", 0x029a9): "EmptyOrderedHashMap",
- ("read_only_space", 0x029d1): "EmptyOrderedHashSet",
- ("read_only_space", 0x029f9): "EmptyFeedbackMetadata",
- ("read_only_space", 0x02a09): "EmptyPropertyCell",
- ("read_only_space", 0x02a31): "EmptyPropertyDictionary",
- ("read_only_space", 0x02a81): "NoOpInterceptorInfo",
- ("read_only_space", 0x02b21): "EmptyWeakArrayList",
- ("read_only_space", 0x02b39): "InfinityValue",
- ("read_only_space", 0x02b49): "MinusZeroValue",
- ("read_only_space", 0x02b59): "MinusInfinityValue",
- ("read_only_space", 0x02b69): "SelfReferenceMarker",
- ("read_only_space", 0x02bc1): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x02bd9): "HashSeed",
+ ("read_only_space", 0x02359): "EmptyEnumCache",
+ ("read_only_space", 0x023c1): "EmptyPropertyArray",
+ ("read_only_space", 0x023d1): "EmptyByteArray",
+ ("read_only_space", 0x023e1): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x023f9): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x02461): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x02471): "EmptySloppyArgumentsElements",
+ ("read_only_space", 0x02491): "EmptySlowElementDictionary",
+ ("read_only_space", 0x024d9): "EmptyOrderedHashMap",
+ ("read_only_space", 0x02501): "EmptyOrderedHashSet",
+ ("read_only_space", 0x02529): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x02539): "EmptyPropertyCell",
+ ("read_only_space", 0x02561): "EmptyPropertyDictionary",
+ ("read_only_space", 0x025b1): "NoOpInterceptorInfo",
+ ("read_only_space", 0x02651): "EmptyWeakArrayList",
+ ("read_only_space", 0x02669): "InfinityValue",
+ ("read_only_space", 0x02679): "MinusZeroValue",
+ ("read_only_space", 0x02689): "MinusInfinityValue",
+ ("read_only_space", 0x02699): "SelfReferenceMarker",
+ ("read_only_space", 0x026f1): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x02709): "TrampolineTrivialCodeDataContainer",
+ ("read_only_space", 0x02721): "TrampolinePromiseRejectionCodeDataContainer",
+ ("read_only_space", 0x02739): "HashSeed",
("old_space", 0x00139): "ArgumentsIteratorAccessor",
("old_space", 0x001a9): "ArrayLengthAccessor",
("old_space", 0x00219): "BoundFunctionLengthAccessor",
@@ -448,6 +425,7 @@ FRAME_MARKERS = (
"JS_TO_WASM",
"WASM_INTERPRETER_ENTRY",
"C_WASM_ENTRY",
+ "WASM_EXIT",
"WASM_COMPILE_LAZY",
"INTERPRETED",
"STUB",
diff --git a/deps/v8/tools/vim/ninja-build.vim b/deps/v8/tools/vim/ninja-build.vim
index 3e9b8948ca..7c885255ce 100644
--- a/deps/v8/tools/vim/ninja-build.vim
+++ b/deps/v8/tools/vim/ninja-build.vim
@@ -53,11 +53,8 @@ def path_to_build_dir(configuration):
def compute_ninja_command_for_targets(targets='', configuration=None):
- flags = []
- if "use_goma=1" in os.getenv('GYP_DEFINES', '').split(' '):
- flags = ['-j', '512']
build_dir = path_to_build_dir(configuration);
- build_cmd = ' '.join(['ninja'] + flags + ['-C', build_dir, targets])
+ build_cmd = ' '.join(['autoninja', '-C', build_dir, targets])
vim.command('return "%s"' % build_cmd)
diff --git a/deps/v8/tools/wasm-compilation-hints/OWNERS b/deps/v8/tools/wasm-compilation-hints/OWNERS
new file mode 100644
index 0000000000..4c00a60a00
--- /dev/null
+++ b/deps/v8/tools/wasm-compilation-hints/OWNERS
@@ -0,0 +1,2 @@
+clemensh@chromium.org
+mstarzinger@chromium.org
diff --git a/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py b/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py
new file mode 100755
index 0000000000..fd4b65b8ff
--- /dev/null
+++ b/deps/v8/tools/wasm-compilation-hints/inject-compilation-hints.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import argparse
+import io
+import sys
+
+from wasm import *
+
+FUNCTION_SECTION_ID = 3
+
+def parse_args():
+ parser = argparse.ArgumentParser(\
+ description="Inject compilation hints into a Wasm module.")
+ parser.add_argument("-i", "--in-wasm-file", \
+ type=str, \
+ help="original wasm module")
+ parser.add_argument("-o", "--out-wasm-file", \
+ type=str, \
+ help="wasm module with injected hints")
+ parser.add_argument("-x", "--hints-file", \
+ type=str, required=True, \
+ help="binary hints file to be injected as a custom section " + \
+ "'compilationHints'")
+ return parser.parse_args()
+
+if __name__ == "__main__":
+ args = parse_args()
+ in_wasm_file = args.in_wasm_file if args.in_wasm_file else sys.stdin.fileno()
+ out_wasm_file = args.out_wasm_file if args.out_wasm_file else sys.stdout.fileno()
+ hints_bs = open(args.hints_file, "rb").read()
+ with io.open(in_wasm_file, "rb") as fin:
+ with io.open(out_wasm_file, "wb") as fout:
+ magic_number, bs = read_magic_number(fin);
+ fout.write(bs)
+ version, bs = read_version(fin);
+ fout.write(bs)
+ num_declared_functions = None
+ while True:
+ id, bs = read_varuintN(fin)
+ fout.write(bs)
+ if id == None:
+ break
+ payload_length, bs = read_varuintN(fin)
+ fout.write(bs)
+
+ # Peek into function section for upcoming validity check.
+ if id == FUNCTION_SECTION_ID:
+ num_declared_functions, bs = peek_varuintN(fin)
+
+ bs = fin.read(payload_length)
+ fout.write(bs)
+
+      # Insert hint section after function section.
+ if id == FUNCTION_SECTION_ID:
+ assert len(hints_bs) == num_declared_functions, "unexpected number of hints"
+ write_compilation_hints_section(fout, hints_bs)
diff --git a/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py b/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py
new file mode 100755
index 0000000000..a762bd78a6
--- /dev/null
+++ b/deps/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import argparse
+import io
+import sys
+
+from wasm import *
+
+def parse_args():
+ parser = argparse.ArgumentParser(\
+ description="Read compilation hints from Wasm module.")
+ parser.add_argument("in_wasm_file", \
+ type=str, \
+ help="wasm module")
+ return parser.parse_args()
+
+if __name__ == "__main__":
+ args = parse_args()
+ in_wasm_file = args.in_wasm_file if args.in_wasm_file else sys.stdin.fileno()
+ with io.open(in_wasm_file, "rb") as fin:
+ read_magic_number(fin);
+ read_version(fin);
+ while True:
+ id, bs = read_varuintN(fin)
+ if id == None:
+ break
+ payload_length, bs = read_varuintN(fin)
+ if id == CUSTOM_SECTION_ID:
+ section_name_length, section_name_length_bs = read_varuintN(fin)
+ section_name_bs = fin.read(section_name_length)
+ if section_name_bs == "compilationHints":
+ num_hints, bs = read_varuintN(fin)
+ print "Custom section compilationHints with", num_hints, "hints:"
+ for i in range(num_hints):
+ hint, bs = read_uint8(fin)
+ print i, hex(hint)
+ else:
+ remaining_length = payload_length \
+ - len(section_name_length_bs) \
+ - len(section_name_bs)
+ fin.read()
+ else:
+ fin.read(payload_length)
diff --git a/deps/v8/tools/wasm-compilation-hints/wasm.py b/deps/v8/tools/wasm-compilation-hints/wasm.py
new file mode 100644
index 0000000000..ae3d0841e8
--- /dev/null
+++ b/deps/v8/tools/wasm-compilation-hints/wasm.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import io
+import math
+import struct
+import sys
+
+CUSTOM_SECTION_ID = 0
+FUNCTION_SECTION_ID = 3
+
+def peek_uint8(fin):
+ bs = fin.peek(1)[:1]
+ if len(bs) != 1:
+ return None, bs
+ return ord(bs[0]), bs
+
+def read_uint8(fin):
+ value, bs = peek_uint8(fin)
+ fin.read(len(bs))
+ return value, bs
+
+def peek_uint32(fin):
+ bs = fin.peek(4)[:4]
+ if len(bs) != 4:
+ return None, bs
+ return ord(bs[0]) | ord(bs[1]) << 8 | ord(bs[2]) << 16 | ord(bs[3]) << 24, bs
+
+def read_uint32(fin):
+ value, bs = peek_uint32(fin)
+ fin.read(len(bs))
+ return value, bs
+
+def peek_varuintN(fin):
+ value = 0
+ shift = 0
+ n = 1
+ while True:
+ bs = fin.peek(n)[:n]
+ if len(bs) < n:
+ return None, bs
+ b = ord(bs[-1])
+ value |= (b & 0x7F) << shift;
+ if (b & 0x80) == 0x00:
+ return value, bs
+ shift += 7;
+ n += 1
+
+def read_varuintN(fin):
+ value, bs = peek_varuintN(fin)
+ fin.read(len(bs))
+ return value, bs
+
+def to_varuintN(value):
+ bs = ""
+ while True:
+ b = value & 0x7F
+ value >>= 7
+ if (value != 0x00):
+ b |= 0x80
+ bs += chr(b)
+ if value == 0x00:
+ return bs
+
+def write_varuintN(value, fout):
+ bs = to_varuintN(value)
+ fout.write(bs)
+ return bs
+
+def peek_magic_number(fin, expected_magic_number=0x6d736100):
+ magic_number, bs = peek_uint32(fin)
+ assert magic_number == expected_magic_number, "unexpected magic number"
+ return magic_number, bs
+
+def read_magic_number(fin, expected_magic_number=0x6d736100):
+ magic_number, bs = peek_magic_number(fin, expected_magic_number)
+ fin.read(len(bs))
+ return magic_number, bs
+
+def peek_version(fin, expected_version=1):
+ version, bs = peek_uint32(fin)
+ assert version == expected_version, "unexpected version"
+ return version, bs
+
+def read_version(fin, expected_version=1):
+ version, bs = peek_version(fin, expected_version)
+ fin.read(len(bs))
+ return version, bs
+
+def write_custom_section(fout, section_name_bs, payload_bs):
+ section_name_length_bs = to_varuintN(len(section_name_bs))
+ payload_length_bs = to_varuintN(len(section_name_bs) \
+ + len(section_name_length_bs) + len(payload_bs))
+ section_id_bs = to_varuintN(CUSTOM_SECTION_ID)
+ fout.write(section_id_bs)
+ fout.write(payload_length_bs)
+ fout.write(section_name_length_bs)
+ fout.write(section_name_bs)
+ fout.write(payload_bs)
+
+def write_compilation_hints_section(fout, hints_bs):
+ num_compilation_hints_bs = to_varuintN(len(hints_bs))
+ section_name_bs = b"compilationHints"
+ payload_bs = num_compilation_hints_bs + hints_bs
+ write_custom_section(fout, section_name_bs, payload_bs)
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 1747d02022..5f663412eb 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,4 +7,4 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.....
-The bartender starts to shake the bottles....................................
+The bartender starts to shake the bottles.......